author Michaël Zasso <targos@protonmail.com> 2016-09-06 22:49:51 +0200
committer Michaël Zasso <targos@protonmail.com> 2016-09-22 09:51:19 +0200
commit ec02b811a8a5c999bab4de312be2d732b7d9d50b (patch)
tree ca3068017254f238cf413a451c57a803572983a4 /deps/v8
parent d2eb7ce0105369a9cad82787cb33a665e9bd00ad (diff)
download android-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.gz
android-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.bz2
android-node-v8-ec02b811a8a5c999bab4de312be2d732b7d9d50b.zip
deps: update V8 to 5.4.500.27
Pick up latest commit from the 5.4-lkgr branch.

deps: edit V8 gitignore to allow trace event copy
deps: update V8 trace event to 315bf1e2d45be7d53346c31cfcc37424a32c30c8
deps: edit V8 gitignore to allow gtest_prod.h copy
deps: update V8 gtest to 6f8a66431cb592dad629028a50b3dd418a408c87

PR-URL: https://github.com/nodejs/node/pull/8317
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
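The two gitignore edits listed above re-include vendored files that V8's upstream .gitignore would otherwise exclude, so the trace event and gtest_prod.h copies survive the dependency update. A minimal sketch of the usual whitelist pattern (illustrative paths, not the exact entries from this commit):

    # '!' re-includes a path excluded by an earlier pattern. A file cannot be
    # re-included while a parent directory is still excluded, so each level of
    # the path is opened up in turn:
    !/base/
    /base/*
    !/base/trace_event/
    /base/trace_event/*
    !/base/trace_event/common/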
Diffstat (limited to 'deps/v8')
-rw-r--r--deps/v8/.gitignore27
-rw-r--r--deps/v8/.gn51
-rw-r--r--deps/v8/AUTHORS8
-rw-r--r--deps/v8/BUILD.gn1015
-rw-r--r--deps/v8/ChangeLog4959
-rw-r--r--deps/v8/DEPS67
-rw-r--r--deps/v8/LICENSE.fdlibm (renamed from deps/v8/src/third_party/fdlibm/LICENSE)0
-rw-r--r--deps/v8/Makefile98
-rw-r--r--deps/v8/Makefile.android6
-rw-r--r--deps/v8/Makefile.nacl97
-rw-r--r--deps/v8/OWNERS4
-rw-r--r--deps/v8/PRESUBMIT.py19
-rw-r--r--deps/v8/WATCHLISTS8
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h34
-rwxr-xr-xdeps/v8/build/has_valgrind.py21
-rw-r--r--deps/v8/build_overrides/build.gni18
-rw-r--r--deps/v8/build_overrides/gtest.gni15
-rw-r--r--deps/v8/build_overrides/v8.gni26
-rw-r--r--deps/v8/gni/isolate.gni175
-rw-r--r--deps/v8/gni/v8.gni108
-rw-r--r--deps/v8/gypfiles/OWNERS (renamed from deps/v8/build/OWNERS)0
-rw-r--r--deps/v8/gypfiles/README.txt (renamed from deps/v8/build/README.txt)0
-rw-r--r--deps/v8/gypfiles/all.gyp (renamed from deps/v8/build/all.gyp)14
-rw-r--r--deps/v8/gypfiles/config/win/msvs_dependencies.isolate (renamed from deps/v8/build/config/win/msvs_dependencies.isolate)0
-rwxr-xr-xdeps/v8/gypfiles/coverage_wrapper.py (renamed from deps/v8/build/coverage_wrapper.py)0
-rw-r--r--deps/v8/gypfiles/detect_v8_host_arch.py (renamed from deps/v8/build/detect_v8_host_arch.py)0
-rwxr-xr-xdeps/v8/gypfiles/download_gold_plugin.py (renamed from deps/v8/build/download_gold_plugin.py)0
-rw-r--r--deps/v8/gypfiles/features.gypi (renamed from deps/v8/build/features.gypi)0
-rwxr-xr-xdeps/v8/gypfiles/get_landmines.py (renamed from deps/v8/build/get_landmines.py)1
-rw-r--r--deps/v8/gypfiles/gyp_environment.py (renamed from deps/v8/build/gyp_environment.py)6
-rwxr-xr-xdeps/v8/gypfiles/gyp_v8 (renamed from deps/v8/build/gyp_v8)8
-rw-r--r--deps/v8/gypfiles/gyp_v8.py (renamed from deps/v8/build/gyp_v8.py)0
-rw-r--r--deps/v8/gypfiles/isolate.gypi (renamed from deps/v8/build/isolate.gypi)6
-rw-r--r--deps/v8/gypfiles/landmine_utils.py (renamed from deps/v8/build/landmine_utils.py)0
-rwxr-xr-xdeps/v8/gypfiles/landmines.py (renamed from deps/v8/build/landmines.py)2
-rw-r--r--deps/v8/gypfiles/mac/asan.gyp (renamed from deps/v8/build/mac/asan.gyp)0
-rw-r--r--deps/v8/gypfiles/set_clang_warning_flags.gypi59
-rw-r--r--deps/v8/gypfiles/shim_headers.gypi (renamed from deps/v8/build/shim_headers.gypi)0
-rw-r--r--deps/v8/gypfiles/standalone.gypi (renamed from deps/v8/build/standalone.gypi)250
-rw-r--r--deps/v8/gypfiles/toolchain.gypi (renamed from deps/v8/build/toolchain.gypi)99
-rw-r--r--deps/v8/gypfiles/vs_toolchain.py (renamed from deps/v8/build/vs_toolchain.py)235
-rw-r--r--deps/v8/include/libplatform/DEPS5
-rw-r--r--deps/v8/include/libplatform/libplatform.h9
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h253
-rw-r--r--deps/v8/include/v8-debug.h27
-rw-r--r--deps/v8/include/v8-experimental.h4
-rw-r--r--deps/v8/include/v8-platform.h6
-rw-r--r--deps/v8/include/v8-profiler.h99
-rw-r--r--deps/v8/include/v8-util.h25
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h887
-rw-r--r--deps/v8/include/v8config.h4
-rw-r--r--deps/v8/infra/config/cq.cfg58
-rw-r--r--deps/v8/infra/mb/PRESUBMIT.py35
-rw-r--r--deps/v8/infra/mb/mb_config.pyl670
-rw-r--r--deps/v8/samples/hello-world.cc18
-rw-r--r--deps/v8/samples/process.cc17
-rw-r--r--deps/v8/samples/samples.gyp14
-rw-r--r--deps/v8/samples/shell.cc18
-rw-r--r--deps/v8/snapshot_toolchain.gni81
-rw-r--r--deps/v8/src/DEPS8
-rw-r--r--deps/v8/src/accessors.cc420
-rw-r--r--deps/v8/src/accessors.h23
-rw-r--r--deps/v8/src/address-map.cc4
-rw-r--r--deps/v8/src/address-map.h181
-rw-r--r--deps/v8/src/allocation-site-scopes.cc3
-rw-r--r--deps/v8/src/allocation-site-scopes.h2
-rw-r--r--deps/v8/src/allocation.cc11
-rw-r--r--deps/v8/src/allocation.h6
-rw-r--r--deps/v8/src/api-arguments-inl.h116
-rw-r--r--deps/v8/src/api-arguments.cc12
-rw-r--r--deps/v8/src/api-arguments.h105
-rw-r--r--deps/v8/src/api-experimental.cc10
-rw-r--r--deps/v8/src/api-natives.cc559
-rw-r--r--deps/v8/src/api-natives.h4
-rw-r--r--deps/v8/src/api.cc2122
-rw-r--r--deps/v8/src/api.h96
-rw-r--r--deps/v8/src/arguments.h42
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h37
-rw-r--r--deps/v8/src/arm/assembler-arm.cc326
-rw-r--r--deps/v8/src/arm/assembler-arm.h89
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc795
-rw-r--r--deps/v8/src/arm/codegen-arm.cc165
-rw-r--r--deps/v8/src/arm/codegen-arm.h17
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc61
-rw-r--r--deps/v8/src/arm/disasm-arm.cc106
-rw-r--r--deps/v8/src/arm/eh-frame-arm.cc64
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc110
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc433
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h62
-rw-r--r--deps/v8/src/arm/simulator-arm.cc300
-rw-r--r--deps/v8/src/arm/simulator-arm.h16
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h37
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc161
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h65
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc812
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc206
-rw-r--r--deps/v8/src/arm64/codegen-arm64.h17
-rw-r--r--deps/v8/src/arm64/constants-arm64.h189
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc18
-rw-r--r--deps/v8/src/arm64/decoder-arm64-inl.h11
-rw-r--r--deps/v8/src/arm64/decoder-arm64.h87
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc3
-rw-r--r--deps/v8/src/arm64/disasm-arm64.cc36
-rw-r--r--deps/v8/src/arm64/eh-frame-arm64.cc69
-rw-r--r--deps/v8/src/arm64/instrument-arm64.cc25
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc111
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64-inl.h16
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc267
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h76
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc102
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h15
-rw-r--r--deps/v8/src/asmjs/OWNERS10
-rw-r--r--deps/v8/src/asmjs/asm-js.cc275
-rw-r--r--deps/v8/src/asmjs/asm-js.h33
-rw-r--r--deps/v8/src/asmjs/asm-typer.cc2773
-rw-r--r--deps/v8/src/asmjs/asm-typer.h337
-rw-r--r--deps/v8/src/asmjs/asm-types.cc354
-rw-r--r--deps/v8/src/asmjs/asm-types.h347
-rw-r--r--deps/v8/src/asmjs/asm-wasm-builder.cc (renamed from deps/v8/src/wasm/asm-wasm-builder.cc)1135
-rw-r--r--deps/v8/src/asmjs/asm-wasm-builder.h (renamed from deps/v8/src/wasm/asm-wasm-builder.h)11
-rw-r--r--deps/v8/src/assembler.cc633
-rw-r--r--deps/v8/src/assembler.h249
-rw-r--r--deps/v8/src/ast/OWNERS2
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.cc14
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.h18
-rw-r--r--deps/v8/src/ast/ast-expression-visitor.cc411
-rw-r--r--deps/v8/src/ast/ast-expression-visitor.h48
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.cc15
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.h13
-rw-r--r--deps/v8/src/ast/ast-numbering.cc118
-rw-r--r--deps/v8/src/ast/ast-numbering.h20
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h504
-rw-r--r--deps/v8/src/ast/ast-type-bounds.h40
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc123
-rw-r--r--deps/v8/src/ast/ast-value-factory.h156
-rw-r--r--deps/v8/src/ast/ast.cc327
-rw-r--r--deps/v8/src/ast/ast.h1724
-rw-r--r--deps/v8/src/ast/context-slot-cache.cc91
-rw-r--r--deps/v8/src/ast/context-slot-cache.h113
-rw-r--r--deps/v8/src/ast/modules.cc154
-rw-r--r--deps/v8/src/ast/modules.h173
-rw-r--r--deps/v8/src/ast/prettyprinter.cc769
-rw-r--r--deps/v8/src/ast/prettyprinter.h89
-rw-r--r--deps/v8/src/ast/scopeinfo.cc154
-rw-r--r--deps/v8/src/ast/scopeinfo.h157
-rw-r--r--deps/v8/src/ast/scopes.cc1482
-rw-r--r--deps/v8/src/ast/scopes.h898
-rw-r--r--deps/v8/src/ast/variables.cc11
-rw-r--r--deps/v8/src/ast/variables.h43
-rw-r--r--deps/v8/src/background-parsing-task.cc48
-rw-r--r--deps/v8/src/background-parsing-task.h17
-rw-r--r--deps/v8/src/bailout-reason.h37
-rw-r--r--deps/v8/src/base.isolate32
-rw-r--r--deps/v8/src/base/accounting-allocator.cc13
-rw-r--r--deps/v8/src/base/accounting-allocator.h10
-rw-r--r--deps/v8/src/base/atomic-utils.h (renamed from deps/v8/src/atomic-utils.h)17
-rw-r--r--deps/v8/src/base/atomicops.h8
-rw-r--r--deps/v8/src/base/atomicops_internals_mips64_gcc.h50
-rw-r--r--deps/v8/src/base/atomicops_internals_portable.h138
-rw-r--r--deps/v8/src/base/bits.cc46
-rw-r--r--deps/v8/src/base/bits.h38
-rw-r--r--deps/v8/src/base/build_config.h15
-rw-r--r--deps/v8/src/base/compiler-specific.h11
-rw-r--r--deps/v8/src/base/cpu.cc20
-rw-r--r--deps/v8/src/base/cpu.h4
-rw-r--r--deps/v8/src/base/debug/stack_trace.cc40
-rw-r--r--deps/v8/src/base/debug/stack_trace.h96
-rw-r--r--deps/v8/src/base/debug/stack_trace_android.cc91
-rw-r--r--deps/v8/src/base/debug/stack_trace_posix.cc460
-rw-r--r--deps/v8/src/base/debug/stack_trace_win.cc248
-rw-r--r--deps/v8/src/base/file-utils.cc36
-rw-r--r--deps/v8/src/base/file-utils.h18
-rw-r--r--deps/v8/src/base/format-macros.h97
-rw-r--r--deps/v8/src/base/free_deleter.h28
-rw-r--r--deps/v8/src/base/hashmap.h (renamed from deps/v8/src/hashmap.h)103
-rw-r--r--deps/v8/src/base/ieee754.cc2746
-rw-r--r--deps/v8/src/base/ieee754.h80
-rw-r--r--deps/v8/src/base/logging.cc74
-rw-r--r--deps/v8/src/base/logging.h8
-rw-r--r--deps/v8/src/base/macros.h91
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc13
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc18
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc6
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc10
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc48
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc16
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc10
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc27
-rw-r--r--deps/v8/src/base/platform/platform-qnx.cc6
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc7
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc7
-rw-r--r--deps/v8/src/base/platform/platform.h59
-rw-r--r--deps/v8/src/base/platform/semaphore.cc22
-rw-r--r--deps/v8/src/base/platform/semaphore.h11
-rw-r--r--deps/v8/src/base/platform/time.cc228
-rw-r--r--deps/v8/src/base/platform/time.h318
-rw-r--r--deps/v8/src/base/smart-pointers.h124
-rw-r--r--deps/v8/src/base/sys-info.cc5
-rw-r--r--deps/v8/src/base/utils/random-number-generator.cc4
-rw-r--r--deps/v8/src/base/win32-headers.h6
-rw-r--r--deps/v8/src/bignum.cc7
-rw-r--r--deps/v8/src/bootstrapper.cc1392
-rw-r--r--deps/v8/src/bootstrapper.h9
-rw-r--r--deps/v8/src/builtins.cc4955
-rw-r--r--deps/v8/src/builtins.h671
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc (renamed from deps/v8/src/arm/builtins-arm.cc)1029
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc (renamed from deps/v8/src/arm64/builtins-arm64.cc)1122
-rw-r--r--deps/v8/src/builtins/builtins-api.cc291
-rw-r--r--deps/v8/src/builtins/builtins-array.cc2119
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc88
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc62
-rw-r--r--deps/v8/src/builtins/builtins-call.cc151
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc203
-rw-r--r--deps/v8/src/builtins/builtins-conversion.cc357
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc133
-rw-r--r--deps/v8/src/builtins/builtins-date.cc1002
-rw-r--r--deps/v8/src/builtins/builtins-debug.cc27
-rw-r--r--deps/v8/src/builtins/builtins-error.cc135
-rw-r--r--deps/v8/src/builtins/builtins-function.cc297
-rw-r--r--deps/v8/src/builtins/builtins-generator.cc116
-rw-r--r--deps/v8/src/builtins/builtins-global.cc103
-rw-r--r--deps/v8/src/builtins/builtins-handler.cc148
-rw-r--r--deps/v8/src/builtins/builtins-internal.cc145
-rw-r--r--deps/v8/src/builtins/builtins-interpreter.cc54
-rw-r--r--deps/v8/src/builtins/builtins-json.cc41
-rw-r--r--deps/v8/src/builtins/builtins-math.cc561
-rw-r--r--deps/v8/src/builtins/builtins-number.cc235
-rw-r--r--deps/v8/src/builtins/builtins-object.cc914
-rw-r--r--deps/v8/src/builtins/builtins-proxy.cc30
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc274
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc266
-rw-r--r--deps/v8/src/builtins/builtins-string.cc526
-rw-r--r--deps/v8/src/builtins/builtins-symbol.cc76
-rw-r--r--deps/v8/src/builtins/builtins-typedarray.cc101
-rw-r--r--deps/v8/src/builtins/builtins-utils.h137
-rw-r--r--deps/v8/src/builtins/builtins.cc296
-rw-r--r--deps/v8/src/builtins/builtins.h677
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc (renamed from deps/v8/src/ia32/builtins-ia32.cc)1051
-rw-r--r--deps/v8/src/builtins/mips/OWNERS6
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc (renamed from deps/v8/src/mips/builtins-mips.cc)1074
-rw-r--r--deps/v8/src/builtins/mips64/OWNERS6
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc (renamed from deps/v8/src/mips64/builtins-mips64.cc)1059
-rw-r--r--deps/v8/src/builtins/ppc/OWNERS6
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc (renamed from deps/v8/src/ppc/builtins-ppc.cc)1012
-rw-r--r--deps/v8/src/builtins/s390/OWNERS6
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc (renamed from deps/v8/src/s390/builtins-s390.cc)959
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc (renamed from deps/v8/src/x64/builtins-x64.cc)997
-rw-r--r--deps/v8/src/builtins/x87/OWNERS1
-rw-r--r--deps/v8/src/builtins/x87/builtins-x87.cc (renamed from deps/v8/src/x87/builtins-x87.cc)1049
-rw-r--r--deps/v8/src/cancelable-task.cc63
-rw-r--r--deps/v8/src/cancelable-task.h11
-rw-r--r--deps/v8/src/code-events.h183
-rw-r--r--deps/v8/src/code-factory.cc412
-rw-r--r--deps/v8/src/code-factory.h67
-rw-r--r--deps/v8/src/code-stub-assembler.cc4023
-rw-r--r--deps/v8/src/code-stub-assembler.h609
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc1352
-rw-r--r--deps/v8/src/code-stubs.cc3579
-rw-r--r--deps/v8/src/code-stubs.h1404
-rw-r--r--deps/v8/src/codegen.cc60
-rw-r--r--deps/v8/src/codegen.h9
-rw-r--r--deps/v8/src/compilation-cache.cc12
-rw-r--r--deps/v8/src/compilation-cache.h2
-rw-r--r--deps/v8/src/compilation-statistics.cc60
-rw-r--r--deps/v8/src/compilation-statistics.h10
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc185
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h85
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc (renamed from deps/v8/src/optimizing-compile-dispatcher.cc)64
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h (renamed from deps/v8/src/optimizing-compile-dispatcher.h)21
-rw-r--r--deps/v8/src/compiler.cc1717
-rw-r--r--deps/v8/src/compiler.h296
-rw-r--r--deps/v8/src/compiler/OWNERS1
-rw-r--r--deps/v8/src/compiler/access-builder.cc540
-rw-r--r--deps/v8/src/compiler/access-builder.h83
-rw-r--r--deps/v8/src/compiler/access-info.cc287
-rw-r--r--deps/v8/src/compiler/access-info.h62
-rw-r--r--deps/v8/src/compiler/all-nodes.cc38
-rw-r--r--deps/v8/src/compiler/all-nodes.h18
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc900
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h9
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc9
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc609
-rw-r--r--deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc108
-rw-r--r--deps/v8/src/compiler/arm/unwinding-info-writer-arm.h72
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc539
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h11
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc171
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc819
-rw-r--r--deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc109
-rw-r--r--deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h72
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc1018
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h48
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc13
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.h12
-rw-r--r--deps/v8/src/compiler/branch-elimination.cc26
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc518
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h41
-rw-r--r--deps/v8/src/compiler/bytecode-loop-analysis.cc100
-rw-r--r--deps/v8/src/compiler/bytecode-loop-analysis.h67
-rw-r--r--deps/v8/src/compiler/c-linkage.cc26
-rw-r--r--deps/v8/src/compiler/change-lowering.cc713
-rw-r--r--deps/v8/src/compiler/change-lowering.h84
-rw-r--r--deps/v8/src/compiler/checkpoint-elimination.cc53
-rw-r--r--deps/v8/src/compiler/checkpoint-elimination.h30
-rw-r--r--deps/v8/src/compiler/coalesced-live-ranges.cc143
-rw-r--r--deps/v8/src/compiler/coalesced-live-ranges.h158
-rw-r--r--deps/v8/src/compiler/code-assembler.cc1081
-rw-r--r--deps/v8/src/compiler/code-assembler.h487
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h36
-rw-r--r--deps/v8/src/compiler/code-generator.cc280
-rw-r--r--deps/v8/src/compiler/code-generator.h103
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.cc1353
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.h475
-rw-r--r--deps/v8/src/compiler/common-node-cache.cc4
-rw-r--r--deps/v8/src/compiler/common-node-cache.h12
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc87
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h1
-rw-r--r--deps/v8/src/compiler/common-operator.cc437
-rw-r--r--deps/v8/src/compiler/common-operator.h100
-rw-r--r--deps/v8/src/compiler/control-builders.cc12
-rw-r--r--deps/v8/src/compiler/control-builders.h8
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.cc136
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc26
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.h3
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc3295
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h203
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc49
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h17
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc332
-rw-r--r--deps/v8/src/compiler/escape-analysis.h124
-rw-r--r--deps/v8/src/compiler/frame-states.cc6
-rw-r--r--deps/v8/src/compiler/frame-states.h4
-rw-r--r--deps/v8/src/compiler/frame.cc4
-rw-r--r--deps/v8/src/compiler/frame.h41
-rw-r--r--deps/v8/src/compiler/gap-resolver.cc5
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc10
-rw-r--r--deps/v8/src/compiler/graph-reducer.h3
-rw-r--r--deps/v8/src/compiler/graph-replay.cc4
-rw-r--r--deps/v8/src/compiler/graph-trimmer.cc2
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc133
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h6
-rw-r--r--deps/v8/src/compiler/graph.h23
-rw-r--r--deps/v8/src/compiler/greedy-allocator.cc629
-rw-r--r--deps/v8/src/compiler/greedy-allocator.h199
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc649
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h14
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc14
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc307
-rw-r--r--deps/v8/src/compiler/instruction-codes.h39
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc109
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.h21
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h118
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc447
-rw-r--r--deps/v8/src/compiler/instruction-selector.h46
-rw-r--r--deps/v8/src/compiler/instruction.cc202
-rw-r--r--deps/v8/src/compiler/instruction.h190
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc198
-rw-r--r--deps/v8/src/compiler/int64-lowering.h7
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc983
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h64
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc59
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h3
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc2
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc230
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc370
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h5
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc43
-rw-r--r--deps/v8/src/compiler/js-graph.cc122
-rw-r--r--deps/v8/src/compiler/js-graph.h39
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc17
-rw-r--r--deps/v8/src/compiler/js-inlining.cc277
-rw-r--r--deps/v8/src/compiler/js-inlining.h10
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc198
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h13
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc1801
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h83
-rw-r--r--deps/v8/src/compiler/js-operator.cc362
-rw-r--r--deps/v8/src/compiler/js-operator.h51
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc1596
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h21
-rw-r--r--deps/v8/src/compiler/linkage.cc305
-rw-r--r--deps/v8/src/compiler/linkage.h120
-rw-r--r--deps/v8/src/compiler/load-elimination.cc736
-rw-r--r--deps/v8/src/compiler/load-elimination.h171
-rw-r--r--deps/v8/src/compiler/loop-analysis.cc121
-rw-r--r--deps/v8/src/compiler/loop-analysis.h21
-rw-r--r--deps/v8/src/compiler/loop-peeling.cc223
-rw-r--r--deps/v8/src/compiler/loop-peeling.h5
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc406
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.h117
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc436
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h14
-rw-r--r--deps/v8/src/compiler/machine-operator.cc745
-rw-r--r--deps/v8/src/compiler/machine-operator.h453
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc503
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h149
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc603
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h24
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc445
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc562
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h29
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc502
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc20
-rw-r--r--deps/v8/src/compiler/node-cache.cc3
-rw-r--r--deps/v8/src/compiler/node-cache.h8
-rw-r--r--deps/v8/src/compiler/node-marker.h19
-rw-r--r--deps/v8/src/compiler/node-matchers.h197
-rw-r--r--deps/v8/src/compiler/node-properties.cc41
-rw-r--r--deps/v8/src/compiler/node-properties.h12
-rw-r--r--deps/v8/src/compiler/node.cc22
-rw-r--r--deps/v8/src/compiler/node.h1
-rw-r--r--deps/v8/src/compiler/opcodes.h518
-rw-r--r--deps/v8/src/compiler/operation-typer.cc968
-rw-r--r--deps/v8/src/compiler/operation-typer.h92
-rw-r--r--deps/v8/src/compiler/operator-properties.cc87
-rw-r--r--deps/v8/src/compiler/operator-properties.h7
-rw-r--r--deps/v8/src/compiler/operator.cc16
-rw-r--r--deps/v8/src/compiler/operator.h57
-rw-r--r--deps/v8/src/compiler/osr.cc20
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc12
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h13
-rw-r--r--deps/v8/src/compiler/pipeline.cc1155
-rw-r--r--deps/v8/src/compiler/pipeline.h41
-rw-r--r--deps/v8/src/compiler/ppc/OWNERS1
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc667
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h3
-rw-r--r--deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc3
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc240
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc74
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h140
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc239
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.h77
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc765
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h195
-rw-r--r--deps/v8/src/compiler/register-allocator.cc425
-rw-r--r--deps/v8/src/compiler/register-allocator.h65
-rw-r--r--deps/v8/src/compiler/representation-change.cc443
-rw-r--r--deps/v8/src/compiler/representation-change.h141
-rw-r--r--deps/v8/src/compiler/s390/OWNERS1
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc1061
-rw-r--r--deps/v8/src/compiler/s390/instruction-codes-s390.h46
-rw-r--r--deps/v8/src/compiler/s390/instruction-scheduler-s390.cc40
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc664
-rw-r--r--deps/v8/src/compiler/schedule.cc111
-rw-r--r--deps/v8/src/compiler/schedule.h6
-rw-r--r--deps/v8/src/compiler/scheduler.cc8
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc2723
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h25
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc151
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h12
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc645
-rw-r--r--deps/v8/src/compiler/simplified-operator.h201
-rw-r--r--deps/v8/src/compiler/source-position.cc7
-rw-r--r--deps/v8/src/compiler/source-position.h11
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc570
-rw-r--r--deps/v8/src/compiler/store-store-elimination.h25
-rw-r--r--deps/v8/src/compiler/tail-call-optimization.cc4
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.cc86
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.h14
-rw-r--r--deps/v8/src/compiler/type-hints.cc38
-rw-r--r--deps/v8/src/compiler/type-hints.h61
-rw-r--r--deps/v8/src/compiler/typer.cc1251
-rw-r--r--deps/v8/src/compiler/typer.h31
-rw-r--r--deps/v8/src/compiler/unwinding-info-writer.h55
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.cc34
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.h8
-rw-r--r--deps/v8/src/compiler/verifier.cc439
-rw-r--r--deps/v8/src/compiler/verifier.h4
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc2528
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h230
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc166
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc787
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h18
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc18
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc456
-rw-r--r--deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc102
-rw-r--r--deps/v8/src/compiler/x64/unwinding-info-writer-x64.h79
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc776
-rw-r--r--deps/v8/src/compiler/x87/instruction-codes-x87.h12
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc272
-rw-r--r--deps/v8/src/context-measure.cc6
-rw-r--r--deps/v8/src/context-measure.h2
-rw-r--r--deps/v8/src/contexts-inl.h7
-rw-r--r--deps/v8/src/contexts.cc115
-rw-r--r--deps/v8/src/contexts.h228
-rw-r--r--deps/v8/src/conversions-inl.h17
-rw-r--r--deps/v8/src/conversions.h17
-rw-r--r--deps/v8/src/counters-inl.h24
-rw-r--r--deps/v8/src/counters.cc110
-rw-r--r--deps/v8/src/counters.h435
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc102
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h110
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc452
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.h10
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc104
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h106
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc470
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h32
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h6
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.cc9
-rw-r--r--deps/v8/src/crankshaft/hydrogen-gvn.h1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc262
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h372
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.h3
-rw-r--r--deps/v8/src/crankshaft/hydrogen-range-analysis.h3
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.cc22
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc1646
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h219
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc515
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h10
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc9
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc108
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h104
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.cc26
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.h3
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc23
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.h18
-rw-r--r--deps/v8/src/crankshaft/lithium.cc28
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc503
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h16
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc102
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h109
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc524
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h16
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc102
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h109
-rw-r--r--deps/v8/src/crankshaft/ppc/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc490
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h10
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc133
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h149
-rw-r--r--deps/v8/src/crankshaft/s390/OWNERS1
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc466
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.h13
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.cc105
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.h103
-rw-r--r--deps/v8/src/crankshaft/typing.cc59
-rw-r--r--deps/v8/src/crankshaft/typing.h22
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc482
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.h11
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc16
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc119
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h101
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc583
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.h10
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc9
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc135
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h105
-rw-r--r--deps/v8/src/d8-posix.cc28
-rw-r--r--deps/v8/src/d8.cc284
-rw-r--r--deps/v8/src/d8.gyp14
-rw-r--r--deps/v8/src/d8.h25
-rw-r--r--deps/v8/src/dateparser-inl.h17
-rw-r--r--deps/v8/src/dateparser.h2
-rw-r--r--deps/v8/src/debug/OWNERS1
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc2
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc2
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc28
-rw-r--r--deps/v8/src/debug/debug-frames.cc107
-rw-r--r--deps/v8/src/debug/debug-frames.h21
-rw-r--r--deps/v8/src/debug/debug-scopes.cc420
-rw-r--r--deps/v8/src/debug/debug-scopes.h37
-rw-r--r--deps/v8/src/debug/debug.cc984
-rw-r--r--deps/v8/src/debug/debug.h292
-rw-r--r--deps/v8/src/debug/debug.js241
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc2
-rw-r--r--deps/v8/src/debug/liveedit.cc549
-rw-r--r--deps/v8/src/debug/liveedit.h62
-rw-r--r--deps/v8/src/debug/liveedit.js14
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc2
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc2
-rw-r--r--deps/v8/src/debug/mirrors.js152
-rw-r--r--deps/v8/src/debug/ppc/OWNERS1
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc2
-rw-r--r--deps/v8/src/debug/s390/OWNERS1
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc2
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc2
-rw-r--r--deps/v8/src/debug/x87/debug-x87.cc2
-rw-r--r--deps/v8/src/deoptimize-reason.cc38
-rw-r--r--deps/v8/src/deoptimize-reason.h98
-rw-r--r--deps/v8/src/deoptimizer.cc319
-rw-r--r--deps/v8/src/deoptimizer.h142
-rw-r--r--deps/v8/src/disassembler.cc55
-rw-r--r--deps/v8/src/eh-frame.cc629
-rw-r--r--deps/v8/src/eh-frame.h297
-rw-r--r--deps/v8/src/elements.cc1418
-rw-r--r--deps/v8/src/elements.h36
-rw-r--r--deps/v8/src/execution.cc86
-rw-r--r--deps/v8/src/extensions/ignition-statistics-extension.cc37
-rw-r--r--deps/v8/src/extensions/ignition-statistics-extension.h31
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc28
-rw-r--r--deps/v8/src/external-reference-table.cc270
-rw-r--r--deps/v8/src/external-reference-table.h10
-rw-r--r--deps/v8/src/factory.cc399
-rw-r--r--deps/v8/src/factory.h79
-rw-r--r--deps/v8/src/fast-accessor-assembler.cc118
-rw-r--r--deps/v8/src/fast-accessor-assembler.h27
-rw-r--r--deps/v8/src/field-index-inl.h36
-rw-r--r--deps/v8/src/field-index.h7
-rw-r--r--deps/v8/src/field-type.cc6
-rw-r--r--deps/v8/src/flag-definitions.h168
-rw-r--r--deps/v8/src/flags.cc4
-rw-r--r--deps/v8/src/frames-inl.h82
-rw-r--r--deps/v8/src/frames.cc375
-rw-r--r--deps/v8/src/frames.h244
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc833
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc826
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc433
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h103
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc814
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc890
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc873
-rw-r--r--deps/v8/src/full-codegen/ppc/OWNERS1
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc825
-rw-r--r--deps/v8/src/full-codegen/s390/OWNERS1
-rw-r--r--deps/v8/src/full-codegen/s390/full-codegen-s390.cc823
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc813
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc814
-rw-r--r--deps/v8/src/futex-emulation.cc52
-rw-r--r--deps/v8/src/futex-emulation.h23
-rw-r--r--deps/v8/src/gdb-jit.cc18
-rw-r--r--deps/v8/src/global-handles.cc123
-rw-r--r--deps/v8/src/global-handles.h35
-rw-r--r--deps/v8/src/globals.h222
-rw-r--r--deps/v8/src/handles.h12
-rw-r--r--deps/v8/src/heap-symbols.h42
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h68
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc214
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h108
-rw-r--r--deps/v8/src/heap/code-stats.cc220
-rw-r--r--deps/v8/src/heap/code-stats.h42
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc3
-rw-r--r--deps/v8/src/heap/gc-tracer.cc464
-rw-r--r--deps/v8/src/heap/gc-tracer.h242
-rw-r--r--deps/v8/src/heap/heap-inl.h106
-rw-r--r--deps/v8/src/heap/heap.cc1148
-rw-r--r--deps/v8/src/heap/heap.h444
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc8
-rw-r--r--deps/v8/src/heap/incremental-marking.cc258
-rw-r--r--deps/v8/src/heap/incremental-marking.h41
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h94
-rw-r--r--deps/v8/src/heap/mark-compact.cc1657
-rw-r--r--deps/v8/src/heap/mark-compact.h373
-rw-r--r--deps/v8/src/heap/marking.h385
-rw-r--r--deps/v8/src/heap/object-stats.cc540
-rw-r--r--deps/v8/src/heap/object-stats.h87
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h77
-rw-r--r--deps/v8/src/heap/objects-visiting.cc11
-rw-r--r--deps/v8/src/heap/objects-visiting.h41
-rw-r--r--deps/v8/src/heap/page-parallel-job.h4
-rw-r--r--deps/v8/src/heap/remembered-set.cc34
-rw-r--r--deps/v8/src/heap/remembered-set.h292
-rw-r--r--deps/v8/src/heap/scavenger-inl.h29
-rw-r--r--deps/v8/src/heap/scavenger.cc77
-rw-r--r--deps/v8/src/heap/scavenger.h6
-rw-r--r--deps/v8/src/heap/slot-set.h56
-rw-r--r--deps/v8/src/heap/spaces-inl.h225
-rw-r--r--deps/v8/src/heap/spaces.cc1165
-rw-r--r--deps/v8/src/heap/spaces.h1088
-rw-r--r--deps/v8/src/i18n.cc48
-rw-r--r--deps/v8/src/i18n.h12
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h40
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc133
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h49
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc840
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h4
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc109
-rw-r--r--deps/v8/src/ia32/codegen-ia32.h14
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc4
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc79
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc109
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc240
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h64
-rw-r--r--deps/v8/src/ic/access-compiler.cc4
-rw-r--r--deps/v8/src/ic/access-compiler.h1
-rw-r--r--deps/v8/src/ic/arm/access-compiler-arm.cc8
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc163
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc55
-rw-r--r--deps/v8/src/ic/arm/stub-cache-arm.cc73
-rw-r--r--deps/v8/src/ic/arm64/access-compiler-arm64.cc8
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc163
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc55
-rw-r--r--deps/v8/src/ic/arm64/stub-cache-arm64.cc62
-rw-r--r--deps/v8/src/ic/call-optimization.cc17
-rw-r--r--deps/v8/src/ic/handler-compiler.cc173
-rw-r--r--deps/v8/src/ic/handler-compiler.h32
-rw-r--r--deps/v8/src/ic/handler-configuration.h45
-rw-r--r--deps/v8/src/ic/ia32/access-compiler-ia32.cc8
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc168
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc44
-rw-r--r--deps/v8/src/ic/ia32/stub-cache-ia32.cc53
-rw-r--r--deps/v8/src/ic/ic-compiler.cc199
-rw-r--r--deps/v8/src/ic/ic-compiler.h64
-rw-r--r--deps/v8/src/ic/ic-inl.h32
-rw-r--r--deps/v8/src/ic/ic-state.cc15
-rw-r--r--deps/v8/src/ic/ic-state.h41
-rw-r--r--deps/v8/src/ic/ic.cc1716
-rw-r--r--deps/v8/src/ic/ic.h267
-rw-r--r--deps/v8/src/ic/mips/access-compiler-mips.cc8
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc172
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc55
-rw-r--r--deps/v8/src/ic/mips/stub-cache-mips.cc65
-rw-r--r--deps/v8/src/ic/mips64/access-compiler-mips64.cc8
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc172
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc55
-rw-r--r--deps/v8/src/ic/mips64/stub-cache-mips64.cc70
-rw-r--r--deps/v8/src/ic/ppc/OWNERS1
-rw-r--r--deps/v8/src/ic/ppc/access-compiler-ppc.cc8
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc162
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc56
-rw-r--r--deps/v8/src/ic/ppc/stub-cache-ppc.cc54
-rw-r--r--deps/v8/src/ic/s390/OWNERS1
-rw-r--r--deps/v8/src/ic/s390/access-compiler-s390.cc8
-rw-r--r--deps/v8/src/ic/s390/handler-compiler-s390.cc160
-rw-r--r--deps/v8/src/ic/s390/ic-s390.cc54
-rw-r--r--deps/v8/src/ic/s390/stub-cache-s390.cc50
-rw-r--r--deps/v8/src/ic/stub-cache.cc62
-rw-r--r--deps/v8/src/ic/stub-cache.h57
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc8
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc170
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc45
-rw-r--r--deps/v8/src/ic/x64/stub-cache-x64.cc47
-rw-r--r--deps/v8/src/ic/x87/access-compiler-x87.cc8
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc168
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc44
-rw-r--r--deps/v8/src/ic/x87/stub-cache-x87.cc53
-rw-r--r--deps/v8/src/icu_util.cc29
-rw-r--r--deps/v8/src/icu_util.h5
-rw-r--r--deps/v8/src/inspector/BUILD.gn101
-rw-r--r--deps/v8/src/inspector/inspector.gyp113
-rw-r--r--deps/v8/src/inspector/js_protocol.json1011
-rw-r--r--deps/v8/src/interface-descriptors.cc435
-rw-r--r--deps/v8/src/interface-descriptors.h562
-rw-r--r--deps/v8/src/interpreter/OWNERS1
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc1250
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h231
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc59
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc392
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h88
-rw-r--r--deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc77
-rw-r--r--deps/v8/src/interpreter/bytecode-dead-code-optimizer.h41
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc157
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.h43
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc42
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.h42
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc1198
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h82
-rw-r--r--deps/v8/src/interpreter/bytecode-label.cc34
-rw-r--r--deps/v8/src/interpreter/bytecode-label.h87
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.cc345
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.h62
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-table.h72
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.cc134
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.h228
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.cc16
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h13
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc627
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h155
-rw-r--r--deps/v8/src/interpreter/bytecode-register.cc149
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h105
-rw-r--r--deps/v8/src/interpreter/bytecode-traits.h183
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc469
-rw-r--r--deps/v8/src/interpreter/bytecodes.h581
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc124
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h29
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc128
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h104
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.cc19
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h11
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc653
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h130
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.cc343
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h59
-rw-r--r--deps/v8/src/interpreter/interpreter.cc1572
-rw-r--r--deps/v8/src/interpreter/interpreter.h118
-rw-r--r--deps/v8/src/interpreter/mkpeephole.cc383
-rw-r--r--deps/v8/src/interpreter/source-position-table.h100
-rw-r--r--deps/v8/src/isolate-inl.h65
-rw-r--r--deps/v8/src/isolate.cc1024
-rw-r--r--deps/v8/src/isolate.h291
-rw-r--r--deps/v8/src/js/array-iterator.js21
-rw-r--r--deps/v8/src/js/array.js497
-rw-r--r--deps/v8/src/js/arraybuffer.js31
-rw-r--r--deps/v8/src/js/collection-iterator.js19
-rw-r--r--deps/v8/src/js/collection.js64
-rw-r--r--deps/v8/src/js/generator.js119
-rw-r--r--deps/v8/src/js/harmony-async-await.js51
-rw-r--r--deps/v8/src/js/harmony-atomics.js72
-rw-r--r--deps/v8/src/js/harmony-object-observe.js17
-rw-r--r--deps/v8/src/js/harmony-regexp-exec.js37
-rw-r--r--deps/v8/src/js/harmony-sharedarraybuffer.js31
-rw-r--r--deps/v8/src/js/harmony-simd.js50
-rw-r--r--deps/v8/src/js/harmony-species.js60
-rw-r--r--deps/v8/src/js/harmony-string-padding.js8
-rw-r--r--deps/v8/src/js/harmony-unicode-regexps.js40
-rw-r--r--deps/v8/src/js/i18n.js579
-rw-r--r--deps/v8/src/js/icu-case-mapping.js24
-rw-r--r--deps/v8/src/js/intl-extra.js22
-rw-r--r--deps/v8/src/js/json.js297
-rw-r--r--deps/v8/src/js/macros.py76
-rw-r--r--deps/v8/src/js/math.js176
-rw-r--r--deps/v8/src/js/messages.js973
-rw-r--r--deps/v8/src/js/object-observe.js717
-rw-r--r--deps/v8/src/js/prologue.js43
-rw-r--r--deps/v8/src/js/promise-extra.js26
-rw-r--r--deps/v8/src/js/promise.js405
-rw-r--r--deps/v8/src/js/regexp.js162
-rw-r--r--deps/v8/src/js/runtime.js74
-rw-r--r--deps/v8/src/js/spread.js7
-rw-r--r--deps/v8/src/js/string-iterator.js7
-rw-r--r--deps/v8/src/js/string.js168
-rw-r--r--deps/v8/src/js/symbol.js69
-rw-r--r--deps/v8/src/js/typedarray.js343
-rw-r--r--deps/v8/src/js/uri.js379
-rw-r--r--deps/v8/src/js/v8natives.js834
-rw-r--r--deps/v8/src/js/weak-collection.js30
-rw-r--r--deps/v8/src/json-parser.cc812
-rw-r--r--deps/v8/src/json-parser.h754
-rw-r--r--deps/v8/src/json-stringifier.cc722
-rw-r--r--deps/v8/src/json-stringifier.h620
-rw-r--r--deps/v8/src/keys.cc909
-rw-r--r--deps/v8/src/keys.h137
-rw-r--r--deps/v8/src/libplatform/default-platform.cc39
-rw-r--r--deps/v8/src/libplatform/default-platform.h10
-rw-r--r--deps/v8/src/libplatform/task-queue.cc12
-rw-r--r--deps/v8/src/libplatform/task-queue.h7
-rw-r--r--deps/v8/src/libplatform/tracing/trace-buffer.cc109
-rw-r--r--deps/v8/src/libplatform/tracing/trace-buffer.h48
-rw-r--r--deps/v8/src/libplatform/tracing/trace-config.cc42
-rw-r--r--deps/v8/src/libplatform/tracing/trace-object.cc130
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.cc163
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.h32
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc177
-rw-r--r--deps/v8/src/libsampler/DEPS6
-rw-r--r--deps/v8/src/libsampler/sampler.cc663
-rw-r--r--deps/v8/src/libsampler/sampler.h103
-rw-r--r--deps/v8/src/locked-queue-inl.h4
-rw-r--r--deps/v8/src/log-inl.h34
-rw-r--r--deps/v8/src/log-utils.cc6
-rw-r--r--deps/v8/src/log-utils.h5
-rw-r--r--deps/v8/src/log.cc487
-rw-r--r--deps/v8/src/log.h252
-rw-r--r--deps/v8/src/lookup.cc218
-rw-r--r--deps/v8/src/lookup.h42
-rw-r--r--deps/v8/src/machine-type.cc31
-rw-r--r--deps/v8/src/machine-type.h42
-rw-r--r--deps/v8/src/macro-assembler.h15
-rw-r--r--deps/v8/src/messages.cc905
-rw-r--r--deps/v8/src/messages.h140
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h46
-rw-r--r--deps/v8/src/mips/assembler-mips.cc94
-rw-r--r--deps/v8/src/mips/assembler-mips.h60
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc788
-rw-r--r--deps/v8/src/mips/codegen-mips.cc173
-rw-r--r--deps/v8/src/mips/codegen-mips.h19
-rw-r--r--deps/v8/src/mips/constants-mips.h21
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc3
-rw-r--r--deps/v8/src/mips/disasm-mips.cc36
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc105
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc976
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h99
-rw-r--r--deps/v8/src/mips/simulator-mips.cc199
-rw-r--r--deps/v8/src/mips/simulator-mips.h16
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h46
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc109
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h62
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc794
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc151
-rw-r--r--deps/v8/src/mips64/codegen-mips64.h19
-rw-r--r--deps/v8/src/mips64/constants-mips64.h45
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc3
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc83
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc105
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc792
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h83
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc593
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h16
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h7
-rw-r--r--deps/v8/src/objects-debug.cc177
-rw-r--r--deps/v8/src/objects-inl.h916
-rw-r--r--deps/v8/src/objects-printer.cc355
-rw-r--r--deps/v8/src/objects.cc5231
-rw-r--r--deps/v8/src/objects.h1845
-rw-r--r--deps/v8/src/ostreams.cc16
-rw-r--r--deps/v8/src/ostreams.h9
-rw-r--r--deps/v8/src/parsing/OWNERS2
-rw-r--r--deps/v8/src/parsing/expression-classifier.h409
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc5
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h4
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.cc81
-rw-r--r--deps/v8/src/parsing/parameter-initializer-rewriter.h19
-rw-r--r--deps/v8/src/parsing/parse-info.cc113
-rw-r--r--deps/v8/src/parsing/parse-info.h245
-rw-r--r--deps/v8/src/parsing/parser-base.h1961
-rw-r--r--deps/v8/src/parsing/parser.cc4622
-rw-r--r--deps/v8/src/parsing/parser.h769
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc352
-rw-r--r--deps/v8/src/parsing/preparse-data.cc4
-rw-r--r--deps/v8/src/parsing/preparse-data.h2
-rw-r--r--deps/v8/src/parsing/preparser.cc542
-rw-r--r--deps/v8/src/parsing/preparser.h531
-rw-r--r--deps/v8/src/parsing/rewriter.cc89
-rw-r--r--deps/v8/src/parsing/rewriter.h12
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc253
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h57
-rw-r--r--deps/v8/src/parsing/scanner.cc183
-rw-r--r--deps/v8/src/parsing/scanner.h425
-rw-r--r--deps/v8/src/parsing/token.cc10
-rw-r--r--deps/v8/src/parsing/token.h18
-rw-r--r--deps/v8/src/perf-jit.cc75
-rw-r--r--deps/v8/src/perf-jit.h4
-rw-r--r--deps/v8/src/ppc/OWNERS1
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h37
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc59
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h28
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc775
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc159
-rw-r--r--deps/v8/src/ppc/codegen-ppc.h13
-rw-r--r--deps/v8/src/ppc/constants-ppc.h3
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc3
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc9
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc108
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc454
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h63
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc92
-rw-r--r--deps/v8/src/ppc/simulator-ppc.h16
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc25
-rw-r--r--deps/v8/src/profiler/allocation-tracker.h7
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h13
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc369
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h88
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc24
-rw-r--r--deps/v8/src/profiler/heap-profiler.h22
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc338
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h29
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h4
-rw-r--r--deps/v8/src/profiler/profile-generator.cc283
-rw-r--r--deps/v8/src/profiler/profile-generator.h160
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc335
-rw-r--r--deps/v8/src/profiler/profiler-listener.h97
-rw-r--r--deps/v8/src/profiler/sampler.cc898
-rw-r--r--deps/v8/src/profiler/sampler.h139
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc109
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.h50
-rw-r--r--deps/v8/src/profiler/strings-storage.cc22
-rw-r--r--deps/v8/src/profiler/strings-storage.h12
-rw-r--r--deps/v8/src/profiler/tick-sample.cc272
-rw-r--r--deps/v8/src/profiler/tick-sample.h27
-rw-r--r--deps/v8/src/property-descriptor.cc4
-rw-r--r--deps/v8/src/property-details.h28
-rw-r--r--deps/v8/src/prototype.h21
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc6
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h1
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc7
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h1
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc1
-rw-r--r--deps/v8/src/regexp/jsregexp.cc54
-rw-r--r--deps/v8/src/regexp/jsregexp.h29
-rw-r--r--deps/v8/src/regexp/ppc/OWNERS1
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc45
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h1
-rw-r--r--deps/v8/src/regexp/regexp-ast.h20
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc13
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h4
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc401
-rw-r--r--deps/v8/src/regexp/regexp-parser.h38
-rw-r--r--deps/v8/src/regexp/s390/OWNERS1
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc54
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h1
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc1
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc1
-rw-r--r--deps/v8/src/register-configuration.cc232
-rw-r--r--deps/v8/src/register-configuration.h90
-rw-r--r--deps/v8/src/runtime-profiler.cc172
-rw-r--r--deps/v8/src/runtime-profiler.h21
-rw-r--r--deps/v8/src/runtime/runtime-array.cc341
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc172
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc68
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc56
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc157
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc837
-rw-r--r--deps/v8/src/runtime/runtime-error.cc24
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc44
-rw-r--r--deps/v8/src/runtime/runtime-function.cc67
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc58
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc146
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc540
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc346
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc27
-rw-r--r--deps/v8/src/runtime/runtime-json.cc58
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc37
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc56
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc137
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc100
-rw-r--r--deps/v8/src/runtime/runtime-object.cc489
-rw-r--r--deps/v8/src/runtime/runtime-observe.cc160
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc65
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc26
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc48
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc557
-rw-r--r--deps/v8/src/runtime/runtime-simd.cc166
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc234
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc8
-rw-r--r--deps/v8/src/runtime/runtime-test.cc357
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc96
-rw-r--r--deps/v8/src/runtime/runtime-uri.cc293
-rw-r--r--deps/v8/src/runtime/runtime-utils.h115
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc120
-rw-r--r--deps/v8/src/runtime/runtime.h461
-rw-r--r--deps/v8/src/s390/OWNERS1
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h36
-rw-r--r--deps/v8/src/s390/assembler-s390.cc104
-rw-r--r--deps/v8/src/s390/assembler-s390.h48
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc765
-rw-r--r--deps/v8/src/s390/codegen-s390.cc153
-rw-r--r--deps/v8/src/s390/codegen-s390.h14
-rw-r--r--deps/v8/src/s390/constants-s390.cc261
-rw-r--r--deps/v8/src/s390/constants-s390.h38
-rw-r--r--deps/v8/src/s390/deoptimizer-s390.cc3
-rw-r--r--deps/v8/src/s390/disasm-s390.cc70
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc97
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc472
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h123
-rw-r--r--deps/v8/src/s390/simulator-s390.cc7725
-rw-r--r--deps/v8/src/s390/simulator-s390.h754
-rw-r--r--deps/v8/src/signature.h6
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc251
-rw-r--r--deps/v8/src/snapshot/code-serializer.h102
-rw-r--r--deps/v8/src/snapshot/deserializer.cc74
-rw-r--r--deps/v8/src/snapshot/deserializer.h23
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc2
-rw-r--r--deps/v8/src/snapshot/natives-common.cc19
-rw-r--r--deps/v8/src/snapshot/natives.h1
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc55
-rw-r--r--deps/v8/src/snapshot/partial-serializer.h32
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc18
-rw-r--r--deps/v8/src/snapshot/serializer-common.h117
-rw-r--r--deps/v8/src/snapshot/serializer.cc272
-rw-r--r--deps/v8/src/snapshot/serializer.h63
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc253
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h2
-rw-r--r--deps/v8/src/snapshot/snapshot.h125
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc38
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h37
-rw-r--r--deps/v8/src/source-position-table.cc (renamed from deps/v8/src/interpreter/source-position-table.cc)151
-rw-r--r--deps/v8/src/source-position-table.h90
-rw-r--r--deps/v8/src/source-position.h5
-rw-r--r--deps/v8/src/startup-data-util.cc27
-rw-r--r--deps/v8/src/string-builder.h10
-rw-r--r--deps/v8/src/string-stream.cc26
-rw-r--r--deps/v8/src/string-stream.h5
-rw-r--r--deps/v8/src/third_party/fdlibm/README.v818
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.cc228
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.h27
-rw-r--r--deps/v8/src/third_party/fdlibm/fdlibm.js1117
-rw-r--r--deps/v8/src/third_party/vtune/v8vtune.gyp4
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc27
-rw-r--r--deps/v8/src/tracing/trace-event.cc121
-rw-r--r--deps/v8/src/tracing/trace-event.h180
-rw-r--r--deps/v8/src/transitions-inl.h3
-rw-r--r--deps/v8/src/type-cache.h25
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h69
-rw-r--r--deps/v8/src/type-feedback-vector.cc286
-rw-r--r--deps/v8/src/type-feedback-vector.h176
-rw-r--r--deps/v8/src/type-info.cc52
-rw-r--r--deps/v8/src/type-info.h15
-rw-r--r--deps/v8/src/types.cc22
-rw-r--r--deps/v8/src/types.h77
-rw-r--r--deps/v8/src/typing-asm.cc1622
-rw-r--r--deps/v8/src/typing-asm.h185
-rw-r--r--deps/v8/src/typing-reset.cc25
-rw-r--r--deps/v8/src/typing-reset.h26
-rw-r--r--deps/v8/src/unicode-inl.h6
-rw-r--r--deps/v8/src/unicode.cc14
-rw-r--r--deps/v8/src/unicode.h5
-rw-r--r--deps/v8/src/uri.cc505
-rw-r--r--deps/v8/src/uri.h54
-rw-r--r--deps/v8/src/utils.cc7
-rw-r--r--deps/v8/src/utils.h116
-rw-r--r--deps/v8/src/v8.cc7
-rw-r--r--deps/v8/src/v8.gyp2419
-rw-r--r--deps/v8/src/v8memory.h7
-rw-r--r--deps/v8/src/value-serializer.cc967
-rw-r--r--deps/v8/src/value-serializer.h181
-rw-r--r--deps/v8/src/vector.h7
-rw-r--r--deps/v8/src/vm-state-inl.h8
-rw-r--r--deps/v8/src/vm-state.h1
-rw-r--r--deps/v8/src/wasm/OWNERS6
-rw-r--r--deps/v8/src/wasm/ast-decoder.cc2134
-rw-r--r--deps/v8/src/wasm/ast-decoder.h219
-rw-r--r--deps/v8/src/wasm/decoder.h100
-rw-r--r--deps/v8/src/wasm/encoder.cc709
-rw-r--r--deps/v8/src/wasm/encoder.h215
-rw-r--r--deps/v8/src/wasm/leb-helper.h134
-rw-r--r--deps/v8/src/wasm/module-decoder.cc604
-rw-r--r--deps/v8/src/wasm/module-decoder.h7
-rw-r--r--deps/v8/src/wasm/switch-logic.cc63
-rw-r--r--deps/v8/src/wasm/switch-logic.h31
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc234
-rw-r--r--deps/v8/src/wasm/wasm-debug.h46
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc216
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h192
-rw-r--r--deps/v8/src/wasm/wasm-function-name-table.cc71
-rw-r--r--deps/v8/src/wasm/wasm-function-name-table.h33
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc1813
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h209
-rw-r--r--deps/v8/src/wasm/wasm-js.cc361
-rw-r--r--deps/v8/src/wasm/wasm-js.h2
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h476
-rw-r--r--deps/v8/src/wasm/wasm-module.cc1810
-rw-r--r--deps/v8/src/wasm/wasm-module.h288
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc131
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h360
-rw-r--r--deps/v8/src/wasm/wasm-result.cc19
-rw-r--r--deps/v8/src/wasm/wasm-result.h40
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h48
-rw-r--r--deps/v8/src/x64/assembler-x64.cc490
-rw-r--r--deps/v8/src/x64/assembler-x64.h186
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc782
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h4
-rw-r--r--deps/v8/src/x64/codegen-x64.cc94
-rw-r--r--deps/v8/src/x64/codegen-x64.h15
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc3
-rw-r--r--deps/v8/src/x64/disasm-x64.cc176
-rw-r--r--deps/v8/src/x64/eh-frame-x64.cc63
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc116
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc328
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h88
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h41
-rw-r--r--deps/v8/src/x87/assembler-x87.cc118
-rw-r--r--deps/v8/src/x87/assembler-x87.h48
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc717
-rw-r--r--deps/v8/src/x87/code-stubs-x87.h4
-rw-r--r--deps/v8/src/x87/codegen-x87.cc18
-rw-r--r--deps/v8/src/x87/codegen-x87.h1
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc4
-rw-r--r--deps/v8/src/x87/disasm-x87.cc91
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc109
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc238
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h61
-rw-r--r--deps/v8/src/zone.h5
-rw-r--r--deps/v8/test/BUILD.gn194
-rw-r--r--deps/v8/test/benchmarks/benchmarks.gyp4
-rw-r--r--deps/v8/test/bot_default.gyp4
-rw-r--r--deps/v8/test/cctest/BUILD.gn173
-rw-r--r--deps/v8/test/cctest/OWNERS4
-rw-r--r--deps/v8/test/cctest/asmjs/OWNERS10
-rw-r--r--deps/v8/test/cctest/asmjs/test-asm-typer.cc2003
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.gyp523
-rw-r--r--deps/v8/test/cctest/cctest.status372
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h2
-rw-r--r--deps/v8/test/cctest/compiler/code-assembler-tester.h61
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h8
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h95
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h16
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc448
-rw-r--r--deps/v8/test/cctest/compiler/test-changes-lowering.cc290
-rw-r--r--deps/v8/test/cctest/compiler/test-code-assembler.cc (renamed from deps/v8/test/cctest/compiler/test-code-stub-assembler.cc)352
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc108
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc87
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc204
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc20
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc189
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc74
-rw-r--r--deps/v8/test/cctest/compiler/test-operator.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-osr.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-pipeline.cc44
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc271
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc36
-rw-r--r--deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc532
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc381
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc81
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsops.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-load-store.cc1190
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc815
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc175
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-unwinding-info.cc58
-rw-r--r--deps/v8/test/cctest/compiler/test-run-wasm-machops.cc170
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc491
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h24
-rw-r--r--deps/v8/test/cctest/expression-type-collector.cc63
-rw-r--r--deps/v8/test/cctest/expression-type-collector.h39
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h1
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc (renamed from deps/v8/test/cctest/heap/utils-inl.h)121
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.h51
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc15
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc318
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc76
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc1185
-rw-r--r--deps/v8/test/cctest/heap/test-incremental-marking.cc11
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc129
-rw-r--r--deps/v8/test/cctest/heap/test-page-promotion.cc129
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc159
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc81
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h11
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden110
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden1030
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden297
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden88
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden895
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden250
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden42
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden68
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden55
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden49
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden252
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden308
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden115
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden75
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden99
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden138
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden72
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden1143
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden242
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden108
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden92
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden38
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden119
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden89
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden28
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden55
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden56
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden266
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden1140
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden40
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden604
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden28
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden60
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden60
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden1060
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden854
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden1302
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden96
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden138
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden678
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden1268
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden178
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden34
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden4130
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden37
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden418
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden1034
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden38
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden50
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden202
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden77
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden720
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden2022
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden3774
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden33
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden1030
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden88
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden1348
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden604
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden40
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden28
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden39
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden112
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden241
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden21
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden157
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden217
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc22
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.h2
-rw-r--r--deps/v8/test/cctest/interpreter/source-position-matcher.cc224
-rw-r--r--deps/v8/test/cctest/interpreter/source-position-matcher.h50
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc846
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc201
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc1413
-rw-r--r--deps/v8/test/cctest/interpreter/test-source-positions.cc250
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc312
-rw-r--r--deps/v8/test/cctest/libsampler/test-sampler.cc140
-rw-r--r--deps/v8/test/cctest/profiler-extension.cc21
-rw-r--r--deps/v8/test/cctest/profiler-extension.h10
-rw-r--r--deps/v8/test/cctest/test-access-checks.cc305
-rw-r--r--deps/v8/test/cctest/test-accessors.cc10
-rw-r--r--deps/v8/test/cctest/test-api-fast-accessor-builder.cc159
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc95
-rw-r--r--deps/v8/test/cctest/test-api.cc1276
-rw-r--r--deps/v8/test/cctest/test-api.h15
-rw-r--r--deps/v8/test/cctest/test-asm-validator.cc2514
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc303
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc495
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc4
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc135
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc2
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc82
-rw-r--r--deps/v8/test/cctest/test-assembler-x87.cc41
-rw-r--r--deps/v8/test/cctest/test-ast-expression-visitor.cc423
-rw-r--r--deps/v8/test/cctest/test-ast.cc2
-rw-r--r--deps/v8/test/cctest/test-code-cache.cc77
-rw-r--r--deps/v8/test/cctest/test-code-layout.cc84
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc1477
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-ia32.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips64.cc3
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc4
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x87.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs.cc14
-rw-r--r--deps/v8/test/cctest/test-compiler.cc118
-rw-r--r--deps/v8/test/cctest/test-conversions.cc27
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc369
-rw-r--r--deps/v8/test/cctest/test-date.cc49
-rw-r--r--deps/v8/test/cctest/test-debug.cc266
-rw-r--r--deps/v8/test/cctest/test-decls.cc2
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc86
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc77
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc18
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc25
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc30
-rw-r--r--deps/v8/test/cctest/test-disasm-mips64.cc93
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc124
-rw-r--r--deps/v8/test/cctest/test-disasm-x87.cc22
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc138
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc108
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc10
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc52
-rw-r--r--deps/v8/test/cctest/test-hashmap.cc12
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc124
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc17
-rw-r--r--deps/v8/test/cctest/test-lockers.cc14
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc20
-rw-r--r--deps/v8/test/cctest/test-log.cc17
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc745
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc1146
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc173
-rw-r--r--deps/v8/test/cctest/test-microtask-delivery.cc161
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc1078
-rw-r--r--deps/v8/test/cctest/test-object.cc71
-rw-r--r--deps/v8/test/cctest/test-parsing.cc1576
-rw-r--r--deps/v8/test/cctest/test-platform.cc2
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc145
-rw-r--r--deps/v8/test/cctest/test-regexp.cc49
-rw-r--r--deps/v8/test/cctest/test-reloc-info.cc126
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm.cc58
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc64
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc67
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-x64.cc63
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-x87.cc67
-rw-r--r--deps/v8/test/cctest/test-serialize.cc416
-rw-r--r--deps/v8/test/cctest/test-slots-buffer.cc4
-rw-r--r--deps/v8/test/cctest/test-strings.cc3
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc28
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc2
-rw-r--r--deps/v8/test/cctest/test-typing-reset.cc298
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc24
-rw-r--r--deps/v8/test/cctest/test-usecounters.cc73
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc6
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h6
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc25
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc23
-rw-r--r--deps/v8/test/cctest/trace-extension.cc16
-rw-r--r--deps/v8/test/cctest/trace-extension.h5
-rw-r--r--deps/v8/test/cctest/types-fuzz.h2
-rw-r--r--deps/v8/test/cctest/wasm/OWNERS6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc703
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc297
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc291
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc184
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc229
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc60
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc2141
-rw-r--r--deps/v8/test/cctest/wasm/test-signatures.h8
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc120
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc164
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc139
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h383
-rw-r--r--deps/v8/test/default.gyp4
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc18
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.h3
-rw-r--r--deps/v8/test/fuzzer/fuzzer.gyp28
-rw-r--r--deps/v8/test/fuzzer/fuzzer.isolate10
-rw-r--r--deps/v8/test/fuzzer/json.cc4
-rw-r--r--deps/v8/test/fuzzer/parser.cc3
-rw-r--r--deps/v8/test/fuzzer/regexp.cc8
-rw-r--r--deps/v8/test/fuzzer/testcfg.py2
-rw-r--r--deps/v8/test/fuzzer/wasm-asmjs.cc4
-rw-r--r--deps/v8/test/fuzzer/wasm.cc4
-rw-r--r--deps/v8/test/ignition.gyp27
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/BUILD.gn6
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/DEPS3
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/RunTests.cpp17
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp12
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.isolate (renamed from deps/v8/test/ignition.isolate)11
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status6
-rw-r--r--deps/v8/test/intl/assert.js37
-rw-r--r--deps/v8/test/intl/date-format/calendar-with-multiple-type-subtags.js39
-rw-r--r--deps/v8/test/intl/date-format/parse-MMMdy.js16
-rw-r--r--deps/v8/test/intl/date-format/parse-invalid-input.js2
-rw-r--r--deps/v8/test/intl/date-format/parse-mdy.js2
-rw-r--r--deps/v8/test/intl/date-format/parse-mdyhms.js18
-rw-r--r--deps/v8/test/intl/extra-flag.js23
-rw-r--r--deps/v8/test/intl/general/case-mapping.js138
-rw-r--r--deps/v8/test/intl/general/getCanonicalLocales.js25
-rw-r--r--deps/v8/test/intl/intl.gyp4
-rw-r--r--deps/v8/test/intl/intl.status28
-rw-r--r--deps/v8/test/intl/no-extra-flag.js23
-rw-r--r--deps/v8/test/intl/number-format/format-is-bound.js4
-rw-r--r--deps/v8/test/intl/number-format/parse-currency.js2
-rw-r--r--deps/v8/test/intl/number-format/parse-decimal.js25
-rw-r--r--deps/v8/test/intl/number-format/parse-invalid-input.js2
-rw-r--r--deps/v8/test/intl/number-format/parse-percent.js18
-rw-r--r--deps/v8/test/intl/regress-4870.js8
-rw-r--r--deps/v8/test/intl/regress-5179.js15
-rw-r--r--deps/v8/test/intl/testcfg.py10
-rw-r--r--deps/v8/test/js-perf-test/Generators/generators.js131
-rw-r--r--deps/v8/test/js-perf-test/Generators/run.js26
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json55
-rw-r--r--deps/v8/test/js-perf-test/PropertyQueries/PropertyQueries.json38
-rw-r--r--deps/v8/test/js-perf-test/PropertyQueries/property-queries.js274
-rw-r--r--deps/v8/test/js-perf-test/PropertyQueries/run.js23
-rw-r--r--deps/v8/test/memory/Memory.json4
-rw-r--r--deps/v8/test/message/const-decl-no-init-sloppy.js9
-rw-r--r--deps/v8/test/message/generators-throw1.js7
-rw-r--r--deps/v8/test/message/generators-throw1.out3
-rw-r--r--deps/v8/test/message/generators-throw2.js9
-rw-r--r--deps/v8/test/message/generators-throw2.out3
-rw-r--r--deps/v8/test/message/instanceof-noncallable.js2
-rw-r--r--deps/v8/test/message/instanceof-nonobject.js2
-rw-r--r--deps/v8/test/message/let-lexical-name-in-array-prohibited.js2
-rw-r--r--deps/v8/test/message/let-lexical-name-in-object-prohibited.js2
-rw-r--r--deps/v8/test/message/let-lexical-name-prohibited.js2
-rw-r--r--deps/v8/test/message/message.gyp4
-rw-r--r--deps/v8/test/message/message.status2
-rw-r--r--deps/v8/test/message/no-legacy-const-2.js8
-rw-r--r--deps/v8/test/message/no-legacy-const-2.out5
-rw-r--r--deps/v8/test/message/no-legacy-const-3.js8
-rw-r--r--deps/v8/test/message/no-legacy-const-3.out5
-rw-r--r--deps/v8/test/message/no-legacy-const.js8
-rw-r--r--deps/v8/test/message/no-legacy-const.out5
-rw-r--r--deps/v8/test/message/paren_in_arg_string.out1
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-binop-lhs.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-binop-rhs.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-comma.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-comma.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-extends.js10
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-extends.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-for-in.js16
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-for-in.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-for-of.js16
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-for-of.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-logical-and.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-logical-and.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-logical-or.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-logical-or.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-subclass.js15
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-subclass.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.js20
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.js22
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try.js17
-rw-r--r--deps/v8/test/message/syntactic-tail-call-in-try.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-inside-member-expr.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-inside-member-expr.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-eval.js9
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-eval.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-identifier.js10
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-identifier.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-new.js13
-rw-r--r--deps/v8/test/message/syntactic-tail-call-of-new.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-sloppy.js9
-rw-r--r--deps/v8/test/message/syntactic-tail-call-sloppy.out4
-rw-r--r--deps/v8/test/message/syntactic-tail-call-without-return.js14
-rw-r--r--deps/v8/test/message/syntactic-tail-call-without-return.out4
-rw-r--r--deps/v8/test/message/testcfg.py8
-rw-r--r--deps/v8/test/mjsunit/arguments.js67
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js21
-rw-r--r--deps/v8/test/mjsunit/array-feedback.js1
-rw-r--r--deps/v8/test/mjsunit/array-indexing-receiver.js632
-rw-r--r--deps/v8/test/mjsunit/array-literal-transitions.js1
-rw-r--r--deps/v8/test/mjsunit/array-push.js13
-rw-r--r--deps/v8/test/mjsunit/array-push7.js60
-rw-r--r--deps/v8/test/mjsunit/array-slice.js1
-rw-r--r--deps/v8/test/mjsunit/array-sort.js65
-rw-r--r--deps/v8/test/mjsunit/array-splice.js93
-rw-r--r--deps/v8/test/mjsunit/array-tostring.js24
-rw-r--r--deps/v8/test/mjsunit/asm-directive.js11
-rw-r--r--deps/v8/test/mjsunit/asm/asm-validation.js215
-rw-r--r--deps/v8/test/mjsunit/asm/construct-double.js33
-rw-r--r--deps/v8/test/mjsunit/asm/double-hi.js40
-rw-r--r--deps/v8/test/mjsunit/asm/double-lo.js40
-rw-r--r--deps/v8/test/mjsunit/asm/load-elimination.js26
-rw-r--r--deps/v8/test/mjsunit/call-counts.js46
-rw-r--r--deps/v8/test/mjsunit/callsite.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/accessor-exceptions1.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/accessor-exceptions2.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors1.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors2.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors3.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors4.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors5.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors6.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-accessors7.js30
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-610228.js)7
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-10.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-4.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-5.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-6.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-7.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-8.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-9.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-dead-jscreate.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js83
-rw-r--r--deps/v8/test/mjsunit/compiler/integral32-add-sub.js131
-rw-r--r--deps/v8/test/mjsunit/compiler/math-mul.js45
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-float32array-length.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-float64array-length.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-for-in.js103
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-int32array-length.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-alignment.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-backedges1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-func.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-id.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-follow.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-for-let.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-forin-nested.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-forin.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-forof.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-function-id.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-function-id2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-function.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-infinite.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-labeled.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-literals-adapted.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-literals.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-multiple.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-multiple2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-multiple3.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested2.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested2b.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested3.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested3b.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-regex-id.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-sar.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-warm.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-while-let.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5074.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5100.js51
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5129.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5158.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5278.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-607493.js37
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-621423.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-625558.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-628403.js27
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-628516.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-628773.js21
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-630611.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-633497.js29
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-loop-variable-if.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-loop-variable-unsigned.js23
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js43
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js19
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-valueof.js35
-rw-r--r--deps/v8/test/mjsunit/compiler/try-osr.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/turbo-number-feedback.js102
-rw-r--r--deps/v8/test/mjsunit/cross-realm-filtering.js86
-rw-r--r--deps/v8/test/mjsunit/debug-allscopes-on-debugger.js7
-rw-r--r--deps/v8/test/mjsunit/debug-eval-scope.js144
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-nested.js2
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-shadowed-context-2.js41
-rw-r--r--deps/v8/test/mjsunit/debug-exceptions.js88
-rw-r--r--deps/v8/test/mjsunit/debug-function-scopes.js12
-rw-r--r--deps/v8/test/mjsunit/debug-generator-break-on-stack.js46
-rw-r--r--deps/v8/test/mjsunit/debug-generator-break.js44
-rw-r--r--deps/v8/test/mjsunit/debug-handle.js4
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-exceptions.js67
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-patch-positions.js129
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-stepin.js18
-rw-r--r--deps/v8/test/mjsunit/debug-scopes-suspended-generators.js470
-rw-r--r--deps/v8/test/mjsunit/debug-scopes.js9
-rw-r--r--deps/v8/test/mjsunit/debug-script.js5
-rw-r--r--deps/v8/test/mjsunit/debug-sourceinfo.js77
-rw-r--r--deps/v8/test/mjsunit/debug-sourceslice.js74
-rw-r--r--deps/v8/test/mjsunit/debug-stack-check-position.js30
-rw-r--r--deps/v8/test/mjsunit/delete.js14
-rw-r--r--deps/v8/test/mjsunit/dictionary-properties.js8
-rw-r--r--deps/v8/test/mjsunit/double-intrinsics.js36
-rw-r--r--deps/v8/test/mjsunit/eagerly-parsed-lazily-compiled-functions.js33
-rw-r--r--deps/v8/test/mjsunit/error-tostring-omit.js2
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-1.js19
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-2.js19
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat.js32
-rw-r--r--deps/v8/test/mjsunit/es6/array-prototype-values.js1
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-constructor-accessor.js (renamed from deps/v8/test/mjsunit/harmony/array-species-constructor-accessor.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-constructor-delete.js (renamed from deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-constructor.js (renamed from deps/v8/test/mjsunit/harmony/array-species-constructor.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-delete.js (renamed from deps/v8/test/mjsunit/harmony/array-species-delete.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-modified.js (renamed from deps/v8/test/mjsunit/harmony/array-species-modified.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-neg-zero.js23
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-parent-constructor.js (renamed from deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/array-species-proto.js (renamed from deps/v8/test/mjsunit/harmony/array-species-proto.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/array-species.js (renamed from deps/v8/test/mjsunit/harmony/array-species.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/arraybuffer-species.js (renamed from deps/v8/test/mjsunit/harmony/arraybuffer-species.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js)5
-rw-r--r--deps/v8/test/mjsunit/es6/block-conflicts.js3
-rw-r--r--deps/v8/test/mjsunit/es6/block-const-assign-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-early-errors.js54
-rw-r--r--deps/v8/test/mjsunit/es6/block-eval-var-over-let.js (renamed from deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js)36
-rw-r--r--deps/v8/test/mjsunit/es6/block-for-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-for-sloppy.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-leave-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-leave-sloppy.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-semantics-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-non-strict-errors.js42
-rw-r--r--deps/v8/test/mjsunit/es6/block-scope-class.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-scoping-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/block-scoping-top-level-sloppy.js (renamed from deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js)1
-rw-r--r--deps/v8/test/mjsunit/es6/block-sloppy-function.js656
-rw-r--r--deps/v8/test/mjsunit/es6/catch-parameter-redeclaration.js112
-rw-r--r--deps/v8/test/mjsunit/es6/class-computed-property-names-super.js1
-rw-r--r--deps/v8/test/mjsunit/es6/class-property-name-eval-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/es6/classes-derived-return-type.js2
-rw-r--r--deps/v8/test/mjsunit/es6/classes-subclass-builtins.js3
-rw-r--r--deps/v8/test/mjsunit/es6/classes.js3
-rw-r--r--deps/v8/test/mjsunit/es6/completion.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-blockscopes.js27
-rw-r--r--deps/v8/test/mjsunit/es6/debug-exception-generators.js49
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js6
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/evaluate-across-microtasks.js66
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/events.js122
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reentry.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js32
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js29
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js30
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js12
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-finally-caught-all.js72
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js4
-rw-r--r--deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js10
-rw-r--r--deps/v8/test/mjsunit/es6/debug-scope-default-param-with-eval.js61
-rw-r--r--deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js35
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-proxies.js2
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepnext-for.js23
-rw-r--r--deps/v8/test/mjsunit/es6/default-parameters.js21
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring.js22
-rw-r--r--deps/v8/test/mjsunit/es6/for-each-in-catch.js194
-rw-r--r--deps/v8/test/mjsunit/es6/function-name.js (renamed from deps/v8/test/mjsunit/harmony/function-name.js)8
-rw-r--r--deps/v8/test/mjsunit/es6/generator-destructuring.js317
-rw-r--r--deps/v8/test/mjsunit/es6/generators-objects.js40
-rw-r--r--deps/v8/test/mjsunit/es6/instanceof.js (renamed from deps/v8/test/mjsunit/harmony/instanceof-es6.js)21
-rw-r--r--deps/v8/test/mjsunit/es6/iterator-close.js (renamed from deps/v8/test/mjsunit/harmony/iterator-close.js)80
-rw-r--r--deps/v8/test/mjsunit/es6/json.js2
-rw-r--r--deps/v8/test/mjsunit/es6/legacy-subclassing.js38
-rw-r--r--deps/v8/test/mjsunit/es6/math-log2-log10.js4
-rw-r--r--deps/v8/test/mjsunit/es6/math.js2
-rw-r--r--deps/v8/test/mjsunit/es6/microtask-delivery.js86
-rw-r--r--deps/v8/test/mjsunit/es6/mirror-collections.js46
-rw-r--r--deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js24
-rw-r--r--deps/v8/test/mjsunit/es6/object-tostring.js91
-rw-r--r--deps/v8/test/mjsunit/es6/pattern-brand-check.js2
-rw-r--r--deps/v8/test/mjsunit/es6/promise-internal-setter.js3
-rw-r--r--deps/v8/test/mjsunit/es6/promise-species.js (renamed from deps/v8/test/mjsunit/harmony/promise-species.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/promises.js458
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-for.js19
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-global-reference.js7
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-json.js59
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-keys.js102
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-construct.js2
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-define-property.js47
-rw-r--r--deps/v8/test/mjsunit/es6/reflect.js7
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-constructor.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-flags.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-3750.js9
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4482.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-4482.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-468661.js4
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-cr372788.js4
-rw-r--r--deps/v8/test/mjsunit/es6/species.js (renamed from deps/v8/test/mjsunit/harmony/species.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/spread-call-new-class.js2
-rw-r--r--deps/v8/test/mjsunit/es6/spread-call-super-property.js2
-rw-r--r--deps/v8/test/mjsunit/es6/string-match.js (renamed from deps/v8/test/mjsunit/harmony/string-match.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/string-replace.js (renamed from deps/v8/test/mjsunit/harmony/string-replace.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/string-search.js2
-rw-r--r--deps/v8/test/mjsunit/es6/string-split.js (renamed from deps/v8/test/mjsunit/harmony/string-split.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/super.js1
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js4
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest.js29
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call.js77
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-set-length-internal.js35
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-species.js (renamed from deps/v8/test/mjsunit/harmony/typedarray-species.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-tostring.js13
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js23
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-character-ranges.js (renamed from deps/v8/test/mjsunit/harmony/unicode-character-ranges.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-escapes-in-regexps.js (renamed from deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-backrefs.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case-noi18n.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-last-index.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-restricted-syntax.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-unanchored-advance.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-unanchored-advance.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/unicode-regexp-zero-length.js (renamed from deps/v8/test/mjsunit/harmony/unicode-regexp-zero-length.js)2
-rw-r--r--deps/v8/test/mjsunit/es7/array-includes-receiver.js634
-rw-r--r--deps/v8/test/mjsunit/es7/array-includes.js5
-rw-r--r--deps/v8/test/mjsunit/es7/exponentiation-operator.js (renamed from deps/v8/test/mjsunit/harmony/exponentiation-operator.js)2
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe-runtime.js19
-rw-r--r--deps/v8/test/mjsunit/es7/object-observe.js1865
-rw-r--r--deps/v8/test/mjsunit/es7/regress/regress-443982.js24
-rw-r--r--deps/v8/test/mjsunit/es7/regress/regress-633883.js9
-rw-r--r--deps/v8/test/mjsunit/es7/regress/regress-634269.js7
-rw-r--r--deps/v8/test/mjsunit/es7/regress/regress-634273.js14
-rw-r--r--deps/v8/test/mjsunit/es7/regress/regress-634357.js14
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js410
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js393
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js143
-rw-r--r--deps/v8/test/mjsunit/es8/syntactic-tail-call.js604
-rw-r--r--deps/v8/test/mjsunit/eval-origin.js39
-rw-r--r--deps/v8/test/mjsunit/fast-prototype.js10
-rw-r--r--deps/v8/test/mjsunit/for-in.js175
-rw-r--r--deps/v8/test/mjsunit/function-name-eval-shadowed.js5
-rw-r--r--deps/v8/test/mjsunit/global-arrow-delete-this.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat-array-proto-getter.js53
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat-array-proto.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict-getter.js57
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict.js53
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat-object-proto-generic-dict.js65
-rw-r--r--deps/v8/test/mjsunit/harmony/array-concat-object-proto.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/async-arrow-lexical-arguments.js42
-rw-r--r--deps/v8/test/mjsunit/harmony/async-arrow-lexical-new.target.js43
-rw-r--r--deps/v8/test/mjsunit/harmony/async-arrow-lexical-super.js58
-rw-r--r--deps/v8/test/mjsunit/harmony/async-arrow-lexical-this.js48
-rw-r--r--deps/v8/test/mjsunit/harmony/async-await-basic.js378
-rw-r--r--deps/v8/test/mjsunit/harmony/async-await-no-constructor.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/async-await-resolve-new.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/async-await-species.js101
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-basic.js40
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js89
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-abort-at-break.js55
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-continue-at-break.js55
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-in-and-out.js51
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-in-out-out.js51
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-in.js51
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-nested.js58
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-next-constant.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-next.js51
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-step-out.js49
-rw-r--r--deps/v8/test/mjsunit/harmony/async-destructuring.js515
-rw-r--r--deps/v8/test/mjsunit/harmony/async-function-debug-evaluate.js139
-rw-r--r--deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js616
-rw-r--r--deps/v8/test/mjsunit/harmony/async-function-stacktrace.js178
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics.js54
-rw-r--r--deps/v8/test/mjsunit/harmony/block-sloppy-function.js301
-rw-r--r--deps/v8/test/mjsunit/harmony/dataview-accessors.js54
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-async-break-on-stack.js78
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-async-break.js76
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js (renamed from deps/v8/test/mjsunit/es7/object-observe-debug-event.js)38
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-async-liveedit.js133
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/for-in.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/function-sent.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/futex.js193
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-turbo.js667
-rw-r--r--deps/v8/test/mjsunit/harmony/generators.js587
-rw-r--r--deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/mirror-async-function-promise.js93
-rw-r--r--deps/v8/test/mjsunit/harmony/mirror-async-function.js76
-rw-r--r--deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js31
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-change-exec.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-named-captures.js76
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-binary.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-blocks.js34
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-char-class.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-disabled.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-enumerated.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js33
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-general-category.js34
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-scripts.js46
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-special.js51
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4904.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-618603.js14
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-624300.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-621111.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-621496.js7
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/sharedarraybuffer.js18
-rw-r--r--deps/v8/test/mjsunit/harmony/simd.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js60
-rw-r--r--deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-async.js30
-rw-r--r--deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-generators.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/to-name.js13
-rw-r--r--deps/v8/test/mjsunit/harmony/to-primitive.js52
-rw-r--r--deps/v8/test/mjsunit/harmony/to-string.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/trailing-commas-length.js31
-rw-r--r--deps/v8/test/mjsunit/holy-double-no-arg-array.js14
-rw-r--r--deps/v8/test/mjsunit/ignition/debug-break-mixed-stack.js52
-rw-r--r--deps/v8/test/mjsunit/ignition/debug-step-mixed-stack.js53
-rw-r--r--deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js37
-rw-r--r--deps/v8/test/mjsunit/ignition/elided-instruction.js12
-rw-r--r--deps/v8/test/mjsunit/ignition/ignition-statistics-extension.js62
-rw-r--r--deps/v8/test/mjsunit/ignition/osr-from-bytecode.js12
-rw-r--r--deps/v8/test/mjsunit/ignition/osr-from-generator.js65
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js2
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-612386-smi-to-double-transition.js29
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-616064.js26
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-629792-source-position-on-jump.js14
-rw-r--r--deps/v8/test/mjsunit/induction-variable-turbofan.js102
-rw-r--r--deps/v8/test/mjsunit/json-replacer-order.js1
-rw-r--r--deps/v8/test/mjsunit/json-stringify-holder.js104
-rw-r--r--deps/v8/test/mjsunit/json.js11
-rw-r--r--deps/v8/test/mjsunit/json2.js8
-rw-r--r--deps/v8/test/mjsunit/lithium/SeqStringSetChar.js46
-rw-r--r--deps/v8/test/mjsunit/messages.js9
-rw-r--r--deps/v8/test/mjsunit/migrations.js5
-rw-r--r--deps/v8/test/mjsunit/mirror-object.js32
-rw-r--r--deps/v8/test/mjsunit/mirror-regexp.js2
-rw-r--r--deps/v8/test/mjsunit/mirror-script.js13
-rw-r--r--deps/v8/test/mjsunit/mjsunit.gyp4
-rw-r--r--deps/v8/test/mjsunit/mjsunit.isolate3
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js9
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status535
-rw-r--r--deps/v8/test/mjsunit/object-define-property.js47
-rw-r--r--deps/v8/test/mjsunit/object-literal.js2
-rw-r--r--deps/v8/test/mjsunit/osr-elements-kind.js2
-rw-r--r--deps/v8/test/mjsunit/realm-property-access.js20
-rw-r--r--deps/v8/test/mjsunit/regexp-compile.js4
-rw-r--r--deps/v8/test/mjsunit/regexp-lastIndex.js18
-rw-r--r--deps/v8/test/mjsunit/regexp-string-methods.js10
-rw-r--r--deps/v8/test/mjsunit/regress-604044.js7
-rw-r--r--deps/v8/test/mjsunit/regress-crbug-619476.js (renamed from deps/v8/test/mjsunit/harmony/modules.js)7
-rw-r--r--deps/v8/test/mjsunit/regress/redeclaration-error-types.js145
-rw-r--r--deps/v8/test/mjsunit/regress/regress-105.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1132.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1246.js82
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1403.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1980.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3229.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3315.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-349870.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353004.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353551.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-356589.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3926.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-403292.js55
-rw-r--r--deps/v8/test/mjsunit/regress/regress-417709a.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-417709b.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4659.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4665-2.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4665.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4693.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4703.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4815.js52
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4908.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4945.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4964.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4967.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4971.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5004.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5018.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5036.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5071.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5085.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5106.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5164.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5173.js51
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5174.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5178.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5181.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-520029.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5205.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5207.js30
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5213.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5214.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5216.js (renamed from deps/v8/test/mjsunit/harmony/regexp-no-change-exec.js)5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5245.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5252.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5262.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5275-1.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5275-2.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5279.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5286.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5342.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-536751.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-542099.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-542100.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-544991.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-575364.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-585041.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-592352.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js45
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599717.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599719.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-599825.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-605470.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-606021.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-608630.js71
-rw-r--r--deps/v8/test/mjsunit/regress/regress-610633.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-612146.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-612412.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-613928.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-615776.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-616386.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-617525.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-617526.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-617529.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-617882.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-618608.js1470
-rw-r--r--deps/v8/test/mjsunit/regress/regress-618657.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-619382.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-620553.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-620750.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-621869.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-622663.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-625121.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-631050.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-632289.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-633998.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-635429.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-638134.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-639270.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-674753.js76
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-119800.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-320922.js50
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-401915.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-451770.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-480807.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-492526.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-495493.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-498142.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-500497.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-515897.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-570651.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-571517.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-582048.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599067.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-600257.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-600995.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-602184.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-602595.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-603463.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-604299.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-605060.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-605581.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-605862.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-608279.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-609029.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-609046.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-610207.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-612109.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-612142.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-613494.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-613570.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-613905.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-613919.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-614292.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-614644.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-614727.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-615774.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-617527.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-617567.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-618788.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-618845.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-620119.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-620253.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-620650.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-621361.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-621611.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-621816.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-624747.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-624919.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-625547.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-625590.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-626715.js28
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-627828.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-627934.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-627935.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-628573.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-629062.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-629435.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-629823.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-629996.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-630559.js (renamed from deps/v8/test/mjsunit/regress/regress-449070.js)9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-630561.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-630923.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-630951.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-630952.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-631917.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-632800.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-633585.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-633884.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-633999.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-635798.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-638551.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-640369.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-390925.js)13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-642056.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644215.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-double-canonicalization.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-object-assign-deprecated-2.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-object-assign-deprecated.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-observe-map-cache.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-put-prototype-transition.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-recurse-patch-binary-op.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js59
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-typedarray-length.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5254-1.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5254-2.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5255-1.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5255-2.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5255-3.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-wasm-crbug-599413.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-wasm-crbug-618602.js15
-rw-r--r--deps/v8/test/mjsunit/regress/string-set-char-deopt.js85
-rw-r--r--deps/v8/test/mjsunit/stack-traces-overflow.js18
-rw-r--r--deps/v8/test/mjsunit/stack-traces.js92
-rw-r--r--deps/v8/test/mjsunit/string-natives.js79
-rw-r--r--deps/v8/test/mjsunit/string-wrapper.js62
-rw-r--r--deps/v8/test/mjsunit/substr.js19
-rw-r--r--deps/v8/test/mjsunit/tools/dumpcpp.js53
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log6
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test.log6
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.js32
-rw-r--r--deps/v8/test/mjsunit/unicode-test.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/OWNERS6
-rw-r--r--deps/v8/test/mjsunit/wasm/adapter-frame.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-copy.js5
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f32.js13
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-f64.js13
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-heap.js27
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-i32.js17
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-literals.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js160
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-switch.js486
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-u32.js13
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js586
-rw-r--r--deps/v8/test/mjsunit/wasm/calls.js17
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js80
-rw-r--r--deps/v8/test/mjsunit/wasm/debug-disassembly.js128
-rw-r--r--deps/v8/test/mjsunit/wasm/divrem-trap.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/box2d.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/copy.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/corrections.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/fasta.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/memops.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/primes.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/embenchen/zlib.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/export-table.js51
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi-error.js28
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js100
-rw-r--r--deps/v8/test/mjsunit/wasm/frame-inspection.js74
-rw-r--r--deps/v8/test/mjsunit/wasm/function-names.js69
-rw-r--r--deps/v8/test/mjsunit/wasm/function-prototype.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-frame.js33
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js119
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js47
-rw-r--r--deps/v8/test/mjsunit/wasm/incrementer.wasmbin0 -> 66 bytes
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js17
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js167
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-run-basic.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js38
-rw-r--r--deps/v8/test/mjsunit/wasm/no-wasm-by-default.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/parallel_compilation.js100
-rw-r--r--deps/v8/test/mjsunit/wasm/params.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/receiver.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js139
-rw-r--r--deps/v8/test/mjsunit/wasm/stackwalk.js9
-rw-r--r--deps/v8/test/mjsunit/wasm/start-function.js26
-rw-r--r--deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js241
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js48
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js78
-rw-r--r--deps/v8/test/mjsunit/wasm/unicode-validation.js121
-rw-r--r--deps/v8/test/mjsunit/wasm/unreachable.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-simple.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js112
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js554
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-object-api.js7
-rw-r--r--deps/v8/test/mozilla/mozilla.gyp4
-rw-r--r--deps/v8/test/mozilla/mozilla.status7
-rw-r--r--deps/v8/test/mozilla/testcfg.py17
-rw-r--r--deps/v8/test/optimize_for_size.gyp4
-rw-r--r--deps/v8/test/perf.gyp4
-rw-r--r--deps/v8/test/preparser/preparser.gyp4
-rw-r--r--deps/v8/test/simdjs/simdjs.gyp4
-rw-r--r--deps/v8/test/simdjs/testcfg.py16
-rw-r--r--deps/v8/test/test262/BUILD.gn34
-rwxr-xr-xdeps/v8/test/test262/archive.py17
-rw-r--r--deps/v8/test/test262/detachArrayBuffer.js7
-rwxr-xr-xdeps/v8/test/test262/list.py3
-rw-r--r--deps/v8/test/test262/test262.gyp4
-rw-r--r--deps/v8/test/test262/test262.isolate1
-rw-r--r--deps/v8/test/test262/test262.status498
-rw-r--r--deps/v8/test/test262/testcfg.py39
-rw-r--r--deps/v8/test/unittests/BUILD.gn77
-rw-r--r--deps/v8/test/unittests/base/atomic-utils-unittest.cc (renamed from deps/v8/test/unittests/atomic-utils-unittest.cc)29
-rw-r--r--deps/v8/test/unittests/base/ieee754-unittest.cc405
-rw-r--r--deps/v8/test/unittests/base/platform/time-unittest.cc75
-rw-r--r--deps/v8/test/unittests/base/sys-info-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc156
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc381
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc1087
-rw-r--r--deps/v8/test/unittests/compiler/change-lowering-unittest.cc628
-rw-r--r--deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc59
-rw-r--r--deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc268
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc178
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc58
-rw-r--r--deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc399
-rw-r--r--deps/v8/test/unittests/compiler/escape-analysis-unittest.cc23
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h3
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc65
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc26
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.h7
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc22
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.h6
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc243
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc1269
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc86
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc144
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc526
-rw-r--r--deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc89
-rw-r--r--deps/v8/test/unittests/compiler/load-elimination-unittest.cc227
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc188
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc539
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc37
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc135
-rw-r--r--deps/v8/test/unittests/compiler/move-optimizer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/node-matchers-unittest.cc53
-rw-r--r--deps/v8/test/unittests/compiler/node-properties-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc458
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h87
-rw-r--r--deps/v8/test/unittests/compiler/node-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/register-allocator-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc30
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc272
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc75
-rw-r--r--deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc57
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc40
-rw-r--r--deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc150
-rw-r--r--deps/v8/test/unittests/eh-frame-iterator-unittest.cc61
-rw-r--r--deps/v8/test/unittests/eh-frame-writer-unittest.cc464
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc190
-rw-r--r--deps/v8/test/unittests/heap/marking-unittest.cc160
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc14
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc511
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc119
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc254
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc149
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc87
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc531
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc185
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc219
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-utils.h37
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc161
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc229
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc376
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h2
-rw-r--r--deps/v8/test/unittests/libplatform/worker-thread-unittest.cc16
-rw-r--r--deps/v8/test/unittests/register-configuration-unittest.cc166
-rw-r--r--deps/v8/test/unittests/source-position-table-unittest.cc (renamed from deps/v8/test/unittests/interpreter/source-position-table-unittest.cc)50
-rw-r--r--deps/v8/test/unittests/test-utils.cc18
-rw-r--r--deps/v8/test/unittests/test-utils.h6
-rw-r--r--deps/v8/test/unittests/unittests.gyp296
-rw-r--r--deps/v8/test/unittests/unittests.status8
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc1368
-rw-r--r--deps/v8/test/unittests/wasm/OWNERS6
-rw-r--r--deps/v8/test/unittests/wasm/asm-types-unittest.cc723
-rw-r--r--deps/v8/test/unittests/wasm/ast-decoder-unittest.cc1400
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc402
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc39
-rw-r--r--deps/v8/test/unittests/wasm/encoder-unittest.cc196
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc191
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc53
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc992
-rw-r--r--deps/v8/test/unittests/wasm/switch-logic-unittest.cc89
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc94
-rw-r--r--deps/v8/test/webkit/class-constructor-return.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-call.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-declaration.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-default-constructor.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-expression.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-extends-expected.txt22
-rw-r--r--deps/v8/test/webkit/class-syntax-extends.js14
-rw-r--r--deps/v8/test/webkit/class-syntax-name.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-prototype.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-scoping.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-semicolon.js2
-rw-r--r--deps/v8/test/webkit/class-syntax-super.js2
-rw-r--r--deps/v8/test/webkit/dfg-redundant-load-of-captured-variable-proven-constant.js4
-rw-r--r--deps/v8/test/webkit/dfg-resolve-global-specific-dictionary.js2
-rw-r--r--deps/v8/test/webkit/exception-for-nonobject.js2
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt112
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode.js2
-rw-r--r--deps/v8/test/webkit/fast/js/deep-recursion-test.js2
-rw-r--r--deps/v8/test/webkit/fast/js/excessive-comma-usage.js2
-rw-r--r--deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion-expected.txt10
-rw-r--r--deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion.js10
-rw-r--r--deps/v8/test/webkit/fast/js/kde/lval-exceptions.js14
-rw-r--r--deps/v8/test/webkit/fast/js/toString-overrides-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/regex/lastIndex-expected.txt6
-rw-r--r--deps/v8/test/webkit/fast/regex/toString-expected.txt4
-rw-r--r--deps/v8/test/webkit/function-declaration-statement.js2
-rw-r--r--deps/v8/test/webkit/instance-of-immediates.js2
-rw-r--r--deps/v8/test/webkit/parser-xml-close-comment-expected.txt2
-rw-r--r--deps/v8/test/webkit/parser-xml-close-comment.js2
-rw-r--r--deps/v8/test/webkit/resources/JSON-stringify.js232
-rw-r--r--deps/v8/test/webkit/run-json-stringify-expected.txt4
-rw-r--r--deps/v8/test/webkit/testcfg.py6
-rw-r--r--deps/v8/test/webkit/toString-for-var-decl.js2
-rw-r--r--deps/v8/test/webkit/webkit.gyp4
-rw-r--r--deps/v8/test/webkit/webkit.status29
-rw-r--r--deps/v8/testing/gmock.gyp5
-rw-r--r--deps/v8/testing/gmock_custom/gmock/internal/custom/gmock-port.h29
-rw-r--r--deps/v8/testing/gtest.gyp41
-rw-r--r--deps/v8/testing/gtest/include/gtest/gtest_prod.h58
-rw-r--r--deps/v8/tools/BUILD.gn59
-rwxr-xr-xdeps/v8/tools/android-run.py5
-rw-r--r--deps/v8/tools/callstats.html1809
-rwxr-xr-xdeps/v8/tools/callstats.py638
-rw-r--r--deps/v8/tools/check-static-initializers.gyp4
-rw-r--r--deps/v8/tools/codemap.js8
-rw-r--r--deps/v8/tools/detect-builtins.js13
-rwxr-xr-xdeps/v8/tools/dev/v8gen.py244
-rw-r--r--deps/v8/tools/dump-cpp.py74
-rw-r--r--deps/v8/tools/dumpcpp-driver.js45
-rw-r--r--deps/v8/tools/dumpcpp.js58
-rwxr-xr-xdeps/v8/tools/eval_gc_nvp.py29
-rwxr-xr-xdeps/v8/tools/eval_gc_time.sh9
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua34
-rw-r--r--deps/v8/tools/gcmole/run-gcmole.isolate2
-rw-r--r--deps/v8/tools/gcmole/run_gcmole.gyp4
-rw-r--r--deps/v8/tools/gdb-v8-support.py2
-rw-r--r--deps/v8/tools/gdbinit24
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py52
-rw-r--r--deps/v8/tools/gyp/v8.gyp2281
-rwxr-xr-xdeps/v8/tools/gyp_flag_compare.py280
-rw-r--r--deps/v8/tools/ic-explorer.html624
-rwxr-xr-xdeps/v8/tools/ignition/bytecode_dispatches_report.py281
-rw-r--r--deps/v8/tools/ignition/bytecode_dispatches_report_test.py62
-rwxr-xr-xdeps/v8/tools/ignition/linux_perf_bytecode_annotate.py174
-rw-r--r--deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py85
-rwxr-xr-xdeps/v8/tools/ignition/linux_perf_report.py223
-rw-r--r--deps/v8/tools/ignition/linux_perf_report_test.py147
-rw-r--r--deps/v8/tools/isolate_driver.py293
-rwxr-xr-xdeps/v8/tools/js2c.py10
-rw-r--r--deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp4
-rw-r--r--deps/v8/tools/memory/asan/blacklist.txt4
-rw-r--r--deps/v8/tools/memory/tsan_v2/ignores.txt5
-rwxr-xr-xdeps/v8/tools/mingw-generate-makefiles.sh10
-rw-r--r--deps/v8/tools/msan/blacklist.txt4
-rwxr-xr-xdeps/v8/tools/nacl-run.py147
-rwxr-xr-xdeps/v8/tools/objdump-v883
-rw-r--r--deps/v8/tools/parser-shell.cc20
-rw-r--r--deps/v8/tools/parser-shell.gyp10
-rwxr-xr-xdeps/v8/tools/perf-to-html.py4
-rwxr-xr-xdeps/v8/tools/plot-timer-events5
-rwxr-xr-xdeps/v8/tools/presubmit.py3
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py4
-rwxr-xr-xdeps/v8/tools/release/check_clusterfuzz.py9
-rwxr-xr-xdeps/v8/tools/release/create_release.py22
-rw-r--r--deps/v8/tools/release/git_recipes.py4
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py108
-rwxr-xr-xdeps/v8/tools/release/roll_merge.py290
-rw-r--r--deps/v8/tools/release/test_scripts.py386
-rw-r--r--deps/v8/tools/run-deopt-fuzzer.gyp4
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py7
-rwxr-xr-xdeps/v8/tools/run-perf.sh3
-rwxr-xr-xdeps/v8/tools/run-tests.py160
-rw-r--r--deps/v8/tools/run-valgrind.gyp4
-rwxr-xr-xdeps/v8/tools/run_perf.py237
-rw-r--r--deps/v8/tools/testrunner/local/commands.py4
-rw-r--r--deps/v8/tools/testrunner/local/execution.py5
-rw-r--r--deps/v8/tools/testrunner/local/progress.py21
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py132
-rwxr-xr-xdeps/v8/tools/testrunner/local/statusfile_unittest.py163
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py115
-rwxr-xr-xdeps/v8/tools/testrunner/local/testsuite_unittest.py98
-rw-r--r--deps/v8/tools/testrunner/local/utils.py21
-rw-r--r--deps/v8/tools/testrunner/local/variants.py32
-rw-r--r--deps/v8/tools/testrunner/network/network_execution.py1
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py9
-rw-r--r--deps/v8/tools/testrunner/server/main.py2
-rw-r--r--deps/v8/tools/testrunner/utils/dump_build_config.py26
-rw-r--r--deps/v8/tools/tickprocessor-driver.js3
-rw-r--r--deps/v8/tools/tickprocessor.js34
-rwxr-xr-xdeps/v8/tools/try_perf.py6
-rw-r--r--deps/v8/tools/turbolizer-perf.py56
-rw-r--r--deps/v8/tools/turbolizer/OWNERS1
-rw-r--r--deps/v8/tools/turbolizer/README.md62
-rw-r--r--deps/v8/tools/turbolizer/code-view.js172
-rw-r--r--deps/v8/tools/turbolizer/constants.js24
-rw-r--r--deps/v8/tools/turbolizer/disassembly-view.js245
-rw-r--r--deps/v8/tools/turbolizer/edge.js79
-rw-r--r--deps/v8/tools/turbolizer/empty-view.js19
-rw-r--r--deps/v8/tools/turbolizer/expand-all.jpgbin0 -> 2839 bytes
-rw-r--r--deps/v8/tools/turbolizer/graph-layout.js493
-rw-r--r--deps/v8/tools/turbolizer/graph-view.js1033
-rw-r--r--deps/v8/tools/turbolizer/hide-selected.pngbin0 -> 3681 bytes
-rw-r--r--deps/v8/tools/turbolizer/hide-unselected.pngbin0 -> 3701 bytes
-rw-r--r--deps/v8/tools/turbolizer/index.html99
-rw-r--r--deps/v8/tools/turbolizer/lang-disassembly.js14
-rw-r--r--deps/v8/tools/turbolizer/layout-icon.pngbin0 -> 4577 bytes
-rw-r--r--deps/v8/tools/turbolizer/left-arrow.pngbin0 -> 4175 bytes
-rw-r--r--deps/v8/tools/turbolizer/live.pngbin0 -> 3730 bytes
-rw-r--r--deps/v8/tools/turbolizer/monkey.js26
-rw-r--r--deps/v8/tools/turbolizer/node.js147
-rw-r--r--deps/v8/tools/turbolizer/right-arrow.pngbin0 -> 2774 bytes
-rw-r--r--deps/v8/tools/turbolizer/schedule-view.js128
-rw-r--r--deps/v8/tools/turbolizer/search.pngbin0 -> 3751 bytes
-rw-r--r--deps/v8/tools/turbolizer/search2.pngbin0 -> 689 bytes
-rw-r--r--deps/v8/tools/turbolizer/selection-broker.js99
-rw-r--r--deps/v8/tools/turbolizer/selection.js108
-rw-r--r--deps/v8/tools/turbolizer/text-view.js296
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.css348
-rw-r--r--deps/v8/tools/turbolizer/turbo-visualizer.js257
-rw-r--r--deps/v8/tools/turbolizer/types.pngbin0 -> 753 bytes
-rw-r--r--deps/v8/tools/turbolizer/upload-icon.pngbin0 -> 4747 bytes
-rw-r--r--deps/v8/tools/turbolizer/util.js80
-rw-r--r--deps/v8/tools/turbolizer/view.js45
-rw-r--r--deps/v8/tools/v8heapconst.py397
-rwxr-xr-xdeps/v8/tools/verify_source_deps.py2
-rw-r--r--deps/v8/tools/whitespace.txt3
2386 files changed, 260073 insertions, 148096 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 805f349a6e..ac9b0bd9ac 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -40,15 +40,13 @@ gcsuspects
shell
shell_g
/_*
-/build/Debug
-/build/gyp
-/build/ipch
-/build/Release
-/build/win_toolchain.json
+/build
+/gypfiles/win_toolchain.json
/buildtools
/hydrogen.cfg
/obj
/out
+/out.gn
/perf.data
/perf.data.old
/test/benchmarks/data
@@ -59,20 +57,35 @@ shell_g
/test/simdjs/data
/test/test262/data
/test/test262/data.tar
+/test/test262/harness
/testing/gmock
-/testing/gtest
+/testing/gtest/*
+!/testing/gtest/include
+/testing/gtest/include/*
+!/testing/gtest/include/gtest
+/testing/gtest/include/gtest/*
+!/testing/gtest/include/gtest/gtest_prod.h
/third_party
+/third_party/android_tools
+/third_party/cygwin
/third_party/icu
+/third_party/instrumented_libraries
+/third_party/inspector_protocol
+/third_party/jinja2
/third_party/llvm
/third_party/llvm-build
+/third_party/markupsafe
+/third_party/WebKit
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
+/tools/gyp
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go/linux64/isolate
/tools/luci-go/mac64/isolate
/tools/luci-go/win64/isolate.exe
+/tools/mb
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client
@@ -86,7 +99,9 @@ GTAGS
GRTAGS
GSYMS
GPATH
+tags
gtags.files
turbo*.cfg
turbo*.dot
turbo*.json
+v8.ignition_dispatches_table.json
diff --git a/deps/v8/.gn b/deps/v8/.gn
new file mode 100644
index 0000000000..a1c0ff8dce
--- /dev/null
+++ b/deps/v8/.gn
@@ -0,0 +1,51 @@
+# This file is used by the GN meta build system to find the root of the source
+# tree and to set startup options. For documentation on the values set in this
+# file, run "gn help dotfile" at the command line.
+
+# The location of the build configuration file.
+buildconfig = "//build/config/BUILDCONFIG.gn"
+
+# The secondary source root is a parallel directory tree where
+# GN build files are placed when they can not be placed directly
+# in the source tree, e.g. for third party source trees.
+secondary_source = "//build/secondary/"
+
+# These are the targets to check headers for by default. The files in targets
+# matching these patterns (see "gn help label_pattern" for format) will have
+# their includes checked for proper dependencies when you run either
+# "gn check" or "gn gen --check".
+check_targets = [
+]
+
+# These are the list of GN files that run exec_script. This whitelist exists
+# to force additional review for new uses of exec_script, which is strongly
+# discouraged except for gypi_to_gn calls.
+exec_script_whitelist = [
+ "//build/config/android/BUILD.gn",
+ "//build/config/android/config.gni",
+ "//build/config/android/internal_rules.gni",
+ "//build/config/android/rules.gni",
+ "//build/config/BUILD.gn",
+ "//build/config/compiler/BUILD.gn",
+ "//build/config/gcc/gcc_version.gni",
+ "//build/config/ios/ios_sdk.gni",
+ "//build/config/linux/atk/BUILD.gn",
+ "//build/config/linux/BUILD.gn",
+ "//build/config/linux/pkg_config.gni",
+ "//build/config/mac/mac_sdk.gni",
+ "//build/config/posix/BUILD.gn",
+ "//build/config/sysroot.gni",
+ "//build/config/win/BUILD.gn",
+ "//build/config/win/visual_studio_version.gni",
+ "//build/gn_helpers.py",
+ "//build/gypi_to_gn.py",
+ "//build/toolchain/concurrent_links.gni",
+ "//build/toolchain/gcc_toolchain.gni",
+ "//build/toolchain/mac/BUILD.gn",
+ "//build/toolchain/win/BUILD.gn",
+ "//build/util/branding.gni",
+ "//build/util/version.gni",
+ "//test/cctest/BUILD.gn",
+ "//test/test262/BUILD.gn",
+ "//test/unittests/BUILD.gn",
+]
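
For readers unfamiliar with GN dotfiles: the new file above is what makes deps/v8 buildable as its own source root, since "gn gen" walks up from the current directory until it finds a .gn file and takes its settings from there. A minimal sketch of such a dotfile, with hypothetical paths and assuming a Chromium-style //build checkout, looks like:

    # Hypothetical minimal .gn for a standalone project; "buildconfig" is the
    # only required value, the rest are optional tuning knobs.
    buildconfig = "//build/config/BUILDCONFIG.gn"

    # Parallel tree holding BUILD.gn files for sources that cannot carry
    # their own (e.g. unmodified third-party imports).
    secondary_source = "//build/secondary/"

    # Opt targets into header-dependency checking via "gn check".
    check_targets = [ "//src/*" ]
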
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 60b6c51db5..d70be77d86 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -40,6 +40,7 @@ Alexis Campailla <alexis@janeasystems.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
+Anna Henningsen <addaleax@gmail.com>
Bangfu Tao <bangfu.tao@samsung.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
@@ -50,7 +51,9 @@ Craig Schlenter <craig.schlenter@gmail.com>
Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
Daniel Andersson <kodandersson@gmail.com>
+Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel James <dnljms@gmail.com>
+Deon Dior <diaoyuanjie@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
Erich Ocean <erich.ocean@me.com>
@@ -62,6 +65,7 @@ Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Geoffrey Garside <ggarside@gmail.com>
Han Choongwoo <cwhan.tunz@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
+Honggyu Kim <honggyu.kp@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
@@ -85,11 +89,13 @@ Matthew Sporleder <msporleder@gmail.com>
Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Lutz <michi@icosahedron.de>
Michael Smith <mike@w3.org>
+Michaël Zasso <mic.besace@gmail.com>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
Milton Chiang <milton.chiang@mediatek.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
+Oleksandr Chekhovskyi <oleksandr.chekhovskyi@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
@@ -113,4 +119,4 @@ Vladimir Shutoff <vovan@shutoff.ru>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
-柳荣一 <admin@web-tinker.com>
\ No newline at end of file
+柳荣一 <admin@web-tinker.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 28aca24afc..dcefe3706b 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -4,6 +4,7 @@
import("//build/config/android/config.gni")
import("//build/config/arm.gni")
+import("//build/config/dcheck_always_on.gni")
import("//build/config/mips.gni")
import("//build/config/sanitizers/sanitizers.gni")
@@ -11,16 +12,55 @@ if (is_android) {
import("//build/config/android/rules.gni")
}
-# Because standalone V8 builds are not supported, assume this is part of a
-# Chromium build.
+import("gni/v8.gni")
+import("gni/isolate.gni")
import("//build_overrides/v8.gni")
import("snapshot_toolchain.gni")
declare_args() {
- # Enable the snapshot feature, for fast context creation.
- # http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
- v8_use_snapshot = true
+ # Print to stdout on Android.
+ v8_android_log_stdout = false
+
+ # Sets -DVERIFY_HEAP.
+ v8_enable_verify_heap = false
+
+ # Enable compiler warnings when using V8_DEPRECATED apis.
+ v8_deprecation_warnings = false
+
+ # Enable compiler warnings when using V8_DEPRECATE_SOON apis.
+ v8_imminent_deprecation_warnings = ""
+
+ # Embeds the given script into the snapshot.
+ v8_embed_script = ""
+
+ # Sets -dENABLE_DISASSEMBLER.
+ v8_enable_disassembler = ""
+
+ # Sets -dENABLE_GDB_JIT_INTERFACE.
+ v8_enable_gdbjit = ""
+
+ # Sets -dENABLE_HANDLE_ZAPPING.
+ v8_enable_handle_zapping = is_debug
+
+ # Enable ECMAScript Internationalization API. Enabling this feature will
+ # add a dependency on the ICU library.
+ v8_enable_i18n_support = true
+
+ # Enable slow dchecks.
+ v8_enable_slow_dchecks = false
+
+ # Interpreted regexp engine exists as platform-independent alternative
+ # based where the regular expression is compiled to a bytecode.
+ v8_interpreted_regexp = false
+
+ # Sets -dOBJECT_PRINT.
+ v8_object_print = ""
+
+ # With post mortem support enabled, metadata is embedded into libv8 that
+ # describes various parameters of the VM for use by debuggers. See
+ # tools/gen-postmortem-metadata.py for details.
+ v8_postmortem_support = false
# Similar to vfp but on MIPS.
v8_can_use_fpu_instructions = true
@@ -29,38 +69,35 @@ declare_args() {
v8_use_mips_abi_hardfloat = true
}
-# TODO(jochen): These will need to be user-settable to support standalone V8
-# builds.
-v8_deprecation_warnings = false
-v8_enable_disassembler = false
-v8_enable_gdbjit = false
-v8_enable_handle_zapping = false
-v8_enable_i18n_support = true
-v8_enable_verify_heap = false
-v8_interpreted_regexp = false
-v8_object_print = false
-v8_postmortem_support = false
-v8_random_seed = "314159265"
-v8_toolset_for_d8 = "host"
-
-if (is_msan) {
- # Running the V8-generated code on an ARM simulator is a powerful hack that
- # allows the tool to see the memory accesses from JITted code. Without this
- # flag, JS code causes false positive reports from MSan.
- v8_target_arch = "arm64"
-} else {
- v8_target_arch = target_cpu
+# Set project-specific defaults for some args if not provided in args.gn. The
+# defaults can be set in the respective build_overrides files.
+if (v8_imminent_deprecation_warnings == "") {
+ if (defined(v8_imminent_deprecation_warnings_default)) {
+ v8_imminent_deprecation_warnings = v8_imminent_deprecation_warnings_default
+ } else {
+ v8_imminent_deprecation_warnings = false
+ }
+}
+if (v8_enable_gdbjit == "") {
+ if (defined(v8_enable_gdbjit_default)) {
+ v8_enable_gdbjit = v8_enable_gdbjit_default
+ } else {
+ v8_enable_gdbjit = false
+ }
}
-if (v8_use_snapshot && v8_use_external_startup_data) {
- snapshot_target = ":v8_external_snapshot"
-} else if (v8_use_snapshot) {
- snapshot_target = ":v8_snapshot"
-} else {
- assert(!v8_use_external_startup_data)
- snapshot_target = ":v8_nosnapshot"
+# Derived defaults.
+if (v8_object_print == "") {
+ v8_object_print = is_debug && !v8_optimized_debug
+}
+if (v8_enable_disassembler == "") {
+ v8_enable_disassembler = is_debug && !v8_optimized_debug
}
+v8_generated_peephole_source = "$target_gen_dir/bytecode-peephole-table.cc"
+v8_random_seed = "314159265"
+v8_toolset_for_shell = "host"
+
###############################################################################
# Configurations
#
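
The declare_args rewrite in the hunk above introduces a tri-state idiom: an empty string means "not set by the user", which lets an embedder's build_overrides .gni supply a default without clobbering an explicit args.gn value. A condensed sketch of the pattern, with hypothetical identifiers, would be:

    declare_args() {
      # "" = unset; true/false = explicitly chosen in args.gn.
      my_warnings = ""
    }

    # Resolve the sentinel after declaration; my_warnings_default would come
    # from an imported build_overrides .gni file, if the embedder defines it.
    if (my_warnings == "") {
      if (defined(my_warnings_default)) {
        my_warnings = my_warnings_default
      } else {
        my_warnings = false
      }
    }
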
@@ -88,6 +125,11 @@ config("libplatform_config") {
include_dirs = [ "include" ]
}
+# This config should be applied to code using the libsampler.
+config("libsampler_config") {
+ include_dirs = [ "include" ]
+}
+
# This config should only be applied to code using V8 and not any V8 code
# itself.
config("external_config") {
@@ -98,6 +140,10 @@ config("external_config") {
]
}
include_dirs = [ "include" ]
+ libs = []
+ if (is_android && current_toolchain != host_toolchain) {
+ libs += [ "log" ]
+ }
}
# This config should only be applied to code that needs to be explicitly
@@ -113,31 +159,34 @@ config("features") {
defines = []
- if (v8_enable_disassembler == true) {
+ if (v8_enable_disassembler) {
defines += [ "ENABLE_DISASSEMBLER" ]
}
- if (v8_enable_gdbjit == true) {
+ if (v8_enable_gdbjit) {
defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
}
- if (v8_object_print == true) {
+ if (v8_object_print) {
defines += [ "OBJECT_PRINT" ]
}
- if (v8_enable_verify_heap == true) {
+ if (v8_enable_verify_heap) {
defines += [ "VERIFY_HEAP" ]
}
- if (v8_interpreted_regexp == true) {
+ if (v8_interpreted_regexp) {
defines += [ "V8_INTERPRETED_REGEXP" ]
}
- if (v8_deprecation_warnings == true) {
+ if (v8_deprecation_warnings) {
defines += [ "V8_DEPRECATION_WARNINGS" ]
}
- if (v8_enable_i18n_support == true) {
+ if (v8_imminent_deprecation_warnings) {
+ defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
+ }
+ if (v8_enable_i18n_support) {
defines += [ "V8_I18N_SUPPORT" ]
}
- if (v8_enable_handle_zapping == true) {
+ if (v8_enable_handle_zapping) {
defines += [ "ENABLE_HANDLE_ZAPPING" ]
}
- if (v8_use_external_startup_data == true) {
+ if (v8_use_external_startup_data) {
defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
}
}
@@ -147,8 +196,9 @@ config("toolchain") {
defines = []
cflags = []
+ ldflags = []
- if (v8_target_arch == "arm") {
+ if (v8_current_cpu == "arm") {
defines += [ "V8_TARGET_ARCH_ARM" ]
if (arm_version == 7) {
defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
@@ -167,6 +217,7 @@ config("toolchain") {
"CAN_USE_NEON",
]
}
+
# TODO(jochen): Add support for arm_test_noprobe.
if (current_cpu != "arm") {
@@ -178,11 +229,12 @@ config("toolchain") {
}
}
}
- if (v8_target_arch == "arm64") {
+ if (v8_current_cpu == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
}
+
# TODO(jochen): Add support for mips.
- if (v8_target_arch == "mipsel") {
+ if (v8_current_cpu == "mipsel") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
@@ -212,14 +264,17 @@ config("toolchain") {
} else if (mips_arch_variant == "r1") {
defines += [ "FPU_MODE_FP32" ]
}
+
# TODO(jochen): Add support for mips_arch_variant rx and loongson.
}
+
# TODO(jochen): Add support for mips64.
- if (v8_target_arch == "mips64el") {
+ if (v8_current_cpu == "mips64el") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
+
# TODO(jochen): Add support for big endian host byteorder.
defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
if (v8_use_mips_abi_hardfloat) {
@@ -236,30 +291,43 @@ config("toolchain") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
}
- if (v8_target_arch == "s390") {
+ if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390" ]
- }
- if (v8_target_arch == "s390x") {
- defines += [
- "V8_TARGET_ARCH_S390",
- "V8_TARGET_ARCH_S390X",
- ]
+ if (v8_current_cpu == "s390x") {
+ defines += [ "V8_TARGET_ARCH_S390X" ]
+ }
+ if (host_cpu == "x64" || host_cpu == "x86") {
+ defines += [ "V8_TARGET_ARCH_S390_LE_SIM" ]
+ }
}
- if (v8_target_arch == "x86") {
+ if (v8_current_cpu == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
+ if (is_win) {
+ # Ensure no surprising artifacts from 80bit double math with x86.
+ cflags += [ "/arch:SSE2" ]
+ }
}
- if (v8_target_arch == "x64") {
+ if (v8_current_cpu == "x64") {
defines += [ "V8_TARGET_ARCH_X64" ]
+ if (is_win) {
+ # Increase the initial stack size. The default is 1MB, this is 2MB. This
+ # applies only to executables and shared libraries produced by V8 since
+ # ldflags are not pushed to dependants.
+ ldflags += [ "/STACK:2097152" ]
+ }
}
-
- if (is_win) {
- defines += [ "WIN32" ]
- # TODO(jochen): Support v8_enable_prof.
+ if (is_android && v8_android_log_stdout) {
+ defines += [ "V8_ANDROID_LOG_STDOUT" ]
}
+ # TODO(jochen): Support v8_enable_prof on Windows.
# TODO(jochen): Add support for compiling with simulators.
if (is_debug) {
+ if (is_linux && v8_enable_backtrace) {
+ ldflags += [ "-rdynamic" ]
+ }
+
# TODO(jochen): Add support for different debug optimization levels.
defines += [
"ENABLE_DISASSEMBLER",
@@ -267,8 +335,13 @@ config("toolchain") {
"OBJECT_PRINT",
"VERIFY_HEAP",
"DEBUG",
- "OPTIMIZED_DEBUG",
+ "TRACE_MAPS",
]
+ if (v8_enable_slow_dchecks) {
+ defines += [ "ENABLE_SLOW_DCHECKS" ]
+ }
+ } else if (dcheck_always_on) {
+ defines += [ "DEBUG" ]
}
}
@@ -297,21 +370,16 @@ action("js2c") {
"src/js/symbol.js",
"src/js/array.js",
"src/js/string.js",
- "src/js/uri.js",
"src/js/math.js",
- "src/third_party/fdlibm/fdlibm.js",
"src/js/regexp.js",
"src/js/arraybuffer.js",
"src/js/typedarray.js",
"src/js/iterator-prototype.js",
- "src/js/generator.js",
- "src/js/object-observe.js",
"src/js/collection.js",
"src/js/weak-collection.js",
"src/js/collection-iterator.js",
"src/js/promise.js",
"src/js/messages.js",
- "src/js/json.js",
"src/js/array-iterator.js",
"src/js/string-iterator.js",
"src/js/templates.js",
@@ -359,22 +427,23 @@ action("js2c_experimental") {
sources = [
"src/js/macros.py",
"src/messages.h",
- "src/js/generator.js",
+ "src/js/harmony-async-await.js",
"src/js/harmony-atomics.js",
- "src/js/harmony-regexp-exec.js",
- "src/js/harmony-object-observe.js",
- "src/js/harmony-sharedarraybuffer.js",
"src/js/harmony-simd.js",
- "src/js/harmony-species.js",
- "src/js/harmony-unicode-regexps.js",
"src/js/harmony-string-padding.js",
- "src/js/promise-extra.js",
]
outputs = [
"$target_gen_dir/experimental-libraries.cc",
]
+ if (v8_enable_i18n_support) {
+ sources += [
+ "src/js/icu-case-mapping.js",
+ "src/js/intl-extra.js",
+ ]
+ }
+
args = [
rebase_path("$target_gen_dir/experimental-libraries.cc",
root_build_dir),
@@ -473,14 +542,22 @@ action("d8_js2c") {
rebase_path(inputs, root_build_dir)
}
-if (is_android) {
+if (is_android && enable_java_templates) {
android_assets("v8_external_startup_data_assets") {
if (v8_use_external_startup_data) {
deps = [
"//v8",
]
- renaming_sources = v8_external_startup_data_renaming_sources
- renaming_destinations = v8_external_startup_data_renaming_destinations
+ sources = [
+ "$root_out_dir/natives_blob.bin",
+ ]
+ renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
+ if (current_cpu == "arm" || current_cpu == "x86" ||
+ current_cpu == "mipsel") {
+ renaming_destinations = [ "snapshot_blob_32.bin" ]
+ } else {
+ renaming_destinations = [ "snapshot_blob_64.bin" ]
+ }
disable_compression = true
}
}
@@ -543,17 +620,19 @@ action("run_mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
- ":mksnapshot($snapshot_toolchain)",
+ ":mksnapshot($v8_snapshot_toolchain)",
]
script = "tools/run.py"
+ sources = []
+
outputs = [
"$target_gen_dir/snapshot.cc",
]
args = [
- "./" + rebase_path(get_label_info(":mksnapshot($snapshot_toolchain)",
+ "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--startup_src",
@@ -574,13 +653,79 @@ action("run_mksnapshot") {
rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir),
]
}
+
+ if (v8_embed_script != "") {
+ sources += [ v8_embed_script ]
+ args += [ rebase_path(v8_embed_script, root_build_dir) ]
+ }
+}
+
+action("run_mkpeephole") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ deps = [
+ ":mkpeephole($v8_snapshot_toolchain)",
+ ]
+
+ outputs = [
+ v8_generated_peephole_source,
+ ]
+
+ sources = []
+
+ script = "tools/run.py"
+
+ args = [
+ "./" + rebase_path(get_label_info(":mkpeephole($v8_snapshot_toolchain)",
+ "root_out_dir") + "/mkpeephole",
+ root_build_dir),
+ rebase_path(v8_generated_peephole_source, root_build_dir),
+ ]
+}
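
run_mksnapshot and run_mkpeephole above share one GN pattern worth calling out: the generator binary is built with the snapshot (host-compatible) toolchain and then executed through the tools/run.py wrapper, with get_label_info and rebase_path used to locate it relative to the build directory. A generic sketch of the idiom, with hypothetical target and file names:

    action("run_my_gen") {
      # Build the generator for the machine doing the build, not the target.
      deps = [ ":my_gen($host_toolchain)" ]

      # Thin wrapper that simply executes its argument list.
      script = "tools/run.py"

      outputs = [ "$target_gen_dir/generated.cc" ]

      args = [
        # Path to the generator binary, relative to the build dir.
        "./" + rebase_path(
                get_label_info(":my_gen($host_toolchain)", "root_out_dir") +
                    "/my_gen",
                root_build_dir),
        # Where the generator should write its output.
        rebase_path("$target_gen_dir/generated.cc", root_build_dir),
      ]
    }
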
+
+action("v8_dump_build_config") {
+ script = "tools/testrunner/utils/dump_build_config.py"
+ outputs = [
+ "$root_out_dir/v8_build_config.json",
+ ]
+ args = [
+ rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
+ "dcheck_always_on=$dcheck_always_on",
+ "is_asan=$is_asan",
+ "is_cfi=$is_cfi",
+ "is_component_build=$is_component_build",
+ "is_debug=$is_debug",
+ "is_msan=$is_msan",
+ "is_tsan=$is_tsan",
+ "target_cpu=\"$target_cpu\"",
+ "v8_enable_i18n_support=$v8_enable_i18n_support",
+ "v8_target_cpu=\"$v8_target_cpu\"",
+ "v8_use_snapshot=$v8_use_snapshot",
+ ]
}
###############################################################################
# Source Sets (aka static libraries)
#
-source_set("v8_nosnapshot") {
+source_set("v8_maybe_snapshot") {
+ if (v8_use_snapshot && v8_use_external_startup_data) {
+ public_deps = [
+ ":v8_external_snapshot",
+ ]
+ } else if (v8_use_snapshot) {
+ public_deps = [
+ ":v8_snapshot",
+ ]
+ } else {
+ # Ignore v8_use_external_startup_data setting if no snapshot is used.
+ public_deps = [
+ ":v8_nosnapshot",
+ ]
+ }
+}
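
v8_maybe_snapshot above replaces the removed snapshot_target variable with a forwarding target: a source_set that owns no sources and only re-exports, via public_deps, whichever snapshot flavour the build arguments select, so dependants write a single dep instead of repeating the flag logic. The idiom in isolation, with hypothetical names:

    source_set("backend") {
      # No sources of its own; public_deps forwards both the link-time
      # dependency and any public configs of the chosen implementation.
      if (use_fast_backend) {
        public_deps = [ ":backend_fast" ]
      } else {
        public_deps = [ ":backend_portable" ]
      }
    }
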
+
+v8_source_set("v8_nosnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
@@ -599,16 +744,10 @@ source_set("v8_nosnapshot") {
"src/snapshot/snapshot-empty.cc",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
}
-source_set("v8_snapshot") {
+v8_source_set("v8_snapshot") {
# Only targets in this file and the top-level visibility target can
# depend on this.
visibility = [
@@ -637,17 +776,11 @@ source_set("v8_snapshot") {
"$target_gen_dir/snapshot.cc",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
}
if (v8_use_external_startup_data) {
- source_set("v8_external_snapshot") {
+ v8_source_set("v8_external_snapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
@@ -667,22 +800,15 @@ if (v8_use_external_startup_data) {
"src/snapshot/snapshot-external.cc",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
}
}
-source_set("v8_base") {
+v8_source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
- # TODO(fmeawad): This needs to be updated to support standalone V8 builds.
- "../base/trace_event/common/trace_event_common.h",
+ "//base/trace_event/common/trace_event_common.h",
"include/v8-debug.h",
"include/v8-experimental.h",
"include/v8-platform.h",
@@ -700,6 +826,7 @@ source_set("v8_base") {
"src/allocation-site-scopes.h",
"src/allocation.cc",
"src/allocation.h",
+ "src/api-arguments-inl.h",
"src/api-arguments.cc",
"src/api-arguments.h",
"src/api-experimental.cc",
@@ -710,22 +837,32 @@ source_set("v8_base") {
"src/api.h",
"src/arguments.cc",
"src/arguments.h",
+ "src/asmjs/asm-js.cc",
+ "src/asmjs/asm-js.h",
+ "src/asmjs/asm-typer.cc",
+ "src/asmjs/asm-typer.h",
+ "src/asmjs/asm-types.cc",
+ "src/asmjs/asm-types.h",
+ "src/asmjs/asm-wasm-builder.cc",
+ "src/asmjs/asm-wasm-builder.h",
"src/assembler.cc",
"src/assembler.h",
"src/assert-scope.cc",
"src/assert-scope.h",
"src/ast/ast-expression-rewriter.cc",
"src/ast/ast-expression-rewriter.h",
- "src/ast/ast-expression-visitor.cc",
- "src/ast/ast-expression-visitor.h",
"src/ast/ast-literal-reindexer.cc",
"src/ast/ast-literal-reindexer.h",
"src/ast/ast-numbering.cc",
"src/ast/ast-numbering.h",
+ "src/ast/ast-traversal-visitor.h",
+ "src/ast/ast-type-bounds.h",
"src/ast/ast-value-factory.cc",
"src/ast/ast-value-factory.h",
"src/ast/ast.cc",
"src/ast/ast.h",
+ "src/ast/context-slot-cache.cc",
+ "src/ast/context-slot-cache.h",
"src/ast/modules.cc",
"src/ast/modules.h",
"src/ast/prettyprinter.cc",
@@ -736,7 +873,6 @@ source_set("v8_base") {
"src/ast/scopes.h",
"src/ast/variables.cc",
"src/ast/variables.h",
- "src/atomic-utils.h",
"src/background-parsing-task.cc",
"src/background-parsing-task.h",
"src/bailout-reason.cc",
@@ -751,8 +887,36 @@ source_set("v8_base") {
"src/bit-vector.h",
"src/bootstrapper.cc",
"src/bootstrapper.h",
- "src/builtins.cc",
- "src/builtins.h",
+ "src/builtins/builtins-api.cc",
+ "src/builtins/builtins-array.cc",
+ "src/builtins/builtins-arraybuffer.cc",
+ "src/builtins/builtins-boolean.cc",
+ "src/builtins/builtins-call.cc",
+ "src/builtins/builtins-callsite.cc",
+ "src/builtins/builtins-conversion.cc",
+ "src/builtins/builtins-dataview.cc",
+ "src/builtins/builtins-date.cc",
+ "src/builtins/builtins-debug.cc",
+ "src/builtins/builtins-error.cc",
+ "src/builtins/builtins-function.cc",
+ "src/builtins/builtins-generator.cc",
+ "src/builtins/builtins-global.cc",
+ "src/builtins/builtins-handler.cc",
+ "src/builtins/builtins-internal.cc",
+ "src/builtins/builtins-interpreter.cc",
+ "src/builtins/builtins-json.cc",
+ "src/builtins/builtins-math.cc",
+ "src/builtins/builtins-number.cc",
+ "src/builtins/builtins-object.cc",
+ "src/builtins/builtins-proxy.cc",
+ "src/builtins/builtins-reflect.cc",
+ "src/builtins/builtins-sharedarraybuffer.cc",
+ "src/builtins/builtins-string.cc",
+ "src/builtins/builtins-symbol.cc",
+ "src/builtins/builtins-typedarray.cc",
+ "src/builtins/builtins-utils.h",
+ "src/builtins/builtins.cc",
+ "src/builtins/builtins.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/cancelable-task.cc",
@@ -761,8 +925,11 @@ source_set("v8_base") {
"src/char-predicates.cc",
"src/char-predicates.h",
"src/checks.h",
+ "src/code-events.h",
"src/code-factory.cc",
"src/code-factory.h",
+ "src/code-stub-assembler.cc",
+ "src/code-stub-assembler.h",
"src/code-stubs-hydrogen.cc",
"src/code-stubs.cc",
"src/code-stubs.h",
@@ -775,6 +942,10 @@ source_set("v8_base") {
"src/compilation-dependencies.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
+ "src/compiler-dispatcher/compiler-dispatcher-job.cc",
+ "src/compiler-dispatcher/compiler-dispatcher-job.h",
+ "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
+ "src/compiler-dispatcher/optimizing-compile-dispatcher.h",
"src/compiler.cc",
"src/compiler.h",
"src/compiler/access-builder.cc",
@@ -795,16 +966,16 @@ source_set("v8_base") {
"src/compiler/bytecode-branch-analysis.h",
"src/compiler/bytecode-graph-builder.cc",
"src/compiler/bytecode-graph-builder.h",
+ "src/compiler/bytecode-loop-analysis.cc",
+ "src/compiler/bytecode-loop-analysis.h",
"src/compiler/c-linkage.cc",
- "src/compiler/change-lowering.cc",
- "src/compiler/change-lowering.h",
- "src/compiler/coalesced-live-ranges.cc",
- "src/compiler/coalesced-live-ranges.h",
+ "src/compiler/checkpoint-elimination.cc",
+ "src/compiler/checkpoint-elimination.h",
+ "src/compiler/code-assembler.cc",
+ "src/compiler/code-assembler.h",
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
- "src/compiler/code-stub-assembler.cc",
- "src/compiler/code-stub-assembler.h",
"src/compiler/common-node-cache.cc",
"src/compiler/common-node-cache.h",
"src/compiler/common-operator-reducer.cc",
@@ -820,6 +991,8 @@ source_set("v8_base") {
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
+ "src/compiler/effect-control-linearizer.cc",
+ "src/compiler/effect-control-linearizer.h",
"src/compiler/escape-analysis-reducer.cc",
"src/compiler/escape-analysis-reducer.h",
"src/compiler/escape-analysis.cc",
@@ -842,8 +1015,6 @@ source_set("v8_base") {
"src/compiler/graph-visualizer.h",
"src/compiler/graph.cc",
"src/compiler/graph.h",
- "src/compiler/greedy-allocator.cc",
- "src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h",
"src/compiler/instruction-scheduler.cc",
"src/compiler/instruction-scheduler.h",
@@ -895,10 +1066,15 @@ source_set("v8_base") {
"src/compiler/loop-analysis.cc",
"src/compiler/loop-analysis.h",
"src/compiler/loop-peeling.cc",
+ "src/compiler/loop-peeling.h",
+ "src/compiler/loop-variable-optimizer.cc",
+ "src/compiler/loop-variable-optimizer.h",
"src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
+ "src/compiler/memory-optimizer.cc",
+ "src/compiler/memory-optimizer.h",
"src/compiler/move-optimizer.cc",
"src/compiler/move-optimizer.h",
"src/compiler/node-aux-data.h",
@@ -914,6 +1090,8 @@ source_set("v8_base") {
"src/compiler/node.h",
"src/compiler/opcodes.cc",
"src/compiler/opcodes.h",
+ "src/compiler/operation-typer.cc",
+ "src/compiler/operation-typer.h",
"src/compiler/operator-properties.cc",
"src/compiler/operator-properties.h",
"src/compiler/operator.cc",
@@ -926,6 +1104,8 @@ source_set("v8_base") {
"src/compiler/pipeline.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
+ "src/compiler/redundancy-elimination.cc",
+ "src/compiler/redundancy-elimination.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
"src/compiler/register-allocator.cc",
@@ -948,6 +1128,8 @@ source_set("v8_base") {
"src/compiler/source-position.h",
"src/compiler/state-values-utils.cc",
"src/compiler/state-values-utils.h",
+ "src/compiler/store-store-elimination.cc",
+ "src/compiler/store-store-elimination.h",
"src/compiler/tail-call-optimization.cc",
"src/compiler/tail-call-optimization.h",
"src/compiler/type-hint-analyzer.cc",
@@ -956,6 +1138,7 @@ source_set("v8_base") {
"src/compiler/type-hints.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
+ "src/compiler/unwinding-info-writer.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc",
@@ -973,6 +1156,7 @@ source_set("v8_base") {
"src/conversions-inl.h",
"src/conversions.cc",
"src/conversions.h",
+ "src/counters-inl.h",
"src/counters.cc",
"src/counters.h",
"src/crankshaft/compilation-phase.cc",
@@ -1052,6 +1236,8 @@ source_set("v8_base") {
"src/debug/debug.h",
"src/debug/liveedit.cc",
"src/debug/liveedit.h",
+ "src/deoptimize-reason.cc",
+ "src/deoptimize-reason.h",
"src/deoptimizer.cc",
"src/deoptimizer.h",
"src/disasm.h",
@@ -1063,6 +1249,8 @@ source_set("v8_base") {
"src/dtoa.cc",
"src/dtoa.h",
"src/effects.h",
+ "src/eh-frame.cc",
+ "src/eh-frame.h",
"src/elements-kind.cc",
"src/elements-kind.h",
"src/elements.cc",
@@ -1075,6 +1263,8 @@ source_set("v8_base") {
"src/extensions/free-buffer-extension.h",
"src/extensions/gc-extension.cc",
"src/extensions/gc-extension.h",
+ "src/extensions/ignition-statistics-extension.cc",
+ "src/extensions/ignition-statistics-extension.h",
"src/extensions/statistics-extension.cc",
"src/extensions/statistics-extension.h",
"src/extensions/trigger-failure-extension.cc",
@@ -1111,10 +1301,12 @@ source_set("v8_base") {
"src/handles-inl.h",
"src/handles.cc",
"src/handles.h",
- "src/hashmap.h",
"src/heap-symbols.h",
+ "src/heap/array-buffer-tracker-inl.h",
"src/heap/array-buffer-tracker.cc",
"src/heap/array-buffer-tracker.h",
+ "src/heap/code-stats.cc",
+ "src/heap/code-stats.h",
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
@@ -1129,6 +1321,7 @@ source_set("v8_base") {
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
+ "src/heap/marking.h",
"src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
"src/heap/object-stats.cc",
@@ -1177,10 +1370,29 @@ source_set("v8_base") {
"src/interpreter/bytecode-array-builder.h",
"src/interpreter/bytecode-array-iterator.cc",
"src/interpreter/bytecode-array-iterator.h",
+ "src/interpreter/bytecode-array-writer.cc",
+ "src/interpreter/bytecode-array-writer.h",
+ "src/interpreter/bytecode-dead-code-optimizer.cc",
+ "src/interpreter/bytecode-dead-code-optimizer.h",
+ "src/interpreter/bytecode-decoder.cc",
+ "src/interpreter/bytecode-decoder.h",
+ "src/interpreter/bytecode-flags.cc",
+ "src/interpreter/bytecode-flags.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
+ "src/interpreter/bytecode-label.cc",
+ "src/interpreter/bytecode-label.h",
+ "src/interpreter/bytecode-peephole-optimizer.cc",
+ "src/interpreter/bytecode-peephole-optimizer.h",
+ "src/interpreter/bytecode-peephole-table.h",
+ "src/interpreter/bytecode-pipeline.cc",
+ "src/interpreter/bytecode-pipeline.h",
"src/interpreter/bytecode-register-allocator.cc",
"src/interpreter/bytecode-register-allocator.h",
+ "src/interpreter/bytecode-register-optimizer.cc",
+ "src/interpreter/bytecode-register-optimizer.h",
+ "src/interpreter/bytecode-register.cc",
+ "src/interpreter/bytecode-register.h",
"src/interpreter/bytecode-traits.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
@@ -1196,12 +1408,12 @@ source_set("v8_base") {
"src/interpreter/interpreter-intrinsics.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
- "src/interpreter/source-position-table.cc",
- "src/interpreter/source-position-table.h",
"src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
+ "src/json-parser.cc",
"src/json-parser.h",
+ "src/json-stringifier.cc",
"src/json-stringifier.h",
"src/keys.cc",
"src/keys.h",
@@ -1230,8 +1442,6 @@ source_set("v8_base") {
"src/objects-printer.cc",
"src/objects.cc",
"src/objects.h",
- "src/optimizing-compile-dispatcher.cc",
- "src/optimizing-compile-dispatcher.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/expression-classifier.h",
@@ -1239,6 +1449,8 @@ source_set("v8_base") {
"src/parsing/func-name-inferrer.h",
"src/parsing/parameter-initializer-rewriter.cc",
"src/parsing/parameter-initializer-rewriter.h",
+ "src/parsing/parse-info.cc",
+ "src/parsing/parse-info.h",
"src/parsing/parser-base.h",
"src/parsing/parser.cc",
"src/parsing/parser.h",
@@ -1275,12 +1487,14 @@ source_set("v8_base") {
"src/profiler/profile-generator-inl.h",
"src/profiler/profile-generator.cc",
"src/profiler/profile-generator.h",
- "src/profiler/sampler.cc",
- "src/profiler/sampler.h",
+ "src/profiler/profiler-listener.cc",
+ "src/profiler/profiler-listener.h",
"src/profiler/sampling-heap-profiler.cc",
"src/profiler/sampling-heap-profiler.h",
"src/profiler/strings-storage.cc",
"src/profiler/strings-storage.h",
+ "src/profiler/tick-sample.cc",
+ "src/profiler/tick-sample.h",
"src/profiler/unbound-queue-inl.h",
"src/profiler/unbound-queue.h",
"src/property-descriptor.cc",
@@ -1319,6 +1533,7 @@ source_set("v8_base") {
"src/runtime/runtime-compiler.cc",
"src/runtime/runtime-date.cc",
"src/runtime/runtime-debug.cc",
+ "src/runtime/runtime-error.cc",
"src/runtime/runtime-forin.cc",
"src/runtime/runtime-function.cc",
"src/runtime/runtime-futex.cc",
@@ -1326,13 +1541,11 @@ source_set("v8_base") {
"src/runtime/runtime-i18n.cc",
"src/runtime/runtime-internal.cc",
"src/runtime/runtime-interpreter.cc",
- "src/runtime/runtime-json.cc",
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-liveedit.cc",
"src/runtime/runtime-maths.cc",
"src/runtime/runtime-numbers.cc",
"src/runtime/runtime-object.cc",
- "src/runtime/runtime-observe.cc",
"src/runtime/runtime-operators.cc",
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
@@ -1342,8 +1555,8 @@ source_set("v8_base") {
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
"src/runtime/runtime-typedarray.cc",
- "src/runtime/runtime-uri.cc",
"src/runtime/runtime-utils.h",
+ "src/runtime/runtime-wasm.cc",
"src/runtime/runtime.cc",
"src/runtime/runtime.h",
"src/safepoint-table.cc",
@@ -1369,6 +1582,8 @@ source_set("v8_base") {
"src/snapshot/snapshot.h",
"src/snapshot/startup-serializer.cc",
"src/snapshot/startup-serializer.h",
+ "src/source-position-table.cc",
+ "src/source-position-table.h",
"src/source-position.h",
"src/splay-tree-inl.h",
"src/splay-tree.h",
@@ -1381,8 +1596,6 @@ source_set("v8_base") {
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
- "src/third_party/fdlibm/fdlibm.cc",
- "src/third_party/fdlibm/fdlibm.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/transitions-inl.h",
@@ -1397,10 +1610,6 @@ source_set("v8_base") {
"src/type-info.h",
"src/types.cc",
"src/types.h",
- "src/typing-asm.cc",
- "src/typing-asm.h",
- "src/typing-reset.cc",
- "src/typing-reset.h",
"src/unicode-cache-inl.h",
"src/unicode-cache.h",
"src/unicode-decoder.cc",
@@ -1408,6 +1617,8 @@ source_set("v8_base") {
"src/unicode-inl.h",
"src/unicode.cc",
"src/unicode.h",
+ "src/uri.cc",
+ "src/uri.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
@@ -1416,20 +1627,30 @@ source_set("v8_base") {
"src/v8memory.h",
"src/v8threads.cc",
"src/v8threads.h",
+ "src/value-serializer.cc",
+ "src/value-serializer.h",
"src/version.cc",
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
- "src/wasm/asm-wasm-builder.cc",
- "src/wasm/asm-wasm-builder.h",
"src/wasm/ast-decoder.cc",
"src/wasm/ast-decoder.h",
"src/wasm/decoder.h",
"src/wasm/encoder.cc",
"src/wasm/encoder.h",
+ "src/wasm/leb-helper.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
+ "src/wasm/switch-logic.cc",
+ "src/wasm/switch-logic.h",
+ "src/wasm/wasm-debug.cc",
+ "src/wasm/wasm-debug.h",
+ "src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
+ "src/wasm/wasm-function-name-table.cc",
+ "src/wasm/wasm-function-name-table.h",
+ "src/wasm/wasm-interpreter.cc",
+ "src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-macro-gen.h",
@@ -1445,8 +1666,9 @@ source_set("v8_base") {
"src/zone.h",
]
- if (v8_target_arch == "x86") {
+ if (v8_current_cpu == "x86") {
sources += [
+ "src/builtins/ia32/builtins-ia32.cc",
"src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-scheduler-ia32.cc",
@@ -1462,7 +1684,6 @@ source_set("v8_base") {
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
- "src/ia32/builtins-ia32.cc",
"src/ia32/code-stubs-ia32.cc",
"src/ia32/code-stubs-ia32.h",
"src/ia32/codegen-ia32.cc",
@@ -1483,12 +1704,15 @@ source_set("v8_base") {
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
]
- } else if (v8_target_arch == "x64") {
+ } else if (v8_current_cpu == "x64") {
sources += [
+ "src/builtins/x64/builtins-x64.cc",
"src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-scheduler-x64.cc",
"src/compiler/x64/instruction-selector-x64.cc",
+ "src/compiler/x64/unwinding-info-writer-x64.cc",
+ "src/compiler/x64/unwinding-info-writer-x64.h",
"src/crankshaft/x64/lithium-codegen-x64.cc",
"src/crankshaft/x64/lithium-codegen-x64.h",
"src/crankshaft/x64/lithium-gap-resolver-x64.cc",
@@ -1507,7 +1731,6 @@ source_set("v8_base") {
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
- "src/x64/builtins-x64.cc",
"src/x64/code-stubs-x64.cc",
"src/x64/code-stubs-x64.h",
"src/x64/codegen-x64.cc",
@@ -1515,18 +1738,18 @@ source_set("v8_base") {
"src/x64/cpu-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
+ "src/x64/eh-frame-x64.cc",
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
"src/x64/interface-descriptors-x64.cc",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
]
- } else if (v8_target_arch == "arm") {
+ } else if (v8_current_cpu == "arm") {
sources += [
"src/arm/assembler-arm-inl.h",
"src/arm/assembler-arm.cc",
"src/arm/assembler-arm.h",
- "src/arm/builtins-arm.cc",
"src/arm/code-stubs-arm.cc",
"src/arm/code-stubs-arm.h",
"src/arm/codegen-arm.cc",
@@ -1536,6 +1759,7 @@ source_set("v8_base") {
"src/arm/cpu-arm.cc",
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
+ "src/arm/eh-frame-arm.cc",
"src/arm/frames-arm.cc",
"src/arm/frames-arm.h",
"src/arm/interface-descriptors-arm.cc",
@@ -1544,10 +1768,13 @@ source_set("v8_base") {
"src/arm/macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
+ "src/builtins/arm/builtins-arm.cc",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-scheduler-arm.cc",
"src/compiler/arm/instruction-selector-arm.cc",
+ "src/compiler/arm/unwinding-info-writer-arm.cc",
+ "src/compiler/arm/unwinding-info-writer-arm.h",
"src/crankshaft/arm/lithium-arm.cc",
"src/crankshaft/arm/lithium-arm.h",
"src/crankshaft/arm/lithium-codegen-arm.cc",
@@ -1564,12 +1791,11 @@ source_set("v8_base") {
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
]
- } else if (v8_target_arch == "arm64") {
+ } else if (v8_current_cpu == "arm64") {
sources += [
"src/arm64/assembler-arm64-inl.h",
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
- "src/arm64/builtins-arm64.cc",
"src/arm64/code-stubs-arm64.cc",
"src/arm64/code-stubs-arm64.h",
"src/arm64/codegen-arm64.cc",
@@ -1582,6 +1808,7 @@ source_set("v8_base") {
"src/arm64/deoptimizer-arm64.cc",
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
+ "src/arm64/eh-frame-arm64.cc",
"src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h",
"src/arm64/instructions-arm64.cc",
@@ -1597,10 +1824,13 @@ source_set("v8_base") {
"src/arm64/simulator-arm64.h",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
+ "src/builtins/arm64/builtins-arm64.cc",
"src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-scheduler-arm64.cc",
"src/compiler/arm64/instruction-selector-arm64.cc",
+ "src/compiler/arm64/unwinding-info-writer-arm64.cc",
+ "src/compiler/arm64/unwinding-info-writer-arm64.h",
"src/crankshaft/arm64/delayed-masm-arm64-inl.h",
"src/crankshaft/arm64/delayed-masm-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.h",
@@ -1620,8 +1850,9 @@ source_set("v8_base") {
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
]
- } else if (v8_target_arch == "mipsel") {
+ } else if (v8_current_cpu == "mipsel") {
sources += [
+ "src/builtins/mips/builtins-mips.cc",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-scheduler-mips.cc",
@@ -1642,7 +1873,6 @@ source_set("v8_base") {
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
- "src/mips/builtins-mips.cc",
"src/mips/code-stubs-mips.cc",
"src/mips/code-stubs-mips.h",
"src/mips/codegen-mips.cc",
@@ -1662,12 +1892,13 @@ source_set("v8_base") {
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
]
- } else if (v8_target_arch == "mips64el") {
+ } else if (v8_current_cpu == "mips64el") {
sources += [
- "compiler/mips64/code-generator-mips64.cc",
- "compiler/mips64/instruction-codes-mips64.h",
- "compiler/mips64/instruction-scheduler-mips64.cc",
- "compiler/mips64/instruction-selector-mips64.cc",
+ "src/builtins/mips64/builtins-mips64.cc",
+ "src/compiler/mips64/code-generator-mips64.cc",
+ "src/compiler/mips64/instruction-codes-mips64.h",
+ "src/compiler/mips64/instruction-scheduler-mips64.cc",
+ "src/compiler/mips64/instruction-selector-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.h",
"src/crankshaft/mips64/lithium-gap-resolver-mips64.cc",
@@ -1684,7 +1915,6 @@ source_set("v8_base") {
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
- "src/mips64/builtins-mips64.cc",
"src/mips64/code-stubs-mips64.cc",
"src/mips64/code-stubs-mips64.h",
"src/mips64/codegen-mips64.cc",
@@ -1704,8 +1934,9 @@ source_set("v8_base") {
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
]
- } else if (v8_target_arch == "s390" || v8_target_arch == "s390x") {
+ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [
+ "src/builtins/s390/builtins-s390.cc",
"src/compiler/s390/code-generator-s390.cc",
"src/compiler/s390/instruction-codes-s390.h",
"src/compiler/s390/instruction-scheduler-s390.cc",
@@ -1728,7 +1959,6 @@ source_set("v8_base") {
"src/s390/assembler-s390-inl.h",
"src/s390/assembler-s390.cc",
"src/s390/assembler-s390.h",
- "src/s390/builtins-s390.cc",
"src/s390/code-stubs-s390.cc",
"src/s390/code-stubs-s390.h",
"src/s390/codegen-s390.cc",
@@ -1748,24 +1978,17 @@ source_set("v8_base") {
]
}
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":features",
- ":toolchain",
- ]
-
- if (!is_debug) {
- configs -= [ "//build/config/compiler:default_optimization" ]
- configs += [ "//build/config/compiler:optimize_max" ]
- }
+ configs = [ ":internal_config" ]
defines = []
deps = [
":v8_libbase",
+ ":v8_libsampler",
]
+ sources += [ v8_generated_peephole_source ]
+ deps += [ ":run_mkpeephole" ]
+
if (is_win) {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
cflags = [ "/wd4267" ]
@@ -1776,9 +1999,6 @@ source_set("v8_base") {
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
-
- # TODO(jochen): Add support for icu_use_data_file_flag
- defines += [ "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE" ]
} else {
sources -= [
"src/i18n.cc",
@@ -1792,13 +2012,14 @@ source_set("v8_base") {
}
}
-source_set("v8_libbase") {
+v8_source_set("v8_libbase") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/base/accounting-allocator.cc",
"src/base/accounting-allocator.h",
"src/base/adapters.h",
+ "src/base/atomic-utils.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_arm64_gcc.h",
"src/base/atomicops_internals_arm_gcc.h",
@@ -1806,7 +2027,6 @@ source_set("v8_libbase") {
"src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips64_gcc.h",
"src/base/atomicops_internals_mips_gcc.h",
- "src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_s390_gcc.h",
"src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc",
@@ -1817,11 +2037,20 @@ source_set("v8_libbase") {
"src/base/build_config.h",
"src/base/cpu.cc",
"src/base/cpu.h",
+ "src/base/debug/stack_trace.cc",
+ "src/base/debug/stack_trace.h",
"src/base/division-by-constant.cc",
"src/base/division-by-constant.h",
+ "src/base/file-utils.cc",
+ "src/base/file-utils.h",
"src/base/flags.h",
+ "src/base/format-macros.h",
+ "src/base/free_deleter.h",
"src/base/functional.cc",
"src/base/functional.h",
+ "src/base/hashmap.h",
+ "src/base/ieee754.cc",
+ "src/base/ieee754.h",
"src/base/iterator.h",
"src/base/lazy-instance.h",
"src/base/logging.cc",
@@ -1843,25 +2072,13 @@ source_set("v8_libbase") {
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
"src/base/safe_math_impl.h",
- "src/base/smart-pointers.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config_base",
- ":features",
- ":toolchain",
- ]
-
- if (!is_debug) {
- configs -= [ "//build/config/compiler:default_optimization" ]
- configs += [ "//build/config/compiler:optimize_max" ]
- }
+ configs = [ ":internal_config_base" ]
defines = []
@@ -1870,7 +2087,10 @@ source_set("v8_libbase") {
}
if (is_linux) {
- sources += [ "src/base/platform/platform-linux.cc" ]
+ sources += [
+ "src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-linux.cc",
+ ]
libs = [
"dl",
@@ -1883,18 +2103,31 @@ source_set("v8_libbase") {
"rt",
]
if (host_os == "mac") {
- sources += [ "src/base/platform/platform-macos.cc" ]
+ sources += [
+ "src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-macos.cc",
+ ]
} else {
- sources += [ "src/base/platform/platform-linux.cc" ]
+ sources += [
+ "src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-linux.cc",
+ ]
}
} else {
- sources += [ "src/base/platform/platform-linux.cc" ]
+ sources += [
+ "src/base/debug/stack_trace_android.cc",
+ "src/base/platform/platform-linux.cc",
+ ]
}
} else if (is_mac) {
- sources += [ "src/base/platform/platform-macos.cc" ]
+ sources += [
+ "src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-macos.cc",
+ ]
} else if (is_win) {
# TODO(jochen): Add support for cygwin.
sources += [
+ "src/base/debug/stack_trace_win.cc",
"src/base/platform/platform-win32.cc",
"src/base/win32-headers.h",
]
@@ -1902,6 +2135,8 @@ source_set("v8_libbase") {
defines += [ "_CRT_RAND_S" ] # for rand_s()
libs = [
+ "dbghelp.lib",
+ "shlwapi.lib",
"winmm.lib",
"ws2_32.lib",
]
@@ -1910,36 +2145,51 @@ source_set("v8_libbase") {
# TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
}
-source_set("v8_libplatform") {
+v8_source_set("v8_libplatform") {
sources = [
+ "//base/trace_event/common/trace_event_common.h",
"include/libplatform/libplatform.h",
+ "include/libplatform/v8-tracing.h",
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
"src/libplatform/task-queue.cc",
"src/libplatform/task-queue.h",
+ "src/libplatform/tracing/trace-buffer.cc",
+ "src/libplatform/tracing/trace-buffer.h",
+ "src/libplatform/tracing/trace-config.cc",
+ "src/libplatform/tracing/trace-object.cc",
+ "src/libplatform/tracing/trace-writer.cc",
+ "src/libplatform/tracing/trace-writer.h",
+ "src/libplatform/tracing/tracing-controller.cc",
"src/libplatform/worker-thread.cc",
"src/libplatform/worker-thread.h",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config_base",
- ":features",
- ":toolchain",
+ configs = [ ":internal_config_base" ]
+
+ public_configs = [ ":libplatform_config" ]
+
+ deps = [
+ ":v8_libbase",
]
+}
- if (!is_debug) {
- configs -= [ "//build/config/compiler:default_optimization" ]
- configs += [ "//build/config/compiler:optimize_max" ]
- }
+v8_source_set("v8_libsampler") {
+ sources = [
+ "src/libsampler/sampler.cc",
+ "src/libsampler/sampler.h",
+ ]
+
+ configs = [ ":internal_config_base" ]
+
+ public_configs = [ ":libsampler_config" ]
deps = [
":v8_libbase",
]
}
-source_set("fuzzer_support") {
+v8_source_set("fuzzer_support") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
@@ -1947,136 +2197,317 @@ source_set("fuzzer_support") {
"test/fuzzer/fuzzer-support.h",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config_base",
- ":libplatform_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config_base" ]
deps = [
+ ":v8_maybe_snapshot",
+ ]
+
+ public_deps = [
":v8_libplatform",
- snapshot_target,
]
}
+v8_source_set("simple_fuzzer") {
+ sources = [
+ "test/fuzzer/fuzzer.cc",
+ ]
+
+ configs = [ ":internal_config_base" ]
+}
+
###############################################################################
# Executables
#
-if (current_toolchain == snapshot_toolchain) {
- executable("mksnapshot") {
+if (current_toolchain == v8_snapshot_toolchain) {
+ v8_executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"src/snapshot/mksnapshot.cc",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":libplatform_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
deps = [
":v8_base",
":v8_libplatform",
":v8_nosnapshot",
"//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
]
}
}
+v8_executable("mkpeephole") {
+  # mkpeephole needs to be built for the build host so that the peephole
+  # lookup table can be built during the build. The table depends on the
+  # properties of bytecodes that are described in bytecodes.{cc,h}.
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = [
+ "src/interpreter/bytecode-peephole-optimizer.h",
+ "src/interpreter/bytecodes.cc",
+ "src/interpreter/bytecodes.h",
+ "src/interpreter/mkpeephole.cc",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config",
+ ]
+
+ deps = [
+ ":v8_libbase",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+}
+
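[Editor's note — not part of the patch.] The v8_base hunk above adds "sources += [ v8_generated_peephole_source ]" and "deps += [ ":run_mkpeephole" ]"; mkpeephole here is the host-toolchain generator those lines rely on. Below is a minimal sketch of how such a generator step is typically wired up in GN. Only run_mkpeephole, mkpeephole, v8_snapshot_toolchain, and v8_generated_peephole_source appear in this diff; the wrapper script name and args are assumptions, and the real action is defined elsewhere in BUILD.gn.

    # Sketch only; "tools/run-mkpeephole.py" is a hypothetical wrapper script.
    action("run_mkpeephole") {
      # Build the generator binary with the snapshot (host-capable) toolchain.
      deps = [ ":mkpeephole($v8_snapshot_toolchain)" ]

      # The generated table becomes a source of v8_base (see the hunk above).
      outputs = [ v8_generated_peephole_source ]

      script = "tools/run-mkpeephole.py"
      args = rebase_path(outputs, root_build_dir)
    }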
###############################################################################
# Public targets
#
+want_v8_shell =
+ (current_toolchain == host_toolchain && v8_toolset_for_shell == "host") ||
+ (current_toolchain == v8_snapshot_toolchain &&
+ v8_toolset_for_shell == "host") ||
+ (current_toolchain != host_toolchain && v8_toolset_for_shell == "target")
+
+group("gn_all") {
+ testonly = true
+
+ deps = [
+ ":d8",
+ ":v8_hello_world",
+ ":v8_parser_shell",
+ ":v8_sample_process",
+ ":v8_simple_json_fuzzer",
+ ":v8_simple_parser_fuzzer",
+ ":v8_simple_regexp_fuzzer",
+ ":v8_simple_wasm_asmjs_fuzzer",
+ ":v8_simple_wasm_fuzzer",
+ "test:gn_all",
+ "tools:gn_all",
+ ]
+
+ if (want_v8_shell) {
+ deps += [ ":v8_shell" ]
+ }
+
+ if (v8_test_isolation_mode != "noop") {
+ deps += [ ":d8_run" ]
+ }
+}
+
if (is_component_build) {
- component("v8") {
+ v8_component("v8") {
sources = [
"src/v8dll-main.cc",
]
+ deps = [
+ ":v8_dump_build_config",
+ ]
+
public_deps = [
":v8_base",
- snapshot_target,
+ ":v8_maybe_snapshot",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
public_configs = [ ":external_config" ]
-
- libs = []
- if (is_android && current_toolchain != host_toolchain) {
- libs += [ "log" ]
- }
}
} else {
group("v8") {
+ deps = [
+ ":v8_dump_build_config",
+ ]
+
public_deps = [
":v8_base",
- snapshot_target,
+ ":v8_maybe_snapshot",
]
public_configs = [ ":external_config" ]
}
}
-if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
- (current_toolchain == snapshot_toolchain && v8_toolset_for_d8 == "host") ||
- (current_toolchain != host_toolchain && v8_toolset_for_d8 == "target")) {
- executable("d8") {
+v8_executable("d8") {
+ sources = [
+ "src/d8.cc",
+ "src/d8.h",
+ ]
+
+ configs = [
+    # Note: don't use :internal_config here because this target will get
+    # the :external_config applied to it by virtue of depending on :v8; you
+    # can't have both on one target (see the editor's note after this target).
+ ":internal_config_base",
+ ]
+
+ deps = [
+ ":d8_js2c",
+ ":v8",
+ ":v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ # TODO(jochen): Add support for vtunejit.
+
+ if (is_posix) {
+ sources += [ "src/d8-posix.cc" ]
+ } else if (is_win) {
+ sources += [ "src/d8-windows.cc" ]
+ }
+
+ if (!is_component_build) {
+ sources += [ "$target_gen_dir/d8-js.cc" ]
+ }
+ if (v8_enable_i18n_support) {
+ deps += [ "//third_party/icu" ]
+ }
+}
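[Editor's note — not part of the patch.] On the configs comment inside the d8 target: in GN, public_configs declared on a dependency propagate to its direct dependents, so d8 picks up :external_config through its dep on :v8 and therefore applies only :internal_config_base itself. A minimal illustration under that reading, using the targets from this diff:

    # :v8 exports :external_config to every target that depends on it ...
    group("v8") {
      public_configs = [ ":external_config" ]
    }

    # ... so d8 inherits :external_config automatically and, per the comment
    # above, must not also apply :internal_config to the same target.
    v8_executable("d8") {
      configs = [ ":internal_config_base" ]
      deps = [ ":v8" ]
    }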
+
+v8_isolate_run("d8") {
+ deps = [
+ ":d8",
+ ]
+
+ isolate = "//src/d8.isolate"
+}
+
+v8_executable("v8_hello_world") {
+ sources = [
+ "samples/hello-world.cc",
+ ]
+
+ configs = [
+ # Note: don't use :internal_config here because this target will get
+ # the :external_config applied to it by virtue of depending on :v8, and
+ # you can't have both applied to the same target.
+ ":internal_config_base",
+ ]
+
+ deps = [
+ ":v8",
+ ":v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ if (v8_enable_i18n_support) {
+ deps += [ "//third_party/icu" ]
+ }
+}
+
+v8_executable("v8_sample_process") {
+ sources = [
+ "samples/process.cc",
+ ]
+
+ configs = [
+ # Note: don't use :internal_config here because this target will get
+ # the :external_config applied to it by virtue of depending on :v8, and
+ # you can't have both applied to the same target.
+ ":internal_config_base",
+ ]
+
+ deps = [
+ ":v8",
+ ":v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ if (v8_enable_i18n_support) {
+ deps += [ "//third_party/icu" ]
+ }
+}
+
+v8_executable("v8_parser_shell") {
+ sources = [
+ "tools/parser-shell.cc",
+ "tools/shell-utils.h",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+
+ deps = [
+ ":v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ if (is_component_build) {
+ # v8_parser_shell can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ deps += [ ":v8_maybe_snapshot" ]
+ } else {
+ deps += [ ":v8" ]
+ }
+
+ if (v8_enable_i18n_support) {
+ deps += [ "//third_party/icu" ]
+ }
+
+ if (is_win) {
+ # Suppress warnings about importing locally defined symbols.
+ if (is_component_build) {
+ ldflags = [
+ "/ignore:4049",
+ "/ignore:4217",
+ ]
+ }
+ }
+}
+
+if (want_v8_shell) {
+ v8_executable("v8_shell") {
sources = [
- "src/d8.cc",
- "src/d8.h",
+ "samples/shell.cc",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
+ configs = [
# Note: don't use :internal_config here because this target will get
# the :external_config applied to it by virtue of depending on :v8, and
# you can't have both applied to the same target.
":internal_config_base",
- ":features",
- ":toolchain",
]
deps = [
- ":d8_js2c",
":v8",
":v8_libplatform",
"//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
]
- # TODO(jochen): Add support for vtunejit.
-
- if (is_posix) {
- sources += [ "src/d8-posix.cc" ]
- } else if (is_win) {
- sources += [ "src/d8-windows.cc" ]
- }
-
- if (!is_component_build) {
- sources += [ "$target_gen_dir/d8-js.cc" ]
- }
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
}
}
}
-source_set("json_fuzzer") {
+template("v8_fuzzer") {
+ name = target_name
+ forward_variables_from(invoker, "*")
+ v8_executable("v8_simple_" + name) {
+ deps = [
+ ":" + name,
+ ":simple_fuzzer",
+ "//build/win:default_exe_manifest",
+ ]
+
+ configs = [ ":external_config" ]
+ }
+}
+
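[Editor's note — not part of the patch.] The v8_fuzzer template above turns each fuzzer source set into a standalone binary: a call such as the v8_fuzzer("json_fuzzer") declared later in this hunk produces a v8_executable named v8_simple_json_fuzzer that links the fuzzer-specific source set against the shared :simple_fuzzer driver. Reading the expansion directly off the template body:

    v8_fuzzer("json_fuzzer")
    # expands to roughly:
    #   v8_executable("v8_simple_json_fuzzer") {
    #     deps = [ ":json_fuzzer", ":simple_fuzzer",
    #              "//build/win:default_exe_manifest" ]
    #     configs = [ ":external_config" ]
    #   }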
+v8_source_set("json_fuzzer") {
sources = [
"test/fuzzer/json.cc",
]
@@ -2085,17 +2516,13 @@ source_set("json_fuzzer") {
":fuzzer_support",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":libplatform_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
+}
+
+v8_fuzzer("json_fuzzer") {
}
-source_set("parser_fuzzer") {
+v8_source_set("parser_fuzzer") {
sources = [
"test/fuzzer/parser.cc",
]
@@ -2104,17 +2531,13 @@ source_set("parser_fuzzer") {
":fuzzer_support",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":libplatform_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
+}
+
+v8_fuzzer("parser_fuzzer") {
}
-source_set("regexp_fuzzer") {
+v8_source_set("regexp_fuzzer") {
sources = [
"test/fuzzer/regexp.cc",
]
@@ -2123,17 +2546,13 @@ source_set("regexp_fuzzer") {
":fuzzer_support",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":libplatform_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
+}
+
+v8_fuzzer("regexp_fuzzer") {
}
-source_set("wasm_fuzzer") {
+v8_source_set("wasm_fuzzer") {
sources = [
"test/fuzzer/wasm.cc",
]
@@ -2142,17 +2561,13 @@ source_set("wasm_fuzzer") {
":fuzzer_support",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":libplatform_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
+}
+
+v8_fuzzer("wasm_fuzzer") {
}
-source_set("wasm_asmjs_fuzzer") {
+v8_source_set("wasm_asmjs_fuzzer") {
sources = [
"test/fuzzer/wasm-asmjs.cc",
]
@@ -2161,12 +2576,8 @@ source_set("wasm_asmjs_fuzzer") {
":fuzzer_support",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
- configs += [
- ":internal_config",
- ":libplatform_config",
- ":features",
- ":toolchain",
- ]
+ configs = [ ":internal_config" ]
+}
+
+v8_fuzzer("wasm_asmjs_fuzzer") {
}
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index dc97b80422..b2a43a1121 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,2145 +1,6074 @@
-2016-04-06: Version 5.1.281
+2016-08-23: Version 5.4.500
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.280
+2016-08-23: Version 5.4.499
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.279
+2016-08-23: Version 5.4.498
+
+ Performance and stability improvements on all platforms.
- Ship --harmony-regexp-exec (issue 4602).
+
+2016-08-23: Version 5.4.497
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.278
+2016-08-23: Version 5.4.496
- [V8] Removed debugger V8::PromiseEvent (Chromium issue 526811).
+ Performance and stability improvements on all platforms.
- [asm.js] Fix typing bug for non-literals in heap access (Chromium issue
- 599825).
- Ensure CreateDataProperty works correctly on TypedArrays (Chromium issue
- 596394).
+2016-08-23: Version 5.4.495
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.277
+2016-08-23: Version 5.4.494
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.276
+2016-08-23: Version 5.4.493
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.275
+2016-08-23: Version 5.4.492
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.274
+2016-08-22: Version 5.4.491
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.273
+2016-08-22: Version 5.4.490
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.272
+2016-08-22: Version 5.4.489
Performance and stability improvements on all platforms.
-2016-04-05: Version 5.1.271
+2016-08-22: Version 5.4.488
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.270
+2016-08-22: Version 5.4.487
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.269
+2016-08-22: Version 5.4.486
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.268
+2016-08-22: Version 5.4.485
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.267
+2016-08-22: Version 5.4.484
+
+ Performance and stability improvements on all platforms.
+
- [api] Restrict Template::Set to take templates or primitive values.
+2016-08-22: Version 5.4.483
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.266
+2016-08-22: Version 5.4.482
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.265
+2016-08-22: Version 5.4.481
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.264
+2016-08-22: Version 5.4.480
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.263
+2016-08-22: Version 5.4.479
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.262
+2016-08-20: Version 5.4.478
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.261
+2016-08-20: Version 5.4.477
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.260
+2016-08-20: Version 5.4.476
Performance and stability improvements on all platforms.
-2016-04-04: Version 5.1.259
+2016-08-19: Version 5.4.475
- Further ES2015 RegExp spec compliance fixes (issue 4602).
+ Performance and stability improvements on all platforms.
+
+
+2016-08-19: Version 5.4.474
Performance and stability improvements on all platforms.
-2016-04-03: Version 5.1.258
+2016-08-19: Version 5.4.473
Performance and stability improvements on all platforms.
-2016-04-02: Version 5.1.257
+2016-08-19: Version 5.4.472
Performance and stability improvements on all platforms.
-2016-04-02: Version 5.1.256
+2016-08-19: Version 5.4.471
Performance and stability improvements on all platforms.
-2016-04-02: Version 5.1.255
+2016-08-19: Version 5.4.470
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.254
+2016-08-19: Version 5.4.469
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.253
+2016-08-19: Version 5.4.468
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.252
+2016-08-19: Version 5.4.467
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.251
+2016-08-19: Version 5.4.466
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.250
+2016-08-19: Version 5.4.465
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.249
+2016-08-19: Version 5.4.464
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.248
+2016-08-19: Version 5.4.463
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.247
+2016-08-19: Version 5.4.462
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.246
+2016-08-19: Version 5.4.461
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.245
+2016-08-19: Version 5.4.460
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.244
+2016-08-18: Version 5.4.459
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.243
+2016-08-18: Version 5.4.458
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.242
+2016-08-18: Version 5.4.457
Performance and stability improvements on all platforms.
-2016-04-01: Version 5.1.241
+2016-08-18: Version 5.4.456
+
+ Performance and stability improvements on all platforms.
- [GN] Define USE_EABI_HARDFLOAT=1 when arm_float_abi=="hard" (Chromium
- issue 592660).
- Ship --harmony-regexp-exec (issue 4602).
+2016-08-18: Version 5.4.455
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.240
+2016-08-18: Version 5.4.454
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.239
+2016-08-18: Version 5.4.453
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.238
+2016-08-18: Version 5.4.452
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.237
+2016-08-18: Version 5.4.451
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.236
+2016-08-18: Version 5.4.450
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.235
+2016-08-17: Version 5.4.449
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.234
+2016-08-17: Version 5.4.448
- [arm/Linux] Don't rely on KUSER_HELPERS feature (Chromium issue 599051).
+ Performance and stability improvements on all platforms.
+
+
+2016-08-17: Version 5.4.447
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.233
+2016-08-17: Version 5.4.446
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.232
+2016-08-17: Version 5.4.445
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.231
+2016-08-17: Version 5.4.444
+
+ Performance and stability improvements on all platforms.
+
- Turn scavenge_reclaim_unmodified_objects on by default (Chromium issue
- 4880).
+2016-08-17: Version 5.4.443
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.230
+2016-08-17: Version 5.4.442
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.229
+2016-08-17: Version 5.4.441
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.228
+2016-08-17: Version 5.4.440
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.227
+2016-08-16: Version 5.4.439
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.226
+2016-08-16: Version 5.4.438
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.225
+2016-08-16: Version 5.4.437
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.224
+2016-08-16: Version 5.4.436
+
+ Performance and stability improvements on all platforms.
+
- Raise minimum Mac OS version to 10.7 (issue 4847).
+2016-08-16: Version 5.4.435
Performance and stability improvements on all platforms.
-2016-03-31: Version 5.1.223
+2016-08-16: Version 5.4.434
Performance and stability improvements on all platforms.
-2016-03-30: Version 5.1.222
+2016-08-16: Version 5.4.433
Performance and stability improvements on all platforms.
-2016-03-30: Version 5.1.221
+2016-08-16: Version 5.4.432
Performance and stability improvements on all platforms.
-2016-03-30: Version 5.1.220
+2016-08-16: Version 5.4.431
- Stage --harmony-regexp-exec (issue 4602).
+ Performance and stability improvements on all platforms.
- Add fast paths for native RegExps in ES2015 subclass-aware code (issue
- 4602).
- [V8] Add FunctionMirror.prototype.contextDebugId method (Chromium issue
- 595206).
+2016-08-16: Version 5.4.430
Performance and stability improvements on all platforms.
-2016-03-30: Version 5.1.219
+2016-08-16: Version 5.4.429
- Remove RegExp.prototype.source getter compat workaround (issue 4827,
- Chromium issue 581577).
+ Performance and stability improvements on all platforms.
- Check for proper types from error handling code (Chromium issue 596718).
- Add ES2015 RegExp full subclassing semantics behind a flag (issue 4602).
+2016-08-16: Version 5.4.428
Performance and stability improvements on all platforms.
-2016-03-24: Version 5.1.218
+2016-08-15: Version 5.4.427
Performance and stability improvements on all platforms.
-2016-03-24: Version 5.1.217
+2016-08-15: Version 5.4.426
+
+ Performance and stability improvements on all platforms.
+
- [esnext] implement String padding proposal.
+2016-08-15: Version 5.4.425
Performance and stability improvements on all platforms.
-2016-03-24: Version 5.1.216
+2016-08-15: Version 5.4.424
Performance and stability improvements on all platforms.
-2016-03-24: Version 5.1.215
+2016-08-15: Version 5.4.423
Performance and stability improvements on all platforms.
-2016-03-24: Version 5.1.214
+2016-08-14: Version 5.4.422
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.213
+2016-08-14: Version 5.4.421
+
+ Performance and stability improvements on all platforms.
+
- Implement ES2015 labelled function declaration restrictions (Chromium
- issue 595309).
+2016-08-13: Version 5.4.420
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.212
+2016-08-12: Version 5.4.419
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.211
+2016-08-12: Version 5.4.418
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.210
+2016-08-12: Version 5.4.417
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.209
+2016-08-12: Version 5.4.416
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.208
+2016-08-12: Version 5.4.415
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.207
+2016-08-12: Version 5.4.414
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.206
+2016-08-12: Version 5.4.413
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.205
+2016-08-12: Version 5.4.412
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.204
+2016-08-12: Version 5.4.411
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.203
+2016-08-12: Version 5.4.410
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.202
+2016-08-12: Version 5.4.409
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.201
+2016-08-12: Version 5.4.408
Performance and stability improvements on all platforms.
-2016-03-23: Version 5.1.200
+2016-08-12: Version 5.4.407
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.199
+2016-08-11: Version 5.4.406
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.198
+2016-08-11: Version 5.4.405
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.197
+2016-08-11: Version 5.4.404
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.196
+2016-08-11: Version 5.4.403
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.195
+2016-08-11: Version 5.4.402
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.194
+2016-08-11: Version 5.4.401
+
+ Performance and stability improvements on all platforms.
+
- Fix match default behavior on strings for ES2015 semantics (issue 4602).
+2016-08-11: Version 5.4.400
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.193
+2016-08-11: Version 5.4.399
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.192
+2016-08-10: Version 5.4.398
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.191
+2016-08-10: Version 5.4.397
- [Interpreter] TurboFan implementation of intrinsics (issue 4822).
+ Performance and stability improvements on all platforms.
+
+
+2016-08-10: Version 5.4.396
Performance and stability improvements on all platforms.
-2016-03-22: Version 5.1.190
+2016-08-10: Version 5.4.395
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.189
+2016-08-10: Version 5.4.394
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.188
+2016-08-10: Version 5.4.393
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.187
+2016-08-10: Version 5.4.392
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.186
+2016-08-09: Version 5.4.391
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.185
+2016-08-09: Version 5.4.390
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.184
+2016-08-09: Version 5.4.389
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.183
+2016-08-09: Version 5.4.388
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.182
+2016-08-09: Version 5.4.387
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.181
+2016-08-09: Version 5.4.386
+
+ Performance and stability improvements on all platforms.
+
- Temporarily undeprecate ForceSet (Chromium issue 595601).
+2016-08-09: Version 5.4.385
Performance and stability improvements on all platforms.
-2016-03-21: Version 5.1.180
+2016-08-09: Version 5.4.384
Performance and stability improvements on all platforms.
-2016-03-20: Version 5.1.179
+2016-08-09: Version 5.4.383
Performance and stability improvements on all platforms.
-2016-03-20: Version 5.1.178
+2016-08-09: Version 5.4.382
Performance and stability improvements on all platforms.
-2016-03-20: Version 5.1.177
+2016-08-09: Version 5.4.381
Performance and stability improvements on all platforms.
-2016-03-19: Version 5.1.176
+2016-08-09: Version 5.4.380
Performance and stability improvements on all platforms.
-2016-03-18: Version 5.1.175
+2016-08-09: Version 5.4.379
Performance and stability improvements on all platforms.
-2016-03-18: Version 5.1.174
+2016-08-09: Version 5.4.378
Performance and stability improvements on all platforms.
-2016-03-18: Version 5.1.173
+2016-08-08: Version 5.4.377
+
+ Performance and stability improvements on all platforms.
- Parser: Make skipping HTML comments optional (Chromium issue 573887).
- [es7] implement exponentiation operator proposal (issue 3915).
+2016-08-08: Version 5.4.376
Performance and stability improvements on all platforms.
-2016-03-18: Version 5.1.172
+2016-08-08: Version 5.4.375
Performance and stability improvements on all platforms.
-2016-03-18: Version 5.1.171
+2016-08-08: Version 5.4.374
Performance and stability improvements on all platforms.
-2016-03-18: Version 5.1.170
+2016-08-08: Version 5.4.373
Performance and stability improvements on all platforms.
-2016-03-18: Version 5.1.169
+2016-08-08: Version 5.4.372
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.168
+2016-08-08: Version 5.4.371
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.167
+2016-08-08: Version 5.4.370
+
+ Performance and stability improvements on all platforms.
+
- Throw the right exceptions from setting elements in
- Array.prototype.concat (Chromium issue 595319).
+2016-08-08: Version 5.4.369
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.166
+2016-08-08: Version 5.4.368
+
+ Performance and stability improvements on all platforms.
+
- Throw exceptions from CreateDataProperty when should_throw (Chromium
- issue 595319).
+2016-08-08: Version 5.4.367
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.165
+2016-08-05: Version 5.4.366
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.164
+2016-08-05: Version 5.4.365
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.163
+2016-08-05: Version 5.4.364
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.162
+2016-08-05: Version 5.4.363
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.161
+2016-08-05: Version 5.4.362
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.160
+2016-08-05: Version 5.4.361
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.159
+2016-08-05: Version 5.4.360
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.158
+2016-08-05: Version 5.4.359
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.157
+2016-08-05: Version 5.4.358
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.156
+2016-08-05: Version 5.4.357
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.155
+2016-08-05: Version 5.4.356
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.154
+2016-08-04: Version 5.4.355
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.153
+2016-08-04: Version 5.4.354
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.152
+2016-08-04: Version 5.4.353
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.151
+2016-08-04: Version 5.4.352
- Move FastAccessorAssembler from RawMachineAssembler to CodeStubAssembler
- (Chromium issue 508898).
+ Performance and stability improvements on all platforms.
+
+
+2016-08-04: Version 5.4.351
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.150
+2016-08-04: Version 5.4.350
+
+ Performance and stability improvements on all platforms.
- [serializer] Add API to warm up startup snapshot with an additional
- script (issue 4836).
+
+2016-08-04: Version 5.4.349
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.149
+2016-08-03: Version 5.4.348
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.148
+2016-08-03: Version 5.4.347
Performance and stability improvements on all platforms.
-2016-03-17: Version 5.1.147
+2016-08-03: Version 5.4.346
Performance and stability improvements on all platforms.
-2016-03-16: Version 5.1.146
+2016-08-03: Version 5.4.345
- Ship ES2015 restrictions on function declaration locations (issue 4824).
+ Performance and stability improvements on all platforms.
+
+
+2016-08-03: Version 5.4.344
Performance and stability improvements on all platforms.
-2016-03-16: Version 5.1.145
+2016-08-03: Version 5.4.343
Performance and stability improvements on all platforms.
-2016-03-16: Version 5.1.144
+2016-08-03: Version 5.4.342
Performance and stability improvements on all platforms.
-2016-03-16: Version 5.1.143
+2016-08-03: Version 5.4.341
Performance and stability improvements on all platforms.
-2016-03-16: Version 5.1.142
+2016-08-03: Version 5.4.340
Performance and stability improvements on all platforms.
-2016-03-16: Version 5.1.141
+2016-08-03: Version 5.4.339
Performance and stability improvements on all platforms.
-2016-03-16: Version 5.1.140
+2016-08-03: Version 5.4.338
- Put RegExp js code in strict mode (issue 4504).
+ Performance and stability improvements on all platforms.
+
+
+2016-08-03: Version 5.4.337
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.139
+2016-08-02: Version 5.4.336
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.138
+2016-08-02: Version 5.4.335
+
+ Performance and stability improvements on all platforms.
+
- [builtins] Fix Array.prototype.concat bug (Chromium issue 594574).
+2016-08-02: Version 5.4.334
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.137
+2016-08-02: Version 5.4.333
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.136
+2016-08-02: Version 5.4.332
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.135
+2016-08-02: Version 5.4.331
- Ship Array.prototype.values (issue 4247).
+ Performance and stability improvements on all platforms.
+
+
+2016-08-02: Version 5.4.330
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.134
+2016-08-02: Version 5.4.329
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.133
+2016-08-02: Version 5.4.328
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.132
+2016-08-02: Version 5.4.327
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.131
+2016-08-02: Version 5.4.326
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.130
+2016-08-02: Version 5.4.325
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.129
+2016-08-02: Version 5.4.324
Performance and stability improvements on all platforms.
-2016-03-15: Version 5.1.128
+2016-08-02: Version 5.4.323
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.127
+2016-08-02: Version 5.4.322
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.126
+2016-08-02: Version 5.4.321
+
+ Performance and stability improvements on all platforms.
+
- Remove --harmony-modules flag and let embedder decide when modules are
- used (issue 1569, Chromium issue 594639).
+2016-08-01: Version 5.4.320
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.125
+2016-08-01: Version 5.4.319
+
+ Performance and stability improvements on all platforms.
+
- Make test262 test runner check for which exception is thrown (issue
- 4803).
+2016-08-01: Version 5.4.318
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.124
+2016-08-01: Version 5.4.317
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.123
+2016-08-01: Version 5.4.316
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.122
+2016-08-01: Version 5.4.315
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.121
+2016-08-01: Version 5.4.314
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.120
+2016-08-01: Version 5.4.313
Performance and stability improvements on all platforms.
-2016-03-14: Version 5.1.119
+2016-08-01: Version 5.4.312
Performance and stability improvements on all platforms.
-2016-03-13: Version 5.1.118
+2016-08-01: Version 5.4.311
Performance and stability improvements on all platforms.
-2016-03-11: Version 5.1.117
+2016-08-01: Version 5.4.310
Performance and stability improvements on all platforms.
-2016-03-11: Version 5.1.116
+2016-08-01: Version 5.4.309
Performance and stability improvements on all platforms.
-2016-03-11: Version 5.1.115
+2016-08-01: Version 5.4.308
Performance and stability improvements on all platforms.
-2016-03-11: Version 5.1.114
+2016-08-01: Version 5.4.307
+
+ Performance and stability improvements on all platforms.
+
- [arm64] Fix i/d cache line size confusion typo (Chromium issue 593867).
+2016-08-01: Version 5.4.306
Performance and stability improvements on all platforms.
-2016-03-11: Version 5.1.113
+2016-07-31: Version 5.4.305
- Fix expression positions for for-loops (issue 4690).
+ Performance and stability improvements on all platforms.
+
+
+2016-07-31: Version 5.4.304
Performance and stability improvements on all platforms.
-2016-03-11: Version 5.1.112
+2016-07-30: Version 5.4.303
Performance and stability improvements on all platforms.
-2016-03-11: Version 5.1.111
+2016-07-30: Version 5.4.302
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.110
+2016-07-29: Version 5.4.301
+
+ Performance and stability improvements on all platforms.
- Minor library function fixes for TypedArray spec compliance (issue
- 4785).
- Check that Promise subclasses have callable resolve/reject (issue 4633).
+2016-07-29: Version 5.4.300
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.109
+2016-07-29: Version 5.4.299
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.108
+2016-07-29: Version 5.4.298
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.107
+2016-07-29: Version 5.4.297
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.106
+2016-07-29: Version 5.4.296
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.105
+2016-07-29: Version 5.4.295
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.104
+2016-07-29: Version 5.4.294
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.103
+2016-07-29: Version 5.4.293
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.102
+2016-07-29: Version 5.4.292
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.101
+2016-07-29: Version 5.4.291
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.100
+2016-07-29: Version 5.4.290
+
+ Performance and stability improvements on all platforms.
+
- [strong] Remove all remainders of strong mode (issue 3956).
+2016-07-29: Version 5.4.289
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.99
+2016-07-29: Version 5.4.288
+
+ Performance and stability improvements on all platforms.
+
- Marks the label associated with the runtime call in
- CodeStubAssembler::Allocate as deferred (Chromium issue 593359).
+2016-07-28: Version 5.4.287
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.98
+2016-07-28: Version 5.4.286
+
+ Performance and stability improvements on all platforms.
+
- Implement iterator finalization in array destructuring (issue 3566).
+2016-07-28: Version 5.4.285
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.97
+2016-07-28: Version 5.4.284
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.96
+2016-07-28: Version 5.4.283
Performance and stability improvements on all platforms.
-2016-03-10: Version 5.1.95
+2016-07-28: Version 5.4.282
- String.prototype[Symbol.iterator] does RequireObjectCoercible(this)
- (issue 4348).
+ Performance and stability improvements on all platforms.
- Stage restrictive declarations flag (issue 4824).
- Expose Array.prototype.values behind a flag and stage it (issue 4247).
+2016-07-28: Version 5.4.281
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.94
+2016-07-27: Version 5.4.280
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.93
+2016-07-27: Version 5.4.279
+
+ Performance and stability improvements on all platforms.
+
- Ensure appropriate bounds checking for Array subclass concat (Chromium
- issue 592340).
+2016-07-27: Version 5.4.278
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.92
+2016-07-27: Version 5.4.277
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.91
+2016-07-27: Version 5.4.276
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.90
+2016-07-27: Version 5.4.275
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.89
+2016-07-27: Version 5.4.274
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.88
+2016-07-27: Version 5.4.273
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.87
+2016-07-27: Version 5.4.272
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.86
+2016-07-27: Version 5.4.271
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.85
+2016-07-27: Version 5.4.270
Performance and stability improvements on all platforms.
-2016-03-09: Version 5.1.84
+2016-07-27: Version 5.4.269
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.83
+2016-07-27: Version 5.4.268
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.82
+2016-07-27: Version 5.4.267
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.81
+2016-07-26: Version 5.4.266
- Optimize new TypedArray(typedArray) constructor (Chromium issue 592007).
+ Performance and stability improvements on all platforms.
- Ensure the @@species protector is updated for accessors (issue 4093).
- Add UseCounters for various RegExp compatibility issues (Chromium issue
- 581577).
+2016-07-26: Version 5.4.265
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.80
+2016-07-26: Version 5.4.264
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.79
+2016-07-26: Version 5.4.263
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.78
+2016-07-26: Version 5.4.262
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.77
+2016-07-26: Version 5.4.261
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.76
+2016-07-26: Version 5.4.260
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.75
+2016-07-26: Version 5.4.259
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.74
+2016-07-26: Version 5.4.258
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.73
+2016-07-26: Version 5.4.257
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.72
+2016-07-26: Version 5.4.256
Performance and stability improvements on all platforms.
-2016-03-08: Version 5.1.71
+2016-07-26: Version 5.4.255
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.70
+2016-07-26: Version 5.4.254
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.69
+2016-07-26: Version 5.4.253
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.68
+2016-07-26: Version 5.4.252
- [key-accumulator] Starting to reimplement the key-accumulator (issue
- 4758, Chromium issue 545503).
+ Performance and stability improvements on all platforms.
+
+
+2016-07-26: Version 5.4.251
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.67
+2016-07-26: Version 5.4.250
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.66
+2016-07-25: Version 5.4.249
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.65
+2016-07-25: Version 5.4.248
+
+ Performance and stability improvements on all platforms.
+
- [key-accumulator] Starting to reimplement the key-accumulator (issue
- 4758, Chromium issue 545503).
+2016-07-25: Version 5.4.247
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.64
+2016-07-25: Version 5.4.246
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.63
+2016-07-25: Version 5.4.245
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.62
+2016-07-25: Version 5.4.244
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.61
+2016-07-25: Version 5.4.243
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.60
+2016-07-25: Version 5.4.242
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.59
+2016-07-25: Version 5.4.241
+
+ Performance and stability improvements on all platforms.
- Use v8::kGCCallbackFlagCollectAllAvailableGarbage in
- Heap::CollectAllAvailableGarbage (Chromium issue 591463).
- [key-accumulator] Starting to reimplement the key-accumulator (issue
- 4758, Chromium issue 545503).
+2016-07-25: Version 5.4.240
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.58
+2016-07-25: Version 5.4.239
+
+ Performance and stability improvements on all platforms.
- [regexp] Fix off-by-one in CharacterRange::Negate (Chromium issue
- 592343).
+
+2016-07-25: Version 5.4.238
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.57
+2016-07-25: Version 5.4.237
Performance and stability improvements on all platforms.
-2016-03-07: Version 5.1.56
+2016-07-25: Version 5.4.236
+
+ Performance and stability improvements on all platforms.
- Use v8::kGCCallbackFlagCollectAllAvailableGarbage in
- Heap::CollectAllAvailableGarbage (Chromium issue 591463).
+
+2016-07-25: Version 5.4.235
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-24: Version 5.4.234
Performance and stability improvements on all platforms.
-2016-03-06: Version 5.1.55
+2016-07-24: Version 5.4.233
Performance and stability improvements on all platforms.
-2016-03-06: Version 5.1.54
+2016-07-23: Version 5.4.232
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.53
+2016-07-22: Version 5.4.231
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.52
+2016-07-22: Version 5.4.230
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.51
+2016-07-22: Version 5.4.229
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.50
+2016-07-22: Version 5.4.228
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.49
+2016-07-22: Version 5.4.227
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.48
+2016-07-22: Version 5.4.226
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.47
+2016-07-22: Version 5.4.225
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.46
+2016-07-22: Version 5.4.224
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-22: Version 5.4.223
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-22: Version 5.4.222
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-22: Version 5.4.221
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-22: Version 5.4.220
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-22: Version 5.4.219
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-22: Version 5.4.218
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-21: Version 5.4.217
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-21: Version 5.4.216
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-21: Version 5.4.215
+
+ Performance and stability improvements on all platforms.
+
- Introduce v8::MicrotasksScope (Chromium issue 585949).
+2016-07-21: Version 5.4.214
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.45
+2016-07-21: Version 5.4.213
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.44
+2016-07-21: Version 5.4.212
+
+ Performance and stability improvements on all platforms.
- Use a different GCCallbackFlag for GCs triggered by
- CollectAllAvailableGarbage (Chromium issue 591463).
+
+2016-07-21: Version 5.4.211
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.43
+2016-07-21: Version 5.4.210
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.42
+2016-07-21: Version 5.4.209
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.41
+2016-07-21: Version 5.4.208
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.40
+2016-07-21: Version 5.4.207
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.39
+2016-07-21: Version 5.4.206
+
+ Performance and stability improvements on all platforms.
- Ship ES2015 Function.name reform (issue 3699, Chromium issue 588803).
- Introduce v8::MicrotasksScope (Chromium issue 585949).
+2016-07-21: Version 5.4.205
Performance and stability improvements on all platforms.
-2016-03-04: Version 5.1.38
+2016-07-21: Version 5.4.204
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.37
+2016-07-21: Version 5.4.203
- Restrict FunctionDeclarations in Statement position (issue 4647).
+ Performance and stability improvements on all platforms.
+
+
+2016-07-21: Version 5.4.202
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.36
+2016-07-21: Version 5.4.201
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.35
+2016-07-20: Version 5.4.200
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.34
+2016-07-20: Version 5.4.199
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.33
+2016-07-20: Version 5.4.198
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.32
+2016-07-20: Version 5.4.197
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.31
+2016-07-20: Version 5.4.196
Performance and stability improvements on all platforms.
-2016-03-03: Version 5.1.30
+2016-07-20: Version 5.4.195
+
+ Performance and stability improvements on all platforms.
+
- Implement TypedArray(typedarray) constructor (issue 4726).
+2016-07-20: Version 5.4.194
Performance and stability improvements on all platforms.
-2016-03-02: Version 5.1.29
+2016-07-20: Version 5.4.193
Performance and stability improvements on all platforms.
-2016-03-02: Version 5.1.28
+2016-07-20: Version 5.4.192
- [turbofan] Adds an Allocate macro to the CodeStubAssembler (Chromium
- issue 588692).
+ Performance and stability improvements on all platforms.
+
+
+2016-07-20: Version 5.4.191
Performance and stability improvements on all platforms.
-2016-03-02: Version 5.1.27
+2016-07-19: Version 5.4.190
Performance and stability improvements on all platforms.
-2016-03-02: Version 5.1.26
+2016-07-19: Version 5.4.189
Performance and stability improvements on all platforms.
-2016-03-02: Version 5.1.25
+2016-07-18: Version 5.4.188
Performance and stability improvements on all platforms.
-2016-03-02: Version 5.1.24
+2016-07-18: Version 5.4.187
+
+ Performance and stability improvements on all platforms.
- Devtools: expose scopes source location to debugger (Chromium issue
- 327092).
- CodeStubAssembler can generate code for builtins (issue 4614).
+2016-07-18: Version 5.4.186
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.23
+2016-07-18: Version 5.4.185
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.22
+2016-07-18: Version 5.4.184
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.21
+2016-07-18: Version 5.4.183
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.20
+2016-07-18: Version 5.4.182
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.19
+2016-07-18: Version 5.4.181
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.18
+2016-07-18: Version 5.4.180
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.17
+2016-07-18: Version 5.4.179
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.16
+2016-07-18: Version 5.4.178
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.15
+2016-07-18: Version 5.4.177
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.14
+2016-07-18: Version 5.4.176
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.13
+2016-07-18: Version 5.4.175
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.12
+2016-07-17: Version 5.4.174
Performance and stability improvements on all platforms.
-2016-03-01: Version 5.1.11
+2016-07-17: Version 5.4.173
+
+ Performance and stability improvements on all platforms.
+
- Make %TypedArray%.from spec-compliant (issue 4782).
+2016-07-17: Version 5.4.172
Performance and stability improvements on all platforms.
-2016-02-29: Version 5.1.10
+2016-07-16: Version 5.4.171
Performance and stability improvements on all platforms.
-2016-02-29: Version 5.1.9
+2016-07-16: Version 5.4.170
Performance and stability improvements on all platforms.
-2016-02-28: Version 5.1.8
+2016-07-16: Version 5.4.169
Performance and stability improvements on all platforms.
-2016-02-28: Version 5.1.7
+2016-07-15: Version 5.4.168
Performance and stability improvements on all platforms.
-2016-02-28: Version 5.1.6
+2016-07-15: Version 5.4.167
Performance and stability improvements on all platforms.
-2016-02-28: Version 5.1.5
+2016-07-15: Version 5.4.166
Performance and stability improvements on all platforms.
-2016-02-28: Version 5.1.4
+2016-07-15: Version 5.4.165
Performance and stability improvements on all platforms.
-2016-02-28: Version 5.1.3
+2016-07-15: Version 5.4.164
Performance and stability improvements on all platforms.
-2016-02-28: Version 5.1.2
+2016-07-15: Version 5.4.163
Performance and stability improvements on all platforms.
-2016-02-27: Version 5.1.1
+2016-07-15: Version 5.4.162
- Fix strict mode function error message (issue 2198).
+ Performance and stability improvements on all platforms.
- Reland of Make Intl install properties more like how other builtins do
- (patchset #1 id:1 of https://codereview.chromium.org/1733293003/ )
- (issue 4778).
- [turbofan] Bailout if LoadBuffer typing assumption doesn't hold
- (Chromium issue 589792).
+2016-07-15: Version 5.4.161
Performance and stability improvements on all platforms.
-2016-02-26: Version 5.0.104
+2016-07-15: Version 5.4.160
Performance and stability improvements on all platforms.
-2016-02-26: Version 5.0.103
+2016-07-15: Version 5.4.159
+
+ Performance and stability improvements on all platforms.
+
- Make Intl install properties more like how other builtins do (issue
- 4778).
+2016-07-15: Version 5.4.158
Performance and stability improvements on all platforms.
-2016-02-26: Version 5.0.102
+2016-07-15: Version 5.4.157
- Make TypedArray.from and TypedArray.of writable and configurable (issue
- 4315).
+ Performance and stability improvements on all platforms.
+
+
+2016-07-14: Version 5.4.156
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.101
+2016-07-14: Version 5.4.155
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.100
+2016-07-14: Version 5.4.154
+
+ Performance and stability improvements on all platforms.
+
- Ship ES2015 iterator finalization (issue 3566).
+2016-07-14: Version 5.4.153
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.99
+2016-07-14: Version 5.4.152
- Introduce MicrotasksCompletedCallback (Chromium issue 585949).
+ Performance and stability improvements on all platforms.
+
+
+2016-07-14: Version 5.4.151
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.98
+2016-07-14: Version 5.4.150
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.97
+2016-07-14: Version 5.4.149
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.96
+2016-07-14: Version 5.4.148
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.95
+2016-07-14: Version 5.4.147
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.94
+2016-07-14: Version 5.4.146
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.93
+2016-07-14: Version 5.4.145
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.92
+2016-07-14: Version 5.4.144
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.91
+2016-07-14: Version 5.4.143
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.90
+2016-07-14: Version 5.4.142
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.89
+2016-07-14: Version 5.4.141
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.88
+2016-07-14: Version 5.4.140
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.87
+2016-07-14: Version 5.4.139
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.86
+2016-07-14: Version 5.4.138
Performance and stability improvements on all platforms.
-2016-02-25: Version 5.0.85
+2016-07-14: Version 5.4.137
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.84
+2016-07-14: Version 5.4.136
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.83
+2016-07-14: Version 5.4.135
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.82
+2016-07-13: Version 5.4.134
+
+ Performance and stability improvements on all platforms.
- Ensure IteratorClose is called for errors in non-declaring assignments
- (issue 4776).
- Fix priority of exceptions being thrown from for-of loops (issue 4775).
+2016-07-13: Version 5.4.133
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.81
+2016-07-13: Version 5.4.132
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.80
+2016-07-13: Version 5.4.131
+
+ Performance and stability improvements on all platforms.
- Encode interpreter::SourcePositionTable as variable-length ints (issue
- 4690).
- Stage ES2015 iterator finalization (issue 3566).
+2016-07-13: Version 5.4.130
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.79
+2016-07-13: Version 5.4.129
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.78
+2016-07-13: Version 5.4.128
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.77
+2016-07-13: Version 5.4.127
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.76
+2016-07-13: Version 5.4.126
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.75
+2016-07-13: Version 5.4.125
Performance and stability improvements on all platforms.
-2016-02-24: Version 5.0.74
+2016-07-13: Version 5.4.124
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.73
+2016-07-13: Version 5.4.123
- Intl: Use private symbols to memoize bound functions (issue 3785).
+ Performance and stability improvements on all platforms.
- Ensure Array.prototype.indexOf returns +0 rather than -0.
- Ship ES2015 Symbol.species (issue 4093).
+2016-07-13: Version 5.4.122
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.72
+2016-07-13: Version 5.4.121
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.71
+2016-07-13: Version 5.4.120
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.70
+2016-07-13: Version 5.4.119
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.69
+2016-07-13: Version 5.4.118
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.68
+2016-07-13: Version 5.4.117
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.67
+2016-07-13: Version 5.4.116
Performance and stability improvements on all platforms.
-2016-02-23: Version 5.0.66
+2016-07-13: Version 5.4.115
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.65
+2016-07-13: Version 5.4.114
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.64
+2016-07-13: Version 5.4.113
+
+ Performance and stability improvements on all platforms.
- ES2015 web compat workaround: RegExp.prototype.flags => "" (Chromium
- issue 581577).
- Remove the Proxy enumerate trap (issue 4768).
+2016-07-13: Version 5.4.112
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.63
+2016-07-13: Version 5.4.111
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.62
+2016-07-12: Version 5.4.110
+
+ Performance and stability improvements on all platforms.
+
- Remove Reflect.enumerate (issue 4768).
+2016-07-12: Version 5.4.109
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.61
+2016-07-12: Version 5.4.108
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.60
+2016-07-12: Version 5.4.107
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.59
+2016-07-12: Version 5.4.106
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.58
+2016-07-12: Version 5.4.105
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.57
+2016-07-12: Version 5.4.104
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.56
+2016-07-12: Version 5.4.103
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.55
+2016-07-12: Version 5.4.102
Performance and stability improvements on all platforms.
-2016-02-22: Version 5.0.54
+2016-07-12: Version 5.4.101
Performance and stability improvements on all platforms.
-2016-02-21: Version 5.0.53
+2016-07-12: Version 5.4.100
Performance and stability improvements on all platforms.
-2016-02-21: Version 5.0.52
+2016-07-12: Version 5.4.99
Performance and stability improvements on all platforms.
-2016-02-21: Version 5.0.51
+2016-07-12: Version 5.4.98
Performance and stability improvements on all platforms.
-2016-02-21: Version 5.0.50
+2016-07-12: Version 5.4.97
Performance and stability improvements on all platforms.
-2016-02-21: Version 5.0.49
+2016-07-12: Version 5.4.96
Performance and stability improvements on all platforms.
-2016-02-21: Version 5.0.48
+2016-07-12: Version 5.4.95
Performance and stability improvements on all platforms.
-2016-02-20: Version 5.0.47
+2016-07-12: Version 5.4.94
Performance and stability improvements on all platforms.
-2016-02-20: Version 5.0.46
+2016-07-12: Version 5.4.93
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.45
+2016-07-11: Version 5.4.92
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.44
+2016-07-11: Version 5.4.91
+
+ Performance and stability improvements on all platforms.
+
- Return undefined from RegExp.prototype.compile (Chromium issue 585775).
+2016-07-11: Version 5.4.90
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.43
+2016-07-11: Version 5.4.89
- Disable --harmony-object-observe (Chromium issue 552100).
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.88
+
+ Recognize HTMLCloseComment after multiline comment (issue 5142).
+
+ Performance and stability improvements on all platforms.
+
+
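For context, "HTMLCloseComment" is the Annex B web-compatibility rule that
treats "-->" at the start of a line in sloppy-mode script code as a
single-line comment. A minimal sketch of the case this entry fixes, where
the "-->" is preceded by a multiline comment on the same line:

    /* a multiline
       comment */ --> the rest of this line now parses as a comment
    var ok = true;  // parsing continues normally on the next line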
+2016-07-11: Version 5.4.87
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.82
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.81
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.80
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.76
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.73
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.72
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-11: Version 5.4.71
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.69
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.68
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.66
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.65
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.64
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.62
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.61
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-08: Version 5.4.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.59
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.58
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.56
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.50
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.49
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.48
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.47
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-07: Version 5.4.46
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.45
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.44
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.43
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.42
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.41
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.40
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.39
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.38
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.37
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.36
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.35
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.34
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.33
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.32
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-06: Version 5.4.31
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.30
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.29
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.28
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.27
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.26
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.25
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.24
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.23
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-05: Version 5.4.22
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.21
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.20
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.19
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.18
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.17
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.16
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.15
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.14
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.13
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.12
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.11
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.10
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.9
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-04: Version 5.4.8
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-03: Version 5.4.7
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-03: Version 5.4.6
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-03: Version 5.4.5
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-03: Version 5.4.4
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-01: Version 5.4.3
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-01: Version 5.4.2
+
+ Performance and stability improvements on all platforms.
+
+
+2016-07-01: Version 5.4.1
+
+ [stubs] GetPropertyStub added (issue 4911).
+
+ [wasm] Fix receiver conversion for WASM->JS calls (Chromium issue
+ 624713).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-30: Version 5.3.351
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-30: Version 5.3.350
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.349
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.348
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.347
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.346
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.345
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.344
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.343
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.342
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.341
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.340
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.339
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.338
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.337
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.336
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-29: Version 5.3.335
+
+ Allow trailing commas in function parameter lists (issue 5051).
+
+ Performance and stability improvements on all platforms.
+
+
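For illustration, a trailing comma after the last function parameter is
simply ignored under this change (a sketch; the eventual ES2017 proposal
also covers call argument lists):

    function add(a, b,) {   // trailing comma after the last parameter
      return a + b;
    }
    add(1, 2);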
+2016-06-28: Version 5.3.334
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-28: Version 5.3.333
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.332
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.331
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.330
+
+ [stubs] Implementing CodeStubAssembler::GetOwnProperty() (issue 4911).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.329
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.328
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.327
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.326
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-27: Version 5.3.325
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-26: Version 5.3.324
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-26: Version 5.3.323
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-26: Version 5.3.322
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-26: Version 5.3.321
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-24: Version 5.3.320
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-24: Version 5.3.319
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-24: Version 5.3.318
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-24: Version 5.3.317
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-24: Version 5.3.316
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-23: Version 5.3.315
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-23: Version 5.3.314
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-23: Version 5.3.313
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-23: Version 5.3.312
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.311
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.310
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.309
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.308
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.307
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.306
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.305
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.304
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.303
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.302
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.301
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.300
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.299
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.298
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.297
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.296
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.295
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.294
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.293
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-22: Version 5.3.292
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.291
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.290
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.289
+
+ Reland: change most cases of variable redeclaration from TypeError to
+ SyntaxError (issue 4955).
+
+ Performance and stability improvements on all platforms.
+
+
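A sketch of one affected pattern, assuming two scripts evaluated in the
same global scope (e.g. successive script tags or REPL inputs); the exact
error message is engine-specific:

    // script 1
    let x = 1;

    // script 2, evaluated later in the same global scope
    var x = 2;  // previously reported as a TypeError at run time;
                // after this change, rejected early with a SyntaxError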
+2016-06-21: Version 5.3.288
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.287
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.286
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.285
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.284
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.283
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.282
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.281
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-21: Version 5.3.280
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.279
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.278
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.277
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.276
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.275
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.274
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.273
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.272
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.271
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.270
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.269
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.268
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.267
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.266
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.265
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.264
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.263
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.262
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.261
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.260
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.259
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.258
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-20: Version 5.3.257
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-17: Version 5.3.256
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-17: Version 5.3.255
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-17: Version 5.3.254
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-17: Version 5.3.253
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-17: Version 5.3.252
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-17: Version 5.3.251
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-17: Version 5.3.250
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.249
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.248
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.247
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.246
+
+ [es8] Unstage syntactic tail calls (issue 4915).
+
+ [ic] LoadICState cleanup (Chromium issue 576312).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.245
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.244
+
+ [ic] Remove --new-load-global-ic switch (Chromium issue 576312).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.243
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.242
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-16: Version 5.3.241
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.240
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.239
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.238
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.237
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.236
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.235
+
+ [ic] Enable new LoadGlobalIC machinery (Chromium issue 576312).
+
+ [ic] LoadGlobalIC is now able to cache PropertyCells in the feedback
+ vector (Chromium issue 576312).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.234
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.233
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.232
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.231
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-15: Version 5.3.230
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-14: Version 5.3.229
+
+ [stubs] Ensure that StoreTransitionStub does not bail out after the
+ properties backing store is enlarged (Chromium issue 601420).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-14: Version 5.3.228
+
+ [ic] Split LoadIC into LoadGlobalIC and LoadIC (Chromium issue 576312).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-14: Version 5.3.227
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-14: Version 5.3.226
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.225
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.224
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.223
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.222
+
+ Change most cases of variable redeclaration from TypeError to
+ SyntaxError (issue 4955).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.221
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.220
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.219
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.218
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.217
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-13: Version 5.3.216
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-12: Version 5.3.215
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-12: Version 5.3.214
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.213
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.212
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.211
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.210
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.209
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.208
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.207
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.206
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.205
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.204
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.203
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.202
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-10: Version 5.3.201
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.200
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.199
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.198
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.197
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.196
+
+ [ic] [stubs] Remove InlineCacheState field from the code flags (Chromium
+ issue 618701).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.195
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.194
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.193
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.192
+
+ [build] Use icu data file by default (Chromium issue 616033).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.191
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.190
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.189
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-09: Version 5.3.188
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.187
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.186
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.185
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.184
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.183
+
+ [build] Use sysroot for linux compilation with clang (Chromium issues
+ 474921, 616032).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.182
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.181
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.180
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.179
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.178
+
+ [icu] Support loading data file from default location (Chromium issue
+ 616033).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.177
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.176
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.175
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.174
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.173
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.172
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-08: Version 5.3.171
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.170
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.169
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.168
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.167
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.166
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.165
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.164
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.163
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.162
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.161
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.160
+
+ [runtime] Don't use ElementsTransitionAndStoreStub for transitions that
+ involve instance rewriting (issue 5009, Chromium issue 617524).
+
+ Performance and stability improvements on all platforms.
+
+
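Background for this and the related issue 5009 entries: V8 tracks the
"elements kind" of an object's backing store and transitions it as stores
change the shape of the data; some transitions also require rewriting the
instance itself. A sketch (the kind names are V8 internals, shown only for
illustration):

    const a = [1, 2, 3];   // starts as packed SMI (small-integer) elements
    a[0] = 1.5;            // store transitions the array to packed double elements
    a[1] = "two";          // and on to generic (tagged) elements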
+2016-06-07: Version 5.3.159
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.158
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.157
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.156
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.155
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-07: Version 5.3.154
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.153
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.152
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.151
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.150
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.149
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.148
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.147
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.146
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.145
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.144
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.143
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.142
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.141
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-06: Version 5.3.140
+
+ [build] Use sysroot for linux compilation with clang (Chromium issues
+ 474921, 616032).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-05: Version 5.3.139
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-04: Version 5.3.138
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.137
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.136
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.135
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.134
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.133
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.132
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.131
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.130
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.129
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.128
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.127
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.126
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.125
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.124
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.123
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.122
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-03: Version 5.3.121
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.120
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.119
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.118
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.117
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.116
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.115
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.114
+
+ [stubs] Extend HasProperty stub with dictionary-mode, string wrapper and
+ double-elements objects support (issue 2743).
+
+ Performance and stability improvements on all platforms.
+
+
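For context, the HasProperty stub backs JavaScript's "in" operator (among
other lookups). A sketch touching the three newly supported object shapes
(the comment about dictionary mode describes a typical V8 heuristic, not a
guarantee):

    const wrapper = new String("abc");
    console.log(0 in wrapper);     // true: indexed property on a string wrapper

    const doubles = [1.5, 2.5];
    console.log(1 in doubles);     // true: double-elements backing store

    const dict = { a: 1, b: 2 };
    delete dict.a;                 // deletion typically forces dictionary mode
    console.log("b" in dict);      // true: dictionary-mode property lookup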
+2016-06-02: Version 5.3.113
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.112
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.111
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.110
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.109
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.108
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-02: Version 5.3.107
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.106
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.105
+
+ Extend HasProperty stub with dictionary-mode, string wrapper and
+ double-elements objects support (issue 2743).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.104
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.103
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.102
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.101
+
+ [runtime] Ensure that all elements kind transitions are chained to the
+ root map (issue 5009).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.100
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.99
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.98
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.97
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.96
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.95
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.94
+
+ Performance and stability improvements on all platforms.
+
+
+2016-06-01: Version 5.3.93
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.92
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.91
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.90
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.89
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.88
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.87
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-31: Version 5.3.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.82
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.81
+
+ [api] Remove deprecated memory allocation callback API (issue 4813).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.80
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.76
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.73
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.72
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.71
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.69
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-30: Version 5.3.68
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-29: Version 5.3.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-28: Version 5.3.66
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-28: Version 5.3.65
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.64
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.62
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.61
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.59
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.58
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-27: Version 5.3.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-26: Version 5.3.56
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-26: Version 5.3.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-26: Version 5.3.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-26: Version 5.3.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.50
+
+ TypedArray: Make byteOffset, byteLength, and length configurable (issue
+ 4902).
+
+ Make %ThrowTypeError% function(s) strict (issue 4925).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.49
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.48
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.47
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.46
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.45
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.44
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.43
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.42
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.41
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.40
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-25: Version 5.3.39
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.38
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.37
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.36
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.35
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.34
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.33
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.32
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.31
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.30
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.29
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.28
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.27
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.26
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.25
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.24
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.23
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-24: Version 5.3.22
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.21
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.20
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.19
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.18
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.17
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.16
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.15
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.14
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.13
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.12
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.11
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.10
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-23: Version 5.3.9
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-21: Version 5.3.8
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-21: Version 5.3.7
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-21: Version 5.3.6
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-20: Version 5.3.5
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-20: Version 5.3.4
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-20: Version 5.3.3
+
+ Move case conversion with ICU to 'staged' (issues 4476, 4477).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-20: Version 5.3.2
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-20: Version 5.3.1
+
+ Move case conversion with ICU to 'staged' (issues 4476, 4477).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-19: Version 5.2.371
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-19: Version 5.2.370
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-19: Version 5.2.369
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-19: Version 5.2.368
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.367
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.366
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.365
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.364
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.363
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.362
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.361
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.360
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.359
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.358
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.357
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.356
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.355
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.354
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.353
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.352
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.351
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.350
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.349
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-18: Version 5.2.348
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.347
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.346
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.345
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.344
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.343
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.342
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.341
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.340
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.339
+
+ [wasm] Remove extra nops in asm-wasm (issue 4203).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.338
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.337
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.336
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.335
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.334
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.333
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-17: Version 5.2.332
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-16: Version 5.2.331
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-16: Version 5.2.330
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-16: Version 5.2.329
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-16: Version 5.2.328
+
+ Add UseCounter for decimal with leading zero (issue 4973).
+
+ [esnext] implement frontend changes for async/await proposal (issue
+ 4483).
+
+ Performance and stability improvements on all platforms.
+
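As a hedged sketch of how an embedder observes counters such as the new
leading-zero-decimal one (the callback name below is hypothetical, and the
specific UseCounterFeature enum value is not shown):

    // v8::Isolate::UseCounterFeature identifies which feature fired.
    void OnUseCounter(v8::Isolate* isolate,
                      v8::Isolate::UseCounterFeature feature) {
      // e.g. bump a histogram bucket keyed by `feature`
    }

    // during isolate setup:
    isolate->SetUseCounterCallback(OnUseCounter);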
+
+2016-05-16: Version 5.2.327
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-15: Version 5.2.326
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.42
- Introduce BeforeCallEnteredCallback (Chromium issue 585949).
+2016-05-14: Version 5.2.325
+ Performance and stability improvements on all platforms.
+
+
+2016-05-14: Version 5.2.324
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-13: Version 5.2.323
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-13: Version 5.2.322
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-13: Version 5.2.321
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-13: Version 5.2.320
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-13: Version 5.2.319
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-13: Version 5.2.318
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.41
+2016-05-13: Version 5.2.317
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.40
+2016-05-13: Version 5.2.316
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.39
+2016-05-13: Version 5.2.315
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.38
- [wasm] Add support for import section (Chromium issue 575167).
+2016-05-13: Version 5.2.314
+
+ Performance and stability improvements on all platforms.
+
+2016-05-13: Version 5.2.313
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.37
+2016-05-13: Version 5.2.312
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.36
+2016-05-13: Version 5.2.311
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.35
+2016-05-13: Version 5.2.310
+
+ [api] Clarify expectations of ArrayBuffer::Allocator in API (Chromium
+ issue 611688).
Performance and stability improvements on all platforms.
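
A minimal sketch of the contract this entry clarifies; the subclass name is
hypothetical, while the three virtuals are the public
v8::ArrayBuffer::Allocator interface:

    #include <cstdlib>
    #include "v8.h"

    class MallocAllocator : public v8::ArrayBuffer::Allocator {
     public:
      // Allocate() must return zero-initialized memory.
      void* Allocate(size_t length) override {
        return std::calloc(length, 1);
      }
      // AllocateUninitialized() may skip the zeroing.
      void* AllocateUninitialized(size_t length) override {
        return std::malloc(length);
      }
      void Free(void* data, size_t) override { std::free(data); }
    };
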
-2016-02-19: Version 5.0.34
+2016-05-13: Version 5.2.309
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.33
+2016-05-13: Version 5.2.308
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.32
+2016-05-13: Version 5.2.307
+
+ In parallel to the strict octal check that would reject `012` in strict
+ mode, this patch collects UseCounters for `089` in strict mode. The spec
+ says this should be an error, but this patch does not report it as such
+ (issue 4973).
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.31
+2016-05-12: Version 5.2.306
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.30
- Mark old SetAccessCheckCallback as deprecated.
+2016-05-12: Version 5.2.305
+ Performance and stability improvements on all platforms.
+
+
+2016-05-12: Version 5.2.304
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.29
+2016-05-12: Version 5.2.303
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.28
+2016-05-12: Version 5.2.302
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.27
+2016-05-12: Version 5.2.301
Performance and stability improvements on all platforms.
-2016-02-19: Version 5.0.26
+2016-05-12: Version 5.2.300
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.25
+2016-05-12: Version 5.2.299
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.24
- Make Date.prototype.toGMTString an alias for Date.prototype.toUTCString
- (issue 4708).
+2016-05-12: Version 5.2.298
+
+ Performance and stability improvements on all platforms.
+
+2016-05-12: Version 5.2.297
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.23
+2016-05-12: Version 5.2.296
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.22
+2016-05-12: Version 5.2.295
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.21
+2016-05-11: Version 5.2.294
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.20
+2016-05-11: Version 5.2.293
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.19
+2016-05-11: Version 5.2.292
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.18
+2016-05-11: Version 5.2.291
+
+ Use ICU case conversion/transliterator for case conversion (issues 4476,
+ 4477).
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.17
+2016-05-11: Version 5.2.290
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.16
- [es6] Implement for-of iterator finalization (issue 2214).
+2016-05-11: Version 5.2.289
+
+ Performance and stability improvements on all platforms.
+
+2016-05-11: Version 5.2.288
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.15
+2016-05-11: Version 5.2.287
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.14
- Use displayName in Error.stack rendering if present (issue 4761).
+2016-05-11: Version 5.2.286
+
+ Performance and stability improvements on all platforms.
+
+2016-05-11: Version 5.2.285
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.13
+2016-05-11: Version 5.2.284
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.12
+2016-05-10: Version 5.2.283
Performance and stability improvements on all platforms.
-2016-02-18: Version 5.0.11
+2016-05-10: Version 5.2.282
+
+ Fix Map::AsArray to properly iterate over the backing store (issue
+ 4946).
Performance and stability improvements on all platforms.
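
A sketch of the fixed behavior, with handle names assumed:

    // given a live v8::Local<v8::Map> map:
    v8::Local<v8::Array> entries = map->AsArray();
    // layout is [key0, value0, key1, value1, ...] in insertion order,
    // so entries->Length() == 2 * map->Size()
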
-2016-02-17: Version 5.0.10
- [Atomics] Add dmb/dsb/isb instructions to ARM (issue 4614).
+2016-05-10: Version 5.2.281
+
+ Remove LOG line checking from presubmit.
+
+ Performance and stability improvements on all platforms.
+
+2016-05-10: Version 5.2.280
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.9
+2016-05-10: Version 5.2.279
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.8
+2016-05-10: Version 5.2.278
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.7
+2016-05-10: Version 5.2.277
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.6
+2016-05-10: Version 5.2.276
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.5
+2016-05-10: Version 5.2.275
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.4
+2016-05-10: Version 5.2.274
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.3
+2016-05-10: Version 5.2.273
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.2
+2016-05-10: Version 5.2.272
Performance and stability improvements on all platforms.
-2016-02-17: Version 5.0.1
+2016-05-10: Version 5.2.271
Performance and stability improvements on all platforms.
+2016-05-10: Version 5.2.270
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-10: Version 5.2.269
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-10: Version 5.2.268
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-10: Version 5.2.267
+
+ Various species micro-optimizations (Chromium issue 609739).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.266
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.265
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.264
+
+ Fix TypedArray Property optimizations (Chromium issue 593634).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.263
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.262
+
+ [V8] Add v8::Value::TypeOf to API (Chromium issue 595206).
+
+ Performance and stability improvements on all platforms.
+
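A one-line sketch of the new accessor (the isolate and value handles are
assumed to come from surrounding embedder code):

    v8::Local<v8::String> type = value->TypeOf(isolate);
    // yields the same string as the JS typeof operator, e.g. "number"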
+
+2016-05-09: Version 5.2.261
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.260
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.259
+
+ Expose IsConstructor to the C++ API (issue 4993).
+
+ Performance and stability improvements on all platforms.
+
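A sketch of the exposed check; it is assumed here to live on v8::Object,
alongside IsCallable():

    // given v8::Local<v8::Value> value from embedder code:
    if (value->IsObject() && value.As<v8::Object>()->IsConstructor()) {
      // safe to invoke via CallAsConstructor()/NewInstance();
      // e.g. arrow functions report false here
    }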
+
+2016-05-09: Version 5.2.258
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.257
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.256
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.255
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-09: Version 5.2.254
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-08: Version 5.2.253
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-07: Version 5.2.252
+
+ Fix Set::AsArray to not leave undefined holes in the output array
+ (issue 4946).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.251
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.250
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.249
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.248
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.247
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.246
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.245
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.244
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-06: Version 5.2.243
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.242
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.241
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.240
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.239
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.238
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.237
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.236
+
+ S390: Add HasProperty code stub that tries simple lookups or jumps to
+ runtime otherwise (issue 2743).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.235
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.234
+
+ Make array __proto__ manipulations not disturb the species protector
+ (Chromium issue 606207).
+
+ Add v8::Object::GetOwnPropertyNames(context, filter) method (issue 3861,
+ Chromium issue 581495).
+
+ Performance and stability improvements on all platforms.
+
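A sketch of the new overload (the obj and context handles are assumed);
this filter keeps own, enumerable, non-symbol keys:

    v8::Local<v8::Array> names;
    if (obj->GetOwnPropertyNames(
               context, static_cast<v8::PropertyFilter>(
                            v8::ONLY_ENUMERABLE | v8::SKIP_SYMBOLS))
            .ToLocal(&names)) {
      // names now holds the filtered own property names of obj
    }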
+
+2016-05-04: Version 5.2.233
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.232
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.231
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.230
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.229
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.228
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.227
+
+ MIPS64: Fix [turbofan] Length and index2 are unsigned in
+ CheckedLoad/CheckedStore (Chromium issue 599717).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.226
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.225
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.224
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.223
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.222
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.221
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.220
+
+ [wasm] Fix for 608630: allow proxies as FFI (Chromium issue 608630).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-04: Version 5.2.219
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-03: Version 5.2.218
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-03: Version 5.2.217
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-03: Version 5.2.216
+
+ [wasm] Disallow runtime calls in asm.js modules (Chromium issue 592352).
+
+ [API] Remove (deprecated) hidden properties.
+
+ [wasm] Fix bug with empty input to Wasm.instantiateModuleFromAsm()
+ (Chromium issue 605488).
+
+ Add HasProperty code stub that tries simple lookups or jumps to runtime
+ otherwise (issue 2743).
+
+ Fix ExternalTwobyteStringUtf16CharacterStream::PushBack(kEndOfInput)
+ (Chromium issue 607903).
+
+ Performance and stability improvements on all platforms.
+
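Among the paths this stub backs are the JS in operator and the
embedder-side membership check; a sketch with handles assumed:

    // given Local<Context> context, Local<Object> obj, Local<Value> key:
    v8::Maybe<bool> has = obj->Has(context, key);
    if (has.FromMaybe(false)) {
      // found on the object or somewhere on its prototype chain
    }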
+
+2016-05-03: Version 5.2.215
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-03: Version 5.2.214
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-03: Version 5.2.213
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-03: Version 5.2.212
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-03: Version 5.2.211
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.210
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.209
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.208
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.207
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.206
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.205
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.204
+
+ [debugger] Add JSProxy support in Runtime::GetInternalProperties
+ (Chromium issue 588705).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.203
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.202
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.201
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.200
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.199
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.198
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.197
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.196
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.195
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.194
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.193
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.192
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-02: Version 5.2.191
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-01: Version 5.2.190
+
+ Performance and stability improvements on all platforms.
+
+
+2016-05-01: Version 5.2.189
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-30: Version 5.2.188
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-30: Version 5.2.187
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-30: Version 5.2.186
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.185
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.184
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.183
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.182
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.181
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.180
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.179
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.178
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.177
+
+ Fix overflow issue in Zone::New (Chromium issue 606115).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.176
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.175
+
+ [wasm] Binary 11: Bump module version to 0xB. [wasm] Binary 11: Swap the
+ order of section name / section length. [wasm] Binary 11: Shorter
+ section names. [wasm] Binary 11: Add a prefix for function type
+ declarations. [wasm] Binary 11: Function types encoded as pcount, p*,
+ rcount, r*. [wasm] Fix numeric names for functions (Chromium issue
+ 575167).
+
+ [wasm] Binary 11: WASM AST is now postorder. [wasm] Binary 11: br_table
+ takes a value. [wasm] Binary 11: Add implicit blocks to if arms. [wasm]
+ Binary 11: Add arities to call, return, and breaks. [wasm] Binary 11:
+ Add experimental version (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-29: Version 5.2.174
+
+ Add checks for detached ArrayBuffers to ArrayBuffer.prototype.slice
+ (issue 4964).
+
+ Performance and stability improvements on all platforms.
+
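A sketch of the newly guarded situation; Neuter() is the 5.x-era name for
detaching a buffer, and the handle name is assumed:

    // given a v8::Local<v8::ArrayBuffer> ab over externalized memory:
    ab->Neuter();  // detach the buffer
    // a subsequent JS call ab.slice(0) now throws a TypeError rather
    // than touching the detached backing store (issue 4964)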
+
+2016-04-28: Version 5.2.173
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.172
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.171
+
+ Ship for-in initializer deprecation (issue 4942).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.170
+
+        Add GC request to libFuzzers in an attempt to avoid parasitic coverage
+ (Chromium issue 584819).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.169
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.168
+
+ Prevent unnecessary memory (de-)allocations in LiteralBuffer::CopyFrom
+ (issue 4947).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.167
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.166
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.165
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-28: Version 5.2.164
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.163
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.162
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.161
+
+        [esnext] Stage --harmony-string-padding (issue 4954).
+
+ Disallow generator declarations in certain locations (issue 4824).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.160
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.159
+
+ [api] Expose FunctionCallbackInfo::NewTarget (issue 4261).
+
+ Performance and stability improvements on all platforms.
+
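A sketch of a callback using the new accessor (the callback itself is
hypothetical):

    void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      if (info.NewTarget()->IsUndefined()) {
        // plain call: f(...)
      } else {
        // construct call: new f(...); NewTarget() is the new.target value
      }
    }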
+
+2016-04-27: Version 5.2.158
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.157
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.156
+
+ [es8] Report proper syntax error for tail call expressions in for-in and
+ for-of bodies (issue 4915).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.155
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.154
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.153
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.152
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-27: Version 5.2.151
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.150
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.149
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.148
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.147
+
+ Do not penalize performance when --harmony-species is off (Chromium
+ issue 606207).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.146
+
+ [es8] Initial set of changes to support syntactic tail calls (issue
+ 4915).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.145
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.144
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.143
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.142
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.141
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.140
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.139
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.138
+
+ [es6] Fix tail call elimination in single-expression arrow functions
+ (issue 4698).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-26: Version 5.2.137
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.136
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.135
+
+ Widen --harmony-for-in flag to throw errors in PreParser (issue 4942).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.134
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.133
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.132
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.131
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.130
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.129
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.128
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.127
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.126
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.125
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.124
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.123
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-25: Version 5.2.122
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-22: Version 5.2.121
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-22: Version 5.2.120
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-22: Version 5.2.119
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-22: Version 5.2.118
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-22: Version 5.2.117
+
+ [debugger] Hide scopes that originate from desugaring (Chromium issue
+ 604458).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-22: Version 5.2.116
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-22: Version 5.2.115
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.114
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.113
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.112
+
+ [ic] Restore PROPERTY key tracking in keyed ICs (Chromium issue 594183).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.111
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.110
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.109
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.108
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.107
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.106
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.105
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.104
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.103
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-21: Version 5.2.102
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.101
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.100
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.99
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.98
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.97
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.96
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.95
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.94
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.93
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.92
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.91
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.90
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.89
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.88
+
+ [turbofan] Length and index2 are unsigned in CheckedLoad/CheckedStore
+ (Chromium issue 599717).
+
+        Prevent un-parsed LiteralFunction from reaching the compiler (Chromium
+        issue 604044).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.87
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-20: Version 5.2.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.82
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.81
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.80
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.76
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.73
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.72
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.71
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.69
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.68
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-19: Version 5.2.66
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.65
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.64
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.62
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.61
+
+ [Atomics] Remove Atomics code stubs; use TF ops (issue 4614).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.59
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.58
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.56
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.50
+
+ [Atomics] Remove Atomics code stubs; use TF ops (issue 4614).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-04-18: Version 5.2.49
+
+ [api] Bring back finalizers on global handles.
+
+ Performance and stability improvements on all platforms.
+
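A sketch of a weak global handle with a finalizer, assuming the kFinalizer
weak-callback type this change restores (the State struct is hypothetical):

    struct State { /* embedder-owned data */ };

    void Finalize(const v8::WeakCallbackInfo<State>& data) {
      delete data.GetParameter();  // runs once the JS object is collected
    }

    // given v8::Isolate* isolate and v8::Local<v8::Object> obj:
    v8::Persistent<v8::Object> handle(isolate, obj);
    handle.SetWeak(new State, Finalize, v8::WeakCallbackType::kFinalizer);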
+
+2016-02-17: Sentinel
+
+ The ChangeLog file is no longer maintained on master. This
+ sentinel should stay on top of this list.
+
2016-02-17: Version 4.10.253
Performance and stability improvements on all platforms.
@@ -7268,12 +11197,6 @@
Performance and stability improvements on all platforms.
-2015-05-17: Sentinel
-
- The ChangeLog file is no longer maintained on bleeding_edge. This
- sentinel should stay on top of this list.
-
-
2015-05-17: Version 4.5.2
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 0559523283..6cac01d597 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -7,16 +7,28 @@ vars = {
}
deps = {
- "v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "4ec6c4e3a94bd04a6da2858163d40b2429b8aad1",
+ "v8/build":
+ Var("git_url") + "/chromium/src/build.git" + "@" + "59daf502c36f20b5c9292f4bd9af85791f8a5884",
+ "v8/tools/gyp":
+ Var("git_url") + "/external/gyp.git" + "@" + "702ac58e477214c635d9b541932e75a95d349352",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "c291cde264469b20ca969ce8832088acb21e0c48",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "2341038bf72869a5683a893a2b319a48ffec7f62",
+ "v8/third_party/instrumented_libraries":
+ Var("git_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "f15768d7fdf68c0748d20738184120c8ab2e6db7",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "80b5126f91be4eb359248d28696746ef09d5be67",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "adb8bf4e8fc92aa1717bf151b862d58e6f27c4f2",
"v8/base/trace_event/common":
- Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "c8c8665c2deaf1cc749d9f8e153256d4f67bf1b8",
+ Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "315bf1e2d45be7d53346c31cfcc37424a32c30c8",
+ "v8/third_party/WebKit/Source/platform/inspector_protocol":
+ Var("git_url") + "/chromium/src/third_party/WebKit/Source/platform/inspector_protocol.git" + "@" + "547960151fb364dd9a382fa79ffc9abfb184e3d1",
+ "v8/third_party/jinja2":
+ Var("git_url") + "/chromium/src/third_party/jinja2.git" + "@" + "2222b31554f03e62600cd7e383376a7c187967a1",
+ "v8/third_party/markupsafe":
+ Var("git_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
+ "v8/tools/mb":
+ Var('git_url') + '/chromium/src/tools/mb.git' + '@' + "99788b8b516c44d7db25cfb68695bc234fdee5ed",
"v8/tools/swarming_client":
- Var('git_url') + '/external/swarming.client.git' + '@' + "df6e95e7669883c8fe9ef956c69a544154701a49",
+ Var('git_url') + '/external/swarming.client.git' + '@' + "e4288c3040a32f2e7ad92f957668f2ee3d36e5a6",
"v8/testing/gtest":
Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -25,17 +37,19 @@ deps = {
Var("git_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
"v8/test/mozilla/data":
Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
- "v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
+ "v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
"v8/test/test262/data":
- Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "57d3e2216fa86ad63b6c0a54914ba9dcbff96003",
+ Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "88bc7fe7586f161201c5f14f55c9c489f82b1b67",
+ "v8/test/test262/harness":
+ Var("git_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "faee82e064e04e5cbf60cc7327e7a81d2a4557ad",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "3afb04a8153e40ff00f9eaa14337851c3ab4a368",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("git_url") + "/android_tools.git" + "@" + "adfd31794011488cd0fc716b53558b2d8a67af8b",
+ Var("git_url") + "/android_tools.git" + "@" + "af1c5a4cd6329ccdcf8c2bc93d9eea02f9d74869",
},
"win": {
"v8/third_party/cygwin":
@@ -43,6 +57,8 @@ deps_os = {
}
}
+recursedeps = [ 'v8/third_party/android_tools' ]
+
include_rules = [
# Everybody can use some things.
"+include",
@@ -53,6 +69,7 @@ include_rules = [
# checkdeps.py shouldn't check for includes in these directories:
skip_child_includes = [
"build",
+ "gypfiles",
"third_party",
]
@@ -65,7 +82,7 @@ hooks = [
'pattern': '.',
'action': [
'python',
- 'v8/build/landmines.py',
+ 'v8/gypfiles/landmines.py',
],
},
# Pull clang-format binaries using checked-in hashes.
@@ -187,10 +204,32 @@ hooks = [
],
},
{
+ # Downloads the current stable linux sysroot to build/linux/ if needed.
+ # This sysroot updates at about the same rate that the chrome build deps
+ # change.
+ 'name': 'sysroot',
+ 'pattern': '.',
+ 'action': [
+ 'python',
+ 'v8/build/linux/sysroot_scripts/install-sysroot.py',
+ '--running-as-hook',
+ ],
+ },
+ {
+ # Pull sanitizer-instrumented third-party libraries if requested via
+ # GYP_DEFINES.
+ 'name': 'instrumented_libraries',
+ 'pattern': '\\.sha1',
+ 'action': [
+ 'python',
+ 'v8/third_party/instrumented_libraries/scripts/download_binaries.py',
+ ],
+ },
+ {
# Update the Windows toolchain if necessary.
'name': 'win_toolchain',
'pattern': '.',
- 'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
+ 'action': ['python', 'v8/gypfiles/vs_toolchain.py', 'update'],
},
# Pull binutils for linux, enabling debug fission for faster linking /
# debugging when used with clang on Ubuntu Precise.
@@ -208,7 +247,7 @@ hooks = [
# Note: This must run before the clang update.
'name': 'gold_plugin',
'pattern': '.',
- 'action': ['python', 'v8/build/download_gold_plugin.py'],
+ 'action': ['python', 'v8/gypfiles/download_gold_plugin.py'],
},
{
# Pull clang if needed or requested via GYP_DEFINES.
@@ -220,6 +259,6 @@ hooks = [
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",
- "action": ["python", "v8/build/gyp_v8"],
+ "action": ["python", "v8/gypfiles/gyp_v8"],
},
]
diff --git a/deps/v8/src/third_party/fdlibm/LICENSE b/deps/v8/LICENSE.fdlibm
index b54cb52278..b54cb52278 100644
--- a/deps/v8/src/third_party/fdlibm/LICENSE
+++ b/deps/v8/LICENSE.fdlibm
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index a0c08a6d96..a6d4d135da 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -33,7 +33,6 @@ GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_HOST_ARCH ?=
ANDROID_V8 ?= /data/local/tmp/v8
-NACL_SDK_ROOT ?=
# Special build flags. Use them like this: "make library=shared"
@@ -122,10 +121,6 @@ endif
ifeq ($(werror), no)
GYPFLAGS += -Dwerror=''
endif
-# presubmit=no
-ifeq ($(presubmit), no)
- TESTFLAGS += --no-presubmit
-endif
# strictaliasing=off (workaround for GCC-4.5)
ifeq ($(strictaliasing), off)
GYPFLAGS += -Dv8_no_strict_aliasing=1
@@ -227,6 +222,11 @@ ifeq ($(no_omit_framepointer), on)
GYPFLAGS += -Drelease_extra_cflags=-fno-omit-frame-pointer
endif
+ifdef android_ndk_root
+ GYPFLAGS += -Dandroid_ndk_root=$(android_ndk_root)
+ export ANDROID_NDK_ROOT = $(android_ndk_root)
+endif
+
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
@@ -235,7 +235,6 @@ endif
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - "android": cross-compile for Android/ARM
-# - "nacl" : cross-compile for Native Client (ia32 and x64)
# - default (no target specified): build all DEFAULT_ARCHES and MODES
# - "check": build all targets and run all tests
# - "<arch>.clean" for any <arch> in ARCHES
@@ -245,21 +244,22 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 \
- s390 s390x
+ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 s390 \
+ s390x
+ARCHES32 = ia32 arm mips mipsel x87 ppc s390
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
-NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
- build/shim_headers.gypi build/features.gypi build/standalone.gypi \
- build/toolchain.gypi build/all.gyp build/mac/asan.gyp \
+ gypfiles/shim_headers.gypi gypfiles/features.gypi \
+ gypfiles/standalone.gypi \
+ gypfiles/toolchain.gypi gypfiles/all.gyp gypfiles/mac/asan.gyp \
test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \
- test/unittests/unittests.gyp tools/gyp/v8.gyp \
+ test/unittests/unittests.gyp src/v8.gyp \
tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
buildtools/third_party/libc++abi/libc++abi.gyp \
buildtools/third_party/libc++/libc++.gyp samples/samples.gyp \
@@ -273,13 +273,10 @@ endif
BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
-NACL_BUILDS = $(foreach mode,$(MODES), \
- $(addsuffix .$(mode),$(NACL_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
QUICKCHECKS = $(addsuffix .quickcheck,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
-NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
@@ -288,9 +285,7 @@ ENVFILE = $(OUTDIR)/environment
$(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
- $(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
- $(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \
- must-set-NACL_SDK_ROOT
+ $(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS)
# Target definitions. "all" is the default.
all: $(DEFAULT_MODES)
@@ -329,16 +324,6 @@ $(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
-$(NACL_ARCHES): $(addprefix $$@.,$(MODES))
-
-$(NACL_BUILDS): $(GYPFILES) $(ENVFILE) \
- Makefile.nacl must-set-NACL_SDK_ROOT
- @$(MAKE) -f Makefile.nacl $@ \
- ARCH="$(basename $@)" \
- MODE="$(subst .,,$(suffix $@))" \
- OUTDIR="$(OUTDIR)" \
- GYPFLAGS="$(GYPFLAGS)"
-
# Test targets.
check: all
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
@@ -382,15 +367,6 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
-$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@)
- @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
- --arch-and-mode=$(basename $@) \
- --timeout=600 --nopresubmit --noi18n \
- --command-prefix="tools/nacl-run.py"
-
-$(addsuffix .check, $(NACL_ARCHES)): \
- $(addprefix $$(basename $$@).,$(MODES)).check
-
native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
@@ -420,7 +396,7 @@ turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
tc: turbocheck
# Clean targets. You can clean each architecture individually, or everything.
-$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
+$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)*
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
@@ -432,7 +408,7 @@ native.clean:
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete
-clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean gtags.clean
+clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean gtags.clean tags.clean
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
@@ -441,34 +417,28 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
cut -f 2 -d " " | cut -f 1 -d "-" ))
$(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
+ $(eval CXX_TARGET_ARCH:=$(subst s390x,s390,$(CXX_TARGET_ARCH)))
+ $(eval CXX_TARGET_ARCH:=$(subst powerpc,ppc,$(CXX_TARGET_ARCH)))
+ $(eval CXX_TARGET_ARCH:=$(subst ppc64,ppc,$(CXX_TARGET_ARCH)))
+ $(eval CXX_TARGET_ARCH:=$(subst ppcle,ppc,$(CXX_TARGET_ARCH)))
$(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@))))
- PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
+ PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
- build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
- -Ibuild/standalone.gypi --depth=. \
+ tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
+ -Igypfiles/standalone.gypi --depth=. \
-Dv8_target_arch=$(V8_TARGET_ARCH) \
$(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \
- -Dtarget_arch=$(V8_TARGET_ARCH),) \
+ -Dtarget_arch=$(V8_TARGET_ARCH), \
+ $(if $(shell echo $(ARCHES32) | grep $(V8_TARGET_ARCH)), \
+ -Dtarget_arch=ia32,)) \
$(if $(findstring optdebug,$@),-Dv8_optimized_debug=1,) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
- PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
+ PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
- build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
- -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
-
-# Note that NACL_SDK_ROOT must be set to point to an appropriate
-# Native Client SDK before using this makefile. You can download
-# an SDK here:
-# https://developers.google.com/native-client/sdk/download
-# The path indicated by NACL_SDK_ROOT will typically end with
-# a folder for a pepper version such as "pepper_25" that should
-# have "tools" and "toolchain" subdirectories.
-must-set-NACL_SDK_ROOT:
-ifndef NACL_SDK_ROOT
- $(error NACL_SDK_ROOT must be set)
-endif
+ tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
+ -Igypfiles/standalone.gypi --depth=. -S.native $(GYPFLAGS)
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
@@ -497,11 +467,21 @@ gtags.files: $(GYPFILES) $(ENVFILE)
# We need to manually set the stack limit here, to work around bugs in
# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
-GPATH GRTAGS GSYMS GTAGS: gtags.files $(shell cat gtags.files 2> /dev/null)
+# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
+# gtags.files after switching branches don't cause recipe failures.
+GPATH GRTAGS GSYMS GTAGS: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
@bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
gtags.clean:
rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
+tags: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
+ @(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
+ (echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
+ ctags --fields=+l -L $<
+
+tags.clean:
+	rm -rf tags
+
dependencies builddeps:
$(error Use 'gclient sync' instead)
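A note on the $(wildcard ...) guard used in the gtags/tags rules above:
$(shell cat ...) expands the stored file list, and $(wildcard ...) then drops
any entries that no longer exist, so a stale list cannot abort the recipe. A
minimal standalone sketch of the pattern (file names hypothetical):

# files.txt names prerequisites, one per line; entries deleted since the
# list was written are filtered out by $(wildcard ...), so make never fails
# with "No rule to make target 'missing-file'".
index: files.txt $(wildcard $(shell cat files.txt 2> /dev/null))
	@echo "indexing $(words $(filter-out files.txt,$^)) existing files"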
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
index c49cb85b9b..417152177d 100644
--- a/deps/v8/Makefile.android
+++ b/deps/v8/Makefile.android
@@ -66,7 +66,7 @@ ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
- PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
- build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
- -Ibuild/standalone.gypi --depth=. \
+ PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH)" \
+ tools/gyp/gyp --generator-output="${OUTDIR}" gypfiles/all.gyp \
+ -Igypfiles/standalone.gypi --depth=. \
-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}
diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl
deleted file mode 100644
index 3459c42c0d..0000000000
--- a/deps/v8/Makefile.nacl
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Those definitions should be consistent with the main Makefile
-NACL_ARCHES = nacl_ia32 nacl_x64
-MODES = release debug
-
-# Generates all combinations of NACL ARCHES and MODES,
-# e.g. "nacl_ia32.release" or "nacl_x64.release"
-NACL_BUILDS = $(foreach mode,$(MODES), \
- $(addsuffix .$(mode),$(NACL_ARCHES)))
-
-HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
-TOOLCHAIN_PATH = $(realpath ${NACL_SDK_ROOT}/toolchain)
-NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/linux_pnacl
-
-ifeq ($(wildcard $(NACL_TOOLCHAIN)),)
- $(error Cannot find Native Client toolchain in "${NACL_TOOLCHAIN}")
-endif
-
-ifeq ($(ARCH), nacl_ia32)
- GYPENV = nacl_target_arch=nacl_ia32 v8_target_arch=arm v8_host_arch=ia32
- NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang"
- NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++"
- NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-32"
-else
- ifeq ($(ARCH), nacl_x64)
- GYPENV = nacl_target_arch=nacl_x64 v8_target_arch=arm v8_host_arch=ia32
- NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang"
- NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++"
- NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-64"
- else
- $(error Target architecture "${ARCH}" is not supported)
- endif
-endif
-
-# For mksnapshot host generation.
-GYPENV += host_os=${HOST_OS}
-
-# ICU doesn't support NaCl.
-GYPENV += v8_enable_i18n_support=0
-
-# Disable strict aliasing - v8 code often relies on undefined behavior of C++.
-GYPENV += v8_no_strict_aliasing=1
-
-NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS))
-.SECONDEXPANSION:
-# For some reason the $$(basename $$@) expansion didn't work here...
-$(NACL_BUILDS): $(NACL_MAKEFILES)
- @$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
- CC=${NACL_CC} \
- CXX=${NACL_CXX} \
- AR="$(NACL_TOOLCHAIN)/bin/pnacl-ar" \
- RANLIB="$(NACL_TOOLCHAIN)/bin/pnacl-ranlib" \
- LD="$(NACL_TOOLCHAIN)/bin/pnacl-ld" \
- LINK=${NACL_LINK} \
- BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
- python -c "print raw_input().capitalize()") \
- builddir="$(shell pwd)/$(OUTDIR)/$@"
-
-# NACL GYP file generation targets.
-$(NACL_MAKEFILES):
- GYP_GENERATORS=make \
- GYP_DEFINES="${GYPENV}" \
- CC=${NACL_CC} \
- CXX=${NACL_CXX} \
- LINK=${NACL_LINK} \
- PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
- build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
- -Ibuild/standalone.gypi --depth=. \
- -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \
- -Dwno_array_bounds=-Wno-array-bounds
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index 3f2caecd49..26701eef59 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -1,6 +1,9 @@
adamk@chromium.org
ahaas@chromium.org
+bbudge@chromium.org
+binji@chromium.org
bmeurer@chromium.org
+bradnelson@chromium.org
cbruni@chromium.org
danno@chromium.org
epertoso@chromium.org
@@ -15,6 +18,7 @@ machenbach@chromium.org
marja@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
+mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index f8516afc44..5255ca11fa 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -223,6 +223,8 @@ def _CommonChecks(input_api, output_api):
input_api, output_api, source_file_filter=None))
results.extend(input_api.canned_checks.CheckPatchFormatted(
input_api, output_api))
+ results.extend(input_api.canned_checks.CheckGenderNeutral(
+ input_api, output_api))
results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(
@@ -242,32 +244,15 @@ def _SkipTreeCheck(input_api, output_api):
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
-def _CheckChangeLogFlag(input_api, output_api, warn):
- """Checks usage of LOG= flag in the commit message."""
- results = []
- if (input_api.change.BUG and input_api.change.BUG != 'none' and
- not 'LOG' in input_api.change.tags):
- text = ('An issue reference (BUG=) requires a change log flag (LOG=). '
- 'Use LOG=Y for including this commit message in the change log. '
- 'Use LOG=N or leave blank otherwise.')
- if warn:
- results.append(output_api.PresubmitPromptWarning(text))
- else:
- results.append(output_api.PresubmitError(text))
- return results
-
-
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
- results.extend(_CheckChangeLogFlag(input_api, output_api, True))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
- results.extend(_CheckChangeLogFlag(input_api, output_api, False))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
if not _SkipTreeCheck(input_api, output_api):
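For context on the checks being rearranged above: a presubmit check is just a
function taking input_api/output_api and returning a list of warning or error
objects, which is why _CheckChangeLogFlag could be dropped without touching
the callers' structure. A minimal sketch of a check in the same shape (the
check itself is hypothetical; the API calls are the ones this file already
uses):

def _CheckNoFixmeInDescription(input_api, output_api, warn):
  """Flags FIXME markers left in the commit description."""
  results = []
  if 'FIXME' in input_api.change.DescriptionText():
    text = 'Commit description still contains a FIXME marker.'
    if warn:
      results.append(output_api.PresubmitPromptWarning(text))
    else:
      results.append(output_api.PresubmitError(text))
  return results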
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index 29b957b091..bd07d318da 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -44,7 +44,6 @@
},
'interpreter': {
'filepath': 'src/interpreter/' \
- '|src/compiler/interpreter' \
'|src/compiler/bytecode' \
'|test/cctest/interpreter/' \
'|test/unittests/interpreter/',
@@ -60,6 +59,9 @@
},
'ia32': {
'filepath': '/ia32/',
+ },
+ 'merges': {
+ 'filepath': '.',
}
},
@@ -91,5 +93,9 @@
'ia32': [
'v8-x87-ports@googlegroups.com',
],
+ 'merges': [
+ # Only enabled on branches created with tools/release/create_release.py
+ 'v8-merges@googlegroups.com',
+ ],
},
}
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 7a1533ed82..0c16e7b723 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -612,6 +612,13 @@
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@@ -701,6 +708,13 @@
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \
+ timestamp, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -928,12 +942,8 @@
// Special trace event macro to trace task execution with the location where it
// was posted from.
-#define TRACE_TASK_EXECUTION(run_function, task) \
- TRACE_EVENT2("toplevel", run_function, "src_file", \
- (task).posted_from.file_name(), "src_func", \
- (task).posted_from.function_name()); \
- TRACE_EVENT_API_SCOPED_TASK_EXECUTION_EVENT INTERNAL_TRACE_EVENT_UID( \
- task_event)((task).posted_from.file_name());
+#define TRACE_TASK_EXECUTION(run_function, task) \
+ INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
// TRACE_EVENT_METADATA* events are information related to other
// injected events, not events in their own right.
@@ -991,6 +1001,17 @@
INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
TRACE_ID_DONT_MANGLE(context))
+// Macro to specify that two trace IDs are identical. For example,
+// TRACE_BIND_IDS(
+// "category", "name",
+// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
+// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
+// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
+// the current process have the same ID as events with ID
+// ("blink::ResourceFetcher::FetchRequest", 0x2000).
+#define TRACE_BIND_IDS(category_group, name, id, bind_id) \
+ INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id);
+
// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
do { \
@@ -1056,6 +1077,7 @@
#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
+#define TRACE_EVENT_PHASE_BIND_IDS ('=')
// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
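The two ..._WITH_TIMESTAMP2 variants added above follow the file's naming
scheme, where the trailing digit is the number of name/value argument pairs.
Usage would look roughly like this (all identifiers are illustrative only):

TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(
    "v8", "V8.Task", task_id, start_timestamp,
    "src_file", file_name, "src_func", function_name);
// ... later, matched to the begin event by category/name/id ...
TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(
    "v8", "V8.Task", task_id, end_timestamp,
    "status", status_string, "duration_ms", elapsed_ms);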
diff --git a/deps/v8/build/has_valgrind.py b/deps/v8/build/has_valgrind.py
deleted file mode 100755
index 83a848d50b..0000000000
--- a/deps/v8/build/has_valgrind.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-
-BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-VALGRIND_DIR = os.path.join(BASE_DIR, 'third_party', 'valgrind')
-LINUX32_DIR = os.path.join(VALGRIND_DIR, 'linux_x86')
-LINUX64_DIR = os.path.join(VALGRIND_DIR, 'linux_x64')
-
-
-def DoMain(_):
- """Hook to be called from gyp without starting a separate python
- interpreter."""
- return int(os.path.exists(LINUX32_DIR) and os.path.exists(LINUX64_DIR))
-
-
-if __name__ == '__main__':
- print DoMain([])
diff --git a/deps/v8/build_overrides/build.gni b/deps/v8/build_overrides/build.gni
new file mode 100644
index 0000000000..da6d3e0ded
--- /dev/null
+++ b/deps/v8/build_overrides/build.gni
@@ -0,0 +1,18 @@
+# Copyright 2016 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mac_sdk_min_build_override = "10.10"
+mac_deployment_target_build_override = "10.7"
+
+# Variable that can be used to support multiple build scenarios, like having
+# Chromium-specific targets in a client project's GN file, etc.
+build_with_chromium = false
+
+# Uncomment these to specify a different NDK location and version in
+# non-Chromium builds.
+# default_android_ndk_root = "//third_party/android_tools/ndk"
+# default_android_ndk_version = "r10e"
+
+# Some non-Chromium builds don't support building java targets.
+enable_java_templates = false
diff --git a/deps/v8/build_overrides/gtest.gni b/deps/v8/build_overrides/gtest.gni
new file mode 100644
index 0000000000..54c16b149b
--- /dev/null
+++ b/deps/v8/build_overrides/gtest.gni
@@ -0,0 +1,15 @@
+# Copyright 2016 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Exclude support for registering main function in multi-process tests.
+gtest_include_multiprocess = false
+
+# Exclude support for platform-specific operations across unit tests.
+gtest_include_platform_test = false
+
+# Exclude support for testing Objective C code on OS X and iOS.
+gtest_include_objc_support = false
+
+# Exclude support for flushing coverage files on iOS.
+gtest_include_ios_coverage = false
diff --git a/deps/v8/build_overrides/v8.gni b/deps/v8/build_overrides/v8.gni
new file mode 100644
index 0000000000..fc4a70e579
--- /dev/null
+++ b/deps/v8/build_overrides/v8.gni
@@ -0,0 +1,26 @@
+# Copyright 2015 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/features.gni")
+import("//build/config/ui.gni")
+import("//build/config/v8_target_cpu.gni")
+import("//gni/v8.gni")
+
+if (is_android) {
+ import("//build/config/android/config.gni")
+}
+
+if (((v8_current_cpu == "x86" ||
+ v8_current_cpu == "x64" ||
+      v8_current_cpu == "x87") &&
+ (is_linux || is_mac)) ||
+ (v8_current_cpu == "ppc64" && is_linux)) {
+ v8_enable_gdbjit_default = true
+}
+
+v8_imminent_deprecation_warnings_default = true
+
+# Add simple extras solely for the purpose of the cctests.
+v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
+v8_experimental_extra_library_files = [ "//test/cctest/test-experimental-extra.js" ]
diff --git a/deps/v8/gni/isolate.gni b/deps/v8/gni/isolate.gni
new file mode 100644
index 0000000000..93c828d2cd
--- /dev/null
+++ b/deps/v8/gni/isolate.gni
@@ -0,0 +1,175 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/sanitizers/sanitizers.gni")
+import("//third_party/icu/config.gni")
+import("v8.gni")
+
+declare_args() {
+ # Sets the test isolation mode (noop|prepare|check).
+ v8_test_isolation_mode = "noop"
+}
+
+template("v8_isolate_run") {
+ # Remember target name as within the action scope the target name will be
+ # different.
+ name = target_name
+ if (name != "" && invoker.isolate != "" && invoker.deps != [] &&
+ v8_test_isolation_mode != "noop") {
+ action(name + "_run") {
+ testonly = true
+
+ deps = invoker.deps
+
+ script = "//tools/isolate_driver.py"
+
+ sources = [
+ invoker.isolate,
+ ]
+
+ inputs = [
+ # Files that are known to be involved in this step.
+ "//tools/swarming_client/isolate.py",
+ "//tools/swarming_client/run_isolated.py",
+ ]
+
+ if (v8_test_isolation_mode == "prepare") {
+ outputs = [
+ "$root_out_dir/$name.isolated.gen.json",
+ ]
+ } else if (v8_test_isolation_mode == "check") {
+ outputs = [
+ "$root_out_dir/$name.isolated",
+ "$root_out_dir/$name.isolated.state",
+ ]
+ }
+
+ # Translate gn to gyp variables.
+ if (is_asan) {
+ asan = "1"
+ } else {
+ asan = "0"
+ }
+ if (is_msan) {
+ msan = "1"
+ } else {
+ msan = "0"
+ }
+ if (is_tsan) {
+ tsan = "1"
+ } else {
+ tsan = "0"
+ }
+ if (is_cfi) {
+ cfi_vptr = "1"
+ } else {
+ cfi_vptr = "0"
+ }
+ if (target_cpu == "x86") {
+ target_arch = "ia32"
+ } else {
+ target_arch = target_cpu
+ }
+ if (is_debug) {
+ configuration_name = "Debug"
+ } else {
+ configuration_name = "Release"
+ }
+ if (is_component_build) {
+ component = "shared_library"
+ } else {
+ component = "static_library"
+ }
+ if (icu_use_data_file) {
+ icu_use_data_file_flag = "1"
+ } else {
+ icu_use_data_file_flag = "0"
+ }
+ if (v8_use_external_startup_data) {
+ use_external_startup_data = "1"
+ } else {
+ use_external_startup_data = "0"
+ }
+ if (v8_use_snapshot) {
+ use_snapshot = "true"
+ } else {
+ use_snapshot = "false"
+ }
+ if (v8_has_valgrind) {
+ has_valgrind = "1"
+ } else {
+ has_valgrind = "0"
+ }
+ if (v8_gcmole) {
+ gcmole = "1"
+ } else {
+ gcmole = "0"
+ }
+
+ # Note, all paths will be rebased in isolate_driver.py to be relative to
+ # the isolate file.
+ args = [
+ v8_test_isolation_mode,
+ "--isolated",
+ rebase_path("$root_out_dir/$name.isolated", root_build_dir),
+ "--isolate",
+ rebase_path(invoker.isolate, root_build_dir),
+
+ # Path variables are used to replace file paths when loading a .isolate
+ # file
+ "--path-variable",
+ "DEPTH",
+ rebase_path("//", root_build_dir),
+ "--path-variable",
+ "PRODUCT_DIR",
+ rebase_path(root_out_dir, root_build_dir),
+
+ # TODO(machenbach): Set variables for remaining features.
+ "--config-variable",
+ "CONFIGURATION_NAME=$configuration_name",
+ "--config-variable",
+ "OS=$target_os",
+ "--config-variable",
+ "asan=$asan",
+ "--config-variable",
+ "cfi_vptr=$cfi_vptr",
+ "--config-variable",
+ "gcmole=$gcmole",
+ "--config-variable",
+ "has_valgrind=$has_valgrind",
+ "--config-variable",
+ "icu_use_data_file_flag=$icu_use_data_file_flag",
+ "--config-variable",
+ "msan=$msan",
+ "--config-variable",
+ "tsan=$tsan",
+ "--config-variable",
+ "coverage=0",
+ "--config-variable",
+ "sanitizer_coverage=0",
+ "--config-variable",
+ "component=$component",
+ "--config-variable",
+ "target_arch=$target_arch",
+ "--config-variable",
+ "v8_use_external_startup_data=$use_external_startup_data",
+ "--config-variable",
+ "v8_use_snapshot=$use_snapshot",
+ ]
+
+ if (is_win) {
+ args += [
+ "--config-variable",
+ "msvs_version=2013",
+ ]
+ } else {
+ args += [
+ "--config-variable",
+ "msvs_version=0",
+ ]
+ }
+ }
+ }
+}
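A hypothetical caller of the template defined above: it supplies the two
inputs the template checks (deps and isolate) and, whenever
v8_test_isolation_mode != "noop", produces an action target named
"cctest_run":

v8_isolate_run("cctest") {
  deps = [ ":cctest" ]
  isolate = "cctest.isolate"
}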
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
new file mode 100644
index 0000000000..7ff7f6fb89
--- /dev/null
+++ b/deps/v8/gni/v8.gni
@@ -0,0 +1,108 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/sanitizers/sanitizers.gni")
+import("//build/config/v8_target_cpu.gni")
+
+declare_args() {
+  # Indicate if valgrind was fetched as a custom dep to make it available on
+  # swarming.
+ v8_has_valgrind = false
+
+ # Indicate if gcmole was fetched as a hook to make it available on swarming.
+ v8_gcmole = false
+
+ # Turns on compiler optimizations in V8 in Debug build.
+ v8_optimized_debug = true
+
+ # Support for backtrace_symbols on linux.
+ v8_enable_backtrace = ""
+
+ # Enable the snapshot feature, for fast context creation.
+ # http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
+ v8_use_snapshot = true
+
+ # Use external files for startup data blobs:
+ # the JS builtins sources and the start snapshot.
+ v8_use_external_startup_data = ""
+}
+
+if (v8_use_external_startup_data == "") {
+ # If not specified as a gn arg, use external startup data by default if
+ # a snapshot is used and if we're not on ios.
+ v8_use_external_startup_data = v8_use_snapshot && !is_ios
+}
+
+if (v8_enable_backtrace == "") {
+ v8_enable_backtrace = is_debug && !v8_optimized_debug
+}
+
+###############################################################################
+# Templates
+#
+
+# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
+# paths for all configs in templates as they are shared in different
+# subdirectories.
+v8_path_prefix = get_path_info("../", "abspath")
+
+# Common configs to remove or add in all v8 targets.
+v8_remove_configs = [ "//build/config/compiler:chromium_code" ]
+v8_add_configs = [
+ "//build/config/compiler:no_chromium_code",
+ v8_path_prefix + ":features",
+ v8_path_prefix + ":toolchain",
+]
+
+if (is_debug && !v8_optimized_debug) {
+ v8_remove_configs += [ "//build/config/compiler:default_optimization" ]
+ v8_add_configs += [ "//build/config/compiler:no_optimize" ]
+} else {
+ v8_remove_configs += [ "//build/config/compiler:default_optimization" ]
+
+ # TODO(crbug.com/621335) Rework this so that we don't have the confusion
+ # between "optimize_speed" and "optimize_max".
+ if (is_posix && !is_android && !using_sanitizer) {
+ v8_add_configs += [ "//build/config/compiler:optimize_speed" ]
+ } else {
+ v8_add_configs += [ "//build/config/compiler:optimize_max" ]
+ }
+}
+
+if (is_posix && v8_enable_backtrace) {
+ v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ]
+ v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
+}
+
+# All templates should be kept in sync.
+template("v8_source_set") {
+ source_set(target_name) {
+ forward_variables_from(invoker, "*", [ "configs" ])
+ configs += invoker.configs
+ configs -= v8_remove_configs
+ configs += v8_add_configs
+ }
+}
+
+template("v8_executable") {
+ executable(target_name) {
+ forward_variables_from(invoker, "*", [ "configs" ])
+ configs += invoker.configs
+ configs -= v8_remove_configs
+ configs += v8_add_configs
+ if (is_linux) {
+ # For enabling ASLR.
+ ldflags = [ "-pie" ]
+ }
+ }
+}
+
+template("v8_component") {
+ component(target_name) {
+ forward_variables_from(invoker, "*", [ "configs" ])
+ configs += invoker.configs
+ configs -= v8_remove_configs
+ configs += v8_add_configs
+ }
+}
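The three wrappers above exist so that every V8 target receives the same
config adjustments. A hypothetical target uses them exactly like the built-in
rules, passing its own configs for the template to forward before
v8_remove_configs/v8_add_configs are applied:

v8_source_set("sample_base") {
  sources = [ "sample-base.cc" ]
  configs = [ ":internal_config_base" ]
}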
diff --git a/deps/v8/build/OWNERS b/deps/v8/gypfiles/OWNERS
index 0e56af3129..0e56af3129 100644
--- a/deps/v8/build/OWNERS
+++ b/deps/v8/gypfiles/OWNERS
diff --git a/deps/v8/build/README.txt b/deps/v8/gypfiles/README.txt
index 5f242ada34..5f242ada34 100644
--- a/deps/v8/build/README.txt
+++ b/deps/v8/gypfiles/README.txt
diff --git a/deps/v8/build/all.gyp b/deps/v8/gypfiles/all.gyp
index feaf4feccc..6b4ef82d69 100644
--- a/deps/v8/build/all.gyp
+++ b/deps/v8/gypfiles/all.gyp
@@ -8,11 +8,7 @@
'target_name': 'All',
'type': 'none',
'dependencies': [
- '../samples/samples.gyp:*',
'../src/d8.gyp:d8',
- '../test/cctest/cctest.gyp:*',
- '../test/fuzzer/fuzzer.gyp:*',
- '../test/unittests/unittests.gyp:*',
],
'conditions': [
['component!="shared_library"', {
@@ -20,12 +16,20 @@
'../tools/parser-shell.gyp:parser-shell',
],
}],
+ # These items don't compile for Android on Mac.
+ ['host_os!="mac" or OS!="android"', {
+ 'dependencies': [
+ '../samples/samples.gyp:*',
+ '../test/cctest/cctest.gyp:*',
+ '../test/fuzzer/fuzzer.gyp:*',
+ '../test/unittests/unittests.gyp:*',
+ ],
+ }],
['test_isolation_mode != "noop"', {
'dependencies': [
'../test/bot_default.gyp:*',
'../test/benchmarks/benchmarks.gyp:*',
'../test/default.gyp:*',
- '../test/ignition.gyp:*',
'../test/intl/intl.gyp:*',
'../test/message/message.gyp:*',
'../test/mjsunit/mjsunit.gyp:*',
diff --git a/deps/v8/build/config/win/msvs_dependencies.isolate b/deps/v8/gypfiles/config/win/msvs_dependencies.isolate
index ff92227363..ff92227363 100644
--- a/deps/v8/build/config/win/msvs_dependencies.isolate
+++ b/deps/v8/gypfiles/config/win/msvs_dependencies.isolate
diff --git a/deps/v8/build/coverage_wrapper.py b/deps/v8/gypfiles/coverage_wrapper.py
index 5b365d8e63..5b365d8e63 100755
--- a/deps/v8/build/coverage_wrapper.py
+++ b/deps/v8/gypfiles/coverage_wrapper.py
diff --git a/deps/v8/build/detect_v8_host_arch.py b/deps/v8/gypfiles/detect_v8_host_arch.py
index 89e8286e1f..89e8286e1f 100644
--- a/deps/v8/build/detect_v8_host_arch.py
+++ b/deps/v8/gypfiles/detect_v8_host_arch.py
diff --git a/deps/v8/build/download_gold_plugin.py b/deps/v8/gypfiles/download_gold_plugin.py
index b8131fd449..b8131fd449 100755
--- a/deps/v8/build/download_gold_plugin.py
+++ b/deps/v8/gypfiles/download_gold_plugin.py
diff --git a/deps/v8/build/features.gypi b/deps/v8/gypfiles/features.gypi
index 5a21a63e32..5a21a63e32 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/gypfiles/features.gypi
diff --git a/deps/v8/build/get_landmines.py b/deps/v8/gypfiles/get_landmines.py
index 2bbf7a61bf..9fcca4b968 100755
--- a/deps/v8/build/get_landmines.py
+++ b/deps/v8/gypfiles/get_landmines.py
@@ -27,6 +27,7 @@ def main():
print 'Switching to pinned msvs toolchain.'
print 'Clobbering to hopefully resolve problem with mksnapshot'
print 'Clobber after ICU roll.'
+ print 'Clobber after Android NDK update.'
return 0
diff --git a/deps/v8/build/gyp_environment.py b/deps/v8/gypfiles/gyp_environment.py
index 7a4e622148..76ae841ffb 100644
--- a/deps/v8/build/gyp_environment.py
+++ b/deps/v8/gypfiles/gyp_environment.py
@@ -31,6 +31,7 @@ def apply_gyp_environment(file_path=None):
supported_vars = ( 'V8_GYP_FILE',
'V8_GYP_SYNTAX_CHECK',
'GYP_DEFINES',
+ 'GYP_GENERATORS',
'GYP_GENERATOR_FLAGS',
'GYP_GENERATOR_OUTPUT', )
for var in supported_vars:
@@ -51,4 +52,9 @@ def set_environment():
# Update the environment based on v8.gyp_env
gyp_env_path = os.path.join(os.path.dirname(V8_ROOT), 'v8.gyp_env')
apply_gyp_environment(gyp_env_path)
+
+ if not os.environ.get('GYP_GENERATORS'):
+ # Default to ninja on all platforms.
+ os.environ['GYP_GENERATORS'] = 'ninja'
+
vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()
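With the change above, an unset GYP_GENERATORS now defaults to ninja; the
make-based flow keeps working only because the Makefile (earlier in this
diff) exports GYP_GENERATORS=make explicitly. Roughly:

$ gypfiles/gyp_v8                       # ninja files, the new default
$ GYP_GENERATORS=make gypfiles/gyp_v8   # make files, now opt-in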
diff --git a/deps/v8/build/gyp_v8 b/deps/v8/gypfiles/gyp_v8
index 8813f2c121..8be39d9615 100755
--- a/deps/v8/build/gyp_v8
+++ b/deps/v8/gypfiles/gyp_v8
@@ -43,7 +43,7 @@ import vs_toolchain
script_dir = os.path.dirname(os.path.realpath(__file__))
v8_root = os.path.abspath(os.path.join(script_dir, os.pardir))
-sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
+sys.path.insert(0, os.path.join(v8_root, 'tools', 'gyp', 'pylib'))
import gyp
# Add paths so that pymod_do_main(...) can import files.
@@ -90,7 +90,7 @@ def additional_include_files(args=[]):
result.append(path)
# Always include standalone.gypi
- AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
+ AddInclude(os.path.join(v8_root, 'gypfiles', 'standalone.gypi'))
# Optionally add supplemental .gypi files if present.
supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@@ -118,6 +118,10 @@ def run_gyp(args):
if __name__ == '__main__':
args = sys.argv[1:]
+ if int(os.environ.get('GYP_CHROMIUM_NO_ACTION', 0)):
+ print 'Skipping gyp_v8 due to GYP_CHROMIUM_NO_ACTION env var.'
+ sys.exit(0)
+
gyp_environment.set_environment()
# This could give false positives since it doesn't actually do real option
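The early exit added above mirrors Chromium's gyp wrappers: setting the
variable turns the script into a no-op, which is useful when GN drives the
build and the gyp hook should do nothing. Illustrative invocation:

$ GYP_CHROMIUM_NO_ACTION=1 gypfiles/gyp_v8
Skipping gyp_v8 due to GYP_CHROMIUM_NO_ACTION env var.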
diff --git a/deps/v8/build/gyp_v8.py b/deps/v8/gypfiles/gyp_v8.py
index 462ee674ac..462ee674ac 100644
--- a/deps/v8/build/gyp_v8.py
+++ b/deps/v8/gypfiles/gyp_v8.py
diff --git a/deps/v8/build/isolate.gypi b/deps/v8/gypfiles/isolate.gypi
index 4cfbbfddd1..149818c8d0 100644
--- a/deps/v8/build/isolate.gypi
+++ b/deps/v8/gypfiles/isolate.gypi
@@ -17,7 +17,7 @@
# 'foo_test',
# ],
# 'includes': [
-# '../build/isolate.gypi',
+# '../gypfiles/isolate.gypi',
# ],
# 'sources': [
# 'foo_test.isolate',
@@ -73,15 +73,13 @@
'--config-variable', 'cfi_vptr=<(cfi_vptr)',
'--config-variable', 'gcmole=<(gcmole)',
'--config-variable', 'has_valgrind=<(has_valgrind)',
- '--config-variable', 'icu_use_data_file_flag=0',
+ '--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'coverage=<(coverage)',
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
- '--config-variable', 'use_custom_libcxx=<(use_custom_libcxx)',
- '--config-variable', 'v8_separate_ignition_snapshot=<(v8_separate_ignition_snapshot)',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],
diff --git a/deps/v8/build/landmine_utils.py b/deps/v8/gypfiles/landmine_utils.py
index cb3499132a..cb3499132a 100644
--- a/deps/v8/build/landmine_utils.py
+++ b/deps/v8/gypfiles/landmine_utils.py
diff --git a/deps/v8/build/landmines.py b/deps/v8/gypfiles/landmines.py
index 97c63901c1..2a81c66d1a 100755
--- a/deps/v8/build/landmines.py
+++ b/deps/v8/gypfiles/landmines.py
@@ -198,7 +198,7 @@ def process_options():
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
- default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
+ default=[os.path.join(SRC_DIR, 'gypfiles', 'get_landmines.py')],
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
diff --git a/deps/v8/build/mac/asan.gyp b/deps/v8/gypfiles/mac/asan.gyp
index 3fc7f58d43..3fc7f58d43 100644
--- a/deps/v8/build/mac/asan.gyp
+++ b/deps/v8/gypfiles/mac/asan.gyp
diff --git a/deps/v8/gypfiles/set_clang_warning_flags.gypi b/deps/v8/gypfiles/set_clang_warning_flags.gypi
new file mode 100644
index 0000000000..63d5f1435c
--- /dev/null
+++ b/deps/v8/gypfiles/set_clang_warning_flags.gypi
@@ -0,0 +1,59 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included to set clang-specific compiler flags.
+# To use this the following variable can be defined:
+# clang_warning_flags: list: Compiler flags to pass to clang.
+# clang_warning_flags_unset: list: Compiler flags to not pass to clang.
+#
+# Only use this in third-party code. In chromium_code, fix your code to not
+# warn instead!
+#
+# Note that the gypi file is included in target_defaults, so it does not need
+# to be explicitly included.
+#
+# Warning flags set by this will be used on all platforms. If you want to set
+# warning flags on only some platforms, you have to do so manually.
+#
+# To use this, create a gyp target with the following form:
+# {
+# 'target_name': 'my_target',
+# 'variables': {
+# 'clang_warning_flags': ['-Wno-awesome-warning'],
+# 'clang_warning_flags_unset': ['-Wpreviously-set-flag'],
+# }
+# }
+
+{
+ 'variables': {
+ 'clang_warning_flags_unset%': [], # Provide a default value.
+ },
+ 'conditions': [
+ ['clang==1', {
+    # This uses >@ instead of <@ to also see clang_warning_flags set in
+ # targets directly, not just the clang_warning_flags in target_defaults.
+ 'cflags': [ '>@(clang_warning_flags)' ],
+ 'cflags!': [ '>@(clang_warning_flags_unset)' ],
+ 'xcode_settings': {
+ 'WARNING_CFLAGS': ['>@(clang_warning_flags)'],
+ 'WARNING_CFLAGS!': ['>@(clang_warning_flags_unset)'],
+ },
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [ '>@(clang_warning_flags)' ],
+ 'AdditionalOptions!': [ '>@(clang_warning_flags_unset)' ],
+ },
+ },
+ }],
+ ['clang==0 and host_clang==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'cflags': [ '>@(clang_warning_flags)' ],
+ 'cflags!': [ '>@(clang_warning_flags_unset)' ],
+ }],
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/build/shim_headers.gypi b/deps/v8/gypfiles/shim_headers.gypi
index 940211c240..940211c240 100644
--- a/deps/v8/build/shim_headers.gypi
+++ b/deps/v8/gypfiles/shim_headers.gypi
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/gypfiles/standalone.gypi
index 6c88409dbe..6599bb8351 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/gypfiles/standalone.gypi
@@ -49,35 +49,70 @@
'variables': {
'variables': {
'variables': {
- 'conditions': [
- ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
- OS=="netbsd" or OS=="mac" or OS=="qnx" or OS=="aix"', {
- # This handles the Unix platforms we generally deal with.
- # Anything else gets passed through, which probably won't work
- # very well; such hosts should pass an explicit target_arch
- # to gyp.
- 'host_arch%': '<!pymod_do_main(detect_v8_host_arch)',
- }, {
- # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
- # OS!="netbsd" and OS!="mac" and OS!="aix"
- 'host_arch%': 'ia32',
- }],
- ],
+ 'variables': {
+ 'conditions': [
+ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
+ OS=="netbsd" or OS=="mac" or OS=="qnx" or OS=="aix"', {
+ # This handles the Unix platforms we generally deal with.
+ # Anything else gets passed through, which probably won't work
+ # very well; such hosts should pass an explicit target_arch
+ # to gyp.
+ 'host_arch%': '<!pymod_do_main(detect_v8_host_arch)',
+ }, {
+ # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
+ # OS!="netbsd" and OS!="mac" and OS!="aix"
+ 'host_arch%': 'ia32',
+ }],
+ ],
+ },
+ 'host_arch%': '<(host_arch)',
+ 'target_arch%': '<(host_arch)',
+
+ # By default we build against a stable sysroot image to avoid
+ # depending on the packages installed on the local machine. Set this
+ # to 0 to build against locally installed headers and libraries (e.g.
+          # if packaging for a Linux distro).
+ 'use_sysroot%': 1,
},
'host_arch%': '<(host_arch)',
- 'target_arch%': '<(host_arch)',
+ 'target_arch%': '<(target_arch)',
+ 'use_sysroot%': '<(use_sysroot)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
# Instrument for code coverage and use coverage wrapper to exclude some
# files. Uses gcov if clang=0 is set explicitly. Otherwise,
# sanitizer_coverage must be set too.
'coverage%': 0,
+
+ # Default sysroot if no sysroot can be provided.
+ 'sysroot%': '',
+
+ 'conditions': [
+ # The system root for linux builds.
+ ['OS=="linux" and use_sysroot==1', {
+ 'conditions': [
+ ['target_arch=="arm"', {
+ 'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_arm-sysroot',
+ }],
+ ['target_arch=="x64"', {
+ 'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_amd64-sysroot',
+ }],
+ ['target_arch=="ia32"', {
+ 'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_i386-sysroot',
+ }],
+ ['target_arch=="mipsel"', {
+ 'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_mips-sysroot',
+ }],
+ ],
+ }], # OS=="linux" and use_sysroot==1
+ ],
},
'base_dir%': '<(base_dir)',
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
'coverage%': '<(coverage)',
+ 'sysroot%': '<(sysroot)',
'asan%': 0,
'lsan%': 0,
'msan%': 0,
@@ -86,12 +121,19 @@
# also controls coverage granularity (1 for function-level, 2 for
# block-level, 3 for edge-level).
'sanitizer_coverage%': 0,
+
+ # Use dynamic libraries instrumented by one of the sanitizers
+ # instead of the standard system libraries. Set this flag to download
+ # prebuilt binaries from GCS.
+ 'use_prebuilt_instrumented_libraries%': 0,
+
# Use libc++ (buildtools/third_party/libc++ and
# buildtools/third_party/libc++abi) instead of stdlibc++ as standard
# library. This is intended to be used for instrumented builds.
'use_custom_libcxx%': 0,
'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
+ 'make_clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
'use_lto%': 0,
@@ -112,11 +154,11 @@
'use_goma%': 0,
'gomadir%': '',
- # Check if valgrind directories are present.
- 'has_valgrind%': '<!pymod_do_main(has_valgrind)',
-
'test_isolation_mode%': 'noop',
+ # By default, use ICU data file (icudtl.dat).
+ 'icu_use_data_file_flag%': 1,
+
'conditions': [
# Set default gomadir.
['OS=="win"', {
@@ -134,7 +176,7 @@
# are using a custom toolchain and need to control -B in ldflags.
   # Do not use 32-bit gold on 32-bit hosts as it runs out of address space
# for component=static_library builds.
- ['(OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))', {
+ ['((OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))) or (OS=="linux" and target_arch=="mipsel")', {
'linux_use_bundled_gold%': 1,
}, {
'linux_use_bundled_gold%': 0,
@@ -143,6 +185,7 @@
},
'base_dir%': '<(base_dir)',
'clang_dir%': '<(clang_dir)',
+ 'make_clang_dir%': '<(make_clang_dir)',
'host_arch%': '<(host_arch)',
'host_clang%': '<(host_clang)',
'target_arch%': '<(target_arch)',
@@ -155,6 +198,7 @@
'msan%': '<(msan)',
'tsan%': '<(tsan)',
'sanitizer_coverage%': '<(sanitizer_coverage)',
+ 'use_prebuilt_instrumented_libraries%': '<(use_prebuilt_instrumented_libraries)',
'use_custom_libcxx%': '<(use_custom_libcxx)',
'linux_use_bundled_gold%': '<(linux_use_bundled_gold)',
'use_lto%': '<(use_lto)',
@@ -164,7 +208,8 @@
'test_isolation_mode%': '<(test_isolation_mode)',
'fastbuild%': '<(fastbuild)',
'coverage%': '<(coverage)',
- 'has_valgrind%': '<(has_valgrind)',
+ 'sysroot%': '<(sysroot)',
+ 'icu_use_data_file_flag%': '<(icu_use_data_file_flag)',
   # Add simple extras solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@@ -194,12 +239,12 @@
# their own default value.
'v8_use_external_startup_data%': 1,
- # Use a separate ignition snapshot file in standalone builds.
- 'v8_separate_ignition_snapshot': 1,
-
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
+ # Relative path to inspector.gyp from this file.
+ 'inspector_gyp_path': '../src/v8-inspector/inspector.gyp',
+
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
@@ -211,6 +256,18 @@
}, {
'want_separate_host_toolset': 0,
}],
+ ['(v8_target_arch=="arm" and host_arch!="arm") or \
+ (v8_target_arch=="arm64" and host_arch!="arm64") or \
+ (v8_target_arch=="mipsel" and host_arch!="mipsel") or \
+ (v8_target_arch=="mips64el" and host_arch!="mips64el") or \
+ (v8_target_arch=="mips" and host_arch!="mips") or \
+ (v8_target_arch=="mips64" and host_arch!="mips64") or \
+ (v8_target_arch=="x64" and host_arch!="x64") or \
+ (OS=="android" or OS=="qnx")', {
+ 'want_separate_host_toolset_mkpeephole': 1,
+ }, {
+ 'want_separate_host_toolset_mkpeephole': 0,
+ }],
['OS == "win"', {
'os_posix%': 0,
}, {
@@ -261,50 +318,60 @@
# because it is used at different levels in the GYP files.
'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
+ # Version of the NDK. Used to ensure full rebuilds on NDK rolls.
+ 'android_ndk_version%': 'r11c',
'host_os%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')",
+ 'os_folder_name%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/darwin/')",
},
# Copy conditionally-set variables out one scope.
'android_ndk_root%': '<(android_ndk_root)',
+ 'android_ndk_version%': '<(android_ndk_version)',
'host_os%': '<(host_os)',
+ 'os_folder_name%': '<(os_folder_name)',
'conditions': [
['target_arch == "ia32"', {
- 'android_toolchain%': '<(android_ndk_root)/toolchains/x86-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+ 'android_toolchain%': '<(android_ndk_root)/toolchains/x86-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'x86',
'android_target_platform%': '16',
+ 'arm_version%': 'default',
}],
['target_arch == "x64"', {
- 'android_toolchain%': '<(android_ndk_root)/toolchains/x86_64-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+ 'android_toolchain%': '<(android_ndk_root)/toolchains/x86_64-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'x86_64',
'android_target_platform%': '21',
+ 'arm_version%': 'default',
}],
['target_arch=="arm"', {
- 'android_toolchain%': '<(android_ndk_root)/toolchains/arm-linux-androideabi-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+ 'android_toolchain%': '<(android_ndk_root)/toolchains/arm-linux-androideabi-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'arm',
'android_target_platform%': '16',
'arm_version%': 7,
}],
['target_arch == "arm64"', {
- 'android_toolchain%': '<(android_ndk_root)/toolchains/aarch64-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+ 'android_toolchain%': '<(android_ndk_root)/toolchains/aarch64-linux-android-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'arm64',
'android_target_platform%': '21',
'arm_version%': 'default',
}],
['target_arch == "mipsel"', {
- 'android_toolchain%': '<(android_ndk_root)/toolchains/mipsel-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+ 'android_toolchain%': '<(android_ndk_root)/toolchains/mipsel-linux-android-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'mips',
'android_target_platform%': '16',
+ 'arm_version%': 'default',
}],
['target_arch == "mips64el"', {
- 'android_toolchain%': '<(android_ndk_root)/toolchains/mips64el-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+ 'android_toolchain%': '<(android_ndk_root)/toolchains/mips64el-linux-android-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'mips64',
'android_target_platform%': '21',
+ 'arm_version%': 'default',
}],
],
},
# Copy conditionally-set variables out one scope.
+ 'android_ndk_version%': '<(android_ndk_version)',
'android_target_arch%': '<(android_target_arch)',
'android_target_platform%': '<(android_target_platform)',
'android_toolchain%': '<(android_toolchain)',
@@ -351,6 +418,12 @@
'android_libcpp_library': 'c++_static',
}], # OS=="android"
['host_clang==1', {
+ 'conditions':[
+ ['OS=="android"', {
+ 'host_ld': '<!(which ld)',
+ 'host_ranlib': '<!(which ranlib)',
+ }],
+ ],
'host_cc': '<(clang_dir)/bin/clang',
'host_cxx': '<(clang_dir)/bin/clang++',
}, {
@@ -373,19 +446,23 @@
# fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
# detection
'mips_fpu_mode%': 'fp32',
-
- # Indicates if gcmole tools are downloaded by a hook.
- 'gcmole%': 0,
},
'target_defaults': {
'variables': {
'v8_code%': '<(v8_code)',
+ 'clang_warning_flags': [
+ # TODO(thakis): https://crbug.com/604888
+ '-Wno-undefined-var-template',
+ # TODO(yangguo): issue 5258
+ '-Wno-nonportable-include-path',
+ ],
'conditions':[
['OS=="android"', {
'host_os%': '<(host_os)',
}],
],
},
+ 'includes': [ 'set_clang_warning_flags.gypi', ],
'default_configuration': 'Debug',
'configurations': {
'DebugBaseCommon': {
@@ -431,26 +508,9 @@
# things when their commandline changes). Nothing should ever read this
# define.
'defines': ['CR_CLANG_REVISION=<!(python <(DEPTH)/tools/clang/scripts/update.py --print-revision)'],
- 'conditions': [
- ['host_clang==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'cflags+': [
- '-Wno-format-pedantic',
- ],
- }],
- ],
- }],
- ['clang==1', {
- 'target_conditions': [
- ['_toolset=="target"', {
- 'cflags+': [
- '-Wno-format-pedantic',
- ],
- }],
- ],
- }],
- ],
+ }],
+ ['clang==1 and target_arch=="ia32"', {
+ 'cflags': ['-mstack-alignment=16', '-mstackrealign'],
}],
['fastbuild!=0', {
'conditions': [
@@ -605,6 +665,11 @@
}],
],
}],
+ ['use_prebuilt_instrumented_libraries==1', {
+ 'dependencies': [
+ '<(DEPTH)/third_party/instrumented_libraries/instrumented_libraries.gyp:prebuilt_instrumented_libraries',
+ ],
+ }],
['use_custom_libcxx==1', {
'dependencies': [
'<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy',
@@ -637,6 +702,18 @@
'-B<(base_dir)/third_party/binutils/Linux_x64/Release/bin',
],
}],
+ ['sysroot!="" and clang==1', {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags': [
+ '--sysroot=<(sysroot)',
+ ],
+ 'ldflags': [
+ '--sysroot=<(sysroot)',
+ '<!(<(DEPTH)/build/linux/sysroot_ld_path.sh <(sysroot))',
+ ],
+ }]]
+ }],
],
},
}],
@@ -660,7 +737,7 @@
],
},
'dependencies': [
- '<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
+ '<(DEPTH)/gypfiles/mac/asan.gyp:asan_dynamic_runtime',
],
'target_conditions': [
['_type!="static_library"', {
@@ -690,11 +767,9 @@
'-Wall',
'<(werror)',
'-Wno-unused-parameter',
- '-Wno-long-long',
'-pthread',
'-pedantic',
- # Don't warn about the "struct foo f = {0};" initialization pattern.
- '-Wno-missing-field-initializers',
+ '-Wmissing-field-initializers',
'-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
@@ -932,6 +1007,40 @@
}],
],
},
+ 'conditions': [
+ ['clang==1', {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
+ # Don't warn about unused function parameters.
+ # (This is also used on other platforms.)
+ '-Wno-unused-parameter',
+ # Don't warn about the "struct foo f = {0};" initialization
+ # pattern.
+ '-Wno-missing-field-initializers',
+
+ # TODO(hans): Make this list shorter eventually, http://crbug.com/504657
+ '-Qunused-arguments', # http://crbug.com/504658
+ '-Wno-microsoft-enum-value', # http://crbug.com/505296
+ '-Wno-unknown-pragmas', # http://crbug.com/505314
+ '-Wno-microsoft-cast', # http://crbug.com/550065
+ ],
+ },
+ }],
+ ['clang==1 and MSVS_VERSION == "2013"', {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
+ '-fmsc-version=1800',
+ ],
+ },
+ }],
+ ['clang==1 and MSVS_VERSION == "2015"', {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': [
+ '-fmsc-version=1900',
+ ],
+ },
+ }],
+ ],
},
},
}], # OS=="win"
@@ -984,6 +1093,13 @@
'CLANG_CXX_LANGUAGE_STANDARD': 'c++11', # -std=c++11
},
'conditions': [
+ ['clang_xcode==0', {
+ 'xcode_settings': {
+ 'CC': '<(clang_dir)/bin/clang',
+ 'LDPLUSPLUS': '<(clang_dir)/bin/clang++',
+ 'CLANG_CXX_LIBRARY': 'libc++'
+ },
+ }],
['v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el"', {
'xcode_settings': {'WARNING_CFLAGS': ['-Wshorten-64-to-32']},
@@ -1002,7 +1118,6 @@
'target_defaults': {
'defines': [
'ANDROID',
- 'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
@@ -1039,6 +1154,7 @@
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
+ 'ANDROID_NDK_VERSION=<(android_ndk_version)',
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
@@ -1188,8 +1304,12 @@
# Hardcode the compiler names in the Makefile so that
# it won't depend on the environment at make time.
'make_global_settings': [
+ ['LD', '<!(/bin/echo -n <(android_toolchain)/../*/bin/ld)'],
+ ['RANLIB', '<!(/bin/echo -n <(android_toolchain)/../*/bin/ranlib)'],
['CC', '<!(/bin/echo -n <(android_toolchain)/*-gcc)'],
['CXX', '<!(/bin/echo -n <(android_toolchain)/*-g++)'],
+ ['LD.host', '<(host_ld)'],
+ ['RANLIB.host', '<(host_ranlib)'],
['CC.host', '<(host_cc)'],
['CXX.host', '<(host_cxx)'],
],
@@ -1261,10 +1381,10 @@
['coverage==1', {
# Wrap goma with coverage wrapper.
'make_global_settings': [
- ['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
- ['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
- ['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
- ['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
+ ['CC_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
+ ['CXX_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
+ ['CC.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
+ ['CXX.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
],
}, {
# Use only goma wrapper.
@@ -1281,10 +1401,10 @@
['coverage==1', {
# Use only coverage wrapper.
'make_global_settings': [
- ['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
- ['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
- ['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
- ['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
+ ['CC_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
+ ['CXX_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
+ ['CC.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
+ ['CXX.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
],
}],
],
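The deeply nested 'variables' blocks near the top of this file are a standard
GYP idiom rather than an accident: each inner scope computes a default, and
the enclosing scope re-exports it with 'name%': '<(name)' so that conditions
at that level can read it. Stripped to a skeleton (names hypothetical):

{
  'variables': {
    'variables': {
      'foo%': 1,          # default computed in the inner scope
    },
    'foo%': '<(foo)',     # copied out one level for siblings to use
    'conditions': [
      ['foo==1', { 'bar%': 'fast' }, { 'bar%': 'slow' }],
    ],
  },
}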
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/gypfiles/toolchain.gypi
index 519779edb4..95eb1d99cb 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/gypfiles/toolchain.gypi
@@ -37,16 +37,11 @@
'tsan%': 0,
'ubsan%': 0,
'ubsan_vptr%': 0,
+ 'has_valgrind%': 0,
+ 'coverage%': 0,
'v8_target_arch%': '<(target_arch)',
'v8_host_byteorder%': '<!(python -c "import sys; print sys.byteorder")',
'force_dynamic_crt%': 0,
- # Native Client builds currently use the V8 ARM JIT and
- # arm/simulator-arm.cc to defer the significant effort required
- # for NaCl JIT support. The nacl_target_arch variable provides
- # the 'true' target arch for places in this file that need it.
- # TODO(bradchen): get rid of nacl_target_arch when someday
- # NaCl V8 builds stop using the ARM simulator
- 'nacl_target_arch%': 'none', # must be set externally
# Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
# registers d16-d31 in the generated code, both in the snapshot and for the
@@ -62,6 +57,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
+  # Print V8 log output to stdout on Android instead of the system log.
+ 'v8_android_log_stdout%': 0,
+
# Force disable libstdc++ debug mode.
'disable_glibcxx_debug%': 0,
@@ -74,16 +72,14 @@
'v8_no_strict_aliasing%': 0,
# Chrome needs this definition unconditionally. For standalone V8 builds,
- # it's handled in build/standalone.gypi.
+ # it's handled in gypfiles/standalone.gypi.
'want_separate_host_toolset%': 1,
+ 'want_separate_host_toolset_mkpeephole%': 1,
- # Toolset the d8 binary should be compiled for. Possible values are 'host'
- # and 'target'. If you want to run v8 tests, it needs to be set to 'target'.
+ # Toolset the shell binary should be compiled for. Possible values are
+ # 'host' and 'target'.
# The setting is ignored if want_separate_host_toolset is 0.
- 'v8_toolset_for_d8%': 'target',
-
- # Control usage of a separate ignition snapshot file.
- 'v8_separate_ignition_snapshot%': 0,
+ 'v8_toolset_for_shell%': 'target',
'host_os%': '<(OS)',
'werror%': '-Werror',
@@ -109,7 +105,7 @@
# are using a custom toolchain and need to control -B in ldflags.
   # Do not use 32-bit gold on 32-bit hosts as it runs out of address space
# for component=static_library builds.
- ['OS=="linux" and (target_arch=="x64" or target_arch=="arm")', {
+ ['((OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))) or (OS=="linux" and target_arch=="mipsel")', {
'linux_use_bundled_gold%': 1,
}, {
'linux_use_bundled_gold%': 0,
@@ -135,6 +131,9 @@
# Link-Time Optimizations
'use_lto%': 0,
+
+ # Indicates if gcmole tools are downloaded by a hook.
+ 'gcmole%': 0,
},
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or \
@@ -362,28 +361,47 @@
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
- ['(v8_target_arch=="mips" or v8_target_arch=="mipsel" \
- or v8_target_arch=="mips64" or v8_target_arch=="mips64el") \
- and v8_target_arch==target_arch', {
+ ['v8_target_arch=="mips" or v8_target_arch=="mipsel" \
+ or v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'target_conditions': [
['_toolset=="target"', {
- # Target built with a Mips CXX compiler.
- 'variables': {
- 'ldso_path%': '<!(/bin/echo -n $LDSO_PATH)',
- 'ld_r_path%': '<!(/bin/echo -n $LD_R_PATH)',
- },
'conditions': [
- ['ldso_path!=""', {
- 'ldflags': ['-Wl,--dynamic-linker=<(ldso_path)'],
- }],
- ['ld_r_path!=""', {
- 'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
+ ['v8_target_arch==target_arch', {
+ # Target built with a Mips CXX compiler.
+ 'variables': {
+ 'ldso_path%': '<!(/bin/echo -n $LDSO_PATH)',
+ 'ld_r_path%': '<!(/bin/echo -n $LD_R_PATH)',
+ },
+ 'conditions': [
+ ['ldso_path!=""', {
+ 'ldflags': ['-Wl,--dynamic-linker=<(ldso_path)'],
+ }],
+ ['ld_r_path!=""', {
+ 'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
+ }],
+ [ 'clang==1', {
+ 'cflags': ['-integrated-as'],
+ }],
+ ['OS!="mac"', {
+ 'defines': ['_MIPS_TARGET_HW',],
+ }, {
+ 'defines': ['_MIPS_TARGET_SIMULATOR',],
+ }],
+ ],
+ }, {
+ 'defines': ['_MIPS_TARGET_SIMULATOR',],
}],
- [ 'clang==1', {
- 'cflags': ['-integrated-as'],
+ ],
+          }],  # _toolset=="target"
+ ['_toolset=="host"', {
+ 'conditions': [
+ ['v8_target_arch==target_arch and OS!="mac"', {
+ 'defines': ['_MIPS_TARGET_HW',],
+ }, {
+ 'defines': ['_MIPS_TARGET_SIMULATOR',],
}],
],
- }],
+          }],  # _toolset=="host"
],
}],
['v8_target_arch=="mips"', {
@@ -671,6 +689,9 @@
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
+ 'defines': [
+ 'FPU_MODE_FP32',
+ ],
'cflags!': ['-mfp64', '-mfpxx'],
'conditions': [
[ 'clang==0', {
@@ -1024,7 +1045,7 @@
}],
['_toolset=="target"', {
'conditions': [
- ['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', {
+ ['target_cxx_is_biarch==1', {
'conditions': [
['host_arch=="s390" or host_arch=="s390x"', {
'cflags': [ '-m31' ],
@@ -1064,6 +1085,11 @@
}],
],
}],
+ ['OS=="android" and v8_android_log_stdout==1', {
+ 'defines': [
+ 'V8_ANDROID_LOG_STDOUT',
+ ],
+ }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="qnx" or OS=="aix"', {
'conditions': [
@@ -1085,6 +1111,7 @@
'defines': [
# Support for malloc(0)
'_LINUX_SOURCE_COMPAT=1',
+ '__STDC_FORMAT_MACROS',
'_ALL_SOURCE=1'],
'conditions': [
[ 'v8_target_arch=="ppc"', {
@@ -1092,7 +1119,7 @@
}],
[ 'v8_target_arch=="ppc64"', {
'cflags': [ '-maix64' ],
- 'ldflags': [ '-maix64' ],
+ 'ldflags': [ '-maix64 -Wl,-bbigtoc' ],
}],
],
}],
@@ -1186,9 +1213,8 @@
'-ffunction-sections',
],
'conditions': [
- # TODO(crbug.com/272548): Avoid -O3 in NaCl
# Don't use -O3 with sanitizers.
- ['nacl_target_arch=="none" and asan==0 and msan==0 and lsan==0 \
+ ['asan==0 and msan==0 and lsan==0 \
and tsan==0 and ubsan==0 and ubsan_vptr==0', {
'cflags': ['-O3'],
'cflags!': ['-O2'],
@@ -1305,9 +1331,8 @@
'<(wno_array_bounds)',
],
'conditions': [
- # TODO(crbug.com/272548): Avoid -O3 in NaCl
# Don't use -O3 with sanitizers.
- ['nacl_target_arch=="none" and asan==0 and msan==0 and lsan==0 \
+ ['asan==0 and msan==0 and lsan==0 \
and tsan==0 and ubsan==0 and ubsan_vptr==0', {
'cflags': ['-O3'],
'cflags!': ['-O2'],
diff --git a/deps/v8/build/vs_toolchain.py b/deps/v8/gypfiles/vs_toolchain.py
index 95fbcf4993..d7676c8da0 100644
--- a/deps/v8/build/vs_toolchain.py
+++ b/deps/v8/gypfiles/vs_toolchain.py
@@ -4,39 +4,43 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import glob
import json
import os
import pipes
import shutil
import subprocess
import sys
-import vs_toolchain
script_dir = os.path.dirname(os.path.realpath(__file__))
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(1, os.path.join(chrome_src, 'tools'))
-sys.path.insert(0, os.path.join(chrome_src, 'build', 'gyp', 'pylib'))
+sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
import gyp
+# Use MSVS2013 as the default toolchain.
+CURRENT_DEFAULT_TOOLCHAIN_VERSION = '2013'
+
+
def SetEnvironmentAndGetRuntimeDllDirs():
"""Sets up os.environ to use the depot_tools VS toolchain with gyp, and
returns the location of the VS runtime DLLs so they can be copied into
the output directory after gyp generation.
"""
- vs2013_runtime_dll_dirs = None
+ vs_runtime_dll_dirs = None
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
# When running on a non-Windows host, only do this if the SDK has explicitly
# been downloaded before (in which case json_data_file will exist).
if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
and depot_tools_win_toolchain):
- if not os.path.exists(json_data_file):
+ if ShouldUpdateToolchain():
Update()
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
@@ -50,7 +54,7 @@ def SetEnvironmentAndGetRuntimeDllDirs():
# TODO(scottmg): The order unfortunately matters in these. They should be
# split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
# below). http://crbug.com/345992
- vs2013_runtime_dll_dirs = toolchain_data['runtime_dirs']
+ vs_runtime_dll_dirs = toolchain_data['runtime_dirs']
os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
os.environ['GYP_MSVS_VERSION'] = version
@@ -65,30 +69,100 @@ def SetEnvironmentAndGetRuntimeDllDirs():
os.environ['WINDOWSSDKDIR'] = win_sdk
os.environ['WDK_DIR'] = wdk
# Include the VS runtime in the PATH in case it's not machine-installed.
- runtime_path = ';'.join(vs2013_runtime_dll_dirs)
- os.environ['PATH'] = runtime_path + ';' + os.environ['PATH']
- return vs2013_runtime_dll_dirs
+ runtime_path = os.path.pathsep.join(vs_runtime_dll_dirs)
+ os.environ['PATH'] = runtime_path + os.path.pathsep + os.environ['PATH']
+ elif sys.platform == 'win32' and not depot_tools_win_toolchain:
+ if not 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
+ os.environ['GYP_MSVS_OVERRIDE_PATH'] = DetectVisualStudioPath()
+ if not 'GYP_MSVS_VERSION' in os.environ:
+ os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
+
+ return vs_runtime_dll_dirs
+
+
+def _RegistryGetValueUsingWinReg(key, value):
+ """Use the _winreg module to obtain the value of a registry key.
+
+ Args:
+ key: The registry key.
+ value: The particular registry value to read.
+ Return:
+ contents of the registry key's value, or None on failure. Throws
+ ImportError if _winreg is unavailable.
+ """
+ import _winreg
+ try:
+ root, subkey = key.split('\\', 1)
+ assert root == 'HKLM' # Only need HKLM for now.
+ with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
+ return _winreg.QueryValueEx(hkey, value)[0]
+ except WindowsError:
+ return None
+
+
+def _RegistryGetValue(key, value):
+ try:
+ return _RegistryGetValueUsingWinReg(key, value)
+ except ImportError:
+    raise Exception('The Python library _winreg was not found.')
+
+
+def GetVisualStudioVersion():
+ """Return GYP_MSVS_VERSION of Visual Studio.
+ """
+ return os.environ.get('GYP_MSVS_VERSION', CURRENT_DEFAULT_TOOLCHAIN_VERSION)
+
+
+def DetectVisualStudioPath():
+ """Return path to the GYP_MSVS_VERSION of Visual Studio.
+ """
+
+ # Note that this code is used from
+ # build/toolchain/win/setup_toolchain.py as well.
+ version_as_year = GetVisualStudioVersion()
+ year_to_version = {
+ '2013': '12.0',
+ '2015': '14.0',
+ }
+ if version_as_year not in year_to_version:
+ raise Exception(('Visual Studio version %s (from GYP_MSVS_VERSION)'
+ ' not supported. Supported versions are: %s') % (
+ version_as_year, ', '.join(year_to_version.keys())))
+ version = year_to_version[version_as_year]
+ keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
+ r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version]
+ for key in keys:
+ path = _RegistryGetValue(key, 'InstallDir')
+ if not path:
+ continue
+ path = os.path.normpath(os.path.join(path, '..', '..'))
+ return path
+
+ raise Exception(('Visual Studio Version %s (from GYP_MSVS_VERSION)'
+ ' not found.') % (version_as_year))
def _VersionNumber():
"""Gets the standard version number ('120', '140', etc.) based on
GYP_MSVS_VERSION."""
- if os.environ['GYP_MSVS_VERSION'] == '2013':
+ vs_version = GetVisualStudioVersion()
+ if vs_version == '2013':
return '120'
- elif os.environ['GYP_MSVS_VERSION'] == '2015':
+ elif vs_version == '2015':
return '140'
else:
raise ValueError('Unexpected GYP_MSVS_VERSION')
-def _CopyRuntimeImpl(target, source):
+def _CopyRuntimeImpl(target, source, verbose=True):
"""Copy |source| to |target| if it doesn't already exist or if it
needs to be updated.
"""
if (os.path.isdir(os.path.dirname(target)) and
(not os.path.isfile(target) or
os.stat(target).st_mtime != os.stat(source).st_mtime)):
- print 'Copying %s to %s...' % (source, target)
+ if verbose:
+ print 'Copying %s to %s...' % (source, target)
if os.path.exists(target):
os.unlink(target)
shutil.copy2(source, target)
@@ -104,14 +178,50 @@ def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
_CopyRuntimeImpl(target, source)
-def _CopyRuntime2015(target_dir, source_dir, dll_pattern):
+def _CopyRuntime2015(target_dir, source_dir, dll_pattern, suffix):
"""Copy both the msvcp and vccorlib runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
- for file_part in ('msvcp', 'vccorlib'):
+ for file_part in ('msvcp', 'vccorlib', 'vcruntime'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
+ ucrt_src_dir = os.path.join(source_dir, 'api-ms-win-*.dll')
+ print 'Copying %s to %s...' % (ucrt_src_dir, target_dir)
+ for ucrt_src_file in glob.glob(ucrt_src_dir):
+ file_part = os.path.basename(ucrt_src_file)
+ ucrt_dst_file = os.path.join(target_dir, file_part)
+ _CopyRuntimeImpl(ucrt_dst_file, ucrt_src_file, False)
+ _CopyRuntimeImpl(os.path.join(target_dir, 'ucrtbase' + suffix),
+ os.path.join(source_dir, 'ucrtbase' + suffix))
+
+
+def _CopyRuntime(target_dir, source_dir, target_cpu, debug):
+ """Copy the VS runtime DLLs, only if the target doesn't exist, but the target
+ directory does exist. Handles VS 2013 and VS 2015."""
+ suffix = "d.dll" if debug else ".dll"
+ if GetVisualStudioVersion() == '2015':
+ _CopyRuntime2015(target_dir, source_dir, '%s140' + suffix, suffix)
+ else:
+ _CopyRuntime2013(target_dir, source_dir, 'msvc%s120' + suffix)
+
+ # Copy the PGO runtime library to the release directories.
+ if not debug and os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
+ pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
+ 'VC', 'bin')
+ pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
+ pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
+ if target_cpu == "x86":
+ source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
+ if os.path.exists(source_x86):
+ _CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll), source_x86)
+ elif target_cpu == "x64":
+ source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
+ if os.path.exists(source_x64):
+ _CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll),
+ source_x64)
+ else:
+ raise NotImplementedError("Unexpected target_cpu value:" + target_cpu)
def CopyVsRuntimeDlls(output_dir, runtime_dirs):
@@ -121,48 +231,19 @@ def CopyVsRuntimeDlls(output_dir, runtime_dirs):
This needs to be run after gyp has been run so that the expected target
output directories are already created.
+
+ This is used for the GYP build and gclient runhooks.
"""
x86, x64 = runtime_dirs
out_debug = os.path.join(output_dir, 'Debug')
- out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
out_release = os.path.join(output_dir, 'Release')
- out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
out_release_x64 = os.path.join(output_dir, 'Release_x64')
- if os.path.exists(out_debug) and not os.path.exists(out_debug_nacl64):
- os.makedirs(out_debug_nacl64)
- if os.path.exists(out_release) and not os.path.exists(out_release_nacl64):
- os.makedirs(out_release_nacl64)
- if os.environ.get('GYP_MSVS_VERSION') == '2015':
- _CopyRuntime2015(out_debug, x86, '%s140d.dll')
- _CopyRuntime2015(out_release, x86, '%s140.dll')
- _CopyRuntime2015(out_debug_x64, x64, '%s140d.dll')
- _CopyRuntime2015(out_release_x64, x64, '%s140.dll')
- _CopyRuntime2015(out_debug_nacl64, x64, '%s140d.dll')
- _CopyRuntime2015(out_release_nacl64, x64, '%s140.dll')
- else:
- # VS2013 is the default.
- _CopyRuntime2013(out_debug, x86, 'msvc%s120d.dll')
- _CopyRuntime2013(out_release, x86, 'msvc%s120.dll')
- _CopyRuntime2013(out_debug_x64, x64, 'msvc%s120d.dll')
- _CopyRuntime2013(out_release_x64, x64, 'msvc%s120.dll')
- _CopyRuntime2013(out_debug_nacl64, x64, 'msvc%s120d.dll')
- _CopyRuntime2013(out_release_nacl64, x64, 'msvc%s120.dll')
-
- # Copy the PGO runtime library to the release directories.
- if os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
- pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
- 'VC', 'bin')
- pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
- pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
- source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
- if os.path.exists(source_x86):
- _CopyRuntimeImpl(os.path.join(out_release, pgo_runtime_dll), source_x86)
- source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
- if os.path.exists(source_x64):
- _CopyRuntimeImpl(os.path.join(out_release_x64, pgo_runtime_dll),
- source_x64)
+ _CopyRuntime(out_debug, x86, "x86", debug=True)
+ _CopyRuntime(out_release, x86, "x86", debug=False)
+ _CopyRuntime(out_debug_x64, x64, "x64", debug=True)
+ _CopyRuntime(out_release_x64, x64, "x64", debug=False)
def CopyDlls(target_dir, configuration, target_cpu):
@@ -173,28 +254,41 @@ def CopyDlls(target_dir, configuration, target_cpu):
The debug configuration gets both the debug and release DLLs; the
release config only the latter.
+
+ This is used for the GN build.
"""
- vs2013_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
- if not vs2013_runtime_dll_dirs:
+ vs_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
+ if not vs_runtime_dll_dirs:
return
- x64_runtime, x86_runtime = vs2013_runtime_dll_dirs
+ x64_runtime, x86_runtime = vs_runtime_dll_dirs
runtime_dir = x64_runtime if target_cpu == 'x64' else x86_runtime
- _CopyRuntime2013(
- target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + '.dll')
+ _CopyRuntime(target_dir, runtime_dir, target_cpu, debug=False)
if configuration == 'Debug':
- _CopyRuntime2013(
- target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + 'd.dll')
+ _CopyRuntime(target_dir, runtime_dir, target_cpu, debug=True)
def _GetDesiredVsToolchainHashes():
"""Load a list of SHA1s corresponding to the toolchains that we want installed
to build with."""
- if os.environ.get('GYP_MSVS_VERSION') == '2015':
- return ['5a85cf1ce842f7cc96b9d17039a445a9dc9cf0dd']
+ if GetVisualStudioVersion() == '2015':
+ # Update 2.
+ return ['95ddda401ec5678f15eeed01d2bee08fcbc5ee97']
else:
- # Default to VS2013.
- return ['9ff97c632ae1fee0c98bcd53e71770eb3a0d8deb']
+ return ['03a4e939cd325d6bc5216af41b92d02dda1366a6']
+
+
+def ShouldUpdateToolchain():
+ """Check if the toolchain should be upgraded."""
+ if not os.path.exists(json_data_file):
+ return True
+ with open(json_data_file, 'r') as tempf:
+ toolchain_data = json.load(tempf)
+ version = toolchain_data['version']
+ env_version = GetVisualStudioVersion()
+ # If there's a mismatch between the version set in the environment and the one
+ # in the json file then the toolchain should be updated.
+ return version != env_version
def Update(force=False):
@@ -214,6 +308,9 @@ def Update(force=False):
depot_tools_win_toolchain):
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
+ # Necessary so that get_toolchain_if_necessary.py will put the VS toolkit
+ # in the correct directory.
+ os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
get_toolchain_args = [
sys.executable,
os.path.join(depot_tools_path,
@@ -228,6 +325,12 @@ def Update(force=False):
return 0
+def NormalizePath(path):
+ while path.endswith("\\"):
+ path = path[:-1]
+ return path
+
+
def GetToolchainDir():
"""Gets location information about the current toolchain (must have been
previously updated by 'update'). This is used for the GN build."""
@@ -235,7 +338,7 @@ def GetToolchainDir():
# If WINDOWSSDKDIR is not set, search the default SDK path and set it.
if not 'WINDOWSSDKDIR' in os.environ:
- default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\8.1'
+ default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\10'
if os.path.isdir(default_sdk_path):
os.environ['WINDOWSSDKDIR'] = default_sdk_path
@@ -245,11 +348,11 @@ vs_version = "%s"
wdk_dir = "%s"
runtime_dirs = "%s"
''' % (
- os.environ['GYP_MSVS_OVERRIDE_PATH'],
- os.environ['WINDOWSSDKDIR'],
- os.environ['GYP_MSVS_VERSION'],
- os.environ.get('WDK_DIR', ''),
- ';'.join(runtime_dll_dirs or ['None']))
+ NormalizePath(os.environ['GYP_MSVS_OVERRIDE_PATH']),
+ NormalizePath(os.environ['WINDOWSSDKDIR']),
+ GetVisualStudioVersion(),
+ NormalizePath(os.environ.get('WDK_DIR', '')),
+ os.path.pathsep.join(runtime_dll_dirs or ['None']))
def main():
diff --git a/deps/v8/include/libplatform/DEPS b/deps/v8/include/libplatform/DEPS
new file mode 100644
index 0000000000..15e75e6b4f
--- /dev/null
+++ b/deps/v8/include/libplatform/DEPS
@@ -0,0 +1,5 @@
+specific_include_rules = {
+ "libplatform\.h": [
+ "+libplatform/v8-tracing.h",
+ ],
+}
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index f180b4fe82..5b5eee6513 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
#define V8_LIBPLATFORM_LIBPLATFORM_H_
+#include "libplatform/v8-tracing.h"
#include "v8-platform.h" // NOLINT(build/include)
namespace v8 {
@@ -31,6 +32,14 @@ v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0);
*/
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate);
+/**
+ * Attempts to set the tracing controller for the given platform.
+ *
+ * The |platform| has to be created using |CreateDefaultPlatform|.
+ */
+void SetTracingController(
+ v8::Platform* platform,
+ v8::platform::tracing::TracingController* tracing_controller);
} // namespace platform
} // namespace v8
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
new file mode 100644
index 0000000000..7646ea5489
--- /dev/null
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -0,0 +1,253 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_V8_TRACING_H_
+#define V8_LIBPLATFORM_V8_TRACING_H_
+
+#include <fstream>
+#include <memory>
+#include <vector>
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+const int kTraceMaxNumArgs = 2;
+
+class TraceObject {
+ public:
+ union ArgValue {
+ bool as_bool;
+ uint64_t as_uint;
+ int64_t as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+ };
+
+ TraceObject() {}
+ ~TraceObject();
+ void Initialize(char phase, const uint8_t* category_enabled_flag,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int num_args, const char** arg_names,
+ const uint8_t* arg_types, const uint64_t* arg_values,
+ unsigned int flags);
+ void UpdateDuration();
+ void InitializeForTesting(char phase, const uint8_t* category_enabled_flag,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags,
+ int pid, int tid, int64_t ts, int64_t tts,
+ uint64_t duration, uint64_t cpu_duration);
+
+ int pid() const { return pid_; }
+ int tid() const { return tid_; }
+ char phase() const { return phase_; }
+ const uint8_t* category_enabled_flag() const {
+ return category_enabled_flag_;
+ }
+ const char* name() const { return name_; }
+ const char* scope() const { return scope_; }
+ uint64_t id() const { return id_; }
+ uint64_t bind_id() const { return bind_id_; }
+ int num_args() const { return num_args_; }
+ const char** arg_names() { return arg_names_; }
+ uint8_t* arg_types() { return arg_types_; }
+ ArgValue* arg_values() { return arg_values_; }
+ unsigned int flags() const { return flags_; }
+ int64_t ts() { return ts_; }
+ int64_t tts() { return tts_; }
+ uint64_t duration() { return duration_; }
+ uint64_t cpu_duration() { return cpu_duration_; }
+
+ private:
+ int pid_;
+ int tid_;
+ char phase_;
+ const char* name_;
+ const char* scope_;
+ const uint8_t* category_enabled_flag_;
+ uint64_t id_;
+ uint64_t bind_id_;
+ int num_args_;
+ const char* arg_names_[kTraceMaxNumArgs];
+ uint8_t arg_types_[kTraceMaxNumArgs];
+ ArgValue arg_values_[kTraceMaxNumArgs];
+ char* parameter_copy_storage_ = nullptr;
+ unsigned int flags_;
+ int64_t ts_;
+ int64_t tts_;
+ uint64_t duration_;
+ uint64_t cpu_duration_;
+
+ // Disallow copy and assign
+ TraceObject(const TraceObject&) = delete;
+ void operator=(const TraceObject&) = delete;
+};
+
+class TraceWriter {
+ public:
+ TraceWriter() {}
+ virtual ~TraceWriter() {}
+ virtual void AppendTraceEvent(TraceObject* trace_event) = 0;
+ virtual void Flush() = 0;
+
+ static TraceWriter* CreateJSONTraceWriter(std::ostream& stream);
+
+ private:
+ // Disallow copy and assign
+ TraceWriter(const TraceWriter&) = delete;
+ void operator=(const TraceWriter&) = delete;
+};
+
+class TraceBufferChunk {
+ public:
+ explicit TraceBufferChunk(uint32_t seq);
+
+ void Reset(uint32_t new_seq);
+ bool IsFull() const { return next_free_ == kChunkSize; }
+ TraceObject* AddTraceEvent(size_t* event_index);
+ TraceObject* GetEventAt(size_t index) { return &chunk_[index]; }
+
+ uint32_t seq() const { return seq_; }
+ size_t size() const { return next_free_; }
+
+ static const size_t kChunkSize = 64;
+
+ private:
+ size_t next_free_ = 0;
+ TraceObject chunk_[kChunkSize];
+ uint32_t seq_;
+
+ // Disallow copy and assign
+ TraceBufferChunk(const TraceBufferChunk&) = delete;
+ void operator=(const TraceBufferChunk&) = delete;
+};
+
+class TraceBuffer {
+ public:
+ TraceBuffer() {}
+ virtual ~TraceBuffer() {}
+
+ virtual TraceObject* AddTraceEvent(uint64_t* handle) = 0;
+ virtual TraceObject* GetEventByHandle(uint64_t handle) = 0;
+ virtual bool Flush() = 0;
+
+ static const size_t kRingBufferChunks = 1024;
+
+ static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks,
+ TraceWriter* trace_writer);
+
+ private:
+ // Disallow copy and assign
+ TraceBuffer(const TraceBuffer&) = delete;
+ void operator=(const TraceBuffer&) = delete;
+};
+
+// Options determines how the trace buffer stores data.
+enum TraceRecordMode {
+ // Record until the trace buffer is full.
+ RECORD_UNTIL_FULL,
+
+ // Record until the user ends the trace. The trace buffer is a fixed size
+ // and we use it as a ring buffer during recording.
+ RECORD_CONTINUOUSLY,
+
+ // Record until the trace buffer is full, but with a huge buffer size.
+ RECORD_AS_MUCH_AS_POSSIBLE,
+
+ // Echo to console. Events are discarded.
+ ECHO_TO_CONSOLE,
+};
+
+class TraceConfig {
+ public:
+ typedef std::vector<std::string> StringList;
+
+ static TraceConfig* CreateDefaultTraceConfig();
+
+ TraceConfig()
+ : enable_sampling_(false),
+ enable_systrace_(false),
+ enable_argument_filter_(false) {}
+ TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
+ bool IsSamplingEnabled() const { return enable_sampling_; }
+ bool IsSystraceEnabled() const { return enable_systrace_; }
+ bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
+
+ void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
+ void EnableSampling() { enable_sampling_ = true; }
+ void EnableSystrace() { enable_systrace_ = true; }
+ void EnableArgumentFilter() { enable_argument_filter_ = true; }
+
+ void AddIncludedCategory(const char* included_category);
+ void AddExcludedCategory(const char* excluded_category);
+
+ bool IsCategoryGroupEnabled(const char* category_group) const;
+
+ private:
+ TraceRecordMode record_mode_;
+ bool enable_sampling_ : 1;
+ bool enable_systrace_ : 1;
+ bool enable_argument_filter_ : 1;
+ StringList included_categories_;
+ StringList excluded_categories_;
+
+ // Disallow copy and assign
+ TraceConfig(const TraceConfig&) = delete;
+ void operator=(const TraceConfig&) = delete;
+};
+
+class TracingController {
+ public:
+ enum Mode { DISABLED = 0, RECORDING_MODE };
+
+ // The pointer returned from GetCategoryGroupEnabledInternal() points to a
+ // value with zero or more of the following bits. Used in this class only.
+ // The TRACE_EVENT macros should only use the value as a bool.
+ // These values must be in sync with macro values in TraceEvent.h in Blink.
+ enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ ENABLED_FOR_RECORDING = 1 << 0,
+ // Category group enabled by SetEventCallbackEnabled().
+ ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+ // Category group enabled to export events to ETW.
+ ENABLED_FOR_ETW_EXPORT = 1 << 3
+ };
+
+ TracingController() {}
+ void Initialize(TraceBuffer* trace_buffer);
+ const uint8_t* GetCategoryGroupEnabled(const char* category_group);
+ static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
+ uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags);
+ void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
+ const char* name, uint64_t handle);
+
+ void StartTracing(TraceConfig* trace_config);
+ void StopTracing();
+
+ private:
+ const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group);
+ void UpdateCategoryGroupEnabledFlag(size_t category_index);
+ void UpdateCategoryGroupEnabledFlags();
+
+ std::unique_ptr<TraceBuffer> trace_buffer_;
+ std::unique_ptr<TraceConfig> trace_config_;
+ Mode mode_ = DISABLED;
+
+ // Disallow copy and assign
+ TracingController(const TracingController&) = delete;
+ void operator=(const TracingController&) = delete;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_V8_TRACING_H_
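As a sketch of how the pieces above fit together: an embedder creates a TraceBuffer over a TraceWriter, hands it to a TracingController, and installs that controller on the default platform via the SetTracingController hook added to libplatform.h earlier in this diff. Everything used below is declared in this header; the std::cout destination and function name are illustrative only.

    #include <iostream>
    #include "libplatform/libplatform.h"

    namespace tracing = v8::platform::tracing;

    void StartTracingSketch(v8::Platform* platform) {
      // JSON writer over stdout; the ring buffer flushes events into it.
      tracing::TraceWriter* writer =
          tracing::TraceWriter::CreateJSONTraceWriter(std::cout);
      tracing::TraceBuffer* buffer =
          tracing::TraceBuffer::CreateTraceBufferRingBuffer(
              tracing::TraceBuffer::kRingBufferChunks, writer);

      // The controller owns the buffer after Initialize (it is stored in a
      // std::unique_ptr, per the class definition above).
      tracing::TracingController* controller =
          new tracing::TracingController();
      controller->Initialize(buffer);

      // |platform| must have been created by CreateDefaultPlatform.
      v8::platform::SetTracingController(platform, controller);

      controller->StartTracing(
          tracing::TraceConfig::CreateDefaultTraceConfig());
      // ... run JavaScript; TRACE_EVENT data is recorded ...
      controller->StopTracing();
    }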
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 50314501e3..6385a31d85 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -18,13 +18,11 @@ enum DebugEvent {
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
- AfterCompile = 5,
+ AfterCompile = 5,
CompileError = 6,
- PromiseEvent = 7,
- AsyncTaskEvent = 8,
+ AsyncTaskEvent = 7,
};
-
class V8_EXPORT Debug {
public:
/**
@@ -127,6 +125,8 @@ class V8_EXPORT Debug {
*/
virtual ClientData* GetClientData() const = 0;
+ virtual Isolate* GetIsolate() const = 0;
+
virtual ~EventDetails() {}
};
@@ -157,9 +157,6 @@ class V8_EXPORT Debug {
static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>());
- V8_DEPRECATED("Use version with an Isolate",
- static bool SetDebugEventListener(
- EventCallback that, Local<Value> data = Local<Value>()));
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
@@ -174,8 +171,6 @@ class V8_EXPORT Debug {
// Message based interface. The message protocol is JSON.
static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
- V8_DEPRECATED("Use version with an Isolate",
- static void SetMessageHandler(MessageHandler handler));
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
@@ -199,9 +194,6 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
- static V8_DEPRECATED("Use maybe version",
- Local<Value> Call(v8::Local<v8::Function> fun,
- Local<Value> data = Local<Value>()));
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Local<v8::Function> fun,
@@ -210,8 +202,6 @@ class V8_EXPORT Debug {
/**
* Returns a mirror object for the given object.
*/
- static V8_DEPRECATED("Use maybe version",
- Local<Value> GetMirror(v8::Local<v8::Value> obj));
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj);
@@ -247,8 +237,6 @@ class V8_EXPORT Debug {
* of this method.
*/
static void ProcessDebugMessages(Isolate* isolate);
- V8_DEPRECATED("Use version with an Isolate",
- static void ProcessDebugMessages());
/**
* Debugger is running in its own context which is entered while debugger
@@ -258,9 +246,12 @@ class V8_EXPORT Debug {
* least one DebugEventListener or MessageHandler is set.
*/
static Local<Context> GetDebugContext(Isolate* isolate);
- V8_DEPRECATED("Use version with an Isolate",
- static Local<Context> GetDebugContext());
+ /**
+ * While in the debug context, this method returns the top-most non-debug
+ * context, if it exists.
+ */
+ static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate);
/**
* Enable/disable LiveEdit functionality for the given Isolate
diff --git a/deps/v8/include/v8-experimental.h b/deps/v8/include/v8-experimental.h
index 294ba647f0..1773345e09 100644
--- a/deps/v8/include/v8-experimental.h
+++ b/deps/v8/include/v8-experimental.h
@@ -31,13 +31,17 @@ class V8_EXPORT FastAccessorBuilder {
ValueId IntegerConstant(int int_constant);
ValueId GetReceiver();
ValueId LoadInternalField(ValueId value_id, int field_no);
+ ValueId LoadInternalFieldUnchecked(ValueId value_id, int field_no);
ValueId LoadValue(ValueId value_id, int offset);
ValueId LoadObject(ValueId value_id, int offset);
+ ValueId ToSmi(ValueId value_id);
+
void ReturnValue(ValueId value_id);
void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
void CheckNotZeroOrReturnNull(ValueId value_id);
LabelId MakeLabel();
void SetLabel(LabelId label_id);
+ void Goto(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
ValueId Call(v8::FunctionCallback callback, ValueId value_id);
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 11f8d51f02..4023a5b234 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -152,9 +152,9 @@ class Platform {
*/
virtual uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
- uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names,
- const uint8_t* arg_types, const uint64_t* arg_values,
- unsigned int flags) {
+ const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags) {
return 0;
}
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 007ae2eca5..bcb69f3763 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -46,6 +46,75 @@ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
namespace v8 {
+// TickSample captures the information collected for each sample.
+struct TickSample {
+ // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
+ // include the runtime function we're calling. Externally exposed tick
+ // samples don't care.
+ enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
+
+ TickSample()
+ : state(OTHER),
+ pc(nullptr),
+ external_callback_entry(nullptr),
+ frames_count(0),
+ has_external_callback(false),
+ update_stats(true) {}
+
+ /**
+ * Initialize a tick sample from the isolate.
+ * \param isolate The isolate.
+ * \param state Execution state.
+ * \param record_c_entry_frame Include or skip the runtime function.
+   * \param update_stats Whether to update the aggregated stats with the
+   *                     sample.
+ * \param use_simulator_reg_state When set to true and V8 is running under a
+ * simulator, the method will use the simulator
+ * register state rather than the one provided
+ * with |state| argument. Otherwise the method
+ * will use provided register |state| as is.
+ */
+ void Init(Isolate* isolate, const v8::RegisterState& state,
+ RecordCEntryFrame record_c_entry_frame, bool update_stats,
+ bool use_simulator_reg_state = true);
+ /**
+ * Get a call stack sample from the isolate.
+ * \param isolate The isolate.
+ * \param state Register state.
+ * \param record_c_entry_frame Include or skip the runtime function.
+ * \param frames Caller allocated buffer to store stack frames.
+ * \param frames_limit Maximum number of frames to capture. The buffer must
+ * be large enough to hold the number of frames.
+   * \param sample_info The sample info is filled in by the function; it
+   *        provides the number of actually captured stack frames and
+   *        the current VM state.
+ * \param use_simulator_reg_state When set to true and V8 is running under a
+ * simulator, the method will use the simulator
+ * register state rather than the one provided
+ * with |state| argument. Otherwise the method
+ * will use provided register |state| as is.
+ * \note GetStackSample is thread and signal safe and should only be called
+ * when the JS thread is paused or interrupted.
+ * Otherwise the behavior is undefined.
+ */
+ static bool GetStackSample(Isolate* isolate, v8::RegisterState* state,
+ RecordCEntryFrame record_c_entry_frame,
+ void** frames, size_t frames_limit,
+ v8::SampleInfo* sample_info,
+ bool use_simulator_reg_state = true);
+ StateTag state; // The state of the VM.
+ void* pc; // Instruction pointer.
+ union {
+ void* tos; // Top stack value (*sp).
+ void* external_callback_entry;
+ };
+ static const unsigned kMaxFramesCountLog2 = 8;
+ static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
+ void* stack[kMaxFramesCount]; // Call stack.
+ unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
+ bool has_external_callback : 1;
+ bool update_stats : 1; // Whether the sample should update aggregated stats.
+};
+
/**
* CpuProfileNode represents a node in a call graph.
*/
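Given the contract documented above, a sampler running while the JS thread is paused might look like the sketch below; capturing pc/sp/fp into the RegisterState is platform-specific and assumed already done by the caller.

    void CollectSample(v8::Isolate* isolate, v8::RegisterState* state) {
      void* frames[v8::TickSample::kMaxFramesCount];
      v8::SampleInfo info;
      if (v8::TickSample::GetStackSample(
              isolate, state, v8::TickSample::kIncludeCEntryFrame, frames,
              v8::TickSample::kMaxFramesCount, &info)) {
        // The first info.frames_count entries of |frames| are valid, and
        // info.vm_state reports what the VM was doing at sample time.
      }
    }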
@@ -103,7 +172,9 @@ class V8_EXPORT CpuProfileNode {
unsigned GetHitCount() const;
/** Returns function entry UID. */
- unsigned GetCallUid() const;
+ V8_DEPRECATE_SOON(
+ "Use GetScriptId, GetLineNumber, and GetColumnNumber instead.",
+ unsigned GetCallUid() const);
/** Returns id of the node. The id is unique within the tree */
unsigned GetNodeId() const;
@@ -173,14 +244,25 @@ class V8_EXPORT CpuProfile {
void Delete();
};
-
/**
* Interface for controlling CPU profiling. Instance of the
- * profiler can be retrieved using v8::Isolate::GetCpuProfiler.
+ * profiler can be created using v8::CpuProfiler::New method.
*/
class V8_EXPORT CpuProfiler {
public:
/**
+ * Creates a new CPU profiler for the |isolate|. The isolate must be
+ * initialized. The profiler object must be disposed after use by calling
+   * the |Dispose| method.
+ */
+ static CpuProfiler* New(Isolate* isolate);
+
+ /**
+ * Disposes the CPU profiler object.
+ */
+ void Dispose();
+
+ /**
* Changes default CPU profiler sampling interval to the specified number
* of microseconds. Default interval is 1000us. This method must be called
* when there are no profiles being recorded.
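Since profilers are now created and disposed explicitly rather than fetched from the isolate, the lifecycle runs roughly as in this sketch; SetSamplingInterval, StartProfiling, and StopProfiling are the pre-existing CpuProfiler entry points, and the title string is illustrative.

    v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
    profiler->SetSamplingInterval(1000);  // microseconds; set before recording
    v8::Local<v8::String> title =
        v8::String::NewFromUtf8(isolate, "startup", v8::NewStringType::kNormal)
            .ToLocalChecked();
    profiler->StartProfiling(title, /*record_samples=*/true);
    // ... code under measurement ...
    v8::CpuProfile* profile = profiler->StopProfiling(title);
    profile->Delete();    // release the recorded profile
    profiler->Dispose();  // required now that the isolate does not own it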
@@ -515,6 +597,11 @@ class V8_EXPORT AllocationProfile {
*/
class V8_EXPORT HeapProfiler {
public:
+ enum SamplingFlags {
+ kSamplingNoFlags = 0,
+ kSamplingForceGC = 1 << 0,
+ };
+
/**
* Callback function invoked for obtaining RetainedObjectInfo for
* the given JavaScript wrapper object. It is prohibited to enter V8
@@ -640,7 +727,8 @@ class V8_EXPORT HeapProfiler {
* Returns false if a sampling heap profiler is already running.
*/
bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
- int stack_depth = 16);
+ int stack_depth = 16,
+ SamplingFlags flags = kSamplingNoFlags);
/**
* Stops the sampling heap profile and discards the current profile.
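A sketch of opting into the new flag, with the documented default interval and depth spelled out (|isolate| assumed initialized; per the flag's name, kSamplingForceGC asks the profiler to force a GC so that the reported allocations reflect live objects — the exact semantics live in the implementation):

    v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
    if (heap_profiler->StartSamplingHeapProfiler(
            /*sample_interval=*/512 * 1024, /*stack_depth=*/16,
            v8::HeapProfiler::kSamplingForceGC)) {
      // Sampling is active until StopSamplingHeapProfiler is called.
    }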
@@ -688,7 +776,6 @@ class V8_EXPORT HeapProfiler {
HeapProfiler& operator=(const HeapProfiler&);
};
-
/**
* Interface for providing information about embedder's objects
* held by global handles. This information is reported in two ways:
@@ -703,7 +790,7 @@ class V8_EXPORT HeapProfiler {
* were not previously reported via AddObjectGroup.
*
* Thus, if an embedder wants to provide information about native
- * objects for heap snapshots, he can do it in a GC prologue
+ * objects for heap snapshots, it can do it in a GC prologue
* handler, and / or by assigning wrapper class ids in the following way:
*
* 1. Bind a callback to class id by calling SetWrapperClassInfoProvider.
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 73ec658f7b..8133fdd49d 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -95,12 +95,12 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
MapType* map, const K& key, Local<V> value) {
return NULL;
}
- static MapType* MapFromWeakCallbackData(
- const WeakCallbackData<V, WeakCallbackDataType>& data) {
+ static MapType* MapFromWeakCallbackInfo(
+ const WeakCallbackInfo<WeakCallbackDataType>& data) {
return NULL;
}
- static K KeyFromWeakCallbackData(
- const WeakCallbackData<V, WeakCallbackDataType>& data) {
+ static K KeyFromWeakCallbackInfo(
+ const WeakCallbackInfo<WeakCallbackDataType>& data) {
return K();
}
static void DisposeCallbackData(WeakCallbackDataType* data) { }
@@ -206,6 +206,17 @@ class PersistentValueMapBase {
}
/**
+ * Call V8::RegisterExternallyReferencedObject with the map value for given
+ * key.
+ */
+ void RegisterExternallyReferencedObject(K& key) {
+ DCHECK(Contains(key));
+ V8::RegisterExternallyReferencedObject(
+ reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
+ reinterpret_cast<internal::Isolate*>(GetIsolate()));
+ }
+
+ /**
* Return value for key and remove it from the map.
*/
Global<V> Remove(const K& key) {
@@ -402,11 +413,11 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
private:
static void WeakCallback(
- const WeakCallbackData<V, typename Traits::WeakCallbackDataType>& data) {
+ const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
PersistentValueMap<K, V, Traits>* persistentValueMap =
- Traits::MapFromWeakCallbackData(data);
- K key = Traits::KeyFromWeakCallbackData(data);
+ Traits::MapFromWeakCallbackInfo(data);
+ K key = Traits::KeyFromWeakCallbackInfo(data);
Traits::Dispose(data.GetIsolate(),
persistentValueMap->Remove(key).Pass(), key);
Traits::DisposeCallbackData(data.GetParameter());
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 9f61de8293..3edc10eb65 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 1
-#define V8_BUILD_NUMBER 281
-#define V8_PATCH_LEVEL 82
+#define V8_MINOR_VERSION 4
+#define V8_BUILD_NUMBER 500
+#define V8_PATCH_LEVEL 27
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 8b7b7c2cc4..d7e39adbae 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -18,6 +18,7 @@
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
+#include <memory>
#include <utility>
#include <vector>
@@ -292,8 +293,8 @@ class Local {
return Local<T>(T::Cast(*that));
}
-
- template <class S> V8_INLINE Local<S> As() {
+ template <class S>
+ V8_INLINE Local<S> As() const {
return Local<S>::Cast(*this);
}
@@ -457,32 +458,12 @@ class WeakCallbackInfo {
};
-template <class T, class P>
-class WeakCallbackData {
- public:
- typedef void (*Callback)(const WeakCallbackData<T, P>& data);
-
- WeakCallbackData(Isolate* isolate, P* parameter, Local<T> handle)
- : isolate_(isolate), parameter_(parameter), handle_(handle) {}
-
- V8_INLINE Isolate* GetIsolate() const { return isolate_; }
- V8_INLINE P* GetParameter() const { return parameter_; }
- V8_INLINE Local<T> GetValue() const { return handle_; }
-
- private:
- Isolate* isolate_;
- P* parameter_;
- Local<T> handle_;
-};
-
-
-// TODO(dcarney): delete this with WeakCallbackData
-template <class T>
-using PhantomCallbackData = WeakCallbackInfo<T>;
-
-
-enum class WeakCallbackType { kParameter, kInternalFields };
-
+// kParameter will pass a void* parameter back to the callback, kInternalFields
+// will pass the first two internal fields back to the callback, kFinalizer
+// will pass a void* parameter back, but is invoked before the object is
+// actually collected, so it can be resurrected. In the last case, it is not
+// possible to request a second pass callback.
+enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
/**
* An object reference that is independent of any handle scope. Where
@@ -561,36 +542,19 @@ template <class T> class PersistentBase {
* critical form of resource management!
*/
template <typename P>
- V8_INLINE V8_DEPRECATED(
- "use WeakCallbackInfo version",
- void SetWeak(P* parameter,
- typename WeakCallbackData<T, P>::Callback callback));
-
- template <typename S, typename P>
- V8_INLINE V8_DEPRECATED(
- "use WeakCallbackInfo version",
- void SetWeak(P* parameter,
- typename WeakCallbackData<S, P>::Callback callback));
-
- // Phantom persistents work like weak persistents, except that the pointer to
- // the object being collected is not available in the finalization callback.
- // This enables the garbage collector to collect the object and any objects
- // it references transitively in one GC cycle. At the moment you can either
- // specify a parameter for the callback or the location of two internal
- // fields in the dying object.
- template <typename P>
- V8_INLINE V8_DEPRECATED(
- "use SetWeak",
- void SetPhantom(P* parameter,
- typename WeakCallbackInfo<P>::Callback callback,
- int internal_field_index1 = -1,
- int internal_field_index2 = -1));
-
- template <typename P>
V8_INLINE void SetWeak(P* parameter,
typename WeakCallbackInfo<P>::Callback callback,
WeakCallbackType type);
+ /**
+   * Turns this handle into a weak phantom handle without a finalization
+   * callback.
+ * The handle will be reset automatically when the garbage collector detects
+ * that the object is no longer reachable.
+ * A related function Isolate::NumberOfPhantomHandleResetsSinceLastCall
+ * returns how many phantom handles were reset by the garbage collector.
+ */
+ V8_INLINE void SetWeak();
+
template<typename P>
V8_INLINE P* ClearWeak();
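A minimal sketch of the new phantom variant (|some_object| stands for any local object handle in scope):

    v8::Persistent<v8::Object> holder(isolate, some_object);
    holder.SetWeak();  // no callback; reset automatically when unreachable
    // After a GC collects the object, holder.IsEmpty() is true, and
    // Isolate::NumberOfPhantomHandleResetsSinceLastCall counts the reset.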
@@ -602,7 +566,7 @@ template <class T> class PersistentBase {
* is alive. Only allowed when the embedder is asked to trace its heap by
* EmbedderHeapTracer.
*/
- V8_INLINE void RegisterExternalReference(Isolate* isolate);
+ V8_INLINE void RegisterExternalReference(Isolate* isolate) const;
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -620,7 +584,9 @@ template <class T> class PersistentBase {
* external dependencies. This mark is automatically cleared after each
* garbage collection.
*/
- V8_INLINE void MarkPartiallyDependent();
+ V8_INLINE V8_DEPRECATED(
+ "deprecated optimization, do not use partially dependent groups",
+ void MarkPartiallyDependent());
/**
* Marks the reference to this object as active. The scavenge garbage
@@ -778,17 +744,18 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
// TODO(dcarney): this is pretty useless, fix or remove
template <class S>
- V8_INLINE static Persistent<T>& Cast(Persistent<S>& that) { // NOLINT
+ V8_INLINE static Persistent<T>& Cast(const Persistent<S>& that) { // NOLINT
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
if (!that.IsEmpty()) T::Cast(*that);
#endif
- return reinterpret_cast<Persistent<T>&>(that);
+ return reinterpret_cast<Persistent<T>&>(const_cast<Persistent<S>&>(that));
}
// TODO(dcarney): this is pretty useless, fix or remove
- template <class S> V8_INLINE Persistent<S>& As() { // NOLINT
+ template <class S>
+ V8_INLINE Persistent<S>& As() const { // NOLINT
return Persistent<S>::Cast(*this);
}
@@ -991,7 +958,7 @@ class V8_EXPORT SealHandleScope {
void* operator new(size_t size);
void operator delete(void*, size_t);
- internal::Isolate* isolate_;
+ internal::Isolate* const isolate_;
internal::Object** prev_limit_;
int prev_sealed_level_;
};
@@ -1648,26 +1615,25 @@ class V8_EXPORT StackFrame {
// A StateTag represents a possible state of the VM.
enum StateTag { JS, GC, COMPILER, OTHER, EXTERNAL, IDLE };
-
// A RegisterState represents the current state of registers used
// by the sampling profiler API.
struct RegisterState {
- RegisterState() : pc(NULL), sp(NULL), fp(NULL) {}
+ RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr) {}
void* pc; // Instruction pointer.
void* sp; // Stack pointer.
void* fp; // Frame pointer.
};
-
// The output structure filled up by GetStackSample API function.
struct SampleInfo {
- size_t frames_count;
- StateTag vm_state;
+ size_t frames_count; // Number of frames collected.
+ StateTag vm_state; // Current VM state.
+ void* external_callback_entry; // External callback address if VM is
+ // executing an external callback.
};
-
/**
- * A JSON Parser.
+ * A JSON Parser and Stringifier.
*/
class V8_EXPORT JSON {
public:
@@ -1678,10 +1644,24 @@ class V8_EXPORT JSON {
* \param json_string The string to parse.
* \return The corresponding value if successfully parsed.
*/
- static V8_DEPRECATED("Use maybe version",
+ static V8_DEPRECATED("Use the maybe version taking context",
Local<Value> Parse(Local<String> json_string));
+ static V8_DEPRECATE_SOON("Use the maybe version taking context",
+ MaybeLocal<Value> Parse(Isolate* isolate,
+ Local<String> json_string));
static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
- Isolate* isolate, Local<String> json_string);
+ Local<Context> context, Local<String> json_string);
+
+ /**
+ * Tries to stringify the JSON-serializable object |json_object| and returns
+ * it as string if successful.
+ *
+ * \param json_object The JSON-serializable object to stringify.
+ * \return The corresponding string if successfully stringified.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> Stringify(
+ Local<Context> context, Local<Object> json_object,
+ Local<String> gap = Local<String>());
};
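The context-taking Parse plus the new Stringify give a full round trip; a sketch with error handling collapsed into ToLocalChecked (|isolate| and |context| assumed in scope):

    v8::Local<v8::String> source =
        v8::String::NewFromUtf8(isolate, "{\"answer\":42}",
                                v8::NewStringType::kNormal).ToLocalChecked();
    v8::Local<v8::Value> parsed =
        v8::JSON::Parse(context, source).ToLocalChecked();
    v8::Local<v8::String> back =
        v8::JSON::Stringify(context, parsed.As<v8::Object>()).ToLocalChecked();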
@@ -1973,6 +1953,7 @@ class V8_EXPORT Value : public Data {
*/
bool IsProxy() const;
+ bool IsWebAssemblyCompiledModule() const;
V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean(
Local<Context> context) const;
@@ -2050,6 +2031,8 @@ class V8_EXPORT Value : public Data {
template <class T> V8_INLINE static Value* Cast(T* value);
+ Local<String> TypeOf(v8::Isolate*);
+
private:
V8_INLINE bool QuickIsUndefined() const;
V8_INLINE bool QuickIsNull() const;
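The new TypeOf mirrors the JavaScript typeof operator on the C++ side; a quick sketch (|value| is any Local<Value>):

    v8::Local<v8::String> type = value->TypeOf(isolate);
    // e.g. "number" for a Number, "function" for a Function.
    if (value->IsWebAssemblyCompiledModule()) {
      // |value| may be cast to the WasmCompiledModule type added further down.
    }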
@@ -2640,6 +2623,33 @@ enum AccessControl {
};
/**
+ * Property filter bits. They can be or'ed to build a composite filter.
+ */
+enum PropertyFilter {
+ ALL_PROPERTIES = 0,
+ ONLY_WRITABLE = 1,
+ ONLY_ENUMERABLE = 2,
+ ONLY_CONFIGURABLE = 4,
+ SKIP_STRINGS = 8,
+ SKIP_SYMBOLS = 16
+};
+
+/**
+ * Keys/Properties filter enums:
+ *
+ * KeyCollectionMode limits the range of collected properties. kOwnOnly limits
+ * the collected properties to the given Object only. kIncludePrototypes will
+ * include all keys of the object's prototype chain as well.
+ */
+enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
+
+/**
+ * kIncludeIndices allows integer indices to be collected, while
+ * kSkipIndices will exclude integer indices from being collected.
+ */
+enum class IndexFilter { kIncludeIndices, kSkipIndices };
+
+/**
* Integrity level for objects.
*/
enum class IntegrityLevel { kFrozen, kSealed };
@@ -2788,6 +2798,9 @@ class V8_EXPORT Object : public Value {
V8_DEPRECATE_SOON("Use maybe version", Local<Array> GetPropertyNames());
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
Local<Context> context);
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
+ Local<Context> context, KeyCollectionMode mode,
+ PropertyFilter property_filter, IndexFilter index_filter);
/**
* This function has the same functionality as GetPropertyNames but
@@ -2799,6 +2812,15 @@ class V8_EXPORT Object : public Value {
Local<Context> context);
/**
+ * Returns an array containing the names of the filtered properties
+ * of this object, including properties from prototype objects. The
+ * array returned by this method contains the same values as would
+ * be enumerated by a for-in statement over this object.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
+ Local<Context> context, PropertyFilter filter);
+
+ /**
* Get the prototype object. This does not skip objects marked to
* be skipped by __proto__ and it does not consult the security
* handler.
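For example, collecting only the enumerable string-named own keys under the new filters, then the full prototype walk (a sketch; |obj| and |context| assumed in scope):

    v8::Local<v8::Array> own_names;
    if (obj->GetOwnPropertyNames(
               context, static_cast<v8::PropertyFilter>(
                            v8::ONLY_ENUMERABLE | v8::SKIP_SYMBOLS))
            .ToLocal(&own_names)) {
      // |own_names| holds the filtered own property names.
    }

    v8::MaybeLocal<v8::Array> all_names = obj->GetPropertyNames(
        context, v8::KeyCollectionMode::kIncludePrototypes,
        v8::ALL_PROPERTIES, v8::IndexFilter::kIncludeIndices);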
@@ -2873,11 +2895,15 @@ class V8_EXPORT Object : public Value {
* leads to undefined behavior.
*/
void SetAlignedPointerInInternalField(int index, void* value);
+ void SetAlignedPointerInInternalFields(int argc, int indices[],
+ void* values[]);
// Testers for local properties.
V8_DEPRECATED("Use maybe version", bool HasOwnProperty(Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
Local<Name> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
+ uint32_t index);
V8_DEPRECATE_SOON("Use maybe version",
bool HasRealNamedProperty(Local<String> key));
V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
@@ -2950,13 +2976,6 @@ class V8_EXPORT Object : public Value {
*/
int GetIdentityHash();
- V8_DEPRECATED("Use v8::Object::SetPrivate instead.",
- bool SetHiddenValue(Local<String> key, Local<Value> value));
- V8_DEPRECATED("Use v8::Object::GetPrivate instead.",
- Local<Value> GetHiddenValue(Local<String> key));
- V8_DEPRECATED("Use v8::Object::DeletePrivate instead.",
- bool DeleteHiddenValue(Local<String> key));
-
/**
* Clone this object with a fast but shallow copy. Values will point
* to the same values as the original object.
@@ -2977,6 +2996,11 @@ class V8_EXPORT Object : public Value {
bool IsCallable();
/**
+ * True if this object is a constructor.
+ */
+ bool IsConstructor();
+
+ /**
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
@@ -3177,12 +3201,13 @@ class FunctionCallbackInfo {
Local<Function> Callee() const);
V8_INLINE Local<Object> This() const;
V8_INLINE Local<Object> Holder() const;
+ V8_INLINE Local<Value> NewTarget() const;
V8_INLINE bool IsConstructCall() const;
V8_INLINE Local<Value> Data() const;
V8_INLINE Isolate* GetIsolate() const;
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 7;
+ static const int kArgsLength = 8;
protected:
friend class internal::FunctionCallbackArguments;
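A sketch of reading new.target from a callback via the accessor added above:

    void Construct(const v8::FunctionCallbackInfo<v8::Value>& info) {
      if (info.IsConstructCall()) {
        // new.target as seen by the calling JavaScript code.
        v8::Local<v8::Value> new_target = info.NewTarget();
      }
      info.GetReturnValue().Set(info.This());
    }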
@@ -3194,15 +3219,13 @@ class FunctionCallbackInfo {
static const int kDataIndex = 4;
static const int kCalleeIndex = 5;
static const int kContextSaveIndex = 6;
+ static const int kNewTargetIndex = 7;
V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
- internal::Object** values,
- int length,
- bool is_construct_call);
+ internal::Object** values, int length);
internal::Object** implicit_args_;
internal::Object** values_;
int length_;
- int is_construct_call_;
};
@@ -3252,15 +3275,10 @@ class V8_EXPORT Function : public Object {
* Create a function in the current execution context
* for a given FunctionCallback.
*/
- static MaybeLocal<Function> New(Local<Context> context,
- FunctionCallback callback,
- Local<Value> data = Local<Value>(),
- int length = 0);
- static MaybeLocal<Function> New(Local<Context> context,
- FunctionCallback callback,
- Local<Value> data,
- int length,
- ConstructorBehavior behavior);
+ static MaybeLocal<Function> New(
+ Local<Context> context, FunctionCallback callback,
+ Local<Value> data = Local<Value>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow);
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<Function> New(Isolate* isolate, FunctionCallback callback,
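With the two overloads folded into one, constructor behavior is selected at creation time; a sketch using the non-default value (MyCallback is a placeholder FunctionCallback, and kThrow is the counterpart of the kAllow default shown above):

    v8::Local<v8::Function> fn;
    if (v8::Function::New(context, MyCallback, /*data=*/v8::Local<v8::Value>(),
                          /*length=*/0, v8::ConstructorBehavior::kThrow)
            .ToLocal(&fn)) {
      // Invoking |fn| with `new` from JavaScript will now throw.
    }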
@@ -3390,12 +3408,6 @@ class V8_EXPORT Promise : public Object {
* an argument. If the promise is already resolved/rejected, the handler is
* invoked at the end of turn.
*/
- V8_DEPRECATED("Use maybe version of Then",
- Local<Promise> Chain(Local<Function> handler));
- V8_DEPRECATED("Use Then",
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Chain(
- Local<Context> context, Local<Function> handler));
-
V8_DEPRECATED("Use maybe version",
Local<Promise> Catch(Local<Function> handler));
V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
@@ -3445,6 +3457,19 @@ class V8_EXPORT Proxy : public Object {
static void CheckCast(Value* obj);
};
+class V8_EXPORT WasmCompiledModule : public Object {
+ public:
+ typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
+
+ SerializedModule Serialize();
+ static MaybeLocal<WasmCompiledModule> Deserialize(
+ Isolate* isolate, const SerializedModule& serialized_data);
+ V8_INLINE static WasmCompiledModule* Cast(Value* obj);
+
+ private:
+ WasmCompiledModule();
+ static void CheckCast(Value* obj);
+};
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
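The Serialize/Deserialize pair above round-trips a compiled module through a raw byte buffer; a sketch (|module| assumed to be a Local<WasmCompiledModule> already in hand):

    v8::WasmCompiledModule::SerializedModule blob = module->Serialize();
    // blob.first: std::unique_ptr<const uint8_t[]>; blob.second: byte length.

    v8::Local<v8::WasmCompiledModule> restored;
    if (v8::WasmCompiledModule::Deserialize(isolate, blob).ToLocal(&restored)) {
      // |restored| is a usable compiled module again.
    }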
@@ -3462,11 +3487,19 @@ enum class ArrayBufferCreationMode { kInternalized, kExternalized };
class V8_EXPORT ArrayBuffer : public Object {
public:
/**
- * Allocator that V8 uses to allocate |ArrayBuffer|'s memory.
+ * A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
* The allocator is a global V8 setting. It has to be set via
* Isolate::CreateParams.
*
- * This API is experimental and may change significantly.
+ * Memory allocated through this allocator by V8 is accounted for as external
+ * memory by V8. Note that V8 keeps track of the memory for all internalized
+ * |ArrayBuffer|s. Responsibility for tracking external memory (using
+ * Isolate::AdjustAmountOfExternalAllocatedMemory) is handed over to the
+ * embedder upon externalization and taken over upon internalization (creating
+ * an internalized buffer from an existing buffer).
+ *
+ * Note that it is unsafe to call back into V8 from any of the allocator
+ * functions.
*/
class V8_EXPORT Allocator { // NOLINT
public:
@@ -3483,11 +3516,19 @@ class V8_EXPORT ArrayBuffer : public Object {
* Memory does not have to be initialized.
*/
virtual void* AllocateUninitialized(size_t length) = 0;
+
/**
* Free the memory block of size |length|, pointed to by |data|.
* That memory is guaranteed to be previously allocated by |Allocate|.
*/
virtual void Free(void* data, size_t length) = 0;
+
+ /**
+ * malloc/free based convenience allocator.
+ *
+ * Caller takes ownership.
+ */
+ static Allocator* NewDefaultAllocator();
};
/**
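With the convenience allocator, standard isolate setup shrinks to a few lines; a sketch following the usual Isolate::CreateParams flow (ownership stays with the caller, as documented above):

    v8::Isolate::CreateParams create_params;
    create_params.array_buffer_allocator =
        v8::ArrayBuffer::Allocator::NewDefaultAllocator();
    v8::Isolate* isolate = v8::Isolate::New(create_params);
    // ... embedder work ...
    isolate->Dispose();
    delete create_params.array_buffer_allocator;  // caller-owned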
@@ -3560,7 +3601,7 @@ class V8_EXPORT ArrayBuffer : public Object {
/**
* Make this ArrayBuffer external. The pointer to underlying memory block
* and byte length are returned as |Contents| structure. After ArrayBuffer
- * had been etxrenalized, it does no longer owns the memory block. The caller
+ * had been externalized, it does no longer own the memory block. The caller
* should take steps to free memory when it is no longer needed.
*
* The memory block is guaranteed to be allocated with |Allocator::Allocate|
@@ -3571,7 +3612,7 @@ class V8_EXPORT ArrayBuffer : public Object {
/**
* Get a pointer to the ArrayBuffer's underlying memory block without
* externalizing it. If the ArrayBuffer is not externalized, this pointer
- * will become invalid as soon as the ArrayBuffer became garbage collected.
+ * will become invalid as soon as the ArrayBuffer gets garbage collected.
*
* The embedder should make sure to hold a strong reference to the
* ArrayBuffer while accessing this pointer.
@@ -3623,7 +3664,7 @@ class V8_EXPORT ArrayBufferView : public Object {
* might incur.
*
* Will write at most min(|byte_length|, ByteLength) bytes starting at
- * ByteOffset of the underling buffer to the memory starting at |dest|.
+ * ByteOffset of the underlying buffer to the memory starting at |dest|.
* Returns the number of bytes actually written.
*/
size_t CopyContents(void* dest, size_t byte_length);
@@ -3910,7 +3951,7 @@ class V8_EXPORT SharedArrayBuffer : public Object {
/**
* Make this SharedArrayBuffer external. The pointer to underlying memory
* block and byte length are returned as |Contents| structure. After
- * SharedArrayBuffer had been etxrenalized, it does no longer owns the memory
+ * SharedArrayBuffer had been externalized, it does no longer own the memory
* block. The caller should take steps to free memory when it is no longer
* needed.
*
@@ -4133,7 +4174,11 @@ enum Intrinsic {
*/
class V8_EXPORT Template : public Data {
public:
- /** Adds a property to each instance created by this template.*/
+ /**
+ * Adds a property to each instance created by this template.
+ *
+ * The property must be defined either as a primitive value, or a template.
+ */
void Set(Local<Name> name, Local<Data> value,
PropertyAttribute attributes = None);
V8_INLINE void Set(Isolate* isolate, const char* name, Local<Data> value);
@@ -4359,28 +4404,6 @@ enum AccessType {
typedef bool (*AccessCheckCallback)(Local<Context> accessing_context,
Local<Object> accessed_object,
Local<Value> data);
-typedef bool (*DeprecatedAccessCheckCallback)(Local<Context> accessing_context,
- Local<Object> accessed_object);
-
-/**
- * Returns true if cross-context access should be allowed to the named
- * property with the given key on the host object.
- */
-typedef bool (*NamedSecurityCallback)(Local<Object> host,
- Local<Value> key,
- AccessType type,
- Local<Value> data);
-
-
-/**
- * Returns true if cross-context access should be allowed to the indexed
- * property with the given index on the host object.
- */
-typedef bool (*IndexedSecurityCallback)(Local<Object> host,
- uint32_t index,
- AccessType type,
- Local<Value> data);
-
/**
* A FunctionTemplate is used to create functions at runtime. There
@@ -4391,7 +4414,7 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host,
* preferred.
*
* Any modification of a FunctionTemplate after first instantiation will trigger
- *a crash.
+ * a crash.
*
* A FunctionTemplate can have properties, these properties are added to the
* function object when it is created.
@@ -4407,17 +4430,21 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host,
* The following example shows how to use a FunctionTemplate:
*
* \code
- * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
- * t->Set("func_property", v8::Number::New(1));
+ * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ * t->Set(isolate, "func_property", v8::Number::New(isolate, 1));
*
* v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
- * proto_t->Set("proto_method", v8::FunctionTemplate::New(InvokeCallback));
- * proto_t->Set("proto_const", v8::Number::New(2));
+ * proto_t->Set(isolate,
+ * "proto_method",
+ * v8::FunctionTemplate::New(isolate, InvokeCallback));
+ * proto_t->Set(isolate, "proto_const", v8::Number::New(isolate, 2));
*
* v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
- * instance_t->SetAccessor("instance_accessor", InstanceAccessorCallback);
- * instance_t->SetNamedPropertyHandler(PropertyHandlerCallback, ...);
- * instance_t->Set("instance_property", Number::New(3));
+ * instance_t->SetAccessor(String::NewFromUtf8(isolate, "instance_accessor"),
+ * InstanceAccessorCallback);
+ * instance_t->SetNamedPropertyHandler(PropertyHandlerCallback);
+ * instance_t->Set(String::NewFromUtf8(isolate, "instance_property"),
+ * Number::New(isolate, 3));
*
* v8::Local<v8::Function> function = t->GetFunction();
* v8::Local<v8::Object> instance = function->NewInstance();
@@ -4483,10 +4510,12 @@ class V8_EXPORT FunctionTemplate : public Template {
static Local<FunctionTemplate> New(
Isolate* isolate, FunctionCallback callback = 0,
Local<Value> data = Local<Value>(),
- Local<Signature> signature = Local<Signature>(), int length = 0);
- static Local<FunctionTemplate> New(
- Isolate* isolate, FunctionCallback callback, Local<Value> data,
- Local<Signature> signature, int length, ConstructorBehavior behavior);
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow);
+
+ /** Get a template included in the snapshot by index. */
+ static MaybeLocal<FunctionTemplate> FromSnapshot(Isolate* isolate,
+ size_t index);
/**
* Creates a function template with a fast handler. If a fast handler is set,
@@ -4504,6 +4533,15 @@ class V8_EXPORT FunctionTemplate : public Template {
Local<Context> context);
/**
+ * Similar to Context::NewRemoteContext, this creates an instance that
+ * isn't backed by an actual object.
+ *
+ * The InstanceTemplate of this FunctionTemplate must have access checks with
+ * handlers installed.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewRemoteInstance();
+
+ /**
* Set the call-handler callback for a FunctionTemplate. This
* callback is called whenever the function created from this
* FunctionTemplate is called.
@@ -4663,6 +4701,10 @@ class V8_EXPORT ObjectTemplate : public Template {
Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
static V8_DEPRECATED("Use isolate version", Local<ObjectTemplate> New());
+ /** Get a template included in the snapshot by index. */
+ static MaybeLocal<ObjectTemplate> FromSnapshot(Isolate* isolate,
+ size_t index);
+
/** Creates a new instance of this template.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance());
V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(Local<Context> context);
@@ -4714,8 +4756,10 @@ class V8_EXPORT ObjectTemplate : public Template {
* from this object template, the provided callback is invoked instead of
* accessing the property directly on the JavaScript object.
*
- * Note that new code should use the second version that can intercept
- * symbol-named properties as well as string-named properties.
+ * SetNamedPropertyHandler() is different from SetHandler(), in
+ * that the latter can intercept symbol-named properties as well as
+ * string-named properties when called with a
+ * NamedPropertyHandlerConfiguration. New code should use SetHandler().
*
* \param getter The callback to invoke when getting a property.
* \param setter The callback to invoke when setting a property.
@@ -4734,6 +4778,18 @@ class V8_EXPORT ObjectTemplate : public Template {
NamedPropertyDeleterCallback deleter = 0,
NamedPropertyEnumeratorCallback enumerator = 0,
Local<Value> data = Local<Value>());
+
+ /**
+ * Sets a named property handler on the object template.
+ *
+ * Whenever a property whose name is a string or a symbol is accessed on
+ * objects created from this object template, the provided callback is
+ * invoked instead of accessing the property directly on the JavaScript
+ * object.
+ *
+ * \param configuration The NamedPropertyHandlerConfiguration that defines
+ * the callbacks to invoke when accessing a property.
+ */
void SetHandler(const NamedPropertyHandlerConfiguration& configuration);
/**
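
A sketch of the recommended SetHandler() path, assuming a getter that only answers string-named lookups and lets symbol lookups fall through:

    #include "v8.h"

    void NamedGetter(v8::Local<v8::Name> name,
                     const v8::PropertyCallbackInfo<v8::Value>& info) {
      if (name->IsSymbol()) return;  // Fall through for symbols.
      info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), 7));
    }

    void InstallHandler(v8::Local<v8::ObjectTemplate> tmpl) {
      tmpl->SetHandler(v8::NamedPropertyHandlerConfiguration(NamedGetter));
    }
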
@@ -4752,7 +4808,6 @@ class V8_EXPORT ObjectTemplate : public Template {
* \param data A piece of data that will be passed to the callbacks
* whenever they are invoked.
*/
- void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
// TODO(dcarney): deprecate
void SetIndexedPropertyHandler(
IndexedPropertyGetterCallback getter,
@@ -4764,6 +4819,19 @@ class V8_EXPORT ObjectTemplate : public Template {
SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
deleter, enumerator, data));
}
+
+ /**
+ * Sets an indexed property handler on the object template.
+ *
+ * Whenever an indexed property is accessed on objects created from
+ * this object template, the provided callback is invoked instead of
+ * accessing the property directly on the JavaScript object.
+ *
+ * \param configuration The IndexedPropertyHandlerConfiguration that defines
+ * the callbacks to invoke when accessing a property.
+ */
+ void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
+
/**
* Sets the callback to be used when calling instances created from
* this template as a function. If no callback is set, instances
@@ -4793,16 +4861,18 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
void SetAccessCheckCallback(AccessCheckCallback callback,
Local<Value> data = Local<Value>());
- V8_DEPRECATED(
- "Use SetAccessCheckCallback with new AccessCheckCallback signature.",
- void SetAccessCheckCallback(DeprecatedAccessCheckCallback callback,
- Local<Value> data = Local<Value>()));
- V8_DEPRECATED(
- "Use SetAccessCheckCallback instead",
- void SetAccessCheckCallbacks(NamedSecurityCallback named_handler,
- IndexedSecurityCallback indexed_handler,
- Local<Value> data = Local<Value>()));
+ /**
+ * Like SetAccessCheckCallback but invokes an interceptor on failed access
+ * checks instead of looking up all-can-read properties. You can only use
+ * either this method or SetAccessCheckCallback, but not both at the same
+ * time.
+ */
+ void SetAccessCheckCallbackAndHandler(
+ AccessCheckCallback callback,
+ const NamedPropertyHandlerConfiguration& named_handler,
+ const IndexedPropertyHandlerConfiguration& indexed_handler,
+ Local<Value> data = Local<Value>());
/**
* Gets the number of internal fields for objects generated from
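
A sketch of an AccessCheckCallback matching the typedef kept earlier in this diff; the token-comparison policy shown is one common embedder choice, not the only one:

    #include "v8.h"

    bool AccessCheck(v8::Local<v8::Context> accessing_context,
                     v8::Local<v8::Object> accessed_object,
                     v8::Local<v8::Value> data) {
      // Allow access only between contexts sharing a security token.
      v8::Local<v8::Context> accessed_context =
          accessed_object->CreationContext();
      return accessing_context->GetSecurityToken()->StrictEquals(
          accessed_context->GetSecurityToken());
    }
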
@@ -4816,6 +4886,17 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
void SetInternalFieldCount(int value);
+ /**
+ * Returns true if the object will be an immutable prototype exotic object.
+ */
+ bool IsImmutableProto();
+
+ /**
+ * Makes the ObjectTemplate for an immutable prototype exotic object, with an
+ * immutable __proto__.
+ */
+ void SetImmutableProto();
+
private:
ObjectTemplate();
static Local<ObjectTemplate> New(internal::Isolate* isolate,
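
A minimal sketch of the new immutable-__proto__ knob:

    #include "v8.h"

    v8::Local<v8::ObjectTemplate> MakeTemplate(v8::Isolate* isolate) {
      v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
      tmpl->SetImmutableProto();  // Instances reject __proto__ mutation.
      return tmpl;
    }
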
@@ -4982,6 +5063,7 @@ class V8_EXPORT ResourceConstraints {
typedef void (*FatalErrorCallback)(const char* location, const char* message);
+typedef void (*OOMErrorCallback)(const char* location, bool is_heap_oom);
typedef void (*MessageCallback)(Local<Message> message, Local<Value> error);
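
A sketch of a handler matching the new OOMErrorCallback typedef; it would be installed through Isolate::SetOOMErrorHandler, added further down in this diff:

    #include <cstdio>
    #include <cstdlib>

    void OnOOM(const char* location, bool is_heap_oom) {
      fprintf(stderr, "V8 OOM at %s (%s)\n", location,
              is_heap_oom ? "js heap" : "process");
      abort();  // Embedders typically abort here.
    }
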
@@ -5047,10 +5129,6 @@ enum ObjectSpace {
kAllocationActionAll = kAllocationActionAllocate | kAllocationActionFree
};
-typedef void (*MemoryAllocationCallback)(ObjectSpace space,
- AllocationAction action,
- int size);
-
// --- Enter/Leave Script Callback ---
typedef void (*BeforeCallEnteredCallback)(Isolate*);
typedef void (*CallCompletedCallback)(Isolate*);
@@ -5130,6 +5208,11 @@ class V8_EXPORT MicrotasksScope {
*/
static int GetCurrentDepth(Isolate* isolate);
+ /**
+ * Returns true while microtasks are being executed.
+ */
+ static bool IsRunningMicrotasks(Isolate* isolate);
+
private:
internal::Isolate* const isolate_;
bool run_;
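
A sketch of the kind of guard the new query enables, assuming an embedder API that must not be re-entered from a microtask:

    #include <cassert>
    #include "v8.h"

    void AssertNotInMicrotask(v8::Isolate* isolate) {
      assert(!v8::MicrotasksScope::IsRunningMicrotasks(isolate));
    }
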
@@ -5189,6 +5272,7 @@ enum GCCallbackFlags {
kGCCallbackFlagForced = 1 << 2,
kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3,
kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
+ kGCCallbackFlagCollectAllExternalMemory = 1 << 5,
};
typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
@@ -5211,6 +5295,8 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size() { return total_available_size_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
+ size_t malloced_memory() { return malloced_memory_; }
+ size_t peak_malloced_memory() { return peak_malloced_memory_; }
size_t does_zap_garbage() { return does_zap_garbage_; }
private:
@@ -5220,6 +5306,8 @@ class V8_EXPORT HeapStatistics {
size_t total_available_size_;
size_t used_heap_size_;
size_t heap_size_limit_;
+ size_t malloced_memory_;
+ size_t peak_malloced_memory_;
bool does_zap_garbage_;
friend class V8;
@@ -5264,6 +5352,18 @@ class V8_EXPORT HeapObjectStatistics {
friend class Isolate;
};
+class V8_EXPORT HeapCodeStatistics {
+ public:
+ HeapCodeStatistics();
+ size_t code_and_metadata_size() { return code_and_metadata_size_; }
+ size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; }
+
+ private:
+ size_t code_and_metadata_size_;
+ size_t bytecode_and_metadata_size_;
+
+ friend class Isolate;
+};
class RetainedObjectInfo;
@@ -5347,6 +5447,28 @@ struct JitCodeEvent {
};
/**
+ * Option flags passed to the SetRAILMode function.
+ * See documentation at https://developers.google.com/web/tools/chrome-devtools/
+ * profile/evaluate-performance/rail
+ */
+enum RAILMode {
+ // Response performance mode: In this mode very low virtual machine latency
+ // is provided. V8 will try to avoid JavaScript execution interruptions.
+ // Throughput may be throttled.
+ PERFORMANCE_RESPONSE,
+ // Animation performance mode: In this mode low virtual machine latency is
+ // provided. V8 will try to avoid as many JavaScript execution interruptions
+ // as possible. Throughput may be throttled. This is the default mode.
+ PERFORMANCE_ANIMATION,
+ // Idle performance mode: The embedder is idle. V8 can complete deferred work
+ // in this mode.
+ PERFORMANCE_IDLE,
+ // Load performance mode: In this mode high throughput is provided. V8 may
+ // turn off latency optimizations.
+ PERFORMANCE_LOAD
+};
+
+/**
* Option flags passed to the SetJitCodeEventHandler function.
*/
enum JitCodeEventOptions {
@@ -5401,31 +5523,56 @@ enum class MemoryPressureLevel { kNone, kModerate, kCritical };
* trace through its heap and call PersistentBase::RegisterExternalReference on
* each js object reachable from any of the given wrappers.
*
- * Before the first call to the TraceWrappableFrom function v8 will call
- * TraceRoots. When the v8 garbage collection is finished, v8 will call
- * ClearTracingMarks.
+ * Before the first call to the TraceWrappersFrom function TracePrologue will be
+ * called. When the garbage collection cycle is finished, TraceEpilogue will be
+ * called.
*/
-class EmbedderHeapTracer {
+class V8_EXPORT EmbedderHeapTracer {
public:
+ enum ForceCompletionAction { FORCE_COMPLETION, DO_NOT_FORCE_COMPLETION };
+ struct AdvanceTracingActions {
+ explicit AdvanceTracingActions(ForceCompletionAction force_completion_)
+ : force_completion(force_completion_) {}
+
+ ForceCompletionAction force_completion;
+ };
+ /**
+ * V8 will call this method with the internal fields of found wrappers.
+ * The embedder is expected to store them in its marking deque and to trace
+ * reachable wrappers from them when asked by the AdvanceTracing method.
+ */
+ virtual void RegisterV8References(
+ const std::vector<std::pair<void*, void*> >& internal_fields) = 0;
/**
* V8 will call this method at the beginning of the gc cycle.
*/
- virtual void TraceRoots(Isolate* isolate) = 0;
-
+ virtual void TracePrologue() = 0;
/**
- * V8 will call this method with internal fields of a potential wrappers.
- * Embedder is expected to trace its heap (synchronously) and call
- * PersistentBase::RegisterExternalReference() on all wrappers reachable from
- * any of the given wrappers.
+ * Embedder is expected to trace its heap starting from wrappers reported by
+ * RegisterV8References method, and call
+ * PersistentBase::RegisterExternalReference() on all reachable wrappers.
+ * Embedder is expected to stop tracing by the given deadline.
+ *
+ * Returns true if there is still work to do.
*/
- virtual void TraceWrappableFrom(
- Isolate* isolate,
- const std::vector<std::pair<void*, void*> >& internal_fields) = 0;
+ virtual bool AdvanceTracing(double deadline_in_ms,
+ AdvanceTracingActions actions) = 0;
/**
* V8 will call this method at the end of the gc cycle. Allocation is *not*
- * allowed in the ClearTracingMarks.
+ * allowed in the TraceEpilogue.
+ */
+ virtual void TraceEpilogue() = 0;
+
+ /**
+ * Let embedder know v8 entered final marking pause (no more incremental steps
+ * will follow).
+ */
+ virtual void EnterFinalPause() {}
+
+ /**
+ * Throw away all intermediate data and reset to the initial state.
*/
- virtual void ClearTracingMarks(Isolate* isolate) = 0;
+ virtual void AbortTracing() {}
protected:
virtual ~EmbedderHeapTracer() = default;
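
To make the new contract concrete, a skeletal tracer against this interface; the marking work is elided, so this is a sketch of the call flow rather than a working collector integration:

    #include <utility>
    #include <vector>
    #include "v8.h"

    class SkeletonTracer : public v8::EmbedderHeapTracer {
     public:
      void TracePrologue() override { fields_.clear(); }

      void RegisterV8References(
          const std::vector<std::pair<void*, void*> >& internal_fields)
          override {
        // Buffer the reported wrappers for later tracing.
        fields_.insert(fields_.end(), internal_fields.begin(),
                       internal_fields.end());
      }

      bool AdvanceTracing(double deadline_in_ms,
                          AdvanceTracingActions actions) override {
        // A real tracer would mark wrappers reachable from |fields_| and
        // call RegisterExternalReference() until the deadline expires.
        fields_.clear();
        return false;  // No work left.
      }

      void TraceEpilogue() override {}

     private:
      std::vector<std::pair<void*, void*> > fields_;
    };
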
@@ -5446,20 +5593,21 @@ class V8_EXPORT Isolate {
*/
struct CreateParams {
CreateParams()
- : entry_hook(NULL),
- code_event_handler(NULL),
- snapshot_blob(NULL),
- counter_lookup_callback(NULL),
- create_histogram_callback(NULL),
- add_histogram_sample_callback(NULL),
- array_buffer_allocator(NULL) {}
+ : entry_hook(nullptr),
+ code_event_handler(nullptr),
+ snapshot_blob(nullptr),
+ counter_lookup_callback(nullptr),
+ create_histogram_callback(nullptr),
+ add_histogram_sample_callback(nullptr),
+ array_buffer_allocator(nullptr),
+ external_references(nullptr) {}
/**
* The optional entry_hook allows the host application to provide the
* address of a function that's invoked on entry to every V8-generated
* function. Note that entry_hook is invoked at the very start of each
- * generated function. Furthermore, if an entry_hook is given, V8 will
- * always run without a context snapshot.
+ * generated function. Furthermore, if an entry_hook is given, V8 will
+ * not use a snapshot, including custom snapshots.
*/
FunctionEntryHook entry_hook;
@@ -5500,6 +5648,14 @@ class V8_EXPORT Isolate {
* store of ArrayBuffers.
*/
ArrayBuffer::Allocator* array_buffer_allocator;
+
+ /**
+ * Specifies an optional nullptr-terminated array of raw addresses in the
+ * embedder that V8 can match against during serialization and use for
+ * deserialization. This array and its content must stay valid for the
+ * entire lifetime of the isolate.
+ */
+ intptr_t* external_references;
};
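
A sketch of wiring the new field, with a hypothetical callback whose address was baked into a snapshot; the array must be null-terminated and outlive the isolate:

    #include "v8.h"

    void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {}

    v8::Isolate* NewIsolateFromBlob(v8::StartupData* blob,
                                    v8::ArrayBuffer::Allocator* allocator) {
      static intptr_t refs[] = {reinterpret_cast<intptr_t>(MyCallback), 0};
      v8::Isolate::CreateParams params;
      params.snapshot_blob = blob;
      params.array_buffer_allocator = allocator;
      params.external_references = refs;
      return v8::Isolate::New(params);
    }
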
@@ -5573,7 +5729,7 @@ class V8_EXPORT Isolate {
~SuppressMicrotaskExecutionScope();
private:
- internal::Isolate* isolate_;
+ internal::Isolate* const isolate_;
// Prevent copying of Scope objects.
SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&);
@@ -5628,9 +5784,13 @@ class V8_EXPORT Isolate {
kLegacyFunctionDeclaration = 29,
kRegExpPrototypeSourceGetter = 30,
kRegExpPrototypeOldFlagGetter = 31,
+ kDecimalWithLeadingZeroInStrictMode = 32,
+ kLegacyDateParser = 33,
+ kDefineGetterOrSetterWouldThrow = 34,
+ kFunctionConstructorReturnedUndefined = 35,
- // If you add new values here, you'll also need to update V8Initializer.cpp
- // in Chromium.
+ // If you add new values here, you'll also need to update Chromium's:
+ // UseCounter.h, V8PerIsolateData.cpp, histograms.xml
kUseCounterFeatureCount // This enum value must be last.
};
@@ -5772,6 +5932,15 @@ class V8_EXPORT Isolate {
size_t type_index);
/**
+ * Get statistics about code and its metadata in the heap.
+ *
+ * \param object_statistics The HeapCodeStatistics object to fill with
+ * statistics about code, bytecode and their metadata.
+ * \returns true on success.
+ */
+ bool GetHeapCodeAndMetadataStatistics(HeapCodeStatistics* object_statistics);
+
+ /**
* Get a call stack sample from the isolate.
* \param state Execution state.
* \param frames Caller allocated buffer to store stack frames.
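
A short sketch of polling the new code statistics:

    #include <cstdio>
    #include "v8.h"

    void LogCodeSizes(v8::Isolate* isolate) {
      v8::HeapCodeStatistics stats;
      if (isolate->GetHeapCodeAndMetadataStatistics(&stats)) {
        printf("code+metadata: %zu bytecode+metadata: %zu\n",
               stats.code_and_metadata_size(),
               stats.bytecode_and_metadata_size());
      }
    }
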
@@ -5803,6 +5972,12 @@ class V8_EXPORT Isolate {
AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
/**
+ * Returns the number of phantom handles without callbacks that were reset
+ * by the garbage collector since the last call to this function.
+ */
+ size_t NumberOfPhantomHandleResetsSinceLastCall();
+
+ /**
* Returns heap profiler for this isolate. Will return NULL until the isolate
* is initialized.
*/
@@ -5813,7 +5988,8 @@ class V8_EXPORT Isolate {
* is initialized. It is the embedder's responsibility to stop all CPU
* profiling activities if it has started any.
*/
- CpuProfiler* GetCpuProfiler();
+ V8_DEPRECATE_SOON("CpuProfiler should be created with CpuProfiler::New call.",
+ CpuProfiler* GetCpuProfiler());
/** Returns true if this isolate has a current context. */
bool InContext();
@@ -6143,6 +6319,15 @@ class V8_EXPORT Isolate {
void IsolateInBackgroundNotification();
/**
+ * Optional notification to tell V8 the current performance requirements
+ * of the embedder based on RAIL.
+ * V8 uses these notifications to guide heuristics.
+ * This is an unfinished experimental feature. Semantics and implementation
+ * may change frequently.
+ */
+ void SetRAILMode(RAILMode rail_mode);
+
+ /**
* Allows the host application to provide the address of a function that is
* notified each time code is added, moved or removed.
*
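
A sketch of how an embedder might drive the new hint around a navigation (hook names are hypothetical):

    #include "v8.h"

    void OnNavigationStart(v8::Isolate* isolate) {
      isolate->SetRAILMode(v8::PERFORMANCE_LOAD);
    }

    void OnLoadComplete(v8::Isolate* isolate) {
      isolate->SetRAILMode(v8::PERFORMANCE_ANIMATION);  // Default mode.
    }
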
@@ -6196,6 +6381,9 @@ class V8_EXPORT Isolate {
/** Set the callback to invoke in case of fatal errors. */
void SetFatalErrorHandler(FatalErrorCallback that);
+ /** Set the callback to invoke in case of OOM errors. */
+ void SetOOMErrorHandler(OOMErrorCallback that);
+
/**
* Set the callback to invoke to check if code generation from
* strings should be allowed.
@@ -6238,18 +6426,6 @@ class V8_EXPORT Isolate {
StackTrace::StackTraceOptions options = StackTrace::kOverview);
/**
- * Enables the host application to provide a mechanism to be notified
- * and perform custom logging when V8 Allocates Executable Memory.
- */
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space, AllocationAction action);
-
- /**
- * Removes callback that was installed by AddMemoryAllocationCallback.
- */
- void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-
- /**
* Iterates through all external resources referenced from current isolate
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
@@ -6278,6 +6454,12 @@ class V8_EXPORT Isolate {
*/
void VisitWeakHandles(PersistentHandleVisitor* visitor);
+ /**
+ * Check if this isolate is in use.
+ * True if at least one thread has Enter'ed this isolate.
+ */
+ bool IsInUse();
+
private:
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
@@ -6308,19 +6490,18 @@ class V8_EXPORT StartupData {
*/
typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
-
/**
* ReturnAddressLocationResolver is used as a callback function when v8 is
* resolving the location of a return address on the stack. Profilers that
* change the return address on the stack can use this to resolve the stack
 * location to wherever the profiler stashed the original return address.
*
- * \param return_addr_location points to a location on stack where a machine
+ * \param return_addr_location A location on stack where a machine
* return address resides.
- * \returns either return_addr_location, or else a pointer to the profiler's
+ * \returns Either return_addr_location, or else a pointer to the profiler's
* copy of the original return address.
*
- * \note the resolver function must not cause garbage collection.
+ * \note The resolver function must not cause garbage collection.
*/
typedef uintptr_t (*ReturnAddressLocationResolver)(
uintptr_t return_addr_location);
@@ -6484,23 +6665,6 @@ class V8_EXPORT V8 {
void RemoveGCEpilogueCallback(GCCallback callback));
/**
- * Enables the host application to provide a mechanism to be notified
- * and perform custom logging when V8 Allocates Executable Memory.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action));
-
- /**
- * Removes callback that was installed by AddMemoryAllocationCallback.
- */
- V8_INLINE static V8_DEPRECATED(
- "Use isolate version",
- void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback));
-
- /**
* Initializes V8. This function needs to be called before the first Isolate
* is created. It always returns true.
*/
@@ -6620,7 +6784,24 @@ class V8_EXPORT V8 {
* If V8 was compiled with the ICU data in an external file, the location
* of the data file has to be provided.
*/
- static bool InitializeICU(const char* icu_data_file = NULL);
+ V8_DEPRECATE_SOON(
+ "Use version with default location.",
+ static bool InitializeICU(const char* icu_data_file = nullptr));
+
+ /**
+ * Initialize the ICU library bundled with V8. The embedder should only
+ * invoke this method when using the bundled ICU. If V8 was compiled with
+ * the ICU data in an external file and when the default location of that
+ * file should be used, a path to the executable must be provided.
+ * Returns true on success.
+ *
+ * The default is a file called icudtl.dat side-by-side with the executable.
+ *
+ * Optionally, the location of the data file can be provided to override the
+ * default.
+ */
+ static bool InitializeICUDefaultLocation(const char* exec_path,
+ const char* icu_data_file = nullptr);
/**
* Initialize the external startup data. The embedder only needs to
@@ -6660,26 +6841,27 @@ class V8_EXPORT V8 {
internal::Object** handle);
static internal::Object** CopyPersistent(internal::Object** handle);
static void DisposeGlobal(internal::Object** global_handle);
- typedef WeakCallbackData<Value, void>::Callback WeakCallback;
- static void RegisterExternallyReferencedObject(internal::Object** object,
- internal::Isolate* isolate);
- static void MakeWeak(internal::Object** global_handle, void* data,
- WeakCallback weak_callback);
- static void MakeWeak(internal::Object** global_handle, void* data,
+ static void MakeWeak(internal::Object** location, void* data,
WeakCallbackInfo<void>::Callback weak_callback,
WeakCallbackType type);
- static void MakeWeak(internal::Object** global_handle, void* data,
+ static void MakeWeak(internal::Object** location, void* data,
// Must be 0 or -1.
int internal_field_index1,
// Must be 1 or -1.
int internal_field_index2,
WeakCallbackInfo<void>::Callback weak_callback);
- static void* ClearWeak(internal::Object** global_handle);
+ static void MakeWeak(internal::Object*** location_addr);
+ static void* ClearWeak(internal::Object** location);
static void Eternalize(Isolate* isolate,
Value* handle,
int* index);
static Local<Value> GetEternal(Isolate* isolate, int index);
+ static void RegisterExternallyReferencedObject(internal::Object** object,
+ internal::Isolate* isolate);
+ template <class K, class V, class T>
+ friend class PersistentValueMapBase;
+
static void FromJustIsNothing();
static void ToLocalEmpty();
static void InternalFieldOutOfBounds(int index);
@@ -6696,6 +6878,60 @@ class V8_EXPORT V8 {
friend class Context;
};
+/**
+ * Helper class to create a snapshot data blob.
+ */
+class SnapshotCreator {
+ public:
+ enum class FunctionCodeHandling { kClear, kKeep };
+
+ /**
+ * Create and enter an isolate, and set it up for serialization.
+ * The isolate is either created from scratch or from an existing snapshot.
+ * The caller keeps ownership of the argument snapshot.
+ * \param existing_blob existing snapshot from which to create this one.
+ * \param external_references a null-terminated array of external references
+ * that must be equivalent to CreateParams::external_references.
+ */
+ SnapshotCreator(intptr_t* external_references = nullptr,
+ StartupData* existing_blob = nullptr);
+
+ ~SnapshotCreator();
+
+ /**
+ * \returns the isolate prepared by the snapshot creator.
+ */
+ Isolate* GetIsolate();
+
+ /**
+ * Add a context to be included in the snapshot blob.
+ * \returns the index of the context in the snapshot blob.
+ */
+ size_t AddContext(Local<Context> context);
+
+ /**
+ * Add a template to be included in the snapshot blob.
+ * \returns the index of the template in the snapshot blob.
+ */
+ size_t AddTemplate(Local<Template> template_obj);
+
+ /**
+ * Creates a snapshot data blob.
+ * This must not be called from within a handle scope.
+ * \param function_code_handling whether to include compiled function code
+ * in the snapshot.
+ * \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
+ * caller acquires ownership of the data array in the return value.
+ */
+ StartupData CreateBlob(FunctionCodeHandling function_code_handling);
+
+ private:
+ void* data_;
+
+ // Disallow copying and assigning.
+ SnapshotCreator(const SnapshotCreator&);
+ void operator=(const SnapshotCreator&);
+};
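
A minimal end-to-end sketch of the new class: snapshot one empty context and hand the blob to the caller, who owns the data array:

    #include "v8.h"

    v8::StartupData MakeBlob() {
      v8::SnapshotCreator creator;
      v8::Isolate* isolate = creator.GetIsolate();
      {
        v8::HandleScope scope(isolate);
        v8::Local<v8::Context> context = v8::Context::New(isolate);
        // Run any warm-up script here before snapshotting.
        creator.AddContext(context);  // Becomes context index 0.
      }
      // Must be called outside any handle scope.
      return creator.CreateBlob(
          v8::SnapshotCreator::FunctionCodeHandling::kClear);
    }
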
/**
* A simple Maybe type, representing an object which may or may not have a
@@ -6710,17 +6946,25 @@ class V8_EXPORT V8 {
template <class T>
class Maybe {
public:
- V8_INLINE bool IsNothing() const { return !has_value; }
- V8_INLINE bool IsJust() const { return has_value; }
+ V8_INLINE bool IsNothing() const { return !has_value_; }
+ V8_INLINE bool IsJust() const { return has_value_; }
+
+ // Will crash if the Maybe<> is nothing.
+ V8_INLINE T ToChecked() const { return FromJust(); }
+
+ V8_WARN_UNUSED_RESULT V8_INLINE bool To(T* out) const {
+ if (V8_LIKELY(IsJust())) *out = value_;
+ return IsJust();
+ }
// Will crash if the Maybe<> is nothing.
V8_INLINE T FromJust() const {
if (V8_UNLIKELY(!IsJust())) V8::FromJustIsNothing();
- return value;
+ return value_;
}
V8_INLINE T FromMaybe(const T& default_value) const {
- return has_value ? value : default_value;
+ return has_value_ ? value_ : default_value;
}
V8_INLINE bool operator==(const Maybe& other) const {
@@ -6733,11 +6977,11 @@ class Maybe {
}
private:
- Maybe() : has_value(false) {}
- explicit Maybe(const T& t) : has_value(true), value(t) {}
+ Maybe() : has_value_(false) {}
+ explicit Maybe(const T& t) : has_value_(true), value_(t) {}
- bool has_value;
- T value;
+ bool has_value_;
+ T value_;
template <class U>
friend Maybe<U> Nothing();
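
A usage note on the new accessors: To() is the non-crashing counterpart of FromJust()/ToChecked(), e.g.:

    #include "v8.h"

    bool AsUint32(v8::Local<v8::Context> context,
                  v8::Local<v8::Value> value, uint32_t* out) {
      // Returns false instead of crashing when the Maybe is nothing.
      return value->Uint32Value(context).To(out);
    }
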
@@ -6981,8 +7225,35 @@ class V8_EXPORT Context {
*/
static Local<Context> New(
Isolate* isolate, ExtensionConfiguration* extensions = NULL,
- Local<ObjectTemplate> global_template = Local<ObjectTemplate>(),
- Local<Value> global_object = Local<Value>());
+ MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
+ MaybeLocal<Value> global_object = MaybeLocal<Value>());
+
+ static MaybeLocal<Context> FromSnapshot(
+ Isolate* isolate, size_t context_snapshot_index,
+ ExtensionConfiguration* extensions = nullptr,
+ MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
+ MaybeLocal<Value> global_object = MaybeLocal<Value>());
+
+ /**
+ * Returns a global object that isn't backed by an actual context.
+ *
+ * The global template needs to have access checks with handlers installed.
+ * If an existing global object is passed in, the global object is detached
+ * from its context.
+ *
+ * Note that this is different from a detached context where all accesses to
+ * the global proxy will fail. Instead, the access check handlers are invoked.
+ *
+ * It is also not possible to detach an object returned by this method.
+ * Instead, the access check handlers need to return nothing to achieve the
+ * same effect.
+ *
+ * It is possible, however, to create a new context from the global object
+ * returned by this method.
+ */
+ static MaybeLocal<Object> NewRemoteContext(
+ Isolate* isolate, Local<ObjectTemplate> global_template,
+ MaybeLocal<Value> global_object = MaybeLocal<Value>());
/**
* Sets the security token for the context. To access an object in
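
A sketch of rehydrating the context added as index 0 by the SnapshotCreator example above:

    #include "v8.h"

    v8::MaybeLocal<v8::Context> Rehydrate(v8::Isolate* isolate) {
      return v8::Context::FromSnapshot(isolate, 0);
    }
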
@@ -7044,7 +7315,7 @@ class V8_EXPORT Context {
/**
* Gets a 2-byte-aligned native pointer from the embedder data with the given
- * index, which must have bees set by a previous call to
+ * index, which must have been set by a previous call to
* SetAlignedPointerInEmbedderData with the same index. Note that index 0
* currently has a special meaning for Chrome's debugger.
*/
@@ -7124,7 +7395,7 @@ class V8_EXPORT Context {
* It is up to the user of V8 to ensure, perhaps with locking, that this
* constraint is not violated. In addition to any other synchronization
* mechanism that may be used, the v8::Locker and v8::Unlocker classes must be
- * used to signal thead switches to V8.
+ * used to signal thread switches to V8.
*
* v8::Locker is a scoped lock object. While it's active, i.e. between its
* construction and destruction, the current thread is allowed to use the locked
@@ -7337,7 +7608,7 @@ class Internals {
1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
- static const int kOddballKindOffset = 4 * kApiPointerSize;
+ static const int kOddballKindOffset = 4 * kApiPointerSize + sizeof(double);
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
@@ -7349,23 +7620,18 @@ class Internals {
static const int kExternalOneByteRepresentationTag = 0x06;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
- static const int kAmountOfExternalAllocatedMemoryOffset =
- 4 * kApiPointerSize;
- static const int kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset =
- kAmountOfExternalAllocatedMemoryOffset + kApiInt64Size;
- static const int kIsolateRootsOffset =
- kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset + kApiInt64Size +
- kApiPointerSize;
- static const int kUndefinedValueRootIndex = 5;
- static const int kNullValueRootIndex = 7;
- static const int kTrueValueRootIndex = 8;
- static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 10;
- static const int kTheHoleValueRootIndex = 11;
-
- // The external allocation limit should be below 256 MB on all architectures
- // to avoid that resource-constrained embedders run low on memory.
- static const int kExternalAllocationLimit = 192 * 1024 * 1024;
+ static const int kExternalMemoryOffset = 4 * kApiPointerSize;
+ static const int kExternalMemoryLimitOffset =
+ kExternalMemoryOffset + kApiInt64Size;
+ static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
+ kApiInt64Size + kApiInt64Size +
+ kApiPointerSize + kApiPointerSize;
+ static const int kUndefinedValueRootIndex = 4;
+ static const int kTheHoleValueRootIndex = 5;
+ static const int kNullValueRootIndex = 6;
+ static const int kTrueValueRootIndex = 7;
+ static const int kFalseValueRootIndex = 8;
+ static const int kEmptyStringRootIndex = 9;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -7377,7 +7643,8 @@ class Internals {
static const int kNodeIsPartiallyDependentShift = 4;
static const int kNodeIsActiveShift = 4;
- static const int kJSObjectType = 0xb5;
+ static const int kJSObjectType = 0xb7;
+ static const int kJSApiObjectType = 0xb6;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -7631,39 +7898,6 @@ void PersistentBase<T>::Reset(Isolate* isolate,
template <class T>
-template <typename S, typename P>
-void PersistentBase<T>::SetWeak(
- P* parameter,
- typename WeakCallbackData<S, P>::Callback callback) {
- TYPE_CHECK(S, T);
- typedef typename WeakCallbackData<Value, void>::Callback Callback;
- V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
- reinterpret_cast<Callback>(callback));
-}
-
-
-template <class T>
-template <typename P>
-void PersistentBase<T>::SetWeak(
- P* parameter,
- typename WeakCallbackData<T, P>::Callback callback) {
- SetWeak<T, P>(parameter, callback);
-}
-
-
-template <class T>
-template <typename P>
-void PersistentBase<T>::SetPhantom(
- P* parameter, typename WeakCallbackInfo<P>::Callback callback,
- int internal_field_index1, int internal_field_index2) {
- typedef typename WeakCallbackInfo<void>::Callback Callback;
- V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
- internal_field_index1, internal_field_index2,
- reinterpret_cast<Callback>(callback));
-}
-
-
-template <class T>
template <typename P>
V8_INLINE void PersistentBase<T>::SetWeak(
P* parameter, typename WeakCallbackInfo<P>::Callback callback,
@@ -7673,6 +7907,10 @@ V8_INLINE void PersistentBase<T>::SetWeak(
reinterpret_cast<Callback>(callback), type);
}
+template <class T>
+void PersistentBase<T>::SetWeak() {
+ V8::MakeWeak(reinterpret_cast<internal::Object***>(&this->val_));
+}
template <class T>
template <typename P>
@@ -7682,7 +7920,7 @@ P* PersistentBase<T>::ClearWeak() {
}
template <class T>
-void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) {
+void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
if (IsEmpty()) return;
V8::RegisterExternallyReferencedObject(
reinterpret_cast<internal::Object**>(this->val_),
@@ -7864,17 +8102,11 @@ internal::Object* ReturnValue<T>::GetDefaultValue() {
return value_[-1];
}
-
-template<typename T>
+template <typename T>
FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values,
- int length,
- bool is_construct_call)
- : implicit_args_(implicit_args),
- values_(values),
- length_(length),
- is_construct_call_(is_construct_call) { }
-
+ int length)
+ : implicit_args_(implicit_args), values_(values), length_(length) {}
template<typename T>
Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
@@ -7902,8 +8134,13 @@ Local<Object> FunctionCallbackInfo<T>::Holder() const {
&implicit_args_[kHolderIndex]));
}
+template <typename T>
+Local<Value> FunctionCallbackInfo<T>::NewTarget() const {
+ return Local<Value>(
+ reinterpret_cast<Value*>(&implicit_args_[kNewTargetIndex]));
+}
-template<typename T>
+template <typename T>
Local<Value> FunctionCallbackInfo<T>::Data() const {
return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
}
@@ -7923,7 +8160,7 @@ ReturnValue<T> FunctionCallbackInfo<T>::GetReturnValue() const {
template<typename T>
bool FunctionCallbackInfo<T>::IsConstructCall() const {
- return is_construct_call_ & 0x1;
+ return !NewTarget()->IsUndefined();
}
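
A sketch of a callback using the reworked construct-call detection:

    #include "v8.h"

    void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      if (info.IsConstructCall()) {
        // new.target is available via NewTarget() here.
        info.GetReturnValue().Set(info.This());
      } else {
        // Plain call: NewTarget() is undefined, matching the check above.
        info.GetReturnValue().SetUndefined();
      }
    }
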
@@ -8017,7 +8254,9 @@ Local<Value> Object::GetInternalField(int index) {
O* obj = *reinterpret_cast<O**>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
- if (I::GetInstanceType(obj) == I::kJSObjectType) {
+ auto instance_type = I::GetInstanceType(obj);
+ if (instance_type == I::kJSObjectType ||
+ instance_type == I::kJSApiObjectType) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
O** result = HandleScope::CreateHandle(reinterpret_cast<HO*>(obj), value);
@@ -8035,7 +8274,9 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
O* obj = *reinterpret_cast<O**>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
// know where to find the internal fields and can return the value directly.
- if (V8_LIKELY(I::GetInstanceType(obj) == I::kJSObjectType)) {
+ auto instance_type = I::GetInstanceType(obj);
+ if (V8_LIKELY(instance_type == I::kJSObjectType ||
+ instance_type == I::kJSApiObjectType)) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
return I::ReadField<void*>(obj, offset);
}
@@ -8043,7 +8284,6 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
return SlowGetAlignedPointerFromInternalField(index);
}
-
String* String::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -8356,6 +8596,12 @@ Proxy* Proxy::Cast(v8::Value* value) {
return static_cast<Proxy*>(value);
}
+WasmCompiledModule* WasmCompiledModule::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmCompiledModule*>(value);
+}
Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
@@ -8586,21 +8832,16 @@ uint32_t Isolate::GetNumberOfDataSlots() {
int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
int64_t change_in_bytes) {
typedef internal::Internals I;
- int64_t* amount_of_external_allocated_memory =
- reinterpret_cast<int64_t*>(reinterpret_cast<uint8_t*>(this) +
- I::kAmountOfExternalAllocatedMemoryOffset);
- int64_t* amount_of_external_allocated_memory_at_last_global_gc =
- reinterpret_cast<int64_t*>(
- reinterpret_cast<uint8_t*>(this) +
- I::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
- int64_t amount = *amount_of_external_allocated_memory + change_in_bytes;
- if (change_in_bytes > 0 &&
- amount - *amount_of_external_allocated_memory_at_last_global_gc >
- I::kExternalAllocationLimit) {
+ int64_t* external_memory = reinterpret_cast<int64_t*>(
+ reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryOffset);
+ const int64_t external_memory_limit = *reinterpret_cast<int64_t*>(
+ reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryLimitOffset);
+ const int64_t amount = *external_memory + change_in_bytes;
+ *external_memory = amount;
+ if (change_in_bytes > 0 && amount > external_memory_limit) {
ReportExternalAllocationLimitReached();
}
- *amount_of_external_allocated_memory = amount;
- return *amount_of_external_allocated_memory;
+ return *external_memory;
}
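
A sketch of the usual pairing around an embedder-owned allocation, so that the external-memory counter stays balanced:

    #include <cstdlib>
    #include "v8.h"

    void* AllocTracked(v8::Isolate* isolate, size_t size) {
      void* mem = malloc(size);
      if (mem) {
        isolate->AdjustAmountOfExternalAllocatedMemory(
            static_cast<int64_t>(size));
      }
      return mem;
    }

    void FreeTracked(v8::Isolate* isolate, void* mem, size_t size) {
      free(mem);
      isolate->AdjustAmountOfExternalAllocatedMemory(
          -static_cast<int64_t>(size));
    }
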
@@ -8701,7 +8942,6 @@ void V8::SetFatalErrorHandler(FatalErrorCallback callback) {
isolate->SetFatalErrorHandler(callback);
}
-
void V8::RemoveGCPrologueCallback(GCCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->RemoveGCPrologueCallback(
@@ -8715,21 +8955,6 @@ void V8::RemoveGCEpilogueCallback(GCCallback callback) {
reinterpret_cast<v8::Isolate::GCCallback>(callback));
}
-
-void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->AddMemoryAllocationCallback(callback, space, action);
-}
-
-
-void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
- Isolate* isolate = Isolate::GetCurrent();
- isolate->RemoveMemoryAllocationCallback(callback);
-}
-
-
void V8::TerminateExecution(Isolate* isolate) { isolate->TerminateExecution(); }
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index ce3a9d2f4f..c34cb69a89 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -63,7 +63,6 @@
// V8_OS_FREEBSD - FreeBSD
// V8_OS_LINUX - Linux
// V8_OS_MACOSX - Mac OS X
-// V8_OS_NACL - Native Client
// V8_OS_NETBSD - NetBSD
// V8_OS_OPENBSD - OpenBSD
// V8_OS_POSIX - POSIX compatible (mostly everything except Windows)
@@ -80,9 +79,6 @@
# define V8_OS_BSD 1
# define V8_OS_MACOSX 1
# define V8_OS_POSIX 1
-#elif defined(__native_client__)
-# define V8_OS_NACL 1
-# define V8_OS_POSIX 1
#elif defined(__CYGWIN__)
# define V8_OS_CYGWIN 1
# define V8_OS_POSIX 1
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index 5f85111f20..3c645fd90b 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -25,17 +25,38 @@ verifiers {
try_job {
buckets {
- name: "tryserver.v8"
+ name: "master.tryserver.v8"
builders { name: "v8_android_arm_compile_rel" }
- builders { name: "v8_linux64_asan_rel" }
- builders { name: "v8_linux64_avx2_rel" }
+ builders { name: "v8_linux64_asan_rel_ng" }
+ builders {
+ name: "v8_linux64_asan_rel_ng_triggered"
+ triggered_by: "v8_linux64_asan_rel_ng"
+ }
+ builders { name: "v8_linux64_avx2_rel_ng" }
+ builders {
+ name: "v8_linux64_avx2_rel_ng_triggered"
+ triggered_by: "v8_linux64_avx2_rel_ng"
+ }
+ builders { name: "v8_linux64_gyp_rel_ng" }
+ builders {
+ name: "v8_linux64_gyp_rel_ng_triggered"
+ triggered_by: "v8_linux64_gyp_rel_ng"
+ }
builders { name: "v8_linux64_rel_ng" }
builders {
name: "v8_linux64_rel_ng_triggered"
triggered_by: "v8_linux64_rel_ng"
}
- builders { name: "v8_linux_arm64_rel" }
- builders { name: "v8_linux_arm_rel" }
+ builders { name: "v8_linux_arm64_rel_ng" }
+ builders {
+ name: "v8_linux_arm64_rel_ng_triggered"
+ triggered_by: "v8_linux_arm64_rel_ng"
+ }
+ builders { name: "v8_linux_arm_rel_ng" }
+ builders {
+ name: "v8_linux_arm_rel_ng_triggered"
+ triggered_by: "v8_linux_arm_rel_ng"
+ }
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_dbg_ng" }
builders {
@@ -45,13 +66,21 @@ verifiers {
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
- builders { name: "v8_linux_nodcheck_rel" }
+ builders { name: "v8_linux_nodcheck_rel_ng" }
+ builders {
+ name: "v8_linux_nodcheck_rel_ng_triggered"
+ triggered_by: "v8_linux_nodcheck_rel_ng"
+ }
builders { name: "v8_linux_rel_ng" }
builders {
name: "v8_linux_rel_ng_triggered"
triggered_by: "v8_linux_rel_ng"
}
- builders { name: "v8_mac_rel" }
+ builders { name: "v8_mac_rel_ng" }
+ builders {
+ name: "v8_mac_rel_ng_triggered"
+ triggered_by: "v8_mac_rel_ng"
+ }
builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel_ng" }
builders {
@@ -59,7 +88,11 @@ verifiers {
triggered_by: "v8_win64_rel_ng"
}
builders { name: "v8_win_compile_dbg" }
- builders { name: "v8_win_nosnap_shared_compile_rel" }
+ builders { name: "v8_win_nosnap_shared_rel_ng" }
+ builders {
+ name: "v8_win_nosnap_shared_rel_ng_triggered"
+ triggered_by: "v8_win_nosnap_shared_rel_ng"
+ }
builders { name: "v8_win_rel_ng" }
builders {
name: "v8_win_rel_ng_triggered"
@@ -67,13 +100,20 @@ verifiers {
}
builders {
name: "v8_linux_blink_rel"
- experiment_percentage: 20
+ experiment_percentage: 100
}
builders {
name: "v8_linux64_sanitizer_coverage_rel"
experiment_percentage: 100
}
}
+ buckets {
+ name: "master.tryserver.chromium.win"
+ builders {
+ name: "win_chromium_compile_dbg_ng"
+ experiment_percentage: 100
+ }
+ }
}
sign_cla {}
diff --git a/deps/v8/infra/mb/PRESUBMIT.py b/deps/v8/infra/mb/PRESUBMIT.py
new file mode 100644
index 0000000000..39d15e80b4
--- /dev/null
+++ b/deps/v8/infra/mb/PRESUBMIT.py
@@ -0,0 +1,35 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+
+def _CommonChecks(input_api, output_api):
+ results = []
+
+ # Validate the format of the mb_config.pyl file.
+ mb_script = input_api.os_path.join(input_api.PresubmitLocalPath(), '..',
+ '..', 'tools', 'mb', 'mb.py')
+ mb_config_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
+ 'mb_config.pyl')
+ cmd = [input_api.python_executable, mb_script, 'validate', '--config-file',
+ mb_config_path]
+ kwargs = {'cwd': input_api.PresubmitLocalPath()}
+ results.extend(input_api.RunTests([
+ input_api.Command(name='mb_validate',
+ cmd=cmd, kwargs=kwargs,
+ message=output_api.PresubmitError)]))
+
+ return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
new file mode 100644
index 0000000000..edfd254316
--- /dev/null
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -0,0 +1,670 @@
+# Copyright 2016 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ # This is a map of buildbot master names -> buildbot builder names ->
+ # config names (where each config name is a key in the 'configs' dict,
+ # below). MB uses this dict to look up which config to use for a given bot.
+ # Bots are ordered by appearance on waterfall.
+ 'masters': {
+ 'developer_default': {
+ 'x64.debug': 'default_debug_x64',
+ 'x64.optdebug': 'default_optdebug_x64',
+ 'x64.release': 'default_release_x64',
+ 'x86.debug': 'default_debug_x86',
+ 'x86.optdebug': 'default_optdebug_x86',
+ 'x86.release': 'default_release_x86',
+ },
+
+ 'client.dart.fyi': {
+ 'v8-linux-release': 'gyp_release_x86_disassembler',
+ 'v8-win-release': 'gyp_release_x86_disassembler',
+ 'v8-mac-release': 'gyp_release_x86_disassembler',
+ },
+ 'client.dynamorio': {
+ 'linux-v8-dr': 'gyp_release_x64',
+ },
+ 'client.v8': {
+ # Linux.
+ 'V8 Linux - builder': 'gn_release_x86_gcmole',
+ 'V8 Linux - debug builder': 'gn_debug_x86',
+ 'V8 Linux - nosnap builder': 'gn_release_x86_no_snap',
+ 'V8 Linux - nosnap debug builder': 'gn_debug_x86_no_snap',
+ 'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
+ 'V8 Linux - noi18n - debug': 'gyp_debug_x86_no_i18n',
+ # Linux64.
+ 'V8 Linux64 - builder': 'gn_release_x64',
+ 'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
+ 'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
+ 'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
+ 'V8 Linux64 - gyp': 'gyp_release_x64',
+ # Windows.
+ 'V8 Win32 - builder': 'gyp_release_x86_minimal_symbols',
+ 'V8 Win32 - debug builder': 'gyp_debug_x86_minimal_symbols',
+ 'V8 Win32 - nosnap - shared':
+ 'gyp_release_x86_no_snap_shared_minimal_symbols',
+ 'V8 Win64': 'gyp_release_x64_minimal_symbols',
+ 'V8 Win64 - debug': 'gyp_debug_x64_minimal_symbols',
+ 'V8 Win64 - clang': 'gyp_release_x64_clang',
+ # Mac.
+ 'V8 Mac': 'gn_release_x86',
+ 'V8 Mac - debug': 'gn_debug_x86',
+ 'V8 Mac64': 'gn_release_x64',
+ 'V8 Mac64 - debug': 'gn_debug_x64',
+ 'V8 Mac GC Stress': 'gn_debug_x86',
+ 'V8 Mac64 ASAN': 'gyp_release_x64_asan',
+ # Sanitizers.
+ 'V8 Linux64 ASAN': 'gyp_release_x64_asan',
+ 'V8 Linux64 TSAN': 'gn_release_x64_tsan',
+ 'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
+ # Clusterfuzz.
+ 'V8 Linux64 ASAN no inline - release builder':
+ 'gyp_release_x64_asan_symbolized_edge_verify_heap',
+ 'V8 Linux64 ASAN - debug builder': 'gyp_debug_x64_asan_edge',
+ 'V8 Linux64 ASAN arm64 - debug builder':
+ 'gyp_debug_simulate_arm64_asan_edge',
+ 'V8 Linux ASAN arm - debug builder':
+ 'gyp_debug_simulate_arm_asan_edge',
+ 'V8 Linux ASAN mipsel - debug builder':
+ 'gyp_debug_simulate_mipsel_asan_edge',
+ # Misc.
+ 'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
+ # FYI.
+ 'V8 Linux - swarming staging': 'gn_release_x64',
+ # TODO(machenbach): Figure out if symbolized is still needed. The
+ # original config also specified -O1, which we dropped because chromium
+ # doesn't have it (anymore).
+ 'V8 Linux64 - cfi': 'gyp_release_x64_cfi_symbolized',
+ 'V8 Linux - vtunejit': 'gyp_debug_x86_vtunejit',
+ 'V8 Linux64 - gcov coverage': 'gyp_release_x64_gcc_coverage',
+ 'V8 Linux - predictable': 'gyp_release_x86_predictable',
+ 'V8 Linux - full debug': 'gyp_full_debug_x86',
+ 'V8 Linux - interpreted regexp': 'gyp_release_x86_interpreted_regexp',
+ 'V8 Random Deopt Fuzzer - debug': 'gyp_debug_x86',
+ },
+
+ 'client.v8.ports': {
+ # Arm.
+ 'V8 Arm - builder': 'gyp_release_arm',
+ 'V8 Arm - debug builder': 'gyp_debug_arm',
+ 'V8 Android Arm - builder': 'gyp_release_android_arm',
+ 'V8 Linux - arm - sim': 'gyp_release_simulate_arm',
+ 'V8 Linux - arm - sim - debug': 'gyp_debug_simulate_arm',
+ # Arm64.
+ 'V8 Android Arm64 - builder': 'gyp_release_android_arm64',
+ 'V8 Linux - arm64 - sim': 'gn_release_simulate_arm64',
+ 'V8 Linux - arm64 - sim - debug': 'gn_debug_simulate_arm64',
+ 'V8 Linux - arm64 - sim - nosnap - debug':
+ 'gn_debug_simulate_arm64_no_snap',
+ 'V8 Linux - arm64 - sim - gc stress': 'gn_debug_simulate_arm64',
+ # Mips.
+ 'V8 Mips - builder': 'gyp_release_mips_no_snap_no_i18n',
+ 'V8 Linux - mipsel - sim - builder': 'gyp_release_simulate_mipsel',
+ 'V8 Linux - mips64el - sim - builder': 'gyp_release_simulate_mips64el',
+ # PPC.
+ 'V8 Linux - ppc - sim': 'gyp_release_simulate_ppc',
+ 'V8 Linux - ppc64 - sim': 'gyp_release_simulate_ppc64',
+ # S390.
+ 'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
+ 'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
+ # X87.
+ 'V8 Linux - x87 - nosnap - debug builder':
+ 'gyp_debug_simulate_x87_no_snap',
+ },
+ 'client.v8.branches': {
+ 'V8 Linux - beta branch': 'gn_release_x86',
+ 'V8 Linux - beta branch - debug': 'gn_debug_x86',
+ 'V8 Linux - stable branch': 'gn_release_x86',
+ 'V8 Linux - stable branch - debug': 'gn_debug_x86',
+ 'V8 Linux64 - beta branch': 'gyp_release_x64',
+ 'V8 Linux64 - beta branch - debug': 'gn_debug_x64',
+ 'V8 Linux64 - stable branch': 'gn_release_x64',
+ 'V8 Linux64 - stable branch - debug': 'gn_debug_x64',
+ 'V8 arm - sim - beta branch': 'gyp_release_simulate_arm',
+ 'V8 arm - sim - beta branch - debug': 'gyp_debug_simulate_arm',
+ 'V8 arm - sim - stable branch': 'gyp_release_simulate_arm',
+ 'V8 arm - sim - stable branch - debug': 'gyp_debug_simulate_arm',
+ 'V8 mips64el - sim - beta branch': 'gyp_release_simulate_mips64el',
+ 'V8 mips64el - sim - stable branch': 'gyp_release_simulate_mips64el',
+ 'V8 mipsel - sim - beta branch': 'gyp_release_simulate_mipsel',
+ 'V8 mipsel - sim - stable branch': 'gyp_release_simulate_mipsel',
+ 'V8 ppc - sim - beta branch': 'gyp_release_simulate_ppc',
+ 'V8 ppc - sim - stable branch': 'gyp_release_simulate_ppc',
+ 'V8 ppc64 - sim - beta branch': 'gyp_release_simulate_ppc64',
+ 'V8 ppc64 - sim - stable branch': 'gyp_release_simulate_ppc64',
+ 'V8 s390 - sim - beta branch': 'gyp_release_simulate_s390',
+ 'V8 s390 - sim - stable branch': 'gyp_release_simulate_s390',
+ 'V8 s390x - sim - beta branch': 'gyp_release_simulate_s390x',
+ 'V8 s390x - sim - stable branch': 'gyp_release_simulate_s390x',
+ },
+ 'tryserver.v8': {
+ 'v8_linux_rel_ng': 'gn_release_x86_gcmole_trybot',
+ 'v8_linux_avx2_dbg': 'gn_debug_x86_trybot',
+ 'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
+ 'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
+ 'v8_linux_noi18n_rel_ng': 'gyp_release_x86_no_i18n_trybot',
+ 'v8_linux_gc_stress_dbg': 'gyp_debug_x86_trybot',
+ 'v8_linux_nosnap_rel': 'gn_release_x86_no_snap_trybot',
+ 'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
+ 'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
+ 'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
+ 'v8_linux64_rel_ng': 'gn_release_x64_trybot',
+ 'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
+ 'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
+ 'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
+ 'v8_linux64_asan_rel_ng': 'gyp_release_x64_asan_minimal_symbols',
+ 'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols',
+ 'v8_linux64_sanitizer_coverage_rel':
+ 'gyp_release_x64_asan_minimal_symbols_coverage',
+ 'v8_linux64_tsan_rel': 'gn_release_x64_tsan_minimal_symbols',
+ 'v8_win_dbg': 'gyp_debug_x86_trybot',
+ 'v8_win_compile_dbg': 'gyp_debug_x86_trybot',
+ 'v8_win_rel_ng': 'gyp_release_x86_trybot',
+ 'v8_win_nosnap_shared_rel_ng':
+ 'gyp_release_x86_no_snap_shared_minimal_symbols',
+ 'v8_win64_dbg': 'gyp_debug_x64_minimal_symbols',
+ 'v8_win64_rel_ng': 'gyp_release_x64_trybot',
+ 'v8_mac_rel_ng': 'gn_release_x86_trybot',
+ 'v8_mac_dbg': 'gn_debug_x86_trybot',
+ 'v8_mac_gc_stress_dbg': 'gn_debug_x86_trybot',
+ 'v8_mac64_rel': 'gn_release_x64_trybot',
+ 'v8_mac64_dbg': 'gn_debug_x64_minimal_symbols',
+ 'v8_mac64_asan_rel': 'gyp_release_x64_asan',
+ 'v8_linux_arm_rel_ng': 'gyp_release_simulate_arm_trybot',
+ 'v8_linux_arm_dbg': 'gyp_debug_simulate_arm',
+ 'v8_linux_arm_armv8a_rel': 'gyp_release_simulate_arm_trybot',
+ 'v8_linux_arm_armv8a_dbg': 'gyp_debug_simulate_arm',
+ 'v8_linux_arm64_rel_ng': 'gn_release_simulate_arm64_trybot',
+ 'v8_linux_arm64_dbg': 'gn_debug_simulate_arm64',
+ 'v8_linux_arm64_gc_stress_dbg': 'gn_debug_simulate_arm64',
+ 'v8_linux_mipsel_compile_rel': 'gyp_release_simulate_mipsel',
+ 'v8_linux_mips64el_compile_rel': 'gyp_release_simulate_mips64el',
+ 'v8_android_arm_compile_rel': 'gyp_release_android_arm',
+ },
+ },
+
+
+ # To ease readability, config values are ordered by:
+ # gyp/gn, release/debug, arch type, other values alphabetically.
+ 'configs': {
+ # Developer default configs.
+ 'default_debug_x64': [
+ 'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
+ 'default_optdebug_x64': [
+ 'gn', 'debug', 'x64', 'v8_enable_slow_dchecks'],
+ 'default_release_x64': [
+ 'gn', 'release', 'x64'],
+ 'default_debug_x86': [
+ 'gn', 'debug', 'x86', 'v8_enable_slow_dchecks', 'v8_full_debug'],
+ 'default_optdebug_x86': [
+ 'gn', 'debug', 'x86', 'v8_enable_slow_dchecks'],
+ 'default_release_x86': [
+ 'gn', 'release', 'x86'],
+
+
+ # GN debug configs for simulators.
+ 'gn_debug_simulate_arm64': [
+ 'gn', 'debug_bot', 'simulate_arm64', 'swarming'],
+ 'gn_debug_simulate_arm64_no_snap': [
+ 'gn', 'debug_bot', 'simulate_arm64', 'swarming', 'v8_snapshot_none'],
+
+ # GN release configs for simulators.
+ 'gn_release_simulate_arm64': [
+ 'gn', 'release_bot', 'simulate_arm64', 'swarming'],
+ 'gn_release_simulate_arm64_msan': [
+ 'gn', 'release_bot', 'simulate_arm64', 'msan', 'swarming'],
+ 'gn_release_simulate_arm64_msan_minimal_symbols': [
+ 'gn', 'release_bot', 'simulate_arm64', 'msan', 'minimal_symbols',
+ 'swarming'],
+ 'gn_release_simulate_arm64_trybot': [
+ 'gn', 'release_trybot', 'simulate_arm64', 'swarming'],
+
+ # GN release configs for x64.
+ 'gn_release_x64': [
+ 'gn', 'release_bot', 'x64', 'swarming'],
+ 'gn_release_x64_internal': [
+ 'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
+ 'gn_release_x64_trybot': [
+ 'gn', 'release_trybot', 'x64', 'swarming'],
+ 'gn_release_x64_tsan': [
+ 'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
+ 'gn_release_x64_tsan_minimal_symbols': [
+ 'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
+
+ # GN debug configs for x64.
+ 'gn_debug_x64': [
+ 'gn', 'debug_bot', 'x64', 'swarming'],
+ 'gn_debug_x64_custom': [
+ 'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
+ 'gn_debug_x64_minimal_symbols': [
+ 'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
+ 'gn_debug_x64_trybot': [
+ 'gn', 'debug_trybot', 'x64', 'swarming'],
+ 'gn_debug_x64_valgrind': [
+ 'gn', 'debug_bot', 'x64', 'swarming', 'valgrind'],
+
+ # GN debug configs for x86.
+ 'gn_debug_x86': [
+ 'gn', 'debug_bot', 'x86', 'swarming'],
+ 'gn_debug_x86_no_snap': [
+ 'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'gn_debug_x86_no_snap_trybot': [
+ 'gn', 'debug_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'gn_debug_x86_trybot': [
+ 'gn', 'debug_trybot', 'x86', 'swarming'],
+
+ # GN release configs for x86.
+ 'gn_release_x86': [
+ 'gn', 'release_bot', 'x86', 'swarming'],
+ 'gn_release_x86_gcc': [
+ 'gn', 'release_bot', 'x86', 'gcc'],
+ 'gn_release_x86_gcc_minimal_symbols': [
+ 'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
+ 'gn_release_x86_gcmole': [
+ 'gn', 'release_bot', 'x86', 'gcmole', 'swarming'],
+ 'gn_release_x86_gcmole_trybot': [
+ 'gn', 'release_trybot', 'x86', 'gcmole', 'swarming'],
+ 'gn_release_x86_minimal_symbols': [
+ 'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
+ 'gn_release_x86_no_snap': [
+ 'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'gn_release_x86_no_snap_trybot': [
+ 'gn', 'release_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
+ 'gn_release_x86_shared_verify_heap': [
+ 'gn', 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
+ 'gn_release_x86_trybot': [
+ 'gn', 'release_trybot', 'x86', 'swarming'],
+
+ # Gyp debug configs for arm.
+ 'gyp_debug_arm': [
+ 'gyp', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
+
+ # Gyp debug configs for simulators.
+ 'gyp_debug_simulate_arm': [
+ 'gyp', 'debug_bot', 'simulate_arm', 'swarming'],
+ 'gyp_debug_simulate_arm_asan_edge': [
+ 'gyp', 'debug_bot', 'simulate_arm', 'asan', 'edge'],
+ 'gyp_debug_simulate_arm64_asan_edge': [
+ 'gyp', 'debug_bot', 'simulate_arm64', 'asan', 'lsan', 'edge'],
+ 'gyp_debug_simulate_mipsel_asan_edge': [
+ 'gyp', 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
+ 'gyp_debug_simulate_x87_no_snap': [
+ 'gyp', 'debug_bot', 'simulate_x87', 'swarming', 'v8_snapshot_none'],
+
+ # Gyp debug configs for x64.
+ 'gyp_debug_x64_asan_edge': [
+ 'gyp', 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
+ 'gyp_debug_x64_minimal_symbols': [
+ 'gyp', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
+
+ # Gyp debug configs for x86.
+ 'gyp_debug_x86': [
+ 'gyp', 'debug_bot', 'x86', 'swarming'],
+ 'gyp_debug_x86_minimal_symbols': [
+ 'gyp', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
+ 'gyp_debug_x86_trybot': [
+ 'gyp', 'debug_trybot', 'x86', 'swarming'],
+ 'gyp_debug_x86_no_i18n': [
+ 'gyp', 'debug_bot', 'x86', 'v8_no_i18n'],
+ 'gyp_debug_x86_vtunejit': [
+ 'gyp', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
+ 'gyp_full_debug_x86': [
+ 'gyp', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
+ 'v8_full_debug'],
+
+ # Gyp release configs for arm.
+ 'gyp_release_arm': [
+ 'gyp', 'release_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
+ 'gyp_release_android_arm': [
+ 'gyp', 'release_bot', 'arm', 'android', 'crosscompile', 'swarming'],
+ 'gyp_release_android_arm64': [
+ 'gyp', 'release_bot', 'arm64', 'android', 'crosscompile', 'swarming'],
+
+ # Gyp release configs for mips.
+ 'gyp_release_mips_no_snap_no_i18n': [
+ 'gyp', 'release', 'mips', 'crosscompile', 'static', 'v8_no_i18n',
+ 'v8_snapshot_none'],
+
+ # Gyp release configs for simulators.
+ 'gyp_release_simulate_arm': [
+ 'gyp', 'release_bot', 'simulate_arm', 'swarming'],
+ 'gyp_release_simulate_arm_trybot': [
+ 'gyp', 'release_trybot', 'simulate_arm', 'swarming'],
+ 'gyp_release_simulate_mipsel': [
+ 'gyp', 'release_bot', 'simulate_mipsel', 'swarming'],
+ 'gyp_release_simulate_mips64el': [
+ 'gyp', 'release_bot', 'simulate_mips64el', 'swarming'],
+ 'gyp_release_simulate_ppc': [
+ 'gyp', 'release_bot', 'simulate_ppc', 'swarming'],
+ 'gyp_release_simulate_ppc64': [
+ 'gyp', 'release_bot', 'simulate_ppc64', 'swarming'],
+ 'gyp_release_simulate_s390': [
+ 'gyp', 'release_bot', 'simulate_s390', 'swarming'],
+ 'gyp_release_simulate_s390x': [
+ 'gyp', 'release_bot', 'simulate_s390x', 'swarming'],
+
+ # Gyp release configs for x64.
+ 'gyp_release_x64': [
+ 'gyp', 'release_bot', 'x64', 'swarming'],
+ 'gyp_release_x64_asan': [
+ 'gyp', 'release_bot', 'x64', 'asan', 'lsan', 'swarming'],
+ 'gyp_release_x64_asan_minimal_symbols': [
+ 'gyp', 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols',
+ 'swarming'],
+ 'gyp_release_x64_asan_minimal_symbols_coverage': [
+ 'gyp', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan',
+ 'minimal_symbols', 'swarming'],
+ 'gyp_release_x64_asan_symbolized_edge_verify_heap': [
+ 'gyp', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
+ 'v8_verify_heap'],
+ 'gyp_release_x64_cfi_symbolized': [
+ 'gyp', 'release_bot', 'x64', 'cfi', 'swarming', 'symbolized'],
+ 'gyp_release_x64_clang': [
+ 'gyp', 'release_bot', 'x64', 'clang', 'swarming'],
+ 'gyp_release_x64_gcc_coverage': [
+ 'gyp', 'release_bot', 'x64', 'coverage', 'gcc'],
+ 'gyp_release_x64_minimal_symbols': [
+ 'gyp', 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
+ 'gyp_release_x64_trybot': [
+ 'gyp', 'release_trybot', 'x64', 'swarming'],
+
+ # Gyp release configs for x86.
+ 'gyp_release_x86_disassembler': [
+ 'gyp', 'release_bot', 'x86', 'v8_enable_disassembler'],
+ 'gyp_release_x86_interpreted_regexp': [
+ 'gyp', 'release_bot', 'x86', 'v8_interpreted_regexp'],
+ 'gyp_release_x86_minimal_symbols': [
+ 'gyp', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
+ 'gyp_release_x86_no_i18n_trybot': [
+ 'gyp', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
+ 'gyp_release_x86_no_snap_shared_minimal_symbols': [
+ 'gyp', 'release', 'x86', 'goma', 'minimal_symbols', 'shared', 'swarming',
+ 'v8_snapshot_none'],
+ 'gyp_release_x86_predictable': [
+ 'gyp', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
+ 'gyp_release_x86_trybot': [
+ 'gyp', 'release_trybot', 'x86', 'swarming'],
+ },
+
+ 'mixins': {
+ 'android': {
+ 'gn_args': 'target_os="android" v8_android_log_stdout=true',
+ 'gyp_defines': 'OS=android v8_android_log_stdout=1',
+ },
+
+ 'arm': {
+ 'gn_args': 'target_cpu="arm"',
+ 'gyp_defines': 'target_arch=arm',
+ },
+
+ 'arm64': {
+ 'gn_args': 'target_cpu="arm64"',
+ 'gyp_defines': 'target_arch=arm64',
+ },
+
+ 'asan': {
+ 'gn_args': 'is_asan=true',
+ 'gyp_defines': 'clang=1 asan=1',
+ },
+
+ 'bb': {
+ 'gn_args': 'sanitizer_coverage_flags="bb"',
+ 'gyp_defines': 'sanitizer_coverage=bb',
+ },
+
+ 'cfi': {
+ 'gn_args': 'is_cfi=true use_cfi_diag=true',
+ 'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
+ },
+
+ 'clang': {
+ 'gn_args': 'is_clang=true',
+ 'gyp_defines': 'clang=1',
+ },
+
+ 'coverage': {
+ # TODO(machenbach): Add this to gn.
+ 'gyp_defines': 'coverage=1',
+ },
+
+ 'crosscompile': {
+ 'gyp_crosscompile': True,
+ },
+
+ 'dcheck_always_on': {
+ 'gn_args': 'dcheck_always_on=true',
+ 'gyp_defines': 'dcheck_always_on=1',
+ },
+
+ 'debug': {
+ 'gn_args': 'is_debug=true v8_enable_backtrace=true',
+ 'gyp_defines': 'v8_enable_backtrace=1',
+ },
+
+ 'debug_bot': {
+ 'mixins': [
+ 'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
+ 'v8_optimized_debug'],
+ },
+
+ 'debug_trybot': {
+ 'mixins': ['debug_bot', 'minimal_symbols'],
+ },
+
+ 'edge': {
+ 'gn_args': 'sanitizer_coverage_flags="edge"',
+ 'gyp_defines': 'sanitizer_coverage=edge',
+ },
+
+ 'gcc': {
+ 'gn_args': 'is_clang=false use_sysroot=false',
+ 'gyp_defines': 'clang=0',
+ },
+
+ 'gcmole': {
+ 'gn_args': 'v8_gcmole=true',
+ 'gyp_defines': 'gcmole=1',
+ },
+
+ 'gn': {'type': 'gn'},
+
+ 'goma': {
+ # The MB code will properly escape goma_dir if necessary in the GYP
+ # code path; the GN code path needs no escaping.
+ 'gn_args': 'use_goma=true',
+ 'gyp_defines': 'use_goma=1',
+ },
+
+ 'gyp': {'type': 'gyp'},
+
+ 'hard_float': {
+ 'gn_args': 'arm_float_abi="hard"',
+ 'gyp_defines': 'arm_float_abi=hard',
+ },
+
+ 'lsan': {
+ 'gn_args': 'is_lsan=true',
+ 'gyp_defines': 'lsan=1',
+ },
+
+ 'minimal_symbols': {
+ 'gn_args': 'symbol_level=1',
+ 'gyp_defines': 'fastbuild=1',
+ },
+
+ 'mips': {
+ 'gn_args': 'target_cpu="mips"',
+ 'gyp_defines': 'target_arch=mips',
+ },
+
+ 'msan': {
+ 'gn_args': ('is_msan=true msan_track_origins=2 '
+ 'use_prebuilt_instrumented_libraries=true'),
+ 'gyp_defines': ('clang=1 msan=1 msan_track_origins=2 '
+ 'use_prebuilt_instrumented_libraries=1'),
+ },
+
+ 'release': {
+ 'gn_args': 'is_debug=false',
+ },
+
+ 'release_bot': {
+ 'mixins': ['release', 'static', 'goma'],
+ },
+
+ 'release_trybot': {
+ 'mixins': ['release_bot', 'minimal_symbols', 'dcheck_always_on'],
+ },
+
+ 'shared': {
+ 'gn_args': 'is_component_build=true',
+ 'gyp_defines': 'component=shared_library',
+ },
+
+ 'simulate_arm': {
+ 'gn_args': 'target_cpu="x86" v8_target_cpu="arm"',
+ 'gyp_defines': 'target_arch=ia32 v8_target_arch=arm',
+ },
+
+ 'simulate_arm64': {
+ 'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"',
+ 'gyp_defines': 'target_arch=x64 v8_target_arch=arm64',
+ },
+
+ 'simulate_mipsel': {
+ 'gn_args': 'target_cpu="x86" v8_target_cpu="mipsel"',
+ 'gyp_defines': 'target_arch=ia32 v8_target_arch=mipsel',
+ },
+
+ 'simulate_mips64el': {
+ 'gn_args': 'target_cpu="x64" v8_target_cpu="mips64el"',
+ 'gyp_defines': 'target_arch=x64 v8_target_arch=mips64el',
+ },
+
+ 'simulate_ppc': {
+ 'gn_args': 'target_cpu="x86" v8_target_cpu="ppc"',
+ 'gyp_defines': 'target_arch=ia32 v8_target_arch=ppc',
+ },
+
+ 'simulate_ppc64': {
+ 'gn_args': 'target_cpu="x64" v8_target_cpu="ppc64"',
+ 'gyp_defines': 'target_arch=x64 v8_target_arch=ppc64',
+ },
+
+ 'simulate_s390': {
+ 'gn_args': 'target_cpu="x86" v8_target_cpu="s390"',
+ 'gyp_defines': 'target_arch=ia32 v8_target_arch=s390',
+ },
+
+ 'simulate_s390x': {
+ 'gn_args': 'target_cpu="x64" v8_target_cpu="s390x"',
+ 'gyp_defines': 'target_arch=x64 v8_target_arch=s390x',
+ },
+
+ 'simulate_x87': {
+ 'gn_args': 'target_cpu="x86" v8_target_cpu="x87"',
+ 'gyp_defines': 'target_arch=ia32 v8_target_arch=x87',
+ },
+
+ 'static': {
+ 'gn_args': 'is_component_build=false',
+ 'gyp_defines': 'component=static_library',
+ },
+
+ 'swarming': {
+ 'gn_args': 'v8_test_isolation_mode="prepare"',
+ 'gyp_defines': 'test_isolation_mode=prepare',
+ },
+
+ # TODO(machenbach): Remove the symbolized config after the bots are gone.
+ 'symbolized': {
+ 'gn_args': 'symbolized=true',
+ 'gyp_defines':
+ 'release_extra_cflags="-fno-inline-functions -fno-inline"',
+ },
+
+ 'tsan': {
+ 'gn_args': 'is_tsan=true',
+ 'gyp_defines': 'clang=1 tsan=1',
+ },
+
+ 'valgrind': {
+ 'gn_args': 'v8_has_valgrind=true',
+ 'gyp_defines': 'has_valgrind=1',
+ },
+
+ 'v8_no_i18n': {
+ 'gn_args': 'v8_enable_i18n_support=false',
+ 'gyp_defines': 'v8_enable_i18n_support=0',
+ },
+
+ 'v8_enable_disassembler': {
+ 'gn_args': 'v8_enable_disassembler=true',
+ 'gyp_defines': 'v8_enable_disassembler=1',
+ },
+
+ 'v8_enable_slow_dchecks': {
+ 'gn_args': 'v8_enable_slow_dchecks=true',
+ 'gyp_defines': 'v8_enable_slow_dchecks=1',
+ },
+
+ 'v8_enable_verify_predictable': {
+ 'gn_args': 'v8_enable_verify_predictable=true',
+ 'gyp_defines': 'v8_enable_verify_predictable=1',
+ },
+
+ 'v8_enable_vtunejit': {
+ 'gn_args': 'v8_enable_vtunejit=true',
+ 'gyp_defines': 'v8_enable_vtunejit=1',
+ },
+
+ 'v8_full_debug': {
+ 'gn_args': 'v8_optimized_debug=false',
+ 'gyp_defines': 'v8_optimized_debug=0',
+ },
+
+ 'v8_interpreted_regexp': {
+ 'gn_args': 'v8_interpreted_regexp=true',
+ 'gyp_defines': 'v8_interpreted_regexp=1',
+ },
+
+ 'v8_optimized_debug': {
+ # This is the default in gn for debug.
+ 'gyp_defines': 'v8_optimized_debug=1',
+ },
+
+ 'v8_snapshot_custom': {
+ # GN path is relative to project root.
+ 'gn_args': 'v8_embed_script="test/mjsunit/mjsunit.js"',
+
+ # Gyp path is relative to src/v8.gyp.
+ 'gyp_defines': 'embed_script=../test/mjsunit/mjsunit.js',
+ },
+
+ 'v8_snapshot_internal': {
+ 'gn_args': 'v8_use_external_startup_data=false',
+ 'gyp_defines': 'v8_use_external_startup_data=0',
+ },
+
+ 'v8_snapshot_none': {
+ 'gn_args': 'v8_use_snapshot=false',
+ 'gyp_defines': 'v8_use_snapshot=false',
+ },
+
+ 'v8_verify_heap': {
+ 'gn_args': 'v8_enable_verify_heap=true',
+ 'gyp_defines': 'v8_enable_verify_heap=1',
+ },
+
+ 'x64': {
+ 'gn_args': 'target_cpu="x64"',
+ 'gyp_defines': 'target_arch=x64',
+ },
+
+ 'x86': {
+ 'gn_args': 'target_cpu="x86"',
+ 'gyp_defines': 'target_arch=ia32',
+ },
+ },
+}
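
To see how the pieces above combine: each entry in 'configs' is a list of mixin names, and mb expands them recursively (a mixin may pull in further mixins through its own 'mixins' key), concatenating the resulting gn_args or gyp_defines. A worked example, derived from the definitions above (flag order depends on mb's traversal, but the set of flags follows directly from the mixins listed):

    gn_release_x64_tsan = gn + release_bot + x64 + tsan + swarming
      where release_bot = release + static + goma
    => gn args: is_debug=false is_component_build=false use_goma=true
                target_cpu="x64" is_tsan=true
                v8_test_isolation_mode="prepare"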
diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc
index 3b952d816b..9e5188f479 100644
--- a/deps/v8/samples/hello-world.cc
+++ b/deps/v8/samples/hello-world.cc
@@ -11,29 +11,18 @@
using namespace v8;
-class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
-
-
int main(int argc, char* argv[]) {
// Initialize V8.
- V8::InitializeICU();
+ V8::InitializeICUDefaultLocation(argv[0]);
V8::InitializeExternalStartupData(argv[0]);
Platform* platform = platform::CreateDefaultPlatform();
V8::InitializePlatform(platform);
V8::Initialize();
// Create a new Isolate and make it the current one.
- ArrayBufferAllocator allocator;
Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = &allocator;
+ create_params.array_buffer_allocator =
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator();
Isolate* isolate = Isolate::New(create_params);
{
Isolate::Scope isolate_scope(isolate);
@@ -68,5 +57,6 @@ int main(int argc, char* argv[]) {
V8::Dispose();
V8::ShutdownPlatform();
delete platform;
+ delete create_params.array_buffer_allocator;
return 0;
}
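
The hunk above captures the two embedder-facing API changes in this V8 version: InitializeICUDefaultLocation(argv[0]) replaces InitializeICU(), letting V8 locate its ICU data file relative to the executable, and the hand-rolled ArrayBufferAllocator subclass gives way to ArrayBuffer::Allocator::NewDefaultAllocator(). A minimal sketch of the resulting boot and teardown sequence, assembled from the diff itself (note the ownership change: the default allocator is heap-allocated, so the embedder must delete it after disposing the isolate):

    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"

    int main(int argc, char* argv[]) {
      v8::V8::InitializeICUDefaultLocation(argv[0]);  // was: InitializeICU()
      v8::V8::InitializeExternalStartupData(argv[0]);
      v8::Platform* platform = v8::platform::CreateDefaultPlatform();
      v8::V8::InitializePlatform(platform);
      v8::V8::Initialize();

      v8::Isolate::CreateParams create_params;
      // The default allocator is now provided by V8; the embedder owns it.
      create_params.array_buffer_allocator =
          v8::ArrayBuffer::Allocator::NewDefaultAllocator();
      v8::Isolate* isolate = v8::Isolate::New(create_params);

      // ... create a context and run scripts here ...

      isolate->Dispose();
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();
      delete platform;
      delete create_params.array_buffer_allocator;  // new teardown step
      return 0;
    }

The same pattern is applied to process.cc and shell.cc below.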
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index cfbd054c16..29ddb5cf2f 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -38,17 +38,6 @@
using namespace std;
using namespace v8;
-class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
-
-
// These interfaces represent an existing request processing interface.
// The idea is to imagine a real application that uses these interfaces
// and then add scripting capabilities that allow you to interact with
@@ -687,7 +676,7 @@ void PrintMap(map<string, string>* m) {
int main(int argc, char* argv[]) {
- v8::V8::InitializeICU();
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
@@ -699,9 +688,9 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "No script was specified.\n");
return 1;
}
- ArrayBufferAllocator array_buffer_allocator;
Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = &array_buffer_allocator;
+ create_params.array_buffer_allocator =
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator();
Isolate* isolate = Isolate::New(create_params);
Isolate::Scope isolate_scope(isolate);
HandleScope scope(isolate);
diff --git a/deps/v8/samples/samples.gyp b/deps/v8/samples/samples.gyp
index 7e0608b213..e5e9ef0f8c 100644
--- a/deps/v8/samples/samples.gyp
+++ b/deps/v8/samples/samples.gyp
@@ -29,13 +29,14 @@
'variables': {
'v8_code': 1,
'v8_enable_i18n_support%': 1,
+ 'v8_toolset_for_shell%': 'target',
},
- 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
+ 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
'target_defaults': {
'type': 'executable',
'dependencies': [
- '../tools/gyp/v8.gyp:v8',
- '../tools/gyp/v8.gyp:v8_libplatform',
+ '../src/v8.gyp:v8',
+ '../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
'..',
@@ -56,10 +57,15 @@
},
'targets': [
{
- 'target_name': 'shell',
+ 'target_name': 'v8_shell',
'sources': [
'shell.cc',
],
+ 'conditions': [
+ [ 'want_separate_host_toolset==1', {
+ 'toolsets': [ '<(v8_toolset_for_shell)', ],
+ }],
+ ],
},
{
'target_name': 'hello-world',
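
In gyp terms, the new block reads: when want_separate_host_toolset==1 (cross-compiles that build separate host and target toolsets), the renamed v8_shell target is built only for the toolset named by v8_toolset_for_shell, which the '%' in the variables block defaults to 'target' unless overridden (e.g. with -Dv8_toolset_for_shell=host); single-toolset builds are unaffected. Presumably this lets cross-compiling bots that only need a host-side shell avoid building it for both toolsets.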
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index b89ffdd180..e042815e7a 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -63,27 +63,16 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
static bool run_shell;
-class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
-
-
int main(int argc, char* argv[]) {
- v8::V8::InitializeICU();
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
- ShellArrayBufferAllocator array_buffer_allocator;
v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = &array_buffer_allocator;
+ create_params.array_buffer_allocator =
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
run_shell = (argc == 1);
int result;
@@ -103,6 +92,7 @@ int main(int argc, char* argv[]) {
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
+ delete create_params.array_buffer_allocator;
return result;
}
diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/snapshot_toolchain.gni
index 4932110489..893bdc589f 100644
--- a/deps/v8/snapshot_toolchain.gni
+++ b/deps/v8/snapshot_toolchain.gni
@@ -25,22 +25,73 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# The snapshot needs to be compiled for the host, but compiled with
-# a toolchain that matches the bit-width of the target.
+import("//build/config/v8_target_cpu.gni")
-# TODO(GYP): For now we only support 32-bit little-endian target builds from an
-# x64 Linux host. Eventually we need to support all of the host/target
-# configurations v8 runs on.
-if (host_cpu == "x64" && host_os == "linux") {
- if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
- snapshot_toolchain = "//build/toolchain/linux:clang_x86"
- } else if (target_cpu == "x64" || target_cpu == "arm64" || target_cpu == "mips64el") {
- snapshot_toolchain = "//build/toolchain/linux:clang_x64"
- } else {
- assert(false, "Need environment for this arch: $target_cpu")
- }
-} else {
- snapshot_toolchain = default_toolchain
+declare_args() {
+ # The v8 snapshot needs to be built by code that is compiled with a
+ # toolchain that matches the bit-width of the target CPU, but runs on
+ # the host.
+ v8_snapshot_toolchain = ""
}
+# Try to infer the appropriate snapshot toolchain for the v8_current_cpu
+# where possible.
+#
+# Assume that v8_target_cpu (and hence v8_current_cpu) has been validated
+# as supported on the current host CPU and OS in v8_target_cpu.gni. The
+# logic below is complicated enough without also needing to do input
+# validation.
+#
+# There are test cases for this code posted as an attachment to
+# https://crbug.com/625353.
+#
+# TODO(GYP): Currently only regular (non-cross) compiles, and cross-compiles
+# from x64 hosts to Intel, ARM, or MIPS targets, are implemented. Add support
+# for the other supported configurations.
+
+if (v8_snapshot_toolchain == "") {
+ if (current_os == host_os && current_cpu == host_cpu) {
+ # This is not a cross-compile, so build the snapshot with the current
+ # toolchain.
+ v8_snapshot_toolchain = current_toolchain
+ } else if (current_os == host_os && current_cpu == "x86" &&
+ host_cpu == "x64") {
+ # This is an x64 -> x86 cross-compile, but x64 hosts can usually run x86
+ # binaries built for the same OS, so build the snapshot with the current
+ # toolchain here, too.
+ v8_snapshot_toolchain = current_toolchain
+ } else if (current_os == "win" && host_os == "mac" && is_clang) {
+    # This is a mac -> win cross-compile, which is only supported with clang.
+ v8_snapshot_toolchain = "//build/toolchain/mac:clang_${v8_current_cpu}"
+ } else if (host_cpu == "x64") {
+ # This is a cross-compile from an x64 host to either a non-Intel target
+ # cpu or a different target OS. Clang will always be used by default on the
+ # host, unless this is a ChromeOS build, in which case the same toolchain
+ # (Clang or GCC) will be used for target and host by default.
+ if (is_chromeos && !is_clang) {
+ _clang = ""
+ } else {
+ _clang = "clang_"
+ }
+
+ if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
+ _cpus = v8_current_cpu
+ } else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
+ _cpus = "x64_v8_${v8_current_cpu}"
+ } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
+ _cpus = "x86_v8_${v8_current_cpu}"
+ } else {
+ # This branch should not be reached; leave _cpus blank so the assert
+ # below will fail.
+ _cpus = ""
+ }
+
+ if (_cpus != "") {
+ v8_snapshot_toolchain = "//build/toolchain/${host_os}:${_clang}${_cpus}"
+ }
+ }
+}
+assert(v8_snapshot_toolchain != "",
+ "Do not know how to build a snapshot for $current_toolchain " +
+ "on $host_os $host_cpu")
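
Tracing the new inference for a few concrete host/target pairs (derived directly from the branches above; the toolchain labels assume the standard Chromium //build definitions):

    x64 linux host, target x64        -> current_toolchain (not a cross-compile)
    x64 linux host, target x86        -> current_toolchain (x64 can run x86)
    x64 linux host, target arm        -> //build/toolchain/linux:clang_x86_v8_arm
    x64 linux host, target mips64el   -> //build/toolchain/linux:clang_x64_v8_mips64el
    mac host, target win (clang only) -> //build/toolchain/mac:clang_${v8_current_cpu}

Any combination that reaches the end with v8_snapshot_toolchain still empty trips the assert.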
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 1bb616ef33..b1c428d5d7 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -3,16 +3,19 @@ include_rules = [
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
- "+src/compiler/code-stub-assembler.h",
+ "+src/compiler/code-assembler.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"-src/interpreter",
"+src/interpreter/bytecode-array-iterator.h",
+ "+src/interpreter/bytecode-decoder.h",
+ "+src/interpreter/bytecode-flags.h",
+ "+src/interpreter/bytecode-register.h",
"+src/interpreter/bytecodes.h",
"+src/interpreter/interpreter.h",
- "+src/interpreter/source-position-table.h",
+ "+testing/gtest/include/gtest/gtest_prod.h",
"-src/libplatform",
"-include/libplatform"
]
@@ -20,5 +23,6 @@ include_rules = [
specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
+ "+include/libplatform/v8-tracing.h",
],
}
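
For readers unfamiliar with DEPS syntax: a '-prefix' rule forbids includes under that prefix and a '+path' rule re-allows a specific path, with the more specific (longer) prefix winning. The block above therefore keeps src/interpreter sealed off except for the whitelisted headers, and the new gtest_prod.h entry allows production headers to use FRIEND_TEST-style declarations.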
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 374c0a21f8..da44151b3e 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -40,6 +40,11 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
Handle<Object> set = v8::FromCData(isolate, setter);
info->set_getter(*get);
info->set_setter(*set);
+ Address redirected = info->redirected_getter();
+ if (redirected != nullptr) {
+ Handle<Object> js_get = v8::FromCData(isolate, redirected);
+ info->set_js_getter(*js_get);
+ }
return info;
}
@@ -67,9 +72,6 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
return
CheckForName(name, isolate->factory()->length_string(),
JSArray::kLengthOffset, object_offset);
- case JS_ARRAY_BUFFER_TYPE:
- return CheckForName(name, isolate->factory()->byte_length_string(),
- JSArrayBuffer::kByteLengthOffset, object_offset);
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(name, isolate->factory()->length_string(),
@@ -81,57 +83,11 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
}
-bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
- Handle<Name> name,
- int* object_offset) {
- DCHECK(name->IsUniqueName());
- Isolate* isolate = name->GetIsolate();
-
- switch (map->instance_type()) {
- case JS_TYPED_ARRAY_TYPE: {
- if (!CheckForName(name, isolate->factory()->length_string(),
- JSTypedArray::kLengthOffset, object_offset) &&
- !CheckForName(name, isolate->factory()->byte_length_string(),
- JSTypedArray::kByteLengthOffset, object_offset) &&
- !CheckForName(name, isolate->factory()->byte_offset_string(),
- JSTypedArray::kByteOffsetOffset, object_offset)) {
- return false;
- }
-
- if (map->is_dictionary_map()) return false;
-
- // Check if the property is overridden on the instance.
- DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = descriptors->SearchWithCache(isolate, *name, *map);
- if (descriptor != DescriptorArray::kNotFound) return false;
-
- Handle<Object> proto = Handle<Object>(map->prototype(), isolate);
- if (!proto->IsJSReceiver()) return false;
-
- // Check if the property is defined in the prototype chain.
- LookupIterator it(proto, name);
- if (!it.IsFound()) return false;
-
- Object* original_proto =
- JSFunction::cast(map->GetConstructor())->prototype();
-
- // Property is not configurable. It is enough to verify that
- // the holder is the same.
- return *it.GetHolder<Object>() == original_proto;
- }
- case JS_DATA_VIEW_TYPE:
- return CheckForName(name, isolate->factory()->byte_length_string(),
- JSDataView::kByteLengthOffset, object_offset) ||
- CheckForName(name, isolate->factory()->byte_offset_string(),
- JSDataView::kByteOffsetOffset, object_offset);
- default:
- return false;
- }
-}
+namespace {
-MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
- Isolate* isolate, Handle<JSObject> receiver, Handle<JSObject> holder,
- Handle<Name> name, Handle<Object> value, bool observe) {
+MUST_USE_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
+ Isolate* isolate, Handle<Object> receiver, Handle<JSObject> holder,
+ Handle<Name> name, Handle<Object> value) {
LookupIterator it(receiver, name, holder,
LookupIterator::OWN_SKIP_INTERCEPTOR);
// Skip any access checks we might hit. This accessor should never hit in a
@@ -140,37 +96,26 @@ MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
CHECK(it.HasAccess());
it.Next();
}
+ DCHECK(holder.is_identical_to(it.GetHolder<JSObject>()));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
-
- Handle<Object> old_value;
- bool is_observed = observe && receiver->map()->is_observed();
- if (is_observed) {
- MaybeHandle<Object> maybe_old = Object::GetPropertyWithAccessor(&it);
- if (!maybe_old.ToHandle(&old_value)) return maybe_old;
- }
-
it.ReconfigureDataProperty(value, it.property_attributes());
-
- if (is_observed && !old_value->SameValue(*value)) {
- return JSObject::EnqueueChangeRecord(receiver, "update", name, old_value);
- }
-
return value;
}
+} // namespace
+
void Accessors::ReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> receiver =
- Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+ Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<JSObject> holder =
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Name> name = Utils::OpenHandle(*key);
Handle<Object> value = Utils::OpenHandle(*val);
- MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
- isolate, receiver, holder, name, value, false);
+ MaybeHandle<Object> result =
+ ReplaceAccessorWithDataProperty(isolate, receiver, holder, name, value);
if (result.is_null()) isolate->OptionalRescheduleException(false);
}
@@ -221,7 +166,7 @@ void Accessors::ArrayLengthSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSReceiver> object = Utils::OpenHandle(*info.This());
+ Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
@@ -231,9 +176,7 @@ void Accessors::ArrayLengthSetter(
return;
}
- if (JSArray::ObservableSetLength(array, length).is_null()) {
- isolate->OptionalRescheduleException(false);
- }
+ JSArray::SetLength(array, length);
if (info.ShouldThrowOnError()) {
uint32_t actual_new_len = 0;
@@ -305,7 +248,7 @@ void Accessors::ScriptColumnOffsetGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Smi::FromInt(
Script::cast(JSValue::cast(object)->value())->column_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -332,7 +275,7 @@ void Accessors::ScriptIdGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* id = Smi::FromInt(Script::cast(JSValue::cast(object)->value())->id());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
}
@@ -357,7 +300,7 @@ void Accessors::ScriptNameGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* source = Script::cast(JSValue::cast(object)->value())->name();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
@@ -381,7 +324,7 @@ void Accessors::ScriptSourceGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* source = Script::cast(JSValue::cast(object)->value())->source();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
@@ -405,7 +348,7 @@ void Accessors::ScriptLineOffsetGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* res =
Smi::FromInt(Script::cast(JSValue::cast(object)->value())->line_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -432,7 +375,7 @@ void Accessors::ScriptTypeGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* res =
Smi::FromInt(Script::cast(JSValue::cast(object)->value())->type());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -458,7 +401,7 @@ void Accessors::ScriptCompilationTypeGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Smi::FromInt(
Script::cast(JSValue::cast(object)->value())->compilation_type());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@@ -484,7 +427,7 @@ void Accessors::ScriptLineEndsGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Script::InitLineEnds(script);
@@ -519,7 +462,7 @@ void Accessors::ScriptSourceUrlGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* url = Script::cast(JSValue::cast(object)->value())->source_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
@@ -543,7 +486,7 @@ void Accessors::ScriptSourceMappingUrlGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* url =
Script::cast(JSValue::cast(object)->value())->source_mapping_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
@@ -567,7 +510,7 @@ void Accessors::ScriptIsEmbedderDebugScriptGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
->origin_options()
.IsEmbedderDebugScript();
@@ -596,7 +539,7 @@ void Accessors::ScriptContextDataGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.This());
+ Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Script::cast(JSValue::cast(object)->value())->context_data();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@@ -621,11 +564,11 @@ void Accessors::ScriptEvalFromScriptGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
- if (!script->eval_from_shared()->IsUndefined()) {
+ if (!script->eval_from_shared()->IsUndefined(isolate)) {
Handle<SharedFunctionInfo> eval_from_shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
if (eval_from_shared->script()->IsScript()) {
@@ -657,16 +600,12 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
- Handle<Code> code(SharedFunctionInfo::cast(
- script->eval_from_shared())->code());
- result = Handle<Object>(Smi::FromInt(code->SourcePosition(
- script->eval_from_instructions_offset())),
- isolate);
+ result = Handle<Object>(Smi::FromInt(script->GetEvalPosition()), isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -691,17 +630,19 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.This());
+ Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
- Handle<Object> result;
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
- // Find the name of the function calling eval.
- if (!shared->name()->IsUndefined()) {
- result = Handle<Object>(shared->name(), isolate);
- } else {
- result = Handle<Object>(shared->inferred_name(), isolate);
+ Handle<Object> result = isolate->factory()->undefined_value();
+ if (!script->eval_from_shared()->IsUndefined(isolate)) {
+ Handle<SharedFunctionInfo> shared(
+ SharedFunctionInfo::cast(script->eval_from_shared()));
+ // Find the name of the function calling eval.
+ if (!shared->name()->IsUndefined(isolate)) {
+ result = Handle<Object>(shared->name(), isolate);
+ } else {
+ result = Handle<Object>(shared->inferred_name(), isolate);
+ }
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -732,24 +673,8 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
MUST_USE_RESULT static MaybeHandle<Object> SetFunctionPrototype(
Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
- Handle<Object> old_value;
- bool is_observed = function->map()->is_observed();
- if (is_observed) {
- if (function->has_prototype())
- old_value = handle(function->prototype(), isolate);
- else
- old_value = isolate->factory()->NewFunctionPrototype(function);
- }
-
JSFunction::SetPrototype(function, value);
DCHECK(function->prototype() == *value);
-
- if (is_observed && !old_value->SameValue(*value)) {
- MaybeHandle<Object> result = JSObject::EnqueueChangeRecord(
- function, "update", isolate->factory()->prototype_string(), old_value);
- if (result.is_null()) return MaybeHandle<Object>();
- }
-
return function;
}
@@ -811,45 +736,19 @@ void Accessors::FunctionLengthGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
-
- int length = 0;
- if (function->shared()->is_compiled()) {
- length = function->shared()->length();
- } else {
- // If the function isn't compiled yet, the length is not computed
- // correctly yet. Compile it now and return the right length.
- if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
- length = function->shared()->length();
- }
- if (isolate->has_pending_exception()) {
- isolate->OptionalRescheduleException(false);
- }
+ Handle<Object> result;
+ if (!JSFunction::GetLength(isolate, function).ToHandle(&result)) {
+ result = handle(Smi::FromInt(0), isolate);
+ isolate->OptionalRescheduleException(false);
}
- Handle<Object> result(Smi::FromInt(length), isolate);
- info.GetReturnValue().Set(Utils::ToLocal(result));
-}
-void Accessors::ObservedReconfigureToDataProperty(
- v8::Local<v8::Name> key, v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<JSObject> receiver =
- Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
- Handle<JSObject> holder =
- Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Name> name = Utils::OpenHandle(*key);
- Handle<Object> value = Utils::OpenHandle(*val);
- MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
- isolate, receiver, holder, name, value, true);
- if (result.is_null()) isolate->OptionalRescheduleException(false);
+ info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
Handle<AccessorInfo> Accessors::FunctionLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
- &FunctionLengthGetter, &ObservedReconfigureToDataProperty,
+ &FunctionLengthGetter, &ReconfigureToDataProperty,
attributes);
}
@@ -866,19 +765,14 @@ void Accessors::FunctionNameGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Object> result;
- if (function->shared()->name_should_print_as_anonymous()) {
- result = isolate->factory()->anonymous_string();
- } else {
- result = handle(function->shared()->name(), isolate);
- }
+ Handle<Object> result = JSFunction::GetName(isolate, function);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::FunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
- &FunctionNameGetter, &ObservedReconfigureToDataProperty,
+ &FunctionNameGetter, &ReconfigureToDataProperty,
attributes);
}
@@ -977,7 +871,16 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
// Copy the parameters to the arguments object.
DCHECK(array->length() == length);
- for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+ for (int i = 0; i < length; i++) {
+ Object* value = frame->GetParameter(i);
+ if (value->IsTheHole(isolate)) {
+ // Generators currently use holes as dummy arguments when resuming. We
+ // must not leak those.
+ DCHECK(IsResumableFunction(function->shared()->kind()));
+ value = isolate->heap()->undefined_value();
+ }
+ array->set(i, value);
+ }
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
@@ -1155,62 +1058,179 @@ Handle<AccessorInfo> Accessors::FunctionCallerInfo(
//
-// Accessors::MakeModuleExport
+// Accessors::BoundFunctionLength
//
-static void ModuleGetExport(v8::Local<v8::Name> property,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
- Context* context = Context::cast(instance->context());
- DCHECK(context->IsModuleContext());
- Isolate* isolate = instance->GetIsolate();
- int slot = info.Data()
- ->Int32Value(info.GetIsolate()->GetCurrentContext())
- .FromMaybe(-1);
- if (slot < 0 || slot >= context->length()) {
- Handle<Name> name = v8::Utils::OpenHandle(*property);
-
- Handle<Object> exception = isolate->factory()->NewReferenceError(
- MessageTemplate::kNotDefined, name);
- isolate->ScheduleThrow(*exception);
+void Accessors::BoundFunctionLengthGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSBoundFunction> function =
+ Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
+
+ Handle<Smi> target_length;
+ Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
+ isolate);
+ if (!JSFunction::GetLength(isolate, target).ToHandle(&target_length)) {
+ target_length = handle(Smi::FromInt(0), isolate);
+ isolate->OptionalRescheduleException(false);
return;
}
- Object* value = context->get(slot);
- if (value->IsTheHole()) {
- Handle<Name> name = v8::Utils::OpenHandle(*property);
- Handle<Object> exception = isolate->factory()->NewReferenceError(
- MessageTemplate::kNotDefined, name);
- isolate->ScheduleThrow(*exception);
+ int bound_length = function->bound_arguments()->length();
+ int length = Max(0, target_length->value() - bound_length);
+
+ Handle<Object> result(Smi::FromInt(length), isolate);
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+Handle<AccessorInfo> Accessors::BoundFunctionLengthInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate, isolate->factory()->length_string(),
+ &BoundFunctionLengthGetter, &ReconfigureToDataProperty,
+ attributes);
+}
+
+//
+// Accessors::BoundFunctionName
+//
+
+void Accessors::BoundFunctionNameGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSBoundFunction> function =
+ Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
+ Handle<Object> result;
+ if (!JSBoundFunction::GetName(isolate, function).ToHandle(&result)) {
+ isolate->OptionalRescheduleException(false);
return;
}
- info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+Handle<AccessorInfo> Accessors::BoundFunctionNameInfo(
+ Isolate* isolate, PropertyAttributes attributes) {
+ return MakeAccessor(isolate, isolate->factory()->name_string(),
+ &BoundFunctionNameGetter, &ReconfigureToDataProperty,
+ attributes);
}
+//
+// Accessors::ErrorStack
+//
-static void ModuleSetExport(v8::Local<v8::Name> property,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- if (!info.ShouldThrowOnError()) return;
- Handle<Name> name = v8::Utils::OpenHandle(*property);
- Isolate* isolate = name->GetIsolate();
- Handle<Object> exception =
- isolate->factory()->NewTypeError(MessageTemplate::kNotDefined, name);
- isolate->ScheduleThrow(*exception);
+namespace {
+
+MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
+ Handle<JSObject> error) {
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSReceiver::SetProperty(error, isolate->factory()->stack_trace_symbol(),
+ isolate->factory()->undefined_value(), STRICT),
+ JSReceiver);
+ return error;
}
+bool IsAccessor(Handle<Object> receiver, Handle<Name> name,
+ Handle<JSObject> holder) {
+ LookupIterator it(receiver, name, holder,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ // Skip any access checks we might hit. This accessor should never hit in a
+ // situation where the caller does not have access.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ CHECK(it.HasAccess());
+ it.Next();
+ }
+ return (it.state() == LookupIterator::ACCESSOR);
+}
-Handle<AccessorInfo> Accessors::MakeModuleExport(
- Handle<String> name,
- int index,
- PropertyAttributes attributes) {
- Isolate* isolate = name->GetIsolate();
- Handle<AccessorInfo> info = MakeAccessor(isolate, name, &ModuleGetExport,
- &ModuleSetExport, attributes);
- info->set_data(Smi::FromInt(index));
- return info;
+} // namespace
+
+void Accessors::ErrorStackGetter(
+ v8::Local<v8::Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSObject> holder =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
+
+ // Retrieve the structured stack trace.
+
+ Handle<Object> stack_trace;
+ Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
+ MaybeHandle<Object> maybe_stack_trace =
+ JSObject::GetProperty(holder, stack_trace_symbol);
+ if (!maybe_stack_trace.ToHandle(&stack_trace) ||
+ stack_trace->IsUndefined(isolate)) {
+ Handle<Object> result = isolate->factory()->undefined_value();
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+ return;
+ }
+
+ // Format it, clear the internal structured trace and reconfigure as a data
+ // property.
+
+ Handle<Object> formatted_stack_trace;
+ if (!ErrorUtils::FormatStackTrace(isolate, holder, stack_trace)
+ .ToHandle(&formatted_stack_trace)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+
+ MaybeHandle<Object> result = ClearInternalStackTrace(isolate, holder);
+ if (result.is_null()) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+
+ // If stack is still an accessor (this could have changed in the meantime
+ // since FormatStackTrace can execute arbitrary JS), replace it with a data
+ // property.
+ Handle<Object> receiver = Utils::OpenHandle(*info.This());
+ Handle<Name> name = Utils::OpenHandle(*key);
+ if (IsAccessor(receiver, name, holder)) {
+ result = ReplaceAccessorWithDataProperty(isolate, receiver, holder, name,
+ formatted_stack_trace);
+ if (result.is_null()) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+ } else {
+ // The stack property has been modified in the meantime.
+ if (!JSObject::GetProperty(holder, name).ToHandle(&formatted_stack_trace)) {
+ isolate->OptionalRescheduleException(false);
+ return;
+ }
+ }
+
+ v8::Local<v8::Value> value = Utils::ToLocal(formatted_stack_trace);
+ info.GetReturnValue().Set(value);
+}
+
+void Accessors::ErrorStackSetter(v8::Local<v8::Name> name,
+ v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSObject> obj =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+
+ // Clear internal properties to avoid memory leaks.
+ Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
+ if (JSReceiver::HasOwnProperty(obj, stack_trace_symbol).FromMaybe(false)) {
+ ClearInternalStackTrace(isolate, obj);
+ }
+
+ Accessors::ReconfigureToDataProperty(name, val, info);
}
+Handle<AccessorInfo> Accessors::ErrorStackInfo(Isolate* isolate,
+ PropertyAttributes attributes) {
+ Handle<AccessorInfo> info =
+ MakeAccessor(isolate, isolate->factory()->stack_string(),
+ &ErrorStackGetter, &ErrorStackSetter, attributes);
+ return info;
+}
} // namespace internal
} // namespace v8
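
Two behavioral notes on the new accessors above, derived from the code itself. BoundFunctionLengthGetter implements the ES2015 rule that a bound function's length is the target's length minus the number of bound arguments, clamped at zero: binding one argument to a three-parameter function yields length 2, binding five yields 0 (the Max(0, ...) call). And ErrorStackGetter re-checks IsAccessor() before reconfiguring because, as its comment notes, FormatStackTrace can execute arbitrary JavaScript that may itself redefine the stack property; the getter only installs the formatted string as a data property if the accessor pair is still in place, and otherwise returns whatever the property now holds.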
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 3fe550c25a..2171a35c74 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -22,6 +22,9 @@ class AccessorInfo;
#define ACCESSOR_INFO_LIST(V) \
V(ArgumentsIterator) \
V(ArrayLength) \
+ V(BoundFunctionLength) \
+ V(BoundFunctionName) \
+ V(ErrorStack) \
V(FunctionArguments) \
V(FunctionCaller) \
V(FunctionName) \
@@ -44,10 +47,10 @@ class AccessorInfo;
V(ScriptIsEmbedderDebugScript) \
V(StringLength)
-#define ACCESSOR_SETTER_LIST(V) \
- V(ReconfigureToDataProperty) \
- V(ObservedReconfigureToDataProperty) \
- V(ArrayLengthSetter) \
+#define ACCESSOR_SETTER_LIST(V) \
+ V(ReconfigureToDataProperty) \
+ V(ArrayLengthSetter) \
+ V(ErrorStackSetter) \
V(FunctionPrototypeSetter)
// Accessors contains all predefined proxy accessors.
@@ -85,23 +88,11 @@ class Accessors : public AllStatic {
Handle<JSFunction> object, Handle<Object> value);
static Handle<JSObject> FunctionGetArguments(Handle<JSFunction> object);
- // Accessor infos.
- static Handle<AccessorInfo> MakeModuleExport(
- Handle<String> name, int index, PropertyAttributes attributes);
-
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset);
- // Returns true for properties that are accessors to ArrayBufferView and
- // derived classes fields. If true, *object_offset contains offset of
- // object field. The caller still has to check whether the underlying
- // buffer was neutered.
- static bool IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
- Handle<Name> name,
- int* object_offset);
-
static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate,
Handle<Name> name,
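
ACCESSOR_INFO_LIST and ACCESSOR_SETTER_LIST are X-macros: one list definition is expanded with different per-entry macros to stamp out declarations, tables, or switch cases. A minimal self-contained sketch of the pattern (the list here is illustrative, not V8's real one):

    #include <cstdio>

    #define DEMO_ACCESSOR_LIST(V) \
      V(ArrayLength)              \
      V(FunctionName)             \
      V(StringLength)

    // Expand the list into one declaration per entry...
    #define DECLARE_GETTER(Name) void Name##Getter();
    DEMO_ACCESSOR_LIST(DECLARE_GETTER)
    #undef DECLARE_GETTER

    // ...and into a table of printable names.
    #define ENTRY(Name) #Name,
    const char* kAccessorNames[] = {DEMO_ACCESSOR_LIST(ENTRY)};
    #undef ENTRY

    void ArrayLengthGetter() {}
    void FunctionNameGetter() {}
    void StringLengthGetter() {}

    int main() {
      for (const char* name : kAccessorNames) std::printf("%s\n", name);
      return 0;
    }

Adding BoundFunctionLength or ErrorStack to the real list is thus a one-line change that updates every expansion site at once.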
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index 86558e094a..61292bf562 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -13,7 +13,7 @@ namespace internal {
RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != NULL) return;
- map_ = new HashMap(HashMap::PointersMatch);
+ map_ = new base::HashMap(base::HashMap::PointersMatch);
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
Object* root = isolate->heap()->root(root_index);
@@ -22,7 +22,7 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
// not be referenced through the root list in the snapshot.
if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
- HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
+ base::HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
if (entry != NULL) {
// Some are initialized to a previous value in the root list.
DCHECK_LT(GetValue(entry), i);
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h
index df32f89c1e..0ce93d24e8 100644
--- a/deps/v8/src/address-map.h
+++ b/deps/v8/src/address-map.h
@@ -6,7 +6,7 @@
#define V8_ADDRESS_MAP_H_
#include "src/assert-scope.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
#include "src/objects.h"
namespace v8 {
@@ -14,16 +14,17 @@ namespace internal {
class AddressMapBase {
protected:
- static void SetValue(HashMap::Entry* entry, uint32_t v) {
+ static void SetValue(base::HashMap::Entry* entry, uint32_t v) {
entry->value = reinterpret_cast<void*>(v);
}
- static uint32_t GetValue(HashMap::Entry* entry) {
+ static uint32_t GetValue(base::HashMap::Entry* entry) {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
}
- inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
- bool insert) {
+ inline static base::HashMap::Entry* LookupEntry(base::HashMap* map,
+ HeapObject* obj,
+ bool insert) {
if (insert) {
map->LookupOrInsert(Key(obj), Hash(obj));
}
@@ -40,7 +41,6 @@ class AddressMapBase {
}
};
-
class RootIndexMap : public AddressMapBase {
public:
explicit RootIndexMap(Isolate* isolate);
@@ -48,134 +48,175 @@ class RootIndexMap : public AddressMapBase {
static const int kInvalidRootIndex = -1;
int Lookup(HeapObject* obj) {
- HashMap::Entry* entry = LookupEntry(map_, obj, false);
+ base::HashMap::Entry* entry = LookupEntry(map_, obj, false);
if (entry) return GetValue(entry);
return kInvalidRootIndex;
}
private:
- HashMap* map_;
+ base::HashMap* map_;
DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
};
-
-class BackReference {
+class SerializerReference {
public:
- explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
+ SerializerReference() : bitfield_(Special(kInvalidValue)) {}
- BackReference() : bitfield_(kInvalidValue) {}
+ static SerializerReference FromBitfield(uint32_t bitfield) {
+ return SerializerReference(bitfield);
+ }
- static BackReference SourceReference() { return BackReference(kSourceValue); }
+ static SerializerReference BackReference(AllocationSpace space,
+ uint32_t chunk_index,
+ uint32_t chunk_offset) {
+ DCHECK(IsAligned(chunk_offset, kObjectAlignment));
+ DCHECK_NE(LO_SPACE, space);
+ return SerializerReference(
+ SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
+ ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
+ }
- static BackReference GlobalProxyReference() {
- return BackReference(kGlobalProxyValue);
+ static SerializerReference MapReference(uint32_t index) {
+ return SerializerReference(SpaceBits::encode(MAP_SPACE) |
+ ValueIndexBits::encode(index));
}
- static BackReference LargeObjectReference(uint32_t index) {
- return BackReference(SpaceBits::encode(LO_SPACE) |
- ChunkOffsetBits::encode(index));
+ static SerializerReference LargeObjectReference(uint32_t index) {
+ return SerializerReference(SpaceBits::encode(LO_SPACE) |
+ ValueIndexBits::encode(index));
}
- static BackReference DummyReference() { return BackReference(kDummyValue); }
+ static SerializerReference AttachedReference(uint32_t index) {
+ return SerializerReference(SpaceBits::encode(kAttachedReferenceSpace) |
+ ValueIndexBits::encode(index));
+ }
- static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
- uint32_t chunk_offset) {
- DCHECK(IsAligned(chunk_offset, kObjectAlignment));
- DCHECK_NE(LO_SPACE, space);
- return BackReference(
- SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
- ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
+ static SerializerReference DummyReference() {
+ return SerializerReference(Special(kDummyValue));
}
- bool is_valid() const { return bitfield_ != kInvalidValue; }
- bool is_source() const { return bitfield_ == kSourceValue; }
- bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
+ bool is_valid() const { return bitfield_ != Special(kInvalidValue); }
+
+ bool is_back_reference() const {
+ return SpaceBits::decode(bitfield_) <= LAST_SPACE;
+ }
AllocationSpace space() const {
- DCHECK(is_valid());
- return SpaceBits::decode(bitfield_);
+ DCHECK(is_back_reference());
+ return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
}
uint32_t chunk_offset() const {
- DCHECK(is_valid());
+ DCHECK(is_back_reference());
return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
}
+ uint32_t map_index() const {
+ DCHECK(is_back_reference());
+ return ValueIndexBits::decode(bitfield_);
+ }
+
uint32_t large_object_index() const {
- DCHECK(is_valid());
- DCHECK(chunk_index() == 0);
- return ChunkOffsetBits::decode(bitfield_);
+ DCHECK(is_back_reference());
+ return ValueIndexBits::decode(bitfield_);
}
uint32_t chunk_index() const {
- DCHECK(is_valid());
+ DCHECK(is_back_reference());
return ChunkIndexBits::decode(bitfield_);
}
- uint32_t reference() const {
- DCHECK(is_valid());
+ uint32_t back_reference() const {
+ DCHECK(is_back_reference());
return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
}
- uint32_t bitfield() const { return bitfield_; }
+ bool is_attached_reference() const {
+ return SpaceBits::decode(bitfield_) == kAttachedReferenceSpace;
+ }
+
+ int attached_reference_index() const {
+ DCHECK(is_attached_reference());
+ return ValueIndexBits::decode(bitfield_);
+ }
private:
- static const uint32_t kInvalidValue = 0xFFFFFFFF;
- static const uint32_t kSourceValue = 0xFFFFFFFE;
- static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
- static const uint32_t kDummyValue = 0xFFFFFFFC;
+ explicit SerializerReference(uint32_t bitfield) : bitfield_(bitfield) {}
+
+ inline static uint32_t Special(int value) {
+ return SpaceBits::encode(kSpecialValueSpace) |
+ ValueIndexBits::encode(value);
+ }
+
+ // We use the 32-bit bitfield to encode either a back reference, a special
+ // value, or an attached reference index.
+ // Back reference:
+ // [ Space index ] [ Chunk index ] [ Chunk offset ]
+ // [ LO_SPACE ] [ large object index ]
+ // Special value
+ // [ kSpecialValueSpace ] [ Special value index ]
+ // Attached reference
+ // [ kAttachedReferenceSpace ] [ Attached reference index ]
+
static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
+ static const int kValueIndexSize = kChunkOffsetSize + kChunkIndexSize;
- public:
- static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
+ static const int kSpecialValueSpace = LAST_SPACE + 1;
+ static const int kAttachedReferenceSpace = kSpecialValueSpace + 1;
+ STATIC_ASSERT(kAttachedReferenceSpace < (1 << kSpaceTagSize));
- private:
+ static const int kInvalidValue = 0;
+ static const int kDummyValue = 1;
+
+ // The chunk offset can also be used to encode the index of special values.
class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
class ChunkIndexBits
: public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
- class SpaceBits
- : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
- };
+ class ValueIndexBits : public BitField<uint32_t, 0, kValueIndexSize> {};
+ STATIC_ASSERT(ChunkIndexBits::kNext == ValueIndexBits::kNext);
+ class SpaceBits : public BitField<int, kValueIndexSize, kSpaceTagSize> {};
+ STATIC_ASSERT(SpaceBits::kNext == 32);
uint32_t bitfield_;
-};
+ friend class SerializerReferenceMap;
+};
// Mapping objects to their location after deserialization.
// This is used during building, but not at runtime by V8.
-class BackReferenceMap : public AddressMapBase {
+class SerializerReferenceMap : public AddressMapBase {
public:
- BackReferenceMap()
- : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
-
- ~BackReferenceMap() { delete map_; }
+ SerializerReferenceMap()
+ : no_allocation_(),
+ map_(base::HashMap::PointersMatch),
+ attached_reference_index_(0) {}
- BackReference Lookup(HeapObject* obj) {
- HashMap::Entry* entry = LookupEntry(map_, obj, false);
- return entry ? BackReference(GetValue(entry)) : BackReference();
+ SerializerReference Lookup(HeapObject* obj) {
+ base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+ return entry ? SerializerReference(GetValue(entry)) : SerializerReference();
}
- void Add(HeapObject* obj, BackReference b) {
+ void Add(HeapObject* obj, SerializerReference b) {
DCHECK(b.is_valid());
- DCHECK_NULL(LookupEntry(map_, obj, false));
- HashMap::Entry* entry = LookupEntry(map_, obj, true);
- SetValue(entry, b.bitfield());
- }
-
- void AddSourceString(String* string) {
- Add(string, BackReference::SourceReference());
+ DCHECK_NULL(LookupEntry(&map_, obj, false));
+ base::HashMap::Entry* entry = LookupEntry(&map_, obj, true);
+ SetValue(entry, b.bitfield_);
}
- void AddGlobalProxy(HeapObject* global_proxy) {
- Add(global_proxy, BackReference::GlobalProxyReference());
+ SerializerReference AddAttachedReference(HeapObject* attached_reference) {
+ SerializerReference reference =
+ SerializerReference::AttachedReference(attached_reference_index_++);
+ Add(attached_reference, reference);
+ return reference;
}
private:
DisallowHeapAllocation no_allocation_;
- HashMap* map_;
- DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
+ base::HashMap map_;
+ int attached_reference_index_;
+ DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
} // namespace internal
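For context, SerializerReference above packs each of its three encodings into a
single 32-bit word, as documented in the layout comment. Below is a standalone
sketch of the back-reference case; the concrete widths are assumptions based on
typical V8 configurations (kPageSizeBits = 19, kObjectAlignmentBits = 3,
kSpaceTagSize = 3), not values taken from this patch, and the real class
additionally shifts the stored chunk offset left by kObjectAlignmentBits when
decoding.

#include <cassert>
#include <cstdint>

constexpr int kChunkOffsetSize = 19 - 3;  // kPageSizeBits - kObjectAlignmentBits
constexpr int kChunkIndexSize = 32 - kChunkOffsetSize - 3;  // minus kSpaceTagSize
constexpr int kValueIndexSize = kChunkOffsetSize + kChunkIndexSize;

constexpr uint32_t EncodeBackReference(uint32_t space, uint32_t chunk_index,
                                       uint32_t chunk_offset) {
  return (space << kValueIndexSize) | (chunk_index << kChunkOffsetSize) |
         chunk_offset;
}

constexpr uint32_t DecodeSpace(uint32_t bits) { return bits >> kValueIndexSize; }
constexpr uint32_t DecodeChunkIndex(uint32_t bits) {
  return (bits >> kChunkOffsetSize) & ((1u << kChunkIndexSize) - 1);
}
constexpr uint32_t DecodeChunkOffset(uint32_t bits) {
  return bits & ((1u << kChunkOffsetSize) - 1);
}

int main() {
  // Back reference into space index 2, chunk 5, offset 0x123.
  uint32_t ref = EncodeBackReference(2, 5, 0x123);
  assert(DecodeSpace(ref) == 2);
  assert(DecodeChunkIndex(ref) == 5);
  assert(DecodeChunkOffset(ref) == 0x123);
  return 0;
}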
diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc
index 4c8cda52ed..6b9fd03a21 100644
--- a/deps/v8/src/allocation-site-scopes.cc
+++ b/deps/v8/src/allocation-site-scopes.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/allocation-site-scopes.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h
index 70dd63e1dd..da2b9dc45c 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/allocation-site-scopes.h
@@ -5,10 +5,8 @@
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
-#include "src/ast/ast.h"
#include "src/handles.h"
#include "src/objects.h"
-#include "src/zone.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 851cd61ffc..195a5443c8 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -46,17 +46,6 @@ void Embedded::operator delete(void* p) {
UNREACHABLE();
}
-
-void* AllStatic::operator new(size_t size) {
- UNREACHABLE();
- return invalid;
-}
-
-
-void AllStatic::operator delete(void* p) {
- UNREACHABLE();
-}
-
#endif
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 7c1e023b86..8581cc9e9d 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -45,12 +45,12 @@ class Embedded {
#endif
-// Superclass for classes only using statics.
+// Superclass for classes that use only static methods.
+// Subclasses of AllStatic cannot be instantiated at all.
class AllStatic {
#ifdef DEBUG
public:
- void* operator new(size_t size);
- void operator delete(void* p);
+ AllStatic() = delete;
#endif
};
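The deleted constructor turns what used to be a runtime UNREACHABLE() inside a
debug-only operator new into a compile-time error. A minimal sketch of the
pattern (the Counters subclass is hypothetical; note that, as in the patch, the
check only applies in DEBUG builds):

class AllStatic {
 public:
  AllStatic() = delete;
};

class Counters : public AllStatic {  // hypothetical user of the pattern
 public:
  static int Next() { return counter_++; }

 private:
  static int counter_;
};
int Counters::counter_ = 0;

// Counters c;               // error: use of deleted function AllStatic::AllStatic()
// int n = Counters::Next(); // fine: static members need no instance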
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
new file mode 100644
index 0000000000..eefdf35adc
--- /dev/null
+++ b/deps/v8/src/api-arguments-inl.h
@@ -0,0 +1,116 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api-arguments.h"
+
+#include "src/tracing/trace-event.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
+ F(AccessorNameGetterCallback, "get", v8::Value, Object) \
+ F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
+ F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
+ Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
+ Handle<Name> name) { \
+ Isolate* isolate = this->isolate(); \
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
+ isolate, &tracing::TraceEventStatsTable::Function); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ApiReturn> info(begin()); \
+ LOG(isolate, \
+ ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
+ f(v8::Utils::ToLocal(name), info); \
+ return GetReturnValue<InternalReturn>(isolate); \
+ }
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
+#undef WRITE_CALL_1_NAME
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
+ F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
+ F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
+ F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
+
+#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
+ Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
+ uint32_t index) { \
+ Isolate* isolate = this->isolate(); \
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
+ isolate, &tracing::TraceEventStatsTable::Function); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ApiReturn> info(begin()); \
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
+ holder(), index)); \
+ f(index, info); \
+ return GetReturnValue<InternalReturn>(isolate); \
+ }
+
+FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
+
+#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
+#undef WRITE_CALL_1_INDEX
+
+Handle<Object> PropertyCallbackArguments::Call(
+ GenericNamedPropertySetterCallback f, Handle<Name> name,
+ Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(
+ isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate,
+ &tracing::TraceEventStatsTable::GenericNamedPropertySetterCallback);
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<v8::Value> info(begin());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+ return GetReturnValue<Object>(isolate);
+}
+
+Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
+ uint32_t index,
+ Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ &RuntimeCallStats::IndexedPropertySetterCallback);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::IndexedPropertySetterCallback);
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<v8::Value> info(begin());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
+ f(index, v8::Utils::ToLocal(value), info);
+ return GetReturnValue<Object>(isolate);
+}
+
+void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
+ Handle<Name> name, Handle<Object> value) {
+ Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate,
+ &RuntimeCallStats::AccessorNameSetterCallback);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::AccessorNameSetterCallback);
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ PropertyCallbackInfo<void> info(begin());
+ LOG(isolate,
+ ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
+ f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
+}
+
+} // namespace internal
+} // namespace v8
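To make the macro machinery in this new file concrete, the first entry of
FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME — F(AccessorNameGetterCallback, "get",
v8::Value, Object) — expands to the following (reformatted, with the adjacent
string literals concatenated):

Handle<Object> PropertyCallbackArguments::Call(AccessorNameGetterCallback f,
                                               Handle<Name> name) {
  Isolate* isolate = this->isolate();
  RuntimeCallTimerScope timer(isolate,
                              &RuntimeCallStats::AccessorNameGetterCallback);
  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
      isolate, &tracing::TraceEventStatsTable::AccessorNameGetterCallback);
  VMState<EXTERNAL> state(isolate);
  ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
  PropertyCallbackInfo<v8::Value> info(begin());
  LOG(isolate,
      ApiNamedPropertyAccess("interceptor-named-get", holder(), *name));
  f(v8::Utils::ToLocal(name), info);
  return GetReturnValue<Object>(isolate);
}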
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index c4b698c5a2..6e347c7b62 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -4,15 +4,20 @@
#include "src/api-arguments.h"
+#include "src/tracing/trace-event.h"
+#include "src/vm-state-inl.h"
+
namespace v8 {
namespace internal {
Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &internal::tracing::TraceEventStatsTable::FunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_,
- is_construct_call_);
+ FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
f(info);
return GetReturnValue<Object>(isolate);
}
@@ -20,6 +25,9 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Handle<JSObject> PropertyCallbackArguments::Call(
IndexedPropertyEnumeratorCallback f) {
Isolate* isolate = this->isolate();
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &internal::tracing::TraceEventStatsTable::PropertyCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Array> info(begin());
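Both call paths now wrap the external callback in a RuntimeCallTimerScope plus
a scoped trace event before switching to EXTERNAL VM state. The RAII shape,
reduced to a self-contained sketch (illustrative only; the real
RuntimeCallStats aggregates per-counter totals rather than printing):

#include <chrono>
#include <cstdio>

class ScopedCallTimer {
 public:
  explicit ScopedCallTimer(const char* counter)
      : counter_(counter), start_(std::chrono::steady_clock::now()) {}
  ~ScopedCallTimer() {  // attributes the elapsed time on scope exit
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::fprintf(stderr, "%s: %lld us\n", counter_,
                 static_cast<long long>(us));
  }

 private:
  const char* counter_;
  std::chrono::steady_clock::time_point start_;
};

void InvokeCallback(void (*f)()) {
  ScopedCallTimer timer("FunctionCallback");  // mirrors the pattern above
  f();
}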
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 3bfe34dc89..0dfe61824a 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -7,8 +7,6 @@
#include "src/api.h"
#include "src/isolate.h"
-#include "src/tracing/trace-event.h"
-#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -58,7 +56,7 @@ Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set, return empty handle as per previous behaviour.
- if ((*handle)->IsTheHole()) return Handle<V>();
+ if ((*handle)->IsTheHole(isolate)) return Handle<V>();
Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
result->VerifyApiCallResultType();
return result;
@@ -108,84 +106,24 @@ class PropertyCallbackArguments
*/
Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
- F(AccessorNameGetterCallback, "get", v8::Value, Object) \
- F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
- F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
-
-#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
- Handle<InternalReturn> Call(Function f, Handle<Name> name) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ApiReturn> info(begin()); \
- LOG(isolate, \
- ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
- f(v8::Utils::ToLocal(name), info); \
- return GetReturnValue<InternalReturn>(isolate); \
- }
+ inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
+ inline Handle<Object> Call(GenericNamedPropertyQueryCallback f,
+ Handle<Name> name);
+ inline Handle<Object> Call(GenericNamedPropertyDeleterCallback f,
+ Handle<Name> name);
- FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
-
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
-#undef WRITE_CALL_1_NAME
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
- F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
- F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
- F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
-
-#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
- Handle<InternalReturn> Call(Function f, uint32_t index) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ApiReturn> info(begin()); \
- LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
- holder(), index)); \
- f(index, info); \
- return GetReturnValue<InternalReturn>(isolate); \
- }
+ inline Handle<Object> Call(IndexedPropertyGetterCallback f, uint32_t index);
+ inline Handle<Object> Call(IndexedPropertyQueryCallback f, uint32_t index);
+ inline Handle<Object> Call(IndexedPropertyDeleterCallback f, uint32_t index);
- FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
-
-#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
-#undef WRITE_CALL_1_INDEX
-
- Handle<Object> Call(GenericNamedPropertySetterCallback f, Handle<Name> name,
- Handle<Object> value) {
- Isolate* isolate = this->isolate();
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
- f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
- return GetReturnValue<Object>(isolate);
- }
+ inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
+ Handle<Name> name, Handle<Object> value);
- Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
- Handle<Object> value) {
- Isolate* isolate = this->isolate();
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<v8::Value> info(begin());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
- f(index, v8::Utils::ToLocal(value), info);
- return GetReturnValue<Object>(isolate);
- }
+ inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
+ Handle<Object> value);
- void Call(AccessorNameSetterCallback f, Handle<Name> name,
- Handle<Object> value) {
- Isolate* isolate = this->isolate();
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- PropertyCallbackInfo<void> info(begin());
- LOG(isolate,
- ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
- f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
- }
+ inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
+ Handle<Object> value);
private:
inline JSObject* holder() {
@@ -206,19 +144,19 @@ class FunctionCallbackArguments
static const int kIsolateIndex = T::kIsolateIndex;
static const int kCalleeIndex = T::kCalleeIndex;
static const int kContextSaveIndex = T::kContextSaveIndex;
+ static const int kNewTargetIndex = T::kNewTargetIndex;
FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
internal::HeapObject* callee,
- internal::Object* holder, internal::Object** argv,
- int argc, bool is_construct_call)
- : Super(isolate),
- argv_(argv),
- argc_(argc),
- is_construct_call_(is_construct_call) {
+ internal::Object* holder,
+ internal::HeapObject* new_target,
+ internal::Object** argv, int argc)
+ : Super(isolate), argv_(argv), argc_(argc) {
Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
+ values[T::kNewTargetIndex] = new_target;
values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
@@ -245,7 +183,6 @@ class FunctionCallbackArguments
private:
internal::Object** argv_;
int argc_;
- bool is_construct_call_;
};
} // namespace internal
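On the embedder side, the new kNewTargetIndex slot is what backs
FunctionCallbackInfo<T>::NewTarget(); a sketch of reading it from a callback,
assuming the public NewTarget() accessor available in this V8 version:

#include "include/v8.h"

// Hypothetical callback: echo new.target back to the caller. For a plain
// [[Call]] invocation the slot holds undefined.
void EchoNewTarget(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(info.NewTarget());
}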
diff --git a/deps/v8/src/api-experimental.cc b/deps/v8/src/api-experimental.cc
index 3928434270..934b27aa5d 100644
--- a/deps/v8/src/api-experimental.cc
+++ b/deps/v8/src/api-experimental.cc
@@ -76,6 +76,10 @@ FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalField(
return FromApi(this)->LoadInternalField(value, field_no);
}
+FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalFieldUnchecked(
+ ValueId value, int field_no) {
+ return FromApi(this)->LoadInternalFieldUnchecked(value, field_no);
+}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadValue(ValueId value_id,
int offset) {
@@ -88,6 +92,9 @@ FastAccessorBuilder::ValueId FastAccessorBuilder::LoadObject(ValueId value_id,
return FromApi(this)->LoadObject(value_id, offset);
}
+FastAccessorBuilder::ValueId FastAccessorBuilder::ToSmi(ValueId value_id) {
+ return FromApi(this)->ToSmi(value_id);
+}
void FastAccessorBuilder::ReturnValue(ValueId value) {
FromApi(this)->ReturnValue(value);
@@ -113,6 +120,9 @@ void FastAccessorBuilder::SetLabel(LabelId label_id) {
FromApi(this)->SetLabel(label_id);
}
+void FastAccessorBuilder::Goto(LabelId label_id) {
+ FromApi(this)->Goto(label_id);
+}
void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {
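The three additions above (LoadInternalFieldUnchecked, ToSmi, Goto) slot into
the existing builder API. A hypothetical accessor built with them follows;
MakeLabel and the fall-through behaviour of CheckNotZeroOrJump are assumed from
the surrounding API, not shown in this hunk:

#include "include/v8-experimental.h"

// Hypothetical: return internal field 0 as a Smi, or 0 when the field is null.
void BuildFieldAccessor(v8::experimental::FastAccessorBuilder* builder) {
  auto field = builder->LoadInternalFieldUnchecked(builder->GetReceiver(), 0);
  auto is_zero = builder->MakeLabel();
  builder->CheckNotZeroOrJump(field, is_zero);  // assumed: jumps when zero
  builder->ReturnValue(builder->ToSmi(field));
  builder->SetLabel(is_zero);
  builder->ReturnValue(builder->IntegerConstant(0));
}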
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index adf4b6af57..0f3c3b69ed 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -15,23 +15,44 @@ namespace internal {
namespace {
-MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
- Handle<ObjectTemplateInfo> data,
- bool is_hidden_prototype);
+class InvokeScope {
+ public:
+ explicit InvokeScope(Isolate* isolate) : save_context_(isolate) {}
+ ~InvokeScope() {
+ Isolate* isolate = save_context_.isolate();
+ bool has_exception = isolate->has_pending_exception();
+ if (has_exception) {
+ isolate->ReportPendingMessages();
+ } else {
+ isolate->clear_pending_message();
+ }
+ }
-MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
- Handle<FunctionTemplateInfo> data,
- Handle<Name> name = Handle<Name>());
+ private:
+ SaveContext save_context_;
+};
+
+enum class CacheCheck { kCheck, kSkip };
+
+MaybeHandle<JSObject> InstantiateObject(
+ Isolate* isolate, Handle<ObjectTemplateInfo> data,
+ Handle<JSReceiver> new_target, CacheCheck cache_check = CacheCheck::kCheck,
+ bool is_hidden_prototype = false);
+MaybeHandle<JSFunction> InstantiateFunction(
+ Isolate* isolate, Handle<FunctionTemplateInfo> data,
+ CacheCheck cache_check = CacheCheck::kCheck,
+ Handle<Name> name = Handle<Name>());
MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
Handle<Name> name = Handle<Name>()) {
if (data->IsFunctionTemplateInfo()) {
return InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(data), name);
+ Handle<FunctionTemplateInfo>::cast(data),
+ CacheCheck::kCheck, name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
- false);
+ Handle<JSReceiver>());
} else {
return data;
}
@@ -152,25 +173,6 @@ Object* GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
return nullptr;
}
-// Returns parent function template or null.
-FunctionTemplateInfo* GetParent(FunctionTemplateInfo* data) {
- Object* parent = data->parent_template();
- return parent->IsUndefined() ? nullptr : FunctionTemplateInfo::cast(parent);
-}
-
-// Starting from given object template's constructor walk up the inheritance
-// chain till a function template that has an instance template is found.
-ObjectTemplateInfo* GetParent(ObjectTemplateInfo* data) {
- Object* maybe_ctor = data->constructor();
- if (maybe_ctor->IsUndefined()) return nullptr;
- FunctionTemplateInfo* ctor = FunctionTemplateInfo::cast(maybe_ctor);
- while (true) {
- ctor = GetParent(ctor);
- if (ctor == nullptr) return nullptr;
- Object* maybe_obj = ctor->instance_template();
- if (!maybe_obj->IsUndefined()) return ObjectTemplateInfo::cast(maybe_obj);
- }
-}
template <typename TemplateInfoT>
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
@@ -184,15 +186,11 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
int max_number_of_properties = 0;
TemplateInfoT* info = *data;
while (info != nullptr) {
- if (!info->property_accessors()->IsUndefined()) {
- Object* props = info->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate);
- NeanderArray props_array(props_handle);
- max_number_of_properties += props_array.length();
- }
+ Object* props = info->property_accessors();
+ if (!props->IsUndefined(isolate)) {
+ max_number_of_properties += TemplateList::cast(props)->length();
}
- info = GetParent(info);
+ info = info->GetParent(isolate);
}
if (max_number_of_properties > 0) {
@@ -204,12 +202,12 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
info = *data;
while (info != nullptr) {
// Accumulate accessors.
- if (!info->property_accessors()->IsUndefined()) {
- Handle<Object> props(info->property_accessors(), isolate);
- valid_descriptors =
- AccessorInfo::AppendUnique(props, array, valid_descriptors);
+ Object* maybe_properties = info->property_accessors();
+ if (!maybe_properties->IsUndefined(isolate)) {
+ valid_descriptors = AccessorInfo::AppendUnique(
+ handle(maybe_properties, isolate), array, valid_descriptors);
}
- info = GetParent(info);
+ info = info->GetParent(isolate);
}
// Install accumulated accessors.
@@ -219,29 +217,29 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
}
}
- auto property_list = handle(data->property_list(), isolate);
- if (property_list->IsUndefined()) return obj;
- // TODO(dcarney): just use a FixedArray here.
- NeanderArray properties(property_list);
- if (properties.length() == 0) return obj;
+ Object* maybe_property_list = data->property_list();
+ if (maybe_property_list->IsUndefined(isolate)) return obj;
+ Handle<TemplateList> properties(TemplateList::cast(maybe_property_list),
+ isolate);
+ if (properties->length() == 0) return obj;
int i = 0;
for (int c = 0; c < data->number_of_properties(); c++) {
- auto name = handle(Name::cast(properties.get(i++)), isolate);
- auto bit = handle(properties.get(i++), isolate);
+ auto name = handle(Name::cast(properties->get(i++)), isolate);
+ Object* bit = properties->get(i++);
if (bit->IsSmi()) {
- PropertyDetails details(Smi::cast(*bit));
+ PropertyDetails details(Smi::cast(bit));
PropertyAttributes attributes = details.attributes();
PropertyKind kind = details.kind();
if (kind == kData) {
- auto prop_data = handle(properties.get(i++), isolate);
+ auto prop_data = handle(properties->get(i++), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
prop_data, attributes),
JSObject);
} else {
- auto getter = handle(properties.get(i++), isolate);
- auto setter = handle(properties.get(i++), isolate);
+ auto getter = handle(properties->get(i++), isolate);
+ auto setter = handle(properties->get(i++), isolate);
RETURN_ON_EXCEPTION(
isolate, DefineAccessorProperty(isolate, obj, name, getter, setter,
attributes, is_hidden_prototype),
@@ -250,12 +248,12 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
} else {
// Intrinsic data property --- Get appropriate value from the current
// context.
- PropertyDetails details(Smi::cast(properties.get(i++)));
+ PropertyDetails details(Smi::cast(properties->get(i++)));
PropertyAttributes attributes = details.attributes();
DCHECK_EQ(kData, details.kind());
v8::Intrinsic intrinsic =
- static_cast<v8::Intrinsic>(Smi::cast(properties.get(i++))->value());
+ static_cast<v8::Intrinsic>(Smi::cast(properties->get(i++))->value());
auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
@@ -266,106 +264,189 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
return obj;
}
-void CacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number,
+MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
+ int serial_number) {
+ DCHECK_LE(1, serial_number);
+ if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
+ Handle<FixedArray> fast_cache =
+ isolate->fast_template_instantiations_cache();
+ return fast_cache->GetValue<JSObject>(isolate, serial_number - 1);
+ } else {
+ Handle<UnseededNumberDictionary> slow_cache =
+ isolate->slow_template_instantiations_cache();
+ int entry = slow_cache->FindEntry(serial_number);
+ if (entry == UnseededNumberDictionary::kNotFound) {
+ return MaybeHandle<JSObject>();
+ }
+ return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
+ }
+}
+
+void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<JSObject> object) {
- auto cache = isolate->template_instantiations_cache();
- auto new_cache =
- UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
- isolate->native_context()->set_template_instantiations_cache(*new_cache);
+ DCHECK_LE(1, serial_number);
+ if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
+ Handle<FixedArray> fast_cache =
+ isolate->fast_template_instantiations_cache();
+ Handle<FixedArray> new_cache =
+ FixedArray::SetAndGrow(fast_cache, serial_number - 1, object);
+ if (*new_cache != *fast_cache) {
+ isolate->native_context()->set_fast_template_instantiations_cache(
+ *new_cache);
+ }
+ } else {
+ Handle<UnseededNumberDictionary> cache =
+ isolate->slow_template_instantiations_cache();
+ auto new_cache =
+ UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
+ if (*new_cache != *cache) {
+ isolate->native_context()->set_slow_template_instantiations_cache(
+ *new_cache);
+ }
+ }
}
-void UncacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number) {
- auto cache = isolate->template_instantiations_cache();
- int entry = cache->FindEntry(serial_number);
- DCHECK(entry != UnseededNumberDictionary::kNotFound);
- Handle<Object> result =
- UnseededNumberDictionary::DeleteProperty(cache, entry);
- USE(result);
- DCHECK(result->IsTrue());
- auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
- isolate->native_context()->set_template_instantiations_cache(*new_cache);
+void UncacheTemplateInstantiation(Isolate* isolate, int serial_number) {
+ DCHECK_LE(1, serial_number);
+ if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
+ Handle<FixedArray> fast_cache =
+ isolate->fast_template_instantiations_cache();
+ DCHECK(!fast_cache->get(serial_number - 1)->IsUndefined(isolate));
+ fast_cache->set_undefined(serial_number - 1);
+ } else {
+ Handle<UnseededNumberDictionary> cache =
+ isolate->slow_template_instantiations_cache();
+ int entry = cache->FindEntry(serial_number);
+ DCHECK(entry != UnseededNumberDictionary::kNotFound);
+ Handle<Object> result =
+ UnseededNumberDictionary::DeleteProperty(cache, entry);
+ USE(result);
+ DCHECK(result->IsTrue(isolate));
+ auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
+ isolate->native_context()->set_slow_template_instantiations_cache(
+ *new_cache);
+ }
+}
+
+bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo* info,
+ JSReceiver* new_target) {
+ DisallowHeapAllocation no_gc;
+
+ if (!new_target->IsJSFunction()) return false;
+ JSFunction* fun = JSFunction::cast(new_target);
+ if (fun->shared()->function_data() != info->constructor()) return false;
+ if (info->immutable_proto()) return false;
+ return fun->context()->native_context() == isolate->raw_native_context();
+}
+
+MaybeHandle<JSObject> InstantiateObjectWithInvokeScope(
+ Isolate* isolate, Handle<ObjectTemplateInfo> info,
+ Handle<JSReceiver> new_target) {
+ InvokeScope invoke_scope(isolate);
+ return InstantiateObject(isolate, info, new_target, CacheCheck::kSkip);
}
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
+ Handle<JSReceiver> new_target,
+ CacheCheck cache_check,
bool is_hidden_prototype) {
+ Handle<JSFunction> constructor;
+ int serial_number = Smi::cast(info->serial_number())->value();
+ if (!new_target.is_null()) {
+ if (IsSimpleInstantiation(isolate, *info, *new_target)) {
+ constructor = Handle<JSFunction>::cast(new_target);
+ } else {
+ // Disable caching for subclass instantiation.
+ serial_number = 0;
+ }
+ }
// Fast path.
Handle<JSObject> result;
- uint32_t serial_number =
- static_cast<uint32_t>(Smi::cast(info->serial_number())->value());
- if (serial_number) {
- // Probe cache.
- auto cache = isolate->template_instantiations_cache();
- int entry = cache->FindEntry(serial_number);
- if (entry != UnseededNumberDictionary::kNotFound) {
- Object* boilerplate = cache->ValueAt(entry);
- result = handle(JSObject::cast(boilerplate), isolate);
+ if (serial_number && cache_check == CacheCheck::kCheck) {
+ if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
return isolate->factory()->CopyJSObject(result);
}
}
- // Enter a new scope. Recursion could otherwise create a lot of handles.
- HandleScope scope(isolate);
- auto constructor = handle(info->constructor(), isolate);
- Handle<JSFunction> cons;
- if (constructor->IsUndefined()) {
- cons = isolate->object_function();
- } else {
- auto cons_templ = Handle<FunctionTemplateInfo>::cast(constructor);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
+
+ if (constructor.is_null()) {
+ Object* maybe_constructor_info = info->constructor();
+ if (maybe_constructor_info->IsUndefined(isolate)) {
+ constructor = isolate->object_function();
+ } else {
+ // Enter a new scope. Recursion could otherwise create a lot of handles.
+ HandleScope scope(isolate);
+ Handle<FunctionTemplateInfo> cons_templ(
+ FunctionTemplateInfo::cast(maybe_constructor_info), isolate);
+ Handle<JSFunction> tmp_constructor;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, tmp_constructor,
+ InstantiateFunction(isolate, cons_templ),
+ JSObject);
+ constructor = scope.CloseAndEscape(tmp_constructor);
+ }
+
+ if (new_target.is_null()) new_target = constructor;
}
- auto object = isolate->factory()->NewJSObject(cons);
+
+ Handle<JSObject> object;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
+ JSObject::New(constructor, new_target), JSObject);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
- ConfigureInstance(isolate, object, info, is_hidden_prototype),
- JSFunction);
- // TODO(dcarney): is this necessary?
+ ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject);
+ if (info->immutable_proto()) {
+ JSObject::SetImmutableProto(object);
+ }
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
if (serial_number) {
CacheTemplateInstantiation(isolate, serial_number, result);
result = isolate->factory()->CopyJSObject(result);
}
- return scope.CloseAndEscape(result);
+ return result;
}
+MaybeHandle<JSFunction> InstantiateFunctionWithInvokeScope(
+ Isolate* isolate, Handle<FunctionTemplateInfo> info) {
+ InvokeScope invoke_scope(isolate);
+ return InstantiateFunction(isolate, info, CacheCheck::kSkip);
+}
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
+ CacheCheck cache_check,
Handle<Name> name) {
- uint32_t serial_number =
- static_cast<uint32_t>(Smi::cast(data->serial_number())->value());
- if (serial_number) {
- // Probe cache.
- auto cache = isolate->template_instantiations_cache();
- int entry = cache->FindEntry(serial_number);
- if (entry != UnseededNumberDictionary::kNotFound) {
- Object* element = cache->ValueAt(entry);
- return handle(JSFunction::cast(element), isolate);
+ int serial_number = Smi::cast(data->serial_number())->value();
+ if (serial_number && cache_check == CacheCheck::kCheck) {
+ Handle<JSObject> result;
+ if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
+ return Handle<JSFunction>::cast(result);
}
}
- // Enter a new scope. Recursion could otherwise create a lot of handles.
- HandleScope scope(isolate);
Handle<JSObject> prototype;
if (!data->remove_prototype()) {
- auto prototype_templ = handle(data->prototype_template(), isolate);
- if (prototype_templ->IsUndefined()) {
+ Object* prototype_templ = data->prototype_template();
+ if (prototype_templ->IsUndefined(isolate)) {
prototype = isolate->factory()->NewJSObject(isolate->object_function());
} else {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
- InstantiateObject(isolate,
- Handle<ObjectTemplateInfo>::cast(prototype_templ),
- data->hidden_prototype()),
+ InstantiateObject(
+ isolate,
+ handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
+ Handle<JSReceiver>(), CacheCheck::kCheck,
+ data->hidden_prototype()),
JSFunction);
}
- auto parent = handle(data->parent_template(), isolate);
- if (!parent->IsUndefined()) {
+ Object* parent = data->parent_template();
+ if (!parent->IsUndefined(isolate)) {
+ // Enter a new scope. Recursion could otherwise create a lot of handles.
+ HandleScope scope(isolate);
Handle<JSFunction> parent_instance;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, parent_instance,
- InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(parent)),
+ InstantiateFunction(
+ isolate, handle(FunctionTemplateInfo::cast(parent), isolate)),
JSFunction);
// TODO(dcarney): decide what to do here.
Handle<Object> parent_prototype;
@@ -379,7 +460,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
MaybeHandle<JSFunction>());
}
}
- auto function = ApiNatives::CreateApiFunction(
+ Handle<JSFunction> function = ApiNatives::CreateApiFunction(
isolate, data, prototype, ApiNatives::JavaScriptObjectType);
if (!name.is_null() && name->IsString()) {
function->shared()->set_name(*name);
@@ -388,7 +469,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, function);
}
- auto result =
+ MaybeHandle<JSObject> result =
ConfigureInstance(isolate, function, data, data->hidden_prototype());
if (result.is_null()) {
// Uncache on error.
@@ -397,65 +478,89 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
return MaybeHandle<JSFunction>();
}
- return scope.CloseAndEscape(function);
+ return function;
}
-class InvokeScope {
- public:
- explicit InvokeScope(Isolate* isolate)
- : isolate_(isolate), save_context_(isolate) {}
- ~InvokeScope() {
- bool has_exception = isolate_->has_pending_exception();
- if (has_exception) {
- isolate_->ReportPendingMessages();
- } else {
- isolate_->clear_pending_message();
- }
- }
-
- private:
- Isolate* isolate_;
- SaveContext save_context_;
-};
-
-
void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
int length, Handle<Object>* data) {
- auto list = handle(templ->property_list(), isolate);
- if (list->IsUndefined()) {
- list = NeanderArray(isolate).value();
- templ->set_property_list(*list);
+ Object* maybe_list = templ->property_list();
+ Handle<TemplateList> list;
+ if (maybe_list->IsUndefined(isolate)) {
+ list = TemplateList::New(isolate, length);
+ } else {
+ list = handle(TemplateList::cast(maybe_list), isolate);
}
templ->set_number_of_properties(templ->number_of_properties() + 1);
- NeanderArray array(list);
for (int i = 0; i < length; i++) {
Handle<Object> value =
data[i].is_null()
? Handle<Object>::cast(isolate->factory()->undefined_value())
: data[i];
- array.add(isolate, value);
+ list = TemplateList::Add(isolate, list, value);
}
+ templ->set_property_list(*list);
}
} // namespace
-
MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
- Handle<FunctionTemplateInfo> data) {
- Isolate* isolate = data->GetIsolate();
- InvokeScope invoke_scope(isolate);
- return ::v8::internal::InstantiateFunction(isolate, data);
+ Handle<FunctionTemplateInfo> info) {
+ Isolate* isolate = info->GetIsolate();
+ int serial_number = Smi::cast(info->serial_number())->value();
+ if (serial_number) {
+ Handle<JSObject> result;
+ if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
+ return Handle<JSFunction>::cast(result);
+ }
+ }
+ return InstantiateFunctionWithInvokeScope(isolate, info);
}
-
MaybeHandle<JSObject> ApiNatives::InstantiateObject(
+ Handle<ObjectTemplateInfo> info, Handle<JSReceiver> new_target) {
+ Isolate* isolate = info->GetIsolate();
+ int serial_number = Smi::cast(info->serial_number())->value();
+ if (serial_number && !new_target.is_null() &&
+ IsSimpleInstantiation(isolate, *info, *new_target)) {
+ // Fast path.
+ Handle<JSObject> result;
+ if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
+ return isolate->factory()->CopyJSObject(result);
+ }
+ }
+ return InstantiateObjectWithInvokeScope(isolate, info, new_target);
+}
+
+MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
Handle<ObjectTemplateInfo> data) {
Isolate* isolate = data->GetIsolate();
InvokeScope invoke_scope(isolate);
- return ::v8::internal::InstantiateObject(isolate, data, false);
-}
+ Handle<FunctionTemplateInfo> constructor(
+ FunctionTemplateInfo::cast(data->constructor()));
+ Handle<SharedFunctionInfo> shared =
+ FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, constructor);
+ Handle<Map> initial_map = isolate->factory()->CreateSloppyFunctionMap(
+ FUNCTION_WITH_WRITEABLE_PROTOTYPE);
+ Handle<JSFunction> object_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ initial_map, shared, isolate->factory()->undefined_value());
+ Handle<Map> object_map = isolate->factory()->NewMap(
+ JS_SPECIAL_API_OBJECT_TYPE,
+ JSObject::kHeaderSize + data->internal_field_count() * kPointerSize,
+ FAST_HOLEY_SMI_ELEMENTS);
+ JSFunction::SetInitialMap(object_function, object_map,
+ isolate->factory()->null_value());
+ object_map->set_is_access_check_needed(true);
+ object_map->set_is_callable();
+ object_map->set_is_constructor(true);
+
+ Handle<JSObject> object = isolate->factory()->NewJSObject(object_function);
+ JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
+
+ return object;
+}
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
@@ -498,108 +603,91 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<TemplateInfo> info,
Handle<AccessorInfo> property) {
- auto list = handle(info->property_accessors(), isolate);
- if (list->IsUndefined()) {
- list = NeanderArray(isolate).value();
- info->set_property_accessors(*list);
+ Object* maybe_list = info->property_accessors();
+ Handle<TemplateList> list;
+ if (maybe_list->IsUndefined(isolate)) {
+ list = TemplateList::New(isolate, 1);
+ } else {
+ list = handle(TemplateList::cast(maybe_list), isolate);
}
- NeanderArray array(list);
- array.add(isolate, property);
+ list = TemplateList::Add(isolate, list, property);
+ info->set_property_accessors(*list);
}
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type) {
- Handle<Code> code;
- if (obj->call_code()->IsCallHandlerInfo() &&
- CallHandlerInfo::cast(obj->call_code())->fast_handler()->IsCode()) {
- code = isolate->builtins()->HandleFastApiCall();
- } else {
- code = isolate->builtins()->HandleApiCall();
- }
- Handle<Code> construct_stub =
- prototype.is_null() ? isolate->builtins()->ConstructedNonConstructable()
- : isolate->builtins()->JSConstructStubApi();
-
- obj->set_instantiated(true);
- Handle<JSFunction> result;
- if (obj->remove_prototype()) {
- result = isolate->factory()->NewFunctionWithoutPrototype(
- isolate->factory()->empty_string(), code);
- } else {
- int internal_field_count = 0;
- if (!obj->instance_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()));
- internal_field_count =
- Smi::cast(instance_template->internal_field_count())->value();
- }
-
- // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
- // JSObject::GetHeaderSize.
- int instance_size = kPointerSize * internal_field_count;
- InstanceType type;
- switch (instance_type) {
- case JavaScriptObjectType:
- if (!obj->needs_access_check() &&
- obj->named_property_handler()->IsUndefined() &&
- obj->indexed_property_handler()->IsUndefined()) {
- type = JS_OBJECT_TYPE;
- } else {
- type = JS_SPECIAL_API_OBJECT_TYPE;
- }
- instance_size += JSObject::kHeaderSize;
- break;
- case GlobalObjectType:
- type = JS_GLOBAL_OBJECT_TYPE;
- instance_size += JSGlobalObject::kSize;
- break;
- case GlobalProxyType:
- type = JS_GLOBAL_PROXY_TYPE;
- instance_size += JSGlobalProxy::kSize;
- break;
- default:
- UNREACHABLE();
- type = JS_OBJECT_TYPE; // Keep the compiler happy.
- break;
- }
-
- result = isolate->factory()->NewFunction(
- isolate->factory()->empty_string(), code, prototype, type,
- instance_size, obj->read_only_prototype(), true);
- }
-
- result->shared()->set_length(obj->length());
- Handle<Object> class_name(obj->class_name(), isolate);
- if (class_name->IsString()) {
- result->shared()->set_instance_class_name(*class_name);
- result->shared()->set_name(*class_name);
- }
- result->shared()->set_api_func_data(*obj);
- result->shared()->set_construct_stub(*construct_stub);
- result->shared()->DontAdaptArguments();
+ Handle<SharedFunctionInfo> shared =
+ FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj);
+ Handle<JSFunction> result =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, isolate->native_context());
if (obj->remove_prototype()) {
+ result->set_map(*isolate->sloppy_function_without_prototype_map());
+ DCHECK(prototype.is_null());
DCHECK(result->shared()->IsApiFunction());
DCHECK(!result->has_initial_map());
DCHECK(!result->has_prototype());
+ DCHECK(!result->IsConstructor());
return result;
}
-#ifdef DEBUG
- LookupIterator it(handle(JSObject::cast(result->prototype())),
- isolate->factory()->constructor_string(),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
- DCHECK(it.IsFound());
- DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
-#endif
-
// Down from here is only valid for API functions that can be used as a
// constructor (don't set the "remove prototype" flag).
- Handle<Map> map(result->initial_map());
+ if (obj->read_only_prototype()) {
+ result->set_map(*isolate->sloppy_function_with_readonly_prototype_map());
+ }
+
+ if (prototype->IsTheHole(isolate)) {
+ prototype = isolate->factory()->NewFunctionPrototype(result);
+ } else {
+ JSObject::AddProperty(Handle<JSObject>::cast(prototype),
+ isolate->factory()->constructor_string(), result,
+ DONT_ENUM);
+ }
+
+ int internal_field_count = 0;
+ if (!obj->instance_template()->IsUndefined(isolate)) {
+ Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
+ ObjectTemplateInfo::cast(obj->instance_template()));
+ internal_field_count = instance_template->internal_field_count();
+ }
+
+ // TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
+ // JSObject::GetHeaderSize.
+ int instance_size = kPointerSize * internal_field_count;
+ InstanceType type;
+ switch (instance_type) {
+ case JavaScriptObjectType:
+ if (!obj->needs_access_check() &&
+ obj->named_property_handler()->IsUndefined(isolate) &&
+ obj->indexed_property_handler()->IsUndefined(isolate)) {
+ type = JS_API_OBJECT_TYPE;
+ } else {
+ type = JS_SPECIAL_API_OBJECT_TYPE;
+ }
+ instance_size += JSObject::kHeaderSize;
+ break;
+ case GlobalObjectType:
+ type = JS_GLOBAL_OBJECT_TYPE;
+ instance_size += JSGlobalObject::kSize;
+ break;
+ case GlobalProxyType:
+ type = JS_GLOBAL_PROXY_TYPE;
+ instance_size += JSGlobalProxy::kSize;
+ break;
+ default:
+ UNREACHABLE();
+ type = JS_OBJECT_TYPE; // Keep the compiler happy.
+ break;
+ }
+
+ Handle<Map> map =
+ isolate->factory()->NewMap(type, instance_size, FAST_HOLEY_SMI_ELEMENTS);
+ JSFunction::SetInitialMap(result, map, Handle<JSObject>::cast(prototype));
// Mark as undetectable if needed.
if (obj->undetectable()) {
@@ -612,20 +700,19 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
}
// Set interceptor information in the map.
- if (!obj->named_property_handler()->IsUndefined()) {
+ if (!obj->named_property_handler()->IsUndefined(isolate)) {
map->set_has_named_interceptor();
}
- if (!obj->indexed_property_handler()->IsUndefined()) {
+ if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
map->set_has_indexed_interceptor();
}
// Mark instance as callable in the map.
- if (!obj->instance_call_handler()->IsUndefined()) {
+ if (!obj->instance_call_handler()->IsUndefined(isolate)) {
map->set_is_callable();
map->set_is_constructor(true);
}
- DCHECK(result->shared()->IsApiFunction());
return result;
}
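The cache rework earlier in this file (ProbeInstantiationsCache,
CacheTemplateInstantiation, UncacheTemplateInstantiation) splits template
instantiations across two tiers: serial numbers up to
kFastTemplateInstantiationsCacheSize index a flat FixedArray, while larger ones
go through an UnseededNumberDictionary. The strategy, reduced to a standalone
sketch with standard containers (the fixed-capacity fast tier is a
simplification; V8 grows its FixedArray on demand):

#include <array>
#include <optional>
#include <unordered_map>

template <typename V, int kFastSize = 1024>
class TwoTierCache {
 public:
  std::optional<V> Probe(int serial_number) const {
    if (serial_number <= kFastSize) {
      return fast_[serial_number - 1];  // serial numbers start at 1
    }
    auto it = slow_.find(serial_number);
    if (it == slow_.end()) return std::nullopt;
    return it->second;
  }

  void Cache(int serial_number, V value) {
    if (serial_number <= kFastSize) {
      fast_[serial_number - 1] = std::move(value);
    } else {
      slow_[serial_number] = std::move(value);
    }
  }

  void Uncache(int serial_number) {
    if (serial_number <= kFastSize) {
      fast_[serial_number - 1].reset();
    } else {
      slow_.erase(serial_number);
    }
  }

 private:
  std::array<std::optional<V>, kFastSize> fast_;  // O(1) indexed fast tier
  std::unordered_map<int, V> slow_;               // hashed slow tier
};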
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 91f0b168d9..74d3788fd1 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -23,6 +23,10 @@ class ApiNatives {
Handle<FunctionTemplateInfo> data);
MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
+ Handle<ObjectTemplateInfo> data,
+ Handle<JSReceiver> new_target = Handle<JSReceiver>());
+
+ MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(
Handle<ObjectTemplateInfo> data);
enum ApiInstanceType {
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index bf35154843..6858a325c4 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -15,6 +15,7 @@
#include "include/v8-experimental.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
+#include "include/v8-util.h"
#include "src/accessors.h"
#include "src/api-experimental.h"
#include "src/api-natives.h"
@@ -35,11 +36,14 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
+#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
+#include "src/globals.h"
#include "src/icu_util.h"
#include "src/isolate-inl.h"
#include "src/json-parser.h"
+#include "src/json-stringifier.h"
#include "src/messages.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
@@ -48,14 +52,15 @@
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/profiler/profile-generator-inl.h"
-#include "src/profiler/sampler.h"
-#include "src/property.h"
+#include "src/profiler/tick-sample.h"
#include "src/property-descriptor.h"
#include "src/property-details.h"
+#include "src/property.h"
#include "src/prototype.h"
-#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
+#include "src/runtime/runtime.h"
#include "src/simulator.h"
+#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/startup-data-util.h"
@@ -65,58 +70,61 @@
#include "src/v8threads.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
-
+#include "src/wasm/wasm-module.h"
namespace v8 {
-#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
-
+#define LOG_API(isolate, class_name, function_name) \
+ i::RuntimeCallTimerScope _runtime_timer( \
+ isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
+ isolate, &internal::tracing::TraceEventStatsTable:: \
+ API_##class_name##_##function_name); \
+ LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
#define ENTER_V8(isolate) i::VMState<v8::OTHER> __state__((isolate))
-
-#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, function_name, \
- bailout_value, HandleScopeClass, \
- do_callback) \
- if (IsExecutionTerminatingCheck(isolate)) { \
- return bailout_value; \
- } \
- HandleScopeClass handle_scope(isolate); \
- CallDepthScope call_depth_scope(isolate, context, do_callback); \
- LOG_API(isolate, function_name); \
- ENTER_V8(isolate); \
+#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, \
+ function_name, bailout_value, \
+ HandleScopeClass, do_callback) \
+ if (IsExecutionTerminatingCheck(isolate)) { \
+ return bailout_value; \
+ } \
+ HandleScopeClass handle_scope(isolate); \
+ CallDepthScope<do_callback> call_depth_scope(isolate, context); \
+ LOG_API(isolate, class_name, function_name); \
+ ENTER_V8(isolate); \
bool has_pending_exception = false
-
-#define PREPARE_FOR_EXECUTION_WITH_CONTEXT( \
- context, function_name, bailout_value, HandleScopeClass, do_callback) \
- auto isolate = context.IsEmpty() \
- ? i::Isolate::Current() \
- : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
- PREPARE_FOR_EXECUTION_GENERIC(isolate, context, function_name, \
+#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+ bailout_value, HandleScopeClass, \
+ do_callback) \
+ auto isolate = context.IsEmpty() \
+ ? i::Isolate::Current() \
+ : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
+ PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, function_name, \
bailout_value, HandleScopeClass, do_callback);
+#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, class_name, function_name, \
+ T) \
+ PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), class_name, \
+ function_name, MaybeLocal<T>(), \
+ InternalEscapableScope, false);
-#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, function_name, T) \
- PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), function_name, \
- MaybeLocal<T>(), InternalEscapableScope, \
- false);
+#define PREPARE_FOR_EXECUTION(context, class_name, function_name, T) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+ MaybeLocal<T>(), InternalEscapableScope, \
+ false)
+#define PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, class_name, \
+ function_name, T) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+ MaybeLocal<T>(), InternalEscapableScope, \
+ true)
-#define PREPARE_FOR_EXECUTION(context, function_name, T) \
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, MaybeLocal<T>(), \
- InternalEscapableScope, false)
-
-
-#define PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, function_name, T) \
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, MaybeLocal<T>(), \
- InternalEscapableScope, true)
-
-
-#define PREPARE_FOR_EXECUTION_PRIMITIVE(context, function_name, T) \
- PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, Nothing<T>(), \
- i::HandleScope, false)
-
+#define PREPARE_FOR_EXECUTION_PRIMITIVE(context, class_name, function_name, T) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
+ Nothing<T>(), i::HandleScope, false)
#define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
do { \
@@ -167,28 +175,34 @@ void CheckMicrotasksScopesConsistency(i::Isolate* isolate) {
}
#endif
-
+template <bool do_callback>
class CallDepthScope {
public:
- explicit CallDepthScope(i::Isolate* isolate, Local<Context> context,
- bool do_callback)
- : isolate_(isolate),
- context_(context),
- escaped_(false),
- do_callback_(do_callback) {
+ explicit CallDepthScope(i::Isolate* isolate, Local<Context> context)
+ : isolate_(isolate), context_(context), escaped_(false) {
// TODO(dcarney): remove this when blink stops crashing.
DCHECK(!isolate_->external_caught_exception());
isolate_->IncrementJsCallsFromApiCounter();
isolate_->handle_scope_implementer()->IncrementCallDepth();
- if (!context_.IsEmpty()) context_->Enter();
- if (do_callback_) isolate_->FireBeforeCallEnteredCallback();
+ if (!context.IsEmpty()) {
+ i::Handle<i::Context> env = Utils::OpenHandle(*context);
+ i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ if (isolate->context() != nullptr &&
+ isolate->context()->native_context() == env->native_context() &&
+ impl->LastEnteredContextWas(env)) {
+ context_ = Local<Context>();
+ } else {
+ context_->Enter();
+ }
+ }
+ if (do_callback) isolate_->FireBeforeCallEnteredCallback();
}
~CallDepthScope() {
if (!context_.IsEmpty()) context_->Exit();
if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
- if (do_callback_) isolate_->FireCallCompletedCallback();
+ if (do_callback) isolate_->FireCallCompletedCallback();
#ifdef DEBUG
- if (do_callback_) CheckMicrotasksScopesConsistency(isolate_);
+ if (do_callback) CheckMicrotasksScopesConsistency(isolate_);
#endif
}
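Promoting do_callback from a constructor argument to a template parameter means
each CallDepthScope<true> / CallDepthScope<false> instantiation resolves the
callback branches at compile time. The pattern in isolation:

template <bool do_callback>
class DepthScope {
 public:
  DepthScope() {
    if (do_callback) FireBeforeCallEntered();  // constant-folded per instantiation
  }
  ~DepthScope() {
    if (do_callback) FireCallCompleted();
  }

 private:
  static void FireBeforeCallEntered() {}
  static void FireCallCompleted() {}
};

// DepthScope<true> runs both hooks; DepthScope<false> compiles down to empty
// constructor and destructor bodies.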
@@ -238,9 +252,8 @@ void i::FatalProcessOutOfMemory(const char* location) {
i::V8::FatalProcessOutOfMemory(location, false);
}
-
-// When V8 cannot allocated memory FatalProcessOutOfMemory is called.
-// The default fatal error handler is called and execution is stopped.
+// When V8 cannot allocate memory FatalProcessOutOfMemory is called. The default
+// OOM error handler is called and execution is stopped.
void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
i::Isolate* isolate = i::Isolate::Current();
char last_few_messages[Heap::kTraceRingBufferSize + 1];
@@ -249,49 +262,53 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
memset(js_stacktrace, 0, Heap::kStacktraceBufferSize + 1);
i::HeapStats heap_stats;
- int start_marker;
+ intptr_t start_marker;
heap_stats.start_marker = &start_marker;
- int new_space_size;
+ size_t new_space_size;
heap_stats.new_space_size = &new_space_size;
- int new_space_capacity;
+ size_t new_space_capacity;
heap_stats.new_space_capacity = &new_space_capacity;
- intptr_t old_space_size;
+ size_t old_space_size;
heap_stats.old_space_size = &old_space_size;
- intptr_t old_space_capacity;
+ size_t old_space_capacity;
heap_stats.old_space_capacity = &old_space_capacity;
- intptr_t code_space_size;
+ size_t code_space_size;
heap_stats.code_space_size = &code_space_size;
- intptr_t code_space_capacity;
+ size_t code_space_capacity;
heap_stats.code_space_capacity = &code_space_capacity;
- intptr_t map_space_size;
+ size_t map_space_size;
heap_stats.map_space_size = &map_space_size;
- intptr_t map_space_capacity;
+ size_t map_space_capacity;
heap_stats.map_space_capacity = &map_space_capacity;
- intptr_t lo_space_size;
+ size_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
- int global_handle_count;
+ size_t global_handle_count;
heap_stats.global_handle_count = &global_handle_count;
- int weak_global_handle_count;
+ size_t weak_global_handle_count;
heap_stats.weak_global_handle_count = &weak_global_handle_count;
- int pending_global_handle_count;
+ size_t pending_global_handle_count;
heap_stats.pending_global_handle_count = &pending_global_handle_count;
- int near_death_global_handle_count;
+ size_t near_death_global_handle_count;
heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
- int free_global_handle_count;
+ size_t free_global_handle_count;
heap_stats.free_global_handle_count = &free_global_handle_count;
- intptr_t memory_allocator_size;
+ size_t memory_allocator_size;
heap_stats.memory_allocator_size = &memory_allocator_size;
- intptr_t memory_allocator_capacity;
+ size_t memory_allocator_capacity;
heap_stats.memory_allocator_capacity = &memory_allocator_capacity;
- int objects_per_type[LAST_TYPE + 1] = {0};
+ size_t malloced_memory;
+ heap_stats.malloced_memory = &malloced_memory;
+ size_t malloced_peak_memory;
+ heap_stats.malloced_peak_memory = &malloced_peak_memory;
+ size_t objects_per_type[LAST_TYPE + 1] = {0};
heap_stats.objects_per_type = objects_per_type;
- int size_per_type[LAST_TYPE + 1] = {0};
+ size_t size_per_type[LAST_TYPE + 1] = {0};
heap_stats.size_per_type = size_per_type;
int os_error;
heap_stats.os_error = &os_error;
heap_stats.last_few_messages = last_few_messages;
heap_stats.js_stacktrace = js_stacktrace;
- int end_marker;
+ intptr_t end_marker;
heap_stats.end_marker = &end_marker;
if (isolate->heap()->HasBeenSetUp()) {
// BUG(1718): Don't use the take_snapshot since we don't support
@@ -303,9 +320,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
}
- Utils::ApiCheck(false, location, is_heap_oom
- ? "Allocation failed - JavaScript heap out of memory"
- : "Allocation failed - process out of memory");
+ Utils::ReportOOMFailure(location, is_heap_oom);
// If the fatal error handler returns, we stop execution.
FATAL("API fatal error handler returned after process out of memory");
}
@@ -314,7 +329,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
void Utils::ReportApiFailure(const char* location, const char* message) {
i::Isolate* isolate = i::Isolate::Current();
FatalErrorCallback callback = isolate->exception_behavior();
- if (callback == NULL) {
+ if (callback == nullptr) {
base::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n", location,
message);
base::OS::Abort();
@@ -324,6 +339,28 @@ void Utils::ReportApiFailure(const char* location, const char* message) {
isolate->SignalFatalError();
}
+void Utils::ReportOOMFailure(const char* location, bool is_heap_oom) {
+ i::Isolate* isolate = i::Isolate::Current();
+ OOMErrorCallback oom_callback = isolate->oom_behavior();
+ if (oom_callback == nullptr) {
+  // TODO(wfh): Remove this fallback once Blink sets the OOM handler. See
+ // crbug.com/614440.
+ FatalErrorCallback fatal_callback = isolate->exception_behavior();
+ if (fatal_callback == nullptr) {
+      base::OS::PrintError("\n#\n# Fatal %s OOM in %s\n#\n\n",
+                           is_heap_oom ? "JavaScript" : "process", location);
+ base::OS::Abort();
+ } else {
+ fatal_callback(location,
+ is_heap_oom
+ ? "Allocation failed - JavaScript heap out of memory"
+ : "Allocation failed - process out of memory");
+ }
+ } else {
+ oom_callback(location, is_heap_oom);
+ }
+ isolate->SignalFatalError();
+}
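The oom_behavior() consulted above is populated through the public
v8::Isolate::SetOOMErrorHandler API. A minimal embedder sketch, assuming the
v8.h that ships with this revision (the handler name is illustrative):

    #include <cstdio>
    #include "v8.h"

    // Matches v8::OOMErrorCallback: void(const char*, bool).
    void OnV8OutOfMemory(const char* location, bool is_heap_oom) {
      fprintf(stderr, "V8 OOM (%s) at %s\n",
              is_heap_oom ? "JavaScript heap" : "process", location);
      // If the handler returns, V8 signals a fatal error and aborts anyway.
    }

    isolate->SetOOMErrorHandler(OnV8OutOfMemory);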
static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
if (isolate->has_scheduled_exception()) {
@@ -383,91 +420,159 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
return true;
}
-StartupData SerializeIsolateAndContext(
- Isolate* isolate, Persistent<Context>* context,
- i::Snapshot::Metadata metadata,
- i::StartupSerializer::FunctionCodeHandling function_code_handling) {
- if (context->IsEmpty()) return {NULL, 0};
+struct SnapshotCreatorData {
+ explicit SnapshotCreatorData(Isolate* isolate)
+ : isolate_(isolate),
+ contexts_(isolate),
+ templates_(isolate),
+ created_(false) {}
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ static SnapshotCreatorData* cast(void* data) {
+ return reinterpret_cast<SnapshotCreatorData*>(data);
+ }
+
+ ArrayBufferAllocator allocator_;
+ Isolate* isolate_;
+ PersistentValueVector<Context> contexts_;
+ PersistentValueVector<Template> templates_;
+ bool created_;
+};
+
+} // namespace
+
+SnapshotCreator::SnapshotCreator(intptr_t* external_references,
+ StartupData* existing_snapshot) {
+ i::Isolate* internal_isolate = new i::Isolate(true);
+ Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
+ SnapshotCreatorData* data = new SnapshotCreatorData(isolate);
+ data->isolate_ = isolate;
+ internal_isolate->set_array_buffer_allocator(&data->allocator_);
+ internal_isolate->set_api_external_references(external_references);
+ isolate->Enter();
+ if (existing_snapshot) {
+ internal_isolate->set_snapshot_blob(existing_snapshot);
+ i::Snapshot::Initialize(internal_isolate);
+ } else {
+ internal_isolate->Init(nullptr);
+ }
+ data_ = data;
+}
+
+SnapshotCreator::~SnapshotCreator() {
+ SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+ DCHECK(data->created_);
+ Isolate* isolate = data->isolate_;
+ isolate->Exit();
+ isolate->Dispose();
+ delete data;
+}
+
+Isolate* SnapshotCreator::GetIsolate() {
+ return SnapshotCreatorData::cast(data_)->isolate_;
+}
+
+size_t SnapshotCreator::AddContext(Local<Context> context) {
+ DCHECK(!context.IsEmpty());
+ SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+ DCHECK(!data->created_);
+ Isolate* isolate = data->isolate_;
+ CHECK_EQ(isolate, context->GetIsolate());
+  size_t index = data->contexts_.Size();
+ data->contexts_.Append(context);
+ return index;
+}
+
+size_t SnapshotCreator::AddTemplate(Local<Template> template_obj) {
+ DCHECK(!template_obj.IsEmpty());
+ SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+ DCHECK(!data->created_);
+ DCHECK_EQ(reinterpret_cast<i::Isolate*>(data->isolate_),
+ Utils::OpenHandle(*template_obj)->GetIsolate());
+  size_t index = data->templates_.Size();
+ data->templates_.Append(template_obj);
+ return index;
+}
+
+StartupData SnapshotCreator::CreateBlob(
+ SnapshotCreator::FunctionCodeHandling function_code_handling) {
+ SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
+ DCHECK(!data->created_);
+
+ {
+ int num_templates = static_cast<int>(data->templates_.Size());
+ i::HandleScope scope(isolate);
+ i::Handle<i::FixedArray> templates =
+ isolate->factory()->NewFixedArray(num_templates, i::TENURED);
+ for (int i = 0; i < num_templates; i++) {
+ templates->set(i, *v8::Utils::OpenHandle(*data->templates_.Get(i)));
+ }
+ isolate->heap()->SetSerializedTemplates(*templates);
+ data->templates_.Clear();
+ }
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
- internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
-
- // GC may have cleared weak cells, so compact any WeakFixedArrays
- // found on the heap.
- i::HeapIterator iterator(internal_isolate->heap(),
- i::HeapIterator::kFilterUnreachable);
- for (i::HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
- if (o->IsPrototypeInfo()) {
- i::Object* prototype_users = i::PrototypeInfo::cast(o)->prototype_users();
- if (prototype_users->IsWeakFixedArray()) {
- i::WeakFixedArray* array = i::WeakFixedArray::cast(prototype_users);
- array->Compact<i::JSObject::PrototypeRegistryCompactionCallback>();
- }
- } else if (o->IsScript()) {
- i::Object* shared_list = i::Script::cast(o)->shared_function_infos();
- if (shared_list->IsWeakFixedArray()) {
- i::WeakFixedArray* array = i::WeakFixedArray::cast(shared_list);
- array->Compact<i::WeakFixedArray::NullCallback>();
- }
- }
+ isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+ isolate->heap()->CompactWeakFixedArrays();
+
+ i::DisallowHeapAllocation no_gc_from_here_on;
+
+ int num_contexts = static_cast<int>(data->contexts_.Size());
+ i::List<i::Object*> contexts(num_contexts);
+ for (int i = 0; i < num_contexts; i++) {
+ i::HandleScope scope(isolate);
+ i::Handle<i::Context> context =
+ v8::Utils::OpenHandle(*data->contexts_.Get(i));
+ contexts.Add(*context);
}
+ data->contexts_.Clear();
- i::Object* raw_context = *v8::Utils::OpenPersistent(*context);
- context->Reset();
+ i::StartupSerializer startup_serializer(isolate, function_code_handling);
+ startup_serializer.SerializeStrongReferences();
- i::SnapshotByteSink snapshot_sink;
- i::StartupSerializer ser(internal_isolate, &snapshot_sink,
- function_code_handling);
- ser.SerializeStrongReferences();
+ // Serialize each context with a new partial serializer.
+ i::List<i::SnapshotData*> context_snapshots(num_contexts);
+ for (int i = 0; i < num_contexts; i++) {
+ i::PartialSerializer partial_serializer(isolate, &startup_serializer);
+ partial_serializer.Serialize(&contexts[i]);
+ context_snapshots.Add(new i::SnapshotData(&partial_serializer));
+ }
- i::SnapshotByteSink context_sink;
- i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
- context_ser.Serialize(&raw_context);
- ser.SerializeWeakReferencesAndDeferred();
+ startup_serializer.SerializeWeakReferencesAndDeferred();
+ i::SnapshotData startup_snapshot(&startup_serializer);
+ StartupData result =
+ i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &context_snapshots);
- return i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
+ // Delete heap-allocated context snapshot instances.
+ for (const auto& context_snapshot : context_snapshots) {
+ delete context_snapshot;
+ }
+ data->created_ = true;
+ return result;
}
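End to end, the new class replaces the removed SerializeIsolateAndContext
helper and allows several contexts per blob. A minimal usage sketch, assuming
the returned buffer is caller-owned as with CreateSnapshotDataBlob:

    v8::SnapshotCreator creator;  // constructs and enters its own isolate
    v8::Isolate* isolate = creator.GetIsolate();
    {
      v8::HandleScope scope(isolate);
      v8::Local<v8::Context> context = v8::Context::New(isolate);
      // ... install embedder state on |context| ...
      creator.AddContext(context);  // returns 0 for the first context
    }
    // Must run before |creator| is destroyed; the destructor DCHECKs created_.
    v8::StartupData blob =
        creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
    // blob.data is heap-allocated; release it with delete[] when done.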
-} // namespace
-
StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
// Create a new isolate and a new context from scratch, optionally run
// a script to embed, and serialize to create a snapshot blob.
- StartupData result = {NULL, 0};
-
+ StartupData result = {nullptr, 0};
base::ElapsedTimer timer;
timer.Start();
-
- ArrayBufferAllocator allocator;
- i::Isolate* internal_isolate = new i::Isolate(true);
- internal_isolate->set_array_buffer_allocator(&allocator);
- Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
-
{
- Isolate::Scope isolate_scope(isolate);
- internal_isolate->Init(NULL);
- Persistent<Context> context;
+ SnapshotCreator snapshot_creator;
+ Isolate* isolate = snapshot_creator.GetIsolate();
{
- HandleScope handle_scope(isolate);
- Local<Context> new_context = Context::New(isolate);
- context.Reset(isolate, new_context);
+ HandleScope scope(isolate);
+ Local<Context> context = Context::New(isolate);
if (embedded_source != NULL &&
- !RunExtraCode(isolate, new_context, embedded_source, "<embedded>")) {
- context.Reset();
+ !RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
+ return result;
}
+ snapshot_creator.AddContext(context);
}
-
- i::Snapshot::Metadata metadata;
- metadata.set_embeds_script(embedded_source != NULL);
-
- result = SerializeIsolateAndContext(
- isolate, &context, metadata, i::StartupSerializer::CLEAR_FUNCTION_CODE);
- DCHECK(context.IsEmpty());
+ result = snapshot_creator.CreateBlob(
+ SnapshotCreator::FunctionCodeHandling::kClear);
}
- isolate->Dispose();
if (i::FLAG_profile_deserialization) {
i::PrintF("Creating snapshot took %0.3f ms\n",
@@ -487,42 +592,28 @@ StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
// compilation of executed functions.
// - Create a new context. This context will be unpolluted.
// - Serialize the isolate and the second context into a new snapshot blob.
- StartupData result = {NULL, 0};
-
+ StartupData result = {nullptr, 0};
base::ElapsedTimer timer;
timer.Start();
-
- ArrayBufferAllocator allocator;
- i::Isolate* internal_isolate = new i::Isolate(true);
- internal_isolate->set_array_buffer_allocator(&allocator);
- internal_isolate->set_snapshot_blob(&cold_snapshot_blob);
- Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
-
{
- Isolate::Scope isolate_scope(isolate);
- i::Snapshot::Initialize(internal_isolate);
- Persistent<Context> context;
- bool success;
+ SnapshotCreator snapshot_creator(nullptr, &cold_snapshot_blob);
+ Isolate* isolate = snapshot_creator.GetIsolate();
{
- HandleScope handle_scope(isolate);
- Local<Context> new_context = Context::New(isolate);
- success = RunExtraCode(isolate, new_context, warmup_source, "<warm-up>");
+ HandleScope scope(isolate);
+ Local<Context> context = Context::New(isolate);
+ if (!RunExtraCode(isolate, context, warmup_source, "<warm-up>")) {
+ return result;
+ }
}
- if (success) {
+ {
HandleScope handle_scope(isolate);
isolate->ContextDisposedNotification(false);
- Local<Context> new_context = Context::New(isolate);
- context.Reset(isolate, new_context);
+ Local<Context> context = Context::New(isolate);
+ snapshot_creator.AddContext(context);
}
-
- i::Snapshot::Metadata metadata;
- metadata.set_embeds_script(i::Snapshot::EmbedsScript(internal_isolate));
-
- result = SerializeIsolateAndContext(
- isolate, &context, metadata, i::StartupSerializer::KEEP_FUNCTION_CODE);
- DCHECK(context.IsEmpty());
+ result = snapshot_creator.CreateBlob(
+ SnapshotCreator::FunctionCodeHandling::kKeep);
}
- isolate->Dispose();
if (i::FLAG_profile_deserialization) {
i::PrintF("Warming up snapshot took %0.3f ms\n",
@@ -658,7 +749,7 @@ void SetResourceConstraints(i::Isolate* isolate,
i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
- LOG_API(isolate, "Persistent::New");
+ LOG_API(isolate, Persistent, New);
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
@@ -684,13 +775,7 @@ void V8::RegisterExternallyReferencedObject(i::Object** object,
isolate->heap()->RegisterExternallyReferencedObject(object);
}
-void V8::MakeWeak(i::Object** object, void* parameter,
- WeakCallback weak_callback) {
- i::GlobalHandles::MakeWeak(object, parameter, weak_callback);
-}
-
-
-void V8::MakeWeak(i::Object** object, void* parameter,
+void V8::MakeWeak(i::Object** location, void* parameter,
int internal_field_index1, int internal_field_index2,
WeakCallbackInfo<void>::Callback weak_callback) {
WeakCallbackType type = WeakCallbackType::kParameter;
@@ -705,24 +790,25 @@ void V8::MakeWeak(i::Object** object, void* parameter,
DCHECK_EQ(internal_field_index1, -1);
DCHECK_EQ(internal_field_index2, -1);
}
- i::GlobalHandles::MakeWeak(object, parameter, weak_callback, type);
+ i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
}
-
-void V8::MakeWeak(i::Object** object, void* parameter,
+void V8::MakeWeak(i::Object** location, void* parameter,
WeakCallbackInfo<void>::Callback weak_callback,
WeakCallbackType type) {
- i::GlobalHandles::MakeWeak(object, parameter, weak_callback, type);
+ i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
}
-
-void* V8::ClearWeak(i::Object** obj) {
- return i::GlobalHandles::ClearWeakness(obj);
+void V8::MakeWeak(i::Object*** location_addr) {
+ i::GlobalHandles::MakeWeak(location_addr);
}
+void* V8::ClearWeak(i::Object** location) {
+ return i::GlobalHandles::ClearWeakness(location);
+}
-void V8::DisposeGlobal(i::Object** obj) {
- i::GlobalHandles::Destroy(obj);
+void V8::DisposeGlobal(i::Object** location) {
+ i::GlobalHandles::Destroy(location);
}
@@ -817,9 +903,8 @@ EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
- Utils::ApiCheck(*escape_slot_ == heap->the_hole_value(),
- "EscapeableHandleScope::Escape",
- "Escape value set twice");
+ Utils::ApiCheck((*escape_slot_)->IsTheHole(heap->isolate()),
+ "EscapableHandleScope::Escape", "Escape value set twice");
if (escape_value == NULL) {
*escape_slot_ = heap->undefined_value();
return NULL;
@@ -828,12 +913,9 @@ i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
return escape_slot_;
}
-
-SealHandleScope::SealHandleScope(Isolate* isolate) {
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
- isolate_ = internal_isolate;
- i::HandleScopeData* current = internal_isolate->handle_scope_data();
+SealHandleScope::SealHandleScope(Isolate* isolate)
+ : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
+ i::HandleScopeData* current = isolate_->handle_scope_data();
prev_limit_ = current->limit;
current->limit = current->next;
prev_sealed_level_ = current->sealed_level;
@@ -950,70 +1032,6 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
}
-// --- N e a n d e r ---
-
-
-// A constructor cannot easily return an error value, therefore it is necessary
-// to check for a dead VM with ON_BAILOUT before constructing any Neander
-// objects. To remind you about this there is no HandleScope in the
-// NeanderObject constructor. When you add one to the site calling the
-// constructor you should check that you ensured the VM was not dead first.
-NeanderObject::NeanderObject(v8::internal::Isolate* isolate, int size) {
- ENTER_V8(isolate);
- value_ = isolate->factory()->NewNeanderObject();
- i::Handle<i::FixedArray> elements = isolate->factory()->NewFixedArray(size);
- value_->set_elements(*elements);
-}
-
-
-int NeanderObject::size() {
- return i::FixedArray::cast(value_->elements())->length();
-}
-
-
-NeanderArray::NeanderArray(v8::internal::Isolate* isolate) : obj_(isolate, 2) {
- obj_.set(0, i::Smi::FromInt(0));
-}
-
-
-int NeanderArray::length() {
- return i::Smi::cast(obj_.get(0))->value();
-}
-
-
-i::Object* NeanderArray::get(int offset) {
- DCHECK_LE(0, offset);
- DCHECK_LT(offset, length());
- return obj_.get(offset + 1);
-}
-
-
-// This method cannot easily return an error value, therefore it is necessary
-// to check for a dead VM with ON_BAILOUT before calling it. To remind you
-// about this there is no HandleScope in this method. When you add one to the
-// site calling this method you should check that you ensured the VM was not
-// dead first.
-void NeanderArray::add(i::Isolate* isolate, i::Handle<i::Object> value) {
- int length = this->length();
- int size = obj_.size();
- if (length == size - 1) {
- i::Factory* factory = isolate->factory();
- i::Handle<i::FixedArray> new_elms = factory->NewFixedArray(2 * size);
- for (int i = 0; i < length; i++)
- new_elms->set(i + 1, get(i));
- obj_.value()->set_elements(*new_elms);
- }
- obj_.set(length + 1, *value);
- obj_.set(0, i::Smi::FromInt(length + 1));
-}
-
-
-void NeanderArray::set(int index, i::Object* value) {
- if (index < 0 || index >= this->length()) return;
- obj_.set(index + 1, value);
-}
-
-
// --- T e m p l a t e ---
@@ -1030,23 +1048,13 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
auto value_obj = Utils::OpenHandle(*value);
+ CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
if (value_obj->IsObjectTemplateInfo()) {
templ->set_serial_number(i::Smi::FromInt(0));
if (templ->IsFunctionTemplateInfo()) {
i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
}
}
- if (i::FLAG_warn_template_set &&
- value_obj->IsJSReceiver() &&
- !value_obj->IsTemplateInfo()) {
- base::OS::PrintError(
- "(node) v8::%sTemplate::Set() with non-primitive values is deprecated\n"
- "(node) and will stop working in the next major release.\n",
- templ->IsFunctionTemplateInfo() ? "Function" : "Object");
- isolate->PrintStack(stderr, i::Isolate::kPrintStackConcise);
- base::DumpBacktrace();
- }
- // TODO(dcarney): split api to allow values of v8::Value or v8::TemplateInfo.
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
value_obj,
static_cast<i::PropertyAttributes>(attribute));
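Note that the new CHECK above turns the old deprecation warning for
non-primitive, non-template values into a hard failure. A sketch of what still
works, where |templ| is any Local<FunctionTemplate> or Local<ObjectTemplate>:

    v8::Local<v8::String> key =
        v8::String::NewFromUtf8(isolate, "answer",
                                v8::NewStringType::kInternalized)
            .ToLocalChecked();
    templ->Set(key, v8::Integer::New(isolate, 42));        // primitive: fine
    templ->Set(key, v8::FunctionTemplate::New(isolate));   // template: fine
    // templ->Set(key, some_js_object);  // JSReceiver, not a template: CHECKs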
@@ -1090,7 +1098,7 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
ENTER_V8(i_isolate);
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
i_isolate);
- if (result->IsUndefined()) {
+ if (result->IsUndefined(i_isolate)) {
// Do not cache prototype objects.
result = Utils::OpenHandle(
*ObjectTemplateNew(i_isolate, Local<FunctionTemplate>(), true));
@@ -1128,8 +1136,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
obj->set_do_not_cache(do_not_cache);
int next_serial_number = 0;
if (!do_not_cache) {
- next_serial_number = isolate->next_serial_number() + 1;
- isolate->set_next_serial_number(next_serial_number);
+ next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
@@ -1147,34 +1154,34 @@ static Local<FunctionTemplate> FunctionTemplateNew(
return Utils::ToLocal(obj);
}
-
-Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
- FunctionCallback callback,
- v8::Local<Value> data,
- v8::Local<Signature> signature,
- int length) {
- return New(
- isolate, callback, data, signature, length, ConstructorBehavior::kAllow);
-}
-
-Local<FunctionTemplate> FunctionTemplate::New(Isolate* isolate,
- FunctionCallback callback,
- v8::Local<Value> data,
- v8::Local<Signature> signature,
- int length,
- ConstructorBehavior behavior) {
+Local<FunctionTemplate> FunctionTemplate::New(
+ Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
+ v8::Local<Signature> signature, int length, ConstructorBehavior behavior) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
- DCHECK(!i_isolate->serializer_enabled());
- LOG_API(i_isolate, "FunctionTemplate::New");
+ LOG_API(i_isolate, FunctionTemplate, New);
ENTER_V8(i_isolate);
- auto tmpl = FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
- length, false);
- if (behavior == ConstructorBehavior::kThrow) tmpl->RemovePrototype();
- return tmpl;
+ auto templ = FunctionTemplateNew(i_isolate, callback, nullptr, data,
+ signature, length, false);
+ if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
+ return templ;
}
+MaybeLocal<FunctionTemplate> FunctionTemplate::FromSnapshot(Isolate* isolate,
+ size_t index) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+ int int_index = static_cast<int>(index);
+ if (int_index < templates->length()) {
+ i::Object* info = templates->get(int_index);
+ if (info->IsFunctionTemplateInfo()) {
+ return Utils::ToLocal(i::Handle<i::FunctionTemplateInfo>(
+ i::FunctionTemplateInfo::cast(info)));
+ }
+ }
+ return Local<FunctionTemplate>();
+}
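The index round-trips through SnapshotCreator::AddTemplate at blob-creation
time. A sketch of both halves (|creator| as in the snapshot sketch above;
|isolate| deserialized from the resulting blob):

    // While building the snapshot:
    size_t index = creator.AddTemplate(templ);

    // In an isolate booted from that blob:
    v8::Local<v8::FunctionTemplate> restored;
    if (v8::FunctionTemplate::FromSnapshot(isolate, index).ToLocal(&restored)) {
      // |index| was in range and held a FunctionTemplateInfo.
    } else {
      // Out of range, or the slot held an ObjectTemplate instead.
    }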
Local<FunctionTemplate> FunctionTemplate::NewWithFastHandler(
Isolate* isolate, FunctionCallback callback,
@@ -1182,7 +1189,7 @@ Local<FunctionTemplate> FunctionTemplate::NewWithFastHandler(
v8::Local<Signature> signature, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
DCHECK(!i_isolate->serializer_enabled());
- LOG_API(i_isolate, "FunctionTemplate::NewWithFastHandler");
+ LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
ENTER_V8(i_isolate);
return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
length, false);
@@ -1247,8 +1254,10 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
return obj;
}
+namespace {
+
template <typename Getter, typename Setter>
-static i::Handle<i::AccessorInfo> MakeAccessorInfo(
+i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
v8::AccessControl settings, v8::PropertyAttribute attributes,
v8::Local<AccessorSignature> signature, bool is_special_data_property) {
@@ -1259,6 +1268,8 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
setter = reinterpret_cast<Setter>(&i::Accessors::ReconfigureToDataProperty);
}
SET_FIELD_WRAPPED(obj, set_setter, setter);
+ i::Address redirected = obj->redirected_getter();
+ if (redirected != nullptr) SET_FIELD_WRAPPED(obj, set_js_getter, redirected);
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
@@ -1267,6 +1278,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
}
+} // namespace
Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this, true);
@@ -1277,7 +1289,7 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
}
i::Isolate* isolate = handle->GetIsolate();
ENTER_V8(isolate);
- if (handle->instance_template()->IsUndefined()) {
+ if (handle->instance_template()->IsUndefined(isolate)) {
Local<ObjectTemplate> templ =
ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
handle->set_instance_template(*Utils::OpenHandle(*templ));
@@ -1358,10 +1370,7 @@ Local<ObjectTemplate> ObjectTemplate::New() {
static Local<ObjectTemplate> ObjectTemplateNew(
i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
bool do_not_cache) {
- // Changes to the environment cannot be captured in the snapshot. Expect no
- // object templates when the isolate is created for serialization.
- DCHECK(!isolate->serializer_enabled());
- LOG_API(isolate, "ObjectTemplate::New");
+ LOG_API(isolate, ObjectTemplate, New);
ENTER_V8(isolate);
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
@@ -1370,13 +1379,12 @@ static Local<ObjectTemplate> ObjectTemplateNew(
InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
int next_serial_number = 0;
if (!do_not_cache) {
- next_serial_number = isolate->next_serial_number() + 1;
- isolate->set_next_serial_number(next_serial_number);
+ next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (!constructor.IsEmpty())
obj->set_constructor(*Utils::OpenHandle(*constructor));
- obj->set_internal_field_count(i::Smi::FromInt(0));
+ obj->set_data(i::Smi::FromInt(0));
return Utils::ToLocal(obj);
}
@@ -1385,13 +1393,28 @@ Local<ObjectTemplate> ObjectTemplate::New(
return ObjectTemplateNew(isolate, constructor, false);
}
+MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
+ size_t index) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::FixedArray* templates = i_isolate->heap()->serialized_templates();
+ int int_index = static_cast<int>(index);
+ if (int_index < templates->length()) {
+ i::Object* info = templates->get(int_index);
+ if (info->IsObjectTemplateInfo()) {
+ return Utils::ToLocal(
+ i::Handle<i::ObjectTemplateInfo>(i::ObjectTemplateInfo::cast(info)));
+ }
+ }
+ return Local<ObjectTemplate>();
+}
+
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
i::Isolate* isolate,
ObjectTemplate* object_template) {
i::Object* obj = Utils::OpenHandle(object_template)->constructor();
- if (!obj ->IsUndefined()) {
+ if (!obj->IsUndefined(isolate)) {
i::FunctionTemplateInfo* info = i::FunctionTemplateInfo::cast(obj);
return i::Handle<i::FunctionTemplateInfo>(info, isolate);
}
@@ -1480,20 +1503,12 @@ void ObjectTemplate::SetAccessor(v8::Local<Name> name,
signature, i::FLAG_disable_old_api_accessors);
}
-
template <typename Getter, typename Setter, typename Query, typename Deleter,
typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
- Getter getter, Setter setter,
- Query query, Deleter remover,
- Enumerator enumerator,
- Local<Value> data,
- PropertyHandlerFlags flags) {
- i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- auto cons = EnsureConstructor(isolate, templ);
- EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
+static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
+ i::Isolate* isolate, Getter getter, Setter setter, Query query,
+ Deleter remover, Enumerator enumerator, Local<Value> data,
+ PropertyHandlerFlags flags) {
auto obj = i::Handle<i::InterceptorInfo>::cast(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
obj->set_flags(0);
@@ -1515,6 +1530,24 @@ static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
obj->set_data(*Utils::OpenHandle(*data));
+ return obj;
+}
+
+template <typename Getter, typename Setter, typename Query, typename Deleter,
+ typename Enumerator>
+static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
+ Getter getter, Setter setter,
+ Query query, Deleter remover,
+ Enumerator enumerator,
+ Local<Value> data,
+ PropertyHandlerFlags flags) {
+ i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
+ ENTER_V8(isolate);
+ i::HandleScope scope(isolate);
+ auto cons = EnsureConstructor(isolate, templ);
+ EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
+ auto obj = CreateInterceptorInfo(isolate, getter, setter, query, remover,
+ enumerator, data, flags);
cons->set_named_property_handler(*obj);
}
@@ -1561,8 +1594,8 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
i::Handle<i::AccessCheckInfo>::cast(struct_info);
SET_FIELD_WRAPPED(info, set_callback, callback);
- SET_FIELD_WRAPPED(info, set_named_callback, nullptr);
- SET_FIELD_WRAPPED(info, set_indexed_callback, nullptr);
+ info->set_named_interceptor(nullptr);
+ info->set_indexed_interceptor(nullptr);
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1573,28 +1606,34 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
cons->set_needs_access_check(true);
}
-void ObjectTemplate::SetAccessCheckCallback(
- DeprecatedAccessCheckCallback callback, Local<Value> data) {
- SetAccessCheckCallback(reinterpret_cast<AccessCheckCallback>(callback), data);
-}
-
-void ObjectTemplate::SetAccessCheckCallbacks(
- NamedSecurityCallback named_callback,
- IndexedSecurityCallback indexed_callback, Local<Value> data) {
+void ObjectTemplate::SetAccessCheckCallbackAndHandler(
+ AccessCheckCallback callback,
+ const NamedPropertyHandlerConfiguration& named_handler,
+ const IndexedPropertyHandlerConfiguration& indexed_handler,
+ Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
- EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallbacks");
+  EnsureNotInstantiated(
+      cons, "v8::ObjectTemplate::SetAccessCheckCallbackAndHandler");
i::Handle<i::Struct> struct_info =
isolate->factory()->NewStruct(i::ACCESS_CHECK_INFO_TYPE);
i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info);
- SET_FIELD_WRAPPED(info, set_callback, nullptr);
- SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
- SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
+ SET_FIELD_WRAPPED(info, set_callback, callback);
+ auto named_interceptor = CreateInterceptorInfo(
+ isolate, named_handler.getter, named_handler.setter, named_handler.query,
+ named_handler.deleter, named_handler.enumerator, named_handler.data,
+ named_handler.flags);
+ info->set_named_interceptor(*named_interceptor);
+ auto indexed_interceptor = CreateInterceptorInfo(
+ isolate, indexed_handler.getter, indexed_handler.setter,
+ indexed_handler.query, indexed_handler.deleter,
+ indexed_handler.enumerator, indexed_handler.data, indexed_handler.flags);
+ info->set_indexed_interceptor(*indexed_interceptor);
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1605,7 +1644,6 @@ void ObjectTemplate::SetAccessCheckCallbacks(
cons->set_needs_access_check(true);
}
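The merged API pairs the boolean access check with interceptors that service
accesses the check denies. A hedged call-shape sketch, with free functions
assumed to match the public callback typedefs in this revision's v8.h:

    bool DenyCrossContextAccess(v8::Local<v8::Context> accessing_context,
                                v8::Local<v8::Object> accessed_object,
                                v8::Local<v8::Value> data) {
      return false;  // deny; the interceptors below handle the access instead
    }

    void BlockedNamedGetter(v8::Local<v8::Name> property,
                            const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().SetUndefined();
    }

    object_template->SetAccessCheckCallbackAndHandler(
        DenyCrossContextAccess,
        v8::NamedPropertyHandlerConfiguration(BlockedNamedGetter),
        v8::IndexedPropertyHandlerConfiguration(),  // all-default interceptors
        v8::Local<v8::Value>());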
-
void ObjectTemplate::SetHandler(
const IndexedPropertyHandlerConfiguration& config) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -1613,25 +1651,9 @@ void ObjectTemplate::SetHandler(
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
- auto obj = i::Handle<i::InterceptorInfo>::cast(
- isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
- obj->set_flags(0);
-
- if (config.getter != 0) SET_FIELD_WRAPPED(obj, set_getter, config.getter);
- if (config.setter != 0) SET_FIELD_WRAPPED(obj, set_setter, config.setter);
- if (config.query != 0) SET_FIELD_WRAPPED(obj, set_query, config.query);
- if (config.deleter != 0) SET_FIELD_WRAPPED(obj, set_deleter, config.deleter);
- if (config.enumerator != 0) {
- SET_FIELD_WRAPPED(obj, set_enumerator, config.enumerator);
- }
- obj->set_all_can_read(static_cast<int>(config.flags) &
- static_cast<int>(PropertyHandlerFlags::kAllCanRead));
-
- v8::Local<v8::Value> data = config.data;
- if (data.IsEmpty()) {
- data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
- }
- obj->set_data(*Utils::OpenHandle(*data));
+ auto obj = CreateInterceptorInfo(
+ isolate, config.getter, config.setter, config.query, config.deleter,
+ config.enumerator, config.data, config.flags);
cons->set_indexed_property_handler(*obj);
}
@@ -1657,7 +1679,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
int ObjectTemplate::InternalFieldCount() {
- return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value();
+ return Utils::OpenHandle(this)->internal_field_count();
}
@@ -1675,9 +1697,18 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
// function to do the setting.
EnsureConstructor(isolate, this);
}
- Utils::OpenHandle(this)->set_internal_field_count(i::Smi::FromInt(value));
+ Utils::OpenHandle(this)->set_internal_field_count(value);
+}
+
+bool ObjectTemplate::IsImmutableProto() {
+ return Utils::OpenHandle(this)->immutable_proto();
}
+void ObjectTemplate::SetImmutableProto() {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ Utils::OpenHandle(this)->set_immutable_proto(true);
+}
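On the public side this is a one-way switch; there is no way to unset it.
Sketch:

    v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
    templ->SetImmutableProto();  // instances refuse later [[Prototype]] changes
    bool immutable = templ->IsImmutableProto();  // now true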
// --- S c r i p t s ---
@@ -1723,13 +1754,11 @@ ScriptCompiler::StreamedSource::GetCachedData() const {
Local<Script> UnboundScript::BindToCurrentContext() {
i::Handle<i::HeapObject> obj =
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::SharedFunctionInfo>
- function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
i::Isolate* isolate = obj->GetIsolate();
-
- i::Handle<i::JSReceiver> global(isolate->native_context()->global_object());
+ i::Handle<i::SharedFunctionInfo> function_info(
+ i::SharedFunctionInfo::cast(*obj), isolate);
i::Handle<i::JSFunction> function =
- obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
function_info, isolate->native_context());
return ToApiHandle<Script>(function);
}
@@ -1739,7 +1768,7 @@ int UnboundScript::GetId() {
i::Handle<i::HeapObject> obj =
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, "v8::UnboundScript::GetId");
+ LOG_API(isolate, UnboundScript, GetId);
i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> function_info(
i::SharedFunctionInfo::cast(*obj));
@@ -1752,7 +1781,7 @@ int UnboundScript::GetLineNumber(int code_pos) {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, "UnboundScript::GetLineNumber");
+ LOG_API(isolate, UnboundScript, GetLineNumber);
if (obj->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(obj->script()));
return i::Script::GetLineNumber(script, code_pos);
@@ -1766,7 +1795,7 @@ Local<Value> UnboundScript::GetScriptName() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, "UnboundScript::GetName");
+ LOG_API(isolate, UnboundScript, GetName);
if (obj->script()->IsScript()) {
i::Object* name = i::Script::cast(obj->script())->name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
@@ -1780,7 +1809,7 @@ Local<Value> UnboundScript::GetSourceURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, "UnboundScript::GetSourceURL");
+ LOG_API(isolate, UnboundScript, GetSourceURL);
if (obj->script()->IsScript()) {
i::Object* url = i::Script::cast(obj->script())->source_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
@@ -1794,7 +1823,7 @@ Local<Value> UnboundScript::GetSourceMappingURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, "UnboundScript::GetSourceMappingURL");
+ LOG_API(isolate, UnboundScript, GetSourceMappingURL);
if (obj->script()->IsScript()) {
i::Object* url = i::Script::cast(obj->script())->source_mapping_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
@@ -1805,12 +1834,13 @@ Local<Value> UnboundScript::GetSourceMappingURL() {
MaybeLocal<Value> Script::Run(Local<Context> context) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Script::Run()", Value)
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Script, Run, Value)
+ i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT0("v8", "V8.Execute");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> receiver(isolate->global_proxy(), isolate);
+ i::Handle<i::Object> receiver = isolate->global_proxy();
Local<Value> result;
has_pending_exception =
!ToLocal<Value>(i::Execution::Call(isolate, fun, receiver, 0, NULL),
@@ -1841,8 +1871,9 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options,
bool is_module) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- PREPARE_FOR_EXECUTION_WITH_ISOLATE(
- isolate, "v8::ScriptCompiler::CompileUnbound()", UnboundScript);
+ PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, ScriptCompiler, CompileUnbound,
+ UnboundScript);
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
// Don't try to produce any kind of cache when the debugger is loaded.
if (isolate->debug()->is_loaded() &&
@@ -1862,7 +1893,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::SharedFunctionInfo> result;
{
i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
- TRACE_EVENT0("v8", "V8.CompileScript");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
i::Handle<i::Object> name_obj;
i::Handle<i::Object> source_map_url;
int line_offset = 0;
@@ -2001,8 +2032,9 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Local<Context> v8_context, Source* source, size_t arguments_count,
Local<String> arguments[], size_t context_extension_count,
Local<Object> context_extensions[]) {
- PREPARE_FOR_EXECUTION(
- v8_context, "v8::ScriptCompiler::CompileFunctionInContext()", Function);
+ PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext,
+ Function);
+ TRACE_EVENT0("v8", "V8.ScriptCompiler");
i::Handle<i::String> source_string;
auto factory = isolate->factory();
if (arguments_count) {
@@ -2056,6 +2088,8 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
}
i::Handle<i::Object> name_obj;
+ int eval_scope_position = 0;
+ int eval_position = i::kNoSourcePosition;
int line_offset = 0;
int column_offset = 0;
if (!source->resource_name.IsEmpty()) {
@@ -2068,11 +2102,13 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
column_offset = static_cast<int>(source->resource_column_offset->Value());
}
i::Handle<i::JSFunction> fun;
- has_pending_exception = !i::Compiler::GetFunctionFromEval(
- source_string, outer_info, context, i::SLOPPY,
- i::ONLY_SINGLE_FUNCTION_LITERAL, line_offset,
- column_offset - scope_position, name_obj,
- source->resource_options).ToHandle(&fun);
+ has_pending_exception =
+ !i::Compiler::GetFunctionFromEval(
+ source_string, outer_info, context, i::SLOPPY,
+ i::ONLY_SINGLE_FUNCTION_LITERAL, eval_scope_position, eval_position,
+ line_offset, column_offset - scope_position, name_obj,
+ source->resource_options)
+ .ToHandle(&fun);
if (has_pending_exception) {
isolate->ReportPendingMessages();
}
@@ -2112,7 +2148,8 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
StreamedSource* v8_source,
Local<String> full_source_string,
const ScriptOrigin& origin) {
- PREPARE_FOR_EXECUTION(context, "v8::ScriptCompiler::Compile()", Script);
+ PREPARE_FOR_EXECUTION(context, ScriptCompiler, Compile, Script);
+ TRACE_EVENT0("v8", "V8.ScriptCompiler");
i::StreamedSource* source = v8_source->impl();
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Handle<i::Script> script = isolate->factory()->NewScript(str);
@@ -2136,6 +2173,11 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
source->info->set_script(script);
source->info->set_context(isolate->native_context());
+ // Create a canonical handle scope before internalizing parsed values if
+ // compiling bytecode. This is required for off-thread bytecode generation.
+ std::unique_ptr<i::CanonicalHandleScope> canonical;
+ if (i::FLAG_ignition) canonical.reset(new i::CanonicalHandleScope(isolate));
+
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
source->parser->Internalize(isolate, script,
@@ -2275,7 +2317,7 @@ v8::TryCatch::~TryCatch() {
bool v8::TryCatch::HasCaught() const {
- return !reinterpret_cast<i::Object*>(exception_)->IsTheHole();
+ return !reinterpret_cast<i::Object*>(exception_)->IsTheHole(isolate_);
}
@@ -2311,7 +2353,7 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
if (!HasCaught()) return v8::Local<Value>();
i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
if (!raw_obj->IsJSObject()) return v8::Local<Value>();
- PREPARE_FOR_EXECUTION(context, "v8::TryCatch::StackTrace", Value);
+ PREPARE_FOR_EXECUTION(context, TryCatch, StackTrace, Value);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate->factory()->stack_string();
Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
@@ -2334,8 +2376,8 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
v8::Local<v8::Message> v8::TryCatch::Message() const {
i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
- DCHECK(message->IsJSMessageObject() || message->IsTheHole());
- if (HasCaught() && !message->IsTheHole()) {
+ DCHECK(message->IsJSMessageObject() || message->IsTheHole(isolate_));
+ if (HasCaught() && !message->IsTheHole(isolate_)) {
return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
} else {
return v8::Local<v8::Message>();
@@ -2413,16 +2455,12 @@ v8::Local<v8::StackTrace> Message::GetStackTrace() const {
Maybe<int> Message::GetLineNumber(Local<Context> context) const {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetLineNumber()", int);
- i::Handle<i::JSFunction> fun = isolate->message_get_line_number();
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
- return Just(static_cast<int>(result->Number()));
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8(isolate);
+ EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
+ auto msg = i::Handle<i::JSMessageObject>::cast(self);
+ return Just(msg->GetLineNumber());
}
@@ -2445,17 +2483,12 @@ int Message::GetEndPosition() const {
Maybe<int> Message::GetStartColumn(Local<Context> context) const {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetStartColumn()",
- int);
- i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
- return Just(static_cast<int>(result->Number()));
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8(isolate);
+ EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
+ auto msg = i::Handle<i::JSMessageObject>::cast(self);
+ return Just(msg->GetColumnNumber());
}
@@ -2468,18 +2501,15 @@ int Message::GetStartColumn() const {
Maybe<int> Message::GetEndColumn(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
- i::Handle<i::JSFunction> fun = isolate->message_get_column_number();
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::Object> args[] = {self};
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
- int start = self->start_position();
- int end = self->end_position();
- return Just(static_cast<int>(result->Number()) + (end - start));
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8(isolate);
+ EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
+ auto msg = i::Handle<i::JSMessageObject>::cast(self);
+ const int column_number = msg->GetColumnNumber();
+ if (column_number == -1) return Just(-1);
+ const int start = self->start_position();
+ const int end = self->end_position();
+ return Just(column_number + (end - start));
}
@@ -2512,20 +2542,12 @@ bool Message::IsOpaque() const {
MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
- PREPARE_FOR_EXECUTION(context, "v8::Message::GetSourceLine()", String);
- i::Handle<i::JSFunction> fun = isolate->message_get_source_line();
- i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- i::Handle<i::Object> args[] = {Utils::OpenHandle(this)};
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, undefined, arraysize(args), args)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(String);
- Local<String> str;
- if (result->IsString()) {
- str = Utils::ToLocal(i::Handle<i::String>::cast(result));
- }
- RETURN_ESCAPED(str);
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8(isolate);
+ EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
+ auto msg = i::Handle<i::JSMessageObject>::cast(self);
+ RETURN_ESCAPED(Utils::ToLocal(msg->GetSourceLine()));
}
@@ -2645,7 +2667,7 @@ static bool getBoolProperty(const StackFrame* f, const char* propertyName) {
i::Handle<i::JSObject> self = Utils::OpenHandle(f);
i::Handle<i::Object> obj =
i::JSReceiver::GetProperty(isolate, self, propertyName).ToHandleChecked();
- return obj->IsTrue();
+ return obj->IsTrue(isolate);
}
bool StackFrame::IsEval() const { return getBoolProperty(this, "isEval"); }
@@ -2680,7 +2702,7 @@ void NativeWeakMap::Set(Local<Value> v8_key, Local<Value> v8_value) {
}
i::Handle<i::ObjectHashTable> table(
i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(*key)) {
+ if (!table->IsKey(isolate, *key)) {
DCHECK(false);
return;
}
@@ -2700,12 +2722,12 @@ Local<Value> NativeWeakMap::Get(Local<Value> v8_key) {
}
i::Handle<i::ObjectHashTable> table(
i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(*key)) {
+ if (!table->IsKey(isolate, *key)) {
DCHECK(false);
return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
i::Handle<i::Object> lookup(table->Lookup(key), isolate);
- if (lookup->IsTheHole())
+ if (lookup->IsTheHole(isolate))
return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
return Utils::ToLocal(lookup);
}
@@ -2723,12 +2745,12 @@ bool NativeWeakMap::Has(Local<Value> v8_key) {
}
i::Handle<i::ObjectHashTable> table(
i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(*key)) {
+ if (!table->IsKey(isolate, *key)) {
DCHECK(false);
return false;
}
i::Handle<i::Object> lookup(table->Lookup(key), isolate);
- return !lookup->IsTheHole();
+ return !lookup->IsTheHole(isolate);
}
@@ -2744,7 +2766,7 @@ bool NativeWeakMap::Delete(Local<Value> v8_key) {
}
i::Handle<i::ObjectHashTable> table(
i::ObjectHashTable::cast(weak_collection->table()));
- if (!table->IsKey(*key)) {
+ if (!table->IsKey(isolate, *key)) {
DCHECK(false);
return false;
}
@@ -2757,49 +2779,94 @@ bool NativeWeakMap::Delete(Local<Value> v8_key) {
MaybeLocal<Value> JSON::Parse(Isolate* v8_isolate, Local<String> json_string) {
auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, "JSON::Parse", Value);
+ PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, JSON, Parse, Value);
i::Handle<i::String> string = Utils::OpenHandle(*json_string);
i::Handle<i::String> source = i::String::Flatten(string);
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
auto maybe = source->IsSeqOneByteString()
- ? i::JsonParser<true>::Parse(source)
- : i::JsonParser<false>::Parse(source);
+ ? i::JsonParser<true>::Parse(isolate, source, undefined)
+ : i::JsonParser<false>::Parse(isolate, source, undefined);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(maybe, &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
+MaybeLocal<Value> JSON::Parse(Local<Context> context,
+ Local<String> json_string) {
+ PREPARE_FOR_EXECUTION(context, JSON, Parse, Value);
+ i::Handle<i::String> string = Utils::OpenHandle(*json_string);
+ i::Handle<i::String> source = i::String::Flatten(string);
+ i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
+ auto maybe = source->IsSeqOneByteString()
+ ? i::JsonParser<true>::Parse(isolate, source, undefined)
+ : i::JsonParser<false>::Parse(isolate, source, undefined);
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(maybe, &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
Local<Value> JSON::Parse(Local<String> json_string) {
- auto isolate = reinterpret_cast<v8::Isolate*>(
- Utils::OpenHandle(*json_string)->GetIsolate());
- RETURN_TO_LOCAL_UNCHECKED(Parse(isolate, json_string), Value);
+ RETURN_TO_LOCAL_UNCHECKED(Parse(Local<Context>(), json_string), Value);
+}
+
+MaybeLocal<String> JSON::Stringify(Local<Context> context,
+ Local<Object> json_object,
+ Local<String> gap) {
+ PREPARE_FOR_EXECUTION(context, JSON, Stringify, String);
+ i::Handle<i::Object> object = Utils::OpenHandle(*json_object);
+ i::Handle<i::Object> replacer = isolate->factory()->undefined_value();
+ i::Handle<i::String> gap_string = gap.IsEmpty()
+ ? isolate->factory()->empty_string()
+ : Utils::OpenHandle(*gap);
+ i::Handle<i::Object> maybe;
+ has_pending_exception = !i::JsonStringifier(isolate)
+ .Stringify(object, replacer, gap_string)
+ .ToHandle(&maybe);
+ RETURN_ON_FAILED_EXECUTION(String);
+ Local<String> result;
+ has_pending_exception =
+ !ToLocal<String>(i::Object::ToString(isolate, maybe), &result);
+ RETURN_ON_FAILED_EXECUTION(String);
+ RETURN_ESCAPED(result);
}
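A round-trip through the new context-aware overloads (string construction
spelled out so the sketch is self-contained):

    v8::Local<v8::String> json =
        v8::String::NewFromUtf8(isolate, "{\"a\":1}",
                                v8::NewStringType::kNormal)
            .ToLocalChecked();
    v8::Local<v8::Value> parsed;
    if (v8::JSON::Parse(context, json).ToLocal(&parsed)) {
      v8::Local<v8::String> out;
      if (v8::JSON::Stringify(context, parsed.As<v8::Object>(),
                              v8::Local<v8::String>())  // empty gap: compact
              .ToLocal(&out)) {
        // |out| is {"a":1} again; pass a non-empty |gap| to pretty-print.
      }
    }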
-
// --- D a t a ---
bool Value::FullIsUndefined() const {
- bool result = Utils::OpenHandle(this)->IsUndefined();
+ i::Handle<i::Object> object = Utils::OpenHandle(this);
+ bool result = false;
+ if (!object->IsSmi()) {
+ result = object->IsUndefined(i::HeapObject::cast(*object)->GetIsolate());
+ }
DCHECK_EQ(result, QuickIsUndefined());
return result;
}
bool Value::FullIsNull() const {
- bool result = Utils::OpenHandle(this)->IsNull();
+ i::Handle<i::Object> object = Utils::OpenHandle(this);
+ bool result = false;
+ if (!object->IsSmi()) {
+ result = object->IsNull(i::HeapObject::cast(*object)->GetIsolate());
+ }
DCHECK_EQ(result, QuickIsNull());
return result;
}
bool Value::IsTrue() const {
- return Utils::OpenHandle(this)->IsTrue();
+ i::Handle<i::Object> object = Utils::OpenHandle(this);
+ if (object->IsSmi()) return false;
+ return object->IsTrue(i::HeapObject::cast(*object)->GetIsolate());
}
bool Value::IsFalse() const {
- return Utils::OpenHandle(this)->IsFalse();
+ i::Handle<i::Object> object = Utils::OpenHandle(this);
+ if (object->IsSmi()) return false;
+ return object->IsFalse(i::HeapObject::cast(*object)->GetIsolate());
}
@@ -2878,6 +2945,13 @@ bool Value::IsNumber() const {
bool Value::IsProxy() const { return Utils::OpenHandle(this)->IsJSProxy(); }
+bool Value::IsWebAssemblyCompiledModule() const {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (!obj->IsJSObject()) return false;
+ i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
+ return js_obj->GetIsolate()->native_context()->wasm_module_constructor() ==
+ js_obj->map()->GetConstructor();
+}
#define VALUE_IS_SPECIFIC_TYPE(Type, Class) \
bool Value::Is##Type() const { \
@@ -2936,22 +3010,7 @@ bool Value::IsUint32() const {
bool Value::IsNativeError() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (!obj->IsJSObject()) return false;
- i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
- i::Isolate* isolate = js_obj->GetIsolate();
- i::Handle<i::Object> constructor(js_obj->map()->GetConstructor(), isolate);
- if (!constructor->IsJSFunction()) return false;
- i::Handle<i::JSFunction> function =
- i::Handle<i::JSFunction>::cast(constructor);
- if (!function->shared()->native()) return false;
- return function.is_identical_to(isolate->error_function()) ||
- function.is_identical_to(isolate->eval_error_function()) ||
- function.is_identical_to(isolate->range_error_function()) ||
- function.is_identical_to(isolate->reference_error_function()) ||
- function.is_identical_to(isolate->syntax_error_function()) ||
- function.is_identical_to(isolate->type_error_function()) ||
- function.is_identical_to(isolate->uri_error_function());
+ return Utils::OpenHandle(this)->IsJSError();
}
@@ -2983,17 +3042,12 @@ bool Value::IsSetIterator() const {
return Utils::OpenHandle(this)->IsJSSetIterator();
}
-
-bool Value::IsPromise() const {
- auto self = Utils::OpenHandle(this);
- return i::Object::IsPromise(self);
-}
-
+bool Value::IsPromise() const { return Utils::OpenHandle(this)->IsJSPromise(); }
MaybeLocal<String> Value::ToString(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
- PREPARE_FOR_EXECUTION(context, "ToString", String);
+ PREPARE_FOR_EXECUTION(context, Object, ToString, String);
Local<String> result;
has_pending_exception =
!ToLocal<String>(i::Object::ToString(isolate, obj), &result);
@@ -3010,14 +3064,9 @@ Local<String> Value::ToString(Isolate* isolate) const {
MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
- PREPARE_FOR_EXECUTION(context, "ToDetailString", String);
- Local<String> result;
- i::Handle<i::Object> args[] = {obj};
- has_pending_exception = !ToLocal<String>(
- i::Execution::TryCall(isolate, isolate->no_side_effects_to_string_fun(),
- isolate->factory()->undefined_value(),
- arraysize(args), args),
- &result);
+ PREPARE_FOR_EXECUTION(context, Object, ToDetailString, String);
+ Local<String> result =
+ Utils::ToLocal(i::Object::NoSideEffectsToString(isolate, obj));
RETURN_ON_FAILED_EXECUTION(String);
RETURN_ESCAPED(result);
}
@@ -3032,7 +3081,7 @@ Local<String> Value::ToDetailString(Isolate* isolate) const {
MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsJSReceiver()) return ToApiHandle<Object>(obj);
- PREPARE_FOR_EXECUTION(context, "ToObject", Object);
+ PREPARE_FOR_EXECUTION(context, Object, ToObject, Object);
Local<Object> result;
has_pending_exception =
!ToLocal<Object>(i::Object::ToObject(isolate, obj), &result);
@@ -3063,7 +3112,7 @@ Local<Boolean> Value::ToBoolean(Isolate* v8_isolate) const {
MaybeLocal<Number> Value::ToNumber(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return ToApiHandle<Number>(obj);
- PREPARE_FOR_EXECUTION(context, "ToNumber", Number);
+ PREPARE_FOR_EXECUTION(context, Object, ToNumber, Number);
Local<Number> result;
has_pending_exception = !ToLocal<Number>(i::Object::ToNumber(obj), &result);
RETURN_ON_FAILED_EXECUTION(Number);
@@ -3079,7 +3128,7 @@ Local<Number> Value::ToNumber(Isolate* isolate) const {
MaybeLocal<Integer> Value::ToInteger(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return ToApiHandle<Integer>(obj);
- PREPARE_FOR_EXECUTION(context, "ToInteger", Integer);
+ PREPARE_FOR_EXECUTION(context, Object, ToInteger, Integer);
Local<Integer> result;
has_pending_exception =
!ToLocal<Integer>(i::Object::ToInteger(isolate, obj), &result);
@@ -3097,7 +3146,7 @@ MaybeLocal<Int32> Value::ToInt32(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return ToApiHandle<Int32>(obj);
Local<Int32> result;
- PREPARE_FOR_EXECUTION(context, "ToInt32", Int32);
+ PREPARE_FOR_EXECUTION(context, Object, ToInt32, Int32);
has_pending_exception =
!ToLocal<Int32>(i::Object::ToInt32(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(Int32);
@@ -3114,7 +3163,7 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return ToApiHandle<Uint32>(obj);
Local<Uint32> result;
- PREPARE_FOR_EXECUTION(context, "ToUint32", Uint32);
+ PREPARE_FOR_EXECUTION(context, Object, ToUint32, Uint32);
has_pending_exception =
!ToLocal<Uint32>(i::Object::ToUint32(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(Uint32);
@@ -3129,62 +3178,55 @@ Local<Uint32> Value::ToUint32(Isolate* isolate) const {
void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- Utils::ApiCheck(isolate != NULL &&
- !isolate->IsDead(),
- "v8::internal::Internals::CheckInitialized()",
+ Utils::ApiCheck(isolate != NULL && !isolate->IsDead(),
+ "v8::internal::Internals::CheckInitialized",
"Isolate is not initialized or V8 has died");
}
void External::CheckCast(v8::Value* that) {
- Utils::ApiCheck(Utils::OpenHandle(that)->IsExternal(),
- "v8::External::Cast()",
+ Utils::ApiCheck(Utils::OpenHandle(that)->IsExternal(), "v8::External::Cast",
"Could not convert to external");
}
void v8::Object::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSReceiver(), "v8::Object::Cast()",
+ Utils::ApiCheck(obj->IsJSReceiver(), "v8::Object::Cast",
"Could not convert to object");
}
void v8::Function::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsCallable(), "v8::Function::Cast()",
+ Utils::ApiCheck(obj->IsCallable(), "v8::Function::Cast",
"Could not convert to function");
}
void v8::Boolean::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsBoolean(),
- "v8::Boolean::Cast()",
+ Utils::ApiCheck(obj->IsBoolean(), "v8::Boolean::Cast",
"Could not convert to boolean");
}
void v8::Name::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsName(),
- "v8::Name::Cast()",
- "Could not convert to name");
+ Utils::ApiCheck(obj->IsName(), "v8::Name::Cast", "Could not convert to name");
}
void v8::String::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsString(),
- "v8::String::Cast()",
+ Utils::ApiCheck(obj->IsString(), "v8::String::Cast",
"Could not convert to string");
}
void v8::Symbol::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsSymbol(),
- "v8::Symbol::Cast()",
+ Utils::ApiCheck(obj->IsSymbol(), "v8::Symbol::Cast",
"Could not convert to symbol");
}
@@ -3199,65 +3241,64 @@ void v8::Number::CheckCast(v8::Value* that) {
void v8::Integer::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsNumber(),
- "v8::Integer::Cast()",
+ Utils::ApiCheck(obj->IsNumber(), "v8::Integer::Cast",
"Could not convert to number");
}
void v8::Int32::CheckCast(v8::Value* that) {
- Utils::ApiCheck(that->IsInt32(), "v8::Int32::Cast()",
+ Utils::ApiCheck(that->IsInt32(), "v8::Int32::Cast",
"Could not convert to 32-bit signed integer");
}
void v8::Uint32::CheckCast(v8::Value* that) {
- Utils::ApiCheck(that->IsUint32(), "v8::Uint32::Cast()",
+ Utils::ApiCheck(that->IsUint32(), "v8::Uint32::Cast",
"Could not convert to 32-bit unsigned integer");
}
void v8::Array::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSArray(),
- "v8::Array::Cast()",
+ Utils::ApiCheck(obj->IsJSArray(), "v8::Array::Cast",
"Could not convert to array");
}
void v8::Map::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSMap(), "v8::Map::Cast()",
- "Could not convert to Map");
+ Utils::ApiCheck(obj->IsJSMap(), "v8::Map::Cast", "Could not convert to Map");
}
void v8::Set::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSSet(), "v8::Set::Cast()",
- "Could not convert to Set");
+ Utils::ApiCheck(obj->IsJSSet(), "v8::Set::Cast", "Could not convert to Set");
}
void v8::Promise::CheckCast(Value* that) {
- Utils::ApiCheck(that->IsPromise(),
- "v8::Promise::Cast()",
+ Utils::ApiCheck(that->IsPromise(), "v8::Promise::Cast",
"Could not convert to promise");
}
void v8::Promise::Resolver::CheckCast(Value* that) {
- Utils::ApiCheck(that->IsPromise(),
- "v8::Promise::Resolver::Cast()",
+ Utils::ApiCheck(that->IsPromise(), "v8::Promise::Resolver::Cast",
"Could not convert to promise resolver");
}
void v8::Proxy::CheckCast(Value* that) {
- Utils::ApiCheck(that->IsProxy(), "v8::Proxy::Cast()",
+ Utils::ApiCheck(that->IsProxy(), "v8::Proxy::Cast",
"Could not convert to proxy");
}
+void v8::WasmCompiledModule::CheckCast(Value* that) {
+ Utils::ApiCheck(that->IsWebAssemblyCompiledModule(),
+ "v8::WasmCompiledModule::Cast",
+ "Could not convert to wasm compiled module");
+}
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -3391,7 +3432,7 @@ bool Value::BooleanValue() const {
Maybe<double> Value::NumberValue(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(obj->Number());
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "NumberValue", double);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, NumberValue, double);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToNumber(obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double);
@@ -3412,7 +3453,7 @@ Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
if (obj->IsNumber()) {
return Just(NumberToInt64(*obj));
}
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, IntegerValue, int64_t);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
@@ -3436,7 +3477,7 @@ int64_t Value::IntegerValue() const {
Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToInt32(*obj));
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Int32Value", int32_t);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Int32Value, int32_t);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToInt32(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int32_t);
@@ -3455,7 +3496,7 @@ int32_t Value::Int32Value() const {
Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToUint32(*obj));
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Uint32Value", uint32_t);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Uint32Value, uint32_t);
i::Handle<i::Object> num;
has_pending_exception = !i::Object::ToUint32(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(uint32_t);
@@ -3477,7 +3518,7 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self);
return Local<Uint32>();
}
- PREPARE_FOR_EXECUTION(context, "ToArrayIndex", Uint32);
+ PREPARE_FOR_EXECUTION(context, Object, ToArrayIndex, Uint32);
i::Handle<i::Object> string_obj;
has_pending_exception =
!i::Object::ToString(isolate, self).ToHandle(&string_obj);
@@ -3543,10 +3584,16 @@ bool Value::SameValue(Local<Value> that) const {
return self->SameValue(*other);
}
+Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+ ENTER_V8(isolate);
+ LOG_API(isolate, Value, TypeOf);
+ return Utils::ToLocal(i::Object::TypeOf(isolate, Utils::OpenHandle(this)));
+}
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
v8::Local<Value> key, v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
auto value_obj = Utils::OpenHandle(*value);
@@ -3566,7 +3613,7 @@ bool v8::Object::Set(v8::Local<Value> key, v8::Local<Value> value) {
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception = i::Object::SetElement(isolate, self, index, value_obj,
@@ -3585,8 +3632,7 @@ bool v8::Object::Set(uint32_t index, v8::Local<Value> value) {
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
- bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -3604,8 +3650,7 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
uint32_t index,
v8::Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::CreateDataProperty()",
- bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, CreateDataProperty, bool);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -3622,8 +3667,7 @@ Maybe<bool> v8::Object::DefineOwnProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value,
v8::PropertyAttribute attributes) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DefineOwnProperty()",
- bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DefineOwnProperty, bool);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
@@ -3666,7 +3710,7 @@ static i::MaybeHandle<i::Object> DefineObjectProperty(
Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
v8::Local<Value> key, v8::Local<Value> value,
v8::PropertyAttribute attribs) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::ForceSet()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, ForceSet, bool);
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto key_obj = Utils::OpenHandle(*key);
auto value_obj = Utils::OpenHandle(*value);
@@ -3682,9 +3726,8 @@ Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
v8::PropertyAttribute attribs) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(),
- "v8::Object::ForceSet", false, i::HandleScope,
- false);
+ PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), Object, ForceSet,
+ false, i::HandleScope, false);
i::Handle<i::JSObject> self =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
@@ -3700,7 +3743,7 @@ bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetPrivate()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrivate, bool);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(reinterpret_cast<Name*>(*key));
auto value_obj = Utils::OpenHandle(*value);
@@ -3726,7 +3769,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
Local<Value> key) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
+ PREPARE_FOR_EXECUTION(context, Object, Get, Value);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> result;
@@ -3744,7 +3787,7 @@ Local<Value> v8::Object::Get(v8::Local<Value> key) {
MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
+ PREPARE_FOR_EXECUTION(context, Object, Get, Value);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
has_pending_exception =
@@ -3768,8 +3811,8 @@ MaybeLocal<Value> v8::Object::GetPrivate(Local<Context> context,
Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(
- context, "v8::Object::GetPropertyAttributes()", PropertyAttribute);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, GetPropertyAttributes,
+ PropertyAttribute);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsName()) {
@@ -3797,8 +3840,7 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local<Value> key) {
MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
Local<String> key) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyDescriptor()",
- Value);
+ PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyDescriptor, Value);
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
i::Handle<i::String> key_name = Utils::OpenHandle(*key);
@@ -3830,7 +3872,7 @@ Local<Value> v8::Object::GetPrototype() {
Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetPrototype()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetPrototype, bool);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
// We do not allow exceptions thrown while setting the prototype
@@ -3854,27 +3896,38 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Local<FunctionTemplate> tmpl) {
auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
- i::PrototypeIterator::START_AT_RECEIVER);
+ i::kStartAtReceiver);
auto tmpl_info = *Utils::OpenHandle(*tmpl);
- while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) {
+ while (!tmpl_info->IsTemplateFor(iter.GetCurrent<i::JSObject>())) {
iter.Advance();
- if (iter.IsAtEnd()) {
- return Local<Object>();
- }
+ if (iter.IsAtEnd()) return Local<Object>();
+ if (!iter.GetCurrent()->IsJSObject()) return Local<Object>();
}
// IsTemplateFor() ensures that iter.GetCurrent() can't be a Proxy here.
return Utils::ToLocal(i::handle(iter.GetCurrent<i::JSObject>(), isolate));
}
-
MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::GetPropertyNames()", Array);
+ return GetPropertyNames(
+ context, v8::KeyCollectionMode::kIncludePrototypes,
+ static_cast<v8::PropertyFilter>(ONLY_ENUMERABLE | SKIP_SYMBOLS),
+ v8::IndexFilter::kIncludeIndices);
+}
+
+MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
+ KeyCollectionMode mode,
+ PropertyFilter property_filter,
+ IndexFilter index_filter) {
+ PREPARE_FOR_EXECUTION(context, Object, GetPropertyNames, Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
- has_pending_exception =
- !i::JSReceiver::GetKeys(self, i::INCLUDE_PROTOS, i::ENUMERABLE_STRINGS)
- .ToHandle(&value);
+ i::KeyAccumulator accumulator(
+ isolate, static_cast<i::KeyCollectionMode>(mode),
+ static_cast<i::PropertyFilter>(property_filter));
+ accumulator.set_skip_indices(index_filter == IndexFilter::kSkipIndices);
+ has_pending_exception = accumulator.CollectKeys(self, self).IsNothing();
RETURN_ON_FAILED_EXECUTION(Array);
+ value = accumulator.GetKeys(i::GetKeysConversion::kKeepNumbers);
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
self->map()->instance_descriptors()->GetEnumCache() != *value);
@@ -3888,31 +3941,24 @@ Local<Array> v8::Object::GetPropertyNames() {
RETURN_TO_LOCAL_UNCHECKED(GetPropertyNames(context), Array);
}
-
MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyNames()", Array);
- auto self = Utils::OpenHandle(this);
- i::Handle<i::FixedArray> value;
- has_pending_exception =
- !i::JSReceiver::GetKeys(self, i::OWN_ONLY, i::ENUMERABLE_STRINGS)
- .ToHandle(&value);
- RETURN_ON_FAILED_EXECUTION(Array);
- DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
- self->map()->EnumLength() == 0 ||
- self->map()->instance_descriptors()->GetEnumCache() != *value);
- auto result = isolate->factory()->NewJSArrayWithElements(value);
- RETURN_ESCAPED(Utils::ToLocal(result));
+ return GetOwnPropertyNames(
+ context, static_cast<v8::PropertyFilter>(ONLY_ENUMERABLE | SKIP_SYMBOLS));
}
-
Local<Array> v8::Object::GetOwnPropertyNames() {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
}
+MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context,
+ PropertyFilter filter) {
+ return GetPropertyNames(context, KeyCollectionMode::kOwnOnly, filter,
+ v8::IndexFilter::kIncludeIndices);
+}
MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString", String);
+ PREPARE_FOR_EXECUTION(context, Object, ObjectProtoToString, String);
auto obj = Utils::OpenHandle(this);
Local<String> result;
has_pending_exception =
@@ -3936,8 +3982,7 @@ Local<String> v8::Object::GetConstructorName() {
Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
IntegrityLevel level) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetIntegrityLevel()",
- bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetIntegrityLevel, bool);
auto self = Utils::OpenHandle(this);
i::JSReceiver::IntegrityLevel i_level =
level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
@@ -3949,7 +3994,7 @@ Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
}
Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Delete, bool);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
Maybe<bool> result =
@@ -3973,7 +4018,7 @@ Maybe<bool> v8::Object::DeletePrivate(Local<Context> context,
Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Get, bool);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
Maybe<bool> maybe = Nothing<bool>();
@@ -4006,8 +4051,7 @@ Maybe<bool> v8::Object::HasPrivate(Local<Context> context, Local<Private> key) {
Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DeleteProperty()",
- bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, DeleteProperty, bool);
auto self = Utils::OpenHandle(this);
Maybe<bool> result = i::JSReceiver::DeleteElement(self, index);
has_pending_exception = result.IsNothing();
@@ -4023,7 +4067,7 @@ bool v8::Object::Delete(uint32_t index) {
Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Get, bool);
auto self = Utils::OpenHandle(this);
auto maybe = i::JSReceiver::HasElement(self, index);
has_pending_exception = maybe.IsNothing();
@@ -4044,7 +4088,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Setter setter, Data data,
AccessControl settings,
PropertyAttribute attributes) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetAccessor()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetAccessor, bool);
if (!Utils::OpenHandle(self)->IsJSObject()) return Just(false);
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
@@ -4057,7 +4101,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
has_pending_exception =
!i::JSObject::SetAccessor(obj, info).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- if (result->IsUndefined()) return Nothing<bool>();
+ if (result->IsUndefined(obj->GetIsolate())) return Nothing<bool>();
if (fast) {
i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
}
@@ -4116,8 +4160,7 @@ void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasOwnProperty()",
- bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool);
auto self = Utils::OpenHandle(this);
auto key_val = Utils::OpenHandle(*key);
auto result = i::JSReceiver::HasOwnProperty(self, key_val);
@@ -4126,6 +4169,14 @@ Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
return result;
}
+Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context, uint32_t index) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasOwnProperty, bool);
+ auto self = Utils::OpenHandle(this);
+ auto result = i::JSReceiver::HasOwnProperty(self, index);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+}
bool v8::Object::HasOwnProperty(Local<String> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
@@ -4135,8 +4186,7 @@ bool v8::Object::HasOwnProperty(Local<String> key) {
Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasRealNamedProperty()",
- bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedProperty, bool);
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
@@ -4156,8 +4206,8 @@ bool v8::Object::HasRealNamedProperty(Local<String> key) {
Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
uint32_t index) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context,
- "v8::Object::HasRealIndexedProperty()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealIndexedProperty,
+ bool);
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Just(false);
auto result = i::JSObject::HasRealElementProperty(
@@ -4176,8 +4226,8 @@ bool v8::Object::HasRealIndexedProperty(uint32_t index) {
Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
Local<Name> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(
- context, "v8::Object::HasRealNamedCallbackProperty()", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, HasRealNamedCallbackProperty,
+ bool);
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Just(false);
auto key_val = Utils::OpenHandle(*key);
@@ -4211,8 +4261,8 @@ bool v8::Object::HasIndexedLookupInterceptor() {
MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Local<Context> context, Local<Name> key) {
- PREPARE_FOR_EXECUTION(
- context, "v8::Object::GetRealNamedPropertyInPrototypeChain()", Value);
+ PREPARE_FOR_EXECUTION(context, Object, GetRealNamedPropertyInPrototypeChain,
+ Value);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return MaybeLocal<Value>();
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
@@ -4243,7 +4293,7 @@ Maybe<PropertyAttribute>
v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
Local<Context> context, Local<Name> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(
- context, "v8::Object::GetRealNamedPropertyAttributesInPrototypeChain()",
+ context, Object, GetRealNamedPropertyAttributesInPrototypeChain,
PropertyAttribute);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return Nothing<PropertyAttribute>();
@@ -4273,7 +4323,7 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(Local<String> key) {
MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
Local<Name> key) {
- PREPARE_FOR_EXECUTION(context, "v8::Object::GetRealNamedProperty()", Value);
+ PREPARE_FOR_EXECUTION(context, Object, GetRealNamedProperty, Value);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
@@ -4296,8 +4346,7 @@ Local<Value> v8::Object::GetRealNamedProperty(Local<String> key) {
Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key) {
PREPARE_FOR_EXECUTION_PRIMITIVE(
- context, "v8::Object::GetRealNamedPropertyAttributes()",
- PropertyAttribute);
+ context, Object, GetRealNamedPropertyAttributes, PropertyAttribute);
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
i::LookupIterator it = i::LookupIterator::PropertyOrElement(
@@ -4342,60 +4391,7 @@ int v8::Object::GetIdentityHash() {
auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope scope(isolate);
auto self = Utils::OpenHandle(this);
- return i::JSReceiver::GetOrCreateIdentityHash(self)->value();
-}
-
-
-bool v8::Object::SetHiddenValue(v8::Local<v8::String> key,
- v8::Local<v8::Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
- if (!self->IsJSObject()) return false;
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string =
- isolate->factory()->InternalizeString(key_obj);
- if (value.IsEmpty()) {
- i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
- key_string);
- return true;
- }
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- i::Handle<i::Object> result = i::JSObject::SetHiddenProperty(
- i::Handle<i::JSObject>::cast(self), key_string, value_obj);
- return *result == *self;
-}
-
-
-v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Local<v8::String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
- if (!self->IsJSObject()) return v8::Local<v8::Value>();
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string =
- isolate->factory()->InternalizeString(key_obj);
- i::Handle<i::Object> result(
- i::Handle<i::JSObject>::cast(self)->GetHiddenProperty(key_string),
- isolate);
- if (result->IsTheHole()) return v8::Local<v8::Value>();
- return Utils::ToLocal(result);
-}
-
-
-bool v8::Object::DeleteHiddenValue(v8::Local<v8::String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
- if (!self->IsJSObject()) return false;
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_string =
- isolate->factory()->InternalizeString(key_obj);
- i::JSObject::DeleteHiddenProperty(i::Handle<i::JSObject>::cast(self),
- key_string);
- return true;
+ return i::JSReceiver::GetOrCreateIdentityHash(isolate, self)->value();
}
@@ -4404,14 +4400,17 @@ bool v8::Object::IsCallable() {
return self->IsCallable();
}
+bool v8::Object::IsConstructor() {
+ auto self = Utils::OpenHandle(this);
+ return self->IsConstructor();
+}
MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
Local<Value> recv, int argc,
Local<Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Object::CallAsFunction()",
- Value);
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsFunction, Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT0("v8", "V8.Execute");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -4435,10 +4434,10 @@ Local<v8::Value> Object::CallAsFunction(v8::Local<v8::Value> recv, int argc,
MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
Local<Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context,
- "v8::Object::CallAsConstructor()", Value);
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Object, CallAsConstructor,
+ Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT0("v8", "V8.Execute");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4457,28 +4456,23 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
RETURN_TO_LOCAL_UNCHECKED(CallAsConstructor(context, argc, argv_cast), Value);
}
-
-MaybeLocal<Function> Function::New(Local<Context> context,
- FunctionCallback callback, Local<Value> data,
- int length) {
- return New(context, callback, data, length, ConstructorBehavior::kAllow);
-}
-
MaybeLocal<Function> Function::New(Local<Context> context,
FunctionCallback callback, Local<Value> data,
int length, ConstructorBehavior behavior) {
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
- LOG_API(isolate, "Function::New");
+ LOG_API(isolate, Function, New);
ENTER_V8(isolate);
- auto tmpl = FunctionTemplateNew(isolate, callback, nullptr, data,
- Local<Signature>(), length, true);
- if (behavior == ConstructorBehavior::kThrow) tmpl->RemovePrototype();
- return tmpl->GetFunction(context);
+ auto templ = FunctionTemplateNew(isolate, callback, nullptr, data,
+ Local<Signature>(), length, true);
+ if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
+ return templ->GetFunction(context);
}
+
Local<Function> Function::New(Isolate* v8_isolate, FunctionCallback callback,
Local<Value> data, int length) {
- return Function::New(v8_isolate->GetCurrentContext(), callback, data, length)
+ return Function::New(v8_isolate->GetCurrentContext(), callback, data, length,
+ ConstructorBehavior::kAllow)
.FromMaybe(Local<Function>());
}
@@ -4491,10 +4485,9 @@ Local<v8::Object> Function::NewInstance() const {
MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
v8::Local<v8::Value> argv[]) const {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::NewInstance()",
- Object);
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, NewInstance, Object);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT0("v8", "V8.Execute");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4516,9 +4509,9 @@ Local<v8::Object> Function::NewInstance(int argc,
MaybeLocal<v8::Value> Function::Call(Local<Context> context,
v8::Local<v8::Value> recv, int argc,
v8::Local<v8::Value> argv[]) {
- PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::Call()", Value);
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, Function, Call, Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- TRACE_EVENT0("v8", "V8.Execute");
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -4548,16 +4541,20 @@ void Function::SetName(v8::Local<v8::String> name) {
Local<Value> Function::GetName() const {
auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
if (self->IsJSBoundFunction()) {
auto func = i::Handle<i::JSBoundFunction>::cast(self);
- return Utils::ToLocal(handle(func->name(), func->GetIsolate()));
+ i::Handle<i::Object> name;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, name,
+ i::JSBoundFunction::GetName(isolate, func),
+ Local<Value>());
+ return Utils::ToLocal(name);
}
if (self->IsJSFunction()) {
auto func = i::Handle<i::JSFunction>::cast(self);
- return Utils::ToLocal(handle(func->shared()->name(), func->GetIsolate()));
+ return Utils::ToLocal(handle(func->shared()->name(), isolate));
}
- return ToApiHandle<Primitive>(
- self->GetIsolate()->factory()->undefined_value());
+ return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -5224,10 +5221,10 @@ int String::WriteUtf8(char* buffer,
int capacity,
int* nchars_ref,
int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- LOG_API(isolate, "String::WriteUtf8");
- ENTER_V8(isolate);
i::Handle<i::String> str = Utils::OpenHandle(this);
+ i::Isolate* isolate = str->GetIsolate();
+ LOG_API(isolate, String, WriteUtf8);
+ ENTER_V8(isolate);
if (options & HINT_MANY_WRITES_EXPECTED) {
str = i::String::Flatten(str); // Flatten the string for efficiency.
}
@@ -5243,7 +5240,7 @@ int String::WriteUtf8(char* buffer,
if (success) return writer.CompleteWrite(write_null, nchars_ref);
} else if (capacity >= string_length) {
// First check that the buffer is large enough.
- int utf8_bytes = v8::Utf8Length(*str, str->GetIsolate());
+ int utf8_bytes = v8::Utf8Length(*str, isolate);
if (utf8_bytes <= capacity) {
// one-byte fast path.
if (utf8_bytes == string_length) {
@@ -5278,7 +5275,7 @@ static inline int WriteHelper(const String* string,
int length,
int options) {
i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
- LOG_API(isolate, "String::Write");
+ LOG_API(isolate, String, Write);
ENTER_V8(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
@@ -5399,7 +5396,7 @@ double Number::Value() const {
bool Boolean::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsTrue();
+ return obj->IsTrue(i::HeapObject::cast(*obj)->GetIsolate());
}
@@ -5477,7 +5474,6 @@ void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
i::Handle<i::JSObject>::cast(obj)->GetInternalField(index), location);
}
-
void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetAlignedPointerInInternalField()";
@@ -5487,10 +5483,31 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
}
+void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
+ void* values[]) {
+ i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
+ const char* location = "v8::Object::SetAlignedPointerInInternalFields()";
+ i::DisallowHeapAllocation no_gc;
+ i::JSObject* object = i::JSObject::cast(*obj);
+ int nof_internal_fields = object->GetInternalFieldCount();
+ for (int i = 0; i < argc; i++) {
+ int index = indices[i];
+ if (!Utils::ApiCheck(index < nof_internal_fields, location,
+ "Internal field out of bounds")) {
+ return;
+ }
+ void* value = values[i];
+ object->SetInternalField(index, EncodeAlignedAsSmi(value, location));
+ DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
+ }
+}
static void* ExternalValue(i::Object* obj) {
// Obscure semantics for undefined, but somehow checked in our unit tests...
- if (obj->IsUndefined()) return NULL;
+ if (!obj->IsSmi() &&
+ obj->IsUndefined(i::HeapObject::cast(obj)->GetIsolate())) {
+ return NULL;
+ }
i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
return i::Foreign::cast(foreign)->foreign_address();
}
@@ -5544,6 +5561,8 @@ HeapStatistics::HeapStatistics()
total_available_size_(0),
used_heap_size_(0),
heap_size_limit_(0),
+ malloced_memory_(0),
+ peak_malloced_memory_(0),
does_zap_garbage_(0) {}
HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
@@ -5559,11 +5578,17 @@ HeapObjectStatistics::HeapObjectStatistics()
object_count_(0),
object_size_(0) {}
+HeapCodeStatistics::HeapCodeStatistics()
+ : code_and_metadata_size_(0), bytecode_and_metadata_size_(0) {}
bool v8::V8::InitializeICU(const char* icu_data_file) {
return i::InitializeICU(icu_data_file);
}
+bool v8::V8::InitializeICUDefaultLocation(const char* exec_path,
+ const char* icu_data_file) {
+ return i::InitializeICUDefaultLocation(exec_path, icu_data_file);
+}
void v8::V8::InitializeExternalStartupData(const char* directory_path) {
i::InitializeExternalStartupData(directory_path);
@@ -5580,21 +5605,51 @@ const char* v8::V8::GetVersion() {
return i::Version::GetVersion();
}
+template <typename ObjectType>
+struct InvokeBootstrapper;
-static i::Handle<i::Context> CreateEnvironment(
+template <>
+struct InvokeBootstrapper<i::Context> {
+ i::Handle<i::Context> Invoke(
+ i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_object_template,
+ v8::ExtensionConfiguration* extensions, size_t context_snapshot_index) {
+ return isolate->bootstrapper()->CreateEnvironment(
+ maybe_global_proxy, global_object_template, extensions,
+ context_snapshot_index);
+ }
+};
+
+template <>
+struct InvokeBootstrapper<i::JSGlobalProxy> {
+ i::Handle<i::JSGlobalProxy> Invoke(
+ i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_object_template,
+ v8::ExtensionConfiguration* extensions, size_t context_snapshot_index) {
+ USE(extensions);
+ USE(context_snapshot_index);
+ return isolate->bootstrapper()->NewRemoteContext(maybe_global_proxy,
+ global_object_template);
+ }
+};
+
+template <typename ObjectType>
+static i::Handle<ObjectType> CreateEnvironment(
i::Isolate* isolate, v8::ExtensionConfiguration* extensions,
- v8::Local<ObjectTemplate> global_template,
- v8::Local<Value> maybe_global_proxy) {
- i::Handle<i::Context> env;
+ v8::MaybeLocal<ObjectTemplate> maybe_global_template,
+ v8::MaybeLocal<Value> maybe_global_proxy, size_t context_snapshot_index) {
+ i::Handle<ObjectType> result;
// Enter V8 via an ENTER_V8 scope.
{
ENTER_V8(isolate);
- v8::Local<ObjectTemplate> proxy_template = global_template;
+ v8::Local<ObjectTemplate> proxy_template;
i::Handle<i::FunctionTemplateInfo> proxy_constructor;
i::Handle<i::FunctionTemplateInfo> global_constructor;
- if (!global_template.IsEmpty()) {
+ if (!maybe_global_template.IsEmpty()) {
+ v8::Local<v8::ObjectTemplate> global_template =
+ maybe_global_template.ToLocalChecked();
// Make sure that the global_template has a constructor.
global_constructor = EnsureConstructor(isolate, *global_template);
@@ -5611,7 +5666,7 @@ static i::Handle<i::Context> CreateEnvironment(
// Migrate security handlers from global_template to
// proxy_template. Temporarily removing access check
// information from the global template.
- if (!global_constructor->access_check_info()->IsUndefined()) {
+ if (!global_constructor->access_check_info()->IsUndefined(isolate)) {
proxy_constructor->set_access_check_info(
global_constructor->access_check_info());
proxy_constructor->set_needs_access_check(
@@ -5622,17 +5677,18 @@ static i::Handle<i::Context> CreateEnvironment(
}
}
- i::Handle<i::Object> proxy = Utils::OpenHandle(*maybe_global_proxy, true);
i::MaybeHandle<i::JSGlobalProxy> maybe_proxy;
- if (!proxy.is_null()) {
- maybe_proxy = i::Handle<i::JSGlobalProxy>::cast(proxy);
+ if (!maybe_global_proxy.IsEmpty()) {
+ maybe_proxy = i::Handle<i::JSGlobalProxy>::cast(
+ Utils::OpenHandle(*maybe_global_proxy.ToLocalChecked()));
}
// Create the environment.
- env = isolate->bootstrapper()->CreateEnvironment(
- maybe_proxy, proxy_template, extensions);
+ InvokeBootstrapper<ObjectType> invoke;
+ result = invoke.Invoke(isolate, maybe_proxy, proxy_template, extensions,
+ context_snapshot_index);
// Restore the access check info on the global template.
- if (!global_template.IsEmpty()) {
+ if (!maybe_global_template.IsEmpty()) {
DCHECK(!global_constructor.is_null());
DCHECK(!proxy_constructor.is_null());
global_constructor->set_access_check_info(
@@ -5643,20 +5699,23 @@ static i::Handle<i::Context> CreateEnvironment(
}
// Leave V8.
- return env;
+ return result;
}
-Local<Context> v8::Context::New(v8::Isolate* external_isolate,
- v8::ExtensionConfiguration* extensions,
- v8::Local<ObjectTemplate> global_template,
- v8::Local<Value> global_object) {
+Local<Context> NewContext(v8::Isolate* external_isolate,
+ v8::ExtensionConfiguration* extensions,
+ v8::MaybeLocal<ObjectTemplate> global_template,
+ v8::MaybeLocal<Value> global_object,
+ size_t context_snapshot_index) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
- LOG_API(isolate, "Context::New");
+ LOG_API(isolate, Context, New);
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
i::HandleScope scope(isolate);
ExtensionConfiguration no_extensions;
if (extensions == NULL) extensions = &no_extensions;
i::Handle<i::Context> env =
- CreateEnvironment(isolate, extensions, global_template, global_object);
+ CreateEnvironment<i::Context>(isolate, extensions, global_template,
+ global_object, context_snapshot_index);
if (env.is_null()) {
if (isolate->has_pending_exception()) {
isolate->OptionalRescheduleException(true);
@@ -5666,6 +5725,57 @@ Local<Context> v8::Context::New(v8::Isolate* external_isolate,
return Utils::ToLocal(scope.CloseAndEscape(env));
}
+Local<Context> v8::Context::New(v8::Isolate* external_isolate,
+ v8::ExtensionConfiguration* extensions,
+ v8::MaybeLocal<ObjectTemplate> global_template,
+ v8::MaybeLocal<Value> global_object) {
+ return NewContext(external_isolate, extensions, global_template,
+ global_object, 0);
+}
+
+MaybeLocal<Context> v8::Context::FromSnapshot(
+ v8::Isolate* external_isolate, size_t context_snapshot_index,
+ v8::ExtensionConfiguration* extensions,
+ v8::MaybeLocal<ObjectTemplate> global_template,
+ v8::MaybeLocal<Value> global_object) {
+ if (!i::Snapshot::HasContextSnapshot(
+ reinterpret_cast<i::Isolate*>(external_isolate),
+ context_snapshot_index)) {
+ return MaybeLocal<Context>();
+ }
+ return NewContext(external_isolate, extensions, global_template,
+ global_object, context_snapshot_index);
+}
+
+MaybeLocal<Object> v8::Context::NewRemoteContext(
+ v8::Isolate* external_isolate, v8::Local<ObjectTemplate> global_template,
+ v8::MaybeLocal<v8::Value> global_object) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+ LOG_API(isolate, Context, NewRemoteContext);
+ i::HandleScope scope(isolate);
+ i::Handle<i::FunctionTemplateInfo> global_constructor =
+ EnsureConstructor(isolate, *global_template);
+ Utils::ApiCheck(global_constructor->needs_access_check(),
+ "v8::Context::NewRemoteContext",
+ "Global template needs to have access checks enabled.");
+ i::Handle<i::AccessCheckInfo> access_check_info = i::handle(
+ i::AccessCheckInfo::cast(global_constructor->access_check_info()),
+ isolate);
+ Utils::ApiCheck(access_check_info->named_interceptor() != nullptr,
+ "v8::Context::NewRemoteContext",
+ "Global template needs to have access check handlers.");
+ i::Handle<i::JSGlobalProxy> global_proxy =
+ CreateEnvironment<i::JSGlobalProxy>(isolate, nullptr, global_template,
+ global_object, 0);
+ if (global_proxy.is_null()) {
+ if (isolate->has_pending_exception()) {
+ isolate->OptionalRescheduleException(true);
+ }
+ return MaybeLocal<Object>();
+ }
+ return Utils::ToLocal(
+ scope.CloseAndEscape(i::Handle<i::JSObject>::cast(global_proxy)));
+}
void v8::Context::SetSecurityToken(Local<Value> token) {
i::Handle<i::Context> env = Utils::OpenHandle(this);
@@ -5736,7 +5846,8 @@ void Context::AllowCodeGenerationFromStrings(bool allow) {
bool Context::IsCodeGenerationFromStringsAllowed() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
- return !context->allow_code_gen_from_strings()->IsFalse();
+ return !context->allow_code_gen_from_strings()->IsFalse(
+ context->GetIsolate());
}
@@ -5754,7 +5865,7 @@ size_t Context::EstimatedSize() {
MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
- PREPARE_FOR_EXECUTION(context, "v8::ObjectTemplate::NewInstance()", Object);
+ PREPARE_FOR_EXECUTION(context, ObjectTemplate, NewInstance, Object);
auto self = Utils::OpenHandle(this);
Local<Object> result;
has_pending_exception =
@@ -5771,8 +5882,7 @@ Local<v8::Object> ObjectTemplate::NewInstance() {
MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
- PREPARE_FOR_EXECUTION(context, "v8::FunctionTemplate::GetFunction()",
- Function);
+ PREPARE_FOR_EXECUTION(context, FunctionTemplate, GetFunction, Function);
auto self = Utils::OpenHandle(this);
Local<Function> result;
has_pending_exception =
@@ -5787,18 +5897,44 @@ Local<v8::Function> FunctionTemplate::GetFunction() {
RETURN_TO_LOCAL_UNCHECKED(GetFunction(context), Function);
}
+MaybeLocal<v8::Object> FunctionTemplate::NewRemoteInstance() {
+ auto self = Utils::OpenHandle(this);
+ i::Isolate* isolate = self->GetIsolate();
+ LOG_API(isolate, FunctionTemplate, NewRemoteInstance);
+ i::HandleScope scope(isolate);
+ i::Handle<i::FunctionTemplateInfo> constructor =
+ EnsureConstructor(isolate, *InstanceTemplate());
+ Utils::ApiCheck(constructor->needs_access_check(),
+ "v8::FunctionTemplate::NewRemoteInstance",
+ "InstanceTemplate needs to have access checks enabled.");
+ i::Handle<i::AccessCheckInfo> access_check_info = i::handle(
+ i::AccessCheckInfo::cast(constructor->access_check_info()), isolate);
+ Utils::ApiCheck(access_check_info->named_interceptor() != nullptr,
+ "v8::FunctionTemplate::NewRemoteInstance",
+ "InstanceTemplate needs to have access check handlers.");
+ i::Handle<i::JSObject> object;
+ if (!i::ApiNatives::InstantiateRemoteObject(
+ Utils::OpenHandle(*InstanceTemplate()))
+ .ToHandle(&object)) {
+ if (isolate->has_pending_exception()) {
+ isolate->OptionalRescheduleException(true);
+ }
+ return MaybeLocal<Object>();
+ }
+ return Utils::ToLocal(scope.CloseAndEscape(object));
+}
bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
auto self = Utils::OpenHandle(this);
auto obj = Utils::OpenHandle(*value);
- return self->IsTemplateFor(*obj);
+ return obj->IsJSObject() && self->IsTemplateFor(i::JSObject::cast(*obj));
}
Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "External::New");
+ LOG_API(i_isolate, External, New);
ENTER_V8(i_isolate);
i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
return Utils::ExternalToLocal(external);
@@ -5866,42 +6002,42 @@ inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
-
-template <typename Char>
-inline MaybeLocal<String> NewString(Isolate* v8_isolate, const char* location,
- const char* env, const Char* data,
- v8::NewStringType type, int length) {
- i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
- if (length == 0) return String::Empty(v8_isolate);
- // TODO(dcarney): throw a context free exception.
- if (length > i::String::kMaxLength) return MaybeLocal<String>();
- ENTER_V8(isolate);
- LOG_API(isolate, env);
- if (length < 0) length = StringLength(data);
- i::Handle<i::String> result =
- NewString(isolate->factory(), type, i::Vector<const Char>(data, length))
- .ToHandleChecked();
- return Utils::ToLocal(result);
-}
-
} // anonymous namespace
+// TODO(dcarney): throw a context free exception.
+#define NEW_STRING(isolate, class_name, function_name, Char, data, type, \
+ length) \
+ MaybeLocal<String> result; \
+ if (length == 0) { \
+ result = String::Empty(isolate); \
+ } else if (length > i::String::kMaxLength) { \
+ result = MaybeLocal<String>(); \
+ } else { \
+ i::Isolate* i_isolate = reinterpret_cast<internal::Isolate*>(isolate); \
+ ENTER_V8(i_isolate); \
+ LOG_API(i_isolate, class_name, function_name); \
+ if (length < 0) length = StringLength(data); \
+ i::Handle<i::String> handle_result = \
+ NewString(i_isolate->factory(), type, \
+ i::Vector<const Char>(data, length)) \
+ .ToHandleChecked(); \
+ result = Utils::ToLocal(handle_result); \
+ }
Local<String> String::NewFromUtf8(Isolate* isolate,
const char* data,
NewStringType type,
int length) {
- RETURN_TO_LOCAL_UNCHECKED(
- NewString(isolate, "v8::String::NewFromUtf8()", "String::NewFromUtf8",
- data, static_cast<v8::NewStringType>(type), length),
- String);
+ NEW_STRING(isolate, String, NewFromUtf8, char, data,
+ static_cast<v8::NewStringType>(type), length);
+ RETURN_TO_LOCAL_UNCHECKED(result, String);
}
MaybeLocal<String> String::NewFromUtf8(Isolate* isolate, const char* data,
v8::NewStringType type, int length) {
- return NewString(isolate, "v8::String::NewFromUtf8()", "String::NewFromUtf8",
- data, type, length);
+ NEW_STRING(isolate, String, NewFromUtf8, char, data, type, length);
+ return result;
}
@@ -5909,18 +6045,16 @@ Local<String> String::NewFromOneByte(Isolate* isolate,
const uint8_t* data,
NewStringType type,
int length) {
- RETURN_TO_LOCAL_UNCHECKED(
- NewString(isolate, "v8::String::NewFromOneByte()",
- "String::NewFromOneByte", data,
- static_cast<v8::NewStringType>(type), length),
- String);
+ NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data,
+ static_cast<v8::NewStringType>(type), length);
+ RETURN_TO_LOCAL_UNCHECKED(result, String);
}
MaybeLocal<String> String::NewFromOneByte(Isolate* isolate, const uint8_t* data,
v8::NewStringType type, int length) {
- return NewString(isolate, "v8::String::NewFromOneByte()",
- "String::NewFromOneByte", data, type, length);
+ NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data, type, length);
+ return result;
}
@@ -5928,19 +6062,17 @@ Local<String> String::NewFromTwoByte(Isolate* isolate,
const uint16_t* data,
NewStringType type,
int length) {
- RETURN_TO_LOCAL_UNCHECKED(
- NewString(isolate, "v8::String::NewFromTwoByte()",
- "String::NewFromTwoByte", data,
- static_cast<v8::NewStringType>(type), length),
- String);
+ NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, data,
+ static_cast<v8::NewStringType>(type), length);
+ RETURN_TO_LOCAL_UNCHECKED(result, String);
}
MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
const uint16_t* data,
v8::NewStringType type, int length) {
- return NewString(isolate, "v8::String::NewFromTwoByte()",
- "String::NewFromTwoByte", data, type, length);
+ NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, data, type, length);
+ return result;
}
@@ -5948,7 +6080,7 @@ Local<String> v8::String::Concat(Local<String> left, Local<String> right) {
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
i::Isolate* isolate = left_string->GetIsolate();
ENTER_V8(isolate);
- LOG_API(isolate, "v8::String::Concat");
+ LOG_API(isolate, String, Concat);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
// If we are steering towards a range error, do not wait for the error to be
// thrown, and return the null handle instead.
@@ -5970,7 +6102,7 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
- LOG_API(i_isolate, "String::NewExternalTwoByte");
+ LOG_API(i_isolate, String, NewExternalTwoByte);
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromTwoByte(resource)
.ToHandleChecked();
@@ -5994,7 +6126,7 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
- LOG_API(i_isolate, "String::NewExternalOneByte");
+ LOG_API(i_isolate, String, NewExternalOneByte);
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromOneByte(resource)
.ToHandleChecked();
@@ -6058,14 +6190,11 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
i::Handle<i::String> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
+ if (obj->IsExternalString()) return false;
// Old space strings should be externalized.
- if (!isolate->heap()->new_space()->Contains(*obj)) return true;
- int size = obj->Size(); // Byte size of the original string.
- if (size <= i::ExternalString::kShortSize) return false;
- i::StringShape shape(*obj);
- return !shape.IsExternal();
+ i::Isolate* isolate = obj->GetIsolate();
+ return !isolate->heap()->new_space()->Contains(*obj);
}
@@ -6077,7 +6206,7 @@ Isolate* v8::Object::GetIsolate() {
Local<v8::Object> v8::Object::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Object::New");
+ LOG_API(i_isolate, Object, New);
ENTER_V8(i_isolate);
i::Handle<i::JSObject> obj =
i_isolate->factory()->NewJSObject(i_isolate->object_function());
@@ -6087,7 +6216,7 @@ Local<v8::Object> v8::Object::New(Isolate* isolate) {
Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "NumberObject::New");
+ LOG_API(i_isolate, NumberObject, New);
ENTER_V8(i_isolate);
i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
i::Handle<i::Object> obj =
@@ -6100,14 +6229,14 @@ double v8::NumberObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
i::Isolate* isolate = jsvalue->GetIsolate();
- LOG_API(isolate, "NumberObject::NumberValue");
+ LOG_API(isolate, NumberObject, NumberValue);
return jsvalue->value()->Number();
}
Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "BooleanObject::New");
+ LOG_API(i_isolate, BooleanObject, New);
ENTER_V8(i_isolate);
i::Handle<i::Object> boolean(value ? i_isolate->heap()->true_value()
: i_isolate->heap()->false_value(),
@@ -6127,15 +6256,15 @@ bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
i::Isolate* isolate = jsvalue->GetIsolate();
- LOG_API(isolate, "BooleanObject::BooleanValue");
- return jsvalue->value()->IsTrue();
+ LOG_API(isolate, BooleanObject, BooleanValue);
+ return jsvalue->value()->IsTrue(isolate);
}
Local<v8::Value> v8::StringObject::New(Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
i::Isolate* isolate = string->GetIsolate();
- LOG_API(isolate, "StringObject::New");
+ LOG_API(isolate, StringObject, New);
ENTER_V8(isolate);
i::Handle<i::Object> obj =
i::Object::ToObject(isolate, string).ToHandleChecked();
@@ -6147,7 +6276,7 @@ Local<v8::String> v8::StringObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
i::Isolate* isolate = jsvalue->GetIsolate();
- LOG_API(isolate, "StringObject::StringValue");
+ LOG_API(isolate, StringObject, StringValue);
return Utils::ToLocal(
i::Handle<i::String>(i::String::cast(jsvalue->value())));
}
@@ -6155,7 +6284,7 @@ Local<v8::String> v8::StringObject::ValueOf() const {
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "SymbolObject::New");
+ LOG_API(i_isolate, SymbolObject, New);
ENTER_V8(i_isolate);
i::Handle<i::Object> obj = i::Object::ToObject(
i_isolate, Utils::OpenHandle(*value)).ToHandleChecked();
@@ -6167,7 +6296,7 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
i::Isolate* isolate = jsvalue->GetIsolate();
- LOG_API(isolate, "SymbolObject::SymbolValue");
+ LOG_API(isolate, SymbolObject, SymbolValue);
return Utils::ToLocal(
i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value())));
}
@@ -6178,7 +6307,7 @@ MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
time = std::numeric_limits<double>::quiet_NaN();
}
- PREPARE_FOR_EXECUTION(context, "Date::New", Value);
+ PREPARE_FOR_EXECUTION(context, Date, New, Value);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
i::JSDate::New(isolate->date_function(), isolate->date_function(), time),
@@ -6198,14 +6327,14 @@ double v8::Date::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
i::Isolate* isolate = jsdate->GetIsolate();
- LOG_API(isolate, "Date::NumberValue");
+ LOG_API(isolate, Date, NumberValue);
return jsdate->value()->Number();
}
void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Date::DateTimeConfigurationChangeNotification");
+ LOG_API(i_isolate, Date, DateTimeConfigurationChangeNotification);
ENTER_V8(i_isolate);
i_isolate->date_cache()->ResetDateCache();
if (!i_isolate->eternal_handles()->Exists(
@@ -6225,7 +6354,7 @@ void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
Local<String> pattern, Flags flags) {
- PREPARE_FOR_EXECUTION(context, "RegExp::New", RegExp);
+ PREPARE_FOR_EXECUTION(context, RegExp, New, RegExp);
Local<v8::RegExp> result;
has_pending_exception =
!ToLocal<RegExp>(i::JSRegExp::New(Utils::OpenHandle(*pattern),
@@ -6270,7 +6399,7 @@ v8::RegExp::Flags v8::RegExp::GetFlags() const {
Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Array::New");
+ LOG_API(i_isolate, Array, New);
ENTER_V8(i_isolate);
int real_length = length > 0 ? length : 0;
i::Handle<i::JSArray> obj = i_isolate->factory()->NewJSArray(real_length);
@@ -6294,7 +6423,7 @@ uint32_t v8::Array::Length() const {
MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
uint32_t index) {
- PREPARE_FOR_EXECUTION(context, "v8::Array::CloneElementAt()", Object);
+ PREPARE_FOR_EXECUTION(context, Array, CloneElementAt, Object);
auto self = Utils::OpenHandle(this);
if (!self->HasFastObjectElements()) return Local<Object>();
i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -6315,7 +6444,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) { return Local<Object>(); }
Local<v8::Map> v8::Map::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Map::New");
+ LOG_API(i_isolate, Map, New);
ENTER_V8(i_isolate);
i::Handle<i::JSMap> obj = i_isolate->factory()->NewJSMap();
return Utils::ToLocal(obj);
@@ -6331,14 +6460,14 @@ size_t v8::Map::Size() const {
void Map::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- LOG_API(isolate, "Map::Clear");
+ LOG_API(isolate, Map, Clear);
ENTER_V8(isolate);
i::JSMap::Clear(self);
}
MaybeLocal<Value> Map::Get(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION(context, "Map::Get", Value);
+ PREPARE_FOR_EXECUTION(context, Map, Get, Value);
auto self = Utils::OpenHandle(this);
Local<Value> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6353,7 +6482,7 @@ MaybeLocal<Value> Map::Get(Local<Context> context, Local<Value> key) {
MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
Local<Value> value) {
- PREPARE_FOR_EXECUTION(context, "Map::Set", Map);
+ PREPARE_FOR_EXECUTION(context, Map, Set, Map);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
@@ -6367,7 +6496,7 @@ MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Map::Has", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Has, bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6375,12 +6504,12 @@ Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
arraysize(argv), argv)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(result->IsTrue());
+ return Just(result->IsTrue(isolate));
}
Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Map::Delete", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Map, Delete, bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6388,7 +6517,7 @@ Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
self, arraysize(argv), argv)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(result->IsTrue());
+ return Just(result->IsTrue(isolate));
}
@@ -6396,17 +6525,25 @@ Local<Array> Map::AsArray() const {
i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
i::Factory* factory = isolate->factory();
- LOG_API(isolate, "Map::AsArray");
+ LOG_API(isolate, Map, AsArray);
ENTER_V8(isolate);
i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(obj->table()));
- int size = table->NumberOfElements();
- int length = size * 2;
+ int length = table->NumberOfElements() * 2;
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
- for (int i = 0; i < size; ++i) {
- if (table->KeyAt(i)->IsTheHole()) continue;
- result->set(i * 2, table->KeyAt(i));
- result->set(i * 2 + 1, table->ValueAt(i));
+ int result_index = 0;
+ {
+ i::DisallowHeapAllocation no_gc;
+ int capacity = table->UsedCapacity();
+ i::Oddball* the_hole = isolate->heap()->the_hole_value();
+ for (int i = 0; i < capacity; ++i) {
+ i::Object* key = table->KeyAt(i);
+ if (key == the_hole) continue;
+ result->set(result_index++, key);
+ result->set(result_index++, table->ValueAt(i));
+ }
}
+ DCHECK_EQ(result_index, result->length());
+ DCHECK_EQ(result_index, length);
i::Handle<i::JSArray> result_array =
factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
return Utils::ToLocal(result_array);
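Note on the rewritten AsArray loops (this one and the analogous Set::AsArray change below): iteration now walks the table's used capacity rather than NumberOfElements(), because deleted entries leave holes; result_index only advances for live keys, which is exactly what the two DCHECKs verify. A minimal embedder-side sketch of consuming the interleaved [key0, value0, key1, value1, ...] layout that Map::AsArray() returns (assumes a live isolate and an entered `context`; error handling elided):

    v8::Local<v8::Array> entries = map->AsArray();
    for (uint32_t i = 0; i < entries->Length(); i += 2) {
      v8::Local<v8::Value> key = entries->Get(context, i).ToLocalChecked();
      v8::Local<v8::Value> val = entries->Get(context, i + 1).ToLocalChecked();
      // ... consume key/val ...
    }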
@@ -6415,7 +6552,7 @@ Local<Array> Map::AsArray() const {
Local<v8::Set> v8::Set::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Set::New");
+ LOG_API(i_isolate, Set, New);
ENTER_V8(i_isolate);
i::Handle<i::JSSet> obj = i_isolate->factory()->NewJSSet();
return Utils::ToLocal(obj);
@@ -6431,14 +6568,14 @@ size_t v8::Set::Size() const {
void Set::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- LOG_API(isolate, "Set::Clear");
+ LOG_API(isolate, Set, Clear);
ENTER_V8(isolate);
i::JSSet::Clear(self);
}
MaybeLocal<Set> Set::Add(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION(context, "Set::Add", Set);
+ PREPARE_FOR_EXECUTION(context, Set, Add, Set);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6451,7 +6588,7 @@ MaybeLocal<Set> Set::Add(Local<Context> context, Local<Value> key) {
Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Set::Has", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Has, bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6459,12 +6596,12 @@ Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
arraysize(argv), argv)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(result->IsTrue());
+ return Just(result->IsTrue(isolate));
}
Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Set::Delete", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Set, Delete, bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
@@ -6472,7 +6609,7 @@ Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
self, arraysize(argv), argv)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(result->IsTrue());
+ return Just(result->IsTrue(isolate));
}
@@ -6480,17 +6617,24 @@ Local<Array> Set::AsArray() const {
i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
i::Factory* factory = isolate->factory();
- LOG_API(isolate, "Set::AsArray");
+ LOG_API(isolate, Set, AsArray);
ENTER_V8(isolate);
i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(obj->table()));
int length = table->NumberOfElements();
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
- for (int i = 0; i < length; ++i) {
- i::Object* key = table->KeyAt(i);
- if (!key->IsTheHole()) {
- result->set(i, key);
+ int result_index = 0;
+ {
+ i::DisallowHeapAllocation no_gc;
+ int capacity = table->UsedCapacity();
+ i::Oddball* the_hole = isolate->heap()->the_hole_value();
+ for (int i = 0; i < capacity; ++i) {
+ i::Object* key = table->KeyAt(i);
+ if (key == the_hole) continue;
+ result->set(result_index++, key);
}
}
+ DCHECK_EQ(result_index, result->length());
+ DCHECK_EQ(result_index, length);
i::Handle<i::JSArray> result_array =
factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
return Utils::ToLocal(result_array);
@@ -6498,7 +6642,7 @@ Local<Array> Set::AsArray() const {
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
- PREPARE_FOR_EXECUTION(context, "Promise::Resolver::New", Resolver);
+ PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
i::Handle<i::Object> result;
has_pending_exception =
!i::Execution::Call(isolate, isolate->promise_create(),
@@ -6523,7 +6667,7 @@ Local<Promise> Promise::Resolver::GetPromise() {
Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
has_pending_exception =
@@ -6544,7 +6688,7 @@ void Promise::Resolver::Resolve(Local<Value> value) {
Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
Local<Value> value) {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Reject, bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
has_pending_exception =
@@ -6563,39 +6707,9 @@ void Promise::Resolver::Reject(Local<Value> value) {
}
-namespace {
-
-MaybeLocal<Promise> DoChain(Value* value, Local<Context> context,
- Local<Function> handler) {
- PREPARE_FOR_EXECUTION(context, "Promise::Chain", Promise);
- auto self = Utils::OpenHandle(value);
- i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
- i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(isolate, isolate->promise_chain(),
- self, arraysize(argv), argv)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION(Promise);
- RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
-}
-
-} // namespace
-
-
-MaybeLocal<Promise> Promise::Chain(Local<Context> context,
- Local<Function> handler) {
- return DoChain(this, context, handler);
-}
-
-
-Local<Promise> Promise::Chain(Local<Function> handler) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- RETURN_TO_LOCAL_UNCHECKED(DoChain(this, context, handler), Promise);
-}
-
-
MaybeLocal<Promise> Promise::Catch(Local<Context> context,
Local<Function> handler) {
- PREPARE_FOR_EXECUTION(context, "Promise::Catch", Promise);
+ PREPARE_FOR_EXECUTION(context, Promise, Catch, Promise);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
i::Handle<i::Object> result;
@@ -6615,7 +6729,7 @@ Local<Promise> Promise::Catch(Local<Function> handler) {
MaybeLocal<Promise> Promise::Then(Local<Context> context,
Local<Function> handler) {
- PREPARE_FOR_EXECUTION(context, "Promise::Then", Promise);
+ PREPARE_FOR_EXECUTION(context, Promise, Then, Promise);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
i::Handle<i::Object> result;
@@ -6636,10 +6750,10 @@ Local<Promise> Promise::Then(Local<Function> handler) {
bool Promise::HasHandler() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, "Promise::HasRejectHandler");
+ LOG_API(isolate, Promise, HasRejectHandler);
ENTER_V8(isolate);
i::Handle<i::Symbol> key = isolate->factory()->promise_has_handler_symbol();
- return i::JSReceiver::GetDataProperty(promise, key)->IsTrue();
+ return i::JSReceiver::GetDataProperty(promise, key)->IsTrue(isolate);
}
@@ -6671,7 +6785,7 @@ void Proxy::Revoke() {
MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
Local<Object> local_handler) {
- PREPARE_FOR_EXECUTION(context, "Proxy::New", Proxy);
+ PREPARE_FOR_EXECUTION(context, Proxy, New, Proxy);
i::Handle<i::JSReceiver> target = Utils::OpenHandle(*local_target);
i::Handle<i::JSReceiver> handler = Utils::OpenHandle(*local_handler);
Local<Proxy> result;
@@ -6681,6 +6795,40 @@ MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
RETURN_ESCAPED(result);
}
+WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
+ i::Handle<i::JSObject> obj =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::FixedArray> compiled_part =
+ i::handle(i::FixedArray::cast(obj->GetInternalField(0)));
+ std::unique_ptr<i::ScriptData> script_data =
+ i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
+ compiled_part);
+ script_data->ReleaseDataOwnership();
+ size_t size = static_cast<size_t>(script_data->length());
+ return {std::unique_ptr<const uint8_t[]>(script_data->data()), size};
+}
+
+MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
+ Isolate* isolate,
+ const WasmCompiledModule::SerializedModule& serialized_data) {
+ int size = static_cast<int>(serialized_data.second);
+ i::ScriptData sc(serialized_data.first.get(), size);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::MaybeHandle<i::FixedArray> maybe_compiled_part =
+ i::WasmCompiledModuleSerializer::DeserializeWasmModule(i_isolate, &sc);
+ i::Handle<i::FixedArray> compiled_part;
+ if (!maybe_compiled_part.ToHandle(&compiled_part)) {
+ return MaybeLocal<WasmCompiledModule>();
+ }
+ return Local<WasmCompiledModule>::Cast(Utils::ToLocal(
+ i::wasm::CreateCompiledModuleObject(i_isolate, compiled_part)));
+}
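A hedged sketch of how an embedder might round-trip a compiled module through the new serialization API (assumes `module` is a Local<WasmCompiledModule> obtained elsewhere; SerializedModule is the std::pair<std::unique_ptr<const uint8_t[]>, size_t> seen in the body above):

    v8::WasmCompiledModule::SerializedModule blob = module->Serialize();
    // blob.first.get() / blob.second can now be written to a code cache.
    v8::Local<v8::WasmCompiledModule> restored;
    if (!v8::WasmCompiledModule::Deserialize(isolate, blob)
             .ToLocal(&restored)) {
      // Deserialization can fail (e.g. version mismatch); fall back to
      // recompiling from the original wire bytes.
    }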
+
+// static
+v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
+ return new ArrayBufferAllocator();
+}
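The new factory method lets embedders obtain the default backing-store allocator instead of hand-rolling one; a typical setup sketch (ownership of the allocator stays with the embedder):

    v8::Isolate::CreateParams params;
    params.array_buffer_allocator =
        v8::ArrayBuffer::Allocator::NewDefaultAllocator();
    v8::Isolate* isolate = v8::Isolate::New(params);
    // ... use the isolate ...
    isolate->Dispose();
    delete params.array_buffer_allocator;  // not owned by the isolate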
+
bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
@@ -6694,7 +6842,7 @@ bool v8::ArrayBuffer::IsNeuterable() const {
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- Utils::ApiCheck(!self->is_external(), "v8::ArrayBuffer::Externalize",
+ Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*self);
@@ -6721,7 +6869,7 @@ void v8::ArrayBuffer::Neuter() {
"Only externalized ArrayBuffers can be neutered");
Utils::ApiCheck(obj->is_neuterable(), "v8::ArrayBuffer::Neuter",
"Only neuterable ArrayBuffers can be neutered");
- LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
+ LOG_API(isolate, ArrayBuffer, Neuter);
ENTER_V8(isolate);
obj->Neuter();
}
@@ -6735,7 +6883,7 @@ size_t v8::ArrayBuffer::ByteLength() const {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "v8::ArrayBuffer::New(size_t)");
+ LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
@@ -6750,7 +6898,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
// Embedders must guarantee that the external backing store is valid.
CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
+ LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
@@ -6778,10 +6926,9 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- size_t byte_offset = i::NumberToSize(isolate, self->byte_offset());
+ size_t byte_offset = i::NumberToSize(self->byte_offset());
size_t bytes_to_copy =
- i::Min(byte_length, i::NumberToSize(isolate, self->byte_length()));
+ i::Min(byte_length, i::NumberToSize(self->byte_length()));
if (bytes_to_copy) {
i::DisallowHeapAllocation no_gc;
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()));
@@ -6823,49 +6970,45 @@ size_t v8::TypedArray::Length() {
return static_cast<size_t>(obj->length_value());
}
-
-#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \
- Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer, \
- size_t byte_offset, size_t length) { \
- i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
- LOG_API(isolate, \
- "v8::" #Type "Array::New(Local<ArrayBuffer>, size_t, size_t)"); \
- ENTER_V8(isolate); \
- if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
- "v8::" #Type \
- "Array::New(Local<ArrayBuffer>, size_t, size_t)", \
- "length exceeds max allowed value")) { \
- return Local<Type##Array>(); \
- } \
- i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); \
- i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
- i::kExternal##Type##Array, buffer, byte_offset, length); \
- return Utils::ToLocal##Type##Array(obj); \
- } \
- Local<Type##Array> Type##Array::New( \
- Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset, \
- size_t length) { \
- CHECK(i::FLAG_harmony_sharedarraybuffer); \
- i::Isolate* isolate = \
- Utils::OpenHandle(*shared_array_buffer)->GetIsolate(); \
- LOG_API(isolate, "v8::" #Type \
- "Array::New(Local<SharedArrayBuffer>, size_t, size_t)"); \
- ENTER_V8(isolate); \
- if (!Utils::ApiCheck( \
- length <= static_cast<size_t>(i::Smi::kMaxValue), \
- "v8::" #Type \
- "Array::New(Local<SharedArrayBuffer>, size_t, size_t)", \
- "length exceeds max allowed value")) { \
- return Local<Type##Array>(); \
- } \
- i::Handle<i::JSArrayBuffer> buffer = \
- Utils::OpenHandle(*shared_array_buffer); \
- i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
- i::kExternal##Type##Array, buffer, byte_offset, length); \
- return Utils::ToLocal##Type##Array(obj); \
+#define TYPED_ARRAY_NEW(Type, type, TYPE, ctype, size) \
+ Local<Type##Array> Type##Array::New(Local<ArrayBuffer> array_buffer, \
+ size_t byte_offset, size_t length) { \
+ i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
+ LOG_API(isolate, Type##Array, New); \
+ ENTER_V8(isolate); \
+ if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
+ "v8::" #Type \
+ "Array::New(Local<ArrayBuffer>, size_t, size_t)", \
+ "length exceeds max allowed value")) { \
+ return Local<Type##Array>(); \
+ } \
+ i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer); \
+ i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
+ i::kExternal##Type##Array, buffer, byte_offset, length); \
+ return Utils::ToLocal##Type##Array(obj); \
+ } \
+ Local<Type##Array> Type##Array::New( \
+ Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset, \
+ size_t length) { \
+ CHECK(i::FLAG_harmony_sharedarraybuffer); \
+ i::Isolate* isolate = \
+ Utils::OpenHandle(*shared_array_buffer)->GetIsolate(); \
+ LOG_API(isolate, Type##Array, New); \
+ ENTER_V8(isolate); \
+ if (!Utils::ApiCheck( \
+ length <= static_cast<size_t>(i::Smi::kMaxValue), \
+ "v8::" #Type \
+ "Array::New(Local<SharedArrayBuffer>, size_t, size_t)", \
+ "length exceeds max allowed value")) { \
+ return Local<Type##Array>(); \
+ } \
+ i::Handle<i::JSArrayBuffer> buffer = \
+ Utils::OpenHandle(*shared_array_buffer); \
+ i::Handle<i::JSTypedArray> obj = isolate->factory()->NewJSTypedArray( \
+ i::kExternal##Type##Array, buffer, byte_offset, length); \
+ return Utils::ToLocal##Type##Array(obj); \
}
-
TYPED_ARRAYS(TYPED_ARRAY_NEW)
#undef TYPED_ARRAY_NEW
@@ -6873,7 +7016,7 @@ Local<DataView> DataView::New(Local<ArrayBuffer> array_buffer,
size_t byte_offset, size_t byte_length) {
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
- LOG_API(isolate, "v8::DataView::New(Local<ArrayBuffer>, size_t, size_t)");
+ LOG_API(isolate, DataView, New);
ENTER_V8(isolate);
i::Handle<i::JSDataView> obj =
isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
@@ -6886,8 +7029,7 @@ Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
CHECK(i::FLAG_harmony_sharedarraybuffer);
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*shared_array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
- LOG_API(isolate,
- "v8::DataView::New(Local<SharedArrayBuffer>, size_t, size_t)");
+ LOG_API(isolate, DataView, New);
ENTER_V8(isolate);
i::Handle<i::JSDataView> obj =
isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
@@ -6903,7 +7045,7 @@ bool v8::SharedArrayBuffer::IsExternal() const {
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- Utils::ApiCheck(!self->is_external(), "v8::SharedArrayBuffer::Externalize",
+ Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
"SharedArrayBuffer already externalized");
self->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*self);
@@ -6931,7 +7073,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
size_t byte_length) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "v8::SharedArrayBuffer::New(size_t)");
+ LOG_API(i_isolate, SharedArrayBuffer, New);
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
@@ -6948,7 +7090,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
// Embedders must guarantee that the external backing store is valid.
CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "v8::SharedArrayBuffer::New(void*, size_t)");
+ LOG_API(i_isolate, SharedArrayBuffer, New);
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
@@ -6961,7 +7103,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Symbol::New()");
+ LOG_API(i_isolate, Symbol, New);
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
if (!name.IsEmpty()) result->set_name(*Utils::OpenHandle(*name));
@@ -6980,7 +7122,7 @@ static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
i::Handle<i::Object> symbol =
i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
if (!symbol->IsSymbol()) {
- DCHECK(symbol->IsUndefined());
+ DCHECK(symbol->IsUndefined(isolate));
if (private_symbol)
symbol = isolate->factory()->NewPrivateSymbol();
else
@@ -7034,7 +7176,7 @@ Local<Symbol> v8::Symbol::GetIsConcatSpreadable(Isolate* isolate) {
Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Private::New()");
+ LOG_API(i_isolate, Private, New);
ENTER_V8(i_isolate);
i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
@@ -7227,23 +7369,6 @@ void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
isolate->heap()->SetEmbedderHeapTracer(tracer);
}
-void Isolate::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->memory_allocator()->AddMemoryAllocationCallback(
- callback, space, action);
-}
-
-
-void Isolate::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->memory_allocator()->RemoveMemoryAllocationCallback(
- callback);
-}
-
-
void Isolate::TerminateExecution() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->stack_guard()->RequestTerminateExecution();
@@ -7326,18 +7451,12 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
v8_isolate->SetAddHistogramSampleFunction(
params.add_histogram_sample_callback);
}
+
+ isolate->set_api_external_references(params.external_references);
SetResourceConstraints(isolate, params.constraints);
// TODO(jochen): Once we get rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
if (params.entry_hook || !i::Snapshot::Initialize(isolate)) {
- // If the isolate has a function entry hook, it needs to re-build all its
- // code stubs with entry hooks embedded, so don't deserialize a snapshot.
- if (i::Snapshot::EmbedsScript(isolate)) {
- // If the snapshot embeds a script, we cannot initialize the isolate
- // without the snapshot as a fallback. This is unlikely to happen though.
- V8_Fatal(__FILE__, __LINE__,
- "Initializing isolate from custom startup snapshot failed");
- }
isolate->Init(NULL);
}
return v8_isolate;
@@ -7445,6 +7564,10 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->total_available_size_ = heap->Available();
heap_statistics->used_heap_size_ = heap->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
+ heap_statistics->malloced_memory_ =
+ isolate->allocator()->GetCurrentMemoryUsage();
+ heap_statistics->peak_malloced_memory_ =
+ isolate->allocator()->GetMaxMemoryUsage();
heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
@@ -7507,14 +7630,37 @@ bool Isolate::GetHeapObjectStatisticsAtLastGC(
return true;
}
+bool Isolate::GetHeapCodeAndMetadataStatistics(
+ HeapCodeStatistics* code_statistics) {
+ if (!code_statistics) return false;
+
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->CollectCodeStatistics();
+
+ code_statistics->code_and_metadata_size_ = isolate->code_and_metadata_size();
+ code_statistics->bytecode_and_metadata_size_ =
+ isolate->bytecode_and_metadata_size();
+ return true;
+}
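A short sketch of querying the new code/bytecode accounting (names follow the HeapCodeStatistics accessors; assumes a valid isolate):

    v8::HeapCodeStatistics code_stats;
    if (isolate->GetHeapCodeAndMetadataStatistics(&code_stats)) {
      size_t code = code_stats.code_and_metadata_size();
      size_t bytecode = code_stats.bytecode_and_metadata_size();
      // ... surface both in embedder memory metrics ...
    }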
void Isolate::GetStackSample(const RegisterState& state, void** frames,
size_t frames_limit, SampleInfo* sample_info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- i::TickSample::GetStackSample(isolate, state, i::TickSample::kSkipCEntryFrame,
- frames, frames_limit, sample_info);
+ RegisterState regs = state;
+ if (TickSample::GetStackSample(this, &regs, TickSample::kSkipCEntryFrame,
+ frames, frames_limit, sample_info)) {
+ return;
+ }
+ sample_info->frames_count = 0;
+ sample_info->vm_state = OTHER;
+ sample_info->external_callback_entry = nullptr;
}
+size_t Isolate::NumberOfPhantomHandleResetsSinceLastCall() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ size_t result = isolate->global_handles()->NumberOfPhantomHandleResets();
+ isolate->global_handles()->ResetNumberOfPhantomHandleResets();
+ return result;
+}
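The counter is cumulative and clears on read, so polling it yields per-interval deltas; a one-line sketch:

    size_t resets = isolate->NumberOfPhantomHandleResetsSinceLastCall();
    // e.g. fold `resets` into a GC-activity metric between samples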
void Isolate::SetEventLogger(LogEventCallback that) {
// Do not overwrite the event logger if we want to log explicitly.
@@ -7700,13 +7846,13 @@ int Isolate::ContextDisposedNotification(bool dependant_context) {
void Isolate::IsolateInForegroundNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->heap()->SetOptimizeForLatency();
+ return isolate->IsolateInForegroundNotification();
}
void Isolate::IsolateInBackgroundNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->heap()->SetOptimizeForMemoryUsage();
+ return isolate->IsolateInBackgroundNotification();
}
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
@@ -7715,6 +7861,11 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
Locker::IsLocked(this));
}
+void Isolate::SetRAILMode(RAILMode rail_mode) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->SetRAILMode(rail_mode);
+}
+
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7733,9 +7884,10 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {
void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (isolate->code_range()->valid()) {
- *start = isolate->code_range()->start();
- *length_in_bytes = isolate->code_range()->size();
+ if (isolate->heap()->memory_allocator()->code_range()->valid()) {
+ *start = isolate->heap()->memory_allocator()->code_range()->start();
+ *length_in_bytes =
+ isolate->heap()->memory_allocator()->code_range()->size();
} else {
*start = NULL;
*length_in_bytes = 0;
@@ -7748,6 +7900,10 @@ void Isolate::SetFatalErrorHandler(FatalErrorCallback that) {
isolate->set_exception_behavior(that);
}
+void Isolate::SetOOMErrorHandler(OOMErrorCallback that) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_oom_behavior(that);
+}
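A sketch of a handler matching the OOMErrorCallback signature (const char* location, bool is_heap_oom), mirroring the ReportOOMFailure declaration added to api.h below; the process should not attempt to continue after a V8 OOM:

    // assumes <cstdio>, <cstdlib>, and v8.h are included
    void OnV8OutOfMemory(const char* location, bool is_heap_oom) {
      fprintf(stderr, "V8 OOM at %s (heap: %d)\n", location, is_heap_oom);
      abort();
    }
    // during isolate setup:
    isolate->SetOOMErrorHandler(OnV8OutOfMemory);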
void Isolate::SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback) {
@@ -7766,12 +7922,15 @@ bool Isolate::AddMessageListener(MessageCallback that, Local<Value> data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- NeanderObject obj(isolate, 2);
- obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
- obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
- : *Utils::OpenHandle(*data));
- listeners.add(isolate, obj.value());
+ i::Handle<i::TemplateList> list = isolate->factory()->message_listeners();
+ i::Handle<i::FixedArray> listener = isolate->factory()->NewFixedArray(2);
+ i::Handle<i::Foreign> foreign =
+ isolate->factory()->NewForeign(FUNCTION_ADDR(that));
+ listener->set(0, *foreign);
+ listener->set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
+ : *Utils::OpenHandle(*data));
+ list = i::TemplateList::Add(isolate, list, listener);
+ isolate->heap()->SetMessageListeners(*list);
return true;
}
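The listener signature is unchanged by this storage rework (NeanderArray to TemplateList); a minimal registration sketch, assuming the two-argument v8::MessageCallback:

    // assumes <cstdio> and v8.h are included
    void OnMessage(v8::Local<v8::Message> message, v8::Local<v8::Value> data) {
      v8::String::Utf8Value text(message->Get());
      fprintf(stderr, "V8 message: %s\n", *text);
    }
    // ...
    isolate->AddMessageListener(OnMessage);  // data defaults to undefined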
@@ -7780,14 +7939,14 @@ void Isolate::RemoveMessageListeners(MessageCallback that) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- NeanderArray listeners(isolate->factory()->message_listeners());
- for (int i = 0; i < listeners.length(); i++) {
- if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
-
- NeanderObject listener(i::JSObject::cast(listeners.get(i)));
- i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
+ i::DisallowHeapAllocation no_gc;
+ i::TemplateList* listeners = isolate->heap()->message_listeners();
+ for (int i = 0; i < listeners->length(); i++) {
+ if (listeners->get(i)->IsUndefined(isolate)) continue; // skip deleted ones
+ i::FixedArray* listener = i::FixedArray::cast(listeners->get(i));
+ i::Foreign* callback_obj = i::Foreign::cast(listener->get(0));
if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
- listeners.set(i, isolate->heap()->undefined_value());
+ listeners->set(i, isolate->heap()->undefined_value());
}
}
}
@@ -7814,6 +7973,12 @@ void Isolate::VisitExternalResources(ExternalResourceVisitor* visitor) {
}
+bool Isolate::IsInUse() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->IsInUse();
+}
+
+
class VisitorAdapter : public i::ObjectVisitor {
public:
explicit VisitorAdapter(PersistentHandleVisitor* visitor)
@@ -7821,6 +7986,7 @@ class VisitorAdapter : public i::ObjectVisitor {
void VisitPointers(i::Object** start, i::Object** end) override {
UNREACHABLE();
}
+ DISABLE_CFI_PERF
void VisitEmbedderReference(i::Object** p, uint16_t class_id) override {
Value* value = ToApi<Value>(i::Handle<i::Object>(p));
visitor_->VisitPersistentHandle(
@@ -7901,6 +8067,10 @@ int MicrotasksScope::GetCurrentDepth(Isolate* v8Isolate) {
return isolate->handle_scope_implementer()->GetMicrotasksScopeDepth();
}
+bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8Isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8Isolate);
+ return isolate->IsRunningMicrotasks();
+}
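A sketch of the intended re-entrancy guard, assuming the embedder drives microtasks explicitly:

    if (!v8::MicrotasksScope::IsRunningMicrotasks(isolate)) {
      isolate->RunMicrotasks();  // avoid nested microtask pumping
    }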
String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
: str_(NULL), length_(0) {
@@ -7945,11 +8115,10 @@ String::Value::~Value() {
i::DeleteArray(str_);
}
-
#define DEFINE_ERROR(NAME, name) \
Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
i::Isolate* isolate = i::Isolate::Current(); \
- LOG_API(isolate, #NAME); \
+ LOG_API(isolate, NAME, New); \
ENTER_V8(isolate); \
i::Object* error; \
{ \
@@ -8016,12 +8185,6 @@ bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
}
-bool Debug::SetDebugEventListener(EventCallback that, Local<Value> data) {
- return SetDebugEventListener(
- reinterpret_cast<Isolate*>(i::Isolate::Current()), that, data);
-}
-
-
void Debug::DebugBreak(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak();
}
@@ -8047,11 +8210,6 @@ void Debug::SetMessageHandler(Isolate* isolate,
}
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
- SetMessageHandler(reinterpret_cast<Isolate*>(i::Isolate::Current()), handler);
-}
-
-
void Debug::SendCommand(Isolate* isolate,
const uint16_t* command,
int length,
@@ -8065,7 +8223,7 @@ void Debug::SendCommand(Isolate* isolate,
MaybeLocal<Value> Debug::Call(Local<Context> context,
v8::Local<v8::Function> fun,
v8::Local<v8::Value> data) {
- PREPARE_FOR_EXECUTION(context, "v8::Debug::Call()", Value);
+ PREPARE_FOR_EXECUTION(context, Debug, Call, Value);
i::Handle<i::Object> data_obj;
if (data.IsEmpty()) {
data_obj = isolate->factory()->undefined_value();
@@ -8081,16 +8239,9 @@ MaybeLocal<Value> Debug::Call(Local<Context> context,
}
-Local<Value> Debug::Call(v8::Local<v8::Function> fun,
- v8::Local<v8::Value> data) {
- auto context = ContextFromHeapObject(Utils::OpenHandle(*fun));
- RETURN_TO_LOCAL_UNCHECKED(Call(context, fun, data), Value);
-}
-
-
MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
v8::Local<v8::Value> obj) {
- PREPARE_FOR_EXECUTION(context, "v8::Debug::GetMirror()", Value);
+ PREPARE_FOR_EXECUTION(context, Debug, GetMirror, Value);
i::Debug* isolate_debug = isolate->debug();
has_pending_exception = !isolate_debug->Load();
RETURN_ON_FAILED_EXECUTION(Value);
@@ -8109,21 +8260,11 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
}
-Local<Value> Debug::GetMirror(v8::Local<v8::Value> obj) {
- RETURN_TO_LOCAL_UNCHECKED(GetMirror(Local<Context>(), obj), Value);
-}
-
-
void Debug::ProcessDebugMessages(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->debug()->ProcessDebugMessages(true);
}
-void Debug::ProcessDebugMessages() {
- ProcessDebugMessages(reinterpret_cast<Isolate*>(i::Isolate::Current()));
-}
-
-
Local<Context> Debug::GetDebugContext(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
@@ -8131,11 +8272,15 @@ Local<Context> Debug::GetDebugContext(Isolate* isolate) {
}
-Local<Context> Debug::GetDebugContext() {
- return GetDebugContext(reinterpret_cast<Isolate*>(i::Isolate::Current()));
+MaybeLocal<Context> Debug::GetDebuggedContext(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ if (!i_isolate->debug()->in_debug_scope()) return MaybeLocal<Context>();
+ i::Handle<i::Object> calling = i_isolate->GetCallingNativeContext();
+ if (calling.is_null()) return MaybeLocal<Context>();
+ return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
}
-
void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->debug()->set_live_edit_enabled(enable);
@@ -8261,9 +8406,8 @@ const std::vector<CpuProfileDeoptInfo>& CpuProfileNode::GetDeoptInfos() const {
void CpuProfile::Delete() {
i::CpuProfile* profile = reinterpret_cast<i::CpuProfile*>(this);
- i::Isolate* isolate = profile->top_down()->isolate();
- i::CpuProfiler* profiler = isolate->cpu_profiler();
- DCHECK(profiler != NULL);
+ i::CpuProfiler* profiler = profile->cpu_profiler();
+ DCHECK(profiler != nullptr);
profiler->DeleteProfile(profile);
}
@@ -8311,6 +8455,12 @@ int CpuProfile::GetSamplesCount() const {
return reinterpret_cast<const i::CpuProfile*>(this)->samples_count();
}
+CpuProfiler* CpuProfiler::New(Isolate* isolate) {
+ return reinterpret_cast<CpuProfiler*>(
+ new i::CpuProfiler(reinterpret_cast<i::Isolate*>(isolate)));
+}
+
+void CpuProfiler::Dispose() { delete reinterpret_cast<i::CpuProfiler*>(this); }
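Profilers are now constructed per use rather than fetched from the isolate, and the embedder owns the lifetime; a hedged usage sketch:

    v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
    v8::Local<v8::String> title =
        v8::String::NewFromUtf8(isolate, "startup",
                                v8::NewStringType::kNormal).ToLocalChecked();
    profiler->StartProfiling(title);
    // ... run the code of interest ...
    v8::CpuProfile* profile = profiler->StopProfiling(title);
    profile->Delete();
    profiler->Dispose();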
void CpuProfiler::SetSamplingInterval(int us) {
DCHECK_GE(us, 0);
@@ -8336,7 +8486,9 @@ CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
void CpuProfiler::SetIdle(bool is_idle) {
- i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
+ i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(this);
+ i::Isolate* isolate = profiler->isolate();
+ if (!isolate->is_profiling()) return;
v8::StateTag state = isolate->current_vm_state();
DCHECK(state == v8::EXTERNAL || state == v8::IDLE);
if (isolate->js_entry_sp() != NULL) return;
@@ -8548,11 +8700,11 @@ SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream,
return heap_profiler->PushHeapObjectsStats(stream, timestamp_us);
}
-
bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
- int stack_depth) {
- return reinterpret_cast<i::HeapProfiler*>(this)
- ->StartSamplingHeapProfiler(sample_interval, stack_depth);
+ int stack_depth,
+ SamplingFlags flags) {
+ return reinterpret_cast<i::HeapProfiler*>(this)->StartSamplingHeapProfiler(
+ sample_interval, stack_depth, flags);
}
@@ -8724,6 +8876,10 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
v->VisitPointers(start, start + context_lists[i]->length());
}
+ if (microtask_context_) {
+ Object** start = reinterpret_cast<Object**>(&microtask_context_);
+ v->VisitPointers(start, start + 1);
+ }
}
@@ -8808,6 +8964,11 @@ void InvokeAccessorGetterCallback(
v8::AccessorNameGetterCallback getter) {
// Leaving JavaScript.
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(isolate,
+ &RuntimeCallStats::AccessorGetterCallback);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate,
+ &internal::tracing::TraceEventStatsTable::AccessorGetterCallback);
Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
getter));
VMState<EXTERNAL> state(isolate);
@@ -8819,6 +8980,11 @@ void InvokeAccessorGetterCallback(
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback) {
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(isolate,
+ &RuntimeCallStats::InvokeFunctionCallback);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate,
+ &internal::tracing::TraceEventStatsTable::InvokeFunctionCallback);
Address callback_address =
reinterpret_cast<Address>(reinterpret_cast<intptr_t>(callback));
VMState<EXTERNAL> state(isolate);
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index cb2b5c386c..26e47ccb52 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -26,72 +26,6 @@ class Consts {
};
};
-
-// Utilities for working with neander-objects, primitive
-// env-independent JSObjects used by the api.
-class NeanderObject {
- public:
- explicit NeanderObject(v8::internal::Isolate* isolate, int size);
- explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
- explicit inline NeanderObject(v8::internal::Object* obj);
- inline v8::internal::Object* get(int index);
- inline void set(int index, v8::internal::Object* value);
- inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
- int size();
- private:
- v8::internal::Handle<v8::internal::JSObject> value_;
-};
-
-
-// Utilities for working with neander-arrays, a simple extensible
-// array abstraction built on neander-objects.
-class NeanderArray {
- public:
- explicit NeanderArray(v8::internal::Isolate* isolate);
- explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
- inline v8::internal::Handle<v8::internal::JSObject> value() {
- return obj_.value();
- }
-
- void add(internal::Isolate* isolate,
- v8::internal::Handle<v8::internal::Object> value);
-
- int length();
-
- v8::internal::Object* get(int index);
- // Change the value at an index to undefined value. If the index is
- // out of bounds, the request is ignored. Returns the old value.
- void set(int index, v8::internal::Object* value);
- private:
- NeanderObject obj_;
-};
-
-
-NeanderObject::NeanderObject(v8::internal::Handle<v8::internal::Object> obj)
- : value_(v8::internal::Handle<v8::internal::JSObject>::cast(obj)) { }
-
-
-NeanderObject::NeanderObject(v8::internal::Object* obj)
- : value_(v8::internal::Handle<v8::internal::JSObject>(
- v8::internal::JSObject::cast(obj))) { }
-
-
-NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
- : obj_(obj) { }
-
-
-v8::internal::Object* NeanderObject::get(int offset) {
- DCHECK(value()->HasFastObjectElements());
- return v8::internal::FixedArray::cast(value()->elements())->get(offset);
-}
-
-
-void NeanderObject::set(int offset, v8::internal::Object* value) {
- DCHECK(value_->HasFastObjectElements());
- v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
-}
-
-
template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
@@ -184,9 +118,7 @@ class Utils {
if (!condition) Utils::ReportApiFailure(location, message);
return condition;
}
-
- static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
- static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
+ static void ReportOOMFailure(const char* location, bool is_heap_oom);
static inline Local<Context> ToLocal(
v8::internal::Handle<v8::internal::Context> obj);
@@ -281,7 +213,9 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
template<class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj) {
- DCHECK(obj.is_null() || !obj->IsTheHole());
+ DCHECK(obj.is_null() ||
+ (obj->IsSmi() ||
+ !obj->IsTheHole(i::HeapObject::cast(*obj)->GetIsolate())));
return Local<To>(reinterpret_cast<To*>(obj.location()));
}
@@ -450,6 +384,7 @@ class HandleScopeImplementer {
blocks_(0),
entered_contexts_(0),
saved_contexts_(0),
+ microtask_context_(nullptr),
spare_(NULL),
call_depth_(0),
microtasks_depth_(0),
@@ -516,6 +451,10 @@ class HandleScopeImplementer {
// contexts have been entered.
inline Handle<Context> LastEnteredContext();
+ inline void EnterMicrotaskContext(Handle<Context> context);
+ inline void LeaveMicrotaskContext();
+ inline Handle<Context> MicrotaskContext();
+
inline void SaveContext(Context* context);
inline Context* RestoreContext();
inline bool HasSavedContexts();
@@ -534,6 +473,7 @@ class HandleScopeImplementer {
blocks_.Initialize(0);
entered_contexts_.Initialize(0);
saved_contexts_.Initialize(0);
+ microtask_context_ = nullptr;
spare_ = NULL;
last_handle_before_deferred_block_ = NULL;
call_depth_ = 0;
@@ -543,6 +483,7 @@ class HandleScopeImplementer {
DCHECK(blocks_.length() == 0);
DCHECK(entered_contexts_.length() == 0);
DCHECK(saved_contexts_.length() == 0);
+ DCHECK(!microtask_context_);
blocks_.Free();
entered_contexts_.Free();
saved_contexts_.Free();
@@ -562,6 +503,7 @@ class HandleScopeImplementer {
List<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
+ Context* microtask_context_;
Object** spare_;
int call_depth_;
int microtasks_depth_;
@@ -634,6 +576,20 @@ Handle<Context> HandleScopeImplementer::LastEnteredContext() {
return Handle<Context>(entered_contexts_.last());
}
+void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
+ DCHECK(!microtask_context_);
+ microtask_context_ = *context;
+}
+
+void HandleScopeImplementer::LeaveMicrotaskContext() {
+ DCHECK(microtask_context_);
+ microtask_context_ = nullptr;
+}
+
+Handle<Context> HandleScopeImplementer::MicrotaskContext() {
+ if (microtask_context_) return Handle<Context>(microtask_context_);
+ return Handle<Context>::null();
+}
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 02090f9fe3..9c629ce936 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -79,22 +79,32 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#define CLOBBER_DOUBLE_REGISTERS()
#endif
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
- static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
- Type Name(int args_length, Object** args_object, Isolate* isolate) { \
- CLOBBER_DOUBLE_REGISTERS(); \
- Type value; \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8." #Name); \
- Arguments args(args_length, args_object); \
- if (FLAG_runtime_call_stats) { \
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
- RuntimeCallTimerScope timer(isolate, &stats->Name); \
- value = __RT_impl_##Name(args, isolate); \
- } else { \
- value = __RT_impl_##Name(args, isolate); \
- } \
- return value; \
- } \
+// TODO(cbruni): add global flag to check whether any tracing events have been
+// enabled.
+// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
+ static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
+ \
+ V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
+ Isolate* isolate) { \
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name); \
+ Arguments args(args_length, args_object); \
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
+ isolate, &tracing::TraceEventStatsTable::Name); \
+ return __RT_impl_##Name(args, isolate); \
+ } \
+ \
+ Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ CHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
+ CLOBBER_DOUBLE_REGISTERS(); \
+ if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
+ FLAG_runtime_call_stats)) { \
+ return Stats_##Name(args_length, args_object, isolate); \
+ } \
+ Arguments args(args_length, args_object); \
+ return __RT_impl_##Name(args, isolate); \
+ } \
+ \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
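The rework splits every runtime entry point into a V8_NOINLINE instrumented wrapper (Stats_##Name) and a lean fast path guarded by V8_UNLIKELY, so the common case pays neither the timer setup nor the extra code size. A generic sketch of the same pattern, with hypothetical names; __builtin_expect and the noinline attribute are the GCC/Clang spellings behind V8_UNLIKELY/V8_NOINLINE:

    static bool stats_enabled = false;

    __attribute__((noinline)) static int DoWorkInstrumented(int x) {
      // stand-in for RuntimeCallTimerScope + trace-event bookkeeping
      return x * 2;
    }

    static int DoWork(int x) {
      if (__builtin_expect(stats_enabled, 0)) return DoWorkInstrumented(x);
      return x * 2;  // hot path: no instrumentation on this branch
    }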
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index b0b22b63c2..b1f33e009e 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -46,9 +46,9 @@
namespace v8 {
namespace internal {
-
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+bool CpuFeatures::SupportsSimd128() { return false; }
int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
@@ -71,11 +71,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -104,33 +99,6 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
- host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
- icache_flush_mode);
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -156,6 +124,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -276,7 +245,7 @@ void RelocInfo::WipeOut() {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 62516e82c9..78ffe25390 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -57,7 +57,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << ARMv8;
// ARMv8 always features VFP and NEON.
answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
- answer |= 1u << SUDIV | 1u << MLS;
+ answer |= 1u << SUDIV;
}
#endif // CAN_USE_ARMV8_INSTRUCTIONS
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
@@ -72,9 +72,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
#ifdef CAN_USE_NEON
if (FLAG_enable_neon) answer |= 1u << NEON;
#endif // CAN_USE_VFP32DREGS
- if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
- answer |= 1u << UNALIGNED_ACCESSES;
- }
return answer;
}
@@ -93,7 +90,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= 1u << ARMv8;
// ARMv8 always features VFP and NEON.
supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
- supported_ |= 1u << SUDIV | 1u << MLS;
+ supported_ |= 1u << SUDIV;
if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
}
if (FLAG_enable_armv7) {
@@ -104,8 +101,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
}
- if (FLAG_enable_mls) supported_ |= 1u << MLS;
- if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
#else // __arm__
// Probe for additional features at runtime.
@@ -119,14 +114,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
- if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
if (cpu.architecture() >= 7) {
if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
supported_ |= 1u << ARMv8;
}
- if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
// Use movw/movt for QUALCOMM ARMv7 cores.
if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
@@ -141,15 +134,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
-
- if (cpu.implementer() == base::CPU::NVIDIA &&
- cpu.variant() == base::CPU::NVIDIA_DENVER &&
- cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
- // TODO(jkummerow): This is turned off as an experiment to see if it
- // affects crash rates. Keep an eye on crash reports and either remove
- // coherent cache support permanently, or re-enable it!
- // supported_ |= 1u << COHERENT_CACHE;
- }
#endif
DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
@@ -212,18 +196,12 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
- "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
- "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
- CpuFeatures::IsSupported(ARMv8),
- CpuFeatures::IsSupported(ARMv7),
- CpuFeatures::IsSupported(VFP3),
- CpuFeatures::IsSupported(VFP32DREGS),
- CpuFeatures::IsSupported(NEON),
- CpuFeatures::IsSupported(SUDIV),
- CpuFeatures::IsSupported(MLS),
- CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
- CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
- CpuFeatures::IsSupported(COHERENT_CACHE));
+ "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
+ "MOVW_MOVT_IMMEDIATE_LOADS=%d",
+ CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
+ CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
+ CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
+ CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
@@ -255,6 +233,31 @@ bool RelocInfo::IsInConstantPool() {
return Assembler::is_constant_pool_load(pc_);
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(size), flush_mode);
+}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
@@ -266,7 +269,6 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -463,17 +465,15 @@ const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | Register::kCode_fp * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
- pending_32_bit_constants_(&pending_32_bit_constants_buffer_[0]),
- pending_64_bit_constants_(&pending_64_bit_constants_buffer_[0]),
- constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
- positions_recorder_(this) {
+ pending_32_bit_constants_(),
+ pending_64_bit_constants_(),
+ constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits) {
+ pending_32_bit_constants_.reserve(kMinNumPendingConstants);
+ pending_64_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
- num_pending_32_bit_constants_ = 0;
- num_pending_64_bit_constants_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
@@ -486,26 +486,18 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
DCHECK(const_pool_blocked_nesting_ == 0);
- if (pending_32_bit_constants_ != &pending_32_bit_constants_buffer_[0]) {
- delete[] pending_32_bit_constants_;
- }
- if (pending_64_bit_constants_ != &pending_64_bit_constants_buffer_[0]) {
- delete[] pending_64_bit_constants_;
- }
}
void Assembler::GetCode(CodeDesc* desc) {
- reloc_info_writer.Finish();
-
// Emit constant pool if necessary.
int constant_pool_offset = 0;
if (FLAG_enable_embedded_constant_pool) {
constant_pool_offset = EmitEmbeddedConstantPool();
} else {
CheckConstPool(true, false);
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ DCHECK(pending_32_bit_constants_.empty());
+ DCHECK(pending_64_bit_constants_.empty());
}
// Set up code descriptor.
desc->buffer = buffer_;
@@ -515,6 +507,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->constant_pool_size =
(constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
desc->origin = this;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
@@ -828,6 +822,19 @@ void Assembler::target_at_put(int pos, int target_pos) {
// Load the position of the label relative to the generated code object
// pointer in a register.
+ // The existing code must be a single 24-bit label chain link, followed by
+ // nops encoding the destination register. See mov_label_offset.
+
+ // Extract the destination register from the first nop instructions.
+ Register dst =
+ Register::from_code(Instruction::RmValue(instr_at(pos + kInstrSize)));
+ // In addition to the 24-bit label chain link, we expect to find one nop for
+ // ARMv7 and above, or two nops for ARMv6. See mov_label_offset.
+ DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ DCHECK(IsNop(instr_at(pos + 2 * kInstrSize), dst.code()));
+ }
+
// Here are the instructions we need to emit:
// For ARMv7: target24 => target16_1:target16_0
// movw dst, #target16_0
@@ -837,10 +844,6 @@ void Assembler::target_at_put(int pos, int target_pos) {
// orr dst, dst, #target8_1 << 8
// orr dst, dst, #target8_2 << 16
- // We extract the destination register from the emitted nop instruction.
- Register dst = Register::from_code(
- Instruction::RmValue(instr_at(pos + kInstrSize)));
- DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
DCHECK(is_uint24(target24));
if (is_uint8(target24)) {
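
For orientation, this is what the nop slots are rewritten into once the label is bound; a hedged sketch that emits the two sequences described in the comment above (the real code patches in place via instr_at_put rather than emitting fresh instructions, so this compiles only against V8's ARM backend headers and is purely illustrative):

```cpp
// target24 is the 24-bit label offset, already biased by Code::kHeaderSize.
void MaterializeLabelOffset(Assembler* assm, Register dst, uint32_t target24) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    assm->movw(dst, target24 & 0xffff);  // movw dst, #target16_0
    assm->movt(dst, target24 >> 16);     // movt dst, #target16_1
  } else {
    assm->mov(dst, Operand(target24 & 0xff));           // mov dst, #target8_0
    assm->orr(dst, dst, Operand(target24 & 0xff00));    // orr dst, #target8_1 << 8
    assm->orr(dst, dst, Operand(target24 & 0xff0000));  // orr dst, #target8_2 << 16
  }
}
```
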
@@ -1367,7 +1370,6 @@ void Assembler::b(int branch_offset, Condition cond) {
void Assembler::bl(int branch_offset, Condition cond) {
- positions_recorder()->WriteRecordedPositions();
DCHECK((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
@@ -1376,7 +1378,6 @@ void Assembler::bl(int branch_offset, Condition cond) {
void Assembler::blx(int branch_offset) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
DCHECK((branch_offset & 1) == 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
@@ -1386,14 +1387,12 @@ void Assembler::blx(int branch_offset) { // v5 and above
void Assembler::blx(Register target, Condition cond) { // v5 and above
- positions_recorder()->WriteRecordedPositions();
DCHECK(!target.is(pc));
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
- positions_recorder()->WriteRecordedPositions();
DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@@ -1501,9 +1500,6 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2,
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
@@ -1586,7 +1582,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- DCHECK(IsEnabled(MLS));
+ DCHECK(IsEnabled(ARMv7));
emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -1702,8 +1698,6 @@ void Assembler::usat(Register dst,
int satpos,
const Operand& src,
Condition cond) {
- // v6 and above.
- DCHECK(CpuFeatures::IsSupported(ARMv7));
DCHECK(!dst.is(pc) && !src.rm_.is(pc));
DCHECK((satpos >= 0) && (satpos <= 31));
DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -1994,9 +1988,6 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
- if (dst.is(pc)) {
- positions_recorder()->WriteRecordedPositions();
- }
addrmod2(cond | B26 | L, dst, src);
}
@@ -2038,7 +2029,6 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
- DCHECK(IsEnabled(ARMv7));
DCHECK(src.rm().is(no_reg));
DCHECK(!dst1.is(lr)); // r14.
DCHECK_EQ(0, dst1.code() % 2);
@@ -2053,10 +2043,56 @@ void Assembler::strd(Register src1, Register src2,
DCHECK(!src1.is(lr)); // r14.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
- DCHECK(IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
+// Load/Store exclusive instructions.
+void Assembler::ldrex(Register dst, Register src, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.75.
+ // cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
+}
+
+void Assembler::strex(Register src1, Register src2, Register dst,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.212.
+ // cond(31-28) | 00011000(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+ // Rt(3-0)
+ emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
+ src2.code());
+}
+
+void Assembler::ldrexb(Register dst, Register src, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.76.
+ // cond(31-28) | 00011101(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
+ 0xf9f);
+}
+
+void Assembler::strexb(Register src1, Register src2, Register dst,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.213.
+ // cond(31-28) | 00011100(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+ // Rt(3-0)
+ emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
+ 0xf9 * B4 | src2.code());
+}
+
+void Assembler::ldrexh(Register dst, Register src, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.78.
+ // cond(31-28) | 00011111(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
+ dst.code() * B12 | 0xf9f);
+}
+
+void Assembler::strexh(Register src1, Register src2, Register dst,
+ Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.215.
+ // cond(31-28) | 00011110(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+ // Rt(3-0)
+ emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
+ 0xf9 * B4 | src2.code());
+}
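
Together the six emitters above expose ARM's load-linked/store-conditional primitives at word, byte, and halfword width. A hedged usage sketch of the retry loop they are meant for, assuming an Assembler named assm is in scope and r0 and r2 are free scratch registers; the strex status register reads 0 when the store won:

```cpp
// Atomically increment the word at [r1]; clobbers r0 and r2.
Label retry;
assm.bind(&retry);
assm.ldrex(r0, r1);             // r0 = [r1], and open the exclusive monitor
assm.add(r0, r0, Operand(1));   // bump the loaded value
assm.strex(r2, r0, r1);         // try [r1] = r0; r2 = 0 on success, 1 on failure
assm.cmp(r2, Operand::Zero());
assm.b(ne, &retry);             // the reservation was lost: reload and retry
```
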
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
@@ -2122,7 +2158,11 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
} else {
svc(kStopCode + kMaxStopCode, cond);
}
- emit(reinterpret_cast<Instr>(msg));
+ // Do not embed the message string address! We used to do this, but that
+ // made snapshots created from position-independent executable builds
+ // non-deterministic.
+ // TODO(yangguo): remove this field entirely.
+ nop();
}
#else // def __arm__
if (cond != al) {
@@ -3371,6 +3411,69 @@ void Assembler::vcmp(const SwVfpRegister src1, const float src2,
0x5 * B9 | B6);
}
+void Assembler::vsel(Condition cond, const DwVfpRegister dst,
+ const DwVfpRegister src1, const DwVfpRegister src2) {
+ // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
+ // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
+ // 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = 1;
+
+ // VSEL has a special (restricted) condition encoding.
+ // eq(0b0000)... -> 0b00
+ // ge(0b1010)... -> 0b10
+ // gt(0b1100)... -> 0b11
+ // vs(0b0110)... -> 0b01
+ // No other conditions are supported.
+ int vsel_cond = (cond >> 30) & 0x3;
+ if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
+ // We can implement some other conditions by swapping the inputs.
+ DCHECK((cond == ne) || (cond == lt) || (cond == le) || (cond == vc));
+ std::swap(vn, vm);
+ std::swap(n, m);
+ }
+
+ emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
+ vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
+}
+
+void Assembler::vsel(Condition cond, const SwVfpRegister dst,
+ const SwVfpRegister src1, const SwVfpRegister src2) {
+ // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
+ // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
+ // 0(6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(CpuFeatures::IsSupported(ARMv8));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = 0;
+
+ // VSEL has a special (restricted) condition encoding.
+ // eq(0b0000)... -> 0b00
+ // ge(0b1010)... -> 0b10
+ // gt(0b1100)... -> 0b11
+ // vs(0b0110)... -> 0b01
+ // No other conditions are supported.
+ int vsel_cond = (cond >> 30) & 0x3;
+ if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
+ // We can implement some other conditions by swapping the inputs.
+ DCHECK((cond == ne) || (cond == lt) || (cond == le) || (cond == vc));
+ std::swap(vn, vm);
+ std::swap(n, m);
+ }
+
+ emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
+ vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
+}
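
Both overloads lean on the same property of the ARM condition encoding: a condition and its inverse (eq/ne, ge/lt, gt/le, vs/vc) differ only in the lowest of the four condition bits, so bits 31-30 already identify the pair, and the inverse member falls out of swapping the select's operands. The mapping, restated as a sketch:

```cpp
// eq = 0b0000, ne = 0b0001, ge = 0b1010, lt = 0b1011,
// gt = 0b1100, le = 0b1101, vs = 0b0110, vc = 0b0111   (bits 31-28)
// The top two bits match within each pair, which is exactly what
// (cond >> 30) & 0x3 extracts above.
bool NeedsSwappedInputs(Condition cond) {
  return (cond == ne) || (cond == lt) || (cond == le) || (cond == vc);
}
int VselCondEncoding(Condition cond) { return (cond >> 30) & 0x3; }
```
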
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
@@ -3745,8 +3848,8 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
// db is used to write raw data. The constant pool should be emitted or
// blocked before using db.
- DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
- DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+ DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -3756,8 +3859,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data) {
// dd is used to write raw data. The constant pool should be emitted or
// blocked before using dd.
- DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
- DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+ DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -3767,8 +3870,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::dq(uint64_t value) {
// dq is used to write raw data. The constant pool should be emitted or
// blocked before using dq.
- DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
- DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+ DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+ DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
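
db, dd, and dq share one contract, restated in each of the three comments: the constant pool must already be flushed or explicitly blocked, because a pool dumped mid-data would corrupt the stream. A hedged usage sketch with the RAII blocker this file already declares as a friend, again assuming an Assembler named assm:

```cpp
// Emit a small inline word table without risking a constant pool inside it.
{
  Assembler::BlockConstPoolScope block_const_pool(&assm);
  assm.dd(0x00000010);             // 32-bit raw words, no relocation recorded
  assm.dd(0x00000020);
  assm.dq(0x0123456789abcdefULL);  // one raw 64-bit value
}
```
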
@@ -3803,29 +3906,19 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
RelocInfo::Mode rmode,
intptr_t value) {
- DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
- rmode != RelocInfo::STATEMENT_POSITION &&
- rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64);
+ DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
+ rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
!(serializer_enabled() || rmode < RelocInfo::CELL);
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value, sharing_ok);
} else {
- DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
- if (num_pending_32_bit_constants_ == 0) {
+ DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
+ if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
- } else if (num_pending_32_bit_constants_ == kMinNumPendingConstants &&
- pending_32_bit_constants_ ==
- &pending_32_bit_constants_buffer_[0]) {
- // Inline buffer is full, switch to dynamically allocated buffer.
- pending_32_bit_constants_ =
- new ConstantPoolEntry[kMaxNumPending32Constants];
- std::copy(&pending_32_bit_constants_buffer_[0],
- &pending_32_bit_constants_buffer_[kMinNumPendingConstants],
- &pending_32_bit_constants_[0]);
}
ConstantPoolEntry entry(position, value, sharing_ok);
- pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
+ pending_32_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
@@ -3840,21 +3933,12 @@ ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value);
} else {
- DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
- if (num_pending_64_bit_constants_ == 0) {
+ DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
+ if (pending_64_bit_constants_.empty()) {
first_const_pool_64_use_ = position;
- } else if (num_pending_64_bit_constants_ == kMinNumPendingConstants &&
- pending_64_bit_constants_ ==
- &pending_64_bit_constants_buffer_[0]) {
- // Inline buffer is full, switch to dynamically allocated buffer.
- pending_64_bit_constants_ =
- new ConstantPoolEntry[kMaxNumPending64Constants];
- std::copy(&pending_64_bit_constants_buffer_[0],
- &pending_64_bit_constants_buffer_[kMinNumPendingConstants],
- &pending_64_bit_constants_[0]);
}
ConstantPoolEntry entry(position, value);
- pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
+ pending_64_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
@@ -3867,8 +3951,8 @@ ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
void Assembler::BlockConstPoolFor(int instructions) {
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ DCHECK(pending_32_bit_constants_.empty());
+ DCHECK(pending_64_bit_constants_.empty());
return;
}
@@ -3877,11 +3961,11 @@ void Assembler::BlockConstPoolFor(int instructions) {
// Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
- DCHECK((num_pending_32_bit_constants_ == 0) ||
+ DCHECK(pending_32_bit_constants_.empty() ||
(start - first_const_pool_32_use_ +
- num_pending_64_bit_constants_ * kDoubleSize <
+ pending_64_bit_constants_.size() * kDoubleSize <
kMaxDistToIntPool));
- DCHECK((num_pending_64_bit_constants_ == 0) ||
+ DCHECK(pending_64_bit_constants_.empty() ||
(start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
no_const_pool_before_ = pc_limit;
@@ -3896,8 +3980,8 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
- DCHECK(num_pending_32_bit_constants_ == 0);
- DCHECK(num_pending_64_bit_constants_ == 0);
+ DCHECK(pending_32_bit_constants_.empty());
+ DCHECK(pending_64_bit_constants_.empty());
return;
}
@@ -3911,8 +3995,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
- if ((num_pending_32_bit_constants_ == 0) &&
- (num_pending_64_bit_constants_ == 0)) {
+ if (pending_32_bit_constants_.empty() && pending_64_bit_constants_.empty()) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@@ -3924,9 +4007,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
int estimated_size_after_marker =
- num_pending_32_bit_constants_ * kPointerSize;
- bool has_int_values = (num_pending_32_bit_constants_ > 0);
- bool has_fp_values = (num_pending_64_bit_constants_ > 0);
+ pending_32_bit_constants_.size() * kPointerSize;
+ bool has_int_values = !pending_32_bit_constants_.empty();
+ bool has_fp_values = !pending_64_bit_constants_.empty();
bool require_64_bit_align = false;
if (has_fp_values) {
require_64_bit_align =
@@ -3935,7 +4018,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (require_64_bit_align) {
estimated_size_after_marker += kInstrSize;
}
- estimated_size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
+ estimated_size_after_marker +=
+ pending_64_bit_constants_.size() * kDoubleSize;
}
int estimated_size = size_up_to_marker + estimated_size_after_marker;
@@ -3954,7 +4038,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// The 64-bit constants are always emitted before the 32-bit constants, so
// we can ignore the effect of the 32-bit constants on estimated_size.
int dist64 = pc_offset() + estimated_size -
- num_pending_32_bit_constants_ * kPointerSize -
+ pending_32_bit_constants_.size() * kPointerSize -
first_const_pool_64_use_;
if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
(!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
@@ -3973,7 +4057,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Deduplicate constants.
int size_after_marker = estimated_size_after_marker;
- for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+ for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
DCHECK(!entry.is_merged());
for (int j = 0; j < i; j++) {
@@ -3986,7 +4070,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
- for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+ for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
DCHECK(!entry.is_merged());
if (!entry.sharing_ok()) continue;
@@ -4031,7 +4115,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
- for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+ for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
Instr instr = instr_at(entry.position());
@@ -4060,7 +4144,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// Emit 32-bit constant pool entries.
- for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+ for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
Instr instr = instr_at(entry.position());
@@ -4094,8 +4178,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
- num_pending_32_bit_constants_ = 0;
- num_pending_64_bit_constants_ = 0;
+ pending_32_bit_constants_.clear();
+ pending_64_bit_constants_.clear();
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
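
The deduplication loops rewritten in this function merge a later pending entry into an earlier sharable one holding the same value, so each constant occupies one pool slot and every referring load is patched to it. The shape of that pass as a standalone, compilable sketch:

```cpp
#include <cstddef>
#include <vector>

struct PendingEntry {
  long value;
  int merged_index = -1;  // earlier entry this one reuses, if any
  bool is_merged() const { return merged_index >= 0; }
};

// Quadratic scan; fine for the short lists kept between pool flushes.
void Deduplicate(std::vector<PendingEntry>* pending) {
  for (size_t i = 0; i < pending->size(); i++) {
    for (size_t j = 0; j < i; j++) {
      if ((*pending)[j].value == (*pending)[i].value) {
        (*pending)[i].merged_index = static_cast<int>(j);
        break;  // each merge shrinks the emitted pool by one slot
      }
    }
  }
}
```
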
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 08ad64c2a2..0b9cd91733 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -57,12 +57,22 @@ namespace internal {
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
+#define FLOAT_REGISTERS(V) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
+ V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
+ V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
+ V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
+
#define DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+#define SIMD128_REGISTERS(V) \
+ V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
+ V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
@@ -112,8 +122,6 @@ struct Register {
Register r = {code};
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -141,9 +149,22 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
+static const bool kSimpleFPAliasing = false;
+
// Single word VFP register.
struct SwVfpRegister {
+ enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+ FLOAT_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kAfterLast,
+ kCode_no_reg = -1
+ };
+
+ static const int kMaxNumRegisters = Code::kAfterLast;
+
static const int kSizeInBytes = 4;
+
bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -154,6 +175,10 @@ struct SwVfpRegister {
DCHECK(is_valid());
return 1 << reg_code;
}
+ static SwVfpRegister from_code(int code) {
+ SwVfpRegister r = {code};
+ return r;
+ }
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
*m = reg_code & 0x1;
@@ -163,9 +188,10 @@ struct SwVfpRegister {
int reg_code;
};
+typedef SwVfpRegister FloatRegister;
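
split_code mirrors how VFP instructions encode a single-precision register: the five-bit register number is stored as a four-bit Vm/Vn/Vd field plus a one-bit M/N/D extension. A round-trip sketch of the single-precision split shown above (doubles split the other way, high bit out and low four bits in the field):

```cpp
// s13 = 0b01101 splits into vm = 0b0110 (the field) and m = 1 (the extra bit).
void SplitSingle(int code, int* vm, int* m) {
  *m = code & 0x1;
  *vm = code >> 1;
}
int JoinSingle(int vm, int m) { return (vm << 1) | m; }
```
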
// Double word VFP register.
-struct DoubleRegister {
+struct DwVfpRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -184,10 +210,8 @@ struct DoubleRegister {
// d15: scratch register.
static const int kSizeInBytes = 8;
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
@@ -197,8 +221,8 @@ struct DoubleRegister {
return 1 << reg_code;
}
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
+ static DwVfpRegister from_code(int code) {
+ DwVfpRegister r = {code};
return r;
}
void split_code(int* vm, int* m) const {
@@ -211,7 +235,7 @@ struct DoubleRegister {
};
-typedef DoubleRegister DwVfpRegister;
+typedef DwVfpRegister DoubleRegister;
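
This rename is easy to misread: the struct and its alias swap roles, so DwVfpRegister is now the primary type and DoubleRegister the typedef, the reverse of the old arrangement. Platform-independent code that names DoubleRegister keeps compiling unchanged:

```cpp
// before: struct DoubleRegister { ... };  typedef DoubleRegister DwVfpRegister;
// after:  struct DwVfpRegister  { ... };  typedef DwVfpRegister  DoubleRegister;
```
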
// Double word VFP register d0-15.
@@ -975,6 +999,14 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);
+ // Load/Store exclusive instructions
+ void ldrex(Register dst, Register src, Condition cond = al);
+ void strex(Register src1, Register src2, Register dst, Condition cond = al);
+ void ldrexb(Register dst, Register src, Condition cond = al);
+ void strexb(Register src1, Register src2, Register dst, Condition cond = al);
+ void ldrexh(Register dst, Register src, Condition cond = al);
+ void strexh(Register src1, Register src2, Register dst, Condition cond = al);
+
// Preload instructions
void pld(const MemOperand& address);
@@ -1225,6 +1257,17 @@ class Assembler : public AssemblerBase {
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const float src2,
const Condition cond = al);
+
+ // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
+ void vsel(const Condition cond,
+ const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2);
+ void vsel(const Condition cond,
+ const SwVfpRegister dst,
+ const SwVfpRegister src1,
+ const SwVfpRegister src2);
+
void vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
@@ -1290,6 +1333,10 @@ class Assembler : public AssemblerBase {
vstm(db_w, sp, src, src, cond);
}
+ void vpush(SwVfpRegister src, Condition cond = al) {
+ vstm(db_w, sp, src, src, cond);
+ }
+
void vpop(DwVfpRegister dst, Condition cond = al) {
vldm(ia_w, sp, dst, dst, cond);
}
@@ -1357,7 +1404,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Record the emission of a constant pool.
//
@@ -1390,10 +1437,6 @@ class Assembler : public AssemblerBase {
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
@@ -1523,10 +1566,10 @@ class Assembler : public AssemblerBase {
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
- DCHECK((num_pending_32_bit_constants_ == 0) ||
- (start + num_pending_64_bit_constants_ * kDoubleSize <
+ DCHECK(pending_32_bit_constants_.empty() ||
+ (start + pending_64_bit_constants_.size() * kDoubleSize <
(first_const_pool_32_use_ + kMaxDistToIntPool)));
- DCHECK((num_pending_64_bit_constants_ == 0) ||
+ DCHECK(pending_64_bit_constants_.empty() ||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
// Two cases:
@@ -1593,14 +1636,8 @@ class Assembler : public AssemblerBase {
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
- ConstantPoolEntry pending_32_bit_constants_buffer_[kMinNumPendingConstants];
- ConstantPoolEntry pending_64_bit_constants_buffer_[kMinNumPendingConstants];
- ConstantPoolEntry* pending_32_bit_constants_;
- ConstantPoolEntry* pending_64_bit_constants_;
- // Number of pending constant pool entries in the 32 bits buffer.
- int num_pending_32_bit_constants_;
- // Number of pending constant pool entries in the 64 bits buffer.
- int num_pending_64_bit_constants_;
+ std::vector<ConstantPoolEntry> pending_32_bit_constants_;
+ std::vector<ConstantPoolEntry> pending_64_bit_constants_;
ConstantPoolBuilder constant_pool_builder_;
@@ -1639,8 +1676,6 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 31e3e95f03..264f24f8da 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -22,60 +22,15 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ lsl(r5, r0, Operand(kPointerSizeLog2));
+ __ str(r1, MemOperand(sp, r5));
+ __ Push(r1);
+ __ Push(r2);
+ __ add(r0, r0, Operand(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
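
The replacement stub is short because it defers the whole job to the runtime. A hedged annotated restatement, with register roles inferred from the surrounding calling convention (r0 = argument count, r1 = constructor, r2 = allocation site) rather than stated by this diff:

```cpp
__ lsl(r5, r0, Operand(kPointerSizeLog2));  // r5 = argc in bytes
__ str(r1, MemOperand(sp, r5));             // write the constructor past the args
__ Push(r1);                                // push two more runtime arguments
__ Push(r2);
__ add(r0, r0, Operand(3));                 // argc now covers the three extras
__ TailCallRuntime(Runtime::kNewArray);     // the runtime allocates the array
```
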
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -83,20 +38,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -747,53 +694,19 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
-
void MathPowStub::Generate(MacroAssembler* masm) {
- const Register base = r1;
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(r2));
- const Register heapnumbermap = r5;
- const Register heapnumber = r0;
- const DwVfpRegister double_base = d0;
- const DwVfpRegister double_exponent = d1;
- const DwVfpRegister double_result = d2;
- const DwVfpRegister double_scratch = d3;
+ const LowDwVfpRegister double_base = d0;
+ const LowDwVfpRegister double_exponent = d1;
+ const LowDwVfpRegister double_result = d2;
+ const LowDwVfpRegister double_scratch = d3;
const SwVfpRegister single_scratch = s6;
const Register scratch = r9;
const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
- if (exponent_type() == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
- __ ldr(base, MemOperand(sp, 1 * kPointerSize));
- __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
-
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
- __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
-
- __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent);
-
- __ bind(&base_is_smi);
- __ vmov(single_scratch, scratch);
- __ vcvt_f64_s32(double_base, single_scratch);
- __ bind(&unpack_exponent);
-
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
- __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ b(ne, &call_runtime);
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -802,57 +715,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
if (exponent_type() != INTEGER) {
- Label int_exponent_convert;
// Detect integer exponents stored as double.
- __ vcvt_u32_f64(single_scratch, double_exponent);
- // We do not check for NaN or Infinity here because comparing numbers on
- // ARM correctly distinguishes NaNs. We end up calling the built-in.
- __ vcvt_f64_u32(double_scratch, single_scratch);
- __ VFPCompareAndSetFlags(double_scratch, double_exponent);
- __ b(eq, &int_exponent_convert);
-
- if (exponent_type() == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half;
-
- // Test for 0.5.
- __ vmov(double_scratch, 0.5, scratch);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, &not_plus_half);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY, scratch);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vneg(double_result, double_scratch, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vsqrt(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&not_plus_half);
- __ vmov(double_scratch, -0.5, scratch);
- __ VFPCompareAndSetFlags(double_exponent, double_scratch);
- __ b(ne, &call_runtime);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ vmov(double_scratch, -V8_INFINITY, scratch);
- __ VFPCompareAndSetFlags(double_base, double_scratch);
- __ vmov(double_result, kDoubleRegZero, eq);
- __ b(eq, &done);
-
- // Add +0 to convert -0 to +0.
- __ vadd(double_scratch, double_base, kDoubleRegZero);
- __ vmov(double_result, 1.0, scratch);
- __ vsqrt(double_scratch, double_scratch);
- __ vdiv(double_result, double_result, double_scratch);
- __ jmp(&done);
- }
+ __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
+ __ b(eq, &int_exponent);
__ push(lr);
{
@@ -860,16 +725,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ PrepareCallCFunction(0, 2, scratch);
__ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
}
__ pop(lr);
__ MovFromFloatResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
- __ vcvt_u32_f64(single_scratch, double_exponent);
- __ vmov(scratch, single_scratch);
+ __ b(&done);
}
// Calculate power with integer exponent.
@@ -887,12 +747,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Get absolute value of exponent.
__ cmp(scratch, Operand::Zero());
- __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
- __ sub(scratch, scratch2, scratch, LeaveCC, mi);
+ __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);
Label while_true;
__ bind(&while_true);
- __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
+ __ mov(scratch, Operand(scratch, LSR, 1), SetCC);
__ vmul(double_result, double_result, double_scratch, cs);
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
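
This loop is binary exponentiation: each iteration shifts the exponent right by one, multiplies the accumulated result in when the shifted-out bit was set (the cs-predicated vmul), and squares the base while bits remain (the ne-predicated vmul). The ASR-to-LSR change matters for a kMinInt exponent, whose negation is still negative: an arithmetic shift would smear the sign bit and never reach zero, while a logical shift terminates. The same scheme in plain C++, as a hedged reference:

```cpp
#include <cstdint>

// Computes base^e for e >= 1, mirroring the emitted loop; the unsigned
// exponent plays the role of the LSR-shifted scratch register.
double PowPositiveInt(double base, uint32_t e) {
  double result = 1.0;
  do {
    if (e & 1) result *= base;  // cs: the bit shifted out was set
    e >>= 1;                    // LSR #1 with SetCC
    if (e != 0) base *= base;   // ne: more bits remain, keep squaring
  } while (e != 0);
  return result;
}
```
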
@@ -911,38 +770,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(
- heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- DCHECK(heapnumber.is(r0));
- __ Ret(2);
- } else {
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
- }
- __ pop(lr);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
+ __ push(lr);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
-}
+ __ pop(lr);
+ __ MovFromFloatResult(double_result);
+ __ bind(&done);
+ __ Ret();
+}
bool CEntryStub::NeedsImmovableCode() {
return true;
@@ -953,13 +794,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1003,7 +843,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles());
+ __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
__ mov(r4, Operand(r0));
@@ -1072,8 +914,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
}
// Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
- __ VFPEnsureFPSCRState(r3);
-
// Check result for exception sentinel.
Label exception_returned;
__ CompareRoot(r0, Heap::kExceptionRootIndex);
@@ -1183,7 +1023,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, 0.0);
- __ VFPEnsureFPSCRState(r4);
// Get address of argv, see stm above.
// r0: code entry
@@ -1266,12 +1105,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
- // Clear any pending exceptions.
- __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- __ str(r5, MemOperand(ip));
-
// Invoke the function by calling through JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
@@ -1333,126 +1166,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = r1; // Object (lhs).
- Register const function = r0; // Function (rhs).
- Register const object_map = r2; // Map of {object}.
- Register const function_map = r3; // Map of {function}.
- Register const function_prototype = r4; // Prototype of {function}.
- Register const scratch = r5;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ b(ne, &fast_case);
- __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
- __ b(ne, &fast_case);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow_case);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ Ret();
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsConstructor));
- __ b(eq, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ b(ne, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ ldr(function_prototype,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
- __ b(ne, &function_prototype_valid);
- __ ldr(function_prototype,
- FieldMemOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Register const object_instance_type = function_map;
- Register const map_bit_field = function_map;
- Register const null = scratch;
- Register const result = r0;
-
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ LoadRoot(null, Heap::kNullValueRootIndex);
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ tst(map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &fast_runtime_fallback);
- // Check if the current object is a Proxy.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- __ b(eq, &fast_runtime_fallback);
-
- __ ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object, function_prototype);
- __ b(eq, &done);
- __ cmp(object, null);
- __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ b(ne, &loop);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // Found Proxy or access check needed: Call the runtime
- __ bind(&fast_runtime_fallback);
- __ Push(object, function_prototype);
- // Invalidate the instanceof cache.
- __ Move(scratch, Smi::FromInt(0));
- __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ Push(object, function);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -1488,7 +1201,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1787,9 +1499,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
+ __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &runtime);
- // Check that the JSArray is in fast case.
+ // Check that the object has fast elements.
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
@@ -1915,9 +1627,11 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r0);
__ Push(r3, r2, r1, r0);
+ __ Push(cp);
__ CallStub(stub);
+ __ Pop(cp);
__ Pop(r3, r2, r1, r0);
__ SmiUntag(r0);
}
@@ -1932,6 +1646,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r2 : feedback vector
// r3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -1951,7 +1666,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register weak_value = r9;
__ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
- __ b(eq, &done);
+ __ b(eq, &done_increment_count);
__ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &done);
__ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
@@ -1974,7 +1689,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, &megamorphic);
- __ jmp(&done);
+ __ jmp(&done_increment_count);
__ bind(&miss);
@@ -2003,11 +1718,28 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ b(&done);
+ __ b(&done_initialize_count);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+ __ bind(&done_initialize_count);
+ // Initialize the call counter.
+ __ Move(r5, Operand(Smi::FromInt(1)));
+ __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
+ __ b(&done);
+
+ __ bind(&done_increment_count);
+
+ // Increment the call count for monomorphic function calls.
+ __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+ __ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ ldr(r4, FieldMemOperand(r5, 0));
+ __ add(r4, r4, Operand(Smi::FromInt(1)));
+ __ str(r4, FieldMemOperand(r5, 0));
+
__ bind(&done);
}
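
The counter bookkeeping added here works directly on tagged values: with a Smi tag of 0 in the low bit, Smi(a) + Smi(b) == Smi(a + b), so the increment needs no untag/retag round trip. A compilable sketch of the invariant, assuming the 32-bit ARM Smi layout:

```cpp
#include <cassert>
#include <cstdint>

// 31-bit payload, low tag bit 0: tagging is a left shift by one.
int32_t SmiFromInt(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

int main() {
  int32_t count = SmiFromInt(7);
  count += SmiFromInt(1);          // the tagged add the stub emits
  assert(count == SmiFromInt(8));  // tag bit still 0, payload is 8
  return 0;
}
```
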
@@ -2069,7 +1801,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(r3, FieldMemOperand(r2, 0));
- __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ add(r3, r3, Operand(Smi::FromInt(1)));
__ str(r3, FieldMemOperand(r2, 0));
__ mov(r2, r4);
@@ -2117,7 +1849,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(r3, FieldMemOperand(r2, 0));
- __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ add(r3, r3, Operand(Smi::FromInt(1)));
__ str(r3, FieldMemOperand(r2, 0));
__ bind(&call_function);
@@ -2188,7 +1920,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ b(ne, &miss);
// Initialize the call counter.
- __ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Move(r5, Operand(Smi::FromInt(1)));
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
@@ -2199,9 +1931,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(r1);
+ __ Push(cp, r1);
__ CallStub(&create_stub);
- __ Pop(r1);
+ __ Pop(cp, r1);
}
__ jmp(&call_function);
@@ -2286,13 +2018,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
}
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r0);
@@ -2622,67 +2348,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r3: from index (untagged)
__ SmiTag(r3, r3);
StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
- STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
generator.SkipSlow(masm, &runtime);
}
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in r0.
- STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ Ret(eq);
-
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
- // r0: receiver
- // r1: receiver instance type
- __ Ret(eq);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in r0.
- __ AssertNotNumber(r0);
-
- __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
- // r0: receiver
- // r1: receiver instance type
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub, lo);
-
- Label not_oddball;
- __ cmp(r1, Operand(ODDBALL_TYPE));
- __ b(ne, &not_oddball);
- __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ Push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in r0.
- __ AssertString(r0);
-
- // Check if string has a cached array index.
- Label runtime;
- __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
- __ b(ne, &runtime);
- __ IndexFromHash(r2, r0);
- __ Ret();
-
- __ bind(&runtime);
- __ Push(r0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r0.
Label is_number;
@@ -2848,7 +2520,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load r2 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(r2, handle(isolate()->heap()->undefined_value()));
+ __ Move(r2, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3227,7 +2899,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
// GC safe. The RegExp backend also relies on this.
__ str(lr, MemOperand(sp, 0));
__ blx(ip); // Call the C++ function.
- __ VFPEnsureFPSCRState(r2);
__ ldr(pc, MemOperand(sp, 0));
}
@@ -3694,14 +3365,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -3839,11 +3510,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, feedback,
- receiver_map, scratch1, r9);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, name, feedback, receiver_map, scratch1, r9);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -3922,37 +3590,30 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ jmp(&compare_map);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r1
- Register key = VectorStoreICDescriptor::NameRegister(); // r2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // r3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // r4
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0)); // r0
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // r2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // r3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // r4
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0)); // r0
Register feedback = r5;
Register receiver_map = r6;
Register scratch1 = r9;
@@ -3984,11 +3645,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
- scratch1, scratch2);
+ masm->isolate()->store_stub_cache()->GenerateProbe(
+ masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
__ bind(&miss);
StoreIC::GenerateMiss(masm);
@@ -3998,13 +3656,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ jmp(&compare_map);
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4070,13 +3726,12 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ jmp(miss);
}
-
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r1
- Register key = VectorStoreICDescriptor::NameRegister(); // r2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // r3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // r4
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0)); // r0
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // r2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // r3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // r4
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0)); // r0
Register feedback = r5;
Register receiver_map = r6;
Register scratch1 = r9;
@@ -4331,19 +3986,13 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
@@ -4351,8 +4000,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
@@ -4372,13 +4019,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4460,7 +4109,7 @@ void InternalArrayConstructorStub::GenerateCase(
InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0, lo);
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN, hi);
if (IsFastPackedElementsKind(kind)) {
@@ -4562,15 +4211,15 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ str(r2, MemOperand(r0, JSObject::kMapOffset));
+ __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, MemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, MemOperand(r0, JSObject::kElementsOffset));
+ __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ add(r1, r0, Operand(JSObject::kHeaderSize));
+ __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
- // -- r0 : result (untagged)
+ // -- r0 : result (tagged)
// -- r1 : result fields (untagged)
// -- r5 : result end (untagged)
// -- r2 : initial map
@@ -4588,10 +4237,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(r1, r5, r6);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ add(r0, r0, Operand(kHeapObjectTag));
__ Ret();
}
__ bind(&slack_tracking);
@@ -4610,10 +4255,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(r1, r5, r6);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ add(r0, r0, Operand(kHeapObjectTag));
-
// Check if we can finalize the instance size.
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ tst(r3, Operand(Map::ConstructionCounter::kMask));
@@ -4640,10 +4281,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(r2);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ sub(r0, r0, Operand(kHeapObjectTag));
__ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ sub(r5, r5, Operand(kHeapObjectTag));
__ b(&done_allocate);
// Fall back to %NewObject.
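
The hunks above drop the explicit `add r0, r0, kHeapObjectTag` steps and switch the field stores from `MemOperand` to `FieldMemOperand`, because `Allocate` now hands back an already-tagged pointer. A minimal standalone sketch of the arithmetic involved (plain C++, not V8 code; the tag value matches the `STATIC_ASSERT(kHeapObjectTag == 1)` lines above, everything else is illustrative):

    #include <cassert>
    #include <cstdint>

    namespace sketch {
    constexpr std::uintptr_t kHeapObjectTag = 1;

    // Untagged raw address -> tagged pointer, as Allocate() now returns.
    std::uintptr_t Tag(std::uintptr_t raw) { return raw + kHeapObjectTag; }

    // Equivalent of FieldMemOperand(obj, offset): the offset is relative to
    // the untagged object start, so the tag is folded into the displacement.
    std::uintptr_t FieldAddress(std::uintptr_t tagged, std::uintptr_t offset) {
      return tagged + offset - kHeapObjectTag;
    }
    }  // namespace sketch

    int main() {
      std::uintptr_t raw = 0x1000;  // hypothetical allocation result
      std::uintptr_t obj = sketch::Tag(raw);
      assert(sketch::FieldAddress(obj, 0) == raw);  // map slot at offset 0
      return 0;
    }
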
@@ -4662,20 +4303,20 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(r2, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r2 point to the JavaScript frame.
+ __ mov(r2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
- __ b(ne, &loop);
+ __ b(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
@@ -4690,10 +4331,10 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// specified by the function's internal formal parameter count.
Label rest_parameters;
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r1,
- FieldMemOperand(r1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ sub(r0, r0, r1, SetCC);
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ sub(r0, r0, r3, SetCC);
__ b(gt, &rest_parameters);
// Return an empty rest parameter array.
@@ -4706,7 +4347,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in r0.
@@ -4740,15 +4381,16 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- r0 : number of rest parameters (tagged)
+ // -- r1 : function
// -- r2 : pointer to first rest parameters
// -- lr : return address
// -----------------------------------
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
- __ mov(r1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r3.
@@ -4780,17 +4422,25 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ mov(r0, r4);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ b(gt, &too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r1);
- __ Push(r0, r2, r1);
+ __ SmiTag(r6);
+ __ Push(r0, r2, r6);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ mov(r3, r0);
__ Pop(r0, r2);
}
__ jmp(&done_allocate);
+
+ // Fall back to %NewRestParameter.
+ __ bind(&too_big_for_new_space);
+ __ push(r1);
+ __ TailCallRuntime(Runtime::kNewRestParameter);
}
}
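
The new fallback path only bails out to the runtime when the computed allocation size exceeds the regular-page limit; smaller rest objects still take the in-line `Runtime::kAllocateInNewSpace` retry. A rough model of the size check (all constants here are assumptions chosen for illustration; the real values come from the V8 headers):

    #include <cstdio>

    namespace sketch {
    constexpr int kPointerSize = 4;                 // ARM32
    constexpr int kJSArraySize = 4 * kPointerSize;  // assumed object layout
    constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;
    constexpr int kMaxRegularHeapObjectSize = 512 * 1024;  // assumed limit

    bool FitsInNewSpace(int rest_count) {
      int bytes =
          kJSArraySize + kFixedArrayHeaderSize + rest_count * kPointerSize;
      return bytes <= kMaxRegularHeapObjectSize;
    }
    }  // namespace sketch

    int main() {
      // 1: in-line allocation path; 0: Runtime::kNewRestParameter fallback.
      std::printf("3 args: %d\n", sketch::FitsInNewSpace(3));
      std::printf("1M args: %d\n", sketch::FitsInNewSpace(1 << 20));
      return 0;
    }
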
@@ -4804,23 +4454,40 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r1);
+ // Make r9 point to the JavaScript frame.
+ __ mov(r9, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
+ __ cmp(ip, r1);
+ __ b(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ add(r3, fp, Operand(r2, LSL, kPointerSizeLog2 - 1));
+ __ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
// r1 : function
// r2 : number of parameters (tagged)
// r3 : parameters pointer
+ // r9 : JavaScript frame pointer
// Registers used over whole function:
// r5 : arguments count (tagged)
// r6 : mapped parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
__ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
@@ -4863,7 +4530,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
+ __ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);
// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)
@@ -5009,20 +4676,20 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(r2, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r2 point to the JavaScript frame.
+ __ mov(r2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
- __ b(ne, &loop);
+ __ b(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -5032,9 +4699,9 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
- r1, SharedFunctionInfo::kFormalParameterCountOffset));
+ r4, SharedFunctionInfo::kFormalParameterCountOffset));
__ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ add(r2, r2,
Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
@@ -5052,15 +4719,16 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- r0 : number of rest parameters (tagged)
+ // -- r1 : function
// -- r2 : pointer to first rest parameters
// -- lr : return address
// -----------------------------------
// Allocate space for the strict arguments object plus the backing store.
Label allocate, done_allocate;
- __ mov(r1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
- __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r3.
@@ -5092,44 +4760,25 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ mov(r0, r4);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ b(gt, &too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r1);
- __ Push(r0, r2, r1);
+ __ SmiTag(r6);
+ __ Push(r0, r2, r6);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ mov(r3, r0);
__ Pop(r0, r2);
}
__ b(&done_allocate);
-}
-
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context = cp;
- Register result = r0;
- Register slot = r2;
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = result;
- }
-
- // Load the PropertyCell value at the specified slot.
- __ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
- __ ldr(result, ContextMemOperand(result));
- __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
-
- // If the result is not the_hole, return. Otherwise, handle in the runtime.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- __ Ret(ne);
- // Fallback to runtime.
- __ SmiTag(slot);
- __ push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ push(r1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
@@ -5423,7 +5072,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// context save
__ push(context);
@@ -5457,7 +5110,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
@@ -5474,9 +5127,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc()));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ mov(ip, Operand::Zero());
- __ str(ip, MemOperand(r0, 3 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5493,8 +5143,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
- MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
- MemOperand* stack_space_operand = &is_construct_call_operand;
+ MemOperand length_operand = MemOperand(sp, 3 * kPointerSize);
+ MemOperand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
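
For reference, the implicit-argument layout the stub now builds, transcribed into a standalone enum. Only the indices asserted in the hunk above (0, 1, 2, 7, and the length) are taken from the source; the middle slots are inferred from the push sequence and flagged as assumptions:

    #include <cassert>

    // A sketch of the callback argument layout after the new-target slot was
    // appended (indices 3..6 are assumptions, not asserted in this diff).
    enum FunctionCallbackArgs {
      kHolderIndex = 0,
      kIsolateIndex = 1,
      kReturnValueDefaultValueIndex = 2,
      // ... return value, data, callee, context save (assumed order) ...
      kNewTargetIndex = 7,
      kArgsLength = 8
    };

    int main() {
      // new target is pushed first, so it occupies the highest index, and
      // the implicit-argument count grows from 7 to 8.
      assert(kNewTargetIndex == kArgsLength - 1);
      return 0;
    }
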
@@ -5505,16 +5155,36 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- r2 : api_function_address
- // -----------------------------------
-
- Register api_function_address = ApiGetterDescriptor::function_address();
- DCHECK(api_function_address.is(r2));
-
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = r4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = r2;
+
+ __ push(receiver);
+ // Push data from AccessorInfo.
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ push(scratch);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch, scratch);
+ __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch, holder);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5534,6 +5204,10 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ ldr(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5541,7 +5215,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
kStackUnwindSpace, NULL, return_value_operand, NULL);
}
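
A standalone cross-check of the push sequence above against the `STATIC_ASSERT`ed indices: with a downward-growing stack, the first value pushed ends up at the highest `args_` index. The vector-based model below is illustrative only:

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      // The stack grows down; pushing appends here, so index 0 of args_ is
      // the most recently pushed value.
      std::vector<std::string> pushes = {
          "receiver",        // kThisIndex == 6
          "data",            // kDataIndex == 5
          "return_value",    // kReturnValueOffset == 4
          "return_default",  // kReturnValueDefaultValueIndex == 3
          "isolate",         // kIsolateIndex == 2
          "holder",          // kHolderIndex == 1
          "should_throw"     // kShouldThrowOnErrorIndex == 0
      };
      auto at = [&](int index) { return pushes[pushes.size() - 1 - index]; };
      assert(at(6) == "receiver");
      assert(at(0) == "should_throw");
      assert(pushes.size() == 7);  // kArgsLength
      return 0;
    }
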
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 7e1a5500f1..75801454e8 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_ARM
+#include <memory>
+
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"
@@ -16,75 +18,12 @@ namespace internal {
#define __ masm.
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
- return Simulator::current(isolate)
- ->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
-
- {
- DwVfpRegister input = d0;
- DwVfpRegister result = d1;
- DwVfpRegister double_scratch1 = d2;
- DwVfpRegister double_scratch2 = d3;
- Register temp1 = r4;
- Register temp2 = r5;
- Register temp3 = r6;
-
- if (masm.use_eabi_hardfloat()) {
- // Input value is in d0 anyway, nothing to do.
- } else {
- __ vmov(input, r0, r1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (masm.use_eabi_hardfloat()) {
- __ vmov(d0, result);
- } else {
- __ vmov(r0, r1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
- fast_exp_arm_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
- if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
@@ -242,7 +181,6 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
- if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
@@ -450,6 +388,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ mov(lr, Operand(length, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+ __ sub(array, array, Operand(kHeapObjectTag));
// array: destination FixedDoubleArray, not tagged as heap object.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r4: source FixedArray.
@@ -594,11 +533,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(array_size, array_size, Operand(length, LSL, 1));
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, not tagged as heap object
+ // array: destination FixedArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ str(scratch, MemOperand(array, HeapObject::kMapOffset));
+ __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
+
+ __ sub(array, array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
@@ -791,94 +732,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DwVfpRegister input,
- DwVfpRegister result,
- DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- DCHECK(!input.is(result));
- DCHECK(!input.is(double_scratch1));
- DCHECK(!input.is(double_scratch2));
- DCHECK(!result.is(double_scratch1));
- DCHECK(!result.is(double_scratch2));
- DCHECK(!double_scratch1.is(double_scratch2));
- DCHECK(!temp1.is(temp2));
- DCHECK(!temp1.is(temp3));
- DCHECK(!temp2.is(temp3));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label zero, infinity, done;
-
- __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ vldr(double_scratch1, ExpConstant(0, temp3));
- __ VFPCompareAndSetFlags(double_scratch1, input);
- __ b(ge, &zero);
-
- __ vldr(double_scratch2, ExpConstant(1, temp3));
- __ VFPCompareAndSetFlags(input, double_scratch2);
- __ b(ge, &infinity);
-
- __ vldr(double_scratch1, ExpConstant(3, temp3));
- __ vldr(result, ExpConstant(4, temp3));
- __ vmul(double_scratch1, double_scratch1, input);
- __ vadd(double_scratch1, double_scratch1, result);
- __ VmovLow(temp2, double_scratch1);
- __ vsub(double_scratch1, double_scratch1, result);
- __ vldr(result, ExpConstant(6, temp3));
- __ vldr(double_scratch2, ExpConstant(5, temp3));
- __ vmul(double_scratch1, double_scratch1, double_scratch2);
- __ vsub(double_scratch1, double_scratch1, input);
- __ vsub(result, result, double_scratch1);
- __ vmul(double_scratch2, double_scratch1, double_scratch1);
- __ vmul(result, result, double_scratch2);
- __ vldr(double_scratch2, ExpConstant(7, temp3));
- __ vmul(result, result, double_scratch2);
- __ vsub(result, result, double_scratch1);
- // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
- DCHECK(*reinterpret_cast<double*>
- (ExternalReference::math_exp_constants(8).address()) == 1);
- __ vmov(double_scratch2, 1);
- __ vadd(result, result, double_scratch2);
- __ mov(temp1, Operand(temp2, LSR, 11));
- __ Ubfx(temp2, temp2, 0, 11);
- __ add(temp1, temp1, Operand(0x3ff));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ add(temp3, temp3, Operand(temp2, LSL, 3));
- __ ldm(ia, temp3, temp2.bit() | temp3.bit());
- // The first word loaded goes into the lower-numbered register.
- if (temp2.code() < temp3.code()) {
- __ orr(temp1, temp3, Operand(temp1, LSL, 20));
- __ vmov(double_scratch1, temp2, temp1);
- } else {
- __ orr(temp1, temp2, Operand(temp1, LSL, 20));
- __ vmov(double_scratch1, temp3, temp1);
- }
- __ vmul(result, result, double_scratch1);
- __ b(&done);
-
- __ bind(&zero);
- __ vmov(result, kDoubleRegZero);
- __ b(&done);
-
- __ bind(&infinity);
- __ vldr(result, ExpConstant(2, temp3));
-
- __ bind(&done);
-}
-
#undef __
#ifdef DEBUG
@@ -893,7 +746,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(
+ std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 880825a1be..6d328bd117 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -5,7 +5,6 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -28,22 +27,6 @@ class StringCharLoadGenerator : public AllStatic {
};
-class MathExpGenerator : public AllStatic {
- public:
- // Register input isn't modified. All other registers are clobbered.
- static void EmitMathExp(MacroAssembler* masm,
- DwVfpRegister input,
- DwVfpRegister result,
- DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 2785b755d5..c569e6615b 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -66,15 +66,12 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
- int call_size_in_bytes =
- MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
- deopt_entry,
- RelocInfo::NONE32);
+ int call_size_in_bytes = MacroAssembler::CallDeoptimizerSize();
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(isolate, call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
+ patcher.masm()->CallDeoptimizer(deopt_entry);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
@@ -189,8 +186,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
@@ -307,15 +303,50 @@ void Deoptimizer::TableEntryGenerator::Generate() {
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label done;
- for (int i = 0; i < count(); i++) {
- int start = masm()->pc_offset();
- USE(start);
- __ mov(ip, Operand(i));
- __ b(&done);
- DCHECK(masm()->pc_offset() - start == table_entry_size_);
+
+ // We need to be able to generate immediates up to kMaxNumberOfEntries. On
+ // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
+ // need two instructions.
+ STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(masm(), ARMv7);
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ movw(ip, i);
+ __ b(&done);
+ DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
+ }
+ __ bind(&done);
+ } else {
+ // We want to keep table_entry_size_ == 8 (since this is the common case),
+ // but we need two instructions to load most immediates over 0xff. To handle
+ // this, we set the low byte in the main table, and then set the high byte
+ // in a separate table if necessary.
+ Label high_fixes[256];
+ int high_fix_max = (count() - 1) >> 8;
+ DCHECK_GT(arraysize(high_fixes), high_fix_max);
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ mov(ip, Operand(i & 0xff)); // Set the low byte.
+ __ b(&high_fixes[i >> 8]); // Jump to the secondary table.
+ DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
+ }
+ // Generate the secondary table, to set the high byte.
+ for (int high = 1; high <= high_fix_max; high++) {
+ __ bind(&high_fixes[high]);
+ __ orr(ip, ip, Operand(high << 8));
+ // If this isn't the last entry, emit a branch to the end of the table.
+ // The last entry can just fall through.
+ if (high < high_fix_max) __ b(&high_fixes[0]);
+ }
+ // Bind high_fixes[0] last, for indices like 0x00**. This case requires no
+ // fix-up, so for (common) small tables we can jump here, then just fall
+ // through with no additional branch.
+ __ bind(&high_fixes[0]);
}
- __ bind(&done);
__ push(ip);
}
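
The ARMv6 branch keeps each table entry at two instructions by splitting the entry index: the low byte is set with a `mov` in the main table, and entries whose index exceeds 0xff branch into a per-high-byte fix-up table that `orr`s the high byte back in. A host-side model of that reconstruction (illustrative; the entry count is arbitrary):

    #include <cassert>

    int main() {
      const int count = 1000;  // hypothetical number of deopt entries
      for (int i = 0; i < count; i++) {
        int ip = i & 0xff;  // main table: mov ip, #(i & 0xff)
        int high = i >> 8;  // selects which fix-up label the entry targets
        ip |= high << 8;    // fix-up table: orr ip, ip, #(high << 8)
        assert(ip == i);    // every index is reproduced exactly
      }
      return 0;
    }
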
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 287152ad59..1e1c75d8b8 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -40,6 +40,7 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
//------------------------------------------------------------------------------
@@ -755,7 +756,45 @@ void Decoder::DecodeType01(Instruction* instr) {
Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
}
} else {
- Unknown(instr); // not used by V8
+ if (instr->Bits(24, 23) == 3) {
+ if (instr->Bit(20) == 1) {
+ // ldrex
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ Format(instr, "ldrex'cond 'rt, ['rn]");
+ break;
+ case 2:
+ Format(instr, "ldrexb'cond 'rt, ['rn]");
+ break;
+ case 3:
+ Format(instr, "ldrexh'cond 'rt, ['rn]");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // strex
+ // The instruction is documented as strex rd, rt, [rn], but the
+ // "rt" register uses the rm bits.
+ switch (instr->Bits(22, 21)) {
+ case 0:
+ Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+ break;
+ case 2:
+ Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+ break;
+ case 3:
+ Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else {
+ Unknown(instr); // not used by V8
+ }
}
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
// ldrd, strd
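
The new decode path keys entirely off instruction bit fields. A standalone helper mirroring `Instruction::Bits(hi, lo)` (a sketch; the real class wraps the instruction pointer), applied to the ldrex/strex selector bits:

    #include <cassert>
    #include <cstdint>

    static uint32_t Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main() {
      // ldrex r1, [r0] encodes as 0xe1901f9f (cond = AL).
      uint32_t ldrex = 0xe1901f9f;
      assert(Bits(ldrex, 24, 23) == 3);  // sync-primitive space
      assert(Bits(ldrex, 20, 20) == 1);  // load, not store
      assert(Bits(ldrex, 22, 21) == 0);  // word variant, not ldrexb/ldrexh
      return 0;
    }
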
@@ -1325,16 +1364,10 @@ int Decoder::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
if (instr->SvcValue() >= kStopCode) {
Format(instr, "stop'cond 'svc");
- // Also print the stop message. Its address is encoded
- // in the following 4 bytes.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n %p %08x stop message: %s",
- reinterpret_cast<void*>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<uint32_t*>(instr
- + Instruction::kInstrSize),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize));
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "\n %p %08x",
+ reinterpret_cast<void*>(instr + Instruction::kInstrSize),
+ *reinterpret_cast<uint32_t*>(instr + Instruction::kInstrSize));
// We have decoded 2 * Instruction::kInstrSize bytes.
return 2 * Instruction::kInstrSize;
} else {
@@ -1869,6 +1902,48 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
Unknown(instr);
}
break;
+ case 0x1C:
+ if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ // VSEL* (floating-point)
+ bool dp_operation = (instr->SzValue() == 1);
+ switch (instr->Bits(21, 20)) {
+ case 0x0:
+ if (dp_operation) {
+ Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ case 0x1:
+ if (dp_operation) {
+ Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ case 0x2:
+ if (dp_operation) {
+ Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ case 0x3:
+ if (dp_operation) {
+ Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
+ }
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ } else {
+ Unknown(instr);
+ }
+ break;
default:
Unknown(instr);
break;
@@ -1968,7 +2043,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -1979,7 +2054,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Register::from_code(reg).ToString();
+ return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
}
@@ -2031,9 +2106,8 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- v8::internal::PrintF(
- f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/deps/v8/src/arm/eh-frame-arm.cc b/deps/v8/src/arm/eh-frame-arm.cc
new file mode 100644
index 0000000000..7405b1365f
--- /dev/null
+++ b/deps/v8/src/arm/eh-frame-arm.cc
@@ -0,0 +1,64 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kR0DwarfCode = 0;
+static const int kFpDwarfCode = 11;
+static const int kSpDwarfCode = 13;
+static const int kLrDwarfCode = 14;
+
+const int EhFrameConstants::kCodeAlignmentFactor = 4;
+const int EhFrameConstants::kDataAlignmentFactor = -4;
+
+void EhFrameWriter::WriteReturnAddressRegisterCode() {
+ WriteULeb128(kLrDwarfCode);
+}
+
+void EhFrameWriter::WriteInitialStateInCie() {
+ SetBaseAddressRegisterAndOffset(fp, 0);
+ RecordRegisterNotModified(lr);
+}
+
+// static
+int EhFrameWriter::RegisterToDwarfCode(Register name) {
+ switch (name.code()) {
+ case Register::kCode_fp:
+ return kFpDwarfCode;
+ case Register::kCode_sp:
+ return kSpDwarfCode;
+ case Register::kCode_lr:
+ return kLrDwarfCode;
+ case Register::kCode_r0:
+ return kR0DwarfCode;
+ default:
+ UNIMPLEMENTED();
+ return -1;
+ }
+}
+
+#ifdef ENABLE_DISASSEMBLER
+
+// static
+const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
+ switch (code) {
+ case kFpDwarfCode:
+ return "fp";
+ case kSpDwarfCode:
+ return "sp";
+ case kLrDwarfCode:
+ return "lr";
+ default:
+ UNIMPLEMENTED();
+ return nullptr;
+ }
+}
+
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index b6cac760b2..d26804a4ff 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -13,6 +13,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return r1;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r0; }
const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
@@ -25,13 +38,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
const Register StoreDescriptor::ReceiverRegister() { return r1; }
const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
+const Register StoreDescriptor::SlotRegister() { return r4; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r4; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return r4; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return r3; }
@@ -41,23 +50,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return r5; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r2; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
-const Register InstanceOfDescriptor::LeftRegister() { return r1; }
-const Register InstanceOfDescriptor::RightRegister() { return r0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
-
-const Register ApiGetterDescriptor::function_address() { return r2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return r0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register MathPowTaggedDescriptor::exponent() { return r2; }
@@ -77,13 +78,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r3};
@@ -248,50 +242,35 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
- Register registers[] = {r1, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
Register registers[] = {r1, r2, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// register state
// r0 -- number of arguments
- // r1 -- constructor function
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // r1 -- function
+ // r2 -- allocation site with elements kind
+ Register registers[] = {r1, r2, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {r1, r0};
+ Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {r0};
@@ -318,6 +297,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // r1 -- lhs
+ // r0 -- rhs
+ // r4 -- slot id
+ // r3 -- vector
+ Register registers[] = {r1, r0, r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -398,9 +393,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -435,6 +429,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r0, // the value to pass to the generator
+ r1, // the JSGeneratorObject to resume
+ r2 // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 6af3d6c20c..a08673d462 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -89,17 +89,6 @@ int MacroAssembler::CallStubSize(
}
-int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
- Address target,
- RelocInfo::Mode rmode,
- Condition cond) {
- Instr mov_instr = cond | MOV | LeaveCC;
- Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
- return kInstrSize +
- mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
-}
-
-
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond,
@@ -131,12 +120,6 @@ void MacroAssembler::Call(Address target,
// blx ip
// @ return address
- // Statement positions are expected to be recorded when the target
- // address is loaded. The mov method automatically records positions
- // when pc is the target; since that is not the case here, we have to
- // do it explicitly.
- positions_recorder()->WriteRecordedPositions();
-
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
@@ -173,6 +156,40 @@ void MacroAssembler::Call(Handle<Code> code,
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
+void MacroAssembler::CallDeoptimizer(Address target) {
+ BlockConstPoolScope block_const_pool(this);
+
+ uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
+
+ // We use blx, like a call, but it does not return here. The link register is
+ // used by the deoptimizer to work out what called it.
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
+ movw(ip, target_raw & 0xffff);
+ movt(ip, (target_raw >> 16) & 0xffff);
+ blx(ip);
+ } else {
+ // We need to load a literal, but we can't use the usual constant pool
+ // because we call this from a patcher, and cannot afford the guard
+ // instruction and other administrative overhead.
+ ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
+ blx(ip);
+ dd(target_raw);
+ }
+}
+
+int MacroAssembler::CallDeoptimizerSize() {
+ // ARMv7+:
+ // movw ip, ...
+ // movt ip, ...
+ // blx ip @ This never returns.
+ //
+ // ARMv6:
+ // ldr ip, =address
+ // blx ip @ This never returns.
+ // .word address
+ return 3 * kInstrSize;
+}
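
On ARMv7 the deoptimizer call site materializes the full 32-bit entry address with a movw/movt pair, which is what keeps `CallDeoptimizerSize()` at a fixed three instructions. A host-side model of the split (the address is a placeholder):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t target = 0xdeadbeef;             // hypothetical entry address
      uint32_t low = target & 0xffff;           // movw ip, #low
      uint32_t high = (target >> 16) & 0xffff;  // movt ip, #high
      uint32_t ip = (high << 16) | low;
      assert(ip == target);
      return 0;
    }
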
void MacroAssembler::Ret(Condition cond) {
bx(lr, cond);
@@ -223,19 +240,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- mov(dst, Operand(value));
- } else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- mov(dst, Operand(cell));
- ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- mov(dst, Operand(value));
- }
- }
+ mov(dst, Operand(value));
}
@@ -245,6 +250,11 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
}
}
+void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
+ if (!dst.is(src)) {
+ vmov(dst, src);
+ }
+}
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
if (!dst.is(src)) {
@@ -252,11 +262,10 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
}
}
-
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
Register srcA, Condition cond) {
- if (CpuFeatures::IsSupported(MLS)) {
- CpuFeatureScope scope(this, MLS);
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(this, ARMv7);
mls(dst, src1, src2, srcA, cond);
} else {
DCHECK(!srcA.is(ip));
@@ -355,37 +364,6 @@ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
}
-void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
- Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
- DCHECK(!dst.is(pc) && !src.rm().is(pc));
- DCHECK((satpos >= 0) && (satpos <= 31));
-
- // These asserts are required to ensure compatibility with the ARMv7
- // implementation.
- DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
- DCHECK(src.rs().is(no_reg));
-
- Label done;
- int satval = (1 << satpos) - 1;
-
- if (cond != al) {
- b(NegateCondition(cond), &done); // Skip saturate if !condition.
- }
- if (!(src.is_reg() && dst.is(src.rm()))) {
- mov(dst, src);
- }
- tst(dst, Operand(~satval));
- b(eq, &done);
- mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
- mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
- bind(&done);
- } else {
- usat(dst, satpos, src, cond);
- }
-}
-
-
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
@@ -872,8 +850,7 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
DCHECK(!serializer_enabled());
// General purpose registers are pushed last on the stack.
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
@@ -889,10 +866,8 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
// below doesn't support it yet.
DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
- // Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
- (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
- CpuFeatureScope scope(this, ARMv7);
+ // Generate two ldr instructions if ldrd is not applicable.
+ if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
ldrd(dst1, dst2, src, cond);
} else {
if ((src.am() == Offset) || (src.am() == NegOffset)) {
@@ -930,10 +905,8 @@ void MacroAssembler::Strd(Register src1, Register src2,
// below doesn't support it yet.
DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
- // Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
- (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
- CpuFeatureScope scope(this, ARMv7);
+ // Generate two str instructions if strd is not applicable.
+ if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
@@ -950,30 +923,12 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
}
-
-void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
- // If needed, restore wanted bits of FPSCR.
- Label fpscr_done;
- vmrs(scratch);
- if (emit_debug_code()) {
- Label rounding_mode_correct;
- tst(scratch, Operand(kVFPRoundingModeMask));
- b(eq, &rounding_mode_correct);
- // Don't call Assert here, since Runtime_Abort could re-enter here.
- stop("Default rounding mode not set");
- bind(&rounding_mode_correct);
- }
- tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
- b(ne, &fpscr_done);
- orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
- vmsr(scratch);
- bind(&fpscr_done);
-}
-
-
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+ // become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
+ // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
vsub(dst, src, kDoubleRegZero, cond);
}
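
The rationale in the new comment can be checked on any IEEE-754 host; exact signalling-NaN handling varies by FPU, so treat the sketch below as illustrative rather than a statement about ARM VFP specifically:

    #include <cassert>
    #include <cmath>

    int main() {
      double minus_zero = -0.0;
      assert(std::signbit(minus_zero - 0.0));   // -0.0 - 0.0 == -0.0
      assert(!std::signbit(minus_zero + 0.0));  // -0.0 + 0.0 == +0.0
      // Arithmetic on a NaN yields a quiet NaN, which is the point of the
      // canonicalization: x - 0.0 leaves ordinary values untouched.
      assert(std::isnan(std::nan("") - 0.0));
      return 0;
    }
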
@@ -1049,13 +1004,11 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch) {
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value_rep(imm);
+ int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
- if (value_rep == zero) {
+ if (imm_bits == bit_cast<int64_t>(0.0)) {
vmov(dst, kDoubleRegZero);
- } else if (value_rep == minus_zero) {
+ } else if (imm_bits == bit_cast<int64_t>(-0.0)) {
vneg(dst, kDoubleRegZero);
} else {
vmov(dst, imm, scratch);
@@ -1290,9 +1243,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- ldr(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -1332,13 +1284,29 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
return frame_ends;
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(lr, fp, context, target);
+ add(fp, sp, Operand(2 * kPointerSize));
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(lr, fp, context, target);
+}
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
- mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
+ mov(ip, Operand(Smi::FromInt(frame_type)));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1606,12 +1574,13 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- mov(r4, Operand(step_in_enabled));
- ldrb(r4, MemOperand(r4));
- cmp(r4, Operand(0));
- b(eq, &skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ mov(r4, Operand(last_step_action));
+ ldrsb(r4, MemOperand(r4));
+ cmp(r4, Operand(StepIn));
+ b(lt, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2003,6 +1972,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2090,26 +2060,29 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
- add(result_end, source, bits_operand, SetCC, cond);
+ add(result_end, source, bits_operand, LeaveCC, cond);
source = result_end;
cond = cc;
}
}
- b(cs, gc_required);
+
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
- str(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -2185,7 +2158,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
add(result_end, result, Operand(object_size), SetCC);
}
- b(cs, gc_required);
+
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
@@ -2194,14 +2167,122 @@ void MacroAssembler::Allocate(Register object_size, Register result,
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
- str(result_end, MemOperand(top_address));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ add(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- add(result, result, Operand(kHeapObjectTag));
+ Register top_address = scratch;
+ mov(top_address, Operand(allocation_top));
+ ldr(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
}
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
+ } else {
+ add(result_end, result, Operand(object_size), SetCC);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ tst(result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
+
+ add(result, result, Operand(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address register.
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+ ldr(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes. We must preserve the ip register at
+ // this point, so we cannot just use add().
+ DCHECK(object_size > 0);
+ Register source = result;
+ Condition cond = al;
+ int shift = 0;
+ while (object_size != 0) {
+ if (((object_size >> shift) & 0x03) == 0) {
+ shift += 2;
+ } else {
+ int bits = object_size & (0xff << shift);
+ object_size -= bits;
+ shift += 8;
+ Operand bits_operand(bits);
+ DCHECK(bits_operand.instructions_required(this) == 1);
+ add(result_end, source, bits_operand, LeaveCC, cond);
+ source = result_end;
+ cond = cc;
+ }
+ }
+
+ // The top pointer is not updated for allocation folding dominators.
+ str(result_end, MemOperand(top_address));
+
+ add(result, result, Operand(kHeapObjectTag));
+}
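
The chunking loop is worth unpacking: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, so the constant is peeled into byte-wide chunks anchored at even bit positions, one add per chunk, chained through result_end so that ip is never needed. A stand-alone re-implementation for checking which immediates a given size produces (the example value is mine):

    #include <cstdio>

    int main() {
      int object_size = 0x10804;  // decomposes into #0x4 and #0x10800
      int shift = 0;
      while (object_size != 0) {
        if (((object_size >> shift) & 0x03) == 0) {
          shift += 2;  // rotations are even, so advance two bits at a time
        } else {
          int bits = object_size & (0xff << shift);  // next byte-wide chunk
          object_size -= bits;
          shift += 8;
          std::printf("add result_end, ..., #0x%x\n", bits);
        }
      }
      return 0;
    }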
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@@ -2218,12 +2299,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -2247,12 +2324,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -2266,7 +2339,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -2280,12 +2353,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2298,7 +2367,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -2314,7 +2383,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2414,12 +2483,6 @@ void MacroAssembler::StoreNumberToDoubleElements(
DONT_DO_SMI_CHECK);
vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Force a canonical NaN.
- if (emit_debug_code()) {
- vmrs(ip);
- tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- Assert(ne, kDefaultNaNModeNotSet);
- }
VFPCanonicalizeNaN(double_scratch);
b(&store);
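
The deleted block only asserted, in debug code, that the FPSCR default-NaN bit was set; the actual normalization is done by VFPCanonicalizeNaN on the next line, so nothing is lost. The contract that call maintains, as scalar C++:

    #include <cmath>
    #include <limits>

    // Any NaN, signalling or quiet, whatever its payload, collapses to the
    // single canonical quiet NaN; every other value passes through.
    double CanonicalizeNaN(double value) {
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                               : value;
    }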
@@ -2803,18 +2866,18 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
#if defined(__thumb__)
// Thumb mode builtin.
DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2902,17 +2965,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- mov(r0, Operand(Smi::FromInt(reason)));
- push(r0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(r1, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
if (is_const_pool_blocked()) {
@@ -3129,6 +3194,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject);
+ push(object);
+ CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAGeneratorObject);
+ }
+}
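
The smi test at the top of the new assertion relies on the pointer-tagging scheme (kSmiTag == 0 with a one-bit mask): smis carry a clear low bit, heap objects a set one, so tst plus Check(ne, ...) fires exactly on a clear bit. The same test in C++:

    #include <cstdint>

    // kSmiTag == 0, kSmiTagMask == 1.
    bool IsSmi(uintptr_t tagged_value) {
      return (tagged_value & 1) == 0;
    }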
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -3225,12 +3301,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -3238,11 +3313,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
- str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -3267,7 +3338,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3306,17 +3378,7 @@ void MacroAssembler::CopyBytes(Register src,
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
- if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
- } else {
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- mov(scratch, Operand(scratch, LSR, 8));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- }
+ str(scratch, MemOperand(dst, kPointerSize, PostIndex));
sub(length, length, Operand(kPointerSize));
b(&word_loop);
@@ -3662,7 +3724,7 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
- Usat(output_reg, 8, Operand(input_reg));
+ usat(output_reg, 8, Operand(input_reg));
}
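
ClampUint8 now emits usat directly; the Usat macro wrapper is deleted in the header hunk further down, presumably because the saturating instruction exists on every target this port still supports (the diff itself states no rationale). What usat with a saturation width of 8 computes:

    #include <cstdint>

    // Scalar equivalent of `usat dst, #8, src`: clamp a signed 32-bit value
    // into the unsigned 8-bit range.
    uint8_t ClampUint8(int32_t input) {
      if (input < 0) return 0;
      if (input > 255) return 255;
      return static_cast<uint8_t>(input);
    }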
@@ -3770,7 +3832,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -3780,7 +3842,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ ldr(ip, MemOperand(ip));
+ eor(scratch_reg, scratch_reg, Operand(ip));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
b(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
@@ -3796,7 +3860,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// we are below top.
bind(&top_check);
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- cmp(scratch_reg, Operand(new_space_allocation_top));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ ldr(ip, MemOperand(ip));
+ cmp(scratch_reg, ip);
b(gt, no_memento_found);
// Memento map check.
bind(&map_check);
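
This hunk fixes a real bug: the old code eor'd and cmp'd against the external reference itself, i.e. the address of the allocation-top cell, where it meant the value stored in it, so both comparisons now load through ip first. The page test then works because two addresses lie on the same page exactly when they agree on every bit above the alignment mask:

    #include <cstdint>

    const uintptr_t kPageAlignmentMask =
        (uintptr_t{1} << 19) - 1;  // 512K pages; the size here is illustrative

    // eor + tst above: same page <=> no differing bits above the mask.
    bool OnSamePage(uintptr_t a, uintptr_t b) {
      return ((a ^ b) & ~kPageAlignmentMask) == 0;
    }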
@@ -3818,8 +3884,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
@@ -3917,6 +3982,10 @@ CodePatcher::~CodePatcher() {
Assembler::FlushICache(masm_.isolate(), address_, size_);
}
+ // Check that we don't have any pending constant pools.
+ DCHECK(masm_.pending_32_bit_constants_.empty());
+ DCHECK(masm_.pending_64_bit_constants_.empty());
+
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index f32630444e..2f1b3c2cae 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -19,8 +19,8 @@ const Register kReturnRegister1 = {Register::kCode_r1};
const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
+const Register kAllocateSizeRegister = {Register::kCode_r1};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
@@ -101,10 +101,6 @@ class MacroAssembler: public Assembler {
int CallStubSize(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
- static int CallSizeNotPredictableCodeSize(Isolate* isolate,
- Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
@@ -114,17 +110,19 @@ class MacroAssembler: public Assembler {
void Call(Address target, RelocInfo::Mode rmode,
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
+ TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al,
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Ret(Condition cond = al);
+ // Used for patching in calls to the deoptimizer.
+ void CallDeoptimizer(Address target);
+ static int CallDeoptimizerSize();
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
@@ -157,8 +155,6 @@ class MacroAssembler: public Assembler {
int width,
Condition cond = al);
void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
- void Usat(Register dst, int satpos, const Operand& src,
- Condition cond = al);
void Call(Label* target);
void Push(Register src) { push(src); }
@@ -174,6 +170,7 @@ class MacroAssembler: public Assembler {
mov(dst, src, sbit, cond);
}
}
+ void Move(SwVfpRegister dst, SwVfpRegister src);
void Move(DwVfpRegister dst, DwVfpRegister src);
void Load(Register dst, const MemOperand& src, Representation r);
@@ -489,15 +486,6 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
- // Ensure that FPSCR contains values needed by JavaScript.
- // We need the NaNModeControlBit to be sure that operations like
- // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
- // In VFP3 it will be always the Canonical NaN.
- // In VFP2 it will be either the Canonical NaN or the negative version
- // of the Canonical NaN. It doesn't matter if we have two values. The aim
- // is to be sure to never generate the hole NaN.
- void VFPEnsureFPSCRState(Register scratch);
-
// If the value is a NaN, canonicalize the value else, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
@@ -602,7 +590,8 @@ class MacroAssembler: public Assembler {
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
- void EnterExitFrame(bool save_doubles, int stack_space = 0);
+ void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
@@ -792,6 +781,15 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is right now only used for folded allocations. It just
+ // increments the top pointer without checking against limit. This can only
+ // be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register scratch1,
+ Register scratch2, AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ Register scratch, AllocationFlags flags);
+
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -826,7 +824,6 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
@@ -1170,7 +1167,8 @@ class MacroAssembler: public Assembler {
void MovFromFloatResult(DwVfpRegister dst);
// Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -1326,6 +1324,10 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -1436,6 +1438,9 @@ class MacroAssembler: public Assembler {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in r0 and returns map with validated enum cache
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -1568,16 +1573,7 @@ inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
-
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 6c22a0a86a..cfcc5b16c5 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -33,7 +33,6 @@ namespace internal {
class ArmDebugger {
public:
explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
- ~ArmDebugger();
void Stop(Instruction* instr);
void Debug();
@@ -62,77 +61,18 @@ class ArmDebugger {
void RedoBreakpoints();
};
-
-ArmDebugger::~ArmDebugger() {
-}
-
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void ArmDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
- char* msg = *msg_address;
- DCHECK(msg != NULL);
-
- // Update this stop description.
- if (isWatchedStop(code) && !watched_stops_[code].desc) {
- watched_stops_[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", msg);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
-}
-
-#else // ndef GENERATED_CODE_COVERAGE
-
-static void InitializeCoverage() {
-}
-
-
void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc()
- + Instruction::kInstrSize);
- // Update this stop description.
- if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
- sim_->watched_stops_[code].desc = msg;
- }
// Print the stop message and code if it is not the default code.
if (code != kMaxStopCode) {
- PrintF("Simulator hit stop %u: %s\n", code, msg);
+ PrintF("Simulator hit stop %u\n", code);
} else {
- PrintF("Simulator hit %s\n", msg);
+ PrintF("Simulator hit\n");
}
sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
-#endif
-
int32_t ArmDebugger::GetRegisterValue(int regnum) {
if (regnum == kPCRegister) {
@@ -142,7 +82,6 @@ int32_t ArmDebugger::GetRegisterValue(int regnum) {
}
}
-
double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
return sim_->get_double_from_register_pair(regnum);
}
@@ -299,8 +238,11 @@ void ArmDebugger::Debug() {
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d", Register::from_code(i).ToString(),
- value, value);
+ PrintF(
+ "%3s: 0x%08x %10d",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ i),
+ value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
i < 8 &&
(i % 2) == 0) {
@@ -387,7 +329,7 @@ void ArmDebugger::Debug() {
end = cur + words;
while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d",
+ PrintF(" 0x%08" V8PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
@@ -449,8 +391,8 @@ void ArmDebugger::Debug() {
while (cur < end) {
prev = cur;
cur += dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(prev), buffer.start());
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
+ buffer.start());
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@@ -633,9 +575,7 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
@@ -656,10 +596,8 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
}
}
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry =
- i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@@ -669,9 +607,7 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
- intptr_t start,
- int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -683,9 +619,7 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -718,7 +652,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ i_cache_ = new base::HashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -769,7 +703,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
- InitializeCoverage();
last_debugger_input_ = NULL;
}
@@ -850,10 +783,10 @@ class Redirection {
// static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
- for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
@@ -1112,98 +1045,51 @@ void Simulator::TrashCallerSaveRegisters() {
}
-// Some Operating Systems allow unaligned access on ARMv7 targets. We
-// assume that unaligned accesses are not allowed unless the v8 build system
-// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
-// The following statements below describes the behavior of the ARM CPUs
-// that don't support unaligned access.
-// Some ARM platforms raise an interrupt on detecting unaligned access.
-// On others it does a funky rotation thing. For now we
-// simply disallow unaligned reads. Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator. Since the host is typically IA32 we will not
-// get the correct ARM-like behaviour on unaligned accesses for those ARM
-// targets that don't support unaligned loads and stores.
-
-
int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- return *ptr;
- } else {
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
- *ptr = value;
- } else {
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
}
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- return *ptr;
- } else {
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
- V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- return 0;
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- return *ptr;
- } else {
- PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
- *ptr = value;
- } else {
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
- V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
}
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
- if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
- int16_t* ptr = reinterpret_cast<int16_t*>(addr);
- *ptr = value;
- } else {
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
- UNIMPLEMENTED();
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
}
@@ -1232,26 +1118,19 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
int32_t* Simulator::ReadDW(int32_t addr) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- return ptr;
- } else {
- PrintF("Unaligned read at 0x%08x\n", addr);
- UNIMPLEMENTED();
- return 0;
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return ptr;
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
- if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
- int32_t* ptr = reinterpret_cast<int32_t*>(addr);
- *ptr++ = value1;
- *ptr = value2;
- } else {
- PrintF("Unaligned write at 0x%08x\n", addr);
- UNIMPLEMENTED();
- }
+ // All supported ARM targets allow unaligned accesses, so we don't need to
+ // check the alignment here.
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ *ptr++ = value1;
+ *ptr = value2;
}
@@ -1271,7 +1150,7 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+ PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED();
}
@@ -1808,15 +1687,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(generic_target), dval0, dval1);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(generic_target), dval0);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(generic_target), dval0, ival);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ ival);
break;
default:
UNREACHABLE();
@@ -1942,7 +1823,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host triple returning runtime function %p "
"args %08x, %08x, %08x, %08x, %08x",
- FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
+ arg5);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1953,7 +1835,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// pass it to the target function.
ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
+ static_cast<void*>(result.y), static_cast<void*>(result.z));
}
// Return is passed back in address pointed to by hidden first argument.
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@@ -1969,13 +1852,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
+ arg4, arg5);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -3733,11 +3611,14 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
int32_t address = get_register(rn) + 4 * offset;
+ // Load and store address for singles must be at least four-byte
+ // aligned.
+ DCHECK((address % 4) == 0);
if (instr->HasL()) {
- // Load double from memory: vldr.
+ // Load single from memory: vldr.
set_s_register_from_sinteger(vd, ReadW(address, instr));
} else {
- // Store double to memory: vstr.
+ // Store single to memory: vstr.
WriteW(address, get_sinteger_from_s_register(vd), instr);
}
break;
@@ -3786,6 +3667,9 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
offset = -offset;
}
int32_t address = get_register(rn) + 4 * offset;
+ // Load and store address for doubles must be at least four-byte
+ // aligned.
+ DCHECK((address % 4) == 0);
if (instr->HasL()) {
// Load double from memory: vldr.
int32_t data[] = {
@@ -4028,6 +3912,45 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNIMPLEMENTED();
}
break;
+ case 0x1C:
+ if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
+ (instr->Bit(4) == 0)) {
+ // VSEL* (floating-point)
+ bool condition_holds;
+ switch (instr->Bits(21, 20)) {
+ case 0x0: // VSELEQ
+ condition_holds = (z_flag_ == 1);
+ break;
+ case 0x1: // VSELVS
+ condition_holds = (v_flag_ == 1);
+ break;
+ case 0x2: // VSELGE
+ condition_holds = (n_flag_ == v_flag_);
+ break;
+ case 0x3: // VSELGT
+ condition_holds = ((z_flag_ == 0) && (n_flag_ == v_flag_));
+ break;
+ default:
+ UNREACHABLE(); // Case analysis is exhaustive.
+ break;
+ }
+ if (instr->SzValue() == 0x1) {
+ int n = instr->VFPNRegValue(kDoublePrecision);
+ int m = instr->VFPMRegValue(kDoublePrecision);
+ int d = instr->VFPDRegValue(kDoublePrecision);
+ double result = get_double_from_d_register(condition_holds ? n : m);
+ set_d_register_from_double(d, result);
+ } else {
+ int n = instr->VFPNRegValue(kSinglePrecision);
+ int m = instr->VFPMRegValue(kSinglePrecision);
+ int d = instr->VFPDRegValue(kSinglePrecision);
+ float result = get_float_from_s_register(condition_holds ? n : m);
+ set_s_register_from_float(d, result);
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
default:
UNIMPLEMENTED();
break;
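
The new 0x1C case simulates the four VSEL encodings ARMv8 added: a flag-driven register select with no branch. Only eq, vs, ge and gt are encodable; an assembler obtains the complementary conditions by swapping the two source operands. The condition decode above, restated compactly:

    // NZCV decode for the four encodable VSEL conditions (bits 21:20).
    bool VselConditionHolds(int cc, bool n, bool z, bool v) {
      switch (cc) {
        case 0: return z;               // VSELEQ
        case 1: return v;               // VSELVS
        case 2: return n == v;          // VSELGE
        case 3: return !z && (n == v);  // VSELGT
        default: return false;          // unreachable: cc is a 2-bit field
      }
    }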
@@ -4048,7 +3971,8 @@ void Simulator::InstructionDecode(Instruction* instr) {
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
reinterpret_cast<byte*>(instr));
- PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(instr),
+ buffer.start());
}
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index b3c8eb41e5..71b8e40862 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -68,7 +68,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
namespace v8 {
namespace internal {
@@ -200,7 +200,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(HashMap* i_cache, Redirection* first);
+ static void TearDown(base::HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -222,8 +222,7 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
+ static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
@@ -342,10 +341,9 @@ class Simulator {
void InstructionDecode(Instruction* instr);
// ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+ static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+ static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
@@ -405,7 +403,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation
- v8::internal::HashMap* i_cache_;
+ base::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 6191216281..a639e3e7ac 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -16,6 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
@@ -26,34 +27,6 @@ void RelocInfo::apply(intptr_t delta) {
*p += delta; // Relocate entry.
}
-
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
- IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
- icache_flush_mode);
-}
-
inline int CPURegister::code() const {
DCHECK(IsValid());
return reg_code;
@@ -705,11 +678,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -749,6 +717,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -868,7 +837,7 @@ void RelocInfo::WipeOut() {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 2471d5eebd..9ee20d45e2 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -51,26 +51,13 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- // Probe for runtime features
- base::CPU cpu;
- if (cpu.implementer() == base::CPU::NVIDIA &&
- cpu.variant() == base::CPU::NVIDIA_DENVER &&
- cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
- // TODO(jkummerow): This is turned off as an experiment to see if it
- // affects crash rates. Keep an eye on crash reports and either remove
- // coherent cache support permanently, or re-enable it!
- // supported_ |= 1u << COHERENT_CACHE;
- }
+ // We used to probe for coherent cache support, but on older CPUs it
+ // causes crashes (crbug.com/524337), and newer CPUs don't even have
+ // the feature any more.
}
-
void CpuFeatures::PrintTarget() { }
-
-
-void CpuFeatures::PrintFeatures() {
- printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
-}
-
+void CpuFeatures::PrintFeatures() {}
// -----------------------------------------------------------------------------
// CPURegList utilities.
@@ -192,12 +179,35 @@ bool RelocInfo::IsInConstantPool() {
return instr->IsLdrLiteralX();
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
+}
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
Register candidate = Register::from_code(code);
@@ -269,7 +279,6 @@ void Immediate::InitializeHandle(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
value_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -294,13 +303,11 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
// Constant Pool.
void ConstPool::RecordEntry(intptr_t data,
RelocInfo::Mode mode) {
- DCHECK(mode != RelocInfo::COMMENT &&
- mode != RelocInfo::POSITION &&
- mode != RelocInfo::STATEMENT_POSITION &&
- mode != RelocInfo::CONST_POOL &&
+ DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::CODE_AGE_SEQUENCE &&
- mode != RelocInfo::DEOPT_REASON);
+ mode != RelocInfo::DEOPT_POSITION && mode != RelocInfo::DEOPT_REASON &&
+ mode != RelocInfo::DEOPT_ID);
uint64_t raw_data = static_cast<uint64_t>(data);
int offset = assm_->pc_offset();
if (IsEmpty()) {
@@ -544,8 +551,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
constpool_(this),
recorded_ast_id_(TypeFeedbackId::None()),
- unresolved_branches_(),
- positions_recorder_(this) {
+ unresolved_branches_() {
const_pool_blocked_nesting_ = 0;
veneer_pool_blocked_nesting_ = 0;
Reset();
@@ -579,7 +585,6 @@ void Assembler::Reset() {
void Assembler::GetCode(CodeDesc* desc) {
- reloc_info_writer.Finish();
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
@@ -594,6 +599,8 @@ void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
}
@@ -962,14 +969,12 @@ void Assembler::EndBlockVeneerPool() {
void Assembler::br(const Register& xn) {
- positions_recorder()->WriteRecordedPositions();
DCHECK(xn.Is64Bits());
Emit(BR | Rn(xn));
}
void Assembler::blr(const Register& xn) {
- positions_recorder()->WriteRecordedPositions();
DCHECK(xn.Is64Bits());
// The pattern 'blr xzr' is used as a guard to detect when execution falls
// through the constant pool. It should not be emitted.
@@ -979,7 +984,6 @@ void Assembler::blr(const Register& xn) {
void Assembler::ret(const Register& xn) {
- positions_recorder()->WriteRecordedPositions();
DCHECK(xn.Is64Bits());
Emit(RET | Rn(xn));
}
@@ -991,7 +995,6 @@ void Assembler::b(int imm26) {
void Assembler::b(Label* label) {
- positions_recorder()->WriteRecordedPositions();
b(LinkAndGetInstructionOffsetTo(label));
}
@@ -1002,47 +1005,40 @@ void Assembler::b(int imm19, Condition cond) {
void Assembler::b(Label* label, Condition cond) {
- positions_recorder()->WriteRecordedPositions();
b(LinkAndGetInstructionOffsetTo(label), cond);
}
void Assembler::bl(int imm26) {
- positions_recorder()->WriteRecordedPositions();
Emit(BL | ImmUncondBranch(imm26));
}
void Assembler::bl(Label* label) {
- positions_recorder()->WriteRecordedPositions();
bl(LinkAndGetInstructionOffsetTo(label));
}
void Assembler::cbz(const Register& rt,
int imm19) {
- positions_recorder()->WriteRecordedPositions();
Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}
void Assembler::cbz(const Register& rt,
Label* label) {
- positions_recorder()->WriteRecordedPositions();
cbz(rt, LinkAndGetInstructionOffsetTo(label));
}
void Assembler::cbnz(const Register& rt,
int imm19) {
- positions_recorder()->WriteRecordedPositions();
Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}
void Assembler::cbnz(const Register& rt,
Label* label) {
- positions_recorder()->WriteRecordedPositions();
cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}
@@ -1050,7 +1046,6 @@ void Assembler::cbnz(const Register& rt,
void Assembler::tbz(const Register& rt,
unsigned bit_pos,
int imm14) {
- positions_recorder()->WriteRecordedPositions();
DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@@ -1059,7 +1054,6 @@ void Assembler::tbz(const Register& rt,
void Assembler::tbz(const Register& rt,
unsigned bit_pos,
Label* label) {
- positions_recorder()->WriteRecordedPositions();
tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}
@@ -1067,7 +1061,6 @@ void Assembler::tbz(const Register& rt,
void Assembler::tbnz(const Register& rt,
unsigned bit_pos,
int imm14) {
- positions_recorder()->WriteRecordedPositions();
DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@@ -1076,7 +1069,6 @@ void Assembler::tbnz(const Register& rt,
void Assembler::tbnz(const Register& rt,
unsigned bit_pos,
Label* label) {
- positions_recorder()->WriteRecordedPositions();
tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}
@@ -1696,6 +1688,83 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
ldr_pcrel(rt, 0);
}
+void Assembler::ldar(const Register& rt, const Register& rn) {
+ DCHECK(rn.Is64Bits());
+ LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
+ Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldaxr(const Register& rt, const Register& rn) {
+ DCHECK(rn.Is64Bits());
+ LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
+ Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlr(const Register& rt, const Register& rn) {
+ DCHECK(rn.Is64Bits());
+ LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
+ Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlxr(const Register& rs, const Register& rt,
+ const Register& rn) {
+ DCHECK(rs.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
+ Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldarb(const Register& rt, const Register& rn) {
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(LDAR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldaxrb(const Register& rt, const Register& rn) {
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(LDAXR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlrb(const Register& rt, const Register& rn) {
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(STLR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlxrb(const Register& rs, const Register& rt,
+ const Register& rn) {
+ DCHECK(rs.Is32Bits());
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldarh(const Register& rt, const Register& rn) {
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(LDAR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::ldaxrh(const Register& rt, const Register& rn) {
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(LDAXR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlrh(const Register& rt, const Register& rn) {
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(STLR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
+
+void Assembler::stlxrh(const Register& rs, const Register& rt,
+ const Register& rn) {
+ DCHECK(rs.Is32Bits());
+ DCHECK(rt.Is32Bits());
+ DCHECK(rn.Is64Bits());
+ Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
+}
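
These twelve emitters add the ARMv8 load-acquire/store-release family at word, half-word and byte widths. ldar and stlr are ordinary accesses with ordering attached, while the exclusive pairs ldaxr/stlxr are the building blocks of atomic read-modify-write loops (stlxr writes a status into rs, nonzero when the exclusive monitor was lost). Their C++ memory-model counterparts, semantics only:

    #include <atomic>
    #include <cstdint>

    // ldar ~ acquire load, stlr ~ release store; an ldaxr/stlxr retry loop
    // is what a fetch_add like the one below compiles to on ARMv8.
    int32_t AcquireLoad(const std::atomic<int32_t>& cell) {
      return cell.load(std::memory_order_acquire);
    }
    void ReleaseStore(std::atomic<int32_t>& cell, int32_t value) {
      cell.store(value, std::memory_order_release);
    }
    int32_t AtomicIncrement(std::atomic<int32_t>& cell) {
      return cell.fetch_add(1, std::memory_order_acq_rel);
    }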
void Assembler::mov(const Register& rd, const Register& rm) {
// Moves involving the stack pointer are encoded as add immediate with
@@ -2878,11 +2947,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
- (rmode == RelocInfo::DEOPT_REASON) ||
+ (rmode == RelocInfo::DEOPT_POSITION) ||
+ (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
(rmode == RelocInfo::GENERATOR_CONTINUATION)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
- RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
+ RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
+ RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsGeneratorContinuation(rmode));
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 546025475e..16b7eae03f 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -40,12 +40,22 @@ namespace internal {
R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
+#define FLOAT_REGISTERS(V) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
+ V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
+ V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
+ V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
+
#define DOUBLE_REGISTERS(R) \
R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)
+#define SIMD128_REGISTERS(V) \
+ V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
+ V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
+
#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
@@ -148,8 +158,6 @@ struct Register : public CPURegister {
DCHECK(IsValidOrNone());
}
- const char* ToString();
- bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsRegister() || IsNone());
return IsValidRegister();
@@ -189,6 +197,7 @@ struct Register : public CPURegister {
// End of V8 compatibility section -----------------------
};
+static const bool kSimpleFPAliasing = true;
struct FPRegister : public CPURegister {
enum Code {
@@ -224,8 +233,6 @@ struct FPRegister : public CPURegister {
DCHECK(IsValidOrNone());
}
- const char* ToString();
- bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsFPRegister() || IsNone());
return IsValidFPRegister();
@@ -366,7 +373,7 @@ bool AreSameSizeAndType(const CPURegister& reg1,
const CPURegister& reg7 = NoCPUReg,
const CPURegister& reg8 = NoCPUReg);
-
+typedef FPRegister FloatRegister;
typedef FPRegister DoubleRegister;
// TODO(arm64) Define SIMD registers.
@@ -922,14 +929,11 @@ class Assembler : public AssemblerBase {
}
// Debugging ----------------------------------------------------------------
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
void RecordComment(const char* msg);
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
int buffer_space() const;
@@ -1395,6 +1399,42 @@ class Assembler : public AssemblerBase {
// Load literal to register.
void ldr(const CPURegister& rt, const Immediate& imm);
+ // Load-acquire word.
+ void ldar(const Register& rt, const Register& rn);
+
+ // Load-acquire exclusive word.
+ void ldaxr(const Register& rt, const Register& rn);
+
+ // Store-release word.
+ void stlr(const Register& rt, const Register& rn);
+
+ // Store-release exclusive word.
+ void stlxr(const Register& rs, const Register& rt, const Register& rn);
+
+ // Load-acquire byte.
+ void ldarb(const Register& rt, const Register& rn);
+
+ // Load-acquire exclusive byte.
+ void ldaxrb(const Register& rt, const Register& rn);
+
+ // Store-release byte.
+ void stlrb(const Register& rt, const Register& rn);
+
+ // Store-release exclusive byte.
+ void stlxrb(const Register& rs, const Register& rt, const Register& rn);
+
+ // Load-acquire half-word.
+ void ldarh(const Register& rt, const Register& rn);
+
+ // Load-acquire exclusive half-word.
+ void ldaxrh(const Register& rt, const Register& rn);
+
+ // Store-release half-word.
+ void stlrh(const Register& rt, const Register& rn);
+
+ // Store-release exclusive half-word.
+ void stlxrh(const Register& rs, const Register& rt, const Register& rn);
+
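[editor's note] The emitters added above expose the ARMv8 acquire/release and exclusive memory instructions. A minimal sketch of the semantics they provide, in terms of C++ atomics (on arm64, compilers lower the acquire load to ldar, the release store to stlr, and back compare-exchange loops with ldaxr/stlxr):

```cpp
#include <atomic>
#include <cstdio>

std::atomic<int> ready{0};
int payload = 0;

void producer() {
  payload = 42;                                // plain store
  ready.store(1, std::memory_order_release);   // lowers to stlr on arm64
}

void consumer() {
  // Acquire load (ldar): once it observes 1, the payload write is visible.
  while (ready.load(std::memory_order_acquire) == 0) {}
  std::printf("payload = %d\n", payload);      // prints 42
}

int main() {
  producer();  // run sequentially here; two threads would behave the same
  consumer();
}
```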
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
// that is equal to the 64-bit immediate argument. If an explicit left shift
@@ -1689,6 +1729,11 @@ class Assembler : public AssemblerBase {
return rt2.code() << Rt2_offset;
}
+ static Instr Rs(CPURegister rs) {
+ DCHECK(rs.code() != kSPRegInternalCode);
+ return rs.code() << Rs_offset;
+ }
+
// These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register.
static Instr RdSP(Register rd) {
@@ -2137,8 +2182,6 @@ class Assembler : public AssemblerBase {
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
private:
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
friend class ConstPool;
};
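[editor's note] The new Rs() helper above shifts a register code into its fixed bit field of the 32-bit instruction word, like the existing Rt/Rt2 helpers. A standalone sketch of that field packing, with illustrative (not authoritative) field offsets:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical field offsets for demonstration only.
constexpr int kRtOffset = 0;
constexpr int kRnOffset = 5;
constexpr int kRsOffset = 16;

uint32_t Encode(uint32_t opcode, int rs, int rn, int rt) {
  // Each register code occupies a disjoint 5-bit field of the word.
  return opcode | (uint32_t(rs) << kRsOffset) | (uint32_t(rn) << kRnOffset) |
         (uint32_t(rt) << kRtOffset);
}

int main() {
  uint32_t instr = Encode(0x88000000u, /*rs=*/3, /*rn=*/1, /*rt=*/2);
  std::printf("instr = 0x%08x, rs field = %u\n", instr,
              (instr >> kRsOffset) & 0x1f);
}
```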
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index ee4053515a..5f103bc20c 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -22,64 +22,15 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- // cp: context
- // x1: function
- // x2: allocation site with elements kind
- // x0: number of arguments to the constructor function
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
+ __ Str(x1, MemOperand(jssp, x5));
+ __ Push(x1);
+ __ Push(x2);
+ __ Add(x0, x0, Operand(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
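[editor's note] The first instruction of the new stub, Mov(x5, Operand(x0, LSL, kPointerSizeLog2)), converts the argument count into a byte offset into the stack. A trivial sketch of that scaling (assumes a 64-bit target, so log2(pointer size) is 3):

```cpp
#include <cstdint>
#include <cstdio>

constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers

uintptr_t ByteOffset(uintptr_t element_count) {
  // element count -> byte offset, the effect of LSL #kPointerSizeLog2.
  return element_count << kPointerSizeLog2;
}

int main() {
  // With 5 arguments on the stack, the slot past them is 40 bytes up.
  std::printf("offset = %zu\n", static_cast<size_t>(ByteOffset(5)));
}
```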
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -87,21 +38,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -741,7 +683,6 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
__ Ret(return_address);
}
-
void MathPowStub::Generate(MacroAssembler* masm) {
// Stack on entry:
// jssp[0]: Exponent (as a tagged value).
@@ -749,14 +690,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
//
// The (tagged) result will be returned in x0, as a heap number.
- Register result_tagged = x0;
- Register base_tagged = x10;
Register exponent_tagged = MathPowTaggedDescriptor::exponent();
DCHECK(exponent_tagged.is(x11));
Register exponent_integer = MathPowIntegerDescriptor::exponent();
DCHECK(exponent_integer.is(x12));
- Register scratch1 = x14;
- Register scratch0 = x15;
Register saved_lr = x19;
FPRegister result_double = d0;
FPRegister base_double = d0;
@@ -767,37 +704,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// A fast-path for integer exponents.
Label exponent_is_smi, exponent_is_integer;
- // Bail out to runtime.
- Label call_runtime;
// Allocate a heap number for the result, and return it.
Label done;
// Unpack the inputs.
- if (exponent_type() == ON_STACK) {
- Label base_is_smi;
- Label unpack_exponent;
-
- __ Pop(exponent_tagged, base_tagged);
-
- __ JumpIfSmi(base_tagged, &base_is_smi);
- __ JumpIfNotHeapNumber(base_tagged, &call_runtime);
- // base_tagged is a heap number, so load its double value.
- __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset));
- __ B(&unpack_exponent);
- __ Bind(&base_is_smi);
- // base_tagged is a SMI, so untag it and convert it to a double.
- __ SmiUntagToDouble(base_double, base_tagged);
-
- __ Bind(&unpack_exponent);
- // x10 base_tagged The tagged base (input).
- // x11 exponent_tagged The tagged exponent (input).
- // d1 base_double The base as a double.
- __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
- __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime);
- // exponent_tagged is a heap number, so load its double value.
- __ Ldr(exponent_double,
- FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
__ JumpIfSmi(exponent_tagged, &exponent_is_smi);
__ Ldr(exponent_double,
FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
@@ -810,89 +721,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
scratch0_double, &exponent_is_integer);
- if (exponent_type() == ON_STACK) {
- FPRegister half_double = d3;
- FPRegister minus_half_double = d4;
- // Detect square root case. Crankshaft detects constant +/-0.5 at compile
- // time and uses DoMathPowHalf instead. We then skip this check for
- // non-constant cases of +/-0.5 as these hardly occur.
-
- __ Fmov(minus_half_double, -0.5);
- __ Fmov(half_double, 0.5);
- __ Fcmp(minus_half_double, exponent_double);
- __ Fccmp(half_double, exponent_double, NZFlag, ne);
- // Condition flags at this point:
- // 0.5: nZCv // Identified by eq && pl
- // -0.5: NZcv // Identified by eq && mi
- // other: ?z?? // Identified by ne
- __ B(ne, &call_runtime);
-
- // The exponent is 0.5 or -0.5.
-
- // Given that exponent is known to be either 0.5 or -0.5, the following
- // special cases could apply (according to ECMA-262 15.8.2.13):
- //
- // base.isNaN(): The result is NaN.
- // (base == +INFINITY) || (base == -INFINITY)
- // exponent == 0.5: The result is +INFINITY.
- // exponent == -0.5: The result is +0.
- // (base == +0) || (base == -0)
- // exponent == 0.5: The result is +0.
- // exponent == -0.5: The result is +INFINITY.
- // (base < 0) && base.isFinite(): The result is NaN.
- //
- // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except
- // where base is -INFINITY or -0.
-
- // Add +0 to base. This has no effect other than turning -0 into +0.
- __ Fadd(base_double, base_double, fp_zero);
- // The operation -0+0 results in +0 in all cases except where the
- // FPCR rounding mode is 'round towards minus infinity' (RM). The
- // ARM64 simulator does not currently simulate FPCR (where the rounding
- // mode is set), so test the operation with some debug code.
- if (masm->emit_debug_code()) {
- UseScratchRegisterScope temps(masm);
- Register temp = temps.AcquireX();
- __ Fneg(scratch0_double, fp_zero);
- // Verify that we correctly generated +0.0 and -0.0.
- // bits(+0.0) = 0x0000000000000000
- // bits(-0.0) = 0x8000000000000000
- __ Fmov(temp, fp_zero);
- __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
- __ Fmov(temp, scratch0_double);
- __ Eor(temp, temp, kDSignMask);
- __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
- // Check that -0.0 + 0.0 == +0.0.
- __ Fadd(scratch0_double, scratch0_double, fp_zero);
- __ Fmov(temp, scratch0_double);
- __ CheckRegisterIsClear(temp, kExpectedPositiveZero);
- }
-
- // If base is -INFINITY, make it +INFINITY.
- // * Calculate base - base: All infinities will become NaNs since both
- // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in ARM64.
- // * If the result is NaN, calculate abs(base).
- __ Fsub(scratch0_double, base_double, base_double);
- __ Fcmp(scratch0_double, 0.0);
- __ Fabs(scratch1_double, base_double);
- __ Fcsel(base_double, scratch1_double, base_double, vs);
-
- // Calculate the square root of base.
- __ Fsqrt(result_double, base_double);
- __ Fcmp(exponent_double, 0.0);
- __ B(ge, &done); // Finish now for exponents of 0.5.
- // Find the inverse for exponents of -0.5.
- __ Fmov(scratch0_double, 1.0);
- __ Fdiv(result_double, scratch0_double, result_double);
- __ B(&done);
- }
-
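[editor's note] The removed fast path above exists because a plain square root does not match the ECMA-262 semantics of Math.pow for exponent ±0.5 at -0 and -infinity, which is why the stub added +0 to the base and fixed up -infinity before Fsqrt. A runnable check of those corner cases in plain C++ (standard library math, not V8 code):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // pow and sqrt disagree exactly where the stub patched things up:
  std::printf("sqrt(-0.0)       = %g\n", std::sqrt(-0.0));          // -0
  std::printf("pow(-0.0, 0.5)   = %g\n", std::pow(-0.0, 0.5));      // 0
  std::printf("sqrt(-inf)       = %g\n", std::sqrt(-INFINITY));     // nan
  std::printf("pow(-inf, 0.5)   = %g\n", std::pow(-INFINITY, 0.5)); // inf
  // Adding +0.0 first turns -0.0 into +0.0, so the sqrt fast path agrees;
  // -infinity still needed the separate abs() fixup.
  std::printf("sqrt(-0.0 + 0.0) = %g\n", std::sqrt(-0.0 + 0.0));    // 0
}
```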
{
AllowExternalCallThatCantCauseGC scope(masm);
__ Mov(saved_lr, lr);
__ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
__ Mov(lr, saved_lr);
__ B(&done);
}
@@ -961,34 +794,17 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Fcmp(result_double, 0.0);
__ B(&done, ne);
- if (exponent_type() == ON_STACK) {
- // Bail out to runtime code.
- __ Bind(&call_runtime);
- // Put the arguments back on the stack.
- __ Push(base_tagged, exponent_tagged);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // Return.
- __ Bind(&done);
- __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
- result_double);
- DCHECK(result_tagged.is(x0));
- __ Ret();
- } else {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ Fmov(base_double, base_double_copy);
- __ Scvtf(exponent_double, exponent_integer);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
- __ Mov(lr, saved_lr);
- __ Bind(&done);
- __ Ret();
- }
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ Fmov(base_double, base_double_copy);
+ __ Scvtf(exponent_double, exponent_integer);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ Bind(&done);
+ __ Ret();
}
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the following stubs are generated in this order
// because pregenerated stubs can only call other pregenerated stubs.
@@ -997,7 +813,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1005,7 +821,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1108,7 +923,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
// Enter the exit frame.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles(), x10, extra_stack_space);
+ __ EnterExitFrame(
+ save_doubles(), x10, extra_stack_space,
+ is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
@@ -1323,11 +1140,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Mov(jssp, csp);
__ SetStackPointer(jssp);
- // Configure the FPCR. We don't restore it, so this is technically not allowed
- // according to AAPCS64. However, we only set default-NaN mode and this will
- // be harmless for most C code. Also, it works for ARM.
- __ ConfigureFPCR();
-
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Set up the reserved register for 0.0.
@@ -1402,12 +1214,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// restores all callee-saved registers (including cp and fp) to their
// saved values before returning a failure to C.
- // Clear any pending exceptions.
- __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
- __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- __ Str(x10, MemOperand(x11));
-
// Invoke the function by calling through the JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
@@ -1506,7 +1312,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1520,123 +1325,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = x1; // Object (lhs).
- Register const function = x0; // Function (rhs).
- Register const object_map = x2; // Map of {object}.
- Register const function_map = x3; // Map of {function}.
- Register const function_prototype = x4; // Prototype of {function}.
- Register const scratch = x5;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex,
- &fast_case);
- __ JumpIfNotRoot(object_map, Heap::kInstanceofCacheMapRootIndex, &fast_case);
- __ LoadRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ Bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
- &slow_case);
- __ LoadRoot(x0, Heap::kFalseValueRootIndex);
- __ Ret();
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ Bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
- &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
- __ Tbz(scratch, Map::kIsConstructor, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ Ldr(function_prototype,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- __ JumpIfNotObjectType(function_prototype, scratch, scratch, MAP_TYPE,
- &function_prototype_valid);
- __ Ldr(function_prototype,
- FieldMemOperand(function_prototype, Map::kPrototypeOffset));
- __ Bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Register const object_instance_type = function_map;
- Register const map_bit_field = function_map;
- Register const null = scratch;
- Register const result = x0;
-
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ LoadRoot(null, Heap::kNullValueRootIndex);
- __ Bind(&loop);
-
- // Check if the object needs to be access checked.
- __ Ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(map_bit_field, 1 << Map::kIsAccessCheckNeeded,
- &fast_runtime_fallback);
- // Check if the current object is a Proxy.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- __ B(eq, &fast_runtime_fallback);
-
- __ Ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Cmp(object, function_prototype);
- __ B(eq, &done);
- __ Cmp(object, null);
- __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ B(ne, &loop);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ Bind(&done);
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // Found Proxy or access check needed: Call the runtime
- __ Bind(&fast_runtime_fallback);
- __ Push(object, function_prototype);
- // Invalidate the instanceof cache.
- __ Move(scratch, Smi::FromInt(0));
- __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ Push(object, function);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
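[editor's note] The deleted InstanceOfStub inlined a prototype-chain walk with bailouts for proxies and access-checked objects. A minimal standalone sketch of that control flow (hypothetical types, not the V8 object model):

```cpp
#include <cstdio>

struct Obj {
  Obj* prototype;
  bool is_proxy = false;  // stand-in for the proxy/access-check bailout
};

enum class Result { kTrue, kFalse, kRuntime };

Result HasInPrototypeChain(Obj* object, Obj* function_prototype) {
  // Walk up the chain until we find the prototype (true), hit null
  // (false), or must defer to the runtime (proxy / access check).
  for (Obj* p = object->prototype; ; p = p->prototype) {
    if (p == nullptr) return Result::kFalse;
    if (p->is_proxy) return Result::kRuntime;  // fast_runtime_fallback
    if (p == function_prototype) return Result::kTrue;
  }
}

int main() {
  Obj proto{nullptr};
  Obj obj{&proto};
  std::printf("%d\n", HasInPrototypeChain(&obj, &proto) == Result::kTrue);
}
```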
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec);
@@ -1960,15 +1648,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Add(x10, x10, x10);
__ Add(number_of_capture_registers, x10, 2);
- // Check that the fourth object is a JSArray object.
+ // Check that the fourth object is a JSObject.
DCHECK(jssp.Is(__ StackPointer()));
__ Peek(x10, kLastMatchInfoOffset);
__ JumpIfSmi(x10, &runtime);
- __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
+ __ JumpIfNotObjectType(x10, x11, x11, JS_OBJECT_TYPE, &runtime);
- // Check that the JSArray is the fast case.
+ // Check that the object has fast elements.
__ Ldr(last_match_info_elements,
- FieldMemOperand(x10, JSArray::kElementsOffset));
+ FieldMemOperand(x10, JSObject::kElementsOffset));
__ Ldr(x10,
FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
@@ -2131,10 +1819,12 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(argc);
__ Push(argc, function, feedback_vector, index);
+ __ Push(cp);
DCHECK(feedback_vector.Is(x2) && index.Is(x3));
__ CallStub(stub);
+ __ Pop(cp);
__ Pop(index, feedback_vector, function, argc);
__ SmiUntag(argc);
}
@@ -2155,6 +1845,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// feedback_vector : the feedback vector
// index : slot in feedback vector (smi)
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -2177,7 +1868,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
Label check_allocation_site;
__ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(function, feedback_value);
- __ B(eq, &done);
+ __ B(eq, &done_increment_count);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ B(eq, &done);
__ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
@@ -2199,7 +1890,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
__ Cmp(function, scratch1);
__ B(ne, &megamorphic);
- __ B(&done);
+ __ B(&done_increment_count);
__ Bind(&miss);
@@ -2230,12 +1921,32 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index, new_target);
- __ B(&done);
+ __ B(&done_initialize_count);
__ Bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
feedback_vector, index, new_target);
+
+ __ bind(&done_initialize_count);
+ // Initialize the call counter.
+ __ Mov(scratch1, Operand(Smi::FromInt(1)));
+ __ Adds(scratch2, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Str(scratch1,
+ FieldMemOperand(scratch2, FixedArray::kHeaderSize + kPointerSize));
+ __ b(&done);
+
+ __ bind(&done_increment_count);
+
+ // Increment the call count for monomorphic function calls.
+ __ Add(scratch1, feedback_vector,
+ Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+ __ Add(scratch1, scratch1, Operand(FixedArray::kHeaderSize + kPointerSize));
+ __ Ldr(scratch2, FieldMemOperand(scratch1, 0));
+ __ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
+ __ Str(scratch2, FieldMemOperand(scratch1, 0));
+
__ Bind(&done);
}
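[editor's note] The new count code above adds Smi::FromInt(1) directly to the loaded slot without untagging. That works because Smi tagging is linear, so adding two tagged values adds their payloads. A sketch of the idea, assuming the 64-bit Smi layout (payload in the upper 32 bits) used on arm64 in this era of V8:

```cpp
#include <cstdint>
#include <cstdio>

// Minimal Smi model: 32-bit payload shifted into the upper word.
static int64_t SmiFromInt(int32_t v) { return static_cast<int64_t>(v) << 32; }
static int32_t SmiToInt(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

int main() {
  int64_t count = SmiFromInt(41);
  // Tagging is linear, so a plain add of a tagged 1 increments the
  // payload; no untag/retag round trip is needed.
  count += SmiFromInt(1);
  std::printf("count = %d\n", SmiToInt(count));  // 42
}
```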
@@ -2308,7 +2019,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ Add(feedback_vector, feedback_vector,
Operand(FixedArray::kHeaderSize + kPointerSize));
__ Ldr(index, FieldMemOperand(feedback_vector, 0));
- __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Add(index, index, Operand(Smi::FromInt(1)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
// Set up arguments for the array constructor stub.
@@ -2368,7 +2079,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Add(feedback_vector, feedback_vector,
Operand(FixedArray::kHeaderSize + kPointerSize));
__ Ldr(index, FieldMemOperand(feedback_vector, 0));
- __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Add(index, index, Operand(Smi::FromInt(1)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
__ Bind(&call_function);
@@ -2433,7 +2144,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ B(ne, &miss);
// Initialize the call counter.
- __ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
+ __ Mov(x5, Smi::FromInt(1));
__ Adds(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
@@ -2445,9 +2156,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(function);
+ __ Push(cp, function);
__ CallStub(&create_stub);
- __ Pop(function);
+ __ Pop(cp, function);
}
__ B(&call_function);
@@ -2527,13 +2238,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
}
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Mov(index_, x0);
@@ -3219,74 +2924,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ SmiTag(from);
StringCharAtGenerator generator(input_string, from, result_length, x0,
&runtime, &runtime, &runtime,
- STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
generator.SkipSlow(masm, &runtime);
}
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in x0.
- Label not_smi;
- __ JumpIfNotSmi(x0, &not_smi);
- __ Ret();
- __ Bind(&not_smi);
-
- Label not_heap_number;
- __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
- // x0: receiver
- // x1: receiver instance type
- __ B(ne, &not_heap_number);
- __ Ret();
- __ Bind(&not_heap_number);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in x0.
- __ AssertNotNumber(x0);
-
- Label not_string;
- __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
- // x0: receiver
- // x1: receiver instance type
- __ B(hs, &not_string);
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
- __ Bind(&not_string);
-
- Label not_oddball;
- __ Cmp(x1, ODDBALL_TYPE);
- __ B(ne, &not_oddball);
- __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
- __ Ret();
- __ Bind(&not_oddball);
-
- __ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in x0.
- __ AssertString(x0);
-
- // Check if string has a cached array index.
- Label runtime;
- __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
- __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
- __ B(ne, &runtime);
- __ IndexFromHash(x2, x0);
- __ Ret();
-
- __ Bind(&runtime);
- __ Push(x0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
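[editor's note] The deleted stubs implemented the ToNumber fast-path dispatch: Smis and heap numbers return as-is, oddballs use a precomputed value, and strings fall through to a slower conversion. A hedged sketch of that dispatch order with a hypothetical tagged-union value model (C++17):

```cpp
#include <cstdio>
#include <cstdlib>
#include <string>
#include <variant>

struct Oddball { double to_number; };  // e.g. true -> 1, undefined -> NaN
using Value = std::variant<int32_t, double, Oddball, std::string>;

double ToNumber(const Value& v) {
  if (auto* smi = std::get_if<int32_t>(&v)) return *smi;  // already a number
  if (auto* num = std::get_if<double>(&v)) return *num;   // already a number
  if (auto* odd = std::get_if<Oddball>(&v)) return odd->to_number;
  // Strings take the slow path (the stubs also had a cached-index shortcut).
  return std::strtod(std::get<std::string>(v).c_str(), nullptr);
}

int main() {
  std::printf("%g %g %g\n", ToNumber(Value{7}), ToNumber(Value{Oddball{1.0}}),
              ToNumber(Value{std::string("3.5")}));
}
```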
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in x0.
Label is_number;
@@ -3669,14 +3313,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -3815,11 +3459,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Bind(&not_array);
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, feedback,
- receiver_map, scratch1, x7);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, name, feedback, receiver_map, scratch1, x7);
__ Bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -3897,37 +3538,30 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ jmp(&compare_map);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
- Register key = VectorStoreICDescriptor::NameRegister(); // x2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // x2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
Register feedback = x5;
Register receiver_map = x6;
Register scratch1 = x7;
@@ -3951,11 +3585,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Bind(&not_array);
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
- receiver, key, feedback,
- receiver_map, scratch1, x8);
+ masm->isolate()->store_stub_cache()->GenerateProbe(
+ masm, receiver, key, feedback, receiver_map, scratch1, x8);
__ Bind(&miss);
StoreIC::GenerateMiss(masm);
@@ -3965,13 +3596,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ jmp(&compare_map);
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4040,13 +3669,12 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ jmp(miss);
}
-
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
- Register key = VectorStoreICDescriptor::NameRegister(); // x2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // x2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
Register feedback = x5;
Register receiver_map = x6;
Register scratch1 = x7;
@@ -4576,19 +4204,13 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
@@ -4596,8 +4218,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
@@ -4621,14 +4241,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
__ Bind(&n_case);
// N arguments.
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4743,7 +4364,7 @@ void InternalArrayConstructorStub::GenerateCase(
__ Bind(&n_case);
// N arguments.
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN);
}
@@ -4829,17 +4450,18 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ Bind(&done_allocate);
// Initialize the JSObject fields.
- __ Mov(x1, x0);
STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
- __ Str(x2, MemOperand(x1, kPointerSize, PostIndex));
+ __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
__ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
- __ Stp(x3, x3, MemOperand(x1, 2 * kPointerSize, PostIndex));
+ __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
+ __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
- // -- x0 : result (untagged)
+ // -- x0 : result (tagged)
// -- x1 : result fields (untagged)
// -- x5 : result end (untagged)
// -- x2 : initial map
@@ -4857,10 +4479,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(x1, x5, x6);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Add(x0, x0, kHeapObjectTag);
__ Ret();
}
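[editor's note] The change above keeps x0 tagged throughout and uses FieldMemOperand, which folds the -kHeapObjectTag into the addressing offset, instead of untagging and retagging around raw stores. A standalone sketch of that tagging trick (the tag value of 1 matches the STATIC_ASSERT in the stub; the rest is illustrative):

```cpp
#include <cstdint>
#include <cstdio>

constexpr uintptr_t kHeapObjectTag = 1;

uintptr_t Tag(void* p) { return reinterpret_cast<uintptr_t>(p) | kHeapObjectTag; }

uint64_t LoadField(uintptr_t tagged, int offset) {
  // "Field" access: the -1 is folded into the offset, so the tagged
  // pointer is never explicitly untagged.
  return *reinterpret_cast<uint64_t*>(tagged + offset - kHeapObjectTag);
}

int main() {
  uint64_t object[3] = {0x11, 0x22, 0x33};  // map, properties, elements
  uintptr_t t = Tag(object);
  std::printf("%llx\n", (unsigned long long)LoadField(t, 8));  // 0x22
}
```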
__ Bind(&slack_tracking);
@@ -4879,10 +4497,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(x1, x5, x6);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Add(x0, x0, kHeapObjectTag);
-
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4912,10 +4526,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(x2);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Sub(x0, x0, kHeapObjectTag);
__ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
__ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Sub(x5, x5, kHeapObjectTag); // Subtract the tag from end.
__ B(&done_allocate);
// Fall back to %NewObject.
@@ -4934,20 +4548,20 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(x1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make x2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ Mov(x2, fp);
- __ B(&loop_entry);
- __ Bind(&loop);
+ // Make x2 point to the JavaScript frame.
+ __ Mov(x2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
- __ B(ne, &loop);
+ __ B(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ Bind(&ok);
}
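[editor's note] The rewrite above replaces the old frame-search loop with a single hop: each frame stores its caller's frame pointer at a fixed slot, so skipping the one possible stub frame is one load, verified under --debug-code. A sketch of that walk with a hypothetical frame layout:

```cpp
#include <cstdio>

struct Frame {
  Frame* caller_fp;      // stand-in for kCallerFPOffset
  const void* function;  // stand-in for kFunctionOffset (layout hypothetical)
};

int main() {
  int js_fn = 0;
  Frame js_frame{nullptr, &js_fn};
  Frame stub_frame{&js_frame, nullptr};

  Frame* fp = &stub_frame;
  bool skip_stub_frame = true;
  if (skip_stub_frame) fp = fp->caller_fp;  // one hop, as in the new code
  // The debug check asserts we really landed on the function's frame.
  std::printf("found function: %s\n", fp->function == &js_fn ? "yes" : "no");
}
```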
// Check if we have rest parameters (only possible if we have an
@@ -4963,10 +4577,10 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label rest_parameters;
__ Ldrsw(x0, UntagSmiMemOperand(
x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldrsw(
- x1, FieldMemOperand(x1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Subs(x0, x0, x1);
+ x3, FieldMemOperand(x3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Subs(x0, x0, x3);
__ B(gt, &rest_parameters);
// Return an empty rest parameter array.
@@ -4979,7 +4593,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
__ Bind(&done_allocate);
// Setup the rest parameter array in x0.
@@ -5012,15 +4626,16 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- x0 : number of rest parameters
+ // -- x1 : function
// -- x2 : pointer to first rest parameters
// -- lr : return address
// -----------------------------------
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
- __ Mov(x1, JSArray::kSize + FixedArray::kHeaderSize);
- __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
- __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+ __ Mov(x6, JSArray::kSize + FixedArray::kHeaderSize);
+ __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
+ __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
__ Bind(&done_allocate);
// Compute arguments.length in x6.
@@ -5055,19 +4670,27 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ Bind(&allocate);
+ __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ B(gt, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(x0);
- __ SmiTag(x1);
- __ Push(x0, x2, x1);
+ __ SmiTag(x6);
+ __ Push(x0, x2, x6);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Mov(x3, x0);
__ Pop(x2, x0);
__ SmiUntag(x0);
}
__ B(&done_allocate);
+
+ // Fall back to %NewRestParameter.
+ __ Bind(&too_big_for_new_space);
+ __ Push(x1);
+ __ TailCallRuntime(Runtime::kNewRestParameter);
}
}
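[editor's note] The new too_big_for_new_space split above sends oversized requests straight to %NewRestParameter instead of retrying %AllocateInNewSpace, since new space cannot hold objects above the regular-page limit. A trivial sketch of the branch (the limit value below is illustrative only, not the real Page::kMaxRegularHeapObjectSize):

```cpp
#include <cstdio>

constexpr int kMaxRegularHeapObjectSize = 507136;  // illustrative value

const char* AllocatePath(int size_in_bytes) {
  if (size_in_bytes > kMaxRegularHeapObjectSize)
    return "runtime fallback (too big for new space)";
  return "new-space bump allocation";
}

int main() {
  std::printf("%s\n", AllocatePath(4096));
  std::printf("%s\n", AllocatePath(kMaxRegularHeapObjectSize + 8));
}
```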
@@ -5081,17 +4704,34 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(x1);
+ // Make x6 point to the JavaScript frame.
+ __ Mov(x6, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
+ __ Cmp(x3, x1);
+ __ B(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ Bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldrsw(
x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Add(x3, fp, Operand(x2, LSL, kPointerSizeLog2));
+ __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
__ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
__ SmiTag(x2);
// x1 : function
// x2 : number of parameters (tagged)
// x3 : parameters pointer
+ // x6 : JavaScript frame pointer
//
// Returns pointer to result object in x0.
@@ -5109,7 +4749,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
Register caller_ctx = x12;
Label runtime;
Label adaptor_frame, try_allocate;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
__ Ldr(
caller_ctx,
MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
@@ -5180,7 +4820,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Do the allocation of all three objects in one go. Assign this to x0, as it
// will be returned to the caller.
Register alloc_obj = x0;
- __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+ __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
// Get the arguments boilerplate from the current (global) context.
@@ -5364,20 +5004,20 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(x1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make x2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ Mov(x2, fp);
- __ B(&loop_entry);
- __ Bind(&loop);
+ // Make x2 point to the JavaScript frame.
+ __ Mov(x2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
- __ Bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
__ Cmp(x3, x1);
- __ B(ne, &loop);
+ __ B(eq, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ Bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -5387,9 +5027,9 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor);
{
- __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
__ Ldrsw(x0, FieldMemOperand(
- x1, SharedFunctionInfo::kFormalParameterCountOffset));
+ x4, SharedFunctionInfo::kFormalParameterCountOffset));
__ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
__ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
}
@@ -5406,15 +5046,16 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- x0 : number of rest parameters
+ // -- x1 : function
// -- x2 : pointer to first rest parameters
// -- lr : return address
// -----------------------------------
// Allocate space for the strict arguments object plus the backing store.
Label allocate, done_allocate;
- __ Mov(x1, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
- __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
- __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+ __ Mov(x6, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
+ __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
+ __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
__ Bind(&done_allocate);
// Compute arguments.length in x6.
@@ -5449,48 +5090,27 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ Bind(&allocate);
+ __ Cmp(x6, Operand(Page::kMaxRegularHeapObjectSize));
+ __ B(gt, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(x0);
- __ SmiTag(x1);
- __ Push(x0, x2, x1);
+ __ SmiTag(x6);
+ __ Push(x0, x2, x6);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Mov(x3, x0);
__ Pop(x2, x0);
__ SmiUntag(x0);
}
__ B(&done_allocate);
-}
-
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context = cp;
- Register result = x0;
- Register slot = x2;
- Label slow_case;
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ Ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = result;
- }
-
- // Load the PropertyCell value at the specified slot.
- __ Add(result, context, Operand(slot, LSL, kPointerSizeLog2));
- __ Ldr(result, ContextMemOperand(result));
- __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- // If the result is not the_hole, return. Otherwise, handle in the runtime.
- __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &slow_case);
- __ Ret();
-
- // Fallback to runtime.
- __ Bind(&slow_case);
- __ SmiTag(slot);
- __ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ // Fall back to %NewStrictArguments.
+ __ Bind(&too_big_for_new_space);
+ __ Push(x1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
@@ -5807,9 +5427,15 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+
+ // FunctionCallbackArguments
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
- // FunctionCallbackArguments: context, callee and call data.
+ // context, callee and call data.
__ Push(context, callee, call_data);
if (!is_lazy()) {
@@ -5833,7 +5459,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
// Allocate space so that CallApiFunctionAndReturn can store some scratch
// registers on the stack.
@@ -5849,10 +5475,9 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
__ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
- // FunctionCallbackInfo::length_ = argc and
- // FunctionCallbackInfo::is_construct_call = 0
+ // FunctionCallbackInfo::length_ = argc
__ Mov(x10, argc());
- __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
+ __ Str(x10, MemOperand(x0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5869,9 +5494,9 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
- MemOperand is_construct_call_operand =
- MemOperand(masm->StackPointer(), 4 * kPointerSize);
- MemOperand* stack_space_operand = &is_construct_call_operand;
+ MemOperand length_operand =
+ MemOperand(masm->StackPointer(), 3 * kPointerSize);
+ MemOperand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
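[editor's note] This hunk reshapes the implicit-args array: new.target is appended at index 7 (growing kArgsLength from 7 to 8) and the separate is_construct_call word goes away, leaving length_ stored alone. A sketch of the resulting layout, using only the indices the STATIC_ASSERTs above confirm (the slots in between are paraphrased, their exact order is not shown in this hunk):

```cpp
#include <cstdio>

enum FunctionCallbackArgIndex {
  kHolderIndex = 0,
  kIsolateIndex = 1,
  kReturnValueDefaultValueIndex = 2,
  // indices 3..6 hold return value, call data, callee and saved context
  // (order not confirmed by this diff)
  kNewTargetIndex = 7,
  kArgsLength = 8
};

int main() {
  std::printf("implicit args: %d slots, new.target at index %d\n",
              kArgsLength, kNewTargetIndex);
}
```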
@@ -5883,15 +5508,34 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- x2 : api_function_address
- // -----------------------------------
-
- Register api_function_address = ApiGetterDescriptor::function_address();
- DCHECK(api_function_address.is(x2));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = x4;
+ Register scratch2 = x5;
+ Register scratch3 = x6;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ __ Push(receiver);
+
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
+ __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ Push(scratch3, scratch, scratch, scratch2, holder);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ Push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5918,6 +5562,11 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ Register api_function_address = x2;
+ __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ldr(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
const int spill_offset = 1 + kApiStackSpace;
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
@@ -5927,7 +5576,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
return_value_operand, NULL);
}
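[editor's note] The rewritten getter stub builds PropertyCallbackInfo::args_ by pushing slots from the highest index (this, index 6) down to should_throw_on_error (index 0), then the property name, so the array reads in index order just above the name on the stack. A sketch of that ordering with a vector standing in for the downward-growing stack:

```cpp
#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> stack;  // stack.back() is the "top"
  // Pushed from index 6 down to index 0, matching the stub's push sequence.
  const char* slots[] = {"this",    "data",   "return_value",
                         "return_value_default", "isolate", "holder",
                         "should_throw_on_error"};
  for (const char* s : slots) stack.push_back(s);
  stack.push_back("name");  // pushed last, sits just below args_[0]

  for (int i = 0; i < 7; ++i)
    std::printf("args_[%d] = %s\n", i, stack[stack.size() - 2 - i].c_str());
}
```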
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index c2073f1f4b..edd289900e 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -15,66 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm64_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
- Simulator * simulator = Simulator::current(isolate);
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(x),
- Simulator::CallArgument::End()
- };
- return simulator->CallDouble(fast_exp_arm64_machine_code, args);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
- // an AAPCS64-compliant exp() function. This will be faster than the C
- // library's exp() function, but probably less accurate.
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
-
- ExternalReference::InitializeMathExpData();
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
- masm.SetStackPointer(csp);
-
- // The argument will be in d0 on entry.
- DoubleRegister input = d0;
- // Use other caller-saved registers for all other values.
- DoubleRegister result = d1;
- DoubleRegister double_temp1 = d2;
- DoubleRegister double_temp2 = d3;
- Register temp1 = x10;
- Register temp2 = x11;
- Register temp3 = x12;
-
- MathExpGenerator::EmitMathExp(&masm, input, result,
- double_temp1, double_temp2,
- temp1, temp2, temp3);
- // Move the result to the return register.
- masm.Fmov(d0, result);
- masm.Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
- fast_exp_arm64_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
}
@@ -175,8 +115,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
- __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+ __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
@@ -184,18 +124,18 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
- __ Add(x10, array, kHeapObjectTag);
- __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
- scratch, kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Move(x10, array);
+ __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
+ __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
FPRegister nan_d = d1;
@@ -282,8 +222,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
- __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
+ __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = x10;
@@ -293,7 +233,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_elements, array, FixedArray::kHeaderSize);
+ __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
// Allocating heap numbers in the loop below can fail and cause a jump to
@@ -307,8 +247,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ Cmp(dst_elements, dst_end);
__ B(lt, &initialization_loop);
- __ Add(dst_elements, array, FixedArray::kHeaderSize);
- __ Add(array, array, kHeapObjectTag);
+ __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
Register heap_num_map = x15;
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
@@ -511,127 +450,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Bind(&done);
}
-
-static MemOperand ExpConstant(Register base, int index) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_temp1,
- DoubleRegister double_temp2,
- Register temp1,
- Register temp2,
- Register temp3) {
- // TODO(jbramley): There are several instances where fnmsub could be used
- // instead of fmul and fsub. Doing this changes the result, but since this is
- // an estimation anyway, does it matter?
-
- DCHECK(!AreAliased(input, result,
- double_temp1, double_temp2,
- temp1, temp2, temp3));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label done;
- DoubleRegister double_temp3 = result;
- Register constants = temp3;
-
- // The algorithm used relies on some magic constants which are initialized in
- // ExternalReference::InitializeMathExpData().
-
- // Load the address of the start of the array.
- __ Mov(constants, ExternalReference::math_exp_constants(0));
-
- // We have to do a four-way split here:
- // - If input <= about -708.4, the output always rounds to zero.
- // - If input >= about 709.8, the output always rounds to +infinity.
- // - If the input is NaN, the output is NaN.
- // - Otherwise, the result needs to be calculated.
- Label result_is_finite_non_zero;
- // Assert that we can load offset 0 (the small input threshold) and offset 1
- // (the large input threshold) with a single ldp.
- DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
- ExpConstant(constants, 0).offset()));
- __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
-
- __ Fcmp(input, double_temp1);
- __ Fccmp(input, double_temp2, NoFlag, hi);
- // At this point, the condition flags can be in one of five states:
- // NZCV
- // 1000 -708.4 < input < 709.8 result = exp(input)
- // 0110 input == 709.8 result = +infinity
- // 0010 input > 709.8 result = +infinity
- // 0011 input is NaN result = input
- // 0000 input <= -708.4 result = +0.0
-
- // Continue the common case first. 'mi' tests N == 1.
- __ B(&result_is_finite_non_zero, mi);
-
- // TODO(jbramley): Consider adding a +infinity register for ARM64.
- __ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
-
- // Select between +0.0 and +infinity. 'lo' tests C == 0.
- __ Fcsel(result, fp_zero, double_temp2, lo);
- // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
- __ Fcsel(result, result, input, vc);
- __ B(&done);
-
- // The rest is magic, as described in InitializeMathExpData().
- __ Bind(&result_is_finite_non_zero);
-
- // Assert that we can load offset 3 and offset 4 with a single ldp.
- DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
- ExpConstant(constants, 3).offset()));
- __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
- __ Fmadd(double_temp1, double_temp1, input, double_temp3);
- __ Fmov(temp2.W(), double_temp1.S());
- __ Fsub(double_temp1, double_temp1, double_temp3);
-
- // Assert that we can load offset 5 and offset 6 with a single ldp.
- DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
- ExpConstant(constants, 5).offset()));
- __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
- // TODO(jbramley): Consider using Fnmsub here.
- __ Fmul(double_temp1, double_temp1, double_temp2);
- __ Fsub(double_temp1, double_temp1, input);
-
- __ Fmul(double_temp2, double_temp1, double_temp1);
- __ Fsub(double_temp3, double_temp3, double_temp1);
- __ Fmul(double_temp3, double_temp3, double_temp2);
-
- __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
-
- __ Ldr(double_temp2, ExpConstant(constants, 7));
- // TODO(jbramley): Consider using Fnmsub here.
- __ Fmul(double_temp3, double_temp3, double_temp2);
- __ Fsub(double_temp3, double_temp3, double_temp1);
-
- // The 8th constant is 1.0, so use an immediate move rather than a load.
- // We can't generate a runtime assertion here as we would need to call Abort
- // in the runtime and we don't have an Isolate when we generate this code.
- __ Fmov(double_temp2, 1.0);
- __ Fadd(double_temp3, double_temp3, double_temp2);
-
- __ And(temp2, temp2, 0x7ff);
- __ Add(temp1, temp1, 0x3ff);
-
- // Do the final table lookup.
- __ Mov(temp3, ExternalReference::math_exp_log_table());
-
- __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
- __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
- __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
- __ Bfi(temp2, temp1, 32, 32);
- __ Fmov(double_temp1, temp2);
-
- __ Fmul(result, double_temp3, double_temp1);
-
- __ Bind(&done);
-}
-
#undef __
} // namespace internal
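
Aside on the removed EmitMathExp: the Fcmp/Fccmp pair plus two Fcsel instructions resolved the four-way split without any branch for the special cases. A hypothetical scalar model of that dispatch, with the thresholds passed in where the stub loaded them from its constants table:

    // Hypothetical scalar model of the removed four-way split.
    #include <cmath>
    #include <limits>

    double ExpWithSpecialCases(double input, double small_threshold,
                               double large_threshold) {
      if (std::isnan(input)) return input;       // NZCV 0011: 'vc' test fails
      if (input <= small_threshold) return 0.0;  // NZCV 0000: 'lo' picks +0.0
      if (input >= large_threshold)              // C set: pick +infinity
        return std::numeric_limits<double>::infinity();
      return std::exp(input);                    // common case, 'mi' (N set)
    }
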
diff --git a/deps/v8/src/arm64/codegen-arm64.h b/deps/v8/src/arm64/codegen-arm64.h
index 573f6fe159..7ccd5ac444 100644
--- a/deps/v8/src/arm64/codegen-arm64.h
+++ b/deps/v8/src/arm64/codegen-arm64.h
@@ -5,7 +5,6 @@
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -27,22 +26,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/arm64/constants-arm64.h
index 00b24e9375..65b8b30610 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/arm64/constants-arm64.h
@@ -117,89 +117,89 @@ const unsigned kDoubleExponentBias = 1023;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
-#define INSTRUCTION_FIELDS_LIST(V_) \
-/* Register fields */ \
-V_(Rd, 4, 0, Bits) /* Destination register. */ \
-V_(Rn, 9, 5, Bits) /* First source register. */ \
-V_(Rm, 20, 16, Bits) /* Second source register. */ \
-V_(Ra, 14, 10, Bits) /* Third source register. */ \
-V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
-V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
- /* store second source. */ \
-V_(PrefetchMode, 4, 0, Bits) \
- \
-/* Common bits */ \
-V_(SixtyFourBits, 31, 31, Bits) \
-V_(FlagsUpdate, 29, 29, Bits) \
- \
-/* PC relative addressing */ \
-V_(ImmPCRelHi, 23, 5, SignedBits) \
-V_(ImmPCRelLo, 30, 29, Bits) \
- \
-/* Add/subtract/logical shift register */ \
-V_(ShiftDP, 23, 22, Bits) \
-V_(ImmDPShift, 15, 10, Bits) \
- \
-/* Add/subtract immediate */ \
-V_(ImmAddSub, 21, 10, Bits) \
-V_(ShiftAddSub, 23, 22, Bits) \
- \
-/* Add/subtract extend */                                                    \
-V_(ImmExtendShift, 12, 10, Bits) \
-V_(ExtendMode, 15, 13, Bits) \
- \
-/* Move wide */ \
-V_(ImmMoveWide, 20, 5, Bits) \
-V_(ShiftMoveWide, 22, 21, Bits) \
- \
-/* Logical immediate, bitfield and extract */ \
-V_(BitN, 22, 22, Bits) \
-V_(ImmRotate, 21, 16, Bits) \
-V_(ImmSetBits, 15, 10, Bits) \
-V_(ImmR, 21, 16, Bits) \
-V_(ImmS, 15, 10, Bits) \
- \
-/* Test and branch immediate */ \
-V_(ImmTestBranch, 18, 5, SignedBits) \
-V_(ImmTestBranchBit40, 23, 19, Bits) \
-V_(ImmTestBranchBit5, 31, 31, Bits) \
- \
-/* Conditionals */ \
-V_(Condition, 15, 12, Bits) \
-V_(ConditionBranch, 3, 0, Bits) \
-V_(Nzcv, 3, 0, Bits) \
-V_(ImmCondCmp, 20, 16, Bits) \
-V_(ImmCondBranch, 23, 5, SignedBits) \
- \
-/* Floating point */ \
-V_(FPType, 23, 22, Bits) \
-V_(ImmFP, 20, 13, Bits) \
-V_(FPScale, 15, 10, Bits) \
- \
-/* Load Store */ \
-V_(ImmLS, 20, 12, SignedBits) \
-V_(ImmLSUnsigned, 21, 10, Bits) \
-V_(ImmLSPair, 21, 15, SignedBits) \
-V_(SizeLS, 31, 30, Bits) \
-V_(ImmShiftLS, 12, 12, Bits) \
- \
-/* Other immediates */ \
-V_(ImmUncondBranch, 25, 0, SignedBits) \
-V_(ImmCmpBranch, 23, 5, SignedBits) \
-V_(ImmLLiteral, 23, 5, SignedBits) \
-V_(ImmException, 20, 5, Bits) \
-V_(ImmHint, 11, 5, Bits) \
-V_(ImmBarrierDomain, 11, 10, Bits) \
-V_(ImmBarrierType, 9, 8, Bits) \
- \
-/* System (MRS, MSR) */ \
-V_(ImmSystemRegister, 19, 5, Bits) \
-V_(SysO0, 19, 19, Bits) \
-V_(SysOp1, 18, 16, Bits) \
-V_(SysOp2, 7, 5, Bits) \
-V_(CRn, 15, 12, Bits) \
-V_(CRm, 11, 8, Bits) \
-
+#define INSTRUCTION_FIELDS_LIST(V_) \
+ /* Register fields */ \
+ V_(Rd, 4, 0, Bits) /* Destination register. */ \
+ V_(Rn, 9, 5, Bits) /* First source register. */ \
+ V_(Rm, 20, 16, Bits) /* Second source register. */ \
+ V_(Ra, 14, 10, Bits) /* Third source register. */ \
+ V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
+ V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
+ /* store second source. */ \
+ V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \
+ V_(PrefetchMode, 4, 0, Bits) \
+ \
+ /* Common bits */ \
+ V_(SixtyFourBits, 31, 31, Bits) \
+ V_(FlagsUpdate, 29, 29, Bits) \
+ \
+ /* PC relative addressing */ \
+ V_(ImmPCRelHi, 23, 5, SignedBits) \
+ V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+ /* Add/subtract/logical shift register */ \
+ V_(ShiftDP, 23, 22, Bits) \
+ V_(ImmDPShift, 15, 10, Bits) \
+ \
+ /* Add/subtract immediate */ \
+ V_(ImmAddSub, 21, 10, Bits) \
+ V_(ShiftAddSub, 23, 22, Bits) \
+ \
+  /* Add/subtract extend */                                                   \
+ V_(ImmExtendShift, 12, 10, Bits) \
+ V_(ExtendMode, 15, 13, Bits) \
+ \
+ /* Move wide */ \
+ V_(ImmMoveWide, 20, 5, Bits) \
+ V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+ /* Logical immediate, bitfield and extract */ \
+ V_(BitN, 22, 22, Bits) \
+ V_(ImmRotate, 21, 16, Bits) \
+ V_(ImmSetBits, 15, 10, Bits) \
+ V_(ImmR, 21, 16, Bits) \
+ V_(ImmS, 15, 10, Bits) \
+ \
+ /* Test and branch immediate */ \
+ V_(ImmTestBranch, 18, 5, SignedBits) \
+ V_(ImmTestBranchBit40, 23, 19, Bits) \
+ V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+ /* Conditionals */ \
+ V_(Condition, 15, 12, Bits) \
+ V_(ConditionBranch, 3, 0, Bits) \
+ V_(Nzcv, 3, 0, Bits) \
+ V_(ImmCondCmp, 20, 16, Bits) \
+ V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+ /* Floating point */ \
+ V_(FPType, 23, 22, Bits) \
+ V_(ImmFP, 20, 13, Bits) \
+ V_(FPScale, 15, 10, Bits) \
+ \
+ /* Load Store */ \
+ V_(ImmLS, 20, 12, SignedBits) \
+ V_(ImmLSUnsigned, 21, 10, Bits) \
+ V_(ImmLSPair, 21, 15, SignedBits) \
+ V_(SizeLS, 31, 30, Bits) \
+ V_(ImmShiftLS, 12, 12, Bits) \
+ \
+ /* Other immediates */ \
+ V_(ImmUncondBranch, 25, 0, SignedBits) \
+ V_(ImmCmpBranch, 23, 5, SignedBits) \
+ V_(ImmLLiteral, 23, 5, SignedBits) \
+ V_(ImmException, 20, 5, Bits) \
+ V_(ImmHint, 11, 5, Bits) \
+ V_(ImmBarrierDomain, 11, 10, Bits) \
+ V_(ImmBarrierType, 9, 8, Bits) \
+ \
+ /* System (MRS, MSR) */ \
+ V_(ImmSystemRegister, 19, 5, Bits) \
+ V_(SysO0, 19, 19, Bits) \
+ V_(SysOp1, 18, 16, Bits) \
+ V_(SysOp2, 7, 5, Bits) \
+ V_(CRn, 15, 12, Bits) \
+ V_(CRm, 11, 8, Bits)
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
/* NZCV */ \
@@ -857,6 +857,29 @@ enum LoadStoreRegisterOffset {
#undef LOAD_STORE_REGISTER_OFFSET
};
+// Load/store acquire/release
+enum LoadStoreAcquireReleaseOp {
+ LoadStoreAcquireReleaseFixed = 0x08000000,
+ LoadStoreAcquireReleaseFMask = 0x3F000000,
+ LoadStoreAcquireReleaseMask = 0xCFC08000,
+ STLXR_b = LoadStoreAcquireReleaseFixed | 0x00008000,
+ LDAXR_b = LoadStoreAcquireReleaseFixed | 0x00408000,
+ STLR_b = LoadStoreAcquireReleaseFixed | 0x00808000,
+ LDAR_b = LoadStoreAcquireReleaseFixed | 0x00C08000,
+ STLXR_h = LoadStoreAcquireReleaseFixed | 0x40008000,
+ LDAXR_h = LoadStoreAcquireReleaseFixed | 0x40408000,
+ STLR_h = LoadStoreAcquireReleaseFixed | 0x40808000,
+ LDAR_h = LoadStoreAcquireReleaseFixed | 0x40C08000,
+ STLXR_w = LoadStoreAcquireReleaseFixed | 0x80008000,
+ LDAXR_w = LoadStoreAcquireReleaseFixed | 0x80408000,
+ STLR_w = LoadStoreAcquireReleaseFixed | 0x80808000,
+ LDAR_w = LoadStoreAcquireReleaseFixed | 0x80C08000,
+ STLXR_x = LoadStoreAcquireReleaseFixed | 0xC0008000,
+ LDAXR_x = LoadStoreAcquireReleaseFixed | 0xC0408000,
+ STLR_x = LoadStoreAcquireReleaseFixed | 0xC0808000,
+ LDAR_x = LoadStoreAcquireReleaseFixed | 0xC0C08000,
+};
+
// Conditional compare.
enum ConditionalCompareOp {
ConditionalCompareMask = 0x60000000,
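
Note on the LoadStoreAcquireReleaseOp enum added above: each value is the class's fixed bit pattern OR'ed with size (bits 31:30) and opcode bits, with FMask selecting the instruction class and Mask the exact instruction. A self-contained sketch of how those masks classify a raw instruction word, constants copied from the enum:

    // Self-contained sketch; constants copied from the enum in the hunk above.
    #include <cstdint>

    constexpr uint32_t kFixed = 0x08000000;   // LoadStoreAcquireReleaseFixed
    constexpr uint32_t kFMask = 0x3F000000;   // LoadStoreAcquireReleaseFMask
    constexpr uint32_t kMask = 0xCFC08000;    // LoadStoreAcquireReleaseMask
    constexpr uint32_t kLDAR_w = kFixed | 0x80C08000;

    bool IsLoadStoreAcquireRelease(uint32_t bits) {
      return (bits & kFMask) == kFixed;  // instruction belongs to the class
    }

    bool IsLdarW(uint32_t bits) {
      return (bits & kMask) == kLDAR_w;  // exactly "ldar wt, [xn]"
    }
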
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index 712dbbd650..7c1084f62d 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -58,14 +58,16 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
__asm__ __volatile__ ( // NOLINT
// Clean every line of the D cache containing the target data.
"0: \n\t"
- // dc : Data Cache maintenance
- // c : Clean
- // va : by (Virtual) Address
- // u : to the point of Unification
- // The point of unification for a processor is the point by which the
- // instruction and data caches are guaranteed to see the same copy of a
- // memory location. See ARM DDI 0406B page B2-12 for more information.
- "dc cvau, %[dline] \n\t"
+ // dc : Data Cache maintenance
+ // c : Clean
+ // i : Invalidate
+ // va : by (Virtual) Address
+ // c : to the point of Coherency
+ // See ARM DDI 0406B page B2-12 for more information.
+ // We would prefer to use "cvau" (clean to the point of unification) here
+ // but we use "civac" to work around Cortex-A53 errata 819472, 826319,
+ // 827319 and 824069.
+ "dc civac, %[dline] \n\t"
"add %[dline], %[dline], %[dsize] \n\t"
"cmp %[dline], %[end] \n\t"
"b.lt 0b \n\t"
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/arm64/decoder-arm64-inl.h
index e00105e7bc..2405f87830 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/arm64/decoder-arm64-inl.h
@@ -217,8 +217,15 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(26) == 0) {
- // TODO(all): VisitLoadStoreExclusive.
- V::VisitUnimplemented(instr);
+ if (instr->Mask(0xA08000) == 0x800000 ||
+ instr->Mask(0xA00000) == 0xA00000) {
+ V::VisitUnallocated(instr);
+ } else if (instr->Mask(0x808000) == 0) {
+ // Load/Store exclusive without acquire/release are unimplemented.
+ V::VisitUnimplemented(instr);
+ } else {
+ V::VisitLoadStoreAcquireRelease(instr);
+ }
} else {
DecodeAdvSIMDLoadStore(instr);
}
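
The new decode branch above carves three cases out of the former load/store-exclusive hole purely by masking opcode bits. A standalone restatement of that classification, masks copied from the hunk:

    // Standalone restatement of the decode branch; masks copied from above.
    #include <cstdint>

    enum class LdStExKind { kUnallocated, kUnimplementedExclusive,
                            kAcquireRelease };

    LdStExKind ClassifyLoadStoreExclusive(uint32_t bits) {
      if ((bits & 0xA08000) == 0x800000 || (bits & 0xA00000) == 0xA00000)
        return LdStExKind::kUnallocated;
      if ((bits & 0x808000) == 0)  // plain exclusives remain unimplemented
        return LdStExKind::kUnimplementedExclusive;
      return LdStExKind::kAcquireRelease;  // routed to the new visitor
    }
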
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/arm64/decoder-arm64.h
index b1ef41f1a2..a17b324412 100644
--- a/deps/v8/src/arm64/decoder-arm64.h
+++ b/deps/v8/src/arm64/decoder-arm64.h
@@ -16,49 +16,50 @@ namespace internal {
// List macro containing all visitors needed by the decoder class.
-#define VISITOR_LIST(V) \
- V(PCRelAddressing) \
- V(AddSubImmediate) \
- V(LogicalImmediate) \
- V(MoveWideImmediate) \
- V(Bitfield) \
- V(Extract) \
- V(UnconditionalBranch) \
- V(UnconditionalBranchToRegister) \
- V(CompareBranch) \
- V(TestBranch) \
- V(ConditionalBranch) \
- V(System) \
- V(Exception) \
- V(LoadStorePairPostIndex) \
- V(LoadStorePairOffset) \
- V(LoadStorePairPreIndex) \
- V(LoadLiteral) \
- V(LoadStoreUnscaledOffset) \
- V(LoadStorePostIndex) \
- V(LoadStorePreIndex) \
- V(LoadStoreRegisterOffset) \
- V(LoadStoreUnsignedOffset) \
- V(LogicalShifted) \
- V(AddSubShifted) \
- V(AddSubExtended) \
- V(AddSubWithCarry) \
- V(ConditionalCompareRegister) \
- V(ConditionalCompareImmediate) \
- V(ConditionalSelect) \
- V(DataProcessing1Source) \
- V(DataProcessing2Source) \
- V(DataProcessing3Source) \
- V(FPCompare) \
- V(FPConditionalCompare) \
- V(FPConditionalSelect) \
- V(FPImmediate) \
- V(FPDataProcessing1Source) \
- V(FPDataProcessing2Source) \
- V(FPDataProcessing3Source) \
- V(FPIntegerConvert) \
- V(FPFixedPointConvert) \
- V(Unallocated) \
+#define VISITOR_LIST(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LoadStoreAcquireRelease) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Unallocated) \
V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools)
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index fe2a269935..c1d04ac3fb 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -97,8 +97,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all allocatable floating point registers.
CPURegList saved_fp_registers(
CPURegister::kFPRegister, kDRegSizeInBits,
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->allocatable_double_codes_mask());
+ RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
__ PushCPURegList(saved_fp_registers);
  // We save all the registers except jssp, sp and lr.
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/arm64/disasm-arm64.cc
index 00c3ec25d6..8e022b1690 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/arm64/disasm-arm64.cc
@@ -914,6 +914,34 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
Format(instr, mnemonic, form);
}
+void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xn]";
+ const char *form_x = "'Xt, ['Xn]";
+ const char *form_stlx = "'Ws, 'Wt, ['Xn]";
+ const char *form_stlx_x = "'Ws, 'Xt, ['Xn]";
+
+ switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
+ case LDAXR_b: mnemonic = "ldaxrb"; break;
+ case STLR_b: mnemonic = "stlrb"; break;
+ case LDAR_b: mnemonic = "ldarb"; break;
+ case LDAXR_h: mnemonic = "ldaxrh"; break;
+ case STLR_h: mnemonic = "stlrh"; break;
+ case LDAR_h: mnemonic = "ldarh"; break;
+ case LDAXR_w: mnemonic = "ldaxr"; break;
+ case STLR_w: mnemonic = "stlr"; break;
+ case LDAR_w: mnemonic = "ldar"; break;
+ case LDAXR_x: mnemonic = "ldaxr"; form = form_x; break;
+ case STLR_x: mnemonic = "stlr"; form = form_x; break;
+ case LDAR_x: mnemonic = "ldar"; form = form_x; break;
+ case STLXR_h: mnemonic = "stlxrh"; form = form_stlx; break;
+ case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
+ case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
+ case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
+ default: form = "(LoadStoreAcquireReleaseMask)";
+ }
+ Format(instr, mnemonic, form);
+}
void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
@@ -1295,6 +1323,9 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
}
break;
}
+ case 's':
+ reg_num = instr->Rs();
+ break;
default: UNREACHABLE();
}
@@ -1719,7 +1750,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void *>(addr));
return tmp_buffer_.start();
}
@@ -1771,7 +1802,8 @@ class BufferDisassembler : public v8::internal::DisassemblingDecoder {
~BufferDisassembler() { }
virtual void ProcessOutput(v8::internal::Instruction* instr) {
- v8::internal::SNPrintF(out_buffer_, "%s", GetOutput());
+ v8::internal::SNPrintF(out_buffer_, "%08" PRIx32 " %s",
+ instr->InstructionBits(), GetOutput());
}
private:
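
ProcessOutput now prefixes every disassembled line with the raw 32-bit instruction word. A minimal sketch of the format (the function name here is hypothetical); for instance, the encoding 0xd2800021 would print as "d2800021 movz x1, #0x1":

    // Minimal sketch of the new output format; PrintDisasmLine is made up.
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    void PrintDisasmLine(uint32_t instruction_bits, const char* text) {
      std::printf("%08" PRIx32 " %s\n", instruction_bits, text);
    }
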
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/arm64/eh-frame-arm64.cc
new file mode 100644
index 0000000000..bcdcffb960
--- /dev/null
+++ b/deps/v8/src/arm64/eh-frame-arm64.cc
@@ -0,0 +1,69 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kX0DwarfCode = 0;
+static const int kJsSpDwarfCode = 28;
+static const int kFpDwarfCode = 29;
+static const int kLrDwarfCode = 30;
+static const int kCSpDwarfCode = 31;
+
+const int EhFrameConstants::kCodeAlignmentFactor = 4;
+const int EhFrameConstants::kDataAlignmentFactor = -8;
+
+void EhFrameWriter::WriteReturnAddressRegisterCode() {
+ WriteULeb128(kLrDwarfCode);
+}
+
+void EhFrameWriter::WriteInitialStateInCie() {
+ SetBaseAddressRegisterAndOffset(x29, 0);
+ RecordRegisterNotModified(x30);
+}
+
+// static
+int EhFrameWriter::RegisterToDwarfCode(Register name) {
+ switch (name.code()) {
+ case Register::kCode_x28:
+ return kJsSpDwarfCode;
+ case Register::kCode_x29:
+ return kFpDwarfCode;
+ case Register::kCode_x30:
+ return kLrDwarfCode;
+ case Register::kCode_x31:
+ return kCSpDwarfCode;
+ case Register::kCode_x0:
+ return kX0DwarfCode;
+ default:
+ UNIMPLEMENTED();
+ return -1;
+ }
+}
+
+#ifdef ENABLE_DISASSEMBLER
+
+// static
+const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
+ switch (code) {
+ case kFpDwarfCode:
+ return "fp";
+ case kLrDwarfCode:
+ return "lr";
+ case kJsSpDwarfCode:
+ return "jssp";
+ case kCSpDwarfCode:
+ return "csp"; // This could be zr as well
+ default:
+ UNIMPLEMENTED();
+ return nullptr;
+ }
+}
+
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/arm64/instrument-arm64.cc
index 7a8e2f4ee1..dad89fe6bf 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/arm64/instrument-arm64.cc
@@ -429,6 +429,31 @@ void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
InstrumentLoadStore(instr);
}
+void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
+ Update();
+ static Counter* load_counter = GetCounter("Load Acquire");
+ static Counter* store_counter = GetCounter("Store Release");
+
+ switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
+ case LDAR_b: // Fall-through.
+ case LDAR_h: // Fall-through.
+ case LDAR_w: // Fall-through.
+ case LDAR_x: // Fall-through.
+ case LDAXR_b: // Fall-through.
+ case LDAXR_h: // Fall-through.
+ case LDAXR_w: // Fall-through.
+ case LDAXR_x: load_counter->Increment(); break;
+ case STLR_b: // Fall-through.
+ case STLR_h: // Fall-through.
+ case STLR_w: // Fall-through.
+ case STLR_x: // Fall-through.
+ case STLXR_b: // Fall-through.
+ case STLXR_h: // Fall-through.
+ case STLXR_w: // Fall-through.
+ case STLXR_x: store_counter->Increment(); break;
+ default: UNREACHABLE();
+ }
+}
void Instrument::VisitLogicalShifted(Instruction* instr) {
Update();
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index f307aeb6d4..881d2d83be 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -13,6 +13,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return x1;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return x0; }
const Register LoadDescriptor::ReceiverRegister() { return x1; }
const Register LoadDescriptor::NameRegister() { return x2; }
@@ -25,13 +38,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
const Register StoreDescriptor::ReceiverRegister() { return x1; }
const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
+const Register StoreDescriptor::SlotRegister() { return x4; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return x4; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return x4; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return x3; }
@@ -41,23 +50,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return x5; }
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return x2; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
-const Register InstanceOfDescriptor::LeftRegister() { return x1; }
-const Register InstanceOfDescriptor::RightRegister() { return x0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return x1; }
const Register StringCompareDescriptor::RightRegister() { return x0; }
-
-const Register ApiGetterDescriptor::function_address() { return x2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return x0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
const Register MathPowTaggedDescriptor::exponent() { return x11; }
@@ -76,14 +77,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: function
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x3};
@@ -273,48 +266,34 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // register state
// x1: function
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
- Register registers[] = {x1, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ Register registers[] = {x1, x2, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
+ // register state
+ // x0: number of arguments
+ // x1: function
+ // x2: allocation site with elements kind
Register registers[] = {x1, x2, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- // x1: constructor function
- // x0: number of arguments to the constructor function
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {x1, x0};
+ Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {x0};
@@ -348,6 +327,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // x1 -- lhs
+ // x0 -- rhs
+ // x4 -- slot id
+ // x3 -- vector
+ Register registers[] = {x1, x0, x4, x3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -430,9 +425,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -467,6 +461,15 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x0, // the value to pass to the generator
+ x1, // the JSGeneratorObject to resume
+ x2 // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
index 60418ad839..f19d6909f8 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64-inl.h
@@ -309,6 +309,22 @@ LS_MACRO_LIST(DEFINE_FUNCTION)
LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
+#define DECLARE_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rt, const Register& rn) { \
+ DCHECK(allow_macro_instructions_); \
+ OP(rt, rn); \
+ }
+LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+#define DECLARE_FUNCTION(FN, OP) \
+ void MacroAssembler::FN(const Register& rs, const Register& rt, \
+ const Register& rn) { \
+ DCHECK(allow_macro_instructions_); \
+ OP(rs, rt, rn); \
+ }
+STLX_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
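
The DECLARE_FUNCTION blocks above are X-macros: LDA_STL_MACRO_LIST and STLX_MACRO_LIST (declared in macro-assembler-arm64.h later in this diff) expand once per (macro name, assembler instruction) pair, stamping out one DCHECK-guarded wrapper each. A self-contained toy showing the same expansion pattern, not V8 code:

    // Self-contained toy of the X-macro pattern; names are illustrative.
    #include <cstdio>

    #define INSTRUCTION_LIST(V) \
      V(Ldar, ldar)             \
      V(Stlr, stlr)

    #define DECLARE_FUNCTION(FN, OP) \
      void FN() { std::puts(#OP); }  // one wrapper per list entry
    INSTRUCTION_LIST(DECLARE_FUNCTION)
    #undef DECLARE_FUNCTION

    int main() {
      Ldar();  // prints "ldar"
      Stlr();  // prints "stlr"
      return 0;
    }
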
void MacroAssembler::Asr(const Register& rd,
const Register& rn,
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 12ddd8145e..f674dd53e7 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1373,10 +1373,6 @@ void MacroAssembler::AssertFPCRState(Register fpcr) {
Mrs(fpcr, FPCR);
}
-  // Settings overridden by ConfigureFPCR():
- // - Assert that default-NaN mode is set.
- Tbz(fpcr, DN_offset, &unexpected_mode);
-
// Settings left to their default values:
// - Assert that flush-to-zero is not set.
Tbnz(fpcr, FZ_offset, &unexpected_mode);
@@ -1393,31 +1389,13 @@ void MacroAssembler::AssertFPCRState(Register fpcr) {
}
-void MacroAssembler::ConfigureFPCR() {
- UseScratchRegisterScope temps(this);
- Register fpcr = temps.AcquireX();
- Mrs(fpcr, FPCR);
-
- // If necessary, enable default-NaN mode. The default values of the other FPCR
- // options should be suitable, and AssertFPCRState will verify that.
- Label no_write_required;
- Tbnz(fpcr, DN_offset, &no_write_required);
-
- Orr(fpcr, fpcr, DN_mask);
- Msr(FPCR, fpcr);
-
- Bind(&no_write_required);
- AssertFPCRState(fpcr);
-}
-
-
void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
const FPRegister& src) {
AssertFPCRState();
- // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
- // for NaNs, which become the default NaN. We use fsub rather than fadd
- // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
+ // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+ // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
+ // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
Fsub(dst, src, fp_zero);
}
@@ -1447,14 +1425,7 @@ void MacroAssembler::LoadTrueFalseRoots(Register true_root,
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- Mov(result, Operand(cell));
- Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- } else {
- Mov(result, Operand(object));
- }
+ Mov(result, Operand(object));
}
@@ -1558,7 +1529,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -1568,7 +1539,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
Add(scratch1, receiver, kMementoEndOffset);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- Eor(scratch2, scratch1, new_space_allocation_top);
+ Mov(scratch2, new_space_allocation_top_adr);
+ Ldr(scratch2, MemOperand(scratch2));
+ Eor(scratch2, scratch1, scratch2);
Tst(scratch2, ~Page::kPageAlignmentMask);
B(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
@@ -1582,7 +1555,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- Cmp(scratch1, new_space_allocation_top);
+ Mov(scratch2, new_space_allocation_top_adr);
+ Ldr(scratch2, MemOperand(scratch2));
+ Cmp(scratch1, scratch2);
B(gt, no_memento_found);
// Memento map check.
bind(&map_check);
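
The two fixes in this hunk load the current allocation top through its ExternalReference cell before comparing; the old code compared against the address of the cell itself. A hypothetical model of the corrected same-page test (the page-size constant is illustrative, not V8's):

    // Hypothetical model of the corrected check; page size is illustrative.
    #include <cstdint>

    constexpr uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;

    bool OnSamePageAsAllocationTop(uintptr_t object_end,
                                   const uintptr_t* top_cell) {
      uintptr_t top = *top_cell;  // Ldr scratch2, MemOperand(top_address)
      return ((object_end ^ top) & ~kPageAlignmentMask) == 0;  // Eor + Tst
    }
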
@@ -1659,6 +1634,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
+ Check(eq, kOperandIsNotAGeneratorObject);
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -1773,14 +1759,14 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
CallStub(&stub);
}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
Mov(x1, builtin);
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
@@ -1978,9 +1964,6 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
Label start_call;
Bind(&start_call);
#endif
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
DCHECK(rmode != RelocInfo::NONE32);
@@ -2503,11 +2486,12 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- Mov(x4, Operand(step_in_enabled));
- ldrb(x4, MemOperand(x4));
- CompareAndBranch(x4, Operand(0), eq, &skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ Mov(x4, Operand(last_step_action));
+ Ldrsb(x4, MemOperand(x4));
+ CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
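
The stepping check above moves from a boolean step_in_enabled byte to reading the debugger's last step action as a signed byte (Ldrsb) and flooding for any action at or above StepIn; the STATIC_ASSERT guarantees StepFrame also clears the threshold. A hypothetical host-side model, with enum values that are illustrative rather than copied from V8's debug headers:

    // Hypothetical model; the StepAction values here are illustrative.
    #include <cstdint>

    enum StepAction : int8_t { StepNone = -1, StepOut = 0, StepNext = 1,
                               StepIn = 2, StepFrame = 3 };
    static_assert(StepFrame > StepIn, "the threshold test relies on ordering");

    bool NeedsStepFlooding(const int8_t* last_step_action_cell) {
      int8_t action = *last_step_action_cell;  // Ldrsb: sign-extending load
      return action >= StepIn;  // CompareAndBranch(..., lt, &skip_flooding)
    }
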
@@ -2768,9 +2752,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- Ldr(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -2840,16 +2823,30 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
}
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(lr, fp, context, target);
+ add(fp, jssp, Operand(2 * kPointerSize));
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(target, context, fp, lr);
+}
-void MacroAssembler::EnterExitFrame(bool save_doubles,
- const Register& scratch,
- int extra_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
+ int extra_space,
+ StackFrame::Type frame_type) {
DCHECK(jssp.Is(StackPointer()));
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
// Set up the new stack frame.
Push(lr, fp);
Mov(fp, StackPointer());
- Mov(scratch, Smi::FromInt(StackFrame::EXIT));
+ Mov(scratch, Smi::FromInt(frame_type));
Push(scratch);
Push(xzr);
Mov(scratch, Operand(CodeObject()));
@@ -2958,7 +2955,7 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value,
if (FLAG_native_code_counters && counter->Enabled()) {
Mov(scratch1, value);
Mov(scratch2, ExternalReference(counter));
- Str(scratch1, MemOperand(scratch2));
+ Str(scratch1.W(), MemOperand(scratch2));
}
}
@@ -2968,9 +2965,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
DCHECK(value != 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Mov(scratch2, ExternalReference(counter));
- Ldr(scratch1, MemOperand(scratch2));
- Add(scratch1, scratch1, value);
- Str(scratch1, MemOperand(scratch2));
+ Ldr(scratch1.W(), MemOperand(scratch2));
+ Add(scratch1.W(), scratch1.W(), value);
+ Str(scratch1.W(), MemOperand(scratch2));
}
}
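
StatsCounter cells are 32-bit integers, so SetCounter and IncrementCounter above now operate on the W (32-bit) views of their scratch registers instead of storing a whole X register over the cell. The host-side equivalent of the new update:

    // Host-side equivalent of the W-sized counter update above.
    #include <cstdint>

    void IncrementCounter(int32_t* cell, int32_t value) {
      int32_t current = *cell;  // Ldr(scratch1.W(), MemOperand(scratch2))
      current += value;         // Add(scratch1.W(), scratch1.W(), value)
      *cell = current;          // Str(scratch1.W(), MemOperand(scratch2))
    }
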
@@ -3041,6 +3038,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3101,14 +3099,16 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if new space is exhausted.
Adds(result_end, result, object_size);
- Ccmp(result_end, alloc_limit, CFlag, cc);
+ Ccmp(result_end, alloc_limit, NoFlag, cc);
B(hi, gc_required);
- Str(result_end, MemOperand(top_address));
- // Tag the object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- ObjectTag(result, result);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ Str(result_end, MemOperand(top_address));
}
+
+ // Tag the object.
+ ObjectTag(result, result);
}
@@ -3181,16 +3181,88 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Check(eq, kUnalignedAllocationInNewSpace);
}
- Ccmp(result_end, alloc_limit, CFlag, cc);
+ Ccmp(result_end, alloc_limit, NoFlag, cc);
B(hi, gc_required);
- Str(result_end, MemOperand(top_address));
- // Tag the object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- ObjectTag(result, result);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ Str(result_end, MemOperand(top_address));
}
+
+ // Tag the object.
+ ObjectTag(result, result);
}
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+
+ DCHECK(!AreAliased(result, scratch1, scratch2));
+ DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK(0 == (object_size & kObjectAlignmentMask));
+
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ Mov(top_address, Operand(heap_allocation_top));
+ Ldr(result, MemOperand(top_address));
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and write it back.
+ Adds(result_end, result, object_size);
+ Str(result_end, MemOperand(top_address));
+
+ ObjectTag(result, result);
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap, other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch));
+ DCHECK(!AreAliased(result_end, result, scratch));
+ DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
+ result_end.Is64Bits());
+
+ ExternalReference heap_allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
+ Mov(top_address, heap_allocation_top);
+ Ldr(result, MemOperand(top_address));
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on ARM64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ // Calculate new top and write it back.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
+ } else {
+ Adds(result_end, result, object_size);
+ }
+ Str(result_end, MemOperand(top_address));
+
+ if (emit_debug_code()) {
+ Tst(result_end, kObjectAlignmentMask);
+ Check(eq, kUnalignedAllocationInNewSpace);
+ }
+
+ ObjectTag(result, result);
+}
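
The new FastAllocate variants bump the allocation top without comparing against the limit, which is only sound when a dominating Allocate call has already reserved the combined size (allocation folding, as the header comment later in this diff spells out). A hypothetical host-side model of that fast path:

    // Hypothetical model of the unchecked bump-pointer fast path.
    #include <cstddef>
    #include <cstdint>

    struct NewSpace {
      uintptr_t top;    // current allocation pointer
      uintptr_t limit;  // checked only by the dominating Allocate()
    };

    uintptr_t FastAllocate(NewSpace* space, size_t object_size) {
      uintptr_t result = space->top;
      space->top = result + object_size;  // Adds + Str, no limit check here
      return result + 1;                  // ObjectTag: kHeapObjectTag == 1
    }
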
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@@ -3207,12 +3279,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
Bic(scratch1, scratch1, kObjectAlignmentMask);
// Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -3236,12 +3304,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
Bic(scratch1, scratch1, kObjectAlignmentMask);
// Allocate one-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -3255,7 +3319,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -3269,12 +3333,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -3288,7 +3348,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -3305,7 +3365,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -3359,14 +3419,14 @@ void MacroAssembler::AllocateHeapNumber(Register result,
if (value.IsSameSizeAndType(heap_number_map)) {
STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
HeapNumber::kValueOffset);
- Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
+ Stp(heap_number_map, value,
+ FieldMemOperand(result, HeapObject::kMapOffset));
} else {
- Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
if (value.IsValid()) {
- Str(value, MemOperand(result, HeapNumber::kValueOffset));
+ Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}
}
- ObjectTag(result, result);
}
@@ -3390,7 +3450,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -4075,16 +4136,14 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
PushCPURegList(CPURegList(
CPURegister::kFPRegister, kDRegSizeInBits,
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->allocatable_double_codes_mask()));
+ RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopCPURegList(CPURegList(
CPURegister::kFPRegister, kDRegSizeInBits,
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->allocatable_double_codes_mask()));
+ RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
PopSafepointRegisters();
}
@@ -4594,16 +4653,18 @@ void MacroAssembler::Abort(BailoutReason reason) {
// Avoid infinite recursion; Push contains some assertions that use Abort.
NoUseRealAbortsScope no_real_aborts(this);
- Mov(x0, Smi::FromInt(reason));
- Push(x0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(x1, Smi::FromInt(static_cast<int>(reason)));
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
} else {
// Load the string to pass to Printf.
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 4b6b3c0fb1..06e9a1d9bb 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -21,12 +21,15 @@
#define ASM_UNIMPLEMENTED_BREAK(message) \
__ Debug(message, __LINE__, \
FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
- #define ASM_LOCATION(message) \
- __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#if DEBUG
+#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#else
+#define ASM_LOCATION(message)
+#endif
#else
- #define ASM_UNIMPLEMENTED(message)
- #define ASM_UNIMPLEMENTED_BREAK(message)
- #define ASM_LOCATION(message)
+#define ASM_UNIMPLEMENTED(message)
+#define ASM_UNIMPLEMENTED_BREAK(message)
+#define ASM_LOCATION(message)
#endif
@@ -39,8 +42,8 @@ namespace internal {
#define kReturnRegister2 x2
#define kJSFunctionRegister x1
#define kContextRegister cp
+#define kAllocateSizeRegister x1
#define kInterpreterAccumulatorRegister x0
-#define kInterpreterRegisterFileRegister x18
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
@@ -65,6 +68,21 @@ namespace internal {
V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
+#define LDA_STL_MACRO_LIST(V) \
+ V(Ldarb, ldarb) \
+ V(Ldarh, ldarh) \
+ V(Ldar, ldar) \
+ V(Ldaxrb, ldaxrb) \
+ V(Ldaxrh, ldaxrh) \
+ V(Ldaxr, ldaxr) \
+ V(Stlrb, stlrb) \
+ V(Stlrh, stlrh) \
+ V(Stlr, stlr)
+
+#define STLX_MACRO_LIST(V) \
+ V(Stlxrb, stlxrb) \
+ V(Stlxrh, stlxrh) \
+ V(Stlxr, stlxr)
// ----------------------------------------------------------------------------
// Static helper functions
@@ -292,6 +310,17 @@ class MacroAssembler : public Assembler {
void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
+// Load-acquire/store-release macros.
+#define DECLARE_FUNCTION(FN, OP) \
+ inline void FN(const Register& rt, const Register& rn);
+ LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+#define DECLARE_FUNCTION(FN, OP) \
+ inline void FN(const Register& rs, const Register& rt, const Register& rn);
+ STLX_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
// V8-specific load/store helpers.
void Load(const Register& rt, const MemOperand& addr, Representation r);
void Store(const Register& rt, const MemOperand& addr, Representation r);
@@ -865,7 +894,6 @@ class MacroAssembler : public Assembler {
inline void InitializeRootRegister();
void AssertFPCRState(Register fpcr = NoReg);
- void ConfigureFPCR();
void CanonicalizeNaN(const FPRegister& dst, const FPRegister& src);
void CanonicalizeNaN(const FPRegister& reg) {
CanonicalizeNaN(reg, reg);
@@ -970,6 +998,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
@@ -1142,7 +1174,8 @@ class MacroAssembler : public Assembler {
int num_double_arguments);
// Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
// Convenience function: call an external reference.
void CallExternalReference(const ExternalReference& ext,
@@ -1306,7 +1339,6 @@ class MacroAssembler : public Assembler {
//
// If the new space is exhausted control continues at the gc_required label.
// In this case, the result and scratch registers may still be clobbered.
- // If flags includes TAG_OBJECT, the result is tagged as as a heap object.
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
@@ -1317,6 +1349,15 @@ class MacroAssembler : public Assembler {
Label* gc_required,
AllocationFlags flags);
+ // FastAllocate is right now only used for folded allocations. It just
+ // increments the top pointer without checking against limit. This can only
+ // be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ Register scratch, AllocationFlags flags);
+
+ void FastAllocate(int object_size, Register result, Register scratch1,
+ Register scratch2, AllocationFlags flags);
+
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -1601,6 +1642,9 @@ class MacroAssembler : public Assembler {
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Returns map with validated enum cache in object register.
void CheckEnumCache(Register object, Register scratch0, Register scratch1,
Register scratch2, Register scratch3, Register scratch4,
@@ -1662,9 +1706,9 @@ class MacroAssembler : public Assembler {
//
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
- void EnterExitFrame(bool save_doubles,
- const Register& scratch,
- int extra_space = 0);
+ void EnterExitFrame(bool save_doubles, const Register& scratch,
+ int extra_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame, after a C function has returned to generated
// (JavaScript) code.
@@ -2268,14 +2312,6 @@ class InlineSmiCheckInfo {
} // namespace internal
} // namespace v8
-#ifdef GENERATED_CODE_COVERAGE
-#error "Unsupported option"
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 81dbdf8850..f5595a8ed1 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -524,7 +524,7 @@ class Redirection {
// static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
}
@@ -609,7 +609,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
xreg(4), xreg(5), xreg(6), xreg(7));
ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
xreg(4), xreg(5), xreg(6), xreg(7));
- TraceSim("Returned: {%p, %p}\n", result.x, result.y);
+ TraceSim("Returned: {%p, %p}\n", static_cast<void*>(result.x),
+ static_cast<void*>(result.y));
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
#endif
@@ -639,7 +640,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
xreg(5), xreg(6), xreg(7));
- TraceSim("Returned: {%p, %p, %p}\n", result.x, result.y, result.z);
+ TraceSim("Returned: {%p, %p, %p}\n", static_cast<void*>(result.x),
+ static_cast<void*>(result.y), static_cast<void*>(result.z));
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
#endif
@@ -886,36 +888,31 @@ int Simulator::CodeFromName(const char* name) {
// Helpers ---------------------------------------------------------------------
template <typename T>
-T Simulator::AddWithCarry(bool set_flags,
- T src1,
- T src2,
- T carry_in) {
- typedef typename make_unsigned<T>::type unsignedT;
- DCHECK((carry_in == 0) || (carry_in == 1));
-
- T signed_sum = src1 + src2 + carry_in;
- T result = signed_sum;
+T Simulator::AddWithCarry(bool set_flags, T left, T right, int carry_in) {
+ // Use unsigned types to avoid implementation-defined overflow behaviour.
+ static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
+ static_assert((sizeof(T) == kWRegSize) || (sizeof(T) == kXRegSize),
+ "Only W- or X-sized operands are tested");
- bool N, Z, C, V;
+ DCHECK((carry_in == 0) || (carry_in == 1));
+ T result = left + right + carry_in;
- // Compute the C flag
- unsignedT u1 = static_cast<unsignedT>(src1);
- unsignedT u2 = static_cast<unsignedT>(src2);
- unsignedT urest = std::numeric_limits<unsignedT>::max() - u1;
- C = (u2 > urest) || (carry_in && (((u2 + 1) > urest) || (u2 > (urest - 1))));
+ if (set_flags) {
+ nzcv().SetN(CalcNFlag(result));
+ nzcv().SetZ(CalcZFlag(result));
- // Overflow iff the sign bit is the same for the two inputs and different
- // for the result.
- V = ((src1 ^ src2) >= 0) && ((src1 ^ result) < 0);
+ // Compute the C flag by comparing the result to the max unsigned integer.
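+    // Carry out iff left + right + carry_in > max(T); the check is split into
+    // two comparisons so the intermediate values never overflow T.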
+ T max_uint_2op = std::numeric_limits<T>::max() - carry_in;
+ nzcv().SetC((left > max_uint_2op) || ((max_uint_2op - left) < right));
- N = CalcNFlag(result);
- Z = CalcZFlag(result);
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ T sign_mask = T(1) << (sizeof(T) * 8 - 1);
+ T left_sign = left & sign_mask;
+ T right_sign = right & sign_mask;
+ T result_sign = result & sign_mask;
+ nzcv().SetV((left_sign == right_sign) && (left_sign != result_sign));
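+    // e.g. for uint32_t: 0x7fffffff + 1 sets V (two non-negative inputs give
+    // a negative result) but not C, while 0xffffffff + 1 sets C but not V.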
- if (set_flags) {
- nzcv().SetN(N);
- nzcv().SetZ(Z);
- nzcv().SetC(C);
- nzcv().SetV(V);
LogSystemRegister(NZCV);
}
return result;
@@ -924,6 +921,9 @@ T Simulator::AddWithCarry(bool set_flags,
template<typename T>
void Simulator::AddSubWithCarry(Instruction* instr) {
+ // Use unsigned types to avoid implementation-defined overflow behaviour.
+ static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
+
T op2 = reg<T>(instr->Rm());
T new_val;
@@ -1416,6 +1416,9 @@ void Simulator::VisitCompareBranch(Instruction* instr) {
template<typename T>
void Simulator::AddSubHelper(Instruction* instr, T op2) {
+ // Use unsigned types to avoid implementation-defined overflow behaviour.
+ static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
+
bool set_flags = instr->FlagsUpdate();
T new_val = 0;
Instr operation = instr->Mask(AddSubOpMask);
@@ -1448,11 +1451,10 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
unsigned shift_amount = instr->ImmDPShift();
if (instr->SixtyFourBits()) {
- int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
+ uint64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
AddSubHelper(instr, op2);
} else {
- int32_t op2 = static_cast<int32_t>(
- ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount));
+ uint32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
AddSubHelper(instr, op2);
}
}
@@ -1461,9 +1463,9 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
void Simulator::VisitAddSubImmediate(Instruction* instr) {
int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
if (instr->SixtyFourBits()) {
- AddSubHelper<int64_t>(instr, op2);
+ AddSubHelper(instr, static_cast<uint64_t>(op2));
} else {
- AddSubHelper<int32_t>(instr, static_cast<int32_t>(op2));
+ AddSubHelper(instr, static_cast<uint32_t>(op2));
}
}
@@ -1472,10 +1474,10 @@ void Simulator::VisitAddSubExtended(Instruction* instr) {
Extend ext = static_cast<Extend>(instr->ExtendMode());
unsigned left_shift = instr->ImmExtendShift();
if (instr->SixtyFourBits()) {
- int64_t op2 = ExtendValue(xreg(instr->Rm()), ext, left_shift);
+ uint64_t op2 = ExtendValue(xreg(instr->Rm()), ext, left_shift);
AddSubHelper(instr, op2);
} else {
- int32_t op2 = ExtendValue(wreg(instr->Rm()), ext, left_shift);
+ uint32_t op2 = ExtendValue(wreg(instr->Rm()), ext, left_shift);
AddSubHelper(instr, op2);
}
}
@@ -1483,9 +1485,9 @@ void Simulator::VisitAddSubExtended(Instruction* instr) {
void Simulator::VisitAddSubWithCarry(Instruction* instr) {
if (instr->SixtyFourBits()) {
- AddSubWithCarry<int64_t>(instr);
+ AddSubWithCarry<uint64_t>(instr);
} else {
- AddSubWithCarry<int32_t>(instr);
+ AddSubWithCarry<uint32_t>(instr);
}
}
@@ -1495,22 +1497,22 @@ void Simulator::VisitLogicalShifted(Instruction* instr) {
unsigned shift_amount = instr->ImmDPShift();
if (instr->SixtyFourBits()) {
- int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
+ uint64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
- LogicalHelper<int64_t>(instr, op2);
+ LogicalHelper(instr, op2);
} else {
- int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+ uint32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
op2 = (instr->Mask(NOT) == NOT) ? ~op2 : op2;
- LogicalHelper<int32_t>(instr, op2);
+ LogicalHelper(instr, op2);
}
}
void Simulator::VisitLogicalImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
- LogicalHelper<int64_t>(instr, instr->ImmLogical());
+ LogicalHelper(instr, static_cast<uint64_t>(instr->ImmLogical()));
} else {
- LogicalHelper<int32_t>(instr, static_cast<int32_t>(instr->ImmLogical()));
+ LogicalHelper(instr, static_cast<uint32_t>(instr->ImmLogical()));
}
}
@@ -1546,24 +1548,27 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
if (instr->SixtyFourBits()) {
- ConditionalCompareHelper(instr, xreg(instr->Rm()));
+ ConditionalCompareHelper(instr, static_cast<uint64_t>(xreg(instr->Rm())));
} else {
- ConditionalCompareHelper(instr, wreg(instr->Rm()));
+ ConditionalCompareHelper(instr, static_cast<uint32_t>(wreg(instr->Rm())));
}
}
void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
- ConditionalCompareHelper<int64_t>(instr, instr->ImmCondCmp());
+ ConditionalCompareHelper(instr, static_cast<uint64_t>(instr->ImmCondCmp()));
} else {
- ConditionalCompareHelper<int32_t>(instr, instr->ImmCondCmp());
+ ConditionalCompareHelper(instr, static_cast<uint32_t>(instr->ImmCondCmp()));
}
}
template<typename T>
void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
+ // Use unsigned types to avoid implementation-defined overflow behaviour.
+ static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
+
T op1 = reg<T>(instr->Rn());
if (ConditionPassed(static_cast<Condition>(instr->Condition()))) {
@@ -1900,6 +1905,9 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
}
}
+void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
+ // TODO(binji)
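+  // Intentionally left empty: acquire/release loads and stores are not yet
+  // simulated.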
+}
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
if ((address >= stack_limit_) && (address < stack)) {
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 724c767ab7..d4901098ef 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -14,6 +14,7 @@
#include "src/arm64/disasm-arm64.h"
#include "src/arm64/instrument-arm64.h"
#include "src/assembler.h"
+#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/utils.h"
@@ -150,8 +151,7 @@ typedef SimRegisterBase SimFPRegister; // v0-v31
class Simulator : public DecoderVisitor {
public:
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size) {
+ static void FlushICache(base::HashMap* i_cache, void* start, size_t size) {
USE(i_cache);
USE(start);
USE(size);
@@ -167,7 +167,7 @@ class Simulator : public DecoderVisitor {
static void Initialize(Isolate* isolate);
- static void TearDown(HashMap* i_cache, Redirection* first);
+ static void TearDown(base::HashMap* i_cache, Redirection* first);
static Simulator* current(v8::internal::Isolate* isolate);
@@ -652,11 +652,8 @@ class Simulator : public DecoderVisitor {
template<typename T>
void AddSubHelper(Instruction* instr, T op2);
- template<typename T>
- T AddWithCarry(bool set_flags,
- T src1,
- T src2,
- T carry_in = 0);
+ template <typename T>
+ T AddWithCarry(bool set_flags, T left, T right, int carry_in = 0);
template<typename T>
void AddSubWithCarry(Instruction* instr);
template<typename T>
@@ -794,7 +791,7 @@ class Simulator : public DecoderVisitor {
// Output stream.
FILE* stream_;
PrintDisassembler* print_disasm_;
- void PRINTF_METHOD_CHECKING TraceSim(const char* format, ...);
+ void PRINTF_FORMAT(2, 3) TraceSim(const char* format, ...);
// Instrumentation.
Instrument* instrument_;
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
new file mode 100644
index 0000000000..78e688d86e
--- /dev/null
+++ b/deps/v8/src/asmjs/OWNERS
@@ -0,0 +1,10 @@
+# Keep in sync with test/cctest/asmjs/OWNERS.
+
+set noparent
+
+ahaas@chromium.org
+bradnelson@chromium.org
+jpp@chromium.org
+mtrofin@chromium.org
+rossberg@chromium.org
+titzer@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
new file mode 100644
index 0000000000..e94d91730e
--- /dev/null
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -0,0 +1,275 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/asmjs/asm-js.h"
+
+#include "src/api-natives.h"
+#include "src/api.h"
+#include "src/asmjs/asm-typer.h"
+#include "src/asmjs/asm-wasm-builder.h"
+#include "src/assert-scope.h"
+#include "src/execution.h"
+#include "src/factory.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/parsing/parse-info.h"
+
+#include "src/wasm/encoder.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
+
+typedef uint8_t byte;
+
+using v8::internal::wasm::ErrorThrower;
+
+namespace v8 {
+namespace internal {
+
+namespace {
+i::MaybeHandle<i::FixedArray> CompileModule(
+ i::Isolate* isolate, const byte* start, const byte* end,
+ ErrorThrower* thrower,
+ internal::wasm::ModuleOrigin origin = i::wasm::kWasmOrigin) {
+ // Decode but avoid a redundant pass over function bodies for verification.
+ // Verification will happen during compilation.
+ i::Zone zone(isolate->allocator());
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, start, end, false, origin);
+
+ i::MaybeHandle<i::FixedArray> compiled_module;
+ if (result.failed() && origin == internal::wasm::kAsmJsOrigin) {
+ thrower->Error("Asm.js converted module failed to decode");
+ } else if (result.failed()) {
+ thrower->Failed("", result);
+ } else {
+ compiled_module = result.val->CompileFunctions(isolate, thrower);
+ }
+
+ if (result.val) delete result.val;
+ return compiled_module;
+}
+
+Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
+ Handle<JSReceiver> stdlib,
+ Handle<Name> name) {
+ if (stdlib.is_null()) {
+ return Handle<i::Object>();
+ }
+ Handle<i::Name> math_name(
+ isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Math")));
+ MaybeHandle<i::Object> maybe_math = i::Object::GetProperty(stdlib, math_name);
+ if (maybe_math.is_null()) {
+ return Handle<i::Object>();
+ }
+ Handle<i::Object> math = maybe_math.ToHandleChecked();
+ if (!math->IsJSReceiver()) {
+ return Handle<i::Object>();
+ }
+ MaybeHandle<i::Object> maybe_value = i::Object::GetProperty(math, name);
+ if (maybe_value.is_null()) {
+ return Handle<i::Object>();
+ }
+ return maybe_value.ToHandleChecked();
+}
+
+bool IsStdlibMemberValid(i::Isolate* isolate, Handle<JSReceiver> stdlib,
+ Handle<i::Object> member_id) {
+ int32_t member_kind;
+ if (!member_id->ToInt32(&member_kind)) {
+ UNREACHABLE();
+ }
+ switch (member_kind) {
+ case wasm::AsmTyper::StandardMember::kNone:
+ case wasm::AsmTyper::StandardMember::kModule:
+ case wasm::AsmTyper::StandardMember::kStdlib:
+ case wasm::AsmTyper::StandardMember::kHeap:
+ case wasm::AsmTyper::StandardMember::kFFI: {
+ // Nothing to check for these.
+ return true;
+ }
+ case wasm::AsmTyper::StandardMember::kInfinity: {
+ if (stdlib.is_null()) {
+ return false;
+ }
+ Handle<i::Name> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("Infinity")));
+ MaybeHandle<i::Object> maybe_value = i::Object::GetProperty(stdlib, name);
+ if (maybe_value.is_null()) {
+ return false;
+ }
+ Handle<i::Object> value = maybe_value.ToHandleChecked();
+ return value->IsNumber() && std::isinf(value->Number());
+ }
+ case wasm::AsmTyper::StandardMember::kNaN: {
+ if (stdlib.is_null()) {
+ return false;
+ }
+ Handle<i::Name> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("NaN")));
+ MaybeHandle<i::Object> maybe_value = i::Object::GetProperty(stdlib, name);
+ if (maybe_value.is_null()) {
+ return false;
+ }
+ Handle<i::Object> value = maybe_value.ToHandleChecked();
+ return value->IsNaN();
+ }
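+// Each stdlib Math function is accepted only if it is still the pristine
+// builtin, i.e. its shared code matches the corresponding V8 builtin.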
+#define STDLIB_MATH_FUNC(CamelName, fname) \
+ case wasm::AsmTyper::StandardMember::k##CamelName: { \
+ Handle<i::Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#fname))); \
+ Handle<i::Object> value = StdlibMathMember(isolate, stdlib, name); \
+ if (value.is_null() || !value->IsJSFunction()) { \
+ return false; \
+ } \
+ Handle<i::JSFunction> func(i::JSFunction::cast(*value)); \
+ return func->shared()->code() == \
+ isolate->builtins()->builtin(Builtins::k##CamelName); \
+ }
+ STDLIB_MATH_FUNC(MathAcos, acos)
+ STDLIB_MATH_FUNC(MathAsin, asin)
+ STDLIB_MATH_FUNC(MathAtan, atan)
+ STDLIB_MATH_FUNC(MathCos, cos)
+ STDLIB_MATH_FUNC(MathSin, sin)
+ STDLIB_MATH_FUNC(MathTan, tan)
+ STDLIB_MATH_FUNC(MathExp, exp)
+ STDLIB_MATH_FUNC(MathLog, log)
+ STDLIB_MATH_FUNC(MathCeil, ceil)
+ STDLIB_MATH_FUNC(MathFloor, floor)
+ STDLIB_MATH_FUNC(MathSqrt, sqrt)
+ STDLIB_MATH_FUNC(MathAbs, abs)
+ STDLIB_MATH_FUNC(MathClz32, clz32)
+ STDLIB_MATH_FUNC(MathMin, min)
+ STDLIB_MATH_FUNC(MathMax, max)
+ STDLIB_MATH_FUNC(MathAtan2, atan2)
+ STDLIB_MATH_FUNC(MathPow, pow)
+ STDLIB_MATH_FUNC(MathImul, imul)
+ STDLIB_MATH_FUNC(MathFround, fround)
+#undef STDLIB_MATH_FUNC
+#define STDLIB_MATH_CONST(cname, const_value) \
+ case wasm::AsmTyper::StandardMember::kMath##cname: { \
+ i::Handle<i::Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#cname))); \
+ i::Handle<i::Object> value = StdlibMathMember(isolate, stdlib, name); \
+ return !value.is_null() && value->IsNumber() && \
+ value->Number() == const_value; \
+ }
+ STDLIB_MATH_CONST(E, 2.718281828459045)
+ STDLIB_MATH_CONST(LN10, 2.302585092994046)
+ STDLIB_MATH_CONST(LN2, 0.6931471805599453)
+ STDLIB_MATH_CONST(LOG2E, 1.4426950408889634)
+ STDLIB_MATH_CONST(LOG10E, 0.4342944819032518)
+ STDLIB_MATH_CONST(PI, 3.141592653589793)
+ STDLIB_MATH_CONST(SQRT1_2, 0.7071067811865476)
+ STDLIB_MATH_CONST(SQRT2, 1.4142135623730951)
+#undef STDLIB_MATH_CONST
+ default: { UNREACHABLE(); }
+ }
+ return false;
+}
+
+} // namespace
+
+MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
+ ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
+ wasm::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
+ info->literal());
+ if (!typer.Validate()) {
+ DCHECK(!info->isolate()->has_pending_exception());
+ PrintF("Validation of asm.js module failed: %s", typer.error_message());
+ return MaybeHandle<FixedArray>();
+ }
+ v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
+ info->literal(), &typer);
+ i::Handle<i::FixedArray> foreign_globals;
+ auto module = builder.Run(&foreign_globals);
+
+ i::MaybeHandle<i::FixedArray> compiled =
+ CompileModule(info->isolate(), module->begin(), module->end(), &thrower,
+ internal::wasm::kAsmJsOrigin);
+ DCHECK(!compiled.is_null());
+
+ wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
+ Handle<FixedArray> uses_array =
+ info->isolate()->factory()->NewFixedArray(static_cast<int>(uses.size()));
+ int count = 0;
+ for (auto i : uses) {
+ uses_array->set(count++, Smi::FromInt(i));
+ }
+
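+  // Pack the triple consumed below by IsStdlibValid() and
+  // InstantiateAsmWasm(): [0] compiled module, [1] foreign globals,
+  // [2] stdlib uses.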
+ Handle<FixedArray> result = info->isolate()->factory()->NewFixedArray(3);
+ result->set(0, *compiled.ToHandleChecked());
+ result->set(1, *foreign_globals);
+ result->set(2, *uses_array);
+ return result;
+}
+
+bool AsmJs::IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
+ Handle<JSReceiver> stdlib) {
+ i::Handle<i::FixedArray> uses(i::FixedArray::cast(wasm_data->get(2)));
+ for (int i = 0; i < uses->length(); ++i) {
+ if (!IsStdlibMemberValid(isolate, stdlib,
+ uses->GetValueChecked<i::Object>(isolate, i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
+ Handle<FixedArray> wasm_data,
+ Handle<JSArrayBuffer> memory,
+ Handle<JSReceiver> foreign) {
+ i::Handle<i::FixedArray> compiled(i::FixedArray::cast(wasm_data->get(0)));
+ i::Handle<i::FixedArray> foreign_globals(
+ i::FixedArray::cast(wasm_data->get(1)));
+
+ ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
+
+ i::MaybeHandle<i::JSObject> maybe_module_object =
+ i::wasm::WasmModule::Instantiate(isolate, compiled, foreign, memory);
+ if (maybe_module_object.is_null()) {
+ return MaybeHandle<Object>();
+ }
+
+ i::Handle<i::Name> name(isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("__foreign_init__")));
+
+ i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
+ i::MaybeHandle<i::Object> maybe_init =
+ i::Object::GetProperty(module_object, name);
+ DCHECK(!maybe_init.is_null());
+
+ i::Handle<i::Object> init = maybe_init.ToHandleChecked();
+ i::Handle<i::Object> undefined(isolate->heap()->undefined_value(), isolate);
+ i::Handle<i::Object>* foreign_args_array =
+ new i::Handle<i::Object>[foreign_globals->length()];
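+  // Collect the current values of the imported foreign members; a missing
+  // name or failed property lookup falls back to undefined.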
+ for (int j = 0; j < foreign_globals->length(); j++) {
+ if (!foreign.is_null()) {
+ i::MaybeHandle<i::Name> name = i::Object::ToName(
+ isolate, i::Handle<i::Object>(foreign_globals->get(j), isolate));
+ if (!name.is_null()) {
+ i::MaybeHandle<i::Object> val =
+ i::Object::GetProperty(foreign, name.ToHandleChecked());
+ if (!val.is_null()) {
+ foreign_args_array[j] = val.ToHandleChecked();
+ continue;
+ }
+ }
+ }
+ foreign_args_array[j] = undefined;
+ }
+ i::MaybeHandle<i::Object> retval = i::Execution::Call(
+ isolate, init, undefined, foreign_globals->length(), foreign_args_array);
+ delete[] foreign_args_array;
+
+ DCHECK(!retval.is_null());
+
+ return maybe_module_object;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
new file mode 100644
index 0000000000..44bf04df9e
--- /dev/null
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ASMJS_ASM_JS_H_
+#define V8_ASMJS_ASM_JS_H_
+
+#ifndef V8_SHARED
+#include "src/allocation.h"
+#include "src/base/hashmap.h"
+#else
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#endif // !V8_SHARED
+#include "src/parsing/parser.h"
+
+namespace v8 {
+namespace internal {
+// Interface to compile and instantiate for asmjs.
+class AsmJs {
+ public:
+ static MaybeHandle<FixedArray> ConvertAsmToWasm(i::ParseInfo* info);
+ static bool IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
+ Handle<JSReceiver> stdlib);
+ static MaybeHandle<Object> InstantiateAsmWasm(i::Isolate* isolate,
+ Handle<FixedArray> wasm_data,
+ Handle<JSArrayBuffer> memory,
+ Handle<JSReceiver> foreign);
+};
+
+} // namespace internal
+} // namespace v8
+#endif  // V8_ASMJS_ASM_JS_H_
diff --git a/deps/v8/src/asmjs/asm-typer.cc b/deps/v8/src/asmjs/asm-typer.cc
new file mode 100644
index 0000000000..1d070a0207
--- /dev/null
+++ b/deps/v8/src/asmjs/asm-typer.cc
@@ -0,0 +1,2773 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/asmjs/asm-typer.h"
+
+#include <algorithm>
+#include <limits>
+#include <memory>
+#include <string>
+
+#include "src/v8.h"
+
+#include "src/asmjs/asm-types.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/base/bits.h"
+#include "src/codegen.h"
+#include "src/globals.h"
+#include "src/type-cache.h"
+#include "src/utils.h"
+
+#define FAIL(node, msg) \
+ do { \
+ int line = node->position() == kNoSourcePosition \
+ ? -1 \
+ : script_->GetLineNumber(node->position()); \
+ base::OS::SNPrintF(error_message_, sizeof(error_message_), \
+ "asm: line %d: %s\n", line + 1, msg); \
+ return AsmType::None(); \
+ } while (false)
+
+#define RECURSE(call) \
+ do { \
+ if (GetCurrentStackPosition() < stack_limit_) { \
+ stack_overflow_ = true; \
+ FAIL(root_, "Stack overflow while parsing asm.js module."); \
+ } \
+ \
+ AsmType* result = (call); \
+ if (stack_overflow_) { \
+ return AsmType::None(); \
+ } \
+ \
+ if (result == AsmType::None()) { \
+ return AsmType::None(); \
+ } \
+ } while (false)
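+// Note that RECURSE doubles as error propagation: a stack-limit hit or an
+// AsmType::None() result from |call| aborts the enclosing validation rule.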
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace {
+static const uint32_t LargestFixNum = std::numeric_limits<int32_t>::max();
+} // namespace
+
+using v8::internal::AstNode;
+using v8::internal::GetCurrentStackPosition;
+
+// ----------------------------------------------------------------------------
+// Implementation of AsmTyper::FlattenedStatements
+
+AsmTyper::FlattenedStatements::FlattenedStatements(Zone* zone,
+ ZoneList<Statement*>* s)
+ : context_stack_(zone) {
+ context_stack_.emplace_back(Context(s));
+}
+
+Statement* AsmTyper::FlattenedStatements::Next() {
+ for (;;) {
+ if (context_stack_.empty()) {
+ return nullptr;
+ }
+
+ Context* current = &context_stack_.back();
+
+ if (current->statements_->length() <= current->next_index_) {
+ context_stack_.pop_back();
+ continue;
+ }
+
+ Statement* current_statement =
+ current->statements_->at(current->next_index_++);
+ if (current_statement->IsBlock()) {
+ context_stack_.emplace_back(
+ Context(current_statement->AsBlock()->statements()));
+ continue;
+ }
+
+ return current_statement;
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Implementation of AsmTyper::VariableInfo
+
+AsmTyper::VariableInfo* AsmTyper::VariableInfo::ForSpecialSymbol(
+ Zone* zone, StandardMember standard_member) {
+ DCHECK(standard_member == kStdlib || standard_member == kFFI ||
+ standard_member == kHeap || standard_member == kModule);
+ auto* new_var_info = new (zone) VariableInfo(AsmType::None());
+ new_var_info->standard_member_ = standard_member;
+ new_var_info->mutability_ = kImmutableGlobal;
+ return new_var_info;
+}
+
+AsmTyper::VariableInfo* AsmTyper::VariableInfo::Clone(Zone* zone) const {
+ CHECK(standard_member_ != kNone);
+ CHECK(!type_->IsA(AsmType::None()));
+ auto* new_var_info = new (zone) VariableInfo(type_);
+ new_var_info->standard_member_ = standard_member_;
+ new_var_info->mutability_ = mutability_;
+ return new_var_info;
+}
+
+void AsmTyper::VariableInfo::FirstForwardUseIs(VariableProxy* var) {
+ DCHECK(first_forward_use_ == nullptr);
+ missing_definition_ = true;
+ first_forward_use_ = var;
+}
+
+// ----------------------------------------------------------------------------
+// Implementation of AsmTyper
+
+AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
+ FunctionLiteral* root)
+ : isolate_(isolate),
+ zone_(zone),
+ script_(script),
+ root_(root),
+ forward_definitions_(zone),
+ stdlib_types_(zone),
+ stdlib_math_types_(zone),
+ module_info_(VariableInfo::ForSpecialSymbol(zone_, kModule)),
+ global_scope_(ZoneHashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ local_scope_(ZoneHashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ stack_limit_(isolate->stack_guard()->real_climit()),
+ node_types_(zone_),
+ fround_type_(AsmType::FroundType(zone_)),
+ ffi_type_(AsmType::FFIType(zone_)) {
+ InitializeStdlib();
+}
+
+namespace {
+bool ValidAsmIdentifier(Handle<String> name) {
+ static const char* kInvalidAsmNames[] = {"eval", "arguments"};
+
+ for (size_t ii = 0; ii < arraysize(kInvalidAsmNames); ++ii) {
+ if (strcmp(name->ToCString().get(), kInvalidAsmNames[ii]) == 0) {
+ return false;
+ }
+ }
+ return true;
+}
+} // namespace
+
+void AsmTyper::InitializeStdlib() {
+ auto* d = AsmType::Double();
+ auto* dq = AsmType::DoubleQ();
+ auto* dq2d = AsmType::Function(zone_, d);
+ dq2d->AsFunctionType()->AddArgument(dq);
+
+ auto* dqdq2d = AsmType::Function(zone_, d);
+ dqdq2d->AsFunctionType()->AddArgument(dq);
+ dqdq2d->AsFunctionType()->AddArgument(dq);
+
+ auto* f = AsmType::Float();
+ auto* fq = AsmType::FloatQ();
+ auto* fq2f = AsmType::Function(zone_, f);
+ fq2f->AsFunctionType()->AddArgument(fq);
+
+ auto* s = AsmType::Signed();
+ auto* s2s = AsmType::Function(zone_, s);
+ s2s->AsFunctionType()->AddArgument(s);
+
+ auto* i = AsmType::Int();
+ auto* i2s = AsmType::Function(zone_, s);
+ i2s->AsFunctionType()->AddArgument(i);
+
+ auto* ii2s = AsmType::Function(zone_, s);
+ ii2s->AsFunctionType()->AddArgument(i);
+ ii2s->AsFunctionType()->AddArgument(i);
+
+ auto* minmax_d = AsmType::MinMaxType(zone_, d, d);
+  // *VIOLATION* The float variant is not part of the spec, but Firefox
+  // accepts it.
+ auto* minmax_f = AsmType::MinMaxType(zone_, f, f);
+ auto* minmax_i = AsmType::MinMaxType(zone_, s, i);
+ auto* minmax = AsmType::OverloadedFunction(zone_);
+ minmax->AsOverloadedFunctionType()->AddOverload(minmax_i);
+ minmax->AsOverloadedFunctionType()->AddOverload(minmax_f);
+ minmax->AsOverloadedFunctionType()->AddOverload(minmax_d);
+
+ auto* fround = fround_type_;
+
+ auto* abs = AsmType::OverloadedFunction(zone_);
+ abs->AsOverloadedFunctionType()->AddOverload(s2s);
+ abs->AsOverloadedFunctionType()->AddOverload(dq2d);
+ abs->AsOverloadedFunctionType()->AddOverload(fq2f);
+
+ auto* ceil = AsmType::OverloadedFunction(zone_);
+ ceil->AsOverloadedFunctionType()->AddOverload(dq2d);
+ ceil->AsOverloadedFunctionType()->AddOverload(fq2f);
+
+ auto* floor = ceil;
+ auto* sqrt = ceil;
+
+ struct StandardMemberInitializer {
+ const char* name;
+ StandardMember standard_member;
+ AsmType* type;
+ };
+
+ const StandardMemberInitializer stdlib[] = {{"Infinity", kInfinity, d},
+ {"NaN", kNaN, d},
+#define ASM_TYPED_ARRAYS(V) \
+ V(Uint8) \
+ V(Int8) \
+ V(Uint16) \
+ V(Int16) \
+ V(Uint32) \
+ V(Int32) \
+ V(Float32) \
+ V(Float64)
+
+#define ASM_TYPED_ARRAY(TypeName) \
+ {#TypeName "Array", kNone, AsmType::TypeName##Array()},
+ ASM_TYPED_ARRAYS(ASM_TYPED_ARRAY)
+#undef ASM_TYPED_ARRAY
+ };
+ for (size_t ii = 0; ii < arraysize(stdlib); ++ii) {
+ stdlib_types_[stdlib[ii].name] = new (zone_) VariableInfo(stdlib[ii].type);
+ stdlib_types_[stdlib[ii].name]->set_standard_member(
+ stdlib[ii].standard_member);
+ stdlib_types_[stdlib[ii].name]->set_mutability(
+ VariableInfo::kImmutableGlobal);
+ }
+
+ const StandardMemberInitializer math[] = {
+ {"PI", kMathPI, d},
+ {"E", kMathE, d},
+ {"LN2", kMathLN2, d},
+ {"LN10", kMathLN10, d},
+ {"LOG2E", kMathLOG2E, d},
+ {"LOG10E", kMathLOG10E, d},
+ {"SQRT2", kMathSQRT2, d},
+ {"SQRT1_2", kMathSQRT1_2, d},
+ {"imul", kMathImul, ii2s},
+ {"abs", kMathAbs, abs},
+ // NOTE: clz32 should return fixnum. The current typer can only return
+ // Signed, Float, or Double, so it returns Signed in our version of
+ // asm.js.
+ {"clz32", kMathClz32, i2s},
+ {"ceil", kMathCeil, ceil},
+ {"floor", kMathFloor, floor},
+ {"fround", kMathFround, fround},
+ {"pow", kMathPow, dqdq2d},
+ {"exp", kMathExp, dq2d},
+ {"log", kMathLog, dq2d},
+ {"min", kMathMin, minmax},
+ {"max", kMathMax, minmax},
+ {"sqrt", kMathSqrt, sqrt},
+ {"cos", kMathCos, dq2d},
+ {"sin", kMathSin, dq2d},
+ {"tan", kMathTan, dq2d},
+ {"acos", kMathAcos, dq2d},
+ {"asin", kMathAsin, dq2d},
+ {"atan", kMathAtan, dq2d},
+ {"atan2", kMathAtan2, dqdq2d},
+ };
+ for (size_t ii = 0; ii < arraysize(math); ++ii) {
+ stdlib_math_types_[math[ii].name] = new (zone_) VariableInfo(math[ii].type);
+ stdlib_math_types_[math[ii].name]->set_standard_member(
+ math[ii].standard_member);
+ stdlib_math_types_[math[ii].name]->set_mutability(
+ VariableInfo::kImmutableGlobal);
+ }
+}
+
+// Used for 5.5 GlobalVariableTypeAnnotations
+AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
+ auto* obj = import->obj();
+ auto* key = import->key()->AsLiteral();
+
+ ObjectTypeMap* stdlib = &stdlib_types_;
+ if (auto* obj_as_property = obj->AsProperty()) {
+ // This can only be stdlib.Math
+ auto* math_name = obj_as_property->key()->AsLiteral();
+ if (math_name == nullptr || !math_name->IsPropertyName()) {
+ return nullptr;
+ }
+
+ if (!math_name->AsPropertyName()->IsUtf8EqualTo(CStrVector("Math"))) {
+ return nullptr;
+ }
+
+ auto* stdlib_var_proxy = obj_as_property->obj()->AsVariableProxy();
+ if (stdlib_var_proxy == nullptr) {
+ return nullptr;
+ }
+ obj = stdlib_var_proxy;
+ stdlib = &stdlib_math_types_;
+ }
+
+ auto* obj_as_var_proxy = obj->AsVariableProxy();
+ if (obj_as_var_proxy == nullptr) {
+ return nullptr;
+ }
+
+ auto* obj_info = Lookup(obj_as_var_proxy->var());
+ if (obj_info == nullptr) {
+ return nullptr;
+ }
+
+ if (obj_info->IsFFI()) {
+ // For FFI we can't validate import->key, so assume this is OK.
+ return obj_info;
+ }
+
+ std::unique_ptr<char[]> aname = key->AsPropertyName()->ToCString();
+ ObjectTypeMap::iterator i = stdlib->find(std::string(aname.get()));
+ if (i == stdlib->end()) {
+ return nullptr;
+ }
+ stdlib_uses_.insert(i->second->standard_member());
+ return i->second;
+}
+
+AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) {
+ ZoneHashMap* scope = in_function_ ? &local_scope_ : &global_scope_;
+ ZoneHashMap::Entry* entry =
+ scope->Lookup(variable, ComputePointerHash(variable));
+ if (entry == nullptr && in_function_) {
+ entry = global_scope_.Lookup(variable, ComputePointerHash(variable));
+ }
+
+ if (entry == nullptr && !module_name_.is_null() &&
+ module_name_->Equals(*variable->name())) {
+ return module_info_;
+ }
+
+ return entry ? reinterpret_cast<VariableInfo*>(entry->value) : nullptr;
+}
+
+void AsmTyper::AddForwardReference(VariableProxy* proxy, VariableInfo* info) {
+ info->FirstForwardUseIs(proxy);
+ forward_definitions_.push_back(info);
+}
+
+bool AsmTyper::AddGlobal(Variable* variable, VariableInfo* info) {
+  // We can't DCHECK(!in_function_) because functions may actually install
+  // global names (forward-defined functions and function tables).
+ DCHECK(info->mutability() != VariableInfo::kInvalidMutability);
+ DCHECK(info->IsGlobal());
+ DCHECK(ValidAsmIdentifier(variable->name()));
+
+ if (!module_name_.is_null() && module_name_->Equals(*variable->name())) {
+ return false;
+ }
+
+ ZoneHashMap::Entry* entry = global_scope_.LookupOrInsert(
+ variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
+
+ if (entry->value != nullptr) {
+ return false;
+ }
+
+ entry->value = info;
+ return true;
+}
+
+bool AsmTyper::AddLocal(Variable* variable, VariableInfo* info) {
+ DCHECK(in_function_);
+ DCHECK(info->mutability() != VariableInfo::kInvalidMutability);
+ DCHECK(!info->IsGlobal());
+ DCHECK(ValidAsmIdentifier(variable->name()));
+
+ ZoneHashMap::Entry* entry = local_scope_.LookupOrInsert(
+ variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
+
+ if (entry->value != nullptr) {
+ return false;
+ }
+
+ entry->value = info;
+ return true;
+}
+
+void AsmTyper::SetTypeOf(AstNode* node, AsmType* type) {
+ DCHECK_NE(type, AsmType::None());
+ DCHECK(node_types_.find(node) == node_types_.end());
+ node_types_.insert(std::make_pair(node, type));
+}
+
+AsmType* AsmTyper::TypeOf(AstNode* node) const {
+ auto node_type_iter = node_types_.find(node);
+ if (node_type_iter != node_types_.end()) {
+ return node_type_iter->second;
+ }
+
+  // Sometimes literal nodes are not added to the node_types_ map simply
+  // because they are not visited with ValidateExpression().
+ if (auto* literal = node->AsLiteral()) {
+ if (literal->raw_value()->ContainsDot()) {
+ return AsmType::Double();
+ }
+ uint32_t u;
+ if (literal->value()->ToUint32(&u)) {
+ if (u > LargestFixNum) {
+ return AsmType::Unsigned();
+ }
+ return AsmType::FixNum();
+ }
+ int32_t i;
+ if (literal->value()->ToInt32(&i)) {
+ return AsmType::Signed();
+ }
+ }
+
+ return AsmType::None();
+}
+
+AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(Variable* var) {
+ auto* var_info = Lookup(var);
+ if (var_info == nullptr) {
+ return kNone;
+ }
+ StandardMember member = var_info->standard_member();
+ return member;
+}
+
+bool AsmTyper::Validate() {
+ if (!AsmType::None()->IsExactly(ValidateModule(root_))) {
+ return true;
+ }
+ return false;
+}
+
+namespace {
+bool IsUseAsmDirective(Statement* first_statement) {
+ ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
+ if (use_asm == nullptr) {
+ return false;
+ }
+
+ Literal* use_asm_literal = use_asm->expression()->AsLiteral();
+
+ if (use_asm_literal == nullptr) {
+ return false;
+ }
+
+ return use_asm_literal->raw_value()->AsString()->IsOneByteEqualTo("use asm");
+}
+
+Assignment* ExtractInitializerExpression(Statement* statement) {
+ auto* expr_stmt = statement->AsExpressionStatement();
+ if (expr_stmt == nullptr) {
+ // Done with initializers.
+ return nullptr;
+ }
+ auto* assign = expr_stmt->expression()->AsAssignment();
+ if (assign == nullptr) {
+ // Done with initializers.
+ return nullptr;
+ }
+ if (assign->op() != Token::INIT) {
+ // Done with initializers.
+ return nullptr;
+ }
+ return assign;
+}
+
+} // namespace
+
+// 6.1 ValidateModule
+namespace {
+// SourceLayoutTracker keeps track of the start and end positions of each
+// section in the asm.js source. The sections should not overlap, otherwise the
+// asm.js source is invalid.
+class SourceLayoutTracker {
+ public:
+ SourceLayoutTracker() = default;
+
+ bool IsValid() const {
+ const Section* kAllSections[] = {&use_asm_, &globals_, &functions_,
+ &tables_, &exports_};
+ for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
+ const auto& curr_section = *kAllSections[ii];
+ for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
+ if (curr_section.OverlapsWith(*kAllSections[jj])) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
+
+ void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
+
+ void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
+
+ void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
+
+ void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
+
+ private:
+ class Section {
+ public:
+ Section() = default;
+ Section(const Section&) = default;
+ Section& operator=(const Section&) = default;
+
+ void AddNewElement(const AstNode& node) {
+ const int node_pos = node.position();
+ if (start_ == kNoSourcePosition) {
+ start_ = node_pos;
+ } else {
+ start_ = std::max(start_, node_pos);
+ }
+ if (end_ == kNoSourcePosition) {
+ end_ = node_pos;
+ } else {
+ end_ = std::max(end_, node_pos);
+ }
+ }
+
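+    // For two non-empty sections this returns true unless |other| lies
+    // strictly after this one; IsValid() applies it pairwise in the required
+    // source order.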
+ bool OverlapsWith(const Section& other) const {
+ if (start_ == kNoSourcePosition) {
+ DCHECK_EQ(end_, kNoSourcePosition);
+ return false;
+ }
+ if (other.start_ == kNoSourcePosition) {
+ DCHECK_EQ(other.end_, kNoSourcePosition);
+ return false;
+ }
+ return other.start_ < end_ || other.end_ < start_;
+ }
+
+ private:
+ int start_ = kNoSourcePosition;
+ int end_ = kNoSourcePosition;
+ };
+
+ Section use_asm_;
+ Section globals_;
+ Section functions_;
+ Section tables_;
+ Section exports_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
+};
+} // namespace
+
+AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
+ SourceLayoutTracker source_layout;
+
+ DeclarationScope* scope = fun->scope();
+ if (!scope->is_function_scope()) FAIL(fun, "Not at function scope.");
+ if (!ValidAsmIdentifier(fun->name()))
+ FAIL(fun, "Invalid asm.js identifier in module name.");
+ module_name_ = fun->name();
+
+ // Allowed parameters: Stdlib, FFI, Mem
+ static const uint32_t MaxModuleParameters = 3;
+ if (scope->num_parameters() > MaxModuleParameters) {
+ FAIL(fun, "asm.js modules may not have more than three parameters.");
+ }
+
+ struct {
+ StandardMember standard_member;
+ } kModuleParamInfo[3] = {
+ {kStdlib}, {kFFI}, {kHeap},
+ };
+
+ for (int ii = 0; ii < scope->num_parameters(); ++ii) {
+ Variable* param = scope->parameter(ii);
+ DCHECK(param);
+
+ if (!ValidAsmIdentifier(param->name())) {
+ FAIL(fun, "Invalid asm.js identifier in module parameter.");
+ }
+
+ auto* param_info = VariableInfo::ForSpecialSymbol(
+ zone_, kModuleParamInfo[ii].standard_member);
+
+ if (!AddGlobal(param, param_info)) {
+ FAIL(fun, "Redeclared identifier in module parameter.");
+ }
+ }
+
+ ZoneVector<Assignment*> function_pointer_tables(zone_);
+ FlattenedStatements iter(zone_, fun->body());
+ auto* use_asm_directive = iter.Next();
+ if (use_asm_directive == nullptr) {
+ FAIL(fun, "Missing \"use asm\".");
+ }
+ // Check for extra assignment inserted by the parser when in this form:
+ // (function Module(a, b, c) {... })
+ ExpressionStatement* estatement = use_asm_directive->AsExpressionStatement();
+ if (estatement != nullptr) {
+ Assignment* assignment = estatement->expression()->AsAssignment();
+ if (assignment != nullptr && assignment->target()->IsVariableProxy() &&
+ assignment->target()->AsVariableProxy()->var()->mode() ==
+ CONST_LEGACY) {
+ use_asm_directive = iter.Next();
+ }
+ }
+ if (!IsUseAsmDirective(use_asm_directive)) {
+ FAIL(fun, "Missing \"use asm\".");
+ }
+ source_layout.AddUseAsm(*use_asm_directive);
+ ReturnStatement* module_return = nullptr;
+
+ // *VIOLATION* The spec states that globals should be followed by function
+ // declarations, which should be followed by function pointer tables, followed
+  // by the module export (return) statement. Our AST might be rearranged by
+  // the
+ // parser, so we can't rely on it being in source code order.
+ while (Statement* current = iter.Next()) {
+ if (auto* assign = ExtractInitializerExpression(current)) {
+ if (assign->value()->IsArrayLiteral()) {
+ // Save function tables for later validation.
+ function_pointer_tables.push_back(assign);
+ } else {
+ RECURSE(ValidateGlobalDeclaration(assign));
+ source_layout.AddGlobal(*assign);
+ }
+ continue;
+ }
+
+ if (auto* current_as_return = current->AsReturnStatement()) {
+ if (module_return != nullptr) {
+ FAIL(fun, "Multiple export statements.");
+ }
+ module_return = current_as_return;
+ source_layout.AddExport(*module_return);
+ continue;
+ }
+
+ FAIL(current, "Invalid top-level statement in asm.js module.");
+ }
+
+ ZoneList<Declaration*>* decls = scope->declarations();
+
+ for (int ii = 0; ii < decls->length(); ++ii) {
+ Declaration* decl = decls->at(ii);
+
+ if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
+ RECURSE(ValidateFunction(fun_decl));
+ source_layout.AddFunction(*fun_decl);
+ continue;
+ }
+ }
+
+ for (auto* function_table : function_pointer_tables) {
+ RECURSE(ValidateFunctionTable(function_table));
+ source_layout.AddTable(*function_table);
+ }
+
+ for (int ii = 0; ii < decls->length(); ++ii) {
+ Declaration* decl = decls->at(ii);
+
+ if (decl->IsFunctionDeclaration()) {
+ continue;
+ }
+
+ VariableDeclaration* var_decl = decl->AsVariableDeclaration();
+ if (var_decl == nullptr) {
+ FAIL(decl, "Invalid asm.js declaration.");
+ }
+
+ auto* var_proxy = var_decl->proxy();
+ if (var_proxy == nullptr) {
+ FAIL(decl, "Invalid asm.js declaration.");
+ }
+
+ if (Lookup(var_proxy->var()) == nullptr) {
+ FAIL(decl, "Global variable missing initializer in asm.js module.");
+ }
+ }
+
+ // 6.2 ValidateExport
+ if (module_return == nullptr) {
+ FAIL(fun, "Missing asm.js module export.");
+ }
+
+ for (auto* forward_def : forward_definitions_) {
+ if (forward_def->missing_definition()) {
+ FAIL(forward_def->first_forward_use(),
+ "Missing definition for forward declared identifier.");
+ }
+ }
+
+ RECURSE(ValidateExport(module_return));
+
+ if (!source_layout.IsValid()) {
+ FAIL(fun, "Invalid asm.js source code layout.");
+ }
+
+ return AsmType::Int(); // Any type that is not AsmType::None();
+}
+
+namespace {
+bool IsDoubleAnnotation(BinaryOperation* binop) {
+ // *VIOLATION* The parser replaces uses of +x with x*1.0.
+ if (binop->op() != Token::MUL) {
+ return false;
+ }
+
+ auto* right_as_literal = binop->right()->AsLiteral();
+ if (right_as_literal == nullptr) {
+ return false;
+ }
+
+ return right_as_literal->raw_value()->ContainsDot() &&
+ right_as_literal->raw_value()->AsNumber() == 1.0;
+}
+
+bool IsIntAnnotation(BinaryOperation* binop) {
+ if (binop->op() != Token::BIT_OR) {
+ return false;
+ }
+
+ auto* right_as_literal = binop->right()->AsLiteral();
+ if (right_as_literal == nullptr) {
+ return false;
+ }
+
+ return !right_as_literal->raw_value()->ContainsDot() &&
+ right_as_literal->raw_value()->AsNumber() == 0.0;
+}
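+// e.g. the import `var x = +ffi.a;` reaches the typer as `ffi.a * 1.0` and is
+// matched by IsDoubleAnnotation, while `var y = ffi.b|0;` is matched by
+// IsIntAnnotation.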
+} // namespace
+
+AsmType* AsmTyper::ValidateGlobalDeclaration(Assignment* assign) {
+ DCHECK(!assign->is_compound());
+ if (assign->is_compound()) {
+ FAIL(assign,
+ "Compound assignment not supported when declaring global variables.");
+ }
+
+ auto* target = assign->target();
+ if (!target->IsVariableProxy()) {
+ FAIL(target, "Module assignments may only assign to globals.");
+ }
+ auto* target_variable = target->AsVariableProxy()->var();
+ auto* target_info = Lookup(target_variable);
+
+ if (target_info != nullptr) {
+ FAIL(target, "Redefined global variable.");
+ }
+
+ auto* value = assign->value();
+ // Not all types of assignment are allowed by asm.js. See
+ // 5.5 Global Variable Type Annotations.
+ bool global_variable = false;
+ if (value->IsLiteral() || value->IsCall()) {
+ AsmType* type = nullptr;
+ RECURSE(type = VariableTypeAnnotations(value));
+ target_info = new (zone_) VariableInfo(type);
+ target_info->set_mutability(VariableInfo::kMutableGlobal);
+ global_variable = true;
+ } else if (value->IsProperty()) {
+ target_info = ImportLookup(value->AsProperty());
+ if (target_info == nullptr) {
+ FAIL(assign, "Invalid import.");
+ }
+ CHECK(target_info->mutability() == VariableInfo::kImmutableGlobal);
+ if (target_info->IsFFI()) {
+ // create a new target info that represents a foreign variable.
+ target_info = new (zone_) VariableInfo(ffi_type_);
+ target_info->set_mutability(VariableInfo::kImmutableGlobal);
+ } else if (target_info->type()->IsA(AsmType::Heap())) {
+ FAIL(assign, "Heap view types can not be aliased.");
+ } else {
+ target_info = target_info->Clone(zone_);
+ }
+ } else if (value->IsBinaryOperation()) {
+ // This should either be:
+ //
+ // var <> = ffi.<>|0
+ //
+ // or
+ //
+ // var <> = +ffi.<>
+ auto* value_binop = value->AsBinaryOperation();
+ auto* left = value_binop->left();
+ AsmType* import_type = nullptr;
+
+ if (IsDoubleAnnotation(value_binop)) {
+ import_type = AsmType::Double();
+ } else if (IsIntAnnotation(value_binop)) {
+ import_type = AsmType::Int();
+ } else {
+ FAIL(value,
+ "Invalid initializer for foreign import - unrecognized annotation.");
+ }
+
+ if (!left->IsProperty()) {
+ FAIL(value,
+ "Invalid initializer for foreign import - must import member.");
+ }
+ target_info = ImportLookup(left->AsProperty());
+ if (target_info == nullptr) {
+      // TODO(jpp): this error message is inaccurate: this may fail if the
+ // object lookup fails, or if the property lookup fails, or even if the
+ // import is bogus like a().c.
+ FAIL(value,
+ "Invalid initializer for foreign import - object lookup failed.");
+ }
+ CHECK(target_info->mutability() == VariableInfo::kImmutableGlobal);
+ if (!target_info->IsFFI()) {
+ FAIL(value,
+ "Invalid initializer for foreign import - object is not the ffi.");
+ }
+
+ // Create a new target info that represents the foreign import.
+ target_info = new (zone_) VariableInfo(import_type);
+ target_info->set_mutability(VariableInfo::kMutableGlobal);
+ } else if (value->IsCallNew()) {
+ AsmType* type = nullptr;
+ RECURSE(type = NewHeapView(value->AsCallNew()));
+ target_info = new (zone_) VariableInfo(type);
+ target_info->set_mutability(VariableInfo::kImmutableGlobal);
+ }
+
+ if (target_info == nullptr) {
+ FAIL(assign, "Invalid global variable initializer.");
+ }
+
+ if (!ValidAsmIdentifier(target_variable->name())) {
+ FAIL(target, "Invalid asm.js identifier in global variable.");
+ }
+
+ if (!AddGlobal(target_variable, target_info)) {
+ FAIL(assign, "Redeclared global identifier.");
+ }
+
+ DCHECK(target_info->type() != AsmType::None());
+ if (!global_variable) {
+ // Global variables have their types set in VariableTypeAnnotations.
+ SetTypeOf(value, target_info->type());
+ }
+ SetTypeOf(assign, target_info->type());
+ SetTypeOf(target, target_info->type());
+ return target_info->type();
+}
+
+// 6.2 ValidateExport
+AsmType* AsmTyper::ExportType(VariableProxy* fun_export) {
+ auto* fun_info = Lookup(fun_export->var());
+ if (fun_info == nullptr) {
+ FAIL(fun_export, "Undefined identifier in asm.js module export.");
+ }
+
+ if (fun_info->standard_member() != kNone) {
+ FAIL(fun_export, "Module cannot export standard library functions.");
+ }
+
+ auto* type = fun_info->type();
+ if (type->AsFFIType() != nullptr) {
+ FAIL(fun_export, "Module cannot export foreign functions.");
+ }
+
+ if (type->AsFunctionTableType() != nullptr) {
+ FAIL(fun_export, "Module cannot export function tables.");
+ }
+
+ if (fun_info->type()->AsFunctionType() == nullptr) {
+ FAIL(fun_export, "Module export is not an asm.js function.");
+ }
+
+ return type;
+}
+
+AsmType* AsmTyper::ValidateExport(ReturnStatement* exports) {
+ // asm.js modules can export single functions, or multiple functions in an
+ // object literal.
+ if (auto* fun_export = exports->expression()->AsVariableProxy()) {
+ // Exporting single function.
+ AsmType* export_type;
+ RECURSE(export_type = ExportType(fun_export));
+ return export_type;
+ }
+
+ if (auto* obj_export = exports->expression()->AsObjectLiteral()) {
+ // Exporting object literal.
+ for (auto* prop : *obj_export->properties()) {
+ if (!prop->key()->IsLiteral()) {
+ FAIL(prop->key(),
+ "Only normal object properties may be used in the export object "
+ "literal.");
+ }
+
+ auto* export_obj = prop->value()->AsVariableProxy();
+ if (export_obj == nullptr) {
+ FAIL(prop->value(), "Exported value must be an asm.js function name.");
+ }
+
+ RECURSE(ExportType(export_obj));
+ }
+
+ return AsmType::Int();
+ }
+
+ FAIL(exports, "Unrecognized expression in asm.js module export expression.");
+}
+
+// 6.3 ValidateFunctionTable
+AsmType* AsmTyper::ValidateFunctionTable(Assignment* assign) {
+ if (assign->is_compound()) {
+ FAIL(assign,
+ "Compound assignment not supported when declaring global variables.");
+ }
+
+ auto* target = assign->target();
+ if (!target->IsVariableProxy()) {
+ FAIL(target, "Module assignments may only assign to globals.");
+ }
+ auto* target_variable = target->AsVariableProxy()->var();
+
+ auto* value = assign->value()->AsArrayLiteral();
+ CHECK(value != nullptr);
+ ZoneList<Expression*>* pointers = value->values();
+
+  // The function table size must be n = 2 ** m, for m >= 0.
+ // TODO(jpp): should this be capped?
+ if (!base::bits::IsPowerOfTwo32(pointers->length())) {
+ FAIL(assign, "Invalid length for function pointer table.");
+ }
+
+ AsmType* table_element_type = nullptr;
+ for (auto* initializer : *pointers) {
+ auto* var_proxy = initializer->AsVariableProxy();
+ if (var_proxy == nullptr) {
+ FAIL(initializer,
+ "Function pointer table initializer must be a function name.");
+ }
+
+ auto* var_info = Lookup(var_proxy->var());
+ if (var_info == nullptr) {
+ FAIL(var_proxy,
+ "Undefined identifier in function pointer table initializer.");
+ }
+
+ if (var_info->standard_member() != kNone) {
+ FAIL(initializer,
+ "Function pointer table must not be a member of the standard "
+ "library.");
+ }
+
+ auto* initializer_type = var_info->type();
+ if (initializer_type->AsFunctionType() == nullptr) {
+ FAIL(initializer,
+ "Function pointer table initializer must be an asm.js function.");
+ }
+
+ DCHECK(var_info->type()->AsFFIType() == nullptr);
+ DCHECK(var_info->type()->AsFunctionTableType() == nullptr);
+
+ if (table_element_type == nullptr) {
+ table_element_type = initializer_type;
+ } else if (!initializer_type->IsA(table_element_type)) {
+ FAIL(initializer, "Type mismatch in function pointer table initializer.");
+ }
+ }
+
+ auto* target_info = Lookup(target_variable);
+ if (target_info == nullptr) {
+    // Function pointer tables are the last entities to be validated, so this
+    // is
+ // unlikely to happen: only unreferenced function tables will not already
+ // have an entry in the global scope.
+ target_info = new (zone_) VariableInfo(AsmType::FunctionTableType(
+ zone_, pointers->length(), table_element_type));
+ target_info->set_mutability(VariableInfo::kImmutableGlobal);
+ if (!ValidAsmIdentifier(target_variable->name())) {
+ FAIL(target, "Invalid asm.js identifier in function table name.");
+ }
+ if (!AddGlobal(target_variable, target_info)) {
+ DCHECK(false);
+ FAIL(assign, "Redeclared global identifier in function table name.");
+ }
+ SetTypeOf(value, target_info->type());
+ return target_info->type();
+ }
+
+ auto* target_info_table = target_info->type()->AsFunctionTableType();
+ if (target_info_table == nullptr) {
+ FAIL(assign, "Identifier redefined as function pointer table.");
+ }
+
+ if (!target_info->missing_definition()) {
+ FAIL(assign, "Identifier redefined (function table name).");
+ }
+
+ if (target_info_table->length() != pointers->length()) {
+ FAIL(assign, "Function table size mismatch.");
+ }
+
+ DCHECK(target_info_table->signature()->AsFunctionType());
+ if (!table_element_type->IsA(target_info_table->signature())) {
+ FAIL(assign, "Function table initializer does not match previous type.");
+ }
+
+ target_info->MarkDefined();
+ DCHECK(target_info->type() != AsmType::None());
+ SetTypeOf(value, target_info->type());
+
+ return target_info->type();
+}
+
+// 6.4 ValidateFunction
+AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
+ FunctionScope _(this);
+
+ // Extract parameter types.
+ auto* fun = fun_decl->fun();
+
+ auto* fun_decl_proxy = fun_decl->proxy();
+ if (fun_decl_proxy == nullptr) {
+ FAIL(fun_decl, "Anonymous functions are not support in asm.js.");
+ }
+
+ Statement* current;
+ FlattenedStatements iter(zone_, fun->body());
+
+ size_t annotated_parameters = 0;
+
+ // 5.3 Function type annotations
+ // * parameters
+ ZoneVector<AsmType*> parameter_types(zone_);
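+  // e.g. in `function f(x, y) { x = x|0; y = +y; ... }` the two leading
+  // assignments annotate the parameters as int and double respectively.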
+ for (; (current = iter.Next()) != nullptr; ++annotated_parameters) {
+ auto* stmt = current->AsExpressionStatement();
+ if (stmt == nullptr) {
+ // Done with parameters.
+ break;
+ }
+ auto* expr = stmt->expression()->AsAssignment();
+ if (expr == nullptr || expr->is_compound()) {
+ // Done with parameters.
+ break;
+ }
+ auto* proxy = expr->target()->AsVariableProxy();
+ if (proxy == nullptr) {
+ // Done with parameters.
+ break;
+ }
+ auto* param = proxy->var();
+ if (param->location() != VariableLocation::PARAMETER ||
+ param->index() != annotated_parameters) {
+ // Done with parameters.
+ break;
+ }
+
+ AsmType* type;
+ RECURSE(type = ParameterTypeAnnotations(param, expr->value()));
+ DCHECK(type->IsParameterType());
+ auto* param_info = new (zone_) VariableInfo(type);
+ param_info->set_mutability(VariableInfo::kLocal);
+ if (!ValidAsmIdentifier(proxy->name())) {
+ FAIL(proxy, "Invalid asm.js identifier in parameter name.");
+ }
+
+ if (!AddLocal(param, param_info)) {
+ FAIL(proxy, "Redeclared parameter.");
+ }
+ parameter_types.push_back(type);
+ SetTypeOf(proxy, type);
+ SetTypeOf(expr, type);
+ }
+
+ if (annotated_parameters != fun->parameter_count()) {
+ FAIL(fun_decl, "Incorrect parameter type annotations.");
+ }
+
+ // 5.3 Function type annotations
+ // * locals
+ for (; current; current = iter.Next()) {
+ auto* initializer = ExtractInitializerExpression(current);
+ if (initializer == nullptr) {
+ // Done with locals.
+ break;
+ }
+
+ auto* local = initializer->target()->AsVariableProxy();
+ if (local == nullptr) {
+      // Done with locals. This should never happen. Even if it does, the
+      // asm.js
+ // code should not declare any other locals after this point, so we assume
+ // this is OK. If any other variable declaration is found we report a
+ // validation error.
+ DCHECK(false);
+ break;
+ }
+
+ AsmType* type;
+ RECURSE(type = VariableTypeAnnotations(initializer->value()));
+ auto* local_info = new (zone_) VariableInfo(type);
+ local_info->set_mutability(VariableInfo::kLocal);
+ if (!ValidAsmIdentifier(local->name())) {
+ FAIL(local, "Invalid asm.js identifier in local variable.");
+ }
+
+ if (!AddLocal(local->var(), local_info)) {
+ FAIL(initializer, "Redeclared local.");
+ }
+
+ SetTypeOf(local, type);
+ SetTypeOf(initializer, type);
+ }
+
+ // 5.2 Return Type Annotations
+ // *VIOLATION* we peel blocks to find the last statement in the asm module
+ // because the parser may introduce synthetic blocks.
+ ZoneList<Statement*>* statements = fun->body();
+
+ do {
+ if (statements->length() == 0) {
+ return_type_ = AsmType::Void();
+ } else {
+ auto* last_statement = statements->last();
+ auto* as_block = last_statement->AsBlock();
+ if (as_block != nullptr) {
+ statements = as_block->statements();
+ } else {
+ // We don't check whether AsReturnStatement() below returns non-null --
+ // we leave that to the ReturnTypeAnnotations method.
+ RECURSE(return_type_ =
+ ReturnTypeAnnotations(last_statement->AsReturnStatement()));
+ }
+ }
+ } while (return_type_ == AsmType::None());
+
+ DCHECK(return_type_->IsReturnType());
+
+ for (auto* decl : *fun->scope()->declarations()) {
+ auto* var_decl = decl->AsVariableDeclaration();
+ if (var_decl == nullptr) {
+ FAIL(decl, "Functions may only define inner variables.");
+ }
+
+ auto* var_proxy = var_decl->proxy();
+ if (var_proxy == nullptr) {
+ FAIL(decl, "Invalid local declaration declaration.");
+ }
+
+ auto* var_info = Lookup(var_proxy->var());
+ if (var_info == nullptr || var_info->IsGlobal()) {
+ FAIL(decl, "Local variable missing initializer in asm.js module.");
+ }
+ }
+
+ for (; current; current = iter.Next()) {
+ AsmType* current_type;
+ RECURSE(current_type = ValidateStatement(current));
+ }
+
+ auto* fun_type = AsmType::Function(zone_, return_type_);
+ auto* fun_type_as_function = fun_type->AsFunctionType();
+ for (auto* param_type : parameter_types) {
+ fun_type_as_function->AddArgument(param_type);
+ }
+
+ auto* fun_var = fun_decl_proxy->var();
+ auto* fun_info = new (zone_) VariableInfo(fun_type);
+ fun_info->set_mutability(VariableInfo::kImmutableGlobal);
+ auto* old_fun_info = Lookup(fun_var);
+ if (old_fun_info == nullptr) {
+ if (!ValidAsmIdentifier(fun_var->name())) {
+ FAIL(fun_decl_proxy, "Invalid asm.js identifier in function name.");
+ }
+ if (!AddGlobal(fun_var, fun_info)) {
+ DCHECK(false);
+ FAIL(fun_decl, "Redeclared global identifier.");
+ }
+
+ SetTypeOf(fun, fun_type);
+ return fun_type;
+ }
+
+ // Not necessarily an error -- fun_decl might have been used before being
+ // defined. If that's the case, then the type in the global environment must
+ // be the same as the type inferred by the parameter/return type annotations.
+ auto* old_fun_type = old_fun_info->type();
+ if (old_fun_type->AsFunctionType() == nullptr) {
+ FAIL(fun_decl, "Identifier redefined as function.");
+ }
+
+ if (!old_fun_info->missing_definition()) {
+ FAIL(fun_decl, "Identifier redefined (function name).");
+ }
+
+ if (!fun_type->IsA(old_fun_type)) {
+ FAIL(fun_decl, "Signature mismatch when defining function.");
+ }
+
+ old_fun_info->MarkDefined();
+ SetTypeOf(fun, fun_type);
+
+ return fun_type;
+}
+
+// 6.5 ValidateStatement
+AsmType* AsmTyper::ValidateStatement(Statement* statement) {
+ switch (statement->node_type()) {
+ default:
+ FAIL(statement, "Statement type invalid for asm.js.");
+ case AstNode::kBlock:
+ return ValidateBlockStatement(statement->AsBlock());
+ case AstNode::kExpressionStatement:
+ return ValidateExpressionStatement(statement->AsExpressionStatement());
+ case AstNode::kEmptyStatement:
+ return ValidateEmptyStatement(statement->AsEmptyStatement());
+ case AstNode::kIfStatement:
+ return ValidateIfStatement(statement->AsIfStatement());
+ case AstNode::kReturnStatement:
+ return ValidateReturnStatement(statement->AsReturnStatement());
+ case AstNode::kWhileStatement:
+ return ValidateWhileStatement(statement->AsWhileStatement());
+ case AstNode::kDoWhileStatement:
+ return ValidateDoWhileStatement(statement->AsDoWhileStatement());
+ case AstNode::kForStatement:
+ return ValidateForStatement(statement->AsForStatement());
+ case AstNode::kBreakStatement:
+ return ValidateBreakStatement(statement->AsBreakStatement());
+ case AstNode::kContinueStatement:
+ return ValidateContinueStatement(statement->AsContinueStatement());
+ case AstNode::kSwitchStatement:
+ return ValidateSwitchStatement(statement->AsSwitchStatement());
+ }
+
+ return AsmType::Void();
+}
+
+// 6.5.1 BlockStatement
+AsmType* AsmTyper::ValidateBlockStatement(Block* block) {
+ FlattenedStatements iter(zone_, block->statements());
+
+ while (auto* current = iter.Next()) {
+ RECURSE(ValidateStatement(current));
+ }
+
+ return AsmType::Void();
+}
+
+// 6.5.2 ExpressionStatement
+AsmType* AsmTyper::ValidateExpressionStatement(ExpressionStatement* expr) {
+ auto* expression = expr->expression();
+ if (auto* call = expression->AsCall()) {
+ RECURSE(ValidateCall(AsmType::Void(), call));
+ } else {
+ RECURSE(ValidateExpression(expression));
+ }
+
+ return AsmType::Void();
+}
+
+// 6.5.3 EmptyStatement
+AsmType* AsmTyper::ValidateEmptyStatement(EmptyStatement* empty) {
+ return AsmType::Void();
+}
+
+// 6.5.4 IfStatement
+AsmType* AsmTyper::ValidateIfStatement(IfStatement* if_stmt) {
+ AsmType* cond_type;
+ RECURSE(cond_type = ValidateExpression(if_stmt->condition()));
+ if (!cond_type->IsA(AsmType::Int())) {
+ FAIL(if_stmt->condition(), "If condition must be type int.");
+ }
+ RECURSE(ValidateStatement(if_stmt->then_statement()));
+ RECURSE(ValidateStatement(if_stmt->else_statement()));
+ return AsmType::Void();
+}
+
+// 6.5.5 ReturnStatement
+AsmType* AsmTyper::ValidateReturnStatement(ReturnStatement* ret_stmt) {
+ AsmType* ret_expr_type = AsmType::Void();
+ if (auto* ret_expr = ret_stmt->expression()) {
+ RECURSE(ret_expr_type = ValidateExpression(ret_expr));
+ if (ret_expr_type == AsmType::Void()) {
+ // *VIOLATION* The parser modifies the source code so that expressionless
+ // returns will return undefined, so we need to allow that.
+ if (!ret_expr->IsUndefinedLiteral()) {
+ FAIL(ret_stmt, "Return statement expression can't be void.");
+ }
+ }
+ }
+
+ if (!ret_expr_type->IsA(return_type_)) {
+ FAIL(ret_stmt, "Type mismatch in return statement.");
+ }
+
+ return ret_expr_type;
+}
+
+// 6.5.6 IterationStatement
+// 6.5.6.a WhileStatement
+AsmType* AsmTyper::ValidateWhileStatement(WhileStatement* while_stmt) {
+ AsmType* cond_type;
+ RECURSE(cond_type = ValidateExpression(while_stmt->cond()));
+ if (!cond_type->IsA(AsmType::Int())) {
+ FAIL(while_stmt->cond(), "While condition must be type int.");
+ }
+
+ if (auto* body = while_stmt->body()) {
+ RECURSE(ValidateStatement(body));
+ }
+ return AsmType::Void();
+}
+
+// 6.5.6.b DoWhileStatement
+AsmType* AsmTyper::ValidateDoWhileStatement(DoWhileStatement* do_while) {
+ AsmType* cond_type;
+ RECURSE(cond_type = ValidateExpression(do_while->cond()));
+ if (!cond_type->IsA(AsmType::Int())) {
+ FAIL(do_while->cond(), "Do {} While condition must be type int.");
+ }
+
+ if (auto* body = do_while->body()) {
+ RECURSE(ValidateStatement(body));
+ }
+ return AsmType::Void();
+}
+
+// 6.5.6.c ForStatement
+AsmType* AsmTyper::ValidateForStatement(ForStatement* for_stmt) {
+ if (auto* init = for_stmt->init()) {
+ RECURSE(ValidateStatement(init));
+ }
+
+ if (auto* cond = for_stmt->cond()) {
+ AsmType* cond_type;
+ RECURSE(cond_type = ValidateExpression(cond));
+ if (!cond_type->IsA(AsmType::Int())) {
+ FAIL(cond, "For condition must be type int.");
+ }
+ }
+
+ if (auto* next = for_stmt->next()) {
+ RECURSE(ValidateStatement(next));
+ }
+
+ if (auto* body = for_stmt->body()) {
+ RECURSE(ValidateStatement(body));
+ }
+
+ return AsmType::Void();
+}
+
+// 6.5.7 BreakStatement
+AsmType* AsmTyper::ValidateBreakStatement(BreakStatement* brk_stmt) {
+ return AsmType::Void();
+}
+
+// 6.5.8 ContinueStatement
+AsmType* AsmTyper::ValidateContinueStatement(ContinueStatement* cont_stmt) {
+ return AsmType::Void();
+}
+
+// 6.5.9 LabelledStatement
+// No need to handle these here -- see the AsmTyper's definition.
+
+// 6.5.10 SwitchStatement
+AsmType* AsmTyper::ValidateSwitchStatement(SwitchStatement* stmt) {
+ AsmType* cond_type;
+ RECURSE(cond_type = ValidateExpression(stmt->tag()));
+ if (!cond_type->IsA(AsmType::Signed())) {
+ FAIL(stmt, "Switch tag must be signed.");
+ }
+
+ int default_pos = kNoSourcePosition;
+ int last_case_pos = kNoSourcePosition;
+ ZoneSet<int32_t> cases_seen(zone_);
+ for (auto* a_case : *stmt->cases()) {
+ if (a_case->is_default()) {
+ CHECK(default_pos == kNoSourcePosition);
+ RECURSE(ValidateDefault(a_case));
+ default_pos = a_case->position();
+ continue;
+ }
+
+ if (last_case_pos == kNoSourcePosition) {
+ last_case_pos = a_case->position();
+ } else {
+ last_case_pos = std::max(last_case_pos, a_case->position());
+ }
+
+ int32_t case_lbl;
+ RECURSE(ValidateCase(a_case, &case_lbl));
+ auto case_lbl_pos = cases_seen.find(case_lbl);
+ if (case_lbl_pos != cases_seen.end() && *case_lbl_pos == case_lbl) {
+ FAIL(a_case, "Duplicated case label.");
+ }
+ cases_seen.insert(case_lbl);
+ }
+
+ if (!cases_seen.empty()) {
+ const int64_t max_lbl = *cases_seen.rbegin();
+ const int64_t min_lbl = *cases_seen.begin();
+ if (max_lbl - min_lbl > std::numeric_limits<int32_t>::max()) {
+ FAIL(stmt, "Out-of-bounds case label range.");
+ }
+ }
+
+ if (last_case_pos != kNoSourcePosition && default_pos != kNoSourcePosition &&
+ default_pos < last_case_pos) {
+ FAIL(stmt, "Switch default must appear last.");
+ }
+
+ return AsmType::Void();
+}
+
+// 6.6 ValidateCase
+namespace {
+bool ExtractInt32CaseLabel(CaseClause* clause, int32_t* lbl) {
+ auto* lbl_expr = clause->label()->AsLiteral();
+
+ if (lbl_expr == nullptr) {
+ return false;
+ }
+
+ if (lbl_expr->raw_value()->ContainsDot()) {
+ return false;
+ }
+
+ return lbl_expr->value()->ToInt32(lbl);
+}
+} // namespace
+
+AsmType* AsmTyper::ValidateCase(CaseClause* label, int32_t* case_lbl) {
+ if (!ExtractInt32CaseLabel(label, case_lbl)) {
+ FAIL(label, "Case label must be a 32-bit signed integer.");
+ }
+
+ FlattenedStatements iter(zone_, label->statements());
+ while (auto* current = iter.Next()) {
+ RECURSE(ValidateStatement(current));
+ }
+ return AsmType::Void();
+}
+
+// 6.7 ValidateDefault
+AsmType* AsmTyper::ValidateDefault(CaseClause* label) {
+ FlattenedStatements iter(zone_, label->statements());
+ while (auto* current = iter.Next()) {
+ RECURSE(ValidateStatement(current));
+ }
+ return AsmType::Void();
+}
+
+// 6.8 ValidateExpression
+AsmType* AsmTyper::ValidateExpression(Expression* expr) {
+ AsmType* expr_ty = AsmType::None();
+
+ switch (expr->node_type()) {
+ default:
+ FAIL(expr, "Invalid asm.js expression.");
+ case AstNode::kLiteral:
+ RECURSE(expr_ty = ValidateNumericLiteral(expr->AsLiteral()));
+ break;
+ case AstNode::kVariableProxy:
+ RECURSE(expr_ty = ValidateIdentifier(expr->AsVariableProxy()));
+ break;
+ case AstNode::kCall:
+ RECURSE(expr_ty = ValidateCallExpression(expr->AsCall()));
+ break;
+ case AstNode::kProperty:
+ RECURSE(expr_ty = ValidateMemberExpression(expr->AsProperty()));
+ break;
+ case AstNode::kAssignment:
+ RECURSE(expr_ty = ValidateAssignmentExpression(expr->AsAssignment()));
+ break;
+ case AstNode::kUnaryOperation:
+ RECURSE(expr_ty = ValidateUnaryExpression(expr->AsUnaryOperation()));
+ break;
+ case AstNode::kConditional:
+ RECURSE(expr_ty = ValidateConditionalExpression(expr->AsConditional()));
+ break;
+ case AstNode::kCompareOperation:
+ RECURSE(expr_ty = ValidateCompareOperation(expr->AsCompareOperation()));
+ break;
+ case AstNode::kBinaryOperation:
+ RECURSE(expr_ty = ValidateBinaryOperation(expr->AsBinaryOperation()));
+ break;
+ }
+
+ SetTypeOf(expr, expr_ty);
+ return expr_ty;
+}
+
+AsmType* AsmTyper::ValidateCompareOperation(CompareOperation* cmp) {
+ switch (cmp->op()) {
+ default:
+ FAIL(cmp, "Invalid asm.js comparison operator.");
+ case Token::LT:
+ case Token::LTE:
+ case Token::GT:
+ case Token::GTE:
+ return ValidateRelationalExpression(cmp);
+ case Token::EQ:
+ case Token::NE:
+ return ValidateEqualityExpression(cmp);
+ }
+
+ UNREACHABLE();
+}
+
+namespace {
+bool IsNegate(BinaryOperation* binop) {
+ if (binop->op() != Token::BIT_XOR) {
+ return false;
+ }
+
+ auto* right_as_literal = binop->right()->AsLiteral();
+ if (right_as_literal == nullptr) {
+ return false;
+ }
+
+ return !right_as_literal->raw_value()->ContainsDot() &&
+ right_as_literal->raw_value()->AsNumber() == -1.0;
+}
+
+bool IsUnaryMinus(BinaryOperation* binop) {
+  // *VIOLATION* The parser replaces uses of -x with x * -1.
+ if (binop->op() != Token::MUL) {
+ return false;
+ }
+
+ auto* right_as_literal = binop->right()->AsLiteral();
+ if (right_as_literal == nullptr) {
+ return false;
+ }
+
+ return !right_as_literal->raw_value()->ContainsDot() &&
+ right_as_literal->raw_value()->AsNumber() == -1.0;
+}
+} // namespace
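+
+// Illustrative summary of the parser rewrites matched above:
+//   ~expr  reaches the typer as  expr ^ -1   (IsNegate)
+//   -expr  reaches the typer as  expr * -1   (IsUnaryMinus)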
+
+AsmType* AsmTyper::ValidateBinaryOperation(BinaryOperation* expr) {
+#define UNOP_OVERLOAD(Src, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+
+ switch (expr->op()) {
+ default:
+ FAIL(expr, "Invalid asm.js binary expression.");
+ case Token::COMMA:
+ return ValidateCommaExpression(expr);
+ case Token::MUL:
+ if (IsDoubleAnnotation(expr)) {
+        // *VIOLATION* We can't be 100% sure this really IS a unary + in the
+        // asm source, so we have to be lenient and treat it as a unary +.
+ if (auto* Call = expr->left()->AsCall()) {
+ return ValidateCall(AsmType::Double(), Call);
+ }
+ AsmType* left_type;
+ RECURSE(left_type = ValidateExpression(expr->left()));
+ SetTypeOf(expr->right(), AsmType::Double());
+ UNOP_OVERLOAD(Signed, Double);
+ UNOP_OVERLOAD(Unsigned, Double);
+ UNOP_OVERLOAD(DoubleQ, Double);
+ UNOP_OVERLOAD(FloatQ, Double);
+ FAIL(expr, "Invalid type for conversion to double.");
+ }
+
+ if (IsUnaryMinus(expr)) {
+        // *VIOLATION* the parser converts -x to x * -1.
+ AsmType* left_type;
+ RECURSE(left_type = ValidateExpression(expr->left()));
+ SetTypeOf(expr->right(), left_type);
+ UNOP_OVERLOAD(Int, Intish);
+ UNOP_OVERLOAD(DoubleQ, Double);
+ UNOP_OVERLOAD(FloatQ, Floatish);
+ FAIL(expr, "Invalid type for unary -.");
+ }
+      // FALLTHROUGH
+ case Token::DIV:
+ case Token::MOD:
+ return ValidateMultiplicativeExpression(expr);
+ case Token::ADD:
+ case Token::SUB: {
+ static const uint32_t kInitialIntishCount = 0;
+ return ValidateAdditiveExpression(expr, kInitialIntishCount);
+ }
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR:
+ return ValidateShiftExpression(expr);
+ case Token::BIT_AND:
+ return ValidateBitwiseANDExpression(expr);
+ case Token::BIT_XOR:
+ if (IsNegate(expr)) {
+ auto* left = expr->left();
+ auto* left_as_binop = left->AsBinaryOperation();
+
+ if (left_as_binop != nullptr && IsNegate(left_as_binop)) {
+ // This is the special ~~ operator.
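+          // e.g. (illustrative): `~~d`, with d a double, reaches the typer
+          // as (d ^ -1) ^ -1 and is accepted below as a double -> signed
+          // conversion.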
+ AsmType* left_type;
+ RECURSE(left_type = ValidateExpression(left_as_binop->left()));
+ SetTypeOf(left_as_binop->right(), AsmType::FixNum());
+ SetTypeOf(left_as_binop, AsmType::Signed());
+ SetTypeOf(expr->right(), AsmType::FixNum());
+ UNOP_OVERLOAD(Double, Signed);
+ UNOP_OVERLOAD(FloatQ, Signed);
+ FAIL(left_as_binop, "Invalid type for conversion to signed.");
+ }
+
+ AsmType* left_type;
+ RECURSE(left_type = ValidateExpression(left));
+ UNOP_OVERLOAD(Intish, Signed);
+ FAIL(left, "Invalid type for ~.");
+ }
+
+ return ValidateBitwiseXORExpression(expr);
+ case Token::BIT_OR:
+ return ValidateBitwiseORExpression(expr);
+ }
+#undef UNOP_OVERLOAD
+ UNREACHABLE();
+}
+
+// 6.8.1 Expression
+AsmType* AsmTyper::ValidateCommaExpression(BinaryOperation* comma) {
+ // The AST looks like:
+ // (expr COMMA (expr COMMA (expr COMMA (... ))))
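+  //
+  // e.g. (illustrative): in `(f(), x | 0)` the call f() is validated as a
+  // void call and the whole comma expression takes the type of `x | 0`.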
+
+ auto* left = comma->left();
+ if (auto* left_as_call = left->AsCall()) {
+ RECURSE(ValidateCall(AsmType::Void(), left_as_call));
+ } else {
+ RECURSE(ValidateExpression(left));
+ }
+
+ auto* right = comma->right();
+ AsmType* right_type = nullptr;
+ if (auto* right_as_call = right->AsCall()) {
+ RECURSE(right_type = ValidateCall(AsmType::Void(), right_as_call));
+ } else {
+ RECURSE(right_type = ValidateExpression(right));
+ }
+
+ return right_type;
+}
+
+// 6.8.2 NumericLiteral
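+// e.g. (illustrative, assuming LargestFixNum is 0x7fffffff): 0.5 -> double,
+// 42 -> fixnum, 2147483648 -> unsigned, -1 -> signed.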
+AsmType* AsmTyper::ValidateNumericLiteral(Literal* literal) {
+ // *VIOLATION* asm.js does not allow the use of undefined, but our parser
+ // inserts them, so we have to handle them.
+ if (literal->IsUndefinedLiteral()) {
+ return AsmType::Void();
+ }
+
+ if (literal->raw_value()->ContainsDot()) {
+ return AsmType::Double();
+ }
+
+ uint32_t value;
+ if (!literal->value()->ToUint32(&value)) {
+ int32_t value;
+ if (!literal->value()->ToInt32(&value)) {
+ FAIL(literal, "Integer literal is out of range.");
+ }
+    // *VIOLATION* Not really a violation, but rather a difference in the
+    // validation. The spec handles -NumericLiteral in ValidateUnaryExpression,
+ // but V8's AST represents the negative literals as Literals.
+ return AsmType::Signed();
+ }
+
+ if (value <= LargestFixNum) {
+ return AsmType::FixNum();
+ }
+
+ return AsmType::Unsigned();
+}
+
+// 6.8.3 Identifier
+AsmType* AsmTyper::ValidateIdentifier(VariableProxy* proxy) {
+ auto* proxy_info = Lookup(proxy->var());
+ if (proxy_info == nullptr) {
+ FAIL(proxy, "Undeclared identifier.");
+ }
+ auto* type = proxy_info->type();
+ if (type->IsA(AsmType::None()) || type->AsCallableType() != nullptr) {
+ FAIL(proxy, "Identifier may not be accessed by ordinary expressions.");
+ }
+ return type;
+}
+
+// 6.8.4 CallExpression
+AsmType* AsmTyper::ValidateCallExpression(Call* call) {
+ AsmType* return_type;
+ RECURSE(return_type = ValidateFloatCoercion(call));
+ if (return_type == nullptr) {
+    FAIL(call, "Unannotated call to a function must be a call to fround.");
+ }
+ return return_type;
+}
+
+// 6.8.5 MemberExpression
+AsmType* AsmTyper::ValidateMemberExpression(Property* prop) {
+ AsmType* return_type;
+ RECURSE(return_type = ValidateHeapAccess(prop, LoadFromHeap));
+ return return_type;
+}
+
+// 6.8.6 AssignmentExpression
+AsmType* AsmTyper::ValidateAssignmentExpression(Assignment* assignment) {
+ AsmType* value_type;
+ RECURSE(value_type = ValidateExpression(assignment->value()));
+
+ if (assignment->op() == Token::INIT) {
+ FAIL(assignment,
+ "Local variable declaration must be at the top of the function.");
+ }
+
+ if (auto* target_as_proxy = assignment->target()->AsVariableProxy()) {
+ auto* var = target_as_proxy->var();
+ auto* target_info = Lookup(var);
+
+ if (target_info == nullptr) {
+ if (var->mode() != TEMPORARY) {
+ FAIL(target_as_proxy, "Undeclared identifier.");
+ }
+ // Temporary variables are special: we add them to the local symbol table
+ // as we see them, with the exact type of the variable's initializer. This
+ // means that temporary variables might have nonsensical types (i.e.,
+ // intish, float?, fixnum, and not just the "canonical" types.)
+ auto* var_info = new (zone_) VariableInfo(value_type);
+ var_info->set_mutability(VariableInfo::kLocal);
+ if (!ValidAsmIdentifier(target_as_proxy->name())) {
+ FAIL(target_as_proxy,
+ "Invalid asm.js identifier in temporary variable.");
+ }
+
+ if (!AddLocal(var, var_info)) {
+ FAIL(assignment, "Failed to add temporary variable to symbol table.");
+ }
+ return value_type;
+ }
+
+ if (!target_info->IsMutable()) {
+ FAIL(assignment, "Can't assign to immutable symbol.");
+ }
+
+ DCHECK_NE(AsmType::None(), target_info->type());
+ if (!value_type->IsA(target_info->type())) {
+ FAIL(assignment, "Type mismatch in assignment.");
+ }
+
+ return value_type;
+ }
+
+ if (auto* target_as_property = assignment->target()->AsProperty()) {
+ AsmType* allowed_store_types;
+ RECURSE(allowed_store_types =
+ ValidateHeapAccess(target_as_property, StoreToHeap));
+
+ if (!value_type->IsA(allowed_store_types)) {
+ FAIL(assignment, "Type mismatch in heap assignment.");
+ }
+
+ return value_type;
+ }
+
+ FAIL(assignment, "Invalid asm.js assignment.");
+}
+
+// 6.8.7 UnaryExpression
+AsmType* AsmTyper::ValidateUnaryExpression(UnaryOperation* unop) {
+  // *VIOLATION* -NumericLiteral is validated in ValidateNumericLiteral.
+  // *VIOLATION* +UnaryExpression is validated in ValidateBinaryOperation.
+  // *VIOLATION* ~UnaryExpression is validated in ValidateBinaryOperation.
+  // *VIOLATION* ~~UnaryExpression is validated in ValidateBinaryOperation.
+ DCHECK(unop->op() != Token::BIT_NOT);
+ DCHECK(unop->op() != Token::ADD);
+ AsmType* exp_type;
+ RECURSE(exp_type = ValidateExpression(unop->expression()));
+#define UNOP_OVERLOAD(Src, Dest) \
+ do { \
+ if (exp_type->IsA(AsmType::Src())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+
+ // 8.1 Unary Operators
+ switch (unop->op()) {
+ default:
+ FAIL(unop, "Invalid unary operator.");
+ case Token::ADD:
+ // We can't test this because of the +x -> x * 1.0 transformation.
+ DCHECK(false);
+ UNOP_OVERLOAD(Signed, Double);
+ UNOP_OVERLOAD(Unsigned, Double);
+ UNOP_OVERLOAD(DoubleQ, Double);
+ UNOP_OVERLOAD(FloatQ, Double);
+ FAIL(unop, "Invalid type for unary +.");
+ case Token::SUB:
+ // We can't test this because of the -x -> x * -1.0 transformation.
+ DCHECK(false);
+ UNOP_OVERLOAD(Int, Intish);
+ UNOP_OVERLOAD(DoubleQ, Double);
+ UNOP_OVERLOAD(FloatQ, Floatish);
+ FAIL(unop, "Invalid type for unary -.");
+ case Token::BIT_NOT:
+ // We can't test this because of the ~x -> x ^ -1 transformation.
+ DCHECK(false);
+ UNOP_OVERLOAD(Intish, Signed);
+ FAIL(unop, "Invalid type for ~.");
+ case Token::NOT:
+ UNOP_OVERLOAD(Int, Int);
+ FAIL(unop, "Invalid type for !.");
+ }
+
+#undef UNOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.8 MultiplicativeExpression
+namespace {
+bool IsIntishLiteralFactor(Expression* expr, int32_t* factor) {
+ auto* literal = expr->AsLiteral();
+ if (literal == nullptr) {
+ return false;
+ }
+
+ if (literal->raw_value()->ContainsDot()) {
+ return false;
+ }
+
+ if (!literal->value()->ToInt32(factor)) {
+ return false;
+ }
+ static const int32_t kIntishBound = 1 << 20;
+ return -kIntishBound < *factor && *factor < kIntishBound;
+}
+} // namespace
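+
+// e.g. (illustrative): in `(x | 0) * 1000` the literal 1000 is a valid
+// intish factor (|factor| < 2^20), so the product is intish; a factor of
+// 0x200000 instead falls through to the generic MUL overloads below.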
+
+AsmType* AsmTyper::ValidateMultiplicativeExpression(BinaryOperation* binop) {
+ DCHECK(!IsDoubleAnnotation(binop));
+
+ auto* left = binop->left();
+ auto* right = binop->right();
+
+ bool intish_mul_failed = false;
+ if (binop->op() == Token::MUL) {
+ int32_t factor;
+ if (IsIntishLiteralFactor(left, &factor)) {
+ AsmType* right_type;
+ RECURSE(right_type = ValidateExpression(right));
+ if (right_type->IsA(AsmType::Int())) {
+ return AsmType::Intish();
+ }
+ // Can't fail here, because the rhs might contain a valid intish factor.
+ //
+ // The solution is to flag that there was an error, and later on -- when
+ // both lhs and rhs are evaluated -- complain.
+ intish_mul_failed = true;
+ }
+
+ if (IsIntishLiteralFactor(right, &factor)) {
+ AsmType* left_type;
+ RECURSE(left_type = ValidateExpression(left));
+ if (left_type->IsA(AsmType::Int())) {
+ // *VIOLATION* This will also (and correctly) handle -X, when X is an
+ // integer. Therefore, we don't need to handle this case within the if
+ // block below.
+ return AsmType::Intish();
+ }
+ intish_mul_failed = true;
+
+ if (factor == -1) {
+ // *VIOLATION* The frontend transforms -x into x * -1 (not -1.0, because
+ // consistency is overrated.)
+ if (left_type->IsA(AsmType::DoubleQ())) {
+ return AsmType::Double();
+ } else if (left_type->IsA(AsmType::FloatQ())) {
+ return AsmType::Floatish();
+ }
+ }
+ }
+ }
+
+ if (intish_mul_failed) {
+ FAIL(binop, "Invalid types for intish * (or unary -).");
+ }
+
+ AsmType* left_type;
+ AsmType* right_type;
+ RECURSE(left_type = ValidateExpression(left));
+ RECURSE(right_type = ValidateExpression(right));
+
+#define BINOP_OVERLOAD(Src0, Src1, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+ switch (binop->op()) {
+ default:
+ FAIL(binop, "Invalid multiplicative expression.");
+ case Token::MUL:
+ BINOP_OVERLOAD(DoubleQ, DoubleQ, Double);
+ BINOP_OVERLOAD(FloatQ, FloatQ, Floatish);
+ FAIL(binop, "Invalid operands for *.");
+ case Token::DIV:
+ BINOP_OVERLOAD(Signed, Signed, Intish);
+ BINOP_OVERLOAD(Unsigned, Unsigned, Intish);
+ BINOP_OVERLOAD(DoubleQ, DoubleQ, Double);
+ BINOP_OVERLOAD(FloatQ, FloatQ, Floatish);
+ FAIL(binop, "Invalid operands for /.");
+ case Token::MOD:
+ BINOP_OVERLOAD(Signed, Signed, Intish);
+ BINOP_OVERLOAD(Unsigned, Unsigned, Intish);
+ BINOP_OVERLOAD(DoubleQ, DoubleQ, Double);
+ FAIL(binop, "Invalid operands for %.");
+ }
+#undef BINOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.9 AdditiveExpression
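+// e.g. (illustrative): in `((a | 0) + (b | 0) + (c | 0)) | 0` the nested
+// additions are validated with an increasing intish_count, allowing chains
+// of up to 2^20 uncoerced int additions before a coercion is required.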
+AsmType* AsmTyper::ValidateAdditiveExpression(BinaryOperation* binop,
+ uint32_t intish_count) {
+ static const uint32_t kMaxIntish = 1 << 20;
+
+ auto* left = binop->left();
+ auto* left_as_binop = left->AsBinaryOperation();
+ AsmType* left_type;
+
+ // TODO(jpp): maybe use an iterative approach instead of the recursion to
+ // ValidateAdditiveExpression.
+ if (left_as_binop != nullptr && (left_as_binop->op() == Token::ADD ||
+ left_as_binop->op() == Token::SUB)) {
+ RECURSE(left_type =
+ ValidateAdditiveExpression(left_as_binop, intish_count + 1));
+ SetTypeOf(left_as_binop, left_type);
+ } else {
+ RECURSE(left_type = ValidateExpression(left));
+ }
+
+ auto* right = binop->right();
+ auto* right_as_binop = right->AsBinaryOperation();
+ AsmType* right_type;
+
+ if (right_as_binop != nullptr && (right_as_binop->op() == Token::ADD ||
+ right_as_binop->op() == Token::SUB)) {
+ RECURSE(right_type =
+ ValidateAdditiveExpression(right_as_binop, intish_count + 1));
+ SetTypeOf(right_as_binop, right_type);
+ } else {
+ RECURSE(right_type = ValidateExpression(right));
+ }
+
+ if (left_type->IsA(AsmType::FloatQ()) && right_type->IsA(AsmType::FloatQ())) {
+ return AsmType::Floatish();
+ }
+
+ if (left_type->IsA(AsmType::Int()) && right_type->IsA(AsmType::Int())) {
+ if (intish_count == 0) {
+ return AsmType::Intish();
+ }
+ if (intish_count < kMaxIntish) {
+ return AsmType::Int();
+ }
+ FAIL(binop, "Too many uncoerced integer additive expressions.");
+ }
+
+ if (left_type->IsA(AsmType::Double()) && right_type->IsA(AsmType::Double())) {
+ return AsmType::Double();
+ }
+
+ if (binop->op() == Token::SUB) {
+ if (left_type->IsA(AsmType::DoubleQ()) &&
+ right_type->IsA(AsmType::DoubleQ())) {
+ return AsmType::Double();
+ }
+ }
+
+ FAIL(binop, "Invalid operands for additive expression.");
+}
+
+// 6.8.10 ShiftExpression
+AsmType* AsmTyper::ValidateShiftExpression(BinaryOperation* binop) {
+ auto* left = binop->left();
+ auto* right = binop->right();
+
+ AsmType* left_type;
+ AsmType* right_type;
+ RECURSE(left_type = ValidateExpression(left));
+ RECURSE(right_type = ValidateExpression(right));
+
+#define BINOP_OVERLOAD(Src0, Src1, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+ switch (binop->op()) {
+ default:
+ FAIL(binop, "Invalid shift expression.");
+ case Token::SHL:
+ BINOP_OVERLOAD(Intish, Intish, Signed);
+ FAIL(binop, "Invalid operands for <<.");
+ case Token::SAR:
+ BINOP_OVERLOAD(Intish, Intish, Signed);
+ FAIL(binop, "Invalid operands for >>.");
+ case Token::SHR:
+ BINOP_OVERLOAD(Intish, Intish, Unsigned);
+ FAIL(binop, "Invalid operands for >>>.");
+ }
+#undef BINOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.11 RelationalExpression
+AsmType* AsmTyper::ValidateRelationalExpression(CompareOperation* cmpop) {
+ auto* left = cmpop->left();
+ auto* right = cmpop->right();
+
+ AsmType* left_type;
+ AsmType* right_type;
+ RECURSE(left_type = ValidateExpression(left));
+ RECURSE(right_type = ValidateExpression(right));
+
+#define CMPOP_OVERLOAD(Src0, Src1, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+ switch (cmpop->op()) {
+ default:
+ FAIL(cmpop, "Invalid relational expression.");
+ case Token::LT:
+ CMPOP_OVERLOAD(Signed, Signed, Int);
+ CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
+ CMPOP_OVERLOAD(Float, Float, Int);
+ CMPOP_OVERLOAD(Double, Double, Int);
+ FAIL(cmpop, "Invalid operands for <.");
+ case Token::GT:
+ CMPOP_OVERLOAD(Signed, Signed, Int);
+ CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
+ CMPOP_OVERLOAD(Float, Float, Int);
+ CMPOP_OVERLOAD(Double, Double, Int);
+ FAIL(cmpop, "Invalid operands for >.");
+ case Token::LTE:
+ CMPOP_OVERLOAD(Signed, Signed, Int);
+ CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
+ CMPOP_OVERLOAD(Float, Float, Int);
+ CMPOP_OVERLOAD(Double, Double, Int);
+ FAIL(cmpop, "Invalid operands for <=.");
+ case Token::GTE:
+ CMPOP_OVERLOAD(Signed, Signed, Int);
+ CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
+ CMPOP_OVERLOAD(Float, Float, Int);
+ CMPOP_OVERLOAD(Double, Double, Int);
+ FAIL(cmpop, "Invalid operands for >=.");
+ }
+#undef CMPOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.12 EqualityExpression
+AsmType* AsmTyper::ValidateEqualityExpression(CompareOperation* cmpop) {
+ auto* left = cmpop->left();
+ auto* right = cmpop->right();
+
+ AsmType* left_type;
+ AsmType* right_type;
+ RECURSE(left_type = ValidateExpression(left));
+ RECURSE(right_type = ValidateExpression(right));
+
+#define CMPOP_OVERLOAD(Src0, Src1, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+ switch (cmpop->op()) {
+ default:
+ FAIL(cmpop, "Invalid equality expression.");
+ case Token::EQ:
+ CMPOP_OVERLOAD(Signed, Signed, Int);
+ CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
+ CMPOP_OVERLOAD(Float, Float, Int);
+ CMPOP_OVERLOAD(Double, Double, Int);
+ FAIL(cmpop, "Invalid operands for ==.");
+ case Token::NE:
+ CMPOP_OVERLOAD(Signed, Signed, Int);
+ CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
+ CMPOP_OVERLOAD(Float, Float, Int);
+ CMPOP_OVERLOAD(Double, Double, Int);
+ FAIL(cmpop, "Invalid operands for !=.");
+ }
+#undef CMPOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.13 BitwiseANDExpression
+AsmType* AsmTyper::ValidateBitwiseANDExpression(BinaryOperation* binop) {
+ auto* left = binop->left();
+ auto* right = binop->right();
+
+ AsmType* left_type;
+ AsmType* right_type;
+ RECURSE(left_type = ValidateExpression(left));
+ RECURSE(right_type = ValidateExpression(right));
+
+ if (binop->op() != Token::BIT_AND) {
+ FAIL(binop, "Invalid & expression.");
+ }
+
+#define BINOP_OVERLOAD(Src0, Src1, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+ BINOP_OVERLOAD(Intish, Intish, Signed);
+ FAIL(binop, "Invalid operands for &.");
+#undef BINOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.14 BitwiseXORExpression
+AsmType* AsmTyper::ValidateBitwiseXORExpression(BinaryOperation* binop) {
+ auto* left = binop->left();
+ auto* right = binop->right();
+
+ AsmType* left_type;
+ AsmType* right_type;
+ RECURSE(left_type = ValidateExpression(left));
+ RECURSE(right_type = ValidateExpression(right));
+
+ if (binop->op() != Token::BIT_XOR) {
+ FAIL(binop, "Invalid ^ expression.");
+ }
+
+#define BINOP_OVERLOAD(Src0, Src1, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+ BINOP_OVERLOAD(Intish, Intish, Signed);
+ FAIL(binop, "Invalid operands for ^.");
+#undef BINOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.15 BitwiseORExpression
+AsmType* AsmTyper::ValidateBitwiseORExpression(BinaryOperation* binop) {
+ auto* left = binop->left();
+ if (IsIntAnnotation(binop)) {
+ if (auto* left_as_call = left->AsCall()) {
+ AsmType* type;
+ RECURSE(type = ValidateCall(AsmType::Signed(), left_as_call));
+ return type;
+ }
+
+    // TODO(jpp): at this point we know that binop is expr|0. We could simply
+ //
+ // RECURSE(t = ValidateExpression(left));
+ // FAIL_IF(t->IsNotA(Intish));
+ // return Signed;
+ }
+
+ auto* right = binop->right();
+ AsmType* left_type;
+ AsmType* right_type;
+ RECURSE(left_type = ValidateExpression(left));
+ RECURSE(right_type = ValidateExpression(right));
+
+ if (binop->op() != Token::BIT_OR) {
+ FAIL(binop, "Invalid | expression.");
+ }
+
+#define BINOP_OVERLOAD(Src0, Src1, Dest) \
+ do { \
+ if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
+ return AsmType::Dest(); \
+ } \
+ } while (0)
+ BINOP_OVERLOAD(Intish, Intish, Signed);
+ FAIL(binop, "Invalid operands for |.");
+#undef BINOP_OVERLOAD
+
+ UNREACHABLE();
+}
+
+// 6.8.16 ConditionalExpression
+AsmType* AsmTyper::ValidateConditionalExpression(Conditional* cond) {
+ AsmType* cond_type;
+ RECURSE(cond_type = ValidateExpression(cond->condition()));
+ if (!cond_type->IsA(AsmType::Int())) {
+ FAIL(cond, "Ternary operation condition should be int.");
+ }
+
+ AsmType* then_type;
+ RECURSE(then_type = ValidateExpression(cond->then_expression()));
+ AsmType* else_type;
+ RECURSE(else_type = ValidateExpression(cond->else_expression()));
+
+#define SUCCEED_IF_BOTH_ARE(type) \
+ do { \
+ if (then_type->IsA(AsmType::type())) { \
+ if (!else_type->IsA(AsmType::type())) { \
+ FAIL(cond, "Type mismatch for ternary operation result type."); \
+ } \
+ return AsmType::type(); \
+ } \
+ } while (0)
+ SUCCEED_IF_BOTH_ARE(Int);
+ SUCCEED_IF_BOTH_ARE(Float);
+ SUCCEED_IF_BOTH_ARE(Double);
+#undef SUCCEED_IF_BOTH_ARE
+
+ FAIL(cond, "Ternary operator must return int, float, or double.");
+}
+
+// 6.9 ValidateCall
+namespace {
+bool ExtractIndirectCallMask(Expression* expr, uint32_t* value) {
+ auto* as_literal = expr->AsLiteral();
+ if (as_literal == nullptr) {
+ return false;
+ }
+
+ if (as_literal->raw_value()->ContainsDot()) {
+ return false;
+ }
+
+ if (!as_literal->value()->ToUint32(value)) {
+ return false;
+ }
+
+ return base::bits::IsPowerOfTwo32(1 + *value);
+}
+} // namespace
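+
+// e.g. (illustrative): `ftable[i & 3](x | 0) | 0` uses mask 3, giving a
+// table length of 4; the IsPowerOfTwo32(1 + mask) check above enforces that
+// masks have the form 2^n - 1.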
+
+AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
+ AsmType* float_coercion_type;
+ RECURSE(float_coercion_type = ValidateFloatCoercion(call));
+ if (float_coercion_type == AsmType::Float()) {
+ SetTypeOf(call, AsmType::Float());
+ return return_type;
+ }
+
+ // TODO(jpp): we should be able to reuse the args vector's storage space.
+ ZoneVector<AsmType*> args(zone_);
+ args.reserve(call->arguments()->length());
+
+ for (auto* arg : *call->arguments()) {
+ AsmType* arg_type;
+ RECURSE(arg_type = ValidateExpression(arg));
+ args.emplace_back(arg_type);
+ }
+
+ auto* call_expr = call->expression();
+
+ // identifier(Expression...)
+ if (auto* call_var_proxy = call_expr->AsVariableProxy()) {
+ auto* call_var_info = Lookup(call_var_proxy->var());
+
+ if (call_var_info == nullptr) {
+ // We can't fail here: the validator performs a single pass over the AST,
+ // so it is possible for some calls to be currently unresolved. We eagerly
+ // add the function to the table of globals.
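+      // e.g. (illustrative): if f calls g before g is defined, a signature
+      // for g is synthesized from this call site; when g's definition is
+      // validated later, it must match the inferred signature (see
+      // ValidateFunction).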
+ auto* call_type = AsmType::Function(zone_, return_type)->AsFunctionType();
+ for (auto* arg : args) {
+ call_type->AddArgument(arg->ToParameterType());
+ }
+ auto* fun_info =
+ new (zone_) VariableInfo(reinterpret_cast<AsmType*>(call_type));
+ fun_info->set_mutability(VariableInfo::kImmutableGlobal);
+ AddForwardReference(call_var_proxy, fun_info);
+ if (!ValidAsmIdentifier(call_var_proxy->name())) {
+ FAIL(call_var_proxy,
+ "Invalid asm.js identifier in (forward) function name.");
+ }
+ if (!AddGlobal(call_var_proxy->var(), fun_info)) {
+ DCHECK(false);
+ FAIL(call, "Redeclared global identifier.");
+ }
+ SetTypeOf(call_var_proxy, reinterpret_cast<AsmType*>(call_type));
+ SetTypeOf(call, return_type);
+ return return_type;
+ }
+
+ auto* callee_type = call_var_info->type()->AsCallableType();
+ if (callee_type == nullptr) {
+ FAIL(call, "Calling something that's not a function.");
+ }
+
+ if (callee_type->AsFFIType() != nullptr &&
+ return_type == AsmType::Float()) {
+ FAIL(call, "Foreign functions can't return float.");
+ }
+
+ if (!callee_type->CanBeInvokedWith(return_type, args)) {
+ FAIL(call, "Function invocation does not match function type.");
+ }
+
+ SetTypeOf(call_var_proxy, call_var_info->type());
+ SetTypeOf(call, return_type);
+ return return_type;
+ }
+
+ // identifier[expr & n](Expression...)
+ if (auto* call_property = call_expr->AsProperty()) {
+ auto* index = call_property->key()->AsBinaryOperation();
+ if (index == nullptr || index->op() != Token::BIT_AND) {
+ FAIL(call_property->key(),
+ "Indirect call index must be in the expr & mask form.");
+ }
+
+ auto* left = index->left();
+ auto* right = index->right();
+ uint32_t mask;
+ if (!ExtractIndirectCallMask(right, &mask)) {
+ if (!ExtractIndirectCallMask(left, &mask)) {
+ FAIL(right, "Invalid indirect call mask.");
+ } else {
+ left = right;
+ }
+ }
+ const uint32_t table_length = mask + 1;
+
+ AsmType* left_type;
+ RECURSE(left_type = ValidateExpression(left));
+ if (!left_type->IsA(AsmType::Intish())) {
+ FAIL(left, "Indirect call index should be an intish.");
+ }
+
+ auto* name_var = call_property->obj()->AsVariableProxy();
+
+ if (name_var == nullptr) {
+ FAIL(call_property, "Invalid call.");
+ }
+
+ auto* name_info = Lookup(name_var->var());
+ if (name_info == nullptr) {
+ // We can't fail here -- just like above.
+ auto* call_type = AsmType::Function(zone_, return_type)->AsFunctionType();
+ for (auto* arg : args) {
+ call_type->AddArgument(arg->ToParameterType());
+ }
+ auto* table_type = AsmType::FunctionTableType(
+ zone_, table_length, reinterpret_cast<AsmType*>(call_type));
+ auto* fun_info =
+ new (zone_) VariableInfo(reinterpret_cast<AsmType*>(table_type));
+ fun_info->set_mutability(VariableInfo::kImmutableGlobal);
+ AddForwardReference(name_var, fun_info);
+ if (!ValidAsmIdentifier(name_var->name())) {
+ FAIL(name_var,
+ "Invalid asm.js identifier in (forward) function table name.");
+ }
+ if (!AddGlobal(name_var->var(), fun_info)) {
+ DCHECK(false);
+ FAIL(call, "Redeclared global identifier.");
+ }
+ SetTypeOf(call_property, reinterpret_cast<AsmType*>(call_type));
+ SetTypeOf(call, return_type);
+ return return_type;
+ }
+
+ auto* previous_type = name_info->type()->AsFunctionTableType();
+ if (previous_type == nullptr) {
+ FAIL(call, "Identifier does not name a function table.");
+ }
+
+ if (table_length != previous_type->length()) {
+ FAIL(call, "Function table size does not match expected size.");
+ }
+
+ auto* previous_type_signature =
+ previous_type->signature()->AsFunctionType();
+ DCHECK(previous_type_signature != nullptr);
+ if (!previous_type_signature->CanBeInvokedWith(return_type, args)) {
+ // TODO(jpp): better error messages.
+ FAIL(call,
+ "Function pointer table signature does not match previous "
+ "signature.");
+ }
+
+ SetTypeOf(call_property, previous_type->signature());
+ SetTypeOf(call, return_type);
+ return return_type;
+ }
+
+ FAIL(call, "Invalid call.");
+}
+
+// 6.10 ValidateHeapAccess
+namespace {
+bool ExtractHeapAccessShift(Expression* expr, uint32_t* value) {
+ auto* as_literal = expr->AsLiteral();
+ if (as_literal == nullptr) {
+ return false;
+ }
+
+ if (as_literal->raw_value()->ContainsDot()) {
+ return false;
+ }
+
+ return as_literal->value()->ToUint32(value);
+}
+
+// Returns whether index is too large to access a heap with the given type.
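+// e.g. (illustrative): a Float32Array literal index must be below 2^30 so
+// that the byte offset (index * 4) still fits in 32 bits.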
+bool LiteralIndexOutOfBounds(AsmType* obj_type, uint32_t index) {
+ switch (obj_type->ElementSizeInBytes()) {
+ case 1:
+ return false;
+ case 2:
+ return (index & 0x80000000u) != 0;
+ case 4:
+ return (index & 0xC0000000u) != 0;
+ case 8:
+ return (index & 0xE0000000u) != 0;
+ }
+ UNREACHABLE();
+ return true;
+}
+
+} // namespace
+
+AsmType* AsmTyper::ValidateHeapAccess(Property* heap,
+ HeapAccessType access_type) {
+ auto* obj = heap->obj()->AsVariableProxy();
+ if (obj == nullptr) {
+ FAIL(heap, "Invalid heap access.");
+ }
+
+ auto* obj_info = Lookup(obj->var());
+ if (obj_info == nullptr) {
+ FAIL(heap, "Undeclared identifier in heap access.");
+ }
+
+ auto* obj_type = obj_info->type();
+ if (!obj_type->IsA(AsmType::Heap())) {
+ FAIL(heap, "Identifier does not represent a heap view.");
+ }
+ SetTypeOf(obj, obj_type);
+
+ if (auto* key_as_literal = heap->key()->AsLiteral()) {
+ if (key_as_literal->raw_value()->ContainsDot()) {
+ FAIL(key_as_literal, "Heap access index must be int.");
+ }
+
+ uint32_t index;
+ if (!key_as_literal->value()->ToUint32(&index)) {
+ FAIL(key_as_literal,
+ "Heap access index must be a 32-bit unsigned integer.");
+ }
+
+ if (LiteralIndexOutOfBounds(obj_type, index)) {
+      FAIL(key_as_literal, "Heap access index is out of bounds.");
+ }
+
+ if (access_type == LoadFromHeap) {
+ return obj_type->LoadType();
+ }
+ return obj_type->StoreType();
+ }
+
+ if (auto* key_as_binop = heap->key()->AsBinaryOperation()) {
+ uint32_t shift;
+ if (key_as_binop->op() == Token::SAR &&
+ ExtractHeapAccessShift(key_as_binop->right(), &shift) &&
+ (1 << shift) == obj_type->ElementSizeInBytes()) {
+ AsmType* type;
+ RECURSE(type = ValidateExpression(key_as_binop->left()));
+ if (type->IsA(AsmType::Intish())) {
+ if (access_type == LoadFromHeap) {
+ return obj_type->LoadType();
+ }
+ return obj_type->StoreType();
+ }
+ FAIL(key_as_binop, "Invalid heap access index.");
+ }
+ }
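+  // e.g. (illustrative): `HEAP32[p >> 2]` passes the check above because
+  // 1 << 2 matches the 4-byte element size, provided p is intish.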
+
+ if (obj_type->ElementSizeInBytes() == 1) {
+ // Leniency: if this is a byte array, we don't require the shift operation
+ // to be present.
+ AsmType* index_type;
+ RECURSE(index_type = ValidateExpression(heap->key()));
+ if (!index_type->IsA(AsmType::Int())) {
+ FAIL(heap, "Invalid heap access index for byte array.");
+ }
+ if (access_type == LoadFromHeap) {
+ return obj_type->LoadType();
+ }
+ return obj_type->StoreType();
+ }
+
+ FAIL(heap, "Invalid heap access index.");
+}
+
+// 6.11 ValidateFloatCoercion
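+// e.g. (illustrative): `fround(x)` converts a signed, unsigned, double?, or
+// floatish argument to float; `fround(g(...))` additionally types the call
+// to g as returning float.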
+bool AsmTyper::IsCallToFround(Call* call) {
+ if (call->arguments()->length() != 1) {
+ return false;
+ }
+
+ auto* call_var_proxy = call->expression()->AsVariableProxy();
+ if (call_var_proxy == nullptr) {
+ return false;
+ }
+
+ auto* call_var_info = Lookup(call_var_proxy->var());
+ if (call_var_info == nullptr) {
+ return false;
+ }
+
+ return call_var_info->standard_member() == kMathFround;
+}
+
+AsmType* AsmTyper::ValidateFloatCoercion(Call* call) {
+ if (!IsCallToFround(call)) {
+ return nullptr;
+ }
+
+ auto* arg = call->arguments()->at(0);
+  // call is a fround() node. From here on, there are two possible outcomes:
+ // 1. fround is used as a return type annotation.
+ if (auto* arg_as_call = arg->AsCall()) {
+ RECURSE(ValidateCall(AsmType::Float(), arg_as_call));
+ return AsmType::Float();
+ }
+
+ // 2. fround is used for converting to float.
+ AsmType* arg_type;
+ RECURSE(arg_type = ValidateExpression(arg));
+ if (arg_type->IsA(AsmType::Floatish()) || arg_type->IsA(AsmType::DoubleQ()) ||
+ arg_type->IsA(AsmType::Signed()) || arg_type->IsA(AsmType::Unsigned())) {
+ SetTypeOf(call->expression(), fround_type_);
+ return AsmType::Float();
+ }
+
+ FAIL(call, "Invalid argument type to fround.");
+}
+
+// 5.1 ParameterTypeAnnotations
+AsmType* AsmTyper::ParameterTypeAnnotations(Variable* parameter,
+ Expression* annotation) {
+ if (auto* binop = annotation->AsBinaryOperation()) {
+ // Must be:
+ // * x|0
+    // * x*1.0 (*VIOLATION* i.e., +x)
+ auto* left = binop->left()->AsVariableProxy();
+ if (left == nullptr) {
+ FAIL(
+ binop->left(),
+ "Invalid parameter type annotation - should annotate an identifier.");
+ }
+ if (left->var() != parameter) {
+ FAIL(binop->left(),
+ "Invalid parameter type annotation - should annotate a parameter.");
+ }
+ if (IsDoubleAnnotation(binop)) {
+ SetTypeOf(left, AsmType::Double());
+ return AsmType::Double();
+ }
+ if (IsIntAnnotation(binop)) {
+ SetTypeOf(left, AsmType::Int());
+ return AsmType::Int();
+ }
+ FAIL(binop, "Invalid parameter type annotation.");
+ }
+
+ auto* call = annotation->AsCall();
+ if (call == nullptr) {
+ FAIL(
+ annotation,
+ "Invalid float parameter type annotation - must be fround(parameter).");
+ }
+
+ if (!IsCallToFround(call)) {
+ FAIL(annotation,
+ "Invalid float parameter type annotation - must be call to fround.");
+ }
+
+ auto* src_expr = call->arguments()->at(0)->AsVariableProxy();
+ if (src_expr == nullptr) {
+ FAIL(annotation,
+ "Invalid float parameter type annotation - argument to fround is not "
+ "an identifier.");
+ }
+
+ if (src_expr->var() != parameter) {
+ FAIL(annotation,
+ "Invalid float parameter type annotation - argument to fround is not "
+ "a parameter.");
+ }
+
+ SetTypeOf(src_expr, AsmType::Float());
+ return AsmType::Float();
+}
+
+// 5.2 ReturnTypeAnnotations
+AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
+ if (statement == nullptr) {
+ return AsmType::Void();
+ }
+
+ auto* ret_expr = statement->expression();
+ if (ret_expr == nullptr) {
+ return AsmType::Void();
+ }
+
+ if (auto* binop = ret_expr->AsBinaryOperation()) {
+ if (IsDoubleAnnotation(binop)) {
+ return AsmType::Double();
+ } else if (IsIntAnnotation(binop)) {
+ return AsmType::Signed();
+ }
+ FAIL(statement, "Invalid return type annotation.");
+ }
+
+ if (auto* call = ret_expr->AsCall()) {
+ if (IsCallToFround(call)) {
+ return AsmType::Float();
+ }
+ FAIL(statement, "Invalid function call in return statement.");
+ }
+
+ if (auto* literal = ret_expr->AsLiteral()) {
+ int32_t _;
+ if (literal->raw_value()->ContainsDot()) {
+ return AsmType::Double();
+ } else if (literal->value()->ToInt32(&_)) {
+ return AsmType::Signed();
+ } else if (literal->IsUndefinedLiteral()) {
+ // *VIOLATION* The parser changes
+ //
+ // return;
+ //
+ // into
+ //
+ // return undefined
+ return AsmType::Void();
+ }
+ FAIL(statement, "Invalid literal in return statement.");
+ }
+
+ FAIL(statement, "Invalid return type expression.");
+}
+
+// 5.4 VariableTypeAnnotations
+// Also used for 5.5 GlobalVariableTypeAnnotations
+AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer) {
+ if (auto* literal = initializer->AsLiteral()) {
+ if (literal->raw_value()->ContainsDot()) {
+ SetTypeOf(initializer, AsmType::Double());
+ return AsmType::Double();
+ }
+ int32_t i32;
+ uint32_t u32;
+ if (literal->value()->ToUint32(&u32)) {
+ if (u32 > LargestFixNum) {
+ SetTypeOf(initializer, AsmType::Unsigned());
+ } else {
+ SetTypeOf(initializer, AsmType::FixNum());
+ }
+ } else if (literal->value()->ToInt32(&i32)) {
+ SetTypeOf(initializer, AsmType::Signed());
+ } else {
+ FAIL(initializer, "Invalid type annotation - forbidden literal.");
+ }
+ return AsmType::Int();
+ }
+
+ auto* call = initializer->AsCall();
+ if (call == nullptr) {
+ FAIL(initializer,
+ "Invalid variable initialization - it should be a literal, or "
+ "fround(literal).");
+ }
+
+ if (!IsCallToFround(call)) {
+ FAIL(initializer,
+ "Invalid float coercion - expected call fround(literal).");
+ }
+
+ auto* src_expr = call->arguments()->at(0)->AsLiteral();
+ if (src_expr == nullptr) {
+ FAIL(initializer,
+ "Invalid float type annotation - expected literal argument for call "
+ "to fround.");
+ }
+
+ if (!src_expr->raw_value()->ContainsDot()) {
+ FAIL(initializer,
+ "Invalid float type annotation - expected literal argument to be a "
+ "floating point literal.");
+ }
+
+ return AsmType::Float();
+}
+
+// 5.5 GlobalVariableTypeAnnotations
+AsmType* AsmTyper::NewHeapView(CallNew* new_heap_view) {
+ auto* heap_type = new_heap_view->expression()->AsProperty();
+ if (heap_type == nullptr) {
+ FAIL(new_heap_view, "Invalid type after new.");
+ }
+ auto* heap_view_info = ImportLookup(heap_type);
+
+ if (heap_view_info == nullptr) {
+ FAIL(new_heap_view, "Unknown stdlib member in heap view declaration.");
+ }
+
+ if (!heap_view_info->type()->IsA(AsmType::Heap())) {
+ FAIL(new_heap_view, "Type is not a heap view type.");
+ }
+
+ if (new_heap_view->arguments()->length() != 1) {
+ FAIL(new_heap_view, "Invalid number of arguments when creating heap view.");
+ }
+
+ auto* heap = new_heap_view->arguments()->at(0);
+ auto* heap_var_proxy = heap->AsVariableProxy();
+
+ if (heap_var_proxy == nullptr) {
+ FAIL(heap,
+ "Heap view creation parameter should be the module's heap parameter.");
+ }
+
+ auto* heap_var_info = Lookup(heap_var_proxy->var());
+
+ if (heap_var_info == nullptr) {
+ FAIL(heap, "Undeclared identifier instead of heap parameter.");
+ }
+
+ if (!heap_var_info->IsHeap()) {
+ FAIL(heap,
+ "Heap view creation parameter should be the module's heap parameter.");
+ }
+
+ DCHECK(heap_view_info->type()->IsA(AsmType::Heap()));
+ return heap_view_info->type();
+}
+
+bool IsValidAsm(Isolate* isolate, Zone* zone, Script* script,
+ FunctionLiteral* root, std::string* error_message) {
+ error_message->clear();
+
+ AsmTyper typer(isolate, zone, script, root);
+ if (typer.Validate()) {
+ return true;
+ }
+
+ *error_message = typer.error_message();
+ return false;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/asmjs/asm-typer.h b/deps/v8/src/asmjs/asm-typer.h
new file mode 100644
index 0000000000..6b9c70cf00
--- /dev/null
+++ b/deps/v8/src/asmjs/asm-typer.h
@@ -0,0 +1,337 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_ASMJS_ASM_TYPER_H_
+#define SRC_ASMJS_ASM_TYPER_H_
+
+#include <cstdint>
+#include <string>
+#include <unordered_set>
+
+#include "src/allocation.h"
+#include "src/asmjs/asm-types.h"
+#include "src/ast/ast-type-bounds.h"
+#include "src/ast/ast.h"
+#include "src/effects.h"
+#include "src/type-info.h"
+#include "src/types.h"
+#include "src/zone-containers.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class AsmType;
+class AsmTyperHarnessBuilder;
+
+class AsmTyper final {
+ public:
+ enum StandardMember {
+ kHeap = -4,
+ kFFI = -3,
+ kStdlib = -2,
+ kModule = -1,
+ kNone = 0,
+ kInfinity,
+ kNaN,
+ kMathAcos,
+ kMathAsin,
+ kMathAtan,
+ kMathCos,
+ kMathSin,
+ kMathTan,
+ kMathExp,
+ kMathLog,
+ kMathCeil,
+ kMathFloor,
+ kMathSqrt,
+ kMathAbs,
+ kMathClz32,
+ kMathMin,
+ kMathMax,
+ kMathAtan2,
+ kMathPow,
+ kMathImul,
+ kMathFround,
+ kMathE,
+ kMathLN10,
+ kMathLN2,
+ kMathLOG2E,
+ kMathLOG10E,
+ kMathPI,
+ kMathSQRT1_2,
+ kMathSQRT2,
+ };
+
+ ~AsmTyper() = default;
+ AsmTyper(Isolate* isolate, Zone* zone, Script* script, FunctionLiteral* root);
+
+ bool Validate();
+
+ const char* error_message() const { return error_message_; }
+
+ AsmType* TypeOf(AstNode* node) const;
+ StandardMember VariableAsStandardMember(Variable* var);
+
+ typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
+
+ StdlibSet StdlibUses() const { return stdlib_uses_; }
+
+ private:
+ friend class v8::internal::wasm::AsmTyperHarnessBuilder;
+
+ class VariableInfo : public ZoneObject {
+ public:
+ enum Mutability {
+ kInvalidMutability,
+ kLocal,
+ kMutableGlobal,
+ kImmutableGlobal,
+ };
+
+ explicit VariableInfo(AsmType* t) : type_(t) {}
+
+ VariableInfo* Clone(Zone* zone) const;
+
+ bool IsMutable() const {
+ return mutability_ == kLocal || mutability_ == kMutableGlobal;
+ }
+
+ bool IsGlobal() const {
+ return mutability_ == kImmutableGlobal || mutability_ == kMutableGlobal;
+ }
+
+ bool IsStdlib() const { return standard_member_ == kStdlib; }
+ bool IsFFI() const { return standard_member_ == kFFI; }
+ bool IsHeap() const { return standard_member_ == kHeap; }
+
+ void MarkDefined() { missing_definition_ = false; }
+ void FirstForwardUseIs(VariableProxy* var);
+
+ StandardMember standard_member() const { return standard_member_; }
+ void set_standard_member(StandardMember standard_member) {
+ standard_member_ = standard_member;
+ }
+
+ AsmType* type() const { return type_; }
+ void set_type(AsmType* type) { type_ = type; }
+
+ Mutability mutability() const { return mutability_; }
+ void set_mutability(Mutability mutability) { mutability_ = mutability; }
+
+ bool missing_definition() const { return missing_definition_; }
+
+ VariableProxy* first_forward_use() const { return first_forward_use_; }
+
+ static VariableInfo* ForSpecialSymbol(Zone* zone,
+ StandardMember standard_member);
+
+ private:
+ AsmType* type_;
+ StandardMember standard_member_ = kNone;
+ Mutability mutability_ = kInvalidMutability;
+    // missing_definition_ is set to true for forward definitions - i.e., a
+    // use before the definition.
+ bool missing_definition_ = false;
+ // first_forward_use_ holds the AST node that first referenced this
+ // VariableInfo. Used for error messages.
+ VariableProxy* first_forward_use_ = nullptr;
+ };
+
+ // RAII-style manager for the in_function_ member variable.
+ struct FunctionScope {
+ explicit FunctionScope(AsmTyper* typer) : typer_(typer) {
+ DCHECK(!typer_->in_function_);
+ typer_->in_function_ = true;
+ typer_->local_scope_.Clear();
+ typer_->return_type_ = AsmType::None();
+ }
+
+ ~FunctionScope() {
+ DCHECK(typer_->in_function_);
+ typer_->in_function_ = false;
+ }
+
+ AsmTyper* typer_;
+ };
+
+ // FlattenedStatements is an iterator class for ZoneList<Statement*> that
+ // flattens the Block construct in the AST. This is here because we need it in
+ // the tests.
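+  // e.g. (illustrative): for statements { a; { b; { c; } } d; } successive
+  // calls to Next() yield a, b, c, d, skipping the Block nodes themselves.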
+ class FlattenedStatements {
+ public:
+ explicit FlattenedStatements(Zone* zone, ZoneList<Statement*>* s);
+ Statement* Next();
+
+ private:
+ struct Context {
+ explicit Context(ZoneList<Statement*>* s) : statements_(s) {}
+ ZoneList<Statement*>* statements_;
+ int next_index_ = 0;
+ };
+
+ ZoneVector<Context> context_stack_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FlattenedStatements);
+ };
+
+ using ObjectTypeMap = ZoneMap<std::string, VariableInfo*>;
+ void InitializeStdlib();
+ void SetTypeOf(AstNode* node, AsmType* type);
+
+ void AddForwardReference(VariableProxy* proxy, VariableInfo* info);
+ bool AddGlobal(Variable* global, VariableInfo* info);
+ bool AddLocal(Variable* global, VariableInfo* info);
+ // Used for 5.5 GlobalVariableTypeAnnotations
+ VariableInfo* ImportLookup(Property* expr);
+ // 3.3 Environment Lookup
+ // NOTE: In the spec, the lookup function's prototype is
+ //
+ // Lookup(Delta, Gamma, x)
+ //
+ // Delta is the global_scope_ member, and Gamma, local_scope_.
+ VariableInfo* Lookup(Variable* variable);
+
+ // All of the ValidateXXX methods below return AsmType::None() in case of
+ // validation failure.
+
+ // 6.1 ValidateModule
+ AsmType* ValidateModule(FunctionLiteral* fun);
+ AsmType* ValidateGlobalDeclaration(Assignment* assign);
+ // 6.2 ValidateExport
+ AsmType* ExportType(VariableProxy* fun_export);
+ AsmType* ValidateExport(ReturnStatement* exports);
+ // 6.3 ValidateFunctionTable
+ AsmType* ValidateFunctionTable(Assignment* assign);
+ // 6.4 ValidateFunction
+ AsmType* ValidateFunction(FunctionDeclaration* fun_decl);
+ // 6.5 ValidateStatement
+ AsmType* ValidateStatement(Statement* statement);
+ // 6.5.1 BlockStatement
+ AsmType* ValidateBlockStatement(Block* block);
+ // 6.5.2 ExpressionStatement
+ AsmType* ValidateExpressionStatement(ExpressionStatement* expr);
+ // 6.5.3 EmptyStatement
+ AsmType* ValidateEmptyStatement(EmptyStatement* empty);
+ // 6.5.4 IfStatement
+ AsmType* ValidateIfStatement(IfStatement* if_stmt);
+ // 6.5.5 ReturnStatement
+ AsmType* ValidateReturnStatement(ReturnStatement* ret_stmt);
+ // 6.5.6 IterationStatement
+ // 6.5.6.a WhileStatement
+ AsmType* ValidateWhileStatement(WhileStatement* while_stmt);
+ // 6.5.6.b DoWhileStatement
+ AsmType* ValidateDoWhileStatement(DoWhileStatement* do_while);
+ // 6.5.6.c ForStatement
+ AsmType* ValidateForStatement(ForStatement* for_stmt);
+ // 6.5.7 BreakStatement
+ AsmType* ValidateBreakStatement(BreakStatement* brk_stmt);
+ // 6.5.8 ContinueStatement
+ AsmType* ValidateContinueStatement(ContinueStatement* cont_stmt);
+ // 6.5.9 LabelledStatement
+ // NOTE: we don't need to handle these: Labelled statements are
+ // BreakableStatements in our AST, but BreakableStatement is not a concrete
+ // class -- and we're handling all of BreakableStatement's subclasses.
+ // 6.5.10 SwitchStatement
+ AsmType* ValidateSwitchStatement(SwitchStatement* stmt);
+ // 6.6 ValidateCase
+ AsmType* ValidateCase(CaseClause* label, int32_t* case_lbl);
+ // 6.7 ValidateDefault
+ AsmType* ValidateDefault(CaseClause* label);
+ // 6.8 ValidateExpression
+ AsmType* ValidateExpression(Expression* expr);
+ AsmType* ValidateCompareOperation(CompareOperation* cmp);
+ AsmType* ValidateBinaryOperation(BinaryOperation* binop);
+ // 6.8.1 Expression
+ AsmType* ValidateCommaExpression(BinaryOperation* comma);
+ // 6.8.2 NumericLiteral
+ AsmType* ValidateNumericLiteral(Literal* literal);
+ // 6.8.3 Identifier
+ AsmType* ValidateIdentifier(VariableProxy* proxy);
+ // 6.8.4 CallExpression
+ AsmType* ValidateCallExpression(Call* call);
+ // 6.8.5 MemberExpression
+ AsmType* ValidateMemberExpression(Property* prop);
+ // 6.8.6 AssignmentExpression
+ AsmType* ValidateAssignmentExpression(Assignment* assignment);
+ // 6.8.7 UnaryExpression
+ AsmType* ValidateUnaryExpression(UnaryOperation* unop);
+ // 6.8.8 MultiplicativeExpression
+ AsmType* ValidateMultiplicativeExpression(BinaryOperation* binop);
+ // 6.8.9 AdditiveExpression
+ AsmType* ValidateAdditiveExpression(BinaryOperation* binop,
+ uint32_t intish_count);
+ // 6.8.10 ShiftExpression
+ AsmType* ValidateShiftExpression(BinaryOperation* binop);
+ // 6.8.11 RelationalExpression
+ AsmType* ValidateRelationalExpression(CompareOperation* cmpop);
+ // 6.8.12 EqualityExpression
+ AsmType* ValidateEqualityExpression(CompareOperation* cmpop);
+ // 6.8.13 BitwiseANDExpression
+ AsmType* ValidateBitwiseANDExpression(BinaryOperation* binop);
+ // 6.8.14 BitwiseXORExpression
+ AsmType* ValidateBitwiseXORExpression(BinaryOperation* binop);
+ // 6.8.15 BitwiseORExpression
+ AsmType* ValidateBitwiseORExpression(BinaryOperation* binop);
+ // 6.8.16 ConditionalExpression
+ AsmType* ValidateConditionalExpression(Conditional* cond);
+ // 6.9 ValidateCall
+ AsmType* ValidateCall(AsmType* return_type, Call* call);
+ // 6.10 ValidateHeapAccess
+ enum HeapAccessType { LoadFromHeap, StoreToHeap };
+ AsmType* ValidateHeapAccess(Property* heap, HeapAccessType access_type);
+ // 6.11 ValidateFloatCoercion
+ bool IsCallToFround(Call* call);
+ AsmType* ValidateFloatCoercion(Call* call);
+
+ // 5.1 ParameterTypeAnnotations
+ AsmType* ParameterTypeAnnotations(Variable* parameter,
+ Expression* annotation);
+ // 5.2 ReturnTypeAnnotations
+ AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
+ // 5.4 VariableTypeAnnotations
+ AsmType* VariableTypeAnnotations(Expression* initializer);
+ // 5.5 GlobalVariableTypeAnnotations
+ AsmType* ImportExpression(Property* import);
+ AsmType* NewHeapView(CallNew* new_heap_view);
+
+ Isolate* isolate_;
+ Zone* zone_;
+ Script* script_;
+ FunctionLiteral* root_;
+ bool in_function_ = false;
+
+ AsmType* return_type_ = nullptr;
+
+ ZoneVector<VariableInfo*> forward_definitions_;
+ ObjectTypeMap stdlib_types_;
+ ObjectTypeMap stdlib_math_types_;
+
+ // The ASM module name. This member is used to prevent globals from redefining
+ // the module name.
+ VariableInfo* module_info_;
+ Handle<String> module_name_;
+
+ // 3 Environments
+ ZoneHashMap global_scope_; // 3.1 Global environment
+ ZoneHashMap local_scope_; // 3.2 Variable environment
+
+ std::uintptr_t stack_limit_;
+ bool stack_overflow_ = false;
+ ZoneMap<AstNode*, AsmType*> node_types_;
+ static const int kErrorMessageLimit = 100;
+ AsmType* fround_type_;
+ AsmType* ffi_type_;
+ char error_message_[kErrorMessageLimit];
+ StdlibSet stdlib_uses_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsmTyper);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // SRC_ASMJS_ASM_TYPER_H_
diff --git a/deps/v8/src/asmjs/asm-types.cc b/deps/v8/src/asmjs/asm-types.cc
new file mode 100644
index 0000000000..8f3c9a51e6
--- /dev/null
+++ b/deps/v8/src/asmjs/asm-types.cc
@@ -0,0 +1,354 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/asmjs/asm-types.h"
+
+#include <cinttypes>
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+AsmCallableType* AsmType::AsCallableType() {
+ if (AsValueType() != nullptr) {
+ return nullptr;
+ }
+
+ return reinterpret_cast<AsmCallableType*>(this);
+}
+
+std::string AsmType::Name() {
+ AsmValueType* avt = this->AsValueType();
+ if (avt != nullptr) {
+ switch (avt->Bitset()) {
+#define RETURN_TYPE_NAME(CamelName, string_name, number, parent_types) \
+ case AsmValueType::kAsm##CamelName: \
+ return string_name;
+ FOR_EACH_ASM_VALUE_TYPE_LIST(RETURN_TYPE_NAME)
+#undef RETURN_TYPE_NAME
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ return this->AsCallableType()->Name();
+}
+
+bool AsmType::IsExactly(AsmType* that) {
+ // TODO(jpp): maybe this can become this == that.
+ AsmValueType* avt = this->AsValueType();
+ if (avt != nullptr) {
+ AsmValueType* tavt = that->AsValueType();
+ if (tavt == nullptr) {
+ return false;
+ }
+ return avt->Bitset() == tavt->Bitset();
+ }
+
+ // TODO(jpp): is it useful to allow non-value types to be tested with
+ // IsExactly?
+ return that == this;
+}
+
+bool AsmType::IsA(AsmType* that) {
+ // IsA is used for querying inheritance relationships. Therefore it is only
+ // meaningful for basic types.
+ if (auto* avt = this->AsValueType()) {
+ if (auto* tavt = that->AsValueType()) {
+ return (avt->Bitset() & tavt->Bitset()) == tavt->Bitset();
+ }
+ return false;
+ }
+
+ if (auto* as_callable = this->AsCallableType()) {
+ return as_callable->IsA(that);
+ }
+
+ UNREACHABLE();
+ return that == this;
+}
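+
+// A worked example of the subtype check above (an illustrative note, based
+// on the type list in asm-types.h): Signed's bitset folds in the Int and
+// Extern parent bits, so Signed()->IsA(Extern()) holds, while
+// Extern()->IsA(Signed()) does not.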
+
+int32_t AsmType::ElementSizeInBytes() {
+ auto* value = AsValueType();
+ if (value == nullptr) {
+ return AsmType::kNotHeapType;
+ }
+ switch (value->Bitset()) {
+ case AsmValueType::kAsmInt8Array:
+ case AsmValueType::kAsmUint8Array:
+ return 1;
+ case AsmValueType::kAsmInt16Array:
+ case AsmValueType::kAsmUint16Array:
+ return 2;
+ case AsmValueType::kAsmInt32Array:
+ case AsmValueType::kAsmUint32Array:
+ case AsmValueType::kAsmFloat32Array:
+ return 4;
+ case AsmValueType::kAsmFloat64Array:
+ return 8;
+ default:
+ return AsmType::kNotHeapType;
+ }
+}
+
+AsmType* AsmType::LoadType() {
+ auto* value = AsValueType();
+ if (value == nullptr) {
+ return AsmType::None();
+ }
+ switch (value->Bitset()) {
+ case AsmValueType::kAsmInt8Array:
+ case AsmValueType::kAsmUint8Array:
+ case AsmValueType::kAsmInt16Array:
+ case AsmValueType::kAsmUint16Array:
+ case AsmValueType::kAsmInt32Array:
+ case AsmValueType::kAsmUint32Array:
+ return AsmType::Intish();
+ case AsmValueType::kAsmFloat32Array:
+ return AsmType::FloatQ();
+ case AsmValueType::kAsmFloat64Array:
+ return AsmType::DoubleQ();
+ default:
+ return AsmType::None();
+ }
+}
+
+AsmType* AsmType::StoreType() {
+ auto* value = AsValueType();
+ if (value == nullptr) {
+ return AsmType::None();
+ }
+ switch (value->Bitset()) {
+ case AsmValueType::kAsmInt8Array:
+ case AsmValueType::kAsmUint8Array:
+ case AsmValueType::kAsmInt16Array:
+ case AsmValueType::kAsmUint16Array:
+ case AsmValueType::kAsmInt32Array:
+ case AsmValueType::kAsmUint32Array:
+ return AsmType::Intish();
+ case AsmValueType::kAsmFloat32Array:
+ return AsmType::FloatishDoubleQ();
+ case AsmValueType::kAsmFloat64Array:
+ return AsmType::FloatQDoubleQ();
+ default:
+ return AsmType::None();
+ }
+}
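+
+// A note on the two tables above (illustrative): stores are more permissive
+// than loads. For example, a store to a Float64Array accepts float? as well
+// as double? (FloatQDoubleQ), because the value is converted on store,
+// whereas a load from it always yields double?.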
+
+bool AsmCallableType::IsA(AsmType* other) {
+ return other->AsCallableType() == this;
+}
+
+std::string AsmFunctionType::Name() {
+ std::string ret;
+ ret += "(";
+ for (size_t ii = 0; ii < args_.size(); ++ii) {
+ ret += args_[ii]->Name();
+ if (ii != args_.size() - 1) {
+ ret += ", ";
+ }
+ }
+ ret += ") -> ";
+ ret += return_type_->Name();
+ return ret;
+}
+
+namespace {
+class AsmFroundType final : public AsmCallableType {
+ public:
+ friend AsmType;
+
+ AsmFroundType() : AsmCallableType() {}
+
+ bool CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) override;
+
+ std::string Name() override { return "fround"; }
+};
+} // namespace
+
+AsmType* AsmType::FroundType(Zone* zone) {
+ auto* Fround = new (zone) AsmFroundType();
+ return reinterpret_cast<AsmType*>(Fround);
+}
+
+bool AsmFroundType::CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) {
+ if (args.size() != 1) {
+ return false;
+ }
+
+ auto* arg = args[0];
+ if (!arg->IsA(AsmType::Floatish()) && !arg->IsA(AsmType::DoubleQ()) &&
+ !arg->IsA(AsmType::Signed()) && !arg->IsA(AsmType::Unsigned())) {
+ return false;
+ }
+
+ return true;
+}
+
+namespace {
+class AsmMinMaxType final : public AsmCallableType {
+ private:
+ friend AsmType;
+
+ AsmMinMaxType(AsmType* dest, AsmType* src)
+ : AsmCallableType(), return_type_(dest), arg_(src) {}
+
+ bool CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) override {
+ if (!return_type_->IsExactly(return_type)) {
+ return false;
+ }
+
+ if (args.size() < 2) {
+ return false;
+ }
+
+ for (size_t ii = 0; ii < args.size(); ++ii) {
+ if (!args[ii]->IsA(arg_)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ std::string Name() override {
+ return "(" + arg_->Name() + ", " + arg_->Name() + "...) -> " +
+ return_type_->Name();
+ }
+
+ AsmType* return_type_;
+ AsmType* arg_;
+};
+} // namespace
+
+AsmType* AsmType::MinMaxType(Zone* zone, AsmType* dest, AsmType* src) {
+ DCHECK(dest->AsValueType() != nullptr);
+ DCHECK(src->AsValueType() != nullptr);
+ auto* MinMax = new (zone) AsmMinMaxType(dest, src);
+ return reinterpret_cast<AsmType*>(MinMax);
+}
+
+bool AsmFFIType::CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) {
+ if (return_type->IsExactly(AsmType::Float())) {
+ return false;
+ }
+
+ for (size_t ii = 0; ii < args.size(); ++ii) {
+ if (!args[ii]->IsA(AsmType::Extern())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool AsmFunctionType::IsA(AsmType* other) {
+ auto* that = other->AsFunctionType();
+ if (that == nullptr) {
+ return false;
+ }
+ if (!return_type_->IsExactly(that->return_type_)) {
+ return false;
+ }
+
+ if (args_.size() != that->args_.size()) {
+ return false;
+ }
+
+ for (size_t ii = 0; ii < args_.size(); ++ii) {
+ if (!args_[ii]->IsExactly(that->args_[ii])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool AsmFunctionType::CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) {
+ if (!return_type_->IsExactly(return_type)) {
+ return false;
+ }
+
+ if (args_.size() != args.size()) {
+ return false;
+ }
+
+ for (size_t ii = 0; ii < args_.size(); ++ii) {
+ if (!args[ii]->IsA(args_[ii])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+std::string AsmOverloadedFunctionType::Name() {
+ std::string ret;
+
+ for (size_t ii = 0; ii < overloads_.size(); ++ii) {
+ if (ii != 0) {
+ ret += " /\\ ";
+ }
+ ret += overloads_[ii]->Name();
+ }
+
+ return ret;
+}
+
+bool AsmOverloadedFunctionType::CanBeInvokedWith(
+ AsmType* return_type, const ZoneVector<AsmType*>& args) {
+ for (size_t ii = 0; ii < overloads_.size(); ++ii) {
+ if (overloads_[ii]->AsCallableType()->CanBeInvokedWith(return_type, args)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void AsmOverloadedFunctionType::AddOverload(AsmType* overload) {
+ DCHECK(overload->AsCallableType() != nullptr);
+ overloads_.push_back(overload);
+}
+
+AsmFunctionTableType::AsmFunctionTableType(size_t length, AsmType* signature)
+ : length_(length), signature_(signature) {
+ DCHECK(signature_ != nullptr);
+ DCHECK(signature_->AsFunctionType() != nullptr);
+}
+
+namespace {
+// ToString is used for reporting function table names. It converts its
+// argument to uint32_t because asm.js integers are 32 bits wide, which
+// effectively limits the maximum function table length.
+std::string ToString(size_t s) {
+ auto u32 = static_cast<uint32_t>(s);
+ // 16 bytes is more than enough to represent a 32-bit integer as a base 10
+ // string.
+ char digits[16];
+ int length = base::OS::SNPrintF(digits, arraysize(digits), "%" PRIu32, u32);
+ DCHECK_NE(length, -1);
+ return std::string(digits, length);
+}
+} // namespace
+
+std::string AsmFunctionTableType::Name() {
+ return "(" + signature_->Name() + ")[" + ToString(length_) + "]";
+}
+
+bool AsmFunctionTableType::CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) {
+ return signature_->AsCallableType()->CanBeInvokedWith(return_type, args);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
new file mode 100644
index 0000000000..c307bf534b
--- /dev/null
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -0,0 +1,347 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_ASMJS_ASM_TYPES_H_
+#define SRC_ASMJS_ASM_TYPES_H_
+
+#include <string>
+
+#include "src/base/macros.h"
+#include "src/zone-containers.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class AsmType;
+class AsmFFIType;
+class AsmFunctionType;
+class AsmOverloadedFunctionType;
+class AsmFunctionTableType;
+
+// List of V(CamelName, string_name, number, parent_types)
+#define FOR_EACH_ASM_VALUE_TYPE_LIST(V) \
+ /* These tags are not types that are expressible in the asm source. They */ \
+ /* are used to express semantic information about the types they tag. */ \
+ V(Heap, "[]", 1, 0) \
+ V(FloatishDoubleQ, "floatish|double?", 2, 0) \
+ V(FloatQDoubleQ, "float?|double?", 3, 0) \
+ /* The following are actual types that appear in the asm source. */ \
+ V(Void, "void", 4, 0) \
+ V(Extern, "extern", 5, 0) \
+ V(DoubleQ, "double?", 6, kAsmFloatishDoubleQ | kAsmFloatQDoubleQ) \
+ V(Double, "double", 7, kAsmDoubleQ | kAsmExtern) \
+ V(Intish, "intish", 8, 0) \
+ V(Int, "int", 9, kAsmIntish) \
+ V(Signed, "signed", 10, kAsmInt | kAsmExtern) \
+ V(Unsigned, "unsigned", 11, kAsmInt) \
+ V(FixNum, "fixnum", 12, kAsmSigned | kAsmUnsigned) \
+ V(Floatish, "floatish", 13, kAsmFloatishDoubleQ) \
+ V(FloatQ, "float?", 14, kAsmFloatQDoubleQ | kAsmFloatish) \
+ V(Float, "float", 15, kAsmFloatQ) \
+ /* Types used for expressing the Heap accesses. */ \
+ V(Uint8Array, "Uint8Array", 16, kAsmHeap) \
+ V(Int8Array, "Int8Array", 17, kAsmHeap) \
+ V(Uint16Array, "Uint16Array", 18, kAsmHeap) \
+ V(Int16Array, "Int16Array", 19, kAsmHeap) \
+ V(Uint32Array, "Uint32Array", 20, kAsmHeap) \
+ V(Int32Array, "Int32Array", 21, kAsmHeap) \
+ V(Float32Array, "Float32Array", 22, kAsmHeap) \
+ V(Float64Array, "Float64Array", 23, kAsmHeap) \
+ /* None is used to represent errors in the type checker. */ \
+ V(None, "<none>", 31, 0)
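+
+// How the encoding works (an illustrative expansion of DEFINE_TAG in
+// AsmValueType below): each |number| contributes one distinct bit and
+// |parent_types| ORs in the bits of every supertype, e.g.
+//   kAsmSigned = (1u << 10) | kAsmInt | kAsmExtern
+// so the subtype test in AsmType::IsA reduces to a single mask comparison:
+//   (bits & parent_bits) == parent_bits.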
+
+// List of V(CamelName)
+#define FOR_EACH_ASM_CALLABLE_TYPE_LIST(V) \
+ V(FunctionType) \
+ V(FFIType) \
+ V(OverloadedFunctionType) \
+ V(FunctionTableType)
+
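+// AsmValueType is never materialized as an object; value-type "pointers"
+// are tagged integers. For example (an illustrative sketch of New() below),
+// AsmType::Double() is simply
+//   reinterpret_cast<AsmType*>(kAsmDouble | kAsmValueTypeTag)
+// and AsValueType() recovers it by testing the low bit.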
+class AsmValueType {
+ public:
+ typedef uint32_t bitset_t;
+
+ enum : uint32_t {
+#define DEFINE_TAG(CamelName, string_name, number, parent_types) \
+ kAsm##CamelName = ((1u << (number)) | (parent_types)),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(DEFINE_TAG)
+#undef DEFINE_TAG
+ kAsmUnknown = 0,
+ kAsmValueTypeTag = 1u
+ };
+
+ private:
+ friend class AsmType;
+
+ static AsmValueType* AsValueType(AsmType* type) {
+ if ((reinterpret_cast<uintptr_t>(type) & kAsmValueTypeTag) ==
+ kAsmValueTypeTag) {
+ return reinterpret_cast<AsmValueType*>(type);
+ }
+ return nullptr;
+ }
+
+ bitset_t Bitset() const {
+ DCHECK((reinterpret_cast<uintptr_t>(this) & kAsmValueTypeTag) ==
+ kAsmValueTypeTag);
+ return static_cast<bitset_t>(reinterpret_cast<uintptr_t>(this) &
+ ~kAsmValueTypeTag);
+ }
+
+ static AsmType* New(bitset_t bits) {
+ DCHECK_EQ((bits & kAsmValueTypeTag), 0);
+ return reinterpret_cast<AsmType*>(
+ static_cast<uintptr_t>(bits | kAsmValueTypeTag));
+ }
+
+ // AsmValueTypes can't be created except through AsmValueType::New.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsmValueType);
+};
+
+class AsmCallableType : public ZoneObject {
+ public:
+ virtual std::string Name() = 0;
+
+ virtual bool CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) = 0;
+
+#define DECLARE_CAST(CamelName) \
+ virtual Asm##CamelName* As##CamelName() { return nullptr; }
+ FOR_EACH_ASM_CALLABLE_TYPE_LIST(DECLARE_CAST)
+#undef DECLARE_CAST
+
+ protected:
+ AsmCallableType() = default;
+ virtual ~AsmCallableType() = default;
+ virtual bool IsA(AsmType* other);
+
+ private:
+ friend class AsmType;
+
+ DISALLOW_COPY_AND_ASSIGN(AsmCallableType);
+};
+
+class AsmFunctionType final : public AsmCallableType {
+ public:
+ AsmFunctionType* AsFunctionType() final { return this; }
+
+ void AddArgument(AsmType* type) { args_.push_back(type); }
+ const ZoneVector<AsmType*> Arguments() const { return args_; }
+ AsmType* ReturnType() const { return return_type_; }
+
+ bool CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) override;
+
+ protected:
+ AsmFunctionType(Zone* zone, AsmType* return_type)
+ : return_type_(return_type), args_(zone) {}
+
+ private:
+ friend AsmType;
+
+ std::string Name() override;
+ bool IsA(AsmType* other) override;
+
+ AsmType* return_type_;
+ ZoneVector<AsmType*> args_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsmFunctionType);
+};
+
+class AsmOverloadedFunctionType final : public AsmCallableType {
+ public:
+ AsmOverloadedFunctionType* AsOverloadedFunctionType() override {
+ return this;
+ }
+
+ void AddOverload(AsmType* overload);
+
+ private:
+ friend AsmType;
+
+ explicit AsmOverloadedFunctionType(Zone* zone) : overloads_(zone) {}
+
+ std::string Name() override;
+ bool CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) override;
+
+ ZoneVector<AsmType*> overloads_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsmOverloadedFunctionType);
+};
+
+class AsmFFIType final : public AsmCallableType {
+ public:
+ AsmFFIType* AsFFIType() override { return this; }
+
+ std::string Name() override { return "Function"; }
+ bool CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) override;
+
+ private:
+ friend AsmType;
+
+ AsmFFIType() = default;
+
+ DISALLOW_COPY_AND_ASSIGN(AsmFFIType);
+};
+
+class AsmFunctionTableType : public AsmCallableType {
+ public:
+ AsmFunctionTableType* AsFunctionTableType() override { return this; }
+
+ std::string Name() override;
+
+ bool CanBeInvokedWith(AsmType* return_type,
+ const ZoneVector<AsmType*>& args) override;
+
+ size_t length() const { return length_; }
+ AsmType* signature() { return signature_; }
+
+ private:
+ friend class AsmType;
+
+ AsmFunctionTableType(size_t length, AsmType* signature);
+
+ size_t length_;
+ AsmType* signature_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsmFunctionTableType);
+};
+
+class AsmType {
+ public:
+#define DEFINE_CONSTRUCTOR(CamelName, string_name, number, parent_types) \
+ static AsmType* CamelName() { \
+ return AsmValueType::New(AsmValueType::kAsm##CamelName); \
+ }
+ FOR_EACH_ASM_VALUE_TYPE_LIST(DEFINE_CONSTRUCTOR)
+#undef DEFINE_CONSTRUCTOR
+
+#define DEFINE_CAST(CamelCase) \
+ Asm##CamelCase* As##CamelCase() { \
+ if (AsValueType() != nullptr) { \
+ return nullptr; \
+ } \
+ return reinterpret_cast<AsmCallableType*>(this)->As##CamelCase(); \
+ }
+ FOR_EACH_ASM_CALLABLE_TYPE_LIST(DEFINE_CAST)
+#undef DEFINE_CAST
+ AsmValueType* AsValueType() { return AsmValueType::AsValueType(this); }
+ AsmCallableType* AsCallableType();
+
+ // A function returning ret. Callers still need to invoke AddArgument on the
+ // returned type to fully create this type.
+ static AsmType* Function(Zone* zone, AsmType* ret) {
+ AsmFunctionType* f = new (zone) AsmFunctionType(zone, ret);
+ return reinterpret_cast<AsmType*>(f);
+ }
+
+ // Overloaded function types. Not expressible in asm source, but useful for
+ // representing the overloaded stdlib functions.
+ static AsmType* OverloadedFunction(Zone* zone) {
+ auto* f = new (zone) AsmOverloadedFunctionType(zone);
+ return reinterpret_cast<AsmType*>(f);
+ }
+
+ // The type for fround(src).
+ static AsmType* FroundType(Zone* zone);
+
+ // The (variadic) type for min and max.
+ static AsmType* MinMaxType(Zone* zone, AsmType* dest, AsmType* src);
+
+ // The type for foreign functions.
+ static AsmType* FFIType(Zone* zone) {
+ auto* f = new (zone) AsmFFIType();
+ return reinterpret_cast<AsmType*>(f);
+ }
+
+ // The type for function tables.
+ static AsmType* FunctionTableType(Zone* zone, size_t length,
+ AsmType* signature) {
+ auto* f = new (zone) AsmFunctionTableType(length, signature);
+ return reinterpret_cast<AsmType*>(f);
+ }
+
+ std::string Name();
+ // IsExactly returns true if this is the exact same type as that. For
+ // non-value types (e.g., callables), this returns this == that.
+ bool IsExactly(AsmType* that);
+ // IsA is used to query whether this is an instance of that (i.e., if this is
+ // a type derived from that). For non-value types (e.g., callables), this
+ // returns this == that.
+ bool IsA(AsmType* that);
+
+ // Types allowed in return statements. void is the type for returns without
+ // an expression.
+ bool IsReturnType() {
+ return this == AsmType::Void() || this == AsmType::Double() ||
+ this == AsmType::Signed() || this == AsmType::Float();
+ }
+
+ // Converts this to the corresponding valid return type.
+ AsmType* ToReturnType() {
+ if (this->IsA(AsmType::Signed())) {
+ return AsmType::Signed();
+ }
+ if (this->IsA(AsmType::Double())) {
+ return AsmType::Double();
+ }
+ if (this->IsA(AsmType::Float())) {
+ return AsmType::Float();
+ }
+ if (this->IsA(AsmType::Void())) {
+ return AsmType::Void();
+ }
+ return AsmType::None();
+ }
+
+ // Types allowed to be parameters in asm functions.
+ bool IsParameterType() {
+ return this == AsmType::Double() || this == AsmType::Int() ||
+ this == AsmType::Float();
+ }
+
+ // Converts this to the corresponding valid parameter type.
+ AsmType* ToParameterType() {
+ if (this->IsA(AsmType::Int())) {
+ return AsmType::Int();
+ }
+ if (this->IsA(AsmType::Double())) {
+ return AsmType::Double();
+ }
+ if (this->IsA(AsmType::Float())) {
+ return AsmType::Float();
+ }
+ return AsmType::None();
+ }
+
+ // Types allowed to be compared using the comparison operators.
+ bool IsComparableType() {
+ return this == AsmType::Double() || this == AsmType::Signed() ||
+ this == AsmType::Unsigned() || this == AsmType::Float();
+ }
+
+ // The following methods inspect the traits of the heap view types'
+ // element types.
+ enum : int32_t { kNotHeapType = -1 };
+
+ // Returns the element size if this is a heap type. Otherwise returns
+ // kNotHeapType.
+ int32_t ElementSizeInBytes();
+ // Returns the load type if this is a heap type. AsmType::None is returned if
+ // this is not a heap type.
+ AsmType* LoadType();
+ // Returns the store type if this is a heap type. AsmType::None is returned if
+ // this is not a heap type.
+ AsmType* StoreType();
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // SRC_ASMJS_ASM_TYPES_H_
diff --git a/deps/v8/src/wasm/asm-wasm-builder.cc b/deps/v8/src/asmjs/asm-wasm-builder.cc
index d16d3a8bdd..6419459307 100644
--- a/deps/v8/src/wasm/asm-wasm-builder.cc
+++ b/deps/v8/src/asmjs/asm-wasm-builder.cc
@@ -10,14 +10,15 @@
#endif
#include <math.h>
-#include "src/wasm/asm-wasm-builder.h"
+#include "src/asmjs/asm-types.h"
+#include "src/asmjs/asm-wasm-builder.h"
+#include "src/wasm/switch-logic.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/codegen.h"
-#include "src/type-cache.h"
namespace v8 {
namespace internal {
@@ -30,35 +31,40 @@ namespace wasm {
if (HasStackOverflow()) return; \
} while (false)
+enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
-class AsmWasmBuilderImpl : public AstVisitor {
+struct ForeignVariable {
+ Handle<Name> name;
+ Variable* var;
+ LocalType type;
+};
+
+class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
- Handle<Object> foreign, AsmTyper* typer)
- : local_variables_(HashMap::PointersMatch,
+ AsmTyper* typer)
+ : local_variables_(base::HashMap::PointersMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
- functions_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ functions_(base::HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
- global_variables_(HashMap::PointersMatch,
+ global_variables_(base::HashMap::PointersMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
- in_function_(false),
- is_set_op_(false),
- marking_exported(false),
+ scope_(kModuleScope),
builder_(new (zone) WasmModuleBuilder(zone)),
current_function_builder_(nullptr),
literal_(literal),
isolate_(isolate),
zone_(zone),
- foreign_(foreign),
typer_(typer),
- cache_(TypeCache::Get()),
breakable_blocks_(zone),
- block_size_(0),
+ foreign_variables_(zone),
init_function_index_(0),
+ foreign_init_function_index_(0),
next_table_index_(0),
- function_tables_(HashMap::PointersMatch,
+ function_tables_(base::HashMap::PointersMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
imported_function_table_(this) {
@@ -67,38 +73,73 @@ class AsmWasmBuilderImpl : public AstVisitor {
void InitializeInitFunction() {
init_function_index_ = builder_->AddFunction();
+ FunctionSig::Builder b(zone(), 0, 0);
current_function_builder_ = builder_->FunctionAt(init_function_index_);
- current_function_builder_->ReturnType(kAstStmt);
+ current_function_builder_->SetSignature(b.Build());
builder_->MarkStartFunction(init_function_index_);
current_function_builder_ = nullptr;
}
- void Compile() {
+ void BuildForeignInitFunction() {
+ foreign_init_function_index_ = builder_->AddFunction();
+ FunctionSig::Builder b(zone(), 0, foreign_variables_.size());
+ for (auto i = foreign_variables_.begin(); i != foreign_variables_.end();
+ ++i) {
+ b.AddParam(i->type);
+ }
+ current_function_builder_ =
+ builder_->FunctionAt(foreign_init_function_index_);
+ current_function_builder_->SetExported();
+ std::string raw_name = "__foreign_init__";
+ current_function_builder_->SetName(raw_name.data(),
+ static_cast<int>(raw_name.size()));
+ current_function_builder_->SetSignature(b.Build());
+ for (size_t pos = 0; pos < foreign_variables_.size(); ++pos) {
+ current_function_builder_->EmitGetLocal(static_cast<uint32_t>(pos));
+ ForeignVariable* fv = &foreign_variables_[pos];
+ uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
+ current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
+ }
+ current_function_builder_ = nullptr;
+ }
+
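+ // Intended flow (inferred from this patch): GetForeignArgs() exposes the
+ // foreign property names in parameter order, so the caller can look them
+ // up on the foreign object and invoke the exported __foreign_init__ with
+ // the results; the loop in BuildForeignInitFunction then copies each
+ // value into its wasm global.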
+ i::Handle<i::FixedArray> GetForeignArgs() {
+ i::Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
+ static_cast<int>(foreign_variables_.size()));
+ for (size_t i = 0; i < foreign_variables_.size(); ++i) {
+ ForeignVariable* fv = &foreign_variables_[i];
+ ret->set(static_cast<int>(i), *fv->name);
+ }
+ return ret;
+ }
+
+ void Build() {
InitializeInitFunction();
RECURSE(VisitFunctionLiteral(literal_));
+ BuildForeignInitFunction();
}
void VisitVariableDeclaration(VariableDeclaration* decl) {}
void VisitFunctionDeclaration(FunctionDeclaration* decl) {
- DCHECK(!in_function_);
+ DCHECK_EQ(kModuleScope, scope_);
DCHECK_NULL(current_function_builder_);
- uint16_t index = LookupOrInsertFunction(decl->proxy()->var());
+ uint32_t index = LookupOrInsertFunction(decl->proxy()->var());
current_function_builder_ = builder_->FunctionAt(index);
- in_function_ = true;
+ scope_ = kFuncScope;
RECURSE(Visit(decl->fun()));
- in_function_ = false;
+ scope_ = kModuleScope;
current_function_builder_ = nullptr;
local_variables_.Clear();
}
- void VisitImportDeclaration(ImportDeclaration* decl) {}
-
- void VisitExportDeclaration(ExportDeclaration* decl) {}
-
void VisitStatements(ZoneList<Statement*>* stmts) {
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
+ ExpressionStatement* e = stmt->AsExpressionStatement();
+ if (e != nullptr && e->expression()->IsUndefinedLiteral()) {
+ continue;
+ }
RECURSE(Visit(stmt));
if (stmt->IsJump()) break;
}
@@ -115,12 +156,10 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
}
}
- if (in_function_) {
+ if (scope_ == kFuncScope) {
BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
- false,
- static_cast<byte>(stmt->statements()->length()));
+ false);
RECURSE(VisitStatements(stmt->statements()));
- DCHECK(block_size_ >= 0);
} else {
RECURSE(VisitStatements(stmt->statements()));
}
@@ -128,25 +167,17 @@ class AsmWasmBuilderImpl : public AstVisitor {
class BlockVisitor {
private:
- int prev_block_size_;
- uint32_t index_;
AsmWasmBuilderImpl* builder_;
public:
BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
- WasmOpcode opcode, bool is_loop, int initial_block_size)
+ WasmOpcode opcode, bool is_loop)
: builder_(builder) {
builder_->breakable_blocks_.push_back(std::make_pair(stmt, is_loop));
builder_->current_function_builder_->Emit(opcode);
- index_ =
- builder_->current_function_builder_->EmitEditableVarIntImmediate();
- prev_block_size_ = builder_->block_size_;
- builder_->block_size_ = initial_block_size;
}
~BlockVisitor() {
- builder_->current_function_builder_->EditVarIntImmediate(
- index_, builder_->block_size_);
- builder_->block_size_ = prev_block_size_;
+ builder_->current_function_builder_->Emit(kExprEnd);
builder_->breakable_blocks_.pop_back();
}
};
@@ -160,25 +191,24 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
void VisitIfStatement(IfStatement* stmt) {
- DCHECK(in_function_);
- if (stmt->HasElseStatement()) {
- current_function_builder_->Emit(kExprIfElse);
- } else {
- current_function_builder_->Emit(kExprIf);
- }
+ DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition()));
+ current_function_builder_->Emit(kExprIf);
+ // WASM ifs come with implicit blocks for both arms.
+ breakable_blocks_.push_back(std::make_pair(nullptr, false));
if (stmt->HasThenStatement()) {
RECURSE(Visit(stmt->then_statement()));
- } else {
- current_function_builder_->Emit(kExprNop);
}
if (stmt->HasElseStatement()) {
+ current_function_builder_->Emit(kExprElse);
RECURSE(Visit(stmt->else_statement()));
}
+ current_function_builder_->Emit(kExprEnd);
+ breakable_blocks_.pop_back();
}
void VisitContinueStatement(ContinueStatement* stmt) {
- DCHECK(in_function_);
+ DCHECK_EQ(kFuncScope, scope_);
DCHECK_NOT_NULL(stmt->target());
int i = static_cast<int>(breakable_blocks_.size()) - 1;
int block_distance = 0;
@@ -194,12 +224,12 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
}
DCHECK(i >= 0);
- current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
- current_function_builder_->Emit(kExprNop);
+ current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+ current_function_builder_->EmitVarInt(block_distance);
}
void VisitBreakStatement(BreakStatement* stmt) {
- DCHECK(in_function_);
+ DCHECK_EQ(kFuncScope, scope_);
DCHECK_NOT_NULL(stmt->target());
int i = static_cast<int>(breakable_blocks_.size()) - 1;
int block_distance = 0;
@@ -217,123 +247,189 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
}
DCHECK(i >= 0);
- current_function_builder_->EmitWithVarInt(kExprBr, block_distance);
- current_function_builder_->Emit(kExprNop);
+ current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+ current_function_builder_->EmitVarInt(block_distance);
}
void VisitReturnStatement(ReturnStatement* stmt) {
- if (in_function_) {
- current_function_builder_->Emit(kExprReturn);
+ if (scope_ == kModuleScope) {
+ scope_ = kExportScope;
+ RECURSE(Visit(stmt->expression()));
+ scope_ = kModuleScope;
+ } else if (scope_ == kFuncScope) {
+ RECURSE(Visit(stmt->expression()));
+ uint8_t arity =
+ TypeOf(stmt->expression()) == kAstStmt ? ARITY_0 : ARITY_1;
+ current_function_builder_->EmitWithU8(kExprReturn, arity);
} else {
- marking_exported = true;
- }
- RECURSE(Visit(stmt->expression()));
- if (!in_function_) {
- marking_exported = false;
+ UNREACHABLE();
}
}
void VisitWithStatement(WithStatement* stmt) { UNREACHABLE(); }
- void SetLocalTo(uint16_t index, int value) {
- current_function_builder_->Emit(kExprSetLocal);
- AddLeb128(index, true);
- // TODO(bradnelson): variable size
- byte code[] = {WASM_I32V(value)};
- current_function_builder_->EmitCode(code, sizeof(code));
- block_size_++;
- }
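+ // Lowers one node of the balanced case tree built by OrderCases (an
+ // illustrative summary): a run of cases becomes a br_table over
+ // [begin, end], a single value becomes an equality test, and tag values
+ // outside the range are routed to the left/right subtrees with ifs.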
+ void HandleCase(CaseNode* node,
+ ZoneMap<int, unsigned int>& case_to_block,
+ VariableProxy* tag, int default_block, int if_depth) {
+ int prev_if_depth = if_depth;
+ if (node->left != nullptr) {
+ VisitVariableProxy(tag);
+ current_function_builder_->EmitI32Const(node->begin);
+ current_function_builder_->Emit(kExprI32LtS);
+ current_function_builder_->Emit(kExprIf);
+ if_depth++;
+ breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ HandleCase(node->left, case_to_block, tag, default_block, if_depth);
+ current_function_builder_->Emit(kExprElse);
+ }
+ if (node->right != nullptr) {
+ VisitVariableProxy(tag);
+ current_function_builder_->EmitI32Const(node->end);
+ current_function_builder_->Emit(kExprI32GtS);
+ current_function_builder_->Emit(kExprIf);
+ if_depth++;
+ breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ HandleCase(node->right, case_to_block, tag, default_block, if_depth);
+ current_function_builder_->Emit(kExprElse);
+ }
+ if (node->begin == node->end) {
+ VisitVariableProxy(tag);
+ current_function_builder_->EmitI32Const(node->begin);
+ current_function_builder_->Emit(kExprI32Eq);
+ current_function_builder_->Emit(kExprIf);
+ DCHECK(case_to_block.find(node->begin) != case_to_block.end());
+ current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+ current_function_builder_->EmitVarInt(1 + if_depth +
+ case_to_block[node->begin]);
+ current_function_builder_->Emit(kExprEnd);
+ } else {
+ if (node->begin != 0) {
+ VisitVariableProxy(tag);
+ current_function_builder_->EmitI32Const(node->begin);
+ current_function_builder_->Emit(kExprI32Sub);
+ } else {
+ VisitVariableProxy(tag);
+ }
+ current_function_builder_->EmitWithU8(kExprBrTable, ARITY_0);
+ current_function_builder_->EmitVarInt(node->end - node->begin + 1);
+ for (int v = node->begin; v <= node->end; v++) {
+ if (case_to_block.find(v) != case_to_block.end()) {
+ byte break_code[] = {BR_TARGET(if_depth + case_to_block[v])};
+ current_function_builder_->EmitCode(break_code, sizeof(break_code));
+ } else {
+ byte break_code[] = {BR_TARGET(if_depth + default_block)};
+ current_function_builder_->EmitCode(break_code, sizeof(break_code));
+ }
+ if (v == kMaxInt) {
+ break;
+ }
+ }
+ byte break_code[] = {BR_TARGET(if_depth + default_block)};
+ current_function_builder_->EmitCode(break_code, sizeof(break_code));
+ }
- void CompileCase(CaseClause* clause, uint16_t fall_through,
- VariableProxy* tag) {
- Literal* label = clause->label()->AsLiteral();
- DCHECK_NOT_NULL(label);
- block_size_++;
- current_function_builder_->Emit(kExprIf);
- current_function_builder_->Emit(kExprI32Ior);
- current_function_builder_->Emit(kExprI32Eq);
- VisitVariableProxy(tag);
- VisitLiteral(label);
- current_function_builder_->Emit(kExprGetLocal);
- AddLeb128(fall_through, true);
- BlockVisitor visitor(this, nullptr, kExprBlock, false, 0);
- SetLocalTo(fall_through, 1);
- ZoneList<Statement*>* stmts = clause->statements();
- block_size_ += stmts->length();
- RECURSE(VisitStatements(stmts));
+ while (if_depth-- != prev_if_depth) {
+ breakable_blocks_.pop_back();
+ current_function_builder_->Emit(kExprEnd);
+ }
}
void VisitSwitchStatement(SwitchStatement* stmt) {
VariableProxy* tag = stmt->tag()->AsVariableProxy();
DCHECK_NOT_NULL(tag);
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
- 0);
- uint16_t fall_through = current_function_builder_->AddLocal(kAstI32);
- SetLocalTo(fall_through, 0);
-
ZoneList<CaseClause*>* clauses = stmt->cases();
- for (int i = 0; i < clauses->length(); ++i) {
+ int case_count = clauses->length();
+ if (case_count == 0) {
+ return;
+ }
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false);
+ ZoneVector<BlockVisitor*> blocks(zone_);
+ ZoneVector<int32_t> cases(zone_);
+ ZoneMap<int, unsigned int> case_to_block(zone_);
+ bool has_default = false;
+ for (int i = case_count - 1; i >= 0; i--) {
CaseClause* clause = clauses->at(i);
+ blocks.push_back(new BlockVisitor(this, nullptr, kExprBlock, false));
if (!clause->is_default()) {
- CompileCase(clause, fall_through, tag);
+ Literal* label = clause->label()->AsLiteral();
+ Handle<Object> value = label->value();
+ int32_t label_value;
+ bool label_is_i32 = value->ToInt32(&label_value);
+ DCHECK(value->IsNumber() && label_is_i32);
+ (void)label_is_i32;
+ case_to_block[label_value] = i;
+ cases.push_back(label_value);
} else {
- ZoneList<Statement*>* stmts = clause->statements();
- block_size_ += stmts->length();
- RECURSE(VisitStatements(stmts));
+ DCHECK_EQ(i, case_count - 1);
+ has_default = true;
}
}
+ if (!has_default || case_count > 1) {
+ int default_block = has_default ? case_count - 1 : case_count;
+ BlockVisitor switch_logic_block(this, nullptr, kExprBlock, false);
+ CaseNode* root = OrderCases(&cases, zone_);
+ HandleCase(root, case_to_block, tag, default_block, 0);
+ if (root->left != nullptr || root->right != nullptr ||
+ root->begin == root->end) {
+ current_function_builder_->EmitWithU8(kExprBr, ARITY_0);
+ current_function_builder_->EmitVarInt(default_block);
+ }
+ }
+ for (int i = 0; i < case_count; ++i) {
+ CaseClause* clause = clauses->at(i);
+ RECURSE(VisitStatements(clause->statements()));
+ BlockVisitor* v = blocks.at(case_count - i - 1);
+ blocks.pop_back();
+ delete v;
+ }
}
void VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
void VisitDoWhileStatement(DoWhileStatement* stmt) {
- DCHECK(in_function_);
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
- 2);
+ DCHECK_EQ(kFuncScope, scope_);
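+ // Lowered form (an illustrative sketch):
+ // loop { <body> <cond> if { br /* back to loop start */ } end }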
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
RECURSE(Visit(stmt->body()));
- current_function_builder_->Emit(kExprIf);
RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithVarInt(kExprBr, 0);
- current_function_builder_->Emit(kExprNop);
+ current_function_builder_->Emit(kExprIf);
+ current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+ current_function_builder_->Emit(kExprEnd);
}
void VisitWhileStatement(WhileStatement* stmt) {
- DCHECK(in_function_);
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
- 1);
- current_function_builder_->Emit(kExprIf);
+ DCHECK_EQ(kFuncScope, scope_);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithVarInt(kExprBr, 0);
+ breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ current_function_builder_->Emit(kExprIf);
RECURSE(Visit(stmt->body()));
+ current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 1);
+ current_function_builder_->Emit(kExprEnd);
+ breakable_blocks_.pop_back();
}
void VisitForStatement(ForStatement* stmt) {
- DCHECK(in_function_);
+ DCHECK_EQ(kFuncScope, scope_);
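+ // Lowered form (an illustrative sketch):
+ // <init> loop { if (!cond) br /* exit */; <body> <next> br /* repeat */ }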
if (stmt->init() != nullptr) {
- block_size_++;
RECURSE(Visit(stmt->init()));
}
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true,
- 0);
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprLoop, true);
if (stmt->cond() != nullptr) {
- block_size_++;
- current_function_builder_->Emit(kExprIf);
- current_function_builder_->Emit(kExprI32Eqz);
RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithVarInt(kExprBr, 1);
+ current_function_builder_->Emit(kExprI32Eqz);
+ current_function_builder_->Emit(kExprIf);
current_function_builder_->Emit(kExprNop);
+ current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 2);
+ current_function_builder_->Emit(kExprEnd);
}
if (stmt->body() != nullptr) {
- block_size_++;
RECURSE(Visit(stmt->body()));
}
if (stmt->next() != nullptr) {
- block_size_++;
RECURSE(Visit(stmt->next()));
}
- block_size_++;
- current_function_builder_->EmitWithVarInt(kExprBr, 0);
current_function_builder_->Emit(kExprNop);
+ current_function_builder_->EmitWithU8U8(kExprBr, ARITY_0, 0);
}
void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
@@ -347,17 +443,22 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitDebuggerStatement(DebuggerStatement* stmt) { UNREACHABLE(); }
void VisitFunctionLiteral(FunctionLiteral* expr) {
- Scope* scope = expr->scope();
- if (in_function_) {
- if (expr->bounds().lower->IsFunction()) {
- FunctionType* func_type = expr->bounds().lower->AsFunction();
- LocalType return_type = TypeFrom(func_type->Result());
- current_function_builder_->ReturnType(return_type);
- for (int i = 0; i < expr->parameter_count(); i++) {
- LocalType type = TypeFrom(func_type->Parameter(i));
+ DeclarationScope* scope = expr->scope();
+ if (scope_ == kFuncScope) {
+ if (auto* func_type = typer_->TypeOf(expr)->AsFunctionType()) {
+ // Build the signature for the function.
+ LocalType return_type = TypeFrom(func_type->ReturnType());
+ const auto& arguments = func_type->Arguments();
+ FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
+ arguments.size());
+ if (return_type != kAstStmt) b.AddReturn(return_type);
+ for (int i = 0; i < expr->parameter_count(); ++i) {
+ LocalType type = TypeFrom(arguments[i]);
DCHECK_NE(kAstStmt, type);
- LookupOrInsertLocal(scope->parameter(i), type);
+ b.AddParam(type);
+ InsertParameter(scope->parameter(i), type, i);
}
+ current_function_builder_->SetSignature(b.Build());
} else {
UNREACHABLE();
}
@@ -371,11 +472,16 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
void VisitConditional(Conditional* expr) {
- DCHECK(in_function_);
- current_function_builder_->Emit(kExprIfElse);
+ DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(expr->condition()));
+ // WASM ifs come with implicit blocks for both arms.
+ breakable_blocks_.push_back(std::make_pair(nullptr, false));
+ current_function_builder_->Emit(kExprIf);
RECURSE(Visit(expr->then_expression()));
+ current_function_builder_->Emit(kExprElse);
RECURSE(Visit(expr->else_expression()));
+ current_function_builder_->Emit(kExprEnd);
+ breakable_blocks_.pop_back();
}
bool VisitStdlibConstant(Variable* var) {
@@ -431,49 +537,39 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
void VisitVariableProxy(VariableProxy* expr) {
- if (in_function_) {
+ if (scope_ == kFuncScope || scope_ == kInitScope) {
Variable* var = expr->var();
- if (is_set_op_) {
- if (var->IsContextSlot()) {
- current_function_builder_->Emit(kExprStoreGlobal);
- } else {
- current_function_builder_->Emit(kExprSetLocal);
- }
- is_set_op_ = false;
- } else {
- if (VisitStdlibConstant(var)) {
- return;
- }
- if (var->IsContextSlot()) {
- current_function_builder_->Emit(kExprLoadGlobal);
- } else {
- current_function_builder_->Emit(kExprGetLocal);
- }
+ if (VisitStdlibConstant(var)) {
+ return;
}
LocalType var_type = TypeOf(expr);
DCHECK_NE(kAstStmt, var_type);
if (var->IsContextSlot()) {
- AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+ current_function_builder_->EmitWithVarInt(
+ kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
} else {
- AddLeb128(LookupOrInsertLocal(var, var_type), true);
+ current_function_builder_->EmitGetLocal(
+ LookupOrInsertLocal(var, var_type));
}
}
}
void VisitLiteral(Literal* expr) {
Handle<Object> value = expr->value();
- if (!in_function_ || !value->IsNumber()) {
+ if (!value->IsNumber() || (scope_ != kFuncScope && scope_ != kInitScope)) {
return;
}
- Type* type = expr->bounds().upper;
- if (type->Is(cache_.kAsmSigned)) {
+ AsmType* type = typer_->TypeOf(expr);
+ DCHECK_NE(type, AsmType::None());
+
+ if (type->IsA(AsmType::Signed())) {
int32_t i = 0;
if (!value->ToInt32(&i)) {
UNREACHABLE();
}
byte code[] = {WASM_I32V(i)};
current_function_builder_->EmitCode(code, sizeof(code));
- } else if (type->Is(cache_.kAsmUnsigned) || type->Is(cache_.kAsmFixnum)) {
+ } else if (type->IsA(AsmType::Unsigned()) || type->IsA(AsmType::FixNum())) {
uint32_t u = 0;
if (!value->ToUint32(&u)) {
UNREACHABLE();
@@ -481,7 +577,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
int32_t i = static_cast<int32_t>(u);
byte code[] = {WASM_I32V(i)};
current_function_builder_->EmitCode(code, sizeof(code));
- } else if (type->Is(cache_.kAsmDouble)) {
+ } else if (type->IsA(AsmType::Double())) {
double val = expr->raw_value()->AsNumber();
byte code[] = {WASM_F64(val)};
current_function_builder_->EmitCode(code, sizeof(code));
@@ -496,7 +592,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
ZoneList<ObjectLiteralProperty*>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ObjectLiteralProperty* prop = props->at(i);
- DCHECK(marking_exported);
+ DCHECK_EQ(kExportScope, scope_);
VariableProxy* expr = prop->value()->AsVariableProxy();
DCHECK_NOT_NULL(expr);
Variable* var = expr->var();
@@ -505,10 +601,11 @@ class AsmWasmBuilderImpl : public AstVisitor {
DCHECK(name->IsPropertyName());
const AstRawString* raw_name = name->AsRawPropertyName();
if (var->is_function()) {
- uint16_t index = LookupOrInsertFunction(var);
- builder_->FunctionAt(index)->Exported(1);
- builder_->FunctionAt(index)
- ->SetName(raw_name->raw_data(), raw_name->length());
+ uint32_t index = LookupOrInsertFunction(var);
+ builder_->FunctionAt(index)->SetExported();
+ builder_->FunctionAt(index)->SetName(
+ reinterpret_cast<const char*>(raw_name->raw_data()),
+ raw_name->length());
}
}
}
@@ -517,30 +614,32 @@ class AsmWasmBuilderImpl : public AstVisitor {
void LoadInitFunction() {
current_function_builder_ = builder_->FunctionAt(init_function_index_);
- in_function_ = true;
+ scope_ = kInitScope;
}
void UnLoadInitFunction() {
- in_function_ = false;
+ scope_ = kModuleScope;
current_function_builder_ = nullptr;
}
void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
- FunctionType* func_type =
- funcs->bounds().lower->AsArray()->Element()->AsFunction();
- LocalType return_type = TypeFrom(func_type->Result());
+ auto* func_tbl_type = typer_->TypeOf(funcs)->AsFunctionTableType();
+ DCHECK_NOT_NULL(func_tbl_type);
+ auto* func_type = func_tbl_type->signature()->AsFunctionType();
+ const auto& arguments = func_type->Arguments();
+ LocalType return_type = TypeFrom(func_type->ReturnType());
FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
- func_type->Arity());
+ arguments.size());
if (return_type != kAstStmt) {
- sig.AddReturn(static_cast<LocalType>(return_type));
+ sig.AddReturn(return_type);
}
- for (int i = 0; i < func_type->Arity(); i++) {
- sig.AddParam(TypeFrom(func_type->Parameter(i)));
+ for (auto* arg : arguments) {
+ sig.AddParam(TypeFrom(arg));
}
- uint16_t signature_index = builder_->AddSignature(sig.Build());
+ uint32_t signature_index = builder_->AddSignature(sig.Build());
InsertFunctionTable(table->var(), next_table_index_, signature_index);
next_table_index_ += funcs->values()->length();
- for (int i = 0; i < funcs->values()->length(); i++) {
+ for (int i = 0; i < funcs->values()->length(); ++i) {
VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
DCHECK_NOT_NULL(func);
builder_->AddIndirectFunction(LookupOrInsertFunction(func->var()));
@@ -549,11 +648,11 @@ class AsmWasmBuilderImpl : public AstVisitor {
struct FunctionTableIndices : public ZoneObject {
uint32_t start_index;
- uint16_t signature_index;
+ uint32_t signature_index;
};
void InsertFunctionTable(Variable* v, uint32_t start_index,
- uint16_t signature_index) {
+ uint32_t signature_index) {
FunctionTableIndices* container = new (zone()) FunctionTableIndices();
container->start_index = start_index;
container->signature_index = signature_index;
@@ -573,12 +672,11 @@ class AsmWasmBuilderImpl : public AstVisitor {
private:
class ImportedFunctionIndices : public ZoneObject {
public:
- const unsigned char* name_;
+ const char* name_;
int name_length_;
WasmModuleBuilder::SignatureMap signature_to_index_;
- ImportedFunctionIndices(const unsigned char* name, int name_length,
- Zone* zone)
+ ImportedFunctionIndices(const char* name, int name_length, Zone* zone)
: name_(name), name_length_(name_length), signature_to_index_(zone) {}
};
ZoneHashMap table_;
@@ -586,11 +684,12 @@ class AsmWasmBuilderImpl : public AstVisitor {
public:
explicit ImportedFunctionTable(AsmWasmBuilderImpl* builder)
- : table_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ : table_(base::HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(builder->zone())),
builder_(builder) {}
- void AddImport(Variable* v, const unsigned char* name, int name_length) {
+ void AddImport(Variable* v, const char* name, int name_length) {
ImportedFunctionIndices* indices = new (builder_->zone())
ImportedFunctionIndices(name, name_length, builder_->zone());
ZoneHashMap::Entry* entry = table_.LookupOrInsert(
@@ -598,7 +697,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
entry->value = indices;
}
- uint16_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
+ uint32_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
DCHECK_NOT_NULL(entry);
ImportedFunctionIndices* indices =
@@ -608,47 +707,133 @@ class AsmWasmBuilderImpl : public AstVisitor {
if (pos != indices->signature_to_index_.end()) {
return pos->second;
} else {
- uint16_t index = builder_->builder_->AddFunction();
+ uint32_t index = builder_->builder_->AddImport(
+ indices->name_, indices->name_length_, sig);
indices->signature_to_index_[sig] = index;
- WasmFunctionBuilder* function = builder_->builder_->FunctionAt(index);
- function->External(1);
- function->SetName(indices->name_, indices->name_length_);
- if (sig->return_count() > 0) {
- function->ReturnType(sig->GetReturn());
- }
- for (size_t i = 0; i < sig->parameter_count(); i++) {
- function->AddParam(sig->GetParam(i));
- }
return index;
}
}
};
- void VisitAssignment(Assignment* expr) {
- bool in_init = false;
- if (!in_function_) {
- BinaryOperation* binop = expr->value()->AsBinaryOperation();
- if (binop != nullptr) {
+ void EmitAssignmentLhs(Expression* target, MachineType* mtype) {
+ // Match the left hand side of the assignment.
+ VariableProxy* target_var = target->AsVariableProxy();
+ if (target_var != nullptr) {
+ // Left hand side is a local or a global variable; nothing is emitted for it.
+ return;
+ }
+
+ Property* target_prop = target->AsProperty();
+ if (target_prop != nullptr) {
+ // Left hand side is a property access, i.e. the asm.js heap.
+ VisitPropertyAndEmitIndex(target_prop, mtype);
+ return;
+ }
+
+ if (target_var == nullptr && target_prop == nullptr) {
+ UNREACHABLE(); // invalid assignment.
+ }
+ }
+
+ void EmitAssignmentRhs(Expression* target, Expression* value, bool* is_nop) {
+ BinaryOperation* binop = value->AsBinaryOperation();
+ if (binop != nullptr) {
+ if (scope_ == kInitScope) {
+ // Handle foreign variables in the initialization scope.
Property* prop = binop->left()->AsProperty();
- DCHECK_NOT_NULL(prop);
- LoadInitFunction();
- is_set_op_ = true;
- RECURSE(Visit(expr->target()));
- DCHECK(!is_set_op_);
if (binop->op() == Token::MUL) {
DCHECK(binop->right()->IsLiteral());
DCHECK_EQ(1.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
DCHECK(binop->right()->AsLiteral()->raw_value()->ContainsDot());
- VisitForeignVariable(true, prop);
+ DCHECK(target->IsVariableProxy());
+ VisitForeignVariable(true, target->AsVariableProxy()->var(), prop);
+ *is_nop = true;
+ return;
} else if (binop->op() == Token::BIT_OR) {
DCHECK(binop->right()->IsLiteral());
DCHECK_EQ(0.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
DCHECK(!binop->right()->AsLiteral()->raw_value()->ContainsDot());
- VisitForeignVariable(false, prop);
+ DCHECK(target->IsVariableProxy());
+ VisitForeignVariable(false, target->AsVariableProxy()->var(), prop);
+ *is_nop = true;
+ return;
} else {
UNREACHABLE();
}
- UnLoadInitFunction();
+ }
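+ // An assignment whose RHS merely re-coerces the LHS, e.g. "x = x | 0"
+ // (illustrative), carries only type information and lowers to nothing.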
+ if (MatchBinaryOperation(binop) == kAsIs) {
+ VariableProxy* target_var = target->AsVariableProxy();
+ VariableProxy* effective_value_var = GetLeft(binop)->AsVariableProxy();
+ if (target_var != nullptr && effective_value_var != nullptr &&
+ target_var->var() == effective_value_var->var()) {
+ *is_nop = true;
+ return;
+ }
+ }
+ }
+ RECURSE(Visit(value));
+ }
+
+ void EmitAssignment(Assignment* expr, MachineType type) {
+ // Match the left hand side of the assignment.
+ VariableProxy* target_var = expr->target()->AsVariableProxy();
+ if (target_var != nullptr) {
+ // Left hand side is a local or a global variable.
+ Variable* var = target_var->var();
+ LocalType var_type = TypeOf(expr);
+ DCHECK_NE(kAstStmt, var_type);
+ if (var->IsContextSlot()) {
+ current_function_builder_->EmitWithVarInt(
+ kExprSetGlobal, LookupOrInsertGlobal(var, var_type));
+ } else {
+ current_function_builder_->EmitSetLocal(
+ LookupOrInsertLocal(var, var_type));
+ }
+ }
+
+ Property* target_prop = expr->target()->AsProperty();
+ if (target_prop != nullptr) {
+ // Left hand side is a property access, i.e. the asm.js heap.
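+ // Assignment to a Float32Array element from a float64 value converts.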
+ if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
+ typer_->TypeOf(expr->target()->AsProperty()->obj())
+ ->IsA(AsmType::Float32Array())) {
+ current_function_builder_->Emit(kExprF32ConvertF64);
+ }
+ WasmOpcode opcode;
+ if (type == MachineType::Int8()) {
+ opcode = kExprI32AsmjsStoreMem8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kExprI32AsmjsStoreMem8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kExprI32AsmjsStoreMem16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kExprI32AsmjsStoreMem16;
+ } else if (type == MachineType::Int32()) {
+ opcode = kExprI32AsmjsStoreMem;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kExprI32AsmjsStoreMem;
+ } else if (type == MachineType::Float32()) {
+ opcode = kExprF32AsmjsStoreMem;
+ } else if (type == MachineType::Float64()) {
+ opcode = kExprF64AsmjsStoreMem;
+ } else {
+ UNREACHABLE();
+ }
+ current_function_builder_->Emit(opcode);
+ }
+
+ if (target_var == nullptr && target_prop == nullptr) {
+ UNREACHABLE(); // invalid assignment.
+ }
+ }
+
+ void VisitAssignment(Assignment* expr) {
+ bool as_init = false;
+ if (scope_ == kModuleScope) {
+ // Skip extra assignment inserted by the parser when in this form:
+ // (function Module(a, b, c) {... })
+ if (expr->target()->IsVariableProxy() &&
+ expr->target()->AsVariableProxy()->var()->mode() == CONST_LEGACY) {
return;
}
Property* prop = expr->value()->AsProperty();
@@ -657,11 +842,12 @@ class AsmWasmBuilderImpl : public AstVisitor {
if (vp != nullptr && vp->var()->IsParameter() &&
vp->var()->index() == 1) {
VariableProxy* target = expr->target()->AsVariableProxy();
- if (target->bounds().lower->Is(Type::Function())) {
+ if (typer_->TypeOf(target)->AsFFIType() != nullptr) {
const AstRawString* name =
prop->key()->AsLiteral()->AsRawPropertyName();
- imported_function_table_.AddImport(target->var(), name->raw_data(),
- name->length());
+ imported_function_table_.AddImport(
+ target->var(), reinterpret_cast<const char*>(name->raw_data()),
+ name->length());
}
}
// Property values in module scope don't emit code, so return.
@@ -669,7 +855,10 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
if (funcs != nullptr &&
- funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
+ typer_->TypeOf(funcs)
+ ->AsFunctionTableType()
+ ->signature()
+ ->AsFunctionType()) {
VariableProxy* target = expr->target()->AsVariableProxy();
DCHECK_NOT_NULL(target);
AddFunctionTable(target, funcs);
@@ -680,128 +869,73 @@ class AsmWasmBuilderImpl : public AstVisitor {
// No init code to emit for CallNew nodes.
return;
}
- in_init = true;
- LoadInitFunction();
- }
- BinaryOperation* value_op = expr->value()->AsBinaryOperation();
- if (value_op != nullptr && MatchBinaryOperation(value_op) == kAsIs) {
- VariableProxy* target_var = expr->target()->AsVariableProxy();
- VariableProxy* effective_value_var = GetLeft(value_op)->AsVariableProxy();
- if (target_var != nullptr && effective_value_var != nullptr &&
- target_var->var() == effective_value_var->var()) {
- block_size_--;
- return;
- }
+ as_init = true;
}
- is_set_op_ = true;
- RECURSE(Visit(expr->target()));
- DCHECK(!is_set_op_);
- // Assignment to heapf32 from float64 converts.
- if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
- expr->target()->AsProperty()->obj()->bounds().lower->Is(
- cache_.kFloat32Array)) {
- current_function_builder_->Emit(kExprF32ConvertF64);
- }
- RECURSE(Visit(expr->value()));
- if (in_init) {
- UnLoadInitFunction();
+
+ if (as_init) LoadInitFunction();
+ MachineType mtype;
+ bool is_nop = false;
+ EmitAssignmentLhs(expr->target(), &mtype);
+ EmitAssignmentRhs(expr->target(), expr->value(), &is_nop);
+ if (!is_nop) {
+ EmitAssignment(expr, mtype);
}
+ if (as_init) UnLoadInitFunction();
}
void VisitYield(Yield* expr) { UNREACHABLE(); }
void VisitThrow(Throw* expr) { UNREACHABLE(); }
- void VisitForeignVariable(bool is_float, Property* expr) {
+ void VisitForeignVariable(bool is_float, Variable* var, Property* expr) {
DCHECK(expr->obj()->AsVariableProxy());
DCHECK(VariableLocation::PARAMETER ==
expr->obj()->AsVariableProxy()->var()->location());
DCHECK_EQ(1, expr->obj()->AsVariableProxy()->var()->index());
Literal* key_literal = expr->key()->AsLiteral();
DCHECK_NOT_NULL(key_literal);
- if (!key_literal->value().is_null() && !foreign_.is_null() &&
- foreign_->IsObject()) {
+ if (!key_literal->value().is_null()) {
Handle<Name> name =
i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
- MaybeHandle<Object> maybe_value = i::Object::GetProperty(foreign_, name);
- if (!maybe_value.is_null()) {
- Handle<Object> value = maybe_value.ToHandleChecked();
- if (is_float) {
- MaybeHandle<Object> maybe_nvalue = i::Object::ToNumber(value);
- if (!maybe_nvalue.is_null()) {
- Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
- if (nvalue->IsNumber()) {
- double val = nvalue->Number();
- byte code[] = {WASM_F64(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- return;
- }
- }
- } else {
- MaybeHandle<Object> maybe_nvalue =
- i::Object::ToInt32(isolate_, value);
- if (!maybe_nvalue.is_null()) {
- Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
- if (nvalue->IsNumber()) {
- int32_t val = static_cast<int32_t>(nvalue->Number());
- // TODO(bradnelson): variable size
- byte code[] = {WASM_I32V(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- return;
- }
- }
- }
- }
- }
- if (is_float) {
- byte code[] = {WASM_F64(std::numeric_limits<double>::quiet_NaN())};
- current_function_builder_->EmitCode(code, sizeof(code));
- } else {
- byte code[] = {WASM_I32V_1(0)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ LocalType type = is_float ? kAstF64 : kAstI32;
+ foreign_variables_.push_back({name, var, type});
}
}
- void VisitProperty(Property* expr) {
+ void VisitPropertyAndEmitIndex(Property* expr, MachineType* mtype) {
Expression* obj = expr->obj();
- DCHECK_EQ(obj->bounds().lower, obj->bounds().upper);
- Type* type = obj->bounds().lower;
- MachineType mtype;
+ AsmType* type = typer_->TypeOf(obj);
int size;
- if (type->Is(cache_.kUint8Array)) {
- mtype = MachineType::Uint8();
+ if (type->IsA(AsmType::Uint8Array())) {
+ *mtype = MachineType::Uint8();
size = 1;
- } else if (type->Is(cache_.kInt8Array)) {
- mtype = MachineType::Int8();
+ } else if (type->IsA(AsmType::Int8Array())) {
+ *mtype = MachineType::Int8();
size = 1;
- } else if (type->Is(cache_.kUint16Array)) {
- mtype = MachineType::Uint16();
+ } else if (type->IsA(AsmType::Uint16Array())) {
+ *mtype = MachineType::Uint16();
size = 2;
- } else if (type->Is(cache_.kInt16Array)) {
- mtype = MachineType::Int16();
+ } else if (type->IsA(AsmType::Int16Array())) {
+ *mtype = MachineType::Int16();
size = 2;
- } else if (type->Is(cache_.kUint32Array)) {
- mtype = MachineType::Uint32();
+ } else if (type->IsA(AsmType::Uint32Array())) {
+ *mtype = MachineType::Uint32();
size = 4;
- } else if (type->Is(cache_.kInt32Array)) {
- mtype = MachineType::Int32();
+ } else if (type->IsA(AsmType::Int32Array())) {
+ *mtype = MachineType::Int32();
size = 4;
- } else if (type->Is(cache_.kUint32Array)) {
- mtype = MachineType::Uint32();
+ } else if (type->IsA(AsmType::Uint32Array())) {
+ *mtype = MachineType::Uint32();
size = 4;
- } else if (type->Is(cache_.kFloat32Array)) {
- mtype = MachineType::Float32();
+ } else if (type->IsA(AsmType::Float32Array())) {
+ *mtype = MachineType::Float32();
size = 4;
- } else if (type->Is(cache_.kFloat64Array)) {
- mtype = MachineType::Float64();
+ } else if (type->IsA(AsmType::Float64Array())) {
+ *mtype = MachineType::Float64();
size = 8;
} else {
UNREACHABLE();
}
- // TODO(titzer): use special asm-compatibility opcodes?
- current_function_builder_->EmitWithU8U8(
- WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_), 0, 0);
- is_set_op_ = false;
if (size == 1) {
// Allow more general expression in byte arrays than the spec
// strictly permits.
@@ -809,87 +943,123 @@ class AsmWasmBuilderImpl : public AstVisitor {
// places that strictly should be HEAP8[HEAP32[..]>>0].
RECURSE(Visit(expr->key()));
return;
- } else {
- Literal* value = expr->key()->AsLiteral();
- if (value) {
- DCHECK(value->raw_value()->IsNumber());
- DCHECK_EQ(kAstI32, TypeOf(value));
- int val = static_cast<int>(value->raw_value()->AsNumber());
- // TODO(bradnelson): variable size
- byte code[] = {WASM_I32V(val * size)};
- current_function_builder_->EmitCode(code, sizeof(code));
- return;
- }
- BinaryOperation* binop = expr->key()->AsBinaryOperation();
- if (binop) {
- DCHECK_EQ(Token::SAR, binop->op());
- DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
- DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
- DCHECK_EQ(size,
- 1 << static_cast<int>(
- binop->right()->AsLiteral()->raw_value()->AsNumber()));
- // Mask bottom bits to match asm.js behavior.
- current_function_builder_->Emit(kExprI32And);
- byte code[] = {WASM_I8(~(size - 1))};
- current_function_builder_->EmitCode(code, sizeof(code));
- RECURSE(Visit(binop->left()));
- return;
- }
+ }
+
+ Literal* value = expr->key()->AsLiteral();
+ if (value) {
+ DCHECK(value->raw_value()->IsNumber());
+ DCHECK_EQ(kAstI32, TypeOf(value));
+ int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber());
+ // TODO(titzer): handle overflow here.
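+      // Scale the constant element index by the element size, since wasm
+      // loads and stores address memory in bytes.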
+ current_function_builder_->EmitI32Const(val * size);
+ return;
+ }
+ BinaryOperation* binop = expr->key()->AsBinaryOperation();
+ if (binop) {
+ DCHECK_EQ(Token::SAR, binop->op());
+ DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+ DCHECK_EQ(size,
+ 1 << static_cast<int>(
+ binop->right()->AsLiteral()->raw_value()->AsNumber()));
+ // Mask bottom bits to match asm.js behavior.
+ byte mask = static_cast<byte>(~(size - 1));
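+      // e.g. size == 4 gives mask ~3 (0xfc), clearing the low two bits.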
+ RECURSE(Visit(binop->left()));
+ current_function_builder_->EmitWithU8(kExprI8Const, mask);
+ current_function_builder_->Emit(kExprI32And);
+ return;
}
UNREACHABLE();
}
+ void VisitProperty(Property* expr) {
+ MachineType type;
+ VisitPropertyAndEmitIndex(expr, &type);
+ WasmOpcode opcode;
+ if (type == MachineType::Int8()) {
+ opcode = kExprI32AsmjsLoadMem8S;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kExprI32AsmjsLoadMem8U;
+ } else if (type == MachineType::Int16()) {
+ opcode = kExprI32AsmjsLoadMem16S;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kExprI32AsmjsLoadMem16U;
+ } else if (type == MachineType::Int32()) {
+ opcode = kExprI32AsmjsLoadMem;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kExprI32AsmjsLoadMem;
+ } else if (type == MachineType::Float32()) {
+ opcode = kExprF32AsmjsLoadMem;
+ } else if (type == MachineType::Float64()) {
+ opcode = kExprF64AsmjsLoadMem;
+ } else {
+ UNREACHABLE();
+ }
+
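+    // The Asmjs load opcodes implement asm.js out-of-bounds semantics:
+    // reads outside the heap yield 0 (for integers) or NaN (for floats)
+    // instead of trapping.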
+ current_function_builder_->Emit(opcode);
+ }
+
bool VisitStdlibFunction(Call* call, VariableProxy* expr) {
Variable* var = expr->var();
AsmTyper::StandardMember standard_object =
typer_->VariableAsStandardMember(var);
ZoneList<Expression*>* args = call->arguments();
LocalType call_type = TypeOf(call);
+
switch (standard_object) {
case AsmTyper::kNone: {
return false;
}
case AsmTyper::kMathAcos: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Acos);
break;
}
case AsmTyper::kMathAsin: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Asin);
break;
}
case AsmTyper::kMathAtan: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Atan);
break;
}
case AsmTyper::kMathCos: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Cos);
break;
}
case AsmTyper::kMathSin: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Sin);
break;
}
case AsmTyper::kMathTan: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Tan);
break;
}
case AsmTyper::kMathExp: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Exp);
break;
}
case AsmTyper::kMathLog: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Log);
break;
}
case AsmTyper::kMathCeil: {
+ VisitCallArgs(call);
if (call_type == kAstF32) {
current_function_builder_->Emit(kExprF32Ceil);
} else if (call_type == kAstF64) {
@@ -900,6 +1070,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
break;
}
case AsmTyper::kMathFloor: {
+ VisitCallArgs(call);
if (call_type == kAstF32) {
current_function_builder_->Emit(kExprF32Floor);
} else if (call_type == kAstF64) {
@@ -910,6 +1081,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
break;
}
case AsmTyper::kMathSqrt: {
+ VisitCallArgs(call);
if (call_type == kAstF32) {
current_function_builder_->Emit(kExprF32Sqrt);
} else if (call_type == kAstF64) {
@@ -919,20 +1091,40 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
break;
}
+ case AsmTyper::kMathClz32: {
+ VisitCallArgs(call);
+ DCHECK(call_type == kAstI32);
+ current_function_builder_->Emit(kExprI32Clz);
+ break;
+ }
case AsmTyper::kMathAbs: {
- // TODO(bradnelson): Should this be cast to float?
if (call_type == kAstI32) {
- current_function_builder_->Emit(kExprIfElse);
- current_function_builder_->Emit(kExprI32LtS);
- Visit(args->at(0));
+ uint32_t tmp = current_function_builder_->AddLocal(kAstI32);
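+        // Cache the argument in a local so it is evaluated only once.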
+
+ // if set_local(tmp, x) < 0
+ Visit(call->arguments()->at(0));
+ current_function_builder_->EmitSetLocal(tmp);
byte code[] = {WASM_I8(0)};
current_function_builder_->EmitCode(code, sizeof(code));
- current_function_builder_->Emit(kExprI32Sub);
+ current_function_builder_->Emit(kExprI32LtS);
+ current_function_builder_->Emit(kExprIf);
+
+ // then (0 - tmp)
current_function_builder_->EmitCode(code, sizeof(code));
- Visit(args->at(0));
+ current_function_builder_->EmitGetLocal(tmp);
+ current_function_builder_->Emit(kExprI32Sub);
+
+ // else tmp
+ current_function_builder_->Emit(kExprElse);
+ current_function_builder_->EmitGetLocal(tmp);
+ // end
+ current_function_builder_->Emit(kExprEnd);
+
} else if (call_type == kAstF32) {
+ VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Abs);
} else if (call_type == kAstF64) {
+ VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Abs);
} else {
UNREACHABLE();
@@ -942,13 +1134,32 @@ class AsmWasmBuilderImpl : public AstVisitor {
case AsmTyper::kMathMin: {
// TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
if (call_type == kAstI32) {
- current_function_builder_->Emit(kExprIfElse);
+ uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
+ uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
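+        // Evaluate both operands exactly once by caching them in locals.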
+
+        // if set_local(tmp_x, x) <= set_local(tmp_y, y)
+ Visit(call->arguments()->at(0));
+ current_function_builder_->EmitSetLocal(tmp_x);
+
+ Visit(call->arguments()->at(1));
+ current_function_builder_->EmitSetLocal(tmp_y);
+
current_function_builder_->Emit(kExprI32LeS);
- Visit(args->at(0));
- Visit(args->at(1));
+ current_function_builder_->Emit(kExprIf);
+
+ // then tmp_x
+ current_function_builder_->EmitGetLocal(tmp_x);
+
+ // else tmp_y
+ current_function_builder_->Emit(kExprElse);
+ current_function_builder_->EmitGetLocal(tmp_y);
+ current_function_builder_->Emit(kExprEnd);
+
} else if (call_type == kAstF32) {
+ VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Min);
} else if (call_type == kAstF64) {
+ VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Min);
} else {
UNREACHABLE();
@@ -958,13 +1169,33 @@ class AsmWasmBuilderImpl : public AstVisitor {
case AsmTyper::kMathMax: {
// TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
if (call_type == kAstI32) {
- current_function_builder_->Emit(kExprIfElse);
- current_function_builder_->Emit(kExprI32GtS);
- Visit(args->at(0));
- Visit(args->at(1));
+ uint32_t tmp_x = current_function_builder_->AddLocal(kAstI32);
+ uint32_t tmp_y = current_function_builder_->AddLocal(kAstI32);
+
+        // if set_local(tmp_x, x) <= set_local(tmp_y, y)
+ Visit(call->arguments()->at(0));
+ current_function_builder_->EmitSetLocal(tmp_x);
+
+ Visit(call->arguments()->at(1));
+ current_function_builder_->EmitSetLocal(tmp_y);
+
+ current_function_builder_->Emit(kExprI32LeS);
+ current_function_builder_->Emit(kExprIf);
+
+ // then tmp_y
+ current_function_builder_->EmitGetLocal(tmp_y);
+
+ // else tmp_x
+ current_function_builder_->Emit(kExprElse);
+ current_function_builder_->EmitGetLocal(tmp_x);
+ current_function_builder_->Emit(kExprEnd);
+
} else if (call_type == kAstF32) {
+ VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Max);
} else if (call_type == kAstF64) {
+ VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Max);
} else {
UNREACHABLE();
@@ -972,16 +1203,19 @@ class AsmWasmBuilderImpl : public AstVisitor {
break;
}
case AsmTyper::kMathAtan2: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Atan2);
break;
}
case AsmTyper::kMathPow: {
+ VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type);
current_function_builder_->Emit(kExprF64Pow);
break;
}
case AsmTyper::kMathImul: {
+ VisitCallArgs(call);
current_function_builder_->Emit(kExprI32Mul);
break;
}
@@ -989,6 +1223,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
DCHECK(args->length() == 1);
Literal* literal = args->at(0)->AsLiteral();
if (literal != nullptr) {
+          // Constant-fold Math.fround(#const).
if (literal->raw_value()->IsNumber()) {
float val = static_cast<float>(literal->raw_value()->AsNumber());
byte code[] = {WASM_F32(val)};
@@ -996,7 +1231,9 @@ class AsmWasmBuilderImpl : public AstVisitor {
return true;
}
}
- switch (TypeIndexOf(args->at(0))) {
+ VisitCallArgs(call);
+ static const bool kDontIgnoreSign = false;
+ switch (TypeIndexOf(args->at(0), kDontIgnoreSign)) {
case kInt32:
case kFixnum:
current_function_builder_->Emit(kExprF32SConvertI32);
@@ -1019,7 +1256,6 @@ class AsmWasmBuilderImpl : public AstVisitor {
break;
}
}
- VisitCallArgs(call);
return true;
}
@@ -1032,20 +1268,20 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
void VisitCall(Call* expr) {
- Call::CallType call_type = expr->GetCallType(isolate_);
+ Call::CallType call_type = expr->GetCallType();
switch (call_type) {
case Call::OTHER_CALL: {
- DCHECK(in_function_);
+ DCHECK_EQ(kFuncScope, scope_);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != nullptr) {
if (VisitStdlibFunction(expr, proxy)) {
return;
}
}
- uint16_t index;
+ uint32_t index;
VariableProxy* vp = expr->expression()->AsVariableProxy();
- if (vp != nullptr &&
- Type::Any()->Is(vp->bounds().lower->AsFunction()->Result())) {
+ DCHECK_NOT_NULL(vp);
+ if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
LocalType return_type = TypeOf(expr);
ZoneList<Expression*>* args = expr->arguments();
FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
@@ -1053,40 +1289,43 @@ class AsmWasmBuilderImpl : public AstVisitor {
if (return_type != kAstStmt) {
sig.AddReturn(return_type);
}
- for (int i = 0; i < args->length(); i++) {
+ for (int i = 0; i < args->length(); ++i) {
sig.AddParam(TypeOf(args->at(i)));
}
index =
imported_function_table_.GetFunctionIndex(vp->var(), sig.Build());
+ VisitCallArgs(expr);
+ current_function_builder_->Emit(kExprCallImport);
+ current_function_builder_->EmitVarInt(expr->arguments()->length());
+ current_function_builder_->EmitVarInt(index);
} else {
index = LookupOrInsertFunction(vp->var());
+ VisitCallArgs(expr);
+ current_function_builder_->Emit(kExprCallFunction);
+ current_function_builder_->EmitVarInt(expr->arguments()->length());
+ current_function_builder_->EmitVarInt(index);
}
- current_function_builder_->Emit(kExprCallFunction);
- std::vector<uint8_t> index_arr = UnsignedLEB128From(index);
- current_function_builder_->EmitCode(
- &index_arr[0], static_cast<uint32_t>(index_arr.size()));
break;
}
case Call::KEYED_PROPERTY_CALL: {
- DCHECK(in_function_);
+ DCHECK_EQ(kFuncScope, scope_);
Property* p = expr->expression()->AsProperty();
DCHECK_NOT_NULL(p);
VariableProxy* var = p->obj()->AsVariableProxy();
DCHECK_NOT_NULL(var);
FunctionTableIndices* indices = LookupFunctionTable(var->var());
- current_function_builder_->EmitWithVarInt(kExprCallIndirect,
- indices->signature_index);
- current_function_builder_->Emit(kExprI32Add);
- // TODO(bradnelson): variable size
- byte code[] = {WASM_I32V(indices->start_index)};
- current_function_builder_->EmitCode(code, sizeof(code));
RECURSE(Visit(p->key()));
+ current_function_builder_->EmitI32Const(indices->start_index);
+ current_function_builder_->Emit(kExprI32Add);
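+      // Function tables share a single index space; offset the key by
+      // this table's starting index.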
+ VisitCallArgs(expr);
+ current_function_builder_->Emit(kExprCallIndirect);
+ current_function_builder_->EmitVarInt(expr->arguments()->length());
+ current_function_builder_->EmitVarInt(indices->signature_index);
break;
}
default:
UNREACHABLE();
}
- VisitCallArgs(expr);
}
void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
@@ -1094,6 +1333,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitCallRuntime(CallRuntime* expr) { UNREACHABLE(); }
void VisitUnaryOperation(UnaryOperation* expr) {
+ RECURSE(Visit(expr->expression()));
switch (expr->op()) {
case Token::NOT: {
DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
@@ -1103,7 +1343,6 @@ class AsmWasmBuilderImpl : public AstVisitor {
default:
UNREACHABLE();
}
- RECURSE(Visit(expr->expression()));
}
void VisitCountOperation(CountOperation* expr) { UNREACHABLE(); }
@@ -1207,9 +1446,6 @@ class AsmWasmBuilderImpl : public AstVisitor {
#ifdef Mul
#undef Mul
#endif
-#ifdef Div
-#undef Div
-#endif
#define NON_SIGNED_BINOP(op) \
static WasmOpcode opcodes[] = { \
@@ -1248,8 +1484,10 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitBinaryOperation(BinaryOperation* expr) {
ConvertOperation convertOperation = MatchBinaryOperation(expr);
+ static const bool kDontIgnoreSign = false;
if (convertOperation == kToDouble) {
- TypeIndex type = TypeIndexOf(expr->left());
+ RECURSE(Visit(expr->left()));
+ TypeIndex type = TypeIndexOf(expr->left(), kDontIgnoreSign);
if (type == kInt32 || type == kFixnum) {
current_function_builder_->Emit(kExprF64SConvertI32);
} else if (type == kUint32) {
@@ -1259,37 +1497,53 @@ class AsmWasmBuilderImpl : public AstVisitor {
} else {
UNREACHABLE();
}
- RECURSE(Visit(expr->left()));
} else if (convertOperation == kToInt) {
- TypeIndex type = TypeIndexOf(GetLeft(expr));
+ RECURSE(Visit(GetLeft(expr)));
+ TypeIndex type = TypeIndexOf(GetLeft(expr), kDontIgnoreSign);
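+      // The Asmjs conversion opcodes follow JS ToInt32 semantics, so NaN
+      // converts to 0 and out-of-range values wrap instead of trapping.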
if (type == kFloat32) {
- current_function_builder_->Emit(kExprI32SConvertF32);
+ current_function_builder_->Emit(kExprI32AsmjsSConvertF32);
} else if (type == kFloat64) {
- current_function_builder_->Emit(kExprI32SConvertF64);
+ current_function_builder_->Emit(kExprI32AsmjsSConvertF64);
} else {
UNREACHABLE();
}
- RECURSE(Visit(GetLeft(expr)));
} else if (convertOperation == kAsIs) {
RECURSE(Visit(GetLeft(expr)));
} else {
+ if (expr->op() == Token::COMMA) {
+ current_function_builder_->Emit(kExprBlock);
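+        // In this expression-based wasm AST a block yields its last value,
+        // which matches JavaScript comma semantics.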
+ }
+
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
+
+ if (expr->op() == Token::COMMA) {
+ current_function_builder_->Emit(kExprEnd);
+ }
+
switch (expr->op()) {
BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
- BINOP_CASE(Token::DIV, Div, SIGNED_BINOP, false);
BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::BIT_AND, And, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::SHR, ShrU, NON_SIGNED_INT_BINOP, true);
+ case Token::DIV: {
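+            // The Asmjs division opcodes return 0 on division by zero
+            // instead of trapping, as asm.js requires.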
+ static WasmOpcode opcodes[] = {kExprI32AsmjsDivS, kExprI32AsmjsDivU,
+ kExprF32Div, kExprF64Div};
+            TypeIndex type = TypeIndexOf(expr->left(), expr->right(), false);
+ current_function_builder_->Emit(opcodes[type]);
+ break;
+ }
case Token::MOD: {
TypeIndex type = TypeIndexOf(expr->left(), expr->right(), false);
if (type == kInt32) {
- current_function_builder_->Emit(kExprI32RemS);
+ current_function_builder_->Emit(kExprI32AsmjsRemS);
} else if (type == kUint32) {
- current_function_builder_->Emit(kExprI32RemU);
+ current_function_builder_->Emit(kExprI32AsmjsRemU);
} else if (type == kFloat64) {
current_function_builder_->Emit(kExprF64Mod);
return;
@@ -1299,31 +1553,17 @@ class AsmWasmBuilderImpl : public AstVisitor {
break;
}
case Token::COMMA: {
- current_function_builder_->EmitWithVarInt(kExprBlock, 2);
break;
}
default:
UNREACHABLE();
}
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- }
- }
-
- void AddLeb128(uint32_t index, bool is_local) {
- std::vector<uint8_t> index_vec = UnsignedLEB128From(index);
- if (is_local) {
- uint32_t pos_of_index[1] = {0};
- current_function_builder_->EmitCode(
- &index_vec[0], static_cast<uint32_t>(index_vec.size()), pos_of_index,
- 1);
- } else {
- current_function_builder_->EmitCode(
- &index_vec[0], static_cast<uint32_t>(index_vec.size()));
}
}
void VisitCompareOperation(CompareOperation* expr) {
+ RECURSE(Visit(expr->left()));
+ RECURSE(Visit(expr->right()));
switch (expr->op()) {
BINOP_CASE(Token::EQ, Eq, NON_SIGNED_BINOP, false);
BINOP_CASE(Token::LT, Lt, SIGNED_BINOP, false);
@@ -1333,8 +1573,6 @@ class AsmWasmBuilderImpl : public AstVisitor {
default:
UNREACHABLE();
}
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
}
#undef BINOP_CASE
@@ -1351,8 +1589,8 @@ class AsmWasmBuilderImpl : public AstVisitor {
};
TypeIndex TypeIndexOf(Expression* left, Expression* right, bool ignore_sign) {
- TypeIndex left_index = TypeIndexOf(left);
- TypeIndex right_index = TypeIndexOf(right);
+ TypeIndex left_index = TypeIndexOf(left, ignore_sign);
+ TypeIndex right_index = TypeIndexOf(right, ignore_sign);
if (left_index == kFixnum) {
left_index = right_index;
}
@@ -1363,30 +1601,43 @@ class AsmWasmBuilderImpl : public AstVisitor {
left_index = kInt32;
right_index = kInt32;
}
- DCHECK((left_index == right_index) ||
- (ignore_sign && (left_index <= 1) && (right_index <= 1)));
+ if (left_index != right_index) {
+ DCHECK(ignore_sign && (left_index <= 1) && (right_index <= 1));
+ }
return left_index;
}
- TypeIndex TypeIndexOf(Expression* expr) {
- DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
- Type* type = expr->bounds().lower;
- if (type->Is(cache_.kAsmFixnum)) {
+ TypeIndex TypeIndexOf(Expression* expr, bool ignore_sign) {
+ AsmType* type = typer_->TypeOf(expr);
+ if (type->IsA(AsmType::FixNum())) {
return kFixnum;
- } else if (type->Is(cache_.kAsmSigned)) {
+ }
+
+ if (type->IsA(AsmType::Signed())) {
return kInt32;
- } else if (type->Is(cache_.kAsmUnsigned)) {
+ }
+
+ if (type->IsA(AsmType::Unsigned())) {
return kUint32;
- } else if (type->Is(cache_.kAsmInt)) {
+ }
+
+ if (type->IsA(AsmType::Intish())) {
+ if (!ignore_sign) {
+ // TODO(jpp): log a warning and move on.
+ }
return kInt32;
- } else if (type->Is(cache_.kAsmFloat)) {
+ }
+
+ if (type->IsA(AsmType::Floatish())) {
return kFloat32;
- } else if (type->Is(cache_.kAsmDouble)) {
+ }
+
+ if (type->IsA(AsmType::DoubleQ())) {
return kFloat64;
- } else {
- UNREACHABLE();
- return kInt32;
}
+
+ UNREACHABLE();
+ return kInt32;
}
#undef CASE
@@ -1422,20 +1673,17 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitRewritableExpression(RewritableExpression* expr) { UNREACHABLE(); }
struct IndexContainer : public ZoneObject {
- uint16_t index;
+ uint32_t index;
};
- uint16_t LookupOrInsertLocal(Variable* v, LocalType type) {
+ uint32_t LookupOrInsertLocal(Variable* v, LocalType type) {
DCHECK_NOT_NULL(current_function_builder_);
ZoneHashMap::Entry* entry =
local_variables_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
- uint16_t index;
- if (v->IsParameter()) {
- index = current_function_builder_->AddParam(type);
- } else {
- index = current_function_builder_->AddLocal(type);
- }
+ uint32_t index;
+ DCHECK(!v->IsParameter());
+ index = current_function_builder_->AddLocal(type);
IndexContainer* container = new (zone()) IndexContainer();
container->index = index;
entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
@@ -1445,12 +1693,24 @@ class AsmWasmBuilderImpl : public AstVisitor {
return (reinterpret_cast<IndexContainer*>(entry->value))->index;
}
- uint16_t LookupOrInsertGlobal(Variable* v, LocalType type) {
+ void InsertParameter(Variable* v, LocalType type, uint32_t index) {
+ DCHECK(v->IsParameter());
+ DCHECK_NOT_NULL(current_function_builder_);
+ ZoneHashMap::Entry* entry =
+ local_variables_.Lookup(v, ComputePointerHash(v));
+ DCHECK_NULL(entry);
+ IndexContainer* container = new (zone()) IndexContainer();
+ container->index = index;
+ entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
+ ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+
+ uint32_t LookupOrInsertGlobal(Variable* v, LocalType type) {
ZoneHashMap::Entry* entry =
global_variables_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
- uint16_t index =
- builder_->AddGlobal(WasmOpcodes::MachineTypeFor(type), 0);
+ uint32_t index = builder_->AddGlobal(type, 0);
IndexContainer* container = new (zone()) IndexContainer();
container->index = index;
entry = global_variables_.LookupOrInsert(v, ComputePointerHash(v),
@@ -1460,11 +1720,11 @@ class AsmWasmBuilderImpl : public AstVisitor {
return (reinterpret_cast<IndexContainer*>(entry->value))->index;
}
- uint16_t LookupOrInsertFunction(Variable* v) {
+ uint32_t LookupOrInsertFunction(Variable* v) {
DCHECK_NOT_NULL(builder_);
ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
- uint16_t index = builder_->AddFunction();
+ uint32_t index = builder_->AddFunction();
IndexContainer* container = new (zone()) IndexContainer();
container->index = index;
entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
@@ -1474,21 +1734,22 @@ class AsmWasmBuilderImpl : public AstVisitor {
return (reinterpret_cast<IndexContainer*>(entry->value))->index;
}
- LocalType TypeOf(Expression* expr) {
- DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
- return TypeFrom(expr->bounds().lower);
- }
+ LocalType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
- LocalType TypeFrom(Type* type) {
- if (type->Is(cache_.kAsmInt)) {
+ LocalType TypeFrom(AsmType* type) {
+ if (type->IsA(AsmType::Intish())) {
return kAstI32;
- } else if (type->Is(cache_.kAsmFloat)) {
+ }
+
+ if (type->IsA(AsmType::Floatish())) {
return kAstF32;
- } else if (type->Is(cache_.kAsmDouble)) {
+ }
+
+ if (type->IsA(AsmType::DoubleQ())) {
return kAstF64;
- } else {
- return kAstStmt;
}
+
+ return kAstStmt;
}
Zone* zone() { return zone_; }
@@ -1496,20 +1757,17 @@ class AsmWasmBuilderImpl : public AstVisitor {
ZoneHashMap local_variables_;
ZoneHashMap functions_;
ZoneHashMap global_variables_;
- bool in_function_;
- bool is_set_op_;
- bool marking_exported;
+ AsmScope scope_;
WasmModuleBuilder* builder_;
WasmFunctionBuilder* current_function_builder_;
FunctionLiteral* literal_;
Isolate* isolate_;
Zone* zone_;
- Handle<Object> foreign_;
AsmTyper* typer_;
- TypeCache const& cache_;
ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
- int block_size_;
- uint16_t init_function_index_;
+ ZoneVector<ForeignVariable> foreign_variables_;
+ uint32_t init_function_index_;
+ uint32_t foreign_init_function_index_;
uint32_t next_table_index_;
ZoneHashMap function_tables_;
ImportedFunctionTable imported_function_table_;
@@ -1521,21 +1779,18 @@ class AsmWasmBuilderImpl : public AstVisitor {
};
AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
- FunctionLiteral* literal, Handle<Object> foreign,
- AsmTyper* typer)
- : isolate_(isolate),
- zone_(zone),
- literal_(literal),
- foreign_(foreign),
- typer_(typer) {}
+ FunctionLiteral* literal, AsmTyper* typer)
+ : isolate_(isolate), zone_(zone), literal_(literal), typer_(typer) {}
// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
// that zone in constructor may be thrown away once wasm module is written.
-WasmModuleIndex* AsmWasmBuilder::Run() {
- AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_, typer_);
- impl.Compile();
- WasmModuleWriter* writer = impl.builder_->Build(zone_);
- return writer->WriteTo(zone_);
+ZoneBuffer* AsmWasmBuilder::Run(i::Handle<i::FixedArray>* foreign_args) {
+ AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
+ impl.Build();
+ *foreign_args = impl.GetForeignArgs();
+ ZoneBuffer* buffer = new (zone_) ZoneBuffer(zone_);
+ impl.builder_->WriteTo(*buffer);
+ return buffer;
}
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/asm-wasm-builder.h b/deps/v8/src/asmjs/asm-wasm-builder.h
index 09645ee3c4..3276c887b0 100644
--- a/deps/v8/src/wasm/asm-wasm-builder.h
+++ b/deps/v8/src/asmjs/asm-wasm-builder.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_ASM_WASM_BUILDER_H_
-#define V8_WASM_ASM_WASM_BUILDER_H_
+#ifndef V8_ASMJS_ASM_WASM_BUILDER_H_
+#define V8_ASMJS_ASM_WASM_BUILDER_H_
#include "src/allocation.h"
+#include "src/asmjs/asm-typer.h"
#include "src/objects.h"
-#include "src/typing-asm.h"
#include "src/wasm/encoder.h"
#include "src/zone.h"
@@ -21,14 +21,13 @@ namespace wasm {
class AsmWasmBuilder {
public:
explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
- Handle<Object> foreign, AsmTyper* typer);
- WasmModuleIndex* Run();
+ AsmTyper* typer);
+ ZoneBuffer* Run(Handle<FixedArray>* foreign_args);
private:
Isolate* isolate_;
Zone* zone_;
FunctionLiteral* literal_;
- Handle<Object> foreign_;
AsmTyper* typer_;
};
} // namespace wasm
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index a912bb60e4..83dbbe8134 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -39,10 +39,10 @@
#include "src/api.h"
#include "src/base/cpu.h"
#include "src/base/functional.h"
+#include "src/base/ieee754.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/builtins.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/debug/debug.h"
@@ -53,7 +53,6 @@
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/ostreams.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
@@ -114,39 +113,6 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// Common register code.
-
-const char* Register::ToString() {
- // This is the mapping of allocation indices to registers.
- DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
- return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->GetGeneralRegisterName(reg_code);
-}
-
-
-bool Register::IsAllocatable() const {
- return ((1 << reg_code) &
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->allocatable_general_codes_mask()) != 0;
-}
-
-
-const char* DoubleRegister::ToString() {
- // This is the mapping of allocation indices to registers.
- DCHECK(reg_code >= 0 && reg_code < kMaxNumRegisters);
- return RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->GetDoubleRegisterName(reg_code);
-}
-
-
-bool DoubleRegister::IsAllocatable() const {
- return ((1 << reg_code) &
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->allocatable_double_codes_mask()) != 0;
-}
-
-
-// -----------------------------------------------------------------------------
// Common double constants.
struct DoubleConstant BASE_EMBEDDED {
@@ -160,12 +126,33 @@ double uint32_bias;
static DoubleConstant double_constants;
-const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
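+// 16-byte-aligned sign-bit masks for SSE-style Abs and Neg: ANDing with the
+// 0x7F... patterns clears sign bits, XORing with 0x80... flips them.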
+static struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
+
+static struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
+
+static struct V8_ALIGNED(16) {
+ uint64_t a;
+ uint64_t b;
+} double_absolute_constant = {V8_UINT64_C(0x7FFFFFFFFFFFFFFF),
+ V8_UINT64_C(0x7FFFFFFFFFFFFFFF)};
+
+static struct V8_ALIGNED(16) {
+ uint64_t a;
+ uint64_t b;
+} double_negate_constant = {V8_UINT64_C(0x8000000000000000),
+ V8_UINT64_C(0x8000000000000000)};
-static bool math_exp_data_initialized = false;
-static base::Mutex* math_exp_data_mutex = NULL;
-static double* math_exp_constants_array = NULL;
-static double* math_exp_log_table_array = NULL;
+const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
@@ -201,7 +188,6 @@ AssemblerBase::~AssemblerBase() {
void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
if (size == 0) return;
- if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
#if defined(USE_SIMULATOR)
Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
@@ -318,11 +304,9 @@ int Label::pos() const {
// followed by pc delta
// followed by optional data depending on type.
//
-// 2-bit data type tags, used in short_data_record and data_jump long_record:
-// code_target_with_id: 00
-// position: 01
-// statement_position: 10
-// deopt_reason: 11
+// 1-bit data type tags, used in short_data_record and data_jump long_record:
+// code_target_with_id: 0
+// deopt_reason: 1
//
// If a pc delta exceeds 6 bits, it is split into a remainder that fits into
// 6 bits and a part that does not. The latter is encoded as a long record
@@ -339,7 +323,7 @@ int Label::pos() const {
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kLongTagBits = 6;
-const int kShortDataTypeTagBits = 2;
+const int kShortDataTypeTagBits = 1;
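+// A single tag bit leaves kBitsPerByte - 1 == 7 payload bits per record.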
const int kShortDataBits = kBitsPerByte - kShortDataTypeTagBits;
const int kEmbeddedObjectTag = 0;
@@ -358,10 +342,65 @@ const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
const int kCodeWithIdTag = 0;
-const int kNonstatementPositionTag = 1;
-const int kStatementPositionTag = 2;
-const int kDeoptReasonTag = 3;
+const int kDeoptReasonTag = 1;
+
+void RelocInfo::update_wasm_memory_reference(
+ Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
+ if (IsWasmMemoryReference(rmode_)) {
+ Address updated_reference;
+ DCHECK(old_size == 0 || Memory::IsAddressInRange(
+ old_base, wasm_memory_reference(), old_size));
+ updated_reference = new_base + (wasm_memory_reference() - old_base);
+ DCHECK(new_size == 0 ||
+ Memory::IsAddressInRange(new_base, updated_reference, new_size));
+ unchecked_update_wasm_memory_reference(updated_reference,
+ icache_flush_mode);
+ } else if (IsWasmMemorySizeReference(rmode_)) {
+ uint32_t updated_size_reference;
+ DCHECK(old_size == 0 || wasm_memory_size_reference() <= old_size);
+ updated_size_reference =
+ new_size + (wasm_memory_size_reference() - old_size);
+ DCHECK(updated_size_reference <= new_size);
+ unchecked_update_wasm_memory_size(updated_size_reference,
+ icache_flush_mode);
+ } else {
+ UNREACHABLE();
+ }
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+ }
+}
+
+void RelocInfo::update_wasm_global_reference(
+ Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ Address updated_reference;
+ DCHECK(reinterpret_cast<uintptr_t>(old_base) <=
+ reinterpret_cast<uintptr_t>(wasm_global_reference()));
+ updated_reference = new_base + (wasm_global_reference() - old_base);
+ DCHECK(reinterpret_cast<uintptr_t>(new_base) <=
+ reinterpret_cast<uintptr_t>(updated_reference));
+ unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
+ }
+}
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(isolate_, pc_, host_, target,
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+ IsCodeTarget(rmode_)) {
+ Object* target_code = Code::GetCodeFromTargetAddress(target);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target_code));
+ }
+}
uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
@@ -427,38 +466,8 @@ void RelocInfoWriter::WriteData(intptr_t data_delta) {
}
-void RelocInfoWriter::WritePosition(int pc_delta, int pos_delta,
- RelocInfo::Mode rmode) {
- int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
- : kStatementPositionTag;
- // Check if delta is small enough to fit in a tagged byte.
- if (is_intn(pos_delta, kShortDataBits)) {
- WriteShortTaggedPC(pc_delta, kLocatableTag);
- WriteShortTaggedData(pos_delta, pos_type_tag);
- } else {
- // Otherwise, use costly encoding.
- WriteModeAndPC(pc_delta, rmode);
- WriteIntData(pos_delta);
- }
-}
-
-
-void RelocInfoWriter::FlushPosition() {
- if (!next_position_candidate_flushed_) {
- WritePosition(next_position_candidate_pc_delta_,
- next_position_candidate_pos_delta_, RelocInfo::POSITION);
- next_position_candidate_pos_delta_ = 0;
- next_position_candidate_pc_delta_ = 0;
- next_position_candidate_flushed_ = true;
- }
-}
-
-
void RelocInfoWriter::Write(const RelocInfo* rinfo) {
RelocInfo::Mode rmode = rinfo->rmode();
- if (rmode != RelocInfo::POSITION) {
- FlushPosition();
- }
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
@@ -491,30 +500,13 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
DCHECK(rinfo->data() < (1 << kShortDataBits));
WriteShortTaggedPC(pc_delta, kLocatableTag);
WriteShortTaggedData(rinfo->data(), kDeoptReasonTag);
- } else if (RelocInfo::IsPosition(rmode)) {
- // Use signed delta-encoding for position.
- DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
- int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
- if (rmode == RelocInfo::STATEMENT_POSITION) {
- WritePosition(pc_delta, pos_delta, rmode);
- } else {
- DCHECK_EQ(rmode, RelocInfo::POSITION);
- if (pc_delta != 0 || last_mode_ != RelocInfo::POSITION) {
- FlushPosition();
- next_position_candidate_pc_delta_ = pc_delta;
- next_position_candidate_pos_delta_ = pos_delta;
- } else {
- next_position_candidate_pos_delta_ += pos_delta;
- }
- next_position_candidate_flushed_ = false;
- }
- last_position_ = static_cast<int>(rinfo->data());
} else {
WriteModeAndPC(pc_delta, rmode);
if (RelocInfo::IsComment(rmode)) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode)) {
+ RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
+ RelocInfo::IsDeoptPosition(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
@@ -566,16 +558,6 @@ void RelocIterator::AdvanceReadInt() {
}
-void RelocIterator::AdvanceReadPosition() {
- int x = 0;
- for (int i = 0; i < kIntSize; i++) {
- x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
- }
- last_position_ += x;
- rinfo_.data_ = last_position_;
-}
-
-
void RelocIterator::AdvanceReadData() {
intptr_t x = 0;
for (int i = 0; i < kIntptrSize; i++) {
@@ -614,26 +596,9 @@ inline void RelocIterator::ReadShortTaggedId() {
}
-inline void RelocIterator::ReadShortTaggedPosition() {
- int8_t signed_b = *pos_;
- // Signed right shift is arithmetic shift. Tested in test-utils.cc.
- last_position_ += signed_b >> kShortDataTypeTagBits;
- rinfo_.data_ = last_position_;
-}
-
-
inline void RelocIterator::ReadShortTaggedData() {
uint8_t unsigned_b = *pos_;
- rinfo_.data_ = unsigned_b >> kTagBits;
-}
-
-
-static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
- DCHECK(tag == kNonstatementPositionTag ||
- tag == kStatementPositionTag);
- return (tag == kNonstatementPositionTag) ?
- RelocInfo::POSITION :
- RelocInfo::STATEMENT_POSITION;
+ rinfo_.data_ = unsigned_b >> kShortDataTypeTagBits;
}
@@ -661,20 +626,12 @@ void RelocIterator::next() {
ReadShortTaggedId();
return;
}
- } else if (data_type_tag == kDeoptReasonTag) {
+ } else {
+ DCHECK(data_type_tag == kDeoptReasonTag);
if (SetMode(RelocInfo::DEOPT_REASON)) {
ReadShortTaggedData();
return;
}
- } else {
- DCHECK(data_type_tag == kNonstatementPositionTag ||
- data_type_tag == kStatementPositionTag);
- if (mode_mask_ & RelocInfo::kPositionMask) {
- // Always update the position if we are interested in either
- // statement positions or non-statement positions.
- ReadShortTaggedPosition();
- if (SetMode(GetPositionModeFromTag(data_type_tag))) return;
- }
}
} else {
DCHECK(tag == kDefaultTag);
@@ -695,17 +652,10 @@ void RelocIterator::next() {
return;
}
Advance(kIntptrSize);
- } else if (RelocInfo::IsPosition(rmode)) {
- if (mode_mask_ & RelocInfo::kPositionMask) {
- // Always update the position if we are interested in either
- // statement positions or non-statement positions.
- AdvanceReadPosition();
- if (SetMode(rmode)) return;
- } else {
- Advance(kIntSize);
- }
} else if (RelocInfo::IsConstPool(rmode) ||
- RelocInfo::IsVeneerPool(rmode)) {
+ RelocInfo::IsVeneerPool(rmode) ||
+ RelocInfo::IsDeoptId(rmode) ||
+ RelocInfo::IsDeoptPosition(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@@ -741,7 +691,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask)
done_ = false;
mode_mask_ = mode_mask;
last_id_ = 0;
- last_position_ = 0;
byte* sequence = code->FindCodeAgeSequence();
// We get the isolate from the map, because at serialization time
// the code pointer has been cloned and isn't really in heap space.
@@ -766,7 +715,6 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
done_ = false;
mode_mask_ = mode_mask;
last_id_ = 0;
- last_position_ = 0;
code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
@@ -816,18 +764,18 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "runtime entry";
case COMMENT:
return "comment";
- case POSITION:
- return "position";
- case STATEMENT_POSITION:
- return "statement position";
case EXTERNAL_REFERENCE:
return "external reference";
case INTERNAL_REFERENCE:
return "internal reference";
case INTERNAL_REFERENCE_ENCODED:
return "encoded internal reference";
+ case DEOPT_POSITION:
+ return "deopt position";
case DEOPT_REASON:
return "deopt reason";
+ case DEOPT_ID:
+ return "deopt index";
case CONST_POOL:
return "constant pool";
case VENEER_POOL:
@@ -846,6 +794,10 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "generator continuation";
case WASM_MEMORY_REFERENCE:
return "wasm memory reference";
+ case WASM_MEMORY_SIZE_REFERENCE:
+ return "wasm memory size reference";
+ case WASM_GLOBAL_REFERENCE:
+ return "wasm global value reference";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@@ -859,9 +811,11 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << static_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (IsComment(rmode_)) {
os << " (" << reinterpret_cast<char*>(data_) << ")";
+ } else if (rmode_ == DEOPT_POSITION) {
+ os << " (" << data() << ")";
} else if (rmode_ == DEOPT_REASON) {
- os << " (" << Deoptimizer::GetDeoptReason(
- static_cast<Deoptimizer::DeoptReason>(data_)) << ")";
+ os << " ("
+ << DeoptimizeReasonToString(static_cast<DeoptimizeReason>(data_)) << ")";
} else if (rmode_ == EMBEDDED_OBJECT) {
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
@@ -877,8 +831,6 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
if (rmode_ == CODE_TARGET_WITH_ID) {
os << " (id=" << static_cast<int>(data_) << ")";
}
- } else if (IsPosition(rmode_)) {
- os << " (" << data() << ")";
} else if (IsRuntimeEntry(rmode_) &&
isolate->deoptimizer_data() != NULL) {
    // Deoptimization bailouts are stored as runtime entries.
@@ -929,10 +881,10 @@ void RelocInfo::Verify(Isolate* isolate) {
}
case RUNTIME_ENTRY:
case COMMENT:
- case POSITION:
- case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
+ case DEOPT_POSITION:
case DEOPT_REASON:
+ case DEOPT_ID:
case CONST_POOL:
case VENEER_POOL:
case DEBUG_BREAK_SLOT_AT_POSITION:
@@ -941,6 +893,8 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
case GENERATOR_CONTINUATION:
case WASM_MEMORY_REFERENCE:
+ case WASM_MEMORY_SIZE_REFERENCE:
+ case WASM_GLOBAL_REFERENCE:
case NONE32:
case NONE64:
break;
@@ -980,67 +934,10 @@ void ExternalReference::SetUp() {
double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
- math_exp_data_mutex = new base::Mutex();
-}
-
-
-void ExternalReference::InitializeMathExpData() {
- // Early return?
- if (math_exp_data_initialized) return;
-
- base::LockGuard<base::Mutex> lock_guard(math_exp_data_mutex);
- if (!math_exp_data_initialized) {
- // If this is changed, generated code must be adapted too.
- const int kTableSizeBits = 11;
- const int kTableSize = 1 << kTableSizeBits;
- const double kTableSizeDouble = static_cast<double>(kTableSize);
-
- math_exp_constants_array = new double[9];
- // Input values smaller than this always return 0.
- math_exp_constants_array[0] = -708.39641853226408;
- // Input values larger than this always return +Infinity.
- math_exp_constants_array[1] = 709.78271289338397;
- math_exp_constants_array[2] = V8_INFINITY;
- // The rest is black magic. Do not attempt to understand it. It is
- // loosely based on the "expd" function published at:
- // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
- const double constant3 = (1 << kTableSizeBits) / std::log(2.0);
- math_exp_constants_array[3] = constant3;
- math_exp_constants_array[4] =
- static_cast<double>(static_cast<int64_t>(3) << 51);
- math_exp_constants_array[5] = 1 / constant3;
- math_exp_constants_array[6] = 3.0000000027955394;
- math_exp_constants_array[7] = 0.16666666685227835;
- math_exp_constants_array[8] = 1;
-
- math_exp_log_table_array = new double[kTableSize];
- for (int i = 0; i < kTableSize; i++) {
- double value = std::pow(2, i / kTableSizeDouble);
- uint64_t bits = bit_cast<uint64_t, double>(value);
- bits &= (static_cast<uint64_t>(1) << 52) - 1;
- double mantissa = bit_cast<double, uint64_t>(bits);
- math_exp_log_table_array[i] = mantissa;
- }
-
- math_exp_data_initialized = true;
- }
-}
-
-
-void ExternalReference::TearDownMathExpData() {
- delete[] math_exp_constants_array;
- math_exp_constants_array = NULL;
- delete[] math_exp_log_table_array;
- math_exp_log_table_array = NULL;
- delete math_exp_data_mutex;
- math_exp_data_mutex = NULL;
}
-
-ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
- : address_(Redirect(isolate, Builtins::c_function_address(id))) {}
-
+ExternalReference::ExternalReference(Address address, Isolate* isolate)
+ : address_(Redirect(isolate, address)) {}
ExternalReference::ExternalReference(
ApiFunction* fun,
@@ -1072,6 +969,12 @@ ExternalReference ExternalReference::interpreter_dispatch_table_address(
return ExternalReference(isolate->interpreter()->dispatch_table_address());
}
+ExternalReference ExternalReference::interpreter_dispatch_counters(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->interpreter()->bytecode_dispatch_counters_table());
+}
+
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@@ -1255,97 +1158,52 @@ ExternalReference ExternalReference::wasm_uint64_mod(Isolate* isolate) {
Redirect(isolate, FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
}
-static void f64_acos_wrapper(double* param) { *param = std::acos(*param); }
-
-ExternalReference ExternalReference::f64_acos_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
-}
-
-static void f64_asin_wrapper(double* param) { *param = std::asin(*param); }
-
-ExternalReference ExternalReference::f64_asin_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
-}
-
-static void f64_atan_wrapper(double* param) { *param = std::atan(*param); }
-
-ExternalReference ExternalReference::f64_atan_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan_wrapper)));
+ExternalReference ExternalReference::wasm_word32_ctz(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_ctz_wrapper)));
}
-static void f64_cos_wrapper(double* param) { *param = std::cos(*param); }
-
-ExternalReference ExternalReference::f64_cos_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_cos_wrapper)));
+ExternalReference ExternalReference::wasm_word64_ctz(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word64_ctz_wrapper)));
}
-static void f64_sin_wrapper(double* param) { *param = std::sin(*param); }
-
-ExternalReference ExternalReference::f64_sin_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_sin_wrapper)));
+ExternalReference ExternalReference::wasm_word32_popcnt(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_popcnt_wrapper)));
}
-static void f64_tan_wrapper(double* param) { *param = std::tan(*param); }
-
-ExternalReference ExternalReference::f64_tan_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_tan_wrapper)));
+ExternalReference ExternalReference::wasm_word64_popcnt(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
}
-static void f64_exp_wrapper(double* param) { *param = std::exp(*param); }
-
-ExternalReference ExternalReference::f64_exp_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_exp_wrapper)));
+static void f64_acos_wrapper(double* param) {
+ WriteDoubleValue(param, base::ieee754::acos(ReadDoubleValue(param)));
}
-static void f64_log_wrapper(double* param) { *param = std::log(*param); }
-
-ExternalReference ExternalReference::f64_log_wrapper_function(
+ExternalReference ExternalReference::f64_acos_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_log_wrapper)));
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
}
-static void f64_pow_wrapper(double* param0, double* param1) {
- *param0 = power_double_double(*param0, *param1);
+static void f64_asin_wrapper(double* param) {
+ WriteDoubleValue(param, base::ieee754::asin(ReadDoubleValue(param)));
}
-ExternalReference ExternalReference::f64_pow_wrapper_function(
+ExternalReference ExternalReference::f64_asin_wrapper_function(
Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_pow_wrapper)));
-}
-
-static void f64_atan2_wrapper(double* param0, double* param1) {
- double x = *param0;
- double y = *param1;
- // TODO(bradnelson): Find a good place to put this to share
- // with the same code in src/runtime/runtime-math.cc
- static const double kPiDividedBy4 = 0.78539816339744830962;
- if (std::isinf(x) && std::isinf(y)) {
- // Make sure that the result in case of two infinite arguments
- // is a multiple of Pi / 4. The sign of the result is determined
- // by the first argument (x) and the sign of the second argument
- // determines the multiplier: one or three.
- int multiplier = (x < 0) ? -1 : 1;
- if (y < 0) multiplier *= 3;
- *param0 = multiplier * kPiDividedBy4;
- } else {
- *param0 = std::atan2(x, y);
- }
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
}
-ExternalReference ExternalReference::f64_atan2_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_atan2_wrapper)));
+ExternalReference ExternalReference::wasm_float64_pow(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float64_pow_wrapper)));
}
static void f64_mod_wrapper(double* param0, double* param1) {
- *param0 = modulo(*param0, *param1);
+ WriteDoubleValue(param0,
+ modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
}
ExternalReference ExternalReference::f64_mod_wrapper_function(
@@ -1499,8 +1357,28 @@ ExternalReference ExternalReference::address_of_uint32_bias() {
}
+ExternalReference ExternalReference::address_of_float_abs_constant() {
+ return ExternalReference(reinterpret_cast<void*>(&float_absolute_constant));
+}
+
+
+ExternalReference ExternalReference::address_of_float_neg_constant() {
+ return ExternalReference(reinterpret_cast<void*>(&float_negate_constant));
+}
+
+
+ExternalReference ExternalReference::address_of_double_abs_constant() {
+ return ExternalReference(reinterpret_cast<void*>(&double_absolute_constant));
+}
+
+
+ExternalReference ExternalReference::address_of_double_neg_constant() {
+ return ExternalReference(reinterpret_cast<void*>(&double_negate_constant));
+}
+
+
ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
- return ExternalReference(isolate->cpu_profiler()->is_profiling_address());
+ return ExternalReference(isolate->is_profiling_address());
}
@@ -1590,28 +1468,105 @@ ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
#endif // V8_INTERPRETED_REGEXP
+ExternalReference ExternalReference::ieee754_acos_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::acos), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_acosh_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::acosh), BUILTIN_FP_FP_CALL));
+}
-ExternalReference ExternalReference::math_log_double_function(
- Isolate* isolate) {
- typedef double (*d2d)(double x);
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(static_cast<d2d>(std::log)),
- BUILTIN_FP_CALL));
+ExternalReference ExternalReference::ieee754_asin_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::asin), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_asinh_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::asinh), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_atan_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::atan), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_atanh_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::atanh), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_atan2_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::atan2), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cbrt_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(base::ieee754::cbrt),
+ BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cos_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::cos), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cosh_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::cosh), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_exp_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::exp), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_expm1_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::expm1), BUILTIN_FP_FP_CALL));
}
+ExternalReference ExternalReference::ieee754_log_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log), BUILTIN_FP_CALL));
+}
-ExternalReference ExternalReference::math_exp_constants(int constant_index) {
- DCHECK(math_exp_data_initialized);
+ExternalReference ExternalReference::ieee754_log1p_function(Isolate* isolate) {
return ExternalReference(
- reinterpret_cast<void*>(math_exp_constants_array + constant_index));
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log1p), BUILTIN_FP_CALL));
}
+ExternalReference ExternalReference::ieee754_log10_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log10), BUILTIN_FP_CALL));
+}
-ExternalReference ExternalReference::math_exp_log_table() {
- DCHECK(math_exp_data_initialized);
- return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
+ExternalReference ExternalReference::ieee754_log2_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log2), BUILTIN_FP_CALL));
}
+ExternalReference ExternalReference::ieee754_sin_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::sin), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_sinh_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::sinh), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_tan_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::tan), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_tanh_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
+}
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
@@ -1720,14 +1675,6 @@ ExternalReference ExternalReference::power_double_double_function(
}
-ExternalReference ExternalReference::power_double_int_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(power_double_int),
- BUILTIN_FP_INT_CALL));
-}
-
-
ExternalReference ExternalReference::mod_two_doubles_operation(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@@ -1735,12 +1682,15 @@ ExternalReference ExternalReference::mod_two_doubles_operation(
BUILTIN_FP_FP_CALL));
}
-
-ExternalReference ExternalReference::debug_step_in_enabled_address(
+ExternalReference ExternalReference::debug_last_step_action_address(
Isolate* isolate) {
- return ExternalReference(isolate->debug()->step_in_enabled_address());
+ return ExternalReference(isolate->debug()->last_step_action_address());
}
+ExternalReference ExternalReference::debug_suspended_generator_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->suspended_generator_address());
+}
ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
return ExternalReference(reinterpret_cast<void*>(
@@ -1770,54 +1720,6 @@ std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
return os;
}
-void AssemblerPositionsRecorder::RecordPosition(int pos) {
- DCHECK(pos != RelocInfo::kNoPosition);
- DCHECK(pos >= 0);
- state_.current_position = pos;
- LOG_CODE_EVENT(assembler_->isolate(),
- CodeLinePosInfoAddPositionEvent(jit_handler_data_,
- assembler_->pc_offset(),
- pos));
-}
-
-void AssemblerPositionsRecorder::RecordStatementPosition(int pos) {
- DCHECK(pos != RelocInfo::kNoPosition);
- DCHECK(pos >= 0);
- state_.current_statement_position = pos;
- LOG_CODE_EVENT(assembler_->isolate(),
- CodeLinePosInfoAddStatementPositionEvent(
- jit_handler_data_,
- assembler_->pc_offset(),
- pos));
-}
-
-bool AssemblerPositionsRecorder::WriteRecordedPositions() {
- bool written = false;
-
- // Write the statement position if it is different from what was written last
- // time.
- if (state_.current_statement_position != state_.written_statement_position) {
- EnsureSpace ensure_space(assembler_);
- assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
- state_.current_statement_position);
- state_.written_position = state_.current_statement_position;
- state_.written_statement_position = state_.current_statement_position;
- written = true;
- }
-
- // Write the position if it is different from what was written last time and
- // also different from the statement position that was just written.
- if (state_.current_position != state_.written_position) {
- EnsureSpace ensure_space(assembler_);
- assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
- state_.written_position = state_.current_position;
- written = true;
- }
-
- // Return whether something was written.
- return written;
-}
-
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
int double_reach_bits) {
@@ -2023,12 +1925,13 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
// Platform specific but identical code for all the platforms.
-
-void Assembler::RecordDeoptReason(const int reason, int raw_position) {
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
+void Assembler::RecordDeoptReason(DeoptimizeReason reason, int raw_position,
+ int id) {
+ if (FLAG_trace_deopt || isolate()->is_profiling()) {
EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::POSITION, raw_position);
- RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
+ RecordRelocInfo(RelocInfo::DEOPT_POSITION, raw_position);
+ RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
+ RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
}
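
The hunk above replaces the old POSITION + DEOPT_REASON pair with a DEOPT_POSITION / DEOPT_REASON / DEOPT_ID triple in the reloc stream. A hedged sketch of the consumer side, built from the RelocIterator and ModeMask APIs declared in assembler.h (illustrative only, not the actual profiler/deoptimizer call site):

    int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_POSITION) |
               RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
               RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
    for (RelocIterator it(code, mask); !it.done(); it.next()) {
      RelocInfo* info = it.rinfo();
      if (RelocInfo::IsDeoptReason(info->rmode())) {
        // The reason enum is stored as plain int data in the reloc stream.
        DeoptimizeReason reason = static_cast<DeoptimizeReason>(info->data());
        // ... map `reason` to a human-readable string for tracing ...
      }
    }
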
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 192d16b64d..77beac12a2 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -36,9 +36,11 @@
#define V8_ASSEMBLER_H_
#include "src/allocation.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
+#include "src/deoptimize-reason.h"
#include "src/isolate.h"
#include "src/log.h"
+#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -224,6 +226,8 @@ class CpuFeatures : public AllStatic {
static inline bool SupportsCrankshaft();
+ static inline bool SupportsSimd128();
+
static inline unsigned icache_line_size() {
DCHECK(icache_line_size_ != 0);
return icache_line_size_;
@@ -351,17 +355,6 @@ enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
class RelocInfo {
public:
- // The constant kNoPosition is used with the collecting of source positions
- // in the relocation information. Two types of source positions are collected
- // "position" (RelocMode position) and "statement position" (RelocMode
- // statement_position). The "position" is collected at places in the source
- // code which are of interest when making stack traces to pin-point the source
- // location of a stack frame as close as possible. The "statement position" is
- // collected at the beginning at each statement, and is used to indicate
- // possible break locations. kNoPosition is used to indicate an
- // invalid/uninitialized position value.
- static const int kNoPosition = -1;
-
// This string is used to add padding comments to the reloc info in cases
// where we are not sure to have enough space for patching in during
// lazy deoptimization. This is the case if we have indirect calls for which
@@ -384,15 +377,15 @@ class RelocInfo {
CODE_TARGET_WITH_ID,
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
- CELL,
// To relocate pointers into the wasm memory embedded in wasm code
WASM_MEMORY_REFERENCE,
+ WASM_GLOBAL_REFERENCE,
+ WASM_MEMORY_SIZE_REFERENCE,
+ CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
COMMENT,
- POSITION, // See comment for kNoPosition above.
- STATEMENT_POSITION, // See comment for kNoPosition above.
// Additional code inserted for debug break slot.
DEBUG_BREAK_SLOT_AT_POSITION,
@@ -414,7 +407,9 @@ class RelocInfo {
CONST_POOL,
VENEER_POOL,
- DEOPT_REASON, // Deoptimization reason index.
+ DEOPT_POSITION, // Deoptimization source position.
+ DEOPT_REASON, // Deoptimization reason index.
+ DEOPT_ID, // Deoptimization inlining id.
// This is not an actual reloc mode, but used to encode a long pc jump that
// cannot be encoded as part of another record.
@@ -430,7 +425,7 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = DEBUGGER_STATEMENT,
- LAST_GCED_ENUM = WASM_MEMORY_REFERENCE,
+ LAST_GCED_ENUM = WASM_MEMORY_SIZE_REFERENCE,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
@@ -446,8 +441,7 @@ class RelocInfo {
}
static inline bool IsRealRelocMode(Mode mode) {
- return mode >= FIRST_REAL_RELOC_MODE &&
- mode <= LAST_REAL_RELOC_MODE;
+ return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
}
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
@@ -472,14 +466,14 @@ class RelocInfo {
static inline bool IsVeneerPool(Mode mode) {
return mode == VENEER_POOL;
}
+ static inline bool IsDeoptPosition(Mode mode) {
+ return mode == DEOPT_POSITION;
+ }
static inline bool IsDeoptReason(Mode mode) {
return mode == DEOPT_REASON;
}
- static inline bool IsPosition(Mode mode) {
- return mode == POSITION || mode == STATEMENT_POSITION;
- }
- static inline bool IsStatementPosition(Mode mode) {
- return mode == STATEMENT_POSITION;
+ static inline bool IsDeoptId(Mode mode) {
+ return mode == DEOPT_ID;
}
static inline bool IsExternalReference(Mode mode) {
return mode == EXTERNAL_REFERENCE;
@@ -521,6 +515,12 @@ class RelocInfo {
static inline bool IsWasmMemoryReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE;
}
+ static inline bool IsWasmMemorySizeReference(Mode mode) {
+ return mode == WASM_MEMORY_SIZE_REFERENCE;
+ }
+ static inline bool IsWasmGlobalReference(Mode mode) {
+ return mode == WASM_GLOBAL_REFERENCE;
+ }
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -547,44 +547,44 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
+ Address wasm_memory_reference();
+ Address wasm_global_reference();
+ uint32_t wasm_memory_size_reference();
+ void update_wasm_memory_reference(
+ Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void update_wasm_global_reference(
+ Address old_base, Address new_base,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void set_target_address(
+ Address target,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
- INLINE(void set_target_address(Address target,
- WriteBarrierMode write_barrier_mode =
- UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED));
INLINE(Object* target_object());
INLINE(Handle<Object> target_object_handle(Assembler* origin));
- INLINE(void set_target_object(Object* target,
- WriteBarrierMode write_barrier_mode =
- UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED));
+ INLINE(void set_target_object(
+ Object* target,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_runtime_entry(Assembler* origin));
- INLINE(void set_target_runtime_entry(Address target,
- WriteBarrierMode write_barrier_mode =
- UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED));
+ INLINE(void set_target_runtime_entry(
+ Address target,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Cell* target_cell());
INLINE(Handle<Cell> target_cell_handle());
- INLINE(void set_target_cell(Cell* cell,
- WriteBarrierMode write_barrier_mode =
- UPDATE_WRITE_BARRIER,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED));
+ INLINE(void set_target_cell(
+ Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Handle<Object> code_age_stub_handle(Assembler* origin));
INLINE(Code* code_age_stub());
- INLINE(void set_code_age_stub(Code* stub,
- ICacheFlushMode icache_flush_mode =
- FLUSH_ICACHE_IF_NEEDED));
-
- INLINE(Address wasm_memory_reference());
- INLINE(void update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode = SKIP_ICACHE_FLUSH));
+ INLINE(void set_code_age_stub(
+ Code* stub, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
INLINE(Address constant_pool_entry_address());
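
The wasm accessors added in the hunk above exist so compiled wasm code can be patched when its backing memory moves or grows. A minimal sketch against the declared API (mirrors the signatures above, not a specific V8 call site; `old_base`/`new_base`/`old_size`/`new_size` are assumed inputs):

    int mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
               RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
    for (RelocIterator it(code, mask); !it.done(); it.next()) {
      // Rewrites embedded base addresses and size constants in one pass;
      // icache flushing defaults to FLUSH_ICACHE_IF_NEEDED per the header.
      it.rinfo()->update_wasm_memory_reference(old_base, new_base,
                                               old_size, new_size);
    }
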
@@ -631,6 +631,8 @@ class RelocInfo {
INLINE(void WipeOut());
template<typename StaticVisitor> inline void Visit(Heap* heap);
+
+ template <typename ObjectVisitor>
inline void Visit(Isolate* isolate, ObjectVisitor* v);
// Check whether this debug break slot has been patched with a call to the
@@ -653,15 +655,18 @@ class RelocInfo {
#endif
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
- static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
- static const int kDataMask =
- (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
+ static const int kDataMask = (1 << CODE_TARGET_WITH_ID) | (1 << COMMENT);
static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
1 << DEBUG_BREAK_SLOT_AT_RETURN |
1 << DEBUG_BREAK_SLOT_AT_CALL;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
+ void unchecked_update_wasm_memory_reference(Address address,
+ ICacheFlushMode flush_mode);
+ void unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode);
+
Isolate* isolate_;
// On ARM, note that pc_ is the address of the constant pool entry
// to be relocated and not the address of the instruction
@@ -679,24 +684,8 @@ class RelocInfo {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
- RelocInfoWriter()
- : pos_(NULL),
- last_pc_(NULL),
- last_id_(0),
- last_position_(0),
- last_mode_(RelocInfo::NUMBER_OF_MODES),
- next_position_candidate_pos_delta_(0),
- next_position_candidate_pc_delta_(0),
- next_position_candidate_flushed_(true) {}
- RelocInfoWriter(byte* pos, byte* pc)
- : pos_(pos),
- last_pc_(pc),
- last_id_(0),
- last_position_(0),
- last_mode_(RelocInfo::NUMBER_OF_MODES),
- next_position_candidate_pos_delta_(0),
- next_position_candidate_pc_delta_(0),
- next_position_candidate_flushed_(true) {}
+ RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_id_(0) {}
+ RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc), last_id_(0) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@@ -710,8 +699,6 @@ class RelocInfoWriter BASE_EMBEDDED {
last_pc_ = pc;
}
- void Finish() { FlushPosition(); }
-
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
// On ia32 and arm this is 1 + 4 + 1 + 1 + 4 = 11.
@@ -729,18 +716,11 @@ class RelocInfoWriter BASE_EMBEDDED {
inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
inline void WriteIntData(int data_delta);
inline void WriteData(intptr_t data_delta);
- inline void WritePosition(int pc_delta, int pos_delta, RelocInfo::Mode rmode);
-
- void FlushPosition();
byte* pos_;
byte* last_pc_;
int last_id_;
- int last_position_;
RelocInfo::Mode last_mode_;
- int next_position_candidate_pos_delta_;
- uint32_t next_position_candidate_pc_delta_;
- bool next_position_candidate_flushed_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};
@@ -786,13 +766,11 @@ class RelocIterator: public Malloced {
int GetShortDataTypeTag();
void ReadShortTaggedPC();
void ReadShortTaggedId();
- void ReadShortTaggedPosition();
void ReadShortTaggedData();
void AdvanceReadPC();
void AdvanceReadId();
void AdvanceReadInt();
- void AdvanceReadPosition();
void AdvanceReadData();
// If the given mode is wanted, set it in rinfo_ and return true.
@@ -808,7 +786,6 @@ class RelocIterator: public Malloced {
bool done_;
int mode_mask_;
int last_id_;
- int last_position_;
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
@@ -877,15 +854,13 @@ class ExternalReference BASE_EMBEDDED {
};
static void SetUp();
- static void InitializeMathExpData();
- static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
Type type);
ExternalReference() : address_(NULL) {}
- ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
+ ExternalReference(Address address, Isolate* isolate);
ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
@@ -909,6 +884,7 @@ class ExternalReference BASE_EMBEDDED {
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference interpreter_dispatch_table_address(Isolate* isolate);
+ static ExternalReference interpreter_dispatch_counters(Isolate* isolate);
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
@@ -948,17 +924,14 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference wasm_int64_mod(Isolate* isolate);
static ExternalReference wasm_uint64_div(Isolate* isolate);
static ExternalReference wasm_uint64_mod(Isolate* isolate);
+ static ExternalReference wasm_word32_ctz(Isolate* isolate);
+ static ExternalReference wasm_word64_ctz(Isolate* isolate);
+ static ExternalReference wasm_word32_popcnt(Isolate* isolate);
+ static ExternalReference wasm_word64_popcnt(Isolate* isolate);
+ static ExternalReference wasm_float64_pow(Isolate* isolate);
static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
- static ExternalReference f64_atan_wrapper_function(Isolate* isolate);
- static ExternalReference f64_cos_wrapper_function(Isolate* isolate);
- static ExternalReference f64_sin_wrapper_function(Isolate* isolate);
- static ExternalReference f64_tan_wrapper_function(Isolate* isolate);
- static ExternalReference f64_exp_wrapper_function(Isolate* isolate);
- static ExternalReference f64_log_wrapper_function(Isolate* isolate);
- static ExternalReference f64_atan2_wrapper_function(Isolate* isolate);
- static ExternalReference f64_pow_wrapper_function(Isolate* isolate);
static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
// Log support.
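
The wasm_word32_ctz/popcnt entries above redirect to C fallbacks for targets without the corresponding instructions. The actual implementations live elsewhere in the tree; for context, a standard SWAR popcount of the kind such a fallback computes (illustrative name):

    static uint32_t word32_popcnt_fallback(uint32_t v) {
      // Classic bit-twiddling: sum bits in 2-, 4-, then 8-bit lanes.
      v = v - ((v >> 1) & 0x55555555u);
      v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
      return (((v + (v >> 4)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24;
    }
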
@@ -1002,7 +975,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference mod_two_doubles_operation(Isolate* isolate);
static ExternalReference power_double_double_function(Isolate* isolate);
- static ExternalReference power_double_int_function(Isolate* isolate);
static ExternalReference handle_scope_next_address(Isolate* isolate);
static ExternalReference handle_scope_limit_address(Isolate* isolate);
@@ -1019,10 +991,33 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference address_of_the_hole_nan();
static ExternalReference address_of_uint32_bias();
- static ExternalReference math_log_double_function(Isolate* isolate);
-
- static ExternalReference math_exp_constants(int constant_index);
- static ExternalReference math_exp_log_table();
+ // Static variables containing simd constants.
+ static ExternalReference address_of_float_abs_constant();
+ static ExternalReference address_of_float_neg_constant();
+ static ExternalReference address_of_double_abs_constant();
+ static ExternalReference address_of_double_neg_constant();
+
+ // IEEE 754 functions.
+ static ExternalReference ieee754_acos_function(Isolate* isolate);
+ static ExternalReference ieee754_acosh_function(Isolate* isolate);
+ static ExternalReference ieee754_asin_function(Isolate* isolate);
+ static ExternalReference ieee754_asinh_function(Isolate* isolate);
+ static ExternalReference ieee754_atan_function(Isolate* isolate);
+ static ExternalReference ieee754_atanh_function(Isolate* isolate);
+ static ExternalReference ieee754_atan2_function(Isolate* isolate);
+ static ExternalReference ieee754_cbrt_function(Isolate* isolate);
+ static ExternalReference ieee754_cos_function(Isolate* isolate);
+ static ExternalReference ieee754_cosh_function(Isolate* isolate);
+ static ExternalReference ieee754_exp_function(Isolate* isolate);
+ static ExternalReference ieee754_expm1_function(Isolate* isolate);
+ static ExternalReference ieee754_log_function(Isolate* isolate);
+ static ExternalReference ieee754_log1p_function(Isolate* isolate);
+ static ExternalReference ieee754_log10_function(Isolate* isolate);
+ static ExternalReference ieee754_log2_function(Isolate* isolate);
+ static ExternalReference ieee754_sin_function(Isolate* isolate);
+ static ExternalReference ieee754_sinh_function(Isolate* isolate);
+ static ExternalReference ieee754_tan_function(Isolate* isolate);
+ static ExternalReference ieee754_tanh_function(Isolate* isolate);
static ExternalReference page_flags(Page* page);
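
These ieee754_* entries replace the per-caller f64 wrappers and math_log/exp machinery deleted above with one table of base::ieee754 fallbacks. A hedged sketch of how a code generator reaches one of them (ARM-flavored macro-assembler names; treat the exact helper signatures as assumptions):

    // Argument arrives in a double register; the reference was created with
    // BUILTIN_FP_CALL, so the redirect knows the C signature.
    __ PrepareCallCFunction(0, 1, scratch);  // 0 integer args, 1 double arg
    __ MovToFloatParameter(d0);
    __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
    __ MovFromFloatResult(d0);  // result back in a double register
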
@@ -1047,8 +1042,11 @@ class ExternalReference BASE_EMBEDDED {
Address address() const { return reinterpret_cast<Address>(address_); }
- // Used to check if single stepping is enabled in generated code.
- static ExternalReference debug_step_in_enabled_address(Isolate* isolate);
+ // Used to read out the last step action of the debugger.
+ static ExternalReference debug_last_step_action_address(Isolate* isolate);
+
+ // Used to check for a suspended generator, which is used when stepping across an await call.
+ static ExternalReference debug_suspended_generator_address(Isolate* isolate);
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
@@ -1107,51 +1105,6 @@ size_t hash_value(ExternalReference);
std::ostream& operator<<(std::ostream&, ExternalReference);
-
-// -----------------------------------------------------------------------------
-// Position recording support
-
-struct PositionState {
- PositionState() : current_position(RelocInfo::kNoPosition),
- written_position(RelocInfo::kNoPosition),
- current_statement_position(RelocInfo::kNoPosition),
- written_statement_position(RelocInfo::kNoPosition) {}
-
- int current_position;
- int written_position;
-
- int current_statement_position;
- int written_statement_position;
-};
-
-class AssemblerPositionsRecorder : public PositionsRecorder {
- public:
- explicit AssemblerPositionsRecorder(Assembler* assembler)
- : assembler_(assembler) {}
-
- // Set current position to pos.
- void RecordPosition(int pos);
-
- // Set current statement position to pos.
- void RecordStatementPosition(int pos);
-
- // Write recorded positions to relocation information.
- bool WriteRecordedPositions();
-
- int current_position() const { return state_.current_position; }
-
- int current_statement_position() const {
- return state_.current_statement_position;
- }
-
- private:
- Assembler* assembler_;
- PositionState state_;
-
- DISALLOW_COPY_AND_ASSIGN(AssemblerPositionsRecorder);
-};
-
-
// -----------------------------------------------------------------------------
// Utility functions
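
Backing up to the address_of_float_abs_constant family declared earlier in this header: these point at static sign-mask constants so codegen can lower float/double abs and negation to a single bitwise op. A hedged ia32-style illustration (the exact emitters vary by port; treat as a sketch):

    // andps with an all-ones-except-sign mask clears the sign bit (fabs);
    // xorps with a sign-only mask flips it (negation).
    __ andps(xmm0, Operand::StaticVariable(
                       ExternalReference::address_of_float_abs_constant()));
    __ xorps(xmm1, Operand::StaticVariable(
                       ExternalReference::address_of_float_neg_constant()));
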
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 4fdc3f9540..65a00bc68c 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -5,4 +5,4 @@ bmeurer@chromium.org
littledan@chromium.org
mstarzinger@chromium.org
rossberg@chromium.org
-
+verwaest@chromium.org
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index edee91d3a1..7bb8f08192 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -64,18 +64,6 @@ void AstExpressionRewriter::VisitFunctionDeclaration(
}
-void AstExpressionRewriter::VisitImportDeclaration(ImportDeclaration* node) {
- // Not visiting `proxy_`.
- NOTHING();
-}
-
-
-void AstExpressionRewriter::VisitExportDeclaration(ExportDeclaration* node) {
- // Not visiting `proxy_`.
- NOTHING();
-}
-
-
void AstExpressionRewriter::VisitBlock(Block* node) {
VisitStatements(node->statements());
}
@@ -169,12 +157,10 @@ void AstExpressionRewriter::VisitForInStatement(ForInStatement* node) {
void AstExpressionRewriter::VisitForOfStatement(ForOfStatement* node) {
- AST_REWRITE_PROPERTY(Expression, node, each);
AST_REWRITE_PROPERTY(Expression, node, assign_iterator);
AST_REWRITE_PROPERTY(Expression, node, next_result);
AST_REWRITE_PROPERTY(Expression, node, result_done);
AST_REWRITE_PROPERTY(Expression, node, assign_each);
- AST_REWRITE_PROPERTY(Expression, node, subject);
AST_REWRITE_PROPERTY(Statement, node, body);
}
diff --git a/deps/v8/src/ast/ast-expression-rewriter.h b/deps/v8/src/ast/ast-expression-rewriter.h
index 1da3fa8247..ac45d76b30 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.h
+++ b/deps/v8/src/ast/ast-expression-rewriter.h
@@ -17,19 +17,21 @@ namespace internal {
// A rewriting Visitor over a CompilationInfo's AST that invokes
// VisitExpression on each expression node.
-class AstExpressionRewriter : public AstVisitor {
+// This AstVisitor is not final, and provides the AstVisitor methods as virtual
+// methods so they can be specialized by subclasses.
+class AstExpressionRewriter : public AstVisitor<AstExpressionRewriter> {
public:
- explicit AstExpressionRewriter(Isolate* isolate) : AstVisitor() {
+ explicit AstExpressionRewriter(Isolate* isolate) {
InitializeAstRewriter(isolate);
}
- explicit AstExpressionRewriter(uintptr_t stack_limit) : AstVisitor() {
+ explicit AstExpressionRewriter(uintptr_t stack_limit) {
InitializeAstRewriter(stack_limit);
}
- ~AstExpressionRewriter() override {}
+ virtual ~AstExpressionRewriter() {}
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
- void VisitStatements(ZoneList<Statement*>* statements) override;
- void VisitExpressions(ZoneList<Expression*>* expressions) override;
+ virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ virtual void VisitStatements(ZoneList<Statement*>* statements);
+ virtual void VisitExpressions(ZoneList<Expression*>* expressions);
virtual void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
@@ -39,7 +41,7 @@ class AstExpressionRewriter : public AstVisitor {
private:
DEFINE_AST_REWRITER_SUBCLASS_MEMBERS();
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
diff --git a/deps/v8/src/ast/ast-expression-visitor.cc b/deps/v8/src/ast/ast-expression-visitor.cc
deleted file mode 100644
index dbf4ea463c..0000000000
--- a/deps/v8/src/ast/ast-expression-visitor.cc
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/ast/ast-expression-visitor.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define RECURSE(call) \
- do { \
- DCHECK(!HasStackOverflow()); \
- call; \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-#define RECURSE_EXPRESSION(call) \
- do { \
- DCHECK(!HasStackOverflow()); \
- ++depth_; \
- call; \
- --depth_; \
- if (HasStackOverflow()) return; \
- } while (false)
-
-
-AstExpressionVisitor::AstExpressionVisitor(Isolate* isolate, Expression* root)
- : root_(root), depth_(0) {
- InitializeAstVisitor(isolate);
-}
-
-
-AstExpressionVisitor::AstExpressionVisitor(uintptr_t stack_limit,
- Expression* root)
- : root_(root), depth_(0) {
- InitializeAstVisitor(stack_limit);
-}
-
-
-void AstExpressionVisitor::Run() { RECURSE(Visit(root_)); }
-
-
-void AstExpressionVisitor::VisitVariableDeclaration(VariableDeclaration* decl) {
-}
-
-
-void AstExpressionVisitor::VisitFunctionDeclaration(FunctionDeclaration* decl) {
- RECURSE(Visit(decl->fun()));
-}
-
-
-void AstExpressionVisitor::VisitImportDeclaration(ImportDeclaration* decl) {}
-
-
-void AstExpressionVisitor::VisitExportDeclaration(ExportDeclaration* decl) {}
-
-
-void AstExpressionVisitor::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0; i < stmts->length(); ++i) {
- Statement* stmt = stmts->at(i);
- RECURSE(Visit(stmt));
- if (stmt->IsJump()) break;
- }
-}
-
-
-void AstExpressionVisitor::VisitBlock(Block* stmt) {
- RECURSE(VisitStatements(stmt->statements()));
-}
-
-
-void AstExpressionVisitor::VisitExpressionStatement(ExpressionStatement* stmt) {
- RECURSE(Visit(stmt->expression()));
-}
-
-
-void AstExpressionVisitor::VisitEmptyStatement(EmptyStatement* stmt) {}
-
-
-void AstExpressionVisitor::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* stmt) {
- RECURSE(Visit(stmt->statement()));
-}
-
-
-void AstExpressionVisitor::VisitIfStatement(IfStatement* stmt) {
- RECURSE(Visit(stmt->condition()));
- RECURSE(Visit(stmt->then_statement()));
- RECURSE(Visit(stmt->else_statement()));
-}
-
-
-void AstExpressionVisitor::VisitContinueStatement(ContinueStatement* stmt) {}
-
-
-void AstExpressionVisitor::VisitBreakStatement(BreakStatement* stmt) {}
-
-
-void AstExpressionVisitor::VisitReturnStatement(ReturnStatement* stmt) {
- RECURSE(Visit(stmt->expression()));
-}
-
-
-void AstExpressionVisitor::VisitWithStatement(WithStatement* stmt) {
- RECURSE(stmt->expression());
- RECURSE(stmt->statement());
-}
-
-
-void AstExpressionVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
- RECURSE(Visit(stmt->tag()));
-
- ZoneList<CaseClause*>* clauses = stmt->cases();
-
- for (int i = 0; i < clauses->length(); ++i) {
- CaseClause* clause = clauses->at(i);
- if (!clause->is_default()) {
- Expression* label = clause->label();
- RECURSE(Visit(label));
- }
- ZoneList<Statement*>* stmts = clause->statements();
- RECURSE(VisitStatements(stmts));
- }
-}
-
-
-void AstExpressionVisitor::VisitCaseClause(CaseClause* clause) {
- UNREACHABLE();
-}
-
-
-void AstExpressionVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
- RECURSE(Visit(stmt->body()));
- RECURSE(Visit(stmt->cond()));
-}
-
-
-void AstExpressionVisitor::VisitWhileStatement(WhileStatement* stmt) {
- RECURSE(Visit(stmt->cond()));
- RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitForStatement(ForStatement* stmt) {
- if (stmt->init() != NULL) {
- RECURSE(Visit(stmt->init()));
- }
- if (stmt->cond() != NULL) {
- RECURSE(Visit(stmt->cond()));
- }
- if (stmt->next() != NULL) {
- RECURSE(Visit(stmt->next()));
- }
- RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitForInStatement(ForInStatement* stmt) {
- RECURSE(Visit(stmt->enumerable()));
- RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitForOfStatement(ForOfStatement* stmt) {
- RECURSE(Visit(stmt->iterable()));
- RECURSE(Visit(stmt->each()));
- RECURSE(Visit(stmt->assign_iterator()));
- RECURSE(Visit(stmt->next_result()));
- RECURSE(Visit(stmt->result_done()));
- RECURSE(Visit(stmt->assign_each()));
- RECURSE(Visit(stmt->body()));
-}
-
-
-void AstExpressionVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
- RECURSE(Visit(stmt->try_block()));
- RECURSE(Visit(stmt->catch_block()));
-}
-
-
-void AstExpressionVisitor::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- RECURSE(Visit(stmt->try_block()));
- RECURSE(Visit(stmt->finally_block()));
-}
-
-
-void AstExpressionVisitor::VisitDebuggerStatement(DebuggerStatement* stmt) {}
-
-
-void AstExpressionVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
- Scope* scope = expr->scope();
- VisitExpression(expr);
- RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
- RECURSE_EXPRESSION(VisitStatements(expr->body()));
-}
-
-
-void AstExpressionVisitor::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* expr) {}
-
-
-void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
- VisitExpression(expr);
- RECURSE(VisitBlock(expr->block()));
- RECURSE(VisitVariableProxy(expr->result()));
-}
-
-
-void AstExpressionVisitor::VisitConditional(Conditional* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->condition()));
- RECURSE_EXPRESSION(Visit(expr->then_expression()));
- RECURSE_EXPRESSION(Visit(expr->else_expression()));
-}
-
-
-void AstExpressionVisitor::VisitVariableProxy(VariableProxy* expr) {
- VisitExpression(expr);
-}
-
-
-void AstExpressionVisitor::VisitLiteral(Literal* expr) {
- VisitExpression(expr);
-}
-
-
-void AstExpressionVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
- VisitExpression(expr);
-}
-
-
-void AstExpressionVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
- VisitExpression(expr);
- ZoneList<ObjectLiteralProperty*>* props = expr->properties();
- for (int i = 0; i < props->length(); ++i) {
- ObjectLiteralProperty* prop = props->at(i);
- if (!prop->key()->IsLiteral()) {
- RECURSE_EXPRESSION(Visit(prop->key()));
- }
- RECURSE_EXPRESSION(Visit(prop->value()));
- }
-}
-
-
-void AstExpressionVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
- VisitExpression(expr);
- ZoneList<Expression*>* values = expr->values();
- for (int i = 0; i < values->length(); ++i) {
- Expression* value = values->at(i);
- RECURSE_EXPRESSION(Visit(value));
- }
-}
-
-
-void AstExpressionVisitor::VisitAssignment(Assignment* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->target()));
- RECURSE_EXPRESSION(Visit(expr->value()));
-}
-
-
-void AstExpressionVisitor::VisitYield(Yield* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->generator_object()));
- RECURSE_EXPRESSION(Visit(expr->expression()));
-}
-
-
-void AstExpressionVisitor::VisitThrow(Throw* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->exception()));
-}
-
-
-void AstExpressionVisitor::VisitProperty(Property* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->obj()));
- RECURSE_EXPRESSION(Visit(expr->key()));
-}
-
-
-void AstExpressionVisitor::VisitCall(Call* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->expression()));
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE_EXPRESSION(Visit(arg));
- }
-}
-
-
-void AstExpressionVisitor::VisitCallNew(CallNew* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->expression()));
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE_EXPRESSION(Visit(arg));
- }
-}
-
-
-void AstExpressionVisitor::VisitCallRuntime(CallRuntime* expr) {
- VisitExpression(expr);
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE_EXPRESSION(Visit(arg));
- }
-}
-
-
-void AstExpressionVisitor::VisitUnaryOperation(UnaryOperation* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->expression()));
-}
-
-
-void AstExpressionVisitor::VisitCountOperation(CountOperation* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->expression()));
-}
-
-
-void AstExpressionVisitor::VisitBinaryOperation(BinaryOperation* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->left()));
- RECURSE_EXPRESSION(Visit(expr->right()));
-}
-
-
-void AstExpressionVisitor::VisitCompareOperation(CompareOperation* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->left()));
- RECURSE_EXPRESSION(Visit(expr->right()));
-}
-
-
-void AstExpressionVisitor::VisitThisFunction(ThisFunction* expr) {
- VisitExpression(expr);
-}
-
-
-void AstExpressionVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); ++i) {
- Declaration* decl = decls->at(i);
- RECURSE(Visit(decl));
- }
-}
-
-
-void AstExpressionVisitor::VisitClassLiteral(ClassLiteral* expr) {
- VisitExpression(expr);
- if (expr->extends() != nullptr) {
- RECURSE_EXPRESSION(Visit(expr->extends()));
- }
- RECURSE_EXPRESSION(Visit(expr->constructor()));
- ZoneList<ObjectLiteralProperty*>* props = expr->properties();
- for (int i = 0; i < props->length(); ++i) {
- ObjectLiteralProperty* prop = props->at(i);
- if (!prop->key()->IsLiteral()) {
- RECURSE_EXPRESSION(Visit(prop->key()));
- }
- RECURSE_EXPRESSION(Visit(prop->value()));
- }
-}
-
-
-void AstExpressionVisitor::VisitSpread(Spread* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(Visit(expr->expression()));
-}
-
-
-void AstExpressionVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {}
-
-
-void AstExpressionVisitor::VisitSuperPropertyReference(
- SuperPropertyReference* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
- RECURSE_EXPRESSION(Visit(expr->home_object()));
-}
-
-
-void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {
- VisitExpression(expr);
- RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
- RECURSE_EXPRESSION(VisitVariableProxy(expr->new_target_var()));
- RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
-}
-
-
-void AstExpressionVisitor::VisitRewritableExpression(
- RewritableExpression* expr) {
- VisitExpression(expr);
- RECURSE(Visit(expr->expression()));
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/ast/ast-expression-visitor.h b/deps/v8/src/ast/ast-expression-visitor.h
deleted file mode 100644
index 545a45c416..0000000000
--- a/deps/v8/src/ast/ast-expression-visitor.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_AST_EXPRESSION_VISITOR_H_
-#define V8_AST_AST_EXPRESSION_VISITOR_H_
-
-#include "src/allocation.h"
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/type-info.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-
-// A Visitor over a CompilationInfo's AST that invokes
-// VisitExpression on each expression node.
-
-class AstExpressionVisitor : public AstVisitor {
- public:
- AstExpressionVisitor(Isolate* isolate, Expression* root);
- AstExpressionVisitor(uintptr_t stack_limit, Expression* root);
- void Run();
-
- protected:
- virtual void VisitExpression(Expression* expression) = 0;
- int depth() { return depth_; }
-
- private:
- void VisitDeclarations(ZoneList<Declaration*>* d) override;
- void VisitStatements(ZoneList<Statement*>* s) override;
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- Expression* root_;
- int depth_;
-
- DISALLOW_COPY_AND_ASSIGN(AstExpressionVisitor);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_AST_AST_EXPRESSION_VISITOR_H_
diff --git a/deps/v8/src/ast/ast-literal-reindexer.cc b/deps/v8/src/ast/ast-literal-reindexer.cc
index 1f79b12217..a349ae0e97 100644
--- a/deps/v8/src/ast/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast/ast-literal-reindexer.cc
@@ -16,11 +16,6 @@ void AstLiteralReindexer::VisitVariableDeclaration(VariableDeclaration* node) {
}
-void AstLiteralReindexer::VisitExportDeclaration(ExportDeclaration* node) {
- VisitVariableProxy(node->proxy());
-}
-
-
void AstLiteralReindexer::VisitEmptyStatement(EmptyStatement* node) {}
@@ -83,11 +78,6 @@ void AstLiteralReindexer::VisitRewritableExpression(
}
-void AstLiteralReindexer::VisitImportDeclaration(ImportDeclaration* node) {
- VisitVariableProxy(node->proxy());
-}
-
-
void AstLiteralReindexer::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
}
@@ -326,9 +316,6 @@ void AstLiteralReindexer::VisitFunctionLiteral(FunctionLiteral* node) {
// We don't recurse into the declarations or body of the function literal:
}
-
-void AstLiteralReindexer::Reindex(Expression* pattern) {
- pattern->Accept(this);
-}
+void AstLiteralReindexer::Reindex(Expression* pattern) { Visit(pattern); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/ast-literal-reindexer.h b/deps/v8/src/ast/ast-literal-reindexer.h
index e2a71d3c47..b33e0c541c 100644
--- a/deps/v8/src/ast/ast-literal-reindexer.h
+++ b/deps/v8/src/ast/ast-literal-reindexer.h
@@ -11,20 +11,20 @@
namespace v8 {
namespace internal {
-class AstLiteralReindexer final : public AstVisitor {
+class AstLiteralReindexer final : public AstVisitor<AstLiteralReindexer> {
public:
- AstLiteralReindexer() : AstVisitor(), next_index_(0) {}
+ AstLiteralReindexer() : next_index_(0) {}
int count() const { return next_index_; }
void Reindex(Expression* pattern);
private:
-#define DEFINE_VISIT(type) void Visit##type(type* node) override;
+#define DEFINE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
- void VisitStatements(ZoneList<Statement*>* statements) override;
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitStatements(ZoneList<Statement*>* statements);
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
@@ -32,10 +32,9 @@ class AstLiteralReindexer final : public AstVisitor {
literal->literal_index_ = next_index_++;
}
- void Visit(AstNode* node) override { node->Accept(this); }
-
int next_index_;
+ DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
DISALLOW_COPY_AND_ASSIGN(AstLiteralReindexer);
};
} // namespace internal
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index f54333ff1f..1b9905a2c6 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -10,16 +10,17 @@
namespace v8 {
namespace internal {
-class AstNumberingVisitor final : public AstVisitor {
+class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(Isolate* isolate, Zone* zone)
- : AstVisitor(),
- isolate_(isolate),
+ : isolate_(isolate),
zone_(zone),
next_id_(BailoutId::FirstUsable().ToInt()),
+ yield_count_(0),
properties_(zone),
slot_cache_(zone),
- dont_optimize_reason_(kNoReason) {
+ dont_optimize_reason_(kNoReason),
+ catch_prediction_(HandlerTable::UNCAUGHT) {
InitializeAstVisitor(isolate);
}
@@ -27,18 +28,16 @@ class AstNumberingVisitor final : public AstVisitor {
private:
// AST node visitor interface.
-#define DEFINE_VISIT(type) void Visit##type(type* node) override;
+#define DEFINE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
- bool Finish(FunctionLiteral* node);
-
void VisitVariableProxyReference(VariableProxy* node);
void VisitPropertyReference(Property* node);
void VisitReference(Expression* expr);
- void VisitStatements(ZoneList<Statement*>* statements) override;
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitStatements(ZoneList<Statement*>* statements);
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitObjectLiteralProperty(ObjectLiteralProperty* property);
@@ -57,12 +56,7 @@ class AstNumberingVisitor final : public AstVisitor {
DisableSelfOptimization();
}
void DisableCrankshaft(BailoutReason reason) {
- if (FLAG_turbo_shipping) {
- properties_.flags() |= AstProperties::kDontCrankshaft;
- } else {
- dont_optimize_reason_ = reason;
- DisableSelfOptimization();
- }
+ properties_.flags() |= AstProperties::kDontCrankshaft;
}
template <typename Node>
@@ -76,10 +70,12 @@ class AstNumberingVisitor final : public AstVisitor {
Isolate* isolate_;
Zone* zone_;
int next_id_;
+ int yield_count_;
AstProperties properties_;
// The slot cache allows us to reuse certain feedback vector slots.
FeedbackVectorSlotCache slot_cache_;
BailoutReason dont_optimize_reason_;
+ HandlerTable::CatchPrediction catch_prediction_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -92,13 +88,6 @@ void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
}
-void AstNumberingVisitor::VisitExportDeclaration(ExportDeclaration* node) {
- IncrementNodeCount();
- DisableOptimization(kExportDeclaration);
- VisitVariableProxy(node->proxy());
-}
-
-
void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
IncrementNodeCount();
}
@@ -197,13 +186,6 @@ void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
}
-void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
- IncrementNodeCount();
- DisableOptimization(kImportDeclaration);
- VisitVariableProxy(node->proxy());
-}
-
-
void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
IncrementNodeCount();
Visit(node->expression());
@@ -217,9 +199,9 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
void AstNumberingVisitor::VisitYield(Yield* node) {
+ node->set_yield_id(yield_count_);
+ yield_count_++;
IncrementNodeCount();
- DisableOptimization(kYield);
- ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Yield::num_ids()));
Visit(node->generator_object());
Visit(node->expression());
@@ -251,6 +233,14 @@ void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
void AstNumberingVisitor::VisitBlock(Block* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Block::num_ids()));
+
+ if (FLAG_ignition && node->scope() != nullptr &&
+ node->scope()->NeedsContext()) {
+ // Create ScopeInfo while on the main thread to avoid allocation during
+ // potentially concurrent bytecode generation.
+ node->scope()->GetScopeInfo(isolate_);
+ }
+
if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
VisitStatements(node->statements());
}
@@ -265,7 +255,6 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
- ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
}
@@ -284,8 +273,10 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
+ node->set_first_yield_id(yield_count_);
Visit(node->body());
Visit(node->cond());
+ node->set_yield_count(yield_count_ - node->first_yield_id());
}
@@ -293,22 +284,38 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
+ node->set_first_yield_id(yield_count_);
Visit(node->cond());
Visit(node->body());
+ node->set_yield_count(yield_count_ - node->first_yield_id());
}
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
IncrementNodeCount();
- DisableOptimization(kTryCatchStatement);
- Visit(node->try_block());
+ DisableCrankshaft(kTryCatchStatement);
+ {
+ const HandlerTable::CatchPrediction old_prediction = catch_prediction_;
+ // This node uses its own prediction, unless it's "uncaught", in which case
+ // we adopt the prediction of the outer try-block.
+ HandlerTable::CatchPrediction catch_prediction = node->catch_prediction();
+ if (catch_prediction != HandlerTable::UNCAUGHT) {
+ catch_prediction_ = catch_prediction;
+ }
+ node->set_catch_prediction(catch_prediction_);
+ Visit(node->try_block());
+ catch_prediction_ = old_prediction;
+ }
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
- DisableOptimization(kTryFinallyStatement);
+ DisableCrankshaft(kTryFinallyStatement);
+ // We can't know whether the finally block will override ("catch") an
+ // exception thrown in the try block, so we just adopt the outer prediction.
+ node->set_catch_prediction(catch_prediction_);
Visit(node->try_block());
Visit(node->finally_block());
}
@@ -354,6 +361,7 @@ void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
node->set_base_id(ReserveIdRange(BinaryOperation::num_ids()));
Visit(node->left());
Visit(node->right());
+ ReserveFeedbackSlots(node);
}
@@ -377,9 +385,11 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
+ Visit(node->enumerable()); // Not part of loop.
+ node->set_first_yield_id(yield_count_);
Visit(node->each());
- Visit(node->enumerable());
Visit(node->body());
+ node->set_yield_count(yield_count_ - node->first_yield_id());
ReserveFeedbackSlots(node);
}
@@ -388,12 +398,13 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
DisableCrankshaft(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
- Visit(node->assign_iterator());
+ Visit(node->assign_iterator()); // Not part of loop.
+ node->set_first_yield_id(yield_count_);
Visit(node->next_result());
Visit(node->result_done());
Visit(node->assign_each());
Visit(node->body());
- ReserveFeedbackSlots(node);
+ node->set_yield_count(yield_count_ - node->first_yield_id());
}
@@ -440,10 +451,12 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
- if (node->init() != NULL) Visit(node->init());
+ if (node->init() != NULL) Visit(node->init()); // Not part of loop.
+ node->set_first_yield_id(yield_count_);
if (node->cond() != NULL) Visit(node->cond());
if (node->next() != NULL) Visit(node->next());
Visit(node->body());
+ node->set_yield_count(yield_count_ - node->first_yield_id());
}
@@ -554,15 +567,8 @@ void AstNumberingVisitor::VisitRewritableExpression(
}
-bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
- node->set_ast_properties(&properties_);
- node->set_dont_optimize_reason(dont_optimize_reason());
- return !HasStackOverflow();
-}
-
-
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
- Scope* scope = node->scope();
+ DeclarationScope* scope = node->scope();
if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
if (scope->calls_eval()) DisableOptimization(kFunctionCallsEval);
if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
@@ -574,10 +580,26 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DisableCrankshaft(kRestParameter);
}
+ if (FLAG_ignition && scope->NeedsContext() && scope->is_script_scope()) {
+ // Create ScopeInfo while on the main thread to avoid allocation during
+ // potentially concurrent bytecode generation.
+ node->scope()->GetScopeInfo(isolate_);
+ }
+
+ if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
+ // TODO(neis): We may want to allow Turbofan optimization here if
+ // --turbo-from-bytecode is set and we know that Ignition is used.
+ // Unfortunately we can't express that here.
+ DisableOptimization(kGenerator);
+ }
+
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
- return Finish(node);
+ node->set_ast_properties(&properties_);
+ node->set_dont_optimize_reason(dont_optimize_reason());
+ node->set_yield_count(yield_count_);
+ return !HasStackOverflow();
}
diff --git a/deps/v8/src/ast/ast-numbering.h b/deps/v8/src/ast/ast-numbering.h
index 0ac1ef2134..73278950cd 100644
--- a/deps/v8/src/ast/ast-numbering.h
+++ b/deps/v8/src/ast/ast-numbering.h
@@ -14,11 +14,27 @@ class Isolate;
class Zone;
namespace AstNumbering {
-// Assign type feedback IDs and bailout IDs to an AST node tree.
-//
+// Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
+// tree; perform catch prediction for TryStatements.
bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
}
+// Some details on yield IDs
+// -------------------------
+//
+// In order to assist Ignition in generating bytecode for a generator function,
+// we assign a unique number (the yield ID) to each Yield node in its AST. We
+// also annotate loops with the number of yields they contain (loop.yield_count)
+// and the smallest ID of those (loop.first_yield_id), and we annotate the
+// function itself with the number of yields it contains (function.yield_count).
+//
+// We choose the IDs simply by enumerating the Yield nodes in visitation order.
+// Ignition relies on the following properties:
+// - For each loop l and each yield y of l:
+// l.first_yield_id <= y.yield_id < l.first_yield_id + l.yield_count
+// - For the generator function f itself and each yield y of f:
+// 0 <= y.yield_id < f.yield_count
+
} // namespace internal
} // namespace v8
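
A minimal standalone sketch (plain C++, not V8 code) of the numbering scheme just described: yields are numbered in visitation order, each loop snapshots the counter on entry and takes the difference on exit, and both invariants fall out directly:

    #include <cassert>
    #include <memory>
    #include <vector>

    struct Node {
      bool is_yield = false;
      bool is_loop = false;
      int yield_id = -1;        // assigned to yields
      int first_yield_id = -1;  // assigned to loops
      int yield_count = 0;      // assigned to loops
      std::vector<std::unique_ptr<Node>> children;
    };

    struct YieldNumberer {
      int yield_count_ = 0;
      void Visit(Node* n) {
        if (n->is_yield) n->yield_id = yield_count_++;
        if (n->is_loop) n->first_yield_id = yield_count_;  // snapshot on entry
        for (auto& child : n->children) Visit(child.get());
        if (n->is_loop) n->yield_count = yield_count_ - n->first_yield_id;
      }
    };

    int main() {
      // Shape of: function* f() { yield; while (cond) { yield; yield; } }
      Node fn;
      fn.children.push_back(std::make_unique<Node>());
      fn.children.back()->is_yield = true;
      auto loop = std::make_unique<Node>();
      loop->is_loop = true;
      for (int i = 0; i < 2; i++) {
        loop->children.push_back(std::make_unique<Node>());
        loop->children.back()->is_yield = true;
      }
      fn.children.push_back(std::move(loop));

      YieldNumberer numberer;
      numberer.Visit(&fn);

      Node* l = fn.children[1].get();
      assert(l->first_yield_id == 1 && l->yield_count == 2);
      for (auto& y : l->children) {  // the loop invariant stated above
        assert(l->first_yield_id <= y->yield_id &&
               y->yield_id < l->first_yield_id + l->yield_count);
      }
      assert(numberer.yield_count_ == 3);  // function-level yield_count
      return 0;
    }
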
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
new file mode 100644
index 0000000000..0f2976c4ca
--- /dev/null
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -0,0 +1,504 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_TRAVERSAL_VISITOR_H_
+#define V8_AST_AST_TRAVERSAL_VISITOR_H_
+
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Traversal visitor
+// - fully traverses the entire AST.
+//
+// A subclass should parametrize AstTraversalVisitor with itself, e.g.:
+// class SpecificVisitor : public AstTraversalVisitor<SpecificVisitor> { ... }
+//
+// It invokes VisitNode on each AST node, before proceeding with its subtrees.
+// It invokes VisitExpression (after VisitNode) on each AST node that is an
+// expression, before proceeding with its subtrees.
+// It proceeds with the subtrees only if these two methods return true.
+// Sub-classes may override VisitNode and VisitExpression, whose default
+// implementations here do nothing but return true. Or they may override the
+// specific Visit* methods.
+
+template <class Subclass>
+class AstTraversalVisitor : public AstVisitor<Subclass> {
+ public:
+ explicit AstTraversalVisitor(Isolate* isolate, AstNode* root = nullptr);
+ explicit AstTraversalVisitor(uintptr_t stack_limit, AstNode* root = nullptr);
+
+ void Run() {
+ DCHECK_NOT_NULL(root_);
+ Visit(root_);
+ }
+
+ bool VisitNode(AstNode* node) { return true; }
+ bool VisitExpression(Expression* node) { return true; }
+
+ // Iteration left-to-right.
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitStatements(ZoneList<Statement*>* statements);
+
+// Individual nodes
+#define DECLARE_VISIT(type) void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ protected:
+ int depth() const { return depth_; }
+
+ private:
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ AstNode* root_;
+ int depth_;
+
+ DISALLOW_COPY_AND_ASSIGN(AstTraversalVisitor);
+};
+
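
For orientation, a hedged sketch of the CRTP subclassing pattern the comment above describes (an editorial illustration against this header, not part of the patch; AstNode and the stack_limit plumbing come from the surrounding V8 headers):

    class NodeCounter : public AstTraversalVisitor<NodeCounter> {
     public:
      NodeCounter(uintptr_t stack_limit, AstNode* root)
          : AstTraversalVisitor<NodeCounter>(stack_limit, root) {}
      // Called for every node before its subtree; returning true descends.
      bool VisitNode(AstNode* node) {
        count_++;
        return true;
      }
      int count() const { return count_; }

     private:
      int count_ = 0;
    };

    // Usage: NodeCounter counter(stack_limit, literal); counter.Run();
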
+// ----------------------------------------------------------------------------
+// Implementation of AstTraversalVisitor
+
+#define PROCESS_NODE(node) do { \
+ if (!(this->impl()->VisitNode(node))) return; \
+ } while (false)
+
+#define PROCESS_EXPRESSION(node) do { \
+ PROCESS_NODE(node); \
+ if (!(this->impl()->VisitExpression(node))) return; \
+ } while (false)
+
+#define RECURSE(call) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ this->impl()->call; \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+#define RECURSE_EXPRESSION(call) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ ++depth_; \
+ this->impl()->call; \
+ --depth_; \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+template <class Subclass>
+AstTraversalVisitor<Subclass>::AstTraversalVisitor(Isolate* isolate,
+ AstNode* root)
+ : root_(root), depth_(0) {
+ InitializeAstVisitor(isolate);
+}
+
+template <class Subclass>
+AstTraversalVisitor<Subclass>::AstTraversalVisitor(uintptr_t stack_limit,
+ AstNode* root)
+ : root_(root), depth_(0) {
+ InitializeAstVisitor(stack_limit);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitDeclarations(
+ ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ RECURSE(Visit(decl));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitStatements(
+ ZoneList<Statement*>* stmts) {
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ RECURSE(Visit(stmt));
+ if (stmt->IsJump()) break;
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitVariableDeclaration(
+ VariableDeclaration* decl) {
+ PROCESS_NODE(decl);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitFunctionDeclaration(
+ FunctionDeclaration* decl) {
+ PROCESS_NODE(decl);
+ RECURSE(Visit(decl->fun()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitBlock(Block* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(VisitStatements(stmt->statements()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitEmptyStatement(EmptyStatement* stmt) {}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->statement()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitIfStatement(IfStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->condition()));
+ RECURSE(Visit(stmt->then_statement()));
+ RECURSE(Visit(stmt->else_statement()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitContinueStatement(
+ ContinueStatement* stmt) {
+ PROCESS_NODE(stmt);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitBreakStatement(BreakStatement* stmt) {
+ PROCESS_NODE(stmt);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitReturnStatement(
+ ReturnStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitWithStatement(WithStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->expression()));
+ RECURSE(Visit(stmt->statement()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitSwitchStatement(
+ SwitchStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->tag()));
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default()) {
+ Expression* label = clause->label();
+ RECURSE(Visit(label));
+ }
+ ZoneList<Statement*>* stmts = clause->statements();
+ RECURSE(VisitStatements(stmts));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitDoWhileStatement(
+ DoWhileStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->cond()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitWhileStatement(WhileStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->cond()));
+ RECURSE(Visit(stmt->body()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitForStatement(ForStatement* stmt) {
+ PROCESS_NODE(stmt);
+ if (stmt->init() != NULL) {
+ RECURSE(Visit(stmt->init()));
+ }
+ if (stmt->cond() != NULL) {
+ RECURSE(Visit(stmt->cond()));
+ }
+ if (stmt->next() != NULL) {
+ RECURSE(Visit(stmt->next()));
+ }
+ RECURSE(Visit(stmt->body()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitForInStatement(ForInStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->enumerable()));
+ RECURSE(Visit(stmt->body()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitForOfStatement(ForOfStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->assign_iterator()));
+ RECURSE(Visit(stmt->next_result()));
+ RECURSE(Visit(stmt->result_done()));
+ RECURSE(Visit(stmt->assign_each()));
+ RECURSE(Visit(stmt->body()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitTryCatchStatement(
+ TryCatchStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->try_block()));
+ RECURSE(Visit(stmt->catch_block()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ PROCESS_NODE(stmt);
+ RECURSE(Visit(stmt->try_block()));
+ RECURSE(Visit(stmt->finally_block()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ PROCESS_NODE(stmt);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitFunctionLiteral(
+ FunctionLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+ DeclarationScope* scope = expr->scope();
+ RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
+ RECURSE_EXPRESSION(VisitStatements(expr->body()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitDoExpression(DoExpression* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE(VisitBlock(expr->block()));
+ RECURSE(VisitVariableProxy(expr->result()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitConditional(Conditional* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->condition()));
+ RECURSE_EXPRESSION(Visit(expr->then_expression()));
+ RECURSE_EXPRESSION(Visit(expr->else_expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitVariableProxy(VariableProxy* expr) {
+ PROCESS_EXPRESSION(expr);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitLiteral(Literal* expr) {
+ PROCESS_EXPRESSION(expr);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitRegExpLiteral(RegExpLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitObjectLiteral(ObjectLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ RECURSE_EXPRESSION(Visit(prop->key()));
+ RECURSE_EXPRESSION(Visit(prop->value()));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitArrayLiteral(ArrayLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+ ZoneList<Expression*>* values = expr->values();
+ for (int i = 0; i < values->length(); ++i) {
+ Expression* value = values->at(i);
+ RECURSE_EXPRESSION(Visit(value));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitAssignment(Assignment* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->target()));
+ RECURSE_EXPRESSION(Visit(expr->value()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitYield(Yield* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->generator_object()));
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitThrow(Throw* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->exception()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitProperty(Property* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->obj()));
+ RECURSE_EXPRESSION(Visit(expr->key()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitCall(Call* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE_EXPRESSION(Visit(arg));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitCallNew(CallNew* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE_EXPRESSION(Visit(arg));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitCallRuntime(CallRuntime* expr) {
+ PROCESS_EXPRESSION(expr);
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE_EXPRESSION(Visit(arg));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitUnaryOperation(UnaryOperation* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitCountOperation(CountOperation* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitBinaryOperation(
+ BinaryOperation* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->left()));
+ RECURSE_EXPRESSION(Visit(expr->right()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitCompareOperation(
+ CompareOperation* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->left()));
+ RECURSE_EXPRESSION(Visit(expr->right()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitThisFunction(ThisFunction* expr) {
+ PROCESS_EXPRESSION(expr);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+ if (expr->extends() != nullptr) {
+ RECURSE_EXPRESSION(Visit(expr->extends()));
+ }
+ RECURSE_EXPRESSION(Visit(expr->constructor()));
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ if (!prop->key()->IsLiteral()) {
+ RECURSE_EXPRESSION(Visit(prop->key()));
+ }
+ RECURSE_EXPRESSION(Visit(prop->value()));
+ }
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitSpread(Spread* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitEmptyParentheses(
+ EmptyParentheses* expr) {
+ PROCESS_EXPRESSION(expr);
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
+ SuperPropertyReference* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+ RECURSE_EXPRESSION(Visit(expr->home_object()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitSuperCallReference(
+ SuperCallReference* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_var()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->new_target_var()));
+ RECURSE_EXPRESSION(VisitVariableProxy(expr->this_function_var()));
+}
+
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitRewritableExpression(
+ RewritableExpression* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE(Visit(expr->expression()));
+}
+
+#undef PROCESS_NODE
+#undef PROCESS_EXPRESSION
+#undef RECURSE_EXPRESSION
+#undef RECURSE
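+
+// A minimal sketch of a concrete traversal, assuming the
+// (Isolate*, AstNode*) constructor and the Run() entry point declared
+// earlier in this header; CallCounter is a hypothetical name. PROCESS_NODE
+// consults the subclass's VisitNode before descending, so returning false
+// there prunes the subtree:
+//
+//   class CallCounter final : public AstTraversalVisitor<CallCounter> {
+//    public:
+//     CallCounter(Isolate* isolate, AstNode* root)
+//         : AstTraversalVisitor(isolate, root), count_(0) {}
+//     bool VisitNode(AstNode* node) {
+//       if (node->IsCall()) ++count_;
+//       return true;  // Keep descending into subtrees.
+//     }
+//     int count() const { return count_; }
+//
+//    private:
+//     int count_;
+//   };
+//
+//   // CallCounter counter(isolate, literal); counter.Run();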
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_AST_TRAVERSAL_VISITOR_H_
diff --git a/deps/v8/src/ast/ast-type-bounds.h b/deps/v8/src/ast/ast-type-bounds.h
new file mode 100644
index 0000000000..ec26fdfc02
--- /dev/null
+++ b/deps/v8/src/ast/ast-type-bounds.h
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A container to associate type bounds with AST Expression nodes.
+
+#ifndef V8_AST_AST_TYPE_BOUNDS_H_
+#define V8_AST_AST_TYPE_BOUNDS_H_
+
+#include "src/types.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Expression;
+
+class AstTypeBounds {
+ public:
+ explicit AstTypeBounds(Zone* zone) : bounds_map_(zone) {}
+ ~AstTypeBounds() {}
+
+ Bounds get(Expression* expression) const {
+ ZoneMap<Expression*, Bounds>::const_iterator i =
+ bounds_map_.find(expression);
+ return (i != bounds_map_.end()) ? i->second : Bounds::Unbounded();
+ }
+
+ void set(Expression* expression, Bounds bounds) {
+ bounds_map_[expression] = bounds;
+ }
+
+ private:
+ ZoneMap<Expression*, Bounds> bounds_map_;
+};
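+
+// A usage sketch (caller names hypothetical, assuming the Bounds(Type*)
+// constructor from src/types.h): get() falls back to Bounds::Unbounded() for
+// expressions that were never recorded, so a typer can fill the map lazily.
+//
+//   AstTypeBounds bounds(zone);
+//   bounds.set(expr, Bounds(Type::SignedSmall()));
+//   Bounds known = bounds.get(expr);    // The recorded bounds.
+//   Bounds fresh = bounds.get(other);   // Bounds::Unbounded().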
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_AST_TYPE_BOUNDS_H_
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 189d4cc0f5..a271751839 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -58,7 +58,7 @@ class AstRawStringInternalizationKey : public HashTableKey {
: string_(string) {}
bool IsMatch(Object* other) override {
- if (string_->is_one_byte_)
+ if (string_->is_one_byte())
return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
return String::cast(other)->IsTwoByteEqualTo(
Vector<const uint16_t>::cast(string_->literal_bytes_));
@@ -71,7 +71,7 @@ class AstRawStringInternalizationKey : public HashTableKey {
}
Handle<Object> AsHandle(Isolate* isolate) override {
- if (string_->is_one_byte_)
+ if (string_->is_one_byte())
return isolate->factory()->NewOneByteInternalizedString(
string_->literal_bytes_, string_->hash());
return isolate->factory()->NewTwoByteInternalizedString(
@@ -82,9 +82,21 @@ class AstRawStringInternalizationKey : public HashTableKey {
const AstRawString* string_;
};
+int AstString::length() const {
+ if (IsRawStringBits::decode(bit_field_)) {
+ return reinterpret_cast<const AstRawString*>(this)->length();
+ }
+ return reinterpret_cast<const AstConsString*>(this)->length();
+}
+
+void AstString::Internalize(Isolate* isolate) {
+ if (IsRawStringBits::decode(bit_field_)) {
+ return reinterpret_cast<AstRawString*>(this)->Internalize(isolate);
+ }
+ return reinterpret_cast<AstConsString*>(this)->Internalize(isolate);
+}
void AstRawString::Internalize(Isolate* isolate) {
- if (!string_.is_null()) return;
if (literal_bytes_.length() == 0) {
string_ = isolate->factory()->empty_string();
} else {
@@ -93,21 +105,22 @@ void AstRawString::Internalize(Isolate* isolate) {
}
}
-
bool AstRawString::AsArrayIndex(uint32_t* index) const {
- if (!string_.is_null())
- return string_->AsArrayIndex(index);
- if (!is_one_byte_ || literal_bytes_.length() == 0 ||
- literal_bytes_.length() > String::kMaxArrayIndexSize)
- return false;
- OneByteStringStream stream(literal_bytes_);
- return StringToArrayIndex(&stream, index);
+ // The StringHasher will set up the hash in such a way that we can use it to
+ // figure out whether the string is convertible to an array index.
+ if ((hash_ & Name::kIsNotArrayIndexMask) != 0) return false;
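+ // For example, the hash of "123" has the array-index bit set and, since
+ // the string is no longer than Name::kMaxCachedArrayIndexLength, carries
+ // the value 123 in its cached-index bits; a longer index such as
+ // "4294967294" still passes the mask test but is re-parsed from the raw
+ // bytes below.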
+ if (length() <= Name::kMaxCachedArrayIndexLength) {
+ *index = Name::ArrayIndexValueBits::decode(hash_);
+ } else {
+ OneByteStringStream stream(literal_bytes_);
+ CHECK(StringToArrayIndex(&stream, index));
+ }
+ return true;
}
-
bool AstRawString::IsOneByteEqualTo(const char* data) const {
int length = static_cast<int>(strlen(data));
- if (is_one_byte_ && literal_bytes_.length() == length) {
+ if (is_one_byte() && literal_bytes_.length() == length) {
const char* token = reinterpret_cast<const char*>(literal_bytes_.start());
return !strncmp(token, data, length);
}
@@ -123,7 +136,6 @@ void AstConsString::Internalize(Isolate* isolate) {
.ToHandleChecked();
}
-
bool AstValue::IsPropertyName() const {
if (type_ == STRING) {
uint32_t index;
@@ -144,6 +156,7 @@ bool AstValue::BooleanValue() const {
case NUMBER_WITH_DOT:
case NUMBER:
return DoubleToBoolean(number_);
+ case SMI_WITH_DOT:
case SMI:
return smi_ != 0;
case BOOLEAN:
@@ -183,6 +196,7 @@ void AstValue::Internalize(Isolate* isolate) {
case NUMBER:
value_ = isolate->factory()->NewNumber(number_, TENURED);
break;
+ case SMI_WITH_DOT:
case SMI:
value_ = handle(Smi::FromInt(smi_), isolate);
break;
@@ -239,7 +253,13 @@ const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
}
}
isolate_ = saved_isolate;
- if (isolate_) result->Internalize(isolate_);
+ if (strings_ != nullptr && isolate_) {
+ // Only the string we are creating is uninternalized at this point.
+ DCHECK_EQ(result, strings_);
+ DCHECK_NULL(strings_->next());
+ result->Internalize(isolate_);
+ ResetStrings();
+ }
return result;
}
@@ -249,84 +269,69 @@ const AstConsString* AstValueFactory::NewConsString(
// This Vector will be valid as long as the Collector is alive (meaning that
// the AstRawString will not be moved).
AstConsString* new_string = new (zone_) AstConsString(left, right);
- strings_.Add(new_string);
- if (isolate_) {
- new_string->Internalize(isolate_);
- }
+ CHECK(new_string != nullptr);
+ AddString(new_string);
return new_string;
}
void AstValueFactory::Internalize(Isolate* isolate) {
if (isolate_) {
+ DCHECK_NULL(strings_);
+ DCHECK_NULL(values_);
// Everything is already internalized.
return;
}
+
// Strings need to be internalized before values, because values refer to
// strings.
- for (int i = 0; i < strings_.length(); ++i) {
- strings_[i]->Internalize(isolate);
+ for (AstString* current = strings_; current != nullptr;) {
+ AstString* next = current->next();
+ current->Internalize(isolate);
+ current = next;
}
- for (int i = 0; i < values_.length(); ++i) {
- values_[i]->Internalize(isolate);
+ for (AstValue* current = values_; current != nullptr;) {
+ AstValue* next = current->next();
+ current->Internalize(isolate);
+ current = next;
}
isolate_ = isolate;
+ ResetStrings();
+ values_ = nullptr;
}
const AstValue* AstValueFactory::NewString(const AstRawString* string) {
AstValue* value = new (zone_) AstValue(string);
- DCHECK(string != NULL);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
- return value;
+ CHECK(string != nullptr);
+ return AddValue(value);
}
const AstValue* AstValueFactory::NewSymbol(const char* name) {
AstValue* value = new (zone_) AstValue(name);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
- return value;
+ return AddValue(value);
}
const AstValue* AstValueFactory::NewNumber(double number, bool with_dot) {
AstValue* value = new (zone_) AstValue(number, with_dot);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
- return value;
+ return AddValue(value);
}
const AstValue* AstValueFactory::NewSmi(int number) {
AstValue* value =
new (zone_) AstValue(AstValue::SMI, number);
- if (isolate_) {
- value->Internalize(isolate_);
- }
- values_.Add(value);
- return value;
+ return AddValue(value);
}
-
-#define GENERATE_VALUE_GETTER(value, initializer) \
- if (!value) { \
- value = new (zone_) AstValue(initializer); \
- if (isolate_) { \
- value->Internalize(isolate_); \
- } \
- values_.Add(value); \
- } \
+#define GENERATE_VALUE_GETTER(value, initializer) \
+ if (!value) { \
+ value = AddValue(new (zone_) AstValue(initializer)); \
+ } \
return value;
-
const AstValue* AstValueFactory::NewBoolean(bool b) {
if (b) {
GENERATE_VALUE_GETTER(true_value_, true);
@@ -360,7 +365,7 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
// against the AstRawStrings which are in the string_table_. We should not
// return this AstRawString.
AstRawString key(is_one_byte, literal_bytes, hash);
- HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
+ base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
if (entry->value == NULL) {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
@@ -368,11 +373,9 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
memcpy(new_literal_bytes, literal_bytes.start(), length);
AstRawString* new_string = new (zone_) AstRawString(
is_one_byte, Vector<const byte>(new_literal_bytes, length), hash);
+ CHECK(new_string != nullptr);
entry->key = new_string;
- strings_.Add(new_string);
- if (isolate_) {
- new_string->Internalize(isolate_);
- }
+ AddString(new_string);
entry->value = reinterpret_cast<void*>(1);
}
return reinterpret_cast<AstRawString*>(entry->key);
@@ -382,8 +385,8 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
bool AstValueFactory::AstRawStringCompare(void* a, void* b) {
const AstRawString* lhs = static_cast<AstRawString*>(a);
const AstRawString* rhs = static_cast<AstRawString*>(b);
+ DCHECK_EQ(lhs->hash(), rhs->hash());
if (lhs->length() != rhs->length()) return false;
- if (lhs->hash() != rhs->hash()) return false;
const unsigned char* l = lhs->raw_data();
const unsigned char* r = rhs->raw_data();
size_t length = rhs->length();
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 8b3f0ed252..da209e122c 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -29,7 +29,7 @@
#define V8_AST_AST_VALUE_FACTORY_H_
#include "src/api.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
#include "src/utils.h"
// AstString, AstValue and AstValueFactory are for storing strings and values
@@ -42,13 +42,14 @@ namespace internal {
class AstString : public ZoneObject {
public:
- virtual ~AstString() {}
+ explicit AstString(bool is_raw)
+ : next_(nullptr), bit_field_(IsRawStringBits::encode(is_raw)) {}
- virtual int length() const = 0;
+ int length() const;
bool IsEmpty() const { return length() == 0; }
// Puts the string into the V8 heap.
- virtual void Internalize(Isolate* isolate) = 0;
+ void Internalize(Isolate* isolate);
// This function can be called after internalizing.
V8_INLINE Handle<String> string() const {
@@ -56,23 +57,30 @@ class AstString : public ZoneObject {
return string_;
}
+ AstString** next_location() { return &next_; }
+ AstString* next() const { return next_; }
+
protected:
- // This is null until the string is internalized.
+ // Handle<String>::null() until internalized.
Handle<String> string_;
+ AstString* next_;
+ // Poor man's virtual dispatch to AstRawString / AstConsString: a tag bit
+ // takes less memory than a vtable pointer would.
+ class IsRawStringBits : public BitField<bool, 0, 1> {};
+ int bit_field_;
};
class AstRawString final : public AstString {
public:
- int length() const override {
- if (is_one_byte_)
- return literal_bytes_.length();
+ int length() const {
+ if (is_one_byte()) return literal_bytes_.length();
return literal_bytes_.length() / 2;
}
int byte_length() const { return literal_bytes_.length(); }
- void Internalize(Isolate* isolate) override;
+ void Internalize(Isolate* isolate);
bool AsArrayIndex(uint32_t* index) const;
@@ -80,11 +88,12 @@ class AstRawString final : public AstString {
const unsigned char* raw_data() const {
return literal_bytes_.start();
}
- bool is_one_byte() const { return is_one_byte_; }
+
+ bool is_one_byte() const { return IsOneByteBits::decode(bit_field_); }
+
bool IsOneByteEqualTo(const char* data) const;
uint16_t FirstCharacter() const {
- if (is_one_byte_)
- return literal_bytes_[0];
+ if (is_one_byte()) return literal_bytes_[0];
const uint16_t* c =
reinterpret_cast<const uint16_t*>(literal_bytes_.start());
return *c;
@@ -100,29 +109,34 @@ class AstRawString final : public AstString {
friend class AstRawStringInternalizationKey;
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
- uint32_t hash)
- : is_one_byte_(is_one_byte), literal_bytes_(literal_bytes), hash_(hash) {}
+ uint32_t hash)
+ : AstString(true), hash_(hash), literal_bytes_(literal_bytes) {
+ bit_field_ |= IsOneByteBits::encode(is_one_byte);
+ }
- AstRawString()
- : is_one_byte_(true),
- hash_(0) {}
+ AstRawString() : AstString(true), hash_(0) {
+ bit_field_ |= IsOneByteBits::encode(true);
+ }
- bool is_one_byte_;
+ class IsOneByteBits : public BitField<bool, IsRawStringBits::kNext, 1> {};
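+ // Together with IsRawStringBits inherited from AstString, this occupies
+ // bits 0..1 of bit_field_.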
+ uint32_t hash_;
// Points to memory owned by Zone.
Vector<const byte> literal_bytes_;
- uint32_t hash_;
};
class AstConsString final : public AstString {
public:
AstConsString(const AstString* left, const AstString* right)
- : length_(left->length() + right->length()), left_(left), right_(right) {}
+ : AstString(false),
+ length_(left->length() + right->length()),
+ left_(left),
+ right_(right) {}
- int length() const override { return length_; }
+ int length() const { return length_; }
- void Internalize(Isolate* isolate) override;
+ void Internalize(Isolate* isolate);
private:
const int length_;
@@ -140,27 +154,33 @@ class AstValue : public ZoneObject {
}
bool IsNumber() const {
- return type_ == NUMBER || type_ == NUMBER_WITH_DOT || type_ == SMI;
+ return type_ == NUMBER || type_ == NUMBER_WITH_DOT || type_ == SMI ||
+ type_ == SMI_WITH_DOT;
}
- bool ContainsDot() const { return type_ == NUMBER_WITH_DOT; }
+ bool ContainsDot() const {
+ return type_ == NUMBER_WITH_DOT || type_ == SMI_WITH_DOT;
+ }
const AstRawString* AsString() const {
- if (type_ == STRING)
- return string_;
- UNREACHABLE();
- return 0;
+ CHECK_EQ(STRING, type_);
+ return string_;
}
double AsNumber() const {
if (type_ == NUMBER || type_ == NUMBER_WITH_DOT)
return number_;
- if (type_ == SMI)
+ if (type_ == SMI || type_ == SMI_WITH_DOT)
return smi_;
UNREACHABLE();
return 0;
}
+ Smi* AsSmi() const {
+ CHECK(type_ == SMI || type_ == SMI_WITH_DOT);
+ return Smi::FromInt(smi_);
+ }
+
bool EqualsString(const AstRawString* string) const {
return type_ == STRING && string_ == string;
}
@@ -169,7 +189,12 @@ class AstValue : public ZoneObject {
bool BooleanValue() const;
+ bool IsSmi() const { return type_ == SMI || type_ == SMI_WITH_DOT; }
+ bool IsFalse() const { return type_ == BOOLEAN && !bool_; }
+ bool IsTrue() const { return type_ == BOOLEAN && bool_; }
+ bool IsUndefined() const { return type_ == UNDEFINED; }
bool IsTheHole() const { return type_ == THE_HOLE; }
+ bool IsNull() const { return type_ == NULL_TYPE; }
void Internalize(Isolate* isolate);
@@ -181,6 +206,8 @@ class AstValue : public ZoneObject {
DCHECK(!value_.is_null());
return value_;
}
+ AstValue* next() const { return next_; }
+ void set_next(AstValue* next) { next_ = next; }
private:
friend class AstValueFactory;
@@ -191,33 +218,40 @@ class AstValue : public ZoneObject {
NUMBER,
NUMBER_WITH_DOT,
SMI,
+ SMI_WITH_DOT,
BOOLEAN,
NULL_TYPE,
UNDEFINED,
THE_HOLE
};
- explicit AstValue(const AstRawString* s) : type_(STRING) { string_ = s; }
+ explicit AstValue(const AstRawString* s) : type_(STRING), next_(nullptr) {
+ string_ = s;
+ }
- explicit AstValue(const char* name) : type_(SYMBOL) { symbol_name_ = name; }
+ explicit AstValue(const char* name) : type_(SYMBOL), next_(nullptr) {
+ symbol_name_ = name;
+ }
- explicit AstValue(double n, bool with_dot) {
- if (with_dot) {
- type_ = NUMBER_WITH_DOT;
+ explicit AstValue(double n, bool with_dot) : next_(nullptr) {
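+ // Doubles that fit in a Smi are stored as (possibly dotted) Smis: the
+ // literal 1.0 becomes SMI_WITH_DOT while 0.5 stays NUMBER_WITH_DOT, and
+ // ContainsDot() still tells the literals 1 and 1.0 apart.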
+ int int_value;
+ if (DoubleToSmiInteger(n, &int_value)) {
+ type_ = with_dot ? SMI_WITH_DOT : SMI;
+ smi_ = int_value;
} else {
- type_ = NUMBER;
+ type_ = with_dot ? NUMBER_WITH_DOT : NUMBER;
+ number_ = n;
}
- number_ = n;
}
- AstValue(Type t, int i) : type_(t) {
+ AstValue(Type t, int i) : type_(t), next_(nullptr) {
DCHECK(type_ == SMI);
smi_ = i;
}
- explicit AstValue(bool b) : type_(BOOLEAN) { bool_ = b; }
+ explicit AstValue(bool b) : type_(BOOLEAN), next_(nullptr) { bool_ = b; }
- explicit AstValue(Type t) : type_(t) {
+ explicit AstValue(Type t) : type_(t), next_(nullptr) {
DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
}
@@ -229,12 +263,13 @@ class AstValue : public ZoneObject {
double number_;
int smi_;
bool bool_;
- ZoneList<const AstRawString*>* strings_;
+ const AstRawString* strings_;
const char* symbol_name_;
};
- // Internalized value (empty before internalized).
+ // Handle<Object>::null() until internalized.
Handle<Object> value_;
+ AstValue* next_;
};
@@ -242,6 +277,8 @@ class AstValue : public ZoneObject {
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
+ F(async, "async") \
+ F(await, "await") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
@@ -257,15 +294,16 @@ class AstValue : public ZoneObject {
F(eval, "eval") \
F(function, "function") \
F(get_space, "get ") \
+ F(length, "length") \
F(let, "let") \
F(native, "native") \
F(new_target, ".new.target") \
F(next, "next") \
F(proto, "__proto__") \
F(prototype, "prototype") \
- F(rest_parameter, ".rest_parameter") \
F(return, "return") \
F(set_space, "set ") \
+ F(star_default_star, "*default*") \
F(this, "this") \
F(this_function, ".this_function") \
F(throw, "throw") \
@@ -285,9 +323,12 @@ class AstValueFactory {
public:
AstValueFactory(Zone* zone, uint32_t hash_seed)
: string_table_(AstRawStringCompare),
+ values_(nullptr),
+ strings_end_(&strings_),
zone_(zone),
isolate_(NULL),
hash_seed_(hash_seed) {
+ ResetStrings();
#define F(name, str) name##_string_ = NULL;
STRING_CONSTANTS(F)
#undef F
@@ -342,6 +383,28 @@ class AstValueFactory {
const AstValue* NewTheHole();
private:
+ AstValue* AddValue(AstValue* value) {
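+ // With an isolate at hand, internalize eagerly; otherwise chain the value
+ // onto the values_ list so a later Internalize() call can process it.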
+ if (isolate_) {
+ value->Internalize(isolate_);
+ } else {
+ value->set_next(values_);
+ values_ = value;
+ }
+ return value;
+ }
+ AstString* AddString(AstString* string) {
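+ // Strings are appended through the strings_end_ tail pointer so creation
+ // order is preserved: a cons string is created after its operands and must
+ // be internalized after them as well.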
+ if (isolate_) {
+ string->Internalize(isolate_);
+ } else {
+ *strings_end_ = string;
+ strings_end_ = string->next_location();
+ }
+ return string;
+ }
+ void ResetStrings() {
+ strings_ = nullptr;
+ strings_end_ = &strings_;
+ }
AstRawString* GetOneByteStringInternal(Vector<const uint8_t> literal);
AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
AstRawString* GetString(uint32_t hash, bool is_one_byte,
@@ -350,11 +413,14 @@ class AstValueFactory {
static bool AstRawStringCompare(void* a, void* b);
// All strings are copied here, one after another (no NULLs in between).
- HashMap string_table_;
+ base::HashMap string_table_;
// For keeping track of all AstValues and AstRawStrings we've created (so that
// they can be internalized later).
- List<AstValue*> values_;
- List<AstString*> strings_;
+ AstValue* values_;
+ // We need to keep track of strings_ in order, since cons strings require
+ // their members to be internalized first.
+ AstString* strings_;
+ AstString** strings_end_;
Zone* zone_;
Isolate* isolate_;
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index e8b6269648..06037f4e6d 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -8,14 +8,14 @@
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
-#include "src/builtins.h"
+#include "src/base/hashmap.h"
+#include "src/builtins/builtins.h"
#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions.h"
-#include "src/hashmap.h"
#include "src/parsing/parser.h"
-#include "src/property.h"
#include "src/property-details.h"
+#include "src/property.h"
#include "src/string-stream.h"
#include "src/type-info.h"
@@ -23,15 +23,6 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
-// All the Accept member functions for each syntax tree node type.
-
-#define DECL_ACCEPT(type) \
- void type::Accept(AstVisitor* v) { v->Visit##type(this); }
-AST_NODE_LIST(DECL_ACCEPT)
-#undef DECL_ACCEPT
-
-
-// ----------------------------------------------------------------------------
// Implementation of other node functionality.
#ifdef DEBUG
@@ -41,30 +32,61 @@ void AstNode::Print(Isolate* isolate) {
}
-void AstNode::PrettyPrint(Isolate* isolate) {
- PrettyPrinter::PrintOut(isolate, this);
+#endif // DEBUG
+
+#define RETURN_NODE(Node) \
+ case k##Node: \
+ return static_cast<Node*>(this);
+
+IterationStatement* AstNode::AsIterationStatement() {
+ switch (node_type()) {
+ ITERATION_NODE_LIST(RETURN_NODE);
+ default:
+ return nullptr;
+ }
+}
+
+BreakableStatement* AstNode::AsBreakableStatement() {
+ switch (node_type()) {
+ BREAKABLE_NODE_LIST(RETURN_NODE);
+ ITERATION_NODE_LIST(RETURN_NODE);
+ default:
+ return nullptr;
+ }
}
-#endif // DEBUG
+MaterializedLiteral* AstNode::AsMaterializedLiteral() {
+ switch (node_type()) {
+ LITERAL_NODE_LIST(RETURN_NODE);
+ default:
+ return nullptr;
+ }
+}
+#undef RETURN_NODE
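+
+// Each As* conversion above is a switch on node_type() plus a static_cast;
+// for a node kind X, RETURN_NODE(X) expands to
+//   case kX: return static_cast<X*>(this);
+// so conversions need no virtual dispatch.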
bool Expression::IsSmiLiteral() const {
- return IsLiteral() && AsLiteral()->value()->IsSmi();
+ return IsLiteral() && AsLiteral()->raw_value()->IsSmi();
}
-
bool Expression::IsStringLiteral() const {
- return IsLiteral() && AsLiteral()->value()->IsString();
+ return IsLiteral() && AsLiteral()->raw_value()->IsString();
}
+bool Expression::IsPropertyName() const {
+ return IsLiteral() && AsLiteral()->IsPropertyName();
+}
bool Expression::IsNullLiteral() const {
- return IsLiteral() && AsLiteral()->value()->IsNull();
+ if (!IsLiteral()) return false;
+ return AsLiteral()->raw_value()->IsNull();
}
bool Expression::IsUndefinedLiteral() const {
- if (IsLiteral() && AsLiteral()->value()->IsUndefined()) {
- return true;
+ if (IsLiteral()) {
+ if (AsLiteral()->raw_value()->IsUndefined()) {
+ return true;
+ }
}
const VariableProxy* var_proxy = AsVariableProxy();
@@ -76,35 +98,105 @@ bool Expression::IsUndefinedLiteral() const {
var_proxy->raw_name()->IsOneByteEqualTo("undefined");
}
+bool Expression::ToBooleanIsTrue() const {
+ return IsLiteral() && AsLiteral()->ToBooleanIsTrue();
+}
+
+bool Expression::ToBooleanIsFalse() const {
+ return IsLiteral() && AsLiteral()->ToBooleanIsFalse();
+}
+
+bool Expression::IsValidReferenceExpression() const {
+ // We don't want expressions wrapped inside RewritableExpression to be
+ // considered as valid reference expressions, as they will be rewritten
+ // to something (most probably involving a do expression).
+ if (IsRewritableExpression()) return false;
+ return IsProperty() ||
+ (IsVariableProxy() && AsVariableProxy()->IsValidReferenceExpression());
+}
bool Expression::IsValidReferenceExpressionOrThis() const {
return IsValidReferenceExpression() ||
(IsVariableProxy() && AsVariableProxy()->is_this());
}
+bool Expression::IsAnonymousFunctionDefinition() const {
+ return (IsFunctionLiteral() &&
+ AsFunctionLiteral()->IsAnonymousFunctionDefinition()) ||
+ (IsDoExpression() &&
+ AsDoExpression()->IsAnonymousFunctionDefinition());
+}
+
+void Expression::MarkTail() {
+ if (IsConditional()) {
+ AsConditional()->MarkTail();
+ } else if (IsCall()) {
+ AsCall()->MarkTail();
+ } else if (IsBinaryOperation()) {
+ AsBinaryOperation()->MarkTail();
+ }
+}
+
+bool DoExpression::IsAnonymousFunctionDefinition() const {
+ // This is specifically to allow DoExpressions to represent ClassLiterals.
+ return represented_function_ != nullptr &&
+ represented_function_->raw_name()->length() == 0;
+}
+
+bool Statement::IsJump() const {
+ switch (node_type()) {
+#define JUMP_NODE_LIST(V) \
+ V(Block) \
+ V(ExpressionStatement) \
+ V(ContinueStatement) \
+ V(BreakStatement) \
+ V(ReturnStatement) \
+ V(IfStatement)
+#define GENERATE_CASE(Node) \
+ case k##Node: \
+ return static_cast<const Node*>(this)->IsJump();
+ JUMP_NODE_LIST(GENERATE_CASE)
+#undef GENERATE_CASE
+#undef JUMP_NODE_LIST
+ default:
+ return false;
+ }
+}
-VariableProxy::VariableProxy(Zone* zone, Variable* var, int start_position,
+VariableProxy::VariableProxy(Variable* var, int start_position,
int end_position)
- : Expression(zone, start_position),
+ : Expression(start_position, kVariableProxy),
bit_field_(IsThisField::encode(var->is_this()) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
+ end_position_(end_position),
raw_name_(var->raw_name()),
- end_position_(end_position) {
+ next_unresolved_(nullptr) {
BindTo(var);
}
-
-VariableProxy::VariableProxy(Zone* zone, const AstRawString* name,
+VariableProxy::VariableProxy(const AstRawString* name,
Variable::Kind variable_kind, int start_position,
int end_position)
- : Expression(zone, start_position),
+ : Expression(start_position, kVariableProxy),
bit_field_(IsThisField::encode(variable_kind == Variable::THIS) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
+ end_position_(end_position),
raw_name_(name),
- end_position_(end_position) {}
-
+ next_unresolved_(nullptr) {}
+
+VariableProxy::VariableProxy(const VariableProxy* copy_from)
+ : Expression(copy_from->position(), kVariableProxy),
+ bit_field_(copy_from->bit_field_),
+ end_position_(copy_from->end_position_),
+ next_unresolved_(nullptr) {
+ if (copy_from->is_resolved()) {
+ var_ = copy_from->var_;
+ } else {
+ raw_name_ = copy_from->raw_name_;
+ }
+}
void VariableProxy::BindTo(Variable* var) {
DCHECK((is_this() && var->is_this()) || raw_name() == var->raw_name());
@@ -120,17 +212,17 @@ void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
- if (var()->IsUnallocated()) {
+ if (var()->IsUnallocated() || var()->mode() == DYNAMIC_GLOBAL) {
ZoneHashMap::Entry* entry = cache->Get(var());
if (entry != NULL) {
variable_feedback_slot_ = FeedbackVectorSlot(
static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
return;
}
- }
- variable_feedback_slot_ = spec->AddLoadICSlot();
- if (var()->IsUnallocated()) {
+ variable_feedback_slot_ = spec->AddLoadGlobalICSlot(var()->name());
cache->Put(var(), variable_feedback_slot_);
+ } else {
+ variable_feedback_slot_ = spec->AddLoadICSlot();
}
}
}
@@ -158,10 +250,9 @@ void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
for_in_feedback_slot_ = spec->AddGeneralSlot();
}
-
-Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
- Expression* value, int pos)
- : Expression(zone, pos),
+Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
+ int pos)
+ : Expression(pos, kAssignment),
bit_field_(
IsUninitializedField::encode(false) | KeyTypeField::encode(ELEMENT) |
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
@@ -169,7 +260,6 @@ Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
value_(value),
binary_operation_(NULL) {}
-
void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
@@ -181,6 +271,9 @@ void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(expression(), spec, &slot_);
+ // Assign a slot to collect feedback about binary operations. Used only by
+ // Ignition; full-codegen uses the AstId to record type feedback.
+ binary_operation_slot_ = spec->AddGeneralSlot();
}
@@ -387,7 +480,7 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
if (property->is_computed_name()) continue;
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) continue;
Literal* literal = property->key()->AsLiteral();
- DCHECK(!literal->value()->IsNull());
+ DCHECK(!literal->IsNullLiteral());
// If there is an existing entry do not emit a store unless the previous
// entry was also an accessor.
@@ -457,11 +550,11 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
// (value->IsNumber()).
// TODO(verwaest): Remove once we can store them inline.
if (FLAG_track_double_fields &&
- (value->IsNumber() || value->IsUninitialized())) {
+ (value->IsNumber() || value->IsUninitialized(isolate))) {
may_store_doubles_ = true;
}
- is_simple = is_simple && !value->IsUninitialized();
+ is_simple = is_simple && !value->IsUninitialized(isolate);
// Keep track of the number of elements in the object literal and
// the largest element index. If the largest element index is
@@ -524,12 +617,12 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// New handle scope here, needs to be after BuildContants().
HandleScope scope(isolate);
Handle<Object> boilerplate_value = GetBoilerplateValue(element, isolate);
- if (boilerplate_value->IsTheHole()) {
+ if (boilerplate_value->IsTheHole(isolate)) {
is_holey = true;
continue;
}
- if (boilerplate_value->IsUninitialized()) {
+ if (boilerplate_value->IsUninitialized(isolate)) {
boilerplate_value = handle(Smi::FromInt(0), isolate);
is_simple = false;
}
@@ -626,6 +719,22 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
}
+void BinaryOperation::AssignFeedbackVectorSlots(
+ Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ // The feedback vector slot is only used by the interpreter for binary
+ // operations. Full-codegen uses the AstId to record type feedback.
+ switch (op()) {
+ // Comma, logical_or and logical_and do not collect type feedback.
+ case Token::COMMA:
+ case Token::AND:
+ case Token::OR:
+ return;
+ default:
+ type_feedback_slot_ = spec->AddGeneralSlot();
+ return;
+ }
+}
static bool IsTypeof(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
@@ -706,59 +815,103 @@ bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
// ----------------------------------------------------------------------------
-// Inlining support
-
-bool Declaration::IsInlineable() const {
- return proxy()->var()->IsStackAllocated();
-}
-
-bool FunctionDeclaration::IsInlineable() const {
- return false;
-}
-
-
-// ----------------------------------------------------------------------------
// Recording of type feedback
// TODO(rossberg): all RecordTypeFeedback functions should disappear
// once we use the common type field in the AST consistently.
void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
- set_to_boolean_types(oracle->ToBooleanTypes(test_id()));
+ if (IsUnaryOperation()) {
+ AsUnaryOperation()->RecordToBooleanTypeFeedback(oracle);
+ } else if (IsBinaryOperation()) {
+ AsBinaryOperation()->RecordToBooleanTypeFeedback(oracle);
+ } else {
+ set_to_boolean_types(oracle->ToBooleanTypes(test_id()));
+ }
}
+SmallMapList* Expression::GetReceiverTypes() {
+ switch (node_type()) {
+#define NODE_LIST(V) \
+ PROPERTY_NODE_LIST(V) \
+ V(Call)
+#define GENERATE_CASE(Node) \
+ case k##Node: \
+ return static_cast<Node*>(this)->GetReceiverTypes();
+ NODE_LIST(GENERATE_CASE)
+#undef NODE_LIST
+#undef GENERATE_CASE
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
-bool Call::IsUsingCallFeedbackICSlot(Isolate* isolate) const {
- CallType call_type = GetCallType(isolate);
- if (call_type == POSSIBLY_EVAL_CALL) {
- return false;
+KeyedAccessStoreMode Expression::GetStoreMode() const {
+ switch (node_type()) {
+#define GENERATE_CASE(Node) \
+ case k##Node: \
+ return static_cast<const Node*>(this)->GetStoreMode();
+ PROPERTY_NODE_LIST(GENERATE_CASE)
+#undef GENERATE_CASE
+ default:
+ UNREACHABLE();
+ return STANDARD_STORE;
}
- return true;
}
+IcCheckType Expression::GetKeyType() const {
+ switch (node_type()) {
+#define GENERATE_CASE(Node) \
+ case k##Node: \
+ return static_cast<const Node*>(this)->GetKeyType();
+ PROPERTY_NODE_LIST(GENERATE_CASE)
+#undef GENERATE_CASE
+ default:
+ UNREACHABLE();
+ return PROPERTY;
+ }
+}
-bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
+bool Expression::IsMonomorphic() const {
+ switch (node_type()) {
+#define GENERATE_CASE(Node) \
+ case k##Node: \
+ return static_cast<const Node*>(this)->IsMonomorphic();
+ PROPERTY_NODE_LIST(GENERATE_CASE)
+ CALL_NODE_LIST(GENERATE_CASE)
+#undef GENERATE_CASE
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+bool Call::IsUsingCallFeedbackICSlot() const {
+ return GetCallType() != POSSIBLY_EVAL_CALL;
+}
+
+bool Call::IsUsingCallFeedbackSlot() const {
// SuperConstructorCall uses a CallConstructStub, which wants
// a Slot, in addition to any IC slots requested elsewhere.
- return GetCallType(isolate) == SUPER_CALL;
+ return GetCallType() == SUPER_CALL;
}
void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
- if (IsUsingCallFeedbackICSlot(isolate)) {
+ if (IsUsingCallFeedbackICSlot()) {
ic_slot_ = spec->AddCallICSlot();
}
- if (IsUsingCallFeedbackSlot(isolate)) {
+ if (IsUsingCallFeedbackSlot()) {
stub_slot_ = spec->AddGeneralSlot();
}
}
-
-Call::CallType Call::GetCallType(Isolate* isolate) const {
+Call::CallType Call::GetCallType() const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) {
- if (proxy->var()->is_possibly_eval(isolate)) {
+ if (is_possibly_eval()) {
return POSSIBLY_EVAL_CALL;
} else if (proxy->var()->IsUnallocatedOrGlobalSlot()) {
return GLOBAL_CALL;
@@ -782,40 +935,9 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
return OTHER_CALL;
}
-
-// ----------------------------------------------------------------------------
-// Implementation of AstVisitor
-
-void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
-}
-
-
-void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
- for (int i = 0; i < statements->length(); i++) {
- Statement* stmt = statements->at(i);
- Visit(stmt);
- if (stmt->IsJump()) break;
- }
-}
-
-
-void AstVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
- for (int i = 0; i < expressions->length(); i++) {
- // The variable statement visiting code may pass NULL expressions
- // to this code. Maybe this should be handled by introducing an
- // undefined expression or literal? Revisit this code if this
- // changes
- Expression* expression = expressions->at(i);
- if (expression != NULL) Visit(expression);
- }
-}
-
-CaseClause::CaseClause(Zone* zone, Expression* label,
- ZoneList<Statement*>* statements, int pos)
- : Expression(zone, pos),
+CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
+ int pos)
+ : Expression(pos, kCaseClause),
label_(label),
statements_(statements),
compare_type_(Type::None()) {}
@@ -835,6 +957,5 @@ bool Literal::Match(void* literal1, void* literal2) {
(x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 52bac8efbe..1b80d3f36d 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -5,14 +5,13 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
-#include "src/assembler.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
#include "src/ast/variables.h"
#include "src/bailout-reason.h"
#include "src/base/flags.h"
-#include "src/base/smart-pointers.h"
#include "src/factory.h"
+#include "src/globals.h"
#include "src/isolate.h"
#include "src/list.h"
#include "src/parsing/token.h"
@@ -39,12 +38,22 @@ namespace internal {
#define DECLARATION_NODE_LIST(V) \
V(VariableDeclaration) \
- V(FunctionDeclaration) \
- V(ImportDeclaration) \
- V(ExportDeclaration)
+ V(FunctionDeclaration)
+
+#define ITERATION_NODE_LIST(V) \
+ V(DoWhileStatement) \
+ V(WhileStatement) \
+ V(ForStatement) \
+ V(ForInStatement) \
+ V(ForOfStatement)
+
+#define BREAKABLE_NODE_LIST(V) \
+ V(Block) \
+ V(SwitchStatement)
#define STATEMENT_NODE_LIST(V) \
- V(Block) \
+ ITERATION_NODE_LIST(V) \
+ BREAKABLE_NODE_LIST(V) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(SloppyBlockFunctionStatement) \
@@ -53,35 +62,38 @@ namespace internal {
V(BreakStatement) \
V(ReturnStatement) \
V(WithStatement) \
- V(SwitchStatement) \
- V(DoWhileStatement) \
- V(WhileStatement) \
- V(ForStatement) \
- V(ForInStatement) \
- V(ForOfStatement) \
V(TryCatchStatement) \
V(TryFinallyStatement) \
V(DebuggerStatement)
+#define LITERAL_NODE_LIST(V) \
+ V(RegExpLiteral) \
+ V(ObjectLiteral) \
+ V(ArrayLiteral)
+
+#define PROPERTY_NODE_LIST(V) \
+ V(Assignment) \
+ V(CountOperation) \
+ V(Property)
+
+#define CALL_NODE_LIST(V) \
+ V(Call) \
+ V(CallNew)
+
#define EXPRESSION_NODE_LIST(V) \
+ LITERAL_NODE_LIST(V) \
+ PROPERTY_NODE_LIST(V) \
+ CALL_NODE_LIST(V) \
V(FunctionLiteral) \
V(ClassLiteral) \
V(NativeFunctionLiteral) \
V(Conditional) \
V(VariableProxy) \
V(Literal) \
- V(RegExpLiteral) \
- V(ObjectLiteral) \
- V(ArrayLiteral) \
- V(Assignment) \
V(Yield) \
V(Throw) \
- V(Property) \
- V(Call) \
- V(CallNew) \
V(CallRuntime) \
V(UnaryOperation) \
- V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
V(Spread) \
@@ -100,7 +112,6 @@ namespace internal {
// Forward declarations
class AstNodeFactory;
-class AstVisitor;
class Declaration;
class Module;
class BreakableStatement;
@@ -120,17 +131,12 @@ typedef ZoneList<Handle<String>> ZoneStringList;
typedef ZoneList<Handle<Object>> ZoneObjectList;
-#define DECLARE_NODE_TYPE(type) \
- void Accept(AstVisitor* v) override; \
- AstNode::NodeType node_type() const final { return AstNode::k##type; } \
- friend class AstNodeFactory;
-
-
class FeedbackVectorSlotCache {
public:
explicit FeedbackVectorSlotCache(Zone* zone)
: zone_(zone),
- hash_map_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ hash_map_(base::HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)) {}
void Put(Variable* variable, FeedbackVectorSlot slot) {
@@ -181,23 +187,15 @@ DEFINE_OPERATORS_FOR_FLAGS(AstProperties::Flags)
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
- enum NodeType {
- AST_NODE_LIST(DECLARE_TYPE_ENUM)
- kInvalid = -1
- };
+ enum NodeType : uint8_t { AST_NODE_LIST(DECLARE_TYPE_ENUM) };
#undef DECLARE_TYPE_ENUM
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
- explicit AstNode(int position): position_(position) {}
- virtual ~AstNode() {}
-
- virtual void Accept(AstVisitor* v) = 0;
- virtual NodeType node_type() const = 0;
+ NodeType node_type() const { return node_type_; }
int position() const { return position_; }
#ifdef DEBUG
- void PrettyPrint(Isolate* isolate);
void Print(Isolate* isolate);
#endif // DEBUG
@@ -209,35 +207,33 @@ class AstNode: public ZoneObject {
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
- virtual BreakableStatement* AsBreakableStatement() { return NULL; }
- virtual IterationStatement* AsIterationStatement() { return NULL; }
- virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
+ BreakableStatement* AsBreakableStatement();
+ IterationStatement* AsIterationStatement();
+ MaterializedLiteral* AsMaterializedLiteral();
- // The interface for feedback slots, with default no-op implementations for
- // node types which don't actually have this. Note that this is conceptually
- // not really nice, but multiple inheritance would introduce yet another
- // vtable entry per node, something we don't want for space reasons.
- virtual void AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {}
+ protected:
+ AstNode(int position, NodeType type)
+ : position_(position), node_type_(type) {}
private:
// Hidden to prevent accidental usage. It would have to load the
// current zone from the TLS.
void* operator new(size_t size);
- friend class CaseClause; // Generates AST IDs.
-
int position_;
+ NodeType node_type_;
+ // Ends with NodeType which is uint8_t sized. Deriving classes in turn begin
+ // with sub-int32_t-sized fields for optimum packing efficiency.
};
class Statement : public AstNode {
public:
- explicit Statement(Zone* zone, int position) : AstNode(position) {}
-
bool IsEmpty() { return AsEmptyStatement() != NULL; }
- virtual bool IsJump() const { return false; }
+ bool IsJump() const;
+
+ protected:
+ Statement(int position, NodeType type) : AstNode(position, type) {}
};
@@ -303,23 +299,23 @@ class Expression : public AstNode {
};
// Mark this expression as being in tail position.
- virtual void MarkTail() {}
+ void MarkTail();
// True iff the expression is a valid reference expression.
- virtual bool IsValidReferenceExpression() const { return false; }
+ bool IsValidReferenceExpression() const;
// Helpers for ToBoolean conversion.
- virtual bool ToBooleanIsTrue() const { return false; }
- virtual bool ToBooleanIsFalse() const { return false; }
+ bool ToBooleanIsTrue() const;
+ bool ToBooleanIsFalse() const;
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indices as property
// names because [] for string objects is handled only by keyed ICs.
- virtual bool IsPropertyName() const { return false; }
+ bool IsPropertyName() const;
// True iff the expression is a class or function expression without
// a syntactic name.
- virtual bool IsAnonymousFunctionDefinition() const { return false; }
+ bool IsAnonymousFunctionDefinition() const;
// True iff the expression is a literal represented as a smi.
bool IsSmiLiteral() const;
@@ -337,50 +333,32 @@ class Expression : public AstNode {
// True iff the expression is a valid target for an assignment.
bool IsValidReferenceExpressionOrThis() const;
- // Expression type bounds
- Bounds bounds() const { return bounds_; }
- void set_bounds(Bounds bounds) { bounds_ = bounds; }
-
- // Type feedback information for assignments and properties.
- virtual bool IsMonomorphic() {
- UNREACHABLE();
- return false;
- }
- virtual SmallMapList* GetReceiverTypes() {
- UNREACHABLE();
- return NULL;
- }
- virtual KeyedAccessStoreMode GetStoreMode() const {
- UNREACHABLE();
- return STANDARD_STORE;
- }
- virtual IcCheckType GetKeyType() const {
- UNREACHABLE();
- return ELEMENT;
- }
-
// TODO(rossberg): this should move to its own AST node eventually.
- virtual void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
uint16_t to_boolean_types() const {
return ToBooleanTypesField::decode(bit_field_);
}
+ SmallMapList* GetReceiverTypes();
+ KeyedAccessStoreMode GetStoreMode() const;
+ IcCheckType GetKeyType() const;
+ bool IsMonomorphic() const;
+
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 2; }
BailoutId id() const { return BailoutId(local_id(0)); }
TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
protected:
- Expression(Zone* zone, int pos)
- : AstNode(pos),
- base_id_(BailoutId::None().ToInt()),
- bounds_(Bounds::Unbounded()),
- bit_field_(0) {}
+ Expression(int pos, NodeType type)
+ : AstNode(pos, type),
+ bit_field_(0),
+ base_id_(BailoutId::None().ToInt()) {}
+
static int parent_num_ids() { return 0; }
void set_to_boolean_types(uint16_t types) {
bit_field_ = ToBooleanTypesField::update(bit_field_, types);
}
-
int base_id() const {
DCHECK(!BailoutId(base_id_).IsNone());
return base_id_;
@@ -389,12 +367,9 @@ class Expression : public AstNode {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ uint16_t bit_field_;
int base_id_;
- Bounds bounds_;
class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
- uint16_t bit_field_;
- // Ends with 16-bit field; deriving classes in turn begin with
- // 16-bit fields for optimum packing efficiency.
};
@@ -409,9 +384,6 @@ class BreakableStatement : public Statement {
// if it is != NULL, guaranteed to contain at least one entry.
ZoneList<const AstRawString*>* labels() const { return labels_; }
- // Type testing & conversion.
- BreakableStatement* AsBreakableStatement() final { return this; }
-
// Code generation
Label* break_target() { return &break_target_; }
@@ -426,12 +398,12 @@ class BreakableStatement : public Statement {
BailoutId ExitId() const { return BailoutId(local_id(1)); }
protected:
- BreakableStatement(Zone* zone, ZoneList<const AstRawString*>* labels,
- BreakableType breakable_type, int position)
- : Statement(zone, position),
- labels_(labels),
+ BreakableStatement(ZoneList<const AstRawString*>* labels,
+ BreakableType breakable_type, int position, NodeType type)
+ : Statement(position, type),
breakable_type_(breakable_type),
- base_id_(BailoutId::None().ToInt()) {
+ base_id_(BailoutId::None().ToInt()),
+ labels_(labels) {
DCHECK(labels == NULL || labels->length() > 0);
}
static int parent_num_ids() { return 0; }
@@ -444,24 +416,22 @@ class BreakableStatement : public Statement {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- ZoneList<const AstRawString*>* labels_;
BreakableType breakable_type_;
- Label break_target_;
int base_id_;
+ Label break_target_;
+ ZoneList<const AstRawString*>* labels_;
};
class Block final : public BreakableStatement {
public:
- DECLARE_NODE_TYPE(Block)
-
ZoneList<Statement*>* statements() { return &statements_; }
bool ignore_completion_value() const { return ignore_completion_value_; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId DeclsId() const { return BailoutId(local_id(0)); }
- bool IsJump() const override {
+ bool IsJump() const {
return !statements_.is_empty() && statements_.last()->IsJump()
&& labels() == NULL; // Good enough as an approximation...
}
@@ -469,16 +439,16 @@ class Block final : public BreakableStatement {
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
- protected:
+ private:
+ friend class AstNodeFactory;
+
Block(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
bool ignore_completion_value, int pos)
- : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos),
+ : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY, pos, kBlock),
statements_(capacity, zone),
ignore_completion_value_(ignore_completion_value),
scope_(NULL) {}
static int parent_num_ids() { return BreakableStatement::num_ids(); }
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
ZoneList<Statement*> statements_;
@@ -489,46 +459,46 @@ class Block final : public BreakableStatement {
class DoExpression final : public Expression {
public:
- DECLARE_NODE_TYPE(DoExpression)
-
Block* block() { return block_; }
void set_block(Block* b) { block_ = b; }
VariableProxy* result() { return result_; }
void set_result(VariableProxy* v) { result_ = v; }
+ FunctionLiteral* represented_function() { return represented_function_; }
+ void set_represented_function(FunctionLiteral* f) {
+ represented_function_ = f;
+ }
+ bool IsAnonymousFunctionDefinition() const;
- protected:
- DoExpression(Zone* zone, Block* block, VariableProxy* result, int pos)
- : Expression(zone, pos), block_(block), result_(result) {
+ private:
+ friend class AstNodeFactory;
+
+ DoExpression(Block* block, VariableProxy* result, int pos)
+ : Expression(pos, kDoExpression),
+ block_(block),
+ result_(result),
+ represented_function_(nullptr) {
DCHECK_NOT_NULL(block_);
DCHECK_NOT_NULL(result_);
}
static int parent_num_ids() { return Expression::num_ids(); }
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Block* block_;
VariableProxy* result_;
+ FunctionLiteral* represented_function_;
};
class Declaration : public AstNode {
public:
VariableProxy* proxy() const { return proxy_; }
- VariableMode mode() const { return mode_; }
Scope* scope() const { return scope_; }
- virtual InitializationFlag initialization() const = 0;
- virtual bool IsInlineable() const;
protected:
- Declaration(Zone* zone, VariableProxy* proxy, VariableMode mode, Scope* scope,
- int pos)
- : AstNode(pos), mode_(mode), proxy_(proxy), scope_(scope) {
- DCHECK(IsDeclaredVariableMode(mode));
- }
+ Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
+ : AstNode(pos, type), proxy_(proxy), scope_(scope) {}
private:
- VariableMode mode_;
VariableProxy* proxy_;
// Nested scope from which the declaration originated.
@@ -537,128 +507,57 @@ class Declaration : public AstNode {
class VariableDeclaration final : public Declaration {
- public:
- DECLARE_NODE_TYPE(VariableDeclaration)
-
- InitializationFlag initialization() const override {
- return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
- }
+ private:
+ friend class AstNodeFactory;
- protected:
- VariableDeclaration(Zone* zone, VariableProxy* proxy, VariableMode mode,
- Scope* scope, int pos)
- : Declaration(zone, proxy, mode, scope, pos) {}
+ VariableDeclaration(VariableProxy* proxy, Scope* scope, int pos)
+ : Declaration(proxy, scope, pos, kVariableDeclaration) {}
};
class FunctionDeclaration final : public Declaration {
public:
- DECLARE_NODE_TYPE(FunctionDeclaration)
-
FunctionLiteral* fun() const { return fun_; }
void set_fun(FunctionLiteral* f) { fun_ = f; }
- InitializationFlag initialization() const override {
- return kCreatedInitialized;
- }
- bool IsInlineable() const override;
- protected:
- FunctionDeclaration(Zone* zone,
- VariableProxy* proxy,
- VariableMode mode,
- FunctionLiteral* fun,
- Scope* scope,
+ private:
+ friend class AstNodeFactory;
+
+ FunctionDeclaration(VariableProxy* proxy, FunctionLiteral* fun, Scope* scope,
int pos)
- : Declaration(zone, proxy, mode, scope, pos),
- fun_(fun) {
- DCHECK(mode == VAR || mode == LET || mode == CONST);
+ : Declaration(proxy, scope, pos, kFunctionDeclaration), fun_(fun) {
DCHECK(fun != NULL);
}
- private:
FunctionLiteral* fun_;
};
-class ImportDeclaration final : public Declaration {
- public:
- DECLARE_NODE_TYPE(ImportDeclaration)
-
- const AstRawString* import_name() const { return import_name_; }
- const AstRawString* module_specifier() const { return module_specifier_; }
- void set_module_specifier(const AstRawString* module_specifier) {
- DCHECK(module_specifier_ == NULL);
- module_specifier_ = module_specifier;
- }
- InitializationFlag initialization() const override {
- return kNeedsInitialization;
- }
-
- protected:
- ImportDeclaration(Zone* zone, VariableProxy* proxy,
- const AstRawString* import_name,
- const AstRawString* module_specifier, Scope* scope, int pos)
- : Declaration(zone, proxy, IMPORT, scope, pos),
- import_name_(import_name),
- module_specifier_(module_specifier) {}
-
- private:
- const AstRawString* import_name_;
- const AstRawString* module_specifier_;
-};
-
-
-class ExportDeclaration final : public Declaration {
- public:
- DECLARE_NODE_TYPE(ExportDeclaration)
-
- InitializationFlag initialization() const override {
- return kCreatedInitialized;
- }
-
- protected:
- ExportDeclaration(Zone* zone, VariableProxy* proxy, Scope* scope, int pos)
- : Declaration(zone, proxy, LET, scope, pos) {}
-};
-
-
-class Module : public AstNode {
- public:
- ModuleDescriptor* descriptor() const { return descriptor_; }
- Block* body() const { return body_; }
-
- protected:
- Module(Zone* zone, int pos)
- : AstNode(pos), descriptor_(ModuleDescriptor::New(zone)), body_(NULL) {}
- Module(Zone* zone, ModuleDescriptor* descriptor, int pos, Block* body = NULL)
- : AstNode(pos), descriptor_(descriptor), body_(body) {}
-
- private:
- ModuleDescriptor* descriptor_;
- Block* body_;
-};
-
-
class IterationStatement : public BreakableStatement {
public:
- // Type testing & conversion.
- IterationStatement* AsIterationStatement() final { return this; }
-
Statement* body() const { return body_; }
void set_body(Statement* s) { body_ = s; }
+ int yield_count() const { return yield_count_; }
+ int first_yield_id() const { return first_yield_id_; }
+ void set_yield_count(int yield_count) { yield_count_ = yield_count; }
+ void set_first_yield_id(int first_yield_id) {
+ first_yield_id_ = first_yield_id;
+ }
+
static int num_ids() { return parent_num_ids() + 1; }
BailoutId OsrEntryId() const { return BailoutId(local_id(0)); }
- virtual BailoutId ContinueId() const = 0;
- virtual BailoutId StackCheckId() const = 0;
// Code generation
Label* continue_target() { return &continue_target_; }
protected:
- IterationStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
- body_(NULL) {}
+ IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
+ NodeType type)
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS, pos, type),
+ body_(NULL),
+ yield_count_(0),
+ first_yield_id_(0) {}
static int parent_num_ids() { return BreakableStatement::num_ids(); }
void Initialize(Statement* body) { body_ = body; }
@@ -667,13 +566,13 @@ class IterationStatement : public BreakableStatement {
Statement* body_;
Label continue_target_;
+ int yield_count_;
+ int first_yield_id_;
};
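The new yield_count_/first_yield_id_ fields give each loop a contiguous range of yield ids, so generator resumption can dispatch per loop. A hedged sketch of how a numbering pass might fill them in, using only the accessors declared in this diff (the helper itself is illustrative):

// Assign the loop the half-open id range
// [first_yield_id, first_yield_id + yield_count).
void AssignYieldIds(IterationStatement* loop, ZoneList<Yield*>* yields,
                    int* next_yield_id) {
  loop->set_first_yield_id(*next_yield_id);
  for (int i = 0; i < yields->length(); i++) {
    yields->at(i)->set_yield_id((*next_yield_id)++);
  }
  loop->set_yield_count(yields->length());
}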
class DoWhileStatement final : public IterationStatement {
public:
- DECLARE_NODE_TYPE(DoWhileStatement)
-
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
cond_ = cond;
@@ -683,16 +582,16 @@ class DoWhileStatement final : public IterationStatement {
void set_cond(Expression* e) { cond_ = e; }
static int num_ids() { return parent_num_ids() + 2; }
- BailoutId ContinueId() const override { return BailoutId(local_id(0)); }
- BailoutId StackCheckId() const override { return BackEdgeId(); }
+ BailoutId ContinueId() const { return BailoutId(local_id(0)); }
+ BailoutId StackCheckId() const { return BackEdgeId(); }
BailoutId BackEdgeId() const { return BailoutId(local_id(1)); }
- protected:
- DoWhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos), cond_(NULL) {}
- static int parent_num_ids() { return IterationStatement::num_ids(); }
-
private:
+ friend class AstNodeFactory;
+
+ DoWhileStatement(ZoneList<const AstRawString*>* labels, int pos)
+ : IterationStatement(labels, pos, kDoWhileStatement), cond_(NULL) {}
+ static int parent_num_ids() { return IterationStatement::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* cond_;
@@ -701,8 +600,6 @@ class DoWhileStatement final : public IterationStatement {
class WhileStatement final : public IterationStatement {
public:
- DECLARE_NODE_TYPE(WhileStatement)
-
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
cond_ = cond;
@@ -712,16 +609,16 @@ class WhileStatement final : public IterationStatement {
void set_cond(Expression* e) { cond_ = e; }
static int num_ids() { return parent_num_ids() + 1; }
- BailoutId ContinueId() const override { return EntryId(); }
- BailoutId StackCheckId() const override { return BodyId(); }
+ BailoutId ContinueId() const { return EntryId(); }
+ BailoutId StackCheckId() const { return BodyId(); }
BailoutId BodyId() const { return BailoutId(local_id(0)); }
- protected:
- WhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos), cond_(NULL) {}
- static int parent_num_ids() { return IterationStatement::num_ids(); }
-
private:
+ friend class AstNodeFactory;
+
+ WhileStatement(ZoneList<const AstRawString*>* labels, int pos)
+ : IterationStatement(labels, pos, kWhileStatement), cond_(NULL) {}
+ static int parent_num_ids() { return IterationStatement::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* cond_;
@@ -730,8 +627,6 @@ class WhileStatement final : public IterationStatement {
class ForStatement final : public IterationStatement {
public:
- DECLARE_NODE_TYPE(ForStatement)
-
void Initialize(Statement* init,
Expression* cond,
Statement* next,
@@ -751,19 +646,19 @@ class ForStatement final : public IterationStatement {
void set_next(Statement* s) { next_ = s; }
static int num_ids() { return parent_num_ids() + 2; }
- BailoutId ContinueId() const override { return BailoutId(local_id(0)); }
- BailoutId StackCheckId() const override { return BodyId(); }
+ BailoutId ContinueId() const { return BailoutId(local_id(0)); }
+ BailoutId StackCheckId() const { return BodyId(); }
BailoutId BodyId() const { return BailoutId(local_id(1)); }
- protected:
- ForStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos),
+ private:
+ friend class AstNodeFactory;
+
+ ForStatement(ZoneList<const AstRawString*>* labels, int pos)
+ : IterationStatement(labels, pos, kForStatement),
init_(NULL),
cond_(NULL),
next_(NULL) {}
static int parent_num_ids() { return IterationStatement::num_ids(); }
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Statement* init_;
@@ -779,43 +674,40 @@ class ForEachStatement : public IterationStatement {
ITERATE // for (each of subject) body;
};
- void Initialize(Expression* each, Expression* subject, Statement* body) {
- IterationStatement::Initialize(body);
- each_ = each;
- subject_ = subject;
- }
-
- Expression* each() const { return each_; }
- Expression* subject() const { return subject_; }
-
- void set_each(Expression* e) { each_ = e; }
- void set_subject(Expression* e) { subject_ = e; }
+ using IterationStatement::Initialize;
static const char* VisitModeString(VisitMode mode) {
return mode == ITERATE ? "for-of" : "for-in";
}
protected:
- ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
-
- private:
- Expression* each_;
- Expression* subject_;
+ ForEachStatement(ZoneList<const AstRawString*>* labels, int pos,
+ NodeType type)
+ : IterationStatement(labels, pos, type) {}
};
class ForInStatement final : public ForEachStatement {
public:
- DECLARE_NODE_TYPE(ForInStatement)
+ void Initialize(Expression* each, Expression* subject, Statement* body) {
+ ForEachStatement::Initialize(body);
+ each_ = each;
+ subject_ = subject;
+ }
Expression* enumerable() const {
return subject();
}
+ Expression* each() const { return each_; }
+ Expression* subject() const { return subject_; }
+
+ void set_each(Expression* e) { each_ = e; }
+ void set_subject(Expression* e) { subject_ = e; }
+
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
FeedbackVectorSlot ForInFeedbackSlot() {
DCHECK(!for_in_feedback_slot_.IsInvalid());
@@ -833,17 +725,22 @@ class ForInStatement final : public ForEachStatement {
BailoutId PrepareId() const { return BailoutId(local_id(3)); }
BailoutId FilterId() const { return BailoutId(local_id(4)); }
BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
- BailoutId ContinueId() const override { return EntryId(); }
- BailoutId StackCheckId() const override { return BodyId(); }
-
- protected:
- ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : ForEachStatement(zone, labels, pos), for_in_type_(SLOW_FOR_IN) {}
- static int parent_num_ids() { return ForEachStatement::num_ids(); }
+ BailoutId ContinueId() const { return EntryId(); }
+ BailoutId StackCheckId() const { return BodyId(); }
private:
+ friend class AstNodeFactory;
+
+ ForInStatement(ZoneList<const AstRawString*>* labels, int pos)
+ : ForEachStatement(labels, pos, kForInStatement),
+ each_(nullptr),
+ subject_(nullptr),
+ for_in_type_(SLOW_FOR_IN) {}
+ static int parent_num_ids() { return ForEachStatement::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ Expression* each_;
+ Expression* subject_;
ForInType for_in_type_;
FeedbackVectorSlot each_slot_;
FeedbackVectorSlot for_in_feedback_slot_;
@@ -852,17 +749,10 @@ class ForInStatement final : public ForEachStatement {
class ForOfStatement final : public ForEachStatement {
public:
- DECLARE_NODE_TYPE(ForOfStatement)
-
- void Initialize(Expression* each,
- Expression* subject,
- Statement* body,
- Variable* iterator,
- Expression* assign_iterator,
- Expression* next_result,
- Expression* result_done,
- Expression* assign_each) {
- ForEachStatement::Initialize(each, subject, body);
+ void Initialize(Statement* body, Variable* iterator,
+ Expression* assign_iterator, Expression* next_result,
+ Expression* result_done, Expression* assign_each) {
+ ForEachStatement::Initialize(body);
iterator_ = iterator;
assign_iterator_ = assign_iterator;
next_result_ = next_result;
@@ -870,10 +760,6 @@ class ForOfStatement final : public ForEachStatement {
assign_each_ = assign_each;
}
- Expression* iterable() const {
- return subject();
- }
-
Variable* iterator() const {
return iterator_;
}
@@ -903,23 +789,23 @@ class ForOfStatement final : public ForEachStatement {
void set_result_done(Expression* e) { result_done_ = e; }
void set_assign_each(Expression* e) { assign_each_ = e; }
- BailoutId ContinueId() const override { return EntryId(); }
- BailoutId StackCheckId() const override { return BackEdgeId(); }
+ BailoutId ContinueId() const { return EntryId(); }
+ BailoutId StackCheckId() const { return BackEdgeId(); }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId BackEdgeId() const { return BailoutId(local_id(0)); }
- protected:
- ForOfStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : ForEachStatement(zone, labels, pos),
+ private:
+ friend class AstNodeFactory;
+
+ ForOfStatement(ZoneList<const AstRawString*>* labels, int pos)
+ : ForEachStatement(labels, pos, kForOfStatement),
iterator_(NULL),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
assign_each_(NULL) {}
static int parent_num_ids() { return ForEachStatement::num_ids(); }
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Variable* iterator_;
@@ -932,81 +818,75 @@ class ForOfStatement final : public ForEachStatement {
class ExpressionStatement final : public Statement {
public:
- DECLARE_NODE_TYPE(ExpressionStatement)
-
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
- bool IsJump() const override { return expression_->IsThrow(); }
-
- protected:
- ExpressionStatement(Zone* zone, Expression* expression, int pos)
- : Statement(zone, pos), expression_(expression) { }
+ bool IsJump() const { return expression_->IsThrow(); }
private:
+ friend class AstNodeFactory;
+
+ ExpressionStatement(Expression* expression, int pos)
+ : Statement(pos, kExpressionStatement), expression_(expression) {}
+
Expression* expression_;
};
class JumpStatement : public Statement {
public:
- bool IsJump() const final { return true; }
+ bool IsJump() const { return true; }
protected:
- explicit JumpStatement(Zone* zone, int pos) : Statement(zone, pos) {}
+ JumpStatement(int pos, NodeType type) : Statement(pos, type) {}
};
class ContinueStatement final : public JumpStatement {
public:
- DECLARE_NODE_TYPE(ContinueStatement)
-
IterationStatement* target() const { return target_; }
- protected:
- explicit ContinueStatement(Zone* zone, IterationStatement* target, int pos)
- : JumpStatement(zone, pos), target_(target) { }
-
private:
+ friend class AstNodeFactory;
+
+ ContinueStatement(IterationStatement* target, int pos)
+ : JumpStatement(pos, kContinueStatement), target_(target) {}
+
IterationStatement* target_;
};
class BreakStatement final : public JumpStatement {
public:
- DECLARE_NODE_TYPE(BreakStatement)
-
BreakableStatement* target() const { return target_; }
- protected:
- explicit BreakStatement(Zone* zone, BreakableStatement* target, int pos)
- : JumpStatement(zone, pos), target_(target) { }
-
private:
+ friend class AstNodeFactory;
+
+ BreakStatement(BreakableStatement* target, int pos)
+ : JumpStatement(pos, kBreakStatement), target_(target) {}
+
BreakableStatement* target_;
};
class ReturnStatement final : public JumpStatement {
public:
- DECLARE_NODE_TYPE(ReturnStatement)
-
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
- protected:
- explicit ReturnStatement(Zone* zone, Expression* expression, int pos)
- : JumpStatement(zone, pos), expression_(expression) { }
-
private:
+ friend class AstNodeFactory;
+
+ ReturnStatement(Expression* expression, int pos)
+ : JumpStatement(pos, kReturnStatement), expression_(expression) {}
+
Expression* expression_;
};
class WithStatement final : public Statement {
public:
- DECLARE_NODE_TYPE(WithStatement)
-
Scope* scope() { return scope_; }
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
@@ -1018,35 +898,33 @@ class WithStatement final : public Statement {
BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
BailoutId EntryId() const { return BailoutId(local_id(1)); }
- protected:
- WithStatement(Zone* zone, Scope* scope, Expression* expression,
- Statement* statement, int pos)
- : Statement(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ WithStatement(Scope* scope, Expression* expression, Statement* statement,
+ int pos)
+ : Statement(pos, kWithStatement),
+ base_id_(BailoutId::None().ToInt()),
scope_(scope),
expression_(expression),
- statement_(statement),
- base_id_(BailoutId::None().ToInt()) {}
- static int parent_num_ids() { return 0; }
+ statement_(statement) {}
+ static int parent_num_ids() { return 0; }
int base_id() const {
DCHECK(!BailoutId(base_id_).IsNone());
return base_id_;
}
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ int base_id_;
Scope* scope_;
Expression* expression_;
Statement* statement_;
- int base_id_;
};
class CaseClause final : public Expression {
public:
- DECLARE_NODE_TYPE(CaseClause)
-
bool is_default() const { return label_ == NULL; }
Expression* label() const {
CHECK(!is_default());
@@ -1063,12 +941,11 @@ class CaseClause final : public Expression {
Type* compare_type() { return compare_type_; }
void set_compare_type(Type* type) { compare_type_ = type; }
- protected:
- static int parent_num_ids() { return Expression::num_ids(); }
-
private:
- CaseClause(Zone* zone, Expression* label, ZoneList<Statement*>* statements,
- int pos);
+ friend class AstNodeFactory;
+
+ static int parent_num_ids() { return Expression::num_ids(); }
+ CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* label_;
@@ -1080,8 +957,6 @@ class CaseClause final : public Expression {
class SwitchStatement final : public BreakableStatement {
public:
- DECLARE_NODE_TYPE(SwitchStatement)
-
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
@@ -1092,13 +967,14 @@ class SwitchStatement final : public BreakableStatement {
void set_tag(Expression* t) { tag_ = t; }
- protected:
- SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
- : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
+ private:
+ friend class AstNodeFactory;
+
+ SwitchStatement(ZoneList<const AstRawString*>* labels, int pos)
+ : BreakableStatement(labels, TARGET_FOR_ANONYMOUS, pos, kSwitchStatement),
tag_(NULL),
cases_(NULL) {}
- private:
Expression* tag_;
ZoneList<CaseClause*>* cases_;
};
@@ -1111,8 +987,6 @@ class SwitchStatement final : public BreakableStatement {
// given if-statement has a then- or an else-part containing code.
class IfStatement final : public Statement {
public:
- DECLARE_NODE_TYPE(IfStatement)
-
bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
@@ -1124,7 +998,7 @@ class IfStatement final : public Statement {
void set_then_statement(Statement* s) { then_statement_ = s; }
void set_else_statement(Statement* s) { else_statement_ = s; }
- bool IsJump() const override {
+ bool IsJump() const {
return HasThenStatement() && then_statement()->IsJump()
&& HasElseStatement() && else_statement()->IsJump();
}
@@ -1135,28 +1009,28 @@ class IfStatement final : public Statement {
BailoutId ThenId() const { return BailoutId(local_id(1)); }
BailoutId ElseId() const { return BailoutId(local_id(2)); }
- protected:
- IfStatement(Zone* zone, Expression* condition, Statement* then_statement,
+ private:
+ friend class AstNodeFactory;
+
+ IfStatement(Expression* condition, Statement* then_statement,
Statement* else_statement, int pos)
- : Statement(zone, pos),
+ : Statement(pos, kIfStatement),
+ base_id_(BailoutId::None().ToInt()),
condition_(condition),
then_statement_(then_statement),
- else_statement_(else_statement),
- base_id_(BailoutId::None().ToInt()) {}
- static int parent_num_ids() { return 0; }
+ else_statement_(else_statement) {}
+ static int parent_num_ids() { return 0; }
int base_id() const {
DCHECK(!BailoutId(base_id_).IsNone());
return base_id_;
}
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ int base_id_;
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
- int base_id_;
};
@@ -1165,9 +1039,30 @@ class TryStatement : public Statement {
Block* try_block() const { return try_block_; }
void set_try_block(Block* b) { try_block_ = b; }
+ // Prediction of whether exceptions thrown into the handler for this try block
+ // will be caught.
+ //
+ // This is set in ast-numbering and later compiled into the code's handler
+ // table. The runtime uses this information to implement a feature that
+ // notifies the debugger when an uncaught exception is thrown, _before_ the
+ // exception propagates to the top.
+ //
+ // Since it's generally undecidable whether an exception will be caught, our
+ // prediction is only an approximation.
+ HandlerTable::CatchPrediction catch_prediction() const {
+ return catch_prediction_;
+ }
+ void set_catch_prediction(HandlerTable::CatchPrediction prediction) {
+ catch_prediction_ = prediction;
+ }
+
protected:
- TryStatement(Zone* zone, Block* try_block, int pos)
- : Statement(zone, pos), try_block_(try_block) {}
+ TryStatement(Block* try_block, int pos, NodeType type)
+ : Statement(pos, type),
+ catch_prediction_(HandlerTable::UNCAUGHT),
+ try_block_(try_block) {}
+
+ HandlerTable::CatchPrediction catch_prediction_;
private:
Block* try_block_;
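A hedged illustration of how the prediction reaches exception handling; the helper is hypothetical, but it uses only the accessor declared above:

// Hypothetical helper: should a throw that unwinds into this try statement
// be reported to the debugger as uncaught before it propagates?
bool ShouldNotifyDebugger(TryStatement* stmt) {
  return stmt->catch_prediction() == HandlerTable::UNCAUGHT;
}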
@@ -1176,8 +1071,6 @@ class TryStatement : public Statement {
class TryCatchStatement final : public TryStatement {
public:
- DECLARE_NODE_TYPE(TryCatchStatement)
-
Scope* scope() { return scope_; }
Variable* variable() { return variable_; }
Block* catch_block() const { return catch_block_; }
@@ -1193,62 +1086,63 @@ class TryCatchStatement final : public TryStatement {
// message instead of generating a new one.
// (When the catch block doesn't rethrow but is guaranteed to perform an
// ordinary throw, not clearing the old message is safe but not very useful.)
- bool clear_pending_message() { return clear_pending_message_; }
+ bool clear_pending_message() const {
+ return catch_prediction_ != HandlerTable::UNCAUGHT;
+ }
- protected:
- TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
- Variable* variable, Block* catch_block,
- bool clear_pending_message, int pos)
- : TryStatement(zone, try_block, pos),
+ private:
+ friend class AstNodeFactory;
+
+ TryCatchStatement(Block* try_block, Scope* scope, Variable* variable,
+ Block* catch_block,
+ HandlerTable::CatchPrediction catch_prediction, int pos)
+ : TryStatement(try_block, pos, kTryCatchStatement),
scope_(scope),
variable_(variable),
- catch_block_(catch_block),
- clear_pending_message_(clear_pending_message) {}
+ catch_block_(catch_block) {
+ catch_prediction_ = catch_prediction;
+ }
- private:
Scope* scope_;
Variable* variable_;
Block* catch_block_;
- bool clear_pending_message_;
};
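clear_pending_message() is now derived from the prediction instead of being stored separately: a handler that is expected to rethrow is constructed with HandlerTable::UNCAUGHT, so the pending message survives for the rethrow. A hedged construction sketch (the factory method name is an assumption):

// Desugared rethrowing handler: keep the original pending message alive.
TryCatchStatement* rethrowing = factory->NewTryCatchStatement(
    try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
DCHECK(!rethrowing->clear_pending_message());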
class TryFinallyStatement final : public TryStatement {
public:
- DECLARE_NODE_TYPE(TryFinallyStatement)
-
Block* finally_block() const { return finally_block_; }
void set_finally_block(Block* b) { finally_block_ = b; }
- protected:
- TryFinallyStatement(Zone* zone, Block* try_block, Block* finally_block,
- int pos)
- : TryStatement(zone, try_block, pos), finally_block_(finally_block) {}
-
private:
+ friend class AstNodeFactory;
+
+ TryFinallyStatement(Block* try_block, Block* finally_block, int pos)
+ : TryStatement(try_block, pos, kTryFinallyStatement),
+ finally_block_(finally_block) {}
+
Block* finally_block_;
};
class DebuggerStatement final : public Statement {
public:
- DECLARE_NODE_TYPE(DebuggerStatement)
-
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId DebugBreakId() const { return BailoutId(local_id(0)); }
- protected:
- explicit DebuggerStatement(Zone* zone, int pos)
- : Statement(zone, pos), base_id_(BailoutId::None().ToInt()) {}
- static int parent_num_ids() { return 0; }
+ private:
+ friend class AstNodeFactory;
+
+ explicit DebuggerStatement(int pos)
+ : Statement(pos, kDebuggerStatement),
+ base_id_(BailoutId::None().ToInt()) {}
+ static int parent_num_ids() { return 0; }
int base_id() const {
DCHECK(!BailoutId(base_id_).IsNone());
return base_id_;
}
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
int base_id_;
@@ -1256,11 +1150,9 @@ class DebuggerStatement final : public Statement {
class EmptyStatement final : public Statement {
- public:
- DECLARE_NODE_TYPE(EmptyStatement)
-
- protected:
- explicit EmptyStatement(Zone* zone, int pos): Statement(zone, pos) {}
+ private:
+ friend class AstNodeFactory;
+ explicit EmptyStatement(int pos) : Statement(pos, kEmptyStatement) {}
};
@@ -1270,28 +1162,32 @@ class EmptyStatement final : public Statement {
// from one statement to another during parsing.
class SloppyBlockFunctionStatement final : public Statement {
public:
- DECLARE_NODE_TYPE(SloppyBlockFunctionStatement)
-
Statement* statement() const { return statement_; }
void set_statement(Statement* statement) { statement_ = statement; }
Scope* scope() const { return scope_; }
+ SloppyBlockFunctionStatement* next() { return next_; }
+ void set_next(SloppyBlockFunctionStatement* next) { next_ = next; }
private:
- SloppyBlockFunctionStatement(Zone* zone, Statement* statement, Scope* scope)
- : Statement(zone, RelocInfo::kNoPosition),
+ friend class AstNodeFactory;
+
+ SloppyBlockFunctionStatement(Statement* statement, Scope* scope)
+ : Statement(kNoSourcePosition, kSloppyBlockFunctionStatement),
statement_(statement),
- scope_(scope) {}
+ scope_(scope),
+ next_(nullptr) {}
Statement* statement_;
Scope* const scope_;
+ SloppyBlockFunctionStatement* next_;
};
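The added next_ pointer threads these statements into an intrusive singly linked list, avoiding a separate side table during hoisting. A hedged traversal sketch (the list head is assumed to live wherever sloppy block functions are collected):

for (SloppyBlockFunctionStatement* s = head; s != nullptr; s = s->next()) {
  // Hoist or rewrite s->statement() as the sloppy-mode semantics require.
}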
class Literal final : public Expression {
public:
- DECLARE_NODE_TYPE(Literal)
-
- bool IsPropertyName() const override { return value_->IsPropertyName(); }
+  // Returns true if the literal represents a property name (i.e. it cannot
+  // be parsed as an array index).
+ bool IsPropertyName() const { return value_->IsPropertyName(); }
Handle<String> AsPropertyName() {
DCHECK(IsPropertyName());
@@ -1303,8 +1199,8 @@ class Literal final : public Expression {
return value_->AsString();
}
- bool ToBooleanIsTrue() const override { return value()->BooleanValue(); }
- bool ToBooleanIsFalse() const override { return !value()->BooleanValue(); }
+ bool ToBooleanIsTrue() const { return raw_value()->BooleanValue(); }
+ bool ToBooleanIsFalse() const { return !raw_value()->BooleanValue(); }
Handle<Object> value() const { return value_->value(); }
const AstValue* raw_value() const { return value_; }
@@ -1319,12 +1215,13 @@ class Literal final : public Expression {
return TypeFeedbackId(local_id(0));
}
- protected:
- Literal(Zone* zone, const AstValue* value, int position)
- : Expression(zone, position), value_(value) {}
- static int parent_num_ids() { return Expression::num_ids(); }
-
private:
+ friend class AstNodeFactory;
+
+ Literal(const AstValue* value, int position)
+ : Expression(position, kLiteral), value_(value) {}
+
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
const AstValue* value_;
@@ -1336,8 +1233,6 @@ class AstLiteralReindexer;
// Base class for literals that needs space in the corresponding JSFunction.
class MaterializedLiteral : public Expression {
public:
- MaterializedLiteral* AsMaterializedLiteral() final { return this; }
-
int literal_index() { return literal_index_; }
int depth() const {
@@ -1347,11 +1242,11 @@ class MaterializedLiteral : public Expression {
}
protected:
- MaterializedLiteral(Zone* zone, int literal_index, int pos)
- : Expression(zone, pos),
- literal_index_(literal_index),
+ MaterializedLiteral(int literal_index, int pos, NodeType type)
+ : Expression(pos, type),
is_simple_(false),
- depth_(0) {}
+ depth_(0),
+ literal_index_(literal_index) {}
// A materialized literal is simple if the values consist of only
// constants and simple object and array literals.
@@ -1360,7 +1255,7 @@ class MaterializedLiteral : public Expression {
friend class CompileTimeValue;
void set_depth(int depth) {
- DCHECK(depth >= 1);
+ DCHECK_LE(1, depth);
depth_ = depth;
}
@@ -1377,9 +1272,9 @@ class MaterializedLiteral : public Expression {
Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
private:
+ bool is_simple_ : 1;
+ int depth_ : 31;
int literal_index_;
- bool is_simple_;
- int depth_;
friend class AstLiteralReindexer;
};
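Reordering is_simple_ and depth_ into adjacent bitfields lets them share one 32-bit word instead of a separate bool plus int. A standalone illustration; the exact layout is ABI-dependent (MSVC, for instance, may not merge bitfields with different underlying types):

#include <cstdint>

struct Packed {
  bool is_simple : 1;
  int depth : 31;
};
// GCC/Clang pack this into 4 bytes; the weaker bound below holds everywhere.
static_assert(sizeof(Packed) <= 2 * sizeof(int32_t), "fields stay packed");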
@@ -1390,12 +1285,13 @@ class MaterializedLiteral : public Expression {
// to the code generator.
class ObjectLiteralProperty final : public ZoneObject {
public:
- enum Kind {
+ enum Kind : uint8_t {
CONSTANT, // Property with constant value (compile time).
COMPUTED, // Property with computed value (execution time).
MATERIALIZED_LITERAL, // Property value is a materialized literal.
- GETTER, SETTER, // Property is an accessor function.
- PROTOTYPE // Property is __proto__.
+ GETTER,
+ SETTER, // Property is an accessor function.
+ PROTOTYPE // Property is __proto__.
};
Expression* key() { return key_; }
@@ -1430,7 +1326,7 @@ class ObjectLiteralProperty final : public ZoneObject {
bool NeedsSetFunctionName() const;
- protected:
+ private:
friend class AstNodeFactory;
ObjectLiteralProperty(Expression* key, Expression* value, Kind kind,
@@ -1439,7 +1335,6 @@ class ObjectLiteralProperty final : public ZoneObject {
Expression* value, bool is_static,
bool is_computed_name);
- private:
Expression* key_;
Expression* value_;
FeedbackVectorSlot slots_[2];
@@ -1457,12 +1352,10 @@ class ObjectLiteral final : public MaterializedLiteral {
public:
typedef ObjectLiteralProperty Property;
- DECLARE_NODE_TYPE(ObjectLiteral)
-
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
- int properties_count() const { return constant_properties_->length() / 2; }
+ int properties_count() const { return boilerplate_properties_; }
ZoneList<Property*>* properties() const { return properties_; }
bool fast_elements() const { return fast_elements_; }
bool may_store_doubles() const { return may_store_doubles_; }
@@ -1502,9 +1395,10 @@ class ObjectLiteral final : public MaterializedLiteral {
};
struct Accessors: public ZoneObject {
- Accessors() : getter(NULL), setter(NULL) {}
+ Accessors() : getter(NULL), setter(NULL), bailout_id(BailoutId::None()) {}
ObjectLiteralProperty* getter;
ObjectLiteralProperty* setter;
+ BailoutId bailout_id;
};
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
@@ -1526,39 +1420,42 @@ class ObjectLiteral final : public MaterializedLiteral {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
- protected:
- ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
- int boilerplate_properties, int pos)
- : MaterializedLiteral(zone, literal_index, pos),
- properties_(properties),
+ private:
+ friend class AstNodeFactory;
+
+ ObjectLiteral(ZoneList<Property*>* properties, int literal_index,
+ uint32_t boilerplate_properties, int pos)
+ : MaterializedLiteral(literal_index, pos, kObjectLiteral),
boilerplate_properties_(boilerplate_properties),
fast_elements_(false),
has_elements_(false),
- may_store_doubles_(false) {}
- static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
+ may_store_doubles_(false),
+ properties_(properties) {}
- private:
+ static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+
+ uint32_t boilerplate_properties_ : 29;
+ bool fast_elements_ : 1;
+ bool has_elements_ : 1;
+ bool may_store_doubles_ : 1;
+ FeedbackVectorSlot slot_;
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
- int boilerplate_properties_;
- bool fast_elements_;
- bool has_elements_;
- bool may_store_doubles_;
- FeedbackVectorSlot slot_;
};
// A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable : public TemplateHashMap<Literal, ObjectLiteral::Accessors,
- ZoneAllocationPolicy> {
+class AccessorTable
+ : public base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ ZoneAllocationPolicy> {
public:
explicit AccessorTable(Zone* zone)
- : TemplateHashMap<Literal, ObjectLiteral::Accessors,
- ZoneAllocationPolicy>(Literal::Match,
- ZoneAllocationPolicy(zone)),
+ : base::TemplateHashMap<Literal, ObjectLiteral::Accessors,
+ ZoneAllocationPolicy>(Literal::Match,
+ ZoneAllocationPolicy(zone)),
zone_(zone) {}
Iterator lookup(Literal* literal) {
@@ -1575,23 +1472,22 @@ class AccessorTable : public TemplateHashMap<Literal, ObjectLiteral::Accessors,
// Node for capturing a regexp literal.
class RegExpLiteral final : public MaterializedLiteral {
public:
- DECLARE_NODE_TYPE(RegExpLiteral)
-
Handle<String> pattern() const { return pattern_->string(); }
int flags() const { return flags_; }
- protected:
- RegExpLiteral(Zone* zone, const AstRawString* pattern, int flags,
- int literal_index, int pos)
- : MaterializedLiteral(zone, literal_index, pos),
- pattern_(pattern),
- flags_(flags) {
+ private:
+ friend class AstNodeFactory;
+
+ RegExpLiteral(const AstRawString* pattern, int flags, int literal_index,
+ int pos)
+ : MaterializedLiteral(literal_index, pos, kRegExpLiteral),
+ flags_(flags),
+ pattern_(pattern) {
set_depth(1);
}
- private:
- const AstRawString* const pattern_;
int const flags_;
+ const AstRawString* const pattern_;
};
@@ -1599,8 +1495,6 @@ class RegExpLiteral final : public MaterializedLiteral {
// for minimizing the work when constructing it at runtime.
class ArrayLiteral final : public MaterializedLiteral {
public:
- DECLARE_NODE_TYPE(ArrayLiteral)
-
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ElementsKind constant_elements_kind() const {
DCHECK_EQ(2, constant_elements_->length());
@@ -1651,37 +1545,34 @@ class ArrayLiteral final : public MaterializedLiteral {
};
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
- protected:
- ArrayLiteral(Zone* zone, ZoneList<Expression*>* values,
- int first_spread_index, int literal_index, int pos)
- : MaterializedLiteral(zone, literal_index, pos),
- values_(values),
- first_spread_index_(first_spread_index) {}
- static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
-
private:
+ friend class AstNodeFactory;
+
+ ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index,
+ int literal_index, int pos)
+ : MaterializedLiteral(literal_index, pos, kArrayLiteral),
+ first_spread_index_(first_spread_index),
+ values_(values) {}
+
+ static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- Handle<FixedArray> constant_elements_;
- ZoneList<Expression*>* values_;
int first_spread_index_;
FeedbackVectorSlot literal_slot_;
+ Handle<FixedArray> constant_elements_;
+ ZoneList<Expression*>* values_;
};
class VariableProxy final : public Expression {
public:
- DECLARE_NODE_TYPE(VariableProxy)
-
- bool IsValidReferenceExpression() const override {
+ bool IsValidReferenceExpression() const {
return !is_this() && !is_new_target();
}
- bool IsArguments() const { return is_resolved() && var()->is_arguments(); }
-
Handle<String> name() const { return raw_name()->string(); }
const AstRawString* raw_name() const {
return is_resolved() ? var_->raw_name() : raw_name_;
@@ -1724,20 +1615,23 @@ class VariableProxy final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId BeforeId() const { return BailoutId(local_id(0)); }
+ void set_next_unresolved(VariableProxy* next) { next_unresolved_ = next; }
+ VariableProxy* next_unresolved() { return next_unresolved_; }
- protected:
- VariableProxy(Zone* zone, Variable* var, int start_position,
- int end_position);
+ private:
+ friend class AstNodeFactory;
+
+ VariableProxy(Variable* var, int start_position, int end_position);
+ VariableProxy(const AstRawString* name, Variable::Kind variable_kind,
+ int start_position, int end_position);
+ explicit VariableProxy(const VariableProxy* copy_from);
- VariableProxy(Zone* zone, const AstRawString* name,
- Variable::Kind variable_kind, int start_position,
- int end_position);
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
@@ -1746,18 +1640,17 @@ class VariableProxy final : public Expression {
class IsResolvedField : public BitField8<bool, 2, 1> {};
class IsNewTargetField : public BitField8<bool, 3, 1> {};
- // Start with 16-bit (or smaller) field, which should get packed together
- // with Expression's trailing 16-bit field.
uint8_t bit_field_;
+  // Position is stored in the AstNode superclass, but VariableProxy needs to
+  // know its end position too (for error messages). It cannot be inferred
+  // from the length of the variable name because the name can contain
+  // escapes.
+ int end_position_;
FeedbackVectorSlot variable_feedback_slot_;
union {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
};
- // Position is stored in the AstNode superclass, but VariableProxy needs to
- // know its end position too (for error messages). It cannot be inferred from
- // the variable name length because it can contain escapes.
- int end_position_;
+ VariableProxy* next_unresolved_;
};
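Like SloppyBlockFunctionStatement above, VariableProxy now carries an intrusive next_unresolved_ link, so scope resolution can revisit every still-unresolved reference without auxiliary storage. A hedged traversal sketch (the list head and the resolution step are assumptions):

for (VariableProxy* proxy = unresolved_head; proxy != nullptr;
     proxy = proxy->next_unresolved()) {
  if (!proxy->is_resolved()) ResolveVariable(proxy);  // hypothetical step
}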
@@ -1774,9 +1667,7 @@ enum LhsKind {
class Property final : public Expression {
public:
- DECLARE_NODE_TYPE(Property)
-
- bool IsValidReferenceExpression() const override { return true; }
+ bool IsValidReferenceExpression() const { return true; }
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
@@ -1792,12 +1683,10 @@ class Property final : public Expression {
}
// Type feedback information.
- bool IsMonomorphic() override { return receiver_types_.length() == 1; }
- SmallMapList* GetReceiverTypes() override { return &receiver_types_; }
- KeyedAccessStoreMode GetStoreMode() const override { return STANDARD_STORE; }
- IcCheckType GetKeyType() const override {
- return KeyTypeField::decode(bit_field_);
- }
+ bool IsMonomorphic() const { return receiver_types_.length() == 1; }
+ SmallMapList* GetReceiverTypes() { return &receiver_types_; }
+ KeyedAccessStoreMode GetStoreMode() const { return STANDARD_STORE; }
+ IcCheckType GetKeyType() const { return KeyTypeField::decode(bit_field_); }
bool IsUninitialized() const {
return !is_for_call() && HasNoTypeInformation();
}
@@ -1824,7 +1713,7 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) {
FeedbackVectorSlotKind kind = key()->IsPropertyName()
? FeedbackVectorSlotKind::LOAD_IC
: FeedbackVectorSlotKind::KEYED_LOAD_IC;
@@ -1835,6 +1724,7 @@ class Property final : public Expression {
return property_feedback_slot_;
}
+  // Returns the property's assignment type.
static LhsKind GetAssignType(Property* property) {
if (property == NULL) return VARIABLE;
bool super_access = property->IsSuperAccess();
@@ -1843,23 +1733,25 @@ class Property final : public Expression {
: (super_access ? KEYED_SUPER_PROPERTY : KEYED_PROPERTY);
}
- protected:
- Property(Zone* zone, Expression* obj, Expression* key, int pos)
- : Expression(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ Property(Expression* obj, Expression* key, int pos)
+ : Expression(pos, kProperty),
bit_field_(IsForCallField::encode(false) |
IsStringAccessField::encode(false) |
InlineCacheStateField::encode(UNINITIALIZED)),
obj_(obj),
key_(key) {}
- static int parent_num_ids() { return Expression::num_ids(); }
- private:
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
class IsForCallField : public BitField8<bool, 0, 1> {};
class IsStringAccessField : public BitField8<bool, 1, 1> {};
class KeyTypeField : public BitField8<IcCheckType, 2, 1> {};
class InlineCacheStateField : public BitField8<InlineCacheState, 3, 4> {};
+
uint8_t bit_field_;
FeedbackVectorSlot property_feedback_slot_;
Expression* obj_;
@@ -1870,8 +1762,6 @@ class Property final : public Expression {
class Call final : public Expression {
public:
- DECLARE_NODE_TYPE(Call)
-
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1879,20 +1769,20 @@ class Call final : public Expression {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CallFeedbackSlot() const { return stub_slot_; }
FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
- SmallMapList* GetReceiverTypes() override {
+ SmallMapList* GetReceiverTypes() {
if (expression()->IsProperty()) {
return expression()->AsProperty()->GetReceiverTypes();
}
- return NULL;
+ return nullptr;
}
- bool IsMonomorphic() override {
+ bool IsMonomorphic() const {
if (expression()->IsProperty()) {
return expression()->AsProperty()->IsMonomorphic();
}
@@ -1934,13 +1824,15 @@ class Call final : public Expression {
bit_field_ = IsUninitializedField::update(bit_field_, b);
}
+ bool is_possibly_eval() const {
+ return IsPossiblyEvalField::decode(bit_field_);
+ }
+
TailCallMode tail_call_mode() const {
return IsTailField::decode(bit_field_) ? TailCallMode::kAllow
: TailCallMode::kDisallow;
}
- void MarkTail() override {
- bit_field_ = IsTailField::update(bit_field_, true);
- }
+ void MarkTail() { bit_field_ = IsTailField::update(bit_field_, true); }
enum CallType {
POSSIBLY_EVAL_CALL,
@@ -1954,48 +1846,56 @@ class Call final : public Expression {
OTHER_CALL
};
+ enum PossiblyEval {
+ IS_POSSIBLY_EVAL,
+ NOT_EVAL,
+ };
+
// Helpers to determine how to handle the call.
- CallType GetCallType(Isolate* isolate) const;
- bool IsUsingCallFeedbackSlot(Isolate* isolate) const;
- bool IsUsingCallFeedbackICSlot(Isolate* isolate) const;
+ CallType GetCallType() const;
+ bool IsUsingCallFeedbackSlot() const;
+ bool IsUsingCallFeedbackICSlot() const;
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
bool return_is_recorded_;
#endif
- protected:
- Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
- int pos)
- : Expression(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
+ PossiblyEval possibly_eval)
+ : Expression(pos, kCall),
+ bit_field_(
+ IsUninitializedField::encode(false) |
+ IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL)),
expression_(expression),
- arguments_(arguments),
- bit_field_(IsUninitializedField::encode(false)) {
+ arguments_(arguments) {
if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call();
}
}
- static int parent_num_ids() { return Expression::num_ids(); }
- private:
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ class IsUninitializedField : public BitField8<bool, 0, 1> {};
+ class IsTailField : public BitField8<bool, 1, 1> {};
+ class IsPossiblyEvalField : public BitField8<bool, 2, 1> {};
+
+ uint8_t bit_field_;
FeedbackVectorSlot ic_slot_;
FeedbackVectorSlot stub_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
Handle<AllocationSite> allocation_site_;
- class IsUninitializedField : public BitField8<bool, 0, 1> {};
- class IsTailField : public BitField8<bool, 1, 1> {};
- uint8_t bit_field_;
};
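Whether a call might be a direct eval is now decided when the node is built and latched into bit_field_, which is why GetCallType() above no longer needs an Isolate. A hedged usage sketch (the factory method name and the predicate feeding it are assumptions):

Call* call = factory->NewCall(
    callee, args, pos,
    callee_may_be_direct_eval ? Call::IS_POSSIBLY_EVAL : Call::NOT_EVAL);
if (call->is_possibly_eval()) {
  // Emit the slower call sequence that can enter a direct eval.
}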
class CallNew final : public Expression {
public:
- DECLARE_NODE_TYPE(CallNew)
-
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -2003,8 +1903,11 @@ class CallNew final : public Expression {
// Type feedback information.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override {
+ FeedbackVectorSlotCache* cache) {
callnew_feedback_slot_ = spec->AddGeneralSlot();
+ // Construct calls have two slots, one right after the other.
+ // The second slot stores the call count for monomorphic calls.
+ spec->AddGeneralSlot();
}
FeedbackVectorSlot CallNewFeedbackSlot() {
@@ -2012,7 +1915,7 @@ class CallNew final : public Expression {
return callnew_feedback_slot_;
}
- bool IsMonomorphic() override { return is_monomorphic_; }
+ bool IsMonomorphic() const { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
Handle<AllocationSite> allocation_site() const {
return allocation_site_;
@@ -2032,25 +1935,24 @@ class CallNew final : public Expression {
is_monomorphic_ = true;
}
- protected:
- CallNew(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
- int pos)
- : Expression(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
+ : Expression(pos, kCallNew),
+ is_monomorphic_(false),
expression_(expression),
- arguments_(arguments),
- is_monomorphic_(false) {}
+ arguments_(arguments) {}
static int parent_num_ids() { return Expression::num_ids(); }
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ bool is_monomorphic_;
+ FeedbackVectorSlot callnew_feedback_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
- bool is_monomorphic_;
Handle<JSFunction> target_;
Handle<AllocationSite> allocation_site_;
- FeedbackVectorSlot callnew_feedback_slot_;
};
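Because AssignFeedbackVectorSlots above reserves two consecutive general slots, the call-count slot is addressable by offsetting the primary slot. A hedged sketch that leans on the "right after the other" layout documented in the comment (the int-based slot constructor is assumed):

FeedbackVectorSlot primary = expr->CallNewFeedbackSlot();
FeedbackVectorSlot count_slot(primary.ToInt() + 1);  // monomorphic call count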
@@ -2060,8 +1962,6 @@ class CallNew final : public Expression {
// implemented in JavaScript (see "v8natives.js").
class CallRuntime final : public Expression {
public:
- DECLARE_NODE_TYPE(CallRuntime)
-
ZoneList<Expression*>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == NULL; }
@@ -2081,33 +1981,31 @@ class CallRuntime final : public Expression {
return is_jsruntime() ? "(context function)" : function_->name;
}
- protected:
- CallRuntime(Zone* zone, const Runtime::Function* function,
- ZoneList<Expression*>* arguments, int pos)
- : Expression(zone, pos), function_(function), arguments_(arguments) {}
+ private:
+ friend class AstNodeFactory;
- CallRuntime(Zone* zone, int context_index, ZoneList<Expression*>* arguments,
- int pos)
- : Expression(zone, pos),
- function_(NULL),
+ CallRuntime(const Runtime::Function* function,
+ ZoneList<Expression*>* arguments, int pos)
+ : Expression(pos, kCallRuntime),
+ function_(function),
+ arguments_(arguments) {}
+ CallRuntime(int context_index, ZoneList<Expression*>* arguments, int pos)
+ : Expression(pos, kCallRuntime),
context_index_(context_index),
+ function_(NULL),
arguments_(arguments) {}
static int parent_num_ids() { return Expression::num_ids(); }
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- const Runtime::Function* function_;
int context_index_;
+ const Runtime::Function* function_;
ZoneList<Expression*>* arguments_;
};
class UnaryOperation final : public Expression {
public:
- DECLARE_NODE_TYPE(UnaryOperation)
-
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
@@ -2118,16 +2016,17 @@ class UnaryOperation final : public Expression {
BailoutId MaterializeTrueId() const { return BailoutId(local_id(0)); }
BailoutId MaterializeFalseId() const { return BailoutId(local_id(1)); }
- void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
- protected:
- UnaryOperation(Zone* zone, Token::Value op, Expression* expression, int pos)
- : Expression(zone, pos), op_(op), expression_(expression) {
+ private:
+ friend class AstNodeFactory;
+
+ UnaryOperation(Token::Value op, Expression* expression, int pos)
+ : Expression(pos, kUnaryOperation), op_(op), expression_(expression) {
DCHECK(Token::IsUnaryOp(op));
}
- static int parent_num_ids() { return Expression::num_ids(); }
- private:
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Token::Value op_;
@@ -2137,8 +2036,6 @@ class UnaryOperation final : public Expression {
class BinaryOperation final : public Expression {
public:
- DECLARE_NODE_TYPE(BinaryOperation)
-
Token::Value op() const { return static_cast<Token::Value>(op_); }
Expression* left() const { return left_; }
void set_left(Expression* e) { left_ = e; }
@@ -2149,7 +2046,7 @@ class BinaryOperation final : public Expression {
allocation_site_ = allocation_site;
}
- void MarkTail() override {
+ void MarkTail() {
switch (op()) {
case Token::COMMA:
case Token::AND:
@@ -2165,6 +2062,16 @@ class BinaryOperation final : public Expression {
static int num_ids() { return parent_num_ids() + 2; }
BailoutId RightId() const { return BailoutId(local_id(0)); }
+  // BinaryOperation will have both a slot in the feedback vector and the
+  // TypeFeedbackId to record the type information. The TypeFeedbackId is
+  // used by full codegen and the feedback vector slot by the interpreter.
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache);
+
+ FeedbackVectorSlot BinaryOperationFeedbackSlot() const {
+ return type_feedback_slot_;
+ }
+
TypeFeedbackId BinaryOperationFeedbackId() const {
return TypeFeedbackId(local_id(1));
}
@@ -2176,12 +2083,13 @@ class BinaryOperation final : public Expression {
if (arg.IsJust()) fixed_right_arg_value_ = arg.FromJust();
}
- void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) override;
+ void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
- protected:
- BinaryOperation(Zone* zone, Token::Value op, Expression* left,
- Expression* right, int pos)
- : Expression(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ BinaryOperation(Token::Value op, Expression* left, Expression* right, int pos)
+ : Expression(pos, kBinaryOperation),
op_(static_cast<byte>(op)),
has_fixed_right_arg_(false),
fixed_right_arg_value_(0),
@@ -2189,9 +2097,8 @@ class BinaryOperation final : public Expression {
right_(right) {
DCHECK(Token::IsBinaryOp(op));
}
- static int parent_num_ids() { return Expression::num_ids(); }
- private:
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
const byte op_; // actually Token::Value
@@ -2202,13 +2109,12 @@ class BinaryOperation final : public Expression {
Expression* left_;
Expression* right_;
Handle<AllocationSite> allocation_site_;
+ FeedbackVectorSlot type_feedback_slot_;
};
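Given the comment above, a hedged sketch of what the newly declared AssignFeedbackVectorSlots could do in ast.cc; full codegen keeps using the TypeFeedbackId, so a single general slot for the interpreter suffices (illustrative, not the actual implementation):

void BinaryOperation::AssignFeedbackVectorSlots(
    Isolate* isolate, FeedbackVectorSpec* spec,
    FeedbackVectorSlotCache* cache) {
  type_feedback_slot_ = spec->AddGeneralSlot();
}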
class CountOperation final : public Expression {
public:
- DECLARE_NODE_TYPE(CountOperation)
-
bool is_prefix() const { return IsPrefixField::decode(bit_field_); }
bool is_postfix() const { return !is_prefix(); }
@@ -2220,12 +2126,10 @@ class CountOperation final : public Expression {
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
- bool IsMonomorphic() override { return receiver_types_.length() == 1; }
- SmallMapList* GetReceiverTypes() override { return &receiver_types_; }
- IcCheckType GetKeyType() const override {
- return KeyTypeField::decode(bit_field_);
- }
- KeyedAccessStoreMode GetStoreMode() const override {
+ bool IsMonomorphic() const { return receiver_types_.length() == 1; }
+ SmallMapList* GetReceiverTypes() { return &receiver_types_; }
+ IcCheckType GetKeyType() const { return KeyTypeField::decode(bit_field_); }
+ KeyedAccessStoreMode GetStoreMode() const {
return StoreModeField::decode(bit_field_);
}
Type* type() const { return type_; }
@@ -2247,22 +2151,27 @@ class CountOperation final : public Expression {
return TypeFeedbackId(local_id(3));
}
+  // The feedback slot for the binary operation is only used by Ignition.
+ FeedbackVectorSlot CountBinaryOpFeedbackSlot() const {
+ return binary_operation_slot_;
+ }
+
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CountSlot() const { return slot_; }
- protected:
- CountOperation(Zone* zone, Token::Value op, bool is_prefix, Expression* expr,
- int pos)
- : Expression(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
+ : Expression(pos, kCountOperation),
bit_field_(
IsPrefixField::encode(is_prefix) | KeyTypeField::encode(ELEMENT) |
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op)),
type_(NULL),
expression_(expr) {}
- static int parent_num_ids() { return Expression::num_ids(); }
- private:
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
class IsPrefixField : public BitField16<bool, 0, 1> {};
@@ -2273,17 +2182,16 @@ class CountOperation final : public Expression {
// Starts with 16-bit field, which should get packed together with
// Expression's trailing 16-bit field.
uint16_t bit_field_;
+ FeedbackVectorSlot slot_;
+ FeedbackVectorSlot binary_operation_slot_;
Type* type_;
Expression* expression_;
SmallMapList receiver_types_;
- FeedbackVectorSlot slot_;
};
class CompareOperation final : public Expression {
public:
- DECLARE_NODE_TYPE(CompareOperation)
-
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
@@ -2304,19 +2212,20 @@ class CompareOperation final : public Expression {
bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
- protected:
- CompareOperation(Zone* zone, Token::Value op, Expression* left,
- Expression* right, int pos)
- : Expression(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ CompareOperation(Token::Value op, Expression* left, Expression* right,
+ int pos)
+ : Expression(pos, kCompareOperation),
op_(op),
left_(left),
right_(right),
combined_type_(Type::None()) {
DCHECK(Token::IsCompareOp(op));
}
- static int parent_num_ids() { return Expression::num_ids(); }
- private:
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Token::Value op_;
@@ -2329,8 +2238,6 @@ class CompareOperation final : public Expression {
class Spread final : public Expression {
public:
- DECLARE_NODE_TYPE(Spread)
-
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
@@ -2338,23 +2245,24 @@ class Spread final : public Expression {
static int num_ids() { return parent_num_ids(); }
- protected:
- Spread(Zone* zone, Expression* expression, int pos, int expr_pos)
- : Expression(zone, pos), expression_(expression), expr_pos_(expr_pos) {}
- static int parent_num_ids() { return Expression::num_ids(); }
-
private:
+ friend class AstNodeFactory;
+
+ Spread(Expression* expression, int pos, int expr_pos)
+ : Expression(pos, kSpread),
+ expr_pos_(expr_pos),
+ expression_(expression) {}
+
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- Expression* expression_;
int expr_pos_;
+ Expression* expression_;
};
class Conditional final : public Expression {
public:
- DECLARE_NODE_TYPE(Conditional)
-
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
@@ -2363,7 +2271,7 @@ class Conditional final : public Expression {
void set_then_expression(Expression* e) { then_expression_ = e; }
void set_else_expression(Expression* e) { else_expression_ = e; }
- void MarkTail() override {
+ void MarkTail() {
then_expression_->MarkTail();
else_expression_->MarkTail();
}
@@ -2372,16 +2280,17 @@ class Conditional final : public Expression {
BailoutId ThenId() const { return BailoutId(local_id(0)); }
BailoutId ElseId() const { return BailoutId(local_id(1)); }
- protected:
- Conditional(Zone* zone, Expression* condition, Expression* then_expression,
+ private:
+ friend class AstNodeFactory;
+
+ Conditional(Expression* condition, Expression* then_expression,
Expression* else_expression, int position)
- : Expression(zone, position),
+ : Expression(position, kConditional),
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression) {}
- static int parent_num_ids() { return Expression::num_ids(); }
- private:
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* condition_;
@@ -2392,8 +2301,6 @@ class Conditional final : public Expression {
class Assignment final : public Expression {
public:
- DECLARE_NODE_TYPE(Assignment)
-
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
Token::Value binary_op() const;
@@ -2415,18 +2322,16 @@ class Assignment final : public Expression {
// Type feedback information.
TypeFeedbackId AssignmentFeedbackId() { return TypeFeedbackId(local_id(1)); }
- bool IsMonomorphic() override { return receiver_types_.length() == 1; }
bool IsUninitialized() const {
return IsUninitializedField::decode(bit_field_);
}
bool HasNoTypeInformation() {
return IsUninitializedField::decode(bit_field_);
}
- SmallMapList* GetReceiverTypes() override { return &receiver_types_; }
- IcCheckType GetKeyType() const override {
- return KeyTypeField::decode(bit_field_);
- }
- KeyedAccessStoreMode GetStoreMode() const override {
+ bool IsMonomorphic() const { return receiver_types_.length() == 1; }
+ SmallMapList* GetReceiverTypes() { return &receiver_types_; }
+ IcCheckType GetKeyType() const { return KeyTypeField::decode(bit_field_); }
+ KeyedAccessStoreMode GetStoreMode() const {
return StoreModeField::decode(bit_field_);
}
void set_is_uninitialized(bool b) {
@@ -2440,15 +2345,15 @@ class Assignment final : public Expression {
}
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
FeedbackVectorSlot AssignmentSlot() const { return slot_; }
- protected:
- Assignment(Zone* zone, Token::Value op, Expression* target, Expression* value,
- int pos);
- static int parent_num_ids() { return Expression::num_ids(); }
-
private:
+ friend class AstNodeFactory;
+
+ Assignment(Token::Value op, Expression* target, Expression* value, int pos);
+
+ static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
class IsUninitializedField : public BitField16<bool, 0, 1> {};
@@ -2462,11 +2367,11 @@ class Assignment final : public Expression {
// Starts with 16-bit field, which should get packed together with
// Expression's trailing 16-bit field.
uint16_t bit_field_;
+ FeedbackVectorSlot slot_;
Expression* target_;
Expression* value_;
BinaryOperation* binary_operation_;
SmallMapList receiver_types_;
- FeedbackVectorSlot slot_;
};
@@ -2485,10 +2390,8 @@ class Assignment final : public Expression {
//
// Furthermore, an invariant that should be respected is that the wrapped
// node is not a RewritableExpression.
-class RewritableExpression : public Expression {
+class RewritableExpression final : public Expression {
public:
- DECLARE_NODE_TYPE(RewritableExpression)
-
Expression* expression() const { return expr_; }
bool is_rewritten() const { return is_rewritten_; }
@@ -2502,15 +2405,16 @@ class RewritableExpression : public Expression {
static int num_ids() { return parent_num_ids(); }
- protected:
- RewritableExpression(Zone* zone, Expression* expression)
- : Expression(zone, expression->position()),
+ private:
+ friend class AstNodeFactory;
+
+ explicit RewritableExpression(Expression* expression)
+ : Expression(expression->position(), kRewritableExpression),
is_rewritten_(false),
expr_(expression) {
DCHECK(!expression->IsRewritableExpression());
}
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
bool is_rewritten_;
@@ -2522,22 +2426,32 @@ class RewritableExpression : public Expression {
// desired, must be done beforehand (see the parser).
class Yield final : public Expression {
public:
- DECLARE_NODE_TYPE(Yield)
+ enum OnException { kOnExceptionThrow, kOnExceptionRethrow };
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
+ bool rethrow_on_exception() const {
+ return on_exception_ == kOnExceptionRethrow;
+ }
+ int yield_id() const { return yield_id_; }
void set_generator_object(Expression* e) { generator_object_ = e; }
void set_expression(Expression* e) { expression_ = e; }
+ void set_yield_id(int yield_id) { yield_id_ = yield_id; }
- protected:
- Yield(Zone* zone, Expression* generator_object, Expression* expression,
- int pos)
- : Expression(zone, pos),
+ private:
+ friend class AstNodeFactory;
+
+ Yield(Expression* generator_object, Expression* expression, int pos,
+ OnException on_exception)
+ : Expression(pos, kYield),
+ on_exception_(on_exception),
+ yield_id_(-1),
generator_object_(generator_object),
expression_(expression) {}
- private:
+ OnException on_exception_;
+ int yield_id_;
Expression* generator_object_;
Expression* expression_;
};
@@ -2545,16 +2459,15 @@ class Yield final : public Expression {
class Throw final : public Expression {
public:
- DECLARE_NODE_TYPE(Throw)
-
Expression* exception() const { return exception_; }
void set_exception(Expression* e) { exception_ = e; }
- protected:
- Throw(Zone* zone, Expression* exception, int pos)
- : Expression(zone, pos), exception_(exception) {}
-
private:
+ friend class AstNodeFactory;
+
+ Throw(Expression* exception, int pos)
+ : Expression(pos, kThrow), exception_(exception) {}
+
Expression* exception_;
};
@@ -2572,24 +2485,22 @@ class FunctionLiteral final : public Expression {
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
- DECLARE_NODE_TYPE(FunctionLiteral)
-
Handle<String> name() const { return raw_name_->string(); }
const AstString* raw_name() const { return raw_name_; }
void set_raw_name(const AstString* name) { raw_name_ = name; }
- Scope* scope() const { return scope_; }
+ DeclarationScope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
int function_token_position() const { return function_token_position_; }
int start_position() const;
int end_position() const;
int SourceSize() const { return end_position() - start_position(); }
- bool is_declaration() const { return IsDeclaration::decode(bitfield_); }
+ bool is_declaration() const { return function_type() == kDeclaration; }
bool is_named_expression() const {
- return IsNamedExpression::decode(bitfield_);
+ return function_type() == kNamedExpression;
}
bool is_anonymous_expression() const {
- return IsAnonymousExpression::decode(bitfield_);
+ return function_type() == kAnonymousExpression;
}
LanguageMode language_mode() const;
@@ -2666,6 +2577,9 @@ class FunctionLiteral final : public Expression {
bitfield_ = ShouldBeUsedOnceHint::update(bitfield_, true);
}
+ FunctionType function_type() const {
+ return FunctionTypeBits::decode(bitfield_);
+ }
FunctionKind kind() const { return FunctionKindBits::decode(bitfield_); }
int ast_node_count() { return ast_properties_.node_count(); }
@@ -2682,35 +2596,38 @@ class FunctionLiteral final : public Expression {
dont_optimize_reason_ = reason;
}
- bool IsAnonymousFunctionDefinition() const final {
+ bool IsAnonymousFunctionDefinition() const {
return is_anonymous_expression();
}
- protected:
+ int yield_count() { return yield_count_; }
+ void set_yield_count(int yield_count) { yield_count_ = yield_count; }
+
+ private:
+ friend class AstNodeFactory;
+
FunctionLiteral(Zone* zone, const AstString* name,
- AstValueFactory* ast_value_factory, Scope* scope,
+ AstValueFactory* ast_value_factory, DeclarationScope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
int expected_property_count, int parameter_count,
FunctionType function_type,
ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, FunctionKind kind,
int position, bool is_function)
- : Expression(zone, position),
- raw_name_(name),
- scope_(scope),
- body_(body),
- raw_inferred_name_(ast_value_factory->empty_string()),
- ast_properties_(zone),
+ : Expression(position, kFunctionLiteral),
dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
- function_token_position_(RelocInfo::kNoPosition) {
+ function_token_position_(kNoSourcePosition),
+ yield_count_(0),
+ raw_name_(name),
+ scope_(scope),
+ body_(body),
+ raw_inferred_name_(ast_value_factory->empty_string()),
+ ast_properties_(zone) {
bitfield_ =
- IsDeclaration::encode(function_type == kDeclaration) |
- IsNamedExpression::encode(function_type == kNamedExpression) |
- IsAnonymousExpression::encode(function_type == kAnonymousExpression) |
- Pretenure::encode(false) |
+ FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
IsFunction::encode(is_function) |
@@ -2719,33 +2636,32 @@ class FunctionLiteral final : public Expression {
DCHECK(IsValidFunctionKind(kind));
}
- private:
- class IsDeclaration : public BitField16<bool, 0, 1> {};
- class IsNamedExpression : public BitField16<bool, 1, 1> {};
- class IsAnonymousExpression : public BitField16<bool, 2, 1> {};
- class Pretenure : public BitField16<bool, 3, 1> {};
- class HasDuplicateParameters : public BitField16<bool, 4, 1> {};
- class IsFunction : public BitField16<bool, 5, 1> {};
- class ShouldEagerCompile : public BitField16<bool, 6, 1> {};
- class ShouldBeUsedOnceHint : public BitField16<bool, 7, 1> {};
- class FunctionKindBits : public BitField16<FunctionKind, 8, 8> {};
+ class FunctionTypeBits : public BitField16<FunctionType, 0, 2> {};
+ class Pretenure : public BitField16<bool, 2, 1> {};
+ class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
+ class IsFunction : public BitField16<bool, 4, 1> {};
+ class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
+ class ShouldBeUsedOnceHint : public BitField16<bool, 6, 1> {};
+ class FunctionKindBits : public BitField16<FunctionKind, 7, 9> {};
// Start with 16-bit field, which should get packed together
// with Expression's trailing 16-bit field.
uint16_t bitfield_;
- const AstString* raw_name_;
- Scope* scope_;
- ZoneList<Statement*>* body_;
- const AstString* raw_inferred_name_;
- Handle<String> inferred_name_;
- AstProperties ast_properties_;
BailoutReason dont_optimize_reason_;
int materialized_literal_count_;
int expected_property_count_;
int parameter_count_;
int function_token_position_;
+ int yield_count_;
+
+ const AstString* raw_name_;
+ DeclarationScope* scope_;
+ ZoneList<Statement*>* body_;
+ const AstString* raw_inferred_name_;
+ Handle<String> inferred_name_;
+ AstProperties ast_properties_;
};
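The three boolean bit fields (IsDeclaration, IsNamedExpression, IsAnonymousExpression) are collapsed above into a single two-bit FunctionTypeBits field. A minimal standalone sketch of that BitField16 packing, with illustrative stand-ins for V8's real templates:

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField16 {
  static const uint16_t kMask =
      static_cast<uint16_t>(((1 << size) - 1) << shift);
  static uint16_t encode(T value) {
    return static_cast<uint16_t>(static_cast<uint16_t>(value) << shift);
  }
  static T decode(uint16_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint16_t update(uint16_t field, T value) {
    return static_cast<uint16_t>((field & ~kMask) | encode(value));
  }
};

enum FunctionType { kAnonymousExpression, kNamedExpression, kDeclaration };
using FunctionTypeBits = BitField16<FunctionType, 0, 2>;  // 2 bits, 3 values.
using Pretenure = BitField16<bool, 2, 1>;

int main() {
  uint16_t bits =
      FunctionTypeBits::encode(kDeclaration) | Pretenure::encode(false);
  assert(FunctionTypeBits::decode(bits) == kDeclaration);
  bits = Pretenure::update(bits, true);                    // Flip one field...
  assert(FunctionTypeBits::decode(bits) == kDeclaration);  // ...others intact.
}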
@@ -2753,9 +2669,6 @@ class ClassLiteral final : public Expression {
public:
typedef ObjectLiteralProperty Property;
- DECLARE_NODE_TYPE(ClassLiteral)
-
- Scope* scope() const { return scope_; }
VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
Expression* extends() const { return extends_; }
void set_extends(Expression* e) { extends_ = e; }
@@ -2765,23 +2678,20 @@ class ClassLiteral final : public Expression {
int start_position() const { return position(); }
int end_position() const { return end_position_; }
- BailoutId EntryId() const { return BailoutId(local_id(0)); }
- BailoutId DeclsId() const { return BailoutId(local_id(1)); }
- BailoutId ExitId() { return BailoutId(local_id(2)); }
- BailoutId CreateLiteralId() const { return BailoutId(local_id(3)); }
- BailoutId PrototypeId() { return BailoutId(local_id(4)); }
+ BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
+ BailoutId PrototypeId() { return BailoutId(local_id(1)); }
// Return an AST id for a property that is used in simulate instructions.
- BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 5)); }
+ BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 2)); }
// Unlike other AST nodes, the number of bailout IDs allocated for a
// ClassLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + 5 + properties()->length(); }
+ int num_ids() const { return parent_num_ids() + 2 + properties()->length(); }
// Class literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override;
+ FeedbackVectorSlotCache* cache);
bool NeedsProxySlot() const {
return class_variable_proxy() != nullptr &&
@@ -2791,84 +2701,77 @@ class ClassLiteral final : public Expression {
FeedbackVectorSlot PrototypeSlot() const { return prototype_slot_; }
FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
- bool IsAnonymousFunctionDefinition() const final {
- return constructor()->raw_name()->length() == 0;
- }
+ private:
+ friend class AstNodeFactory;
- protected:
- ClassLiteral(Zone* zone, Scope* scope, VariableProxy* class_variable_proxy,
- Expression* extends, FunctionLiteral* constructor,
- ZoneList<Property*>* properties, int start_position,
- int end_position)
- : Expression(zone, start_position),
- scope_(scope),
+ ClassLiteral(VariableProxy* class_variable_proxy, Expression* extends,
+ FunctionLiteral* constructor, ZoneList<Property*>* properties,
+ int start_position, int end_position)
+ : Expression(start_position, kClassLiteral),
+ end_position_(end_position),
class_variable_proxy_(class_variable_proxy),
extends_(extends),
constructor_(constructor),
- properties_(properties),
- end_position_(end_position) {}
+ properties_(properties) {}
static int parent_num_ids() { return Expression::num_ids(); }
-
- private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- Scope* scope_;
+ int end_position_;
+ FeedbackVectorSlot prototype_slot_;
+ FeedbackVectorSlot proxy_slot_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
- int end_position_;
- FeedbackVectorSlot prototype_slot_;
- FeedbackVectorSlot proxy_slot_;
};
class NativeFunctionLiteral final : public Expression {
public:
- DECLARE_NODE_TYPE(NativeFunctionLiteral)
-
Handle<String> name() const { return name_->string(); }
v8::Extension* extension() const { return extension_; }
- protected:
- NativeFunctionLiteral(Zone* zone, const AstRawString* name,
- v8::Extension* extension, int pos)
- : Expression(zone, pos), name_(name), extension_(extension) {}
-
private:
+ friend class AstNodeFactory;
+
+ NativeFunctionLiteral(const AstRawString* name, v8::Extension* extension,
+ int pos)
+ : Expression(pos, kNativeFunctionLiteral),
+ name_(name),
+ extension_(extension) {}
+
const AstRawString* name_;
v8::Extension* extension_;
};
class ThisFunction final : public Expression {
- public:
- DECLARE_NODE_TYPE(ThisFunction)
-
- protected:
- ThisFunction(Zone* zone, int pos) : Expression(zone, pos) {}
+ private:
+ friend class AstNodeFactory;
+ explicit ThisFunction(int pos) : Expression(pos, kThisFunction) {}
};
class SuperPropertyReference final : public Expression {
public:
- DECLARE_NODE_TYPE(SuperPropertyReference)
-
VariableProxy* this_var() const { return this_var_; }
void set_this_var(VariableProxy* v) { this_var_ = v; }
Expression* home_object() const { return home_object_; }
void set_home_object(Expression* e) { home_object_ = e; }
- protected:
- SuperPropertyReference(Zone* zone, VariableProxy* this_var,
- Expression* home_object, int pos)
- : Expression(zone, pos), this_var_(this_var), home_object_(home_object) {
+ private:
+ friend class AstNodeFactory;
+
+ SuperPropertyReference(VariableProxy* this_var, Expression* home_object,
+ int pos)
+ : Expression(pos, kSuperPropertyReference),
+ this_var_(this_var),
+ home_object_(home_object) {
DCHECK(this_var->is_this());
DCHECK(home_object->IsProperty());
}
- private:
VariableProxy* this_var_;
Expression* home_object_;
};
@@ -2876,8 +2779,6 @@ class SuperPropertyReference final : public Expression {
class SuperCallReference final : public Expression {
public:
- DECLARE_NODE_TYPE(SuperCallReference)
-
VariableProxy* this_var() const { return this_var_; }
void set_this_var(VariableProxy* v) { this_var_ = v; }
VariableProxy* new_target_var() const { return new_target_var_; }
@@ -2885,11 +2786,12 @@ class SuperCallReference final : public Expression {
VariableProxy* this_function_var() const { return this_function_var_; }
void set_this_function_var(VariableProxy* v) { this_function_var_ = v; }
- protected:
- SuperCallReference(Zone* zone, VariableProxy* this_var,
- VariableProxy* new_target_var,
+ private:
+ friend class AstNodeFactory;
+
+ SuperCallReference(VariableProxy* this_var, VariableProxy* new_target_var,
VariableProxy* this_function_var, int pos)
- : Expression(zone, pos),
+ : Expression(pos, kSuperCallReference),
this_var_(this_var),
new_target_var_(new_target_var),
this_function_var_(this_function_var) {
@@ -2898,7 +2800,6 @@ class SuperCallReference final : public Expression {
DCHECK(this_function_var->raw_name()->IsOneByteEqualTo(".this_function"));
}
- private:
VariableProxy* this_var_;
VariableProxy* new_target_var_;
VariableProxy* this_function_var_;
@@ -2908,45 +2809,71 @@ class SuperCallReference final : public Expression {
// This class is produced when parsing the () in arrow functions without any
// arguments and is not actually a valid expression.
class EmptyParentheses final : public Expression {
- public:
- DECLARE_NODE_TYPE(EmptyParentheses)
-
private:
- EmptyParentheses(Zone* zone, int pos) : Expression(zone, pos) {}
-};
+ friend class AstNodeFactory;
+ explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) {}
+};
-#undef DECLARE_NODE_TYPE
// ----------------------------------------------------------------------------
// Basic visitor
-// - leaf node visitors are abstract.
+// A subclass should parameterize AstVisitor with itself, e.g.:
+// class SpecificVisitor : public AstVisitor<SpecificVisitor> { ... }
+template <class Subclass>
class AstVisitor BASE_EMBEDDED {
public:
- AstVisitor() {}
- virtual ~AstVisitor() {}
+ void Visit(AstNode* node) { impl()->Visit(node); }
- // Stack overflow check and dynamic dispatch.
- virtual void Visit(AstNode* node) = 0;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations) {
+ for (int i = 0; i < declarations->length(); i++) {
+ Visit(declarations->at(i));
+ }
+ }
+
+ void VisitStatements(ZoneList<Statement*>* statements) {
+ for (int i = 0; i < statements->length(); i++) {
+ Statement* stmt = statements->at(i);
+ Visit(stmt);
+ if (stmt->IsJump()) break;
+ }
+ }
- // Iteration left-to-right.
- virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
- virtual void VisitStatements(ZoneList<Statement*>* statements);
- virtual void VisitExpressions(ZoneList<Expression*>* expressions);
+ void VisitExpressions(ZoneList<Expression*>* expressions) {
+ for (int i = 0; i < expressions->length(); i++) {
+ // The variable statement visiting code may pass NULL expressions
+ // to this code. Maybe this should be handled by introducing an
+ // undefined expression or literal? Revisit this code if this
+ // changes.
+ Expression* expression = expressions->at(i);
+ if (expression != NULL) Visit(expression);
+ }
+ }
- // Individual AST nodes.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) = 0;
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
+ protected:
+ Subclass* impl() { return static_cast<Subclass*>(this); }
};
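A standalone sketch of the CRTP dispatch this new AstVisitor<Subclass> enables: Visit() switches on a node tag and calls impl()->Visit*, so no virtual calls are involved (the GENERATE_AST_VISITOR_SWITCH macro below generates the real switch). Toy node hierarchy; names are illustrative only:

#include <iostream>

struct Node {
  enum Type { kLiteral, kBinaryOperation };
  explicit Node(Type t) : node_type(t) {}
  Type node_type;
};

template <class Subclass>
class Visitor {
 public:
  void Visit(Node* node) {
    switch (node->node_type) {  // Stands in for GENERATE_AST_VISITOR_SWITCH().
      case Node::kLiteral:
        return impl()->VisitLiteral(node);
      case Node::kBinaryOperation:
        return impl()->VisitBinaryOperation(node);
    }
  }

 protected:
  Subclass* impl() { return static_cast<Subclass*>(this); }
};

class Printer : public Visitor<Printer> {
 public:
  void VisitLiteral(Node*) { std::cout << "literal\n"; }
  void VisitBinaryOperation(Node*) { std::cout << "binary operation\n"; }
};

int main() {
  Node n(Node::kBinaryOperation);
  Printer printer;
  printer.Visit(&n);  // Prints "binary operation"; resolved statically.
}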
+#define GENERATE_VISIT_CASE(NodeType) \
+ case AstNode::k##NodeType: \
+ return this->impl()->Visit##NodeType(static_cast<NodeType*>(node));
+
+#define GENERATE_AST_VISITOR_SWITCH() \
+ switch (node->node_type()) { \
+ AST_NODE_LIST(GENERATE_VISIT_CASE) \
+ }
+
#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
public: \
- void Visit(AstNode* node) final { \
- if (!CheckStackOverflow()) node->Accept(this); \
+ void VisitNoStackOverflowCheck(AstNode* node) { \
+ GENERATE_AST_VISITOR_SWITCH() \
+ } \
+ \
+ void Visit(AstNode* node) { \
+ if (CheckStackOverflow()) return; \
+ VisitNoStackOverflowCheck(node); \
} \
\
void SetStackOverflow() { stack_overflow_ = true; } \
@@ -2976,6 +2903,12 @@ class AstVisitor BASE_EMBEDDED {
uintptr_t stack_limit_; \
bool stack_overflow_
+#define DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW() \
+ public: \
+ void Visit(AstNode* node) { GENERATE_AST_VISITOR_SWITCH() } \
+ \
+ private:
+
#define DEFINE_AST_REWRITER_SUBCLASS_MEMBERS() \
public: \
AstNode* Rewrite(AstNode* node) { \
@@ -3004,7 +2937,6 @@ class AstVisitor BASE_EMBEDDED {
\
protected: \
AstNode* replacement_
-
// Generic macro for rewriting things; `GET` is the expression to be
// rewritten; `SET` is a command that should do the rewriting, i.e.
// something sensible with the variable called `replacement`.
@@ -3046,52 +2978,38 @@ class AstVisitor BASE_EMBEDDED {
class AstNodeFactory final BASE_EMBEDDED {
public:
explicit AstNodeFactory(AstValueFactory* ast_value_factory)
- : local_zone_(ast_value_factory->zone()),
- parser_zone_(ast_value_factory->zone()),
- ast_value_factory_(ast_value_factory) {}
+ : zone_(nullptr), ast_value_factory_(ast_value_factory) {
+ if (ast_value_factory != nullptr) {
+ zone_ = ast_value_factory->zone();
+ }
+ }
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
+ void set_ast_value_factory(AstValueFactory* ast_value_factory) {
+ ast_value_factory_ = ast_value_factory;
+ zone_ = ast_value_factory->zone();
+ }
VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
- VariableMode mode, Scope* scope,
- int pos) {
- return new (parser_zone_)
- VariableDeclaration(parser_zone_, proxy, mode, scope, pos);
+ Scope* scope, int pos) {
+ return new (zone_) VariableDeclaration(proxy, scope, pos);
}
FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
- VariableMode mode,
FunctionLiteral* fun,
- Scope* scope,
- int pos) {
- return new (parser_zone_)
- FunctionDeclaration(parser_zone_, proxy, mode, fun, scope, pos);
- }
-
- ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
- const AstRawString* import_name,
- const AstRawString* module_specifier,
- Scope* scope, int pos) {
- return new (parser_zone_) ImportDeclaration(
- parser_zone_, proxy, import_name, module_specifier, scope, pos);
- }
-
- ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
- Scope* scope,
- int pos) {
- return new (parser_zone_)
- ExportDeclaration(parser_zone_, proxy, scope, pos);
+ Scope* scope, int pos) {
+ return new (zone_) FunctionDeclaration(proxy, fun, scope, pos);
}
Block* NewBlock(ZoneList<const AstRawString*>* labels, int capacity,
bool ignore_completion_value, int pos) {
- return new (local_zone_)
- Block(local_zone_, labels, capacity, ignore_completion_value, pos);
+ return new (zone_)
+ Block(zone_, labels, capacity, ignore_completion_value, pos);
}
#define STATEMENT_WITH_LABELS(NodeType) \
NodeType* New##NodeType(ZoneList<const AstRawString*>* labels, int pos) { \
- return new (local_zone_) NodeType(local_zone_, labels, pos); \
+ return new (zone_) NodeType(labels, pos); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
@@ -3104,10 +3022,10 @@ class AstNodeFactory final BASE_EMBEDDED {
int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
- return new (local_zone_) ForInStatement(local_zone_, labels, pos);
+ return new (zone_) ForInStatement(labels, pos);
}
case ForEachStatement::ITERATE: {
- return new (local_zone_) ForOfStatement(local_zone_, labels, pos);
+ return new (zone_) ForOfStatement(labels, pos);
}
}
UNREACHABLE();
@@ -3115,42 +3033,41 @@ class AstNodeFactory final BASE_EMBEDDED {
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
- return new (local_zone_) ExpressionStatement(local_zone_, expression, pos);
+ return new (zone_) ExpressionStatement(expression, pos);
}
ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) {
- return new (local_zone_) ContinueStatement(local_zone_, target, pos);
+ return new (zone_) ContinueStatement(target, pos);
}
BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) {
- return new (local_zone_) BreakStatement(local_zone_, target, pos);
+ return new (zone_) BreakStatement(target, pos);
}
ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
- return new (local_zone_) ReturnStatement(local_zone_, expression, pos);
+ return new (zone_) ReturnStatement(expression, pos);
}
WithStatement* NewWithStatement(Scope* scope,
Expression* expression,
Statement* statement,
int pos) {
- return new (local_zone_)
- WithStatement(local_zone_, scope, expression, statement, pos);
+ return new (zone_) WithStatement(scope, expression, statement, pos);
}
IfStatement* NewIfStatement(Expression* condition,
Statement* then_statement,
Statement* else_statement,
int pos) {
- return new (local_zone_) IfStatement(local_zone_, condition, then_statement,
- else_statement, pos);
+ return new (zone_)
+ IfStatement(condition, then_statement, else_statement, pos);
}
TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope,
Variable* variable,
Block* catch_block, int pos) {
- return new (local_zone_) TryCatchStatement(
- local_zone_, try_block, scope, variable, catch_block, true, pos);
+ return new (zone_) TryCatchStatement(
+ try_block, scope, variable, catch_block, HandlerTable::CAUGHT, pos);
}
TryCatchStatement* NewTryCatchStatementForReThrow(Block* try_block,
@@ -3158,89 +3075,96 @@ class AstNodeFactory final BASE_EMBEDDED {
Variable* variable,
Block* catch_block,
int pos) {
- return new (local_zone_) TryCatchStatement(
- local_zone_, try_block, scope, variable, catch_block, false, pos);
+ return new (zone_) TryCatchStatement(
+ try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
+ }
+
+ TryCatchStatement* NewTryCatchStatementForPromiseReject(Block* try_block,
+ Scope* scope,
+ Variable* variable,
+ Block* catch_block,
+ int pos) {
+ return new (zone_) TryCatchStatement(
+ try_block, scope, variable, catch_block, HandlerTable::PROMISE, pos);
+ }
+
+ TryCatchStatement* NewTryCatchStatementForDesugaring(Block* try_block,
+ Scope* scope,
+ Variable* variable,
+ Block* catch_block,
+ int pos) {
+ return new (zone_) TryCatchStatement(
+ try_block, scope, variable, catch_block, HandlerTable::DESUGARING, pos);
}
TryFinallyStatement* NewTryFinallyStatement(Block* try_block,
Block* finally_block, int pos) {
- return new (local_zone_)
- TryFinallyStatement(local_zone_, try_block, finally_block, pos);
+ return new (zone_) TryFinallyStatement(try_block, finally_block, pos);
}
DebuggerStatement* NewDebuggerStatement(int pos) {
- return new (local_zone_) DebuggerStatement(local_zone_, pos);
+ return new (zone_) DebuggerStatement(pos);
}
EmptyStatement* NewEmptyStatement(int pos) {
- return new (local_zone_) EmptyStatement(local_zone_, pos);
+ return new (zone_) EmptyStatement(pos);
}
SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(
Statement* statement, Scope* scope) {
- return new (parser_zone_)
- SloppyBlockFunctionStatement(parser_zone_, statement, scope);
+ return new (zone_) SloppyBlockFunctionStatement(statement, scope);
}
CaseClause* NewCaseClause(
Expression* label, ZoneList<Statement*>* statements, int pos) {
- return new (local_zone_) CaseClause(local_zone_, label, statements, pos);
+ return new (zone_) CaseClause(label, statements, pos);
}
Literal* NewStringLiteral(const AstRawString* string, int pos) {
- return new (local_zone_)
- Literal(local_zone_, ast_value_factory_->NewString(string), pos);
+ return new (zone_) Literal(ast_value_factory_->NewString(string), pos);
}
// A JavaScript symbol (ECMA-262 edition 6).
Literal* NewSymbolLiteral(const char* name, int pos) {
- return new (local_zone_)
- Literal(local_zone_, ast_value_factory_->NewSymbol(name), pos);
+ return new (zone_) Literal(ast_value_factory_->NewSymbol(name), pos);
}
Literal* NewNumberLiteral(double number, int pos, bool with_dot = false) {
- return new (local_zone_) Literal(
- local_zone_, ast_value_factory_->NewNumber(number, with_dot), pos);
+ return new (zone_)
+ Literal(ast_value_factory_->NewNumber(number, with_dot), pos);
}
Literal* NewSmiLiteral(int number, int pos) {
- return new (local_zone_)
- Literal(local_zone_, ast_value_factory_->NewSmi(number), pos);
+ return new (zone_) Literal(ast_value_factory_->NewSmi(number), pos);
}
Literal* NewBooleanLiteral(bool b, int pos) {
- return new (local_zone_)
- Literal(local_zone_, ast_value_factory_->NewBoolean(b), pos);
+ return new (zone_) Literal(ast_value_factory_->NewBoolean(b), pos);
}
Literal* NewNullLiteral(int pos) {
- return new (local_zone_)
- Literal(local_zone_, ast_value_factory_->NewNull(), pos);
+ return new (zone_) Literal(ast_value_factory_->NewNull(), pos);
}
Literal* NewUndefinedLiteral(int pos) {
- return new (local_zone_)
- Literal(local_zone_, ast_value_factory_->NewUndefined(), pos);
+ return new (zone_) Literal(ast_value_factory_->NewUndefined(), pos);
}
Literal* NewTheHoleLiteral(int pos) {
- return new (local_zone_)
- Literal(local_zone_, ast_value_factory_->NewTheHole(), pos);
+ return new (zone_) Literal(ast_value_factory_->NewTheHole(), pos);
}
ObjectLiteral* NewObjectLiteral(
- ZoneList<ObjectLiteral::Property*>* properties,
- int literal_index,
- int boilerplate_properties,
- int pos) {
- return new (local_zone_) ObjectLiteral(
- local_zone_, properties, literal_index, boilerplate_properties, pos);
+ ZoneList<ObjectLiteral::Property*>* properties, int literal_index,
+ uint32_t boilerplate_properties, int pos) {
+ return new (zone_)
+ ObjectLiteral(properties, literal_index, boilerplate_properties, pos);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
Expression* key, Expression* value, ObjectLiteralProperty::Kind kind,
bool is_static, bool is_computed_name) {
- return new (local_zone_)
+ return new (zone_)
ObjectLiteral::Property(key, value, kind, is_static, is_computed_name);
}
@@ -3248,123 +3172,120 @@ class AstNodeFactory final BASE_EMBEDDED {
Expression* value,
bool is_static,
bool is_computed_name) {
- return new (local_zone_) ObjectLiteral::Property(
- ast_value_factory_, key, value, is_static, is_computed_name);
+ return new (zone_) ObjectLiteral::Property(ast_value_factory_, key, value,
+ is_static, is_computed_name);
}
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
int literal_index, int pos) {
- return new (local_zone_)
- RegExpLiteral(local_zone_, pattern, flags, literal_index, pos);
+ return new (zone_) RegExpLiteral(pattern, flags, literal_index, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
int pos) {
- return new (local_zone_)
- ArrayLiteral(local_zone_, values, -1, literal_index, pos);
+ return new (zone_) ArrayLiteral(values, -1, literal_index, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int first_spread_index, int literal_index,
int pos) {
- return new (local_zone_) ArrayLiteral(
- local_zone_, values, first_spread_index, literal_index, pos);
+ return new (zone_)
+ ArrayLiteral(values, first_spread_index, literal_index, pos);
}
VariableProxy* NewVariableProxy(Variable* var,
- int start_position = RelocInfo::kNoPosition,
- int end_position = RelocInfo::kNoPosition) {
- return new (parser_zone_)
- VariableProxy(parser_zone_, var, start_position, end_position);
+ int start_position = kNoSourcePosition,
+ int end_position = kNoSourcePosition) {
+ return new (zone_) VariableProxy(var, start_position, end_position);
}
VariableProxy* NewVariableProxy(const AstRawString* name,
Variable::Kind variable_kind,
- int start_position = RelocInfo::kNoPosition,
- int end_position = RelocInfo::kNoPosition) {
+ int start_position = kNoSourcePosition,
+ int end_position = kNoSourcePosition) {
DCHECK_NOT_NULL(name);
- return new (parser_zone_) VariableProxy(parser_zone_, name, variable_kind,
- start_position, end_position);
+ return new (zone_)
+ VariableProxy(name, variable_kind, start_position, end_position);
+ }
+
+ // Recreates the VariableProxy in this Zone.
+ VariableProxy* CopyVariableProxy(VariableProxy* proxy) {
+ return new (zone_) VariableProxy(proxy);
}
Property* NewProperty(Expression* obj, Expression* key, int pos) {
- return new (local_zone_) Property(local_zone_, obj, key, pos);
+ return new (zone_) Property(obj, key, pos);
}
- Call* NewCall(Expression* expression,
- ZoneList<Expression*>* arguments,
- int pos) {
- return new (local_zone_) Call(local_zone_, expression, arguments, pos);
+ Call* NewCall(Expression* expression, ZoneList<Expression*>* arguments,
+ int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
+ return new (zone_) Call(expression, arguments, pos, possibly_eval);
}
CallNew* NewCallNew(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
- return new (local_zone_) CallNew(local_zone_, expression, arguments, pos);
+ return new (zone_) CallNew(expression, arguments, pos);
}
CallRuntime* NewCallRuntime(Runtime::FunctionId id,
ZoneList<Expression*>* arguments, int pos) {
- return new (local_zone_)
- CallRuntime(local_zone_, Runtime::FunctionForId(id), arguments, pos);
+ return new (zone_) CallRuntime(Runtime::FunctionForId(id), arguments, pos);
}
CallRuntime* NewCallRuntime(const Runtime::Function* function,
ZoneList<Expression*>* arguments, int pos) {
- return new (local_zone_) CallRuntime(local_zone_, function, arguments, pos);
+ return new (zone_) CallRuntime(function, arguments, pos);
}
CallRuntime* NewCallRuntime(int context_index,
ZoneList<Expression*>* arguments, int pos) {
- return new (local_zone_)
- CallRuntime(local_zone_, context_index, arguments, pos);
+ return new (zone_) CallRuntime(context_index, arguments, pos);
}
UnaryOperation* NewUnaryOperation(Token::Value op,
Expression* expression,
int pos) {
- return new (local_zone_) UnaryOperation(local_zone_, op, expression, pos);
+ return new (zone_) UnaryOperation(op, expression, pos);
}
BinaryOperation* NewBinaryOperation(Token::Value op,
Expression* left,
Expression* right,
int pos) {
- return new (local_zone_) BinaryOperation(local_zone_, op, left, right, pos);
+ return new (zone_) BinaryOperation(op, left, right, pos);
}
CountOperation* NewCountOperation(Token::Value op,
bool is_prefix,
Expression* expr,
int pos) {
- return new (local_zone_)
- CountOperation(local_zone_, op, is_prefix, expr, pos);
+ return new (zone_) CountOperation(op, is_prefix, expr, pos);
}
CompareOperation* NewCompareOperation(Token::Value op,
Expression* left,
Expression* right,
int pos) {
- return new (local_zone_)
- CompareOperation(local_zone_, op, left, right, pos);
+ return new (zone_) CompareOperation(op, left, right, pos);
}
Spread* NewSpread(Expression* expression, int pos, int expr_pos) {
- return new (local_zone_) Spread(local_zone_, expression, pos, expr_pos);
+ return new (zone_) Spread(expression, pos, expr_pos);
}
Conditional* NewConditional(Expression* condition,
Expression* then_expression,
Expression* else_expression,
int position) {
- return new (local_zone_) Conditional(
- local_zone_, condition, then_expression, else_expression, position);
+ return new (zone_)
+ Conditional(condition, then_expression, else_expression, position);
}
RewritableExpression* NewRewritableExpression(Expression* expression) {
DCHECK_NOT_NULL(expression);
- return new (local_zone_) RewritableExpression(local_zone_, expression);
+ return new (zone_) RewritableExpression(expression);
}
Assignment* NewAssignment(Token::Value op,
@@ -3372,8 +3293,7 @@ class AstNodeFactory final BASE_EMBEDDED {
Expression* value,
int pos) {
DCHECK(Token::IsAssignmentOp(op));
- Assignment* assign =
- new (local_zone_) Assignment(local_zone_, op, target, value, pos);
+ Assignment* assign = new (zone_) Assignment(op, target, value, pos);
if (assign->is_compound()) {
DCHECK(Token::IsAssignmentOp(op));
assign->binary_operation_ =
@@ -3382,28 +3302,26 @@ class AstNodeFactory final BASE_EMBEDDED {
return assign;
}
- Yield* NewYield(Expression *generator_object,
- Expression* expression,
- int pos) {
+ Yield* NewYield(Expression* generator_object, Expression* expression, int pos,
+ Yield::OnException on_exception) {
if (!expression) expression = NewUndefinedLiteral(pos);
- return new (local_zone_)
- Yield(local_zone_, generator_object, expression, pos);
+ return new (zone_) Yield(generator_object, expression, pos, on_exception);
}
Throw* NewThrow(Expression* exception, int pos) {
- return new (local_zone_) Throw(local_zone_, exception, pos);
+ return new (zone_) Throw(exception, pos);
}
FunctionLiteral* NewFunctionLiteral(
- const AstRawString* name, Scope* scope, ZoneList<Statement*>* body,
- int materialized_literal_count, int expected_property_count,
- int parameter_count,
+ const AstRawString* name, DeclarationScope* scope,
+ ZoneList<Statement*>* body, int materialized_literal_count,
+ int expected_property_count, int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, FunctionKind kind,
int position) {
- return new (parser_zone_) FunctionLiteral(
- parser_zone_, name, ast_value_factory_, scope, body,
+ return new (zone_) FunctionLiteral(
+ zone_, name, ast_value_factory_, scope, body,
materialized_literal_count, expected_property_count, parameter_count,
function_type, has_duplicate_parameters, eager_compile_hint, kind,
position, true);
@@ -3413,75 +3331,72 @@ class AstNodeFactory final BASE_EMBEDDED {
// result of an eval (top-level or otherwise), or the result of calling
// the Function constructor.
FunctionLiteral* NewScriptOrEvalFunctionLiteral(
- Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count) {
- return new (parser_zone_) FunctionLiteral(
- parser_zone_, ast_value_factory_->empty_string(), ast_value_factory_,
- scope, body, materialized_literal_count, expected_property_count, 0,
+ DeclarationScope* scope, ZoneList<Statement*>* body,
+ int materialized_literal_count, int expected_property_count) {
+ return new (zone_) FunctionLiteral(
+ zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
+ body, materialized_literal_count, expected_property_count, 0,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction, 0,
false);
}
- ClassLiteral* NewClassLiteral(Scope* scope, VariableProxy* proxy,
- Expression* extends,
+ ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
FunctionLiteral* constructor,
ZoneList<ObjectLiteral::Property*>* properties,
int start_position, int end_position) {
- return new (parser_zone_)
- ClassLiteral(parser_zone_, scope, proxy, extends, constructor,
- properties, start_position, end_position);
+ return new (zone_) ClassLiteral(proxy, extends, constructor, properties,
+ start_position, end_position);
}
NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
v8::Extension* extension,
int pos) {
- return new (parser_zone_)
- NativeFunctionLiteral(parser_zone_, name, extension, pos);
+ return new (zone_) NativeFunctionLiteral(name, extension, pos);
}
DoExpression* NewDoExpression(Block* block, Variable* result_var, int pos) {
VariableProxy* result = NewVariableProxy(result_var, pos);
- return new (parser_zone_) DoExpression(parser_zone_, block, result, pos);
+ return new (zone_) DoExpression(block, result, pos);
}
ThisFunction* NewThisFunction(int pos) {
- return new (local_zone_) ThisFunction(local_zone_, pos);
+ return new (zone_) ThisFunction(pos);
}
SuperPropertyReference* NewSuperPropertyReference(VariableProxy* this_var,
Expression* home_object,
int pos) {
- return new (parser_zone_)
- SuperPropertyReference(parser_zone_, this_var, home_object, pos);
+ return new (zone_) SuperPropertyReference(this_var, home_object, pos);
}
SuperCallReference* NewSuperCallReference(VariableProxy* this_var,
VariableProxy* new_target_var,
VariableProxy* this_function_var,
int pos) {
- return new (parser_zone_) SuperCallReference(
- parser_zone_, this_var, new_target_var, this_function_var, pos);
+ return new (zone_)
+ SuperCallReference(this_var, new_target_var, this_function_var, pos);
}
EmptyParentheses* NewEmptyParentheses(int pos) {
- return new (local_zone_) EmptyParentheses(local_zone_, pos);
+ return new (zone_) EmptyParentheses(pos);
}
- Zone* zone() const { return local_zone_; }
+ Zone* zone() const { return zone_; }
+ void set_zone(Zone* zone) { zone_ = zone; }
// Handles use of temporary zones when parsing inner function bodies.
class BodyScope {
public:
BodyScope(AstNodeFactory* factory, Zone* temp_zone, bool use_temp_zone)
- : factory_(factory), prev_zone_(factory->local_zone_) {
+ : factory_(factory), prev_zone_(factory->zone_) {
if (use_temp_zone) {
- factory->local_zone_ = temp_zone;
+ factory->zone_ = temp_zone;
}
}
- ~BodyScope() { factory_->local_zone_ = prev_zone_; }
+ ~BodyScope() { factory_->zone_ = prev_zone_; }
private:
AstNodeFactory* factory_;
@@ -3493,10 +3408,7 @@ class AstNodeFactory final BASE_EMBEDDED {
// which we can guarantee is not going to be compiled or have its AST
// inspected.
// See ParseFunctionLiteral in parser.cc for preconditions.
- Zone* local_zone_;
- // ZoneObjects which need to persist until scope analysis must be allocated in
- // the parser-level zone.
- Zone* parser_zone_;
+ Zone* zone_;
AstValueFactory* ast_value_factory_;
};
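With local_zone_ and parser_zone_ merged into a single zone_, BodyScope is the mechanism that temporarily redirects allocation into a scratch zone while an inner function body is parsed, then restores the previous zone on destruction. A standalone sketch of that RAII swap with toy types (not V8's actual classes):

#include <cassert>

struct Zone {};  // Stand-in for v8::internal::Zone.

struct Factory {  // Stand-in for AstNodeFactory.
  explicit Factory(Zone* zone) : zone_(zone) {}
  Zone* zone_;

  class BodyScope {  // Mirrors AstNodeFactory::BodyScope above.
   public:
    BodyScope(Factory* factory, Zone* temp_zone, bool use_temp_zone)
        : factory_(factory), prev_zone_(factory->zone_) {
      if (use_temp_zone) factory->zone_ = temp_zone;
    }
    ~BodyScope() { factory_->zone_ = prev_zone_; }

   private:
    Factory* factory_;
    Zone* prev_zone_;
  };
};

int main() {
  Zone main_zone, temp_zone;
  Factory factory(&main_zone);
  {
    Factory::BodyScope scope(&factory, &temp_zone, /*use_temp_zone=*/true);
    assert(factory.zone_ == &temp_zone);  // Inner-body nodes allocate here.
  }
  assert(factory.zone_ == &main_zone);  // Previous zone restored on exit.
}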
diff --git a/deps/v8/src/ast/context-slot-cache.cc b/deps/v8/src/ast/context-slot-cache.cc
new file mode 100644
index 0000000000..43bd6d6b19
--- /dev/null
+++ b/deps/v8/src/ast/context-slot-cache.cc
@@ -0,0 +1,91 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/context-slot-cache.h"
+
+#include <stdlib.h>
+
+#include "src/ast/scopes.h"
+#include "src/bootstrapper.h"
+
+namespace v8 {
+namespace internal {
+
+int ContextSlotCache::Hash(Object* data, String* name) {
+ // Uses only lower 32 bits if pointers are larger.
+ uintptr_t addr_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
+ return static_cast<int>((addr_hash ^ name->Hash()) % kLength);
+}
+
+int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag) {
+ int index = Hash(data, name);
+ Key& key = keys_[index];
+ if ((key.data == data) && key.name->Equals(name)) {
+ Value result(values_[index]);
+ if (mode != nullptr) *mode = result.mode();
+ if (init_flag != nullptr) *init_flag = result.initialization_flag();
+ if (maybe_assigned_flag != nullptr)
+ *maybe_assigned_flag = result.maybe_assigned_flag();
+ return result.index() + kNotFound;
+ }
+ return kNotFound;
+}
+
+void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
+ VariableMode mode, InitializationFlag init_flag,
+ MaybeAssignedFlag maybe_assigned_flag,
+ int slot_index) {
+ DisallowHeapAllocation no_gc;
+ Handle<String> internalized_name;
+ DCHECK(slot_index > kNotFound);
+ if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name)
+ .ToHandle(&internalized_name)) {
+ int index = Hash(*data, *internalized_name);
+ Key& key = keys_[index];
+ key.data = *data;
+ key.name = *internalized_name;
+ // Note that Value only stores an unsigned index, hence the bias by
+ // -kNotFound.
+ values_[index] =
+ Value(mode, init_flag, maybe_assigned_flag, slot_index - kNotFound)
+ .raw();
+#ifdef DEBUG
+ ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
+#endif
+ }
+}
+
+void ContextSlotCache::Clear() {
+ for (int index = 0; index < kLength; index++) keys_[index].data = nullptr;
+}
+
+#ifdef DEBUG
+
+void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
+ VariableMode mode,
+ InitializationFlag init_flag,
+ MaybeAssignedFlag maybe_assigned_flag,
+ int slot_index) {
+ DisallowHeapAllocation no_gc;
+ Handle<String> internalized_name;
+ if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name)
+ .ToHandle(&internalized_name)) {
+ int index = Hash(*data, *name);
+ Key& key = keys_[index];
+ DCHECK(key.data == *data);
+ DCHECK(key.name->Equals(*name));
+ Value result(values_[index]);
+ DCHECK(result.mode() == mode);
+ DCHECK(result.initialization_flag() == init_flag);
+ DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag);
+ DCHECK(result.index() + kNotFound == slot_index);
+ }
+}
+
+#endif // DEBUG
+
+} // namespace internal
+} // namespace v8
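The kNotFound arithmetic above is easy to misread: a cached slot index may legitimately be -1 (property absent), so Update() stores slot_index - kNotFound (i.e. slot_index + 2, always >= 1) in Value's unsigned index field, and Lookup() adds kNotFound back. A standalone round-trip check, assuming kNotFound == -2 as declared in the header:

#include <cassert>

int main() {
  const int kNotFound = -2;
  // DCHECK(slot_index > kNotFound) above, so slot_index >= -1 here.
  for (int slot_index = -1; slot_index < 8; ++slot_index) {
    unsigned stored = static_cast<unsigned>(slot_index - kNotFound);  // >= 1.
    assert(stored >= 1);
    assert(static_cast<int>(stored) + kNotFound == slot_index);  // Lookup().
  }
}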
diff --git a/deps/v8/src/ast/context-slot-cache.h b/deps/v8/src/ast/context-slot-cache.h
new file mode 100644
index 0000000000..8e9d1f7a8b
--- /dev/null
+++ b/deps/v8/src/ast/context-slot-cache.h
@@ -0,0 +1,113 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_CONTEXT_SLOT_CACHE_H_
+#define V8_AST_CONTEXT_SLOT_CACHE_H_
+
+#include "src/allocation.h"
+#include "src/ast/modules.h"
+#include "src/ast/variables.h"
+
+namespace v8 {
+namespace internal {
+
+// Cache for mapping (data, property name) into context slot index.
+// The cache contains both positive and negative results.
+// A slot index of -1 means the property is absent.
+// Cleared at startup and prior to mark-sweep collection.
+class ContextSlotCache {
+ public:
+ // Lookup context slot index for (data, name).
+ // If absent, kNotFound is returned.
+ int Lookup(Object* data, String* name, VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag);
+
+ // Update an element in the cache.
+ void Update(Handle<Object> data, Handle<String> name, VariableMode mode,
+ InitializationFlag init_flag,
+ MaybeAssignedFlag maybe_assigned_flag, int slot_index);
+
+ // Clear the cache.
+ void Clear();
+
+ static const int kNotFound = -2;
+
+ private:
+ ContextSlotCache() {
+ for (int i = 0; i < kLength; ++i) {
+ keys_[i].data = NULL;
+ keys_[i].name = NULL;
+ values_[i] = kNotFound;
+ }
+ }
+
+ inline static int Hash(Object* data, String* name);
+
+#ifdef DEBUG
+ void ValidateEntry(Handle<Object> data, Handle<String> name,
+ VariableMode mode, InitializationFlag init_flag,
+ MaybeAssignedFlag maybe_assigned_flag, int slot_index);
+#endif
+
+ static const int kLength = 256;
+ struct Key {
+ Object* data;
+ String* name;
+ };
+
+ struct Value {
+ Value(VariableMode mode, InitializationFlag init_flag,
+ MaybeAssignedFlag maybe_assigned_flag, int index) {
+ DCHECK(ModeField::is_valid(mode));
+ DCHECK(InitField::is_valid(init_flag));
+ DCHECK(MaybeAssignedField::is_valid(maybe_assigned_flag));
+ DCHECK(IndexField::is_valid(index));
+ value_ = ModeField::encode(mode) | IndexField::encode(index) |
+ InitField::encode(init_flag) |
+ MaybeAssignedField::encode(maybe_assigned_flag);
+ DCHECK(mode == this->mode());
+ DCHECK(init_flag == this->initialization_flag());
+ DCHECK(maybe_assigned_flag == this->maybe_assigned_flag());
+ DCHECK(index == this->index());
+ }
+
+ explicit inline Value(uint32_t value) : value_(value) {}
+
+ uint32_t raw() { return value_; }
+
+ VariableMode mode() { return ModeField::decode(value_); }
+
+ InitializationFlag initialization_flag() {
+ return InitField::decode(value_);
+ }
+
+ MaybeAssignedFlag maybe_assigned_flag() {
+ return MaybeAssignedField::decode(value_);
+ }
+
+ int index() { return IndexField::decode(value_); }
+
+ // Bit fields in value_ (type, shift, size). Must be public so the
+ // constants can be embedded in generated code.
+ class ModeField : public BitField<VariableMode, 0, 4> {};
+ class InitField : public BitField<InitializationFlag, 4, 1> {};
+ class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {};
+ class IndexField : public BitField<int, 6, 32 - 6> {};
+
+ private:
+ uint32_t value_;
+ };
+
+ Key keys_[kLength];
+ uint32_t values_[kLength];
+
+ friend class Isolate;
+ DISALLOW_COPY_AND_ASSIGN(ContextSlotCache);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_CONTEXT_SLOT_CACHE_H_
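Hash() above truncates the data pointer to 32 bits, drops the two low alignment bits, XORs in the name's string hash, and reduces modulo the table size; since the cache is direct-mapped, colliding entries simply overwrite each other. A standalone sketch of that scheme, with a plain uint32_t standing in for String::Hash():

#include <cstdint>
#include <cstdio>

const int kLength = 256;  // Same table size as ContextSlotCache.

int Hash(const void* data, uint32_t name_hash) {
  // Uses only the lower 32 bits if pointers are larger, as in the real code.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
  return static_cast<int>((addr_hash ^ name_hash) % kLength);
}

int main() {
  int dummy = 0;
  printf("bucket: %d\n", Hash(&dummy, 0x1234u));  // Always in [0, kLength).
}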
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index f895756e4a..cd47c00b92 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -3,56 +3,142 @@
// found in the LICENSE file.
#include "src/ast/modules.h"
-
#include "src/ast/ast-value-factory.h"
+#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
+void ModuleDescriptor::AddImport(
+ const AstRawString* import_name, const AstRawString* local_name,
+ const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
+ DCHECK_NOT_NULL(import_name);
+ DCHECK_NOT_NULL(local_name);
+ DCHECK_NOT_NULL(module_request);
+ ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ entry->local_name = local_name;
+ entry->import_name = import_name;
+ entry->module_request = module_request;
+ regular_imports_.insert(std::make_pair(entry->local_name, entry));
+ // We don't care if there's already an entry for this local name, as in that
+ // case we will report an error when declaring the variable.
+}
-void ModuleDescriptor::AddLocalExport(const AstRawString* export_name,
- const AstRawString* local_name,
- Zone* zone, bool* ok) {
- void* key = const_cast<AstRawString*>(export_name);
- ZoneAllocationPolicy allocator(zone);
+void ModuleDescriptor::AddStarImport(
+ const AstRawString* local_name, const AstRawString* module_request,
+ Scanner::Location loc, Zone* zone) {
+ DCHECK_NOT_NULL(local_name);
+ DCHECK_NOT_NULL(module_request);
+ ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ entry->local_name = local_name;
+ entry->module_request = module_request;
+ special_imports_.Add(entry, zone);
+}
- if (exports_ == nullptr) {
- exports_ = new (zone->New(sizeof(ZoneHashMap)))
- ZoneHashMap(ZoneHashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity, allocator);
- }
- ZoneHashMap::Entry* p =
- exports_->LookupOrInsert(key, export_name->hash(), allocator);
- DCHECK_NOT_NULL(p);
- if (p->value != nullptr) {
- // Duplicate export.
- *ok = false;
- return;
- }
+void ModuleDescriptor::AddEmptyImport(
+ const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
+ DCHECK_NOT_NULL(module_request);
+ ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ entry->module_request = module_request;
+ special_imports_.Add(entry, zone);
+}
+
+
+void ModuleDescriptor::AddExport(
+ const AstRawString* local_name, const AstRawString* export_name,
+ Scanner::Location loc, Zone* zone) {
+ DCHECK_NOT_NULL(local_name);
+ DCHECK_NOT_NULL(export_name);
+ ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ entry->export_name = export_name;
+ entry->local_name = local_name;
+ exports_.Add(entry, zone);
+}
- p->value = const_cast<AstRawString*>(local_name);
+
+void ModuleDescriptor::AddExport(
+ const AstRawString* import_name, const AstRawString* export_name,
+ const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
+ DCHECK_NOT_NULL(import_name);
+ DCHECK_NOT_NULL(export_name);
+ DCHECK_NOT_NULL(module_request);
+ ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ entry->export_name = export_name;
+ entry->import_name = import_name;
+ entry->module_request = module_request;
+ exports_.Add(entry, zone);
}
-void ModuleDescriptor::AddModuleRequest(const AstRawString* module_specifier,
- Zone* zone) {
- // TODO(adamk): Avoid this O(N) operation on each insert by storing
- // a HashMap, or by de-duping after parsing.
- if (requested_modules_.Contains(module_specifier)) return;
- requested_modules_.Add(module_specifier, zone);
+void ModuleDescriptor::AddStarExport(
+ const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
+ DCHECK_NOT_NULL(module_request);
+ ModuleEntry* entry = new (zone) ModuleEntry(loc);
+ entry->module_request = module_request;
+ exports_.Add(entry, zone);
+}
+
+void ModuleDescriptor::MakeIndirectExportsExplicit() {
+ for (auto entry : exports_) {
+ if (entry->export_name == nullptr) continue;
+ if (entry->import_name != nullptr) continue;
+ DCHECK_NOT_NULL(entry->local_name);
+ auto it = regular_imports_.find(entry->local_name);
+ if (it != regular_imports_.end()) {
+ // Found an indirect export.
+ DCHECK_NOT_NULL(it->second->module_request);
+ DCHECK_NOT_NULL(it->second->import_name);
+ entry->import_name = it->second->import_name;
+ entry->module_request = it->second->module_request;
+ entry->local_name = nullptr;
+ }
+ }
}
+bool ModuleDescriptor::Validate(ModuleScope* module_scope,
+ PendingCompilationErrorHandler* error_handler,
+ Zone* zone) {
+ DCHECK_EQ(this, module_scope->module());
+ DCHECK_NOT_NULL(error_handler);
-const AstRawString* ModuleDescriptor::LookupLocalExport(
- const AstRawString* export_name, Zone* zone) {
- if (exports_ == nullptr) return nullptr;
- ZoneHashMap::Entry* entry = exports_->Lookup(
- const_cast<AstRawString*>(export_name), export_name->hash());
- if (entry == nullptr) return nullptr;
- DCHECK_NOT_NULL(entry->value);
- return static_cast<const AstRawString*>(entry->value);
+ // Report error iff there are duplicate exports.
+ {
+ ZoneAllocationPolicy allocator(zone);
+ ZoneHashMap* export_names = new (zone->New(sizeof(ZoneHashMap)))
+ ZoneHashMap(ZoneHashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity, allocator);
+ for (auto entry : exports_) {
+ if (entry->export_name == nullptr) continue;
+ AstRawString* key = const_cast<AstRawString*>(entry->export_name);
+ ZoneHashMap::Entry* p =
+ export_names->LookupOrInsert(key, key->hash(), allocator);
+ DCHECK_NOT_NULL(p);
+ if (p->value != nullptr) {
+ error_handler->ReportMessageAt(
+ entry->location.beg_pos, entry->location.end_pos,
+ MessageTemplate::kDuplicateExport, entry->export_name);
+ return false;
+ }
+ p->value = key; // Anything but nullptr.
+ }
+ }
+
+ // Report error iff there are exports of non-existent local names.
+ for (auto entry : exports_) {
+ if (entry->local_name == nullptr) continue;
+ if (module_scope->LookupLocal(entry->local_name) == nullptr) {
+ error_handler->ReportMessageAt(
+ entry->location.beg_pos, entry->location.end_pos,
+ MessageTemplate::kModuleExportUndefined, entry->local_name);
+ return false;
+ }
+ }
+
+ MakeIndirectExportsExplicit();
+ return true;
}
+
} // namespace internal
} // namespace v8
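Validate() above makes two passes over exports_: a scratch ZoneHashMap rejects duplicate export names, and a scope lookup rejects exports of names never declared locally. A simplified standalone sketch of the duplicate-name pass, with std::set standing in for the ZoneHashMap:

#include <cassert>
#include <set>
#include <string>
#include <vector>

// Returns false on the first duplicate, where Validate() would report
// MessageTemplate::kDuplicateExport at the entry's source location.
bool NoDuplicateExports(const std::vector<std::string>& export_names) {
  std::set<std::string> seen;
  for (const auto& name : export_names) {
    if (!seen.insert(name).second) return false;  // Name already exported.
  }
  return true;
}

int main() {
  assert(NoDuplicateExports({"a", "b"}));
  assert(!NoDuplicateExports({"a", "b", "a"}));  // Duplicate export of "a".
}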
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 1fdf526cd1..c8f7aa3793 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -5,7 +5,9 @@
#ifndef V8_AST_MODULES_H_
#define V8_AST_MODULES_H_
-#include "src/zone.h"
+#include "src/parsing/scanner.h" // Only for Scanner::Location.
+#include "src/pending-compilation-error-handler.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -16,90 +18,105 @@ class AstRawString;
class ModuleDescriptor : public ZoneObject {
public:
- // ---------------------------------------------------------------------------
- // Factory methods.
-
- static ModuleDescriptor* New(Zone* zone) {
- return new (zone) ModuleDescriptor(zone);
- }
-
- // ---------------------------------------------------------------------------
- // Mutators.
-
- // Add a name to the list of exports. If it already exists, that's an error.
- void AddLocalExport(const AstRawString* export_name,
- const AstRawString* local_name, Zone* zone, bool* ok);
-
- // Add module_specifier to the list of requested modules,
- // if not already present.
- void AddModuleRequest(const AstRawString* module_specifier, Zone* zone);
-
- // Assign an index.
- void Allocate(int index) {
- DCHECK_EQ(-1, index_);
- index_ = index;
- }
-
- // ---------------------------------------------------------------------------
- // Accessors.
+ explicit ModuleDescriptor(Zone* zone)
+ : exports_(1, zone), special_imports_(1, zone), regular_imports_(zone) {}
+
+ // import x from "foo.js";
+ // import {x} from "foo.js";
+ // import {x as y} from "foo.js";
+ void AddImport(
+ const AstRawString* import_name, const AstRawString* local_name,
+ const AstRawString* module_request, const Scanner::Location loc,
+ Zone* zone);
+
+ // import * as x from "foo.js";
+ void AddStarImport(
+ const AstRawString* local_name, const AstRawString* module_request,
+ const Scanner::Location loc, Zone* zone);
+
+ // import "foo.js";
+ // import {} from "foo.js";
+ // export {} from "foo.js"; (sic!)
+ void AddEmptyImport(
+ const AstRawString* module_request, const Scanner::Location loc,
+ Zone* zone);
+
+ // export {x};
+ // export {x as y};
+ // export VariableStatement
+ // export Declaration
+ // export default ...
+ void AddExport(
+ const AstRawString* local_name, const AstRawString* export_name,
+ const Scanner::Location loc, Zone* zone);
+
+ // export {x} from "foo.js";
+ // export {x as y} from "foo.js";
+ void AddExport(
+ const AstRawString* import_name, const AstRawString* export_name,
+ const AstRawString* module_request, const Scanner::Location loc,
+ Zone* zone);
+
+ // export * from "foo.js";
+ void AddStarExport(
+ const AstRawString* module_request, const Scanner::Location loc,
+ Zone* zone);
+
+ // Check if module is well-formed and report error if not.
+ // Also canonicalize indirect exports.
+ bool Validate(ModuleScope* module_scope,
+ PendingCompilationErrorHandler* error_handler, Zone* zone);
+
+ struct ModuleEntry : public ZoneObject {
+ const Scanner::Location location;
+ const AstRawString* export_name;
+ const AstRawString* local_name;
+ const AstRawString* import_name;
+ const AstRawString* module_request;
+
+ explicit ModuleEntry(Scanner::Location loc)
+ : location(loc),
+ export_name(nullptr),
+ local_name(nullptr),
+ import_name(nullptr),
+ module_request(nullptr) {}
+ };
- int Length() {
- ZoneHashMap* exports = exports_;
- return exports ? exports->occupancy() : 0;
- }
+ const ZoneList<ModuleEntry*>& exports() const { return exports_; }
- // The context slot in the hosting script context pointing to this module.
- int Index() {
- return index_;
+ // Empty imports and namespace imports.
+ const ZoneList<const ModuleEntry*>& special_imports() const {
+ return special_imports_;
}
- const AstRawString* LookupLocalExport(const AstRawString* export_name,
- Zone* zone);
-
- const ZoneList<const AstRawString*>& requested_modules() const {
- return requested_modules_;
+ // All the remaining imports, indexed by local name.
+ const ZoneMap<const AstRawString*, const ModuleEntry*>& regular_imports()
+ const {
+ return regular_imports_;
}
- // ---------------------------------------------------------------------------
- // Iterators.
-
- // Use like:
- // for (auto it = descriptor->iterator(); !it.done(); it.Advance()) {
- // ... it.name() ...
- // }
- class Iterator {
- public:
- bool done() const { return entry_ == NULL; }
- const AstRawString* export_name() const {
- DCHECK(!done());
- return static_cast<const AstRawString*>(entry_->key);
- }
- const AstRawString* local_name() const {
- DCHECK(!done());
- return static_cast<const AstRawString*>(entry_->value);
- }
- void Advance() { entry_ = exports_->Next(entry_); }
-
- private:
- friend class ModuleDescriptor;
- explicit Iterator(const ZoneHashMap* exports)
- : exports_(exports), entry_(exports ? exports->Start() : NULL) {}
-
- const ZoneHashMap* exports_;
- ZoneHashMap::Entry* entry_;
- };
-
- Iterator iterator() const { return Iterator(this->exports_); }
-
- // ---------------------------------------------------------------------------
- // Implementation.
private:
- explicit ModuleDescriptor(Zone* zone)
- : exports_(NULL), requested_modules_(1, zone), index_(-1) {}
-
- ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
- ZoneList<const AstRawString*> requested_modules_;
- int index_;
+ ZoneList<ModuleEntry*> exports_;
+ ZoneList<const ModuleEntry*> special_imports_;
+ ZoneMap<const AstRawString*, const ModuleEntry*> regular_imports_;
+
+ // Find any implicitly indirect exports and make them explicit.
+ //
+ // An explicitly indirect export is an export entry arising from an export
+ // statement of the following form:
+ // export {a as c} from "X";
+ // An implicitly indirect export corresponds to
+ // export {b as c};
+ // in the presence of an import statement of the form
+ // import {a as b} from "X";
+ // This function finds such implicitly indirect export entries and rewrites
+ // them by filling in the import name and module request, as well as nulling
+ // out the local name. Effectively, it turns
+ // import {a as b} from "X"; export {b as c};
+ // into:
+ // import {a as b} from "X"; export {a as c} from "X";
+ // (The import entry is never deleted.)
+ void MakeIndirectExportsExplicit();
};
} // namespace internal
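Note: the MakeIndirectExportsExplicit comment above describes a rewrite over
the new entry lists. A minimal sketch of that canonicalization, assuming
simplified types (EntrySketch and the std:: containers stand in for
ModuleEntry, ZoneList and ZoneMap; empty strings stand in for nullptr):

#include <map>
#include <string>
#include <vector>

struct EntrySketch {
  std::string export_name;
  std::string local_name;      // empty = no local name
  std::string import_name;     // empty = no import name
  std::string module_request;  // empty = not module-qualified
};

void MakeIndirectExportsExplicitSketch(
    const std::map<std::string, EntrySketch>& regular_imports,
    std::vector<EntrySketch>& exports) {
  for (EntrySketch& e : exports) {
    // Only plain local exports ("export {b as c};") are candidates.
    if (e.local_name.empty() || !e.module_request.empty()) continue;
    auto it = regular_imports.find(e.local_name);
    if (it == regular_imports.end()) continue;
    // Copy the import name and module request from the matching import
    // entry and drop the local name, turning
    //   import {a as b} from "X"; export {b as c};
    // into
    //   import {a as b} from "X"; export {a as c} from "X";
    e.import_name = it->second.import_name;
    e.module_request = it->second.module_request;
    e.local_name.clear();
  }
}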
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 2a79049b08..f19ee23de1 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -13,26 +13,22 @@
namespace v8 {
namespace internal {
-CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin) {
- output_ = NULL;
- size_ = 0;
- pos_ = 0;
+CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin)
+ : builder_(isolate) {
+ isolate_ = isolate;
position_ = 0;
+ num_prints_ = 0;
found_ = false;
done_ = false;
is_builtin_ = is_builtin;
InitializeAstVisitor(isolate);
}
-
-CallPrinter::~CallPrinter() { DeleteArray(output_); }
-
-
-const char* CallPrinter::Print(FunctionLiteral* program, int position) {
- Init();
+Handle<String> CallPrinter::Print(FunctionLiteral* program, int position) {
+ num_prints_ = 0;
position_ = position;
Find(program);
- return output_;
+ return builder_.Finish().ToHandleChecked();
}
@@ -40,9 +36,9 @@ void CallPrinter::Find(AstNode* node, bool print) {
if (done_) return;
if (found_) {
if (print) {
- int start = pos_;
+ int prev_num_prints = num_prints_;
Visit(node);
- if (start != pos_) return;
+ if (prev_num_prints != num_prints_) return;
}
Print("(intermediate value)");
} else {
@@ -50,45 +46,18 @@ void CallPrinter::Find(AstNode* node, bool print) {
}
}
-
-void CallPrinter::Init() {
- if (size_ == 0) {
- DCHECK(output_ == NULL);
- const int initial_size = 256;
- output_ = NewArray<char>(initial_size);
- size_ = initial_size;
- }
- output_[0] = '\0';
- pos_ = 0;
+void CallPrinter::Print(const char* str) {
+ if (!found_ || done_) return;
+ num_prints_++;
+ builder_.AppendCString(str);
}
-
-void CallPrinter::Print(const char* format, ...) {
+void CallPrinter::Print(Handle<String> str) {
if (!found_ || done_) return;
- for (;;) {
- va_list arguments;
- va_start(arguments, format);
- int n = VSNPrintF(Vector<char>(output_, size_) + pos_, format, arguments);
- va_end(arguments);
-
- if (n >= 0) {
- // there was enough space - we are done
- pos_ += n;
- return;
- } else {
- // there was not enough space - allocate more and try again
- const int slack = 32;
- int new_size = size_ + (size_ >> 1) + slack;
- char* new_output = NewArray<char>(new_size);
- MemCopy(new_output, output_, pos_);
- DeleteArray(output_);
- output_ = new_output;
- size_ = new_size;
- }
- }
+ num_prints_++;
+ builder_.AppendString(str);
}
-
void CallPrinter::VisitBlock(Block* node) {
FindStatements(node->statements());
}
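Note: CallPrinter replaced its manually grown char buffer (output_/size_/pos_)
with an IncrementalStringBuilder, so Find() can no longer compare buffer
positions to detect output; it counts appends instead. A sketch of the
counting idiom with hypothetical names (CountingBuilderSketch is not a V8
type):

#include <string>

class CountingBuilderSketch {
 public:
  void Append(const std::string& s) {
    ++num_appends_;  // bump the counter on every append
    out_ += s;
  }
  int num_appends() const { return num_appends_; }
  const std::string& str() const { return out_; }

 private:
  std::string out_;
  int num_appends_ = 0;
};

// Usage mirrors Find() above: record the count, visit, and check whether
// anything was printed without touching the underlying buffer.
//   int before = builder.num_appends();
//   Visit(node);
//   if (before != builder.num_appends()) return;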
@@ -100,13 +69,6 @@ void CallPrinter::VisitVariableDeclaration(VariableDeclaration* node) {}
void CallPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {}
-void CallPrinter::VisitImportDeclaration(ImportDeclaration* node) {
-}
-
-
-void CallPrinter::VisitExportDeclaration(ExportDeclaration* node) {}
-
-
void CallPrinter::VisitExpressionStatement(ExpressionStatement* node) {
Find(node->expression());
}
@@ -192,10 +154,11 @@ void CallPrinter::VisitForInStatement(ForInStatement* node) {
void CallPrinter::VisitForOfStatement(ForOfStatement* node) {
- Find(node->each());
Find(node->assign_iterator());
- Find(node->body());
Find(node->next_result());
+ Find(node->result_done());
+ Find(node->assign_each());
+ Find(node->body());
}
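Note: the new visit order in VisitForOfStatement matches the evaluation order
of the desugared iteration protocol. A runnable C++ analogue of that shape
(ResultSketch and IteratorSketch are illustrative stand-ins, not V8's AST):

#include <cstdio>
#include <vector>

struct ResultSketch {
  int value;
  bool done;
};

struct IteratorSketch {
  std::vector<int>::const_iterator pos, end;
  ResultSketch next() {
    if (pos == end) return ResultSketch{0, true};
    ResultSketch r{*pos, false};
    ++pos;
    return r;
  }
};

int main() {
  std::vector<int> iterable = {1, 2, 3};
  IteratorSketch iterator{iterable.begin(), iterable.end()};  // assign_iterator
  for (;;) {
    ResultSketch result = iterator.next();  // next_result
    if (result.done) break;                 // result_done
    int each = result.value;                // assign_each
    std::printf("%d\n", each);              // body
  }
  return 0;
}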
@@ -241,13 +204,13 @@ void CallPrinter::VisitConditional(Conditional* node) {
void CallPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(*node->value(), true);
+ PrintLiteral(node->value(), true);
}
void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
- PrintLiteral(*node->pattern(), false);
+ PrintLiteral(node->pattern(), false);
Print("/");
if (node->flags() & RegExp::kGlobal) Print("g");
if (node->flags() & RegExp::kIgnoreCase) Print("i");
@@ -279,7 +242,7 @@ void CallPrinter::VisitVariableProxy(VariableProxy* node) {
// Variable names of builtins are meaningless due to minification.
Print("(var)");
} else {
- PrintLiteral(*node->name(), false);
+ PrintLiteral(node->name(), false);
}
}
@@ -302,7 +265,7 @@ void CallPrinter::VisitProperty(Property* node) {
if (literal != NULL && literal->value()->IsInternalizedString()) {
Find(node->obj(), true);
Print(".");
- PrintLiteral(*literal->value(), false);
+ PrintLiteral(literal->value(), false);
} else {
Find(node->obj(), true);
Print("[");
@@ -356,7 +319,9 @@ void CallPrinter::VisitUnaryOperation(UnaryOperation* node) {
Token::Value op = node->op();
bool needsSpace =
op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID;
- Print("(%s%s", Token::String(op), needsSpace ? " " : "");
+ Print("(");
+ Print(Token::String(op));
+ if (needsSpace) Print(" ");
Find(node->expression(), true);
Print(")");
}
@@ -364,9 +329,9 @@ void CallPrinter::VisitUnaryOperation(UnaryOperation* node) {
void CallPrinter::VisitCountOperation(CountOperation* node) {
Print("(");
- if (node->is_prefix()) Print("%s", Token::String(node->op()));
+ if (node->is_prefix()) Print(Token::String(node->op()));
Find(node->expression(), true);
- if (node->is_postfix()) Print("%s", Token::String(node->op()));
+ if (node->is_postfix()) Print(Token::String(node->op()));
Print(")");
}
@@ -374,7 +339,9 @@ void CallPrinter::VisitCountOperation(CountOperation* node) {
void CallPrinter::VisitBinaryOperation(BinaryOperation* node) {
Print("(");
Find(node->left(), true);
- Print(" %s ", Token::String(node->op()));
+ Print(" ");
+ Print(Token::String(node->op()));
+ Print(" ");
Find(node->right(), true);
Print(")");
}
@@ -383,7 +350,9 @@ void CallPrinter::VisitBinaryOperation(BinaryOperation* node) {
void CallPrinter::VisitCompareOperation(CompareOperation* node) {
Print("(");
Find(node->left(), true);
- Print(" %s ", Token::String(node->op()));
+ Print(" ");
+ Print(Token::String(node->op()));
+ Print(" ");
Find(node->right(), true);
Print(")");
}
@@ -432,32 +401,30 @@ void CallPrinter::FindArguments(ZoneList<Expression*>* arguments) {
}
}
-
-void CallPrinter::PrintLiteral(Object* value, bool quote) {
- Object* object = value;
- if (object->IsString()) {
+void CallPrinter::PrintLiteral(Handle<Object> value, bool quote) {
+ if (value->IsString()) {
if (quote) Print("\"");
- Print("%s", String::cast(object)->ToCString().get());
+ Print(Handle<String>::cast(value));
if (quote) Print("\"");
- } else if (object->IsNull()) {
+ } else if (value->IsNull(isolate_)) {
Print("null");
- } else if (object->IsTrue()) {
+ } else if (value->IsTrue(isolate_)) {
Print("true");
- } else if (object->IsFalse()) {
+ } else if (value->IsFalse(isolate_)) {
Print("false");
- } else if (object->IsUndefined()) {
+ } else if (value->IsUndefined(isolate_)) {
Print("undefined");
- } else if (object->IsNumber()) {
- Print("%g", object->Number());
- } else if (object->IsSymbol()) {
+ } else if (value->IsNumber()) {
+ Print(isolate_->factory()->NumberToString(value));
+ } else if (value->IsSymbol()) {
// Symbols can only occur as literals if they were inserted by the parser.
- PrintLiteral(Symbol::cast(object)->name(), false);
+ PrintLiteral(handle(Handle<Symbol>::cast(value)->name(), isolate_), false);
}
}
void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
- PrintLiteral(*value->string(), quote);
+ PrintLiteral(value->string(), quote);
}
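Note: PrintLiteral now takes a Handle<Object> rather than a raw Object*,
presumably because printing can now allocate (NumberToString above creates a
new string) and an allocation may trigger a GC that moves the object a raw
pointer refers to. A generic sketch of the double-indirection idea, assuming
a moving collector (HandleSketch is not V8's Handle):

template <typename T>
class HandleSketch {
 public:
  explicit HandleSketch(T** slot) : slot_(slot) {}
  // Every access re-reads the slot, so the handle stays valid even if
  // the collector rewrote the slot after moving the object.
  T& operator*() const { return **slot_; }

 private:
  T** slot_;  // slot lives in a table the GC knows about and updates
};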
@@ -476,494 +443,13 @@ static int FormatSlotNode(Vector<char>* buf, Expression* node,
return pos;
}
-
-PrettyPrinter::PrettyPrinter(Isolate* isolate) {
- output_ = NULL;
- size_ = 0;
- pos_ = 0;
- InitializeAstVisitor(isolate);
-}
-
-
-PrettyPrinter::~PrettyPrinter() {
- DeleteArray(output_);
-}
-
-
-void PrettyPrinter::VisitBlock(Block* node) {
- if (!node->ignore_completion_value()) Print("{ ");
- PrintStatements(node->statements());
- if (node->statements()->length() > 0) Print(" ");
- if (!node->ignore_completion_value()) Print("}");
-}
-
-
-void PrettyPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
- Print("var ");
- PrintLiteral(node->proxy()->name(), false);
- Print(";");
-}
-
-
-void PrettyPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
- Print("function ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" = ");
- PrintFunctionLiteral(node->fun());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitImportDeclaration(ImportDeclaration* node) {
- Print("import ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" from ");
- PrintLiteral(node->module_specifier()->string(), true);
- Print(";");
-}
-
-
-void PrettyPrinter::VisitExportDeclaration(ExportDeclaration* node) {
- Print("export ");
- PrintLiteral(node->proxy()->name(), false);
- Print(";");
-}
-
-
-void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
- Visit(node->expression());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitEmptyStatement(EmptyStatement* node) {
- Print(";");
-}
-
-
-void PrettyPrinter::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* node) {
- Visit(node->statement());
-}
-
-
-void PrettyPrinter::VisitIfStatement(IfStatement* node) {
- Print("if (");
- Visit(node->condition());
- Print(") ");
- Visit(node->then_statement());
- if (node->HasElseStatement()) {
- Print(" else ");
- Visit(node->else_statement());
- }
-}
-
-
-void PrettyPrinter::VisitContinueStatement(ContinueStatement* node) {
- Print("continue");
- ZoneList<const AstRawString*>* labels = node->target()->labels();
- if (labels != NULL) {
- Print(" ");
- DCHECK(labels->length() > 0); // guaranteed to have at least one entry
- PrintLiteral(labels->at(0), false); // any label from the list is fine
- }
- Print(";");
-}
-
-
-void PrettyPrinter::VisitBreakStatement(BreakStatement* node) {
- Print("break");
- ZoneList<const AstRawString*>* labels = node->target()->labels();
- if (labels != NULL) {
- Print(" ");
- DCHECK(labels->length() > 0); // guaranteed to have at least one entry
- PrintLiteral(labels->at(0), false); // any label from the list is fine
- }
- Print(";");
-}
-
-
-void PrettyPrinter::VisitReturnStatement(ReturnStatement* node) {
- Print("return ");
- Visit(node->expression());
- Print(";");
-}
-
-
-void PrettyPrinter::VisitWithStatement(WithStatement* node) {
- Print("with (");
- Visit(node->expression());
- Print(") ");
- Visit(node->statement());
-}
-
-
-void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) {
- PrintLabels(node->labels());
- Print("switch (");
- Visit(node->tag());
- Print(") { ");
- ZoneList<CaseClause*>* cases = node->cases();
- for (int i = 0; i < cases->length(); i++)
- Visit(cases->at(i));
- Print("}");
-}
-
-
-void PrettyPrinter::VisitCaseClause(CaseClause* clause) {
- if (clause->is_default()) {
- Print("default");
- } else {
- Print("case ");
- Visit(clause->label());
- }
- Print(": ");
- PrintStatements(clause->statements());
- if (clause->statements()->length() > 0)
- Print(" ");
-}
-
-
-void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
- PrintLabels(node->labels());
- Print("do ");
- Visit(node->body());
- Print(" while (");
- Visit(node->cond());
- Print(");");
-}
-
-
-void PrettyPrinter::VisitWhileStatement(WhileStatement* node) {
- PrintLabels(node->labels());
- Print("while (");
- Visit(node->cond());
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitForStatement(ForStatement* node) {
- PrintLabels(node->labels());
- Print("for (");
- if (node->init() != NULL) {
- Visit(node->init());
- Print(" ");
- } else {
- Print("; ");
- }
- if (node->cond() != NULL) Visit(node->cond());
- Print("; ");
- if (node->next() != NULL) {
- Visit(node->next()); // prints extra ';', unfortunately
- // to fix: should use Expression for next
- }
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitForInStatement(ForInStatement* node) {
- PrintLabels(node->labels());
- Print("for (");
- Visit(node->each());
- Print(" in ");
- Visit(node->enumerable());
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitForOfStatement(ForOfStatement* node) {
- PrintLabels(node->labels());
- Print("for (");
- Visit(node->each());
- Print(" of ");
- Visit(node->iterable());
- Print(") ");
- Visit(node->body());
-}
-
-
-void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
- Print("try ");
- Visit(node->try_block());
- Print(" catch (");
- const bool quote = false;
- PrintLiteral(node->variable()->name(), quote);
- Print(") ");
- Visit(node->catch_block());
-}
-
-
-void PrettyPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
- Print("try ");
- Visit(node->try_block());
- Print(" finally ");
- Visit(node->finally_block());
-}
-
-
-void PrettyPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
- Print("debugger ");
-}
-
-
-void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
- Print("(");
- PrintFunctionLiteral(node);
- Print(")");
-}
-
-
-void PrettyPrinter::VisitClassLiteral(ClassLiteral* node) {
- Print("(class ");
- PrintLiteral(node->constructor()->name(), false);
- if (node->extends()) {
- Print(" extends ");
- Visit(node->extends());
- }
- Print(" { ");
- for (int i = 0; i < node->properties()->length(); i++) {
- PrintObjectLiteralProperty(node->properties()->at(i));
- }
- Print(" })");
-}
-
-
-void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
- Print("(");
- PrintLiteral(node->name(), false);
- Print(")");
-}
-
-
-void PrettyPrinter::VisitDoExpression(DoExpression* node) {
- Print("(do {");
- PrintStatements(node->block()->statements());
- Print("})");
-}
-
-
-void PrettyPrinter::VisitConditional(Conditional* node) {
- Visit(node->condition());
- Print(" ? ");
- Visit(node->then_expression());
- Print(" : ");
- Visit(node->else_expression());
-}
-
-
-void PrettyPrinter::VisitLiteral(Literal* node) {
- PrintLiteral(node->value(), true);
-}
-
-
-void PrettyPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
- Print(" RegExp(");
- PrintLiteral(node->pattern(), false);
- Print(",");
- if (node->flags() & RegExp::kGlobal) Print("g");
- if (node->flags() & RegExp::kIgnoreCase) Print("i");
- if (node->flags() & RegExp::kMultiline) Print("m");
- if (node->flags() & RegExp::kUnicode) Print("u");
- if (node->flags() & RegExp::kSticky) Print("y");
- Print(") ");
-}
-
-
-void PrettyPrinter::VisitObjectLiteral(ObjectLiteral* node) {
- Print("{ ");
- for (int i = 0; i < node->properties()->length(); i++) {
- if (i != 0) Print(",");
- PrintObjectLiteralProperty(node->properties()->at(i));
- }
- Print(" }");
-}
-
-
-void PrettyPrinter::PrintObjectLiteralProperty(
- ObjectLiteralProperty* property) {
- // TODO(arv): Better printing of methods etc.
- Print(" ");
- Visit(property->key());
- Print(": ");
- Visit(property->value());
-}
-
-
-void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
- Print("[ ");
- Print(" literal_index = %d", node->literal_index());
- for (int i = 0; i < node->values()->length(); i++) {
- if (i != 0) Print(",");
- Visit(node->values()->at(i));
- }
- Print(" ]");
-}
-
-
-void PrettyPrinter::VisitVariableProxy(VariableProxy* node) {
- PrintLiteral(node->name(), false);
-}
-
-
-void PrettyPrinter::VisitAssignment(Assignment* node) {
- Visit(node->target());
- Print(" %s ", Token::String(node->op()));
- Visit(node->value());
-}
-
-
-void PrettyPrinter::VisitYield(Yield* node) {
- Print("yield ");
- Visit(node->expression());
-}
-
-
-void PrettyPrinter::VisitThrow(Throw* node) {
- Print("throw ");
- Visit(node->exception());
-}
-
-
-void PrettyPrinter::VisitProperty(Property* node) {
- Expression* key = node->key();
- Literal* literal = key->AsLiteral();
- if (literal != NULL && literal->value()->IsInternalizedString()) {
- Print("(");
- Visit(node->obj());
- Print(").");
- PrintLiteral(literal->value(), false);
- } else {
- Visit(node->obj());
- Print("[");
- Visit(key);
- Print("]");
- }
-}
-
-
-void PrettyPrinter::VisitCall(Call* node) {
- Visit(node->expression());
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitCallNew(CallNew* node) {
- Print("new (");
- Visit(node->expression());
- Print(")");
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitCallRuntime(CallRuntime* node) {
- Print("%%%s\n", node->debug_name());
- PrintArguments(node->arguments());
-}
-
-
-void PrettyPrinter::VisitUnaryOperation(UnaryOperation* node) {
- Token::Value op = node->op();
- bool needsSpace =
- op == Token::DELETE || op == Token::TYPEOF || op == Token::VOID;
- Print("(%s%s", Token::String(op), needsSpace ? " " : "");
- Visit(node->expression());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitCountOperation(CountOperation* node) {
- Print("(");
- if (node->is_prefix()) Print("%s", Token::String(node->op()));
- Visit(node->expression());
- if (node->is_postfix()) Print("%s", Token::String(node->op()));
- Print(")");
-}
-
-
-void PrettyPrinter::VisitBinaryOperation(BinaryOperation* node) {
- Print("(");
- Visit(node->left());
- Print(" %s ", Token::String(node->op()));
- Visit(node->right());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitCompareOperation(CompareOperation* node) {
- Print("(");
- Visit(node->left());
- Print(" %s ", Token::String(node->op()));
- Visit(node->right());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitSpread(Spread* node) {
- Print("(...");
- Visit(node->expression());
- Print(")");
-}
-
-
-void PrettyPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
- Print("()");
-}
-
-
-void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
- Print("<this-function>");
-}
-
-
-void PrettyPrinter::VisitSuperPropertyReference(SuperPropertyReference* node) {
- Print("<super-property-reference>");
-}
-
-
-void PrettyPrinter::VisitSuperCallReference(SuperCallReference* node) {
- Print("<super-call-reference>");
-}
-
-
-void PrettyPrinter::VisitRewritableExpression(RewritableExpression* node) {
- Visit(node->expression());
-}
-
-
-const char* PrettyPrinter::Print(AstNode* node) {
+const char* AstPrinter::Print(AstNode* node) {
Init();
Visit(node);
return output_;
}
-
-const char* PrettyPrinter::PrintExpression(FunctionLiteral* program) {
- Init();
- ExpressionStatement* statement =
- program->body()->at(0)->AsExpressionStatement();
- Visit(statement->expression());
- return output_;
-}
-
-
-const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
- Init();
- PrintStatements(program->body());
- Print("\n");
- return output_;
-}
-
-
-void PrettyPrinter::PrintOut(Isolate* isolate, AstNode* node) {
- PrettyPrinter printer(isolate);
- PrintF("%s\n", printer.Print(node));
-}
-
-
-void PrettyPrinter::Init() {
+void AstPrinter::Init() {
if (size_ == 0) {
DCHECK(output_ == NULL);
const int initial_size = 256;
@@ -974,8 +460,7 @@ void PrettyPrinter::Init() {
pos_ = 0;
}
-
-void PrettyPrinter::Print(const char* format, ...) {
+void AstPrinter::Print(const char* format, ...) {
for (;;) {
va_list arguments;
va_start(arguments, format);
@@ -1001,17 +486,7 @@ void PrettyPrinter::Print(const char* format, ...) {
}
}
-
-void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
- if (statements == NULL) return;
- for (int i = 0; i < statements->length(); i++) {
- if (i != 0) Print(" ");
- Visit(statements->at(i));
- }
-}
-
-
-void PrettyPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
+void AstPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
if (labels != NULL) {
for (int i = 0; i < labels->length(); i++) {
PrintLiteral(labels->at(i), false);
@@ -1020,18 +495,7 @@ void PrettyPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
}
}
-
-void PrettyPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
- Print("(");
- for (int i = 0; i < arguments->length(); i++) {
- if (i != 0) Print(", ");
- Visit(arguments->at(i));
- }
- Print(")");
-}
-
-
-void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
+void AstPrinter::PrintLiteral(Handle<Object> value, bool quote) {
Object* object = *value;
if (object->IsString()) {
String* string = String::cast(object);
@@ -1040,13 +504,13 @@ void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
Print("%c", string->Get(i));
}
if (quote) Print("\"");
- } else if (object->IsNull()) {
+ } else if (object->IsNull(isolate_)) {
Print("null");
- } else if (object->IsTrue()) {
+ } else if (object->IsTrue(isolate_)) {
Print("true");
- } else if (object->IsFalse()) {
+ } else if (object->IsFalse(isolate_)) {
Print("false");
- } else if (object->IsUndefined()) {
+ } else if (object->IsUndefined(isolate_)) {
Print("undefined");
} else if (object->IsNumber()) {
Print("%g", object->Number());
@@ -1055,7 +519,8 @@ void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
if (object->IsJSFunction()) {
Print("JS-Function");
} else if (object->IsJSArray()) {
- Print("JS-array[%u]", JSArray::cast(object)->length());
+ Print("JS-array[%u]",
+ Smi::cast(JSArray::cast(object)->length())->value());
} else if (object->IsJSObject()) {
Print("JS-Object");
} else {
@@ -1064,45 +529,15 @@ void PrettyPrinter::PrintLiteral(Handle<Object> value, bool quote) {
} else if (object->IsFixedArray()) {
Print("FixedArray");
} else {
- Print("<unknown literal %p>", object);
+ Print("<unknown literal %p>", static_cast<void*>(object));
}
}
-
-void PrettyPrinter::PrintLiteral(const AstRawString* value, bool quote) {
+void AstPrinter::PrintLiteral(const AstRawString* value, bool quote) {
PrintLiteral(value->string(), quote);
}
-void PrettyPrinter::PrintParameters(Scope* scope) {
- Print("(");
- for (int i = 0; i < scope->num_parameters(); i++) {
- if (i > 0) Print(", ");
- PrintLiteral(scope->parameter(i)->name(), false);
- }
- Print(")");
-}
-
-
-void PrettyPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- if (i > 0) Print(" ");
- Visit(declarations->at(i));
- }
-}
-
-
-void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) {
- Print("function ");
- PrintLiteral(function->name(), false);
- PrintParameters(function->scope());
- Print(" { ");
- PrintDeclarations(function->scope()->declarations());
- PrintStatements(function->body());
- Print(" }");
-}
-
-
//-----------------------------------------------------------------------------
class IndentedScope BASE_EMBEDDED {
@@ -1132,12 +567,14 @@ class IndentedScope BASE_EMBEDDED {
//-----------------------------------------------------------------------------
-
-AstPrinter::AstPrinter(Isolate* isolate) : PrettyPrinter(isolate), indent_(0) {}
-
+AstPrinter::AstPrinter(Isolate* isolate)
+ : isolate_(isolate), output_(nullptr), size_(0), pos_(0), indent_(0) {
+ InitializeAstVisitor(isolate);
+}
AstPrinter::~AstPrinter() {
DCHECK(indent_ == 0);
+ DeleteArray(output_);
}
@@ -1145,7 +582,7 @@ void AstPrinter::PrintIndented(const char* txt) {
for (int i = 0; i < indent_; i++) {
Print(". ");
}
- Print(txt);
+ Print("%s", txt);
}
@@ -1191,13 +628,17 @@ void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
Init();
{ IndentedScope indent(this, "FUNC", program->position());
+ PrintIndented("KIND");
+ Print(" %d\n", program->kind());
+ PrintIndented("YIELD COUNT");
+ Print(" %d\n", program->yield_count());
PrintLiteralIndented("NAME", program->name(), true);
PrintLiteralIndented("INFERRED NAME", program->inferred_name(), true);
PrintParameters(program->scope());
PrintDeclarations(program->scope()->declarations());
PrintStatements(program->body());
}
- return Output();
+ return output_;
}
@@ -1205,7 +646,7 @@ void AstPrinter::PrintOut(Isolate* isolate, AstNode* node) {
AstPrinter printer(isolate);
printer.Init();
printer.Visit(node);
- PrintF("%s", printer.Output());
+ PrintF("%s", printer.output_);
}
@@ -1218,8 +659,7 @@ void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
}
}
-
-void AstPrinter::PrintParameters(Scope* scope) {
+void AstPrinter::PrintParameters(DeclarationScope* scope) {
if (scope->num_parameters() > 0) {
IndentedScope indent(this, "PARAMS");
for (int i = 0; i < scope->num_parameters(); i++) {
@@ -1254,8 +694,7 @@ void AstPrinter::VisitBlock(Block* node) {
// TODO(svenpanne) Start with IndentedScope.
void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
- PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
- node->proxy()->var(),
+ PrintLiteralWithModeIndented("VARIABLE", node->proxy()->var(),
node->proxy()->name());
}
@@ -1270,19 +709,6 @@ void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
}
-void AstPrinter::VisitImportDeclaration(ImportDeclaration* node) {
- IndentedScope indent(this, "IMPORT", node->position());
- PrintLiteralIndented("NAME", node->proxy()->name(), true);
- PrintLiteralIndented("FROM", node->module_specifier()->string(), true);
-}
-
-
-void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
- IndentedScope indent(this, "EXPORT", node->position());
- PrintLiteral(node->proxy()->name(), true);
-}
-
-
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
IndentedScope indent(this, "EXPRESSION STATEMENT", node->position());
Visit(node->expression());
@@ -1359,6 +785,8 @@ void AstPrinter::VisitCaseClause(CaseClause* clause) {
void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
IndentedScope indent(this, "DO", node->position());
+ PrintIndented("YIELD COUNT");
+ Print(" %d\n", node->yield_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("BODY", node->body());
PrintIndentedVisit("COND", node->cond());
@@ -1367,6 +795,8 @@ void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
void AstPrinter::VisitWhileStatement(WhileStatement* node) {
IndentedScope indent(this, "WHILE", node->position());
+ PrintIndented("YIELD COUNT");
+ Print(" %d\n", node->yield_count());
PrintLabelsIndented(node->labels());
PrintIndentedVisit("COND", node->cond());
PrintIndentedVisit("BODY", node->body());
@@ -1375,6 +805,8 @@ void AstPrinter::VisitWhileStatement(WhileStatement* node) {
void AstPrinter::VisitForStatement(ForStatement* node) {
IndentedScope indent(this, "FOR", node->position());
+ PrintIndented("YIELD COUNT");
+ Print(" %d\n", node->yield_count());
PrintLabelsIndented(node->labels());
if (node->init()) PrintIndentedVisit("INIT", node->init());
if (node->cond()) PrintIndentedVisit("COND", node->cond());
@@ -1385,6 +817,8 @@ void AstPrinter::VisitForStatement(ForStatement* node) {
void AstPrinter::VisitForInStatement(ForInStatement* node) {
IndentedScope indent(this, "FOR IN", node->position());
+ PrintIndented("YIELD COUNT");
+ Print(" %d\n", node->yield_count());
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("IN", node->enumerable());
PrintIndentedVisit("BODY", node->body());
@@ -1393,19 +827,19 @@ void AstPrinter::VisitForInStatement(ForInStatement* node) {
void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
IndentedScope indent(this, "FOR OF", node->position());
- PrintIndentedVisit("FOR", node->each());
- PrintIndentedVisit("OF", node->iterable());
- PrintIndentedVisit("BODY", node->body());
+ PrintIndented("YIELD COUNT");
+ Print(" %d\n", node->yield_count());
PrintIndentedVisit("INIT", node->assign_iterator());
PrintIndentedVisit("NEXT", node->next_result());
- PrintIndentedVisit("EACH", node->assign_each());
PrintIndentedVisit("DONE", node->result_done());
+ PrintIndentedVisit("EACH", node->assign_each());
+ PrintIndentedVisit("BODY", node->body());
}
void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
IndentedScope indent(this, "TRY CATCH", node->position());
- PrintIndentedVisit("TRY", node->try_block());
+ PrintTryStatement(node);
PrintLiteralWithModeIndented("CATCHVAR",
node->variable(),
node->variable()->name());
@@ -1415,10 +849,30 @@ void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
IndentedScope indent(this, "TRY FINALLY", node->position());
- PrintIndentedVisit("TRY", node->try_block());
+ PrintTryStatement(node);
PrintIndentedVisit("FINALLY", node->finally_block());
}
+void AstPrinter::PrintTryStatement(TryStatement* node) {
+ PrintIndentedVisit("TRY", node->try_block());
+ PrintIndented("CATCH PREDICTION");
+ const char* prediction = "";
+ switch (node->catch_prediction()) {
+ case HandlerTable::UNCAUGHT:
+ prediction = "UNCAUGHT";
+ break;
+ case HandlerTable::CAUGHT:
+ prediction = "CAUGHT";
+ break;
+ case HandlerTable::PROMISE:
+ prediction = "PROMISE";
+ break;
+ case HandlerTable::DESUGARING:
+ prediction = "DESUGARING";
+ break;
+ }
+ Print(" %s\n", prediction);
+}
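Note: the switch above enumerates every HandlerTable prediction without a
default case; with switch-coverage warnings enabled (e.g. -Wswitch), adding a
new prediction later forces this printer to be updated. A minimal
illustration with a hypothetical enum:

enum class PredictionSketch { kUncaught, kCaught, kPromise, kDesugaring };

const char* ToString(PredictionSketch p) {
  switch (p) {  // no default: compilers can flag a missing enumerator
    case PredictionSketch::kUncaught:   return "UNCAUGHT";
    case PredictionSketch::kCaught:     return "CAUGHT";
    case PredictionSketch::kPromise:    return "PROMISE";
    case PredictionSketch::kDesugaring: return "DESUGARING";
  }
  return "";  // unreachable; satisfies all-paths-return checks
}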
void AstPrinter::VisitDebuggerStatement(DebuggerStatement* node) {
IndentedScope indent(this, "DEBUGGER", node->position());
@@ -1522,7 +976,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
buf[i] = '\0';
PrintIndented("FLAGS ");
- Print(buf.start());
+ Print("%s", buf.start());
Print("\n");
}
@@ -1580,6 +1034,9 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
case VariableLocation::LOOKUP:
SNPrintF(buf + pos, " lookup");
break;
+ case VariableLocation::MODULE:
+ SNPrintF(buf + pos, " module");
+ break;
}
PrintLiteralWithModeIndented(buf.start(), var, node->name());
}
@@ -1594,7 +1051,9 @@ void AstPrinter::VisitAssignment(Assignment* node) {
void AstPrinter::VisitYield(Yield* node) {
- IndentedScope indent(this, "YIELD", node->position());
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "YIELD id %d", node->yield_id());
+ IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 0186203d27..9b0e22abc2 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -7,33 +7,34 @@
#include "src/allocation.h"
#include "src/ast/ast.h"
+#include "src/base/compiler-specific.h"
+#include "src/string-builder.h"
namespace v8 {
namespace internal {
-class CallPrinter : public AstVisitor {
+class CallPrinter final : public AstVisitor<CallPrinter> {
public:
explicit CallPrinter(Isolate* isolate, bool is_builtin);
- virtual ~CallPrinter();
// The following routine prints the node with position |position| into a
- // string. The result string is alive as long as the CallPrinter is alive.
- const char* Print(FunctionLiteral* program, int position);
-
- void Print(const char* format, ...);
-
- void Find(AstNode* node, bool print = false);
+ // string.
+ Handle<String> Print(FunctionLiteral* program, int position);
// Individual nodes
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
private:
- void Init();
- char* output_; // output string buffer
- int size_; // output_ size
- int pos_; // current printing position
+ void Print(const char* str);
+ void Print(Handle<String> str);
+
+ void Find(AstNode* node, bool print = false);
+
+ Isolate* isolate_;
+ int num_prints_;
+ IncrementalStringBuilder builder_;
int position_; // position of ast node to print
bool found_;
bool done_;
@@ -42,7 +43,7 @@ class CallPrinter : public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
protected:
- void PrintLiteral(Object* value, bool quote);
+ void PrintLiteral(Handle<Object> value, bool quote);
void PrintLiteral(const AstRawString* value, bool quote);
void FindStatements(ZoneList<Statement*>* statements);
void FindArguments(ZoneList<Expression*>* arguments);
@@ -51,75 +52,40 @@ class CallPrinter : public AstVisitor {
#ifdef DEBUG
-class PrettyPrinter: public AstVisitor {
+class AstPrinter final : public AstVisitor<AstPrinter> {
public:
- explicit PrettyPrinter(Isolate* isolate);
- virtual ~PrettyPrinter();
+ explicit AstPrinter(Isolate* isolate);
+ ~AstPrinter();
// The following routines print a node into a string.
- // The result string is alive as long as the PrettyPrinter is alive.
+ // The result string is alive as long as the AstPrinter is alive.
const char* Print(AstNode* node);
- const char* PrintExpression(FunctionLiteral* program);
const char* PrintProgram(FunctionLiteral* program);
- void Print(const char* format, ...);
+ void PRINTF_FORMAT(2, 3) Print(const char* format, ...);
// Print a node to stdout.
static void PrintOut(Isolate* isolate, AstNode* node);
// Individual nodes
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
private:
- char* output_; // output string buffer
- int size_; // output_ size
- int pos_; // current printing position
+ friend class IndentedScope;
- protected:
void Init();
- const char* Output() const { return output_; }
- virtual void PrintStatements(ZoneList<Statement*>* statements);
void PrintLabels(ZoneList<const AstRawString*>* labels);
- virtual void PrintArguments(ZoneList<Expression*>* arguments);
- void PrintLiteral(Handle<Object> value, bool quote);
void PrintLiteral(const AstRawString* value, bool quote);
- void PrintParameters(Scope* scope);
- void PrintDeclarations(ZoneList<Declaration*>* declarations);
- void PrintFunctionLiteral(FunctionLiteral* function);
- void PrintCaseClause(CaseClause* clause);
- void PrintObjectLiteralProperty(ObjectLiteralProperty* property);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-};
-
-
-// Prints the AST structure
-class AstPrinter: public PrettyPrinter {
- public:
- explicit AstPrinter(Isolate* isolate);
- virtual ~AstPrinter();
-
- const char* PrintProgram(FunctionLiteral* program);
-
- // Print a node to stdout.
- static void PrintOut(Isolate* isolate, AstNode* node);
-
- // Individual nodes
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- private:
- friend class IndentedScope;
+ void PrintLiteral(Handle<Object> value, bool quote);
void PrintIndented(const char* txt);
void PrintIndentedVisit(const char* s, AstNode* node);
void PrintStatements(ZoneList<Statement*>* statements);
void PrintDeclarations(ZoneList<Declaration*>* declarations);
- void PrintParameters(Scope* scope);
+ void PrintParameters(DeclarationScope* scope);
void PrintArguments(ZoneList<Expression*>* arguments);
void PrintCaseClause(CaseClause* clause);
void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
@@ -128,10 +94,17 @@ class AstPrinter: public PrettyPrinter {
Handle<Object> value);
void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
void PrintProperties(ZoneList<ObjectLiteral::Property*>* properties);
+ void PrintTryStatement(TryStatement* try_statement);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ Isolate* isolate_;
+ char* output_; // output string buffer
+ int size_; // output_ size
+ int pos_; // current printing position
int indent_;
};
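Note: both printers now derive from AstVisitor<Subclass> and are marked
final, and their Visit methods lost override, which is consistent with the
visitor moving from virtual dispatch to CRTP-style static dispatch. A minimal
sketch of that pattern (VisitorBaseSketch and the node kinds are
illustrative):

template <typename Subclass>
class VisitorBaseSketch {
 public:
  // In V8 the dispatch is over the node's type tag; simplified here.
  void Dispatch(int node_kind) {
    if (node_kind == 0) {
      impl()->VisitLiteral();  // direct call, no vtable lookup
    } else {
      impl()->VisitBlock();
    }
  }

 protected:
  Subclass* impl() { return static_cast<Subclass*>(this); }
};

class PrinterSketch final : public VisitorBaseSketch<PrinterSketch> {
 public:
  void VisitLiteral() {}
  void VisitBlock() {}
};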
diff --git a/deps/v8/src/ast/scopeinfo.cc b/deps/v8/src/ast/scopeinfo.cc
index 4ffc020f61..7189de3372 100644
--- a/deps/v8/src/ast/scopeinfo.cc
+++ b/deps/v8/src/ast/scopeinfo.cc
@@ -6,6 +6,7 @@
#include <stdlib.h>
+#include "src/ast/context-slot-cache.h"
#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
@@ -31,8 +32,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
// Determine use and location of the "this" binding if it is present.
VariableAllocationInfo receiver_info;
- if (scope->has_this_declaration()) {
- Variable* var = scope->receiver();
+ if (scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->has_this_declaration()) {
+ Variable* var = scope->AsDeclarationScope()->receiver();
if (!var->is_used()) {
receiver_info = UNUSED;
} else if (var->IsContextSlot()) {
@@ -45,13 +47,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
receiver_info = NONE;
}
- bool has_new_target = scope->new_target_var() != nullptr;
+ bool has_new_target =
+ scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->new_target_var() != nullptr;
// Determine use and location of the function variable if it is present.
VariableAllocationInfo function_name_info;
VariableMode function_variable_mode;
- if (scope->is_function_scope() && scope->function() != NULL) {
- Variable* var = scope->function()->proxy()->var();
+ if (scope->is_function_scope() &&
+ scope->AsDeclarationScope()->function_var() != nullptr) {
+ Variable* var = scope->AsDeclarationScope()->function_var();
if (!var->is_used()) {
function_name_info = UNUSED;
} else if (var->IsContextSlot()) {
@@ -78,8 +83,17 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
- bool has_simple_parameters =
- scope->is_function_scope() && scope->has_simple_parameters();
+ bool has_simple_parameters = false;
+ bool asm_module = false;
+ bool asm_function = false;
+ FunctionKind function_kind = kNormalFunction;
+ if (scope->is_function_scope()) {
+ DeclarationScope* function_scope = scope->AsDeclarationScope();
+ has_simple_parameters = function_scope->has_simple_parameters();
+ asm_module = function_scope->asm_module();
+ asm_function = function_scope->asm_function();
+ function_kind = function_scope->function_kind();
+ }
// Encode the flags.
int flags = ScopeTypeField::encode(scope->scope_type()) |
@@ -90,10 +104,10 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
- AsmModuleField::encode(scope->asm_module()) |
- AsmFunctionField::encode(scope->asm_function()) |
+ AsmModuleField::encode(asm_module) |
+ AsmFunctionField::encode(asm_function) |
HasSimpleParametersField::encode(has_simple_parameters) |
- FunctionKindField::encode(scope->function_kind());
+ FunctionKindField::encode(function_kind);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetStackLocalCount(stack_local_count);
@@ -103,8 +117,11 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
int index = kVariablePartIndex;
// Add parameters.
DCHECK(index == scope_info->ParameterEntriesIndex());
- for (int i = 0; i < parameter_count; ++i) {
- scope_info->set(index++, *scope->parameter(i)->name());
+ if (scope->is_declaration_scope()) {
+ for (int i = 0; i < parameter_count; ++i) {
+ scope_info->set(index++,
+ *scope->AsDeclarationScope()->parameter(i)->name());
+ }
}
// Add stack locals' names. We are assuming that the stack locals'
@@ -170,7 +187,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
// If the receiver is allocated, add its index.
DCHECK(index == scope_info->ReceiverEntryIndex());
if (has_receiver) {
- int var_index = scope->receiver()->index();
+ int var_index = scope->AsDeclarationScope()->receiver()->index();
scope_info->set(index++, Smi::FromInt(var_index));
// ?? DCHECK(receiver_info != CONTEXT || var_index ==
// scope_info->ContextLength() - 1);
@@ -179,8 +196,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
// If present, add the function variable name and its index.
DCHECK(index == scope_info->FunctionNameEntryIndex());
if (has_function_name) {
- int var_index = scope->function()->proxy()->var()->index();
- scope_info->set(index++, *scope->function()->proxy()->name());
+ int var_index = scope->AsDeclarationScope()->function_var()->index();
+ scope_info->set(index++,
+ *scope->AsDeclarationScope()->function_var()->name());
scope_info->set(index++, Smi::FromInt(var_index));
DCHECK(function_name_info != CONTEXT ||
var_index == scope_info->ContextLength() - 1);
@@ -438,16 +456,13 @@ MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
return ContextLocalMaybeAssignedFlag::decode(value);
}
-
-bool ScopeInfo::LocalIsSynthetic(int var) {
- DCHECK(0 <= var && var < LocalCount());
+bool ScopeInfo::VariableIsSynthetic(String* name) {
// There's currently no flag stored on the ScopeInfo to indicate that a
// variable is a compiler-introduced temporary. However, to avoid conflict
// with user declarations, the current temporaries like .generator_object and
// .result start with a dot, so we can use that as a flag. It's a hack!
- Handle<String> name(LocalName(var));
- return (name->length() > 0 && name->Get(0) == '.') ||
- name->Equals(*GetIsolate()->factory()->this_string());
+ return name->length() == 0 || name->Get(0) == '.' ||
+ name->Equals(name->GetHeap()->this_string());
}
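Note: the dot-prefix convention the comment describes reduces the synthetic
check to a test on the name itself, so no per-variable flag is needed. A
plain-C++ sketch of the same predicate (std::string stands in for the V8
String API):

#include <string>

bool VariableIsSyntheticSketch(const std::string& name) {
  // Empty names and dot-prefixed names (".generator_object", ".result")
  // are compiler-introduced; "this" is treated the same way.
  return name.empty() || name[0] == '.' || name == "this";
}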
@@ -640,84 +655,8 @@ int ScopeInfo::FunctionNameEntryIndex() {
return ReceiverEntryIndex() + (HasAllocatedReceiver() ? 1 : 0);
}
-
-int ContextSlotCache::Hash(Object* data, String* name) {
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(data)) >> 2;
- return static_cast<int>((addr_hash ^ name->Hash()) % kLength);
-}
-
-
-int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
- InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag) {
- int index = Hash(data, name);
- Key& key = keys_[index];
- if ((key.data == data) && key.name->Equals(name)) {
- Value result(values_[index]);
- if (mode != NULL) *mode = result.mode();
- if (init_flag != NULL) *init_flag = result.initialization_flag();
- if (maybe_assigned_flag != NULL)
- *maybe_assigned_flag = result.maybe_assigned_flag();
- return result.index() + kNotFound;
- }
- return kNotFound;
-}
-
-
-void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
- VariableMode mode, InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag,
- int slot_index) {
- DisallowHeapAllocation no_gc;
- Handle<String> internalized_name;
- DCHECK(slot_index > kNotFound);
- if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name).
- ToHandle(&internalized_name)) {
- int index = Hash(*data, *internalized_name);
- Key& key = keys_[index];
- key.data = *data;
- key.name = *internalized_name;
- // Please note value only takes a uint as index.
- values_[index] = Value(mode, init_flag, maybe_assigned_flag,
- slot_index - kNotFound).raw();
-#ifdef DEBUG
- ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
-#endif
- }
-}
-
-
-void ContextSlotCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].data = NULL;
-}
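Note: the ContextSlotCache being moved out of this file (into
src/ast/context-slot-cache.h, per the new include above) is a direct-mapped
cache: each (data, name) pair hashes to exactly one slot, and Update simply
overwrites whatever was there. A generic sketch of the pattern, with
simplified key types:

#include <cstdint>

struct CacheSketch {
  static const int kLength = 256;  // one slot per hash bucket
  struct Key {
    const void* data;
    uint32_t name_hash;
  };
  Key keys_[kLength] = {};
  uint32_t values_[kLength] = {};

  static int Hash(const void* data, uint32_t name_hash) {
    // Mix the (shifted) address with the name hash, as Hash() above does.
    uintptr_t addr = reinterpret_cast<uintptr_t>(data) >> 2;
    return static_cast<int>((static_cast<uint32_t>(addr) ^ name_hash) %
                            kLength);
  }

  void Update(const void* data, uint32_t name_hash, uint32_t value) {
    int i = Hash(data, name_hash);
    keys_[i] = Key{data, name_hash};  // overwrite on collision
    values_[i] = value;
  }

  bool Lookup(const void* data, uint32_t name_hash, uint32_t* out) const {
    int i = Hash(data, name_hash);
    if (keys_[i].data != data || keys_[i].name_hash != name_hash) {
      return false;  // miss: the slot holds a different entry (or none)
    }
    *out = values_[i];
    return true;
  }
};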
-
-
#ifdef DEBUG
-void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
- VariableMode mode,
- InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag,
- int slot_index) {
- DisallowHeapAllocation no_gc;
- Handle<String> internalized_name;
- if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name).
- ToHandle(&internalized_name)) {
- int index = Hash(*data, *name);
- Key& key = keys_[index];
- DCHECK(key.data == *data);
- DCHECK(key.name->Equals(*name));
- Value result(values_[index]);
- DCHECK(result.mode() == mode);
- DCHECK(result.initialization_flag() == init_flag);
- DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag);
- DCHECK(result.index() + kNotFound == slot_index);
- }
-}
-
-
static void PrintList(const char* list_name,
int nof_internal_slots,
int start,
@@ -761,26 +700,5 @@ void ScopeInfo::Print() {
#endif // DEBUG
-//---------------------------------------------------------------------------
-// ModuleInfo.
-
-Handle<ModuleInfo> ModuleInfo::Create(Isolate* isolate,
- ModuleDescriptor* descriptor,
- Scope* scope) {
- Handle<ModuleInfo> info = Allocate(isolate, descriptor->Length());
- info->set_host_index(descriptor->Index());
- int i = 0;
- for (ModuleDescriptor::Iterator it = descriptor->iterator(); !it.done();
- it.Advance(), ++i) {
- Variable* var = scope->LookupLocal(it.local_name());
- info->set_name(i, *(it.export_name()->string()));
- info->set_mode(i, var->mode());
- DCHECK(var->index() >= 0);
- info->set_index(i, var->index());
- }
- DCHECK(i == info->length());
- return info;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopeinfo.h b/deps/v8/src/ast/scopeinfo.h
index 489a672ed8..515c88b7de 100644
--- a/deps/v8/src/ast/scopeinfo.h
+++ b/deps/v8/src/ast/scopeinfo.h
@@ -12,163 +12,6 @@
namespace v8 {
namespace internal {
-// Cache for mapping (data, property name) into context slot index.
-// The cache contains both positive and negative results.
-// Slot index equals -1 means the property is absent.
-// Cleared at startup and prior to mark sweep collection.
-class ContextSlotCache {
- public:
- // Lookup context slot index for (data, name).
- // If absent, kNotFound is returned.
- int Lookup(Object* data, String* name, VariableMode* mode,
- InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
-
- // Update an element in the cache.
- void Update(Handle<Object> data, Handle<String> name, VariableMode mode,
- InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag, int slot_index);
-
- // Clear the cache.
- void Clear();
-
- static const int kNotFound = -2;
-
- private:
- ContextSlotCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].data = NULL;
- keys_[i].name = NULL;
- values_[i] = kNotFound;
- }
- }
-
- inline static int Hash(Object* data, String* name);
-
-#ifdef DEBUG
- void ValidateEntry(Handle<Object> data, Handle<String> name,
- VariableMode mode, InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag, int slot_index);
-#endif
-
- static const int kLength = 256;
- struct Key {
- Object* data;
- String* name;
- };
-
- struct Value {
- Value(VariableMode mode, InitializationFlag init_flag,
- MaybeAssignedFlag maybe_assigned_flag, int index) {
- DCHECK(ModeField::is_valid(mode));
- DCHECK(InitField::is_valid(init_flag));
- DCHECK(MaybeAssignedField::is_valid(maybe_assigned_flag));
- DCHECK(IndexField::is_valid(index));
- value_ = ModeField::encode(mode) | IndexField::encode(index) |
- InitField::encode(init_flag) |
- MaybeAssignedField::encode(maybe_assigned_flag);
- DCHECK(mode == this->mode());
- DCHECK(init_flag == this->initialization_flag());
- DCHECK(maybe_assigned_flag == this->maybe_assigned_flag());
- DCHECK(index == this->index());
- }
-
- explicit inline Value(uint32_t value) : value_(value) {}
-
- uint32_t raw() { return value_; }
-
- VariableMode mode() { return ModeField::decode(value_); }
-
- InitializationFlag initialization_flag() {
- return InitField::decode(value_);
- }
-
- MaybeAssignedFlag maybe_assigned_flag() {
- return MaybeAssignedField::decode(value_);
- }
-
- int index() { return IndexField::decode(value_); }
-
- // Bit fields in value_ (type, shift, size). Must be public so the
- // constants can be embedded in generated code.
- class ModeField : public BitField<VariableMode, 0, 4> {};
- class InitField : public BitField<InitializationFlag, 4, 1> {};
- class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {};
- class IndexField : public BitField<int, 6, 32 - 6> {};
-
- private:
- uint32_t value_;
- };
-
- Key keys_[kLength];
- uint32_t values_[kLength];
-
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(ContextSlotCache);
-};
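Note: the removed Value struct packs four fields into a single uint32_t via
BitField, with widths 4 (mode), 1 (init flag), 1 (maybe-assigned) and 26
(index), matching the shifts above. A hand-rolled encode/decode of the same
layout for illustration:

#include <cstdint>

uint32_t EncodeSketch(uint32_t mode, uint32_t init, uint32_t assigned,
                      uint32_t index) {
  return (mode & 0xF) |               // bits 0..3
         ((init & 0x1) << 4) |        // bit 4
         ((assigned & 0x1) << 5) |    // bit 5
         ((index & 0x3FFFFFF) << 6);  // bits 6..31
}

uint32_t DecodeIndexSketch(uint32_t value) { return value >> 6; }
uint32_t DecodeModeSketch(uint32_t value) { return value & 0xF; }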
-
-
-
-
-//---------------------------------------------------------------------------
-// Auxiliary class used for the description of module instances.
-// Used by Runtime_DeclareModules.
-
-class ModuleInfo: public FixedArray {
- public:
- static ModuleInfo* cast(Object* description) {
- return static_cast<ModuleInfo*>(FixedArray::cast(description));
- }
-
- static Handle<ModuleInfo> Create(Isolate* isolate,
- ModuleDescriptor* descriptor, Scope* scope);
-
- // Index of module's context in host context.
- int host_index() { return Smi::cast(get(HOST_OFFSET))->value(); }
-
- // Name, mode, and index of the i-th export, respectively.
- // For value exports, the index is the slot of the value in the module
- // context, for exported modules it is the slot index of the
- // referred module's context in the host context.
- // TODO(rossberg): This format cannot yet handle exports of modules declared
- // in earlier scripts.
- String* name(int i) { return String::cast(get(name_offset(i))); }
- VariableMode mode(int i) {
- return static_cast<VariableMode>(Smi::cast(get(mode_offset(i)))->value());
- }
- int index(int i) { return Smi::cast(get(index_offset(i)))->value(); }
-
- int length() { return (FixedArray::length() - HEADER_SIZE) / ITEM_SIZE; }
-
- private:
- // The internal format is: Index, (Name, VariableMode, Index)*
- enum {
- HOST_OFFSET,
- NAME_OFFSET,
- MODE_OFFSET,
- INDEX_OFFSET,
- HEADER_SIZE = NAME_OFFSET,
- ITEM_SIZE = INDEX_OFFSET - NAME_OFFSET + 1
- };
- inline int name_offset(int i) { return NAME_OFFSET + i * ITEM_SIZE; }
- inline int mode_offset(int i) { return MODE_OFFSET + i * ITEM_SIZE; }
- inline int index_offset(int i) { return INDEX_OFFSET + i * ITEM_SIZE; }
-
- static Handle<ModuleInfo> Allocate(Isolate* isolate, int length) {
- return Handle<ModuleInfo>::cast(
- isolate->factory()->NewFixedArray(HEADER_SIZE + ITEM_SIZE * length));
- }
- void set_host_index(int index) { set(HOST_OFFSET, Smi::FromInt(index)); }
- void set_name(int i, String* name) { set(name_offset(i), name); }
- void set_mode(int i, VariableMode mode) {
- set(mode_offset(i), Smi::FromInt(mode));
- }
- void set_index(int i, int index) {
- set(index_offset(i), Smi::FromInt(index));
- }
-};
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 5d4b809876..7689786ce4 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -4,11 +4,12 @@
#include "src/ast/scopes.h"
+#include <set>
+
#include "src/accessors.h"
-#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/messages.h"
-#include "src/parsing/parser.h" // for ParseInfo
+#include "src/parsing/parse-info.h"
namespace v8 {
namespace internal {
@@ -23,25 +24,26 @@ namespace internal {
// this is ensured.
VariableMap::VariableMap(Zone* zone)
- : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)),
- zone_(zone) {}
-VariableMap::~VariableMap() {}
+ : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)) {}
-Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
- VariableMode mode, Variable::Kind kind,
+Variable* VariableMap::Declare(Zone* zone, Scope* scope,
+ const AstRawString* name, VariableMode mode,
+ Variable::Kind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag) {
+ MaybeAssignedFlag maybe_assigned_flag,
+ bool* added) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
// FIXME(marja): fix the type of Lookup.
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
- ZoneAllocationPolicy(zone()));
- if (p->value == NULL) {
+ ZoneAllocationPolicy(zone));
+ if (added) *added = p->value == nullptr;
+ if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
DCHECK(p->key == name);
- p->value = new (zone()) Variable(scope, name, mode, kind,
- initialization_flag, maybe_assigned_flag);
+ p->value = new (zone) Variable(scope, name, mode, kind, initialization_flag,
+ maybe_assigned_flag);
}
return reinterpret_cast<Variable*>(p->value);
}
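Note: the new |added| out-parameter exposes the usual lookup-or-insert idiom:
one hash probe either finds the existing variable or creates it, and the
caller learns which happened. A std::map analogue of the same shape (sketch
only, not the ZoneHashMap API):

#include <map>
#include <string>

template <typename V>
V* DeclareSketch(std::map<std::string, V>& map, const std::string& name,
                 const V& fresh, bool* added) {
  auto result = map.emplace(name, fresh);  // inserts only if absent
  if (added != nullptr) *added = result.second;
  return &result.first->second;  // existing entry or the new one
}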
@@ -57,229 +59,373 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
return NULL;
}
-
SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
- : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)),
- zone_(zone) {}
-SloppyBlockFunctionMap::~SloppyBlockFunctionMap() {}
+ : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)) {}
-
-void SloppyBlockFunctionMap::Declare(const AstRawString* name,
+void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
SloppyBlockFunctionStatement* stmt) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
- ZoneAllocationPolicy(zone_));
- if (p->value == nullptr) {
- p->value = new (zone_->New(sizeof(Vector))) Vector(zone_);
- }
- Vector* delegates = static_cast<Vector*>(p->value);
- delegates->push_back(stmt);
+ ZoneAllocationPolicy(zone));
+ stmt->set_next(static_cast<SloppyBlockFunctionStatement*>(p->value));
+ p->value = stmt;
}
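Note: instead of zone-allocating a Vector of delegates per name, the map now
threads an intrusive singly-linked list through the statements themselves via
set_next: the newest statement becomes the hash-map value and points at the
previous head. A generic sketch of the pattern (StmtSketch is illustrative):

struct StmtSketch {
  StmtSketch* next = nullptr;  // intrusive link stored in the node itself
};

// O(1) push with no extra allocation; note the list ends up in reverse
// (most recently declared first), which callers must account for.
void PushSketch(StmtSketch** head, StmtSketch* stmt) {
  stmt->next = *head;  // previous head (possibly null) becomes the tail
  *head = stmt;
}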
// ----------------------------------------------------------------------------
// Implementation of Scope
-Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
- AstValueFactory* ast_value_factory, FunctionKind function_kind)
- : inner_scopes_(4, zone),
+Scope::Scope(Zone* zone)
+ : zone_(zone),
+ outer_scope_(nullptr),
variables_(zone),
- temps_(4, zone),
- params_(4, zone),
- unresolved_(16, zone),
+ ordered_variables_(4, zone),
decls_(4, zone),
- module_descriptor_(
- scope_type == MODULE_SCOPE ? ModuleDescriptor::New(zone) : NULL),
- sloppy_block_function_map_(zone),
- already_resolved_(false),
- ast_value_factory_(ast_value_factory),
- zone_(zone) {
- SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null(),
- function_kind);
- // The outermost scope must be a script scope.
- DCHECK(scope_type == SCRIPT_SCOPE || outer_scope != NULL);
+ scope_type_(SCRIPT_SCOPE) {
+ SetDefaults();
}
-Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
- Handle<ScopeInfo> scope_info, AstValueFactory* value_factory)
- : inner_scopes_(4, zone),
+Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
+ : zone_(zone),
+ outer_scope_(outer_scope),
variables_(zone),
+ ordered_variables_(4, zone),
+ decls_(4, zone),
+ scope_type_(scope_type) {
+ DCHECK_NE(SCRIPT_SCOPE, scope_type);
+ SetDefaults();
+ set_language_mode(outer_scope->language_mode());
+ force_context_allocation_ =
+ !is_function_scope() && outer_scope->has_forced_context_allocation();
+ outer_scope_->AddInnerScope(this);
+}
+
+Scope::Snapshot::Snapshot(Scope* scope)
+ : outer_scope_(scope),
+ top_inner_scope_(scope->inner_scope_),
+ top_unresolved_(scope->unresolved_),
+ top_temp_(scope->GetClosureScope()->temps()->length()) {}
+
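Note: Scope::Snapshot records the current heads and lengths of a few growable
lists (inner scopes, unresolved references, temps); presumably a later
restore truncates back to these marks to discard speculative parse results.
A generic mark/rewind sketch under that assumption:

#include <vector>

template <typename T>
struct MarkSketch {
  std::vector<T>* list;
  size_t mark;
  explicit MarkSketch(std::vector<T>* l) : list(l), mark(l->size()) {}
  void Rewind() { list->resize(mark); }  // drop everything added since
};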
+DeclarationScope::DeclarationScope(Zone* zone)
+ : Scope(zone),
+ function_kind_(kNormalFunction),
temps_(4, zone),
params_(4, zone),
- unresolved_(16, zone),
- decls_(4, zone),
- module_descriptor_(NULL),
- sloppy_block_function_map_(zone),
- already_resolved_(true),
- ast_value_factory_(value_factory),
- zone_(zone) {
- SetDefaults(scope_type, NULL, scope_info);
- if (!scope_info.is_null()) {
- num_heap_slots_ = scope_info_->ContextLength();
- }
- // Ensure at least MIN_CONTEXT_SLOTS to indicate a materialized context.
- num_heap_slots_ = Max(num_heap_slots_,
- static_cast<int>(Context::MIN_CONTEXT_SLOTS));
- AddInnerScope(inner_scope);
+ sloppy_block_function_map_(zone) {
+ SetDefaults();
}
-Scope::Scope(Zone* zone, Scope* inner_scope,
- const AstRawString* catch_variable_name,
- AstValueFactory* value_factory)
- : inner_scopes_(1, zone),
+DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
+ ScopeType scope_type,
+ FunctionKind function_kind)
+ : Scope(zone, outer_scope, scope_type),
+ function_kind_(function_kind),
+ temps_(4, zone),
+ params_(4, zone),
+ sloppy_block_function_map_(zone) {
+ SetDefaults();
+ asm_function_ = outer_scope_->IsAsmModule();
+}
+
+ModuleScope::ModuleScope(Zone* zone, DeclarationScope* script_scope,
+ AstValueFactory* ast_value_factory)
+ : DeclarationScope(zone, script_scope, MODULE_SCOPE) {
+ module_descriptor_ = new (zone) ModuleDescriptor(zone);
+ set_language_mode(STRICT);
+ DeclareThis(ast_value_factory);
+}
+
+Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
+ Handle<ScopeInfo> scope_info)
+ : zone_(zone),
+ outer_scope_(nullptr),
variables_(zone),
+ ordered_variables_(0, zone),
+ decls_(0, zone),
+ scope_info_(scope_info),
+ scope_type_(scope_type) {
+ SetDefaults();
+#ifdef DEBUG
+ already_resolved_ = true;
+#endif
+ if (scope_type == WITH_SCOPE) {
+ DCHECK(scope_info.is_null());
+ } else {
+ if (scope_info->CallsEval()) RecordEvalCall();
+ set_language_mode(scope_info->language_mode());
+ num_heap_slots_ = scope_info->ContextLength();
+ }
+ DCHECK_LE(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
+
+ if (inner_scope != nullptr) AddInnerScope(inner_scope);
+}
+
+DeclarationScope::DeclarationScope(Zone* zone, Scope* inner_scope,
+ ScopeType scope_type,
+ Handle<ScopeInfo> scope_info)
+ : Scope(zone, inner_scope, scope_type, scope_info),
+ function_kind_(scope_info->function_kind()),
temps_(0, zone),
params_(0, zone),
- unresolved_(0, zone),
+ sloppy_block_function_map_(zone) {
+ SetDefaults();
+}
+
+Scope::Scope(Zone* zone, Scope* inner_scope,
+ const AstRawString* catch_variable_name)
+ : zone_(zone),
+ outer_scope_(nullptr),
+ variables_(zone),
+ ordered_variables_(0, zone),
decls_(0, zone),
- module_descriptor_(NULL),
- sloppy_block_function_map_(zone),
- already_resolved_(true),
- ast_value_factory_(value_factory),
- zone_(zone) {
- SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
- AddInnerScope(inner_scope);
- ++num_var_or_const_;
- num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
- Variable* variable = variables_.Declare(this,
- catch_variable_name,
- VAR,
- Variable::NORMAL,
- kCreatedInitialized);
+ scope_type_(CATCH_SCOPE) {
+ SetDefaults();
+#ifdef DEBUG
+ already_resolved_ = true;
+#endif
+ if (inner_scope != nullptr) AddInnerScope(inner_scope);
+ Variable* variable =
+ variables_.Declare(zone, this, catch_variable_name, VAR, Variable::NORMAL,
+ kCreatedInitialized);
AllocateHeapSlot(variable);
}
-
-void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
- Handle<ScopeInfo> scope_info,
- FunctionKind function_kind) {
- outer_scope_ = outer_scope;
- scope_type_ = scope_type;
- is_declaration_scope_ =
- is_eval_scope() || is_function_scope() ||
- is_module_scope() || is_script_scope();
- function_kind_ = function_kind;
- scope_name_ = ast_value_factory_->empty_string();
- dynamics_ = nullptr;
+void DeclarationScope::SetDefaults() {
+ is_declaration_scope_ = true;
+ has_simple_parameters_ = true;
+ asm_module_ = false;
+ asm_function_ = false;
+ force_eager_compilation_ = false;
+ has_arguments_parameter_ = false;
+ scope_uses_super_property_ = false;
receiver_ = nullptr;
new_target_ = nullptr;
function_ = nullptr;
arguments_ = nullptr;
this_function_ = nullptr;
- scope_inside_with_ = false;
- scope_calls_eval_ = false;
- scope_uses_arguments_ = false;
- scope_uses_super_property_ = false;
- asm_module_ = false;
- asm_function_ = outer_scope != NULL && outer_scope->asm_module_;
- // Inherit the language mode from the parent scope.
- language_mode_ = outer_scope != NULL ? outer_scope->language_mode_ : SLOPPY;
- outer_scope_calls_sloppy_eval_ = false;
- inner_scope_calls_eval_ = false;
- scope_nonlinear_ = false;
- force_eager_compilation_ = false;
- force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
- ? outer_scope->has_forced_context_allocation() : false;
- num_var_or_const_ = 0;
- num_stack_slots_ = 0;
- num_heap_slots_ = 0;
- num_global_slots_ = 0;
arity_ = 0;
- has_simple_parameters_ = true;
- rest_parameter_ = NULL;
rest_index_ = -1;
- scope_info_ = scope_info;
- start_position_ = RelocInfo::kNoPosition;
- end_position_ = RelocInfo::kNoPosition;
- if (!scope_info.is_null()) {
- scope_calls_eval_ = scope_info->CallsEval();
- language_mode_ = scope_info->language_mode();
- is_declaration_scope_ = scope_info->is_declaration_scope();
- function_kind_ = scope_info->function_kind();
- }
}
+void Scope::SetDefaults() {
+#ifdef DEBUG
+ scope_name_ = nullptr;
+ already_resolved_ = false;
+#endif
+ inner_scope_ = nullptr;
+ sibling_ = nullptr;
+ unresolved_ = nullptr;
+ dynamics_ = nullptr;
+
+ start_position_ = kNoSourcePosition;
+ end_position_ = kNoSourcePosition;
+
+ num_stack_slots_ = 0;
+ num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
+ num_global_slots_ = 0;
+
+ set_language_mode(SLOPPY);
+
+ scope_calls_eval_ = false;
+ scope_nonlinear_ = false;
+ is_hidden_ = false;
+ is_debug_evaluate_scope_ = false;
+
+ inner_scope_calls_eval_ = false;
+ force_context_allocation_ = false;
+
+ is_declaration_scope_ = false;
+}
+
+bool Scope::HasSimpleParameters() {
+ DeclarationScope* scope = GetClosureScope();
+ return !scope->is_function_scope() || scope->has_simple_parameters();
+}
+
+bool Scope::IsAsmModule() const {
+ return is_function_scope() && AsDeclarationScope()->asm_module();
+}
+
+bool Scope::IsAsmFunction() const {
+ return is_function_scope() && AsDeclarationScope()->asm_function();
+}
Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
- Context* context, Scope* script_scope) {
+ Context* context,
+ DeclarationScope* script_scope,
+ AstValueFactory* ast_value_factory,
+ DeserializationMode deserialization_mode) {
// Reconstruct the outer scope chain from a closure's context chain.
- Scope* current_scope = NULL;
- Scope* innermost_scope = NULL;
+ Scope* current_scope = nullptr;
+ Scope* innermost_scope = nullptr;
while (!context->IsNativeContext()) {
if (context->IsWithContext() || context->IsDebugEvaluateContext()) {
// For scope analysis, debug-evaluate is equivalent to a with scope.
Scope* with_scope = new (zone)
- Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>::null(),
- script_scope->ast_value_factory_);
- current_scope = with_scope;
- // All the inner scopes are inside a with.
- for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
- s->scope_inside_with_ = true;
+ Scope(zone, current_scope, WITH_SCOPE, Handle<ScopeInfo>());
+ // TODO(yangguo): Remove once debug-evaluate properly keeps track of the
+ // function scope in which we are evaluating.
+ if (context->IsDebugEvaluateContext()) {
+ with_scope->set_is_debug_evaluate_scope();
}
+ current_scope = with_scope;
} else if (context->IsScriptContext()) {
- ScopeInfo* scope_info = context->scope_info();
- current_scope = new (zone) Scope(zone, current_scope, SCRIPT_SCOPE,
- Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_);
- } else if (context->IsModuleContext()) {
- ScopeInfo* scope_info = context->module()->scope_info();
- current_scope = new (zone) Scope(zone, current_scope, MODULE_SCOPE,
- Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_);
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
+ DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
+ current_scope = new (zone)
+ DeclarationScope(zone, current_scope, SCRIPT_SCOPE, scope_info);
} else if (context->IsFunctionContext()) {
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- current_scope = new (zone) Scope(zone, current_scope, FUNCTION_SCOPE,
- Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_);
- if (scope_info->IsAsmFunction()) current_scope->asm_function_ = true;
- if (scope_info->IsAsmModule()) current_scope->asm_module_ = true;
+ Handle<ScopeInfo> scope_info(context->closure()->shared()->scope_info(),
+ isolate);
+ // TODO(neis): For an eval scope, we currently create an ordinary function
+ // context. This is wrong and needs to be fixed.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=5295
+ DCHECK(scope_info->scope_type() == FUNCTION_SCOPE ||
+ scope_info->scope_type() == EVAL_SCOPE);
+ DeclarationScope* function_scope = new (zone)
+ DeclarationScope(zone, current_scope, FUNCTION_SCOPE, scope_info);
+ if (scope_info->IsAsmFunction()) function_scope->set_asm_function();
+ if (scope_info->IsAsmModule()) function_scope->set_asm_module();
+ current_scope = function_scope;
} else if (context->IsBlockContext()) {
- ScopeInfo* scope_info = context->scope_info();
- current_scope = new (zone)
- Scope(zone, current_scope, BLOCK_SCOPE, Handle<ScopeInfo>(scope_info),
- script_scope->ast_value_factory_);
+ Handle<ScopeInfo> scope_info(context->scope_info(), isolate);
+ DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
+ if (scope_info->is_declaration_scope()) {
+ current_scope = new (zone)
+ DeclarationScope(zone, current_scope, BLOCK_SCOPE, scope_info);
+ } else {
+ current_scope =
+ new (zone) Scope(zone, current_scope, BLOCK_SCOPE, scope_info);
+ }
} else {
DCHECK(context->IsCatchContext());
String* name = context->catch_name();
- current_scope = new (zone) Scope(
- zone, current_scope,
- script_scope->ast_value_factory_->GetString(Handle<String>(name)),
- script_scope->ast_value_factory_);
+ current_scope =
+ new (zone) Scope(zone, current_scope,
+ ast_value_factory->GetString(handle(name, isolate)));
}
- if (innermost_scope == NULL) innermost_scope = current_scope;
+ if (deserialization_mode == DeserializationMode::kDeserializeOffHeap) {
+ current_scope->DeserializeScopeInfo(isolate, ast_value_factory);
+ }
+ if (innermost_scope == nullptr) innermost_scope = current_scope;
context = context->previous();
}
script_scope->AddInnerScope(current_scope);
- script_scope->PropagateScopeInfo(false);
+ script_scope->PropagateScopeInfo();
return (innermost_scope == NULL) ? script_scope : innermost_scope;
}
+void Scope::DeserializeScopeInfo(Isolate* isolate,
+ AstValueFactory* ast_value_factory) {
+ if (scope_info_.is_null()) return;
+
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+
+ std::set<const AstRawString*> names_seen;
+  // Internalize context local and global variables.
+ for (int var = 0; var < scope_info_->ContextLocalCount() +
+ scope_info_->ContextGlobalCount();
+ ++var) {
+ Handle<String> name_handle(scope_info_->ContextLocalName(var), isolate);
+ const AstRawString* name = ast_value_factory->GetString(name_handle);
+ if (!names_seen.insert(name).second) continue;
+ int index = Context::MIN_CONTEXT_SLOTS + var;
+ VariableMode mode = scope_info_->ContextLocalMode(var);
+ InitializationFlag init_flag = scope_info_->ContextLocalInitFlag(var);
+ MaybeAssignedFlag maybe_assigned_flag =
+ scope_info_->ContextLocalMaybeAssignedFlag(var);
+ VariableLocation location = var < scope_info_->ContextLocalCount()
+ ? VariableLocation::CONTEXT
+ : VariableLocation::GLOBAL;
+ Variable::Kind kind = Variable::NORMAL;
+ if (index == scope_info_->ReceiverContextSlotIndex()) {
+ kind = Variable::THIS;
+ }
-bool Scope::Analyze(ParseInfo* info) {
- DCHECK(info->literal() != NULL);
- DCHECK(info->scope() == NULL);
- Scope* scope = info->literal()->scope();
- Scope* top = scope;
+ Variable* result = variables_.Declare(zone(), this, name, mode, kind,
+ init_flag, maybe_assigned_flag);
+ result->AllocateTo(location, index);
+ }
- // Traverse the scope tree up to the first unresolved scope or the global
- // scope and start scope resolution and variable allocation from that scope.
- while (!top->is_script_scope() &&
- !top->outer_scope()->already_resolved()) {
- top = top->outer_scope();
+ // We must read parameters from the end since for multiply declared
+ // parameters the value of the last declaration of that parameter is used
+  // inside a function (and thus we need to look at the last index). This was
+  // bug 1110337.
+ for (int index = scope_info_->ParameterCount() - 1; index >= 0; --index) {
+ Handle<String> name_handle(scope_info_->ParameterName(index), isolate);
+ const AstRawString* name = ast_value_factory->GetString(name_handle);
+ if (!names_seen.insert(name).second) continue;
+
+ VariableMode mode = DYNAMIC;
+ InitializationFlag init_flag = kCreatedInitialized;
+ MaybeAssignedFlag maybe_assigned_flag = kMaybeAssigned;
+ VariableLocation location = VariableLocation::LOOKUP;
+ Variable::Kind kind = Variable::NORMAL;
+
+ Variable* result = variables_.Declare(zone(), this, name, mode, kind,
+ init_flag, maybe_assigned_flag);
+ result->AllocateTo(location, index);
}
+ // Internalize function proxy for this scope.
+ if (scope_info_->HasFunctionName()) {
+ Handle<String> name_handle(scope_info_->FunctionName(), isolate);
+ const AstRawString* name = ast_value_factory->GetString(name_handle);
+ VariableMode mode;
+ int index = scope_info_->FunctionContextSlotIndex(*name_handle, &mode);
+ if (index >= 0) {
+ Variable* result = AsDeclarationScope()->DeclareFunctionVar(name);
+ DCHECK_EQ(mode, result->mode());
+ result->AllocateTo(VariableLocation::CONTEXT, index);
+ }
+ }
+
+ scope_info_ = Handle<ScopeInfo>::null();
+}
+
+DeclarationScope* Scope::AsDeclarationScope() {
+ DCHECK(is_declaration_scope());
+ return static_cast<DeclarationScope*>(this);
+}
+
+const DeclarationScope* Scope::AsDeclarationScope() const {
+ DCHECK(is_declaration_scope());
+ return static_cast<const DeclarationScope*>(this);
+}
+
+ModuleScope* Scope::AsModuleScope() {
+ DCHECK(is_module_scope());
+ return static_cast<ModuleScope*>(this);
+}
+
+const ModuleScope* Scope::AsModuleScope() const {
+ DCHECK(is_module_scope());
+ return static_cast<const ModuleScope*>(this);
+}
+
+int Scope::num_parameters() const {
+ return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
+}
+
+void Scope::Analyze(ParseInfo* info) {
+ DCHECK(info->literal() != NULL);
+ DeclarationScope* scope = info->literal()->scope();
+
+ // We are compiling one of three cases:
+ // 1) top-level code,
+  // 2) a function/eval/module at the top level, or
+ // 3) a function/eval in a scope that was already resolved.
+ DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
+ scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
+ scope->outer_scope()->already_resolved_);
+
// Allocate the variables.
{
AstNodeFactory ast_node_factory(info->ast_value_factory());
- if (!top->AllocateVariables(info, &ast_node_factory)) {
- DCHECK(top->pending_error_handler_.has_pending_error());
- top->pending_error_handler_.ThrowPendingError(info->isolate(),
- info->script());
- return false;
- }
+ scope->AllocateVariables(info, &ast_node_factory);
}
#ifdef DEBUG
@@ -287,59 +433,58 @@ bool Scope::Analyze(ParseInfo* info) {
: FLAG_print_scopes) {
scope->Print();
}
+ scope->CheckScopePositions();
+ scope->CheckZones();
#endif
-
- info->set_scope(scope);
- return true;
}
+void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
+ DCHECK(!already_resolved_);
+ DCHECK(is_declaration_scope());
+ DCHECK(has_this_declaration());
-void Scope::Initialize() {
- DCHECK(!already_resolved());
-
- // Add this scope as a new inner scope of the outer scope.
- if (outer_scope_ != NULL) {
- outer_scope_->inner_scopes_.Add(this, zone());
- scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
- } else {
- scope_inside_with_ = is_with_scope();
- }
-
- // Declare convenience variables and the receiver.
- if (is_declaration_scope() && has_this_declaration()) {
- bool subclass_constructor = IsSubclassConstructor(function_kind_);
- Variable* var = variables_.Declare(
- this, ast_value_factory_->this_string(),
- subclass_constructor ? CONST : VAR, Variable::THIS,
- subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
- receiver_ = var;
- }
+ bool subclass_constructor = IsSubclassConstructor(function_kind_);
+ Variable* var = Declare(
+ zone(), this, ast_value_factory->this_string(),
+ subclass_constructor ? CONST : VAR, Variable::THIS,
+ subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
+ receiver_ = var;
+}
- if (is_function_scope() && !is_arrow_scope()) {
- // Declare 'arguments' variable which exists in all non arrow functions.
- // Note that it might never be accessed, in which case it won't be
- // allocated during variable allocation.
- variables_.Declare(this, ast_value_factory_->arguments_string(), VAR,
+void DeclarationScope::DeclareDefaultFunctionVariables(
+ AstValueFactory* ast_value_factory) {
+ DCHECK(is_function_scope());
+ DCHECK(!is_arrow_scope());
+  // Declare the 'arguments' variable, which exists in all non-arrow
+  // functions. Note that it might never be accessed, in which case it won't
+  // be allocated during variable allocation.
+ arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(), VAR,
Variable::ARGUMENTS, kCreatedInitialized);
- variables_.Declare(this, ast_value_factory_->new_target_string(), CONST,
- Variable::NORMAL, kCreatedInitialized);
+ new_target_ = Declare(zone(), this, ast_value_factory->new_target_string(),
+ CONST, Variable::NORMAL, kCreatedInitialized);
- if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
- IsAccessorFunction(function_kind_)) {
- variables_.Declare(this, ast_value_factory_->this_function_string(),
- CONST, Variable::NORMAL, kCreatedInitialized);
- }
+ if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
+ IsAccessorFunction(function_kind_)) {
+ this_function_ =
+ Declare(zone(), this, ast_value_factory->this_function_string(), CONST,
+ Variable::NORMAL, kCreatedInitialized);
}
}
+Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
+ DCHECK(is_function_scope());
+ DCHECK_NULL(function_);
+ VariableMode mode = is_strict(language_mode()) ? CONST : CONST_LEGACY;
+ function_ = new (zone())
+ Variable(this, name, mode, Variable::NORMAL, kCreatedInitialized);
+ return function_;
+}
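+// The variable declared by DeclareFunctionVar above is the binding for the
+// name of a named function literal, e.g. the inner 'g' in:
+//   var f = function g() { return g; };
+// It is const (legacy const in sloppy mode), so assignments to 'g' inside the
+// function body do not rebind it.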
Scope* Scope::FinalizeBlockScope() {
DCHECK(is_block_scope());
- DCHECK(temps_.is_empty());
- DCHECK(params_.is_empty());
- if (num_var_or_const() > 0 ||
+ if (variables_.occupancy() > 0 ||
(is_declaration_scope() && calls_sloppy_eval())) {
return this;
}
@@ -348,27 +493,89 @@ Scope* Scope::FinalizeBlockScope() {
outer_scope()->RemoveInnerScope(this);
// Reparent inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- outer_scope()->AddInnerScope(inner_scopes_[i]);
+ if (inner_scope_ != nullptr) {
+ Scope* scope = inner_scope_;
+ scope->outer_scope_ = outer_scope();
+ while (scope->sibling_ != nullptr) {
+ scope = scope->sibling_;
+ scope->outer_scope_ = outer_scope();
+ }
+ scope->sibling_ = outer_scope()->inner_scope_;
+ outer_scope()->inner_scope_ = inner_scope_;
+ inner_scope_ = nullptr;
}
// Move unresolved variables
- for (int i = 0; i < unresolved_.length(); i++) {
- outer_scope()->unresolved_.Add(unresolved_[i], zone());
+ if (unresolved_ != nullptr) {
+ if (outer_scope()->unresolved_ != nullptr) {
+ VariableProxy* unresolved = unresolved_;
+ while (unresolved->next_unresolved() != nullptr) {
+ unresolved = unresolved->next_unresolved();
+ }
+ unresolved->set_next_unresolved(outer_scope()->unresolved_);
+ }
+ outer_scope()->unresolved_ = unresolved_;
+ unresolved_ = nullptr;
}
PropagateUsageFlagsToScope(outer_scope_);
-
+ // This block does not need a context.
+ num_heap_slots_ = 0;
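+  // E.g. a block such as '{ f(); }' declares nothing, so its scope is
+  // finalized away: the reparenting above keeps its contents alive while the
+  // block scope itself is dropped by returning NULL.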
return NULL;
}
+void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
+ DCHECK_EQ(new_parent, outer_scope_->inner_scope_);
+ DCHECK_EQ(new_parent->outer_scope_, outer_scope_);
+ DCHECK_EQ(new_parent, new_parent->GetClosureScope());
+ DCHECK_NULL(new_parent->inner_scope_);
+ DCHECK_NULL(new_parent->unresolved_);
+ DCHECK_EQ(0, new_parent->temps()->length());
+ Scope* inner_scope = new_parent->sibling_;
+ if (inner_scope != top_inner_scope_) {
+ for (; inner_scope->sibling() != top_inner_scope_;
+ inner_scope = inner_scope->sibling()) {
+ inner_scope->outer_scope_ = new_parent;
+ DCHECK_NE(inner_scope, new_parent);
+ }
+ inner_scope->outer_scope_ = new_parent;
+
+ new_parent->inner_scope_ = new_parent->sibling_;
+ inner_scope->sibling_ = nullptr;
+ // Reset the sibling rather than the inner_scope_ since we
+ // want to keep new_parent there.
+ new_parent->sibling_ = top_inner_scope_;
+ }
+
+ if (outer_scope_->unresolved_ != top_unresolved_) {
+ VariableProxy* last = outer_scope_->unresolved_;
+ while (last->next_unresolved() != top_unresolved_) {
+ last = last->next_unresolved();
+ }
+ last->set_next_unresolved(nullptr);
+ new_parent->unresolved_ = outer_scope_->unresolved_;
+ outer_scope_->unresolved_ = top_unresolved_;
+ }
+
+ if (outer_scope_->GetClosureScope()->temps()->length() != top_temp_) {
+ ZoneList<Variable*>* temps = outer_scope_->GetClosureScope()->temps();
+ for (int i = top_temp_; i < temps->length(); i++) {
+ Variable* temp = temps->at(i);
+ DCHECK_EQ(temp->scope(), temp->scope()->GetClosureScope());
+ DCHECK_NE(temp->scope(), new_parent);
+ temp->set_scope(new_parent);
+ new_parent->AddTemporary(temp);
+ }
+ temps->Rewind(top_temp_);
+ }
+}
void Scope::ReplaceOuterScope(Scope* outer) {
DCHECK_NOT_NULL(outer);
DCHECK_NOT_NULL(outer_scope_);
- DCHECK(!already_resolved());
- DCHECK(!outer->already_resolved());
- DCHECK(!outer_scope_->already_resolved());
+ DCHECK(!already_resolved_);
+ DCHECK(!outer->already_resolved_);
+ DCHECK(!outer_scope_->already_resolved_);
outer_scope_->RemoveInnerScope(this);
outer->AddInnerScope(this);
outer_scope_ = outer;
@@ -377,10 +584,8 @@ void Scope::ReplaceOuterScope(Scope* outer) {
void Scope::PropagateUsageFlagsToScope(Scope* other) {
DCHECK_NOT_NULL(other);
- DCHECK(!already_resolved());
- DCHECK(!other->already_resolved());
- if (uses_arguments()) other->RecordArgumentsUsage();
- if (uses_super_property()) other->RecordSuperPropertyUsage();
+ DCHECK(!already_resolved_);
+ DCHECK(!other->already_resolved_);
if (calls_eval()) other->RecordEvalCall();
}
@@ -396,7 +601,7 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
// it's ok to get the Handle<String> here.
// If we have a serialized scope info, we might find the variable there.
// There should be no local slot with the given name.
- DCHECK(scope_info_->StackSlotIndex(*name_handle) < 0 || is_block_scope());
+ DCHECK(scope_info_->StackSlotIndex(*name_handle) < 0);
// Check context slot lookup.
VariableMode mode;
@@ -436,32 +641,26 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
// TODO(marja, rossberg): Correctly declare FUNCTION, CLASS, NEW_TARGET, and
// ARGUMENTS bindings as their corresponding Variable::Kind.
- Variable* var = variables_.Declare(this, name, mode, kind, init_flag,
+ Variable* var = variables_.Declare(zone(), this, name, mode, kind, init_flag,
maybe_assigned_flag);
var->AllocateTo(location, index);
return var;
}
-
-Variable* Scope::LookupFunctionVar(const AstRawString* name,
- AstNodeFactory* factory) {
- if (function_ != NULL && function_->proxy()->raw_name() == name) {
- return function_->proxy()->var();
+Variable* DeclarationScope::LookupFunctionVar(const AstRawString* name) {
+ if (function_ != nullptr && function_->raw_name() == name) {
+ return function_;
} else if (!scope_info_.is_null()) {
// If we are backed by a scope info, try to lookup the variable there.
VariableMode mode;
int index = scope_info_->FunctionContextSlotIndex(*(name->string()), &mode);
- if (index < 0) return NULL;
- Variable* var = new (zone())
- Variable(this, name, mode, Variable::NORMAL, kCreatedInitialized);
- VariableProxy* proxy = factory->NewVariableProxy(var);
- VariableDeclaration* declaration = factory->NewVariableDeclaration(
- proxy, mode, this, RelocInfo::kNoPosition);
- DeclareFunctionVar(declaration);
+ if (index < 0) return nullptr;
+ Variable* var = DeclareFunctionVar(name);
+ DCHECK_EQ(mode, var->mode());
var->AllocateTo(VariableLocation::CONTEXT, index);
return var;
} else {
- return NULL;
+ return nullptr;
}
}
@@ -476,74 +675,73 @@ Variable* Scope::Lookup(const AstRawString* name) {
return NULL;
}
-
-Variable* Scope::DeclareParameter(
- const AstRawString* name, VariableMode mode,
- bool is_optional, bool is_rest, bool* is_duplicate) {
- DCHECK(!already_resolved());
+Variable* DeclarationScope::DeclareParameter(
+ const AstRawString* name, VariableMode mode, bool is_optional, bool is_rest,
+ bool* is_duplicate, AstValueFactory* ast_value_factory) {
+ DCHECK(!already_resolved_);
DCHECK(is_function_scope());
DCHECK(!is_optional || !is_rest);
Variable* var;
if (mode == TEMPORARY) {
var = NewTemporary(name);
} else {
- var = variables_.Declare(this, name, mode, Variable::NORMAL,
- kCreatedInitialized);
+ var = Declare(zone(), this, name, mode, Variable::NORMAL,
+ kCreatedInitialized);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
}
if (!is_optional && !is_rest && arity_ == params_.length()) {
++arity_;
}
- if (is_rest) {
- DCHECK_NULL(rest_parameter_);
- rest_parameter_ = var;
- rest_index_ = num_parameters();
- }
+ if (is_rest) rest_index_ = num_parameters();
params_.Add(var, zone());
+ if (name == ast_value_factory->arguments_string()) {
+ has_arguments_parameter_ = true;
+ }
return var;
}
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, Variable::Kind kind,
MaybeAssignedFlag maybe_assigned_flag) {
- DCHECK(!already_resolved());
+ DCHECK(!already_resolved_);
// This function handles VAR, LET, and CONST modes. DYNAMIC variables are
// introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
- ++num_var_or_const_;
- return variables_.Declare(this, name, mode, kind, init_flag,
- maybe_assigned_flag);
+ return Declare(zone(), this, name, mode, kind, init_flag,
+ maybe_assigned_flag);
}
-
-Variable* Scope::DeclareDynamicGlobal(const AstRawString* name) {
+Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
+ Variable::Kind kind) {
DCHECK(is_script_scope());
- return variables_.Declare(this,
- name,
- DYNAMIC_GLOBAL,
- Variable::NORMAL,
- kCreatedInitialized);
+ return Declare(zone(), this, name, DYNAMIC_GLOBAL, kind, kCreatedInitialized);
}
bool Scope::RemoveUnresolved(VariableProxy* var) {
- // Most likely (always?) any variable we want to remove
- // was just added before, so we search backwards.
- for (int i = unresolved_.length(); i-- > 0;) {
- if (unresolved_[i] == var) {
- unresolved_.Remove(i);
+ if (unresolved_ == var) {
+ unresolved_ = var->next_unresolved();
+ var->set_next_unresolved(nullptr);
+ return true;
+ }
+ VariableProxy* current = unresolved_;
+ while (current != nullptr) {
+ VariableProxy* next = current->next_unresolved();
+ if (var == next) {
+ current->set_next_unresolved(next->next_unresolved());
+ var->set_next_unresolved(nullptr);
return true;
}
+ current = next;
}
return false;
}
Variable* Scope::NewTemporary(const AstRawString* name) {
- DCHECK(!already_resolved());
- Scope* scope = this->ClosureScope();
+ DeclarationScope* scope = GetClosureScope();
Variable* var = new(zone()) Variable(scope,
name,
TEMPORARY,
@@ -553,21 +751,8 @@ Variable* Scope::NewTemporary(const AstRawString* name) {
return var;
}
-
-bool Scope::RemoveTemporary(Variable* var) {
- // Most likely (always?) any temporary variable we want to remove
- // was just added before, so we search backwards.
- for (int i = temps_.length(); i-- > 0;) {
- if (temps_[i] == var) {
- temps_.Remove(i);
- return true;
- }
- }
- return false;
-}
-
-
void Scope::AddDeclaration(Declaration* declaration) {
+ DCHECK(!already_resolved_);
decls_.Add(declaration, zone());
}
@@ -576,12 +761,8 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
int length = decls_.length();
for (int i = 0; i < length; i++) {
Declaration* decl = decls_[i];
- // We don't create a separate scope to hold the function name of a function
- // expression, so we have to make sure not to consider it when checking for
- // conflicts (since it's conceptually "outside" the declaration scope).
- if (is_function_scope() && decl == function()) continue;
- if (IsLexicalVariableMode(decl->mode()) && !is_block_scope()) continue;
- const AstRawString* name = decl->proxy()->raw_name();
+ VariableMode mode = decl->proxy()->var()->mode();
+ if (IsLexicalVariableMode(mode) && !is_block_scope()) continue;
// Iterate through all scopes until and including the declaration scope.
Scope* previous = NULL;
@@ -590,10 +771,11 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
// captured in Parser::Declare. The only conflicts we still need to check
// are lexical vs VAR, or any declarations within a declaration block scope
// vs lexical declarations in its surrounding (function) scope.
- if (IsLexicalVariableMode(decl->mode())) current = current->outer_scope_;
+ if (IsLexicalVariableMode(mode)) current = current->outer_scope_;
do {
// There is a conflict if there exists a non-VAR binding.
- Variable* other_var = current->variables_.Lookup(name);
+ Variable* other_var =
+ current->variables_.Lookup(decl->proxy()->raw_name());
if (other_var != NULL && IsLexicalVariableMode(other_var->mode())) {
return decl;
}
@@ -604,20 +786,25 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
return NULL;
}
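+// A conflict found here corresponds to code such as:
+//   { let x; var x; }  // SyntaxError: 'x' has already been declared
+// where a hoisted VAR binding meets a lexical binding on its way out.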
-
-class VarAndOrder {
- public:
- VarAndOrder(Variable* var, int order) : var_(var), order_(order) { }
- Variable* var() const { return var_; }
- int order() const { return order_; }
- static int Compare(const VarAndOrder* a, const VarAndOrder* b) {
- return a->order_ - b->order_;
+Declaration* Scope::CheckLexDeclarationsConflictingWith(
+ const ZoneList<const AstRawString*>& names) {
+ DCHECK(is_block_scope());
+ for (int i = 0; i < names.length(); ++i) {
+ Variable* var = LookupLocal(names.at(i));
+ if (var != nullptr) {
+ // Conflict; find and return its declaration.
+ DCHECK(IsLexicalVariableMode(var->mode()));
+ const AstRawString* name = names.at(i);
+ for (int j = 0; j < decls_.length(); ++j) {
+ if (decls_[j]->proxy()->raw_name() == name) {
+ return decls_[j];
+ }
+ }
+ DCHECK(false);
+ }
}
-
- private:
- Variable* var_;
- int order_;
-};
+ return nullptr;
+}
void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ZoneList<Variable*>* context_locals,
@@ -628,34 +815,25 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
// Collect temporaries which are always allocated on the stack, unless the
// context as a whole has forced context allocation.
- for (int i = 0; i < temps_.length(); i++) {
- Variable* var = temps_[i];
- if (var->is_used()) {
- if (var->IsContextSlot()) {
- DCHECK(has_forced_context_allocation());
- context_locals->Add(var, zone());
- } else if (var->IsStackLocal()) {
- stack_locals->Add(var, zone());
- } else {
- DCHECK(var->IsParameter());
+ if (is_declaration_scope()) {
+ ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
+ for (int i = 0; i < temps->length(); i++) {
+ Variable* var = (*temps)[i];
+ if (var->is_used()) {
+ if (var->IsContextSlot()) {
+ DCHECK(has_forced_context_allocation());
+ context_locals->Add(var, zone());
+ } else if (var->IsStackLocal()) {
+ stack_locals->Add(var, zone());
+ } else {
+ DCHECK(var->IsParameter());
+ }
}
}
}
- // Collect declared local variables.
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
- for (VariableMap::Entry* p = variables_.Start();
- p != NULL;
- p = variables_.Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- if (var->is_used()) {
- vars.Add(VarAndOrder(var, p->order), zone());
- }
- }
- vars.Sort(VarAndOrder::Compare);
- int var_count = vars.length();
- for (int i = 0; i < var_count; i++) {
- Variable* var = vars[i].var();
+ for (int i = 0; i < ordered_variables_.length(); i++) {
+ Variable* var = ordered_variables_[i];
if (var->IsStackLocal()) {
stack_locals->Add(var, zone());
} else if (var->IsContextSlot()) {
@@ -666,49 +844,16 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
}
}
-
-bool Scope::AllocateVariables(ParseInfo* info, AstNodeFactory* factory) {
+void DeclarationScope::AllocateVariables(ParseInfo* info,
+ AstNodeFactory* factory) {
// 1) Propagate scope information.
- bool outer_scope_calls_sloppy_eval = false;
- if (outer_scope_ != NULL) {
- outer_scope_calls_sloppy_eval =
- outer_scope_->outer_scope_calls_sloppy_eval() |
- outer_scope_->calls_sloppy_eval();
- }
- PropagateScopeInfo(outer_scope_calls_sloppy_eval);
+ PropagateScopeInfo();
// 2) Resolve variables.
- if (!ResolveVariablesRecursively(info, factory)) return false;
+ ResolveVariablesRecursively(info, factory);
// 3) Allocate variables.
- AllocateVariablesRecursively(info->isolate());
-
- return true;
-}
-
-
-bool Scope::HasTrivialContext() const {
- // A function scope has a trivial context if it always is the global
- // context. We iteratively scan out the context chain to see if
- // there is anything that makes this scope non-trivial; otherwise we
- // return true.
- for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
- if (scope->is_eval_scope()) return false;
- if (scope->scope_inside_with_) return false;
- if (scope->ContextLocalCount() > 0) return false;
- if (scope->ContextGlobalCount() > 0) return false;
- }
- return true;
-}
-
-
-bool Scope::HasTrivialOuterContext() const {
- Scope* outer = outer_scope_;
- if (outer == NULL) return true;
- // Note that the outer context may be trivial in general, but the current
- // scope may be inside a 'with' statement in which case the outer context
- // for this scope is not trivial.
- return !scope_inside_with_ && outer->HasTrivialContext();
+ AllocateVariablesRecursively();
}
@@ -716,35 +861,52 @@ bool Scope::AllowsLazyParsing() const {
// If we are inside a block scope, we must parse eagerly to find out how
// to allocate variables on the block scope. At this point, declarations may
// not have yet been parsed.
- for (const Scope* scope = this; scope != NULL; scope = scope->outer_scope_) {
- if (scope->is_block_scope()) return false;
+ for (const Scope* s = this; s != nullptr; s = s->outer_scope_) {
+ if (s->is_block_scope()) return false;
}
- return AllowsLazyCompilation();
+ return true;
}
-
-bool Scope::AllowsLazyCompilation() const { return !force_eager_compilation_; }
-
-
-bool Scope::AllowsLazyCompilationWithoutContext() const {
- return !force_eager_compilation_ && HasTrivialOuterContext();
+bool DeclarationScope::AllowsLazyCompilation() const {
+ return !force_eager_compilation_;
}
+bool DeclarationScope::AllowsLazyCompilationWithoutContext() const {
+ if (force_eager_compilation_) return false;
+ // Disallow lazy compilation without context if any outer scope needs a
+ // context.
+ for (const Scope* scope = outer_scope_; scope != nullptr;
+ scope = scope->outer_scope_) {
+ if (scope->NeedsContext()) return false;
+ }
+ return true;
+}
-int Scope::ContextChainLength(Scope* scope) {
+int Scope::ContextChainLength(Scope* scope) const {
int n = 0;
- for (Scope* s = this; s != scope; s = s->outer_scope_) {
+ for (const Scope* s = this; s != scope; s = s->outer_scope_) {
DCHECK(s != NULL); // scope must be in the scope chain
if (s->NeedsContext()) n++;
}
return n;
}
+int Scope::ContextChainLengthUntilOutermostSloppyEval() const {
+ int result = 0;
+ int length = 0;
+
+ for (const Scope* s = this; s != nullptr; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ length++;
+ if (s->calls_sloppy_eval()) result = length;
+ }
+
+ return result;
+}
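+// Worked example for ContextChainLengthUntilOutermostSloppyEval: for a block
+// scope with a context, nested in a function that has a context and calls
+// sloppy eval, nested in the script scope, the lengths seen from the block
+// are 1, 2, 3; the function is the outermost sloppy-eval caller, so the
+// result is 2.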
int Scope::MaxNestedContextChainLength() {
int max_context_chain_length = 0;
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* scope = inner_scopes_[i];
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
max_context_chain_length = std::max(scope->MaxNestedContextChainLength(),
max_context_chain_length);
}
@@ -754,32 +916,30 @@ int Scope::MaxNestedContextChainLength() {
return max_context_chain_length;
}
-
-Scope* Scope::DeclarationScope() {
+DeclarationScope* Scope::GetDeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
scope = scope->outer_scope();
}
- return scope;
+ return scope->AsDeclarationScope();
}
-
-Scope* Scope::ClosureScope() {
+DeclarationScope* Scope::GetClosureScope() {
Scope* scope = this;
while (!scope->is_declaration_scope() || scope->is_block_scope()) {
scope = scope->outer_scope();
}
- return scope;
+ return scope->AsDeclarationScope();
}
-
-Scope* Scope::ReceiverScope() {
+DeclarationScope* Scope::GetReceiverScope() {
Scope* scope = this;
while (!scope->is_script_scope() &&
- (!scope->is_function_scope() || scope->is_arrow_scope())) {
+ (!scope->is_function_scope() ||
+ scope->AsDeclarationScope()->is_arrow_scope())) {
scope = scope->outer_scope();
}
- return scope;
+ return scope->AsDeclarationScope();
}
@@ -791,38 +951,50 @@ Handle<ScopeInfo> Scope::GetScopeInfo(Isolate* isolate) {
return scope_info_;
}
-Handle<StringSet> Scope::CollectNonLocals(Handle<StringSet> non_locals) {
- // Collect non-local variables referenced in the scope.
- // TODO(yangguo): store non-local variables explicitly if we can no longer
- // rely on unresolved_ to find them.
- for (int i = 0; i < unresolved_.length(); i++) {
- VariableProxy* proxy = unresolved_[i];
- if (proxy->is_resolved() && proxy->var()->IsStackAllocated()) continue;
- Handle<String> name = proxy->name();
- non_locals = StringSet::Add(non_locals, name);
- }
- for (int i = 0; i < inner_scopes_.length(); i++) {
- non_locals = inner_scopes_[i]->CollectNonLocals(non_locals);
+Handle<StringSet> DeclarationScope::CollectNonLocals(
+ ParseInfo* info, Handle<StringSet> non_locals) {
+ VariableProxy* free_variables = FetchFreeVariables(this, info);
+ for (VariableProxy* proxy = free_variables; proxy != nullptr;
+ proxy = proxy->next_unresolved()) {
+ non_locals = StringSet::Add(non_locals, proxy->name());
}
return non_locals;
}
-
-void Scope::ReportMessage(int start_position, int end_position,
- MessageTemplate::Template message,
- const AstRawString* arg) {
- // Propagate the error to the topmost scope targeted by this scope analysis
- // phase.
- Scope* top = this;
- while (!top->is_script_scope() && !top->outer_scope()->already_resolved()) {
- top = top->outer_scope();
- }
-
- top->pending_error_handler_.ReportMessageAt(start_position, end_position,
- message, arg, kReferenceError);
+void DeclarationScope::AnalyzePartially(DeclarationScope* migrate_to,
+ AstNodeFactory* ast_node_factory) {
+ // Gather info from inner scopes.
+ PropagateScopeInfo();
+
+ // Try to resolve unresolved variables for this Scope and migrate those which
+ // cannot be resolved inside. It doesn't make sense to try to resolve them in
+ // the outer Scopes here, because they are incomplete.
+ for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
+ proxy = proxy->next_unresolved()) {
+ DCHECK(!proxy->is_resolved());
+ VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
+ migrate_to->AddUnresolved(copy);
+ }
+
+ // Push scope data up to migrate_to. Note that migrate_to and this Scope
+ // describe the same Scope, just in different Zones.
+ PropagateUsageFlagsToScope(migrate_to);
+ if (scope_uses_super_property_) migrate_to->scope_uses_super_property_ = true;
+ if (inner_scope_calls_eval_) migrate_to->inner_scope_calls_eval_ = true;
+ DCHECK(!force_eager_compilation_);
+ migrate_to->set_start_position(start_position_);
+ migrate_to->set_end_position(end_position_);
+ migrate_to->set_language_mode(language_mode());
+ migrate_to->arity_ = arity_;
+ migrate_to->force_context_allocation_ = force_context_allocation_;
+ outer_scope_->RemoveInnerScope(this);
+ DCHECK_EQ(outer_scope_, migrate_to->outer_scope_);
+ DCHECK_EQ(outer_scope_->zone(), migrate_to->zone());
+ DCHECK_EQ(NeedsHomeObject(), migrate_to->NeedsHomeObject());
+ DCHECK_EQ(asm_function_, migrate_to->asm_function_);
+ DCHECK_EQ(arguments() != nullptr, migrate_to->arguments() != nullptr);
}
-
#ifdef DEBUG
static const char* Header(ScopeType scope_type, FunctionKind function_kind,
bool is_declaration_scope) {
@@ -830,7 +1002,10 @@ static const char* Header(ScopeType scope_type, FunctionKind function_kind,
case EVAL_SCOPE: return "eval";
// TODO(adamk): Should we print concise method scopes specially?
case FUNCTION_SCOPE:
- return IsArrowFunction(function_kind) ? "arrow" : "function";
+ if (IsGeneratorFunction(function_kind)) return "function*";
+ if (IsAsyncFunction(function_kind)) return "async function";
+ if (IsArrowFunction(function_kind)) return "arrow";
+ return "function";
case MODULE_SCOPE: return "module";
case SCRIPT_SCOPE: return "global";
case CATCH_SCOPE: return "catch";
@@ -871,6 +1046,9 @@ static void PrintLocation(Variable* var) {
case VariableLocation::LOOKUP:
PrintF("lookup");
break;
+ case VariableLocation::MODULE:
+ PrintF("module");
+ break;
}
}
@@ -911,55 +1089,58 @@ static void PrintMap(int indent, VariableMap* map) {
}
}
+void DeclarationScope::PrintParameters() {
+ PrintF(" (");
+ for (int i = 0; i < params_.length(); i++) {
+ if (i > 0) PrintF(", ");
+ const AstRawString* name = params_[i]->raw_name();
+ if (name->IsEmpty())
+ PrintF(".%p", reinterpret_cast<void*>(params_[i]));
+ else
+ PrintName(name);
+ }
+ PrintF(")");
+}
void Scope::Print(int n) {
int n0 = (n > 0 ? n : 0);
int n1 = n0 + 2; // indentation
// Print header.
- Indent(n0, Header(scope_type_, function_kind_, is_declaration_scope()));
+ FunctionKind function_kind = is_function_scope()
+ ? AsDeclarationScope()->function_kind()
+ : kNormalFunction;
+ Indent(n0, Header(scope_type_, function_kind, is_declaration_scope()));
if (scope_name_ != nullptr && !scope_name_->IsEmpty()) {
PrintF(" ");
PrintName(scope_name_);
}
// Print parameters, if any.
+ Variable* function = nullptr;
if (is_function_scope()) {
- PrintF(" (");
- for (int i = 0; i < params_.length(); i++) {
- if (i > 0) PrintF(", ");
- const AstRawString* name = params_[i]->raw_name();
- if (name->IsEmpty())
- PrintF(".%p", reinterpret_cast<void*>(params_[i]));
- else
- PrintName(name);
- }
- PrintF(")");
+ AsDeclarationScope()->PrintParameters();
+ function = AsDeclarationScope()->function_var();
}
PrintF(" { // (%d, %d)\n", start_position(), end_position());
// Function name, if any (named function literals, only).
- if (function_ != NULL) {
+ if (function != nullptr) {
Indent(n1, "// (local) function name: ");
- PrintName(function_->proxy()->raw_name());
+ PrintName(function->raw_name());
PrintF("\n");
}
// Scope info.
- if (HasTrivialOuterContext()) {
- Indent(n1, "// scope has trivial outer context\n");
- }
if (is_strict(language_mode())) {
Indent(n1, "// strict mode scope\n");
}
- if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
+ if (IsAsmModule()) Indent(n1, "// scope is an asm module\n");
+ if (IsAsmFunction()) Indent(n1, "// scope is an asm function\n");
if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
- if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
- if (scope_uses_super_property_)
+ if (is_declaration_scope() && AsDeclarationScope()->uses_super_property()) {
Indent(n1, "// scope uses 'super' property\n");
- if (outer_scope_calls_sloppy_eval_) {
- Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (num_stack_slots_ > 0) {
@@ -973,15 +1154,20 @@ void Scope::Print(int n) {
}
// Print locals.
- if (function_ != NULL) {
+ if (function != nullptr) {
Indent(n1, "// function var:\n");
- PrintVar(n1, function_->proxy()->var());
+ PrintVar(n1, function);
}
- if (temps_.length() > 0) {
- Indent(n1, "// temporary vars:\n");
- for (int i = 0; i < temps_.length(); i++) {
- PrintVar(n1, temps_[i]);
+ if (is_declaration_scope()) {
+ bool printed_header = false;
+ ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
+ for (int i = 0; i < temps->length(); i++) {
+ if (!printed_header) {
+ printed_header = true;
+ Indent(n1, "// temporary vars:\n");
+ }
+ PrintVar(n1, (*temps)[i]);
}
}
@@ -999,16 +1185,34 @@ void Scope::Print(int n) {
// Print inner scopes (disable by providing negative n).
if (n >= 0) {
- for (int i = 0; i < inner_scopes_.length(); i++) {
+ for (Scope* scope = inner_scope_; scope != nullptr;
+ scope = scope->sibling_) {
PrintF("\n");
- inner_scopes_[i]->Print(n1);
+ scope->Print(n1);
}
}
Indent(n0, "}\n");
}
-#endif // DEBUG
+void Scope::CheckScopePositions() {
+ // A scope is allowed to have invalid positions if it is hidden and has no
+  // inner scopes.
+ if (!is_hidden() && inner_scope_ == nullptr) {
+ CHECK_NE(kNoSourcePosition, start_position());
+ CHECK_NE(kNoSourcePosition, end_position());
+ }
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->CheckScopePositions();
+ }
+}
+
+void Scope::CheckZones() {
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ CHECK_EQ(scope->zone(), zone());
+ }
+}
+#endif // DEBUG
Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
if (dynamics_ == NULL) dynamics_ = new (zone()) DynamicScopePart(zone());
@@ -1016,101 +1220,122 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
Variable* var = map->Lookup(name);
if (var == NULL) {
// Declare a new non-local.
- InitializationFlag init_flag = (mode == VAR)
- ? kCreatedInitialized : kNeedsInitialization;
- var = map->Declare(NULL,
- name,
- mode,
- Variable::NORMAL,
- init_flag);
+ DCHECK(!IsLexicalVariableMode(mode));
+ var = map->Declare(zone(), NULL, name, mode, Variable::NORMAL,
+ kCreatedInitialized);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(VariableLocation::LOOKUP, -1);
}
return var;
}
-
Variable* Scope::LookupRecursive(VariableProxy* proxy,
BindingKind* binding_kind,
- AstNodeFactory* factory) {
- DCHECK(binding_kind != NULL);
- if (already_resolved() && is_with_scope()) {
- // Short-cut: if the scope is deserialized from a scope info, variable
- // allocation is already fixed. We can simply return with dynamic lookup.
+ AstNodeFactory* factory,
+ Scope* outer_scope_end) {
+ DCHECK_NE(outer_scope_end, this);
+ DCHECK_NOT_NULL(binding_kind);
+ DCHECK_EQ(UNBOUND, *binding_kind);
+ // Short-cut: whenever we find a debug-evaluate scope, just look everything up
+ // dynamically. Debug-evaluate doesn't properly create scope info for the
+ // lookups it does. It may not have a valid 'this' declaration, and anything
+ // accessed through debug-evaluate might invalidly resolve to stack-allocated
+ // variables.
+ // TODO(yangguo): Remove once debug-evaluate creates proper ScopeInfo for the
+ // scopes in which it's evaluating.
+ if (is_debug_evaluate_scope_) {
*binding_kind = DYNAMIC_LOOKUP;
- return NULL;
+ return nullptr;
}
// Try to find the variable in this scope.
Variable* var = LookupLocal(proxy->raw_name());
- // We found a variable and we are done. (Even if there is an 'eval' in
- // this scope which introduces the same variable again, the resulting
- // variable remains the same.)
- if (var != NULL) {
+ // We found a variable and we are done. (Even if there is an 'eval' in this
+ // scope which introduces the same variable again, the resulting variable
+ // remains the same.)
+ if (var != nullptr) {
*binding_kind = BOUND;
return var;
}
- // We did not find a variable locally. Check against the function variable,
- // if any. We can do this for all scopes, since the function variable is
- // only present - if at all - for function scopes.
- *binding_kind = UNBOUND;
- var = LookupFunctionVar(proxy->raw_name(), factory);
- if (var != NULL) {
- *binding_kind = BOUND;
- } else if (outer_scope_ != NULL) {
- var = outer_scope_->LookupRecursive(proxy, binding_kind, factory);
- if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
+ // We did not find a variable locally. Check against the function variable, if
+ // any.
+ if (is_function_scope()) {
+ var = AsDeclarationScope()->LookupFunctionVar(proxy->raw_name());
+ if (var != nullptr) {
+ *binding_kind = calls_sloppy_eval() ? BOUND_EVAL_SHADOWED : BOUND;
+ return var;
+ }
+ }
+
+ if (outer_scope_ != outer_scope_end) {
+ var = outer_scope_->LookupRecursive(proxy, binding_kind, factory,
+ outer_scope_end);
+ if (*binding_kind == BOUND && is_function_scope()) {
var->ForceContextAllocation();
}
+ // "this" can't be shadowed by "eval"-introduced bindings or by "with"
+ // scopes.
+ // TODO(wingo): There are other variables in this category; add them.
+ if (var != nullptr && var->is_this()) return var;
+
+ if (is_with_scope()) {
+      // The current scope is a with scope, so the variable binding cannot be
+ // statically resolved. However, note that it was necessary to do a lookup
+ // in the outer scope anyway, because if a binding exists in an outer
+ // scope, the associated variable has to be marked as potentially being
+ // accessed from inside of an inner with scope (the property may not be in
+ // the 'with' object).
+ if (var != nullptr && var->IsUnallocated()) {
+ DCHECK(!already_resolved_);
+ var->set_is_used();
+ var->ForceContextAllocation();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
+ }
+ *binding_kind = DYNAMIC_LOOKUP;
+ return nullptr;
+ }
} else {
- DCHECK(is_script_scope());
- }
-
- // "this" can't be shadowed by "eval"-introduced bindings or by "with" scopes.
- // TODO(wingo): There are other variables in this category; add them.
- bool name_can_be_shadowed = var == nullptr || !var->is_this();
-
- if (is_with_scope() && name_can_be_shadowed) {
- DCHECK(!already_resolved());
- // The current scope is a with scope, so the variable binding can not be
- // statically resolved. However, note that it was necessary to do a lookup
- // in the outer scope anyway, because if a binding exists in an outer scope,
- // the associated variable has to be marked as potentially being accessed
- // from inside of an inner with scope (the property may not be in the 'with'
- // object).
- if (var != NULL && proxy->is_assigned()) var->set_maybe_assigned();
- *binding_kind = DYNAMIC_LOOKUP;
- return NULL;
- } else if (calls_sloppy_eval() && !is_script_scope() &&
- name_can_be_shadowed) {
+ DCHECK(!is_with_scope());
+ DCHECK(is_function_scope() || is_script_scope() || is_eval_scope());
+ }
+
+ if (calls_sloppy_eval() && is_declaration_scope() && !is_script_scope()) {
// A variable binding may have been found in an outer scope, but the current
- // scope makes a sloppy 'eval' call, so the found variable may not be
- // the correct one (the 'eval' may introduce a binding with the same name).
- // In that case, change the lookup result to reflect this situation.
+ // scope makes a sloppy 'eval' call, so the found variable may not be the
+ // correct one (the 'eval' may introduce a binding with the same name). In
+ // that case, change the lookup result to reflect this situation. Only
+ // scopes that can host var bindings (declaration scopes) need be considered
+ // here (this excludes block and catch scopes), and variable lookups at
+ // script scope are always dynamic.
if (*binding_kind == BOUND) {
*binding_kind = BOUND_EVAL_SHADOWED;
} else if (*binding_kind == UNBOUND) {
*binding_kind = UNBOUND_EVAL_SHADOWED;
}
}
+
return var;
}
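+// Example of LookupRecursive's eval-shadowed case: in
+//   function f() { var x; function g() { eval(s); return x; } }
+// looking up 'x' from g finds the binding in f, but g's sloppy eval may
+// introduce another 'x', so the result is BOUND_EVAL_SHADOWED.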
-
-bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
+void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
AstNodeFactory* factory) {
DCHECK(info->script_scope()->is_script_scope());
// If the proxy is already resolved there's nothing to do
// (functions and consts may be resolved by the parser).
- if (proxy->is_resolved()) return true;
+ if (proxy->is_resolved()) return;
// Otherwise, try to resolve the variable.
- BindingKind binding_kind;
+ BindingKind binding_kind = UNBOUND;
Variable* var = LookupRecursive(proxy, &binding_kind, factory);
+ ResolveTo(info, binding_kind, proxy, var);
+}
+
+void Scope::ResolveTo(ParseInfo* info, BindingKind binding_kind,
+ VariableProxy* proxy, Variable* var) {
#ifdef DEBUG
if (info->script_is_native()) {
// To avoid polluting the global object in native scripts
@@ -1154,7 +1379,8 @@ bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
case UNBOUND:
// No binding has been found. Declare a variable on the global object.
- var = info->script_scope()->DeclareDynamicGlobal(proxy->raw_name());
+ var = info->script_scope()->DeclareDynamicGlobal(proxy->raw_name(),
+ Variable::NORMAL);
break;
case UNBOUND_EVAL_SHADOWED:
@@ -1172,64 +1398,78 @@ bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
if (proxy->is_assigned()) var->set_maybe_assigned();
proxy->BindTo(var);
-
- return true;
}
-
-bool Scope::ResolveVariablesRecursively(ParseInfo* info,
+void Scope::ResolveVariablesRecursively(ParseInfo* info,
AstNodeFactory* factory) {
DCHECK(info->script_scope()->is_script_scope());
// Resolve unresolved variables for this scope.
- for (int i = 0; i < unresolved_.length(); i++) {
- if (!ResolveVariable(info, unresolved_[i], factory)) return false;
+ for (VariableProxy* proxy = unresolved_; proxy != nullptr;
+ proxy = proxy->next_unresolved()) {
+ ResolveVariable(info, proxy, factory);
}
// Resolve unresolved variables for inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- if (!inner_scopes_[i]->ResolveVariablesRecursively(info, factory))
- return false;
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->ResolveVariablesRecursively(info, factory);
+ }
+}
+
+VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
+ ParseInfo* info,
+ VariableProxy* stack) {
+ for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
+ proxy = next) {
+ next = proxy->next_unresolved();
+ if (proxy->is_resolved()) continue;
+ // Note that we pass nullptr as AstNodeFactory: this phase should not create
+  // any new AstNodes, since none of the Scopes involved are backed by
+  // ScopeInfo.
+ BindingKind binding_kind = UNBOUND;
+ Variable* var = LookupRecursive(proxy, &binding_kind, nullptr,
+ max_outer_scope->outer_scope());
+ if (var == nullptr) {
+ proxy->set_next_unresolved(stack);
+ stack = proxy;
+ } else if (info != nullptr) {
+ DCHECK_NE(UNBOUND, binding_kind);
+ DCHECK_NE(UNBOUND_EVAL_SHADOWED, binding_kind);
+ ResolveTo(info, binding_kind, proxy, var);
+ }
}
- return true;
-}
+ // Clear unresolved_ as it's in an inconsistent state.
+ unresolved_ = nullptr;
-
-void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
- if (outer_scope_calls_sloppy_eval) {
- outer_scope_calls_sloppy_eval_ = true;
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
}
- bool calls_sloppy_eval =
- this->calls_sloppy_eval() || outer_scope_calls_sloppy_eval_;
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* inner = inner_scopes_[i];
- inner->PropagateScopeInfo(calls_sloppy_eval);
- if (inner->scope_calls_eval_ || inner->inner_scope_calls_eval_) {
- inner_scope_calls_eval_ = true;
- }
- if (inner->force_eager_compilation_) {
- force_eager_compilation_ = true;
- }
- if (asm_module_ && inner->scope_type() == FUNCTION_SCOPE) {
- inner->asm_function_ = true;
+ return stack;
+}
+
+void Scope::PropagateScopeInfo() {
+ for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
+ inner->PropagateScopeInfo();
+ if (IsAsmModule() && inner->is_function_scope()) {
+ inner->AsDeclarationScope()->set_asm_function();
}
}
}
bool Scope::MustAllocate(Variable* var) {
+ DCHECK(var->location() != VariableLocation::MODULE);
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
// visible name.
if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
- (var->has_forced_context_allocation() || scope_calls_eval_ ||
- inner_scope_calls_eval_ || is_catch_scope() || is_block_scope() ||
- is_module_scope() || is_script_scope())) {
+ (inner_scope_calls_eval_ || is_catch_scope() || is_script_scope())) {
var->set_is_used();
- if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned();
+ if (inner_scope_calls_eval_) var->set_maybe_assigned();
}
+ DCHECK(!var->has_forced_context_allocation() || var->is_used());
// Global variables do not need to be allocated.
return !var->IsGlobalObjectProperty() && var->is_used();
}
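+// E.g. in 'function f() { var x; function g() { eval("x = 1"); } }' the
+// variable 'x' has no direct uses in f, but inner_scope_calls_eval_ makes
+// MustAllocate mark it as used (and maybe assigned) so the eval in g can
+// still reach it.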
@@ -1247,27 +1487,15 @@ bool Scope::MustAllocateInContext(Variable* var) {
// always context-allocated.
if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
- if (is_catch_scope() || is_module_scope()) return true;
+ if (is_catch_scope()) return true;
if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
- return var->has_forced_context_allocation() || scope_calls_eval_ ||
- inner_scope_calls_eval_;
-}
-
-
-bool Scope::HasArgumentsParameter(Isolate* isolate) {
- for (int i = 0; i < params_.length(); i++) {
- if (params_[i]->name().is_identical_to(
- isolate->factory()->arguments_string())) {
- return true;
- }
- }
- return false;
+ return var->has_forced_context_allocation() || inner_scope_calls_eval_;
}
void Scope::AllocateStackSlot(Variable* var) {
if (is_block_scope()) {
- outer_scope()->DeclarationScope()->AllocateStackSlot(var);
+ outer_scope()->GetDeclarationScope()->AllocateStackSlot(var);
} else {
var->AllocateTo(VariableLocation::LOCAL, num_stack_slots_++);
}
@@ -1278,17 +1506,13 @@ void Scope::AllocateHeapSlot(Variable* var) {
var->AllocateTo(VariableLocation::CONTEXT, num_heap_slots_++);
}
-
-void Scope::AllocateParameterLocals(Isolate* isolate) {
+void DeclarationScope::AllocateParameterLocals() {
DCHECK(is_function_scope());
- Variable* arguments = LookupLocal(ast_value_factory_->arguments_string());
- // Functions have 'arguments' declared implicitly in all non arrow functions.
- DCHECK(arguments != nullptr || is_arrow_scope());
bool uses_sloppy_arguments = false;
- if (arguments != nullptr && MustAllocate(arguments) &&
- !HasArgumentsParameter(isolate)) {
+ // Functions have 'arguments' declared implicitly in all non arrow functions.
+ if (arguments_ != nullptr) {
// 'arguments' is used. Unless there is also a parameter called
// 'arguments', we must be conservative and allocate all parameters to
// the context assuming they will be captured by the arguments object.
@@ -1297,21 +1521,21 @@ void Scope::AllocateParameterLocals(Isolate* isolate) {
// that specific parameter value and cannot be used to access the
// parameters, which is why we don't need to allocate an arguments
// object in that case.
+ if (MustAllocate(arguments_) && !has_arguments_parameter_) {
+ // In strict mode 'arguments' does not alias formal parameters.
+ // Therefore in strict mode we allocate parameters as if 'arguments'
+ // were not used.
+ // If the parameter list is not simple, arguments isn't sloppy either.
+ uses_sloppy_arguments =
+ is_sloppy(language_mode()) && has_simple_parameters();
+ } else {
+ // 'arguments' is unused. Tell the code generator that it does not need to
+ // allocate the arguments object by nulling out arguments_.
+ arguments_ = nullptr;
+ }
- // We are using 'arguments'. Tell the code generator that is needs to
- // allocate the arguments object by setting 'arguments_'.
- arguments_ = arguments;
-
- // In strict mode 'arguments' does not alias formal parameters.
- // Therefore in strict mode we allocate parameters as if 'arguments'
- // were not used.
- // If the parameter list is not simple, arguments isn't sloppy either.
- uses_sloppy_arguments =
- is_sloppy(language_mode()) && has_simple_parameters();
- }
-
- if (rest_parameter_ && !MustAllocate(rest_parameter_)) {
- rest_parameter_ = NULL;
+ } else {
+ DCHECK(is_arrow_scope());
}
// The same parameter may occur multiple times in the parameters_ list.
@@ -1319,20 +1543,18 @@ void Scope::AllocateParameterLocals(Isolate* isolate) {
// receive the highest parameter index for that parameter; thus iteration
// order is relevant!
for (int i = params_.length() - 1; i >= 0; --i) {
+ if (i == rest_index_) continue;
Variable* var = params_[i];
- if (var == rest_parameter_) continue;
DCHECK(var->scope() == this);
- if (uses_sloppy_arguments || has_forced_context_allocation()) {
- // Force context allocation of the parameter.
+ if (uses_sloppy_arguments) {
var->ForceContextAllocation();
}
AllocateParameter(var, i);
}
}
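The reverse loop above matters when a sloppy-mode parameter name is duplicated: walking right to left means each variable ends up with the index of its rightmost occurrence, which is the one that 'wins'. A stand-alone sketch of that indexing rule with a hypothetical parameter list:

#include <cstdio>
#include <map>
#include <string>

int main() {
  // function f(a, b, a) { ... } in sloppy mode: the second 'a' shadows the
  // first, so 'a' must report index 2, not 0. (Hypothetical example.)
  const char* params[] = {"a", "b", "a"};
  std::map<std::string, int> index;
  // Iterate right to left and keep only the first index seen per name,
  // mirroring the params_ loop above.
  for (int i = 2; i >= 0; --i) {
    index.emplace(params[i], i);  // emplace does not overwrite existing keys
  }
  for (const auto& entry : index)
    std::printf("%s -> %d\n", entry.first.c_str(), entry.second);
}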
-
-void Scope::AllocateParameter(Variable* var, int index) {
+void DeclarationScope::AllocateParameter(Variable* var, int index) {
if (MustAllocate(var)) {
if (MustAllocateInContext(var)) {
DCHECK(var->IsUnallocated() || var->IsContextSlot());
@@ -1350,23 +1572,15 @@ void Scope::AllocateParameter(Variable* var, int index) {
}
}
-
-void Scope::AllocateReceiver() {
+void DeclarationScope::AllocateReceiver() {
+ if (!has_this_declaration()) return;
DCHECK_NOT_NULL(receiver());
DCHECK_EQ(receiver()->scope(), this);
-
- if (has_forced_context_allocation()) {
- // Force context allocation of the receiver.
- receiver()->ForceContextAllocation();
- }
AllocateParameter(receiver(), -1);
}
-
-void Scope::AllocateNonParameterLocal(Isolate* isolate, Variable* var) {
+void Scope::AllocateNonParameterLocal(Variable* var) {
DCHECK(var->scope() == this);
- DCHECK(!var->IsVariable(isolate->factory()->dot_result_string()) ||
- !var->IsStackLocal());
if (var->IsUnallocated() && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
@@ -1376,11 +1590,8 @@ void Scope::AllocateNonParameterLocal(Isolate* isolate, Variable* var) {
}
}
-
-void Scope::AllocateDeclaredGlobal(Isolate* isolate, Variable* var) {
+void Scope::AllocateDeclaredGlobal(Variable* var) {
DCHECK(var->scope() == this);
- DCHECK(!var->IsVariable(isolate->factory()->dot_result_string()) ||
- !var->IsStackLocal());
if (var->IsUnallocated()) {
if (var->IsStaticGlobalObjectProperty()) {
DCHECK_EQ(-1, var->index());
@@ -1394,78 +1605,89 @@ void Scope::AllocateDeclaredGlobal(Isolate* isolate, Variable* var) {
}
}
-
-void Scope::AllocateNonParameterLocalsAndDeclaredGlobals(Isolate* isolate) {
+void Scope::AllocateNonParameterLocalsAndDeclaredGlobals() {
// All variables that have no rewrite yet are non-parameter locals.
- for (int i = 0; i < temps_.length(); i++) {
- AllocateNonParameterLocal(isolate, temps_[i]);
+ if (is_declaration_scope()) {
+ ZoneList<Variable*>* temps = AsDeclarationScope()->temps();
+ for (int i = 0; i < temps->length(); i++) {
+ AllocateNonParameterLocal((*temps)[i]);
+ }
}
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
- for (VariableMap::Entry* p = variables_.Start();
- p != NULL;
- p = variables_.Next(p)) {
- Variable* var = reinterpret_cast<Variable*>(p->value);
- vars.Add(VarAndOrder(var, p->order), zone());
- }
- vars.Sort(VarAndOrder::Compare);
- int var_count = vars.length();
- for (int i = 0; i < var_count; i++) {
- AllocateNonParameterLocal(isolate, vars[i].var());
+ for (int i = 0; i < ordered_variables_.length(); i++) {
+ AllocateNonParameterLocal(ordered_variables_[i]);
}
if (FLAG_global_var_shortcuts) {
- for (int i = 0; i < var_count; i++) {
- AllocateDeclaredGlobal(isolate, vars[i].var());
+ for (int i = 0; i < ordered_variables_.length(); i++) {
+ AllocateDeclaredGlobal(ordered_variables_[i]);
}
}
+ if (is_declaration_scope()) {
+ AsDeclarationScope()->AllocateLocals();
+ }
+}
+
+void DeclarationScope::AllocateLocals() {
// For now, function_ must be allocated at the very end. If it gets
// allocated in the context, it must be the last slot in the context,
// because of the current ScopeInfo implementation (see
// ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
if (function_ != nullptr) {
- AllocateNonParameterLocal(isolate, function_->proxy()->var());
+ AllocateNonParameterLocal(function_);
}
- if (rest_parameter_ != nullptr) {
- AllocateNonParameterLocal(isolate, rest_parameter_);
- }
+ DCHECK(!has_rest_parameter() || !MustAllocate(params_[rest_index_]) ||
+ !params_[rest_index_]->IsUnallocated());
- Variable* new_target_var =
- LookupLocal(ast_value_factory_->new_target_string());
- if (new_target_var != nullptr && MustAllocate(new_target_var)) {
- new_target_ = new_target_var;
+ if (new_target_ != nullptr && !MustAllocate(new_target_)) {
+ new_target_ = nullptr;
}
- Variable* this_function_var =
- LookupLocal(ast_value_factory_->this_function_string());
- if (this_function_var != nullptr && MustAllocate(this_function_var)) {
- this_function_ = this_function_var;
+ if (this_function_ != nullptr && !MustAllocate(this_function_)) {
+ this_function_ = nullptr;
}
}
+void ModuleScope::AllocateModuleVariables() {
+ for (auto it = module()->regular_imports().begin();
+ it != module()->regular_imports().end(); ++it) {
+ Variable* var = LookupLocal(it->second->local_name);
+ // TODO(neis): Use a meaningful index.
+ var->AllocateTo(VariableLocation::MODULE, 42);
+ }
-void Scope::AllocateVariablesRecursively(Isolate* isolate) {
- if (!already_resolved()) {
- num_stack_slots_ = 0;
+ for (auto entry : module()->exports()) {
+ if (entry->local_name == nullptr) continue;
+ Variable* var = LookupLocal(entry->local_name);
+ var->AllocateTo(VariableLocation::MODULE, 42);
}
+}
+
+void Scope::AllocateVariablesRecursively() {
+ DCHECK(!already_resolved_);
+ DCHECK_EQ(0, num_stack_slots_);
+
// Allocate variables for inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- inner_scopes_[i]->AllocateVariablesRecursively(isolate);
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ scope->AllocateVariablesRecursively();
}
- // If scope is already resolved, we still need to allocate
- // variables in inner scopes which might not have been resolved yet.
- if (already_resolved()) return;
- // The number of slots required for variables.
- num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
+ DCHECK(!already_resolved_);
+ DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, num_heap_slots_);
// Allocate variables for this scope.
// Parameters must be allocated first, if any.
- if (is_function_scope()) AllocateParameterLocals(isolate);
- if (has_this_declaration()) AllocateReceiver();
- AllocateNonParameterLocalsAndDeclaredGlobals(isolate);
+ if (is_declaration_scope()) {
+ if (is_module_scope()) {
+ AsModuleScope()->AllocateModuleVariables();
+ } else if (is_function_scope()) {
+ AsDeclarationScope()->AllocateParameterLocals();
+ }
+ AsDeclarationScope()->AllocateReceiver();
+ }
+ AllocateNonParameterLocalsAndDeclaredGlobals();
// Force allocation of a context for this scope if necessary. For a 'with'
// scope and for a function scope that makes an 'eval' call we need a context,
@@ -1488,15 +1710,19 @@ void Scope::AllocateVariablesRecursively(Isolate* isolate) {
int Scope::StackLocalCount() const {
+ Variable* function =
+ is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
return num_stack_slots() -
- (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
+ (function != nullptr && function->IsStackLocal() ? 1 : 0);
}
int Scope::ContextLocalCount() const {
if (num_heap_slots() == 0) return 0;
+ Variable* function =
+ is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
bool is_function_var_in_context =
- function_ != NULL && function_->proxy()->var()->IsContextSlot();
+ function != nullptr && function->IsContextSlot();
return num_heap_slots() - Context::MIN_CONTEXT_SLOTS - num_global_slots() -
(is_function_var_in_context ? 1 : 0);
}
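StackLocalCount() and ContextLocalCount() now fetch the function variable through AsDeclarationScope() instead of reading a Scope field, but the arithmetic is unchanged. A toy recomputation with made-up numbers, assuming Context::MIN_CONTEXT_SLOTS is 4 in this V8 version (hypothetical values, not real allocation results):

#include <cassert>

int main() {
  const int kMinContextSlots = 4;  // Context::MIN_CONTEXT_SLOTS (assumed)
  int num_stack_slots = 5;         // hypothetical allocation results
  int num_heap_slots = 7;
  int num_global_slots = 1;
  bool function_var_on_stack = true;  // named function expression binding
  bool function_var_in_context = false;

  int stack_locals = num_stack_slots - (function_var_on_stack ? 1 : 0);
  int context_locals = num_heap_slots - kMinContextSlots - num_global_slots -
                       (function_var_in_context ? 1 : 0);
  assert(stack_locals == 4);
  assert(context_locals == 2);
}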
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index dae70c0142..8c00927421 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -6,8 +6,8 @@
#define V8_AST_SCOPES_H_
#include "src/ast/ast.h"
-#include "src/hashmap.h"
-#include "src/pending-compilation-error-handler.h"
+#include "src/base/hashmap.h"
+#include "src/globals.h"
#include "src/zone.h"
namespace v8 {
@@ -20,18 +20,13 @@ class VariableMap: public ZoneHashMap {
public:
explicit VariableMap(Zone* zone);
- virtual ~VariableMap();
-
- Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode,
- Variable::Kind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+ Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
+ VariableMode mode, Variable::Kind kind,
+ InitializationFlag initialization_flag,
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
+ bool* added = nullptr);
Variable* Lookup(const AstRawString* name);
-
- Zone* zone() const { return zone_; }
-
- private:
- Zone* zone_;
};
@@ -61,16 +56,8 @@ class DynamicScopePart : public ZoneObject {
class SloppyBlockFunctionMap : public ZoneHashMap {
public:
explicit SloppyBlockFunctionMap(Zone* zone);
-
- virtual ~SloppyBlockFunctionMap();
-
- void Declare(const AstRawString* name,
+ void Declare(Zone* zone, const AstRawString* name,
SloppyBlockFunctionStatement* statement);
-
- typedef ZoneVector<SloppyBlockFunctionStatement*> Vector;
-
- private:
- Zone* zone_;
};
@@ -83,29 +70,57 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
// a location. Note that many VariableProxy nodes may refer to the same Java-
// Script variable.
+// JS environments are represented in the parser using Scope, DeclarationScope
+// and ModuleScope. DeclarationScope is used for any scope that hosts 'var'
+// declarations. This includes script, module, eval, varblock, and function
+// scope. ModuleScope further specializes DeclarationScope.
class Scope: public ZoneObject {
public:
// ---------------------------------------------------------------------------
// Construction
- Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
- AstValueFactory* value_factory,
- FunctionKind function_kind = kNormalFunction);
-
- // Compute top scope and allocate variables. For lazy compilation the top
- // scope only contains the single lazily compiled function, so this
- // doesn't re-allocate variables repeatedly.
- static bool Analyze(ParseInfo* info);
-
- static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
- Context* context, Scope* script_scope);
+ Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type);
+#ifdef DEBUG
// The scope name is only used for printing/debugging.
void SetScopeName(const AstRawString* scope_name) {
scope_name_ = scope_name;
}
+#endif
+
+ // TODO(verwaest): Is this needed on Scope?
+ int num_parameters() const;
+
+ DeclarationScope* AsDeclarationScope();
+ const DeclarationScope* AsDeclarationScope() const;
+ ModuleScope* AsModuleScope();
+ const ModuleScope* AsModuleScope() const;
+
+ class Snapshot final BASE_EMBEDDED {
+ public:
+ explicit Snapshot(Scope* scope);
+
+ void Reparent(DeclarationScope* new_parent) const;
+
+ private:
+ Scope* outer_scope_;
+ Scope* top_inner_scope_;
+ VariableProxy* top_unresolved_;
+ int top_temp_;
+ };
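The new Scope::Snapshot presumably supports speculative parsing: the parser records the heads of the scope's intrusive lists (inner scopes, unresolved proxies, temporaries) and can later move everything added since into a different parent, e.g. once an ambiguous parenthesized expression turns out to be an arrow-function parameter list. A stand-alone analogue of the mark-and-splice idea, with a hypothetical Ctx type standing in for Scope:

#include <cstdio>
#include <vector>

struct Ctx {
  std::vector<int> items;  // stands in for inner scopes / unresolved proxies
};

class Snapshot {
 public:
  explicit Snapshot(Ctx* ctx) : ctx_(ctx), mark_(ctx->items.size()) {}
  // Move everything recorded after the snapshot into `new_parent`.
  void Reparent(Ctx* new_parent) const {
    new_parent->items.assign(ctx_->items.begin() + mark_, ctx_->items.end());
    ctx_->items.resize(mark_);
  }

 private:
  Ctx* ctx_;
  size_t mark_;
};

int main() {
  Ctx outer, arrow;
  outer.items = {1, 2};
  Snapshot snap(&outer);     // taken before '(' of a possible arrow function
  outer.items.push_back(3);  // parsed speculatively into the outer scope
  snap.Reparent(&arrow);     // '=>' seen: move the new material
  std::printf("outer=%zu arrow=%zu\n", outer.items.size(), arrow.items.size());
}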
+
+ // Compute top scope and allocate variables. For lazy compilation the top
+ // scope only contains the single lazily compiled function, so this
+ // doesn't re-allocate variables repeatedly.
+ static void Analyze(ParseInfo* info);
+
+ enum class DeserializationMode { kDeserializeOffHeap, kKeepScopeInfo };
- void Initialize();
+ static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
+ Context* context,
+ DeclarationScope* script_scope,
+ AstValueFactory* ast_value_factory,
+ DeserializationMode deserialization_mode);
// Checks if the block scope is redundant, i.e. it does not contain any
// block scoped declarations. In that case it is removed from the scope
@@ -113,7 +128,7 @@ class Scope: public ZoneObject {
Scope* FinalizeBlockScope();
// Inserts outer_scope into this scope's scope chain (and removes this
- // from the current outer_scope_'s inner_scopes_).
+ // from the current outer_scope_'s inner scope list).
// Assumes outer_scope_ is non-null.
void ReplaceOuterScope(Scope* outer_scope);
@@ -129,67 +144,42 @@ class Scope: public ZoneObject {
// Lookup a variable in this scope. Returns the variable or NULL if not found.
Variable* LookupLocal(const AstRawString* name);
- // This lookup corresponds to a lookup in the "intermediate" scope sitting
- // between this scope and the outer scope. (ECMA-262, 3rd., requires that
- // the name of named function literal is kept in an intermediate scope
- // in between this scope and the next outer scope.)
- Variable* LookupFunctionVar(const AstRawString* name,
- AstNodeFactory* factory);
-
// Lookup a variable in this scope or outer scopes.
// Returns the variable or NULL if not found.
Variable* Lookup(const AstRawString* name);
- // Declare the function variable for a function literal. This variable
- // is in an intermediate scope between this function scope and the the
- // outer scope. Only possible for function scopes; at most one variable.
- void DeclareFunctionVar(VariableDeclaration* declaration) {
- DCHECK(is_function_scope());
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- decls_.InsertAt(0, declaration, zone());
- function_ = declaration;
- }
-
- // Declare a parameter in this scope. When there are duplicated
- // parameters the rightmost one 'wins'. However, the implementation
- // expects all parameters to be declared and from left to right.
- Variable* DeclareParameter(
- const AstRawString* name, VariableMode mode,
- bool is_optional, bool is_rest, bool* is_duplicate);
-
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, Variable::Kind kind,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
- // Declare an implicit global variable in this scope which must be a
- // script scope. The variable was introduced (possibly from an inner
- // scope) by a reference to an unresolved variable with no intervening
- // with statements or eval calls.
- Variable* DeclareDynamicGlobal(const AstRawString* name);
+ // Declarations list.
+ ZoneList<Declaration*>* declarations() { return &decls_; }
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
- Variable::Kind kind = Variable::NORMAL,
- int start_position = RelocInfo::kNoPosition,
- int end_position = RelocInfo::kNoPosition) {
+ int start_position = kNoSourcePosition,
+ int end_position = kNoSourcePosition,
+ Variable::Kind kind = Variable::NORMAL) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
- DCHECK(!already_resolved());
+ DCHECK(!already_resolved_);
+ DCHECK_EQ(factory->zone(), zone());
VariableProxy* proxy =
factory->NewVariableProxy(name, kind, start_position, end_position);
- unresolved_.Add(proxy, zone_);
+ proxy->set_next_unresolved(unresolved_);
+ unresolved_ = proxy;
return proxy;
}
void AddUnresolved(VariableProxy* proxy) {
- DCHECK(!already_resolved());
+ DCHECK(!already_resolved_);
DCHECK(!proxy->is_resolved());
- unresolved_.Add(proxy, zone_);
+ proxy->set_next_unresolved(unresolved_);
+ unresolved_ = proxy;
}
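Unresolved references used to live in a ZoneList; now each VariableProxy carries a next-unresolved pointer and the scope keeps only the head, so NewUnresolved and AddUnresolved push in O(1). The RemoveUnresolved mentioned below then has to walk the chain. A stand-alone sketch of that intrusive-list discipline, with a hypothetical Proxy type:

#include <cstdio>

struct Proxy {
  const char* name;
  Proxy* next = nullptr;  // stands in for next_unresolved()
};

void Push(Proxy** head, Proxy* p) {
  p->next = *head;
  *head = p;
}

bool Remove(Proxy** head, Proxy* p) {
  // Walk via a pointer-to-pointer so unlinking the head needs no special case.
  for (Proxy** cur = head; *cur != nullptr; cur = &(*cur)->next) {
    if (*cur == p) {
      *cur = p->next;
      return true;
    }
  }
  return false;
}

int main() {
  Proxy a{"a"}, b{"b"}, c{"c"};
  Proxy* unresolved = nullptr;
  Push(&unresolved, &a);
  Push(&unresolved, &b);
  Push(&unresolved, &c);
  Remove(&unresolved, &b);
  for (Proxy* p = unresolved; p; p = p->next) std::printf("%s\n", p->name);
}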
// Remove an unresolved variable. During parsing, an unresolved variable
@@ -205,17 +195,9 @@ class Scope: public ZoneObject {
// In particular, the only way to get hold of the temporary is by keeping the
// Variable* around. The name should not clash with legitimate variable
// names.
+ // TODO(verwaest): Move to DeclarationScope?
Variable* NewTemporary(const AstRawString* name);
- // Remove a temporary variable. This is for adjusting the scope of
- // temporaries used when desugaring parameter initializers.
- bool RemoveTemporary(Variable* var);
-
- // Adds a temporary variable in this scope's TemporaryScope. This is for
- // adjusting the scope of temporaries used when desugaring parameter
- // initializers.
- void AddTemporary(Variable* var) { temps_.Add(var, zone()); }
-
// Adds the specific declaration node to the list of declarations in
// this scope. The declarations are processed as part of entering
// the scope; see codegen.cc:ProcessDeclarations.
@@ -229,26 +211,32 @@ class Scope: public ZoneObject {
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
+ // Check if the scope has a conflicting lexical declaration that has a name in
+ // the given list. This is used to catch patterns like
+ // `try{}catch(e){let e;}`,
+ // which is an error even though the two 'e's are declared in different
+ // scopes.
+ Declaration* CheckLexDeclarationsConflictingWith(
+ const ZoneList<const AstRawString*>& names);
+
// ---------------------------------------------------------------------------
// Scope-specific info.
- // Inform the scope that the corresponding code contains an eval call.
- void RecordEvalCall() { scope_calls_eval_ = true; }
-
- // Inform the scope that the corresponding code uses "arguments".
- void RecordArgumentsUsage() { scope_uses_arguments_ = true; }
-
- // Inform the scope that the corresponding code uses "super".
- void RecordSuperPropertyUsage() { scope_uses_super_property_ = true; }
+ // Inform the scope and outer scopes that the corresponding code contains an
+ // eval call.
+ void RecordEvalCall() {
+ scope_calls_eval_ = true;
+ for (Scope* scope = this; scope != nullptr; scope = scope->outer_scope()) {
+ scope->inner_scope_calls_eval_ = true;
+ }
+ }
// Set the language mode flag (unless disabled by a global flag).
void SetLanguageMode(LanguageMode language_mode) {
- language_mode_ = language_mode;
+ DCHECK(!is_module_scope() || is_strict(language_mode));
+ set_language_mode(language_mode);
}
- // Set the ASM module flag.
- void SetAsmModule() { asm_module_ = true; }
-
// Inform the scope that the scope may execute declarations nonlinearly.
// Currently, the only nonlinear scope is a switch statement. The name is
// more general in case something else comes up with similar control flow,
@@ -295,9 +283,13 @@ class Scope: public ZoneObject {
end_position_ = statement_pos;
}
+ // Scopes created for desugaring are hidden, i.e. not visible to the debugger.
+ bool is_hidden() const { return is_hidden_; }
+ void set_is_hidden() { is_hidden_ = true; }
+
// In some cases we want to force context allocation for a whole scope.
void ForceContextAllocation() {
- DCHECK(!already_resolved());
+ DCHECK(!already_resolved_);
force_context_allocation_ = true;
}
bool has_forced_context_allocation() const {
@@ -315,48 +307,23 @@ class Scope: public ZoneObject {
bool is_catch_scope() const { return scope_type_ == CATCH_SCOPE; }
bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
- bool is_arrow_scope() const {
- return is_function_scope() && IsArrowFunction(function_kind_);
- }
bool is_declaration_scope() const { return is_declaration_scope_; }
- void set_is_declaration_scope() { is_declaration_scope_ = true; }
-
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
bool calls_sloppy_eval() const {
- return scope_calls_eval_ && is_sloppy(language_mode_);
+ return scope_calls_eval_ && is_sloppy(language_mode());
}
- bool outer_scope_calls_sloppy_eval() const {
- return outer_scope_calls_sloppy_eval_;
- }
- bool asm_module() const { return asm_module_; }
- bool asm_function() const { return asm_function_; }
-
- // Is this scope inside a with statement.
- bool inside_with() const { return scope_inside_with_; }
-
- // Does this scope access "arguments".
- bool uses_arguments() const { return scope_uses_arguments_; }
- // Does this scope access "super" property (super.foo).
- bool uses_super_property() const { return scope_uses_super_property_; }
+ bool IsAsmModule() const;
+ bool IsAsmFunction() const;
// Does this scope have the potential to execute declarations non-linearly?
bool is_nonlinear() const { return scope_nonlinear_; }
// Whether this needs to be represented by a runtime context.
bool NeedsContext() const {
- // Catch and module scopes always have heap slots.
+ // Catch scopes always have heap slots.
DCHECK(!is_catch_scope() || num_heap_slots() > 0);
- DCHECK(!is_module_scope() || num_heap_slots() > 0);
- return is_with_scope() || num_heap_slots() > 0;
- }
-
- bool NeedsHomeObject() const {
- return scope_uses_super_property_ ||
- ((scope_calls_eval_ || inner_scope_calls_eval_) &&
- (IsConciseMethod(function_kind()) ||
- IsAccessorFunction(function_kind()) ||
- IsClassConstructor(function_kind())));
+ return num_heap_slots() > 0;
}
// ---------------------------------------------------------------------------
@@ -365,119 +332,23 @@ class Scope: public ZoneObject {
// The type of this scope.
ScopeType scope_type() const { return scope_type_; }
- FunctionKind function_kind() const { return function_kind_; }
-
// The language mode of this scope.
- LanguageMode language_mode() const { return language_mode_; }
-
- // The variable corresponding to the 'this' value.
- Variable* receiver() {
- DCHECK(has_this_declaration());
- DCHECK_NOT_NULL(receiver_);
- return receiver_;
- }
-
- // TODO(wingo): Add a GLOBAL_SCOPE scope type which will lexically allocate
- // "this" (and no other variable) on the native context. Script scopes then
- // will not have a "this" declaration.
- bool has_this_declaration() const {
- return (is_function_scope() && !is_arrow_scope()) || is_module_scope();
- }
-
- // The variable corresponding to the 'new.target' value.
- Variable* new_target_var() { return new_target_; }
+ LanguageMode language_mode() const { return is_strict_ ? STRICT : SLOPPY; }
- // The variable holding the function literal for named function
- // literals, or NULL. Only valid for function scopes.
- VariableDeclaration* function() const {
- DCHECK(is_function_scope());
- return function_;
- }
-
- // Parameters. The left-most parameter has index 0.
- // Only valid for function scopes.
- Variable* parameter(int index) const {
- DCHECK(is_function_scope());
- return params_[index];
- }
-
- // Returns the default function arity excluding default or rest parameters.
- int default_function_length() const { return arity_; }
-
- // Returns the number of formal parameters, up to but not including the
- // rest parameter index (if the function has rest parameters), i.e. it
- // says 2 for
- //
- // function foo(a, b) { ... }
- //
- // and
- //
- // function foo(a, b, ...c) { ... }
- //
- // but for
- //
- // function foo(a, b, c = 1) { ... }
- //
- // we return 3 here.
- int num_parameters() const {
- return has_rest_parameter() ? params_.length() - 1 : params_.length();
- }
-
- // A function can have at most one rest parameter. Returns Variable* or NULL.
- Variable* rest_parameter(int* index) const {
- *index = rest_index_;
- if (rest_index_ < 0) return NULL;
- return rest_parameter_;
- }
-
- bool has_rest_parameter() const { return rest_index_ >= 0; }
-
- bool has_simple_parameters() const {
- return has_simple_parameters_;
- }
-
- // TODO(caitp): manage this state in a better way. PreParser must be able to
- // communicate that the scope is non-simple, without allocating any parameters
- // as the Parser does. This is necessary to ensure that TC39's proposed early
- // error can be reported consistently regardless of whether lazily parsed or
- // not.
- void SetHasNonSimpleParameters() {
- DCHECK(is_function_scope());
- has_simple_parameters_ = false;
- }
-
- // Retrieve `IsSimpleParameterList` of current or outer function.
- bool HasSimpleParameters() {
- Scope* scope = ClosureScope();
- return !scope->is_function_scope() || scope->has_simple_parameters();
- }
-
- // The local variable 'arguments' if we need to allocate it; NULL otherwise.
- Variable* arguments() const {
- DCHECK(!is_arrow_scope() || arguments_ == nullptr);
- return arguments_;
- }
-
- Variable* this_function_var() const {
- // This is only used in derived constructors atm.
- DCHECK(this_function_ == nullptr ||
- (is_function_scope() && (IsClassConstructor(function_kind()) ||
- IsConciseMethod(function_kind()) ||
- IsAccessorFunction(function_kind()))));
- return this_function_;
- }
-
- // Declarations list.
- ZoneList<Declaration*>* declarations() { return &decls_; }
-
- // Inner scope list.
- ZoneList<Scope*>* inner_scopes() { return &inner_scopes_; }
+ // inner_scope() and sibling() together implement the inner scope list of a
+ // scope. inner_scope() points to an inner scope of this scope, and
+ // sibling() points to the next inner scope of the outer scope of this scope.
+ Scope* inner_scope() const { return inner_scope_; }
+ Scope* sibling() const { return sibling_; }
// The scope immediately surrounding this scope, or NULL.
Scope* outer_scope() const { return outer_scope_; }
- // The ModuleDescriptor for this scope; only for module scopes.
- ModuleDescriptor* module() const { return module_descriptor_; }
+ const AstRawString* catch_variable_name() const {
+ DCHECK(is_catch_scope());
+ DCHECK_EQ(1, num_var());
+ return static_cast<AstRawString*>(variables_.Start()->key);
+ }
// ---------------------------------------------------------------------------
// Variable allocation.
@@ -489,9 +360,6 @@ class Scope: public ZoneObject {
ZoneList<Variable*>* context_locals,
ZoneList<Variable*>* context_globals);
- // Current number of var or const locals.
- int num_var_or_const() { return num_var_or_const_; }
-
// Result of variable allocation.
int num_stack_slots() const { return num_stack_slots_; }
int num_heap_slots() const { return num_heap_slots_; }
@@ -501,23 +369,15 @@ class Scope: public ZoneObject {
int ContextLocalCount() const;
int ContextGlobalCount() const;
- // Make sure this scope and all outer scopes are eagerly compiled.
- void ForceEagerCompilation() { force_eager_compilation_ = true; }
-
// Determine if we can parse a function literal in this scope lazily.
bool AllowsLazyParsing() const;
- // Determine if we can use lazy compilation for this scope.
- bool AllowsLazyCompilation() const;
-
- // Determine if we can use lazy compilation for this scope without a context.
- bool AllowsLazyCompilationWithoutContext() const;
-
- // True if the outer context of this scope is always the native context.
- bool HasTrivialOuterContext() const;
-
// The number of contexts between this and scope; zero if this == scope.
- int ContextChainLength(Scope* scope);
+ int ContextChainLength(Scope* scope) const;
+
+ // The number of contexts between this and the outermost context that has a
+ // sloppy eval call. One if this->calls_sloppy_eval().
+ int ContextChainLengthUntilOutermostSloppyEval() const;
// The maximum number of nested contexts required for this scope and any inner
// scopes.
@@ -525,22 +385,27 @@ class Scope: public ZoneObject {
// Find the first function, script, eval or (declaration) block scope. This is
// the scope where var declarations will be hoisted to in the implementation.
- Scope* DeclarationScope();
+ DeclarationScope* GetDeclarationScope();
// Find the first non-block declaration scope. This should be either a script,
- // function, or eval scope. Same as DeclarationScope(), but skips
- // declaration "block" scopes. Used for differentiating associated
- // function objects (i.e., the scope for which a function prologue allocates
- // a context) or declaring temporaries.
- Scope* ClosureScope();
+ // function, or eval scope. Same as DeclarationScope(), but skips declaration
+ // "block" scopes. Used for differentiating associated function objects (i.e.,
+ // the scope for which a function prologue allocates a context) or declaring
+ // temporaries.
+ DeclarationScope* GetClosureScope();
// Find the first (non-arrow) function or script scope. This is where
// 'this' is bound, and what determines the function kind.
- Scope* ReceiverScope();
+ DeclarationScope* GetReceiverScope();
+ // Creates a scope info if it doesn't already exist.
Handle<ScopeInfo> GetScopeInfo(Isolate* isolate);
- Handle<StringSet> CollectNonLocals(Handle<StringSet> non_locals);
+ // GetScopeInfo() must have been called once to create the ScopeInfo.
+ Handle<ScopeInfo> scope_info() {
+ DCHECK(!scope_info_.is_null());
+ return scope_info_;
+ }
// ---------------------------------------------------------------------------
// Strict mode support.
@@ -553,43 +418,51 @@ class Scope: public ZoneObject {
return variables_.Lookup(name) != NULL;
}
- bool IsDeclaredParameter(const AstRawString* name) {
- // If IsSimpleParameterList is false, duplicate parameters are not allowed,
- // however `arguments` may be allowed if function is not strict code. Thus,
- // the assumptions explained above do not hold.
- return params_.Contains(variables_.Lookup(name));
- }
-
- SloppyBlockFunctionMap* sloppy_block_function_map() {
- return &sloppy_block_function_map_;
- }
-
- // Error handling.
- void ReportMessage(int start_position, int end_position,
- MessageTemplate::Template message,
- const AstRawString* arg);
+ int num_var() const { return variables_.occupancy(); }
// ---------------------------------------------------------------------------
// Debugging.
#ifdef DEBUG
void Print(int n = 0); // n = indentation; n < 0 => don't print recursively
+
+ // Check that the scope has positions assigned.
+ void CheckScopePositions();
+
+ // Check that all Scopes in the scope tree use the same Zone.
+ void CheckZones();
#endif
- // ---------------------------------------------------------------------------
- // Implementation.
+ // Retrieve `IsSimpleParameterList` of current or outer function.
+ bool HasSimpleParameters();
+ void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
+
+ protected:
+ // Creates a script scope.
+ explicit Scope(Zone* zone);
+
+ void set_language_mode(LanguageMode language_mode) {
+ is_strict_ = is_strict(language_mode);
+ }
+
private:
+ Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
+ VariableMode mode, Variable::Kind kind,
+ InitializationFlag initialization_flag,
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) {
+ bool added;
+ Variable* var =
+ variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
+ maybe_assigned_flag, &added);
+ if (added) ordered_variables_.Add(var, zone);
+ return var;
+ }
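The private Declare wrapper above pairs the hash map with ordered_variables_ so allocation can still iterate variables in declaration order, replacing the old VarAndOrder sort in AllocateNonParameterLocalsAndDeclaredGlobals. A stand-alone sketch of the map-plus-order-list pattern, using standard containers instead of the zone-allocated V8 types:

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct Variable {
  std::string name;
};

class Vars {
 public:
  Variable* Declare(const std::string& name) {
    // `added` mirrors the out-parameter shape of VariableMap::Declare.
    bool added = false;
    Variable*& slot = map_[name];  // default-inserts nullptr for a new name
    if (slot == nullptr) {
      owned_.push_back(new Variable{name});
      slot = owned_.back();
      added = true;
    }
    if (added) ordered_.push_back(slot);  // preserves declaration order
    return slot;
  }
  const std::vector<Variable*>& ordered() const { return ordered_; }
  ~Vars() {
    for (Variable* v : owned_) delete v;
  }

 private:
  std::unordered_map<std::string, Variable*> map_;
  std::vector<Variable*> ordered_;
  std::vector<Variable*> owned_;
};

int main() {
  Vars vars;
  vars.Declare("a");
  vars.Declare("b");
  vars.Declare("a");  // duplicate: returned, but not re-added to the order
  for (Variable* v : vars.ordered()) std::printf("%s\n", v->name.c_str());
}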
+ Zone* zone_;
+
// Scope tree.
Scope* outer_scope_; // the immediately enclosing outer scope, or NULL
- ZoneList<Scope*> inner_scopes_; // the immediately enclosed inner scopes
-
- // The scope type.
- ScopeType scope_type_;
- // If the scope is a function scope, this is the function kind.
- FunctionKind function_kind_;
-
- // Debugging support.
- const AstRawString* scope_name_;
+ Scope* inner_scope_; // an inner scope of this scope
+ Scope* sibling_; // a sibling inner scope of the outer scope of this scope.
// The variables declared in this scope:
//
@@ -597,85 +470,60 @@ class Scope: public ZoneObject {
// variables may be implicitly 'declared' by being used (possibly in
// an inner scope) with no intervening with statements or eval calls.
VariableMap variables_;
- // Compiler-allocated (user-invisible) temporaries.
- ZoneList<Variable*> temps_;
- // Parameter list in source order.
- ZoneList<Variable*> params_;
+ // In case of non-scopeinfo-backed scopes, this contains the variables of the
+ // map above in order of addition.
+ // TODO(verwaest): Thread through Variable.
+ ZoneList<Variable*> ordered_variables_;
// Variables that must be looked up dynamically.
DynamicScopePart* dynamics_;
- // Unresolved variables referred to from this scope.
- ZoneList<VariableProxy*> unresolved_;
+ // Unresolved variables referred to from this scope. The proxies themselves
+ // form a linked list of all unresolved proxies.
+ VariableProxy* unresolved_;
// Declarations.
ZoneList<Declaration*> decls_;
- // Convenience variable.
- Variable* receiver_;
- // Function variable, if any; function scopes only.
- VariableDeclaration* function_;
- // new.target variable, function scopes only.
- Variable* new_target_;
- // Convenience variable; function scopes only.
- Variable* arguments_;
- // Convenience variable; Subclass constructor only
- Variable* this_function_;
- // Module descriptor; module scopes only.
- ModuleDescriptor* module_descriptor_;
-
- // Map of function names to lists of functions defined in sloppy blocks
- SloppyBlockFunctionMap sloppy_block_function_map_;
- // Scope-specific information computed during parsing.
- //
- // This scope is inside a 'with' of some outer scope.
- bool scope_inside_with_;
- // This scope or a nested catch scope or with scope contain an 'eval' call. At
- // the 'eval' call site this scope is the declaration scope.
- bool scope_calls_eval_;
- // This scope uses "arguments".
- bool scope_uses_arguments_;
- // This scope uses "super" property ('super.foo').
- bool scope_uses_super_property_;
- // This scope contains an "use asm" annotation.
- bool asm_module_;
- // This scope's outer context is an asm module.
- bool asm_function_;
- // This scope's declarations might not be executed in order (e.g., switch).
- bool scope_nonlinear_;
- // The language mode of this scope.
- LanguageMode language_mode_;
- // Source positions.
- int start_position_;
- int end_position_;
-
- // Computed via PropagateScopeInfo.
- bool outer_scope_calls_sloppy_eval_;
- bool inner_scope_calls_eval_;
- bool force_eager_compilation_;
- bool force_context_allocation_;
+ // Serialized scope info support.
+ Handle<ScopeInfo> scope_info_;
+// Debugging support.
+#ifdef DEBUG
+ const AstRawString* scope_name_;
// True if it doesn't need scope resolution (e.g., if the scope was
// constructed based on a serialized scope info or a catch context).
- bool already_resolved_;
-
- // True if it holds 'var' declarations.
- bool is_declaration_scope_;
+ bool already_resolved_ : 1;
+#endif
- // Computed as variables are declared.
- int num_var_or_const_;
+ // Source positions.
+ int start_position_;
+ int end_position_;
- // Computed via AllocateVariables; function, block and catch scopes only.
+ // Computed via AllocateVariables.
int num_stack_slots_;
int num_heap_slots_;
int num_global_slots_;
- // Info about the parameter list of a function.
- int arity_;
- bool has_simple_parameters_;
- Variable* rest_parameter_;
- int rest_index_;
+ // The scope type.
+ const ScopeType scope_type_;
- // Serialized scope info support.
- Handle<ScopeInfo> scope_info_;
- bool already_resolved() { return already_resolved_; }
+ // Scope-specific information computed during parsing.
+ //
+ // The language mode of this scope.
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ bool is_strict_ : 1;
+ // This scope or a nested catch scope or with scope contain an 'eval' call. At
+ // the 'eval' call site this scope is the declaration scope.
+ bool scope_calls_eval_ : 1;
+ // This scope's declarations might not be executed in order (e.g., switch).
+ bool scope_nonlinear_ : 1;
+ bool is_hidden_ : 1;
+ // Temporary workaround that allows masking of 'this' in debug-evaluate scopes.
+ bool is_debug_evaluate_scope_ : 1;
+
+ bool inner_scope_calls_eval_ : 1;
+ bool force_context_allocation_ : 1;
+
+ // True if it holds 'var' declarations.
+ bool is_declaration_scope_ : 1;
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
@@ -728,80 +576,354 @@ class Scope: public ZoneObject {
};
// Lookup a variable reference given by name recursively starting with this
- // scope. If the code is executed because of a call to 'eval', the context
- // parameter should be set to the calling context of 'eval'.
+ // scope, and stopping when reaching the outer_scope_end scope. If the code is
+ // executed because of a call to 'eval', the context parameter should be set
+ // to the calling context of 'eval'.
Variable* LookupRecursive(VariableProxy* proxy, BindingKind* binding_kind,
- AstNodeFactory* factory);
- MUST_USE_RESULT
- bool ResolveVariable(ParseInfo* info, VariableProxy* proxy,
+ AstNodeFactory* factory,
+ Scope* outer_scope_end = nullptr);
+ void ResolveTo(ParseInfo* info, BindingKind binding_kind,
+ VariableProxy* proxy, Variable* var);
+ void ResolveVariable(ParseInfo* info, VariableProxy* proxy,
AstNodeFactory* factory);
- MUST_USE_RESULT
- bool ResolveVariablesRecursively(ParseInfo* info, AstNodeFactory* factory);
+ void ResolveVariablesRecursively(ParseInfo* info, AstNodeFactory* factory);
+
+ // Finds free variables of this scope. This mutates the unresolved variables
+ // list along the way, so full resolution cannot be done afterwards.
+ // If a ParseInfo* is passed, non-free variables will be resolved.
+ VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
+ ParseInfo* info = nullptr,
+ VariableProxy* stack = nullptr);
// Scope analysis.
- void PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
- bool HasTrivialContext() const;
+ void PropagateScopeInfo();
// Predicates.
bool MustAllocate(Variable* var);
bool MustAllocateInContext(Variable* var);
- bool HasArgumentsParameter(Isolate* isolate);
// Variable allocation.
void AllocateStackSlot(Variable* var);
void AllocateHeapSlot(Variable* var);
- void AllocateParameterLocals(Isolate* isolate);
- void AllocateNonParameterLocal(Isolate* isolate, Variable* var);
- void AllocateDeclaredGlobal(Isolate* isolate, Variable* var);
- void AllocateNonParameterLocalsAndDeclaredGlobals(Isolate* isolate);
- void AllocateVariablesRecursively(Isolate* isolate);
- void AllocateParameter(Variable* var, int index);
- void AllocateReceiver();
-
- // Resolve and fill in the allocation information for all variables
- // in this scopes. Must be called *after* all scopes have been
- // processed (parsed) to ensure that unresolved variables can be
- // resolved properly.
- //
- // In the case of code compiled and run using 'eval', the context
- // parameter is the context in which eval was called. In all other
- // cases the context parameter is an empty handle.
- MUST_USE_RESULT
- bool AllocateVariables(ParseInfo* info, AstNodeFactory* factory);
+ void AllocateNonParameterLocal(Variable* var);
+ void AllocateDeclaredGlobal(Variable* var);
+ void AllocateNonParameterLocalsAndDeclaredGlobals();
+ void AllocateVariablesRecursively();
// Construct a scope based on the scope info.
Scope(Zone* zone, Scope* inner_scope, ScopeType type,
- Handle<ScopeInfo> scope_info, AstValueFactory* value_factory);
+ Handle<ScopeInfo> scope_info);
// Construct a catch scope with a binding for the name.
- Scope(Zone* zone, Scope* inner_scope, const AstRawString* catch_variable_name,
- AstValueFactory* value_factory);
+ Scope(Zone* zone, Scope* inner_scope,
+ const AstRawString* catch_variable_name);
void AddInnerScope(Scope* inner_scope) {
- if (inner_scope != NULL) {
- inner_scopes_.Add(inner_scope, zone_);
- inner_scope->outer_scope_ = this;
- }
+ inner_scope->sibling_ = inner_scope_;
+ inner_scope_ = inner_scope;
+ inner_scope->outer_scope_ = this;
}
void RemoveInnerScope(Scope* inner_scope) {
DCHECK_NOT_NULL(inner_scope);
- for (int i = 0; i < inner_scopes_.length(); i++) {
- if (inner_scopes_[i] == inner_scope) {
- inner_scopes_.Remove(i);
- break;
+ if (inner_scope == inner_scope_) {
+ inner_scope_ = inner_scope_->sibling_;
+ return;
+ }
+ for (Scope* scope = inner_scope_; scope != nullptr;
+ scope = scope->sibling_) {
+ if (scope->sibling_ == inner_scope) {
+ scope->sibling_ = scope->sibling_->sibling_;
+ return;
}
}
}
- void SetDefaults(ScopeType type, Scope* outer_scope,
- Handle<ScopeInfo> scope_info,
+ void SetDefaults();
+
+ void DeserializeScopeInfo(Isolate* isolate,
+ AstValueFactory* ast_value_factory);
+
+ friend class DeclarationScope;
+};
+
+class DeclarationScope : public Scope {
+ public:
+ DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
FunctionKind function_kind = kNormalFunction);
+ DeclarationScope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
+ Handle<ScopeInfo> scope_info);
+ // Creates a script scope.
+ explicit DeclarationScope(Zone* zone);
- AstValueFactory* ast_value_factory_;
- Zone* zone_;
+ bool IsDeclaredParameter(const AstRawString* name) {
+ // If IsSimpleParameterList is false, duplicate parameters are not allowed,
+ // however `arguments` may be allowed if function is not strict code. Thus,
+ // the assumptions explained above do not hold.
+ return params_.Contains(variables_.Lookup(name));
+ }
+
+ FunctionKind function_kind() const { return function_kind_; }
+
+ bool is_arrow_scope() const {
+ return is_function_scope() && IsArrowFunction(function_kind_);
+ }
+
+ // Inform the scope that the corresponding code uses "super".
+ void RecordSuperPropertyUsage() { scope_uses_super_property_ = true; }
+ // Does this scope access "super" property (super.foo).
+ bool uses_super_property() const { return scope_uses_super_property_; }
+
+ bool NeedsHomeObject() const {
+ return scope_uses_super_property_ ||
+ (inner_scope_calls_eval_ && (IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()) ||
+ IsClassConstructor(function_kind())));
+ }
+
+ bool asm_module() const { return asm_module_; }
+ void set_asm_module() { asm_module_ = true; }
+ bool asm_function() const { return asm_function_; }
+ void set_asm_function() { asm_function_ = true; }
+
+ void DeclareThis(AstValueFactory* ast_value_factory);
+ void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory);
+
+ // This lookup corresponds to a lookup in the "intermediate" scope sitting
+ // between this scope and the outer scope. (ECMA-262, 3rd ed., requires that
+ // the name of a named function literal is kept in an intermediate scope
+ // in between this scope and the next outer scope.)
+ Variable* LookupFunctionVar(const AstRawString* name);
+
+ // Declare the function variable for a function literal. This variable
+ // is in an intermediate scope between this function scope and the
+ // outer scope. Only possible for function scopes; at most one variable.
+ Variable* DeclareFunctionVar(const AstRawString* name);
+
+ // Declare a parameter in this scope. When there are duplicated
+ // parameters the rightmost one 'wins'. However, the implementation
+ // expects all parameters to be declared and from left to right.
+ Variable* DeclareParameter(const AstRawString* name, VariableMode mode,
+ bool is_optional, bool is_rest, bool* is_duplicate,
+ AstValueFactory* ast_value_factory);
+
+ // Declare an implicit global variable in this scope which must be a
+ // script scope. The variable was introduced (possibly from an inner
+ // scope) by a reference to an unresolved variable with no intervening
+ // with statements or eval calls.
+ Variable* DeclareDynamicGlobal(const AstRawString* name,
+ Variable::Kind variable_kind);
+
+ // The variable corresponding to the 'this' value.
+ Variable* receiver() {
+ DCHECK(has_this_declaration());
+ DCHECK_NOT_NULL(receiver_);
+ return receiver_;
+ }
+
+ // TODO(wingo): Add a GLOBAL_SCOPE scope type which will lexically allocate
+ // "this" (and no other variable) on the native context. Script scopes then
+ // will not have a "this" declaration.
+ bool has_this_declaration() const {
+ return (is_function_scope() && !is_arrow_scope()) || is_module_scope();
+ }
+
+ // The variable corresponding to the 'new.target' value.
+ Variable* new_target_var() { return new_target_; }
+
+ // The variable holding the function literal for named function
+ // literals, or NULL. Only valid for function scopes.
+ Variable* function_var() const {
+ DCHECK(is_function_scope());
+ return function_;
+ }
+
+ // Parameters. The left-most parameter has index 0.
+ // Only valid for function scopes.
+ Variable* parameter(int index) const {
+ DCHECK(is_function_scope());
+ return params_[index];
+ }
+
+ // Returns the default function arity excluding default or rest parameters.
+ int default_function_length() const { return arity_; }
+
+ // Returns the number of formal parameters, up to but not including the
+ // rest parameter index (if the function has rest parameters), i.e. it
+ // says 2 for
+ //
+ // function foo(a, b) { ... }
+ //
+ // and
+ //
+ // function foo(a, b, ...c) { ... }
+ //
+ // but for
+ //
+ // function foo(a, b, c = 1) { ... }
+ //
+ // we return 3 here.
+ int num_parameters() const {
+ return has_rest_parameter() ? params_.length() - 1 : params_.length();
+ }
+
+ // A function can have at most one rest parameter. Returns Variable* or NULL.
+ Variable* rest_parameter(int* index) const {
+ *index = rest_index_;
+ if (rest_index_ < 0) return nullptr;
+ return params_[rest_index_];
+ }
+
+ bool has_rest_parameter() const { return rest_index_ >= 0; }
+
+ bool has_simple_parameters() const { return has_simple_parameters_; }
+
+ // TODO(caitp): manage this state in a better way. PreParser must be able to
+ // communicate that the scope is non-simple, without allocating any parameters
+ // as the Parser does. This is necessary to ensure that TC39's proposed early
+ // error can be reported consistently regardless of whether lazily parsed or
+ // not.
+ void SetHasNonSimpleParameters() {
+ DCHECK(is_function_scope());
+ has_simple_parameters_ = false;
+ }
+
+ // The local variable 'arguments' if we need to allocate it; NULL otherwise.
+ Variable* arguments() const {
+ DCHECK(!is_arrow_scope() || arguments_ == nullptr);
+ return arguments_;
+ }
- PendingCompilationErrorHandler pending_error_handler_;
+ Variable* this_function_var() const {
+ // This is only used in derived constructors atm.
+ DCHECK(this_function_ == nullptr ||
+ (is_function_scope() && (IsClassConstructor(function_kind()) ||
+ IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()))));
+ return this_function_;
+ }
+
+ // Adds a temporary variable in this scope's TemporaryScope. This is for
+ // adjusting the scope of temporaries used when desugaring parameter
+ // initializers.
+ void AddTemporary(Variable* var) {
+ DCHECK(!already_resolved_);
+ // Temporaries are only placed in ClosureScopes.
+ DCHECK_EQ(GetClosureScope(), this);
+ temps_.Add(var, zone());
+ }
+
+ ZoneList<Variable*>* temps() { return &temps_; }
+
+ void DeclareSloppyBlockFunction(const AstRawString* name,
+ SloppyBlockFunctionStatement* statement) {
+ sloppy_block_function_map_.Declare(zone(), name, statement);
+ }
+
+ SloppyBlockFunctionMap* sloppy_block_function_map() {
+ return &sloppy_block_function_map_;
+ }
+
+ // Resolve and fill in the allocation information for all variables
+ // in this scope. Must be called *after* all scopes have been
+ // processed (parsed) to ensure that unresolved variables can be
+ // resolved properly.
+ //
+ // In the case of code compiled and run using 'eval', the context
+ // parameter is the context in which eval was called. In all other
+ // cases the context parameter is an empty handle.
+ void AllocateVariables(ParseInfo* info, AstNodeFactory* factory);
+
+ // To be called during parsing. Do just enough scope analysis that we can
+ // discard the Scope for lazily compiled functions. In particular, this
+ // records variables which cannot be resolved inside the Scope (we don't yet
+ // know what they will resolve to since the outer Scopes are incomplete) and
+ // migrates them into migrate_to.
+ void AnalyzePartially(DeclarationScope* migrate_to,
+ AstNodeFactory* ast_node_factory);
+
+ Handle<StringSet> CollectNonLocals(ParseInfo* info,
+ Handle<StringSet> non_locals);
+
+ // Determine if we can use lazy compilation for this scope.
+ bool AllowsLazyCompilation() const;
+
+ // Determine if we can use lazy compilation for this scope without a context.
+ bool AllowsLazyCompilationWithoutContext() const;
+
+ // Make sure this closure and all outer closures are eagerly compiled.
+ void ForceEagerCompilation() {
+ DCHECK_EQ(this, GetClosureScope());
+ for (DeclarationScope* s = this; !s->is_script_scope();
+ s = s->outer_scope()->GetClosureScope()) {
+ s->force_eager_compilation_ = true;
+ }
+ }
+
+#ifdef DEBUG
+ void PrintParameters();
+#endif
+
+ void AllocateLocals();
+ void AllocateParameterLocals();
+ void AllocateReceiver();
+
+ private:
+ void AllocateParameter(Variable* var, int index);
+
+ void SetDefaults();
+
+ // If the scope is a function scope, this is the function kind.
+ const FunctionKind function_kind_;
+
+ bool has_simple_parameters_ : 1;
+ // This scope contains an "use asm" annotation.
+ bool asm_module_ : 1;
+ // This scope's outer context is an asm module.
+ bool asm_function_ : 1;
+ bool force_eager_compilation_ : 1;
+ // This scope has a parameter called "arguments".
+ bool has_arguments_parameter_ : 1;
+ // This scope uses "super" property ('super.foo').
+ bool scope_uses_super_property_ : 1;
+
+ // Info about the parameter list of a function.
+ int arity_;
+ int rest_index_;
+ // Compiler-allocated (user-invisible) temporaries.
+ ZoneList<Variable*> temps_;
+ // Parameter list in source order.
+ ZoneList<Variable*> params_;
+ // Map of function names to lists of functions defined in sloppy blocks.
+ SloppyBlockFunctionMap sloppy_block_function_map_;
+ // Convenience variable.
+ Variable* receiver_;
+ // Function variable, if any; function scopes only.
+ Variable* function_;
+ // new.target variable, function scopes only.
+ Variable* new_target_;
+ // Convenience variable; function scopes only.
+ Variable* arguments_;
+ // Convenience variable; subclass constructors only.
+ Variable* this_function_;
+};
+
+class ModuleScope final : public DeclarationScope {
+ public:
+ ModuleScope(Zone* zone, DeclarationScope* script_scope,
+ AstValueFactory* ast_value_factory);
+
+ ModuleDescriptor* module() const {
+ DCHECK_NOT_NULL(module_descriptor_);
+ return module_descriptor_;
+ }
+
+ // Set MODULE as VariableLocation for all variables that will live in some
+ // module's export table.
+ void AllocateModuleVariables();
+
+ private:
+ ModuleDescriptor* module_descriptor_;
};
} // namespace internal
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index 7b9a5d2957..0541f942f0 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -4,8 +4,8 @@
#include "src/ast/variables.h"
-#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -19,7 +19,6 @@ const char* Variable::Mode2String(VariableMode mode) {
case CONST_LEGACY: return "CONST_LEGACY";
case LET: return "LET";
case CONST: return "CONST";
- case IMPORT: return "IMPORT";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
@@ -29,7 +28,6 @@ const char* Variable::Mode2String(VariableMode mode) {
return NULL;
}
-
Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
Kind kind, InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag)
@@ -39,9 +37,8 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
kind_(kind),
location_(VariableLocation::UNALLOCATED),
index_(-1),
- initializer_position_(RelocInfo::kNoPosition),
+ initializer_position_(kNoSourcePosition),
local_if_not_shadowed_(NULL),
- is_from_eval_(false),
force_context_allocation_(false),
is_used_(false),
initialization_flag_(initialization_flag),
@@ -56,7 +53,7 @@ bool Variable::IsGlobalObjectProperty() const {
// activation frame.
return (IsDynamicVariableMode(mode_) ||
(IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_))) &&
- scope_ != NULL && scope_->is_script_scope() && !is_this();
+ scope_ != NULL && scope_->is_script_scope();
}
@@ -64,7 +61,7 @@ bool Variable::IsStaticGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
return (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)) &&
- scope_ != NULL && scope_->is_script_scope() && !is_this();
+ scope_ != NULL && scope_->is_script_scope();
}
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index b8bb07eab7..f1f63b8a14 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -15,7 +15,7 @@ namespace internal {
// variables. Variables themselves are never directly referred to from the AST,
// they are maintained by scopes, and referred to from VariableProxies and Slots
// after binding and variable allocation.
-class Variable: public ZoneObject {
+class Variable final : public ZoneObject {
public:
enum Kind { NORMAL, FUNCTION, THIS, ARGUMENTS };
@@ -23,8 +23,6 @@ class Variable: public ZoneObject {
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
- virtual ~Variable() {}
-
// Printing support
static const char* Mode2String(VariableMode mode);
@@ -45,6 +43,7 @@ class Variable: public ZoneObject {
return force_context_allocation_;
}
void ForceContextAllocation() {
+ DCHECK(IsUnallocated() || IsContextSlot());
force_context_allocation_ = true;
}
bool is_used() { return is_used_; }
@@ -55,10 +54,6 @@ class Variable: public ZoneObject {
int initializer_position() { return initializer_position_; }
void set_initializer_position(int pos) { initializer_position_ = pos; }
- bool IsVariable(Handle<String> n) const {
- return !is_this() && name().is_identical_to(n);
- }
-
bool IsUnallocated() const {
return location_ == VariableLocation::UNALLOCATED;
}
@@ -77,6 +72,8 @@ class Variable: public ZoneObject {
bool is_dynamic() const { return IsDynamicVariableMode(mode_); }
bool is_const_mode() const { return IsImmutableVariableMode(mode_); }
bool binding_needs_init() const {
+ DCHECK(initialization_flag_ != kNeedsInitialization ||
+ IsLexicalVariableMode(mode_));
return initialization_flag_ == kNeedsInitialization;
}
@@ -84,21 +81,6 @@ class Variable: public ZoneObject {
bool is_this() const { return kind_ == THIS; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
- // For script scopes, the "this" binding is provided by a ScriptContext added
- // to the global's ScriptContextTable. This binding might not statically
- // resolve to a Variable::THIS binding, instead being DYNAMIC_LOCAL. However
- // any variable named "this" does indeed refer to a Variable::THIS binding;
- // the grammar ensures this to be the case. So wherever a "this" binding
- // might be provided by the global, use HasThisName instead of is_this().
- bool HasThisName(Isolate* isolate) const {
- return is_this() || *name() == *isolate->factory()->this_string();
- }
-
- // True if the variable is named eval and not known to be shadowed.
- bool is_possibly_eval(Isolate* isolate) const {
- return IsVariable(isolate->factory()->eval_string());
- }
-
Variable* local_if_not_shadowed() const {
DCHECK(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
return local_if_not_shadowed_;
@@ -115,25 +97,13 @@ class Variable: public ZoneObject {
}
void AllocateTo(VariableLocation location, int index) {
+ DCHECK(IsUnallocated() || (location_ == location && index_ == index));
location_ = location;
index_ = index;
}
- void SetFromEval() { is_from_eval_ = true; }
-
static int CompareIndex(Variable* const* v, Variable* const* w);
- PropertyAttributes DeclarationPropertyAttributes() const {
- int property_attributes = NONE;
- if (IsImmutableVariableMode(mode_)) {
- property_attributes |= READ_ONLY;
- }
- if (is_from_eval_) {
- property_attributes |= EVAL_DECLARED;
- }
- return static_cast<PropertyAttributes>(property_attributes);
- }
-
private:
Scope* scope_;
const AstRawString* name_;
@@ -149,9 +119,6 @@ class Variable: public ZoneObject {
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
- // True if this variable is introduced by a sloppy eval
- bool is_from_eval_;
-
// Usage info.
bool force_context_allocation_; // set by variable resolver
bool is_used_;
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index 3e0a5dcc42..5df46c82b9 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -11,7 +11,7 @@ namespace internal {
BackgroundParsingTask::BackgroundParsingTask(
StreamedSource* source, ScriptCompiler::CompileOptions options,
int stack_size, Isolate* isolate)
- : source_(source), stack_size_(stack_size) {
+ : source_(source), stack_size_(stack_size), script_data_(nullptr) {
// We don't set the context to the CompilationInfo yet, because the background
// thread cannot do anything with it anyway. We set it just before compilation
// on the foreground thread.
@@ -23,8 +23,8 @@ BackgroundParsingTask::BackgroundParsingTask(
// will happen in the main thread after parsing.
Zone* zone = new Zone(isolate->allocator());
ParseInfo* info = new ParseInfo(zone);
- source->zone.Reset(zone);
- source->info.Reset(info);
+ source->zone.reset(zone);
+ source->info.reset(info);
info->set_isolate(isolate);
info->set_source_stream(source->source_stream.get());
info->set_source_stream_encoding(source->encoding);
@@ -34,6 +34,17 @@ BackgroundParsingTask::BackgroundParsingTask(
info->set_compile_options(options);
// Parse eagerly with ignition since we will compile eagerly.
info->set_allow_lazy_parsing(!(i::FLAG_ignition && i::FLAG_ignition_eager));
+
+ if (options == ScriptCompiler::kProduceParserCache ||
+ options == ScriptCompiler::kProduceCodeCache) {
+ source_->info->set_cached_data(&script_data_);
+ }
+ // Parser needs to stay alive for finalizing the parsing on the main
+ // thread.
+ source_->parser.reset(new Parser(source_->info.get()));
+ source_->parser->DeserializeScopeChain(
+ source_->info.get(), Handle<Context>::null(),
+ Scope::DeserializationMode::kDeserializeOffHeap);
}
@@ -42,29 +53,28 @@ void BackgroundParsingTask::Run() {
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- ScriptData* script_data = NULL;
- ScriptCompiler::CompileOptions options = source_->info->compile_options();
- if (options == ScriptCompiler::kProduceParserCache ||
- options == ScriptCompiler::kProduceCodeCache) {
- source_->info->set_cached_data(&script_data);
- }
-
+ // Reset the parser's stack limit to reflect that we are now running on a
+ // background thread.
uintptr_t stack_limit =
reinterpret_cast<uintptr_t>(&stack_limit) - stack_size_ * KB;
+ source_->parser->set_stack_limit(stack_limit);
+
+ // Nullify the Isolate temporarily so that the background parser doesn't
+ // accidentally use it.
+ Isolate* isolate = source_->info->isolate();
+ source_->info->set_isolate(nullptr);
- source_->info->set_stack_limit(stack_limit);
- // Parser needs to stay alive for finalizing the parsing on the main
- // thread. Passing &parse_info is OK because Parser doesn't store it.
- source_->parser.Reset(new Parser(source_->info.get()));
source_->parser->ParseOnBackground(source_->info.get());
- if (script_data != NULL) {
- source_->cached_data.Reset(new ScriptCompiler::CachedData(
- script_data->data(), script_data->length(),
+ if (script_data_ != nullptr) {
+ source_->cached_data.reset(new ScriptCompiler::CachedData(
+ script_data_->data(), script_data_->length(),
ScriptCompiler::CachedData::BufferOwned));
- script_data->ReleaseDataOwnership();
- delete script_data;
+ script_data_->ReleaseDataOwnership();
+ delete script_data_;
+ script_data_ = nullptr;
}
+ source_->info->set_isolate(isolate);
}
} // namespace internal
} // namespace v8
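
The stack limit above is derived from the address of a local variable: on entry to the background thread, &stack_limit sits near the top of that thread's stack, so subtracting the budget yields a usable lower bound. A minimal standalone sketch of the same trick, with an illustrative 64 KB budget rather than V8's actual value:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kStackBudget = 64 * 1024;  // hypothetical per-thread budget
      // A local's address approximates the current stack pointer; stacks grow
      // downwards on the supported platforms, so the limit lies below it.
      uintptr_t anchor = 0;
      uintptr_t limit = reinterpret_cast<uintptr_t>(&anchor) - kStackBudget;
      std::printf("approximate stack limit: 0x%llx\n",
                  static_cast<unsigned long long>(limit));
      return 0;
    }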
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
index 0f290fb7f0..1bf9d74121 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/background-parsing-task.h
@@ -5,15 +5,19 @@
#ifndef V8_BACKGROUND_PARSING_TASK_H_
#define V8_BACKGROUND_PARSING_TASK_H_
+#include <memory>
+
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
-#include "src/base/smart-pointers.h"
#include "src/compiler.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
+class ScriptData;
+
// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
// data which needs to be transmitted between threads for background parsing,
// finalizing it on the main thread, and compiling on the main thread.
@@ -23,17 +27,17 @@ struct StreamedSource {
: source_stream(source_stream), encoding(encoding) {}
// Internal implementation of v8::ScriptCompiler::StreamedSource.
- base::SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
+ std::unique_ptr<ScriptCompiler::ExternalSourceStream> source_stream;
ScriptCompiler::StreamedSource::Encoding encoding;
- base::SmartPointer<ScriptCompiler::CachedData> cached_data;
+ std::unique_ptr<ScriptCompiler::CachedData> cached_data;
// Data needed for parsing, and data needed to be passed between threads
// between parsing and compilation. These need to be initialized before the
// compilation starts.
UnicodeCache unicode_cache;
- base::SmartPointer<Zone> zone;
- base::SmartPointer<ParseInfo> info;
- base::SmartPointer<Parser> parser;
+ std::unique_ptr<Zone> zone;
+ std::unique_ptr<ParseInfo> info;
+ std::unique_ptr<Parser> parser;
private:
// Prevent copying. Not implemented.
@@ -53,6 +57,7 @@ class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
private:
StreamedSource* source_; // Not owned.
int stack_size_;
+ ScriptData* script_data_;
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 92929cf38c..0966334ffa 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -14,11 +14,11 @@ namespace internal {
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
+ V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kArgumentsObjectValueInATestContext, \
"Arguments object value in a test context") \
- V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed") \
V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
V(kAssignmentToArguments, "Assignment to arguments") \
V(kAssignmentToLetVariableBeforeInitialization, \
@@ -62,10 +62,11 @@ namespace internal {
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
- V(kExpectedAlignmentMarker, "Expected alignment marker") \
V(kExpectedAllocationSite, "Expected allocation site") \
+ V(kExpectedBooleanValue, "Expected boolean value") \
V(kExpectedFunctionObject, "Expected function object in register") \
V(kExpectedHeapNumber, "Expected HeapNumber") \
+ V(kExpectedJSReceiver, "Expected object to have receiver type") \
V(kExpectedNativeContext, "Expected native context") \
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedNonNullContext, "Expected non-null context") \
@@ -73,26 +74,22 @@ namespace internal {
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes") \
- V(kExportDeclaration, "Export declaration") \
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
V(kForInStatementWithNonLocalEachVariable, \
"ForInStatement with non-local each variable") \
V(kForOfStatement, "ForOfStatement") \
- V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
- V(kGeneratorFailedToResume, "Generator failed to resume") \
- V(kGeneratorResumeMethod, "Generator resume method is being called") \
V(kGenerator, "Generator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
+ V(kGraphBuildingFailed, "Optimized graph construction failed") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
V(kHydrogenFilter, "Optimization disabled by filter") \
- V(kImportDeclaration, "Import declaration") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
V(kInliningBailedOut, "Inlining bailed out") \
@@ -102,15 +99,22 @@ namespace internal {
V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
"Integer32ToSmiField writing to non-smi location") \
V(kInvalidBytecode, "Invalid bytecode") \
- V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
+ V(kInvalidFrameForFastNewRestArgumentsStub, \
+ "Invalid frame for FastNewRestArgumentsStub") \
+ V(kInvalidFrameForFastNewSloppyArgumentsStub, \
+ "Invalid frame for FastNewSloppyArgumentsStub") \
+ V(kInvalidFrameForFastNewStrictArgumentsStub, \
+ "Invalid frame for FastNewStrictArgumentsStub") \
V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
+ V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
+ V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
"JSGlobalObject::native_context should be a native context") \
V(kJSGlobalProxyContextShouldNotBeNull, \
@@ -118,7 +122,6 @@ namespace internal {
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
- V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
V(kMapBecameDeprecated, "Map became deprecated") \
@@ -143,13 +146,15 @@ namespace internal {
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
+ V(kOperandIsASmiAndNotAGeneratorObject, \
+ "Operand is a smi and not a generator object") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAReceiver, "Operand is a smi and not a receiver") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
- V(kOperandIsNotADate, "Operand is not a date") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFunction, "Operand is not a function") \
+ V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
V(kOperandIsNotAReceiver, "Operand is not a receiver") \
@@ -159,7 +164,8 @@ namespace internal {
V(kOperandNotANumber, "Operand not a number") \
V(kObjectTagged, "The object is tagged") \
V(kObjectNotTagged, "The object is not tagged") \
- V(kOptimizationDisabled, "Optimization is disabled") \
+ V(kOptimizationDisabled, "Optimization disabled") \
+ V(kOptimizationDisabledForTest, "Optimization disabled for test") \
V(kOptimizedTooManyTimes, "Optimized too many times") \
V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
"Out of virtual registers while trying to allocate temp register") \
@@ -185,8 +191,6 @@ namespace internal {
V(kTailCall, "Tail call") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
- V(kTheSourceAndDestinationAreTheSame, \
- "The source and destination are the same") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
@@ -228,8 +232,6 @@ namespace internal {
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
V(kUnexpectedNegativeValue, "Unexpected negative value") \
- V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
- "Unexpected number of pre-allocated property fields") \
V(kUnexpectedFunctionIDForInvokeIntrinsic, \
"Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
@@ -252,8 +254,6 @@ namespace internal {
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const or let variable") \
- V(kUnexpectedReturnFromBytecodeHandler, \
- "Unexpectedly returned from a bytecode handler") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnsupportedSwitchStatement, "Unsupported switch statement") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
@@ -268,7 +268,8 @@ namespace internal {
"Wrong number of arguments for intrinsic") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
- V(kYield, "Yield")
+ V(kConversionFromImpossibleValue, \
+ "Reached conversion from value with empty type (i.e., impossible type)")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index b51de01ac7..a9cfc89218 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -4,16 +4,9 @@
{
'includes': [
'../third_party/icu/icu.isolate',
- '../build/config/win/msvs_dependencies.isolate',
+ '../gypfiles/config/win/msvs_dependencies.isolate',
],
'conditions': [
- ['use_custom_libcxx==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/lib/libc++.so',
- ],
- },
- }],
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
'variables': {
'files': [
@@ -22,29 +15,6 @@
],
},
}],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and v8_separate_ignition_snapshot==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
- ],
- },
- }],
- ['OS=="linux" and component=="shared_library" and target_arch=="ia32"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/lib/',
- ],
- },
- }],
- ['OS=="win" and component=="shared_library"', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/icui18n.dll',
- '<(PRODUCT_DIR)/icuuc.dll',
- '<(PRODUCT_DIR)/v8.dll',
- ],
- },
- }],
['OS=="mac" and asan==1', {
'variables': {
'files': [
diff --git a/deps/v8/src/base/accounting-allocator.cc b/deps/v8/src/base/accounting-allocator.cc
index 2269c60680..c56f037c04 100644
--- a/deps/v8/src/base/accounting-allocator.cc
+++ b/deps/v8/src/base/accounting-allocator.cc
@@ -15,7 +15,14 @@ namespace base {
void* AccountingAllocator::Allocate(size_t bytes) {
void* memory = malloc(bytes);
- if (memory) NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
+ if (memory) {
+ AtomicWord current =
+ NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
+ AtomicWord max = NoBarrier_Load(&max_memory_usage_);
+ while (current > max) {
+ max = NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
+ }
+ }
return memory;
}
@@ -29,5 +36,9 @@ size_t AccountingAllocator::GetCurrentMemoryUsage() const {
return NoBarrier_Load(&current_memory_usage_);
}
+size_t AccountingAllocator::GetMaxMemoryUsage() const {
+ return NoBarrier_Load(&max_memory_usage_);
+}
+
} // namespace base
} // namespace v8
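
Allocate() above keeps a lock-free high-water mark: it bumps the current usage, loads the recorded maximum, and retries a compare-and-swap until it either wins or observes that another thread already stored a larger value. The same pattern sketched with std::atomic, an illustrative stand-in for V8's NoBarrier_* helpers:

    #include <atomic>
    #include <cstddef>

    // Raise |*max| to at least |candidate| without taking a lock.
    void UpdateMax(std::atomic<size_t>* max, size_t candidate) {
      size_t observed = max->load(std::memory_order_relaxed);
      // compare_exchange_weak refreshes |observed| on failure, so the loop
      // exits once we win the race or a value >= |candidate| is in place.
      while (candidate > observed &&
             !max->compare_exchange_weak(observed, candidate,
                                         std::memory_order_relaxed)) {
      }
    }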
diff --git a/deps/v8/src/base/accounting-allocator.h b/deps/v8/src/base/accounting-allocator.h
index ce67f3790e..4e1baf18d4 100644
--- a/deps/v8/src/base/accounting-allocator.h
+++ b/deps/v8/src/base/accounting-allocator.h
@@ -11,19 +11,21 @@
namespace v8 {
namespace base {
-class AccountingAllocator final {
+class AccountingAllocator {
public:
AccountingAllocator() = default;
- ~AccountingAllocator() = default;
+ virtual ~AccountingAllocator() = default;
// Returns nullptr on failed allocation.
- void* Allocate(size_t bytes);
- void Free(void* memory, size_t bytes);
+ virtual void* Allocate(size_t bytes);
+ virtual void Free(void* memory, size_t bytes);
size_t GetCurrentMemoryUsage() const;
+ size_t GetMaxMemoryUsage() const;
private:
AtomicWord current_memory_usage_ = 0;
+ AtomicWord max_memory_usage_ = 0;
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};
diff --git a/deps/v8/src/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 34e1cb0269..e19385dcb1 100644
--- a/deps/v8/src/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -11,7 +11,7 @@
#include "src/base/macros.h"
namespace v8 {
-namespace internal {
+namespace base {
template <class T>
class AtomicNumber {
@@ -19,12 +19,18 @@ class AtomicNumber {
AtomicNumber() : value_(0) {}
explicit AtomicNumber(T initial) : value_(initial) {}
- // Returns the newly set value.
+ // Returns the value after incrementing.
V8_INLINE T Increment(T increment) {
return static_cast<T>(base::Barrier_AtomicIncrement(
&value_, static_cast<base::AtomicWord>(increment)));
}
+ // Returns the value after decrementing.
+ V8_INLINE T Decrement(T decrement) {
+ return static_cast<T>(base::Barrier_AtomicIncrement(
+ &value_, -static_cast<base::AtomicWord>(decrement)));
+ }
+
V8_INLINE T Value() { return static_cast<T>(base::Acquire_Load(&value_)); }
V8_INLINE void SetValue(T new_value) {
@@ -36,6 +42,9 @@ class AtomicNumber {
return value;
}
+ V8_INLINE T operator+=(T value) { return Increment(value); }
+ V8_INLINE T operator-=(T value) { return Decrement(value); }
+
private:
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
@@ -52,7 +61,7 @@ class AtomicValue {
explicit AtomicValue(T initial)
: value_(cast_helper<T>::to_storage_type(initial)) {}
- V8_INLINE T Value() {
+ V8_INLINE T Value() const {
return cast_helper<T>::to_return_type(base::Acquire_Load(&value_));
}
@@ -169,7 +178,7 @@ class AtomicEnumSet {
base::AtomicWord bits_;
};
-} // namespace internal
+} // namespace base
} // namespace v8
#endif // #define V8_ATOMIC_UTILS_H_
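
The new Decrement and operator+=/operator-= members mirror Increment: each returns the value after the update. A behavioral sketch with std::atomic, whose fetch_add/fetch_sub return the pre-update value instead (illustrative only, not the AtomicNumber implementation):

    #include <atomic>
    #include <cassert>

    int main() {
      std::atomic<long> counter{10};
      // fetch_add/fetch_sub return the value *before* the update; adding the
      // delta back gives what AtomicNumber::Increment/Decrement would return.
      long after_inc = counter.fetch_add(5) + 5;  // 15
      long after_dec = counter.fetch_sub(3) - 3;  // 12
      assert(after_inc == 15);
      assert(after_dec == 12);
      return 0;
    }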
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index ea33e48928..973e96b9ea 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -42,17 +42,15 @@ namespace base {
typedef char Atomic8;
typedef int32_t Atomic32;
-#if defined(__native_client__)
-typedef int64_t Atomic64;
-#elif defined(V8_HOST_ARCH_64_BIT)
+#if defined(V8_HOST_ARCH_64_BIT)
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__ILP32__)
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
+#endif // defined(__ILP32__)
#endif // defined(V8_HOST_ARCH_64_BIT)
-#endif // defined(__native_client__)
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
@@ -143,8 +141,6 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "src/base/atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__)
#include "src/base/atomicops_internals_mac.h"
-#elif defined(__native_client__)
-#include "src/base/atomicops_internals_portable.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
#include "src/base/atomicops_internals_arm64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
diff --git a/deps/v8/src/base/atomicops_internals_mips64_gcc.h b/deps/v8/src/base/atomicops_internals_mips64_gcc.h
index 85b4e462b9..cf2e194e50 100644
--- a/deps/v8/src/base/atomicops_internals_mips64_gcc.h
+++ b/deps/v8/src/base/atomicops_internals_mips64_gcc.h
@@ -91,18 +91,19 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, %2\n" // temp = *ptr
- "addu %1, %0, %3\n" // temp2 = temp + increment
- "sc %1, %2\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %3\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
- : "memory");
+ __asm__ __volatile__(
+ ".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %2\n" // temp = *ptr
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
+ : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
+ : "Ir"(increment), "m"(*ptr)
+ : "memory");
// temp2 now holds the final value.
return temp2;
}
@@ -228,18 +229,19 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp, temp2;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "lld %0, %2\n" // temp = *ptr
- "daddu %1, %0, %3\n" // temp2 = temp + increment
- "scd %1, %2\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "daddu %1, %0, %3\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
- : "Ir" (increment), "m" (*ptr)
- : "memory");
+ __asm__ __volatile__(
+ ".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "lld %0, %2\n" // temp = *ptr
+ "daddu %1, %0, %3\n" // temp2 = temp + increment
+ "scd %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "daddu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
+ : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
+ : "Ir"(increment), "m"(*ptr)
+ : "memory");
// temp2 now holds the final value.
return temp2;
}
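
Both hunks reformat the same ll/sc (load-linked/store-conditional) loop: ll reads the word and arms a reservation, sc stores only if no other write intervened, and beqz branches back to retry when the reservation was lost. The retry shape, sketched portably with a CAS loop (for illustration; the real implementation stays in MIPS assembly):

    #include <atomic>

    // Retry until the store succeeds against an unchanged snapshot, as the
    // ll/sc loop does; like the assembly, return the post-increment value.
    int AtomicIncrement(std::atomic<int>* ptr, int increment) {
      int old_value = ptr->load(std::memory_order_relaxed);
      while (!ptr->compare_exchange_weak(old_value, old_value + increment)) {
        // |old_value| was refreshed by the failed CAS; try again.
      }
      return old_value + increment;
    }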
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
deleted file mode 100644
index bb99973786..0000000000
--- a/deps/v8/src/base/atomicops_internals_portable.h
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-
-namespace v8 {
-namespace base {
-
-inline void MemoryBarrier() { __sync_synchronize(); }
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return __sync_val_compare_and_swap(ptr, old_value, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __sync_lock_test_and_set(ptr, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return __sync_val_compare_and_swap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return __sync_val_compare_and_swap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- __sync_lock_test_and_set(ptr, value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- __sync_lock_test_and_set(ptr, value);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- __sync_lock_test_and_set(ptr, value);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __sync_lock_test_and_set(ptr, value);
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return __sync_add_and_fetch(ptr, 0);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return __sync_add_and_fetch(ptr, 0);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __sync_add_and_fetch(ptr, 0);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- return __sync_add_and_fetch(ptr, 0);
-}
-
-// 64-bit versions of the operations.
-// See the 32-bit versions for comments.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return __sync_val_compare_and_swap(ptr, old_value, new_value);
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __sync_lock_test_and_set(ptr, new_value);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return __sync_val_compare_and_swap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return __sync_val_compare_and_swap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- __sync_lock_test_and_set(ptr, value);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- __sync_lock_test_and_set(ptr, value);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __sync_lock_test_and_set(ptr, value);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return __sync_add_and_fetch(ptr, 0);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __sync_add_and_fetch(ptr, 0);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- return __sync_add_and_fetch(ptr, 0);
-}
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/deps/v8/src/base/bits.cc b/deps/v8/src/base/bits.cc
index 74d747fc90..909f9deb8c 100644
--- a/deps/v8/src/base/bits.cc
+++ b/deps/v8/src/base/bits.cc
@@ -7,6 +7,7 @@
#include <limits>
#include "src/base/logging.h"
+#include "src/base/safe_math.h"
namespace v8 {
namespace base {
@@ -48,6 +49,51 @@ int32_t SignedMod32(int32_t lhs, int32_t rhs) {
return lhs % rhs;
}
+
+int64_t FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value) {
+ if (value.IsValid())
+ return value.ValueUnsafe();
+
+ // We could return max/min but we don't really expose what the maximum delta
+ // is. Instead, return max/(-max), which is something that clients can reason
+ // about.
+ // TODO(rvargas) crbug.com/332611: don't use internal values.
+ int64_t limit = std::numeric_limits<int64_t>::max();
+ if (value.validity() == internal::RANGE_UNDERFLOW)
+ limit = -limit;
+ return value.ValueOrDefault(limit);
+}
+
+
+int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs) {
+ internal::CheckedNumeric<int64_t> rv(lhs);
+ rv += rhs;
+ return FromCheckedNumeric(rv);
+}
+
+
+int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs) {
+ internal::CheckedNumeric<int64_t> rv(lhs);
+ rv -= rhs;
+ return FromCheckedNumeric(rv);
+}
+
+bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+ internal::CheckedNumeric<int32_t> rv(lhs);
+ rv *= rhs;
+ int32_t limit = std::numeric_limits<int32_t>::max();
+ *val = rv.ValueOrDefault(limit);
+ return !rv.IsValid();
+}
+
+bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
+ internal::CheckedNumeric<int64_t> rv(lhs);
+ rv *= rhs;
+ int64_t limit = std::numeric_limits<int64_t>::max();
+ *val = rv.ValueOrDefault(limit);
+ return !rv.IsValid();
+}
+
} // namespace bits
} // namespace base
} // namespace v8
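
FromCheckedNumeric clamps an out-of-range value to max, or -max on underflow, so callers see a symmetric range rather than INT64_MIN. A self-contained sketch of the same saturating-add semantics, assuming GCC/Clang's __builtin_add_overflow instead of the CheckedNumeric machinery:

    #include <cstdint>
    #include <limits>

    // Saturating signed 64-bit add; clamps to +/-INT64_MAX, matching the
    // max/(-max) convention documented for FromCheckedNumeric above.
    int64_t SaturatedAdd64(int64_t lhs, int64_t rhs) {
      int64_t result;
      if (!__builtin_add_overflow(lhs, rhs, &result)) return result;
      const int64_t limit = std::numeric_limits<int64_t>::max();
      return lhs < 0 ? -limit : limit;  // overflow direction follows the signs
    }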
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 0e76624884..da12ee60fe 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -16,6 +16,12 @@
namespace v8 {
namespace base {
+
+namespace internal {
+template <typename T>
+class CheckedNumeric;
+}
+
namespace bits {
// CountPopulation32(value) returns the number of bits set in |value|.
@@ -105,7 +111,6 @@ T ReverseBits(T value) {
return result;
}
-
// CountTrailingZeros32(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns 32.
@@ -141,6 +146,14 @@ inline unsigned CountTrailingZeros64(uint64_t value) {
#endif
}
+// Overloaded versions of CountTrailingZeros32/64.
+inline unsigned CountTrailingZeros(uint32_t value) {
+ return CountTrailingZeros32(value);
+}
+
+inline unsigned CountTrailingZeros(uint64_t value) {
+ return CountTrailingZeros64(value);
+}
// Returns true iff |value| is a power of 2.
inline bool IsPowerOfTwo32(uint32_t value) {
@@ -225,6 +238,10 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#endif
}
+// SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
+// and |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed multiplication resulted in an overflow.
+bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
@@ -245,6 +262,10 @@ inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
}
+// SignedMulOverflow64(lhs,rhs,val) performs a signed multiplication of |lhs|
+// and |rhs| and stores the result into the variable pointed to by |val| and
+// returns true if the signed multiplication resulted in an overflow.
+bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val);
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
@@ -296,6 +317,21 @@ inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
return rhs ? lhs % rhs : 0u;
}
+
+// Clamp |value| on overflow and underflow conditions.
+int64_t FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
+
+
+// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|,
+// checks and returns the result.
+int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
+
+
+// SignedSaturatedSub64(lhs, rhs) subtracts |rhs| from |lhs|,
+// checks and returns the result.
+int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
+
+
} // namespace bits
} // namespace base
} // namespace v8
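
The new SignedMulOverflow32/64 declarations follow the header's existing convention: store the (possibly clamped) result through |val| and return true iff the operation overflowed. A hypothetical caller, using a widening stand-in for the 32-bit check:

    #include <cstdint>
    #include <cstdio>

    // Same shape as SignedMulOverflow32: compute the product exactly in 64
    // bits and compare it with its 32-bit truncation.
    bool MulOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
      int64_t wide = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
      *val = static_cast<int32_t>(wide);
      return wide != static_cast<int64_t>(*val);
    }

    int main() {
      int32_t out;
      if (MulOverflow32(1 << 20, 1 << 20, &out)) {
        std::printf("overflow detected\n");  // 2^40 does not fit in 32 bits
      }
      return 0;
    }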
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index 9637f657f9..e033134feb 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -12,27 +12,12 @@
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
-#if defined(__native_client__)
-// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
-// generates ARM machine code, together with a portable ARM simulator
-// compiled for the host architecture in question.
-//
-// Since Native Client is ILP-32 on all architectures we use
-// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
-#define V8_HOST_ARCH_IA32 1
-#define V8_HOST_ARCH_32_BIT 1
-#else
#define V8_HOST_ARCH_X64 1
#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4 // Check for x32.
#define V8_HOST_ARCH_32_BIT 1
#else
#define V8_HOST_ARCH_64_BIT 1
#endif
-#endif // __native_client__
-#elif defined(__pnacl__)
-// PNaCl is also ILP-32.
-#define V8_HOST_ARCH_IA32 1
-#define V8_HOST_ARCH_32_BIT 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index ffd5a44e14..822893ffec 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -26,6 +26,17 @@
#define WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
+// Tell the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+#if defined(__GNUC__)
+#define PRINTF_FORMAT(format_param, dots_param) \
+ __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
// The C++ standard requires that static const members have an out-of-class
// definition (in a single compilation unit), but MSVC chokes on this (when
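
With PRINTF_FORMAT in place, GCC and Clang type-check the variadic arguments against the format string at every call site (-Wformat). A usage sketch; LogF is a hypothetical function, not part of V8:

    #include <cstdarg>
    #include <cstdio>

    #if defined(__GNUC__)
    #define PRINTF_FORMAT(format_param, dots_param) \
      __attribute__((format(printf, format_param, dots_param)))
    #else
    #define PRINTF_FORMAT(format_param, dots_param)
    #endif

    PRINTF_FORMAT(1, 2) void LogF(const char* format, ...) {
      va_list args;
      va_start(args, format);
      std::vfprintf(stderr, format, args);
      va_end(args);
    }

    int main() {
      LogF("%d widgets\n", 3);       // checked: int matches %d
      // LogF("%d widgets\n", "x");  // would warn under -Wformat
      return 0;
    }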
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 12a3881919..7757192920 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -44,9 +44,7 @@
namespace v8 {
namespace base {
-#if defined(__pnacl__)
-// Portable host shouldn't do feature detection.
-#elif V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// Define __cpuid() for non-MSVC libraries.
#if !V8_LIBC_MSVCRT
@@ -338,13 +336,10 @@ CPU::CPU()
has_vfp_(false),
has_vfp3_(false),
has_vfp3_d32_(false),
- is_fp64_mode_(false) {
+ is_fp64_mode_(false),
+ has_non_stop_time_stamp_counter_(false) {
memcpy(vendor_, "Unknown", 8);
-#if V8_OS_NACL
-// Portable host shouldn't do feature detection.
-// TODO(jfb): Remove the hardcoded ARM simulator flags in the build, and
-// hardcode them here instead.
-#elif V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
int cpu_info[4];
// __cpuid with an InfoType argument of 0 returns the number of
@@ -419,6 +414,13 @@ CPU::CPU()
has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
}
+ // Check if the CPU has a non-stop (invariant) time stamp counter.
+ const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
+ if (num_ext_ids >= parameter_containing_non_stop_time_stamp_counter) {
+ __cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
+ has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
+ }
+
#elif V8_HOST_ARCH_ARM
#if V8_OS_LINUX
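
The probe added above queries extended CPUID leaf 0x80000007 and tests EDX bit 8, documented by Intel and AMD as the invariant ("non-stop") TSC flag. An x86-only sketch using the compiler's <cpuid.h> helper, an illustrative substitute for V8's own __cpuid wrapper:

    #include <cstdio>
    #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    #include <cpuid.h>

    bool HasInvariantTsc() {
      unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
      // __get_cpuid returns 0 when leaf 0x80000007 is unsupported.
      if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) return false;
      return (edx & (1u << 8)) != 0;  // EDX bit 8: invariant TSC
    }

    int main() {
      std::printf("invariant TSC: %d\n", HasInvariantTsc() ? 1 : 0);
      return 0;
    }
    #endif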
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 3778d27233..19d4102f5b 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -97,6 +97,9 @@ class CPU final {
bool has_lzcnt() const { return has_lzcnt_; }
bool has_popcnt() const { return has_popcnt_; }
bool is_atom() const { return is_atom_; }
+ bool has_non_stop_time_stamp_counter() const {
+ return has_non_stop_time_stamp_counter_;
+ }
// arm features
bool has_idiva() const { return has_idiva_; }
@@ -148,6 +151,7 @@ class CPU final {
bool has_vfp3_;
bool has_vfp3_d32_;
bool is_fp64_mode_;
+ bool has_non_stop_time_stamp_counter_;
};
} // namespace base
diff --git a/deps/v8/src/base/debug/stack_trace.cc b/deps/v8/src/base/debug/stack_trace.cc
new file mode 100644
index 0000000000..0a7a3f9ab9
--- /dev/null
+++ b/deps/v8/src/base/debug/stack_trace.cc
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/debug/stack_trace.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <sstream>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace debug {
+
+StackTrace::StackTrace(const void* const* trace, size_t count) {
+ count = std::min(count, arraysize(trace_));
+ if (count) memcpy(trace_, trace, count * sizeof(trace_[0]));
+ count_ = count;
+}
+
+StackTrace::~StackTrace() {}
+
+const void* const* StackTrace::Addresses(size_t* count) const {
+ *count = count_;
+ if (count_) return trace_;
+ return NULL;
+}
+
+std::string StackTrace::ToString() const {
+ std::stringstream stream;
+ OutputToStream(&stream);
+ return stream.str();
+}
+
+} // namespace debug
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/debug/stack_trace.h b/deps/v8/src/base/debug/stack_trace.h
new file mode 100644
index 0000000000..e938ef2868
--- /dev/null
+++ b/deps/v8/src/base/debug/stack_trace.h
@@ -0,0 +1,96 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2016 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_DEBUG_STACK_TRACE_H_
+#define V8_BASE_DEBUG_STACK_TRACE_H_
+
+#include <stddef.h>
+
+#include <iosfwd>
+#include <string>
+
+#include "src/base/build_config.h"
+
+#if V8_OS_POSIX
+#include <unistd.h>
+#endif
+
+#if V8_OS_WIN
+struct _EXCEPTION_POINTERS;
+struct _CONTEXT;
+#endif
+
+namespace v8 {
+namespace base {
+namespace debug {
+
+// Enables stack dumps to console output on exceptions and signals.
+// When enabled, the process will quit immediately. This is meant to be used in
+// tests only!
+bool EnableInProcessStackDumping();
+void DisableSignalStackDump();
+
+// A stacktrace can be helpful in debugging. For example, you can include a
+// stacktrace member in an object (probably around #ifndef NDEBUG) so that you
+// can later see where the given object was created from.
+class StackTrace {
+ public:
+ // Creates a stacktrace from the current location.
+ StackTrace();
+
+ // Creates a stacktrace from an existing array of instruction
+ // pointers (such as returned by Addresses()). |count| will be
+ // trimmed to |kMaxTraces|.
+ StackTrace(const void* const* trace, size_t count);
+
+#if V8_OS_WIN
+ // Creates a stacktrace for an exception.
+ // Note: this function will throw an import not found (StackWalk64) exception
+ // on systems without dbghelp 5.1.
+ explicit StackTrace(_EXCEPTION_POINTERS* exception_pointers);
+ explicit StackTrace(const _CONTEXT* context);
+#endif
+
+ // Copying and assignment are allowed with the default functions.
+
+ ~StackTrace();
+
+ // Gets an array of instruction pointer values. |*count| will be set to the
+ // number of elements in the returned array.
+ const void* const* Addresses(size_t* count) const;
+
+ // Prints the stack trace to stderr.
+ void Print() const;
+
+ // Resolves the backtrace to symbols and writes it to the stream.
+ void OutputToStream(std::ostream* os) const;
+
+ // Resolves the backtrace to symbols and returns it as a string.
+ std::string ToString() const;
+
+ private:
+#if V8_OS_WIN
+ void InitTrace(const _CONTEXT* context_record);
+#endif
+
+ // From http://msdn.microsoft.com/en-us/library/bb204633.aspx,
+ // the sum of FramesToSkip and FramesToCapture must be less than 63,
+ // so set it to 62. Even though POSIX would allow a larger value, deeper
+ // traces usually don't give much more information.
+ static const int kMaxTraces = 62;
+
+ void* trace_[kMaxTraces];
+
+ // The number of valid frames in |trace_|.
+ size_t count_;
+};
+
+} // namespace debug
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_DEBUG_STACK_TRACE_H_
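
A short usage sketch for the interface above, assuming the file is linked into the target; how much gets symbolized depends on the platform backends that follow:

    #include <cstdio>

    #include "src/base/debug/stack_trace.h"

    void Inner() {
      // Capture the current call stack at construction time.
      v8::base::debug::StackTrace trace;
      std::printf("%s", trace.ToString().c_str());
    }

    int main() {
      Inner();  // the printed trace should include Inner() and main()
      return 0;
    }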
diff --git a/deps/v8/src/base/debug/stack_trace_android.cc b/deps/v8/src/base/debug/stack_trace_android.cc
new file mode 100644
index 0000000000..e1d5fd2e57
--- /dev/null
+++ b/deps/v8/src/base/debug/stack_trace_android.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2016 the V8 project authors. All rights reserved.
+
+#include "src/base/debug/stack_trace.h"
+
+#include <signal.h>
+#include <stddef.h>
+#include <string.h>
+#include <unwind.h>
+
+#include <src/base/platform/platform.h>
+
+#include <iomanip>
+#include <ostream>
+
+namespace {
+
+struct StackCrawlState {
+ StackCrawlState(uintptr_t* frames, size_t max_depth)
+ : frames(frames),
+ frame_count(0),
+ max_depth(max_depth),
+ have_skipped_self(false) {}
+
+ uintptr_t* frames;
+ size_t frame_count;
+ size_t max_depth;
+ bool have_skipped_self;
+};
+
+_Unwind_Reason_Code TraceStackFrame(_Unwind_Context* context, void* arg) {
+ StackCrawlState* state = static_cast<StackCrawlState*>(arg);
+ uintptr_t ip = _Unwind_GetIP(context);
+
+ // The first stack frame is this function itself. Skip it.
+ if (ip != 0 && !state->have_skipped_self) {
+ state->have_skipped_self = true;
+ return _URC_NO_REASON;
+ }
+
+ state->frames[state->frame_count++] = ip;
+ if (state->frame_count >= state->max_depth)
+ return _URC_END_OF_STACK;
+ return _URC_NO_REASON;
+}
+
+} // namespace
+
+namespace v8 {
+namespace base {
+namespace debug {
+
+bool EnableInProcessStackDumping() {
+ // When running in an application, our code typically expects SIGPIPE
+ // to be ignored. Therefore, when testing that same code, it should run
+ // with SIGPIPE ignored as well.
+ // TODO(phajdan.jr): De-duplicate this SIGPIPE code.
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ action.sa_handler = SIG_IGN;
+ sigemptyset(&action.sa_mask);
+ return (sigaction(SIGPIPE, &action, NULL) == 0);
+}
+
+void DisableSignalStackDump() {
+}
+
+StackTrace::StackTrace() {
+ StackCrawlState state(reinterpret_cast<uintptr_t*>(trace_), kMaxTraces);
+ _Unwind_Backtrace(&TraceStackFrame, &state);
+ count_ = state.frame_count;
+}
+
+void StackTrace::Print() const {
+ std::string backtrace = ToString();
+ OS::Print("%s\n", backtrace.c_str());
+}
+
+void StackTrace::OutputToStream(std::ostream* os) const {
+ for (size_t i = 0; i < count_; ++i) {
+ *os << "#" << std::setw(2) << i << trace_[i] << "\n";
+ }
+}
+
+} // namespace debug
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
new file mode 100644
index 0000000000..87c0a73d19
--- /dev/null
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -0,0 +1,460 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2016 the V8 project authors. All rights reserved.
+
+#include "src/base/debug/stack_trace.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <map>
+#include <memory>
+#include <ostream>
+#include <string>
+#include <vector>
+
+#if V8_LIBC_GLIBC || V8_LIBC_BSD || V8_LIBC_UCLIBC || V8_OS_SOLARIS
+#define HAVE_EXECINFO_H 1
+#endif
+
+#if HAVE_EXECINFO_H
+#include <cxxabi.h>
+#include <execinfo.h>
+#endif
+#if V8_OS_MACOSX
+#include <AvailabilityMacros.h>
+#endif
+
+#include "src/base/build_config.h"
+#include "src/base/free_deleter.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace debug {
+
+namespace internal {
+
+// POSIX doesn't define any async-signal safe function for converting
+// an integer to ASCII. We'll have to define our own version.
+// itoa_r() converts a (signed) integer to ASCII. It returns "buf", if the
+// conversion was successful or NULL otherwise. It never writes more than "sz"
+// bytes. Output will be truncated as needed, and a NUL character is always
+// appended.
+char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);
+
+} // namespace internal
+
+namespace {
+
+volatile sig_atomic_t in_signal_handler = 0;
+bool dump_stack_in_signal_handler = true;
+
+// The prefix used for mangled symbols, per the Itanium C++ ABI:
+// http://www.codesourcery.com/cxx-abi/abi.html#mangling
+const char kMangledSymbolPrefix[] = "_Z";
+
+// Characters that can be used for symbols, generated by Ruby:
+// (('a'..'z').to_a+('A'..'Z').to_a+('0'..'9').to_a + ['_']).join
+const char kSymbolCharacters[] =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
+
+// Demangles C++ symbols in the given text. Example:
+//
+// "out/Debug/base_unittests(_ZN10StackTraceC1Ev+0x20) [0x817778c]"
+// =>
+// "out/Debug/base_unittests(StackTrace::StackTrace()+0x20) [0x817778c]"
+void DemangleSymbols(std::string* text) {
+ // Note: code in this function is NOT async-signal safe (std::string uses
+ // malloc internally).
+
+#if HAVE_EXECINFO_H
+
+ std::string::size_type search_from = 0;
+ while (search_from < text->size()) {
+ // Look for the start of a mangled symbol, from search_from.
+ std::string::size_type mangled_start =
+ text->find(kMangledSymbolPrefix, search_from);
+ if (mangled_start == std::string::npos) {
+ break; // Mangled symbol not found.
+ }
+
+ // Look for the end of the mangled symbol.
+ std::string::size_type mangled_end =
+ text->find_first_not_of(kSymbolCharacters, mangled_start);
+ if (mangled_end == std::string::npos) {
+ mangled_end = text->size();
+ }
+ std::string mangled_symbol =
+ text->substr(mangled_start, mangled_end - mangled_start);
+
+ // Try to demangle the mangled symbol candidate.
+ int status = 0;
+ std::unique_ptr<char, FreeDeleter> demangled_symbol(
+ abi::__cxa_demangle(mangled_symbol.c_str(), NULL, 0, &status));
+ if (status == 0) { // Demangling is successful.
+ // Remove the mangled symbol.
+ text->erase(mangled_start, mangled_end - mangled_start);
+ // Insert the demangled symbol.
+ text->insert(mangled_start, demangled_symbol.get());
+ // Next time, we'll start right after the demangled symbol we inserted.
+ search_from = mangled_start + strlen(demangled_symbol.get());
+ } else {
+ // Failed to demangle. Retry after the "_Z" we just found.
+ search_from = mangled_start + 2;
+ }
+ }
+
+#endif // HAVE_EXECINFO_H
+}
+
+class BacktraceOutputHandler {
+ public:
+ virtual void HandleOutput(const char* output) = 0;
+
+ protected:
+ virtual ~BacktraceOutputHandler() {}
+};
+
+void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
+ // This should be more than enough to store a 64-bit number in hex:
+ // 16 hex digits + 1 for null-terminator.
+ char buf[17] = {'\0'};
+ handler->HandleOutput("0x");
+ internal::itoa_r(reinterpret_cast<intptr_t>(pointer), buf, sizeof(buf), 16,
+ 12);
+ handler->HandleOutput(buf);
+}
+
+#if HAVE_EXECINFO_H
+void ProcessBacktrace(void* const* trace, size_t size,
+ BacktraceOutputHandler* handler) {
+ // NOTE: This code MUST be async-signal safe (it's used by the in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+ handler->HandleOutput("\n");
+ handler->HandleOutput("==== C stack trace ===============================\n");
+ handler->HandleOutput("\n");
+
+ bool printed = false;
+
+ // The part below is async-signal unsafe (it uses malloc), so run it only
+ // when we are not executing the signal handler.
+ if (in_signal_handler == 0) {
+ std::unique_ptr<char*, FreeDeleter> trace_symbols(
+ backtrace_symbols(trace, static_cast<int>(size)));
+ if (trace_symbols.get()) {
+ for (size_t i = 0; i < size; ++i) {
+ std::string trace_symbol = trace_symbols.get()[i];
+ DemangleSymbols(&trace_symbol);
+ handler->HandleOutput(" ");
+ handler->HandleOutput(trace_symbol.c_str());
+ handler->HandleOutput("\n");
+ }
+
+ printed = true;
+ }
+ }
+
+ if (!printed) {
+ for (size_t i = 0; i < size; ++i) {
+ handler->HandleOutput(" [");
+ OutputPointer(trace[i], handler);
+ handler->HandleOutput("]\n");
+ }
+ }
+}
+#endif // HAVE_EXECINFO_H
+
+void PrintToStderr(const char* output) {
+ // NOTE: This code MUST be async-signal safe (it's used by the in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+ ssize_t return_val = write(STDERR_FILENO, output, strlen(output));
+ USE(return_val);
+}
+
+void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
+ // NOTE: This code MUST be async-signal safe.
+ // NO malloc or stdio is allowed here.
+
+ // Record the fact that we are in the signal handler now, so that the rest
+ // of StackTrace can behave in an async-signal-safe manner.
+ in_signal_handler = 1;
+
+ PrintToStderr("Received signal ");
+ char buf[1024] = {0};
+ internal::itoa_r(signal, buf, sizeof(buf), 10, 0);
+ PrintToStderr(buf);
+ if (signal == SIGBUS) {
+ if (info->si_code == BUS_ADRALN)
+ PrintToStderr(" BUS_ADRALN ");
+ else if (info->si_code == BUS_ADRERR)
+ PrintToStderr(" BUS_ADRERR ");
+ else if (info->si_code == BUS_OBJERR)
+ PrintToStderr(" BUS_OBJERR ");
+ else
+ PrintToStderr(" <unknown> ");
+ } else if (signal == SIGFPE) {
+ if (info->si_code == FPE_FLTDIV)
+ PrintToStderr(" FPE_FLTDIV ");
+ else if (info->si_code == FPE_FLTINV)
+ PrintToStderr(" FPE_FLTINV ");
+ else if (info->si_code == FPE_FLTOVF)
+ PrintToStderr(" FPE_FLTOVF ");
+ else if (info->si_code == FPE_FLTRES)
+ PrintToStderr(" FPE_FLTRES ");
+ else if (info->si_code == FPE_FLTSUB)
+ PrintToStderr(" FPE_FLTSUB ");
+ else if (info->si_code == FPE_FLTUND)
+ PrintToStderr(" FPE_FLTUND ");
+ else if (info->si_code == FPE_INTDIV)
+ PrintToStderr(" FPE_INTDIV ");
+ else if (info->si_code == FPE_INTOVF)
+ PrintToStderr(" FPE_INTOVF ");
+ else
+ PrintToStderr(" <unknown> ");
+ } else if (signal == SIGILL) {
+ if (info->si_code == ILL_BADSTK)
+ PrintToStderr(" ILL_BADSTK ");
+ else if (info->si_code == ILL_COPROC)
+ PrintToStderr(" ILL_COPROC ");
+ else if (info->si_code == ILL_ILLOPN)
+ PrintToStderr(" ILL_ILLOPN ");
+ else if (info->si_code == ILL_ILLADR)
+ PrintToStderr(" ILL_ILLADR ");
+ else if (info->si_code == ILL_ILLTRP)
+ PrintToStderr(" ILL_ILLTRP ");
+ else if (info->si_code == ILL_PRVOPC)
+ PrintToStderr(" ILL_PRVOPC ");
+ else if (info->si_code == ILL_PRVREG)
+ PrintToStderr(" ILL_PRVREG ");
+ else
+ PrintToStderr(" <unknown> ");
+ } else if (signal == SIGSEGV) {
+ if (info->si_code == SEGV_MAPERR)
+ PrintToStderr(" SEGV_MAPERR ");
+ else if (info->si_code == SEGV_ACCERR)
+ PrintToStderr(" SEGV_ACCERR ");
+ else
+ PrintToStderr(" <unknown> ");
+ }
+ if (signal == SIGBUS || signal == SIGFPE || signal == SIGILL ||
+ signal == SIGSEGV) {
+ internal::itoa_r(reinterpret_cast<intptr_t>(info->si_addr), buf,
+ sizeof(buf), 16, 12);
+ PrintToStderr(buf);
+ }
+ PrintToStderr("\n");
+ if (dump_stack_in_signal_handler) {
+ debug::StackTrace().Print();
+ PrintToStderr("[end of stack trace]\n");
+ }
+
+ if (::signal(signal, SIG_DFL) == SIG_ERR) _exit(1);
+}
+
+class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
+ public:
+ PrintBacktraceOutputHandler() {}
+
+ void HandleOutput(const char* output) override {
+ // NOTE: This code MUST be async-signal safe (it's used by the in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+ PrintToStderr(output);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PrintBacktraceOutputHandler);
+};
+
+class StreamBacktraceOutputHandler : public BacktraceOutputHandler {
+ public:
+ explicit StreamBacktraceOutputHandler(std::ostream* os) : os_(os) {}
+
+ void HandleOutput(const char* output) override { (*os_) << output; }
+
+ private:
+ std::ostream* os_;
+
+ DISALLOW_COPY_AND_ASSIGN(StreamBacktraceOutputHandler);
+};
+
+void WarmUpBacktrace() {
+ // Warm up stack trace infrastructure. It turns out that on the first
+ // call glibc initializes some internal data structures using pthread_once,
+ // and even backtrace() can call malloc(), leading to hangs.
+ //
+ // Example stack trace snippet (with tcmalloc):
+ //
+ // #8 0x0000000000a173b5 in tc_malloc
+ // at ./third_party/tcmalloc/chromium/src/debugallocation.cc:1161
+ // #9 0x00007ffff7de7900 in _dl_map_object_deps at dl-deps.c:517
+ // #10 0x00007ffff7ded8a9 in dl_open_worker at dl-open.c:262
+ // #11 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+ // #12 0x00007ffff7ded31a in _dl_open (file=0x7ffff625e298 "libgcc_s.so.1")
+ // at dl-open.c:639
+ // #13 0x00007ffff6215602 in do_dlopen at dl-libc.c:89
+ // #14 0x00007ffff7de9176 in _dl_catch_error at dl-error.c:178
+ // #15 0x00007ffff62156c4 in dlerror_run at dl-libc.c:48
+ // #16 __GI___libc_dlopen_mode at dl-libc.c:165
+ // #17 0x00007ffff61ef8f5 in init
+ // at ../sysdeps/x86_64/../ia64/backtrace.c:53
+ // #18 0x00007ffff6aad400 in pthread_once
+ // at ../nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S:104
+ // #19 0x00007ffff61efa14 in __GI___backtrace
+ // at ../sysdeps/x86_64/../ia64/backtrace.c:104
+ // #20 0x0000000000752a54 in base::debug::StackTrace::StackTrace
+ // at base/debug/stack_trace_posix.cc:175
+ // #21 0x00000000007a4ae5 in
+ // base::(anonymous namespace)::StackDumpSignalHandler
+ // at base/process_util_posix.cc:172
+ // #22 <signal handler called>
+ StackTrace stack_trace;
+}
+
+} // namespace
+
+bool EnableInProcessStackDumping() {
+ // When running in an application, our code typically expects SIGPIPE
+ // to be ignored. Therefore, when testing that same code, it should run
+ // with SIGPIPE ignored as well.
+ struct sigaction sigpipe_action;
+ memset(&sigpipe_action, 0, sizeof(sigpipe_action));
+ sigpipe_action.sa_handler = SIG_IGN;
+ sigemptyset(&sigpipe_action.sa_mask);
+ bool success = (sigaction(SIGPIPE, &sigpipe_action, NULL) == 0);
+
+ // Avoid hangs during backtrace initialization, see above.
+ WarmUpBacktrace();
+
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ action.sa_flags = SA_RESETHAND | SA_SIGINFO;
+ action.sa_sigaction = &StackDumpSignalHandler;
+ sigemptyset(&action.sa_mask);
+
+ success &= (sigaction(SIGILL, &action, NULL) == 0);
+ success &= (sigaction(SIGABRT, &action, NULL) == 0);
+ success &= (sigaction(SIGFPE, &action, NULL) == 0);
+ success &= (sigaction(SIGBUS, &action, NULL) == 0);
+ success &= (sigaction(SIGSEGV, &action, NULL) == 0);
+ success &= (sigaction(SIGSYS, &action, NULL) == 0);
+
+ dump_stack_in_signal_handler = true;
+
+ return success;
+}
+
+void DisableSignalStackDump() {
+ dump_stack_in_signal_handler = false;
+}
+
+StackTrace::StackTrace() {
+ // NOTE: This code MUST be async-signal safe (it's used by the in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if HAVE_EXECINFO_H
+ // Though the backtrace API man page does not list any possible negative
+ // return values, we take no chances.
+ count_ = static_cast<size_t>(backtrace(trace_, arraysize(trace_)));
+#else
+ count_ = 0;
+#endif
+}
+
+void StackTrace::Print() const {
+ // NOTE: This code MUST be async-signal safe (it's used by the in-process
+ // stack dumping signal handler). NO malloc or stdio is allowed here.
+
+#if HAVE_EXECINFO_H
+ PrintBacktraceOutputHandler handler;
+ ProcessBacktrace(trace_, count_, &handler);
+#endif
+}
+
+void StackTrace::OutputToStream(std::ostream* os) const {
+#if HAVE_EXECINFO_H
+ StreamBacktraceOutputHandler handler(os);
+ ProcessBacktrace(trace_, count_, &handler);
+#endif
+}
+
+namespace internal {
+
+// NOTE: code from sandbox/linux/seccomp-bpf/demo.cc.
+char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
+ // Make sure we can write at least one NUL byte.
+ size_t n = 1;
+ if (n > sz) return NULL;
+
+ if (base < 2 || base > 16) {
+ buf[0] = '\000';
+ return NULL;
+ }
+
+ char* start = buf;
+
+ uintptr_t j = i;
+
+ // Handle negative numbers (only for base 10).
+ if (i < 0 && base == 10) {
+ // This does "j = -i" while avoiding integer overflow.
+ j = static_cast<uintptr_t>(-(i + 1)) + 1;
+
+ // Make sure we can write the '-' character.
+ if (++n > sz) {
+ buf[0] = '\000';
+ return NULL;
+ }
+ *start++ = '-';
+ }
+
+ // Loop until we have converted the entire number. Output at least one
+ // character (i.e. '0').
+ char* ptr = start;
+ do {
+ // Make sure there is still enough space left in our output buffer.
+ if (++n > sz) {
+ buf[0] = '\000';
+ return NULL;
+ }
+
+ // Output the next digit.
+ *ptr++ = "0123456789abcdef"[j % base];
+ j /= base;
+
+ if (padding > 0) padding--;
+ } while (j > 0 || padding > 0);
+
+ // Terminate the output with a NUL character.
+ *ptr = '\000';
+
+ // Conversion to ASCII actually resulted in the digits being in reverse
+ // order. We can't easily generate them in forward order, as we can't tell
+ // the number of characters needed until we are done converting.
+ // So, now, we reverse the string (except for the possible "-" sign).
+ while (--ptr > start) {
+ char ch = *ptr;
+ *ptr = *start;
+ *start++ = ch;
+ }
+ return buf;
+}
+
+} // namespace internal
+
+} // namespace debug
+} // namespace base
+} // namespace v8
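A minimal standalone sketch (not part of the commit) of how itoa_r behaves; it exists because the signal handler above cannot use snprintf, since stdio may allocate or take locks. The declaration is repeated here purely for illustration and assumes linking against this translation unit:

#include <cstddef>
#include <cstdint>
#include <cstdio>

namespace v8 { namespace base { namespace debug { namespace internal {
// Matches the definition in stack_trace_posix.cc above.
char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);
} } } }

int main() {
  using v8::base::debug::internal::itoa_r;
  char buf[32];
  std::printf("%s\n", itoa_r(-1234, buf, sizeof(buf), 10, 0));   // -1234
  std::printf("%s\n", itoa_r(0xbeef, buf, sizeof(buf), 16, 8));  // 0000beef
  return 0;
}

The padding argument is what lets the handler print fixed-width hex addresses without any formatting library.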
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
new file mode 100644
index 0000000000..8333cd9ea8
--- /dev/null
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -0,0 +1,248 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2016 the V8 project authors. All rights reserved.
+
+#include "src/base/debug/stack_trace.h"
+
+#include <windows.h>
+#include <dbghelp.h>
+#include <Shlwapi.h>
+#include <stddef.h>
+
+#include <iostream>
+#include <memory>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace debug {
+
+namespace {
+
+// Previous unhandled exception filter. Will be called, if not NULL, when we
+// intercept an exception. Only used in unit tests.
+LPTOP_LEVEL_EXCEPTION_FILTER g_previous_filter = NULL;
+
+bool g_dump_stack_in_signal_handler = true;
+bool g_initialized_symbols = false;
+DWORD g_init_error = ERROR_SUCCESS;
+
+// Prints the exception call stack.
+// This is the unit tests' exception filter.
+long WINAPI StackDumpExceptionFilter(EXCEPTION_POINTERS* info) { // NOLINT
+ if (g_dump_stack_in_signal_handler) {
+ debug::StackTrace(info).Print();
+ }
+ if (g_previous_filter) return g_previous_filter(info);
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+void GetExePath(wchar_t* path_out) {
+ GetModuleFileName(NULL, path_out, MAX_PATH);
+ path_out[MAX_PATH - 1] = L'\0';
+ PathRemoveFileSpec(path_out);
+}
+
+bool InitializeSymbols() {
+ if (g_initialized_symbols) return g_init_error == ERROR_SUCCESS;
+ g_initialized_symbols = true;
+ // Defer symbol loading until it's needed, use undecorated names, and get
+ // line numbers.
+ SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
+ if (!SymInitialize(GetCurrentProcess(), NULL, TRUE)) {
+ g_init_error = GetLastError();
+ // TODO(awong): Handle error: SymInitialize can fail with
+ // ERROR_INVALID_PARAMETER.
+ // When it fails, we should not call debugbreak since it kills the current
+ // process (prevents future tests from running or kills the browser
+ // process).
+ return false;
+ }
+
+ // When the binaries are transferred, e.g. between bots, the symbol path
+ // baked into the executable goes stale. To still resolve symbols correctly,
+ // add the directory of the executable to the symbol search path.
+ // All following errors are non-fatal.
+ const size_t kSymbolsArraySize = 1024;
+ std::unique_ptr<wchar_t[]> symbols_path(new wchar_t[kSymbolsArraySize]);
+
+ // Note: The below function takes buffer size as number of characters,
+ // not number of bytes!
+ if (!SymGetSearchPathW(GetCurrentProcess(), symbols_path.get(),
+ kSymbolsArraySize)) {
+ g_init_error = GetLastError();
+ return false;
+ }
+
+ wchar_t exe_path[MAX_PATH];
+ GetExePath(exe_path);
+ std::wstring new_path(std::wstring(symbols_path.get()) + L";" +
+ std::wstring(exe_path));
+ if (!SymSetSearchPathW(GetCurrentProcess(), new_path.c_str())) {
+ g_init_error = GetLastError();
+ return false;
+ }
+
+ g_init_error = ERROR_SUCCESS;
+ return true;
+}
+
+// For the given trace, attempts to resolve the symbols, and output a trace
+// to the ostream os. The format for each line of the backtrace is:
+//
+// <tab>SymbolName[0xAddress+Offset] (FileName:LineNo)
+//
+// This function should only be called if Init() has been called. We do not
+// LOG(FATAL) here because this code might itself be triggered by a
+// LOG(FATAL). Also, it should not call complex, extensible code like
+// PathService, since that can in turn fire CHECKs.
+void OutputTraceToStream(const void* const* trace, size_t count,
+ std::ostream* os) {
+ for (size_t i = 0; (i < count) && os->good(); ++i) {
+ const int kMaxNameLength = 256;
+ DWORD_PTR frame = reinterpret_cast<DWORD_PTR>(trace[i]);
+
+ // Code adapted from MSDN example:
+ // http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
+ ULONG64 buffer[(sizeof(SYMBOL_INFO) + kMaxNameLength * sizeof(wchar_t) +
+ sizeof(ULONG64) - 1) /
+ sizeof(ULONG64)];
+ memset(buffer, 0, sizeof(buffer));
+
+ // Initialize symbol information retrieval structures.
+ DWORD64 sym_displacement = 0;
+ PSYMBOL_INFO symbol = reinterpret_cast<PSYMBOL_INFO>(&buffer[0]);
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = kMaxNameLength - 1;
+ BOOL has_symbol =
+ SymFromAddr(GetCurrentProcess(), frame, &sym_displacement, symbol);
+
+ // Attempt to retrieve line number information.
+ DWORD line_displacement = 0;
+ IMAGEHLP_LINE64 line = {};
+ line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+ BOOL has_line = SymGetLineFromAddr64(GetCurrentProcess(), frame,
+ &line_displacement, &line);
+
+ // Output the backtrace line.
+ (*os) << "\t";
+ if (has_symbol) {
+ (*os) << symbol->Name << " [0x" << trace[i] << "+" << sym_displacement
+ << "]";
+ } else {
+ // If there is no symbol information, add a spacer.
+ (*os) << "(No symbol) [0x" << trace[i] << "]";
+ }
+ if (has_line) {
+ (*os) << " (" << line.FileName << ":" << line.LineNumber << ")";
+ }
+ (*os) << "\n";
+ }
+}
+
+} // namespace
+
+bool EnableInProcessStackDumping() {
+ // Add stack dumping support on exceptions on Windows, similar to the
+ // OS_POSIX signal() handling in process_util_posix.cc.
+ g_previous_filter = SetUnhandledExceptionFilter(&StackDumpExceptionFilter);
+ g_dump_stack_in_signal_handler = true;
+
+ // Need to initialize symbols early in the process or else this fails on
+ // swarming (since symbols are in a different directory than the exes) and
+ // also in release x64 builds.
+ return InitializeSymbols();
+}
+
+void DisableSignalStackDump() {
+ g_dump_stack_in_signal_handler = false;
+}
+
+// Disable optimizations for the StackTrace::StackTrace function. It is
+// important to disable at least frame pointer optimization ("y"), since
+// that breaks CaptureStackBackTrace() and prevents StackTrace from working
+// in Release builds (it may still be janky if other frames are using FPO,
+// but at least it will make it further).
+#if defined(COMPILER_MSVC)
+#pragma optimize("", off)
+#endif
+
+StackTrace::StackTrace() {
+ // When walking our own stack, use CaptureStackBackTrace().
+ count_ = CaptureStackBackTrace(0, arraysize(trace_), trace_, NULL);
+}
+
+#if defined(COMPILER_MSVC)
+#pragma optimize("", on)
+#endif
+
+StackTrace::StackTrace(EXCEPTION_POINTERS* exception_pointers) {
+ InitTrace(exception_pointers->ContextRecord);
+}
+
+StackTrace::StackTrace(const CONTEXT* context) { InitTrace(context); }
+
+void StackTrace::InitTrace(const CONTEXT* context_record) {
+ // StackWalk64 modifies the register context in place, so we have to copy it
+ // so that downstream exception handlers get the right context. The incoming
+ // context may have had more register state (YMM, etc) than we need to unwind
+ // the stack. Typically StackWalk64 only needs integer and control registers.
+ CONTEXT context_copy;
+ memcpy(&context_copy, context_record, sizeof(context_copy));
+ context_copy.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
+
+ // When walking an exception stack, we need to use StackWalk64().
+ count_ = 0;
+ // Initialize stack walking.
+ STACKFRAME64 stack_frame;
+ memset(&stack_frame, 0, sizeof(stack_frame));
+#if defined(_WIN64)
+ int machine_type = IMAGE_FILE_MACHINE_AMD64;
+ stack_frame.AddrPC.Offset = context_record->Rip;
+ stack_frame.AddrFrame.Offset = context_record->Rbp;
+ stack_frame.AddrStack.Offset = context_record->Rsp;
+#else
+ int machine_type = IMAGE_FILE_MACHINE_I386;
+ stack_frame.AddrPC.Offset = context_record->Eip;
+ stack_frame.AddrFrame.Offset = context_record->Ebp;
+ stack_frame.AddrStack.Offset = context_record->Esp;
+#endif
+ stack_frame.AddrPC.Mode = AddrModeFlat;
+ stack_frame.AddrFrame.Mode = AddrModeFlat;
+ stack_frame.AddrStack.Mode = AddrModeFlat;
+ while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
+ &stack_frame, &context_copy, NULL,
+ &SymFunctionTableAccess64, &SymGetModuleBase64, NULL) &&
+ count_ < arraysize(trace_)) {
+ trace_[count_++] = reinterpret_cast<void*>(stack_frame.AddrPC.Offset);
+ }
+
+ for (size_t i = count_; i < arraysize(trace_); ++i) trace_[i] = NULL;
+}
+
+void StackTrace::Print() const { OutputToStream(&std::cerr); }
+
+void StackTrace::OutputToStream(std::ostream* os) const {
+ InitializeSymbols();
+ if (g_init_error != ERROR_SUCCESS) {
+ (*os) << "Error initializing symbols (" << g_init_error
+ << "). Dumping unresolved backtrace:\n";
+ for (size_t i = 0; (i < count_) && os->good(); ++i) {
+ (*os) << "\t" << trace_[i] << "\n";
+ }
+ } else {
+ (*os) << "\n";
+ (*os) << "==== C stack trace ===============================\n";
+ (*os) << "\n";
+ OutputTraceToStream(trace_, count_, os);
+ }
+}
+
+} // namespace debug
+} // namespace base
+} // namespace v8
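A standalone sketch (not part of the commit) of the intended use of this API on Windows: enable symbolization once, then capture and print the current stack. StackTrace() walks via CaptureStackBackTrace() and OutputToStream() resolves names through dbghelp:

#include <iostream>
#include <sstream>

#include "src/base/debug/stack_trace.h"

int main() {
  v8::base::debug::EnableInProcessStackDumping();  // SymInitialize etc.
  v8::base::debug::StackTrace trace;               // capture current stack
  std::ostringstream stream;
  trace.OutputToStream(&stream);
  std::cout << stream.str();
  return 0;
}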
diff --git a/deps/v8/src/base/file-utils.cc b/deps/v8/src/base/file-utils.cc
new file mode 100644
index 0000000000..2262df97d0
--- /dev/null
+++ b/deps/v8/src/base/file-utils.cc
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/file-utils.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+
+char* RelativePath(char** buffer, const char* exec_path, const char* name) {
+ DCHECK(exec_path);
+ int path_separator = static_cast<int>(strlen(exec_path)) - 1;
+ while (path_separator >= 0 &&
+ !base::OS::isDirectorySeparator(exec_path[path_separator])) {
+ path_separator--;
+ }
+ if (path_separator >= 0) {
+ int name_length = static_cast<int>(strlen(name));
+ *buffer =
+ reinterpret_cast<char*>(calloc(path_separator + name_length + 2, 1));
+ *buffer[0] = '\0';
+ strncat(*buffer, exec_path, path_separator + 1);
+ strncat(*buffer, name, name_length);
+ } else {
+ *buffer = strdup(name);
+ }
+ return *buffer;
+}
+
+} // namespace internal
+} // namespace v8
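A standalone sketch (not part of the commit) of RelativePath: it rewrites a file name so that it refers to a sibling of the running executable. The result is heap-allocated (calloc or strdup), so the caller owns it and must free() it. The file name below is hypothetical, for illustration only:

#include <cstdio>
#include <cstdlib>

#include "src/base/file-utils.h"

int main(int argc, char* argv[]) {
  char* path = NULL;
  // Hypothetical file name; the point is the directory handling.
  v8::internal::RelativePath(&path, argv[0], "data.bin");
  std::printf("%s\n", path);  // /path/to/data.bin when argv[0] is /path/to/exe
  free(path);
  return 0;
}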
diff --git a/deps/v8/src/base/file-utils.h b/deps/v8/src/base/file-utils.h
new file mode 100644
index 0000000000..ce9e9a1c41
--- /dev/null
+++ b/deps/v8/src/base/file-utils.h
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FILE_UTILS_H_
+#define V8_FILE_UTILS_H_
+
+namespace v8 {
+namespace internal {
+
+// Helper functions to manipulate file paths.
+
+char* RelativePath(char** buffer, const char* exec_path, const char* name);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_FILE_UTILS_H_
diff --git a/deps/v8/src/base/format-macros.h b/deps/v8/src/base/format-macros.h
new file mode 100644
index 0000000000..5f5fe5df24
--- /dev/null
+++ b/deps/v8/src/base/format-macros.h
@@ -0,0 +1,97 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FORMAT_MACROS_H_
+#define BASE_FORMAT_MACROS_H_
+
+// This file defines the format macros for some integer types.
+
+// To print a 64-bit value in a portable way:
+// int64_t value;
+// printf("xyz:%" PRId64, value);
+// The "d" in the macro corresponds to %d; you can also use PRIu64 etc.
+//
+// For wide strings, prepend "Wide" to the macro:
+// int64_t value;
+// StringPrintf(L"xyz: %" WidePRId64, value);
+//
+// To print a size_t value in a portable way:
+// size_t size;
+// printf("xyz: %" PRIuS, size);
+// The "u" in the macro corresponds to %u, and S is for "size".
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "src/base/build_config.h"
+
+#if defined(V8_OS_POSIX) && (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && \
+ !defined(PRId64)
+#error "inttypes.h has already been included before this header file, but "
+#error "without __STDC_FORMAT_MACROS defined."
+#endif
+
+#if defined(V8_OS_POSIX) && !defined(__STDC_FORMAT_MACROS)
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#if defined(V8_OS_POSIX)
+
+// GCC will concatenate wide and narrow strings correctly, so nothing needs to
+// be done here.
+#define WidePRId64 PRId64
+#define WidePRIu64 PRIu64
+#define WidePRIx64 PRIx64
+
+#if !defined(PRIuS)
+#define PRIuS "zu"
+#endif
+
+// The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
+// architectures and Apple does not provide standard format macros and
+// recommends casting. This has many drawbacks, so instead define macros
+// for formatting those types.
+#if defined(V8_OS_MACOSX)
+#if defined(V8_HOST_ARCH_64_BIT)
+#if !defined(PRIdNS)
+#define PRIdNS "ld"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "lu"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "lx"
+#endif
+#else // defined(V8_HOST_ARCH_64_BIT)
+#if !defined(PRIdNS)
+#define PRIdNS "d"
+#endif
+#if !defined(PRIuNS)
+#define PRIuNS "u"
+#endif
+#if !defined(PRIxNS)
+#define PRIxNS "x"
+#endif
+#endif
+#endif // defined(V8_OS_MACOSX)
+
+#else // V8_OS_WIN
+
+#if !defined(PRId64) || !defined(PRIu64) || !defined(PRIx64)
+#error "inttypes.h provided by win toolchain should define these."
+#endif
+
+#define WidePRId64 L"I64d"
+#define WidePRIu64 L"I64u"
+#define WidePRIx64 L"I64x"
+
+#if !defined(PRIuS)
+#define PRIuS "Iu"
+#endif
+
+#endif
+
+#endif // BASE_FORMAT_MACROS_H_
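A standalone sketch (not part of the commit) of the portable spellings this header provides; the same two printf lines compile with both the POSIX and the Windows toolchain:

#include <stdint.h>
#include <stdio.h>

#include "src/base/format-macros.h"

int main() {
  int64_t value = INT64_C(-1234567890123);
  size_t size = sizeof(value);
  printf("value: %" PRId64 "\n", value);  // 64-bit int, %d-style
  printf("size: %" PRIuS "\n", size);     // size_t, %u-style
  return 0;
}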
diff --git a/deps/v8/src/base/free_deleter.h b/deps/v8/src/base/free_deleter.h
new file mode 100644
index 0000000000..77e4f0ed14
--- /dev/null
+++ b/deps/v8/src/base/free_deleter.h
@@ -0,0 +1,28 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly adapted for inclusion in V8.
+// Copyright 2016 the V8 project authors. All rights reserved.
+
+#ifndef V8_BASE_FREE_DELETER_H_
+#define V8_BASE_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+namespace v8 {
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+// static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+ inline void operator()(void* ptr) const { free(ptr); }
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_FREE_DELETER_H_
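A standalone sketch (not part of the commit) of the case FreeDeleter is for: memory that came from the C allocator must go back through free(), not delete, and the deleter encodes that in the pointer's type (strdup is POSIX; _strdup is the Windows spelling):

#include <cstdio>
#include <cstring>
#include <memory>

#include "src/base/free_deleter.h"

int main() {
  std::unique_ptr<char, v8::base::FreeDeleter> copy(strdup("hello"));
  std::printf("%s\n", copy.get());
  return 0;  // FreeDeleter calls free(copy.get()) on scope exit
}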
diff --git a/deps/v8/src/hashmap.h b/deps/v8/src/base/hashmap.h
index f94def7c3c..e3c47de6d7 100644
--- a/deps/v8/src/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -2,21 +2,31 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HASHMAP_H_
-#define V8_HASHMAP_H_
+// The reason we write our own hash map instead of using std::unordered_map
+// is that STL containers use a mutex pool in debug builds, which can lead to
+// deadlock when used from an async signal handler.
+
+#ifndef V8_BASE_HASHMAP_H_
+#define V8_BASE_HASHMAP_H_
+
+#include <stdlib.h>
-#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/base/logging.h"
-#include "src/utils.h"
namespace v8 {
-namespace internal {
+namespace base {
+
+class DefaultAllocationPolicy {
+ public:
+ V8_INLINE void* New(size_t size) { return malloc(size); }
+ V8_INLINE static void Delete(void* p) { free(p); }
+};
-template<class AllocationPolicy>
+template <class AllocationPolicy>
class TemplateHashMapImpl {
public:
- typedef bool (*MatchFun) (void* key1, void* key2);
+ typedef bool (*MatchFun)(void* key1, void* key2);
// The default capacity. This is used by the call sites which want
// to pass in a non-default AllocationPolicy but want to use the
@@ -38,7 +48,6 @@ class TemplateHashMapImpl {
void* key;
void* value;
uint32_t hash; // The full hash value for key
- int order; // If you never remove entries this is the insertion order.
};
// If an entry with matching key is found, returns that entry.
@@ -51,6 +60,9 @@ class TemplateHashMapImpl {
Entry* LookupOrInsert(void* key, uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
+ Entry* InsertNew(void* key, uint32_t hash,
+ AllocationPolicy allocator = AllocationPolicy());
+
// Removes the entry with matching key.
// It returns the value of the deleted entry
// or null if there is no value for such key.
@@ -79,9 +91,7 @@ class TemplateHashMapImpl {
Entry* Next(Entry* p) const;
// Some match functions defined for convenience.
- static bool PointersMatch(void* key1, void* key2) {
- return key1 == key2;
- }
+ static bool PointersMatch(void* key1, void* key2) { return key1 == key2; }
private:
MatchFun match_;
@@ -95,22 +105,20 @@ class TemplateHashMapImpl {
void Resize(AllocationPolicy allocator);
};
-typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
+typedef TemplateHashMapImpl<DefaultAllocationPolicy> HashMap;
-template<class AllocationPolicy>
+template <class AllocationPolicy>
TemplateHashMapImpl<AllocationPolicy>::TemplateHashMapImpl(
MatchFun match, uint32_t initial_capacity, AllocationPolicy allocator) {
match_ = match;
Initialize(initial_capacity, allocator);
}
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
TemplateHashMapImpl<AllocationPolicy>::~TemplateHashMapImpl() {
AllocationPolicy::Delete(map_);
}
-
template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::Lookup(void* key, uint32_t hash) const {
@@ -118,7 +126,6 @@ TemplateHashMapImpl<AllocationPolicy>::Lookup(void* key, uint32_t hash) const {
return p->key != NULL ? p : NULL;
}
-
template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::LookupOrInsert(
@@ -129,11 +136,21 @@ TemplateHashMapImpl<AllocationPolicy>::LookupOrInsert(
return p;
}
+ return InsertNew(key, hash, allocator);
+}
+
+template <class AllocationPolicy>
+typename TemplateHashMapImpl<AllocationPolicy>::Entry*
+TemplateHashMapImpl<AllocationPolicy>::InsertNew(void* key, uint32_t hash,
+ AllocationPolicy allocator) {
+ // Find a matching entry.
+ Entry* p = Probe(key, hash);
+ DCHECK(p->key == NULL);
+
// No entry found; insert one.
p->key = key;
p->value = NULL;
p->hash = hash;
- p->order = occupancy_;
occupancy_++;
// Grow the map if we reached >= 80% occupancy.
@@ -145,8 +162,7 @@ TemplateHashMapImpl<AllocationPolicy>::LookupOrInsert(
return p;
}
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
// Lookup the entry for the key to remove.
Entry* p = Probe(key, hash);
@@ -194,8 +210,7 @@ void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
// If the entry at position q has its initial position outside the range
// between p and q it can be moved forward to position p and will still be
// found. There is now a new candidate entry for clearing.
- if ((q > p && (r <= p || r > q)) ||
- (q < p && (r <= p && r > q))) {
+ if ((q > p && (r <= p || r > q)) || (q < p && (r <= p && r > q))) {
*p = *q;
p = q;
}
@@ -207,8 +222,7 @@ void* TemplateHashMapImpl<AllocationPolicy>::Remove(void* key, uint32_t hash) {
return value;
}
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
void TemplateHashMapImpl<AllocationPolicy>::Clear() {
// Mark all entries as empty.
const Entry* end = map_end();
@@ -218,17 +232,15 @@ void TemplateHashMapImpl<AllocationPolicy>::Clear() {
occupancy_ = 0;
}
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
- TemplateHashMapImpl<AllocationPolicy>::Start() const {
+TemplateHashMapImpl<AllocationPolicy>::Start() const {
return Next(map_ - 1);
}
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
- TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
+TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
const Entry* end = map_end();
DCHECK(map_ - 1 <= p && p < end);
for (p++; p < end; p++) {
@@ -239,7 +251,6 @@ typename TemplateHashMapImpl<AllocationPolicy>::Entry*
return NULL;
}
-
template <class AllocationPolicy>
typename TemplateHashMapImpl<AllocationPolicy>::Entry*
TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) const {
@@ -261,22 +272,20 @@ TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) const {
return p;
}
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
void TemplateHashMapImpl<AllocationPolicy>::Initialize(
uint32_t capacity, AllocationPolicy allocator) {
DCHECK(base::bits::IsPowerOfTwo32(capacity));
map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
if (map_ == NULL) {
- v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
+ FATAL("Out of memory: HashMap::Initialize");
return;
}
capacity_ = capacity;
Clear();
}
-
-template<class AllocationPolicy>
+template <class AllocationPolicy>
void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
Entry* map = map_;
uint32_t n = occupancy_;
@@ -289,7 +298,6 @@ void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
if (p->key != NULL) {
Entry* entry = LookupOrInsert(p->key, p->hash, allocator);
entry->value = p->value;
- entry->order = p->order;
n--;
}
}
@@ -298,12 +306,11 @@ void TemplateHashMapImpl<AllocationPolicy>::Resize(AllocationPolicy allocator) {
AllocationPolicy::Delete(map);
}
-
// A hash map for pointer keys and values with an STL-like interface.
-template<class Key, class Value, class AllocationPolicy>
-class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
+template <class Key, class Value, class AllocationPolicy>
+class TemplateHashMap : private TemplateHashMapImpl<AllocationPolicy> {
public:
- STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
+ STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
struct value_type {
Key* first;
@@ -318,12 +325,12 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
}
value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
- bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
+ bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
private:
Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
- typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
- map_(map), entry_(entry) { }
+ typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry)
+ : map_(map), entry_(entry) {}
const TemplateHashMapImpl<AllocationPolicy>* map_;
typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
@@ -334,10 +341,10 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
TemplateHashMap(
typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match,
AllocationPolicy allocator = AllocationPolicy())
- : TemplateHashMapImpl<AllocationPolicy>(
+ : TemplateHashMapImpl<AllocationPolicy>(
match,
TemplateHashMapImpl<AllocationPolicy>::kDefaultHashMapCapacity,
- allocator) { }
+ allocator) {}
Iterator begin() const { return Iterator(this, this->Start()); }
Iterator end() const { return Iterator(this, NULL); }
@@ -350,7 +357,7 @@ class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
}
};
-} // namespace internal
+} // namespace base
} // namespace v8
-#endif // V8_HASHMAP_H_
+#endif // V8_BASE_HASHMAP_H_
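A standalone sketch (not part of the commit) of the relocated map with the new malloc/free DefaultAllocationPolicy. Hashes are supplied by the caller; any reasonably mixed value works for this illustration, and the capacity argument (a power of two) is passed explicitly here:

#include <cstdint>
#include <cstdio>

#include "src/base/hashmap.h"

int main() {
  v8::base::HashMap map(v8::base::HashMap::PointersMatch, 8);
  int key = 42;
  const uint32_t hash = 0x9e3779b9;  // arbitrary well-mixed hash value
  v8::base::HashMap::Entry* entry = map.LookupOrInsert(&key, hash);
  entry->value = const_cast<char*>("payload");
  v8::base::HashMap::Entry* found = map.Lookup(&key, hash);
  std::printf("%s\n", static_cast<const char*>(found->value));
  return 0;
}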
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
new file mode 100644
index 0000000000..d0faeeea00
--- /dev/null
+++ b/deps/v8/src/base/ieee754.cc
@@ -0,0 +1,2746 @@
+// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2016 the V8 project authors. All rights reserved.
+
+#include "src/base/ieee754.h"
+
+#include <cmath>
+#include <limits>
+
+#include "src/base/build_config.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+namespace ieee754 {
+
+namespace {
+
+/* Disable "potential divide by 0" warning in Visual Studio compiler. */
+
+#if V8_CC_MSVC
+
+#pragma warning(disable : 4723)
+
+#endif
+
+/*
+ * The original fdlibm code used statements like:
+ * n0 = ((*(int*)&one)>>29)^1; * index of high word *
+ * ix0 = *(n0+(int*)&x); * high word of x *
+ * ix1 = *((1-n0)+(int*)&x); * low word of x *
+ * to dig two 32 bit words out of the 64 bit IEEE floating point
+ * value. That is non-ANSI, and, moreover, the gcc instruction
+ * scheduler gets it wrong. We instead use the following macros.
+ * Unlike the original code, we determine the endianness at compile
+ * time, not at run time; I don't see much benefit to selecting
+ * endianness at run time.
+ */
+
+/*
+ * A union which permits us to convert between a double and two 32 bit
+ * ints.
+ */
+
+#if V8_TARGET_LITTLE_ENDIAN
+
+typedef union {
+ double value;
+ struct {
+ uint32_t lsw;
+ uint32_t msw;
+ } parts;
+ struct {
+ uint64_t w;
+ } xparts;
+} ieee_double_shape_type;
+
+#else
+
+typedef union {
+ double value;
+ struct {
+ uint32_t msw;
+ uint32_t lsw;
+ } parts;
+ struct {
+ uint64_t w;
+ } xparts;
+} ieee_double_shape_type;
+
+#endif
+
+/* Get two 32 bit ints from a double. */
+
+#define EXTRACT_WORDS(ix0, ix1, d) \
+ do { \
+ ieee_double_shape_type ew_u; \
+ ew_u.value = (d); \
+ (ix0) = ew_u.parts.msw; \
+ (ix1) = ew_u.parts.lsw; \
+ } while (0)
+
+/* Get a 64-bit int from a double. */
+#define EXTRACT_WORD64(ix, d) \
+ do { \
+ ieee_double_shape_type ew_u; \
+ ew_u.value = (d); \
+ (ix) = ew_u.xparts.w; \
+ } while (0)
+
+/* Get the more significant 32 bit int from a double. */
+
+#define GET_HIGH_WORD(i, d) \
+ do { \
+ ieee_double_shape_type gh_u; \
+ gh_u.value = (d); \
+ (i) = gh_u.parts.msw; \
+ } while (0)
+
+/* Get the less significant 32 bit int from a double. */
+
+#define GET_LOW_WORD(i, d) \
+ do { \
+ ieee_double_shape_type gl_u; \
+ gl_u.value = (d); \
+ (i) = gl_u.parts.lsw; \
+ } while (0)
+
+/* Set a double from two 32 bit ints. */
+
+#define INSERT_WORDS(d, ix0, ix1) \
+ do { \
+ ieee_double_shape_type iw_u; \
+ iw_u.parts.msw = (ix0); \
+ iw_u.parts.lsw = (ix1); \
+ (d) = iw_u.value; \
+ } while (0)
+
+/* Set a double from a 64-bit int. */
+#define INSERT_WORD64(d, ix) \
+ do { \
+ ieee_double_shape_type iw_u; \
+ iw_u.xparts.w = (ix); \
+ (d) = iw_u.value; \
+ } while (0)
+
+/* Set the more significant 32 bits of a double from an int. */
+
+#define SET_HIGH_WORD(d, v) \
+ do { \
+ ieee_double_shape_type sh_u; \
+ sh_u.value = (d); \
+ sh_u.parts.msw = (v); \
+ (d) = sh_u.value; \
+ } while (0)
+
+/* Set the less significant 32 bits of a double from an int. */
+
+#define SET_LOW_WORD(d, v) \
+ do { \
+ ieee_double_shape_type sl_u; \
+ sl_u.value = (d); \
+ sl_u.parts.lsw = (v); \
+ (d) = sl_u.value; \
+ } while (0)
+
+/* Support macro. */
+
+#define STRICT_ASSIGN(type, lval, rval) ((lval) = (rval))
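A quick illustration (not part of the commit, only valid inside this translation unit where the macros are defined) of what the word-access macros give you: for IEEE-754 doubles the high word holds the sign, the 11 exponent bits, and the top 20 mantissa bits, so 1.0 extracts as 0x3ff00000 / 0x00000000 and bumping the exponent field doubles the value:

double DoubleViaWords() {
  uint32_t hi, lo;
  double d = 1.0;
  EXTRACT_WORDS(hi, lo, d);           // hi == 0x3ff00000, lo == 0
  SET_HIGH_WORD(d, hi + 0x00100000);  // +1 in the biased exponent field
  return d;                           // 2.0: only the exponent changed
}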
+
+int32_t __ieee754_rem_pio2(double x, double *y) WARN_UNUSED_RESULT;
+double __kernel_cos(double x, double y) WARN_UNUSED_RESULT;
+int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec,
+ const int32_t *ipio2) WARN_UNUSED_RESULT;
+double __kernel_sin(double x, double y, int iy) WARN_UNUSED_RESULT;
+
+/* __ieee754_rem_pio2(x,y)
+ *
+ * return the remainder of x rem pi/2 in y[0]+y[1]
+ * use __kernel_rem_pio2()
+ */
+int32_t __ieee754_rem_pio2(double x, double *y) {
+ /*
+ * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+ */
+ static const int32_t two_over_pi[] = {
+ 0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C,
+ 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649,
+ 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129, 0xA73EE8, 0x8235F5, 0x2EBB44,
+ 0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C, 0x845F8B,
+ 0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D,
+ 0x367ECF, 0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
+ 0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330,
+ 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3, 0x91615E, 0xE61B08,
+ 0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA,
+ 0x73A8C9, 0x60E27B, 0xC08C6B,
+ };
+
+ static const int32_t npio2_hw[] = {
+ 0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
+ 0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
+ 0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
+ 0x403DD85A, 0x403F6A7A, 0x40407E4C, 0x4041475C, 0x4042106C, 0x4042D97C,
+ 0x4043A28C, 0x40446B9C, 0x404534AC, 0x4045FDBB, 0x4046C6CB, 0x40478FDB,
+ 0x404858EB, 0x404921FB,
+ };
+
+ /*
+ * invpio2: 53 bits of 2/pi
+ * pio2_1: first 33 bit of pi/2
+ * pio2_1t: pi/2 - pio2_1
+ * pio2_2: second 33 bit of pi/2
+ * pio2_2t: pi/2 - (pio2_1+pio2_2)
+ * pio2_3: third 33 bit of pi/2
+ * pio2_3t: pi/2 - (pio2_1+pio2_2+pio2_3)
+ */
+
+ static const double
+ zero = 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+ two24 = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+ invpio2 = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+ pio2_1 = 1.57079632673412561417e+00, /* 0x3FF921FB, 0x54400000 */
+ pio2_1t = 6.07710050650619224932e-11, /* 0x3DD0B461, 0x1A626331 */
+ pio2_2 = 6.07710050630396597660e-11, /* 0x3DD0B461, 0x1A600000 */
+ pio2_2t = 2.02226624879595063154e-21, /* 0x3BA3198A, 0x2E037073 */
+ pio2_3 = 2.02226624871116645580e-21, /* 0x3BA3198A, 0x2E000000 */
+ pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
+
+ double z, w, t, r, fn;
+ double tx[3];
+ int32_t e0, i, j, nx, n, ix, hx;
+ uint32_t low;
+
+ z = 0;
+ GET_HIGH_WORD(hx, x); /* high word of x */
+ ix = hx & 0x7fffffff;
+ if (ix <= 0x3fe921fb) { /* |x| ~<= pi/4 , no need for reduction */
+ y[0] = x;
+ y[1] = 0;
+ return 0;
+ }
+ if (ix < 0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
+ if (hx > 0) {
+ z = x - pio2_1;
+ if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z - pio2_1t;
+ y[1] = (z - y[0]) - pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z -= pio2_2;
+ y[0] = z - pio2_2t;
+ y[1] = (z - y[0]) - pio2_2t;
+ }
+ return 1;
+ } else { /* negative x */
+ z = x + pio2_1;
+ if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z + pio2_1t;
+ y[1] = (z - y[0]) + pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z += pio2_2;
+ y[0] = z + pio2_2t;
+ y[1] = (z - y[0]) + pio2_2t;
+ }
+ return -1;
+ }
+ }
+ if (ix <= 0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+ t = fabs(x);
+ n = static_cast<int32_t>(t * invpio2 + half);
+ fn = static_cast<double>(n);
+ r = t - fn * pio2_1;
+ w = fn * pio2_1t; /* 1st round good to 85 bit */
+ if (n < 32 && ix != npio2_hw[n - 1]) {
+ y[0] = r - w; /* quick check no cancellation */
+ } else {
+ uint32_t high;
+ j = ix >> 20;
+ y[0] = r - w;
+ GET_HIGH_WORD(high, y[0]);
+ i = j - ((high >> 20) & 0x7ff);
+ if (i > 16) { /* 2nd iteration needed, good to 118 */
+ t = r;
+ w = fn * pio2_2;
+ r = t - w;
+ w = fn * pio2_2t - ((t - r) - w);
+ y[0] = r - w;
+ GET_HIGH_WORD(high, y[0]);
+ i = j - ((high >> 20) & 0x7ff);
+ if (i > 49) { /* 3rd iteration needed, 151 bits acc */
+ t = r; /* will cover all possible cases */
+ w = fn * pio2_3;
+ r = t - w;
+ w = fn * pio2_3t - ((t - r) - w);
+ y[0] = r - w;
+ }
+ }
+ }
+ y[1] = (r - y[0]) - w;
+ if (hx < 0) {
+ y[0] = -y[0];
+ y[1] = -y[1];
+ return -n;
+ } else {
+ return n;
+ }
+ }
+ /*
+ * all other (large) arguments
+ */
+ if (ix >= 0x7ff00000) { /* x is inf or NaN */
+ y[0] = y[1] = x - x;
+ return 0;
+ }
+ /* set z = scalbn(|x|,ilogb(x)-23) */
+ GET_LOW_WORD(low, x);
+ SET_LOW_WORD(z, low);
+ e0 = (ix >> 20) - 1046; /* e0 = ilogb(z)-23; */
+ SET_HIGH_WORD(z, ix - static_cast<int32_t>(e0 << 20));
+ for (i = 0; i < 2; i++) {
+ tx[i] = static_cast<double>(static_cast<int32_t>(z));
+ z = (z - tx[i]) * two24;
+ }
+ tx[2] = z;
+ nx = 3;
+ while (tx[nx - 1] == zero) nx--; /* skip zero term */
+ n = __kernel_rem_pio2(tx, y, e0, nx, 2, two_over_pi);
+ if (hx < 0) {
+ y[0] = -y[0];
+ y[1] = -y[1];
+ return -n;
+ }
+ return n;
+}
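For context, a sketch (not part of the commit) of the standard fdlibm dispatch built on this reduction: sin for finite arguments already known to exceed pi/4 in magnitude reduces to a kernel call on y[0]+y[1] plus a quadrant decision from n mod 4. The function name is hypothetical and the small-argument fast path is omitted:

double sin_reduced(double x) {
  double y[2];
  int32_t n = __ieee754_rem_pio2(x, y);
  switch (n & 3) {
    case 0:  return  __kernel_sin(y[0], y[1], 1);
    case 1:  return  __kernel_cos(y[0], y[1]);
    case 2:  return -__kernel_sin(y[0], y[1], 1);
    default: return -__kernel_cos(y[0], y[1]);
  }
}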
+
+/* __kernel_cos( x, y )
+ * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ *
+ * Algorithm
+ * 1. Since cos(-x) = cos(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+ * 3. cos(x) is approximated by a polynomial of degree 14 on
+ * [0,pi/4]
+ * 4 14
+ * cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+ * where the remez error is
+ *
+ * | 2 4 6 8 10 12 14 | -58
+ * |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2
+ * | |
+ *
+ * 4 6 8 10 12 14
+ * 4. let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then
+ * cos(x) = 1 - x*x/2 + r
+ * since cos(x+y) ~ cos(x) - sin(x)*y
+ * ~ cos(x) - x*y,
+ * a correction term is necessary in cos(x) and hence
+ * cos(x+y) = 1 - (x*x/2 - (r - x*y))
+ * For better accuracy when x > 0.3, let qx = |x|/4 with
+ * the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
+ * Then
+ * cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
+ * Note that 1-qx and (x*x/2-qx) are EXACT here, and the
+ * magnitude of the latter is at least a quarter of x*x/2,
+ * thus, reducing the rounding error in the subtraction.
+ */
+V8_INLINE double __kernel_cos(double x, double y) {
+ static const double
+ one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+ C1 = 4.16666666666666019037e-02, /* 0x3FA55555, 0x5555554C */
+ C2 = -1.38888888888741095749e-03, /* 0xBF56C16C, 0x16C15177 */
+ C3 = 2.48015872894767294178e-05, /* 0x3EFA01A0, 0x19CB1590 */
+ C4 = -2.75573143513906633035e-07, /* 0xBE927E4F, 0x809C52AD */
+ C5 = 2.08757232129817482790e-09, /* 0x3E21EE9E, 0xBDB4B1C4 */
+ C6 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
+
+ double a, iz, z, r, qx;
+ int32_t ix;
+ GET_HIGH_WORD(ix, x);
+ ix &= 0x7fffffff; /* ix = |x|'s high word*/
+ if (ix < 0x3e400000) { /* if x < 2**27 */
+ if (static_cast<int>(x) == 0) return one; /* generate inexact */
+ }
+ z = x * x;
+ r = z * (C1 + z * (C2 + z * (C3 + z * (C4 + z * (C5 + z * C6)))));
+ if (ix < 0x3FD33333) { /* if |x| < 0.3 */
+ return one - (0.5 * z - (z * r - x * y));
+ } else {
+ if (ix > 0x3fe90000) { /* x > 0.78125 */
+ qx = 0.28125;
+ } else {
+ INSERT_WORDS(qx, ix - 0x00200000, 0); /* x/4 */
+ }
+ iz = 0.5 * z - qx;
+ a = one - qx;
+ return a - (iz - (z * r - x * y));
+ }
+}
+
+/* __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+ * double x[],y[]; int e0,nx,prec; int ipio2[];
+ *
+ * __kernel_rem_pio2 return the last three digits of N with
+ * y = x - N*pi/2
+ * so that |y| < pi/2.
+ *
+ * The method is to compute the integer (mod 8) and fraction parts of
+ * (2/pi)*x without doing the full multiplication. In general we
+ * skip the part of the product that is known to be a huge integer (
+ * more accurately, = 0 mod 8 ). Thus the number of operations is
+ * independent of the exponent of the input.
+ *
+ * (2/pi) is represented by an array of 24-bit integers in ipio2[].
+ *
+ * Input parameters:
+ * x[] The input value (must be positive) is broken into nx
+ * pieces of 24-bit integers in double precision format.
+ * x[i] will be the i-th 24-bit chunk of x. The scaled exponent
+ * of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
+ * matches x up to 24 bits).
+ *
+ * Example of breaking a positive double z into x[0]+x[1]+x[2]:
+ * e0 = ilogb(z)-23
+ * z = scalbn(z,-e0)
+ * for i = 0,1,2
+ * x[i] = floor(z)
+ * z = (z-x[i])*2**24
+ *
+ *
+ * y[] output result in an array of double precision numbers.
+ * The dimension of y[] is:
+ * 24-bit precision 1
+ * 53-bit precision 2
+ * 64-bit precision 2
+ * 113-bit precision 3
+ * The actual value is the sum of them. Thus for 113-bit
+ * precision, one may have to do something like:
+ *
+ * long double t,w,r_head, r_tail;
+ * t = (long double)y[2] + (long double)y[1];
+ * w = (long double)y[0];
+ * r_head = t+w;
+ * r_tail = w - (r_head - t);
+ *
+ * e0 The exponent of x[0]
+ *
+ * nx dimension of x[]
+ *
+ * prec an integer indicating the precision:
+ * 0 24 bits (single)
+ * 1 53 bits (double)
+ * 2 64 bits (extended)
+ * 3 113 bits (quad)
+ *
+ * ipio2[]
+ * integer array, contains the (24*i)-th to (24*i+23)-th
+ * bit of 2/pi after binary point. The corresponding
+ * floating value is
+ *
+ * ipio2[i] * 2^(-24(i+1)).
+ *
+ * External function:
+ * double scalbn(), floor();
+ *
+ *
+ * Here is the description of some local variables:
+ *
+ * jk jk+1 is the initial number of terms of ipio2[] needed
+ * in the computation. The recommended value is 2,3,4,
+ * 6 for single, double, extended, and quad.
+ *
+ * jz local integer variable indicating the number of
+ * terms of ipio2[] used.
+ *
+ * jx nx - 1
+ *
+ * jv index for pointing to the suitable ipio2[] for the
+ * computation. In general, we want
+ * ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+ * is an integer. Thus
+ * e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+ * Hence jv = max(0,(e0-3)/24).
+ *
+ * jp jp+1 is the number of terms in PIo2[] needed, jp = jk.
+ *
+ * q[] double array with integral value, representing the
+ * 24-bits chunk of the product of x and 2/pi.
+ *
+ * q0 the corresponding exponent of q[0]. Note that the
+ * exponent for q[i] would be q0-24*i.
+ *
+ * PIo2[] double precision array, obtained by cutting pi/2
+ * into 24 bits chunks.
+ *
+ * f[] ipio2[] in floating point
+ *
+ * iq[] integer array by breaking up q[] in 24-bits chunk.
+ *
+ * fq[] final product of x*(2/pi) in fq[0],..,fq[jk]
+ *
+ * ih integer. If >0 it indicates q[] is >= 0.5, hence
+ * it also indicates the *sign* of the result.
+ *
+ */
+int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec,
+ const int32_t *ipio2) {
+ /* Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+ static const int init_jk[] = {2, 3, 4, 6}; /* initial value for jk */
+
+ static const double PIo2[] = {
+ 1.57079625129699707031e+00, /* 0x3FF921FB, 0x40000000 */
+ 7.54978941586159635335e-08, /* 0x3E74442D, 0x00000000 */
+ 5.39030252995776476554e-15, /* 0x3CF84698, 0x80000000 */
+ 3.28200341580791294123e-22, /* 0x3B78CC51, 0x60000000 */
+ 1.27065575308067607349e-29, /* 0x39F01B83, 0x80000000 */
+ 1.22933308981111328932e-36, /* 0x387A2520, 0x40000000 */
+ 2.73370053816464559624e-44, /* 0x36E38222, 0x80000000 */
+ 2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
+ };
+
+ static const double
+ zero = 0.0,
+ one = 1.0,
+ two24 = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+ twon24 = 5.96046447753906250000e-08; /* 0x3E700000, 0x00000000 */
+
+ int32_t jz, jx, jv, jp, jk, carry, n, iq[20], i, j, k, m, q0, ih;
+ double z, fw, f[20], fq[20], q[20];
+
+ /* initialize jk*/
+ jk = init_jk[prec];
+ jp = jk;
+
+ /* determine jx,jv,q0, note that 3>q0 */
+ jx = nx - 1;
+ jv = (e0 - 3) / 24;
+ if (jv < 0) jv = 0;
+ q0 = e0 - 24 * (jv + 1);
+
+ /* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
+ j = jv - jx;
+ m = jx + jk;
+ for (i = 0; i <= m; i++, j++) {
+ f[i] = (j < 0) ? zero : static_cast<double>(ipio2[j]);
+ }
+
+ /* compute q[0],q[1],...q[jk] */
+ for (i = 0; i <= jk; i++) {
+ for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j];
+ q[i] = fw;
+ }
+
+ jz = jk;
+recompute:
+ /* distill q[] into iq[] reversingly */
+ for (i = 0, j = jz, z = q[jz]; j > 0; i++, j--) {
+ fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
+ iq[i] = static_cast<int32_t>(z - two24 * fw);
+ z = q[j - 1] + fw;
+ }
+
+ /* compute n */
+ z = scalbn(z, q0); /* actual value of z */
+ z -= 8.0 * floor(z * 0.125); /* trim off integer >= 8 */
+ n = static_cast<int32_t>(z);
+ z -= static_cast<double>(n);
+ ih = 0;
+ if (q0 > 0) { /* need iq[jz-1] to determine n */
+ i = (iq[jz - 1] >> (24 - q0));
+ n += i;
+ iq[jz - 1] -= i << (24 - q0);
+ ih = iq[jz - 1] >> (23 - q0);
+ } else if (q0 == 0) {
+ ih = iq[jz - 1] >> 23;
+ } else if (z >= 0.5) {
+ ih = 2;
+ }
+
+ if (ih > 0) { /* q > 0.5 */
+ n += 1;
+ carry = 0;
+ for (i = 0; i < jz; i++) { /* compute 1-q */
+ j = iq[i];
+ if (carry == 0) {
+ if (j != 0) {
+ carry = 1;
+ iq[i] = 0x1000000 - j;
+ }
+ } else {
+ iq[i] = 0xffffff - j;
+ }
+ }
+ if (q0 > 0) { /* rare case: chance is 1 in 12 */
+ switch (q0) {
+ case 1:
+ iq[jz - 1] &= 0x7fffff;
+ break;
+ case 2:
+ iq[jz - 1] &= 0x3fffff;
+ break;
+ }
+ }
+ if (ih == 2) {
+ z = one - z;
+ if (carry != 0) z -= scalbn(one, q0);
+ }
+ }
+
+ /* check if recomputation is needed */
+ if (z == zero) {
+ j = 0;
+ for (i = jz - 1; i >= jk; i--) j |= iq[i];
+ if (j == 0) { /* need recomputation */
+ for (k = 1; jk >= k && iq[jk - k] == 0; k++) {
+ /* k = no. of terms needed */
+ }
+
+ for (i = jz + 1; i <= jz + k; i++) { /* add q[jz+1] to q[jz+k] */
+ f[jx + i] = ipio2[jv + i];
+ for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j];
+ q[i] = fw;
+ }
+ jz += k;
+ goto recompute;
+ }
+ }
+
+ /* chop off zero terms */
+ if (z == 0.0) {
+ jz -= 1;
+ q0 -= 24;
+ while (iq[jz] == 0) {
+ jz--;
+ q0 -= 24;
+ }
+ } else { /* break z into 24-bit chunks if necessary */
+ z = scalbn(z, -q0);
+ if (z >= two24) {
+ fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
+ iq[jz] = z - two24 * fw;
+ jz += 1;
+ q0 += 24;
+ iq[jz] = fw;
+ } else {
+ iq[jz] = z;
+ }
+ }
+
+ /* convert integer "bit" chunk to floating-point value */
+ fw = scalbn(one, q0);
+ for (i = jz; i >= 0; i--) {
+ q[i] = fw * iq[i];
+ fw *= twon24;
+ }
+
+ /* compute PIo2[0,...,jp]*q[jz,...,0] */
+ for (i = jz; i >= 0; i--) {
+ for (fw = 0.0, k = 0; k <= jp && k <= jz - i; k++) fw += PIo2[k] * q[i + k];
+ fq[jz - i] = fw;
+ }
+
+ /* compress fq[] into y[] */
+ switch (prec) {
+ case 0:
+ fw = 0.0;
+ for (i = jz; i >= 0; i--) fw += fq[i];
+ y[0] = (ih == 0) ? fw : -fw;
+ break;
+ case 1:
+ case 2:
+ fw = 0.0;
+ for (i = jz; i >= 0; i--) fw += fq[i];
+ y[0] = (ih == 0) ? fw : -fw;
+ fw = fq[0] - fw;
+ for (i = 1; i <= jz; i++) fw += fq[i];
+ y[1] = (ih == 0) ? fw : -fw;
+ break;
+ case 3: /* painful */
+ for (i = jz; i > 0; i--) {
+ fw = fq[i - 1] + fq[i];
+ fq[i] += fq[i - 1] - fw;
+ fq[i - 1] = fw;
+ }
+ for (i = jz; i > 1; i--) {
+ fw = fq[i - 1] + fq[i];
+ fq[i] += fq[i - 1] - fw;
+ fq[i - 1] = fw;
+ }
+ for (fw = 0.0, i = jz; i >= 2; i--) fw += fq[i];
+ if (ih == 0) {
+ y[0] = fq[0];
+ y[1] = fq[1];
+ y[2] = fw;
+ } else {
+ y[0] = -fq[0];
+ y[1] = -fq[1];
+ y[2] = -fw;
+ }
+ }
+ return n & 7;
+}
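The input decomposition described in the header comment above can be written out directly; a standalone sketch (not part of the commit) using <cmath>, with a hypothetical helper name:

#include <cmath>

// Break a positive double z into three 24-bit chunks x[0..2] with scaled
// exponent e0, exactly as the "Example of breaking a positive double z"
// comment above prescribes.
void Split24(double z, double x[3], int* e0) {
  *e0 = std::ilogb(z) - 23;       // exponent of the leading chunk
  z = std::scalbn(z, -*e0);
  for (int i = 0; i < 3; i++) {
    x[i] = std::floor(z);         // next 24-bit integer piece
    z = (z - x[i]) * 16777216.0;  // 2**24
  }
}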
+
+/* __kernel_sin( x, y, iy)
+ * kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input iy indicates whether y is 0 (if iy=0, y is assumed to be 0).
+ *
+ * Algorithm
+ * 1. Since sin(-x) = -sin(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+ * 3. sin(x) is approximated by a polynomial of degree 13 on
+ * [0,pi/4]
+ * 3 13
+ * sin(x) ~ x + S1*x + ... + S6*x
+ * where
+ *
+ * |sin(x) 2 4 6 8 10 12 | -58
+ * |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x +S6*x )| <= 2
+ * | x |
+ *
+ * 4. sin(x+y) = sin(x) + sin'(x')*y
+ * ~ sin(x) + (1-x*x/2)*y
+ * For better accuracy, let
+ * 3 2 2 2 2
+ * r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
+ * then 3 2
+ * sin(x) = x + (S1*x + (x *(r-y/2)+y))
+ */
+V8_INLINE double __kernel_sin(double x, double y, int iy) {
+ static const double
+ half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+ S1 = -1.66666666666666324348e-01, /* 0xBFC55555, 0x55555549 */
+ S2 = 8.33333333332248946124e-03, /* 0x3F811111, 0x1110F8A6 */
+ S3 = -1.98412698298579493134e-04, /* 0xBF2A01A0, 0x19C161D5 */
+ S4 = 2.75573137070700676789e-06, /* 0x3EC71DE3, 0x57B1FE7D */
+ S5 = -2.50507602534068634195e-08, /* 0xBE5AE5E6, 0x8A2B9CEB */
+ S6 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
+
+ double z, r, v;
+ int32_t ix;
+ GET_HIGH_WORD(ix, x);
+ ix &= 0x7fffffff; /* high word of x */
+ if (ix < 0x3e400000) { /* |x| < 2**-27 */
+ if (static_cast<int>(x) == 0) return x;
+ } /* generate inexact */
+ z = x * x;
+ v = z * x;
+ r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
+ if (iy == 0) {
+ return x + v * (S1 + z * r);
+ } else {
+ return x - ((z * (half * y - v * r) - y) - v * S1);
+ }
+}
+
+/* __kernel_tan( x, y, k )
+ * kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input iy indicates whether tan (if iy = 1) or
+ * -1/tan (if iy = -1) is returned.
+ *
+ * Algorithm
+ * 1. Since tan(-x) = -tan(x), we need only to consider positive x.
+ * 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
+ * 3. tan(x) is approximated by an odd polynomial of degree 27 on
+ * [0,0.67434]
+ * 3 27
+ * tan(x) ~ x + T1*x + ... + T13*x
+ * where
+ *
+ * |tan(x) 2 4 26 | -59.2
+ * |----- - (1+T1*x +T2*x +.... +T13*x )| <= 2
+ * | x |
+ *
+ * Note: tan(x+y) = tan(x) + tan'(x)*y
+ * ~ tan(x) + (1+x*x)*y
+ * Therefore, for better accuracy in computing tan(x+y), let
+ * 3 2 2 2 2
+ * r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
+ * then
+ * 3 2
+ * tan(x+y) = x + (T1*x + (x *(r+y)+y))
+ *
+ * 4. For x in [0.67434,pi/4], let y = pi/4 - x, then
+ * tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
+ * = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
+ */
+double __kernel_tan(double x, double y, int iy) {
+ static const double xxx[] = {
+ 3.33333333333334091986e-01, /* 3FD55555, 55555563 */
+ 1.33333333333201242699e-01, /* 3FC11111, 1110FE7A */
+ 5.39682539762260521377e-02, /* 3FABA1BA, 1BB341FE */
+ 2.18694882948595424599e-02, /* 3F9664F4, 8406D637 */
+ 8.86323982359930005737e-03, /* 3F8226E3, E96E8493 */
+ 3.59207910759131235356e-03, /* 3F6D6D22, C9560328 */
+ 1.45620945432529025516e-03, /* 3F57DBC8, FEE08315 */
+ 5.88041240820264096874e-04, /* 3F4344D8, F2F26501 */
+ 2.46463134818469906812e-04, /* 3F3026F7, 1A8D1068 */
+ 7.81794442939557092300e-05, /* 3F147E88, A03792A6 */
+ 7.14072491382608190305e-05, /* 3F12B80F, 32F0A7E9 */
+ -1.85586374855275456654e-05, /* BEF375CB, DB605373 */
+ 2.59073051863633712884e-05, /* 3EFB2A70, 74BF7AD4 */
+ /* one */ 1.00000000000000000000e+00, /* 3FF00000, 00000000 */
+ /* pio4 */ 7.85398163397448278999e-01, /* 3FE921FB, 54442D18 */
+ /* pio4lo */ 3.06161699786838301793e-17 /* 3C81A626, 33145C07 */
+ };
+#define one xxx[13]
+#define pio4 xxx[14]
+#define pio4lo xxx[15]
+#define T xxx
+
+ double z, r, v, w, s;
+ int32_t ix, hx;
+
+ GET_HIGH_WORD(hx, x); /* high word of x */
+ ix = hx & 0x7fffffff; /* high word of |x| */
+ if (ix < 0x3e300000) { /* x < 2**-28 */
+ if (static_cast<int>(x) == 0) { /* generate inexact */
+ uint32_t low;
+ GET_LOW_WORD(low, x);
+ if (((ix | low) | (iy + 1)) == 0) {
+ return one / fabs(x);
+ } else {
+ if (iy == 1) {
+ return x;
+ } else { /* compute -1 / (x+y) carefully */
+ double a, t;
+
+ z = w = x + y;
+ SET_LOW_WORD(z, 0);
+ v = y - (z - x);
+ t = a = -one / w;
+ SET_LOW_WORD(t, 0);
+ s = one + t * z;
+ return t + a * (s + t * v);
+ }
+ }
+ }
+ }
+ if (ix >= 0x3FE59428) { /* |x| >= 0.6744 */
+ if (hx < 0) {
+ x = -x;
+ y = -y;
+ }
+ z = pio4 - x;
+ w = pio4lo - y;
+ x = z + w;
+ y = 0.0;
+ }
+ z = x * x;
+ w = z * z;
+ /*
+ * Break x^5*(T[1]+x^2*T[2]+...) into
+ * x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
+ * x^5(x^2*(T[2]+x^4*T[4]+...+x^22*[T12]))
+ */
+ r = T[1] + w * (T[3] + w * (T[5] + w * (T[7] + w * (T[9] + w * T[11]))));
+ v = z *
+ (T[2] + w * (T[4] + w * (T[6] + w * (T[8] + w * (T[10] + w * T[12])))));
+ s = z * x;
+ r = y + z * (s * (r + v) + y);
+ r += T[0] * s;
+ w = x + r;
+ if (ix >= 0x3FE59428) {
+ v = iy;
+ return (1 - ((hx >> 30) & 2)) * (v - 2.0 * (x - (w * w / (w + v) - r)));
+ }
+ if (iy == 1) {
+ return w;
+ } else {
+ /*
+ * if allow error up to 2 ulp, simply return
+ * -1.0 / (x+r) here
+ */
+ /* compute -1.0 / (x+r) accurately */
+ double a, t;
+ z = w;
+ SET_LOW_WORD(z, 0);
+ v = r - (z - x); /* z+v = r+x */
+ t = a = -1.0 / w; /* a = -1.0/w */
+ SET_LOW_WORD(t, 0);
+ s = 1.0 + t * z;
+ return t + a * (s + t * v);
+ }
+
+#undef one
+#undef pio4
+#undef pio4lo
+#undef T
+}
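For context, a sketch (not part of the commit) of how an fdlibm-style tan() entry point consumes this kernel for finite |x| > pi/4: quadrant parity from the reduction selects tan versus -1/tan. The function name is hypothetical:

double tan_reduced(double x) {
  double y[2];
  int32_t n = __ieee754_rem_pio2(x, y);
  // iy = 1 for even n (tan), -1 for odd n (-1/tan), per the contract above.
  return __kernel_tan(y[0], y[1], 1 - ((n & 1) << 1));
}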
+
+} // namespace
+
+/* acos(x)
+ * Method :
+ * acos(x) = pi/2 - asin(x)
+ * acos(-x) = pi/2 + asin(x)
+ * For |x|<=0.5
+ * acos(x) = pi/2 - (x + x*x^2*R(x^2)) (see asin.c)
+ * For x>0.5
+ * acos(x) = pi/2 - (pi/2 - 2asin(sqrt((1-x)/2)))
+ * = 2asin(sqrt((1-x)/2))
+ * = 2s + 2s*z*R(z) ...z=(1-x)/2, s=sqrt(z)
+ * = 2f + (2c + 2s*z*R(z))
+ * where f=hi part of s, and c = (z-f*f)/(s+f) is the correction term
+ * for f so that f+c ~ sqrt(z).
+ * For x<-0.5
+ * acos(x) = pi - 2asin(sqrt((1-|x|)/2))
+ * = pi - 0.5*(s+s*z*R(z)), where z=(1-|x|)/2,s=sqrt(z)
+ *
+ * Special cases:
+ * if x is NaN, return x itself;
+ * if |x|>1, return NaN with invalid signal.
+ *
+ * Function needed: sqrt
+ */
+double acos(double x) {
+ static const double
+ one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+ pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
+ pio2_hi = 1.57079632679489655800e+00, /* 0x3FF921FB, 0x54442D18 */
+ pio2_lo = 6.12323399573676603587e-17, /* 0x3C91A626, 0x33145C07 */
+ pS0 = 1.66666666666666657415e-01, /* 0x3FC55555, 0x55555555 */
+ pS1 = -3.25565818622400915405e-01, /* 0xBFD4D612, 0x03EB6F7D */
+ pS2 = 2.01212532134862925881e-01, /* 0x3FC9C155, 0x0E884455 */
+ pS3 = -4.00555345006794114027e-02, /* 0xBFA48228, 0xB5688F3B */
+ pS4 = 7.91534994289814532176e-04, /* 0x3F49EFE0, 0x7501B288 */
+ pS5 = 3.47933107596021167570e-05, /* 0x3F023DE1, 0x0DFDF709 */
+ qS1 = -2.40339491173441421878e+00, /* 0xC0033A27, 0x1C8A2D4B */
+ qS2 = 2.02094576023350569471e+00, /* 0x40002AE5, 0x9C598AC8 */
+ qS3 = -6.88283971605453293030e-01, /* 0xBFE6066C, 0x1B8D0159 */
+ qS4 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
+ double z, p, q, r, w, s, c, df;
+ int32_t hx, ix;
+ GET_HIGH_WORD(hx, x);
+ ix = hx & 0x7fffffff;
+ if (ix >= 0x3ff00000) { /* |x| >= 1 */
+ uint32_t lx;
+ GET_LOW_WORD(lx, x);
+ if (((ix - 0x3ff00000) | lx) == 0) { /* |x|==1 */
+ if (hx > 0)
+ return 0.0; /* acos(1) = 0 */
+ else
+ return pi + 2.0 * pio2_lo; /* acos(-1)= pi */
+ }
+ return (x - x) / (x - x); /* acos(|x|>1) is NaN */
+ }
+ if (ix < 0x3fe00000) { /* |x| < 0.5 */
+ if (ix <= 0x3c600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
+ z = x * x;
+ p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
+ q = one + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
+ r = p / q;
+ return pio2_hi - (x - (pio2_lo - x * r));
+ } else if (hx < 0) { /* x < -0.5 */
+ z = (one + x) * 0.5;
+ p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
+ q = one + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
+ s = sqrt(z);
+ r = p / q;
+ w = r * s - pio2_lo;
+ return pi - 2.0 * (s + w);
+ } else { /* x > 0.5 */
+ z = (one - x) * 0.5;
+ s = sqrt(z);
+ df = s;
+ SET_LOW_WORD(df, 0);
+ c = (z - df * df) / (s + df);
+ p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
+ q = one + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
+ r = p / q;
+ w = r * s + c;
+ return 2.0 * (df + w);
+ }
+}
+
+/* acosh(x)
+ * Method :
+ * Based on
+ * acosh(x) = log [ x + sqrt(x*x-1) ]
+ * we have
+ * acosh(x) := log(x)+ln2, if x is large; else
+ * acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
+ * acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
+ *
+ * Special cases:
+ * acosh(x) is NaN with signal if x<1.
+ * acosh(NaN) is NaN without signal.
+ */
+double acosh(double x) {
+ static const double
+ one = 1.0,
+ ln2 = 6.93147180559945286227e-01; /* 0x3FE62E42, 0xFEFA39EF */
+ double t;
+ int32_t hx;
+ uint32_t lx;
+ EXTRACT_WORDS(hx, lx, x);
+ if (hx < 0x3ff00000) { /* x < 1 */
+ return (x - x) / (x - x);
+ } else if (hx >= 0x41b00000) { /* x > 2**28 */
+ if (hx >= 0x7ff00000) { /* x is inf or NaN */
+ return x + x;
+ } else {
+ return log(x) + ln2; /* acosh(huge)=log(2x) */
+ }
+ } else if (((hx - 0x3ff00000) | lx) == 0) {
+ return 0.0; /* acosh(1) = 0 */
+ } else if (hx > 0x40000000) { /* 2**28 > x > 2 */
+ t = x * x;
+ return log(2.0 * x - one / (x + sqrt(t - one)));
+ } else { /* 1<x<2 */
+ t = x - one;
+ return log1p(t + sqrt(2.0 * t + t * t));
+ }
+}
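The branch structure above is about cancellation, not speed. A standalone sketch (not part of the commit) of the naive formula it avoids:

#include <cmath>

// Mathematically equal to acosh(x), but for x just above 1 the x*x - 1
// term has already rounded away roughly half the significant digits, so
// the result is far less accurate than the log1p(t + sqrt(2.0*t + t*t))
// form used above, where t = x - 1 is computed exactly for 1 < x < 2.
double acosh_naive(double x) {
  return std::log(x + std::sqrt(x * x - 1.0));
}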
+
+/* asin(x)
+ * Method :
+ * Since asin(x) = x + x^3/6 + x^5*3/40 + x^7*15/336 + ...
+ * we approximate asin(x) on [0,0.5] by
+ * asin(x) = x + x*x^2*R(x^2)
+ * where
+ * R(x^2) is a rational approximation of (asin(x)-x)/x^3
+ * and its remez error is bounded by
+ * |(asin(x)-x)/x^3 - R(x^2)| < 2^(-58.75)
+ *
+ * For x in [0.5,1]
+ * asin(x) = pi/2-2*asin(sqrt((1-x)/2))
+ * Let y = (1-x), z = y/2, s := sqrt(z), and pio2_hi+pio2_lo=pi/2;
+ * then for x>0.98
+ * asin(x) = pi/2 - 2*(s+s*z*R(z))
+ * = pio2_hi - (2*(s+s*z*R(z)) - pio2_lo)
+ * For x<=0.98, let pio4_hi = pio2_hi/2, then
+ * f = hi part of s;
+ * c = sqrt(z) - f = (z-f*f)/(s+f) ...f+c=sqrt(z)
+ * and
+ * asin(x) = pi/2 - 2*(s+s*z*R(z))
+ * = pio4_hi+(pio4-2s)-(2s*z*R(z)-pio2_lo)
+ * = pio4_hi+(pio4-2f)-(2s*z*R(z)-(pio2_lo+2c))
+ *
+ * Special cases:
+ * if x is NaN, return x itself;
+ * if |x|>1, return NaN with invalid signal.
+ */
+double asin(double x) {
+ static const double
+ one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+ huge = 1.000e+300,
+ pio2_hi = 1.57079632679489655800e+00, /* 0x3FF921FB, 0x54442D18 */
+ pio2_lo = 6.12323399573676603587e-17, /* 0x3C91A626, 0x33145C07 */
+ pio4_hi = 7.85398163397448278999e-01, /* 0x3FE921FB, 0x54442D18 */
+ /* coefficient for R(x^2) */
+ pS0 = 1.66666666666666657415e-01, /* 0x3FC55555, 0x55555555 */
+ pS1 = -3.25565818622400915405e-01, /* 0xBFD4D612, 0x03EB6F7D */
+ pS2 = 2.01212532134862925881e-01, /* 0x3FC9C155, 0x0E884455 */
+ pS3 = -4.00555345006794114027e-02, /* 0xBFA48228, 0xB5688F3B */
+ pS4 = 7.91534994289814532176e-04, /* 0x3F49EFE0, 0x7501B288 */
+ pS5 = 3.47933107596021167570e-05, /* 0x3F023DE1, 0x0DFDF709 */
+ qS1 = -2.40339491173441421878e+00, /* 0xC0033A27, 0x1C8A2D4B */
+ qS2 = 2.02094576023350569471e+00, /* 0x40002AE5, 0x9C598AC8 */
+ qS3 = -6.88283971605453293030e-01, /* 0xBFE6066C, 0x1B8D0159 */
+ qS4 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
+ double t, w, p, q, c, r, s;
+ int32_t hx, ix;
+
+ t = 0;
+ GET_HIGH_WORD(hx, x);
+ ix = hx & 0x7fffffff;
+ if (ix >= 0x3ff00000) { /* |x|>= 1 */
+ uint32_t lx;
+ GET_LOW_WORD(lx, x);
+ if (((ix - 0x3ff00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
+ return x * pio2_hi + x * pio2_lo;
+ return (x - x) / (x - x); /* asin(|x|>1) is NaN */
+ } else if (ix < 0x3fe00000) { /* |x|<0.5 */
+ if (ix < 0x3e400000) { /* if |x| < 2**-27 */
+ if (huge + x > one) return x; /* return x with inexact if x!=0*/
+ } else {
+ t = x * x;
+ }
+ p = t * (pS0 + t * (pS1 + t * (pS2 + t * (pS3 + t * (pS4 + t * pS5)))));
+ q = one + t * (qS1 + t * (qS2 + t * (qS3 + t * qS4)));
+ w = p / q;
+ return x + x * w;
+ }
+ /* 1> |x|>= 0.5 */
+ w = one - fabs(x);
+ t = w * 0.5;
+ p = t * (pS0 + t * (pS1 + t * (pS2 + t * (pS3 + t * (pS4 + t * pS5)))));
+ q = one + t * (qS1 + t * (qS2 + t * (qS3 + t * qS4)));
+ s = sqrt(t);
+ if (ix >= 0x3FEF3333) { /* if |x| > 0.975 */
+ w = p / q;
+ t = pio2_hi - (2.0 * (s + s * w) - pio2_lo);
+ } else {
+ w = s;
+ SET_LOW_WORD(w, 0);
+ c = (t - w * w) / (s + w);
+ r = p / q;
+ p = 2.0 * s * r - (pio2_lo - 2.0 * c);
+ q = pio4_hi - 2.0 * w;
+ t = pio4_hi - (p - q);
+ }
+ if (hx > 0)
+ return t;
+ else
+ return -t;
+}
+/* asinh(x)
+ * Method :
+ * Based on
+ * asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ]
+ * we have
+ * asinh(x) := x if 1+x*x=1,
+ * := sign(x)*(log(x)+ln2)) for large |x|, else
+ * := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if|x|>2, else
+ * := sign(x)*log1p(|x| + x^2/(1 + sqrt(1+x^2)))
+ */
+double asinh(double x) {
+ static const double
+ one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+ ln2 = 6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+ huge = 1.00000000000000000000e+300;
+
+ double t, w;
+ int32_t hx, ix;
+ GET_HIGH_WORD(hx, x);
+ ix = hx & 0x7fffffff;
+ if (ix >= 0x7ff00000) return x + x; /* x is inf or NaN */
+ if (ix < 0x3e300000) { /* |x|<2**-28 */
+ if (huge + x > one) return x; /* return x inexact except 0 */
+ }
+ if (ix > 0x41b00000) { /* |x| > 2**28 */
+ w = log(fabs(x)) + ln2;
+ } else if (ix > 0x40000000) { /* 2**28 > |x| > 2.0 */
+ t = fabs(x);
+ w = log(2.0 * t + one / (sqrt(x * x + one) + t));
+ } else { /* 2.0 > |x| > 2**-28 */
+ t = x * x;
+ w = log1p(fabs(x) + t / (one + sqrt(one + t)));
+ }
+ if (hx > 0) {
+ return w;
+ } else {
+ return -w;
+ }
+}
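+
+/*
+ * Illustrative sketch, not part of the fdlibm sources: the naive identity
+ * that the method comment above starts from. The branches in asinh() exist
+ * precisely because this direct form overflows x*x for large |x| and loses
+ * precision to cancellation for small |x|.
+ */
+static inline double asinh_naive_sketch(double x) { /* hypothetical helper */
+  double w = log(fabs(x) + sqrt(x * x + 1.0)); /* log(|x| + sqrt(x^2+1)) */
+  return (x < 0) ? -w : w;                     /* restore sign(x) */
+}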
+
+/* atan(x)
+ * Method
+ * 1. Reduce x to positive by atan(x) = -atan(-x).
+ * 2. According to the integer k = trunc(4t+0.25), where t = x, the argument
+ * is further reduced to one of the following intervals and the
+ * arctangent of t is evaluated by the corresponding formula:
+ *
+ * [0,7/16] atan(x) = t-t^3*(a1+t^2*(a2+...(a10+t^2*a11)...))
+ * [7/16,11/16] atan(x) = atan(1/2) + atan( (t-0.5)/(1+t/2) )
+ * [11/16,19/16] atan(x) = atan( 1 ) + atan( (t-1)/(1+t) )
+ * [19/16,39/16] atan(x) = atan(3/2) + atan( (t-1.5)/(1+1.5t) )
+ * [39/16,INF] atan(x) = atan(INF) + atan( -1/t )
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double atan(double x) {
+ static const double atanhi[] = {
+ 4.63647609000806093515e-01, /* atan(0.5)hi 0x3FDDAC67, 0x0561BB4F */
+ 7.85398163397448278999e-01, /* atan(1.0)hi 0x3FE921FB, 0x54442D18 */
+ 9.82793723247329054082e-01, /* atan(1.5)hi 0x3FEF730B, 0xD281F69B */
+ 1.57079632679489655800e+00, /* atan(inf)hi 0x3FF921FB, 0x54442D18 */
+ };
+
+ static const double atanlo[] = {
+ 2.26987774529616870924e-17, /* atan(0.5)lo 0x3C7A2B7F, 0x222F65E2 */
+ 3.06161699786838301793e-17, /* atan(1.0)lo 0x3C81A626, 0x33145C07 */
+ 1.39033110312309984516e-17, /* atan(1.5)lo 0x3C700788, 0x7AF0CBBD */
+ 6.12323399573676603587e-17, /* atan(inf)lo 0x3C91A626, 0x33145C07 */
+ };
+
+ static const double aT[] = {
+ 3.33333333333329318027e-01, /* 0x3FD55555, 0x5555550D */
+ -1.99999999998764832476e-01, /* 0xBFC99999, 0x9998EBC4 */
+ 1.42857142725034663711e-01, /* 0x3FC24924, 0x920083FF */
+ -1.11111104054623557880e-01, /* 0xBFBC71C6, 0xFE231671 */
+ 9.09088713343650656196e-02, /* 0x3FB745CD, 0xC54C206E */
+ -7.69187620504482999495e-02, /* 0xBFB3B0F2, 0xAF749A6D */
+ 6.66107313738753120669e-02, /* 0x3FB10D66, 0xA0D03D51 */
+ -5.83357013379057348645e-02, /* 0xBFADDE2D, 0x52DEFD9A */
+ 4.97687799461593236017e-02, /* 0x3FA97B4B, 0x24760DEB */
+ -3.65315727442169155270e-02, /* 0xBFA2B444, 0x2C6A6C2F */
+ 1.62858201153657823623e-02, /* 0x3F90AD3A, 0xE322DA11 */
+ };
+
+ static const double one = 1.0, huge = 1.0e300;
+
+ double w, s1, s2, z;
+ int32_t ix, hx, id;
+
+ GET_HIGH_WORD(hx, x);
+ ix = hx & 0x7fffffff;
+ if (ix >= 0x44100000) { /* if |x| >= 2^66 */
+ uint32_t low;
+ GET_LOW_WORD(low, x);
+ if (ix > 0x7ff00000 || (ix == 0x7ff00000 && (low != 0)))
+ return x + x; /* NaN */
+ if (hx > 0)
+ return atanhi[3] + *(volatile double *)&atanlo[3];
+ else
+ return -atanhi[3] - *(volatile double *)&atanlo[3];
+ }
+ if (ix < 0x3fdc0000) { /* |x| < 0.4375 */
+ if (ix < 0x3e400000) { /* |x| < 2^-27 */
+ if (huge + x > one) return x; /* raise inexact */
+ }
+ id = -1;
+ } else {
+ x = fabs(x);
+ if (ix < 0x3ff30000) { /* |x| < 1.1875 */
+ if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
+ id = 0;
+ x = (2.0 * x - one) / (2.0 + x);
+ } else { /* 11/16<=|x|< 19/16 */
+ id = 1;
+ x = (x - one) / (x + one);
+ }
+ } else {
+ if (ix < 0x40038000) { /* |x| < 2.4375 */
+ id = 2;
+ x = (x - 1.5) / (one + 1.5 * x);
+ } else { /* 2.4375 <= |x| < 2^66 */
+ id = 3;
+ x = -1.0 / x;
+ }
+ }
+ }
+ /* end of argument reduction */
+ z = x * x;
+ w = z * z;
+ /* break sum from i=0 to 10 aT[i]z**(i+1) into odd and even poly */
+ s1 = z * (aT[0] +
+ w * (aT[2] + w * (aT[4] + w * (aT[6] + w * (aT[8] + w * aT[10])))));
+ s2 = w * (aT[1] + w * (aT[3] + w * (aT[5] + w * (aT[7] + w * aT[9]))));
+ if (id < 0) {
+ return x - x * (s1 + s2);
+ } else {
+ z = atanhi[id] - ((x * (s1 + s2) - atanlo[id]) - x);
+ return (hx < 0) ? -z : z;
+ }
+}
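+
+/*
+ * Worked example (not in the fdlibm sources): for x = 1.0 the reduction
+ * above takes the 11/16 <= |x| < 19/16 branch, so id = 1 and the reduced
+ * argument becomes (1-1)/(1+1) = 0. The polynomial then contributes
+ * nothing and the result is atanhi[1] + atanlo[1] = pi/4, as tabulated.
+ */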
+
+/* atan2(y,x)
+ * Method :
+ * 1. Reduce y to positive by atan2(y,x)=-atan2(-y,x).
+ * 2. Reduce x to positive by (if x and y are unexceptional):
+ * ARG (x+iy) = arctan(y/x) ... if x > 0,
+ * ARG (x+iy) = pi - arctan[y/(-x)] ... if x < 0,
+ *
+ * Special cases:
+ *
+ * ATAN2((anything), NaN ) is NaN;
+ * ATAN2(NAN , (anything) ) is NaN;
+ * ATAN2(+-0, +(anything but NaN)) is +-0 ;
+ * ATAN2(+-0, -(anything but NaN)) is +-pi ;
+ * ATAN2(+-(anything but 0 and NaN), 0) is +-pi/2;
+ * ATAN2(+-(anything but INF and NaN), +INF) is +-0 ;
+ * ATAN2(+-(anything but INF and NaN), -INF) is +-pi;
+ * ATAN2(+-INF,+INF ) is +-pi/4 ;
+ * ATAN2(+-INF,-INF ) is +-3pi/4;
+ * ATAN2(+-INF, (anything but 0, NaN, and INF)) is +-pi/2;
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double atan2(double y, double x) {
+ static volatile double tiny = 1.0e-300;
+ static const double
+ zero = 0.0,
+ pi_o_4 = 7.8539816339744827900E-01, /* 0x3FE921FB, 0x54442D18 */
+ pi_o_2 = 1.5707963267948965580E+00, /* 0x3FF921FB, 0x54442D18 */
+ pi = 3.1415926535897931160E+00; /* 0x400921FB, 0x54442D18 */
+ static volatile double pi_lo =
+ 1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
+
+ double z;
+ int32_t k, m, hx, hy, ix, iy;
+ uint32_t lx, ly;
+
+ EXTRACT_WORDS(hx, lx, x);
+ ix = hx & 0x7fffffff;
+ EXTRACT_WORDS(hy, ly, y);
+ iy = hy & 0x7fffffff;
+ if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7ff00000) ||
+ ((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7ff00000)) {
+ return x + y; /* x or y is NaN */
+ }
+ if (((hx - 0x3ff00000) | lx) == 0) return atan(y); /* x=1.0 */
+ m = ((hy >> 31) & 1) | ((hx >> 30) & 2); /* 2*sign(x)+sign(y) */
+
+ /* when y = 0 */
+ if ((iy | ly) == 0) {
+ switch (m) {
+ case 0:
+ case 1:
+ return y; /* atan(+-0,+anything)=+-0 */
+ case 2:
+ return pi + tiny; /* atan(+0,-anything) = pi */
+ case 3:
+ return -pi - tiny; /* atan(-0,-anything) =-pi */
+ }
+ }
+ /* when x = 0 */
+ if ((ix | lx) == 0) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+
+ /* when x is INF */
+ if (ix == 0x7ff00000) {
+ if (iy == 0x7ff00000) {
+ switch (m) {
+ case 0:
+ return pi_o_4 + tiny; /* atan(+INF,+INF) */
+ case 1:
+ return -pi_o_4 - tiny; /* atan(-INF,+INF) */
+ case 2:
+ return 3.0 * pi_o_4 + tiny; /*atan(+INF,-INF)*/
+ case 3:
+ return -3.0 * pi_o_4 - tiny; /*atan(-INF,-INF)*/
+ }
+ } else {
+ switch (m) {
+ case 0:
+ return zero; /* atan(+...,+INF) */
+ case 1:
+ return -zero; /* atan(-...,+INF) */
+ case 2:
+ return pi + tiny; /* atan(+...,-INF) */
+ case 3:
+ return -pi - tiny; /* atan(-...,-INF) */
+ }
+ }
+ }
+ /* when y is INF */
+ if (iy == 0x7ff00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
+
+ /* compute y/x */
+ k = (iy - ix) >> 20;
+ if (k > 60) { /* |y/x| > 2**60 */
+ z = pi_o_2 + 0.5 * pi_lo;
+ m &= 1;
+ } else if (hx < 0 && k < -60) {
+ z = 0.0; /* 0 > |y|/x > -2**-60 */
+ } else {
+ z = atan(fabs(y / x)); /* safe to do y/x */
+ }
+ switch (m) {
+ case 0:
+ return z; /* atan(+,+) */
+ case 1:
+ return -z; /* atan(-,+) */
+ case 2:
+ return pi - (z - pi_lo); /* atan(+,-) */
+ default: /* case 3 */
+ return (z - pi_lo) - pi; /* atan(-,-) */
+ }
+}
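+
+/*
+ * Worked example (not in the fdlibm sources): m = 2*sign(x) + sign(y)
+ * packs the quadrant into two bits. For atan2(1.0, -1.0) we get m = 2
+ * and z = atan(1) = pi/4, so the case-2 return pi - (z - pi_lo) yields
+ * 3*pi/4, the expected second-quadrant angle.
+ */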
+
+/* cos(x)
+ * Return cosine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+double cos(double x) {
+ double y[2], z = 0.0;
+ int32_t n, ix;
+
+ /* High word of x. */
+ GET_HIGH_WORD(ix, x);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if (ix <= 0x3fe921fb) {
+ return __kernel_cos(x, z);
+ } else if (ix >= 0x7ff00000) {
+ /* cos(Inf or NaN) is NaN */
+ return x - x;
+ } else {
+ /* argument reduction needed */
+ n = __ieee754_rem_pio2(x, y);
+ switch (n & 3) {
+ case 0:
+ return __kernel_cos(y[0], y[1]);
+ case 1:
+ return -__kernel_sin(y[0], y[1], 1);
+ case 2:
+ return -__kernel_cos(y[0], y[1]);
+ default:
+ return __kernel_sin(y[0], y[1], 1);
+ }
+ }
+}
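+
+/*
+ * Worked example (not in the fdlibm sources): for x = 2.0 the reduction
+ * gives n = 1 and y ~ 2 - pi/2 ~ 0.4292, so the switch takes the
+ * n & 3 == 1 case and returns -__kernel_sin(y[0], y[1], 1); this matches
+ * the n = 1 row of the table above, cos(2) = -sin(2 - pi/2) ~ -0.4161.
+ */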
+
+/* exp(x)
+ * Returns the exponential of x.
+ *
+ * Method
+ * 1. Argument reduction:
+ * Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+ * Given x, find r and integer k such that
+ *
+ * x = k*ln2 + r, |r| <= 0.5*ln2.
+ *
+ * Here r will be represented as r = hi-lo for better
+ * accuracy.
+ *
+ * 2. Approximation of exp(r) by a special rational function on
+ * the interval [0,0.34658]:
+ * Write
+ * R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+ * We use a special Remez algorithm on [0,0.34658] to generate
+ * a polynomial of degree 5 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-59. In
+ * other words,
+ * R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+ * (where z=r*r, and the values of P1 to P5 are listed below)
+ * and
+ * | 5 | -59
+ * | 2.0+P1*z+...+P5*z - R(z) | <= 2
+ * | |
+ * The computation of exp(r) thus becomes
+ * 2*r
+ * exp(r) = 1 + -------
+ * R - r
+ * r*R1(r)
+ * = 1 + r + ----------- (for better accuracy)
+ * 2 - R1(r)
+ * where
+ * 2 4 10
+ * R1(r) = r - (P1*r + P2*r + ... + P5*r ).
+ *
+ * 3. Scale back to obtain exp(x):
+ * From step 1, we have
+ * exp(x) = 2^k * exp(r)
+ *
+ * Special cases:
+ * exp(INF) is INF, exp(NaN) is NaN;
+ * exp(-INF) is 0, and
+ * for finite argument, only exp(0)=1 is exact.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ * For IEEE double
+ * if x > 7.09782712893383973096e+02 then exp(x) overflow
+ * if x < -7.45133219101941108420e+02 then exp(x) underflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double exp(double x) {
+ static const double
+ one = 1.0,
+ halF[2] = {0.5, -0.5},
+ o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+ u_threshold = -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
+ ln2HI[2] = {6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
+ -6.93147180369123816490e-01}, /* 0xbfe62e42, 0xfee00000 */
+ ln2LO[2] = {1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
+ -1.90821492927058770002e-10}, /* 0xbdea39ef, 0x35793c76 */
+ invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+ P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+ P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+ P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+ P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+ P5 = 4.13813679705723846039e-08, /* 0x3E663769, 0x72BEA4D0 */
+ E = 2.718281828459045; /* 0x4005bf0a, 0x8b145769 */
+
+ static volatile double
+ huge = 1.0e+300,
+ twom1000 = 9.33263618503218878990e-302, /* 2**-1000=0x01700000,0*/
+ two1023 = 8.988465674311579539e307; /* 0x1p1023 */
+
+ double y, hi = 0.0, lo = 0.0, c, t, twopk;
+ int32_t k = 0, xsb;
+ uint32_t hx;
+
+ GET_HIGH_WORD(hx, x);
+ xsb = (hx >> 31) & 1; /* sign bit of x */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* filter out non-finite argument */
+ if (hx >= 0x40862E42) { /* if |x|>=709.78... */
+ if (hx >= 0x7ff00000) {
+ uint32_t lx;
+ GET_LOW_WORD(lx, x);
+ if (((hx & 0xfffff) | lx) != 0)
+ return x + x; /* NaN */
+ else
+ return (xsb == 0) ? x : 0.0; /* exp(+-inf)={inf,0} */
+ }
+ if (x > o_threshold) return huge * huge; /* overflow */
+ if (x < u_threshold) return twom1000 * twom1000; /* underflow */
+ }
+
+ /* argument reduction */
+ if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+ /* TODO(rtoy): We special case exp(1) here to return the correct
+ * value of E, as the computation below would get the last bit
+ * wrong. We should probably fix the algorithm instead.
+ */
+ if (x == 1.0) return E;
+ hi = x - ln2HI[xsb];
+ lo = ln2LO[xsb];
+ k = 1 - xsb - xsb;
+ } else {
+ k = static_cast<int>(invln2 * x + halF[xsb]);
+ t = k;
+ hi = x - t * ln2HI[0]; /* t*ln2HI is exact here */
+ lo = t * ln2LO[0];
+ }
+ STRICT_ASSIGN(double, x, hi - lo);
+ } else if (hx < 0x3e300000) { /* when |x|<2**-28 */
+ if (huge + x > one) return one + x; /* trigger inexact */
+ } else {
+ k = 0;
+ }
+
+ /* x is now in primary range */
+ t = x * x;
+ if (k >= -1021) {
+ INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0);
+ } else {
+ INSERT_WORDS(twopk, 0x3ff00000 + ((k + 1000) << 20), 0);
+ }
+ c = x - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
+ if (k == 0) {
+ return one - ((x * c) / (c - 2.0) - x);
+ } else {
+ y = one - ((lo - (x * c) / (2.0 - c)) - hi);
+ }
+ if (k >= -1021) {
+ if (k == 1024) return y * 2.0 * two1023;
+ return y * twopk;
+ } else {
+ return y * twopk * twom1000;
+ }
+}
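+
+/*
+ * Illustrative sketch, not part of the fdlibm sources: the step-1/step-3
+ * decomposition written with the standard <cmath> helpers floor() and
+ * ldexp() instead of the INSERT_WORDS bit construction above. This is a
+ * hypothetical helper; the real code also splits r into hi - lo parts for
+ * extra accuracy, which this sketch omits.
+ */
+static inline double exp_decomposition_sketch(double x) {
+  double k = floor(x * 1.44269504088896338700e+00 + 0.5); /* ~round(x/ln2) */
+  double r = x - k * 6.93147180559945286227e-01;          /* |r| <~ 0.5*ln2 */
+  return ldexp(exp(r), static_cast<int>(k));              /* 2^k * exp(r) */
+}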
+
+/*
+ * Method :
+ * 1. Reduce x to positive by atanh(-x) = -atanh(x)
+ * 2.For x>=0.5
+ * 1 2x x
+ * atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * --------)
+ * 2 1 - x 1 - x
+ *
+ * For x<0.5
+ * atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
+ *
+ * Special cases:
+ * atanh(x) is NaN if |x| > 1 with signal;
+ * atanh(NaN) is that NaN with no signal;
+ * atanh(+-1) is +-INF with signal.
+ *
+ */
+double atanh(double x) {
+ static const double one = 1.0, huge = 1e300;
+ static const double zero = 0.0;
+
+ double t;
+ int32_t hx, ix;
+ uint32_t lx;
+ EXTRACT_WORDS(hx, lx, x);
+ ix = hx & 0x7fffffff;
+ if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3ff00000) /* |x|>1 */
+ return (x - x) / (x - x);
+ if (ix == 0x3ff00000) return x / zero;
+ if (ix < 0x3e300000 && (huge + x) > zero) return x; /* x<2**-28 */
+ SET_HIGH_WORD(x, ix);
+ if (ix < 0x3fe00000) { /* x < 0.5 */
+ t = x + x;
+ t = 0.5 * log1p(t + t * x / (one - x));
+ } else {
+ t = 0.5 * log1p((x + x) / (one - x));
+ }
+ if (hx >= 0)
+ return t;
+ else
+ return -t;
+}
+
+/* log(x)
+ * Return the natural logarithm of x
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * 2. Approximation of log(1+f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Remez algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * 2 4 6 8 10 12 14
+ * R(z) ~ Lg1*s +Lg2*s +Lg3*s +Lg4*s +Lg5*s +Lg6*s +Lg7*s
+ * (the values of Lg1 to Lg7 are listed in the program)
+ * and
+ * | 2 14 | -58.45
+ * | Lg1*s +...+Lg7*s - R(z) | <= 2
+ * | |
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log(1+f) = f - s*(f - R) (if f is not too large)
+ * log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+ *
+ * 3. Finally, log(x) = k*ln2 + log(1+f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point numbers:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log(x) is NaN with signal if x < 0 (including -INF) ;
+ * log(+INF) is +INF; log(0) is -INF with signal;
+ * log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double log(double x) {
+ static const double /* -- */
+ ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+ ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+ two54 = 1.80143985094819840000e+16, /* 43500000 00000000 */
+ Lg1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+ Lg2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+ Lg3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+ Lg4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+ Lg5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+ Lg6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+ Lg7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+ static const double zero = 0.0;
+ static volatile double vzero = 0.0;
+
+ double hfsq, f, s, z, R, w, t1, t2, dk;
+ int32_t k, hx, i, j;
+ uint32_t lx;
+
+ EXTRACT_WORDS(hx, lx, x);
+
+ k = 0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx & 0x7fffffff) | lx) == 0)
+ return -two54 / vzero; /* log(+-0)=-inf */
+ if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+ k -= 54;
+ x *= two54; /* subnormal number, scale up x */
+ GET_HIGH_WORD(hx, x);
+ }
+ if (hx >= 0x7ff00000) return x + x;
+ k += (hx >> 20) - 1023;
+ hx &= 0x000fffff;
+ i = (hx + 0x95f64) & 0x100000;
+ SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+ k += (i >> 20);
+ f = x - 1.0;
+ if ((0x000fffff & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
+ if (f == zero) {
+ if (k == 0) {
+ return zero;
+ } else {
+ dk = static_cast<double>(k);
+ return dk * ln2_hi + dk * ln2_lo;
+ }
+ }
+ R = f * f * (0.5 - 0.33333333333333333 * f);
+ if (k == 0) {
+ return f - R;
+ } else {
+ dk = static_cast<double>(k);
+ return dk * ln2_hi - ((R - dk * ln2_lo) - f);
+ }
+ }
+ s = f / (2.0 + f);
+ dk = static_cast<double>(k);
+ z = s * s;
+ i = hx - 0x6147a;
+ w = z * z;
+ j = 0x6b851 - hx;
+ t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
+ t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
+ i |= j;
+ R = t2 + t1;
+ if (i > 0) {
+ hfsq = 0.5 * f * f;
+ if (k == 0)
+ return f - (hfsq - s * (hfsq + R));
+ else
+ return dk * ln2_hi - ((hfsq - (s * (hfsq + R) + dk * ln2_lo)) - f);
+ } else {
+ if (k == 0)
+ return f - s * (f - R);
+ else
+ return dk * ln2_hi - ((s * (f - R) - dk * ln2_lo) - f);
+ }
+}
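+
+/*
+ * Illustrative sketch, not part of the fdlibm sources: the step-1
+ * reduction x = 2^k * (1+f) with sqrt(2)/2 < 1+f < sqrt(2), written with
+ * the standard frexp() instead of the bit twiddling above. Assumes a
+ * finite positive x; log1p() is the routine declared in this file's
+ * header.
+ */
+static inline double log_reduction_sketch(double x) { /* hypothetical */
+  int k;
+  double m = frexp(x, &k);          /* x = m * 2^k with m in [0.5, 1) */
+  if (m < 0.70710678118654752440) { /* push m into [sqrt(2)/2, sqrt(2)) */
+    m *= 2.0;
+    k -= 1;
+  }
+  return k * 6.93147180559945286227e-01 + log1p(m - 1.0); /* k*ln2+log(1+f) */
+}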
+
+/* double log1p(double x)
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * 1+x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * Note. If k=0, then f=x is exact. However, if k!=0, then f
+ * may not be representable exactly. In that case, a correction
+ * term is needed. Let u=1+x rounded. Let c = (1+x)-u, then
+ * log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
+ * and add back the correction term c/u.
+ * (Note: when x > 2**53, one can simply return log(x))
+ *
+ * 2. Approximation of log1p(f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Remez algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * 2 4 6 8 10 12 14
+ * R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s +Lp6*s +Lp7*s
+ * (the values of Lp1 to Lp7 are listed in the program)
+ * and
+ * | 2 14 | -58.45
+ * | Lp1*s +...+Lp7*s - R(z) | <= 2
+ * | |
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log1p(f) = f - (hfsq - s*(hfsq+R)).
+ *
+ * 3. Finally, log1p(x) = k*ln2 + log1p(f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point numbers:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log1p(x) is NaN with signal if x < -1 (including -INF) ;
+ * log1p(+INF) is +INF; log1p(-1) is -INF with signal;
+ * log1p(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ *
+ * Note: Assuming log() returns an accurate answer, the following
+ * algorithm can be used to compute log1p(x) to within a few ULP:
+ *
+ * u = 1+x;
+ * if(u==1.0) return x ; else
+ * return log(u)*(x/(u-1.0));
+ *
+ * See HP-15C Advanced Functions Handbook, p.193.
+ */
+double log1p(double x) {
+ static const double /* -- */
+ ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+ ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+ two54 = 1.80143985094819840000e+16, /* 43500000 00000000 */
+ Lp1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+ Lp2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+ Lp3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+ Lp4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+ Lp5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+ Lp6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+ Lp7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+ static const double zero = 0.0;
+ static volatile double vzero = 0.0;
+
+ double hfsq, f, c, s, z, R, u;
+ int32_t k, hx, hu, ax;
+
+ GET_HIGH_WORD(hx, x);
+ ax = hx & 0x7fffffff;
+
+ k = 1;
+ if (hx < 0x3FDA827A) { /* 1+x < sqrt(2)+ */
+ if (ax >= 0x3ff00000) { /* x <= -1.0 */
+ if (x == -1.0)
+ return -two54 / vzero; /* log1p(-1)=+inf */
+ else
+ return (x - x) / (x - x); /* log1p(x<-1)=NaN */
+ }
+ if (ax < 0x3e200000) { /* |x| < 2**-29 */
+ if (two54 + x > zero /* raise inexact */
+ && ax < 0x3c900000) /* |x| < 2**-54 */
+ return x;
+ else
+ return x - x * x * 0.5;
+ }
+ if (hx > 0 || hx <= static_cast<int32_t>(0xbfd2bec4)) {
+ k = 0;
+ f = x;
+ hu = 1;
+ } /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
+ }
+ if (hx >= 0x7ff00000) return x + x;
+ if (k != 0) {
+ if (hx < 0x43400000) {
+ STRICT_ASSIGN(double, u, 1.0 + x);
+ GET_HIGH_WORD(hu, u);
+ k = (hu >> 20) - 1023;
+ c = (k > 0) ? 1.0 - (u - x) : x - (u - 1.0); /* correction term */
+ c /= u;
+ } else {
+ u = x;
+ GET_HIGH_WORD(hu, u);
+ k = (hu >> 20) - 1023;
+ c = 0;
+ }
+ hu &= 0x000fffff;
+ /*
+ * The approximation to sqrt(2) used in thresholds is not
+ * critical. However, the ones used above must give less
+ * strict bounds than the one here so that the k==0 case is
+ * never reached from here, since here we have committed to
+ * using the correction term but don't use it if k==0.
+ */
+ if (hu < 0x6a09e) { /* u ~< sqrt(2) */
+ SET_HIGH_WORD(u, hu | 0x3ff00000); /* normalize u */
+ } else {
+ k += 1;
+ SET_HIGH_WORD(u, hu | 0x3fe00000); /* normalize u/2 */
+ hu = (0x00100000 - hu) >> 2;
+ }
+ f = u - 1.0;
+ }
+ hfsq = 0.5 * f * f;
+ if (hu == 0) { /* |f| < 2**-20 */
+ if (f == zero) {
+ if (k == 0) {
+ return zero;
+ } else {
+ c += k * ln2_lo;
+ return k * ln2_hi + c;
+ }
+ }
+ R = hfsq * (1.0 - 0.66666666666666666 * f);
+ if (k == 0)
+ return f - R;
+ else
+ return k * ln2_hi - ((R - (k * ln2_lo + c)) - f);
+ }
+ s = f / (2.0 + f);
+ z = s * s;
+ R = z * (Lp1 +
+ z * (Lp2 + z * (Lp3 + z * (Lp4 + z * (Lp5 + z * (Lp6 + z * Lp7))))));
+ if (k == 0)
+ return f - (hfsq - s * (hfsq + R));
+ else
+ return k * ln2_hi - ((hfsq - (s * (hfsq + R) + (k * ln2_lo + c))) - f);
+}
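+
+/*
+ * The HP-15C fallback quoted in the method comment above, spelled out as
+ * a runnable sketch (not part of the fdlibm sources); it assumes only an
+ * accurate log() and round-to-nearest arithmetic.
+ */
+static inline double log1p_hp15c_sketch(double x) { /* hypothetical helper */
+  double u = 1.0 + x;
+  if (u == 1.0) return x;          /* 1+x rounded away all low bits of x */
+  return log(u) * (x / (u - 1.0)); /* rescale by the exact rounding error */
+}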
+
+/*
+ * k_log1p(f):
+ * Return log(1+f) - f for 1+f in ~[sqrt(2)/2, sqrt(2)].
+ *
+ * The following describes the overall strategy for computing
+ * logarithms in base e. The argument reduction and adding the final
+ * term of the polynomial are done by the caller for increased accuracy
+ * when different bases are used.
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * 2. Approximation of log(1+f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Remez algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * 2 4 6 8 10 12 14
+ * R(z) ~ Lg1*s +Lg2*s +Lg3*s +Lg4*s +Lg5*s +Lg6*s +Lg7*s
+ * (the values of Lg1 to Lg7 are listed in the program)
+ * and
+ * | 2 14 | -58.45
+ * | Lg1*s +...+Lg7*s - R(z) | <= 2
+ * | |
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log(1+f) = f - s*(f - R) (if f is not too large)
+ * log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+ *
+ * 3. Finally, log(x) = k*ln2 + log(1+f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point numbers:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log(x) is NaN with signal if x < 0 (including -INF) ;
+ * log(+INF) is +INF; log(0) is -INF with signal;
+ * log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+static const double Lg1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+ Lg2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+ Lg3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+ Lg4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+ Lg5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+ Lg6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+ Lg7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+/*
+ * We always inline k_log1p(), since doing so produces a
+ * substantial performance improvement (~40% on amd64).
+ */
+static inline double k_log1p(double f) {
+ double hfsq, s, z, R, w, t1, t2;
+
+ s = f / (2.0 + f);
+ z = s * s;
+ w = z * z;
+ t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
+ t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
+ R = t2 + t1;
+ hfsq = 0.5 * f * f;
+ return s * (hfsq + R);
+}
+
+/*
+ * Return the base 2 logarithm of x. See e_log.c and k_log.h for most
+ * comments.
+ *
+ * This reduces x to {k, 1+f} exactly as in e_log.c, then calls the kernel,
+ * then does the combining and scaling steps
+ * log2(x) = (f - 0.5*f*f + k_log1p(f)) / ln2 + k
+ * in not-quite-routine extra precision.
+ */
+double log2(double x) {
+ static const double
+ two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+ ivln2hi = 1.44269504072144627571e+00, /* 0x3ff71547, 0x65200000 */
+ ivln2lo = 1.67517131648865118353e-10; /* 0x3de705fc, 0x2eefa200 */
+
+ static const double zero = 0.0;
+ static volatile double vzero = 0.0;
+
+ double f, hfsq, hi, lo, r, val_hi, val_lo, w, y;
+ int32_t i, k, hx;
+ uint32_t lx;
+
+ EXTRACT_WORDS(hx, lx, x);
+
+ k = 0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx & 0x7fffffff) | lx) == 0)
+ return -two54 / vzero; /* log(+-0)=-inf */
+ if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+ k -= 54;
+ x *= two54; /* subnormal number, scale up x */
+ GET_HIGH_WORD(hx, x);
+ }
+ if (hx >= 0x7ff00000) return x + x;
+ if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+ k += (hx >> 20) - 1023;
+ hx &= 0x000fffff;
+ i = (hx + 0x95f64) & 0x100000;
+ SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
+ k += (i >> 20);
+ y = static_cast<double>(k);
+ f = x - 1.0;
+ hfsq = 0.5 * f * f;
+ r = k_log1p(f);
+
+ /*
+ * f-hfsq must (for args near 1) be evaluated in extra precision
+ * to avoid a large cancellation when x is near sqrt(2) or 1/sqrt(2).
+ * This is fairly efficient since f-hfsq only depends on f, so can
+ * be evaluated in parallel with R. Not combining hfsq with R also
+ * keeps R small (though not as small as a true `lo' term would be),
+ * so that extra precision is not needed for terms involving R.
+ *
+ * Compiler bugs involving extra precision used to break Dekker's
+ * theorem for splitting f-hfsq as hi+lo, unless double_t was used
+ * or the multi-precision calculations were avoided when double_t
+ * has extra precision. These problems are now automatically
+ * avoided as a side effect of the optimization of combining the
+ * Dekker splitting step with the clear-low-bits step.
+ *
+ * y must (for args near sqrt(2) and 1/sqrt(2)) be added in extra
+ * precision to avoid a very large cancellation when x is very near
+ * these values. Unlike the above cancellations, this problem is
+ * specific to base 2. It is strange that adding +-1 is so much
+ * harder than adding +-ln2 or +-log10_2.
+ *
+ * This uses Dekker's theorem to normalize y+val_hi, so the
+ * compiler bugs are back in some configurations, sigh. And I
+ * don't want to use double_t to avoid them, since that gives a
+ * pessimization and the support for avoiding the pessimization
+ * is not yet available.
+ *
+ * The multi-precision calculations for the multiplications are
+ * routine.
+ */
+ hi = f - hfsq;
+ SET_LOW_WORD(hi, 0);
+ lo = (f - hi) - hfsq + r;
+ val_hi = hi * ivln2hi;
+ val_lo = (lo + hi) * ivln2lo + lo * ivln2hi;
+
+ /* spadd(val_hi, val_lo, y), except for not using double_t: */
+ w = y + val_hi;
+ val_lo += (y - w) + val_hi;
+ val_hi = w;
+
+ return val_lo + val_hi;
+}
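+
+/*
+ * Illustrative sketch, not part of the fdlibm sources: the "spadd" step
+ * above is the textbook fast-two-sum. For |a| >= |b| it splits a + b into
+ * a rounded head and the exact rounding-error tail, which is how
+ * y + val_hi is normalized before the final val_lo + val_hi.
+ */
+static inline void fast_two_sum_sketch(double a, double b, double* head,
+                                       double* tail) { /* hypothetical */
+  double s = a + b;
+  *tail = (a - s) + b; /* exact error of the rounded addition */
+  *head = s;
+}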
+
+/*
+ * Return the base 10 logarithm of x
+ *
+ * Method :
+ * Let log10_2hi = leading 40 bits of log10(2) and
+ * log10_2lo = log10(2) - log10_2hi,
+ * ivln10 = 1/log(10) rounded.
+ * Then
+ * n = ilogb(x),
+ * if(n<0) n = n+1;
+ * x = scalbn(x,-n);
+ * log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
+ *
+ * Note 1:
+ * To guarantee log10(10**n)=n, where 10**n is normal, the rounding
+ * mode must be set to Round-to-Nearest.
+ * Note 2:
+ * [1/log(10)] rounded to 53 bits has error .198 ulps;
+ * log10 is monotonic at all binary break points.
+ *
+ * Special cases:
+ * log10(x) is NaN if x < 0;
+ * log10(+INF) is +INF; log10(0) is -INF;
+ * log10(NaN) is that NaN;
+ * log10(10**N) = N for N=0,1,...,22.
+ */
+double log10(double x) {
+ static const double
+ two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+ ivln10 = 4.34294481903251816668e-01,
+ log10_2hi = 3.01029995663611771306e-01, /* 0x3FD34413, 0x509F6000 */
+ log10_2lo = 3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
+
+ static const double zero = 0.0;
+ static volatile double vzero = 0.0;
+
+ double y;
+ int32_t i, k, hx;
+ uint32_t lx;
+
+ EXTRACT_WORDS(hx, lx, x);
+
+ k = 0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx & 0x7fffffff) | lx) == 0)
+ return -two54 / vzero; /* log(+-0)=-inf */
+ if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
+ k -= 54;
+ x *= two54; /* subnormal number, scale up x */
+ GET_HIGH_WORD(hx, x);
+ GET_LOW_WORD(lx, x);
+ }
+ if (hx >= 0x7ff00000) return x + x;
+ if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
+ k += (hx >> 20) - 1023;
+
+ i = (k & 0x80000000) >> 31;
+ hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
+ y = k + i;
+ SET_HIGH_WORD(x, hx);
+ SET_LOW_WORD(x, lx);
+
+ double z = y * log10_2lo + ivln10 * log(x);
+ return z + y * log10_2hi;
+}
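+
+/*
+ * Note (not in the fdlibm sources): since ivln10 = 1/ln(10) and
+ * log10_2hi + log10_2lo = log10(2), the value returned above is
+ * y*log10(2) + log(x')/ln(10) = log10(2^y * x'), i.e. log10 of the
+ * original argument after peeling off the exponent y, exactly as the
+ * method comment describes.
+ */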
+
+/* expm1(x)
+ * Returns exp(x)-1, the exponential of x minus 1.
+ *
+ * Method
+ * 1. Argument reduction:
+ * Given x, find r and integer k such that
+ *
+ * x = k*ln2 + r, |r| <= 0.5*ln2 ~ 0.34658
+ *
+ * Here a correction term c will be computed to compensate
+ * the error in r when rounded to a floating-point number.
+ *
+ * 2. Approximating expm1(r) by a special rational function on
+ * the interval [0,0.34658]:
+ * Since
+ * r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
+ * we define R1(r*r) by
+ * r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
+ * That is,
+ * R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
+ * = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
+ * = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
+ * We use a special Remez algorithm on [0,0.347] to generate
+ * a polynomial of degree 5 in r*r to approximate R1. The
+ * maximum error of this polynomial approximation is bounded
+ * by 2**-61. In other words,
+ * R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
+ * where Q1 = -1.6666666666666567384E-2,
+ * Q2 = 3.9682539681370365873E-4,
+ * Q3 = -9.9206344733435987357E-6,
+ * Q4 = 2.5051361420808517002E-7,
+ * Q5 = -6.2843505682382617102E-9;
+ * z = r*r,
+ * with error bounded by
+ * | 5 | -61
+ * | 1.0+Q1*z+...+Q5*z - R1(z) | <= 2
+ * | |
+ *
+ * expm1(r) = exp(r)-1 is then computed by the following
+ * specific way which minimizes the accumulated rounding error:
+ * 2 3
+ * r r [ 3 - (R1 + R1*r/2) ]
+ * expm1(r) = r + --- + --- * [--------------------]
+ * 2 2 [ 6 - r*(3 - R1*r/2) ]
+ *
+ * To compensate the error in the argument reduction, we use
+ * expm1(r+c) = expm1(r) + c + expm1(r)*c
+ * ~ expm1(r) + c + r*c
+ * Thus c+r*c will be added in as the correction terms for
+ * expm1(r+c). Now rearrange the terms so that compiler
+ * optimization cannot spoil the result:
+ * ( 2 2 )
+ * ({ ( r [ R1 - (3 - R1*r/2) ] ) } r )
+ * expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
+ * ({ ( 2 [ 6 - r*(3 - R1*r/2) ] ) } 2 )
+ * ( )
+ *
+ * = r - E
+ * 3. Scale back to obtain expm1(x):
+ * From step 1, we have
+ * expm1(x) = either 2^k*[expm1(r)+1] - 1
+ * = or 2^k*[expm1(r) + (1-2^-k)]
+ * 4. Implementation notes:
+ * (A). To save one multiplication, we scale the coefficient Qi
+ * to Qi*2^i, and replace z by (x^2)/2.
+ * (B). To achieve maximum accuracy, we compute expm1(x) by
+ * (i) if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
+ * (ii) if k=0, return r-E
+ * (iii) if k=-1, return 0.5*(r-E)-0.5
+ * (iv) if k=1 if r < -0.25, return 2*((r+0.5)- E)
+ * else return 1.0+2.0*(r-E);
+ * (v) if (k<-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
+ * (vi) if k <= 20, return 2^k((1-2^-k)-(E-r)), else
+ * (vii) return 2^k(1-((E+2^-k)-r))
+ *
+ * Special cases:
+ * expm1(INF) is INF, expm1(NaN) is NaN;
+ * expm1(-INF) is -1, and
+ * for finite argument, only expm1(0)=0 is exact.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ * For IEEE double
+ * if x > 7.09782712893383973096e+02 then expm1(x) overflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+double expm1(double x) {
+ static const double
+ one = 1.0,
+ tiny = 1.0e-300,
+ o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+ ln2_hi = 6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
+ ln2_lo = 1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
+ invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+ /* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs =
+ x*x/2: */
+ Q1 = -3.33333333333331316428e-02, /* BFA11111 111110F4 */
+ Q2 = 1.58730158725481460165e-03, /* 3F5A01A0 19FE5585 */
+ Q3 = -7.93650757867487942473e-05, /* BF14CE19 9EAADBB7 */
+ Q4 = 4.00821782732936239552e-06, /* 3ED0CFCA 86E65239 */
+ Q5 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */
+
+ static volatile double huge = 1.0e+300;
+
+ double y, hi, lo, c, t, e, hxs, hfx, r1, twopk;
+ int32_t k, xsb;
+ uint32_t hx;
+
+ GET_HIGH_WORD(hx, x);
+ xsb = hx & 0x80000000; /* sign bit of x */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* filter out huge and non-finite argument */
+ if (hx >= 0x4043687A) { /* if |x|>=56*ln2 */
+ if (hx >= 0x40862E42) { /* if |x|>=709.78... */
+ if (hx >= 0x7ff00000) {
+ uint32_t low;
+ GET_LOW_WORD(low, x);
+ if (((hx & 0xfffff) | low) != 0)
+ return x + x; /* NaN */
+ else
+ return (xsb == 0) ? x : -1.0; /* exp(+-inf)={inf,-1} */
+ }
+ if (x > o_threshold) return huge * huge; /* overflow */
+ }
+ if (xsb != 0) { /* x < -56*ln2, return -1.0 with inexact */
+ if (x + tiny < 0.0) /* raise inexact */
+ return tiny - one; /* return -1 */
+ }
+ }
+
+ /* argument reduction */
+ if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+ if (xsb == 0) {
+ hi = x - ln2_hi;
+ lo = ln2_lo;
+ k = 1;
+ } else {
+ hi = x + ln2_hi;
+ lo = -ln2_lo;
+ k = -1;
+ }
+ } else {
+ k = invln2 * x + ((xsb == 0) ? 0.5 : -0.5);
+ t = k;
+ hi = x - t * ln2_hi; /* t*ln2_hi is exact here */
+ lo = t * ln2_lo;
+ }
+ STRICT_ASSIGN(double, x, hi - lo);
+ c = (hi - x) - lo;
+ } else if (hx < 0x3c900000) { /* when |x|<2**-54, return x */
+ t = huge + x; /* return x with inexact flags when x!=0 */
+ return x - (t - (huge + x));
+ } else {
+ k = 0;
+ }
+
+ /* x is now in primary range */
+ hfx = 0.5 * x;
+ hxs = x * hfx;
+ r1 = one + hxs * (Q1 + hxs * (Q2 + hxs * (Q3 + hxs * (Q4 + hxs * Q5))));
+ t = 3.0 - r1 * hfx;
+ e = hxs * ((r1 - t) / (6.0 - x * t));
+ if (k == 0) {
+ return x - (x * e - hxs); /* c is 0 */
+ } else {
+ INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0); /* 2^k */
+ e = (x * (e - c) - c);
+ e -= hxs;
+ if (k == -1) return 0.5 * (x - e) - 0.5;
+ if (k == 1) {
+ if (x < -0.25)
+ return -2.0 * (e - (x + 0.5));
+ else
+ return one + 2.0 * (x - e);
+ }
+ if (k <= -2 || k > 56) { /* suffice to return exp(x)-1 */
+ y = one - (e - x);
+ // TODO(mvstanton): is this replacement for the hex float
+ // sufficient?
+ // if (k == 1024) y = y*2.0*0x1p1023;
+ if (k == 1024)
+ y = y * 2.0 * 8.98846567431158e+307;
+ else
+ y = y * twopk;
+ return y - one;
+ }
+ t = one;
+ if (k < 20) {
+ SET_HIGH_WORD(t, 0x3ff00000 - (0x200000 >> k)); /* t=1-2^-k */
+ y = t - (e - x);
+ y = y * twopk;
+ } else {
+ SET_HIGH_WORD(t, ((0x3ff - k) << 20)); /* 2^-k */
+ y = x - (e + t);
+ y += one;
+ y = y * twopk;
+ }
+ }
+ return y;
+}
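+
+/*
+ * Illustrative sketch, not part of the fdlibm sources: why expm1 exists
+ * at all. Near zero the naive exp(x) - 1.0 cancels catastrophically; for
+ * x = 1e-16, exp(x) rounds to exactly 1.0 and this sketch returns 0.0,
+ * whereas expm1(1e-16) returns ~1e-16 with full precision.
+ */
+static inline double expm1_naive_sketch(double x) { /* hypothetical helper */
+  return exp(x) - 1.0; /* loses all significance for |x| below 2^-53 */
+}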
+
+double cbrt(double x) {
+ static const uint32_t
+ B1 = 715094163, /* B1 = (1023-1023/3-0.03306235651)*2**20 */
+ B2 = 696219795; /* B2 = (1023-1023/3-54/3-0.03306235651)*2**20 */
+
+ /* |1/cbrt(x) - p(x)| < 2**-23.5 (~[-7.93e-8, 7.929e-8]). */
+ static const double P0 = 1.87595182427177009643, /* 0x3ffe03e6, 0x0f61e692 */
+ P1 = -1.88497979543377169875, /* 0xbffe28e0, 0x92f02420 */
+ P2 = 1.621429720105354466140, /* 0x3ff9f160, 0x4a49d6c2 */
+ P3 = -0.758397934778766047437, /* 0xbfe844cb, 0xbee751d9 */
+ P4 = 0.145996192886612446982; /* 0x3fc2b000, 0xd4e4edd7 */
+
+ int32_t hx;
+ union {
+ double value;
+ uint64_t bits;
+ } u;
+ double r, s, t = 0.0, w;
+ uint32_t sign;
+ uint32_t high, low;
+
+ EXTRACT_WORDS(hx, low, x);
+ sign = hx & 0x80000000; /* sign= sign(x) */
+ hx ^= sign;
+ if (hx >= 0x7ff00000) return (x + x); /* cbrt(NaN,INF) is itself */
+
+ /*
+ * Rough cbrt to 5 bits:
+ * cbrt(2**e*(1+m)) ~= 2**(e/3)*(1+(e%3+m)/3)
+ * where e is integral and >= 0, m is real and in [0, 1), and "/" and
+ * "%" are integer division and modulus with rounding towards minus
+ * infinity. The RHS is always >= the LHS and has a maximum relative
+ * error of about 1 in 16. Adding a bias of -0.03306235651 to the
+ * (e%3+m)/3 term reduces the error to about 1 in 32. With the IEEE
+ * floating point representation, for finite positive normal values,
+ * ordinary integer division of the value in bits magically gives
+ * almost exactly the RHS of the above provided we first subtract the
+ * exponent bias (1023 for doubles) and later add it back. We do the
+ * subtraction virtually to keep e >= 0 so that ordinary integer
+ * division rounds towards minus infinity; this is also efficient.
+ */
+ if (hx < 0x00100000) { /* zero or subnormal? */
+ if ((hx | low) == 0) return (x); /* cbrt(0) is itself */
+ SET_HIGH_WORD(t, 0x43500000); /* set t= 2**54 */
+ t *= x;
+ GET_HIGH_WORD(high, t);
+ INSERT_WORDS(t, sign | ((high & 0x7fffffff) / 3 + B2), 0);
+ } else {
+ INSERT_WORDS(t, sign | (hx / 3 + B1), 0);
+ }
+
+ /*
+ * New cbrt to 23 bits:
+ * cbrt(x) = t*cbrt(x/t**3) ~= t*P(t**3/x)
+ * where P(r) is a polynomial of degree 4 that approximates 1/cbrt(r)
+ * to within 2**-23.5 when |r - 1| < 1/10. The rough approximation
+ * has produced t such that |t/cbrt(x) - 1| ~< 1/32, and cubing this
+ * gives us bounds for r = t**3/x.
+ *
+ * Try to optimize for parallel evaluation as in k_tanf.c.
+ */
+ r = (t * t) * (t / x);
+ t = t * ((P0 + r * (P1 + r * P2)) + ((r * r) * r) * (P3 + r * P4));
+
+ /*
+ * Round t away from zero to 23 bits (sloppily except for ensuring that
+ * the result is larger in magnitude than cbrt(x) but not much more than
+ * 2 23-bit ulps larger). With rounding towards zero, the error bound
+ * would be ~5/6 instead of ~4/6. With a maximum error of 2 23-bit ulps
+ * in the rounded t, the infinite-precision error in the Newton
+ * approximation barely affects third digit in the final error
+ * 0.667; the error in the rounded t can be up to about 3 23-bit ulps
+ * before the final error is larger than 0.667 ulps.
+ */
+ u.value = t;
+ u.bits = (u.bits + 0x80000000) & 0xffffffffc0000000ULL;
+ t = u.value;
+
+ /* one step Newton iteration to 53 bits with error < 0.667 ulps */
+ s = t * t; /* t*t is exact */
+ r = x / s; /* error <= 0.5 ulps; |r| < |t| */
+ w = t + t; /* t+t is exact */
+ r = (r - t) / (w + r); /* r-t is exact; w+r ~= 3*t */
+ t = t + t * r; /* error <= 0.5 + 0.5/3 + epsilon */
+
+ return (t);
+}
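+
+/*
+ * Worked example (not in the fdlibm sources) of the "rough cbrt" bit
+ * trick above: for x = 8.0 the high word is 0x40200000, and
+ * 0x40200000/3 + B1 = 0x15600000 + 0x2a9f7893 = 0x3fff7893, a high word
+ * whose value is t ~ 1.9669, within the promised ~1/32 of cbrt(8) = 2
+ * before the polynomial and the Newton step refine it to full precision.
+ */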
+
+/* sin(x)
+ * Return sine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+double sin(double x) {
+ double y[2], z = 0.0;
+ int32_t n, ix;
+
+ /* High word of x. */
+ GET_HIGH_WORD(ix, x);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if (ix <= 0x3fe921fb) {
+ return __kernel_sin(x, z, 0);
+ } else if (ix >= 0x7ff00000) {
+ /* sin(Inf or NaN) is NaN */
+ return x - x;
+ } else {
+ /* argument reduction needed */
+ n = __ieee754_rem_pio2(x, y);
+ switch (n & 3) {
+ case 0:
+ return __kernel_sin(y[0], y[1], 1);
+ case 1:
+ return __kernel_cos(y[0], y[1]);
+ case 2:
+ return -__kernel_sin(y[0], y[1], 1);
+ default:
+ return -__kernel_cos(y[0], y[1]);
+ }
+ }
+}
+
+/* tan(x)
+ * Return tangent function of x.
+ *
+ * kernel function:
+ * __kernel_tan ... tangent function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+double tan(double x) {
+ double y[2], z = 0.0;
+ int32_t n, ix;
+
+ /* High word of x. */
+ GET_HIGH_WORD(ix, x);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if (ix <= 0x3fe921fb) {
+ return __kernel_tan(x, z, 1);
+ } else if (ix >= 0x7ff00000) {
+ /* tan(Inf or NaN) is NaN */
+ return x - x; /* NaN */
+ } else {
+ /* argument reduction needed */
+ n = __ieee754_rem_pio2(x, y);
+ /* 1 -> n even, -1 -> n odd */
+ return __kernel_tan(y[0], y[1], 1 - ((n & 1) << 1));
+ }
+}
+
+/*
+ * ES6 draft 09-27-13, section 20.2.2.12.
+ * Math.cosh
+ * Method :
+ * mathematically cosh(x) is defined to be (exp(x)+exp(-x))/2
+ * 1. Replace x by |x| (cosh(x) = cosh(-x)).
+ * 2.
+ * [ exp(x) - 1 ]^2
+ * 0 <= x <= ln2/2 : cosh(x) := 1 + -------------------
+ * 2*exp(x)
+ *
+ * exp(x) + 1/exp(x)
+ * ln2/2 <= x <= 22 : cosh(x) := -------------------
+ * 2
+ * 22 <= x <= lnovft : cosh(x) := exp(x)/2
+ * lnovft <= x <= ln2ovft: cosh(x) := exp(x/2)/2 * exp(x/2)
+ * ln2ovft < x : cosh(x) := huge*huge (overflow)
+ *
+ * Special cases:
+ * cosh(x) is |x| if x is +INF, -INF, or NaN.
+ * only cosh(0)=1 is exact for finite x.
+ */
+double cosh(double x) {
+ static const double KCOSH_OVERFLOW = 710.4758600739439;
+ static const double one = 1.0, half = 0.5;
+ static volatile double huge = 1.0e+300;
+
+ int32_t ix;
+
+ /* High word of |x|. */
+ GET_HIGH_WORD(ix, x);
+ ix &= 0x7fffffff;
+
+  // |x| in [0,0.5*ln2], return 1+expm1(|x|)^2/(2*exp(|x|))
+ if (ix < 0x3fd62e43) {
+ double t = expm1(fabs(x));
+ double w = one + t;
+ // For |x| < 2^-55, cosh(x) = 1
+ if (ix < 0x3c800000) return w;
+ return one + (t * t) / (w + w);
+ }
+
+  // |x| in [0.5*ln2, 22], return (exp(|x|)+1/exp(|x|))/2
+ if (ix < 0x40360000) {
+ double t = exp(fabs(x));
+ return half * t + half / t;
+ }
+
+ // |x| in [22, log(maxdouble)], return half*exp(|x|)
+ if (ix < 0x40862e42) return half * exp(fabs(x));
+
+ // |x| in [log(maxdouble), overflowthreshold]
+ if (fabs(x) <= KCOSH_OVERFLOW) {
+ double w = exp(half * fabs(x));
+ double t = half * w;
+ return t * w;
+ }
+
+ /* x is INF or NaN */
+ if (ix >= 0x7ff00000) return x * x;
+
+ // |x| > overflowthreshold.
+ return huge * huge;
+}
+
+/*
+ * ES6 draft 09-27-13, section 20.2.2.30.
+ * Math.sinh
+ * Method :
+ * mathematically sinh(x) is defined to be (exp(x)-exp(-x))/2
+ * 1. Replace x by |x| (sinh(-x) = -sinh(x)).
+ * 2.
+ * E + E/(E+1)
+ * 0 <= x <= 22 : sinh(x) := --------------, E=expm1(x)
+ * 2
+ *
+ * 22 <= x <= lnovft : sinh(x) := exp(x)/2
+ * lnovft <= x <= ln2ovft: sinh(x) := exp(x/2)/2 * exp(x/2)
+ * ln2ovft < x : sinh(x) := x*shuge (overflow)
+ *
+ * Special cases:
+ * sinh(x) is |x| if x is +Infinity, -Infinity, or NaN.
+ * only sinh(0)=0 is exact for finite x.
+ */
+double sinh(double x) {
+ static const double KSINH_OVERFLOW = 710.4758600739439,
+ TWO_M28 =
+ 3.725290298461914e-9, // 2^-28, empty lower half
+ LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
+ static const double shuge = 1.0e307;
+
+ double h = (x < 0) ? -0.5 : 0.5;
+ // |x| in [0, 22]. return sign(x)*0.5*(E+E/(E+1))
+ double ax = fabs(x);
+ if (ax < 22) {
+ // For |x| < 2^-28, sinh(x) = x
+ if (ax < TWO_M28) return x;
+ double t = expm1(ax);
+ if (ax < 1) {
+ return h * (2 * t - t * t / (t + 1));
+ }
+ return h * (t + t / (t + 1));
+ }
+ // |x| in [22, log(maxdouble)], return 0.5 * exp(|x|)
+ if (ax < LOG_MAXD) return h * exp(ax);
+ // |x| in [log(maxdouble), overflowthreshold]
+  // overflowthreshold = 710.4758600739439
+ if (ax <= KSINH_OVERFLOW) {
+ double w = exp(0.5 * ax);
+ double t = h * w;
+ return t * w;
+ }
+ // |x| > overflowthreshold or is NaN.
+ // Return Infinity of the appropriate sign or NaN.
+ return x * shuge;
+}
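+
+/*
+ * Note (not in the fdlibm sources): the two-step form used above for huge
+ * arguments, exp(|x|/2)/2 * exp(|x|/2), avoids the intermediate overflow
+ * of exp(|x|) for |x| in [log(maxdouble), ~710.476]; for example
+ * sinh(710) ~ 1.1e308 is still finite even though exp(710) itself would
+ * overflow.
+ */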
+
+/* Tanh(x)
+ * Return the Hyperbolic Tangent of x
+ *
+ * Method :
+ * x -x
+ * e - e
+ * 0. tanh(x) is defined to be -----------
+ * x -x
+ * e + e
+ * 1. reduce x to non-negative by tanh(-x) = -tanh(x).
+ * 2. 0 <= x < 2**-28 : tanh(x) := x with inexact if x != 0
+ * -t
+ * 2**-28 <= x < 1 : tanh(x) := -----; t = expm1(-2x)
+ * t + 2
+ * 2
+ * 1 <= x < 22 : tanh(x) := 1 - -----; t = expm1(2x)
+ * t + 2
+ * 22 <= x <= INF : tanh(x) := 1.
+ *
+ * Special cases:
+ * tanh(NaN) is NaN;
+ * only tanh(0)=0 is exact for finite argument.
+ */
+double tanh(double x) {
+ static const volatile double tiny = 1.0e-300;
+ static const double one = 1.0, two = 2.0, huge = 1.0e300;
+ double t, z;
+ int32_t jx, ix;
+
+ GET_HIGH_WORD(jx, x);
+ ix = jx & 0x7fffffff;
+
+ /* x is INF or NaN */
+ if (ix >= 0x7ff00000) {
+ if (jx >= 0)
+ return one / x + one; /* tanh(+-inf)=+-1 */
+ else
+ return one / x - one; /* tanh(NaN) = NaN */
+ }
+
+ /* |x| < 22 */
+ if (ix < 0x40360000) { /* |x|<22 */
+ if (ix < 0x3e300000) { /* |x|<2**-28 */
+ if (huge + x > one) return x; /* tanh(tiny) = tiny with inexact */
+ }
+ if (ix >= 0x3ff00000) { /* |x|>=1 */
+ t = expm1(two * fabs(x));
+ z = one - two / (t + two);
+ } else {
+ t = expm1(-two * fabs(x));
+ z = -t / (t + two);
+ }
+ /* |x| >= 22, return +-1 */
+ } else {
+ z = one - tiny; /* raise inexact flag */
+ }
+ return (jx >= 0) ? z : -z;
+}
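+
+/*
+ * Note (not in the fdlibm sources): with t = expm1(2|x|) = e^(2|x|) - 1,
+ * the |x| >= 1 branch above computes 1 - 2/(t+2) = t/(t+2) =
+ * (e^(2|x|) - 1)/(e^(2|x|) + 1), which is exactly tanh(|x|); phrasing it
+ * via expm1 avoids the cancellation in the naive definition.
+ */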
+
+} // namespace ieee754
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/src/base/ieee754.h b/deps/v8/src/base/ieee754.h
new file mode 100644
index 0000000000..80523a1414
--- /dev/null
+++ b/deps/v8/src/base/ieee754.h
@@ -0,0 +1,80 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_IEEE754_H_
+#define V8_BASE_IEEE754_H_
+
+namespace v8 {
+namespace base {
+namespace ieee754 {
+
+// Returns the arc cosine of |x|; that is the value whose cosine is |x|.
+double acos(double x);
+
+// Returns the inverse hyperbolic cosine of |x|; that is the value whose
+// hyperbolic cosine is |x|.
+double acosh(double x);
+
+// Returns the arc sine of |x|; that is the value whose sine is |x|.
+double asin(double x);
+
+// Returns the inverse hyperbolic sine of |x|; that is the value whose
+// hyperbolic sine is |x|.
+double asinh(double x);
+
+// Returns the principal value of the arc tangent of |x|; that is the value
+// whose tangent is |x|.
+double atan(double x);
+
+// Returns the principal value of the arc tangent of |y/x|, using the signs of
+// the two arguments to determine the quadrant of the result.
+double atan2(double y, double x);
+
+// Returns the cosine of |x|, where |x| is given in radians.
+double cos(double x);
+
+// Returns the base-e exponential of |x|.
+double exp(double x);
+
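+// Returns the inverse hyperbolic tangent of |x|; that is the value whose
+// hyperbolic tangent is |x|.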
+double atanh(double x);
+
+// Returns the natural logarithm of |x|.
+double log(double x);
+
+// Returns a value equivalent to |log(1+x)|, but computed in a way that is
+// accurate even if the value of |x| is near zero.
+double log1p(double x);
+
+// Returns the base 2 logarithm of |x|.
+double log2(double x);
+
+// Returns the base 10 logarithm of |x|.
+double log10(double x);
+
+// Returns the cube root of |x|.
+double cbrt(double x);
+
+// Returns exp(x)-1, the exponential of |x| minus 1.
+double expm1(double x);
+
+// Returns the sine of |x|, where |x| is given in radians.
+double sin(double x);
+
+// Returns the tangent of |x|, where |x| is given in radians.
+double tan(double x);
+
+// Returns the hyperbolic cosine of |x|, where |x| is given in radians.
+double cosh(double x);
+
+// Returns the hyperbolic sine of |x|, where |x| is given in radians.
+double sinh(double x);
+
+// Returns the hyperbolic tangent of |x|, where |x| is given in radians.
+double tanh(double x);
+
+} // namespace ieee754
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_IEEE754_H_
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index ebab129f93..cadcb6f1de 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -4,17 +4,10 @@
#include "src/base/logging.h"
-#if V8_LIBC_GLIBC || V8_OS_BSD
-#include <cxxabi.h>
-#include <dlfcn.h>
-#include <execinfo.h>
-#elif V8_OS_QNX
-#include <backtrace.h>
-#endif // V8_LIBC_GLIBC || V8_OS_BSD
-
#include <cstdio>
#include <cstdlib>
+#include "src/base/debug/stack_trace.h"
#include "src/base/platform/platform.h"
namespace v8 {
@@ -49,53 +42,6 @@ DEFINE_CHECK_OP_IMPL(GE)
DEFINE_CHECK_OP_IMPL(GT)
#undef DEFINE_CHECK_OP_IMPL
-
-// Attempts to dump a backtrace (if supported).
-void DumpBacktrace() {
-#if V8_LIBC_GLIBC || V8_OS_BSD
- void* trace[100];
- int size = backtrace(trace, arraysize(trace));
- OS::PrintError("\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- OS::PrintError("(empty)\n");
- } else {
- for (int i = 1; i < size; ++i) {
- OS::PrintError("%2d: ", i);
- Dl_info info;
- char* demangled = NULL;
- if (!dladdr(trace[i], &info) || !info.dli_sname) {
- OS::PrintError("%p\n", trace[i]);
- } else if ((demangled = abi::__cxa_demangle(info.dli_sname, 0, 0, 0))) {
- OS::PrintError("%s\n", demangled);
- free(demangled);
- } else {
- OS::PrintError("%s\n", info.dli_sname);
- }
- }
- }
-#elif V8_OS_QNX
- char out[1024];
- bt_accessor_t acc;
- bt_memmap_t memmap;
- bt_init_accessor(&acc, BT_SELF);
- bt_load_memmap(&acc, &memmap);
- bt_sprn_memmap(&memmap, out, sizeof(out));
- OS::PrintError(out);
- bt_addr_t trace[100];
- int size = bt_get_backtrace(&acc, trace, arraysize(trace));
- OS::PrintError("\n==== C stack trace ===============================\n\n");
- if (size == 0) {
- OS::PrintError("(empty)\n");
- } else {
- bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
- out, sizeof(out), NULL);
- OS::PrintError(out);
- }
- bt_unload_memmap(&memmap);
- bt_release_accessor(&acc);
-#endif // V8_LIBC_GLIBC || V8_OS_BSD
-}
-
} // namespace base
} // namespace v8
@@ -111,18 +57,12 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
v8::base::OS::VPrintError(format, arguments);
va_end(arguments);
v8::base::OS::PrintError("\n#\n");
- v8::base::DumpBacktrace();
- fflush(stderr);
- v8::base::OS::Abort();
-}
-extern "C" void V8_RuntimeError(const char* file, int line,
- const char* message) {
- fflush(stdout);
- fflush(stderr);
- v8::base::OS::PrintError("\n\n#\n# Runtime error in %s, line %d\n# ", file,
- line);
- v8::base::OS::PrintError("\n# %s\n", message);
- v8::base::DumpBacktrace();
+ v8::base::debug::StackTrace trace;
+ trace.Print();
+
fflush(stderr);
+ // Avoid dumping stack trace on abort signal.
+ v8::base::debug::DisableSignalStackDump();
+ v8::base::OS::Abort();
}
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 15322f6126..50fceca88b 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -10,12 +10,10 @@
#include <string>
#include "src/base/build_config.h"
+#include "src/base/compiler-specific.h"
-extern "C" V8_NORETURN void V8_Fatal(const char* file, int line,
- const char* format, ...);
-
-extern "C" void V8_RuntimeError(const char* file, int line,
- const char* message);
+extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN
+ void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 3f09b2b9ce..822c88704a 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -5,13 +5,8 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_
-#include <stddef.h>
-#include <stdint.h>
-
-#include <cstring>
-
-#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
+#include "src/base/format-macros.h"
#include "src/base/logging.h"
@@ -22,55 +17,6 @@
(reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(16)->field)) - 16)
-#if V8_OS_NACL
-
-// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
-// but can be used on anonymous types or types defined inside
-// functions. It's less safe than arraysize as it accepts some
-// (although not all) pointers. Therefore, you should use arraysize
-// whenever possible.
-//
-// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
-// size_t.
-//
-// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
-//
-// "warning: division by zero in ..."
-//
-// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
-// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
-//
-// The following comments are on the implementation details, and can
-// be ignored by the users.
-//
-// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
-// the array) and sizeof(*(arr)) (the # of bytes in one array
-// element). If the former is divisible by the latter, perhaps arr is
-// indeed an array, in which case the division result is the # of
-// elements in the array. Otherwise, arr cannot possibly be an array,
-// and we generate a compiler error to prevent the code from
-// compiling.
-//
-// Since the size of bool is implementation-defined, we need to cast
-// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
-// result has type size_t.
-//
-// This macro is not perfect as it wrongfully accepts certain
-// pointers, namely where the pointer size is divisible by the pointee
-// size. Since all our code has to go through a 32-bit compiler,
-// where a pointer is 4 bytes, this means all pointers to a type whose
-// size is 3 or greater than 4 will be (righteously) rejected.
-#define ARRAYSIZE_UNSAFE(a) \
- ((sizeof(a) / sizeof(*(a))) / \
- static_cast<size_t>(!(sizeof(a) % sizeof(*(a))))) // NOLINT
-
-// TODO(bmeurer): For some reason, the NaCl toolchain cannot handle the correct
-// definition of arraysize() below, so we have to use the unsafe version for
-// now.
-#define arraysize ARRAYSIZE_UNSAFE
-
-#else // V8_OS_NACL
-
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
@@ -99,8 +45,6 @@ template <typename T, size_t N>
char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
-#endif // V8_OS_NACL
-
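
With the NaCl port gone, only the template-based arraysize remains; unlike ARRAYSIZE_UNSAFE it refuses pointers outright, since ArraySizeHelper only matches genuine array references. For example:

    int buffer[16];
    static_assert(arraysize(buffer) == 16, "usable as a compile-time constant");
    int* p = buffer;
    // arraysize(p);  // does not compile: no ArraySizeHelper overload for T*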
// bit_cast<Dest,Source> is a template function that implements the
// equivalent of "*reinterpret_cast<Dest*>(&source)". We need this in
@@ -210,6 +154,17 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define DISABLE_ASAN
#endif
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
+#if !defined(DISABLE_CFI_PERF)
+#if defined(__clang__) && defined(__has_attribute)
+#if __has_attribute(no_sanitize)
+#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
+#endif
+#endif
+#endif
+#if !defined(DISABLE_CFI_PERF)
+#define DISABLE_CFI_PERF
+#endif
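
DISABLE_CFI_PERF annotates hot functions whose indirect calls would otherwise pay for Clang's control-flow-integrity checks; under any other compiler it expands to nothing. A hedged sketch with a hypothetical dispatch loop:

    // Hypothetical hot path: many indirect calls through a function table.
    DISABLE_CFI_PERF void DispatchAll(void (*const* table)(int), size_t n,
                                      int arg) {
      for (size_t i = 0; i < n; ++i) table[i](arg);  // unchecked under CFI
    }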
#if V8_CC_GNU
#define V8_IMMEDIATE_CRASH() __builtin_trap()
@@ -274,23 +229,27 @@ inline void USE(T) { }
#define V8PRIdPTR V8_PTR_PREFIX "d"
#define V8PRIuPTR V8_PTR_PREFIX "u"
+// ptrdiff_t is 't' according to the standard, but MSVC uses 'I'.
+#if V8_CC_MSVC
+#define V8PRIxPTRDIFF "Ix"
+#define V8PRIdPTRDIFF "Id"
+#define V8PRIuPTRDIFF "Iu"
+#else
+#define V8PRIxPTRDIFF "tx"
+#define V8PRIdPTRDIFF "td"
+#define V8PRIuPTRDIFF "tu"
+#endif
+
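
These follow the PRIdPTR-style convention: the length modifier for ptrdiff_t is 't' per the C standard, spelled 'I' by MSVC's runtime. Usage looks like:

    #include <cstdio>
    int a[8];
    int* begin = a;
    int* end = a + 8;
    ptrdiff_t delta = end - begin;
    std::printf("delta = %" V8PRIdPTRDIFF " elements\n", delta);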
// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if V8_OS_MACOSX
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
+#undef V8PRIdPTR
+#define V8PRIdPTR "ld"
#undef V8PRIuPTR
#define V8PRIuPTR "lxu"
#endif
-// GCC on S390 31-bit expands 'size_t' to 'long unsigned int'
-// instead of 'int', resulting in compilation errors with %d.
-// The printf format specifier needs to be %zd instead.
-#if V8_HOST_ARCH_S390 && !V8_HOST_ARCH_64_BIT
-#define V8_SIZET_PREFIX "z"
-#else
-#define V8_SIZET_PREFIX ""
-#endif
-
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
// write V8_2PART_UINT64_C(0x12345678,90123456);
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index fcd6cf7974..19c33f8b1f 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -36,6 +36,19 @@ ConditionVariable::ConditionVariable() {
ConditionVariable::~ConditionVariable() {
+#if defined(V8_OS_MACOSX)
+ // This hack is necessary to avoid a fatal pthreads subsystem bug in the
+ // Darwin kernel. http://crbug.com/517681.
+ {
+ Mutex lock;
+ LockGuard<Mutex> l(&lock);
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ pthread_cond_timedwait_relative_np(&native_handle_, &lock.native_handle(),
+ &ts);
+ }
+#endif
int result = pthread_cond_destroy(&native_handle_);
DCHECK_EQ(0, result);
USE(result);
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index ea2824d8c3..6d6adfb2cb 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -46,7 +46,8 @@ static inline void* mmapHelper(size_t len, int prot, int flags, int fildes,
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time / msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on AIX.
}
@@ -56,7 +57,8 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
// On AIX, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
DCHECK(utc != -1);
- struct tm* loc = localtime(&utc); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* loc = localtime_r(&utc, &tm);
DCHECK(loc != NULL);
return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
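
The same localtime-to-localtime_r substitution repeats in every POSIX port touched below; the pattern is simply to pass caller-owned storage so the lookup no longer races on libc's shared static buffer:

    // Thread-safe variant used throughout these ports.
    struct tm tm_storage;
    struct tm* t = localtime_r(&tv, &tm_storage);  // writes into tm_storage
    if (t == NULL) return "";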
@@ -215,13 +217,8 @@ void* VirtualMemory::ReserveRegion(size_t size) {
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-#if defined(__native_client__)
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
- int prot = PROT_READ | PROT_WRITE;
-#else
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-#endif
+
if (mprotect(base, size, prot) == -1) return false;
return true;
@@ -232,6 +229,11 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mprotect(base, size, PROT_NONE) != -1;
}
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+ return munmap(free_start, free_size) == 0;
+}
+
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index a49e28723d..ba0f45667f 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -29,7 +29,8 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Cygwin.
}
@@ -39,7 +40,8 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
DCHECK(utc != -1);
- struct tm* loc = localtime(&utc); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* loc = localtime_r(&utc, &tm);
DCHECK(loc != NULL);
// time - localtime includes any daylight savings offset, so subtract it.
return static_cast<double>((mktime(loc) - utc) * msPerSecond -
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 8b3398039f..efb1872f35 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -39,7 +39,8 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return t->tm_zone;
}
@@ -47,7 +48,8 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
@@ -244,6 +246,10 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+ return munmap(free_start, free_size) == 0;
+}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 1323a0dd91..a35d423210 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -10,7 +10,9 @@
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/prctl.h>
#include <sys/resource.h>
+#include <sys/syscall.h>
#include <sys/time.h>
// Ubuntu Dapper requires memory pages to be marked as
@@ -44,16 +46,6 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#if V8_OS_NACL
-#if !defined(MAP_NORESERVE)
-// PNaCL doesn't have this, so we always grab all of the memory, which is bad.
-#define MAP_NORESERVE 0
-#endif
-#else
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#endif
-
namespace v8 {
namespace base {
@@ -102,30 +94,22 @@ bool OS::ArmUsingHardFloat() {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-#if V8_OS_NACL
- // Missing support for tm_zone field.
- return "";
-#else
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (!t || !t->tm_zone) return "";
return t->tm_zone;
-#endif
}
double OS::LocalTimeOffset(TimezoneCache* cache) {
-#if V8_OS_NACL
- // Missing support for tm_zone field.
- return 0;
-#else
time_t tv = time(NULL);
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-#endif
}
@@ -220,13 +204,7 @@ void OS::SignalCodeMovingGC() {
OS::Abort();
}
void* addr = mmap(OS::GetRandomMmapAddr(), size,
-#if V8_OS_NACL
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
- PROT_READ,
-#else
PROT_READ | PROT_EXEC,
-#endif
MAP_PRIVATE, fileno(f), 0);
DCHECK_NE(MAP_FAILED, addr);
OS::Free(addr, size);
@@ -346,13 +324,7 @@ void* VirtualMemory::ReserveRegion(size_t size) {
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-#if V8_OS_NACL
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
- int prot = PROT_READ | PROT_WRITE;
-#else
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-#endif
if (MAP_FAILED == mmap(base,
size,
prot,
@@ -375,6 +347,14 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+#if defined(LEAK_SANITIZER)
+ __lsan_unregister_root_region(base, size);
+ __lsan_register_root_region(base, size - free_size);
+#endif
+ return munmap(free_start, free_size) == 0;
+}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
#if defined(LEAK_SANITIZER)
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 419281f669..b75bc47e31 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -86,10 +86,10 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
if (code_ptr == NULL) continue;
- const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
+ const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
- result.push_back(
- SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
+ result.push_back(SharedLibraryAddress(_dyld_get_image_name(i), start,
+ start + size, slide));
}
return result;
}
@@ -102,7 +102,8 @@ void OS::SignalCodeMovingGC() {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return t->tm_zone;
}
@@ -110,7 +111,8 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
@@ -239,6 +241,10 @@ bool VirtualMemory::UncommitRegion(void* address, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+ return munmap(free_start, free_size) == 0;
+}
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
return munmap(address, size) == 0;
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index af145e2fca..29c518a898 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -37,7 +37,8 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return t->tm_zone;
}
@@ -45,7 +46,8 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
@@ -275,6 +277,10 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+ return munmap(free_start, free_size) == 0;
+}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index bb340ab5f9..3f4165de53 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -55,7 +55,7 @@
#include <sys/prctl.h> // NOLINT, for prctl
#endif
-#if !defined(V8_OS_NACL) && !defined(_AIX)
+#ifndef _AIX
#include <sys/syscall.h>
#endif
@@ -113,10 +113,6 @@ void OS::ProtectCode(void* address, const size_t size) {
#if V8_OS_CYGWIN
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#elif V8_OS_NACL
- // The Native Client port of V8 uses an interpreter, so
- // code pages don't need PROT_EXEC.
- mprotect(address, size, PROT_READ);
#else
mprotect(address, size, PROT_READ | PROT_EXEC);
#endif
@@ -154,12 +150,6 @@ const char* OS::GetGCFakeMMapFile() {
void* OS::GetRandomMmapAddr() {
-#if V8_OS_NACL
- // TODO(bradchen): restore randomization once Native Client gets
- // smarter about using mmap address hints.
- // See http://code.google.com/p/nativeclient/issues/3341
- return NULL;
-#endif
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER)
// Dynamic tools do not support custom mmap addresses.
@@ -256,11 +246,7 @@ void OS::DebugBreak() {
#elif V8_HOST_ARCH_PPC
asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
-#if V8_OS_NACL
- asm("hlt");
-#else
asm("int $3");
-#endif // V8_OS_NACL
#elif V8_HOST_ARCH_X64
asm("int $3");
#elif V8_HOST_ARCH_S390
@@ -358,17 +344,12 @@ int OS::GetCurrentThreadId() {
//
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
-#if V8_OS_NACL
- // Optionally used in Logger::ResourceEvent.
- return -1;
-#else
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
*secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
*usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
return 0;
-#endif
}
@@ -398,7 +379,8 @@ void OS::ClearTimezoneCache(TimezoneCache* cache) {
double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return std::numeric_limits<double>::quiet_NaN();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
@@ -614,8 +596,6 @@ void Thread::Start() {
memset(&attr, 0, sizeof(attr));
result = pthread_attr_init(&attr);
DCHECK_EQ(0, result);
- // Native client uses default stack size.
-#if !V8_OS_NACL
size_t stack_size = stack_size_;
#if V8_OS_AIX
if (stack_size == 0) {
@@ -627,7 +607,6 @@ void Thread::Start() {
result = pthread_attr_setstacksize(&attr, stack_size);
DCHECK_EQ(0, result);
}
-#endif
{
LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 3c90467627..6ff953d65c 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -88,7 +88,8 @@ bool OS::ArmUsingHardFloat() {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return t->tm_zone;
}
@@ -96,7 +97,8 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index a2ce2c13f6..6783c6ba89 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -38,7 +38,8 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
+ struct tm tm;
+ struct tm* t = localtime_r(&tv, &tm);
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Solaris.
}
@@ -197,6 +198,10 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
kMmapFdOffset) != MAP_FAILED;
}
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+ return munmap(free_start, free_size) == 0;
+}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 0076a35ce5..080e6bc0af 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -46,9 +46,8 @@ inline void MemoryBarrier() {
int localtime_s(tm* out_tm, const time_t* time) {
- tm* posix_local_time_struct = localtime(time); // NOLINT
+ tm* posix_local_time_struct = localtime_r(time, out_tm);
if (posix_local_time_struct == NULL) return 1;
- *out_tm = *posix_local_time_struct;
return 0;
}
@@ -1290,6 +1289,10 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
+bool VirtualMemory::ReleasePartialRegion(void* base, size_t size,
+ void* free_start, size_t free_size) {
+ return VirtualFree(free_start, free_size, MEM_DECOMMIT) != 0;
+}
bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return VirtualFree(base, 0, MEM_RELEASE) != 0;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 5b2dbc9a0b..d3b6c9c1cf 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -26,6 +26,7 @@
#include <vector>
#include "src/base/build_config.h"
+#include "src/base/compiler-specific.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
@@ -154,18 +155,19 @@ class OS {
// Print output to console. This is mostly used for debugging output.
// On platforms that have standard terminal output, the output
// should go to stdout.
- static void Print(const char* format, ...);
- static void VPrint(const char* format, va_list args);
+ static PRINTF_FORMAT(1, 2) void Print(const char* format, ...);
+ static PRINTF_FORMAT(1, 0) void VPrint(const char* format, va_list args);
// Print output to a file. This is mostly used for debugging output.
- static void FPrint(FILE* out, const char* format, ...);
- static void VFPrint(FILE* out, const char* format, va_list args);
+ static PRINTF_FORMAT(2, 3) void FPrint(FILE* out, const char* format, ...);
+ static PRINTF_FORMAT(2, 0) void VFPrint(FILE* out, const char* format,
+ va_list args);
// Print error output to console. This is mostly used for error message
// output. On platforms that have standard terminal output, the output
// should go to stderr.
- static void PrintError(const char* format, ...);
- static void VPrintError(const char* format, va_list args);
+ static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
+ static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
// Allocate/Free memory used by JS heap. Pages are readable/writable, but
// they are not guaranteed to be executable unless 'executable' is true.
@@ -222,11 +224,10 @@ class OS {
// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
- static int SNPrintF(char* str, int length, const char* format, ...);
- static int VSNPrintF(char* str,
- int length,
- const char* format,
- va_list args);
+ static PRINTF_FORMAT(3, 4) int SNPrintF(char* str, int length,
+ const char* format, ...);
+ static PRINTF_FORMAT(3, 0) int VSNPrintF(char* str, int length,
+ const char* format, va_list args);
static char* StrChr(char* str, int c);
static void StrNCpy(char* dest, int length, const char* src, size_t n);
@@ -234,13 +235,20 @@ class OS {
// Support for the profiler. Can do nothing, in which case ticks
// occurring in shared libraries will not be properly accounted for.
struct SharedLibraryAddress {
- SharedLibraryAddress(
- const std::string& library_path, uintptr_t start, uintptr_t end)
- : library_path(library_path), start(start), end(end) {}
+ SharedLibraryAddress(const std::string& library_path, uintptr_t start,
+ uintptr_t end)
+ : library_path(library_path), start(start), end(end), aslr_slide(0) {}
+ SharedLibraryAddress(const std::string& library_path, uintptr_t start,
+ uintptr_t end, intptr_t aslr_slide)
+ : library_path(library_path),
+ start(start),
+ end(end),
+ aslr_slide(aslr_slide) {}
std::string library_path;
uintptr_t start;
uintptr_t end;
+ intptr_t aslr_slide;
};
static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
@@ -329,6 +337,23 @@ class VirtualMemory {
// Creates a single guard page at the given address.
bool Guard(void* address);
+ // Releases the memory after |free_start|.
+ void ReleasePartial(void* free_start) {
+ DCHECK(IsReserved());
+ // Notice: Order is important here. The VirtualMemory object might live
+ // inside the allocated region.
+ size_t size = size_ - (reinterpret_cast<size_t>(free_start) -
+ reinterpret_cast<size_t>(address_));
+ CHECK(InVM(free_start, size));
+ DCHECK_LT(address_, free_start);
+ DCHECK_LT(free_start, reinterpret_cast<void*>(
+ reinterpret_cast<size_t>(address_) + size_));
+ bool result = ReleasePartialRegion(address_, size_, free_start, size);
+ USE(result);
+ DCHECK(result);
+ size_ -= size;
+ }
+
void Release() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
@@ -361,6 +386,12 @@ class VirtualMemory {
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
+ // Must be called with a base pointer that has been returned by ReserveRegion
+ // and the same size it was reserved with.
+ // [free_start, free_start + free_size) is the memory that will be released.
+ static bool ReleasePartialRegion(void* base, size_t size, void* free_start,
+ size_t free_size);
+
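
ReleasePartial trims the tail of a reservation while the head stays mapped, so a live VirtualMemory can shrink in place. A hedged usage sketch (the reserving constructor and address() accessor are assumed):

    // Hypothetical: reserve 2 MB, then return the second half to the OS.
    v8::base::VirtualMemory vm(2 * 1024 * 1024);
    char* base = static_cast<char*>(vm.address());
    vm.ReleasePartial(base + 1024 * 1024);  // unmaps [base + 1 MB, base + 2 MB)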
// Returns true if OS performs lazy commits, i.e. the memory allocation call
// defers actual physical memory allocation till the first memory access.
// Otherwise returns false.
diff --git a/deps/v8/src/base/platform/semaphore.cc b/deps/v8/src/base/platform/semaphore.cc
index 284474e937..346705fd02 100644
--- a/deps/v8/src/base/platform/semaphore.cc
+++ b/deps/v8/src/base/platform/semaphore.cc
@@ -34,7 +34,6 @@ Semaphore::~Semaphore() {
USE(result);
}
-
void Semaphore::Signal() {
kern_return_t result = semaphore_signal(native_handle_);
DCHECK_EQ(KERN_SUCCESS, result);
@@ -75,10 +74,6 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
Semaphore::Semaphore(int count) {
DCHECK(count >= 0);
-#if V8_LIBC_GLIBC
- // sem_init in glibc prior to 2.1 does not zero out semaphores.
- memset(&native_handle_, 0, sizeof(native_handle_));
-#endif
int result = sem_init(&native_handle_, 0, count);
DCHECK_EQ(0, result);
USE(result);
@@ -91,9 +86,11 @@ Semaphore::~Semaphore() {
USE(result);
}
-
void Semaphore::Signal() {
int result = sem_post(&native_handle_);
+ // This check may fail with <libc-2.21, which we use on the try bots, if the
+ // semaphore is destroyed while sem_post is still executing. A workaround is
+ // to extend the lifetime of the semaphore.
CHECK_EQ(0, result);
}
@@ -110,17 +107,6 @@ void Semaphore::Wait() {
bool Semaphore::WaitFor(const TimeDelta& rel_time) {
-#if V8_OS_NACL
- // PNaCL doesn't support sem_timedwait, do ugly busy waiting.
- ElapsedTimer timer;
- timer.Start();
- do {
- int result = sem_trywait(&native_handle_);
- if (result == 0) return true;
- DCHECK(errno == EAGAIN || errno == EINTR);
- } while (!timer.HasExpired(rel_time));
- return false;
-#else
// Compute the time for end of timeout.
const Time time = Time::NowFromSystemTime() + rel_time;
const struct timespec ts = time.ToTimespec();
@@ -144,7 +130,6 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {
DCHECK_EQ(-1, result);
DCHECK_EQ(EINTR, errno);
}
-#endif
}
#elif V8_OS_WIN
@@ -162,7 +147,6 @@ Semaphore::~Semaphore() {
USE(result);
}
-
void Semaphore::Signal() {
LONG dummy;
BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 18700d1ba0..39029c83fc 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -39,14 +39,13 @@ class Semaphore final {
// Increments the semaphore counter.
void Signal();
- // Suspends the calling thread until the semaphore counter is non zero
- // and then decrements the semaphore counter.
+ // Decrements the semaphore counter if it is positive, or blocks until it
+ // becomes positive and then decrements the counter.
void Wait();
- // Suspends the calling thread until the counter is non zero or the timeout
- // time has passed. If timeout happens the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned.
+ // Like Wait(), but returns after rel_time has passed. If the timeout
+ // happens, the return value is false and the counter is unchanged. Otherwise
+ // the semaphore counter is decremented and true is returned.
bool WaitFor(const TimeDelta& rel_time) WARN_UNUSED_RESULT;
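
The reworded contract makes WaitFor a bounded Wait: either the counter is decremented within rel_time and true is returned, or the timeout fires and the counter is left untouched. For instance:

    v8::base::Semaphore work_ready(0);
    // Producer thread: work_ready.Signal();
    // Consumer thread:
    if (work_ready.WaitFor(v8::base::TimeDelta::FromMilliseconds(50))) {
      // Counter was decremented; consume the work item.
    } else {
      // Timed out; counter unchanged.
    }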
#if V8_OS_MACOSX
diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc
index 6d5e538970..76a820955f 100644
--- a/deps/v8/src/base/platform/time.cc
+++ b/deps/v8/src/base/platform/time.cc
@@ -10,7 +10,9 @@
#include <unistd.h>
#endif
#if V8_OS_MACOSX
+#include <mach/mach.h>
#include <mach/mach_time.h>
+#include <pthread.h>
#endif
#include <cstring>
@@ -25,6 +27,90 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+namespace {
+
+#if V8_OS_MACOSX
+int64_t ComputeThreadTicks() {
+ mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
+ thread_basic_info_data_t thread_info_data;
+ kern_return_t kr = thread_info(
+ pthread_mach_thread_np(pthread_self()),
+ THREAD_BASIC_INFO,
+ reinterpret_cast<thread_info_t>(&thread_info_data),
+ &thread_info_count);
+ CHECK(kr == KERN_SUCCESS);
+
+ v8::base::CheckedNumeric<int64_t> absolute_micros(
+ thread_info_data.user_time.seconds +
+ thread_info_data.system_time.seconds);
+ absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
+ absolute_micros += (thread_info_data.user_time.microseconds +
+ thread_info_data.system_time.microseconds);
+ return absolute_micros.ValueOrDie();
+}
+#elif V8_OS_POSIX
+// Helper function to get results from clock_gettime() and convert to a
+// microsecond timebase. The minimum requirement is that CLOCK_MONOTONIC be
+// supported on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
+// _POSIX_MONOTONIC_CLOCK to -1.
+V8_INLINE int64_t ClockNow(clockid_t clk_id) {
+#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
+ defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
+// On AIX, clock_gettime for CLOCK_THREAD_CPUTIME_ID outputs time with a
+// resolution of 10ms; the thread_cputime API provides the time in ns.
+#if defined(V8_OS_AIX)
+ thread_cputime_t tc;
+ if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
+ if (thread_cputime(-1, &tc) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ }
+#endif
+ struct timespec ts;
+ if (clock_gettime(clk_id, &ts) != 0) {
+ UNREACHABLE();
+ return 0;
+ }
+ v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
+ result *= v8::base::Time::kMicrosecondsPerSecond;
+#if defined(V8_OS_AIX)
+ if (clk_id == CLOCK_THREAD_CPUTIME_ID) {
+ result += (tc.stime / v8::base::Time::kNanosecondsPerMicrosecond);
+ } else {
+ result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
+ }
+#else
+ result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
+#endif
+ return result.ValueOrDie();
+#else // Monotonic clock not supported.
+ return 0;
+#endif
+}
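
Stripped of the AIX special case and the CheckedNumeric overflow guard, ClockNow's conversion is one multiply and one divide on the timespec fields:

    // Equivalent unchecked arithmetic (the real code traps overflow instead).
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    int64_t micros =
        static_cast<int64_t>(ts.tv_sec) * 1000000 + ts.tv_nsec / 1000;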
+#elif V8_OS_WIN
+V8_INLINE bool IsQPCReliable() {
+ v8::base::CPU cpu;
+ // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
+ return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
+}
+
+// Returns the current value of the performance counter.
+V8_INLINE uint64_t QPCNowRaw() {
+ LARGE_INTEGER perf_counter_now = {};
+ // According to the MSDN documentation for QueryPerformanceCounter(), this
+ // will never fail on systems that run XP or later.
+ // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
+ BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
+ DCHECK(result);
+ USE(result);
+ return perf_counter_now.QuadPart;
+}
+#endif // V8_OS_MACOSX
+
+
+} // namespace
+
namespace v8 {
namespace base {
@@ -409,15 +495,12 @@ class HighResolutionTickClock final : public TickClock {
virtual ~HighResolutionTickClock() {}
int64_t Now() override {
- LARGE_INTEGER now;
- BOOL result = QueryPerformanceCounter(&now);
- DCHECK(result);
- USE(result);
+ uint64_t now = QPCNowRaw();
// Intentionally calculate microseconds in a roundabout manner to avoid
// overflow and precision issues. Think twice before simplifying!
- int64_t whole_seconds = now.QuadPart / ticks_per_second_;
- int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+ int64_t whole_seconds = now / ticks_per_second_;
+ int64_t leftover_ticks = now % ticks_per_second_;
int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
@@ -482,10 +565,8 @@ struct CreateHighResTickClockTrait {
return tick_clock.Pointer();
}
- // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
- // is unreliable, fallback to the low-resolution tick clock.
- CPU cpu;
- if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+ // If QPC is not reliable, fall back to the low-resolution tick clock.
+ if (IsQPCReliable()) {
return tick_clock.Pointer();
}
@@ -541,12 +622,7 @@ TimeTicks TimeTicks::HighResolutionNow() {
#elif V8_OS_SOLARIS
ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
- struct timespec ts;
- int result = clock_gettime(CLOCK_MONOTONIC, &ts);
- DCHECK_EQ(0, result);
- USE(result);
- ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
- ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+ ticks = ClockNow(CLOCK_MONOTONIC);
#endif // V8_OS_MACOSX
// Make sure we never return 0 here.
return TimeTicks(ticks + 1);
@@ -560,5 +636,125 @@ bool TimeTicks::IsHighResolutionClockWorking() {
#endif // V8_OS_WIN
+
+bool ThreadTicks::IsSupported() {
+#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+ defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID)
+ return true;
+#elif defined(V8_OS_WIN)
+ return IsSupportedWin();
+#else
+ return false;
+#endif
+}
+
+
+ThreadTicks ThreadTicks::Now() {
+#if V8_OS_MACOSX
+ return ThreadTicks(ComputeThreadTicks());
+#elif (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
+ defined(V8_OS_ANDROID)
+ return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
+#elif V8_OS_WIN
+ return ThreadTicks::GetForThread(::GetCurrentThread());
+#else
+ UNREACHABLE();
+ return ThreadTicks();
+#endif
+}
+
+
+#if V8_OS_WIN
+ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
+ DCHECK(IsSupported());
+
+ // Get the number of TSC ticks used by the current thread.
+ ULONG64 thread_cycle_time = 0;
+ ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);
+
+ // Get the frequency of the TSC.
+ double tsc_ticks_per_second = TSCTicksPerSecond();
+ if (tsc_ticks_per_second == 0)
+ return ThreadTicks();
+
+ // Return the CPU time of the current thread.
+ double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
+ return ThreadTicks(
+ static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
+}
+
+// static
+bool ThreadTicks::IsSupportedWin() {
+ static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
+ !IsQPCReliable();
+ return is_supported;
+}
+
+// static
+void ThreadTicks::WaitUntilInitializedWin() {
+ while (TSCTicksPerSecond() == 0)
+ ::Sleep(10);
+}
+
+double ThreadTicks::TSCTicksPerSecond() {
+ DCHECK(IsSupported());
+
+ // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
+ // frequency, because there is no guarantee that the TSC frequency is equal to
+ // the performance counter frequency.
+
+ // The TSC frequency is cached in a static variable because it takes some time
+ // to compute it.
+ static double tsc_ticks_per_second = 0;
+ if (tsc_ticks_per_second != 0)
+ return tsc_ticks_per_second;
+
+ // Increase the thread priority to reduce the chances of having a context
+ // switch during a reading of the TSC and the performance counter.
+ int previous_priority = ::GetThreadPriority(::GetCurrentThread());
+ ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
+
+ // The first time that this function is called, make an initial reading of the
+ // TSC and the performance counter.
+ static const uint64_t tsc_initial = __rdtsc();
+ static const uint64_t perf_counter_initial = QPCNowRaw();
+
+ // Make another reading of the TSC and the performance counter every time
+ // that this function is called.
+ uint64_t tsc_now = __rdtsc();
+ uint64_t perf_counter_now = QPCNowRaw();
+
+ // Reset the thread priority.
+ ::SetThreadPriority(::GetCurrentThread(), previous_priority);
+
+ // Make sure that at least 50 ms have elapsed between the 2 readings. The first
+ // time that this function is called, we don't expect this to be the case.
+ // Note: The longer the elapsed time between the 2 readings is, the more
+ // accurate the computed TSC frequency will be. The 50 ms value was
+ // chosen because local benchmarks show that it allows us to get a
+ // stddev of less than 1 tick/us between multiple runs.
+ // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
+ // this will never fail on systems that run XP or later.
+ // https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
+ LARGE_INTEGER perf_counter_frequency = {};
+ ::QueryPerformanceFrequency(&perf_counter_frequency);
+ DCHECK_GE(perf_counter_now, perf_counter_initial);
+ uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
+ double elapsed_time_seconds =
+ perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);
+
+ const double kMinimumEvaluationPeriodSeconds = 0.05;
+ if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
+ return 0;
+
+ // Compute the frequency of the TSC.
+ DCHECK_GE(tsc_now, tsc_initial);
+ uint64_t tsc_ticks = tsc_now - tsc_initial;
+ tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;
+
+ return tsc_ticks_per_second;
+}
+#endif // V8_OS_WIN
+
} // namespace base
} // namespace v8
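
The TSC calibration above reduces to a single ratio: QPC converts the sampling window into seconds via its known frequency, and the TSC delta over that window gives ticks per second.

    // elapsed_seconds      = (perf_counter_now - perf_counter_initial)
    //                        / qpc_frequency
    // tsc_ticks_per_second = (tsc_now - tsc_initial) / elapsed_seconds
    // e.g. 360,000,000 TSC ticks over a 100 ms window => 3.6 GHz.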
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index c8140efe4a..be62014f91 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -9,7 +9,12 @@
#include <iosfwd>
#include <limits>
+#include "src/base/bits.h"
#include "src/base/macros.h"
+#include "src/base/safe_math.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
// Forward declarations.
extern "C" {
@@ -23,8 +28,14 @@ namespace v8 {
namespace base {
class Time;
+class TimeDelta;
class TimeTicks;
+namespace time_internal {
+template<class TimeClass>
+class TimeBase;
+}
+
// -----------------------------------------------------------------------------
// TimeDelta
//
@@ -143,6 +154,7 @@ class TimeDelta final {
}
private:
+ template<class TimeClass> friend class time_internal::TimeBase;
// Constructs a delta given the duration in microseconds. This is private
// to avoid confusion by callers with an integer constructor. Use
// FromSeconds, FromMilliseconds, etc. instead.
@@ -153,35 +165,123 @@ class TimeDelta final {
};
-// -----------------------------------------------------------------------------
-// Time
-//
-// This class represents an absolute point in time, internally represented as
-// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
+namespace time_internal {
-class Time final {
+// TimeBase--------------------------------------------------------------------
+
+// Provides value storage and comparison/math operations common to all time
+// classes. Each subclass provides for strong type-checking to ensure
+// semantically meaningful comparison/math of time values from the same clock
+// source or timeline.
+template<class TimeClass>
+class TimeBase {
public:
+ static const int64_t kHoursPerDay = 24;
static const int64_t kMillisecondsPerSecond = 1000;
+ static const int64_t kMillisecondsPerDay =
+ kMillisecondsPerSecond * 60 * 60 * kHoursPerDay;
static const int64_t kMicrosecondsPerMillisecond = 1000;
- static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
- kMillisecondsPerSecond;
+ static const int64_t kMicrosecondsPerSecond =
+ kMicrosecondsPerMillisecond * kMillisecondsPerSecond;
static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
- static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+ static const int64_t kMicrosecondsPerDay =
+ kMicrosecondsPerHour * kHoursPerDay;
static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
static const int64_t kNanosecondsPerMicrosecond = 1000;
- static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
- kMicrosecondsPerSecond;
+ static const int64_t kNanosecondsPerSecond =
+ kNanosecondsPerMicrosecond * kMicrosecondsPerSecond;
- // Contains the NULL time. Use Time::Now() to get the current time.
- Time() : us_(0) {}
-
- // Returns true if the time object has not been initialized.
- bool IsNull() const { return us_ == 0; }
+ // Returns true if this object has not been initialized.
+ //
+ // Warning: Be careful when writing code that performs math on time values,
+ // since it's possible to produce a valid "zero" result that should not be
+ // interpreted as a "null" value.
+ bool IsNull() const {
+ return us_ == 0;
+ }
- // Returns true if the time object is the maximum time.
+ // Returns true if this object represents the maximum time.
bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+ // For serializing only. Use FromInternalValue() to reconstitute. Please don't
+ // use this and do arithmetic on it, as it is more error prone than using the
+ // provided operators.
+ int64_t ToInternalValue() const { return us_; }
+
+ TimeClass& operator=(TimeClass other) {
+ us_ = other.us_;
+ return *(static_cast<TimeClass*>(this));
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(TimeClass other) const {
+ return TimeDelta::FromMicroseconds(us_ - other.us_);
+ }
+
+ // Return a new time modified by some delta.
+ TimeClass operator+(TimeDelta delta) const {
+ return TimeClass(bits::SignedSaturatedAdd64(delta.delta_, us_));
+ }
+ TimeClass operator-(TimeDelta delta) const {
+ return TimeClass(-bits::SignedSaturatedSub64(delta.delta_, us_));
+ }
+
+ // Modify by some time delta.
+ TimeClass& operator+=(TimeDelta delta) {
+ return static_cast<TimeClass&>(*this = (*this + delta));
+ }
+ TimeClass& operator-=(TimeDelta delta) {
+ return static_cast<TimeClass&>(*this = (*this - delta));
+ }
+
+ // Comparison operators
+ bool operator==(TimeClass other) const {
+ return us_ == other.us_;
+ }
+ bool operator!=(TimeClass other) const {
+ return us_ != other.us_;
+ }
+ bool operator<(TimeClass other) const {
+ return us_ < other.us_;
+ }
+ bool operator<=(TimeClass other) const {
+ return us_ <= other.us_;
+ }
+ bool operator>(TimeClass other) const {
+ return us_ > other.us_;
+ }
+ bool operator>=(TimeClass other) const {
+ return us_ >= other.us_;
+ }
+
+ // Converts an integer value representing TimeClass to a class. This is used
+ // when deserializing a |TimeClass| structure, using a value known to be
+ // compatible. It is not provided as a constructor because the integer type
+ // may be unclear from the perspective of a caller.
+ static TimeClass FromInternalValue(int64_t us) { return TimeClass(us); }
+
+ protected:
+ explicit TimeBase(int64_t us) : us_(us) {}
+
+ // Time value in a microsecond timebase.
+ int64_t us_;
+};
+
+} // namespace time_internal
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
+
+class Time final : public time_internal::TimeBase<Time> {
+ public:
+ // Contains the NULL time. Use Time::Now() to get the current time.
+ Time() : TimeBase(0) {}
+
// Returns the current time. Watch out, the system might adjust its clock
// in which case time will actually go backwards. We don't guarantee that
// times are increasing, or that two calls to Now() won't be the same.
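
Because every TimeBase operator takes and returns TimeClass, values from different timelines cannot be mixed by accident; TimeDelta arithmetic is the only cross-type operation. Concretely:

    v8::base::Time wall = v8::base::Time::Now();
    v8::base::TimeTicks mono = v8::base::TimeTicks::Now();
    wall += v8::base::TimeDelta::FromSeconds(1);  // delta math: allowed
    // wall - mono;  // does not compile: Time and TimeTicks are distinct types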
@@ -200,15 +300,6 @@ class Time final {
// with which we might compare it.
static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
- // Converts to/from internal values. The meaning of the "internal value" is
- // completely up to the implementation, so it should be treated as opaque.
- static Time FromInternalValue(int64_t value) {
- return Time(value);
- }
- int64_t ToInternalValue() const {
- return us_;
- }
-
// Converts to/from POSIX time specs.
static Time FromTimespec(struct timespec ts);
struct timespec ToTimespec() const;
@@ -226,59 +317,9 @@ class Time final {
static Time FromJsTime(double ms_since_epoch);
double ToJsTime() const;
- Time& operator=(const Time& other) {
- us_ = other.us_;
- return *this;
- }
-
- // Compute the difference between two times.
- TimeDelta operator-(const Time& other) const {
- return TimeDelta::FromMicroseconds(us_ - other.us_);
- }
-
- // Modify by some time delta.
- Time& operator+=(const TimeDelta& delta) {
- us_ += delta.InMicroseconds();
- return *this;
- }
- Time& operator-=(const TimeDelta& delta) {
- us_ -= delta.InMicroseconds();
- return *this;
- }
-
- // Return a new time modified by some delta.
- Time operator+(const TimeDelta& delta) const {
- return Time(us_ + delta.InMicroseconds());
- }
- Time operator-(const TimeDelta& delta) const {
- return Time(us_ - delta.InMicroseconds());
- }
-
- // Comparison operators
- bool operator==(const Time& other) const {
- return us_ == other.us_;
- }
- bool operator!=(const Time& other) const {
- return us_ != other.us_;
- }
- bool operator<(const Time& other) const {
- return us_ < other.us_;
- }
- bool operator<=(const Time& other) const {
- return us_ <= other.us_;
- }
- bool operator>(const Time& other) const {
- return us_ > other.us_;
- }
- bool operator>=(const Time& other) const {
- return us_ >= other.us_;
- }
-
private:
- explicit Time(int64_t us) : us_(us) {}
-
- // Time in microseconds in UTC.
- int64_t us_;
+ friend class time_internal::TimeBase<Time>;
+ explicit Time(int64_t us) : TimeBase(us) {}
};
std::ostream& operator<<(std::ostream&, const Time&);
@@ -298,9 +339,9 @@ inline Time operator+(const TimeDelta& delta, const Time& time) {
// Time::Now() may actually decrease or jump). But note that TimeTicks may
// "stand still", for example if the computer suspended.
-class TimeTicks final {
+class TimeTicks final : public time_internal::TimeBase<TimeTicks> {
public:
- TimeTicks() : ticks_(0) {}
+ TimeTicks() : TimeBase(0) {}
// Platform-dependent tick count representing "right now."
// The resolution of this clock is ~1-15ms. Resolution varies depending
@@ -318,79 +359,68 @@ class TimeTicks final {
// Returns true if the high-resolution clock is working on this system.
static bool IsHighResolutionClockWorking();
- // Returns true if this object has not been initialized.
- bool IsNull() const { return ticks_ == 0; }
-
- // Converts to/from internal values. The meaning of the "internal value" is
- // completely up to the implementation, so it should be treated as opaque.
- static TimeTicks FromInternalValue(int64_t value) {
- return TimeTicks(value);
- }
- int64_t ToInternalValue() const {
- return ticks_;
- }
+ private:
+ friend class time_internal::TimeBase<TimeTicks>;
- TimeTicks& operator=(const TimeTicks other) {
- ticks_ = other.ticks_;
- return *this;
- }
+ // Please use Now() to create a new object. This is for internal use
+ // and testing. Ticks are in microseconds.
+ explicit TimeTicks(int64_t ticks) : TimeBase(ticks) {}
+};
- // Compute the difference between two times.
- TimeDelta operator-(const TimeTicks other) const {
- return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
- }
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+ return ticks + delta;
+}
- // Modify by some time delta.
- TimeTicks& operator+=(const TimeDelta& delta) {
- ticks_ += delta.InMicroseconds();
- return *this;
- }
- TimeTicks& operator-=(const TimeDelta& delta) {
- ticks_ -= delta.InMicroseconds();
- return *this;
- }
- // Return a new TimeTicks modified by some delta.
- TimeTicks operator+(const TimeDelta& delta) const {
- return TimeTicks(ticks_ + delta.InMicroseconds());
- }
- TimeTicks operator-(const TimeDelta& delta) const {
- return TimeTicks(ticks_ - delta.InMicroseconds());
- }
+// ThreadTicks ----------------------------------------------------------------
- // Comparison operators
- bool operator==(const TimeTicks& other) const {
- return ticks_ == other.ticks_;
- }
- bool operator!=(const TimeTicks& other) const {
- return ticks_ != other.ticks_;
- }
- bool operator<(const TimeTicks& other) const {
- return ticks_ < other.ticks_;
- }
- bool operator<=(const TimeTicks& other) const {
- return ticks_ <= other.ticks_;
- }
- bool operator>(const TimeTicks& other) const {
- return ticks_ > other.ticks_;
- }
- bool operator>=(const TimeTicks& other) const {
- return ticks_ >= other.ticks_;
- }
+// Represents a clock, specific to a particular thread, that runs only while the
+// thread is running.
+class ThreadTicks final : public time_internal::TimeBase<ThreadTicks> {
+ public:
+ ThreadTicks() : TimeBase(0) {}
+
+ // Returns true if ThreadTicks::Now() is supported on this system.
+ static bool IsSupported();
+
+ // Waits until the initialization is completed. Needs to be guarded with a
+ // call to IsSupported().
+ static void WaitUntilInitialized() {
+#if V8_OS_WIN
+ WaitUntilInitializedWin();
+#endif
+ }
+
+ // Returns thread-specific CPU-time on systems that support this feature.
+ // Needs to be guarded with a call to IsSupported(). Use this timer
+ // to (approximately) measure how much time the calling thread spent doing
+ // actual work vs. being de-scheduled. May return bogus results if the thread
+ // migrates to another CPU between two calls. Returns an empty ThreadTicks
+ // object until the initialization is completed. If a clock reading is
+ // absolutely needed, call WaitUntilInitialized() before this method.
+ static ThreadTicks Now();
+
+#if V8_OS_WIN
+ // Similar to Now() above except this returns thread-specific CPU time for an
+ // arbitrary thread. All comments for the Now() method above apply to this
+ // method as well.
+ static ThreadTicks GetForThread(const HANDLE& thread_handle);
+#endif
private:
- // Please use Now() to create a new object. This is for internal use
- // and testing. Ticks is in microseconds.
- explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
-
- // Tick count in microseconds.
- int64_t ticks_;
+ // Please use Now() or GetForThread() to create a new object. This is for
+ // internal use and testing. Ticks are in microseconds.
+ explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
+
+#if V8_OS_WIN
+ // Returns the frequency of the TSC in ticks per second, or 0 if it hasn't
+ // been measured yet. Needs to be guarded with a call to IsSupported().
+ static double TSCTicksPerSecond();
+ static bool IsSupportedWin();
+ static void WaitUntilInitializedWin();
+#endif
};
-inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
- return ticks + delta;
-}
-
} // namespace base
} // namespace v8
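
ThreadTicks completes the trio alongside wall-clock Time and monotonic TimeTicks. A guarded usage sketch (DoWork is a hypothetical workload):

    extern void DoWork();  // hypothetical workload
    if (v8::base::ThreadTicks::IsSupported()) {
      v8::base::ThreadTicks::WaitUntilInitialized();  // only matters on Windows
      v8::base::ThreadTicks start = v8::base::ThreadTicks::Now();
      DoWork();
      v8::base::TimeDelta cpu_time = v8::base::ThreadTicks::Now() - start;
    }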
diff --git a/deps/v8/src/base/smart-pointers.h b/deps/v8/src/base/smart-pointers.h
deleted file mode 100644
index df3fcac662..0000000000
--- a/deps/v8/src/base/smart-pointers.h
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BASE_SMART_POINTERS_H_
-#define V8_BASE_SMART_POINTERS_H_
-
-#include "src/base/logging.h"
-
-namespace v8 {
-namespace base {
-
-template <typename Deallocator, typename T>
-class SmartPointerBase {
- public:
- // Default constructor. Constructs an empty scoped pointer.
- SmartPointerBase() : p_(NULL) {}
-
- // Constructs a scoped pointer from a plain one.
- explicit SmartPointerBase(T* ptr) : p_(ptr) {}
-
- // Copy constructor removes the pointer from the original to avoid double
- // freeing.
- SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs) : p_(rhs.p_) {
- const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
- }
-
- T* operator->() const { return p_; }
-
- T& operator*() const { return *p_; }
-
- T* get() const { return p_; }
-
- // You can use [n] to index as if it was a plain pointer.
- T& operator[](size_t i) { return p_[i]; }
-
- // You can use [n] to index as if it was a plain pointer.
- const T& operator[](size_t i) const { return p_[i]; }
-
- // We don't have implicit conversion to a T* since that hinders migration:
- // You would not be able to change a method from returning a T* to
- // returning an SmartArrayPointer<T> and then get errors wherever it is used.
-
-
- // If you want to take out the plain pointer and don't want it automatically
- // deleted then call Detach(). Afterwards, the smart pointer is empty
- // (NULL).
- T* Detach() {
- T* temp = p_;
- p_ = NULL;
- return temp;
- }
-
- void Reset(T* new_value) {
- DCHECK(p_ == NULL || p_ != new_value);
- if (p_) Deallocator::Delete(p_);
- p_ = new_value;
- }
-
- // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
- // the copy constructor it removes the pointer in the original to avoid
- // double freeing.
- SmartPointerBase<Deallocator, T>& operator=(
- const SmartPointerBase<Deallocator, T>& rhs) {
- DCHECK(is_empty());
- T* tmp = rhs.p_; // swap to handle self-assignment
- const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
- p_ = tmp;
- return *this;
- }
-
- bool is_empty() const { return p_ == NULL; }
-
- protected:
- // When the destructor of the scoped pointer is executed the plain pointer
- // is deleted using DeleteArray. This implies that you must allocate with
- // NewArray.
- ~SmartPointerBase() {
- if (p_) Deallocator::Delete(p_);
- }
-
- private:
- T* p_;
-};
-
-// A 'scoped array pointer' that calls DeleteArray on its pointer when the
-// destructor is called.
-
-template <typename T>
-struct ArrayDeallocator {
- static void Delete(T* array) { delete[] array; }
-};
-
-
-template <typename T>
-class SmartArrayPointer : public SmartPointerBase<ArrayDeallocator<T>, T> {
- public:
- SmartArrayPointer() {}
- explicit SmartArrayPointer(T* ptr)
- : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) {}
- SmartArrayPointer(const SmartArrayPointer<T>& rhs)
- : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) {}
-};
-
-
-template <typename T>
-struct ObjectDeallocator {
- static void Delete(T* object) { delete object; }
-};
-
-template <typename T>
-class SmartPointer : public SmartPointerBase<ObjectDeallocator<T>, T> {
- public:
- SmartPointer() {}
- explicit SmartPointer(T* ptr)
- : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) {}
- SmartPointer(const SmartPointer<T>& rhs)
- : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) {}
-};
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_SMART_POINTERS_H_
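
The deleted smart pointers predate C++11; their ownership-stealing copy semantics are what std::unique_ptr expresses safely through moves, which is presumably why the file can go. Roughly:

    #include <memory>
    // What SmartArrayPointer<int> provided, in standard C++11:
    std::unique_ptr<int[]> data(new int[16]);
    data[0] = 42;               // operator[] indexing, as before
    int* raw = data.release();  // the old Detach()
    delete[] raw;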
diff --git a/deps/v8/src/base/sys-info.cc b/deps/v8/src/base/sys-info.cc
index a2dc6ab27f..1b6d39397e 100644
--- a/deps/v8/src/base/sys-info.cc
+++ b/deps/v8/src/base/sys-info.cc
@@ -85,9 +85,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
return 0;
}
return static_cast<int64_t>(stat_buf.st_size);
-#elif V8_OS_NACL
- // No support for _SC_PHYS_PAGES, assume 2GB.
- return static_cast<int64_t>(1) << 31;
#elif V8_OS_AIX
int64_t result = sysconf(_SC_AIX_REALMEM);
return static_cast<int64_t>(result) * 1024L;
@@ -104,7 +101,7 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
// static
int64_t SysInfo::AmountOfVirtualMemory() {
-#if V8_OS_NACL || V8_OS_WIN
+#if V8_OS_WIN
return 0;
#elif V8_OS_POSIX
struct rlimit rlim;
diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc
index ff428402b6..3a6f2c63cf 100644
--- a/deps/v8/src/base/utils/random-number-generator.cc
+++ b/deps/v8/src/base/utils/random-number-generator.cc
@@ -124,10 +124,10 @@ int RandomNumberGenerator::Next(int bits) {
void RandomNumberGenerator::SetSeed(int64_t seed) {
- if (seed == 0) seed = 1;
initial_seed_ = seed;
state0_ = MurmurHash3(bit_cast<uint64_t>(seed));
- state1_ = MurmurHash3(state0_);
+ state1_ = MurmurHash3(~state0_);
+ CHECK(state0_ != 0 || state1_ != 0);
}
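
Deriving state1_ from the complement of state0_, plus the CHECK, enforces the one invariant an xorshift-style generator cannot survive violating: an all-zero state maps to itself forever, as a sketch of the update step shows.

    // With both state words zero, shifts and XORs can only ever produce zero.
    uint64_t s0 = 0, s1 = 0;
    uint64_t x = s1 ^ (s1 << 23);                     // still 0
    uint64_t next = x ^ s0 ^ (x >> 17) ^ (s0 >> 26);  // still 0, forever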
diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h
index 20ec8e0261..b61ce717e5 100644
--- a/deps/v8/src/base/win32-headers.h
+++ b/deps/v8/src/base/win32-headers.h
@@ -27,10 +27,10 @@
#ifndef NOMCX
#define NOMCX
#endif
-// Require Windows XP or higher (this is required for the RtlCaptureContext
-// function to be present).
+// Require Windows Vista or higher (this is required for the
+// QueryThreadCycleTime function to be present).
#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
+#define _WIN32_WINNT 0x0600
#endif
#include <windows.h>
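Raising _WIN32_WINNT from 0x501 (XP) to 0x0600 (Vista) makes Vista-and-later declarations such as QueryThreadCycleTime visible in <windows.h>. A minimal Windows-only sketch of a call under that target, for illustration:

#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600  // Vista or higher, as the new header requires
#endif
#include <windows.h>
#include <cstdio>

int main() {
  ULONG64 cycles = 0;
  // Available since Windows Vista; GetCurrentThread() returns a pseudo-handle.
  if (QueryThreadCycleTime(GetCurrentThread(), &cycles)) {
    std::printf("thread cycles: %llu\n", cycles);
  }
  return 0;
}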
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index e7c6747665..e2a9c4e557 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -539,13 +539,6 @@ static int SizeInHexChars(S number) {
}
-static char HexCharOfValue(int value) {
- DCHECK(0 <= value && value <= 16);
- if (value < 10) return value + '0';
- return value - 10 + 'A';
-}
-
-
bool Bignum::ToHexString(char* buffer, int buffer_size) const {
DCHECK(IsClamped());
// Each bigit must be printable as separate hex-character.
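The removed HexCharOfValue helper carried an off-by-one DCHECK: a hex digit covers 0..15, yet the assertion admitted 16, which would have printed 'G'. A corrected standalone sketch of the same mapping:

#include <cassert>

// Map a nibble (0..15) to its uppercase hex character.
static char HexCharOfValue(int value) {
  assert(0 <= value && value <= 15);  // 16 is out of range for one hex digit
  if (value < 10) return static_cast<char>(value + '0');
  return static_cast<char>(value - 10 + 'A');
}

int main() { return HexCharOfValue(0xF) == 'F' ? 0 : 1; }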
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index f67065dec4..5142817986 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -6,10 +6,13 @@
#include "src/accessors.h"
#include "src/api-natives.h"
+#include "src/base/ieee754.h"
#include "src/code-stubs.h"
+#include "src/compiler.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
#include "src/extensions/gc-extension.h"
+#include "src/extensions/ignition-statistics-extension.h"
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
#include "src/heap/heap.h"
@@ -30,17 +33,15 @@ template <class Source>
Handle<String> Bootstrapper::SourceLookup(int index) {
DCHECK(0 <= index && index < Source::GetBuiltinsCount());
Heap* heap = isolate_->heap();
- if (Source::GetSourceCache(heap)->get(index)->IsUndefined()) {
+ if (Source::GetSourceCache(heap)->get(index)->IsUndefined(isolate_)) {
// We can use external strings for the natives.
Vector<const char> source = Source::GetScriptSource(index);
NativesExternalStringResource* resource =
new NativesExternalStringResource(source.start(), source.length());
- // We do not expect this to throw an exception. Change this if it does.
- Handle<String> source_code = isolate_->factory()
- ->NewExternalStringFromOneByte(resource)
- .ToHandleChecked();
+ Handle<ExternalOneByteString> source_code =
+ isolate_->factory()->NewNativeSourceString(resource);
// Mark this external string with a special map.
- source_code->set_map(isolate_->heap()->native_source_string_map());
+ DCHECK(source_code->is_short());
Source::GetSourceCache(heap)->set(index, *source_code);
}
Handle<Object> cached_source(Source::GetSourceCache(heap)->get(index),
@@ -73,7 +74,7 @@ v8::Extension* Bootstrapper::gc_extension_ = NULL;
v8::Extension* Bootstrapper::externalize_string_extension_ = NULL;
v8::Extension* Bootstrapper::statistics_extension_ = NULL;
v8::Extension* Bootstrapper::trigger_failure_extension_ = NULL;
-
+v8::Extension* Bootstrapper::ignition_statistics_extension_ = NULL;
void Bootstrapper::InitializeOncePerProcess() {
free_buffer_extension_ = new FreeBufferExtension;
@@ -86,6 +87,8 @@ void Bootstrapper::InitializeOncePerProcess() {
v8::RegisterExtension(statistics_extension_);
trigger_failure_extension_ = new TriggerFailureExtension;
v8::RegisterExtension(trigger_failure_extension_);
+ ignition_statistics_extension_ = new IgnitionStatisticsExtension;
+ v8::RegisterExtension(ignition_statistics_extension_);
}
@@ -100,15 +103,18 @@ void Bootstrapper::TearDownExtensions() {
statistics_extension_ = NULL;
delete trigger_failure_extension_;
trigger_failure_extension_ = NULL;
+ delete ignition_statistics_extension_;
+ ignition_statistics_extension_ = NULL;
}
void DeleteNativeSources(Object* maybe_array) {
if (maybe_array->IsFixedArray()) {
FixedArray* array = FixedArray::cast(maybe_array);
+ Isolate* isolate = array->GetIsolate();
for (int i = 0; i < array->length(); i++) {
Object* natives_source = array->get(i);
- if (!natives_source->IsUndefined()) {
+ if (!natives_source->IsUndefined(isolate)) {
const NativesExternalStringResource* resource =
reinterpret_cast<const NativesExternalStringResource*>(
ExternalOneByteString::cast(natives_source)->resource());
@@ -134,8 +140,10 @@ class Genesis BASE_EMBEDDED {
public:
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions,
+ v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
GlobalContextType context_type);
+ Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_proxy_template);
~Genesis() { }
Isolate* isolate() const { return isolate_; }
@@ -144,6 +152,8 @@ class Genesis BASE_EMBEDDED {
Handle<Context> result() { return result_; }
+ Handle<JSGlobalProxy> global_proxy() { return global_proxy_; }
+
private:
Handle<Context> native_context() { return native_context_; }
@@ -157,11 +167,12 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction> GetThrowTypeErrorIntrinsic(Builtins::Name builtin_name);
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
- void CreateIteratorMaps();
+ void CreateIteratorMaps(Handle<JSFunction> empty);
+ void CreateAsyncFunctionMaps(Handle<JSFunction> empty);
void CreateJSProxyMaps();
// Make the "arguments" and "caller" properties throw a TypeError on access.
- void AddRestrictedFunctionProperties(Handle<Map> map);
+ void AddRestrictedFunctionProperties(Handle<JSFunction> empty);
// Creates the global objects using the global proxy and the template passed
// in through the API. We call this regardless of whether we are building a
@@ -199,11 +210,12 @@ class Genesis BASE_EMBEDDED {
HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
- DECLARE_FEATURE_INITIALIZATION(promise_extra, "")
+ DECLARE_FEATURE_INITIALIZATION(intl_extra, "")
#undef DECLARE_FEATURE_INITIALIZATION
Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
- const char* name);
+ const char* name, Builtins::Name call,
+ BuiltinFunctionId id);
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
@@ -230,7 +242,7 @@ class Genesis BASE_EMBEDDED {
void set_state(RegisteredExtension* extension,
ExtensionTraversalState state);
private:
- HashMap map_;
+ base::HashMap map_;
DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
};
@@ -262,29 +274,8 @@ class Genesis BASE_EMBEDDED {
void TransferNamedProperties(Handle<JSObject> from, Handle<JSObject> to);
void TransferIndexedProperties(Handle<JSObject> from, Handle<JSObject> to);
- enum FunctionMode {
- // With prototype.
- FUNCTION_WITH_WRITEABLE_PROTOTYPE,
- FUNCTION_WITH_READONLY_PROTOTYPE,
- // Without prototype.
- FUNCTION_WITHOUT_PROTOTYPE
- };
-
- static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
- return (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
- function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
- }
-
- Handle<Map> CreateSloppyFunctionMap(FunctionMode function_mode);
-
- void SetFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode);
void MakeFunctionInstancePrototypeWritable();
- Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
- Handle<JSFunction> empty_function);
-
-
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
@@ -295,6 +286,7 @@ class Genesis BASE_EMBEDDED {
Isolate* isolate_;
Handle<Context> result_;
Handle<Context> native_context_;
+ Handle<JSGlobalProxy> global_proxy_;
// Function maps. Function maps are created initially with a read only
// prototype for the processing of JS builtins. Later the function maps are
@@ -318,28 +310,28 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
Handle<Context> Bootstrapper::CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions, GlobalContextType context_type) {
+ v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+ GlobalContextType context_type) {
HandleScope scope(isolate_);
Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
- extensions, context_type);
+ extensions, context_snapshot_index, context_type);
Handle<Context> env = genesis.result();
- if (env.is_null() ||
- (context_type != THIN_CONTEXT && !InstallExtensions(env, extensions))) {
+ if (env.is_null() || !InstallExtensions(env, extensions)) {
return Handle<Context>();
}
return scope.CloseAndEscape(env);
}
-
-static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
- // object.__proto__ = proto;
- Handle<Map> old_map = Handle<Map>(object->map());
- Handle<Map> new_map = Map::Copy(old_map, "SetObjectPrototype");
- Map::SetPrototype(new_map, proto, FAST_PROTOTYPE);
- JSObject::MigrateToMap(object, new_map);
+Handle<JSGlobalProxy> Bootstrapper::NewRemoteContext(
+ MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_proxy_template) {
+ HandleScope scope(isolate_);
+ Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template);
+ Handle<JSGlobalProxy> global_proxy = genesis.global_proxy();
+ if (global_proxy.is_null()) return Handle<JSGlobalProxy>();
+ return scope.CloseAndEscape(global_proxy);
}
-
void Bootstrapper::DetachGlobal(Handle<Context> env) {
env->GetIsolate()->counters()->errors_thrown_per_context()->AddSample(
env->GetErrorsThrown());
@@ -347,14 +339,13 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
Factory* factory = env->GetIsolate()->factory();
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
- SetObjectPrototype(global_proxy, factory->null_value());
+ JSObject::ForceSetPrototype(global_proxy, factory->null_value());
global_proxy->map()->SetConstructor(*factory->null_value());
if (FLAG_track_detached_contexts) {
env->GetIsolate()->AddDetachedContext(env);
}
}
-
namespace {
void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
@@ -367,34 +358,39 @@ void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
function->shared()->set_native(true);
}
-
-static void InstallFunction(Handle<JSObject> target,
- Handle<JSFunction> function, Handle<Name> name,
- PropertyAttributes attributes = DONT_ENUM) {
+void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
+ Handle<Name> name,
+ PropertyAttributes attributes = DONT_ENUM) {
Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
InstallFunction(target, name, function, name_string, attributes);
}
+Handle<JSFunction> InstallGetter(Handle<JSObject> target,
+ Handle<Name> property_name,
+ Handle<JSFunction> getter,
+ PropertyAttributes attributes = DONT_ENUM) {
+ Handle<Object> setter = target->GetIsolate()->factory()->undefined_value();
+ JSObject::DefineAccessor(target, property_name, getter, setter, attributes)
+ .Check();
+ getter->shared()->set_native(true);
+ return getter;
+}
-static Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
- InstanceType type, int instance_size,
- MaybeHandle<JSObject> maybe_prototype,
- Builtins::Name call,
- bool strict_function_map = false) {
+Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
+ InstanceType type, int instance_size,
+ MaybeHandle<JSObject> maybe_prototype,
+ Builtins::Name call,
+ bool strict_function_map = false) {
Factory* factory = isolate->factory();
Handle<Code> call_code(isolate->builtins()->builtin(call));
Handle<JSObject> prototype;
- static const bool kReadOnlyPrototype = false;
- static const bool kInstallConstructor = false;
return maybe_prototype.ToHandle(&prototype)
? factory->NewFunction(name, call_code, prototype, type,
- instance_size, kReadOnlyPrototype,
- kInstallConstructor, strict_function_map)
+ instance_size, strict_function_map)
: factory->NewFunctionWithoutPrototype(name, call_code,
strict_function_map);
}
-
Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
InstanceType type, int instance_size,
MaybeHandle<JSObject> maybe_prototype,
@@ -409,7 +405,6 @@ Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
return function;
}
-
Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
InstanceType type, int instance_size,
MaybeHandle<JSObject> maybe_prototype,
@@ -422,68 +417,72 @@ Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
strict_function_map);
}
-} // namespace
-
+Handle<JSFunction> SimpleCreateFunction(Isolate* isolate, Handle<String> name,
+ Builtins::Name call, int len,
+ bool adapt) {
+ Handle<JSFunction> fun =
+ CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), call);
+ if (adapt) {
+ fun->shared()->set_internal_formal_parameter_count(len);
+ } else {
+ fun->shared()->DontAdaptArguments();
+ }
+ fun->shared()->set_length(len);
+ return fun;
+}
-void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode) {
- int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
- Map::EnsureDescriptorSlack(map, size);
+Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ Handle<String> name,
+ Builtins::Name call, int len,
+ bool adapt,
+ PropertyAttributes attrs = DONT_ENUM) {
+ Handle<JSFunction> fun =
+ SimpleCreateFunction(base->GetIsolate(), name, call, len, adapt);
+ InstallFunction(base, fun, name, attrs);
+ return fun;
+}
- PropertyAttributes ro_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- PropertyAttributes roc_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ const char* name, Builtins::Name call,
+ int len, bool adapt,
+ PropertyAttributes attrs = DONT_ENUM) {
+ Factory* const factory = base->GetIsolate()->factory();
+ return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
+ len, adapt, attrs);
+}
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
- { // Add length.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, roc_attribs);
- map->AppendDescriptor(&d);
- }
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), ro_attribs);
- { // Add name.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- roc_attribs);
- map->AppendDescriptor(&d);
- }
- Handle<AccessorInfo> args =
- Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
- { // Add arguments.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(args->name())), args,
- ro_attribs);
- map->AppendDescriptor(&d);
- }
- Handle<AccessorInfo> caller =
- Accessors::FunctionCallerInfo(isolate(), ro_attribs);
- { // Add caller.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(caller->name())),
- caller, ro_attribs);
- map->AppendDescriptor(&d);
- }
- if (IsFunctionModeWithPrototype(function_mode)) {
- if (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE) {
- ro_attribs = static_cast<PropertyAttributes>(ro_attribs & ~READ_ONLY);
- }
- Handle<AccessorInfo> prototype =
- Accessors::FunctionPrototypeInfo(isolate(), ro_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
- prototype, ro_attribs);
- map->AppendDescriptor(&d);
- }
+Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
+ const char* name, Builtins::Name call,
+ int len, bool adapt,
+ BuiltinFunctionId id) {
+ Handle<JSFunction> fun = SimpleInstallFunction(base, name, call, len, adapt);
+ fun->shared()->set_builtin_function_id(id);
+ return fun;
}
+Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+ Handle<String> name, Builtins::Name call,
+ bool adapt) {
+ Isolate* const isolate = base->GetIsolate();
+ Handle<String> fun_name =
+ Name::ToFunctionName(name, isolate->factory()->get_string())
+ .ToHandleChecked();
+ Handle<JSFunction> fun =
+ SimpleCreateFunction(isolate, fun_name, call, 0, adapt);
+ InstallGetter(base, name, fun);
+ return fun;
+}
-Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
- Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
- map->set_is_callable();
- return map;
+Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+ Handle<String> name, Builtins::Name call,
+ bool adapt, BuiltinFunctionId id) {
+ Handle<JSFunction> fun = SimpleInstallGetter(base, name, call, adapt);
+ fun->shared()->set_builtin_function_id(id);
+ return fun;
}
+} // namespace
Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the map for function instances. Maps are allocated first and their
@@ -492,7 +491,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
- CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
+ factory()->CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
native_context()->set_sloppy_function_without_prototype_map(
*function_without_prototype_map);
@@ -500,7 +499,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> function_map =
- CreateSloppyFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
+ factory()->CreateSloppyFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
native_context()->set_sloppy_function_map(*function_map);
native_context()->set_sloppy_function_with_readonly_prototype_map(
*function_map);
@@ -508,7 +507,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
sloppy_function_map_writable_prototype_ =
- CreateSloppyFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
+ factory()->CreateSloppyFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
Handle<String> object_name = factory->Object_string();
@@ -534,6 +533,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<Map> map = Map::Copy(handle(object_function_prototype->map()),
"EmptyObjectPrototype");
map->set_is_prototype_map(true);
+ // Ban re-setting Object.prototype.__proto__ to prevent a Proxy security bug.
+ map->set_immutable_proto(true);
object_function_prototype->set_map(*map);
native_context()->set_initial_object_prototype(*object_function_prototype);
@@ -552,7 +553,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map =
- CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
+ factory->CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
DCHECK(!empty_function_map->is_dictionary_map());
Map::SetPrototype(empty_function_map, object_function_prototype);
empty_function_map->set_is_prototype_map(true);
@@ -577,56 +578,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Map::SetPrototype(sloppy_function_without_prototype_map, empty_function);
Map::SetPrototype(sloppy_function_map_writable_prototype_, empty_function);
- // ES6 draft 03-17-2015, section 8.2.2 step 12
- AddRestrictedFunctionProperties(empty_function_map);
-
return empty_function;
}
-void Genesis::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
- FunctionMode function_mode) {
- int size = IsFunctionModeWithPrototype(function_mode) ? 3 : 2;
- Map::EnsureDescriptorSlack(map, size);
-
- PropertyAttributes rw_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- PropertyAttributes ro_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- PropertyAttributes roc_attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
-
- DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
- function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
- function_mode == FUNCTION_WITHOUT_PROTOTYPE);
- { // Add length.
- Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, roc_attribs);
- map->AppendDescriptor(&d);
- }
- { // Add name.
- Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- roc_attribs);
- map->AppendDescriptor(&d);
- }
- if (IsFunctionModeWithPrototype(function_mode)) {
- // Add prototype.
- PropertyAttributes attribs =
- function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ? rw_attribs
- : ro_attribs;
- Handle<AccessorInfo> prototype =
- Accessors::FunctionPrototypeInfo(isolate(), attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
- prototype, attribs);
- map->AppendDescriptor(&d);
- }
-}
-
-
// Creates the %ThrowTypeError% function.
Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
Builtins::Name builtin_name) {
@@ -634,7 +589,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("ThrowTypeError"));
Handle<Code> code(isolate()->builtins()->builtin(builtin_name));
Handle<JSFunction> function =
- factory()->NewFunctionWithoutPrototype(name, code);
+ factory()->NewFunctionWithoutPrototype(name, code, true);
function->shared()->DontAdaptArguments();
// %ThrowTypeError% must not have a name property.
@@ -678,53 +633,67 @@ Handle<JSFunction> Genesis::GetStrictArgumentsPoisonFunction() {
}
-Handle<Map> Genesis::CreateStrictFunctionMap(
- FunctionMode function_mode, Handle<JSFunction> empty_function) {
- Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
- SetStrictFunctionInstanceDescriptor(map, function_mode);
- map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
- map->set_is_callable();
- Map::SetPrototype(map, empty_function);
- return map;
-}
-
-
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_function_without_prototype_map =
- CreateStrictFunctionMap(FUNCTION_WITHOUT_PROTOTYPE, empty);
+ factory()->CreateStrictFunctionMap(FUNCTION_WITHOUT_PROTOTYPE, empty);
native_context()->set_strict_function_without_prototype_map(
*strict_function_without_prototype_map);
// Allocate map for the strict mode functions. This map is temporary, used
// only for processing of builtins.
// Later the map is replaced with writable prototype map, allocated below.
- Handle<Map> strict_function_map =
- CreateStrictFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE, empty);
+ Handle<Map> strict_function_map = factory()->CreateStrictFunctionMap(
+ FUNCTION_WITH_READONLY_PROTOTYPE, empty);
native_context()->set_strict_function_map(*strict_function_map);
// The final map for the strict mode functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
- strict_function_map_writable_prototype_ =
- CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
-}
+ strict_function_map_writable_prototype_ = factory()->CreateStrictFunctionMap(
+ FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
+ // Now that the strict mode function map is available, set up the
+ // restricted "arguments" and "caller" getters.
+ AddRestrictedFunctionProperties(empty);
+}
-void Genesis::CreateIteratorMaps() {
+void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
// Create iterator-related meta-objects.
Handle<JSObject> iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSObject> generator_object_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
+ native_context()->set_initial_generator_prototype(
+ *generator_object_prototype);
+ JSObject::ForceSetPrototype(generator_object_prototype, iterator_prototype);
Handle<JSObject> generator_function_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetObjectPrototype(generator_object_prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(generator_function_prototype, empty);
+ JSObject::AddProperty(
+ generator_function_prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked("GeneratorFunction"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
JSObject::AddProperty(generator_function_prototype,
- factory()->InternalizeUtf8String("prototype"),
+ factory()->prototype_string(),
generator_object_prototype,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(generator_object_prototype,
+ factory()->constructor_string(),
+ generator_function_prototype,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(generator_object_prototype,
+ factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked("Generator"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ SimpleInstallFunction(generator_object_prototype, "next",
+ Builtins::kGeneratorPrototypeNext, 1, true);
+ SimpleInstallFunction(generator_object_prototype, "return",
+ Builtins::kGeneratorPrototypeReturn, 1, true);
+ SimpleInstallFunction(generator_object_prototype, "throw",
+ Builtins::kGeneratorPrototypeThrow, 1, true);
+
// Create maps for generator functions and their prototypes. Store those
// maps in the native context. The "prototype" property descriptor is
// writable, non-enumerable, and non-configurable (as per ES6 draft
@@ -754,6 +723,32 @@ void Genesis::CreateIteratorMaps() {
*generator_object_prototype_map);
}
+void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
+ // %AsyncFunctionPrototype% intrinsic
+ Handle<JSObject> async_function_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ JSObject::ForceSetPrototype(async_function_prototype, empty);
+
+ JSObject::AddProperty(async_function_prototype,
+ factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked("AsyncFunction"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ Handle<Map> strict_function_map(
+ native_context()->strict_function_without_prototype_map());
+ Handle<Map> sloppy_async_function_map =
+ Map::Copy(strict_function_map, "SloppyAsyncFunction");
+ sloppy_async_function_map->set_is_constructor(false);
+ Map::SetPrototype(sloppy_async_function_map, async_function_prototype);
+ native_context()->set_sloppy_async_function_map(*sloppy_async_function_map);
+
+ Handle<Map> strict_async_function_map =
+ Map::Copy(strict_function_map, "StrictAsyncFunction");
+ strict_async_function_map->set_is_constructor(false);
+ Map::SetPrototype(strict_async_function_map, async_function_prototype);
+ native_context()->set_strict_async_function_map(*strict_async_function_map);
+}
+
void Genesis::CreateJSProxyMaps() {
// Allocate the different maps for all Proxy types.
// Next to the default proxy, we need maps indicating callable and
@@ -789,14 +784,14 @@ static void ReplaceAccessors(Handle<Map> map,
descriptors->Replace(idx, &descriptor);
}
-
-void Genesis::AddRestrictedFunctionProperties(Handle<Map> map) {
+void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
PropertyAttributes rw_attribs = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> thrower = GetRestrictedFunctionPropertiesThrower();
Handle<AccessorPair> accessors = factory()->NewAccessorPair();
accessors->set_getter(*thrower);
accessors->set_setter(*thrower);
+ Handle<Map> map(empty->map());
ReplaceAccessors(map, factory()->arguments_string(), rw_attribs, accessors);
ReplaceAccessors(map, factory()->caller_string(), rw_attribs, accessors);
}
@@ -804,14 +799,15 @@ void Genesis::AddRestrictedFunctionProperties(Handle<Map> map) {
static void AddToWeakNativeContextList(Context* context) {
DCHECK(context->IsNativeContext());
- Heap* heap = context->GetIsolate()->heap();
+ Isolate* isolate = context->GetIsolate();
+ Heap* heap = isolate->heap();
#ifdef DEBUG
{ // NOLINT
- DCHECK(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
+ DCHECK(context->next_context_link()->IsUndefined(isolate));
// Check that context is not in the list yet.
for (Object* current = heap->native_contexts_list();
- !current->IsUndefined();
- current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
+ !current->IsUndefined(isolate);
+ current = Context::cast(current)->next_context_link()) {
DCHECK(current != context);
}
}
@@ -833,8 +829,8 @@ void Genesis::CreateRoots() {
// Allocate the message listeners object.
{
- v8::NeanderArray listeners(isolate());
- native_context()->set_message_listeners(*listeners.value());
+ Handle<TemplateList> list = TemplateList::New(isolate(), 1);
+ native_context()->set_message_listeners(*list);
}
}
@@ -885,7 +881,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
FunctionTemplateInfo::cast(data->constructor()));
Handle<Object> proto_template(global_constructor->prototype_template(),
isolate());
- if (!proto_template->IsUndefined()) {
+ if (!proto_template->IsUndefined(isolate())) {
js_global_object_template =
Handle<ObjectTemplateInfo>::cast(proto_template);
}
@@ -955,7 +951,9 @@ void Genesis::HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
global_proxy->set_native_context(*native_context());
// If we deserialized the context, the global proxy is already
// correctly set up. Otherwise it's undefined.
- DCHECK(native_context()->get(Context::GLOBAL_PROXY_INDEX)->IsUndefined() ||
+ DCHECK(native_context()
+ ->get(Context::GLOBAL_PROXY_INDEX)
+ ->IsUndefined(isolate()) ||
native_context()->global_proxy() == *global_proxy);
native_context()->set_global_proxy(*global_proxy);
}
@@ -971,54 +969,96 @@ void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object) {
TransferIndexedProperties(global_object_from_snapshot, global_object);
}
+static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
+ Handle<JSFunction> function,
+ int context_index) {
+ Handle<Smi> index(Smi::FromInt(context_index), isolate);
+ JSObject::AddProperty(
+ function, isolate->factory()->native_context_index_symbol(), index, NONE);
+ isolate->native_context()->set(context_index, *function);
+}
-static Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
- Handle<String> name,
- Builtins::Name call, int len,
- bool adapt) {
- Handle<JSFunction> fun =
- CreateFunction(isolate, name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), call);
- if (adapt) {
- fun->shared()->set_internal_formal_parameter_count(len);
- } else {
- fun->shared()->DontAdaptArguments();
+static void InstallError(Isolate* isolate, Handle<JSObject> global,
+ Handle<String> name, int context_index) {
+ Factory* factory = isolate->factory();
+
+ Handle<JSFunction> error_fun =
+ InstallFunction(global, name, JS_ERROR_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(),
+ Builtins::kErrorConstructor, DONT_ENUM);
+ error_fun->shared()->set_instance_class_name(*factory->Error_string());
+ error_fun->shared()->DontAdaptArguments();
+ error_fun->shared()->set_construct_stub(
+ *isolate->builtins()->ErrorConstructor());
+ error_fun->shared()->set_length(1);
+ error_fun->shared()->set_native(true);
+
+ if (context_index == Context::ERROR_FUNCTION_INDEX) {
+ Handle<JSFunction> capture_stack_trace_fun =
+ SimpleInstallFunction(error_fun, "captureStackTrace",
+ Builtins::kErrorCaptureStackTrace, 2, false);
+ capture_stack_trace_fun->shared()->set_native(true);
}
- fun->shared()->set_length(len);
- return fun;
-}
+ InstallWithIntrinsicDefaultProto(isolate, error_fun, context_index);
-static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
- Handle<String> name,
- Builtins::Name call, int len,
- bool adapt) {
- Handle<JSFunction> fun =
- SimpleCreateFunction(base->GetIsolate(), name, call, len, adapt);
- InstallFunction(base, fun, name, DONT_ENUM);
- return fun;
-}
+ {
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ JSObject::AddProperty(prototype, factory->name_string(), name, DONT_ENUM);
+ JSObject::AddProperty(prototype, factory->message_string(),
+ factory->empty_string(), DONT_ENUM);
+ JSObject::AddProperty(prototype, factory->constructor_string(), error_fun,
+ DONT_ENUM);
-static Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
- const char* name,
- Builtins::Name call, int len,
- bool adapt) {
- Factory* const factory = base->GetIsolate()->factory();
- return SimpleInstallFunction(base, factory->InternalizeUtf8String(name), call,
- len, adapt);
-}
+ if (context_index == Context::ERROR_FUNCTION_INDEX) {
+ Handle<JSFunction> to_string_fun =
+ SimpleInstallFunction(prototype, factory->toString_string(),
+ Builtins::kErrorPrototypeToString, 0, true);
+ to_string_fun->shared()->set_native(true);
+ isolate->native_context()->set_error_to_string(*to_string_fun);
+ } else {
+ DCHECK(context_index != Context::ERROR_FUNCTION_INDEX);
+ DCHECK(isolate->native_context()->error_to_string()->IsJSFunction());
+
+ InstallFunction(prototype, isolate->error_to_string(),
+ factory->toString_string(), DONT_ENUM);
+
+ Handle<JSFunction> global_error = isolate->error_function();
+ CHECK(JSReceiver::SetPrototype(error_fun, global_error, false,
+ Object::THROW_ON_ERROR)
+ .FromMaybe(false));
+ CHECK(JSReceiver::SetPrototype(prototype,
+ handle(global_error->prototype(), isolate),
+ false, Object::THROW_ON_ERROR)
+ .FromMaybe(false));
+ }
+ Accessors::FunctionSetPrototype(error_fun, prototype).Assert();
+ }
-static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
- Handle<JSFunction> function,
- int context_index) {
- Handle<Smi> index(Smi::FromInt(context_index), isolate);
- JSObject::AddProperty(
- function, isolate->factory()->native_context_index_symbol(), index, NONE);
- isolate->native_context()->set(context_index, *function);
+ Handle<Map> initial_map(error_fun->initial_map());
+ Map::EnsureDescriptorSlack(initial_map, 1);
+
+ PropertyAttributes attribs = DONT_ENUM;
+ Handle<AccessorInfo> error_stack =
+ Accessors::ErrorStackInfo(isolate, attribs);
+ {
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(error_stack->name())),
+ error_stack, attribs);
+ initial_map->AppendDescriptor(&d);
+ }
}
+static void InstallMakeError(Isolate* isolate, Handle<Code> code,
+ int context_index) {
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunction(isolate->factory()->empty_string(), code,
+ JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ function->shared()->DontAdaptArguments();
+ isolate->native_context()->set(context_index, *function);
+}
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpGlobalObject.
@@ -1049,40 +1089,77 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> object_function = isolate->object_function();
JSObject::AddProperty(global_object, object_name, object_function,
DONT_ENUM);
+
SimpleInstallFunction(object_function, factory->assign_string(),
Builtins::kObjectAssign, 2, false);
SimpleInstallFunction(object_function, factory->create_string(),
Builtins::kObjectCreate, 2, false);
- Handle<JSFunction> object_freeze = SimpleInstallFunction(
- object_function, "freeze", Builtins::kObjectFreeze, 1, false);
- native_context()->set_object_freeze(*object_freeze);
SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
SimpleInstallFunction(object_function, "getOwnPropertyNames",
Builtins::kObjectGetOwnPropertyNames, 1, false);
SimpleInstallFunction(object_function, "getOwnPropertySymbols",
Builtins::kObjectGetOwnPropertySymbols, 1, false);
- SimpleInstallFunction(object_function, "is", Builtins::kObjectIs, 2, true);
- Handle<JSFunction> object_is_extensible =
- SimpleInstallFunction(object_function, "isExtensible",
- Builtins::kObjectIsExtensible, 1, false);
+ SimpleInstallFunction(object_function, "is",
+ Builtins::kObjectIs, 2, true);
+ SimpleInstallFunction(object_function, "preventExtensions",
+ Builtins::kObjectPreventExtensions, 1, false);
+ SimpleInstallFunction(object_function, "seal",
+ Builtins::kObjectSeal, 1, false);
+
+ Handle<JSFunction> object_define_properties = SimpleInstallFunction(
+ object_function, "defineProperties",
+ Builtins::kObjectDefineProperties, 2, true);
+ native_context()->set_object_define_properties(*object_define_properties);
+
+ Handle<JSFunction> object_define_property = SimpleInstallFunction(
+ object_function, factory->defineProperty_string(),
+ Builtins::kObjectDefineProperty, 3, true);
+ native_context()->set_object_define_property(*object_define_property);
+
+ Handle<JSFunction> object_freeze = SimpleInstallFunction(
+ object_function, "freeze", Builtins::kObjectFreeze, 1, false);
+ native_context()->set_object_freeze(*object_freeze);
+
+ Handle<JSFunction> object_get_prototype_of = SimpleInstallFunction(
+ object_function, "getPrototypeOf", Builtins::kObjectGetPrototypeOf,
+ 1, false);
+ native_context()->set_object_get_prototype_of(*object_get_prototype_of);
+
+ Handle<JSFunction> object_is_extensible = SimpleInstallFunction(
+ object_function, "isExtensible", Builtins::kObjectIsExtensible,
+ 1, false);
native_context()->set_object_is_extensible(*object_is_extensible);
+
Handle<JSFunction> object_is_frozen = SimpleInstallFunction(
object_function, "isFrozen", Builtins::kObjectIsFrozen, 1, false);
native_context()->set_object_is_frozen(*object_is_frozen);
+
Handle<JSFunction> object_is_sealed = SimpleInstallFunction(
object_function, "isSealed", Builtins::kObjectIsSealed, 1, false);
native_context()->set_object_is_sealed(*object_is_sealed);
+
Handle<JSFunction> object_keys = SimpleInstallFunction(
object_function, "keys", Builtins::kObjectKeys, 1, false);
native_context()->set_object_keys(*object_keys);
- SimpleInstallFunction(object_function, "preventExtensions",
- Builtins::kObjectPreventExtensions, 1, false);
- SimpleInstallFunction(object_function, "seal", Builtins::kObjectSeal, 1,
- false);
+ SimpleInstallFunction(isolate->initial_object_prototype(),
+ "__defineGetter__", Builtins::kObjectDefineGetter, 2,
+ true);
+ SimpleInstallFunction(isolate->initial_object_prototype(),
+ "__defineSetter__", Builtins::kObjectDefineSetter, 2,
+ true);
SimpleInstallFunction(isolate->initial_object_prototype(), "hasOwnProperty",
Builtins::kObjectHasOwnProperty, 1, true);
+ SimpleInstallFunction(isolate->initial_object_prototype(),
+ "__lookupGetter__", Builtins::kObjectLookupGetter, 1,
+ true);
+ SimpleInstallFunction(isolate->initial_object_prototype(),
+ "__lookupSetter__", Builtins::kObjectLookupSetter, 1,
+ true);
+ SimpleInstallFunction(
+ isolate->initial_object_prototype(), "propertyIsEnumerable",
+ Builtins::kObjectPrototypePropertyIsEnumerable, 1, false);
}
Handle<JSObject> global(native_context()->global_object());
@@ -1095,7 +1172,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
function_fun->set_prototype_or_initial_map(
*sloppy_function_map_writable_prototype_);
function_fun->shared()->DontAdaptArguments();
- function_fun->shared()->set_construct_stub(
+ function_fun->shared()->SetConstructStub(
*isolate->builtins()->FunctionConstructor());
function_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, function_fun,
@@ -1104,8 +1181,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Setup the methods on the %FunctionPrototype%.
SimpleInstallFunction(prototype, factory->apply_string(),
Builtins::kFunctionPrototypeApply, 2, false);
- SimpleInstallFunction(prototype, factory->bind_string(),
- Builtins::kFunctionPrototypeBind, 1, false);
+
+ if (FLAG_minimal) {
+ SimpleInstallFunction(prototype, factory->bind_string(),
+ Builtins::kFunctionPrototypeBind, 1, false);
+ } else {
+ FastFunctionBindStub bind_stub(isolate);
+ Handle<JSFunction> bind_function = factory->NewFunctionWithoutPrototype(
+ factory->bind_string(), bind_stub.GetCode(), false);
+ bind_function->shared()->DontAdaptArguments();
+ bind_function->shared()->set_length(1);
+ InstallFunction(prototype, bind_function, factory->bind_string(),
+ DONT_ENUM);
+ }
+
SimpleInstallFunction(prototype, factory->call_string(),
Builtins::kFunctionPrototypeCall, 1, false);
SimpleInstallFunction(prototype, factory->toString_string(),
@@ -1115,7 +1204,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> has_instance = InstallFunction(
prototype, factory->has_instance_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, MaybeHandle<JSObject>(),
- Builtins::kFunctionHasInstance,
+ Builtins::kFunctionPrototypeHasInstance,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
// Set the expected parameters for @@hasInstance to 1; required by builtin.
@@ -1124,9 +1213,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Set the length for the function to satisfy ECMA-262.
has_instance->shared()->set_length(1);
- // Install in the native context
- native_context()->set_ordinary_has_instance(*has_instance);
-
// Install the "constructor" property on the %FunctionPrototype%.
JSObject::AddProperty(prototype, factory->constructor_string(),
function_fun, DONT_ENUM);
@@ -1173,7 +1259,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CacheInitialJSArrayMaps(native_context(), initial_map);
ArrayConstructorStub array_constructor_stub(isolate);
Handle<Code> code = array_constructor_stub.GetCode();
- array_function->shared()->set_construct_stub(*code);
+ array_function->shared()->SetConstructStub(*code);
Handle<JSFunction> is_arraylike = SimpleInstallFunction(
array_function, isolate->factory()->InternalizeUtf8String("isArray"),
@@ -1186,11 +1272,37 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
global, "Number", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(), Builtins::kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
- number_fun->shared()->set_construct_stub(
+ number_fun->shared()->SetConstructStub(
*isolate->builtins()->NumberConstructor_ConstructStub());
number_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, number_fun,
Context::NUMBER_FUNCTION_INDEX);
+
+ // Create the %NumberPrototype%
+ Handle<JSValue> prototype =
+ Handle<JSValue>::cast(factory->NewJSObject(number_fun, TENURED));
+ prototype->set_value(Smi::FromInt(0));
+ Accessors::FunctionSetPrototype(number_fun, prototype).Assert();
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), number_fun,
+ DONT_ENUM);
+
+ // Install the Number.prototype methods.
+ SimpleInstallFunction(prototype, "toExponential",
+ Builtins::kNumberPrototypeToExponential, 1, false);
+ SimpleInstallFunction(prototype, "toFixed",
+ Builtins::kNumberPrototypeToFixed, 1, false);
+ SimpleInstallFunction(prototype, "toPrecision",
+ Builtins::kNumberPrototypeToPrecision, 1, false);
+ SimpleInstallFunction(prototype, "toString",
+ Builtins::kNumberPrototypeToString, 1, false);
+ SimpleInstallFunction(prototype, "valueOf",
+ Builtins::kNumberPrototypeValueOf, 0, true);
+
+ // Install i18n fallback functions.
+ SimpleInstallFunction(prototype, "toLocaleString",
+ Builtins::kNumberPrototypeToLocaleString, 0, false);
}
{ // --- B o o l e a n ---
@@ -1199,7 +1311,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype(),
Builtins::kBooleanConstructor);
boolean_fun->shared()->DontAdaptArguments();
- boolean_fun->shared()->set_construct_stub(
+ boolean_fun->shared()->SetConstructStub(
*isolate->builtins()->BooleanConstructor_ConstructStub());
boolean_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
@@ -1217,16 +1329,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install the Boolean.prototype methods.
SimpleInstallFunction(prototype, "toString",
- Builtins::kBooleanPrototypeToString, 0, false);
+ Builtins::kBooleanPrototypeToString, 0, true);
SimpleInstallFunction(prototype, "valueOf",
- Builtins::kBooleanPrototypeValueOf, 0, false);
+ Builtins::kBooleanPrototypeValueOf, 0, true);
}
{ // --- S t r i n g ---
Handle<JSFunction> string_fun = InstallFunction(
global, "String", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
- string_fun->shared()->set_construct_stub(
+ string_fun->shared()->SetConstructStub(
*isolate->builtins()->StringConstructor_ConstructStub());
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
@@ -1251,7 +1363,37 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install the String.fromCharCode function.
SimpleInstallFunction(string_fun, "fromCharCode",
- Builtins::kStringFromCharCode, 1, false);
+ Builtins::kStringFromCharCode, 1, true);
+
+ // Install the String.fromCodePoint function.
+ SimpleInstallFunction(string_fun, "fromCodePoint",
+ Builtins::kStringFromCodePoint, 1, false);
+
+ // Create the %StringPrototype%
+ Handle<JSValue> prototype =
+ Handle<JSValue>::cast(factory->NewJSObject(string_fun, TENURED));
+ prototype->set_value(isolate->heap()->empty_string());
+ Accessors::FunctionSetPrototype(string_fun, prototype).Assert();
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), string_fun,
+ DONT_ENUM);
+
+ // Install the String.prototype methods.
+ SimpleInstallFunction(prototype, "charAt", Builtins::kStringPrototypeCharAt,
+ 1, true);
+ SimpleInstallFunction(prototype, "charCodeAt",
+ Builtins::kStringPrototypeCharCodeAt, 1, true);
+ SimpleInstallFunction(prototype, "toString",
+ Builtins::kStringPrototypeToString, 0, true);
+ SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
+ false);
+ SimpleInstallFunction(prototype, "trimLeft",
+ Builtins::kStringPrototypeTrimLeft, 0, false);
+ SimpleInstallFunction(prototype, "trimRight",
+ Builtins::kStringPrototypeTrimRight, 0, false);
+ SimpleInstallFunction(prototype, "valueOf",
+ Builtins::kStringPrototypeValueOf, 0, true);
}
{
@@ -1261,15 +1403,40 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> symbol_fun =
InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
prototype, Builtins::kSymbolConstructor);
- symbol_fun->shared()->set_construct_stub(
+ symbol_fun->shared()->SetConstructStub(
*isolate->builtins()->SymbolConstructor_ConstructStub());
symbol_fun->shared()->set_length(0);
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("Symbol"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory->constructor_string(), symbol_fun,
DONT_ENUM);
+
+ // Install the Symbol.prototype methods.
+ SimpleInstallFunction(prototype, "toString",
+ Builtins::kSymbolPrototypeToString, 0, true);
+ SimpleInstallFunction(prototype, "valueOf",
+ Builtins::kSymbolPrototypeValueOf, 0, true);
+
+ // Install the @@toPrimitive function.
+ Handle<JSFunction> to_primitive = InstallFunction(
+ prototype, factory->to_primitive_symbol(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kSymbolPrototypeToPrimitive,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Set the expected parameters for @@toPrimitive to 1; required by builtin.
+ to_primitive->shared()->set_internal_formal_parameter_count(1);
+
+ // Set the length for the function to satisfy ECMA-262.
+ to_primitive->shared()->set_length(1);
}
{ // --- D a t e ---
@@ -1281,7 +1448,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kDateConstructor);
InstallWithIntrinsicDefaultProto(isolate, date_fun,
Context::DATE_FUNCTION_INDEX);
- date_fun->shared()->set_construct_stub(
+ date_fun->shared()->SetConstructStub(
*isolate->builtins()->DateConstructor_ConstructStub());
date_fun->shared()->set_length(7);
date_fun->shared()->DontAdaptArguments();
@@ -1381,6 +1548,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
0, true);
SimpleInstallFunction(prototype, "setYear", Builtins::kDatePrototypeSetYear,
1, false);
+ SimpleInstallFunction(prototype, "toJSON", Builtins::kDatePrototypeToJson,
+ 1, false);
// Install i18n fallback functions.
SimpleInstallFunction(prototype, "toLocaleString",
@@ -1412,7 +1581,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kIllegal);
InstallWithIntrinsicDefaultProto(isolate, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
- regexp_fun->shared()->set_construct_stub(
+ regexp_fun->shared()->SetConstructStub(
*isolate->builtins()->JSBuiltinsConstructStub());
DCHECK(regexp_fun->has_initial_map());
@@ -1438,76 +1607,69 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- E r r o r
- Handle<JSFunction> error_fun = InstallFunction(
- global, "Error", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, error_fun,
- Context::ERROR_FUNCTION_INDEX);
+ InstallError(isolate, global, factory->Error_string(),
+ Context::ERROR_FUNCTION_INDEX);
+ InstallMakeError(isolate, isolate->builtins()->MakeError(),
+ Context::MAKE_ERROR_INDEX);
}
{ // -- E v a l E r r o r
- Handle<JSFunction> eval_error_fun = InstallFunction(
- global, "EvalError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, eval_error_fun,
- Context::EVAL_ERROR_FUNCTION_INDEX);
+ InstallError(isolate, global, factory->EvalError_string(),
+ Context::EVAL_ERROR_FUNCTION_INDEX);
}
{ // -- R a n g e E r r o r
- Handle<JSFunction> range_error_fun = InstallFunction(
- global, "RangeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, range_error_fun,
- Context::RANGE_ERROR_FUNCTION_INDEX);
+ InstallError(isolate, global, factory->RangeError_string(),
+ Context::RANGE_ERROR_FUNCTION_INDEX);
+ InstallMakeError(isolate, isolate->builtins()->MakeRangeError(),
+ Context::MAKE_RANGE_ERROR_INDEX);
}
{ // -- R e f e r e n c e E r r o r
- Handle<JSFunction> reference_error_fun = InstallFunction(
- global, "ReferenceError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, reference_error_fun,
- Context::REFERENCE_ERROR_FUNCTION_INDEX);
+ InstallError(isolate, global, factory->ReferenceError_string(),
+ Context::REFERENCE_ERROR_FUNCTION_INDEX);
}
{ // -- S y n t a x E r r o r
- Handle<JSFunction> syntax_error_fun = InstallFunction(
- global, "SyntaxError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, syntax_error_fun,
- Context::SYNTAX_ERROR_FUNCTION_INDEX);
+ InstallError(isolate, global, factory->SyntaxError_string(),
+ Context::SYNTAX_ERROR_FUNCTION_INDEX);
+ InstallMakeError(isolate, isolate->builtins()->MakeSyntaxError(),
+ Context::MAKE_SYNTAX_ERROR_INDEX);
}
{ // -- T y p e E r r o r
- Handle<JSFunction> type_error_fun = InstallFunction(
- global, "TypeError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, type_error_fun,
- Context::TYPE_ERROR_FUNCTION_INDEX);
+ InstallError(isolate, global, factory->TypeError_string(),
+ Context::TYPE_ERROR_FUNCTION_INDEX);
+ InstallMakeError(isolate, isolate->builtins()->MakeTypeError(),
+ Context::MAKE_TYPE_ERROR_INDEX);
}
{ // -- U R I E r r o r
- Handle<JSFunction> uri_error_fun = InstallFunction(
- global, "URIError", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
- InstallWithIntrinsicDefaultProto(isolate, uri_error_fun,
- Context::URI_ERROR_FUNCTION_INDEX);
+ InstallError(isolate, global, factory->URIError_string(),
+ Context::URI_ERROR_FUNCTION_INDEX);
+ InstallMakeError(isolate, isolate->builtins()->MakeURIError(),
+ Context::MAKE_URI_ERROR_INDEX);
}
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
- if (context_type == THIN_CONTEXT) return;
-
{ // -- J S O N
Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->shared()->set_instance_class_name(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
+ SimpleInstallFunction(json_object, "parse", Builtins::kJsonParse, 2, false);
+ SimpleInstallFunction(json_object, "stringify", Builtins::kJsonStringify, 3,
+ true);
+ JSObject::AddProperty(
+ json_object, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("JSON"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
{ // -- M a t h
@@ -1516,36 +1678,121 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSFunction::SetInstancePrototype(
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
- cons->shared()->set_instance_class_name(*name);
Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
DCHECK(math->IsJSObject());
JSObject::AddProperty(global, name, math, DONT_ENUM);
+ SimpleInstallFunction(math, "abs", Builtins::kMathAbs, 1, true);
SimpleInstallFunction(math, "acos", Builtins::kMathAcos, 1, true);
+ SimpleInstallFunction(math, "acosh", Builtins::kMathAcosh, 1, true);
SimpleInstallFunction(math, "asin", Builtins::kMathAsin, 1, true);
+ SimpleInstallFunction(math, "asinh", Builtins::kMathAsinh, 1, true);
SimpleInstallFunction(math, "atan", Builtins::kMathAtan, 1, true);
+ SimpleInstallFunction(math, "atanh", Builtins::kMathAtanh, 1, true);
+ SimpleInstallFunction(math, "atan2", Builtins::kMathAtan2, 2, true);
SimpleInstallFunction(math, "ceil", Builtins::kMathCeil, 1, true);
+ SimpleInstallFunction(math, "cbrt", Builtins::kMathCbrt, 1, true);
+ SimpleInstallFunction(math, "expm1", Builtins::kMathExpm1, 1, true);
SimpleInstallFunction(math, "clz32", Builtins::kMathClz32, 1, true);
+ SimpleInstallFunction(math, "cos", Builtins::kMathCos, 1, true);
+ SimpleInstallFunction(math, "cosh", Builtins::kMathCosh, 1, true);
+ SimpleInstallFunction(math, "exp", Builtins::kMathExp, 1, true);
Handle<JSFunction> math_floor =
SimpleInstallFunction(math, "floor", Builtins::kMathFloor, 1, true);
native_context()->set_math_floor(*math_floor);
SimpleInstallFunction(math, "fround", Builtins::kMathFround, 1, true);
+ SimpleInstallFunction(math, "hypot", Builtins::kMathHypot, 2, false);
SimpleInstallFunction(math, "imul", Builtins::kMathImul, 2, true);
+ SimpleInstallFunction(math, "log", Builtins::kMathLog, 1, true);
+ SimpleInstallFunction(math, "log1p", Builtins::kMathLog1p, 1, true);
+ SimpleInstallFunction(math, "log2", Builtins::kMathLog2, 1, true);
+ SimpleInstallFunction(math, "log10", Builtins::kMathLog10, 1, true);
SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
+ Handle<JSFunction> math_pow =
+ SimpleInstallFunction(math, "pow", Builtins::kMathPow, 2, true);
+ native_context()->set_math_pow(*math_pow);
SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
- Handle<JSFunction> math_sqrt =
- SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
- native_context()->set_math_sqrt(*math_sqrt);
+ SimpleInstallFunction(math, "sign", Builtins::kMathSign, 1, true);
+ SimpleInstallFunction(math, "sin", Builtins::kMathSin, 1, true);
+ SimpleInstallFunction(math, "sinh", Builtins::kMathSinh, 1, true);
+ SimpleInstallFunction(math, "sqrt", Builtins::kMathSqrt, 1, true);
+ SimpleInstallFunction(math, "tan", Builtins::kMathTan, 1, true);
+ SimpleInstallFunction(math, "tanh", Builtins::kMathTanh, 1, true);
SimpleInstallFunction(math, "trunc", Builtins::kMathTrunc, 1, true);
+
+ // Install math constants.
+ double const kE = base::ieee754::exp(1.0);
+ double const kPI = 3.1415926535897932;
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("E"), factory->NewNumber(kE),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("LN10"),
+ factory->NewNumber(base::ieee754::log(10.0)),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("LN2"),
+ factory->NewNumber(base::ieee754::log(2.0)),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("LOG10E"),
+ factory->NewNumber(base::ieee754::log10(kE)),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("LOG2E"),
+ factory->NewNumber(base::ieee754::log2(kE)),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("PI"), factory->NewNumber(kPI),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("SQRT1_2"),
+ factory->NewNumber(std::sqrt(0.5)),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->NewStringFromAsciiChecked("SQRT2"),
+ factory->NewNumber(std::sqrt(2.0)),
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
}
{ // -- A r r a y B u f f e r
- Handle<JSFunction> array_buffer_fun =
- InstallArrayBuffer(global, "ArrayBuffer");
+ Handle<JSFunction> array_buffer_fun = InstallArrayBuffer(
+ global, "ArrayBuffer", Builtins::kArrayBufferPrototypeGetByteLength,
+ BuiltinFunctionId::kArrayBufferByteLength);
InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
Context::ARRAY_BUFFER_FUN_INDEX);
}
+ { // -- T y p e d A r r a y
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ native_context()->set_typed_array_prototype(*prototype);
+
+ Handle<JSFunction> typed_array_fun =
+ CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, prototype,
+ Builtins::kIllegal);
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(),
+ typed_array_fun, DONT_ENUM);
+ native_context()->set_typed_array_function(*typed_array_fun);
+
+ // Install the "buffer", "byteOffset", "byteLength" and "length"
+ // getters on the {prototype}.
+ SimpleInstallGetter(prototype, factory->buffer_string(),
+ Builtins::kTypedArrayPrototypeBuffer, false);
+ SimpleInstallGetter(prototype, factory->byte_length_string(),
+ Builtins::kTypedArrayPrototypeByteLength, true,
+ kTypedArrayByteLength);
+ SimpleInstallGetter(prototype, factory->byte_offset_string(),
+ Builtins::kTypedArrayPrototypeByteOffset, true,
+ kTypedArrayByteOffset);
+ SimpleInstallGetter(prototype, factory->length_string(),
+ Builtins::kTypedArrayPrototypeLength, true,
+ kTypedArrayLength);
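+ // The concrete constructors (Uint8Array etc.) are installed separately in
+ // InstallTypedArray below, which re-parents them so that, observably from
+ // JS, Uint8Array.__proto__ is this TypedArray function and
+ // Uint8Array.prototype.__proto__ is this prototype.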
+ }
+
{ // -- T y p e d A r r a y s
#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
@@ -1556,17 +1803,43 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
+ }
- Handle<JSFunction> data_view_fun = InstallFunction(
- global, "DataView", JS_DATA_VIEW_TYPE,
- JSDataView::kSizeWithInternalFields,
- isolate->initial_object_prototype(), Builtins::kDataViewConstructor);
+ { // -- D a t a V i e w
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ Handle<JSFunction> data_view_fun =
+ InstallFunction(global, "DataView", JS_DATA_VIEW_TYPE,
+ JSDataView::kSizeWithInternalFields, prototype,
+ Builtins::kDataViewConstructor);
InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
Context::DATA_VIEW_FUN_INDEX);
- data_view_fun->shared()->set_construct_stub(
+ data_view_fun->shared()->SetConstructStub(
*isolate->builtins()->DataViewConstructor_ConstructStub());
data_view_fun->shared()->set_length(3);
data_view_fun->shared()->DontAdaptArguments();
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("DataView"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(),
+ data_view_fun, DONT_ENUM);
+
+ // Install the "buffer", "byteOffset" and "byteLength" getters
+ // on the {prototype}.
+ SimpleInstallGetter(prototype, factory->buffer_string(),
+ Builtins::kDataViewPrototypeGetBuffer, false,
+ kDataViewBuffer);
+ SimpleInstallGetter(prototype, factory->byte_length_string(),
+ Builtins::kDataViewPrototypeGetByteLength, false,
+ kDataViewByteLength);
+ SimpleInstallGetter(prototype, factory->byte_offset_string(),
+ Builtins::kDataViewPrototypeGetByteOffset, false,
+ kDataViewByteOffset);
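+ // Per the spec these live on the prototype as accessors rather than as own
+ // data properties of each view; e.g. from JS:
+ //   const dv = new DataView(new ArrayBuffer(8), 4);
+ //   dv.byteOffset; // 4, via the getter installed above
+ //   dv.byteLength; // 4 (8 - 4), likewise computed by its getter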
}
{ // -- M a p
@@ -1638,7 +1911,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
proxy_function, Handle<Map>(native_context()->proxy_map(), isolate),
factory->null_value());
- proxy_function->shared()->set_construct_stub(
+ proxy_function->shared()->SetConstructStub(
*isolate->builtins()->ProxyConstructor_ConstructStub());
proxy_function->shared()->set_internal_formal_parameter_count(2);
proxy_function->shared()->set_length(2);
@@ -1702,18 +1975,21 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
Map::EnsureDescriptorSlack(map, 2);
+ Handle<AccessorInfo> bound_length =
+ Accessors::BoundFunctionLengthInfo(isolate, roc_attribs);
{ // length
- DataDescriptor d(factory->length_string(), JSBoundFunction::kLengthIndex,
- roc_attribs, Representation::Tagged());
+ AccessorConstantDescriptor d(factory->length_string(), bound_length,
+ roc_attribs);
map->AppendDescriptor(&d);
}
- { // name
- DataDescriptor d(factory->name_string(), JSBoundFunction::kNameIndex,
- roc_attribs, Representation::Tagged());
+ Handle<AccessorInfo> bound_name =
+ Accessors::BoundFunctionNameInfo(isolate, roc_attribs);
+ { // name
+ AccessorConstantDescriptor d(factory->name_string(), bound_name,
+ roc_attribs);
map->AppendDescriptor(&d);
}
-
- map->SetInObjectProperties(2);
+ map->SetInObjectProperties(0);
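+ // With "length" and "name" served by native AccessorInfo getters (computed
+ // from the bound target function), bound functions need no in-object
+ // fields; both still appear as read-only properties, now of accessor kind.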
native_context()->set_bound_function_without_constructor_map(*map);
map = Map::Copy(map, "IsConstructor");
@@ -1732,7 +2008,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
function->shared()->set_instance_class_name(*arguments_string);
Handle<Map> map = factory->NewMap(
- JS_OBJECT_TYPE, JSSloppyArgumentsObject::kSize, FAST_ELEMENTS);
+ JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
@@ -1792,7 +2068,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(
- JS_OBJECT_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
+ JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 3);
@@ -1862,13 +2138,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
} // NOLINT(readability/fn_size)
-
void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
Handle<JSFunction>* fun) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
- Handle<JSFunction> result = InstallFunction(
- global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
- isolate()->initial_object_prototype(), Builtins::kIllegal);
+
+ Handle<JSObject> typed_array_prototype =
+ Handle<JSObject>(isolate()->typed_array_prototype());
+ Handle<JSFunction> typed_array_function =
+ Handle<JSFunction>(isolate()->typed_array_function());
+
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSFunction> result =
+ InstallFunction(global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
+ prototype, Builtins::kIllegal);
Handle<Map> initial_map = isolate()->factory()->NewMap(
JS_TYPED_ARRAY_TYPE,
@@ -1876,6 +2159,14 @@ void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
elements_kind);
JSFunction::SetInitialMap(result, initial_map,
handle(initial_map->prototype(), isolate()));
+
+ CHECK(JSObject::SetPrototype(result, typed_array_function, false,
+ Object::DONT_THROW)
+ .FromJust());
+
+ CHECK(JSObject::SetPrototype(prototype, typed_array_prototype, false,
+ Object::DONT_THROW)
+ .FromJust());
*fun = result;
}
@@ -1886,7 +2177,7 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
- FEATURE_INITIALIZE_GLOBAL(promise_extra, "")
+ FEATURE_INITIALIZE_GLOBAL(intl_extra, "")
#undef FEATURE_INITIALIZE_GLOBAL
}
@@ -2098,8 +2389,6 @@ void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
JSObject::AddProperty(global, natives_key, utils, DONT_ENUM);
break;
}
- case THIN_CONTEXT:
- break;
}
// The utils object can be removed for cases that reach this point.
@@ -2129,7 +2418,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<JSFunction> to_string = InstallFunction(
container, "object_to_string", JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), Builtins::kObjectProtoToString);
- to_string->shared()->DontAdaptArguments();
+ to_string->shared()->set_internal_formal_parameter_count(0);
to_string->shared()->set_length(0);
native_context->set_object_to_string(*to_string);
}
@@ -2162,13 +2451,20 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
generator_function_function->set_prototype_or_initial_map(
native_context->sloppy_generator_function_map());
generator_function_function->shared()->DontAdaptArguments();
- generator_function_function->shared()->set_construct_stub(
+ generator_function_function->shared()->SetConstructStub(
*isolate->builtins()->GeneratorFunctionConstructor());
generator_function_function->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, generator_function_function,
Context::GENERATOR_FUNCTION_FUNCTION_INDEX);
+ JSObject::ForceSetPrototype(generator_function_function,
+ isolate->function_function());
+ JSObject::AddProperty(
+ generator_function_prototype, factory->constructor_string(),
+ generator_function_function,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
native_context->sloppy_generator_function_map()->SetConstructor(
*generator_function_function);
native_context->strict_generator_function_map()->SetConstructor(
@@ -2178,7 +2474,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
{ // -- S e t I t e r a t o r
Handle<JSObject> set_iterator_prototype =
isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
- SetObjectPrototype(set_iterator_prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(set_iterator_prototype, iterator_prototype);
Handle<JSFunction> set_iterator_function = InstallFunction(
container, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize,
set_iterator_prototype, Builtins::kIllegal);
@@ -2188,7 +2484,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
{ // -- M a p I t e r a t o r
Handle<JSObject> map_iterator_prototype =
isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
- SetObjectPrototype(map_iterator_prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(map_iterator_prototype, iterator_prototype);
Handle<JSFunction> map_iterator_function = InstallFunction(
container, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize,
map_iterator_prototype, Builtins::kIllegal);
@@ -2199,7 +2495,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
// Builtin functions for Script.
Handle<JSFunction> script_fun = InstallFunction(
container, "Script", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ isolate->initial_object_prototype(), Builtins::kUnsupportedThrower);
Handle<JSObject> prototype =
factory->NewJSObject(isolate->object_function(), TENURED);
Accessors::FunctionSetPrototype(script_fun, prototype).Assert();
@@ -2341,6 +2637,100 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
script_is_embedder_debug_script, attribs);
script_map->AppendDescriptor(&d);
}
+
+ {
+ PrototypeIterator iter(native_context->sloppy_async_function_map());
+ Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
+
+ static const bool kUseStrictFunctionMap = true;
+ Handle<JSFunction> async_function_constructor = InstallFunction(
+ container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
+ async_function_prototype, Builtins::kAsyncFunctionConstructor,
+ kUseStrictFunctionMap);
+ async_function_constructor->shared()->DontAdaptArguments();
+ async_function_constructor->shared()->SetConstructStub(
+ *isolate->builtins()->AsyncFunctionConstructor());
+ async_function_constructor->shared()->set_length(1);
+ InstallWithIntrinsicDefaultProto(isolate, async_function_constructor,
+ Context::ASYNC_FUNCTION_FUNCTION_INDEX);
+ JSObject::ForceSetPrototype(async_function_constructor,
+ isolate->function_function());
+
+ JSObject::AddProperty(
+ async_function_prototype, factory->constructor_string(),
+ async_function_constructor,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ JSFunction::SetPrototype(async_function_constructor,
+ async_function_prototype);
+
+ Handle<JSFunction> async_function_next =
+ SimpleInstallFunction(container, "AsyncFunctionNext",
+ Builtins::kGeneratorPrototypeNext, 1, true);
+ Handle<JSFunction> async_function_throw =
+ SimpleInstallFunction(container, "AsyncFunctionThrow",
+ Builtins::kGeneratorPrototypeThrow, 1, true);
+ async_function_next->shared()->set_native(false);
+ async_function_throw->shared()->set_native(false);
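+ // These reuse the generator resume builtins on purpose: in this version
+ // async functions are desugared onto the generator machinery (see
+ // harmony-async-await.js), with promises driving each resumption.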
+ }
+ }
+
+ { // -- C a l l S i t e
+ // Builtin functions for CallSite.
+
+ // CallSites are a special case; the constructor is for our private use
+ // only, therefore we set it up as a builtin that throws. Internally, we use
+ // CallSiteUtils::Construct to create CallSite objects.
+
+ Handle<JSFunction> callsite_fun = InstallFunction(
+ container, "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ isolate->initial_object_prototype(), Builtins::kUnsupportedThrower);
+ callsite_fun->shared()->DontAdaptArguments();
+ callsite_fun->shared()->set_native(true);
+
+ isolate->native_context()->set_callsite_function(*callsite_fun);
+
+ {
+ Handle<JSObject> proto =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ JSObject::AddProperty(proto, factory->constructor_string(), callsite_fun,
+ DONT_ENUM);
+
+ struct FunctionInfo {
+ const char* name;
+ Builtins::Name id;
+ };
+
+ FunctionInfo infos[] = {
+ {"getColumnNumber", Builtins::kCallSitePrototypeGetColumnNumber},
+ {"getEvalOrigin", Builtins::kCallSitePrototypeGetEvalOrigin},
+ {"getFileName", Builtins::kCallSitePrototypeGetFileName},
+ {"getFunction", Builtins::kCallSitePrototypeGetFunction},
+ {"getFunctionName", Builtins::kCallSitePrototypeGetFunctionName},
+ {"getLineNumber", Builtins::kCallSitePrototypeGetLineNumber},
+ {"getMethodName", Builtins::kCallSitePrototypeGetMethodName},
+ {"getPosition", Builtins::kCallSitePrototypeGetPosition},
+ {"getScriptNameOrSourceURL",
+ Builtins::kCallSitePrototypeGetScriptNameOrSourceURL},
+ {"getThis", Builtins::kCallSitePrototypeGetThis},
+ {"getTypeName", Builtins::kCallSitePrototypeGetTypeName},
+ {"isConstructor", Builtins::kCallSitePrototypeIsConstructor},
+ {"isEval", Builtins::kCallSitePrototypeIsEval},
+ {"isNative", Builtins::kCallSitePrototypeIsNative},
+ {"isToplevel", Builtins::kCallSitePrototypeIsToplevel},
+ {"toString", Builtins::kCallSitePrototypeToString}};
+
+ PropertyAttributes attrs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ Handle<JSFunction> fun;
+ for (const FunctionInfo& info : infos) {
+ fun = SimpleInstallFunction(proto, info.name, info.id, 0, true, attrs);
+ fun->shared()->set_native(true);
+ }
+
+ Accessors::FunctionSetPrototype(callsite_fun, proto).Assert();
+ }
}
}
@@ -2357,7 +2747,7 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
isolate->factory()->ToBoolean(FLAG), NONE); \
}
- INITIALIZE_FLAG(FLAG_harmony_species)
+ INITIALIZE_FLAG(FLAG_intl_extra)
#undef INITIALIZE_FLAG
}
@@ -2366,24 +2756,23 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_exec)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_for_in)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(intl_extra)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_explicit_tailcalls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_exponentiation_operator)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
+#ifdef V8_I18N_SUPPORT
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
+#endif
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_async_await)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -2399,26 +2788,32 @@ void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
}
-void Genesis::InitializeGlobal_harmony_regexp_subclass() {
- if (!FLAG_harmony_regexp_subclass) return;
- InstallPublicSymbol(factory(), native_context(), "match",
- factory()->match_symbol());
- InstallPublicSymbol(factory(), native_context(), "replace",
- factory()->replace_symbol());
- InstallPublicSymbol(factory(), native_context(), "search",
- factory()->search_symbol());
- InstallPublicSymbol(factory(), native_context(), "split",
- factory()->split_symbol());
-}
-
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
Handle<JSGlobalObject> global(native_context()->global_object());
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
Handle<JSFunction> shared_array_buffer_fun =
- InstallArrayBuffer(global, "SharedArrayBuffer");
+ InstallArrayBuffer(global, "SharedArrayBuffer",
+ Builtins::kSharedArrayBufferPrototypeGetByteLength,
+ BuiltinFunctionId::kSharedArrayBufferByteLength);
native_context()->set_shared_array_buffer_fun(*shared_array_buffer_fun);
+
+ Handle<String> name = factory->InternalizeUtf8String("Atomics");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons,
+ Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ Handle<JSObject> atomics_object = factory->NewJSObject(cons, TENURED);
+ DCHECK(atomics_object->IsJSObject());
+ JSObject::AddProperty(global, name, atomics_object, DONT_ENUM);
+
+ SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("load"),
+ Builtins::kAtomicsLoad, 2, true);
+ SimpleInstallFunction(atomics_object, factory->InternalizeUtf8String("store"),
+ Builtins::kAtomicsStore, 3, true);
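+ // Only these two operations are C++ builtins here; the rest of Atomics
+ // still comes from the harmony-atomics.js native (see
+ // harmony_sharedarraybuffer_natives). Usage from JS:
+ //   const ta = new Int32Array(new SharedArrayBuffer(8));
+ //   Atomics.store(ta, 0, 42);
+ //   Atomics.load(ta, 0); // 42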
}
@@ -2504,7 +2899,10 @@ void Genesis::InitializeGlobal_harmony_array_prototype_values() {
}
Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
- const char* name) {
+ const char* name,
+ Builtins::Name call,
+ BuiltinFunctionId id) {
+ // Create the %ArrayBufferPrototype%.
// Set up the {prototype} with the given {name} for @@toStringTag.
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
@@ -2517,7 +2915,7 @@ Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
InstallFunction(target, name, JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSizeWithInternalFields, prototype,
Builtins::kArrayBufferConstructor);
- array_buffer_fun->shared()->set_construct_stub(
+ array_buffer_fun->shared()->SetConstructStub(
*isolate()->builtins()->ArrayBufferConstructor_ConstructStub());
array_buffer_fun->shared()->DontAdaptArguments();
array_buffer_fun->shared()->set_length(1);
@@ -2529,14 +2927,11 @@ Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
SimpleInstallFunction(array_buffer_fun, factory()->isView_string(),
Builtins::kArrayBufferIsView, 1, true);
- return array_buffer_fun;
-}
+ // Install the "byteLength" getter on the {prototype}.
+ SimpleInstallGetter(prototype, factory()->byte_length_string(), call, false,
+ id);
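+ // One code path now serves both ArrayBuffer and SharedArrayBuffer; the
+ // callers differ only in the byteLength getter builtin and the
+ // BuiltinFunctionId they pass in.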
-
-void Genesis::InitializeGlobal_harmony_species() {
- if (!FLAG_harmony_species) return;
- InstallPublicSymbol(factory(), native_context(), "species",
- factory()->species_symbol());
+ return array_buffer_fun;
}
@@ -2557,7 +2952,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode();
- array_function->shared()->set_construct_stub(*code);
+ array_function->shared()->SetConstructStub(*code);
array_function->shared()->DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map());
@@ -2606,9 +3001,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
DCHECK_EQ(builtin_index, Natives::GetIndex("runtime"));
if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
- // A thin context is ready at this point.
- if (context_type == THIN_CONTEXT) return true;
-
{
// Builtin function for OpaqueReference -- a JSValue-based object,
// that keeps its field isolated from JavaScript code. It may store
@@ -2643,11 +3035,15 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
}
if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
+ auto fast_template_instantiations_cache = isolate()->factory()->NewFixedArray(
+ TemplateInfo::kFastTemplateInstantiationsCacheSize);
+ native_context()->set_fast_template_instantiations_cache(
+ *fast_template_instantiations_cache);
- auto template_instantiations_cache = UnseededNumberDictionary::New(
+ auto slow_template_instantiations_cache = UnseededNumberDictionary::New(
isolate(), ApiNatives::kInitialFunctionCacheSize);
- native_context()->set_template_instantiations_cache(
- *template_instantiations_cache);
+ native_context()->set_slow_template_instantiations_cache(
+ *slow_template_instantiations_cache);
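+ // The template instantiation cache is now two-tiered: a plain FixedArray
+ // fast path for small cache indices, backed by a number dictionary for
+ // everything that does not fit.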
// Store the map for the %ObjectPrototype% after the natives have been compiled
// and the Object function has been set up.
@@ -2657,6 +3053,14 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
native_context()->set_object_function_prototype_map(
HeapObject::cast(object_function->initial_map()->prototype())->map());
+ // Set up the map for Object.create(null) instances.
+ Handle<Map> object_with_null_prototype_map =
+ Map::CopyInitialMap(handle(object_function->initial_map(), isolate()));
+ Map::SetPrototype(object_with_null_prototype_map,
+ isolate()->factory()->null_value());
+ native_context()->set_object_with_null_prototype_map(
+ *object_with_null_prototype_map);
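+ // A dedicated initial map means every Object.create(null) instance can
+ // share it, rather than each object transitioning away from the default
+ // %Object% map on its own.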
+
// Store the map for the %StringPrototype% after the natives have been compiled
// and the String function has been set up.
Handle<JSFunction> string_function(native_context()->string_function());
@@ -2665,11 +3069,40 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
native_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
+ Handle<JSGlobalObject> global_object =
+ handle(native_context()->global_object());
+
+ // Install Global.decodeURI.
+ SimpleInstallFunction(global_object, "decodeURI", Builtins::kGlobalDecodeURI,
+ 1, false, kGlobalDecodeURI);
+
+ // Install Global.decodeURIComponent.
+ SimpleInstallFunction(global_object, "decodeURIComponent",
+ Builtins::kGlobalDecodeURIComponent, 1, false,
+ kGlobalDecodeURIComponent);
+
+ // Install Global.encodeURI.
+ SimpleInstallFunction(global_object, "encodeURI", Builtins::kGlobalEncodeURI,
+ 1, false, kGlobalEncodeURI);
+
+ // Install Global.encodeURIComponent.
+ SimpleInstallFunction(global_object, "encodeURIComponent",
+ Builtins::kGlobalEncodeURIComponent, 1, false,
+ kGlobalEncodeURIComponent);
+
+ // Install Global.escape.
+ SimpleInstallFunction(global_object, "escape", Builtins::kGlobalEscape, 1,
+ false, kGlobalEscape);
+
+ // Install Global.unescape.
+ SimpleInstallFunction(global_object, "unescape", Builtins::kGlobalUnescape, 1,
+ false, kGlobalUnescape);
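+ // All six URI/Annex-B functions are now C++ builtins with
+ // BuiltinFunctionIds; behavior is the standard one, e.g.:
+ //   encodeURIComponent("a b"); // "a%20b"
+ //   unescape("%u0041"); // "A"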
+
// Install Global.eval.
{
- Handle<JSFunction> eval = SimpleInstallFunction(
- handle(native_context()->global_object()), factory()->eval_string(),
- Builtins::kGlobalEval, 1, false);
+ Handle<JSFunction> eval =
+ SimpleInstallFunction(global_object, factory()->eval_string(),
+ Builtins::kGlobalEval, 1, false);
native_context()->set_global_eval_fun(*eval);
}
@@ -2712,11 +3145,10 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
{
Handle<String> key = factory()->Promise_string();
Handle<JSFunction> function = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(handle(native_context()->global_object()), key)
- .ToHandleChecked());
+ JSReceiver::GetProperty(global_object, key).ToHandleChecked());
JSFunction::EnsureHasInitialMap(function);
function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
- function->shared()->set_construct_stub(
+ function->shared()->SetConstructStub(
*isolate()->builtins()->JSBuiltinsConstructStub());
InstallWithIntrinsicDefaultProto(isolate(), function,
Context::PROMISE_FUNCTION_INDEX);
@@ -2724,37 +3156,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
InstallBuiltinFunctionIds();
- // Also install builtin function ids to some generator object methods. These
- // three methods use the three resume operations (Runtime_GeneratorNext,
- // Runtime_GeneratorReturn, Runtime_GeneratorThrow) respectively. Those
- // operations are not supported by Crankshaft, TurboFan, nor Ignition.
- {
- Handle<JSObject> generator_object_prototype(JSObject::cast(
- native_context()->generator_object_prototype_map()->prototype()));
-
- { // GeneratorObject.prototype.next
- Handle<String> key = factory()->next_string();
- Handle<JSFunction> function = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(generator_object_prototype, key)
- .ToHandleChecked());
- function->shared()->set_builtin_function_id(kGeneratorObjectNext);
- }
- { // GeneratorObject.prototype.return
- Handle<String> key = factory()->NewStringFromAsciiChecked("return");
- Handle<JSFunction> function = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(generator_object_prototype, key)
- .ToHandleChecked());
- function->shared()->set_builtin_function_id(kGeneratorObjectReturn);
- }
- { // GeneratorObject.prototype.throw
- Handle<String> key = factory()->throw_string();
- Handle<JSFunction> function = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(generator_object_prototype, key)
- .ToHandleChecked());
- function->shared()->set_builtin_function_id(kGeneratorObjectThrow);
- }
- }
-
// Create a map for accessor property descriptors (a variant of JSObject
// that predefines four properties get, set, configurable and enumerable).
{
@@ -2935,40 +3336,34 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
bool Genesis::InstallExperimentalNatives() {
- static const char* harmony_iterator_close_natives[] = {nullptr};
- static const char* harmony_sloppy_natives[] = {nullptr};
- static const char* harmony_sloppy_function_natives[] = {nullptr};
- static const char* harmony_sloppy_let_natives[] = {nullptr};
- static const char* harmony_species_natives[] = {"native harmony-species.js",
- nullptr};
+ static const char* harmony_explicit_tailcalls_natives[] = {nullptr};
static const char* harmony_tailcalls_natives[] = {nullptr};
- static const char* harmony_unicode_regexps_natives[] = {
- "native harmony-unicode-regexps.js", nullptr};
- static const char* harmony_object_observe_natives[] = {
- "native harmony-object-observe.js", nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
- "native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
+ "native harmony-atomics.js", NULL};
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
- static const char* harmony_regexp_exec_natives[] = {
- "native harmony-regexp-exec.js", nullptr};
- static const char* harmony_regexp_subclass_natives[] = {nullptr};
+ static const char* harmony_for_in_natives[] = {nullptr};
static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
- static const char* harmony_instanceof_natives[] = {nullptr};
static const char* harmony_restrictive_declarations_natives[] = {nullptr};
+ static const char* harmony_regexp_named_captures_natives[] = {nullptr};
static const char* harmony_regexp_property_natives[] = {nullptr};
- static const char* harmony_function_name_natives[] = {nullptr};
static const char* harmony_function_sent_natives[] = {nullptr};
- static const char* promise_extra_natives[] = {"native promise-extra.js",
- nullptr};
+ static const char* intl_extra_natives[] = {"native intl-extra.js", nullptr};
static const char* harmony_object_values_entries_natives[] = {nullptr};
static const char* harmony_object_own_property_descriptors_natives[] = {
nullptr};
static const char* harmony_array_prototype_values_natives[] = {nullptr};
- static const char* harmony_exponentiation_operator_natives[] = {nullptr};
static const char* harmony_string_padding_natives[] = {
"native harmony-string-padding.js", nullptr};
+#ifdef V8_I18N_SUPPORT
+ static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
+ nullptr};
+#endif
+ static const char* harmony_async_await_natives[] = {
+ "native harmony-async-await.js", nullptr};
+ static const char* harmony_restrictive_generators_natives[] = {nullptr};
+ static const char* harmony_trailing_commas_natives[] = {nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2987,7 +3382,7 @@ bool Genesis::InstallExperimentalNatives() {
HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
- INSTALL_EXPERIMENTAL_NATIVES(promise_extra, "");
+ INSTALL_EXPERIMENTAL_NATIVES(intl_extra, "");
#undef INSTALL_EXPERIMENTAL_NATIVES
}
@@ -3142,9 +3537,7 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
}
- if (FLAG_expose_wasm) {
- WasmJs::Install(isolate, global);
- }
+ WasmJs::Install(isolate, global);
return true;
}
@@ -3154,12 +3547,12 @@ static uint32_t Hash(RegisteredExtension* extension) {
return v8::internal::ComputePointerHash(extension);
}
-
-Genesis::ExtensionStates::ExtensionStates() : map_(HashMap::PointersMatch, 8) {}
+Genesis::ExtensionStates::ExtensionStates()
+ : map_(base::HashMap::PointersMatch, 8) {}
Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
RegisteredExtension* extension) {
- i::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension));
+ base::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension));
if (entry == NULL) {
return UNVISITED;
}
@@ -3179,17 +3572,20 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
Isolate* isolate = native_context->GetIsolate();
ExtensionStates extension_states; // All extensions have state UNVISITED.
return InstallAutoExtensions(isolate, &extension_states) &&
- (!FLAG_expose_free_buffer ||
- InstallExtension(isolate, "v8/free-buffer", &extension_states)) &&
- (!FLAG_expose_gc ||
- InstallExtension(isolate, "v8/gc", &extension_states)) &&
- (!FLAG_expose_externalize_string ||
- InstallExtension(isolate, "v8/externalize", &extension_states)) &&
- (!FLAG_track_gc_object_stats ||
- InstallExtension(isolate, "v8/statistics", &extension_states)) &&
- (!FLAG_expose_trigger_failure ||
- InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
- InstallRequestedExtensions(isolate, extensions, &extension_states);
+ (!FLAG_expose_free_buffer ||
+ InstallExtension(isolate, "v8/free-buffer", &extension_states)) &&
+ (!FLAG_expose_gc ||
+ InstallExtension(isolate, "v8/gc", &extension_states)) &&
+ (!FLAG_expose_externalize_string ||
+ InstallExtension(isolate, "v8/externalize", &extension_states)) &&
+ (!FLAG_track_gc_object_stats ||
+ InstallExtension(isolate, "v8/statistics", &extension_states)) &&
+ (!FLAG_expose_trigger_failure ||
+ InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
+ (!(FLAG_ignition && FLAG_trace_ignition_dispatches) ||
+ InstallExtension(isolate, "v8/ignition-statistics",
+ &extension_states)) &&
+ InstallRequestedExtensions(isolate, extensions, &extension_states);
}
@@ -3293,14 +3689,14 @@ bool Genesis::ConfigureGlobalObjects(
// Configure the global object.
Handle<FunctionTemplateInfo> proxy_constructor(
FunctionTemplateInfo::cast(global_proxy_data->constructor()));
- if (!proxy_constructor->prototype_template()->IsUndefined()) {
+ if (!proxy_constructor->prototype_template()->IsUndefined(isolate())) {
Handle<ObjectTemplateInfo> global_object_data(
ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
if (!ConfigureApiObject(global_object, global_object_data)) return false;
}
}
- SetObjectPrototype(global_proxy, global_object);
+ JSObject::ForceSetPrototype(global_proxy, global_object);
native_context()->set_initial_array_prototype(
JSArray::cast(native_context()->array_function()->prototype()));
@@ -3389,7 +3785,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
int capacity = properties->Capacity();
for (int i = 0; i < capacity; i++) {
Object* raw_key(properties->KeyAt(i));
- if (properties->IsKey(raw_key)) {
+ if (properties->IsKey(isolate(), raw_key)) {
DCHECK(raw_key->IsName());
// If the property is already there we skip it.
Handle<Name> key(Name::cast(raw_key));
@@ -3400,7 +3796,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(properties->ValueAt(i)->IsPropertyCell());
Handle<PropertyCell> cell(PropertyCell::cast(properties->ValueAt(i)));
Handle<Object> value(cell->value(), isolate());
- if (value->IsTheHole()) continue;
+ if (value->IsTheHole(isolate())) continue;
PropertyDetails details = cell->property_details();
DCHECK_EQ(kData, details.kind());
JSObject::AddProperty(to, key, value, details.attributes());
@@ -3412,7 +3808,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
int capacity = properties->Capacity();
for (int i = 0; i < capacity; i++) {
Object* raw_key(properties->KeyAt(i));
- if (properties->IsKey(raw_key)) {
+ if (properties->IsKey(isolate(), raw_key)) {
DCHECK(raw_key->IsName());
// If the property is already there we skip it.
Handle<Name> key(Name::cast(raw_key));
@@ -3423,7 +3819,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<Object> value = Handle<Object>(properties->ValueAt(i),
isolate());
DCHECK(!value->IsCell());
- DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsTheHole(isolate()));
PropertyDetails details = properties->DetailsAt(i);
DCHECK_EQ(kData, details.kind());
JSObject::AddProperty(to, key, value, details.attributes());
@@ -3454,7 +3850,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Object> proto(from->map()->prototype(), isolate());
- SetObjectPrototype(to, proto);
+ JSObject::ForceSetPrototype(to, proto);
}
@@ -3500,10 +3896,12 @@ Genesis::Genesis(Isolate* isolate,
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions,
- GlobalContextType context_type)
+ size_t context_snapshot_index, GlobalContextType context_type)
: isolate_(isolate), active_(isolate->bootstrapper()) {
NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
+ global_proxy_ = Handle<JSGlobalProxy>::null();
+
// Before creating the roots we must save the context and restore it
// on all function exits.
SaveContext saved_context(isolate);
@@ -3529,7 +3927,8 @@ Genesis::Genesis(Isolate* isolate,
// a snapshot. Otherwise we have to build the context from scratch.
// Also create a context from scratch to expose natives, if required by flag.
if (!isolate->initialized_from_snapshot() ||
- !Snapshot::NewContextFromSnapshot(isolate, global_proxy)
+ !Snapshot::NewContextFromSnapshot(isolate, global_proxy,
+ context_snapshot_index)
.ToHandle(&native_context_)) {
native_context_ = Handle<Context>();
}
@@ -3559,7 +3958,8 @@ Genesis::Genesis(Isolate* isolate,
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
- CreateIteratorMaps();
+ CreateIteratorMaps(empty_function);
+ CreateAsyncFunctionMaps(empty_function);
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
@@ -3570,10 +3970,9 @@ Genesis::Genesis(Isolate* isolate,
MakeFunctionInstancePrototypeWritable();
- if (context_type != THIN_CONTEXT) {
- if (!InstallExtraNatives()) return;
- if (!ConfigureGlobalObjects(global_proxy_template)) return;
- }
+ if (!InstallExtraNatives()) return;
+ if (!ConfigureGlobalObjects(global_proxy_template)) return;
+
isolate->counters()->contexts_created_from_scratch()->Increment();
// Re-initialize the counter because it got incremented during snapshot
// creation.
@@ -3611,6 +4010,67 @@ Genesis::Genesis(Isolate* isolate,
result_ = native_context();
}
+Genesis::Genesis(Isolate* isolate,
+ MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_proxy_template)
+ : isolate_(isolate), active_(isolate->bootstrapper()) {
+ NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
+ result_ = Handle<Context>::null();
+ global_proxy_ = Handle<JSGlobalProxy>::null();
+
+ // Before creating the roots we must save the context and restore it
+ // on all function exits.
+ SaveContext saved_context(isolate);
+
+ // During genesis, the boilerplate for stack overflow won't work until the
+ // environment has been at least partially initialized. Add a stack check
+ // before entering JS code to catch overflow early.
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ return;
+ }
+
+ Handle<JSGlobalProxy> global_proxy;
+ if (!maybe_global_proxy.ToHandle(&global_proxy)) {
+ global_proxy = factory()->NewUninitializedJSGlobalProxy();
+ }
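+ // This overload (backing Bootstrapper::NewRemoteContext, declared in
+ // bootstrapper.h) builds only a detached global proxy: access-checked,
+ // null prototype, and no native context attached.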
+
+ // CreateNewGlobals.
+ Handle<ObjectTemplateInfo> global_proxy_data =
+ v8::Utils::OpenHandle(*global_proxy_template);
+ Handle<FunctionTemplateInfo> global_constructor(
+ FunctionTemplateInfo::cast(global_proxy_data->constructor()));
+ Handle<SharedFunctionInfo> shared =
+ FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate,
+ global_constructor);
+ Handle<Map> initial_map =
+ factory()->CreateSloppyFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
+ Handle<JSFunction> global_proxy_function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ initial_map, shared, factory()->undefined_value());
+ DCHECK_EQ(global_proxy_data->internal_field_count(), 0);
+ Handle<Map> global_proxy_map = isolate->factory()->NewMap(
+ JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize, FAST_HOLEY_SMI_ELEMENTS);
+ JSFunction::SetInitialMap(global_proxy_function, global_proxy_map,
+ factory()->null_value());
+ global_proxy_map->set_is_access_check_needed(true);
+ global_proxy_map->set_is_callable();
+ global_proxy_map->set_is_constructor(true);
+ global_proxy_map->set_has_hidden_prototype(true);
+
+ Handle<String> global_name = factory()->global_string();
+ global_proxy_function->shared()->set_instance_class_name(*global_name);
+ factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
+
+ // HookUpGlobalProxy.
+ global_proxy->set_native_context(*factory()->null_value());
+
+ // DetachGlobal.
+ JSObject::ForceSetPrototype(global_proxy, factory()->null_value());
+
+ global_proxy_ = global_proxy;
+}
// Support for thread preemption.
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index d1bf201139..51022fd608 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -61,7 +61,7 @@ class SourceCodeCache final BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
-enum GlobalContextType { FULL_CONTEXT, THIN_CONTEXT, DEBUG_CONTEXT };
+enum GlobalContextType { FULL_CONTEXT, DEBUG_CONTEXT };
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
@@ -79,9 +79,13 @@ class Bootstrapper final {
Handle<Context> CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
- v8::ExtensionConfiguration* extensions,
+ v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
GlobalContextType context_type = FULL_CONTEXT);
+ Handle<JSGlobalProxy> NewRemoteContext(
+ MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_object_template);
+
// Detach the environment from its outer global object.
void DetachGlobal(Handle<Context> env);
@@ -136,6 +140,7 @@ class Bootstrapper final {
static v8::Extension* externalize_string_extension_;
static v8::Extension* statistics_extension_;
static v8::Extension* trigger_failure_extension_;
+ static v8::Extension* ignition_statistics_extension_;
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
deleted file mode 100644
index 1ad19946cb..0000000000
--- a/deps/v8/src/builtins.cc
+++ /dev/null
@@ -1,4955 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins.h"
-
-#include "src/api.h"
-#include "src/api-arguments.h"
-#include "src/api-natives.h"
-#include "src/base/once.h"
-#include "src/bootstrapper.h"
-#include "src/code-factory.h"
-#include "src/compiler/code-stub-assembler.h"
-#include "src/dateparser-inl.h"
-#include "src/elements.h"
-#include "src/frames-inl.h"
-#include "src/gdb-jit.h"
-#include "src/ic/handler-compiler.h"
-#include "src/ic/ic.h"
-#include "src/isolate-inl.h"
-#include "src/messages.h"
-#include "src/profiler/cpu-profiler.h"
-#include "src/property-descriptor.h"
-#include "src/prototype.h"
-#include "src/string-builder.h"
-#include "src/vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// Arguments object passed to C++ builtins.
-template <BuiltinExtraArguments extra_args>
-class BuiltinArguments : public Arguments {
- public:
- BuiltinArguments(int length, Object** arguments)
- : Arguments(length, arguments) {
- // Check we have at least the receiver.
- DCHECK_LE(1, this->length());
- }
-
- Object*& operator[] (int index) {
- DCHECK(index < length());
- return Arguments::operator[](index);
- }
-
- template <class S> Handle<S> at(int index) {
- DCHECK(index < length());
- return Arguments::at<S>(index);
- }
-
- Handle<Object> atOrUndefined(Isolate* isolate, int index) {
- if (index >= length()) {
- return isolate->factory()->undefined_value();
- }
- return at<Object>(index);
- }
-
- Handle<Object> receiver() {
- return Arguments::at<Object>(0);
- }
-
- template <class S>
- Handle<S> target();
- Handle<HeapObject> new_target();
-
- // Gets the total number of arguments including the receiver (but
- // excluding extra arguments).
- int length() const;
-};
-
-
-// Specialize BuiltinArguments for the extra arguments.
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kNone>::length() const {
- return Arguments::length();
-}
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kTarget>::length() const {
- return Arguments::length() - 1;
-}
-
-template <>
-template <class S>
-Handle<S> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
- return Arguments::at<S>(Arguments::length() - 1);
-}
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kNewTarget>::length() const {
- return Arguments::length() - 1;
-}
-
-template <>
-Handle<HeapObject>
-BuiltinArguments<BuiltinExtraArguments::kNewTarget>::new_target() {
- return Arguments::at<HeapObject>(Arguments::length() - 1);
-}
-
-template <>
-int BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::length()
- const {
- return Arguments::length() - 2;
-}
-
-template <>
-template <class S>
-Handle<S>
-BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::target() {
- return Arguments::at<S>(Arguments::length() - 2);
-}
-
-template <>
-Handle<HeapObject>
-BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::new_target() {
- return Arguments::at<HeapObject>(Arguments::length() - 1);
-}
-
-
-#define DEF_ARG_TYPE(name, spec) \
- typedef BuiltinArguments<BuiltinExtraArguments::spec> name##ArgumentsType;
-BUILTIN_LIST_C(DEF_ARG_TYPE)
-#undef DEF_ARG_TYPE
-
-
-// ----------------------------------------------------------------------------
-// Support macro for defining builtins in C++.
-// ----------------------------------------------------------------------------
-//
-// A builtin function is defined by writing:
-//
-// BUILTIN(name) {
-// ...
-// }
-//
-// In the body of the builtin function the arguments can be accessed
-// through the BuiltinArguments object args.
-
-#define BUILTIN(name) \
- MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
- Isolate* isolate); \
- MUST_USE_RESULT static Object* Builtin_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- Object* value; \
- isolate->counters()->runtime_calls()->Increment(); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
- "V8.Builtin_" #name); \
- name##ArgumentsType args(args_length, args_object); \
- if (FLAG_runtime_call_stats) { \
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
- RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name); \
- value = Builtin_Impl_##name(args, isolate); \
- } else { \
- value = Builtin_Impl_##name(args, isolate); \
- } \
- return value; \
- } \
- \
- MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
- Isolate* isolate)
-
-// ----------------------------------------------------------------------------
-
-
-#define CHECK_RECEIVER(Type, name, method) \
- if (!args.receiver()->Is##Type()) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, \
- NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \
- isolate->factory()->NewStringFromAsciiChecked(method), \
- args.receiver())); \
- } \
- Handle<Type> name = Handle<Type>::cast(args.receiver())
-
-
-inline bool ClampedToInteger(Object* object, int* out) {
- // This is an extended version of ECMA-262 7.1.11 handling signed values
- // Try to convert object to a number and clamp values to [kMinInt, kMaxInt]
- if (object->IsSmi()) {
- *out = Smi::cast(object)->value();
- return true;
- } else if (object->IsHeapNumber()) {
- double value = HeapNumber::cast(object)->value();
- if (std::isnan(value)) {
- *out = 0;
- } else if (value > kMaxInt) {
- *out = kMaxInt;
- } else if (value < kMinInt) {
- *out = kMinInt;
- } else {
- *out = static_cast<int>(value);
- }
- return true;
- } else if (object->IsUndefined() || object->IsNull()) {
- *out = 0;
- return true;
- } else if (object->IsBoolean()) {
- *out = object->IsTrue();
- return true;
- }
- return false;
-}
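// (Behavior sketch for the helper above: Smi 5 -> 5, 1e100 -> kMaxInt,
//  NaN -> 0, undefined/null -> 0, true -> 1; anything else, e.g. a string,
//  is rejected by returning false.)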
-
-
-inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
- int* out) {
- Map* arguments_map = isolate->native_context()->sloppy_arguments_map();
- if (object->map() != arguments_map) return false;
- DCHECK(object->HasFastElements());
- Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
- if (!len_obj->IsSmi()) return false;
- *out = Max(0, Smi::cast(len_obj)->value());
- return *out <= object->elements()->length();
-}
-
-inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
- DisallowHeapAllocation no_gc;
- HeapObject* prototype = HeapObject::cast(object->map()->prototype());
- HeapObject* null = isolate->heap()->null_value();
- HeapObject* empty = isolate->heap()->empty_fixed_array();
- while (prototype != null) {
- Map* map = prototype->map();
- if (map->instance_type() <= LAST_CUSTOM_ELEMENTS_RECEIVER ||
- map->instance_type() == JS_GLOBAL_PROXY_TYPE) return false;
- if (JSObject::cast(prototype)->elements() != empty) return false;
- prototype = HeapObject::cast(map->prototype());
- }
- return true;
-}
-
-inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
- JSArray* receiver) {
- return PrototypeHasNoElements(isolate, receiver);
-}
-
-inline bool HasSimpleElements(JSObject* current) {
- return current->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
- current->map()->instance_type() != JS_GLOBAL_PROXY_TYPE &&
- !current->GetElementsAccessor()->HasAccessors(current);
-}
-
-inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
- JSObject* receiver) {
- // Check that we have no accessors on the receiver's elements.
- if (!HasSimpleElements(receiver)) return false;
- return PrototypeHasNoElements(isolate, receiver);
-}
-
-inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
- DisallowHeapAllocation no_gc;
- PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER);
- for (; !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSProxy()) return false;
- JSObject* current = iter.GetCurrent<JSObject>();
- if (!HasSimpleElements(current)) return false;
- }
- return true;
-}
-
-// Returns |false| if not applicable.
-MUST_USE_RESULT
-inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
- Handle<Object> receiver,
- Arguments* args,
- int first_added_arg) {
- if (!receiver->IsJSArray()) return false;
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- ElementsKind origin_kind = array->GetElementsKind();
- if (IsDictionaryElementsKind(origin_kind)) return false;
- if (array->map()->is_observed()) return false;
- if (!array->map()->is_extensible()) return false;
- if (args == nullptr) return true;
-
- // If there may be elements accessors in the prototype chain, the fast path
- // cannot be used if there are arguments to add to the array.
- if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
-
- // Adding elements to the array prototype would break code that makes sure
- // it has no elements. Handle that elsewhere.
- if (isolate->IsAnyInitialArrayPrototype(array)) return false;
-
- // Need to ensure that the arguments passed in args can be contained in
- // the array.
- int args_length = args->length();
- if (first_added_arg >= args_length) return true;
-
- if (IsFastObjectElementsKind(origin_kind)) return true;
- ElementsKind target_kind = origin_kind;
- {
- DisallowHeapAllocation no_gc;
- for (int i = first_added_arg; i < args_length; i++) {
- Object* arg = (*args)[i];
- if (arg->IsHeapObject()) {
- if (arg->IsHeapNumber()) {
- target_kind = FAST_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_ELEMENTS;
- break;
- }
- }
- }
- }
- if (target_kind != origin_kind) {
- // Use a short-lived HandleScope to avoid creating several copies of the
- // elements handle which would cause issues when left-trimming later-on.
- HandleScope scope(isolate);
- JSObject::TransitionElementsKind(array, target_kind);
- }
- return true;
-}
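// (Summary of the fast-path preconditions above: a JSArray with fast,
//  extensible, unobserved elements; no elements accessors anywhere on the
//  prototype chain when elements are to be added; not one of the initial
//  Array prototypes; and the elements kind widened up front so the new
//  arguments fit.)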
-
-
-MUST_USE_RESULT static Object* CallJsIntrinsic(
- Isolate* isolate, Handle<JSFunction> function,
- BuiltinArguments<BuiltinExtraArguments::kNone> args) {
- HandleScope handleScope(isolate);
- int argc = args.length() - 1;
- ScopedVector<Handle<Object> > argv(argc);
- for (int i = 0; i < argc; ++i) {
- argv[i] = args.at<Object>(i + 1);
- }
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate,
- function,
- args.receiver(),
- argc,
- argv.start()));
- return *result;
-}
-
-
-} // namespace
-
-
-BUILTIN(Illegal) {
- UNREACHABLE();
- return isolate->heap()->undefined_value(); // Make compiler happy.
-}
-
-
-BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
-
-void Builtins::Generate_ObjectHasOwnProperty(
- compiler::CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Label Label;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* object = assembler->Parameter(0);
- Node* key = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
-
- Label call_runtime(assembler), return_true(assembler),
- return_false(assembler);
-
- // Smi receivers do not have own properties.
- Label if_objectisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(object), &return_false,
- &if_objectisnotsmi);
- assembler->Bind(&if_objectisnotsmi);
-
- Node* map = assembler->LoadMap(object);
- Node* instance_type = assembler->LoadMapInstanceType(map);
-
- Variable var_index(assembler, MachineRepresentation::kWord32);
-
- Label if_keyissmi(assembler), if_keyisnotsmi(assembler),
- keyisindex(assembler);
- assembler->Branch(assembler->WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
- assembler->Bind(&if_keyissmi);
- {
- // Negative smi keys are named properties. Handle in the runtime.
- Label if_keyispositive(assembler);
- assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositive,
- &call_runtime);
- assembler->Bind(&if_keyispositive);
-
- var_index.Bind(assembler->SmiUntag(key));
- assembler->Goto(&keyisindex);
- }
-
- assembler->Bind(&if_keyisnotsmi);
-
- Node* key_instance_type = assembler->LoadInstanceType(key);
- Label if_iskeyunique(assembler), if_iskeynotsymbol(assembler);
- assembler->Branch(
- assembler->Word32Equal(key_instance_type,
- assembler->Int32Constant(SYMBOL_TYPE)),
- &if_iskeyunique, &if_iskeynotsymbol);
- assembler->Bind(&if_iskeynotsymbol);
- {
- Label if_iskeyinternalized(assembler);
- Node* bits = assembler->WordAnd(
- key_instance_type,
- assembler->Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
- assembler->Branch(
- assembler->Word32Equal(
- bits, assembler->Int32Constant(kStringTag | kInternalizedTag)),
- &if_iskeyinternalized, &call_runtime);
- assembler->Bind(&if_iskeyinternalized);
-
- // Check whether the key is an array index passed in as a string. If so,
- // handle it uniformly with smi keys.
- // TODO(verwaest): Also support non-internalized strings.
- Node* hash = assembler->LoadNameHash(key);
- Node* bit = assembler->Word32And(
- hash, assembler->Int32Constant(internal::Name::kIsNotArrayIndexMask));
- Label if_isarrayindex(assembler);
- assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
- &if_isarrayindex, &if_iskeyunique);
- assembler->Bind(&if_isarrayindex);
- var_index.Bind(
- assembler->BitFieldDecode<internal::Name::ArrayIndexValueBits>(hash));
- assembler->Goto(&keyisindex);
- }
- assembler->Bind(&if_iskeyunique);
-
- {
- Label if_objectissimple(assembler);
- assembler->Branch(assembler->Word32Or(
- assembler->Int32LessThanOrEqual(
- instance_type, assembler->Int32Constant(
- LAST_SPECIAL_RECEIVER_TYPE)),
- assembler->Word32Equal(
- instance_type, assembler->Int32Constant(
- JS_GLOBAL_PROXY_TYPE))),
- &call_runtime, &if_objectissimple);
- assembler->Bind(&if_objectissimple);
- }
-
- // TODO(verwaest): Perform a dictionary lookup on slow-mode receivers.
- Node* bit_field3 = assembler->LoadMapBitField3(map);
- Node* bit = assembler->BitFieldDecode<Map::DictionaryMap>(bit_field3);
- Label if_isfastmap(assembler);
- assembler->Branch(assembler->Word32Equal(bit, assembler->Int32Constant(0)),
- &if_isfastmap, &call_runtime);
- assembler->Bind(&if_isfastmap);
- Node* nof =
- assembler->BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
- // Bail out to the runtime for large numbers of own descriptors. The stub only
- // does linear search, which becomes too expensive in that case.
- {
- static const int32_t kMaxLinear = 256;
- Label above_max(assembler), below_max(assembler);
- assembler->Branch(assembler->Int32LessThanOrEqual(
- nof, assembler->Int32Constant(kMaxLinear)),
- &below_max, &call_runtime);
- assembler->Bind(&below_max);
- }
- Node* descriptors = assembler->LoadMapDescriptors(map);
-
- Variable var_descriptor(assembler, MachineRepresentation::kWord32);
- Label loop(assembler, &var_descriptor);
- var_descriptor.Bind(assembler->Int32Constant(0));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- Node* index = var_descriptor.value();
- Node* offset = assembler->Int32Constant(DescriptorArray::ToKeyIndex(0));
- Node* factor = assembler->Int32Constant(DescriptorArray::kDescriptorSize);
- Label if_notdone(assembler);
- assembler->Branch(assembler->Word32Equal(index, nof), &return_false,
- &if_notdone);
- assembler->Bind(&if_notdone);
- {
- Node* array_index =
- assembler->Int32Add(offset, assembler->Int32Mul(index, factor));
- Node* current =
- assembler->LoadFixedArrayElementInt32Index(descriptors, array_index);
- Label if_unequal(assembler);
- assembler->Branch(assembler->WordEqual(current, key), &return_true,
- &if_unequal);
- assembler->Bind(&if_unequal);
-
- var_descriptor.Bind(
- assembler->Int32Add(index, assembler->Int32Constant(1)));
- assembler->Goto(&loop);
- }
- }
-
- assembler->Bind(&keyisindex);
- {
- Label if_objectissimple(assembler);
- assembler->Branch(assembler->Word32Or(
- assembler->Int32LessThanOrEqual(
- instance_type, assembler->Int32Constant(
- LAST_CUSTOM_ELEMENTS_RECEIVER)),
- assembler->Word32Equal(
- instance_type, assembler->Int32Constant(
- JS_GLOBAL_PROXY_TYPE))),
- &call_runtime, &if_objectissimple);
- assembler->Bind(&if_objectissimple);
- }
-
- Node* index = var_index.value();
- Node* bit_field2 = assembler->LoadMapBitField2(map);
- Node* elements_kind =
- assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
-
- // TODO(verwaest): Support other elements kinds as well.
- Label if_isobjectorsmi(assembler);
- assembler->Branch(
- assembler->Int32LessThanOrEqual(
- elements_kind, assembler->Int32Constant(FAST_HOLEY_ELEMENTS)),
- &if_isobjectorsmi, &call_runtime);
- assembler->Bind(&if_isobjectorsmi);
- {
- Node* elements = assembler->LoadElements(object);
- Node* length = assembler->LoadFixedArrayBaseLength(elements);
-
- Label if_iskeyinrange(assembler);
- assembler->Branch(
- assembler->Int32LessThan(index, assembler->SmiToWord32(length)),
- &if_iskeyinrange, &return_false);
-
- assembler->Bind(&if_iskeyinrange);
- Node* element = assembler->LoadFixedArrayElementInt32Index(elements, index);
- Node* the_hole = assembler->LoadRoot(Heap::kTheHoleValueRootIndex);
- assembler->Branch(assembler->WordEqual(element, the_hole), &return_false,
- &return_true);
- }
-
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
-
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&call_runtime);
- assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
- context, object, key));
-}
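-
-// A minimal stand-alone sketch of the linear key scan the stub emits above,
-// assuming a flat array in which each descriptor occupies |stride| slots and
-// stores its key first (names and layout here are illustrative, not V8's
-// actual DescriptorArray):
-static inline int LinearKeySearch(void* const* keys, int count, int offset,
-                                  int stride, const void* key) {
-  for (int i = 0; i < count; i++) {
-    if (keys[offset + i * stride] == key) return i;  // Pointer identity.
-  }
-  return -1;  // Not found; the stub falls back to the runtime instead.
-}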
-
-namespace {
-
-Object* DoArrayPush(Isolate* isolate,
- BuiltinArguments<BuiltinExtraArguments::kNone> args) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
- return CallJsIntrinsic(isolate, isolate->array_push(), args);
- }
- // Fast Elements Path
- int to_add = args.length() - 1;
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- int len = Smi::cast(array->length())->value();
- if (to_add == 0) return Smi::FromInt(len);
-
- // Currently fixed arrays cannot grow too big, so we should never hit this.
- DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
-
- if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_push(), args);
- }
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Push(array, &args, to_add);
- return Smi::FromInt(new_length);
-}
-
-} // namespace
-
-BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
-
-// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
-// tailcall to the builtin directly.
-RUNTIME_FUNCTION(Runtime_ArrayPush) {
- DCHECK_EQ(2, args.length());
- Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
- // Rewrap the arguments as builtins arguments.
- BuiltinArguments<BuiltinExtraArguments::kNone> caller_args(
- incoming->length() + 1, incoming->arguments() + 1);
- return DoArrayPush(isolate, caller_args);
-}
-
-BUILTIN(ArrayPop) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0)) {
- return CallJsIntrinsic(isolate, isolate->array_pop(), args);
- }
-
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- DCHECK(!array->map()->is_observed());
-
- uint32_t len = static_cast<uint32_t>(Smi::cast(array->length())->value());
- if (len == 0) return isolate->heap()->undefined_value();
-
- if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_pop(), args);
- }
-
- Handle<Object> result;
- if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
- // Fast Elements Path
- result = array->GetElementsAccessor()->Pop(array);
- } else {
- // Use Slow Lookup otherwise
- uint32_t new_length = len - 1;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSReceiver::GetElement(isolate, array, new_length));
- JSArray::SetLength(array, new_length);
- }
- return *result;
-}
-
-
-BUILTIN(ArrayShift) {
- HandleScope scope(isolate);
- Heap* heap = isolate->heap();
- Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0) ||
- !IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
- return CallJsIntrinsic(isolate, isolate->array_shift(), args);
- }
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- DCHECK(!array->map()->is_observed());
-
- int len = Smi::cast(array->length())->value();
- if (len == 0) return heap->undefined_value();
-
- if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_shift(), args);
- }
-
- Handle<Object> first = array->GetElementsAccessor()->Shift(array);
- return *first;
-}
-
-
-BUILTIN(ArrayUnshift) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
- return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
- }
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- DCHECK(!array->map()->is_observed());
- int to_add = args.length() - 1;
- if (to_add == 0) return array->length();
-
- // Currently fixed arrays cannot grow too big, so we should never hit this.
- DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
-
- if (JSArray::HasReadOnlyLength(array)) {
- return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
- }
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Unshift(array, &args, to_add);
- return Smi::FromInt(new_length);
-}
-
-
-BUILTIN(ArraySlice) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- int len = -1;
- int relative_start = 0;
- int relative_end = 0;
-
- if (receiver->IsJSArray()) {
- DisallowHeapAllocation no_gc;
- JSArray* array = JSArray::cast(*receiver);
- if (V8_UNLIKELY(!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(isolate, array) ||
- !isolate->IsArraySpeciesLookupChainIntact() ||
- // If this is a subclass of Array, then call out to JS
- !array->HasArrayPrototype(isolate))) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- len = Smi::cast(array->length())->value();
- } else if (receiver->IsJSObject() &&
- GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
- &len)) {
- DCHECK_EQ(FAST_ELEMENTS, JSObject::cast(*receiver)->GetElementsKind());
-    // Array.prototype.slice.call(arguments, ...) is quite a common idiom
- // (notably more than 50% of invocations in Web apps).
- // Treat it in C++ as well.
- } else {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- DCHECK_LE(0, len);
- int argument_count = args.length() - 1;
-  // Note the carefully chosen defaults: if an argument is missing, it is
-  // undefined, which gets converted to 0 for relative_start and to len
-  // for relative_end.
- relative_start = 0;
- relative_end = len;
- if (argument_count > 0) {
- DisallowHeapAllocation no_gc;
- if (!ClampedToInteger(args[1], &relative_start)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- if (argument_count > 1) {
- Object* end_arg = args[2];
- // slice handles the end_arg specially
- if (end_arg->IsUndefined()) {
- relative_end = len;
- } else if (!ClampedToInteger(end_arg, &relative_end)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- }
- }
-
-  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
- uint32_t actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
-  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
- uint32_t actual_end =
- (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
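-  // Worked example of the clamping above (illustrative values): with
-  // len == 5, slice(-2) yields actual_start = Max(5 - 2, 0) = 3 and
-  // actual_end = 5, copying elements 3..4; slice(1, -1) yields
-  // actual_start = 1 and actual_end = Max(5 - 1, 0) = 4; slice(7) yields
-  // actual_start = Min(7, 5) = 5, i.e. an empty result.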
-
- Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- ElementsAccessor* accessor = object->GetElementsAccessor();
- return *accessor->Slice(object, actual_start, actual_end);
-}
-
-
-BUILTIN(ArraySplice) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (V8_UNLIKELY(
- !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3) ||
- // If this is a subclass of Array, then call out to JS.
- !Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
- // If anything with @@species has been messed with, call out to JS.
- !isolate->IsArraySpeciesLookupChainIntact())) {
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- DCHECK(!array->map()->is_observed());
-
- int argument_count = args.length() - 1;
- int relative_start = 0;
- if (argument_count > 0) {
- DisallowHeapAllocation no_gc;
- if (!ClampedToInteger(args[1], &relative_start)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- }
- int len = Smi::cast(array->length())->value();
- // clip relative start to [0, len]
- int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
- int actual_delete_count;
- if (argument_count == 1) {
-    // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count
-    // is given as a request to delete all the elements from the start, which
-    // differs from the case of an undefined delete count.
-    // This does not follow ECMA-262, but we do the same for compatibility.
- DCHECK(len - actual_start >= 0);
- actual_delete_count = len - actual_start;
- } else {
- int delete_count = 0;
- DisallowHeapAllocation no_gc;
- if (argument_count > 1) {
- if (!ClampedToInteger(args[2], &delete_count)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- }
- actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
- }
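-  // Worked example (illustrative values): with len == 5, splice(-2) clamps
-  // actual_start to 3 and, in the single-argument case, deletes
-  // len - actual_start = 2 elements; splice(1, 99) clamps the delete count
-  // to Min(Max(99, 0), 5 - 1) = 4.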
-
- int add_count = (argument_count > 1) ? (argument_count - 2) : 0;
- int new_length = len - actual_delete_count + add_count;
-
- if (new_length != len && JSArray::HasReadOnlyLength(array)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- ElementsAccessor* accessor = array->GetElementsAccessor();
- Handle<JSArray> result_array = accessor->Splice(
- array, actual_start, actual_delete_count, &args, add_count);
- return *result_array;
-}
-
-
-// Array Concat -------------------------------------------------------------
-
-namespace {
-
-/**
- * A simple visitor that visits every element of an Array.
- * The backing storage can be a fixed array in the fast-elements case,
- * or a dictionary for sparse arrays. Since Dictionary is a subtype
- * of FixedArray, the class can be used for both fast and slow cases.
- * The second parameter of the constructor, fast_elements, specifies
- * whether the storage is a FixedArray or a Dictionary.
- *
- * An index limit is used to deal with the situation where the result
- * array length overflows the range of a 32-bit non-negative integer.
- */
-class ArrayConcatVisitor {
- public:
- ArrayConcatVisitor(Isolate* isolate, Handle<Object> storage,
- bool fast_elements)
- : isolate_(isolate),
- storage_(isolate->global_handles()->Create(*storage)),
- index_offset_(0u),
- bit_field_(FastElementsField::encode(fast_elements) |
- ExceedsLimitField::encode(false) |
- IsFixedArrayField::encode(storage->IsFixedArray())) {
- DCHECK(!(this->fast_elements() && !is_fixed_array()));
- }
-
- ~ArrayConcatVisitor() { clear_storage(); }
-
- MUST_USE_RESULT bool visit(uint32_t i, Handle<Object> elm) {
- uint32_t index = index_offset_ + i;
-
- if (i >= JSObject::kMaxElementCount - index_offset_) {
- set_exceeds_array_limit(true);
-      // No exception has been thrown at this point. Return true so the
-      // caller breaks out and throws; a false return from visit() would
-      // imply that an exception is already pending.
- return true;
- }
-
- if (!is_fixed_array()) {
- LookupIterator it(isolate_, storage_, index, LookupIterator::OWN);
- MAYBE_RETURN(
- JSReceiver::CreateDataProperty(&it, elm, Object::THROW_ON_ERROR),
- false);
- return true;
- }
-
- if (fast_elements()) {
- if (index < static_cast<uint32_t>(storage_fixed_array()->length())) {
- storage_fixed_array()->set(index, *elm);
- return true;
- }
- // Our initial estimate of length was foiled, possibly by
- // getters on the arrays increasing the length of later arrays
- // during iteration.
- // This shouldn't happen in anything but pathological cases.
- SetDictionaryMode();
- // Fall-through to dictionary mode.
- }
- DCHECK(!fast_elements());
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(*storage_));
- // The object holding this backing store has just been allocated, so
- // it cannot yet be used as a prototype.
- Handle<SeededNumberDictionary> result =
- SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
- if (!result.is_identical_to(dict)) {
- // Dictionary needed to grow.
- clear_storage();
- set_storage(*result);
- }
- return true;
- }
-
- void increase_index_offset(uint32_t delta) {
- if (JSObject::kMaxElementCount - index_offset_ < delta) {
- index_offset_ = JSObject::kMaxElementCount;
- } else {
- index_offset_ += delta;
- }
-    // If the initial length estimate was off (see the special case in
-    // visit()), but the array that exceeded the limit didn't contain
-    // elements beyond the provided-for index range, go to dictionary mode
-    // now.
- if (fast_elements() &&
- index_offset_ >
- static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
- SetDictionaryMode();
- }
- }
-
- bool exceeds_array_limit() const {
- return ExceedsLimitField::decode(bit_field_);
- }
-
- Handle<JSArray> ToArray() {
- DCHECK(is_fixed_array());
- Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
- Handle<Object> length =
- isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
- Handle<Map> map = JSObject::GetElementsTransitionMap(
- array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
- array->set_map(*map);
- array->set_length(*length);
- array->set_elements(*storage_fixed_array());
- return array;
- }
-
-  // Storage is either a FixedArray (if is_fixed_array()) or a JSReceiver
-  // (otherwise).
- Handle<FixedArray> storage_fixed_array() {
- DCHECK(is_fixed_array());
- return Handle<FixedArray>::cast(storage_);
- }
- Handle<JSReceiver> storage_jsreceiver() {
- DCHECK(!is_fixed_array());
- return Handle<JSReceiver>::cast(storage_);
- }
-
- private:
- // Convert storage to dictionary mode.
- void SetDictionaryMode() {
- DCHECK(fast_elements() && is_fixed_array());
- Handle<FixedArray> current_storage = storage_fixed_array();
- Handle<SeededNumberDictionary> slow_storage(
- SeededNumberDictionary::New(isolate_, current_storage->length()));
- uint32_t current_length = static_cast<uint32_t>(current_storage->length());
- FOR_WITH_HANDLE_SCOPE(
- isolate_, uint32_t, i = 0, i, i < current_length, i++, {
- Handle<Object> element(current_storage->get(i), isolate_);
- if (!element->IsTheHole()) {
- // The object holding this backing store has just been allocated, so
- // it cannot yet be used as a prototype.
- Handle<SeededNumberDictionary> new_storage =
- SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
- false);
- if (!new_storage.is_identical_to(slow_storage)) {
- slow_storage = loop_scope.CloseAndEscape(new_storage);
- }
- }
- });
- clear_storage();
- set_storage(*slow_storage);
- set_fast_elements(false);
- }
-
- inline void clear_storage() { GlobalHandles::Destroy(storage_.location()); }
-
- inline void set_storage(FixedArray* storage) {
- DCHECK(is_fixed_array());
- storage_ = isolate_->global_handles()->Create(storage);
- }
-
- class FastElementsField : public BitField<bool, 0, 1> {};
- class ExceedsLimitField : public BitField<bool, 1, 1> {};
- class IsFixedArrayField : public BitField<bool, 2, 1> {};
-
- bool fast_elements() const { return FastElementsField::decode(bit_field_); }
- void set_fast_elements(bool fast) {
- bit_field_ = FastElementsField::update(bit_field_, fast);
- }
- void set_exceeds_array_limit(bool exceeds) {
- bit_field_ = ExceedsLimitField::update(bit_field_, exceeds);
- }
- bool is_fixed_array() const { return IsFixedArrayField::decode(bit_field_); }
-
- Isolate* isolate_;
- Handle<Object> storage_; // Always a global handle.
- // Index after last seen index. Always less than or equal to
- // JSObject::kMaxElementCount.
- uint32_t index_offset_;
- uint32_t bit_field_;
-};
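-
-// Stand-alone sketch of the bit-field packing used by the three flag fields
-// above: each BitField<bool, pos, 1> occupies one bit of the shared word
-// (the helper names here are illustrative):
-static inline uint32_t EncodeVisitorFlags(bool fast_elements,
-                                          bool exceeds_limit,
-                                          bool is_fixed_array) {
-  return (fast_elements ? 1u << 0 : 0u) | (exceeds_limit ? 1u << 1 : 0u) |
-         (is_fixed_array ? 1u << 2 : 0u);
-}
-static inline bool DecodeVisitorFlag(uint32_t bits, int pos) {
-  return ((bits >> pos) & 1u) != 0;
-}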
-
-
-uint32_t EstimateElementCount(Handle<JSArray> array) {
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- int element_count = 0;
- switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- Handle<FixedArray> elements(FixedArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->get(i)->IsTheHole()) element_count++;
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- if (array->elements()->IsFixedArray()) {
- DCHECK(FixedArray::cast(array->elements())->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->is_the_hole(i)) element_count++;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dictionary(
- SeededNumberDictionary::cast(array->elements()));
- int capacity = dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate());
- if (dictionary->IsKey(*key)) {
- element_count++;
- }
- }
- break;
- }
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- // External arrays are always dense.
- return length;
- case NO_ELEMENTS:
- return 0;
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- UNREACHABLE();
- return 0;
- }
- // As an estimate, we assume that the prototype doesn't contain any
- // inherited elements.
- return element_count;
-}
-
-
-// Used for sorting indices in a List<uint32_t>.
-int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
- uint32_t a = *ap;
- uint32_t b = *bp;
- return (a == b) ? 0 : (a < b) ? -1 : 1;
-}
-
-
-void CollectElementIndices(Handle<JSObject> object, uint32_t range,
- List<uint32_t>* indices) {
- Isolate* isolate = object->GetIsolate();
- ElementsKind kind = object->GetElementsKind();
- switch (kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- DisallowHeapAllocation no_gc;
- FixedArray* elements = FixedArray::cast(object->elements());
- uint32_t length = static_cast<uint32_t>(elements->length());
- if (range < length) length = range;
- for (uint32_t i = 0; i < length; i++) {
- if (!elements->get(i)->IsTheHole()) {
- indices->Add(i);
- }
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- if (object->elements()->IsFixedArray()) {
- DCHECK(object->elements()->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(object->elements()));
- uint32_t length = static_cast<uint32_t>(elements->length());
- if (range < length) length = range;
- for (uint32_t i = 0; i < length; i++) {
- if (!elements->is_the_hole(i)) {
- indices->Add(i);
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- DisallowHeapAllocation no_gc;
- SeededNumberDictionary* dict =
- SeededNumberDictionary::cast(object->elements());
- uint32_t capacity = dict->Capacity();
- Heap* heap = isolate->heap();
- Object* undefined = heap->undefined_value();
- Object* the_hole = heap->the_hole_value();
- FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
- Object* k = dict->KeyAt(j);
- if (k == undefined) continue;
- if (k == the_hole) continue;
- DCHECK(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < range) {
- indices->Add(index);
- }
- });
- break;
- }
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- {
- uint32_t length = static_cast<uint32_t>(
- FixedArrayBase::cast(object->elements())->length());
- if (range <= length) {
- length = range;
- // We will add all indices, so we might as well clear it first
- // and avoid duplicates.
- indices->Clear();
- }
- for (uint32_t i = 0; i < length; i++) {
- indices->Add(i);
- }
- if (length == range) return; // All indices accounted for already.
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- ElementsAccessor* accessor = object->GetElementsAccessor();
- for (uint32_t i = 0; i < range; i++) {
- if (accessor->HasElement(object, i)) {
- indices->Add(i);
- }
- }
- break;
- }
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS: {
- DCHECK(object->IsJSValue());
- Handle<JSValue> js_value = Handle<JSValue>::cast(object);
- DCHECK(js_value->value()->IsString());
- Handle<String> string(String::cast(js_value->value()), isolate);
- uint32_t length = static_cast<uint32_t>(string->length());
- uint32_t i = 0;
- uint32_t limit = Min(length, range);
- for (; i < limit; i++) {
- indices->Add(i);
- }
- ElementsAccessor* accessor = object->GetElementsAccessor();
- for (; i < range; i++) {
- if (accessor->HasElement(object, i)) {
- indices->Add(i);
- }
- }
- break;
- }
- case NO_ELEMENTS:
- break;
- }
-
- PrototypeIterator iter(isolate, object);
- if (!iter.IsAtEnd()) {
- // The prototype will usually have no inherited element indices,
- // but we have to check.
- CollectElementIndices(PrototypeIterator::GetCurrent<JSObject>(iter), range,
- indices);
- }
-}
-
-
-bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
- uint32_t length, ArrayConcatVisitor* visitor) {
- FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, ++i, {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
- if (!maybe.IsJust()) return false;
- if (maybe.FromJust()) {
- Handle<Object> element_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value, JSReceiver::GetElement(isolate, receiver, i),
- false);
- if (!visitor->visit(i, element_value)) return false;
- }
- });
- visitor->increase_index_offset(length);
- return true;
-}
-
-
-/**
- * A helper function that visits "array" elements of a JSReceiver in numerical
- * order.
- *
- * The visitor argument is called for each existing element in the array,
- * with the element index and the element's value.
- * Afterwards, the base index of the visitor is incremented by the array
- * length.
- * Returns false if any access threw an exception, otherwise true.
- */
-bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
- ArrayConcatVisitor* visitor) {
- uint32_t length = 0;
-
- if (receiver->IsJSArray()) {
- Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- length = static_cast<uint32_t>(array->length()->Number());
- } else {
- Handle<Object> val;
- Handle<Object> key = isolate->factory()->length_string();
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, val, Runtime::GetObjectProperty(isolate, receiver, key),
- false);
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, val,
- Object::ToLength(isolate, val), false);
- // TODO(caitp): Support larger element indexes (up to 2^53-1).
- if (!val->ToUint32(&length)) {
- length = 0;
- }
-    // TODO(cbruni): handle other element kinds as well
- return IterateElementsSlow(isolate, receiver, length, visitor);
- }
-
- if (!HasOnlySimpleElements(isolate, *receiver)) {
- return IterateElementsSlow(isolate, receiver, length, visitor);
- }
- Handle<JSObject> array = Handle<JSObject>::cast(receiver);
-
- switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- Handle<FixedArray> elements(FixedArray::cast(array->elements()));
- int fast_length = static_cast<int>(length);
- DCHECK(fast_length <= elements->length());
- FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
- Handle<Object> element_value(elements->get(j), isolate);
- if (!element_value->IsTheHole()) {
- if (!visitor->visit(j, element_value)) return false;
- } else {
- Maybe<bool> maybe = JSReceiver::HasElement(array, j);
- if (!maybe.IsJust()) return false;
- if (maybe.FromJust()) {
- // Call GetElement on array, not its prototype, or getters won't
- // have the correct receiver.
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- JSReceiver::GetElement(isolate, array, j), false);
- if (!visitor->visit(j, element_value)) return false;
- }
- }
- });
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Empty array is FixedArray but not FixedDoubleArray.
- if (length == 0) break;
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- if (array->elements()->IsFixedArray()) {
- DCHECK(array->elements()->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(array->elements()));
- int fast_length = static_cast<int>(length);
- DCHECK(fast_length <= elements->length());
- FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
- if (!elements->is_the_hole(j)) {
- double double_value = elements->get_scalar(j);
- Handle<Object> element_value =
- isolate->factory()->NewNumber(double_value);
- if (!visitor->visit(j, element_value)) return false;
- } else {
- Maybe<bool> maybe = JSReceiver::HasElement(array, j);
- if (!maybe.IsJust()) return false;
- if (maybe.FromJust()) {
- // Call GetElement on array, not its prototype, or getters won't
- // have the correct receiver.
- Handle<Object> element_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- JSReceiver::GetElement(isolate, array, j), false);
- if (!visitor->visit(j, element_value)) return false;
- }
- }
- });
- break;
- }
-
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(array->element_dictionary());
- List<uint32_t> indices(dict->Capacity() / 2);
- // Collect all indices in the object and the prototypes less
- // than length. This might introduce duplicates in the indices list.
- CollectElementIndices(array, length, &indices);
- indices.Sort(&compareUInt32);
- int n = indices.length();
- FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < n, (void)0, {
- uint32_t index = indices[j];
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, JSReceiver::GetElement(isolate, array, index),
- false);
- if (!visitor->visit(index, element)) return false;
- // Skip to next different index (i.e., omit duplicates).
- do {
- j++;
- } while (j < n && indices[j] == index);
- });
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- FOR_WITH_HANDLE_SCOPE(
- isolate, uint32_t, index = 0, index, index < length, index++, {
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, JSReceiver::GetElement(isolate, array, index),
- false);
- if (!visitor->visit(index, element)) return false;
- });
- break;
- }
- case NO_ELEMENTS:
- break;
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- return IterateElementsSlow(isolate, receiver, length, visitor);
- case FAST_STRING_WRAPPER_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
- // |array| is guaranteed to be an array or typed array.
- UNREACHABLE();
- break;
- }
- visitor->increase_index_offset(length);
- return true;
-}
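-
-// Stand-alone sketch of the duplicate-skipping walk used for the
-// dictionary-elements case above: |indices| is sorted ascending and may
-// contain duplicates, and each distinct index is visited exactly once
-// (the visitor here is illustrative):
-template <typename Visitor>
-static inline void VisitDistinctSorted(const uint32_t* indices, int n,
-                                       Visitor visit) {
-  for (int j = 0; j < n;) {
-    uint32_t index = indices[j];
-    visit(index);
-    do {
-      j++;  // Skip to the next different index (i.e., omit duplicates).
-    } while (j < n && indices[j] == index);
-  }
-}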
-
-
-bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
- Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
- Maybe<bool> maybe = JSReceiver::HasProperty(obj, key);
- return maybe.FromMaybe(false);
-}
-
-
-static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
- HandleScope handle_scope(isolate);
- if (!obj->IsJSReceiver()) return Just(false);
- Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
- Handle<Object> value;
- MaybeHandle<Object> maybeValue =
- i::Runtime::GetObjectProperty(isolate, obj, key);
- if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
- if (!value->IsUndefined()) return Just(value->BooleanValue());
- return Object::IsArray(obj);
-}
-
-
-Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
- Isolate* isolate) {
- int argument_count = args->length();
-
- bool is_array_species = *species == isolate->context()->array_function();
-
- // Pass 1: estimate the length and number of elements of the result.
- // The actual length can be larger if any of the arguments have getters
- // that mutate other arguments (but will otherwise be precise).
- // The number of elements is precise if there are no inherited elements.
-
- ElementsKind kind = FAST_SMI_ELEMENTS;
-
- uint32_t estimate_result_length = 0;
- uint32_t estimate_nof_elements = 0;
- FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
- Handle<Object> obj((*args)[i], isolate);
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- length_estimate = static_cast<uint32_t>(array->length()->Number());
- if (length_estimate != 0) {
- ElementsKind array_kind =
- GetPackedElementsKind(array->GetElementsKind());
- kind = GetMoreGeneralElementsKind(kind, array_kind);
- }
- element_estimate = EstimateElementCount(array);
- } else {
- if (obj->IsHeapObject()) {
- kind = GetMoreGeneralElementsKind(
- kind, obj->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS);
- }
- length_estimate = 1;
- element_estimate = 1;
- }
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length < length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
- }
- if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
- }
- });
-
-  // If the estimated number of elements is more than half of the length,
-  // a fixed array (fast case) is more time- and space-efficient than a
-  // dictionary.
- bool fast_case =
- is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
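-  // Worked example (illustrative numbers): for an estimated result length
-  // of 1000, 500 or more estimated elements favor a FixedArray backing
-  // store; below that density a dictionary avoids allocating mostly-hole
-  // storage.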
-
- if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
- Handle<FixedArrayBase> storage =
- isolate->factory()->NewFixedDoubleArray(estimate_result_length);
- int j = 0;
- bool failure = false;
- if (estimate_result_length > 0) {
- Handle<FixedDoubleArray> double_storage =
- Handle<FixedDoubleArray>::cast(storage);
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj((*args)[i], isolate);
- if (obj->IsSmi()) {
- double_storage->set(j, Smi::cast(*obj)->value());
- j++;
- } else if (obj->IsNumber()) {
- double_storage->set(j, obj->Number());
- j++;
- } else {
- DisallowHeapAllocation no_gc;
- JSArray* array = JSArray::cast(*obj);
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- switch (array->GetElementsKind()) {
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Empty array is FixedArray but not FixedDoubleArray.
- if (length == 0) break;
- FixedDoubleArray* elements =
- FixedDoubleArray::cast(array->elements());
- for (uint32_t i = 0; i < length; i++) {
- if (elements->is_the_hole(i)) {
- // TODO(jkummerow/verwaest): We could be a bit more clever
- // here: Check if there are no elements/getters on the
- // prototype chain, and if so, allow creation of a holey
- // result array.
- // Same thing below (holey smi case).
- failure = true;
- break;
- }
- double double_value = elements->get_scalar(i);
- double_storage->set(j, double_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- Object* the_hole = isolate->heap()->the_hole_value();
- FixedArray* elements(FixedArray::cast(array->elements()));
- for (uint32_t i = 0; i < length; i++) {
- Object* element = elements->get(i);
- if (element == the_hole) {
- failure = true;
- break;
- }
- int32_t int_value = Smi::cast(element)->value();
- double_storage->set(j, int_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NO_ELEMENTS:
- DCHECK_EQ(0u, length);
- break;
- default:
- UNREACHABLE();
- }
- }
- if (failure) break;
- }
- }
- if (!failure) {
- return *isolate->factory()->NewJSArrayWithElements(storage, kind, j);
- }
- // In case of failure, fall through.
- }
-
- Handle<Object> storage;
- if (fast_case) {
-    // The backing storage array must be allocated with holes so that
-    // non-existing elements are preserved across concat operations.
- storage =
- isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
- } else if (is_array_species) {
- // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
- uint32_t at_least_space_for =
- estimate_nof_elements + (estimate_nof_elements >> 2);
- storage = SeededNumberDictionary::New(isolate, at_least_space_for);
- } else {
- DCHECK(species->IsConstructor());
- Handle<Object> length(Smi::FromInt(0), isolate);
- Handle<Object> storage_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, storage_object,
- Execution::New(isolate, species, species, 1, &length));
- storage = storage_object;
- }
-
- ArrayConcatVisitor visitor(isolate, storage, fast_case);
-
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj((*args)[i], isolate);
- Maybe<bool> spreadable = IsConcatSpreadable(isolate, obj);
- MAYBE_RETURN(spreadable, isolate->heap()->exception());
- if (spreadable.FromJust()) {
- Handle<JSReceiver> object = Handle<JSReceiver>::cast(obj);
- if (!IterateElements(isolate, object, &visitor)) {
- return isolate->heap()->exception();
- }
- } else {
- if (!visitor.visit(0, obj)) return isolate->heap()->exception();
- visitor.increase_index_offset(1);
- }
- }
-
- if (visitor.exceeds_array_limit()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayLength));
- }
-
- if (is_array_species) {
- return *visitor.ToArray();
- } else {
- return *visitor.storage_jsreceiver();
- }
-}
-
-
-MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
- // We shouldn't overflow when adding another len.
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
- STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
- STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
- USE(kHalfOfMaxInt);
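-  // With a 32-bit int, kHalfOfMaxInt is 2^30; the range check below keeps
-  // every partial sum at or below FixedArray::kMaxLength < 2^30, so adding
-  // one more length stays below 2^31 - 1 and cannot overflow.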
-
- int n_arguments = args->length();
- int result_len = 0;
- {
- DisallowHeapAllocation no_gc;
- // Iterate through all the arguments performing checks
- // and calculating total length.
- for (int i = 0; i < n_arguments; i++) {
- Object* arg = (*args)[i];
- if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
- if (!JSObject::cast(arg)->HasFastElements()) {
- return MaybeHandle<JSArray>();
- }
- if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
- return MaybeHandle<JSArray>();
- }
- Handle<JSArray> array(JSArray::cast(arg), isolate);
- if (HasConcatSpreadableModifier(isolate, array)) {
- return MaybeHandle<JSArray>();
- }
-      // The Array length is guaranteed to be <= kHalfOfMaxInt, thus we won't
-      // overflow.
- result_len += Smi::cast(array->length())->value();
- DCHECK(result_len >= 0);
- // Throw an Error if we overflow the FixedArray limits
- if (FixedDoubleArray::kMaxLength < result_len ||
- FixedArray::kMaxLength < result_len) {
- AllowHeapAllocation allow_gc;
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kInvalidArrayLength),
- JSArray);
- }
- }
- }
- return ElementsAccessor::Concat(isolate, args, n_arguments);
-}
-
-} // namespace
-
-
-// ES6 22.1.3.1 Array.prototype.concat
-BUILTIN(ArrayConcat) {
- HandleScope scope(isolate);
-
- Handle<Object> receiver = args.receiver();
- // TODO(bmeurer): Do we really care about the exact exception message here?
- if (receiver->IsNull() || receiver->IsUndefined()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "Array.prototype.concat")));
- }
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, receiver, Object::ToObject(isolate, args.receiver()));
- args[0] = *receiver;
-
- Handle<JSArray> result_array;
-
-  // Avoid a real @@species read, which would mean extra lookups on the
-  // array constructor.
- if (V8_LIKELY(receiver->IsJSArray() &&
- Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
- isolate->IsArraySpeciesLookupChainIntact())) {
- if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
- return *result_array;
- }
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- }
- // Reading @@species happens before anything else with a side effect, so
- // we can do it here to determine whether to take the fast path.
- Handle<Object> species;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
- if (*species == *isolate->array_function()) {
- if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
- return *result_array;
- }
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- }
- return Slow_ArrayConcat(&args, species, isolate);
-}
-
-
-// ES6 22.1.2.2 Array.isArray
-BUILTIN(ArrayIsArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> object = args.at<Object>(1);
- Maybe<bool> result = Object::IsArray(object);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-namespace {
-
-MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
- Handle<Object> next_source) {
- // Non-empty strings are the only non-JSReceivers that need to be handled
- // explicitly by Object.assign.
- if (!next_source->IsJSReceiver()) {
- return Just(!next_source->IsString() ||
- String::cast(*next_source)->length() == 0);
- }
-
- Isolate* isolate = to->GetIsolate();
- Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
-
- if (!map->IsJSObjectMap()) return Just(false);
- if (!map->OnlyHasSimpleProperties()) return Just(false);
-
- Handle<JSObject> from = Handle<JSObject>::cast(next_source);
- if (from->elements() != isolate->heap()->empty_fixed_array()) {
- return Just(false);
- }
-
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int length = map->NumberOfOwnDescriptors();
-
- bool stable = true;
-
- for (int i = 0; i < length; i++) {
- Handle<Name> next_key(descriptors->GetKey(i), isolate);
- Handle<Object> prop_value;
- // Directly decode from the descriptor array if |from| did not change shape.
- if (stable) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!details.IsEnumerable()) continue;
- if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
- prop_value = handle(descriptors->GetValue(i), isolate);
- } else {
- Representation representation = details.representation();
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- prop_value = JSObject::FastPropertyAt(from, representation, index);
- }
- } else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, JSReceiver::GetProperty(from, next_key),
- Nothing<bool>());
- stable = from->map() == *map;
- }
- } else {
- // If the map did change, do a slower lookup. We are still guaranteed that
- // the object has a simple shape, and that the key is a name.
- LookupIterator it(from, next_key, from,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (!it.IsFound()) continue;
- DCHECK(it.state() == LookupIterator::DATA ||
- it.state() == LookupIterator::ACCESSOR);
- if (!it.IsEnumerable()) continue;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
- }
- LookupIterator it(to, next_key, to);
- bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
- Maybe<bool> result = Object::SetProperty(
- &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
- if (result.IsNothing()) return result;
- if (stable && call_to_js) stable = from->map() == *map;
- }
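-  // Example of how |stable| can flip (illustrative): in
-  // Object.assign({}, { get a() { delete this.b; return 1; }, b: 2 }),
-  // the getter for "a" changes the source map, so "b" is then looked up
-  // through the slower LookupIterator path above.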
-
- return Just(true);
-}
-
-} // namespace
-
-// ES6 19.1.2.1 Object.assign
-BUILTIN(ObjectAssign) {
- HandleScope scope(isolate);
- Handle<Object> target = args.atOrUndefined(isolate, 1);
-
- // 1. Let to be ? ToObject(target).
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target,
- Object::ToObject(isolate, target));
- Handle<JSReceiver> to = Handle<JSReceiver>::cast(target);
- // 2. If only one argument was passed, return to.
- if (args.length() == 2) return *to;
- // 3. Let sources be the List of argument values starting with the
- // second argument.
- // 4. For each element nextSource of sources, in ascending index order,
- for (int i = 2; i < args.length(); ++i) {
- Handle<Object> next_source = args.at<Object>(i);
- Maybe<bool> fast_assign = FastAssign(to, next_source);
- if (fast_assign.IsNothing()) return isolate->heap()->exception();
- if (fast_assign.FromJust()) continue;
- // 4a. If nextSource is undefined or null, let keys be an empty List.
- // 4b. Else,
- // 4b i. Let from be ToObject(nextSource).
- // Only non-empty strings and JSReceivers have enumerable properties.
- Handle<JSReceiver> from =
- Object::ToObject(isolate, next_source).ToHandleChecked();
- // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
- Handle<FixedArray> keys;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(from, OWN_ONLY, ALL_PROPERTIES, KEEP_NUMBERS));
- // 4c. Repeat for each element nextKey of keys in List order,
- for (int j = 0; j < keys->length(); ++j) {
- Handle<Object> next_key(keys->get(j), isolate);
- // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
- PropertyDescriptor desc;
- Maybe<bool> found =
- JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
- if (found.IsNothing()) return isolate->heap()->exception();
- // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
- if (found.FromJust() && desc.enumerable()) {
- // 4c ii 1. Let propValue be ? Get(from, nextKey).
- Handle<Object> prop_value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, prop_value,
- Runtime::GetObjectProperty(isolate, from, next_key));
- // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
- Handle<Object> status;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
- prop_value, STRICT));
- }
- }
- }
- // 5. Return to.
- return *to;
-}
-
-
-// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
-BUILTIN(ObjectCreate) {
- HandleScope scope(isolate);
- Handle<Object> prototype = args.atOrUndefined(isolate, 1);
- if (!prototype->IsNull() && !prototype->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
- }
-
- // Generate the map with the specified {prototype} based on the Object
- // function's initial map from the current native context.
- // TODO(bmeurer): Use a dedicated cache for Object.create; think about
- // slack tracking for Object.create.
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
- isolate);
- if (map->prototype() != *prototype) {
- map = Map::TransitionToPrototype(map, prototype, FAST_PROTOTYPE);
- }
-
- // Actually allocate the object.
- Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
-
- // Define the properties if properties was specified and is not undefined.
- Handle<Object> properties = args.atOrUndefined(isolate, 2);
- if (!properties->IsUndefined()) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSReceiver::DefineProperties(isolate, object, properties));
- }
-
- return *object;
-}
-
-
-// ES6 section 19.1.2.5 Object.freeze ( O )
-BUILTIN(ObjectFreeze) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsJSReceiver()) {
- MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
- FROZEN, Object::THROW_ON_ERROR),
- isolate->heap()->exception());
- }
- return *object;
-}
-
-
-// ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
-BUILTIN(ObjectGetOwnPropertyDescriptor) {
- HandleScope scope(isolate);
- // 1. Let obj be ? ToObject(O).
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- // 2. Let key be ? ToPropertyKey(P).
- Handle<Object> property = args.atOrUndefined(isolate, 2);
- Handle<Name> key;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
- Object::ToName(isolate, property));
- // 3. Let desc be ? obj.[[GetOwnProperty]](key).
- PropertyDescriptor desc;
- Maybe<bool> found =
- JSReceiver::GetOwnPropertyDescriptor(isolate, receiver, key, &desc);
- MAYBE_RETURN(found, isolate->heap()->exception());
- // 4. Return FromPropertyDescriptor(desc).
- if (!found.FromJust()) return isolate->heap()->undefined_value();
- return *desc.ToObject(isolate);
-}
-
-
-namespace {
-
-Object* GetOwnPropertyKeys(Isolate* isolate,
- BuiltinArguments<BuiltinExtraArguments::kNone> args,
- PropertyFilter filter) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> keys;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(receiver, OWN_ONLY, filter, CONVERT_TO_STRING));
- return *isolate->factory()->NewJSArrayWithElements(keys);
-}
-
-} // namespace
-
-
-// ES6 section 19.1.2.7 Object.getOwnPropertyNames ( O )
-BUILTIN(ObjectGetOwnPropertyNames) {
- return GetOwnPropertyKeys(isolate, args, SKIP_SYMBOLS);
-}
-
-
-// ES6 section 19.1.2.8 Object.getOwnPropertySymbols ( O )
-BUILTIN(ObjectGetOwnPropertySymbols) {
- return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
-}
-
-
-// ES#sec-object.is Object.is ( value1, value2 )
-BUILTIN(ObjectIs) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> value1 = args.at<Object>(1);
- Handle<Object> value2 = args.at<Object>(2);
- return isolate->heap()->ToBoolean(value1->SameValue(*value2));
-}
-
-
-// ES6 section 19.1.2.11 Object.isExtensible ( O )
-BUILTIN(ObjectIsExtensible) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Maybe<bool> result =
- object->IsJSReceiver()
- ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
- : Just(false);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
-
-
-// ES6 section 19.1.2.12 Object.isFrozen ( O )
-BUILTIN(ObjectIsFrozen) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Maybe<bool> result = object->IsJSReceiver()
- ? JSReceiver::TestIntegrityLevel(
- Handle<JSReceiver>::cast(object), FROZEN)
- : Just(true);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
-
-
-// ES6 section 19.1.2.13 Object.isSealed ( O )
-BUILTIN(ObjectIsSealed) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Maybe<bool> result = object->IsJSReceiver()
- ? JSReceiver::TestIntegrityLevel(
- Handle<JSReceiver>::cast(object), SEALED)
- : Just(true);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
-
-
-// ES6 section 19.1.2.14 Object.keys ( O )
-BUILTIN(ObjectKeys) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
-
- Handle<FixedArray> keys;
- int enum_length = receiver->map()->EnumLength();
- if (enum_length != kInvalidEnumCacheSentinel &&
- JSObject::cast(*receiver)->elements() ==
- isolate->heap()->empty_fixed_array()) {
- DCHECK(receiver->IsJSObject());
- DCHECK(!JSObject::cast(*receiver)->HasNamedInterceptor());
- DCHECK(!JSObject::cast(*receiver)->IsAccessCheckNeeded());
- DCHECK(!receiver->map()->has_hidden_prototype());
- DCHECK(JSObject::cast(*receiver)->HasFastProperties());
- if (enum_length == 0) {
- keys = isolate->factory()->empty_fixed_array();
- } else {
- Handle<FixedArray> cache(
- receiver->map()->instance_descriptors()->GetEnumCache());
- keys = isolate->factory()->CopyFixedArrayUpTo(cache, enum_length);
- }
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(receiver, OWN_ONLY, ENUMERABLE_STRINGS,
- CONVERT_TO_STRING));
- }
- return *isolate->factory()->NewJSArrayWithElements(keys, FAST_ELEMENTS);
-}
-
-BUILTIN(ObjectValues) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> values;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(values);
-}
-
-
-BUILTIN(ObjectEntries) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- Handle<FixedArray> entries;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, entries,
- JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
- return *isolate->factory()->NewJSArrayWithElements(entries);
-}
-
-BUILTIN(ObjectGetOwnPropertyDescriptors) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<Object> undefined = isolate->factory()->undefined_value();
-
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
-
- Handle<FixedArray> keys;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys, JSReceiver::GetKeys(receiver, OWN_ONLY, ALL_PROPERTIES,
- CONVERT_TO_STRING));
-
- Handle<JSObject> descriptors =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- for (int i = 0; i < keys->length(); ++i) {
- Handle<Name> key = Handle<Name>::cast(FixedArray::get(*keys, i, isolate));
- PropertyDescriptor descriptor;
- Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
- isolate, receiver, key, &descriptor);
- MAYBE_RETURN(did_get_descriptor, isolate->heap()->exception());
-
- Handle<Object> from_descriptor = did_get_descriptor.FromJust()
- ? descriptor.ToObject(isolate)
- : undefined;
-
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, descriptors, key, descriptors, LookupIterator::OWN);
- Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
- Object::DONT_THROW);
- CHECK(success.FromJust());
- }
-
- return *descriptors;
-}
-
-// ES6 section 19.1.2.15 Object.preventExtensions ( O )
-BUILTIN(ObjectPreventExtensions) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsJSReceiver()) {
- MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
- Object::THROW_ON_ERROR),
- isolate->heap()->exception());
- }
- return *object;
-}
-
-
-// ES6 section 19.1.2.17 Object.seal ( O )
-BUILTIN(ObjectSeal) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsJSReceiver()) {
- MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
- SEALED, Object::THROW_ON_ERROR),
- isolate->heap()->exception());
- }
- return *object;
-}
-
-
-namespace {
-
-bool CodeGenerationFromStringsAllowed(Isolate* isolate,
- Handle<Context> context) {
- DCHECK(context->allow_code_gen_from_strings()->IsFalse());
- // Check with callback if set.
- AllowCodeGenerationFromStringsCallback callback =
- isolate->allow_code_gen_callback();
- if (callback == NULL) {
- // No callback set and code generation disallowed.
- return false;
- } else {
- // Callback set. Let it decide if code generation is allowed.
- VMState<EXTERNAL> state(isolate);
- return callback(v8::Utils::ToLocal(context));
- }
-}
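-
-// Sketch of the callback shape consulted above (how the embedder registers
-// it is outside this file; the policy shown is purely illustrative):
-static bool AllowOnlyNonEmptyContexts(v8::Local<v8::Context> context) {
-  // Illustrative policy: permit eval/Function only for live contexts.
-  return !context.IsEmpty();
-}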
-
-
-MaybeHandle<JSFunction> CompileString(Handle<Context> context,
- Handle<String> source,
- ParseRestriction restriction) {
- Isolate* const isolate = context->GetIsolate();
- Handle<Context> native_context(context->native_context(), isolate);
-
- // Check if native context allows code generation from
- // strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings()->IsFalse() &&
- !CodeGenerationFromStringsAllowed(isolate, native_context)) {
- Handle<Object> error_message =
- native_context->ErrorMessageForCodeGenerationFromStrings();
- THROW_NEW_ERROR(isolate, NewEvalError(MessageTemplate::kCodeGenFromStrings,
- error_message),
- JSFunction);
- }
-
- // Compile source string in the native context.
- Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared(),
- isolate);
- return Compiler::GetFunctionFromEval(source, outer_info, native_context,
- SLOPPY, restriction,
- RelocInfo::kNoPosition);
-}
-
-} // namespace
-
-
-// ES6 section 18.2.1 eval (x)
-BUILTIN(GlobalEval) {
- HandleScope scope(isolate);
- Handle<Object> x = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target<JSFunction>();
- Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
- if (!x->IsString()) return *x;
- Handle<JSFunction> function;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, function,
- CompileString(handle(target->native_context(), isolate),
- Handle<String>::cast(x), NO_PARSE_RESTRICTION));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
- return *result;
-}
-
-
-// -----------------------------------------------------------------------------
-// ES6 section 20.2.2 Function Properties of the Math Object
-
-
-// ES6 section 20.2.2.2 Math.acos ( x )
-BUILTIN(MathAcos) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> x = args.at<Object>(1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
- return *isolate->factory()->NewHeapNumber(std::acos(x->Number()));
-}
-
-
-// ES6 section 20.2.2.4 Math.asin ( x )
-BUILTIN(MathAsin) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> x = args.at<Object>(1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
- return *isolate->factory()->NewHeapNumber(std::asin(x->Number()));
-}
-
-
-// ES6 section 20.2.2.6 Math.atan ( x )
-BUILTIN(MathAtan) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> x = args.at<Object>(1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
- return *isolate->factory()->NewHeapNumber(std::atan(x->Number()));
-}
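
MathAcos, MathAsin and MathAtan above share one shape: coerce the argument with ToNumber, then delegate to the C math library, which already gives the spec's NaN behavior. A sketch of the post-coercion step only:

    #include <cmath>

    // After ToNumber has produced a double, the builtin is just the libm
    // call; out-of-domain inputs come back as NaN, e.g. std::acos(2.0).
    double MathAcosLike(double coerced_x) { return std::acos(coerced_x); }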
-
-namespace {
-
-void Generate_MathRoundingOperation(
- compiler::CodeStubAssembler* assembler,
- compiler::Node* (compiler::CodeStubAssembler::*float64op)(
- compiler::Node*)) {
- typedef compiler::CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
-
- // We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Load the current {x} value.
- Node* x = var_x.value();
-
- // Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
-
- assembler->Bind(&if_xissmi);
- {
- // Nothing to do when {x} is a Smi.
- assembler->Return(x);
- }
-
- assembler->Bind(&if_xisnotsmi);
- {
- // Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
- {
- Node* x_value = assembler->LoadHeapNumberValue(x);
- Node* value = (assembler->*float64op)(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
- }
-
- assembler->Bind(&if_xisnotheapnumber);
- {
- // Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
- }
- }
- }
-}
-
-} // namespace
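
The assembler graph built above is effectively a small state machine: a Smi is returned unchanged, a HeapNumber gets the float64 operation applied, and anything else is run through the NonNumberToNumber stub and looped back. A plain C++ sketch of the same control flow, modeling tagged values with std::variant (illustrative only, not the CodeStubAssembler API):

    #include <variant>

    struct Smi { int value; };            // already integral: nothing to round
    struct HeapNumber { double value; };  // boxed double
    struct Other {};                      // any non-number object
    using Tagged = std::variant<Smi, HeapNumber, Other>;

    // Hypothetical stand-in for the NonNumberToNumber stub call.
    Tagged NonNumberToNumber(const Other&) { return HeapNumber{0.0}; }

    double RoundingOp(Tagged x, double (*float64op)(double)) {
      for (;;) {  // mirrors the var_x/loop structure above
        if (auto* smi = std::get_if<Smi>(&x)) return smi->value;
        if (auto* num = std::get_if<HeapNumber>(&x)) return float64op(num->value);
        x = NonNumberToNumber(std::get<Other>(x));  // convert, then try again
      }
    }

Math.ceil, Math.floor, Math.round and Math.trunc below differ only in which float64 operation gets plugged in.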
-
-// ES6 section 20.2.2.10 Math.ceil ( x )
-void Builtins::Generate_MathCeil(compiler::CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler,
- &compiler::CodeStubAssembler::Float64Ceil);
-}
-
-// ES6 section 20.2.2.11 Math.clz32 ( x )
-void Builtins::Generate_MathClz32(compiler::CodeStubAssembler* assembler) {
- typedef compiler::CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
-
- // Shared entry point for the clz32 operation.
- Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
- Label do_clz32(assembler);
-
- // We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Load the current {x} value.
- Node* x = var_x.value();
-
- // Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
-
- assembler->Bind(&if_xissmi);
- {
- var_clz32_x.Bind(assembler->SmiToWord32(x));
- assembler->Goto(&do_clz32);
- }
-
- assembler->Bind(&if_xisnotsmi);
- {
- // Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
- {
- var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
- assembler->Goto(&do_clz32);
- }
-
- assembler->Bind(&if_xisnotheapnumber);
- {
- // Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&do_clz32);
- {
- Node* x_value = var_clz32_x.value();
- Node* value = assembler->Word32Clz(x_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
- }
-}
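
Both branches above funnel into do_clz32 with a raw 32-bit word, and the tail just counts leading zeros, with zero mapping to 32. A portable restatement:

    #include <cstdint>

    // Math.clz32 semantics as in the do_clz32 tail above; Clz32(0) == 32,
    // Clz32(1) == 31, Clz32(0x80000000u) == 0.
    int32_t Clz32(uint32_t x) {
      if (x == 0) return 32;
      int32_t n = 0;
      while ((x & 0x80000000u) == 0) {  // shift left until the top bit is set
        x <<= 1;
        ++n;
      }
      return n;
    }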
-
-// ES6 section 20.2.2.16 Math.floor ( x )
-void Builtins::Generate_MathFloor(compiler::CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler,
- &compiler::CodeStubAssembler::Float64Floor);
-}
-
-// ES6 section 20.2.2.17 Math.fround ( x )
-BUILTIN(MathFround) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> x = args.at<Object>(1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
- float x32 = DoubleToFloat32(x->Number());
- return *isolate->factory()->NewNumber(x32);
-}
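
DoubleToFloat32 amounts to a round trip through IEEE-754 single precision, so Math.fround yields the nearest float32 value widened back to double. A sketch, valid for finite inputs that fit in a float:

    // FroundLike(1.5) is exactly 1.5, while FroundLike(0.1) differs from 0.1,
    // since 0.1 has no exact single-precision representation.
    double FroundLike(double x) {
      return static_cast<double>(static_cast<float>(x));
    }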
-
-// ES6 section 20.2.2.19 Math.imul ( x, y )
-BUILTIN(MathImul) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> x = args.at<Object>(1);
- Handle<Object> y = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, y, Object::ToNumber(y));
- int product = static_cast<int>(NumberToUint32(*x) * NumberToUint32(*y));
- return *isolate->factory()->NewNumberFromInt(product);
-}
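
The unsigned multiply in MathImul wraps modulo 2^32, and the final cast reinterprets the low 32 bits as a signed value. A standalone sketch with two worked cases:

    #include <cstdint>

    // Math.imul semantics as above: ToUint32 both operands, multiply mod
    // 2^32, reinterpret as int32 (two's complement; the out-of-range cast is
    // well-defined since C++20). ImulLike(0xFFFFFFFFu, 5) == -5 and
    // ImulLike(0x80000000u, 2) == 0.
    int32_t ImulLike(uint32_t a, uint32_t b) {
      return static_cast<int32_t>(a * b);
    }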
-
-// ES6 section 20.2.2.28 Math.round ( x )
-void Builtins::Generate_MathRound(compiler::CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler,
- &compiler::CodeStubAssembler::Float64Round);
-}
-
-// ES6 section 20.2.2.32 Math.sqrt ( x )
-void Builtins::Generate_MathSqrt(compiler::CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Sqrt(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
-
-// ES6 section 20.2.2.35 Math.trunc ( x )
-void Builtins::Generate_MathTrunc(compiler::CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler,
- &compiler::CodeStubAssembler::Float64Trunc);
-}
-
-// -----------------------------------------------------------------------------
-// ES6 section 26.1 The Reflect Object
-
-
-// ES6 section 26.1.3 Reflect.defineProperty
-BUILTIN(ReflectDefineProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
- Handle<Object> attributes = args.at<Object>(3);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.defineProperty")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- PropertyDescriptor desc;
- if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
- return isolate->heap()->exception();
- }
-
- Maybe<bool> result =
- JSReceiver::DefineOwnProperty(isolate, Handle<JSReceiver>::cast(target),
- name, &desc, Object::DONT_THROW);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
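
All of the Reflect builtins in this section follow the pattern visible here: reject non-receivers with a TypeError, coerce the key with ToName, then thread a Maybe<bool> through MAYBE_RETURN, where Nothing means an exception is already pending. The shape, restated with std::optional rather than the V8 types:

    #include <optional>

    // nullopt models Maybe's Nothing ("exception pending"); a value models
    // Just. MAYBE_RETURN bails out on Nothing, otherwise the boolean outcome
    // is returned, as in ToBoolean(result.FromJust()) above.
    bool MaybeReturnShape(std::optional<bool> maybe, bool* out_result) {
      if (!maybe.has_value()) return false;  // propagate the pending exception
      *out_result = maybe.value();
      return true;
    }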
-
-
-// ES6 section 26.1.4 Reflect.deleteProperty
-BUILTIN(ReflectDeleteProperty) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.deleteProperty")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
- Handle<JSReceiver>::cast(target), name, SLOPPY);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-
-// ES6 section 26.1.6 Reflect.get
-BUILTIN(ReflectGet) {
- HandleScope scope(isolate);
- Handle<Object> target = args.atOrUndefined(isolate, 1);
- Handle<Object> key = args.atOrUndefined(isolate, 2);
- Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.get")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::GetPropertyOrElement(
- receiver, name, Handle<JSReceiver>::cast(target)));
-
- return *result;
-}
-
-
-// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
-BUILTIN(ReflectGetOwnPropertyDescriptor) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.getOwnPropertyDescriptor")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- PropertyDescriptor desc;
- Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
- isolate, Handle<JSReceiver>::cast(target), name, &desc);
- MAYBE_RETURN(found, isolate->heap()->exception());
- if (!found.FromJust()) return isolate->heap()->undefined_value();
- return *desc.ToObject(isolate);
-}
-
-
-// ES6 section 26.1.8 Reflect.getPrototypeOf
-BUILTIN(ReflectGetPrototypeOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.getPrototypeOf")));
- }
- Handle<Object> prototype;
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, prototype, JSReceiver::GetPrototype(isolate, receiver));
- return *prototype;
-}
-
-
-// ES6 section 26.1.9 Reflect.has
-BUILTIN(ReflectHas) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.has")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- Maybe<bool> result =
- JSReceiver::HasProperty(Handle<JSReceiver>::cast(target), name);
- return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
- : isolate->heap()->exception();
-}
-
-
-// ES6 section 26.1.10 Reflect.isExtensible
-BUILTIN(ReflectIsExtensible) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.isExtensible")));
- }
-
- Maybe<bool> result =
- JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
- MAYBE_RETURN(result, isolate->heap()->exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-
-// ES6 section 26.1.11 Reflect.ownKeys
-BUILTIN(ReflectOwnKeys) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.ownKeys")));
- }
-
- Handle<FixedArray> keys;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(Handle<JSReceiver>::cast(target), OWN_ONLY,
- ALL_PROPERTIES, CONVERT_TO_STRING));
- return *isolate->factory()->NewJSArrayWithElements(keys);
-}
-
-
-// ES6 section 26.1.12 Reflect.preventExtensions
-BUILTIN(ReflectPreventExtensions) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.preventExtensions")));
- }
-
- Maybe<bool> result = JSReceiver::PreventExtensions(
- Handle<JSReceiver>::cast(target), Object::DONT_THROW);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-
-// ES6 section 26.1.13 Reflect.set
-BUILTIN(ReflectSet) {
- HandleScope scope(isolate);
- Handle<Object> target = args.atOrUndefined(isolate, 1);
- Handle<Object> key = args.atOrUndefined(isolate, 2);
- Handle<Object> value = args.atOrUndefined(isolate, 3);
- Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.set")));
- }
-
- Handle<Name> name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Object::ToName(isolate, key));
-
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, receiver, name, Handle<JSReceiver>::cast(target));
- Maybe<bool> result = Object::SetSuperProperty(
- &it, value, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-
-// ES6 section 26.1.14 Reflect.setPrototypeOf
-BUILTIN(ReflectSetPrototypeOf) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> proto = args.at<Object>(2);
-
- if (!target->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
- isolate->factory()->NewStringFromAsciiChecked(
- "Reflect.setPrototypeOf")));
- }
-
- if (!proto->IsJSReceiver() && !proto->IsNull()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
- }
-
- Maybe<bool> result = JSReceiver::SetPrototype(
- Handle<JSReceiver>::cast(target), proto, true, Object::DONT_THROW);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return *isolate->factory()->ToBoolean(result.FromJust());
-}
-
-
-// -----------------------------------------------------------------------------
-// ES6 section 19.3 Boolean Objects
-
-
-// ES6 section 19.3.1.1 Boolean ( value ) for the [[Call]] case.
-BUILTIN(BooleanConstructor) {
- HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- return isolate->heap()->ToBoolean(value->BooleanValue());
-}
-
-
-// ES6 section 19.3.1.1 Boolean ( value ) for the [[Construct]] case.
-BUILTIN(BooleanConstructor_ConstructStub) {
- HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target<JSFunction>();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- DCHECK(*target == target->native_context()->boolean_function());
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- Handle<JSValue>::cast(result)->set_value(
- isolate->heap()->ToBoolean(value->BooleanValue()));
- return *result;
-}
-
-
-// ES6 section 19.3.3.2 Boolean.prototype.toString ( )
-BUILTIN(BooleanPrototypeToString) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (receiver->IsJSValue()) {
- receiver = handle(Handle<JSValue>::cast(receiver)->value(), isolate);
- }
- if (!receiver->IsBoolean()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotGeneric,
- isolate->factory()->NewStringFromAsciiChecked(
- "Boolean.prototype.toString")));
- }
- return Handle<Oddball>::cast(receiver)->to_string();
-}
-
-
-// ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
-BUILTIN(BooleanPrototypeValueOf) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (receiver->IsJSValue()) {
- receiver = handle(Handle<JSValue>::cast(receiver)->value(), isolate);
- }
- if (!receiver->IsBoolean()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotGeneric,
- isolate->factory()->NewStringFromAsciiChecked(
- "Boolean.prototype.valueOf")));
- }
- return *receiver;
-}
-
-
-// -----------------------------------------------------------------------------
-// ES6 section 24.2 DataView Objects
-
-
-// ES6 section 24.2.2 The DataView Constructor for the [[Call]] case.
-BUILTIN(DataViewConstructor) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked("DataView")));
-}
-
-
-// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
-BUILTIN(DataViewConstructor_ConstructStub) {
- HandleScope scope(isolate);
- Handle<JSFunction> target = args.target<JSFunction>();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> buffer = args.atOrUndefined(isolate, 1);
- Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
- Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
-
- // 2. If Type(buffer) is not Object, throw a TypeError exception.
- // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
- // TypeError exception.
- if (!buffer->IsJSArrayBuffer()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
- }
- Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
-
- // 4. Let numberOffset be ? ToNumber(byteOffset).
- Handle<Object> number_offset;
- if (byte_offset->IsUndefined()) {
-    // We intentionally violate the specification at this point to allow
-    // new DataView(buffer) invocations to be equivalent to the full
-    // new DataView(buffer, 0) invocation.
- number_offset = handle(Smi::FromInt(0), isolate);
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_offset,
- Object::ToNumber(byte_offset));
- }
-
- // 5. Let offset be ToInteger(numberOffset).
- Handle<Object> offset;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, offset,
- Object::ToInteger(isolate, number_offset));
-
- // 6. If numberOffset ≠ offset or offset < 0, throw a RangeError exception.
- if (number_offset->Number() != offset->Number() || offset->Number() < 0.0) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewOffset));
- }
-
- // 7. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- // We currently violate the specification at this point.
-
- // 8. Let bufferByteLength be the value of buffer's [[ArrayBufferByteLength]]
- // internal slot.
- double const buffer_byte_length = array_buffer->byte_length()->Number();
-
- // 9. If offset > bufferByteLength, throw a RangeError exception
- if (offset->Number() > buffer_byte_length) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewOffset));
- }
-
- Handle<Object> view_byte_length;
- if (byte_length->IsUndefined()) {
- // 10. If byteLength is undefined, then
- // a. Let viewByteLength be bufferByteLength - offset.
- view_byte_length =
- isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
- } else {
- // 11. Else,
- // a. Let viewByteLength be ? ToLength(byteLength).
- // b. If offset+viewByteLength > bufferByteLength, throw a RangeError
- // exception
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, view_byte_length, Object::ToLength(isolate, byte_length));
- if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
- }
- }
-
- // 12. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
- // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
- // [[ByteLength]], [[ByteOffset]]»).
- // 13. Set O's [[DataView]] internal slot to true.
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- for (int i = 0; i < ArrayBufferView::kInternalFieldCount; ++i) {
- Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::FromInt(0));
- }
-
- // 14. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
- Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
-
- // 15. Set O's [[ByteLength]] internal slot to viewByteLength.
- Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
-
- // 16. Set O's [[ByteOffset]] internal slot to offset.
- Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
-
- // 17. Return O.
- return *result;
-}
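
Steps 4 through 11 above boil down to validating the (offset, length) pair against the buffer length. A hypothetical standalone restatement, returning the resolved view length or nullopt where the builtin throws a RangeError (byte_length is assumed to be already ToLength-converted):

    #include <cmath>
    #include <optional>

    std::optional<double> ResolveDataViewSpan(double number_offset,
                                              double buffer_byte_length,
                                              std::optional<double> byte_length) {
      double const offset = std::trunc(number_offset);  // ToInteger, roughly
      if (number_offset != offset || offset < 0) return std::nullopt;      // step 6
      if (offset > buffer_byte_length) return std::nullopt;                // step 9
      if (!byte_length) return buffer_byte_length - offset;                // step 10
      if (offset + *byte_length > buffer_byte_length) return std::nullopt; // step 11
      return *byte_length;
    }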
-
-
-// -----------------------------------------------------------------------------
-// ES6 section 20.3 Date Objects
-
-
-namespace {
-
-// ES6 section 20.3.1.1 Time Values and Time Range
-const double kMinYear = -1000000.0;
-const double kMaxYear = -kMinYear;
-const double kMinMonth = -10000000.0;
-const double kMaxMonth = -kMinMonth;
-
-
-// 20.3.1.2 Day Number and Time within Day
-const double kMsPerDay = 86400000.0;
-
-
-// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
-const double kMsPerSecond = 1000.0;
-const double kMsPerMinute = 60000.0;
-const double kMsPerHour = 3600000.0;
-
-
-// ES6 section 20.3.1.14 MakeDate (day, time)
-double MakeDate(double day, double time) {
- if (std::isfinite(day) && std::isfinite(time)) {
- return time + day * kMsPerDay;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-
-// ES6 section 20.3.1.13 MakeDay (year, month, date)
-double MakeDay(double year, double month, double date) {
- if ((kMinYear <= year && year <= kMaxYear) &&
- (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
- int y = FastD2I(year);
- int m = FastD2I(month);
- y += m / 12;
- m %= 12;
- if (m < 0) {
- m += 12;
- y -= 1;
- }
- DCHECK_LE(0, m);
- DCHECK_LT(m, 12);
-
- // kYearDelta is an arbitrary number such that:
- // a) kYearDelta = -1 (mod 400)
- // b) year + kYearDelta > 0 for years in the range defined by
-    //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
- // Jan 1 1970. This is required so that we don't run into integer
- // division of negative numbers.
- // c) there shouldn't be an overflow for 32-bit integers in the following
- // operations.
- static const int kYearDelta = 399999;
- static const int kBaseDay =
- 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
- (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
- int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
- (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
- kBaseDay;
- if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
- static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
- 181, 212, 243, 273, 304, 334};
- day_from_year += kDayFromMonth[m];
- } else {
- static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
- 182, 213, 244, 274, 305, 335};
- day_from_year += kDayFromMonth[m];
- }
- return static_cast<double>(day_from_year - 1) + date;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
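
The (year, month) normalization at the top of MakeDay folds out-of-range months into [0, 11] before any day counting happens. A standalone restatement with two worked cases:

    #include <utility>

    // Mirrors the y += m / 12; m %= 12; fix-up above: (1970, 14) normalizes
    // to (1971, 2), i.e. March 1971, and (1970, -1) to (1969, 11), December
    // 1969. MakeDay(1970, 0, 1) itself evaluates to 0, the epoch day.
    std::pair<int, int> NormalizeYearMonth(int y, int m) {
      y += m / 12;
      m %= 12;
      if (m < 0) {
        m += 12;
        y -= 1;
      }
      return {y, m};
    }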
-
-
-// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
-double MakeTime(double hour, double min, double sec, double ms) {
- if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
- std::isfinite(ms)) {
- double const h = DoubleToInteger(hour);
- double const m = DoubleToInteger(min);
- double const s = DoubleToInteger(sec);
- double const milli = DoubleToInteger(ms);
- return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-
-// ES6 section 20.3.1.15 TimeClip (time)
-double TimeClip(double time) {
- if (-DateCache::kMaxTimeInMs <= time && time <= DateCache::kMaxTimeInMs) {
- return DoubleToInteger(time) + 0.0;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
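
TimeClip bounds time values to plus or minus 8.64e15 ms, i.e. 100,000,000 days on either side of the epoch, and the trailing + 0.0 folds -0 into +0. A sketch, using std::trunc for DoubleToInteger (they agree on finite inputs):

    #include <cmath>
    #include <limits>

    // 8.64e15 corresponds to DateCache::kMaxTimeInMs above.
    double TimeClipSketch(double time) {
      if (!(std::abs(time) <= 8.64e15)) {  // also rejects NaN inputs
        return std::numeric_limits<double>::quiet_NaN();
      }
      return std::trunc(time) + 0.0;  // truncate toward zero; -0 becomes +0
    }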
-
-
-const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
- "Thu", "Fri", "Sat"};
-const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
-
-
-// ES6 section 20.3.1.16 Date Time String Format
-double ParseDateTimeString(Handle<String> str) {
- Isolate* const isolate = str->GetIsolate();
- str = String::Flatten(str);
- // TODO(bmeurer): Change DateParser to not use the FixedArray.
- Handle<FixedArray> tmp =
- isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
- DisallowHeapAllocation no_gc;
- String::FlatContent str_content = str->GetFlatContent();
- bool result;
- if (str_content.IsOneByte()) {
- result = DateParser::Parse(str_content.ToOneByteVector(), *tmp,
- isolate->unicode_cache());
- } else {
- result = DateParser::Parse(str_content.ToUC16Vector(), *tmp,
- isolate->unicode_cache());
- }
- if (!result) return std::numeric_limits<double>::quiet_NaN();
- double const day = MakeDay(tmp->get(0)->Number(), tmp->get(1)->Number(),
- tmp->get(2)->Number());
- double const time = MakeTime(tmp->get(3)->Number(), tmp->get(4)->Number(),
- tmp->get(5)->Number(), tmp->get(6)->Number());
- double date = MakeDate(day, time);
- if (tmp->get(7)->IsNull()) {
- if (!std::isnan(date)) {
- date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
- }
- } else {
- date -= tmp->get(7)->Number() * 1000.0;
- }
- return date;
-}
-
-
-enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
-
-
-// ES6 section 20.3.4.41.1 ToDateString(tv)
-void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
- ToDateStringMode mode = kDateAndTime) {
- if (std::isnan(time_val)) {
- SNPrintF(str, "Invalid Date");
- return;
- }
- int64_t time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = date_cache->ToLocal(time_ms);
- int year, month, day, weekday, hour, min, sec, ms;
- date_cache->BreakDownTime(local_time_ms, &year, &month, &day, &weekday, &hour,
- &min, &sec, &ms);
- int timezone_offset = -date_cache->TimezoneOffset(time_ms);
- int timezone_hour = std::abs(timezone_offset) / 60;
- int timezone_min = std::abs(timezone_offset) % 60;
- const char* local_timezone = date_cache->LocalTimezone(time_ms);
- switch (mode) {
- case kDateOnly:
- SNPrintF(str, "%s %s %02d %4d", kShortWeekDays[weekday],
- kShortMonths[month], day, year);
- return;
- case kTimeOnly:
- SNPrintF(str, "%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
- (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
- local_timezone);
- return;
- case kDateAndTime:
- SNPrintF(str, "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
- kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
- min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
- timezone_min, local_timezone);
- return;
- }
- UNREACHABLE();
-}
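
For concreteness, the kDateAndTime branch produces strings of the shape below; the broken-down values here are made up, since the real ones come from the DateCache:

    #include <cstdio>

    // Prints "Tue Sep 06 2016 22:49:51 GMT+0200 (CEST)", using the same
    // format string as the kDateAndTime branch above.
    int main() {
      char buffer[128];
      std::snprintf(buffer, sizeof(buffer),
                    "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
                    "Tue", "Sep", 6, 2016, 22, 49, 51, '+', 2, 0, "CEST");
      std::puts(buffer);
      return 0;
    }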
-
-
-Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
- if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
- time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
- Isolate* const isolate = date->GetIsolate();
- time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
- } else {
- time_val = std::numeric_limits<double>::quiet_NaN();
- }
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-} // namespace
-
-
-// ES6 section 20.3.2 The Date Constructor for the [[Call]] case.
-BUILTIN(DateConstructor) {
- HandleScope scope(isolate);
- double const time_val = JSDate::CurrentTimeValue(isolate);
- char buffer[128];
- Vector<char> str(buffer, arraysize(buffer));
- ToDateString(time_val, str, isolate->date_cache());
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
- return *result;
-}
-
-
-// ES6 section 20.3.2 The Date Constructor for the [[Construct]] case.
-BUILTIN(DateConstructor_ConstructStub) {
- HandleScope scope(isolate);
- int const argc = args.length() - 1;
- Handle<JSFunction> target = args.target<JSFunction>();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- double time_val;
- if (argc == 0) {
- time_val = JSDate::CurrentTimeValue(isolate);
- } else if (argc == 1) {
- Handle<Object> value = args.at<Object>(1);
- if (value->IsJSDate()) {
- time_val = Handle<JSDate>::cast(value)->value()->Number();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToPrimitive(value));
- if (value->IsString()) {
- time_val = ParseDateTimeString(Handle<String>::cast(value));
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToNumber(value));
- time_val = value->Number();
- }
- }
- } else {
- Handle<Object> year_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at<Object>(1)));
- Handle<Object> month_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at<Object>(2)));
- double year = year_object->Number();
- double month = month_object->Number();
- double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
- if (argc >= 3) {
- Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
- Object::ToNumber(args.at<Object>(3)));
- date = date_object->Number();
- if (argc >= 4) {
- Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
- hours = hours_object->Number();
- if (argc >= 5) {
- Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
- minutes = minutes_object->Number();
- if (argc >= 6) {
- Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
- seconds = seconds_object->Number();
- if (argc >= 7) {
- Handle<Object> ms_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
- ms = ms_object->Number();
- }
- }
- }
- }
- }
- if (!std::isnan(year)) {
- double const y = DoubleToInteger(year);
- if (0.0 <= y && y <= 99) year = 1900 + y;
- }
- double const day = MakeDay(year, month, date);
- double const time = MakeTime(hours, minutes, seconds, ms);
- time_val = MakeDate(day, time);
- if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
- time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
- time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
- } else {
- time_val = std::numeric_limits<double>::quiet_NaN();
- }
- }
- Handle<JSDate> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSDate::New(target, new_target, time_val));
- return *result;
-}
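
Note the two-digit-year rule near the end: integral year arguments in [0, 99] are mapped into 1900-1999 before MakeDay runs, so new Date(16, 8, 6) denotes September 6th, 1916. As a standalone helper (std::trunc approximates DoubleToInteger for finite inputs):

    #include <cmath>

    // The same adjustment is reused by Date.UTC below.
    double AdjustTwoDigitYear(double year) {
      if (std::isnan(year)) return year;
      double const y = std::trunc(year);
      if (0.0 <= y && y <= 99.0) return 1900.0 + y;
      return year;
    }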
-
-
-// ES6 section 20.3.3.1 Date.now ( )
-BUILTIN(DateNow) {
- HandleScope scope(isolate);
- return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
-}
-
-
-// ES6 section 20.3.3.2 Date.parse ( string )
-BUILTIN(DateParse) {
- HandleScope scope(isolate);
- Handle<String> string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, string,
- Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
- return *isolate->factory()->NewNumber(ParseDateTimeString(string));
-}
-
-
-// ES6 section 20.3.3.4 Date.UTC (year,month,date,hours,minutes,seconds,ms)
-BUILTIN(DateUTC) {
- HandleScope scope(isolate);
- int const argc = args.length() - 1;
- double year = std::numeric_limits<double>::quiet_NaN();
- double month = std::numeric_limits<double>::quiet_NaN();
- double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
- if (argc >= 1) {
- Handle<Object> year_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at<Object>(1)));
- year = year_object->Number();
- if (argc >= 2) {
- Handle<Object> month_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at<Object>(2)));
- month = month_object->Number();
- if (argc >= 3) {
- Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, date_object, Object::ToNumber(args.at<Object>(3)));
- date = date_object->Number();
- if (argc >= 4) {
- Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
- hours = hours_object->Number();
- if (argc >= 5) {
- Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
- minutes = minutes_object->Number();
- if (argc >= 6) {
- Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, seconds_object,
- Object::ToNumber(args.at<Object>(6)));
- seconds = seconds_object->Number();
- if (argc >= 7) {
- Handle<Object> ms_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
- ms = ms_object->Number();
- }
- }
- }
- }
- }
- }
- }
- if (!std::isnan(year)) {
- double const y = DoubleToInteger(year);
- if (0.0 <= y && y <= 99) year = 1900 + y;
- }
- double const day = MakeDay(year, month, date);
- double const time = MakeTime(hours, minutes, seconds, ms);
- return *isolate->factory()->NewNumber(TimeClip(MakeDate(day, time)));
-}
-
-
-// ES6 section 20.3.4.20 Date.prototype.setDate ( date )
-BUILTIN(DatePrototypeSetDate) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setDate");
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- time_val = MakeDate(MakeDay(year, month, value->Number()), time_within_day);
- }
- return SetLocalDateValue(date, time_val);
-}
-
-
-// ES6 section 20.3.4.21 Date.prototype.setFullYear (year, month, date)
-BUILTIN(DatePrototypeSetFullYear) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setFullYear");
- int const argc = args.length() - 1;
- Handle<Object> year = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
- double y = year->Number(), m = 0.0, dt = 1.0;
- int time_within_day = 0;
- if (!std::isnan(date->value()->Number())) {
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
- time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
- }
- if (argc >= 2) {
- Handle<Object> month = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
- m = month->Number();
- if (argc >= 3) {
- Handle<Object> date = args.at<Object>(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
- dt = date->Number();
- }
- }
- double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
- return SetLocalDateValue(date, time_val);
-}
-
-
-// ES6 section 20.3.4.22 Date.prototype.setHours(hour, min, sec, ms)
-BUILTIN(DatePrototypeSetHours) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setHours");
- int const argc = args.length() - 1;
- Handle<Object> hour = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
- double h = hour->Number();
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int day = isolate->date_cache()->DaysFromTime(local_time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
- double m = (time_within_day / (60 * 1000)) % 60;
- double s = (time_within_day / 1000) % 60;
- double milli = time_within_day % 1000;
- if (argc >= 2) {
- Handle<Object> min = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
- m = min->Number();
- if (argc >= 3) {
- Handle<Object> sec = args.at<Object>(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
- s = sec->Number();
- if (argc >= 4) {
- Handle<Object> ms = args.at<Object>(4);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- milli = ms->Number();
- }
- }
- }
- time_val = MakeDate(day, MakeTime(h, m, s, milli));
- }
- return SetLocalDateValue(date, time_val);
-}
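
Each setter in this family reuses the decomposition seen here: time_within_day is split into hour, minute, second and millisecond by integer division, and only the fields the caller supplied get replaced. A worked check of the arithmetic:

    #include <cassert>

    // 45296789 ms into the day decomposes to 12:34:56.789 under the
    // divisions used by the setters above.
    int main() {
      int const time_within_day = 45296789;
      assert(time_within_day / (60 * 60 * 1000) == 12);    // hours
      assert((time_within_day / (60 * 1000)) % 60 == 34);  // minutes
      assert((time_within_day / 1000) % 60 == 56);         // seconds
      assert(time_within_day % 1000 == 789);               // milliseconds
      return 0;
    }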
-
-
-// ES6 section 20.3.4.23 Date.prototype.setMilliseconds(ms)
-BUILTIN(DatePrototypeSetMilliseconds) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setMilliseconds");
- Handle<Object> ms = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int day = isolate->date_cache()->DaysFromTime(local_time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
- int h = time_within_day / (60 * 60 * 1000);
- int m = (time_within_day / (60 * 1000)) % 60;
- int s = (time_within_day / 1000) % 60;
- time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
- }
- return SetLocalDateValue(date, time_val);
-}
-
-
-// ES6 section 20.3.4.24 Date.prototype.setMinutes ( min, sec, ms )
-BUILTIN(DatePrototypeSetMinutes) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setMinutes");
- int const argc = args.length() - 1;
- Handle<Object> min = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int day = isolate->date_cache()->DaysFromTime(local_time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
- int h = time_within_day / (60 * 60 * 1000);
- double m = min->Number();
- double s = (time_within_day / 1000) % 60;
- double milli = time_within_day % 1000;
- if (argc >= 2) {
- Handle<Object> sec = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
- s = sec->Number();
- if (argc >= 3) {
- Handle<Object> ms = args.at<Object>(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- milli = ms->Number();
- }
- }
- time_val = MakeDate(day, MakeTime(h, m, s, milli));
- }
- return SetLocalDateValue(date, time_val);
-}
-
-
-// ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date )
-BUILTIN(DatePrototypeSetMonth) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth");
- int const argc = args.length() - 1;
- Handle<Object> month = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int days = isolate->date_cache()->DaysFromTime(local_time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, unused, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
- double m = month->Number();
- double dt = day;
- if (argc >= 2) {
- Handle<Object> date = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
- dt = date->Number();
- }
- time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
- }
- return SetLocalDateValue(date, time_val);
-}
-
-
-// ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms )
-BUILTIN(DatePrototypeSetSeconds) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setSeconds");
- int const argc = args.length() - 1;
- Handle<Object> sec = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int day = isolate->date_cache()->DaysFromTime(local_time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
- int h = time_within_day / (60 * 60 * 1000);
- double m = (time_within_day / (60 * 1000)) % 60;
- double s = sec->Number();
- double milli = time_within_day % 1000;
- if (argc >= 2) {
- Handle<Object> ms = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- milli = ms->Number();
- }
- time_val = MakeDate(day, MakeTime(h, m, s, milli));
- }
- return SetLocalDateValue(date, time_val);
-}
-
-
-// ES6 section 20.3.4.27 Date.prototype.setTime ( time )
-BUILTIN(DatePrototypeSetTime) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setTime");
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
- return *JSDate::SetValue(date, TimeClip(value->Number()));
-}
-
-
-// ES6 section 20.3.4.28 Date.prototype.setUTCDate ( date )
-BUILTIN(DatePrototypeSetUTCDate) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCDate");
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
- if (std::isnan(date->value()->Number())) return date->value();
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
- int const days = isolate->date_cache()->DaysFromTime(time_ms);
- int const time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- double const time_val =
- MakeDate(MakeDay(year, month, value->Number()), time_within_day);
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-
-// ES6 section 20.3.4.29 Date.prototype.setUTCFullYear (year, month, date)
-BUILTIN(DatePrototypeSetUTCFullYear) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCFullYear");
- int const argc = args.length() - 1;
- Handle<Object> year = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
- double y = year->Number(), m = 0.0, dt = 1.0;
- int time_within_day = 0;
- if (!std::isnan(date->value()->Number())) {
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
- int const days = isolate->date_cache()->DaysFromTime(time_ms);
- time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
- }
- if (argc >= 2) {
- Handle<Object> month = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
- m = month->Number();
- if (argc >= 3) {
- Handle<Object> date = args.at<Object>(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
- dt = date->Number();
- }
- }
- double const time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-
-// ES6 section 20.3.4.30 Date.prototype.setUTCHours(hour, min, sec, ms)
-BUILTIN(DatePrototypeSetUTCHours) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCHours");
- int const argc = args.length() - 1;
- Handle<Object> hour = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
- double h = hour->Number();
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int day = isolate->date_cache()->DaysFromTime(time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
- double m = (time_within_day / (60 * 1000)) % 60;
- double s = (time_within_day / 1000) % 60;
- double milli = time_within_day % 1000;
- if (argc >= 2) {
- Handle<Object> min = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
- m = min->Number();
- if (argc >= 3) {
- Handle<Object> sec = args.at<Object>(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
- s = sec->Number();
- if (argc >= 4) {
- Handle<Object> ms = args.at<Object>(4);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- milli = ms->Number();
- }
- }
- }
- time_val = MakeDate(day, MakeTime(h, m, s, milli));
- }
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-
-// ES6 section 20.3.4.31 Date.prototype.setUTCMilliseconds(ms)
-BUILTIN(DatePrototypeSetUTCMilliseconds) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMilliseconds");
- Handle<Object> ms = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int day = isolate->date_cache()->DaysFromTime(time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
- int h = time_within_day / (60 * 60 * 1000);
- int m = (time_within_day / (60 * 1000)) % 60;
- int s = (time_within_day / 1000) % 60;
- time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
- }
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-
-// ES6 section 20.3.4.32 Date.prototype.setUTCMinutes ( min, sec, ms )
-BUILTIN(DatePrototypeSetUTCMinutes) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMinutes");
- int const argc = args.length() - 1;
- Handle<Object> min = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int day = isolate->date_cache()->DaysFromTime(time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
- int h = time_within_day / (60 * 60 * 1000);
- double m = min->Number();
- double s = (time_within_day / 1000) % 60;
- double milli = time_within_day % 1000;
- if (argc >= 2) {
- Handle<Object> sec = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
- s = sec->Number();
- if (argc >= 3) {
- Handle<Object> ms = args.at<Object>(3);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- milli = ms->Number();
- }
- }
- time_val = MakeDate(day, MakeTime(h, m, s, milli));
- }
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-
-// ES6 section 20.3.4.33 Date.prototype.setUTCMonth ( month, date )
-BUILTIN(DatePrototypeSetUTCMonth) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth");
- int const argc = args.length() - 1;
- Handle<Object> month = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int days = isolate->date_cache()->DaysFromTime(time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
- int year, unused, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
- double m = month->Number();
- double dt = day;
- if (argc >= 2) {
- Handle<Object> date = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
- dt = date->Number();
- }
- time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
- }
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-
-// ES6 section 20.3.4.34 Date.prototype.setUTCSeconds ( sec, ms )
-BUILTIN(DatePrototypeSetUTCSeconds) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCSeconds");
- int const argc = args.length() - 1;
- Handle<Object> sec = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
- double time_val = date->value()->Number();
- if (!std::isnan(time_val)) {
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int day = isolate->date_cache()->DaysFromTime(time_ms);
- int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
- int h = time_within_day / (60 * 60 * 1000);
- double m = (time_within_day / (60 * 1000)) % 60;
- double s = sec->Number();
- double milli = time_within_day % 1000;
- if (argc >= 2) {
- Handle<Object> ms = args.at<Object>(2);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
- milli = ms->Number();
- }
- time_val = MakeDate(day, MakeTime(h, m, s, milli));
- }
- return *JSDate::SetValue(date, TimeClip(time_val));
-}
-
-
-// ES6 section 20.3.4.35 Date.prototype.toDateString ( )
-BUILTIN(DatePrototypeToDateString) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
- char buffer[128];
- Vector<char> str(buffer, arraysize(buffer));
- ToDateString(date->value()->Number(), str, isolate->date_cache(), kDateOnly);
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
- return *result;
-}
-
-
-// ES6 section 20.3.4.36 Date.prototype.toISOString ( )
-BUILTIN(DatePrototypeToISOString) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toISOString");
- double const time_val = date->value()->Number();
- if (std::isnan(time_val)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
- }
- int64_t const time_ms = static_cast<int64_t>(time_val);
- int year, month, day, weekday, hour, min, sec, ms;
- isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
- &hour, &min, &sec, &ms);
- char buffer[128];
- Vector<char> str(buffer, arraysize(buffer));
- if (year >= 0 && year <= 9999) {
- SNPrintF(str, "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
- hour, min, sec, ms);
- } else if (year < 0) {
- SNPrintF(str, "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", -year, month + 1, day,
- hour, min, sec, ms);
- } else {
- SNPrintF(str, "+%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", year, month + 1, day,
- hour, min, sec, ms);
- }
- return *isolate->factory()->NewStringFromAsciiChecked(str.start());
-}
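
The three branches correspond to ISO 8601's plain and expanded year forms: four digits for years 0 through 9999, otherwise a sign followed by six digits. For instance, the year < 0 branch renders year -1 as shown below (the remaining fields are made-up values):

    #include <cstdio>

    // Prints "-000001-01-01T00:00:00.000Z": year = -1, month = 0, day = 1.
    int main() {
      int const year = -1;
      char buffer[128];
      std::snprintf(buffer, sizeof(buffer),
                    "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ",
                    -year, 0 + 1, 1, 0, 0, 0, 0);
      std::puts(buffer);
      return 0;
    }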
-
-
-// ES6 section 20.3.4.41 Date.prototype.toString ( )
-BUILTIN(DatePrototypeToString) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
- char buffer[128];
- Vector<char> str(buffer, arraysize(buffer));
- ToDateString(date->value()->Number(), str, isolate->date_cache());
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
- return *result;
-}
-
-
-// ES6 section 20.3.4.42 Date.prototype.toTimeString ( )
-BUILTIN(DatePrototypeToTimeString) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
- char buffer[128];
- Vector<char> str(buffer, arraysize(buffer));
- ToDateString(date->value()->Number(), str, isolate->date_cache(), kTimeOnly);
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
- return *result;
-}
-
-
-// ES6 section 20.3.4.43 Date.prototype.toUTCString ( )
-BUILTIN(DatePrototypeToUTCString) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.toUTCString");
- double const time_val = date->value()->Number();
- if (std::isnan(time_val)) {
- return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
- }
- char buffer[128];
- Vector<char> str(buffer, arraysize(buffer));
- int64_t time_ms = static_cast<int64_t>(time_val);
- int year, month, day, weekday, hour, min, sec, ms;
- isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
- &hour, &min, &sec, &ms);
- SNPrintF(str, "%s, %02d %s %4d %02d:%02d:%02d GMT", kShortWeekDays[weekday],
- day, kShortMonths[month], year, hour, min, sec);
- return *isolate->factory()->NewStringFromAsciiChecked(str.start());
-}
-
-
-// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
-BUILTIN(DatePrototypeValueOf) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
- return date->value();
-}
-
-
-// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
-BUILTIN(DatePrototypeToPrimitive) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
- Handle<Object> hint = args.at<Object>(1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSDate::ToPrimitive(receiver, hint));
- return *result;
-}
-
-
-// ES6 section B.2.4.1 Date.prototype.getYear ( )
-BUILTIN(DatePrototypeGetYear) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.getYear");
- double time_val = date->value()->Number();
- if (std::isnan(time_val)) return date->value();
- int64_t time_ms = static_cast<int64_t>(time_val);
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int days = isolate->date_cache()->DaysFromTime(local_time_ms);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- return Smi::FromInt(year - 1900);
-}
-
-
-// ES6 section B.2.4.2 Date.prototype.setYear ( year )
-BUILTIN(DatePrototypeSetYear) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.setYear");
- Handle<Object> year = args.atOrUndefined(isolate, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
- double m = 0.0, dt = 1.0, y = year->Number();
- if (0.0 <= y && y <= 99.0) {
- y = 1900.0 + DoubleToInteger(y);
- }
- int time_within_day = 0;
- if (!std::isnan(date->value()->Number())) {
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
- int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
- int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
- time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
- int year, month, day;
- isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
- m = month;
- dt = day;
- }
- double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
- return SetLocalDateValue(date, time_val);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kDay);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetDay(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kWeekday);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetFullYear(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kYear);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetHours(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kHour);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMillisecond);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetMinutes(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMinute);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetMonth(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMonth);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetSeconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kSecond);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetTime(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kDateValue);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kTimezoneOffset);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCDate(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kDayUTC);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCDay(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kWeekdayUTC);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kYearUTC);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCHours(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kHourUTC);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMillisecondUTC);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMinuteUTC);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kMonthUTC);
-}
-
-
-// static
-void Builtins::Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm) {
- Generate_DatePrototype_GetField(masm, JSDate::kSecondUTC);
-}
-
-
-namespace {
-
-// ES6 section 19.2.1.1.1 CreateDynamicFunction
-MaybeHandle<JSFunction> CreateDynamicFunction(
- Isolate* isolate,
- BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget> args,
- const char* token) {
- // Compute number of arguments, ignoring the receiver.
- DCHECK_LE(1, args.length());
- int const argc = args.length() - 1;
-
- // Build the source string.
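- // E.g. for Function("a", "b", "return a + b") the assembled source is
- //   (function(a,b
- //   /**/) {
- //   return a + b
- //   })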
- Handle<String> source;
- {
- IncrementalStringBuilder builder(isolate);
- builder.AppendCharacter('(');
- builder.AppendCString(token);
- builder.AppendCharacter('(');
- bool parenthesis_in_arg_string = false;
- if (argc > 1) {
- for (int i = 1; i < argc; ++i) {
- if (i > 1) builder.AppendCharacter(',');
- Handle<String> param;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, param, Object::ToString(isolate, args.at<Object>(i)),
- JSFunction);
- param = String::Flatten(param);
- builder.AppendString(param);
-      // If the formal parameter string includes ')' - an illegal
-      // character - it may close the parameter list early and make the
-      // combined function expression compile. We avoid this problem by
-      // checking for it early on.
- DisallowHeapAllocation no_gc; // Ensure vectors stay valid.
- String::FlatContent param_content = param->GetFlatContent();
- for (int i = 0, length = param->length(); i < length; ++i) {
- if (param_content.Get(i) == ')') {
- parenthesis_in_arg_string = true;
- break;
- }
- }
- }
-      // If the formal parameters include an unbalanced block comment, the
-      // function must be rejected. Since JavaScript does not allow nested
-      // comments, we can append a trailing block comment to catch this.
- builder.AppendCString("\n/**/");
- }
- builder.AppendCString(") {\n");
- if (argc > 0) {
- Handle<String> body;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
- JSFunction);
- builder.AppendString(body);
- }
- builder.AppendCString("\n})");
- ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), JSFunction);
-
- // The SyntaxError must be thrown after all the (observable) ToString
- // conversions are done.
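-    // E.g. Function("a) {}, function (", "") would otherwise compile as
-    // the comma expression (function(a) {}, function () {}).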
- if (parenthesis_in_arg_string) {
- THROW_NEW_ERROR(isolate,
- NewSyntaxError(MessageTemplate::kParenthesisInArgString),
- JSFunction);
- }
- }
-
-  // Compile the string in the constructor rather than in a helper so that
-  // errors appear to come from here.
- Handle<JSFunction> target = args.target<JSFunction>();
- Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
- Handle<JSFunction> function;
- {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, function,
- CompileString(handle(target->native_context(), isolate), source,
- ONLY_SINGLE_FUNCTION_LITERAL),
- JSFunction);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, function, target_global_proxy, 0, nullptr),
- JSFunction);
- function = Handle<JSFunction>::cast(result);
- function->shared()->set_name_should_print_as_anonymous(true);
- }
-
-  // If new.target is equal to target then the created function is
-  // already correctly set up and nothing else needs to be done here.
-  // But if new.target is not equal to target then we have a Function
-  // builtin subclassing case, and the function therefore has the wrong
-  // initial map. To fix that we create a new function object with the
-  // correct initial map.
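-  // E.g. for class MyFunction extends Function {}, the result of
-  // new MyFunction("x", "return x") must get a map derived from
-  // MyFunction.prototype rather than Function.prototype.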
- Handle<Object> unchecked_new_target = args.new_target();
- if (!unchecked_new_target->IsUndefined() &&
- !unchecked_new_target.is_identical_to(target)) {
- Handle<JSReceiver> new_target =
- Handle<JSReceiver>::cast(unchecked_new_target);
- Handle<Map> initial_map;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, initial_map,
- JSFunction::GetDerivedMap(isolate, target, new_target), JSFunction);
-
- Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
- Handle<Map> map = Map::AsLanguageMode(
- initial_map, shared_info->language_mode(), shared_info->kind());
-
- Handle<Context> context(function->context(), isolate);
- function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
- map, shared_info, context, NOT_TENURED);
- }
- return function;
-}
-
-} // namespace
-
-
-// ES6 section 19.2.1.1 Function ( p1, p2, ... , pn, body )
-BUILTIN(FunctionConstructor) {
- HandleScope scope(isolate);
- Handle<JSFunction> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, CreateDynamicFunction(isolate, args, "function"));
- return *result;
-}
-
-
-// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
-BUILTIN(FunctionPrototypeBind) {
- HandleScope scope(isolate);
- DCHECK_LE(1, args.length());
- if (!args.receiver()->IsCallable()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kFunctionBind));
- }
-
- // Allocate the bound function with the given {this_arg} and {args}.
- Handle<JSReceiver> target = args.at<JSReceiver>(0);
- Handle<Object> this_arg = isolate->factory()->undefined_value();
- ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
- if (args.length() > 1) {
- this_arg = args.at<Object>(1);
- for (int i = 2; i < args.length(); ++i) {
- argv[i - 2] = args.at<Object>(i);
- }
- }
- Handle<JSBoundFunction> function;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, function,
- isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
-
- // TODO(bmeurer): Optimize the rest for the common cases where {target} is
- // a function with some initial map or even a bound function.
- // Setup the "length" property based on the "length" of the {target}.
- Handle<Object> length(Smi::FromInt(0), isolate);
- Maybe<bool> target_has_length =
- JSReceiver::HasOwnProperty(target, isolate->factory()->length_string());
- if (!target_has_length.IsJust()) {
- return isolate->heap()->exception();
- } else if (target_has_length.FromJust()) {
- Handle<Object> target_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, target_length,
- JSReceiver::GetProperty(target, isolate->factory()->length_string()));
- if (target_length->IsNumber()) {
- length = isolate->factory()->NewNumber(std::max(
- 0.0, DoubleToInteger(target_length->Number()) - argv.length()));
- }
- }
- function->set_length(*length);
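-  // E.g. (function(a, b, c) {}).bind(null, 1).length is 2.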
-
- // Setup the "name" property based on the "name" of the {target}.
- Handle<Object> target_name;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, target_name,
- JSReceiver::GetProperty(target, isolate->factory()->name_string()));
- Handle<String> name;
- if (!target_name->IsString()) {
- name = isolate->factory()->bound__string();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, name, Name::ToFunctionName(Handle<String>::cast(target_name)));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, name, isolate->factory()->NewConsString(
- isolate->factory()->bound__string(), name));
- }
- function->set_name(*name);
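-  // E.g. (function foo() {}).bind(null).name is "bound foo".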
- return *function;
-}
-
-
-// ES6 section 19.2.3.5 Function.prototype.toString ( )
-BUILTIN(FunctionPrototypeToString) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- if (receiver->IsJSBoundFunction()) {
- return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
- } else if (receiver->IsJSFunction()) {
- return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
- }
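-  // E.g. Function.prototype.toString.call({}) lands here, since toString
-  // is not generic.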
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotGeneric,
- isolate->factory()->NewStringFromAsciiChecked(
- "Function.prototype.toString")));
-}
-
-
-// ES6 section 25.2.1.1 GeneratorFunction (p1, p2, ... , pn, body)
-BUILTIN(GeneratorFunctionConstructor) {
- HandleScope scope(isolate);
- Handle<JSFunction> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, CreateDynamicFunction(isolate, args, "function*"));
- return *result;
-}
-
-
-// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
-BUILTIN(SymbolConstructor) {
- HandleScope scope(isolate);
- Handle<Symbol> result = isolate->factory()->NewSymbol();
- Handle<Object> description = args.atOrUndefined(isolate, 1);
- if (!description->IsUndefined()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
- Object::ToString(isolate, description));
- result->set_name(*description);
- }
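-  // E.g. Symbol({ toString() { return "desc" } }).toString() yields
-  // "Symbol(desc)"; with no argument the description stays undefined.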
- return *result;
-}
-
-
-// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Construct]] case.
-BUILTIN(SymbolConstructor_ConstructStub) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor,
- isolate->factory()->Symbol_string()));
-}
-
-
-// ES6 19.1.3.6 Object.prototype.toString
-BUILTIN(ObjectProtoToString) {
- HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::ObjectProtoToString(isolate, object));
- return *result;
-}
-
-// -----------------------------------------------------------------------------
-// ES6 section 21.1 String Objects
-
-namespace {
-
-bool ToUint16(Handle<Object> value, uint16_t* result) {
- if (value->IsNumber() || Object::ToNumber(value).ToHandle(&value)) {
- *result = DoubleToUint32(value->Number());
- return true;
- }
- return false;
-}
-
-} // namespace
-
-// ES6 21.1.2.1 String.fromCharCode ( ...codeUnits )
-BUILTIN(StringFromCharCode) {
- HandleScope scope(isolate);
- // Check resulting string length.
- int index = 0;
- Handle<String> result;
- int const length = args.length() - 1;
- if (length == 0) return isolate->heap()->empty_string();
- DCHECK_LT(0, length);
- // Load the first character code.
- uint16_t code;
- if (!ToUint16(args.at<Object>(1), &code)) return isolate->heap()->exception();
-  // Assume that the resulting String contains only one-byte characters.
- if (code <= String::kMaxOneByteCharCodeU) {
- // Check for single one-byte character fast case.
- if (length == 1) {
- return *isolate->factory()->LookupSingleCharacterStringFromCode(code);
- }
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawOneByteString(length));
- do {
- Handle<SeqOneByteString>::cast(result)->Set(index, code);
- if (++index == length) break;
- if (!ToUint16(args.at<Object>(1 + index), &code)) {
- return isolate->heap()->exception();
- }
- } while (code <= String::kMaxOneByteCharCodeU);
- }
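-  // Code units wrap modulo 2^16, so e.g. String.fromCharCode(0x10061)
-  // yields "a", while String.fromCharCode(0x61, 0x2603) leaves the loop
-  // above and takes the two-byte path below.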
-  // Check if all characters fit into the one-byte range.
- if (index < length) {
-    // Fall back to a two-byte string.
- Handle<String> new_result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, new_result, isolate->factory()->NewRawTwoByteString(length));
- for (int new_index = 0; new_index < index; ++new_index) {
- uint16_t new_code =
- Handle<SeqOneByteString>::cast(result)->Get(new_index);
- Handle<SeqTwoByteString>::cast(new_result)->Set(new_index, new_code);
- }
- while (true) {
- Handle<SeqTwoByteString>::cast(new_result)->Set(index, code);
- if (++index == length) break;
- if (!ToUint16(args.at<Object>(1 + index), &code)) {
- return isolate->heap()->exception();
- }
- }
- result = new_result;
- }
- return *result;
-}
-
-// -----------------------------------------------------------------------------
-// ES6 section 24.1 ArrayBuffer Objects
-
-// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
-BUILTIN(ArrayBufferConstructor) {
- HandleScope scope(isolate);
- Handle<JSFunction> target = args.target<JSFunction>();
- DCHECK(*target == target->native_context()->array_buffer_fun() ||
- *target == target->native_context()->shared_array_buffer_fun());
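-  // Calling ArrayBuffer(...) without "new" always throws a TypeError,
-  // e.g. for plain ArrayBuffer(8).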
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- handle(target->shared()->name(), isolate)));
-}
-
-
-// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
-BUILTIN(ArrayBufferConstructor_ConstructStub) {
- HandleScope scope(isolate);
- Handle<JSFunction> target = args.target<JSFunction>();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> length = args.atOrUndefined(isolate, 1);
- DCHECK(*target == target->native_context()->array_buffer_fun() ||
- *target == target->native_context()->shared_array_buffer_fun());
- Handle<Object> number_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
- Object::ToInteger(isolate, length));
- if (number_length->Number() < 0.0) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
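-  // E.g. new ArrayBuffer(-1) throws a RangeError above, while
-  // new ArrayBuffer(1.5) was already truncated to length 1 by ToInteger.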
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- size_t byte_length;
- if (!TryNumberToSize(isolate, *number_length, &byte_length)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
- SharedFlag shared_flag =
- (*target == target->native_context()->array_buffer_fun())
- ? SharedFlag::kNotShared
- : SharedFlag::kShared;
- if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
- isolate, byte_length, true,
- shared_flag)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
- }
- return *result;
-}
-
-
-// ES6 section 24.1.3.1 ArrayBuffer.isView ( arg )
-BUILTIN(ArrayBufferIsView) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- Object* arg = args[1];
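-  // E.g. ArrayBuffer.isView(new Uint8Array(4)) is true, while
-  // ArrayBuffer.isView(new ArrayBuffer(4)) is false.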
- return isolate->heap()->ToBoolean(arg->IsJSArrayBufferView());
-}
-
-
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
-BUILTIN(ProxyConstructor) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked("Proxy")));
-}
-
-
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
-BUILTIN(ProxyConstructor_ConstructStub) {
- HandleScope scope(isolate);
- DCHECK(isolate->proxy_function()->IsConstructor());
- Handle<Object> target = args.atOrUndefined(isolate, 1);
- Handle<Object> handler = args.atOrUndefined(isolate, 2);
- Handle<JSProxy> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSProxy::New(isolate, target, handler));
- return *result;
-}
-
-
-// -----------------------------------------------------------------------------
-// Throwers for restricted function properties and strict arguments object
-// properties
-
-
-BUILTIN(RestrictedFunctionPropertiesThrower) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kRestrictedFunctionProperties));
-}
-
-
-BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
-}
-
-
-// -----------------------------------------------------------------------------
-// API call support
-
-
-namespace {
-
-template <bool is_construct>
-MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
- Isolate* isolate, BuiltinArguments<BuiltinExtraArguments::kTarget> args) {
- HandleScope scope(isolate);
- Handle<HeapObject> function = args.target<HeapObject>();
- Handle<JSReceiver> receiver;
-
- DCHECK(function->IsFunctionTemplateInfo() ||
- Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
-
- Handle<FunctionTemplateInfo> fun_data =
- function->IsFunctionTemplateInfo()
- ? Handle<FunctionTemplateInfo>::cast(function)
- : handle(JSFunction::cast(*function)->shared()->get_api_func_data());
- if (is_construct) {
- DCHECK(args.receiver()->IsTheHole());
- if (fun_data->instance_template()->IsUndefined()) {
- v8::Local<ObjectTemplate> templ =
- ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
- ToApiHandle<v8::FunctionTemplate>(fun_data));
- fun_data->set_instance_template(*Utils::OpenHandle(*templ));
- }
- Handle<ObjectTemplateInfo> instance_template(
- ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
- ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
- ApiNatives::InstantiateObject(instance_template),
- Object);
- args[0] = *receiver;
- DCHECK_EQ(*receiver, *args.receiver());
- } else {
- DCHECK(args.receiver()->IsJSReceiver());
- receiver = args.at<JSReceiver>(0);
- }
-
- if (!is_construct && !fun_data->accept_any_receiver()) {
- if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
- Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
- if (!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
- isolate->ReportFailedAccessCheck(js_receiver);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- }
- }
- }
-
- Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, *receiver);
-
- if (raw_holder->IsNull()) {
- // This function cannot be called with the given receiver. Abort!
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIllegalInvocation),
- Object);
- }
-
- Object* raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined()) {
- DCHECK(raw_call_data->IsCallHandlerInfo());
- CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
- Object* data_obj = call_data->data();
-
- LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
- DCHECK(raw_holder->IsJSObject());
-
- FunctionCallbackArguments custom(isolate,
- data_obj,
- *function,
- raw_holder,
- &args[0] - 1,
- args.length() - 1,
- is_construct);
-
- Handle<Object> result = custom.Call(callback);
- if (result.is_null()) result = isolate->factory()->undefined_value();
-
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (!is_construct || result->IsJSObject()) {
- return scope.CloseAndEscape(result);
- }
- }
-
- return scope.CloseAndEscape(receiver);
-}
-
-} // namespace
-
-
-BUILTIN(HandleApiCall) {
- HandleScope scope(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- HandleApiCallHelper<false>(isolate, args));
- return *result;
-}
-
-
-BUILTIN(HandleApiCallConstruct) {
- HandleScope scope(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- HandleApiCallHelper<true>(isolate, args));
- return *result;
-}
-
-Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return CallFunction_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return CallFunction_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return CallFunction_ReceiverIsAny();
- }
- break;
- case TailCallMode::kAllow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return TailCallFunction_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return TailCallFunction_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return TailCallFunction_ReceiverIsAny();
- }
- break;
- }
- UNREACHABLE();
- return Handle<Code>::null();
-}
-
-Handle<Code> Builtins::Call(ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return Call_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return Call_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return Call_ReceiverIsAny();
- }
- break;
- case TailCallMode::kAllow:
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return TailCall_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return TailCall_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return TailCall_ReceiverIsAny();
- }
- break;
- }
- UNREACHABLE();
- return Handle<Code>::null();
-}
-
-Handle<Code> Builtins::CallBoundFunction(TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- return CallBoundFunction();
- case TailCallMode::kAllow:
- return TailCallBoundFunction();
- }
- UNREACHABLE();
- return Handle<Code>::null();
-}
-
-Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode) {
- switch (tail_call_mode) {
- case TailCallMode::kDisallow:
- return InterpreterPushArgsAndCall();
- case TailCallMode::kAllow:
- return InterpreterPushArgsAndTailCall();
- }
- UNREACHABLE();
- return Handle<Code>::null();
-}
-
-namespace {
-
-class RelocatableArguments
- : public BuiltinArguments<BuiltinExtraArguments::kTarget>,
- public Relocatable {
- public:
- RelocatableArguments(Isolate* isolate, int length, Object** arguments)
- : BuiltinArguments<BuiltinExtraArguments::kTarget>(length, arguments),
- Relocatable(isolate) {}
-
- virtual inline void IterateInstance(ObjectVisitor* v) {
- if (length() == 0) return;
- v->VisitPointers(lowest_address(), highest_address() + 1);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RelocatableArguments);
-};
-
-} // namespace
-
-MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<HeapObject> function,
- Handle<Object> receiver,
- int argc,
- Handle<Object> args[]) {
- Isolate* isolate = function->GetIsolate();
-  // Do proper receiver conversion for non-strict mode API functions.
- if (!receiver->IsJSReceiver()) {
- DCHECK(function->IsFunctionTemplateInfo() || function->IsJSFunction());
- if (function->IsFunctionTemplateInfo() ||
- is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
- if (receiver->IsUndefined() || receiver->IsNull()) {
- receiver = handle(isolate->global_proxy(), isolate);
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, receiver), Object);
- }
- }
- }
- // Construct BuiltinArguments object: function, arguments reversed, receiver.
- const int kBufferSize = 32;
- Object* small_argv[kBufferSize];
- Object** argv;
- if (argc + 2 <= kBufferSize) {
- argv = small_argv;
- } else {
- argv = new Object* [argc + 2];
- }
- argv[argc + 1] = *receiver;
- for (int i = 0; i < argc; ++i) {
- argv[argc - i] = *args[i];
- }
- argv[0] = *function;
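-  // For argc == 2 the layout is argv[0] = function, argv[1] = args[1],
-  // argv[2] = args[0], argv[3] = receiver, i.e. the arguments sit in
-  // reverse order between function and receiver.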
- MaybeHandle<Object> result;
- {
- RelocatableArguments arguments(isolate, argc + 2, &argv[argc + 1]);
- result = HandleApiCallHelper<false>(isolate, arguments);
- }
- if (argv != small_argv) {
- delete[] argv;
- }
- return result;
-}
-
-
-// Helper function to handle calls to non-function objects created through the
-// API. The object can be called as either a constructor (using new) or just as
-// a function (without new).
-MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
- Isolate* isolate, bool is_construct_call,
- BuiltinArguments<BuiltinExtraArguments::kNone> args) {
- Handle<Object> receiver = args.receiver();
-
- // Get the object called.
- JSObject* obj = JSObject::cast(*receiver);
-
- // Get the invocation callback from the function descriptor that was
- // used to create the called object.
- DCHECK(obj->map()->is_callable());
- JSFunction* constructor = JSFunction::cast(obj->map()->GetConstructor());
- // TODO(ishell): turn this back to a DCHECK.
- CHECK(constructor->shared()->IsApiFunction());
- Object* handler =
- constructor->shared()->get_api_func_data()->instance_call_handler();
- DCHECK(!handler->IsUndefined());
- // TODO(ishell): remove this debugging code.
- CHECK(handler->IsCallHandlerInfo());
- CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
- Object* callback_obj = call_data->callback();
- v8::FunctionCallback callback =
- v8::ToCData<v8::FunctionCallback>(callback_obj);
-
- // Get the data for the call and perform the callback.
- Object* result;
- {
- HandleScope scope(isolate);
- LOG(isolate, ApiObjectAccess("call non-function", obj));
-
- FunctionCallbackArguments custom(isolate,
- call_data->data(),
- constructor,
- obj,
- &args[0] - 1,
- args.length() - 1,
- is_construct_call);
- Handle<Object> result_handle = custom.Call(callback);
- if (result_handle.is_null()) {
- result = isolate->heap()->undefined_value();
- } else {
- result = *result_handle;
- }
- }
- // Check for exceptions and return result.
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- return result;
-}
-
-
-// Handle calls to non-function objects created through the API. This delegate
-// function is used when the call is a normal function call.
-BUILTIN(HandleApiCallAsFunction) {
- return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
-}
-
-
-// Handle calls to non-function objects created through the API. This delegate
-// function is used when the call is a construct call.
-BUILTIN(HandleApiCallAsConstructor) {
- return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
-}
-
-
-static void Generate_LoadIC_Miss(MacroAssembler* masm) {
- LoadIC::GenerateMiss(masm);
-}
-
-
-static void Generate_LoadIC_Normal(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm);
-}
-
-
-static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
- NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
-}
-
-
-static void Generate_LoadIC_Slow(MacroAssembler* masm) {
- LoadIC::GenerateRuntimeGetProperty(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
- KeyedLoadIC::GenerateRuntimeGetProperty(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm);
-}
-
-
-static void Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMegamorphic(masm);
-}
-
-
-static void Generate_StoreIC_Miss(MacroAssembler* masm) {
- StoreIC::GenerateMiss(masm);
-}
-
-
-static void Generate_StoreIC_Normal(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
-}
-
-
-static void Generate_StoreIC_Slow(MacroAssembler* masm) {
- NamedStoreHandlerCompiler::GenerateSlow(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
- ElementHandlerCompiler::GenerateStoreSlow(masm);
-}
-
-
-static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
- NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
-}
-
-
-static void Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
-}
-
-
-static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
- KeyedStoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateInitialize(masm);
-}
-
-
-static void Generate_KeyedStoreIC_PreMonomorphic(MacroAssembler* masm) {
- KeyedStoreIC::GeneratePreMonomorphic(masm);
-}
-
-
-static void Generate_KeyedStoreIC_PreMonomorphic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GeneratePreMonomorphic(masm);
-}
-
-
-static void Generate_Return_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateDebugBreakStub(masm,
- DebugCodegen::SAVE_RESULT_REGISTER);
-}
-
-
-static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
- DebugCodegen::GenerateDebugBreakStub(masm,
- DebugCodegen::IGNORE_RESULT_REGISTER);
-}
-
-
-static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
- DebugCodegen::GenerateFrameDropperLiveEdit(masm);
-}
-
-
-Builtins::Builtins() : initialized_(false) {
- memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
- memset(names_, 0, sizeof(names_[0]) * builtin_count);
-}
-
-
-Builtins::~Builtins() {
-}
-
-
-#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
-Address const Builtins::c_functions_[cfunction_count] = {
- BUILTIN_LIST_C(DEF_ENUM_C)
-};
-#undef DEF_ENUM_C
-
-
-struct BuiltinDesc {
- Handle<Code> (*builder)(Isolate*, struct BuiltinDesc const*);
- byte* generator;
- byte* c_code;
- const char* s_name; // name is only used for generating log information.
- int name;
- Code::Flags flags;
- BuiltinExtraArguments extra_args;
- int argc;
-};
-
-#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
-
-class BuiltinFunctionTable {
- public:
- BuiltinDesc* functions() {
- base::CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
- return functions_;
- }
-
- base::OnceType once_;
- BuiltinDesc functions_[Builtins::builtin_count + 1];
-
- friend class Builtins;
-};
-
-namespace {
-
-BuiltinFunctionTable builtin_function_table = BUILTIN_FUNCTION_TABLE_INIT;
-
-Handle<Code> MacroAssemblerBuilder(Isolate* isolate,
- BuiltinDesc const* builtin_desc) {
-// For now we generate builtin adaptor code into a stack-allocated
-// buffer before copying it into individual code objects. Be careful
-// with alignment; some platforms don't like unaligned code.
-#ifdef DEBUG
- // We can generate a lot of debug code on Arm64.
- const size_t buffer_size = 32 * KB;
-#elif V8_TARGET_ARCH_PPC64
- // 8 KB is insufficient on PPC64 when FLAG_debug_code is on.
- const size_t buffer_size = 10 * KB;
-#else
- const size_t buffer_size = 8 * KB;
-#endif
- union {
- int force_alignment;
- byte buffer[buffer_size]; // NOLINT(runtime/arrays)
- } u;
-
- MacroAssembler masm(isolate, u.buffer, sizeof(u.buffer),
- CodeObjectRequired::kYes);
- // Generate the code/adaptor.
- typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
- Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
- // We pass all arguments to the generator, but it may not use all of
- // them. This works because the first arguments are on top of the
- // stack.
- DCHECK(!masm.has_frame());
- g(&masm, builtin_desc->name, builtin_desc->extra_args);
- // Move the code into the object heap.
- CodeDesc desc;
- masm.GetCode(&desc);
- Code::Flags flags = builtin_desc->flags;
- return isolate->factory()->NewCode(desc, flags, masm.CodeObject());
-}
-
-Handle<Code> CodeStubAssemblerBuilder(Isolate* isolate,
- BuiltinDesc const* builtin_desc) {
- Zone zone(isolate->allocator());
- compiler::CodeStubAssembler assembler(isolate, &zone, builtin_desc->argc,
- builtin_desc->flags,
- builtin_desc->s_name);
- // Generate the code/adaptor.
- typedef void (*Generator)(compiler::CodeStubAssembler*);
- Generator g = FUNCTION_CAST<Generator>(builtin_desc->generator);
- g(&assembler);
- return assembler.GenerateCode();
-}
-
-} // namespace
-
-// Define array of pointers to generators and C builtin functions.
-// We do this in a sort of roundabout way so that we can do the initialization
-// within the lexical scope of Builtins:: and within a context where
-// Code::Flags names a non-abstract type.
-void Builtins::InitBuiltinFunctionTable() {
- BuiltinDesc* functions = builtin_function_table.functions_;
- functions[builtin_count].builder = nullptr;
- functions[builtin_count].generator = nullptr;
- functions[builtin_count].c_code = nullptr;
- functions[builtin_count].s_name = nullptr;
- functions[builtin_count].name = builtin_count;
- functions[builtin_count].flags = static_cast<Code::Flags>(0);
- functions[builtin_count].extra_args = BuiltinExtraArguments::kNone;
- functions[builtin_count].argc = 0;
-
-#define DEF_FUNCTION_PTR_C(aname, aextra_args) \
- functions->builder = &MacroAssemblerBuilder; \
- functions->generator = FUNCTION_ADDR(Generate_Adaptor); \
- functions->c_code = FUNCTION_ADDR(Builtin_##aname); \
- functions->s_name = #aname; \
- functions->name = c_##aname; \
- functions->flags = Code::ComputeFlags(Code::BUILTIN); \
- functions->extra_args = BuiltinExtraArguments::aextra_args; \
- functions->argc = 0; \
- ++functions;
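-// For example, the BUILTIN_LIST_C entry V(Illegal, kNone) expands here to
-// a table slot whose c_code points at Builtin_Illegal and whose generator
-// is Generate_Adaptor.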
-
-#define DEF_FUNCTION_PTR_A(aname, kind, state, extra) \
- functions->builder = &MacroAssemblerBuilder; \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeFlags(Code::kind, state, extra); \
- functions->extra_args = BuiltinExtraArguments::kNone; \
- functions->argc = 0; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_T(aname, aargc) \
- functions->builder = &CodeStubAssemblerBuilder; \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = \
- Code::ComputeFlags(Code::BUILTIN, UNINITIALIZED, kNoExtraICState); \
- functions->extra_args = BuiltinExtraArguments::kNone; \
- functions->argc = aargc; \
- ++functions;
-
-#define DEF_FUNCTION_PTR_H(aname, kind) \
- functions->builder = &MacroAssemblerBuilder; \
- functions->generator = FUNCTION_ADDR(Generate_##aname); \
- functions->c_code = NULL; \
- functions->s_name = #aname; \
- functions->name = k##aname; \
- functions->flags = Code::ComputeHandlerFlags(Code::kind); \
- functions->extra_args = BuiltinExtraArguments::kNone; \
- functions->argc = 0; \
- ++functions;
-
- BUILTIN_LIST_C(DEF_FUNCTION_PTR_C)
- BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
- BUILTIN_LIST_T(DEF_FUNCTION_PTR_T)
- BUILTIN_LIST_H(DEF_FUNCTION_PTR_H)
- BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
-
-#undef DEF_FUNCTION_PTR_C
-#undef DEF_FUNCTION_PTR_A
-#undef DEF_FUNCTION_PTR_H
-#undef DEF_FUNCTION_PTR_T
-}
-
-
-void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
- DCHECK(!initialized_);
-
- // Create a scope for the handles in the builtins.
- HandleScope scope(isolate);
-
- const BuiltinDesc* functions = builtin_function_table.functions();
-
- // Traverse the list of builtins and generate an adaptor in a
- // separate code object for each one.
- for (int i = 0; i < builtin_count; i++) {
- if (create_heap_objects) {
- Handle<Code> code = (*functions[i].builder)(isolate, functions + i);
- // Log the event and add the code to the builtins array.
- PROFILE(isolate,
- CodeCreateEvent(Logger::BUILTIN_TAG, AbstractCode::cast(*code),
- functions[i].s_name));
- builtins_[i] = *code;
- code->set_builtin_index(i);
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_builtin_code) {
- CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
- OFStream os(trace_scope.file());
- os << "Builtin: " << functions[i].s_name << "\n";
- code->Disassemble(functions[i].s_name, os);
- os << "\n";
- }
-#endif
- } else {
- // Deserializing. The values will be filled in during IterateBuiltins.
- builtins_[i] = NULL;
- }
- names_[i] = functions[i].s_name;
- }
-
- // Mark as initialized.
- initialized_ = true;
-}
-
-
-void Builtins::TearDown() {
- initialized_ = false;
-}
-
-
-void Builtins::IterateBuiltins(ObjectVisitor* v) {
- v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
-}
-
-
-const char* Builtins::Lookup(byte* pc) {
-  // May be called during initialization (disassembler!).
- if (initialized_) {
- for (int i = 0; i < builtin_count; i++) {
- Code* entry = Code::cast(builtins_[i]);
- if (entry->contains(pc)) {
- return names_[i];
- }
- }
- }
- return NULL;
-}
-
-
-void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kInterrupt);
-}
-
-
-void Builtins::Generate_StackCheck(MacroAssembler* masm) {
- masm->TailCallRuntime(Runtime::kStackGuard);
-}
-
-
-#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
-Handle<Code> Builtins::name() { \
- Code** code_address = \
- reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
-}
-#define DEFINE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
-Handle<Code> Builtins::name() { \
- Code** code_address = \
- reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
-}
-#define DEFINE_BUILTIN_ACCESSOR_T(name, argc) \
- Handle<Code> Builtins::name() { \
- Code** code_address = reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
- }
-#define DEFINE_BUILTIN_ACCESSOR_H(name, kind) \
-Handle<Code> Builtins::name() { \
- Code** code_address = \
- reinterpret_cast<Code**>(builtin_address(k##name)); \
- return Handle<Code>(code_address); \
-}
-BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C)
-BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A)
-BUILTIN_LIST_T(DEFINE_BUILTIN_ACCESSOR_T)
-BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H)
-BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A)
-#undef DEFINE_BUILTIN_ACCESSOR_C
-#undef DEFINE_BUILTIN_ACCESSOR_A
-#undef DEFINE_BUILTIN_ACCESSOR_T
-#undef DEFINE_BUILTIN_ACCESSOR_H
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
deleted file mode 100644
index 221d06f30f..0000000000
--- a/deps/v8/src/builtins.h
+++ /dev/null
@@ -1,671 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BUILTINS_H_
-#define V8_BUILTINS_H_
-
-#include "src/base/flags.h"
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-
-namespace compiler {
-
-// Forward declarations.
-class CodeStubAssembler;
-
-} // namespace compiler
-
-// Specifies extra arguments required by a C++ builtin.
-enum class BuiltinExtraArguments : uint8_t {
- kNone = 0u,
- kTarget = 1u << 0,
- kNewTarget = 1u << 1,
- kTargetAndNewTarget = kTarget | kNewTarget
-};
-
-inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
- return static_cast<uint8_t>(lhs) & static_cast<uint8_t>(rhs);
-}
-
-
-#define CODE_AGE_LIST_WITH_ARG(V, A) \
- V(Quadragenarian, A) \
- V(Quinquagenarian, A) \
- V(Sexagenarian, A) \
- V(Septuagenarian, A) \
- V(Octogenarian, A)
-
-#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
-
-#define CODE_AGE_LIST(V) \
- CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-
-#define CODE_AGE_LIST_COMPLETE(V) \
- V(ToBeExecutedOnce) \
- V(NotExecuted) \
- V(ExecutedOnce) \
- V(NoAge) \
- CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-
-#define DECLARE_CODE_AGE_BUILTIN(C, V) \
- V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
- UNINITIALIZED, kNoExtraICState) \
- V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
- UNINITIALIZED, kNoExtraICState)
-
-
-// Define list of builtins implemented in C++.
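-// Each BUILTIN_LIST_* macro is an X macro: it is invoked with a macro V
-// that is expanded once per entry, e.g. BUILTIN_LIST_C(DEF_ENUM_C) below
-// turns every V(name, extra_args) line into an enum constant.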
-#define BUILTIN_LIST_C(V) \
- V(Illegal, kNone) \
- \
- V(EmptyFunction, kNone) \
- \
- V(ArrayConcat, kNone) \
- V(ArrayIsArray, kNone) \
- V(ArrayPop, kNone) \
- V(ArrayPush, kNone) \
- V(ArrayShift, kNone) \
- V(ArraySlice, kNone) \
- V(ArraySplice, kNone) \
- V(ArrayUnshift, kNone) \
- \
- V(ArrayBufferConstructor, kTarget) \
- V(ArrayBufferConstructor_ConstructStub, kTargetAndNewTarget) \
- V(ArrayBufferIsView, kNone) \
- \
- V(BooleanConstructor, kNone) \
- V(BooleanConstructor_ConstructStub, kTargetAndNewTarget) \
- V(BooleanPrototypeToString, kNone) \
- V(BooleanPrototypeValueOf, kNone) \
- \
- V(DataViewConstructor, kNone) \
- V(DataViewConstructor_ConstructStub, kTargetAndNewTarget) \
- \
- V(DateConstructor, kNone) \
- V(DateConstructor_ConstructStub, kTargetAndNewTarget) \
- V(DateNow, kNone) \
- V(DateParse, kNone) \
- V(DateUTC, kNone) \
- V(DatePrototypeSetDate, kNone) \
- V(DatePrototypeSetFullYear, kNone) \
- V(DatePrototypeSetHours, kNone) \
- V(DatePrototypeSetMilliseconds, kNone) \
- V(DatePrototypeSetMinutes, kNone) \
- V(DatePrototypeSetMonth, kNone) \
- V(DatePrototypeSetSeconds, kNone) \
- V(DatePrototypeSetTime, kNone) \
- V(DatePrototypeSetUTCDate, kNone) \
- V(DatePrototypeSetUTCFullYear, kNone) \
- V(DatePrototypeSetUTCHours, kNone) \
- V(DatePrototypeSetUTCMilliseconds, kNone) \
- V(DatePrototypeSetUTCMinutes, kNone) \
- V(DatePrototypeSetUTCMonth, kNone) \
- V(DatePrototypeSetUTCSeconds, kNone) \
- V(DatePrototypeToDateString, kNone) \
- V(DatePrototypeToISOString, kNone) \
- V(DatePrototypeToPrimitive, kNone) \
- V(DatePrototypeToUTCString, kNone) \
- V(DatePrototypeToString, kNone) \
- V(DatePrototypeToTimeString, kNone) \
- V(DatePrototypeValueOf, kNone) \
- V(DatePrototypeGetYear, kNone) \
- V(DatePrototypeSetYear, kNone) \
- \
- V(FunctionConstructor, kTargetAndNewTarget) \
- V(FunctionPrototypeBind, kNone) \
- V(FunctionPrototypeToString, kNone) \
- \
- V(GeneratorFunctionConstructor, kTargetAndNewTarget) \
- \
- V(GlobalEval, kTarget) \
- \
- V(MathAcos, kNone) \
- V(MathAsin, kNone) \
- V(MathAtan, kNone) \
- V(MathFround, kNone) \
- V(MathImul, kNone) \
- \
- V(ObjectAssign, kNone) \
- V(ObjectCreate, kNone) \
- V(ObjectFreeze, kNone) \
- V(ObjectGetOwnPropertyDescriptor, kNone) \
- V(ObjectGetOwnPropertyNames, kNone) \
- V(ObjectGetOwnPropertySymbols, kNone) \
- V(ObjectIs, kNone) \
- V(ObjectIsExtensible, kNone) \
- V(ObjectIsFrozen, kNone) \
- V(ObjectIsSealed, kNone) \
- V(ObjectKeys, kNone) \
- V(ObjectValues, kNone) \
- V(ObjectEntries, kNone) \
- V(ObjectGetOwnPropertyDescriptors, kNone) \
- V(ObjectPreventExtensions, kNone) \
- V(ObjectSeal, kNone) \
- V(ObjectProtoToString, kNone) \
- \
- V(ProxyConstructor, kNone) \
- V(ProxyConstructor_ConstructStub, kTarget) \
- \
- V(ReflectDefineProperty, kNone) \
- V(ReflectDeleteProperty, kNone) \
- V(ReflectGet, kNone) \
- V(ReflectGetOwnPropertyDescriptor, kNone) \
- V(ReflectGetPrototypeOf, kNone) \
- V(ReflectHas, kNone) \
- V(ReflectIsExtensible, kNone) \
- V(ReflectOwnKeys, kNone) \
- V(ReflectPreventExtensions, kNone) \
- V(ReflectSet, kNone) \
- V(ReflectSetPrototypeOf, kNone) \
- \
- V(StringFromCharCode, kNone) \
- \
- V(SymbolConstructor, kNone) \
- V(SymbolConstructor_ConstructStub, kTarget) \
- \
- V(HandleApiCall, kTarget) \
- V(HandleApiCallConstruct, kTarget) \
- V(HandleApiCallAsFunction, kNone) \
- V(HandleApiCallAsConstructor, kNone) \
- \
- V(RestrictedFunctionPropertiesThrower, kNone) \
- V(RestrictedStrictArgumentsPropertiesThrower, kNone)
-
-// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(CallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(TailCallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(TailCallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(TailCallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(TailCallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(TailCall_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(TailCall_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(TailCall_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ConstructBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(Apply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(HandleFastApiCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSBuiltinsConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSBuiltinsConstructStubForDerived, BUILTIN, UNINITIALIZED, \
- kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterPushArgsAndTailCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterNotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterNotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterNotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterEnterBytecodeDispatch, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
- V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(DatePrototypeGetDate, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetDay, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetHours, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetTime, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetTimezoneOffset, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCDate, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCDay, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCFullYear, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCHours, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCMilliseconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCMinutes, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCMonth, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(DatePrototypeGetUTCSeconds, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(FunctionHasInstance, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(FunctionPrototypeApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(FunctionPrototypeCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(MathMax, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MathMin, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(NumberConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NumberConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(StringConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StringConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
-
-// Define list of builtins implemented in TurboFan (with JS linkage).
-#define BUILTIN_LIST_T(V) \
- V(MathCeil, 2) \
- V(MathClz32, 2) \
- V(MathFloor, 2) \
- V(MathRound, 2) \
- V(MathSqrt, 2) \
- V(MathTrunc, 2) \
- V(ObjectHasOwnProperty, 2)
-
-// Define list of builtin handlers implemented in assembly.
-#define BUILTIN_LIST_H(V) \
- V(LoadIC_Slow, LOAD_IC) \
- V(KeyedLoadIC_Slow, KEYED_LOAD_IC) \
- V(StoreIC_Slow, STORE_IC) \
- V(KeyedStoreIC_Slow, KEYED_STORE_IC) \
- V(LoadIC_Normal, LOAD_IC) \
- V(StoreIC_Normal, STORE_IC)
-
-// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, kNoExtraICState) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState)
-
-
-class BuiltinFunctionTable;
-class ObjectVisitor;
-
-
-class Builtins {
- public:
- ~Builtins();
-
- // Generate all builtin code objects. Should be called once during
- // isolate initialization.
- void SetUp(Isolate* isolate, bool create_heap_objects);
- void TearDown();
-
- // Garbage collection support.
- void IterateBuiltins(ObjectVisitor* v);
-
- // Disassembler support.
- const char* Lookup(byte* pc);
-
- enum Name {
-#define DEF_ENUM_C(name, ignore) k##name,
-#define DEF_ENUM_A(name, kind, state, extra) k##name,
-#define DEF_ENUM_T(name, argc) k##name,
-#define DEF_ENUM_H(name, kind) k##name,
- BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A)
- BUILTIN_LIST_T(DEF_ENUM_T) BUILTIN_LIST_H(DEF_ENUM_H)
- BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
-#undef DEF_ENUM_C
-#undef DEF_ENUM_A
-#undef DEF_ENUM_T
-#undef DEF_ENUM_H
- builtin_count
- };
-
- enum CFunctionId {
-#define DEF_ENUM_C(name, ignore) c_##name,
- BUILTIN_LIST_C(DEF_ENUM_C)
-#undef DEF_ENUM_C
- cfunction_count
- };
-
-#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
- Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_T(name, argc) Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_H(name, kind) Handle<Code> name();
- BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
- BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
- BUILTIN_LIST_T(DECLARE_BUILTIN_ACCESSOR_T)
- BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H)
- BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
-#undef DECLARE_BUILTIN_ACCESSOR_C
-#undef DECLARE_BUILTIN_ACCESSOR_A
-#undef DECLARE_BUILTIN_ACCESSOR_T
-#undef DECLARE_BUILTIN_ACCESSOR_H
-
- // Convenience wrappers.
- Handle<Code> CallFunction(
- ConvertReceiverMode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
- Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
- Handle<Code> CallBoundFunction(TailCallMode tail_call_mode);
- Handle<Code> InterpreterPushArgsAndCall(TailCallMode tail_call_mode);
-
- Code* builtin(Name name) {
- // Code::cast cannot be used here since we access builtins
-    // during the marking phase of mark-sweep. See IC::Clear.
- return reinterpret_cast<Code*>(builtins_[name]);
- }
-
- Address builtin_address(Name name) {
- return reinterpret_cast<Address>(&builtins_[name]);
- }
-
- static Address c_function_address(CFunctionId id) {
- return c_functions_[id];
- }
-
- const char* name(int index) {
- DCHECK(index >= 0);
- DCHECK(index < builtin_count);
- return names_[index];
- }
-
- bool is_initialized() const { return initialized_; }
-
- MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
- Handle<HeapObject> function, Handle<Object> receiver, int argc,
- Handle<Object> args[]);
-
- private:
- Builtins();
-
- // The external C++ functions called from the code.
- static Address const c_functions_[cfunction_count];
-
-  // Note: These are always Code objects, but to conform with
-  // IterateBuiltins() above, which assumes Object** arguments for the
-  // callback function, we use an Object* array here.
- Object* builtins_[builtin_count];
- const char* names_[builtin_count];
-
- static void Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args);
- static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
- static void Generate_CompileLazy(MacroAssembler* masm);
- static void Generate_InOptimizationQueue(MacroAssembler* masm);
- static void Generate_CompileOptimized(MacroAssembler* masm);
- static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
- static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
- static void Generate_JSBuiltinsConstructStub(MacroAssembler* masm);
- static void Generate_JSBuiltinsConstructStubForDerived(MacroAssembler* masm);
- static void Generate_JSConstructStubApi(MacroAssembler* masm);
- static void Generate_JSEntryTrampoline(MacroAssembler* masm);
- static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
- static void Generate_NotifyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_NotifyStubFailure(MacroAssembler* masm);
- static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
- static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
-
- static void Generate_Apply(MacroAssembler* masm);
-
- // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
- static void Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
- static void Generate_CallFunction_ReceiverIsNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kDisallow);
- }
- static void Generate_CallFunction_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kDisallow);
- }
- static void Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kAny,
- TailCallMode::kDisallow);
- }
- static void Generate_TailCallFunction_ReceiverIsNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kAllow);
- }
- static void Generate_TailCallFunction_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kAllow);
- }
- static void Generate_TailCallFunction_ReceiverIsAny(MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kAny,
- TailCallMode::kAllow);
- }
- // ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList)
- static void Generate_CallBoundFunctionImpl(MacroAssembler* masm,
- TailCallMode tail_call_mode);
- static void Generate_CallBoundFunction(MacroAssembler* masm) {
- Generate_CallBoundFunctionImpl(masm, TailCallMode::kDisallow);
- }
- static void Generate_TailCallBoundFunction(MacroAssembler* masm) {
- Generate_CallBoundFunctionImpl(masm, TailCallMode::kAllow);
- }
- // ES6 section 7.3.12 Call(F, V, [argumentsList])
- static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
- static void Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kDisallow);
- }
- static void Generate_Call_ReceiverIsNotNullOrUndefined(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kDisallow);
- }
- static void Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow);
- }
- static void Generate_TailCall_ReceiverIsNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
- TailCallMode::kAllow);
- }
- static void Generate_TailCall_ReceiverIsNotNullOrUndefined(
- MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
- TailCallMode::kAllow);
- }
- static void Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
- }
-
- // ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget)
- static void Generate_ConstructFunction(MacroAssembler* masm);
- // ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget)
- static void Generate_ConstructBoundFunction(MacroAssembler* masm);
- // ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget)
- static void Generate_ConstructProxy(MacroAssembler* masm);
- // ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget])
- static void Generate_Construct(MacroAssembler* masm);
-
- static void Generate_HandleFastApiCall(MacroAssembler* masm);
-
- static void Generate_DatePrototype_GetField(MacroAssembler* masm,
- int field_index);
- // ES6 section 20.3.4.2 Date.prototype.getDate ( )
- static void Generate_DatePrototypeGetDate(MacroAssembler* masm);
- // ES6 section 20.3.4.3 Date.prototype.getDay ( )
- static void Generate_DatePrototypeGetDay(MacroAssembler* masm);
- // ES6 section 20.3.4.4 Date.prototype.getFullYear ( )
- static void Generate_DatePrototypeGetFullYear(MacroAssembler* masm);
- // ES6 section 20.3.4.5 Date.prototype.getHours ( )
- static void Generate_DatePrototypeGetHours(MacroAssembler* masm);
- // ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( )
- static void Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm);
- // ES6 section 20.3.4.7 Date.prototype.getMinutes ( )
- static void Generate_DatePrototypeGetMinutes(MacroAssembler* masm);
- // ES6 section 20.3.4.8 Date.prototype.getMonth ( )
- static void Generate_DatePrototypeGetMonth(MacroAssembler* masm);
- // ES6 section 20.3.4.9 Date.prototype.getSeconds ( )
- static void Generate_DatePrototypeGetSeconds(MacroAssembler* masm);
- // ES6 section 20.3.4.10 Date.prototype.getTime ( )
- static void Generate_DatePrototypeGetTime(MacroAssembler* masm);
- // ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( )
- static void Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm);
- // ES6 section 20.3.4.12 Date.prototype.getUTCDate ( )
- static void Generate_DatePrototypeGetUTCDate(MacroAssembler* masm);
- // ES6 section 20.3.4.13 Date.prototype.getUTCDay ( )
- static void Generate_DatePrototypeGetUTCDay(MacroAssembler* masm);
- // ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( )
- static void Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm);
- // ES6 section 20.3.4.15 Date.prototype.getUTCHours ( )
- static void Generate_DatePrototypeGetUTCHours(MacroAssembler* masm);
- // ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( )
- static void Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm);
- // ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( )
- static void Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm);
- // ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( )
- static void Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm);
- // ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( )
- static void Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm);
-
- static void Generate_FunctionHasInstance(MacroAssembler* masm);
- static void Generate_FunctionPrototypeApply(MacroAssembler* masm);
- static void Generate_FunctionPrototypeCall(MacroAssembler* masm);
-
- static void Generate_ReflectApply(MacroAssembler* masm);
- static void Generate_ReflectConstruct(MacroAssembler* masm);
-
- static void Generate_InternalArrayCode(MacroAssembler* masm);
- static void Generate_ArrayCode(MacroAssembler* masm);
-
- // ES6 section 20.2.2.10 Math.ceil ( x )
- static void Generate_MathCeil(compiler::CodeStubAssembler* assembler);
- // ES6 section 20.2.2.11 Math.clz32 ( x )
- static void Generate_MathClz32(compiler::CodeStubAssembler* assembler);
- // ES6 section 20.2.2.16 Math.floor ( x )
- static void Generate_MathFloor(compiler::CodeStubAssembler* assembler);
- enum class MathMaxMinKind { kMax, kMin };
- static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
- // ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
- static void Generate_MathMax(MacroAssembler* masm) {
- Generate_MathMaxMin(masm, MathMaxMinKind::kMax);
- }
- // ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values )
- static void Generate_MathMin(MacroAssembler* masm) {
- Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
- }
- // ES6 section 20.2.2.28 Math.round ( x )
- static void Generate_MathRound(compiler::CodeStubAssembler* assembler);
- // ES6 section 20.2.2.32 Math.sqrt ( x )
- static void Generate_MathSqrt(compiler::CodeStubAssembler* assembler);
- // ES6 section 20.2.2.35 Math.trunc ( x )
- static void Generate_MathTrunc(compiler::CodeStubAssembler* assembler);
-
- // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
- static void Generate_NumberConstructor(MacroAssembler* masm);
- // ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
- static void Generate_NumberConstructor_ConstructStub(MacroAssembler* masm);
-
- // ES6 section 19.1.3.2 Object.prototype.hasOwnProperty
- static void Generate_ObjectHasOwnProperty(
- compiler::CodeStubAssembler* assembler);
-
- static void Generate_StringConstructor(MacroAssembler* masm);
- static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
- static void Generate_OnStackReplacement(MacroAssembler* masm);
- static void Generate_InterruptCheck(MacroAssembler* masm);
- static void Generate_StackCheck(MacroAssembler* masm);
-
- static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
- static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
- static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
- return Generate_InterpreterPushArgsAndCallImpl(masm,
- TailCallMode::kDisallow);
- }
- static void Generate_InterpreterPushArgsAndTailCall(MacroAssembler* masm) {
- return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow);
- }
- static void Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode);
- static void Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm);
- static void Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm);
- static void Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm);
- static void Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm);
- static void Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm);
-
-#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
- static void Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm); \
- static void Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm);
- CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
-
- static void Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm);
- static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm);
- static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm);
-
- static void InitBuiltinFunctionTable();
-
- bool initialized_;
-
- friend class BuiltinFunctionTable;
- friend class Isolate;
-
- DISALLOW_COPY_AND_ASSIGN(Builtins);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_BUILTINS_H_
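An aside on the accessors removed above: builtin() and builtin_address() read straight out of a flat Object* table so the GC can visit and rewrite each slot in place (hence the reinterpret_cast rather than Code::cast). A minimal sketch of that shape, with hypothetical stand-in types (Code, Name, and builtin_slot are placeholders here, not the real V8 declarations):

#include <cassert>
struct Code {};                    // stand-in for v8::internal::Code
enum Name { kIllegal, kCallFunction, builtin_count };
struct BuiltinsTable {
  Code* builtins_[builtin_count];  // slots the GC may rewrite during marking
  Code* builtin(Name name) { return builtins_[name]; }
  Code** builtin_slot(Name name) { return &builtins_[name]; }  // cf. builtin_address
};
int main() {
  Code stub;
  BuiltinsTable t;
  t.builtins_[kCallFunction] = &stub;
  assert(t.builtin(kCallFunction) == &stub);
  assert(*t.builtin_slot(kCallFunction) == &stub);
}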
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 1fffcb67e5..1b643d437b 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -13,13 +13,10 @@
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- r0 : number of arguments excluding receiver
// -- r1 : target
@@ -37,32 +34,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ordinary functions).
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- switch (extra_args) {
- case BuiltinExtraArguments::kTarget:
- __ Push(r1);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kNewTarget:
- __ Push(r3);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kTargetAndNewTarget:
- __ Push(r1, r3);
- num_extra_args += 2;
- break;
- case BuiltinExtraArguments::kNone:
- break;
- }
-
// JumpToExternalReference expects r0 to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ add(r0, r0, Operand(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
+ // Insert extra arguments.
+ __ SmiTag(r0);
+ __ Push(r0, r1, r3);
+ __ SmiUntag(r0);
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ exit_frame_type == BUILTIN_EXIT);
+}
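Reading the rewritten adaptor: instead of a per-builtin switch over BuiltinExtraArguments, every adapted builtin now receives the same three implicit extras (smi-tagged argument count, target, new.target) on top of the JS arguments and the receiver. A hedged sketch of just that bookkeeping, as plain arithmetic rather than V8 code:

#include <cassert>
// Mirrors the hard-coded num_extra_args = 3 in the hunk above: the smi-tagged
// argc, the target (r1), and the new.target (r3) pushed before the call.
constexpr int kNumExtraArgs = 3;
// r0 after the add: JS arguments plus receiver plus the three extras.
int ArgsSeenByRuntime(int js_argc) { return js_argc + kNumExtraArgs + 1; }
int main() { assert(ArgsSeenByRuntime(2) == 6); }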
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
@@ -71,14 +55,12 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
-
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the Array function from the current native context.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -106,7 +88,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -135,14 +116,15 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
- // -- r0 : number of arguments
- // -- lr : return address
- // -- sp[(argc - n) * 8] : arg[n] (zero-based)
- // -- sp[(argc + 1) * 8] : receiver
+ // -- r0 : number of arguments
+ // -- r1 : function
+ // -- cp : context
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
// -----------------------------------
Condition const cc_done = (kind == MathMaxMinKind::kMin) ? mi : gt;
Condition const cc_swap = (kind == MathMaxMinKind::kMin) ? gt : mi;
@@ -152,51 +134,51 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
// Load the accumulator with the default return value (either -Infinity or
- // +Infinity), with the tagged value in r1 and the double value in d1.
- __ LoadRoot(r1, root_index);
- __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
-
- // Remember how many slots to drop (including the receiver).
- __ add(r4, r0, Operand(1));
+ // +Infinity), with the tagged value in r5 and the double value in d1.
+ __ LoadRoot(r5, root_index);
+ __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
Label done_loop, loop;
+ __ mov(r4, r0);
__ bind(&loop);
{
// Check if all parameters done.
- __ sub(r0, r0, Operand(1), SetCC);
+ __ sub(r4, r4, Operand(1), SetCC);
__ b(lt, &done_loop);
// Load the next parameter tagged value into r2.
- __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ ldr(r2, MemOperand(sp, r4, LSL, kPointerSizeLog2));
// Load the double value of the parameter into d2, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(r2, &convert_smi);
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ JumpIfRoot(r3, Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ FrameScope scope(masm, StackFrame::MANUAL);
__ SmiTag(r0);
__ SmiTag(r4);
- __ Push(r0, r1, r4);
+ __ EnterBuiltinFrame(cp, r1, r0);
+ __ Push(r4, r5);
__ mov(r0, r2);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ mov(r2, r0);
- __ Pop(r0, r1, r4);
+ __ Pop(r4, r5);
+ __ LeaveBuiltinFrame(cp, r1, r0);
+ __ SmiUntag(r4);
+ __ SmiUntag(r0);
{
// Restore the double accumulator value (d1).
Label done_restore;
- __ SmiToDouble(d1, r1);
- __ JumpIfSmi(r1, &done_restore);
- __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ SmiToDouble(d1, r5);
+ __ JumpIfSmi(r5, &done_restore);
+ __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ bind(&done_restore);
}
- __ SmiUntag(r4);
- __ SmiUntag(r0);
}
__ b(&convert);
__ bind(&convert_number);
@@ -222,19 +204,21 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// Result is on the right hand side.
__ bind(&compare_swap);
__ vmov(d1, d2);
- __ mov(r1, r2);
+ __ mov(r5, r2);
__ b(&loop);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
- __ LoadRoot(r1, Heap::kNanValueRootIndex);
- __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ LoadRoot(r5, Heap::kNanValueRootIndex);
+ __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ b(&loop);
}
__ bind(&done_loop);
- __ mov(r0, r1);
- __ Drop(r4);
+ // Drop all slots, including the receiver.
+ __ add(r0, r0, Operand(1));
+ __ Drop(r0);
+ __ mov(r0, r5);
__ Ret();
}
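The Math.max/min loop above follows the spec shape: seed the accumulator with the appropriate infinity, coerce each argument (calling the ToNumber builtin when needed), let any NaN poison the result, and only at the end drop all arguments plus the receiver. A simplified C++ sketch of the max case (not V8 code, and it ignores the signed-zero corner case the real builtin must also respect):

#include <cassert>
#include <cmath>
#include <limits>
#include <vector>
double MathMax(const std::vector<double>& args) {
  double acc = -std::numeric_limits<double>::infinity();  // default result
  for (double x : args) {
    if (std::isnan(x)) return std::numeric_limits<double>::quiet_NaN();
    if (x > acc) acc = x;  // the compare_swap case: result on the right
  }
  return acc;
}
int main() {
  assert(MathMax({}) == -std::numeric_limits<double>::infinity());
  assert(MathMax({1.0, 3.0, 2.0}) == 3.0);
  assert(std::isnan(MathMax({1.0, std::nan("")})));
}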
@@ -243,24 +227,36 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r0 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into r0.
Label no_arguments;
{
+ __ mov(r2, r0); // Store argc in r2.
__ sub(r0, r0, Operand(1), SetCC);
__ b(lo, &no_arguments);
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- __ Drop(2);
+ __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2));
}
// 2a. Convert the first argument to a number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r2);
+ __ EnterBuiltinFrame(cp, r1, r2);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(cp, r1, r2);
+ __ SmiUntag(r2);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ Drop(r2);
+ __ Ret(1);
+ }
// 2b. No arguments, return +0.
__ bind(&no_arguments);
@@ -268,13 +264,13 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
__ Ret(1);
}
-
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r3 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -283,18 +279,16 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // 2. Load the first argument into r2 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into r2.
{
Label no_arguments, done;
+ __ mov(r6, r0); // Store argc in r6.
__ sub(r0, r0, Operand(1), SetCC);
__ b(lo, &no_arguments);
- __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- __ Drop(2);
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ b(&done);
__ bind(&no_arguments);
__ Move(r2, Smi::FromInt(0));
- __ Drop(1);
__ bind(&done);
}
@@ -305,70 +299,81 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ CompareObjectType(r2, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &done_convert);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r1, r3);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r6);
+ __ EnterBuiltinFrame(cp, r1, r6);
+ __ Push(r3);
__ Move(r0, r2);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Move(r2, r0);
- __ Pop(r1, r3);
+ __ Pop(r3);
+ __ LeaveBuiltinFrame(cp, r1, r6);
+ __ SmiUntag(r6);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ cmp(r1, r3);
__ b(ne, &new_object);
// 5. Allocate a JSValue wrapper for the number.
__ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
- __ Ret();
+ __ b(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r2); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(r6);
+ __ EnterBuiltinFrame(cp, r1, r6);
+ __ Push(r2); // first argument
__ CallStub(&stub);
__ Pop(r2);
+ __ LeaveBuiltinFrame(cp, r1, r6);
+ __ SmiUntag(r6);
}
__ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r6);
+ __ Ret(1);
+ }
+}
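Taken together, the two Number hunks follow ES6 20.1.1.1: the [[Call]] path simply returns ToNumber(value) (or +0 with no arguments), while the [[Construct]] path wraps the converted value in a JSValue and defers to the runtime when new.target differs from the Number constructor (a subclass). A rough control-flow sketch with hypothetical helper names, not V8's API:

#include <cassert>
struct Wrapper { double value; bool via_runtime; };       // illustrative stand-ins
double ToNumber(double v) { return v; }                   // already numeric here
Wrapper AllocateJSValue(double v) { return {v, false}; }  // step 5
Wrapper RuntimeNewObject(double v) { return {v, true}; }  // step 6
Wrapper NumberConstruct(double arg, bool new_target_is_number_ctor) {
  double value = ToNumber(arg);                           // steps 2-3
  return new_target_is_number_ctor                        // step 4
             ? AllocateJSValue(value)
             : RuntimeNewObject(value);
}
int main() {
  assert(!NumberConstruct(42, true).via_runtime);
  assert(NumberConstruct(42, false).via_runtime);
}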
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r0 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into r0.
Label no_arguments;
{
+ __ mov(r2, r0); // Store argc in r2.
__ sub(r0, r0, Operand(1), SetCC);
__ b(lo, &no_arguments);
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- __ Drop(2);
+ __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2));
}
// 2a. At least one argument, return r0 if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(r0, &to_string);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(r0, r3, r3, FIRST_NONSTRING_TYPE);
__ b(hi, &to_string);
__ b(eq, &symbol_descriptive_string);
- __ Ret();
+ __ b(&drop_frame_and_ret);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -381,18 +386,31 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert r0 to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(r2);
+ __ EnterBuiltinFrame(cp, r1, r2);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(cp, r1, r2);
+ __ SmiUntag(r2);
}
+ __ b(&drop_frame_and_ret);
// 3b. Convert symbol in r0 to a string.
__ bind(&symbol_descriptive_string);
{
+ __ Drop(r2);
+ __ Drop(1);
__ Push(r0);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r2);
+ __ Ret(1);
+ }
+}
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
@@ -400,6 +418,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- r0 : number of arguments
// -- r1 : constructor function
// -- r3 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -408,18 +427,16 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // 2. Load the first argument into r2 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into r2.
{
Label no_arguments, done;
+ __ mov(r6, r0); // Store argc in r6.
__ sub(r0, r0, Operand(1), SetCC);
__ b(lo, &no_arguments);
- __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- __ Drop(2);
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ b(&done);
__ bind(&no_arguments);
__ LoadRoot(r2, Heap::kempty_stringRootIndex);
- __ Drop(1);
__ bind(&done);
}
@@ -431,39 +448,51 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ b(lo, &done_convert);
__ bind(&convert);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(r1, r3);
+ __ SmiTag(r6);
+ __ EnterBuiltinFrame(cp, r1, r6);
+ __ Push(r3);
__ Move(r0, r2);
__ CallStub(&stub);
__ Move(r2, r0);
- __ Pop(r1, r3);
+ __ Pop(r3);
+ __ LeaveBuiltinFrame(cp, r1, r6);
+ __ SmiUntag(r6);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ cmp(r1, r3);
__ b(ne, &new_object);
// 5. Allocate a JSValue wrapper for the string.
__ AllocateJSValue(r0, r1, r2, r4, r5, &new_object);
- __ Ret();
+ __ b(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r2); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(r6);
+ __ EnterBuiltinFrame(cp, r1, r6);
+ __ Push(r2); // first argument
__ CallStub(&stub);
__ Pop(r2);
+ __ LeaveBuiltinFrame(cp, r1, r6);
+ __ SmiUntag(r6);
}
__ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r6);
+ __ Ret(1);
+ }
+}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -503,7 +532,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(r2);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
@@ -521,7 +549,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_implicit_receiver,
@@ -604,16 +631,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r0: number of arguments
// r1: constructor function
// r3: new target
- if (is_api_function) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -683,27 +703,180 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Jump(lr);
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the value to pass to the generator
+ // -- r1 : the JSGeneratorObject to resume
+ // -- r2 : the resume mode (tagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(r1);
+
+ // Store input value into generator object.
+ __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
+
+ // Load suspended function and context.
+ __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
+ __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ mov(ip, Operand(last_step_action));
+ __ ldrsb(ip, MemOperand(ip));
+ __ cmp(ip, Operand(StepIn));
+ __ b(ge, &prepare_step_in_if_stepping);
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ mov(ip, Operand(debug_suspended_generator));
+ __ ldr(ip, MemOperand(ip));
+ __ cmp(ip, Operand(r1));
+ __ b(eq, &prepare_step_in_suspended_generator);
+ __ bind(&stepping_prepared);
+
+ // Push receiver.
+ __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
+ __ Push(ip);
+
+ // ----------- S t a t e -------------
+ // -- r1 : the JSGeneratorObject to resume
+ // -- r2 : the resume mode (tagged)
+ // -- r4 : generator function
+ // -- cp : generator context
+ // -- lr : return address
+ // -- sp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
+ __ b(mi, &done_loop);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
+ __ b(ne, &old_generator);
+
+ // New-style (ignition/turbofan) generator object
+ {
+ __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0, FieldMemOperand(
+ r0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(r0);
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(r3, r1);
+ __ Move(r1, r4);
+ __ ldr(r5, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Jump(r5);
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(lr, fp);
+ __ Move(fp, sp);
+ __ Push(cp, r4);
+
+ // Restore the operand stack.
+ __ ldr(r0, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+ __ ldr(r3, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ add(r0, r0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(r3, r0, Operand(r3, LSL, kPointerSizeLog2 - 1));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ cmp(r0, r3);
+ __ b(eq, &done_loop);
+ __ ldr(ip, MemOperand(r0, kPointerSize, PostIndex));
+ __ Push(ip);
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+ __ str(ip, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
+
+ // Resume the generator function at the continuation.
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ add(r3, r3, Operand(r2, ASR, 1));
+ __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Move(r0, r1); // Continuation expects generator object in r0.
+ __ Jump(r3);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r2, r4);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(r1, r2);
+ __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+ }
+ __ b(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1, r2);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(r1, r2);
+ __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+ }
+ __ b(&stepping_prepared);
+}
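Two details of the new resume trampoline are worth spelling out: new.target is reused to carry the generator object (safe, since generator functions are never constructed), and the old-style path resumes at an address derived from the stored smi continuation offset. The address arithmetic, sketched with placeholder constants (the real values come from Code::kHeaderSize and kHeapObjectTag):

#include <cassert>
#include <cstdint>
constexpr std::uintptr_t kHeaderSize = 64;  // placeholder, not the real value
constexpr std::uintptr_t kHeapObjectTag = 1;
// Mirrors: r3 = code + kHeaderSize - kHeapObjectTag + (continuation_smi >> 1).
std::uintptr_t ResumePc(std::uintptr_t code_object, std::int32_t continuation_smi) {
  // 32-bit smis carry the payload in the upper bits; ASR #1 untags them.
  return code_object + kHeaderSize - kHeapObjectTag + (continuation_smi >> 1);
}
int main() {
  // A continuation offset of 10 is stored as the smi 20 (10 << 1).
  assert(ResumePc(0x1000, 20) == 0x1000 + 64 - 1 + 10);
}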
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -711,10 +884,8 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-
enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers r2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
@@ -741,7 +912,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ bind(&okay);
}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
@@ -785,8 +955,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ b(&entry);
__ bind(&loop);
__ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
- __ ldr(r0, MemOperand(r0)); // dereference handle
- __ push(r0); // push parameter
+ __ ldr(r0, MemOperand(r0)); // dereference handle
+ __ push(r0); // push parameter
__ bind(&entry);
__ cmp(r4, r2);
__ b(ne, &loop);
@@ -822,16 +992,29 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r0: result
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+ Register args_count = scratch;
+
+ // Get the arguments + receiver count.
+ __ ldr(args_count,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ ldr(args_count,
+ FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ // Drop receiver + arguments.
+ __ add(sp, sp, args_count, LeaveCC);
+}
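LeaveInterpreterFrame reads the parameter size straight off the on-frame BytecodeArray and adds it to sp, popping receiver and arguments in one step. In V8's layout that field is a byte count that already includes the receiver slot, so the arithmetic reduces to (sketch, 32-bit pointers assumed):

#include <cassert>
constexpr int kPointerSize = 4;  // ARM32
// Byte count popped after leaving the frame: arguments plus the receiver.
int BytesToDrop(int js_argc) { return (js_argc + 1) * kPointerSize; }
int main() { assert(BytesToDrop(2) == 12); }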
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -850,14 +1033,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r1);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
Register debug_info = kInterpreterBytecodeArrayRegister;
DCHECK(!debug_info.is(r0));
@@ -867,10 +1052,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
__ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex), ne);
+ FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex), ne);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
+ __ cmp(r0, Operand(masm->CodeObject())); // Self-reference to this code.
+ __ b(ne, &switch_to_different_code_kind);
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
@@ -878,8 +1069,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ mov(r0, Operand(0));
+ // Load the initial bytecode offset.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
__ Push(r3, kInterpreterBytecodeArrayRegister, r0);
// Allocate the local and temporary register file on the stack.
@@ -911,18 +1106,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(&loop_header, ge);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ add(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ mov(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -932,36 +1117,51 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+ // The return value is in r0.
+ LeaveInterpreterFrame(masm, r2);
+ __ Jump(lr);
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(r1, r4, r5);
+ __ Jump(r4);
}
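The self-reference check added above is the tiering hook for the new pipeline: if SharedFunctionInfo::code no longer points at this very trampoline, the function has moved to a different code kind, so the closure's code entry is patched ("healed") before jumping to the new code. A hedged sketch with hypothetical types:

#include <cassert>
struct Code { int id; };                       // stand-ins, not V8's types
struct SharedFunctionInfo { Code* code; };
struct JSFunction { SharedFunctionInfo* shared; Code* code_entry; };
// Returns the code to run; patches the closure when the shared code moved on.
Code* EnterOrHeal(JSFunction* fn, Code* this_trampoline) {
  Code* shared_code = fn->shared->code;
  if (shared_code == this_trampoline) return this_trampoline;  // keep interpreting
  fn->code_entry = shared_code;  // heal, then (in the builtin) tail-call it
  return shared_code;
}
int main() {
  Code trampoline{0}, baseline{1};
  SharedFunctionInfo sfi{&baseline};
  JSFunction fn{&sfi, &trampoline};
  assert(EnterOrHeal(&fn, &trampoline) == &baseline);
  assert(fn.code_entry == &baseline);
}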
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ ldr(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, r2);
- // The return value is in accumulator, which is already in r0.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(r0);
- // Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Push function as argument and compile for baseline.
+ __ push(r1);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ ldr(ip, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ add(sp, sp, ip, LeaveCC);
+ // Restore return value.
+ __ pop(r0);
+ }
__ Jump(lr);
}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
Register limit, Register scratch) {
Label loop_header, loop_check;
@@ -974,10 +1174,10 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
__ b(gt, &loop_header);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@@ -995,12 +1195,18 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, r2, r3, r4);
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
}
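The new CallableType parameter lets a caller that has already proven the callee is a JSFunction jump straight to the CallFunction builtin, skipping the generic Call builtin's type dispatch. As a plain selection (illustrative names only):

#include <cassert>
#include <string>
enum class CallableType { kJSFunction, kAny };
// Mirrors the branch above: known JSFunctions take the cheaper entry point.
std::string PickCallBuiltin(CallableType t) {
  return t == CallableType::kJSFunction ? "CallFunction" : "Call";
}
int main() { assert(PickCallBuiltin(CallableType::kAny) == "Call"); }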
-
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1025,25 +1231,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ add(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ Move(r2, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+
+ // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ ldr(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ ldr(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1056,9 +1261,7 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1066,74 +1269,246 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r1, LSL,
kPointerSizeLog2));
- __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ mov(pc, ip);
}
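Instead of returning to a dedicated exit stub, the rewritten dispatch forges lr to point into the middle of InterpreterEntryTrampoline, at the pc offset the heap recorded when the trampoline was generated, so the stack walker sees an ordinary interpreted frame. The dispatch itself is a two-step table lookup, sketched here over bytes (Handler is a hypothetical stand-in for a code entry address):

#include <cassert>
#include <cstdint>
#include <vector>
using Handler = int;  // stand-in for a code entry address
// Mirrors: ldrb r1, [bytecode, offset]; ldr ip, [table, r1 << 2]; mov pc, ip.
Handler Dispatch(const std::vector<std::uint8_t>& bytecode, int offset,
                 const std::vector<Handler>& dispatch_table) {
  return dispatch_table[bytecode[offset]];
}
int main() {
  std::vector<std::uint8_t> bytecode = {0x02};
  std::vector<Handler> table(256, 0);
  table[0x02] = 42;
  assert(Dispatch(bytecode, 0, table) == 42);
}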
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and pop the
- // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -- r1 : target function (preserved for callee)
+ // -----------------------------------
+ // First look up code; maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register argument_count = r0;
+ Register closure = r1;
+ Register new_target = r3;
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = argument_count;
+ Register index = r2;
+ __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(map,
+ FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ cmp(index, Operand(Smi::FromInt(2)));
+ __ b(lt, &gotta_call_runtime);
+
+ // Find literals.
+ // r3 : native context
+ // r2 : length / index
+ // r0 : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = r3;
+ __ ldr(native_context, NativeContextMemOperand());
+
+ __ bind(&loop_top);
+ Register temp = r1;
+ Register array_pointer = r5;
+
+ // Does the native context match?
+ __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
+ __ ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ cmp(temp, native_context);
+ __ b(ne, &loop_bottom);
+ // OSR id set to none?
+ __ ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
+ __ b(ne, &loop_bottom);
+ // Literals available?
+ __ ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ ldr(r4, MemOperand(sp, 0));
+ __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
+ __ push(index);
+ __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = r4;
+ __ ldr(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, r5);
+
+ // Link the closure into the optimized function list.
+ // r4 : code entry
+ // r3 : native context
+ // r1 : closure
+ __ ldr(r5,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ str(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(r5, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, r5);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ Jump(entry);
+
+ __ bind(&loop_bottom);
+ __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ cmp(index, Operand(Smi::FromInt(1)));
+ __ b(gt, &loop_top);
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context free optimized code map entry.
+ __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ ldr(entry,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ and_(r5, r5, Operand(Code::KindField::kMask));
+ __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
+ __ cmp(r5, Operand(Code::BUILTIN));
+ __ b(eq, &gotta_call_runtime_no_stack);
+ // Yes, install the full code.
+ __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, r5);
+ __ Jump(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
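The bulk of the new CompileLazy is a probe of the SharedFunctionInfo's optimized code map before falling back to the runtime: entries are scanned from the back, and a hit needs the current native context, an OSR id of none, and live (non-cleared) literals and code weak cells. A simplified sketch of the probe, with hypothetical types and weak cells reduced to nullable pointers (the real code also splits the miss paths between the shared-code entry and the runtime):

#include <cassert>
#include <vector>
struct Context {};
struct Code { int id; };
struct Entry { Context* context; int osr_ast_id; void* literals; Code* code; };
constexpr int kNoOsrAstId = -1;  // placeholder for BailoutId::None()
Code* ProbeOptimizedCodeMap(const std::vector<Entry>& map, Context* native) {
  for (auto it = map.rbegin(); it != map.rend(); ++it) {  // newest first
    if (it->context != native) continue;         // wrong native context
    if (it->osr_ast_id != kNoOsrAstId) continue;  // OSR-specific entry
    if (!it->literals || !it->code) continue;    // weak cell cleared
    return it->code;                             // hit: install and tail-call
  }
  return nullptr;  // miss: fall back to shared code or the runtime
}
int main() {
  Context native;
  Code opt{7};
  int lits = 0;
  std::vector<Entry> map = {{&native, 3, &lits, &opt},  // OSR entry: skipped
                            {&native, kNoOsrAstId, &lits, &opt}};
  assert(ProbeOptimizedCodeMap(map, &native) == &opt);
}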
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
-
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee)
+ // -- r1 : new target (preserved for callee)
+ // -- r3 : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve argument count for later compare.
+ __ Move(r4, r0);
+ // Push the number of arguments to the callee.
+ __ SmiTag(r0);
+ __ push(r0);
+ // Push a copy of the target function and the new target.
+ __ push(r1);
+ __ push(r3);
+
+ // The function.
+ __ push(r1);
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ cmp(r4, Operand(j));
+ __ b(ne, &over);
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ i * kPointerSize));
+ __ push(r4);
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+
+ // Call the runtime; on success, unwind this frame and the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(r0, &failed);
+
+ __ Drop(2);
+ __ pop(r4);
+ __ SmiUntag(r4);
+ scope.GenerateLeaveFrame();
+
+ __ add(r4, r4, Operand(1));
+ __ Drop(r4);
+ __ Ret();
+
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ pop(r3);
+ __ pop(r1);
+ __ pop(r0);
+ __ SmiUntag(r0);
+ }
+ // On failure, tail call back to regular js.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
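The unrolled loop in InstantiateAsmJs copies at most three caller arguments (stdlib, foreign, heap) and pads the rest with undefined, so Runtime::kInstantiateAsmJs always sees a fixed argument shape. The equivalent logic in sketch form (strings stand in for tagged values):

#include <cassert>
#include <string>
#include <vector>
// Pads the (stdlib, foreign, heap) triple the way the unrolled loop does.
std::vector<std::string> MarshalAsmJsArgs(std::vector<std::string> caller) {
  if (caller.size() > 3) caller.resize(3);  // extra arguments are not passed
  while (caller.size() < 3) caller.push_back("undefined");
  return caller;
}
int main() {
  auto args = MarshalAsmJsArgs({"stdlib"});
  assert(args.size() == 3 && args[1] == "undefined" && args[2] == "undefined");
}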
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
@@ -1157,19 +1532,18 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ mov(pc, r0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
@@ -1185,8 +1559,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(2, 0, r2);
__ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
- __ CallCFunction(ExternalReference::get_mark_code_as_executed_function(
- masm->isolate()), 2);
+ __ CallCFunction(
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r3.bit() | fp.bit() | lr.bit());
// Perform prologue operations usually performed by the young code stub.
@@ -1197,17 +1572,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ mov(pc, r0);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
@@ -1223,20 +1595,17 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
}
__ add(sp, sp, Operand(kPointerSize)); // Ignore state
- __ mov(pc, lr); // Jump to miss handler
+ __ mov(pc, lr); // Jump to miss handler
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1252,14 +1621,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiUntag(r6);
// Switch on the state.
Label with_tos_register, unknown_state;
- __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ cmp(r6,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ b(ne, &with_tos_register);
__ add(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&with_tos_register);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
__ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
+ __ cmp(r6,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ b(ne, &unknown_state);
__ add(sp, sp, Operand(2 * kPointerSize)); // Remove state.
__ Ret();
@@ -1268,22 +1640,18 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ stop("no cases left");
}
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Register scratch0, Register scratch1,
@@ -1347,7 +1715,6 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments excluding receiver
@@ -1383,10 +1750,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ ldr(r0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r0, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1394,7 +1767,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
Label skip;
__ cmp(r0, Operand(Smi::FromInt(0)));
__ b(ne, &skip);
@@ -1402,11 +1775,18 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ if (has_handler_frame) {
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (FLAG_enable_embedded_constant_pool) {
@@ -1415,8 +1795,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex)));
+ __ ldr(r1, FieldMemOperand(
+ r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
// Compute the target address = code start + osr_offset
__ add(lr, r0, Operand::SmiUntag(r1));
@@ -1426,11 +1807,21 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
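
    The helper's entry computation boils down to simple address arithmetic:
    read the Smi-tagged osr_pc_offset out of the deoptimization data and add
    it to the code object's instruction start. A sketch assuming the 32-bit
    Smi layout used on ARM (plain integers stand in for tagged values):

    #include <cstdint>

    // entry = code start + osr_offset; the offset is stored as a Smi, so it
    // must be untagged (arithmetic shift right by one on 32-bit) first.
    uintptr_t OsrEntry(uintptr_t code_start, intptr_t smi_tagged_osr_offset) {
      intptr_t osr_offset = smi_tagged_osr_offset >> 1;  // Operand::SmiUntag
      return code_start + osr_offset;
    }
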
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : function
+ // -- cp : context
// -- lr : return address
// -- sp[0] : receiver
// -----------------------------------
@@ -1440,7 +1831,7 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
{
__ Pop(r0);
__ JumpIfSmi(r0, &receiver_not_date);
- __ CompareObjectType(r0, r1, r2, JS_DATE_TYPE);
+ __ CompareObjectType(r0, r2, r3, JS_DATE_TYPE);
__ b(ne, &receiver_not_date);
}
@@ -1470,29 +1861,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
// 3. Raise a TypeError if the receiver is not a date.
__ bind(&receiver_not_date);
- __ TailCallRuntime(Runtime::kThrowNotDateError);
-}
-
-// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argc
- // -- sp[0] : first argument (left-hand side)
- // -- sp[4] : receiver (right-hand side)
- // -----------------------------------
-
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ ldr(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ ldr(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(r0);
+ __ Move(r0, Smi::FromInt(0));
+ __ EnterBuiltinFrame(cp, r1, r0);
+ __ CallRuntime(Runtime::kThrowNotDateError);
}
-
- // Pop the argument and the receiver.
- __ Ret(2);
}
// static
@@ -1560,7 +1935,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
@@ -1604,7 +1978,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
@@ -1659,7 +2032,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
@@ -1729,7 +2101,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
@@ -1750,7 +2121,6 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
__ b(le, stack_overflow); // Signed comparison.
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1761,7 +2131,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : result being passed through
@@ -1776,7 +2145,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2059,8 +2427,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiTag(r0);
__ Push(r0, r1);
__ mov(r0, r3);
+ __ Push(cp);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(cp);
__ mov(r3, r0);
__ Pop(r0, r1);
__ SmiUntag(r0);
@@ -2100,7 +2470,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
@@ -2179,7 +2548,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2208,7 +2576,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2269,7 +2636,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2290,7 +2656,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2315,7 +2680,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2335,7 +2699,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2387,6 +2750,56 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r1);
+ __ Push(r1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r1);
+ __ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(r1, r2);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : message_id as Smi
+ // -- lr : return address
+ // -----------------------------------
+ __ Push(r1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
+
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber builtin takes one argument in r0.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ Ret(eq);
+
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ // r0: receiver
+ // r1: receiver instance type
+ __ Ret(eq);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
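
    The fast path above is just the Smi tag test: on 32-bit ARM a small
    integer is stored shifted left by one with a low tag bit of zero, so a
    single mask distinguishes it from a heap object pointer. A sketch of
    that encoding, assuming illustrative helper names (not the V8 API):

    #include <cstdint>
    #include <iostream>

    constexpr intptr_t kSmiTagMask = 1;  // low bit 0 => Smi, 1 => heap object

    bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
    intptr_t TagSmi(int32_t value) { return static_cast<intptr_t>(value) << 1; }
    int32_t UntagSmi(intptr_t tagged) { return static_cast<int32_t>(tagged >> 1); }

    int main() {
      intptr_t t = TagSmi(42);
      std::cout << IsSmi(t) << ' ' << UntagSmi(t) << '\n';  // prints: 1 42
    }
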
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2496,7 +2909,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
LeaveArgumentsAdaptorFrame(masm);
__ Jump(lr);
-
// -------------------------------------------
  // Don't adapt arguments.
// -------------------------------------------
@@ -2512,7 +2924,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 44bfc1762d..57395d835b 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -14,17 +14,14 @@
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the InternalArray function from the native context.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
-
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
Register result) {
@@ -32,10 +29,8 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- x0 : number of arguments excluding receiver
// -- x1 : target
@@ -53,32 +48,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ordinary functions).
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- switch (extra_args) {
- case BuiltinExtraArguments::kTarget:
- __ Push(x1);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kNewTarget:
- __ Push(x3);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kTargetAndNewTarget:
- __ Push(x1, x3);
- num_extra_args += 2;
- break;
- case BuiltinExtraArguments::kNone:
- break;
- }
-
// JumpToExternalReference expects x0 to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ Add(x0, x0, num_extra_args + 1);
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
+ // Insert extra arguments.
+ __ SmiTag(x0);
+ __ Push(x0, x1, x3);
+ __ SmiUntag(x0);
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ exit_frame_type == BUILTIN_EXIT);
+}
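
    The new adaptor always pushes the same three extras (Smi-tagged argument
    count, target, new target), so the count handed to the C++ builtin is
    fixed arithmetic rather than the old per-case switch. A one-function
    sketch of that count:

    // Arguments seen by the builtin: the JS-level argc, plus the three
    // pushed extras, plus the receiver; mirrors __ Add(x0, x0, num_extra_args + 1).
    int ArgcSeenByBuiltin(int js_argc) {
      const int num_extra_args = 3;  // argc (as Smi), target, new target
      return js_argc + num_extra_args + 1;
    }
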
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -107,7 +89,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -136,14 +117,15 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
- // -- x0 : number of arguments
- // -- lr : return address
- // -- sp[(argc - n) * 8] : arg[n] (zero-based)
- // -- sp[(argc + 1) * 8] : receiver
+ // -- x0 : number of arguments
+ // -- x1 : function
+ // -- cp : context
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
+ // -- sp[argc * 8] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_MathMaxMin");
@@ -152,49 +134,48 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
: Heap::kMinusInfinityValueRootIndex;
// Load the accumulator with the default return value (either -Infinity or
- // +Infinity), with the tagged value in x1 and the double value in d1.
- __ LoadRoot(x1, root_index);
- __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
-
- // Remember how many slots to drop (including the receiver).
- __ Add(x4, x0, 1);
+ // +Infinity), with the tagged value in x5 and the double value in d5.
+ __ LoadRoot(x5, root_index);
+ __ Ldr(d5, FieldMemOperand(x5, HeapNumber::kValueOffset));
Label done_loop, loop;
+ __ mov(x4, x0);
__ Bind(&loop);
{
// Check if all parameters done.
- __ Subs(x0, x0, 1);
+ __ Subs(x4, x4, 1);
__ B(lt, &done_loop);
// Load the next parameter tagged value into x2.
- __ Peek(x2, Operand(x0, LSL, kPointerSizeLog2));
+ __ Peek(x2, Operand(x4, LSL, kPointerSizeLog2));
// Load the double value of the parameter into d2, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert_smi, convert_number, done_convert;
__ JumpIfSmi(x2, &convert_smi);
__ JumpIfHeapNumber(x2, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ SmiTag(x0);
__ SmiTag(x4);
- __ Push(x0, x1, x4);
+ __ EnterBuiltinFrame(cp, x1, x0);
+ __ Push(x5, x4);
__ Mov(x0, x2);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Mov(x2, x0);
- __ Pop(x4, x1, x0);
+ __ Pop(x4, x5);
+ __ LeaveBuiltinFrame(cp, x1, x0);
+ __ SmiUntag(x4);
+ __ SmiUntag(x0);
{
- // Restore the double accumulator value (d1).
+ // Restore the double accumulator value (d5).
Label done_restore;
- __ SmiUntagToDouble(d1, x1, kSpeculativeUntag);
- __ JumpIfSmi(x1, &done_restore);
- __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
+ __ SmiUntagToDouble(d5, x5, kSpeculativeUntag);
+ __ JumpIfSmi(x5, &done_restore);
+ __ Ldr(d5, FieldMemOperand(x5, HeapNumber::kValueOffset));
__ Bind(&done_restore);
}
- __ SmiUntag(x4);
- __ SmiUntag(x0);
}
__ AssertNumber(x2);
__ JumpIfSmi(x2, &convert_smi);
@@ -209,22 +190,24 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// We can use a single fmin/fmax for the operation itself, but we then need
// to work out which HeapNumber (or smi) the result came from.
- __ Fmov(x11, d1);
+ __ Fmov(x11, d5);
if (kind == MathMaxMinKind::kMin) {
- __ Fmin(d1, d1, d2);
+ __ Fmin(d5, d5, d2);
} else {
DCHECK(kind == MathMaxMinKind::kMax);
- __ Fmax(d1, d1, d2);
+ __ Fmax(d5, d5, d2);
}
- __ Fmov(x10, d1);
+ __ Fmov(x10, d5);
__ Cmp(x10, x11);
- __ Csel(x1, x1, x2, eq);
+ __ Csel(x5, x5, x2, eq);
__ B(&loop);
}
__ Bind(&done_loop);
- __ Mov(x0, x1);
- __ Drop(x4);
+ // Drop all slots, including the receiver.
+ __ Add(x0, x0, 1);
+ __ Drop(x0);
+ __ Mov(x0, x5);
__ Ret();
}
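
    The selection trick above is worth spelling out: after Fmin/Fmax the
    builtin compares raw bit patterns (Fmov + Cmp), not floating-point
    values, to decide whether the accumulator still holds the result, which
    keeps -0.0 and NaN provenance exact. A rough C++ analogue (hypothetical
    helper, not V8 code):

    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    uint64_t Bits(double d) {
      uint64_t b;
      std::memcpy(&b, &d, sizeof b);
      return b;
    }

    // One loop step of Math.max: take the fmax, then compare bit patterns
    // to learn whether the new parameter replaced the accumulator.
    double MaxStep(double acc, double next, bool* took_next) {
      double result = std::fmax(acc, next);
      *took_next = Bits(result) != Bits(acc);
      return result;
    }

    int main() {
      bool took_next = false;
      MaxStep(1.0, 2.0, &took_next);
      std::cout << took_next << '\n';  // prints: 1
    }
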
@@ -233,25 +216,36 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_NumberConstructor");
- // 1. Load the first argument into x0 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into x0.
Label no_arguments;
{
__ Cbz(x0, &no_arguments);
+ __ Mov(x2, x0); // Store argc in x2.
__ Sub(x0, x0, 1);
- __ Drop(x0);
- __ Ldr(x0, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ __ Ldr(x0, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
}
// 2a. Convert first argument to number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(x2);
+ __ EnterBuiltinFrame(cp, x1, x2);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(cp, x1, x2);
+ __ SmiUntag(x2);
+ }
+
+ {
+ // Drop all arguments.
+ __ Drop(x2);
+ }
// 2b. No arguments, return +0 (already in x0).
__ Bind(&no_arguments);
@@ -259,13 +253,13 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
__ Ret();
}
-
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
// -- x3 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -275,17 +269,15 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
- // 2. Load the first argument into x2 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into x2.
{
Label no_arguments, done;
+ __ Move(x6, x0); // Store argc in x6.
__ Cbz(x0, &no_arguments);
__ Sub(x0, x0, 1);
- __ Drop(x0);
- __ Ldr(x2, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
__ B(&done);
__ Bind(&no_arguments);
- __ Drop(1);
__ Mov(x2, Smi::FromInt(0));
__ Bind(&done);
}
@@ -296,71 +288,83 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ JumpIfSmi(x2, &done_convert);
__ JumpIfObjectType(x2, x4, x4, HEAP_NUMBER_TYPE, &done_convert, eq);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x1, x3);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(x6);
+ __ EnterBuiltinFrame(cp, x1, x6);
+ __ Push(x3);
__ Move(x0, x2);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Move(x2, x0);
- __ Pop(x3, x1);
+ __ Pop(x3);
+ __ LeaveBuiltinFrame(cp, x1, x6);
+ __ SmiUntag(x6);
}
__ Bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ Cmp(x1, x3);
__ B(ne, &new_object);
// 5. Allocate a JSValue wrapper for the number.
__ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
- __ Ret();
+ __ B(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x2); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(x6);
+ __ EnterBuiltinFrame(cp, x1, x6);
+ __ Push(x2); // first argument
__ CallStub(&stub);
__ Pop(x2);
+ __ LeaveBuiltinFrame(cp, x1, x6);
+ __ SmiUntag(x6);
}
__ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(x6);
+ __ Drop(1);
+ __ Ret();
+ }
+}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
ASM_LOCATION("Builtins::Generate_StringConstructor");
- // 1. Load the first argument into x0 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into x0.
Label no_arguments;
{
__ Cbz(x0, &no_arguments);
+ __ Mov(x2, x0); // Store argc in x2.
__ Sub(x0, x0, 1);
- __ Drop(x0);
- __ Ldr(x0, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ __ Ldr(x0, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
}
// 2a. At least one argument, return x0 if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(x0, &to_string);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(x0, x3, x3, FIRST_NONSTRING_TYPE);
__ B(hi, &to_string);
__ B(eq, &symbol_descriptive_string);
- __ Ret();
+ __ b(&drop_frame_and_ret);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -374,18 +378,32 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert x0 to a string.
__ Bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(x2);
+ __ EnterBuiltinFrame(cp, x1, x2);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(cp, x1, x2);
+ __ SmiUntag(x2);
}
+ __ b(&drop_frame_and_ret);
// 3b. Convert symbol in x0 to a string.
__ Bind(&symbol_descriptive_string);
{
+ __ Drop(x2);
+ __ Drop(1);
__ Push(x0);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(x2);
+ __ Drop(1);
+ __ Ret();
+ }
+}
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
@@ -393,6 +411,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- x0 : number of arguments
// -- x1 : constructor function
// -- x3 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -402,17 +421,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
- // 2. Load the first argument into x2 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into x2.
{
Label no_arguments, done;
+ __ mov(x6, x0); // Store argc in x6.
__ Cbz(x0, &no_arguments);
__ Sub(x0, x0, 1);
- __ Drop(x0);
- __ Ldr(x2, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ __ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
__ B(&done);
__ Bind(&no_arguments);
- __ Drop(1);
__ LoadRoot(x2, Heap::kempty_stringRootIndex);
__ Bind(&done);
}
@@ -424,37 +441,51 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ JumpIfObjectType(x2, x4, x4, FIRST_NONSTRING_TYPE, &done_convert, lo);
__ Bind(&convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(x1, x3);
+ __ SmiTag(x6);
+ __ EnterBuiltinFrame(cp, x1, x6);
+ __ Push(x3);
__ Move(x0, x2);
__ CallStub(&stub);
__ Move(x2, x0);
- __ Pop(x3, x1);
+ __ Pop(x3);
+ __ LeaveBuiltinFrame(cp, x1, x6);
+ __ SmiUntag(x6);
}
__ Bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ Cmp(x1, x3);
__ B(ne, &new_object);
// 5. Allocate a JSValue wrapper for the string.
__ AllocateJSValue(x0, x1, x2, x4, x5, &new_object);
- __ Ret();
+ __ B(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x2); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(x6);
+ __ EnterBuiltinFrame(cp, x1, x6);
+ __ Push(x2); // first argument
__ CallStub(&stub);
__ Pop(x2);
+ __ LeaveBuiltinFrame(cp, x1, x6);
+ __ SmiUntag(x6);
}
__ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
- __ Ret();
+
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(x6);
+ __ Drop(1);
+ __ Ret();
+ }
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
@@ -490,7 +521,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Br(x2);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However, not
@@ -507,7 +537,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_implicit_receiver,
@@ -605,16 +634,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x0: number of arguments
// x1: constructor function
// x3: new target
- if (is_api_function) {
- __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(argc);
- __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(argc);
+ __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -683,37 +705,179 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ret();
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
-
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(x1);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the value to pass to the generator
+ // -- x1 : the JSGeneratorObject to resume
+ // -- x2 : the resume mode (tagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(x1);
+
+ // Store input value into generator object.
+ __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
+
+ // Load suspended function and context.
+ __ Ldr(cp, FieldMemOperand(x1, JSGeneratorObject::kContextOffset));
+ __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ Mov(x10, Operand(last_step_action));
+ __ Ldrsb(x10, MemOperand(x10));
+ __ CompareAndBranch(x10, Operand(StepIn), ge, &prepare_step_in_if_stepping);
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ Mov(x10, Operand(debug_suspended_generator));
+ __ Ldr(x10, MemOperand(x10));
+ __ CompareAndBranch(x10, Operand(x1), eq,
+ &prepare_step_in_suspended_generator);
+ __ Bind(&stepping_prepared);
+
+ // Push receiver.
+ __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+ __ Push(x5);
-enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
+ // ----------- S t a t e -------------
+ // -- x1 : the JSGeneratorObject to resume
+ // -- x2 : the resume mode (tagged)
+ // -- x4 : generator function
+ // -- cp : generator context
+ // -- lr : return address
+ // -- jssp[0] : generator receiver
+ // -----------------------------------
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w10,
+ FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
+ __ PushMultipleTimes(x11, w10);
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
+ __ B(ne, &old_generator);
+
+ // New-style (ignition/turbofan) generator object
+ {
+ __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(w0, FieldMemOperand(
+ x0, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(x3, x1);
+ __ Move(x1, x4);
+ __ Ldr(x5, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ Jump(x5);
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(lr, fp);
+ __ Move(fp, jssp);
+ __ Push(cp, x4);
+
+ // Restore the operand stack.
+ __ Ldr(x0, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
+ __ Ldr(w3, UntagSmiFieldMemOperand(x0, FixedArray::kLengthOffset));
+ __ Add(x0, x0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Add(x3, x0, Operand(x3, LSL, kPointerSizeLog2));
+ {
+ Label done_loop, loop;
+ __ Bind(&loop);
+ __ Cmp(x0, x3);
+ __ B(eq, &done_loop);
+ __ Ldr(x10, MemOperand(x0, kPointerSize, PostIndex));
+ __ Push(x10);
+ __ B(&loop);
+ __ Bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x10, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
+
+ // Resume the generator function at the continuation.
+ __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x10, FieldMemOperand(x10, SharedFunctionInfo::kCodeOffset));
+ __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
+ __ Ldrsw(x11, UntagSmiFieldMemOperand(
+ x1, JSGeneratorObject::kContinuationOffset));
+ __ Add(x10, x10, x11);
+ __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ Str(x12, FieldMemOperand(x1, JSGeneratorObject::kContinuationOffset));
+ __ Move(x0, x1); // Continuation expects generator object in x0.
+ __ Br(x10);
+ }
+
+ __ Bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x2, x4);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(x2, x1);
+ __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ }
+ __ B(&stepping_prepared);
+
+ __ Bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x2);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(x2, x1);
+ __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ }
+ __ B(&stepping_prepared);
+}
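
    For the old-style (full-codegen) path, the saved operand stack is
    replayed onto the machine stack and then cleared so the generator object
    no longer keeps it alive. A container-level sketch of that replay, with
    std::vector standing in for both the FixedArray and the stack:

    #include <cstdint>
    #include <vector>

    void RestoreOperandStack(std::vector<intptr_t>* saved,
                             std::vector<intptr_t>* machine_stack) {
      // Push every saved slot in order, as the emitted loop does.
      for (intptr_t value : *saved) machine_stack->push_back(value);
      // Corresponds to storing the empty FixedArray back, so nothing leaks.
      saved->clear();
    }
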
+
+enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers x10, x15; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
@@ -744,7 +908,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ Bind(&enough_stack_space);
}
-
// Input:
// x0: new.target.
// x1: function.
@@ -794,7 +957,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Bind(&loop);
__ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
__ Ldr(x12, MemOperand(x11)); // Dereference the handle.
- __ Push(x12); // Push the argument.
+ __ Push(x12); // Push the argument.
__ Bind(&entry);
__ Cmp(scratch, argv);
__ B(ne, &loop);
@@ -834,16 +997,29 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Ret();
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+ Register args_count = scratch;
+
+ // Get the arguments + receiver count.
+ __ ldr(args_count,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ldr(args_count.W(),
+ FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ // Drop receiver + arguments.
+ __ Drop(args_count, 1);
+}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -861,6 +1037,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -868,8 +1046,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(lr, fp, cp, x1);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
Register debug_info = kInterpreterBytecodeArrayRegister;
Label load_debug_bytecode_array, bytecode_array_loaded;
@@ -881,8 +1059,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
__ Bind(&bytecode_array_loaded);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ Ldr(x0, FieldMemOperand(x0, SharedFunctionInfo::kCodeOffset));
+ __ Cmp(x0, Operand(masm->CodeObject())); // Self-reference to this code.
+ __ B(ne, &switch_to_different_code_kind);
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister,
kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
@@ -890,8 +1074,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ Mov(x0, Operand(0));
+ // Load the initial bytecode offset.
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(x0, kInterpreterBytecodeOffsetRegister);
__ Push(x3, kInterpreterBytecodeArrayRegister, x0);
// Allocate the local and temporary register file on the stack.
@@ -921,18 +1109,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Bind(&loop_header);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Add(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ Mov(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -942,60 +1120,154 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip0);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in x0.
+ LeaveInterpreterFrame(masm, x2);
+ __ Ret();
// Load debug copy of the bytecode array.
__ Bind(&load_debug_bytecode_array);
__ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ B(&bytecode_array_loaded);
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ Ldr(x7, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x7, FieldMemOperand(x7, SharedFunctionInfo::kCodeOffset));
+ __ Add(x7, x7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(x1, x7, x5);
+ __ Jump(x7);
}
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ ldr(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, x2);
- // The return value is in accumulator, which is already in x0.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(x0);
- // Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Push function as argument and compile for baseline.
+ __ push(x1);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ Ldr(w1, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ Drop(x1, 1);
+ // Restore return value.
+ __ pop(x0);
+ }
__ Ret();
}
+// static
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- x1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ add(x3, x0, Operand(1)); // Add one for receiver.
+ __ lsl(x3, x3, kPointerSizeLog2);
+ __ sub(x4, x2, x3);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Mov(x5, jssp);
+ __ Claim(x3, 1);
+ __ B(&loop_check);
+ __ Bind(&loop_header);
+ // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+ __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
+ __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
+ __ Bind(&loop_check);
+ __ Cmp(x2, x4);
+ __ B(gt, &loop_header);
+
+ // Call the target.
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
+}
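
    The push loop computes a stop address one slot past the receiver and
    copies argc + 1 values from the interpreter's register file onto the JS
    stack. A plain-memory sketch of the same walk (illustrative names; the
    real slots hold tagged values and the real loop writes with pre-indexed
    stores):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // first_slot points at the first slot to push; the remaining argc
    // slots, receiver included, sit at successively lower addresses.
    void PushArgs(const intptr_t* first_slot, size_t argc,
                  std::vector<intptr_t>* js_stack) {
      for (size_t i = 0; i <= argc; ++i) js_stack->push_back(*(first_slot - i));
    }
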
+
+// static
+void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (not including receiver)
+ // -- x3 : new target
+ // -- x1 : constructor to call
+ // -- x2 : address of the first argument
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ add(x5, x0, Operand(1)); // Add one for receiver (to be constructed).
+ __ lsl(x5, x5, kPointerSizeLog2);
+
+ // Set stack pointer and where to stop.
+ __ Mov(x6, jssp);
+ __ Claim(x5, 1);
+ __ sub(x4, x6, x5);
+
+ // Push a slot for the receiver.
+ __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+
+ Label loop_header, loop_check;
+ // Push the arguments.
+ __ B(&loop_check);
+ __ Bind(&loop_header);
+ // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+ __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
+ __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
+ __ Bind(&loop_check);
+ __ Cmp(x6, x4);
+ __ B(gt, &loop_header);
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ Add(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Call the constructor with x0, x1, and x3 unmodified.
+ __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ LoadObject(x1, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+
+ // Initialize the dispatch table register.
__ Mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ Ldr(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ Ldr(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1008,9 +1280,7 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ Ldr(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1018,74 +1288,218 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ Mov(x1, Operand(x1, LSL, kPointerSizeLog2));
__ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x1));
- __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip0);
}
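
    Dispatch itself is a single indexed load: the byte at the current
    bytecode offset selects a handler out of the dispatch table, and control
    jumps there. A function-pointer sketch of the same indexing (stand-in
    types, not V8's):

    #include <cstddef>
    #include <cstdint>

    using Handler = void (*)();

    // handler = dispatch_table[bytecode_array[offset]]; the builtin jumps
    // to the handler rather than calling it, but the indexing is the same.
    void DispatchOnce(const uint8_t* bytecode_array, size_t offset,
                      Handler const* dispatch_table) {
      dispatch_table[bytecode_array[offset]]();
    }
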
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ Push(x1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee)
+ // -- x3 : new target (preserved for callee)
+ // -- x1 : target function (preserved for callee)
+ // -----------------------------------
+  // First, look up code; maybe we don't need to compile!
+ Label gotta_call_runtime;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = x1;
+ Register map = x13;
+ Register index = x2;
+ __ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(map,
+ FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
+ __ Cmp(index, Operand(2));
+ __ B(lt, &gotta_call_runtime);
+
+ // Find literals.
+ // x3 : native context
+ // x2 : length / index
+ // x13 : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = x4;
+ __ Ldr(native_context, NativeContextMemOperand());
+
+ __ Bind(&loop_top);
+ Register temp = x5;
+ Register array_pointer = x6;
+
+ // Does the native context match?
+ __ Add(array_pointer, map, Operand(index, LSL, kPointerSizeLog2));
+ __ Ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ Cmp(temp, native_context);
+ __ B(ne, &loop_bottom);
+ // OSR id set to none?
+ __ Ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
+ __ B(ne, &loop_bottom);
+ // Literals available?
+ __ Ldr(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
+ __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Code available?
+ Register entry = x7;
+ __ Ldr(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ Bind(&install_optimized_code_and_tailcall);
+ __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, x5);
+
+ // Link the closure into the optimized function list.
+ // x7 : code entry
+ // x4 : native context
+ // x1 : closure
+ __ Ldr(x8,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x13,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ Str(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ Mov(x5, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, x5, x13,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Jump(entry);
+
+ __ Bind(&loop_bottom);
+ __ Sub(index, index, Operand(SharedFunctionInfo::kEntryLength));
+ __ Cmp(index, Operand(1));
+ __ B(gt, &loop_top);
+
+ // We found neither literals nor code.
+ __ B(&gotta_call_runtime);
+
+ __ Bind(&maybe_call_runtime);
+
+  // Last possibility. Check the context-free optimized code map entry.
+ __ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ B(&install_optimized_code_and_tailcall);
+
+ __ Bind(&try_shared);
+ // Is the full code valid?
+ __ Ldr(entry,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ and_(x5, x5, Operand(Code::KindField::kMask));
+ __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
+ __ Cmp(x5, Operand(Code::BUILTIN));
+ __ B(eq, &gotta_call_runtime);
+ // Yes, install the full code.
+ __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, x5);
+ __ Jump(entry);
+
+ __ Bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
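
    The loop above is a backwards linear scan of the SharedFunctionInfo's
    optimized code map, matching on native context and requiring the OSR id
    to be none before accepting cached literals and code. A simplified
    search over stand-in entry structs (field names are illustrative):

    #include <cstddef>

    struct Entry {
      const void* native_context;
      int osr_ast_id;    // -1 stands in for BailoutId::None()
      const void* code;  // may be null if only literals were cached
    };

    // Scan from the end of the map, as the builtin does, and accept the
    // first entry for this native context that is not an OSR entry and has
    // code attached.
    const Entry* FindCachedCode(const Entry* entries, size_t count,
                                const void* native_context) {
      for (size_t i = count; i-- > 0;) {
        const Entry& e = entries[i];
        if (e.native_context == native_context && e.osr_ast_id == -1 && e.code)
          return &e;
      }
      return nullptr;  // caller falls back to the shared entry or the runtime
    }
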
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
-
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee)
+ // -- x1 : new target (preserved for callee)
+ // -- x3 : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+    // Preserve the argument count for the later comparison.
+ __ Move(x4, x0);
+ // Push a copy of the target function and the new target.
+ __ SmiTag(x0);
+ // Push another copy as a parameter to the runtime call.
+ __ Push(x0, x1, x3, x1);
+
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ cmp(x4, Operand(j));
+ __ B(ne, &over);
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ ldr(x4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ i * kPointerSize));
+ __ push(x4);
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+
+ // Call runtime, on success unwind frame, and parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(x0, &failed);
+
+ __ Drop(2);
+ __ pop(x4);
+ __ SmiUntag(x4);
+ scope.GenerateLeaveFrame();
+
+ __ add(x4, x4, Operand(1));
+ __ Drop(x4);
+ __ Ret();
+
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ Pop(x3, x1, x0);
+ __ SmiUntag(x0);
+ }
+  // On failure, tail call back to regular JS.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
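
[The unrolled loop above copies the zero to three asm.js instantiation arguments (stdlib, foreign, heap) from the caller frame and pads the rest with undefined, so the runtime call always sees exactly three slots. A minimal host-side sketch of that net effect, with std::optional standing in for undefined; names are illustrative, not V8 API:

  #include <cstddef>
  #include <optional>
  #include <string>
  #include <vector>

  using Value = std::optional<std::string>;  // nullopt plays the role of undefined

  // Pad the caller's stdlib/foreign/heap arguments out to three slots.
  std::vector<Value> PadAsmJsArgs(const std::vector<Value>& args) {
    std::vector<Value> slots;
    for (std::size_t i = 0; i < 3; ++i) {
      slots.push_back(i < args.size() ? args[i] : std::nullopt);
    }
    return slots;
  }
]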
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
@@ -1116,19 +1530,18 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Br(x0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
@@ -1148,8 +1561,8 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Push(x0, x1, x3, fp, lr);
__ Mov(x1, ExternalReference::isolate_address(masm->isolate()));
__ CallCFunction(
- ExternalReference::get_mark_code_as_executed_function(
- masm->isolate()), 2);
+ ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+ 2);
__ Pop(lr, fp, x3, x1, x0);
// Perform prologue operations usually performed by the young code stub.
@@ -1161,17 +1574,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Br(x0);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
@@ -1197,17 +1607,14 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
__ Br(lr);
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1225,15 +1632,19 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label with_tos_register, unknown_state;
- __ CompareAndBranch(
- state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register);
+ __ CompareAndBranch(state,
+ static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS),
+ ne, &with_tos_register);
__ Drop(1); // Remove state.
__ Ret();
__ Bind(&with_tos_register);
// Reload TOS register.
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), x0.code());
__ Peek(x0, kPointerSize);
- __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state);
+ __ CompareAndBranch(state,
+ static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER),
+ ne, &unknown_state);
__ Drop(2); // Remove state and TOS.
__ Ret();
@@ -1241,22 +1652,18 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ Abort(kInvalidFullCodegenState);
}
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Register scratch0, Register scratch1,
@@ -1320,7 +1727,6 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ Bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments excluding receiver
@@ -1356,10 +1762,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1367,21 +1779,28 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
Label skip;
__ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
__ Ret();
__ Bind(&skip);
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ if (has_handler_frame) {
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ Ldrsw(w1, UntagSmiFieldMemOperand(x1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex)));
+ __ Ldrsw(w1, UntagSmiFieldMemOperand(
+ x1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1392,11 +1811,21 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
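
[Both entry points funnel into the same address computation: untag the returned code object, skip the code header, and add the OSR program-counter offset recorded in the deoptimization data. Sketched as plain pointer arithmetic; the two constants below are placeholders, not the real V8 values:

  #include <cstdint>

  // <entry_addr> = <code_obj> + #header_size + <osr_offset>, minus the heap tag.
  uintptr_t OsrEntryAddress(uintptr_t tagged_code_obj, uint32_t osr_pc_offset) {
    const uintptr_t kHeapObjectTag = 1;    // assumed tag value
    const uintptr_t kCodeHeaderSize = 96;  // placeholder for Code::kHeaderSize
    return tagged_code_obj - kHeapObjectTag + kCodeHeaderSize + osr_pc_offset;
  }
]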
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : function
+ // -- cp : context
// -- lr : return address
// -- jssp[0] : receiver
// -----------------------------------
@@ -1407,7 +1836,7 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
{
__ Pop(x0);
__ JumpIfSmi(x0, &receiver_not_date);
- __ JumpIfNotObjectType(x0, x1, x2, JS_DATE_TYPE, &receiver_not_date);
+ __ JumpIfNotObjectType(x0, x2, x3, JS_DATE_TYPE, &receiver_not_date);
}
// 2. Load the specified date field, falling back to the runtime as necessary.
@@ -1435,31 +1864,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
// 3. Raise a TypeError if the receiver is not a date.
__ Bind(&receiver_not_date);
- __ TailCallRuntime(Runtime::kThrowNotDateError);
-}
-
-// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : argc
- // -- jssp[0] : first argument (left-hand side)
- // -- jssp[8] : receiver (right-hand side)
- // -----------------------------------
- ASM_LOCATION("Builtins::Generate_FunctionHasInstance");
-
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Ldr(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ Ldr(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(x0);
+ __ Mov(x0, Smi::FromInt(0));
+ __ EnterBuiltinFrame(cp, x1, x0);
+ __ CallRuntime(Runtime::kThrowNotDateError);
}
-
- // Pop the argument and the receiver.
- __ Drop(2);
- __ Ret();
}
// static
@@ -1549,7 +1960,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register argc = x0;
@@ -1596,7 +2006,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
@@ -1667,7 +2076,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
@@ -1754,7 +2162,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
@@ -1776,7 +2183,6 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
__ B(le, stack_overflow);
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(x10, x0);
__ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -1786,7 +2192,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : result being passed through
@@ -1801,7 +2206,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Drop(1);
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2102,8 +2506,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiTag(x0);
__ Push(x0, x1);
__ Mov(x0, x3);
+ __ Push(cp);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(cp);
__ Mov(x3, x0);
__ Pop(x1, x0);
__ SmiUntag(x0);
@@ -2142,7 +2548,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
@@ -2220,7 +2625,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2250,7 +2654,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ Br(x12);
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2310,7 +2713,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2332,7 +2734,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ Br(x4);
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2363,7 +2764,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Br(x12);
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2383,7 +2783,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2434,80 +2833,66 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-
// static
-void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
- // -- x2 : the address of the first argument to be pushed. Subsequent
- // arguments should be consecutive above this, in the same order as
- // they are to be pushed onto the stack.
- // -- x1 : the target to call (can be any Object).
+ // -- x1 : requested object size (untagged)
+ // -- lr : return address
// -----------------------------------
-
- // Find the address of the last argument.
- __ add(x3, x0, Operand(1)); // Add one for receiver.
- __ lsl(x3, x3, kPointerSizeLog2);
- __ sub(x4, x2, x3);
-
- // Push the arguments.
- Label loop_header, loop_check;
- __ Mov(x5, jssp);
- __ Claim(x3, 1);
- __ B(&loop_check);
- __ Bind(&loop_header);
- // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
- __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
- __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
- __ Bind(&loop_check);
- __ Cmp(x2, x4);
- __ B(gt, &loop_header);
-
- // Call the target.
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ __ SmiTag(x1);
+ __ Push(x1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
-
// static
-void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
// ----------- S t a t e -------------
- // -- x0 : argument count (not including receiver)
- // -- x3 : new target
- // -- x1 : constructor to call
- // -- x2 : address of the first argument
+ // -- x1 : requested object size (untagged)
+ // -- lr : return address
// -----------------------------------
+ __ SmiTag(x1);
+ __ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(x1, x2);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
- // Find the address of the last argument.
- __ add(x5, x0, Operand(1)); // Add one for receiver (to be constructed).
- __ lsl(x5, x5, kPointerSizeLog2);
-
- // Set stack pointer and where to stop.
- __ Mov(x6, jssp);
- __ Claim(x5, 1);
- __ sub(x4, x6, x5);
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_Abort");
+ // ----------- S t a t e -------------
+ // -- x1 : message_id as Smi
+ // -- lr : return address
+ // -----------------------------------
+ MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
+ __ Push(x1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
- // Push a slot for the receiver.
- __ Str(xzr, MemOperand(x6, -kPointerSize, PreIndex));
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in x0.
+ Label not_smi;
+ __ JumpIfNotSmi(x0, &not_smi);
+ __ Ret();
+ __ Bind(&not_smi);
- Label loop_header, loop_check;
- // Push the arguments.
- __ B(&loop_check);
- __ Bind(&loop_header);
- // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
- __ Ldr(x5, MemOperand(x2, -kPointerSize, PostIndex));
- __ Str(x5, MemOperand(x6, -kPointerSize, PreIndex));
- __ Bind(&loop_check);
- __ Cmp(x6, x4);
- __ B(gt, &loop_header);
+ Label not_heap_number;
+ __ CompareObjectType(x0, x1, x1, HEAP_NUMBER_TYPE);
+ // x0: receiver
+ // x1: receiver instance type
+ __ B(ne, &not_heap_number);
+ __ Ret();
+ __ Bind(&not_heap_number);
- // Call the constructor with x0, x1, and x3 unmodified.
- __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
}
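
[Generate_ToNumber is a three-way dispatch: Smis and heap numbers are returned unchanged, everything else tail-calls the NonNumberToNumber builtin. The same control flow in a self-contained sketch; a std::variant stands in for tagged values and SlowToNumber is a crude stand-in for the slow path:

  #include <string>
  #include <variant>

  using Tagged = std::variant<int, double, std::string>;  // Smi, HeapNumber, other

  double SlowToNumber(const std::string& s) { return std::stod(s); }  // stand-in

  double ToNumber(const Tagged& value) {
    if (auto* smi = std::get_if<int>(&value)) return *smi;     // Smi fast path
    if (auto* num = std::get_if<double>(&value)) return *num;  // already a number
    return SlowToNumber(std::get<std::string>(value));         // slow path
  }
]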
-
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@@ -2517,7 +2902,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- x3 : new target (passed through to callee)
// -----------------------------------
- Register argc_actual = x0; // Excluding the receiver.
+ Register argc_actual = x0; // Excluding the receiver.
Register argc_expected = x2; // Excluding the receiver.
Register function = x1;
Register code_entry = x10;
@@ -2558,9 +2943,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy_2_by_2;
__ Bind(&copy_2_by_2);
__ Ldp(scratch1, scratch2,
- MemOperand(copy_start, - 2 * kPointerSize, PreIndex));
+ MemOperand(copy_start, -2 * kPointerSize, PreIndex));
__ Stp(scratch1, scratch2,
- MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ MemOperand(copy_to, -2 * kPointerSize, PreIndex));
__ Cmp(copy_start, copy_end);
__ B(hi, &copy_2_by_2);
@@ -2588,7 +2973,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Add(copy_from, fp, 3 * kPointerSize);
__ Add(copy_from, copy_from, argc_actual);
__ Mov(copy_to, jssp);
- __ Sub(copy_end, copy_to, 1 * kPointerSize); // Adjust for the receiver.
+ __ Sub(copy_end, copy_to, 1 * kPointerSize); // Adjust for the receiver.
__ Sub(copy_end, copy_end, argc_actual);
// Claim space for the arguments, the receiver, and one extra slot.
@@ -2601,9 +2986,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy_2_by_2;
__ Bind(&copy_2_by_2);
__ Ldp(scratch1, scratch2,
- MemOperand(copy_from, - 2 * kPointerSize, PreIndex));
+ MemOperand(copy_from, -2 * kPointerSize, PreIndex));
__ Stp(scratch1, scratch2,
- MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ MemOperand(copy_to, -2 * kPointerSize, PreIndex));
__ Cmp(copy_to, copy_end);
__ B(hi, &copy_2_by_2);
@@ -2616,7 +3001,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label fill;
__ Bind(&fill);
__ Stp(scratch1, scratch1,
- MemOperand(copy_to, - 2 * kPointerSize, PreIndex));
+ MemOperand(copy_to, -2 * kPointerSize, PreIndex));
__ Cmp(copy_to, copy_end);
__ B(hi, &fill);
@@ -2653,7 +3038,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
new file mode 100644
index 0000000000..aed10b1288
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -0,0 +1,291 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+
+#include "src/api-arguments.h"
+#include "src/api-natives.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Returns the holder JSObject if the function can legally be called with this
+// receiver. Returns nullptr if the call is illegal.
+// TODO(dcarney): CallOptimization duplicates this logic, merge.
+JSObject* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
+ JSObject* receiver) {
+ Object* recv_type = info->signature();
+ // No signature, return holder.
+ if (!recv_type->IsFunctionTemplateInfo()) return receiver;
+ FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
+
+ // Check the receiver. Fast path for receivers with no hidden prototypes.
+ if (signature->IsTemplateFor(receiver)) return receiver;
+ if (!receiver->map()->has_hidden_prototype()) return nullptr;
+ for (PrototypeIterator iter(isolate, receiver, kStartAtPrototype,
+ PrototypeIterator::END_AT_NON_HIDDEN);
+ !iter.IsAtEnd(); iter.Advance()) {
+ JSObject* current = iter.GetCurrent<JSObject>();
+ if (signature->IsTemplateFor(current)) return current;
+ }
+ return nullptr;
+}
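
[The walk above accepts the receiver itself or the first object on its hidden-prototype chain that the signature is a template for. A minimal sketch, with signature matching reduced to pointer equality for brevity (the real IsTemplateFor also follows template inheritance):

  struct TemplateInfo;  // opaque stand-in for FunctionTemplateInfo

  struct Obj {
    const TemplateInfo* made_from;  // template the object was instantiated from
    Obj* hidden_prototype;          // nullptr when there is none
  };

  // Returns the holder when the call is legal, nullptr otherwise.
  Obj* FindCompatibleReceiver(const TemplateInfo* signature, Obj* receiver) {
    if (signature == nullptr) return receiver;  // no signature: receiver is fine
    for (Obj* o = receiver; o != nullptr; o = o->hidden_prototype) {
      if (o->made_from == signature) return o;
    }
    return nullptr;  // illegal invocation
  }
]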
+
+template <bool is_construct>
+MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
+ Isolate* isolate, Handle<HeapObject> function,
+ Handle<HeapObject> new_target, Handle<FunctionTemplateInfo> fun_data,
+ Handle<Object> receiver, BuiltinArguments args) {
+ Handle<JSObject> js_receiver;
+ JSObject* raw_holder;
+ if (is_construct) {
+ DCHECK(args.receiver()->IsTheHole(isolate));
+ if (fun_data->instance_template()->IsUndefined(isolate)) {
+ v8::Local<ObjectTemplate> templ =
+ ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
+ ToApiHandle<v8::FunctionTemplate>(fun_data));
+ fun_data->set_instance_template(*Utils::OpenHandle(*templ));
+ }
+ Handle<ObjectTemplateInfo> instance_template(
+ ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, js_receiver,
+ ApiNatives::InstantiateObject(instance_template,
+ Handle<JSReceiver>::cast(new_target)),
+ Object);
+ args[0] = *js_receiver;
+ DCHECK_EQ(*js_receiver, *args.receiver());
+
+ raw_holder = *js_receiver;
+ } else {
+ DCHECK(receiver->IsJSReceiver());
+
+ if (!receiver->IsJSObject()) {
+ // This function cannot be called with the given receiver. Abort!
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
+ }
+
+ js_receiver = Handle<JSObject>::cast(receiver);
+
+ if (!fun_data->accept_any_receiver() &&
+ js_receiver->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), js_receiver)) {
+ isolate->ReportFailedAccessCheck(js_receiver);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ }
+
+ raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
+
+ if (raw_holder == nullptr) {
+ // This function cannot be called with the given receiver. Abort!
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
+ }
+ }
+
+ Object* raw_call_data = fun_data->call_code();
+ if (!raw_call_data->IsUndefined(isolate)) {
+ DCHECK(raw_call_data->IsCallHandlerInfo());
+ CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
+ Object* callback_obj = call_data->callback();
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
+ Object* data_obj = call_data->data();
+
+ LOG(isolate, ApiObjectAccess("call", JSObject::cast(*js_receiver)));
+
+ FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
+ *new_target, &args[0] - 1,
+ args.length() - 1);
+
+ Handle<Object> result = custom.Call(callback);
+
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (result.is_null()) {
+ if (is_construct) return js_receiver;
+ return isolate->factory()->undefined_value();
+ }
+ // Rebox the result.
+ result->VerifyApiCallResultType();
+ if (!is_construct || result->IsJSObject()) return handle(*result, isolate);
+ }
+
+ return js_receiver;
+}
+
+} // anonymous namespace
+
+BUILTIN(HandleApiCall) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> function = args.target<JSFunction>();
+ Handle<Object> receiver = args.receiver();
+ Handle<HeapObject> new_target = args.new_target();
+ Handle<FunctionTemplateInfo> fun_data(function->shared()->get_api_func_data(),
+ isolate);
+ if (new_target->IsJSReceiver()) {
+ RETURN_RESULT_OR_FAILURE(
+ isolate, HandleApiCallHelper<true>(isolate, function, new_target,
+ fun_data, receiver, args));
+ } else {
+ RETURN_RESULT_OR_FAILURE(
+ isolate, HandleApiCallHelper<false>(isolate, function, new_target,
+ fun_data, receiver, args));
+ }
+}
+
+namespace {
+
+class RelocatableArguments : public BuiltinArguments, public Relocatable {
+ public:
+ RelocatableArguments(Isolate* isolate, int length, Object** arguments)
+ : BuiltinArguments(length, arguments), Relocatable(isolate) {}
+
+ virtual inline void IterateInstance(ObjectVisitor* v) {
+ if (length() == 0) return;
+ v->VisitPointers(lowest_address(), highest_address() + 1);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RelocatableArguments);
+};
+
+} // namespace
+
+MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
+ bool is_construct,
+ Handle<HeapObject> function,
+ Handle<Object> receiver,
+ int argc, Handle<Object> args[],
+ Handle<HeapObject> new_target) {
+ DCHECK(function->IsFunctionTemplateInfo() ||
+ (function->IsJSFunction() &&
+ JSFunction::cast(*function)->shared()->IsApiFunction()));
+
+ // Do proper receiver conversion for non-strict mode api functions.
+ if (!is_construct && !receiver->IsJSReceiver()) {
+ if (function->IsFunctionTemplateInfo() ||
+ is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Object);
+ }
+ }
+
+ Handle<FunctionTemplateInfo> fun_data =
+ function->IsFunctionTemplateInfo()
+ ? Handle<FunctionTemplateInfo>::cast(function)
+ : handle(JSFunction::cast(*function)->shared()->get_api_func_data(),
+ isolate);
+ // Construct BuiltinArguments object:
+ // new target, function, arguments reversed, receiver.
+ const int kBufferSize = 32;
+ Object* small_argv[kBufferSize];
+ Object** argv;
+ const int frame_argc = argc + BuiltinArguments::kNumExtraArgsWithReceiver;
+ if (frame_argc <= kBufferSize) {
+ argv = small_argv;
+ } else {
+ argv = new Object*[frame_argc];
+ }
+ int cursor = frame_argc - 1;
+ argv[cursor--] = *receiver;
+ for (int i = 0; i < argc; ++i) {
+ argv[cursor--] = *args[i];
+ }
+ DCHECK(cursor == BuiltinArguments::kArgcOffset);
+ argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc);
+ argv[BuiltinArguments::kTargetOffset] = *function;
+ argv[BuiltinArguments::kNewTargetOffset] = *new_target;
+ MaybeHandle<Object> result;
+ {
+ RelocatableArguments arguments(isolate, frame_argc, &argv[frame_argc - 1]);
+ if (is_construct) {
+ result = HandleApiCallHelper<true>(isolate, function, new_target,
+ fun_data, receiver, arguments);
+ } else {
+ result = HandleApiCallHelper<false>(isolate, function, new_target,
+ fun_data, receiver, arguments);
+ }
+ }
+ if (argv != small_argv) delete[] argv;
+ return result;
+}
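
[The argv buffer built above mirrors the BuiltinArguments calling convention: new target, target, and argc at the bottom, the arguments in reverse order above them, and the receiver on top. A layout sketch with strings in place of tagged pointers; the slot indices 0/1/2 are assumptions for illustration:

  #include <cstddef>
  #include <string>
  #include <vector>

  // Layout: [new_target, target, argc, arg_{n-1}, ..., arg_0, receiver]
  std::vector<std::string> BuildBuiltinFrame(const std::vector<std::string>& args,
                                             const std::string& receiver,
                                             const std::string& target,
                                             const std::string& new_target) {
    const std::size_t kExtra = 4;  // new target + target + argc + receiver
    std::vector<std::string> argv(args.size() + kExtra);
    std::size_t cursor = argv.size() - 1;
    argv[cursor--] = receiver;                          // receiver on top
    for (const auto& arg : args) argv[cursor--] = arg;  // arguments reversed
    argv[2] = std::to_string(argv.size());              // argc slot
    argv[1] = target;
    argv[0] = new_target;
    return argv;
  }
]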
+
+// Helper function to handle calls to non-function objects created through the
+// API. The object can be called as either a constructor (using new) or just as
+// a function (without new).
+MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
+ Isolate* isolate, bool is_construct_call, BuiltinArguments args) {
+ Handle<Object> receiver = args.receiver();
+
+ // Get the object called.
+ JSObject* obj = JSObject::cast(*receiver);
+
+ // Set the new target.
+ HeapObject* new_target;
+ if (is_construct_call) {
+ // TODO(adamk): This should be passed through in args instead of
+ // being patched in here. We need to set a non-undefined value
+ // for v8::FunctionCallbackInfo::IsConstructCall() to get the
+ // right answer.
+ new_target = obj;
+ } else {
+ new_target = isolate->heap()->undefined_value();
+ }
+
+ // Get the invocation callback from the function descriptor that was
+ // used to create the called object.
+ DCHECK(obj->map()->is_callable());
+ JSFunction* constructor = JSFunction::cast(obj->map()->GetConstructor());
+ // TODO(ishell): turn this back to a DCHECK.
+ CHECK(constructor->shared()->IsApiFunction());
+ Object* handler =
+ constructor->shared()->get_api_func_data()->instance_call_handler();
+ DCHECK(!handler->IsUndefined(isolate));
+ // TODO(ishell): remove this debugging code.
+ CHECK(handler->IsCallHandlerInfo());
+ CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
+ Object* callback_obj = call_data->callback();
+ v8::FunctionCallback callback =
+ v8::ToCData<v8::FunctionCallback>(callback_obj);
+
+ // Get the data for the call and perform the callback.
+ Object* result;
+ {
+ HandleScope scope(isolate);
+ LOG(isolate, ApiObjectAccess("call non-function", obj));
+
+ FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
+ obj, new_target, &args[0] - 1,
+ args.length() - 1);
+ Handle<Object> result_handle = custom.Call(callback);
+ if (result_handle.is_null()) {
+ result = isolate->heap()->undefined_value();
+ } else {
+ result = *result_handle;
+ }
+ }
+ // Check for exceptions and return result.
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return result;
+}
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a normal function call.
+BUILTIN(HandleApiCallAsFunction) {
+ return HandleApiCallAsFunctionOrConstructor(isolate, false, args);
+}
+
+// Handle calls to non-function objects created through the API. This delegate
+// function is used when the call is a construct call.
+BUILTIN(HandleApiCallAsConstructor) {
+ return HandleApiCallAsFunctionOrConstructor(isolate, true, args);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
new file mode 100644
index 0000000000..09ee4cc2e2
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -0,0 +1,2119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+#include "src/elements.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
+  // This is an extended version of ECMA-262 7.1.11 handling signed values.
+  // Try to convert the object to a number and clamp values to [kMinInt, kMaxInt].
+ if (object->IsSmi()) {
+ *out = Smi::cast(object)->value();
+ return true;
+ } else if (object->IsHeapNumber()) {
+ double value = HeapNumber::cast(object)->value();
+ if (std::isnan(value)) {
+ *out = 0;
+ } else if (value > kMaxInt) {
+ *out = kMaxInt;
+ } else if (value < kMinInt) {
+ *out = kMinInt;
+ } else {
+ *out = static_cast<int>(value);
+ }
+ return true;
+ } else if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+ *out = 0;
+ return true;
+ } else if (object->IsBoolean()) {
+ *out = object->IsTrue(isolate);
+ return true;
+ }
+ return false;
+}
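
[For heap numbers the conversion is a saturating cast: NaN becomes zero and out-of-range values clamp to the int limits. The numeric core, extracted as a sketch:

  #include <climits>
  #include <cmath>

  int ClampDoubleToInt(double value) {
    if (std::isnan(value)) return 0;      // NaN -> 0
    if (value > INT_MAX) return INT_MAX;  // saturate high
    if (value < INT_MIN) return INT_MIN;  // saturate low
    return static_cast<int>(value);       // e.g. 3.9 -> 3, -3.9 -> -3
  }
]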
+
+inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
+ int* out) {
+ Context* context = *isolate->native_context();
+ Map* map = object->map();
+ if (map != context->sloppy_arguments_map() &&
+ map != context->strict_arguments_map() &&
+ map != context->fast_aliased_arguments_map()) {
+ return false;
+ }
+ DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
+ Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
+ if (!len_obj->IsSmi()) return false;
+ *out = Max(0, Smi::cast(len_obj)->value());
+ return *out <= object->elements()->length();
+}
+
+inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
+ JSArray* receiver) {
+ return JSObject::PrototypeHasNoElements(isolate, receiver);
+}
+
+inline bool HasSimpleElements(JSObject* current) {
+ return current->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
+ !current->GetElementsAccessor()->HasAccessors(current);
+}
+
+inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
+ JSObject* receiver) {
+ // Check that we have no accessors on the receiver's elements.
+ if (!HasSimpleElements(receiver)) return false;
+ return JSObject::PrototypeHasNoElements(isolate, receiver);
+}
+
+inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
+ DisallowHeapAllocation no_gc;
+ PrototypeIterator iter(isolate, receiver, kStartAtReceiver);
+ for (; !iter.IsAtEnd(); iter.Advance()) {
+ if (iter.GetCurrent()->IsJSProxy()) return false;
+ JSObject* current = iter.GetCurrent<JSObject>();
+ if (!HasSimpleElements(current)) return false;
+ }
+ return true;
+}
+
+// Returns |false| if not applicable.
+MUST_USE_RESULT
+inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
+ Handle<Object> receiver,
+ BuiltinArguments* args,
+ int first_added_arg) {
+ if (!receiver->IsJSArray()) return false;
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ ElementsKind origin_kind = array->GetElementsKind();
+ if (IsDictionaryElementsKind(origin_kind)) return false;
+ if (!array->map()->is_extensible()) return false;
+ if (args == nullptr) return true;
+
+  // If there may be element accessors in the prototype chain, the fast path
+  // cannot be used if there are arguments to add to the array.
+ if (!IsJSArrayFastElementMovingAllowed(isolate, *array)) return false;
+
+ // Adding elements to the array prototype would break code that makes sure
+ // it has no elements. Handle that elsewhere.
+ if (isolate->IsAnyInitialArrayPrototype(array)) return false;
+
+ // Need to ensure that the arguments passed in args can be contained in
+ // the array.
+ int args_length = args->length();
+ if (first_added_arg >= args_length) return true;
+
+ if (IsFastObjectElementsKind(origin_kind)) return true;
+ ElementsKind target_kind = origin_kind;
+ {
+ DisallowHeapAllocation no_gc;
+ for (int i = first_added_arg; i < args_length; i++) {
+ Object* arg = (*args)[i];
+ if (arg->IsHeapObject()) {
+ if (arg->IsHeapNumber()) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_ELEMENTS;
+ break;
+ }
+ }
+ }
+ }
+ if (target_kind != origin_kind) {
+ // Use a short-lived HandleScope to avoid creating several copies of the
+ // elements handle which would cause issues when left-trimming later-on.
+ HandleScope scope(isolate);
+ JSObject::TransitionElementsKind(array, target_kind);
+ }
+ return true;
+}
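
[The scan over the added arguments picks the most general elements kind needed to hold them: heap numbers push the target to double elements, while any other heap object forces generic fast elements and ends the scan. The decision logic in isolation:

  #include <vector>

  enum class Kind { kSmi, kDouble, kObject };  // simplified elements kinds

  struct Arg { bool is_heap_object; bool is_heap_number; };

  Kind TargetElementsKind(Kind origin, const std::vector<Arg>& added) {
    Kind target = origin;
    for (const Arg& arg : added) {
      if (!arg.is_heap_object) continue;  // Smis fit in every kind
      if (arg.is_heap_number) {
        target = Kind::kDouble;  // may still be widened by a later argument
      } else {
        return Kind::kObject;    // most general kind; nothing can widen it
      }
    }
    return target;
  }
]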
+
+MUST_USE_RESULT static Object* CallJsIntrinsic(Isolate* isolate,
+ Handle<JSFunction> function,
+ BuiltinArguments args) {
+ HandleScope handleScope(isolate);
+ int argc = args.length() - 1;
+ ScopedVector<Handle<Object>> argv(argc);
+ for (int i = 0; i < argc; ++i) {
+ argv[i] = args.at<Object>(i + 1);
+ }
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
+}
+
+Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
+ return CallJsIntrinsic(isolate, isolate->array_push(), args);
+ }
+ // Fast Elements Path
+ int to_add = args.length() - 1;
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ int len = Smi::cast(array->length())->value();
+ if (to_add == 0) return Smi::FromInt(len);
+
+ // Currently fixed arrays cannot grow too big, so we should never hit this.
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+
+ if (JSArray::HasReadOnlyLength(array)) {
+ return CallJsIntrinsic(isolate, isolate->array_push(), args);
+ }
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int new_length = accessor->Push(array, &args, to_add);
+ return Smi::FromInt(new_length);
+}
+} // namespace
+
+BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
+
+// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
+// tailcall to the builtin directly.
+RUNTIME_FUNCTION(Runtime_ArrayPush) {
+ DCHECK_EQ(2, args.length());
+ Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
+ // Rewrap the arguments as builtins arguments.
+ int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
+ BuiltinArguments caller_args(argc, incoming->arguments() + 1);
+ return DoArrayPush(isolate, caller_args);
+}
+
+BUILTIN(ArrayPop) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0)) {
+ return CallJsIntrinsic(isolate, isolate->array_pop(), args);
+ }
+
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+
+ uint32_t len = static_cast<uint32_t>(Smi::cast(array->length())->value());
+ if (len == 0) return isolate->heap()->undefined_value();
+
+ if (JSArray::HasReadOnlyLength(array)) {
+ return CallJsIntrinsic(isolate, isolate->array_pop(), args);
+ }
+
+ Handle<Object> result;
+ if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
+ // Fast Elements Path
+ result = array->GetElementsAccessor()->Pop(array);
+ } else {
+ // Use Slow Lookup otherwise
+ uint32_t new_length = len - 1;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, JSReceiver::GetElement(isolate, array, new_length));
+ JSArray::SetLength(array, new_length);
+ }
+ return *result;
+}
+
+BUILTIN(ArrayShift) {
+ HandleScope scope(isolate);
+ Heap* heap = isolate->heap();
+ Handle<Object> receiver = args.receiver();
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0) ||
+ !IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
+ return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+ }
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+
+ int len = Smi::cast(array->length())->value();
+ if (len == 0) return heap->undefined_value();
+
+ if (JSArray::HasReadOnlyLength(array)) {
+ return CallJsIntrinsic(isolate, isolate->array_shift(), args);
+ }
+
+ Handle<Object> first = array->GetElementsAccessor()->Shift(array);
+ return *first;
+}
+
+BUILTIN(ArrayUnshift) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
+ return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
+ }
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ int to_add = args.length() - 1;
+ if (to_add == 0) return array->length();
+
+ // Currently fixed arrays cannot grow too big, so we should never hit this.
+ DCHECK_LE(to_add, Smi::kMaxValue - Smi::cast(array->length())->value());
+
+ if (JSArray::HasReadOnlyLength(array)) {
+ return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
+ }
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int new_length = accessor->Unshift(array, &args, to_add);
+ return Smi::FromInt(new_length);
+}
+
+BUILTIN(ArraySlice) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ int len = -1;
+ int relative_start = 0;
+ int relative_end = 0;
+
+ if (receiver->IsJSArray()) {
+ DisallowHeapAllocation no_gc;
+ JSArray* array = JSArray::cast(*receiver);
+ if (V8_UNLIKELY(!array->HasFastElements() ||
+ !IsJSArrayFastElementMovingAllowed(isolate, array) ||
+ !isolate->IsArraySpeciesLookupChainIntact() ||
+                    // If this is a subclass of Array, then call out to JS.
+ !array->HasArrayPrototype(isolate))) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
+ len = Smi::cast(array->length())->value();
+ } else if (receiver->IsJSObject() &&
+ GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
+ &len)) {
+ // Array.prototype.slice.call(arguments, ...) is quite a common idiom
+ // (notably more than 50% of invocations in Web apps).
+    // Handle it in C++ as well.
+ DCHECK(JSObject::cast(*receiver)->HasFastElements() ||
+ JSObject::cast(*receiver)->HasFastArgumentsElements());
+ } else {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
+ DCHECK_LE(0, len);
+ int argument_count = args.length() - 1;
+  // Note the carefully chosen defaults: if an argument is missing, it is
+  // undefined, which gets converted to 0 for relative_start and to len
+  // for relative_end.
+ relative_start = 0;
+ relative_end = len;
+ if (argument_count > 0) {
+ DisallowHeapAllocation no_gc;
+ if (!ClampedToInteger(isolate, args[1], &relative_start)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
+ if (argument_count > 1) {
+ Object* end_arg = args[2];
+ // slice handles the end_arg specially
+ if (end_arg->IsUndefined(isolate)) {
+ relative_end = len;
+ } else if (!ClampedToInteger(isolate, end_arg, &relative_end)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
+ }
+ }
+
+  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
+ uint32_t actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
+ : Min(relative_start, len);
+
+  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
+ uint32_t actual_end =
+ (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
+
+ Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ return *accessor->Slice(object, actual_start, actual_end);
+}
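
[Both steps use the same ECMA-262 relative-index rule: a negative index counts back from the length and everything is clipped to [0, len]. As a standalone helper:

  #include <algorithm>

  // Clamping used for actual_start (step 6) and actual_end (step 8).
  int ClampRelativeIndex(int relative, int len) {
    return relative < 0 ? std::max(len + relative, 0) : std::min(relative, len);
  }
  // With len == 5: slice(-2) starts at 3, slice(-9) at 0, slice(7) yields 5 (empty).
]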
+
+BUILTIN(ArraySplice) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (V8_UNLIKELY(
+ !EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3) ||
+ // If this is a subclass of Array, then call out to JS.
+ !Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
+ // If anything with @@species has been messed with, call out to JS.
+ !isolate->IsArraySpeciesLookupChainIntact())) {
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+ }
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+
+ int argument_count = args.length() - 1;
+ int relative_start = 0;
+ if (argument_count > 0) {
+ DisallowHeapAllocation no_gc;
+ if (!ClampedToInteger(isolate, args[1], &relative_start)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+ }
+ }
+ int len = Smi::cast(array->length())->value();
+ // clip relative start to [0, len]
+ int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
+ : Min(relative_start, len);
+
+ int actual_delete_count;
+ if (argument_count == 1) {
+ // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
+ // given as a request to delete all the elements from the start.
+    // This differs from the case of an undefined delete count.
+ // This does not follow ECMA-262, but we do the same for compatibility.
+ DCHECK(len - actual_start >= 0);
+ actual_delete_count = len - actual_start;
+ } else {
+ int delete_count = 0;
+ DisallowHeapAllocation no_gc;
+ if (argument_count > 1) {
+ if (!ClampedToInteger(isolate, args[2], &delete_count)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+ }
+ }
+ actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
+ }
+
+ int add_count = (argument_count > 1) ? (argument_count - 2) : 0;
+ int new_length = len - actual_delete_count + add_count;
+
+ if (new_length != len && JSArray::HasReadOnlyLength(array)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
+ }
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ Handle<JSArray> result_array = accessor->Splice(
+ array, actual_start, actual_delete_count, &args, add_count);
+ return *result_array;
+}
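
[The delete count combines the clamping above with the non-standard single-argument case: splice(start) deletes everything from start to the end of the array. Sketched:

  #include <algorithm>

  int ActualDeleteCount(int argument_count, int delete_count, int len,
                        int actual_start) {
    if (argument_count == 1) return len - actual_start;  // splice(start)
    return std::min(std::max(delete_count, 0), len - actual_start);
  }
]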
+
+// Array Concat -------------------------------------------------------------
+
+namespace {
+
+/**
+ * A simple visitor that visits every element of an Array.
+ * The backend storage can be a fixed array for fast elements case,
+ * or a dictionary for sparse array. Since Dictionary is a subtype
+ * of FixedArray, the class can be used by both fast and slow cases.
+ * The second parameter of the constructor, fast_elements, specifies
+ * whether the storage is a FixedArray or Dictionary.
+ *
+ * An index limit is used to deal with the situation where a result array's
+ * length overflows a 32-bit non-negative integer.
+ */
+class ArrayConcatVisitor {
+ public:
+ ArrayConcatVisitor(Isolate* isolate, Handle<Object> storage,
+ bool fast_elements)
+ : isolate_(isolate),
+ storage_(isolate->global_handles()->Create(*storage)),
+ index_offset_(0u),
+ bit_field_(FastElementsField::encode(fast_elements) |
+ ExceedsLimitField::encode(false) |
+ IsFixedArrayField::encode(storage->IsFixedArray())) {
+ DCHECK(!(this->fast_elements() && !is_fixed_array()));
+ }
+
+ ~ArrayConcatVisitor() { clear_storage(); }
+
+ MUST_USE_RESULT bool visit(uint32_t i, Handle<Object> elm) {
+ uint32_t index = index_offset_ + i;
+
+ if (i >= JSObject::kMaxElementCount - index_offset_) {
+ set_exceeds_array_limit(true);
+ // Exception hasn't been thrown at this point. Return true to
+      // break out, and the caller will throw. !visit would imply that
+ // there is already a pending exception.
+ return true;
+ }
+
+ if (!is_fixed_array()) {
+ LookupIterator it(isolate_, storage_, index, LookupIterator::OWN);
+ MAYBE_RETURN(
+ JSReceiver::CreateDataProperty(&it, elm, Object::THROW_ON_ERROR),
+ false);
+ return true;
+ }
+
+ if (fast_elements()) {
+ if (index < static_cast<uint32_t>(storage_fixed_array()->length())) {
+ storage_fixed_array()->set(index, *elm);
+ return true;
+ }
+ // Our initial estimate of length was foiled, possibly by
+ // getters on the arrays increasing the length of later arrays
+ // during iteration.
+ // This shouldn't happen in anything but pathological cases.
+ SetDictionaryMode();
+ // Fall-through to dictionary mode.
+ }
+ DCHECK(!fast_elements());
+ Handle<SeededNumberDictionary> dict(
+ SeededNumberDictionary::cast(*storage_));
+ // The object holding this backing store has just been allocated, so
+ // it cannot yet be used as a prototype.
+ Handle<SeededNumberDictionary> result =
+ SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
+ if (!result.is_identical_to(dict)) {
+ // Dictionary needed to grow.
+ clear_storage();
+ set_storage(*result);
+ }
+ return true;
+ }
+
+ void increase_index_offset(uint32_t delta) {
+ if (JSObject::kMaxElementCount - index_offset_ < delta) {
+ index_offset_ = JSObject::kMaxElementCount;
+ } else {
+ index_offset_ += delta;
+ }
+ // If the initial length estimate was off (see special case in visit()),
+ // but the array blowing the limit didn't contain elements beyond the
+ // provided-for index range, go to dictionary mode now.
+ if (fast_elements() &&
+ index_offset_ >
+ static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
+ SetDictionaryMode();
+ }
+ }
+
+ bool exceeds_array_limit() const {
+ return ExceedsLimitField::decode(bit_field_);
+ }
+
+ Handle<JSArray> ToArray() {
+ DCHECK(is_fixed_array());
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
+ Handle<Object> length =
+ isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
+ Handle<Map> map = JSObject::GetElementsTransitionMap(
+ array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
+ array->set_map(*map);
+ array->set_length(*length);
+ array->set_elements(*storage_fixed_array());
+ return array;
+ }
+
+  // Storage is either a FixedArray (if is_fixed_array()) or a JSReceiver
+ // (otherwise)
+ Handle<FixedArray> storage_fixed_array() {
+ DCHECK(is_fixed_array());
+ return Handle<FixedArray>::cast(storage_);
+ }
+ Handle<JSReceiver> storage_jsreceiver() {
+ DCHECK(!is_fixed_array());
+ return Handle<JSReceiver>::cast(storage_);
+ }
+
+ private:
+ // Convert storage to dictionary mode.
+ void SetDictionaryMode() {
+ DCHECK(fast_elements() && is_fixed_array());
+ Handle<FixedArray> current_storage = storage_fixed_array();
+ Handle<SeededNumberDictionary> slow_storage(
+ SeededNumberDictionary::New(isolate_, current_storage->length()));
+ uint32_t current_length = static_cast<uint32_t>(current_storage->length());
+ FOR_WITH_HANDLE_SCOPE(
+ isolate_, uint32_t, i = 0, i, i < current_length, i++, {
+ Handle<Object> element(current_storage->get(i), isolate_);
+ if (!element->IsTheHole(isolate_)) {
+ // The object holding this backing store has just been allocated, so
+ // it cannot yet be used as a prototype.
+ Handle<SeededNumberDictionary> new_storage =
+ SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
+ false);
+ if (!new_storage.is_identical_to(slow_storage)) {
+ slow_storage = loop_scope.CloseAndEscape(new_storage);
+ }
+ }
+ });
+ clear_storage();
+ set_storage(*slow_storage);
+ set_fast_elements(false);
+ }
+
+ inline void clear_storage() { GlobalHandles::Destroy(storage_.location()); }
+
+ inline void set_storage(FixedArray* storage) {
+ DCHECK(is_fixed_array());
+ storage_ = isolate_->global_handles()->Create(storage);
+ }
+
+ class FastElementsField : public BitField<bool, 0, 1> {};
+ class ExceedsLimitField : public BitField<bool, 1, 1> {};
+ class IsFixedArrayField : public BitField<bool, 2, 1> {};
+
+ bool fast_elements() const { return FastElementsField::decode(bit_field_); }
+ void set_fast_elements(bool fast) {
+ bit_field_ = FastElementsField::update(bit_field_, fast);
+ }
+ void set_exceeds_array_limit(bool exceeds) {
+ bit_field_ = ExceedsLimitField::update(bit_field_, exceeds);
+ }
+ bool is_fixed_array() const { return IsFixedArrayField::decode(bit_field_); }
+
+ Isolate* isolate_;
+ Handle<Object> storage_; // Always a global handle.
+ // Index after last seen index. Always less than or equal to
+ // JSObject::kMaxElementCount.
+ uint32_t index_offset_;
+ uint32_t bit_field_;
+};
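
[The three booleans live in a single uint32_t via V8's BitField template, one bit each at positions 0, 1, and 2. A freestanding sketch of that packing:

  #include <cstdint>

  constexpr uint32_t kFastElementsBit = 1u << 0;
  constexpr uint32_t kExceedsLimitBit = 1u << 1;
  constexpr uint32_t kIsFixedArrayBit = 1u << 2;

  constexpr uint32_t EncodeFlags(bool fast, bool exceeds, bool fixed) {
    return (fast ? kFastElementsBit : 0u) | (exceeds ? kExceedsLimitBit : 0u) |
           (fixed ? kIsFixedArrayBit : 0u);
  }

  constexpr bool FastElements(uint32_t bits) {
    return (bits & kFastElementsBit) != 0;
  }
]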
+
+uint32_t EstimateElementCount(Handle<JSArray> array) {
+ DisallowHeapAllocation no_gc;
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ int element_count = 0;
+ switch (array->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ // Fast elements can't have lengths that are not representable by
+ // a 32-bit signed integer.
+ DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ Isolate* isolate = array->GetIsolate();
+ FixedArray* elements = FixedArray::cast(array->elements());
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->get(i)->IsTheHole(isolate)) element_count++;
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ // Fast elements can't have lengths that are not representable by
+ // a 32-bit signed integer.
+ DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ if (array->elements()->IsFixedArray()) {
+ DCHECK(FixedArray::cast(array->elements())->length() == 0);
+ break;
+ }
+ FixedDoubleArray* elements = FixedDoubleArray::cast(array->elements());
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->is_the_hole(i)) element_count++;
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(array->elements());
+ Isolate* isolate = dictionary->GetIsolate();
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dictionary->KeyAt(i);
+ if (dictionary->IsKey(isolate, key)) {
+ element_count++;
+ }
+ }
+ break;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ // External arrays are always dense.
+ return length;
+ case NO_ELEMENTS:
+ return 0;
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ UNREACHABLE();
+ return 0;
+ }
+ // As an estimate, we assume that the prototype doesn't contain any
+ // inherited elements.
+ return element_count;
+}
+
+// Used for sorting indices in a List<uint32_t>.
+int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
+ uint32_t a = *ap;
+ uint32_t b = *bp;
+ return (a == b) ? 0 : (a < b) ? -1 : 1;
+}
+
+void CollectElementIndices(Handle<JSObject> object, uint32_t range,
+ List<uint32_t>* indices) {
+ Isolate* isolate = object->GetIsolate();
+ ElementsKind kind = object->GetElementsKind();
+ switch (kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ DisallowHeapAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(object->elements());
+ uint32_t length = static_cast<uint32_t>(elements->length());
+ if (range < length) length = range;
+ for (uint32_t i = 0; i < length; i++) {
+ if (!elements->get(i)->IsTheHole(isolate)) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ if (object->elements()->IsFixedArray()) {
+ DCHECK(object->elements()->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(object->elements()));
+ uint32_t length = static_cast<uint32_t>(elements->length());
+ if (range < length) length = range;
+ for (uint32_t i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ DisallowHeapAllocation no_gc;
+ SeededNumberDictionary* dict =
+ SeededNumberDictionary::cast(object->elements());
+ uint32_t capacity = dict->Capacity();
+ FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
+ Object* k = dict->KeyAt(j);
+ if (!dict->IsKey(isolate, k)) continue;
+ DCHECK(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ if (index < range) {
+ indices->Add(index);
+ }
+ });
+ break;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
+ uint32_t length = static_cast<uint32_t>(
+ FixedArrayBase::cast(object->elements())->length());
+ if (range <= length) {
+ length = range;
+ // We will add all indices, so we might as well clear it first
+ // and avoid duplicates.
+ indices->Clear();
+ }
+ for (uint32_t i = 0; i < length; i++) {
+ indices->Add(i);
+ }
+ if (length == range) return; // All indices accounted for already.
+ break;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ for (uint32_t i = 0; i < range; i++) {
+ if (accessor->HasElement(object, i)) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS: {
+ DCHECK(object->IsJSValue());
+ Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+ DCHECK(js_value->value()->IsString());
+ Handle<String> string(String::cast(js_value->value()), isolate);
+ uint32_t length = static_cast<uint32_t>(string->length());
+ uint32_t i = 0;
+ uint32_t limit = Min(length, range);
+ for (; i < limit; i++) {
+ indices->Add(i);
+ }
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ for (; i < range; i++) {
+ if (accessor->HasElement(object, i)) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case NO_ELEMENTS:
+ break;
+ }
+
+ PrototypeIterator iter(isolate, object);
+ if (!iter.IsAtEnd()) {
+ // The prototype will usually have no inherited element indices,
+ // but we have to check.
+ CollectElementIndices(PrototypeIterator::GetCurrent<JSObject>(iter), range,
+ indices);
+ }
+}
+
+bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
+ uint32_t length, ArrayConcatVisitor* visitor) {
+ FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, i = 0, i, i < length, ++i, {
+ Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
+ Handle<Object> element_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value, JSReceiver::GetElement(isolate, receiver, i),
+ false);
+ if (!visitor->visit(i, element_value)) return false;
+ }
+ });
+ visitor->increase_index_offset(length);
+ return true;
+}
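
[The slow iteration probes each index with HasElement before reading it, so missing indices are skipped and any exception raised during the read aborts the walk. The shape of that loop, with std::function callbacks standing in for the V8 accessors:

  #include <cstdint>
  #include <functional>
  #include <optional>

  // Visit every present index in [0, length); false means an exception occurred.
  bool IterateSlow(uint32_t length,
                   const std::function<bool(uint32_t)>& has_element,
                   const std::function<std::optional<int>(uint32_t)>& get_element,
                   const std::function<bool(uint32_t, int)>& visit) {
    for (uint32_t i = 0; i < length; ++i) {
      if (!has_element(i)) continue;         // hole: skip
      std::optional<int> value = get_element(i);
      if (!value) return false;              // the read threw
      if (!visit(i, *value)) return false;   // visitor signalled failure
    }
    return true;
  }
]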
+
+/**
+ * A helper function that visits "array" elements of a JSReceiver in numerical
+ * order.
+ *
+ * The visitor argument is called for each existing element in the array
+ * with the element index and the element's value.
+ * Afterwards it increments the base-index of the visitor by the array
+ * length.
+ * Returns false if any access threw an exception, otherwise true.
+ */
+bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
+ ArrayConcatVisitor* visitor) {
+ uint32_t length = 0;
+
+ if (receiver->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(receiver);
+ length = static_cast<uint32_t>(array->length()->Number());
+ } else {
+ Handle<Object> val;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, val, Object::GetLengthFromArrayLike(isolate, receiver), false);
+ // TODO(caitp): Support larger element indexes (up to 2^53-1).
+ if (!val->ToUint32(&length)) {
+ length = 0;
+ }
+    // TODO(cbruni): handle other element kinds as well
+ return IterateElementsSlow(isolate, receiver, length, visitor);
+ }
+
+ if (!HasOnlySimpleElements(isolate, *receiver)) {
+ return IterateElementsSlow(isolate, receiver, length, visitor);
+ }
+ Handle<JSObject> array = Handle<JSObject>::cast(receiver);
+
+ switch (array->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ // Run through the elements FixedArray and use HasElement and GetElement
+ // to check the prototype for missing elements.
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()));
+ int fast_length = static_cast<int>(length);
+ DCHECK(fast_length <= elements->length());
+ FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
+ Handle<Object> element_value(elements->get(j), isolate);
+ if (!element_value->IsTheHole(isolate)) {
+ if (!visitor->visit(j, element_value)) return false;
+ } else {
+ Maybe<bool> maybe = JSReceiver::HasElement(array, j);
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
+ // Call GetElement on array, not its prototype, or getters won't
+ // have the correct receiver.
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value,
+ JSReceiver::GetElement(isolate, array, j), false);
+ if (!visitor->visit(j, element_value)) return false;
+ }
+ }
+ });
+ break;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty array is FixedArray but not FixedDoubleArray.
+ if (length == 0) break;
+ // Run through the elements FixedArray and use HasElement and GetElement
+ // to check the prototype for missing elements.
+ if (array->elements()->IsFixedArray()) {
+ DCHECK(array->elements()->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(array->elements()));
+ int fast_length = static_cast<int>(length);
+ DCHECK(fast_length <= elements->length());
+ FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < fast_length, j++, {
+ if (!elements->is_the_hole(j)) {
+ double double_value = elements->get_scalar(j);
+ Handle<Object> element_value =
+ isolate->factory()->NewNumber(double_value);
+ if (!visitor->visit(j, element_value)) return false;
+ } else {
+ Maybe<bool> maybe = JSReceiver::HasElement(array, j);
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
+ // Call GetElement on array, not its prototype, or getters won't
+ // have the correct receiver.
+ Handle<Object> element_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value,
+ JSReceiver::GetElement(isolate, array, j), false);
+ if (!visitor->visit(j, element_value)) return false;
+ }
+ }
+ });
+ break;
+ }
+
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> dict(array->element_dictionary());
+ List<uint32_t> indices(dict->Capacity() / 2);
+ // Collect all indices in the object and the prototypes less
+ // than length. This might introduce duplicates in the indices list.
+ CollectElementIndices(array, length, &indices);
+ indices.Sort(&compareUInt32);
+ int n = indices.length();
+ FOR_WITH_HANDLE_SCOPE(isolate, int, j = 0, j, j < n, (void)0, {
+ uint32_t index = indices[j];
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element, JSReceiver::GetElement(isolate, array, index),
+ false);
+ if (!visitor->visit(index, element)) return false;
+ // Skip to next different index (i.e., omit duplicates).
+ do {
+ j++;
+ } while (j < n && indices[j] == index);
+ });
+ break;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
+ FOR_WITH_HANDLE_SCOPE(
+ isolate, uint32_t, index = 0, index, index < length, index++, {
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element, JSReceiver::GetElement(isolate, array, index),
+ false);
+ if (!visitor->visit(index, element)) return false;
+ });
+ break;
+ }
+ case NO_ELEMENTS:
+ break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ return IterateElementsSlow(isolate, receiver, length, visitor);
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ // |array| is guaranteed to be an array or typed array.
+ UNREACHABLE();
+ break;
+ }
+ visitor->increase_index_offset(length);
+ return true;
+}
+
+static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
+ HandleScope handle_scope(isolate);
+ if (!obj->IsJSReceiver()) return Just(false);
+ if (!isolate->IsIsConcatSpreadableLookupChainIntact(JSReceiver::cast(*obj))) {
+ // Slow path if @@isConcatSpreadable has been used.
+ Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+ Handle<Object> value;
+ MaybeHandle<Object> maybeValue =
+ i::Runtime::GetObjectProperty(isolate, obj, key);
+ if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
+ if (!value->IsUndefined(isolate)) return Just(value->BooleanValue());
+ }
+ return Object::IsArray(obj);
+}
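+
+// Illustrative standalone sketch (editorial addition, simplified value
+// model; not part of the original patch) of the decision ladder above:
+// non-objects never spread, an explicit @@isConcatSpreadable wins, and
+// otherwise IsArray decides.
+enum class SpreadableOverride { kNone, kTrue, kFalse };
+
+struct SpreadInputSketch {
+  bool is_receiver;               // is it a JS object?
+  bool is_array;                  // would Object::IsArray say true?
+  SpreadableOverride spreadable;  // explicit @@isConcatSpreadable, if any
+};
+
+bool IsConcatSpreadableSketch(const SpreadInputSketch& v) {
+  if (!v.is_receiver) return false;
+  if (v.spreadable != SpreadableOverride::kNone)
+    return v.spreadable == SpreadableOverride::kTrue;
+  return v.is_array;
+}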
+
+Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
+ Isolate* isolate) {
+ int argument_count = args->length();
+
+ bool is_array_species = *species == isolate->context()->array_function();
+
+ // Pass 1: estimate the length and number of elements of the result.
+ // The actual length can be larger if any of the arguments have getters
+ // that mutate other arguments (otherwise it is precise).
+ // The number of elements is precise if there are no inherited elements.
+
+ ElementsKind kind = FAST_SMI_ELEMENTS;
+
+ uint32_t estimate_result_length = 0;
+ uint32_t estimate_nof_elements = 0;
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < argument_count, i++, {
+ Handle<Object> obj((*args)[i], isolate);
+ uint32_t length_estimate;
+ uint32_t element_estimate;
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ length_estimate = static_cast<uint32_t>(array->length()->Number());
+ if (length_estimate != 0) {
+ ElementsKind array_kind =
+ GetPackedElementsKind(array->GetElementsKind());
+ kind = GetMoreGeneralElementsKind(kind, array_kind);
+ }
+ element_estimate = EstimateElementCount(array);
+ } else {
+ if (obj->IsHeapObject()) {
+ kind = GetMoreGeneralElementsKind(
+ kind, obj->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS);
+ }
+ length_estimate = 1;
+ element_estimate = 1;
+ }
+ // Avoid overflows by capping at kMaxElementCount.
+ if (JSObject::kMaxElementCount - estimate_result_length < length_estimate) {
+ estimate_result_length = JSObject::kMaxElementCount;
+ } else {
+ estimate_result_length += length_estimate;
+ }
+ if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
+ estimate_nof_elements = JSObject::kMaxElementCount;
+ } else {
+ estimate_nof_elements += element_estimate;
+ }
+ });
+
+ // If the estimated number of elements is more than half of the length, a
+ // fixed array (fast case) is more time- and space-efficient than a
+ // dictionary.
+ bool fast_case =
+ is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
+
+ if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
+ Handle<FixedArrayBase> storage =
+ isolate->factory()->NewFixedDoubleArray(estimate_result_length);
+ int j = 0;
+ bool failure = false;
+ if (estimate_result_length > 0) {
+ Handle<FixedDoubleArray> double_storage =
+ Handle<FixedDoubleArray>::cast(storage);
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj((*args)[i], isolate);
+ if (obj->IsSmi()) {
+ double_storage->set(j, Smi::cast(*obj)->value());
+ j++;
+ } else if (obj->IsNumber()) {
+ double_storage->set(j, obj->Number());
+ j++;
+ } else {
+ DisallowHeapAllocation no_gc;
+ JSArray* array = JSArray::cast(*obj);
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ switch (array->GetElementsKind()) {
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty array is FixedArray but not FixedDoubleArray.
+ if (length == 0) break;
+ FixedDoubleArray* elements =
+ FixedDoubleArray::cast(array->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (elements->is_the_hole(i)) {
+ // TODO(jkummerow/verwaest): We could be a bit more clever
+ // here: Check if there are no elements/getters on the
+ // prototype chain, and if so, allow creation of a holey
+ // result array.
+ // Same thing below (holey smi case).
+ failure = true;
+ break;
+ }
+ double double_value = elements->get_scalar(i);
+ double_storage->set(j, double_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ Object* the_hole = isolate->heap()->the_hole_value();
+ FixedArray* elements(FixedArray::cast(array->elements()));
+ for (uint32_t i = 0; i < length; i++) {
+ Object* element = elements->get(i);
+ if (element == the_hole) {
+ failure = true;
+ break;
+ }
+ int32_t int_value = Smi::cast(element)->value();
+ double_storage->set(j, int_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NO_ELEMENTS:
+ DCHECK_EQ(0u, length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ if (failure) break;
+ }
+ }
+ if (!failure) {
+ return *isolate->factory()->NewJSArrayWithElements(storage, kind, j);
+ }
+ // In case of failure, fall through.
+ }
+
+ Handle<Object> storage;
+ if (fast_case) {
+ // The backing storage array must have non-existing elements to preserve
+ // holes across concat operations.
+ storage =
+ isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
+ } else if (is_array_species) {
+ // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
+ uint32_t at_least_space_for =
+ estimate_nof_elements + (estimate_nof_elements >> 2);
+ storage = SeededNumberDictionary::New(isolate, at_least_space_for);
+ } else {
+ DCHECK(species->IsConstructor());
+ Handle<Object> length(Smi::FromInt(0), isolate);
+ Handle<Object> storage_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, storage_object,
+ Execution::New(isolate, species, species, 1, &length));
+ storage = storage_object;
+ }
+
+ ArrayConcatVisitor visitor(isolate, storage, fast_case);
+
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj((*args)[i], isolate);
+ Maybe<bool> spreadable = IsConcatSpreadable(isolate, obj);
+ MAYBE_RETURN(spreadable, isolate->heap()->exception());
+ if (spreadable.FromJust()) {
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(obj);
+ if (!IterateElements(isolate, object, &visitor)) {
+ return isolate->heap()->exception();
+ }
+ } else {
+ if (!visitor.visit(0, obj)) return isolate->heap()->exception();
+ visitor.increase_index_offset(1);
+ }
+ }
+
+ if (visitor.exceeds_array_limit()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayLength));
+ }
+
+ if (is_array_species) {
+ return *visitor.ToArray();
+ } else {
+ return *visitor.storage_jsreceiver();
+ }
+}
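+
+// Illustrative standalone sketch (editorial addition; not part of the
+// original patch): the estimation pass above caps running totals at
+// JSObject::kMaxElementCount instead of letting uint32_t arithmetic wrap,
+// by checking the remaining headroom before adding.
+#include <cstdint>
+
+uint32_t CappedAdd(uint32_t total, uint32_t increment, uint32_t cap) {
+  // Precondition: total <= cap.
+  if (cap - total < increment) return cap;  // would exceed the cap: saturate
+  return total + increment;
+}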
+
+bool IsSimpleArray(Isolate* isolate, Handle<JSArray> obj) {
+ DisallowHeapAllocation no_gc;
+ Map* map = obj->map();
+ // If there is only the 'length' property we are fine.
+ if (map->prototype() ==
+ isolate->native_context()->initial_array_prototype() &&
+ map->NumberOfOwnDescriptors() == 1) {
+ return true;
+ }
+ // TODO(cbruni): slower lookup for array subclasses and support slow
+ // @@IsConcatSpreadable lookup.
+ return false;
+}
+
+MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
+ BuiltinArguments* args) {
+ if (!isolate->IsIsConcatSpreadableLookupChainIntact()) {
+ return MaybeHandle<JSArray>();
+ }
+ // We shouldn't overflow when adding another length.
+ const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+ STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+ STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
+ USE(kHalfOfMaxInt);
+
+ int n_arguments = args->length();
+ int result_len = 0;
+ {
+ DisallowHeapAllocation no_gc;
+ // Iterate through all the arguments performing checks
+ // and calculating total length.
+ for (int i = 0; i < n_arguments; i++) {
+ Object* arg = (*args)[i];
+ if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
+ if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
+ return MaybeHandle<JSArray>();
+ }
+ // TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
+ if (!JSObject::cast(arg)->HasFastElements()) {
+ return MaybeHandle<JSArray>();
+ }
+ Handle<JSArray> array(JSArray::cast(arg), isolate);
+ if (!IsSimpleArray(isolate, array)) {
+ return MaybeHandle<JSArray>();
+ }
+ // The Array length is guaranteed to be <= kHalfOfMaxInt, thus we won't
+ // overflow.
+ result_len += Smi::cast(array->length())->value();
+ DCHECK(result_len >= 0);
+ // Throw a RangeError if we overflow the FixedArray limits.
+ if (FixedDoubleArray::kMaxLength < result_len ||
+ FixedArray::kMaxLength < result_len) {
+ AllowHeapAllocation gc;
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayLength),
+ JSArray);
+ }
+ }
+ }
+ return ElementsAccessor::Concat(isolate, args, n_arguments, result_len);
+}
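+
+// Illustrative standalone sketch (editorial addition; not part of the
+// original patch): the STATIC_ASSERTs above encode the overflow argument for
+// result_len. Each array length is below 2^30, and result_len is re-checked
+// against the FixedArray limits after every addition, so the signed int sum
+// never approaches INT_MAX.
+bool AccumulateLengthSketch(int* result_len, int len, int max_len) {
+  // Invariants: 0 <= *result_len <= max_len < 2^30 and 0 <= len < 2^30.
+  *result_len += len;             // stays well below 2^31 - 1
+  return *result_len <= max_len;  // false: caller throws a RangeError
+}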
+
+} // namespace
+
+// ES6 22.1.3.1 Array.prototype.concat
+BUILTIN(ArrayConcat) {
+ HandleScope scope(isolate);
+
+ Handle<Object> receiver = args.receiver();
+ // TODO(bmeurer): Do we really care about the exact exception message here?
+ if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Array.prototype.concat")));
+ }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ToObject(isolate, args.receiver()));
+ args[0] = *receiver;
+
+ Handle<JSArray> result_array;
+
+ // Avoid a real species read, which would cost extra lookups on the array
+ // constructor.
+ if (V8_LIKELY(receiver->IsJSArray() &&
+ Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
+ isolate->IsArraySpeciesLookupChainIntact())) {
+ if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+ return *result_array;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ }
+ // Reading @@species happens before anything else with a side effect, so
+ // we can do it here to determine whether to take the fast path.
+ Handle<Object> species;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+ if (*species == *isolate->array_function()) {
+ if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+ return *result_array;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ }
+ return Slow_ArrayConcat(&args, species, isolate);
+}
+
+void Builtins::Generate_ArrayIsArray(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+
+ Node* object = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+
+ Label call_runtime(assembler), return_true(assembler),
+ return_false(assembler);
+
+ assembler->GotoIf(assembler->WordIsSmi(object), &return_false);
+ Node* instance_type = assembler->LoadInstanceType(object);
+
+ assembler->GotoIf(assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(JS_ARRAY_TYPE)),
+ &return_true);
+
+ // TODO(verwaest): Handle proxies in-place.
+ assembler->Branch(assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(JS_PROXY_TYPE)),
+ &call_runtime, &return_false);
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+
+ assembler->Bind(&call_runtime);
+ assembler->Return(
+ assembler->CallRuntime(Runtime::kArrayIsArray, context, object));
+}
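+
+// Illustrative standalone sketch (editorial addition, hypothetical tags; not
+// part of the original patch) of the control flow laid out with labels
+// above: only proxies need the runtime, since a proxy may (transitively)
+// wrap an array.
+enum class TagSketch { kSmi, kJSArray, kJSProxy, kOther };
+enum class IsArraySketchResult { kTrue, kFalse, kCallRuntime };
+
+IsArraySketchResult ArrayIsArraySketch(TagSketch t) {
+  if (t == TagSketch::kSmi) return IsArraySketchResult::kFalse;
+  if (t == TagSketch::kJSArray) return IsArraySketchResult::kTrue;
+  if (t == TagSketch::kJSProxy) return IsArraySketchResult::kCallRuntime;
+  return IsArraySketchResult::kFalse;
+}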
+
+void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* array = assembler->Parameter(0);
+ Node* search_element = assembler->Parameter(1);
+ Node* start_from = assembler->Parameter(2);
+ Node* context = assembler->Parameter(3 + 2);
+
+ Node* int32_zero = assembler->Int32Constant(0);
+ Node* int32_one = assembler->Int32Constant(1);
+
+ Node* the_hole = assembler->TheHoleConstant();
+ Node* undefined = assembler->UndefinedConstant();
+ Node* heap_number_map = assembler->HeapNumberMapConstant();
+
+ Variable len_var(assembler, MachineRepresentation::kWord32),
+ index_var(assembler, MachineRepresentation::kWord32),
+ start_from_var(assembler, MachineRepresentation::kWord32);
+
+ Label init_k(assembler), return_true(assembler), return_false(assembler),
+ call_runtime(assembler);
+
+ Label init_len(assembler);
+
+ index_var.Bind(int32_zero);
+ len_var.Bind(int32_zero);
+
+ // Take the slow path if the receiver is not a JSArray, if retrieving
+ // elements requires traversing the prototype chain, or if access checks
+ // are required.
+ assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+
+ assembler->Bind(&init_len);
+ {
+ // Handle the case where the JSArray length is not a Smi in the runtime.
+ Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+ assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+
+ len_var.Bind(assembler->SmiToWord(len));
+ assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+ &return_false, &init_k);
+ }
+
+ assembler->Bind(&init_k);
+ {
+ Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
+ init_k_zero(assembler), init_k_n(assembler);
+ Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
+ Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+
+ assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+ &init_k_heap_num);
+
+ assembler->Bind(&init_k_smi);
+ {
+ start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+ assembler->Goto(&init_k_n);
+ }
+
+ assembler->Bind(&init_k_heap_num);
+ {
+ Label do_return_false(assembler);
+ Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+ Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
+ assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
+ &do_return_false);
+ start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+ assembler->Goto(&init_k_n);
+
+ assembler->Bind(&do_return_false);
+ {
+ index_var.Bind(int32_zero);
+ assembler->Goto(&return_false);
+ }
+ }
+
+ assembler->Bind(&init_k_n);
+ {
+ Label if_positive(assembler), if_negative(assembler), done(assembler);
+ assembler->Branch(
+ assembler->Int32LessThan(start_from_var.value(), int32_zero),
+ &if_negative, &if_positive);
+
+ assembler->Bind(&if_positive);
+ {
+ index_var.Bind(start_from_var.value());
+ assembler->Goto(&done);
+ }
+
+ assembler->Bind(&if_negative);
+ {
+ index_var.Bind(
+ assembler->Int32Add(len_var.value(), start_from_var.value()));
+ assembler->Branch(
+ assembler->Int32LessThan(index_var.value(), int32_zero),
+ &init_k_zero, &done);
+ }
+
+ assembler->Bind(&init_k_zero);
+ {
+ index_var.Bind(int32_zero);
+ assembler->Goto(&done);
+ }
+
+ assembler->Bind(&done);
+ }
+ }
+
+ static int32_t kElementsKind[] = {
+ FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
+ FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
+ };
+
+ Label if_smiorobjects(assembler), if_packed_doubles(assembler),
+ if_holey_doubles(assembler);
+ Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
+ &if_smiorobjects, &if_smiorobjects,
+ &if_packed_doubles, &if_holey_doubles};
+
+ Node* map = assembler->LoadMap(array);
+ Node* bit_field2 = assembler->LoadMapBitField2(map);
+ Node* elements_kind =
+ assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ Node* elements = assembler->LoadElements(array);
+ assembler->Switch(elements_kind, &return_false, kElementsKind,
+ element_kind_handlers, arraysize(kElementsKind));
+
+ assembler->Bind(&if_smiorobjects);
+ {
+ Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label ident_loop(assembler, &index_var),
+ heap_num_loop(assembler, &search_num),
+ string_loop(assembler, &index_var), simd_loop(assembler),
+ undef_loop(assembler, &index_var), not_smi(assembler),
+ not_heap_num(assembler);
+
+ assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+ search_num.Bind(assembler->SmiToFloat64(search_element));
+ assembler->Goto(&heap_num_loop);
+
+ assembler->Bind(&not_smi);
+ assembler->GotoIf(assembler->WordEqual(search_element, undefined),
+ &undef_loop);
+ Node* map = assembler->LoadMap(search_element);
+ assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
+ &not_heap_num);
+ search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ assembler->Goto(&heap_num_loop);
+
+ assembler->Bind(&not_heap_num);
+ Node* search_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(
+ assembler->Int32LessThan(
+ search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &string_loop);
+ assembler->GotoIf(
+ assembler->WordEqual(search_type,
+ assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ &simd_loop);
+ assembler->Goto(&ident_loop);
+
+ assembler->Bind(&ident_loop);
+ {
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordEqual(element_k, search_element),
+ &return_true);
+
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&ident_loop);
+ }
+
+ assembler->Bind(&undef_loop);
+ {
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordEqual(element_k, undefined),
+ &return_true);
+ assembler->GotoIf(assembler->WordEqual(element_k, the_hole),
+ &return_true);
+
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&undef_loop);
+ }
+
+ assembler->Bind(&heap_num_loop);
+ {
+ Label nan_loop(assembler, &index_var),
+ not_nan_loop(assembler, &index_var);
+ assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
+
+ assembler->Bind(&not_nan_loop);
+ {
+ Label continue_loop(assembler), not_smi(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+ assembler->Branch(
+ assembler->Float64Equal(search_num.value(),
+ assembler->SmiToFloat64(element_k)),
+ &return_true, &continue_loop);
+
+ assembler->Bind(&not_smi);
+ assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
+ heap_number_map),
+ &continue_loop);
+ assembler->BranchIfFloat64Equal(
+ search_num.value(), assembler->LoadHeapNumberValue(element_k),
+ &return_true, &continue_loop);
+
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&not_nan_loop);
+ }
+
+ assembler->Bind(&nan_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
+ heap_number_map),
+ &continue_loop);
+ assembler->BranchIfFloat64IsNaN(
+ assembler->LoadHeapNumberValue(element_k), &return_true,
+ &continue_loop);
+
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&nan_loop);
+ }
+ }
+
+ assembler->Bind(&string_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoUnless(assembler->Int32LessThan(
+ assembler->LoadInstanceType(element_k),
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &continue_loop);
+
+ // TODO(bmeurer): Consider inlining the StringEqual logic here.
+ Callable callable = CodeFactory::StringEqual(assembler->isolate());
+ Node* result =
+ assembler->CallStub(callable, context, search_element, element_k);
+ assembler->Branch(
+ assembler->WordEqual(assembler->BooleanConstant(true), result),
+ &return_true, &continue_loop);
+
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&string_loop);
+ }
+
+ assembler->Bind(&simd_loop);
+ {
+ Label continue_loop(assembler, &index_var),
+ loop_body(assembler, &index_var);
+ Node* map = assembler->LoadMap(search_element);
+
+ assembler->Goto(&loop_body);
+ assembler->Bind(&loop_body);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+
+ Node* map_k = assembler->LoadMap(element_k);
+ assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
+ &return_true, &continue_loop);
+
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&loop_body);
+ }
+ }
+
+ assembler->Bind(&if_packed_doubles);
+ {
+ Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
+ hole_loop(assembler, &index_var), search_notnan(assembler);
+ Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+ assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler->SmiToFloat64(search_element));
+ assembler->Goto(&not_nan_loop);
+
+ assembler->Bind(&search_notnan);
+ assembler->GotoIf(assembler->WordNotEqual(
+ assembler->LoadMap(search_element), heap_number_map),
+ &return_false);
+
+ search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+ assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
+
+ // Search for HeapNumber
+ assembler->Bind(&not_nan_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+ &return_true, &continue_loop);
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&not_nan_loop);
+ }
+
+ // Search for NaN
+ assembler->Bind(&nan_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+ Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&nan_loop);
+ }
+ }
+
+ assembler->Bind(&if_holey_doubles);
+ {
+ Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
+ hole_loop(assembler, &index_var), search_notnan(assembler);
+ Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+ assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler->SmiToFloat64(search_element));
+ assembler->Goto(&not_nan_loop);
+
+ assembler->Bind(&search_notnan);
+ assembler->GotoIf(assembler->WordEqual(search_element, undefined),
+ &hole_loop);
+ assembler->GotoIf(assembler->WordNotEqual(
+ assembler->LoadMap(search_element), heap_number_map),
+ &return_false);
+
+ search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+ assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
+
+ // Search for HeapNumber
+ assembler->Bind(&not_nan_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+
+ if (kPointerSize == kDoubleSize) {
+ Node* element = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint64());
+ Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+ assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+ &continue_loop);
+ } else {
+ Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint32(),
+ kIeeeDoubleExponentWordOffset);
+ assembler->GotoIf(
+ assembler->Word32Equal(element_upper,
+ assembler->Int32Constant(kHoleNanUpper32)),
+ &continue_loop);
+ }
+
+ Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+ &return_true, &continue_loop);
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&not_nan_loop);
+ }
+
+ // Search for NaN
+ assembler->Bind(&nan_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+
+ if (kPointerSize == kDoubleSize) {
+ Node* element = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint64());
+ Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+ assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+ &continue_loop);
+ } else {
+ Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint32(),
+ kIeeeDoubleExponentWordOffset);
+ assembler->GotoIf(
+ assembler->Word32Equal(element_upper,
+ assembler->Int32Constant(kHoleNanUpper32)),
+ &continue_loop);
+ }
+
+ Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&nan_loop);
+ }
+
+ // Search for the Hole
+ assembler->Bind(&hole_loop);
+ {
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_false);
+
+ if (kPointerSize == kDoubleSize) {
+ Node* element = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint64());
+ Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+ assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+ &return_true);
+ } else {
+ Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint32(),
+ kIeeeDoubleExponentWordOffset);
+ assembler->GotoIf(
+ assembler->Word32Equal(element_upper,
+ assembler->Int32Constant(kHoleNanUpper32)),
+ &return_true);
+ }
+
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&hole_loop);
+ }
+ }
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+
+ assembler->Bind(&call_runtime);
+ assembler->Return(assembler->CallRuntime(Runtime::kArrayIncludes_Slow,
+ context, array, search_element,
+ start_from));
+}
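+
+// Illustrative standalone sketch (editorial addition; not part of the
+// original patch): Array.prototype.includes compares with SameValueZero,
+// which is why the code above keeps a separate nan_loop. Unlike strict
+// equality, NaN matches NaN (and holes in holey arrays match undefined).
+#include <cmath>
+
+bool SameValueZeroNumberSketch(double a, double b) {
+  if (std::isnan(a) && std::isnan(b)) return true;  // NaN matches NaN
+  return a == b;  // also makes +0 match -0, as SameValueZero requires
+}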
+
+void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* array = assembler->Parameter(0);
+ Node* search_element = assembler->Parameter(1);
+ Node* start_from = assembler->Parameter(2);
+ Node* context = assembler->Parameter(3 + 2);
+
+ Node* int32_zero = assembler->Int32Constant(0);
+ Node* int32_one = assembler->Int32Constant(1);
+
+ Node* undefined = assembler->UndefinedConstant();
+ Node* heap_number_map = assembler->HeapNumberMapConstant();
+
+ Variable len_var(assembler, MachineRepresentation::kWord32),
+ index_var(assembler, MachineRepresentation::kWord32),
+ start_from_var(assembler, MachineRepresentation::kWord32);
+
+ Label init_k(assembler), return_found(assembler), return_not_found(assembler),
+ call_runtime(assembler);
+
+ Label init_len(assembler);
+
+ index_var.Bind(int32_zero);
+ len_var.Bind(int32_zero);
+
+ // Take the slow path if the receiver is not a JSArray, if retrieving
+ // elements requires traversing the prototype chain, or if access checks
+ // are required.
+ assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+
+ assembler->Bind(&init_len);
+ {
+ // Handle the case where the JSArray length is not a Smi in the runtime.
+ Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+ assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+
+ len_var.Bind(assembler->SmiToWord(len));
+ assembler->Branch(assembler->Word32Equal(len_var.value(), int32_zero),
+ &return_not_found, &init_k);
+ }
+
+ assembler->Bind(&init_k);
+ {
+ Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
+ init_k_zero(assembler), init_k_n(assembler);
+ Callable call_to_integer = CodeFactory::ToInteger(assembler->isolate());
+ Node* tagged_n = assembler->CallStub(call_to_integer, context, start_from);
+
+ assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+ &init_k_heap_num);
+
+ assembler->Bind(&init_k_smi);
+ {
+ start_from_var.Bind(assembler->SmiToWord32(tagged_n));
+ assembler->Goto(&init_k_n);
+ }
+
+ assembler->Bind(&init_k_heap_num);
+ {
+ Label do_return_not_found(assembler);
+ Node* fp_len = assembler->ChangeInt32ToFloat64(len_var.value());
+ Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
+ assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
+ &do_return_not_found);
+ start_from_var.Bind(assembler->TruncateFloat64ToWord32(fp_n));
+ assembler->Goto(&init_k_n);
+
+ assembler->Bind(&do_return_not_found);
+ {
+ index_var.Bind(int32_zero);
+ assembler->Goto(&return_not_found);
+ }
+ }
+
+ assembler->Bind(&init_k_n);
+ {
+ Label if_positive(assembler), if_negative(assembler), done(assembler);
+ assembler->Branch(
+ assembler->Int32LessThan(start_from_var.value(), int32_zero),
+ &if_negative, &if_positive);
+
+ assembler->Bind(&if_positive);
+ {
+ index_var.Bind(start_from_var.value());
+ assembler->Goto(&done);
+ }
+
+ assembler->Bind(&if_negative);
+ {
+ index_var.Bind(
+ assembler->Int32Add(len_var.value(), start_from_var.value()));
+ assembler->Branch(
+ assembler->Int32LessThan(index_var.value(), int32_zero),
+ &init_k_zero, &done);
+ }
+
+ assembler->Bind(&init_k_zero);
+ {
+ index_var.Bind(int32_zero);
+ assembler->Goto(&done);
+ }
+
+ assembler->Bind(&done);
+ }
+ }
+
+ static int32_t kElementsKind[] = {
+ FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
+ FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
+ };
+
+ Label if_smiorobjects(assembler), if_packed_doubles(assembler),
+ if_holey_doubles(assembler);
+ Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
+ &if_smiorobjects, &if_smiorobjects,
+ &if_packed_doubles, &if_holey_doubles};
+
+ Node* map = assembler->LoadMap(array);
+ Node* bit_field2 = assembler->LoadMapBitField2(map);
+ Node* elements_kind =
+ assembler->BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ Node* elements = assembler->LoadElements(array);
+ assembler->Switch(elements_kind, &return_not_found, kElementsKind,
+ element_kind_handlers, arraysize(kElementsKind));
+
+ assembler->Bind(&if_smiorobjects);
+ {
+ Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label ident_loop(assembler, &index_var),
+ heap_num_loop(assembler, &search_num),
+ string_loop(assembler, &index_var), simd_loop(assembler),
+ undef_loop(assembler, &index_var), not_smi(assembler),
+ not_heap_num(assembler);
+
+ assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+ search_num.Bind(assembler->SmiToFloat64(search_element));
+ assembler->Goto(&heap_num_loop);
+
+ assembler->Bind(&not_smi);
+ assembler->GotoIf(assembler->WordEqual(search_element, undefined),
+ &undef_loop);
+ Node* map = assembler->LoadMap(search_element);
+ assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
+ &not_heap_num);
+ search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ assembler->Goto(&heap_num_loop);
+
+ assembler->Bind(&not_heap_num);
+ Node* search_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(
+ assembler->Int32LessThan(
+ search_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &string_loop);
+ assembler->GotoIf(
+ assembler->WordEqual(search_type,
+ assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ &simd_loop);
+ assembler->Goto(&ident_loop);
+
+ assembler->Bind(&ident_loop);
+ {
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_not_found);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordEqual(element_k, search_element),
+ &return_found);
+
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&ident_loop);
+ }
+
+ assembler->Bind(&undef_loop);
+ {
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_not_found);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordEqual(element_k, undefined),
+ &return_found);
+
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&undef_loop);
+ }
+
+ assembler->Bind(&heap_num_loop);
+ {
+ Label not_nan_loop(assembler, &index_var);
+ assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
+
+ assembler->Bind(&not_nan_loop);
+ {
+ Label continue_loop(assembler), not_smi(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_not_found);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+ assembler->Branch(
+ assembler->Float64Equal(search_num.value(),
+ assembler->SmiToFloat64(element_k)),
+ &return_found, &continue_loop);
+
+ assembler->Bind(&not_smi);
+ assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
+ heap_number_map),
+ &continue_loop);
+ assembler->BranchIfFloat64Equal(
+ search_num.value(), assembler->LoadHeapNumberValue(element_k),
+ &return_found, &continue_loop);
+
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&not_nan_loop);
+ }
+ }
+
+ assembler->Bind(&string_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_not_found);
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoUnless(assembler->Int32LessThan(
+ assembler->LoadInstanceType(element_k),
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &continue_loop);
+
+ // TODO(bmeurer): Consider inlining the StringEqual logic here.
+ Callable callable = CodeFactory::StringEqual(assembler->isolate());
+ Node* result =
+ assembler->CallStub(callable, context, search_element, element_k);
+ assembler->Branch(
+ assembler->WordEqual(assembler->BooleanConstant(true), result),
+ &return_found, &continue_loop);
+
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&string_loop);
+ }
+
+ assembler->Bind(&simd_loop);
+ {
+ Label continue_loop(assembler, &index_var),
+ loop_body(assembler, &index_var);
+ Node* map = assembler->LoadMap(search_element);
+
+ assembler->Goto(&loop_body);
+ assembler->Bind(&loop_body);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_not_found);
+
+ Node* element_k =
+ assembler->LoadFixedArrayElement(elements, index_var.value());
+ assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+
+ Node* map_k = assembler->LoadMap(element_k);
+ assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
+ &return_found, &continue_loop);
+
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&loop_body);
+ }
+ }
+
+ assembler->Bind(&if_packed_doubles);
+ {
+ Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
+ Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+ assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler->SmiToFloat64(search_element));
+ assembler->Goto(&not_nan_loop);
+
+ assembler->Bind(&search_notnan);
+ assembler->GotoIf(assembler->WordNotEqual(
+ assembler->LoadMap(search_element), heap_number_map),
+ &return_not_found);
+
+ search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+ assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
+
+ // Search for HeapNumber
+ assembler->Bind(&not_nan_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_not_found);
+ Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+ &return_found, &continue_loop);
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&not_nan_loop);
+ }
+ }
+
+ assembler->Bind(&if_holey_doubles);
+ {
+ Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
+ Variable search_num(assembler, MachineRepresentation::kFloat64);
+
+ assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler->SmiToFloat64(search_element));
+ assembler->Goto(&not_nan_loop);
+
+ assembler->Bind(&search_notnan);
+ assembler->GotoIf(assembler->WordNotEqual(
+ assembler->LoadMap(search_element), heap_number_map),
+ &return_not_found);
+
+ search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+
+ assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
+
+ // Search for HeapNumber
+ assembler->Bind(&not_nan_loop);
+ {
+ Label continue_loop(assembler);
+ assembler->GotoUnless(
+ assembler->Int32LessThan(index_var.value(), len_var.value()),
+ &return_not_found);
+
+ if (kPointerSize == kDoubleSize) {
+ Node* element = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint64());
+ Node* the_hole = assembler->Int64Constant(kHoleNanInt64);
+ assembler->GotoIf(assembler->Word64Equal(element, the_hole),
+ &continue_loop);
+ } else {
+ Node* element_upper = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Uint32(),
+ kIeeeDoubleExponentWordOffset);
+ assembler->GotoIf(
+ assembler->Word32Equal(element_upper,
+ assembler->Int32Constant(kHoleNanUpper32)),
+ &continue_loop);
+ }
+
+ Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler->BranchIfFloat64Equal(element_k, search_num.value(),
+ &return_found, &continue_loop);
+ assembler->Bind(&continue_loop);
+ index_var.Bind(assembler->Int32Add(index_var.value(), int32_one));
+ assembler->Goto(&not_nan_loop);
+ }
+ }
+
+ assembler->Bind(&return_found);
+ assembler->Return(assembler->ChangeInt32ToTagged(index_var.value()));
+
+ assembler->Bind(&return_not_found);
+ assembler->Return(assembler->NumberConstant(-1));
+
+ assembler->Bind(&call_runtime);
+ assembler->Return(assembler->CallRuntime(Runtime::kArrayIndexOf, context,
+ array, search_element, start_from));
+}
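+
+// Illustrative standalone sketch (editorial addition; not part of the
+// original patch): indexOf uses strict equality, so a NaN search value can
+// jump straight to return_not_found above, since NaN is never strictly
+// equal to anything, including itself.
+#include <cmath>
+
+int IndexOfNumberSketch(const double* elements, int length, double search) {
+  if (std::isnan(search)) return -1;  // mirrors the early NaN branch above
+  for (int i = 0; i < length; i++) {
+    if (elements[i] == search) return i;
+  }
+  return -1;
+}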
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
new file mode 100644
index 0000000000..addf8ac291
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -0,0 +1,88 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 24.1 ArrayBuffer Objects
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
+BUILTIN(ArrayBufferConstructor) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target<JSFunction>();
+ DCHECK(*target == target->native_context()->array_buffer_fun() ||
+ *target == target->native_context()->shared_array_buffer_fun());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ handle(target->shared()->name(), isolate)));
+}
+
+// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
+BUILTIN(ArrayBufferConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> length = args.atOrUndefined(isolate, 1);
+ DCHECK(*target == target->native_context()->array_buffer_fun() ||
+ *target == target->native_context()->shared_array_buffer_fun());
+ Handle<Object> number_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
+ Object::ToInteger(isolate, length));
+ if (number_length->Number() < 0.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ size_t byte_length;
+ if (!TryNumberToSize(*number_length, &byte_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+ SharedFlag shared_flag =
+ (*target == target->native_context()->array_buffer_fun())
+ ? SharedFlag::kNotShared
+ : SharedFlag::kShared;
+ if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
+ isolate, byte_length, true,
+ shared_flag)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
+ }
+ return *result;
+}
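+
+// Illustrative standalone sketch (editorial addition, simplified stand-in
+// for the ToInteger + TryNumberToSize sequence above; not part of the
+// original patch): a negative length and a length that cannot fit in size_t
+// are both rejected before any allocation happens.
+#include <cstddef>
+#include <limits>
+
+bool ValidateByteLengthSketch(double number_length, size_t* out) {
+  if (number_length < 0.0) return false;  // kInvalidArrayBufferLength
+  if (number_length >=
+      static_cast<double>(std::numeric_limits<size_t>::max())) {
+    return false;  // does not fit in size_t
+  }
+  *out = static_cast<size_t>(number_length);
+  return true;
+}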
+
+// ES6 section 24.1.4.1 get ArrayBuffer.prototype.byteLength
+BUILTIN(ArrayBufferPrototypeGetByteLength) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer,
+ "get ArrayBuffer.prototype.byteLength");
+
+ if (array_buffer->is_shared()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "get ArrayBuffer.prototype.byteLength"),
+ args.receiver()));
+ }
+ // TODO(franzih): According to the ES6 spec, we should throw a TypeError
+ // here if the JSArrayBuffer is detached.
+ return array_buffer->byte_length();
+}
+
+// ES6 section 24.1.3.1 ArrayBuffer.isView ( arg )
+BUILTIN(ArrayBufferIsView) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ Object* arg = args[1];
+ return isolate->heap()->ToBoolean(arg->IsJSArrayBufferView());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
new file mode 100644
index 0000000000..5f5bed1bda
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.3 Boolean Objects
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Call]] case.
+BUILTIN(BooleanConstructor) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ return isolate->heap()->ToBoolean(value->BooleanValue());
+}
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Construct]] case.
+BUILTIN(BooleanConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ DCHECK(*target == target->native_context()->boolean_function());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ Handle<JSValue>::cast(result)->set_value(
+ isolate->heap()->ToBoolean(value->BooleanValue()));
+ return *result;
+}
+
+// ES6 section 19.3.3.2 Boolean.prototype.toString ( )
+void Builtins::Generate_BooleanPrototypeToString(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* value = assembler->ToThisValue(
+ context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.toString");
+ Node* result = assembler->LoadObjectField(value, Oddball::kToStringOffset);
+ assembler->Return(result);
+}
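+
+// Illustrative standalone sketch (editorial addition, hypothetical Oddball
+// layout; not part of the original patch): the 'true' and 'false' heap
+// objects cache their string form, so toString above is a single field load
+// (Oddball::kToStringOffset) rather than a conversion.
+struct OddballSketch {
+  bool value;
+  const char* to_string;  // cached "true" or "false"
+};
+
+const char* BooleanToStringSketch(const OddballSketch& b) {
+  return b.to_string;  // one load, no branching on b.value
+}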
+
+// ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
+void Builtins::Generate_BooleanPrototypeValueOf(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* result = assembler->ToThisValue(
+ context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf");
+ assembler->Return(result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-call.cc b/deps/v8/src/builtins/builtins-call.cc
new file mode 100644
index 0000000000..e3054a9913
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-call.cc
@@ -0,0 +1,151 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return CallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return CallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return CallFunction_ReceiverIsAny();
+ }
+ break;
+ case TailCallMode::kAllow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return TailCallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return TailCallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return TailCallFunction_ReceiverIsAny();
+ }
+ break;
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+Handle<Code> Builtins::Call(ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Call_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Call_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return Call_ReceiverIsAny();
+ }
+ break;
+ case TailCallMode::kAllow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return TailCall_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return TailCall_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return TailCall_ReceiverIsAny();
+ }
+ break;
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+Handle<Code> Builtins::CallBoundFunction(TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ return CallBoundFunction();
+ case TailCallMode::kAllow:
+ return TailCallBoundFunction();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_TailCallFunction_ReceiverIsNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCallFunction_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCallFunction_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+}
+
+void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+ Generate_CallBoundFunctionImpl(masm, TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_TailCallBoundFunction(MacroAssembler* masm) {
+ Generate_CallBoundFunctionImpl(masm, TailCallMode::kAllow);
+}
+
+void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow);
+}
+
+void Builtins::Generate_TailCall_ReceiverIsNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCall_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kAllow);
+}
+
+void Builtins::Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
new file mode 100644
index 0000000000..7fc2f98716
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -0,0 +1,203 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/string-builder.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+
+#define CHECK_CALLSITE(recv, method) \
+ CHECK_RECEIVER(JSObject, recv, method); \
+ if (!JSReceiver::HasOwnProperty( \
+ recv, isolate->factory()->call_site_position_symbol()) \
+ .FromMaybe(false)) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, \
+ NewTypeError(MessageTemplate::kCallSiteMethod, \
+ isolate->factory()->NewStringFromAsciiChecked(method))); \
+ }
+
+namespace {
+
+Object* PositiveNumberOrNull(int value, Isolate* isolate) {
+ if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
+ return isolate->heap()->null_value();
+}
+
+} // namespace
+
+BUILTIN(CallSitePrototypeGetColumnNumber) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getColumnNumber");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return PositiveNumberOrNull(call_site.GetColumnNumber(), isolate);
+}
+
+BUILTIN(CallSitePrototypeGetEvalOrigin) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getEvalOrigin");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return *call_site.GetEvalOrigin();
+}
+
+BUILTIN(CallSitePrototypeGetFileName) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getFileName");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return *call_site.GetFileName();
+}
+
+namespace {
+
+// Determines whether the frame was created in strict mode code. Exceptions
+// from the property load are propagated through the Maybe, since a plain
+// bool return could not carry the Object* failure sentinel.
+Maybe<bool> CallSiteIsStrict(Isolate* isolate, Handle<JSObject> receiver) {
+  Handle<Object> strict;
+  Handle<Symbol> symbol = isolate->factory()->call_site_strict_symbol();
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, strict,
+                                   JSObject::GetProperty(receiver, symbol),
+                                   Nothing<bool>());
+  return Just(strict->BooleanValue());
+}
+
+} // namespace
+
+BUILTIN(CallSitePrototypeGetFunction) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getFunction");
+
+  Maybe<bool> is_strict = CallSiteIsStrict(isolate, recv);
+  MAYBE_RETURN(is_strict, isolate->heap()->exception());
+  if (is_strict.FromJust()) return *isolate->factory()->undefined_value();
+
+ Handle<Symbol> symbol = isolate->factory()->call_site_function_symbol();
+ RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+}
+
+BUILTIN(CallSitePrototypeGetFunctionName) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getFunctionName");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return *call_site.GetFunctionName();
+}
+
+BUILTIN(CallSitePrototypeGetLineNumber) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getLineNumber");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+
+ int line_number = call_site.IsWasm() ? call_site.wasm_func_index()
+ : call_site.GetLineNumber();
+ return PositiveNumberOrNull(line_number, isolate);
+}
+
+BUILTIN(CallSitePrototypeGetMethodName) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getMethodName");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return *call_site.GetMethodName();
+}
+
+BUILTIN(CallSitePrototypeGetPosition) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getPosition");
+
+ Handle<Symbol> symbol = isolate->factory()->call_site_position_symbol();
+ RETURN_RESULT_OR_FAILURE(isolate, JSObject::GetProperty(recv, symbol));
+}
+
+BUILTIN(CallSitePrototypeGetScriptNameOrSourceURL) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getScriptNameOrSourceUrl");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return *call_site.GetScriptNameOrSourceUrl();
+}
+
+BUILTIN(CallSitePrototypeGetThis) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getThis");
+
+  Maybe<bool> is_strict = CallSiteIsStrict(isolate, recv);
+  MAYBE_RETURN(is_strict, isolate->heap()->exception());
+  if (is_strict.FromJust()) return *isolate->factory()->undefined_value();
+
+ Handle<Object> receiver;
+ Handle<Symbol> symbol = isolate->factory()->call_site_receiver_symbol();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ JSObject::GetProperty(recv, symbol));
+
+ if (*receiver == isolate->heap()->call_site_constructor_symbol())
+ return *isolate->factory()->undefined_value();
+
+ return *receiver;
+}
+
+BUILTIN(CallSitePrototypeGetTypeName) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "getTypeName");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return *call_site.GetTypeName();
+}
+
+BUILTIN(CallSitePrototypeIsConstructor) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "isConstructor");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return isolate->heap()->ToBoolean(call_site.IsConstructor());
+}
+
+BUILTIN(CallSitePrototypeIsEval) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "isEval");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return isolate->heap()->ToBoolean(call_site.IsEval());
+}
+
+BUILTIN(CallSitePrototypeIsNative) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "isNative");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return isolate->heap()->ToBoolean(call_site.IsNative());
+}
+
+BUILTIN(CallSitePrototypeIsToplevel) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "isToplevel");
+
+ CallSite call_site(isolate, recv);
+ CHECK(call_site.IsJavaScript() || call_site.IsWasm());
+ return isolate->heap()->ToBoolean(call_site.IsToplevel());
+}
+
+BUILTIN(CallSitePrototypeToString) {
+ HandleScope scope(isolate);
+ CHECK_CALLSITE(recv, "toString");
+ RETURN_RESULT_OR_FAILURE(isolate, CallSiteUtils::ToString(isolate, recv));
+}
+
+#undef CHECK_CALLSITE
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-conversion.cc b/deps/v8/src/builtins/builtins-conversion.cc
new file mode 100644
index 0000000000..0d04a02e24
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-conversion.cc
@@ -0,0 +1,357 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
+ switch (hint) {
+ case ToPrimitiveHint::kDefault:
+ return NonPrimitiveToPrimitive_Default();
+ case ToPrimitiveHint::kNumber:
+ return NonPrimitiveToPrimitive_Number();
+ case ToPrimitiveHint::kString:
+ return NonPrimitiveToPrimitive_String();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+namespace {
+// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
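+// At the JavaScript level this is the lookup performed by e.g. `1 + obj`:
+// if obj[Symbol.toPrimitive] is present it is called with the hint string
+// ("default", "number" or "string"); otherwise OrdinaryToPrimitive tries
+// valueOf and toString in hint-dependent order.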
+void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
+ ToPrimitiveHint hint) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ // Lookup the @@toPrimitive property on the {input}.
+ Callable callable = CodeFactory::GetProperty(assembler->isolate());
+ Node* to_primitive_symbol =
+ assembler->HeapConstant(assembler->factory()->to_primitive_symbol());
+ Node* exotic_to_prim =
+ assembler->CallStub(callable, context, input, to_primitive_symbol);
+
+ // Check if {exotic_to_prim} is neither null nor undefined.
+ Label ordinary_to_primitive(assembler);
+ assembler->GotoIf(
+ assembler->WordEqual(exotic_to_prim, assembler->NullConstant()),
+ &ordinary_to_primitive);
+ assembler->GotoIf(
+ assembler->WordEqual(exotic_to_prim, assembler->UndefinedConstant()),
+ &ordinary_to_primitive);
+ {
+ // Invoke the {exotic_to_prim} method on the {input} with a string
+ // representation of the {hint}.
+ Callable callable = CodeFactory::Call(assembler->isolate());
+ Node* hint_string = assembler->HeapConstant(
+ assembler->factory()->ToPrimitiveHintString(hint));
+ Node* result = assembler->CallJS(callable, context, exotic_to_prim, input,
+ hint_string);
+
+ // Verify that the {result} is actually a primitive.
+ Label if_resultisprimitive(assembler),
+ if_resultisnotprimitive(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisprimitive);
+ Node* result_instance_type = assembler->LoadInstanceType(result);
+ STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+ assembler->Branch(assembler->Int32LessThanOrEqual(
+ result_instance_type,
+ assembler->Int32Constant(LAST_PRIMITIVE_TYPE)),
+ &if_resultisprimitive, &if_resultisnotprimitive);
+
+ assembler->Bind(&if_resultisprimitive);
+ {
+ // Just return the {result}.
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_resultisnotprimitive);
+ {
+ // Somehow the @@toPrimitive method on {input} didn't yield a primitive.
+ assembler->TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive,
+ context);
+ }
+ }
+
+ // Convert using the OrdinaryToPrimitive algorithm instead.
+ assembler->Bind(&ordinary_to_primitive);
+ {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(
+ assembler->isolate(), (hint == ToPrimitiveHint::kString)
+ ? OrdinaryToPrimitiveHint::kString
+ : OrdinaryToPrimitiveHint::kNumber);
+ assembler->TailCallStub(callable, context, input);
+ }
+}
+} // anonymous namespace
+
+void Builtins::Generate_NonPrimitiveToPrimitive_Default(
+ CodeStubAssembler* assembler) {
+ Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kDefault);
+}
+
+void Builtins::Generate_NonPrimitiveToPrimitive_Number(
+ CodeStubAssembler* assembler) {
+ Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kNumber);
+}
+
+void Builtins::Generate_NonPrimitiveToPrimitive_String(
+ CodeStubAssembler* assembler) {
+ Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kString);
+}
+
+void Builtins::Generate_StringToNumber(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Label runtime(assembler);
+
+ // Check if string has a cached array index.
+ Node* hash = assembler->LoadNameHashField(input);
+ Node* bit = assembler->Word32And(
+ hash, assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
+ assembler->GotoIf(assembler->Word32NotEqual(bit, assembler->Int32Constant(0)),
+ &runtime);
+
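+  // The array index was stored in the hash field when the hash was first
+  // computed; its value is bounded by String::ArrayIndexValueBits (24 bits)
+  // and therefore always fits in a Smi, so plain tagging suffices here.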
+ assembler->Return(assembler->SmiTag(
+ assembler->BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+
+ assembler->Bind(&runtime);
+ {
+ // Note: We cannot tail call to the runtime here, as js-to-wasm
+ // trampolines also use this code currently, and they declare all
+ // outgoing parameters as untagged, while we would push a tagged
+ // object here.
+ Node* result =
+ assembler->CallRuntime(Runtime::kStringToNumber, context, input);
+ assembler->Return(result);
+ }
+}
+
+// ES6 section 7.1.3 ToNumber ( argument )
+void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ // We might need to loop once here due to ToPrimitive conversions.
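+  // For example, ToNumber({valueOf() { return "42"; }}) first produces the
+  // string "42" via ToPrimitive and then takes a second trip through the
+  // loop to convert that string to the number 42.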
+ Variable var_input(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_input);
+ var_input.Bind(input);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {input} value (known to be a HeapObject).
+ Node* input = var_input.value();
+
+ // Dispatch on the {input} instance type.
+ Node* input_instance_type = assembler->LoadInstanceType(input);
+ Label if_inputisstring(assembler), if_inputisoddball(assembler),
+ if_inputisreceiver(assembler, Label::kDeferred),
+ if_inputisother(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->Int32LessThan(
+ input_instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_inputisstring);
+ assembler->GotoIf(
+ assembler->Word32Equal(input_instance_type,
+ assembler->Int32Constant(ODDBALL_TYPE)),
+ &if_inputisoddball);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ assembler->Branch(assembler->Int32GreaterThanOrEqual(
+ input_instance_type,
+ assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ &if_inputisreceiver, &if_inputisother);
+
+ assembler->Bind(&if_inputisstring);
+ {
+ // The {input} is a String, use the fast stub to convert it to a Number.
+ // TODO(bmeurer): Consider inlining the StringToNumber logic here.
+ Callable callable = CodeFactory::StringToNumber(assembler->isolate());
+ assembler->TailCallStub(callable, context, input);
+ }
+
+ assembler->Bind(&if_inputisoddball);
+ {
+      // The {input} is an Oddball, we just need to load the Number value
+      // of it.
+ Node* result =
+ assembler->LoadObjectField(input, Oddball::kToNumberOffset);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_inputisreceiver);
+ {
+ // The {input} is a JSReceiver, we need to convert it to a Primitive first
+ // using the ToPrimitive type conversion, preferably yielding a Number.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate(), ToPrimitiveHint::kNumber);
+ Node* result = assembler->CallStub(callable, context, input);
+
+ // Check if the {result} is already a Number.
+ Label if_resultisnumber(assembler), if_resultisnotnumber(assembler);
+ assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisnumber);
+ Node* result_map = assembler->LoadMap(result);
+ assembler->Branch(
+ assembler->WordEqual(result_map, assembler->HeapNumberMapConstant()),
+ &if_resultisnumber, &if_resultisnotnumber);
+
+ assembler->Bind(&if_resultisnumber);
+ {
+ // The ToPrimitive conversion already gave us a Number, so we're done.
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_resultisnotnumber);
+ {
+ // We now have a Primitive {result}, but it's not yet a Number.
+ var_input.Bind(result);
+ assembler->Goto(&loop);
+ }
+ }
+
+ assembler->Bind(&if_inputisother);
+ {
+ // The {input} is something else (i.e. Symbol or Simd128Value), let the
+ // runtime figure out the correct exception.
+ // Note: We cannot tail call to the runtime here, as js-to-wasm
+ // trampolines also use this code currently, and they declare all
+ // outgoing parameters as untagged, while we would push a tagged
+ // object here.
+ Node* result = assembler->CallRuntime(Runtime::kToNumber, context, input);
+ assembler->Return(result);
+ }
+ }
+}
+
+Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
+ switch (hint) {
+ case OrdinaryToPrimitiveHint::kNumber:
+ return OrdinaryToPrimitive_Number();
+ case OrdinaryToPrimitiveHint::kString:
+ return OrdinaryToPrimitive_String();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+namespace {
+// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
+void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
+ OrdinaryToPrimitiveHint hint) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Node* input = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ Label return_result(assembler, &var_result);
+
+ Handle<String> method_names[2];
+ switch (hint) {
+ case OrdinaryToPrimitiveHint::kNumber:
+ method_names[0] = assembler->factory()->valueOf_string();
+ method_names[1] = assembler->factory()->toString_string();
+ break;
+ case OrdinaryToPrimitiveHint::kString:
+ method_names[0] = assembler->factory()->toString_string();
+ method_names[1] = assembler->factory()->valueOf_string();
+ break;
+ }
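+  // This ordering is what makes Number(obj) prefer obj.valueOf() while
+  // String(obj) and template literals prefer obj.toString().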
+ for (Handle<String> name : method_names) {
+ // Lookup the {name} on the {input}.
+ Callable callable = CodeFactory::GetProperty(assembler->isolate());
+ Node* name_string = assembler->HeapConstant(name);
+ Node* method = assembler->CallStub(callable, context, input, name_string);
+
+ // Check if the {method} is callable.
+ Label if_methodiscallable(assembler),
+ if_methodisnotcallable(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->WordIsSmi(method), &if_methodisnotcallable);
+ Node* method_map = assembler->LoadMap(method);
+ Node* method_bit_field = assembler->LoadMapBitField(method_map);
+ assembler->Branch(
+ assembler->Word32Equal(
+ assembler->Word32And(method_bit_field, assembler->Int32Constant(
+ 1 << Map::kIsCallable)),
+ assembler->Int32Constant(0)),
+ &if_methodisnotcallable, &if_methodiscallable);
+
+ assembler->Bind(&if_methodiscallable);
+ {
+ // Call the {method} on the {input}.
+ Callable callable = CodeFactory::Call(assembler->isolate());
+ Node* result = assembler->CallJS(callable, context, method, input);
+ var_result.Bind(result);
+
+ // Return the {result} if it is a primitive.
+ assembler->GotoIf(assembler->WordIsSmi(result), &return_result);
+ Node* result_instance_type = assembler->LoadInstanceType(result);
+ STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+ assembler->GotoIf(assembler->Int32LessThanOrEqual(
+ result_instance_type,
+ assembler->Int32Constant(LAST_PRIMITIVE_TYPE)),
+ &return_result);
+ }
+
+ // Just continue with the next {name} if the {method} is not callable.
+ assembler->Goto(&if_methodisnotcallable);
+ assembler->Bind(&if_methodisnotcallable);
+ }
+
+ assembler->TailCallRuntime(Runtime::kThrowCannotConvertToPrimitive, context);
+
+ assembler->Bind(&return_result);
+ assembler->Return(var_result.value());
+}
+} // anonymous namespace
+
+void Builtins::Generate_OrdinaryToPrimitive_Number(
+ CodeStubAssembler* assembler) {
+ Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kNumber);
+}
+
+void Builtins::Generate_OrdinaryToPrimitive_String(
+ CodeStubAssembler* assembler) {
+ Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kString);
+}
+
+// ES6 section 7.1.2 ToBoolean ( argument )
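+// Examples of the full truthiness table handled below: ToBoolean("") and
+// ToBoolean(0) are false, while ToBoolean({}) and ToBoolean("0") are true.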
+void Builtins::Generate_ToBoolean(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Node* value = assembler->Parameter(Descriptor::kArgument);
+
+ Label return_true(assembler), return_false(assembler);
+ assembler->BranchIfToBooleanIsTrue(value, &return_true, &return_false);
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
new file mode 100644
index 0000000000..32c5a83d2f
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -0,0 +1,133 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 24.2 DataView Objects
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Call]] case.
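+// Unlike Date, DataView cannot be invoked as a plain function: DataView(buf)
+// without `new` lands here and unconditionally throws a TypeError.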
+BUILTIN(DataViewConstructor) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked("DataView")));
+}
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
+BUILTIN(DataViewConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> buffer = args.atOrUndefined(isolate, 1);
+ Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
+ Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
+
+ // 2. If Type(buffer) is not Object, throw a TypeError exception.
+ // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
+ // TypeError exception.
+ if (!buffer->IsJSArrayBuffer()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
+ }
+ Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
+
+ // 4. Let offset be ToIndex(byteOffset).
+ Handle<Object> offset;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, offset,
+ Object::ToIndex(isolate, byte_offset,
+ MessageTemplate::kInvalidDataViewOffset));
+
+ // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // We currently violate the specification at this point.
+
+ // 6. Let bufferByteLength be the value of buffer's [[ArrayBufferByteLength]]
+ // internal slot.
+ double const buffer_byte_length = array_buffer->byte_length()->Number();
+
+ // 7. If offset > bufferByteLength, throw a RangeError exception
+ if (offset->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidDataViewOffset, offset));
+ }
+
+ Handle<Object> view_byte_length;
+ if (byte_length->IsUndefined(isolate)) {
+ // 8. If byteLength is undefined, then
+ // a. Let viewByteLength be bufferByteLength - offset.
+ view_byte_length =
+ isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+ } else {
+ // 9. Else,
+ // a. Let viewByteLength be ? ToIndex(byteLength).
+ // b. If offset+viewByteLength > bufferByteLength, throw a RangeError
+ // exception
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, view_byte_length,
+ Object::ToIndex(isolate, byte_length,
+ MessageTemplate::kInvalidDataViewLength));
+ if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ }
+ }
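+  // For an 8-byte buffer: new DataView(buffer, 4) gets viewByteLength
+  // 8 - 4 = 4, whereas new DataView(buffer, 4, 8) throws a RangeError
+  // since 4 + 8 > 8.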
+
+ // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]],
+ // [[ByteLength]], [[ByteOffset]]»).
+ // 11. Set O's [[DataView]] internal slot to true.
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ for (int i = 0; i < ArrayBufferView::kInternalFieldCount; ++i) {
+ Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::FromInt(0));
+ }
+
+ // 12. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
+ Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+
+ // 13. Set O's [[ByteLength]] internal slot to viewByteLength.
+ Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+
+ // 14. Set O's [[ByteOffset]] internal slot to offset.
+ Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+
+ // 15. Return O.
+ return *result;
+}
+
+// ES6 section 24.2.4.1 get DataView.prototype.buffer
+BUILTIN(DataViewPrototypeGetBuffer) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.buffer");
+ return data_view->buffer();
+}
+
+// ES6 section 24.2.4.2 get DataView.prototype.byteLength
+BUILTIN(DataViewPrototypeGetByteLength) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteLength");
+ // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+ // here if the JSArrayBuffer of the {data_view} was neutered.
+ return data_view->byte_length();
+}
+
+// ES6 section 24.2.4.3 get DataView.prototype.byteOffset
+BUILTIN(DataViewPrototypeGetByteOffset) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDataView, data_view, "get DataView.prototype.byteOffset");
+ // TODO(bmeurer): According to the ES6 spec, we should throw a TypeError
+ // here if the JSArrayBuffer of the {data_view} was neutered.
+ return data_view->byte_offset();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
new file mode 100644
index 0000000000..d5c34761f5
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -0,0 +1,1002 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/dateparser-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 20.3 Date Objects
+
+namespace {
+
+// ES6 section 20.3.1.1 Time Values and Time Range
+const double kMinYear = -1000000.0;
+const double kMaxYear = -kMinYear;
+const double kMinMonth = -10000000.0;
+const double kMaxMonth = -kMinMonth;
+
+// ES6 section 20.3.1.2 Day Number and Time within Day
+const double kMsPerDay = 86400000.0;
+
+// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
+const double kMsPerSecond = 1000.0;
+const double kMsPerMinute = 60000.0;
+const double kMsPerHour = 3600000.0;
+
+// ES6 section 20.3.1.14 MakeDate (day, time)
+double MakeDate(double day, double time) {
+ if (std::isfinite(day) && std::isfinite(time)) {
+ return time + day * kMsPerDay;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+// ES6 section 20.3.1.13 MakeDay (year, month, date)
+double MakeDay(double year, double month, double date) {
+ if ((kMinYear <= year && year <= kMaxYear) &&
+ (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
+ int y = FastD2I(year);
+ int m = FastD2I(month);
+ y += m / 12;
+ m %= 12;
+ if (m < 0) {
+ m += 12;
+ y -= 1;
+ }
+ DCHECK_LE(0, m);
+ DCHECK_LT(m, 12);
+
+ // kYearDelta is an arbitrary number such that:
+ // a) kYearDelta = -1 (mod 400)
+ // b) year + kYearDelta > 0 for years in the range defined by
+//    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+ // Jan 1 1970. This is required so that we don't run into integer
+ // division of negative numbers.
+ // c) there shouldn't be an overflow for 32-bit integers in the following
+ // operations.
+ static const int kYearDelta = 399999;
+ static const int kBaseDay =
+ 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
+ (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
+ int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
+ (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
+ kBaseDay;
+ if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
+ static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
+ 181, 212, 243, 273, 304, 334};
+ day_from_year += kDayFromMonth[m];
+ } else {
+ static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
+ 182, 213, 244, 274, 305, 335};
+ day_from_year += kDayFromMonth[m];
+ }
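+    // For example, MakeDay(1970, 0, 1) is 0 (the epoch day) and
+    // MakeDay(1970, 1, 1) is 31, since January 1970 has 31 days.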
+ return static_cast<double>(day_from_year - 1) + date;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
+double MakeTime(double hour, double min, double sec, double ms) {
+ if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
+ std::isfinite(ms)) {
+ double const h = DoubleToInteger(hour);
+ double const m = DoubleToInteger(min);
+ double const s = DoubleToInteger(sec);
+ double const milli = DoubleToInteger(ms);
+ return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+// ES6 section 20.3.1.15 TimeClip (time)
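+// The valid time range is +/-8.64e15 ms, i.e. 100,000,000 days on either
+// side of the epoch; everything outside is clipped to NaN.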
+double TimeClip(double time) {
+ if (-DateCache::kMaxTimeInMs <= time && time <= DateCache::kMaxTimeInMs) {
+ return DoubleToInteger(time) + 0.0;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
+ "Thu", "Fri", "Sat"};
+const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
+
+// ES6 section 20.3.1.16 Date Time String Format
+double ParseDateTimeString(Handle<String> str) {
+ Isolate* const isolate = str->GetIsolate();
+ str = String::Flatten(str);
+ // TODO(bmeurer): Change DateParser to not use the FixedArray.
+ Handle<FixedArray> tmp =
+ isolate->factory()->NewFixedArray(DateParser::OUTPUT_SIZE);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent str_content = str->GetFlatContent();
+ bool result;
+ if (str_content.IsOneByte()) {
+ result = DateParser::Parse(isolate, str_content.ToOneByteVector(), *tmp);
+ } else {
+ result = DateParser::Parse(isolate, str_content.ToUC16Vector(), *tmp);
+ }
+ if (!result) return std::numeric_limits<double>::quiet_NaN();
+ double const day = MakeDay(tmp->get(0)->Number(), tmp->get(1)->Number(),
+ tmp->get(2)->Number());
+ double const time = MakeTime(tmp->get(3)->Number(), tmp->get(4)->Number(),
+ tmp->get(5)->Number(), tmp->get(6)->Number());
+ double date = MakeDate(day, time);
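+  // Output slot 7 holds the UTC offset in seconds, or null when the string
+  // carried no timezone; in the null case the parsed time is interpreted as
+  // local time and converted to UTC below.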
+ if (tmp->get(7)->IsNull(isolate)) {
+ if (!std::isnan(date)) {
+ date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
+ }
+ } else {
+ date -= tmp->get(7)->Number() * 1000.0;
+ }
+ return date;
+}
+
+enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
+
+// ES6 section 20.3.4.41.1 ToDateString(tv)
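+// Produces strings of the form "Fri Sep 02 2016 10:21:33 GMT+0200 (CEST)";
+// kDateOnly and kTimeOnly emit just the corresponding half.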
+void ToDateString(double time_val, Vector<char> str, DateCache* date_cache,
+ ToDateStringMode mode = kDateAndTime) {
+ if (std::isnan(time_val)) {
+ SNPrintF(str, "Invalid Date");
+ return;
+ }
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = date_cache->ToLocal(time_ms);
+ int year, month, day, weekday, hour, min, sec, ms;
+ date_cache->BreakDownTime(local_time_ms, &year, &month, &day, &weekday, &hour,
+ &min, &sec, &ms);
+ int timezone_offset = -date_cache->TimezoneOffset(time_ms);
+ int timezone_hour = std::abs(timezone_offset) / 60;
+ int timezone_min = std::abs(timezone_offset) % 60;
+ const char* local_timezone = date_cache->LocalTimezone(time_ms);
+ switch (mode) {
+ case kDateOnly:
+ SNPrintF(str, "%s %s %02d %4d", kShortWeekDays[weekday],
+ kShortMonths[month], day, year);
+ return;
+ case kTimeOnly:
+ SNPrintF(str, "%02d:%02d:%02d GMT%c%02d%02d (%s)", hour, min, sec,
+ (timezone_offset < 0) ? '-' : '+', timezone_hour, timezone_min,
+ local_timezone);
+ return;
+ case kDateAndTime:
+ SNPrintF(str, "%s %s %02d %4d %02d:%02d:%02d GMT%c%02d%02d (%s)",
+ kShortWeekDays[weekday], kShortMonths[month], day, year, hour,
+ min, sec, (timezone_offset < 0) ? '-' : '+', timezone_hour,
+ timezone_min, local_timezone);
+ return;
+ }
+ UNREACHABLE();
+}
+
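+// Shared tail of the local-time setters below: interprets {time_val} as
+// local time, converts it to UTC, and stores the clipped result on {date}.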
+Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ Isolate* const isolate = date->GetIsolate();
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+} // namespace
+
+// ES6 section 20.3.2 The Date Constructor for the [[Call]] case.
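+// When invoked without `new`, Date ignores its arguments and returns the
+// current time formatted as a string, so Date(0) and Date() behave alike.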
+BUILTIN(DateConstructor) {
+ HandleScope scope(isolate);
+ double const time_val = JSDate::CurrentTimeValue(isolate);
+ char buffer[128];
+ ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.2 The Date Constructor for the [[Construct]] case.
+BUILTIN(DateConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ int const argc = args.length() - 1;
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ double time_val;
+ if (argc == 0) {
+ time_val = JSDate::CurrentTimeValue(isolate);
+ } else if (argc == 1) {
+ Handle<Object> value = args.at<Object>(1);
+ if (value->IsJSDate()) {
+ time_val = Handle<JSDate>::cast(value)->value()->Number();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToPrimitive(value));
+ if (value->IsString()) {
+ time_val = ParseDateTimeString(Handle<String>::cast(value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(value));
+ time_val = value->Number();
+ }
+ }
+ } else {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at<Object>(1)));
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at<Object>(2)));
+ double year = year_object->Number();
+ double month = month_object->Number();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(args.at<Object>(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ms = ms_object->Number();
+ }
+ }
+ }
+ }
+ }
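+    // Per ES6, two-digit years select 1900..1999, so e.g. new Date(95, 0, 1)
+    // denotes Jan 1 1995 in local time.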
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ time_val = MakeDate(day, time);
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
+ }
+ RETURN_RESULT_OR_FAILURE(isolate, JSDate::New(target, new_target, time_val));
+}
+
+// ES6 section 20.3.3.1 Date.now ( )
+BUILTIN(DateNow) {
+ HandleScope scope(isolate);
+ return *isolate->factory()->NewNumber(JSDate::CurrentTimeValue(isolate));
+}
+
+// ES6 section 20.3.3.2 Date.parse ( string )
+BUILTIN(DateParse) {
+ HandleScope scope(isolate);
+ Handle<String> string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, string,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+ return *isolate->factory()->NewNumber(ParseDateTimeString(string));
+}
+
+// ES6 section 20.3.3.4 Date.UTC (year,month,date,hours,minutes,seconds,ms)
+BUILTIN(DateUTC) {
+ HandleScope scope(isolate);
+ int const argc = args.length() - 1;
+ double year = std::numeric_limits<double>::quiet_NaN();
+ double month = std::numeric_limits<double>::quiet_NaN();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 1) {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at<Object>(1)));
+ year = year_object->Number();
+ if (argc >= 2) {
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at<Object>(2)));
+ month = month_object->Number();
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, date_object, Object::ToNumber(args.at<Object>(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, seconds_object,
+ Object::ToNumber(args.at<Object>(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ms = ms_object->Number();
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ return *isolate->factory()->NewNumber(TimeClip(MakeDate(day, time)));
+}
+
+// ES6 section 20.3.4.20 Date.prototype.setDate ( date )
+BUILTIN(DatePrototypeSetDate) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setDate");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ time_val = MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.21 Date.prototype.setFullYear (year, month, date)
+BUILTIN(DatePrototypeSetFullYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setFullYear");
+ int const argc = args.length() - 1;
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double y = year->Number(), m = 0.0, dt = 1.0;
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ if (argc >= 2) {
+ Handle<Object> month = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ m = month->Number();
+ if (argc >= 3) {
+ Handle<Object> date = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ }
+ double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.22 Date.prototype.setHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetHours) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setHours");
+ int const argc = args.length() - 1;
+ Handle<Object> hour = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ double h = hour->Number();
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> min = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ m = min->Number();
+ if (argc >= 3) {
+ Handle<Object> sec = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 4) {
+ Handle<Object> ms = args.at<Object>(4);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.23 Date.prototype.setMilliseconds(ms)
+BUILTIN(DatePrototypeSetMilliseconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMilliseconds");
+ Handle<Object> ms = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ int m = (time_within_day / (60 * 1000)) % 60;
+ int s = (time_within_day / 1000) % 60;
+ time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.24 Date.prototype.setMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetMinutes) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMinutes");
+ int const argc = args.length() - 1;
+ Handle<Object> min = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = min->Number();
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> sec = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 3) {
+ Handle<Object> ms = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date )
+BUILTIN(DatePrototypeSetMonth) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth");
+ int const argc = args.length() - 1;
+ Handle<Object> month = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, unused, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+ double m = month->Number();
+ double dt = day;
+ if (argc >= 2) {
+ Handle<Object> date = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetSeconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setSeconds");
+ int const argc = args.length() - 1;
+ Handle<Object> sec = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int day = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = sec->Number();
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> ms = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return SetLocalDateValue(date, time_val);
+}
+
+// ES6 section 20.3.4.27 Date.prototype.setTime ( time )
+BUILTIN(DatePrototypeSetTime) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setTime");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ return *JSDate::SetValue(date, TimeClip(value->Number()));
+}
+
+// ES6 section 20.3.4.28 Date.prototype.setUTCDate ( date )
+BUILTIN(DatePrototypeSetUTCDate) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCDate");
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(value));
+ if (std::isnan(date->value()->Number())) return date->value();
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int const days = isolate->date_cache()->DaysFromTime(time_ms);
+ int const time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ double const time_val =
+ MakeDate(MakeDay(year, month, value->Number()), time_within_day);
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.29 Date.prototype.setUTCFullYear (year, month, date)
+BUILTIN(DatePrototypeSetUTCFullYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCFullYear");
+ int const argc = args.length() - 1;
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double y = year->Number(), m = 0.0, dt = 1.0;
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int const days = isolate->date_cache()->DaysFromTime(time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ if (argc >= 2) {
+ Handle<Object> month = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ m = month->Number();
+ if (argc >= 3) {
+ Handle<Object> date = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ }
+ double const time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.30 Date.prototype.setUTCHours(hour, min, sec, ms)
+BUILTIN(DatePrototypeSetUTCHours) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCHours");
+ int const argc = args.length() - 1;
+ Handle<Object> hour = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour, Object::ToNumber(hour));
+ double h = hour->Number();
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> min = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ m = min->Number();
+ if (argc >= 3) {
+ Handle<Object> sec = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 4) {
+ Handle<Object> ms = args.at<Object>(4);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.31 Date.prototype.setUTCMilliseconds(ms)
+BUILTIN(DatePrototypeSetUTCMilliseconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMilliseconds");
+ Handle<Object> ms = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ int m = (time_within_day / (60 * 1000)) % 60;
+ int s = (time_within_day / 1000) % 60;
+ time_val = MakeDate(day, MakeTime(h, m, s, ms->Number()));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.32 Date.prototype.setUTCMinutes ( min, sec, ms )
+BUILTIN(DatePrototypeSetUTCMinutes) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMinutes");
+ int const argc = args.length() - 1;
+ Handle<Object> min = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = min->Number();
+ double s = (time_within_day / 1000) % 60;
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> sec = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ s = sec->Number();
+ if (argc >= 3) {
+ Handle<Object> ms = args.at<Object>(3);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.33 Date.prototype.setUTCMonth ( month, date )
+BUILTIN(DatePrototypeSetUTCMonth) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth");
+ int const argc = args.length() - 1;
+ Handle<Object> month = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int days = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
+ int year, unused, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &unused, &day);
+ double m = month->Number();
+ double dt = day;
+ if (argc >= 2) {
+ Handle<Object> date = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
+ dt = date->Number();
+ }
+ time_val = MakeDate(MakeDay(year, m, dt), time_within_day);
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.34 Date.prototype.setUTCSeconds ( sec, ms )
+BUILTIN(DatePrototypeSetUTCSeconds) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCSeconds");
+ int const argc = args.length() - 1;
+ Handle<Object> sec = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
+ double time_val = date->value()->Number();
+ if (!std::isnan(time_val)) {
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int day = isolate->date_cache()->DaysFromTime(time_ms);
+ int time_within_day = isolate->date_cache()->TimeInDay(time_ms, day);
+ int h = time_within_day / (60 * 60 * 1000);
+ double m = (time_within_day / (60 * 1000)) % 60;
+ double s = sec->Number();
+ double milli = time_within_day % 1000;
+ if (argc >= 2) {
+ Handle<Object> ms = args.at<Object>(2);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
+ milli = ms->Number();
+ }
+ time_val = MakeDate(day, MakeTime(h, m, s, milli));
+ }
+ return *JSDate::SetValue(date, TimeClip(time_val));
+}
+
+// ES6 section 20.3.4.35 Date.prototype.toDateString ( )
+BUILTIN(DatePrototypeToDateString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
+ char buffer[128];
+ ToDateString(date->value()->Number(), ArrayVector(buffer),
+ isolate->date_cache(), kDateOnly);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.4.36 Date.prototype.toISOString ( )
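+// Years outside 0..9999 use the ES6 expanded year format with a sign and
+// six digits, e.g. "+275760-09-13T00:00:00.000Z" for the maximum time value.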
+BUILTIN(DatePrototypeToISOString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toISOString");
+ double const time_val = date->value()->Number();
+ if (std::isnan(time_val)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
+ }
+ int64_t const time_ms = static_cast<int64_t>(time_val);
+ int year, month, day, weekday, hour, min, sec, ms;
+ isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+ &hour, &min, &sec, &ms);
+ char buffer[128];
+ if (year >= 0 && year <= 9999) {
+ SNPrintF(ArrayVector(buffer), "%04d-%02d-%02dT%02d:%02d:%02d.%03dZ", year,
+ month + 1, day, hour, min, sec, ms);
+ } else if (year < 0) {
+ SNPrintF(ArrayVector(buffer), "-%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", -year,
+ month + 1, day, hour, min, sec, ms);
+ } else {
+ SNPrintF(ArrayVector(buffer), "+%06d-%02d-%02dT%02d:%02d:%02d.%03dZ", year,
+ month + 1, day, hour, min, sec, ms);
+ }
+ return *isolate->factory()->NewStringFromAsciiChecked(buffer);
+}
+
+// ES6 section 20.3.4.41 Date.prototype.toString ( )
+BUILTIN(DatePrototypeToString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
+ char buffer[128];
+ ToDateString(date->value()->Number(), ArrayVector(buffer),
+ isolate->date_cache());
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.4.42 Date.prototype.toTimeString ( )
+BUILTIN(DatePrototypeToTimeString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
+ char buffer[128];
+ ToDateString(date->value()->Number(), ArrayVector(buffer),
+ isolate->date_cache(), kTimeOnly);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+}
+
+// ES6 section 20.3.4.43 Date.prototype.toUTCString ( )
+BUILTIN(DatePrototypeToUTCString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.toUTCString");
+ double const time_val = date->value()->Number();
+ if (std::isnan(time_val)) {
+ return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
+ }
+ char buffer[128];
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int year, month, day, weekday, hour, min, sec, ms;
+ isolate->date_cache()->BreakDownTime(time_ms, &year, &month, &day, &weekday,
+ &hour, &min, &sec, &ms);
+ SNPrintF(ArrayVector(buffer), "%s, %02d %s %4d %02d:%02d:%02d GMT",
+ kShortWeekDays[weekday], day, kShortMonths[month], year, hour, min,
+ sec);
+ return *isolate->factory()->NewStringFromAsciiChecked(buffer);
+}
+
+// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
+BUILTIN(DatePrototypeValueOf) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
+ return date->value();
+}
+
+// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
+BUILTIN(DatePrototypeToPrimitive) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
+ Handle<Object> hint = args.at<Object>(1);
+ RETURN_RESULT_OR_FAILURE(isolate, JSDate::ToPrimitive(receiver, hint));
+}
+
+// ES6 section B.2.4.1 Date.prototype.getYear ( )
+BUILTIN(DatePrototypeGetYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.getYear");
+ double time_val = date->value()->Number();
+ if (std::isnan(time_val)) return date->value();
+ int64_t time_ms = static_cast<int64_t>(time_val);
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ return Smi::FromInt(year - 1900);
+}
+
+// ES6 section B.2.4.2 Date.prototype.setYear ( year )
+BUILTIN(DatePrototypeSetYear) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSDate, date, "Date.prototype.setYear");
+ Handle<Object> year = args.atOrUndefined(isolate, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year, Object::ToNumber(year));
+ double m = 0.0, dt = 1.0, y = year->Number();
+ if (0.0 <= y && y <= 99.0) {
+ y = 1900.0 + DoubleToInteger(y);
+ }
+ int time_within_day = 0;
+ if (!std::isnan(date->value()->Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
+ int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
+ time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
+ int year, month, day;
+ isolate->date_cache()->YearMonthDayFromDays(days, &year, &month, &day);
+ m = month;
+ dt = day;
+ }
+ double time_val = MakeDate(MakeDay(y, m, dt), time_within_day);
+ return SetLocalDateValue(date, time_val);
+}
+
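The [0, 99] range check above implements the Annex B two-digit-year mapping; an illustrative session:

```js
const d = new Date(2000, 5, 15);
d.setYear(95);
d.getFullYear(); // 1995 -- years in [0, 99] are mapped to 1900 + y
d.setYear(2020);
d.getFullYear(); // 2020 -- anything else is used as-is
```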
+// ES6 section 20.3.4.37 Date.prototype.toJSON ( key )
+BUILTIN(DatePrototypeToJson) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.atOrUndefined(isolate, 0);
+ Handle<JSReceiver> receiver_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_obj,
+ Object::ToObject(isolate, receiver));
+ Handle<Object> primitive;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, primitive,
+ Object::ToPrimitive(receiver_obj, ToPrimitiveHint::kNumber));
+ if (primitive->IsNumber() && !std::isfinite(primitive->Number())) {
+ return isolate->heap()->null_value();
+ } else {
+ Handle<String> name =
+ isolate->factory()->NewStringFromAsciiChecked("toISOString");
+ Handle<Object> function;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, function,
+ Object::GetProperty(receiver_obj, name));
+ if (!function->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledNonCallable, name));
+ }
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Execution::Call(isolate, function, receiver_obj, 0, NULL));
+ }
+}
+
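Note the two branches above: a non-finite time value serializes to null, and everything else dispatches to a toISOString looked up on the receiver, which makes the method generic. Illustrative JavaScript:

```js
JSON.stringify({ when: new Date(0) }); // '{"when":"1970-01-01T00:00:00.000Z"}'
new Date(NaN).toJSON();                // null (non-finite time value)
// The toISOString lookup happens on the receiver, so toJSON is generic:
Date.prototype.toJSON.call({ toISOString: () => 'x' }); // "x"
```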
+// static
+void Builtins::Generate_DatePrototypeGetDate(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDay);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetDay(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kWeekday);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetFullYear(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kYear);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetHours(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kHour);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetMilliseconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMillisecond);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetMinutes(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMinute);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetMonth(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMonth);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetSeconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kSecond);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetTime(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDateValue);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetTimezoneOffset(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kTimezoneOffset);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDate(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kDayUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCDay(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kWeekdayUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCFullYear(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kYearUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCHours(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kHourUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMilliseconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMillisecondUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMinutes(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMinuteUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCMonth(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kMonthUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeGetUTCSeconds(MacroAssembler* masm) {
+ Generate_DatePrototype_GetField(masm, JSDate::kSecondUTC);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-debug.cc b/deps/v8/src/builtins/builtins-debug.cc
new file mode 100644
index 0000000000..011eba3db4
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-debug.cc
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/debug/debug.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_Return_DebugBreak(MacroAssembler* masm) {
+ DebugCodegen::GenerateDebugBreakStub(masm,
+ DebugCodegen::SAVE_RESULT_REGISTER);
+}
+
+void Builtins::Generate_Slot_DebugBreak(MacroAssembler* masm) {
+ DebugCodegen::GenerateDebugBreakStub(masm,
+ DebugCodegen::IGNORE_RESULT_REGISTER);
+}
+
+void Builtins::Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
+ DebugCodegen::GenerateFrameDropperLiveEdit(masm);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
new file mode 100644
index 0000000000..c2a7b99035
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -0,0 +1,135 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/messages.h"
+#include "src/property-descriptor.h"
+#include "src/string-builder.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 19.5.1.1 Error ( message )
+BUILTIN(ErrorConstructor) {
+ HandleScope scope(isolate);
+
+ FrameSkipMode mode = SKIP_FIRST;
+ Handle<Object> caller;
+
+ // When we're passed a JSFunction as new target, we can skip frames until that
+ // specific function is seen instead of unconditionally skipping the first
+ // frame.
+ if (args.new_target()->IsJSFunction()) {
+ mode = SKIP_UNTIL_SEEN;
+ caller = args.new_target();
+ }
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, ErrorUtils::Construct(isolate, args.target<JSFunction>(),
+ Handle<Object>::cast(args.new_target()),
+ args.atOrUndefined(isolate, 1), mode,
+ caller, false));
+}
+
+// static
+BUILTIN(ErrorCaptureStackTrace) {
+ HandleScope scope(isolate);
+ Handle<Object> object_obj = args.atOrUndefined(isolate, 1);
+ if (!object_obj->IsJSObject()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalidArgument, object_obj));
+ }
+ Handle<JSObject> object = Handle<JSObject>::cast(object_obj);
+ Handle<Object> caller = args.atOrUndefined(isolate, 2);
+ FrameSkipMode mode = caller->IsJSFunction() ? SKIP_UNTIL_SEEN : SKIP_FIRST;
+
+ // Collect the stack trace.
+
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ isolate->CaptureAndSetDetailedStackTrace(object));
+
+ // Eagerly format the stack trace and set the stack property.
+
+ Handle<Object> stack_trace =
+ isolate->CaptureSimpleStackTrace(object, mode, caller);
+ if (!stack_trace->IsJSArray()) return *isolate->factory()->undefined_value();
+
+ Handle<Object> formatted_stack_trace;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, formatted_stack_trace,
+ ErrorUtils::FormatStackTrace(isolate, object, stack_trace));
+
+ PropertyDescriptor desc;
+ desc.set_configurable(true);
+ desc.set_writable(true);
+ desc.set_value(formatted_stack_trace);
+ Maybe<bool> status = JSReceiver::DefineOwnProperty(
+ isolate, object, isolate->factory()->stack_string(), &desc,
+ Object::THROW_ON_ERROR);
+ if (!status.IsJust()) return isolate->heap()->exception();
+ CHECK(status.FromJust());
+ return isolate->heap()->undefined_value();
+}
+
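The FrameSkipMode handling above is observable through V8's Error.captureStackTrace API: passing a function as the second argument hides every frame up to and including that function, and the formatted trace is installed eagerly as a writable, configurable "stack" data property. A sketch:

```js
function makeError(msg) {
  const err = new Error(msg);
  // Frames up to and including makeError are skipped (SKIP_UNTIL_SEEN).
  Error.captureStackTrace(err, makeError);
  return err;
}
console.log(makeError('boom').stack); // trace starts at makeError's caller
```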
+// ES6 section 19.5.3.4 Error.prototype.toString ( )
+BUILTIN(ErrorPrototypeToString) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ ErrorUtils::ToString(isolate, args.receiver()));
+}
+
+namespace {
+
+Object* MakeGenericError(Isolate* isolate, BuiltinArguments args,
+ Handle<JSFunction> constructor) {
+ Handle<Object> template_index = args.atOrUndefined(isolate, 1);
+ Handle<Object> arg0 = args.atOrUndefined(isolate, 2);
+ Handle<Object> arg1 = args.atOrUndefined(isolate, 3);
+ Handle<Object> arg2 = args.atOrUndefined(isolate, 4);
+
+ DCHECK(template_index->IsSmi());
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, ErrorUtils::MakeGenericError(isolate, constructor,
+ Smi::cast(*template_index)->value(),
+ arg0, arg1, arg2, SKIP_NONE));
+}
+
+} // namespace
+
+BUILTIN(MakeError) {
+ HandleScope scope(isolate);
+ return MakeGenericError(isolate, args, isolate->error_function());
+}
+
+BUILTIN(MakeRangeError) {
+ HandleScope scope(isolate);
+ return MakeGenericError(isolate, args, isolate->range_error_function());
+}
+
+BUILTIN(MakeSyntaxError) {
+ HandleScope scope(isolate);
+ return MakeGenericError(isolate, args, isolate->syntax_error_function());
+}
+
+BUILTIN(MakeTypeError) {
+ HandleScope scope(isolate);
+ return MakeGenericError(isolate, args, isolate->type_error_function());
+}
+
+BUILTIN(MakeURIError) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> constructor = isolate->uri_error_function();
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ const int template_index = MessageTemplate::kURIMalformed;
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ ErrorUtils::MakeGenericError(isolate, constructor, template_index,
+ undefined, undefined, undefined, SKIP_NONE));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
new file mode 100644
index 0000000000..0a631bff5c
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -0,0 +1,297 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/compiler.h"
+#include "src/string-builder.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// ES6 section 19.2.1.1.1 CreateDynamicFunction
+MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
+ BuiltinArguments args,
+ const char* token) {
+ // Compute number of arguments, ignoring the receiver.
+ DCHECK_LE(1, args.length());
+ int const argc = args.length() - 1;
+
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+
+ if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
+ isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined);
+ return isolate->factory()->undefined_value();
+ }
+
+ // Build the source string.
+ Handle<String> source;
+ {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCharacter('(');
+ builder.AppendCString(token);
+ builder.AppendCharacter('(');
+ bool parenthesis_in_arg_string = false;
+ if (argc > 1) {
+ for (int i = 1; i < argc; ++i) {
+ if (i > 1) builder.AppendCharacter(',');
+ Handle<String> param;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, param, Object::ToString(isolate, args.at<Object>(i)),
+ Object);
+ param = String::Flatten(param);
+ builder.AppendString(param);
+        // If the string of formal parameters includes ')' - an illegal
+        // character there - it may make the combined function expression
+        // compile. We avoid this problem by checking for it early on.
+ DisallowHeapAllocation no_gc; // Ensure vectors stay valid.
+ String::FlatContent param_content = param->GetFlatContent();
+ for (int i = 0, length = param->length(); i < length; ++i) {
+ if (param_content.Get(i) == ')') {
+ parenthesis_in_arg_string = true;
+ break;
+ }
+ }
+ }
+ // If the formal parameters include an unbalanced block comment, the
+ // function must be rejected. Since JavaScript does not allow nested
+ // comments we can include a trailing block comment to catch this.
+ builder.AppendCString("\n/**/");
+ }
+ builder.AppendCString(") {\n");
+ if (argc > 0) {
+ Handle<String> body;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
+ Object);
+ builder.AppendString(body);
+ }
+ builder.AppendCString("\n})");
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), Object);
+
+ // The SyntaxError must be thrown after all the (observable) ToString
+ // conversions are done.
+ if (parenthesis_in_arg_string) {
+ THROW_NEW_ERROR(isolate,
+ NewSyntaxError(MessageTemplate::kParenthesisInArgString),
+ Object);
+ }
+ }
+
+  // Compile the string in the constructor rather than in a helper so that
+  // errors appear to come from here.
+ Handle<JSFunction> function;
+ {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, function,
+ Compiler::GetFunctionFromString(
+ handle(target->native_context(), isolate),
+ source, ONLY_SINGLE_FUNCTION_LITERAL),
+ Object);
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, function, target_global_proxy, 0, nullptr),
+ Object);
+ function = Handle<JSFunction>::cast(result);
+ function->shared()->set_name_should_print_as_anonymous(true);
+ }
+
+  // If new.target is equal to target then the function created
+  // is already correctly set up and nothing else should be done
+  // here. But if new.target is not equal to target then we have
+  // a Function builtin subclassing case and therefore the
+  // function has the wrong initial map. To fix that we create a
+  // new function object with the correct initial map.
+ Handle<Object> unchecked_new_target = args.new_target();
+ if (!unchecked_new_target->IsUndefined(isolate) &&
+ !unchecked_new_target.is_identical_to(target)) {
+ Handle<JSReceiver> new_target =
+ Handle<JSReceiver>::cast(unchecked_new_target);
+ Handle<Map> initial_map;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, initial_map,
+ JSFunction::GetDerivedMap(isolate, target, new_target), Object);
+
+ Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
+ Handle<Map> map = Map::AsLanguageMode(
+ initial_map, shared_info->language_mode(), shared_info->kind());
+
+ Handle<Context> context(function->context(), isolate);
+ function = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ map, shared_info, context, NOT_TENURED);
+ }
+ return function;
+}
+
+} // namespace
+
+// ES6 section 19.2.1.1 Function ( p1, p2, ... , pn, body )
+BUILTIN(FunctionConstructor) {
+ HandleScope scope(isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, CreateDynamicFunction(isolate, args, "function"));
+ return *result;
+}
+
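The parenthesis scan in CreateDynamicFunction above guards the synthesized "(function(...) { ... })" wrapper: a ')' smuggled into a parameter could otherwise terminate the head early. For illustration:

```js
new Function('a', 'return a * 2')(21); // 42
new Function('a)', 'return a');        // throws SyntaxError, after the
                                       // observable ToString conversions
```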
+// ES6 section 25.2.1.1 GeneratorFunction (p1, p2, ... , pn, body)
+BUILTIN(GeneratorFunctionConstructor) {
+ HandleScope scope(isolate);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ CreateDynamicFunction(isolate, args, "function*"));
+}
+
+BUILTIN(AsyncFunctionConstructor) {
+ HandleScope scope(isolate);
+ Handle<Object> maybe_func;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, maybe_func,
+ CreateDynamicFunction(isolate, args, "async function"));
+ if (!maybe_func->IsJSFunction()) return *maybe_func;
+
+  // Do not lazily compute the eval position for an AsyncFunction, as it may
+  // not be determinable after the function is resumed.
+ Handle<JSFunction> func = Handle<JSFunction>::cast(maybe_func);
+ Handle<Script> script = handle(Script::cast(func->shared()->script()));
+ int position = script->GetEvalPosition();
+ USE(position);
+
+ return *func;
+}
+
+namespace {
+
+Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
+ HandleScope scope(isolate);
+ DCHECK_LE(1, args.length());
+ if (!args.receiver()->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kFunctionBind));
+ }
+
+ // Allocate the bound function with the given {this_arg} and {args}.
+ Handle<JSReceiver> target = args.at<JSReceiver>(0);
+ Handle<Object> this_arg = isolate->factory()->undefined_value();
+ ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
+ if (args.length() > 1) {
+ this_arg = args.at<Object>(1);
+ for (int i = 2; i < args.length(); ++i) {
+ argv[i - 2] = args.at<Object>(i);
+ }
+ }
+ Handle<JSBoundFunction> function;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, function,
+ isolate->factory()->NewJSBoundFunction(target, this_arg, argv));
+
+ LookupIterator length_lookup(target, isolate->factory()->length_string(),
+ target, LookupIterator::OWN);
+  // Set up the "length" property based on the "length" of the {target}.
+  // If the target's length is the default JSFunction accessor, we can keep the
+  // accessor that's installed by default on the JSBoundFunction. It lazily
+  // computes the value from the underlying internal length.
+ if (!target->IsJSFunction() ||
+ length_lookup.state() != LookupIterator::ACCESSOR ||
+ !length_lookup.GetAccessors()->IsAccessorInfo()) {
+ Handle<Object> length(Smi::FromInt(0), isolate);
+ Maybe<PropertyAttributes> attributes =
+ JSReceiver::GetPropertyAttributes(&length_lookup);
+ if (!attributes.IsJust()) return isolate->heap()->exception();
+ if (attributes.FromJust() != ABSENT) {
+ Handle<Object> target_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_length,
+ Object::GetProperty(&length_lookup));
+ if (target_length->IsNumber()) {
+ length = isolate->factory()->NewNumber(std::max(
+ 0.0, DoubleToInteger(target_length->Number()) - argv.length()));
+ }
+ }
+ LookupIterator it(function, isolate->factory()->length_string(), function);
+ DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, length, it.property_attributes()));
+ }
+
+  // Set up the "name" property based on the "name" of the {target}.
+  // If the target's name is the default JSFunction accessor, we can keep the
+  // accessor that's installed by default on the JSBoundFunction. It lazily
+  // computes the value from the underlying internal name.
+ LookupIterator name_lookup(target, isolate->factory()->name_string(), target,
+ LookupIterator::OWN);
+ if (!target->IsJSFunction() ||
+ name_lookup.state() != LookupIterator::ACCESSOR ||
+ !name_lookup.GetAccessors()->IsAccessorInfo()) {
+ Handle<Object> target_name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target_name,
+ Object::GetProperty(&name_lookup));
+ Handle<String> name;
+ if (target_name->IsString()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, name,
+ Name::ToFunctionName(Handle<String>::cast(target_name)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, name, isolate->factory()->NewConsString(
+ isolate->factory()->bound__string(), name));
+ } else {
+ name = isolate->factory()->bound__string();
+ }
+ LookupIterator it(function, isolate->factory()->name_string());
+ DCHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, name, it.property_attributes()));
+ }
+ return *function;
+}
+
+} // namespace
+
+// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
+BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
+
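DoFunctionBind above mirrors the spec's bound-function bookkeeping: "length" becomes max(0, target.length - bound arg count) and "name" gets a "bound " prefix, both steps skipped when the target still carries the default lazy accessors. Observable behavior:

```js
function add(a, b, c) { return a + b + c; }
const add1 = add.bind(null, 1);
add1.length;  // 2 -- max(0, add.length - 1)
add1.name;    // "bound add"
const add12 = add1.bind(null, 2);
add12.name;   // "bound bound add"
add12(3);     // 6
```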
+// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
+// can tailcall to the builtin directly.
+RUNTIME_FUNCTION(Runtime_FunctionBind) {
+ DCHECK_EQ(2, args.length());
+ Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
+ // Rewrap the arguments as builtins arguments.
+ int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
+ BuiltinArguments caller_args(argc, incoming->arguments() + 1);
+ return DoFunctionBind(isolate, caller_args);
+}
+
+// ES6 section 19.2.3.5 Function.prototype.toString ( )
+BUILTIN(FunctionPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (receiver->IsJSBoundFunction()) {
+ return *JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(receiver));
+ } else if (receiver->IsJSFunction()) {
+ return *JSFunction::ToString(Handle<JSFunction>::cast(receiver));
+ }
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Function.prototype.toString")));
+}
+
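Only JSFunction and JSBoundFunction receivers are accepted; anything else hits the kNotGeneric TypeError path:

```js
Function.prototype.toString.call(x => x); // source text of the function
Function.prototype.toString.call({});     // throws TypeError:
                                          // not a function receiver
```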
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
+void Builtins::Generate_FunctionPrototypeHasInstance(
+ CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* f = assembler->Parameter(0);
+ Node* v = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* result = assembler->OrdinaryHasInstance(context, f, v);
+ assembler->Return(result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-generator.cc b/deps/v8/src/builtins/builtins-generator.cc
new file mode 100644
index 0000000000..93b2e48cbd
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-generator.cc
@@ -0,0 +1,116 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void Generate_GeneratorPrototypeResume(
+ CodeStubAssembler* assembler, JSGeneratorObject::ResumeMode resume_mode,
+ char const* const method_name) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* value = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* closed =
+ assembler->SmiConstant(Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+
+ // Check if the {receiver} is actually a JSGeneratorObject.
+ Label if_receiverisincompatible(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+ Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+ assembler->GotoUnless(assembler->Word32Equal(
+ receiver_instance_type,
+ assembler->Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
+ &if_receiverisincompatible);
+
+ // Check if the {receiver} is running or already closed.
+ Node* receiver_continuation = assembler->LoadObjectField(
+ receiver, JSGeneratorObject::kContinuationOffset);
+ Label if_receiverisclosed(assembler, Label::kDeferred),
+ if_receiverisrunning(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->SmiEqual(receiver_continuation, closed),
+ &if_receiverisclosed);
+ DCHECK_LT(JSGeneratorObject::kGeneratorExecuting,
+ JSGeneratorObject::kGeneratorClosed);
+ assembler->GotoIf(assembler->SmiLessThan(receiver_continuation, closed),
+ &if_receiverisrunning);
+
+ // Resume the {receiver} using our trampoline.
+ Node* result = assembler->CallStub(
+ CodeFactory::ResumeGenerator(assembler->isolate()), context, value,
+ receiver, assembler->SmiConstant(Smi::FromInt(resume_mode)));
+ assembler->Return(result);
+
+ assembler->Bind(&if_receiverisincompatible);
+ {
+ // The {receiver} is not a valid JSGeneratorObject.
+ Node* result = assembler->CallRuntime(
+ Runtime::kThrowIncompatibleMethodReceiver, context,
+ assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ method_name, TENURED)),
+ receiver);
+ assembler->Return(result); // Never reached.
+ }
+
+ assembler->Bind(&if_receiverisclosed);
+ {
+ // The {receiver} is closed already.
+ Node* result = nullptr;
+ switch (resume_mode) {
+ case JSGeneratorObject::kNext:
+ result = assembler->CallRuntime(Runtime::kCreateIterResultObject,
+ context, assembler->UndefinedConstant(),
+ assembler->BooleanConstant(true));
+ break;
+ case JSGeneratorObject::kReturn:
+ result =
+ assembler->CallRuntime(Runtime::kCreateIterResultObject, context,
+ value, assembler->BooleanConstant(true));
+ break;
+ case JSGeneratorObject::kThrow:
+ result = assembler->CallRuntime(Runtime::kThrow, context, value);
+ break;
+ }
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_receiverisrunning);
+ {
+ Node* result =
+ assembler->CallRuntime(Runtime::kThrowGeneratorRunning, context);
+ assembler->Return(result); // Never reached.
+ }
+}
+
+} // anonymous namespace
+
+// ES6 section 25.3.1.2 Generator.prototype.next ( value )
+void Builtins::Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler) {
+ Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kNext,
+ "[Generator].prototype.next");
+}
+
+// ES6 section 25.3.1.3 Generator.prototype.return ( value )
+void Builtins::Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler) {
+ Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kReturn,
+ "[Generator].prototype.return");
+}
+
+// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
+void Builtins::Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler) {
+ Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kThrow,
+ "[Generator].prototype.throw");
+}
+
+} // namespace internal
+} // namespace v8
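The resume_mode switch above fixes the behavior of an already-closed generator: next() keeps reporting a done result, return() echoes its argument as a done result, and throw() simply rethrows. For illustration:

```js
function* gen() { yield 1; }
const g = gen();
g.next();          // { value: 1, done: false }
g.next();          // { value: undefined, done: true } -- generator is now closed
g.next();          // { value: undefined, done: true }
g.return(42);      // { value: 42, done: true }
g.throw(new Error('x')); // throws the error; a closed generator cannot catch it
```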
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
new file mode 100644
index 0000000000..d99a553d1e
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -0,0 +1,103 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/compiler.h"
+#include "src/uri.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 18.2.6.2 decodeURI (encodedURI)
+BUILTIN(GlobalDecodeURI) {
+ HandleScope scope(isolate);
+ Handle<String> encoded_uri;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, encoded_uri,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+ RETURN_RESULT_OR_FAILURE(isolate, Uri::DecodeUri(isolate, encoded_uri));
+}
+
+// ES6 section 18.2.6.3 decodeURIComponent (encodedURIComponent)
+BUILTIN(GlobalDecodeURIComponent) {
+ HandleScope scope(isolate);
+ Handle<String> encoded_uri_component;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, encoded_uri_component,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Uri::DecodeUriComponent(isolate, encoded_uri_component));
+}
+
+// ES6 section 18.2.6.4 encodeURI (uri)
+BUILTIN(GlobalEncodeURI) {
+ HandleScope scope(isolate);
+ Handle<String> uri;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, uri, Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+ RETURN_RESULT_OR_FAILURE(isolate, Uri::EncodeUri(isolate, uri));
+}
+
+// ES6 section 18.2.6.5 encodeURIComponent (uriComponent)
+BUILTIN(GlobalEncodeURIComponent) {
+ HandleScope scope(isolate);
+ Handle<String> uri_component;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, uri_component,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Uri::EncodeUriComponent(isolate, uri_component));
+}
+
+// ES6 section B.2.1.1 escape (string)
+BUILTIN(GlobalEscape) {
+ HandleScope scope(isolate);
+ Handle<String> string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, string,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+ RETURN_RESULT_OR_FAILURE(isolate, Uri::Escape(isolate, string));
+}
+
+// ES6 section B.2.1.2 unescape (string)
+BUILTIN(GlobalUnescape) {
+ HandleScope scope(isolate);
+ Handle<String> string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, string,
+ Object::ToString(isolate, args.atOrUndefined(isolate, 1)));
+
+ RETURN_RESULT_OR_FAILURE(isolate, Uri::Unescape(isolate, string));
+}
+
+// ES6 section 18.2.1 eval (x)
+BUILTIN(GlobalEval) {
+ HandleScope scope(isolate);
+ Handle<Object> x = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
+ if (!x->IsString()) return *x;
+ if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
+ isolate->CountUsage(v8::Isolate::kFunctionConstructorReturnedUndefined);
+ return isolate->heap()->undefined_value();
+ }
+ Handle<JSFunction> function;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, function, Compiler::GetFunctionFromString(
+ handle(target->native_context(), isolate),
+ Handle<String>::cast(x), NO_PARSE_RESTRICTION));
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
+}
+
+} // namespace internal
+} // namespace v8
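Note the early-outs in GlobalEval above: a non-string argument is returned unchanged, and strings are compiled against the caller's global proxy. This builtin backs the indirect/global eval path (direct eval calls are recognized earlier, during parsing); illustrative JavaScript:

```js
eval(42);           // 42 -- non-string arguments are returned as-is
(0, eval)('6 * 7'); // 42 -- indirect eval runs the string in global scope
```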
diff --git a/deps/v8/src/builtins/builtins-handler.cc b/deps/v8/src/builtins/builtins-handler.cc
new file mode 100644
index 0000000000..8b3df7927f
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-handler.cc
@@ -0,0 +1,148 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateMegamorphic(masm);
+}
+
+void Builtins::Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateMiss(masm);
+}
+void Builtins::Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateRuntimeGetProperty(masm);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
+}
+
+void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
+ KeyedStoreIC::GenerateMiss(masm);
+}
+
+void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+ ElementHandlerCompiler::GenerateStoreSlow(masm);
+}
+
+void Builtins::Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef LoadGlobalWithVectorDescriptor Descriptor;
+
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, slot,
+ vector);
+}
+
+void Builtins::Generate_LoadGlobalIC_Slow(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef LoadGlobalWithVectorDescriptor Descriptor;
+
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, slot,
+ vector);
+}
+
+void Builtins::Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
+ NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
+}
+
+void Builtins::Generate_LoadIC_Miss(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
+ slot, vector);
+}
+
+void Builtins::Generate_LoadIC_Normal(MacroAssembler* masm) {
+ LoadIC::GenerateNormal(masm);
+}
+
+void Builtins::Generate_LoadIC_Slow(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+}
+
+void Builtins::Generate_StoreIC_Miss(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, receiver, name,
+ value, slot, vector);
+}
+
+void Builtins::Generate_StoreIC_Normal(MacroAssembler* masm) {
+ StoreIC::GenerateNormal(masm);
+}
+
+void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
+ NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
+}
+
+namespace {
+void Generate_StoreIC_Slow(CodeStubAssembler* assembler,
+ LanguageMode language_mode) {
+ typedef compiler::Node Node;
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* lang_mode = assembler->SmiConstant(Smi::FromInt(language_mode));
+
+ // The slow case calls into the runtime to complete the store without causing
+ // an IC miss that would otherwise cause a transition to the generic stub.
+ assembler->TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
+ value, lang_mode);
+}
+} // anonymous namespace
+
+void Builtins::Generate_StoreIC_SlowSloppy(CodeStubAssembler* assembler) {
+ Generate_StoreIC_Slow(assembler, SLOPPY);
+}
+
+void Builtins::Generate_StoreIC_SlowStrict(CodeStubAssembler* assembler) {
+ Generate_StoreIC_Slow(assembler, STRICT);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
new file mode 100644
index 0000000000..87c5dd549c
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -0,0 +1,145 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/interface-descriptors.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+BUILTIN(Illegal) {
+ UNREACHABLE();
+ return isolate->heap()->undefined_value(); // Make compiler happy.
+}
+
+BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
+
+BUILTIN(UnsupportedThrower) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewError(MessageTemplate::kUnsupported));
+}
+
+// -----------------------------------------------------------------------------
+// Throwers for restricted function properties and strict arguments object
+// properties
+
+BUILTIN(RestrictedFunctionPropertiesThrower) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kRestrictedFunctionProperties));
+}
+
+BUILTIN(RestrictedStrictArgumentsPropertiesThrower) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
+}
+
+// -----------------------------------------------------------------------------
+// Interrupt and stack checks.
+
+void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kInterrupt);
+}
+
+void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kStackGuard);
+}
+
+// -----------------------------------------------------------------------------
+// TurboFan support builtins.
+
+void Builtins::Generate_CopyFastSmiOrObjectElements(
+ CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CopyFastSmiOrObjectElementsDescriptor Descriptor;
+
+ Node* object = assembler->Parameter(Descriptor::kObject);
+
+  // Load the {object}'s elements.
+ Node* source = assembler->LoadObjectField(object, JSObject::kElementsOffset);
+
+ CodeStubAssembler::ParameterMode mode =
+ assembler->Is64() ? CodeStubAssembler::INTEGER_PARAMETERS
+ : CodeStubAssembler::SMI_PARAMETERS;
+ Node* length = (mode == CodeStubAssembler::INTEGER_PARAMETERS)
+ ? assembler->LoadAndUntagFixedArrayBaseLength(source)
+ : assembler->LoadFixedArrayBaseLength(source);
+
+ // Check if we can allocate in new space.
+ ElementsKind kind = FAST_ELEMENTS;
+ int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
+ Label if_newspace(assembler), if_oldspace(assembler);
+ assembler->Branch(
+ assembler->UintPtrLessThan(
+ length, assembler->IntPtrOrSmiConstant(max_elements, mode)),
+ &if_newspace, &if_oldspace);
+
+ assembler->Bind(&if_newspace);
+ {
+ Node* target = assembler->AllocateFixedArray(kind, length, mode);
+ assembler->CopyFixedArrayElements(kind, source, target, length,
+ SKIP_WRITE_BARRIER, mode);
+ assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
+ assembler->Return(target);
+ }
+
+ assembler->Bind(&if_oldspace);
+ {
+ Node* target = assembler->AllocateFixedArray(
+ kind, length, mode, CodeStubAssembler::kPretenured);
+ assembler->CopyFixedArrayElements(kind, source, target, length,
+ UPDATE_WRITE_BARRIER, mode);
+ assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
+ assembler->Return(target);
+ }
+}
+
+void Builtins::Generate_GrowFastDoubleElements(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef GrowArrayElementsDescriptor Descriptor;
+
+ Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* key = assembler->Parameter(Descriptor::kKey);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
+ Node* elements = assembler->LoadElements(object);
+ elements = assembler->CheckAndGrowElementsCapacity(
+ context, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
+ assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+ assembler->Return(elements);
+
+ assembler->Bind(&runtime);
+ assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+void Builtins::Generate_GrowFastSmiOrObjectElements(
+ CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef GrowArrayElementsDescriptor Descriptor;
+
+ Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* key = assembler->Parameter(Descriptor::kKey);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
+ Node* elements = assembler->LoadElements(object);
+ elements = assembler->CheckAndGrowElementsCapacity(
+ context, elements, FAST_ELEMENTS, key, &runtime);
+ assembler->StoreObjectField(object, JSObject::kElementsOffset, elements);
+ assembler->Return(elements);
+
+ assembler->Bind(&runtime);
+ assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
new file mode 100644
index 0000000000..900172fd48
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-interpreter.cc
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode,
+ CallableType function_type) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ if (function_type == CallableType::kJSFunction) {
+ return InterpreterPushArgsAndCallFunction();
+ } else {
+ return InterpreterPushArgsAndCall();
+ }
+ case TailCallMode::kAllow:
+ if (function_type == CallableType::kJSFunction) {
+ return InterpreterPushArgsAndTailCallFunction();
+ } else {
+ return InterpreterPushArgsAndTailCall();
+ }
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
+
+void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
+ CallableType::kAny);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndCallFunction(
+ MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
+ CallableType::kJSFunction);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndTailCall(MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
+ CallableType::kAny);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndTailCallFunction(
+ MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
+ CallableType::kJSFunction);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-json.cc b/deps/v8/src/builtins/builtins-json.cc
new file mode 100644
index 0000000000..4a8c7c5ea8
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-json.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/json-parser.h"
+#include "src/json-stringifier.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 24.3.1 JSON.parse.
+BUILTIN(JsonParse) {
+ HandleScope scope(isolate);
+ Handle<Object> source = args.atOrUndefined(isolate, 1);
+ Handle<Object> reviver = args.atOrUndefined(isolate, 2);
+ Handle<String> string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
+ Object::ToString(isolate, source));
+ string = String::Flatten(string);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, string->IsSeqOneByteString()
+ ? JsonParser<true>::Parse(isolate, string, reviver)
+ : JsonParser<false>::Parse(isolate, string, reviver));
+}
+
+// ES6 section 24.3.2 JSON.stringify.
+BUILTIN(JsonStringify) {
+ HandleScope scope(isolate);
+ JsonStringifier stringifier(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<Object> replacer = args.atOrUndefined(isolate, 2);
+ Handle<Object> indent = args.atOrUndefined(isolate, 3);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ stringifier.Stringify(object, replacer, indent));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
new file mode 100644
index 0000000000..e8d429ebac
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -0,0 +1,561 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 20.2.2 Function Properties of the Math Object
+
+// ES6 section - 20.2.2.1 Math.abs ( x )
+void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
+ using compiler::Node;
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Abs(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.2 Math.acos ( x )
+void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Acos(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.3 Math.acosh ( x )
+void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Acosh(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Asin(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.5 Math.asinh ( x )
+void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Asinh(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Atan(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Atanh(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* y = assembler->Parameter(1);
+ Node* x = assembler->Parameter(2);
+ Node* context = assembler->Parameter(5);
+ Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Atan2(y_value, x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+namespace {
+
+void Generate_MathRoundingOperation(
+ CodeStubAssembler* assembler,
+ compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(4);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_x(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_x);
+ var_x.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {x} value.
+ Node* x = var_x.value();
+
+ // Check if {x} is a Smi or a HeapObject.
+ Label if_xissmi(assembler), if_xisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+ assembler->Bind(&if_xissmi);
+ {
+ // Nothing to do when {x} is a Smi.
+ assembler->Return(x);
+ }
+
+ assembler->Bind(&if_xisnotsmi);
+ {
+ // Check if {x} is a HeapNumber.
+ Label if_xisheapnumber(assembler),
+ if_xisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->WordEqual(assembler->LoadMap(x),
+ assembler->HeapNumberMapConstant()),
+ &if_xisheapnumber, &if_xisnotheapnumber);
+
+ assembler->Bind(&if_xisheapnumber);
+ {
+ Node* x_value = assembler->LoadHeapNumberValue(x);
+ Node* value = (assembler->*float64op)(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_xisnotheapnumber);
+ {
+ // Need to convert {x} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_x.Bind(assembler->CallStub(callable, context, x));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+}
+
+} // namespace
+
+// ES6 section 20.2.2.10 Math.ceil ( x )
+void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
+}
+
+// ES6 section 20.2.2.9 Math.cbrt ( x )
+void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Cbrt(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.11 Math.clz32 ( x )
+void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(4);
+
+ // Shared entry point for the clz32 operation.
+ Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
+ Label do_clz32(assembler);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_x(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_x);
+ var_x.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {x} value.
+ Node* x = var_x.value();
+
+ // Check if {x} is a Smi or a HeapObject.
+ Label if_xissmi(assembler), if_xisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+
+ assembler->Bind(&if_xissmi);
+ {
+ var_clz32_x.Bind(assembler->SmiToWord32(x));
+ assembler->Goto(&do_clz32);
+ }
+
+ assembler->Bind(&if_xisnotsmi);
+ {
+ // Check if {x} is a HeapNumber.
+ Label if_xisheapnumber(assembler),
+ if_xisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->WordEqual(assembler->LoadMap(x),
+ assembler->HeapNumberMapConstant()),
+ &if_xisheapnumber, &if_xisnotheapnumber);
+
+ assembler->Bind(&if_xisheapnumber);
+ {
+ var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
+ assembler->Goto(&do_clz32);
+ }
+
+ assembler->Bind(&if_xisnotheapnumber);
+ {
+ // Need to convert {x} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_x.Bind(assembler->CallStub(callable, context, x));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_clz32);
+ {
+ Node* x_value = var_clz32_x.value();
+ Node* value = assembler->Word32Clz(x_value);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ assembler->Return(result);
+ }
+}
+
+// ES6 section 20.2.2.12 Math.cos ( x )
+void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Cos(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.13 Math.cosh ( x )
+void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Cosh(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.14 Math.exp ( x )
+void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Exp(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.16 Math.floor ( x )
+void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
+}
+
+// ES6 section 20.2.2.17 Math.fround ( x )
+void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
+ Node* value = assembler->ChangeFloat32ToFloat64(value32);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values )
+BUILTIN(MathHypot) {
+ HandleScope scope(isolate);
+ int const length = args.length() - 1;
+ if (length == 0) return Smi::FromInt(0);
+ DCHECK_LT(0, length);
+ double max = 0;
+ bool one_arg_is_nan = false;
+ List<double> abs_values(length);
+ for (int i = 0; i < length; i++) {
+ Handle<Object> x = args.at<Object>(i + 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
+ double abs_value = std::abs(x->Number());
+
+ if (std::isnan(abs_value)) {
+ one_arg_is_nan = true;
+ } else {
+ abs_values.Add(abs_value);
+ if (max < abs_value) {
+ max = abs_value;
+ }
+ }
+ }
+
+ if (max == V8_INFINITY) {
+ return *isolate->factory()->NewNumber(V8_INFINITY);
+ }
+
+ if (one_arg_is_nan) {
+ return *isolate->factory()->nan_value();
+ }
+
+ if (max == 0) {
+ return Smi::FromInt(0);
+ }
+ DCHECK_GT(max, 0);
+
+ // Kahan summation to avoid rounding errors.
+ // Normalize the numbers to the largest one to avoid overflow.
+ double sum = 0;
+ double compensation = 0;
+ for (int i = 0; i < length; i++) {
+ double n = abs_values.at(i) / max;
+ double summand = n * n - compensation;
+ double preliminary = sum + summand;
+ compensation = (preliminary - sum) - summand;
+ sum = preliminary;
+ }
+
+ return *isolate->factory()->NewNumber(std::sqrt(sum) * max);
+}
+
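Dividing every addend by the running maximum, as above, keeps the squared intermediates in range where a naive sqrt(x*x + y*y) would overflow; the Kahan compensation then limits rounding error. The difference is observable:

```js
Math.hypot(3, 4);                                // 5
Math.hypot(3e200, 4e200);                        // 5e200 -- normalization avoids overflow
Math.sqrt(3e200 * 3e200 + 4e200 * 4e200);        // Infinity -- the naive formula overflows
Math.hypot(1, NaN);                              // NaN (no argument was infinite)
Math.hypot();                                    // 0 (the length == 0 early return)
```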
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+void Builtins::Generate_MathImul(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* y = assembler->Parameter(2);
+ Node* context = assembler->Parameter(5);
+ Node* x_value = assembler->TruncateTaggedToWord32(context, x);
+ Node* y_value = assembler->TruncateTaggedToWord32(context, y);
+ Node* value = assembler->Int32Mul(x_value, y_value);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ assembler->Return(result);
+}
+
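TruncateTaggedToWord32 plus Int32Mul give Math.imul its ToInt32 semantics, i.e. multiplication modulo 2^32 with a signed 32-bit result:

```js
Math.imul(3, 4);                   // 12
Math.imul(0x7fffffff, 2);          // -2 -- the product wraps around int32
Math.imul(2147483648, 2147483648); // 0 -- 2^31 truncates to -2^31, and
                                   // (-2^31)^2 mod 2^32 is 0
```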
+// ES6 section 20.2.2.20 Math.log ( x )
+void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Log(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.21 Math.log1p ( x )
+void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Log1p(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.22 Math.log10 ( x )
+void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Log10(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.23 Math.log2 ( x )
+void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Log2(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Expm1(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.26 Math.pow ( x, y )
+void Builtins::Generate_MathPow(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* y = assembler->Parameter(2);
+ Node* context = assembler->Parameter(5);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
+ Node* value = assembler->Float64Pow(x_value, y_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.28 Math.round ( x )
+void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
+}
+
+// ES6 section 20.2.2.29 Math.sign ( x )
+void Builtins::Generate_MathSign(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ using compiler::Node;
+
+ // Convert the {x} value to a Number.
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+
+  // Return -1 if {x} is negative, 1 if {x} is positive, and otherwise return
+  // {x} itself, which correctly propagates NaN and preserves ±0.
+ Label if_xisnegative(assembler), if_xispositive(assembler);
+ assembler->GotoIf(
+ assembler->Float64LessThan(x_value, assembler->Float64Constant(0.0)),
+ &if_xisnegative);
+ assembler->GotoIf(
+ assembler->Float64LessThan(assembler->Float64Constant(0.0), x_value),
+ &if_xispositive);
+ assembler->Return(assembler->ChangeFloat64ToTagged(x_value));
+
+ assembler->Bind(&if_xisnegative);
+ assembler->Return(assembler->SmiConstant(Smi::FromInt(-1)));
+
+ assembler->Bind(&if_xispositive);
+ assembler->Return(assembler->SmiConstant(Smi::FromInt(1)));
+}
+
+// ES6 section 20.2.2.30 Math.sin ( x )
+void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Sin(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.31 Math.sinh ( x )
+void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Sinh(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.32 Math.sqrt ( x )
+void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Sqrt(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.33 Math.tan ( x )
+void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Tan(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.34 Math.tanh ( x )
+void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Tanh(x_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+}
+
+// ES6 section 20.2.2.35 Math.trunc ( x )
+void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
+ Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
+}
+
+void Builtins::Generate_MathMax(MacroAssembler* masm) {
+ Generate_MathMaxMin(masm, MathMaxMinKind::kMax);
+}
+
+void Builtins::Generate_MathMin(MacroAssembler* masm) {
+ Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
new file mode 100644
index 0000000000..c2af0fdecf
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -0,0 +1,235 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 20.1 Number Objects
+
+// ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
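+// e.g. (123.456).toExponential(2) === "1.23e+2".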
+BUILTIN(NumberPrototypeToExponential) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
+
+ // Unwrap the receiver {value}.
+ if (value->IsJSValue()) {
+ value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ }
+ if (!value->IsNumber()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Number.prototype.toExponential")));
+ }
+ double const value_number = value->Number();
+
+ // Convert the {fraction_digits} to an integer first.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, fraction_digits, Object::ToInteger(isolate, fraction_digits));
+ double const fraction_digits_number = fraction_digits->Number();
+
+ if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isinf(value_number)) {
+ return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+ : isolate->heap()->infinity_string();
+ }
+ if (fraction_digits_number < 0.0 || fraction_digits_number > 20.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kNumberFormatRange,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "toExponential()")));
+ }
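+  // A fraction digit count of -1 asks DoubleToExponentialCString for as many
+  // digits as needed to represent the value (shortest round-trip form).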
+ int const f = args.atOrUndefined(isolate, 1)->IsUndefined(isolate)
+ ? -1
+ : static_cast<int>(fraction_digits_number);
+ char* const str = DoubleToExponentialCString(value_number, f);
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+// ES6 section 20.1.3.3 Number.prototype.toFixed ( fractionDigits )
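+// e.g. (123.456).toFixed(2) === "123.46".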
+BUILTIN(NumberPrototypeToFixed) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
+
+ // Unwrap the receiver {value}.
+ if (value->IsJSValue()) {
+ value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ }
+ if (!value->IsNumber()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Number.prototype.toFixed")));
+ }
+ double const value_number = value->Number();
+
+ // Convert the {fraction_digits} to an integer first.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, fraction_digits, Object::ToInteger(isolate, fraction_digits));
+ double const fraction_digits_number = fraction_digits->Number();
+
+ // Check if the {fraction_digits} are in the supported range.
+ if (fraction_digits_number < 0.0 || fraction_digits_number > 20.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kNumberFormatRange,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "toFixed() digits")));
+ }
+
+ if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isinf(value_number)) {
+ return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+ : isolate->heap()->infinity_string();
+ }
+ char* const str = DoubleToFixedCString(
+ value_number, static_cast<int>(fraction_digits_number));
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ reserved1 [ , reserved2 ] ] )
+BUILTIN(NumberPrototypeToLocaleString) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.at<Object>(0);
+
+ // Unwrap the receiver {value}.
+ if (value->IsJSValue()) {
+ value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ }
+ if (!value->IsNumber()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Number.prototype.toLocaleString")));
+ }
+
+ // Turn the {value} into a String.
+ return *isolate->factory()->NumberToString(value);
+}
+
+// ES6 section 20.1.3.5 Number.prototype.toPrecision ( precision )
+BUILTIN(NumberPrototypeToPrecision) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Object> precision = args.atOrUndefined(isolate, 1);
+
+ // Unwrap the receiver {value}.
+ if (value->IsJSValue()) {
+ value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ }
+ if (!value->IsNumber()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Number.prototype.toPrecision")));
+ }
+ double const value_number = value->Number();
+
+ // If no {precision} was specified, just return ToString of {value}.
+ if (precision->IsUndefined(isolate)) {
+ return *isolate->factory()->NumberToString(value);
+ }
+
+ // Convert the {precision} to an integer first.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, precision,
+ Object::ToInteger(isolate, precision));
+ double const precision_number = precision->Number();
+
+ if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isinf(value_number)) {
+ return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+ : isolate->heap()->infinity_string();
+ }
+ if (precision_number < 1.0 || precision_number > 21.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kToPrecisionFormatRange));
+ }
+ char* const str = DoubleToPrecisionCString(
+ value_number, static_cast<int>(precision_number));
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+// ES6 section 20.1.3.6 Number.prototype.toString ( [ radix ] )
+BUILTIN(NumberPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.at<Object>(0);
+ Handle<Object> radix = args.atOrUndefined(isolate, 1);
+
+ // Unwrap the receiver {value}.
+ if (value->IsJSValue()) {
+ value = handle(Handle<JSValue>::cast(value)->value(), isolate);
+ }
+ if (!value->IsNumber()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Number.prototype.toString")));
+ }
+ double const value_number = value->Number();
+
+ // If no {radix} was specified, just return ToString of {value}.
+ if (radix->IsUndefined(isolate)) {
+ return *isolate->factory()->NumberToString(value);
+ }
+
+ // Convert the {radix} to an integer first.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix,
+ Object::ToInteger(isolate, radix));
+ double const radix_number = radix->Number();
+
+ // If {radix} is 10, just return ToString of {value}.
+ if (radix_number == 10.0) return *isolate->factory()->NumberToString(value);
+
+ // Make sure the {radix} is within the valid range.
+ if (radix_number < 2.0 || radix_number > 36.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kToRadixFormatRange));
+ }
+
+ // Fast case where the result is a one character string.
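+  // e.g. (11).toString(16) === "b".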
+ if (IsUint32Double(value_number) && value_number < radix_number) {
+ // Character array used for conversion.
+ static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+ return *isolate->factory()->LookupSingleCharacterStringFromCode(
+ kCharTable[static_cast<uint32_t>(value_number)]);
+ }
+
+ // Slow case.
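+  // e.g. (255).toString(16) lands here and yields "ff".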
+ if (std::isnan(value_number)) return isolate->heap()->nan_string();
+ if (std::isinf(value_number)) {
+ return (value_number < 0.0) ? isolate->heap()->minus_infinity_string()
+ : isolate->heap()->infinity_string();
+ }
+ char* const str =
+ DoubleToRadixCString(value_number, static_cast<int>(radix_number));
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
+ DeleteArray(str);
+ return *result;
+}
+
+// ES6 section 20.1.3.7 Number.prototype.valueOf ( )
+void Builtins::Generate_NumberPrototypeValueOf(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* result = assembler->ToThisValue(
+ context, receiver, PrimitiveType::kNumber, "Number.prototype.valueOf");
+ assembler->Return(result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
new file mode 100644
index 0000000000..c422145a51
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -0,0 +1,914 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+#include "src/property-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.1 Object Objects
+
+void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* object = assembler->Parameter(0);
+ Node* key = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+
+ Label call_runtime(assembler), return_true(assembler),
+ return_false(assembler);
+
+ // Smi receivers do not have own properties.
+ Label if_objectisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(object), &return_false,
+ &if_objectisnotsmi);
+ assembler->Bind(&if_objectisnotsmi);
+
+ Node* map = assembler->LoadMap(object);
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+
+ Variable var_index(assembler, MachineRepresentation::kWord32);
+
+ Label keyisindex(assembler), if_iskeyunique(assembler);
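+  // TryToName dispatches on the key: array indices jump to {keyisindex},
+  // unique names to {if_iskeyunique}, and anything else falls back to the
+  // runtime.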
+ assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
+ &call_runtime);
+
+ assembler->Bind(&if_iskeyunique);
+ assembler->TryHasOwnProperty(object, map, instance_type, key, &return_true,
+ &return_false, &call_runtime);
+
+ assembler->Bind(&keyisindex);
+ assembler->TryLookupElement(object, map, instance_type, var_index.value(),
+ &return_true, &return_false, &call_runtime);
+
+ assembler->Bind(&return_true);
+ assembler->Return(assembler->BooleanConstant(true));
+
+ assembler->Bind(&return_false);
+ assembler->Return(assembler->BooleanConstant(false));
+
+ assembler->Bind(&call_runtime);
+ assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
+ context, object, key));
+}
+
+namespace {
+
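+// Fast path for Object.assign: copy enumerable own data properties straight
+// off the descriptor array when the source is a simple fast-mode JSObject
+// without elements. e.g. Object.assign({}, {a: 1}) qualifies, whereas
+// Object.assign({}, [1, 2]) bails out to the generic path in the builtin.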
+MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
+ Handle<Object> next_source) {
+ // Non-empty strings are the only non-JSReceivers that need to be handled
+ // explicitly by Object.assign.
+ if (!next_source->IsJSReceiver()) {
+ return Just(!next_source->IsString() ||
+ String::cast(*next_source)->length() == 0);
+ }
+
+ // If the target is deprecated, the object will be updated on first store. If
+ // the source for that store equals the target, this will invalidate the
+ // cached representation of the source. Preventively upgrade the target.
+ // Do this on each iteration since any property load could cause deprecation.
+ if (to->map()->is_deprecated()) {
+ JSObject::MigrateInstance(Handle<JSObject>::cast(to));
+ }
+
+ Isolate* isolate = to->GetIsolate();
+ Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
+
+ if (!map->IsJSObjectMap()) return Just(false);
+ if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+ Handle<JSObject> from = Handle<JSObject>::cast(next_source);
+ if (from->elements() != isolate->heap()->empty_fixed_array()) {
+ return Just(false);
+ }
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int length = map->NumberOfOwnDescriptors();
+
+ bool stable = true;
+
+ for (int i = 0; i < length; i++) {
+ Handle<Name> next_key(descriptors->GetKey(i), isolate);
+ Handle<Object> prop_value;
+ // Directly decode from the descriptor array if |from| did not change shape.
+ if (stable) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (!details.IsEnumerable()) continue;
+ if (details.kind() == kData) {
+ if (details.location() == kDescriptor) {
+ prop_value = handle(descriptors->GetValue(i), isolate);
+ } else {
+ Representation representation = details.representation();
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ prop_value = JSObject::FastPropertyAt(from, representation, index);
+ }
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, JSReceiver::GetProperty(from, next_key),
+ Nothing<bool>());
+ stable = from->map() == *map;
+ }
+ } else {
+ // If the map did change, do a slower lookup. We are still guaranteed that
+ // the object has a simple shape, and that the key is a name.
+ LookupIterator it(from, next_key, from,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) continue;
+ DCHECK(it.state() == LookupIterator::DATA ||
+ it.state() == LookupIterator::ACCESSOR);
+ if (!it.IsEnumerable()) continue;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+ }
+ LookupIterator it(to, next_key, to);
+ bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
+ Maybe<bool> result = Object::SetProperty(
+ &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ if (result.IsNothing()) return result;
+ if (stable && call_to_js) stable = from->map() == *map;
+ }
+
+ return Just(true);
+}
+
+} // namespace
+
+// ES6 19.1.2.1 Object.assign
+BUILTIN(ObjectAssign) {
+ HandleScope scope(isolate);
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+
+ // 1. Let to be ? ToObject(target).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target,
+ Object::ToObject(isolate, target));
+ Handle<JSReceiver> to = Handle<JSReceiver>::cast(target);
+ // 2. If only one argument was passed, return to.
+ if (args.length() == 2) return *to;
+ // 3. Let sources be the List of argument values starting with the
+ // second argument.
+ // 4. For each element nextSource of sources, in ascending index order,
+ for (int i = 2; i < args.length(); ++i) {
+ Handle<Object> next_source = args.at<Object>(i);
+ Maybe<bool> fast_assign = FastAssign(to, next_source);
+ if (fast_assign.IsNothing()) return isolate->heap()->exception();
+ if (fast_assign.FromJust()) continue;
+ // 4a. If nextSource is undefined or null, let keys be an empty List.
+ // 4b. Else,
+ // 4b i. Let from be ToObject(nextSource).
+ // Only non-empty strings and JSReceivers have enumerable properties.
+ Handle<JSReceiver> from =
+ Object::ToObject(isolate, next_source).ToHandleChecked();
+ // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, KeyAccumulator::GetKeys(
+ from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+ GetKeysConversion::kKeepNumbers));
+ // 4c. Repeat for each element nextKey of keys in List order,
+ for (int j = 0; j < keys->length(); ++j) {
+ Handle<Object> next_key(keys->get(j), isolate);
+ // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
+ if (found.IsNothing()) return isolate->heap()->exception();
+ // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
+ if (found.FromJust() && desc.enumerable()) {
+ // 4c ii 1. Let propValue be ? Get(from, nextKey).
+ Handle<Object> prop_value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prop_value,
+ Runtime::GetObjectProperty(isolate, from, next_key));
+ // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
+ Handle<Object> status;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
+ prop_value, STRICT));
+ }
+ }
+ }
+ // 5. Return to.
+ return *to;
+}
+
+// ES6 section 19.1.3.4 Object.prototype.propertyIsEnumerable ( V )
+BUILTIN(ObjectPrototypePropertyIsEnumerable) {
+ HandleScope scope(isolate);
+ Handle<JSReceiver> object;
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, name, Object::ToName(isolate, args.atOrUndefined(isolate, 1)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, object, JSReceiver::ToObject(isolate, args.receiver()));
+ Maybe<PropertyAttributes> maybe =
+ JSReceiver::GetOwnPropertyAttributes(object, name);
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.FromJust() == ABSENT) return isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
+}
+
+namespace { // anonymous namespace for ObjectProtoToString()
+
+void IsString(CodeStubAssembler* assembler, compiler::Node* object,
+ CodeStubAssembler::Label* if_string,
+ CodeStubAssembler::Label* if_notstring) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+
+ Label if_notsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(object), if_notstring, &if_notsmi);
+
+ assembler->Bind(&if_notsmi);
+ {
+ Node* instance_type = assembler->LoadInstanceType(object);
+
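+    // String instance types are allocated below FIRST_NONSTRING_TYPE, so a
+    // single comparison classifies the object.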
+ assembler->Branch(
+ assembler->Int32LessThan(
+ instance_type, assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ if_string, if_notstring);
+ }
+}
+
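+// Assembles and returns the string "[object " + {string} + "]".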
+void ReturnToStringFormat(CodeStubAssembler* assembler, compiler::Node* context,
+ compiler::Node* string) {
+ typedef compiler::Node Node;
+
+ Node* lhs = assembler->HeapConstant(
+ assembler->factory()->NewStringFromStaticChars("[object "));
+ Node* rhs = assembler->HeapConstant(
+ assembler->factory()->NewStringFromStaticChars("]"));
+
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+
+ assembler->Return(assembler->CallStub(
+ callable, context, assembler->CallStub(callable, context, lhs, string),
+ rhs));
+}
+
+void ReturnIfPrimitive(CodeStubAssembler* assembler,
+ compiler::Node* instance_type,
+ CodeStubAssembler::Label* return_string,
+ CodeStubAssembler::Label* return_boolean,
+ CodeStubAssembler::Label* return_number) {
+ assembler->GotoIf(
+ assembler->Int32LessThan(instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ return_string);
+
+ assembler->GotoIf(assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
+ return_boolean);
+
+ assembler->GotoIf(
+ assembler->Word32Equal(instance_type,
+ assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+ return_number);
+}
+
+} // namespace
+
+// ES6 section 19.1.3.6 Object.prototype.toString
+void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label return_undefined(assembler, Label::kDeferred),
+ return_null(assembler, Label::kDeferred),
+ return_arguments(assembler, Label::kDeferred), return_array(assembler),
+ return_api(assembler, Label::kDeferred), return_object(assembler),
+ return_regexp(assembler), return_function(assembler),
+ return_error(assembler), return_date(assembler), return_string(assembler),
+ return_boolean(assembler), return_jsvalue(assembler),
+ return_jsproxy(assembler, Label::kDeferred), return_number(assembler);
+
+ Label if_isproxy(assembler, Label::kDeferred);
+
+ Label checkstringtag(assembler);
+ Label if_tostringtag(assembler), if_notostringtag(assembler);
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+  assembler->GotoIf(
+      assembler->WordEqual(receiver, assembler->UndefinedConstant()),
+      &return_undefined);
+
+  assembler->GotoIf(assembler->WordEqual(receiver, assembler->NullConstant()),
+                    &return_null);
+
+ assembler->GotoIf(assembler->WordIsSmi(receiver), &return_number);
+
+ Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+ ReturnIfPrimitive(assembler, receiver_instance_type, &return_string,
+ &return_boolean, &return_number);
+
+  // For proxies, check IsArray before getting @@toStringTag.
+ Variable var_proxy_is_array(assembler, MachineRepresentation::kTagged);
+ var_proxy_is_array.Bind(assembler->BooleanConstant(false));
+
+ assembler->Branch(
+ assembler->Word32Equal(receiver_instance_type,
+ assembler->Int32Constant(JS_PROXY_TYPE)),
+ &if_isproxy, &checkstringtag);
+
+ assembler->Bind(&if_isproxy);
+ {
+    // This runtime call can throw.
+ var_proxy_is_array.Bind(
+ assembler->CallRuntime(Runtime::kArrayIsArray, context, receiver));
+ assembler->Goto(&checkstringtag);
+ }
+
+ assembler->Bind(&checkstringtag);
+ {
+ Node* to_string_tag_symbol = assembler->HeapConstant(
+ assembler->isolate()->factory()->to_string_tag_symbol());
+
+ GetPropertyStub stub(assembler->isolate());
+ Callable get_property =
+ Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ Node* to_string_tag_value = assembler->CallStub(
+ get_property, context, receiver, to_string_tag_symbol);
+
+ IsString(assembler, to_string_tag_value, &if_tostringtag,
+ &if_notostringtag);
+
+ assembler->Bind(&if_tostringtag);
+ ReturnToStringFormat(assembler, context, to_string_tag_value);
+ }
+ assembler->Bind(&if_notostringtag);
+ {
+ size_t const kNumCases = 11;
+ Label* case_labels[kNumCases];
+ int32_t case_values[kNumCases];
+ case_labels[0] = &return_api;
+ case_values[0] = JS_API_OBJECT_TYPE;
+ case_labels[1] = &return_api;
+ case_values[1] = JS_SPECIAL_API_OBJECT_TYPE;
+ case_labels[2] = &return_arguments;
+ case_values[2] = JS_ARGUMENTS_TYPE;
+ case_labels[3] = &return_array;
+ case_values[3] = JS_ARRAY_TYPE;
+ case_labels[4] = &return_function;
+ case_values[4] = JS_BOUND_FUNCTION_TYPE;
+ case_labels[5] = &return_function;
+ case_values[5] = JS_FUNCTION_TYPE;
+ case_labels[6] = &return_error;
+ case_values[6] = JS_ERROR_TYPE;
+ case_labels[7] = &return_date;
+ case_values[7] = JS_DATE_TYPE;
+ case_labels[8] = &return_regexp;
+ case_values[8] = JS_REGEXP_TYPE;
+ case_labels[9] = &return_jsvalue;
+ case_values[9] = JS_VALUE_TYPE;
+ case_labels[10] = &return_jsproxy;
+ case_values[10] = JS_PROXY_TYPE;
+
+ assembler->Switch(receiver_instance_type, &return_object, case_values,
+ case_labels, arraysize(case_values));
+
+ assembler->Bind(&return_undefined);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->undefined_to_string()));
+
+ assembler->Bind(&return_null);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->null_to_string()));
+
+ assembler->Bind(&return_number);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->number_to_string()));
+
+ assembler->Bind(&return_string);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->string_to_string()));
+
+ assembler->Bind(&return_boolean);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->boolean_to_string()));
+
+ assembler->Bind(&return_arguments);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->arguments_to_string()));
+
+ assembler->Bind(&return_array);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->array_to_string()));
+
+ assembler->Bind(&return_function);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->function_to_string()));
+
+ assembler->Bind(&return_error);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->error_to_string()));
+
+ assembler->Bind(&return_date);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->date_to_string()));
+
+ assembler->Bind(&return_regexp);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->regexp_to_string()));
+
+ assembler->Bind(&return_api);
+ {
+ Node* class_name =
+ assembler->CallRuntime(Runtime::kClassOf, context, receiver);
+ ReturnToStringFormat(assembler, context, class_name);
+ }
+
+ assembler->Bind(&return_jsvalue);
+ {
+ Node* value = assembler->LoadJSValueValue(receiver);
+ assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
+
+ ReturnIfPrimitive(assembler, assembler->LoadInstanceType(value),
+ &return_string, &return_boolean, &return_number);
+ assembler->Goto(&return_object);
+ }
+
+ assembler->Bind(&return_jsproxy);
+ {
+ assembler->GotoIf(assembler->WordEqual(var_proxy_is_array.value(),
+ assembler->BooleanConstant(true)),
+ &return_array);
+
+ Node* map = assembler->LoadMap(receiver);
+
+ // Return object if the proxy {receiver} is not callable.
+ assembler->Branch(
+ assembler->Word32Equal(
+ assembler->Word32And(
+ assembler->LoadMapBitField(map),
+ assembler->Int32Constant(1 << Map::kIsCallable)),
+ assembler->Int32Constant(0)),
+ &return_object, &return_function);
+ }
+
+ // Default
+ assembler->Bind(&return_object);
+ assembler->Return(assembler->HeapConstant(
+ assembler->isolate()->factory()->object_to_string()));
+ }
+}
+
+// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
+// TODO(verwaest): Support the common cases with precached map directly in
+// an Object.create stub.
+BUILTIN(ObjectCreate) {
+ HandleScope scope(isolate);
+ Handle<Object> prototype = args.atOrUndefined(isolate, 1);
+ if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+ }
+
+ // Generate the map with the specified {prototype} based on the Object
+ // function's initial map from the current native context.
+ // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+ // slack tracking for Object.create.
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
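+  // Three cases below: a null prototype uses a dedicated map, JSObject
+  // prototypes cache the created map in their PrototypeInfo, and any other
+  // receiver (e.g. a proxy) takes a full map transition.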
+ if (map->prototype() != *prototype) {
+ if (prototype->IsNull(isolate)) {
+ map = isolate->object_with_null_prototype_map();
+ } else if (prototype->IsJSObject()) {
+ Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+ if (!js_prototype->map()->is_prototype_map()) {
+ JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+ }
+ Handle<PrototypeInfo> info =
+ Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+ // TODO(verwaest): Use inobject slack tracking for this map.
+ if (info->HasObjectCreateMap()) {
+ map = handle(info->ObjectCreateMap(), isolate);
+ } else {
+ map = Map::CopyInitialMap(map);
+ Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+ PrototypeInfo::SetObjectCreateMap(info, map);
+ }
+ } else {
+ map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+ }
+ }
+
+ // Actually allocate the object.
+ Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
+
+ // Define the properties if properties was specified and is not undefined.
+ Handle<Object> properties = args.atOrUndefined(isolate, 2);
+ if (!properties->IsUndefined(isolate)) {
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSReceiver::DefineProperties(isolate, object, properties));
+ }
+
+ return *object;
+}
+
+// ES6 section 19.1.2.3 Object.defineProperties
+BUILTIN(ObjectDefineProperties) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> properties = args.at<Object>(2);
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSReceiver::DefineProperties(isolate, target, properties));
+}
+
+// ES6 section 19.1.2.4 Object.defineProperty
+BUILTIN(ObjectDefineProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+ Handle<Object> attributes = args.at<Object>(3);
+
+ return JSReceiver::DefineProperty(isolate, target, key, attributes);
+}
+
+namespace {
+
+template <AccessorComponent which_accessor>
+Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
+ Handle<Object> name, Handle<Object> accessor) {
+ // 1. Let O be ? ToObject(this value).
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ConvertReceiver(isolate, object));
+  // 2. If IsCallable(accessor) is false, throw a TypeError exception.
+ if (!accessor->IsCallable()) {
+ MessageTemplate::Template message =
+ which_accessor == ACCESSOR_GETTER
+ ? MessageTemplate::kObjectGetterExpectingFunction
+ : MessageTemplate::kObjectSetterExpectingFunction;
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewTypeError(message));
+ }
+  // 3. Let desc be PropertyDescriptor{[[Get]]: getter (resp. [[Set]]: setter),
+  //    [[Enumerable]]: true, [[Configurable]]: true}.
+ PropertyDescriptor desc;
+ if (which_accessor == ACCESSOR_GETTER) {
+ desc.set_get(accessor);
+ } else {
+ DCHECK(which_accessor == ACCESSOR_SETTER);
+ desc.set_set(accessor);
+ }
+ desc.set_enumerable(true);
+ desc.set_configurable(true);
+ // 4. Let key be ? ToPropertyKey(P).
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToPropertyKey(isolate, name));
+ // 5. Perform ? DefinePropertyOrThrow(O, key, desc).
+ // To preserve legacy behavior, we ignore errors silently rather than
+ // throwing an exception.
+ Maybe<bool> success = JSReceiver::DefineOwnProperty(
+ isolate, receiver, name, &desc, Object::DONT_THROW);
+ MAYBE_RETURN(success, isolate->heap()->exception());
+ if (!success.FromJust()) {
+ isolate->CountUsage(v8::Isolate::kDefineGetterOrSetterWouldThrow);
+ }
+ // 6. Return undefined.
+ return isolate->heap()->undefined_value();
+}
+
+Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
+ Handle<Object> key, AccessorComponent component) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+ Object::ConvertReceiver(isolate, object));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToPropertyKey(isolate, key));
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ DCHECK(success);
+
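+  // Walk the lookup chain for {key}: data properties are skipped, proxies
+  // and integer-indexed exotics yield undefined, and the first accessor
+  // pair found supplies the requested component.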
+ for (; it.IsFound(); it.Next()) {
+ switch (it.state()) {
+ case LookupIterator::INTERCEPTOR:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+
+ case LookupIterator::ACCESS_CHECK:
+ if (it.HasAccess()) continue;
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ return isolate->heap()->undefined_value();
+
+ case LookupIterator::JSPROXY:
+ return isolate->heap()->undefined_value();
+
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return isolate->heap()->undefined_value();
+ case LookupIterator::DATA:
+ continue;
+ case LookupIterator::ACCESSOR: {
+ Handle<Object> maybe_pair = it.GetAccessors();
+ if (maybe_pair->IsAccessorPair()) {
+ return *AccessorPair::GetComponent(
+ Handle<AccessorPair>::cast(maybe_pair), component);
+ }
+ }
+ }
+ }
+
+ return isolate->heap()->undefined_value();
+}
+
+} // namespace
+
+// ES6 B.2.2.2 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__defineGetter__
+BUILTIN(ObjectDefineGetter) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0); // Receiver.
+ Handle<Object> name = args.at<Object>(1);
+ Handle<Object> getter = args.at<Object>(2);
+ return ObjectDefineAccessor<ACCESSOR_GETTER>(isolate, object, name, getter);
+}
+
+// ES6 B.2.2.3 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__defineSetter__
+BUILTIN(ObjectDefineSetter) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0); // Receiver.
+ Handle<Object> name = args.at<Object>(1);
+ Handle<Object> setter = args.at<Object>(2);
+ return ObjectDefineAccessor<ACCESSOR_SETTER>(isolate, object, name, setter);
+}
+
+// ES6 B.2.2.4 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupGetter__
+BUILTIN(ObjectLookupGetter) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> name = args.at<Object>(1);
+ return ObjectLookupAccessor(isolate, object, name, ACCESSOR_GETTER);
+}
+
+// ES6 B.2.2.5 a.k.a.
+// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupSetter__
+BUILTIN(ObjectLookupSetter) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at<Object>(0);
+ Handle<Object> name = args.at<Object>(1);
+ return ObjectLookupAccessor(isolate, object, name, ACCESSOR_SETTER);
+}
+
+// ES6 section 19.1.2.5 Object.freeze ( O )
+BUILTIN(ObjectFreeze) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+ FROZEN, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+// ES6 section 19.1.2.9 Object.getPrototypeOf ( O )
+BUILTIN(ObjectGetPrototypeOf) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSReceiver::GetPrototype(isolate, receiver));
+}
+
+// ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
+BUILTIN(ObjectGetOwnPropertyDescriptor) {
+ HandleScope scope(isolate);
+ // 1. Let obj be ? ToObject(O).
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ // 2. Let key be ? ToPropertyKey(P).
+ Handle<Object> property = args.atOrUndefined(isolate, 2);
+ Handle<Name> key;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToName(isolate, property));
+ // 3. Let desc be ? obj.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, receiver, key, &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ // 4. Return FromPropertyDescriptor(desc).
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
+ return *desc.ToObject(isolate);
+}
+
+namespace {
+
+Object* GetOwnPropertyKeys(Isolate* isolate, BuiltinArguments args,
+ PropertyFilter filter) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly, filter,
+ GetKeysConversion::kConvertToString));
+ return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+} // namespace
+
+// ES6 section 19.1.2.7 Object.getOwnPropertyNames ( O )
+BUILTIN(ObjectGetOwnPropertyNames) {
+ return GetOwnPropertyKeys(isolate, args, SKIP_SYMBOLS);
+}
+
+// ES6 section 19.1.2.8 Object.getOwnPropertySymbols ( O )
+BUILTIN(ObjectGetOwnPropertySymbols) {
+ return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
+}
+
+// ES6 section 19.1.2.10 Object.is ( value1, value2 )
+BUILTIN(ObjectIs) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> value1 = args.at<Object>(1);
+ Handle<Object> value2 = args.at<Object>(2);
+ return isolate->heap()->ToBoolean(value1->SameValue(*value2));
+}
+
+// ES6 section 19.1.2.11 Object.isExtensible ( O )
+BUILTIN(ObjectIsExtensible) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result =
+ object->IsJSReceiver()
+ ? JSReceiver::IsExtensible(Handle<JSReceiver>::cast(object))
+ : Just(false);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 19.1.2.12 Object.isFrozen ( O )
+BUILTIN(ObjectIsFrozen) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result = object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ Handle<JSReceiver>::cast(object), FROZEN)
+ : Just(true);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 19.1.2.13 Object.isSealed ( O )
+BUILTIN(ObjectIsSealed) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Maybe<bool> result = object->IsJSReceiver()
+ ? JSReceiver::TestIntegrityLevel(
+ Handle<JSReceiver>::cast(object), SEALED)
+ : Just(true);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 19.1.2.14 Object.keys ( O )
+BUILTIN(ObjectKeys) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+
+ Handle<FixedArray> keys;
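+  // Fast path: when the enum cache is valid and the receiver has no
+  // elements, the cached enumerable keys can be copied out directly.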
+ int enum_length = receiver->map()->EnumLength();
+ if (enum_length != kInvalidEnumCacheSentinel &&
+ JSObject::cast(*receiver)->elements() ==
+ isolate->heap()->empty_fixed_array()) {
+ DCHECK(receiver->IsJSObject());
+ DCHECK(!JSObject::cast(*receiver)->HasNamedInterceptor());
+ DCHECK(!JSObject::cast(*receiver)->IsAccessCheckNeeded());
+ DCHECK(!receiver->map()->has_hidden_prototype());
+ DCHECK(JSObject::cast(*receiver)->HasFastProperties());
+ if (enum_length == 0) {
+ keys = isolate->factory()->empty_fixed_array();
+ } else {
+ Handle<FixedArray> cache(
+ receiver->map()->instance_descriptors()->GetEnumCache());
+ keys = isolate->factory()->CopyFixedArrayUpTo(cache, enum_length);
+ }
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString));
+ }
+ return *isolate->factory()->NewJSArrayWithElements(keys, FAST_ELEMENTS);
+}
+
+BUILTIN(ObjectValues) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ Handle<FixedArray> values;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+BUILTIN(ObjectEntries) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+BUILTIN(ObjectGetOwnPropertyDescriptors) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, KeyAccumulator::GetKeys(
+ receiver, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+ GetKeysConversion::kConvertToString));
+
+ Handle<JSObject> descriptors =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key = Handle<Name>::cast(FixedArray::get(*keys, i, isolate));
+ PropertyDescriptor descriptor;
+ Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, receiver, key, &descriptor);
+ MAYBE_RETURN(did_get_descriptor, isolate->heap()->exception());
+
+ if (!did_get_descriptor.FromJust()) continue;
+ Handle<Object> from_descriptor = descriptor.ToObject(isolate);
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, descriptors, key, descriptors, LookupIterator::OWN);
+ Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
+ Object::DONT_THROW);
+ CHECK(success.FromJust());
+ }
+
+ return *descriptors;
+}
+
+// ES6 section 19.1.2.15 Object.preventExtensions ( O )
+BUILTIN(ObjectPreventExtensions) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::PreventExtensions(Handle<JSReceiver>::cast(object),
+ Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+// ES6 section 19.1.2.17 Object.seal ( O )
+BUILTIN(ObjectSeal) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsJSReceiver()) {
+ MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
+ SEALED, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ }
+ return *object;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-proxy.cc b/deps/v8/src/builtins/builtins-proxy.cc
new file mode 100644
index 0000000000..05ba3041a7
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-proxy.cc
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
+BUILTIN(ProxyConstructor) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked("Proxy")));
+}
+
+// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
+BUILTIN(ProxyConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ DCHECK(isolate->proxy_function()->IsConstructor());
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> handler = args.atOrUndefined(isolate, 2);
+ RETURN_RESULT_OR_FAILURE(isolate, JSProxy::New(isolate, target, handler));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
new file mode 100644
index 0000000000..b4d16c4a7b
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -0,0 +1,274 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/property-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 26.1 The Reflect Object
+
+// ES6 section 26.1.3 Reflect.defineProperty
+BUILTIN(ReflectDefineProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+ Handle<Object> attributes = args.at<Object>(3);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.defineProperty")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ PropertyDescriptor desc;
+ if (!PropertyDescriptor::ToPropertyDescriptor(isolate, attributes, &desc)) {
+ return isolate->heap()->exception();
+ }
+
+ Maybe<bool> result =
+ JSReceiver::DefineOwnProperty(isolate, Handle<JSReceiver>::cast(target),
+ name, &desc, Object::DONT_THROW);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.4 Reflect.deleteProperty
+BUILTIN(ReflectDeleteProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.deleteProperty")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Maybe<bool> result = JSReceiver::DeletePropertyOrElement(
+ Handle<JSReceiver>::cast(target), name, SLOPPY);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.6 Reflect.get
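+// e.g. Reflect.get({get x() { return this.y; }, y: 0}, "x", {y: 42}) === 42.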
+BUILTIN(ReflectGet) {
+ HandleScope scope(isolate);
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> key = args.atOrUndefined(isolate, 2);
+ Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.get")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Object::GetPropertyOrElement(receiver, name,
+ Handle<JSReceiver>::cast(target)));
+}
+
+// ES6 section 26.1.7 Reflect.getOwnPropertyDescriptor
+BUILTIN(ReflectGetOwnPropertyDescriptor) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.getOwnPropertyDescriptor")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, Handle<JSReceiver>::cast(target), name, &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
+ return *desc.ToObject(isolate);
+}
+
+// ES6 section 26.1.8 Reflect.getPrototypeOf
+BUILTIN(ReflectGetPrototypeOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.getPrototypeOf")));
+ }
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSReceiver::GetPrototype(isolate, receiver));
+}
+
+// ES6 section 26.1.9 Reflect.has
+BUILTIN(ReflectHas) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> key = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.has")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ Maybe<bool> result =
+ JSReceiver::HasProperty(Handle<JSReceiver>::cast(target), name);
+ return result.IsJust() ? *isolate->factory()->ToBoolean(result.FromJust())
+ : isolate->heap()->exception();
+}
+
+// ES6 section 26.1.10 Reflect.isExtensible
+BUILTIN(ReflectIsExtensible) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.isExtensible")));
+ }
+
+ Maybe<bool> result =
+ JSReceiver::IsExtensible(Handle<JSReceiver>::cast(target));
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.11 Reflect.ownKeys
+BUILTIN(ReflectOwnKeys) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.ownKeys")));
+ }
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(Handle<JSReceiver>::cast(target),
+ KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+ GetKeysConversion::kConvertToString));
+ return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+// ES6 section 26.1.12 Reflect.preventExtensions
+BUILTIN(ReflectPreventExtensions) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Object> target = args.at<Object>(1);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.preventExtensions")));
+ }
+
+ Maybe<bool> result = JSReceiver::PreventExtensions(
+ Handle<JSReceiver>::cast(target), Object::DONT_THROW);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.13 Reflect.set
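+// e.g. Reflect.set({set x(v) { this.y = v; }}, "x", 1, r) runs the setter
+// with r as receiver, assigning r.y = 1.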
+BUILTIN(ReflectSet) {
+ HandleScope scope(isolate);
+ Handle<Object> target = args.atOrUndefined(isolate, 1);
+ Handle<Object> key = args.atOrUndefined(isolate, 2);
+ Handle<Object> value = args.atOrUndefined(isolate, 3);
+ Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.set")));
+ }
+
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, receiver, name, Handle<JSReceiver>::cast(target));
+ Maybe<bool> result = Object::SetSuperProperty(
+ &it, value, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// ES6 section 26.1.14 Reflect.setPrototypeOf
+BUILTIN(ReflectSetPrototypeOf) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> target = args.at<Object>(1);
+ Handle<Object> proto = args.at<Object>(2);
+
+ if (!target->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNonObject,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Reflect.setPrototypeOf")));
+ }
+
+ if (!proto->IsJSReceiver() && !proto->IsNull(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
+ }
+
+ Maybe<bool> result = JSReceiver::SetPrototype(
+ Handle<JSReceiver>::cast(target), proto, true, Object::DONT_THROW);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
new file mode 100644
index 0000000000..23d4f43af2
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -0,0 +1,266 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+// ES7 sharedmem 6.3.4.1 get SharedArrayBuffer.prototype.byteLength
+BUILTIN(SharedArrayBufferPrototypeGetByteLength) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer,
+ "get SharedArrayBuffer.prototype.byteLength");
+ if (!array_buffer->is_shared()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "get SharedArrayBuffer.prototype.byteLength"),
+ args.receiver()));
+ }
+ return array_buffer->byte_length();
+}
+
+namespace {
+
+void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
+ compiler::Node* context,
+ compiler::Node** out_instance_type,
+ compiler::Node** out_backing_store) {
+ using namespace compiler;
+ CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
+ not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
+ not_float_or_clamped(a), invalid(a);
+
+ // Fail if it is not a heap object.
+ a->Branch(a->WordIsSmi(tagged), &is_smi, &not_smi);
+ a->Bind(&is_smi);
+ a->Goto(&invalid);
+
+ // Fail if the array's instance type is not JSTypedArray.
+ a->Bind(&not_smi);
+ a->Branch(a->WordEqual(a->LoadInstanceType(tagged),
+ a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &is_typed_array, &not_typed_array);
+ a->Bind(&not_typed_array);
+ a->Goto(&invalid);
+
+ // Fail if the array's JSArrayBuffer is not shared.
+ a->Bind(&is_typed_array);
+ Node* array_buffer = a->LoadObjectField(tagged, JSTypedArray::kBufferOffset);
+ Node* is_buffer_shared = a->BitFieldDecode<JSArrayBuffer::IsShared>(
+ a->LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldSlot));
+ a->Branch(is_buffer_shared, &is_shared, &not_shared);
+ a->Bind(&not_shared);
+ a->Goto(&invalid);
+
+ // Fail if the array's element type is float32, float64 or clamped.
+ a->Bind(&is_shared);
+ Node* elements_instance_type = a->LoadInstanceType(
+ a->LoadObjectField(tagged, JSObject::kElementsOffset));
+ STATIC_ASSERT(FIXED_INT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+ STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
+ a->Branch(a->Int32LessThan(elements_instance_type,
+ a->Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
+ &not_float_or_clamped, &is_float_or_clamped);
+ a->Bind(&is_float_or_clamped);
+ a->Goto(&invalid);
+
+ a->Bind(&invalid);
+ a->CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
+ tagged);
+ a->Return(a->UndefinedConstant());
+
+ a->Bind(&not_float_or_clamped);
+ *out_instance_type = elements_instance_type;
+
+ Node* backing_store =
+ a->LoadObjectField(array_buffer, JSArrayBuffer::kBackingStoreOffset);
+ Node* byte_offset = a->ChangeUint32ToWord(a->TruncateTaggedToWord32(
+ context,
+ a->LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset)));
+ *out_backing_store = a->IntPtrAdd(backing_store, byte_offset);
+}
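+
+// Illustrative rejections (rough sketch of the validation above):
+//   Atomics.load({}, 0);                 // TypeError: not a typed array
+//   Atomics.load(new Int32Array(8), 0);  // TypeError: buffer is not shared
+//   Atomics.load(new Float64Array(new SharedArrayBuffer(8)), 0);  // TypeError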
+
+// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
+compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
+ compiler::Node* tagged,
+ compiler::Node* context) {
+ using namespace compiler;
+ CodeStubAssembler::Variable var_result(a, MachineRepresentation::kWord32);
+
+ Callable to_number = CodeFactory::ToNumber(a->isolate());
+ Node* number_index = a->CallStub(to_number, context, tagged);
+ CodeStubAssembler::Label done(a, &var_result);
+
+ CodeStubAssembler::Label if_numberissmi(a), if_numberisnotsmi(a);
+ a->Branch(a->WordIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);
+
+ a->Bind(&if_numberissmi);
+ {
+ var_result.Bind(a->SmiToWord32(number_index));
+ a->Goto(&done);
+ }
+
+ a->Bind(&if_numberisnotsmi);
+ {
+ Node* number_index_value = a->LoadHeapNumberValue(number_index);
+ Node* access_index = a->TruncateFloat64ToWord32(number_index_value);
+ Node* test_index = a->ChangeInt32ToFloat64(access_index);
+
+ CodeStubAssembler::Label if_indexesareequal(a), if_indexesarenotequal(a);
+ a->Branch(a->Float64Equal(number_index_value, test_index),
+ &if_indexesareequal, &if_indexesarenotequal);
+
+ a->Bind(&if_indexesareequal);
+ {
+ var_result.Bind(access_index);
+ a->Goto(&done);
+ }
+
+ a->Bind(&if_indexesarenotequal);
+ a->Return(
+ a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+ }
+
+ a->Bind(&done);
+ return var_result.value();
+}
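+
+// Illustrative index coercion (rough sketch, assuming ia is an Int32Array
+// backed by a SharedArrayBuffer): the index goes through ToNumber and must
+// be exactly integral, otherwise the runtime call above throws a RangeError:
+//   Atomics.load(ia, "1");  // fine, accesses index 1
+//   Atomics.load(ia, 1.5);  // RangeError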
+
+void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
+ compiler::Node* array_length_word,
+ compiler::Node* context) {
+ using namespace compiler;
+ // Check if the index is in bounds. If not, throw RangeError.
+ CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
+ a->Branch(
+ a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
+ a->Int32GreaterThanOrEqual(index_word, array_length_word)),
+ &if_notinbounds, &if_inbounds);
+ a->Bind(&if_notinbounds);
+ a->Return(
+ a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+ a->Bind(&if_inbounds);
+}
+
+} // anonymous namespace
+
+void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
+ using namespace compiler;
+ Node* array = a->Parameter(1);
+ Node* index = a->Parameter(2);
+ Node* context = a->Parameter(3 + 2);
+
+ Node* instance_type;
+ Node* backing_store;
+ ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
+ Node* array_length_word32 = a->TruncateTaggedToWord32(
+ context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
+ ValidateAtomicIndex(a, index_word32, array_length_word32, context);
+ Node* index_word = a->ChangeUint32ToWord(index_word32);
+
+ CodeStubAssembler::Label i8(a), u8(a), i16(a), u16(a), i32(a), u32(a),
+ other(a);
+ int32_t case_values[] = {
+ FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
+ FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+ };
+ CodeStubAssembler::Label* case_labels[] = {
+ &i8, &u8, &i16, &u16, &i32, &u32,
+ };
+ a->Switch(instance_type, &other, case_values, case_labels,
+ arraysize(case_labels));
+
+ a->Bind(&i8);
+ a->Return(
+ a->SmiTag(a->AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+
+ a->Bind(&u8);
+ a->Return(a->SmiTag(
+ a->AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
+
+ a->Bind(&i16);
+ a->Return(a->SmiTag(a->AtomicLoad(MachineType::Int16(), backing_store,
+ a->WordShl(index_word, 1))));
+
+ a->Bind(&u16);
+ a->Return(a->SmiTag(a->AtomicLoad(MachineType::Uint16(), backing_store,
+ a->WordShl(index_word, 1))));
+
+ a->Bind(&i32);
+ a->Return(a->ChangeInt32ToTagged(a->AtomicLoad(
+ MachineType::Int32(), backing_store, a->WordShl(index_word, 2))));
+
+ a->Bind(&u32);
+ a->Return(a->ChangeUint32ToTagged(a->AtomicLoad(
+ MachineType::Uint32(), backing_store, a->WordShl(index_word, 2))));
+
+ // This shouldn't happen; we've already validated the type.
+ a->Bind(&other);
+ a->Return(a->Int32Constant(0));
+}
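+
+// Illustrative use (rough sketch, assuming i32 is an Int32Array backed by a
+// SharedArrayBuffer): Atomics.load(i32, 1) reads four bytes at byte offset
+// 1 << 2 from the backing store, which is why the 32-bit cases above shift
+// the element index by 2 and the 16-bit cases by 1.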
+
+void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
+ using namespace compiler;
+ Node* array = a->Parameter(1);
+ Node* index = a->Parameter(2);
+ Node* value = a->Parameter(3);
+ Node* context = a->Parameter(4 + 2);
+
+ Node* instance_type;
+ Node* backing_store;
+ ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
+ Node* array_length_word32 = a->TruncateTaggedToWord32(
+ context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
+ ValidateAtomicIndex(a, index_word32, array_length_word32, context);
+ Node* index_word = a->ChangeUint32ToWord(index_word32);
+
+ Callable to_integer = CodeFactory::ToInteger(a->isolate());
+ Node* value_integer = a->CallStub(to_integer, context, value);
+ Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
+
+ CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
+ int32_t case_values[] = {
+ FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
+ FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+ };
+ CodeStubAssembler::Label* case_labels[] = {
+ &u8, &u8, &u16, &u16, &u32, &u32,
+ };
+ a->Switch(instance_type, &other, case_values, case_labels,
+ arraysize(case_labels));
+
+ a->Bind(&u8);
+ a->AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
+ value_word32);
+ a->Return(value_integer);
+
+ a->Bind(&u16);
+ a->AtomicStore(MachineRepresentation::kWord16, backing_store,
+ a->WordShl(index_word, 1), value_word32);
+ a->Return(value_integer);
+
+ a->Bind(&u32);
+ a->AtomicStore(MachineRepresentation::kWord32, backing_store,
+ a->WordShl(index_word, 2), value_word32);
+ a->Return(value_integer);
+
+ // This shouldn't happen; we've already validated the type.
+ a->Bind(&other);
+ a->Return(a->Int32Constant(0));
+}
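+
+// Illustrative use (rough sketch, assuming i32 as above): the value is
+// coerced with ToInteger and the coerced value is what the call returns:
+//   Atomics.store(i32, 0, 42.7);  // stores 42, returns 42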
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
new file mode 100644
index 0000000000..d38f6b069d
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -0,0 +1,526 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+#include "src/code-factory.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 21.1 String Objects
+
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* code = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+
+ // Check if we have exactly one argument (plus the implicit receiver), i.e.
+ // if the parent frame is not an arguments adaptor frame.
+ Label if_oneargument(assembler), if_notoneargument(assembler);
+ Node* parent_frame_pointer = assembler->LoadParentFramePointer();
+ Node* parent_frame_type =
+ assembler->Load(MachineType::Pointer(), parent_frame_pointer,
+ assembler->IntPtrConstant(
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ assembler->Branch(
+ assembler->WordEqual(
+ parent_frame_type,
+ assembler->SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
+ &if_notoneargument, &if_oneargument);
+
+ assembler->Bind(&if_oneargument);
+ {
+ // Single argument case: perform a fast single-character string cache
+ // lookup for one-byte code units, or fall back to creating a
+ // single-character string on the fly otherwise.
+ Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+ Node* code16 = assembler->Word32And(
+ code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+ Node* result = assembler->StringFromCharCode(code16);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_notoneargument);
+ {
+ // Determine the resulting string length.
+ Node* length = assembler->LoadAndUntagSmi(
+ parent_frame_pointer, ArgumentsAdaptorFrameConstants::kLengthOffset);
+
+ // Assume that the resulting string contains only one-byte characters.
+ Node* result = assembler->AllocateSeqOneByteString(context, length);
+
+ // Truncate all input parameters and append them to the resulting string.
+ Variable var_offset(assembler, MachineType::PointerRepresentation());
+ Label loop(assembler, &var_offset), done_loop(assembler);
+ var_offset.Bind(assembler->IntPtrConstant(0));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {offset}.
+ Node* offset = var_offset.value();
+
+ // Check if we're done with the string.
+ assembler->GotoIf(assembler->WordEqual(offset, length), &done_loop);
+
+ // Load the next code point and truncate it to a 16-bit value.
+ Node* code = assembler->Load(
+ MachineType::AnyTagged(), parent_frame_pointer,
+ assembler->IntPtrAdd(
+ assembler->WordShl(assembler->IntPtrSub(length, offset),
+ assembler->IntPtrConstant(kPointerSizeLog2)),
+ assembler->IntPtrConstant(
+ CommonFrameConstants::kFixedFrameSizeAboveFp -
+ kPointerSize)));
+ Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+ Node* code16 = assembler->Word32And(
+ code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+
+ // Check if {code16} fits into a one-byte string.
+ Label if_codeisonebyte(assembler), if_codeistwobyte(assembler);
+ assembler->Branch(
+ assembler->Int32LessThanOrEqual(
+ code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
+ &if_codeisonebyte, &if_codeistwobyte);
+
+ assembler->Bind(&if_codeisonebyte);
+ {
+ // The {code16} fits into the SeqOneByteString {result}.
+ assembler->StoreNoWriteBarrier(
+ MachineRepresentation::kWord8, result,
+ assembler->IntPtrAdd(
+ assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag),
+ offset),
+ code16);
+ var_offset.Bind(
+ assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_codeistwobyte);
+ {
+ // Allocate a SeqTwoByteString to hold the resulting string.
+ Node* cresult = assembler->AllocateSeqTwoByteString(context, length);
+
+ // Copy all characters that were previously written to the
+ // SeqOneByteString in {result} over to the new {cresult}.
+ Variable var_coffset(assembler, MachineType::PointerRepresentation());
+ Label cloop(assembler, &var_coffset), done_cloop(assembler);
+ var_coffset.Bind(assembler->IntPtrConstant(0));
+ assembler->Goto(&cloop);
+ assembler->Bind(&cloop);
+ {
+ Node* coffset = var_coffset.value();
+ assembler->GotoIf(assembler->WordEqual(coffset, offset), &done_cloop);
+ Node* ccode = assembler->Load(
+ MachineType::Uint8(), result,
+ assembler->IntPtrAdd(
+ assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag),
+ coffset));
+ assembler->StoreNoWriteBarrier(
+ MachineRepresentation::kWord16, cresult,
+ assembler->IntPtrAdd(
+ assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag),
+ assembler->WordShl(coffset, 1)),
+ ccode);
+ var_coffset.Bind(
+ assembler->IntPtrAdd(coffset, assembler->IntPtrConstant(1)));
+ assembler->Goto(&cloop);
+ }
+
+ // Write the pending {code16} to {offset}.
+ assembler->Bind(&done_cloop);
+ assembler->StoreNoWriteBarrier(
+ MachineRepresentation::kWord16, cresult,
+ assembler->IntPtrAdd(
+ assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag),
+ assembler->WordShl(offset, 1)),
+ code16);
+
+ // Copy the remaining parameters to the SeqTwoByteString {cresult}.
+ Label floop(assembler, &var_offset), done_floop(assembler);
+ assembler->Goto(&floop);
+ assembler->Bind(&floop);
+ {
+ // Compute the next {offset}.
+ Node* offset = assembler->IntPtrAdd(var_offset.value(),
+ assembler->IntPtrConstant(1));
+
+ // Check if we're done with the string.
+ assembler->GotoIf(assembler->WordEqual(offset, length), &done_floop);
+
+ // Load the next code point and truncate it to a 16-bit value.
+ Node* code = assembler->Load(
+ MachineType::AnyTagged(), parent_frame_pointer,
+ assembler->IntPtrAdd(
+ assembler->WordShl(
+ assembler->IntPtrSub(length, offset),
+ assembler->IntPtrConstant(kPointerSizeLog2)),
+ assembler->IntPtrConstant(
+ CommonFrameConstants::kFixedFrameSizeAboveFp -
+ kPointerSize)));
+ Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+ Node* code16 = assembler->Word32And(
+ code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+
+ // Store the truncated {code} point at the next offset.
+ assembler->StoreNoWriteBarrier(
+ MachineRepresentation::kWord16, cresult,
+ assembler->IntPtrAdd(
+ assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag),
+ assembler->WordShl(offset, 1)),
+ code16);
+ var_offset.Bind(offset);
+ assembler->Goto(&floop);
+ }
+
+ // Return the SeqTwoByteString.
+ assembler->Bind(&done_floop);
+ assembler->Return(cresult);
+ }
+ }
+
+ assembler->Bind(&done_loop);
+ assembler->Return(result);
+ }
+}
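+
+// Illustrative JS behavior (rough sketch): every argument is masked down to
+// a 16-bit code unit by the String::kMaxUtf16CodeUnit mask above:
+//   String.fromCharCode(0x41, 0x42);  // "AB"
+//   String.fromCharCode(0x10041);     // "A" (0x10041 & 0xFFFF == 0x41)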
+
+namespace { // Helpers for String.fromCodePoint.
+
+bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
+ if (!value->IsNumber() && !Object::ToNumber(value).ToHandle(&value)) {
+ return false;
+ }
+
+ if (Object::ToInteger(isolate, value).ToHandleChecked()->Number() !=
+ value->Number()) {
+ return false;
+ }
+
+ if (value->Number() < 0 || value->Number() > 0x10FFFF) {
+ return false;
+ }
+
+ return true;
+}
+
+uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
+ Handle<Object> value = args.at<Object>(1 + index);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::ToNumber(value), -1);
+ if (!IsValidCodePoint(isolate, value)) {
+ isolate->Throw(*isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidCodePoint, value));
+ return -1;
+ }
+ return DoubleToUint32(value->Number());
+}
+
+} // namespace
+
+// ES6 section 21.1.2.2 String.fromCodePoint ( ...codePoints )
+BUILTIN(StringFromCodePoint) {
+ HandleScope scope(isolate);
+ int const length = args.length() - 1;
+ if (length == 0) return isolate->heap()->empty_string();
+ DCHECK_LT(0, length);
+
+ // Optimistically assume that the resulting String contains only one-byte
+ // characters.
+ List<uint8_t> one_byte_buffer(length);
+ uc32 code = 0;
+ int index;
+ for (index = 0; index < length; index++) {
+ code = NextCodePoint(isolate, args, index);
+ if (code < 0) {
+ return isolate->heap()->exception();
+ }
+ if (code > String::kMaxOneByteCharCode) {
+ break;
+ }
+ one_byte_buffer.Add(code);
+ }
+
+ if (index == length) {
+ RETURN_RESULT_OR_FAILURE(isolate, isolate->factory()->NewStringFromOneByte(
+ one_byte_buffer.ToConstVector()));
+ }
+
+ List<uc16> two_byte_buffer(length - index);
+
+ while (true) {
+ if (code <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ two_byte_buffer.Add(code);
+ } else {
+ two_byte_buffer.Add(unibrow::Utf16::LeadSurrogate(code));
+ two_byte_buffer.Add(unibrow::Utf16::TrailSurrogate(code));
+ }
+
+ if (++index == length) {
+ break;
+ }
+ code = NextCodePoint(isolate, args, index);
+ if (code < 0) {
+ return isolate->heap()->exception();
+ }
+ }
+
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewRawTwoByteString(one_byte_buffer.length() +
+ two_byte_buffer.length()));
+
+ CopyChars(result->GetChars(), one_byte_buffer.ToConstVector().start(),
+ one_byte_buffer.length());
+ CopyChars(result->GetChars() + one_byte_buffer.length(),
+ two_byte_buffer.ToConstVector().start(), two_byte_buffer.length());
+
+ return *result;
+}
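+
+// Illustrative JS behavior (rough sketch):
+//   String.fromCodePoint(0x61);      // "a" (one-byte fast path)
+//   String.fromCodePoint(0x1F600);   // "\uD83D\uDE00" (surrogate pair)
+//   String.fromCodePoint(0x110000);  // RangeError: invalid code point
+//   String.fromCodePoint(1.5);       // RangeError: not an integral Number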
+
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* position = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ receiver =
+ assembler->ToThisString(context, receiver, "String.prototype.charAt");
+
+ // Convert the {position} to a Smi and check that it's in bounds of the
+ // {receiver}.
+ // TODO(bmeurer): Find an abstraction for this!
+ {
+ // Check if the {position} is already a Smi.
+ Variable var_position(assembler, MachineRepresentation::kTagged);
+ var_position.Bind(position);
+ Label if_positionissmi(assembler),
+ if_positionisnotsmi(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
+ &if_positionisnotsmi);
+ assembler->Bind(&if_positionisnotsmi);
+ {
+ // Convert the {position} to an Integer via the ToIntegerStub.
+ Callable callable = CodeFactory::ToInteger(assembler->isolate());
+ Node* index = assembler->CallStub(callable, context, position);
+
+ // Check if the resulting {index} is now a Smi.
+ Label if_indexissmi(assembler, Label::kDeferred),
+ if_indexisnotsmi(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
+ &if_indexisnotsmi);
+
+ assembler->Bind(&if_indexissmi);
+ {
+ var_position.Bind(index);
+ assembler->Goto(&if_positionissmi);
+ }
+
+ assembler->Bind(&if_indexisnotsmi);
+ {
+ // The ToIntegerStub canonicalizes everything in Smi range to Smi
+ // representation, so any HeapNumber returned is not in Smi range.
+ // The only exception here is -0.0, which we treat as 0.
+ Node* index_value = assembler->LoadHeapNumberValue(index);
+ Label if_indexiszero(assembler, Label::kDeferred),
+ if_indexisnotzero(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Float64Equal(
+ index_value, assembler->Float64Constant(0.0)),
+ &if_indexiszero, &if_indexisnotzero);
+
+ assembler->Bind(&if_indexiszero);
+ {
+ var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
+ assembler->Goto(&if_positionissmi);
+ }
+
+ assembler->Bind(&if_indexisnotzero);
+ {
+ // The {index} is some other integral Number that is definitely
+ // neither -0.0 nor in Smi range.
+ assembler->Return(assembler->EmptyStringConstant());
+ }
+ }
+ }
+ assembler->Bind(&if_positionissmi);
+ position = var_position.value();
+
+ // Determine the actual length of the {receiver} String.
+ Node* receiver_length =
+ assembler->LoadObjectField(receiver, String::kLengthOffset);
+
+ // Return "" if the Smi {position} is outside the bounds of the {receiver}.
+ Label if_positioninbounds(assembler),
+ if_positionnotinbounds(assembler, Label::kDeferred);
+ assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
+ &if_positionnotinbounds, &if_positioninbounds);
+ assembler->Bind(&if_positionnotinbounds);
+ assembler->Return(assembler->EmptyStringConstant());
+ assembler->Bind(&if_positioninbounds);
+ }
+
+ // Load the character code at the {position} from the {receiver}.
+ Node* code = assembler->StringCharCodeAt(receiver, position);
+
+ // And return the single character string with only that {code}.
+ Node* result = assembler->StringFromCharCode(code);
+ assembler->Return(result);
+}
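+
+// Illustrative JS behavior (rough sketch):
+//   "abc".charAt(1);    // "b"
+//   "abc".charAt("1");  // "b" ({position} goes through ToInteger above)
+//   "abc".charAt(-1);   // "" (out of bounds)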
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+void Builtins::Generate_StringPrototypeCharCodeAt(
+ CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* position = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ receiver =
+ assembler->ToThisString(context, receiver, "String.prototype.charCodeAt");
+
+ // Convert the {position} to a Smi and check that it's in bounds of the
+ // {receiver}.
+ // TODO(bmeurer): Find an abstraction for this!
+ {
+ // Check if the {position} is already a Smi.
+ Variable var_position(assembler, MachineRepresentation::kTagged);
+ var_position.Bind(position);
+ Label if_positionissmi(assembler),
+ if_positionisnotsmi(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordIsSmi(position), &if_positionissmi,
+ &if_positionisnotsmi);
+ assembler->Bind(&if_positionisnotsmi);
+ {
+ // Convert the {position} to an Integer via the ToIntegerStub.
+ Callable callable = CodeFactory::ToInteger(assembler->isolate());
+ Node* index = assembler->CallStub(callable, context, position);
+
+ // Check if the resulting {index} is now a Smi.
+ Label if_indexissmi(assembler, Label::kDeferred),
+ if_indexisnotsmi(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordIsSmi(index), &if_indexissmi,
+ &if_indexisnotsmi);
+
+ assembler->Bind(&if_indexissmi);
+ {
+ var_position.Bind(index);
+ assembler->Goto(&if_positionissmi);
+ }
+
+ assembler->Bind(&if_indexisnotsmi);
+ {
+ // The ToIntegerStub canonicalizes everything in Smi range to Smi
+ // representation, so any HeapNumber returned is not in Smi range.
+ // The only exception here is -0.0, which we treat as 0.
+ Node* index_value = assembler->LoadHeapNumberValue(index);
+ Label if_indexiszero(assembler, Label::kDeferred),
+ if_indexisnotzero(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Float64Equal(
+ index_value, assembler->Float64Constant(0.0)),
+ &if_indexiszero, &if_indexisnotzero);
+
+ assembler->Bind(&if_indexiszero);
+ {
+ var_position.Bind(assembler->SmiConstant(Smi::FromInt(0)));
+ assembler->Goto(&if_positionissmi);
+ }
+
+ assembler->Bind(&if_indexisnotzero);
+ {
+ // The {index} is some other integral Number that is definitely
+ // neither -0.0 nor in Smi range.
+ assembler->Return(assembler->NaNConstant());
+ }
+ }
+ }
+ assembler->Bind(&if_positionissmi);
+ position = var_position.value();
+
+ // Determine the actual length of the {receiver} String.
+ Node* receiver_length =
+ assembler->LoadObjectField(receiver, String::kLengthOffset);
+
+ // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
+ Label if_positioninbounds(assembler),
+ if_positionnotinbounds(assembler, Label::kDeferred);
+ assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
+ &if_positionnotinbounds, &if_positioninbounds);
+ assembler->Bind(&if_positionnotinbounds);
+ assembler->Return(assembler->NaNConstant());
+ assembler->Bind(&if_positioninbounds);
+ }
+
+ // Load the character at the {position} from the {receiver}.
+ Node* value = assembler->StringCharCodeAt(receiver, position);
+ Node* result = assembler->SmiFromWord32(value);
+ assembler->Return(result);
+}
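+
+// Illustrative JS behavior (rough sketch): the same {position} handling as
+// charAt above, but out-of-bounds positions yield NaN instead of "":
+//   "abc".charCodeAt(1);  // 98
+//   "abc".charCodeAt(3);  // NaN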
+
+// ES6 section 21.1.3.25 String.prototype.toString ()
+void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* result = assembler->ToThisValue(
+ context, receiver, PrimitiveType::kString, "String.prototype.toString");
+ assembler->Return(result);
+}
+
+// ES6 section 21.1.3.27 String.prototype.trim ()
+BUILTIN(StringPrototypeTrim) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.trim");
+ return *String::Trim(string, String::kTrim);
+}
+
+// Non-standard WebKit extension
+BUILTIN(StringPrototypeTrimLeft) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.trimLeft");
+ return *String::Trim(string, String::kTrimLeft);
+}
+
+// Non-standard WebKit extension
+BUILTIN(StringPrototypeTrimRight) {
+ HandleScope scope(isolate);
+ TO_THIS_STRING(string, "String.prototype.trimRight");
+ return *String::Trim(string, String::kTrimRight);
+}
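+
+// Illustrative JS behavior of the three builtins above (rough sketch):
+//   "  x  ".trim();       // "x"
+//   "  x  ".trimLeft();   // "x  "
+//   "  x  ".trimRight();  // "  x"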
+
+// ES6 section 21.1.3.28 String.prototype.valueOf ( )
+void Builtins::Generate_StringPrototypeValueOf(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* result = assembler->ToThisValue(
+ context, receiver, PrimitiveType::kString, "String.prototype.valueOf");
+ assembler->Return(result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
new file mode 100644
index 0000000000..8dd8a1fa27
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 19.4 Symbol Objects
+
+// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
+BUILTIN(SymbolConstructor) {
+ HandleScope scope(isolate);
+ Handle<Symbol> result = isolate->factory()->NewSymbol();
+ Handle<Object> description = args.atOrUndefined(isolate, 1);
+ if (!description->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
+ Object::ToString(isolate, description));
+ result->set_name(*description);
+ }
+ return *result;
+}
+
+// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Construct]] case.
+BUILTIN(SymbolConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor,
+ isolate->factory()->Symbol_string()));
+}
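+
+// Illustrative JS behavior of the two cases above (rough sketch):
+//   const s = Symbol("desc");  // [[Call]]: creates a fresh symbol
+//   new Symbol("desc");        // [[Construct]]: TypeError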
+
+// ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint )
+void Builtins::Generate_SymbolPrototypeToPrimitive(
+ CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(4);
+
+ Node* result =
+ assembler->ToThisValue(context, receiver, PrimitiveType::kSymbol,
+ "Symbol.prototype [ @@toPrimitive ]");
+ assembler->Return(result);
+}
+
+// ES6 section 19.4.3.2 Symbol.prototype.toString ( )
+void Builtins::Generate_SymbolPrototypeToString(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* value = assembler->ToThisValue(
+ context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.toString");
+ Node* result =
+ assembler->CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+ assembler->Return(result);
+}
+
+// ES6 section 19.4.3.3 Symbol.prototype.valueOf ( )
+void Builtins::Generate_SymbolPrototypeValueOf(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Node* result = assembler->ToThisValue(
+ context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.valueOf");
+ assembler->Return(result);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
new file mode 100644
index 0000000000..ede04f26d8
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -0,0 +1,101 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// ES6 section 22.2 TypedArray Objects
+
+// ES6 section 22.2.3.1 get %TypedArray%.prototype.buffer
+BUILTIN(TypedArrayPrototypeBuffer) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSTypedArray, typed_array, "get TypedArray.prototype.buffer");
+ return *typed_array->GetBuffer();
+}
+
+namespace {
+
+void Generate_TypedArrayPrototypeGetter(CodeStubAssembler* assembler,
+ const char* method_name,
+ int object_offset) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ // Check if the {receiver} is actually a JSTypedArray.
+ Label if_receiverisincompatible(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+ Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
+ assembler->GotoUnless(
+ assembler->Word32Equal(receiver_instance_type,
+ assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &if_receiverisincompatible);
+
+ // Check if the {receiver}'s JSArrayBuffer was neutered.
+ Node* receiver_buffer =
+ assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Node* receiver_buffer_bit_field = assembler->LoadObjectField(
+ receiver_buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
+ Label if_receiverisneutered(assembler, Label::kDeferred);
+ assembler->GotoUnless(
+ assembler->Word32Equal(
+ assembler->Word32And(
+ receiver_buffer_bit_field,
+ assembler->Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
+ assembler->Int32Constant(0)),
+ &if_receiverisneutered);
+ assembler->Return(assembler->LoadObjectField(receiver, object_offset));
+
+ assembler->Bind(&if_receiverisneutered);
+ {
+ // The {receiver}'s buffer was neutered; default to zero.
+ assembler->Return(assembler->SmiConstant(0));
+ }
+
+ assembler->Bind(&if_receiverisincompatible);
+ {
+ // The {receiver} is not a valid JSTypedArray.
+ Node* result = assembler->CallRuntime(
+ Runtime::kThrowIncompatibleMethodReceiver, context,
+ assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ method_name, TENURED)),
+ receiver);
+ assembler->Return(result); // Never reached.
+ }
+}
+
+} // namespace
+
+// ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
+void Builtins::Generate_TypedArrayPrototypeByteLength(
+ CodeStubAssembler* assembler) {
+ Generate_TypedArrayPrototypeGetter(assembler,
+ "get TypedArray.prototype.byteLength",
+ JSTypedArray::kByteLengthOffset);
+}
+
+// ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
+void Builtins::Generate_TypedArrayPrototypeByteOffset(
+ CodeStubAssembler* assembler) {
+ Generate_TypedArrayPrototypeGetter(assembler,
+ "get TypedArray.prototype.byteOffset",
+ JSTypedArray::kByteOffsetOffset);
+}
+
+// ES6 section 22.2.3.18 get %TypedArray%.prototype.length
+void Builtins::Generate_TypedArrayPrototypeLength(
+ CodeStubAssembler* assembler) {
+ Generate_TypedArrayPrototypeGetter(assembler,
+ "get TypedArray.prototype.length",
+ JSTypedArray::kLengthOffset);
+}
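+
+// Illustrative JS behavior of the getters above (rough sketch, assuming ta
+// is new Uint8Array(new ArrayBuffer(16), 4, 8)):
+//   ta.byteLength;  // 8
+//   ta.byteOffset;  // 4
+//   ta.length;      // 8
+// Once the backing ArrayBuffer has been neutered, each getter returns 0.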
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
new file mode 100644
index 0000000000..90b58c79cd
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -0,0 +1,137 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_UTILS_H_
+#define V8_BUILTINS_BUILTINS_UTILS_H_
+
+#include "src/arguments.h"
+#include "src/base/logging.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// Arguments object passed to C++ builtins.
+class BuiltinArguments : public Arguments {
+ public:
+ BuiltinArguments(int length, Object** arguments)
+ : Arguments(length, arguments) {
+ // Check we have at least the receiver.
+ DCHECK_LE(1, this->length());
+ }
+
+ Object*& operator[](int index) {
+ DCHECK_LT(index, length());
+ return Arguments::operator[](index);
+ }
+
+ template <class S>
+ Handle<S> at(int index) {
+ DCHECK_LT(index, length());
+ return Arguments::at<S>(index);
+ }
+
+ Handle<Object> atOrUndefined(Isolate* isolate, int index) {
+ if (index >= length()) {
+ return isolate->factory()->undefined_value();
+ }
+ return at<Object>(index);
+ }
+
+ Handle<Object> receiver() { return Arguments::at<Object>(0); }
+
+ static const int kNewTargetOffset = 0;
+ static const int kTargetOffset = 1;
+ static const int kArgcOffset = 2;
+ static const int kNumExtraArgs = 3;
+ static const int kNumExtraArgsWithReceiver = 4;
+
+ template <class S>
+ Handle<S> target() {
+ return Arguments::at<S>(Arguments::length() - 1 - kTargetOffset);
+ }
+ Handle<HeapObject> new_target() {
+ return Arguments::at<HeapObject>(Arguments::length() - 1 -
+ kNewTargetOffset);
+ }
+
+ // Gets the total number of arguments including the receiver (but
+ // excluding extra arguments).
+ int length() const { return Arguments::length() - kNumExtraArgs; }
+};
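+
+// For illustration (rough sketch): for a JS call fn(a, b) dispatched to a
+// C++ builtin, args.length() is 3 (the receiver plus two arguments),
+// args.receiver() is the receiver, args.at<Object>(1) is a, and
+// args.atOrUndefined(isolate, 3) is undefined because only two arguments
+// were passed.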
+
+// ----------------------------------------------------------------------------
+// Support macro for defining builtins in C++.
+// ----------------------------------------------------------------------------
+//
+// A builtin function is defined by writing:
+//
+// BUILTIN(name) {
+// ...
+// }
+//
+// In the body of the builtin function the arguments can be accessed
+// through the BuiltinArguments object args.
+// TODO(cbruni): add global flag to check whether any tracing events have been
+// enabled.
+// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
+#define BUILTIN(name) \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
+ Isolate* isolate); \
+ \
+ V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ BuiltinArguments args(args_length, args_object); \
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Builtin_##name); \
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
+ isolate, &tracing::TraceEventStatsTable::Builtin_##name); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ \
+ MUST_USE_RESULT Object* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ CHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
+ if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
+ FLAG_runtime_call_stats)) { \
+ return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
+ } \
+ BuiltinArguments args(args_length, args_object); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
+ Isolate* isolate)
+
+// ----------------------------------------------------------------------------
+
+#define CHECK_RECEIVER(Type, name, method) \
+ if (!args.receiver()->Is##Type()) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, \
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \
+ isolate->factory()->NewStringFromAsciiChecked(method), \
+ args.receiver())); \
+ } \
+ Handle<Type> name = Handle<Type>::cast(args.receiver())
+
+// Throws a TypeError for {method} if the receiver is not coercible to Object,
+// or converts the receiver to a String otherwise and assigns it to a new var
+// with the given {name}.
+#define TO_THIS_STRING(name, method) \
+ if (args.receiver()->IsNull(isolate) || \
+ args.receiver()->IsUndefined(isolate)) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, \
+ NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, \
+ isolate->factory()->NewStringFromAsciiChecked(method))); \
+ } \
+ Handle<String> name; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, name, Object::ToString(isolate, args.receiver()))
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_UTILS_H_
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
new file mode 100644
index 0000000000..dd5b4333ce
--- /dev/null
+++ b/deps/v8/src/builtins/builtins.cc
@@ -0,0 +1,296 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins.h"
+#include "src/code-events.h"
+#include "src/code-stub-assembler.h"
+#include "src/ic/ic-state.h"
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations for C++ builtins.
+#define FORWARD_DECLARE(Name) \
+ Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
+BUILTIN_LIST_C(FORWARD_DECLARE)
+
+Builtins::Builtins() : initialized_(false) {
+ memset(builtins_, 0, sizeof(builtins_[0]) * builtin_count);
+}
+
+Builtins::~Builtins() {}
+
+namespace {
+void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
+ const char* name) {
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
+ AbstractCode::cast(code), name));
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_builtin_code) {
+ CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
+ OFStream os(trace_scope.file());
+ os << "Builtin: " << name << "\n";
+ code->Disassemble(name, os);
+ os << "\n";
+ }
+#endif
+}
+
+typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
+typedef void (*CodeAssemblerGenerator)(CodeStubAssembler*);
+
+Code* BuildWithMacroAssembler(Isolate* isolate,
+ MacroAssemblerGenerator generator,
+ Code::Flags flags, const char* s_name) {
+ HandleScope scope(isolate);
+ const size_t buffer_size = 32 * KB;
+ byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+ MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ DCHECK(!masm.has_frame());
+ generator(&masm);
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+ PostBuildProfileAndTracing(isolate, *code, s_name);
+ return *code;
+}
+
+Code* BuildAdaptor(Isolate* isolate, Address builtin_address,
+ Builtins::ExitFrameType exit_frame_type, Code::Flags flags,
+ const char* name) {
+ HandleScope scope(isolate);
+ const size_t buffer_size = 32 * KB;
+ byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+ MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ DCHECK(!masm.has_frame());
+ Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ Handle<Code> code =
+ isolate->factory()->NewCode(desc, flags, masm.CodeObject());
+ PostBuildProfileAndTracing(isolate, *code, name);
+ return *code;
+}
+
+// Builder for builtins implemented in TurboFan with JS linkage.
+Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
+ CodeAssemblerGenerator generator, int argc,
+ Code::Flags flags, const char* name) {
+ HandleScope scope(isolate);
+ Zone zone(isolate->allocator());
+ CodeStubAssembler assembler(isolate, &zone, argc, flags, name);
+ generator(&assembler);
+ Handle<Code> code = assembler.GenerateCode();
+ PostBuildProfileAndTracing(isolate, *code, name);
+ return *code;
+}
+
+// Builder for builtins implemented in TurboFan with CallStub linkage.
+Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
+ CodeAssemblerGenerator generator,
+ CallDescriptors::Key interface_descriptor,
+ Code::Flags flags, const char* name) {
+ HandleScope scope(isolate);
+ Zone zone(isolate->allocator());
+ // The interface descriptor with the given key must already be initialized
+ // at this point; this construction only queries the details from the
+ // descriptors table.
+ CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
+ // Ensure descriptor is already initialized.
+ DCHECK_LE(0, descriptor.GetRegisterParameterCount());
+ CodeStubAssembler assembler(isolate, &zone, descriptor, flags, name);
+ generator(&assembler);
+ Handle<Code> code = assembler.GenerateCode();
+ PostBuildProfileAndTracing(isolate, *code, name);
+ return *code;
+}
+} // anonymous namespace
+
+void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
+ DCHECK(!initialized_);
+
+ // Create a scope for the handles in the builtins.
+ HandleScope scope(isolate);
+
+ if (create_heap_objects) {
+ int index = 0;
+ const Code::Flags kBuiltinFlags = Code::ComputeFlags(Code::BUILTIN);
+ Code* code;
+#define BUILD_CPP(Name) \
+ code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), BUILTIN_EXIT, \
+ kBuiltinFlags, #Name); \
+ builtins_[index++] = code;
+#define BUILD_API(Name) \
+ code = BuildAdaptor(isolate, FUNCTION_ADDR(Builtin_##Name), EXIT, \
+ kBuiltinFlags, #Name); \
+ builtins_[index++] = code;
+#define BUILD_TFJ(Name, Argc) \
+ code = BuildWithCodeStubAssemblerJS(isolate, &Generate_##Name, Argc, \
+ kBuiltinFlags, #Name); \
+ builtins_[index++] = code;
+#define BUILD_TFS(Name, Kind, Extra, InterfaceDescriptor) \
+ { InterfaceDescriptor##Descriptor descriptor(isolate); } \
+ code = BuildWithCodeStubAssemblerCS( \
+ isolate, &Generate_##Name, CallDescriptors::InterfaceDescriptor, \
+ Code::ComputeFlags(Code::Kind, Extra), #Name); \
+ builtins_[index++] = code;
+#define BUILD_ASM(Name) \
+ code = \
+ BuildWithMacroAssembler(isolate, Generate_##Name, kBuiltinFlags, #Name); \
+ builtins_[index++] = code;
+#define BUILD_ASH(Name, Kind, Extra) \
+ code = BuildWithMacroAssembler( \
+ isolate, Generate_##Name, Code::ComputeFlags(Code::Kind, Extra), #Name); \
+ builtins_[index++] = code;
+
+ BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFS, BUILD_ASM,
+ BUILD_ASH, BUILD_ASM);
+
+#undef BUILD_CPP
+#undef BUILD_API
+#undef BUILD_TFJ
+#undef BUILD_TFS
+#undef BUILD_ASM
+#undef BUILD_ASH
+ CHECK_EQ(builtin_count, index);
+ for (int i = 0; i < builtin_count; i++) {
+ Code::cast(builtins_[i])->set_builtin_index(i);
+ }
+ }
+
+ // Mark as initialized.
+ initialized_ = true;
+}
+
+void Builtins::TearDown() { initialized_ = false; }
+
+void Builtins::IterateBuiltins(ObjectVisitor* v) {
+ v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
+}
+
+const char* Builtins::Lookup(byte* pc) {
+ // May be called during initialization (disassembler!).
+ if (initialized_) {
+ for (int i = 0; i < builtin_count; i++) {
+ Code* entry = Code::cast(builtins_[i]);
+ if (entry->contains(pc)) return name(i);
+ }
+ }
+ return nullptr;
+}
+
+// static
+const char* Builtins::name(int index) {
+ switch (index) {
+#define CASE(Name, ...) \
+ case k##Name: \
+ return #Name;
+ BUILTIN_LIST_ALL(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return "";
+}
+
+// static
+Address Builtins::CppEntryOf(int index) {
+ DCHECK(0 <= index && index < builtin_count);
+ switch (index) {
+#define CASE(Name, ...) \
+ case k##Name: \
+ return FUNCTION_ADDR(Builtin_##Name);
+ BUILTIN_LIST_C(CASE)
+#undef CASE
+ default:
+ return nullptr;
+ }
+ UNREACHABLE();
+}
+
+// static
+bool Builtins::IsCpp(int index) {
+ DCHECK(0 <= index && index < builtin_count);
+ switch (index) {
+#define CASE(Name, ...) \
+ case k##Name: \
+ return true;
+#define BUILTIN_LIST_CPP(V) \
+ BUILTIN_LIST(V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ BUILTIN_LIST_CPP(CASE)
+#undef BUILTIN_LIST_CPP
+#undef CASE
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+// static
+bool Builtins::IsApi(int index) {
+ DCHECK(0 <= index && index < builtin_count);
+ switch (index) {
+#define CASE(Name, ...) \
+ case k##Name: \
+ return true;
+#define BUILTIN_LIST_API(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, V, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ BUILTIN_LIST_API(CASE)
+#undef BUILTIN_LIST_API
+#undef CASE
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+// static
+bool Builtins::HasCppImplementation(int index) {
+ DCHECK(0 <= index && index < builtin_count);
+ switch (index) {
+#define CASE(Name, ...) \
+ case k##Name: \
+ return true;
+ BUILTIN_LIST_C(CASE)
+#undef CASE
+ default:
+ return false;
+ }
+ UNREACHABLE();
+}
+
+#define DEFINE_BUILTIN_ACCESSOR(Name, ...) \
+ Handle<Code> Builtins::Name() { \
+ Code** code_address = reinterpret_cast<Code**>(builtin_address(k##Name)); \
+ return Handle<Code>(code_address); \
+ }
+BUILTIN_LIST_ALL(DEFINE_BUILTIN_ACCESSOR)
+#undef DEFINE_BUILTIN_ACCESSOR
+
+// static
+bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
+ Handle<JSObject> target_global_proxy) {
+ if (FLAG_allow_unsafe_function_constructor) return true;
+ HandleScopeImplementer* impl = isolate->handle_scope_implementer();
+ Handle<Context> responsible_context = impl->LastEnteredContext();
+ if (responsible_context.is_null()) {
+ responsible_context = impl->MicrotaskContext();
+ // TODO(jochen): Remove this.
+ if (responsible_context.is_null()) {
+ return true;
+ }
+ }
+ if (*responsible_context == target->context()) return true;
+ return isolate->MayAccess(responsible_context, target_global_proxy);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
new file mode 100644
index 0000000000..f8ce2e699f
--- /dev/null
+++ b/deps/v8/src/builtins/builtins.h
@@ -0,0 +1,677 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_H_
+#define V8_BUILTINS_BUILTINS_H_
+
+#include "src/base/flags.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+#define CODE_AGE_LIST_WITH_ARG(V, A) \
+ V(Quadragenarian, A) \
+ V(Quinquagenarian, A) \
+ V(Sexagenarian, A) \
+ V(Septuagenarian, A) \
+ V(Octogenarian, A)
+
+#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
+
+#define CODE_AGE_LIST(V) CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
+#define CODE_AGE_LIST_COMPLETE(V) \
+ V(ToBeExecutedOnce) \
+ V(NotExecuted) \
+ V(ExecutedOnce) \
+ V(NoAge) \
+ CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
+
+#define DECLARE_CODE_AGE_BUILTIN(C, V) \
+ V(Make##C##CodeYoungAgainOddMarking) \
+ V(Make##C##CodeYoungAgainEvenMarking)
+
+// CPP: Builtin in C++. Entered via BUILTIN_EXIT frame.
+// Args: name
+// API: Builtin in C++ for API callbacks. Entered via EXIT frame.
+// Args: name
+// TFJ: Builtin in TurboFan, with JS linkage (callable as a JavaScript function).
+// Args: name, arguments count
+// TFS: Builtin in TurboFan, with CodeStub linkage.
+// Args: name, code kind, extra IC state, interface descriptor
+// ASM: Builtin in platform-dependent assembly.
+// Args: name
+// ASH: Handlers implemented in platform-dependent assembly.
+// Args: name, code kind, extra IC state
+// DBG: Builtin in platform-dependent assembly, used by the debugger.
+// Args: name
+#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG) \
+ ASM(Abort) \
+ /* Handlers */ \
+ ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState) \
+ ASM(KeyedLoadIC_Miss) \
+ ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC) \
+ ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState) \
+ ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, \
+ StoreICState::kStrictModeState) \
+ ASM(KeyedStoreIC_Miss) \
+ ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
+ TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
+ TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
+ ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
+ TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
+ ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC) \
+ TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
+ TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
+ ASH(StoreIC_Normal, HANDLER, Code::STORE_IC) \
+ ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
+ TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
+ TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
+ \
+ /* Code aging */ \
+ CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
+ \
+ /* Calls */ \
+ ASM(ArgumentsAdaptorTrampoline) \
+ /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
+ ASM(CallFunction_ReceiverIsNullOrUndefined) \
+ ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
+ ASM(CallFunction_ReceiverIsAny) \
+ ASM(TailCallFunction_ReceiverIsNullOrUndefined) \
+ ASM(TailCallFunction_ReceiverIsNotNullOrUndefined) \
+ ASM(TailCallFunction_ReceiverIsAny) \
+ /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
+ ASM(CallBoundFunction) \
+ ASM(TailCallBoundFunction) \
+ /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
+ ASM(Call_ReceiverIsNullOrUndefined) \
+ ASM(Call_ReceiverIsNotNullOrUndefined) \
+ ASM(Call_ReceiverIsAny) \
+ ASM(TailCall_ReceiverIsNullOrUndefined) \
+ ASM(TailCall_ReceiverIsNotNullOrUndefined) \
+ ASM(TailCall_ReceiverIsAny) \
+ \
+ /* Construct */ \
+ /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
+ ASM(ConstructFunction) \
+ /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */ \
+ ASM(ConstructBoundFunction) \
+ ASM(ConstructedNonConstructable) \
+ /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
+ ASM(ConstructProxy) \
+ /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
+ ASM(Construct) \
+ ASM(JSConstructStubApi) \
+ ASM(JSConstructStubGeneric) \
+ ASM(JSBuiltinsConstructStub) \
+ ASM(JSBuiltinsConstructStubForDerived) \
+ \
+ /* Apply and entries */ \
+ ASM(Apply) \
+ ASM(JSEntryTrampoline) \
+ ASM(JSConstructEntryTrampoline) \
+ ASM(ResumeGeneratorTrampoline) \
+ \
+ /* Stack and interrupt check */ \
+ ASM(InterruptCheck) \
+ ASM(StackCheck) \
+ \
+ /* Interpreter */ \
+ ASM(InterpreterEntryTrampoline) \
+ ASM(InterpreterMarkBaselineOnReturn) \
+ ASM(InterpreterPushArgsAndCall) \
+ ASM(InterpreterPushArgsAndCallFunction) \
+ ASM(InterpreterPushArgsAndConstruct) \
+ ASM(InterpreterPushArgsAndTailCall) \
+ ASM(InterpreterPushArgsAndTailCallFunction) \
+ ASM(InterpreterEnterBytecodeDispatch) \
+ ASM(InterpreterOnStackReplacement) \
+ \
+ /* Code life-cycle */ \
+ ASM(CompileLazy) \
+ ASM(CompileBaseline) \
+ ASM(CompileOptimized) \
+ ASM(CompileOptimizedConcurrent) \
+ ASM(InOptimizationQueue) \
+ ASM(InstantiateAsmJs) \
+ ASM(MarkCodeAsToBeExecutedOnce) \
+ ASM(MarkCodeAsExecutedOnce) \
+ ASM(MarkCodeAsExecutedTwice) \
+ ASM(NotifyDeoptimized) \
+ ASM(NotifySoftDeoptimized) \
+ ASM(NotifyLazyDeoptimized) \
+ ASM(NotifyStubFailure) \
+ ASM(NotifyStubFailureSaveDoubles) \
+ ASM(OnStackReplacement) \
+ \
+ /* API callback handling */ \
+ API(HandleApiCall) \
+ API(HandleApiCallAsFunction) \
+ API(HandleApiCallAsConstructor) \
+ ASM(HandleFastApiCall) \
+ \
+ /* Adapters for TurboFan into runtime */ \
+ ASM(AllocateInNewSpace) \
+ ASM(AllocateInOldSpace) \
+ \
+ /* TurboFan support builtins */ \
+ TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
+ CopyFastSmiOrObjectElements) \
+ TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
+ TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
+ GrowArrayElements) \
+ \
+ /* Debugger */ \
+ DBG(FrameDropper_LiveEdit) \
+ DBG(Return_DebugBreak) \
+ DBG(Slot_DebugBreak) \
+ \
+ /* Type conversions */ \
+ TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ ASM(ToNumber) \
+ \
+ /* Built-in functions for Javascript */ \
+ /* Special internal builtins */ \
+ CPP(EmptyFunction) \
+ CPP(Illegal) \
+ CPP(RestrictedFunctionPropertiesThrower) \
+ CPP(RestrictedStrictArgumentsPropertiesThrower) \
+ CPP(UnsupportedThrower) \
+ \
+ /* Array */ \
+ ASM(ArrayCode) \
+ ASM(InternalArrayCode) \
+ CPP(ArrayConcat) \
+ /* ES6 section 22.1.2.2 Array.isArray */ \
+ TFJ(ArrayIsArray, 2) \
+ /* ES7 #sec-array.prototype.includes */ \
+ TFJ(ArrayIncludes, 3) \
+ TFJ(ArrayIndexOf, 3) \
+ CPP(ArrayPop) \
+ CPP(ArrayPush) \
+ CPP(ArrayShift) \
+ CPP(ArraySlice) \
+ CPP(ArraySplice) \
+ CPP(ArrayUnshift) \
+ \
+ /* ArrayBuffer */ \
+ CPP(ArrayBufferConstructor) \
+ CPP(ArrayBufferConstructor_ConstructStub) \
+ CPP(ArrayBufferPrototypeGetByteLength) \
+ CPP(ArrayBufferIsView) \
+ \
+ /* Boolean */ \
+ CPP(BooleanConstructor) \
+ CPP(BooleanConstructor_ConstructStub) \
+ /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */ \
+ TFJ(BooleanPrototypeToString, 1) \
+ /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */ \
+ TFJ(BooleanPrototypeValueOf, 1) \
+ \
+ /* CallSite */ \
+ CPP(CallSitePrototypeGetColumnNumber) \
+ CPP(CallSitePrototypeGetEvalOrigin) \
+ CPP(CallSitePrototypeGetFileName) \
+ CPP(CallSitePrototypeGetFunction) \
+ CPP(CallSitePrototypeGetFunctionName) \
+ CPP(CallSitePrototypeGetLineNumber) \
+ CPP(CallSitePrototypeGetMethodName) \
+ CPP(CallSitePrototypeGetPosition) \
+ CPP(CallSitePrototypeGetScriptNameOrSourceURL) \
+ CPP(CallSitePrototypeGetThis) \
+ CPP(CallSitePrototypeGetTypeName) \
+ CPP(CallSitePrototypeIsConstructor) \
+ CPP(CallSitePrototypeIsEval) \
+ CPP(CallSitePrototypeIsNative) \
+ CPP(CallSitePrototypeIsToplevel) \
+ CPP(CallSitePrototypeToString) \
+ \
+ /* DataView */ \
+ CPP(DataViewConstructor) \
+ CPP(DataViewConstructor_ConstructStub) \
+ CPP(DataViewPrototypeGetBuffer) \
+ CPP(DataViewPrototypeGetByteLength) \
+ CPP(DataViewPrototypeGetByteOffset) \
+ \
+ /* Date */ \
+ CPP(DateConstructor) \
+ CPP(DateConstructor_ConstructStub) \
+ /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */ \
+ ASM(DatePrototypeGetDate) \
+ /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */ \
+ ASM(DatePrototypeGetDay) \
+ /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */ \
+ ASM(DatePrototypeGetFullYear) \
+ /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */ \
+ ASM(DatePrototypeGetHours) \
+ /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */ \
+ ASM(DatePrototypeGetMilliseconds) \
+ /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */ \
+ ASM(DatePrototypeGetMinutes) \
+ /* ES6 section 20.3.4.8 Date.prototype.getMonth */ \
+ ASM(DatePrototypeGetMonth) \
+ /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */ \
+ ASM(DatePrototypeGetSeconds) \
+ /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */ \
+ ASM(DatePrototypeGetTime) \
+ /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */ \
+ ASM(DatePrototypeGetTimezoneOffset) \
+ /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */ \
+ ASM(DatePrototypeGetUTCDate) \
+ /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */ \
+ ASM(DatePrototypeGetUTCDay) \
+ /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */ \
+ ASM(DatePrototypeGetUTCFullYear) \
+ /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */ \
+ ASM(DatePrototypeGetUTCHours) \
+ /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */ \
+ ASM(DatePrototypeGetUTCMilliseconds) \
+ /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */ \
+ ASM(DatePrototypeGetUTCMinutes) \
+ /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */ \
+ ASM(DatePrototypeGetUTCMonth) \
+ /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */ \
+ ASM(DatePrototypeGetUTCSeconds) \
+ CPP(DatePrototypeGetYear) \
+ CPP(DatePrototypeSetYear) \
+ CPP(DateNow) \
+ CPP(DateParse) \
+ CPP(DatePrototypeSetDate) \
+ CPP(DatePrototypeSetFullYear) \
+ CPP(DatePrototypeSetHours) \
+ CPP(DatePrototypeSetMilliseconds) \
+ CPP(DatePrototypeSetMinutes) \
+ CPP(DatePrototypeSetMonth) \
+ CPP(DatePrototypeSetSeconds) \
+ CPP(DatePrototypeSetTime) \
+ CPP(DatePrototypeSetUTCDate) \
+ CPP(DatePrototypeSetUTCFullYear) \
+ CPP(DatePrototypeSetUTCHours) \
+ CPP(DatePrototypeSetUTCMilliseconds) \
+ CPP(DatePrototypeSetUTCMinutes) \
+ CPP(DatePrototypeSetUTCMonth) \
+ CPP(DatePrototypeSetUTCSeconds) \
+ CPP(DatePrototypeToDateString) \
+ CPP(DatePrototypeToISOString) \
+ CPP(DatePrototypeToPrimitive) \
+ CPP(DatePrototypeToUTCString) \
+ CPP(DatePrototypeToString) \
+ CPP(DatePrototypeToTimeString) \
+ CPP(DatePrototypeValueOf) \
+ CPP(DatePrototypeToJson) \
+ CPP(DateUTC) \
+ \
+ /* Error */ \
+ CPP(ErrorConstructor) \
+ CPP(ErrorCaptureStackTrace) \
+ CPP(ErrorPrototypeToString) \
+ CPP(MakeError) \
+ CPP(MakeRangeError) \
+ CPP(MakeSyntaxError) \
+ CPP(MakeTypeError) \
+ CPP(MakeURIError) \
+ \
+ /* Function */ \
+ CPP(FunctionConstructor) \
+ ASM(FunctionPrototypeApply) \
+ CPP(FunctionPrototypeBind) \
+ ASM(FunctionPrototypeCall) \
+ /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */ \
+ TFJ(FunctionPrototypeHasInstance, 2) \
+ CPP(FunctionPrototypeToString) \
+ \
+ /* Generator and Async */ \
+ CPP(GeneratorFunctionConstructor) \
+ /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */ \
+ TFJ(GeneratorPrototypeNext, 2) \
+ /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */ \
+ TFJ(GeneratorPrototypeReturn, 2) \
+ /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */ \
+ TFJ(GeneratorPrototypeThrow, 2) \
+ CPP(AsyncFunctionConstructor) \
+ \
+ /* Encode and decode */ \
+ CPP(GlobalDecodeURI) \
+ CPP(GlobalDecodeURIComponent) \
+ CPP(GlobalEncodeURI) \
+ CPP(GlobalEncodeURIComponent) \
+ CPP(GlobalEscape) \
+ CPP(GlobalUnescape) \
+ \
+ /* Eval */ \
+ CPP(GlobalEval) \
+ \
+ /* JSON */ \
+ CPP(JsonParse) \
+ CPP(JsonStringify) \
+ \
+ /* Math */ \
+ /* ES6 section 20.2.2.1 Math.abs ( x ) */ \
+ TFJ(MathAbs, 2) \
+ /* ES6 section 20.2.2.2 Math.acos ( x ) */ \
+ TFJ(MathAcos, 2) \
+ /* ES6 section 20.2.2.3 Math.acosh ( x ) */ \
+ TFJ(MathAcosh, 2) \
+ /* ES6 section 20.2.2.4 Math.asin ( x ) */ \
+ TFJ(MathAsin, 2) \
+ /* ES6 section 20.2.2.5 Math.asinh ( x ) */ \
+ TFJ(MathAsinh, 2) \
+ /* ES6 section 20.2.2.6 Math.atan ( x ) */ \
+ TFJ(MathAtan, 2) \
+ /* ES6 section 20.2.2.7 Math.atanh ( x ) */ \
+ TFJ(MathAtanh, 2) \
+ /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */ \
+ TFJ(MathAtan2, 3) \
+ /* ES6 section 20.2.2.9 Math.cbrt ( x ) */ \
+ TFJ(MathCbrt, 2) \
+ /* ES6 section 20.2.2.10 Math.ceil ( x ) */ \
+ TFJ(MathCeil, 2) \
+ /* ES6 section 20.2.2.11 Math.clz32 ( x ) */ \
+ TFJ(MathClz32, 2) \
+ /* ES6 section 20.2.2.12 Math.cos ( x ) */ \
+ TFJ(MathCos, 2) \
+ /* ES6 section 20.2.2.13 Math.cosh ( x ) */ \
+ TFJ(MathCosh, 2) \
+ /* ES6 section 20.2.2.14 Math.exp ( x ) */ \
+ TFJ(MathExp, 2) \
+ /* ES6 section 20.2.2.15 Math.expm1 ( x ) */ \
+ TFJ(MathExpm1, 2) \
+ /* ES6 section 20.2.2.16 Math.floor ( x ) */ \
+ TFJ(MathFloor, 2) \
+ /* ES6 section 20.2.2.17 Math.fround ( x ) */ \
+ TFJ(MathFround, 2) \
+ /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */ \
+ CPP(MathHypot) \
+ /* ES6 section 20.2.2.19 Math.imul ( x, y ) */ \
+ TFJ(MathImul, 3) \
+ /* ES6 section 20.2.2.20 Math.log ( x ) */ \
+ TFJ(MathLog, 2) \
+ /* ES6 section 20.2.2.21 Math.log1p ( x ) */ \
+ TFJ(MathLog1p, 2) \
+ /* ES6 section 20.2.2.22 Math.log10 ( x ) */ \
+ TFJ(MathLog10, 2) \
+ /* ES6 section 20.2.2.23 Math.log2 ( x ) */ \
+ TFJ(MathLog2, 2) \
+ /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */ \
+ ASM(MathMax) \
+ /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */ \
+ ASM(MathMin) \
+ /* ES6 section 20.2.2.26 Math.pow ( x, y ) */ \
+ TFJ(MathPow, 3) \
+ /* ES6 section 20.2.2.28 Math.round ( x ) */ \
+ TFJ(MathRound, 2) \
+ /* ES6 section 20.2.2.29 Math.sign ( x ) */ \
+ TFJ(MathSign, 2) \
+ /* ES6 section 20.2.2.30 Math.sin ( x ) */ \
+ TFJ(MathSin, 2) \
+ /* ES6 section 20.2.2.31 Math.sinh ( x ) */ \
+ TFJ(MathSinh, 2) \
+  /* ES6 section 20.2.2.33 Math.tan ( x ) */                                 \
+  TFJ(MathTan, 2)                                                            \
+  /* ES6 section 20.2.2.34 Math.tanh ( x ) */                                \
+  TFJ(MathTanh, 2)                                                           \
+  /* ES6 section 20.2.2.32 Math.sqrt ( x ) */                                \
+  TFJ(MathSqrt, 2)                                                           \
+ /* ES6 section 20.2.2.35 Math.trunc ( x ) */ \
+ TFJ(MathTrunc, 2) \
+ \
+ /* Number */ \
+ /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
+ ASM(NumberConstructor) \
+ /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
+ ASM(NumberConstructor_ConstructStub) \
+ CPP(NumberPrototypeToExponential) \
+ CPP(NumberPrototypeToFixed) \
+ CPP(NumberPrototypeToLocaleString) \
+ CPP(NumberPrototypeToPrecision) \
+ CPP(NumberPrototypeToString) \
+ /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */ \
+ TFJ(NumberPrototypeValueOf, 1) \
+ \
+ /* Object */ \
+ CPP(ObjectAssign) \
+ CPP(ObjectCreate) \
+ CPP(ObjectDefineGetter) \
+ CPP(ObjectDefineProperties) \
+ CPP(ObjectDefineProperty) \
+ CPP(ObjectDefineSetter) \
+ CPP(ObjectEntries) \
+ CPP(ObjectFreeze) \
+ CPP(ObjectGetOwnPropertyDescriptor) \
+ CPP(ObjectGetOwnPropertyDescriptors) \
+ CPP(ObjectGetOwnPropertyNames) \
+ CPP(ObjectGetOwnPropertySymbols) \
+ CPP(ObjectGetPrototypeOf) \
+ /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */ \
+ TFJ(ObjectHasOwnProperty, 2) \
+ CPP(ObjectIs) \
+ CPP(ObjectIsExtensible) \
+ CPP(ObjectIsFrozen) \
+ CPP(ObjectIsSealed) \
+ CPP(ObjectKeys) \
+ CPP(ObjectLookupGetter) \
+ CPP(ObjectLookupSetter) \
+ CPP(ObjectPreventExtensions) \
+ /* ES6 section 19.1.3.6 Object.prototype.toString () */ \
+ TFJ(ObjectProtoToString, 1) \
+ CPP(ObjectPrototypePropertyIsEnumerable) \
+ CPP(ObjectSeal) \
+ CPP(ObjectValues) \
+ \
+ /* Proxy */ \
+ CPP(ProxyConstructor) \
+ CPP(ProxyConstructor_ConstructStub) \
+ \
+ /* Reflect */ \
+ ASM(ReflectApply) \
+ ASM(ReflectConstruct) \
+ CPP(ReflectDefineProperty) \
+ CPP(ReflectDeleteProperty) \
+ CPP(ReflectGet) \
+ CPP(ReflectGetOwnPropertyDescriptor) \
+ CPP(ReflectGetPrototypeOf) \
+ CPP(ReflectHas) \
+ CPP(ReflectIsExtensible) \
+ CPP(ReflectOwnKeys) \
+ CPP(ReflectPreventExtensions) \
+ CPP(ReflectSet) \
+ CPP(ReflectSetPrototypeOf) \
+ \
+ /* SharedArrayBuffer */ \
+ CPP(SharedArrayBufferPrototypeGetByteLength) \
+ TFJ(AtomicsLoad, 3) \
+ TFJ(AtomicsStore, 4) \
+ \
+ /* String */ \
+ ASM(StringConstructor) \
+ ASM(StringConstructor_ConstructStub) \
+ CPP(StringFromCodePoint) \
+ /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */ \
+ TFJ(StringFromCharCode, 2) \
+ /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */ \
+ TFJ(StringPrototypeCharAt, 2) \
+ /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */ \
+ TFJ(StringPrototypeCharCodeAt, 2) \
+ /* ES6 section 21.1.3.25 String.prototype.toString () */ \
+ TFJ(StringPrototypeToString, 1) \
+ CPP(StringPrototypeTrim) \
+ CPP(StringPrototypeTrimLeft) \
+ CPP(StringPrototypeTrimRight) \
+ /* ES6 section 21.1.3.28 String.prototype.valueOf () */ \
+ TFJ(StringPrototypeValueOf, 1) \
+ \
+ /* Symbol */ \
+ CPP(SymbolConstructor) \
+ CPP(SymbolConstructor_ConstructStub) \
+ /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */ \
+ TFJ(SymbolPrototypeToPrimitive, 2) \
+ /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */ \
+ TFJ(SymbolPrototypeToString, 1) \
+ /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */ \
+ TFJ(SymbolPrototypeValueOf, 1) \
+ \
+ /* TypedArray */ \
+ CPP(TypedArrayPrototypeBuffer) \
+ /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */ \
+ TFJ(TypedArrayPrototypeByteLength, 1) \
+ /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */ \
+ TFJ(TypedArrayPrototypeByteOffset, 1) \
+ /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */ \
+ TFJ(TypedArrayPrototypeLength, 1)
+
+#define IGNORE_BUILTIN(...)
+
+#define BUILTIN_LIST_ALL(V) BUILTIN_LIST(V, V, V, V, V, V, V)
+
+#define BUILTIN_LIST_C(V) \
+ BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN)
+
+#define BUILTIN_LIST_A(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ V, V, V)
+
+#define BUILTIN_LIST_DBG(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, V)
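// A minimal, self-contained sketch of the X-macro filtering used by the
// BUILTIN_LIST_* helpers above; the names below are illustrative and not
// from the V8 tree. Passing an ignore macro for a slot simply drops that
// builtin kind from the expansion.
#include <cstdio>

#define MY_BUILTIN_LIST(CPP, TFJ) \
  CPP(Foo)                        \
  TFJ(Bar, 2)                     \
  CPP(Baz)

#define MY_IGNORE_BUILTIN(...)
#define PRINT_CPP(Name) std::printf("C++ builtin: %s\n", #Name);

int main() {
  // Mirrors BUILTIN_LIST_C: expands only the CPP(...) entries (Foo, Baz).
  MY_BUILTIN_LIST(PRINT_CPP, MY_IGNORE_BUILTIN)
  return 0;
}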
+
+// Forward declarations.
+class CodeStubAssembler;
+class ObjectVisitor;
+
+class Builtins {
+ public:
+ ~Builtins();
+
+ // Generate all builtin code objects. Should be called once during
+ // isolate initialization.
+ void SetUp(Isolate* isolate, bool create_heap_objects);
+ void TearDown();
+
+ // Garbage collection support.
+ void IterateBuiltins(ObjectVisitor* v);
+
+ // Disassembler support.
+ const char* Lookup(byte* pc);
+
+ enum Name {
+#define DEF_ENUM(Name, ...) k##Name,
+ BUILTIN_LIST_ALL(DEF_ENUM)
+#undef DEF_ENUM
+ builtin_count
+ };
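// Sketch of the counting idiom DEF_ENUM exploits: every list entry becomes
// one enumerator, so the trailing enumerator equals the number of entries
// and can size arrays such as builtins_[] below. Illustrative names only.
#define COLOR_LIST(V) V(Red) V(Green) V(Blue)

enum Color {
#define DEF_COLOR_ENUM(Name) k##Name,
  COLOR_LIST(DEF_COLOR_ENUM)
#undef DEF_COLOR_ENUM
  color_count
};
static_assert(color_count == 3, "one enumerator per list entry");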
+
+#define DECLARE_BUILTIN_ACCESSOR(Name, ...) Handle<Code> Name();
+ BUILTIN_LIST_ALL(DECLARE_BUILTIN_ACCESSOR)
+#undef DECLARE_BUILTIN_ACCESSOR
+
+ // Convenience wrappers.
+ Handle<Code> CallFunction(
+ ConvertReceiverMode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ Handle<Code> CallBoundFunction(TailCallMode tail_call_mode);
+ Handle<Code> NonPrimitiveToPrimitive(
+ ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+ Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
+ Handle<Code> InterpreterPushArgsAndCall(
+ TailCallMode tail_call_mode,
+ CallableType function_type = CallableType::kAny);
+
+ Code* builtin(Name name) {
+ // Code::cast cannot be used here since we access builtins
+ // during the marking phase of mark sweep. See IC::Clear.
+ return reinterpret_cast<Code*>(builtins_[name]);
+ }
+
+ Address builtin_address(Name name) {
+ return reinterpret_cast<Address>(&builtins_[name]);
+ }
+
+ static const char* name(int index);
+
+ // Returns the C++ entry point for builtins implemented in C++, and the null
+ // Address otherwise.
+ static Address CppEntryOf(int index);
+
+ static bool IsCpp(int index);
+ static bool IsApi(int index);
+ static bool HasCppImplementation(int index);
+
+ bool is_initialized() const { return initialized_; }
+
+ MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
+ Isolate* isolate, bool is_construct, Handle<HeapObject> function,
+ Handle<Object> receiver, int argc, Handle<Object> args[],
+ Handle<HeapObject> new_target);
+
+ enum ExitFrameType { EXIT, BUILTIN_EXIT };
+
+ static void Generate_Adaptor(MacroAssembler* masm, Address builtin_address,
+ ExitFrameType exit_frame_type);
+
+ static bool AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
+ Handle<JSObject> target_global_proxy);
+
+ private:
+ Builtins();
+
+ static void Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode);
+
+ static void Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode);
+
+ static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode);
+
+ static void Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type);
+
+ static void Generate_DatePrototype_GetField(MacroAssembler* masm,
+ int field_index);
+
+ enum class MathMaxMinKind { kMax, kMin };
+ static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
+
+#define DECLARE_ASM(Name, ...) \
+ static void Generate_##Name(MacroAssembler* masm);
+#define DECLARE_TF(Name, ...) \
+ static void Generate_##Name(CodeStubAssembler* csasm);
+
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
+ DECLARE_ASM, DECLARE_ASM, DECLARE_ASM)
+
+#undef DECLARE_ASM
+#undef DECLARE_TF
+
+ // Note: These are always Code objects, but to conform with
+ // IterateBuiltins() above which assumes Object**'s for the callback
+ // function f, we use an Object* array here.
+ Object* builtins_[builtin_count];
+ bool initialized_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(Builtins);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_H_
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index b7e33d9a74..f31ba6fcf1 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -13,13 +13,10 @@
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
// -- edi : target
@@ -38,26 +35,22 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ordinary functions).
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args != BuiltinExtraArguments::kNone) {
- __ PopReturnAddressTo(ecx);
- if (extra_args & BuiltinExtraArguments::kTarget) {
- ++num_extra_args;
- __ Push(edi);
- }
- if (extra_args & BuiltinExtraArguments::kNewTarget) {
- ++num_extra_args;
- __ Push(edx);
- }
- __ PushReturnAddressFrom(ecx);
- }
-
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ add(eax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+ // Insert extra arguments.
+ __ PopReturnAddressTo(ecx);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ SmiUntag(eax);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ exit_frame_type == BUILTIN_EXIT);
}
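// For reference, a sketch (derived from the pushes above, not authoritative)
// of the stack Generate_Adaptor leaves before jumping to the C++ builtin:
//
//   esp[0]  : return address
//   esp[4]  : new.target (edx)
//   esp[8]  : target (edi)
//   esp[12] : argc as a Smi (eax)
//   esp[16] : last argument
//   ...     : remaining arguments, then the receiver
//
// This is why num_extra_args is 3 and eax is bumped by num_extra_args + 1
// (the +1 accounts for the receiver).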
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -186,16 +179,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(greater_equal, &loop);
// Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -258,38 +244,31 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ret(0);
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
-
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edi);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-
enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
-
// Clobbers ecx, edx, edi; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
IsTagged eax_is_tagged) {
@@ -321,7 +300,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ bind(&okay);
}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -355,7 +333,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ jmp(&entry, Label::kNear);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
+ __ push(Operand(edx, 0)); // dereference handle
__ inc(ecx);
__ bind(&entry);
__ cmp(ecx, eax);
@@ -381,16 +359,192 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ret(kPointerSize); // Remove receiver.
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the value to pass to the generator
+ // -- ebx : the JSGeneratorObject to resume
+ // -- edx : the resume mode (tagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(ebx);
+
+ // Store input value into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
+ __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
+ kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
+
+ // Load suspended function and context.
+ __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+ __ j(greater_equal, &prepare_step_in_if_stepping);
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
+ __ j(equal, &prepare_step_in_suspended_generator);
+ __ bind(&stepping_prepared);
+
+ // Pop return address.
+ __ PopReturnAddressTo(eax);
+
+ // Push receiver.
+ __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+ // ----------- S t a t e -------------
+ // -- eax : return address
+ // -- ebx : the JSGeneratorObject to resume
+ // -- edx : the resume mode (tagged)
+ // -- edi : generator function
+ // -- esi : generator context
+ // -- esp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ sub(ecx, Immediate(Smi::FromInt(1)));
+ __ j(carry, &done_loop, Label::kNear);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+ __ j(not_equal, &old_generator);
+
+  // New-style (Ignition/TurboFan) generator object
+ {
+ __ PushReturnAddressFrom(eax);
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ mov(edx, ebx);
+ __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PushReturnAddressFrom(eax); // Return address.
+ __ Push(ebp); // Caller's frame pointer.
+ __ Move(ebp, esp);
+ __ Push(esi); // Callee's context.
+ __ Push(edi); // Callee's JS Function.
+
+ // Restore the operand stack.
+ __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+ {
+ Label done_loop, loop;
+ __ Move(ecx, Smi::FromInt(0));
+ __ bind(&loop);
+ __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ j(equal, &done_loop, Label::kNear);
+ __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ add(ecx, Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
+ Immediate(masm->isolate()->factory()->empty_fixed_array()));
+
+ // Resume the generator function at the continuation.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(ecx);
+ __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ mov(eax, ebx); // Continuation expects generator object in eax.
+ __ jmp(edx);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edx);
+ __ Push(edi);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(edx);
+ __ Pop(ebx);
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ jmp(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edx);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(edx);
+ __ Pop(ebx);
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register args_count = scratch1;
+ Register return_pc = scratch2;
+
+  // Get the arguments + receiver count.
+ __ mov(args_count,
+ Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(args_count,
+ FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+
+ // Drop receiver + arguments.
+ __ pop(return_pc);
+ __ add(esp, args_count);
+ __ push(return_pc);
+}
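// A tiny sketch (not V8 code) of the teardown LeaveInterpreterFrame emits:
// pop the return PC, drop receiver + arguments, then re-push the PC so the
// subsequent ret still returns to the caller.
#include <cassert>
#include <vector>

int main() {
  // Model the stack top as the back of the vector: receiver, 2 args, PC.
  std::vector<int> stack = {/*receiver*/ 0, /*args*/ 1, 2, /*pc*/ 1234};
  int return_pc = stack.back();      // __ pop(return_pc)
  stack.pop_back();
  stack.resize(stack.size() - 3);    // __ add(esp, args_count)
  stack.push_back(return_pc);        // __ push(return_pc)
  assert(stack.size() == 1 && stack.back() == 1234);
  return 0;
}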
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -407,6 +561,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -417,10 +573,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into edi (InterpreterBytecodeRegister).
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-
Label load_debug_bytecode_array, bytecode_array_loaded;
__ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
Immediate(DebugInfo::uninitialized()));
@@ -429,8 +584,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ Move(ecx, masm->CodeObject()); // Self-reference to this code.
+ __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+ __ j(not_equal, &switch_to_different_code_kind);
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
eax);
@@ -439,8 +600,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
- // Push zero for bytecode array offset.
- __ push(Immediate(0));
+ // Push Smi tagged initial bytecode array offset.
+ __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
// Allocate the local and temporary register file on the stack.
{
@@ -473,74 +634,75 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(greater_equal, &loop_header);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator, bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ mov(kInterpreterRegisterFileRegister, ebp);
- __ add(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
-
- // Push dispatch table as a stack located parameter to the bytecode handler.
- DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ push(ebx);
+ __ mov(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
- __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
- // Restore undefined_value in accumulator (eax)
- // TODO(rmcilroy): Remove this once we move the dispatch table back into a
- // register.
- __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+ times_pointer_size, 0));
__ call(ebx);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in eax.
+ LeaveInterpreterFrame(masm, ebx, ecx);
+ __ ret(0);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
Register debug_info = kInterpreterBytecodeArrayRegister;
__ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ jmp(&bytecode_array_loaded);
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ pop(edx); // Callee's new target.
+ __ pop(edi); // Callee's JS function.
+ __ pop(esi); // Callee's context.
+ __ leave(); // Leave the frame so we can tail call.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
+ __ RecordWriteCodeEntryField(edi, ecx, ebx);
+ __ jmp(ecx);
}
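// A rough sketch of the dispatch scheme the trampoline sets up: read one
// opcode byte (the movzx_b above), index a table of handler addresses, and
// jump. In the real interpreter each handler tail-dispatches to the next
// bytecode itself; the loop and handler names below are illustrative.
#include <cstdint>
#include <cstdio>

using Handler = void (*)();
static void HandleLdaZero() { std::puts("LdaZero"); }
static void HandleReturn() { std::puts("Return"); }

int main() {
  const uint8_t bytecode[] = {0, 1};  // two made-up opcodes
  const Handler dispatch_table[] = {HandleLdaZero, HandleReturn};
  for (uint8_t op : bytecode) {
    dispatch_table[op]();  // table load + indirect call
  }
  return 0;
}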
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ mov(kContextRegister,
+ Operand(ebp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, ebx, ecx);
- // The return value is in accumulator, which is already in rax.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(eax);
- // Leave the frame (also dropping the register file).
- __ leave();
+ // Push function as argument and compile for baseline.
+ __ push(edi);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ pop(ecx);
- __ add(esp, ebx);
- __ push(ecx);
+ // Restore return value.
+ __ pop(eax);
+ }
__ ret(0);
}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register array_limit) {
// ----------- S t a t e -------------
@@ -558,10 +720,10 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ j(greater, &loop_header, Label::kNear);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -584,11 +746,18 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// Call the target.
__ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
-}
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
+}
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
@@ -627,17 +796,26 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ LoadHeapObject(ebx,
+ masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+ __ push(ebx);
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register.
- __ mov(kInterpreterRegisterFileRegister, ebp);
- __ add(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize the dispatch table register.
+ __ mov(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the bytecode array pointer from the frame.
__ mov(kInterpreterBytecodeArrayRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -648,104 +826,252 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ mov(
- kInterpreterBytecodeOffsetRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Push dispatch table as a stack located parameter to the bytecode handler.
- __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
- DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ Pop(esi);
- __ Push(ebx);
- __ Push(esi);
-
// Dispatch to the target bytecode.
- __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
-
- // Get the context from the frame.
- __ mov(kContextRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+ times_pointer_size, 0));
__ jmp(ebx);
}
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+ // First lookup code, maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = edi;
+ Register new_target = edx;
+ Register argument_count = eax;
+
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = argument_count;
+ Register index = ebx;
+ __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
+ __ cmp(index, Immediate(Smi::FromInt(2)));
+ __ j(less, &gotta_call_runtime);
+
+ // Find literals.
+ // edx : native context
+ // ebx : length / index
+ // eax : optimized code map
+  // stack[0] : closure
+  // stack[4] : new target
+  // stack[8] : argument count
+ Register native_context = edx;
+ __ mov(native_context, NativeContextOperand());
+
+ __ bind(&loop_top);
+ Register temp = edi;
+
+ // Does the native context match?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ cmp(temp, native_context);
+ __ j(not_equal, &loop_bottom);
+ // OSR id set to none?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
+ __ j(not_equal, &loop_bottom);
+ // Literals available?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ mov(ecx, Operand(esp, 0));
+ __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
+ __ push(index);
+ __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = ecx;
+ __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, eax);
+
+ // Link the closure into the optimized function list.
+ // ecx : code entry
+ // edx : native context
+ // edi : closure
+ __ mov(ebx,
+ ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+ closure);
+ // Save closure before the write barrier.
+ __ mov(ebx, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
+ kDontSaveFPRegs);
+ __ mov(closure, ebx);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ jmp(entry);
+
+ __ bind(&loop_bottom);
+ __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ cmp(index, Immediate(Smi::FromInt(1)));
+ __ j(greater, &loop_top);
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context free optimized code map entry.
+ __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
+ __ and_(ebx, Code::KindField::kMask);
+ __ shr(ebx, Code::KindField::kShift);
+ __ cmp(ebx, Immediate(Code::BUILTIN));
+ __ j(equal, &gotta_call_runtime_no_stack);
+ // Yes, install the full code.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, ebx);
+ __ jmp(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register and push PC at top
- // of stack (to simulate initial call to bytecode handler in interpreter entry
- // trampoline).
- __ Pop(ebx);
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
- __ Push(ebx);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
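// A structural sketch of the probe Generate_CompileLazy performs over the
// optimized code map; field and type names are illustrative, not V8's.
struct CodeMapEntry {
  const void* native_context;  // weak; null once cleared
  int osr_ast_id;              // must equal the "none" sentinel to reuse
  void* literals;              // weak; null once cleared
  void* code;                  // weak; null once cleared
};

// Walk from the end of the map toward the front, as the loop above does.
const CodeMapEntry* Probe(const CodeMapEntry* entries, int count,
                          const void* native_context, int none_sentinel) {
  for (int i = count - 1; i >= 0; --i) {
    const CodeMapEntry& e = entries[i];
    if (e.native_context == native_context &&
        e.osr_ast_id == none_sentinel && e.literals && e.code) {
      return &e;  // install literals and the code entry into the closure
    }
  }
  return nullptr;  // fall back to Runtime::kCompileLazy
}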
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
}
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve argument count for later compare.
+ __ mov(ecx, eax);
+ // Push the number of arguments to the callee.
+ __ SmiTag(eax);
+ __ push(eax);
+ // Push a copy of the target function and the new target.
+ __ push(edi);
+ __ push(edx);
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
+ // The function.
+ __ push(edi);
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ cmp(ecx, Immediate(j));
+ __ j(not_equal, &over, Label::kNear);
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ Push(Operand(
+ ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done, Label::kNear);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+    // Call the runtime; on success, unwind this frame and the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(eax, &failed, Label::kNear);
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
+ __ Drop(2);
+ __ Pop(ecx);
+ __ SmiUntag(ecx);
+ scope.GenerateLeaveFrame();
+ __ PopReturnAddressTo(ebx);
+ __ inc(ecx);
+ __ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
+ __ PushReturnAddressFrom(ebx);
+ __ ret(0);
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ // On failure, tail call back to regular js.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
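// Sketch of the 32-bit Smi encoding behind the SmiTag/SmiUntag pairs above:
// one low tag bit (0 for Smis), payload in the upper 31 bits. Illustrative,
// not V8's actual helpers.
#include <cassert>
#include <cstdint>

inline int32_t SmiTag(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
inline int32_t SmiUntag(int32_t tagged) {
  return tagged >> 1;  // arithmetic shift, as the ia32 code relies on
}
inline bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }

int main() {
  assert(IsSmi(SmiTag(42)) && SmiUntag(SmiTag(42)) == 42);
  assert(SmiUntag(SmiTag(-7)) == -7);
  return 0;
}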
-
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
@@ -771,19 +1097,18 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
@@ -816,17 +1141,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ ret(0);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
// Enter an internal frame.
@@ -846,17 +1168,14 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -875,13 +1194,14 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_eax;
- __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+ __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
__ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, FullCodeGenerator::TOS_REG);
+ __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
__ j(not_equal, &not_tos_eax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, eax.
@@ -889,26 +1209,25 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ Abort(kNoCasesLeft);
}
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
@@ -951,35 +1270,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ bind(&receiver_not_date);
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
+ __ Move(ebx, Immediate(0));
+ __ EnterBuiltinFrame(esi, edi, ebx);
__ CallRuntime(Runtime::kThrowNotDateError);
}
}
// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : first argument (left-hand side)
- // -- esp[8] : receiver (right-hand side)
- // -----------------------------------
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(InstanceOfDescriptor::LeftRegister(),
- Operand(ebp, 2 * kPointerSize)); // Load left-hand side.
- __ mov(InstanceOfDescriptor::RightRegister(),
- Operand(ebp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
- }
-
- // Pop the argument and the receiver.
- __ ret(2 * kPointerSize);
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1056,7 +1353,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
@@ -1103,7 +1399,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1244,7 +1539,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1273,7 +1567,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1303,11 +1596,12 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- eax : number of arguments
+ // -- edi : function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 8] : arg[n] (zero-based)
// -- esp[(argc + 1) * 8] : receiver
@@ -1335,27 +1629,28 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
// Load the double value of the parameter into xmm1, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(ebx, &convert_smi);
__ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ SmiTag(eax);
__ SmiTag(ecx);
- __ Push(eax);
+ __ EnterBuiltinFrame(esi, edi, eax);
__ Push(ecx);
__ Push(edx);
__ mov(eax, ebx);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ mov(ebx, eax);
__ Pop(edx);
__ Pop(ecx);
- __ Pop(eax);
+ __ LeaveBuiltinFrame(esi, edi, eax);
+ __ SmiUntag(ecx);
+ __ SmiUntag(eax);
{
// Restore the double accumulator value (xmm0).
Label restore_smi, done_restore;
@@ -1368,8 +1663,6 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ SmiTag(edx);
__ bind(&done_restore);
}
- __ SmiUntag(ecx);
- __ SmiUntag(eax);
}
__ jmp(&convert);
__ bind(&convert_number);
@@ -1403,8 +1696,10 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// Left and right hand side are equal, check for -0 vs. +0.
__ bind(&compare_equal);
+ __ Push(edi); // Preserve function in edi.
__ movmskpd(edi, reg);
__ test(edi, Immediate(1));
+ __ Pop(edi);
__ j(not_zero, &compare_swap);
__ bind(&done_compare);
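// Sketch of the -0 vs. +0 tie-break done with movmskpd above: the operands
// compare equal, so the sign bit must decide (NaN handling omitted here).
#include <cassert>
#include <cmath>

double MaxPreferringPlusZero(double a, double b) {
  if (a == b) return std::signbit(a) ? b : a;  // pick +0 over -0 for max
  return a > b ? a : b;
}

int main() {
  assert(!std::signbit(MaxPreferringPlusZero(-0.0, 0.0)));
  assert(std::signbit(MaxPreferringPlusZero(-0.0, -0.0)));
  return 0;
}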
@@ -1425,40 +1720,51 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into eax and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into ebx.
Label no_arguments;
{
__ test(eax, eax);
__ j(zero, &no_arguments, Label::kNear);
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ mov(eax, ebx);
}
// 2a. Convert the first argument to a number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(eax);
+ __ EnterBuiltinFrame(esi, edi, eax);
+ __ mov(eax, ebx);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(esi, edi, ebx); // Argc popped to ebx.
+ __ SmiUntag(ebx);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ Ret();
+ }
// 2b. No arguments, return +0 (already in eax).
__ bind(&no_arguments);
__ ret(1 * kPointerSize);
}
-
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
// -- edx : new target
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
@@ -1467,8 +1773,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // 2. Load the first argument into ebx and get rid of the rest (including the
- // receiver).
+  // Store the Smi-tagged argc in ecx.
+ __ mov(ecx, eax);
+ __ SmiTag(ecx);
+
+ // 2. Load the first argument into ebx.
{
Label no_arguments, done;
__ test(eax, eax);
@@ -1478,9 +1787,6 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&no_arguments);
__ Move(ebx, Smi::FromInt(0));
__ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
}
// 3. Make sure ebx is a number.
@@ -1491,74 +1797,83 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Heap::kHeapNumberMapRootIndex);
__ j(equal, &done_convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterBuiltinFrame(esi, edi, ecx);
__ Push(edx);
__ Move(eax, ebx);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Move(ebx, eax);
__ Pop(edx);
- __ Pop(edi);
+ __ LeaveBuiltinFrame(esi, edi, ecx);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, done_alloc, new_object;
__ cmp(edx, edi);
__ j(not_equal, &new_object);
// 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
- __ Ret();
+ __ AllocateJSValue(eax, edi, ebx, esi, &done_alloc);
+ __ jmp(&drop_frame_and_ret);
+
+ __ bind(&done_alloc);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Restore esi.
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ __ LeaveBuiltinFrame(esi, edi, ecx);
}
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(esi);
+ __ SmiUntag(ecx);
+ __ lea(esp, Operand(esp, ecx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(esi);
+ __ Ret();
+ }
+}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into eax and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into eax.
Label no_arguments;
{
+ __ mov(ebx, eax); // Store argc in ebx.
__ test(eax, eax);
__ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ mov(eax, ebx);
+ __ mov(eax, Operand(esp, eax, times_pointer_size, 0));
}
// 2a. At least one argument, return eax if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(eax, &to_string, Label::kNear);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above, &to_string, Label::kNear);
__ j(equal, &symbol_descriptive_string, Label::kNear);
- __ Ret();
+ __ jmp(&drop_frame_and_ret, Label::kNear);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -1571,20 +1886,35 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert eax to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(ebx);
+ __ EnterBuiltinFrame(esi, edi, ebx);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(esi, edi, ebx);
+ __ SmiUntag(ebx);
}
+ __ jmp(&drop_frame_and_ret, Label::kNear);
// 3b. Convert symbol in eax to a string.
__ bind(&symbol_descriptive_string);
{
__ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
__ Push(eax);
__ PushReturnAddressFrom(ecx);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ Ret();
+ }
+}
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
@@ -1592,6 +1922,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- eax : number of arguments
// -- edi : constructor function
// -- edx : new target
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
@@ -1600,64 +1931,83 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // 2. Load the first argument into ebx and get rid of the rest (including the
- // receiver).
+ __ mov(ebx, eax);
+
+ // 2. Load the first argument into eax.
{
Label no_arguments, done;
- __ test(eax, eax);
+ __ test(ebx, ebx);
__ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ mov(eax, Operand(esp, ebx, times_pointer_size, 0));
__ jmp(&done, Label::kNear);
__ bind(&no_arguments);
- __ LoadRoot(ebx, Heap::kempty_stringRootIndex);
+ __ LoadRoot(eax, Heap::kempty_stringRootIndex);
__ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
}
- // 3. Make sure ebx is a string.
+ // 3. Make sure eax is a string.
{
Label convert, done_convert;
- __ JumpIfSmi(ebx, &convert, Label::kNear);
- __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, ecx);
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(below, &done_convert);
__ bind(&convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(edi);
+ __ SmiTag(ebx);
+ __ EnterBuiltinFrame(esi, edi, ebx);
__ Push(edx);
- __ Move(eax, ebx);
__ CallStub(&stub);
- __ Move(ebx, eax);
__ Pop(edx);
- __ Pop(edi);
+ __ LeaveBuiltinFrame(esi, edi, ebx);
+ __ SmiUntag(ebx);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, done_alloc, new_object;
__ cmp(edx, edi);
__ j(not_equal, &new_object);
// 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
- __ Ret();
+ // AllocateJSValue can't handle src == dst register. Reuse esi and restore it
+ // as needed after the call.
+ __ mov(esi, eax);
+ __ AllocateJSValue(eax, edi, esi, ecx, &done_alloc);
+ __ jmp(&drop_frame_and_ret);
+
+ __ bind(&done_alloc);
+ {
+ // Restore eax to the first argument and esi to the context.
+ __ mov(eax, esi);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ }
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx); // the first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(ebx);
+ __ EnterBuiltinFrame(esi, edi, ebx);
+ __ Push(eax); // the first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ __ LeaveBuiltinFrame(esi, edi, ebx);
+ __ SmiUntag(ebx);
}
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ Ret();
+ }
+}
static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
@@ -1685,7 +2035,6 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
__ j(less_equal, stack_overflow); // Signed comparison.
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, esp);
@@ -1704,7 +2053,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(edi);
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack.
__ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -1719,7 +2067,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ecx);
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2011,8 +2358,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(eax);
__ Push(edi);
__ mov(eax, ecx);
+ __ Push(esi);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(esi);
__ mov(ecx, eax);
__ Pop(edi);
__ Pop(eax);
@@ -2054,7 +2403,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
@@ -2141,7 +2489,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2170,7 +2517,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ jmp(ecx);
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2233,7 +2579,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2255,7 +2600,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ jmp(ecx);
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2285,7 +2629,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ jmp(ecx);
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2307,7 +2650,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2359,6 +2701,65 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : requested object size (untagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ SmiTag(edx);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : requested object size (untagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ SmiTag(edx);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : message_id as Smi
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
+
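Generate_AllocateInNewSpace, Generate_AllocateInOldSpace and Generate_Abort share one shape: the allocate builtins Smi-tag the raw size, re-push the arguments under the return address, clear esi (the context register) to the zero Smi because no JS context is available here, and tail-call into the runtime; Abort's message id already arrives tagged. Roughly how the runtime callee sees its arguments, with a made-up FakeRuntimeAbort standing in for the real C++ runtime function:

#include <cstdint>
#include <cstdio>

static int32_t SmiTag(int32_t v) { return v << 1; }
static int32_t SmiUntag(int32_t t) { return t >> 1; }

// Hypothetical stand-in for Runtime_Abort: runtime entry points receive
// their parameters as tagged values.
static void FakeRuntimeAbort(int32_t smi_message_id) {
  std::printf("abort, message id %d\n", SmiUntag(smi_message_id));
}

int main() {
  int32_t message_id = 7;                // raw value
  FakeRuntimeAbort(SmiTag(message_id));  // tagged across the call boundary
}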
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber builtin takes one argument in eax.
+ Label not_smi;
+ __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+ __ Ret();
+ __ bind(&not_smi);
+
+ Label not_heap_number;
+ __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ Ret();
+ __ bind(&not_heap_number);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
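The new ToNumber entry point is a three-way dispatch: Smis and heap numbers are returned unchanged, and only everything else falls through to the NonNumberToNumber builtin. The same control flow restated in plain C++ (a paraphrase with stand-in types, not the builtin's implementation):

#include <cstdio>

enum class Kind { kSmi, kHeapNumber, kOther };

struct Value {
  Kind kind;
  double number;  // meaningful for kSmi / kHeapNumber
};

// Placeholder for the real slow path; shown only so the sketch compiles.
static Value NonNumberToNumber(Value v) {
  (void)v;
  return Value{Kind::kHeapNumber, 0.0};
}

static Value ToNumber(Value v) {
  if (v.kind == Kind::kSmi) return v;         // JumpIfNotSmi -> Ret
  if (v.kind == Kind::kHeapNumber) return v;  // CompareMap -> Ret
  return NonNumberToNumber(v);                // Jump to NonNumberToNumber
}

int main() {
  std::printf("%g\n", ToNumber(Value{Kind::kSmi, 42.0}).number);
}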
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2467,7 +2868,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Register scratch0, Register scratch1,
@@ -2531,7 +2931,6 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments (not including the receiver)
@@ -2575,10 +2974,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
}
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -2587,19 +2992,26 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
Label skip;
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
__ cmp(eax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
+ // Drop any potential handler frame that may be sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ if (has_handler_frame) {
+ __ leave();
+ }
+
// Load deoptimization data from the code object.
__ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ DeoptimizationInputData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
__ SmiUntag(ebx);
// Compute the target address = code_obj + header_size + osr_offset
@@ -2612,6 +3024,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ret(0);
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
new file mode 100644
index 0000000000..89455a4fbd
--- /dev/null
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 9693a52697..003eeb22e0 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -10,17 +10,13 @@
#include "src/full-codegen/full-codegen.h"
#include "src/runtime/runtime.h"
-
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
// -- a1 : target
@@ -38,32 +34,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ordinary functions).
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- switch (extra_args) {
- case BuiltinExtraArguments::kTarget:
- __ Push(a1);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kNewTarget:
- __ Push(a3);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kTargetAndNewTarget:
- __ Push(a1, a3);
- num_extra_args += 2;
- break;
- case BuiltinExtraArguments::kNone:
- break;
- }
-
// JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ Addu(a0, a0, num_extra_args + 1);
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
+ // Insert extra arguments.
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3);
+ __ SmiUntag(a0);
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ PROTECT, exit_frame_type == BUILTIN_EXIT);
+}
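After this change the MIPS adaptor always pushes the same three extras (the Smi-tagged argument count, the target in a1 and the new target in a3) instead of the old switch over BuiltinExtraArguments, so the count handed to JumpToExternalReference is computed once. The arithmetic on a0 spelled out:

#include <cstdio>

// Sketch of the count adjustment in Generate_Adaptor above: a0 must end
// up holding arguments + receiver + the three pushed extras.
int main() {
  int argc_excluding_receiver = 2;  // as arriving in a0
  const int num_extra_args = 3;     // tagged argc, target, new target
  int a0 = argc_excluding_receiver + num_extra_args + 1;  // +1 receiver
  std::printf("a0 passed to the runtime: %d\n", a0);
}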
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
@@ -72,14 +55,12 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
-
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the Array function from the native context.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -95,11 +76,11 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
- t0, Operand(zero_reg));
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, t0,
+ Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
- t0, Operand(MAP_TYPE));
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, t0,
+ Operand(MAP_TYPE));
}
// Run the native code for the InternalArray function called as a normal
@@ -109,7 +90,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -125,11 +105,11 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
- t0, Operand(zero_reg));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, t0,
+ Operand(zero_reg));
__ GetObjectType(a2, a3, t0);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
- t0, Operand(MAP_TYPE));
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, t0,
+ Operand(MAP_TYPE));
}
// Run the native code for the Array function called as a normal function.
@@ -140,66 +120,68 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- ra : return address
- // -- sp[(argc - n) * 8] : arg[n] (zero-based)
- // -- sp[(argc + 1) * 8] : receiver
+ // -- a0 : number of arguments
+ // -- a1 : function
+ // -- cp : context
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+ // -- sp[argc * 4] : receiver
// -----------------------------------
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
// Load the accumulator with the default return value (either -Infinity or
- // +Infinity), with the tagged value in a1 and the double value in f0.
- __ LoadRoot(a1, root_index);
- __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
- __ Addu(a3, a0, Operand(1));
+ // +Infinity), with the tagged value in t2 and the double value in f0.
+ __ LoadRoot(t2, root_index);
+ __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
Label done_loop, loop;
+ __ mov(a3, a0);
__ bind(&loop);
{
// Check if all parameters done.
- __ Subu(a0, a0, Operand(1));
- __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+ __ Subu(a3, a3, Operand(1));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
// Load the next parameter tagged value into a2.
- __ Lsa(at, sp, a0, kPointerSizeLog2);
+ __ Lsa(at, sp, a3, kPointerSizeLog2);
__ lw(a2, MemOperand(at));
// Load the double value of the parameter into f2, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(a2, &convert_smi);
__ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
__ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ SmiTag(a0);
__ SmiTag(a3);
- __ Push(a0, a1, a3);
+ __ EnterBuiltinFrame(cp, a1, a0);
+ __ Push(t2, a3);
__ mov(a0, a2);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ mov(a2, v0);
- __ Pop(a0, a1, a3);
+ __ Pop(t2, a3);
+ __ LeaveBuiltinFrame(cp, a1, a0);
+ __ SmiUntag(a3);
+ __ SmiUntag(a0);
{
// Restore the double accumulator value (f0).
Label restore_smi, done_restore;
- __ JumpIfSmi(a1, &restore_smi);
- __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ JumpIfSmi(t2, &restore_smi);
+ __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
__ jmp(&done_restore);
__ bind(&restore_smi);
- __ SmiToDoubleFPURegister(a1, f0, t0);
+ __ SmiToDoubleFPURegister(t2, f0, t0);
__ bind(&done_restore);
}
- __ SmiUntag(a3);
- __ SmiUntag(a0);
}
__ jmp(&convert);
__ bind(&convert_number);
@@ -227,20 +209,22 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ Branch(&set_value, ne, t1, Operand(t8));
__ jmp(&loop);
__ bind(&set_value);
- __ mov(a1, a2);
+ __ mov(t2, a2);
__ jmp(&loop);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
- __ LoadRoot(a1, Heap::kNanValueRootIndex);
- __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ LoadRoot(t2, Heap::kNanValueRootIndex);
+ __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
__ jmp(&loop);
}
__ bind(&done_loop);
- __ Lsa(sp, sp, a3, kPointerSizeLog2);
+ // Drop all slots, including the receiver.
+ __ Addu(a0, a0, Operand(1));
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ Ret(USE_DELAY_SLOT);
- __ mov(v0, a1); // In delay slot.
+ __ mov(v0, t2); // In delay slot.
}
// static
@@ -248,25 +232,37 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into a0.
Label no_arguments;
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(a0, a0, Operand(1));
- __ Lsa(sp, sp, a0, kPointerSizeLog2);
- __ lw(a0, MemOperand(sp));
- __ Drop(2);
+ __ Subu(t1, a0, Operand(1)); // In delay slot.
+ __ mov(t0, a0); // Store argc in t0.
+ __ Lsa(at, sp, t1, kPointerSizeLog2);
+ __ lw(a0, MemOperand(at));
}
// 2a. Convert first argument to number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ Lsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
+ }
// 2b. No arguments, return +0.
__ bind(&no_arguments);
@@ -274,13 +270,13 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
__ DropAndRet(1);
}
-
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a3 : new target
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -289,19 +285,17 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // 2. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into a0.
{
Label no_arguments, done;
+ __ mov(t0, a0); // Store argc in t0.
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(a0, a0, Operand(1));
- __ Lsa(sp, sp, a0, kPointerSizeLog2);
- __ lw(a0, MemOperand(sp));
- __ Drop(2);
+ __ Subu(t1, a0, Operand(1)); // In delay slot.
+ __ Lsa(at, sp, t1, kPointerSizeLog2);
+ __ lw(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
__ Move(a0, Smi::FromInt(0));
- __ Drop(1);
__ bind(&done);
}
@@ -312,71 +306,82 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ GetObjectType(a0, a2, a2);
__ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a3);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Move(a0, v0);
- __ Pop(a1, a3);
+ __ Pop(a3);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ Branch(&new_object, ne, a1, Operand(a3));
// 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
- __ Ret();
+ __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+ __ jmp(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a0); // first argument
__ CallStub(&stub);
__ Pop(a0);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
- __ Ret(USE_DELAY_SLOT);
- __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
-}
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Lsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
+ }
+}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into a0.
Label no_arguments;
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(a0, a0, Operand(1));
- __ Lsa(sp, sp, a0, kPointerSizeLog2);
- __ lw(a0, MemOperand(sp));
- __ Drop(2);
+ __ Subu(t1, a0, Operand(1));
+ __ mov(t0, a0); // Store argc in t0.
+ __ Lsa(at, sp, t1, kPointerSizeLog2);
+ __ lw(a0, MemOperand(at));
}
// 2a. At least one argument, return a0 if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(a0, &to_string);
- __ GetObjectType(a0, a1, a1);
+ __ GetObjectType(a0, t1, t1);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ Subu(a1, a1, Operand(FIRST_NONSTRING_TYPE));
- __ Branch(&symbol_descriptive_string, eq, a1, Operand(zero_reg));
- __ Branch(&to_string, gt, a1, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT);
+ __ Subu(t1, t1, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(&symbol_descriptive_string, eq, t1, Operand(zero_reg));
+ __ Branch(&to_string, gt, t1, Operand(zero_reg));
__ mov(v0, a0);
+ __ jmp(&drop_frame_and_ret);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -389,18 +394,31 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert a0 to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
+ __ jmp(&drop_frame_and_ret);
// 3b. Convert symbol in a0 to a string.
__ bind(&symbol_descriptive_string);
{
+ __ Lsa(sp, sp, t0, kPointerSizeLog2);
+ __ Drop(1);
__ Push(a0);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Lsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
+ }
+}
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
@@ -408,6 +426,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a3 : new target
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -416,19 +435,17 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // 2. Load the first argument into a0 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into a0.
{
Label no_arguments, done;
+ __ mov(t0, a0); // Store argc in t0.
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Subu(a0, a0, Operand(1));
- __ Lsa(sp, sp, a0, kPointerSizeLog2);
- __ lw(a0, MemOperand(sp));
- __ Drop(2);
+ __ Subu(t1, a0, Operand(1));
+ __ Lsa(at, sp, t1, kPointerSizeLog2);
+ __ lw(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
__ LoadRoot(a0, Heap::kempty_stringRootIndex);
- __ Drop(1);
__ bind(&done);
}
@@ -437,39 +454,52 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a2, a2);
- __ And(t0, a2, Operand(kIsNotStringMask));
- __ Branch(&done_convert, eq, t0, Operand(zero_reg));
+ __ And(t1, a2, Operand(kIsNotStringMask));
+ __ Branch(&done_convert, eq, t1, Operand(zero_reg));
__ bind(&convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(a1, a3);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a3);
__ CallStub(&stub);
__ Move(a0, v0);
- __ Pop(a1, a3);
+ __ Pop(a3);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ Branch(&new_object, ne, a1, Operand(a3));
// 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
- __ Ret();
+ __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+ __ jmp(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a0); // first argument
__ CallStub(&stub);
__ Pop(a0);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
+ }
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Lsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
}
- __ Ret(USE_DELAY_SLOT);
- __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
@@ -504,7 +534,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
@@ -521,7 +550,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_implicit_receiver,
@@ -603,16 +631,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a0: number of arguments
// a1: constructor function
// a3: new target
- if (is_api_function) {
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -679,38 +700,31 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ret();
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
-
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-
enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers a2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
@@ -738,7 +752,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ bind(&okay);
}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
@@ -778,13 +791,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
Label loop, entry;
__ Lsa(t2, s0, a3, kPointerSizeLog2);
__ b(&entry);
- __ nop(); // Branch delay slot nop.
+ __ nop(); // Branch delay slot nop.
// t2 points past last arg.
__ bind(&loop);
__ lw(t0, MemOperand(s0)); // Read next parameter.
__ addiu(s0, s0, kPointerSize);
__ lw(t0, MemOperand(t0)); // Dereference handle.
- __ push(t0); // Push parameter.
+ __ push(t0); // Push parameter.
__ bind(&entry);
__ Branch(&loop, ne, s0, Operand(t2));
@@ -815,16 +828,185 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Jump(ra);
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- v0 : the value to pass to the generator
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a2 : the resume mode (tagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(a1);
+
+ // Store input value into generator object.
+ __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+
+ // Load suspended function and context.
+ __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+ __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ li(t1, Operand(last_step_action));
+ __ lb(t1, MemOperand(t1));
+ __ Branch(&prepare_step_in_if_stepping, ge, t1, Operand(StepIn));
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ li(t1, Operand(debug_suspended_generator));
+ __ lw(t1, MemOperand(t1));
+ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1));
+ __ bind(&stepping_prepared);
+
+ // Push receiver.
+ __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Push(t1);
+
+ // ----------- S t a t e -------------
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a2 : the resume mode (tagged)
+ // -- t0 : generator function
+ // -- cp : generator context
+ // -- ra : return address
+ // -- sp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(a3, a3, a3);
+ __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+
+ // New-style (ignition/turbofan) generator object.
+ {
+ __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(a0);
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(a3, a1);
+ __ Move(a1, t0);
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(a2);
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(ra, fp);
+ __ Move(fp, sp);
+ __ Push(cp, t0);
+
+ // Restore the operand stack.
+ __ lw(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+ __ lw(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
+ __ Addu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Lsa(a3, a0, a3, kPointerSizeLog2 - 1);
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a0, Operand(a3));
+ __ lw(t1, MemOperand(a0));
+ __ Push(t1);
+ __ Branch(USE_DELAY_SLOT, &loop);
+ __ addiu(a0, a0, kPointerSize); // In delay slot.
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(t1, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+
+ // Resume the generator function at the continuation.
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+ __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(a2);
+ __ Addu(a3, a3, Operand(a2));
+ __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ Move(v0, a1); // Continuation expects generator object in v0.
+ __ Jump(a3);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a2, t0);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(a1, a2);
+ }
+ __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+ __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a2);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(a1, a2);
+ }
+ __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+ __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+}
+
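The trampoline resumes through one of two paths, chosen by the type of the SharedFunctionInfo's function-data slot: a BytecodeArray selects the new Ignition resume, which jumps straight to the code entry and smuggles the generator object in through new.target; anything else takes the old full-codegen path that rebuilds the suspended frame and operand stack. The decision restated as plain C++ (a paraphrase, not V8 code):

#include <cstdio>

enum class FunctionData { kBytecodeArray, kFullCodegenCode };

// Mirrors the GetObjectType / Branch pair that picks the resume path.
static const char* ResumePath(FunctionData data) {
  if (data == FunctionData::kBytecodeArray) {
    // New-style: new.target carries the generator object.
    return "jump to the function's code entry";
  }
  // Old-style: re-enter the JS frame and replay the operand stack.
  return "rebuild frame, restore operand stack, jump to continuation";
}

int main() {
  std::printf("%s\n", ResumePath(FunctionData::kBytecodeArray));
}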
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+ Register args_count = scratch;
+
+ // Get the arguments + receiver count.
+ __ lw(args_count,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ lw(args_count,
+ FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ // Drop receiver + arguments.
+ __ Addu(sp, sp, args_count);
+}
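LeaveInterpreterFrame reads the bytes to drop straight out of the BytecodeArray's parameter-size field, so no argc register needs to survive the frame teardown. A rough model, assuming 4-byte pointers and that the stored size covers the receiver plus all formals, as the surrounding comments indicate:

#include <cstdio>

int main() {
  const int kPointerSize = 4;  // mips32
  int formal_parameter_count = 2;
  // What BytecodeArray::kParameterSizeOffset would hold in this sketch.
  int parameter_size_bytes = (formal_parameter_count + 1) * kPointerSize;
  int sp = 0;
  sp += parameter_size_bytes;  // __ Addu(sp, sp, args_count);
  std::printf("sp advanced by %d bytes\n", sp);
}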
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -842,14 +1024,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
Register debug_info = kInterpreterBytecodeArrayRegister;
@@ -861,8 +1045,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ lw(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
+ __ Branch(&switch_to_different_code_kind, ne, a0,
+ Operand(masm->CodeObject())); // Self-reference to this code.
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, t0);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
Operand(zero_reg));
@@ -871,8 +1061,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+ // Load initial bytecode offset.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
+ __ Push(a3, kInterpreterBytecodeArrayRegister, t0);
// Allocate the local and temporary register file on the stack.
{
@@ -902,17 +1097,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&loop_header, ge, t0, Operand(zero_reg));
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load bytecode offset and dispatch table into registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Addu(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ li(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -923,45 +1109,61 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lbu(a0, MemOperand(a0));
__ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ lw(at, MemOperand(at));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(at);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in v0.
+ LeaveInterpreterFrame(masm, t0);
+ __ Jump(ra);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
__ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ Branch(&bytecode_array_loaded);
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kCodeOffset));
+ __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(a1, t0, t1);
+ __ Jump(t0);
}
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ lw(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, t0);
- // The return value is in accumulator, which is already in v0.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(v0);
- // Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Push function as argument and compile for baseline.
+ __ push(a1);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ lw(at, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ Addu(sp, sp, at);
+ // Restore return value.
+ __ pop(v0);
+ }
__ Jump(ra);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -986,12 +1188,18 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
__ Branch(&loop_header, gt, a2, Operand(a3));
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1022,25 +1230,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ Addu(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+ __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+
+ // Initialize the dispatch table register.
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ lw(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ lw(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ lw(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1054,9 +1261,7 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ lw(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1065,74 +1270,234 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
__ lbu(a1, MemOperand(a1));
__ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ lw(a1, MemOperand(a1));
- __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
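Dispatch itself is now an indexed load: the byte at the current bytecode offset selects an entry in the dispatch table, and after this patch the table holds code entry addresses directly, which is why the Code::kHeaderSize adjustment disappears. A toy model of the lookup:

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative only: one handler entry address per possible bytecode.
  uintptr_t dispatch_table[256] = {0};
  dispatch_table[0x0b] = 0x1000;  // fake code entry for bytecode 0x0b

  const uint8_t bytecode_array[] = {0x0b, 0x02, 0x00};
  int offset = 0;                                // bytecode offset register
  uint8_t bytecode = bytecode_array[offset];     // __ lbu(a1, ...)
  uintptr_t handler = dispatch_table[bytecode];  // __ Lsa + __ lw
  std::printf("jump to handler at %#lx\n", (unsigned long)handler);
}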
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+ // First lookup code, maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register argument_count = a0;
+ Register closure = a1;
+ Register new_target = a3;
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = a0;
+ Register index = a2;
+ __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+
+ // Find literals.
+ // a3 : native context
+ // a2 : length / index
+ // a0 : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = a3;
+ __ lw(native_context, NativeContextMemOperand());
+
+ __ bind(&loop_top);
+ Register temp = a1;
+ Register array_pointer = t1;
+
+ // Does the native context match?
+ __ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(array_pointer, map, Operand(at));
+ __ lw(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+ // OSR id set to none?
+ __ lw(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+ // Literals available?
+ __ lw(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ lw(t0, MemOperand(sp, 0));
+ __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+ __ push(index);
+ __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = t0;
+ __ lw(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, t1);
+
+ // Link the closure into the optimized function list.
+ // t0 : code entry
+ // a3 : native context
+ // a1 : closure
+ __ lw(t1,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ sw(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(t1, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, t1);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ Jump(entry);
+
+ __ bind(&loop_bottom);
+ __ Subu(index, index,
+ Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context-free optimized code map entry.
+ __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ And(t1, t1, Operand(Code::KindField::kMask));
+ __ srl(t1, t1, Code::KindField::kShift);
+ __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
+ // Yes, install the full code.
+ __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, t1);
+ __ Jump(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
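// For orientation, a C++-style sketch of the search the CompileLazy code
// above performs over the SharedFunctionInfo's optimized code map (the
// struct is a hypothetical stand-in for the FixedArray group layout implied
// by the kOffsetToPrevious* constants, not a real V8 type):
//
//   struct CodeMapEntry {
//     WeakCell* context;   // native context the entry was recorded for
//     WeakCell* code;      // optimized Code object, possibly cleared
//     WeakCell* literals;  // literals array, possibly cleared
//     Smi* osr_ast_id;     // BailoutId::None() for regular entries
//   };
//
//   for (int i = count - 1; i >= 1; i -= kEntryLength) {  // loop_top/bottom
//     CodeMapEntry& e = entries[i];
//     if (e.context->value() != native_context) continue;
//     if (e.osr_ast_id != BailoutId::None()) continue;
//     if (e.literals->cleared()) break;          // gotta_call_runtime
//     InstallLiterals(closure, e.literals->value());
//     if (!e.code->cleared()) InstallAndJump(closure, e.code->value());
//     // code cell cleared: try the shared (context-free) slot instead.
//   }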
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
-
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve argument count for later compare.
+ __ Move(t4, a0);
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3, a1);
+
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ Branch(&over, ne, t4, Operand(j));
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ lw(t4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ i * kPointerSize));
+ __ push(t4);
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+
+ // Call runtime; on success, unwind this frame and the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(v0, &failed);
+
+ __ Drop(2);
+ __ pop(t4);
+ __ SmiUntag(t4);
+ scope.GenerateLeaveFrame();
+
+ __ Addu(t4, t4, Operand(1));
+ __ Lsa(sp, sp, t4, kPointerSizeLog2);
+ __ Ret();
+
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ Pop(a0, a1, a3);
+ __ SmiUntag(a0);
+ }
+ // On failure, tail call back to regular js.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
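// The nested loops above are an unrolled dispatch on the argument count: for
// each possible argc (0..3) they emit one straight-line sequence that pushes
// the provided arguments and pads the rest of the three expected slots
// (stdlib, foreign, heap) with undefined. In C++-style pseudo-code
// (hypothetical Push helpers, for orientation only):
//
//   for (int j = 0; j < 4; ++j) {          // candidate argc value
//     if (j < 3 && argc != j) continue;    // __ Branch(&over, ne, t4, j)
//     for (int i = j - 1; i >= 0; --i) Push(CallerArg(i));  // provided args
//     for (int i = 0; i < 3 - j; ++i) Push(undefined);      // padding
//     break;                               // __ jmp(&args_done)
//   }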
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
@@ -1142,8 +1507,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// crawls in MakeCodeYoung. This seems a bit fragile.
// Set a0 to point to the head of the PlatformCodeAge sequence.
- __ Subu(a0, a0,
- Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+ __ Subu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -1162,19 +1526,18 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(a0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
@@ -1182,8 +1545,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// pointers.
// Set a0 to point to the head of the PlatformCodeAge sequence.
- __ Subu(a0, a0,
- Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+ __ Subu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -1209,17 +1571,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Jump(a0);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
@@ -1235,20 +1594,17 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
}
__ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
- __ Jump(ra); // Jump to miss handler
+ __ Jump(ra); // Jump to miss handler
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1264,15 +1620,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiUntag(t2);
// Switch on the state.
Label with_tos_register, unknown_state;
- __ Branch(&with_tos_register,
- ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Branch(&with_tos_register, ne, t2,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot; Addu will emit one instruction.
__ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ bind(&with_tos_register);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
__ lw(v0, MemOperand(sp, 1 * kPointerSize));
- __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+ __ Branch(&unknown_state, ne, t2,
+ Operand(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot; Addu will emit one instruction.
@@ -1282,22 +1640,18 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ stop("no cases left");
}
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
// Clobbers {t2, t3, t4, t5}.
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
@@ -1360,7 +1714,6 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
@@ -1397,10 +1750,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ lw(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1408,9 +1767,15 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
__ Ret(eq, v0, Operand(Smi::FromInt(0)));
+ // Drop any potential handler frame that may be sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ if (has_handler_frame) {
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1418,7 +1783,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ DeoptimizationInputData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
__ SmiUntag(a1);
// Compute the target address = code_obj + header_size + osr_offset
@@ -1430,11 +1796,21 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
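// Address arithmetic performed by the OSR helper above, spelled out
// (sketch; all quantities are byte offsets):
//
//   deopt_data = code[Code::kDeoptimizationDataOffset]    // tagged load
//   osr_offset = Smi::ToInt(deopt_data[kOsrPcOffsetIndex])
//   target_pc  = code + (Code::kHeaderSize - kHeapObjectTag) + osr_offset
//
// i.e. skip the Code object's header (compensating for the heap-object tag
// baked into the pointer) and enter osr_offset bytes into the instructions.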
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : function
+ // -- cp : context
// -- sp[0] : receiver
// -----------------------------------
@@ -1474,29 +1850,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
// 3. Raise a TypeError if the receiver is not a date.
__ bind(&receiver_not_date);
- __ TailCallRuntime(Runtime::kThrowNotDateError);
-}
-
-// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argc
- // -- sp[0] : first argument (left-hand side)
- // -- sp[4] : receiver (right-hand side)
- // -----------------------------------
-
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ lw(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ lw(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(a0);
+ __ Move(a0, Smi::FromInt(0));
+ __ EnterBuiltinFrame(cp, a1, a0);
+ __ CallRuntime(Runtime::kThrowNotDateError);
}
-
- // Pop the argument and the receiver.
- __ DropAndRet(2);
}
// static
@@ -1572,7 +1932,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
@@ -1615,7 +1974,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -1679,7 +2037,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -1758,7 +2115,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
@@ -1780,16 +2136,14 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
__ Branch(stack_overflow, le, t1, Operand(at));
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ sll(a0, a0, kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
- __ Addu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- v0 : result being passed through
@@ -1805,7 +2159,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Addu(sp, sp, Operand(kPointerSize));
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2087,8 +2440,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ sll(a0, a0, kSmiTagSize); // Smi tagged.
__ Push(a0, a1);
__ mov(a0, a3);
+ __ Push(cp);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(cp);
__ mov(a3, v0);
__ Pop(a0, a1);
__ sra(a0, a0, kSmiTagSize); // Un-tag.
@@ -2129,7 +2484,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2224,7 +2578,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ Jump(at);
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2284,7 +2637,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2306,7 +2658,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ Jump(at);
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2398,7 +2749,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Jump(at);
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2417,7 +2767,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2469,6 +2818,63 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : requested object size (untagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ SmiTag(a0);
+ __ Push(a0);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : requested object size (untagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ SmiTag(a0);
+ __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(a0, a1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : message_id as Smi
+ // -- ra : return address
+ // -----------------------------------
+ __ Push(a0);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in a0.
+ Label not_smi;
+ __ JumpIfNotSmi(a0, &not_smi);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_smi);
+
+ Label not_heap_number;
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_heap_number);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
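// Control flow of the ToNumber builtin above, as a C++-style sketch
// (IsSmi/IsHeapNumber/NonNumberToNumber are illustrative names for the
// checks and the builtin tail-called above):
//
//   Object* ToNumber(Object* x) {
//     if (IsSmi(x)) return x;         // first fast path, no allocation
//     if (IsHeapNumber(x)) return x;  // second fast path
//     return NonNumberToNumber(x);    // slow path; may run user JS
//   }
//
// Both fast paths return through the branch delay slot: the Ret is issued
// first and the mov(v0, a0) that sets the return value executes in its slot.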
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
@@ -2482,8 +2888,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ Branch(&dont_adapt_arguments, eq,
- a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ Branch(&dont_adapt_arguments, eq, a2,
+ Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
// We use Uless since the number of arguments should always be greater than 0.
__ Branch(&too_few, Uless, a0, Operand(a2));
@@ -2585,7 +2991,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
LeaveArgumentsAdaptorFrame(masm);
__ Ret();
-
// -------------------------------------------
// Don't adapt arguments.
// -------------------------------------------
@@ -2601,7 +3006,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
new file mode 100644
index 0000000000..89455a4fbd
--- /dev/null
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index b55b77c511..cbdb5c3250 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -13,13 +13,10 @@
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
// -- a1 : target
@@ -37,32 +34,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ordinary functions).
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- switch (extra_args) {
- case BuiltinExtraArguments::kTarget:
- __ Push(a1);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kNewTarget:
- __ Push(a3);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kTargetAndNewTarget:
- __ Push(a1, a3);
- num_extra_args += 2;
- break;
- case BuiltinExtraArguments::kNone:
- break;
- }
-
// JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ Daddu(a0, a0, num_extra_args + 1);
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
+ // Insert extra arguments.
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3);
+ __ SmiUntag(a0);
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ PROTECT, exit_frame_type == BUILTIN_EXIT);
+}
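// Stack layout handed to the C++ builtin after the pushes above (sketch;
// 8-byte slots, stack grows downward, BUILTIN_EXIT case):
//
//   sp[0]  : new target      (a3)
//   sp[8]  : target function (a1)
//   sp[16] : argc as a Smi   (a0)
//   sp[24] : the argc original arguments, with the receiver beneath them
//
// This is why a0 is bumped by num_extra_args + 1: the three extra slots plus
// the receiver are folded into the argument count that
// JumpToExternalReference reports to the runtime.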
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
@@ -71,14 +55,12 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
-
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the Array function from the native context.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -94,11 +76,11 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Initial map for the builtin InternalArray functions should be maps.
__ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
- __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
- a4, Operand(zero_reg));
+ __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, a4,
+ Operand(zero_reg));
__ GetObjectType(a2, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
- a4, Operand(MAP_TYPE));
+ __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, a4,
+ Operand(MAP_TYPE));
}
// Run the native code for the InternalArray function called as a normal
@@ -108,7 +90,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -124,11 +105,11 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Initial map for the builtin Array functions should be maps.
__ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
- __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
- a4, Operand(zero_reg));
+ __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, a4,
+ Operand(zero_reg));
__ GetObjectType(a2, a3, a4);
- __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
- a4, Operand(MAP_TYPE));
+ __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, a4,
+ Operand(MAP_TYPE));
}
// Run the native code for the Array function called as a normal function.
@@ -139,66 +120,68 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
- // -- a0 : number of arguments
- // -- ra : return address
- // -- sp[(argc - n) * 8] : arg[n] (zero-based)
- // -- sp[(argc + 1) * 8] : receiver
+ // -- a0 : number of arguments
+ // -- a1 : function
+ // -- cp : context
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
+ // -- sp[argc * 8] : receiver
// -----------------------------------
Heap::RootListIndex const root_index =
(kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
: Heap::kMinusInfinityValueRootIndex;
// Load the accumulator with the default return value (either -Infinity or
- // +Infinity), with the tagged value in a1 and the double value in f0.
- __ LoadRoot(a1, root_index);
- __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
- __ Addu(a3, a0, 1);
+ // +Infinity), with the tagged value in t1 and the double value in f0.
+ __ LoadRoot(t1, root_index);
+ __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
Label done_loop, loop;
+ __ mov(a3, a0);
__ bind(&loop);
{
// Check if all parameters done.
- __ Dsubu(a0, a0, Operand(1));
- __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+ __ Dsubu(a3, a3, Operand(1));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
// Load the next parameter tagged value into a2.
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
+ __ Dlsa(at, sp, a3, kPointerSizeLog2);
__ ld(a2, MemOperand(at));
// Load the double value of the parameter into f2, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(a2, &convert_smi);
__ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
__ JumpIfRoot(a4, Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ SmiTag(a0);
__ SmiTag(a3);
- __ Push(a0, a1, a3);
+ __ EnterBuiltinFrame(cp, a1, a0);
+ __ Push(t1, a3);
__ mov(a0, a2);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ mov(a2, v0);
- __ Pop(a0, a1, a3);
+ __ Pop(t1, a3);
+ __ LeaveBuiltinFrame(cp, a1, a0);
+ __ SmiUntag(a3);
+ __ SmiUntag(a0);
{
// Restore the double accumulator value (f0).
Label restore_smi, done_restore;
- __ JumpIfSmi(a1, &restore_smi);
- __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ JumpIfSmi(t1, &restore_smi);
+ __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
__ jmp(&done_restore);
__ bind(&restore_smi);
- __ SmiToDoubleFPURegister(a1, f0, a4);
+ __ SmiToDoubleFPURegister(t1, f0, a4);
__ bind(&done_restore);
}
- __ SmiUntag(a3);
- __ SmiUntag(a0);
}
__ jmp(&convert);
__ bind(&convert_number);
@@ -223,20 +206,22 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
}
__ Move(at, f0);
__ Branch(&loop, eq, a4, Operand(at));
- __ mov(a1, a2);
+ __ mov(t1, a2);
__ jmp(&loop);
// At least one side is NaN, which means that the result will be NaN too.
__ bind(&compare_nan);
- __ LoadRoot(a1, Heap::kNanValueRootIndex);
- __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ LoadRoot(t1, Heap::kNanValueRootIndex);
+ __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
__ jmp(&loop);
}
__ bind(&done_loop);
- __ Dlsa(sp, sp, a3, kPointerSizeLog2);
+ // Drop all slots, including the receiver.
+ __ Daddu(a0, a0, Operand(1));
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ Ret(USE_DELAY_SLOT);
- __ mov(v0, a1); // In delay slot.
+ __ mov(v0, t1); // In delay slot.
}
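// Scalar sketch of what the Math.max/Math.min loop above computes
// (illustrative; kMin seeds with +Infinity and flips the comparison):
//
//   double acc = -infinity;                   // root value held in t1/f0
//   for (each argument x, run through ToNumber, which may call user JS) {
//     if (isnan(x) || isnan(acc)) acc = NaN;  // compare_nan; keep looping so
//                                             // remaining args still convert
//     else acc = max(acc, x);                 // the Move/Branch pair above
//   }                                         // implements the +/-0 tie-break
//   return acc;  // tagged value tracked in t1, double tracked in f0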
// static
@@ -244,6 +229,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -254,15 +240,27 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
Label no_arguments;
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Dsubu(a0, a0, Operand(1));
- __ Dlsa(sp, sp, a0, kPointerSizeLog2);
- __ ld(a0, MemOperand(sp));
- __ Drop(2);
+ __ Dsubu(t1, a0, Operand(1)); // In delay slot.
+ __ mov(t0, a0); // Store argc in t0.
+ __ Dlsa(at, sp, t1, kPointerSizeLog2);
+ __ ld(a0, MemOperand(at));
}
// 2a. Convert first argument to number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
+ }
// 2b. No arguments, return +0.
__ bind(&no_arguments);
@@ -270,12 +268,12 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
__ DropAndRet(1);
}
-
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a3 : new target
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -288,15 +286,14 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// receiver).
{
Label no_arguments, done;
+ __ mov(t0, a0); // Store argc in t0.
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Dsubu(a0, a0, Operand(1));
- __ Dlsa(sp, sp, a0, kPointerSizeLog2);
- __ ld(a0, MemOperand(sp));
- __ Drop(2);
+ __ Dsubu(a0, a0, Operand(1)); // In delay slot.
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
+ __ ld(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
__ Move(a0, Smi::FromInt(0));
- __ Drop(1);
__ bind(&done);
}
@@ -305,45 +302,57 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Label done_convert;
__ JumpIfSmi(a0, &done_convert);
__ GetObjectType(a0, a2, a2);
- __ Branch(&done_convert, eq, t0, Operand(HEAP_NUMBER_TYPE));
+ __ Branch(&done_convert, eq, a2, Operand(HEAP_NUMBER_TYPE));
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a3);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Move(a0, v0);
- __ Pop(a1, a3);
+ __ Pop(a3);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ Branch(&new_object, ne, a1, Operand(a3));
// 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
- __ Ret();
+ __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+ __ jmp(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0);
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a0);
__ CallStub(&stub);
__ Pop(a0);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
- __ Ret(USE_DELAY_SLOT);
- __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
-}
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
+ }
+}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -354,24 +363,24 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
Label no_arguments;
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
- __ Dsubu(a0, a0, Operand(1));
- __ Dlsa(sp, sp, a0, kPointerSizeLog2);
- __ ld(a0, MemOperand(sp));
- __ Drop(2);
+ __ Dsubu(t1, a0, Operand(1)); // In delay slot.
+ __ mov(t0, a0); // Store argc in t0.
+ __ Dlsa(at, sp, t1, kPointerSizeLog2);
+ __ ld(a0, MemOperand(at));
}
// 2a. At least one argument, return a0 if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(a0, &to_string);
- __ GetObjectType(a0, a1, a1);
+ __ GetObjectType(a0, t1, t1);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ Subu(a1, a1, Operand(FIRST_NONSTRING_TYPE));
- __ Branch(&symbol_descriptive_string, eq, a1, Operand(zero_reg));
- __ Branch(&to_string, gt, a1, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT);
+ __ Subu(t1, t1, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(&symbol_descriptive_string, eq, t1, Operand(zero_reg));
+ __ Branch(&to_string, gt, t1, Operand(zero_reg));
__ mov(v0, a0);
+ __ jmp(&drop_frame_and_ret);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -384,24 +393,38 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert a0 to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
+ __ jmp(&drop_frame_and_ret);
// 3b. Convert symbol in a0 to a string.
__ bind(&symbol_descriptive_string);
{
+ __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+ __ Drop(1);
__ Push(a0);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
+ }
+}
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
// -- a3 : new target
+ // -- cp : context
// -- ra : return address
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
@@ -414,15 +437,14 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// receiver).
{
Label no_arguments, done;
+ __ mov(t0, a0); // Store argc in t0.
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ Dlsa(sp, sp, a0, kPointerSizeLog2);
- __ ld(a0, MemOperand(sp));
- __ Drop(2);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
+ __ ld(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
__ LoadRoot(a0, Heap::kempty_stringRootIndex);
- __ Drop(1);
__ bind(&done);
}
@@ -431,39 +453,52 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a2, a2);
- __ And(t0, a2, Operand(kIsNotStringMask));
- __ Branch(&done_convert, eq, t0, Operand(zero_reg));
+ __ And(t1, a2, Operand(kIsNotStringMask));
+ __ Branch(&done_convert, eq, t1, Operand(zero_reg));
__ bind(&convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(a1, a3);
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a3);
__ CallStub(&stub);
__ Move(a0, v0);
- __ Pop(a1, a3);
+ __ Pop(a3);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ Branch(&new_object, ne, a1, Operand(a3));
// 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(v0, a1, a0, a2, t0, &new_object);
- __ Ret();
+ __ AllocateJSValue(v0, a1, a0, a2, t1, &new_object);
+ __ jmp(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0);
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(t0);
+ __ EnterBuiltinFrame(cp, a1, t0);
+ __ Push(a0);
__ CallStub(&stub);
__ Pop(a0);
+ __ LeaveBuiltinFrame(cp, a1, t0);
+ __ SmiUntag(t0);
+ }
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Dlsa(sp, sp, t0, kPointerSizeLog2);
+ __ DropAndRet(1);
}
- __ Ret(USE_DELAY_SLOT);
- __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
@@ -497,7 +532,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ Jump(at);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
@@ -514,7 +548,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_implicit_receiver,
@@ -592,16 +625,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a0: number of arguments
// a1: constructor function
// a3: new target
- if (is_api_function) {
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -669,27 +695,178 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ret();
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- v0 : the value to pass to the generator
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a2 : the resume mode (tagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(a1);
+
+ // Store input value into generator object.
+ __ sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+
+ // Load suspended function and context.
+ __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+ __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ li(a5, Operand(last_step_action));
+ __ lb(a5, MemOperand(a5));
+ __ Branch(&prepare_step_in_if_stepping, ge, a5, Operand(StepIn));
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ li(a5, Operand(debug_suspended_generator));
+ __ ld(a5, MemOperand(a5));
+ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
+ __ bind(&stepping_prepared);
+
+ // Push receiver.
+ __ ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Push(a5);
+
+ // ----------- S t a t e -------------
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a2 : the resume mode (tagged)
+ // -- a4 : generator function
+ // -- cp : generator context
+ // -- ra : return address
+ // -- sp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Dsubu(a3, a3, Operand(1));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(a3, a3, a3);
+ __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+
+ // New-style (ignition/turbofan) generator object.
+ {
+ __ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(a3, a1);
+ __ Move(a1, a4);
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Jump(a2);
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(ra, fp);
+ __ Move(fp, sp);
+ __ Push(cp, a4);
+
+ // Restore the operand stack.
+ __ ld(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+ __ ld(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
+ __ SmiUntag(a3);
+ __ Daddu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Dlsa(a3, a0, a3, kPointerSizeLog2);
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a0, Operand(a3));
+ __ ld(a5, MemOperand(a0));
+ __ Push(a5);
+ __ Branch(USE_DELAY_SLOT, &loop);
+ __ daddiu(a0, a0, kPointerSize); // In delay slot.
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a5, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+
+ // Resume the generator function at the continuation.
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(a2);
+ __ Daddu(a3, a3, Operand(a2));
+ __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ Move(v0, a1); // Continuation expects generator object in v0.
+ __ Jump(a3);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a2, a4);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(a1, a2);
+ }
+ __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+ __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a2);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(a1, a2);
+ }
+ __ Branch(USE_DELAY_SLOT, &stepping_prepared);
+ __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+}
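// Resume protocol implemented above, in outline:
//   1. Store the sent value and resume mode into the JSGeneratorObject.
//   2. Reload the suspended context (cp) and function (a4).
//   3. Push the receiver plus one hole per formal parameter; the real
//      argument values already live in the context, so the holes are never
//      read.
//   4. Ignition generators: tail-call the function's code with new.target
//      set to the generator object, which the entry trampoline interprets
//      as a resume rather than a construct call.
//      Full-codegen generators: rebuild the JavaScript frame by hand,
//      restore the saved operand stack, and jump to
//      code + Code::kHeaderSize - kHeapObjectTag + continuation offset.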
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -697,10 +874,8 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-
enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers a2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
@@ -727,7 +902,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ bind(&okay);
}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody
@@ -767,13 +941,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
Label loop, entry;
__ Dlsa(a6, s0, a3, kPointerSizeLog2);
__ b(&entry);
- __ nop(); // Branch delay slot nop.
+ __ nop(); // Branch delay slot nop.
// a6 points past last arg.
__ bind(&loop);
__ ld(a4, MemOperand(s0)); // Read next parameter.
__ daddiu(s0, s0, kPointerSize);
__ ld(a4, MemOperand(a4)); // Dereference handle.
- __ push(a4); // Push parameter.
+ __ push(a4); // Push parameter.
__ bind(&entry);
__ Branch(&loop, ne, s0, Operand(a6));
@@ -803,16 +977,28 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Jump(ra);
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+ Register args_count = scratch;
+
+ // Get the arguments + receiver count.
+ __ ld(args_count,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ lw(args_count, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ // Drop receiver + arguments.
+ __ Daddu(sp, sp, args_count);
+}
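// Teardown arithmetic above, spelled out: the BytecodeArray's parameter size
// field already stores (arguments + receiver) scaled to bytes, so once
// LeaveFrame has popped the fixed frame a single
//
//   sp += parameter_size;   // the __ Daddu(sp, sp, args_count) above
//
// drops the receiver and every argument at once; no per-slot loop is needed.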
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -830,14 +1016,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
Register debug_info = kInterpreterBytecodeArrayRegister;
@@ -849,8 +1037,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ ld(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
+ __ Branch(&switch_to_different_code_kind, ne, a0,
+ Operand(masm->CodeObject())); // Self-reference to this code.
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ SmiTst(kInterpreterBytecodeArrayRegister, a4);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
Operand(zero_reg));
@@ -859,8 +1053,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+ // Load initial bytecode offset.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+ __ Push(a3, kInterpreterBytecodeArrayRegister, a4);
// Allocate the local and temporary register file on the stack.
{
@@ -890,17 +1089,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Branch(&loop_header, ge, a4, Operand(zero_reg));
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load bytecode offset and dispatch table into registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ Daddu(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ li(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -911,45 +1101,61 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lbu(a0, MemOperand(a0));
__ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ ld(at, MemOperand(at));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(at);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in v0.
+ LeaveInterpreterFrame(masm, t0);
+ __ Jump(ra);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
__ ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ Branch(&bytecode_array_loaded);
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
+ __ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(a1, a4, a5);
+ __ Jump(a4);
}
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ ld(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, t0);
- // The return value is in accumulator, which is already in v0.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(v0);
- // Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Push function as argument and compile for baseline.
+ __ push(a1);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ lw(at, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ Daddu(sp, sp, at);
+ // Restore return value.
+ __ pop(v0);
+ }
__ Jump(ra);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -974,12 +1180,18 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
__ Branch(&loop_header, gt, a2, Operand(a3));
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1010,25 +1222,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ Daddu(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+ __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+
+ // Initialize the dispatch table register.
__ li(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ ld(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ ld(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1042,9 +1253,7 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ ld(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1053,74 +1262,233 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
__ lbu(a1, MemOperand(a1));
__ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ ld(a1, MemOperand(a1));
- __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
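
InterpreterEnterBytecodeDispatch now forges a return address that points back
into the entry trampoline, so a later return from a bytecode handler lands just
after the trampoline's dispatch call. The arithmetic is: trampoline code
object, plus the recorded pc offset, plus the header-skip and untag correction.
A tiny runnable version of that computation, with invented addresses and
constants:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kHeapObjectTag = 1;    // V8 tags heap pointers with 1
    constexpr uintptr_t kCodeHeaderSize = 64;  // illustrative, not the real value

    // Models: ra = trampoline + interpreter_entry_return_pc_offset
    //              + Code::kHeaderSize - kHeapObjectTag
    uintptr_t ReturnAddressIntoTrampoline(uintptr_t tagged_trampoline,
                                          uintptr_t return_pc_offset) {
      return tagged_trampoline + return_pc_offset + kCodeHeaderSize - kHeapObjectTag;
    }

    int main() {
      // Pretend the trampoline Code object lives at tagged address 0x10001.
      uintptr_t ra = ReturnAddressIntoTrampoline(0x10001, 0x84);
      std::printf("ra = %#lx\n", (unsigned long)ra);
    }
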
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and pop the
- // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -----------------------------------
+ // First, look up code; maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register argument_count = a0;
+ Register closure = a1;
+ Register new_target = a3;
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = a0;
+ Register index = a2;
+ __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+
+ // Find literals.
+ // a3 : native context
+ // a2 : length / index
+ // a0 : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = a3;
+ __ ld(native_context, NativeContextMemOperand());
+
+ __ bind(&loop_top);
+ Register temp = a1;
+ Register array_pointer = a5;
+
+ // Does the native context match?
+ __ SmiScale(at, index, kPointerSizeLog2);
+ __ Daddu(array_pointer, map, Operand(at));
+ __ ld(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+ // OSR id set to none?
+ __ ld(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
+ // Literals available?
+ __ ld(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ ld(a4, MemOperand(sp, 0));
+ __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
+ __ push(index);
+ __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = a4;
+ __ ld(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, a5);
+
+ // Link the closure into the optimized function list.
+ // a4 : code entry
+ // a3 : native context
+ // a1 : closure
+ __ ld(a5,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
+ kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ sd(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(a5, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, a5);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ Jump(entry);
+
+ __ bind(&loop_bottom);
+ __ Dsubu(index, index,
+ Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context-free optimized code map entry.
+ __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ And(a5, a5, Operand(Code::KindField::kMask));
+ __ dsrl(a5, a5, Code::KindField::kShift);
+ __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
+ // Yes, install the full code.
+ __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, entry, a5);
+ __ Jump(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
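
The loop above is a backwards scan over the SharedFunctionInfo's optimized
code map: fixed-size entries, matched on native context and on an OSR id of
none, yielding literals and possibly code. The search logic, modeled with
plain structs standing in for the weak-cell-laden map (field names invented):

    #include <cstdio>
    #include <vector>

    struct Entry {
      int context;      // weak cell to a native context in the real map
      int osr_ast_id;   // must be BailoutId::None() to be usable here
      bool has_literals;
      bool has_code;
    };
    constexpr int kNoOsr = -1;  // stand-in for BailoutId::None()

    // Models loop_top/loop_bottom in Generate_CompileLazy: scan from the last
    // entry towards the first, looking for this context with no OSR id.
    const Entry* FindCachedEntry(const std::vector<Entry>& map, int native_context) {
      for (int i = static_cast<int>(map.size()) - 1; i >= 0; --i) {
        const Entry& e = map[i];
        if (e.context != native_context) continue;  // Branch(&loop_bottom, ne, ...)
        if (e.osr_ast_id != kNoOsr) continue;       // OSR entries don't count here
        if (!e.has_literals) return nullptr;        // gotta_call_runtime
        return &e;  // code may still be absent: the maybe_call_runtime path
      }
      return nullptr;  // found neither literals nor code: gotta_call_runtime
    }

    int main() {
      std::vector<Entry> map = {{7, 3, true, true}, {7, kNoOsr, true, false}};
      const Entry* e = FindCachedEntry(map, 7);
      std::printf("found=%d has_code=%d\n", e != nullptr, e && e->has_code);
    }
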
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
-
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a1 : new target (preserved for callee)
+ // -- a3 : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ Move(t2, a0);
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3, a1);
+
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ Branch(&over, ne, t2, Operand(j));
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ ld(t2, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ i * kPointerSize));
+ __ push(t2);
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+
+ // Call runtime; on success, unwind this frame and the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(v0, &failed);
+
+ __ Drop(2);
+ __ pop(t2);
+ __ SmiUntag(t2);
+ scope.GenerateLeaveFrame();
+
+ __ Daddu(t2, t2, Operand(1));
+ __ Dlsa(sp, sp, t2, kPointerSizeLog2);
+ __ Ret();
+
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ Pop(a0, a1, a3);
+ __ SmiUntag(a0);
+ }
+ // On failure, tail call back to regular js.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
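
The nested loops in Generate_InstantiateAsmJs implement "copy whichever of
(stdlib, foreign, heap) the caller actually passed, then pad the rest with
undefined", unrolled over the dynamic argument count in t2. The same logic in
straightforward runtime C++ (Value and kUndefined are stand-ins, and stack
push order is abstracted away):

    #include <cstdio>
    #include <string>
    #include <vector>

    using Value = std::string;
    const Value kUndefined = "undefined";

    // Models the j/i loops: keep at most three caller arguments and fill the
    // remainder with undefined (the PushRoot calls in the builtin).
    std::vector<Value> PadAsmJsArgs(const std::vector<Value>& caller_args) {
      std::vector<Value> out;
      size_t argc = caller_args.size() < 3 ? caller_args.size() : 3;
      for (size_t i = 0; i < argc; ++i) out.push_back(caller_args[i]);
      while (out.size() < 3) out.push_back(kUndefined);
      return out;
    }

    int main() {
      for (const Value& v : PadAsmJsArgs({"stdlib"}))
        std::printf("%s ", v.c_str());  // stdlib undefined undefined
      std::printf("\n");
    }
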
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
@@ -1130,8 +1498,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// crawls in MakeCodeYoung. This seems a bit fragile.
// Set a0 to point to the head of the PlatformCodeAge sequence.
- __ Dsubu(a0, a0,
- Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+ __ Dsubu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -1150,19 +1517,18 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(a0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
@@ -1170,8 +1536,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// pointers.
// Set a0 to point to the head of the PlatformCodeAge sequence.
- __ Dsubu(a0, a0,
- Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+ __ Dsubu(a0, a0, Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
// The following registers must be saved and restored when calling through to
// the runtime:
@@ -1197,17 +1562,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Jump(a0);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
@@ -1223,20 +1585,17 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
}
__ Daddu(sp, sp, Operand(kPointerSize)); // Ignore state
- __ Jump(ra); // Jump to miss handler
+ __ Jump(ra); // Jump to miss handler
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1252,15 +1611,19 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiUntag(a6);
// Switch on the state.
Label with_tos_register, unknown_state;
- __ Branch(&with_tos_register,
- ne, a6, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ Branch(
+ &with_tos_register, ne, a6,
+ Operand(static_cast<int64_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot; Daddu will emit one instruction.
__ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ bind(&with_tos_register);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
__ ld(v0, MemOperand(sp, 1 * kPointerSize));
- __ Branch(&unknown_state, ne, a6, Operand(FullCodeGenerator::TOS_REG));
+ __ Branch(
+ &unknown_state, ne, a6,
+ Operand(static_cast<int64_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ Ret(USE_DELAY_SLOT);
// Safe to fill delay slot; Daddu will emit one instruction.
@@ -1270,22 +1633,18 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ stop("no cases left");
}
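
The interesting part of the rewritten helper is the switch on the deoptimizer's
BailoutState, which replaces the old FullCodeGenerator state constants:
NO_REGISTERS just drops the state word, while TOS_REGISTER also reloads the
saved top-of-stack value into the accumulator (v0). In plain C++, with an
illustrative stack model:

    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    enum class BailoutState { NO_REGISTERS, TOS_REGISTER };

    // Models the tail of Generate_NotifyDeoptimizedHelper. The "stack" holds
    // [state, maybe_tos_value, ...]; returns the restored accumulator.
    long Resume(BailoutState state, std::vector<long>& stack, long accumulator) {
      switch (state) {
        case BailoutState::NO_REGISTERS:
          stack.erase(stack.begin());  // Daddu(sp, sp, 1 * kPointerSize)
          return accumulator;
        case BailoutState::TOS_REGISTER:
          accumulator = stack[1];      // ld v0, [sp + 1 * kPointerSize]
          stack.erase(stack.begin(), stack.begin() + 2);
          return accumulator;
      }
      std::abort();  // __ stop("no cases left")
    }

    int main() {
      std::vector<long> stack = {1, 42};
      std::printf("%ld\n", Resume(BailoutState::TOS_REGISTER, stack, 0));  // 42
    }
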
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
// Clobbers {t2, t3, a4, a5}.
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
@@ -1349,7 +1708,6 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments excluding receiver
@@ -1386,10 +1744,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1397,9 +1761,15 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
__ Ret(eq, v0, Operand(Smi::FromInt(0)));
+ // Drop any potential handler frame that may be sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ if (has_handler_frame) {
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1407,7 +1777,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ DeoptimizationInputData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
__ SmiUntag(a1);
// Compute the target address = code_obj + header_size + osr_offset
@@ -1419,11 +1790,21 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
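
Both entry points share the same tail: after the runtime call returns a code
object, the jump target is the code object plus the header size (minus the
heap tag) plus the osr_pc_offset read out of the deoptimization data. That
address computation as a runnable one-liner, with invented constants:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kHeapObjectTag = 1;
    constexpr uintptr_t kCodeHeaderSize = 64;  // stand-in for Code::kHeaderSize

    // Models the end of Generate_OnStackReplacementHelper:
    // entry = code_obj + header_size + osr_offset, on a tagged code pointer.
    uintptr_t OsrEntry(uintptr_t tagged_code_obj, uintptr_t osr_pc_offset) {
      return tagged_code_obj + kCodeHeaderSize - kHeapObjectTag + osr_pc_offset;
    }

    int main() {
      std::printf("%#lx\n", (unsigned long)OsrEntry(0x20001, 0x30));
    }
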
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : function
+ // -- cp : context
// -- sp[0] : receiver
// -----------------------------------
@@ -1463,29 +1844,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
// 3. Raise a TypeError if the receiver is not a date.
__ bind(&receiver_not_date);
- __ TailCallRuntime(Runtime::kThrowNotDateError);
-}
-
-// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argc
- // -- sp[0] : first argument (left-hand side)
- // -- sp[8] : receiver (right-hand side)
- // -----------------------------------
-
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ ld(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ ld(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Push(a0);
+ __ Move(a0, Smi::FromInt(0));
+ __ EnterBuiltinFrame(cp, a1, a0);
+ __ CallRuntime(Runtime::kThrowNotDateError);
}
-
- // Pop the argument and the receiver.
- __ DropAndRet(2);
}
// static
@@ -1561,7 +1926,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
@@ -1604,7 +1968,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -1668,7 +2031,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
@@ -1747,7 +2109,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
@@ -1769,17 +2130,15 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
__ Branch(stack_overflow, le, a5, Operand(at));
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
// __ sll(a0, a0, kSmiTagSize);
__ dsll32(a0, a0, 0);
__ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
- __ Daddu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+ __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ kPointerSize));
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- v0 : result being passed through
@@ -1796,7 +2155,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Daddu(sp, sp, Operand(kPointerSize));
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2077,8 +2435,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiTag(a0);
__ Push(a0, a1);
__ mov(a0, a3);
+ __ Push(cp);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(cp);
__ mov(a3, v0);
__ Pop(a0, a1);
__ SmiUntag(a0);
@@ -2118,7 +2478,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2213,7 +2572,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ Jump(at);
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2273,7 +2631,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2294,7 +2651,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ Jump(at);
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2386,7 +2742,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ Jump(at);
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2405,7 +2760,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2457,6 +2811,63 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : requested object size (untagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ SmiTag(a0);
+ __ Push(a0);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : requested object size (untagged)
+ // -- ra : return address
+ // -----------------------------------
+ __ SmiTag(a0);
+ __ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(a0, a1);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : message_id as Smi
+ // -- ra : return address
+ // -----------------------------------
+ __ Push(a0);
+ __ Move(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
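
All three thin builtins above share one shape: Smi-tag the raw argument so the
GC cannot misread it as a pointer, push it, clear cp (there is no context),
and tail-call the runtime. The tagging itself is just a shift; a sketch of the
round trip, noting that the exact shift is port-specific (64-bit V8 ports keep
the payload in the upper 32 bits, so the constant below is an assumption):

    #include <cstdint>
    #include <cstdio>

    constexpr int kSmiShift = 32;  // port-specific; illustrative in this sketch

    intptr_t SmiTag(int32_t value) { return static_cast<intptr_t>(value) << kSmiShift; }
    int32_t SmiUntag(intptr_t smi) { return static_cast<int32_t>(smi >> kSmiShift); }

    int main() {
      intptr_t requested = SmiTag(4096);         // __ SmiTag(a0) before the Push
      std::printf("%d\n", SmiUntag(requested));  // the runtime untags it: 4096
    }
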
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in a0.
+ Label not_smi;
+ __ JumpIfNotSmi(a0, &not_smi);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_smi);
+
+ Label not_heap_number;
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_heap_number);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
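
Generate_ToNumber is two fast returns and a tail call: Smis and HeapNumbers
are already numbers and come back unchanged, everything else is punted to the
NonNumberToNumber builtin. Modeled with a toy tagged value (the tagging scheme
here is invented):

    #include <cstdio>

    enum class Tag { kSmi, kHeapNumber, kOther };
    struct TaggedValue { Tag tag; double payload; };

    double NonNumberToNumberSlowPath(const TaggedValue&) { return 0; }  // stand-in

    // Models Generate_ToNumber: two early returns, then the tail call.
    double ToNumber(const TaggedValue& v) {
      if (v.tag == Tag::kSmi) return v.payload;         // JumpIfNotSmi fallthrough
      if (v.tag == Tag::kHeapNumber) return v.payload;  // HEAP_NUMBER_TYPE check
      return NonNumberToNumberSlowPath(v);              // Jump to NonNumberToNumber
    }

    int main() {
      std::printf("%g %g\n", ToNumber({Tag::kSmi, 7}), ToNumber({Tag::kOther, 3}));
    }
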
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
@@ -2470,8 +2881,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
- __ Branch(&dont_adapt_arguments, eq,
- a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ Branch(&dont_adapt_arguments, eq, a2,
+ Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
// We use Uless as the number of arguments should always be greater than 0.
__ Branch(&too_few, Uless, a0, Operand(a2));
@@ -2575,7 +2986,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
LeaveArgumentsAdaptorFrame(masm);
__ Ret();
-
// -------------------------------------------
// Don't adapt arguments.
// -------------------------------------------
@@ -2591,7 +3001,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/builtins/ppc/OWNERS b/deps/v8/src/builtins/ppc/OWNERS
new file mode 100644
index 0000000000..752e8e3d81
--- /dev/null
+++ b/deps/v8/src/builtins/ppc/OWNERS
@@ -0,0 +1,6 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 884afedb21..dfea83f2b4 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -13,12 +13,10 @@
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- r3 : number of arguments excluding receiver
// -- r4 : target
@@ -36,32 +34,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// ordinary functions).
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- switch (extra_args) {
- case BuiltinExtraArguments::kTarget:
- __ Push(r4);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kNewTarget:
- __ Push(r6);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kTargetAndNewTarget:
- __ Push(r4, r6);
- num_extra_args += 2;
- break;
- case BuiltinExtraArguments::kNone:
- break;
- }
-
// JumpToExternalReference expects r3 to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ addi(r3, r3, Operand(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
+ // Insert extra arguments.
+ __ SmiTag(r3);
+ __ Push(r3, r4, r6);
+ __ SmiUntag(r3);
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ exit_frame_type == BUILTIN_EXIT);
+}
// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
@@ -70,14 +55,12 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
__ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
}
-
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the Array function from the current native context.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
}
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -105,7 +88,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -134,14 +116,15 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
- // -- r3 : number of arguments
- // -- lr : return address
- // -- sp[(argc - n) * 8] : arg[n] (zero-based)
- // -- sp[(argc + 1) * 8] : receiver
+ // -- r3 : number of arguments
+ // -- r4 : function
+ // -- cp : context
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
// -----------------------------------
Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
Heap::RootListIndex const root_index =
@@ -150,58 +133,64 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
// Load the accumulator with the default return value (either -Infinity or
- // +Infinity), with the tagged value in r4 and the double value in d1.
- __ LoadRoot(r4, root_index);
- __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ // +Infinity), with the tagged value in r8 and the double value in d1.
+ __ LoadRoot(r8, root_index);
+ __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
// Setup state for loop
// r5: address of arg[0] + kPointerSize
// r6: number of slots to drop at exit (arguments + receiver)
- __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
- __ add(r5, sp, r5);
- __ addi(r6, r3, Operand(1));
+ __ addi(r7, r3, Operand(1));
Label done_loop, loop;
+ __ mr(r7, r3);
__ bind(&loop);
{
// Check if all parameters done.
- __ cmpl(r5, sp);
- __ ble(&done_loop);
+ __ subi(r7, r7, Operand(1));
+ __ cmpi(r7, Operand::Zero());
+ __ blt(&done_loop);
- // Load the next parameter tagged value into r3.
- __ LoadPU(r3, MemOperand(r5, -kPointerSize));
+ // Load the next parameter tagged value into r5.
+ __ ShiftLeftImm(r5, r7, Operand(kPointerSizeLog2));
+ __ LoadPX(r5, MemOperand(sp, r5));
// Load the double value of the parameter into d2, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
- __ JumpIfSmi(r3, &convert_smi);
- __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ JumpIfRoot(r7, Heap::kHeapNumberMapRootIndex, &convert_number);
+ __ JumpIfSmi(r5, &convert_smi);
+ __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r6);
- __ Push(r4, r5, r6);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Pop(r4, r5, r6);
- __ SmiUntag(r6);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r3);
+ __ SmiTag(r7);
+ __ EnterBuiltinFrame(cp, r4, r3);
+ __ Push(r7, r8);
+ __ mr(r3, r5);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ mr(r5, r3);
+ __ Pop(r7, r8);
+ __ LeaveBuiltinFrame(cp, r4, r3);
+ __ SmiUntag(r7);
+ __ SmiUntag(r3);
{
// Restore the double accumulator value (d1).
Label done_restore;
- __ SmiToDouble(d1, r4);
- __ JumpIfSmi(r4, &done_restore);
- __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ SmiToDouble(d1, r8);
+ __ JumpIfSmi(r8, &done_restore);
+ __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
__ bind(&done_restore);
}
}
__ b(&convert);
__ bind(&convert_number);
- __ lfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ lfd(d2, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ b(&done_convert);
__ bind(&convert_smi);
- __ SmiToDouble(d2, r3);
+ __ SmiToDouble(d2, r5);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
@@ -213,26 +202,28 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ b(CommuteCondition(cond_done), &compare_swap);
// Left and right hand side are equal, check for -0 vs. +0.
- __ TestDoubleIsMinusZero(reg, r7, r8);
+ __ TestDoubleIsMinusZero(reg, r9, r0);
__ bne(&loop);
// Update accumulator. Result is on the right hand side.
__ bind(&compare_swap);
__ fmr(d1, d2);
- __ mr(r4, r3);
+ __ mr(r8, r5);
__ b(&loop);
// At least one side is NaN, which means that the result will be NaN too.
// We still need to visit the rest of the arguments.
__ bind(&compare_nan);
- __ LoadRoot(r4, Heap::kNanValueRootIndex);
- __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ LoadRoot(r8, Heap::kNanValueRootIndex);
+ __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset));
__ b(&loop);
}
__ bind(&done_loop);
- __ mr(r3, r4);
- __ Drop(r6);
+ // Drop all slots, including the receiver.
+ __ addi(r3, r3, Operand(1));
+ __ Drop(r3);
+ __ mr(r3, r8);
__ Ret();
}
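
Despite the register shuffle, the PPC rewrite still implements Math.max's JS
semantics: convert each argument with ToNumber, make NaN sticky, and break the
"equal" case apart so that +0 beats -0 (the TestDoubleIsMinusZero branch).
Those semantics in portable C++, assuming the arguments have already been
converted to numbers:

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Models Generate_MathMaxMin for kind == kMax: the accumulator starts at
    // -Infinity, NaN is sticky, and -0 vs +0 is decided by the sign bit.
    double JsMathMax(const std::vector<double>& args) {
      double acc = -INFINITY;  // LoadRoot of the -Infinity heap number
      for (double x : args) {
        if (std::isnan(x)) { acc = NAN; continue; }  // compare_nan: keep looping
        if (std::isnan(acc)) continue;               // result stays NaN
        if (x > acc) { acc = x; continue; }          // compare_swap
        if (x == acc && std::signbit(acc) && !std::signbit(x))
          acc = x;                                   // the -0 vs +0 tiebreak
      }
      return acc;
    }

    int main() {
      std::printf("%d\n", std::signbit(JsMathMax({-0.0, 0.0})));  // 0: +0 wins
      std::printf("%g\n", JsMathMax({1, NAN, 5}));                // nan
    }
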
@@ -241,26 +232,38 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r3 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into r3.
Label no_arguments;
{
+ __ mr(r5, r3); // Store argc in r5.
__ cmpi(r3, Operand::Zero());
__ beq(&no_arguments);
__ subi(r3, r3, Operand(1));
__ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ LoadPUX(r3, MemOperand(sp, r3));
- __ Drop(2);
+ __ LoadPX(r3, MemOperand(sp, r3));
}
// 2a. Convert the first argument to a number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r5);
+ __ EnterBuiltinFrame(cp, r4, r5);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(cp, r4, r5);
+ __ SmiUntag(r5);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ Drop(r5);
+ __ Ret(1);
+ }
// 2b. No arguments, return +0.
__ bind(&no_arguments);
@@ -268,13 +271,13 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
__ Ret(1);
}
-
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r6 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -283,20 +286,18 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // 2. Load the first argument into r5 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into r5.
{
Label no_arguments, done;
+ __ mr(r9, r3); // Store argc in r9.
__ cmpi(r3, Operand::Zero());
__ beq(&no_arguments);
__ subi(r3, r3, Operand(1));
__ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
- __ LoadPUX(r5, MemOperand(sp, r5));
- __ Drop(2);
+ __ LoadPX(r5, MemOperand(sp, r5));
__ b(&done);
__ bind(&no_arguments);
__ LoadSmiLiteral(r5, Smi::FromInt(0));
- __ Drop(1);
__ bind(&done);
}
@@ -307,72 +308,83 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ CompareObjectType(r5, r7, r7, HEAP_NUMBER_TYPE);
__ beq(&done_convert);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4, r6);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r9);
+ __ EnterBuiltinFrame(cp, r4, r9);
+ __ Push(r6);
__ mr(r3, r5);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ mr(r5, r3);
- __ Pop(r4, r6);
+ __ Pop(r6);
+ __ LeaveBuiltinFrame(cp, r4, r9);
+ __ SmiUntag(r9);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ cmp(r4, r6);
__ bne(&new_object);
// 5. Allocate a JSValue wrapper for the number.
__ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
- __ Ret();
+ __ b(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r9);
+ __ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(r5);
+ __ LeaveBuiltinFrame(cp, r4, r9);
+ __ SmiUntag(r9);
}
__ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r9);
+ __ Ret(1);
+ }
+}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r3 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into r3.
Label no_arguments;
{
+ __ mr(r5, r3); // Store argc in r5.
__ cmpi(r3, Operand::Zero());
__ beq(&no_arguments);
__ subi(r3, r3, Operand(1));
__ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ LoadPUX(r3, MemOperand(sp, r3));
- __ Drop(2);
+ __ LoadPX(r3, MemOperand(sp, r3));
}
// 2a. At least one argument, return r3 if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(r3, &to_string);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(r3, r6, r6, FIRST_NONSTRING_TYPE);
__ bgt(&to_string);
__ beq(&symbol_descriptive_string);
- __ Ret();
+ __ b(&drop_frame_and_ret);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -385,18 +397,31 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert r3 to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(r5);
+ __ EnterBuiltinFrame(cp, r4, r5);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(cp, r4, r5);
+ __ SmiUntag(r5);
}
+ __ b(&drop_frame_and_ret);
// 3b. Convert symbol in r3 to a string.
__ bind(&symbol_descriptive_string);
{
+ __ Drop(r5);
+ __ Drop(1);
__ Push(r3);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r5);
+ __ Ret(1);
+ }
+}
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
@@ -404,6 +429,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- r3 : number of arguments
// -- r4 : constructor function
// -- r6 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -412,20 +438,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // 2. Load the first argument into r5 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into r5.
{
Label no_arguments, done;
+ __ mr(r9, r3); // Store argc in r9.
__ cmpi(r3, Operand::Zero());
__ beq(&no_arguments);
__ subi(r3, r3, Operand(1));
__ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
- __ LoadPUX(r5, MemOperand(sp, r5));
- __ Drop(2);
+ __ LoadPX(r5, MemOperand(sp, r5));
__ b(&done);
__ bind(&no_arguments);
__ LoadRoot(r5, Heap::kempty_stringRootIndex);
- __ Drop(1);
__ bind(&done);
}
@@ -437,39 +461,51 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ blt(&done_convert);
__ bind(&convert);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(r4, r6);
+ __ SmiTag(r9);
+ __ EnterBuiltinFrame(cp, r4, r9);
+ __ Push(r6);
__ mr(r3, r5);
__ CallStub(&stub);
__ mr(r5, r3);
- __ Pop(r4, r6);
+ __ Pop(r6);
+ __ LeaveBuiltinFrame(cp, r4, r9);
+ __ SmiUntag(r9);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ cmp(r4, r6);
__ bne(&new_object);
// 5. Allocate a JSValue wrapper for the string.
__ AllocateJSValue(r3, r4, r5, r7, r8, &new_object);
- __ Ret();
+ __ b(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r9);
+ __ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(r5);
+ __ LeaveBuiltinFrame(cp, r4, r9);
+ __ SmiUntag(r9);
}
__ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r9);
+ __ Ret(1);
+ }
+}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -504,7 +540,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
@@ -522,7 +557,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_implicit_receiver,
@@ -605,15 +639,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of arguments
// r4: constructor function
// r6: new target
- if (is_api_function) {
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(r3);
- __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+
+ ParameterCount actual(r3);
+ __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -684,27 +713,194 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ blr();
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the value to pass to the generator
+ // -- r4 : the JSGeneratorObject to resume
+ // -- r5 : the resume mode (tagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(r4);
+
+ // Store input value into generator object.
+ __ StoreP(r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset),
+ r0);
+ __ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
+
+ // Load suspended function and context.
+ __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
+ __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ mov(ip, Operand(last_step_action));
+ __ LoadByte(ip, MemOperand(ip), r0);
+ __ extsb(ip, ip);
+ __ cmpi(ip, Operand(StepIn));
+ __ bge(&prepare_step_in_if_stepping);
+
+ // Flood function if we need to continue stepping in the suspended generator.
+
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+
+ __ mov(ip, Operand(debug_suspended_generator));
+ __ LoadP(ip, MemOperand(ip));
+ __ cmp(ip, r4);
+ __ beq(&prepare_step_in_suspended_generator);
+ __ bind(&stepping_prepared);
+
+ // Push receiver.
+ __ LoadP(ip, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+ __ Push(ip);
+
+ // ----------- S t a t e -------------
+ // -- r4 : the JSGeneratorObject to resume
+ // -- r5 : the resume mode (tagged)
+ // -- r7 : generator function
+ // -- cp : generator context
+ // -- lr : return address
+ // -- sp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r3, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label loop, done_loop;
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+#if V8_TARGET_ARCH_PPC64
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&done_loop);
+#else
+ __ SmiUntag(r3, SetRC);
+ __ beq(&done_loop, cr0);
+#endif
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ push(ip);
+ __ bdnz(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
+ __ bne(&old_generator);
+
+ // New-style (ignition/turbofan) generator object
+ {
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ mr(r6, r4);
+ __ mr(r4, r7);
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ __ JumpToJSEntry(ip);
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(r7);
+
+ // Restore the operand stack.
+ __ LoadP(r3, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
+ __ LoadP(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ addi(r3, r3,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop, done_loop;
+ __ SmiUntag(r6, SetRC);
+ __ beq(&done_loop, cr0);
+ __ mtctr(r6);
+ __ bind(&loop);
+ __ LoadPU(ip, MemOperand(r3, kPointerSize));
+ __ Push(ip);
+ __ bdnz(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(ip, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset),
+ r0);
+
+ // Resume the generator function at the continuation.
+ __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
+ __ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r6);
+ }
+ __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r5);
+ __ add(r6, r6, r5);
+ __ LoadSmiLiteral(r5,
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ mr(r3, r4); // Continuation expects generator object in r3.
+ __ Jump(r6);
+ }
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r5, r7);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(r4, r5);
+ __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ }
+ __ b(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4, r5);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(r4, r5);
+ __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ }
+ __ b(&stepping_prepared);
+}
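
The trampoline's central decision is the CompareObjectType on the function
data slot: a BytecodeArray marks a new-style (Ignition/TurboFan) generator,
resumed by calling the function with the generator object smuggled in as
new.target; anything else is an old-style full-codegen generator whose frame
and operand stack get rebuilt by hand. A compact sketch of that dispatch,
where everything is a stand-in:

    #include <cstdio>

    enum class FunctionDataKind { kBytecodeArray, kOther };

    // Models the old_generator branch in Generate_ResumeGeneratorTrampoline.
    const char* ResumePath(FunctionDataKind data) {
      if (data == FunctionDataKind::kBytecodeArray) {
        // New-style: jump to the code entry; new.target carries the generator.
        return "call code entry, generator in new.target";
      }
      // Old-style: push a standard frame, restore the operand stack, then
      // jump to code start + continuation offset.
      return "rebuild frame, jump to code + continuation";
    }

    int main() {
      std::printf("%s\n", ResumePath(FunctionDataKind::kBytecodeArray));
      std::printf("%s\n", ResumePath(FunctionDataKind::kOther));
    }
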
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -712,10 +908,8 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-
enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
-
// Clobbers r5; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
@@ -743,7 +937,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
__ bind(&okay);
}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
@@ -819,16 +1012,28 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r3: result
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+ Register args_count = scratch;
+
+ // Get the arguments + receiver count.
+ __ LoadP(args_count,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ lwz(args_count,
+ FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ __ add(sp, sp, args_count);
+}
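
LeaveInterpreterFrame factors out the exit sequence shared by the normal
return and the mark-baseline path: read the parameter size (receiver plus
arguments, in bytes) out of the frame's BytecodeArray, tear the frame down,
then step sp over those bytes. As arithmetic, with invented sizes:

    #include <cstdint>
    #include <cstdio>

    struct BytecodeArray { uint32_t parameter_size; };  // bytes, incl. receiver
    struct InterpreterFrame {
      const BytecodeArray* bytecode;  // the kBytecodeArrayFromFp slot
      uintptr_t caller_sp;            // sp right after LeaveFrame
    };

    // Models LeaveInterpreterFrame: after the frame teardown, sp must also
    // skip the receiver and arguments the caller pushed.
    uintptr_t SpAfterLeave(const InterpreterFrame& f) {
      return f.caller_sp + f.bytecode->parameter_size;  // add(sp, sp, args_count)
    }

    int main() {
      BytecodeArray ba{3 * 8};  // receiver + two arguments, 8-byte slots
      InterpreterFrame f{&ba, 0x7000};
      std::printf("%#lx\n", (unsigned long)SpAfterLeave(f));
    }
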
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -847,14 +1052,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r4);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
Label array_done;
Register debug_info = r5;
@@ -867,11 +1074,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
__ beq(&array_done);
__ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ bind(&array_done);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ LoadP(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+ __ mov(ip, Operand(masm->CodeObject())); // Self-reference to this code.
+ __ cmp(r3, ip);
+ __ bne(&switch_to_different_code_kind);
+
+ // Check function data field is actually a BytecodeArray object.
+
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
@@ -879,8 +1094,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ li(r3, Operand::Zero());
+ // Load initial bytecode offset.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(r3, kInterpreterBytecodeOffsetRegister);
__ Push(r6, kInterpreterBytecodeArrayRegister, r3);
// Allocate the local and temporary register file on the stack.
@@ -911,18 +1130,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_args);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ addi(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ mov(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -932,36 +1141,52 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
__ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
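+  // Record the PC right after the dispatch Call above. Bytecode handlers
+  // return here, and InterpreterEnterBytecodeDispatch below recreates this
+  // return address from the recorded offset.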
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+ // The return value is in r3.
+ LeaveInterpreterFrame(masm, r5);
+ __ blr();
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kCodeOffset));
+ __ addi(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(r4, r7, r8);
+ __ JumpToJSEntry(r7);
}
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadP(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, r5);
- // The return value is in accumulator, which is already in r3.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(r3);
- // Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Push function as argument and compile for baseline.
+ __ push(r4);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ lwz(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ add(sp, sp, r0);
+ // Restore return value.
+ __ pop(r3);
+ }
__ blr();
}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
Register count, Register scratch) {
Label loop;
@@ -973,10 +1198,10 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
__ bdnz(&loop);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -992,12 +1217,18 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, r5, r6, r7);
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1022,25 +1253,25 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ Move(r5, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+ __ mtlr(r0);
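+  // Returning bytecode handlers thus land in the entry trampoline just after
+  // its dispatch call, where the interpreter frame is torn down.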
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ addi(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ LoadP(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ LoadP(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1053,9 +1284,7 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1063,76 +1292,227 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftImm(ip, r4, Operand(kPointerSizeLog2));
__ LoadPX(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
}
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
- __ Push(r4);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
-  // Drop state (we don't use these for interpreter deopts) and pop the
-  // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ mov(r0,
- Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
- __ mtlr(r0);
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -- r4 : target function (preserved for callee)
+ // -----------------------------------
+  // First look up code; maybe we don't need to compile!
+ Label gotta_call_runtime;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = r4;
+ Register map = r9;
+ Register index = r5;
+ __ LoadP(map,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(map,
+ FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+ __ blt(&gotta_call_runtime);
+
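+  // Walk the optimized code map backwards, one kEntryLength-sized entry at a
+  // time, looking for an entry whose native context matches the current one.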
+ // Find literals.
+ // r10 : native context
+ // r5 : length / index
+ // r9 : optimized code map
+ // r6 : new target
+ // r4 : closure
+ Register native_context = r10;
+ __ LoadP(native_context, NativeContextMemOperand());
+
+ __ bind(&loop_top);
+ Register temp = r11;
+ Register array_pointer = r8;
+
+ // Does the native context match?
+ __ SmiToPtrArrayOffset(array_pointer, index);
+ __ add(array_pointer, map, array_pointer);
+ __ LoadP(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ cmp(temp, native_context);
+ __ bne(&loop_bottom);
+ // OSR id set to none?
+ __ LoadP(temp,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
+ __ bne(&loop_bottom);
+ // Literals available?
+ __ LoadP(temp,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
+ __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r7,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Code available?
+ Register entry = r7;
+ __ LoadP(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ // Store code entry in the closure.
+ __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, entry, r8);
+
+ // Link the closure into the optimized function list.
+ // r7 : code entry
+ // r10: native context
+ // r4 : closure
+ __ LoadP(
+ r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
+ r0);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, temp,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ StoreP(
+ closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+ // Save closure before the write barrier.
+ __ mr(r8, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, r8, temp,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ JumpToJSEntry(entry);
+
+ __ bind(&loop_bottom);
+ __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+ r0);
+ __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+ __ bgt(&loop_top);
+
+ // We found neither literals nor code.
+ __ b(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+
+ // Last possibility. Check the context free optimized code map entry.
+ __ LoadP(entry,
+ FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ b(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ // Is the full code valid?
+ __ LoadP(entry,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(r8);
+ __ cmpi(r8, Operand(Code::BUILTIN));
+ __ beq(&gotta_call_runtime);
+ // Yes, install the full code.
+ __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, entry, r8);
+ __ JumpToJSEntry(entry);
+
+ __ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
-
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r4 : new target (preserved for callee)
+ // -- r6 : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve argument count for later compare.
+ __ Move(r7, r3);
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(r3);
+ __ Push(r3, r4, r6, r4);
+
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
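+    // There are at most three arguments (stdlib, foreign, heap). Each
+    // unrolled case below copies argc of them from the caller's frame and
+    // pads the remaining slots with undefined.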
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ cmpi(r7, Operand(j));
+ __ bne(&over);
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ i * kPointerSize));
+ __ push(r7);
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+
+    // Call runtime; on success, unwind this frame and then the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(r3, &failed);
+
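+    // Success: drop the saved target and new target, restore the Smi-tagged
+    // argument count, leave the frame and drop the arguments plus receiver.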
+ __ Drop(2);
+ __ pop(r7);
+ __ SmiUntag(r7);
+ scope.GenerateLeaveFrame();
+
+ __ addi(r7, r7, Operand(1));
+ __ Drop(r7);
+ __ Ret();
+
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ Pop(r3, r4, r6);
+ __ SmiUntag(r3);
+ }
+ // On failure, tail call back to regular js.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
@@ -1175,7 +1555,6 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
@@ -1212,17 +1591,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Jump(r3);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
@@ -1241,17 +1617,14 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
__ blr(); // Jump to miss handler
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1267,14 +1640,19 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiUntag(r9);
// Switch on the state.
Label with_tos_register, unknown_state;
- __ cmpi(r9, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ cmpi(
+ r9,
+ Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ bne(&with_tos_register);
__ addi(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&with_tos_register);
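+  // Restore the value that was on top of the stack into the accumulator (r3).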
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
__ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
- __ cmpi(r9, Operand(FullCodeGenerator::TOS_REG));
+ __ cmpi(
+ r9,
+ Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ bne(&unknown_state);
__ addi(sp, sp, Operand(2 * kPointerSize)); // Remove state.
__ Ret();
@@ -1283,22 +1661,18 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ stop("no cases left");
}
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
// Clobbers registers {r7, r8, r9, r10}.
void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
@@ -1364,7 +1738,6 @@ void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments excluding receiver
@@ -1376,7 +1749,6 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// -- sp[4 * argc] : receiver
// -----------------------------------
-
// Load the FunctionTemplateInfo.
__ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
@@ -1402,10 +1774,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
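+  // When OSR is triggered from bytecode, a STUB handler frame sits on top of
+  // the JavaScript frame, so the function must be loaded one frame up.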
+ if (has_handler_frame) {
+ __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r3, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1413,7 +1791,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
Label skip;
__ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
__ bne(&skip);
@@ -1421,6 +1799,12 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
+  // Drop any potential handler frame that may be sitting on top of the actual
+  // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ if (has_handler_frame) {
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
@@ -1449,11 +1833,21 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : function
+ // -- cp : context
// -- lr : return address
// -- sp[0] : receiver
// -----------------------------------
@@ -1463,7 +1857,7 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
{
__ Pop(r3);
__ JumpIfSmi(r3, &receiver_not_date);
- __ CompareObjectType(r3, r4, r5, JS_DATE_TYPE);
+ __ CompareObjectType(r3, r5, r6, JS_DATE_TYPE);
__ bne(&receiver_not_date);
}
@@ -1493,29 +1887,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
// 3. Raise a TypeError if the receiver is not a date.
__ bind(&receiver_not_date);
- __ TailCallRuntime(Runtime::kThrowNotDateError);
-}
-
-// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argc
- // -- sp[0] : first argument (left-hand side)
- // -- sp[4] : receiver (right-hand side)
- // -----------------------------------
-
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ LoadP(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ LoadP(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
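+    // Re-push the receiver and enter a builtin frame with argc == 0 before
+    // calling into the runtime to throw.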
+ __ push(r3);
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ EnterBuiltinFrame(cp, r4, r3);
+ __ CallRuntime(Runtime::kThrowNotDateError);
}
-
- // Pop the argument and the receiver.
- __ Ret(2);
}
// static
@@ -1590,7 +1968,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
@@ -1619,7 +1996,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r5, sp, r5);
-
__ mtctr(r3);
__ bind(&loop);
__ LoadP(ip, MemOperand(r5, -kPointerSize));
@@ -1636,7 +2012,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
@@ -1699,7 +2074,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argc
@@ -1777,7 +2151,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
@@ -1799,7 +2172,6 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
__ ble(stack_overflow); // Signed comparison.
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r3);
__ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -1814,7 +2186,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
kPointerSize));
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : result being passed through
@@ -1829,7 +2200,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ add(sp, sp, r0);
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2115,8 +2485,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiTag(r3);
__ Push(r3, r4);
__ mr(r3, r6);
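+    // Preserve the context register across the ToObject call.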
+ __ Push(cp);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(cp);
__ mr(r6, r3);
__ Pop(r3, r4);
__ SmiUntag(r3);
@@ -2159,7 +2531,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
@@ -2242,7 +2613,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2274,7 +2644,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ JumpToJSEntry(ip);
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2336,7 +2705,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2358,7 +2726,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2388,7 +2755,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2407,7 +2773,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2460,6 +2825,57 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r4);
+ __ Push(r4);
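+  // There is no JavaScript frame here; pass a dummy Smi zero as the context.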
+ __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r4);
+ __ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(r4, r5);
+ __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : message_id as Smi
+ // -- lr : return address
+ // -----------------------------------
+ __ push(r4);
+ __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in r3.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ TestIfSmi(r3, r0);
+ __ Ret(eq, cr0);
+
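+  // Heap numbers are also returned unchanged; anything else is handled by the
+  // NonNumberToNumber builtin.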
+ __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ __ Ret(eq);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2579,7 +2995,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
LeaveArgumentsAdaptorFrame(masm);
__ blr();
-
// -------------------------------------------
  // Don't adapt arguments.
// -------------------------------------------
@@ -2594,7 +3009,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/s390/OWNERS b/deps/v8/src/builtins/s390/OWNERS
new file mode 100644
index 0000000000..752e8e3d81
--- /dev/null
+++ b/deps/v8/src/builtins/s390/OWNERS
@@ -0,0 +1,6 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 12b52c123c..c68fcc3e97 100644
--- a/deps/v8/src/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -15,8 +15,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- r2 : number of arguments excluding receiver
// -- r3 : target
@@ -34,30 +34,18 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// ordinary functions).
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- switch (extra_args) {
- case BuiltinExtraArguments::kTarget:
- __ Push(r3);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kNewTarget:
- __ Push(r5);
- ++num_extra_args;
- break;
- case BuiltinExtraArguments::kTargetAndNewTarget:
- __ Push(r3, r5);
- num_extra_args += 2;
- break;
- case BuiltinExtraArguments::kNone:
- break;
- }
-
// JumpToExternalReference expects r2 to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ AddP(r2, r2, Operand(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+ // Insert extra arguments.
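+  // These are the Smi-tagged argument count, the target and the new target.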
+ __ SmiTag(r2);
+ __ Push(r2, r3, r5);
+ __ SmiUntag(r2);
+
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ exit_frame_type == BUILTIN_EXIT);
}
// Load the built-in InternalArray function from the current context.
@@ -131,10 +119,12 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
- // -- r2 : number of arguments
- // -- lr : return address
- // -- sp[(argc - n) * 8] : arg[n] (zero-based)
- // -- sp[(argc + 1) * 8] : receiver
+ // -- r2 : number of arguments
+ // -- r3 : function
+ // -- cp : context
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
// -----------------------------------
Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
Heap::RootListIndex const root_index =
@@ -143,59 +133,64 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
// Load the accumulator with the default return value (either -Infinity or
- // +Infinity), with the tagged value in r3 and the double value in d1.
- __ LoadRoot(r3, root_index);
- __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ // +Infinity), with the tagged value in r7 and the double value in d1.
+ __ LoadRoot(r7, root_index);
+ __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
// Setup state for loop
// r4: address of arg[0] + kPointerSize
// r5: number of slots to drop at exit (arguments + receiver)
- __ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
- __ AddP(r4, sp, r4);
- __ AddP(r5, r2, Operand(1));
+ __ AddP(r6, r2, Operand(1));
Label done_loop, loop;
+ __ LoadRR(r6, r2);
__ bind(&loop);
{
// Check if all parameters done.
- __ CmpLogicalP(r4, sp);
- __ ble(&done_loop);
+ __ SubP(r6, Operand(1));
+ __ blt(&done_loop);
// Load the next parameter tagged value into r2.
- __ lay(r4, MemOperand(r4, -kPointerSize));
- __ LoadP(r2, MemOperand(r4));
+ __ ShiftLeftP(r1, r6, Operand(kPointerSizeLog2));
+ __ LoadP(r4, MemOperand(sp, r1));
// Load the double value of the parameter into d2, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
- __ JumpIfSmi(r2, &convert_smi);
- __ LoadP(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number);
+ __ JumpIfSmi(r4, &convert_smi);
+ __ LoadP(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ JumpIfRoot(r5, Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r5);
- __ Push(r3, r4, r5);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Pop(r3, r4, r5);
- __ SmiUntag(r5);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ DCHECK(!FLAG_enable_embedded_constant_pool);
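+      // Save the Smi-tagged argument count, the loop index and the tagged
+      // accumulator value across the call.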
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r2);
+ __ SmiTag(r6);
+ __ EnterBuiltinFrame(cp, r3, r2);
+ __ Push(r6, r7);
+ __ LoadRR(r2, r4);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LoadRR(r4, r2);
+ __ Pop(r6, r7);
+ __ LeaveBuiltinFrame(cp, r3, r2);
+ __ SmiUntag(r6);
+ __ SmiUntag(r2);
{
// Restore the double accumulator value (d1).
Label done_restore;
- __ SmiToDouble(d1, r3);
- __ JumpIfSmi(r3, &done_restore);
- __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ SmiToDouble(d1, r7);
+ __ JumpIfSmi(r7, &done_restore);
+ __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
__ bind(&done_restore);
}
}
__ b(&convert);
__ bind(&convert_number);
- __ LoadDouble(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
+ __ LoadDouble(d2, FieldMemOperand(r4, HeapNumber::kValueOffset));
__ b(&done_convert);
__ bind(&convert_smi);
- __ SmiToDouble(d2, r2);
+ __ SmiToDouble(d2, r4);
__ bind(&done_convert);
// Perform the actual comparison with the accumulator value on the left hand
@@ -207,26 +202,28 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ b(CommuteCondition(cond_done), &compare_swap);
// Left and right hand side are equal, check for -0 vs. +0.
- __ TestDoubleIsMinusZero(reg, r6, r7);
+ __ TestDoubleIsMinusZero(reg, r1, r0);
__ bne(&loop);
// Update accumulator. Result is on the right hand side.
__ bind(&compare_swap);
__ ldr(d1, d2);
- __ LoadRR(r3, r2);
+ __ LoadRR(r7, r4);
__ b(&loop);
// At least one side is NaN, which means that the result will be NaN too.
// We still need to visit the rest of the arguments.
__ bind(&compare_nan);
- __ LoadRoot(r3, Heap::kNanValueRootIndex);
- __ LoadDouble(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ LoadRoot(r7, Heap::kNanValueRootIndex);
+ __ LoadDouble(d1, FieldMemOperand(r7, HeapNumber::kValueOffset));
__ b(&loop);
}
__ bind(&done_loop);
- __ LoadRR(r2, r3);
- __ Drop(r5);
+ // Drop all slots, including the receiver.
+ __ AddP(r2, Operand(1));
+ __ Drop(r2);
+ __ LoadRR(r2, r7);
__ Ret();
}
@@ -235,27 +232,38 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r2 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into r2.
Label no_arguments;
{
+ __ LoadRR(r4, r2); // Store argc in r4.
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
- __ la(sp, MemOperand(sp, r2));
- __ LoadP(r2, MemOperand(sp));
- __ Drop(2);
+ __ LoadP(r2, MemOperand(sp, r2));
}
// 2a. Convert the first argument to a number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r4);
+ __ EnterBuiltinFrame(cp, r3, r4);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(cp, r3, r4);
+ __ SmiUntag(r4);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ Drop(r4);
+ __ Ret(1);
+ }
// 2b. No arguments, return +0.
__ bind(&no_arguments);
@@ -277,21 +285,18 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
- // 2. Load the first argument into r4 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into r4.
{
Label no_arguments, done;
+ __ LoadRR(r8, r2); // Store argc in r8.
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
- __ la(sp, MemOperand(sp, r4));
- __ LoadP(r4, MemOperand(sp));
- __ Drop(2);
+ __ LoadP(r4, MemOperand(sp, r4));
__ b(&done);
__ bind(&no_arguments);
__ LoadSmiLiteral(r4, Smi::FromInt(0));
- __ Drop(1);
__ bind(&done);
}
@@ -302,37 +307,49 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ CompareObjectType(r4, r6, r6, HEAP_NUMBER_TYPE);
__ beq(&done_convert);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r5);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(r8);
+ __ EnterBuiltinFrame(cp, r3, r8);
+ __ Push(r5);
__ LoadRR(r2, r4);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ LoadRR(r4, r2);
- __ Pop(r3, r5);
+ __ Pop(r5);
+ __ LeaveBuiltinFrame(cp, r3, r8);
+ __ SmiUntag(r8);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ CmpP(r3, r5);
__ bne(&new_object);
// 5. Allocate a JSValue wrapper for the number.
__ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
- __ Ret();
+ __ b(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(r8);
+ __ EnterBuiltinFrame(cp, r3, r8);
+ __ Push(r4); // first argument
__ CallStub(&stub);
__ Pop(r4);
+ __ LeaveBuiltinFrame(cp, r3, r8);
+ __ SmiUntag(r8);
}
__ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
- __ Ret();
+
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r8);
+ __ Ret(1);
+ }
}
// static
@@ -340,33 +357,32 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into r2 and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into r2.
Label no_arguments;
{
+ __ LoadRR(r4, r2); // Store argc in r4
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r2, r2, Operand(kPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r2));
- __ LoadP(r2, MemOperand(sp));
- __ Drop(2);
+ __ LoadP(r2, MemOperand(sp, r2));
}
// 2a. At least one argument, return r2 if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(r2, &to_string);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
- __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
+ __ CompareObjectType(r2, r5, r5, FIRST_NONSTRING_TYPE);
__ bgt(&to_string);
__ beq(&symbol_descriptive_string);
- __ Ret();
+ __ b(&drop_frame_and_ret);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -379,15 +395,29 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert r2 to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(r4);
+ __ EnterBuiltinFrame(cp, r3, r4);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(cp, r3, r4);
+ __ SmiUntag(r4);
}
+ __ b(&drop_frame_and_ret);
// 3b. Convert symbol in r2 to a string.
__ bind(&symbol_descriptive_string);
{
+ __ Drop(r4);
+ __ Drop(1);
__ Push(r2);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
+
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r4);
+ __ Ret(1);
+ }
}
// static
@@ -396,6 +426,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- r2 : number of arguments
// -- r3 : constructor function
// -- r5 : new target
+ // -- cp : context
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
@@ -404,21 +435,18 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
- // 2. Load the first argument into r4 and get rid of the rest (including the
- // receiver).
+ // 2. Load the first argument into r4.
{
Label no_arguments, done;
+ __ LoadRR(r8, r2); // Store argc in r8.
__ CmpP(r2, Operand::Zero());
__ beq(&no_arguments);
__ SubP(r2, r2, Operand(1));
__ ShiftLeftP(r4, r2, Operand(kPointerSizeLog2));
- __ lay(sp, MemOperand(sp, r4));
- __ LoadP(r4, MemOperand(sp));
- __ Drop(2);
+ __ LoadP(r4, MemOperand(sp, r4));
__ b(&done);
__ bind(&no_arguments);
__ LoadRoot(r4, Heap::kempty_stringRootIndex);
- __ Drop(1);
__ bind(&done);
}
@@ -430,37 +458,50 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ blt(&done_convert);
__ bind(&convert);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(r3, r5);
+ __ SmiTag(r8);
+ __ EnterBuiltinFrame(cp, r3, r8);
+ __ Push(r5);
__ LoadRR(r2, r4);
__ CallStub(&stub);
__ LoadRR(r4, r2);
- __ Pop(r3, r5);
+ __ Pop(r5);
+ __ LeaveBuiltinFrame(cp, r3, r8);
+ __ SmiUntag(r8);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ CmpP(r3, r5);
__ bne(&new_object);
// 5. Allocate a JSValue wrapper for the string.
__ AllocateJSValue(r2, r3, r4, r6, r7, &new_object);
- __ Ret();
+ __ b(&drop_frame_and_ret);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r4); // first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
FastNewObjectStub stub(masm->isolate());
+ __ SmiTag(r8);
+ __ EnterBuiltinFrame(cp, r3, r8);
+ __ Push(r4); // first argument
__ CallStub(&stub);
__ Pop(r4);
+ __ LeaveBuiltinFrame(cp, r3, r8);
+ __ SmiUntag(r8);
}
__ StoreP(r4, FieldMemOperand(r2, JSValue::kValueOffset), r0);
- __ Ret();
+
+ __ bind(&drop_frame_and_ret);
+ {
+ __ Drop(r8);
+ __ Ret(1);
+ }
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
@@ -596,15 +637,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: number of arguments
// r3: constructor function
// r5: new target
- if (is_api_function) {
- __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
- Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(r2);
- __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+
+ ParameterCount actual(r2);
+ __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -693,6 +729,176 @@ void Builtins::Generate_JSBuiltinsConstructStubForDerived(
Generate_JSConstructStubHelper(masm, false, false, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : the value to pass to the generator
+ // -- r3 : the JSGeneratorObject to resume
+ // -- r4 : the resume mode (tagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(r3);
+
+ // Store input value into generator object.
+ __ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
+ r0);
+ __ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
+
+ // Load suspended function and context.
+ __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
+ __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ mov(ip, Operand(last_step_action));
+ __ LoadB(ip, MemOperand(ip));
+ __ CmpP(ip, Operand(StepIn));
+ __ bge(&prepare_step_in_if_stepping);
+
+ // Flood function if we need to continue stepping in the suspended generator.
+
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+
+ __ mov(ip, Operand(debug_suspended_generator));
+ __ LoadP(ip, MemOperand(ip));
+ __ CmpP(ip, r3);
+ __ beq(&prepare_step_in_suspended_generator);
+ __ bind(&stepping_prepared);
+
+ // Push receiver.
+ __ LoadP(ip, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ Push(ip);
+
+ // ----------- S t a t e -------------
+ // -- r3 : the JSGeneratorObject to resume
+ // -- r4 : the resume mode (tagged)
+ // -- r6 : generator function
+ // -- cp : generator context
+ // -- lr : return address
+ // -- sp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(
+ r2, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label loop, done_loop;
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+#if V8_TARGET_ARCH_S390X
+ __ CmpP(r2, Operand::Zero());
+ __ beq(&done_loop);
+#else
+ __ SmiUntag(r2);
+ __ LoadAndTestP(r2, r2);
+ __ beq(&done_loop);
+#endif
+ __ LoadRR(r1, r2);
+ __ bind(&loop);
+ __ push(ip);
+ __ BranchOnCount(r1, &loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
+ __ bne(&old_generator, Label::kNear);
+
+ // New-style (ignition/turbofan) generator object
+ {
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ LoadRR(r5, r3);
+ __ LoadRR(r3, r6);
+ __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
+ __ JumpToJSEntry(ip);
+ }
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(r6);
+
+ // Restore the operand stack.
+ __ LoadP(r2, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
+ __ LoadP(r5, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ AddP(r2, r2,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop, done_loop;
+ __ SmiUntag(r5);
+ __ LoadAndTestP(r5, r5);
+ __ beq(&done_loop);
+ __ LoadRR(r1, r5);
+ __ bind(&loop);
+ __ LoadP(ip, MemOperand(r2, kPointerSize));
+ __ la(r2, MemOperand(r2, kPointerSize));
+ __ Push(ip);
+ __ BranchOnCount(r1, &loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(ip, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset),
+ r0);
+
+ // Resume the generator function at the continuation.
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+ __ AddP(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
+ {
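+      // Compute the resume address as the code start plus the saved
+      // continuation offset, then mark the generator as executing.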
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r4);
+ __ AddP(r5, r5, r4);
+ __ LoadSmiLiteral(r4,
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ LoadRR(r2, r3); // Continuation expects generator object in r2.
+ __ Jump(r5);
+ }
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r4, r6);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(r3, r4);
+ __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ }
+ __ b(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r4);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(r3, r4);
+ __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ }
+ __ b(&stepping_prepared);
+}
+
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ push(r3);
@@ -818,6 +1024,21 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+ Register args_count = scratch;
+
+ // Get the arguments + receiver count.
+ __ LoadP(args_count,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadlW(args_count,
+ FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+ __ AddP(sp, sp, args_count);
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right. The actual argument count matches the formal parameter
@@ -835,14 +1056,16 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r3);
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into kInterpreterBytecodeRegister.
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
Label array_done;
Register debug_info = r4;
@@ -855,11 +1078,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
__ beq(&array_done);
__ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ bind(&array_done);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ LoadP(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ __ CmpP(r2, Operand(masm->CodeObject())); // Self-reference to this code.
+ __ bne(&switch_to_different_code_kind);
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister);
__ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r2, no_reg,
@@ -867,9 +1096,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Push new.target, bytecode array and zero for bytecode array offset.
- __ LoadImmP(r2, Operand::Zero());
- __ Push(r5, kInterpreterBytecodeArrayRegister, r2);
+ // Load the initial bytecode offset.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push new.target, bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
+ __ Push(r5, kInterpreterBytecodeArrayRegister, r4);
// Allocate the local and temporary register file on the stack.
{
@@ -901,18 +1134,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&no_args);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ AddP(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ mov(kInterpreterBytecodeOffsetRegister,
- Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
@@ -922,31 +1145,49 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
__ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
-  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
-  // untagging and header removal.
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+ // The return value is in r2.
+ LeaveInterpreterFrame(masm, r4);
+ __ Ret();
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
+ __ AddP(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(r3, r6, r7);
+ __ JumpToJSEntry(r6);
}
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadP(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
- // The return value is in accumulator, which is already in r2.
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, r4);
- // Leave the frame (also dropping the register file).
- __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(r2);
+
+ // Push function as argument and compile for baseline.
+ __ push(r3);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ LoadlW(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ AddP(sp, sp, r0);
+ // Restore return value.
+ __ pop(r2);
+ }
__ Ret();
}
@@ -965,7 +1206,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
@@ -981,9 +1223,16 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, r4, r5, r6);
// Call the target.
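+ // A target known to be a JSFunction can go through CallFunction directly,
+ // skipping the callable checks done by the generic Call builtin.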
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
}
// static
@@ -1010,24 +1259,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ AddP(kInterpreterRegisterFileRegister, fp,
- Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ Move(r4, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
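+ // r14 now points just past the handler call inside the entry trampoline;
+ // the stack walker uses this address to recognize interpreted frames.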
+
+ // Initialize the dispatch table register.
__ mov(kInterpreterDispatchTableRegister,
Operand(ExternalReference::interpreter_dispatch_table_address(
masm->isolate())));
- // Get the context from the frame.
- __ LoadP(kContextRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ LoadP(
- kInterpreterBytecodeArrayRegister,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1040,9 +1289,7 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Get the target bytecode offset from the frame.
__ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(
- kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Dispatch to the target bytecode.
@@ -1050,66 +1297,226 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
__ ShiftLeftP(ip, r3, Operand(kPointerSizeLog2));
__ LoadP(ip, MemOperand(kInterpreterDispatchTableRegister, ip));
- __ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
- __ Push(r3);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register.
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee)
+ // -- r5 : new target (preserved for callee)
+ // -- r3 : target function (preserved for callee)
+ // -----------------------------------
+ // First, look up the code; maybe we don't need to compile!
+ Label gotta_call_runtime;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = r3;
+ Register map = r8;
+ Register index = r4;
+ __ LoadP(map,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(map,
+ FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
+ __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
+ __ blt(&gotta_call_runtime);
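+ // The code map keeps context-free code in its first slot, followed by
+ // per-context entries; fewer than two elements means nothing to search.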
+
+ // Find literals.
+ // r9 : native context
+ // r4 : length / index
+ // r8 : optimized code map
+ // r5 : new target
+ // r3 : closure
+ Register native_context = r9;
+ __ LoadP(native_context, NativeContextMemOperand());
+
+ __ bind(&loop_top);
+ Register temp = r1;
+ Register array_pointer = r7;
+
+ // Does the native context match?
+ __ SmiToPtrArrayOffset(array_pointer, index);
+ __ AddP(array_pointer, map, array_pointer);
+ __ LoadP(temp, FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ CmpP(temp, native_context);
+ __ bne(&loop_bottom, Label::kNear);
+ // OSR id set to none?
+ __ LoadP(temp,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
+ __ bne(&loop_bottom, Label::kNear);
+ // Literals available?
+ __ LoadP(temp,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
+ __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r6,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ // Code available?
+ Register entry = r6;
+ __ LoadP(entry,
+ FieldMemOperand(array_pointer,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ // Store code entry in the closure.
+ __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, entry, r7);
+
+ // Link the closure into the optimized function list.
+ // r6 : code entry
+ // r9: native context
+ // r3 : closure
+ __ LoadP(
+ r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
+ r0);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp,
+ kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ StoreP(
+ closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+ // Save closure before the write barrier.
+ __ LoadRR(r7, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ JumpToJSEntry(entry);
+
+ __ bind(&loop_bottom);
+ __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
+ r0);
+ __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
+ __ bgt(&loop_top);
+
+ // We found neither literals nor code.
+ __ b(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+
+ // Last possibility. Check the context-free optimized code map entry.
+ __ LoadP(entry,
+ FieldMemOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ b(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ // Is the full code valid?
+ __ LoadP(entry,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(r7);
+ __ CmpP(r7, Operand(Code::BUILTIN));
+ __ beq(&gotta_call_runtime);
+ // Yes, install the full code.
+ __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, entry, r7);
+ __ JumpToJSEntry(entry);
+
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
}
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ mov(r14,
- Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : argument count (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -- r5 : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve argument count for later compare.
+ __ Move(r6, r2);
+ // Push a copy of the target function and the new target.
+ __ SmiTag(r2);
+ // Push another copy as a parameter to the runtime call.
+ __ Push(r2, r3, r5, r3);
- Generate_EnterBytecodeDispatch(masm);
-}
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
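+ // For each possible argument count (0..3), push the arguments provided
+ // by the caller and pad with undefined so stdlib, foreign and heap are
+ // always present.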
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ CmpP(r6, Operand(j));
+ __ b(ne, &over);
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ i * kPointerSize));
+ __ push(r6);
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
+ // Call the runtime; on success, unwind this frame and the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(r2, &failed);
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
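+ // Drop the copies of the target and new target, then restore the
+ // original argument count.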
+ __ Drop(2);
+ __ pop(r6);
+ __ SmiUntag(r6);
+ scope.GenerateLeaveFrame();
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+ __ AddP(r6, r6, Operand(1));
+ __ Drop(r6);
+ __ Ret();
+
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ Pop(r2, r3, r5);
+ __ SmiUntag(r2);
+ }
+ // On failure, tail call back to regular JS.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
@@ -1242,14 +1649,19 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ SmiUntag(r8);
// Switch on the state.
Label with_tos_register, unknown_state;
- __ CmpP(r8, Operand(FullCodeGenerator::NO_REGISTERS));
+ __ CmpP(
+ r8,
+ Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ bne(&with_tos_register);
__ la(sp, MemOperand(sp, 1 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&with_tos_register);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r2.code());
__ LoadP(r2, MemOperand(sp, 1 * kPointerSize));
- __ CmpP(r8, Operand(FullCodeGenerator::TOS_REG));
+ __ CmpP(
+ r8,
+ Operand(static_cast<intptr_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ bne(&unknown_state);
__ la(sp, MemOperand(sp, 2 * kPointerSize)); // Remove state.
__ Ret();
@@ -1371,9 +1783,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kThrowIllegalInvocation);
}
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ LoadP(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r2, MemOperand(r2, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -1381,7 +1800,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
}
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
Label skip;
__ CmpSmiLiteral(r2, Smi::FromInt(0), r0);
__ bne(&skip);
@@ -1389,6 +1808,12 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
+ // Drop any potential handler frame that may be sitting on top of the actual
+ // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ if (has_handler_frame) {
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadP(r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
@@ -1410,10 +1835,22 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
+
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- r2 : number of arguments
+ // -- r3 : function
+ // -- cp : context
// -- lr : return address
// -- sp[0] : receiver
// -----------------------------------
@@ -1423,7 +1860,7 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
{
__ Pop(r2);
__ JumpIfSmi(r2, &receiver_not_date);
- __ CompareObjectType(r2, r3, r4, JS_DATE_TYPE);
+ __ CompareObjectType(r2, r4, r5, JS_DATE_TYPE);
__ bne(&receiver_not_date);
}
@@ -1453,29 +1890,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
// 3. Raise a TypeError if the receiver is not a date.
__ bind(&receiver_not_date);
- __ TailCallRuntime(Runtime::kThrowNotDateError);
-}
-
-// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : argc
- // -- sp[0] : first argument (left-hand side)
- // -- sp[4] : receiver (right-hand side)
- // -----------------------------------
-
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ LoadP(InstanceOfDescriptor::LeftRegister(),
- MemOperand(fp, 2 * kPointerSize)); // Load left-hand side.
- __ LoadP(InstanceOfDescriptor::RightRegister(),
- MemOperand(fp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ push(r2);
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ EnterBuiltinFrame(cp, r3, r2);
+ __ CallRuntime(Runtime::kThrowNotDateError);
}
-
- // Pop the argument and the receiver.
- __ Ret(2);
}
// static
@@ -2076,8 +2497,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ SmiTag(r2);
__ Push(r2, r3);
__ LoadRR(r2, r5);
+ __ Push(cp);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(cp);
__ LoadRR(r5, r2);
__ Pop(r2, r3);
__ SmiUntag(r2);
@@ -2416,6 +2839,58 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r3);
+ __ Push(r3);
+ __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : requested object size (untagged)
+ // -- lr : return address
+ // -----------------------------------
+ __ SmiTag(r3);
+ __ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ Push(r3, r4);
+ __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : message_id as Smi
+ // -- lr : return address
+ // -----------------------------------
+ __ push(r3);
+ __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in r2.
+ STATIC_ASSERT(kSmiTag == 0);
+ __ TestIfSmi(r2);
+ __ Ret(eq);
+
+ __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
+ // r2: receiver
+ // r3: receiver instance type
+ __ Ret(eq);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
+
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : actual number of arguments
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 316378348c..153660407e 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -12,13 +12,10 @@
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- rax : number of arguments excluding receiver
// -- rdi : target
@@ -31,34 +28,33 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -----------------------------------
__ AssertFunction(rdi);
+ // The logic contained here is mirrored for TurboFan inlining in
+ // JSTypedLowering::ReduceJSCall{Function,Construct}. Keep these in sync.
+
// Make sure we operate in the context of the called function (for example
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args != BuiltinExtraArguments::kNone) {
- __ PopReturnAddressTo(kScratchRegister);
- if (extra_args & BuiltinExtraArguments::kTarget) {
- ++num_extra_args;
- __ Push(rdi);
- }
- if (extra_args & BuiltinExtraArguments::kNewTarget) {
- ++num_extra_args;
- __ Push(rdx);
- }
- __ PushReturnAddressFrom(kScratchRegister);
- }
-
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ addp(rax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
-}
+ // Unconditionally insert argc, target and new target as extra arguments. They
+ // will be used by stack frame iterators when constructing the stack trace.
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ SmiToInteger32(rax, rax);
+ __ Push(rdi);
+ __ Push(rdx);
+ __ PushReturnAddressFrom(kScratchRegister);
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ exit_frame_type == BUILTIN_EXIT);
+}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ movp(kScratchRegister,
@@ -100,7 +96,6 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ jmp(rbx);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
@@ -117,7 +112,6 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool create_implicit_receiver,
@@ -185,16 +179,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(greater_equal, &loop);
// Call the function.
- if (is_api_function) {
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ Call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -259,38 +246,31 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ret(0);
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
-
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rdi);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-
enum IsTagged { kRaxIsSmiTagged, kRaxIsUntaggedInt };
-
// Clobbers rcx, r11, kScratchRegister; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
IsTagged rax_is_tagged) {
@@ -324,7 +304,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ bind(&okay);
}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -339,11 +318,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Open a C++ scope for the FrameScope.
{
- // Platform specific argument handling. After this, the stack contains
- // an internal frame and the pushed function and receiver, and
- // register rax and rbx holds the argument count and argument array,
- // while rdi holds the function pointer, rsi the context, and rdx the
- // new.target.
+// Platform specific argument handling. After this, the stack contains
+// an internal frame and the pushed function and receiver, and
+// register rax and rbx holds the argument count and argument array,
+// while rdi holds the function pointer, rsi the context, and rdx the
+// new.target.
#ifdef _WIN64
// MSVC parameters in:
@@ -374,7 +353,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movp(rdi, rdx);
// Load the new.target into rdx.
__ movp(rdx, rcx);
-#else // _WIN64
+#else // _WIN64
// GCC parameters in:
// rdi : new_target
// rsi : function
@@ -454,16 +433,199 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ret(1 * kPointerSize); // Remove receiver.
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the value to pass to the generator
+ // -- rbx : the JSGeneratorObject to resume
+ // -- rdx : the resume mode (tagged)
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(rbx);
+
+ // Store input value into generator object.
+ __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
+ __ RecordWriteField(rbx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
+ kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
+
+ // Load suspended function and context.
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ Operand last_step_action_operand = masm->ExternalOperand(last_step_action);
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ cmpb(last_step_action_operand, Immediate(StepIn));
+ __ j(greater_equal, &prepare_step_in_if_stepping);
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ Operand debug_suspended_generator_operand =
+ masm->ExternalOperand(debug_suspended_generator);
+ __ cmpp(rbx, debug_suspended_generator_operand);
+ __ j(equal, &prepare_step_in_suspended_generator);
+ __ bind(&stepping_prepared);
+
+ // Pop return address.
+ __ PopReturnAddressTo(rax);
+
+ // Push receiver.
+ __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+
+ // ----------- S t a t e -------------
+ // -- rax : return address
+ // -- rbx : the JSGeneratorObject to resume
+ // -- rdx : the resume mode (tagged)
+ // -- rdi : generator function
+ // -- rsi : generator context
+ // -- rsp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ subl(rcx, Immediate(1));
+ __ j(carry, &done_loop, Label::kNear);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+ __ j(not_equal, &old_generator);
+
+ // New-style (ignition/turbofan) generator object.
+ {
+ __ PushReturnAddressFrom(rax);
+ __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ movp(rdx, rbx);
+ __ jmp(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Old-style (full-codegen) generator object.
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PushReturnAddressFrom(rax); // Return address.
+ __ Push(rbp); // Caller's frame pointer.
+ __ Move(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
+
+ // Restore the operand stack.
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
+ __ SmiToInteger32(rax, FieldOperand(rsi, FixedArray::kLengthOffset));
+ {
+ Label done_loop, loop;
+ __ Set(rcx, 0);
+ __ bind(&loop);
+ __ cmpl(rcx, rax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ Push(
+ FieldOperand(rsi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ addl(rcx, Immediate(1));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ LoadRoot(FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset),
+ Heap::kEmptyFixedArrayRootIndex);
+
+ // Restore context.
+ __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
+
+ // Resume the generator function at the continuation.
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+ __ SmiToInteger64(
+ rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
+ __ leap(rdx, FieldOperand(rdx, rcx, times_1, Code::kHeaderSize));
+ __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ movp(rax, rbx); // Continuation expects generator object in rax.
+ __ jmp(rdx);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ jmp(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ jmp(&stepping_prepared);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register args_count = scratch1;
+ Register return_pc = scratch2;
+
+ // Get the arguments + receiver count.
+ __ movp(args_count,
+ Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ movl(args_count,
+ FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+
+ // Drop receiver + arguments.
+ __ PopReturnAddressTo(return_pc);
+ __ addp(rsp, args_count);
+ __ PushReturnAddressFrom(return_pc);
+}
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -480,6 +642,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -490,10 +654,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into edi (InterpreterBytecodeRegister).
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-
Label load_debug_bytecode_array, bytecode_array_loaded;
DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
__ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
@@ -503,18 +666,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ Move(rcx, masm->CodeObject()); // Self-reference to this code.
+ __ cmpp(rcx, FieldOperand(rax, SharedFunctionInfo::kCodeOffset));
+ __ j(not_equal, &switch_to_different_code_kind);
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
rax);
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
- // Push bytecode array.
+ // Load initial bytecode offset.
+ __ movp(kInterpreterBytecodeOffsetRegister,
+ Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push bytecode array and Smi tagged bytecode offset.
__ Push(kInterpreterBytecodeArrayRegister);
- // Push zero for bytecode array offset.
- __ Push(Immediate(0));
+ __ Integer32ToSmi(rcx, kInterpreterBytecodeOffsetRegister);
+ __ Push(rcx);
// Allocate the local and temporary register file on the stack.
{
@@ -545,19 +718,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(greater_equal, &loop_header, Label::kNear);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ movp(kInterpreterRegisterFileRegister, rbp);
- __ addp(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ movp(kInterpreterBytecodeOffsetRegister,
- Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
@@ -567,46 +729,59 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
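+ // The dispatch table now holds code entry addresses directly, so the
+ // handler can be called without adding the Code header offset first.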
__ call(rbx);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
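+ // Remember the offset of the return address of the handler call above;
+ // InterpreterEnterBytecodeDispatch uses it to re-enter the dispatch loop.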
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in rax.
+ LeaveInterpreterFrame(masm, rbx, rcx);
+ __ ret(0);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
Register debug_info = kInterpreterBytecodeArrayRegister;
__ movp(debug_info, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ jmp(&bytecode_array_loaded);
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ leave(); // Leave the frame so we can tail call.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
+ __ RecordWriteCodeEntryField(rdi, rcx, r15);
+ __ jmp(rcx);
}
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ movp(rdi, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ movp(kContextRegister,
+ Operand(rbp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, rbx, rcx);
- // The return value is in accumulator, which is already in rax.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ Push(rax);
- // Leave the frame (also dropping the register file).
- __ leave();
+ // Push function as argument and compile for baseline.
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ movl(rbx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ PopReturnAddressTo(rcx);
- __ addp(rsp, rbx);
- __ PushReturnAddressFrom(rcx);
+ // Restore return value.
+ __ Pop(rax);
+ }
__ ret(0);
}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
bool push_receiver) {
// ----------- S t a t e -------------
@@ -637,10 +812,10 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ j(greater, &loop_header, Label::kNear);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -656,11 +831,18 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
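+ // A target known to be a JSFunction can go through CallFunction directly,
+ // skipping the callable checks done by the generic Call builtin.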
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
-}
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
+}
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
@@ -689,26 +871,25 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ Move(rbx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+ __ Push(rbx);
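+ // The pushed address points just past the handler call inside the entry
+ // trampoline; the stack walker uses it to recognize interpreted frames.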
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register and dispatch table register.
- __ movp(kInterpreterRegisterFileRegister, rbp);
- __ addp(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize dispatch table register.
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- // Get the context from the frame.
- __ movp(kContextRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
// Get the bytecode array pointer from the frame.
- __ movp(
- kInterpreterBytecodeArrayRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ __ movp(kInterpreterBytecodeArrayRegister,
+ Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -719,10 +900,8 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ movp(
- kInterpreterBytecodeOffsetRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ movp(kInterpreterBytecodeOffsetRegister,
+ Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeOffsetRegister);
@@ -731,77 +910,222 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movp(rbx, Operand(kInterpreterDispatchTableRegister, rbx,
times_pointer_size, 0));
- __ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(rbx);
}
-
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register and push PC at top
- // of stack (to simulate initial call to bytecode handler in interpreter entry
- // trampoline).
- __ Pop(rbx);
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
- __ Push(rbx);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
-}
-
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
-}
-
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
-}
-
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee)
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+ // First, look up the code; maybe we don't need to compile!
+ Label gotta_call_runtime;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = rdi;
+ Register map = r8;
+ Register index = r9;
+ __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
+ __ cmpl(index, Immediate(2));
+ __ j(less, &gotta_call_runtime);
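+ // The code map keeps context-free code in its first slot, followed by
+ // per-context entries; fewer than two elements means nothing to search.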
+
+ // Find literals.
+ // r14 : native context
+ // r9 : length / index
+ // r8 : optimized code map
+ // rdx : new target
+ // rdi : closure
+ Register native_context = r14;
+ __ movp(native_context, NativeContextOperand());
+
+ __ bind(&loop_top);
+ // Native context match?
+ Register temp = r11;
+ __ movp(temp, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ cmpp(temp, native_context);
+ __ j(not_equal, &loop_bottom);
+ // OSR id set to none?
+ __ movp(temp, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ __ SmiToInteger32(temp, temp);
+ const int bailout_id = BailoutId::None().ToInt();
+ __ cmpl(temp, Immediate(bailout_id));
+ __ j(not_equal, &loop_bottom);
+ // Literals available?
+ __ movp(temp, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
+ __ movp(r15, index);
+ __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+ // Code available?
+ Register entry = rcx;
+ __ movp(entry, FieldOperand(map, index, times_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, r15);
+
+ // Link the closure into the optimized function list.
+ // rcx : code entry (entry)
+ // r14 : native context
+ // rdx : new target
+ // rdi : closure
+ __ movp(rbx,
+ ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, r15,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+ closure);
+ // Save closure before the write barrier.
+ __ movp(rbx, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, r15,
+ kDontSaveFPRegs);
+ __ movp(closure, rbx);
+ __ jmp(entry);
+
+ __ bind(&loop_bottom);
+ __ subl(index, Immediate(SharedFunctionInfo::kEntryLength));
+ __ cmpl(index, Immediate(1));
+ __ j(greater, &loop_top);
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+
+ // Last possibility. Check the context-free optimized code map entry.
+ __ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ // Is the full code valid?
+ __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
+ __ andl(rbx, Immediate(Code::KindField::kMask));
+ __ shrl(rbx, Immediate(Code::KindField::kShift));
+ __ cmpl(rbx, Immediate(Code::BUILTIN));
+ __ j(equal, &gotta_call_runtime);
+ // Yes, install the full code.
+ __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, r15);
+ __ jmp(entry);
+
+ __ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
+}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm,
Runtime::kCompileOptimized_NotConcurrent);
}
-
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee)
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve argument count for later compare.
+ __ movp(kScratchRegister, rax);
+ // Push the number of arguments to the callee.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ // Push a copy of the target function and the new target.
+ __ Push(rdi);
+ __ Push(rdx);
+
+ // Push the function itself as a parameter to the runtime call.
+ __ Push(rdi);
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
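+ // For each possible argument count (0..3), push the arguments provided
+ // by the caller and pad with undefined so stdlib, foreign and heap are
+ // always present.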
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ cmpp(kScratchRegister, Immediate(j));
+ __ j(not_equal, &over, Label::kNear);
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ Push(Operand(
+ rbp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done, Label::kNear);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+
+ // Call the runtime; on success, unwind this frame and the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(rax, &failed, Label::kNear);
+
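+ // Drop the copies of the target and new target, then restore the
+ // original argument count.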
+ __ Drop(2);
+ __ Pop(kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ scope.GenerateLeaveFrame();
+
+ __ PopReturnAddressTo(rbx);
+ __ incp(kScratchRegister);
+ __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ PushReturnAddressFrom(rbx);
+ __ ret(0);
+
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ // On failure, tail call back to regular JS.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
@@ -826,20 +1150,18 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
@@ -870,17 +1192,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ ret(0);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
// Enter an internal frame.
@@ -900,17 +1219,14 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
@@ -929,13 +1245,16 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister,
+ Immediate(static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS)));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpp(kScratchRegister,
+ Immediate(static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER)));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -943,26 +1262,25 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ Abort(kNoCasesLeft);
}
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rdi : function
+ // -- rsi : context
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
@@ -1004,35 +1322,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ bind(&receiver_not_date);
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
+ __ Move(rbx, Smi::FromInt(0));
+ __ EnterBuiltinFrame(rsi, rdi, rbx);
__ CallRuntime(Runtime::kThrowNotDateError);
}
}
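+// Editor's note: EnterBuiltinFrame(context, target, argc), used above and in
+// the constructors below, replaces the generic INTERNAL frame here. Judging
+// from its call sites it records the context, the target function and a Smi
+// argc in a typed frame so the stack walker can attribute the builtin to its
+// JavaScript caller; the matching LeaveBuiltinFrame hands argc back out. This
+// summary is the editor's reading, not text from the original patch.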
// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argc
- // -- rsp[0] : return address
- // -- rsp[8] : first argument (left-hand side)
- // -- rsp[16] : receiver (right-hand side)
- // -----------------------------------
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ movp(InstanceOfDescriptor::LeftRegister(),
- Operand(rbp, 2 * kPointerSize)); // Load left-hand side.
- __ movp(InstanceOfDescriptor::RightRegister(),
- Operand(rbp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
- }
-
- // Pop the argument and the receiver.
- __ ret(2 * kPointerSize);
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1112,7 +1408,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
@@ -1165,7 +1460,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1230,7 +1524,6 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1312,7 +1605,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1342,7 +1634,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
@@ -1373,11 +1664,12 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- rax : number of arguments
+ // -- rdi : function
+ // -- rsi : context
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
@@ -1405,27 +1697,28 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
// Load the double value of the parameter into xmm1, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(rbx, &convert_smi);
__ JumpIfRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ Integer32ToSmi(rax, rax);
__ Integer32ToSmi(rcx, rcx);
- __ Push(rax);
+ __ EnterBuiltinFrame(rsi, rdi, rax);
__ Push(rcx);
__ Push(rdx);
__ movp(rax, rbx);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ movp(rbx, rax);
__ Pop(rdx);
__ Pop(rcx);
- __ Pop(rax);
+ __ LeaveBuiltinFrame(rsi, rdi, rax);
+ __ SmiToInteger32(rcx, rcx);
+ __ SmiToInteger32(rax, rax);
{
// Restore the double accumulator value (xmm0).
Label restore_smi, done_restore;
@@ -1436,8 +1729,6 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ SmiToDouble(xmm0, rdx);
__ bind(&done_restore);
}
- __ SmiToInteger32(rcx, rcx);
- __ SmiToInteger32(rax, rax);
}
__ jmp(&convert);
__ bind(&convert_number);
@@ -1491,41 +1782,52 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rdi : constructor function
+ // -- rsi : context
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- // 1. Load the first argument into rax and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into rbx.
Label no_arguments;
{
StackArgumentsAccessor args(rsp, rax);
__ testp(rax, rax);
__ j(zero, &no_arguments, Label::kNear);
__ movp(rbx, args.GetArgumentOperand(1));
- __ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
}
// 2a. Convert the first argument to a number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Integer32ToSmi(rax, rax);
+ __ EnterBuiltinFrame(rsi, rdi, rax);
+ __ movp(rax, rbx);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(rsi, rdi, rbx); // Argc popped to rbx.
+ __ SmiToInteger32(rbx, rbx);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rbx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ Ret();
+ }
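+ // Editor's note (sketch): argc is Smi-tagged and parked in the builtin
+ // frame across the ToNumber call, which may trigger GC and clobber
+ // caller-saved registers; LeaveBuiltinFrame returns the saved argc in rbx,
+ // keeping the argument drop above in sync with the original argc.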
// 2b. No arguments, return +0 (already in rax).
__ bind(&no_arguments);
__ ret(1 * kPointerSize);
}
-
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rdi : constructor function
// -- rdx : new target
+ // -- rsi : context
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
@@ -1534,8 +1836,10 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // 2. Load the first argument into rbx and get rid of the rest (including the
- // receiver).
+ // Store argc in r8.
+ __ Integer32ToSmi(r8, rax);
+
+ // 2. Load the first argument into rbx.
{
StackArgumentsAccessor args(rsp, rax);
Label no_arguments, done;
@@ -1546,9 +1850,6 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&no_arguments);
__ Move(rbx, Smi::FromInt(0));
__ bind(&done);
- __ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
}
// 3. Make sure rbx is a number.
@@ -1559,75 +1860,81 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Heap::kHeapNumberMapRootIndex);
__ j(equal, &done_convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rdx);
- __ Push(rdi);
__ Move(rax, rbx);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Move(rbx, rax);
- __ Pop(rdi);
__ Pop(rdx);
+ __ LeaveBuiltinFrame(rsi, rdi, r8);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ cmpp(rdx, rdi);
__ j(not_equal, &new_object);
// 5. Allocate a JSValue wrapper for the number.
__ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
- __ Ret();
+ __ jmp(&drop_frame_and_ret, Label::kNear);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
+ __ LeaveBuiltinFrame(rsi, rdi, r8);
}
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(rcx);
+ __ SmiToInteger32(r8, r8);
+ __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ Ret();
+ }
+}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rdi : constructor function
+ // -- rsi : context
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- // 1. Load the first argument into rax and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into rax.
Label no_arguments;
{
StackArgumentsAccessor args(rsp, rax);
+ __ Integer32ToSmi(r8, rax); // Store argc in r8.
__ testp(rax, rax);
__ j(zero, &no_arguments, Label::kNear);
- __ movp(rbx, args.GetArgumentOperand(1));
- __ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
+ __ movp(rax, args.GetArgumentOperand(1));
}
// 2a. At least one argument, return rax if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(rax, &to_string, Label::kNear);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
__ j(above, &to_string, Label::kNear);
__ j(equal, &symbol_descriptive_string, Label::kNear);
- __ Ret();
+ __ jmp(&drop_frame_and_ret, Label::kNear);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -1640,20 +1947,35 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert rax to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ EnterBuiltinFrame(rsi, rdi, r8);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(rsi, rdi, r8);
}
+ __ jmp(&drop_frame_and_ret, Label::kNear);
// 3b. Convert symbol in rax to a string.
__ bind(&symbol_descriptive_string);
{
__ PopReturnAddressTo(rcx);
+ __ SmiToInteger32(r8, r8);
+ __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
__ Push(rax);
__ PushReturnAddressFrom(rcx);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(rcx);
+ __ SmiToInteger32(r8, r8);
+ __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ Ret();
+ }
+}
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
@@ -1661,6 +1983,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- rax : number of arguments
// -- rdi : constructor function
// -- rdx : new target
+ // -- rsi : context
// -- rsp[0] : return address
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
@@ -1669,8 +1992,10 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
- // 2. Load the first argument into rbx and get rid of the rest (including the
- // receiver).
+ // Store argc in r8.
+ __ Integer32ToSmi(r8, rax);
+
+ // 2. Load the first argument into rbx.
{
StackArgumentsAccessor args(rsp, rax);
Label no_arguments, done;
@@ -1681,9 +2006,6 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ bind(&done);
- __ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
}
// 3. Make sure rbx is a string.
@@ -1694,40 +2016,50 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ j(below, &done_convert);
__ bind(&convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
+ __ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rdx);
- __ Push(rdi);
__ Move(rax, rbx);
__ CallStub(&stub);
__ Move(rbx, rax);
- __ Pop(rdi);
__ Pop(rdx);
+ __ LeaveBuiltinFrame(rsi, rdi, r8);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, new_object;
__ cmpp(rdx, rdi);
__ j(not_equal, &new_object);
// 5. Allocate a JSValue wrapper for the string.
__ AllocateJSValue(rax, rdi, rbx, rcx, &new_object);
- __ Ret();
+ __ jmp(&drop_frame_and_ret, Label::kNear);
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
+ __ LeaveBuiltinFrame(rsi, rdi, r8);
}
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(rcx);
+ __ SmiToInteger32(r8, r8);
+ __ leap(rsp, Operand(rsp, r8, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ Ret();
+ }
+}
static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
@@ -1755,7 +2087,6 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
__ j(less_equal, stack_overflow); // Signed comparison.
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ pushq(rbp);
__ movp(rbp, rsp);
@@ -1773,7 +2104,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ Push(r8);
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
__ movp(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -1789,6 +2119,66 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : requested object size (untagged)
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Integer32ToSmi(rdx, rdx);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ Move(rsi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
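+// Editor's note (sketch): the requested size arrives untagged in rdx, so it
+// is Smi-tagged and re-inserted under the return address to become the sole
+// argument of the runtime call; rsi is cleared to Smi zero, presumably to
+// mark the call as carrying no JavaScript context.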
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : requested object size (untagged)
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Integer32ToSmi(rdx, rdx);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ PushReturnAddressFrom(rcx);
+ __ Move(rsi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdx : message_id as Smi
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ Move(rsi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in rax.
+ Label not_smi;
+ __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
+ __ Ret();
+ __ bind(&not_smi);
+
+ Label not_heap_number;
+ __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ Ret();
+ __ bind(&not_heap_number);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
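+// Editor's note: the fast paths above amount to the following dispatch
+// (editor's pseudocode, not part of the patch):
+//   if (IsSmi(x)) return x;                 // already a number
+//   if (Map(x) == HeapNumberMap) return x;  // already a number
+//   return NonNumberToNumber(x);            // slow path, may call back into JS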
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -1891,7 +2281,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2182,8 +2571,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(rax);
__ Push(rdi);
__ movp(rax, rcx);
+ __ Push(rsi);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(rsi);
__ movp(rcx, rax);
__ Pop(rdi);
__ Pop(rax);
@@ -2224,7 +2615,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
@@ -2309,7 +2699,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2339,7 +2728,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ jmp(rcx);
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2403,7 +2791,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2425,7 +2812,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ jmp(rcx);
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2455,7 +2841,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ jmp(rcx);
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2476,7 +2861,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2529,7 +2913,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
-
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Register scratch0, Register scratch1,
@@ -2595,7 +2978,6 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments (not including the receiver)
@@ -2639,10 +3021,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
}
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rax, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -2651,19 +3039,27 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
Label skip;
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
__ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
+ // Drop any potential handler frame that may be sitting on top of the
+ // actual JavaScript frame. This is the case when OSR is triggered from
+ // bytecode.
+ if (has_handler_frame) {
+ __ leave();
+ }
+
// Load deoptimization data from the code object.
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
- __ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ __ SmiToInteger32(
+ rbx, Operand(rbx, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
__ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
@@ -2675,6 +3071,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ret(0);
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
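+// Editor's note: both wrappers funnel into the helper above; the interpreter
+// variant differs only in looking through (and later dropping) the handler
+// frame that bytecode OSR leaves on top of the JavaScript frame before
+// jumping to the optimized code's OSR entry point.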
#undef __
diff --git a/deps/v8/src/builtins/x87/OWNERS b/deps/v8/src/builtins/x87/OWNERS
new file mode 100644
index 0000000000..dd9998b261
--- /dev/null
+++ b/deps/v8/src/builtins/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
index 9e13172c85..9c46f20ff6 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/builtins/x87/builtins-x87.cc
@@ -13,13 +13,10 @@
namespace v8 {
namespace internal {
-
#define __ ACCESS_MASM(masm)
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
- CFunctionId id,
- BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
+ ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- eax : number of arguments excluding receiver
// -- edi : target
@@ -38,26 +35,22 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// ordinary functions).
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // Insert extra arguments.
- int num_extra_args = 0;
- if (extra_args != BuiltinExtraArguments::kNone) {
- __ PopReturnAddressTo(ecx);
- if (extra_args & BuiltinExtraArguments::kTarget) {
- ++num_extra_args;
- __ Push(edi);
- }
- if (extra_args & BuiltinExtraArguments::kNewTarget) {
- ++num_extra_args;
- __ Push(edx);
- }
- __ PushReturnAddressFrom(ecx);
- }
-
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
+ const int num_extra_args = 3;
__ add(eax, Immediate(num_extra_args + 1));
- __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+ // Insert extra arguments.
+ __ PopReturnAddressTo(ecx);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ SmiUntag(eax);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+
+ __ JumpToExternalReference(ExternalReference(address, masm->isolate()),
+ exit_frame_type == BUILTIN_EXIT);
}
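+// Editor's note (sketch): rather than the old per-builtin kTarget/kNewTarget
+// flags, every adaptor call now pushes the same three extras -- the Smi argc
+// (already biased by num_extra_args + 1 to cover the extras and the
+// receiver), the target, and the new target -- before entering the runtime.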
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -186,16 +179,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ j(greater_equal, &loop);
// Call the function.
- if (is_api_function) {
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- Handle<Code> code =
- masm->isolate()->builtins()->HandleApiCallConstruct();
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
- CheckDebugStepCallWrapper());
- }
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
@@ -259,38 +245,31 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ret(0);
}
-
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, false);
}
-
void Builtins::Generate_JSBuiltinsConstructStubForDerived(
MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, false, true);
}
-
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(edi);
__ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}
-
enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
-
// Clobbers ecx, edx, edi; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
IsTagged eax_is_tagged) {
@@ -322,7 +301,6 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ bind(&okay);
}
-
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -356,7 +334,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ jmp(&entry, Label::kNear);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
- __ push(Operand(edx, 0)); // dereference handle
+ __ push(Operand(edx, 0)); // dereference handle
__ inc(ecx);
__ bind(&entry);
__ cmp(ecx, eax);
@@ -382,16 +360,192 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ret(kPointerSize); // Remove receiver.
}
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, false);
}
-
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the value to pass to the generator
+ // -- ebx : the JSGeneratorObject to resume
+ // -- edx : the resume mode (tagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertGeneratorObject(ebx);
+
+ // Store input value into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
+ __ RecordWriteField(ebx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
+ kDontSaveFPRegs);
+
+ // Store resume mode into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
+
+ // Load suspended function and context.
+ __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(masm->isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+ __ j(greater_equal, &prepare_step_in_if_stepping);
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ cmp(ebx, Operand::StaticVariable(debug_suspended_generator));
+ __ j(equal, &prepare_step_in_suspended_generator);
+ __ bind(&stepping_prepared);
+
+ // Pop return address.
+ __ PopReturnAddressTo(eax);
+
+ // Push receiver.
+ __ Push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
+
+ // ----------- S t a t e -------------
+ // -- eax : return address
+ // -- ebx : the JSGeneratorObject to resume
+ // -- edx : the resume mode (tagged)
+ // -- edi : generator function
+ // -- esi : generator context
+ // -- esp[0] : generator receiver
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ sub(ecx, Immediate(Smi::FromInt(1)));
+ __ j(carry, &done_loop, Label::kNear);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Dispatch on the kind of generator object.
+ Label old_generator;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+ __ j(not_equal, &old_generator);
+
+ // New-style (ignition/turbofan) generator object
+ {
+ __ PushReturnAddressFrom(eax);
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax,
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ mov(edx, ebx);
+ __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+ }
+
+ // Old-style (full-codegen) generator object
+ __ bind(&old_generator);
+ {
+ // Enter a new JavaScript frame, and initialize its slots as they were when
+ // the generator was suspended.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PushReturnAddressFrom(eax); // Return address.
+ __ Push(ebp); // Caller's frame pointer.
+ __ Move(ebp, esp);
+ __ Push(esi); // Callee's context.
+ __ Push(edi); // Callee's JS Function.
+
+ // Restore the operand stack.
+ __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
+ {
+ Label done_loop, loop;
+ __ Move(ecx, Smi::FromInt(0));
+ __ bind(&loop);
+ __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
+ __ j(equal, &done_loop, Label::kNear);
+ __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ add(ecx, Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Reset operand stack so we don't leak.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
+ Immediate(masm->isolate()->factory()->empty_fixed_array()));
+
+ // Resume the generator function at the continuation.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+ __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(ecx);
+ __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
+ Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ mov(eax, ebx); // Continuation expects generator object in eax.
+ __ jmp(edx);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edx);
+ __ Push(edi);
+ __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ Pop(edx);
+ __ Pop(ebx);
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ jmp(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edx);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(edx);
+ __ Pop(ebx);
+ __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ }
+ __ jmp(&stepping_prepared);
+}
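+// Editor's note: resumption dispatches on SharedFunctionInfo's function data
+// slot: a BytecodeArray marks an Ignition generator, which is resumed by
+// calling the function's code entry with new.target carrying the generator
+// object; anything else takes the full-codegen path above, rebuilding the
+// suspended frame and operand stack by hand.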
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register args_count = scratch1;
+ Register return_pc = scratch2;
+
+ // Get the arguments + receiver count.
+ __ mov(args_count,
+ Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(args_count,
+ FieldOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+ // Leave the frame (also dropping the register file).
+ __ leave();
+
+ // Drop receiver + arguments.
+ __ pop(return_pc);
+ __ add(esp, args_count);
+ __ push(return_pc);
+}
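+// Editor's note (sketch): args_count comes from the BytecodeArray's
+// parameter size field, which apparently counts the receiver and is already
+// scaled to bytes; popping the return pc, bumping esp by args_count and
+// re-pushing the pc therefore drops the receiver and all arguments in one
+// step once the frame itself is gone.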
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
@@ -408,6 +562,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// The function builds an interpreter frame. See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
@@ -418,10 +574,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
- // Get the bytecode array from the function object and load the pointer to the
- // first entry into edi (InterpreterBytecodeRegister).
+ // Get the bytecode array from the function object (or from the DebugInfo if
+ // it is present) and load it into kInterpreterBytecodeArrayRegister.
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-
Label load_debug_bytecode_array, bytecode_array_loaded;
__ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
Immediate(DebugInfo::uninitialized()));
@@ -430,8 +585,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
+ // Check whether we should continue to use the interpreter.
+ Label switch_to_different_code_kind;
+ __ Move(ecx, masm->CodeObject()); // Self-reference to this code.
+ __ cmp(ecx, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
+ __ j(not_equal, &switch_to_different_code_kind);
+
+ // Check function data field is actually a BytecodeArray object.
if (FLAG_debug_code) {
- // Check function data field is actually a BytecodeArray object.
__ AssertNotSmi(kInterpreterBytecodeArrayRegister);
__ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
eax);
@@ -440,8 +601,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
- // Push zero for bytecode array offset.
- __ push(Immediate(0));
+ // Push Smi tagged initial bytecode array offset.
+ __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
// Allocate the local and temporary register file on the stack.
{
@@ -474,74 +635,75 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ j(greater_equal, &loop_header);
}
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's prologue:
- // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Code aging of the BytecodeArray object.
-
- // Load accumulator, register file, bytecode offset, dispatch table into
- // registers.
+ // Load accumulator, bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
- __ mov(kInterpreterRegisterFileRegister, ebp);
- __ add(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
-
- // Push dispatch table as a stack located parameter to the bytecode handler.
- DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ push(ebx);
+ __ mov(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
- __ movzx_b(eax, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(ebx, eax, times_pointer_size, 0));
- // Restore undefined_value in accumulator (eax)
- // TODO(rmcilroy): Remove this once we move the dispatch table back into a
- // register.
- __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+ times_pointer_size, 0));
__ call(ebx);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
- // Even though the first bytecode handler was called, we will never return.
- __ Abort(kUnexpectedReturnFromBytecodeHandler);
+ // The return value is in eax.
+ LeaveInterpreterFrame(masm, ebx, ecx);
+ __ ret(0);
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
Register debug_info = kInterpreterBytecodeArrayRegister;
__ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ FieldOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ jmp(&bytecode_array_loaded);
+
+ // If the shared code is no longer this entry trampoline, then the underlying
+ // function has been switched to a different kind of code and we heal the
+ // closure by switching the code entry field over to the new code as well.
+ __ bind(&switch_to_different_code_kind);
+ __ pop(edx); // Callee's new target.
+ __ pop(edi); // Callee's JS function.
+ __ pop(esi); // Callee's context.
+ __ leave(); // Leave the frame so we can tail call.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
+ __ RecordWriteCodeEntryField(edi, ecx, ebx);
+ __ jmp(ecx);
}
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+ // Save the function and context for call to CompileBaseline.
+ __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ mov(kContextRegister,
+ Operand(ebp, StandardFrameConstants::kContextOffset));
-void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
- // TODO(rmcilroy): List of things not currently dealt with here but done in
- // fullcodegen's EmitReturnSequence.
- // - Supporting FLAG_trace for Runtime::TraceExit.
- // - Support profiler (specifically decrementing profiling_counter
- // appropriately and calling out to HandleInterrupts if necessary).
+ // Leave the frame before recompiling for baseline so that we don't count as
+ // an activation on the stack.
+ LeaveInterpreterFrame(masm, ebx, ecx);
- // The return value is in accumulator, which is already in rax.
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Push return value.
+ __ push(eax);
- // Leave the frame (also dropping the register file).
- __ leave();
+ // Push function as argument and compile for baseline.
+ __ push(edi);
+ __ CallRuntime(Runtime::kCompileBaseline);
- // Drop receiver + arguments and return.
- __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
- BytecodeArray::kParameterSizeOffset));
- __ pop(ecx);
- __ add(esp, ebx);
- __ push(ecx);
+ // Restore return value.
+ __ pop(eax);
+ }
__ ret(0);
}
-
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Register array_limit) {
// ----------- S t a t e -------------
@@ -559,10 +721,10 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
__ j(greater, &loop_header, Label::kNear);
}
-
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
- MacroAssembler* masm, TailCallMode tail_call_mode) {
+ MacroAssembler* masm, TailCallMode tail_call_mode,
+ CallableType function_type) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -585,11 +747,18 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// Call the target.
__ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- tail_call_mode),
- RelocInfo::CODE_TARGET);
-}
+ if (function_type == CallableType::kJSFunction) {
+ __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(function_type, CallableType::kAny);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
+ }
+}
// static
void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
@@ -628,17 +797,26 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Smi* interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ __ LoadHeapObject(ebx,
+ masm->isolate()->builtins()->InterpreterEntryTrampoline());
+ __ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
+ Code::kHeaderSize - kHeapObjectTag));
+ __ push(ebx);
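+ // Editor's note: the address pushed above points back into the interpreter
+ // entry trampoline at its recorded return-pc offset; used as a fake return
+ // address, it makes the bytecode-handler jump below look like a call from
+ // the trampoline, which is apparently what lets the stack walker classify
+ // this frame as interpreted.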
-static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
- // Initialize register file register.
- __ mov(kInterpreterRegisterFileRegister, ebp);
- __ add(kInterpreterRegisterFileRegister,
- Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
+ // Initialize the dispatch table register.
+ __ mov(kInterpreterDispatchTableRegister,
+ Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the bytecode array pointer from the frame.
__ mov(kInterpreterBytecodeArrayRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -649,104 +827,252 @@ static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ mov(
- kInterpreterBytecodeOffsetRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Push dispatch table as a stack located parameter to the bytecode handler.
- __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
- masm->isolate())));
- DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ Pop(esi);
- __ Push(ebx);
- __ Push(esi);
-
// Dispatch to the target bytecode.
- __ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
+ __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, times_1, 0));
- __ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
-
- // Get the context from the frame.
- __ mov(kContextRegister,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kContextFromRegisterPointer));
-
- // TODO(rmcilroy): Make dispatch table point to code entrys to avoid untagging
- // and header removal.
- __ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(kInterpreterDispatchTableRegister, ebx,
+ times_pointer_size, 0));
__ jmp(ebx);
}
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+ // First look up code; maybe we don't need to compile!
+ Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label maybe_call_runtime;
+ Label try_shared;
+ Label loop_top, loop_bottom;
+
+ Register closure = edi;
+ Register new_target = edx;
+ Register argument_count = eax;
+
+ __ push(argument_count);
+ __ push(new_target);
+ __ push(closure);
+
+ Register map = argument_count;
+ Register index = ebx;
+ __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
+ __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
+ __ cmp(index, Immediate(Smi::FromInt(2)));
+ __ j(less, &gotta_call_runtime);
+
+ // Find literals.
+ // edx : native context
+ // ebx : length / index
+ // eax : optimized code map
+ // stack[0] : new target
+ // stack[4] : closure
+ Register native_context = edx;
+ __ mov(native_context, NativeContextOperand());
+
+ __ bind(&loop_top);
+ Register temp = edi;
+
+ // Does the native context match?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousContext));
+ __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ cmp(temp, native_context);
+ __ j(not_equal, &loop_bottom);
+ // OSR id set to none?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousOsrAstId));
+ const int bailout_id = BailoutId::None().ToInt();
+ __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
+ __ j(not_equal, &loop_bottom);
+ // Literals available?
+ __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousLiterals));
+ __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
+ __ JumpIfSmi(temp, &gotta_call_runtime);
+
+ // Save the literals in the closure.
+ __ mov(ecx, Operand(esp, 0));
+ __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
+ __ push(index);
+ __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ pop(index);
+
+ // Code available?
+ Register entry = ecx;
+ __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
+ SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &maybe_call_runtime);
+
+ // Found literals and code. Get them into the closure and return.
+ __ pop(closure);
+ // Store code entry in the closure.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+
+ Label install_optimized_code_and_tailcall;
+ __ bind(&install_optimized_code_and_tailcall);
+ __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, eax);
+
+ // Link the closure into the optimized function list.
+ // ecx : code entry
+ // edx : native context
+ // edi : closure
+ __ mov(ebx,
+ ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+ closure);
+ // Save closure before the write barrier.
+ __ mov(ebx, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
+ kDontSaveFPRegs);
+ __ mov(closure, ebx);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ jmp(entry);
+
+ __ bind(&loop_bottom);
+ __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+ __ cmp(index, Immediate(Smi::FromInt(1)));
+ __ j(greater, &loop_top);
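+ // Editor's note (sketch): the loop above scans the optimized code map
+ // backwards in strides of SharedFunctionInfo::kEntryLength, stopping at
+ // slot 1. An entry is taken only if its native context matches, its OSR id
+ // is none and its literals weak cell is still alive; a cleared code cell
+ // instead falls through to the context-free entry (slot 0) checked under
+ // &maybe_call_runtime.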
+
+ // We found neither literals nor code.
+ __ jmp(&gotta_call_runtime);
+
+ __ bind(&maybe_call_runtime);
+ __ pop(closure);
+
+ // Last possibility. Check the context-free optimized code map entry.
+ __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
+ SharedFunctionInfo::kSharedCodeIndex));
+ __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
+ __ JumpIfSmi(entry, &try_shared);
+
+ // Store code entry in the closure.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ jmp(&install_optimized_code_and_tailcall);
+
+ __ bind(&try_shared);
+ __ pop(new_target);
+ __ pop(argument_count);
+ // Is the full code valid?
+ __ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
+ __ and_(ebx, Code::KindField::kMask);
+ __ shr(ebx, Code::KindField::kShift);
+ __ cmp(ebx, Immediate(Code::BUILTIN));
+ __ j(equal, &gotta_call_runtime_no_stack);
+ // Yes, install the full code.
+ __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
+ __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
+ __ RecordWriteCodeEntryField(closure, entry, ebx);
+ __ jmp(entry);
+
+ __ bind(&gotta_call_runtime);
+ __ pop(closure);
+ __ pop(new_target);
+ __ pop(argument_count);
+ __ bind(&gotta_call_runtime_no_stack);
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and and pop the
- // accumulator value into the accumulator register and push PC at top
- // of stack (to simulate initial call to bytecode handler in interpreter entry
- // trampoline).
- __ Pop(ebx);
- __ Drop(1);
- __ Pop(kInterpreterAccumulatorRegister);
- __ Push(ebx);
-
- // Enter the bytecode dispatch.
- Generate_EnterBytecodeDispatch(masm);
-}
-
-
-void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-
-void Builtins::Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+void Builtins::Generate_CompileBaseline(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileBaseline);
}
-
-void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
- Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
- // Set the address of the interpreter entry trampoline as a return address.
- // This simulates the initial call to bytecode handlers in interpreter entry
- // trampoline. The return will never actually be taken, but our stack walker
- // uses this address to determine whether a frame is interpreted.
- __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
-
- Generate_EnterBytecodeDispatch(masm);
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
+void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
+ // -- edx : new target (preserved for callee)
+ // -- edi : target function (preserved for callee)
+ // -----------------------------------
+ Label failed;
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve argument count for later compare.
+ __ mov(ecx, eax);
+ // Push the number of arguments to the callee.
+ __ SmiTag(eax);
+ __ push(eax);
+ // Push a copy of the target function and the new target.
+ __ push(edi);
+ __ push(edx);
-void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-}
+ // The function.
+ __ push(edi);
+ // Copy arguments from caller (stdlib, foreign, heap).
+ Label args_done;
+ for (int j = 0; j < 4; ++j) {
+ Label over;
+ if (j < 3) {
+ __ cmp(ecx, Immediate(j));
+ __ j(not_equal, &over, Label::kNear);
+ }
+ for (int i = j - 1; i >= 0; --i) {
+ __ Push(Operand(
+ ebp, StandardFrameConstants::kCallerSPOffset + i * kPointerSize));
+ }
+ for (int i = 0; i < 3 - j; ++i) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ }
+ if (j < 3) {
+ __ jmp(&args_done, Label::kNear);
+ __ bind(&over);
+ }
+ }
+ __ bind(&args_done);
+ // Call the runtime; on success, unwind this frame and the parent frame.
+ __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
+ // A Smi 0 is returned on failure, an object on success.
+ __ JumpIfSmi(eax, &failed, Label::kNear);
-void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm,
- Runtime::kCompileOptimized_NotConcurrent);
-}
+ __ Drop(2);
+ __ Pop(ecx);
+ __ SmiUntag(ecx);
+ scope.GenerateLeaveFrame();
+ __ PopReturnAddressTo(ebx);
+ __ inc(ecx);
+ __ lea(esp, Operand(esp, ecx, times_pointer_size, 0));
+ __ PushReturnAddressFrom(ebx);
+ __ ret(0);
-void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
+ __ bind(&failed);
+ // Restore target function and new target.
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
+ // On failure, tail call back to regular JS.
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
-
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
@@ -772,19 +1098,18 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ } \
+ void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+ }
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
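The generator above is the classic X-macro idiom: CODE_AGE_LIST supplies the
list of code ages, and DEFINE_CODE_AGE_BUILTIN_GENERATOR is applied once per
entry to stamp out near-identical builtins. A self-contained toy version of
the same pattern (assumed names, compiles as plain C++):

#include <cstdio>

#define COLOR_LIST(V) V(Red) V(Green) V(Blue)

#define DEFINE_PRINTER(Name) \
  void Print##Name() { std::printf("%s\n", #Name); }
COLOR_LIST(DEFINE_PRINTER)  // expands to PrintRed, PrintGreen, PrintBlue
#undef DEFINE_PRINTER

int main() {
  PrintRed();
  PrintGreen();
  PrintBlue();
}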
-
void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
// that make_code_young doesn't do any garbage collection which allows us to
@@ -817,17 +1142,14 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ ret(0);
}
-
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
GenerateMakeCodeYoungAgainCommon(masm);
}
-
void Builtins::Generate_MarkCodeAsToBeExecutedOnce(MacroAssembler* masm) {
Generate_MarkCodeAsExecutedOnce(masm);
}
-
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
// Enter an internal frame.
@@ -847,17 +1169,14 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
-
void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
}
-
void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
}
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -876,13 +1195,14 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_eax;
- __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+ __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
__ mov(eax, Operand(esp, 2 * kPointerSize));
- __ cmp(ecx, FullCodeGenerator::TOS_REG);
+ __ cmp(ecx, static_cast<int>(Deoptimizer::BailoutState::TOS_REGISTER));
__ j(not_equal, &not_tos_eax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, eax.
@@ -890,26 +1210,25 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ Abort(kNoCasesLeft);
}
-
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
-
void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
}
-
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
-
// static
void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
int field_index) {
// ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
@@ -952,35 +1271,13 @@ void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
__ bind(&receiver_not_date);
{
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterFrame(StackFrame::INTERNAL);
+ __ Move(ebx, Immediate(0));
+ __ EnterBuiltinFrame(esi, edi, ebx);
__ CallRuntime(Runtime::kThrowNotDateError);
}
}
// static
-void Builtins::Generate_FunctionHasInstance(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : argc
- // -- esp[0] : return address
- // -- esp[4] : first argument (left-hand side)
- // -- esp[8] : receiver (right-hand side)
- // -----------------------------------
-
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ mov(InstanceOfDescriptor::LeftRegister(),
- Operand(ebp, 2 * kPointerSize)); // Load left-hand side.
- __ mov(InstanceOfDescriptor::RightRegister(),
- Operand(ebp, 3 * kPointerSize)); // Load right-hand side.
- InstanceOfStub stub(masm->isolate(), true);
- __ CallStub(&stub);
- }
-
- // Pop the argument and the receiver.
- __ ret(2 * kPointerSize);
-}
-
-// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1057,7 +1354,6 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
}
}
-
// static
void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// Stack Layout:
@@ -1104,7 +1400,6 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
-
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1245,7 +1540,6 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
}
-
void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1274,7 +1568,6 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -1304,11 +1597,12 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ TailCallStub(&stub);
}
-
// static
void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// ----------- S t a t e -------------
// -- eax : number of arguments
+ // -- edi : function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 8] : arg[n] (zero-based)
// -- esp[(argc + 1) * 8] : receiver
@@ -1336,27 +1630,28 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
// Load the double value of the parameter into stx_1, maybe converting the
- // parameter to a number first using the ToNumberStub if necessary.
+ // parameter to a number first using the ToNumber builtin if necessary.
Label convert, convert_smi, convert_number, done_convert;
__ bind(&convert);
__ JumpIfSmi(ebx, &convert_smi);
__ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex, &convert_number);
{
- // Parameter is not a Number, use the ToNumberStub to convert it.
- FrameScope scope(masm, StackFrame::INTERNAL);
+ // Parameter is not a Number, use the ToNumber builtin to convert it.
+ FrameScope scope(masm, StackFrame::MANUAL);
__ SmiTag(eax);
__ SmiTag(ecx);
- __ Push(eax);
+ __ EnterBuiltinFrame(esi, edi, eax);
__ Push(ecx);
__ Push(edx);
__ mov(eax, ebx);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ mov(ebx, eax);
__ Pop(edx);
__ Pop(ecx);
- __ Pop(eax);
+ __ LeaveBuiltinFrame(esi, edi, eax);
+ __ SmiUntag(ecx);
+ __ SmiUntag(eax);
{
// Restore the double accumulator value (stX_0).
Label restore_smi, done_restore;
@@ -1371,8 +1666,6 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ SmiTag(edx);
__ bind(&done_restore);
}
- __ SmiUntag(ecx);
- __ SmiUntag(eax);
}
__ jmp(&convert);
__ bind(&convert_number);
@@ -1444,40 +1737,51 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into eax and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into ebx.
Label no_arguments;
{
__ test(eax, eax);
__ j(zero, &no_arguments, Label::kNear);
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ mov(eax, ebx);
}
// 2a. Convert the first argument to a number.
- ToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(eax);
+ __ EnterBuiltinFrame(esi, edi, eax);
+ __ mov(eax, ebx);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ __ LeaveBuiltinFrame(esi, edi, ebx); // Argc popped to ebx.
+ __ SmiUntag(ebx);
+ }
+
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ Ret();
+ }
// 2b. No arguments, return +0 (already in eax).
__ bind(&no_arguments);
__ ret(1 * kPointerSize);
}
-
// static
void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
// -- edx : new target
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
@@ -1486,8 +1790,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // 2. Load the first argument into ebx and get rid of the rest (including the
- // receiver).
+  // Store argc in ecx.
+ __ mov(ecx, eax);
+ __ SmiTag(ecx);
+
+ // 2. Load the first argument into ebx.
{
Label no_arguments, done;
__ test(eax, eax);
@@ -1497,9 +1804,6 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&no_arguments);
__ Move(ebx, Smi::FromInt(0));
__ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
}
// 3. Make sure ebx is a number.
@@ -1510,74 +1814,83 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Heap::kHeapNumberMapRootIndex);
__ j(equal, &done_convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edi);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterBuiltinFrame(esi, edi, ecx);
__ Push(edx);
__ Move(eax, ebx);
- ToNumberStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
__ Move(ebx, eax);
__ Pop(edx);
- __ Pop(edi);
+ __ LeaveBuiltinFrame(esi, edi, ecx);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, done_alloc, new_object;
__ cmp(edx, edi);
__ j(not_equal, &new_object);
// 5. Allocate a JSValue wrapper for the number.
- __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
- __ Ret();
+ __ AllocateJSValue(eax, edi, ebx, esi, &done_alloc);
+ __ jmp(&drop_frame_and_ret);
+
+ __ bind(&done_alloc);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Restore esi.
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ __ LeaveBuiltinFrame(esi, edi, ecx);
}
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(esi);
+ __ SmiUntag(ecx);
+ __ lea(esp, Operand(esp, ecx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(esi);
+ __ Ret();
+ }
+}
// static
void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- // 1. Load the first argument into eax and get rid of the rest (including the
- // receiver).
+ // 1. Load the first argument into eax.
Label no_arguments;
{
+ __ mov(ebx, eax); // Store argc in ebx.
__ test(eax, eax);
__ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
- __ mov(eax, ebx);
+ __ mov(eax, Operand(esp, eax, times_pointer_size, 0));
}
// 2a. At least one argument, return eax if it's a string, otherwise
// dispatch to appropriate conversion.
- Label to_string, symbol_descriptive_string;
+ Label drop_frame_and_ret, to_string, symbol_descriptive_string;
{
__ JumpIfSmi(eax, &to_string, Label::kNear);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above, &to_string, Label::kNear);
__ j(equal, &symbol_descriptive_string, Label::kNear);
- __ Ret();
+ __ jmp(&drop_frame_and_ret, Label::kNear);
}
// 2b. No arguments, return the empty string (and pop the receiver).
@@ -1590,20 +1903,35 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// 3a. Convert eax to a string.
__ bind(&to_string);
{
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ TailCallStub(&stub);
+ __ SmiTag(ebx);
+ __ EnterBuiltinFrame(esi, edi, ebx);
+ __ CallStub(&stub);
+ __ LeaveBuiltinFrame(esi, edi, ebx);
+ __ SmiUntag(ebx);
}
+ __ jmp(&drop_frame_and_ret, Label::kNear);
// 3b. Convert symbol in eax to a string.
__ bind(&symbol_descriptive_string);
{
__ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
__ Push(eax);
__ PushReturnAddressFrom(ecx);
__ TailCallRuntime(Runtime::kSymbolDescriptiveString);
}
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ Ret();
+ }
+}
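The three-way branch above leans on instance-type ordering, pinned by the
STATIC_ASSERT: every string type sorts below FIRST_NONSTRING_TYPE, and
SYMBOL_TYPE is exactly that boundary value, so a single compare classifies
all three cases. A sketch with an assumed numeric layout (not V8's real
values):

#include <cstdint>

const uint16_t kFirstNonstringType = 64;  // assumed; == SYMBOL_TYPE

const char* Classify(uint16_t instance_type) {
  if (instance_type > kFirstNonstringType) return "convert via ToString";
  if (instance_type == kFirstNonstringType) return "symbol descriptive string";
  return "already a string";  // below the boundary, returned unchanged
}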
// static
void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
@@ -1611,6 +1939,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -- eax : number of arguments
// -- edi : constructor function
// -- edx : new target
+ // -- esi : context
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
@@ -1619,64 +1948,83 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// 1. Make sure we operate in the context of the called function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- // 2. Load the first argument into ebx and get rid of the rest (including the
- // receiver).
+ __ mov(ebx, eax);
+
+ // 2. Load the first argument into eax.
{
Label no_arguments, done;
- __ test(eax, eax);
+ __ test(ebx, ebx);
__ j(zero, &no_arguments, Label::kNear);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ mov(eax, Operand(esp, ebx, times_pointer_size, 0));
__ jmp(&done, Label::kNear);
__ bind(&no_arguments);
- __ LoadRoot(ebx, Heap::kempty_stringRootIndex);
+ __ LoadRoot(eax, Heap::kempty_stringRootIndex);
__ bind(&done);
- __ PopReturnAddressTo(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(ecx);
}
- // 3. Make sure ebx is a string.
+ // 3. Make sure eax is a string.
{
Label convert, done_convert;
- __ JumpIfSmi(ebx, &convert, Label::kNear);
- __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, ecx);
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
__ j(below, &done_convert);
__ bind(&convert);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::MANUAL);
ToStringStub stub(masm->isolate());
- __ Push(edi);
+ __ SmiTag(ebx);
+ __ EnterBuiltinFrame(esi, edi, ebx);
__ Push(edx);
- __ Move(eax, ebx);
__ CallStub(&stub);
- __ Move(ebx, eax);
__ Pop(edx);
- __ Pop(edi);
+ __ LeaveBuiltinFrame(esi, edi, ebx);
+ __ SmiUntag(ebx);
}
__ bind(&done_convert);
}
// 4. Check if new target and constructor differ.
- Label new_object;
+ Label drop_frame_and_ret, done_alloc, new_object;
__ cmp(edx, edi);
__ j(not_equal, &new_object);
// 5. Allocate a JSValue wrapper for the string.
- __ AllocateJSValue(eax, edi, ebx, ecx, &new_object);
- __ Ret();
+ // AllocateJSValue can't handle src == dst register. Reuse esi and restore it
+ // as needed after the call.
+ __ mov(esi, eax);
+ __ AllocateJSValue(eax, edi, esi, ecx, &done_alloc);
+ __ jmp(&drop_frame_and_ret);
+
+ __ bind(&done_alloc);
+ {
+ // Restore eax to the first argument and esi to the context.
+ __ mov(eax, esi);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ }
// 6. Fallback to the runtime to create new object.
__ bind(&new_object);
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(ebx); // the first argument
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ SmiTag(ebx);
+ __ EnterBuiltinFrame(esi, edi, ebx);
+ __ Push(eax); // the first argument
FastNewObjectStub stub(masm->isolate());
__ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
+ __ LeaveBuiltinFrame(esi, edi, ebx);
+ __ SmiUntag(ebx);
}
- __ Ret();
-}
+ __ bind(&drop_frame_and_ret);
+ {
+ // Drop all arguments including the receiver.
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ Ret();
+ }
+}
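A recurring move in these rewritten stubs is SmiTag-ing the argument count
before spilling it into the builtin frame: on 32-bit V8 a smi is the integer
shifted left by one, leaving the low bit clear, so the stored word cannot be
mistaken for a heap pointer if the GC walks the frame. A toy round-trip of
that encoding (a sketch, not V8's Smi class):

#include <cassert>
#include <cstdint>

int32_t SmiTag(int32_t value) { return value << 1; }  // low bit stays 0
int32_t SmiUntag(int32_t smi) { return smi >> 1; }

int main() {
  int32_t argc = 3;
  int32_t tagged = SmiTag(argc);
  assert((tagged & 1) == 0);         // heap pointers carry a set low bit
  assert(SmiUntag(tagged) == argc);  // round-trips exactly
}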
static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
@@ -1704,7 +2052,6 @@ static void ArgumentsAdaptorStackCheck(MacroAssembler* masm,
__ j(less_equal, stack_overflow); // Signed comparison.
}
-
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, esp);
@@ -1723,7 +2070,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(edi);
}
-
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack.
__ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -1738,7 +2084,6 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ecx);
}
-
// static
void Builtins::Generate_Apply(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2037,8 +2382,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(eax);
__ Push(edi);
__ mov(eax, ecx);
+ __ Push(esi);
ToObjectStub stub(masm->isolate());
__ CallStub(&stub);
+ __ Pop(esi);
__ mov(ecx, eax);
__ Pop(edi);
__ Pop(eax);
@@ -2080,7 +2427,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
}
}
-
namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
@@ -2167,7 +2513,6 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
} // namespace
-
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
TailCallMode tail_call_mode) {
@@ -2196,7 +2541,6 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ jmp(ecx);
}
-
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -2259,7 +2603,6 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
-
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2281,7 +2624,6 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
__ jmp(ecx);
}
-
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2311,7 +2653,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ jmp(ecx);
}
-
// static
void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2333,7 +2674,6 @@ void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
ExternalReference(Runtime::kJSProxyConstruct, masm->isolate()));
}
-
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2385,6 +2725,65 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+// static
+void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : requested object size (untagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ SmiTag(edx);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInNewSpace);
+}
+
+// static
+void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : requested object size (untagged)
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ SmiTag(edx);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
+}
+
+// static
+void Builtins::Generate_Abort(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edx : message_id as Smi
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ Move(esi, Smi::FromInt(0));
+ __ TailCallRuntime(Runtime::kAbort);
+}
+
+// static
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+ // The ToNumber stub takes one argument in eax.
+ Label not_smi;
+ __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+ __ Ret();
+ __ bind(&not_smi);
+
+ Label not_heap_number;
+ __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ Ret();
+ __ bind(&not_heap_number);
+
+ __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+ RelocInfo::CODE_TARGET);
+}
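The new ToNumber builtin above is a two-level fast path: smis and heap
numbers return immediately, everything else is forwarded to the
NonNumberToNumber builtin. The same shape in plain C++ (the tagged-value
struct and the slow-path body are assumptions for illustration):

#include <cstdint>

enum class Kind : uint8_t { kSmi, kHeapNumber, kOther };
struct Value {
  Kind kind;
  double number;
};

// Placeholder slow path; the real one handles strings, oddballs, objects.
double NonNumberToNumberSlow(const Value&) { return 0.0; }

double ToNumber(const Value& v) {
  if (v.kind == Kind::kSmi) return v.number;         // fast path: smi
  if (v.kind == Kind::kHeapNumber) return v.number;  // fast path: heap number
  return NonNumberToNumberSlow(v);                   // slow path: tail call
}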
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -2494,7 +2893,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
}
-
static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
Register function_template_info,
Register scratch0, Register scratch1,
@@ -2558,7 +2956,6 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
__ bind(&receiver_check_passed);
}
-
void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments (not including the receiver)
@@ -2602,10 +2999,16 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
}
}
-
-void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
+ bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ if (has_handler_frame) {
+ __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(eax, JavaScriptFrameConstants::kFunctionOffset));
+ } else {
+ __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
@@ -2614,19 +3017,26 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
}
Label skip;
- // If the code object is null, just return to the unoptimized code.
+ // If the code object is null, just return to the caller.
__ cmp(eax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&skip);
+  // Drop any potential handler frame that may be sitting on top of the
+  // actual JavaScript frame. This is the case when OSR is triggered from
+  // bytecode.
+ if (has_handler_frame) {
+ __ leave();
+ }
+
// Load deoptimization data from the code object.
__ mov(ebx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ mov(ebx, Operand(ebx, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ DeoptimizationInputData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
__ SmiUntag(ebx);
// Compute the target address = code_obj + header_size + osr_offset
@@ -2639,6 +3049,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ret(0);
}
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, false);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ Generate_OnStackReplacementHelper(masm, true);
+}
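The jump target assembled above is plain pointer arithmetic, matching the
comment in the hunk: the code object's start address, plus the fixed code
header, plus the OSR pc offset recorded in the deoptimization data. Sketch
(parameter names assumed):

#include <cstddef>
#include <cstdint>

uintptr_t OsrEntry(uintptr_t code_object_start, size_t header_size,
                   int32_t osr_pc_offset) {
  // target = code_obj + header_size + osr_offset
  return code_object_start + header_size +
         static_cast<uintptr_t>(osr_pc_offset);
}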
#undef __
} // namespace internal
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index d231bb799d..defbb44775 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -14,7 +14,6 @@ namespace internal {
Cancelable::Cancelable(CancelableTaskManager* parent)
: parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
id_ = parent->Register(this);
- CHECK(id_ != 0);
}
@@ -27,49 +26,35 @@ Cancelable::~Cancelable() {
}
}
-
-static bool ComparePointers(void* ptr1, void* ptr2) { return ptr1 == ptr2; }
-
-
-CancelableTaskManager::CancelableTaskManager()
- : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
-
+CancelableTaskManager::CancelableTaskManager() : task_id_counter_(0) {}
uint32_t CancelableTaskManager::Register(Cancelable* task) {
base::LockGuard<base::Mutex> guard(&mutex_);
uint32_t id = ++task_id_counter_;
// The loop below is just used when task_id_counter_ overflows.
- while ((id == 0) || (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id),
- id) != nullptr)) {
- ++id;
- }
- HashMap::Entry* entry =
- cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
- entry->value = task;
+ while (cancelable_tasks_.count(id) > 0) ++id;
+ cancelable_tasks_[id] = task;
return id;
}
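Task ids come from a monotonically increasing counter, and the loop above
only matters once that counter has wrapped around: it skips any id still
held by a live task. The scheme in isolation (assumed signature, std::map
standing in for the member container):

#include <cstdint>
#include <map>

uint32_t AllocateId(uint32_t* counter,
                    const std::map<uint32_t, void*>& live_tasks) {
  uint32_t id = ++(*counter);
  while (live_tasks.count(id) > 0) ++id;  // only loops after wrap-around
  return id;
}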
void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
- void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+ size_t removed = cancelable_tasks_.erase(id);
USE(removed);
- DCHECK(removed != nullptr);
+ DCHECK_NE(0, removed);
cancelable_tasks_barrier_.NotifyOne();
}
bool CancelableTaskManager::TryAbort(uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
- HashMap::Entry* entry =
- cancelable_tasks_.Lookup(reinterpret_cast<void*>(id), id);
- if (entry != nullptr) {
- Cancelable* value = reinterpret_cast<Cancelable*>(entry->value);
+ auto entry = cancelable_tasks_.find(id);
+ if (entry != cancelable_tasks_.end()) {
+ Cancelable* value = entry->second;
if (value->Cancel()) {
// Cannot call RemoveFinishedTask here because of recursive locking.
- void* removed = cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
- USE(removed);
- DCHECK(removed != nullptr);
+ cancelable_tasks_.erase(entry);
cancelable_tasks_barrier_.NotifyOne();
return true;
}
@@ -85,27 +70,19 @@ void CancelableTaskManager::CancelAndWait() {
// started.
base::LockGuard<base::Mutex> guard(&mutex_);
- // HashMap does not support removing while iterating, hence keep a set of
- // entries that are to be removed.
- std::set<uint32_t> to_remove;
-
- // Cancelable tasks could potentially register new tasks, requiring a loop
- // here.
- while (cancelable_tasks_.occupancy() > 0) {
- for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
- p = cancelable_tasks_.Next(p)) {
- if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
- to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
+ // Cancelable tasks could be running or could potentially register new
+ // tasks, requiring a loop here.
+ while (!cancelable_tasks_.empty()) {
+ for (auto it = cancelable_tasks_.begin(); it != cancelable_tasks_.end();) {
+ auto current = it;
+      // Advance to the next element before possibly erasing the current one.
+ ++it;
+ if (current->second->Cancel()) {
+ cancelable_tasks_.erase(current);
}
}
- // Remove tasks that were successfully canceled.
- for (auto id : to_remove) {
- cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
- }
- to_remove.clear();
-
- // Finally, wait for already running background tasks.
- if (cancelable_tasks_.occupancy() > 0) {
+ // Wait for already running background tasks.
+ if (!cancelable_tasks_.empty()) {
cancelable_tasks_barrier_.Wait(&mutex_);
}
}
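The rewritten loop above uses the standard erase-while-iterating idiom for
node-based containers: std::map::erase invalidates only the erased iterator,
so advancing a copy first keeps the traversal valid. In isolation (a bool
stands in for Cancel() succeeding):

#include <cstdint>
#include <map>

void EraseCancelled(std::map<uint32_t, bool>* tasks) {
  for (auto it = tasks->begin(); it != tasks->end();) {
    auto current = it;
    ++it;                     // advance before any erase
    if (current->second) {    // i.e. the task was successfully cancelled
      tasks->erase(current);  // invalidates only 'current'
    }
  }
}

Since C++11 the same effect can be written as it = tasks->erase(it); the
copy-then-advance form used here simply mirrors the code in the hunk.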
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index a8387fcd95..b1d62aad4b 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -5,11 +5,12 @@
#ifndef V8_CANCELABLE_TASK_H_
#define V8_CANCELABLE_TASK_H_
+#include <map>
+
#include "include/v8-platform.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
-#include "src/hashmap.h"
namespace v8 {
namespace internal {
@@ -51,7 +52,7 @@ class CancelableTaskManager {
uint32_t task_id_counter_;
// A set of cancelable tasks that are currently registered.
- HashMap cancelable_tasks_;
+ std::map<uint32_t, Cancelable*> cancelable_tasks_;
// Mutex and condition variable enabling concurrent register and removing, as
// well as waiting for background tasks on {CancelAndWait}.
@@ -104,13 +105,13 @@ class Cancelable {
}
CancelableTaskManager* parent_;
- AtomicValue<Status> status_;
+ base::AtomicValue<Status> status_;
uint32_t id_;
// The counter is incremented for failing tries to cancel a task. This can be
// used by the task itself as an indication how often external entities tried
// to abort it.
- AtomicNumber<intptr_t> cancel_counter_;
+ base::AtomicNumber<intptr_t> cancel_counter_;
friend class CancelableTaskManager;
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
new file mode 100644
index 0000000000..9ae1caeb37
--- /dev/null
+++ b/deps/v8/src/code-events.h
@@ -0,0 +1,183 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_EVENTS_H_
+#define V8_CODE_EVENTS_H_
+
+#include <unordered_set>
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class AbstractCode;
+class Name;
+class SharedFunctionInfo;
+class String;
+
+#define LOG_EVENTS_AND_TAGS_LIST(V) \
+ V(CODE_CREATION_EVENT, "code-creation") \
+ V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
+ V(CODE_MOVE_EVENT, "code-move") \
+ V(CODE_DELETE_EVENT, "code-delete") \
+ V(CODE_MOVING_GC, "code-moving-gc") \
+ V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
+ V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
+ V(TICK_EVENT, "tick") \
+ V(REPEAT_META_EVENT, "repeat") \
+ V(BUILTIN_TAG, "Builtin") \
+ V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
+ V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
+ V(CALL_INITIALIZE_TAG, "CallInitialize") \
+ V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
+ V(CALL_MISS_TAG, "CallMiss") \
+ V(CALL_NORMAL_TAG, "CallNormal") \
+ V(LOAD_INITIALIZE_TAG, "LoadInitialize") \
+ V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic") \
+ V(STORE_INITIALIZE_TAG, "StoreInitialize") \
+ V(STORE_GENERIC_TAG, "StoreGeneric") \
+ V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic") \
+ V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
+ V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, "KeyedCallDebugPrepareStepIn") \
+ V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
+ V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
+ V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
+ V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
+ V(CALLBACK_TAG, "Callback") \
+ V(EVAL_TAG, "Eval") \
+ V(FUNCTION_TAG, "Function") \
+ V(HANDLER_TAG, "Handler") \
+ V(BYTECODE_HANDLER_TAG, "BytecodeHandler") \
+ V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
+ V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
+ V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
+ V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
+ V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
+ V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
+ V(LAZY_COMPILE_TAG, "LazyCompile") \
+ V(CALL_IC_TAG, "CallIC") \
+ V(LOAD_IC_TAG, "LoadIC") \
+ V(LOAD_GLOBAL_IC_TAG, "LoadGlobalIC") \
+ V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
+ V(REG_EXP_TAG, "RegExp") \
+ V(SCRIPT_TAG, "Script") \
+ V(STORE_IC_TAG, "StoreIC") \
+ V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC") \
+ V(STUB_TAG, "Stub") \
+ V(NATIVE_FUNCTION_TAG, "Function") \
+ V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
+ V(NATIVE_SCRIPT_TAG, "Script")
+// Note that 'NATIVE_' cases for functions and scripts are mapped onto
+// original tags when writing to the log.
+
+#define PROFILE(the_isolate, Call) (the_isolate)->code_event_dispatcher()->Call;
+
+class CodeEventListener {
+ public:
+#define DECLARE_ENUM(enum_item, _) enum_item,
+ enum LogEventsAndTags {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM) NUMBER_OF_LOG_EVENTS
+ };
+#undef DECLARE_ENUM
+
+ virtual ~CodeEventListener() {}
+
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ const char* comment) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ Name* name) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, Name* name) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, Name* source,
+ int line, int column) = 0;
+ virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ int args_count) = 0;
+ virtual void CallbackEvent(Name* name, Address entry_point) = 0;
+ virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
+ virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
+ virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
+ virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
+ virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
+ virtual void CodeMovingGCEvent() = 0;
+ virtual void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) = 0;
+ virtual void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) = 0;
+};
+
+class CodeEventDispatcher {
+ public:
+ using LogEventsAndTags = CodeEventListener::LogEventsAndTags;
+
+ CodeEventDispatcher() {}
+
+ bool AddListener(CodeEventListener* listener) {
+ return listeners_.insert(listener).second;
+ }
+ void RemoveListener(CodeEventListener* listener) {
+ listeners_.erase(listener);
+ }
+
+#define CODE_EVENT_DISPATCH(code) \
+ for (auto it = listeners_.begin(); it != listeners_.end(); ++it) (*it)->code
+
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ const char* comment) {
+ CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, comment));
+ }
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name) {
+ CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, name));
+ }
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, Name* name) {
+ CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, shared, name));
+ }
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, Name* source, int line,
+ int column) {
+ CODE_EVENT_DISPATCH(
+ CodeCreateEvent(tag, code, shared, source, line, column));
+ }
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ int args_count) {
+ CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, args_count));
+ }
+ void CallbackEvent(Name* name, Address entry_point) {
+ CODE_EVENT_DISPATCH(CallbackEvent(name, entry_point));
+ }
+ void GetterCallbackEvent(Name* name, Address entry_point) {
+ CODE_EVENT_DISPATCH(GetterCallbackEvent(name, entry_point));
+ }
+ void SetterCallbackEvent(Name* name, Address entry_point) {
+ CODE_EVENT_DISPATCH(SetterCallbackEvent(name, entry_point));
+ }
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) {
+ CODE_EVENT_DISPATCH(RegExpCodeCreateEvent(code, source));
+ }
+ void CodeMoveEvent(AbstractCode* from, Address to) {
+ CODE_EVENT_DISPATCH(CodeMoveEvent(from, to));
+ }
+ void SharedFunctionInfoMoveEvent(Address from, Address to) {
+ CODE_EVENT_DISPATCH(SharedFunctionInfoMoveEvent(from, to));
+ }
+ void CodeMovingGCEvent() { CODE_EVENT_DISPATCH(CodeMovingGCEvent()); }
+ void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared) {
+ CODE_EVENT_DISPATCH(CodeDisableOptEvent(code, shared));
+ }
+ void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
+ CODE_EVENT_DISPATCH(CodeDeoptEvent(code, pc, fp_to_sp_delta));
+ }
+#undef CODE_EVENT_DISPATCH
+
+ private:
+ std::unordered_set<CodeEventListener*> listeners_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeEventDispatcher);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODE_EVENTS_H_
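The new CodeEventDispatcher is a plain fan-out: listeners register into an
unordered set and every event method forwards to each of them, with the
CODE_EVENT_DISPATCH macro removing the per-method boilerplate. The pattern
boiled down to one event (illustrative names, not the V8 interfaces):

#include <unordered_set>

class Listener {
 public:
  virtual ~Listener() {}
  virtual void OnEvent(int id) = 0;
};

class Dispatcher {
 public:
  bool AddListener(Listener* listener) {
    return listeners_.insert(listener).second;  // false if already registered
  }
  void RemoveListener(Listener* listener) { listeners_.erase(listener); }
  void DispatchEvent(int id) {
    for (Listener* l : listeners_) l->OnEvent(id);  // fan out to everyone
  }

 private:
  std::unordered_set<Listener*> listeners_;
};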
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index fbfdd5f644..018f21d447 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -10,202 +10,229 @@
namespace v8 {
namespace internal {
+namespace {
+
+// TODO(ishell): make it (const Stub& stub) once CodeStub::GetCode() is const.
+template <typename Stub>
+Callable make_callable(Stub& stub) {
+ typedef typename Stub::Descriptor Descriptor;
+ return Callable(stub.GetCode(), Descriptor(stub.isolate()));
+}
+
+} // namespace
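make_callable works because every CodeStub names its own calling convention
through a nested Descriptor typedef, letting one template replace the
repeated Callable(stub.GetCode(), ...) boilerplate visible in the deletions
below. The trait mechanism in miniature (toy types, not V8's):

struct FooDescriptor {
  const char* name() const { return "foo"; }
};

struct FooStub {
  typedef FooDescriptor Descriptor;  // the trait the template reads
  int GetCode() { return 42; }
};

template <typename Stub>
const char* DescribeStub(Stub& stub) {
  typedef typename Stub::Descriptor Descriptor;
  (void)stub.GetCode();        // touch the stub, as make_callable does
  return Descriptor().name();  // construct the deduced descriptor
}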
// static
-Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode) {
- return Callable(LoadIC::initialize_stub(
- isolate, LoadICState(typeof_mode).GetExtraICState()),
- LoadDescriptor(isolate));
+Callable CodeFactory::LoadIC(Isolate* isolate) {
+ if (FLAG_tf_load_ic_stub) {
+ LoadICTrampolineTFStub stub(isolate);
+ return make_callable(stub);
+ }
+ LoadICTrampolineStub stub(isolate);
+ return make_callable(stub);
}
+// static
+Callable CodeFactory::ApiGetter(Isolate* isolate) {
+ CallApiGetterStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate) {
+ if (FLAG_tf_load_ic_stub) {
+ LoadICTFStub stub(isolate);
+ return make_callable(stub);
+ }
+ LoadICStub stub(isolate);
+ return make_callable(stub);
+}
// static
-Callable CodeFactory::LoadICInOptimizedCode(
- Isolate* isolate, TypeofMode typeof_mode,
- InlineCacheState initialization_state) {
- auto code = LoadIC::initialize_stub_in_optimized_code(
- isolate, LoadICState(typeof_mode).GetExtraICState(),
- initialization_state);
- return Callable(code, LoadWithVectorDescriptor(isolate));
+Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
+ LoadGlobalICTrampolineStub stub(isolate, LoadGlobalICState(typeof_mode));
+ return make_callable(stub);
}
+// static
+Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
+ TypeofMode typeof_mode) {
+ LoadGlobalICStub stub(isolate, LoadGlobalICState(typeof_mode));
+ return make_callable(stub);
+}
// static
Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
- return Callable(KeyedLoadIC::initialize_stub(isolate, kNoExtraICState),
- LoadDescriptor(isolate));
+ if (FLAG_tf_load_ic_stub) {
+ KeyedLoadICTrampolineTFStub stub(isolate);
+ return make_callable(stub);
+ }
+ KeyedLoadICTrampolineStub stub(isolate);
+ return make_callable(stub);
}
-
// static
-Callable CodeFactory::KeyedLoadICInOptimizedCode(
- Isolate* isolate, InlineCacheState initialization_state) {
- auto code = KeyedLoadIC::initialize_stub_in_optimized_code(
- isolate, initialization_state, kNoExtraICState);
- if (initialization_state != MEGAMORPHIC) {
- return Callable(code, LoadWithVectorDescriptor(isolate));
+Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
+ if (FLAG_tf_load_ic_stub) {
+ KeyedLoadICTFStub stub(isolate);
+ return make_callable(stub);
}
- return Callable(code, LoadDescriptor(isolate));
+ KeyedLoadICStub stub(isolate);
+ return make_callable(stub);
}
+// static
+Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
+ return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic(),
+ LoadWithVectorDescriptor(isolate));
+}
// static
Callable CodeFactory::CallIC(Isolate* isolate, int argc,
ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
- return Callable(CallIC::initialize_stub(isolate, argc, mode, tail_call_mode),
- CallFunctionWithFeedbackDescriptor(isolate));
+ CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
- return Callable(CallIC::initialize_stub_in_optimized_code(isolate, argc, mode,
- tail_call_mode),
- CallFunctionWithFeedbackAndVectorDescriptor(isolate));
+ CallICStub stub(isolate, CallICState(argc, mode, tail_call_mode));
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
- return Callable(
- StoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- VectorStoreICTrampolineDescriptor(isolate));
+ StoreICTrampolineStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
}
-
// static
-Callable CodeFactory::StoreICInOptimizedCode(
- Isolate* isolate, LanguageMode language_mode,
- InlineCacheState initialization_state) {
- CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
- ? VectorStoreICDescriptor(isolate)
- : StoreDescriptor(isolate);
- return Callable(StoreIC::initialize_stub_in_optimized_code(
- isolate, language_mode, initialization_state),
- descriptor);
+Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
+ LanguageMode language_mode) {
+ StoreICStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
- return Callable(
- KeyedStoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
- VectorStoreICTrampolineDescriptor(isolate));
+ KeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
}
-
// static
-Callable CodeFactory::KeyedStoreICInOptimizedCode(
- Isolate* isolate, LanguageMode language_mode,
- InlineCacheState initialization_state) {
- CallInterfaceDescriptor descriptor = initialization_state != MEGAMORPHIC
- ? VectorStoreICDescriptor(isolate)
- : StoreDescriptor(isolate);
- return Callable(KeyedStoreIC::initialize_stub_in_optimized_code(
- isolate, language_mode, initialization_state),
- descriptor);
+Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
+ LanguageMode language_mode) {
+ KeyedStoreICStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
- Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
- return Callable(code, CompareDescriptor(isolate));
+ CompareICStub stub(isolate, op);
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
BinaryOpICStub stub(isolate, op);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::InstanceOf(Isolate* isolate) {
InstanceOfStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
+// static
+Callable CodeFactory::GetProperty(Isolate* isolate) {
+ GetPropertyStub stub(isolate);
+ return make_callable(stub);
+}
// static
Callable CodeFactory::ToBoolean(Isolate* isolate) {
- ToBooleanStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return Callable(isolate->builtins()->ToBoolean(),
+ TypeConversionDescriptor(isolate));
}
-
// static
Callable CodeFactory::ToNumber(Isolate* isolate) {
- ToNumberStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return Callable(isolate->builtins()->ToNumber(),
+ TypeConversionDescriptor(isolate));
}
-
// static
Callable CodeFactory::NonNumberToNumber(Isolate* isolate) {
- NonNumberToNumberStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return Callable(isolate->builtins()->NonNumberToNumber(),
+ TypeConversionDescriptor(isolate));
}
// static
Callable CodeFactory::StringToNumber(Isolate* isolate) {
- StringToNumberStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return Callable(isolate->builtins()->StringToNumber(),
+ TypeConversionDescriptor(isolate));
}
// static
Callable CodeFactory::ToString(Isolate* isolate) {
ToStringStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::ToName(Isolate* isolate) {
ToNameStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::ToInteger(Isolate* isolate) {
ToIntegerStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::ToLength(Isolate* isolate) {
ToLengthStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::ToObject(Isolate* isolate) {
ToObjectStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::NonPrimitiveToPrimitive(Isolate* isolate,
+ ToPrimitiveHint hint) {
+ return Callable(isolate->builtins()->NonPrimitiveToPrimitive(hint),
+ TypeConversionDescriptor(isolate));
}
+// static
+Callable CodeFactory::OrdinaryToPrimitive(Isolate* isolate,
+ OrdinaryToPrimitiveHint hint) {
+ return Callable(isolate->builtins()->OrdinaryToPrimitive(hint),
+ TypeConversionDescriptor(isolate));
+}
// static
Callable CodeFactory::NumberToString(Isolate* isolate) {
NumberToStringStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
RegExpConstructResultStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::RegExpExec(Isolate* isolate) {
RegExpExecStub stub(isolate);
@@ -215,86 +242,134 @@ Callable CodeFactory::RegExpExec(Isolate* isolate) {
// static
Callable CodeFactory::Add(Isolate* isolate) {
AddStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::Subtract(Isolate* isolate) {
SubtractStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::Multiply(Isolate* isolate) {
+ MultiplyStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::Divide(Isolate* isolate) {
+ DivideStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::Modulus(Isolate* isolate) {
+ ModulusStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::ShiftRight(Isolate* isolate) {
+ ShiftRightStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::ShiftRightLogical(Isolate* isolate) {
+ ShiftRightLogicalStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::ShiftLeft(Isolate* isolate) {
+ ShiftLeftStub stub(isolate);
+ return make_callable(stub);
}
// static
Callable CodeFactory::BitwiseAnd(Isolate* isolate) {
BitwiseAndStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::BitwiseOr(Isolate* isolate) {
BitwiseOrStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::BitwiseXor(Isolate* isolate) {
BitwiseXorStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::Inc(Isolate* isolate) {
+ IncStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::Dec(Isolate* isolate) {
+ DecStub stub(isolate);
+ return make_callable(stub);
}
// static
Callable CodeFactory::LessThan(Isolate* isolate) {
LessThanStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::LessThanOrEqual(Isolate* isolate) {
LessThanOrEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::GreaterThan(Isolate* isolate) {
GreaterThanStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::GreaterThanOrEqual(Isolate* isolate) {
GreaterThanOrEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::Equal(Isolate* isolate) {
EqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::NotEqual(Isolate* isolate) {
NotEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StrictEqual(Isolate* isolate) {
StrictEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StrictNotEqual(Isolate* isolate) {
StrictNotEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
@@ -324,37 +399,37 @@ Callable CodeFactory::StringCompare(Isolate* isolate, Token::Value token) {
// static
Callable CodeFactory::StringEqual(Isolate* isolate) {
StringEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StringNotEqual(Isolate* isolate) {
StringNotEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StringLessThan(Isolate* isolate) {
StringLessThanStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
StringLessThanOrEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
StringGreaterThanStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
StringGreaterThanOrEqualStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
@@ -363,121 +438,115 @@ Callable CodeFactory::SubString(Isolate* isolate) {
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
-
// static
-Callable CodeFactory::StoreInterceptor(Isolate* isolate) {
- StoreInterceptorStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
+ return Callable(isolate->builtins()->ResumeGeneratorTrampoline(),
+ ResumeGeneratorDescriptor(isolate));
}
// static
Callable CodeFactory::Typeof(Isolate* isolate) {
TypeofStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
FastCloneRegExpStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::FastCloneShallowArray(Isolate* isolate) {
// TODO(mstarzinger): Thread through AllocationSiteMode at some point.
FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::FastCloneShallowObject(Isolate* isolate, int length) {
FastCloneShallowObjectStub stub(isolate, length);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
// static
-Callable CodeFactory::FastNewContext(Isolate* isolate, int slot_count) {
- FastNewContextStub stub(isolate, slot_count);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::FastNewFunctionContext(Isolate* isolate) {
+ FastNewFunctionContextStub stub(isolate);
+ return make_callable(stub);
}
-
// static
-Callable CodeFactory::FastNewClosure(Isolate* isolate,
- LanguageMode language_mode,
- FunctionKind kind) {
- FastNewClosureStub stub(isolate, language_mode, kind);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::FastNewClosure(Isolate* isolate) {
+ FastNewClosureStub stub(isolate);
+ return make_callable(stub);
}
-
// static
Callable CodeFactory::FastNewObject(Isolate* isolate) {
FastNewObjectStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+ return make_callable(stub);
}
-
// static
-Callable CodeFactory::FastNewRestParameter(Isolate* isolate) {
- FastNewRestParameterStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::FastNewRestParameter(Isolate* isolate,
+ bool skip_stub_frame) {
+ FastNewRestParameterStub stub(isolate, skip_stub_frame);
+ return make_callable(stub);
}
-
// static
-Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate) {
- FastNewSloppyArgumentsStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate,
+ bool skip_stub_frame) {
+ FastNewSloppyArgumentsStub stub(isolate, skip_stub_frame);
+ return make_callable(stub);
}
-
// static
-Callable CodeFactory::FastNewStrictArguments(Isolate* isolate) {
- FastNewStrictArgumentsStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::FastNewStrictArguments(Isolate* isolate,
+ bool skip_stub_frame) {
+ FastNewStrictArgumentsStub stub(isolate, skip_stub_frame);
+ return make_callable(stub);
}
+// static
+Callable CodeFactory::CopyFastSmiOrObjectElements(Isolate* isolate) {
+ return Callable(isolate->builtins()->CopyFastSmiOrObjectElements(),
+ CopyFastSmiOrObjectElementsDescriptor(isolate));
+}
// static
-Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
- AllocateHeapNumberStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::GrowFastDoubleElements(Isolate* isolate) {
+ return Callable(isolate->builtins()->GrowFastDoubleElements(),
+ GrowArrayElementsDescriptor(isolate));
}
+// static
+Callable CodeFactory::GrowFastSmiOrObjectElements(Isolate* isolate) {
+ return Callable(isolate->builtins()->GrowFastSmiOrObjectElements(),
+ GrowArrayElementsDescriptor(isolate));
+}
// static
-Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
- AllocateMutableHeapNumberStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
+ AllocateHeapNumberStub stub(isolate);
+ return make_callable(stub);
}
-#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
- Callable CodeFactory::Allocate##Type(Isolate* isolate) { \
- Allocate##Type##Stub stub(isolate); \
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor()); \
+#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
+ Callable CodeFactory::Allocate##Type(Isolate* isolate) { \
+ Allocate##Type##Stub stub(isolate); \
+ return make_callable(stub); \
}
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
// static
-Callable CodeFactory::AllocateInNewSpace(Isolate* isolate) {
- AllocateInNewSpaceStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
-}
-
-
-// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),
ArgumentAdaptorDescriptor(isolate));
}
-
// static
Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
@@ -485,44 +554,51 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode,
CallTrampolineDescriptor(isolate));
}
-
// static
Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
return Callable(isolate->builtins()->CallFunction(mode),
CallTrampolineDescriptor(isolate));
}
-
// static
Callable CodeFactory::Construct(Isolate* isolate) {
return Callable(isolate->builtins()->Construct(),
ConstructTrampolineDescriptor(isolate));
}
-
// static
Callable CodeFactory::ConstructFunction(Isolate* isolate) {
return Callable(isolate->builtins()->ConstructFunction(),
ConstructTrampolineDescriptor(isolate));
}
+// static
+Callable CodeFactory::HasProperty(Isolate* isolate) {
+ HasPropertyStub stub(isolate);
+ return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::ForInFilter(Isolate* isolate) {
+ ForInFilterStub stub(isolate);
+ return make_callable(stub);
+}
// static
Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
- TailCallMode tail_call_mode) {
- return Callable(
- isolate->builtins()->InterpreterPushArgsAndCall(tail_call_mode),
- InterpreterPushArgsAndCallDescriptor(isolate));
+ TailCallMode tail_call_mode,
+ CallableType function_type) {
+ return Callable(isolate->builtins()->InterpreterPushArgsAndCall(
+ tail_call_mode, function_type),
+ InterpreterPushArgsAndCallDescriptor(isolate));
}
-
// static
Callable CodeFactory::InterpreterPushArgsAndConstruct(Isolate* isolate) {
return Callable(isolate->builtins()->InterpreterPushArgsAndConstruct(),
InterpreterPushArgsAndConstructDescriptor(isolate));
}
-
// static
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// Note: If we ever use fpregs in the interpreter then we will need to
@@ -531,5 +607,11 @@ Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
return Callable(stub.GetCode(), InterpreterCEntryDescriptor(isolate));
}
+// static
+Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
+ return Callable(isolate->builtins()->InterpreterOnStackReplacement(),
+ ContextOnlyDescriptor(isolate));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index deb125f224..40b1ea447e 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -32,13 +32,14 @@ class Callable final BASE_EMBEDDED {
class CodeFactory final {
public:
// Initial states for ICs.
- static Callable LoadIC(Isolate* isolate, TypeofMode typeof_mode);
- static Callable LoadICInOptimizedCode(Isolate* isolate,
- TypeofMode typeof_mode,
- InlineCacheState initialization_state);
+ static Callable LoadIC(Isolate* isolate);
+ static Callable LoadICInOptimizedCode(Isolate* isolate);
+ static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
+ static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
+ TypeofMode typeof_mode);
static Callable KeyedLoadIC(Isolate* isolate);
- static Callable KeyedLoadICInOptimizedCode(
- Isolate* isolate, InlineCacheState initialization_state);
+ static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
+ static Callable KeyedLoadIC_Megamorphic(Isolate* isolate);
static Callable CallIC(Isolate* isolate, int argc,
ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
@@ -47,24 +48,26 @@ class CodeFactory final {
ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
- static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode,
- InlineCacheState initialization_state);
+ static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
- static Callable KeyedStoreICInOptimizedCode(
- Isolate* isolate, LanguageMode mode,
- InlineCacheState initialization_state);
+ static Callable KeyedStoreICInOptimizedCode(Isolate* isolate,
+ LanguageMode mode);
- static Callable StoreInterceptor(Isolate* isolate);
+ static Callable ResumeGenerator(Isolate* isolate);
static Callable CompareIC(Isolate* isolate, Token::Value op);
static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
static Callable BinaryOpIC(Isolate* isolate, Token::Value op);
+ static Callable ApiGetter(Isolate* isolate);
+
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
static Callable InstanceOf(Isolate* isolate);
+ static Callable GetProperty(Isolate* isolate);
+
static Callable ToBoolean(Isolate* isolate);
static Callable ToNumber(Isolate* isolate);
@@ -75,6 +78,10 @@ class CodeFactory final {
static Callable ToInteger(Isolate* isolate);
static Callable ToLength(Isolate* isolate);
static Callable ToObject(Isolate* isolate);
+ static Callable NonPrimitiveToPrimitive(
+ Isolate* isolate, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+ static Callable OrdinaryToPrimitive(Isolate* isolate,
+ OrdinaryToPrimitiveHint hint);
static Callable NumberToString(Isolate* isolate);
static Callable RegExpConstructResult(Isolate* isolate);
@@ -82,9 +89,17 @@ class CodeFactory final {
static Callable Add(Isolate* isolate);
static Callable Subtract(Isolate* isolate);
+ static Callable Multiply(Isolate* isolate);
+ static Callable Divide(Isolate* isolate);
+ static Callable Modulus(Isolate* isolate);
+ static Callable ShiftRight(Isolate* isolate);
+ static Callable ShiftRightLogical(Isolate* isolate);
+ static Callable ShiftLeft(Isolate* isolate);
static Callable BitwiseAnd(Isolate* isolate);
static Callable BitwiseOr(Isolate* isolate);
static Callable BitwiseXor(Isolate* isolate);
+ static Callable Inc(Isolate* isolate);
+ static Callable Dec(Isolate* isolate);
static Callable LessThan(Isolate* isolate);
static Callable LessThanOrEqual(Isolate* isolate);
static Callable GreaterThan(Isolate* isolate);
@@ -111,21 +126,25 @@ class CodeFactory final {
static Callable FastCloneShallowArray(Isolate* isolate);
static Callable FastCloneShallowObject(Isolate* isolate, int length);
- static Callable FastNewContext(Isolate* isolate, int slot_count);
- static Callable FastNewClosure(Isolate* isolate, LanguageMode language_mode,
- FunctionKind kind);
+ static Callable FastNewFunctionContext(Isolate* isolate);
+ static Callable FastNewClosure(Isolate* isolate);
static Callable FastNewObject(Isolate* isolate);
- static Callable FastNewRestParameter(Isolate* isolate);
- static Callable FastNewSloppyArguments(Isolate* isolate);
- static Callable FastNewStrictArguments(Isolate* isolate);
+ static Callable FastNewRestParameter(Isolate* isolate,
+ bool skip_stub_frame = false);
+ static Callable FastNewSloppyArguments(Isolate* isolate,
+ bool skip_stub_frame = false);
+ static Callable FastNewStrictArguments(Isolate* isolate,
+ bool skip_stub_frame = false);
+
+ static Callable CopyFastSmiOrObjectElements(Isolate* isolate);
+ static Callable GrowFastDoubleElements(Isolate* isolate);
+ static Callable GrowFastSmiOrObjectElements(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
- static Callable AllocateMutableHeapNumber(Isolate* isolate);
#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
static Callable Allocate##Type(Isolate* isolate);
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
- static Callable AllocateInNewSpace(Isolate* isolate);
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
@@ -135,11 +154,15 @@ class CodeFactory final {
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable Construct(Isolate* isolate);
static Callable ConstructFunction(Isolate* isolate);
+ static Callable HasProperty(Isolate* isolate);
+ static Callable ForInFilter(Isolate* isolate);
- static Callable InterpreterPushArgsAndCall(Isolate* isolate,
- TailCallMode tail_call_mode);
+ static Callable InterpreterPushArgsAndCall(
+ Isolate* isolate, TailCallMode tail_call_mode,
+ CallableType function_type = CallableType::kAny);
static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
+ static Callable InterpreterOnStackReplacement(Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
new file mode 100644
index 0000000000..06552bad26
--- /dev/null
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -0,0 +1,4023 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+#include "src/code-factory.h"
+#include "src/frames-inl.h"
+#include "src/frames.h"
+#include "src/ic/handler-configuration.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name,
+ size_t result_size)
+ : compiler::CodeAssembler(isolate, zone, descriptor, flags, name,
+ result_size) {}
+
+CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
+ int parameter_count, Code::Flags flags,
+ const char* name)
+ : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
+
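+// Debug-only check: in release builds this expands to nothing.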
+void CodeStubAssembler::Assert(Node* condition) {
+#if defined(DEBUG)
+ Label ok(this);
+ Comment("[ Assert");
+ GotoIf(condition, &ok);
+ DebugBreak();
+ Goto(&ok);
+ Bind(&ok);
+ Comment("] Assert");
+#endif
+}
+
+Node* CodeStubAssembler::BooleanMapConstant() {
+ return HeapConstant(isolate()->factory()->boolean_map());
+}
+
+Node* CodeStubAssembler::EmptyStringConstant() {
+ return LoadRoot(Heap::kempty_stringRootIndex);
+}
+
+Node* CodeStubAssembler::HeapNumberMapConstant() {
+ return HeapConstant(isolate()->factory()->heap_number_map());
+}
+
+Node* CodeStubAssembler::NoContextConstant() {
+ return SmiConstant(Smi::FromInt(0));
+}
+
+Node* CodeStubAssembler::MinusZeroConstant() {
+ return LoadRoot(Heap::kMinusZeroValueRootIndex);
+}
+
+Node* CodeStubAssembler::NanConstant() {
+ return LoadRoot(Heap::kNanValueRootIndex);
+}
+
+Node* CodeStubAssembler::NullConstant() {
+ return LoadRoot(Heap::kNullValueRootIndex);
+}
+
+Node* CodeStubAssembler::UndefinedConstant() {
+ return LoadRoot(Heap::kUndefinedValueRootIndex);
+}
+
+Node* CodeStubAssembler::TheHoleConstant() {
+ return LoadRoot(Heap::kTheHoleValueRootIndex);
+}
+
+Node* CodeStubAssembler::HashSeed() {
+ return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
+}
+
+Node* CodeStubAssembler::StaleRegisterConstant() {
+ return LoadRoot(Heap::kStaleRegisterRootIndex);
+}
+
+Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
+ if (mode == SMI_PARAMETERS) {
+ return SmiConstant(Smi::FromInt(value));
+ } else {
+ DCHECK_EQ(INTEGER_PARAMETERS, mode);
+ return IntPtrConstant(value);
+ }
+}
+
+Node* CodeStubAssembler::Float64Round(Node* x) {
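+  // Rounds ties towards +Infinity, e.g. 2.5 -> 3.0 but -2.5 -> -2.0, which
+  // matches JavaScript's Math.round.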
+ Node* one = Float64Constant(1.0);
+ Node* one_half = Float64Constant(0.5);
+
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this);
+
+ // Round up {x} towards Infinity.
+ var_x.Bind(Float64Ceil(x));
+
+ GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
+ &return_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Ceil(Node* x) {
+ if (IsFloat64RoundUpSupported()) {
+ return Float64RoundUp(x);
+ }
+
+ Node* one = Float64Constant(1.0);
+ Node* zero = Float64Constant(0.0);
+ Node* two_52 = Float64Constant(4503599627370496.0E0);
+ Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
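+  // The 2^52 trick below exploits IEEE-754 double rounding: doubles carry
+  // 52 fraction bits, so for 0 < {x} < 2^52 the expression (2^52 + x) - 2^52
+  // rounds {x} to an integer, after which a one-ulp correction selects the
+  // ceiling when the rounding went downwards.
+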
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this), return_minus_x(this);
+ var_x.Bind(x);
+
+ // Check if {x} is greater than zero.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+
+ Bind(&if_xgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards Infinity.
+ var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+ GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+ var_x.Bind(Float64Add(var_x.value(), one));
+ Goto(&return_x);
+ }
+
+ Bind(&if_xnotgreaterthanzero);
+ {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoUnless(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards Infinity and return the result negated.
+ Node* minus_x = Float64Neg(x);
+ var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_minus_x);
+ }
+
+ Bind(&return_minus_x);
+ var_x.Bind(Float64Neg(var_x.value()));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Floor(Node* x) {
+ if (IsFloat64RoundDownSupported()) {
+ return Float64RoundDown(x);
+ }
+
+ Node* one = Float64Constant(1.0);
+ Node* zero = Float64Constant(0.0);
+ Node* two_52 = Float64Constant(4503599627370496.0E0);
+ Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this), return_minus_x(this);
+ var_x.Bind(x);
+
+ // Check if {x} is greater than zero.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+
+ Bind(&if_xgreaterthanzero);
+ {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards -Infinity.
+ var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_x);
+ }
+
+ Bind(&if_xnotgreaterthanzero);
+ {
+    // Just return {x} unless it's in the range ]-2^52,0[.
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoUnless(Float64LessThan(x, zero), &return_x);
+
+ // Round negated {x} towards -Infinity and return the result negated.
+ Node* minus_x = Float64Neg(x);
+ var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+ GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+ var_x.Bind(Float64Add(var_x.value(), one));
+ Goto(&return_minus_x);
+ }
+
+ Bind(&return_minus_x);
+ var_x.Bind(Float64Neg(var_x.value()));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
+}
+
+Node* CodeStubAssembler::Float64Trunc(Node* x) {
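+  // Truncation rounds towards zero: round down for positive {x} and up for
+  // negative {x}, preferring the direct machine instructions when available.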
+ if (IsFloat64RoundTruncateSupported()) {
+ return Float64RoundTruncate(x);
+ }
+
+ Node* one = Float64Constant(1.0);
+ Node* zero = Float64Constant(0.0);
+ Node* two_52 = Float64Constant(4503599627370496.0E0);
+ Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
+
+ Variable var_x(this, MachineRepresentation::kFloat64);
+ Label return_x(this), return_minus_x(this);
+ var_x.Bind(x);
+
+ // Check if {x} is greater than 0.
+ Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
+ Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
+ &if_xnotgreaterthanzero);
+
+ Bind(&if_xgreaterthanzero);
+ {
+ if (IsFloat64RoundDownSupported()) {
+ var_x.Bind(Float64RoundDown(x));
+ } else {
+ // Just return {x} unless it's in the range ]0,2^52[.
+ GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
+
+ // Round positive {x} towards -Infinity.
+ var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ }
+ Goto(&return_x);
+ }
+
+ Bind(&if_xnotgreaterthanzero);
+ {
+ if (IsFloat64RoundUpSupported()) {
+ var_x.Bind(Float64RoundUp(x));
+ Goto(&return_x);
+ } else {
+      // Just return {x} unless it's in the range ]-2^52,0[.
+ GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
+ GotoUnless(Float64LessThan(x, zero), &return_x);
+
+      // Round negated {x} towards -Infinity and return the result negated.
+ Node* minus_x = Float64Neg(x);
+ var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
+ GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+ var_x.Bind(Float64Sub(var_x.value(), one));
+ Goto(&return_minus_x);
+ }
+ }
+
+ Bind(&return_minus_x);
+ var_x.Bind(Float64Neg(var_x.value()));
+ Goto(&return_x);
+
+ Bind(&return_x);
+ return var_x.value();
+}
+
+Node* CodeStubAssembler::SmiShiftBitsConstant() {
+ return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
+Node* CodeStubAssembler::SmiFromWord32(Node* value) {
+ value = ChangeInt32ToIntPtr(value);
+ return WordShl(value, SmiShiftBitsConstant());
+}
+
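+// Tagging shifts the payload left by kSmiShiftSize + kSmiTagSize, leaving
+// zero tag bits; untagging is the matching arithmetic shift right, so no
+// masking is needed.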
+Node* CodeStubAssembler::SmiTag(Node* value) {
+ int32_t constant_value;
+ if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
+ return SmiConstant(Smi::FromInt(constant_value));
+ }
+ return WordShl(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiUntag(Node* value) {
+ return WordSar(value, SmiShiftBitsConstant());
+}
+
+Node* CodeStubAssembler::SmiToWord32(Node* value) {
+ Node* result = WordSar(value, SmiShiftBitsConstant());
+ if (Is64()) {
+ result = TruncateInt64ToInt32(result);
+ }
+ return result;
+}
+
+Node* CodeStubAssembler::SmiToFloat64(Node* value) {
+ return ChangeInt32ToFloat64(SmiToWord32(value));
+}
+
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
+
+Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
+ return IntPtrAddWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
+
+Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
+ return IntPtrSubWithOverflow(a, b);
+}
+
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+
+Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
+ return UintPtrGreaterThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
+ return IntPtrLessThan(a, b);
+}
+
+Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
+ return IntPtrLessThanOrEqual(a, b);
+}
+
+Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
+ // TODO(bmeurer): Consider using Select once available.
+ Variable min(this, MachineRepresentation::kTagged);
+ Label if_a(this), if_b(this), join(this);
+ BranchIfSmiLessThan(a, b, &if_a, &if_b);
+ Bind(&if_a);
+ min.Bind(a);
+ Goto(&join);
+ Bind(&if_b);
+ min.Bind(b);
+ Goto(&join);
+ Bind(&join);
+ return min.value();
+}
+
+Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
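+  // Smi fast path for JavaScript's % operator: the result takes the sign of
+  // the dividend, so a zero remainder with negative {a} yields -0, and a
+  // zero {b} yields NaN.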
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label return_result(this, &var_result),
+ return_minuszero(this, Label::kDeferred),
+ return_nan(this, Label::kDeferred);
+
+ // Untag {a} and {b}.
+ a = SmiToWord32(a);
+ b = SmiToWord32(b);
+
+ // Return NaN if {b} is zero.
+ GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan);
+
+ // Check if {a} is non-negative.
+ Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred);
+ Branch(Int32LessThanOrEqual(Int32Constant(0), a), &if_aisnotnegative,
+ &if_aisnegative);
+
+ Bind(&if_aisnotnegative);
+ {
+ // Fast case, don't need to check any other edge cases.
+ Node* r = Int32Mod(a, b);
+ var_result.Bind(SmiFromWord32(r));
+ Goto(&return_result);
+ }
+
+ Bind(&if_aisnegative);
+ {
+ if (SmiValuesAre32Bits()) {
+ // Check if {a} is kMinInt and {b} is -1 (only relevant if the
+ // kMinInt is actually representable as a Smi).
+ Label join(this);
+ GotoUnless(Word32Equal(a, Int32Constant(kMinInt)), &join);
+ GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero);
+ Goto(&join);
+ Bind(&join);
+ }
+
+ // Perform the integer modulus operation.
+ Node* r = Int32Mod(a, b);
+
+ // Check if {r} is zero, and if so return -0, because we have to
+ // take the sign of the left hand side {a}, which is negative.
+ GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero);
+
+ // The remainder {r} can be outside the valid Smi range on 32bit
+ // architectures, so we cannot just say SmiFromWord32(r) here.
+ var_result.Bind(ChangeInt32ToTagged(r));
+ Goto(&return_result);
+ }
+
+ Bind(&return_minuszero);
+ var_result.Bind(MinusZeroConstant());
+ Goto(&return_result);
+
+ Bind(&return_nan);
+ var_result.Bind(NanConstant());
+ Goto(&return_result);
+
+ Bind(&return_result);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
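+  // Multiply in 32 bits with an overflow check; a zero product still needs
+  // the sign test below, since e.g. -3 * 0 must yield -0.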
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Variable var_lhs_float64(this, MachineRepresentation::kFloat64),
+ var_rhs_float64(this, MachineRepresentation::kFloat64);
+ Label return_result(this, &var_result);
+
+ // Both {a} and {b} are Smis. Convert them to integers and multiply.
+ Node* lhs32 = SmiToWord32(a);
+ Node* rhs32 = SmiToWord32(b);
+ Node* pair = Int32MulWithOverflow(lhs32, rhs32);
+
+ Node* overflow = Projection(1, pair);
+
+ // Check if the multiplication overflowed.
+ Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+ Bind(&if_notoverflow);
+ {
+ // If the answer is zero, we may need to return -0.0, depending on the
+ // input.
+ Label answer_zero(this), answer_not_zero(this);
+ Node* answer = Projection(0, pair);
+ Node* zero = Int32Constant(0);
+ Branch(WordEqual(answer, zero), &answer_zero, &answer_not_zero);
+ Bind(&answer_not_zero);
+ {
+ var_result.Bind(ChangeInt32ToTagged(answer));
+ Goto(&return_result);
+ }
+ Bind(&answer_zero);
+ {
+ Node* or_result = Word32Or(lhs32, rhs32);
+ Label if_should_be_negative_zero(this), if_should_be_zero(this);
+ Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero,
+ &if_should_be_zero);
+ Bind(&if_should_be_negative_zero);
+ {
+ var_result.Bind(MinusZeroConstant());
+ Goto(&return_result);
+ }
+ Bind(&if_should_be_zero);
+ {
+ var_result.Bind(zero);
+ Goto(&return_result);
+ }
+ }
+ }
+ Bind(&if_overflow);
+ {
+ var_lhs_float64.Bind(SmiToFloat64(a));
+ var_rhs_float64.Bind(SmiToFloat64(b));
+ Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+ Node* result = ChangeFloat64ToTagged(value);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
+
+ Bind(&return_result);
+ return var_result.value();
+}
+
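+// A word is a Smi iff its tag bits are zero; a positive Smi additionally
+// has a clear sign bit.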
+Node* CodeStubAssembler::WordIsSmi(Node* a) {
+ return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
+ return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+ IntPtrConstant(0));
+}
+
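+// Branches according to SameValueZero: identical references compare equal,
+// NaN compares equal to NaN, and +0 compares equal to -0; otherwise heap
+// numbers, strings and Simd128Values are compared by value.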
+void CodeStubAssembler::BranchIfSameValueZero(Node* a, Node* b, Node* context,
+ Label* if_true, Label* if_false) {
+ Node* number_map = HeapNumberMapConstant();
+ Label a_isnumber(this), a_isnotnumber(this), b_isnumber(this), a_isnan(this),
+ float_not_equal(this);
+  // If registers A and B are identical, goto `if_true`.
+  GotoIf(WordEqual(a, b), if_true);
+  // If either register A or B is a Smi, goto `if_false`.
+  GotoIf(Word32Or(WordIsSmi(a), WordIsSmi(b)), if_false);
+
+ Node* a_map = LoadMap(a);
+ Node* b_map = LoadMap(b);
+ Branch(WordEqual(a_map, number_map), &a_isnumber, &a_isnotnumber);
+
+ // If both register A and B are HeapNumbers, return true if they are equal,
+ // or if both are NaN
+ Bind(&a_isnumber);
+ {
+ Branch(WordEqual(b_map, number_map), &b_isnumber, if_false);
+
+ Bind(&b_isnumber);
+ Node* a_value = LoadHeapNumberValue(a);
+ Node* b_value = LoadHeapNumberValue(b);
+ BranchIfFloat64Equal(a_value, b_value, if_true, &float_not_equal);
+
+ Bind(&float_not_equal);
+ BranchIfFloat64IsNaN(a_value, &a_isnan, if_false);
+
+    Bind(&a_isnan);
+    // {a} is NaN; the values are SameValueZero-equal only if {b} is NaN too.
+    BranchIfFloat64IsNaN(b_value, if_true, if_false);
+ }
+
+ Bind(&a_isnotnumber);
+ {
+ Label a_isstring(this), a_isnotstring(this);
+ Node* a_instance_type = LoadMapInstanceType(a_map);
+
+ Branch(Int32LessThan(a_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
+ &a_isstring, &a_isnotstring);
+
+ Bind(&a_isstring);
+ {
+ Label b_isstring(this), b_isnotstring(this);
+ Node* b_instance_type = LoadInstanceType(b_map);
+
+ Branch(
+ Int32LessThan(b_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
+ &b_isstring, if_false);
+
+ Bind(&b_isstring);
+ {
+ Callable callable = CodeFactory::StringEqual(isolate());
+ Node* result = CallStub(callable, context, a, b);
+ Branch(WordEqual(BooleanConstant(true), result), if_true, if_false);
+ }
+ }
+
+ Bind(&a_isnotstring);
+ {
+      // Check if {a} is a Simd128Value.
+ Label a_issimd128value(this);
+ Branch(Word32Equal(a_instance_type, Int32Constant(SIMD128_VALUE_TYPE)),
+ &a_issimd128value, if_false);
+
+ Bind(&a_issimd128value);
+ {
+        // Compare {a} and {b} as Simd128Values (lane-wise for Float32x4).
+ BranchIfSimd128Equal(a, a_map, b, b_map, if_true, if_false);
+ }
+ }
+ }
+}
+
+void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
+ Node* rhs, Node* rhs_map,
+ Label* if_equal,
+ Label* if_notequal) {
+ Label if_mapsame(this), if_mapnotsame(this);
+ Branch(WordEqual(lhs_map, rhs_map), &if_mapsame, &if_mapnotsame);
+
+ Bind(&if_mapsame);
+ {
+ // Both {lhs} and {rhs} are Simd128Values with the same map, need special
+ // handling for Float32x4 because of NaN comparisons.
+ Label if_float32x4(this), if_notfloat32x4(this);
+ Node* float32x4_map = HeapConstant(factory()->float32x4_map());
+ Branch(WordEqual(lhs_map, float32x4_map), &if_float32x4, &if_notfloat32x4);
+
+ Bind(&if_float32x4);
+ {
+ // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
+ // using a floating point comparison.
+ for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
+ offset < Float32x4::kSize - kHeapObjectTag;
+ offset += sizeof(float)) {
+ // Load the floating point values for {lhs} and {rhs}.
+ Node* lhs_value =
+ Load(MachineType::Float32(), lhs, IntPtrConstant(offset));
+ Node* rhs_value =
+ Load(MachineType::Float32(), rhs, IntPtrConstant(offset));
+
+ // Perform a floating point comparison.
+ Label if_valueequal(this), if_valuenotequal(this);
+ Branch(Float32Equal(lhs_value, rhs_value), &if_valueequal,
+ &if_valuenotequal);
+ Bind(&if_valuenotequal);
+ Goto(if_notequal);
+ Bind(&if_valueequal);
+ }
+
+      // All 4 lanes match, so {lhs} and {rhs} are considered equal.
+ Goto(if_equal);
+ }
+
+ Bind(&if_notfloat32x4);
+ {
+ // For other Simd128Values we just perform a bitwise comparison.
+ for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
+ offset < Simd128Value::kSize - kHeapObjectTag;
+ offset += kPointerSize) {
+ // Load the word values for {lhs} and {rhs}.
+ Node* lhs_value =
+ Load(MachineType::Pointer(), lhs, IntPtrConstant(offset));
+ Node* rhs_value =
+ Load(MachineType::Pointer(), rhs, IntPtrConstant(offset));
+
+ // Perform a bitwise word-comparison.
+ Label if_valueequal(this), if_valuenotequal(this);
+ Branch(WordEqual(lhs_value, rhs_value), &if_valueequal,
+ &if_valuenotequal);
+ Bind(&if_valuenotequal);
+ Goto(if_notequal);
+ Bind(&if_valueequal);
+ }
+
+      // Bitwise comparison succeeded; {lhs} and {rhs} are considered equal.
+ Goto(if_equal);
+ }
+ }
+
+ Bind(&if_mapnotsame);
+ Goto(if_notequal);
+}
+
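+// Takes {if_true} only for JSArrays with fast elements kinds; for holey
+// kinds the whole prototype chain must additionally consist of ordinary
+// objects with empty elements, so that loads of holes cannot be observed.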
+void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
+ Label* if_true, Label* if_false) {
+ Node* int32_zero = Int32Constant(0);
+ Node* int32_one = Int32Constant(1);
+
+ Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+
+ Variable last_map(this, MachineRepresentation::kTagged);
+ Label check_prototype(this);
+
+ // Bailout if Smi
+ GotoIf(WordIsSmi(object), if_false);
+
+ Node* map = LoadMap(object);
+ last_map.Bind(map);
+
+ // Bailout if instance type is not JS_ARRAY_TYPE
+ GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
+ if_false);
+
+ Node* bit_field2 = LoadMapBitField2(map);
+ Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+
+ // Bailout if slow receiver elements
+ GotoIf(
+ Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ if_false);
+
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
+
+ // Check prototype chain if receiver does not have packed elements
+ Node* holey_elements = Word32And(elements_kind, int32_one);
+ Branch(Word32Equal(holey_elements, int32_zero), if_true, &check_prototype);
+
+ Bind(&check_prototype);
+ {
+ Label loop_body(this, &last_map);
+ Goto(&loop_body);
+ Bind(&loop_body);
+ Node* current_map = last_map.value();
+ Node* proto = LoadObjectField(current_map, Map::kPrototypeOffset);
+
+ // End loop
+ GotoIf(WordEqual(proto, NullConstant()), if_true);
+
+ // ASSERT: proto->IsHeapObject()
+ Node* proto_map = LoadMap(proto);
+
+    // Bail out if a Proxy, API object, or JSValue wrapper is found in the
+    // prototype chain. Because of this bailout, it's not necessary to check
+    // for interceptors or access checks on the prototype chain.
+ GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(proto_map),
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ if_false);
+
+ // Bailout if prototype contains non-empty elements
+ GotoUnless(WordEqual(LoadElements(proto), empty_elements), if_false);
+
+ last_map.Bind(proto_map);
+ Goto(&loop_body);
+ }
+}
+
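+// Bump-pointer allocation: advance the space's top pointer by
+// {size_in_bytes}, falling back to the runtime once the limit would be
+// crossed.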
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+ AllocationFlags flags,
+ Node* top_address,
+ Node* limit_address) {
+ Node* top = Load(MachineType::Pointer(), top_address);
+ Node* limit = Load(MachineType::Pointer(), limit_address);
+
+ // If there's not enough space, call the runtime.
+ Variable result(this, MachineRepresentation::kTagged);
+ Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
+ Label merge_runtime(this, &result);
+
+ Node* new_top = IntPtrAdd(top, size_in_bytes);
+ Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
+ &no_runtime_call);
+
+ Bind(&runtime_call);
+ // AllocateInTargetSpace does not use the context.
+ Node* context = SmiConstant(Smi::FromInt(0));
+
+ Node* runtime_result;
+ if (flags & kPretenured) {
+ Node* runtime_flags = SmiConstant(
+ Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
+ runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
+ SmiTag(size_in_bytes), runtime_flags);
+ } else {
+ runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, context,
+ SmiTag(size_in_bytes));
+ }
+ result.Bind(runtime_result);
+ Goto(&merge_runtime);
+
+  // When there is enough space, return {top} and bump it up.
+ Bind(&no_runtime_call);
+ Node* no_runtime_result = top;
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+ new_top);
+ no_runtime_result = BitcastWordToTagged(
+ IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag)));
+ result.Bind(no_runtime_result);
+ Goto(&merge_runtime);
+
+ Bind(&merge_runtime);
+ return result.value();
+}
+
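+// Double-aligned allocation for 32-bit targets: if the current top is
+// misaligned, the request grows by kPointerSize and the spare slot is
+// plugged with a one-pointer filler before the aligned address is returned.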
+Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
+ AllocationFlags flags,
+ Node* top_address,
+ Node* limit_address) {
+ Node* top = Load(MachineType::Pointer(), top_address);
+ Node* limit = Load(MachineType::Pointer(), limit_address);
+ Variable adjusted_size(this, MachineType::PointerRepresentation());
+ adjusted_size.Bind(size_in_bytes);
+ if (flags & kDoubleAlignment) {
+ // TODO(epertoso): Simd128 alignment.
+ Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
+ Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
+ &aligned);
+
+ Bind(&not_aligned);
+ Node* not_aligned_size =
+ IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
+ adjusted_size.Bind(not_aligned_size);
+ Goto(&merge);
+
+ Bind(&aligned);
+ Goto(&merge);
+
+ Bind(&merge);
+ }
+
+ Variable address(this, MachineRepresentation::kTagged);
+ address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
+
+ Label needs_filler(this), doesnt_need_filler(this),
+ merge_address(this, &address);
+ Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &doesnt_need_filler,
+ &needs_filler);
+
+ Bind(&needs_filler);
+ // Store a filler and increase the address by kPointerSize.
+ // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
+ // it when Simd128 alignment is supported.
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
+ LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+ address.Bind(BitcastWordToTagged(
+ IntPtrAdd(address.value(), IntPtrConstant(kPointerSize))));
+ Goto(&merge_address);
+
+ Bind(&doesnt_need_filler);
+ Goto(&merge_address);
+
+ Bind(&merge_address);
+ // Update the top.
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
+ IntPtrAdd(top, adjusted_size.value()));
+ return address.value();
+}
+
+Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
+ bool const new_space = !(flags & kPretenured);
+ Node* top_address = ExternalConstant(
+ new_space
+ ? ExternalReference::new_space_allocation_top_address(isolate())
+ : ExternalReference::old_space_allocation_top_address(isolate()));
+ Node* limit_address = ExternalConstant(
+ new_space
+ ? ExternalReference::new_space_allocation_limit_address(isolate())
+ : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+#ifdef V8_HOST_ARCH_32_BIT
+ if (flags & kDoubleAlignment) {
+ return AllocateRawAligned(size_in_bytes, flags, top_address, limit_address);
+ }
+#endif
+
+ return AllocateRawUnaligned(size_in_bytes, flags, top_address, limit_address);
+}
+
+Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
+ return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
+}
+
+Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
+ return BitcastWordToTagged(IntPtrAdd(previous, offset));
+}
+
+Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
+ return InnerAllocate(previous, IntPtrConstant(offset));
+}
+
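+// Branches on the ES ToBoolean of {value} without materializing a Boolean:
+// Smi zero, the empty string, +/-0.0, NaN and undetectable objects are all
+// false.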
+void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
+ Label* if_false) {
+ Label if_valueissmi(this), if_valueisnotsmi(this), if_valueisstring(this),
+ if_valueisheapnumber(this), if_valueisother(this);
+
+ // Fast check for Boolean {value}s (common case).
+ GotoIf(WordEqual(value, BooleanConstant(true)), if_true);
+ GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
+
+ // Check if {value} is a Smi or a HeapObject.
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // The {value} is a Smi, only need to check against zero.
+ BranchIfSmiEqual(value, SmiConstant(0), if_false, if_true);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // The {value} is a HeapObject, load its map.
+ Node* value_map = LoadMap(value);
+
+ // Load the {value}s instance type.
+ Node* value_instance_type = LoadMapInstanceType(value_map);
+
+ // Dispatch based on the instance type; we distinguish all String instance
+ // types, the HeapNumber type and everything else.
+ GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_valueisheapnumber);
+ Branch(
+ Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_valueisstring, &if_valueisother);
+
+ Bind(&if_valueisstring);
+ {
+ // Load the string length field of the {value}.
+ Node* value_length = LoadObjectField(value, String::kLengthOffset);
+
+ // Check if the {value} is the empty string.
+ BranchIfSmiEqual(value_length, SmiConstant(0), if_false, if_true);
+ }
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Load the floating point value of {value}.
+ Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset,
+ MachineType::Float64());
+
+ // Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
+ Node* zero = Float64Constant(0.0);
+ GotoIf(Float64LessThan(zero, value_value), if_true);
+ BranchIfFloat64LessThan(value_value, zero, if_true, if_false);
+ }
+
+ Bind(&if_valueisother);
+ {
+ // Load the bit field from the {value}s map. The {value} is now either
+ // Null or Undefined, which have the undetectable bit set (so we always
+ // return false for those), or a Symbol or Simd128Value, whose maps never
+ // have the undetectable bit set (so we always return true for those), or
+ // a JSReceiver, which may or may not have the undetectable bit set.
+ Node* value_map_bitfield = LoadMapBitField(value_map);
+ Node* value_map_undetectable = Word32And(
+ value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
+
+ // Check if the {value} is undetectable.
+ BranchIfWord32Equal(value_map_undetectable, Int32Constant(0), if_true,
+ if_false);
+ }
+ }
+}
+
+compiler::Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
+ Node* frame_pointer = LoadFramePointer();
+ return Load(rep, frame_pointer, IntPtrConstant(offset));
+}
+
+compiler::Node* CodeStubAssembler::LoadFromParentFrame(int offset,
+ MachineType rep) {
+ Node* frame_pointer = LoadParentFramePointer();
+ return Load(rep, frame_pointer, IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
+ MachineType rep) {
+ return Load(rep, buffer, IntPtrConstant(offset));
+}
+
+Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
+ MachineType rep) {
+ return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
+}
+
+Node* CodeStubAssembler::LoadObjectField(Node* object, Node* offset,
+ MachineType rep) {
+ return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+}
+
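+// On 64-bit targets a Smi's payload occupies the upper half of the word, so
+// the untagged value can be read directly with a 32-bit load at the adjusted
+// offset; 32-bit targets load the tagged word and shift it.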
+Node* CodeStubAssembler::LoadAndUntagObjectField(Node* object, int offset) {
+ if (Is64()) {
+#if V8_TARGET_LITTLE_ENDIAN
+ offset += kPointerSize / 2;
+#endif
+ return ChangeInt32ToInt64(
+ LoadObjectField(object, offset, MachineType::Int32()));
+ } else {
+ return SmiToWord(LoadObjectField(object, offset, MachineType::AnyTagged()));
+ }
+}
+
+Node* CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
+ int offset) {
+ if (Is64()) {
+#if V8_TARGET_LITTLE_ENDIAN
+ offset += kPointerSize / 2;
+#endif
+ return LoadObjectField(object, offset, MachineType::Int32());
+ } else {
+ return SmiToWord32(
+ LoadObjectField(object, offset, MachineType::AnyTagged()));
+ }
+}
+
+Node* CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
+ if (Is64()) {
+#if V8_TARGET_LITTLE_ENDIAN
+ index += kPointerSize / 2;
+#endif
+ return ChangeInt32ToInt64(
+ Load(MachineType::Int32(), base, IntPtrConstant(index)));
+ } else {
+ return SmiToWord(
+ Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
+ }
+}
+
+Node* CodeStubAssembler::LoadAndUntagToWord32Root(
+ Heap::RootListIndex root_index) {
+ Node* roots_array_start =
+ ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ int index = root_index * kPointerSize;
+ if (Is64()) {
+#if V8_TARGET_LITTLE_ENDIAN
+ index += kPointerSize / 2;
+#endif
+ return Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index));
+ } else {
+ return SmiToWord32(Load(MachineType::AnyTagged(), roots_array_start,
+ IntPtrConstant(index)));
+ }
+}
+
+Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
+ return LoadObjectField(object, HeapNumber::kValueOffset,
+ MachineType::Float64());
+}
+
+Node* CodeStubAssembler::LoadMap(Node* object) {
+ return LoadObjectField(object, HeapObject::kMapOffset);
+}
+
+Node* CodeStubAssembler::LoadInstanceType(Node* object) {
+ return LoadMapInstanceType(LoadMap(object));
+}
+
+void CodeStubAssembler::AssertInstanceType(Node* object,
+ InstanceType instance_type) {
+ Assert(Word32Equal(LoadInstanceType(object), Int32Constant(instance_type)));
+}
+
+Node* CodeStubAssembler::LoadProperties(Node* object) {
+ return LoadObjectField(object, JSObject::kPropertiesOffset);
+}
+
+Node* CodeStubAssembler::LoadElements(Node* object) {
+ return LoadObjectField(object, JSObject::kElementsOffset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(compiler::Node* array) {
+ return LoadObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
+Node* CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(Node* array) {
+ return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
+}
+
+Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+ return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8());
+}
+
+Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+ return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8());
+}
+
+Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+ return LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32());
+}
+
+Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
+ return LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint8());
+}
+
+Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+ return LoadObjectField(map, Map::kDescriptorsOffset);
+}
+
+Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
+ return LoadObjectField(map, Map::kPrototypeOffset);
+}
+
+Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
+ return LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8());
+}
+
+Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
+ // See Map::GetInObjectProperties() for details.
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ Assert(Int32GreaterThanOrEqual(LoadMapInstanceType(map),
+ Int32Constant(FIRST_JS_OBJECT_TYPE)));
+ return LoadObjectField(
+ map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ MachineType::Uint8());
+}
+
+Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
+ Variable result(this, MachineRepresentation::kTagged);
+ result.Bind(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
+
+ Label done(this), loop(this, &result);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ GotoIf(WordIsSmi(result.value()), &done);
+ Node* is_map_type =
+ Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE));
+ GotoUnless(is_map_type, &done);
+ result.Bind(
+ LoadObjectField(result.value(), Map::kConstructorOrBackPointerOffset));
+ Goto(&loop);
+ }
+ Bind(&done);
+ return result.value();
+}
+
+Node* CodeStubAssembler::LoadNameHashField(Node* name) {
+ return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
+}
+
+Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
+ Node* hash_field = LoadNameHashField(name);
+ if (if_hash_not_computed != nullptr) {
+ GotoIf(WordEqual(
+ Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
+ Int32Constant(0)),
+ if_hash_not_computed);
+ }
+ return Word32Shr(hash_field, Int32Constant(Name::kHashShift));
+}
+
+Node* CodeStubAssembler::LoadStringLength(Node* object) {
+ return LoadObjectField(object, String::kLengthOffset);
+}
+
+Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
+ return LoadObjectField(object, JSValue::kValueOffset);
+}
+
+Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
+ Node* value = LoadObjectField(weak_cell, WeakCell::kValueOffset);
+ if (if_cleared != nullptr) {
+ GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
+ }
+ return value;
+}
+
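+// Allocates a FixedArray whose elements are left uninitialized; callers must
+// fill every slot before a GC can observe the array.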
+Node* CodeStubAssembler::AllocateUninitializedFixedArray(Node* length) {
+ Node* header_size = IntPtrConstant(FixedArray::kHeaderSize);
+ Node* data_size = WordShl(length, IntPtrConstant(kPointerSizeLog2));
+ Node* total_size = IntPtrAdd(data_size, header_size);
+
+ Node* result = Allocate(total_size, kNone);
+ StoreMapNoWriteBarrier(result, LoadRoot(Heap::kFixedArrayMapRootIndex));
+ StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+ SmiTag(length));
+
+ return result;
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
+ int additional_offset,
+ ParameterMode parameter_mode) {
+ int32_t header_size =
+ FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
+ Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+ parameter_mode, header_size);
+ return Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
+ Node* object, Node* index_node, int additional_offset,
+ ParameterMode parameter_mode) {
+ int32_t header_size =
+ FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
+#if V8_TARGET_LITTLE_ENDIAN
+ if (Is64()) {
+ header_size += kPointerSize / 2;
+ }
+#endif
+ Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+ parameter_mode, header_size);
+ if (Is64()) {
+ return Load(MachineType::Int32(), object, offset);
+ } else {
+ return SmiToWord32(Load(MachineType::AnyTagged(), object, offset));
+ }
+}
+
+Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
+ Node* object, Node* index_node, MachineType machine_type,
+ int additional_offset, ParameterMode parameter_mode) {
+ int32_t header_size =
+ FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
+ Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
+ parameter_mode, header_size);
+ return Load(machine_type, object, offset);
+}
+
+Node* CodeStubAssembler::LoadNativeContext(Node* context) {
+ return LoadFixedArrayElement(context,
+ Int32Constant(Context::NATIVE_CONTEXT_INDEX));
+}
+
+Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
+ Node* native_context) {
+ return LoadFixedArrayElement(native_context,
+ Int32Constant(Context::ArrayMapIndex(kind)));
+}
+
+Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
+ return StoreNoWriteBarrier(
+ MachineRepresentation::kFloat64, object,
+ IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreObjectField(
+ Node* object, int offset, Node* value) {
+ return Store(MachineRepresentation::kTagged, object,
+ IntPtrConstant(offset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
+ Node* object, int offset, Node* value, MachineRepresentation rep) {
+ return StoreNoWriteBarrier(rep, object,
+ IntPtrConstant(offset - kHeapObjectTag), value);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+ return StoreNoWriteBarrier(
+ MachineRepresentation::kTagged, object,
+ IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
+ Heap::RootListIndex root_index) {
+ if (Heap::RootIsImmortalImmovable(root_index)) {
+ return StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
+ } else {
+ return StoreObjectField(object, offset, LoadRoot(root_index));
+ }
+}
+
+Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
+ Node* value,
+ WriteBarrierMode barrier_mode,
+ ParameterMode parameter_mode) {
+ DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
+ barrier_mode == UPDATE_WRITE_BARRIER);
+ Node* offset =
+ ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, parameter_mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ MachineRepresentation rep = MachineRepresentation::kTagged;
+ if (barrier_mode == SKIP_WRITE_BARRIER) {
+ return StoreNoWriteBarrier(rep, object, offset, value);
+ } else {
+ return Store(rep, object, offset, value);
+ }
+}
+
+Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
+ Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
+ Node* offset =
+ ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ MachineRepresentation rep = MachineRepresentation::kFloat64;
+ return StoreNoWriteBarrier(rep, object, offset, value);
+}
+
+Node* CodeStubAssembler::AllocateHeapNumber() {
+ Node* result = Allocate(HeapNumber::kSize, kNone);
+ StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
+ Node* result = AllocateHeapNumber();
+ StoreHeapNumberValue(result, value);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
+ Node* result = Allocate(SeqOneByteString::SizeFor(length));
+ StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
+ SmiConstant(Smi::FromInt(length)));
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+ IntPtrConstant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ // Compute the SeqOneByteString size and check if it fits into new space.
+ Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
+ if_join(this);
+ Node* size = WordAnd(
+ IntPtrAdd(
+ IntPtrAdd(length, IntPtrConstant(SeqOneByteString::kHeaderSize)),
+ IntPtrConstant(kObjectAlignmentMask)),
+ IntPtrConstant(~kObjectAlignmentMask));
+ Branch(IntPtrLessThanOrEqual(size,
+ IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+ &if_sizeissmall, &if_notsizeissmall);
+
+ Bind(&if_sizeissmall);
+ {
+ // Just allocate the SeqOneByteString in new space.
+ Node* result = Allocate(size);
+ StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
+ SmiFromWord(length));
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+ IntPtrConstant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+
+ Bind(&if_notsizeissmall);
+ {
+    // We might need to allocate in large object space; go to the runtime.
+ Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+ SmiFromWord(length));
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+
+ Bind(&if_join);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
+ Node* result = Allocate(SeqTwoByteString::SizeFor(length));
+ StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
+ SmiConstant(Smi::FromInt(length)));
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+ IntPtrConstant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ // Compute the SeqTwoByteString size and check if it fits into new space.
+ Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
+ if_join(this);
+ Node* size = WordAnd(
+ IntPtrAdd(IntPtrAdd(WordShl(length, 1),
+ IntPtrConstant(SeqTwoByteString::kHeaderSize)),
+ IntPtrConstant(kObjectAlignmentMask)),
+ IntPtrConstant(~kObjectAlignmentMask));
+ Branch(IntPtrLessThanOrEqual(size,
+ IntPtrConstant(Page::kMaxRegularHeapObjectSize)),
+ &if_sizeissmall, &if_notsizeissmall);
+
+ Bind(&if_sizeissmall);
+ {
+ // Just allocate the SeqTwoByteString in new space.
+ Node* result = Allocate(size);
+ StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
+ SmiFromWord(length));
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+ IntPtrConstant(String::kEmptyHashField),
+ MachineRepresentation::kWord32);
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+
+ Bind(&if_notsizeissmall);
+ {
+    // We might need to allocate in large object space; go to the runtime.
+ Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
+ SmiFromWord(length));
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+
+ Bind(&if_join);
+ return var_result.value();
+}
+
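+// Allocates the JSArray, its elements store and (optionally) an allocation
+// memento in one contiguous block, then initializes the elements with holes.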
+Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
+ Node* capacity_node, Node* length_node,
+ compiler::Node* allocation_site,
+ ParameterMode mode) {
+ bool is_double = IsFastDoubleElementsKind(kind);
+ int base_size = JSArray::kSize + FixedArray::kHeaderSize;
+ int elements_offset = JSArray::kSize;
+
+ Comment("begin allocation of JSArray");
+
+ if (allocation_site != nullptr) {
+ base_size += AllocationMemento::kSize;
+ elements_offset += AllocationMemento::kSize;
+ }
+
+ Node* total_size =
+ ElementOffsetFromIndex(capacity_node, kind, mode, base_size);
+
+ // Allocate both array and elements object, and initialize the JSArray.
+ Heap* heap = isolate()->heap();
+ Node* array = Allocate(total_size);
+ StoreMapNoWriteBarrier(array, array_map);
+ Node* empty_properties = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kPropertiesOffset,
+ empty_properties);
+ StoreObjectFieldNoWriteBarrier(
+ array, JSArray::kLengthOffset,
+ mode == SMI_PARAMETERS ? length_node : SmiTag(length_node));
+
+ if (allocation_site != nullptr) {
+ InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
+ }
+
+  // Set up the elements object.
+ Node* elements = InnerAllocate(array, elements_offset);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset, elements);
+ Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
+ : heap->fixed_array_map());
+ StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
+ StoreObjectFieldNoWriteBarrier(
+ elements, FixedArray::kLengthOffset,
+ mode == SMI_PARAMETERS ? capacity_node : SmiTag(capacity_node));
+
+ FillFixedArrayWithHole(kind, elements, IntPtrConstant(0), capacity_node,
+ mode);
+
+ return array;
+}
+
+Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
+ Node* capacity_node,
+ ParameterMode mode,
+ AllocationFlags flags) {
+ Node* total_size = GetFixedAarrayAllocationSize(capacity_node, kind, mode);
+
+ // Allocate both array and elements object, and initialize the JSArray.
+ Node* array = Allocate(total_size, flags);
+ Heap* heap = isolate()->heap();
+ Handle<Map> map(IsFastDoubleElementsKind(kind)
+ ? heap->fixed_double_array_map()
+ : heap->fixed_array_map());
+ if (flags & kPretenured) {
+ StoreObjectField(array, JSObject::kMapOffset, HeapConstant(map));
+ } else {
+ StoreMapNoWriteBarrier(array, HeapConstant(map));
+ }
+ StoreObjectFieldNoWriteBarrier(
+ array, FixedArray::kLengthOffset,
+ mode == INTEGER_PARAMETERS ? SmiTag(capacity_node) : capacity_node);
+ return array;
+}
+
+void CodeStubAssembler::FillFixedArrayWithHole(ElementsKind kind,
+ compiler::Node* array,
+ compiler::Node* from_node,
+ compiler::Node* to_node,
+ ParameterMode mode) {
+ int const first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ Heap* heap = isolate()->heap();
+ Node* hole = HeapConstant(Handle<HeapObject>(heap->the_hole_value()));
+ Node* double_hole =
+ Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
+ DCHECK_EQ(kHoleNanLower32, kHoleNanUpper32);
+ bool is_double = IsFastDoubleElementsKind(kind);
+ int32_t to;
+ bool constant_to = ToInt32Constant(to_node, to);
+ int32_t from;
+ bool constant_from = ToInt32Constant(from_node, from);
+ if (constant_to && constant_from &&
+ (to - from) <= kElementLoopUnrollThreshold) {
+ for (int i = from; i < to; ++i) {
+ if (is_double) {
+ Node* offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+ first_element_offset);
+ // Don't use doubles to store the hole double, since manipulating the
+ // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+ // change its value on ia32 (the x87 stack is used to return values
+ // and stores to the stack silently clear the signalling bit).
+ //
+ // TODO(danno): When we have a Float32/Float64 wrapper class that
+ // preserves double bits during manipulation, remove this code/change
+ // this to an indexed Float64 store.
+ if (Is64()) {
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
+ double_hole);
+ } else {
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
+ double_hole);
+ offset = ElementOffsetFromIndex(Int32Constant(i), kind, mode,
+ first_element_offset + kPointerSize);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
+ double_hole);
+ }
+ } else {
+ StoreFixedArrayElement(array, Int32Constant(i), hole,
+ SKIP_WRITE_BARRIER);
+ }
+ }
+ } else {
+ Variable current(this, MachineRepresentation::kTagged);
+ Label test(this);
+ Label decrement(this, &current);
+ Label done(this);
+ Node* limit =
+ IntPtrAdd(array, ElementOffsetFromIndex(from_node, kind, mode));
+ current.Bind(IntPtrAdd(array, ElementOffsetFromIndex(to_node, kind, mode)));
+
+ Branch(WordEqual(current.value(), limit), &done, &decrement);
+
+ Bind(&decrement);
+ current.Bind(IntPtrSub(
+ current.value(),
+ Int32Constant(IsFastDoubleElementsKind(kind) ? kDoubleSize
+ : kPointerSize)));
+ if (is_double) {
+ // Don't use doubles to store the hole double, since manipulating the
+ // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+ // change its value on ia32 (the x87 stack is used to return values
+      // and stores to the stack silently clear the signaling bit).
+ //
+ // TODO(danno): When we have a Float32/Float64 wrapper class that
+ // preserves double bits during manipulation, remove this code/change
+ // this to an indexed Float64 store.
+ if (Is64()) {
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, current.value(),
+ Int64Constant(first_element_offset), double_hole);
+ } else {
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
+ Int32Constant(first_element_offset), double_hole);
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord32,
+ IntPtrAdd(current.value(),
+ Int32Constant(kPointerSize + first_element_offset)),
+ double_hole);
+ }
+ } else {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, current.value(),
+ IntPtrConstant(first_element_offset), hole);
+ }
+ Node* compare = WordNotEqual(current.value(), limit);
+ Branch(compare, &decrement, &done);
+
+ Bind(&done);
+ }
+}
+
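+// Copies {element_count} elements from {from_array} to {to_array} by walking
+// byte offsets downwards from the end of the arrays. A write barrier is only
+// emitted for fast object elements stored with UPDATE_WRITE_BARRIER.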
+void CodeStubAssembler::CopyFixedArrayElements(ElementsKind kind,
+ compiler::Node* from_array,
+ compiler::Node* to_array,
+ compiler::Node* element_count,
+ WriteBarrierMode barrier_mode,
+ ParameterMode mode) {
+ Label test(this);
+ Label done(this);
+ bool double_elements = IsFastDoubleElementsKind(kind);
+ bool needs_write_barrier =
+ barrier_mode == UPDATE_WRITE_BARRIER && IsFastObjectElementsKind(kind);
+ Node* limit_offset = ElementOffsetFromIndex(
+ IntPtrConstant(0), kind, mode, FixedArray::kHeaderSize - kHeapObjectTag);
+ Variable current_offset(this, MachineType::PointerRepresentation());
+ current_offset.Bind(ElementOffsetFromIndex(
+ element_count, kind, mode, FixedArray::kHeaderSize - kHeapObjectTag));
+ Label decrement(this, &current_offset);
+
+ Branch(WordEqual(current_offset.value(), limit_offset), &done, &decrement);
+
+ Bind(&decrement);
+ {
+ current_offset.Bind(IntPtrSub(
+ current_offset.value(),
+ IntPtrConstant(double_elements ? kDoubleSize : kPointerSize)));
+
+ Node* value =
+ Load(double_elements ? MachineType::Float64() : MachineType::Pointer(),
+ from_array, current_offset.value());
+ if (needs_write_barrier) {
+ Store(MachineRepresentation::kTagged, to_array,
+ current_offset.value(), value);
+ } else if (double_elements) {
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array,
+ current_offset.value(), value);
+ } else {
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
+ current_offset.value(), value);
+ }
+ Node* compare = WordNotEqual(current_offset.value(), limit_offset);
+ Branch(compare, &decrement, &done);
+ }
+
+ Bind(&done);
+}
+
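+// Grows by half the old capacity plus a fixed headroom of 16 elements:
+// new_capacity = old_capacity + old_capacity / 2 + 16, e.g. 8 -> 28. In
+// SMI_PARAMETERS mode the arithmetic is done on tagged values and the final
+// mask re-establishes a valid Smi.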
+Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
+ ParameterMode mode) {
+ Node* half_old_capacity = WordShr(old_capacity, IntPtrConstant(1));
+ Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
+ Node* unconditioned_result =
+ IntPtrAdd(new_capacity, IntPtrOrSmiConstant(16, mode));
+ if (mode == INTEGER_PARAMETERS) {
+ return unconditioned_result;
+ } else {
+ int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+ return WordAnd(unconditioned_result,
+ IntPtrConstant(static_cast<size_t>(-1) << kSmiShiftBits));
+ }
+}
+
+Node* CodeStubAssembler::CheckAndGrowElementsCapacity(Node* context,
+ Node* elements,
+ ElementsKind kind,
+ Node* key, Label* fail) {
+ Node* capacity = LoadFixedArrayBaseLength(elements);
+
+  // On 32-bit platforms, there is a slight performance advantage to doing
+  // all of the arithmetic for the new backing store with Smis, since it
+  // saves a few tag/untag operations without adding any cost when
+  // calculating the array offset (the Smi math can be folded away), and
+  // there are fewer live ranges. Thus only convert |capacity| and |key| to
+  // untagged values on 64-bit platforms.
+ ParameterMode mode = Is64() ? INTEGER_PARAMETERS : SMI_PARAMETERS;
+ if (mode == INTEGER_PARAMETERS) {
+ capacity = SmiUntag(capacity);
+ key = SmiUntag(key);
+ }
+
+ // If the gap growth is too big, fall back to the runtime.
+ Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
+ Node* max_capacity = IntPtrAdd(capacity, max_gap);
+ GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), fail);
+
+  // Calculate the capacity of the new backing store.
+ Node* new_capacity = CalculateNewElementsCapacity(
+ IntPtrAdd(key, IntPtrOrSmiConstant(1, mode)), mode);
+
+  // If the size of the allocation for the new capacity doesn't fit in a page
+  // that we can bump-pointer allocate from, fall back to the runtime.
+ int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
+ GotoIf(UintPtrGreaterThanOrEqual(new_capacity,
+ IntPtrOrSmiConstant(max_size, mode)),
+ fail);
+
+ // Allocate the new backing store.
+ Node* new_elements = AllocateFixedArray(kind, new_capacity, mode);
+
+ // Fill in the added capacity in the new store with holes.
+ FillFixedArrayWithHole(kind, new_elements, capacity, new_capacity, mode);
+
+ // Copy the elements from the old elements store to the new.
+ CopyFixedArrayElements(kind, elements, new_elements, capacity,
+ SKIP_WRITE_BARRIER, mode);
+
+ return new_elements;
+}
+
+void CodeStubAssembler::InitializeAllocationMemento(
+ compiler::Node* base_allocation, int base_allocation_size,
+ compiler::Node* allocation_site) {
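+  // The memento is written directly after the base allocation;
+  // {base_allocation_size} is the byte offset at which its fields start.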
+ StoreObjectFieldNoWriteBarrier(
+ base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
+ HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+ StoreObjectFieldNoWriteBarrier(
+ base_allocation,
+ AllocationMemento::kAllocationSiteOffset + base_allocation_size,
+ allocation_site);
+ if (FLAG_allocation_site_pretenuring) {
+ Node* count = LoadObjectField(allocation_site,
+ AllocationSite::kPretenureCreateCountOffset);
+ Node* incremented_count = IntPtrAdd(count, SmiConstant(Smi::FromInt(1)));
+ StoreObjectFieldNoWriteBarrier(allocation_site,
+ AllocationSite::kPretenureCreateCountOffset,
+ incremented_count);
+ }
+}
+
+Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
+ // We might need to loop once due to ToNumber conversion.
+ Variable var_value(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kFloat64);
+ Label loop(this, &var_value), done_loop(this, &var_result);
+ var_value.Bind(value);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {value}.
+ value = var_value.value();
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToFloat64(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this),
+ if_valueisnotheapnumber(this, Label::kDeferred);
+ Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Load the floating point value.
+ var_result.Bind(LoadHeapNumberValue(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotheapnumber);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&loop);
+ }
+ }
+ }
+ Bind(&done_loop);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
+ // We might need to loop once due to ToNumber conversion.
+ Variable var_value(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kWord32);
+ Label loop(this, &var_value), done_loop(this, &var_result);
+ var_value.Bind(value);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {value}.
+ value = var_value.value();
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToWord32(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this),
+ if_valueisnotheapnumber(this, Label::kDeferred);
+ Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Truncate the floating point value.
+ var_result.Bind(TruncateHeapNumberValueToWord32(value));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotheapnumber);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&loop);
+ }
+ }
+ }
+ Bind(&done_loop);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
+ Node* value = LoadHeapNumberValue(object);
+ return TruncateFloat64ToWord32(value);
+}
+
+Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
+ Node* value32 = RoundFloat64ToInt32(value);
+ Node* value64 = ChangeInt32ToFloat64(value32);
+
+ Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
+
+ Label if_valueisequal(this), if_valueisnotequal(this);
+ Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
+ Bind(&if_valueisequal);
+ {
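+    // The round-trip preserved the value, so it is integral. Zero needs
+    // extra care: -0.0 also compares equal to 0.0 but cannot be represented
+    // as a Smi, so inspect the sign bit in the high word to tell them apart.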
+ GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
+ BranchIfInt32LessThan(Float64ExtractHighWord32(value), Int32Constant(0),
+ &if_valueisheapnumber, &if_valueisint32);
+ }
+ Bind(&if_valueisnotequal);
+ Goto(&if_valueisheapnumber);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Bind(&if_valueisint32);
+ {
+ if (Is64()) {
+ Node* result = SmiTag(ChangeInt32ToInt64(value32));
+ var_result.Bind(result);
+ Goto(&if_join);
+ } else {
+ Node* pair = Int32AddWithOverflow(value32, value32);
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+ Bind(&if_overflow);
+ Goto(&if_valueisheapnumber);
+ Bind(&if_notoverflow);
+ {
+ Node* result = Projection(0, pair);
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+ }
+ }
+ Bind(&if_valueisheapnumber);
+ {
+ Node* result = AllocateHeapNumberWithValue(value);
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+ Bind(&if_join);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
+ if (Is64()) {
+ return SmiTag(ChangeInt32ToInt64(value));
+ }
+ Variable var_result(this, MachineRepresentation::kTagged);
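+  // On 32-bit targets a Smi is the value shifted left by one bit, so adding
+  // {value} to itself performs the tagging, and the overflow flag catches
+  // values outside the Smi range.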
+ Node* pair = Int32AddWithOverflow(value, value);
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
+ if_join(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+ Bind(&if_overflow);
+ {
+ Node* value64 = ChangeInt32ToFloat64(value);
+ Node* result = AllocateHeapNumberWithValue(value64);
+ var_result.Bind(result);
+ }
+ Goto(&if_join);
+ Bind(&if_notoverflow);
+ {
+ Node* result = Projection(0, pair);
+ var_result.Bind(result);
+ }
+ Goto(&if_join);
+ Bind(&if_join);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
+ Label if_overflow(this, Label::kDeferred), if_not_overflow(this),
+ if_join(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
+  // If {value} > Smi::kMaxValue, we need to store it in a HeapNumber.
+ Branch(Uint32LessThan(Int32Constant(Smi::kMaxValue), value), &if_overflow,
+ &if_not_overflow);
+
+ Bind(&if_not_overflow);
+ {
+ if (Is64()) {
+ var_result.Bind(SmiTag(ChangeUint32ToUint64(value)));
+ } else {
+ // If tagging {value} results in an overflow, we need to use a HeapNumber
+ // to represent it.
+ Node* pair = Int32AddWithOverflow(value, value);
+ Node* overflow = Projection(1, pair);
+ GotoIf(overflow, &if_overflow);
+
+ Node* result = Projection(0, pair);
+ var_result.Bind(result);
+ }
+ }
+ Goto(&if_join);
+
+ Bind(&if_overflow);
+ {
+ Node* float64_value = ChangeUint32ToFloat64(value);
+ var_result.Bind(AllocateHeapNumberWithValue(float64_value));
+ }
+ Goto(&if_join);
+
+ Bind(&if_join);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
+ char const* method_name) {
+ Variable var_value(this, MachineRepresentation::kTagged);
+ var_value.Bind(value);
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
+ if_valueisstring(this);
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Bind(&if_valueisnotsmi);
+ {
+ // Load the instance type of the {value}.
+ Node* value_instance_type = LoadInstanceType(value);
+
+    // Check if the {value} is already a String.
+ Label if_valueisnotstring(this, Label::kDeferred);
+ Branch(
+ Int32LessThan(value_instance_type, Int32Constant(FIRST_NONSTRING_TYPE)),
+ &if_valueisstring, &if_valueisnotstring);
+ Bind(&if_valueisnotstring);
+ {
+ // Check if the {value} is null.
+ Label if_valueisnullorundefined(this, Label::kDeferred),
+ if_valueisnotnullorundefined(this, Label::kDeferred),
+ if_valueisnotnull(this, Label::kDeferred);
+ Branch(WordEqual(value, NullConstant()), &if_valueisnullorundefined,
+ &if_valueisnotnull);
+ Bind(&if_valueisnotnull);
+ {
+ // Check if the {value} is undefined.
+ Branch(WordEqual(value, UndefinedConstant()),
+ &if_valueisnullorundefined, &if_valueisnotnullorundefined);
+ Bind(&if_valueisnotnullorundefined);
+ {
+ // Convert the {value} to a String.
+ Callable callable = CodeFactory::ToString(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&if_valueisstring);
+ }
+ }
+
+ Bind(&if_valueisnullorundefined);
+ {
+ // The {value} is either null or undefined.
+ CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
+ HeapConstant(factory()->NewStringFromAsciiChecked(
+ method_name, TENURED)));
+ Goto(&if_valueisstring); // Never reached.
+ }
+ }
+ }
+ Bind(&if_valueissmi);
+ {
+ // The {value} is a Smi, convert it to a String.
+ Callable callable = CodeFactory::NumberToString(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&if_valueisstring);
+ }
+ Bind(&if_valueisstring);
+ return var_value.value();
+}
+
+Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
+ PrimitiveType primitive_type,
+ char const* method_name) {
+ // We might need to loop once due to JSValue unboxing.
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_value), done_loop(this),
+ done_throw(this, Label::kDeferred);
+ var_value.Bind(value);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {value}.
+ value = var_value.value();
+
+ // Check if the {value} is a Smi or a HeapObject.
+ GotoIf(WordIsSmi(value), (primitive_type == PrimitiveType::kNumber)
+ ? &done_loop
+ : &done_throw);
+
+    // Load the map of the {value}.
+ Node* value_map = LoadMap(value);
+
+ // Load the instance type of the {value}.
+ Node* value_instance_type = LoadMapInstanceType(value_map);
+
+ // Check if {value} is a JSValue.
+ Label if_valueisvalue(this, Label::kDeferred), if_valueisnotvalue(this);
+ Branch(Word32Equal(value_instance_type, Int32Constant(JS_VALUE_TYPE)),
+ &if_valueisvalue, &if_valueisnotvalue);
+
+ Bind(&if_valueisvalue);
+ {
+ // Load the actual value from the {value}.
+ var_value.Bind(LoadObjectField(value, JSValue::kValueOffset));
+ Goto(&loop);
+ }
+
+ Bind(&if_valueisnotvalue);
+ {
+ switch (primitive_type) {
+ case PrimitiveType::kBoolean:
+ GotoIf(WordEqual(value_map, BooleanMapConstant()), &done_loop);
+ break;
+ case PrimitiveType::kNumber:
+ GotoIf(
+ Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &done_loop);
+ break;
+ case PrimitiveType::kString:
+ GotoIf(Int32LessThan(value_instance_type,
+ Int32Constant(FIRST_NONSTRING_TYPE)),
+ &done_loop);
+ break;
+ case PrimitiveType::kSymbol:
+ GotoIf(Word32Equal(value_instance_type, Int32Constant(SYMBOL_TYPE)),
+ &done_loop);
+ break;
+ }
+ Goto(&done_throw);
+ }
+ }
+
+ Bind(&done_throw);
+ {
+ // The {value} is not a compatible receiver for this method.
+ CallRuntime(Runtime::kThrowNotGeneric, context,
+ HeapConstant(factory()->NewStringFromAsciiChecked(method_name,
+ TENURED)));
+ Goto(&done_loop); // Never reached.
+ }
+
+ Bind(&done_loop);
+ return var_value.value();
+}
+
+Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+ // Translate the {index} into a Word.
+ index = SmiToWord(index);
+
+ // We may need to loop in case of cons or sliced strings.
+ Variable var_index(this, MachineType::PointerRepresentation());
+ Variable var_result(this, MachineRepresentation::kWord32);
+ Variable var_string(this, MachineRepresentation::kTagged);
+ Variable* loop_vars[] = {&var_index, &var_string};
+ Label done_loop(this, &var_result), loop(this, 2, loop_vars);
+ var_string.Bind(string);
+ var_index.Bind(index);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {index}.
+ index = var_index.value();
+
+ // Load the current {string}.
+ string = var_string.value();
+
+ // Load the instance type of the {string}.
+ Node* string_instance_type = LoadInstanceType(string);
+
+ // Check if the {string} is a SeqString.
+ Label if_stringissequential(this), if_stringisnotsequential(this);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kSeqStringTag)),
+ &if_stringissequential, &if_stringisnotsequential);
+
+ Bind(&if_stringissequential);
+ {
+ // Check if the {string} is a TwoByteSeqString or a OneByteSeqString.
+ Label if_stringistwobyte(this), if_stringisonebyte(this);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)),
+ &if_stringistwobyte, &if_stringisonebyte);
+
+ Bind(&if_stringisonebyte);
+ {
+ var_result.Bind(
+ Load(MachineType::Uint8(), string,
+ IntPtrAdd(index, IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag))));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_stringistwobyte);
+ {
+ var_result.Bind(
+ Load(MachineType::Uint16(), string,
+ IntPtrAdd(WordShl(index, IntPtrConstant(1)),
+ IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag))));
+ Goto(&done_loop);
+ }
+ }
+
+ Bind(&if_stringisnotsequential);
+ {
+ // Check if the {string} is a ConsString.
+ Label if_stringiscons(this), if_stringisnotcons(this);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kConsStringTag)),
+ &if_stringiscons, &if_stringisnotcons);
+
+ Bind(&if_stringiscons);
+ {
+        // Check whether the right-hand side is the empty string (i.e. if
+        // this is really a flat string in a cons string). If that is not
+        // the case, we flatten the string first.
+ Label if_rhsisempty(this), if_rhsisnotempty(this, Label::kDeferred);
+ Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+ Branch(WordEqual(rhs, EmptyStringConstant()), &if_rhsisempty,
+ &if_rhsisnotempty);
+
+ Bind(&if_rhsisempty);
+ {
+ // Just operate on the left hand side of the {string}.
+ var_string.Bind(LoadObjectField(string, ConsString::kFirstOffset));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotempty);
+ {
+          // Flatten the {string} and retry the lookup on the result.
+ var_string.Bind(CallRuntime(Runtime::kFlattenString,
+ NoContextConstant(), string));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_stringisnotcons);
+ {
+ // Check if the {string} is an ExternalString.
+ Label if_stringisexternal(this), if_stringisnotexternal(this);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kExternalStringTag)),
+ &if_stringisexternal, &if_stringisnotexternal);
+
+ Bind(&if_stringisexternal);
+ {
+ // Check if the {string} is a short external string.
+ Label if_stringisshort(this),
+ if_stringisnotshort(this, Label::kDeferred);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kShortExternalStringMask)),
+ Int32Constant(0)),
+ &if_stringisshort, &if_stringisnotshort);
+
+ Bind(&if_stringisshort);
+ {
+ // Load the actual resource data from the {string}.
+ Node* string_resource_data =
+ LoadObjectField(string, ExternalString::kResourceDataOffset,
+ MachineType::Pointer());
+
+ // Check if the {string} is a TwoByteExternalString or a
+ // OneByteExternalString.
+ Label if_stringistwobyte(this), if_stringisonebyte(this);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)),
+ &if_stringistwobyte, &if_stringisonebyte);
+
+ Bind(&if_stringisonebyte);
+ {
+ var_result.Bind(
+ Load(MachineType::Uint8(), string_resource_data, index));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_stringistwobyte);
+ {
+ var_result.Bind(Load(MachineType::Uint16(), string_resource_data,
+ WordShl(index, IntPtrConstant(1))));
+ Goto(&done_loop);
+ }
+ }
+
+ Bind(&if_stringisnotshort);
+ {
+          // The {string} might be compressed; call the runtime.
+ var_result.Bind(SmiToWord32(
+ CallRuntime(Runtime::kExternalStringGetChar,
+ NoContextConstant(), string, SmiTag(index))));
+ Goto(&done_loop);
+ }
+ }
+
+ Bind(&if_stringisnotexternal);
+ {
+ // The {string} is a SlicedString, continue with its parent.
+ Node* string_offset =
+ LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
+ Node* string_parent =
+ LoadObjectField(string, SlicedString::kParentOffset);
+ var_index.Bind(IntPtrAdd(index, string_offset));
+ var_string.Bind(string_parent);
+ Goto(&loop);
+ }
+ }
+ }
+ }
+
+ Bind(&done_loop);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::StringFromCharCode(Node* code) {
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ // Check if the {code} is a one-byte char code.
+ Label if_codeisonebyte(this), if_codeistwobyte(this, Label::kDeferred),
+ if_done(this);
+ Branch(Int32LessThanOrEqual(code, Int32Constant(String::kMaxOneByteCharCode)),
+ &if_codeisonebyte, &if_codeistwobyte);
+ Bind(&if_codeisonebyte);
+ {
+    // Load the isolate-wide single-character string cache.
+ Node* cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);
+
+    // Check if we already have an entry for the {code} in the
+    // single-character string cache.
+ Label if_entryisundefined(this, Label::kDeferred),
+ if_entryisnotundefined(this);
+ Node* entry = LoadFixedArrayElement(cache, code);
+ Branch(WordEqual(entry, UndefinedConstant()), &if_entryisundefined,
+ &if_entryisnotundefined);
+
+ Bind(&if_entryisundefined);
+ {
+      // Allocate a new SeqOneByteString for {code} and store it in the
+      // {cache}.
+ Node* result = AllocateSeqOneByteString(1);
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord8, result,
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag), code);
+ StoreFixedArrayElement(cache, code, result);
+ var_result.Bind(result);
+ Goto(&if_done);
+ }
+
+ Bind(&if_entryisnotundefined);
+ {
+ // Return the entry from the {cache}.
+ var_result.Bind(entry);
+ Goto(&if_done);
+ }
+ }
+
+ Bind(&if_codeistwobyte);
+ {
+ // Allocate a new SeqTwoByteString for {code}.
+ Node* result = AllocateSeqTwoByteString(1);
+ StoreNoWriteBarrier(
+ MachineRepresentation::kWord16, result,
+ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag), code);
+ var_result.Bind(result);
+ Goto(&if_done);
+ }
+
+ Bind(&if_done);
+ return var_result.value();
+}
+
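+// Extracts a bit field from {word32} as (word32 & mask) >> shift.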
+Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
+ uint32_t mask) {
+ return Word32Shr(Word32And(word32, Int32Constant(mask)),
+ static_cast<int>(shift));
+}
+
+void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Node* counter_address = ExternalConstant(ExternalReference(counter));
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, counter_address,
+ Int32Constant(value));
+ }
+}
+
+void CodeStubAssembler::IncrementCounter(StatsCounter* counter, int delta) {
+ DCHECK(delta > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Node* counter_address = ExternalConstant(ExternalReference(counter));
+ Node* value = Load(MachineType::Int32(), counter_address);
+ value = Int32Add(value, Int32Constant(delta));
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, counter_address, value);
+ }
+}
+
+void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) {
+ DCHECK(delta > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ Node* counter_address = ExternalConstant(ExternalReference(counter));
+ Node* value = Load(MachineType::Int32(), counter_address);
+ value = Int32Sub(value, Int32Constant(delta));
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, counter_address, value);
+ }
+}
+
+void CodeStubAssembler::Use(Label* label) {
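+  // Branch to {label} on a condition that is never true, so the label is
+  // considered used without ever being reached at runtime.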
+ GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label);
+}
+
+void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
+ Variable* var_index, Label* if_keyisunique,
+ Label* if_bailout) {
+ DCHECK_EQ(MachineRepresentation::kWord32, var_index->rep());
+ Comment("TryToName");
+
+ Label if_keyissmi(this), if_keyisnotsmi(this);
+ Branch(WordIsSmi(key), &if_keyissmi, &if_keyisnotsmi);
+ Bind(&if_keyissmi);
+ {
+ // Negative smi keys are named properties. Handle in the runtime.
+ GotoUnless(WordIsPositiveSmi(key), if_bailout);
+
+ var_index->Bind(SmiToWord32(key));
+ Goto(if_keyisindex);
+ }
+
+ Bind(&if_keyisnotsmi);
+
+ Node* key_instance_type = LoadInstanceType(key);
+ // Symbols are unique.
+ GotoIf(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
+ if_keyisunique);
+
+ Label if_keyisinternalized(this);
+ Node* bits =
+ WordAnd(key_instance_type,
+ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask));
+ Branch(Word32Equal(bits, Int32Constant(kStringTag | kInternalizedTag)),
+ &if_keyisinternalized, if_bailout);
+ Bind(&if_keyisinternalized);
+
+  // Check whether the key is an array index passed in as a string. If so,
+  // handle it uniformly with smi keys.
+ // TODO(verwaest): Also support non-internalized strings.
+ Node* hash = LoadNameHashField(key);
+ Node* bit = Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
+ GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_keyisunique);
+  // The key is an index. Check if it is small enough to be encoded in the
+  // hash_field. Handle a too-large array index in the runtime.
+ bit = Word32And(hash, Int32Constant(Name::kContainsCachedArrayIndexMask));
+ GotoIf(Word32NotEqual(bit, Int32Constant(0)), if_bailout);
+ var_index->Bind(BitFieldDecode<Name::ArrayIndexValueBits>(hash));
+ Goto(if_keyisindex);
+}
+
+template <typename Dictionary>
+Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
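+  // index = entry * Dictionary::kEntrySize + Dictionary::kElementsStartIndex
+  //         + field_index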
+ Node* entry_index = Int32Mul(entry, Int32Constant(Dictionary::kEntrySize));
+ return Int32Add(entry_index,
+ Int32Constant(Dictionary::kElementsStartIndex + field_index));
+}
+
+template <typename Dictionary>
+void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
+ Node* unique_name, Label* if_found,
+ Variable* var_name_index,
+ Label* if_not_found,
+ int inlined_probes) {
+ DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+ Comment("NameDictionaryLookup");
+
+ Node* capacity = LoadAndUntagToWord32FixedArrayElement(
+ dictionary, Int32Constant(Dictionary::kCapacityIndex));
+ Node* mask = Int32Sub(capacity, Int32Constant(1));
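+  // The capacity is always a power of two, so masking with {capacity - 1}
+  // is equivalent to taking the probe position modulo the capacity.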
+ Node* hash = LoadNameHash(unique_name);
+
+ // See Dictionary::FirstProbe().
+ Node* count = Int32Constant(0);
+ Node* entry = Word32And(hash, mask);
+
+ for (int i = 0; i < inlined_probes; i++) {
+ Node* index = EntryToIndex<Dictionary>(entry);
+ var_name_index->Bind(index);
+
+ Node* current = LoadFixedArrayElement(dictionary, index);
+ GotoIf(WordEqual(current, unique_name), if_found);
+
+ // See Dictionary::NextProbe().
+ count = Int32Constant(i + 1);
+ entry = Word32And(Int32Add(entry, count), mask);
+ }
+
+ Node* undefined = UndefinedConstant();
+
+ Variable var_count(this, MachineRepresentation::kWord32);
+ Variable var_entry(this, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&var_count, &var_entry, var_name_index};
+ Label loop(this, 3, loop_vars);
+ var_count.Bind(count);
+ var_entry.Bind(entry);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* count = var_count.value();
+ Node* entry = var_entry.value();
+
+ Node* index = EntryToIndex<Dictionary>(entry);
+ var_name_index->Bind(index);
+
+ Node* current = LoadFixedArrayElement(dictionary, index);
+ GotoIf(WordEqual(current, undefined), if_not_found);
+ GotoIf(WordEqual(current, unique_name), if_found);
+
+ // See Dictionary::NextProbe().
+ count = Int32Add(count, Int32Constant(1));
+ entry = Word32And(Int32Add(entry, count), mask);
+
+ var_count.Bind(count);
+ var_entry.Bind(entry);
+ Goto(&loop);
+ }
+}
+
+// Instantiate template methods to work around a GCC compilation issue.
+template void CodeStubAssembler::NameDictionaryLookup<NameDictionary>(
+ Node*, Node*, Label*, Variable*, Label*, int);
+template void CodeStubAssembler::NameDictionaryLookup<GlobalDictionary>(
+ Node*, Node*, Label*, Variable*, Label*, int);
+
+Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
+ // See v8::internal::ComputeIntegerHash()
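+  // A bit-mixing integer hash (apparently in the style of Thomas Wang's
+  // 32-bit mix); the final mask keeps the result in the positive Smi range.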
+ Node* hash = key;
+ hash = Word32Xor(hash, seed);
+ hash = Int32Add(Word32Xor(hash, Int32Constant(0xffffffff)),
+ Word32Shl(hash, Int32Constant(15)));
+ hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(12)));
+ hash = Int32Add(hash, Word32Shl(hash, Int32Constant(2)));
+ hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(4)));
+ hash = Int32Mul(hash, Int32Constant(2057));
+ hash = Word32Xor(hash, Word32Shr(hash, Int32Constant(16)));
+ return Word32And(hash, Int32Constant(0x3fffffff));
+}
+
+template <typename Dictionary>
+void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
+ Label* if_found,
+ Variable* var_entry,
+ Label* if_not_found) {
+ DCHECK_EQ(MachineRepresentation::kWord32, var_entry->rep());
+ Comment("NumberDictionaryLookup");
+
+ Node* capacity = LoadAndUntagToWord32FixedArrayElement(
+ dictionary, Int32Constant(Dictionary::kCapacityIndex));
+ Node* mask = Int32Sub(capacity, Int32Constant(1));
+
+ Node* seed;
+ if (Dictionary::ShapeT::UsesSeed) {
+ seed = HashSeed();
+ } else {
+ seed = Int32Constant(kZeroHashSeed);
+ }
+ Node* hash = ComputeIntegerHash(key, seed);
+ Node* key_as_float64 = ChangeUint32ToFloat64(key);
+
+ // See Dictionary::FirstProbe().
+ Node* count = Int32Constant(0);
+ Node* entry = Word32And(hash, mask);
+
+ Node* undefined = UndefinedConstant();
+ Node* the_hole = TheHoleConstant();
+
+ Variable var_count(this, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&var_count, var_entry};
+ Label loop(this, 2, loop_vars);
+ var_count.Bind(count);
+ var_entry->Bind(entry);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* count = var_count.value();
+ Node* entry = var_entry->value();
+
+ Node* index = EntryToIndex<Dictionary>(entry);
+ Node* current = LoadFixedArrayElement(dictionary, index);
+ GotoIf(WordEqual(current, undefined), if_not_found);
+ Label next_probe(this);
+ {
+ Label if_currentissmi(this), if_currentisnotsmi(this);
+ Branch(WordIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
+ Bind(&if_currentissmi);
+ {
+ Node* current_value = SmiToWord32(current);
+ Branch(Word32Equal(current_value, key), if_found, &next_probe);
+ }
+ Bind(&if_currentisnotsmi);
+ {
+ GotoIf(WordEqual(current, the_hole), &next_probe);
+        // {current} must be a HeapNumber.
+ Node* current_value = LoadHeapNumberValue(current);
+ Branch(Float64Equal(current_value, key_as_float64), if_found,
+ &next_probe);
+ }
+ }
+
+ Bind(&next_probe);
+ // See Dictionary::NextProbe().
+ count = Int32Add(count, Int32Constant(1));
+ entry = Word32And(Int32Add(entry, count), mask);
+
+ var_count.Bind(count);
+ var_entry->Bind(entry);
+ Goto(&loop);
+ }
+}
+
+void CodeStubAssembler::TryLookupProperty(
+ Node* object, Node* map, Node* instance_type, Node* unique_name,
+ Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
+ Variable* var_meta_storage, Variable* var_name_index, Label* if_not_found,
+ Label* if_bailout) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_meta_storage->rep());
+ DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
+
+ Label if_objectisspecial(this);
+ STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+ &if_objectisspecial);
+
+ Node* bit_field = LoadMapBitField(map);
+ Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
+ 1 << Map::kIsAccessCheckNeeded);
+ Assert(Word32Equal(Word32And(bit_field, mask), Int32Constant(0)));
+
+ Node* bit_field3 = LoadMapBitField3(map);
+ Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
+ Label if_isfastmap(this), if_isslowmap(this);
+ Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, &if_isslowmap);
+ Bind(&if_isfastmap);
+ {
+ Comment("DescriptorArrayLookup");
+ Node* nof = BitFieldDecode<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+ // Bail out to the runtime for large numbers of own descriptors. The stub
+ // only does linear search, which becomes too expensive in that case.
+ {
+ static const int32_t kMaxLinear = 210;
+ GotoIf(Int32GreaterThan(nof, Int32Constant(kMaxLinear)), if_bailout);
+ }
+ Node* descriptors = LoadMapDescriptors(map);
+ var_meta_storage->Bind(descriptors);
+
+ Variable var_descriptor(this, MachineRepresentation::kWord32);
+ Label loop(this, &var_descriptor);
+ var_descriptor.Bind(Int32Constant(0));
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* index = var_descriptor.value();
+ Node* name_offset = Int32Constant(DescriptorArray::ToKeyIndex(0));
+ Node* factor = Int32Constant(DescriptorArray::kDescriptorSize);
+ GotoIf(Word32Equal(index, nof), if_not_found);
+
+ Node* name_index = Int32Add(name_offset, Int32Mul(index, factor));
+ Node* name = LoadFixedArrayElement(descriptors, name_index);
+
+ var_name_index->Bind(name_index);
+ GotoIf(WordEqual(name, unique_name), if_found_fast);
+
+ var_descriptor.Bind(Int32Add(index, Int32Constant(1)));
+ Goto(&loop);
+ }
+ }
+ Bind(&if_isslowmap);
+ {
+ Node* dictionary = LoadProperties(object);
+ var_meta_storage->Bind(dictionary);
+
+ NameDictionaryLookup<NameDictionary>(dictionary, unique_name, if_found_dict,
+ var_name_index, if_not_found);
+ }
+ Bind(&if_objectisspecial);
+ {
+ // Handle global object here and other special objects in runtime.
+ GotoUnless(Word32Equal(instance_type, Int32Constant(JS_GLOBAL_OBJECT_TYPE)),
+ if_bailout);
+
+ // Handle interceptors and access checks in runtime.
+ Node* bit_field = LoadMapBitField(map);
+ Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
+ 1 << Map::kIsAccessCheckNeeded);
+ GotoIf(Word32NotEqual(Word32And(bit_field, mask), Int32Constant(0)),
+ if_bailout);
+
+ Node* dictionary = LoadProperties(object);
+ var_meta_storage->Bind(dictionary);
+
+ NameDictionaryLookup<GlobalDictionary>(
+ dictionary, unique_name, if_found_global, var_name_index, if_not_found);
+ }
+}
+
+void CodeStubAssembler::TryHasOwnProperty(compiler::Node* object,
+ compiler::Node* map,
+ compiler::Node* instance_type,
+ compiler::Node* unique_name,
+ Label* if_found, Label* if_not_found,
+ Label* if_bailout) {
+ Comment("TryHasOwnProperty");
+ Variable var_meta_storage(this, MachineRepresentation::kTagged);
+ Variable var_name_index(this, MachineRepresentation::kWord32);
+
+ Label if_found_global(this);
+ TryLookupProperty(object, map, instance_type, unique_name, if_found, if_found,
+ &if_found_global, &var_meta_storage, &var_name_index,
+ if_not_found, if_bailout);
+ Bind(&if_found_global);
+ {
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Variable var_details(this, MachineRepresentation::kWord32);
+    // Check that the property cell has not been deleted.
+ LoadPropertyFromGlobalDictionary(var_meta_storage.value(),
+ var_name_index.value(), &var_value,
+ &var_details, if_not_found);
+ Goto(if_found);
+ }
+}
+
+void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
+ Node* descriptors,
+ Node* name_index,
+ Variable* var_details,
+ Variable* var_value) {
+ DCHECK_EQ(MachineRepresentation::kWord32, var_details->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
+ Comment("[ LoadPropertyFromFastObject");
+
+ const int name_to_details_offset =
+ (DescriptorArray::kDescriptorDetails - DescriptorArray::kDescriptorKey) *
+ kPointerSize;
+ const int name_to_value_offset =
+ (DescriptorArray::kDescriptorValue - DescriptorArray::kDescriptorKey) *
+ kPointerSize;
+
+ Node* details = LoadAndUntagToWord32FixedArrayElement(descriptors, name_index,
+ name_to_details_offset);
+ var_details->Bind(details);
+
+ Node* location = BitFieldDecode<PropertyDetails::LocationField>(details);
+
+ Label if_in_field(this), if_in_descriptor(this), done(this);
+ Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
+ &if_in_descriptor);
+ Bind(&if_in_field);
+ {
+ Node* field_index =
+ BitFieldDecode<PropertyDetails::FieldIndexField>(details);
+ Node* representation =
+ BitFieldDecode<PropertyDetails::RepresentationField>(details);
+
+ Node* inobject_properties = LoadMapInobjectProperties(map);
+
+ Label if_inobject(this), if_backing_store(this);
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
+ BranchIfInt32LessThan(field_index, inobject_properties, &if_inobject,
+ &if_backing_store);
+ Bind(&if_inobject);
+ {
+ Comment("if_inobject");
+ Node* field_offset = ChangeInt32ToIntPtr(
+ Int32Mul(Int32Sub(LoadMapInstanceSize(map),
+ Int32Sub(inobject_properties, field_index)),
+ Int32Constant(kPointerSize)));
+
+ Label if_double(this), if_tagged(this);
+ BranchIfWord32NotEqual(representation,
+ Int32Constant(Representation::kDouble), &if_tagged,
+ &if_double);
+ Bind(&if_tagged);
+ {
+ var_value->Bind(LoadObjectField(object, field_offset));
+ Goto(&done);
+ }
+ Bind(&if_double);
+ {
+ if (FLAG_unbox_double_fields) {
+ var_double_value.Bind(
+ LoadObjectField(object, field_offset, MachineType::Float64()));
+ } else {
+ Node* mutable_heap_number = LoadObjectField(object, field_offset);
+ var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+ }
+ Goto(&rebox_double);
+ }
+ }
+ Bind(&if_backing_store);
+ {
+ Comment("if_backing_store");
+ Node* properties = LoadProperties(object);
+ field_index = Int32Sub(field_index, inobject_properties);
+ Node* value = LoadFixedArrayElement(properties, field_index);
+
+ Label if_double(this), if_tagged(this);
+ BranchIfWord32NotEqual(representation,
+ Int32Constant(Representation::kDouble), &if_tagged,
+ &if_double);
+ Bind(&if_tagged);
+ {
+ var_value->Bind(value);
+ Goto(&done);
+ }
+ Bind(&if_double);
+ {
+ var_double_value.Bind(LoadHeapNumberValue(value));
+ Goto(&rebox_double);
+ }
+ }
+ Bind(&rebox_double);
+ {
+ Comment("rebox_double");
+ Node* heap_number = AllocateHeapNumberWithValue(var_double_value.value());
+ var_value->Bind(heap_number);
+ Goto(&done);
+ }
+ }
+ Bind(&if_in_descriptor);
+ {
+ Node* value =
+ LoadFixedArrayElement(descriptors, name_index, name_to_value_offset);
+ var_value->Bind(value);
+ Goto(&done);
+ }
+ Bind(&done);
+
+ Comment("] LoadPropertyFromFastObject");
+}
+
+void CodeStubAssembler::LoadPropertyFromNameDictionary(Node* dictionary,
+ Node* name_index,
+ Variable* var_details,
+ Variable* var_value) {
+ Comment("LoadPropertyFromNameDictionary");
+
+ const int name_to_details_offset =
+ (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ const int name_to_value_offset =
+ (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+
+ Node* details = LoadAndUntagToWord32FixedArrayElement(dictionary, name_index,
+ name_to_details_offset);
+
+ var_details->Bind(details);
+ var_value->Bind(
+ LoadFixedArrayElement(dictionary, name_index, name_to_value_offset));
+
+ Comment("] LoadPropertyFromNameDictionary");
+}
+
+void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
+ Node* name_index,
+ Variable* var_details,
+ Variable* var_value,
+ Label* if_deleted) {
+ Comment("[ LoadPropertyFromGlobalDictionary");
+
+ const int name_to_value_offset =
+ (GlobalDictionary::kEntryValueIndex - GlobalDictionary::kEntryKeyIndex) *
+ kPointerSize;
+
+ Node* property_cell =
+ LoadFixedArrayElement(dictionary, name_index, name_to_value_offset);
+
+ Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
+ GotoIf(WordEqual(value, TheHoleConstant()), if_deleted);
+
+ var_value->Bind(value);
+
+ Node* details = LoadAndUntagToWord32ObjectField(property_cell,
+ PropertyCell::kDetailsOffset);
+ var_details->Bind(details);
+
+ Comment("] LoadPropertyFromGlobalDictionary");
+}
+
+void CodeStubAssembler::TryGetOwnProperty(
+ Node* context, Node* receiver, Node* object, Node* map, Node* instance_type,
+ Node* unique_name, Label* if_found_value, Variable* var_value,
+ Label* if_not_found, Label* if_bailout) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
+ Comment("TryGetOwnProperty");
+
+ Variable var_meta_storage(this, MachineRepresentation::kTagged);
+ Variable var_entry(this, MachineRepresentation::kWord32);
+
+ Label if_found_fast(this), if_found_dict(this), if_found_global(this);
+
+ Variable var_details(this, MachineRepresentation::kWord32);
+ Variable* vars[] = {var_value, &var_details};
+ Label if_found(this, 2, vars);
+
+ TryLookupProperty(object, map, instance_type, unique_name, &if_found_fast,
+ &if_found_dict, &if_found_global, &var_meta_storage,
+ &var_entry, if_not_found, if_bailout);
+ Bind(&if_found_fast);
+ {
+ Node* descriptors = var_meta_storage.value();
+ Node* name_index = var_entry.value();
+
+ LoadPropertyFromFastObject(object, map, descriptors, name_index,
+ &var_details, var_value);
+ Goto(&if_found);
+ }
+ Bind(&if_found_dict);
+ {
+ Node* dictionary = var_meta_storage.value();
+ Node* entry = var_entry.value();
+ LoadPropertyFromNameDictionary(dictionary, entry, &var_details, var_value);
+ Goto(&if_found);
+ }
+ Bind(&if_found_global);
+ {
+ Node* dictionary = var_meta_storage.value();
+ Node* entry = var_entry.value();
+
+ LoadPropertyFromGlobalDictionary(dictionary, entry, &var_details, var_value,
+ if_not_found);
+ Goto(&if_found);
+ }
+  // Here we have the details and a value, which could be an accessor.
+ Bind(&if_found);
+ {
+ Node* details = var_details.value();
+ Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+
+ Label if_accessor(this);
+ Branch(Word32Equal(kind, Int32Constant(kData)), if_found_value,
+ &if_accessor);
+ Bind(&if_accessor);
+ {
+ Node* accessor_pair = var_value->value();
+ GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
+ Int32Constant(ACCESSOR_INFO_TYPE)),
+ if_bailout);
+ AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
+ Node* getter =
+ LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
+ Node* getter_map = LoadMap(getter);
+ Node* instance_type = LoadMapInstanceType(getter_map);
+ // FunctionTemplateInfo getters are not supported yet.
+ GotoIf(Word32Equal(instance_type,
+ Int32Constant(FUNCTION_TEMPLATE_INFO_TYPE)),
+ if_bailout);
+
+ // Return undefined if the {getter} is not callable.
+ var_value->Bind(UndefinedConstant());
+ GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
+ Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(0)),
+ if_found_value);
+
+ // Call the accessor.
+ Callable callable = CodeFactory::Call(isolate());
+ Node* result = CallJS(callable, context, getter, receiver);
+ var_value->Bind(result);
+ Goto(if_found_value);
+ }
+ }
+}
+
+void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
+ Node* instance_type, Node* index,
+ Label* if_found, Label* if_not_found,
+ Label* if_bailout) {
+ // Handle special objects in runtime.
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+ if_bailout);
+
+ Node* bit_field2 = LoadMapBitField2(map);
+ Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+
+ // TODO(verwaest): Support other elements kinds as well.
+ Label if_isobjectorsmi(this), if_isdouble(this), if_isdictionary(this),
+ if_isfaststringwrapper(this), if_isslowstringwrapper(this);
+ // clang-format off
+ int32_t values[] = {
+ // Handled by {if_isobjectorsmi}.
+ FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
+ FAST_HOLEY_ELEMENTS,
+ // Handled by {if_isdouble}.
+ FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
+ // Handled by {if_isdictionary}.
+ DICTIONARY_ELEMENTS,
+ // Handled by {if_isfaststringwrapper}.
+ FAST_STRING_WRAPPER_ELEMENTS,
+ // Handled by {if_isslowstringwrapper}.
+ SLOW_STRING_WRAPPER_ELEMENTS,
+ // Handled by {if_not_found}.
+ NO_ELEMENTS,
+ };
+ Label* labels[] = {
+ &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
+ &if_isobjectorsmi,
+ &if_isdouble, &if_isdouble,
+ &if_isdictionary,
+ &if_isfaststringwrapper,
+ &if_isslowstringwrapper,
+ if_not_found,
+ };
+ // clang-format on
+ STATIC_ASSERT(arraysize(values) == arraysize(labels));
+ Switch(elements_kind, if_bailout, values, labels, arraysize(values));
+
+ Bind(&if_isobjectorsmi);
+ {
+ Node* elements = LoadElements(object);
+ Node* length = LoadAndUntagFixedArrayBaseLength(elements);
+
+ GotoUnless(Uint32LessThan(index, length), if_not_found);
+
+ Node* element = LoadFixedArrayElement(elements, index);
+ Node* the_hole = TheHoleConstant();
+ Branch(WordEqual(element, the_hole), if_not_found, if_found);
+ }
+ Bind(&if_isdouble);
+ {
+ Node* elements = LoadElements(object);
+ Node* length = LoadAndUntagFixedArrayBaseLength(elements);
+
+ GotoUnless(Uint32LessThan(index, length), if_not_found);
+
+ if (kPointerSize == kDoubleSize) {
+ Node* element =
+ LoadFixedDoubleArrayElement(elements, index, MachineType::Uint64());
+ Node* the_hole = Int64Constant(kHoleNanInt64);
+ Branch(Word64Equal(element, the_hole), if_not_found, if_found);
+ } else {
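+      // On 32-bit targets it suffices to load the upper word of the double
+      // and compare it against kHoleNanUpper32 to detect the hole NaN.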
+ Node* element_upper =
+ LoadFixedDoubleArrayElement(elements, index, MachineType::Uint32(),
+ kIeeeDoubleExponentWordOffset);
+ Branch(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
+ if_not_found, if_found);
+ }
+ }
+ Bind(&if_isdictionary);
+ {
+ Variable var_entry(this, MachineRepresentation::kWord32);
+ Node* elements = LoadElements(object);
+ NumberDictionaryLookup<SeededNumberDictionary>(elements, index, if_found,
+ &var_entry, if_not_found);
+ }
+ Bind(&if_isfaststringwrapper);
+ {
+ AssertInstanceType(object, JS_VALUE_TYPE);
+ Node* string = LoadJSValueValue(object);
+ Assert(Int32LessThan(LoadInstanceType(string),
+ Int32Constant(FIRST_NONSTRING_TYPE)));
+ Node* length = LoadStringLength(string);
+ GotoIf(Uint32LessThan(index, SmiToWord32(length)), if_found);
+ Goto(&if_isobjectorsmi);
+ }
+ Bind(&if_isslowstringwrapper);
+ {
+ AssertInstanceType(object, JS_VALUE_TYPE);
+ Node* string = LoadJSValueValue(object);
+ Assert(Int32LessThan(LoadInstanceType(string),
+ Int32Constant(FIRST_NONSTRING_TYPE)));
+ Node* length = LoadStringLength(string);
+ GotoIf(Uint32LessThan(index, SmiToWord32(length)), if_found);
+ Goto(&if_isdictionary);
+ }
+}
+
+// Instantiate template methods to work around a GCC compilation issue.
+template void CodeStubAssembler::NumberDictionaryLookup<SeededNumberDictionary>(
+ Node*, Node*, Label*, Variable*, Label*);
+template void CodeStubAssembler::NumberDictionaryLookup<
+ UnseededNumberDictionary>(Node*, Node*, Label*, Variable*, Label*);
+
+void CodeStubAssembler::TryPrototypeChainLookup(
+ Node* receiver, Node* key, LookupInHolder& lookup_property_in_holder,
+ LookupInHolder& lookup_element_in_holder, Label* if_end,
+ Label* if_bailout) {
+  // Ensure the receiver is a JSReceiver; otherwise bail out.
+ Label if_objectisnotsmi(this);
+ Branch(WordIsSmi(receiver), if_bailout, &if_objectisnotsmi);
+ Bind(&if_objectisnotsmi);
+
+ Node* map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(map);
+ {
+ Label if_objectisreceiver(this);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ STATIC_ASSERT(FIRST_JS_RECEIVER_TYPE == JS_PROXY_TYPE);
+ Branch(
+ Int32GreaterThan(instance_type, Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ &if_objectisreceiver, if_bailout);
+ Bind(&if_objectisreceiver);
+ }
+
+ Variable var_index(this, MachineRepresentation::kWord32);
+
+ Label if_keyisindex(this), if_iskeyunique(this);
+ TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, if_bailout);
+
+ Bind(&if_iskeyunique);
+ {
+ Variable var_holder(this, MachineRepresentation::kTagged);
+ Variable var_holder_map(this, MachineRepresentation::kTagged);
+ Variable var_holder_instance_type(this, MachineRepresentation::kWord8);
+
+ Variable* merged_variables[] = {&var_holder, &var_holder_map,
+ &var_holder_instance_type};
+ Label loop(this, arraysize(merged_variables), merged_variables);
+ var_holder.Bind(receiver);
+ var_holder_map.Bind(map);
+ var_holder_instance_type.Bind(instance_type);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* holder_map = var_holder_map.value();
+ Node* holder_instance_type = var_holder_instance_type.value();
+
+ Label next_proto(this);
+ lookup_property_in_holder(receiver, var_holder.value(), holder_map,
+ holder_instance_type, key, &next_proto,
+ if_bailout);
+ Bind(&next_proto);
+
+      // Bail out if this could be an integer-indexed exotic case.
+ GotoIf(
+ Word32Equal(holder_instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ if_bailout);
+
+ Node* proto = LoadMapPrototype(holder_map);
+
+ Label if_not_null(this);
+ Branch(WordEqual(proto, NullConstant()), if_end, &if_not_null);
+ Bind(&if_not_null);
+
+ Node* map = LoadMap(proto);
+ Node* instance_type = LoadMapInstanceType(map);
+
+ var_holder.Bind(proto);
+ var_holder_map.Bind(map);
+ var_holder_instance_type.Bind(instance_type);
+ Goto(&loop);
+ }
+ }
+ Bind(&if_keyisindex);
+ {
+ Variable var_holder(this, MachineRepresentation::kTagged);
+ Variable var_holder_map(this, MachineRepresentation::kTagged);
+ Variable var_holder_instance_type(this, MachineRepresentation::kWord8);
+
+ Variable* merged_variables[] = {&var_holder, &var_holder_map,
+ &var_holder_instance_type};
+ Label loop(this, arraysize(merged_variables), merged_variables);
+ var_holder.Bind(receiver);
+ var_holder_map.Bind(map);
+ var_holder_instance_type.Bind(instance_type);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Label next_proto(this);
+ lookup_element_in_holder(receiver, var_holder.value(),
+ var_holder_map.value(),
+ var_holder_instance_type.value(),
+ var_index.value(), &next_proto, if_bailout);
+ Bind(&next_proto);
+
+ Node* proto = LoadMapPrototype(var_holder_map.value());
+
+ Label if_not_null(this);
+ Branch(WordEqual(proto, NullConstant()), if_end, &if_not_null);
+ Bind(&if_not_null);
+
+ Node* map = LoadMap(proto);
+ Node* instance_type = LoadMapInstanceType(map);
+
+ var_holder.Bind(proto);
+ var_holder_map.Bind(map);
+ var_holder_instance_type.Bind(instance_type);
+ Goto(&loop);
+ }
+ }
+}
+
+Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
+ Node* object) {
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label return_false(this), return_true(this),
+ return_runtime(this, Label::kDeferred), return_result(this);
+
+ // Goto runtime if {object} is a Smi.
+ GotoIf(WordIsSmi(object), &return_runtime);
+
+ // Load map of {object}.
+ Node* object_map = LoadMap(object);
+
+ // Lookup the {callable} and {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Node* instanceof_cache_function =
+ LoadRoot(Heap::kInstanceofCacheFunctionRootIndex);
+ Node* instanceof_cache_map = LoadRoot(Heap::kInstanceofCacheMapRootIndex);
+ {
+ Label instanceof_cache_miss(this);
+ GotoUnless(WordEqual(instanceof_cache_function, callable),
+ &instanceof_cache_miss);
+ GotoUnless(WordEqual(instanceof_cache_map, object_map),
+ &instanceof_cache_miss);
+ var_result.Bind(LoadRoot(Heap::kInstanceofCacheAnswerRootIndex));
+ Goto(&return_result);
+ Bind(&instanceof_cache_miss);
+ }
+
+ // Goto runtime if {callable} is a Smi.
+ GotoIf(WordIsSmi(callable), &return_runtime);
+
+ // Load map of {callable}.
+ Node* callable_map = LoadMap(callable);
+
+ // Goto runtime if {callable} is not a JSFunction.
+ Node* callable_instance_type = LoadMapInstanceType(callable_map);
+ GotoUnless(
+ Word32Equal(callable_instance_type, Int32Constant(JS_FUNCTION_TYPE)),
+ &return_runtime);
+
+ // Goto runtime if {callable} is not a constructor or has
+ // a non-instance "prototype".
+ Node* callable_bitfield = LoadMapBitField(callable_map);
+ GotoUnless(
+ Word32Equal(Word32And(callable_bitfield,
+ Int32Constant((1 << Map::kHasNonInstancePrototype) |
+ (1 << Map::kIsConstructor))),
+ Int32Constant(1 << Map::kIsConstructor)),
+ &return_runtime);
+
+ // Get the "prototype" (or initial map) of the {callable}.
+ Node* callable_prototype =
+ LoadObjectField(callable, JSFunction::kPrototypeOrInitialMapOffset);
+ {
+ Variable var_callable_prototype(this, MachineRepresentation::kTagged);
+ Label callable_prototype_valid(this);
+ var_callable_prototype.Bind(callable_prototype);
+
+ // Resolve the "prototype" if the {callable} has an initial map. Afterwards
+ // the {callable_prototype} will be either the JSReceiver prototype object
+ // or the hole value, which means that no instances of the {callable} were
+ // created so far and hence we should return false.
+ Node* callable_prototype_instance_type =
+ LoadInstanceType(callable_prototype);
+ GotoUnless(
+ Word32Equal(callable_prototype_instance_type, Int32Constant(MAP_TYPE)),
+ &callable_prototype_valid);
+ var_callable_prototype.Bind(
+ LoadObjectField(callable_prototype, Map::kPrototypeOffset));
+ Goto(&callable_prototype_valid);
+ Bind(&callable_prototype_valid);
+ callable_prototype = var_callable_prototype.value();
+ }
+
+ // Update the global instanceof cache with the current {object} map and
+ // {callable}. The cached answer will be set when it is known below.
+ StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, callable);
+ StoreRoot(Heap::kInstanceofCacheMapRootIndex, object_map);
+
+ // Loop through the prototype chain looking for the {callable} prototype.
+ Variable var_object_map(this, MachineRepresentation::kTagged);
+ var_object_map.Bind(object_map);
+ Label loop(this, &var_object_map);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* object_map = var_object_map.value();
+
+ // Check if the current {object} needs to be access checked.
+ Node* object_bitfield = LoadMapBitField(object_map);
+ GotoUnless(
+ Word32Equal(Word32And(object_bitfield,
+ Int32Constant(1 << Map::kIsAccessCheckNeeded)),
+ Int32Constant(0)),
+ &return_runtime);
+
+ // Check if the current {object} is a proxy.
+ Node* object_instance_type = LoadMapInstanceType(object_map);
+ GotoIf(Word32Equal(object_instance_type, Int32Constant(JS_PROXY_TYPE)),
+ &return_runtime);
+
+ // Check the current {object} prototype.
+ Node* object_prototype = LoadMapPrototype(object_map);
+ GotoIf(WordEqual(object_prototype, NullConstant()), &return_false);
+ GotoIf(WordEqual(object_prototype, callable_prototype), &return_true);
+
+ // Continue with the prototype.
+ var_object_map.Bind(LoadMap(object_prototype));
+ Goto(&loop);
+ }
+
+ Bind(&return_true);
+ StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(true));
+ var_result.Bind(BooleanConstant(true));
+ Goto(&return_result);
+
+ Bind(&return_false);
+ StoreRoot(Heap::kInstanceofCacheAnswerRootIndex, BooleanConstant(false));
+ var_result.Bind(BooleanConstant(false));
+ Goto(&return_result);
+
+ Bind(&return_runtime);
+ {
+ // Invalidate the global instanceof cache.
+ StoreRoot(Heap::kInstanceofCacheFunctionRootIndex, SmiConstant(0));
+    // Fall back to the runtime implementation.
+ var_result.Bind(
+ CallRuntime(Runtime::kOrdinaryHasInstance, context, callable, object));
+ }
+ Goto(&return_result);
+
+ Bind(&return_result);
+ return var_result.value();
+}
+
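+// Computes base_size + index * element_size as an IntPtr. In SMI_PARAMETERS
+// mode the Smi tag is folded into the shift amount, so the tagged index
+// never needs to be untagged explicitly.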
+compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
+ ElementsKind kind,
+ ParameterMode mode,
+ int base_size) {
+ bool is_double = IsFastDoubleElementsKind(kind);
+ int element_size_shift = is_double ? kDoubleSizeLog2 : kPointerSizeLog2;
+ int element_size = 1 << element_size_shift;
+ int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+ int32_t index = 0;
+ bool constant_index = false;
+ if (mode == SMI_PARAMETERS) {
+ element_size_shift -= kSmiShiftBits;
+ intptr_t temp = 0;
+ constant_index = ToIntPtrConstant(index_node, temp);
+ index = temp >> kSmiShiftBits;
+ } else {
+ constant_index = ToInt32Constant(index_node, index);
+ }
+ if (constant_index) {
+ return IntPtrConstant(base_size + element_size * index);
+ }
+ if (Is64() && mode == INTEGER_PARAMETERS) {
+ index_node = ChangeInt32ToInt64(index_node);
+ }
+ if (base_size == 0) {
+ return (element_size_shift >= 0)
+ ? WordShl(index_node, IntPtrConstant(element_size_shift))
+ : WordShr(index_node, IntPtrConstant(-element_size_shift));
+ }
+ return IntPtrAdd(
+ IntPtrConstant(base_size),
+ (element_size_shift >= 0)
+ ? WordShl(index_node, IntPtrConstant(element_size_shift))
+ : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+}
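+
+// Standalone sketch of the non-constant offset math above, assuming a
+// 64-bit target where a tagged Smi is the int32 value shifted left by 32
+// (kSmiShiftBits == 32). Folding the Smi untag into the element shift
+// turns base + (Untag(index) << log2(size)) into a single shift whose
+// effective amount may be negative, i.e. a right shift.
+namespace element_offset_sketch {
+inline intptr_t ElementOffset(intptr_t tagged_smi_index,
+                              int element_size_log2, int base_size) {
+  const int kSmiShiftBits = 32;  // assumed 64-bit Smi layout
+  int shift = element_size_log2 - kSmiShiftBits;
+  intptr_t scaled = shift >= 0 ? tagged_smi_index << shift
+                               : tagged_smi_index >> -shift;
+  return base_size + scaled;
+}
+}  // namespace element_offset_sketch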
+
+compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
+ Node* function =
+ LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset);
+ Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
+ return LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
+}
+
+void CodeStubAssembler::UpdateFeedback(compiler::Node* feedback,
+ compiler::Node* type_feedback_vector,
+ compiler::Node* slot_id) {
+ Label combine_feedback(this), record_feedback(this), end(this);
+
+ Node* previous_feedback =
+ LoadFixedArrayElement(type_feedback_vector, slot_id);
+ Node* is_uninitialized = WordEqual(
+ previous_feedback,
+ HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
+ BranchIf(is_uninitialized, &record_feedback, &combine_feedback);
+
+ Bind(&record_feedback);
+ {
+ StoreFixedArrayElement(type_feedback_vector, slot_id, SmiTag(feedback),
+ SKIP_WRITE_BARRIER);
+ Goto(&end);
+ }
+
+ Bind(&combine_feedback);
+ {
+ Node* untagged_previous_feedback = SmiUntag(previous_feedback);
+ Node* combined_feedback = Word32Or(untagged_previous_feedback, feedback);
+ StoreFixedArrayElement(type_feedback_vector, slot_id,
+ SmiTag(combined_feedback), SKIP_WRITE_BARRIER);
+ Goto(&end);
+ }
+
+ Bind(&end);
+}
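+
+// Sketch of the feedback update rule above, outside the assembler: the
+// feedback kinds form a lattice encoded in disjoint bits, so previously
+// recorded feedback is widened by a plain bitwise OR. The zero sentinel
+// below is an assumption of this sketch; the real vector stores a
+// dedicated uninitialized symbol.
+namespace update_feedback_sketch {
+inline int32_t UpdateFeedback(int32_t previous, int32_t incoming) {
+  const int32_t kUninitialized = 0;  // assumed sentinel
+  if (previous == kUninitialized) return incoming;  // record_feedback
+  return previous | incoming;                       // combine_feedback
+}
+}  // namespace update_feedback_sketch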
+
+compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
+ Variable var_receiver_map(this, MachineRepresentation::kTagged);
+ // TODO(ishell): defer blocks when it works.
+ Label load_smi_map(this /*, Label::kDeferred*/), load_receiver_map(this),
+ if_result(this);
+
+ Branch(WordIsSmi(receiver), &load_smi_map, &load_receiver_map);
+ Bind(&load_smi_map);
+ {
+ var_receiver_map.Bind(LoadRoot(Heap::kHeapNumberMapRootIndex));
+ Goto(&if_result);
+ }
+ Bind(&load_receiver_map);
+ {
+ var_receiver_map.Bind(LoadMap(receiver));
+ Goto(&if_result);
+ }
+ Bind(&if_result);
+ return var_receiver_map.value();
+}
+
+compiler::Node* CodeStubAssembler::TryMonomorphicCase(
+ const LoadICParameters* p, compiler::Node* receiver_map, Label* if_handler,
+ Variable* var_handler, Label* if_miss) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+ // TODO(ishell): add helper class that hides offset computations for a series
+ // of loads.
+ int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
+ Node* offset = ElementOffsetFromIndex(p->slot, FAST_HOLEY_ELEMENTS,
+ SMI_PARAMETERS, header_size);
+ Node* feedback = Load(MachineType::AnyTagged(), p->vector, offset);
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);
+
+ Node* handler = Load(MachineType::AnyTagged(), p->vector,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
+
+ var_handler->Bind(handler);
+ Goto(if_handler);
+ return feedback;
+}
+
+void CodeStubAssembler::HandlePolymorphicCase(
+ const LoadICParameters* p, compiler::Node* receiver_map,
+ compiler::Node* feedback, Label* if_handler, Variable* var_handler,
+ Label* if_miss, int unroll_count) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+ // Iterate {feedback} array.
+ const int kEntrySize = 2;
+
+ for (int i = 0; i < unroll_count; i++) {
+ Label next_entry(this);
+ Node* cached_map = LoadWeakCellValue(
+ LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize)));
+ GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ // Found, now call handler.
+ Node* handler =
+ LoadFixedArrayElement(feedback, Int32Constant(i * kEntrySize + 1));
+ var_handler->Bind(handler);
+ Goto(if_handler);
+
+ Bind(&next_entry);
+ }
+ Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+
+ // Loop from {unroll_count}*kEntrySize to {length}.
+ Variable var_index(this, MachineRepresentation::kWord32);
+ Label loop(this, &var_index);
+ var_index.Bind(Int32Constant(unroll_count * kEntrySize));
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* index = var_index.value();
+ GotoIf(Int32GreaterThanOrEqual(index, length), if_miss);
+
+ Node* cached_map =
+ LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+
+ Label next_entry(this);
+ GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ // Found, now call handler.
+ Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
+ var_handler->Bind(handler);
+ Goto(if_handler);
+
+ Bind(&next_entry);
+ var_index.Bind(Int32Add(index, Int32Constant(kEntrySize)));
+ Goto(&loop);
+ }
+}
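+
+// Sketch of the polymorphic feedback layout walked above: the vector slot
+// holds a FixedArray of (weak map, handler) pairs and lookup is a linear
+// scan, with the first |unroll_count| iterations peeled off. Plain
+// pointers stand in for tagged values here.
+namespace polymorphic_case_sketch {
+inline void* FindHandler(void* const* feedback, int length,
+                         const void* receiver_map) {
+  const int kEntrySize = 2;  // one weak map plus one handler per entry
+  for (int i = 0; i + 1 < length; i += kEntrySize) {
+    if (feedback[i] == receiver_map) return feedback[i + 1];
+  }
+  return nullptr;  // miss
+}
+}  // namespace polymorphic_case_sketch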
+
+compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
+ compiler::Node* map) {
+ // See v8::internal::StubCache::PrimaryOffset().
+ STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
+ // Compute the hash of the name (use entire hash field).
+ Node* hash_field = LoadNameHashField(name);
+ Assert(WordEqual(
+ Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
+ Int32Constant(0)));
+
+ // Using only the low bits in 64-bit mode is unlikely to increase the
+ // risk of collision even if the heap is spread over an area larger than
+ // 4Gb (and not at all if it isn't).
+ Node* hash = Int32Add(hash_field, map);
+ // Base the offset on a simple combination of name and map.
+ hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
+ uint32_t mask = (StubCache::kPrimaryTableSize - 1)
+ << StubCache::kCacheIndexShift;
+ return Word32And(hash, Int32Constant(mask));
+}
+
+compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
+ compiler::Node* name, compiler::Node* seed) {
+ // See v8::internal::StubCache::SecondaryOffset().
+
+ // Use the seed from the primary cache in the secondary cache.
+ Node* hash = Int32Sub(seed, name);
+ hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
+ int32_t mask = (StubCache::kSecondaryTableSize - 1)
+ << StubCache::kCacheIndexShift;
+ return Word32And(hash, Int32Constant(mask));
+}
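+
+// Standalone sketch of the two offset computations above; the magic
+// constants, table sizes and index shift are assumed placeholder values,
+// not V8's actual ones.
+namespace stub_cache_offset_sketch {
+const int kCacheIndexShift = 2;  // assumed (== Name::kHashShift)
+inline uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits) {
+  const uint32_t kPrimaryMagic = 0x3d532433;  // assumed
+  const uint32_t kPrimaryTableSize = 2048;    // assumed power of two
+  uint32_t hash = (name_hash + map_bits) ^ kPrimaryMagic;
+  return hash & ((kPrimaryTableSize - 1) << kCacheIndexShift);
+}
+inline uint32_t SecondaryOffset(uint32_t primary_seed, uint32_t name_bits) {
+  const uint32_t kSecondaryMagic = 0xb16ca6e5;  // assumed
+  const uint32_t kSecondaryTableSize = 512;     // assumed power of two
+  uint32_t hash = (primary_seed - name_bits) + kSecondaryMagic;
+  return hash & ((kSecondaryTableSize - 1) << kCacheIndexShift);
+}
+}  // namespace stub_cache_offset_sketch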
+
+enum CodeStubAssembler::StubCacheTable : int {
+ kPrimary = static_cast<int>(StubCache::kPrimary),
+ kSecondary = static_cast<int>(StubCache::kSecondary)
+};
+
+void CodeStubAssembler::TryProbeStubCacheTable(
+ StubCache* stub_cache, StubCacheTable table_id,
+ compiler::Node* entry_offset, compiler::Node* name, compiler::Node* map,
+ Label* if_handler, Variable* var_handler, Label* if_miss) {
+ StubCache::Table table = static_cast<StubCache::Table>(table_id);
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ Goto(if_miss);
+ return;
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ Goto(if_miss);
+ return;
+ }
+#endif
+ // The {entry_offset} holds the entry offset times four (due to masking
+ // and shifting optimizations).
+ const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
+ entry_offset = Int32Mul(entry_offset, Int32Constant(kMultiplier));
+
+ // Check that the key in the entry matches the name.
+ Node* key_base =
+ ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
+ Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
+ GotoIf(WordNotEqual(name, entry_key), if_miss);
+
+ // Get the map entry from the cache.
+ DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
+ stub_cache->key_reference(table).address());
+ Node* entry_map =
+ Load(MachineType::Pointer(), key_base,
+ Int32Add(entry_offset, Int32Constant(kPointerSize * 2)));
+ GotoIf(WordNotEqual(map, entry_map), if_miss);
+
+ DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
+ stub_cache->key_reference(table).address());
+ Node* code = Load(MachineType::Pointer(), key_base,
+ Int32Add(entry_offset, Int32Constant(kPointerSize)));
+
+ // We found the handler.
+ var_handler->Bind(code);
+ Goto(if_handler);
+}
+
+void CodeStubAssembler::TryProbeStubCache(
+ StubCache* stub_cache, compiler::Node* receiver, compiler::Node* name,
+ Label* if_handler, Variable* var_handler, Label* if_miss) {
+ Label try_secondary(this), miss(this);
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the {receiver} isn't a smi.
+ GotoIf(WordIsSmi(receiver), &miss);
+
+ Node* receiver_map = LoadMap(receiver);
+
+ // Probe the primary table.
+ Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
+ TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name,
+ receiver_map, if_handler, var_handler, &try_secondary);
+
+ Bind(&try_secondary);
+ {
+ // Probe the secondary table.
+ Node* secondary_offset = StubCacheSecondaryOffset(name, primary_offset);
+ TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
+ receiver_map, if_handler, var_handler, &miss);
+ }
+
+ Bind(&miss);
+ {
+ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+ Goto(if_miss);
+ }
+}
+
+Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
+ Variable var_intptr_key(this, MachineType::PointerRepresentation());
+ Label done(this, &var_intptr_key), key_is_smi(this);
+ GotoIf(WordIsSmi(key), &key_is_smi);
+ // Try to convert a heap number key to an intptr (must be an exact int32).
+ GotoUnless(WordEqual(LoadMap(key), HeapNumberMapConstant()), miss);
+ {
+ Node* value = LoadHeapNumberValue(key);
+ Node* int_value = RoundFloat64ToInt32(value);
+ GotoUnless(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
+ var_intptr_key.Bind(ChangeInt32ToIntPtr(int_value));
+ Goto(&done);
+ }
+
+ Bind(&key_is_smi);
+ {
+ var_intptr_key.Bind(SmiUntag(key));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return var_intptr_key.value();
+}
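+
+// Sketch of the key canonicalization above: a Smi is untagged directly; a
+// HeapNumber is accepted only if its double value round-trips through
+// int32 exactly (e.g. 2.5 or NaN miss). This sketch assumes |key| is
+// within int32 range, since casting an out-of-range double is undefined
+// behavior in plain C++, unlike the machine-level RoundFloat64ToInt32.
+namespace try_to_intptr_sketch {
+inline bool TryToIntptr(double key, intptr_t* out) {
+  int32_t int_value = static_cast<int32_t>(key);
+  if (static_cast<double>(int_value) != key) return false;  // miss
+  *out = int_value;
+  return true;
+}
+}  // namespace try_to_intptr_sketch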
+
+// |is_jsarray| should be non-zero for JSArrays.
+void CodeStubAssembler::EmitBoundsCheck(Node* object, Node* elements,
+ Node* intptr_key, Node* is_jsarray,
+ Label* miss) {
+ Variable var_length(this, MachineRepresentation::kTagged);
+ Label if_array(this), length_loaded(this, &var_length);
+ GotoUnless(WordEqual(is_jsarray, IntPtrConstant(0)), &if_array);
+ {
+ var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
+ Goto(&length_loaded);
+ }
+ Bind(&if_array);
+ {
+ var_length.Bind(SmiUntag(LoadObjectField(object, JSArray::kLengthOffset)));
+ Goto(&length_loaded);
+ }
+ Bind(&length_loaded);
+ GotoUnless(UintPtrLessThan(intptr_key, var_length.value()), miss);
+}
+
+// |key| should be an untagged intptr index.
+void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
+ Node* elements_kind, Node* key,
+ Label* if_hole, Label* rebox_double,
+ Variable* var_double_value,
+ Label* miss) {
+ Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
+ if_fast_double(this), if_fast_holey_double(this),
+ unimplemented_elements_kind(this);
+ STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ GotoIf(
+ IntPtrGreaterThanOrEqual(
+ elements_kind, IntPtrConstant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+ &if_typed_array);
+
+ int32_t kinds[] = {// Handled by if_fast_packed.
+ FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ // Handled by if_fast_holey.
+ FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
+ // Handled by if_fast_double.
+ FAST_DOUBLE_ELEMENTS,
+ // Handled by if_fast_holey_double.
+ FAST_HOLEY_DOUBLE_ELEMENTS};
+ Label* labels[] = {// FAST_{SMI,}_ELEMENTS
+ &if_fast_packed, &if_fast_packed,
+ // FAST_HOLEY_{SMI,}_ELEMENTS
+ &if_fast_holey, &if_fast_holey,
+ // FAST_DOUBLE_ELEMENTS
+ &if_fast_double,
+ // FAST_HOLEY_DOUBLE_ELEMENTS
+ &if_fast_holey_double};
+ Switch(elements_kind, &unimplemented_elements_kind, kinds, labels,
+ arraysize(kinds));
+ Bind(&unimplemented_elements_kind);
+ {
+ // Crash if we get here.
+ DebugBreak();
+ Goto(miss);
+ }
+
+ Bind(&if_fast_packed);
+ {
+ Comment("fast packed elements");
+ // TODO(jkummerow): The Load*Element helpers add movsxlq instructions
+ // on x64 which we don't need here, because |key| is an IntPtr already.
+ // Do something about that.
+ Return(LoadFixedArrayElement(elements, key));
+ }
+
+ Bind(&if_fast_holey);
+ {
+ Comment("fast holey elements");
+ Node* element = LoadFixedArrayElement(elements, key);
+ GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
+ Return(element);
+ }
+
+ Bind(&if_fast_double);
+ {
+ Comment("packed double elements");
+ var_double_value->Bind(
+ LoadFixedDoubleArrayElement(elements, key, MachineType::Float64()));
+ Goto(rebox_double);
+ }
+
+ Bind(&if_fast_holey_double);
+ {
+ Comment("holey double elements");
+ if (kPointerSize == kDoubleSize) {
+ Node* raw_element =
+ LoadFixedDoubleArrayElement(elements, key, MachineType::Uint64());
+ Node* the_hole = Int64Constant(kHoleNanInt64);
+ GotoIf(Word64Equal(raw_element, the_hole), if_hole);
+ } else {
+ Node* element_upper = LoadFixedDoubleArrayElement(
+ elements, key, MachineType::Uint32(), kIeeeDoubleExponentWordOffset);
+ GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
+ if_hole);
+ }
+ var_double_value->Bind(
+ LoadFixedDoubleArrayElement(elements, key, MachineType::Float64()));
+ Goto(rebox_double);
+ }
+
+ Bind(&if_typed_array);
+ {
+ Comment("typed elements");
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ Node* bitfield = LoadObjectField(buffer, JSArrayBuffer::kBitFieldOffset,
+ MachineType::Uint32());
+ Node* neutered_bit =
+ Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
+ GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), miss);
+ // Backing store = external_pointer + base_pointer.
+ Node* external_pointer =
+ LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* base_pointer =
+ LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+ Node* backing_store = IntPtrAdd(external_pointer, base_pointer);
+
+ Label uint8_elements(this), int8_elements(this), uint16_elements(this),
+ int16_elements(this), uint32_elements(this), int32_elements(this),
+ float32_elements(this), float64_elements(this);
+ Label* elements_kind_labels[] = {
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements};
+ int32_t elements_kinds[] = {
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+ UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
+ INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
+ const int kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
+ 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+ static_cast<size_t>(kTypedElementsKindCount));
+ Bind(&uint8_elements);
+ {
+ Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
+ Return(SmiTag(Load(MachineType::Uint8(), backing_store, key)));
+ }
+ Bind(&int8_elements);
+ {
+ Comment("INT8_ELEMENTS");
+ Return(SmiTag(Load(MachineType::Int8(), backing_store, key)));
+ }
+ Bind(&uint16_elements);
+ {
+ Comment("UINT16_ELEMENTS");
+ Node* index = WordShl(key, IntPtrConstant(1));
+ Return(SmiTag(Load(MachineType::Uint16(), backing_store, index)));
+ }
+ Bind(&int16_elements);
+ {
+ Comment("INT16_ELEMENTS");
+ Node* index = WordShl(key, IntPtrConstant(1));
+ Return(SmiTag(Load(MachineType::Int16(), backing_store, index)));
+ }
+ Bind(&uint32_elements);
+ {
+ Comment("UINT32_ELEMENTS");
+ Node* index = WordShl(key, IntPtrConstant(2));
+ Node* element = Load(MachineType::Uint32(), backing_store, index);
+ Return(ChangeUint32ToTagged(element));
+ }
+ Bind(&int32_elements);
+ {
+ Comment("INT32_ELEMENTS");
+ Node* index = WordShl(key, IntPtrConstant(2));
+ Node* element = Load(MachineType::Int32(), backing_store, index);
+ Return(ChangeInt32ToTagged(element));
+ }
+ Bind(&float32_elements);
+ {
+ Comment("FLOAT32_ELEMENTS");
+ Node* index = WordShl(key, IntPtrConstant(2));
+ Node* element = Load(MachineType::Float32(), backing_store, index);
+ var_double_value->Bind(ChangeFloat32ToFloat64(element));
+ Goto(rebox_double);
+ }
+ Bind(&float64_elements);
+ {
+ Comment("FLOAT64_ELEMENTS");
+ Node* index = WordShl(key, IntPtrConstant(3));
+ Node* element = Load(MachineType::Float64(), backing_store, index);
+ var_double_value->Bind(element);
+ Goto(rebox_double);
+ }
+ }
+}
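+
+// Sketch of the hole check in the holey-double path above: the hole is a
+// particular NaN bit pattern, so it must be compared as raw bits (any
+// ordinary comparison involving a NaN is false). On 32-bit targets only
+// the upper word is compared. The constants are assumed placeholders.
+namespace hole_nan_sketch {
+inline bool IsHole64(uint64_t raw_element_bits) {
+  const uint64_t kHoleNanBits = 0xFFF7FFFFFFF7FFFFull;  // assumed pattern
+  return raw_element_bits == kHoleNanBits;
+}
+inline bool IsHoleUpper32(uint32_t element_upper) {
+  const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // assumed pattern
+  return element_upper == kHoleNanUpper32;
+}
+}  // namespace hole_nan_sketch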
+
+void CodeStubAssembler::HandleLoadICHandlerCase(
+ const LoadICParameters* p, Node* handler, Label* miss,
+ ElementSupport support_elements) {
+ Comment("have_handler");
+ Label call_handler(this);
+ GotoUnless(WordIsSmi(handler), &call_handler);
+
+ // |handler| is a Smi, encoding what to do. See handler-configuration.h
+ // for the encoding format.
+ {
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
+
+ Node* handler_word = SmiUntag(handler);
+ if (support_elements == kSupportElements) {
+ Label property(this);
+ Node* handler_type =
+ WordAnd(handler_word, IntPtrConstant(LoadHandlerTypeBit::kMask));
+ GotoUnless(
+ WordEqual(handler_type, IntPtrConstant(kLoadICHandlerForElements)),
+ &property);
+
+ Comment("element_load");
+ Node* key = TryToIntptr(p->name, miss);
+ Node* elements = LoadElements(p->receiver);
+ Node* is_jsarray =
+ WordAnd(handler_word, IntPtrConstant(KeyedLoadIsJsArray::kMask));
+ EmitBoundsCheck(p->receiver, elements, key, is_jsarray, miss);
+ Label if_hole(this);
+
+ Node* elements_kind = BitFieldDecode<KeyedLoadElementsKind>(handler_word);
+
+ EmitElementLoad(p->receiver, elements, elements_kind, key, &if_hole,
+ &rebox_double, &var_double_value, miss);
+
+ Bind(&if_hole);
+ {
+ Comment("convert hole");
+ Node* convert_hole =
+ WordAnd(handler_word, IntPtrConstant(KeyedLoadConvertHole::kMask));
+ GotoIf(WordEqual(convert_hole, IntPtrConstant(0)), miss);
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ GotoUnless(
+ WordEqual(
+ LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
+ miss);
+ Return(UndefinedConstant());
+ }
+
+ Bind(&property);
+ Comment("property_load");
+ }
+
+ // |handler_word| is a field index as obtained by
+ // FieldIndex::GetLoadByFieldOffset():
+ Label inobject_double(this), out_of_object(this),
+ out_of_object_double(this);
+ Node* inobject_bit =
+ WordAnd(handler_word, IntPtrConstant(FieldOffsetIsInobject::kMask));
+ Node* double_bit =
+ WordAnd(handler_word, IntPtrConstant(FieldOffsetIsDouble::kMask));
+ Node* offset =
+ WordSar(handler_word, IntPtrConstant(FieldOffsetOffset::kShift));
+
+ GotoIf(WordEqual(inobject_bit, IntPtrConstant(0)), &out_of_object);
+
+ GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &inobject_double);
+ Return(LoadObjectField(p->receiver, offset));
+
+ Bind(&inobject_double);
+ if (FLAG_unbox_double_fields) {
+ var_double_value.Bind(
+ LoadObjectField(p->receiver, offset, MachineType::Float64()));
+ } else {
+ Node* mutable_heap_number = LoadObjectField(p->receiver, offset);
+ var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+ }
+ Goto(&rebox_double);
+
+ Bind(&out_of_object);
+ Node* properties = LoadProperties(p->receiver);
+ Node* value = LoadObjectField(properties, offset);
+ GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &out_of_object_double);
+ Return(value);
+
+ Bind(&out_of_object_double);
+ var_double_value.Bind(LoadHeapNumberValue(value));
+ Goto(&rebox_double);
+
+ Bind(&rebox_double);
+ Return(AllocateHeapNumberWithValue(var_double_value.value()));
+ }
+
+ // |handler| is a heap object. Must be code, call it.
+ Bind(&call_handler);
+ typedef LoadWithVectorDescriptor Descriptor;
+ TailCallStub(Descriptor(isolate()), handler, p->context,
+ Arg(Descriptor::kReceiver, p->receiver),
+ Arg(Descriptor::kName, p->name),
+ Arg(Descriptor::kSlot, p->slot),
+ Arg(Descriptor::kVector, p->vector));
+}
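+
+// Sketch of the Smi handler decoding above, with an assumed bit layout
+// (the real one lives in handler-configuration.h): bit 0 selects element
+// vs. property loads; for properties the word packs is-inobject,
+// is-double and a signed field offset.
+namespace handler_word_sketch {
+struct PropertyHandler {
+  bool is_element_load;
+  bool is_inobject;
+  bool is_double;
+  intptr_t field_offset;
+};
+inline PropertyHandler Decode(intptr_t handler_word) {
+  PropertyHandler h;
+  h.is_element_load = (handler_word & 1) != 0;     // assumed bit 0
+  h.is_inobject = (handler_word & (1 << 1)) != 0;  // assumed bit 1
+  h.is_double = (handler_word & (1 << 2)) != 0;    // assumed bit 2
+  h.field_offset = handler_word >> 3;              // assumed shift of 3
+  return h;
+}
+}  // namespace handler_word_sketch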
+
+void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+
+ // Check monomorphic case.
+ Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ {
+ HandleLoadICHandlerCase(p, var_handler.value(), &miss);
+ }
+
+ Bind(&try_polymorphic);
+ {
+ // Check polymorphic case.
+ Comment("LoadIC_try_polymorphic");
+ GotoUnless(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
+ &miss, 2);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
+
+ TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
+ &if_handler, &var_handler, &miss);
+ }
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+ p->slot, p->vector);
+ }
+}
+
+void CodeStubAssembler::KeyedLoadIC(const LoadICParameters* p) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ try_polymorphic_name(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+
+ // Check monomorphic case.
+ Node* feedback = TryMonomorphicCase(p, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ {
+ HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
+ }
+
+ Bind(&try_polymorphic);
+ {
+ // Check polymorphic case.
+ Comment("KeyedLoadIC_try_polymorphic");
+ GotoUnless(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ HandlePolymorphicCase(p, receiver_map, feedback, &if_handler, &var_handler,
+ &miss, 2);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ Comment("KeyedLoadIC_try_megamorphic");
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &try_polymorphic_name);
+ // TODO(jkummerow): Inline this? Or some of it?
+ TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
+ p->receiver, p->name, p->slot, p->vector);
+ }
+ Bind(&try_polymorphic_name);
+ {
+ // We might have a name in feedback, and a fixed array in the next slot.
+ Comment("KeyedLoadIC_try_polymorphic_name");
+ GotoUnless(WordEqual(feedback, p->name), &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ Node* offset = ElementOffsetFromIndex(
+ p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+ FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+ Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+ HandlePolymorphicCase(p, receiver_map, array, &if_handler, &var_handler,
+ &miss, 1);
+ }
+ Bind(&miss);
+ {
+ Comment("KeyedLoadIC_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+ p->name, p->slot, p->vector);
+ }
+}
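+
+// Sketch of the feedback dispatch order above: monomorphic weak-cell
+// match first, then a polymorphic map/handler array, then the megamorphic
+// stub cache, then the "polymorphic name" tier (the feedback is the key
+// itself and the map/handler array sits in the next vector slot), and
+// finally the runtime miss handler. Booleans stand in for the actual
+// tagged-value checks.
+namespace keyed_dispatch_sketch {
+enum class Tier {
+  kMonomorphic, kPolymorphic, kMegamorphic, kPolymorphicName, kMiss
+};
+inline Tier Classify(bool weak_cell_matches_map, bool is_fixed_array,
+                     bool is_megamorphic_symbol, bool feedback_equals_key) {
+  if (weak_cell_matches_map) return Tier::kMonomorphic;
+  if (is_fixed_array) return Tier::kPolymorphic;
+  if (is_megamorphic_symbol) return Tier::kMegamorphic;
+  if (feedback_equals_key) return Tier::kPolymorphicName;
+  return Tier::kMiss;
+}
+}  // namespace keyed_dispatch_sketch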
+
+void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
+ Label try_handler(this), miss(this);
+ Node* weak_cell =
+ LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
+ AssertInstanceType(weak_cell, WEAK_CELL_TYPE);
+
+ // Load value or try handler case if the {weak_cell} is cleared.
+ Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
+ AssertInstanceType(property_cell, PROPERTY_CELL_TYPE);
+
+ Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
+ GotoIf(WordEqual(value, TheHoleConstant()), &miss);
+ Return(value);
+
+ Bind(&try_handler);
+ {
+ Node* handler =
+ LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
+ GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ &miss);
+
+ // In this case {handler} must be a Code object.
+ AssertInstanceType(handler, CODE_TYPE);
+ LoadWithVectorDescriptor descriptor(isolate());
+ Node* native_context = LoadNativeContext(p->context);
+ Node* receiver = LoadFixedArrayElement(
+ native_context, Int32Constant(Context::EXTENSION_INDEX));
+ Node* fake_name = IntPtrConstant(0);
+ TailCallStub(descriptor, handler, p->context, receiver, fake_name, p->slot,
+ p->vector);
+ }
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context, p->slot,
+ p->vector);
+ }
+}
+
+Node* CodeStubAssembler::EnumLength(Node* map) {
+ Node* bitfield_3 = LoadMapBitField3(map);
+ Node* enum_length = BitFieldDecode<Map::EnumLengthBits>(bitfield_3);
+ return SmiTag(enum_length);
+}
+
+void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
+ Label* use_runtime) {
+ Variable current_js_object(this, MachineRepresentation::kTagged);
+ current_js_object.Bind(receiver);
+
+ Variable current_map(this, MachineRepresentation::kTagged);
+ current_map.Bind(LoadMap(current_js_object.value()));
+
+ // These variables are updated in the loop below.
+ Variable* loop_vars[2] = {&current_js_object, &current_map};
+ Label loop(this, 2, loop_vars), next(this);
+
+ // Check if the enum length field is properly initialized, indicating that
+ // there is an enum cache.
+ {
+ Node* invalid_enum_cache_sentinel =
+ SmiConstant(Smi::FromInt(kInvalidEnumCacheSentinel));
+ Node* enum_length = EnumLength(current_map.value());
+ BranchIfWordEqual(enum_length, invalid_enum_cache_sentinel, use_runtime,
+ &loop);
+ }
+
+ // Check that there are no elements. |current_js_object| contains
+ // the current JS object we've reached through the prototype chain.
+ Bind(&loop);
+ {
+ Label if_elements(this), if_no_elements(this);
+ Node* elements = LoadElements(current_js_object.value());
+ Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ // Check that there are no elements.
+ BranchIfWordEqual(elements, empty_fixed_array, &if_no_elements,
+ &if_elements);
+ Bind(&if_elements);
+ {
+ // Second chance, the object may be using the empty slow element
+ // dictionary.
+ Node* slow_empty_dictionary =
+ LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
+ BranchIfWordNotEqual(elements, slow_empty_dictionary, use_runtime,
+ &if_no_elements);
+ }
+
+ Bind(&if_no_elements);
+ {
+ // Update map prototype.
+ current_js_object.Bind(LoadMapPrototype(current_map.value()));
+ BranchIfWordEqual(current_js_object.value(), NullConstant(), use_cache,
+ &next);
+ }
+ }
+
+ Bind(&next);
+ {
+ // For all objects but the receiver, check that the cache is empty.
+ current_map.Bind(LoadMap(current_js_object.value()));
+ Node* enum_length = EnumLength(current_map.value());
+ Node* zero_constant = SmiConstant(Smi::FromInt(0));
+ BranchIf(WordEqual(enum_length, zero_constant), &loop, use_runtime);
+ }
+}
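+
+// Sketch of the enum-cache validity walk above, over a hypothetical
+// object model (fields illustrative): the receiver itself may have a
+// populated enum cache, but every object further up the prototype chain
+// must have no elements and an empty cache.
+namespace enum_cache_sketch {
+struct Obj {
+  Obj* prototype;     // null terminates the chain
+  int enum_length;    // -1 stands in for kInvalidEnumCacheSentinel
+  bool has_elements;  // false covers the empty slow dictionary case too
+};
+inline bool CanUseEnumCache(const Obj* receiver) {
+  if (receiver->enum_length < 0) return false;  // use_runtime
+  for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
+    if (o->has_elements) return false;                       // use_runtime
+    if (o != receiver && o->enum_length != 0) return false;  // use_runtime
+  }
+  return true;  // use_cache
+}
+}  // namespace enum_cache_sketch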
+
+Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
+ Node* slot,
+ Node* value) {
+ Node* size = IntPtrConstant(WeakCell::kSize);
+ Node* cell = Allocate(size, CodeStubAssembler::kPretenured);
+
+ // Initialize the WeakCell.
+ StoreObjectFieldRoot(cell, WeakCell::kMapOffset, Heap::kWeakCellMapRootIndex);
+ StoreObjectField(cell, WeakCell::kValueOffset, value);
+ StoreObjectFieldRoot(cell, WeakCell::kNextOffset,
+ Heap::kTheHoleValueRootIndex);
+
+ // Store the WeakCell in the feedback vector.
+ StoreFixedArrayElement(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER,
+ CodeStubAssembler::SMI_PARAMETERS);
+ return cell;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
new file mode 100644
index 0000000000..4bad541129
--- /dev/null
+++ b/deps/v8/src/code-stub-assembler.h
@@ -0,0 +1,609 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_STUB_ASSEMBLER_H_
+#define V8_CODE_STUB_ASSEMBLER_H_
+
+#include <functional>
+
+#include "src/compiler/code-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class CallInterfaceDescriptor;
+class StatsCounter;
+class StubCache;
+
+enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
+
+// Provides JavaScript-specific "macro-assembler" functionality on top of the
+// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
+// it's possible to add useful JavaScript-specific CodeAssembler "macros"
+// without modifying files in the compiler directory (and requiring a review
+// from a compiler directory OWNER).
+class CodeStubAssembler : public compiler::CodeAssembler {
+ public:
+ // Create with CallStub linkage.
+ // |result_size| specifies the number of results returned by the stub.
+ // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+ CodeStubAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name,
+ size_t result_size = 1);
+
+ // Create with JSCall linkage.
+ CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name);
+
+ enum AllocationFlag : uint8_t {
+ kNone = 0,
+ kDoubleAlignment = 1,
+ kPretenured = 1 << 1
+ };
+
+ typedef base::Flags<AllocationFlag> AllocationFlags;
+
+ enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS };
+
+ compiler::Node* BooleanMapConstant();
+ compiler::Node* EmptyStringConstant();
+ compiler::Node* HeapNumberMapConstant();
+ compiler::Node* NoContextConstant();
+ compiler::Node* NanConstant();
+ compiler::Node* NullConstant();
+ compiler::Node* MinusZeroConstant();
+ compiler::Node* UndefinedConstant();
+ compiler::Node* TheHoleConstant();
+ compiler::Node* HashSeed();
+ compiler::Node* StaleRegisterConstant();
+
+ compiler::Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
+
+ // Float64 operations.
+ compiler::Node* Float64Ceil(compiler::Node* x);
+ compiler::Node* Float64Floor(compiler::Node* x);
+ compiler::Node* Float64Round(compiler::Node* x);
+ compiler::Node* Float64Trunc(compiler::Node* x);
+
+ // Tag a Word as a Smi value.
+ compiler::Node* SmiTag(compiler::Node* value);
+ // Untag a Smi value as a Word.
+ compiler::Node* SmiUntag(compiler::Node* value);
+
+ // Smi conversions.
+ compiler::Node* SmiToFloat64(compiler::Node* value);
+ compiler::Node* SmiFromWord(compiler::Node* value) { return SmiTag(value); }
+ compiler::Node* SmiFromWord32(compiler::Node* value);
+ compiler::Node* SmiToWord(compiler::Node* value) { return SmiUntag(value); }
+ compiler::Node* SmiToWord32(compiler::Node* value);
+
+ // Smi operations.
+ compiler::Node* SmiAdd(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiAddWithOverflow(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiSubWithOverflow(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
+ compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
+ // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
+ compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b);
+ // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
+ compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b);
+
+ // Allocate an object of the given size.
+ compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
+ compiler::Node* Allocate(int size, AllocationFlags flags = kNone);
+ compiler::Node* InnerAllocate(compiler::Node* previous, int offset);
+ compiler::Node* InnerAllocate(compiler::Node* previous,
+ compiler::Node* offset);
+
+ void Assert(compiler::Node* condition);
+
+ // Check a value for smi-ness.
+ compiler::Node* WordIsSmi(compiler::Node* a);
+ // Check that the value is a positive smi.
+ compiler::Node* WordIsPositiveSmi(compiler::Node* a);
+
+ void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
+ Label* if_false) {
+ BranchIf(SmiEqual(a, b), if_true, if_false);
+ }
+
+ void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
+ Label* if_false) {
+ BranchIf(SmiLessThan(a, b), if_true, if_false);
+ }
+
+ void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
+ Label* if_true, Label* if_false) {
+ BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
+ }
+
+ void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
+ Label* if_false) {
+ BranchIfFloat64Equal(value, value, if_false, if_true);
+ }
+
+ // Branches to {if_true} if ToBoolean applied to {value} yields true,
+ // otherwise goes to {if_false}.
+ void BranchIfToBooleanIsTrue(compiler::Node* value, Label* if_true,
+ Label* if_false);
+
+ void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* lhs_map,
+ compiler::Node* rhs, compiler::Node* rhs_map,
+ Label* if_equal, Label* if_notequal);
+ void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* rhs,
+ Label* if_equal, Label* if_notequal) {
+ BranchIfSimd128Equal(lhs, LoadMap(lhs), rhs, LoadMap(rhs), if_equal,
+ if_notequal);
+ }
+
+ void BranchIfSameValueZero(compiler::Node* a, compiler::Node* b,
+ compiler::Node* context, Label* if_true,
+ Label* if_false);
+
+ void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
+ Label* if_true, Label* if_false);
+
+ // Load value from current frame by given offset in bytes.
+ compiler::Node* LoadFromFrame(int offset,
+ MachineType rep = MachineType::AnyTagged());
+ // Load value from current parent frame by given offset in bytes.
+ compiler::Node* LoadFromParentFrame(
+ int offset, MachineType rep = MachineType::AnyTagged());
+
+ // Load an object pointer from a buffer that isn't in the heap.
+ compiler::Node* LoadBufferObject(compiler::Node* buffer, int offset,
+ MachineType rep = MachineType::AnyTagged());
+ // Load a field from an object on the heap.
+ compiler::Node* LoadObjectField(compiler::Node* object, int offset,
+ MachineType rep = MachineType::AnyTagged());
+ compiler::Node* LoadObjectField(compiler::Node* object,
+ compiler::Node* offset,
+ MachineType rep = MachineType::AnyTagged());
+ // Load a SMI field and untag it.
+ compiler::Node* LoadAndUntagObjectField(compiler::Node* object, int offset);
+ // Load a SMI field, untag it, and convert to Word32.
+ compiler::Node* LoadAndUntagToWord32ObjectField(compiler::Node* object,
+ int offset);
+ // Load a SMI and untag it.
+ compiler::Node* LoadAndUntagSmi(compiler::Node* base, int index);
+ // Load a SMI root, untag it, and convert to Word32.
+ compiler::Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
+
+ // Load the floating point value of a HeapNumber.
+ compiler::Node* LoadHeapNumberValue(compiler::Node* object);
+ // Load the Map of a HeapObject.
+ compiler::Node* LoadMap(compiler::Node* object);
+ // Load the instance type of a HeapObject.
+ compiler::Node* LoadInstanceType(compiler::Node* object);
+ // Checks that the given heap object has the given instance type.
+ void AssertInstanceType(compiler::Node* object, InstanceType instance_type);
+ // Load the properties backing store of a JSObject.
+ compiler::Node* LoadProperties(compiler::Node* object);
+ // Load the elements backing store of a JSObject.
+ compiler::Node* LoadElements(compiler::Node* object);
+ // Load the length of a fixed array base instance.
+ compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
+ // Load the length of a fixed array base instance.
+ compiler::Node* LoadAndUntagFixedArrayBaseLength(compiler::Node* array);
+ // Load the bit field of a Map.
+ compiler::Node* LoadMapBitField(compiler::Node* map);
+ // Load bit field 2 of a map.
+ compiler::Node* LoadMapBitField2(compiler::Node* map);
+ // Load bit field 3 of a map.
+ compiler::Node* LoadMapBitField3(compiler::Node* map);
+ // Load the instance type of a map.
+ compiler::Node* LoadMapInstanceType(compiler::Node* map);
+ // Load the instance descriptors of a map.
+ compiler::Node* LoadMapDescriptors(compiler::Node* map);
+ // Load the prototype of a map.
+ compiler::Node* LoadMapPrototype(compiler::Node* map);
+ // Load the instance size of a Map.
+ compiler::Node* LoadMapInstanceSize(compiler::Node* map);
+ // Load the inobject properties count of a Map (valid only for JSObjects).
+ compiler::Node* LoadMapInobjectProperties(compiler::Node* map);
+ // Load the constructor of a Map (equivalent to Map::GetConstructor()).
+ compiler::Node* LoadMapConstructor(compiler::Node* map);
+
+ // Load the hash field of a name.
+ compiler::Node* LoadNameHashField(compiler::Node* name);
+ // Load the hash value of a name. If the {if_hash_not_computed} label is
+ // specified, it also checks whether the hash was actually computed.
+ compiler::Node* LoadNameHash(compiler::Node* name,
+ Label* if_hash_not_computed = nullptr);
+
+ // Load length field of a String object.
+ compiler::Node* LoadStringLength(compiler::Node* object);
+ // Load value field of a JSValue object.
+ compiler::Node* LoadJSValueValue(compiler::Node* object);
+ // Load value field of a WeakCell object.
+ compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
+ Label* if_cleared = nullptr);
+
+ compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
+
+ // Load an array element from a FixedArray.
+ compiler::Node* LoadFixedArrayElement(
+ compiler::Node* object, compiler::Node* int32_index,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ // Load an array element from a FixedArray, untag it and return it as Word32.
+ compiler::Node* LoadAndUntagToWord32FixedArrayElement(
+ compiler::Node* object, compiler::Node* int32_index,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ // Load an array element from a FixedDoubleArray.
+ compiler::Node* LoadFixedDoubleArrayElement(
+ compiler::Node* object, compiler::Node* int32_index,
+ MachineType machine_type, int additional_offset = 0,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS);
+
+ // Context manipulation
+ compiler::Node* LoadNativeContext(compiler::Node* context);
+
+ compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
+ compiler::Node* native_context);
+
+ // Store the floating point value of a HeapNumber.
+ compiler::Node* StoreHeapNumberValue(compiler::Node* object,
+ compiler::Node* value);
+ // Store a field to an object on the heap.
+ compiler::Node* StoreObjectField(
+ compiler::Node* object, int offset, compiler::Node* value);
+ compiler::Node* StoreObjectFieldNoWriteBarrier(
+ compiler::Node* object, int offset, compiler::Node* value,
+ MachineRepresentation rep = MachineRepresentation::kTagged);
+ // Store the Map of a HeapObject.
+ compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
+ compiler::Node* map);
+ compiler::Node* StoreObjectFieldRoot(compiler::Node* object, int offset,
+ Heap::RootListIndex root);
+ // Store an array element to a FixedArray.
+ compiler::Node* StoreFixedArrayElement(
+ compiler::Node* object, compiler::Node* index, compiler::Node* value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS);
+
+ compiler::Node* StoreFixedDoubleArrayElement(
+ compiler::Node* object, compiler::Node* index, compiler::Node* value,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS);
+
+ // Allocate a HeapNumber without initializing its value.
+ compiler::Node* AllocateHeapNumber();
+ // Allocate a HeapNumber with a specific value.
+ compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value);
+ // Allocate a SeqOneByteString with the given length.
+ compiler::Node* AllocateSeqOneByteString(int length);
+ compiler::Node* AllocateSeqOneByteString(compiler::Node* context,
+ compiler::Node* length);
+ // Allocate a SeqTwoByteString with the given length.
+ compiler::Node* AllocateSeqTwoByteString(int length);
+ compiler::Node* AllocateSeqTwoByteString(compiler::Node* context,
+ compiler::Node* length);
+ // Allocate a JSArray.
+ compiler::Node* AllocateJSArray(ElementsKind kind, compiler::Node* array_map,
+ compiler::Node* capacity,
+ compiler::Node* length,
+ compiler::Node* allocation_site = nullptr,
+ ParameterMode mode = INTEGER_PARAMETERS);
+
+ compiler::Node* AllocateFixedArray(ElementsKind kind,
+ compiler::Node* capacity,
+ ParameterMode mode = INTEGER_PARAMETERS,
+ AllocationFlags flags = kNone);
+
+ void FillFixedArrayWithHole(ElementsKind kind, compiler::Node* array,
+ compiler::Node* from_index,
+ compiler::Node* to_index,
+ ParameterMode mode = INTEGER_PARAMETERS);
+
+ void CopyFixedArrayElements(
+ ElementsKind kind, compiler::Node* from_array, compiler::Node* to_array,
+ compiler::Node* element_count,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ ParameterMode mode = INTEGER_PARAMETERS);
+
+ compiler::Node* CalculateNewElementsCapacity(
+ compiler::Node* old_capacity, ParameterMode mode = INTEGER_PARAMETERS);
+
+ compiler::Node* CheckAndGrowElementsCapacity(compiler::Node* context,
+ compiler::Node* elements,
+ ElementsKind kind,
+ compiler::Node* key,
+ Label* fail);
+
+ // Allocation site manipulation
+ void InitializeAllocationMemento(compiler::Node* base_allocation,
+ int base_allocation_size,
+ compiler::Node* allocation_site);
+
+ compiler::Node* TruncateTaggedToFloat64(compiler::Node* context,
+ compiler::Node* value);
+ compiler::Node* TruncateTaggedToWord32(compiler::Node* context,
+ compiler::Node* value);
+ // Truncate the floating point value of a HeapNumber to an Int32.
+ compiler::Node* TruncateHeapNumberValueToWord32(compiler::Node* object);
+
+ // Conversions.
+ compiler::Node* ChangeFloat64ToTagged(compiler::Node* value);
+ compiler::Node* ChangeInt32ToTagged(compiler::Node* value);
+ compiler::Node* ChangeUint32ToTagged(compiler::Node* value);
+
+ // Type conversions.
+ // Throws a TypeError for {method_name} if {value} is not coercible to Object,
+ // or returns the {value} converted to a String otherwise.
+ compiler::Node* ToThisString(compiler::Node* context, compiler::Node* value,
+ char const* method_name);
+ // Throws a TypeError for {method_name} if {value} is neither of the given
+ // {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or
+ // returns the {value} (or wrapped value) otherwise.
+ compiler::Node* ToThisValue(compiler::Node* context, compiler::Node* value,
+ PrimitiveType primitive_type,
+ char const* method_name);
+
+ // String helpers.
+ // Load a character from a String (might flatten a ConsString).
+ compiler::Node* StringCharCodeAt(compiler::Node* string,
+ compiler::Node* smi_index);
+ // Return the single character string with only {code}.
+ compiler::Node* StringFromCharCode(compiler::Node* code);
+
+ // Returns a node containing the decoded (unsigned) value of the bit
+ // field |T| in |word32|.
+ template <typename T>
+ compiler::Node* BitFieldDecode(compiler::Node* word32) {
+ return BitFieldDecode(word32, T::kShift, T::kMask);
+ }
+
+ compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
+ uint32_t mask);
+
+ void SetCounter(StatsCounter* counter, int value);
+ void IncrementCounter(StatsCounter* counter, int delta);
+ void DecrementCounter(StatsCounter* counter, int delta);
+
+ // Generates "if (false) goto label" code. Useful for marking a label as
+ // "live" to avoid assertion failures during graph building. In the resulting
+ // code this check will be eliminated.
+ void Use(Label* label);
+
+ // Various building blocks for stubs doing property lookups.
+ void TryToName(compiler::Node* key, Label* if_keyisindex, Variable* var_index,
+ Label* if_keyisunique, Label* if_bailout);
+
+ // Calculates array index for given dictionary entry and entry field.
+ // See Dictionary::EntryToIndex().
+ template <typename Dictionary>
+ compiler::Node* EntryToIndex(compiler::Node* entry, int field_index);
+ template <typename Dictionary>
+ compiler::Node* EntryToIndex(compiler::Node* entry) {
+ return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
+ }
+
+ // Looks up an entry in a NameDictionaryBase successor. If the entry is
+ // found, control goes to {if_found} and {var_name_index} contains the index
+ // of the found entry's key field. If the key is not found, control goes to
+ // {if_not_found}.
+ static const int kInlinedDictionaryProbes = 4;
+ template <typename Dictionary>
+ void NameDictionaryLookup(compiler::Node* dictionary,
+ compiler::Node* unique_name, Label* if_found,
+ Variable* var_name_index, Label* if_not_found,
+ int inlined_probes = kInlinedDictionaryProbes);
+
+ compiler::Node* ComputeIntegerHash(compiler::Node* key, compiler::Node* seed);
+
+ template <typename Dictionary>
+ void NumberDictionaryLookup(compiler::Node* dictionary, compiler::Node* key,
+ Label* if_found, Variable* var_entry,
+ Label* if_not_found);
+
+ // Tries to check whether {object} has an own {unique_name} property.
+ void TryHasOwnProperty(compiler::Node* object, compiler::Node* map,
+ compiler::Node* instance_type,
+ compiler::Node* unique_name, Label* if_found,
+ Label* if_not_found, Label* if_bailout);
+
+ // Tries to get {object}'s own {unique_name} property value. If the property
+ // is an accessor, its getter is also called. If the property is a double
+ // field, the value is re-wrapped in an immutable heap number.
+ void TryGetOwnProperty(compiler::Node* context, compiler::Node* receiver,
+ compiler::Node* object, compiler::Node* map,
+ compiler::Node* instance_type,
+ compiler::Node* unique_name, Label* if_found,
+ Variable* var_value, Label* if_not_found,
+ Label* if_bailout);
+
+ void LoadPropertyFromFastObject(compiler::Node* object, compiler::Node* map,
+ compiler::Node* descriptors,
+ compiler::Node* name_index,
+ Variable* var_details, Variable* var_value);
+
+ void LoadPropertyFromNameDictionary(compiler::Node* dictionary,
+ compiler::Node* entry,
+ Variable* var_details,
+ Variable* var_value);
+
+ void LoadPropertyFromGlobalDictionary(compiler::Node* dictionary,
+ compiler::Node* entry,
+ Variable* var_details,
+ Variable* var_value, Label* if_deleted);
+
+ // Generic property lookup generator. If the {object} is fast and the
+ // {unique_name} property is found, control goes to the {if_found_fast}
+ // label, and {var_meta_storage} and {var_name_index} will contain the
+ // DescriptorArray and the index of the descriptor's name, respectively.
+ // If the {object} is slow or global, control goes to {if_found_dict} or
+ // {if_found_global}, and {var_meta_storage} and {var_name_index} will
+ // contain a dictionary and the index of the key field of the found entry.
+ // If the property is not found, or the given lookup is not supported,
+ // control goes to {if_not_found} or {if_bailout}, respectively.
+ //
+ // Note: this code does not check if the global dictionary points to a
+ // deleted entry! This has to be done by the caller.
+ void TryLookupProperty(compiler::Node* object, compiler::Node* map,
+ compiler::Node* instance_type,
+ compiler::Node* unique_name, Label* if_found_fast,
+ Label* if_found_dict, Label* if_found_global,
+ Variable* var_meta_storage, Variable* var_name_index,
+ Label* if_not_found, Label* if_bailout);
+
+ void TryLookupElement(compiler::Node* object, compiler::Node* map,
+ compiler::Node* instance_type, compiler::Node* index,
+ Label* if_found, Label* if_not_found,
+ Label* if_bailout);
+
+ // This is the type of a lookup-in-holder generator function. For a property
+ // lookup the {key} is guaranteed to be a unique name; for an element lookup
+ // it is an Int32 index.
+ typedef std::function<void(compiler::Node* receiver, compiler::Node* holder,
+ compiler::Node* map, compiler::Node* instance_type,
+ compiler::Node* key, Label* next_holder,
+ Label* if_bailout)>
+ LookupInHolder;
+
+ // Generic property prototype chain lookup generator.
+ // For properties it generates lookup using given {lookup_property_in_holder}
+ // and for elements it uses {lookup_element_in_holder}.
+ // Upon reaching the end of the prototype chain, control goes to {if_end}.
+ // If it can't handle the {receiver}/{key} case, control goes to
+ // {if_bailout}.
+ void TryPrototypeChainLookup(compiler::Node* receiver, compiler::Node* key,
+ LookupInHolder& lookup_property_in_holder,
+ LookupInHolder& lookup_element_in_holder,
+ Label* if_end, Label* if_bailout);
+
+ // Instanceof helpers.
+ // ES6 section 7.3.19 OrdinaryHasInstance (C, O)
+ compiler::Node* OrdinaryHasInstance(compiler::Node* context,
+ compiler::Node* callable,
+ compiler::Node* object);
+
+ // LoadIC helpers.
+ struct LoadICParameters {
+ LoadICParameters(compiler::Node* context, compiler::Node* receiver,
+ compiler::Node* name, compiler::Node* slot,
+ compiler::Node* vector)
+ : context(context),
+ receiver(receiver),
+ name(name),
+ slot(slot),
+ vector(vector) {}
+
+ compiler::Node* context;
+ compiler::Node* receiver;
+ compiler::Node* name;
+ compiler::Node* slot;
+ compiler::Node* vector;
+ };
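+
+ // Illustrative usage sketch (the parameter indices here are assumptions
+ // for exposition, not the real stub wiring):
+ //   LoadICParameters p(assembler.Parameter(3),  // context
+ //                      assembler.Parameter(0),  // receiver
+ //                      assembler.Parameter(1),  // name
+ //                      assembler.Parameter(2),  // slot
+ //                      assembler.LoadTypeFeedbackVectorForStub());
+ //   assembler.LoadIC(&p);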
+
+ // Load type feedback vector from the stub caller's frame.
+ compiler::Node* LoadTypeFeedbackVectorForStub();
+
+ // Update the type feedback vector.
+ void UpdateFeedback(compiler::Node* feedback,
+ compiler::Node* type_feedback_vector,
+ compiler::Node* slot_id);
+
+ compiler::Node* LoadReceiverMap(compiler::Node* receiver);
+
+ // Checks the monomorphic case. Returns the {feedback} entry of the vector.
+ compiler::Node* TryMonomorphicCase(const LoadICParameters* p,
+ compiler::Node* receiver_map,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+ void HandlePolymorphicCase(const LoadICParameters* p,
+ compiler::Node* receiver_map,
+ compiler::Node* feedback, Label* if_handler,
+ Variable* var_handler, Label* if_miss,
+ int unroll_count);
+
+ compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
+ compiler::Node* map);
+
+ compiler::Node* StubCacheSecondaryOffset(compiler::Node* name,
+ compiler::Node* seed);
+
+ // This enum is used here as a replacement for StubCache::Table to avoid
+ // including the stub cache header.
+ enum StubCacheTable : int;
+
+ void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
+ compiler::Node* entry_offset,
+ compiler::Node* name, compiler::Node* map,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+
+ void TryProbeStubCache(StubCache* stub_cache, compiler::Node* receiver,
+ compiler::Node* name, Label* if_handler,
+ Variable* var_handler, Label* if_miss);
+
+ void LoadIC(const LoadICParameters* p);
+ void LoadGlobalIC(const LoadICParameters* p);
+ void KeyedLoadIC(const LoadICParameters* p);
+
+ // Get the enumerable length from |map| and return the result as a Smi.
+ compiler::Node* EnumLength(compiler::Node* map);
+
+ // Check the cache validity for |receiver|. Branch to |use_cache| if
+ // the cache is valid, otherwise branch to |use_runtime|.
+ void CheckEnumCache(compiler::Node* receiver,
+ CodeStubAssembler::Label* use_cache,
+ CodeStubAssembler::Label* use_runtime);
+
+ // Create a new weak cell with a specified value and install it into a
+ // feedback vector.
+ compiler::Node* CreateWeakCellInFeedbackVector(
+ compiler::Node* feedback_vector, compiler::Node* slot,
+ compiler::Node* value);
+
+ compiler::Node* GetFixedAarrayAllocationSize(compiler::Node* element_count,
+ ElementsKind kind,
+ ParameterMode mode) {
+ return ElementOffsetFromIndex(element_count, kind, mode,
+ FixedArray::kHeaderSize);
+ }
+
+ private:
+ enum ElementSupport { kOnlyProperties, kSupportElements };
+
+ void HandleLoadICHandlerCase(
+ const LoadICParameters* p, compiler::Node* handler, Label* miss,
+ ElementSupport support_elements = kOnlyProperties);
+ compiler::Node* TryToIntptr(compiler::Node* key, Label* miss);
+ void EmitBoundsCheck(compiler::Node* object, compiler::Node* elements,
+ compiler::Node* intptr_key, compiler::Node* is_jsarray,
+ Label* miss);
+ void EmitElementLoad(compiler::Node* object, compiler::Node* elements,
+ compiler::Node* elements_kind, compiler::Node* key,
+ Label* if_hole, Label* rebox_double,
+ Variable* var_double_value, Label* miss);
+
+ compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
+ ElementsKind kind, ParameterMode mode,
+ int base_size = 0);
+
+ compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
+ AllocationFlags flags,
+ compiler::Node* top_address,
+ compiler::Node* limit_address);
+ compiler::Node* AllocateRawUnaligned(compiler::Node* size_in_bytes,
+ AllocationFlags flags,
+ compiler::Node* top_address,
+ compiler::Node* limit_address);
+
+ compiler::Node* SmiShiftBitsConstant();
+
+ static const int kElementLoopUnrollThreshold = 8;
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
+
+} // namespace internal
+} // namespace v8
+#endif // V8_CODE_STUB_ASSEMBLER_H_
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index c08be58945..fa7a49ebc4 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -4,6 +4,8 @@
#include "src/code-stubs.h"
+#include <memory>
+
#include "src/bailout-reason.h"
#include "src/crankshaft/hydrogen.h"
#include "src/crankshaft/lithium.h"
@@ -42,7 +44,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
descriptor_(code_stub),
context_(NULL) {
int parameter_count = GetParameterCount();
- parameters_.Reset(new HParameter*[parameter_count]);
+ parameters_.reset(new HParameter*[parameter_count]);
}
virtual bool BuildGraph();
@@ -81,51 +83,15 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildPushElement(HValue* object, HValue* argc,
HValue* argument_elements, ElementsKind kind);
- enum ArgumentClass {
- NONE,
- SINGLE,
- MULTIPLE
- };
-
HValue* UnmappedCase(HValue* elements, HValue* key, HValue* value);
HValue* EmitKeyedSloppyArguments(HValue* receiver, HValue* key,
HValue* value);
- HValue* BuildArrayConstructor(ElementsKind kind,
- AllocationSiteOverrideMode override_mode,
- ArgumentClass argument_class);
- HValue* BuildInternalArrayConstructor(ElementsKind kind,
- ArgumentClass argument_class);
-
- // BuildCheckAndInstallOptimizedCode emits code to install the optimized
- // function found in the optimized code map at map_index in js_function, if
- // the function at map_index matches the given native_context. Builder is
- // left in the "Then()" state after the install.
- void BuildCheckAndInstallOptimizedCode(HValue* js_function,
- HValue* native_context,
- IfBuilder* builder,
- HValue* optimized_map,
- HValue* map_index);
- void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
- HValue* code_object, HValue* literals);
- void BuildInstallCode(HValue* js_function, HValue* shared_info);
-
- HInstruction* LoadFromOptimizedCodeMap(HValue* optimized_map,
- HValue* iterator,
- int field_offset);
- void BuildInstallFromOptimizedCodeMap(HValue* js_function,
- HValue* shared_info,
- HValue* native_context);
-
HValue* BuildToString(HValue* input, bool convert);
HValue* BuildToPrimitive(HValue* input, HValue* input_map);
private:
- HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
- HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
- ElementsKind kind);
-
- base::SmartArrayPointer<HParameter*> parameters_;
+ std::unique_ptr<HParameter* []> parameters_;
HValue* arguments_length_;
CompilationInfo* info_;
CodeStub* code_stub_;
@@ -223,6 +189,8 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
: CodeStubGraphBuilderBase(info, stub) {}
+ typedef typename Stub::Descriptor Descriptor;
+
protected:
virtual HValue* BuildCodeStub() {
if (casted_stub()->IsUninitialized()) {
@@ -243,7 +211,7 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
IfBuilder builder(this);
builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined);
builder.Then();
- builder.ElseDeopt(Deoptimizer::kForcedDeoptToRuntime);
+ builder.ElseDeopt(DeoptimizeReason::kForcedDeoptToRuntime);
return undefined;
}
@@ -280,12 +248,56 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
return new_object;
}
+Handle<Code> HydrogenCodeStub::GenerateRuntimeTailCall(
+ CodeStubDescriptor* descriptor) {
+ const char* name = CodeStub::MajorName(MajorKey());
+ Zone zone(isolate()->allocator());
+ CallInterfaceDescriptor interface_descriptor(GetCallInterfaceDescriptor());
+ CodeStubAssembler assembler(isolate(), &zone, interface_descriptor,
+ GetCodeFlags(), name);
+ int total_params = interface_descriptor.GetStackParameterCount() +
+ interface_descriptor.GetRegisterParameterCount();
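+ // Parameter(total_params) holds the stub's context, which TailCallRuntime
+ // takes right after the runtime function id; the stub's own parameters
+ // follow in their declared order.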
+ switch (total_params) {
+ case 0:
+ assembler.TailCallRuntime(descriptor->miss_handler_id(),
+ assembler.Parameter(0));
+ break;
+ case 1:
+ assembler.TailCallRuntime(descriptor->miss_handler_id(),
+ assembler.Parameter(1), assembler.Parameter(0));
+ break;
+ case 2:
+ assembler.TailCallRuntime(descriptor->miss_handler_id(),
+ assembler.Parameter(2), assembler.Parameter(0),
+ assembler.Parameter(1));
+ break;
+ case 3:
+ assembler.TailCallRuntime(descriptor->miss_handler_id(),
+ assembler.Parameter(3), assembler.Parameter(0),
+ assembler.Parameter(1), assembler.Parameter(2));
+ break;
+ case 4:
+ assembler.TailCallRuntime(descriptor->miss_handler_id(),
+ assembler.Parameter(4), assembler.Parameter(0),
+ assembler.Parameter(1), assembler.Parameter(2),
+ assembler.Parameter(3));
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ return assembler.GenerateCode();
+}
template <class Stub>
static Handle<Code> DoGenerateCode(Stub* stub) {
Isolate* isolate = stub->isolate();
CodeStubDescriptor descriptor(stub);
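+ // Under --minimal, skip Hydrogen graph building for stubs that have a miss
+ // handler and simply tail-call the corresponding runtime function.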
+ if (FLAG_minimal && descriptor.has_miss_handler()) {
+ return stub->GenerateRuntimeTailCall(&descriptor);
+ }
+
// If we are uninitialized we can use a light-weight stub to enter
// the runtime that is significantly faster than using the standard
// stub-failure deopt mechanism.
@@ -298,8 +310,8 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
timer.Start();
}
Zone zone(isolate->allocator());
- CompilationInfo info(CodeStub::MajorName(stub->MajorKey()), isolate, &zone,
- stub->GetCodeFlags());
+ CompilationInfo info(CStrVector(CodeStub::MajorName(stub->MajorKey())),
+ isolate, &zone, stub->GetCodeFlags());
// Parameter count is number of stack parameters.
int parameter_count = descriptor.GetStackParameterCount();
if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
@@ -321,7 +333,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
template <>
HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
info()->MarkAsSavesCallerDoubles();
- HValue* number = GetParameter(NumberToStringStub::kNumber);
+ HValue* number = GetParameter(Descriptor::kArgument);
return BuildNumberToString(number, Type::Number());
}
@@ -331,181 +343,22 @@ Handle<Code> NumberToStringStub::GenerateCode() {
}
-// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
-template <>
-HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
- Factory* factory = isolate()->factory();
- HConstant* number_string = Add<HConstant>(factory->number_string());
- HValue* object = GetParameter(TypeofStub::kObject);
-
- IfBuilder is_smi(this);
- HValue* smi_check = is_smi.If<HIsSmiAndBranch>(object);
- is_smi.Then();
- { Push(number_string); }
- is_smi.Else();
- {
- IfBuilder is_number(this);
- is_number.If<HCompareMap>(object, isolate()->factory()->heap_number_map());
- is_number.Then();
- { Push(number_string); }
- is_number.Else();
- {
- HValue* map = AddLoadMap(object, smi_check);
- HValue* instance_type = Add<HLoadNamedField>(
- map, nullptr, HObjectAccess::ForMapInstanceType());
- IfBuilder is_string(this);
- is_string.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(FIRST_NONSTRING_TYPE), Token::LT);
- is_string.Then();
- { Push(Add<HConstant>(factory->string_string())); }
- is_string.Else();
- {
- HConstant* object_string = Add<HConstant>(factory->object_string());
- IfBuilder is_oddball(this);
- is_oddball.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(ODDBALL_TYPE), Token::EQ);
- is_oddball.Then();
- {
- Push(Add<HLoadNamedField>(object, nullptr,
- HObjectAccess::ForOddballTypeOf()));
- }
- is_oddball.Else();
- {
- IfBuilder is_symbol(this);
- is_symbol.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(SYMBOL_TYPE), Token::EQ);
- is_symbol.Then();
- { Push(Add<HConstant>(factory->symbol_string())); }
- is_symbol.Else();
- {
- HValue* bit_field = Add<HLoadNamedField>(
- map, nullptr, HObjectAccess::ForMapBitField());
- HValue* bit_field_masked = AddUncasted<HBitwise>(
- Token::BIT_AND, bit_field,
- Add<HConstant>((1 << Map::kIsCallable) |
- (1 << Map::kIsUndetectable)));
- IfBuilder is_function(this);
- is_function.If<HCompareNumericAndBranch>(
- bit_field_masked, Add<HConstant>(1 << Map::kIsCallable),
- Token::EQ);
- is_function.Then();
- { Push(Add<HConstant>(factory->function_string())); }
- is_function.Else();
- {
-#define SIMD128_BUILDER_OPEN(TYPE, Type, type, lane_count, lane_type) \
- IfBuilder is_##type(this); \
- is_##type.If<HCompareObjectEqAndBranch>( \
- map, Add<HConstant>(factory->type##_map())); \
- is_##type.Then(); \
- { Push(Add<HConstant>(factory->type##_string())); } \
- is_##type.Else(); {
- SIMD128_TYPES(SIMD128_BUILDER_OPEN)
-#undef SIMD128_BUILDER_OPEN
- // Is it an undetectable object?
- IfBuilder is_undetectable(this);
- is_undetectable.If<HCompareNumericAndBranch>(
- bit_field_masked, graph()->GetConstant0(), Token::NE);
- is_undetectable.Then();
- {
- // typeof an undetectable object is 'undefined'.
- Push(Add<HConstant>(factory->undefined_string()));
- }
- is_undetectable.Else();
- {
- // For any kind of object not handled above, the spec rule for
- // host objects gives that it is okay to return "object".
- Push(object_string);
- }
-#define SIMD128_BUILDER_CLOSE(TYPE, Type, type, lane_count, lane_type) }
- SIMD128_TYPES(SIMD128_BUILDER_CLOSE)
-#undef SIMD128_BUILDER_CLOSE
- }
- is_function.End();
- }
- is_symbol.End();
- }
- is_oddball.End();
- }
- is_string.End();
- }
- is_number.End();
- }
- is_smi.End();
-
- return environment()->Pop();
-}
-
-
-Handle<Code> TypeofStub::GenerateCode() { return DoGenerateCode(this); }
-
-
-template <>
-HValue* CodeStubGraphBuilder<FastCloneRegExpStub>::BuildCodeStub() {
- HValue* closure = GetParameter(0);
- HValue* literal_index = GetParameter(1);
-
- // This stub is very performance sensitive, the generated code must be tuned
- // so that it doesn't build and eager frame.
- info()->MarkMustNotHaveEagerFrame();
-
- HValue* literals_array = Add<HLoadNamedField>(
- closure, nullptr, HObjectAccess::ForLiteralsPointer());
- HInstruction* boilerplate = Add<HLoadKeyed>(
- literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
- NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
-
- IfBuilder if_notundefined(this);
- if_notundefined.IfNot<HCompareObjectEqAndBranch>(
- boilerplate, graph()->GetConstantUndefined());
- if_notundefined.Then();
- {
- int result_size =
- JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- HValue* result =
- Add<HAllocate>(Add<HConstant>(result_size), HType::JSObject(),
- NOT_TENURED, JS_REGEXP_TYPE);
- Add<HStoreNamedField>(
- result, HObjectAccess::ForMap(),
- Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap()));
- Add<HStoreNamedField>(
- result, HObjectAccess::ForPropertiesPointer(),
- Add<HLoadNamedField>(boilerplate, nullptr,
- HObjectAccess::ForPropertiesPointer()));
- Add<HStoreNamedField>(
- result, HObjectAccess::ForElementsPointer(),
- Add<HLoadNamedField>(boilerplate, nullptr,
- HObjectAccess::ForElementsPointer()));
- for (int offset = JSObject::kHeaderSize; offset < result_size;
- offset += kPointerSize) {
- HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(offset);
- Add<HStoreNamedField>(result, access,
- Add<HLoadNamedField>(boilerplate, nullptr, access));
- }
- Push(result);
- }
- if_notundefined.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
- if_notundefined.End();
-
- return Pop();
-}
-
-
-Handle<Code> FastCloneRegExpStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
- HValue* closure = GetParameter(0);
- HValue* literal_index = GetParameter(1);
+ HValue* closure = GetParameter(Descriptor::kClosure);
+ HValue* literal_index = GetParameter(Descriptor::kLiteralIndex);
+ // TODO(turbofan): This code stub has regressed to needing a frame on ia32 at
+ // some point and wasn't caught since it wasn't built in the snapshot. We
+ // should probably just replace it with a TurboFan stub rather than fixing it.
+#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
// This stub is very performance sensitive; the generated code must be tuned
// so that it doesn't build an eager frame.
info()->MarkMustNotHaveEagerFrame();
+#endif
HValue* literals_array = Add<HLoadNamedField>(
closure, nullptr, HObjectAccess::ForLiteralsPointer());
@@ -557,7 +410,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
if_fixed_cow.End();
zero_capacity.End();
- checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateLiterals);
+ checker.ElseDeopt(DeoptimizeReason::kUninitializedBoilerplateLiterals);
checker.End();
return environment()->Pop();
@@ -568,188 +421,6 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
return DoGenerateCode(this);
}
-
-template <>
-HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
- HValue* undefined = graph()->GetConstantUndefined();
- HValue* closure = GetParameter(0);
- HValue* literal_index = GetParameter(1);
-
- HValue* literals_array = Add<HLoadNamedField>(
- closure, nullptr, HObjectAccess::ForLiteralsPointer());
-
- HInstruction* allocation_site = Add<HLoadKeyed>(
- literals_array, literal_index, nullptr, nullptr, FAST_ELEMENTS,
- NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
-
- IfBuilder checker(this);
- checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
- undefined);
- checker.And();
-
- HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kTransitionInfoOffset);
- HInstruction* boilerplate =
- Add<HLoadNamedField>(allocation_site, nullptr, access);
-
- int length = casted_stub()->length();
- if (length == 0) {
- // Empty objects have some slack added to them.
- length = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
- }
- int size = JSObject::kHeaderSize + length * kPointerSize;
- int object_size = size;
- if (FLAG_allocation_site_pretenuring) {
- size += AllocationMemento::kSize;
- }
-
- HValue* boilerplate_map =
- Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap());
- HValue* boilerplate_size = Add<HLoadNamedField>(
- boilerplate_map, nullptr, HObjectAccess::ForMapInstanceSize());
- HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
- checker.If<HCompareNumericAndBranch>(boilerplate_size,
- size_in_words, Token::EQ);
- checker.Then();
-
- HValue* size_in_bytes = Add<HConstant>(size);
-
- HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(),
- NOT_TENURED, JS_OBJECT_TYPE);
-
- for (int i = 0; i < object_size; i += kPointerSize) {
- HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(i);
- Add<HStoreNamedField>(object, access,
- Add<HLoadNamedField>(boilerplate, nullptr, access));
- }
-
- DCHECK(FLAG_allocation_site_pretenuring || (size == object_size));
- if (FLAG_allocation_site_pretenuring) {
- BuildCreateAllocationMemento(
- object, Add<HConstant>(object_size), allocation_site);
- }
-
- environment()->Push(object);
- checker.ElseDeopt(Deoptimizer::kUninitializedBoilerplateInFastClone);
- checker.End();
-
- return environment()->Pop();
-}
-
-
-Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
- // This stub is performance sensitive, the generated code must be tuned
- // so that it doesn't build an eager frame.
- info()->MarkMustNotHaveEagerFrame();
-
- HValue* size = Add<HConstant>(AllocationSite::kSize);
- HInstruction* object = Add<HAllocate>(size, HType::JSObject(), TENURED,
- JS_OBJECT_TYPE);
-
- // Store the map
- Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map();
- AddStoreMapConstant(object, allocation_site_map);
-
- // Store the payload (smi elements kind)
- HValue* initial_elements_kind = Add<HConstant>(GetInitialFastElementsKind());
- Add<HStoreNamedField>(object,
- HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kTransitionInfoOffset),
- initial_elements_kind);
-
- // Unlike literals, constructed arrays don't have nested sites
- Add<HStoreNamedField>(object,
- HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kNestedSiteOffset),
- graph()->GetConstant0());
-
- // Pretenuring calculation field.
- Add<HStoreNamedField>(object,
- HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kPretenureDataOffset),
- graph()->GetConstant0());
-
- // Pretenuring memento creation count field.
- Add<HStoreNamedField>(object,
- HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kPretenureCreateCountOffset),
- graph()->GetConstant0());
-
- // Store an empty fixed array for the code dependency.
- HConstant* empty_fixed_array =
- Add<HConstant>(isolate()->factory()->empty_fixed_array());
- Add<HStoreNamedField>(
- object,
- HObjectAccess::ForAllocationSiteOffset(
- AllocationSite::kDependentCodeOffset),
- empty_fixed_array);
-
- // Link the object to the allocation site list
- HValue* site_list = Add<HConstant>(
- ExternalReference::allocation_sites_list_address(isolate()));
- HValue* site = Add<HLoadNamedField>(site_list, nullptr,
- HObjectAccess::ForAllocationSiteList());
- // TODO(mvstanton): This is a store to a weak pointer, which we may want to
- // mark as such in order to skip the write barrier, once we have a unified
- // system for weakness. For now we decided to keep it like this because having
- // an initial write barrier backed store makes this pointer strong until the
- // next GC, and allocation sites are designed to survive several GCs anyway.
- Add<HStoreNamedField>(
- object,
- HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset),
- site);
- Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
- object);
-
- HInstruction* feedback_vector = GetParameter(0);
- HInstruction* slot = GetParameter(1);
- Add<HStoreKeyed>(feedback_vector, slot, object, nullptr, FAST_ELEMENTS,
- INITIALIZING_STORE);
- return feedback_vector;
-}
-
-
-Handle<Code> CreateAllocationSiteStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<CreateWeakCellStub>::BuildCodeStub() {
- // This stub is performance sensitive, the generated code must be tuned
- // so that it doesn't build an eager frame.
- info()->MarkMustNotHaveEagerFrame();
-
- HValue* size = Add<HConstant>(WeakCell::kSize);
- HInstruction* object =
- Add<HAllocate>(size, HType::JSObject(), TENURED, JS_OBJECT_TYPE);
-
- Handle<Map> weak_cell_map = isolate()->factory()->weak_cell_map();
- AddStoreMapConstant(object, weak_cell_map);
-
- HInstruction* value = GetParameter(CreateWeakCellDescriptor::kValueIndex);
- Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellValue(), value);
- Add<HStoreNamedField>(object, HObjectAccess::ForWeakCellNext(),
- graph()->GetConstantHole());
-
- HInstruction* feedback_vector =
- GetParameter(CreateWeakCellDescriptor::kVectorIndex);
- HInstruction* slot = GetParameter(CreateWeakCellDescriptor::kSlotIndex);
- Add<HStoreKeyed>(feedback_vector, slot, object, nullptr, FAST_ELEMENTS,
- INITIALIZING_STORE);
- return graph()->GetConstant0();
-}
-
-
-Handle<Code> CreateWeakCellStub::GenerateCode() { return DoGenerateCode(this); }
-
-
template <>
HValue* CodeStubGraphBuilder<LoadScriptContextFieldStub>::BuildCodeStub() {
int context_index = casted_stub()->context_index();
@@ -775,6 +446,7 @@ HValue* CodeStubGraphBuilder<StoreScriptContextFieldStub>::BuildCodeStub() {
Add<HStoreNamedField>(script_context,
HObjectAccess::ForContextSlot(slot_index),
GetParameter(2), STORE_TO_INITIALIZED_ENTRY);
+ // TODO(ishell): Remove this unused stub.
return GetParameter(2);
}
@@ -801,7 +473,7 @@ HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
can_store.IfNot<HCompareMap>(argument,
isolate()->factory()->heap_number_map());
}
- can_store.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ can_store.ThenDeopt(DeoptimizeReason::kFastPathFailed);
can_store.End();
}
builder.EndBody();
@@ -852,19 +524,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(
bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
- check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
- check.End();
- }
-
- // Disallow pushing onto observed objects.
- {
- HValue* bit_field =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
- HValue* mask = Add<HConstant>(1 << Map::kIsObserved);
- HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
- IfBuilder check(this);
- check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
- check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
check.End();
}
@@ -877,7 +537,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
IfBuilder check(this);
check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
- check.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
check.End();
}
@@ -895,7 +555,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
IfBuilder readonly(this);
readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
- readonly.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ readonly.ThenDeopt(DeoptimizeReason::kFastPathFailed);
readonly.End();
}
@@ -923,17 +583,14 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
check_instance_type.If<HCompareNumericAndBranch>(
instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
Token::LTE);
- check_instance_type.Or();
- check_instance_type.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(JS_GLOBAL_PROXY_TYPE), Token::EQ);
- check_instance_type.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ check_instance_type.ThenDeopt(DeoptimizeReason::kFastPathFailed);
check_instance_type.End();
HValue* elements = Add<HLoadNamedField>(
prototype, nullptr, HObjectAccess::ForElementsPointer());
IfBuilder no_elements(this);
no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
- no_elements.ThenDeopt(Deoptimizer::kFastArrayPushFailed);
+ no_elements.ThenDeopt(DeoptimizeReason::kFastPathFailed);
no_elements.End();
environment()->Push(prototype_map);
@@ -983,7 +640,7 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
FAST_HOLEY_DOUBLE_ELEMENTS);
environment()->Push(new_length);
}
- has_double_elements.ElseDeopt(Deoptimizer::kFastArrayPushFailed);
+ has_double_elements.ElseDeopt(DeoptimizeReason::kFastPathFailed);
has_double_elements.End();
}
has_object_elements.End();
@@ -996,34 +653,189 @@ HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
Handle<Code> FastArrayPushStub::GenerateCode() { return DoGenerateCode(this); }
template <>
-HValue* CodeStubGraphBuilder<GrowArrayElementsStub>::BuildCodeStub() {
- ElementsKind kind = casted_stub()->elements_kind();
- if (IsFastDoubleElementsKind(kind)) {
- info()->MarkAsSavesCallerDoubles();
+HValue* CodeStubGraphBuilder<FastFunctionBindStub>::BuildCodeStub() {
+ // TODO(verwaest): Fix deoptimizer messages.
+ HValue* argc = GetArgumentsLength();
+ HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
+ HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
+ graph()->GetConstantMinus1());
+ BuildCheckHeapObject(object);
+ HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
+ Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_FUNCTION);
+
+ // Disallow binding of slow-mode functions. We need to figure out whether the
+ // length and name properties are in the original state.
+ {
+ HValue* bit_field3 =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
+ HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
+ HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
+ IfBuilder check(this);
+ check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
+ check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
+ check.End();
}
- HValue* object = GetParameter(GrowArrayElementsDescriptor::kObjectIndex);
- HValue* key = GetParameter(GrowArrayElementsDescriptor::kKeyIndex);
+ // Check whether the length and name properties are still present as
+ // AccessorInfo objects. In that case, their values can be recomputed even if
+ // the actual value on the object changes.
+ {
+ HValue* descriptors =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
- HValue* elements = AddLoadElements(object);
- HValue* current_capacity = Add<HLoadNamedField>(
- elements, nullptr, HObjectAccess::ForFixedArrayLength());
+ HValue* descriptors_length = Add<HLoadNamedField>(
+ descriptors, nullptr, HObjectAccess::ForFixedArrayLength());
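+ // Deoptimize if the descriptor array is too short to describe any
+ // properties at all.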
+ IfBuilder range(this);
+ range.If<HCompareNumericAndBranch>(descriptors_length,
+ graph()->GetConstant1(), Token::LTE);
+ range.ThenDeopt(DeoptimizeReason::kFastPathFailed);
+ range.End();
+
+ // Verify .length.
+ const int length_index = JSFunction::kLengthDescriptorIndex;
+ HValue* maybe_length = Add<HLoadKeyed>(
+ descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(length_index)),
+ nullptr, nullptr, FAST_ELEMENTS);
+ Unique<Name> length_string = Unique<Name>::CreateUninitialized(
+ isolate()->factory()->length_string());
+ Add<HCheckValue>(maybe_length, length_string, false);
+
+ HValue* maybe_length_accessor = Add<HLoadKeyed>(
+ descriptors,
+ Add<HConstant>(DescriptorArray::ToValueIndex(length_index)), nullptr,
+ nullptr, FAST_ELEMENTS);
+ BuildCheckHeapObject(maybe_length_accessor);
+ Add<HCheckMaps>(maybe_length_accessor,
+ isolate()->factory()->accessor_info_map());
+
+ // Verify .name.
+ const int name_index = JSFunction::kNameDescriptorIndex;
+ HValue* maybe_name = Add<HLoadKeyed>(
+ descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(name_index)),
+ nullptr, nullptr, FAST_ELEMENTS);
+ Unique<Name> name_string =
+ Unique<Name>::CreateUninitialized(isolate()->factory()->name_string());
+ Add<HCheckValue>(maybe_name, name_string, false);
+
+ HValue* maybe_name_accessor = Add<HLoadKeyed>(
+ descriptors, Add<HConstant>(DescriptorArray::ToValueIndex(name_index)),
+ nullptr, nullptr, FAST_ELEMENTS);
+ BuildCheckHeapObject(maybe_name_accessor);
+ Add<HCheckMaps>(maybe_name_accessor,
+ isolate()->factory()->accessor_info_map());
+ }
- HValue* length =
- casted_stub()->is_js_array()
- ? Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
- HObjectAccess::ForArrayLength(kind))
- : current_capacity;
+ // Choose the right bound function map based on whether the target is
+ // constructable.
+ {
+ HValue* bit_field =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+ HValue* mask = Add<HConstant>(static_cast<int>(1 << Map::kIsConstructor));
+ HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
- return BuildCheckAndGrowElementsCapacity(object, elements, kind, length,
- current_capacity, key);
-}
+ HValue* native_context = BuildGetNativeContext();
+ IfBuilder is_constructor(this);
+ is_constructor.If<HCompareNumericAndBranch>(bits, mask, Token::EQ);
+ is_constructor.Then();
+ {
+ HValue* map = Add<HLoadNamedField>(
+ native_context, nullptr,
+ HObjectAccess::ForContextSlot(
+ Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
+ environment()->Push(map);
+ }
+ is_constructor.Else();
+ {
+ HValue* map = Add<HLoadNamedField>(
+ native_context, nullptr,
+ HObjectAccess::ForContextSlot(
+ Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
+ environment()->Push(map);
+ }
+ is_constructor.End();
+ }
+ HValue* bound_function_map = environment()->Pop();
+ // Verify that __proto__ matches that of the target bound function.
+ {
+ HValue* prototype =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForPrototype());
+ HValue* expected_prototype = Add<HLoadNamedField>(
+ bound_function_map, nullptr, HObjectAccess::ForPrototype());
+ IfBuilder equal_prototype(this);
+ equal_prototype.IfNot<HCompareObjectEqAndBranch>(prototype,
+ expected_prototype);
+ equal_prototype.ThenDeopt(DeoptimizeReason::kFastPathFailed);
+ equal_prototype.End();
+ }
-Handle<Code> GrowArrayElementsStub::GenerateCode() {
- return DoGenerateCode(this);
+ // Allocate the arguments array.
+ IfBuilder empty_args(this);
+ empty_args.If<HCompareNumericAndBranch>(argc, graph()->GetConstant1(),
+ Token::LTE);
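+ // With argc <= 1 at most a receiver was passed, so there are no bound
+ // arguments to store.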
+ empty_args.Then();
+ { environment()->Push(Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex)); }
+ empty_args.Else();
+ {
+ HValue* elements_length = AddUncasted<HSub>(argc, graph()->GetConstant1());
+ HValue* elements =
+ BuildAllocateAndInitializeArray(FAST_ELEMENTS, elements_length);
+
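+ // Copy arguments 1..argc-1 (everything after the bound receiver) into
+ // slots 0..argc-2 of the new elements array.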
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+ HValue* start = graph()->GetConstant1();
+ HValue* key = builder.BeginBody(start, argc, Token::LT);
+ {
+ HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
+ HValue* index = AddUncasted<HSub>(key, graph()->GetConstant1());
+ AddElementAccess(elements, index, argument, elements, nullptr,
+ FAST_ELEMENTS, STORE);
+ }
+ builder.EndBody();
+ environment()->Push(elements);
+ }
+ empty_args.End();
+ HValue* elements = environment()->Pop();
+
+ // Find the 'this' to bind.
+ IfBuilder no_receiver(this);
+ no_receiver.If<HCompareNumericAndBranch>(argc, graph()->GetConstant0(),
+ Token::EQ);
+ no_receiver.Then();
+ { environment()->Push(Add<HLoadRoot>(Heap::kUndefinedValueRootIndex)); }
+ no_receiver.Else();
+ {
+ environment()->Push(Add<HAccessArgumentsAt>(argument_elements, argc,
+ graph()->GetConstant0()));
+ }
+ no_receiver.End();
+ HValue* receiver = environment()->Pop();
+
+ // Allocate the resulting bound function.
+ HValue* size = Add<HConstant>(JSBoundFunction::kSize);
+ HValue* bound_function =
+ Add<HAllocate>(size, HType::JSObject(), NOT_TENURED,
+ JS_BOUND_FUNCTION_TYPE, graph()->GetConstant0());
+ Add<HStoreNamedField>(bound_function, HObjectAccess::ForMap(),
+ bound_function_map);
+ HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+ Add<HStoreNamedField>(bound_function, HObjectAccess::ForPropertiesPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(bound_function, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundTargetFunction(),
+ object);
+
+ Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundThis(),
+ receiver);
+ Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundArguments(),
+ elements);
+
+ return bound_function;
}
+Handle<Code> FastFunctionBindStub::GenerateCode() {
+ return DoGenerateCode(this);
+}
template <>
HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
@@ -1032,9 +844,8 @@ HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
: NEVER_RETURN_HOLE;
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
- GetParameter(LoadDescriptor::kReceiverIndex),
- GetParameter(LoadDescriptor::kNameIndex), NULL,
- casted_stub()->is_js_array(), casted_stub()->elements_kind(), LOAD,
+ GetParameter(Descriptor::kReceiver), GetParameter(Descriptor::kName),
+ NULL, casted_stub()->is_js_array(), casted_stub()->elements_kind(), LOAD,
hole_mode, STANDARD_STORE);
return load;
}
@@ -1068,7 +879,8 @@ HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- return BuildLoadNamedField(GetParameter(0), casted_stub()->index());
+ return BuildLoadNamedField(GetParameter(Descriptor::kReceiver),
+ casted_stub()->index());
}
@@ -1078,20 +890,8 @@ Handle<Code> LoadFieldStub::GenerateCode() {
template <>
-HValue* CodeStubGraphBuilder<ArrayBufferViewLoadFieldStub>::BuildCodeStub() {
- return BuildArrayBufferViewFieldAccessor(GetParameter(0), nullptr,
- casted_stub()->index());
-}
-
-
-Handle<Code> ArrayBufferViewLoadFieldStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
- HValue* map = AddLoadMap(GetParameter(0), NULL);
+ HValue* map = AddLoadMap(GetParameter(Descriptor::kReceiver), NULL);
HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset(
Map::kDescriptorsOffset, Representation::Tagged());
HValue* descriptors = Add<HLoadNamedField>(map, nullptr, descriptors_access);
@@ -1125,7 +925,7 @@ HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key,
Add<HStoreKeyed>(backing_store, key, value, nullptr, FAST_HOLEY_ELEMENTS);
}
}
- in_unmapped_range.ElseDeopt(Deoptimizer::kOutsideOfRange);
+ in_unmapped_range.ElseDeopt(DeoptimizeReason::kOutsideOfRange);
in_unmapped_range.End();
return result;
}
@@ -1166,7 +966,7 @@ HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
IfBuilder positive_smi(this);
positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
Token::LT);
- positive_smi.ThenDeopt(Deoptimizer::kKeyIsNegative);
+ positive_smi.ThenDeopt(DeoptimizeReason::kKeyIsNegative);
positive_smi.End();
HValue* constant_two = Add<HConstant>(2);
@@ -1226,8 +1026,8 @@ HValue* CodeStubGraphBuilderBase::EmitKeyedSloppyArguments(HValue* receiver,
template <>
HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
- HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+ HValue* receiver = GetParameter(Descriptor::kReceiver);
+ HValue* key = GetParameter(Descriptor::kName);
return EmitKeyedSloppyArguments(receiver, key, NULL);
}
@@ -1240,9 +1040,9 @@ Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<KeyedStoreSloppyArgumentsStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(StoreDescriptor::kReceiverIndex);
- HValue* key = GetParameter(StoreDescriptor::kNameIndex);
- HValue* value = GetParameter(StoreDescriptor::kValueIndex);
+ HValue* receiver = GetParameter(Descriptor::kReceiver);
+ HValue* key = GetParameter(Descriptor::kName);
+ HValue* value = GetParameter(Descriptor::kValue);
return EmitKeyedSloppyArguments(receiver, key, value);
}
@@ -1275,7 +1075,7 @@ void CodeStubGraphBuilderBase::BuildStoreNamedField(
// TODO(hpayer): Allocation site pretenuring support.
HInstruction* heap_number =
Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
- MUTABLE_HEAP_NUMBER_TYPE);
+ MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
AddStoreMapConstant(heap_number,
isolate()->factory()->mutable_heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@@ -1300,9 +1100,10 @@ void CodeStubGraphBuilderBase::BuildStoreNamedField(
template <>
HValue* CodeStubGraphBuilder<StoreFieldStub>::BuildCodeStub() {
- BuildStoreNamedField(GetParameter(0), GetParameter(2), casted_stub()->index(),
+ BuildStoreNamedField(GetParameter(Descriptor::kReceiver),
+ GetParameter(Descriptor::kValue), casted_stub()->index(),
casted_stub()->representation(), false);
- return GetParameter(2);
+ return GetParameter(Descriptor::kValue);
}
@@ -1312,8 +1113,21 @@ Handle<Code> StoreFieldStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
+ HValue* value = GetParameter(StoreTransitionHelper::ValueIndex());
+ StoreTransitionStub::StoreMode store_mode = casted_stub()->store_mode();
+
+ if (store_mode != StoreTransitionStub::StoreMapOnly) {
+ value = GetParameter(StoreTransitionHelper::ValueIndex());
+ Representation representation = casted_stub()->representation();
+ if (representation.IsDouble()) {
+ // In case we are storing a double, ensure that the value is a double
+ // before manipulating the properties backing store. Otherwise the actual
+ // store may deopt, leaving the backing store in an overallocated state.
+ value = AddUncasted<HForceRepresentation>(value, representation);
+ }
+ }
- switch (casted_stub()->store_mode()) {
+ switch (store_mode) {
case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
HValue* properties = Add<HLoadNamedField>(
object, nullptr, HObjectAccess::ForPropertiesPointer());
@@ -1341,9 +1155,8 @@ HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
// Fall through.
case StoreTransitionStub::StoreMapAndValue:
// Store the new value into the "extended" object.
- BuildStoreNamedField(
- object, GetParameter(StoreTransitionHelper::ValueIndex()),
- casted_stub()->index(), casted_stub()->representation(), true);
+ BuildStoreNamedField(object, value, casted_stub()->index(),
+ casted_stub()->representation(), true);
// Fall through.
case StoreTransitionStub::StoreMapOnly:
@@ -1352,7 +1165,7 @@ HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
GetParameter(StoreTransitionHelper::MapIndex()));
break;
}
- return GetParameter(StoreTransitionHelper::ValueIndex());
+ return value;
}
@@ -1364,13 +1177,12 @@ Handle<Code> StoreTransitionStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
- GetParameter(StoreDescriptor::kReceiverIndex),
- GetParameter(StoreDescriptor::kNameIndex),
- GetParameter(StoreDescriptor::kValueIndex), casted_stub()->is_js_array(),
+ GetParameter(Descriptor::kReceiver), GetParameter(Descriptor::kName),
+ GetParameter(Descriptor::kValue), casted_stub()->is_js_array(),
casted_stub()->elements_kind(), STORE, NEVER_RETURN_HOLE,
casted_stub()->store_mode());
- return GetParameter(2);
+ return GetParameter(Descriptor::kValue);
}
@@ -1381,233 +1193,74 @@ Handle<Code> StoreFastElementStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
- info()->MarkAsSavesCallerDoubles();
-
- BuildTransitionElementsKind(GetParameter(0),
- GetParameter(1),
- casted_stub()->from_kind(),
- casted_stub()->to_kind(),
- casted_stub()->is_js_array());
-
- return GetParameter(0);
-}
+ ElementsKind const from_kind = casted_stub()->from_kind();
+ ElementsKind const to_kind = casted_stub()->to_kind();
+ HValue* const object = GetParameter(Descriptor::kObject);
+ HValue* const map = GetParameter(Descriptor::kMap);
+ // The {object} is known to be a JSObject (otherwise it wouldn't have elements
+ // anyway).
+ object->set_type(HType::JSObject());
-Handle<Code> TransitionElementsKindStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
- HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
- JS_OBJECT_TYPE);
- return result;
-}
-
-
-Handle<Code> AllocateInNewSpaceStub::GenerateCode() {
- return DoGenerateCode(this);
-}
+ info()->MarkAsSavesCallerDoubles();
+ DCHECK_IMPLIES(IsFastHoleyElementsKind(from_kind),
+ IsFastHoleyElementsKind(to_kind));
-HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
- ElementsKind kind,
- AllocationSiteOverrideMode override_mode,
- ArgumentClass argument_class) {
- HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
- HValue* alloc_site = GetParameter(ArrayConstructorStubBase::kAllocationSite);
- JSArrayBuilder array_builder(this, kind, alloc_site, constructor,
- override_mode);
- HValue* result = NULL;
- switch (argument_class) {
- case NONE:
- // This stub is very performance sensitive, the generated code must be
- // tuned so that it doesn't build and eager frame.
- info()->MarkMustNotHaveEagerFrame();
- result = array_builder.AllocateEmptyArray();
- break;
- case SINGLE:
- result = BuildArraySingleArgumentConstructor(&array_builder);
- break;
- case MULTIPLE:
- result = BuildArrayNArgumentsConstructor(&array_builder, kind);
- break;
+ if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ Add<HTrapAllocationMemento>(object);
}
- return result;
-}
+ if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
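+ // Anything but a simple map change requires the backing store to be
+ // reallocated in the target representation, unless it is empty.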
+ HInstruction* elements = AddLoadElements(object);
+ IfBuilder if_objecthaselements(this);
+ if_objecthaselements.IfNot<HCompareObjectEqAndBranch>(
+ elements, Add<HConstant>(isolate()->factory()->empty_fixed_array()));
+ if_objecthaselements.Then();
+ {
+ // Determine the elements capacity.
+ HInstruction* elements_length = AddLoadFixedArrayLength(elements);
-HValue* CodeStubGraphBuilderBase::BuildInternalArrayConstructor(
- ElementsKind kind, ArgumentClass argument_class) {
- HValue* constructor = GetParameter(
- InternalArrayConstructorStubBase::kConstructor);
- JSArrayBuilder array_builder(this, kind, constructor);
+ // Determine the effective (array) length.
+ IfBuilder if_objectisarray(this);
+ if_objectisarray.If<HHasInstanceTypeAndBranch>(object, JS_ARRAY_TYPE);
+ if_objectisarray.Then();
+ {
+ // The {object} is a JSArray; load its special "length" property.
+ Push(Add<HLoadNamedField>(object, nullptr,
+ HObjectAccess::ForArrayLength(from_kind)));
+ }
+ if_objectisarray.Else();
+ {
+ // The {object} is some other JSObject.
+ Push(elements_length);
+ }
+ if_objectisarray.End();
+ HValue* length = Pop();
- HValue* result = NULL;
- switch (argument_class) {
- case NONE:
- // This stub is very performance sensitive, the generated code must be
- // tuned so that it doesn't build and eager frame.
- info()->MarkMustNotHaveEagerFrame();
- result = array_builder.AllocateEmptyArray();
- break;
- case SINGLE:
- result = BuildArraySingleArgumentConstructor(&array_builder);
- break;
- case MULTIPLE:
- result = BuildArrayNArgumentsConstructor(&array_builder, kind);
- break;
+ BuildGrowElementsCapacity(object, elements, from_kind, to_kind, length,
+ elements_length);
+ }
+ if_objecthaselements.End();
}
- return result;
-}
-
-
-HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
- JSArrayBuilder* array_builder) {
- // Smi check and range check on the input arg.
- HValue* constant_one = graph()->GetConstant1();
- HValue* constant_zero = graph()->GetConstant0();
-
- HInstruction* elements = Add<HArgumentsElements>(false);
- HInstruction* argument = Add<HAccessArgumentsAt>(
- elements, constant_one, constant_zero);
-
- return BuildAllocateArrayFromLength(array_builder, argument);
-}
-
-
-HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
- JSArrayBuilder* array_builder, ElementsKind kind) {
- // Insert a bounds check because the number of arguments might exceed
- // the kInitialMaxFastElementArray limit. This cannot happen for code
- // that was parsed, but calling via Array.apply(thisArg, [...]) might
- // trigger it.
- HValue* length = GetArgumentsLength();
- HConstant* max_alloc_length =
- Add<HConstant>(JSArray::kInitialMaxFastElementArray);
- HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
-
- // We need to fill with the hole if it's a smi array in the multi-argument
- // case because we might have to bail out while copying arguments into
- // the array because they aren't compatible with a smi array.
- // If it's a double array, no problem, and if it's fast then no
- // problem either because doubles are boxed.
- //
- // TODO(mvstanton): consider an instruction to memset fill the array
- // with zero in this case instead.
- JSArrayBuilder::FillMode fill_mode = IsFastSmiElementsKind(kind)
- ? JSArrayBuilder::FILL_WITH_HOLE
- : JSArrayBuilder::DONT_FILL_WITH_HOLE;
- HValue* new_object = array_builder->AllocateArray(checked_length,
- max_alloc_length,
- checked_length,
- fill_mode);
- HValue* elements = array_builder->GetElementsLocation();
- DCHECK(elements != NULL);
-
- // Now populate the elements correctly.
- LoopBuilder builder(this,
- context(),
- LoopBuilder::kPostIncrement);
- HValue* start = graph()->GetConstant0();
- HValue* key = builder.BeginBody(start, checked_length, Token::LT);
- HInstruction* argument_elements = Add<HArgumentsElements>(false);
- HInstruction* argument = Add<HAccessArgumentsAt>(
- argument_elements, checked_length, key);
-
- Add<HStoreKeyed>(elements, key, argument, nullptr, kind);
- builder.EndBody();
- return new_object;
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
- ElementsKind kind = casted_stub()->elements_kind();
- AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, override_mode, NONE);
-}
-
-
-Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
- BuildCodeStub() {
- ElementsKind kind = casted_stub()->elements_kind();
- AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, override_mode, SINGLE);
-}
-
-
-Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
- ElementsKind kind = casted_stub()->elements_kind();
- AllocationSiteOverrideMode override_mode = casted_stub()->override_mode();
- return BuildArrayConstructor(kind, override_mode, MULTIPLE);
-}
-
-
-Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
- BuildCodeStub() {
- ElementsKind kind = casted_stub()->elements_kind();
- return BuildInternalArrayConstructor(kind, NONE);
-}
+ Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
-Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
- BuildCodeStub() {
- ElementsKind kind = casted_stub()->elements_kind();
- return BuildInternalArrayConstructor(kind, SINGLE);
+ return object;
}
-Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
- BuildCodeStub() {
- ElementsKind kind = casted_stub()->elements_kind();
- return BuildInternalArrayConstructor(kind, MULTIPLE);
-}
-
-
-Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
+Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
}
-
template <>
HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
BinaryOpICState state = casted_stub()->state();
- HValue* left = GetParameter(BinaryOpICStub::kLeft);
- HValue* right = GetParameter(BinaryOpICStub::kRight);
+ HValue* left = GetParameter(Descriptor::kLeft);
+ HValue* right = GetParameter(Descriptor::kRight);
Type* left_type = state.GetLeftType();
Type* right_type = state.GetRightType();
@@ -1683,10 +1336,9 @@ template <>
HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
BinaryOpICState state = casted_stub()->state();
- HValue* allocation_site = GetParameter(
- BinaryOpWithAllocationSiteStub::kAllocationSite);
- HValue* left = GetParameter(BinaryOpWithAllocationSiteStub::kLeft);
- HValue* right = GetParameter(BinaryOpWithAllocationSiteStub::kRight);
+ HValue* allocation_site = GetParameter(Descriptor::kAllocationSite);
+ HValue* left = GetParameter(Descriptor::kLeft);
+ HValue* right = GetParameter(Descriptor::kRight);
Type* left_type = state.GetLeftType();
Type* right_type = state.GetRightType();
@@ -1749,9 +1401,9 @@ HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
// Convert the primitive to a string value.
ToStringStub stub(isolate());
HValue* values[] = {context(), Pop()};
- Push(AddUncasted<HCallWithDescriptor>(
- Add<HConstant>(stub.GetCode()), 0, stub.GetCallInterfaceDescriptor(),
- Vector<HValue*>(values, arraysize(values))));
+ Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(stub.GetCode()), 0,
+ stub.GetCallInterfaceDescriptor(),
+ ArrayVector(values)));
}
if_inputisstring.End();
}
@@ -1843,8 +1495,8 @@ HValue* CodeStubGraphBuilder<StringAddStub>::BuildCodeInitializedStub() {
StringAddFlags flags = stub->flags();
PretenureFlag pretenure_flag = stub->pretenure_flag();
- HValue* left = GetParameter(StringAddStub::kLeft);
- HValue* right = GetParameter(StringAddStub::kRight);
+ HValue* left = GetParameter(Descriptor::kLeft);
+ HValue* right = GetParameter(Descriptor::kRight);
// Make sure that both arguments are strings if not known in advance.
if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
@@ -1868,7 +1520,7 @@ template <>
HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
ToBooleanICStub* stub = casted_stub();
IfBuilder if_true(this);
- if_true.If<HBranch>(GetParameter(0), stub->types());
+ if_true.If<HBranch>(GetParameter(Descriptor::kArgument), stub->types());
if_true.Then();
if_true.Return(graph()->GetConstantTrue());
if_true.Else();
@@ -1881,11 +1533,11 @@ Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
StoreGlobalStub* stub = casted_stub();
- HParameter* value = GetParameter(StoreDescriptor::kValueIndex);
+ HParameter* value = GetParameter(Descriptor::kValue);
if (stub->check_global()) {
// Check that the map of the global has not changed: use a placeholder map
// that will be replaced later with the global object's map.
- HParameter* proxy = GetParameter(StoreDescriptor::kReceiverIndex);
+ HParameter* proxy = GetParameter(Descriptor::kReceiver);
HValue* proxy_map =
Add<HLoadNamedField>(proxy, nullptr, HObjectAccess::ForMap());
HValue* global =
@@ -1898,7 +1550,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Add<HLoadNamedField>(global, nullptr, HObjectAccess::ForMap());
IfBuilder map_check(this);
map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(Deoptimizer::kUnknownMap);
+ map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
map_check.End();
}
@@ -1921,14 +1573,14 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
builder.If<HCompareObjectEqAndBranch>(cell_contents, value);
builder.Then();
builder.ElseDeopt(
- Deoptimizer::kUnexpectedCellContentsInConstantGlobalStore);
+ DeoptimizeReason::kUnexpectedCellContentsInConstantGlobalStore);
builder.End();
} else {
IfBuilder builder(this);
HValue* hole_value = graph()->GetConstantHole();
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
- builder.Deopt(Deoptimizer::kUnexpectedCellContentsInGlobalStore);
+ builder.Deopt(DeoptimizeReason::kUnexpectedCellContentsInGlobalStore);
builder.Else();
// When dealing with constant types, the type may be allowed to change, as
// long as optimized code remains valid.
@@ -1951,7 +1603,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
IfBuilder map_check(this);
map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
- map_check.ThenDeopt(Deoptimizer::kUnknownMap);
+ map_check.ThenDeopt(DeoptimizeReason::kUnknownMap);
map_check.End();
access = access.WithRepresentation(Representation::HeapObject());
break;
@@ -1980,7 +1632,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- Add<HDeoptimize>(Deoptimizer::kTracingElementsTransitions,
+ Add<HDeoptimize>(DeoptimizeReason::kTracingElementsTransitions,
Deoptimizer::EAGER);
} else {
info()->MarkAsSavesCallerDoubles();
@@ -2007,302 +1659,9 @@ Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
template <>
-HValue* CodeStubGraphBuilder<ToObjectStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(TypeConversionDescriptor::kArgumentIndex);
- return BuildToObject(receiver);
-}
-
-
-Handle<Code> ToObjectStub::GenerateCode() { return DoGenerateCode(this); }
-
-
-void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
- HValue* js_function,
- HValue* native_context,
- IfBuilder* builder,
- HValue* optimized_map,
- HValue* map_index) {
- HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
- HValue* context_slot = LoadFromOptimizedCodeMap(
- optimized_map, map_index, SharedFunctionInfo::kContextOffset);
- context_slot = Add<HLoadNamedField>(context_slot, nullptr,
- HObjectAccess::ForWeakCellValue());
- HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
- optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
- HValue* code_object = LoadFromOptimizedCodeMap(
- optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
- code_object = Add<HLoadNamedField>(code_object, nullptr,
- HObjectAccess::ForWeakCellValue());
- builder->If<HCompareObjectEqAndBranch>(native_context,
- context_slot);
- builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
- builder->And();
- builder->IfNot<HCompareObjectEqAndBranch>(code_object,
- graph()->GetConstant0());
- builder->Then();
- HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
- map_index, SharedFunctionInfo::kLiteralsOffset);
- literals = Add<HLoadNamedField>(literals, nullptr,
- HObjectAccess::ForWeakCellValue());
- IfBuilder maybe_deopt(this);
- maybe_deopt.If<HCompareObjectEqAndBranch>(literals, graph()->GetConstant0());
- maybe_deopt.ThenDeopt(Deoptimizer::kLiteralsWereDisposed);
- maybe_deopt.End();
-
- BuildInstallOptimizedCode(js_function, native_context, code_object, literals);
-
- // The builder continues in the "then" after this function.
-}
-
-
-void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(HValue* js_function,
- HValue* native_context,
- HValue* code_object,
- HValue* literals) {
- Counters* counters = isolate()->counters();
- AddIncrementCounter(counters->fast_new_closure_install_optimized());
-
- // TODO(fschneider): Idea: store proper code pointers in the optimized code
- // map and either unmangle them on marking or do nothing as the whole map is
- // discarded on major GC anyway.
- Add<HStoreCodeEntry>(js_function, code_object);
- Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
- literals);
-
- // Now link a function into a list of optimized functions.
- HValue* optimized_functions_list = Add<HLoadNamedField>(
- native_context, nullptr,
- HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
- Add<HStoreNamedField>(js_function,
- HObjectAccess::ForNextFunctionLinkPointer(),
- optimized_functions_list);
-
- // This store is the only one that should have a write barrier.
- Add<HStoreNamedField>(native_context,
- HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
- js_function);
-}
-
-
-void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
- HValue* shared_info) {
- Add<HStoreNamedField>(js_function,
- HObjectAccess::ForNextFunctionLinkPointer(),
- graph()->GetConstantUndefined());
- HValue* code_object = Add<HLoadNamedField>(shared_info, nullptr,
- HObjectAccess::ForCodeOffset());
- Add<HStoreCodeEntry>(js_function, code_object);
-}
-
-
-HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
- HValue* optimized_map,
- HValue* iterator,
- int field_offset) {
- // By making sure to express these loads in the form [<hvalue> + constant]
- // the keyed load can be hoisted.
- DCHECK(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
- HValue* field_slot = iterator;
- if (field_offset > 0) {
- HValue* field_offset_value = Add<HConstant>(field_offset);
- field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
- }
- HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
- nullptr, nullptr, FAST_ELEMENTS);
- return field_entry;
-}
-
-
-void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
- HValue* js_function,
- HValue* shared_info,
- HValue* native_context) {
- Counters* counters = isolate()->counters();
- Factory* factory = isolate()->factory();
- IfBuilder is_optimized(this);
- HInstruction* optimized_map = Add<HLoadNamedField>(
- shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
- HValue* null_constant = Add<HConstant>(0);
- is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
- is_optimized.Then();
- {
- BuildInstallCode(js_function, shared_info);
- }
- is_optimized.Else();
- {
- AddIncrementCounter(counters->fast_new_closure_try_optimized());
- // The {optimized_map} points to fixed array of 4-element entries:
- // (native context, optimized code, literals, ast-id).
- // Iterate through the {optimized_map} backwards. After the loop, if no
- // matching optimized code was found, install unoptimized code.
- // for(i = map.length() - SharedFunctionInfo::kEntryLength;
- // i >= SharedFunctionInfo::kEntriesStart;
- // i -= SharedFunctionInfo::kEntryLength) { ... }
- HValue* first_entry_index =
- Add<HConstant>(SharedFunctionInfo::kEntriesStart);
- HValue* shared_function_entry_length =
- Add<HConstant>(SharedFunctionInfo::kEntryLength);
- LoopBuilder loop_builder(this, context(), LoopBuilder::kPostDecrement,
- shared_function_entry_length);
- HValue* array_length = Add<HLoadNamedField>(
- optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
- HValue* start_pos =
- AddUncasted<HSub>(array_length, shared_function_entry_length);
- HValue* slot_iterator =
- loop_builder.BeginBody(start_pos, first_entry_index, Token::GTE);
- {
- IfBuilder done_check(this);
- BuildCheckAndInstallOptimizedCode(js_function, native_context,
- &done_check, optimized_map,
- slot_iterator);
- // Fall out of the loop
- loop_builder.Break();
- }
- loop_builder.EndBody();
-
- // If {slot_iterator} is less than the first entry index, then we failed to
- // find a context-dependent code and try context-independent code next.
- IfBuilder no_optimized_code_check(this);
- no_optimized_code_check.If<HCompareNumericAndBranch>(
- slot_iterator, first_entry_index, Token::LT);
- no_optimized_code_check.Then();
- {
- IfBuilder shared_code_check(this);
- HValue* shared_code =
- Add<HLoadNamedField>(optimized_map, nullptr,
- HObjectAccess::ForOptimizedCodeMapSharedCode());
- shared_code = Add<HLoadNamedField>(shared_code, nullptr,
- HObjectAccess::ForWeakCellValue());
- shared_code_check.IfNot<HCompareObjectEqAndBranch>(
- shared_code, graph()->GetConstant0());
- shared_code_check.Then();
- {
- // Store the context-independent optimized code.
- HValue* literals = Add<HConstant>(factory->empty_fixed_array());
- BuildInstallOptimizedCode(js_function, native_context, shared_code,
- literals);
- }
- shared_code_check.Else();
- {
- // Store the unoptimized code.
- BuildInstallCode(js_function, shared_info);
- }
- }
- }
-}
-
-
-template<>
-HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
- Counters* counters = isolate()->counters();
- Factory* factory = isolate()->factory();
- HInstruction* empty_fixed_array =
- Add<HConstant>(factory->empty_fixed_array());
- HValue* shared_info = GetParameter(0);
-
- AddIncrementCounter(counters->fast_new_closure_total());
-
- // Create a new closure from the given function info in new space
- HValue* size = Add<HConstant>(JSFunction::kSize);
- HInstruction* js_function =
- Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE);
-
- int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
- casted_stub()->kind());
-
- // Compute the function map in the current native context and set that
- // as the map of the allocated object.
- HInstruction* native_context = BuildGetNativeContext();
- HInstruction* map_slot_value = Add<HLoadNamedField>(
- native_context, nullptr, HObjectAccess::ForContextSlot(map_index));
- Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
-
- // Initialize the rest of the function.
- Add<HStoreNamedField>(js_function, HObjectAccess::ForPropertiesPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
- graph()->GetConstantHole());
- Add<HStoreNamedField>(
- js_function, HObjectAccess::ForSharedFunctionInfoPointer(), shared_info);
- Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
- context());
-
- // Initialize the code pointer in the function to be the one found in the
- // shared function info object. But first check if there is an optimized
- // version for our context.
- BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
-
- return js_function;
-}
-
-
-Handle<Code> FastNewClosureStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template<>
-HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
- int length = casted_stub()->slots() + Context::MIN_CONTEXT_SLOTS;
-
- // Get the function.
- HParameter* function = GetParameter(FastNewContextStub::kFunction);
-
- // Allocate the context in new space.
- HAllocate* function_context = Add<HAllocate>(
- Add<HConstant>(length * kPointerSize + FixedArray::kHeaderSize),
- HType::HeapObject(), NOT_TENURED, FIXED_ARRAY_TYPE);
-
- // Set up the object header.
- AddStoreMapConstant(function_context,
- isolate()->factory()->function_context_map());
- Add<HStoreNamedField>(function_context,
- HObjectAccess::ForFixedArrayLength(),
- Add<HConstant>(length));
-
- // Set up the fixed slots.
- Add<HStoreNamedField>(function_context,
- HObjectAccess::ForContextSlot(Context::CLOSURE_INDEX),
- function);
- Add<HStoreNamedField>(function_context,
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX),
- context());
- Add<HStoreNamedField>(function_context,
- HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX),
- graph()->GetConstantHole());
-
- // Copy the native context from the previous context.
- HValue* native_context = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
- Add<HStoreNamedField>(function_context, HObjectAccess::ForContextSlot(
- Context::NATIVE_CONTEXT_INDEX),
- native_context);
-
- // Initialize the rest of the slots to undefined.
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; ++i) {
- Add<HStoreNamedField>(function_context,
- HObjectAccess::ForContextSlot(i),
- graph()->GetConstantUndefined());
- }
-
- return function_context;
-}
-
-
-Handle<Code> FastNewContextStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
- HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+ HValue* receiver = GetParameter(Descriptor::kReceiver);
+ HValue* key = GetParameter(Descriptor::kName);
Add<HCheckSmi>(key);
@@ -2322,11 +1681,16 @@ Handle<Code> LoadDictionaryElementStub::GenerateCode() {
template<>
HValue* CodeStubGraphBuilder<RegExpConstructResultStub>::BuildCodeStub() {
// Determine the parameters.
- HValue* length = GetParameter(RegExpConstructResultStub::kLength);
- HValue* index = GetParameter(RegExpConstructResultStub::kIndex);
- HValue* input = GetParameter(RegExpConstructResultStub::kInput);
-
+ HValue* length = GetParameter(Descriptor::kLength);
+ HValue* index = GetParameter(Descriptor::kIndex);
+ HValue* input = GetParameter(Descriptor::kInput);
+
+  // TODO(turbofan): This code stub regressed at some point to requiring a
+  // frame on ia32, which went unnoticed because the stub isn't built into
+  // the snapshot. We should probably just replace it with a TurboFan stub
+  // rather than fixing it.
+#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
info()->MarkMustNotHaveEagerFrame();
+#endif
return BuildRegExpConstructResult(length, index, input);
}
@@ -2344,6 +1708,8 @@ class CodeStubGraphBuilder<KeyedLoadGenericStub>
explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
: CodeStubGraphBuilderBase(info, stub) {}
+ typedef KeyedLoadGenericStub::Descriptor Descriptor;
+
protected:
virtual HValue* BuildCodeStub();
@@ -2399,8 +1765,8 @@ void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad(
HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
- HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+ HValue* receiver = GetParameter(Descriptor::kReceiver);
+ HValue* key = GetParameter(Descriptor::kName);
// Split into a smi/integer case and unique string case.
HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
graph()->CreateBasicBlock());
@@ -2454,12 +1820,12 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
BuildElementsKindLimitCheck(&kind_if, bit_field2,
SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
// Non-strict elements are not handled.
- Add<HDeoptimize>(Deoptimizer::kNonStrictElementsInKeyedLoadGenericStub,
+ Add<HDeoptimize>(DeoptimizeReason::kNonStrictElementsInKeyedLoadGenericStub,
Deoptimizer::EAGER);
Push(graph()->GetConstant0());
kind_if.ElseDeopt(
- Deoptimizer::kElementsKindUnhandledInKeyedLoadGenericStub);
+ DeoptimizeReason::kElementsKindUnhandledInKeyedLoadGenericStub);
kind_if.End();
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 60b350cd93..2b71716dc3 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -8,14 +8,13 @@
#include "src/bootstrapper.h"
#include "src/code-factory.h"
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/parsing/parser.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -26,9 +25,9 @@ RUNTIME_FUNCTION(UnexpectedStubMiss) {
return Smi::FromInt(0);
}
-
CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
- : call_descriptor_(stub->GetCallInterfaceDescriptor()),
+ : isolate_(stub->isolate()),
+ call_descriptor_(stub->GetCallInterfaceDescriptor()),
stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
@@ -38,9 +37,9 @@ CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
stub->InitializeDescriptor(this);
}
-
CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
- : stack_parameter_count_(no_reg),
+ : isolate_(isolate),
+ stack_parameter_count_(no_reg),
hint_stack_parameter_count_(-1),
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
deoptimization_handler_(NULL),
@@ -83,8 +82,8 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
std::ostringstream os;
os << *this;
PROFILE(isolate(),
- CodeCreateEvent(Logger::STUB_TAG, AbstractCode::cast(*code),
- os.str().c_str()));
+ CodeCreateEvent(CodeEventListener::STUB_TAG,
+ AbstractCode::cast(*code), os.str().c_str()));
Counters* counters = isolate()->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef DEBUG
@@ -99,8 +98,7 @@ Code::Kind CodeStub::GetCodeKind() const {
Code::Flags CodeStub::GetCodeFlags() const {
- return Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState(),
- GetStubType());
+ return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
}
@@ -135,11 +133,7 @@ Handle<Code> PlatformCodeStub::GenerateCode() {
CodeDesc desc;
masm.GetCode(&desc);
// Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- GetCodeKind(),
- GetICState(),
- GetExtraICState(),
- GetStubType());
+ Code::Flags flags = Code::ComputeFlags(GetCodeKind(), GetExtraICState());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;
@@ -276,6 +270,7 @@ MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
// static
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
+ if (FLAG_minimal) return;
// Generate the uninitialized versions of the stub.
for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
@@ -295,6 +290,7 @@ void BinaryOpICStub::PrintState(std::ostream& os) const { // NOLINT
// static
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
const BinaryOpICState& state) {
+ if (FLAG_minimal) return;
BinaryOpICStub stub(isolate, state);
stub.GetCode();
}
@@ -377,45 +373,6 @@ Condition CompareICStub::GetCondition() const {
}
-void CompareICStub::AddToSpecialCache(Handle<Code> new_object) {
- DCHECK(*known_map_ != NULL);
- Isolate* isolate = new_object->GetIsolate();
- Factory* factory = isolate->factory();
- return Map::UpdateCodeCache(known_map_,
- strict() ?
- factory->strict_compare_ic_string() :
- factory->compare_ic_string(),
- new_object);
-}
-
-
-bool CompareICStub::FindCodeInSpecialCache(Code** code_out) {
- Factory* factory = isolate()->factory();
- Code::Flags flags = Code::ComputeFlags(
- GetCodeKind(),
- UNINITIALIZED);
- Handle<Object> probe(
- known_map_->FindInCodeCache(
- strict() ?
- *factory->strict_compare_ic_string() :
- *factory->compare_ic_string(),
- flags),
- isolate());
- if (probe->IsCode()) {
- *code_out = Code::cast(*probe);
-#ifdef DEBUG
- CompareICStub decode((*code_out)->stub_key(), isolate());
- DCHECK(op() == decode.op());
- DCHECK(left() == decode.left());
- DCHECK(right() == decode.right());
- DCHECK(state() == decode.state());
-#endif
- return true;
- }
- return false;
-}
-
-
void CompareICStub::Generate(MacroAssembler* masm) {
switch (state()) {
case CompareICState::UNINITIALIZED:
@@ -452,44 +409,109 @@ void CompareICStub::Generate(MacroAssembler* masm) {
}
}
-
Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator());
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
- compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
- GetCodeFlags(), name);
+ CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
+ name);
GenerateAssembly(&assembler);
return assembler.GenerateCode();
}
-void AllocateHeapNumberStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void LoadICTrampolineTFStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- Node* result = assembler->AllocateHeapNumber();
- assembler->Return(result);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->LoadIC(&p);
}
-void AllocateMutableHeapNumberStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void LoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- Node* result = assembler->Allocate(HeapNumber::kSize);
- assembler->StoreMapNoWriteBarrier(
- result,
- assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map()));
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->LoadIC(&p);
+}
+
+void LoadGlobalICTrampolineStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
+ vector);
+ assembler->LoadGlobalIC(&p);
+}
+
+void LoadGlobalICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
+ vector);
+ assembler->LoadGlobalIC(&p);
+}
+
+void KeyedLoadICTrampolineTFStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->KeyedLoadIC(&p);
+}
+
+void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->KeyedLoadIC(&p);
+}
+
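// A sketch, not part of the patch: the trampoline and non-trampoline IC
// stubs above differ only in how the type feedback vector is obtained,
// assuming the CodeStubAssembler API used in this diff.
//
//   // Trampoline variant: recover the vector from the stub's own frame.
//   Node* vector = assembler->LoadTypeFeedbackVectorForStub();
//
//   // Non-trampoline variant: the caller passes the vector explicitly.
//   Node* vector = assembler->Parameter(Descriptor::kVector);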
+void AllocateHeapNumberStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* result = assembler->AllocateHeapNumber();
assembler->Return(result);
}
#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Stub::GenerateAssembly( \
- compiler::CodeStubAssembler* assembler) const { \
- compiler::Node* result = assembler->Allocate( \
- Simd128Value::kSize, compiler::CodeStubAssembler::kNone); \
- compiler::Node* map_offset = \
- assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag); \
- compiler::Node* map = assembler->IntPtrAdd(result, map_offset); \
+ void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
+ const { \
+ compiler::Node* result = \
+ assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
+ compiler::Node* map = assembler->LoadMap(result); \
assembler->StoreNoWriteBarrier( \
MachineRepresentation::kTagged, map, \
assembler->HeapConstant(isolate()->factory()->type##_map())); \
@@ -498,22 +520,20 @@ void AllocateMutableHeapNumberStub::GenerateAssembly(
SIMD128_TYPES(SIMD128_GEN_ASM)
#undef SIMD128_GEN_ASM
-void StringLengthStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
compiler::Node* value = assembler->Parameter(0);
- compiler::Node* string =
- assembler->LoadObjectField(value, JSValue::kValueOffset);
- compiler::Node* result =
- assembler->LoadObjectField(string, String::kLengthOffset);
+ compiler::Node* string = assembler->LoadJSValueValue(value);
+ compiler::Node* result = assembler->LoadStringLength(string);
assembler->Return(result);
}
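// A sketch, not part of the patch: the new LoadJSValueValue and
// LoadStringLength helpers appear to encapsulate exactly the raw field loads
// deleted above; plausible definitions, inferred from those deleted lines.
//
//   Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
//     return LoadObjectField(object, JSValue::kValueOffset);
//   }
//   Node* CodeStubAssembler::LoadStringLength(Node* string) {
//     return LoadObjectField(string, String::kLengthOffset);
//   }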
-void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
+// static
+compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left, compiler::Node* right,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(2);
+ typedef CodeStubAssembler::Variable Variable;
// Shared entry for floating point addition.
Label do_fadd(assembler);
@@ -523,11 +543,14 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
// We might need to loop several times due to ToPrimitive, ToString and/or
// ToNumber conversions.
Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged);
+ var_rhs(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ Label loop(assembler, 2, loop_vars), end(assembler),
+ string_add_convert_left(assembler, Label::kDeferred),
+ string_add_convert_right(assembler, Label::kDeferred);
+ var_lhs.Bind(left);
+ var_rhs.Bind(right);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
@@ -564,13 +587,14 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
}
assembler->Bind(&if_notoverflow);
- assembler->Return(assembler->Projection(0, pair));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotsmi);
{
// Load the map of {rhs}.
- Node* rhs_map = assembler->LoadObjectField(rhs, HeapObject::kMapOffset);
+ Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
Label if_rhsisnumber(assembler),
@@ -601,11 +625,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisstring);
{
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- assembler->TailCallStub(callable, context, lhs, rhs);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_left);
}
assembler->Bind(&if_rhsisnotstring);
@@ -622,9 +644,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_rhs.Bind(
- assembler->CallRuntime(Runtime::kToPrimitive, context, rhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -655,11 +677,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_lhsisstring);
{
- // Convert {rhs} to a String (using the sequence of ToPrimitive with
- // no hint followed by ToString) and concatenate the strings.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- assembler->TailCallStub(callable, context, lhs, rhs);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_right);
}
assembler->Bind(&if_lhsisnotstring);
@@ -702,9 +722,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_lhs.Bind(
- assembler->CallRuntime(Runtime::kToPrimitive, context, lhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -733,11 +753,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisstring);
{
- // Convert {lhs} to a String (using the sequence of ToPrimitive with
- // no hint followed by ToString) and concatenate the strings.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- assembler->TailCallStub(callable, context, lhs, rhs);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_left);
}
assembler->Bind(&if_rhsisnotstring);
@@ -781,9 +799,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here too.
- var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -812,9 +830,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, lhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -832,9 +850,9 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here too.
- var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -853,6 +871,27 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
}
}
}
+ assembler->Bind(&string_add_convert_left);
+ {
+    // Convert {lhs} to a String and concatenate the resulting string with
+    // the String {rhs}.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+ var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+ var_rhs.value()));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&string_add_convert_right);
+ {
+    // Convert {rhs} to a String and concatenate the String {lhs} with the
+    // resulting string.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+ var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+ var_rhs.value()));
+ assembler->Goto(&end);
+ }
assembler->Bind(&do_fadd);
{
@@ -860,31 +899,169 @@ void AddStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
Node* rhs_value = var_fadd_rhs.value();
Node* value = assembler->Float64Add(lhs_value, rhs_value);
Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ var_result.Bind(result);
+ assembler->Goto(&end);
}
+ assembler->Bind(&end);
+ return var_result.value();
}
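// A sketch, not part of the patch: the Smi fast path of AddStub in plain
// C++, assuming 32-bit Smi payloads; the helper name and the GCC/Clang
// builtin are illustrative.
//
//   bool TrySmiAdd(int32_t lhs, int32_t rhs, int32_t* result) {
//     // Mirrors SmiAddWithOverflow: on overflow the stub falls through to
//     // the shared float64 path (do_fadd) instead of producing a Smi.
//     return !__builtin_add_overflow(lhs, rhs, result);
//   }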
-void SubtractStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
+// static
+compiler::Node* AddWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
- Node* context = assembler->Parameter(2);
+ // Shared entry for floating point addition.
+ Label do_fadd(assembler), end(assembler),
+ call_add_stub(assembler, Label::kDeferred);
+ Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
+ var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_result(assembler, MachineRepresentation::kTagged);
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Try fast Smi addition first.
+ Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
+ Node* overflow = assembler->Projection(1, pair);
+
+      // Check if the Smi addition overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_notoverflow);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+ }
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
+ &call_add_stub);
+
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of {lhs}.
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
+ &call_add_stub);
+
+ // Check if the {rhs} is Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ &call_add_stub);
+
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+ }
+
+ assembler->Bind(&do_fadd);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value =
+ assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ var_result.Bind(result);
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_add_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Add(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
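// A sketch, not part of the patch: the var_type_feedback values recorded
// above (kSignedSmall, kNumber, kAny) form a lattice that UpdateFeedback
// merges into the feedback vector slot. Assuming the BinaryOperationFeedback
// constants are OR-combinable bits (an assumption, not spelled out here):
//
//   // Once kAny has been recorded, the feedback never narrows again.
//   int32_t CombineFeedback(int32_t old_feedback, int32_t new_feedback) {
//     return old_feedback | new_feedback;
//   }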
+
+// static
+compiler::Node* SubtractStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
// Shared entry for floating point subtraction.
- Label do_fsub(assembler);
+ Label do_fsub(assembler), end(assembler);
Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive and/or ToNumber
// conversions.
Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged);
+ var_rhs(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ var_lhs.Bind(left);
+ var_rhs.Bind(right);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
@@ -922,7 +1099,8 @@ void SubtractStub::GenerateAssembly(
}
assembler->Bind(&if_notoverflow);
- assembler->Return(assembler->Projection(0, pair));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotsmi);
@@ -948,7 +1126,8 @@ void SubtractStub::GenerateAssembly(
assembler->Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1004,7 +1183,8 @@ void SubtractStub::GenerateAssembly(
assembler->Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1014,7 +1194,8 @@ void SubtractStub::GenerateAssembly(
assembler->Bind(&if_lhsisnotnumber);
{
// Convert the {lhs} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -1026,51 +1207,1626 @@ void SubtractStub::GenerateAssembly(
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+ assembler->Bind(&end);
+ return var_result.value();
+}
+
+// static
+compiler::Node* SubtractWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry for floating point subtraction.
+ Label do_fsub(assembler), end(assembler),
+ call_subtract_stub(assembler, Label::kDeferred);
+ Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
+ var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_result(assembler, MachineRepresentation::kTagged);
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Try a fast Smi subtraction first.
+ Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_overflow);
+ {
+        // {lhs} and {rhs} are Smis but the result does not fit into Smi
+        // range, so the combined feedback is kNumber.
+ var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_notoverflow);
+      // {lhs}, {rhs} and the result are all Smis, so the combined feedback
+      // is kSignedSmall.
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
+ &call_subtract_stub);
+
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fsub);
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of the {lhs}.
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if the {lhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(lhs_map, assembler->HeapNumberMapConstant()),
+ &call_subtract_stub);
+
+ // Check if the {rhs} is a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fsub);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ assembler->GotoUnless(
+ assembler->WordEqual(rhs_map, assembler->HeapNumberMapConstant()),
+ &call_subtract_stub);
+
+ // Perform a floating point subtraction.
+ var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fsub);
+ }
+ }
+
+ assembler->Bind(&do_fsub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* lhs_value = var_fsub_lhs.value();
+ Node* rhs_value = var_fsub_rhs.value();
+ Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_subtract_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Subtract(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* MultiplyStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry point for floating point multiplication.
+ Label do_fmul(assembler), return_result(assembler);
+ Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
+ var_rhs_float64(assembler, MachineRepresentation::kFloat64);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // We might need to loop one or two times due to ToNumber conversions.
+ Variable var_lhs(assembler, MachineRepresentation::kTagged),
+ var_rhs(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
+ Variable* loop_variables[] = {&var_lhs, &var_rhs};
+ Label loop(assembler, 2, loop_variables);
+ var_lhs.Bind(left);
+ var_rhs.Bind(right);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ Node* lhs = var_lhs.value();
+ Node* rhs = var_rhs.value();
+
+ Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+
+ assembler->Bind(&lhs_is_smi);
+ {
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
+ &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+ // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
+ // in case of overflow.
+ var_result.Bind(assembler->SmiMul(lhs, rhs));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Label rhs_is_number(assembler),
+ rhs_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &rhs_is_number, &rhs_is_not_number);
+
+ assembler->Bind(&rhs_is_number);
+ {
+          // Convert {lhs} to a double and multiply it by the value of {rhs}.
+ var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_number);
+ {
+ // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&lhs_is_not_smi);
+ {
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label lhs_is_number(assembler),
+ lhs_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+ &lhs_is_number, &lhs_is_not_number);
+
+ assembler->Bind(&lhs_is_number);
+ {
+ // Check if {rhs} is a Smi.
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
+ &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+          // Convert {rhs} to a double and multiply it by the value of {lhs}.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Label rhs_is_number(assembler),
+ rhs_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+ &rhs_is_number, &rhs_is_not_number);
+
+ assembler->Bind(&rhs_is_number);
+ {
+ // Both {lhs} and {rhs} are HeapNumbers. Load their values and
+ // multiply them.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_number);
+ {
+ // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&lhs_is_not_number);
+ {
+ // Convert {lhs} to a Number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fmul);
+ {
+ Node* value =
+ assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ var_result.Bind(result);
+ assembler->Goto(&return_result);
}
+
+ assembler->Bind(&return_result);
+ return var_result.value();
}
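// A sketch, not part of the patch: the two hazards that make SmiMul produce
// a HeapNumber even for Smi inputs, in plain C++ with a hypothetical helper
// (32-bit Smi payloads and a GCC/Clang builtin assumed).
//
//   bool TrySmiMul(int32_t lhs, int32_t rhs, int32_t* result) {
//     if (__builtin_mul_overflow(lhs, rhs, result)) return false;  // overflow
//     // JavaScript distinguishes -0: e.g. -3 * 0 is -0, which no Smi can
//     // represent, so a HeapNumber must be produced instead.
//     if (*result == 0 && (lhs < 0 || rhs < 0)) return false;
//     return true;  // the product fits in a Smi
//   }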
-void BitwiseAndStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* MultiplyWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+ compiler::Node* context) {
using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ // Shared entry point for floating point multiplication.
+ Label do_fmul(assembler), end(assembler),
+ call_multiply_stub(assembler, Label::kDeferred);
+ Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
+ var_rhs_float64(assembler, MachineRepresentation::kFloat64),
+ var_result(assembler, MachineRepresentation::kTagged),
+ var_type_feedback(assembler, MachineRepresentation::kWord32);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+
+ assembler->Bind(&lhs_is_smi);
+ {
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+      // Both {lhs} and {rhs} are Smis, but the result is not necessarily a
+      // Smi, since the multiplication may overflow.
+ var_result.Bind(assembler->SmiMul(lhs, rhs));
+ var_type_feedback.Bind(assembler->Select(
+ assembler->WordIsSmi(var_result.value()),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber),
+ MachineRepresentation::kWord32));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ &call_multiply_stub);
+
+      // Convert {lhs} to a double and multiply it by the value of {rhs}.
+ var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+ }
+
+ assembler->Bind(&lhs_is_not_smi);
+ {
+ Node* lhs_map = assembler->LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
+ &call_multiply_stub);
+
+ // Check if {rhs} is a Smi.
+ Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+
+ assembler->Bind(&rhs_is_smi);
+ {
+      // Convert {rhs} to a double and multiply it by the value of {lhs}.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fmul);
+ }
+
+ assembler->Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ &call_multiply_stub);
+
+ // Both {lhs} and {rhs} are HeapNumbers. Load their values and
+ // multiply them.
+ var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fmul);
+ }
+ }
+
+ assembler->Bind(&do_fmul);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value =
+ assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ var_result.Bind(result);
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_multiply_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Multiply(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* DivideStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry point for floating point division.
+ Label do_fdiv(assembler), end(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // We might need to loop one or two times due to ToNumber conversions.
+ Variable var_dividend(assembler, MachineRepresentation::kTagged),
+ var_divisor(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
+ Variable* loop_variables[] = {&var_dividend, &var_divisor};
+ Label loop(assembler, 2, loop_variables);
+ var_dividend.Bind(left);
+ var_divisor.Bind(right);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ Node* dividend = var_dividend.value();
+ Node* divisor = var_divisor.value();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ Label bailout(assembler);
+
+ // Do floating point division if {divisor} is zero.
+ assembler->GotoIf(
+ assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+
+        // Do floating point division if {dividend} is zero and {divisor} is
+        // negative.
+ Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
+ assembler->Branch(
+ assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+ &dividend_is_zero, &dividend_is_not_zero);
+
+ assembler->Bind(&dividend_is_zero);
+ {
+ assembler->GotoIf(
+ assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+ assembler->Goto(&dividend_is_not_zero);
+ }
+ assembler->Bind(&dividend_is_not_zero);
+
+ Node* untagged_divisor = assembler->SmiUntag(divisor);
+ Node* untagged_dividend = assembler->SmiUntag(dividend);
+
+        // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1
+        // if the Smi size is 31) and {divisor} is -1.
+ Label divisor_is_minus_one(assembler),
+ divisor_is_not_minus_one(assembler);
+ assembler->Branch(assembler->Word32Equal(untagged_divisor,
+ assembler->Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
+
+ assembler->Bind(&divisor_is_minus_one);
+ {
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ untagged_dividend,
+ assembler->Int32Constant(
+ kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+ &bailout);
+ assembler->Goto(&divisor_is_not_minus_one);
+ }
+ assembler->Bind(&divisor_is_not_minus_one);
+
+ // TODO(epertoso): consider adding a machine instruction that returns
+ // both the result and the remainder.
+ Node* untagged_result =
+ assembler->Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated =
+ assembler->IntPtrMul(untagged_result, untagged_divisor);
+ // Do floating point division if the remainder is not 0.
+ assembler->GotoIf(
+ assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
+ var_result.Bind(assembler->SmiTag(untagged_result));
+ assembler->Goto(&end);
+
+ // Bailout: convert {dividend} and {divisor} to double and do double
+ // division.
+ assembler->Bind(&bailout);
+ {
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+          // Convert {dividend} to a double and divide it by the value of
+ // {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ Label dividend_is_number(assembler),
+ dividend_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+ &dividend_is_number, &dividend_is_not_number);
+
+ assembler->Bind(&dividend_is_number);
+ {
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Convert {divisor} to a double and use it for a floating point
+ // division.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+ // Both {dividend} and {divisor} are HeapNumbers. Load their values
+ // and divide them.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_number);
+ {
+ // Convert {dividend} to a Number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdiv);
+ {
+ Node* value = assembler->Float64Div(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+ assembler->Bind(&end);
+ return var_result.value();
+}
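// A sketch, not part of the patch: the Smi fast path above bails out to
// float64 division in exactly four cases; a compact restatement with a
// hypothetical helper (32-bit Smi payloads assumed).
//
//   bool TrySmiDiv(int32_t dividend, int32_t divisor, int32_t* result) {
//     if (divisor == 0) return false;                          // x / 0
//     if (dividend == 0 && divisor < 0) return false;          // 0 / -x is -0
//     if (dividend == kMinInt && divisor == -1) return false;  // overflows
//     if (dividend % divisor != 0) return false;               // fractional
//     *result = dividend / divisor;
//     return true;
//   }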
+
+// static
+compiler::Node* DivideWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* dividend,
+ compiler::Node* divisor, compiler::Node* slot_id,
+ compiler::Node* type_feedback_vector, compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry point for floating point division.
+ Label do_fdiv(assembler), end(assembler), call_divide_stub(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64),
+ var_result(assembler, MachineRepresentation::kTagged),
+ var_type_feedback(assembler, MachineRepresentation::kWord32);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ Label bailout(assembler);
+
+ // Do floating point division if {divisor} is zero.
+ assembler->GotoIf(
+ assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+
+      // Do floating point division if {dividend} is zero and {divisor} is
+      // negative.
+ Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
+ assembler->Branch(
+ assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+ &dividend_is_zero, &dividend_is_not_zero);
+
+ assembler->Bind(&dividend_is_zero);
+ {
+ assembler->GotoIf(
+ assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+ &bailout);
+ assembler->Goto(&dividend_is_not_zero);
+ }
+ assembler->Bind(&dividend_is_not_zero);
+
+ Node* untagged_divisor = assembler->SmiUntag(divisor);
+ Node* untagged_dividend = assembler->SmiUntag(dividend);
+
+      // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1
+      // if the Smi size is 31) and {divisor} is -1.
+ Label divisor_is_minus_one(assembler),
+ divisor_is_not_minus_one(assembler);
+ assembler->Branch(assembler->Word32Equal(untagged_divisor,
+ assembler->Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
+
+ assembler->Bind(&divisor_is_minus_one);
+ {
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ untagged_dividend,
+ assembler->Int32Constant(kSmiValueSize == 32 ? kMinInt
+ : (kMinInt >> 1))),
+ &bailout);
+ assembler->Goto(&divisor_is_not_minus_one);
+ }
+ assembler->Bind(&divisor_is_not_minus_one);
+
+ Node* untagged_result =
+ assembler->Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated = assembler->IntPtrMul(untagged_result, untagged_divisor);
+ // Do floating point division if the remainder is not 0.
+ assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
+ &bailout);
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->SmiTag(untagged_result));
+ assembler->Goto(&end);
+
+ // Bailout: convert {dividend} and {divisor} to double and do double
+ // division.
+ assembler->Bind(&bailout);
+ {
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_divide_stub);
+
+      // Convert {dividend} to a double and divide it by the value of
+ // {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ &call_divide_stub);
+
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Convert {divisor} to a double and use it for a floating point
+ // division.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_divide_stub);
+
+ // Both {dividend} and {divisor} are HeapNumbers. Load their values
+ // and divide them.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fdiv);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdiv);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value = assembler->Float64Div(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_divide_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Divide(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ Label return_result(assembler, &var_result);
+
+ // Shared entry point for floating point modulus.
+ Label do_fmod(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ // We might need to loop one or two times due to ToNumber conversions.
+ Variable var_dividend(assembler, MachineRepresentation::kTagged),
+ var_divisor(assembler, MachineRepresentation::kTagged);
+ Variable* loop_variables[] = {&var_dividend, &var_divisor};
+ Label loop(assembler, 2, loop_variables);
+ var_dividend.Bind(left);
+ var_divisor.Bind(right);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ Node* dividend = var_dividend.value();
+ Node* divisor = var_divisor.value();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Compute the modulus of two Smis.
+ var_result.Bind(assembler->SmiMod(dividend, divisor));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+          // Convert {dividend} to a double and compute its modulus with the
+          // value of {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ Label dividend_is_number(assembler),
+ dividend_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+ &dividend_is_number, &dividend_is_not_number);
+
+ assembler->Bind(&dividend_is_number);
+ {
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ // Convert {divisor} to a double and compute {dividend}'s modulus with
+ // it.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ Label divisor_is_number(assembler),
+ divisor_is_not_number(assembler, Label::kDeferred);
+ assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+ &divisor_is_number, &divisor_is_not_number);
+
+ assembler->Bind(&divisor_is_number);
+ {
+ // Both {dividend} and {divisor} are HeapNumbers. Load their values
+ // and compute their modulus.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_number);
+ {
+ // Convert {divisor} to a number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_number);
+ {
+ // Convert {dividend} to a Number and loop.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fmod);
+ {
+ Node* value = assembler->Float64Mod(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_result);
+ return var_result.value();
+}
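
Float64Mod follows C's fmod, which is also what JavaScript's % operator specifies: the result carries the sign of the dividend, not of the divisor, unlike a mathematical modulus. A quick standalone check of that behaviour:

#include <cmath>
#include <cstdio>

int main() {
  std::printf("%g\n", std::fmod(5.5, 2.0));   // 1.5
  std::printf("%g\n", std::fmod(-5.5, 2.0));  // -1.5: sign of the dividend
  std::printf("%g\n", std::fmod(5.5, -2.0));  // 1.5: divisor sign is ignored
  return 0;
}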
+
+// static
+compiler::Node* ModulusWithFeedbackStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* dividend,
+ compiler::Node* divisor, compiler::Node* slot_id,
+ compiler::Node* type_feedback_vector, compiler::Node* context) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+  // Shared entry point for floating point modulus.
+ Label do_fmod(assembler), end(assembler), call_modulus_stub(assembler);
+ Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+ var_divisor_float64(assembler, MachineRepresentation::kFloat64),
+ var_result(assembler, MachineRepresentation::kTagged),
+ var_type_feedback(assembler, MachineRepresentation::kWord32);
+
+ Node* number_map = assembler->HeapNumberMapConstant();
+
+ Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ &dividend_is_not_smi);
+
+ assembler->Bind(&dividend_is_smi);
+ {
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+ var_result.Bind(assembler->SmiMod(dividend, divisor));
+ var_type_feedback.Bind(assembler->Select(
+ assembler->WordIsSmi(var_result.value()),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_modulus_stub);
+
+      // Convert {dividend} to a double and compute its modulus with the
+      // value of {divisor}.
+ var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+ }
+
+ assembler->Bind(&dividend_is_not_smi);
+ {
+ Node* dividend_map = assembler->LoadMap(dividend);
+
+ // Check if {dividend} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ &call_modulus_stub);
+
+ // Check if {divisor} is a Smi.
+ Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+ assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ &divisor_is_not_smi);
+
+ assembler->Bind(&divisor_is_smi);
+ {
+      // Convert {divisor} to a double and use it for a floating point
+      // modulus.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+ assembler->Goto(&do_fmod);
+ }
+
+ assembler->Bind(&divisor_is_not_smi);
+ {
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ &call_modulus_stub);
+
+      // Both {dividend} and {divisor} are HeapNumbers. Load their values
+      // and compute their modulus.
+ var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+ assembler->Goto(&do_fmod);
+ }
+ }
+
+ assembler->Bind(&do_fmod);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value = assembler->Float64Mod(var_dividend_float64.value(),
+ var_divisor_float64.value());
+ var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_modulus_stub);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ Callable callable = CodeFactory::Modulus(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return var_result.value();
+}
+
+// static
+compiler::Node* ShiftLeftStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+ Node* shift_count =
+ assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+ Node* value = assembler->Word32Shl(lhs_value, shift_count);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ return result;
+}
+
+// static
+compiler::Node* ShiftRightStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+ Node* shift_count =
+ assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+ Node* value = assembler->Word32Sar(lhs_value, shift_count);
+ Node* result = assembler->ChangeInt32ToTagged(value);
+ return result;
+}
+
+// static
+compiler::Node* ShiftRightLogicalStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
+ Node* shift_count =
+ assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
+ Node* value = assembler->Word32Shr(lhs_value, shift_count);
+ Node* result = assembler->ChangeUint32ToTagged(value);
+ return result;
+}
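
All three shift stubs mask the (already truncated) right operand with 0x1f because ECMAScript uses only the five least significant bits of the shift count. A standalone sketch of the left-shift case; the detour through uint32_t keeps the C++ shift well defined where a signed shift would overflow:

#include <cassert>
#include <cstdint>

int32_t JsShiftLeft(int32_t lhs, int32_t rhs) {
  uint32_t shift_count = static_cast<uint32_t>(rhs) & 0x1f;  // count mod 32
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << shift_count);
}

int main() {
  assert(JsShiftLeft(1, 33) == 2);  // 33 & 0x1f == 1, same as 1 << 1
  assert(JsShiftLeft(1, 32) == 1);  // 32 & 0x1f == 0, shift is a no-op
  return 0;
}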
+
+// static
+compiler::Node* BitwiseAndStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
+ using compiler::Node;
+
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
Node* value = assembler->Word32And(lhs_value, rhs_value);
Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ return result;
}
-void BitwiseOrStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* BitwiseOrStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
using compiler::Node;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
Node* value = assembler->Word32Or(lhs_value, rhs_value);
Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ return result;
}
-void BitwiseXorStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+// static
+compiler::Node* BitwiseXorStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* left,
+ compiler::Node* right,
+ compiler::Node* context) {
using compiler::Node;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, lhs);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, rhs);
+ Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
+ Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
Node* value = assembler->Word32Xor(lhs_value, rhs_value);
Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ return result;
+}
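
The TruncateTaggedToWord32 calls in these bitwise stubs implement the ToInt32 conversion: the operand is truncated toward zero, wrapped modulo 2^32, and reinterpreted as a signed 32-bit value, with NaN and the infinities collapsing to 0. A sketch of that conversion for a plain double input:

#include <cmath>
#include <cstdint>

int32_t ToInt32(double number) {
  if (!std::isfinite(number)) return 0;  // NaN, +Infinity, -Infinity
  double truncated = std::trunc(number);
  double wrapped = std::fmod(truncated, 4294967296.0);  // wrap modulo 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}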
+
+// static
+compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* value,
+ compiler::Node* context,
+ compiler::Node* type_feedback_vector,
+ compiler::Node* slot_id) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry for floating point increment.
+ Label do_finc(assembler), end(assembler);
+ Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ value_var.Bind(value);
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNone));
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi addition first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->SmiAddWithOverflow(value, one);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_finc_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_finc);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->Branch(assembler->WordEqual(value_map, number_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_finc);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+
+ assembler->Bind(&do_finc);
+ {
+ Node* finc_value = var_finc_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* finc_result = assembler->Float64Add(finc_value, one);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->ChangeFloat64ToTagged(finc_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return result_var.value();
+}
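
The Smi fast path above hinges on an overflow-checked addition and falls back to float64 arithmetic only when the increment leaves the small-integer range. A rough standalone equivalent, with the GCC/Clang checked-arithmetic builtin standing in for SmiAddWithOverflow:

#include <cstdint>

double Increment(int32_t value) {
  int32_t incremented;
  if (!__builtin_add_overflow(value, 1, &incremented)) {
    return incremented;  // fast path: result is still a small integer
  }
  return static_cast<double>(value) + 1.0;  // overflow: redo as float64
}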
+
+// static
+compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* value,
+ compiler::Node* context,
+ compiler::Node* type_feedback_vector,
+ compiler::Node* slot_id) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ // Shared entry for floating point decrement.
+ Label do_fdec(assembler), end(assembler);
+ Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNone));
+ value_var.Bind(value);
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi subtraction first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->SmiSubWithOverflow(value, one);
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(assembler->Projection(0, pair));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fdec_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_fdec);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ Node* number_map = assembler->HeapNumberMapConstant();
+ assembler->Branch(assembler->WordEqual(value_map, number_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_fdec);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdec);
+ {
+ Node* fdec_value = var_fdec_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* fdec_result = assembler->Float64Sub(fdec_value, one);
+ var_type_feedback.Bind(assembler->Word32Or(
+ var_type_feedback.value(),
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->ChangeFloat64ToTagged(fdec_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
+ return result_var.value();
+}
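
Note how IncStub and DecStub combine feedback with Word32Or instead of overwriting it: the feedback kinds are encoded so that each more general kind contains the bits of the kinds below it, so the recorded state can only ever widen. A sketch of that lattice using hypothetical bit values (the real constants are defined by BinaryOperationFeedback):

#include <cstdint>

enum Feedback : uint32_t {  // hypothetical encoding, for illustration only
  kNone        = 0,
  kSignedSmall = 1 << 0,
  kNumber      = (1 << 1) | kSignedSmall,
  kAny         = (1 << 2) | kNumber,
};

// Mirrors the stubs' Word32Or + UpdateFeedback: OR-ing two states always
// yields a state at least as general as either input.
uint32_t CombineFeedback(uint32_t recorded, uint32_t seen) {
  return recorded | seen;
}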
+
+// ES6 section 7.1.13 ToObject (argument)
+void ToObjectStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
+ if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
+ if_wrapjsvalue(assembler);
+
+ Node* object = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Variable constructor_function_index_var(assembler,
+ MachineRepresentation::kWord32);
+
+ assembler->Branch(assembler->WordIsSmi(object), &if_number, &if_notsmi);
+
+ assembler->Bind(&if_notsmi);
+ Node* map = assembler->LoadMap(object);
+
+ assembler->GotoIf(
+ assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
+ &if_number);
+
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(
+ assembler->Int32GreaterThanOrEqual(
+ instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ &if_jsreceiver);
+
+ Node* constructor_function_index = assembler->LoadObjectField(
+ map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+ MachineType::Uint8());
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ constructor_function_index,
+ assembler->Int32Constant(Map::kNoConstructorFunctionIndex)),
+ &if_noconstructor);
+ constructor_function_index_var.Bind(constructor_function_index);
+ assembler->Goto(&if_wrapjsvalue);
+
+ assembler->Bind(&if_number);
+ constructor_function_index_var.Bind(
+ assembler->Int32Constant(Context::NUMBER_FUNCTION_INDEX));
+ assembler->Goto(&if_wrapjsvalue);
+
+ assembler->Bind(&if_wrapjsvalue);
+ Node* native_context = assembler->LoadNativeContext(context);
+ Node* constructor = assembler->LoadFixedArrayElement(
+ native_context, constructor_function_index_var.value());
+ Node* initial_map = assembler->LoadObjectField(
+ constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* js_value = assembler->Allocate(JSValue::kSize);
+ assembler->StoreMapNoWriteBarrier(js_value, initial_map);
+ assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
+ assembler->Return(js_value);
+
+ assembler->Bind(&if_noconstructor);
+ assembler->TailCallRuntime(
+ Runtime::kThrowUndefinedOrNullToObject, context,
+ assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ "ToObject", TENURED)));
+
+ assembler->Bind(&if_jsreceiver);
+ assembler->Return(object);
+}
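
Behaviourally, the stub is a straight-line rendering of the spec: receivers pass through unchanged, null and undefined throw, and every other primitive is boxed in a JSValue whose map comes from the matching wrapper constructor (found via the constructor function index stored on the map). A sketch of that three-way dispatch, with the allocation reduced to a description for brevity:

#include <stdexcept>
#include <string>

enum class Kind { kNumber, kString, kSymbol, kNullOrUndefined, kReceiver };

std::string ToObjectAction(Kind kind) {
  switch (kind) {
    case Kind::kReceiver:
      return "return the argument unchanged";
    case Kind::kNullOrUndefined:
      throw std::runtime_error("TypeError: cannot convert to object");
    default:
      return "allocate a JSValue wrapper from the constructor's initial map";
  }
}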
+
+// static
+// ES6 section 12.5.5 typeof operator
+compiler::Node* TypeofStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* value,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+
+ Label return_number(assembler, Label::kDeferred), if_oddball(assembler),
+ return_function(assembler), return_undefined(assembler),
+ return_object(assembler), return_string(assembler),
+ return_result(assembler);
+
+ assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
+
+ Node* map = assembler->LoadMap(value);
+
+ assembler->GotoIf(
+ assembler->WordEqual(map, assembler->HeapNumberMapConstant()),
+ &return_number);
+
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+
+ assembler->GotoIf(assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
+ &if_oddball);
+
+ Node* callable_or_undetectable_mask =
+ assembler->Word32And(assembler->LoadMapBitField(map),
+ assembler->Int32Constant(1 << Map::kIsCallable |
+ 1 << Map::kIsUndetectable));
+
+ assembler->GotoIf(
+ assembler->Word32Equal(callable_or_undetectable_mask,
+ assembler->Int32Constant(1 << Map::kIsCallable)),
+ &return_function);
+
+ assembler->GotoUnless(assembler->Word32Equal(callable_or_undetectable_mask,
+ assembler->Int32Constant(0)),
+ &return_undefined);
+
+ assembler->GotoIf(
+ assembler->Int32GreaterThanOrEqual(
+ instance_type, assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ &return_object);
+
+ assembler->GotoIf(
+ assembler->Int32LessThan(instance_type,
+ assembler->Int32Constant(FIRST_NONSTRING_TYPE)),
+ &return_string);
+
+#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
+ Label return_##type(assembler); \
+ Node* type##_map = \
+ assembler->HeapConstant(assembler->factory()->type##_map()); \
+ assembler->GotoIf(assembler->WordEqual(map, type##_map), &return_##type);
+ SIMD128_TYPES(SIMD128_BRANCH)
+#undef SIMD128_BRANCH
+
+ assembler->Assert(assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(SYMBOL_TYPE)));
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->symbol_string()));
+ assembler->Goto(&return_result);
+
+ assembler->Bind(&return_number);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->number_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&if_oddball);
+ {
+ Node* type = assembler->LoadObjectField(value, Oddball::kTypeOfOffset);
+ result_var.Bind(type);
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_function);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->function_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_undefined);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->undefined_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_object);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->object_string()));
+ assembler->Goto(&return_result);
+ }
+
+ assembler->Bind(&return_string);
+ {
+ result_var.Bind(assembler->HeapConstant(
+ assembler->isolate()->factory()->string_string()));
+ assembler->Goto(&return_result);
+ }
+
+#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type) \
+ assembler->Bind(&return_##type); \
+ { \
+ result_var.Bind(assembler->HeapConstant( \
+ assembler->isolate()->factory()->type##_string())); \
+ assembler->Goto(&return_result); \
+ }
+ SIMD128_TYPES(SIMD128_BIND_RETURN)
+#undef SIMD128_BIND_RETURN
+
+ assembler->Bind(&return_result);
+ return result_var.value();
+}
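
The dispatch order above mirrors the typeof table from the spec: numbers first (Smi or HeapNumber map), then oddballs (which carry their own typeof string in Oddball::kTypeOfOffset), then the callable/undetectable bit-field cases, receivers, strings, the SIMD128 types, and finally symbol as the only remaining possibility. Condensed into a plain C++ predicate chain, where the flags stand in for the map checks:

const char* TypeofString(bool is_number, bool is_callable,
                         bool is_undetectable, bool is_receiver,
                         bool is_string, bool is_symbol) {
  if (is_number) return "number";
  if (is_callable && !is_undetectable) return "function";
  if (is_undetectable) return "undefined";  // e.g. document.all
  if (is_receiver) return "object";
  if (is_string) return "string";
  if (is_symbol) return "symbol";
  return "object";  // oddballs are handled earlier via their stored string
}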
+
+// static
+compiler::Node* InstanceOfStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* object,
+ compiler::Node* callable,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label return_runtime(assembler, Label::kDeferred), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
+
+  // Check that no one has installed a custom @@hasInstance handler
+  // anywhere, i.e. that the @@hasInstance protector is still intact.
+ assembler->GotoUnless(
+ assembler->WordEqual(
+ assembler->LoadObjectField(
+ assembler->LoadRoot(Heap::kHasInstanceProtectorRootIndex),
+ PropertyCell::kValueOffset),
+ assembler->SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
+ &return_runtime);
+
+ // Check if {callable} is a valid receiver.
+ assembler->GotoIf(assembler->WordIsSmi(callable), &return_runtime);
+ assembler->GotoIf(
+ assembler->Word32Equal(
+ assembler->Word32And(
+ assembler->LoadMapBitField(assembler->LoadMap(callable)),
+ assembler->Int32Constant(1 << Map::kIsCallable)),
+ assembler->Int32Constant(0)),
+ &return_runtime);
+
+ // Use the inline OrdinaryHasInstance directly.
+ result.Bind(assembler->OrdinaryHasInstance(context, callable, object));
+ assembler->Goto(&end);
+
+ // TODO(bmeurer): Use GetPropertyStub here once available.
+ assembler->Bind(&return_runtime);
+ {
+ result.Bind(assembler->CallRuntime(Runtime::kInstanceOf, context, object,
+ callable));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
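
The fast path here is guarded by a protector cell: a process-wide flag that stays valid until user code installs a custom @@hasInstance anywhere, after which every instanceof takes the Runtime::kInstanceOf path. The pattern in isolation, with illustrative names rather than V8's:

struct ProtectorCell {
  bool valid = true;  // flipped once the guarded invariant is broken
};

ProtectorCell has_instance_protector;

// Called when user code defines a custom Symbol.hasInstance anywhere.
void InvalidateHasInstanceProtector() { has_instance_protector.valid = false; }

// The stub's fast-path condition: inline OrdinaryHasInstance only while
// the protector is still intact.
bool CanUseFastInstanceOf() { return has_instance_protector.valid; }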
namespace {
@@ -1082,15 +2838,15 @@ enum RelationalComparisonMode {
kGreaterThanOrEqual
};
-void GenerateAbstractRelationalComparison(
- compiler::CodeStubAssembler* assembler, RelationalComparisonMode mode) {
- typedef compiler::CodeStubAssembler::Label Label;
+compiler::Node* GenerateAbstractRelationalComparison(
+ CodeStubAssembler* assembler, RelationalComparisonMode mode,
+ compiler::Node* lhs, compiler::Node* rhs, compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(2);
+ typedef CodeStubAssembler::Variable Variable;
- Label return_true(assembler), return_false(assembler);
+ Label return_true(assembler), return_false(assembler), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
// Shared entry for floating point comparison.
Label do_fcmp(assembler);
@@ -1103,14 +2859,14 @@ void GenerateAbstractRelationalComparison(
var_rhs(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
// Check if the {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
@@ -1283,7 +3039,7 @@ void GenerateAbstractRelationalComparison(
Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
// Check if {rhs} is also a String.
- Label if_rhsisstring(assembler),
+ Label if_rhsisstring(assembler, Label::kDeferred),
if_rhsisnotstring(assembler, Label::kDeferred);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
@@ -1295,24 +3051,29 @@ void GenerateAbstractRelationalComparison(
// Both {lhs} and {rhs} are strings.
switch (mode) {
case kLessThan:
- assembler->TailCallStub(
+ result.Bind(assembler->CallStub(
CodeFactory::StringLessThan(assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
case kLessThanOrEqual:
- assembler->TailCallStub(
+ result.Bind(assembler->CallStub(
CodeFactory::StringLessThanOrEqual(assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
case kGreaterThan:
- assembler->TailCallStub(
+ result.Bind(assembler->CallStub(
CodeFactory::StringGreaterThan(assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
case kGreaterThanOrEqual:
- assembler->TailCallStub(CodeFactory::StringGreaterThanOrEqual(
+ result.Bind(
+ assembler->CallStub(CodeFactory::StringGreaterThanOrEqual(
assembler->isolate()),
- context, lhs, rhs);
+ context, lhs, rhs));
+ assembler->Goto(&end);
break;
}
}
@@ -1335,9 +3096,9 @@ void GenerateAbstractRelationalComparison(
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing Number hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_rhs.Bind(assembler->CallRuntime(
- Runtime::kToPrimitive_Number, context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate(), ToPrimitiveHint::kNumber);
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1369,9 +3130,9 @@ void GenerateAbstractRelationalComparison(
assembler->Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing Number hint.
- // TODO(bmeurer): Hook up ToPrimitiveStub here, once it's there.
- var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive_Number,
- context, lhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate(), ToPrimitiveHint::kNumber);
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
@@ -1417,25 +3178,33 @@ void GenerateAbstractRelationalComparison(
}
assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ {
+ result.Bind(assembler->BooleanConstant(true));
+ assembler->Goto(&end);
+ }
assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ {
+ result.Bind(assembler->BooleanConstant(false));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
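
Once both operands are primitives (converted with a Number hint), the comparison is either a string comparison, when both sides are strings, or a Float64 comparison. A rough sketch of that final dispatch; std::stod is only a crude stand-in for ToNumber (it throws where ToNumber would yield NaN):

#include <string>
#include <variant>

using Primitive = std::variant<double, std::string>;

bool JsLessThan(const Primitive& lhs, const Primitive& rhs) {
  if (std::holds_alternative<std::string>(lhs) &&
      std::holds_alternative<std::string>(rhs)) {
    // Both strings: compare lexicographically by code unit.
    return std::get<std::string>(lhs) < std::get<std::string>(rhs);
  }
  auto to_number = [](const Primitive& p) {
    return std::holds_alternative<double>(p)
               ? std::get<double>(p)
               : std::stod(std::get<std::string>(p));  // crude ToNumber
  };
  return to_number(lhs) < to_number(rhs);  // false if either side is NaN
}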
enum ResultMode { kDontNegateResult, kNegateResult };
-void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
- compiler::Node* value,
- compiler::CodeStubAssembler::Label* if_equal,
- compiler::CodeStubAssembler::Label* if_notequal) {
+void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+ CodeStubAssembler::Label* if_equal,
+ CodeStubAssembler::Label* if_notequal) {
// In case of abstract or strict equality checks, we need additional checks
// for NaN values because they are not considered equal, even if both the
// left and the right hand side reference exactly the same value.
// TODO(bmeurer): This seems to violate the SIMD.js specification, but it
// seems to be what is tested in the current SIMD.js testsuite.
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
// Check if {value} is a Smi or a HeapObject.
@@ -1472,97 +3241,28 @@ void GenerateEqual_Same(compiler::CodeStubAssembler* assembler,
}
void GenerateEqual_Simd128Value_HeapObject(
- compiler::CodeStubAssembler* assembler, compiler::Node* lhs,
- compiler::Node* lhs_map, compiler::Node* rhs, compiler::Node* rhs_map,
- compiler::CodeStubAssembler::Label* if_equal,
- compiler::CodeStubAssembler::Label* if_notequal) {
- typedef compiler::CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- // Check if {lhs} and {rhs} have the same map.
- Label if_mapsame(assembler), if_mapnotsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs_map, rhs_map), &if_mapsame,
- &if_mapnotsame);
-
- assembler->Bind(&if_mapsame);
- {
- // Both {lhs} and {rhs} are Simd128Values with the same map, need special
- // handling for Float32x4 because of NaN comparisons.
- Label if_float32x4(assembler), if_notfloat32x4(assembler);
- Node* float32x4_map =
- assembler->HeapConstant(assembler->factory()->float32x4_map());
- assembler->Branch(assembler->WordEqual(lhs_map, float32x4_map),
- &if_float32x4, &if_notfloat32x4);
-
- assembler->Bind(&if_float32x4);
- {
- // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
- // using a floating point comparison.
- for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
- offset < Float32x4::kSize - kHeapObjectTag;
- offset += sizeof(float)) {
- // Load the floating point values for {lhs} and {rhs}.
- Node* lhs_value = assembler->Load(MachineType::Float32(), lhs,
- assembler->IntPtrConstant(offset));
- Node* rhs_value = assembler->Load(MachineType::Float32(), rhs,
- assembler->IntPtrConstant(offset));
-
- // Perform a floating point comparison.
- Label if_valueequal(assembler), if_valuenotequal(assembler);
- assembler->Branch(assembler->Float32Equal(lhs_value, rhs_value),
- &if_valueequal, &if_valuenotequal);
- assembler->Bind(&if_valuenotequal);
- assembler->Goto(if_notequal);
- assembler->Bind(&if_valueequal);
- }
-
- // All 4 lanes match, {lhs} and {rhs} considered equal.
- assembler->Goto(if_equal);
- }
-
- assembler->Bind(&if_notfloat32x4);
- {
- // For other Simd128Values we just perform a bitwise comparison.
- for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
- offset < Simd128Value::kSize - kHeapObjectTag;
- offset += kPointerSize) {
- // Load the word values for {lhs} and {rhs}.
- Node* lhs_value = assembler->Load(MachineType::Pointer(), lhs,
- assembler->IntPtrConstant(offset));
- Node* rhs_value = assembler->Load(MachineType::Pointer(), rhs,
- assembler->IntPtrConstant(offset));
-
- // Perform a bitwise word-comparison.
- Label if_valueequal(assembler), if_valuenotequal(assembler);
- assembler->Branch(assembler->WordEqual(lhs_value, rhs_value),
- &if_valueequal, &if_valuenotequal);
- assembler->Bind(&if_valuenotequal);
- assembler->Goto(if_notequal);
- assembler->Bind(&if_valueequal);
- }
-
- // Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
- assembler->Goto(if_equal);
- }
- }
-
- assembler->Bind(&if_mapnotsame);
- assembler->Goto(if_notequal);
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
+ compiler::Node* rhs, compiler::Node* rhs_map,
+ CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
+ assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
+ if_notequal);
}
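
The lane-wise logic deleted here now lives in CodeStubAssembler::BranchIfSimd128Equal, but its key subtlety is worth keeping in mind: Float32x4 lanes are compared with floating point semantics, so a NaN lane makes two values unequal even when their bit patterns match, while every other Simd128 type uses a plain bitwise comparison. The Float32x4 case in isolation:

bool Float32x4Equal(const float lhs[4], const float rhs[4]) {
  for (int i = 0; i < 4; ++i) {
    // Floating point compare: NaN != NaN, unlike a bitwise lane compare.
    if (!(lhs[i] == rhs[i])) return false;
  }
  return true;
}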
// ES6 section 7.2.12 Abstract Equality Comparison
-void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
+compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
  // This is a slightly optimized version of Object::Equals, represented as
  // a scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
  // change anything functionality-wise in here, remember to update the
// Object::Equals method as well.
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(2);
+ typedef CodeStubAssembler::Variable Variable;
- Label if_equal(assembler), if_notequal(assembler);
+ Label if_equal(assembler), if_notequal(assembler),
+ do_rhsstringtonumber(assembler, Label::kDeferred), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
// Shared entry for floating point comparison.
Label do_fcmp(assembler);
@@ -1575,14 +3275,14 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
var_rhs(assembler, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(assembler->Parameter(0));
- var_rhs.Bind(assembler->Parameter(1));
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
assembler->Goto(&loop);
assembler->Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
// Check if {lhs} and {rhs} refer to the same object.
Label if_same(assembler), if_notsame(assembler);
@@ -1610,6 +3310,8 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
&if_rhsisnotsmi);
assembler->Bind(&if_rhsissmi);
+ // We have already checked for {lhs} and {rhs} being the same value, so
+ // if both are Smis when we get here they must not be equal.
assembler->Goto(&if_notequal);
assembler->Bind(&if_rhsisnotsmi);
@@ -1619,8 +3321,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// Check if {rhs} is a HeapNumber.
Node* number_map = assembler->HeapNumberMapConstant();
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
+ Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
assembler->Branch(assembler->WordEqual(rhs_map, number_map),
&if_rhsisnumber, &if_rhsisnotnumber);
@@ -1640,7 +3341,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// Check if the {rhs} is a String.
Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler, Label::kDeferred);
+ if_rhsisnotstring(assembler);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
FIRST_NONSTRING_TYPE)),
@@ -1648,19 +3349,17 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_rhsisstring);
{
- // Convert the {rhs} to a Number.
- Callable callable =
- CodeFactory::StringToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ // The {rhs} is a String and the {lhs} is a Smi; we need
+ // to convert the {rhs} to a Number and compare the output to
+ // the Number on the {lhs}.
+ assembler->Goto(&do_rhsstringtonumber);
}
assembler->Bind(&if_rhsisnotstring);
{
// Check if the {rhs} is a Boolean.
Node* boolean_map = assembler->BooleanMapConstant();
- Label if_rhsisboolean(assembler, Label::kDeferred),
- if_rhsisnotboolean(assembler, Label::kDeferred);
+ Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
&if_rhsisboolean, &if_rhsisnotboolean);
@@ -1677,7 +3376,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// Check if the {rhs} is a Receiver.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
+ if_rhsisnotreceiver(assembler);
assembler->Branch(
assembler->Int32LessThanOrEqual(
assembler->Int32Constant(FIRST_JS_RECEIVER_TYPE),
@@ -1687,9 +3386,9 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first (passing no hint).
- // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
- var_rhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, rhs));
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
assembler->Goto(&loop);
}
@@ -1761,8 +3460,8 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_lhsisstring);
{
// Check if {rhs} is also a String.
- Label if_rhsisstring(assembler),
- if_rhsisnotstring(assembler, Label::kDeferred);
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
FIRST_NONSTRING_TYPE)),
@@ -1776,7 +3475,8 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
(mode == kDontNegateResult)
? CodeFactory::StringEqual(assembler->isolate())
: CodeFactory::StringNotEqual(assembler->isolate());
- assembler->TailCallStub(callable, context, lhs, rhs);
+ result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotstring);
@@ -1794,8 +3494,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_lhsisnumber);
{
// Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
+ Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
assembler->Branch(
assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
&if_rhsisnumber, &if_rhsisnotnumber);
@@ -1825,16 +3524,13 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
// The {rhs} is a String and the {lhs} is a HeapNumber; we need
// to convert the {rhs} to a Number and compare the output to
// the Number on the {lhs}.
- Callable callable =
- CodeFactory::StringToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ assembler->Goto(&do_rhsstringtonumber);
}
assembler->Bind(&if_rhsisnotstring);
{
// Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
+ Label if_rhsisreceiver(assembler),
if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
@@ -1926,8 +3622,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_lhsissymbol);
{
// Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler);
+ Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
assembler->Int32LessThanOrEqual(
@@ -1974,8 +3669,7 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
assembler->Bind(&if_rhsisnotsimd128value);
{
// Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler);
+ Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
assembler->Branch(
assembler->Int32LessThanOrEqual(
@@ -2050,9 +3744,9 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
{
// The {rhs} is some Primitive different from Null and
// Undefined, need to convert {lhs} to Primitive first.
- // TODO(bmeurer): Hook up ToPrimitiveStub here once it exists.
- var_lhs.Bind(assembler->CallRuntime(Runtime::kToPrimitive,
- context, lhs));
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
assembler->Goto(&loop);
}
}
@@ -2060,6 +3754,13 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
}
}
}
+
+ assembler->Bind(&do_rhsstringtonumber);
+ {
+ Callable callable = CodeFactory::StringToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
}
assembler->Bind(&do_fcmp);
@@ -2073,14 +3774,25 @@ void GenerateEqual(compiler::CodeStubAssembler* assembler, ResultMode mode) {
}
assembler->Bind(&if_equal);
- assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
+ assembler->Goto(&end);
+ }
assembler->Bind(&if_notequal);
- assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kNegateResult));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
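
The new shared do_rhsstringtonumber block deduplicates the two sites (Smi {lhs} and HeapNumber {lhs}) that need the same abstract-equality step: when a number meets a string, convert the string with StringToNumber and loop. That step on its own, with std::stod as a rough stand-in for the real conversion (it throws where ToNumber would produce NaN):

#include <string>

bool LooseEqualNumberString(double lhs, const std::string& rhs) {
  double rhs_as_number = std::stod(rhs);  // StringToNumber stand-in
  return lhs == rhs_as_number;            // then compare as Float64
}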
-void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
- ResultMode mode) {
+compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
+ ResultMode mode, compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -2129,14 +3841,12 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
// }
// }
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
typedef compiler::Node Node;
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
-
- Label if_equal(assembler), if_notequal(assembler);
+ Label if_equal(assembler), if_notequal(assembler), end(assembler);
+ Variable result(assembler, MachineRepresentation::kTagged);
// Check if {lhs} and {rhs} refer to the same object.
Label if_same(assembler), if_notsame(assembler);
@@ -2241,7 +3951,8 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
// Check if {rhs} is also a String.
- Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler);
assembler->Branch(assembler->Int32LessThan(
rhs_instance_type, assembler->Int32Constant(
FIRST_NONSTRING_TYPE)),
@@ -2253,7 +3964,8 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
(mode == kDontNegateResult)
? CodeFactory::StringEqual(assembler->isolate())
: CodeFactory::StringNotEqual(assembler->isolate());
- assembler->TailCallStub(callable, context, lhs, rhs);
+ result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
}
assembler->Bind(&if_rhsisnotstring);
@@ -2330,17 +4042,26 @@ void GenerateStrictEqual(compiler::CodeStubAssembler* assembler,
}
assembler->Bind(&if_equal);
- assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
+ assembler->Goto(&end);
+ }
assembler->Bind(&if_notequal);
- assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+ {
+ result.Bind(assembler->BooleanConstant(mode == kNegateResult));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
-void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
+void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
RelationalComparisonMode mode) {
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* lhs = assembler->Parameter(0);
Node* rhs = assembler->Parameter(1);
@@ -2386,8 +4107,8 @@ void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
assembler->Bind(&if_bothonebyteseqstrings);
{
// Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
- Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+ Node* lhs_length = assembler->LoadStringLength(lhs);
+ Node* rhs_length = assembler->LoadStringLength(rhs);
// Determine the minimum length.
Node* length = assembler->SmiMin(lhs_length, rhs_length);
@@ -2519,8 +4240,7 @@ void GenerateStringRelationalComparison(compiler::CodeStubAssembler* assembler,
}
}
-void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
- ResultMode mode) {
+void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -2537,9 +4257,9 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
// }
// return %StringEqual(lhs, rhs);
- typedef compiler::CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* lhs = assembler->Parameter(0);
Node* rhs = assembler->Parameter(1);
@@ -2559,8 +4279,8 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
// The {lhs} and {rhs} don't refer to the exact same String object.
// Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadObjectField(lhs, String::kLengthOffset);
- Node* rhs_length = assembler->LoadObjectField(rhs, String::kLengthOffset);
+ Node* lhs_length = assembler->LoadStringLength(lhs);
+ Node* rhs_length = assembler->LoadStringLength(rhs);
// Check if the lengths of {lhs} and {rhs} are equal.
Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
@@ -2698,80 +4418,118 @@ void GenerateStringEqual(compiler::CodeStubAssembler* assembler,
} // namespace
-void LessThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kLessThan);
+void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ // For now we only support receiver_is_holder.
+ DCHECK(receiver_is_holder());
+ Node* holder = receiver;
+ Node* map = assembler->LoadMap(receiver);
+ Node* descriptors = assembler->LoadMapDescriptors(map);
+ Node* offset =
+ assembler->Int32Constant(DescriptorArray::ToValueIndex(index()));
+ Node* callback = assembler->LoadFixedArrayElement(descriptors, offset);
+ assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
+ holder, callback);
}
-void LessThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual);
+// static
+compiler::Node* LessThanStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kLessThan, lhs, rhs,
+ context);
}
-void GreaterThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kGreaterThan);
+// static
+compiler::Node* LessThanOrEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual, lhs,
+ rhs, context);
}
-void GreaterThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual);
+// static
+compiler::Node* GreaterThanStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kGreaterThan, lhs, rhs,
+ context);
}
-void EqualStub::GenerateAssembly(compiler::CodeStubAssembler* assembler) const {
- GenerateEqual(assembler, kDontNegateResult);
+// static
+compiler::Node* GreaterThanOrEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual,
+ lhs, rhs, context);
}
-void NotEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateEqual(assembler, kNegateResult);
+// static
+compiler::Node* EqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateEqual(assembler, kDontNegateResult, lhs, rhs, context);
+}
+
+// static
+compiler::Node* NotEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateEqual(assembler, kNegateResult, lhs, rhs, context);
}
-void StrictEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateStrictEqual(assembler, kDontNegateResult);
+// static
+compiler::Node* StrictEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateStrictEqual(assembler, kDontNegateResult, lhs, rhs, context);
}
-void StrictNotEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- GenerateStrictEqual(assembler, kNegateResult);
+// static
+compiler::Node* StrictNotEqualStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ return GenerateStrictEqual(assembler, kNegateResult, lhs, rhs, context);
}
-void StringEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringEqual(assembler, kDontNegateResult);
}
-void StringNotEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringNotEqualStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringEqual(assembler, kNegateResult);
}
-void StringLessThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void StringLessThanStub::GenerateAssembly(CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kLessThan);
}
void StringLessThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
}
void StringGreaterThanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kGreaterThan);
}
void StringGreaterThanOrEqualStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
}
-void ToLengthStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
+void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(1);
@@ -2844,150 +4602,10 @@ void ToLengthStub::GenerateAssembly(
}
}
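
For reference, the ToLength conversion implemented by the stub above clamps its input into the valid array-length range: NaN and negative values become 0, anything at or above 2^53 - 1 clamps to 2^53 - 1, and the rest is truncated to an integer. As a standalone double-to-double sketch:

#include <algorithm>
#include <cmath>

double ToLength(double number) {
  if (std::isnan(number) || number <= 0) return 0;
  constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  return std::min(std::trunc(number), kMaxSafeInteger);
}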
-void ToBooleanStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Label Label;
-
- Node* value = assembler->Parameter(0);
- Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
-
- // Check if {value} is a Smi or a HeapObject.
- assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
- &if_valueisnotsmi);
-
- assembler->Bind(&if_valueissmi);
- {
- // The {value} is a Smi, only need to check against zero.
- Label if_valueiszero(assembler), if_valueisnotzero(assembler);
- assembler->Branch(assembler->SmiEqual(value, assembler->SmiConstant(0)),
- &if_valueiszero, &if_valueisnotzero);
-
- assembler->Bind(&if_valueiszero);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&if_valueisnotzero);
- assembler->Return(assembler->BooleanConstant(true));
- }
-
- assembler->Bind(&if_valueisnotsmi);
- {
- Label if_valueisstring(assembler), if_valueisheapnumber(assembler),
- if_valueisoddball(assembler), if_valueisother(assembler);
-
- // The {value} is a HeapObject, load its map.
- Node* value_map = assembler->LoadMap(value);
-
- // Load the {value}s instance type.
- Node* value_instance_type = assembler->Load(
- MachineType::Uint8(), value_map,
- assembler->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
-
- // Dispatch based on the instance type; we distinguish all String instance
- // types, the HeapNumber type and the Oddball type.
- size_t const kNumCases = FIRST_NONSTRING_TYPE + 2;
- Label* case_labels[kNumCases];
- int32_t case_values[kNumCases];
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- case_labels[i] = new Label(assembler);
- case_values[i] = i;
- }
- case_labels[FIRST_NONSTRING_TYPE + 0] = &if_valueisheapnumber;
- case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 1] = &if_valueisoddball;
- case_values[FIRST_NONSTRING_TYPE + 1] = ODDBALL_TYPE;
- assembler->Switch(value_instance_type, &if_valueisother, case_values,
- case_labels, arraysize(case_values));
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- assembler->Bind(case_labels[i]);
- assembler->Goto(&if_valueisstring);
- delete case_labels[i];
- }
-
- assembler->Bind(&if_valueisstring);
- {
- // Load the string length field of the {value}.
- Node* value_length =
- assembler->LoadObjectField(value, String::kLengthOffset);
-
- // Check if the {value} is the empty string.
- Label if_valueisempty(assembler), if_valueisnotempty(assembler);
- assembler->Branch(
- assembler->SmiEqual(value_length, assembler->SmiConstant(0)),
- &if_valueisempty, &if_valueisnotempty);
-
- assembler->Bind(&if_valueisempty);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&if_valueisnotempty);
- assembler->Return(assembler->BooleanConstant(true));
- }
-
- assembler->Bind(&if_valueisheapnumber);
- {
- Node* value_value = assembler->Load(
- MachineType::Float64(), value,
- assembler->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
-
- Label if_valueispositive(assembler), if_valueisnotpositive(assembler),
- if_valueisnegative(assembler), if_valueisnanorzero(assembler);
- assembler->Branch(assembler->Float64LessThan(
- assembler->Float64Constant(0.0), value_value),
- &if_valueispositive, &if_valueisnotpositive);
-
- assembler->Bind(&if_valueispositive);
- assembler->Return(assembler->BooleanConstant(true));
-
- assembler->Bind(&if_valueisnotpositive);
- assembler->Branch(assembler->Float64LessThan(
- value_value, assembler->Float64Constant(0.0)),
- &if_valueisnegative, &if_valueisnanorzero);
-
- assembler->Bind(&if_valueisnegative);
- assembler->Return(assembler->BooleanConstant(true));
-
- assembler->Bind(&if_valueisnanorzero);
- assembler->Return(assembler->BooleanConstant(false));
- }
-
- assembler->Bind(&if_valueisoddball);
- {
- // The {value} is an Oddball, and every Oddball knows its boolean value.
- Node* value_toboolean =
- assembler->LoadObjectField(value, Oddball::kToBooleanOffset);
- assembler->Return(value_toboolean);
- }
-
- assembler->Bind(&if_valueisother);
- {
- Node* value_map_bitfield = assembler->Load(
- MachineType::Uint8(), value_map,
- assembler->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
- Node* value_map_undetectable = assembler->Word32And(
- value_map_bitfield,
- assembler->Int32Constant(1 << Map::kIsUndetectable));
-
- // Check if the {value} is undetectable.
- Label if_valueisundetectable(assembler),
- if_valueisnotundetectable(assembler);
- assembler->Branch(assembler->Word32Equal(value_map_undetectable,
- assembler->Int32Constant(0)),
- &if_valueisnotundetectable, &if_valueisundetectable);
-
- assembler->Bind(&if_valueisundetectable);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&if_valueisnotundetectable);
- assembler->Return(assembler->BooleanConstant(true));
- }
- }
-}
-
-void ToIntegerStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
- typedef compiler::CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Variable Variable;
Node* context = assembler->Parameter(1);
@@ -3046,25 +4664,27 @@ void ToIntegerStub::GenerateAssembly(
}
void StoreInterceptorStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- Node* receiver = assembler->Parameter(0);
- Node* name = assembler->Parameter(1);
- Node* value = assembler->Parameter(2);
- Node* context = assembler->Parameter(3);
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* context = assembler->Parameter(Descriptor::kContext);
assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
receiver, name, value);
}
void LoadIndexedInterceptorStub::GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const {
+ CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
- typedef compiler::CodeStubAssembler::Label Label;
- Node* receiver = assembler->Parameter(0);
- Node* key = assembler->Parameter(1);
- Node* slot = assembler->Parameter(2);
- Node* vector = assembler->Parameter(3);
- Node* context = assembler->Parameter(4);
+ typedef CodeStubAssembler::Label Label;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* key = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
@@ -3078,6 +4698,133 @@ void LoadIndexedInterceptorStub::GenerateAssembly(
slot, vector);
}
+// static
+bool FastCloneShallowObjectStub::IsSupported(ObjectLiteral* expr) {
+ // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
+ // support copy-on-write (COW) elements for now.
+ // TODO(mvstanton): make object literals support COW elements.
+ return expr->fast_elements() && expr->has_shallow_properties() &&
+ expr->properties_count() <= kMaximumClonedProperties;
+}
+
+// static
+int FastCloneShallowObjectStub::PropertiesCount(int literal_length) {
+ // This heuristic of setting empty literals to have
+  // kInitialGlobalObjectUnusedPropertiesCount must remain in sync with the
+ // runtime.
+ // TODO(verwaest): Unify this with the heuristic in the runtime.
+ return literal_length == 0
+ ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
+ : literal_length;
+}
+
+// static
+compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
+ CodeStubAssembler* assembler, compiler::CodeAssembler::Label* call_runtime,
+ compiler::Node* closure, compiler::Node* literals_index,
+ compiler::Node* properties_count) {
+ typedef compiler::Node Node;
+ typedef compiler::CodeAssembler::Label Label;
+ typedef compiler::CodeAssembler::Variable Variable;
+
+ Node* undefined = assembler->UndefinedConstant();
+ Node* literals_array =
+ assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* allocation_site = assembler->LoadFixedArrayElement(
+ literals_array, literals_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
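+  // An undefined literal slot means no boilerplate has been created yet;
+  // defer to the runtime in that case.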
+ assembler->GotoIf(assembler->WordEqual(allocation_site, undefined),
+ call_runtime);
+
+ // Calculate the object and allocation size based on the properties count.
+ Node* object_size = assembler->IntPtrAdd(
+ assembler->WordShl(properties_count, kPointerSizeLog2),
+ assembler->IntPtrConstant(JSObject::kHeaderSize));
+ Node* allocation_size = object_size;
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_size = assembler->IntPtrAdd(
+ object_size, assembler->IntPtrConstant(AllocationMemento::kSize));
+ }
+ Node* boilerplate = assembler->LoadObjectField(
+ allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate_map = assembler->LoadMap(boilerplate);
+ Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
+ Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
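+  // The fast path copies the boilerplate verbatim, so bail out to the runtime
+  // if its instance size doesn't match the size computed from the properties
+  // count.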
+ assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
+ call_runtime);
+
+ Node* copy = assembler->Allocate(allocation_size);
+
+ // Copy boilerplate elements.
+ Variable offset(assembler, MachineType::PointerRepresentation());
+ offset.Bind(assembler->IntPtrConstant(-kHeapObjectTag));
+ Node* end_offset = assembler->IntPtrAdd(object_size, offset.value());
+ Label loop_body(assembler, &offset), loop_check(assembler, &offset);
+ // We should always have an object size greater than zero.
+ assembler->Goto(&loop_body);
+ assembler->Bind(&loop_body);
+ {
+ // The Allocate above guarantees that the copy lies in new space. This
+ // allows us to skip write barriers. This is necessary since we may also be
+ // copying unboxed doubles.
+ Node* field =
+ assembler->Load(MachineType::IntPtr(), boilerplate, offset.value());
+ assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
+ offset.value(), field);
+ assembler->Goto(&loop_check);
+ }
+ assembler->Bind(&loop_check);
+ {
+ offset.Bind(assembler->IntPtrAdd(offset.value(),
+ assembler->IntPtrConstant(kPointerSize)));
+ assembler->GotoUnless(
+ assembler->IntPtrGreaterThanOrEqual(offset.value(), end_offset),
+ &loop_body);
+ }
+
+ if (FLAG_allocation_site_pretenuring) {
+ Node* memento = assembler->InnerAllocate(copy, object_size);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ memento, HeapObject::kMapOffset,
+ assembler->LoadRoot(Heap::kAllocationMementoMapRootIndex));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
+ Node* memento_create_count = assembler->LoadObjectField(
+ allocation_site, AllocationSite::kPretenureCreateCountOffset);
+ memento_create_count = assembler->SmiAdd(
+ memento_create_count, assembler->SmiConstant(Smi::FromInt(1)));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ allocation_site, AllocationSite::kPretenureCreateCountOffset,
+ memento_create_count);
+ }
+
+ // TODO(verwaest): Allocate and fill in double boxes.
+ return copy;
+}
+
+void FastCloneShallowObjectStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ Label call_runtime(assembler);
+ Node* closure = assembler->Parameter(0);
+ Node* literals_index = assembler->Parameter(1);
+
+ Node* properties_count =
+ assembler->IntPtrConstant(PropertiesCount(this->length()));
+ Node* copy = GenerateFastPath(assembler, &call_runtime, closure,
+ literals_index, properties_count);
+ assembler->Return(copy);
+
+ assembler->Bind(&call_runtime);
+ Node* constant_properties = assembler->Parameter(2);
+ Node* flags = assembler->Parameter(3);
+ Node* context = assembler->Parameter(4);
+ assembler->TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+ literals_index, constant_properties, flags);
+}
+
template<class StateType>
void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
@@ -3156,7 +4903,7 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
return LoadWithVectorDescriptor(isolate());
} else {
DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return VectorStoreICDescriptor(isolate());
+ return StoreWithVectorDescriptor(isolate());
}
}
@@ -3174,45 +4921,15 @@ void ElementsTransitionAndStoreStub::InitializeDescriptor(
FUNCTION_ADDR(Runtime_ElementsTransitionAndStoreIC_Miss));
}
-
-void ToObjectStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(Runtime::FunctionForId(Runtime::kToObject)->entry);
-}
-
-
-CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor()
- const {
- return VectorStoreTransitionDescriptor(isolate());
-}
-
-
-CallInterfaceDescriptor
-ElementsTransitionAndStoreStub::GetCallInterfaceDescriptor() const {
- return VectorStoreTransitionDescriptor(isolate());
-}
-
-
-void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(Runtime::FunctionForId(Runtime::kNewClosure)->entry);
+void StoreTransitionStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+ descriptor->Initialize(
+ FUNCTION_ADDR(Runtime_TransitionStoreIC_MissFromStubFailure));
}
-
-void FastNewContextStub::InitializeDescriptor(CodeStubDescriptor* d) {}
-
-
-void TypeofStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {}
-
-
void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kNumberToString)->entry);
-}
-
-
-void FastCloneRegExpStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- FastCloneRegExpDescriptor call_descriptor(isolate());
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kCreateRegExpLiteral)->entry);
+ descriptor->SetMissHandler(Runtime::kNumberToString);
}
@@ -3221,27 +4938,14 @@ void FastCloneShallowArrayStub::InitializeDescriptor(
FastCloneShallowArrayDescriptor call_descriptor(isolate());
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry);
+ descriptor->SetMissHandler(Runtime::kCreateArrayLiteralStubBailout);
}
-
-void FastCloneShallowObjectStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- FastCloneShallowObjectDescriptor call_descriptor(isolate());
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
-}
-
-
-void CreateAllocationSiteStub::InitializeDescriptor(CodeStubDescriptor* d) {}
-
-
-void CreateWeakCellStub::InitializeDescriptor(CodeStubDescriptor* d) {}
-
-
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
+ descriptor->SetMissHandler(Runtime::kRegExpConstructResult);
}
@@ -3259,11 +4963,6 @@ void AllocateHeapNumberStub::InitializeDescriptor(
}
-void AllocateMutableHeapNumberStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize();
-}
-
#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Stub::InitializeDescriptor( \
CodeStubDescriptor* descriptor) { \
@@ -3273,22 +4972,15 @@ void AllocateMutableHeapNumberStub::InitializeDescriptor(
SIMD128_TYPES(SIMD128_INIT_DESC)
#undef SIMD128_INIT_DESC
-void AllocateInNewSpaceStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize();
-}
-
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
- descriptor->SetMissHandler(ExternalReference(
- Runtime::FunctionForId(Runtime::kToBooleanIC_Miss), isolate()));
+ descriptor->SetMissHandler(Runtime::kToBooleanIC_Miss);
}
void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_BinaryOpIC_Miss));
- descriptor->SetMissHandler(ExternalReference(
- Runtime::FunctionForId(Runtime::kBinaryOpIC_Miss), isolate()));
+ descriptor->SetMissHandler(Runtime::kBinaryOpIC_Miss);
}
@@ -3301,21 +4993,461 @@ void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+ descriptor->SetMissHandler(Runtime::kStringAdd);
}
+namespace {
-void GrowArrayElementsStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kGrowArrayElements)->entry);
+compiler::Node* GenerateHasProperty(
+ CodeStubAssembler* assembler, compiler::Node* object, compiler::Node* key,
+ compiler::Node* context, Runtime::FunctionId fallback_runtime_function_id) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label call_runtime(assembler, Label::kDeferred), return_true(assembler),
+ return_false(assembler), end(assembler);
+
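+  // TryPrototypeChainLookup walks the prototype chain and invokes these
+  // callbacks for each holder; jumping to return_true short-circuits the walk.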
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name,
+ Label* next_holder, Label* if_bailout) {
+ assembler->TryHasOwnProperty(holder, holder_map, holder_instance_type,
+ unique_name, &return_true, next_holder,
+ if_bailout);
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index,
+ Label* next_holder, Label* if_bailout) {
+ assembler->TryLookupElement(holder, holder_map, holder_instance_type,
+ index, &return_true, next_holder,
+ if_bailout);
+ };
+
+ assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &return_false,
+ &call_runtime);
+
+ Variable result(assembler, MachineRepresentation::kTagged);
+ assembler->Bind(&return_true);
+ {
+ result.Bind(assembler->BooleanConstant(true));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&return_false);
+ {
+ result.Bind(assembler->BooleanConstant(false));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ result.Bind(assembler->CallRuntime(fallback_runtime_function_id, context,
+ object, key));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
}
+} // namespace
-void TypeofStub::GenerateAheadOfTime(Isolate* isolate) {
- TypeofStub stub(isolate);
- stub.GetCode();
+// static
+compiler::Node* HasPropertyStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* key,
+ compiler::Node* object,
+ compiler::Node* context) {
+ return GenerateHasProperty(assembler, object, key, context,
+ Runtime::kHasProperty);
+}
+
+// static
+compiler::Node* ForInFilterStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* key,
+ compiler::Node* object,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label return_undefined(assembler, Label::kDeferred),
+ return_to_name(assembler), end(assembler);
+
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+
+ Node* has_property = GenerateHasProperty(assembler, object, key, context,
+ Runtime::kForInHasProperty);
+
+ assembler->Branch(
+ assembler->WordEqual(has_property, assembler->BooleanConstant(true)),
+ &return_to_name, &return_undefined);
+
+ assembler->Bind(&return_to_name);
+ {
+ // TODO(cbruni): inline ToName here.
+ Callable callable = CodeFactory::ToName(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, key));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&return_undefined);
+ {
+ var_result.Bind(assembler->UndefinedConstant());
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return var_result.value();
+}
+
+void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label call_runtime(assembler, Label::kDeferred), return_undefined(assembler),
+ end(assembler);
+
+ Node* object = assembler->Parameter(0);
+ Node* key = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+
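+  // As in HasProperty above, these callbacks drive TryPrototypeChainLookup;
+  // element accesses are not handled on this fast path yet and bail out.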
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [assembler, context, &var_result, &end](
+ Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name, Label* next_holder,
+ Label* if_bailout) {
+ Variable var_value(assembler, MachineRepresentation::kTagged);
+ Label if_found(assembler);
+ assembler->TryGetOwnProperty(
+ context, receiver, holder, holder_map, holder_instance_type,
+ unique_name, &if_found, &var_value, next_holder, if_bailout);
+ assembler->Bind(&if_found);
+ {
+ var_result.Bind(var_value.value());
+ assembler->Goto(&end);
+ }
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [assembler, context, &var_result, &end](
+ Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index, Label* next_holder,
+ Label* if_bailout) {
+ // Not supported yet.
+ assembler->Use(next_holder);
+ assembler->Goto(if_bailout);
+ };
+
+ assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder,
+ &return_undefined, &call_runtime);
+
+ assembler->Bind(&return_undefined);
+ {
+ var_result.Bind(assembler->UndefinedConstant());
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ var_result.Bind(
+ assembler->CallRuntime(Runtime::kGetProperty, context, object, key));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->Return(var_result.value());
+}
+
+// static
+compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* shared_info,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+ typedef compiler::CodeAssembler::Label Label;
+ typedef compiler::CodeAssembler::Variable Variable;
+
+ Isolate* isolate = assembler->isolate();
+ Factory* factory = assembler->isolate()->factory();
+ assembler->IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
+
+ // Create a new closure from the given function info in new space
+ Node* result = assembler->Allocate(JSFunction::kSize);
+
+ // Calculate the index of the map we should install on the function based on
+ // the FunctionKind and LanguageMode of the function.
+ // Note: Must be kept in sync with Context::FunctionMapIndex
+ Node* compiler_hints = assembler->LoadObjectField(
+ shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* is_strict = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
+
+ Label if_normal(assembler), if_generator(assembler), if_async(assembler),
+ if_class_constructor(assembler), if_function_without_prototype(assembler),
+ load_map(assembler);
+ Variable map_index(assembler, MachineRepresentation::kTagged);
+
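+  // Decode the FunctionKind from the compiler hints word, testing the common
+  // case of a normal function first.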
+ Node* is_not_normal = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(SharedFunctionInfo::kFunctionKindMaskBits));
+ assembler->GotoUnless(is_not_normal, &if_normal);
+
+ Node* is_generator = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(1 << SharedFunctionInfo::kIsGeneratorBit));
+ assembler->GotoIf(is_generator, &if_generator);
+
+ Node* is_async = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(1 << SharedFunctionInfo::kIsAsyncFunctionBit));
+ assembler->GotoIf(is_async, &if_async);
+
+ Node* is_class_constructor = assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(SharedFunctionInfo::kClassConstructorBits));
+ assembler->GotoIf(is_class_constructor, &if_class_constructor);
+
+ if (FLAG_debug_code) {
+ // Function must be a function without a prototype.
+ assembler->Assert(assembler->Word32And(
+ compiler_hints, assembler->Int32Constant(
+ SharedFunctionInfo::kAccessorFunctionBits |
+ (1 << SharedFunctionInfo::kIsArrowBit) |
+ (1 << SharedFunctionInfo::kIsConciseMethodBit))));
+ }
+ assembler->Goto(&if_function_without_prototype);
+
+ assembler->Bind(&if_normal);
+ {
+ map_index.Bind(assembler->Select(
+ is_strict, assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX),
+ assembler->Int32Constant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_generator);
+ {
+ map_index.Bind(assembler->Select(
+ is_strict,
+ assembler->Int32Constant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
+ assembler->Int32Constant(
+ Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_async);
+ {
+ map_index.Bind(assembler->Select(
+ is_strict,
+ assembler->Int32Constant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
+ assembler->Int32Constant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_class_constructor);
+ {
+ map_index.Bind(
+ assembler->Int32Constant(Context::STRICT_FUNCTION_MAP_INDEX));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&if_function_without_prototype);
+ {
+ map_index.Bind(assembler->Int32Constant(
+ Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ assembler->Goto(&load_map);
+ }
+
+ assembler->Bind(&load_map);
+
+ // Get the function map in the current native context and set that
+ // as the map of the allocated object.
+ Node* native_context = assembler->LoadNativeContext(context);
+ Node* map_slot_value =
+ assembler->LoadFixedArrayElement(native_context, map_index.value());
+ assembler->StoreMapNoWriteBarrier(result, map_slot_value);
+
+ // Initialize the rest of the function.
+ Node* empty_fixed_array =
+ assembler->HeapConstant(factory->empty_fixed_array());
+ Node* empty_literals_array =
+ assembler->HeapConstant(factory->empty_literals_array());
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
+ empty_fixed_array);
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
+ empty_fixed_array);
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
+ empty_literals_array);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kPrototypeOrInitialMapOffset,
+ assembler->TheHoleConstant());
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kSharedFunctionInfoOffset, shared_info);
+ assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset,
+ context);
+ Handle<Code> lazy_builtin_handle(
+ assembler->isolate()->builtins()->builtin(Builtins::kCompileLazy));
+ Node* lazy_builtin = assembler->HeapConstant(lazy_builtin_handle);
+ Node* lazy_builtin_entry = assembler->IntPtrAdd(
+ lazy_builtin,
+ assembler->IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kCodeEntryOffset, lazy_builtin_entry);
+ assembler->StoreObjectFieldNoWriteBarrier(result,
+ JSFunction::kNextFunctionLinkOffset,
+ assembler->UndefinedConstant());
+
+ return result;
}
+void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ assembler->Return(
+ Generate(assembler, assembler->Parameter(0), assembler->Parameter(1)));
+}
+
+// static
+compiler::Node* FastNewFunctionContextStub::Generate(
+ CodeStubAssembler* assembler, compiler::Node* function,
+ compiler::Node* slots, compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
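+  // A function context is laid out like a FixedArray: a header followed by
+  // one tagged slot per context slot, including the fixed minimum slots.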
+ Node* min_context_slots =
+ assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
+ Node* length = assembler->Int32Add(slots, min_context_slots);
+ Node* size = assembler->Int32Add(
+ assembler->Word32Shl(length, assembler->Int32Constant(kPointerSizeLog2)),
+ assembler->Int32Constant(FixedArray::kHeaderSize));
+
+  // Allocate the function context in new space.
+ Node* function_context = assembler->Allocate(size);
+
+ Isolate* isolate = assembler->isolate();
+ assembler->StoreMapNoWriteBarrier(
+ function_context,
+ assembler->HeapConstant(isolate->factory()->function_context_map()));
+ assembler->StoreObjectFieldNoWriteBarrier(function_context,
+ Context::kLengthOffset,
+ assembler->SmiFromWord32(length));
+
+ // Set up the fixed slots.
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::CLOSURE_INDEX),
+ function, SKIP_WRITE_BARRIER);
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::PREVIOUS_INDEX),
+ context, SKIP_WRITE_BARRIER);
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::EXTENSION_INDEX),
+ assembler->TheHoleConstant(), SKIP_WRITE_BARRIER);
+
+ // Copy the native context from the previous context.
+ Node* native_context = assembler->LoadNativeContext(context);
+ assembler->StoreFixedArrayElement(
+ function_context, assembler->Int32Constant(Context::NATIVE_CONTEXT_INDEX),
+ native_context, SKIP_WRITE_BARRIER);
+
+ // Initialize the rest of the slots to undefined.
+ Node* undefined = assembler->UndefinedConstant();
+ Variable var_slot_index(assembler, MachineRepresentation::kWord32);
+ var_slot_index.Bind(min_context_slots);
+ Label loop(assembler, &var_slot_index), after_loop(assembler);
+ assembler->Goto(&loop);
+
+ assembler->Bind(&loop);
+ {
+ Node* slot_index = var_slot_index.value();
+ assembler->GotoUnless(assembler->Int32LessThan(slot_index, length),
+ &after_loop);
+ assembler->StoreFixedArrayElement(function_context, slot_index, undefined,
+ SKIP_WRITE_BARRIER);
+ Node* one = assembler->Int32Constant(1);
+ Node* next_index = assembler->Int32Add(slot_index, one);
+ var_slot_index.Bind(next_index);
+ assembler->Goto(&loop);
+ }
+ assembler->Bind(&after_loop);
+
+ return function_context;
+}
+
+void FastNewFunctionContextStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* slots = assembler->Parameter(FastNewFunctionContextDescriptor::kSlots);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(Generate(assembler, function, slots, context));
+}
+
+// static
+compiler::Node* FastCloneRegExpStub::Generate(CodeStubAssembler* assembler,
+ compiler::Node* closure,
+ compiler::Node* literal_index,
+ compiler::Node* pattern,
+ compiler::Node* flags,
+ compiler::Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+
+ Label call_runtime(assembler, Label::kDeferred), end(assembler);
+
+ Variable result(assembler, MachineRepresentation::kTagged);
+
+ Node* undefined = assembler->UndefinedConstant();
+ Node* literals_array =
+ assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* boilerplate = assembler->LoadFixedArrayElement(
+ literals_array, literal_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+ assembler->GotoIf(assembler->WordEqual(boilerplate, undefined),
+ &call_runtime);
+
+ {
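+    // The boilerplate JSRegExp has a fixed shape, so a field-by-field copy
+    // suffices; the copy lives in new space, so no write barriers are needed.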
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Node* copy = assembler->Allocate(size);
+ for (int offset = 0; offset < size; offset += kPointerSize) {
+ Node* value = assembler->LoadObjectField(boilerplate, offset);
+ assembler->StoreObjectFieldNoWriteBarrier(copy, offset, value);
+ }
+ result.Bind(copy);
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ result.Bind(assembler->CallRuntime(Runtime::kCreateRegExpLiteral, context,
+ closure, literal_index, pattern, flags));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ return result.value();
+}
+
+void FastCloneRegExpStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* closure = assembler->Parameter(Descriptor::kClosure);
+ Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
+ Node* pattern = assembler->Parameter(Descriptor::kPattern);
+ Node* flags = assembler->Parameter(Descriptor::kFlags);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(
+ Generate(assembler, closure, literal_index, pattern, flags, context));
+}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub(isolate);
@@ -3337,6 +5469,7 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
// static
void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
+ if (FLAG_minimal) return;
StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS, STANDARD_STORE)
.GetCode();
StoreFastElementStub(isolate, false, FAST_HOLEY_ELEMENTS,
@@ -3349,7 +5482,6 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
}
}
-
void ArrayConstructorStub::PrintName(std::ostream& os) const { // NOLINT
os << "ArrayConstructorStub";
switch (argument_count()) {
@@ -3370,20 +5502,10 @@ void ArrayConstructorStub::PrintName(std::ostream& os) const { // NOLINT
}
-std::ostream& ArrayConstructorStubBase::BasePrintName(
- std::ostream& os, // NOLINT
- const char* name) const {
- os << name << "_" << ElementsKindToString(elements_kind());
- if (override_mode() == DISABLE_ALLOCATION_SITES) {
- os << "_DISABLE_ALLOCATION_SITES";
- }
- return os;
-}
-
bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
Types new_types = types();
Types old_types = new_types;
- bool to_boolean_value = new_types.UpdateStatus(object);
+ bool to_boolean_value = new_types.UpdateStatus(isolate(), object);
TraceTransition(old_types, new_types);
set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToIntegral()));
return to_boolean_value;
@@ -3409,14 +5531,15 @@ std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& s) {
return os << ")";
}
-bool ToBooleanICStub::Types::UpdateStatus(Handle<Object> object) {
- if (object->IsUndefined()) {
+bool ToBooleanICStub::Types::UpdateStatus(Isolate* isolate,
+ Handle<Object> object) {
+ if (object->IsUndefined(isolate)) {
Add(UNDEFINED);
return false;
} else if (object->IsBoolean()) {
Add(BOOLEAN);
- return object->IsTrue();
- } else if (object->IsNull()) {
+ return object->IsTrue(isolate);
+ } else if (object->IsNull(isolate)) {
Add(NULL_TYPE);
return false;
} else if (object->IsSmi()) {
@@ -3472,14 +5595,231 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
entry_hook(function, stack_pointer);
}
+void CreateAllocationSiteStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
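+  // Allocation sites are expected to survive several GCs (see the TODO
+  // below), so allocate them pretenured, i.e. directly in the old generation.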
+ Node* size = assembler->IntPtrConstant(AllocationSite::kSize);
+ Node* site = assembler->Allocate(size, CodeStubAssembler::kPretenured);
+
+ // Store the map
+ assembler->StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
+ Heap::kAllocationSiteMapRootIndex);
+
+ Node* kind =
+ assembler->SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kTransitionInfoOffset, kind);
+
+ // Unlike literals, constructed arrays don't have nested sites
+ Node* zero = assembler->IntPtrConstant(0);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kNestedSiteOffset, zero);
+
+ // Pretenuring calculation field.
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kPretenureDataOffset, zero);
+
+ // Pretenuring memento creation count field.
+ assembler->StoreObjectFieldNoWriteBarrier(
+ site, AllocationSite::kPretenureCreateCountOffset, zero);
+
+ // Store an empty fixed array for the code dependency.
+ assembler->StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+
+ // Link the object to the allocation site list
+ Node* site_list = assembler->ExternalConstant(
+ ExternalReference::allocation_sites_list_address(isolate()));
+ Node* next_site = assembler->LoadBufferObject(site_list, 0);
+
+ // TODO(mvstanton): This is a store to a weak pointer, which we may want to
+ // mark as such in order to skip the write barrier, once we have a unified
+ // system for weakness. For now we decided to keep it like this because having
+  // an initial write-barrier-backed store makes this pointer strong until the
+ // next GC, and allocation sites are designed to survive several GCs anyway.
+ assembler->StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list,
+ site);
+
+ Node* feedback_vector = assembler->Parameter(Descriptor::kVector);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+
+ assembler->StoreFixedArrayElement(feedback_vector, slot, site,
+ UPDATE_WRITE_BARRIER,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ assembler->Return(site);
+}
+
+void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ assembler->Return(assembler->CreateWeakCellInFeedbackVector(
+ assembler->Parameter(Descriptor::kVector),
+ assembler->Parameter(Descriptor::kSlot),
+ assembler->Parameter(Descriptor::kValue)));
+}
+
+void ArrayNoArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* native_context = assembler->LoadObjectField(
+ assembler->Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
+ bool track_allocation_site =
+ AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
+ override_mode() != DISABLE_ALLOCATION_SITES;
+ Node* allocation_site =
+ track_allocation_site ? assembler->Parameter(Descriptor::kAllocationSite)
+ : nullptr;
+ Node* array_map =
+ assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+ Node* array = assembler->AllocateJSArray(
+ elements_kind(), array_map,
+ assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler->IntPtrConstant(0), allocation_site);
+ assembler->Return(array);
+}
+
+void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* array_map =
+ assembler->LoadObjectField(assembler->Parameter(Descriptor::kFunction),
+ JSFunction::kPrototypeOrInitialMapOffset);
+ Node* array = assembler->AllocateJSArray(
+ elements_kind(), array_map,
+ assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler->IntPtrConstant(0), nullptr);
+ assembler->Return(array);
+}
+
+namespace {
+
+template <typename Descriptor>
+void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
+ ElementsKind elements_kind,
+ compiler::Node* array_map,
+ compiler::Node* allocation_site,
+ AllocationSiteMode mode) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+
+ Label ok(assembler);
+ Label smi_size(assembler);
+ Label small_smi_size(assembler);
+ Label call_runtime(assembler, Label::kDeferred);
+
+ Node* size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
+ assembler->Branch(assembler->WordIsSmi(size), &smi_size, &call_runtime);
+
+ assembler->Bind(&smi_size);
+
+ if (IsFastPackedElementsKind(elements_kind)) {
+ Label abort(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->SmiEqual(size, assembler->SmiConstant(Smi::FromInt(0))),
+ &small_smi_size, &abort);
+
+ assembler->Bind(&abort);
+ Node* reason =
+ assembler->SmiConstant(Smi::FromInt(kAllocatingNonEmptyPackedArray));
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ assembler->TailCallRuntime(Runtime::kAbort, context, reason);
+ } else {
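+    // Compute the largest element count whose backing store still fits in a
+    // regular heap object, leaving room for the elements header, the JSArray
+    // itself, and a possible allocation memento.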
+ int element_size =
+ IsFastDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
+ int max_fast_elements =
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize -
+ JSArray::kSize - AllocationMemento::kSize) /
+ element_size;
+ assembler->Branch(
+ assembler->SmiAboveOrEqual(
+ size, assembler->SmiConstant(Smi::FromInt(max_fast_elements))),
+ &call_runtime, &small_smi_size);
+ }
+
+ assembler->Bind(&small_smi_size);
+ {
+ Node* array = assembler->AllocateJSArray(
+ elements_kind, array_map, size, size,
+ mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
+ CodeStubAssembler::SMI_PARAMETERS);
+ assembler->Return(array);
+ }
+
+ assembler->Bind(&call_runtime);
+ {
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* array_size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
+ Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
+ assembler->TailCallRuntime(Runtime::kNewArray, context, function,
+ array_size, function, allocation_site);
+ }
+}
+} // namespace
+
+void ArraySingleArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* native_context =
+ assembler->LoadObjectField(function, JSFunction::kContextOffset);
+ Node* array_map =
+ assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+ AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
+ ? DONT_TRACK_ALLOCATION_SITE
+ : AllocationSite::GetMode(elements_kind());
+ Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
+ SingleArgumentConstructorCommon<Descriptor>(assembler, elements_kind(),
+ array_map, allocation_site, mode);
+}
+
+void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* function = assembler->Parameter(Descriptor::kFunction);
+ Node* array_map = assembler->LoadObjectField(
+ function, JSFunction::kPrototypeOrInitialMapOffset);
+ SingleArgumentConstructorCommon<Descriptor>(
+ assembler, elements_kind(), array_map, assembler->UndefinedConstant(),
+ DONT_TRACK_ALLOCATION_SITE);
+}
+
+void GrowArrayElementsStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ CodeStubAssembler::Label runtime(assembler,
+ CodeStubAssembler::Label::kDeferred);
+
+ Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* key = assembler->Parameter(Descriptor::kKey);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ ElementsKind kind = elements_kind();
+
+ Node* elements = assembler->LoadElements(object);
+ Node* new_elements = assembler->CheckAndGrowElementsCapacity(
+ context, elements, kind, key, &runtime);
+ assembler->StoreObjectField(object, JSObject::kElementsOffset, new_elements);
+ assembler->Return(new_elements);
+
+ assembler->Bind(&runtime);
+ // TODO(danno): Make this a tail call when the stub is only used from TurboFan
+  // code. This mustn't be a tail call for now, since the caller site in lithium
+  // creates a safepoint. This safepoint mustn't have a different number of
+ // arguments on the stack in the case that a GC happens from the slow-case
+ // allocation path (zero, since all the stubs inputs are in registers) and
+ // when the call happens (it would be two in the tail call case due to the
+ // tail call pushing the arguments on the stack for the runtime call). By not
+ // tail-calling, the runtime call case also has zero arguments on the stack
+ // for the stub frame.
+ assembler->Return(assembler->CallRuntime(Runtime::kGrowArrayElements, context,
+ object, key));
+}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {
minor_key_ = ArgumentCountBits::encode(ANY);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
-
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
int argument_count)
: PlatformCodeStub(isolate) {
@@ -3492,15 +5832,10 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
} else {
UNREACHABLE();
}
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
-}
-
-
-InternalArrayConstructorStub::InternalArrayConstructorStub(
- Isolate* isolate) : PlatformCodeStub(isolate) {
- InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
+InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
Representation RepresentationFromType(Type* type) {
if (type->Is(Type::UntaggedIntegral())) {
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index ace4aae614..36757c41c6 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -7,8 +7,8 @@
#include "src/allocation.h"
#include "src/assembler.h"
+#include "src/code-stub-assembler.h"
#include "src/codegen.h"
-#include "src/compiler/code-stub-assembler.h"
#include "src/globals.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
@@ -19,131 +19,163 @@ namespace v8 {
namespace internal {
// List of code stubs used on all platforms.
-#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
- /* PlatformCodeStubs */ \
- V(ArrayConstructor) \
- V(BinaryOpICWithAllocationSite) \
- V(CallApiCallback) \
- V(CallApiGetter) \
- V(CallConstruct) \
- V(CallIC) \
- V(CEntry) \
- V(CompareIC) \
- V(DoubleToI) \
- V(FunctionPrototype) \
- V(InstanceOf) \
- V(InternalArrayConstructor) \
- V(JSEntry) \
- V(KeyedLoadICTrampoline) \
- V(LoadICTrampoline) \
- V(CallICTrampoline) \
- V(LoadIndexedString) \
- V(MathPow) \
- V(ProfileEntryHook) \
- V(RecordWrite) \
- V(RegExpExec) \
- V(StoreBufferOverflow) \
- V(StoreElement) \
- V(StubFailureTrampoline) \
- V(SubString) \
- V(ToNumber) \
- V(NonNumberToNumber) \
- V(StringToNumber) \
- V(ToString) \
- V(ToName) \
- V(ToObject) \
- V(VectorStoreICTrampoline) \
- V(VectorKeyedStoreICTrampoline) \
- V(VectorStoreIC) \
- V(VectorKeyedStoreIC) \
- /* HydrogenCodeStubs */ \
- V(AllocateInNewSpace) \
- V(ArrayNArgumentsConstructor) \
- V(ArrayNoArgumentConstructor) \
- V(ArraySingleArgumentConstructor) \
- V(BinaryOpIC) \
- V(BinaryOpWithAllocationSite) \
- V(CreateAllocationSite) \
- V(CreateWeakCell) \
- V(ElementsTransitionAndStore) \
- V(FastArrayPush) \
- V(FastCloneRegExp) \
- V(FastCloneShallowArray) \
- V(FastCloneShallowObject) \
- V(FastNewClosure) \
- V(FastNewContext) \
- V(FastNewObject) \
- V(FastNewRestParameter) \
- V(FastNewSloppyArguments) \
- V(FastNewStrictArguments) \
- V(GrowArrayElements) \
- V(InternalArrayNArgumentsConstructor) \
- V(InternalArrayNoArgumentConstructor) \
- V(InternalArraySingleArgumentConstructor) \
- V(KeyedLoadGeneric) \
- V(LoadGlobalViaContext) \
- V(LoadScriptContextField) \
- V(LoadDictionaryElement) \
- V(NameDictionaryLookup) \
- V(NumberToString) \
- V(Typeof) \
- V(RegExpConstructResult) \
- V(StoreFastElement) \
- V(StoreGlobalViaContext) \
- V(StoreScriptContextField) \
- V(StringAdd) \
- V(ToBooleanIC) \
- V(TransitionElementsKind) \
- V(KeyedLoadIC) \
- V(LoadIC) \
- /* TurboFanCodeStubs */ \
- V(AllocateHeapNumber) \
- V(AllocateMutableHeapNumber) \
- V(AllocateFloat32x4) \
- V(AllocateInt32x4) \
- V(AllocateUint32x4) \
- V(AllocateBool32x4) \
- V(AllocateInt16x8) \
- V(AllocateUint16x8) \
- V(AllocateBool16x8) \
- V(AllocateInt8x16) \
- V(AllocateUint8x16) \
- V(AllocateBool8x16) \
- V(StringLength) \
- V(Add) \
- V(Subtract) \
- V(BitwiseAnd) \
- V(BitwiseOr) \
- V(BitwiseXor) \
- V(LessThan) \
- V(LessThanOrEqual) \
- V(GreaterThan) \
- V(GreaterThanOrEqual) \
- V(Equal) \
- V(NotEqual) \
- V(StrictEqual) \
- V(StrictNotEqual) \
- V(StringEqual) \
- V(StringNotEqual) \
- V(StringLessThan) \
- V(StringLessThanOrEqual) \
- V(StringGreaterThan) \
- V(StringGreaterThanOrEqual) \
- V(ToBoolean) \
- V(ToInteger) \
- V(ToLength) \
- /* IC Handler stubs */ \
- V(ArrayBufferViewLoadField) \
- V(LoadConstant) \
- V(LoadFastElement) \
- V(LoadField) \
- V(LoadIndexedInterceptor) \
- V(KeyedLoadSloppyArguments) \
- V(KeyedStoreSloppyArguments) \
- V(StoreField) \
- V(StoreInterceptor) \
- V(StoreGlobal) \
- V(StoreTransition)
+#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
+ /* --- PlatformCodeStubs --- */ \
+ V(ArrayConstructor) \
+ V(BinaryOpICWithAllocationSite) \
+ V(CallApiCallback) \
+ V(CallApiGetter) \
+ V(CallConstruct) \
+ V(CallIC) \
+ V(CEntry) \
+ V(CompareIC) \
+ V(DoubleToI) \
+ V(FunctionPrototype) \
+ V(InternalArrayConstructor) \
+ V(JSEntry) \
+ V(LoadIndexedString) \
+ V(MathPow) \
+ V(ProfileEntryHook) \
+ V(RecordWrite) \
+ V(RegExpExec) \
+ V(StoreBufferOverflow) \
+ V(StoreElement) \
+ V(SubString) \
+ V(ToString) \
+ V(ToName) \
+ V(StoreIC) \
+ V(KeyedStoreIC) \
+ V(KeyedLoadIC) \
+ V(LoadIC) \
+ V(LoadGlobalIC) \
+ V(FastNewObject) \
+ V(FastNewRestParameter) \
+ V(FastNewSloppyArguments) \
+ V(FastNewStrictArguments) \
+ V(NameDictionaryLookup) \
+ /* This can be removed once there are no */ \
+ /* more deopting Hydrogen stubs. */ \
+ V(StubFailureTrampoline) \
+  /* These are only called from FCG. */              \
+  /* They can be removed when only the TF */         \
+  /* version of the corresponding stub is */         \
+  /* used universally. */                            \
+ V(CallICTrampoline) \
+ V(LoadICTrampoline) \
+ V(KeyedLoadICTrampoline) \
+ V(KeyedStoreICTrampoline) \
+ V(StoreICTrampoline) \
+ /* --- HydrogenCodeStubs --- */ \
+ V(ElementsTransitionAndStore) \
+ V(FastCloneShallowArray) \
+ V(NumberToString) \
+ V(StringAdd) \
+ V(ToObject) \
+ V(Typeof) \
+ /* These builtins w/ JS linkage are */ \
+ /* just fast-cases of C++ builtins. They */ \
+  /* require vararg support from TF */               \
+ V(FastArrayPush) \
+ V(FastFunctionBind) \
+ /* These will be ported/eliminated */ \
+ /* as part of the new IC system, ask */ \
+ /* ishell before doing anything */ \
+ V(KeyedLoadGeneric) \
+ V(KeyedLoadSloppyArguments) \
+ V(KeyedStoreSloppyArguments) \
+ V(LoadConstant) \
+ V(LoadDictionaryElement) \
+ V(LoadFastElement) \
+ V(LoadField) \
+ V(LoadScriptContextField) \
+ V(StoreFastElement) \
+ V(StoreField) \
+ V(StoreGlobal) \
+ V(StoreScriptContextField) \
+ V(StoreTransition) \
+ /* These should never be ported to TF */ \
+ /* because they are either used only by */ \
+ /* FCG/Crankshaft or are deprecated */ \
+ V(BinaryOpIC) \
+ V(BinaryOpWithAllocationSite) \
+ V(ToBooleanIC) \
+ V(RegExpConstructResult) \
+ V(TransitionElementsKind) \
+ V(StoreGlobalViaContext) \
+ /* --- TurboFanCodeStubs --- */ \
+ V(AllocateHeapNumber) \
+ V(AllocateFloat32x4) \
+ V(AllocateInt32x4) \
+ V(AllocateUint32x4) \
+ V(AllocateBool32x4) \
+ V(AllocateInt16x8) \
+ V(AllocateUint16x8) \
+ V(AllocateBool16x8) \
+ V(AllocateInt8x16) \
+ V(AllocateUint8x16) \
+ V(AllocateBool8x16) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(CreateAllocationSite) \
+ V(CreateWeakCell) \
+ V(StringLength) \
+ V(Add) \
+ V(AddWithFeedback) \
+ V(Subtract) \
+ V(SubtractWithFeedback) \
+ V(Multiply) \
+ V(MultiplyWithFeedback) \
+ V(Divide) \
+ V(DivideWithFeedback) \
+ V(Modulus) \
+ V(ModulusWithFeedback) \
+ V(ShiftRight) \
+ V(ShiftRightLogical) \
+ V(ShiftLeft) \
+ V(BitwiseAnd) \
+ V(BitwiseOr) \
+ V(BitwiseXor) \
+ V(Inc) \
+ V(InternalArrayNoArgumentConstructor) \
+ V(InternalArraySingleArgumentConstructor) \
+ V(Dec) \
+ V(FastCloneShallowObject) \
+ V(FastCloneRegExp) \
+ V(FastNewClosure) \
+ V(FastNewFunctionContext) \
+ V(InstanceOf) \
+ V(LessThan) \
+ V(LessThanOrEqual) \
+ V(GreaterThan) \
+ V(GreaterThanOrEqual) \
+ V(Equal) \
+ V(NotEqual) \
+ V(StrictEqual) \
+ V(StrictNotEqual) \
+ V(StringEqual) \
+ V(StringNotEqual) \
+ V(StringLessThan) \
+ V(StringLessThanOrEqual) \
+ V(StringGreaterThan) \
+ V(StringGreaterThanOrEqual) \
+ V(ToInteger) \
+ V(ToLength) \
+ V(HasProperty) \
+ V(ForInFilter) \
+ V(GetProperty) \
+ V(LoadICTF) \
+ V(KeyedLoadICTF) \
+ V(StoreInterceptor) \
+ V(LoadApiGetter) \
+ V(LoadIndexedInterceptor) \
+ V(GrowArrayElements) \
+  /* These are only called from FCG and */           \
+ /* can be removed when we use ignition */ \
+ /* only */ \
+ V(LoadICTrampolineTF) \
+ V(LoadGlobalICTrampoline) \
+ V(KeyedLoadICTrampolineTF)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -282,9 +314,7 @@ class CodeStub BASE_EMBEDDED {
// BinaryOpStub needs to override this.
virtual Code::Kind GetCodeKind() const;
- virtual InlineCacheState GetICState() const { return UNINITIALIZED; }
virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
- virtual Code::StubType GetStubType() const { return Code::NORMAL; }
Code::Flags GetCodeFlags() const;
@@ -390,10 +420,59 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GenerateCode() override; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
- public: \
- void GenerateAssembly(compiler::CodeStubAssembler* assembler) \
- const override; \
+#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
+ public: \
+ void GenerateAssembly(CodeStubAssembler* assembler) const override; \
+ DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NAME, SUPER) \
+ public: \
+ static compiler::Node* Generate(CodeStubAssembler* assembler, \
+ compiler::Node* left, compiler::Node* right, \
+ compiler::Node* context); \
+ void GenerateAssembly(CodeStubAssembler* assembler) const override { \
+ assembler->Return(Generate(assembler, assembler->Parameter(0), \
+ assembler->Parameter(1), \
+ assembler->Parameter(2))); \
+ } \
+ DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER) \
+ public: \
+ static compiler::Node* Generate( \
+ CodeStubAssembler* assembler, compiler::Node* left, \
+ compiler::Node* right, compiler::Node* slot_id, \
+ compiler::Node* type_feedback_vector, compiler::Node* context); \
+ void GenerateAssembly(CodeStubAssembler* assembler) const override { \
+ assembler->Return( \
+ Generate(assembler, assembler->Parameter(0), assembler->Parameter(1), \
+ assembler->Parameter(2), assembler->Parameter(3), \
+ assembler->Parameter(4))); \
+ } \
+ DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(NAME, SUPER) \
+ public: \
+ static compiler::Node* Generate(CodeStubAssembler* assembler, \
+ compiler::Node* value, \
+ compiler::Node* context); \
+ void GenerateAssembly(CodeStubAssembler* assembler) const override { \
+ assembler->Return(Generate(assembler, assembler->Parameter(0), \
+ assembler->Parameter(1))); \
+ } \
+ DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER) \
+ public: \
+ static compiler::Node* Generate( \
+ CodeStubAssembler* assembler, compiler::Node* value, \
+ compiler::Node* context, compiler::Node* type_feedback_vector, \
+ compiler::Node* slot_id); \
+ void GenerateAssembly(CodeStubAssembler* assembler) const override { \
+ assembler->Return( \
+ Generate(assembler, assembler->Parameter(0), assembler->Parameter(1), \
+ assembler->Parameter(2), assembler->Parameter(3))); \
+ } \
DEFINE_CODE_STUB(NAME, SUPER)
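+
+// As a usage sketch (illustrative, not part of this change): a stub declared
+// with DEFINE_TURBOFAN_BINARY_OP_CODE_STUB exposes a static Generate() that
+// other CodeStubAssembler code can call to inline the operation, e.g.:
+//
+//   compiler::Node* sum = AddStub::Generate(assembler, lhs, rhs, context);
+//   assembler->Return(sum);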
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@@ -403,8 +482,15 @@ class CodeStub BASE_EMBEDDED {
#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME) \
public: \
+ typedef NAME##Descriptor Descriptor; \
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
- return NAME##Descriptor(isolate()); \
+ return Descriptor(isolate()); \
+ }
+
+#define DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(PARAMETER_COUNT) \
+ public: \
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override { \
+ return OnStackArgsDescriptorBase::ForArgs(isolate(), PARAMETER_COUNT); \
}
// There are some code stubs we just can't describe right now with a
@@ -450,8 +536,9 @@ class CodeStubDescriptor {
int hint_stack_parameter_count = -1,
StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
- void SetMissHandler(ExternalReference handler) {
- miss_handler_ = handler;
+ void SetMissHandler(Runtime::FunctionId id) {
+ miss_handler_id_ = id;
+ miss_handler_ = ExternalReference(Runtime::FunctionForId(id), isolate_);
has_miss_handler_ = true;
// Our miss handler infrastructure doesn't currently support
// variable stack parameter counts.
@@ -486,6 +573,11 @@ class CodeStubDescriptor {
return miss_handler_;
}
+ Runtime::FunctionId miss_handler_id() const {
+ DCHECK(has_miss_handler_);
+ return miss_handler_id_;
+ }
+
bool has_miss_handler() const {
return has_miss_handler_;
}
@@ -508,6 +600,7 @@ class CodeStubDescriptor {
return stack_parameter_count_.is_valid();
}
+ Isolate* isolate_;
CallInterfaceDescriptor call_descriptor_;
Register stack_parameter_count_;
// If hint_stack_parameter_count_ > 0, the code stub can optimize the
@@ -518,6 +611,7 @@ class CodeStubDescriptor {
Address deoptimization_handler_;
ExternalReference miss_handler_;
+ Runtime::FunctionId miss_handler_id_;
bool has_miss_handler_;
};
@@ -542,6 +636,8 @@ class HydrogenCodeStub : public CodeStub {
Handle<Code> GenerateLightweightMissCode(ExternalReference miss);
+ Handle<Code> GenerateRuntimeTailCall(CodeStubDescriptor* descriptor);
+
template<class StateType>
void TraceTransition(StateType from, StateType to);
@@ -579,13 +675,10 @@ class TurboFanCodeStub : public CodeStub {
return GetCallInterfaceDescriptor().GetStackParameterCount();
}
- Code::StubType GetStubType() const override { return Code::FAST; }
-
protected:
explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
- virtual void GenerateAssembly(
- compiler::CodeStubAssembler* assembler) const = 0;
+ virtual void GenerateAssembly(CodeStubAssembler* assembler) const = 0;
private:
DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
@@ -666,7 +759,6 @@ class StringLengthStub : public TurboFanCodeStub {
explicit StringLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- InlineCacheState GetICState() const override { return MONOMORPHIC; }
ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
@@ -678,7 +770,16 @@ class AddStub final : public TurboFanCodeStub {
explicit AddStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_CODE_STUB(Add, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Add, TurboFanCodeStub);
+};
+
+class AddWithFeedbackStub final : public TurboFanCodeStub {
+ public:
+ explicit AddWithFeedbackStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithVector);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(AddWithFeedback,
+ TurboFanCodeStub);
};
class SubtractStub final : public TurboFanCodeStub {
@@ -686,7 +787,96 @@ class SubtractStub final : public TurboFanCodeStub {
explicit SubtractStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_CODE_STUB(Subtract, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Subtract, TurboFanCodeStub);
+};
+
+class SubtractWithFeedbackStub final : public TurboFanCodeStub {
+ public:
+ explicit SubtractWithFeedbackStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithVector);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(SubtractWithFeedback,
+ TurboFanCodeStub);
+};
+
+class MultiplyStub final : public TurboFanCodeStub {
+ public:
+ explicit MultiplyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Multiply, TurboFanCodeStub);
+};
+
+class MultiplyWithFeedbackStub final : public TurboFanCodeStub {
+ public:
+ explicit MultiplyWithFeedbackStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithVector);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(MultiplyWithFeedback,
+ TurboFanCodeStub);
+};
+
+class DivideStub final : public TurboFanCodeStub {
+ public:
+ explicit DivideStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Divide, TurboFanCodeStub);
+};
+
+class DivideWithFeedbackStub final : public TurboFanCodeStub {
+ public:
+ explicit DivideWithFeedbackStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithVector);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(DivideWithFeedback,
+ TurboFanCodeStub);
+};
+
+class ModulusStub final : public TurboFanCodeStub {
+ public:
+ explicit ModulusStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Modulus, TurboFanCodeStub);
+};
+
+class ModulusWithFeedbackStub final : public TurboFanCodeStub {
+ public:
+ explicit ModulusWithFeedbackStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithVector);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(ModulusWithFeedback,
+ TurboFanCodeStub);
+};
+
+class ShiftRightStub final : public TurboFanCodeStub {
+ public:
+ explicit ShiftRightStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRight, TurboFanCodeStub);
+};
+
+class ShiftRightLogicalStub final : public TurboFanCodeStub {
+ public:
+ explicit ShiftRightLogicalStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRightLogical, TurboFanCodeStub);
+};
+
+class ShiftLeftStub final : public TurboFanCodeStub {
+ public:
+ explicit ShiftLeftStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftLeft, TurboFanCodeStub);
};
class BitwiseAndStub final : public TurboFanCodeStub {
@@ -694,7 +884,7 @@ class BitwiseAndStub final : public TurboFanCodeStub {
explicit BitwiseAndStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
};
class BitwiseOrStub final : public TurboFanCodeStub {
@@ -702,7 +892,7 @@ class BitwiseOrStub final : public TurboFanCodeStub {
explicit BitwiseOrStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_CODE_STUB(BitwiseOr, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseOr, TurboFanCodeStub);
};
class BitwiseXorStub final : public TurboFanCodeStub {
@@ -710,7 +900,32 @@ class BitwiseXorStub final : public TurboFanCodeStub {
explicit BitwiseXorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_CODE_STUB(BitwiseXor, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseXor, TurboFanCodeStub);
+};
+
+class IncStub final : public TurboFanCodeStub {
+ public:
+ explicit IncStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
+ DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Inc, TurboFanCodeStub);
+};
+
+class DecStub final : public TurboFanCodeStub {
+ public:
+ explicit DecStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
+ DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Dec, TurboFanCodeStub);
+};
+
+class InstanceOfStub final : public TurboFanCodeStub {
+ public:
+ explicit InstanceOfStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(InstanceOf, TurboFanCodeStub);
};
class LessThanStub final : public TurboFanCodeStub {
@@ -718,7 +933,7 @@ class LessThanStub final : public TurboFanCodeStub {
explicit LessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(LessThan, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThan, TurboFanCodeStub);
};
class LessThanOrEqualStub final : public TurboFanCodeStub {
@@ -726,7 +941,7 @@ class LessThanOrEqualStub final : public TurboFanCodeStub {
explicit LessThanOrEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
};
class GreaterThanStub final : public TurboFanCodeStub {
@@ -734,7 +949,7 @@ class GreaterThanStub final : public TurboFanCodeStub {
explicit GreaterThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(GreaterThan, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThan, TurboFanCodeStub);
};
class GreaterThanOrEqualStub final : public TurboFanCodeStub {
@@ -743,7 +958,7 @@ class GreaterThanOrEqualStub final : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
};
class EqualStub final : public TurboFanCodeStub {
@@ -751,7 +966,7 @@ class EqualStub final : public TurboFanCodeStub {
explicit EqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(Equal, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Equal, TurboFanCodeStub);
};
class NotEqualStub final : public TurboFanCodeStub {
@@ -759,7 +974,7 @@ class NotEqualStub final : public TurboFanCodeStub {
explicit NotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(NotEqual, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NotEqual, TurboFanCodeStub);
};
class StrictEqualStub final : public TurboFanCodeStub {
@@ -767,7 +982,7 @@ class StrictEqualStub final : public TurboFanCodeStub {
explicit StrictEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StrictEqual, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictEqual, TurboFanCodeStub);
};
class StrictNotEqualStub final : public TurboFanCodeStub {
@@ -775,7 +990,7 @@ class StrictNotEqualStub final : public TurboFanCodeStub {
explicit StrictNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
};
class StringEqualStub final : public TurboFanCodeStub {
@@ -829,14 +1044,6 @@ class StringGreaterThanOrEqualStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StringGreaterThanOrEqual, TurboFanCodeStub);
};
-class ToBooleanStub final : public TurboFanCodeStub {
- public:
- explicit ToBooleanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_TURBOFAN_CODE_STUB(ToBoolean, TurboFanCodeStub);
-};
-
class ToIntegerStub final : public TurboFanCodeStub {
public:
explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -857,11 +1064,12 @@ class StoreInterceptorStub : public TurboFanCodeStub {
public:
explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- void GenerateAssembly(compiler::CodeStubAssembler* assember) const override;
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
};
@@ -871,11 +1079,38 @@ class LoadIndexedInterceptorStub : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
};
+// ES6 section 12.10.3 "in" operator evaluation.
+class HasPropertyStub : public TurboFanCodeStub {
+ public:
+ explicit HasPropertyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(HasProperty);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(HasProperty, TurboFanCodeStub);
+};
+
+class ForInFilterStub : public TurboFanCodeStub {
+ public:
+ explicit ForInFilterStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ForInFilter);
+ DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ForInFilter, TurboFanCodeStub);
+};
+
+// ES6 [[Get]] operation.
+class GetPropertyStub : public TurboFanCodeStub {
+ public:
+ explicit GetPropertyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(GetProperty);
+ DEFINE_TURBOFAN_CODE_STUB(GetProperty, TurboFanCodeStub);
+};
+
enum StringAddFlags {
// Omit both parameter checks.
STRING_ADD_CHECK_NONE = 0,
@@ -899,75 +1134,43 @@ class NumberToStringStub final : public HydrogenCodeStub {
public:
explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kNumber = 0;
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
};
-
-class TypeofStub final : public HydrogenCodeStub {
+class TypeofStub final : public TurboFanCodeStub {
public:
- explicit TypeofStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kObject = 0;
-
- static void GenerateAheadOfTime(Isolate* isolate);
+ explicit TypeofStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(Typeof);
- DEFINE_HYDROGEN_CODE_STUB(Typeof, HydrogenCodeStub);
+ DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(Typeof, TurboFanCodeStub);
};
-
-class FastNewClosureStub : public HydrogenCodeStub {
+class FastNewClosureStub : public TurboFanCodeStub {
public:
- FastNewClosureStub(Isolate* isolate, LanguageMode language_mode,
- FunctionKind kind)
- : HydrogenCodeStub(isolate) {
- DCHECK(IsValidFunctionKind(kind));
- set_sub_minor_key(LanguageModeBits::encode(language_mode) |
- FunctionKindBits::encode(kind));
- }
-
- LanguageMode language_mode() const {
- return LanguageModeBits::decode(sub_minor_key());
- }
-
- FunctionKind kind() const {
- return FunctionKindBits::decode(sub_minor_key());
- }
+ explicit FastNewClosureStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- private:
- STATIC_ASSERT(LANGUAGE_END == 3);
- class LanguageModeBits : public BitField<LanguageMode, 0, 2> {};
- class FunctionKindBits : public BitField<FunctionKind, 2, 8> {};
+ static compiler::Node* Generate(CodeStubAssembler* assembler,
+ compiler::Node* shared_info,
+ compiler::Node* context);
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
- DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(FastNewClosure, TurboFanCodeStub);
};
-
-class FastNewContextStub final : public HydrogenCodeStub {
+class FastNewFunctionContextStub final : public TurboFanCodeStub {
public:
- static const int kMaximumSlots = 64;
-
- FastNewContextStub(Isolate* isolate, int slots) : HydrogenCodeStub(isolate) {
- DCHECK(slots >= 0 && slots <= kMaximumSlots);
- set_sub_minor_key(SlotsBits::encode(slots));
- }
-
- int slots() const { return SlotsBits::decode(sub_minor_key()); }
+ explicit FastNewFunctionContextStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kFunction = 0;
+ static compiler::Node* Generate(CodeStubAssembler* assembler,
+ compiler::Node* function,
+ compiler::Node* slots,
+ compiler::Node* context);
private:
- class SlotsBits : public BitField<int, 0, 8> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewContext);
- DEFINE_HYDROGEN_CODE_STUB(FastNewContext, HydrogenCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewFunctionContext);
+ DEFINE_TURBOFAN_CODE_STUB(FastNewFunctionContext, TurboFanCodeStub);
};
@@ -986,11 +1189,19 @@ class FastNewObjectStub final : public PlatformCodeStub {
// of the strict arguments object materialization code.
class FastNewRestParameterStub final : public PlatformCodeStub {
public:
- explicit FastNewRestParameterStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
+ explicit FastNewRestParameterStub(Isolate* isolate,
+ bool skip_stub_frame = false)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
+ }
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewRestParameter);
DEFINE_PLATFORM_CODE_STUB(FastNewRestParameter, PlatformCodeStub);
+
+ int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
+
+ private:
+ class SkipStubFrameBits : public BitField<bool, 0, 1> {};
};
@@ -999,11 +1210,19 @@ class FastNewRestParameterStub final : public PlatformCodeStub {
// and easy as the current handwritten version.
class FastNewSloppyArgumentsStub final : public PlatformCodeStub {
public:
- explicit FastNewSloppyArgumentsStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
+ explicit FastNewSloppyArgumentsStub(Isolate* isolate,
+ bool skip_stub_frame = false)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
+ }
+
+ int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewSloppyArguments);
DEFINE_PLATFORM_CODE_STUB(FastNewSloppyArguments, PlatformCodeStub);
+
+ private:
+ class SkipStubFrameBits : public BitField<bool, 0, 1> {};
};
@@ -1012,21 +1231,35 @@ class FastNewSloppyArgumentsStub final : public PlatformCodeStub {
// and easy as the current handwritten version.
class FastNewStrictArgumentsStub final : public PlatformCodeStub {
public:
- explicit FastNewStrictArgumentsStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
+ explicit FastNewStrictArgumentsStub(Isolate* isolate,
+ bool skip_stub_frame = false)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
+ }
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewStrictArguments);
DEFINE_PLATFORM_CODE_STUB(FastNewStrictArguments, PlatformCodeStub);
-};
+ int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
+
+ private:
+ class SkipStubFrameBits : public BitField<bool, 0, 1> {};
+};
-class FastCloneRegExpStub final : public HydrogenCodeStub {
+class FastCloneRegExpStub final : public TurboFanCodeStub {
public:
- explicit FastCloneRegExpStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+ explicit FastCloneRegExpStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+
+ static compiler::Node* Generate(CodeStubAssembler* assembler,
+ compiler::Node* closure,
+ compiler::Node* literal_index,
+ compiler::Node* pattern,
+ compiler::Node* flags,
+ compiler::Node* context);
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneRegExp);
- DEFINE_HYDROGEN_CODE_STUB(FastCloneRegExp, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(FastCloneRegExp, TurboFanCodeStub);
};
@@ -1049,72 +1282,71 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowArray, HydrogenCodeStub);
};
-
-class FastCloneShallowObjectStub : public HydrogenCodeStub {
+class FastCloneShallowObjectStub : public TurboFanCodeStub {
public:
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
FastCloneShallowObjectStub(Isolate* isolate, int length)
- : HydrogenCodeStub(isolate) {
+ : TurboFanCodeStub(isolate) {
DCHECK_GE(length, 0);
DCHECK_LE(length, kMaximumClonedProperties);
- set_sub_minor_key(LengthBits::encode(length));
+ minor_key_ = LengthBits::encode(length);
}
- int length() const { return LengthBits::decode(sub_minor_key()); }
+ static compiler::Node* GenerateFastPath(
+ CodeStubAssembler* assembler,
+ compiler::CodeAssembler::Label* call_runtime, compiler::Node* closure,
+ compiler::Node* literals_index, compiler::Node* properties_count);
+
+ static bool IsSupported(ObjectLiteral* expr);
+ static int PropertiesCount(int literal_length);
+
+ int length() const { return LengthBits::decode(minor_key_); }
private:
class LengthBits : public BitField<int, 0, 4> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowObject);
- DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowObject, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowObject, TurboFanCodeStub);
};
-
-class CreateAllocationSiteStub : public HydrogenCodeStub {
+class CreateAllocationSiteStub : public TurboFanCodeStub {
public:
explicit CreateAllocationSiteStub(Isolate* isolate)
- : HydrogenCodeStub(isolate) { }
-
+ : TurboFanCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateAllocationSite);
- DEFINE_HYDROGEN_CODE_STUB(CreateAllocationSite, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(CreateAllocationSite, TurboFanCodeStub);
};
-
-class CreateWeakCellStub : public HydrogenCodeStub {
+class CreateWeakCellStub : public TurboFanCodeStub {
public:
- explicit CreateWeakCellStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+ explicit CreateWeakCellStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate);
DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateWeakCell);
- DEFINE_HYDROGEN_CODE_STUB(CreateWeakCell, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(CreateWeakCell, TurboFanCodeStub);
};
-
-class GrowArrayElementsStub : public HydrogenCodeStub {
+class GrowArrayElementsStub : public TurboFanCodeStub {
public:
- GrowArrayElementsStub(Isolate* isolate, bool is_js_array, ElementsKind kind)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(ElementsKindBits::encode(kind) |
- IsJsArrayBits::encode(is_js_array));
+ GrowArrayElementsStub(Isolate* isolate, ElementsKind kind)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = ElementsKindBits::encode(GetHoleyElementsKind(kind));
}
ElementsKind elements_kind() const {
- return ElementsKindBits::decode(sub_minor_key());
+ return ElementsKindBits::decode(minor_key_);
}
- bool is_js_array() const { return IsJsArrayBits::decode(sub_minor_key()); }
-
private:
class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
- class IsJsArrayBits : public BitField<bool, ElementsKindBits::kNext, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(GrowArrayElements);
- DEFINE_HYDROGEN_CODE_STUB(GrowArrayElements, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(GrowArrayElements, TurboFanCodeStub);
};
class FastArrayPushStub : public HydrogenCodeStub {
@@ -1122,29 +1354,19 @@ class FastArrayPushStub : public HydrogenCodeStub {
explicit FastArrayPushStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastArrayPush);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
};
-class InstanceOfStub final : public PlatformCodeStub {
+class FastFunctionBindStub : public HydrogenCodeStub {
public:
- explicit InstanceOfStub(Isolate* isolate, bool es6_instanceof = false)
- : PlatformCodeStub(isolate) {
- minor_key_ = IsES6InstanceOfBits::encode(es6_instanceof);
- }
-
- bool is_es6_instanceof() const {
- return IsES6InstanceOfBits::decode(minor_key_);
- }
+ explicit FastFunctionBindStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
private:
- class IsES6InstanceOfBits : public BitField<bool, 0, 1> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(InstanceOf);
- DEFINE_PLATFORM_CODE_STUB(InstanceOf, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
+ DEFINE_HYDROGEN_CODE_STUB(FastFunctionBind, HydrogenCodeStub);
};
-
enum AllocationSiteOverrideMode {
DONT_OVERRIDE,
DISABLE_ALLOCATION_SITES,
@@ -1172,7 +1394,7 @@ class ArrayConstructorStub: public PlatformCodeStub {
class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
};
@@ -1184,14 +1406,14 @@ class InternalArrayConstructorStub: public PlatformCodeStub {
private:
void GenerateCase(MacroAssembler* masm, ElementsKind kind);
- DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
DEFINE_PLATFORM_CODE_STUB(InternalArrayConstructor, PlatformCodeStub);
};
class MathPowStub: public PlatformCodeStub {
public:
- enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
+ enum ExponentType { INTEGER, DOUBLE, TAGGED };
MathPowStub(Isolate* isolate, ExponentType exponent_type)
: PlatformCodeStub(isolate) {
@@ -1203,9 +1425,11 @@ class MathPowStub: public PlatformCodeStub {
return MathPowTaggedDescriptor(isolate());
} else if (exponent_type() == INTEGER) {
return MathPowIntegerDescriptor(isolate());
+ } else {
+ // A CallInterfaceDescriptor doesn't specify double registers (yet).
+ DCHECK_EQ(DOUBLE, exponent_type());
+ return ContextOnlyDescriptor(isolate());
}
- // A CallInterfaceDescriptor doesn't specify double registers (yet).
- return ContextOnlyDescriptor(isolate());
}
private:
@@ -1228,8 +1452,6 @@ class CallICStub: public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- InlineCacheState GetICState() const override { return GENERIC; }
-
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -1239,9 +1461,7 @@ class CallICStub: public PlatformCodeStub {
ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
- CallICState state() const {
- return CallICState(static_cast<ExtraICState>(minor_key_));
- }
+ CallICState state() const { return CallICState(GetExtraICState()); }
// Code generation helpers.
void GenerateMiss(MacroAssembler* masm);
@@ -1262,6 +1482,7 @@ class FunctionPrototypeStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
// TODO(mvstanton): only the receiver register is accessed. When this is
// translated to a hydrogen code stub, a new CallInterfaceDescriptor
@@ -1280,7 +1501,7 @@ class LoadIndexedStringStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- Code::StubType GetStubType() const override { return Code::FAST; }
+ ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
@@ -1291,7 +1512,6 @@ class HandlerStub : public HydrogenCodeStub {
public:
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return kind(); }
- InlineCacheState GetICState() const override { return MONOMORPHIC; }
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
@@ -1320,41 +1540,16 @@ class LoadFieldStub: public HandlerStub {
protected:
Code::Kind kind() const override { return Code::LOAD_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
private:
class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
+ // TODO(ishell): The stub uses only the kReceiver parameter.
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HANDLER_CODE_STUB(LoadField, HandlerStub);
};
-class ArrayBufferViewLoadFieldStub : public HandlerStub {
- public:
- ArrayBufferViewLoadFieldStub(Isolate* isolate, FieldIndex index)
- : HandlerStub(isolate) {
- int property_index_key = index.GetFieldAccessStubKey();
- set_sub_minor_key(
- ArrayBufferViewLoadFieldByIndexBits::encode(property_index_key));
- }
-
- FieldIndex index() const {
- int property_index_key =
- ArrayBufferViewLoadFieldByIndexBits::decode(sub_minor_key());
- return FieldIndex::FromFieldAccessStubKey(property_index_key);
- }
-
- protected:
- Code::Kind kind() const override { return Code::LOAD_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
-
- private:
- class ArrayBufferViewLoadFieldByIndexBits : public BitField<int, 0, 13> {};
-
- DEFINE_HANDLER_CODE_STUB(ArrayBufferViewLoadField, HandlerStub);
-};
-
-
class KeyedLoadSloppyArgumentsStub : public HandlerStub {
public:
explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
@@ -1362,9 +1557,8 @@ class KeyedLoadSloppyArgumentsStub : public HandlerStub {
protected:
Code::Kind kind() const override { return Code::KEYED_LOAD_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
- private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
};
@@ -1381,9 +1575,8 @@ class KeyedStoreSloppyArgumentsStub : public HandlerStub {
protected:
Code::Kind kind() const override { return Code::KEYED_STORE_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
- private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_HANDLER_CODE_STUB(KeyedStoreSloppyArguments, HandlerStub);
};
@@ -1401,14 +1594,41 @@ class LoadConstantStub : public HandlerStub {
protected:
Code::Kind kind() const override { return Code::LOAD_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
private:
class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
+ // TODO(ishell): The stub uses only the kReceiver parameter.
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HANDLER_CODE_STUB(LoadConstant, HandlerStub);
};
+class LoadApiGetterStub : public TurboFanCodeStub {
+ public:
+ LoadApiGetterStub(Isolate* isolate, bool receiver_is_holder, int index)
+ : TurboFanCodeStub(isolate) {
+ // If receiver_is_holder is ever false, we need to ensure that the
+ // receiver is actually a JSReceiver. http://crbug.com/609134
+ DCHECK(receiver_is_holder);
+ minor_key_ = IndexBits::encode(index) |
+ ReceiverIsHolderBits::encode(receiver_is_holder);
+ }
+
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
+
+ int index() const { return IndexBits::decode(minor_key_); }
+ bool receiver_is_holder() const {
+ return ReceiverIsHolderBits::decode(minor_key_);
+ }
+
+ private:
+ class ReceiverIsHolderBits : public BitField<bool, 0, 1> {};
+ class IndexBits : public BitField<int, 1, kDescriptorIndexBitCount> {};
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_TURBOFAN_CODE_STUB(LoadApiGetter, TurboFanCodeStub);
+};
class StoreFieldStub : public HandlerStub {
public:
@@ -1433,12 +1653,13 @@ class StoreFieldStub : public HandlerStub {
protected:
Code::Kind kind() const override { return Code::STORE_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
class RepresentationBits : public BitField<uint8_t, 13, 4> {};
+ // TODO(ishell): The stub uses only the kReceiver and kValue parameters.
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_HANDLER_CODE_STUB(StoreField, HandlerStub);
};
@@ -1472,25 +1693,23 @@ class StoreTransitionHelper {
return VectorStoreTransitionDescriptor::MapRegister();
}
- static int ReceiverIndex() {
- return StoreTransitionDescriptor::kReceiverIndex;
- }
+ static int ReceiverIndex() { return StoreTransitionDescriptor::kReceiver; }
- static int NameIndex() { return StoreTransitionDescriptor::kReceiverIndex; }
+ static int NameIndex() { return StoreTransitionDescriptor::kName; }
- static int ValueIndex() { return StoreTransitionDescriptor::kValueIndex; }
+ static int ValueIndex() { return StoreTransitionDescriptor::kValue; }
static int MapIndex() {
- DCHECK(static_cast<int>(VectorStoreTransitionDescriptor::kMapIndex) ==
- static_cast<int>(StoreTransitionDescriptor::kMapIndex));
- return StoreTransitionDescriptor::kMapIndex;
+ DCHECK(static_cast<int>(VectorStoreTransitionDescriptor::kMap) ==
+ static_cast<int>(StoreTransitionDescriptor::kMap));
+ return StoreTransitionDescriptor::kMap;
}
static int VectorIndex() {
if (HasVirtualSlotArg()) {
- return VectorStoreTransitionDescriptor::kVirtualSlotVectorIndex;
+ return VectorStoreTransitionDescriptor::kVirtualSlotVector;
}
- return VectorStoreTransitionDescriptor::kVectorIndex;
+ return VectorStoreTransitionDescriptor::kVector;
}
// Some platforms don't have a slot arg.
@@ -1539,17 +1758,16 @@ class StoreTransitionStub : public HandlerStub {
return StoreModeBits::decode(sub_minor_key());
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
-
protected:
Code::Kind kind() const override { return Code::STORE_IC; }
- Code::StubType GetStubType() const override { return Code::FAST; }
+ void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
private:
class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
class RepresentationBits : public BitField<uint8_t, 13, 4> {};
class StoreModeBits : public BitField<StoreMode, 17, 2> {};
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreTransition);
DEFINE_HANDLER_CODE_STUB(StoreTransition, HandlerStub);
};
@@ -1615,30 +1833,12 @@ class StoreGlobalStub : public HandlerStub {
class RepresentationBits : public BitField<Representation::Kind, 4, 8> {};
class CheckGlobalBits : public BitField<bool, 12, 1> {};
+ // TODO(ishell): The stub uses only the kValue parameter.
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_HANDLER_CODE_STUB(StoreGlobal, HandlerStub);
};
-
-class LoadGlobalViaContextStub final : public PlatformCodeStub {
- public:
- static const int kMaximumDepth = 15;
-
- LoadGlobalViaContextStub(Isolate* isolate, int depth)
- : PlatformCodeStub(isolate) {
- minor_key_ = DepthBits::encode(depth);
- }
-
- int depth() const { return DepthBits::decode(minor_key_); }
-
- private:
- class DepthBits : public BitField<int, 0, 4> {};
- STATIC_ASSERT(DepthBits::kMax == kMaximumDepth);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobalViaContext);
- DEFINE_PLATFORM_CODE_STUB(LoadGlobalViaContext, PlatformCodeStub);
-};
-
-
+// TODO(ishell): remove once StoreGlobalIC is implemented.
class StoreGlobalViaContextStub final : public PlatformCodeStub {
public:
static const int kMaximumDepth = 15;
@@ -1658,8 +1858,8 @@ class StoreGlobalViaContextStub final : public PlatformCodeStub {
private:
class DepthBits : public BitField<int, 0, 4> {};
STATIC_ASSERT(DepthBits::kMax == kMaximumDepth);
- class LanguageModeBits : public BitField<LanguageMode, 4, 2> {};
- STATIC_ASSERT(LANGUAGE_END == 3);
+ class LanguageModeBits : public BitField<LanguageMode, 4, 1> {};
+ STATIC_ASSERT(LANGUAGE_END == 2);
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreGlobalViaContext);
DEFINE_PLATFORM_CODE_STUB(StoreGlobalViaContext, PlatformCodeStub);
@@ -1677,8 +1877,10 @@ class CallApiCallbackStub : public PlatformCodeStub {
call_data_undefined, is_lazy) {}
// CallApiCallbackStub for callback functions.
- CallApiCallbackStub(Isolate* isolate, int argc, bool call_data_undefined)
- : CallApiCallbackStub(isolate, argc, false, call_data_undefined, false) {}
+ CallApiCallbackStub(Isolate* isolate, int argc, bool call_data_undefined,
+ bool is_lazy)
+ : CallApiCallbackStub(isolate, argc, false, call_data_undefined,
+ is_lazy) {}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
return ApiCallbackDescriptorBase::ForArgs(isolate(), argc());
@@ -1737,8 +1939,6 @@ class BinaryOpICStub : public HydrogenCodeStub {
Code::Kind GetCodeKind() const override { return Code::BINARY_OP_IC; }
- InlineCacheState GetICState() const final { return state().GetICState(); }
-
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(sub_minor_key());
}
@@ -1749,10 +1949,6 @@ class BinaryOpICStub : public HydrogenCodeStub {
void PrintState(std::ostream& os) const final; // NOLINT
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kLeft = 0;
- static const int kRight = 1;
-
private:
static void GenerateAheadOfTime(Isolate* isolate,
const BinaryOpICState& state);
@@ -1782,8 +1978,6 @@ class BinaryOpICWithAllocationSiteStub final : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::BINARY_OP_IC; }
- InlineCacheState GetICState() const override { return state().GetICState(); }
-
ExtraICState GetExtraICState() const override {
return static_cast<ExtraICState>(minor_key_);
}
@@ -1792,7 +1986,7 @@ class BinaryOpICWithAllocationSiteStub final : public PlatformCodeStub {
private:
BinaryOpICState state() const {
- return BinaryOpICState(isolate(), static_cast<ExtraICState>(minor_key_));
+ return BinaryOpICState(isolate(), GetExtraICState());
}
static void GenerateAheadOfTime(Isolate* isolate,
@@ -1813,11 +2007,6 @@ class BinaryOpWithAllocationSiteStub final : public BinaryOpICStub {
Code::Kind GetCodeKind() const final { return Code::STUB; }
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kAllocationSite = 0;
- static const int kLeft = 1;
- static const int kRight = 2;
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithAllocationSite);
DEFINE_HYDROGEN_CODE_STUB(BinaryOpWithAllocationSite, BinaryOpICStub);
};
@@ -1840,10 +2029,6 @@ class StringAddStub final : public HydrogenCodeStub {
return PretenureFlagBits::decode(sub_minor_key());
}
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kLeft = 0;
- static const int kRight = 1;
-
private:
class StringAddFlagsBits : public BitField<StringAddFlags, 0, 3> {};
class PretenureFlagBits : public BitField<PretenureFlag, 3, 1> {};
@@ -1861,14 +2046,29 @@ class CompareICStub : public PlatformCodeStub {
CompareICState::State right, CompareICState::State state)
: PlatformCodeStub(isolate) {
DCHECK(Token::IsCompareOp(op));
+ DCHECK(OpBits::is_valid(op - Token::EQ));
minor_key_ = OpBits::encode(op - Token::EQ) |
LeftStateBits::encode(left) | RightStateBits::encode(right) |
StateBits::encode(state);
}
+ // Creates an uninitialized compare stub.
+ CompareICStub(Isolate* isolate, Token::Value op)
+ : CompareICStub(isolate, op, CompareICState::UNINITIALIZED,
+ CompareICState::UNINITIALIZED,
+ CompareICState::UNINITIALIZED) {}
+
+ CompareICStub(Isolate* isolate, ExtraICState extra_ic_state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = extra_ic_state;
+ }
+
+ ExtraICState GetExtraICState() const final {
+ return static_cast<ExtraICState>(minor_key_);
+ }
void set_known_map(Handle<Map> map) { known_map_ = map; }
- InlineCacheState GetICState() const override;
+ InlineCacheState GetICState() const;
Token::Value op() const {
return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
@@ -1899,8 +2099,9 @@ class CompareICStub : public PlatformCodeStub {
bool strict() const { return op() == Token::EQ_STRICT; }
Condition GetCondition() const;
- void AddToSpecialCache(Handle<Code> new_object) override;
- bool FindCodeInSpecialCache(Code** code_out) override;
+ // Although we don't cache anything in the special cache, we have to define
+ // this predicate to prevent code stubs with embedded maps from appearing
+ // in the global stub cache.
bool UseSpecialCache() override {
return state() == CompareICState::KNOWN_RECEIVER;
}
@@ -1921,9 +2122,10 @@ class CEntryStub : public PlatformCodeStub {
public:
CEntryStub(Isolate* isolate, int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
- ArgvMode argv_mode = kArgvOnStack)
+ ArgvMode argv_mode = kArgvOnStack, bool builtin_exit_frame = false)
: PlatformCodeStub(isolate) {
minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs) |
+ FrameTypeBits::encode(builtin_exit_frame) |
ArgvMode::encode(argv_mode == kArgvInRegister);
DCHECK(result_size == 1 || result_size == 2 || result_size == 3);
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
@@ -1938,13 +2140,15 @@ class CEntryStub : public PlatformCodeStub {
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
bool argv_in_register() const { return ArgvMode::decode(minor_key_); }
+ bool is_builtin_exit() const { return FrameTypeBits::decode(minor_key_); }
int result_size() const { return ResultSizeBits::decode(minor_key_); }
bool NeedsImmovableCode() override;
class SaveDoublesBits : public BitField<bool, 0, 1> {};
class ArgvMode : public BitField<bool, 1, 1> {};
- class ResultSizeBits : public BitField<int, 2, 3> {};
+ class FrameTypeBits : public BitField<bool, 2, 1> {};
+ class ResultSizeBits : public BitField<int, 3, 3> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(CEntry, PlatformCodeStub);
@@ -1984,7 +2188,7 @@ class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+ DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(4);
DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
};
@@ -1994,11 +2198,6 @@ class RegExpConstructResultStub final : public HydrogenCodeStub {
explicit RegExpConstructResultStub(Isolate* isolate)
: HydrogenCodeStub(isolate) { }
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kLength = 0;
- static const int kIndex = 1;
- static const int kInput = 2;
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpConstructResult);
DEFINE_HYDROGEN_CODE_STUB(RegExpConstructResult, HydrogenCodeStub);
};
@@ -2014,17 +2213,6 @@ class CallConstructStub final : public PlatformCodeStub {
};
-enum StringIndexFlags {
- // Accepts smis or heap numbers.
- STRING_INDEX_IS_NUMBER,
-
- // Accepts smis or heap numbers that are valid array indices
- // (ECMA-262 15.4). Invalid indices are reported as being out of
- // range.
- STRING_INDEX_IS_ARRAY_INDEX
-};
-
-
enum ReceiverCheckMode {
// We don't know anything about the receiver.
RECEIVER_IS_UNKNOWN,
@@ -2058,7 +2246,6 @@ class StringCharCodeAtGenerator {
StringCharCodeAtGenerator(Register object, Register index, Register result,
Label* receiver_not_string, Label* index_not_number,
Label* index_out_of_range,
- StringIndexFlags index_flags,
ReceiverCheckMode check_mode = RECEIVER_IS_UNKNOWN)
: object_(object),
index_(index),
@@ -2066,7 +2253,6 @@ class StringCharCodeAtGenerator {
receiver_not_string_(receiver_not_string),
index_not_number_(index_not_number),
index_out_of_range_(index_out_of_range),
- index_flags_(index_flags),
check_mode_(check_mode) {
DCHECK(!result_.is(object_));
DCHECK(!result_.is(index_));
@@ -2098,7 +2284,6 @@ class StringCharCodeAtGenerator {
Label* index_not_number_;
Label* index_out_of_range_;
- StringIndexFlags index_flags_;
ReceiverCheckMode check_mode_;
Label call_runtime_;
@@ -2162,11 +2347,10 @@ class StringCharAtGenerator {
StringCharAtGenerator(Register object, Register index, Register scratch,
Register result, Label* receiver_not_string,
Label* index_not_number, Label* index_out_of_range,
- StringIndexFlags index_flags,
ReceiverCheckMode check_mode = RECEIVER_IS_UNKNOWN)
: char_code_at_generator_(object, index, scratch, receiver_not_string,
index_not_number, index_out_of_range,
- index_flags, check_mode),
+ check_mode),
char_from_code_generator_(scratch, result) {}
// Generates the fast case code. On the fallthrough path |result|
@@ -2201,82 +2385,99 @@ class StringCharAtGenerator {
class LoadDictionaryElementStub : public HydrogenCodeStub {
public:
- explicit LoadDictionaryElementStub(Isolate* isolate, const LoadICState& state)
- : HydrogenCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return LoadWithVectorDescriptor(isolate());
- }
+ explicit LoadDictionaryElementStub(Isolate* isolate)
+ : HydrogenCodeStub(isolate) {}
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
};
class KeyedLoadGenericStub : public HydrogenCodeStub {
public:
- explicit KeyedLoadGenericStub(Isolate* isolate, const LoadICState& state)
- : HydrogenCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
+ explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
- InlineCacheState GetICState() const override { return GENERIC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
};
class LoadICTrampolineStub : public PlatformCodeStub {
public:
- LoadICTrampolineStub(Isolate* isolate, const LoadICState& state)
- : PlatformCodeStub(isolate) {
+ explicit LoadICTrampolineStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
+};
+
+class LoadICTrampolineTFStub : public TurboFanCodeStub {
+ public:
+ explicit LoadICTrampolineTFStub(Isolate* isolate)
+ : TurboFanCodeStub(isolate) {}
+
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+ DEFINE_CODE_STUB(LoadICTrampolineTF, TurboFanCodeStub);
+};
+
+class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
+ public:
+ explicit LoadGlobalICTrampolineStub(Isolate* isolate,
+ const LoadGlobalICState& state)
+ : TurboFanCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
- Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
- InlineCacheState GetICState() const final { return GENERIC; }
+ Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
- protected:
- LoadICState state() const {
- return LoadICState(static_cast<ExtraICState>(minor_key_));
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
- DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobal);
+ DEFINE_CODE_STUB(LoadGlobalICTrampoline, TurboFanCodeStub);
};
-
class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
public:
- explicit KeyedLoadICTrampolineStub(Isolate* isolate, const LoadICState& state)
- : LoadICTrampolineStub(isolate, state) {}
+ explicit KeyedLoadICTrampolineStub(Isolate* isolate)
+ : LoadICTrampolineStub(isolate) {}
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, LoadICTrampolineStub);
};
+class KeyedLoadICTrampolineTFStub : public LoadICTrampolineTFStub {
+ public:
+ explicit KeyedLoadICTrampolineTFStub(Isolate* isolate)
+ : LoadICTrampolineTFStub(isolate) {}
+
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
-class VectorStoreICTrampolineStub : public PlatformCodeStub {
+ Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
+
+ DEFINE_CODE_STUB(KeyedLoadICTrampolineTF, LoadICTrampolineTFStub);
+};
+
+class StoreICTrampolineStub : public PlatformCodeStub {
public:
- VectorStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
+ StoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
: PlatformCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
- InlineCacheState GetICState() const final { return GENERIC; }
-
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2287,20 +2488,18 @@ class VectorStoreICTrampolineStub : public PlatformCodeStub {
}
private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreICTrampoline);
- DEFINE_PLATFORM_CODE_STUB(VectorStoreICTrampoline, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+ DEFINE_PLATFORM_CODE_STUB(StoreICTrampoline, PlatformCodeStub);
};
-
-class VectorKeyedStoreICTrampolineStub : public VectorStoreICTrampolineStub {
+class KeyedStoreICTrampolineStub : public StoreICTrampolineStub {
public:
- VectorKeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
- : VectorStoreICTrampolineStub(isolate, state) {}
+ KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
+ : StoreICTrampolineStub(isolate, state) {}
Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
- DEFINE_PLATFORM_CODE_STUB(VectorKeyedStoreICTrampoline,
- VectorStoreICTrampolineStub);
+ DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, StoreICTrampolineStub);
};
@@ -2313,8 +2512,6 @@ class CallICTrampolineStub : public PlatformCodeStub {
Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
- InlineCacheState GetICState() const final { return GENERIC; }
-
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2331,18 +2528,11 @@ class CallICTrampolineStub : public PlatformCodeStub {
class LoadICStub : public PlatformCodeStub {
public:
- explicit LoadICStub(Isolate* isolate, const LoadICState& state)
- : PlatformCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
+ explicit LoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
- InlineCacheState GetICState() const final { return GENERIC; }
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
- }
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_PLATFORM_CODE_STUB(LoadIC, PlatformCodeStub);
@@ -2351,22 +2541,45 @@ class LoadICStub : public PlatformCodeStub {
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
+class LoadICTFStub : public TurboFanCodeStub {
+ public:
+ explicit LoadICTFStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-class KeyedLoadICStub : public PlatformCodeStub {
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
+ DEFINE_CODE_STUB(LoadICTF, TurboFanCodeStub);
+};
+
+class LoadGlobalICStub : public TurboFanCodeStub {
public:
- explicit KeyedLoadICStub(Isolate* isolate, const LoadICState& state)
- : PlatformCodeStub(isolate) {
+ explicit LoadGlobalICStub(Isolate* isolate, const LoadGlobalICState& state)
+ : TurboFanCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
- void GenerateForTrampoline(MacroAssembler* masm);
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
- Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
- InlineCacheState GetICState() const final { return GENERIC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobalWithVector);
+ DEFINE_CODE_STUB(LoadGlobalIC, TurboFanCodeStub);
+};
+
+class KeyedLoadICStub : public PlatformCodeStub {
+ public:
+ explicit KeyedLoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ void GenerateForTrampoline(MacroAssembler* masm);
+
+ Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
+
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_PLATFORM_CODE_STUB(KeyedLoadIC, PlatformCodeStub);
@@ -2374,10 +2587,20 @@ class KeyedLoadICStub : public PlatformCodeStub {
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
+class KeyedLoadICTFStub : public LoadICTFStub {
+ public:
+ explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICTFStub(isolate) {}
-class VectorStoreICStub : public PlatformCodeStub {
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
+
+ DEFINE_CODE_STUB(KeyedLoadICTF, LoadICTFStub);
+};
+
+class StoreICStub : public PlatformCodeStub {
public:
- VectorStoreICStub(Isolate* isolate, const StoreICState& state)
+ StoreICStub(Isolate* isolate, const StoreICState& state)
: PlatformCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
@@ -2385,22 +2608,21 @@ class VectorStoreICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const final { return Code::STORE_IC; }
- InlineCacheState GetICState() const final { return GENERIC; }
+
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreIC);
- DEFINE_PLATFORM_CODE_STUB(VectorStoreIC, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
+ DEFINE_PLATFORM_CODE_STUB(StoreIC, PlatformCodeStub);
protected:
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
-
-class VectorKeyedStoreICStub : public PlatformCodeStub {
+class KeyedStoreICStub : public PlatformCodeStub {
public:
- VectorKeyedStoreICStub(Isolate* isolate, const StoreICState& state)
+ KeyedStoreICStub(Isolate* isolate, const StoreICState& state)
: PlatformCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
@@ -2408,13 +2630,13 @@ class VectorKeyedStoreICStub : public PlatformCodeStub {
void GenerateForTrampoline(MacroAssembler* masm);
Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }
- InlineCacheState GetICState() const final { return GENERIC; }
+
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreIC);
- DEFINE_PLATFORM_CODE_STUB(VectorKeyedStoreIC, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
+ DEFINE_PLATFORM_CODE_STUB(KeyedStoreIC, PlatformCodeStub);
protected:
void GenerateImpl(MacroAssembler* masm, bool in_frame);
@@ -2492,13 +2714,11 @@ class ScriptContextFieldStub : public HandlerStub {
private:
static const int kContextIndexBits = 9;
- static const int kSlotIndexBits = 13;
+ static const int kSlotIndexBits = 12;
class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
class SlotIndexBits
: public BitField<int, kContextIndexBits, kSlotIndexBits> {};
- Code::StubType GetStubType() const override { return Code::FAST; }
-
DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, HandlerStub);
};
@@ -2512,6 +2732,7 @@ class LoadScriptContextFieldStub : public ScriptContextFieldStub {
private:
Code::Kind kind() const override { return Code::LOAD_IC; }
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HANDLER_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
};
@@ -2525,6 +2746,7 @@ class StoreScriptContextFieldStub : public ScriptContextFieldStub {
private:
Code::Kind kind() const override { return Code::STORE_IC; }
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_HANDLER_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
};
@@ -2557,6 +2779,7 @@ class LoadFastElementStub : public HandlerStub {
class IsJSArrayBits: public BitField<bool, 8, 1> {};
class CanConvertHoleToUndefined : public BitField<bool, 9, 1> {};
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
DEFINE_HANDLER_CODE_STUB(LoadFastElement, HandlerStub);
};
@@ -2583,29 +2806,25 @@ class StoreFastElementStub : public HydrogenCodeStub {
return CommonStoreModeBits::decode(sub_minor_key());
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return VectorStoreICDescriptor(isolate());
- }
-
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
class IsJSArrayBits : public BitField<bool, 11, 1> {};
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
DEFINE_HYDROGEN_CODE_STUB(StoreFastElement, HydrogenCodeStub);
};
class TransitionElementsKindStub : public HydrogenCodeStub {
public:
- TransitionElementsKindStub(Isolate* isolate,
- ElementsKind from_kind,
- ElementsKind to_kind,
- bool is_js_array) : HydrogenCodeStub(isolate) {
+ TransitionElementsKindStub(Isolate* isolate, ElementsKind from_kind,
+ ElementsKind to_kind)
+ : HydrogenCodeStub(isolate) {
set_sub_minor_key(FromKindBits::encode(from_kind) |
- ToKindBits::encode(to_kind) |
- IsJSArrayBits::encode(is_js_array));
+ ToKindBits::encode(to_kind));
}
ElementsKind from_kind() const {
@@ -2614,12 +2833,9 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
ElementsKind to_kind() const { return ToKindBits::decode(sub_minor_key()); }
- bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
-
private:
class FromKindBits: public BitField<ElementsKind, 8, 8> {};
class ToKindBits: public BitField<ElementsKind, 0, 8> {};
- class IsJSArrayBits: public BitField<bool, 16, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(TransitionElementsKind);
DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
@@ -2631,24 +2847,12 @@ class AllocateHeapNumberStub : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
- void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
};
-class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
- public:
- explicit AllocateMutableHeapNumberStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
- void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
- DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
-};
-
#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \
class Allocate##Type##Stub : public TurboFanCodeStub { \
public: \
@@ -2656,8 +2860,7 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {} \
\
void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
- void GenerateAssembly( \
- compiler::CodeStubAssembler* assembler) const override; \
+ void GenerateAssembly(CodeStubAssembler* assembler) const override; \
\
DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type); \
DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub); \
@@ -2665,23 +2868,11 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
SIMD128_TYPES(SIMD128_ALLOC_STUB)
#undef SIMD128_ALLOC_STUB
-class AllocateInNewSpaceStub final : public HydrogenCodeStub {
- public:
- explicit AllocateInNewSpaceStub(Isolate* isolate)
- : HydrogenCodeStub(isolate) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateInNewSpace);
- DEFINE_HYDROGEN_CODE_STUB(AllocateInNewSpace, HydrogenCodeStub);
-};
-
-
-class ArrayConstructorStubBase : public HydrogenCodeStub {
- public:
- ArrayConstructorStubBase(Isolate* isolate,
- ElementsKind kind,
- AllocationSiteOverrideMode override_mode)
- : HydrogenCodeStub(isolate) {
+class CommonArrayConstructorStub : public TurboFanCodeStub {
+ protected:
+ CommonArrayConstructorStub(Isolate* isolate, ElementsKind kind,
+ AllocationSiteOverrideMode override_mode)
+ : TurboFanCodeStub(isolate) {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
@@ -2691,6 +2882,14 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
AllocationSiteOverrideModeBits::encode(override_mode));
}
+ void set_sub_minor_key(uint32_t key) { minor_key_ = key; }
+
+ uint32_t sub_minor_key() const { return minor_key_; }
+
+ CommonArrayConstructorStub(uint32_t key, Isolate* isolate)
+ : TurboFanCodeStub(key, isolate) {}
+
+ public:
ElementsKind elements_kind() const {
return ElementsKindBits::decode(sub_minor_key());
}
@@ -2701,147 +2900,95 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
static void GenerateStubsAheadOfTime(Isolate* isolate);
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kConstructor = 0;
- static const int kAllocationSite = 1;
-
- protected:
- std::ostream& BasePrintName(std::ostream& os,
- const char* name) const; // NOLINT
-
private:
// Ensure data fits within available bits.
STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
- class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class AllocationSiteOverrideModeBits: public
- BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
-
- DEFINE_CODE_STUB_BASE(ArrayConstructorStubBase, HydrogenCodeStub);
+ class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
+ class AllocationSiteOverrideModeBits
+ : public BitField<AllocationSiteOverrideMode, 8, 1> {}; // NOLINT
};
-
-class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
+class ArrayNoArgumentConstructorStub : public CommonArrayConstructorStub {
public:
ArrayNoArgumentConstructorStub(
- Isolate* isolate,
- ElementsKind kind,
+ Isolate* isolate, ElementsKind kind,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(isolate, kind, override_mode) {
- }
+ : CommonArrayConstructorStub(isolate, kind, override_mode) {}
private:
void PrintName(std::ostream& os) const override { // NOLINT
- BasePrintName(os, "ArrayNoArgumentConstructorStub");
+ os << "ArrayNoArgumentConstructorStub";
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructorConstantArgCount);
- DEFINE_HYDROGEN_CODE_STUB(ArrayNoArgumentConstructor,
- ArrayConstructorStubBase);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNoArgumentConstructor);
+ DEFINE_TURBOFAN_CODE_STUB(ArrayNoArgumentConstructor,
+ CommonArrayConstructorStub);
};
-
-class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
+class InternalArrayNoArgumentConstructorStub
+ : public CommonArrayConstructorStub {
public:
- ArraySingleArgumentConstructorStub(
- Isolate* isolate,
- ElementsKind kind,
- AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(isolate, kind, override_mode) {
- }
+ InternalArrayNoArgumentConstructorStub(Isolate* isolate, ElementsKind kind)
+ : CommonArrayConstructorStub(isolate, kind, DONT_OVERRIDE) {}
private:
void PrintName(std::ostream& os) const override { // NOLINT
- BasePrintName(os, "ArraySingleArgumentConstructorStub");
+ os << "InternalArrayNoArgumentConstructorStub";
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
- DEFINE_HYDROGEN_CODE_STUB(ArraySingleArgumentConstructor,
- ArrayConstructorStubBase);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNoArgumentConstructor);
+ DEFINE_TURBOFAN_CODE_STUB(InternalArrayNoArgumentConstructor,
+ CommonArrayConstructorStub);
};
-
-class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
+class ArraySingleArgumentConstructorStub : public CommonArrayConstructorStub {
public:
- ArrayNArgumentsConstructorStub(
- Isolate* isolate,
- ElementsKind kind,
+ ArraySingleArgumentConstructorStub(
+ Isolate* isolate, ElementsKind kind,
AllocationSiteOverrideMode override_mode = DONT_OVERRIDE)
- : ArrayConstructorStubBase(isolate, kind, override_mode) {
- }
+ : CommonArrayConstructorStub(isolate, kind, override_mode) {}
private:
void PrintName(std::ostream& os) const override { // NOLINT
- BasePrintName(os, "ArrayNArgumentsConstructorStub");
+ os << "ArraySingleArgumentConstructorStub";
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
- DEFINE_HYDROGEN_CODE_STUB(ArrayNArgumentsConstructor,
- ArrayConstructorStubBase);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArraySingleArgumentConstructor);
+ DEFINE_TURBOFAN_CODE_STUB(ArraySingleArgumentConstructor,
+ CommonArrayConstructorStub);
};
-
-class InternalArrayConstructorStubBase : public HydrogenCodeStub {
+class InternalArraySingleArgumentConstructorStub
+ : public CommonArrayConstructorStub {
public:
- InternalArrayConstructorStubBase(Isolate* isolate, ElementsKind kind)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(ElementsKindBits::encode(kind));
- }
-
- static void GenerateStubsAheadOfTime(Isolate* isolate);
-
- // Parameters accessed via CodeStubGraphBuilder::GetParameter()
- static const int kConstructor = 0;
-
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(sub_minor_key());
- }
+ InternalArraySingleArgumentConstructorStub(Isolate* isolate,
+ ElementsKind kind)
+ : CommonArrayConstructorStub(isolate, kind, DONT_OVERRIDE) {}
private:
- class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
-
- DEFINE_CODE_STUB_BASE(InternalArrayConstructorStubBase, HydrogenCodeStub);
-};
-
-
-class InternalArrayNoArgumentConstructorStub : public
- InternalArrayConstructorStubBase {
- public:
- InternalArrayNoArgumentConstructorStub(Isolate* isolate,
- ElementsKind kind)
- : InternalArrayConstructorStubBase(isolate, kind) { }
+ void PrintName(std::ostream& os) const override { // NOLINT
+ os << "InternalArraySingleArgumentConstructorStub";
+ }
- DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructorConstantArgCount);
- DEFINE_HYDROGEN_CODE_STUB(InternalArrayNoArgumentConstructor,
- InternalArrayConstructorStubBase);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ArraySingleArgumentConstructor);
+ DEFINE_TURBOFAN_CODE_STUB(InternalArraySingleArgumentConstructor,
+ CommonArrayConstructorStub);
};
-
-class InternalArraySingleArgumentConstructorStub : public
- InternalArrayConstructorStubBase {
+class ArrayNArgumentsConstructorStub : public PlatformCodeStub {
public:
- InternalArraySingleArgumentConstructorStub(Isolate* isolate,
- ElementsKind kind)
- : InternalArrayConstructorStubBase(isolate, kind) { }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
- DEFINE_HYDROGEN_CODE_STUB(InternalArraySingleArgumentConstructor,
- InternalArrayConstructorStubBase);
-};
-
+ explicit ArrayNArgumentsConstructorStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
-class InternalArrayNArgumentsConstructorStub : public
- InternalArrayConstructorStubBase {
- public:
- InternalArrayNArgumentsConstructorStub(Isolate* isolate, ElementsKind kind)
- : InternalArrayConstructorStubBase(isolate, kind) { }
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+ return ArrayNArgumentsConstructorDescriptor(isolate());
+ }
- DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
- DEFINE_HYDROGEN_CODE_STUB(InternalArrayNArgumentsConstructor,
- InternalArrayConstructorStubBase);
+ private:
+ DEFINE_PLATFORM_CODE_STUB(ArrayNArgumentsConstructor, PlatformCodeStub);
};
-
class StoreElementStub : public PlatformCodeStub {
public:
StoreElementStub(Isolate* isolate, ElementsKind elements_kind,
@@ -2855,10 +3002,11 @@ class StoreElementStub : public PlatformCodeStub {
}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return VectorStoreICDescriptor(isolate());
+ return StoreWithVectorDescriptor(isolate());
}
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
ElementsKind elements_kind() const {
@@ -2894,7 +3042,7 @@ class ToBooleanICStub : public HydrogenCodeStub {
Types() : EnumSet<Type, uint16_t>(0) {}
explicit Types(uint16_t bits) : EnumSet<Type, uint16_t>(bits) {}
- bool UpdateStatus(Handle<Object> object);
+ bool UpdateStatus(Isolate* isolate, Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const {
return Contains(ToBooleanICStub::SPEC_OBJECT);
@@ -2923,7 +3071,7 @@ class ToBooleanICStub : public HydrogenCodeStub {
ExtraICState GetExtraICState() const override { return types().ToIntegral(); }
- InlineCacheState GetICState() const override {
+ InlineCacheState GetICState() const {
if (types().IsEmpty()) {
return ::v8::internal::UNINITIALIZED;
} else {
@@ -2961,14 +3109,15 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
return CommonStoreModeBits::decode(sub_minor_key());
}
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
private:
class FromBits : public BitField<ElementsKind, 3, 8> {};
class ToBits : public BitField<ElementsKind, 11, 8> {};
class IsJSArrayBits : public BitField<bool, 19, 1> {};
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorStoreTransition);
DEFINE_HYDROGEN_CODE_STUB(ElementsTransitionAndStore, HydrogenCodeStub);
};
@@ -3040,37 +3189,10 @@ class SubStringStub : public PlatformCodeStub {
public:
explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+ DEFINE_ON_STACK_CALL_INTERFACE_DESCRIPTOR(3);
DEFINE_PLATFORM_CODE_STUB(SubString, PlatformCodeStub);
};
-
-class ToNumberStub final : public PlatformCodeStub {
- public:
- explicit ToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_PLATFORM_CODE_STUB(ToNumber, PlatformCodeStub);
-};
-
-class NonNumberToNumberStub final : public PlatformCodeStub {
- public:
- explicit NonNumberToNumberStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_PLATFORM_CODE_STUB(NonNumberToNumber, PlatformCodeStub);
-};
-
-class StringToNumberStub final : public PlatformCodeStub {
- public:
- explicit StringToNumberStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_PLATFORM_CODE_STUB(StringToNumber, PlatformCodeStub);
-};
-
-
class ToStringStub final : public PlatformCodeStub {
public:
explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -3079,7 +3201,6 @@ class ToStringStub final : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
};
-
class ToNameStub final : public PlatformCodeStub {
public:
explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
@@ -3088,13 +3209,12 @@ class ToNameStub final : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
};
-
-class ToObjectStub final : public HydrogenCodeStub {
+class ToObjectStub final : public TurboFanCodeStub {
public:
- explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+ explicit ToObjectStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_HYDROGEN_CODE_STUB(ToObject, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(ToObject, TurboFanCodeStub);
};
#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 692fa64bb6..e47db10f70 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -7,12 +7,15 @@
#if defined(V8_OS_AIX)
#include <fenv.h> // NOLINT(build/c++11)
#endif
+
+#include <memory>
+
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/eh-frame.h"
#include "src/parsing/parser.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -61,7 +64,6 @@ double modulo(double x, double y) {
}
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
-UNARY_MATH_FUNCTION(exp, CreateExpFunction)
#undef UNARY_MATH_FUNCTION
@@ -86,32 +88,24 @@ Comment::~Comment() {
void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
- bool print_source = false;
bool print_ast = false;
const char* ftype;
if (info->isolate()->bootstrapper()->IsActive()) {
- print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast;
ftype = "builtin";
} else {
- print_source = FLAG_print_source;
print_ast = FLAG_print_ast;
ftype = "user-defined";
}
- if (FLAG_trace_codegen || print_source || print_ast) {
- base::SmartArrayPointer<char> name = info->GetDebugName();
+ if (FLAG_trace_codegen || print_ast) {
+ std::unique_ptr<char[]> name = info->GetDebugName();
PrintF("[generating %s code for %s function: %s]\n", kind, ftype,
name.get());
}
#ifdef DEBUG
- if (info->parse_info() && print_source) {
- PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(info->isolate()).PrintProgram(info->literal()));
- }
-
if (info->parse_info() && print_ast) {
PrintF("--- AST ---\n%s\n",
AstPrinter(info->isolate()).PrintProgram(info->literal()));
@@ -119,9 +113,10 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
#endif // DEBUG
}
-
Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
- CompilationInfo* info) {
+ EhFrameWriter* eh_frame_writer,
+ CompilationInfo* info,
+ Handle<Object> self_reference) {
Isolate* isolate = info->isolate();
// Allocate and install the code.
@@ -131,11 +126,11 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Code::ExtractKindFromFlags(flags) == Code::OPTIMIZED_FUNCTION ||
info->IsStub();
masm->GetCode(&desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
- false, is_crankshafted,
- info->prologue_offset(),
- info->is_debug() && !is_crankshafted);
+ if (eh_frame_writer) eh_frame_writer->GetEhFrame(&desc);
+
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, flags, self_reference, false, is_crankshafted,
+ info->prologue_offset(), info->is_debug() && !is_crankshafted);
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
@@ -147,13 +142,14 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
AllowDeferredHandleDereference allow_deference_for_print_code;
- bool print_code = info->isolate()->bootstrapper()->IsActive()
- ? FLAG_print_builtin_code
- : (FLAG_print_code ||
- (info->IsStub() && FLAG_print_code_stubs) ||
- (info->IsOptimizing() && FLAG_print_opt_code));
+ Isolate* isolate = info->isolate();
+ bool print_code =
+ isolate->bootstrapper()->IsActive()
+ ? FLAG_print_builtin_code
+ : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
+ (info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
- base::SmartArrayPointer<char> debug_name = info->GetDebugName();
+ std::unique_ptr<char[]> debug_name = info->GetDebugName();
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
@@ -162,16 +158,16 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
info->parse_info() && (code->kind() == Code::OPTIMIZED_FUNCTION ||
code->kind() == Code::FUNCTION);
if (print_source) {
- FunctionLiteral* literal = info->literal();
+ Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ if (!script->IsUndefined(isolate) &&
+ !script->source()->IsUndefined(isolate)) {
os << "--- Raw source ---\n";
StringCharacterStream stream(String::cast(script->source()),
- literal->start_position());
+ shared->start_position());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
- int source_len =
- literal->end_position() - literal->start_position() + 1;
+ int source_len = shared->end_position() - shared->start_position() + 1;
for (int i = 0; i < source_len; i++) {
if (stream.HasMore()) {
os << AsReversiblyEscapedUC16(stream.GetNext());
@@ -191,8 +187,8 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
os << "--- Code ---\n";
}
if (print_source) {
- FunctionLiteral* literal = info->literal();
- os << "source_position = " << literal->start_position() << "\n";
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ os << "source_position = " << shared->start_position() << "\n";
}
code->Disassemble(debug_name.get(), os);
os << "--- End code ---\n";
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index f941696774..d0b67f1f45 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -69,7 +69,7 @@ namespace internal {
class CompilationInfo;
-
+class EhFrameWriter;
class CodeGenerator {
public:
@@ -78,7 +78,9 @@ class CodeGenerator {
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- CompilationInfo* info);
+ EhFrameWriter* unwinding,
+ CompilationInfo* info,
+ Handle<Object> self_reference);
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
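A hypothetical call site for the widened MakeCodeEpilogue(): the implementation above guards with `if (eh_frame_writer)`, so callers that emit no unwinding info can pass nullptr, and the self-reference that used to be taken from the MacroAssembler is now supplied explicitly. Sketch only, reproducing the old behavior:

    Handle<Code> code = CodeGenerator::MakeCodeEpilogue(
        masm, nullptr /* eh_frame_writer */, info, masm->CodeObject());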
@@ -93,16 +95,13 @@ class CodeGenerator {
// generated code both in runtime and compiled code.
typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate);
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
double modulo(double x, double y);
// Custom implementation of math functions.
-double fast_exp(double input, Isolate* isolate);
double fast_sqrt(double input, Isolate* isolate);
-void lazily_initialize_fast_exp(Isolate* isolate);
void lazily_initialize_fast_sqrt(Isolate* isolate);
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index aca8cee9ca..af9fbb5734 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -4,9 +4,9 @@
#include "src/compilation-cache.h"
-#include "src/assembler.h"
#include "src/counters.h"
#include "src/factory.h"
+#include "src/globals.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -41,7 +41,7 @@ CompilationCache::~CompilationCache() {}
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
DCHECK(generation < generations_);
Handle<CompilationCacheTable> result;
- if (tables_[generation]->IsUndefined()) {
+ if (tables_[generation]->IsUndefined(isolate())) {
result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
tables_[generation] = *result;
} else {
@@ -56,7 +56,7 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
void CompilationSubCache::Age() {
// Don't directly age single-generation caches.
if (generations_ == 1) {
- if (tables_[0] != isolate()->heap()->undefined_value()) {
+ if (!tables_[0]->IsUndefined(isolate())) {
CompilationCacheTable::cast(tables_[0])->Age();
}
return;
@@ -121,7 +121,7 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
// If the script name isn't set, the boilerplate script should have
// an undefined name to have the same origin.
if (name.is_null()) {
- return script->name()->IsUndefined();
+ return script->name()->IsUndefined(isolate());
}
// Do the fast bailout checks first.
if (line_offset != script->line_offset()) return false;
@@ -308,7 +308,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
result =
eval_global_.Lookup(source, outer_info, language_mode, scope_position);
} else {
- DCHECK(scope_position != RelocInfo::kNoPosition);
+ DCHECK(scope_position != kNoSourcePosition);
result = eval_contextual_.Lookup(source, outer_info, language_mode,
scope_position);
}
@@ -345,7 +345,7 @@ void CompilationCache::PutEval(Handle<String> source,
if (context->IsNativeContext()) {
eval_global_.Put(source, outer_info, function_info, scope_position);
} else {
- DCHECK(scope_position != RelocInfo::kNoPosition);
+ DCHECK(scope_position != kNoSourcePosition);
eval_contextual_.Put(source, outer_info, function_info, scope_position);
}
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 2295f4c685..973673c524 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -210,7 +210,7 @@ class CompilationCache {
explicit CompilationCache(Isolate* isolate);
~CompilationCache();
- HashMap* EagerOptimizingSet();
+ base::HashMap* EagerOptimizingSet();
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
diff --git a/deps/v8/src/compilation-statistics.cc b/deps/v8/src/compilation-statistics.cc
index ed568cba3f..d4ca39d611 100644
--- a/deps/v8/src/compilation-statistics.cc
+++ b/deps/v8/src/compilation-statistics.cc
@@ -54,8 +54,7 @@ void CompilationStatistics::BasicStats::Accumulate(const BasicStats& stats) {
}
}
-
-static void WriteLine(std::ostream& os, const char* name,
+static void WriteLine(std::ostream& os, bool machine_format, const char* name,
const CompilationStatistics::BasicStats& stats,
const CompilationStatistics::BasicStats& total_stats) {
const size_t kBufferSize = 128;
@@ -66,18 +65,24 @@ static void WriteLine(std::ostream& os, const char* name,
double size_percent =
static_cast<double>(stats.total_allocated_bytes_ * 100) /
static_cast<double>(total_stats.total_allocated_bytes_);
- base::OS::SNPrintF(buffer, kBufferSize,
- "%28s %10.3f (%5.1f%%) "
- "%10u (%5.1f%%) %10u %10u",
- name, ms, percent, stats.total_allocated_bytes_,
- size_percent, stats.max_allocated_bytes_,
- stats.absolute_max_allocated_bytes_);
-
- os << buffer;
- if (stats.function_name_.size() > 0) {
- os << " " << stats.function_name_.c_str();
+ if (machine_format) {
+ base::OS::SNPrintF(buffer, kBufferSize,
+ "\"%s_time\"=%.3f\n\"%s_space\"=%" PRIuS, name, ms, name,
+ stats.total_allocated_bytes_);
+ os << buffer;
+ } else {
+ base::OS::SNPrintF(buffer, kBufferSize, "%28s %10.3f (%5.1f%%) %10" PRIuS
+ " (%5.1f%%) %10" PRIuS " %10" PRIuS,
+ name, ms, percent, stats.total_allocated_bytes_,
+ size_percent, stats.max_allocated_bytes_,
+ stats.absolute_max_allocated_bytes_);
+
+ os << buffer;
+ if (stats.function_name_.size() > 0) {
+ os << " " << stats.function_name_.c_str();
+ }
+ os << std::endl;
}
- os << std::endl;
}
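Given the two SNPrintF format strings above, the machine format emits a pair of quoted key=value entries per phase instead of the aligned human-readable table. Sample output for one phase (the phase name here is hypothetical):

    "V8.GraphBuilding_time"=12.345
    "V8.GraphBuilding_space"=65536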
@@ -102,10 +107,10 @@ static void WritePhaseKindBreak(std::ostream& os) {
"--------------------------------------------------------\n";
}
-
-std::ostream& operator<<(std::ostream& os, const CompilationStatistics& s) {
+std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& ps) {
// phase_kind_map_ and phase_map_ don't get mutated, so store a bunch of
// pointers into them.
+ const CompilationStatistics& s = ps.s;
typedef std::vector<CompilationStatistics::PhaseKindMap::const_iterator>
SortedPhaseKinds;
@@ -122,22 +127,27 @@ std::ostream& operator<<(std::ostream& os, const CompilationStatistics& s) {
sorted_phases[it->second.insert_order_] = it;
}
- WriteHeader(os);
+ if (!ps.machine_output) WriteHeader(os);
for (auto phase_kind_it : sorted_phase_kinds) {
const auto& phase_kind_name = phase_kind_it->first;
- for (auto phase_it : sorted_phases) {
- const auto& phase_stats = phase_it->second;
- if (phase_stats.phase_kind_name_ != phase_kind_name) continue;
- const auto& phase_name = phase_it->first;
- WriteLine(os, phase_name.c_str(), phase_stats, s.total_stats_);
+ if (!ps.machine_output) {
+ for (auto phase_it : sorted_phases) {
+ const auto& phase_stats = phase_it->second;
+ if (phase_stats.phase_kind_name_ != phase_kind_name) continue;
+ const auto& phase_name = phase_it->first;
+ WriteLine(os, ps.machine_output, phase_name.c_str(), phase_stats,
+ s.total_stats_);
+ }
+ WritePhaseKindBreak(os);
}
- WritePhaseKindBreak(os);
const auto& phase_kind_stats = phase_kind_it->second;
- WriteLine(os, phase_kind_name.c_str(), phase_kind_stats, s.total_stats_);
+ WriteLine(os, ps.machine_output, phase_kind_name.c_str(), phase_kind_stats,
+ s.total_stats_);
os << std::endl;
}
- WriteFullLine(os);
- WriteLine(os, "totals", s.total_stats_, s.total_stats_);
+
+ if (!ps.machine_output) WriteFullLine(os);
+ WriteLine(os, ps.machine_output, "totals", s.total_stats_, s.total_stats_);
return os;
}
diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/compilation-statistics.h
index 6219180197..ceffc2ebc1 100644
--- a/deps/v8/src/compilation-statistics.h
+++ b/deps/v8/src/compilation-statistics.h
@@ -15,6 +15,12 @@ namespace v8 {
namespace internal {
class CompilationInfo;
+class CompilationStatistics;
+
+struct AsPrintableStatistics {
+ const CompilationStatistics& s;
+ const bool machine_output;
+};
class CompilationStatistics final : public Malloced {
public:
@@ -65,7 +71,7 @@ class CompilationStatistics final : public Malloced {
};
friend std::ostream& operator<<(std::ostream& os,
- const CompilationStatistics& s);
+ const AsPrintableStatistics& s);
typedef OrderedStats PhaseKindStats;
typedef std::map<std::string, PhaseKindStats> PhaseKindMap;
@@ -78,7 +84,7 @@ class CompilationStatistics final : public Malloced {
DISALLOW_COPY_AND_ASSIGN(CompilationStatistics);
};
-std::ostream& operator<<(std::ostream& os, const CompilationStatistics& s);
+std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& s);
} // namespace internal
} // namespace v8
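Since operator<< is now declared for AsPrintableStatistics rather than CompilationStatistics, callers choose the output flavor at the call site. Illustrative usage only, where `stats` stands in for an existing CompilationStatistics instance:

    // The aggregate braces bind the reference and the flag in one step.
    std::cout << AsPrintableStatistics{stats, /* machine_output */ true};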
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
new file mode 100644
index 0000000000..923793665a
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -0,0 +1,185 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
+
+#include "src/assert-scope.h"
+#include "src/global-handles.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/scanner-character-streams.h"
+#include "src/unicode-cache.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+CompilerDispatcherJob::CompilerDispatcherJob(Isolate* isolate,
+ Handle<JSFunction> function,
+ size_t max_stack_size)
+ : isolate_(isolate),
+ function_(Handle<JSFunction>::cast(
+ isolate_->global_handles()->Create(*function))),
+ max_stack_size_(max_stack_size) {
+ HandleScope scope(isolate_);
+ Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
+ Handle<Script> script(Script::cast(shared->script()), isolate_);
+ Handle<String> source(String::cast(script->source()), isolate_);
+ can_parse_on_background_thread_ =
+ source->IsExternalTwoByteString() || source->IsExternalOneByteString();
+}
+
+CompilerDispatcherJob::~CompilerDispatcherJob() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ DCHECK(status_ == CompileJobStatus::kInitial ||
+ status_ == CompileJobStatus::kDone);
+ i::GlobalHandles::Destroy(Handle<Object>::cast(function_).location());
+}
+
+void CompilerDispatcherJob::PrepareToParseOnMainThread() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ DCHECK(status() == CompileJobStatus::kInitial);
+ HandleScope scope(isolate_);
+ unicode_cache_.reset(new UnicodeCache());
+ zone_.reset(new Zone(isolate_->allocator()));
+ Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
+ Handle<Script> script(Script::cast(shared->script()), isolate_);
+ DCHECK(script->type() != Script::TYPE_NATIVE);
+
+ Handle<String> source(String::cast(script->source()), isolate_);
+ if (source->IsExternalTwoByteString()) {
+ character_stream_.reset(new ExternalTwoByteStringUtf16CharacterStream(
+ Handle<ExternalTwoByteString>::cast(source), shared->start_position(),
+ shared->end_position()));
+ } else if (source->IsExternalOneByteString()) {
+ character_stream_.reset(new ExternalOneByteStringUtf16CharacterStream(
+ Handle<ExternalOneByteString>::cast(source), shared->start_position(),
+ shared->end_position()));
+ } else {
+ source = String::Flatten(source);
+ // Have to globalize the reference here, so it survives between function
+ // calls.
+ source_ = Handle<String>::cast(isolate_->global_handles()->Create(*source));
+ character_stream_.reset(new GenericStringUtf16CharacterStream(
+ source_, shared->start_position(), shared->end_position()));
+ }
+ parse_info_.reset(new ParseInfo(zone_.get()));
+ parse_info_->set_isolate(isolate_);
+ parse_info_->set_character_stream(character_stream_.get());
+ parse_info_->set_lazy();
+ parse_info_->set_hash_seed(isolate_->heap()->HashSeed());
+ parse_info_->set_is_named_expression(shared->is_named_expression());
+ parse_info_->set_calls_eval(shared->scope_info()->CallsEval());
+ parse_info_->set_compiler_hints(shared->compiler_hints());
+ parse_info_->set_start_position(shared->start_position());
+ parse_info_->set_end_position(shared->end_position());
+ parse_info_->set_unicode_cache(unicode_cache_.get());
+ parse_info_->set_language_mode(shared->language_mode());
+
+ parser_.reset(new Parser(parse_info_.get()));
+ parser_->DeserializeScopeChain(
+ parse_info_.get(), handle(function_->context(), isolate_),
+ Scope::DeserializationMode::kDeserializeOffHeap);
+
+ Handle<String> name(String::cast(shared->name()));
+ parse_info_->set_function_name(
+ parse_info_->ast_value_factory()->GetString(name));
+ status_ = CompileJobStatus::kReadyToParse;
+}
+
+void CompilerDispatcherJob::Parse() {
+ DCHECK(can_parse_on_background_thread_ ||
+ ThreadId::Current().Equals(isolate_->thread_id()));
+ DCHECK(status() == CompileJobStatus::kReadyToParse);
+
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ std::unique_ptr<DisallowHandleDereference> no_deref;
+ // If we can't parse on a background thread, we need to be able to deref the
+ // source string.
+ if (can_parse_on_background_thread_) {
+ no_deref.reset(new DisallowHandleDereference());
+ }
+
+ // Nullify the Isolate temporarily so that the parser doesn't accidentally
+ // use it.
+ parse_info_->set_isolate(nullptr);
+
+ uintptr_t stack_limit =
+ reinterpret_cast<uintptr_t>(&stack_limit) - max_stack_size_ * KB;
+
+ parser_->set_stack_limit(stack_limit);
+ parser_->ParseOnBackground(parse_info_.get());
+
+ parse_info_->set_isolate(isolate_);
+
+ status_ = CompileJobStatus::kParsed;
+}
+
+bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ DCHECK(status() == CompileJobStatus::kParsed);
+
+ if (!source_.is_null()) {
+ i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
+ source_ = Handle<String>::null();
+ }
+
+ if (parse_info_->literal() == nullptr) {
+ status_ = CompileJobStatus::kFailed;
+ } else {
+ status_ = CompileJobStatus::kReadyToCompile;
+ }
+
+ DeferredHandleScope scope(isolate_);
+ {
+ // Create a canonical handle scope before internalizing parsed values if
+ // compiling bytecode. This is required for off-thread bytecode generation.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate_));
+
+ Handle<SharedFunctionInfo> shared(function_->shared(), isolate_);
+ Handle<Script> script(Script::cast(shared->script()), isolate_);
+
+ parse_info_->set_script(script);
+ parse_info_->set_context(handle(function_->context(), isolate_));
+
+ // Do the parsing tasks which need to be done on the main thread. This will
+ // also handle parse errors.
+ parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
+ parser_->HandleSourceURLComments(isolate_, script);
+
+ parse_info_->set_character_stream(nullptr);
+ parse_info_->set_unicode_cache(nullptr);
+ parser_.reset();
+ unicode_cache_.reset();
+ character_stream_.reset();
+ }
+ handles_from_parsing_.reset(scope.Detach());
+
+ return status_ != CompileJobStatus::kFailed;
+}
+
+void CompilerDispatcherJob::ResetOnMainThread() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+
+ parser_.reset();
+ unicode_cache_.reset();
+ character_stream_.reset();
+ parse_info_.reset();
+ zone_.reset();
+ handles_from_parsing_.reset();
+
+ if (!source_.is_null()) {
+ i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
+ source_ = Handle<String>::null();
+ }
+
+ status_ = CompileJobStatus::kInitial;
+}
+
+} // namespace internal
+} // namespace v8
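Parse() above derives its stack limit from the address of one of its own locals. A runnable standalone sketch of the same trick, assuming a downward-growing stack (which the subtraction already presumes):

    #include <cstddef>
    #include <cstdint>

    // The address of a fresh local approximates the current stack pointer,
    // so "here minus N KB" is a usable recursion limit for this thread.
    // Mirrors: reinterpret_cast<uintptr_t>(&stack_limit) - max_stack_size_ * KB.
    uintptr_t ComputeStackLimit(size_t max_stack_size_kb) {
      uintptr_t marker = 0;
      return reinterpret_cast<uintptr_t>(&marker) - max_stack_size_kb * 1024;
    }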
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
new file mode 100644
index 0000000000..50414af639
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -0,0 +1,85 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_JOB_H_
+#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_JOB_H_
+
+#include <memory>
+
+#include "src/base/macros.h"
+#include "src/handles.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+namespace v8 {
+namespace internal {
+
+class CompilationInfo;
+class Isolate;
+class JSFunction;
+class ParseInfo;
+class Parser;
+class String;
+class UnicodeCache;
+class Utf16CharacterStream;
+class Zone;
+
+enum class CompileJobStatus {
+ kInitial,
+ kReadyToParse,
+ kParsed,
+ kReadyToCompile,
+ kFailed,
+ kDone,
+};
+
+class CompilerDispatcherJob {
+ public:
+ CompilerDispatcherJob(Isolate* isolate, Handle<JSFunction> function,
+ size_t max_stack_size);
+ ~CompilerDispatcherJob();
+
+ CompileJobStatus status() const { return status_; }
+ bool can_parse_on_background_thread() const {
+ return can_parse_on_background_thread_;
+ }
+
+ // Transition from kInitial to kReadyToParse.
+ void PrepareToParseOnMainThread();
+
+ // Transition from kReadyToParse to kParsed.
+ void Parse();
+
+ // Transition from kParsed to kReadyToCompile (or kFailed). Returns false
+ // when transitioning to kFailed. In that case, an exception is pending.
+ bool FinalizeParsingOnMainThread();
+
+ // Transition from any state to kInitial and free all resources.
+ void ResetOnMainThread();
+
+ private:
+ FRIEND_TEST(CompilerDispatcherJobTest, ScopeChain);
+
+ CompileJobStatus status_ = CompileJobStatus::kInitial;
+ Isolate* isolate_;
+ Handle<JSFunction> function_; // Global handle.
+ Handle<String> source_; // Global handle.
+ size_t max_stack_size_;
+
+ // Members required for parsing.
+ std::unique_ptr<UnicodeCache> unicode_cache_;
+ std::unique_ptr<Zone> zone_;
+ std::unique_ptr<Utf16CharacterStream> character_stream_;
+ std::unique_ptr<ParseInfo> parse_info_;
+ std::unique_ptr<Parser> parser_;
+ std::unique_ptr<DeferredHandles> handles_from_parsing_;
+
+ bool can_parse_on_background_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherJob);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_JOB_H_
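A hypothetical driver for the new job type, following the state transitions documented in the comments above (kInitial -> kReadyToParse -> kParsed -> kReadyToCompile or kFailed). It will not compile outside V8; it is shown only to make the threading contract explicit:

    void RunJobToReadyToCompile(Isolate* isolate, Handle<JSFunction> function) {
      CompilerDispatcherJob job(isolate, function, 64 /* max_stack_size, in KB */);
      job.PrepareToParseOnMainThread();          // kInitial -> kReadyToParse
      // Parse() may run on a background thread, but only when the source is
      // an external one- or two-byte string (can_parse_on_background_thread()).
      job.Parse();                               // kReadyToParse -> kParsed
      if (!job.FinalizeParsingOnMainThread()) {  // -> kReadyToCompile or kFailed
        // kFailed: an exception is pending on the isolate.
      }
      job.ResetOnMainThread();                   // any state -> kInitial
    }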
diff --git a/deps/v8/src/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index ed202242ba..be81047976 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/optimizing-compile-dispatcher.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
@@ -15,20 +15,20 @@ namespace internal {
namespace {
-void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
- bool restore_function_code) {
- // The recompile job is allocated in the CompilationInfo's zone.
- CompilationInfo* info = job->info();
+void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
if (restore_function_code) {
- Handle<JSFunction> function = info->closure();
+ Handle<JSFunction> function = job->info()->closure();
function->ReplaceCode(function->shared()->code());
+    // TODO(mvstanton): We can't call EnsureLiterals here due to allocation,
+ // but we probably shouldn't call ReplaceCode either, as this
+ // sometimes runs on the worker thread!
+ // JSFunction::EnsureLiterals(function);
}
- delete info;
+ delete job;
}
} // namespace
-
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
public:
explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
@@ -51,7 +51,9 @@ class OptimizingCompileDispatcher::CompileTask : public v8::Task {
isolate_->optimizing_compile_dispatcher();
{
TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
- TRACE_EVENT0("v8", "V8.RecompileConcurrent");
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.RecompileConcurrent");
if (dispatcher->recompilation_delay_ != 0) {
base::OS::Sleep(base::TimeDelta::FromMilliseconds(
@@ -73,7 +75,6 @@ class OptimizingCompileDispatcher::CompileTask : public v8::Task {
DISALLOW_COPY_AND_ASSIGN(CompileTask);
};
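The recompilation trace event above moves into a disabled-by-default category. In the trace-event header vendored in this tree (base/trace_event/common/trace_event_common.h), the wrapper is essentially a string-prefix macro:

    #define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
    // So the event lands in category "disabled-by-default-v8.compile" and is
    // recorded only when that category is explicitly enabled for tracing.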
-
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
{
@@ -85,35 +86,29 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
DeleteArray(input_queue_);
}
-
-OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
- bool check_if_flushing) {
+CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return NULL;
- OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
+ CompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
if (check_if_flushing) {
if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
- if (!job->info()->is_osr()) {
- AllowHandleDereference allow_handle_dereference;
- DisposeOptimizedCompileJob(job, true);
- }
+ AllowHandleDereference allow_handle_dereference;
+ DisposeCompilationJob(job, true);
return NULL;
}
}
return job;
}
-
-void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
+void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
- OptimizedCompileJob::Status status = job->OptimizeGraph();
- USE(status); // Prevent an unused-variable error in release mode.
- DCHECK(status != OptimizedCompileJob::FAILED);
+ CompilationJob::Status status = job->ExecuteJob();
+ USE(status); // Prevent an unused-variable error.
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
@@ -123,10 +118,9 @@ void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
isolate_->stack_guard()->RequestInstallCode();
}
-
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
for (;;) {
- OptimizedCompileJob* job = NULL;
+ CompilationJob* job = NULL;
{
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
@@ -134,14 +128,10 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
output_queue_.pop();
}
- // OSR jobs are dealt with separately.
- if (!job->info()->is_osr()) {
- DisposeOptimizedCompileJob(job, restore_function_code);
- }
+ DisposeCompilationJob(job, restore_function_code);
}
}
-
void OptimizingCompileDispatcher::Flush() {
base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
@@ -156,7 +146,6 @@ void OptimizingCompileDispatcher::Flush() {
}
}
-
void OptimizingCompileDispatcher::Stop() {
base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
@@ -176,12 +165,11 @@ void OptimizingCompileDispatcher::Stop() {
}
}
-
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
for (;;) {
- OptimizedCompileJob* job = NULL;
+ CompilationJob* job = NULL;
{
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
@@ -196,16 +184,14 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
function->ShortPrint();
PrintF(" as it has already been optimized.\n");
}
- DisposeOptimizedCompileJob(job, false);
+ DisposeCompilationJob(job, false);
} else {
- Compiler::FinalizeOptimizedCompileJob(job);
+ Compiler::FinalizeCompilationJob(job);
}
}
}
-
-void OptimizingCompileDispatcher::QueueForOptimization(
- OptimizedCompileJob* job) {
+void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
DCHECK(IsQueueAvailable());
{
// Add job to the back of the input queue.
@@ -222,7 +208,6 @@ void OptimizingCompileDispatcher::QueueForOptimization(
}
}
-
void OptimizingCompileDispatcher::Unblock() {
while (blocked_jobs_ > 0) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -231,6 +216,5 @@ void OptimizingCompileDispatcher::Unblock() {
}
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index e14e8aafbc..8c032ab320 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OPTIMIZING_COMPILE_DISPATCHER_H_
-#define V8_OPTIMIZING_COMPILE_DISPATCHER_H_
+#ifndef V8_COMPILER_DISPATCHER_OPTIMIZING_COMPILE_DISPATCHER_H_
+#define V8_COMPILER_DISPATCHER_OPTIMIZING_COMPILE_DISPATCHER_H_
#include <queue>
@@ -17,8 +17,7 @@
namespace v8 {
namespace internal {
-class HOptimizedGraphBuilder;
-class OptimizedCompileJob;
+class CompilationJob;
class SharedFunctionInfo;
class OptimizingCompileDispatcher {
@@ -32,7 +31,7 @@ class OptimizingCompileDispatcher {
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
base::NoBarrier_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
- input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
+ input_queue_ = NewArray<CompilationJob*>(input_queue_capacity_);
}
~OptimizingCompileDispatcher();
@@ -40,7 +39,7 @@ class OptimizingCompileDispatcher {
void Run();
void Stop();
void Flush();
- void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
+ void QueueForOptimization(CompilationJob* job);
void Unblock();
void InstallOptimizedFunctions();
@@ -57,8 +56,8 @@ class OptimizingCompileDispatcher {
enum ModeFlag { COMPILE, FLUSH };
void FlushOutputQueue(bool restore_function_code);
- void CompileNext(OptimizedCompileJob* job);
- OptimizedCompileJob* NextInput(bool check_if_flushing = false);
+ void CompileNext(CompilationJob* job);
+ CompilationJob* NextInput(bool check_if_flushing = false);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
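The input queue is a fixed-capacity ring buffer: logical slot i maps to physical slot (i + shift) % capacity, and NextInput() pops the front by advancing the shift (see input_queue_shift_ = InputQueueIndex(1) above). A runnable standalone sketch of the indexing:

    #include <cassert>

    int main() {
      const int capacity = 8;
      int shift = 6;  // physical index of logical slot 0
      auto index = [&](int i) { return (i + shift) % capacity; };
      assert(index(0) == 6);
      assert(index(2) == 0);  // wraps past the end of the backing array
      shift = index(1);       // pop front: logical slot 1 becomes the new front
      assert(index(0) == 7);
      return 0;
    }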
@@ -70,14 +69,14 @@ class OptimizingCompileDispatcher {
Isolate* isolate_;
// Circular queue of incoming recompilation tasks (including OSR).
- OptimizedCompileJob** input_queue_;
+ CompilationJob** input_queue_;
int input_queue_capacity_;
int input_queue_length_;
int input_queue_shift_;
base::Mutex input_queue_mutex_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
- std::queue<OptimizedCompileJob*> output_queue_;
+ std::queue<CompilationJob*> output_queue_;
// Used for job based recompilation which has multiple producers on
// different threads.
base::Mutex output_queue_mutex_;
@@ -100,4 +99,4 @@ class OptimizingCompileDispatcher {
} // namespace internal
} // namespace v8
-#endif // V8_OPTIMIZING_COMPILE_DISPATCHER_H_
+#endif // V8_COMPILER_DISPATCHER_OPTIMIZING_COMPILE_DISPATCHER_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 8bb53323ab..9a5afe99da 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -5,23 +5,26 @@
#include "src/compiler.h"
#include <algorithm>
+#include <memory>
+#include "src/asmjs/asm-js.h"
+#include "src/asmjs/asm-typer.h"
#include "src/ast/ast-numbering.h"
#include "src/ast/prettyprinter.h"
-#include "src/ast/scopeinfo.h"
#include "src/ast/scopes.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
#include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium.h"
-#include "src/crankshaft/typing.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
+#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/gdb-jit.h"
+#include "src/globals.h"
+#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/log-inl.h"
@@ -29,7 +32,6 @@
#include "src/parsing/parser.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
#include "src/vm-state-inl.h"
@@ -52,15 +54,10 @@ namespace internal {
PARSE_INFO_GETTER(Handle<Script>, script)
-PARSE_INFO_GETTER(bool, is_eval)
-PARSE_INFO_GETTER(bool, is_native)
-PARSE_INFO_GETTER(bool, is_module)
PARSE_INFO_GETTER(FunctionLiteral*, literal)
-PARSE_INFO_GETTER_WITH_DEFAULT(LanguageMode, language_mode, STRICT)
-PARSE_INFO_GETTER_WITH_DEFAULT(Handle<JSFunction>, closure,
- Handle<JSFunction>::null())
-PARSE_INFO_GETTER_WITH_DEFAULT(Scope*, scope, nullptr)
-PARSE_INFO_GETTER(Handle<Context>, context)
+PARSE_INFO_GETTER_WITH_DEFAULT(DeclarationScope*, scope, nullptr)
+PARSE_INFO_GETTER_WITH_DEFAULT(Handle<Context>, context,
+ Handle<Context>::null())
PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
#undef PARSE_INFO_GETTER
@@ -80,26 +77,17 @@ class CompilationHandleScope BASE_EMBEDDED {
CompilationInfo* info_;
};
-// Exactly like a CompilationInfo, except being allocated via {new} and it also
-// creates and enters a Zone on construction and deallocates it on destruction.
-class CompilationInfoWithZone : public CompilationInfo {
- public:
- explicit CompilationInfoWithZone(Handle<JSFunction> function)
- : CompilationInfo(new ParseInfo(&zone_, function)),
- zone_(function->GetIsolate()->allocator()) {}
-
- // Virtual destructor because a CompilationInfoWithZone has to exit the
- // zone scope and get rid of dependent maps even when the destructor is
- // called when cast as a CompilationInfo.
- virtual ~CompilationInfoWithZone() {
- DisableFutureOptimization();
- dependencies()->Rollback();
- delete parse_info_;
- parse_info_ = nullptr;
+// Helper that times a scoped region and records the elapsed time.
+struct ScopedTimer {
+ explicit ScopedTimer(base::TimeDelta* location) : location_(location) {
+ DCHECK(location_ != NULL);
+ timer_.Start();
}
- private:
- Zone zone_;
+ ~ScopedTimer() { *location_ += timer_.Elapsed(); }
+
+ base::ElapsedTimer timer_;
+ base::TimeDelta* location_;
};
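ScopedTimer accumulates a scope's elapsed time into the TimeDelta it is given, presumably replacing ad-hoc ElapsedTimer bookkeeping like the FLAG_hydrogen_stats timing removed further down. Hypothetical usage (the field name is a placeholder):

    base::TimeDelta time_taken_to_parse;
    {
      ScopedTimer timer(&time_taken_to_parse);
      // ... the phase being measured ...
    }  // destructor runs here: time_taken_to_parse += timer_.Elapsed()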
// ----------------------------------------------------------------------------
@@ -109,25 +97,12 @@ bool CompilationInfo::has_shared_info() const {
return parse_info_ && !parse_info_->shared_info().is_null();
}
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+ Handle<JSFunction> closure)
+ : CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
+ parse_info->isolate(), parse_info->zone()) {
+ closure_ = closure;
-bool CompilationInfo::has_context() const {
- return parse_info_ && !parse_info_->context().is_null();
-}
-
-
-bool CompilationInfo::has_literal() const {
- return parse_info_ && parse_info_->literal() != nullptr;
-}
-
-
-bool CompilationInfo::has_scope() const {
- return parse_info_ && parse_info_->scope() != nullptr;
-}
-
-
-CompilationInfo::CompilationInfo(ParseInfo* parse_info)
- : CompilationInfo(parse_info, nullptr, Code::ComputeFlags(Code::FUNCTION),
- BASE, parse_info->isolate(), parse_info->zone()) {
// Compiling for the snapshot typically results in different code than
// compiling later on. This means that code recompiled with deoptimization
// support won't be "equivalent" (as defined by SharedFunctionInfo::
@@ -140,19 +115,15 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
if (FLAG_turbo_inlining) MarkAsInliningEnabled();
if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
- if (FLAG_turbo_types) MarkAsTypingEnabled();
-
- if (has_shared_info()) {
- if (shared_info()->never_compiled()) MarkAsFirstCompile();
- }
}
-
-CompilationInfo::CompilationInfo(const char* debug_name, Isolate* isolate,
- Zone* zone, Code::Flags code_flags)
+CompilationInfo::CompilationInfo(Vector<const char> debug_name,
+ Isolate* isolate, Zone* zone,
+ Code::Flags code_flags)
: CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
-CompilationInfo::CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+ Vector<const char> debug_name,
Code::Flags code_flags, Mode mode,
Isolate* isolate, Zone* zone)
: parse_info_(parse_info),
@@ -167,27 +138,23 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, const char* debug_name,
bailout_reason_(kNoReason),
prologue_offset_(Code::kPrologueOffsetNotSet),
track_positions_(FLAG_hydrogen_track_positions ||
- isolate->cpu_profiler()->is_profiling()),
- opt_count_(has_shared_info() ? shared_info()->opt_count() : 0),
+ isolate->is_profiling()),
parameter_count_(0),
optimization_id_(-1),
osr_expr_stack_height_(0),
debug_name_(debug_name) {}
-
CompilationInfo::~CompilationInfo() {
- DisableFutureOptimization();
+ if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
+ shared_info()->DisableOptimization(bailout_reason());
+ }
+ dependencies()->Rollback();
delete deferred_handles_;
-#ifdef DEBUG
- // Check that no dependent maps have been added or added dependent maps have
- // been rolled back or committed.
- DCHECK(dependencies()->IsEmpty());
-#endif // DEBUG
}
int CompilationInfo::num_parameters() const {
- return has_scope() ? scope()->num_parameters() : parameter_count_;
+ return !IsStub() ? scope()->num_parameters() : parameter_count_;
}
@@ -199,11 +166,6 @@ int CompilationInfo::num_parameters_including_this() const {
bool CompilationInfo::is_this_defined() const { return !IsStub(); }
-int CompilationInfo::num_heap_slots() const {
- return has_scope() ? scope()->num_heap_slots() : 0;
-}
-
-
// Primitive functions are unlikely to be picked up by the stack-walking
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
@@ -212,7 +174,7 @@ bool CompilationInfo::ShouldSelfOptimize() {
!(literal()->flags() & AstProperties::kDontSelfOptimize) &&
!literal()->dont_optimize() &&
literal()->scope()->AllowsLazyCompilation() &&
- (!has_shared_info() || !shared_info()->optimization_disabled());
+ !shared_info()->optimization_disabled();
}
@@ -220,61 +182,7 @@ bool CompilationInfo::has_simple_parameters() {
return scope()->has_simple_parameters();
}
-
-int CompilationInfo::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position,
- int parent_id) {
- DCHECK(track_positions_);
-
- int inline_id = static_cast<int>(inlined_function_infos_.size());
- InlinedFunctionInfo info(parent_id, position, UnboundScript::kNoScriptId,
- shared->start_position());
- if (!shared->script()->IsUndefined()) {
- Handle<Script> script(Script::cast(shared->script()));
- info.script_id = script->id();
-
- if (FLAG_hydrogen_track_positions && !script->source()->IsUndefined()) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
- << ") id{" << optimization_id() << "," << inline_id << "} ---\n";
- {
- DisallowHeapAllocation no_allocation;
- int start = shared->start_position();
- int len = shared->end_position() - start;
- String::SubStringRange source(String::cast(script->source()), start,
- len);
- for (const auto& c : source) {
- os << AsReversiblyEscapedUC16(c);
- }
- }
-
- os << "\n--- END ---\n";
- }
- }
-
- inlined_function_infos_.push_back(info);
-
- if (FLAG_hydrogen_track_positions && inline_id != 0) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
- << optimization_id() << "," << inline_id << "} AS " << inline_id
- << " AT " << position << std::endl;
- }
-
- return inline_id;
-}
-
-
-void CompilationInfo::LogDeoptCallPosition(int pc_offset, int inlining_id) {
- if (!track_positions_ || IsStub()) return;
- DCHECK_LT(static_cast<size_t>(inlining_id), inlined_function_infos_.size());
- inlined_function_infos_.at(inlining_id).deopt_pc_offsets.push_back(pc_offset);
-}
-
-
-base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
+std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
if (parse_info() && parse_info()->literal()) {
AllowHandleDereference allow_deref;
return parse_info()->literal()->debug_name()->ToCString();
@@ -282,10 +190,11 @@ base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
if (parse_info() && !parse_info()->shared_info().is_null()) {
return parse_info()->shared_info()->DebugName()->ToCString();
}
- const char* str = debug_name_ ? debug_name_ : "unknown";
- size_t len = strlen(str) + 1;
- base::SmartArrayPointer<char> name(new char[len]);
- memcpy(name.get(), str, len);
+ Vector<const char> name_vec = debug_name_;
+ if (name_vec.is_empty()) name_vec = ArrayVector("unknown");
+ std::unique_ptr<char[]> name(new char[name_vec.length() + 1]);
+ memcpy(name.get(), name_vec.start(), name_vec.length());
+ name[name_vec.length()] = '\0';
return name;
}
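One subtlety in the SmartArrayPointer-to-unique_ptr migration above: Vector<const char> carries a length but no NUL terminator, hence the explicit '\0'. A standalone equivalent of the copy:

    #include <cstring>
    #include <memory>

    std::unique_ptr<char[]> CopyToCString(const char* start, size_t length) {
      std::unique_ptr<char[]> name(new char[length + 1]);
      std::memcpy(name.get(), start, length);
      name[length] = '\0';  // the input is not guaranteed to be NUL-terminated
      return name;
    }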
@@ -295,6 +204,9 @@ StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
case Code::BYTECODE_HANDLER:
case Code::HANDLER:
case Code::BUILTIN:
+#define CASE_KIND(kind) case Code::kind:
+ IC_KIND_LIST(CASE_KIND)
+#undef CASE_KIND
return StackFrame::STUB;
case Code::WASM_FUNCTION:
return StackFrame::WASM;
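IC_KIND_LIST(CASE_KIND) above is the usual X-macro idiom: the list macro applies CASE_KIND once per IC kind, so every IC code kind maps to StackFrame::STUB without being listed by hand. A minimal runnable sketch of the idiom, with illustrative names:

    #include <cstdio>

    #define FRUIT_LIST(V) V(Apple) V(Pear) V(Plum)

    int main() {
    // Expands to one printf per list entry, like one case label per IC kind.
    #define PRINT_ENTRY(name) std::printf(#name "\n");
      FRUIT_LIST(PRINT_ENTRY)
    #undef PRINT_ENTRY
      return 0;
    }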
@@ -308,278 +220,88 @@ StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
}
}
-bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
- return is_sloppy(language_mode()) && !is_native();
+int CompilationInfo::GetDeclareGlobalsFlags() const {
+ DCHECK(DeclareGlobalsLanguageMode::is_valid(parse_info()->language_mode()));
+ return DeclareGlobalsEvalFlag::encode(parse_info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(parse_info()->is_native()) |
+ DeclareGlobalsLanguageMode::encode(parse_info()->language_mode());
}
-#if DEBUG
-void CompilationInfo::PrintAstForTesting() {
- PrintF("--- Source from AST ---\n%s\n",
- PrettyPrinter(isolate()).PrintProgram(literal()));
+SourcePositionTableBuilder::RecordingMode
+CompilationInfo::SourcePositionRecordingMode() const {
+ return parse_info() && parse_info()->is_native()
+ ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
+ : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
}
-#endif
-
-// ----------------------------------------------------------------------------
-// Implementation of OptimizedCompileJob
-
-class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
- public:
- explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
- : HOptimizedGraphBuilder(info) {
- }
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node) override { \
- SourcePosition old_position = SourcePosition::Unknown(); \
- if (node->position() != RelocInfo::kNoPosition) { \
- old_position = source_position(); \
- SetSourcePosition(node->position()); \
- } \
- HOptimizedGraphBuilder::Visit##type(node); \
- if (!old_position.IsUnknown()) { \
- set_source_position(old_position); \
- } \
- }
- EXPRESSION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node) override { \
- SourcePosition old_position = SourcePosition::Unknown(); \
- if (node->position() != RelocInfo::kNoPosition) { \
- old_position = source_position(); \
- SetSourcePosition(node->position()); \
- } \
- HOptimizedGraphBuilder::Visit##type(node); \
- if (!old_position.IsUnknown()) { \
- set_source_position(old_position); \
- } \
- }
- STATEMENT_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node) override { \
- HOptimizedGraphBuilder::Visit##type(node); \
- }
- DECLARATION_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-};
-
-
-OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
- DCHECK(info()->IsOptimizing());
-
- // Do not use Crankshaft/TurboFan if we need to be able to set break points.
- if (info()->shared_info()->HasDebugInfo()) {
- return AbortOptimization(kFunctionBeingDebugged);
- }
-
- // Resuming a suspended frame is not supported by Crankshaft/TurboFan.
- if (info()->shared_info()->HasBuiltinFunctionId() &&
- (info()->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
- info()->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
- info()->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
- return AbortOptimization(kGeneratorResumeMethod);
- }
-
- // Limit the number of times we try to optimize functions.
- const int kMaxOptCount =
- FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
- if (info()->opt_count() > kMaxOptCount) {
- return AbortOptimization(kOptimizedTooManyTimes);
- }
-
- // Check the whitelist for Crankshaft.
- if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
- return AbortOptimization(kHydrogenFilter);
- }
-
- // Optimization requires a version of fullcode with deoptimization support.
- // Recompile the unoptimized version of the code if the current version
- // doesn't have deoptimization support already.
- // Otherwise, if we are gathering compilation time and space statistics
- // for hydrogen, gather baseline statistics for a fullcode compilation.
- bool should_recompile = !info()->shared_info()->has_deoptimization_support();
- if (should_recompile || FLAG_hydrogen_stats) {
- base::ElapsedTimer timer;
- if (FLAG_hydrogen_stats) {
- timer.Start();
- }
- if (!Compiler::EnsureDeoptimizationSupport(info())) {
- return SetLastStatus(FAILED);
- }
- if (FLAG_hydrogen_stats) {
- isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
- }
- }
-
- DCHECK(info()->shared_info()->has_deoptimization_support());
- DCHECK(!info()->is_first_compile());
-
- bool optimization_disabled = info()->shared_info()->optimization_disabled();
- bool dont_crankshaft = info()->shared_info()->dont_crankshaft();
-
- // Check the enabling conditions for Turbofan.
- // 1. "use asm" code.
- bool is_turbofanable_asm = FLAG_turbo_asm &&
- info()->shared_info()->asm_function() &&
- !optimization_disabled;
-
- // 2. Fallback for features unsupported by Crankshaft.
- bool is_unsupported_by_crankshaft_but_turbofanable =
- dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
- !optimization_disabled;
-
- // 3. Explicitly enabled by the command-line filter.
- bool passes_turbo_filter =
- info()->shared_info()->PassesFilter(FLAG_turbo_filter);
-
- // If this is an OSR request, OSR must be enabled by Turbofan.
- bool passes_osr_test = FLAG_turbo_osr || !info()->is_osr();
-
- if ((is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
- passes_turbo_filter) &&
- passes_osr_test) {
- // Use TurboFan for the compilation.
- if (FLAG_trace_opt) {
- OFStream os(stdout);
- os << "[compiling method " << Brief(*info()->closure())
- << " using TurboFan";
- if (info()->is_osr()) os << " OSR";
- os << "]" << std::endl;
- }
- if (info()->shared_info()->asm_function()) {
- if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
- info()->MarkAsFunctionContextSpecializing();
- } else {
- if (!FLAG_always_opt) {
- info()->MarkAsBailoutOnUninitialized();
- }
- if (FLAG_native_context_specialization) {
- info()->MarkAsNativeContextSpecializing();
- info()->MarkAsTypingEnabled();
- }
- }
- if (!info()->shared_info()->asm_function() ||
- FLAG_turbo_asm_deoptimization) {
- info()->MarkAsDeoptimizationEnabled();
- }
-
- Timer t(this, &time_taken_to_create_graph_);
- compiler::Pipeline pipeline(info());
- pipeline.GenerateCode();
- if (!info()->code().is_null()) {
- return SetLastStatus(SUCCEEDED);
- }
- }
-
- if (!isolate()->use_crankshaft() || dont_crankshaft) {
- // Crankshaft is entirely disabled.
- return SetLastStatus(FAILED);
- }
+bool CompilationInfo::ExpectsJSReceiverAsReceiver() {
+ return is_sloppy(parse_info()->language_mode()) && !parse_info()->is_native();
+}
- Scope* scope = info()->scope();
- if (LUnallocated::TooManyParameters(scope->num_parameters())) {
- // Crankshaft would require too many Lithium operands.
- return AbortOptimization(kTooManyParameters);
- }
+// ----------------------------------------------------------------------------
+// Implementation of CompilationJob
- if (info()->is_osr() &&
- LUnallocated::TooManyParametersOrStackSlots(scope->num_parameters(),
- scope->num_stack_slots())) {
- // Crankshaft would require too many Lithium operands.
- return AbortOptimization(kTooManyParametersLocals);
- }
+CompilationJob::Status CompilationJob::PrepareJob() {
+ DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
+ DisallowJavascriptExecution no_js(isolate());
- if (FLAG_trace_opt) {
+ if (FLAG_trace_opt && info()->IsOptimizing()) {
OFStream os(stdout);
- os << "[compiling method " << Brief(*info()->closure())
- << " using Crankshaft";
+ os << "[compiling method " << Brief(*info()->closure()) << " using "
+ << compiler_name_;
if (info()->is_osr()) os << " OSR";
os << "]" << std::endl;
}
- if (FLAG_trace_hydrogen) {
- isolate()->GetHTracer()->TraceCompilation(info());
- }
-
- // Type-check the function.
- AstTyper(info()->isolate(), info()->zone(), info()->closure(),
- info()->scope(), info()->osr_ast_id(), info()->literal())
- .Run();
-
- // Optimization could have been disabled by the parser. Note that this check
- // is only needed because the Hydrogen graph builder is missing some bailouts.
- if (info()->shared_info()->optimization_disabled()) {
- return AbortOptimization(
- info()->shared_info()->disable_optimization_reason());
- }
-
- HOptimizedGraphBuilder* graph_builder =
- (info()->is_tracking_positions() || FLAG_trace_ic)
- ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
- : new (info()->zone()) HOptimizedGraphBuilder(info());
-
- Timer t(this, &time_taken_to_create_graph_);
- graph_ = graph_builder->CreateGraph();
-
- if (isolate()->has_pending_exception()) {
- return SetLastStatus(FAILED);
- }
-
- if (graph_ == NULL) return SetLastStatus(BAILED_OUT);
-
- if (info()->dependencies()->HasAborted()) {
- // Dependency has changed during graph creation. Let's try again later.
- return RetryOptimization(kBailedOutDueToDependencyChange);
- }
-
- return SetLastStatus(SUCCEEDED);
+ // Delegate to the underlying implementation.
+ DCHECK(state() == State::kReadyToPrepare);
+ ScopedTimer t(&time_taken_to_prepare_);
+ return UpdateState(PrepareJobImpl(), State::kReadyToExecute);
}
-
-OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
+CompilationJob::Status CompilationJob::ExecuteJob() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
DisallowCodeDependencyChange no_dependency_change;
- DCHECK(last_status() == SUCCEEDED);
- // TODO(turbofan): Currently everything is done in the first phase.
- if (!info()->code().is_null()) {
- return last_status();
- }
-
- Timer t(this, &time_taken_to_optimize_);
- DCHECK(graph_ != NULL);
- BailoutReason bailout_reason = kNoReason;
+ // Delegate to the underlying implementation.
+ DCHECK(state() == State::kReadyToExecute);
+ ScopedTimer t(&time_taken_to_execute_);
+ return UpdateState(ExecuteJobImpl(), State::kReadyToFinalize);
+}
- if (graph_->Optimize(&bailout_reason)) {
- chunk_ = LChunk::NewChunk(graph_);
- if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
- } else if (bailout_reason != kNoReason) {
- info_->AbortOptimization(bailout_reason);
- }
+CompilationJob::Status CompilationJob::FinalizeJob() {
+ DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
+ DisallowCodeDependencyChange no_dependency_change;
+ DisallowJavascriptExecution no_js(isolate());
+ DCHECK(!info()->dependencies()->HasAborted());
- return SetLastStatus(BAILED_OUT);
+ // Delegate to the underlying implementation.
+ DCHECK(state() == State::kReadyToFinalize);
+ ScopedTimer t(&time_taken_to_finalize_);
+ return UpdateState(FinalizeJobImpl(), State::kSucceeded);
}
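
The three methods above replace OptimizedCompileJob's CreateGraph/OptimizeGraph/GenerateCode with a generic driver: each phase asserts the expected state, times itself, delegates to a virtual *Impl() hook, and advances the state on success. A reduced stand-alone model of that state machine, with the timers, isolate plumbing, and allocation guards omitted:

    #include <cassert>

    class CompilationJobModel {
     public:
      enum Status { FAILED, SUCCEEDED };
      enum class State {
        kReadyToPrepare, kReadyToExecute, kReadyToFinalize,
        kSucceeded, kFailed
      };

      virtual ~CompilationJobModel() = default;

      Status PrepareJob() {  // Main thread: may touch the heap.
        assert(state_ == State::kReadyToPrepare);
        return UpdateState(PrepareJobImpl(), State::kReadyToExecute);
      }
      Status ExecuteJob() {  // Possibly a background thread: no heap access.
        assert(state_ == State::kReadyToExecute);
        return UpdateState(ExecuteJobImpl(), State::kReadyToFinalize);
      }
      Status FinalizeJob() {  // Main thread again: installs the result.
        assert(state_ == State::kReadyToFinalize);
        return UpdateState(FinalizeJobImpl(), State::kSucceeded);
      }
      State state() const { return state_; }

     protected:
      // Concrete jobs (e.g. TurboFan's pipeline job) implement these hooks.
      virtual Status PrepareJobImpl() = 0;
      virtual Status ExecuteJobImpl() = 0;
      virtual Status FinalizeJobImpl() = 0;

     private:
      Status UpdateState(Status status, State next_state) {
        state_ = (status == SUCCEEDED) ? next_state : State::kFailed;
        return status;
      }
      State state_ = State::kReadyToPrepare;
    };

GetOptimizedCodeNow further down simply chains PrepareJob, ExecuteJob, and FinalizeJob and aborts optimization on the first phase that does not return SUCCEEDED.
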
-
namespace {
void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
Handle<Code> code) {
Handle<WeakCell> cell = Code::WeakCellFor(code);
Heap* heap = isolate->heap();
- Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
- dep = DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
- heap->AddWeakObjectToCodeDependency(object, dep);
+ if (heap->InNewSpace(*object)) {
+ heap->AddWeakNewSpaceObjectToCodeDependency(object, cell);
+ } else {
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
+ dep =
+ DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
+ heap->AddWeakObjectToCodeDependency(object, dep);
+ }
}
+} // namespace
-void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
+void CompilationJob::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
// TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
Isolate* const isolate = code->GetIsolate();
DCHECK(code->is_optimized_code());
@@ -619,62 +341,17 @@ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
code->set_can_have_weak_objects(true);
}
-} // namespace
-
-
-OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
- DCHECK(last_status() == SUCCEEDED);
- // TODO(turbofan): Currently everything is done in the first phase.
- if (!info()->code().is_null()) {
- info()->dependencies()->Commit(info()->code());
- if (info()->is_deoptimization_enabled()) {
- info()->parse_info()->context()->native_context()->AddOptimizedCode(
- *info()->code());
- RegisterWeakObjectsInOptimizedCode(info()->code());
- }
- RecordOptimizationStats();
- return last_status();
- }
-
- DCHECK(!info()->dependencies()->HasAborted());
- DisallowCodeDependencyChange no_dependency_change;
- DisallowJavascriptExecution no_js(isolate());
- { // Scope for timer.
- Timer timer(this, &time_taken_to_codegen_);
- DCHECK(chunk_ != NULL);
- DCHECK(graph_ != NULL);
- // Deferred handles reference objects that were accessible during
- // graph creation. To make sure that we don't encounter inconsistencies
- // between graph creation and code generation, we disallow accessing
- // objects through deferred handles during the latter, with exceptions.
- DisallowDeferredHandleDereference no_deferred_handle_deref;
- Handle<Code> optimized_code = chunk_->Codegen();
- if (optimized_code.is_null()) {
- if (info()->bailout_reason() == kNoReason) {
- return AbortOptimization(kCodeGenerationFailed);
- }
- return SetLastStatus(BAILED_OUT);
- }
- RegisterWeakObjectsInOptimizedCode(optimized_code);
- info()->SetCode(optimized_code);
- }
- RecordOptimizationStats();
- // Add to the weak list of optimized code objects.
- info()->context()->native_context()->AddOptimizedCode(*info()->code());
- return SetLastStatus(SUCCEEDED);
-}
-
-
-void OptimizedCompileJob::RecordOptimizationStats() {
+void CompilationJob::RecordOptimizationStats() {
+ DCHECK(info()->IsOptimizing());
Handle<JSFunction> function = info()->closure();
if (!function->IsOptimized()) {
// Concurrent recompilation and OSR may race. Increment only once.
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
}
- double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
- double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
- double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
+ double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
+ double ms_optimize = time_taken_to_execute_.InMillisecondsF();
+ double ms_codegen = time_taken_to_finalize_.InMillisecondsF();
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
@@ -690,14 +367,12 @@ void OptimizedCompileJob::RecordOptimizationStats() {
compiled_functions++;
code_size += function->shared()->SourceSize();
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
- compiled_functions,
- code_size,
- compilation_time);
+ compiled_functions, code_size, compilation_time);
}
if (FLAG_hydrogen_stats) {
- isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
- time_taken_to_optimize_,
- time_taken_to_codegen_);
+ isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_prepare_,
+ time_taken_to_execute_,
+ time_taken_to_finalize_);
}
}
@@ -706,47 +381,25 @@ void OptimizedCompileJob::RecordOptimizationStats() {
namespace {
-// Sets the expected number of properties based on estimate from compiler.
-void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
- int estimate) {
- // If no properties are added in the constructor, they are more likely
- // to be added later.
- if (estimate == 0) estimate = 2;
-
- // TODO(yangguo): check whether those heuristics are still up-to-date.
- // We do not shrink objects that go into a snapshot (yet), so we adjust
- // the estimate conservatively.
- if (shared->GetIsolate()->serializer_enabled()) {
- estimate += 2;
- } else {
- // Inobject slack tracking will reclaim redundant inobject space later,
- // so we can afford to adjust the estimate generously.
- estimate += 8;
- }
-
- shared->set_expected_nof_properties(estimate);
-}
-
-void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
- BailoutReason bailout_reason) {
- if (bailout_reason != kNoReason) {
- shared_info->DisableOptimization(bailout_reason);
- }
+bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
+ return shared->is_toplevel() && shared->script()->IsScript() &&
+ Script::cast(shared->script())->compilation_type() ==
+ Script::COMPILATION_TYPE_EVAL;
}
-void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- Handle<SharedFunctionInfo> shared) {
- // SharedFunctionInfo is passed separately, because if CompilationInfo
- // was created using Script object, it will not have it.
-
+void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+ CompilationInfo* info) {
// Log the code generation. If source information is available, include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (info->isolate()->logger()->is_logging_code_events() ||
- info->isolate()->cpu_profiler()->is_profiling()) {
+ info->isolate()->is_profiling()) {
+ Handle<SharedFunctionInfo> shared = info->shared_info();
Handle<Script> script = info->parse_info()->script();
- Handle<AbstractCode> abstract_code = info->abstract_code();
+ Handle<AbstractCode> abstract_code =
+ info->has_bytecode_array()
+ ? Handle<AbstractCode>::cast(info->bytecode_array())
+ : Handle<AbstractCode>::cast(info->code());
if (abstract_code.is_identical_to(
info->isolate()->builtins()->CompileLazy())) {
return;
@@ -757,57 +410,49 @@ void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
String* script_name = script->name()->IsString()
? String::cast(script->name())
: info->isolate()->heap()->empty_string();
- Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
+ CodeEventListener::LogEventsAndTags log_tag =
+ Logger::ToNativeByScript(tag, *script);
PROFILE(info->isolate(),
- CodeCreateEvent(log_tag, *abstract_code, *shared, info, script_name,
+ CodeCreateEvent(log_tag, *abstract_code, *shared, script_name,
line_num, column_num));
}
}
-void EnsureFeedbackVector(CompilationInfo* info) {
- if (!info->has_shared_info()) return;
+void EnsureFeedbackMetadata(CompilationInfo* info) {
+ DCHECK(info->has_shared_info());
- // If no type feedback vector exists, we create one now. At this point the
+ // If no type feedback metadata exists, we create it now. At this point the
// AstNumbering pass has already run. Note the snapshot can contain outdated
// vectors for a different configuration, hence we also create a new vector
// when the function is not compiled (i.e. no code was serialized).
- if (info->shared_info()->feedback_vector()->is_empty() ||
+
+ // TODO(mvstanton): reintroduce is_empty() predicate to feedback_metadata().
+ if (info->shared_info()->feedback_metadata()->length() == 0 ||
!info->shared_info()->is_compiled()) {
Handle<TypeFeedbackMetadata> feedback_metadata = TypeFeedbackMetadata::New(
info->isolate(), info->literal()->feedback_vector_spec());
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::New(info->isolate(), feedback_metadata);
- info->shared_info()->set_feedback_vector(*feedback_vector);
+ info->shared_info()->set_feedback_metadata(*feedback_metadata);
}
// It's very important that recompiles do not alter the structure of the type
// feedback vector. Verify that the structure fits the function literal.
- CHECK(!info->shared_info()->feedback_vector()->metadata()->SpecDiffersFrom(
+ CHECK(!info->shared_info()->feedback_metadata()->SpecDiffersFrom(
info->literal()->feedback_vector_spec()));
}
-bool CompileUnoptimizedCode(CompilationInfo* info) {
- DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- if (!Compiler::Analyze(info->parse_info()) ||
- !(EnsureFeedbackVector(info), FullCodeGenerator::MakeCode(info))) {
- Isolate* isolate = info->isolate();
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return false;
- }
- return true;
-}
+bool ShouldUseIgnition(CompilationInfo* info) {
+ if (!FLAG_ignition) return false;
-bool UseIgnition(CompilationInfo* info) {
- // TODO(4681): Generator functions are not yet supported.
- if (info->shared_info()->is_generator()) {
- return false;
+ DCHECK(info->has_shared_info());
+
+ // When requesting debug code as a replacement for existing code, we provide
+ // the same kind as the existing code (to prevent implicit tier-change).
+ if (info->is_debug() && info->shared_info()->is_compiled()) {
+ return !info->shared_info()->HasBaselineCode();
}
- // TODO(4681): Resuming a suspended frame is not supported.
- if (info->shared_info()->HasBuiltinFunctionId() &&
- (info->shared_info()->builtin_function_id() == kGeneratorObjectNext ||
- info->shared_info()->builtin_function_id() == kGeneratorObjectReturn ||
- info->shared_info()->builtin_function_id() == kGeneratorObjectThrow)) {
+ // Since we can't OSR from Ignition, skip Ignition for asm.js functions.
+ if (info->shared_info()->asm_function()) {
return false;
}
@@ -822,27 +467,26 @@ bool UseIgnition(CompilationInfo* info) {
}
int CodeAndMetadataSize(CompilationInfo* info) {
- int size = 0;
if (info->has_bytecode_array()) {
- Handle<BytecodeArray> bytecode_array = info->bytecode_array();
- size += bytecode_array->BytecodeArraySize();
- size += bytecode_array->constant_pool()->Size();
- size += bytecode_array->handler_table()->Size();
- size += bytecode_array->source_position_table()->Size();
- } else {
- Handle<Code> code = info->code();
- size += code->CodeSize();
- size += code->relocation_info()->Size();
- size += code->deoptimization_data()->Size();
- size += code->handler_table()->Size();
+ return info->bytecode_array()->SizeIncludingMetadata();
}
- return size;
+ return info->code()->SizeIncludingMetadata();
}
-bool GenerateBaselineCode(CompilationInfo* info) {
+bool GenerateUnoptimizedCode(CompilationInfo* info) {
bool success;
- EnsureFeedbackVector(info);
- if (FLAG_ignition && UseIgnition(info)) {
+ EnsureFeedbackMetadata(info);
+ if (FLAG_validate_asm && info->scope()->asm_module() &&
+ !info->shared_info()->is_asm_wasm_broken()) {
+ MaybeHandle<FixedArray> wasm_data;
+ wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
+ if (!wasm_data.is_null()) {
+ info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
+ info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
+ return true;
+ }
+ }
+ if (ShouldUseIgnition(info)) {
success = interpreter::Interpreter::MakeBytecode(info);
} else {
success = FullCodeGenerator::MakeCode(info);
@@ -850,15 +494,17 @@ bool GenerateBaselineCode(CompilationInfo* info) {
if (success) {
Isolate* isolate = info->isolate();
Counters* counters = isolate->counters();
+ // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
counters->total_baseline_code_size()->Increment(CodeAndMetadataSize(info));
counters->total_baseline_compile_count()->Increment(1);
}
return success;
}
-bool CompileBaselineCode(CompilationInfo* info) {
+bool CompileUnoptimizedCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- if (!Compiler::Analyze(info->parse_info()) || !GenerateBaselineCode(info)) {
+ if (!Compiler::Analyze(info->parse_info()) ||
+ !GenerateUnoptimizedCode(info)) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
@@ -866,44 +512,53 @@ bool CompileBaselineCode(CompilationInfo* info) {
return true;
}
-void InstallBaselineCompilationResult(CompilationInfo* info,
- Handle<SharedFunctionInfo> shared,
- Handle<ScopeInfo> scope_info) {
- // Assert that we are not overwriting (possibly patched) debug code.
- DCHECK(!shared->HasDebugCode());
+void InstallSharedScopeInfo(CompilationInfo* info,
+ Handle<SharedFunctionInfo> shared) {
+ Handle<ScopeInfo> scope_info = info->scope()->GetScopeInfo(info->isolate());
+ shared->set_scope_info(*scope_info);
+}
+
+void InstallSharedCompilationResult(CompilationInfo* info,
+ Handle<SharedFunctionInfo> shared) {
+ // TODO(mstarzinger): Compiling for debug code might be used to reveal inner
+ // functions via {FindSharedFunctionInfoInScript}, in which case we end up
+ // regenerating existing bytecode. Fix this!
+ if (info->is_debug() && info->has_bytecode_array()) {
+ shared->ClearBytecodeArray();
+ }
DCHECK(!info->code().is_null());
shared->ReplaceCode(*info->code());
- shared->set_scope_info(*scope_info);
if (info->has_bytecode_array()) {
DCHECK(!shared->HasBytecodeArray()); // Only compiled once.
shared->set_bytecode_array(*info->bytecode_array());
}
}
-MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCodeCommon(
- CompilationInfo* info) {
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
VMState<COMPILER> state(info->isolate());
PostponeInterruptsScope postpone(info->isolate());
+ // Create a canonical handle scope before internalizing parsed values if
+ // compiling bytecode. This is required for off-thread bytecode generation.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info->isolate()));
+
// Parse and update CompilationInfo with the results.
if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info->shared_info();
- FunctionLiteral* lit = info->literal();
- DCHECK_EQ(shared->language_mode(), lit->language_mode());
- SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
- MaybeDisableOptimization(shared, lit->dont_optimize_reason());
+ DCHECK_EQ(shared->language_mode(), info->literal()->language_mode());
// Compile either unoptimized code or bytecode for the interpreter.
- if (!CompileBaselineCode(info)) return MaybeHandle<Code>();
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+ if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
- // Update the shared function info with the scope info. Allocating the
- // ScopeInfo object may cause a GC.
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
+ // Update the shared function info with the scope info.
+ InstallSharedScopeInfo(info, shared);
// Install compilation result on the shared function info
- InstallBaselineCompilationResult(info, shared, scope_info);
+ InstallSharedCompilationResult(info, shared);
+
+ // Record the function compilation event.
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
return info->code();
}
@@ -934,6 +589,10 @@ void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
// Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
+ // TODO(4764): When compiling for OSR from bytecode, BailoutId might derive
+ // from bytecode offset and overlap with actual BailoutId. No caching!
+ if (info->is_osr() && info->is_optimizing_from_bytecode()) return;
+
// Cache optimized context-specific code.
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
@@ -964,29 +623,58 @@ bool Renumber(ParseInfo* parse_info) {
if (!shared_info.is_null()) {
FunctionLiteral* lit = parse_info->literal();
shared_info->set_ast_node_count(lit->ast_node_count());
- MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
- shared_info->set_dont_crankshaft(
- shared_info->dont_crankshaft() ||
- (lit->flags() & AstProperties::kDontCrankshaft));
+ if (lit->dont_optimize_reason() != kNoReason) {
+ shared_info->DisableOptimization(lit->dont_optimize_reason());
+ }
+ if (lit->flags() & AstProperties::kDontCrankshaft) {
+ shared_info->set_dont_crankshaft(true);
+ }
}
return true;
}
-bool GetOptimizedCodeNow(CompilationInfo* info) {
+bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
+ bool optimization_disabled = shared->optimization_disabled();
+ bool dont_crankshaft = shared->dont_crankshaft();
+
+ // Check the enabling conditions for Turbofan.
+ // 1. "use asm" code.
+ bool is_turbofanable_asm =
+ FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
+
+ // 2. Fallback for features unsupported by Crankshaft.
+ bool is_unsupported_by_crankshaft_but_turbofanable =
+ dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
+ !optimization_disabled;
+
+ // 3. Explicitly enabled by the command-line filter.
+ bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
+
+ return is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
+ passes_turbo_filter;
+}
+
+bool GetOptimizedCodeNow(CompilationJob* job) {
+ CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
- CanonicalHandleScope canonical(isolate);
- TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
- TRACE_EVENT0("v8", "V8.OptimizeCode");
- if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+ // Parsing is not required when optimizing from existing bytecode.
+ if (!info->is_optimizing_from_bytecode()) {
+ if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+ EnsureFeedbackMetadata(info);
+ }
- TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- TRACE_EVENT0("v8", "V8.RecompileSynchronous");
+ JSFunction::EnsureLiterals(info->closure());
- OptimizedCompileJob job(info);
- if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
- job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED ||
- job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
+ TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::RecompileSynchronous);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
+
+ if (job->PrepareJob() != CompilationJob::SUCCEEDED ||
+ job->ExecuteJob() != CompilationJob::SUCCEEDED ||
+ job->FinalizeJob() != CompilationJob::SUCCEEDED) {
if (FLAG_trace_opt) {
PrintF("[aborted optimizing ");
info->closure()->ShortPrint();
@@ -996,18 +684,16 @@ bool GetOptimizedCodeNow(CompilationInfo* info) {
}
// Success!
+ job->RecordOptimizationStats();
DCHECK(!isolate->has_pending_exception());
InsertCodeIntoOptimizedCodeMap(info);
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
- info->shared_info());
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
return true;
}
-bool GetOptimizedCodeLater(CompilationInfo* info) {
+bool GetOptimizedCodeLater(CompilationJob* job) {
+ CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
- CanonicalHandleScope canonical(isolate);
- TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
- TRACE_EVENT0("v8", "V8.OptimizeCode");
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
@@ -1018,58 +704,67 @@ bool GetOptimizedCodeLater(CompilationInfo* info) {
return false;
}
+ if (isolate->heap()->HighMemoryPressure()) {
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** High memory pressure, will retry optimizing ");
+ info->closure()->ShortPrint();
+ PrintF(" later.\n");
+ }
+ return false;
+ }
+
+ // All handles below this point will be allocated in a deferred handle scope
+ // that is detached and handed off to the background thread when we return.
CompilationHandleScope handle_scope(info);
- if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+
+ // Parsing is not required when optimizing from existing bytecode.
+ if (!info->is_optimizing_from_bytecode()) {
+ if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+ EnsureFeedbackMetadata(info);
+ }
+
+ JSFunction::EnsureLiterals(info->closure());
// Reopen handles in the new CompilationHandleScope.
info->ReopenHandlesInNewHandleScope();
info->parse_info()->ReopenHandlesInNewHandleScope();
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
- TRACE_EVENT0("v8", "V8.RecompileSynchronous");
+ RuntimeCallTimerScope runtimeTimer(info->isolate(),
+ &RuntimeCallStats::RecompileSynchronous);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
- OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
- OptimizedCompileJob::Status status = job->CreateGraph();
- if (status != OptimizedCompileJob::SUCCEEDED) return false;
+ if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Queued ");
info->closure()->ShortPrint();
- if (info->is_osr()) {
- PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
- } else {
- PrintF(" for concurrent optimization.\n");
- }
+ PrintF(" for concurrent optimization.\n");
}
return true;
}
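
A toy version of the gating in GetOptimizedCodeLater, reusing the CompilationJobModel sketch above: concurrent compilation is refused when the dispatcher queue is full or memory pressure is high, and only the prepare phase runs before the job is handed off. QueueModel is an illustrative stand-in for V8's OptimizingCompileDispatcher, not its API:

    #include <deque>

    struct QueueModel {
      std::deque<CompilationJobModel*> jobs;
      size_t capacity = 8;
      bool IsAvailable() const { return jobs.size() < capacity; }
    };

    bool GetOptimizedCodeLaterModel(QueueModel* queue,
                                    CompilationJobModel* job,
                                    bool high_memory_pressure) {
      if (!queue->IsAvailable()) return false;  // Caller may retry later.
      if (high_memory_pressure) return false;   // Likewise.
      // Only PrepareJob() runs here; a background thread will call
      // ExecuteJob() and the main thread later calls FinalizeJob().
      if (job->PrepareJob() != CompilationJobModel::SUCCEEDED) return false;
      queue->jobs.push_back(job);
      return true;
    }
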
-MaybeHandle<Code> GetUnoptimizedCode(Handle<JSFunction> function) {
- DCHECK(!function->GetIsolate()->has_pending_exception());
- DCHECK(!function->is_compiled());
- if (function->shared()->is_compiled()) {
- return Handle<Code>(function->shared()->code());
- }
-
- CompilationInfoWithZone info(function);
- Handle<Code> result;
- ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
- GetUnoptimizedCodeCommon(&info),
- Code);
- return result;
-}
-
MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
Compiler::ConcurrencyMode mode,
BailoutId osr_ast_id = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr) {
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- if (shared->HasDebugInfo()) return MaybeHandle<Code>();
+
+ bool ignition_osr = osr_frame && osr_frame->is_interpreted();
+ DCHECK_IMPLIES(ignition_osr, !osr_ast_id.IsNone());
+ DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
+
+ // Flag combination --ignition-osr --no-turbo-from-bytecode is unsupported.
+ if (ignition_osr && !FLAG_turbo_from_bytecode) return MaybeHandle<Code>();
Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeMap(function, osr_ast_id)
+ // TODO(4764): When compiling for OSR from bytecode, BailoutId might derive
+ // from bytecode offset and overlap with actual BailoutId. No lookup!
+ if (!ignition_osr &&
+ GetCodeFromOptimizedCodeMap(function, osr_ast_id)
.ToHandle(&cached_code)) {
if (FLAG_trace_opt) {
PrintF("[found optimized code for ");
@@ -1082,82 +777,276 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return cached_code;
}
- DCHECK(AllowCompilation::IsAllowed(isolate));
-
+ // Reset profiler ticks, function is no longer considered hot.
if (shared->is_compiled()) {
shared->code()->set_profiler_ticks(0);
}
- // TODO(mstarzinger): We cannot properly deserialize a scope chain containing
- // an eval scope and hence would fail at parsing the eval source again.
- if (shared->disable_optimization_reason() == kEval) {
+ VMState<COMPILER> state(isolate);
+ DCHECK(!isolate->has_pending_exception());
+ PostponeInterruptsScope postpone(isolate);
+ bool use_turbofan = UseTurboFan(shared) || ignition_osr;
+ std::unique_ptr<CompilationJob> job(
+ use_turbofan ? compiler::Pipeline::NewCompilationJob(function)
+ : new HCompilationJob(function));
+ CompilationInfo* info = job->info();
+ ParseInfo* parse_info = info->parse_info();
+
+ info->SetOptimizingForOsr(osr_ast_id, osr_frame);
+
+ // Do not use Crankshaft/TurboFan if we need to be able to set break points.
+ if (info->shared_info()->HasDebugInfo()) {
+ info->AbortOptimization(kFunctionBeingDebugged);
return MaybeHandle<Code>();
}
- // TODO(mstarzinger): We cannot properly deserialize a scope chain for the
- // builtin context, hence Genesis::InstallExperimentalNatives would fail.
- if (shared->is_toplevel() && isolate->bootstrapper()->IsActive()) {
+ // Limit the number of times we try to optimize functions.
+ const int kMaxOptCount =
+ FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
+ if (info->shared_info()->opt_count() > kMaxOptCount) {
+ info->AbortOptimization(kOptimizedTooManyTimes);
return MaybeHandle<Code>();
}
- base::SmartPointer<CompilationInfo> info(
- new CompilationInfoWithZone(function));
- VMState<COMPILER> state(isolate);
- DCHECK(!isolate->has_pending_exception());
- PostponeInterruptsScope postpone(isolate);
+ CanonicalHandleScope canonical(isolate);
+ TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+ RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::OptimizeCode);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::OptimizeCode);
+
+ // TurboFan can optimize directly from existing bytecode.
+ if (FLAG_turbo_from_bytecode && use_turbofan && ShouldUseIgnition(info)) {
+ if (!Compiler::EnsureBytecode(info)) {
+ if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+ return MaybeHandle<Code>();
+ }
+ info->MarkAsOptimizeFromBytecode();
+ }
- info->SetOptimizingForOsr(osr_ast_id);
+ if (IsEvalToplevel(shared)) {
+ parse_info->set_eval();
+ if (function->context()->IsNativeContext()) parse_info->set_global();
+ parse_info->set_toplevel();
+ parse_info->set_allow_lazy_parsing(false);
+ parse_info->set_lazy(false);
+ }
if (mode == Compiler::CONCURRENT) {
- if (GetOptimizedCodeLater(info.get())) {
- info.Detach(); // The background recompile job owns this now.
+ if (GetOptimizedCodeLater(job.get())) {
+ job.release(); // The background recompile job owns this now.
return isolate->builtins()->InOptimizationQueue();
}
} else {
- info->set_osr_frame(osr_frame);
- if (GetOptimizedCodeNow(info.get())) return info->code();
+ if (GetOptimizedCodeNow(job.get())) return info->code();
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
return MaybeHandle<Code>();
}
+class InterpreterActivationsFinder : public ThreadVisitor,
+ public OptimizedFunctionVisitor {
+ public:
+ explicit InterpreterActivationsFinder(SharedFunctionInfo* shared)
+ : shared_(shared), has_activations_(false) {}
+
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+ Address* activation_pc_address = nullptr;
+ JavaScriptFrameIterator it(isolate, top);
+ for (; !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ if (FLAG_turbo_from_bytecode && FLAG_ignition_osr &&
+ frame->is_optimized() && frame->function()->shared() == shared_) {
+ // If we are able to optimize functions directly from bytecode, then
+ // there might be optimized OSR code active on the stack that is not
+ // reachable through a function. We count this as an activation.
+ has_activations_ = true;
+ }
+ if (frame->is_interpreted() && frame->function()->shared() == shared_) {
+ has_activations_ = true;
+ activation_pc_address = frame->pc_address();
+ }
+ }
+
+ if (activation_pc_address) {
+ activation_pc_addresses_.push_back(activation_pc_address);
+ }
+ }
+
+ void VisitFunction(JSFunction* function) {
+ if (function->Inlines(shared_)) has_activations_ = true;
+ }
+
+ void EnterContext(Context* context) {}
+ void LeaveContext(Context* context) {}
+
+ bool MarkActivationsForBaselineOnReturn(Isolate* isolate) {
+ if (activation_pc_addresses_.empty()) return false;
+
+ for (Address* activation_pc_address : activation_pc_addresses_) {
+ DCHECK(isolate->inner_pointer_to_code_cache()
+ ->GetCacheEntry(*activation_pc_address)
+ ->code->is_interpreter_trampoline_builtin());
+ *activation_pc_address =
+ isolate->builtins()->InterpreterMarkBaselineOnReturn()->entry();
+ }
+ return true;
+ }
+
+ bool has_activations() { return has_activations_; }
+
+ private:
+ SharedFunctionInfo* shared_;
+ bool has_activations_;
+ std::vector<Address*> activation_pc_addresses_;
+};
+
+bool HasInterpreterActivations(
+ Isolate* isolate, InterpreterActivationsFinder* activations_finder) {
+ activations_finder->VisitThread(isolate, isolate->thread_local_top());
+ isolate->thread_manager()->IterateArchivedThreads(activations_finder);
+ if (FLAG_turbo_from_bytecode) {
+ // If we are able to optimize functions directly from bytecode, then there
+ // might be optimized functions that rely on bytecode being around. We need
+ // to prevent switching the given function to baseline code in those cases.
+ Deoptimizer::VisitAllOptimizedFunctions(isolate, activations_finder);
+ }
+ return activations_finder->has_activations();
+}
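
A reduced model of the activation scan above, with frames faked as plain records: the finder walks every frame of every thread and reports whether the function's bytecode is still live on some stack. (The real visitor also records return addresses so interpreter activations can be marked for baseline tier-up on return.)

    #include <vector>

    struct FrameModel {
      const void* shared;   // Stand-in for frame->function()->shared().
      bool is_interpreted;
    };

    class ActivationsFinderModel {
     public:
      explicit ActivationsFinderModel(const void* shared) : shared_(shared) {}

      // Called once per thread with that thread's JavaScript frames.
      void VisitThread(const std::vector<FrameModel>& stack) {
        for (const FrameModel& frame : stack) {
          if (frame.is_interpreted && frame.shared == shared_) {
            has_activations_ = true;  // Bytecode must stay around.
          }
        }
      }
      bool has_activations() const { return has_activations_; }

     private:
      const void* shared_;
      bool has_activations_ = false;
    };
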
+
+MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ VMState<COMPILER> state(isolate);
+ PostponeInterruptsScope postpone(isolate);
+ Zone zone(isolate->allocator());
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info, function);
+
+ // Reset profiler ticks, function is no longer considered hot.
+ if (function->shared()->HasBytecodeArray()) {
+ function->shared()->set_profiler_ticks(0);
+ }
+
+ // Nothing left to do if the function already has baseline code.
+ if (function->shared()->code()->kind() == Code::FUNCTION) {
+ return Handle<Code>(function->shared()->code());
+ }
+
+ // We do not switch to baseline code when the debugger might have created a
+ // copy of the bytecode with break slots to be able to set break points.
+ if (function->shared()->HasDebugInfo()) {
+ return MaybeHandle<Code>();
+ }
+
+ // TODO(4280): For now we do not switch generators or async functions to
+ // baseline code because there might be suspended activations stored in
+ // generator objects on the heap. We could eventually go directly to
+ // TurboFan in this case.
+ if (function->shared()->is_resumable()) {
+ return MaybeHandle<Code>();
+ }
+
+ // TODO(4280): For now we disable switching to baseline code in the presence
+  // of interpreter activations of the given function. The reason is that the
+ // underlying bytecode is cleared below. Note that this only applies in case
+ // the --ignition-preserve-bytecode flag is not passed.
+ if (!FLAG_ignition_preserve_bytecode) {
+ InterpreterActivationsFinder activations_finder(function->shared());
+ if (HasInterpreterActivations(isolate, &activations_finder)) {
+ if (FLAG_trace_opt) {
+ OFStream os(stdout);
+ os << "[unable to switch " << Brief(*function) << " due to activations]"
+ << std::endl;
+ }
+
+ if (activations_finder.MarkActivationsForBaselineOnReturn(isolate)) {
+ if (FLAG_trace_opt) {
+ OFStream os(stdout);
+ os << "[marking " << Brief(function->shared())
+ << " for baseline recompilation on return]" << std::endl;
+ }
+ }
+
+ return MaybeHandle<Code>();
+ }
+ }
+
+ if (FLAG_trace_opt) {
+ OFStream os(stdout);
+ os << "[switching method " << Brief(*function) << " to baseline code]"
+ << std::endl;
+ }
+
+ // Parse and update CompilationInfo with the results.
+ if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
+ Handle<SharedFunctionInfo> shared = info.shared_info();
+ DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
+
+ // Compile baseline code using the full code generator.
+ if (!Compiler::Analyze(info.parse_info()) ||
+ !FullCodeGenerator::MakeCode(&info)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return MaybeHandle<Code>();
+ }
+
+ // TODO(4280): For now we play it safe and remove the bytecode array when we
+ // switch to baseline code. We might consider keeping around the bytecode so
+ // that it can be used as the "source of truth" eventually. Note that this
+ // only applies in case the --ignition-preserve-bytecode flag is not passed.
+ if (!FLAG_ignition_preserve_bytecode) shared->ClearBytecodeArray();
+
+ // Update the shared function info with the scope info.
+ InstallSharedScopeInfo(&info, shared);
+
+ // Install compilation result on the shared function info
+ InstallSharedCompilationResult(&info, shared);
+
+ // Record the function compilation event.
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, &info);
+
+ return info.code();
+}
+
MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
DCHECK(!isolate->has_pending_exception());
DCHECK(!function->is_compiled());
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
- TRACE_EVENT0("v8", "V8.CompileCode");
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::CompileCodeLazy);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::CompileCodeLazy);
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
- // If the debugger is active, do not compile with turbofan unless we can
- // deopt from turbofan code.
- if (FLAG_turbo_asm && function->shared()->asm_function() &&
- (FLAG_turbo_asm_deoptimization || !isolate->debug()->is_active()) &&
- !FLAG_turbo_osr) {
- CompilationInfoWithZone info(function);
-
- VMState<COMPILER> state(isolate);
- PostponeInterruptsScope postpone(isolate);
- info.SetOptimizing();
-
- if (GetOptimizedCodeNow(&info)) {
+ if (FLAG_turbo_cache_shared_code) {
+ Handle<Code> cached_code;
+ if (GetCodeFromOptimizedCodeMap(function, BailoutId::None())
+ .ToHandle(&cached_code)) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ PrintF(" during unoptimized compile]\n");
+ }
DCHECK(function->shared()->is_compiled());
- return info.code();
+ return cached_code;
}
- // We have failed compilation. If there was an exception clear it so that
- // we can compile unoptimized code.
- if (isolate->has_pending_exception()) isolate->clear_pending_exception();
}
if (function->shared()->is_compiled()) {
return Handle<Code>(function->shared()->code());
}
- CompilationInfoWithZone info(function);
+ if (function->shared()->HasBytecodeArray()) {
+ Handle<Code> entry = isolate->builtins()->InterpreterEntryTrampoline();
+ function->shared()->ReplaceCode(*entry);
+ return entry;
+ }
+
+ Zone zone(isolate->allocator());
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info, function);
Handle<Code> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCodeCommon(&info),
- Code);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCode(&info), Code);
if (FLAG_always_opt) {
Handle<Code> opt_code;
@@ -1171,59 +1060,6 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
}
-bool CompileEvalForDebugging(Handle<JSFunction> function,
- Handle<SharedFunctionInfo> shared) {
- Handle<Script> script(Script::cast(shared->script()));
- Handle<Context> context(function->context());
-
- Zone zone(function->GetIsolate()->allocator());
- ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info);
- Isolate* isolate = info.isolate();
-
- parse_info.set_eval();
- parse_info.set_context(context);
- if (context->IsNativeContext()) parse_info.set_global();
- parse_info.set_toplevel();
- parse_info.set_allow_lazy_parsing(false);
- parse_info.set_language_mode(shared->language_mode());
- parse_info.set_parse_restriction(NO_PARSE_RESTRICTION);
- info.MarkAsDebug();
-
- VMState<COMPILER> state(info.isolate());
-
- if (!Parser::ParseStatic(&parse_info)) {
- isolate->clear_pending_exception();
- return false;
- }
-
- FunctionLiteral* lit = parse_info.literal();
- LiveEditFunctionTracker live_edit_tracker(isolate, lit);
-
- if (!CompileUnoptimizedCode(&info)) {
- isolate->clear_pending_exception();
- return false;
- }
- shared->ReplaceCode(*info.code());
- return true;
-}
-
-
-bool CompileForDebugging(CompilationInfo* info) {
- info->MarkAsDebug();
- if (GetUnoptimizedCodeCommon(info).is_null()) {
- info->isolate()->clear_pending_exception();
- return false;
- }
- return true;
-}
-
-inline bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
- return shared->is_toplevel() && shared->script()->IsScript() &&
- Script::cast(shared->script())->compilation_type() ==
- Script::COMPILATION_TYPE_EVAL;
-}
-
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
Isolate* isolate, FunctionLiteral* literal, Handle<Script> script) {
Handle<Code> code = isolate->builtins()->CompileLazy();
@@ -1239,12 +1075,19 @@ Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
TimerEventScope<TimerEventCompileCode> timer(isolate);
- TRACE_EVENT0("v8", "V8.CompileCode");
+ RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::CompileCode);
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
ParseInfo* parse_info = info->parse_info();
Handle<Script> script = parse_info->script();
+ // Create a canonical handle scope before internalizing parsed values if
+ // compiling bytecode. This is required for off-thread bytecode generation.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate));
+
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(v8::Context::kDebugIdIndex));
@@ -1287,6 +1130,7 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
parse_info->set_cached_data(nullptr);
parse_info->set_compile_options(ScriptCompiler::kNoCompileOptions);
}
+
if (!Parser::ParseStatic(parse_info)) {
return Handle<SharedFunctionInfo>::null();
}
@@ -1294,61 +1138,58 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
DCHECK(!info->is_debug() || !parse_info->allow_lazy_parsing());
- info->MarkAsFirstCompile();
-
FunctionLiteral* lit = parse_info->literal();
- LiveEditFunctionTracker live_edit_tracker(isolate, lit);
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
- HistogramTimer* rate = info->is_eval()
- ? info->isolate()->counters()->compile_eval()
- : info->isolate()->counters()->compile();
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
+ : &RuntimeCallStats::Compile);
+ HistogramTimer* rate = parse_info->is_eval()
+ ? info->isolate()->counters()->compile_eval()
+ : info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);
- TRACE_EVENT0("v8", info->is_eval() ? "V8.CompileEval" : "V8.Compile");
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate,
+ (parse_info->is_eval() ? &tracing::TraceEventStatsTable::CompileEval
+ : &tracing::TraceEventStatsTable::Compile));
// Allocate a shared function info object.
- DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+ DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
result->set_is_toplevel(true);
- if (info->is_eval()) {
+ if (parse_info->is_eval()) {
// Eval scripts cannot be (re-)compiled without context.
result->set_allows_lazy_compilation_without_context(false);
}
parse_info->set_shared_info(result);
// Compile the code.
- if (!CompileBaselineCode(info)) {
+ if (!CompileUnoptimizedCode(info)) {
return Handle<SharedFunctionInfo>::null();
}
+ // Update the shared function info with the scope info.
+ InstallSharedScopeInfo(info, result);
+
// Install compilation result on the shared function info
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
- InstallBaselineCompilationResult(info, result, scope_info);
+ InstallSharedCompilationResult(info, result);
Handle<String> script_name =
script->name()->IsString()
? Handle<String>(String::cast(script->name()))
: isolate->factory()->empty_string();
- Logger::LogEventsAndTags log_tag = info->is_eval()
- ? Logger::EVAL_TAG
- : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
-
- PROFILE(isolate, CodeCreateEvent(log_tag, *info->abstract_code(), *result,
- info, *script_name));
+ CodeEventListener::LogEventsAndTags log_tag =
+ parse_info->is_eval()
+ ? CodeEventListener::EVAL_TAG
+ : Logger::ToNativeByScript(CodeEventListener::SCRIPT_TAG, *script);
- // Hint to the runtime system used when allocating space for initial
- // property space by setting the expected number of properties for
- // the instances of the function.
- SetExpectedNofPropertiesFromEstimate(result,
- lit->expected_property_count());
+ PROFILE(isolate, CodeCreateEvent(log_tag, result->abstract_code(), *result,
+ *script_name));
if (!script.is_null())
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
-
- live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
}
return result;
@@ -1362,7 +1203,7 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
bool Compiler::Analyze(ParseInfo* info) {
DCHECK_NOT_NULL(info->literal());
if (!Rewriter::Rewrite(info)) return false;
- if (!Scope::Analyze(info)) return false;
+ Scope::Analyze(info);
if (!Renumber(info)) return false;
DCHECK_NOT_NULL(info->scope());
return true;
@@ -1370,90 +1211,224 @@ bool Compiler::Analyze(ParseInfo* info) {
bool Compiler::ParseAndAnalyze(ParseInfo* info) {
if (!Parser::ParseStatic(info)) return false;
- return Compiler::Analyze(info);
+ if (!Compiler::Analyze(info)) return false;
+ DCHECK_NOT_NULL(info->literal());
+ DCHECK_NOT_NULL(info->scope());
+ return true;
}
bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
if (function->is_compiled()) return true;
- MaybeHandle<Code> maybe_code = GetLazyCode(function);
+ Isolate* isolate = function->GetIsolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ // Start a compilation.
Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
+ if (!GetLazyCode(function).ToHandle(&code)) {
if (flag == CLEAR_EXCEPTION) {
- function->GetIsolate()->clear_pending_exception();
+ isolate->clear_pending_exception();
}
return false;
}
- DCHECK(code->IsJavaScriptCode());
+
+ // Install code on closure.
function->ReplaceCode(*code);
+ JSFunction::EnsureLiterals(function);
+
+ // Check postconditions on success.
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(function->shared()->is_compiled());
+ DCHECK(function->is_compiled());
+ return true;
+}
+
+bool Compiler::CompileBaseline(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ // Start a compilation.
+ Handle<Code> code;
+ if (!GetBaselineCode(function).ToHandle(&code)) {
+ // Baseline generation failed, get unoptimized code.
+ DCHECK(function->shared()->is_compiled());
+ code = handle(function->shared()->code());
+ isolate->clear_pending_exception();
+ }
+
+ // Install code on closure.
+ function->ReplaceCode(*code);
+ JSFunction::EnsureLiterals(function);
+
+ // Check postconditions on success.
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(function->shared()->is_compiled());
DCHECK(function->is_compiled());
return true;
}
bool Compiler::CompileOptimized(Handle<JSFunction> function,
ConcurrencyMode mode) {
+ if (function->IsOptimized()) return true;
+ Isolate* isolate = function->GetIsolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ // Start a compilation.
Handle<Code> code;
- if (GetOptimizedCode(function, mode).ToHandle(&code)) {
- // Optimization succeeded, return optimized code.
- function->ReplaceCode(*code);
- } else {
+ if (!GetOptimizedCode(function, mode).ToHandle(&code)) {
// Optimization failed, get unoptimized code.
- Isolate* isolate = function->GetIsolate();
- if (isolate->has_pending_exception()) { // Possible stack overflow.
- return false;
- }
- code = Handle<Code>(function->shared()->code(), isolate);
- if (code->kind() != Code::FUNCTION &&
- code->kind() != Code::OPTIMIZED_FUNCTION) {
- if (!GetUnoptimizedCode(function).ToHandle(&code)) {
+ DCHECK(!isolate->has_pending_exception());
+ if (function->shared()->is_compiled()) {
+ code = handle(function->shared()->code(), isolate);
+ } else if (function->shared()->HasBytecodeArray()) {
+ code = isolate->builtins()->InterpreterEntryTrampoline();
+ function->shared()->ReplaceCode(*code);
+ } else {
+ Zone zone(isolate->allocator());
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info, function);
+ if (!GetUnoptimizedCode(&info).ToHandle(&code)) {
return false;
}
}
- function->ReplaceCode(*code);
}
- DCHECK(function->code()->kind() == Code::FUNCTION ||
- function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
- (function->code()->is_interpreter_entry_trampoline() &&
- function->shared()->HasBytecodeArray()) ||
- function->IsInOptimizationQueue());
+ // Install code on closure.
+ function->ReplaceCode(*code);
+ JSFunction::EnsureLiterals(function);
+
+ // Check postconditions on success.
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(function->shared()->is_compiled());
+ DCHECK(function->is_compiled());
return true;
}
bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
- Handle<SharedFunctionInfo> shared(function->shared());
- if (IsEvalToplevel(shared)) {
- return CompileEvalForDebugging(function, shared);
- } else {
- CompilationInfoWithZone info(function);
- return CompileForDebugging(&info);
+ Isolate* isolate = function->GetIsolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ // Start a compilation.
+ Zone zone(isolate->allocator());
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+ if (IsEvalToplevel(handle(function->shared()))) {
+ parse_info.set_eval();
+ if (function->context()->IsNativeContext()) parse_info.set_global();
+ parse_info.set_toplevel();
+ parse_info.set_allow_lazy_parsing(false);
+ parse_info.set_lazy(false);
}
+ info.MarkAsDebug();
+ if (GetUnoptimizedCode(&info).is_null()) {
+ isolate->clear_pending_exception();
+ return false;
+ }
+
+ // Check postconditions on success.
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(function->shared()->is_compiled());
+ DCHECK(function->shared()->HasDebugCode());
+ return true;
}
bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
+ Isolate* isolate = shared->GetIsolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ // Start a compilation.
+ Zone zone(isolate->allocator());
+ ParseInfo parse_info(&zone, shared);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
DCHECK(shared->allows_lazy_compilation_without_context());
DCHECK(!IsEvalToplevel(shared));
- Zone zone(shared->GetIsolate()->allocator());
- ParseInfo parse_info(&zone, shared);
- CompilationInfo info(&parse_info);
- return CompileForDebugging(&info);
+ info.MarkAsDebug();
+ if (GetUnoptimizedCode(&info).is_null()) {
+ isolate->clear_pending_exception();
+ return false;
+ }
+
+ // Check postconditions on success.
+ DCHECK(!isolate->has_pending_exception());
+ DCHECK(shared->is_compiled());
+ DCHECK(shared->HasDebugCode());
+ return true;
+}
+
+MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
+ Isolate* isolate = script->GetIsolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ // In order to ensure that live edit function info collection finds the newly
+ // generated shared function infos, clear the script's list temporarily
+ // and restore it at the end of this method.
+ Handle<Object> old_function_infos(script->shared_function_infos(), isolate);
+ script->set_shared_function_infos(Smi::FromInt(0));
+
+ // Start a compilation.
+ Zone zone(isolate->allocator());
+ ParseInfo parse_info(&zone, script);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+ parse_info.set_global();
+ info.MarkAsDebug();
+
+ // TODO(635): support extensions.
+ const bool compilation_succeeded = !CompileToplevel(&info).is_null();
+ Handle<JSArray> infos;
+ if (compilation_succeeded) {
+ // Check postconditions on success.
+ DCHECK(!isolate->has_pending_exception());
+ infos = LiveEditFunctionTracker::Collect(parse_info.literal(), script,
+ &zone, isolate);
+ }
+
+ // Restore the original function info list in order to remain side-effect
+ // free as much as possible, since some code expects the old shared function
+ // infos to stick around.
+ script->set_shared_function_infos(*old_function_infos);
+
+ return infos;
+}
+
+bool Compiler::EnsureBytecode(CompilationInfo* info) {
+ DCHECK(ShouldUseIgnition(info));
+ if (!info->shared_info()->HasBytecodeArray()) {
+ DCHECK(!info->shared_info()->is_compiled());
+ if (GetUnoptimizedCode(info).is_null()) return false;
+ }
+ DCHECK(info->shared_info()->HasBytecodeArray());
+ return true;
}
// TODO(turbofan): In the future, unoptimized code with deopt support could
// be generated lazily once deopt is triggered.
bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
DCHECK_NOT_NULL(info->literal());
- DCHECK(info->has_scope());
+ DCHECK_NOT_NULL(info->scope());
Handle<SharedFunctionInfo> shared = info->shared_info();
if (!shared->has_deoptimization_support()) {
- // TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
- CompilationInfoWithZone unoptimized(info->closure());
- // Note that we use the same AST that we will use for generating the
- // optimized code.
- ParseInfo* parse_info = unoptimized.parse_info();
- parse_info->set_literal(info->literal());
- parse_info->set_scope(info->scope());
- parse_info->set_context(info->context());
+ Zone zone(info->isolate()->allocator());
+ CompilationInfo unoptimized(info->parse_info(), info->closure());
unoptimized.EnableDeoptimizationSupport();
+
+ // TODO(4280): For now we do not switch generators or async functions to
+ // baseline code because there might be suspended activations stored in
+ // generator objects on the heap. We could eventually go directly to
+ // TurboFan in this case.
+ if (shared->is_resumable()) return false;
+
+ // TODO(4280): For now we disable switching to baseline code in the presence
+ // of interpreter activations of the given function. The reason is that the
+ // underlying bytecode is cleared below. The expensive check for activations
+ // only needs to be done when the given function has bytecode; otherwise we
+ // can be sure there are no activations. Note that this only applies when
+ // the --ignition-preserve-bytecode flag is not passed.
+ if (!FLAG_ignition_preserve_bytecode && shared->HasBytecodeArray()) {
+ InterpreterActivationsFinder activations_finder(*shared);
+ if (HasInterpreterActivations(info->isolate(), &activations_finder)) {
+ return false;
+ }
+ }
+
// If the current code has reloc info for serialization, also include
// reloc info for serialization for the new code, so that deopt support
// can be added without losing IC state.
@@ -1461,51 +1436,53 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
shared->code()->has_reloc_info_for_serialization()) {
unoptimized.PrepareForSerializing();
}
- EnsureFeedbackVector(&unoptimized);
+ EnsureFeedbackMetadata(&unoptimized);
if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
- shared->EnableDeoptimizationSupport(*unoptimized.code());
-
- info->MarkAsCompiled();
+ // TODO(4280): For now we play it safe and remove the bytecode array when we
+ // switch to baseline code. We might consider keeping around the bytecode so
+ // that it can be used as the "source of truth" eventually. Note that this
+ // only applies when the --ignition-preserve-bytecode flag is not passed.
+ if (!FLAG_ignition_preserve_bytecode && shared->HasBytecodeArray()) {
+ shared->ClearBytecodeArray();
+ }
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
- Handle<ScopeInfo> target_scope_info =
- ScopeInfo::Create(info->isolate(), info->zone(), info->scope());
- shared->set_scope_info(*target_scope_info);
+ InstallSharedScopeInfo(info, shared);
}
+ // Install compilation result on the shared function info.
+ shared->EnableDeoptimizationSupport(*unoptimized.code());
+
// The existing unoptimized code was replaced with the new one.
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
+ &unoptimized);
}
return true;
}
-void Compiler::CompileForLiveEdit(Handle<Script> script) {
- // TODO(635): support extensions.
- Zone zone(script->GetIsolate()->allocator());
- ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info);
- PostponeInterruptsScope postpone(info.isolate());
- VMState<COMPILER> state(info.isolate());
-
- // Get rid of old list of shared function infos.
- info.MarkAsFirstCompile();
- info.MarkAsDebug();
- info.parse_info()->set_global();
- if (!Parser::ParseStatic(info.parse_info())) return;
-
- LiveEditFunctionTracker tracker(info.isolate(), parse_info.literal());
- if (!CompileUnoptimizedCode(&info)) return;
- tracker.RecordRootFunctionInfo(info.code());
+// static
+Compiler::CompilationTier Compiler::NextCompilationTier(JSFunction* function) {
+ Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
+ if (shared->code()->is_interpreter_trampoline_builtin()) {
+ if (FLAG_turbo_from_bytecode && UseTurboFan(shared)) {
+ return OPTIMIZED;
+ } else {
+ return BASELINE;
+ }
+ } else {
+ return OPTIMIZED;
+ }
}
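// --- Illustration only; not part of the patch --------------------------------
// The tiering decision above, tabulated. FLAG_turbo_from_bytecode and
// UseTurboFan are the flag and predicate named in the hunk:
//
//   current code of the function     next tier
//   -------------------------------  --------------------------------------
//   interpreter entry trampoline     OPTIMIZED, if --turbo-from-bytecode is
//                                    set and the function is TurboFan-eligible
//   interpreter entry trampoline     BASELINE, otherwise
//   baseline (full-codegen) code     OPTIMIZED
// ------------------------------------------------------------------------------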
MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
- ParseRestriction restriction, int line_offset, int column_offset,
- Handle<Object> script_name, ScriptOriginOptions options) {
+ ParseRestriction restriction, int eval_scope_position, int eval_position,
+ int line_offset, int column_offset, Handle<Object> script_name,
+ ScriptOriginOptions options) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_eval_size()->Increment(source_length);
@@ -1514,7 +1491,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<SharedFunctionInfo> maybe_shared_info =
compilation_cache->LookupEval(source, outer_info, context, language_mode,
- line_offset);
+ eval_scope_position);
Handle<SharedFunctionInfo> shared_info;
Handle<Script> script;
@@ -1526,33 +1503,28 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
script->set_column_offset(column_offset);
}
script->set_origin_options(options);
+ script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
+ Script::SetEvalOrigin(script, outer_info, eval_position);
+
Zone zone(isolate->allocator());
ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
parse_info.set_eval();
if (context->IsNativeContext()) parse_info.set_global();
parse_info.set_language_mode(language_mode);
parse_info.set_parse_restriction(restriction);
parse_info.set_context(context);
- Debug::RecordEvalCaller(script);
-
shared_info = CompileToplevel(&info);
if (shared_info.is_null()) {
return MaybeHandle<JSFunction>();
} else {
- // Explicitly disable optimization for eval code. We're not yet prepared
- // to handle eval-code in the optimizing compiler.
- if (restriction != ONLY_SINGLE_FUNCTION_LITERAL) {
- shared_info->DisableOptimization(kEval);
- }
-
// If caller is strict mode, the result must be in strict mode as well.
DCHECK(is_sloppy(language_mode) ||
is_strict(shared_info->language_mode()));
compilation_cache->PutEval(source, outer_info, context, shared_info,
- line_offset);
+ eval_scope_position);
}
}
@@ -1568,6 +1540,52 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
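// --- Illustration only; not part of the patch --------------------------------
// Why the eval cache key changed from line_offset to eval_scope_position: two
// evals of identical source can occur in the same outer function, even on the
// same line, yet sit in different scopes and therefore must not share a cache
// entry. A hypothetical JS example:
//
//   function f() { { let x = 1; eval("x"); } { let x = 2; eval("x"); } }
//
// Conceptually the cache is now keyed on the tuple
//   (source, outer_info, context, language_mode, eval_scope_position)
// matching the LookupEval/PutEval call sites in this hunk.
// ------------------------------------------------------------------------------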
+namespace {
+
+bool CodeGenerationFromStringsAllowed(Isolate* isolate,
+ Handle<Context> context) {
+ DCHECK(context->allow_code_gen_from_strings()->IsFalse(isolate));
+ // Check with callback if set.
+ AllowCodeGenerationFromStringsCallback callback =
+ isolate->allow_code_gen_callback();
+ if (callback == NULL) {
+ // No callback set and code generation disallowed.
+ return false;
+ } else {
+ // Callback set. Let it decide if code generation is allowed.
+ VMState<EXTERNAL> state(isolate);
+ return callback(v8::Utils::ToLocal(context));
+ }
+}
+
+} // namespace
+
+MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
+ Handle<Context> context, Handle<String> source,
+ ParseRestriction restriction) {
+ Isolate* const isolate = context->GetIsolate();
+ Handle<Context> native_context(context->native_context(), isolate);
+
+ // Check if native context allows code generation from
+ // strings. Throw an exception if it doesn't.
+ if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
+ !CodeGenerationFromStringsAllowed(isolate, native_context)) {
+ Handle<Object> error_message =
+ native_context->ErrorMessageForCodeGenerationFromStrings();
+ THROW_NEW_ERROR(isolate, NewEvalError(MessageTemplate::kCodeGenFromStrings,
+ error_message),
+ JSFunction);
+ }
+
+ // Compile source string in the native context.
+ int eval_scope_position = 0;
+ int eval_position = kNoSourcePosition;
+ Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared());
+ return Compiler::GetFunctionFromEval(source, outer_info, native_context,
+ SLOPPY, restriction, eval_scope_position,
+ eval_position);
+}
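// --- Illustration only; not part of the patch --------------------------------
// Sketch of the embedder side of this gate. The callback type matches the
// one-argument call site in CodeGenerationFromStringsAllowed() above; the
// policy function and the embedder-data slot are hypothetical, and the exact
// home of the setter (v8::Isolate vs. the older v8::V8 static) depends on the
// API version.
#if 0
bool AllowEvalInTrustedContexts(v8::Local<v8::Context> context) {
  // Hypothetical policy: trust contexts the embedder has tagged via a slot.
  return context->GetEmbedderData(0)->IsTrue();
}

context->AllowCodeGenerationFromStrings(false);  // disallow by default
isolate->SetAllowCodeGenerationFromStringsCallback(AllowEvalInTrustedContexts);
// eval()/new Function() in this context now consult the callback instead of
// throwing immediately.
#endif
// ------------------------------------------------------------------------------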
+
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, ScriptOriginOptions resource_options,
@@ -1609,7 +1627,10 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
!isolate->debug()->is_loaded()) {
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
- TRACE_EVENT0("v8", "V8.CompileDeserialize");
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::CompileDeserialize);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::CompileDeserialize);
Handle<SharedFunctionInfo> result;
if (CodeSerializer::Deserialize(isolate, *cached_data, source)
.ToHandle(&result)) {
@@ -1627,8 +1648,10 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
timer.Start();
}
- if (!maybe_result.ToHandle(&result)) {
- // No cache entry found. Compile the script.
+ if (!maybe_result.ToHandle(&result) ||
+ (FLAG_serialize_toplevel &&
+ compile_options == ScriptCompiler::kProduceCodeCache)) {
+ // No cache entry found, or embedder wants a code cache. Compile the script.
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
@@ -1652,7 +1675,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Compile the function and add it to the cache.
Zone zone(isolate->allocator());
ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
if (is_module) {
parse_info.set_module();
} else {
@@ -1670,7 +1693,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
parse_info.set_language_mode(
- static_cast<LanguageMode>(info.language_mode() | language_mode));
+ static_cast<LanguageMode>(parse_info.language_mode() | language_mode));
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, context, language_mode, result);
@@ -1678,7 +1701,10 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
compile_options == ScriptCompiler::kProduceCodeCache) {
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
- TRACE_EVENT0("v8", "V8.CompileSerialize");
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::CompileSerialize);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::CompileSerialize);
*cached_data = CodeSerializer::Serialize(isolate, result, source);
if (FLAG_profile_deserialization) {
PrintF("[Compiling and serializing took %0.3f ms]\n",
@@ -1709,7 +1735,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
parse_info->set_language_mode(
static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
- CompilationInfo compile_info(parse_info);
+ CompilationInfo compile_info(parse_info, Handle<JSFunction>::null());
// The source was parsed lazily, so compiling for debugging is not possible.
DCHECK(!compile_info.is_debug());
@@ -1726,7 +1752,9 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// Precondition: code has been parsed and scopes have been analyzed.
Isolate* isolate = outer_info->isolate();
MaybeHandle<SharedFunctionInfo> maybe_existing;
- if (outer_info->is_first_compile()) {
+
+ // Find any previously allocated shared function info for the given literal.
+ if (outer_info->shared_info()->never_compiled()) {
// On the first compile, there are no existing shared function info for
// inner functions yet, so do not try to find them. All bets are off for
// live edit though.
@@ -1735,15 +1763,19 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
} else {
maybe_existing = script->FindSharedFunctionInfo(literal);
}
- // We found an existing shared function info. If it's already compiled,
- // don't worry about compiling it, and simply return it. If it's not yet
- // compiled, continue to decide whether to eagerly compile.
- // Carry on if we are compiling eager to obtain code for debugging,
- // unless we already have code with debut break slots.
+
+ // We found an existing shared function info. If it has any sort of code
+ // attached, don't worry about compiling and simply return it. Otherwise,
+ // continue to decide whether to eagerly compile.
+ // Note that we also carry on if we are compiling eagerly to obtain code for
+ // debugging, unless we already have code with debug break slots.
Handle<SharedFunctionInfo> existing;
- if (maybe_existing.ToHandle(&existing) && existing->is_compiled()) {
- if (!outer_info->is_debug() || existing->HasDebugCode()) {
- return existing;
+ if (maybe_existing.ToHandle(&existing)) {
+ DCHECK(!existing->is_toplevel());
+ if (existing->HasBaselineCode() || existing->HasBytecodeArray()) {
+ if (!outer_info->is_debug() || existing->HasDebugCode()) {
+ return existing;
+ }
}
}
@@ -1752,20 +1784,22 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
if (!maybe_existing.ToHandle(&result)) {
result = NewSharedFunctionInfoForLiteral(isolate, literal, script);
result->set_is_toplevel(false);
+
+ // If the outer function has been compiled before, we cannot be sure that
+ // shared function info for this function literal has been created for the
+ // first time. It may have already been compiled previously.
+ result->set_never_compiled(outer_info->shared_info()->never_compiled());
}
Zone zone(isolate->allocator());
ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
parse_info.set_literal(literal);
parse_info.set_shared_info(result);
- parse_info.set_scope(literal->scope());
parse_info.set_language_mode(literal->scope()->language_mode());
if (outer_info->will_serialize()) info.PrepareForSerializing();
- if (outer_info->is_first_compile()) info.MarkAsFirstCompile();
if (outer_info->is_debug()) info.MarkAsDebug();
- LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
@@ -1775,13 +1809,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// aggressive about lazy compilation, because it might trigger compilation
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript.
- bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
- // Compile eagerly for live edit. When compiling debug code, eagerly compile
- // unless we can lazily compile without the context.
- bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive(isolate) &&
- (!info.is_debug() || allow_lazy_without_ctx);
-
+ bool allow_lazy = literal->AllowsLazyCompilation() && !info.is_debug();
bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
// Consider compiling eagerly when targeting the code cache.
@@ -1793,40 +1821,36 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// Generate code
TimerEventScope<TimerEventCompileCode> timer(isolate);
- TRACE_EVENT0("v8", "V8.CompileCode");
+ RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::CompileCode);
+
+ // Create a canonical handle scope if compiling ignition bytecode. This is
+ // required by the constant array builder to de-duplicate common objects
+ // without dereferencing handles.
+ std::unique_ptr<CanonicalHandleScope> canonical;
+ if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info.isolate()));
+
if (lazy) {
info.SetCode(isolate->builtins()->CompileLazy());
- } else if (Renumber(info.parse_info()) && GenerateBaselineCode(&info)) {
+ } else if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
// Code generation will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
- Handle<ScopeInfo> scope_info =
- ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
if (literal->should_eager_compile() &&
literal->should_be_used_once_hint()) {
info.code()->MarkToBeExecutedOnce(isolate);
}
+ // Update the shared function info with the scope info.
+ InstallSharedScopeInfo(&info, result);
// Install compilation result on the shared function info.
- InstallBaselineCompilationResult(&info, result, scope_info);
+ InstallSharedCompilationResult(&info, result);
} else {
return Handle<SharedFunctionInfo>::null();
}
if (maybe_existing.is_null()) {
- // If the outer function has been compiled before, we cannot be sure that
- // shared function info for this function literal has been created for the
- // first time. It may have already been compiled previously.
- result->set_never_compiled(outer_info->is_first_compile() && lazy);
-
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
- result->set_allows_lazy_compilation(literal->AllowsLazyCompilation());
- result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
-
- // Set the expected number of properties for instances and return
- // the resulting function.
- SetExpectedNofPropertiesFromEstimate(result,
- literal->expected_property_count());
- live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info);
}
return result;
@@ -1847,14 +1871,13 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
*fun_template->GetFunction(v8_isolate->GetCurrentContext())
.ToLocalChecked()));
- const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
- name, literals, FunctionKind::kNormalFunction, code,
+ name, fun->shared()->num_literals(), FunctionKind::kNormalFunction, code,
Handle<ScopeInfo>(fun->shared()->scope_info()));
- shared->set_construct_stub(*construct_stub);
- shared->set_feedback_vector(fun->shared()->feedback_vector());
+ shared->SetConstructStub(*construct_stub);
+ shared->set_feedback_metadata(fun->shared()->feedback_metadata());
// Copy the function data to the shared function info.
shared->set_function_data(fun->shared()->function_data());
@@ -1872,15 +1895,18 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
return GetOptimizedCode(function, NOT_CONCURRENT, osr_ast_id, osr_frame);
}
-void Compiler::FinalizeOptimizedCompileJob(OptimizedCompileJob* job) {
- // Take ownership of compilation info. Deleting compilation info
- // also tears down the zone and the recompile job.
- base::SmartPointer<CompilationInfo> info(job->info());
+void Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
+ // Take ownership of compilation job. Deleting job also tears down the zone.
+ std::unique_ptr<CompilationJob> job(raw_job);
+ CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
- TRACE_EVENT0("v8", "V8.RecompileSynchronous");
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::RecompileSynchronous);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::RecompileSynchronous);
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->code()->set_profiler_ticks(0);
@@ -1892,16 +1918,17 @@ void Compiler::FinalizeOptimizedCompileJob(OptimizedCompileJob* job) {
// Except when OSR already disabled optimization for some reason.
// 3) The code may have already been invalidated due to dependency change.
// 4) Code generation may have failed.
- if (job->last_status() == OptimizedCompileJob::SUCCEEDED) {
+ if (job->state() == CompilationJob::State::kReadyToFinalize) {
if (shared->optimization_disabled()) {
job->RetryOptimization(kOptimizationDisabled);
} else if (info->dependencies()->HasAborted()) {
job->RetryOptimization(kBailedOutDueToDependencyChange);
- } else if (job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
- RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info.get(), shared);
+ } else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
+ job->RecordOptimizationStats();
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
info->osr_ast_id()).code == nullptr) {
- InsertCodeIntoOptimizedCodeMap(info.get());
+ InsertCodeIntoOptimizedCodeMap(info);
}
if (FLAG_trace_opt) {
PrintF("[completed optimizing ");
@@ -1913,7 +1940,7 @@ void Compiler::FinalizeOptimizedCompileJob(OptimizedCompileJob* job) {
}
}
- DCHECK(job->last_status() != OptimizedCompileJob::SUCCEEDED);
+ DCHECK(job->state() == CompilationJob::State::kFailed);
if (FLAG_trace_opt) {
PrintF("[aborted optimizing ");
info->closure()->ShortPrint();
@@ -1940,21 +1967,11 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
}
if (cached.literals != nullptr) {
+ DCHECK(shared->is_compiled());
function->set_literals(cached.literals);
- } else {
- Isolate* isolate = function->GetIsolate();
- int number_of_literals = shared->num_literals();
- Handle<LiteralsArray> literals =
- LiteralsArray::New(isolate, handle(shared->feedback_vector()),
- number_of_literals, pretenure);
- function->set_literals(*literals);
-
- // Cache context-specific literals.
- MaybeHandle<Code> code;
- if (cached.code != nullptr) code = handle(cached.code);
- Handle<Context> native_context(function->context()->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
- literals, BailoutId::None());
+ } else if (shared->is_compiled()) {
+ // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
+ JSFunction::EnsureLiterals(function);
}
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index fa043995b4..55215733c1 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -5,10 +5,16 @@
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
+#include <memory>
+
#include "src/allocation.h"
-#include "src/ast/ast.h"
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
+#include "src/contexts.h"
+#include "src/frames.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/source-position-table.h"
#include "src/source-position.h"
#include "src/zone.h"
@@ -17,8 +23,8 @@ namespace internal {
// Forward declarations.
class CompilationInfo;
+class CompilationJob;
class JavaScriptFrame;
-class OptimizedCompileJob;
class ParseInfo;
class ScriptData;
@@ -36,6 +42,7 @@ class Compiler : public AllStatic {
public:
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
+ enum CompilationTier { INTERPRETED, BASELINE, OPTIMIZED };
// ===========================================================================
// The following family of methods ensures a given function is compiled. The
@@ -44,13 +51,14 @@ class Compiler : public AllStatic {
// given function holds (except for live-edit, which compiles the world).
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
+ static bool CompileBaseline(Handle<JSFunction> function);
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
static bool CompileDebugCode(Handle<JSFunction> function);
static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
- static void CompileForLiveEdit(Handle<Script> script);
+ static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
- // Generate and install code from previously queued optimization job.
- static void FinalizeOptimizedCompileJob(OptimizedCompileJob* job);
+ // Generate and install code from previously queued compilation job.
+ static void FinalizeCompilationJob(CompilationJob* job);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
@@ -63,6 +71,12 @@ class Compiler : public AllStatic {
static bool Analyze(ParseInfo* info);
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
+ // Ensures that bytecode is generated, calls ParseAndAnalyze internally.
+ static bool EnsureBytecode(CompilationInfo* info);
+
+ // The next compilation tier to which the function should be compiled for
+ // optimization. This is used as a hint by the runtime profiler.
+ static CompilationTier NextCompilationTier(JSFunction* function);
// ===========================================================================
// The following family of methods instantiates new functions for scripts or
@@ -77,10 +91,16 @@ class Compiler : public AllStatic {
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
- ParseRestriction restriction, int line_offset, int column_offset = 0,
+ ParseRestriction restriction, int eval_scope_position, int eval_position,
+ int line_offset = 0, int column_offset = 0,
Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
+ // Create a (bound) function for a String source within a context for eval.
+ MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
+ Handle<Context> context, Handle<String> source,
+ ParseRestriction restriction);
+
// Create a shared function info object for a String source within a context.
static Handle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
@@ -118,26 +138,10 @@ class Compiler : public AllStatic {
JavaScriptFrame* osr_frame);
};
-struct InlinedFunctionInfo {
- InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
- int script_id, int start_position)
- : parent_id(parent_id),
- inline_position(inline_position),
- script_id(script_id),
- start_position(start_position) {}
- int parent_id;
- SourcePosition inline_position;
- int script_id;
- int start_position;
- std::vector<size_t> deopt_pc_offsets;
-
- static const int kNoParentId = -1;
-};
-
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
-class CompilationInfo {
+class CompilationInfo final {
public:
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
@@ -154,19 +158,20 @@ class CompilationInfo {
kFrameSpecializing = 1 << 9,
kNativeContextSpecializing = 1 << 10,
kInliningEnabled = 1 << 11,
- kTypingEnabled = 1 << 12,
- kDisableFutureOptimization = 1 << 13,
- kSplittingEnabled = 1 << 14,
- kDeoptimizationEnabled = 1 << 16,
- kSourcePositionsEnabled = 1 << 17,
- kFirstCompile = 1 << 18,
- kBailoutOnUninitialized = 1 << 19,
+ kDisableFutureOptimization = 1 << 12,
+ kSplittingEnabled = 1 << 13,
+ kDeoptimizationEnabled = 1 << 14,
+ kSourcePositionsEnabled = 1 << 15,
+ kBailoutOnUninitialized = 1 << 16,
+ kOptimizeFromBytecode = 1 << 17,
+ kTypeFeedbackEnabled = 1 << 18,
+ kAccessorInliningEnabled = 1 << 19,
};
- explicit CompilationInfo(ParseInfo* parse_info);
- CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone,
+ CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
+ CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
Code::Flags code_flags = Code::ComputeFlags(Code::STUB));
- virtual ~CompilationInfo();
+ ~CompilationInfo();
ParseInfo* parse_info() const { return parse_info_; }
@@ -174,19 +179,11 @@ class CompilationInfo {
// TODO(titzer): inline and delete accessors of ParseInfo
// -----------------------------------------------------------
Handle<Script> script() const;
- bool is_eval() const;
- bool is_native() const;
- bool is_module() const;
- LanguageMode language_mode() const;
- Handle<JSFunction> closure() const;
FunctionLiteral* literal() const;
- Scope* scope() const;
+ DeclarationScope* scope() const;
Handle<Context> context() const;
Handle<SharedFunctionInfo> shared_info() const;
bool has_shared_info() const;
- bool has_context() const;
- bool has_literal() const;
- bool has_scope() const;
// -----------------------------------------------------------
Isolate* isolate() const {
@@ -194,14 +191,14 @@ class CompilationInfo {
}
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_ast_id_.IsNone(); }
+ Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
Code::Flags code_flags() const { return code_flags_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
- int opt_count() const { return opt_count_; }
+ JavaScriptFrame* osr_frame() const { return osr_frame_; }
int num_parameters() const;
int num_parameters_including_this() const;
bool is_this_defined() const;
- int num_heap_slots() const;
void set_parameter_count(int parameter_count) {
DCHECK(IsStub());
@@ -211,11 +208,6 @@ class CompilationInfo {
bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
- Handle<AbstractCode> abstract_code() const {
- return has_bytecode_array() ? Handle<AbstractCode>::cast(bytecode_array())
- : Handle<AbstractCode>::cast(code());
- }
-
bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
@@ -284,6 +276,18 @@ class CompilationInfo {
return GetFlag(kDeoptimizationEnabled);
}
+ void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
+
+ bool is_type_feedback_enabled() const {
+ return GetFlag(kTypeFeedbackEnabled);
+ }
+
+ void MarkAsAccessorInliningEnabled() { SetFlag(kAccessorInliningEnabled); }
+
+ bool is_accessor_inlining_enabled() const {
+ return GetFlag(kAccessorInliningEnabled);
+ }
+
void MarkAsSourcePositionsEnabled() { SetFlag(kSourcePositionsEnabled); }
bool is_source_positions_enabled() const {
@@ -294,26 +298,22 @@ class CompilationInfo {
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
- void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
-
- bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
-
void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
bool is_splitting_enabled() const { return GetFlag(kSplittingEnabled); }
- void MarkAsFirstCompile() { SetFlag(kFirstCompile); }
-
- void MarkAsCompiled() { SetFlag(kFirstCompile, false); }
-
- bool is_first_compile() const { return GetFlag(kFirstCompile); }
-
void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
bool is_bailout_on_uninitialized() const {
return GetFlag(kBailoutOnUninitialized);
}
+ void MarkAsOptimizeFromBytecode() { SetFlag(kOptimizeFromBytecode); }
+
+ bool is_optimizing_from_bytecode() const {
+ return GetFlag(kOptimizeFromBytecode);
+ }
+
bool GeneratePreagedPrologue() const {
// Generate a pre-aged prologue if we are optimizing for size, which
// will make code flushing more aggressive. Only apply to Code::FUNCTION,
@@ -357,9 +357,10 @@ class CompilationInfo {
code_flags_ =
Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
}
- void SetOptimizingForOsr(BailoutId osr_ast_id) {
+ void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
SetOptimizing();
osr_ast_id_ = osr_ast_id;
+ osr_frame_ = osr_frame;
}
// Deoptimization support.
@@ -383,7 +384,7 @@ class CompilationInfo {
}
void ReopenHandlesInNewHandleScope() {
- // Empty for now but will be needed once fields move from ParseInfo.
+ closure_ = Handle<JSFunction>(*closure_);
}
void AbortOptimization(BailoutReason reason) {
@@ -410,23 +411,8 @@ class CompilationInfo {
prologue_offset_ = prologue_offset;
}
- int start_position_for(uint32_t inlining_id) {
- return inlined_function_infos_.at(inlining_id).start_position;
- }
- const std::vector<InlinedFunctionInfo>& inlined_function_infos() {
- return inlined_function_infos_;
- }
-
- void LogDeoptCallPosition(int pc_offset, int inlining_id);
- int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position, int pareint_id);
-
CompilationDependencies* dependencies() { return &dependencies_; }
- bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
- return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure());
- }
-
int optimization_id() const { return optimization_id_; }
int osr_expr_stack_height() { return osr_expr_stack_height_; }
@@ -434,12 +420,6 @@ class CompilationInfo {
DCHECK(height >= 0);
osr_expr_stack_height_ = height;
}
- JavaScriptFrame* osr_frame() const { return osr_frame_; }
- void set_osr_frame(JavaScriptFrame* osr_frame) { osr_frame_ = osr_frame; }
-
-#if DEBUG
- void PrintAstForTesting();
-#endif
bool has_simple_parameters();
@@ -466,7 +446,7 @@ class CompilationInfo {
inlined_functions_.push_back(InlinedFunctionHolder(inlined_function));
}
- base::SmartArrayPointer<char> GetDebugName() const;
+ std::unique_ptr<char[]> GetDebugName() const;
Code::Kind output_code_kind() const {
return Code::ExtractKindFromFlags(code_flags_);
@@ -474,26 +454,9 @@ class CompilationInfo {
StackFrame::Type GetOutputStackFrameType() const;
- protected:
- ParseInfo* parse_info_;
+ int GetDeclareGlobalsFlags() const;
- void DisableFutureOptimization() {
- if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
- // If Crankshaft tried to optimize this function, bailed out, and
- // doesn't want to try again, then use TurboFan next time.
- if (!shared_info()->dont_crankshaft() &&
- bailout_reason() != kOptimizedTooManyTimes) {
- shared_info()->set_dont_crankshaft(true);
- if (FLAG_trace_opt) {
- PrintF("[disabled Crankshaft for ");
- shared_info()->ShortPrint();
- PrintF(", reason: %s]\n", GetBailoutReason(bailout_reason()));
- }
- } else {
- shared_info()->DisableOptimization(bailout_reason());
- }
- }
- }
+ SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
private:
// Compilation mode.
@@ -505,10 +468,11 @@ class CompilationInfo {
STUB
};
- CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+ CompilationInfo(ParseInfo* parse_info, Vector<const char> debug_name,
Code::Flags code_flags, Mode mode, Isolate* isolate,
Zone* zone);
+ ParseInfo* parse_info_;
Isolate* isolate_;
void SetMode(Mode mode) {
@@ -527,6 +491,8 @@ class CompilationInfo {
Code::Flags code_flags_;
+ Handle<JSFunction> closure_;
+
// The compiled code.
Handle<Code> code_;
@@ -552,15 +518,10 @@ class CompilationInfo {
int prologue_offset_;
- std::vector<InlinedFunctionInfo> inlined_function_infos_;
bool track_positions_;
InlinedFunctionList inlined_functions_;
- // A copy of shared_info()->opt_count() to avoid handle deref
- // during graph optimization.
- int opt_count_;
-
// Number of parameters used for compilation of stubs that require arguments.
int parameter_count_;
@@ -571,78 +532,95 @@ class CompilationInfo {
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
- const char* debug_name_;
+ Vector<const char> debug_name_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
-
-class HGraph;
-class LChunk;
-
-// A helper class that calls the three compilation phases in
-// Crankshaft and keeps track of its state. The three phases
-// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
-// fail, bail-out to the full code generator or succeed. Apart from
-// their return value, the status of the phase last run can be checked
-// using last_status().
-class OptimizedCompileJob: public ZoneObject {
+// A base class for compilation jobs intended to run concurrently with the
+// main thread. The job is split into three phases, called in sequence on
+// different threads and with different limitations:
+// 1) PrepareJob: Runs on main thread. No major limitations.
+// 2) ExecuteJob: Runs concurrently. No heap allocation or handle derefs.
+// 3) FinalizeJob: Runs on main thread. No dependency changes.
+//
+// Each of the three phases can either fail or succeed. The current state of
+// the job can be checked using {state()}.
+class CompilationJob {
public:
- explicit OptimizedCompileJob(CompilationInfo* info)
- : info_(info), graph_(NULL), chunk_(NULL), last_status_(FAILED) {}
-
- enum Status {
- FAILED, BAILED_OUT, SUCCEEDED
+ enum Status { SUCCEEDED, FAILED };
+ enum class State {
+ kReadyToPrepare,
+ kReadyToExecute,
+ kReadyToFinalize,
+ kSucceeded,
+ kFailed,
};
- MUST_USE_RESULT Status CreateGraph();
- MUST_USE_RESULT Status OptimizeGraph();
- MUST_USE_RESULT Status GenerateCode();
+ explicit CompilationJob(CompilationInfo* info, const char* compiler_name,
+ State initial_state = State::kReadyToPrepare)
+ : info_(info), compiler_name_(compiler_name), state_(initial_state) {}
+ virtual ~CompilationJob() {}
- Status last_status() const { return last_status_; }
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const { return info()->isolate(); }
+ // Prepare the compile job. Must be called on the main thread.
+ MUST_USE_RESULT Status PrepareJob();
+
+ // Executes the compile job. Can be called off the main thread.
+ MUST_USE_RESULT Status ExecuteJob();
+
+ // Finalizes the compile job. Must be called on the main thread.
+ MUST_USE_RESULT Status FinalizeJob();
+ // Report a transient failure; try again next time. Should only be called on
+ // optimization compilation jobs.
Status RetryOptimization(BailoutReason reason) {
+ DCHECK(info_->IsOptimizing());
info_->RetryOptimization(reason);
- return SetLastStatus(BAILED_OUT);
+ state_ = State::kFailed;
+ return FAILED;
}
+ // Report a persistent failure; disable future optimization on the function.
+ // Should only be called on optimization compilation jobs.
Status AbortOptimization(BailoutReason reason) {
+ DCHECK(info_->IsOptimizing());
info_->AbortOptimization(reason);
- return SetLastStatus(BAILED_OUT);
+ state_ = State::kFailed;
+ return FAILED;
}
- private:
- CompilationInfo* info_;
- HGraph* graph_;
- LChunk* chunk_;
- base::TimeDelta time_taken_to_create_graph_;
- base::TimeDelta time_taken_to_optimize_;
- base::TimeDelta time_taken_to_codegen_;
- Status last_status_;
-
- MUST_USE_RESULT Status SetLastStatus(Status status) {
- last_status_ = status;
- return last_status_;
- }
void RecordOptimizationStats();
- struct Timer {
- Timer(OptimizedCompileJob* job, base::TimeDelta* location)
- : job_(job), location_(location) {
- DCHECK(location_ != NULL);
- timer_.Start();
- }
+ State state() const { return state_; }
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info()->isolate(); }
- ~Timer() {
- *location_ += timer_.Elapsed();
- }
+ protected:
+ // Overridden by the actual implementation.
+ virtual Status PrepareJobImpl() = 0;
+ virtual Status ExecuteJobImpl() = 0;
+ virtual Status FinalizeJobImpl() = 0;
- OptimizedCompileJob* job_;
- base::ElapsedTimer timer_;
- base::TimeDelta* location_;
- };
+ // Registers weak object to optimized code dependencies.
+ // TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+
+ private:
+ CompilationInfo* info_;
+ base::TimeDelta time_taken_to_prepare_;
+ base::TimeDelta time_taken_to_execute_;
+ base::TimeDelta time_taken_to_finalize_;
+ const char* compiler_name_;
+ State state_;
+
+ MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
+ if (status == SUCCEEDED) {
+ state_ = next_state;
+ } else {
+ state_ = State::kFailed;
+ }
+ return status;
+ }
};
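// --- Illustration only; not part of the patch --------------------------------
// Sketch of how a driver walks the three phases declared above. Scheduling is
// the caller's business; only the phase order and the main-thread restrictions
// are fixed. PostToCompilerThread is hypothetical.
#if 0
void StartJob(CompilationJob* job) {
  // Phase 1: main thread; may allocate on the heap and create handles.
  if (job->PrepareJob() != CompilationJob::SUCCEEDED) return;
  // Phase 2: any thread; no heap allocation, no handle dereferences.
  PostToCompilerThread([job] {
    job->ExecuteJob();
    // Phase 3 must happen back on the main thread, typically by handing the
    // job to Compiler::FinalizeCompilationJob(), which calls FinalizeJob(),
    // records stats, and installs the generated code.
  });
}
#endif
// ------------------------------------------------------------------------------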
} // namespace internal
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 1257e232f7..02de4edeac 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,6 +1,7 @@
set noparent
bmeurer@chromium.org
+epertoso@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
mtrofin@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 722bbf020e..c43a53fba7 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -16,36 +16,39 @@ namespace compiler {
// static
FieldAccess AccessBuilder::ForMap() {
- FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
- MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(),
+ Type::OtherInternal(), MachineType::AnyTagged(), kMapWriteBarrier};
return access;
}
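// --- Illustration only; not part of the patch --------------------------------
// Every FieldAccess initializer in this file gains a sixth member: the write
// barrier needed when storing to the field. Read positionally, the aggregate
// has this shape (inferred from the initializers here; the authoritative
// definition lives elsewhere under src/compiler/):
#if 0
struct FieldAccess {
  BaseTaggedness base_is_tagged;        // kTaggedBase / kUntaggedBase
  int offset;                           // byte offset from the base pointer
  MaybeHandle<Name> name;               // field name, if any
  Type* type;                           // static type of the stored value
  MachineType machine_type;             // in-memory representation
  WriteBarrierKind write_barrier_kind;  // kNoWriteBarrier for untagged/Smi
                                        // data, kPointerWriteBarrier for known
                                        // heap pointers, kFullWriteBarrier when
                                        // the value may be anything
};
#endif
// ------------------------------------------------------------------------------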
// static
FieldAccess AccessBuilder::ForHeapNumberValue() {
- FieldAccess access = {kTaggedBase, HeapNumber::kValueOffset,
- MaybeHandle<Name>(), TypeCache().Get().kFloat64,
- MachineType::Float64()};
+ FieldAccess access = {kTaggedBase,
+ HeapNumber::kValueOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kFloat64,
+ MachineType::Float64(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
- MaybeHandle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
@@ -54,39 +57,149 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
int index) {
int const offset = map->GetInObjectPropertyOffset(index);
- FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(),
- Type::Tagged(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ MaybeHandle<Name>(),
+ Type::Tagged(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
+FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kPrototypeOrInitialMapOffset,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
- FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
- MaybeHandle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
- FieldAccess access = {kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kSharedFunctionInfoOffset,
+ Handle<Name>(),
+ Type::OtherInternal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionLiterals() {
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kLiteralsOffset, Handle<Name>(),
+ Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kCodeEntryOffset,
+ Handle<Name>(),
+ Type::UntaggedPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
+ FieldAccess access = {kTaggedBase,
+ JSFunction::kNextFunctionLinkOffset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kContextOffset,
+ Handle<Name>(),
+ Type::Internal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
+ TypeCache const& type_cache = TypeCache::Get();
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kContinuationOffset,
+ Handle<Name>(),
+ type_cache.kSmi,
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kInputOrDebugPosOffset,
+ Handle<Name>(),
+ Type::NonInternal(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kOperandStackOffset,
+ Handle<Name>(),
+ Type::Internal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
+ TypeCache const& type_cache = TypeCache::Get();
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kResumeModeOffset,
+ Handle<Name>(),
+ type_cache.kSmi,
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
TypeCache const& type_cache = TypeCache::Get();
- FieldAccess access = {kTaggedBase, JSArray::kLengthOffset, Handle<Name>(),
+ FieldAccess access = {kTaggedBase,
+ JSArray::kLengthOffset,
+ Handle<Name>(),
type_cache.kJSArrayLengthType,
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
if (IsFastDoubleElementsKind(elements_kind)) {
access.type = type_cache.kFixedDoubleArrayLengthType;
+ access.write_barrier_kind = kNoWriteBarrier;
} else if (IsFastElementsKind(elements_kind)) {
access.type = type_cache.kFixedArrayLengthType;
+ access.write_barrier_kind = kNoWriteBarrier;
}
return access;
}
@@ -94,190 +207,358 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
- FieldAccess access = {kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
- MaybeHandle<Name>(), Type::UntaggedPointer(),
- MachineType::Pointer()};
+ FieldAccess access = {kTaggedBase,
+ JSArrayBuffer::kBackingStoreOffset,
+ MaybeHandle<Name>(),
+ Type::UntaggedPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier};
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
- FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
- MaybeHandle<Name>(), TypeCache::Get().kInt8,
- MachineType::Int8()};
+ FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
+ MaybeHandle<Name>(), TypeCache::Get().kUint8,
+ MachineType::Uint32(), kNoWriteBarrier};
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
- FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
- MaybeHandle<Name>(), Type::TaggedPointer(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(),
+ Type::TaggedPointer(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferViewByteLength() {
+ FieldAccess access = {kTaggedBase,
+ JSArrayBufferView::kByteLengthOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kPositiveInteger,
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferViewByteOffset() {
+ FieldAccess access = {kTaggedBase,
+ JSArrayBufferView::kByteOffsetOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kPositiveInteger,
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSTypedArrayLength() {
+ FieldAccess access = {kTaggedBase,
+ JSTypedArray::kLengthOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kJSTypedArrayLengthType,
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
- FieldAccess access = {
- kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
- MaybeHandle<Name>(), Type::Number(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Number(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultDone() {
- FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
- MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
+ Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultValue() {
- FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
- MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
+ Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpFlags() {
- FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
- MaybeHandle<Name>(), Type::Tagged(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kFlagsOffset, MaybeHandle<Name>(),
+ Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpSource() {
- FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
- MaybeHandle<Name>(), Type::Tagged(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kSourceOffset, MaybeHandle<Name>(),
+ Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForFixedArrayLength() {
- FieldAccess access = {
- kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
- TypeCache::Get().kFixedArrayLengthType, MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ FixedArray::kLengthOffset,
+ MaybeHandle<Name>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
+ FieldAccess access = {kTaggedBase,
+ FixedTypedArrayBase::kBasePointerOffset,
+ MaybeHandle<Name>(),
+ Type::Tagged(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
+ FieldAccess access = {kTaggedBase,
+ FixedTypedArrayBase::kExternalPointerOffset,
+ MaybeHandle<Name>(),
+ Type::UntaggedPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
- FieldAccess access = {kTaggedBase, DescriptorArray::kEnumCacheOffset,
- Handle<Name>(), Type::TaggedPointer(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ DescriptorArray::kEnumCacheOffset,
+ Handle<Name>(),
+ Type::TaggedPointer(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
- FieldAccess access = {
- kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ DescriptorArray::kEnumCacheBridgeCacheOffset,
+ Handle<Name>(),
+ Type::TaggedPointer(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField() {
- FieldAccess access = {kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
- TypeCache::Get().kUint8, MachineType::Uint8()};
+ FieldAccess access = {kTaggedBase, Map::kBitFieldOffset,
+ Handle<Name>(), TypeCache::Get().kUint8,
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField3() {
- FieldAccess access = {kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
- TypeCache::Get().kInt32, MachineType::Int32()};
+ FieldAccess access = {kTaggedBase, Map::kBitField3Offset,
+ Handle<Name>(), TypeCache::Get().kInt32,
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
- FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, Map::kDescriptorsOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
- FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
- TypeCache::Get().kUint8, MachineType::Uint8()};
+ FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset,
+ Handle<Name>(), TypeCache::Get().kUint8,
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapPrototype() {
- FieldAccess access = {kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
- Type::TaggedPointer(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
+ Type::TaggedPointer(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
// static
+FieldAccess AccessBuilder::ForNameHashField() {
+ FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
+ Handle<Name>(), Type::Internal(),
+ MachineType::Uint32(), kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForStringLength() {
- FieldAccess access = {kTaggedBase, String::kLengthOffset, Handle<Name>(),
+ FieldAccess access = {kTaggedBase,
+ String::kLengthOffset,
+ Handle<Name>(),
TypeCache::Get().kStringLengthType,
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForConsStringFirst() {
+ FieldAccess access = {
+ kTaggedBase, ConsString::kFirstOffset, Handle<Name>(),
+ Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ return access;
+}
// static
-FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
- FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
- Handle<Name>(), Type::Receiver(),
- MachineType::AnyTagged()};
+FieldAccess AccessBuilder::ForConsStringSecond() {
+ FieldAccess access = {
+ kTaggedBase, ConsString::kSecondOffset, Handle<Name>(),
+ Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForSlicedStringOffset() {
+ FieldAccess access = {
+ kTaggedBase, SlicedString::kOffsetOffset, Handle<Name>(),
+ Type::SignedSmall(), MachineType::AnyTagged(), kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForSlicedStringParent() {
+ FieldAccess access = {
+ kTaggedBase, SlicedString::kParentOffset, Handle<Name>(),
+ Type::String(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalStringResourceData() {
+ FieldAccess access = {kTaggedBase,
+ ExternalString::kResourceDataOffset,
+ Handle<Name>(),
+ Type::UntaggedPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForExternalOneByteStringCharacter() {
+ ElementAccess access = {kUntaggedBase, 0, TypeCache::Get().kUint8,
+ MachineType::Uint8(), kNoWriteBarrier};
+ return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForExternalTwoByteStringCharacter() {
+ ElementAccess access = {kUntaggedBase, 0, TypeCache::Get().kUint16,
+ MachineType::Uint16(), kNoWriteBarrier};
+ return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForSeqOneByteStringCharacter() {
+ ElementAccess access = {kTaggedBase, SeqOneByteString::kHeaderSize,
+ TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
+ ElementAccess access = {kTaggedBase, SeqTwoByteString::kHeaderSize,
+ TypeCache::Get().kUint16, MachineType::Uint16(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
+ FieldAccess access = {kTaggedBase,
+ JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(),
+ Type::Receiver(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
- FieldAccess access = {kTaggedBase, JSGlobalObject::kNativeContextOffset,
- Handle<Name>(), Type::Internal(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSGlobalObject::kNativeContextOffset,
+ Handle<Name>(),
+ Type::Internal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForValue() {
- FieldAccess access = {kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
- Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
+ Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsLength() {
- FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, JSArgumentsObject::kLengthOffset, Handle<Name>(),
+ Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsCallee() {
- FieldAccess access = {kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ JSSloppyArgumentsObject::kCalleeOffset,
+ Handle<Name>(),
+ Type::NonInternal(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
@@ -285,8 +566,12 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
// static
FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
- FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::NonInternal(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -296,8 +581,12 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
- FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -310,32 +599,58 @@ FieldAccess AccessBuilder::ForPropertyCellValue() {
// static
FieldAccess AccessBuilder::ForPropertyCellValue(Type* type) {
- FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
- type, MachineType::AnyTagged()};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForSharedFunctionInfoTypeFeedbackVector() {
- FieldAccess access = {kTaggedBase, SharedFunctionInfo::kFeedbackVectorOffset,
- Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, PropertyCell::kValueOffset, Handle<Name>(),
+ type, MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
-
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Tagged(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kFullWriteBarrier};
+ return access;
+}
+
+// static
+ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged(), kFullWriteBarrier};
+ switch (kind) {
+ case FAST_SMI_ELEMENTS:
+ access.type = TypeCache::Get().kSmi;
+ access.write_barrier_kind = kNoWriteBarrier;
+ break;
+ case FAST_HOLEY_SMI_ELEMENTS:
+ access.type = TypeCache::Get().kHoleySmi;
+ break;
+ case FAST_ELEMENTS:
+ access.type = Type::NonInternal();
+ break;
+ case FAST_HOLEY_ELEMENTS:
+ break;
+ case FAST_DOUBLE_ELEMENTS:
+ access.type = Type::Number();
+ access.write_barrier_kind = kNoWriteBarrier;
+ access.machine_type = MachineType::Float64();
+ break;
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ access.type = Type::Number();
+ access.write_barrier_kind = kNoWriteBarrier;
+ access.machine_type = MachineType::Float64();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
return access;
}
-
// static
ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
ElementAccess access = {kTaggedBase, FixedDoubleArray::kHeaderSize,
- TypeCache::Get().kFloat64, MachineType::Float64()};
+ TypeCache::Get().kFloat64, MachineType::Float64(),
+ kNoWriteBarrier};
return access;
}
@@ -348,56 +663,49 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
switch (type) {
case kExternalInt8Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- MachineType::Int8()};
+ MachineType::Int8(), kNoWriteBarrier};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- MachineType::Uint8()};
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
case kExternalInt16Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- MachineType::Int16()};
+ MachineType::Int16(), kNoWriteBarrier};
return access;
}
case kExternalUint16Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- MachineType::Uint16()};
+ MachineType::Uint16(), kNoWriteBarrier};
return access;
}
case kExternalInt32Array: {
ElementAccess access = {taggedness, header_size, Type::Signed32(),
- MachineType::Int32()};
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
case kExternalUint32Array: {
ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
- MachineType::Uint32()};
+ MachineType::Uint32(), kNoWriteBarrier};
return access;
}
case kExternalFloat32Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- MachineType::Float32()};
+ MachineType::Float32(), kNoWriteBarrier};
return access;
}
case kExternalFloat64Array: {
ElementAccess access = {taggedness, header_size, Type::Number(),
- MachineType::Float64()};
+ MachineType::Float64(), kNoWriteBarrier};
return access;
}
}
UNREACHABLE();
- ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None()};
- return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForStatsCounter() {
- FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
- TypeCache::Get().kInt32, MachineType::Int32()};
+ ElementAccess access = {kUntaggedBase, 0, Type::None(), MachineType::None(),
+ kNoWriteBarrier};
return access;
}
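
For reference, every brace-initialized FieldAccess and ElementAccess above now ends with an explicit WriteBarrierKind. A minimal sketch of the aggregate layout the six-element initializers assume, with member names inferred from the accessors used elsewhere in this patch (the authoritative definition lives in src/compiler/simplified-operator.h):

    struct FieldAccess {
      BaseTaggedness base_is_tagged;        // kTaggedBase or kUntaggedBase
      int offset;                           // field offset in bytes
      MaybeHandle<Name> name;               // optional name, debugging only
      Type* type;                           // static type of the field value
      MachineType machine_type;             // representation used by loads/stores
      WriteBarrierKind write_barrier_kind;  // kNoWriteBarrier, kPointerWriteBarrier
                                            // or kFullWriteBarrier
    };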
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 8375d37600..caaf8f8c06 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_ACCESS_BUILDER_H_
#include "src/compiler/simplified-operator.h"
+#include "src/elements-kind.h"
namespace v8 {
namespace internal {
@@ -34,12 +35,39 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSObject inobject property fields.
static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+ // Provides access to JSFunction::prototype_or_initial_map() field.
+ static FieldAccess ForJSFunctionPrototypeOrInitialMap();
+
// Provides access to JSFunction::context() field.
static FieldAccess ForJSFunctionContext();
// Provides access to JSFunction::shared() field.
static FieldAccess ForJSFunctionSharedFunctionInfo();
+ // Provides access to JSFunction::literals() field.
+ static FieldAccess ForJSFunctionLiterals();
+
+ // Provides access to JSFunction::code() field.
+ static FieldAccess ForJSFunctionCodeEntry();
+
+ // Provides access to JSFunction::next_function_link() field.
+ static FieldAccess ForJSFunctionNextFunctionLink();
+
+ // Provides access to JSGeneratorObject::context() field.
+ static FieldAccess ForJSGeneratorObjectContext();
+
+ // Provides access to JSGeneratorObject::continuation() field.
+ static FieldAccess ForJSGeneratorObjectContinuation();
+
+ // Provides access to JSGeneratorObject::input_or_debug_pos() field.
+ static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
+
+ // Provides access to JSGeneratorObject::operand_stack() field.
+ static FieldAccess ForJSGeneratorObjectOperandStack();
+
+ // Provides access to JSGeneratorObject::resume_mode() field.
+ static FieldAccess ForJSGeneratorObjectResumeMode();
+
// Provides access to JSArray::length() field.
static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
@@ -52,6 +80,15 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSArrayBufferView::buffer() field.
static FieldAccess ForJSArrayBufferViewBuffer();
+ // Provides access to JSArrayBufferView::byteLength() field.
+ static FieldAccess ForJSArrayBufferViewByteLength();
+
+ // Provides access to JSArrayBufferView::byteOffset() field.
+ static FieldAccess ForJSArrayBufferViewByteOffset();
+
+ // Provides access to JSTypedArray::length() field.
+ static FieldAccess ForJSTypedArrayLength();
+
// Provides access to JSDate fields.
static FieldAccess ForJSDateField(JSDate::FieldIndex index);
@@ -70,6 +107,12 @@ class AccessBuilder final : public AllStatic {
// Provides access to FixedArray::length() field.
static FieldAccess ForFixedArrayLength();
+ // Provides access to FixedTypedArrayBase::base_pointer() field.
+ static FieldAccess ForFixedTypedArrayBaseBasePointer();
+
+ // Provides access to FixedTypedArrayBase::external_pointer() field.
+ static FieldAccess ForFixedTypedArrayBaseExternalPointer();
+
// Provides access to DescriptorArray::enum_cache() field.
static FieldAccess ForDescriptorArrayEnumCache();
@@ -91,9 +134,39 @@ class AccessBuilder final : public AllStatic {
// Provides access to Map::prototype() field.
static FieldAccess ForMapPrototype();
+ // Provides access to Name::hash_field() field.
+ static FieldAccess ForNameHashField();
+
// Provides access to String::length() field.
static FieldAccess ForStringLength();
+ // Provides access to ConsString::first() field.
+ static FieldAccess ForConsStringFirst();
+
+ // Provides access to ConsString::second() field.
+ static FieldAccess ForConsStringSecond();
+
+ // Provides access to SlicedString::offset() field.
+ static FieldAccess ForSlicedStringOffset();
+
+ // Provides access to SlicedString::parent() field.
+ static FieldAccess ForSlicedStringParent();
+
+ // Provides access to ExternalString::resource_data() field.
+ static FieldAccess ForExternalStringResourceData();
+
+ // Provides access to ExternalOneByteString characters.
+ static ElementAccess ForExternalOneByteStringCharacter();
+
+ // Provides access to ExternalTwoByteString characters.
+ static ElementAccess ForExternalTwoByteStringCharacter();
+
+ // Provides access to SeqOneByteString characters.
+ static ElementAccess ForSeqOneByteStringCharacter();
+
+ // Provides access to SeqTwoByteString characters.
+ static ElementAccess ForSeqTwoByteStringCharacter();
+
// Provides access to JSGlobalObject::global_proxy() field.
static FieldAccess ForJSGlobalObjectGlobalProxy();
@@ -117,11 +190,9 @@ class AccessBuilder final : public AllStatic {
static FieldAccess ForPropertyCellValue();
static FieldAccess ForPropertyCellValue(Type* type);
- // Provides access to SharedFunctionInfo::feedback_vector() field.
- static FieldAccess ForSharedFunctionInfoTypeFeedbackVector();
-
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
+ static ElementAccess ForFixedArrayElement(ElementsKind kind);
// Provides access to FixedDoubleArray elements.
static ElementAccess ForFixedDoubleArrayElement();
@@ -130,12 +201,6 @@ class AccessBuilder final : public AllStatic {
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
- // ===========================================================================
- // Access to global per-isolate variables (based on external reference).
-
- // Provides access to the backing store of a StatsCounter.
- static FieldAccess ForStatsCounter();
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
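
A hedged sketch of how a lowering pass might consume the new kind-aware overload; graph() and simplified() stand in for the usual reducer helpers and are assumptions, not part of this patch:

    // Smi-only backing stores never hold heap pointers, so the access
    // selected for FAST_SMI_ELEMENTS comes back with kNoWriteBarrier.
    ElementAccess const access =
        AccessBuilder::ForFixedArrayElement(FAST_SMI_ELEMENTS);
    DCHECK_EQ(kNoWriteBarrier, access.write_barrier_kind);
    Node* value = graph()->NewNode(simplified()->LoadElement(access),
                                   elements, index, effect, control);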
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index e38f629c5b..2ba31f540b 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -9,7 +9,7 @@
#include "src/compiler/access-info.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects-inl.h"
#include "src/type-cache.h"
namespace v8 {
@@ -25,6 +25,8 @@ bool CanInlineElementAccess(Handle<Map> map) {
ElementsKind const elements_kind = map->elements_kind();
if (IsFastElementsKind(elements_kind)) return true;
// TODO(bmeurer): Add support for other elements kind.
+ if (elements_kind == UINT8_CLAMPED_ELEMENTS) return false;
+ if (IsFixedTypedArrayElementsKind(elements_kind)) return true;
return false;
}
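
Taken together, the two added lines widen the predicate to fixed typed arrays while keeping clamped stores out of line. A worked illustration, where map_with(kind) is a hypothetical helper returning a Handle<Map> whose elements_kind() is the given kind:

    CanInlineElementAccess(map_with(FAST_HOLEY_ELEMENTS));     // true: fast kind
    CanInlineElementAccess(map_with(INT32_ELEMENTS));          // true: fixed typed array
    CanInlineElementAccess(map_with(UINT8_CLAMPED_ELEMENTS));  // false: excluded above
    CanInlineElementAccess(map_with(DICTIONARY_ELEMENTS));     // false: falls through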
@@ -56,78 +58,111 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
return os;
}
+ElementAccessInfo::ElementAccessInfo() {}
+
+ElementAccessInfo::ElementAccessInfo(MapList const& receiver_maps,
+ ElementsKind elements_kind)
+ : elements_kind_(elements_kind), receiver_maps_(receiver_maps) {}
// static
-PropertyAccessInfo PropertyAccessInfo::NotFound(Type* receiver_type,
+PropertyAccessInfo PropertyAccessInfo::NotFound(MapList const& receiver_maps,
MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(holder, receiver_type);
+ return PropertyAccessInfo(holder, receiver_maps);
}
-
// static
PropertyAccessInfo PropertyAccessInfo::DataConstant(
- Type* receiver_type, Handle<Object> constant,
+ MapList const& receiver_maps, Handle<Object> constant,
MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(holder, constant, receiver_type);
+ return PropertyAccessInfo(kDataConstant, holder, constant, receiver_maps);
}
-
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- Type* receiver_type, FieldIndex field_index, Type* field_type,
- FieldCheck field_check, MaybeHandle<JSObject> holder,
- MaybeHandle<Map> transition_map) {
- return PropertyAccessInfo(holder, transition_map, field_index, field_check,
- field_type, receiver_type);
+ MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
+ MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(holder, transition_map, field_index, field_type,
+ receiver_maps);
}
-
-ElementAccessInfo::ElementAccessInfo() : receiver_type_(Type::None()) {}
-
-
-ElementAccessInfo::ElementAccessInfo(Type* receiver_type,
- ElementsKind elements_kind,
- MaybeHandle<JSObject> holder)
- : elements_kind_(elements_kind),
- holder_(holder),
- receiver_type_(receiver_type) {}
-
+// static
+PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
+ MapList const& receiver_maps, Handle<Object> constant,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
+}
PropertyAccessInfo::PropertyAccessInfo()
- : kind_(kInvalid), receiver_type_(Type::None()), field_type_(Type::Any()) {}
-
+ : kind_(kInvalid), field_type_(Type::None()) {}
PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
- Type* receiver_type)
+ MapList const& receiver_maps)
: kind_(kNotFound),
- receiver_type_(receiver_type),
+ receiver_maps_(receiver_maps),
holder_(holder),
- field_type_(Type::Any()) {}
-
+ field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
+PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
- Type* receiver_type)
- : kind_(kDataConstant),
- receiver_type_(receiver_type),
+ MapList const& receiver_maps)
+ : kind_(kind),
+ receiver_maps_(receiver_maps),
constant_(constant),
holder_(holder),
field_type_(Type::Any()) {}
-
PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map,
- FieldIndex field_index,
- FieldCheck field_check, Type* field_type,
- Type* receiver_type)
+ FieldIndex field_index, Type* field_type,
+ MapList const& receiver_maps)
: kind_(kDataField),
- receiver_type_(receiver_type),
+ receiver_maps_(receiver_maps),
transition_map_(transition_map),
holder_(holder),
field_index_(field_index),
- field_check_(field_check),
field_type_(field_type) {}
+bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
+ if (this->kind_ != that->kind_) return false;
+ if (this->holder_.address() != that->holder_.address()) return false;
+
+ switch (this->kind_) {
+ case kInvalid:
+ break;
+
+ case kNotFound:
+ return true;
+
+ case kDataField: {
+ // Check if we actually access the same field.
+ if (this->transition_map_.address() == that->transition_map_.address() &&
+ this->field_index_ == that->field_index_ &&
+ this->field_type_->Is(that->field_type_) &&
+ that->field_type_->Is(this->field_type_)) {
+ this->receiver_maps_.insert(this->receiver_maps_.end(),
+ that->receiver_maps_.begin(),
+ that->receiver_maps_.end());
+ return true;
+ }
+ return false;
+ }
+
+ case kDataConstant:
+ case kAccessorConstant: {
+ // Check if we actually access the same constant.
+ if (this->constant_.address() == that->constant_.address()) {
+ this->receiver_maps_.insert(this->receiver_maps_.end(),
+ that->receiver_maps_.begin(),
+ that->receiver_maps_.end());
+ return true;
+ }
+ return false;
+ }
+ }
+
+ UNREACHABLE();
+ return false;
+}
AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
Handle<Context> native_context, Zone* zone)
@@ -144,30 +179,8 @@ bool AccessInfoFactory::ComputeElementAccessInfo(
Handle<Map> map, AccessMode access_mode, ElementAccessInfo* access_info) {
// Check if it is safe to inline element access for the {map}.
if (!CanInlineElementAccess(map)) return false;
-
ElementsKind const elements_kind = map->elements_kind();
-
- // Certain (monomorphic) stores need a prototype chain check because shape
- // changes could allow callbacks on elements in the chain that are not
- // compatible with monomorphic keyed stores.
- MaybeHandle<JSObject> holder;
- if (access_mode == AccessMode::kStore && map->prototype()->IsJSObject()) {
- for (PrototypeIterator i(map); !i.IsAtEnd(); i.Advance()) {
- Handle<JSReceiver> prototype =
- PrototypeIterator::GetCurrent<JSReceiver>(i);
- if (!prototype->IsJSObject()) return false;
- // TODO(bmeurer): We do not currently support unstable prototypes.
- // We might want to revisit the way we handle certain keyed stores
- // because this whole prototype chain check is essential a hack,
- // and I'm not sure that it is correct at all with dictionaries in
- // the prototype chain.
- if (!prototype->map()->is_stable()) return false;
- holder = Handle<JSObject>::cast(prototype);
- }
- }
-
- *access_info =
- ElementAccessInfo(Type::Class(map, zone()), elements_kind, holder);
+ *access_info = ElementAccessInfo(MapList{map}, elements_kind);
return true;
}
@@ -261,51 +274,73 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return LookupTransition(receiver_map, name, holder, access_info);
}
}
- if (details.type() == DATA_CONSTANT) {
- *access_info = PropertyAccessInfo::DataConstant(
- Type::Class(receiver_map, zone()),
- handle(descriptors->GetValue(number), isolate()), holder);
- return true;
- } else if (details.type() == DATA) {
- int index = descriptors->GetFieldIndex(number);
- Representation field_representation = details.representation();
- FieldIndex field_index = FieldIndex::ForPropertyIndex(
- *map, index, field_representation.IsDouble());
- Type* field_type = Type::Tagged();
- if (field_representation.IsSmi()) {
- field_type = type_cache_.kSmi;
- } else if (field_representation.IsDouble()) {
- field_type = type_cache_.kFloat64;
- } else if (field_representation.IsHeapObject()) {
- // Extract the field type from the property details (make sure its
- // representation is TaggedPointer to reflect the heap object case).
- field_type = Type::Intersect(
- descriptors->GetFieldType(number)->Convert(zone()),
- Type::TaggedPointer(), zone());
- if (field_type->Is(Type::None())) {
- // Store is not safe if the field type was cleared.
- if (access_mode == AccessMode::kStore) return false;
-
- // The field type was cleared by the GC, so we don't know anything
- // about the contents now.
- // TODO(bmeurer): It would be awesome to make this saner in the
- // runtime/GC interaction.
- field_type = Type::TaggedPointer();
- } else if (!Type::Any()->Is(field_type)) {
- // Add proper code dependencies in case of stable field map(s).
- Handle<Map> field_owner_map(map->FindFieldOwner(number), isolate());
- dependencies()->AssumeFieldType(field_owner_map);
+ switch (details.type()) {
+ case DATA_CONSTANT: {
+ *access_info = PropertyAccessInfo::DataConstant(
+ MapList{receiver_map},
+ handle(descriptors->GetValue(number), isolate()), holder);
+ return true;
+ }
+ case DATA: {
+ int index = descriptors->GetFieldIndex(number);
+ Representation field_representation = details.representation();
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(
+ *map, index, field_representation.IsDouble());
+ Type* field_type = Type::Tagged();
+ if (field_representation.IsSmi()) {
+ field_type = type_cache_.kSmi;
+ } else if (field_representation.IsDouble()) {
+ field_type = type_cache_.kFloat64;
+ } else if (field_representation.IsHeapObject()) {
+ // Extract the field type from the property details (make sure its
+ // representation is TaggedPointer to reflect the heap object case).
+ field_type = Type::Intersect(
+ descriptors->GetFieldType(number)->Convert(zone()),
+ Type::TaggedPointer(), zone());
+ if (field_type->Is(Type::None())) {
+ // Store is not safe if the field type was cleared.
+ if (access_mode == AccessMode::kStore) return false;
+
+ // The field type was cleared by the GC, so we don't know anything
+ // about the contents now.
+ // TODO(bmeurer): It would be awesome to make this saner in the
+ // runtime/GC interaction.
+ field_type = Type::TaggedPointer();
+ } else if (!Type::Any()->Is(field_type)) {
+ // Add proper code dependencies in case of stable field map(s).
+ Handle<Map> field_owner_map(map->FindFieldOwner(number),
+ isolate());
+ dependencies()->AssumeFieldType(field_owner_map);
+ }
+ DCHECK(field_type->Is(Type::TaggedPointer()));
}
- DCHECK(field_type->Is(Type::TaggedPointer()));
+ *access_info = PropertyAccessInfo::DataField(
+ MapList{receiver_map}, field_index, field_type, holder);
+ return true;
+ }
+ case ACCESSOR_CONSTANT: {
+ Handle<Object> accessors(descriptors->GetValue(number), isolate());
+ if (!accessors->IsAccessorPair()) return false;
+ Handle<Object> accessor(
+ access_mode == AccessMode::kLoad
+ ? Handle<AccessorPair>::cast(accessors)->getter()
+ : Handle<AccessorPair>::cast(accessors)->setter(),
+ isolate());
+ if (!accessor->IsJSFunction()) {
+ // TODO(turbofan): Add support for API accessors.
+ return false;
+ }
+ *access_info = PropertyAccessInfo::AccessorConstant(
+ MapList{receiver_map}, accessor, holder);
+ return true;
+ }
+ case ACCESSOR: {
+ // TODO(turbofan): Add support for general accessors?
+ return false;
}
- *access_info = PropertyAccessInfo::DataField(
- Type::Class(receiver_map, zone()), field_index, field_type,
- FieldCheck::kNone, holder);
- return true;
- } else {
- // TODO(bmeurer): Add support for accessors.
- return false;
}
+ UNREACHABLE();
+ return false;
}
// Don't search on the prototype chain for special indices in case of
@@ -327,7 +362,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
.ToHandle(&constructor)) {
map = handle(constructor->initial_map(), isolate());
DCHECK(map->prototype()->IsJSObject());
- } else if (map->prototype()->IsNull()) {
+ } else if (map->prototype()->IsNull(isolate())) {
// Store to property not found on the receiver or any prototype, we need
// to transition to a new data property.
// Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
@@ -337,8 +372,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// The property was not found, return undefined or throw depending
// on the language mode of the load operation.
// Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
- *access_info = PropertyAccessInfo::NotFound(
- Type::Class(receiver_map, zone()), holder);
+ *access_info =
+ PropertyAccessInfo::NotFound(MapList{receiver_map}, holder);
return true;
} else {
return false;
@@ -356,7 +391,6 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return false;
}
-
bool AccessInfoFactory::ComputePropertyAccessInfos(
MapHandleList const& maps, Handle<Name> name, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos) {
@@ -366,7 +400,15 @@ bool AccessInfoFactory::ComputePropertyAccessInfos(
if (!ComputePropertyAccessInfo(map, name, access_mode, &access_info)) {
return false;
}
- access_infos->push_back(access_info);
+ // Try to merge the {access_info} with an existing one.
+ bool merged = false;
+ for (PropertyAccessInfo& other_info : *access_infos) {
+ if (other_info.Merge(&access_info)) {
+ merged = true;
+ break;
+ }
+ }
+ if (!merged) access_infos->push_back(access_info);
}
}
return true;
@@ -400,28 +442,8 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
field_type = type_cache_.kJSArrayLengthType;
}
}
- *access_info = PropertyAccessInfo::DataField(Type::Class(map, zone()),
- field_index, field_type);
- return true;
- }
- // Check for special JSArrayBufferView field accessors.
- if (Accessors::IsJSArrayBufferViewFieldAccessor(map, name, &offset)) {
- FieldIndex field_index = FieldIndex::ForInObjectOffset(offset);
- Type* field_type = Type::Tagged();
- if (Name::Equals(factory()->byte_length_string(), name) ||
- Name::Equals(factory()->byte_offset_string(), name)) {
- // The JSArrayBufferView::byte_length and JSArrayBufferView::byte_offset
- // properties are always numbers in the range [0, kMaxSafeInteger].
- field_type = type_cache_.kPositiveSafeInteger;
- } else if (map->IsJSTypedArrayMap()) {
- DCHECK(Name::Equals(factory()->length_string(), name));
- // The JSTypedArray::length property is always a number in the range
- // [0, kMaxSafeInteger].
- field_type = type_cache_.kPositiveSafeInteger;
- }
- *access_info = PropertyAccessInfo::DataField(
- Type::Class(map, zone()), field_index, field_type,
- FieldCheck::kJSArrayBufferViewBufferNotNeutered);
+ *access_info =
+ PropertyAccessInfo::DataField(MapList{map}, field_index, field_type);
return true;
}
return false;
@@ -472,8 +494,7 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
}
dependencies()->AssumeMapNotDeprecated(transition_map);
*access_info = PropertyAccessInfo::DataField(
- Type::Class(map, zone()), field_index, field_type, FieldCheck::kNone,
- holder, transition_map);
+ MapList{map}, field_index, field_type, holder, transition_map);
return true;
}
return false;
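
The effect of the new Merge logic, sketched with hypothetical maps map1 and map2 that reach the same field with the same field type:

    PropertyAccessInfo a =
        PropertyAccessInfo::DataField(MapList{map1}, field_index, field_type);
    PropertyAccessInfo b =
        PropertyAccessInfo::DataField(MapList{map2}, field_index, field_type);
    if (a.Merge(&b)) {
      // a.receiver_maps() now holds {map1, map2}. ComputePropertyAccessInfos
      // pushes one merged entry instead of two, so later lowering can emit a
      // single field access guarded by a check against both maps.
    }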
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index cae119140a..daa872286f 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -19,7 +19,6 @@ class CompilationDependencies;
class Factory;
class TypeCache;
-
namespace compiler {
// Whether we are loading a property or storing to a property.
@@ -27,64 +26,61 @@ enum class AccessMode { kLoad, kStore };
std::ostream& operator<<(std::ostream&, AccessMode);
+typedef std::vector<Handle<Map>> MapList;
// Mapping of transition source to transition target.
typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
-
// This class encapsulates all information required to access a certain element.
class ElementAccessInfo final {
public:
ElementAccessInfo();
- ElementAccessInfo(Type* receiver_type, ElementsKind elements_kind,
- MaybeHandle<JSObject> holder);
+ ElementAccessInfo(MapList const& receiver_maps, ElementsKind elements_kind);
- MaybeHandle<JSObject> holder() const { return holder_; }
ElementsKind elements_kind() const { return elements_kind_; }
- Type* receiver_type() const { return receiver_type_; }
+ MapList const& receiver_maps() const { return receiver_maps_; }
MapTransitionList& transitions() { return transitions_; }
MapTransitionList const& transitions() const { return transitions_; }
private:
ElementsKind elements_kind_;
- MaybeHandle<JSObject> holder_;
- Type* receiver_type_;
+ MapList receiver_maps_;
MapTransitionList transitions_;
};
-
-// Additional checks that need to be perform for data field accesses.
-enum class FieldCheck : uint8_t {
- // No additional checking needed.
- kNone,
- // Check that the [[ViewedArrayBuffer]] of {JSArrayBufferView}s
- // was not neutered.
- kJSArrayBufferViewBufferNotNeutered,
-};
-
-
// This class encapsulates all information required to access a certain
// object property, either on the object itself or on the prototype chain.
class PropertyAccessInfo final {
public:
- enum Kind { kInvalid, kNotFound, kDataConstant, kDataField };
-
- static PropertyAccessInfo NotFound(Type* receiver_type,
+ enum Kind {
+ kInvalid,
+ kNotFound,
+ kDataConstant,
+ kDataField,
+ kAccessorConstant
+ };
+
+ static PropertyAccessInfo NotFound(MapList const& receiver_maps,
MaybeHandle<JSObject> holder);
- static PropertyAccessInfo DataConstant(Type* receiver_type,
+ static PropertyAccessInfo DataConstant(MapList const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
- Type* receiver_type, FieldIndex field_index, Type* field_type,
- FieldCheck field_check = FieldCheck::kNone,
+ MapList const& receiver_maps, FieldIndex field_index, Type* field_type,
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+ static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
+ Handle<Object> constant,
+ MaybeHandle<JSObject> holder);
PropertyAccessInfo();
+ bool Merge(PropertyAccessInfo const* that) WARN_UNUSED_RESULT;
+
bool IsNotFound() const { return kind() == kNotFound; }
bool IsDataConstant() const { return kind() == kDataConstant; }
bool IsDataField() const { return kind() == kDataField; }
+ bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
bool HasTransitionMap() const { return !transition_map().is_null(); }
@@ -92,27 +88,25 @@ class PropertyAccessInfo final {
MaybeHandle<JSObject> holder() const { return holder_; }
MaybeHandle<Map> transition_map() const { return transition_map_; }
Handle<Object> constant() const { return constant_; }
- FieldCheck field_check() const { return field_check_; }
FieldIndex field_index() const { return field_index_; }
Type* field_type() const { return field_type_; }
- Type* receiver_type() const { return receiver_type_; }
+ MapList const& receiver_maps() const { return receiver_maps_; }
private:
- PropertyAccessInfo(MaybeHandle<JSObject> holder, Type* receiver_type);
- PropertyAccessInfo(MaybeHandle<JSObject> holder, Handle<Object> constant,
- Type* receiver_type);
+ PropertyAccessInfo(MaybeHandle<JSObject> holder,
+ MapList const& receiver_maps);
+ PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
+ Handle<Object> constant, MapList const& receiver_maps);
PropertyAccessInfo(MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
- FieldCheck field_check, Type* field_type,
- Type* receiver_type);
+ Type* field_type, MapList const& receiver_maps);
Kind kind_;
- Type* receiver_type_;
+ MapList receiver_maps_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
FieldIndex field_index_;
- FieldCheck field_check_;
Type* field_type_;
};
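
Downstream consumers accordingly switch from testing a Type*-based class union to iterating the map list directly; a hedged sketch of the shape such a call site takes:

    for (Handle<Map> map : access_info.receiver_maps()) {
      // ... emit or record a map check against {map} ...
    }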
diff --git a/deps/v8/src/compiler/all-nodes.cc b/deps/v8/src/compiler/all-nodes.cc
index ed4a218c2b..8040897fd3 100644
--- a/deps/v8/src/compiler/all-nodes.cc
+++ b/deps/v8/src/compiler/all-nodes.cc
@@ -10,25 +10,33 @@ namespace v8 {
namespace internal {
namespace compiler {
-AllNodes::AllNodes(Zone* local_zone, const Graph* graph)
- : live(local_zone), is_live(graph->NodeCount(), false, local_zone) {
+AllNodes::AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs)
+ : reachable(local_zone),
+ is_reachable_(graph->NodeCount(), false, local_zone),
+ only_inputs_(only_inputs) {
Node* end = graph->end();
- is_live[end->id()] = true;
- live.push_back(end);
- // Find all live nodes reachable from end.
- for (size_t i = 0; i < live.size(); i++) {
- for (Node* const input : live[i]->inputs()) {
- if (input == nullptr) {
- // TODO(titzer): print a warning.
+ is_reachable_[end->id()] = true;
+ reachable.push_back(end);
+ // Find all nodes reachable from end.
+ for (size_t i = 0; i < reachable.size(); i++) {
+ for (Node* input : reachable[i]->inputs()) {
+ if (input == nullptr || input->id() >= graph->NodeCount()) {
continue;
}
- if (input->id() >= graph->NodeCount()) {
- // TODO(titzer): print a warning.
- continue;
+ if (!is_reachable_[input->id()]) {
+ is_reachable_[input->id()] = true;
+ reachable.push_back(input);
}
- if (!is_live[input->id()]) {
- is_live[input->id()] = true;
- live.push_back(input);
+ }
+ if (!only_inputs) {
+ for (Node* use : reachable[i]->uses()) {
+ if (use == nullptr || use->id() >= graph->NodeCount()) {
+ continue;
+ }
+ if (!is_reachable_[use->id()]) {
+ is_reachable_[use->id()] = true;
+ reachable.push_back(use);
+ }
}
}
}
diff --git a/deps/v8/src/compiler/all-nodes.h b/deps/v8/src/compiler/all-nodes.h
index 700f0071b1..36f02e9582 100644
--- a/deps/v8/src/compiler/all-nodes.h
+++ b/deps/v8/src/compiler/all-nodes.h
@@ -16,19 +16,27 @@ namespace compiler {
// from end.
class AllNodes {
public:
- // Constructor. Traverses the graph and builds the {live} sets.
- AllNodes(Zone* local_zone, const Graph* graph);
+  // Constructor. Traverses the graph and builds the {reachable} sets. When
+  // {only_inputs} is true, only nodes reachable through input edges are
+  // collected; these are exactly the live nodes.
+ AllNodes(Zone* local_zone, const Graph* graph, bool only_inputs = true);
bool IsLive(Node* node) {
+ CHECK(only_inputs_);
+ return IsReachable(node);
+ }
+
+ bool IsReachable(Node* node) {
if (!node) return false;
size_t id = node->id();
- return id < is_live.size() && is_live[id];
+ return id < is_reachable_.size() && is_reachable_[id];
}
- NodeVector live; // Nodes reachable from end.
+ NodeVector reachable; // Nodes reachable from end.
private:
- BoolVector is_live;
+ BoolVector is_reachable_;
+ const bool only_inputs_;
};
} // namespace compiler
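
A brief sketch of the widened API, assuming a scratch Zone pointer local_zone is available. With only_inputs left at its default of true the old IsLive() behaviour is unchanged; passing false additionally walks use edges:

    AllNodes inputs_only(local_zone, graph);  // live nodes, as before
    CHECK(inputs_only.IsLive(graph->end()));

    AllNodes all(local_zone, graph, false);   // inputs and uses
    for (Node* node : all.reachable) {
      // IsLive() would CHECK-fail on this instance; use IsReachable().
      CHECK(all.IsReachable(node));
    }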
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index a0b502237b..b8a4b080a9 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -27,30 +27,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- SwVfpRegister OutputFloat32Register(size_t index = 0) {
- return ToFloat32Register(instr_->OutputAt(index));
- }
-
- SwVfpRegister InputFloat32Register(size_t index) {
- return ToFloat32Register(instr_->InputAt(index));
- }
-
- SwVfpRegister ToFloat32Register(InstructionOperand* op) {
- return ToFloat64Register(op).low();
- }
-
- LowDwVfpRegister OutputFloat64Register(size_t index = 0) {
- return ToFloat64Register(instr_->OutputAt(index));
- }
-
- LowDwVfpRegister InputFloat64Register(size_t index) {
- return ToFloat64Register(instr_->InputAt(index));
- }
-
- LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
- return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
- }
-
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
@@ -125,13 +101,16 @@ class ArmOperandConverter final : public InstructionOperandConverter {
case kMode_Operand2_R:
case kMode_Operand2_R_ASR_I:
case kMode_Operand2_R_ASR_R:
- case kMode_Operand2_R_LSL_I:
case kMode_Operand2_R_LSL_R:
case kMode_Operand2_R_LSR_I:
case kMode_Operand2_R_LSR_R:
case kMode_Operand2_R_ROR_I:
case kMode_Operand2_R_ROR_R:
break;
+ case kMode_Operand2_R_LSL_I:
+ *first_index += 3;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+ LSL, InputInt32(index + 2));
case kMode_Offset_RI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -149,7 +128,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -157,8 +136,19 @@ class ArmOperandConverter final : public InstructionOperandConverter {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
-};
+ FloatRegister InputFloat32Register(size_t index) {
+ return ToFloat32Register(instr_->InputAt(index));
+ }
+
+ FloatRegister OutputFloat32Register() {
+ return ToFloat32Register(instr_->Output());
+ }
+
+ FloatRegister ToFloat32Register(InstructionOperand* op) {
+ return LowDwVfpRegister::from_code(ToDoubleRegister(op).code()).low();
+ }
+};
namespace {
@@ -177,10 +167,9 @@ class OutOfLineLoadFloat32 final : public OutOfLineCode {
SwVfpRegister const result_;
};
-
-class OutOfLineLoadFloat64 final : public OutOfLineCode {
+class OutOfLineLoadDouble final : public OutOfLineCode {
public:
- OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
+ OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
@@ -210,7 +199,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode,
+ UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
index_(index),
@@ -218,11 +208,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ unwinding_info_writer_(unwinding_info_writer) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode,
+ UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
index_(no_reg),
@@ -231,7 +224,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ unwinding_info_writer_(unwinding_info_writer) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -248,6 +242,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr);
+ unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
@@ -260,6 +255,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallStub(&stub);
if (must_save_lr_) {
__ Pop(lr);
+ unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
@@ -272,6 +268,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ UnwindingInfoWriter* const unwinding_info_writer_;
};
@@ -317,6 +314,10 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
return vs;
case kNotOverflow:
return vc;
+ case kPositiveOrZero:
+ return pl;
+ case kNegative:
+ return mi;
default:
break;
}
@@ -326,24 +327,22 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
- do { \
- auto result = i.OutputFloat##width##Register(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
- __ b(hs, ool->entry()); \
- __ vldr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+#define ASSEMBLE_CHECKED_LOAD_FP(Type) \
+ do { \
+ auto result = i.Output##Type##Register(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
+ __ b(hs, ool->entry()); \
+ __ vldr(result, i.InputOffset(2)); \
+ __ bind(ool->exit()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
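
For reference, ASSEMBLE_CHECKED_LOAD_FP(Float32) expands to roughly the following; the Double instantiation differs only in the register accessor and the out-of-line class (OutOfLineLoadDouble, renamed above):

    auto result = i.OutputFloat32Register();
    auto offset = i.InputRegister(0);
    if (instr->InputAt(1)->IsRegister()) {
      __ cmp(offset, i.InputRegister(1));
    } else {
      __ cmp(offset, i.InputImmediate(1));
    }
    auto ool = new (zone()) OutOfLineLoadFloat32(this, result);
    __ b(hs, ool->entry());             // index out of bounds: out-of-line path
    __ vldr(result, i.InputOffset(2));  // in bounds: plain VFP load
    __ bind(ool->exit());
    DCHECK_EQ(LeaveCC, i.OutputSBit());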
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -360,21 +359,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.InputFloat##width##Register(2); \
- __ vstr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+#define ASSEMBLE_CHECKED_STORE_FP(Type) \
+ do { \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ auto value = i.Input##Type##Register(2); \
+ __ vstr(value, i.InputOffset(3), lo); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
@@ -388,27 +385,56 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
-void CodeGenerator::AssembleDeconstructFrame() {
- __ LeaveFrame(StackFrame::MANUAL);
-}
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ dmb(ISH); \
+ } while (0)
-void CodeGenerator::AssembleSetupStackPointer() {}
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ dmb(ISH); \
+ __ asm_instr(i.InputRegister(2), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ dmb(ISH); \
+ } while (0)
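
The barrier placement in the two atomic macros above gives the usual ARM mapping for sequentially consistent accesses. ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr), for instance, expands to roughly:

    __ ldr(i.OutputRegister(),
           MemOperand(i.InputRegister(0), i.InputRegister(1)));
    __ dmb(ISH);  // keep the load ordered before any later memory access

The store variant brackets the str with a barrier on each side, ordering earlier accesses before the store and the store before anything that follows.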
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ add(sp, sp, Operand(sp_slot_delta * kPointerSize));
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+    /* Move the result to the double result register. */                      \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+    /* Move the result to the double result register. */                      \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveCC, i.OutputSBit()); \
+ } while (0)
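
Instantiated for a concrete function, ASSEMBLE_IEEE754_UNOP(log), as used by the kIeee754Float64Log case further down, emits approximately:

    FrameScope scope(masm(), StackFrame::MANUAL);
    __ PrepareCallCFunction(0, 1, kScratchReg);
    __ MovToFloatParameter(i.InputDoubleRegister(0));
    __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
    __ MovFromFloatResult(i.OutputDoubleRegister());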
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ sub(sp, sp, Operand(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ LeaveFrame(StackFrame::MANUAL);
+ unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
+}
+
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
if (FLAG_enable_embedded_constant_pool) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
@@ -444,8 +470,118 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void FlushPendingPushRegisters(MacroAssembler* masm,
+ FrameAccessState* frame_access_state,
+ ZoneVector<Register>* pending_pushes) {
+ switch (pending_pushes->size()) {
+ case 0:
+ break;
+ case 1:
+ masm->push((*pending_pushes)[0]);
+ break;
+ case 2:
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ break;
+ case 3:
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ (*pending_pushes)[2]);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ frame_access_state->IncreaseSPDelta(pending_pushes->size());
+ pending_pushes->resize(0);
+}
+
+void AddPendingPushRegister(MacroAssembler* masm,
+ FrameAccessState* frame_access_state,
+ ZoneVector<Register>* pending_pushes,
+ Register reg) {
+ pending_pushes->push_back(reg);
+ if (pending_pushes->size() == 3 || reg.is(ip)) {
+ FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ }
+}
+
+void AdjustStackPointerForTailCall(
+ MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ ZoneVector<Register>* pending_pushes = nullptr,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ if (pending_pushes != nullptr) {
+ FlushPendingPushRegisters(masm, state, pending_pushes);
+ }
+ masm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ if (pending_pushes != nullptr) {
+ FlushPendingPushRegisters(masm, state, pending_pushes);
+ }
+ masm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
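
The helpers above batch up to three registers per push. A hedged trace, with masm, state and zone standing in for the code generator's members, of what AddPendingPushRegister does with three eligible register moves:

    ZoneVector<Register> pending(zone);
    AddPendingPushRegister(masm, state, &pending, r1);  // queued
    AddPendingPushRegister(masm, state, &pending, r2);  // queued
    AddPendingPushRegister(masm, state, &pending, r3);  // size() == 3: flushed
                                                        // as masm->Push(r1, r2, r3)

Values that go through ip (stack-slot and immediate sources) flush immediately, since ip is about to be clobbered by the next move.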
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+ ZoneVector<MoveOperands*> pushes(zone());
+ GetPushCompatibleMoves(instr, flags, &pushes);
+
+ if (!pushes.empty() &&
+ (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+ first_unused_stack_slot)) {
+ ArmOperandConverter g(this, instr);
+ ZoneVector<Register> pending_pushes(zone());
+ for (auto move : pushes) {
+ LocationOperand destination_location(
+ LocationOperand::cast(move->destination()));
+ InstructionOperand source(move->source());
+ AdjustStackPointerForTailCall(
+ masm(), frame_access_state(),
+ destination_location.index() - pending_pushes.size(),
+ &pending_pushes);
+ if (source.IsStackSlot()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ ldr(ip, g.SlotToMemOperand(source_location.index()));
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ ip);
+ } else if (source.IsRegister()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ source_location.GetRegister());
+ } else if (source.IsImmediate()) {
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ ip);
+ } else {
+        // Pushes of non-scalar data types are not supported.
+ UNIMPLEMENTED();
+ }
+ move->Eliminate();
+ }
+ FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ }
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, nullptr, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
ArmOperandConverter i(this, instr);
__ MaybeCheckConstPool();
@@ -469,8 +605,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -485,7 +619,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(ip);
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
+ unwinding_info_writer_.MarkBlockWillExit();
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -513,8 +657,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -524,6 +666,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(ip);
DCHECK_EQ(LeaveCC, i.OutputSBit());
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -534,7 +677,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -561,6 +704,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -571,7 +725,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -594,7 +750,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -610,14 +766,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AddressingModeField::decode(instr->opcode());
if (addressing_mode == kMode_Offset_RI) {
int32_t index = i.InputInt32(1);
- ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
- scratch0, scratch1, mode);
+ ool = new (zone())
+ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+ mode, &unwinding_info_writer_);
__ str(value, MemOperand(object, index));
} else {
DCHECK_EQ(kMode_Offset_RR, addressing_mode);
Register index(i.InputRegister(1));
- ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
- scratch0, scratch1, mode);
+ ool = new (zone())
+ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+ mode, &unwinding_info_writer_);
__ str(value, MemOperand(object, index));
}
__ CheckPageFlag(object, scratch0,
@@ -638,6 +796,72 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ __ vmov(d0, d2);
+ break;
+ }
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
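// Sketch (assumption; the ASSEMBLE_IEEE754_UNOP/BINOP macro bodies fall
// outside this hunk): judging by the analogous kArmVmodF64 sequence below,
// a unary op plausibly lowers to a C library call along these lines:
//   FrameScope scope(masm(), StackFrame::MANUAL);
//   __ PrepareCallCFunction(0, 1, kScratchReg);
//   __ MovToFloatParameter(i.InputDoubleRegister(0));
//   __ CallCFunction(ExternalReference::ieee754_<name>_function(isolate()),
//                    0, 1);
//   __ MovFromFloatResult(i.OutputDoubleRegister());
// which is why the instruction selector (further down) pins these nodes to
// d0/d1 and marks them as calls.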
case kArmAdd:
__ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
i.OutputSBit());
@@ -659,12 +883,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputRegister(2), i.OutputSBit());
break;
case kArmMls: {
- CpuFeatureScope scope(masm(), MLS);
+ CpuFeatureScope scope(masm(), ARMv7);
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmSmull:
+ __ smull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1));
+ break;
case kArmSmmul:
__ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -856,7 +1084,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kArmVcmpF32:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat32Register(0),
i.InputFloat32Register(1));
} else {
@@ -907,45 +1135,45 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVcmpF64:
- if (instr->InputAt(1)->IsDoubleRegister()) {
- __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
DCHECK(i.InputDouble(1) == 0.0);
- __ VFPCompareAndSetFlags(i.InputFloat64Register(0), i.InputDouble(1));
+ __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), i.InputDouble(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF64:
- __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF64:
- __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF64:
- __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF64:
- __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
- i.InputFloat64Register(2));
+ __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF64:
- __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
- i.InputFloat64Register(2));
+ __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+ i.InputDoubleRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF64:
- __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmodF64: {
@@ -953,58 +1181,58 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
- __ MovToFloatParameters(i.InputFloat64Register(0),
- i.InputFloat64Register(1));
+ __ MovToFloatParameters(i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result in the double result register.
- __ MovFromFloatResult(i.OutputFloat64Register());
+ __ MovFromFloatResult(i.OutputDoubleRegister());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVsqrtF64:
- __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVabsF64:
- __ vabs(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVnegF64:
- __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintmF32:
__ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintmF64:
- __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintpF32:
__ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintpF64:
- __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintzF32:
__ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintzF64:
- __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintaF64:
- __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintnF32:
__ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintnF64:
- __ vrintn(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVcvtF32F64: {
- __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
+ __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64F32: {
- __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
+ __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloat32Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1025,14 +1253,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArmVcvtF64S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
+ __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
+ __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1040,6 +1268,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
__ vmov(i.OutputRegister(), scratch);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ __ cmn(i.OutputRegister(), Operand(1));
+ __ mov(i.OutputRegister(), Operand(INT32_MIN), SBit::LeaveCC, vs);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1047,42 +1279,58 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
__ vmov(i.OutputRegister(), scratch);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ cmn(i.OutputRegister(), Operand(1));
+ __ adc(i.OutputRegister(), i.OutputRegister(), Operand::Zero());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
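// Sketch (not part of this patch): both conversions above remap the VFP
// saturation value to a single sentinel that is cheaper to test later.
// In C terms:
//   int32_t s = (int32_t)x;             // vcvt_s32_f32 saturates to INT32_MAX/MIN
//   if (s == INT32_MAX) s = INT32_MIN;  // cmn s, #1 sets V only for INT32_MAX
//   uint32_t u = (uint32_t)x;           // vcvt_u32_f32 saturates to UINT32_MAX
//   if (u == UINT32_MAX) u = 0;         // cmn u, #1 sets C only for UINT32_MAX;
//                                       // adc u, u, #0 then wraps it to 0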
case kArmVcvtS32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
+ __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
+ __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVmovU32F32:
+ __ vmov(i.OutputRegister(), i.InputFloat32Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovF32U32:
+ __ vmov(i.OutputFloat32Register(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmVmovLowU32F64:
- __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
+ __ VmovLow(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovLowF64U32:
- __ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
+ __ VmovLow(i.OutputDoubleRegister(), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovHighU32F64:
- __ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
+ __ VmovHigh(i.OutputRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovHighF64U32:
- __ VmovHigh(i.OutputFloat64Register(), i.InputRegister(1));
+ __ VmovHigh(i.OutputDoubleRegister(), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovF64U32U32:
- __ vmov(i.OutputFloat64Register(), i.InputRegister(0),
- i.InputRegister(1));
+ __ vmov(i.OutputDoubleRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovU32U32F64:
+ __ vmov(i.OutputRegister(0), i.OutputRegister(1),
+ i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmLdrb:
@@ -1093,63 +1341,203 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArmStrb: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ strb(i.InputRegister(index), operand);
+ case kArmStrb:
+ __ strb(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
break;
- case kArmStrh: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ strh(i.InputRegister(index), operand);
+ case kArmStrh:
+ __ strh(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
break;
- case kArmStr: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ str(i.InputRegister(index), operand);
+ case kArmStr:
+ __ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmVldrF32: {
__ vldr(i.OutputFloat32Register(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmVstrF32: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ vstr(i.InputFloat32Register(index), operand);
+ case kArmVstrF32:
+ __ vstr(i.InputFloat32Register(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- }
case kArmVldrF64:
- __ vldr(i.OutputFloat64Register(), i.InputOffset());
- DCHECK_EQ(LeaveCC, i.OutputSBit());
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVstrF64:
+ __ vstr(i.InputDoubleRegister(0), i.InputOffset(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmFloat32Max: {
+ FloatRegister left_reg = i.InputFloat32Register(0);
+ FloatRegister right_reg = i.InputFloat32Register(1);
+ FloatRegister result_reg = i.OutputFloat32Register();
+ Label result_is_nan, return_left, return_right, check_zero, done;
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
+ __ b(mi, &return_right);
+ __ b(gt, &return_left);
+ __ b(vs, &result_is_nan);
+ // Left equals right => check for -0.
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
+ if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+ __ b(ne, &done); // left == right != 0.
+ } else {
+ __ b(ne, &return_left); // left == right != 0.
+ }
+ // At this point, both left and right are either 0 or -0.
+ // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+ // the decision for vadd is easy because vand is a NEON instruction.
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
+ __ bind(&result_is_nan);
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
+ __ bind(&return_right);
+ __ Move(result_reg, right_reg);
+ if (!left_reg.is(result_reg)) __ b(&done);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
break;
- case kArmVstrF64: {
- size_t index = 0;
- MemOperand operand = i.InputOffset(&index);
- __ vstr(i.InputFloat64Register(index), operand);
- DCHECK_EQ(LeaveCC, i.OutputSBit());
+ }
+ case kArmFloat64Max: {
+ DwVfpRegister left_reg = i.InputDoubleRegister(0);
+ DwVfpRegister right_reg = i.InputDoubleRegister(1);
+ DwVfpRegister result_reg = i.OutputDoubleRegister();
+ Label result_is_nan, return_left, return_right, check_zero, done;
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
+ __ b(mi, &return_right);
+ __ b(gt, &return_left);
+ __ b(vs, &result_is_nan);
+ // Left equals right => check for -0.
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
+ if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+ __ b(ne, &done); // left == right != 0.
+ } else {
+ __ b(ne, &return_left); // left == right != 0.
+ }
+ // At this point, both left and right are either 0 or -0.
+ // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+ // the decision for vadd is easy because vand is a NEON instruction.
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
+ __ bind(&result_is_nan);
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
+ __ bind(&return_right);
+ __ Move(result_reg, right_reg);
+ if (!left_reg.is(result_reg)) __ b(&done);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ break;
+ }
+ case kArmFloat32Min: {
+ FloatRegister left_reg = i.InputFloat32Register(0);
+ FloatRegister right_reg = i.InputFloat32Register(1);
+ FloatRegister result_reg = i.OutputFloat32Register();
+ Label result_is_nan, return_left, return_right, check_zero, done;
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
+ __ b(mi, &return_left);
+ __ b(gt, &return_right);
+ __ b(vs, &result_is_nan);
+ // Left equals right => check for -0.
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
+ if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+ __ b(ne, &done); // left == right != 0.
+ } else {
+ __ b(ne, &return_left); // left == right != 0.
+ }
+ // At this point, both left and right are either 0 or -0.
+ // We could use a single 'vorr' instruction here if we had NEON support.
+ // The algorithm is: -((-L) + (-R)), which in case of L and R being
+ // different registers is most efficiently expressed as -((-L) - R).
+ __ vneg(left_reg, left_reg);
+ if (left_reg.is(right_reg)) {
+ __ vadd(result_reg, left_reg, right_reg);
+ } else {
+ __ vsub(result_reg, left_reg, right_reg);
+ }
+ __ vneg(result_reg, result_reg);
+ __ b(&done);
+ __ bind(&result_is_nan);
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
+ __ bind(&return_right);
+ __ Move(result_reg, right_reg);
+ if (!left_reg.is(result_reg)) __ b(&done);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ break;
+ }
+ case kArmFloat64Min: {
+ DwVfpRegister left_reg = i.InputDoubleRegister(0);
+ DwVfpRegister right_reg = i.InputDoubleRegister(1);
+ DwVfpRegister result_reg = i.OutputDoubleRegister();
+ Label result_is_nan, return_left, return_right, check_zero, done;
+ __ VFPCompareAndSetFlags(left_reg, right_reg);
+ __ b(mi, &return_left);
+ __ b(gt, &return_right);
+ __ b(vs, &result_is_nan);
+ // Left equals right => check for -0.
+ __ VFPCompareAndSetFlags(left_reg, 0.0);
+ if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+ __ b(ne, &done); // left == right != 0.
+ } else {
+ __ b(ne, &return_left); // left == right != 0.
+ }
+ // At this point, both left and right are either 0 or -0.
+ // We could use a single 'vorr' instruction here if we had NEON support.
+ // The algorithm is: -((-L) + (-R)), which in case of L and R being
+ // different registers is most efficiently expressed as -((-L) - R).
+ __ vneg(left_reg, left_reg);
+ if (left_reg.is(right_reg)) {
+ __ vadd(result_reg, left_reg, right_reg);
+ } else {
+ __ vsub(result_reg, left_reg, right_reg);
+ }
+ __ vneg(result_reg, result_reg);
+ __ b(&done);
+ __ bind(&result_is_nan);
+ __ vadd(result_reg, left_reg, right_reg);
+ __ b(&done);
+ __ bind(&return_right);
+ __ Move(result_reg, right_reg);
+ if (!left_reg.is(result_reg)) __ b(&done);
+ __ bind(&return_left);
+ __ Move(result_reg, left_reg);
+ __ bind(&done);
+ break;
+ }
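// Sketch (not part of this patch): all four Min/Max blocks above implement
// the same JS-style semantics. Roughly, for Max:
//   if (l > r) return l;
//   if (r > l) return r;
//   if (l != r) return l + r;  // unordered: vadd turns the NaN quiet
//   if (l != 0) return l;      // equal, non-zero: either side works
//   return l + r;              // zeros: (+0) + (-0) == +0, so +0 wins
// and for Min the zero case becomes -((-l) + (-r)), which yields -0
// whenever either side is -0.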
+ case kArmFloat64SilenceNaN: {
+ DwVfpRegister value = i.InputDoubleRegister(0);
+ DwVfpRegister result = i.OutputDoubleRegister();
+ __ VFPCanonicalizeNaN(result, value);
break;
}
case kArmPush:
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ if (instr->InputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ vpush(i.InputFloat32Register(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
} else {
__ push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -1178,10 +1566,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+ ASSEMBLE_CHECKED_LOAD_FP(Float32);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+ ASSEMBLE_CHECKED_LOAD_FP(Double);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(strb);
@@ -1193,16 +1581,43 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_INTEGER(str);
break;
case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(32);
+ ASSEMBLE_CHECKED_STORE_FP(Float32);
break;
case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(64);
+ ASSEMBLE_CHECKED_STORE_FP(Double);
break;
case kCheckedLoadWord64:
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
+ break;
+
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+ break;
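// Note (assumption; the macro bodies are outside this hunk): aligned
// accesses up to word size are single-copy atomic on ARM, so the
// ASSEMBLE_ATOMIC_* macros presumably combine the plain ldr*/str* shown
// here with dmb(ISH) barriers to get acquire/release ordering, e.g.:
//   load:  ldr rT, [rN, rM] ; dmb ish
//   store: dmb ish ; str rT, [rN, rM] ; dmb ish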
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1263,20 +1678,50 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
+ return kSuccess;
}
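// Sketch (assumption; the declarations are outside this diff): the new
// CodeGenResult plumbing used here and in the kArchDeoptimize case above
// is along the lines of
//   enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };
// A nullptr from GetDeoptimizationEntry() means the deoptimization entry
// table is exhausted, and code generation now bails out cleanly instead
// of crashing.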
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ }
+
+ if (saves_fp != 0) {
+ // Save callee-saved FP registers.
+ STATIC_ASSERT(DwVfpRegister::kMaxNumRegisters == 32);
+ uint32_t last = base::bits::CountLeadingZeros32(saves_fp) - 1;
+ uint32_t first = base::bits::CountTrailingZeros32(saves_fp);
+ DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
+ frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
+ (kDoubleSize / kPointerSize));
+ }
+ const RegList saves = FLAG_enable_embedded_constant_pool
+ ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // Save callee-saved registers.
+ frame->AllocateSavedCalleeRegisterSlots(
+ base::bits::CountPopulation32(saves));
+ }
+}
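// Worked example (not part of this patch): assuming the AAPCS callee-saved
// FP set d8-d15, saves_fp == 0x0000ff00, so
//   first = CountTrailingZeros32(0x0000ff00)     ==  8
//   last  = CountLeadingZeros32(0x0000ff00) - 1  == 15
//   last - first + 1 == 8 == CountPopulation32(saves_fp)   // DCHECK holds
// and 8 doubles * (kDoubleSize / kPointerSize == 2) == 16 slots are
// reserved in the frame.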
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1293,9 +1738,14 @@ void CodeGenerator::AssemblePrologue() {
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
+
+ if (!info()->GeneratePreagedPrologue()) {
+ unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
+ }
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1306,15 +1756,12 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ sub(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(sp, sp, Operand(shrink_slots * kPointerSize));
}
if (saves_fp != 0) {
@@ -1325,8 +1772,6 @@ void CodeGenerator::AssemblePrologue() {
DCHECK_EQ((last - first + 1), base::bits::CountPopulation32(saves_fp));
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
- frame()->AllocateSavedCalleeRegisterSlots((last - first + 1) *
- (kDoubleSize / kPointerSize));
}
const RegList saves = FLAG_enable_embedded_constant_pool
? (descriptor->CalleeSavedRegisters() & ~pp.bit())
@@ -1334,8 +1779,6 @@ void CodeGenerator::AssemblePrologue() {
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
- frame()->AllocateSavedCalleeRegisterSlots(
- base::bits::CountPopulation32(saves));
}
}
@@ -1362,6 +1805,8 @@ void CodeGenerator::AssembleReturn() {
DwVfpRegister::from_code(last));
}
+ unwinding_info_writer_.MarkBlockWillExit();
+
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
@@ -1408,7 +1853,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ mov(dst, Operand(src.ToInt32()));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ mov(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kInt64:
UNREACHABLE();
@@ -1443,7 +1894,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
@@ -1453,32 +1904,32 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DwVfpRegister dst = destination->IsDoubleRegister()
- ? g.ToFloat64Register(destination)
+ DwVfpRegister dst = destination->IsFPRegister()
+ ? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64(), kScratchReg);
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ vldr(g.ToDoubleRegister(destination), src);
+ if (destination->IsFPRegister()) {
+ __ vldr(g.ToDoubleRegister(destination), src);
} else {
- DwVfpRegister temp = kScratchDoubleReg;
- __ vldr(temp, src);
- __ vstr(temp, g.ToMemOperand(destination));
+ DCHECK(destination->IsFPStackSlot());
+ DwVfpRegister temp = kScratchDoubleReg;
+ __ vldr(temp, src);
+ __ vstr(temp, g.ToMemOperand(destination));
}
} else {
UNREACHABLE();
@@ -1517,35 +1968,35 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vldr(temp_1, dst);
__ str(temp_0, dst);
__ vstr(temp_1, src);
- } else if (source->IsDoubleRegister()) {
- DwVfpRegister temp = kScratchDoubleReg;
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Move(temp, src);
- __ Move(src, dst);
- __ Move(dst, temp);
- } else {
- DCHECK(destination->IsDoubleStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ vldr(src, dst);
- __ vstr(temp, dst);
- }
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPRegister()) {
+ LowDwVfpRegister temp = kScratchDoubleReg;
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(temp, src);
+ __ Move(src, dst);
+ __ Move(dst, temp);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
- DwVfpRegister temp_1 = kScratchDoubleReg;
+ LowDwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -1559,11 +2010,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // On 32-bit ARM we do not insert nops for inlined Smi code.
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 5e6f5c96a5..07c4033bd6 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -27,6 +27,7 @@ namespace compiler {
V(ArmMul) \
V(ArmMla) \
V(ArmMls) \
+ V(ArmSmull) \
V(ArmSmmul) \
V(ArmSmmla) \
V(ArmUmull) \
@@ -92,15 +93,23 @@ namespace compiler {
V(ArmVcvtU32F32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
V(ArmVmovLowU32F64) \
V(ArmVmovLowF64U32) \
V(ArmVmovHighU32F64) \
V(ArmVmovHighF64U32) \
V(ArmVmovF64U32U32) \
+ V(ArmVmovU32U32F64) \
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
V(ArmVstrF64) \
+ V(ArmFloat32Max) \
+ V(ArmFloat64Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
V(ArmLdrb) \
V(ArmLdrsb) \
V(ArmStrb) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 466765ee4a..3f38e5ddef 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -30,6 +30,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmMla:
case kArmMls:
case kArmSmmul:
+ case kArmSmull:
case kArmSmmla:
case kArmUmull:
case kArmSdiv:
@@ -94,11 +95,19 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmVcvtU32F32:
case kArmVcvtS32F64:
case kArmVcvtU32F64:
+ case kArmVmovU32F32:
+ case kArmVmovF32U32:
case kArmVmovLowU32F64:
case kArmVmovLowF64U32:
case kArmVmovHighU32F64:
case kArmVmovHighF64U32:
case kArmVmovF64U32U32:
+ case kArmVmovU32U32F64:
+ case kArmFloat32Max:
+ case kArmFloat64Max:
+ case kArmFloat32Min:
+ case kArmFloat64Min:
+ case kArmFloat64SilenceNaN:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 76d9e3c66d..4b0b6afb44 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -115,6 +115,24 @@ bool TryMatchShift(InstructionSelector* selector,
return false;
}
+template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
+ AddressingMode kImmMode>
+bool TryMatchShiftImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
+ ArmOperandGenerator g(selector);
+ if (node->opcode() == kOpcode) {
+ Int32BinopMatcher m(node);
+ if (m.right().IsInRange(kImmMin, kImmMax)) {
+ *opcode_return |= AddressingModeField::encode(kImmMode);
+ *value_return = g.UseRegister(m.left().node());
+ *shift_return = g.UseImmediate(m.right().node());
+ return true;
+ }
+ }
+ return false;
+}
bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand* value_return,
@@ -142,6 +160,14 @@ bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
value_return, shift_return);
}
+bool TryMatchLSLImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ InstructionOperand* value_return,
+ InstructionOperand* shift_return) {
+ return TryMatchShiftImmediate<IrOpcode::kWord32Shl, 0, 31,
+ kMode_Operand2_R_LSL_I>(
+ selector, opcode_return, node, value_return, shift_return);
+}
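// Sketch (not part of this patch): TryMatchLSLImmediate lets a load such as
//   Load(base, Word32Shl(index, Int32Constant(k)))   with 0 <= k <= 31
// fold the shift into the address (addressing mode kMode_Operand2_R_LSL_I):
//   ldr rT, [rBase, rIndex, LSL #k]
// EmitLoad/EmitStore below apply this only to kArmLdr/kArmStr.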
bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
Node* node, InstructionOperand* value_return,
@@ -226,7 +252,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -240,7 +273,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -294,16 +327,57 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode div_opcode,
InstructionOperand right_operand = g.UseRegister(m.right().node());
EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
left_operand, right_operand);
- if (selector->IsSupported(MLS)) {
+ if (selector->IsSupported(ARMv7)) {
selector->Emit(kArmMls, result_operand, div_operand, right_operand,
left_operand);
} else {
InstructionOperand mul_operand = g.TempRegister();
selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
- selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+ selector->Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
+ result_operand, left_operand, mul_operand);
}
}
+void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand* output, Node* base, Node* index) {
+ ArmOperandGenerator g(selector);
+ InstructionOperand inputs[3];
+ size_t input_count = 2;
+
+ inputs[0] = g.UseRegister(base);
+ if (g.CanBeImmediate(index, opcode)) {
+ inputs[1] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RI);
+ } else if ((opcode == kArmLdr) &&
+ TryMatchLSLImmediate(selector, &opcode, index, &inputs[1],
+ &inputs[2])) {
+ input_count = 3;
+ } else {
+ inputs[1] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ }
+ selector->Emit(opcode, 1, output, input_count, inputs);
+}
+
+void EmitStore(InstructionSelector* selector, InstructionCode opcode,
+ size_t input_count, InstructionOperand* inputs,
+ Node* index) {
+ ArmOperandGenerator g(selector);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RI);
+ } else if ((opcode == kArmStr) &&
+ TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
+ &inputs[3])) {
+ input_count = 4;
+ } else {
+ inputs[input_count++] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_Offset_RR);
+ }
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
+}
+
} // namespace
@@ -313,7 +387,7 @@ void InstructionSelector::VisitLoad(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kArmVldrF32;
@@ -328,6 +402,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kArmLdrh : kArmLdrsh;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kArmLdr;
@@ -339,13 +415,8 @@ void InstructionSelector::VisitLoad(Node* node) {
return;
}
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
- }
+ InstructionOperand output = g.DefineAsRegister(node);
+ EmitLoad(this, opcode, &output, base, index);
}
@@ -397,7 +468,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ InstructionCode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kArmVstrF32;
@@ -412,6 +483,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kArmStrh;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kArmStr;
@@ -423,16 +496,129 @@ void InstructionSelector::VisitStore(Node* node) {
return;
}
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseRegister(value);
+ inputs[input_count++] = g.UseRegister(base);
+ EmitStore(this, opcode, input_count, inputs, index);
+ }
+}
+
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+ UnalignedLoadRepresentation load_rep =
+ UnalignedLoadRepresentationOf(node->op());
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ InstructionCode opcode = kArmLdr;
+ // Only floating point loads need to be specially handled; integer loads
+ // support unaligned access. We support unaligned FP loads by loading to
+ // integer registers first, then moving to the destination FP register.
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32: {
+ InstructionOperand temp = g.TempRegister();
+ EmitLoad(this, opcode, &temp, base, index);
+ Emit(kArmVmovF32U32, g.DefineAsRegister(node), temp);
+ return;
}
+ case MachineRepresentation::kFloat64: {
+ // TODO(arm): use vld1.8 for this when NEON is available.
+ // Compute the address of the least-significant half of the FP value.
+ // We assume that the base node is unlikely to be an encodable immediate
+ // or the result of a shift operation, so only consider the addressing
+ // mode that should be used for the index node.
+ InstructionCode add_opcode = kArmAdd;
+ InstructionOperand inputs[3];
+ inputs[0] = g.UseRegister(base);
+
+ size_t input_count;
+ if (TryMatchImmediateOrShift(this, &add_opcode, index, &input_count,
+ &inputs[1])) {
+ // input_count has been set by TryMatchImmediateOrShift(), so increment
+ // it to account for the base register in inputs[0].
+ input_count++;
+ } else {
+ add_opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[1] = g.UseRegister(index);
+ input_count = 2; // Base register and index.
+ }
+
+ InstructionOperand addr = g.TempRegister();
+ Emit(add_opcode, 1, &addr, input_count, inputs);
+
+ // Load both halves and move to an FP register.
+ InstructionOperand fp_lo = g.TempRegister();
+ InstructionOperand fp_hi = g.TempRegister();
+ opcode |= AddressingModeField::encode(kMode_Offset_RI);
+ Emit(opcode, fp_lo, addr, g.TempImmediate(0));
+ Emit(opcode, fp_hi, addr, g.TempImmediate(4));
+ Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), fp_lo, fp_hi);
+ return;
+ }
+ default:
+ // All other cases should support unaligned accesses.
+ UNREACHABLE();
+ return;
}
}
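// Sketch (not part of this patch): in C terms the kFloat64 path above is
// the usual little-endian reassembly through integer registers:
//   uint32_t lo, hi;
//   memcpy(&lo, p, 4);        // unaligned-safe integer load (low half)
//   memcpy(&hi, p + 4, 4);    // high half
//   uint64_t bits = ((uint64_t)hi << 32) | lo;  // vmov dN, r_lo, r_hi
//   double result;
//   memcpy(&result, &bits, 8);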
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+
+ UnalignedStoreRepresentation store_rep =
+ UnalignedStoreRepresentationOf(node->op());
+
+ // Only floating point stores need to be specially handled; integer stores
+ // support unaligned access. We support unaligned FP stores by moving the
+ // value to integer registers first, then storing to the destination address.
+ switch (store_rep) {
+ case MachineRepresentation::kFloat32: {
+ inputs[input_count++] = g.TempRegister();
+ Emit(kArmVmovU32F32, inputs[0], g.UseRegister(value));
+ inputs[input_count++] = g.UseRegister(base);
+ EmitStore(this, kArmStr, input_count, inputs, index);
+ return;
+ }
+ case MachineRepresentation::kFloat64: {
+ // TODO(arm): use vst1.8 for this when NEON is available.
+ // Store a 64-bit floating point value using two 32-bit integer stores.
+ // Computing the store address here would require three live temporary
+ // registers (fp<63:32>, fp<31:0>, address), so compute base + 4 after
+ // storing the least-significant half of the value.
+
+ // First, move the 64-bit FP value into two temporary integer registers.
+ InstructionOperand fp[] = {g.TempRegister(), g.TempRegister()};
+ inputs[input_count++] = g.UseRegister(value);
+      Emit(kArmVmovU32U32F64, arraysize(fp), fp, input_count, inputs);
+
+ // Store the least-significant half.
+ inputs[0] = fp[0]; // Low 32-bits of FP value.
+ inputs[input_count++] = g.UseRegister(base); // First store base address.
+ EmitStore(this, kArmStr, input_count, inputs, index);
+
+ // Store the most-significant half.
+ InstructionOperand base4 = g.TempRegister();
+ Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_I), base4,
+ g.UseRegister(base), g.TempImmediate(4)); // Compute base + 4.
+ inputs[0] = fp[1]; // High 32-bits of FP value.
+ inputs[1] = base4; // Second store base + 4 address.
+ EmitStore(this, kArmStr, input_count, inputs, index);
+ return;
+ }
+ default:
+ // All other cases should support unaligned accesses.
+ UNREACHABLE();
+ return;
+ }
+}
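// Sketch (not part of this patch): the kFloat64 store path mirrors the
// load: kArmVmovU32U32F64 splits the value into two integer registers, then
//   str r_lo, [base + index]     // least-significant half first
//   add base4, base, #4
//   str r_hi, [base4 + index]    // most-significant half
// Deferring the base + 4 add avoids keeping three temporaries (both halves
// plus the address) live at the same time.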
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -458,6 +644,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
@@ -500,6 +688,8 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
opcode = kCheckedStoreFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
@@ -697,7 +887,7 @@ void VisitShift(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -900,6 +1090,9 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) {
VisitRR(this, kArmRbit, node);
}
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
@@ -1022,7 +1215,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
void InstructionSelector::VisitInt32Sub(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
- if (IsSupported(MLS) && m.right().IsInt32Mul() &&
+ if (IsSupported(ARMv7) && m.right().IsInt32Mul() &&
CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
@@ -1032,6 +1225,38 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
VisitBinop(this, node, kArmSub, kArmRsb);
}
+namespace {
+
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand result_operand = g.DefineAsRegister(node);
+ InstructionOperand temp_operand = g.TempRegister();
+ InstructionOperand outputs[] = {result_operand, temp_operand};
+ InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node())};
+ selector->Emit(kArmSmull, 2, outputs, 2, inputs);
+
+  // The comparison takes the low result as a shifted operand (ASR #31).
+ InstructionOperand shift_31 = g.UseImmediate(31);
+ InstructionCode opcode = cont->Encode(kArmCmp) |
+ AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), temp_operand, result_operand, shift_31,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ InstructionOperand in[] = {temp_operand, result_operand, shift_31};
+ selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
+ cont->frame_state());
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
+ result_operand, shift_31);
+ }
+}
+
+} // namespace
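// Sketch (not part of this patch): smull produces the full 64-bit product,
// and the product fits in 32 bits exactly when the high word equals the
// sign extension of the low word, which is what the trailing cmp tests:
//   int64_t p = (int64_t)a * (int64_t)b;
//   int32_t lo = (int32_t)p, hi = (int32_t)(p >> 32);
//   bool overflow = hi != (lo >> 31);  // cmp hi, lo ASR #31 -> ne on overflow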
void InstructionSelector::VisitInt32Mul(Node* node) {
ArmOperandGenerator g(this);
@@ -1142,31 +1367,22 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kArmVcvtF32F64, node);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kArmVcvtS32F64, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kArmVcvtS32F64, node);
+}
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- VisitRR(this, kArmVmovLowU32F64, node);
+ VisitRR(this, kArmVmovU32F32, node);
}
-
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- ArmOperandGenerator g(this);
- Emit(kArmVmovLowF64U32, g.DefineAsRegister(node),
- ImmediateOperand(ImmediateOperand::INLINE, 0),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kArmVmovF32U32, node);
}
-
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);
@@ -1208,15 +1424,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
VisitRRR(this, kArmVaddF64, node);
}
-
void InstructionSelector::VisitFloat32Sub(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- Emit(kArmVnegF32, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- return;
- }
if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
Float32BinopMatcher mright(m.right().node());
Emit(kArmVmlsF32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
@@ -1227,27 +1437,9 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kArmVsubF32, node);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- if (m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kArmVrintpF64, g.DefineAsRegister(node),
- g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
- Emit(kArmVnegF64, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- return;
- }
if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
Float64BinopMatcher mright(m.right().node());
Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
@@ -1258,7 +1450,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
VisitRRR(this, kArmVsubF64, node);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kArmVmulF32, node);
}
@@ -1285,18 +1476,25 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRR(this, kArmFloat32Max, node);
+}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRRR(this, kArmFloat64Max, node);
+}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kArmFloat64SilenceNaN, node);
+}
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRR(this, kArmFloat32Min, node);
+}
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRRR(this, kArmFloat64Min, node);
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kArmVabsF32, node);
@@ -1307,7 +1505,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kArmVabsF64, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kArmVsqrtF32, node);
}
@@ -1362,6 +1559,28 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, kArmVrintnF64, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kArmVnegF32, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kArmVnegF64, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ ArmOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+ g.UseFixed(node->InputAt(1), d1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ ArmOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1371,7 +1590,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1410,7 +1629,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1456,6 +1675,101 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
}
+// Check whether we can convert:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+// We only generate conditions <cond'> that are a combination of the N
+// and Z flags. This avoids the need to make this function dependent on
+// the flag-setting operation.
+bool CanUseFlagSettingBinop(FlagsCondition cond) {
+ switch (cond) {
+ case kEqual:
+ case kNotEqual:
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual: // x <= 0 -> x == 0
+ case kUnsignedGreaterThan: // x > 0 -> x != 0
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Map <cond> to <cond'> so that the following transformation is possible:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
+ DCHECK(CanUseFlagSettingBinop(cond));
+ switch (cond) {
+ case kEqual:
+ case kNotEqual:
+ return cond;
+ case kSignedLessThan:
+ return kNegative;
+ case kSignedGreaterThanOrEqual:
+ return kPositiveOrZero;
+ case kUnsignedLessThanOrEqual: // x <= 0 -> x == 0
+ return kEqual;
+ case kUnsignedGreaterThan: // x > 0 -> x != 0
+ return kNotEqual;
+ default:
+ UNREACHABLE();
+ return cond;
+ }
+}
+
+// Check if we can perform the transformation:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>, and if so,
+// updates {node}, {opcode} and {cont} accordingly.
+void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
+ Node** node, Node* binop,
+ InstructionCode* opcode,
+ FlagsCondition cond,
+ FlagsContinuation* cont) {
+ InstructionCode binop_opcode;
+ InstructionCode no_output_opcode;
+ switch (binop->opcode()) {
+ case IrOpcode::kInt32Add:
+ binop_opcode = kArmAdd;
+ no_output_opcode = kArmCmn;
+ break;
+ case IrOpcode::kWord32And:
+ binop_opcode = kArmAnd;
+ no_output_opcode = kArmTst;
+ break;
+ case IrOpcode::kWord32Or:
+ binop_opcode = kArmOrr;
+ no_output_opcode = kArmOrr;
+ break;
+ case IrOpcode::kWord32Xor:
+ binop_opcode = kArmEor;
+ no_output_opcode = kArmTeq;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (selector->CanCover(*node, binop)) {
+ // The comparison is the only user of {node}.
+ cont->Overwrite(MapForFlagSettingBinop(cond));
+ *opcode = no_output_opcode;
+ *node = binop;
+ } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
+ // We can also handle the case where the {node} and the comparison are in
+ // the same basic block, and the comparison is the only user of {node} in
+ // this basic block ({node} has users in other basic blocks).
+ cont->Overwrite(MapForFlagSettingBinop(cond));
+ *opcode = binop_opcode;
+ *node = binop;
+ }
+}
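// Example (not part of this patch) of the rewrite these helpers enable for
// a comparison against zero:
//   add r0, r1, r2 ; cmp r0, #0 ; blt L      (kSignedLessThan)
// becomes, via MapForFlagSettingBinop (N flag):
//   adds r0, r1, r2 ; bmi L                  (kNegative)
// and when the compare is the binop's only use, the no-output form is used:
//   Word32Equal(Word32And(a, b), 0)  ->  tst r_a, r_b ; beq L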
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
@@ -1464,8 +1778,10 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
InstructionOperand inputs[5];
size_t input_count = 0;
- InstructionOperand outputs[1];
+ InstructionOperand outputs[2];
size_t output_count = 0;
+ bool has_result = (opcode != kArmCmp) && (opcode != kArmCmn) &&
+ (opcode != kArmTst) && (opcode != kArmTeq);
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
@@ -1482,6 +1798,17 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegister(m.right().node());
}
+ if (has_result) {
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure
+ // that the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
+ }
+
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
@@ -1496,7 +1823,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1505,7 +1832,32 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kArmCmp, cont);
+ InstructionCode opcode = kArmCmp;
+ Int32BinopMatcher m(node);
+
+ FlagsCondition cond = cont->condition();
+ if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32Or() ||
+ m.left().IsWord32And() || m.left().IsWord32Xor())) {
+ // Emit flag setting instructions for comparisons against zero.
+ if (CanUseFlagSettingBinop(cond)) {
+ Node* binop = m.left().node();
+ MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+ cond, cont);
+ }
+ } else if (m.left().Is(0) &&
+ (m.right().IsInt32Add() || m.right().IsWord32Or() ||
+ m.right().IsWord32And() || m.right().IsWord32Xor())) {
+ // Same as above, but we need to commute the condition before we
+ // continue with the rest of the checks.
+ cond = CommuteFlagsCondition(cond);
+ if (CanUseFlagSettingBinop(cond)) {
+ Node* binop = m.right().node();
+ MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+ cond, cont);
+ }
+ }
+
+ VisitWordCompare(selector, node, opcode, cont);
}
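When the constant zero sits on the left, the continuation's condition describes 0 <cond> x, so it is mirrored before re-running the same check. A quick sanity check of the commutation (a sketch of the identities, not V8's CommuteFlagsCondition itself):

#include <cassert>
#include <cstdint>

// Swapping operand order turns less-than into greater-than (and vice
// versa) while symmetric conditions are unchanged.
int main() {
  for (uint32_t x : {0u, 1u, 0xffffffffu}) {
    assert((0u < x) == (x > 0u));
    assert((0u >= x) == (x <= 0u));
    assert((0u == x) == (x == 0u));  // equality commutes trivially
  }
}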
@@ -1576,6 +1928,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kArmSub, kArmRsb, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ // ARM doesn't set the overflow flag for multiplication, so we
+ // need to test on kNotEqual. Here is the code sequence used:
+ // smull resultlow, resulthigh, left, right
+ // cmp resulthigh, Operand(resultlow, ASR, 31)
+ cont->OverwriteAndNegateIfEqual(kNotEqual);
+ return EmitInt32MulWithOverflow(selector, node, cont);
default:
break;
}
@@ -1616,7 +1975,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@@ -1633,14 +1992,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1717,7 +2076,6 @@ void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
VisitBinop(this, node, kArmAdd, kArmAdd, &cont);
}
-
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
@@ -1727,6 +2085,18 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop(this, node, kArmSub, kArmRsb, &cont);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ // ARM doesn't set the overflow flag for multiplication, so we need to test
+ // on kNotEqual. Here is the code sequence used:
+ // smull resultlow, resulthigh, left, right
+ // cmp resulthigh, Operand(resultlow, ASR, 31)
+ FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+ return EmitInt32MulWithOverflow(this, node, &cont);
+ }
+ FlagsContinuation cont;
+ EmitInt32MulWithOverflow(this, node, &cont);
+}
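A portable restatement of the smull/cmp sequence used in both places (a sketch assuming two's-complement narrowing and arithmetic right shift on signed values, which hold on V8's supported toolchains):

#include <cassert>
#include <cstdint>

// smull produces the full 64-bit product; the 32-bit multiply overflowed
// exactly when the high word differs from the sign-extension of the low
// word, i.e. from (resultlow ASR 31).
bool Int32MulOverflows(int32_t a, int32_t b) {
  int64_t product = static_cast<int64_t>(a) * b;   // smull lo, hi, a, b
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  return hi != (lo >> 31);                         // cmp hi, lo ASR #31
}

int main() {
  assert(!Int32MulOverflows(3, 4));
  assert(Int32MulOverflows(0x40000000, 2));    // 2^31 does not fit
  assert(!Int32MulOverflows(-0x40000000, 2));  // -2^31 just fits
}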
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -1807,13 +2177,72 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArmOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_Offset_RR;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
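These nodes lower to barrier-bracketed plain accesses (the analogous ASSEMBLE_ATOMIC_* macros for arm64 appear later in this diff); assuming the usual sequentially consistent mapping, the portable C++ equivalent of what is being selected here is simply:

#include <atomic>
#include <cstdint>

// "ldr; dmb ish" and "dmb ish; str; dmb ish" are the classic ARM lowering
// of sequentially consistent load/store; std::atomic states the same
// contract portably.
std::atomic<int32_t> cell{0};

int32_t AtomicLoadWord32() { return cell.load(std::memory_order_seq_cst); }
void AtomicStoreWord32(int32_t v) {
  cell.store(v, std::memory_order_seq_cst);
}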
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kInt32DivIsSafe |
- MachineOperatorBuilder::kUint32DivIsSafe;
+ MachineOperatorBuilder::Flags flags;
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ // The sdiv and udiv instructions correctly return 0 if the divisor is 0,
+ // but the fall-back implementation does not.
+ flags |= MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe;
+ }
if (CpuFeatures::IsSupported(ARMv7)) {
flags |= MachineOperatorBuilder::kWord32ReverseBits;
}
@@ -1831,6 +2260,16 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ Vector<MachineType> req_aligned = Vector<MachineType>::New(2);
+ req_aligned[0] = MachineType::Float32();
+ req_aligned[1] = MachineType::Float64();
+ return MachineOperatorBuilder::AlignmentRequirements::
+ SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
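The new AlignmentRequirements() marks Float32/Float64 as the only types whose unaligned accesses are unsupported (the same vector is passed for both loads and stores). A hedged sketch of what that forces on a consumer, in portable C++:

#include <cstdint>
#include <cstring>

// Types on the "requires alignment" list must be accessed without assuming
// natural alignment; memcpy compiles down to alignment-safe code, while
// integer types can keep ordinary loads and stores.
double LoadUnalignedFloat64(const uint8_t* p) {
  double d;
  std::memcpy(&d, p, sizeof(d));  // never assumes 8-byte alignment
  return d;
}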
diff --git a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc
new file mode 100644
index 0000000000..a950612190
--- /dev/null
+++ b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc
@@ -0,0 +1,108 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/arm/unwinding-info-writer-arm.h"
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
+ const InstructionBlock* block) {
+ if (!enabled()) return;
+
+ block_will_exit_ = false;
+
+ DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+ const BlockInitialState* initial_state =
+ block_initial_states_[block->rpo_number().ToInt()];
+ if (initial_state) {
+ if (initial_state->saved_lr_ != saved_lr_) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ if (initial_state->saved_lr_) {
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ } else {
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+ }
+ saved_lr_ = initial_state->saved_lr_;
+ }
+ } else {
+ // The entry block always lacks an explicit initial state.
+ // The exit block may also lack one if it is only reached by a block
+ // ending in a bx lr.
+ // All other blocks must have an explicit initial state.
+ DCHECK(block->predecessors().empty() || block->successors().empty());
+ }
+}
+
+void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
+ if (!enabled() || block_will_exit_) return;
+
+ for (const RpoNumber& successor : block->successors()) {
+ int successor_index = successor.ToInt();
+ DCHECK_LT(successor_index, block_initial_states_.size());
+ const BlockInitialState* existing_state =
+ block_initial_states_[successor_index];
+
+ // If we already have an entry for this block, check that it matches the
+ // values we are trying to insert.
+ if (existing_state) {
+ DCHECK_EQ(existing_state->saved_lr_, saved_lr_);
+ } else {
+ block_initial_states_[successor_index] =
+ new (zone_) BlockInitialState(saved_lr_);
+ }
+ }
+}
+
+void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) {
+ if (!enabled()) return;
+
+ // Regardless of the type of frame constructed, the relevant part of the
+ // layout is always the one in the diagram:
+ //
+ // | .... | higher addresses
+ // +----------+ ^
+ // | LR | | |
+ // +----------+ | |
+ // | saved FP | | |
+ // +----------+ <-- FP v
+ // | .... | stack growth
+ //
+ // The LR is pushed on the stack, and we can record this fact at the end of
+ // the construction, since the LR itself is not modified in the process.
+ eh_frame_writer_.AdvanceLocation(at_pc);
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ saved_lr_ = true;
+}
+
+void UnwindingInfoWriter::MarkFrameDeconstructed(int at_pc) {
+ if (!enabled()) return;
+
+ // The lr is restored by the last operation in LeaveFrame().
+ eh_frame_writer_.AdvanceLocation(at_pc);
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+ saved_lr_ = false;
+}
+
+void UnwindingInfoWriter::MarkLinkRegisterOnTopOfStack(int pc_offset) {
+ if (!enabled()) return;
+
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegisterAndOffset(sp, 0);
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, 0);
+}
+
+void UnwindingInfoWriter::MarkPopLinkRegisterFromTopOfStack(int pc_offset) {
+ if (!enabled()) return;
+
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegisterAndOffset(fp, 0);
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
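The writer tracks exactly one fact per block, whether lr is currently saved on the stack, and the DCHECK_EQ above insists that every path into a block agrees on it. A toy model of that invariant (illustration only, with invented names, not the V8 classes):

#include <cassert>
#include <map>
#include <vector>

// Each successor either receives the current lr state as its recorded
// initial state, or must already hold a matching one; a mismatch would
// mean two paths reach the block with different unwind states.
struct ToyUnwindWriter {
  std::map<int, bool> initial_saved_lr;  // block id -> lr saved on entry?
  bool saved_lr = false;

  void EndBlock(const std::vector<int>& successors) {
    for (int s : successors) {
      auto it = initial_saved_lr.find(s);
      if (it == initial_saved_lr.end()) {
        initial_saved_lr.emplace(s, saved_lr);
      } else {
        assert(it->second == saved_lr);
      }
    }
  }
};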
diff --git a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
new file mode 100644
index 0000000000..d47ca083ae
--- /dev/null
+++ b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.h
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+ explicit UnwindingInfoWriter(Zone* zone)
+ : zone_(zone),
+ eh_frame_writer_(zone),
+ saved_lr_(false),
+ block_will_exit_(false),
+ block_initial_states_(zone) {
+ if (enabled()) eh_frame_writer_.Initialize();
+ }
+
+ void SetNumberOfInstructionBlocks(int number) {
+ if (enabled()) block_initial_states_.resize(number);
+ }
+
+ void BeginInstructionBlock(int pc_offset, const InstructionBlock* block);
+ void EndInstructionBlock(const InstructionBlock* block);
+
+ void MarkLinkRegisterOnTopOfStack(int pc_offset);
+ void MarkPopLinkRegisterFromTopOfStack(int pc_offset);
+
+ void MarkFrameConstructed(int at_pc);
+ void MarkFrameDeconstructed(int at_pc);
+
+ void MarkBlockWillExit() { block_will_exit_ = true; }
+
+ void Finish(int code_size) {
+ if (enabled()) eh_frame_writer_.Finish(code_size);
+ }
+
+ EhFrameWriter* eh_frame_writer() {
+ return enabled() ? &eh_frame_writer_ : nullptr;
+ }
+
+ private:
+ bool enabled() const { return FLAG_perf_prof_unwinding_info; }
+
+ class BlockInitialState : public ZoneObject {
+ public:
+ explicit BlockInitialState(bool saved_lr) : saved_lr_(saved_lr) {}
+
+ bool saved_lr_;
+ };
+
+ Zone* zone_;
+ EhFrameWriter eh_frame_writer_;
+ bool saved_lr_;
+ bool block_will_exit_;
+
+ ZoneVector<const BlockInitialState*> block_initial_states_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_ARM_UNWINDING_INFO_WRITER_H_
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 456e7e7608..39c3314888 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -33,6 +33,24 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return InputDoubleRegister(index);
}
+ CPURegister InputFloat32OrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
+ return wzr;
+ }
+ DCHECK(instr_->InputAt(index)->IsFPRegister());
+ return InputDoubleRegister(index).S();
+ }
+
+ CPURegister InputFloat64OrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK(bit_cast<int64_t>(InputDouble(index)) == 0);
+ return xzr;
+ }
+ DCHECK(instr_->InputAt(index)->IsDoubleRegister());
+ return InputDoubleRegister(index);
+ }
+
size_t OutputCount() { return instr_->OutputCount(); }
DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
@@ -101,6 +119,8 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(InputRegister32(index), SXTB);
case kMode_Operand2_R_SXTH:
return Operand(InputRegister32(index), SXTH);
+ case kMode_Operand2_R_SXTW:
+ return Operand(InputRegister32(index), SXTW);
case kMode_MRI:
case kMode_MRR:
break;
@@ -129,6 +149,8 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(InputRegister64(index), SXTB);
case kMode_Operand2_R_SXTH:
return Operand(InputRegister64(index), SXTH);
+ case kMode_Operand2_R_SXTW:
+ return Operand(InputRegister64(index), SXTW);
case kMode_MRI:
case kMode_MRR:
break;
@@ -141,7 +163,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
- case kMode_Operand2_R_LSL_I:
case kMode_Operand2_R_LSR_I:
case kMode_Operand2_R_ASR_I:
case kMode_Operand2_R_ROR_I:
@@ -149,7 +170,12 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
case kMode_Operand2_R_UXTH:
case kMode_Operand2_R_SXTB:
case kMode_Operand2_R_SXTH:
+ case kMode_Operand2_R_SXTW:
break;
+ case kMode_Operand2_R_LSL_I:
+ *first_index += 3;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+ LSL, InputInt32(index + 2));
case kMode_MRI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
@@ -183,9 +209,19 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
- return Operand(constant.ToInt32());
+ if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ return Operand(constant.ToInt32(), constant.rmode());
+ } else {
+ return Operand(constant.ToInt32());
+ }
case Constant::kInt64:
- return Operand(constant.ToInt64());
+ if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ return Operand(constant.ToInt64(), constant.rmode());
+ } else {
+ DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ return Operand(constant.ToInt64());
+ }
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
@@ -206,7 +242,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
}
@@ -272,7 +308,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
+ RecordWriteMode mode,
+ UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
index_(index),
@@ -280,7 +317,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ unwinding_info_writer_(unwinding_info_writer) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -297,6 +335,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr);
+ unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
+ __ StackPointer());
}
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
remembered_set_action, save_fp_mode);
@@ -304,6 +344,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallStub(&stub);
if (must_save_lr_) {
__ Pop(lr);
+ unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
@@ -315,6 +356,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ UnwindingInfoWriter* const unwinding_info_writer_;
};
@@ -363,6 +405,10 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
case kUnorderedEqual:
case kUnorderedNotEqual:
break;
+ case kPositiveOrZero:
+ return pl;
+ case kNegative:
+ return mi;
}
UNREACHABLE();
return nv;
@@ -370,6 +416,17 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
+#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
+ do { \
+ if (length.IsImmediate() && \
+ base::bits::IsPowerOfTwo64(length.ImmediateValue())) { \
+ __ Tst(offset, ~(length.ImmediateValue() - 1)); \
+ __ B(ne, out_of_bounds); \
+ } else { \
+ __ Cmp(offset, length); \
+ __ B(hs, out_of_bounds); \
+ } \
+ } while (0)
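The fast path replaces the compare with a single tst against the complement mask; the identity it relies on is easy to verify (plain C++ sketch):

#include <cassert>
#include <cstdint>

// For unsigned offset and length == 2^k, offset < length exactly when no
// bit at position >= k is set, i.e. (offset & ~(length - 1)) == 0. The
// mask is a valid logical immediate, so Tst encodes it directly.
int main() {
  const uint64_t length = 4096;  // 2^12
  for (uint64_t offset : {uint64_t{0}, uint64_t{1}, uint64_t{4095},
                          uint64_t{4096}, uint64_t{1} << 40}) {
    assert((offset < length) == ((offset & ~(length - 1)) == 0));
  }
}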
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
do { \
@@ -377,84 +434,72 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- __ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
- __ B(hs, ool->entry()); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
__ Ldr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister32(); \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- __ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ B(hs, ool->entry()); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
__ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- __ Cmp(offset, length); \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ B(hs, ool->entry()); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
__ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
__ Bind(ool->exit()); \
} while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputFloat##width##Register(3); \
- __ Cmp(offset, length); \
- Label done; \
- __ B(hs, &done); \
- __ Str(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ auto value = i.InputFloat##width##OrZeroRegister(3); \
+ Label done; \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
+ __ Str(value, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(&done); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- auto value = i.InputRegister32(3); \
- __ Cmp(offset, length); \
+ auto value = i.InputOrZeroRegister32(3); \
Label done; \
- __ B(hs, &done); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
__ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
__ Bind(&done); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
do { \
auto buffer = i.InputRegister(0); \
auto offset = i.InputRegister32(1); \
auto length = i.InputOperand32(2); \
- auto value = i.InputRegister(3); \
- __ Cmp(offset, length); \
+ auto value = i.InputOrZeroRegister64(3); \
Label done; \
- __ B(hs, &done); \
+ ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
__ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
__ Bind(&done); \
} while (0)
-
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -468,6 +513,35 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} \
} while (0)
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ Dmb(InnerShareable, BarrierAll); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ Dmb(InnerShareable, BarrierAll); \
+ __ asm_instr(i.InputRegister(2), \
+ MemOperand(i.InputRegister(0), i.InputRegister(1))); \
+ __ Dmb(InnerShareable, BarrierAll); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
@@ -476,23 +550,11 @@ void CodeGenerator::AssembleDeconstructFrame() {
__ Mov(jssp, fp);
}
__ Pop(fp, lr);
-}
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ Drop(sp_slot_delta);
- }
- frame_access_state()->SetFrameAccessToDefault();
+ unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
}
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ Claim(-sp_slot_delta);
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -525,8 +587,41 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->Claim(stack_slot_delta);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->Drop(-stack_slot_delta);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
Arm64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -559,8 +654,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -574,7 +667,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Jump(target);
}
+ unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ unwinding_info_writer_.MarkBlockWillExit();
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -617,8 +720,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, temp);
__ Assert(eq, kWrongFunctionContext);
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -627,6 +728,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(x10);
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction:
@@ -636,7 +738,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
UNREACHABLE();
break;
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -661,6 +763,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ Debug("kArchDebugBreak", 0, BREAK);
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -670,7 +783,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -708,8 +823,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
- scratch0, scratch1, mode);
+ auto ool = new (zone())
+ OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
+ mode, &unwinding_info_writer_);
__ Str(value, MemOperand(object, index));
__ CheckPageFlagSet(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
@@ -729,6 +845,71 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(i.OutputRegister(0), base, Operand(offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ break;
+ }
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
case kArm64Float32RoundDown:
__ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
@@ -775,12 +956,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kArm64And:
- __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
- i.InputOperand2_64(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ // The ands instruction only sets N and Z, so only the following
+ // conditions make sense.
+ DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
+ FlagsConditionField::decode(opcode) == kNotEqual ||
+ FlagsConditionField::decode(opcode) == kPositiveOrZero ||
+ FlagsConditionField::decode(opcode) == kNegative);
+ __ Ands(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ } else {
+ __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
+ i.InputOperand2_64(1));
+ }
break;
case kArm64And32:
- __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
- i.InputOperand2_32(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ // The ands instruction only sets N and Z, so only the following
+ // conditions make sense.
+ DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
+ FlagsConditionField::decode(opcode) == kNotEqual ||
+ FlagsConditionField::decode(opcode) == kPositiveOrZero ||
+ FlagsConditionField::decode(opcode) == kNegative);
+ __ Ands(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
+ } else {
+ __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
+ i.InputOperand2_32(1));
+ }
break;
case kArm64Bic:
__ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
@@ -983,6 +1186,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
break;
case kArm64CompareAndBranch32:
+ case kArm64CompareAndBranch:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64ClaimCSP: {
@@ -1038,7 +1242,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register prev = __ StackPointer();
__ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
Operand operand(i.InputInt32(1) * kPointerSize);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Poke(i.InputFloat64Register(0), operand);
} else {
__ Poke(i.InputRegister(0), operand);
@@ -1048,7 +1252,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArm64PokePair: {
int slot = i.InputInt32(2) - 1;
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
slot * kPointerSize);
} else {
@@ -1070,25 +1274,25 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Rbit(i.OutputRegister32(), i.InputRegister32(0));
break;
case kArm64Cmp:
- __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
+ __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
break;
case kArm64Cmp32:
__ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Cmn:
- __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
+ __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
break;
case kArm64Cmn32:
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
case kArm64Tst:
- __ Tst(i.InputRegister(0), i.InputOperand(1));
+ __ Tst(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
case kArm64Tst32:
- __ Tst(i.InputRegister32(0), i.InputOperand32(1));
+ __ Tst(i.InputOrZeroRegister32(0), i.InputOperand32(1));
break;
case kArm64Float32Cmp:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
@@ -1113,26 +1317,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
i.InputFloat32Register(1));
break;
- case kArm64Float32Max:
- // (b < a) ? a : b
- __ Fcmp(i.InputFloat32Register(1), i.InputFloat32Register(0));
- __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1), lo);
- break;
- case kArm64Float32Min:
- // (a < b) ? a : b
- __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
- __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1), lo);
- break;
case kArm64Float32Abs:
__ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
+ case kArm64Float32Neg:
+ __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float32Sqrt:
__ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArm64Float64Cmp:
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
@@ -1168,18 +1363,26 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
0, 2);
break;
}
- case kArm64Float64Max:
- // (b < a) ? a : b
- __ Fcmp(i.InputDoubleRegister(1), i.InputDoubleRegister(0));
- __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), lo);
- break;
- case kArm64Float64Min:
- // (a < b) ? a : b
- __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
- __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), lo);
+ case kArm64Float32Max: {
+ __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
+ }
+ case kArm64Float64Max: {
+ __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ }
+ case kArm64Float32Min: {
+ __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
+ }
+ case kArm64Float64Min: {
+ __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
break;
+ }
case kArm64Float64Abs:
__ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -1197,12 +1400,21 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kArm64Float32ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ __ Cmn(i.OutputRegister32(), 1);
+ __ Csinc(i.OutputRegister32(), i.OutputRegister32(), i.OutputRegister32(),
+ vc);
break;
case kArm64Float64ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
case kArm64Float32ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ Cmn(i.OutputRegister32(), 1);
+ __ Adc(i.OutputRegister32(), i.OutputRegister32(), Operand(0));
break;
case kArm64Float64ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
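The Cmn/Csinc pair bumps a saturated INT32_MAX to INT32_MIN: Cmn out, #1 sets V exactly when out == INT32_MAX, and Csinc with vc then selects out + 1. Since INT32_MAX is not exactly representable as a float32, that value can only arise from saturation; the unsigned variant similarly wraps a saturated UINT32_MAX to 0 via Cmn/Adc. A sketch of the net effect (plain C++, not V8 API):

#include <cassert>
#include <cstdint>

// fcvtzs saturates out-of-range inputs to INT32_MAX/INT32_MIN; folding the
// INT32_MAX case onto INT32_MIN leaves a single out-of-bounds sentinel.
int32_t FixupSignedConversion(int32_t out) {
  return out == INT32_MAX ? INT32_MIN : out;  // Cmn #1; Csinc vc
}

int main() {
  assert(FixupSignedConversion(42) == 42);
  assert(FixupSignedConversion(INT32_MAX) == INT32_MIN);
}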
@@ -1305,6 +1517,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
+ case kArm64Float64SilenceNaN:
+ __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64U64MoveFloat64:
__ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -1315,7 +1530,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Strb:
- __ Strb(i.InputRegister(2), i.MemoryOperand());
+ __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrh:
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
@@ -1324,31 +1539,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Strh:
- __ Strh(i.InputRegister(2), i.MemoryOperand());
+ __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
+ break;
+ case kArm64Ldrsw:
+ __ Ldrsw(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
break;
case kArm64StrW:
- __ Str(i.InputRegister32(2), i.MemoryOperand());
+ __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
break;
case kArm64Ldr:
__ Ldr(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64Str:
- __ Str(i.InputRegister(2), i.MemoryOperand());
+ __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64LdrS:
__ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
case kArm64StrS:
- __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
+ __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
break;
case kArm64LdrD:
__ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64StrD:
- __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+ __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
@@ -1392,7 +1610,37 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrb);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrh);
+ break;
+ case kAtomicLoadWord32:
+ __ Ldr(i.OutputRegister32(),
+ MemOperand(i.InputRegister(0), i.InputRegister(1)));
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Strb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Strh);
+ break;
+ case kAtomicStoreWord32:
+ __ Dmb(InnerShareable, BarrierAll);
+ __ Str(i.InputRegister32(2),
+ MemOperand(i.InputRegister(0), i.InputRegister(1)));
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1415,6 +1663,17 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
default:
UNREACHABLE();
}
+ } else if (opcode == kArm64CompareAndBranch) {
+ switch (condition) {
+ case kEqual:
+ __ Cbz(i.InputRegister64(0), tlabel);
+ break;
+ case kNotEqual:
+ __ Cbnz(i.InputRegister64(0), tlabel);
+ break;
+ default:
+ UNREACHABLE();
+ }
} else if (opcode == kArm64TestAndBranch32) {
switch (condition) {
case kEqual:
@@ -1495,30 +1754,52 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ EndBlockPools();
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
-void CodeGenerator::AssembleSetupStackPointer() {
- const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+void CodeGenerator::FinishFrame(Frame* frame) {
+ frame->AlignFrame(16);
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
__ SetStackPointer(csp);
} else {
__ SetStackPointer(jssp);
}
+
+ // Save FP registers.
+ CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ descriptor->CalleeSavedFPRegisters());
+ int saved_count = saves_fp.Count();
+ if (saved_count != 0) {
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ frame->AllocateSavedCalleeRegisterSlots(saved_count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
+ descriptor->CalleeSavedRegisters());
+ saved_count = saves.Count();
+ if (saved_count != 0) {
+ frame->AllocateSavedCalleeRegisterSlots(saved_count);
+ }
}
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (descriptor->UseNativeStack()) {
__ AssertCspAligned();
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
if (frame_access_state()->has_frame()) {
if (descriptor->IsJSFunctionCall()) {
DCHECK(!descriptor->UseNativeStack());
@@ -1527,14 +1808,20 @@ void CodeGenerator::AssemblePrologue() {
if (descriptor->IsCFunctionCall()) {
__ Push(lr, fp);
__ Mov(fp, masm_.StackPointer());
- __ Claim(stack_shrink_slots);
+ __ Claim(frame()->GetSpillSlotCount());
} else {
__ StubPrologue(info()->GetOutputStackFrameType(),
frame()->GetTotalFrameSlotCount());
}
}
+
+ if (!info()->GeneratePreagedPrologue()) {
+ unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
+ }
}
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1545,11 +1832,11 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
if (descriptor->IsJSFunctionCall()) {
- __ Claim(stack_shrink_slots);
+ __ Claim(shrink_slots);
}
// Save FP registers.
@@ -1559,8 +1846,6 @@ void CodeGenerator::AssemblePrologue() {
if (saved_count != 0) {
DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
__ PushCPURegList(saves_fp);
- frame()->AllocateSavedCalleeRegisterSlots(saved_count *
- (kDoubleSize / kPointerSize));
}
// Save registers.
// TODO(palfia): TF save list is not in sync with
@@ -1571,7 +1856,6 @@ void CodeGenerator::AssemblePrologue() {
saved_count = saves.Count();
if (saved_count != 0) {
__ PushCPURegList(saves);
- frame()->AllocateSavedCalleeRegisterSlots(saved_count);
}
}
@@ -1593,6 +1877,8 @@ void CodeGenerator::AssembleReturn() {
__ PopCPURegList(saves_fp);
}
+ unwinding_info_writer_.MarkBlockWillExit();
+
int pop_count = static_cast<int>(descriptor->StackParameterCount());
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
@@ -1668,11 +1954,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Str(dst, g.ToMemOperand(destination, masm()));
}
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination).S();
__ Fmov(dst, src.ToFloat32());
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
UseScratchRegisterScope scope(masm());
FPRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
@@ -1680,30 +1966,30 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src.ToFloat64());
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
UseScratchRegisterScope scope(masm());
FPRegister temp = scope.AcquireD();
__ Fmov(temp, src.ToFloat64());
__ Str(temp, g.ToMemOperand(destination, masm()));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ Str(src, g.ToMemOperand(destination, masm()));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source, masm());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ Ldr(g.ToDoubleRegister(destination), src);
} else {
UseScratchRegisterScope scope(masm());
@@ -1739,7 +2025,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Ldr(src, dst);
__ Str(temp, dst);
}
- } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
UseScratchRegisterScope scope(masm());
DoubleRegister temp_0 = scope.AcquireD();
DoubleRegister temp_1 = scope.AcquireD();
@@ -1749,17 +2035,17 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Ldr(temp_1, dst);
__ Str(temp_0, dst);
__ Str(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
UseScratchRegisterScope scope(masm());
FPRegister temp = scope.AcquireD();
FPRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPRegister dst = g.ToDoubleRegister(destination);
__ Fmov(temp, src);
__ Fmov(src, dst);
__ Fmov(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination, masm());
__ Fmov(temp, src);
__ Ldr(src, dst);
@@ -1778,9 +2064,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index f03c2fb436..898a9e9b35 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -78,6 +78,7 @@ namespace compiler {
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
V(Arm64ClaimCSP) \
V(Arm64ClaimJSSP) \
V(Arm64PokeCSP) \
@@ -88,11 +89,12 @@ namespace compiler {
V(Arm64Float32Sub) \
V(Arm64Float32Mul) \
V(Arm64Float32Div) \
- V(Arm64Float32Max) \
- V(Arm64Float32Min) \
V(Arm64Float32Abs) \
+ V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
V(Arm64Float32RoundDown) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Sub) \
@@ -112,6 +114,7 @@ namespace compiler {
V(Arm64Float64RoundTruncate) \
V(Arm64Float32RoundTiesEven) \
V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float32ToInt32) \
@@ -146,6 +149,7 @@ namespace compiler {
V(Arm64Ldrh) \
V(Arm64Ldrsh) \
V(Arm64Strh) \
+ V(Arm64Ldrsw) \
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
@@ -174,7 +178,8 @@ namespace compiler {
V(Operand2_R_UXTB) /* %r0 UXTB (unsigned extend byte) */ \
V(Operand2_R_UXTH) /* %r0 UXTH (unsigned extend halfword) */ \
V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
- V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */
+ V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */ \
+ V(Operand2_R_SXTW) /* %r0 SXTW (signed extend word) */
enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index ca372993b8..d3504dfd22 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -82,11 +82,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float32Sub:
case kArm64Float32Mul:
case kArm64Float32Div:
- case kArm64Float32Max:
- case kArm64Float32Min:
case kArm64Float32Abs:
+ case kArm64Float32Neg:
case kArm64Float32Sqrt:
case kArm64Float32RoundDown:
+ case kArm64Float32Max:
+ case kArm64Float32Min:
case kArm64Float64Cmp:
case kArm64Float64Add:
case kArm64Float64Sub:
@@ -130,11 +131,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64InsertHighWord32:
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
+ case kArm64Float64SilenceNaN:
return kNoOpcodeFlags;
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
+ case kArm64CompareAndBranch:
return kIsBlockTerminator;
case kArm64LdrS:
@@ -143,6 +146,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldrsb:
case kArm64Ldrh:
case kArm64Ldrsh:
+ case kArm64Ldrsw:
case kArm64LdrW:
case kArm64Ldr:
return kIsLoadOperation;
@@ -176,23 +180,46 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for arm64 instructions; the values were
// determined empirically.
switch (instr->arch_opcode()) {
- case kArm64Float32ToFloat64:
- case kArm64Float64ToFloat32:
- case kArm64Float64ToInt32:
- case kArm64Float64ToUint32:
- case kArm64Int32ToFloat64:
- case kArm64Uint32ToFloat64:
- return 3;
-
- case kArm64Float64Add:
- case kArm64Float64Sub:
- return 2;
-
- case kArm64Float64Mul:
- return 3;
+ case kArm64Add:
+ case kArm64Add32:
+ case kArm64And:
+ case kArm64And32:
+ case kArm64Bic:
+ case kArm64Bic32:
+ case kArm64Cmn:
+ case kArm64Cmn32:
+ case kArm64Cmp:
+ case kArm64Cmp32:
+ case kArm64Eon:
+ case kArm64Eon32:
+ case kArm64Eor:
+ case kArm64Eor32:
+ case kArm64Not:
+ case kArm64Not32:
+ case kArm64Or:
+ case kArm64Or32:
+ case kArm64Orn:
+ case kArm64Orn32:
+ case kArm64Sub:
+ case kArm64Sub32:
+ case kArm64Tst:
+ case kArm64Tst32:
+ if (instr->addressing_mode() != kMode_None) {
+ return 3;
+ } else {
+ return 1;
+ }
- case kArm64Float64Div:
- return 6;
+ case kArm64Clz:
+ case kArm64Clz32:
+ case kArm64Sbfx32:
+ case kArm64Sxtb32:
+ case kArm64Sxth32:
+ case kArm64Sxtw:
+ case kArm64Ubfiz32:
+ case kArm64Ubfx:
+ case kArm64Ubfx32:
+ return 1;
case kArm64Lsl:
case kArm64Lsl32:
@@ -202,7 +229,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Asr32:
case kArm64Ror:
case kArm64Ror32:
- return 3;
+ return 1;
+
+ case kArm64Ldr:
+ case kArm64LdrD:
+ case kArm64LdrS:
+ case kArm64LdrW:
+ case kArm64Ldrb:
+ case kArm64Ldrh:
+ case kArm64Ldrsb:
+ case kArm64Ldrsh:
+ case kArm64Ldrsw:
+ return 11;
case kCheckedLoadInt8:
case kCheckedLoadUint8:
@@ -212,18 +250,95 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kCheckedLoadWord64:
case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
- case kArm64LdrS:
- case kArm64LdrD:
- case kArm64Ldrb:
- case kArm64Ldrsb:
- case kArm64Ldrh:
- case kArm64Ldrsh:
- case kArm64LdrW:
- case kArm64Ldr:
return 5;
- default:
+ case kArm64Str:
+ case kArm64StrD:
+ case kArm64StrS:
+ case kArm64StrW:
+ case kArm64Strb:
+ case kArm64Strh:
return 1;
+
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ return 1;
+
+ case kArm64Madd32:
+ case kArm64Mneg32:
+ case kArm64Msub32:
+ case kArm64Mul32:
+ return 3;
+
+ case kArm64Madd:
+ case kArm64Mneg:
+ case kArm64Msub:
+ case kArm64Mul:
+ return 5;
+
+ case kArm64Idiv32:
+ case kArm64Udiv32:
+ return 12;
+
+ case kArm64Idiv:
+ case kArm64Udiv:
+ return 20;
+
+ case kArm64Float32Add:
+ case kArm64Float32Sub:
+ case kArm64Float64Add:
+ case kArm64Float64Sub:
+ return 5;
+
+ case kArm64Float32Abs:
+ case kArm64Float32Cmp:
+ case kArm64Float32Neg:
+ case kArm64Float64Abs:
+ case kArm64Float64Cmp:
+ case kArm64Float64Neg:
+ return 3;
+
+ case kArm64Float32Div:
+ case kArm64Float32Sqrt:
+ return 12;
+
+ case kArm64Float64Div:
+ case kArm64Float64Sqrt:
+ return 19;
+
+ case kArm64Float32RoundDown:
+ case kArm64Float32RoundTiesEven:
+ case kArm64Float32RoundTruncate:
+ case kArm64Float32RoundUp:
+ case kArm64Float64RoundDown:
+ case kArm64Float64RoundTiesAway:
+ case kArm64Float64RoundTiesEven:
+ case kArm64Float64RoundTruncate:
+ case kArm64Float64RoundUp:
+ return 5;
+
+ case kArm64Float32ToFloat64:
+ case kArm64Float64ToFloat32:
+ case kArm64Float64ToInt32:
+ case kArm64Float64ToUint32:
+ case kArm64Float32ToInt64:
+ case kArm64Float64ToInt64:
+ case kArm64Float32ToUint64:
+ case kArm64Float64ToUint64:
+ case kArm64Int32ToFloat64:
+ case kArm64Int64ToFloat32:
+ case kArm64Int64ToFloat64:
+ case kArm64Uint32ToFloat64:
+ case kArm64Uint64ToFloat32:
+ case kArm64Uint64ToFloat64:
+ return 5;
+
+ default:
+ return 2;
}
}
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index d90deaeecb..9bc5385d43 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -40,7 +40,9 @@ class Arm64OperandGenerator final : public OperandGenerator {
// Use the zero register if the node has the immediate value zero, otherwise
// assign a register.
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
- if (IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -68,6 +70,19 @@ class Arm64OperandGenerator final : public OperandGenerator {
return OpParameter<int64_t>(node);
}
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node);
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node);
+ }
+
bool CanBeImmediate(Node* node, ImmediateMode mode) {
return IsIntegerConstant(node) &&
CanBeImmediate(GetIntegerConstantValue(node), mode);
@@ -106,6 +121,13 @@ class Arm64OperandGenerator final : public OperandGenerator {
return false;
}
+ bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
+    // TODO(arm64): 128-bit Q-register loads and stores are not supported yet.
+ DCHECK_NE(MachineRepresentation::kSimd128, rep);
+ return IsIntegerConstant(node) &&
+ (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
+ }
+
private:
bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
return Assembler::IsImmLSScaled(value, size) ||
@@ -139,6 +161,77 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and shifting by 32, we should
+ // just load and sign-extend the interesting 4 bytes instead.
+ // This happens, for example, when we're loading and untagging SMIs.
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ Arm64OperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kArm64Ldrsw;
+ if (g.IsIntegerConstant(offset)) {
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+ matches_ = g.CanBeImmediate(immediate_, kLoadStoreImm32);
+ }
+ }
+ }
+};
+
+bool TryMatchExtendingLoad(InstructionSelector* selector, Node* node) {
+ ExtendingLoadMatcher m(node, selector);
+ return m.Matches();
+}
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+ ExtendingLoadMatcher m(node, selector);
+ Arm64OperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
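
On little-endian ARM64 the high word of a 64-bit value sits 4 bytes above its base address, which is why the matcher adds 4 to the load offset and switches to kArm64Ldrsw. A small demonstration of the equivalence the rewrite relies on (a sketch; assumes a little-endian target and arithmetic right shift, both true on ARM64):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int64_t tagged = INT64_C(-7) * (INT64_C(1) << 32);  // -7 in the upper word
  int64_t via_shift = tagged >> 32;  // Word64Sar(tagged, 32)
  int32_t high;                      // ldrsw at [&tagged + 4]
  std::memcpy(&high, reinterpret_cast<const char*>(&tagged) + 4, 4);
  int64_t via_load = high;  // sign extension, as ldrsw performs
  assert(via_shift == via_load);  // both yield -7
}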
bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
Node* input_node, InstructionCode* opcode, bool try_ror) {
@@ -158,7 +251,10 @@ bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
*opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
return true;
case IrOpcode::kWord32Sar:
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+ return true;
case IrOpcode::kWord64Sar:
+ if (TryMatchExtendingLoad(selector, input_node)) return false;
*opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
return true;
case IrOpcode::kWord32Ror:
@@ -211,6 +307,94 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
return false;
}
+bool TryMatchLoadStoreShift(Arm64OperandGenerator* g,
+ InstructionSelector* selector,
+ MachineRepresentation rep, Node* node, Node* index,
+ InstructionOperand* index_op,
+ InstructionOperand* shift_immediate_op) {
+ if (!selector->CanCover(node, index)) return false;
+ if (index->InputCount() != 2) return false;
+ Node* left = index->InputAt(0);
+ Node* right = index->InputAt(1);
+ switch (index->opcode()) {
+ case IrOpcode::kWord32Shl:
+ case IrOpcode::kWord64Shl:
+ if (!g->CanBeLoadStoreShiftImmediate(right, rep)) {
+ return false;
+ }
+ *index_op = g->UseRegister(left);
+ *shift_immediate_op = g->UseImmediate(right);
+ return true;
+ default:
+ return false;
+ }
+}
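
The accepted shift corresponds to ARM64's scaled register addressing, where the index can only be shifted by log2 of the access size (for example, ldr x0, [x1, x2, lsl #3] for an 8-byte load). A sketch of the acceptance test, with a stand-in for ElementSizeLog2Of, which V8 derives from the MachineRepresentation:

#include <cassert>

int ElementSizeLog2(int access_bytes) {  // stand-in for ElementSizeLog2Of
  int log2 = 0;
  while ((1 << log2) < access_bytes) ++log2;
  return log2;
}

// Only a constant shift equal to log2(access size) can be folded into the
// addressing mode; anything else stays a separate shift instruction.
bool ShiftFoldsIntoAddressing(int shift_imm, int access_bytes) {
  return shift_imm == ElementSizeLog2(access_bytes);
}

int main() {
  assert(ShiftFoldsIntoAddressing(3, 8));   // ldr x0, [x1, x2, lsl #3]
  assert(!ShiftFoldsIntoAddressing(2, 8));  // mismatched scale: no fold
}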
+
+// Bitfields describing binary operator properties:
+// CanCommuteField is true if we can switch the two operands, potentially
+// requiring commuting the flags continuation condition.
+typedef BitField8<bool, 1, 1> CanCommuteField;
+// MustCommuteCondField is true when we need to commute the flags continuation
+// condition in order to switch the operands.
+typedef BitField8<bool, 2, 1> MustCommuteCondField;
+// IsComparisonField is true when the operation is a comparison and has no
+// result other than the condition.
+typedef BitField8<bool, 3, 1> IsComparisonField;
+// IsAddSubField is true when an instruction is encoded as ADD or SUB.
+typedef BitField8<bool, 4, 1> IsAddSubField;
+
+// Get properties of a binary operator.
+uint8_t GetBinopProperties(InstructionCode opcode) {
+ uint8_t result = 0;
+ switch (opcode) {
+ case kArm64Cmp32:
+ case kArm64Cmp:
+ // We can commute CMP by switching the inputs and commuting
+ // the flags continuation.
+ result = CanCommuteField::update(result, true);
+ result = MustCommuteCondField::update(result, true);
+ result = IsComparisonField::update(result, true);
+ // The CMP and CMN instructions are encoded as SUB or ADD
+ // with zero output register, and therefore support the same
+ // operand modes.
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Cmn32:
+ case kArm64Cmn:
+ result = CanCommuteField::update(result, true);
+ result = IsComparisonField::update(result, true);
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Add32:
+ case kArm64Add:
+ result = CanCommuteField::update(result, true);
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Sub32:
+ case kArm64Sub:
+ result = IsAddSubField::update(result, true);
+ break;
+ case kArm64Tst32:
+ case kArm64Tst:
+ result = CanCommuteField::update(result, true);
+ result = IsComparisonField::update(result, true);
+ break;
+ case kArm64And32:
+ case kArm64And:
+ case kArm64Or32:
+ case kArm64Or:
+ case kArm64Eor32:
+ case kArm64Eor:
+ result = CanCommuteField::update(result, true);
+ break;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ DCHECK_IMPLIES(MustCommuteCondField::decode(result),
+ CanCommuteField::decode(result));
+ return result;
+}
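
BitField8<T, shift, size> packs a size-bit field at the given bit position of a uint8_t. A minimal reimplementation of the two operations used above, to make the encoding concrete (a sketch of the semantics, not V8's actual template):

#include <cassert>
#include <cstdint>

template <int kShift>
struct BoolField {  // one-bit boolean field at bit kShift
  static uint8_t update(uint8_t previous, bool value) {
    uint8_t mask = static_cast<uint8_t>(1u << kShift);
    return static_cast<uint8_t>((previous & ~mask) | (value ? mask : 0u));
  }
  static bool decode(uint8_t value) { return ((value >> kShift) & 1u) != 0; }
};

using CanCommute = BoolField<1>;  // mirrors CanCommuteField
using IsAddSub = BoolField<4>;    // mirrors IsAddSubField

int main() {
  uint8_t props = 0;
  props = CanCommute::update(props, true);
  props = IsAddSub::update(props, true);
  assert(CanCommute::decode(props) && IsAddSub::decode(props));
}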
// Shared routine for multiple binary operations.
template <typename Matcher>
@@ -218,30 +402,24 @@ void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- Matcher m(node);
InstructionOperand inputs[5];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
- bool is_cmp = (opcode == kArm64Cmp32) || (opcode == kArm64Cmn32);
-
- // We can commute cmp by switching the inputs and commuting the flags
- // continuation.
- bool can_commute = m.HasProperty(Operator::kCommutative) || is_cmp;
- // The cmp and cmn instructions are encoded as sub or add with zero output
- // register, and therefore support the same operand modes.
- bool is_add_sub = m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() ||
- m.IsInt64Sub() || is_cmp;
+ Node* left_node = node->InputAt(0);
+ Node* right_node = node->InputAt(1);
- Node* left_node = m.left().node();
- Node* right_node = m.right().node();
+ uint8_t properties = GetBinopProperties(opcode);
+ bool can_commute = CanCommuteField::decode(properties);
+ bool must_commute_cond = MustCommuteCondField::decode(properties);
+ bool is_add_sub = IsAddSubField::decode(properties);
if (g.CanBeImmediate(right_node, operand_mode)) {
inputs[input_count++] = g.UseRegister(left_node);
inputs[input_count++] = g.UseImmediate(right_node);
- } else if (is_cmp && g.CanBeImmediate(left_node, operand_mode)) {
- cont->Commute();
+ } else if (can_commute && g.CanBeImmediate(left_node, operand_mode)) {
+ if (must_commute_cond) cont->Commute();
inputs[input_count++] = g.UseRegister(right_node);
inputs[input_count++] = g.UseImmediate(left_node);
} else if (is_add_sub &&
@@ -251,23 +429,29 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} else if (is_add_sub && can_commute &&
TryMatchAnyExtend(&g, selector, node, right_node, left_node,
&inputs[0], &inputs[1], &opcode)) {
- if (is_cmp) cont->Commute();
+ if (must_commute_cond) cont->Commute();
input_count += 2;
} else if (TryMatchAnyShift(selector, node, right_node, &opcode,
!is_add_sub)) {
Matcher m_shift(right_node);
- inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
+ inputs[input_count++] = cont->IsDeoptimize()
+ ? g.UseRegister(left_node)
+ : g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
!is_add_sub)) {
- if (is_cmp) cont->Commute();
+ if (must_commute_cond) cont->Commute();
Matcher m_shift(left_node);
- inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
+ inputs[input_count++] = cont->IsDeoptimize()
+ ? g.UseRegister(right_node)
+ : g.UseRegisterOrImmediateZero(right_node);
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else {
- inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
+ inputs[input_count++] = cont->IsDeoptimize()
+ ? g.UseRegister(left_node)
+ : g.UseRegisterOrImmediateZero(left_node);
inputs[input_count++] = g.UseRegister(right_node);
}
@@ -276,8 +460,15 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- if (!is_cmp) {
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (!IsComparisonField::decode(properties)) {
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure
+ // that the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
}
if (cont->IsSet()) {
@@ -285,14 +476,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
}
DCHECK_NE(0u, input_count);
- DCHECK((output_count != 0) || is_cmp);
+ DCHECK((output_count != 0) || IsComparisonField::decode(properties));
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -341,15 +532,44 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
} // namespace
-
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- Arm64OperandGenerator g(this);
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ ImmediateMode immediate_mode, MachineRepresentation rep,
+ Node* output = nullptr) {
+ Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- ArchOpcode opcode = kArchNop;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+
+ // If output is not nullptr, use that as the output register. This
+ // is used when we merge a conversion into the load.
+ outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
+ inputs[0] = g.UseRegister(base);
+
+ if (g.CanBeImmediate(index, immediate_mode)) {
+ input_count = 2;
+ inputs[1] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ } else if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
+ &inputs[2])) {
+ input_count = 3;
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+ } else {
+ input_count = 2;
+ inputs[1] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_MRR);
+ }
+
+ selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+ InstructionCode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
- switch (load_rep.representation()) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ MachineRepresentation rep = load_rep.representation();
+ switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kArm64LdrS;
immediate_mode = kLoadStoreImm32;
@@ -371,6 +591,8 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
@@ -381,13 +603,7 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
- }
+ EmitLoad(this, node, opcode, immediate_mode, rep);
}
@@ -441,7 +657,9 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ InstructionCode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
switch (rep) {
case MachineRepresentation::kFloat32:
@@ -465,6 +683,8 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64StrW;
immediate_mode = kLoadStoreImm32;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kArm64Str;
@@ -475,16 +695,33 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
return;
}
+
+ inputs[0] = g.UseRegisterOrImmediateZero(value);
+ inputs[1] = g.UseRegister(base);
+
if (g.CanBeImmediate(index, immediate_mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ input_count = 3;
+ inputs[2] = g.UseImmediate(index);
+ opcode |= AddressingModeField::encode(kMode_MRI);
+ } else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[2],
+ &inputs[3])) {
+ input_count = 4;
+ opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+ input_count = 3;
+ inputs[2] = g.UseRegister(index);
+ opcode |= AddressingModeField::encode(kMode_MRR);
}
+
+ Emit(opcode, 0, nullptr, input_count, inputs);
}
}
+// The architecture supports unaligned access, so VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, so VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -513,12 +750,25 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
+ // If the length is a constant power of two, allow the code generator to
+ // pick a more efficient bounds check sequence by passing the length as an
+ // immediate.
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+ g.UseRegister(offset), g.UseImmediate(length));
+ return;
+ }
+ }
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
}
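
IsPowerOf2() here is the usual single-bit test on a positive constant; passing such a length as an immediate lets the code generator fold the bound directly into the compare. A sketch of the predicate itself (the standard idiom; the matcher additionally requires the node to be a constant):

#include <cassert>
#include <cstdint>

bool IsPowerOfTwo(uint32_t value) {
  // Exactly one bit set: subtracting 1 flips that bit and everything below.
  return value != 0 && (value & (value - 1)) == 0;
}

int main() {
  assert(IsPowerOfTwo(0x1000));   // eligible for the immediate form
  assert(!IsPowerOfTwo(0));
  assert(!IsPowerOfTwo(0x1001));  // falls back to UseOperand
}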
@@ -552,14 +802,28 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
opcode = kCheckedStoreFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
+ // If the length is a constant power of two, allow the code generator to
+ // pick a more efficient bounds check sequence by passing the length as an
+ // immediate.
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
+ g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+ }
Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
+ g.UseOperand(length, kArithmeticImm),
+ g.UseRegisterOrImmediateZero(value));
}
@@ -784,7 +1048,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
- m.right().IsInRange(32, 63)) {
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kArm64Lsl, g.DefineAsRegister(node),
@@ -949,6 +1213,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node)) return;
VisitRRO(this, kArm64Asr, node, kShift64Imm);
}
@@ -990,6 +1255,9 @@ void InstructionSelector::VisitWord64ReverseBits(Node* node) {
VisitRR(this, kArm64Rbit, node);
}
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
@@ -1100,6 +1368,33 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
VisitAddSub<Int64BinopMatcher>(this, node, kArm64Sub, kArm64Add);
}
+namespace {
+
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand result = g.DefineAsRegister(node);
+ InstructionOperand left = g.UseRegister(m.left().node());
+ InstructionOperand right = g.UseRegister(m.right().node());
+ selector->Emit(kArm64Smull, result, left, right);
+
+ InstructionCode opcode = cont->Encode(kArm64Cmp) |
+ AddressingModeField::encode(kMode_Operand2_R_SXTW);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), result, result,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ InstructionOperand in[] = {result, result};
+ selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
+ cont->frame_state());
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
+ }
+}
+
+} // namespace
void InstructionSelector::VisitInt32Mul(Node* node) {
Arm64OperandGenerator g(this);
@@ -1183,7 +1478,6 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
VisitRRR(this, kArm64Mul, node);
}
-
void InstructionSelector::VisitInt32MulHigh(Node* node) {
Arm64OperandGenerator g(this);
InstructionOperand const smull_operand = g.TempRegister();
@@ -1359,7 +1653,35 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
- VisitRR(this, kArm64Sxtw, node);
+ Node* value = node->InputAt(0);
+ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ MachineRepresentation rep = load_rep.representation();
+ InstructionCode opcode = kArchNop;
+ ImmediateMode immediate_mode = kNoImmediate;
+ switch (rep) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
+ immediate_mode = kLoadStoreImm8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
+ immediate_mode = kLoadStoreImm16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kArm64Ldrsw;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ VisitRR(this, kArm64Sxtw, node);
+ }
}
@@ -1396,6 +1718,20 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
return;
}
+ case IrOpcode::kLoad: {
+ // As for the operations above, a 32-bit load will implicitly clear the
+ // top 32 bits of the destination register.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ default:
+ break;
+ }
+ }
default:
break;
}
@@ -1407,33 +1743,21 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kArm64Float64ToFloat32, node);
}
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kArm64Float64ToInt32, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kArm64Float64ToInt32, node);
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Node* value = node->InputAt(0);
- if (CanCover(node, value) && value->InputCount() >= 2) {
- Int64BinopMatcher m(value);
- if ((m.IsWord64Sar() && m.right().HasValue() &&
- (m.right().Value() == 32)) ||
- (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
- Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
- g.UseImmediate(m.right().node()));
- return;
- }
- }
-
- Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+ // The top 32 bits in the 64-bit register will be undefined, and
+ // must not be used by a dependent node.
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
}
@@ -1491,31 +1815,10 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kArm64Float32Sub, node);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
- Arm64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- if (m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
- g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
- Emit(kArm64Float64Neg, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- return;
- }
VisitRRR(this, kArm64Float64Sub, node);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kArm64Float32Mul, node);
}
@@ -1543,22 +1846,18 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
VisitRRR(this, kArm64Float32Max, node);
}
-
void InstructionSelector::VisitFloat64Max(Node* node) {
VisitRRR(this, kArm64Float64Max, node);
}
-
void InstructionSelector::VisitFloat32Min(Node* node) {
VisitRRR(this, kArm64Float32Min, node);
}
-
void InstructionSelector::VisitFloat64Min(Node* node) {
VisitRRR(this, kArm64Float64Min, node);
}
@@ -1573,7 +1872,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kArm64Float64Abs, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kArm64Float32Sqrt, node);
}
@@ -1628,6 +1926,28 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, kArm64Float64RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kArm64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kArm64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Arm64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+ g.UseFixed(node->InputAt(1), d1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Arm64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1682,7 +2002,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1713,14 +2033,126 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
}
}
+// This function checks whether we can convert:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+// We only generate conditions <cond'> that are a combination of the N
+// and Z flags. This avoids the need to make this function dependent on
+// the flag-setting operation.
+bool CanUseFlagSettingBinop(FlagsCondition cond) {
+ switch (cond) {
+ case kEqual:
+ case kNotEqual:
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual: // x <= 0 -> x == 0
+ case kUnsignedGreaterThan: // x > 0 -> x != 0
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Map <cond> to <cond'> so that the following transformation is possible:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>.
+FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
+ DCHECK(CanUseFlagSettingBinop(cond));
+ switch (cond) {
+ case kEqual:
+ case kNotEqual:
+ return cond;
+ case kSignedLessThan:
+ return kNegative;
+ case kSignedGreaterThanOrEqual:
+ return kPositiveOrZero;
+ case kUnsignedLessThanOrEqual: // x <= 0 -> x == 0
+ return kEqual;
+ case kUnsignedGreaterThan: // x > 0 -> x != 0
+ return kNotEqual;
+ default:
+ UNREACHABLE();
+ return cond;
+ }
+}
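
Concretely, a sequence like add w0, w1, w2 followed by cmp w0, #0 collapses into a single adds w0, w1, w2, as long as the consumed condition reads only the N and Z flags, which adds sets from the result itself. kSignedLessThan maps to kNegative because, against zero, "result < 0" is exactly the N flag. A plain-integer sketch of that equivalence (assumes no signed overflow in the addition):

#include <cassert>
#include <cstdint>

int main() {
  int32_t a = -5, b = 2;
  int32_t result = a + b;    // what adds w0, w1, w2 computes
  bool lt_zero = result < 0;  // b.lt after cmp w0, #0
  bool n_flag = (static_cast<uint32_t>(result) >> 31) != 0;  // b.mi
  assert(lt_zero == n_flag);
}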
+
+// This function checks if we can perform the transformation:
+// ((a <op> b) cmp 0), b.<cond>
+// to:
+// (a <ops> b), b.<cond'>
+// where <ops> is the flag setting version of <op>, and if so,
+// updates {node}, {opcode} and {cont} accordingly.
+void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
+ Node** node, Node* binop,
+ ArchOpcode* opcode,
+ FlagsCondition cond,
+ FlagsContinuation* cont,
+ ImmediateMode* immediate_mode) {
+ ArchOpcode binop_opcode;
+ ArchOpcode no_output_opcode;
+ ImmediateMode binop_immediate_mode;
+ switch (binop->opcode()) {
+ case IrOpcode::kInt32Add:
+ binop_opcode = kArm64Add32;
+ no_output_opcode = kArm64Cmn32;
+ binop_immediate_mode = kArithmeticImm;
+ break;
+ case IrOpcode::kWord32And:
+ binop_opcode = kArm64And32;
+ no_output_opcode = kArm64Tst32;
+ binop_immediate_mode = kLogical32Imm;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (selector->CanCover(*node, binop)) {
+ // The comparison is the only user of the add or and, so we can generate
+ // a cmn or tst instead.
+ cont->Overwrite(MapForFlagSettingBinop(cond));
+ *opcode = no_output_opcode;
+ *node = binop;
+ *immediate_mode = binop_immediate_mode;
+ } else if (selector->IsOnlyUserOfNodeInSameBlock(*node, binop)) {
+ // We can also handle the case where the add and the compare are in the
+    // same basic block, and the compare is the only use of the add in this
+    // basic block (the add has users in other basic blocks).
+ cont->Overwrite(MapForFlagSettingBinop(cond));
+ *opcode = binop_opcode;
+ *node = binop;
+ *immediate_mode = binop_immediate_mode;
+ }
+}
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
ArchOpcode opcode = kArm64Cmp32;
-
- // Select negated compare for comparisons with negated right input.
- if (m.right().IsInt32Sub()) {
+ FlagsCondition cond = cont->condition();
+ ImmediateMode immediate_mode = kArithmeticImm;
+ if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
+ // Emit flag setting add/and instructions for comparisons against zero.
+ if (CanUseFlagSettingBinop(cond)) {
+ Node* binop = m.left().node();
+ MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+ cond, cont, &immediate_mode);
+ }
+ } else if (m.left().Is(0) &&
+ (m.right().IsInt32Add() || m.right().IsWord32And())) {
+ // Same as above, but we need to commute the condition before we
+ // continue with the rest of the checks.
+ cond = CommuteFlagsCondition(cond);
+ if (CanUseFlagSettingBinop(cond)) {
+ Node* binop = m.right().node();
+ MaybeReplaceCmpZeroWithFlagSettingBinop(selector, &node, binop, &opcode,
+ cond, cont, &immediate_mode);
+ }
+ } else if (m.right().IsInt32Sub()) {
+ // Select negated compare for comparisons with negated right input.
Node* sub = m.right().node();
Int32BinopMatcher msub(sub);
if (msub.left().Is(0)) {
@@ -1738,7 +2170,7 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
opcode = kArm64Cmn32;
}
}
- VisitBinop<Int32BinopMatcher>(selector, node, opcode, kArithmeticImm, cont);
+ VisitBinop<Int32BinopMatcher>(selector, node, opcode, immediate_mode, cont);
}
@@ -1761,6 +2193,23 @@ void VisitWord64Test(InstructionSelector* selector, Node* node,
VisitWordTest(selector, node, kArm64Tst, cont);
}
+template <typename Matcher, ArchOpcode kOpcode>
+bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ Matcher m(node);
+ if (cont->IsBranch() && m.right().HasValue() &&
+ (base::bits::CountPopulation(m.right().Value()) == 1)) {
+ // If the mask has only one bit set, we can use tbz/tbnz.
+ DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
+ selector->Emit(
+ cont->Encode(kOpcode), g.NoOutput(), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ return true;
+ }
+ return false;
+}
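
A mask with a single set bit turns (x & mask) != 0 into a test of that one bit, which is exactly what tbz/tbnz encode: the immediate is the bit index, i.e. the count of trailing zeros of the mask. A sketch of the equivalence (uses a GCC/Clang builtin for the trailing-zero count):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t mask = UINT64_C(1) << 37;       // CountPopulation == 1
  int bit = __builtin_ctzll(mask);         // 37, the tbz/tbnz bit immediate
  uint64_t x = (UINT64_C(1) << 37) | 0x5;  // bit 37 set
  assert(((x & mask) != 0) == (((x >> bit) & 1) != 0));
}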
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
@@ -1805,6 +2254,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
while (selector->CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
+ // Combine with comparisons against 0 by simply inverting the
+ // continuation.
Int32BinopMatcher m(value);
if (m.right().Is(0)) {
user = value;
@@ -1827,10 +2278,33 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
- case IrOpcode::kWord64Equal:
+ case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ Node* const left = m.left().node();
+ if (selector->CanCover(value, left) &&
+ left->opcode() == IrOpcode::kWord64And) {
+ // Attempt to merge the Word64Equal(Word64And(x, y), 0) comparison
+ // into a tbz/tbnz instruction.
+ if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
+ selector, left, cont)) {
+ return;
+ }
+ return VisitWordCompare(selector, left, kArm64Tst, cont, true,
+ kLogical64Imm);
+ }
+ // Merge the Word64Equal(x, 0) comparison into a cbz instruction.
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(kArm64CompareAndBranch), g.NoOutput(),
+ g.UseRegister(left), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ return;
+ }
+ }
return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
kArithmeticImm);
+ }
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, kArm64Cmp, cont, false,
@@ -1886,6 +2360,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(
selector, node, kArm64Sub32, kArithmeticImm, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ // ARM64 doesn't set the overflow flag for multiplication, so we
+ // need to test on kNotEqual. Here is the code sequence used:
+ // smull result, left, right
+ // cmp result.X(), Operand(result, SXTW)
+ cont->OverwriteAndNegateIfEqual(kNotEqual);
+ return EmitInt32MulWithOverflow(selector, node, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int64BinopMatcher>(selector, node, kArm64Add,
@@ -1905,42 +2386,20 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
kArithmeticImm);
case IrOpcode::kInt32Sub:
return VisitWord32Compare(selector, value, cont);
- case IrOpcode::kWord32And: {
- Int32BinopMatcher m(value);
- if (cont->IsBranch() && m.right().HasValue() &&
- (base::bits::CountPopulation32(m.right().Value()) == 1)) {
- // If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont->condition() == kEqual) ||
- (cont->condition() == kNotEqual));
- selector->Emit(
- cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
- g.UseRegister(m.left().node()),
- g.TempImmediate(
- base::bits::CountTrailingZeros32(m.right().Value())),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ case IrOpcode::kWord32And:
+ if (TryEmitTestAndBranch<Uint32BinopMatcher, kArm64TestAndBranch32>(
+ selector, value, cont)) {
return;
}
return VisitWordCompare(selector, value, kArm64Tst32, cont, true,
kLogical32Imm);
- }
- case IrOpcode::kWord64And: {
- Int64BinopMatcher m(value);
- if (cont->IsBranch() && m.right().HasValue() &&
- (base::bits::CountPopulation64(m.right().Value()) == 1)) {
- // If the mask has only one bit set, we can use tbz/tbnz.
- DCHECK((cont->condition() == kEqual) ||
- (cont->condition() == kNotEqual));
- selector->Emit(
- cont->Encode(kArm64TestAndBranch), g.NoOutput(),
- g.UseRegister(m.left().node()),
- g.TempImmediate(
- base::bits::CountTrailingZeros64(m.right().Value())),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ case IrOpcode::kWord64And:
+ if (TryEmitTestAndBranch<Uint64BinopMatcher, kArm64TestAndBranch>(
+ selector, value, cont)) {
return;
}
return VisitWordCompare(selector, value, kArm64Tst, cont, true,
kLogical64Imm);
- }
default:
break;
}
@@ -1956,7 +2415,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
DCHECK(cont->IsDeoptimize());
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
- cont->frame_state());
+ cont->reason(), cont->frame_state());
}
}
@@ -1969,14 +2428,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2017,20 +2476,24 @@ void InstructionSelector::VisitWord32Equal(Node* const node) {
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt32Add:
- return VisitWordCompare(this, value, kArm64Cmn32, &cont, true,
- kArithmeticImm);
+ case IrOpcode::kWord32And:
+ return VisitWord32Compare(this, node, &cont);
case IrOpcode::kInt32Sub:
return VisitWordCompare(this, value, kArm64Cmp32, &cont, false,
kArithmeticImm);
- case IrOpcode::kWord32And:
- return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
- kLogical32Imm);
case IrOpcode::kWord32Equal: {
// Word32Equal(Word32Equal(x, y), 0) => Word32Compare(x, y, ne).
Int32BinopMatcher mequal(value);
node->ReplaceInput(0, mequal.left().node());
node->ReplaceInput(1, mequal.right().node());
cont.Negate();
+ // {node} still does not cover its new operands, because {mequal} is
+ // still using them.
+ // Since we won't generate any more code for {mequal}, set its
+ // operands to zero to make sure {node} can cover them.
+ // This improves pattern matching in VisitWord32Compare.
+ mequal.node()->ReplaceInput(0, m.right().node());
+ mequal.node()->ReplaceInput(1, m.right().node());
return VisitWord32Compare(this, node, &cont);
}
default:
@@ -2111,6 +2574,18 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, &cont);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ // ARM64 doesn't set the overflow flag for multiplication, so we need to
+ // test on kNotEqual. Here is the code sequence used:
+ // smull result, left, right
+ // cmp result.X(), Operand(result, SXTW)
+ FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+ return EmitInt32MulWithOverflow(this, node, &cont);
+ }
+ FlagsContinuation cont;
+ EmitInt32MulWithOverflow(this, node, &cont);
+}
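
The smull/cmp-SXTW sequence tests whether the full 64-bit product still fits in 32 bits: the multiplication overflowed exactly when the 64-bit result differs from the sign extension of its own low word. The same invariant in portable C++ (a sketch of the check, not the emitted code):

#include <cassert>
#include <cstdint>

bool MulOverflows32(int32_t left, int32_t right) {
  int64_t full = static_cast<int64_t>(left) * right;  // what smull computes
  // cmp result.X(), Operand(result, SXTW): "not equal" means the upper
  // bits are not a sign extension of bit 31, i.e. overflow.
  return full != static_cast<int32_t>(full);
}

int main() {
  assert(!MulOverflows32(46340, 46340));  // 2147395600 fits in int32
  assert(MulOverflows32(46341, 46341));   // 2147488281 does not
}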
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -2246,15 +2721,70 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kArm64Float64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat32Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat32RoundDown |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat64Min |
+ return MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat64RoundUp |
@@ -2270,6 +2800,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kWord64ReverseBits;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
new file mode 100644
index 0000000000..f4b732bf77
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
@@ -0,0 +1,109 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/arm64/unwinding-info-writer-arm64.h"
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
+ const InstructionBlock* block) {
+ if (!enabled()) return;
+
+ block_will_exit_ = false;
+
+ DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+ const BlockInitialState* initial_state =
+ block_initial_states_[block->rpo_number().ToInt()];
+ if (initial_state) {
+ if (initial_state->saved_lr_ != saved_lr_) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ if (initial_state->saved_lr_) {
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ } else {
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+ }
+ saved_lr_ = initial_state->saved_lr_;
+ }
+ } else {
+ // The entry block always lacks an explicit initial state.
+ // The exit block may lack an explicit state, if it is only reached by
+ // the block ending in a ret.
+ // All the other blocks must have an explicit initial state.
+ DCHECK(block->predecessors().empty() || block->successors().empty());
+ }
+}
+
+void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
+ if (!enabled() || block_will_exit_) return;
+
+ for (const RpoNumber& successor : block->successors()) {
+ int successor_index = successor.ToInt();
+ DCHECK_LT(successor_index, block_initial_states_.size());
+ const BlockInitialState* existing_state =
+ block_initial_states_[successor_index];
+
+    // If we already had an entry for this BB, check that the values are the
+    // same as the ones we are trying to insert.
+ if (existing_state) {
+ DCHECK_EQ(existing_state->saved_lr_, saved_lr_);
+ } else {
+ block_initial_states_[successor_index] =
+ new (zone_) BlockInitialState(saved_lr_);
+ }
+ }
+}
+
+void UnwindingInfoWriter::MarkFrameConstructed(int at_pc) {
+ if (!enabled()) return;
+
+ // Regardless of the type of frame constructed, the relevant part of the
+ // layout is always the one in the diagram:
+ //
+ // | .... | higher addresses
+ // +----------+ ^
+ // | LR | | |
+ // +----------+ | |
+ // | saved FP | | |
+ // +----------+ <-- FP v
+ // | .... | stack growth
+ //
+ // The LR is pushed on the stack, and we can record this fact at the end of
+ // the construction, since the LR itself is not modified in the process.
+ eh_frame_writer_.AdvanceLocation(at_pc);
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, kPointerSize);
+ saved_lr_ = true;
+}
+
+void UnwindingInfoWriter::MarkFrameDeconstructed(int at_pc) {
+ if (!enabled()) return;
+
+  // The LR is restored by the last operation in LeaveFrame().
+ eh_frame_writer_.AdvanceLocation(at_pc);
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+ saved_lr_ = false;
+}
+
+void UnwindingInfoWriter::MarkLinkRegisterOnTopOfStack(int pc_offset,
+ const Register& sp) {
+ if (!enabled()) return;
+
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegisterAndOffset(sp, 0);
+ eh_frame_writer_.RecordRegisterSavedToStack(lr, 0);
+}
+
+void UnwindingInfoWriter::MarkPopLinkRegisterFromTopOfStack(int pc_offset) {
+ if (!enabled()) return;
+
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegisterAndOffset(fp, 0);
+ eh_frame_writer_.RecordRegisterFollowsInitialRule(lr);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
new file mode 100644
index 0000000000..a532851d84
--- /dev/null
+++ b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.h
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+ explicit UnwindingInfoWriter(Zone* zone)
+ : zone_(zone),
+ eh_frame_writer_(zone),
+ saved_lr_(false),
+ block_will_exit_(false),
+ block_initial_states_(zone) {
+ if (enabled()) eh_frame_writer_.Initialize();
+ }
+
+ void SetNumberOfInstructionBlocks(int number) {
+ if (enabled()) block_initial_states_.resize(number);
+ }
+
+ void BeginInstructionBlock(int pc_offset, const InstructionBlock* block);
+ void EndInstructionBlock(const InstructionBlock* block);
+
+ void MarkLinkRegisterOnTopOfStack(int pc_offset, const Register& sp);
+ void MarkPopLinkRegisterFromTopOfStack(int pc_offset);
+
+ void MarkFrameConstructed(int at_pc);
+ void MarkFrameDeconstructed(int at_pc);
+
+ void MarkBlockWillExit() { block_will_exit_ = true; }
+
+ void Finish(int code_size) {
+ if (enabled()) eh_frame_writer_.Finish(code_size);
+ }
+
+ EhFrameWriter* eh_frame_writer() {
+ return enabled() ? &eh_frame_writer_ : nullptr;
+ }
+
+ private:
+ bool enabled() const { return FLAG_perf_prof_unwinding_info; }
+
+ class BlockInitialState : public ZoneObject {
+ public:
+ explicit BlockInitialState(bool saved_lr) : saved_lr_(saved_lr) {}
+
+ bool saved_lr_;
+ };
+
+ Zone* zone_;
+ EhFrameWriter eh_frame_writer_;
+ bool saved_lr_;
+ bool block_will_exit_;
+
+ ZoneVector<const BlockInitialState*> block_initial_states_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_ARM64_UNWINDING_INFO_WRITER_H_
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 89bb61949a..0f1fb291eb 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -40,14 +40,14 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
// Plug a node into this expression context. Call this function in tail
// position in the Visit functions for expressions.
- virtual void ProduceValue(Node* value) = 0;
+ virtual void ProduceValue(Expression* expr, Node* value) = 0;
// Unplugs a node from this expression context. Call this to retrieve the
// result of another Visit function that already plugged the context.
virtual Node* ConsumeValue() = 0;
// Shortcut for "context->ProduceValue(context->ConsumeValue())".
- void ReplaceValue() { ProduceValue(ConsumeValue()); }
+ void ReplaceValue(Expression* expr) { ProduceValue(expr, ConsumeValue()); }
protected:
AstContext(AstGraphBuilder* owner, Expression::Context kind);
@@ -75,7 +75,7 @@ class AstGraphBuilder::AstEffectContext final : public AstContext {
explicit AstEffectContext(AstGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {}
~AstEffectContext() final;
- void ProduceValue(Node* value) final;
+ void ProduceValue(Expression* expr, Node* value) final;
Node* ConsumeValue() final;
};
@@ -86,7 +86,7 @@ class AstGraphBuilder::AstValueContext final : public AstContext {
explicit AstValueContext(AstGraphBuilder* owner)
: AstContext(owner, Expression::kValue) {}
~AstValueContext() final;
- void ProduceValue(Node* value) final;
+ void ProduceValue(Expression* expr, Node* value) final;
Node* ConsumeValue() final;
};
@@ -97,7 +97,7 @@ class AstGraphBuilder::AstTestContext final : public AstContext {
AstTestContext(AstGraphBuilder* owner, TypeFeedbackId feedback_id)
: AstContext(owner, Expression::kTest), feedback_id_(feedback_id) {}
~AstTestContext() final;
- void ProduceValue(Node* value) final;
+ void ProduceValue(Expression* expr, Node* value) final;
Node* ConsumeValue() final;
private:
@@ -178,14 +178,14 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
// Interface to execute a given command in this scope. Returning {true} here
// indicates successful execution whereas {false} requests to skip scope.
- virtual bool Execute(Command cmd, Statement* target, Node* value) {
+ virtual bool Execute(Command cmd, Statement* target, Node** value) {
// For function-level control.
switch (cmd) {
case CMD_THROW:
- builder()->BuildThrow(value);
+ builder()->BuildThrow(*value);
return true;
case CMD_RETURN:
- builder()->BuildReturn(value);
+ builder()->BuildReturn(*value);
return true;
case CMD_BREAK:
case CMD_CONTINUE:
@@ -281,9 +281,8 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
}
Node* NewPathDispatchCondition(Node* t1, Node* t2) {
- // TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
- // nodes all have kRepTagged|kTypeAny, which causes representation mismatch.
- return owner_->NewNode(owner_->javascript()->StrictEqual(), t1, t2);
+ return owner_->NewNode(
+ owner_->javascript()->StrictEqual(CompareOperationHint::kAny), t1, t2);
}
private:
@@ -303,7 +302,7 @@ class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
: ControlScope(owner), target_(target), control_(control) {}
protected:
- bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node** value) override {
if (target != target_) return false; // We are not the command target.
switch (cmd) {
case CMD_BREAK:
@@ -331,8 +330,11 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
: ControlScope(owner), target_(target), control_(control) {}
protected:
- bool Execute(Command cmd, Statement* target, Node* value) override {
- if (target != target_) return false; // We are not the command target.
+ bool Execute(Command cmd, Statement* target, Node** value) override {
+ if (target != target_) {
+ control_->ExitLoop(value);
+ return false;
+ }
switch (cmd) {
case CMD_BREAK:
control_->Break();
@@ -356,21 +358,20 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
// Control scope implementation for a TryCatchStatement.
class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
public:
- ControlScopeForCatch(AstGraphBuilder* owner, TryCatchBuilder* control)
+ ControlScopeForCatch(AstGraphBuilder* owner, TryCatchStatement* stmt,
+ TryCatchBuilder* control)
: ControlScope(owner), control_(control) {
builder()->try_nesting_level_++; // Increment nesting.
- builder()->try_catch_nesting_level_++;
}
~ControlScopeForCatch() {
builder()->try_nesting_level_--; // Decrement nesting.
- builder()->try_catch_nesting_level_--;
}
protected:
- bool Execute(Command cmd, Statement* target, Node* value) override {
+ bool Execute(Command cmd, Statement* target, Node** value) override {
switch (cmd) {
case CMD_THROW:
- control_->Throw(value);
+ control_->Throw(*value);
return true;
case CMD_BREAK:
case CMD_CONTINUE:
@@ -388,8 +389,8 @@ class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
// Control scope implementation for a TryFinallyStatement.
class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
public:
- ControlScopeForFinally(AstGraphBuilder* owner, DeferredCommands* commands,
- TryFinallyBuilder* control)
+ ControlScopeForFinally(AstGraphBuilder* owner, TryFinallyStatement* stmt,
+ DeferredCommands* commands, TryFinallyBuilder* control)
: ControlScope(owner), commands_(commands), control_(control) {
builder()->try_nesting_level_++; // Increment nesting.
}
@@ -398,9 +399,9 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
}
protected:
- bool Execute(Command cmd, Statement* target, Node* value) override {
- Node* token = commands_->RecordCommand(cmd, target, value);
- control_->LeaveTry(token, value);
+ bool Execute(Command cmd, Statement* target, Node** value) override {
+ Node* token = commands_->RecordCommand(cmd, target, *value);
+ control_->LeaveTry(token, *value);
return true;
}
@@ -410,52 +411,6 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
};
-// Helper for generating before and after frame states.
-class AstGraphBuilder::FrameStateBeforeAndAfter {
- public:
- FrameStateBeforeAndAfter(AstGraphBuilder* builder, BailoutId id_before)
- : builder_(builder), frame_state_before_(nullptr) {
- frame_state_before_ = id_before == BailoutId::None()
- ? builder_->jsgraph()->EmptyFrameState()
- : builder_->environment()->Checkpoint(id_before);
- }
-
- void AddToNode(
- Node* node, BailoutId id_after,
- OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore()) {
- int count = OperatorProperties::GetFrameStateInputCount(node->op());
- DCHECK_LE(count, 2);
-
- if (count >= 1) {
- // Add the frame state for after the operation.
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 0)->opcode());
-
- bool node_has_exception = NodeProperties::IsExceptionalCall(node);
-
- Node* frame_state_after =
- id_after == BailoutId::None()
- ? builder_->jsgraph()->EmptyFrameState()
- : builder_->environment()->Checkpoint(id_after, combine,
- node_has_exception);
-
- NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
- }
-
- if (count >= 2) {
- // Add the frame state for before the operation.
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 1)->opcode());
- NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
- }
- }
-
- private:
- AstGraphBuilder* builder_;
- Node* frame_state_before_;
-};
-
-
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
TypeHintAnalysis* type_hint_analysis)
@@ -468,7 +423,6 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
globals_(0, local_zone),
execution_control_(nullptr),
execution_context_(nullptr),
- try_catch_nesting_level_(0),
try_nesting_level_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
@@ -486,12 +440,18 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
Node* AstGraphBuilder::GetFunctionClosureForContext() {
- Scope* closure_scope = current_scope()->ClosureScope();
+ DeclarationScope* closure_scope = current_scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
// their closure, not the anonymous closure containing the global code.
return BuildLoadNativeContextField(Context::CLOSURE_INDEX);
+ } else if (closure_scope->is_eval_scope()) {
+ // Contexts nested inside eval code have the same closure as the context
+ // calling eval, not the anonymous closure containing the eval code.
+ const Operator* op =
+ javascript()->LoadContext(0, Context::CLOSURE_INDEX, false);
+ return NewNode(op, current_context());
} else {
DCHECK(closure_scope->is_function_scope());
return GetFunctionClosure();
@@ -533,9 +493,21 @@ Node* AstGraphBuilder::GetNewTarget() {
return new_target_.get();
}
+Node* AstGraphBuilder::GetEmptyFrameState() {
+ if (!empty_frame_state_.is_set()) {
+ const Operator* op = common()->FrameState(
+ BailoutId::None(), OutputFrameStateCombine::Ignore(), nullptr);
+ Node* node = graph()->NewNode(
+ op, jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+ jsgraph()->EmptyStateValues(), jsgraph()->NoContextConstant(),
+ jsgraph()->UndefinedConstant(), graph()->start());
+ empty_frame_state_.set(node);
+ }
+ return empty_frame_state_.get();
+}
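GetEmptyFrameState builds the shared empty frame state lazily and caches it, so every deoptimization-disabled checkpoint (see Environment::Checkpoint below) reuses one node instead of allocating a fresh one. The same memoization shape in a standalone sketch with stand-in types:

struct NodeSketch {};

class EmptyFrameStateCache {
 public:
  NodeSketch* Get() {
    if (cached_ == nullptr) cached_ = new NodeSketch();  // built on first use
    return cached_;  // every later call returns the same node
  }
 private:
  NodeSketch* cached_ = nullptr;
};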
bool AstGraphBuilder::CreateGraph(bool stack_check) {
- Scope* scope = info()->scope();
+ DeclarationScope* scope = info()->scope();
DCHECK_NOT_NULL(graph());
// Set up the basic structure of the graph. Outputs for {Start} are the formal
@@ -568,7 +540,7 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
}
// Build local context only if there are context allocated variables.
- if (info()->num_heap_slots() > 0) {
+ if (scope->num_heap_slots() > 0) {
// Push a new inner context scope for the current activation.
Node* inner_context = BuildLocalActivationContext(GetFunctionContext());
ContextScope top_context(this, scope, inner_context);
@@ -595,7 +567,7 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
void AstGraphBuilder::CreateGraphBody(bool stack_check) {
- Scope* scope = info()->scope();
+ DeclarationScope* scope = info()->scope();
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
@@ -661,8 +633,8 @@ static BailoutId BeforeId(VariableProxy* proxy) {
: BailoutId::None();
}
-
-static const char* GetDebugParameterName(Zone* zone, Scope* scope, int index) {
+static const char* GetDebugParameterName(Zone* zone, DeclarationScope* scope,
+ int index) {
#if DEBUG
const AstRawString* name = scope->parameter(index)->raw_name();
if (name && name->length() > 0) {
@@ -675,9 +647,8 @@ static const char* GetDebugParameterName(Zone* zone, Scope* scope, int index) {
return nullptr;
}
-
AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
- Scope* scope,
+ DeclarationScope* scope,
Node* control_dependency)
: builder_(builder),
parameters_count_(scope->num_parameters() + 1),
@@ -869,7 +840,7 @@ Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
OutputFrameStateCombine combine,
bool owner_has_exception) {
if (!builder()->info()->is_deoptimization_enabled()) {
- return builder()->jsgraph()->EmptyFrameState();
+ return builder()->GetEmptyFrameState();
}
UpdateStateValues(&parameters_node_, 0, parameters_count());
@@ -900,6 +871,34 @@ Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
return result;
}
+void AstGraphBuilder::Environment::PrepareForLoopExit(
+ Node* loop, BitVector* assigned_variables) {
+ if (IsMarkedAsUnreachable()) return;
+
+ DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
+
+ Node* control = GetControlDependency();
+
+ // Create the loop exit node.
+ Node* loop_exit = graph()->NewNode(common()->LoopExit(), control, loop);
+ UpdateControlDependency(loop_exit);
+
+  // Rename the environment values.
+ for (size_t i = 0; i < values()->size(); i++) {
+ if (assigned_variables == nullptr ||
+ static_cast<int>(i) >= assigned_variables->length() ||
+ assigned_variables->Contains(static_cast<int>(i))) {
+ Node* rename = graph()->NewNode(common()->LoopExitValue(), (*values())[i],
+ loop_exit);
+ (*values())[i] = rename;
+ }
+ }
+
+ // Rename the effect.
+ Node* effect_rename = graph()->NewNode(common()->LoopExitEffect(),
+ GetEffectDependency(), loop_exit);
+ UpdateEffectDependency(effect_rename);
+}
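The renaming above is selective: a value is wrapped in a LoopExitValue only when it may have been assigned inside the loop, or when no assignment analysis is available (assigned_variables == nullptr) and every slot must be renamed conservatively. The predicate in isolation, as a standalone sketch with a stand-in bit vector:

#include <vector>

struct BitVectorSketch {
  std::vector<bool> bits;
  int length() const { return static_cast<int>(bits.size()); }
  bool Contains(int i) const { return bits[i]; }
};

// Mirrors the condition in PrepareForLoopExit above.
bool NeedsLoopExitRename(const BitVectorSketch* assigned, int index) {
  return assigned == nullptr || index >= assigned->length() ||
         assigned->Contains(index);
}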
bool AstGraphBuilder::Environment::IsLivenessAnalysisEnabled() {
return FLAG_analyze_environment_liveness &&
@@ -942,19 +941,22 @@ AstGraphBuilder::AstTestContext::~AstTestContext() {
DCHECK(environment()->stack_height() == original_height_ + 1);
}
-
-void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
+void AstGraphBuilder::AstEffectContext::ProduceValue(Expression* expr,
+ Node* value) {
// The value is ignored.
+ owner()->PrepareEagerCheckpoint(expr->id());
}
-
-void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
+void AstGraphBuilder::AstValueContext::ProduceValue(Expression* expr,
+ Node* value) {
environment()->Push(value);
+ owner()->PrepareEagerCheckpoint(expr->id());
}
-
-void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
+void AstGraphBuilder::AstTestContext::ProduceValue(Expression* expr,
+ Node* value) {
environment()->Push(owner()->BuildToBoolean(value, feedback_id_));
+ owner()->PrepareEagerCheckpoint(expr->id());
}
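All three overrides now follow the same shape: handle the value according to the context kind (drop it, push it, or coerce it to boolean), then take one eager checkpoint at the producing expression's id. A standalone sketch of that shared shape, with stand-in types:

struct ExpressionSketch { int id; };

class AstContextSketch {
 public:
  virtual ~AstContextSketch() = default;
  void ProduceValue(ExpressionSketch* expr, int value) {
    HandleValue(value);                // context-specific: ignore/push/to-boolean
    PrepareEagerCheckpoint(expr->id);  // common tail in every override
  }
 protected:
  virtual void HandleValue(int value) = 0;
  void PrepareEagerCheckpoint(int id) { last_checkpoint_id_ = id; }
  int last_checkpoint_id_ = -1;
};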
@@ -989,7 +991,7 @@ void AstGraphBuilder::ControlScope::PerformCommand(Command command,
while (current != nullptr) {
environment()->TrimStack(current->stack_height());
environment()->TrimContextChain(current->context_length());
- if (current->Execute(command, target, value)) break;
+ if (current->Execute(command, target, &value)) break;
current = current->outer_;
}
builder()->set_environment(env);
@@ -1043,9 +1045,9 @@ void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
void AstGraphBuilder::VisitForValue(Expression* expr) {
AstValueContext for_value(this);
if (!CheckStackOverflow()) {
- expr->Accept(this);
+ VisitNoStackOverflowCheck(expr);
} else {
- ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+ ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
}
}
@@ -1053,9 +1055,9 @@ void AstGraphBuilder::VisitForValue(Expression* expr) {
void AstGraphBuilder::VisitForEffect(Expression* expr) {
AstEffectContext for_effect(this);
if (!CheckStackOverflow()) {
- expr->Accept(this);
+ VisitNoStackOverflowCheck(expr);
} else {
- ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+ ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
}
}
@@ -1063,9 +1065,9 @@ void AstGraphBuilder::VisitForEffect(Expression* expr) {
void AstGraphBuilder::VisitForTest(Expression* expr) {
AstTestContext for_condition(this, expr->test_id());
if (!CheckStackOverflow()) {
- expr->Accept(this);
+ VisitNoStackOverflowCheck(expr);
} else {
- ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+ ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
}
}
@@ -1073,43 +1075,49 @@ void AstGraphBuilder::VisitForTest(Expression* expr) {
void AstGraphBuilder::Visit(Expression* expr) {
// Reuses enclosing AstContext.
if (!CheckStackOverflow()) {
- expr->Accept(this);
+ VisitNoStackOverflowCheck(expr);
} else {
- ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+ ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
}
}
void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
- VariableMode mode = decl->mode();
- bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- Handle<Oddball> value = variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value();
- globals()->push_back(variable->name());
- globals()->push_back(value);
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
+ globals()->push_back(isolate()->factory()->undefined_value());
break;
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Node* value = jsgraph()->TheHoleConstant();
environment()->Bind(variable, value);
}
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Node* value = jsgraph()->TheHoleConstant();
const Operator* op = javascript()->StoreContext(0, variable->index());
NewNode(op, current_context(), value);
}
break;
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ DCHECK(!variable->binding_needs_init());
+ Node* name = jsgraph()->Constant(variable->name());
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
+ Node* store = NewNode(op, name);
+ PrepareFrameState(store, decl->proxy()->id());
+ break;
+ }
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
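For GLOBAL/UNALLOCATED variables the globals list is now a flat sequence of pairs: the Smi-encoded feedback slot followed by the initial value (undefined here, the closure in VisitFunctionDeclaration below), presumably consumed pairwise by Runtime::kDeclareGlobals. A standalone sketch of that encoding with stand-in types:

#include <vector>

struct HandleSketch { int payload; };

// Each declaration contributes two consecutive entries.
void PushGlobalPair(std::vector<HandleSketch>* globals, int feedback_slot,
                    HandleSketch initial_value) {
  globals->push_back(HandleSketch{feedback_slot});  // Smi-encoded slot
  globals->push_back(initial_value);                // undefined or a closure
}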
@@ -1123,7 +1131,9 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
decl->fun(), info()->script(), info());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
- globals()->push_back(variable->name());
+ FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
globals()->push_back(function);
break;
}
@@ -1141,22 +1151,22 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
NewNode(op, current_context(), value);
break;
}
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ VisitForValue(decl->fun());
+ Node* value = environment()->Pop();
+ Node* name = jsgraph()->Constant(variable->name());
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kDeclareEvalFunction);
+ Node* store = NewNode(op, name, value);
+ PrepareFrameState(store, decl->proxy()->id());
+ break;
+ }
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
-void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
- UNREACHABLE();
-}
-
-
-void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
- UNREACHABLE();
-}
-
-
void AstGraphBuilder::VisitBlock(Block* stmt) {
BlockBuilder block(this);
ControlScopeForBreakable scope(this, stmt, &block);
@@ -1262,7 +1272,15 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
VisitForValue(clause->label());
Node* label = environment()->Pop();
Node* tag = environment()->Top();
- const Operator* op = javascript()->StrictEqual();
+
+ CompareOperationHint hint;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetCompareOperationHint(clause->CompareId(),
+ &hint)) {
+ hint = CompareOperationHint::kAny;
+ }
+
+ const Operator* op = javascript()->StrictEqual(hint);
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
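The hint lookup used here (and again in VisitCompareOperation further down) follows one fallback pattern: ask the optional type-hint analysis, and default to CompareOperationHint::kAny when the analysis is missing or has nothing recorded for the id. As a standalone sketch (enum values other than kAny are placeholders):

enum class CompareOperationHintSketch { kAny, kOther };

struct TypeHintAnalysisSketch {
  bool GetCompareOperationHint(int id, CompareOperationHintSketch* out) const {
    (void)id; (void)out;
    return false;  // pretend nothing was recorded for this id
  }
};

CompareOperationHintSketch HintOrAny(const TypeHintAnalysisSketch* analysis,
                                     int id) {
  CompareOperationHintSketch hint;
  if (analysis == nullptr ||
      !analysis->GetCompareOperationHint(id, &hint)) {
    hint = CompareOperationHintSketch::kAny;
  }
  return hint;
}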
@@ -1292,7 +1310,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder while_loop(this);
while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- VisitIterationBody(stmt, &while_loop);
+ VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
while_loop.EndBody();
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
@@ -1307,7 +1325,7 @@ void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
while_loop.BreakUnless(condition);
- VisitIterationBody(stmt, &while_loop);
+ VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
while_loop.EndBody();
while_loop.EndLoop();
}
@@ -1324,7 +1342,7 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
} else {
for_loop.BreakUnless(jsgraph()->TrueConstant());
}
- VisitIterationBody(stmt, &for_loop);
+ VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
for_loop.EndBody();
VisitIfNotNull(stmt->next());
for_loop.EndLoop();
@@ -1338,10 +1356,12 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
for_block.BeginBlock();
// Check for null or undefined before entering loop.
Node* is_null_cond =
- NewNode(javascript()->StrictEqual(), object, jsgraph()->NullConstant());
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
+ jsgraph()->NullConstant());
for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
- Node* is_undefined_cond = NewNode(javascript()->StrictEqual(), object,
- jsgraph()->UndefinedConstant());
+ Node* is_undefined_cond =
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
+ jsgraph()->UndefinedConstant());
for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
{
// Convert object to jsobject.
@@ -1384,24 +1404,28 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
PrepareFrameState(value, stmt->FilterId(),
OutputFrameStateCombine::Push());
IfBuilder test_value(this);
- Node* test_value_cond = NewNode(javascript()->StrictEqual(), value,
- jsgraph()->UndefinedConstant());
+ Node* test_value_cond =
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), value,
+ jsgraph()->UndefinedConstant());
test_value.If(test_value_cond, BranchHint::kFalse);
test_value.Then();
test_value.Else();
{
+ environment()->Push(value);
+ PrepareEagerCheckpoint(stmt->FilterId());
+ value = environment()->Pop();
// Bind value and do loop body.
VectorSlotPair feedback =
CreateVectorSlotPair(stmt->EachFeedbackSlot());
- VisitForInAssignment(stmt->each(), value, feedback, stmt->FilterId(),
+ VisitForInAssignment(stmt->each(), value, feedback,
stmt->AssignmentId());
- VisitIterationBody(stmt, &for_loop);
+ VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
}
test_value.End();
- index = environment()->Peek(0);
for_loop.EndBody();
// Increment counter and continue.
+ index = environment()->Peek(0);
index = NewNode(javascript()->ForInStep(), index);
environment()->Poke(0, index);
}
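The push/checkpoint/pop sequence added above makes the filtered value part of the environment at the checkpoint, so that an eager deopt at FilterId resumes with the value on the operand stack, and then restores the stack shape for the code that follows. The same idiom in a standalone sketch with a stand-in environment:

#include <vector>

struct EnvironmentSketch {
  std::vector<int> stack;
  std::vector<int> checkpoint;
  void Push(int v) { stack.push_back(v); }
  int Pop() { int v = stack.back(); stack.pop_back(); return v; }
  void Checkpoint() { checkpoint = stack; }  // captures everything pushed
};

int CheckpointWithValue(EnvironmentSketch* env, int value) {
  env->Push(value);   // make the value visible to the checkpoint
  env->Checkpoint();
  return env->Pop();  // restore the stack for the code that follows
}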
@@ -1421,7 +1445,7 @@ void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
Node* condition = environment()->Pop();
for_loop.BreakWhen(condition);
VisitForEffect(stmt->assign_each());
- VisitIterationBody(stmt, &for_loop);
+ VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
for_loop.EndBody();
for_loop.EndLoop();
}
@@ -1434,7 +1458,7 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
// that is intercepting 'throw' control commands.
try_control.BeginTry();
{
- ControlScopeForCatch scope(this, &try_control);
+ ControlScopeForCatch scope(this, stmt, &try_control);
STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
environment()->Push(current_context());
Visit(stmt->try_block());
@@ -1480,7 +1504,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// that is intercepting all control commands.
try_control.BeginTry();
{
- ControlScopeForFinally scope(this, commands, &try_control);
+ ControlScopeForFinally scope(this, stmt, commands, &try_control);
STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
environment()->Push(current_context());
Visit(stmt->try_block());
@@ -1541,25 +1565,11 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
Node* value = NewNode(op);
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
- // Visit declarations and class literal in a block scope.
- if (expr->scope()->ContextLocalCount() > 0) {
- Node* context = BuildLocalBlockContext(expr->scope());
- ContextScope scope(this, expr->scope(), context);
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- } else {
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- }
-}
-
-
-void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
VisitForValueOrTheHole(expr->extends());
VisitForValue(expr->constructor());
@@ -1575,12 +1585,12 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
environment()->Push(literal);
// Load the "prototype" from the constructor.
- FrameStateBeforeAndAfter states(this, expr->CreateLiteralId());
+ PrepareEagerCheckpoint(expr->CreateLiteralId());
Handle<Name> name = isolate()->factory()->prototype_string();
VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
Node* prototype = BuildNamedLoad(literal, name, pair);
- states.AddToNode(prototype, expr->PrototypeId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(prototype, expr->PrototypeId(),
+ OutputFrameStateCombine::Push());
environment()->Push(prototype);
// Create nodes to store method values into the literal.
@@ -1620,7 +1630,8 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
jsgraph()->Constant(property->NeedsSetFunctionName());
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- NewNode(op, receiver, key, value, attr, set_function_name);
+ Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
+ PrepareFrameState(call, BailoutId::None());
break;
}
case ObjectLiteral::Property::GETTER: {
@@ -1640,24 +1651,22 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
}
}
- // Set both the prototype and constructor to have fast properties.
+ // Set the constructor to have fast properties.
prototype = environment()->Pop();
literal = environment()->Pop();
- const Operator* op =
- javascript()->CallRuntime(Runtime::kFinalizeClassDefinition);
- literal = NewNode(op, literal, prototype);
+ const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
+ literal = NewNode(op, literal);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
Variable* var = expr->class_variable_proxy()->var();
- FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback = CreateVectorSlotPair(
expr->NeedsProxySlot() ? expr->ProxySlot()
: FeedbackVectorSlot::Invalid());
BuildVariableAssignment(var, literal, Token::INIT, feedback,
- BailoutId::None(), states);
+ BailoutId::None());
}
- ast_context()->ProduceValue(literal);
+ ast_context()->ProduceValue(expr, literal);
}
@@ -1669,7 +1678,7 @@ void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
void AstGraphBuilder::VisitDoExpression(DoExpression* expr) {
VisitBlock(expr->block());
VisitVariableProxy(expr->result());
- ast_context()->ReplaceValue();
+ ast_context()->ReplaceValue(expr);
}
@@ -1683,22 +1692,26 @@ void AstGraphBuilder::VisitConditional(Conditional* expr) {
compare_if.Else();
Visit(expr->else_expression());
compare_if.End();
- ast_context()->ReplaceValue();
+ // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+ // sync with full codegen which doesn't prepare the proper bailout point (see
+ // the implementation of FullCodeGenerator::VisitForControl).
+ if (ast_context()->IsTest()) return;
+ ast_context()->ReplaceValue(expr);
}
void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(expr));
- Node* value = BuildVariableLoad(expr->var(), expr->id(), states, pair,
+ PrepareEagerCheckpoint(BeforeId(expr));
+ Node* value = BuildVariableLoad(expr->var(), expr->id(), pair,
ast_context()->GetStateCombine());
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitLiteral(Literal* expr) {
Node* value = jsgraph()->Constant(expr->value());
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
@@ -1710,7 +1723,7 @@ void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
expr->pattern(), expr->flags(), expr->literal_index());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
- ast_context()->ProduceValue(literal);
+ ast_context()->ProduceValue(expr, literal);
}
@@ -1747,18 +1760,18 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForValue(property->value());
- FrameStateBeforeAndAfter states(this, property->value()->id());
Node* value = environment()->Pop();
Node* literal = environment()->Top();
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
Node* store = BuildNamedStore(literal, name, value, feedback);
- states.AddToNode(store, key->id(),
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, key->id(),
+ OutputFrameStateCombine::Ignore());
BuildSetHomeObject(value, literal, property, 1);
} else {
VisitForEffect(property->value());
@@ -1797,12 +1810,16 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1823,8 +1840,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
Node* call = NewNode(op, literal, name, getter, setter, attr);
- // This should not lazy deopt on a new literal.
- PrepareFrameState(call, BailoutId::None());
+ PrepareFrameState(call, it->second->bailout_id);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1865,12 +1881,14 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
+ if (!property->emit_store()) continue;
Node* attr = jsgraph()->Constant(NONE);
Node* set_function_name =
jsgraph()->Constant(property->NeedsSetFunctionName());
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- NewNode(op, receiver, key, value, attr, set_function_name);
+ Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
+ PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
break;
}
case ObjectLiteral::Property::PROTOTYPE:
@@ -1895,7 +1913,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- ast_context()->ProduceValue(environment()->Pop());
+ ast_context()->ProduceValue(expr, environment()->Pop());
}
@@ -1934,16 +1952,13 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
VisitForValue(subexpr);
- {
- FrameStateBeforeAndAfter states(this, subexpr->id());
- VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
- Node* value = environment()->Pop();
- Node* index = jsgraph()->Constant(array_index);
- Node* literal = environment()->Top();
- Node* store = BuildKeyedStore(literal, index, value, pair);
- states.AddToNode(store, expr->GetIdForElement(array_index),
- OutputFrameStateCombine::Ignore());
- }
+ VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
+ Node* value = environment()->Pop();
+ Node* index = jsgraph()->Constant(array_index);
+ Node* literal = environment()->Top();
+ Node* store = BuildKeyedStore(literal, index, value, pair);
+ PrepareFrameState(store, expr->GetIdForElement(array_index),
+ OutputFrameStateCombine::Ignore());
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1966,14 +1981,12 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
}
}
- ast_context()->ProduceValue(environment()->Pop());
+ ast_context()->ProduceValue(expr, environment()->Pop());
}
-
void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
- BailoutId bailout_id_before,
- BailoutId bailout_id_after) {
+ BailoutId bailout_id) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
// Left-hand side can only be a property, a global or a variable slot.
@@ -1984,50 +1997,40 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
- environment()->Push(value);
- FrameStateBeforeAndAfter states(this, bailout_id_before);
- value = environment()->Pop();
- BuildVariableAssignment(var, value, Token::ASSIGN, feedback,
- bailout_id_after, states);
+ BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id);
break;
}
case NAMED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* object = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedStore(object, name, value, feedback);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
case KEYED_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
Node* store = BuildKeyedStore(object, key, value, feedback);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
case NAMED_SUPER_PROPERTY: {
environment()->Push(value);
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2035,14 +2038,12 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = environment()->Pop();
Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- states.AddToNode(store, bailout_id_after,
- OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
}
@@ -2086,7 +2087,6 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
break;
}
- BailoutId before_store_id = BailoutId::None();
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
if (expr->is_compound()) {
@@ -2096,10 +2096,9 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
VectorSlotPair pair =
CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
- old_value =
- BuildVariableLoad(proxy->var(), expr->target()->id(), states, pair,
- OutputFrameStateCombine::Push());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair,
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_PROPERTY: {
@@ -2107,10 +2106,9 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
old_value = BuildNamedLoad(object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -2118,10 +2116,9 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->key()->id());
old_value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2130,10 +2127,9 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2142,38 +2138,29 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* receiver = environment()->Peek(2);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- FrameStateBeforeAndAfter states(this, property->key()->id());
old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
break;
}
}
environment()->Push(old_value);
VisitForValue(expr->value());
- Node* value;
- {
- FrameStateBeforeAndAfter states(this, expr->value()->id());
- Node* right = environment()->Pop();
- Node* left = environment()->Pop();
- value =
- BuildBinaryOp(left, right, expr->binary_op(),
- expr->binary_operation()->BinaryOperationFeedbackId());
- states.AddToNode(value, expr->binary_operation()->id(),
- OutputFrameStateCombine::Push());
- }
+ Node* right = environment()->Pop();
+ Node* left = environment()->Pop();
+ Node* value =
+ BuildBinaryOp(left, right, expr->binary_op(),
+ expr->binary_operation()->BinaryOperationFeedbackId());
+ PrepareFrameState(value, expr->binary_operation()->id(),
+ OutputFrameStateCombine::Push());
environment()->Push(value);
if (needs_frame_state_before) {
- before_store_id = expr->binary_operation()->id();
+ PrepareEagerCheckpoint(expr->binary_operation()->id());
}
} else {
VisitForValue(expr->value());
- if (needs_frame_state_before) {
- before_store_id = expr->value()->id();
- }
}
- FrameStateBeforeAndAfter store_states(this, before_store_id);
// Store the value.
Node* value = environment()->Pop();
VectorSlotPair feedback = CreateVectorSlotPair(expr->AssignmentSlot());
@@ -2181,23 +2168,23 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
BuildVariableAssignment(variable, value, expr->op(), feedback, expr->id(),
- store_states, ast_context()->GetStateCombine());
+ ast_context()->GetStateCombine());
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedStore(object, name, value, feedback);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
Node* store = BuildKeyedStore(object, key, value, feedback);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2205,8 +2192,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* receiver = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2214,20 +2200,19 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- store_states.AddToNode(store, expr->id(),
- ast_context()->GetStateCombine());
+ PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
break;
}
}
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitYield(Yield* expr) {
- // TODO(turbofan): Implement yield here.
+ // Generator functions are supported only by going through Ignition first.
SetStackOverflow();
- ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+ ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
}
@@ -2235,7 +2220,7 @@ void AstGraphBuilder::VisitThrow(Throw* expr) {
VisitForValue(expr->exception());
Node* exception = environment()->Pop();
Node* value = BuildThrowError(exception, expr->id());
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
@@ -2249,54 +2234,50 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
break;
case NAMED_PROPERTY: {
VisitForValue(expr->obj());
- FrameStateBeforeAndAfter states(this, expr->obj()->id());
Node* object = environment()->Pop();
Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
value = BuildNamedLoad(object, name, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
VisitForValue(expr->obj());
VisitForValue(expr->key());
- FrameStateBeforeAndAfter states(this, expr->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY: {
VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
- FrameStateBeforeAndAfter states(this, expr->obj()->id());
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
VisitForValue(expr->key());
- FrameStateBeforeAndAfter states(this, expr->key()->id());
Node* key = environment()->Pop();
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
}
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitCall(Call* expr) {
Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
+ Call::CallType call_type = expr->GetCallType();
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
@@ -2308,10 +2289,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
case Call::GLOBAL_CALL: {
VariableProxy* proxy = callee->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
- callee_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
- pair, OutputFrameStateCombine::Push());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+ pair, OutputFrameStateCombine::Push());
receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
@@ -2334,12 +2314,11 @@ void AstGraphBuilder::VisitCall(Call* expr) {
VectorSlotPair feedback =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
VisitForValue(property->obj());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* object = environment()->Top();
callee_value = BuildNamedLoad(object, name, feedback);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -2353,12 +2332,11 @@ void AstGraphBuilder::VisitCall(Call* expr) {
CreateVectorSlotPair(property->PropertyFeedbackSlot());
VisitForValue(property->obj());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Pop();
Node* object = environment()->Top();
callee_value = BuildKeyedLoad(object, key, feedback);
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -2375,10 +2353,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* home = environment()->Peek(1);
Node* object = environment()->Top();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- FrameStateBeforeAndAfter states(this, property->obj()->id());
callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. Since the receiver is not the target of
// the load, it could very well be null or undefined at this point.
@@ -2398,10 +2375,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* key = environment()->Pop();
Node* home = environment()->Pop();
Node* object = environment()->Pop();
- FrameStateBeforeAndAfter states(this, property->key()->id());
callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
- states.AddToNode(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. Since the receiver is not the target of
// the load, it could very well be null or undefined at this point.
@@ -2456,11 +2432,13 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// provide a fully resolved callee to patch into the environment.
Node* function = GetFunctionClosure();
Node* language = jsgraph()->Constant(language_mode());
- Node* position = jsgraph()->Constant(current_scope()->start_position());
+ Node* eval_scope_position =
+ jsgraph()->Constant(current_scope()->start_position());
+ Node* eval_position = jsgraph()->Constant(expr->position());
const Operator* op =
javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
- Node* new_callee =
- NewNode(op, callee, source, function, language, position);
+ Node* new_callee = NewNode(op, callee, source, function, language,
+ eval_scope_position, eval_position);
PrepareFrameState(new_callee, expr->EvalId(),
OutputFrameStateCombine::PokeAt(arg_count + 1));
@@ -2472,12 +2450,12 @@ void AstGraphBuilder::VisitCall(Call* expr) {
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call = javascript()->CallFunction(
args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
- FrameStateBeforeAndAfter states(this, expr->CallId());
+ PrepareEagerCheckpoint(possibly_eval ? expr->EvalId() : expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
environment()->Push(value->InputAt(0)); // The callee passed to the call.
- states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
environment()->Drop(1);
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
@@ -2503,10 +2481,9 @@ void AstGraphBuilder::VisitCallSuper(Call* expr) {
// Create node to perform the super call.
const Operator* call =
javascript()->CallConstruct(args->length() + 2, VectorSlotPair());
- FrameStateBeforeAndAfter states(this, super->new_target_var()->id());
Node* value = ProcessArguments(call, args->length() + 2);
- states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
- ast_context()->ProduceValue(value);
+ PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ ast_context()->ProduceValue(expr, value);
}
@@ -2517,11 +2494,6 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // The baseline compiler doesn't push the new.target, so we need to record
- // the frame state before the push.
- FrameStateBeforeAndAfter states(
- this, args->is_empty() ? expr->expression()->id() : args->last()->id());
-
// The new target is the same as the callee.
environment()->Push(environment()->Peek(args->length()));
@@ -2530,8 +2502,8 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
const Operator* call =
javascript()->CallConstruct(args->length() + 2, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
- states.AddToNode(value, expr->ReturnId(), OutputFrameStateCombine::Push());
- ast_context()->ProduceValue(value);
+ PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
+ ast_context()->ProduceValue(expr, value);
}
@@ -2550,10 +2522,10 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// Create node to perform the JS runtime call.
const Operator* call = javascript()->CallFunction(args->length() + 2);
- FrameStateBeforeAndAfter states(this, expr->CallId());
+ PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
- ast_context()->ProduceValue(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(expr, value);
}
@@ -2571,10 +2543,13 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
// Create node to perform the runtime call.
Runtime::FunctionId functionId = expr->function()->function_id;
const Operator* call = javascript()->CallRuntime(functionId, args->length());
- FrameStateBeforeAndAfter states(this, expr->CallId());
+ if (expr->function()->intrinsic_type == Runtime::IntrinsicType::RUNTIME ||
+ expr->function()->function_id == Runtime::kInlineCall) {
+ PrepareEagerCheckpoint(expr->CallId());
+ }
Node* value = ProcessArguments(call, args->length());
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
- ast_context()->ProduceValue(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(expr, value);
}
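The checkpoint in VisitCallRuntime is now conditional: only genuine runtime calls, plus the inlined Call intrinsic (Runtime::kInlineCall), get an eager checkpoint, while other inline intrinsics are assumed not to deoptimize eagerly. The predicate in isolation, with stand-in types:

enum class IntrinsicTypeSketch { RUNTIME, INLINE };

// Mirrors the condition guarding PrepareEagerCheckpoint above.
bool NeedsEagerCheckpoint(IntrinsicTypeSketch type, bool is_inline_call) {
  return type == IntrinsicTypeSketch::RUNTIME || is_inline_call;
}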
@@ -2614,52 +2589,48 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
- old_value =
- BuildVariableLoad(proxy->var(), expr->expression()->id(), states,
- pair, OutputFrameStateCombine::Push());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ old_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+ pair, OutputFrameStateCombine::Push());
stack_depth = 0;
break;
}
case NAMED_PROPERTY: {
VisitForValue(property->obj());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* object = environment()->Top();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedLoad(object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 1;
break;
}
case KEYED_PROPERTY: {
VisitForValue(property->obj());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->key()->id());
Node* key = environment()->Top();
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedLoad(object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
case NAMED_SUPER_PROPERTY: {
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* home_object = environment()->Top();
Node* receiver = environment()->Peek(1);
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
@@ -2667,15 +2638,14 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
VisitForValue(property->key());
- FrameStateBeforeAndAfter states(this, property->obj()->id());
Node* key = environment()->Top();
Node* home_object = environment()->Peek(1);
Node* receiver = environment()->Peek(2);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- states.AddToNode(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ PrepareFrameState(old_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
stack_depth = 3;
break;
}
@@ -2688,7 +2658,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Create a proper eager frame state for the stores.
environment()->Push(old_value);
- FrameStateBeforeAndAfter store_states(this, expr->ToNumberId());
+ PrepareEagerCheckpoint(expr->ToNumberId());
old_value = environment()->Pop();
// Save result for postfix expressions at correct stack depth.
@@ -2701,16 +2671,10 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
// Create node to perform +1/-1 operation.
- Node* value;
- {
- // TODO(bmeurer): Cleanup this feedback/bailout mess!
- FrameStateBeforeAndAfter states(this, BailoutId::None());
- value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
- expr->binary_op(), expr->CountBinOpFeedbackId());
- // This should never deoptimize because we have converted to number before.
- states.AddToNode(value, BailoutId::None(),
- OutputFrameStateCombine::Ignore());
- }
+ Node* value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
+ expr->binary_op(), expr->CountBinOpFeedbackId());
+ // This should never lazy deopt because we have converted to number before.
+ PrepareFrameState(value, BailoutId::None());
// Store the value.
VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
@@ -2719,7 +2683,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Variable* variable = expr->expression()->AsVariableProxy()->var();
environment()->Push(value);
BuildVariableAssignment(variable, value, expr->op(), feedback,
- expr->AssignmentId(), store_states);
+ expr->AssignmentId());
environment()->Pop();
break;
}
@@ -2727,20 +2691,16 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedStore(object, name, value, feedback);
- environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
- environment()->Pop();
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
Node* store = BuildKeyedStore(object, key, value, feedback);
- environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
- environment()->Pop();
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY: {
@@ -2748,10 +2708,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* receiver = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
- environment()->Pop();
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Push());
break;
}
case KEYED_SUPER_PROPERTY: {
@@ -2759,10 +2717,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* home_object = environment()->Pop();
Node* receiver = environment()->Pop();
Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- environment()->Push(value);
- store_states.AddToNode(store, expr->AssignmentId(),
- OutputFrameStateCombine::Ignore());
- environment()->Pop();
+ PrepareFrameState(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Push());
break;
}
}
@@ -2770,7 +2726,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Restore old value for postfix expressions.
if (is_postfix) value = environment()->Pop();
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
@@ -2784,13 +2740,12 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
default: {
VisitForValue(expr->left());
VisitForValue(expr->right());
- FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->op(),
expr->BinaryOperationFeedbackId());
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
- ast_context()->ProduceValue(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(expr, value);
}
}
}
@@ -2801,32 +2756,30 @@ void AstGraphBuilder::VisitLiteralCompareNil(CompareOperation* expr,
const Operator* op = nullptr;
switch (expr->op()) {
case Token::EQ:
- op = javascript()->Equal();
+ op = javascript()->Equal(CompareOperationHint::kAny);
break;
case Token::EQ_STRICT:
- op = javascript()->StrictEqual();
+ op = javascript()->StrictEqual(CompareOperationHint::kAny);
break;
default:
UNREACHABLE();
}
VisitForValue(sub_expr);
- FrameStateBeforeAndAfter states(this, sub_expr->id());
Node* value_to_compare = environment()->Pop();
Node* value = NewNode(op, value_to_compare, nil_value);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
- return ast_context()->ProduceValue(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ return ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check) {
VisitTypeofExpression(sub_expr);
- FrameStateBeforeAndAfter states(this, sub_expr->id());
Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
- Node* value = NewNode(javascript()->StrictEqual(), typeof_arg,
- jsgraph()->Constant(check));
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
- return ast_context()->ProduceValue(value);
+ Node* value = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ typeof_arg, jsgraph()->Constant(check));
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ return ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
@@ -2846,34 +2799,40 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
}
+ CompareOperationHint hint;
+ if (!type_hint_analysis_ ||
+ !type_hint_analysis_->GetCompareOperationHint(
+ expr->CompareOperationFeedbackId(), &hint)) {
+ hint = CompareOperationHint::kAny;
+ }
+
const Operator* op;
switch (expr->op()) {
case Token::EQ:
- op = javascript()->Equal();
+ op = javascript()->Equal(hint);
break;
case Token::NE:
- op = javascript()->NotEqual();
+ op = javascript()->NotEqual(hint);
break;
case Token::EQ_STRICT:
- op = javascript()->StrictEqual();
+ op = javascript()->StrictEqual(hint);
break;
case Token::NE_STRICT:
- op = javascript()->StrictNotEqual();
+ op = javascript()->StrictNotEqual(hint);
break;
case Token::LT:
- op = javascript()->LessThan();
+ op = javascript()->LessThan(hint);
break;
case Token::GT:
- op = javascript()->GreaterThan();
+ op = javascript()->GreaterThan(hint);
break;
case Token::LTE:
- op = javascript()->LessThanOrEqual();
+ op = javascript()->LessThanOrEqual(hint);
break;
case Token::GTE:
- op = javascript()->GreaterThanOrEqual();
+ op = javascript()->GreaterThanOrEqual(hint);
break;
case Token::INSTANCEOF:
- DCHECK(!FLAG_harmony_instanceof);
op = javascript()->InstanceOf();
break;
case Token::IN:
@@ -2885,12 +2844,11 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
VisitForValue(expr->left());
VisitForValue(expr->right());
- FrameStateBeforeAndAfter states(this, expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = NewNode(op, left, right);
- states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
- ast_context()->ProduceValue(value);
+ PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ ast_context()->ProduceValue(expr, value);
}
@@ -2908,14 +2866,14 @@ void AstGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
Node* value = GetFunctionClosure();
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
Node* value = BuildThrowUnsupportedSuperError(expr->id());
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
@@ -2933,19 +2891,20 @@ void AstGraphBuilder::VisitCaseClause(CaseClause* expr) {
void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
DCHECK(globals()->empty());
- AstVisitor::VisitDeclarations(declarations);
+ AstVisitor<AstGraphBuilder>::VisitDeclarations(declarations);
if (globals()->empty()) return;
int array_index = 0;
+ Handle<TypeFeedbackVector> feedback_vector(
+ info()->closure()->feedback_vector());
Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
static_cast<int>(globals()->size()), TENURED);
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
- int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ int encoded_flags = info()->GetDeclareGlobalsFlags();
Node* flags = jsgraph()->Constant(encoded_flags);
Node* pairs = jsgraph()->Constant(data);
+ Node* vector = jsgraph()->Constant(feedback_vector);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
- Node* call = NewNode(op, pairs, flags);
+ Node* call = NewNode(op, pairs, flags, vector);
PrepareFrameState(call, BailoutId::Declarations());
globals()->clear();
}
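// The runtime call now carries the closure's feedback vector as a third
// argument, i.e. (illustrative shape):
//   CallRuntime[kDeclareGlobals](pairs, flags, vector)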
@@ -2963,13 +2922,13 @@ void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
Visit(stmt);
}
-
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
- LoopBuilder* loop) {
+ LoopBuilder* loop,
+ BailoutId stack_check_id) {
ControlScopeForIteration scope(this, stmt, loop);
if (FLAG_turbo_loop_stackcheck || !info()->shared_info()->asm_function()) {
Node* node = NewNode(javascript()->StackCheck());
- PrepareFrameState(node, stmt->StackCheckId());
+ PrepareFrameState(node, stack_check_id);
}
Visit(stmt->body());
}
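// Callers are expected to pass the statement-specific bailout id, e.g.
// (assumed caller sketch):
//   VisitIterationBody(stmt, &loop, stmt->StackCheckId());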
@@ -2978,12 +2937,10 @@ void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
Node* value;
if (expr->expression()->IsVariableProxy()) {
- // Delete of an unqualified identifier is only allowed in classic mode but
- // deleting "this" is allowed in all language modes.
- Variable* variable = expr->expression()->AsVariableProxy()->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
+ Variable* variable = expr->expression()->AsVariableProxy()->var();
+ DCHECK(is_sloppy(language_mode()) || variable->is_this());
value = BuildVariableDelete(variable, expr->id(),
ast_context()->GetStateCombine());
} else if (expr->expression()->IsProperty()) {
@@ -2998,14 +2955,14 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
VisitForEffect(expr->expression());
value = jsgraph()->TrueConstant();
}
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitVoid(UnaryOperation* expr) {
VisitForEffect(expr->expression());
Node* value = jsgraph()->UndefinedConstant();
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
@@ -3014,9 +2971,9 @@ void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- FrameStateBeforeAndAfter states(this, BeforeId(proxy));
+ PrepareEagerCheckpoint(BeforeId(proxy));
Node* load =
- BuildVariableLoad(proxy->var(), expr->id(), states, pair,
+ BuildVariableLoad(proxy->var(), expr->id(), pair,
OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
environment()->Push(load);
} else {
@@ -3027,33 +2984,50 @@ void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
VisitTypeofExpression(expr->expression());
Node* value = NewNode(javascript()->TypeOf(), environment()->Pop());
- ast_context()->ProduceValue(value);
+ ast_context()->ProduceValue(expr, value);
}
void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
- VisitForValue(expr->expression());
- Node* operand = environment()->Pop();
- Node* input = BuildToBoolean(operand, expr->expression()->test_id());
+ VisitForTest(expr->expression());
+ Node* input = environment()->Pop();
Node* value = NewNode(common()->Select(MachineRepresentation::kTagged), input,
jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
- ast_context()->ProduceValue(value);
+ // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+ // sync with full codegen which doesn't prepare the proper bailout point (see
+ // the implementation of FullCodeGenerator::VisitForControl).
+ if (ast_context()->IsTest()) return environment()->Push(value);
+ ast_context()->ProduceValue(expr, value);
}
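// Sketch of the pattern built for `!x`: the operand is evaluated in a test
// context and the boolean outcome is materialized as
//   Select[MachineRepresentation::kTagged](test(x), FalseConstant, TrueConstant)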
void AstGraphBuilder::VisitComma(BinaryOperation* expr) {
VisitForEffect(expr->left());
Visit(expr->right());
- ast_context()->ReplaceValue();
+ // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+ // sync with full codegen which doesn't prepare the proper bailout point (see
+ // the implementation of FullCodeGenerator::VisitForControl).
+ if (ast_context()->IsTest()) return;
+ ast_context()->ReplaceValue(expr);
}
void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
bool is_logical_and = expr->op() == Token::AND;
IfBuilder compare_if(this);
- VisitForValue(expr->left());
- Node* condition = environment()->Top();
- compare_if.If(BuildToBoolean(condition, expr->left()->test_id()));
+ // Only use an AST evaluation context of the value kind when this expression
+ // is evaluated as value as well. Otherwise stick to a test context which is
+ // in sync with full codegen (see FullCodeGenerator::VisitLogicalExpression).
+ Node* condition = nullptr;
+ if (ast_context()->IsValue()) {
+ VisitForValue(expr->left());
+ Node* left = environment()->Top();
+ condition = BuildToBoolean(left, expr->left()->test_id());
+ } else {
+ VisitForTest(expr->left());
+ condition = environment()->Top();
+ }
+ compare_if.If(condition);
compare_if.Then();
if (is_logical_and) {
environment()->Pop();
@@ -3073,7 +3047,11 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
environment()->Poke(0, jsgraph()->FalseConstant());
}
compare_if.End();
- ast_context()->ReplaceValue();
+ // Skip plugging AST evaluation contexts of the test kind. This is to stay in
+ // sync with full codegen which doesn't prepare the proper bailout point (see
+ // the implementation of FullCodeGenerator::VisitForControl).
+ if (ast_context()->IsTest()) return;
+ ast_context()->ReplaceValue(expr);
}
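// Example of the context distinction above: in `var r = a || b` the left
// operand must be visited for value, since `a` itself may become the result;
// in `if (a || b) ...` a test context suffices, matching full codegen.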
@@ -3084,7 +3062,7 @@ LanguageMode AstGraphBuilder::language_mode() const {
VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
FeedbackVectorSlot slot) const {
- return VectorSlotPair(handle(info()->shared_info()->feedback_vector()), slot);
+ return VectorSlotPair(handle(info()->closure()->feedback_vector()), slot);
}
@@ -3106,15 +3084,10 @@ const uint32_t kFullCheckRequired = -1;
uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
- bool found_eval_scope = false;
uint32_t check_depths = 0;
for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
if (s->num_heap_slots() <= 0) continue;
- // TODO(mstarzinger): If we have reached an eval scope, we check all
- // extensions from this point. Replicated from full-codegen, figure out
- // whether this is still needed. If not, drop {found_eval_scope} below.
- if (s->is_eval_scope()) found_eval_scope = true;
- if (!s->calls_sloppy_eval() && !found_eval_scope) continue;
+ if (!s->calls_sloppy_eval()) continue;
int depth = current_scope()->ContextChainLength(s);
if (depth > kMaxCheckDepth) return kFullCheckRequired;
check_depths |= 1 << depth;
@@ -3150,7 +3123,7 @@ Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
- Scope* scope = info()->scope();
+ DeclarationScope* scope = info()->scope();
// Allocate a new local context.
Node* local_context = scope->is_script_scope()
@@ -3183,7 +3156,7 @@ Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
- DCHECK(scope->is_function_scope());
+ DCHECK(scope->is_function_scope() || scope->is_eval_scope());
// Allocate a new local context.
int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
@@ -3235,9 +3208,8 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
// Assign the object to the {arguments} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None(), states);
+ BailoutId::None());
return object;
}
@@ -3254,9 +3226,8 @@ Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
// Assign the object to the {rest} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None(), states);
+ BailoutId::None());
return object;
}
@@ -3269,9 +3240,8 @@ Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
// Assign the object to the {.this_function} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(this_function_var, this_function, Token::INIT,
- VectorSlotPair(), BailoutId::None(), states);
+ VectorSlotPair(), BailoutId::None());
return this_function;
}
@@ -3284,29 +3254,19 @@ Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
// Assign the object to the {new.target} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
- FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
- BailoutId::None(), states);
+ BailoutId::None());
return object;
}
-Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
- Node* not_hole) {
- Node* the_hole = jsgraph()->TheHoleConstant();
- Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
- return NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, for_hole, not_hole);
-}
-
-
Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
Node* not_hole,
BailoutId bailout_id) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
- Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ value, the_hole);
hole_check.If(check);
hole_check.Then();
Node* error = BuildThrowReferenceError(variable, bailout_id);
@@ -3323,7 +3283,8 @@ Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
BailoutId bailout_id) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
- Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+ Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ value, the_hole);
hole_check.If(check);
hole_check.Then();
environment()->Push(for_hole);
@@ -3340,7 +3301,8 @@ Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
IfBuilder prototype_check(this);
Node* prototype_string =
jsgraph()->Constant(isolate()->factory()->prototype_string());
- Node* check = NewNode(javascript()->StrictEqual(), name, prototype_string);
+ Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ name, prototype_string);
prototype_check.If(check);
prototype_check.Then();
Node* error = BuildThrowStaticPrototypeError(bailout_id);
@@ -3354,12 +3316,10 @@ Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine combine,
TypeofMode typeof_mode) {
Node* the_hole = jsgraph()->TheHoleConstant();
- VariableMode mode = variable->mode();
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
@@ -3367,22 +3327,14 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
Handle<Name> name = variable->name();
if (Node* node = TryLoadGlobalConstant(name)) return node;
Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
- states.AddToNode(value, bailout_id, combine);
+ PrepareFrameState(value, bailout_id, combine);
return value;
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
// Local var, const, or let variable.
Node* value = environment()->Lookup(variable);
- if (mode == CONST_LEGACY) {
- // Perform check for uninitialized legacy const variables.
- if (value->op() == the_hole->op()) {
- value = jsgraph()->UndefinedConstant();
- } else if (value->opcode() == IrOpcode::kPhi) {
- Node* undefined = jsgraph()->UndefinedConstant();
- value = BuildHoleCheckSilent(value, undefined, value);
- }
- } else if (mode == LET || mode == CONST) {
+ if (variable->binding_needs_init()) {
// Perform check for uninitialized let/const variables.
if (value->op() == the_hole->op()) {
value = BuildThrowReferenceError(variable, bailout_id);
@@ -3402,11 +3354,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
// TODO(titzer): initialization checks are redundant for already
// initialized immutable context loads, but only specialization knows.
// Maybe specializer should be a parameter to the graph builder?
- if (mode == CONST_LEGACY) {
- // Perform check for uninitialized legacy const variables.
- Node* undefined = jsgraph()->UndefinedConstant();
- value = BuildHoleCheckSilent(value, undefined, value);
- } else if (mode == LET || mode == CONST) {
+ if (variable->binding_needs_init()) {
// Perform check for uninitialized let/const variables.
value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
}
@@ -3415,15 +3363,16 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Handle<String> name = variable->name();
- if (Node* node =
- TryLoadDynamicVariable(variable, name, bailout_id, states,
- feedback, combine, typeof_mode)) {
+ if (Node* node = TryLoadDynamicVariable(variable, name, bailout_id,
+ feedback, combine, typeof_mode)) {
return node;
}
Node* value = BuildDynamicLoad(name, typeof_mode);
- states.AddToNode(value, bailout_id, combine);
+ PrepareFrameState(value, bailout_id, combine);
return value;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
UNREACHABLE();
return nullptr;
@@ -3448,7 +3397,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
case VariableLocation::LOCAL:
case VariableLocation::CONTEXT: {
// Local var, const, or let variable or context variable.
- return jsgraph()->BooleanConstant(variable->HasThisName(isolate()));
+ return jsgraph()->BooleanConstant(variable->is_this());
}
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
@@ -3459,16 +3408,17 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
PrepareFrameState(result, bailout_id, combine);
return result;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
UNREACHABLE();
return nullptr;
}
-
Node* AstGraphBuilder::BuildVariableAssignment(
Variable* variable, Node* value, Token::Value op,
const VectorSlotPair& feedback, BailoutId bailout_id,
- FrameStateBeforeAndAfter& states, OutputFrameStateCombine combine) {
+ OutputFrameStateCombine combine) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
@@ -3477,19 +3427,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Global var, const, or let variable.
Handle<Name> name = variable->name();
Node* store = BuildGlobalStore(name, value, feedback);
- states.AddToNode(store, bailout_id, combine);
+ PrepareFrameState(store, bailout_id, combine);
return store;
}
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
// Local var, const, or let variable.
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an initialization check for legacy const variables.
- Node* current = environment()->Lookup(variable);
- if (current->op() != the_hole->op()) {
- value = BuildHoleCheckSilent(current, value, current);
- }
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3503,7 +3447,8 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// baseline code might contain debug code that inspects the variable.
Node* current = environment()->Lookup(variable);
CHECK_NOT_NULL(current);
- } else if (mode == LET && op != Token::INIT) {
+ } else if (mode == LET && op != Token::INIT &&
+ variable->binding_needs_init()) {
// Perform an initialization check for let declared variables.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
@@ -3520,13 +3465,15 @@ Node* AstGraphBuilder::BuildVariableAssignment(
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op != Token::INIT) {
- // Assignment to const is exception in all modes.
- Node* current = environment()->Lookup(variable);
- if (current->op() == the_hole->op()) {
- return BuildThrowReferenceError(variable, bailout_id);
- } else if (current->opcode() == IrOpcode::kPhi) {
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ if (variable->binding_needs_init()) {
+ Node* current = environment()->Lookup(variable);
+ if (current->op() == the_hole->op()) {
+ return BuildThrowReferenceError(variable, bailout_id);
+ } else if (current->opcode() == IrOpcode::kPhi) {
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ }
}
+ // Assignment to const is exception in all modes.
return BuildThrowConstAssignError(bailout_id);
}
environment()->Bind(variable, value);
@@ -3534,13 +3481,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an initialization check for legacy const variables.
- const Operator* op =
- javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
- value = BuildHoleCheckSilent(current, value, current);
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ if (mode == CONST_LEGACY && op != Token::INIT) {
// Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
@@ -3548,7 +3489,8 @@ Node* AstGraphBuilder::BuildVariableAssignment(
return BuildThrowConstAssignError(bailout_id);
}
return value;
- } else if (mode == LET && op != Token::INIT) {
+ } else if (mode == LET && op != Token::INIT &&
+ variable->binding_needs_init()) {
// Perform an initialization check for let declared variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
@@ -3565,11 +3507,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op != Token::INIT) {
+ if (variable->binding_needs_init()) {
+ const Operator* op =
+ javascript()->LoadContext(depth, variable->index(), false);
+ Node* current = NewNode(op, current_context());
+ BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ }
// Assignment to const is exception in all modes.
- const Operator* op =
- javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
return BuildThrowConstAssignError(bailout_id);
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
@@ -3578,12 +3522,12 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
Handle<Name> name = variable->name();
- // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
- // initializations of const declarations.
Node* store = BuildDynamicStore(name, value);
PrepareFrameState(store, bailout_id, combine);
return store;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
UNREACHABLE();
return nullptr;
@@ -3733,7 +3677,7 @@ Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
if (Node* node = TryFastToName(input)) return node;
Node* name = NewNode(javascript()->ToName(), input);
- PrepareFrameState(name, bailout_id);
+ PrepareFrameState(name, bailout_id, OutputFrameStateCombine::Push());
return name;
}
@@ -3751,11 +3695,11 @@ Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
Expression* expr = property->value();
if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
Handle<Name> name = isolate()->factory()->home_object_symbol();
- FrameStateBeforeAndAfter states(this, BailoutId::None());
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(slot_number));
Node* store = BuildNamedStore(value, name, home_object, feedback);
- states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
+ PrepareFrameState(store, BailoutId::None(),
+ OutputFrameStateCombine::Ignore());
return store;
}
@@ -3838,44 +3782,44 @@ Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
TypeFeedbackId feedback_id) {
const Operator* js_op;
- BinaryOperationHints hints;
+ BinaryOperationHint hint;
if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetBinaryOperationHints(feedback_id, &hints)) {
- hints = BinaryOperationHints::Any();
+ !type_hint_analysis_->GetBinaryOperationHint(feedback_id, &hint)) {
+ hint = BinaryOperationHint::kAny;
}
switch (op) {
case Token::BIT_OR:
- js_op = javascript()->BitwiseOr(hints);
+ js_op = javascript()->BitwiseOr(hint);
break;
case Token::BIT_AND:
- js_op = javascript()->BitwiseAnd(hints);
+ js_op = javascript()->BitwiseAnd(hint);
break;
case Token::BIT_XOR:
- js_op = javascript()->BitwiseXor(hints);
+ js_op = javascript()->BitwiseXor(hint);
break;
case Token::SHL:
- js_op = javascript()->ShiftLeft(hints);
+ js_op = javascript()->ShiftLeft(hint);
break;
case Token::SAR:
- js_op = javascript()->ShiftRight(hints);
+ js_op = javascript()->ShiftRight(hint);
break;
case Token::SHR:
- js_op = javascript()->ShiftRightLogical(hints);
+ js_op = javascript()->ShiftRightLogical(hint);
break;
case Token::ADD:
- js_op = javascript()->Add(hints);
+ js_op = javascript()->Add(hint);
break;
case Token::SUB:
- js_op = javascript()->Subtract(hints);
+ js_op = javascript()->Subtract(hint);
break;
case Token::MUL:
- js_op = javascript()->Multiply(hints);
+ js_op = javascript()->Multiply(hint);
break;
case Token::DIV:
- js_op = javascript()->Divide(hints);
+ js_op = javascript()->Divide(hint);
break;
case Token::MOD:
- js_op = javascript()->Modulus(hints);
+ js_op = javascript()->Modulus(hint);
break;
default:
UNREACHABLE();
@@ -3892,11 +3836,12 @@ Node* AstGraphBuilder::TryLoadGlobalConstant(Handle<Name> name) {
return nullptr;
}
-
-Node* AstGraphBuilder::TryLoadDynamicVariable(
- Variable* variable, Handle<String> name, BailoutId bailout_id,
- FrameStateBeforeAndAfter& states, const VectorSlotPair& feedback,
- OutputFrameStateCombine combine, TypeofMode typeof_mode) {
+Node* AstGraphBuilder::TryLoadDynamicVariable(Variable* variable,
+ Handle<String> name,
+ BailoutId bailout_id,
+ const VectorSlotPair& feedback,
+ OutputFrameStateCombine combine,
+ TypeofMode typeof_mode) {
VariableMode mode = variable->mode();
if (mode == DYNAMIC_GLOBAL) {
@@ -3918,8 +3863,9 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
Node* load = NewNode(
javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
current_context());
- Node* check = NewNode(javascript()->StrictEqual(), load,
- jsgraph()->TheHoleConstant());
+ Node* check =
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
+ jsgraph()->TheHoleConstant());
fast_block.BreakUnless(check, BranchHint::kTrue);
}
@@ -3929,7 +3875,7 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
} else {
// Perform global slot load.
Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
- states.AddToNode(fast, bailout_id, combine);
+ PrepareFrameState(fast, bailout_id, combine);
environment()->Push(fast);
}
slow_block.Break();
@@ -3938,7 +3884,7 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
Node* slow = BuildDynamicLoad(name, typeof_mode);
- states.AddToNode(slow, bailout_id, combine);
+ PrepareFrameState(slow, bailout_id, combine);
environment()->Push(slow);
slow_block.EndBlock();
@@ -3964,16 +3910,17 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
Node* load = NewNode(
javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
current_context());
- Node* check = NewNode(javascript()->StrictEqual(), load,
- jsgraph()->TheHoleConstant());
+ Node* check =
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
+ jsgraph()->TheHoleConstant());
fast_block.BreakUnless(check, BranchHint::kTrue);
}
// Fast case, because variable is not shadowed. Perform context slot load.
Variable* local = variable->local_if_not_shadowed();
DCHECK(local->location() == VariableLocation::CONTEXT); // Must be context.
- Node* fast = BuildVariableLoad(local, bailout_id, states, feedback, combine,
- typeof_mode);
+ Node* fast =
+ BuildVariableLoad(local, bailout_id, feedback, combine, typeof_mode);
environment()->Push(fast);
slow_block.Break();
environment()->Pop();
@@ -3981,7 +3928,7 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
Node* slow = BuildDynamicLoad(name, typeof_mode);
- states.AddToNode(slow, bailout_id, combine);
+ PrepareFrameState(slow, bailout_id, combine);
environment()->Push(slow);
slow_block.EndBlock();
@@ -4052,18 +3999,32 @@ bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
OutputFrameStateCombine combine) {
- if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
+ DCHECK(ast_id.IsNone() || info()->shared_info()->VerifyBailoutId(ast_id));
DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
-
DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 0)->opcode());
- bool node_has_exception = NodeProperties::IsExceptionalCall(node);
- NodeProperties::ReplaceFrameStateInput(
- node, 0,
- environment()->Checkpoint(ast_id, combine, node_has_exception));
+ NodeProperties::GetFrameStateInput(node)->opcode());
+ bool has_exception = NodeProperties::IsExceptionalCall(node);
+ Node* state = environment()->Checkpoint(ast_id, combine, has_exception);
+ NodeProperties::ReplaceFrameStateInput(node, state);
}
}
+void AstGraphBuilder::PrepareEagerCheckpoint(BailoutId ast_id) {
+ if (environment()->GetEffectDependency()->opcode() == IrOpcode::kCheckpoint) {
+    // We skip preparing a checkpoint if there already is one on the current
+    // effect dependency. This is just an optimization, not needed for
+    // correctness.
+ return;
+ }
+ if (ast_id != BailoutId::None()) {
+ DCHECK(info()->shared_info()->VerifyBailoutId(ast_id));
+ Node* node = NewNode(common()->Checkpoint());
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node)->opcode());
+ Node* state = environment()->Checkpoint(ast_id);
+ NodeProperties::ReplaceFrameStateInput(node, state);
+ }
+}
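// Intended division of labor between the two helpers, as a minimal usage
// sketch (cf. VisitTypeofExpression above):
//   PrepareEagerCheckpoint(BeforeId(proxy));       // eager: state *before* op
//   Node* load = BuildVariableLoad(...);           // node gets {Dead} sentinel
//   PrepareFrameState(load, expr->id(), combine);  // lazy: state *after* op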
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
@@ -4087,7 +4048,7 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK_EQ(op->ValueInputCount(), value_input_count);
bool has_context = OperatorProperties::HasContextInput(op);
- int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
+ bool has_frame_state = OperatorProperties::HasFrameStateInput(op);
bool has_control = op->ControlInputCount() == 1;
bool has_effect = op->EffectInputCount() == 1;
@@ -4095,13 +4056,13 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK(op->EffectInputCount() < 2);
Node* result = nullptr;
- if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
+ if (!has_context && !has_frame_state && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
bool inside_try_scope = try_nesting_level_ > 0;
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
- input_count_with_deps += frame_state_count;
+ if (has_frame_state) ++input_count_with_deps;
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
Node** buffer = EnsureInputBufferSize(input_count_with_deps);
@@ -4110,7 +4071,7 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_context) {
*current_input++ = current_context();
}
- for (int i = 0; i < frame_state_count; i++) {
+ if (has_frame_state) {
// The frame state will be inserted later. Here we misuse
// the {Dead} node as a sentinel to be later overwritten
// with the real frame state.
@@ -4134,13 +4095,9 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
}
// Add implicit exception continuation for throwing nodes.
if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
- // Conservative prediction whether caught locally.
- IfExceptionHint hint = try_catch_nesting_level_ > 0
- ? IfExceptionHint::kLocallyCaught
- : IfExceptionHint::kLocallyUncaught;
// Copy the environment for the success continuation.
Environment* success_env = environment()->CopyForConditional();
- const Operator* op = common()->IfException(hint);
+ const Operator* op = common()->IfException();
Node* effect = environment()->GetEffectDependency();
Node* on_exception = graph()->NewNode(op, effect, result);
environment_->UpdateControlDependency(on_exception);
@@ -4309,7 +4266,6 @@ Node* AstGraphBuilder::NewPhi(int count, Node* input, Node* control) {
}
-// TODO(mstarzinger): Revisit this once we have proper effect states.
Node* AstGraphBuilder::NewEffectPhi(int count, Node* input, Node* control) {
const Operator* phi_op = common()->EffectPhi(count);
Node** buffer = EnsureInputBufferSize(count + 1);
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index e206db0c1a..bd307ba29a 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -15,7 +15,7 @@ namespace internal {
// Forward declarations.
class BitVector;
-
+class CompilationInfo;
namespace compiler {
@@ -32,11 +32,14 @@ class TypeHintAnalysis;
// underlying AST. The produced graph can either be compiled into a
// stand-alone function or be wired into another graph for the purposes
// of function inlining.
-class AstGraphBuilder : public AstVisitor {
+// This AstVisitor is not final, and provides the AstVisitor methods as virtual
+// methods so they can be specialized by subclasses.
+class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment = nullptr,
TypeHintAnalysis* type_hint_analysis = nullptr);
+ virtual ~AstGraphBuilder() {}
// Creates a graph by visiting the entire AST.
bool CreateGraph(bool stack_check = true);
@@ -51,13 +54,13 @@ class AstGraphBuilder : public AstVisitor {
}
protected:
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
// Visiting functions for AST nodes make this an AstVisitor.
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Visiting function for declarations list is overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
private:
class AstContext;
@@ -71,7 +74,6 @@ class AstGraphBuilder : public AstVisitor {
class ControlScopeForCatch;
class ControlScopeForFinally;
class Environment;
- class FrameStateBeforeAndAfter;
friend class ControlBuilder;
Isolate* isolate_;
@@ -96,7 +98,6 @@ class AstGraphBuilder : public AstVisitor {
SetOncePointer<Node> new_target_;
// Tracks how many try-blocks are currently entered.
- int try_catch_nesting_level_;
int try_nesting_level_;
// Temporary storage for building node input lists.
@@ -106,6 +107,9 @@ class AstGraphBuilder : public AstVisitor {
// Optimization to cache loaded feedback vector.
SetOncePointer<Node> feedback_vector_;
+ // Optimization to cache empty frame state.
+ SetOncePointer<Node> empty_frame_state_;
+
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
@@ -167,6 +171,9 @@ class AstGraphBuilder : public AstVisitor {
// Get or create the node that represents the incoming new target value.
Node* GetNewTarget();
+ // Get or create the node that represents the empty frame state.
+ Node* GetEmptyFrameState();
+
// Node creation helpers.
Node* NewNode(const Operator* op, bool incomplete = false) {
return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
@@ -225,11 +232,18 @@ class AstGraphBuilder : public AstVisitor {
// Helper to indicate a node exits the function body.
void UpdateControlDependencyToLeaveFunction(Node* exit);
- // Builds deoptimization for a given node.
+  // Prepare information for lazy deoptimization. This information is attached
+  // to the given node, and the node's output value is combined into the frame
+  // state according to {framestate_combine}.
+  // Conceptually this frame state is "after" a given operation.
void PrepareFrameState(Node* node, BailoutId ast_id,
OutputFrameStateCombine framestate_combine =
OutputFrameStateCombine::Ignore());
+ // Prepare information for eager deoptimization. This information is carried
+ // by dedicated {Checkpoint} nodes that are wired into the effect chain.
+ // Conceptually this frame state is "before" a given operation.
+ void PrepareEagerCheckpoint(BailoutId ast_id);
+
BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
// Check if the given statement is an OSR entry.
@@ -277,13 +291,11 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildVariableAssignment(Variable* variable, Node* value,
Token::Value op, const VectorSlotPair& slot,
BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
OutputFrameStateCombine framestate_combine =
OutputFrameStateCombine::Ignore());
Node* BuildVariableDelete(Variable* variable, BailoutId bailout_id,
OutputFrameStateCombine framestate_combine);
Node* BuildVariableLoad(Variable* variable, BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine framestate_combine,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
@@ -341,7 +353,6 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
// Builders for dynamic hole-checks at runtime.
- Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
BailoutId bailout_id);
Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
@@ -375,7 +386,6 @@ class AstGraphBuilder : public AstVisitor {
// to resolve to a global slot or context slot (inferred from scope chain).
Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
BailoutId bailout_id,
- FrameStateBeforeAndAfter& states,
const VectorSlotPair& feedback,
OutputFrameStateCombine combine,
TypeofMode typeof_mode);
@@ -405,7 +415,8 @@ class AstGraphBuilder : public AstVisitor {
void VisitForValues(ZoneList<Expression*>* exprs);
// Common for all IterationStatement bodies.
- void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop);
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop,
+ BailoutId stack_check_id);
// Dispatched from VisitCall.
void VisitCallSuper(Call* expr);
@@ -436,16 +447,12 @@ class AstGraphBuilder : public AstVisitor {
// Dispatched from VisitForInStatement.
void VisitForInAssignment(Expression* expr, Node* value,
const VectorSlotPair& feedback,
- BailoutId bailout_id_before,
- BailoutId bailout_id_after);
+ BailoutId bailout_id);
// Dispatched from VisitObjectLiteral.
void VisitObjectLiteralAccessor(Node* home_object,
ObjectLiteralProperty* property);
- // Dispatched from VisitClassLiteral.
- void VisitClassLiteralContents(ClassLiteral* expr);
-
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
};
@@ -461,7 +468,8 @@ class AstGraphBuilder : public AstVisitor {
//
class AstGraphBuilder::Environment : public ZoneObject {
public:
- Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
+ Environment(AstGraphBuilder* builder, DeclarationScope* scope,
+ Node* control_dependency);
int parameters_count() const { return parameters_count_; }
int locals_count() const { return locals_count_; }
@@ -530,6 +538,10 @@ class AstGraphBuilder::Environment : public ZoneObject {
OutputFrameStateCombine::Ignore(),
bool node_has_exception = false);
+ // Inserts a loop exit control node and renames the environment.
+ // This is useful for loop peeling to insert phis at loop exits.
+ void PrepareForLoopExit(Node* loop, BitVector* assigned_variables);
+
// Control dependency tracked by this environment.
Node* GetControlDependency() { return control_dependency_; }
void UpdateControlDependency(Node* dependency) {
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index ac96399774..22438c720a 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -55,8 +55,6 @@ void ALAA::Exit(IterationStatement* loop) {
void ALAA::VisitVariableDeclaration(VariableDeclaration* leaf) {}
void ALAA::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
-void ALAA::VisitImportDeclaration(ImportDeclaration* leaf) {}
-void ALAA::VisitExportDeclaration(ExportDeclaration* leaf) {}
void ALAA::VisitEmptyStatement(EmptyStatement* leaf) {}
void ALAA::VisitContinueStatement(ContinueStatement* leaf) {}
void ALAA::VisitBreakStatement(BreakStatement* leaf) {}
@@ -265,8 +263,9 @@ void ALAA::VisitForInStatement(ForInStatement* loop) {
void ALAA::VisitForOfStatement(ForOfStatement* loop) {
Visit(loop->assign_iterator());
Enter(loop);
+ Visit(loop->next_result());
+ Visit(loop->result_done());
Visit(loop->assign_each());
- Visit(loop->subject());
Visit(loop->body());
Exit(loop);
}
@@ -298,17 +297,15 @@ void ALAA::AnalyzeAssignment(Variable* var) {
}
}
-
-int ALAA::GetVariableIndex(Scope* scope, Variable* var) {
+int ALAA::GetVariableIndex(DeclarationScope* scope, Variable* var) {
CHECK(var->IsStackAllocated());
if (var->is_this()) return 0;
if (var->IsParameter()) return 1 + var->index();
return 1 + scope->num_parameters() + var->index();
}
-
-int LoopAssignmentAnalysis::GetAssignmentCountForTesting(Scope* scope,
- Variable* var) {
+int LoopAssignmentAnalysis::GetAssignmentCountForTesting(
+ DeclarationScope* scope, Variable* var) {
int count = 0;
int var_index = AstLoopAssignmentAnalyzer::GetVariableIndex(scope, var);
for (size_t i = 0; i < list_.size(); i++) {
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
index 169691135a..0893fd1074 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.h
@@ -12,8 +12,9 @@
namespace v8 {
namespace internal {
-class Variable;
+class CompilationInfo;
class Scope;
+class Variable;
namespace compiler {
@@ -29,7 +30,7 @@ class LoopAssignmentAnalysis : public ZoneObject {
return nullptr;
}
- int GetAssignmentCountForTesting(Scope* scope, Variable* var);
+ int GetAssignmentCountForTesting(DeclarationScope* scope, Variable* var);
private:
friend class AstLoopAssignmentAnalyzer;
@@ -39,17 +40,18 @@ class LoopAssignmentAnalysis : public ZoneObject {
// The class that performs loop assignment analysis by walking the AST.
-class AstLoopAssignmentAnalyzer : public AstVisitor {
+class AstLoopAssignmentAnalyzer final
+ : public AstVisitor<AstLoopAssignmentAnalyzer> {
public:
AstLoopAssignmentAnalyzer(Zone* zone, CompilationInfo* info);
LoopAssignmentAnalysis* Analyze();
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- static int GetVariableIndex(Scope* scope, Variable* var);
+ static int GetVariableIndex(DeclarationScope* scope, Variable* var);
private:
CompilationInfo* info_;
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 427612c36e..9b36eb1068 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -83,6 +83,7 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
node->opcode() == IrOpcode::kDeoptimizeUnless);
bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+ DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
Node* condition = NodeProperties::GetValueInput(node, 0);
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -92,24 +93,24 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
// yet because we will have to recompute anyway once we compute the
// predecessor.
if (conditions == nullptr) {
- DCHECK_NULL(node_conditions_.Get(node));
- return NoChange();
+ return UpdateConditions(node, conditions);
}
Maybe<bool> condition_value = conditions->LookupCondition(condition);
if (condition_value.IsJust()) {
// If we know the condition we can discard the branch.
if (condition_is_true == condition_value.FromJust()) {
- // We don't to update the conditions here, because we're replacing with
- // the {control} node that already contains the right information.
- return Replace(control);
+ // We don't update the conditions here, because we're replacing {node}
+ // with the {control} node that already contains the right information.
+ ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, control);
+ control =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
- return Replace(dead());
}
+ return Replace(dead());
}
return UpdateConditions(
node, conditions->AddCondition(zone_, condition, condition_is_true));
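// Net behavior (sketch): for DeoptimizeUnless(cond) with cond known true, the
// node is replaced via ReplaceWithValue and dropped; with cond known false it
// degenerates into an unconditional Deoptimize merged into graph()->end().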
@@ -123,8 +124,7 @@ Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
// yet because we will have to recompute anyway once we compute the
// predecessor.
if (from_branch == nullptr) {
- DCHECK(node_conditions_.Get(node) == nullptr);
- return NoChange();
+ return UpdateConditions(node, nullptr);
}
Node* condition = branch->InputAt(0);
return UpdateConditions(
@@ -145,8 +145,7 @@ Reduction BranchElimination::ReduceMerge(Node* node) {
// input.
for (int i = 0; i < node->InputCount(); i++) {
if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
- DCHECK(node_conditions_.Get(node) == nullptr);
- return NoChange();
+ return UpdateConditions(node, nullptr);
}
}
@@ -209,7 +208,8 @@ Reduction BranchElimination::UpdateConditions(
// Only signal that the node has Changed if the condition information has
// changed.
if (conditions != original) {
- if (original == nullptr || *conditions != *original) {
+ if (conditions == nullptr || original == nullptr ||
+ *conditions != *original) {
node_conditions_.Set(node, conditions);
return Changed(node);
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 2249cbcb3f..a17947a246 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -60,6 +60,9 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Environment* CopyForConditional() const;
Environment* CopyForLoop();
void Merge(Environment* other);
+ void PrepareForOsr();
+
+ void PrepareForLoopExit(Node* loop);
private:
explicit Environment(const Environment* copy);
@@ -109,6 +112,11 @@ class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
id_before, OutputFrameStateCombine::Ignore());
id_after_ = BailoutId(id_before.ToInt() +
builder->bytecode_iterator().current_bytecode_size());
+ // Create an explicit checkpoint node for before the operation.
+ Node* node = builder_->NewNode(builder_->common()->Checkpoint());
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, frame_state_before_);
}
~FrameStateBeforeAndAfter() {
@@ -123,29 +131,21 @@ class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
void AddToNode(Node* node, OutputFrameStateCombine combine) {
DCHECK(!added_to_node_);
- int count = OperatorProperties::GetFrameStateInputCount(node->op());
- DCHECK_LE(count, 2);
- if (count >= 1) {
+ bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
+ if (has_frame_state) {
// Add the frame state for after the operation.
DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::GetFrameStateInput(node)->opcode());
Node* frame_state_after =
builder_->environment()->Checkpoint(id_after_, combine);
- NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
- }
-
- if (count >= 2) {
- // Add the frame state for before the operation.
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 1)->opcode());
- NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before_);
+ NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
if (!combine.IsOutputIgnored()) {
output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
output_poke_count_ = node->op()->ValueOutputCount();
}
- frame_states_unused_ = count == 0;
+ frame_states_unused_ = !has_frame_state;
added_to_node_ = true;
}
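// Typical lifecycle of this helper after the change (cf. VisitLdaGlobal
// below, a minimal sketch):
//   FrameStateBeforeAndAfter states(this);   // ctor emits an eager Checkpoint
//   Node* node = BuildLoadGlobal(...);       // node carries a {Dead} sentinel
//   environment()->BindAccumulator(node, &states);  // attach "after" state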
@@ -352,12 +352,39 @@ void BytecodeGraphBuilder::Environment::PrepareForLoop() {
builder()->exit_controls_.push_back(terminate);
}
+void BytecodeGraphBuilder::Environment::PrepareForOsr() {
+ DCHECK_EQ(IrOpcode::kLoop, GetControlDependency()->opcode());
+ DCHECK_EQ(1, GetControlDependency()->InputCount());
+ Node* start = graph()->start();
+
+ // Create a control node for the OSR entry point and merge it into the loop
+ // header. Update the current environment's control dependency accordingly.
+ Node* entry = graph()->NewNode(common()->OsrLoopEntry(), start, start);
+ Node* control = builder()->MergeControl(GetControlDependency(), entry);
+ UpdateControlDependency(control);
+
+ // Create a merge of the effect from the OSR entry and the existing effect
+ // dependency. Update the current environment's effect dependency accordingly.
+ Node* effect = builder()->MergeEffect(GetEffectDependency(), entry, control);
+ UpdateEffectDependency(effect);
+
+ // Rename all values in the environment which will extend or introduce Phi
+ // nodes to contain the OSR values available at the entry point.
+ Node* osr_context = graph()->NewNode(
+ common()->OsrValue(Linkage::kOsrContextSpillSlotIndex), entry);
+ context_ = builder()->MergeValue(context_, osr_context, control);
+ int size = static_cast<int>(values()->size());
+ for (int i = 0; i < size; i++) {
+ int idx = i; // Indexing scheme follows {StandardFrame}, adapt accordingly.
+ if (i >= register_base()) idx += InterpreterFrameConstants::kExtraSlotCount;
+ if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
+ Node* osr_value = graph()->NewNode(common()->OsrValue(idx), entry);
+ values_[i] = builder()->MergeValue(values_[i], osr_value, control);
+ }
+}
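// Worked example of the index remapping above (numbers illustrative only):
// with register_base() == 3 and accumulator_base() == 10,
//   i == 1  (parameter)   -> idx == 1
//   i == 5  (register)    -> idx == 5 + InterpreterFrameConstants::kExtraSlotCount
//   i == 10 (accumulator) -> idx == Linkage::kOsrAccumulatorRegisterIndex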
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
Node** state_values, int offset, int count) {
- if (!builder()->deoptimization_enabled_) {
- return false;
- }
if (*state_values == nullptr) {
return true;
}
@@ -372,6 +399,31 @@ bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
return false;
}
+void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
+ DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
+
+ Node* control = GetControlDependency();
+
+ // Create the loop exit node.
+ Node* loop_exit = graph()->NewNode(common()->LoopExit(), control, loop);
+ UpdateControlDependency(loop_exit);
+
+ // Rename the effect.
+ Node* effect_rename = graph()->NewNode(common()->LoopExitEffect(),
+ GetEffectDependency(), loop_exit);
+ UpdateEffectDependency(effect_rename);
+
+  // TODO(jarin) We should also rename the context here. However, unconditional
+ // renaming confuses global object and native context specialization.
+ // We should only rename if the context is assigned in the loop.
+
+  // Rename the environment values.
+ for (size_t i = 0; i < values_.size(); i++) {
+ Node* rename =
+ graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
+ values_[i] = rename;
+ }
+}
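// Resulting exit seam per loop (sketch): one LoopExit(control, loop) in the
// control chain, one LoopExitEffect(effect, exit) in the effect chain, and a
// LoopExitValue(v, exit) per environment value v, giving later phases such as
// loop peeling a well-defined place to insert phis.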
void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
int offset,
@@ -385,10 +437,6 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine) {
- if (!builder()->deoptimization_enabled_) {
- return builder()->jsgraph()->EmptyFrameState();
- }
-
// TODO(rmcilroy): Consider using StateValuesCache for some state values.
UpdateStateValues(&parameters_state_values_, 0, parameter_count());
UpdateStateValues(&registers_state_values_, register_base(),
@@ -423,7 +471,6 @@ bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
int output_poke_offset, int output_poke_count) {
- if (!builder()->deoptimization_enabled_) return true;
// Poke offset is relative to the top of the stack (i.e., the accumulator).
int output_poke_start = accumulator_base() - output_poke_offset;
int output_poke_end = output_poke_start + output_poke_count;
@@ -444,12 +491,12 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
bytecode_array_(handle(info->shared_info()->bytecode_array())),
exception_handler_table_(
handle(HandlerTable::cast(bytecode_array()->handler_table()))),
- feedback_vector_(handle(info->shared_info()->feedback_vector())),
+ feedback_vector_(handle(info->closure()->feedback_vector())),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(),
bytecode_array()->register_count(), info->shared_info())),
- deoptimization_enabled_(info->is_deoptimization_enabled()),
+ osr_ast_id_(info->osr_ast_id()),
merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
@@ -524,6 +571,10 @@ bool BytecodeGraphBuilder::CreateGraph() {
GetFunctionContext());
set_environment(&env);
+ // For OSR add an {OsrNormalEntry} as the start of the top-level environment.
+ // It will be replaced with {Dead} after typing and optimizations.
+ if (!osr_ast_id_.IsNone()) NewNode(common()->OsrNormalEntry());
+
VisitBytecodes();
// Finish the basic structure of the graph.
@@ -538,8 +589,11 @@ bool BytecodeGraphBuilder::CreateGraph() {
void BytecodeGraphBuilder::VisitBytecodes() {
BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
+ BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
analysis.Analyze();
+ loop_analysis.Analyze();
set_branch_analysis(&analysis);
+ set_loop_analysis(&loop_analysis);
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
while (!iterator.done()) {
@@ -586,6 +640,11 @@ void BytecodeGraphBuilder::VisitLdaUndefined() {
environment()->BindAccumulator(node);
}
+void BytecodeGraphBuilder::VisitLdrUndefined() {
+ Node* node = jsgraph()->UndefinedConstant();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node);
+}
+
void BytecodeGraphBuilder::VisitLdaNull() {
Node* node = jsgraph()->NullConstant();
environment()->BindAccumulator(node);
@@ -623,25 +682,33 @@ void BytecodeGraphBuilder::VisitMov() {
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
}
-void BytecodeGraphBuilder::BuildLoadGlobal(
- TypeofMode typeof_mode) {
- FrameStateBeforeAndAfter states(this);
- Handle<Name> name =
- Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+Node* BytecodeGraphBuilder::BuildLoadGlobal(TypeofMode typeof_mode) {
VectorSlotPair feedback =
- CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
-
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(0));
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
+ feedback_vector()->GetKind(feedback.slot()));
+ Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- Node* node = NewNode(op, GetFunctionClosure());
- environment()->BindAccumulator(node, &states);
+ return NewNode(op, GetFunctionClosure());
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
- BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ environment()->BindAccumulator(node, &states);
+}
+
+void BytecodeGraphBuilder::VisitLdrGlobal() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), node,
+ &states);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
- BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
+ environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
@@ -665,7 +732,7 @@ void BytecodeGraphBuilder::VisitStaGlobalStrict() {
BuildStoreGlobal(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::VisitLdaContextSlot() {
+Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
// TODO(mythria): LoadContextSlots are unrolled by the required depth when
// generating bytecode. Hence the value of depth is always 0. Update this
// code when the implementation changes.
@@ -676,10 +743,19 @@ void BytecodeGraphBuilder::VisitLdaContextSlot() {
0, bytecode_iterator().GetIndexOperand(1), false);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* node = NewNode(op, context);
+ return NewNode(op, context);
+}
+
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
+ Node* node = BuildLoadContextSlot();
environment()->BindAccumulator(node);
}
+void BytecodeGraphBuilder::VisitLdrContextSlot() {
+ Node* node = BuildLoadContextSlot();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node);
+}
+
void BytecodeGraphBuilder::VisitStaContextSlot() {
// TODO(mythria): LoadContextSlots are unrolled by the required depth when
// generating bytecode. Hence the value of depth is always 0. Update this
@@ -732,8 +808,7 @@ void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
BuildStaLookupSlot(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::BuildNamedLoad() {
- FrameStateBeforeAndAfter states(this);
+Node* BytecodeGraphBuilder::BuildNamedLoad() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Handle<Name> name =
@@ -742,14 +817,23 @@ void BytecodeGraphBuilder::BuildNamedLoad() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
- Node* node = NewNode(op, object, GetFunctionClosure());
- environment()->BindAccumulator(node, &states);
+ return NewNode(op, object, GetFunctionClosure());
}
-void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
+void BytecodeGraphBuilder::VisitLdaNamedProperty() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildNamedLoad();
+ environment()->BindAccumulator(node, &states);
+}
-void BytecodeGraphBuilder::BuildKeyedLoad() {
+void BytecodeGraphBuilder::VisitLdrNamedProperty() {
FrameStateBeforeAndAfter states(this);
+ Node* node = BuildNamedLoad();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node,
+ &states);
+}
+
+Node* BytecodeGraphBuilder::BuildKeyedLoad() {
Node* key = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -757,11 +841,21 @@ void BytecodeGraphBuilder::BuildKeyedLoad() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
const Operator* op = javascript()->LoadProperty(feedback);
- Node* node = NewNode(op, object, key, GetFunctionClosure());
+ return NewNode(op, object, key, GetFunctionClosure());
+}
+
+void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildKeyedLoad();
environment()->BindAccumulator(node, &states);
}
-void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
+void BytecodeGraphBuilder::VisitLdrKeyedProperty() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = BuildKeyedLoad();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node,
+ &states);
+}
void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
FrameStateBeforeAndAfter states(this);
@@ -778,11 +872,11 @@ void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
environment()->RecordAfterState(node, &states);
}
-void BytecodeGraphBuilder::VisitStoreICSloppy() {
+void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
BuildNamedStore(LanguageMode::SLOPPY);
}
-void BytecodeGraphBuilder::VisitStoreICStrict() {
+void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
BuildNamedStore(LanguageMode::STRICT);
}
@@ -801,11 +895,11 @@ void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
environment()->RecordAfterState(node, &states);
}
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppy() {
+void BytecodeGraphBuilder::VisitStaKeyedPropertySloppy() {
BuildKeyedStore(LanguageMode::SLOPPY);
}
-void BytecodeGraphBuilder::VisitKeyedStoreICStrict() {
+void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
BuildKeyedStore(LanguageMode::STRICT);
}
@@ -832,6 +926,43 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
environment()->BindAccumulator(closure);
}
+void BytecodeGraphBuilder::VisitCreateBlockContext() {
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
+
+ const Operator* op = javascript()->CreateBlockContext(scope_info);
+ Node* context = NewNode(op, environment()->LookupAccumulator());
+ environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateFunctionContext() {
+ uint32_t slots = bytecode_iterator().GetIndexOperand(0);
+ const Operator* op = javascript()->CreateFunctionContext(slots);
+ Node* context = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateCatchContext() {
+ interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
+ Node* exception = environment()->LookupRegister(reg);
+ Handle<String> name =
+ Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+ Node* closure = environment()->LookupAccumulator();
+
+ const Operator* op = javascript()->CreateCatchContext(name);
+ Node* context = NewNode(op, exception, closure);
+ environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateWithContext() {
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+
+ const Operator* op = javascript()->CreateWithContext();
+ Node* context = NewNode(op, object, environment()->LookupAccumulator());
+ environment()->BindAccumulator(context);
+}
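+
The four context-creation visitors above share one shape: gather inputs from the bytecode operands and the accumulator, build the matching JSCreate*Context operator, and bind the new context to the accumulator. An illustrative mapping to source constructs (the JavaScript snippets are assumptions for illustration; the operator inputs are as built above):

    { let x; ... }   ->  JSCreateBlockContext[scope_info](closure)       // closure from accumulator
    function body    ->  JSCreateFunctionContext[slots](closure)
    catch (e) { }    ->  JSCreateCatchContext[name](exception, closure)  // exception from register
    with (o) { }     ->  JSCreateWithContext(object, closure)            // object from register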
+
void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
FrameStateBeforeAndAfter states(this);
const Operator* op = javascript()->CreateArguments(type);
@@ -879,15 +1010,21 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
+ FrameStateBeforeAndAfter states(this);
Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
- int literal_flags = bytecode_iterator().GetFlagOperand(2);
+ int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
+ int literal_flags =
+ interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
// TODO(mstarzinger): Thread through number of properties.
int number_of_properties = constant_properties->length() / 2;
- const Operator* op = javascript()->CreateLiteralObject(
- constant_properties, literal_flags, literal_index, number_of_properties);
- BuildCreateLiteral(op);
+ Node* literal = NewNode(
+ javascript()->CreateLiteralObject(constant_properties, literal_flags,
+ literal_index, number_of_properties),
+ GetFunctionClosure());
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3),
+ literal, &states);
}
Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
@@ -908,14 +1045,15 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
FrameStateBeforeAndAfter states(this);
- // TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
- // register has been loaded with null / undefined explicitly or we are sure it
- // is not null / undefined.
ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
Node* callee =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+
+ // Slot index of 0 is used to indicate that no feedback slot is available.
+ // Assert the assumption that slot index 0 is never a valid feedback slot.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
@@ -963,8 +1101,7 @@ Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
void BytecodeGraphBuilder::VisitCallRuntime() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
- bytecode_iterator().GetRuntimeIdOperand(0));
+ Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -976,8 +1113,7 @@ void BytecodeGraphBuilder::VisitCallRuntime() {
void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
- bytecode_iterator().GetRuntimeIdOperand(0));
+ Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
interpreter::Register first_return =
@@ -991,8 +1127,7 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
FrameStateBeforeAndAfter states(this);
- Runtime::FunctionId functionId = static_cast<Runtime::FunctionId>(
- bytecode_iterator().GetRuntimeIdOperand(0));
+ Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1042,6 +1177,7 @@ void BytecodeGraphBuilder::BuildThrow() {
}
void BytecodeGraphBuilder::VisitThrow() {
+ BuildLoopExitsForFunctionExit();
BuildThrow();
Node* call = environment()->LookupAccumulator();
Node* control = NewNode(common()->Throw(), call);
@@ -1049,6 +1185,7 @@ void BytecodeGraphBuilder::VisitThrow() {
}
void BytecodeGraphBuilder::VisitReThrow() {
+ BuildLoopExitsForFunctionExit();
Node* value = environment()->LookupAccumulator();
Node* call = NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
Node* control = NewNode(common()->Throw(), call);
@@ -1064,78 +1201,143 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
environment()->BindAccumulator(node, &states);
}
+// Helper function to create a binary operation hint from the recorded type
+// feedback.
+BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
+ int operand_index) {
+ FeedbackVectorSlot slot = feedback_vector()->ToSlot(
+ bytecode_iterator().GetIndexOperand(operand_index));
+ DCHECK_EQ(FeedbackVectorSlotKind::GENERAL, feedback_vector()->GetKind(slot));
+ Object* feedback = feedback_vector()->Get(slot);
+ BinaryOperationHint hint = BinaryOperationHint::kAny;
+ if (feedback->IsSmi()) {
+ hint = BinaryOperationHintFromFeedback((Smi::cast(feedback))->value());
+ }
+ return hint;
+}
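
The operand index of the feedback slot differs per bytecode shape; the constants introduced in the header later in this diff pin the three cases down:

    kBinaryOperationHintIndex    = 1  // Add, Sub, ...       : slot is operand 1
    kCountOperationHintIndex     = 0  // Inc, Dec            : slot is operand 0
    kBinaryOperationSmiHintIndex = 2  // AddSmi, SubSmi, ... : slot is operand 2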
+
void BytecodeGraphBuilder::VisitAdd() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Add(hints));
+ BuildBinaryOp(
+ javascript()->Add(GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitSub() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Subtract(hints));
+ BuildBinaryOp(javascript()->Subtract(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitMul() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Multiply(hints));
+ BuildBinaryOp(javascript()->Multiply(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitDiv() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Divide(hints));
+ BuildBinaryOp(
+ javascript()->Divide(GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitMod() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Modulus(hints));
+ BuildBinaryOp(
+ javascript()->Modulus(GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitBitwiseOr() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->BitwiseOr(hints));
+ BuildBinaryOp(javascript()->BitwiseOr(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitBitwiseXor() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->BitwiseXor(hints));
+ BuildBinaryOp(javascript()->BitwiseXor(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitBitwiseAnd() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->BitwiseAnd(hints));
+ BuildBinaryOp(javascript()->BitwiseAnd(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitShiftLeft() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->ShiftLeft(hints));
+ BuildBinaryOp(javascript()->ShiftLeft(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitShiftRight() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->ShiftRight(hints));
+ BuildBinaryOp(javascript()->ShiftRight(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
}
void BytecodeGraphBuilder::VisitShiftRightLogical() {
- BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->ShiftRightLogical(hints));
+ BuildBinaryOp(javascript()->ShiftRightLogical(
+ GetBinaryOperationHint(kBinaryOperationHintIndex)));
+}
+
+void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
+ FrameStateBeforeAndAfter states(this);
+ Node* left =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
+ Node* node = NewNode(js_op, left, right);
+ environment()->BindAccumulator(node, &states);
+}
+
+void BytecodeGraphBuilder::VisitAddSmi() {
+ BuildBinaryOpWithImmediate(
+ javascript()->Add(GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitSubSmi() {
+ BuildBinaryOpWithImmediate(javascript()->Subtract(
+ GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
+ BuildBinaryOpWithImmediate(javascript()->BitwiseOr(
+ GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
+ BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(
+ GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitShiftLeftSmi() {
+ BuildBinaryOpWithImmediate(javascript()->ShiftLeft(
+ GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+}
+
+void BytecodeGraphBuilder::VisitShiftRightSmi() {
+ BuildBinaryOpWithImmediate(javascript()->ShiftRight(
+ GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
}
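
All of the Smi-suffixed visitors funnel into BuildBinaryOpWithImmediate, where operand 0 is the Smi immediate (the right-hand operand folded into the bytecode), operand 1 is the register holding the left operand, and operand 2 carries the feedback slot. An illustrative pairing for `x & 1` (the bytecode listings themselves are hypothetical):

    LdaSmi [1]                    ; generic form: load the constant,
    BitwiseAnd r0, [4]            ; then combine with register r0
    BitwiseAndSmi [1], r0, [4]    ; fused form: a single bytecode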
void BytecodeGraphBuilder::VisitInc() {
FrameStateBeforeAndAfter states(this);
- const Operator* js_op = javascript()->Add(BinaryOperationHints::Any());
+ // Note: Use subtraction of -1 here instead of addition of 1 to ensure we
+ // always convert to a number, not a string. For example, if the accumulator
+ // holds the string "1", addition would concatenate ("1" + 1 is "11"),
+ // whereas subtraction always applies ToNumber ("1" - (-1) is 2).
+ const Operator* js_op =
+ javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
Node* node = NewNode(js_op, environment()->LookupAccumulator(),
- jsgraph()->OneConstant());
+ jsgraph()->Constant(-1));
environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitDec() {
FrameStateBeforeAndAfter states(this);
- const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
+ const Operator* js_op =
+ javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
Node* node = NewNode(js_op, environment()->LookupAccumulator(),
jsgraph()->OneConstant());
environment()->BindAccumulator(node, &states);
}
void BytecodeGraphBuilder::VisitLogicalNot() {
+ Node* value = environment()->LookupAccumulator();
+ Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitToBooleanLogicalNot() {
Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
environment()->LookupAccumulator());
Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
@@ -1177,31 +1379,38 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
}
void BytecodeGraphBuilder::VisitTestEqual() {
- BuildCompareOp(javascript()->Equal());
+ CompareOperationHint hint = CompareOperationHint::kAny;
+ BuildCompareOp(javascript()->Equal(hint));
}
void BytecodeGraphBuilder::VisitTestNotEqual() {
- BuildCompareOp(javascript()->NotEqual());
+ CompareOperationHint hint = CompareOperationHint::kAny;
+ BuildCompareOp(javascript()->NotEqual(hint));
}
void BytecodeGraphBuilder::VisitTestEqualStrict() {
- BuildCompareOp(javascript()->StrictEqual());
+ CompareOperationHint hint = CompareOperationHint::kAny;
+ BuildCompareOp(javascript()->StrictEqual(hint));
}
void BytecodeGraphBuilder::VisitTestLessThan() {
- BuildCompareOp(javascript()->LessThan());
+ CompareOperationHint hint = CompareOperationHint::kAny;
+ BuildCompareOp(javascript()->LessThan(hint));
}
void BytecodeGraphBuilder::VisitTestGreaterThan() {
- BuildCompareOp(javascript()->GreaterThan());
+ CompareOperationHint hint = CompareOperationHint::kAny;
+ BuildCompareOp(javascript()->GreaterThan(hint));
}
void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
- BuildCompareOp(javascript()->LessThanOrEqual());
+ CompareOperationHint hint = CompareOperationHint::kAny;
+ BuildCompareOp(javascript()->LessThanOrEqual(hint));
}
void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
- BuildCompareOp(javascript()->GreaterThanOrEqual());
+ CompareOperationHint hint = CompareOperationHint::kAny;
+ BuildCompareOp(javascript()->GreaterThanOrEqual(hint));
}
void BytecodeGraphBuilder::VisitTestIn() {
@@ -1209,14 +1418,14 @@ void BytecodeGraphBuilder::VisitTestIn() {
}
void BytecodeGraphBuilder::VisitTestInstanceOf() {
- DCHECK(!FLAG_harmony_instanceof);
BuildCompareOp(javascript()->InstanceOf());
}
void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
FrameStateBeforeAndAfter states(this);
- Node* node = NewNode(js_op, environment()->LookupAccumulator());
- environment()->BindAccumulator(node, &states);
+ Node* value = NewNode(js_op, environment()->LookupAccumulator());
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
+ &states);
}
void BytecodeGraphBuilder::VisitToName() {
@@ -1296,7 +1505,17 @@ void BytecodeGraphBuilder::VisitStackCheck() {
environment()->RecordAfterState(node, &states);
}
+void BytecodeGraphBuilder::VisitOsrPoll() {
+ // TODO(4764): This should be moved into {VisitBytecodes} once we merge the
+ // polling with the existing bytecode. This would also guarantee that we do
+ // not miss the OSR entry point, which we cannot detect right now.
+ if (osr_ast_id_.ToInt() == bytecode_iterator().current_offset()) {
+ environment()->PrepareForOsr();
+ }
+}
+
void BytecodeGraphBuilder::VisitReturn() {
+ BuildLoopExitsForFunctionExit();
Node* control =
NewNode(common()->Return(), environment()->LookupAccumulator());
MergeControlToLeaveFunction(control);
@@ -1317,10 +1536,11 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
void BytecodeGraphBuilder::BuildForInPrepare() {
FrameStateBeforeAndAfter states(this);
- Node* receiver = environment()->LookupAccumulator();
+ Node* receiver =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
environment()->BindRegistersToProjections(
- bytecode_iterator().GetRegisterOperand(0), prepare, &states);
+ bytecode_iterator().GetRegisterOperand(1), prepare, &states);
}
void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
@@ -1362,6 +1582,51 @@ void BytecodeGraphBuilder::VisitForInStep() {
environment()->BindAccumulator(index, &states);
}
+void BytecodeGraphBuilder::VisitSuspendGenerator() {
+ Node* state = environment()->LookupAccumulator();
+ Node* generator = environment()->LookupRegister(
+ bytecode_iterator().GetRegisterOperand(0));
+ // The offsets used by the bytecode iterator are relative to a different base
+ // than what is used in the interpreter, hence the addition.
+ Node* offset =
+ jsgraph()->Constant(bytecode_iterator().current_offset() +
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ int register_count = environment()->register_count();
+ int value_input_count = 3 + register_count;
+
+ Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
+ value_inputs[0] = generator;
+ value_inputs[1] = state;
+ value_inputs[2] = offset;
+ for (int i = 0; i < register_count; ++i) {
+ value_inputs[3 + i] =
+ environment()->LookupRegister(interpreter::Register(i));
+ }
+
+ MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
+ value_inputs, false);
+}
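
For reference, the value-input layout assembled above for JSGeneratorStore is:

    value_inputs[0]     = generator object (register operand 0)
    value_inputs[1]     = continuation state (from the accumulator)
    value_inputs[2]     = current bytecode offset, rebased by BytecodeArray::kHeaderSize - kHeapObjectTag
    value_inputs[3 + i] = interpreter register i, for 0 <= i < register_count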
+
+void BytecodeGraphBuilder::VisitResumeGenerator() {
+ FrameStateBeforeAndAfter states(this);
+
+ Node* generator = environment()->LookupRegister(
+ bytecode_iterator().GetRegisterOperand(0));
+
+ // The bijection between registers and array indices must match the one
+ // used in InterpreterAssembler::ExportRegisterFile.
+ for (int i = 0; i < environment()->register_count(); ++i) {
+ Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
+ environment()->BindRegister(interpreter::Register(i), value);
+ }
+
+ Node* state =
+ NewNode(javascript()->GeneratorRestoreContinuation(), generator);
+
+ environment()->BindAccumulator(state, &states);
+}
+
void BytecodeGraphBuilder::VisitWide() {
// Consumed by the BytecodeArrayIterator.
UNREACHABLE();
@@ -1373,10 +1638,12 @@ void BytecodeGraphBuilder::VisitExtraWide() {
}
void BytecodeGraphBuilder::VisitIllegal() {
- // Never present in valid bytecode.
+ // Not emitted in valid bytecode.
UNREACHABLE();
}
+void BytecodeGraphBuilder::VisitNop() {}
+
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
if (merge_environments_[current_offset] != nullptr) {
if (environment() != nullptr) {
@@ -1395,6 +1662,7 @@ void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
}
void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
+ BuildLoopExitsForBranch(target_offset);
if (merge_environments_[target_offset] == nullptr) {
// Append merge nodes to the environment. We may merge here with another
// environment. So add a place holder for merge nodes. We may add redundant
@@ -1413,6 +1681,28 @@ void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
set_environment(nullptr);
}
+void BytecodeGraphBuilder::BuildLoopExitsForBranch(int target_offset) {
+ int origin_offset = bytecode_iterator().current_offset();
+ // Only build loop exits for forward edges.
+ if (target_offset > origin_offset) {
+ BuildLoopExitsUntilLoop(loop_analysis()->GetLoopOffsetFor(target_offset));
+ }
+}
+
+void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(int loop_offset) {
+ int origin_offset = bytecode_iterator().current_offset();
+ int current_loop = loop_analysis()->GetLoopOffsetFor(origin_offset);
+ while (loop_offset < current_loop) {
+ Node* loop_node = merge_environments_[current_loop]->GetControlDependency();
+ environment()->PrepareForLoopExit(loop_node);
+ current_loop = loop_analysis()->GetParentLoopFor(current_loop);
+ }
+}
+
+void BytecodeGraphBuilder::BuildLoopExitsForFunctionExit() {
+ BuildLoopExitsUntilLoop(-1);
+}
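
A worked example of the exit walk (offsets are hypothetical): with an outer loop header at offset 10, an inner loop header at offset 20 whose parent is 10, and a Return at offset 30 inside the inner loop, BuildLoopExitsForFunctionExit proceeds as:

    BuildLoopExitsUntilLoop(-1)
      current_loop = 20             // loop containing offset 30
      PrepareForLoopExit(inner)     // -1 < 20
      current_loop = 10             // GetParentLoopFor(20)
      PrepareForLoopExit(outer)     // -1 < 10
      current_loop = -1             // walk terminates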
+
void BytecodeGraphBuilder::BuildJump() {
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
@@ -1431,7 +1721,8 @@ void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
Node* accumulator = environment()->LookupAccumulator();
Node* condition =
- NewNode(javascript()->StrictEqual(), accumulator, comperand);
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ accumulator, comperand);
BuildConditionalJump(condition);
}
@@ -1440,14 +1731,17 @@ void BytecodeGraphBuilder::BuildJumpIfToBooleanEqual(Node* comperand) {
Node* accumulator = environment()->LookupAccumulator();
Node* to_boolean =
NewNode(javascript()->ToBoolean(ToBooleanHint::kAny), accumulator);
- Node* condition = NewNode(javascript()->StrictEqual(), to_boolean, comperand);
+ Node* condition =
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), to_boolean,
+ comperand);
BuildConditionalJump(condition);
}
void BytecodeGraphBuilder::BuildJumpIfNotHole() {
Node* accumulator = environment()->LookupAccumulator();
- Node* condition = NewNode(javascript()->StrictEqual(), accumulator,
- jsgraph()->TheHoleConstant());
+ Node* condition =
+ NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ accumulator, jsgraph()->TheHoleConstant());
Node* node =
NewNode(common()->Select(MachineRepresentation::kTagged), condition,
jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
@@ -1481,10 +1775,8 @@ void BytecodeGraphBuilder::EnterAndExitExceptionHandlers(int current_offset) {
int next_end = table->GetRangeEnd(current_exception_handler_);
int next_handler = table->GetRangeHandler(current_exception_handler_);
int context_register = table->GetRangeData(current_exception_handler_);
- CatchPrediction pred =
- table->GetRangePrediction(current_exception_handler_);
exception_handlers_.push(
- {next_start, next_end, next_handler, context_register, pred});
+ {next_start, next_end, next_handler, context_register});
current_exception_handler_++;
}
}
@@ -1494,7 +1786,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK_EQ(op->ValueInputCount(), value_input_count);
bool has_context = OperatorProperties::HasContextInput(op);
- int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
+ bool has_frame_state = OperatorProperties::HasFrameStateInput(op);
bool has_control = op->ControlInputCount() == 1;
bool has_effect = op->EffectInputCount() == 1;
@@ -1502,13 +1794,13 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK_LT(op->EffectInputCount(), 2);
Node* result = nullptr;
- if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
+ if (!has_context && !has_frame_state && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
bool inside_handler = !exception_handlers_.empty();
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
- input_count_with_deps += frame_state_count;
+ if (has_frame_state) ++input_count_with_deps;
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
Node** buffer = EnsureInputBufferSize(input_count_with_deps);
@@ -1517,7 +1809,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_context) {
*current_input++ = environment()->Context();
}
- for (int i = 0; i < frame_state_count; i++) {
+ if (has_frame_state) {
// The frame state will be inserted later. Here we misuse
// the {Dead} node as a sentinel to be later overwritten
// with the real frame state.
@@ -1542,13 +1834,9 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (!result->op()->HasProperty(Operator::kNoThrow) && inside_handler) {
int handler_offset = exception_handlers_.top().handler_offset_;
int context_index = exception_handlers_.top().context_register_;
- CatchPrediction prediction = exception_handlers_.top().pred_;
interpreter::Register context_register(context_index);
- IfExceptionHint hint = prediction == CatchPrediction::CAUGHT
- ? IfExceptionHint::kLocallyCaught
- : IfExceptionHint::kLocallyUncaught;
Environment* success_env = environment()->CopyForConditional();
- const Operator* op = common()->IfException(hint);
+ const Operator* op = common()->IfException();
Node* effect = environment()->GetEffectDependency();
Node* on_exception = graph()->NewNode(op, effect, result);
Node* context = environment()->LookupRegister(context_register);
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index c842c24b8c..2f3acc1bca 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -7,8 +7,11 @@
#include "src/compiler.h"
#include "src/compiler/bytecode-branch-analysis.h"
+#include "src/compiler/bytecode-loop-analysis.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/type-hint-analyzer.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
namespace v8 {
@@ -112,17 +115,19 @@ class BytecodeGraphBuilder {
void BuildCreateLiteral(const Operator* op);
void BuildCreateArguments(CreateArgumentsType type);
- void BuildLoadGlobal(TypeofMode typeof_mode);
+ Node* BuildLoadContextSlot();
+ Node* BuildLoadGlobal(TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
- void BuildNamedLoad();
- void BuildKeyedLoad();
+ Node* BuildNamedLoad();
void BuildNamedStore(LanguageMode language_mode);
+ Node* BuildKeyedLoad();
void BuildKeyedStore(LanguageMode language_mode);
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildStaLookupSlot(LanguageMode language_mode);
void BuildCall(TailCallMode tail_call_mode);
void BuildThrow();
void BuildBinaryOp(const Operator* op);
+ void BuildBinaryOpWithImmediate(const Operator* op);
void BuildCompareOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
@@ -130,6 +135,10 @@ class BytecodeGraphBuilder {
void BuildForInNext();
void BuildInvokeIntrinsic();
+ // Helper function to create a binary operation hint from the recorded
+ // type feedback.
+ BinaryOperationHint GetBinaryOperationHint(int operand_index);
+
// Control flow plumbing.
void BuildJump();
void BuildConditionalJump(Node* condition);
@@ -145,6 +154,12 @@ class BytecodeGraphBuilder {
// Simulates control flow that exits the function body.
void MergeControlToLeaveFunction(Node* exit);
+ // Builds loop exit nodes for every exited loop between the current bytecode
+ // offset and {target_offset}.
+ void BuildLoopExitsForBranch(int target_offset);
+ void BuildLoopExitsForFunctionExit();
+ void BuildLoopExitsUntilLoop(int loop_offset);
+
// Simulates entry and exit of exception handlers.
void EnterAndExitExceptionHandlers(int current_offset);
@@ -152,9 +167,6 @@ class BytecodeGraphBuilder {
// new nodes.
static const int kInputBufferSizeIncrement = 64;
- // The catch prediction from the handler table is reused.
- typedef HandlerTable::CatchPrediction CatchPrediction;
-
// An abstract representation for an exception handler that is being
// entered and exited while the graph builder is iterating over the
// underlying bytecode. The exception handlers within the bytecode are
@@ -164,7 +176,6 @@ class BytecodeGraphBuilder {
int end_offset_; // End offset of the handled area in the bytecode.
int handler_offset_; // Handler entry offset within the bytecode.
int context_register_; // Index of register holding handler context.
- CatchPrediction pred_; // Prediction of whether handler is catching.
};
// Field accessors
@@ -204,6 +215,12 @@ class BytecodeGraphBuilder {
branch_analysis_ = branch_analysis;
}
+ const BytecodeLoopAnalysis* loop_analysis() const { return loop_analysis_; }
+
+ void set_loop_analysis(const BytecodeLoopAnalysis* loop_analysis) {
+ loop_analysis_ = loop_analysis;
+ }
+
#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
@@ -216,11 +233,9 @@ class BytecodeGraphBuilder {
const FrameStateFunctionInfo* frame_state_function_info_;
const interpreter::BytecodeArrayIterator* bytecode_iterator_;
const BytecodeBranchAnalysis* branch_analysis_;
+ const BytecodeLoopAnalysis* loop_analysis_;
Environment* environment_;
-
- // Indicates whether deoptimization support is enabled for this compilation
- // and whether valid frame states need to be attached to deoptimizing nodes.
- bool deoptimization_enabled_;
+ BailoutId osr_ast_id_;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
@@ -243,6 +258,10 @@ class BytecodeGraphBuilder {
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
+ static int const kBinaryOperationHintIndex = 1;
+ static int const kCountOperationHintIndex = 0;
+ static int const kBinaryOperationSmiHintIndex = 2;
+
DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
diff --git a/deps/v8/src/compiler/bytecode-loop-analysis.cc b/deps/v8/src/compiler/bytecode-loop-analysis.cc
new file mode 100644
index 0000000000..03c11f7196
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-loop-analysis.cc
@@ -0,0 +1,100 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-loop-analysis.h"
+
+#include "src/compiler/bytecode-branch-analysis.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BytecodeLoopAnalysis::BytecodeLoopAnalysis(
+ Handle<BytecodeArray> bytecode_array,
+ const BytecodeBranchAnalysis* branch_analysis, Zone* zone)
+ : bytecode_array_(bytecode_array),
+ branch_analysis_(branch_analysis),
+ zone_(zone),
+ current_loop_offset_(-1),
+ found_current_backedge_(false),
+ backedge_to_header_(zone),
+ loop_header_to_parent_(zone) {}
+
+void BytecodeLoopAnalysis::Analyze() {
+ current_loop_offset_ = -1;
+ found_current_backedge_ = false;
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ while (!iterator.done()) {
+ interpreter::Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+ if (branch_analysis_->backward_branches_target(current_offset)) {
+ AddLoopEntry(current_offset);
+ } else if (interpreter::Bytecodes::IsJump(bytecode)) {
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
+ }
+ iterator.Advance();
+ }
+}
+
+void BytecodeLoopAnalysis::AddLoopEntry(int entry_offset) {
+ if (found_current_backedge_) {
+ // We assume that all backedges of a loop occur together, before the next
+ // loop entry or an outer loop's backedge. This is guaranteed by the
+ // invariants checked in AddBranch: every backedge either targets the
+ // current loop or is the first backedge to the parent loop. So a new loop
+ // entry here means the current loop has already ended and the new loop
+ // shares its parent.
+ current_loop_offset_ = loop_header_to_parent_[current_loop_offset_];
+ found_current_backedge_ = false;
+ }
+ loop_header_to_parent_[entry_offset] = current_loop_offset_;
+ current_loop_offset_ = entry_offset;
+}
+
+void BytecodeLoopAnalysis::AddBranch(int origin_offset, int target_offset) {
+ // If this is a backedge, record it.
+ if (target_offset < origin_offset) {
+ backedge_to_header_[origin_offset] = target_offset;
+ // Check whether this is actually a backedge of the outer loop and we have
+ // already finished the current loop.
+ if (target_offset < current_loop_offset_) {
+ DCHECK(found_current_backedge_);
+ int parent_offset = loop_header_to_parent_[current_loop_offset_];
+ DCHECK_EQ(target_offset, parent_offset);
+ current_loop_offset_ = parent_offset;
+ } else {
+ DCHECK_EQ(target_offset, current_loop_offset_);
+ found_current_backedge_ = true;
+ }
+ }
+}
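
To make the two maps concrete (offsets are hypothetical): for an outer loop with header at offset 8 enclosing an inner loop with header at offset 16, a backedge at offset 40 targeting 16, and a backedge at offset 52 targeting 8, Analyze() records:

    backedge_to_header_    = { 40 -> 16, 52 -> 8 }
    loop_header_to_parent_ = { 8 -> -1, 16 -> 8 }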
+
+int BytecodeLoopAnalysis::GetLoopOffsetFor(int offset) const {
+ auto next_backedge = backedge_to_header_.lower_bound(offset);
+ // If there is no later backedge, the offset is not inside any loop.
+ if (next_backedge == backedge_to_header_.end()) {
+ return -1;
+ }
+ // If the header precedes the offset, this is the backedge of the
+ // containing loop.
+ if (next_backedge->second <= offset) {
+ return next_backedge->second;
+ }
+ // Otherwise the next backedge belongs to a nested loop that starts after
+ // this offset, so return the parent of that nested loop.
+ return loop_header_to_parent_.upper_bound(offset)->second;
+}
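
Continuing the hypothetical maps above, the lower_bound/upper_bound logic resolves queries as:

    GetLoopOffsetFor(30)  ->  16   // next backedge 40 -> 16; header 16 <= 30
    GetLoopOffsetFor(44)  ->  8    // next backedge 52 -> 8;  header 8 <= 44
    GetLoopOffsetFor(12)  ->  8    // next backedge 40 -> 16, but 16 > 12: a nested
                                   // loop starts later; return its parent, 8
    GetLoopOffsetFor(60)  ->  -1   // no backedge at or after offset 60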
+
+int BytecodeLoopAnalysis::GetParentLoopFor(int header_offset) const {
+ auto parent = loop_header_to_parent_.find(header_offset);
+ DCHECK(parent != loop_header_to_parent_.end());
+ return parent->second;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-loop-analysis.h b/deps/v8/src/compiler/bytecode-loop-analysis.h
new file mode 100644
index 0000000000..59fabcef7b
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-loop-analysis.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
+
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class BytecodeBranchAnalysis;
+
+class BytecodeLoopAnalysis BASE_EMBEDDED {
+ public:
+ BytecodeLoopAnalysis(Handle<BytecodeArray> bytecode_array,
+ const BytecodeBranchAnalysis* branch_analysis,
+ Zone* zone);
+
+ // Analyze the bytecodes to find loop headers and their nesting. No
+ // other methods in this class return valid information until this has
+ // been called.
+ void Analyze();
+
+ // Gets the loop header offset of the containing loop for an arbitrary
+ // {offset}, or -1 if the {offset} is not inside any loop.
+ int GetLoopOffsetFor(int offset) const;
+ // Gets the loop header offset of the parent loop of the loop header
+ // at {header_offset}, or -1 for outer-most loops.
+ int GetParentLoopFor(int header_offset) const;
+
+ private:
+ void AddLoopEntry(int entry_offset);
+ void AddBranch(int origin_offset, int target_offset);
+
+ Zone* zone() const { return zone_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ Handle<BytecodeArray> bytecode_array_;
+ const BytecodeBranchAnalysis* branch_analysis_;
+ Zone* zone_;
+
+ int current_loop_offset_;
+ bool found_current_backedge_;
+
+ // Map from the offset of a backedge jump to the offset of the corresponding
+ // loop header. There might be multiple backedges for do-while loops.
+ ZoneMap<int, int> backedge_to_header_;
+ // Map from the offset of a loop header to the offset of its parent's loop
+ // header. This map will have as many entries as there are loops in the
+ // function.
+ ZoneMap<int, int> loop_header_to_parent_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeLoopAnalysis);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
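
A minimal usage sketch under the assumptions visible in this header; the BytecodeBranchAnalysis constructor arguments and the surrounding pipeline wiring are assumed for illustration:

    BytecodeBranchAnalysis branches(bytecode_array, zone);  // assumed interface
    branches.Analyze();
    BytecodeLoopAnalysis loops(bytecode_array, &branches, zone);
    loops.Analyze();
    int header = loops.GetLoopOffsetFor(offset);   // -1 when not inside a loop
    if (header != -1) {
      int parent = loops.GetParentLoopFor(header); // -1 for outer-most loops
    }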
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index b38e529f9f..f79497a6e1 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -14,8 +14,8 @@ namespace internal {
namespace compiler {
namespace {
-LinkageLocation regloc(Register reg) {
- return LinkageLocation::ForRegister(reg.code());
+LinkageLocation regloc(Register reg, MachineType type) {
+ return LinkageLocation::ForRegister(reg.code(), type);
}
@@ -182,21 +182,20 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
CHECK(locations.return_count_ <= 2);
if (locations.return_count_ > 0) {
- locations.AddReturn(regloc(kReturnRegister0));
+ locations.AddReturn(regloc(kReturnRegister0, msig->GetReturn(0)));
}
if (locations.return_count_ > 1) {
- locations.AddReturn(regloc(kReturnRegister1));
+ locations.AddReturn(regloc(kReturnRegister1, msig->GetReturn(1)));
}
const int parameter_count = static_cast<int>(msig->parameter_count());
#ifdef PARAM_REGISTERS
- static const Register kParamRegisters[] = {PARAM_REGISTERS};
- static const int kParamRegisterCount =
- static_cast<int>(arraysize(kParamRegisters));
+ const Register kParamRegisters[] = {PARAM_REGISTERS};
+ const int kParamRegisterCount = static_cast<int>(arraysize(kParamRegisters));
#else
- static const Register* kParamRegisters = nullptr;
- static const int kParamRegisterCount = 0;
+ const Register* kParamRegisters = nullptr;
+ const int kParamRegisterCount = 0;
#endif
#ifdef STACK_SHADOW_WORDS
@@ -207,10 +206,10 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// Add register and/or stack parameter(s).
for (int i = 0; i < parameter_count; i++) {
if (i < kParamRegisterCount) {
- locations.AddParam(regloc(kParamRegisters[i]));
+ locations.AddParam(regloc(kParamRegisters[i], msig->GetParam(i)));
} else {
- locations.AddParam(
- LinkageLocation::ForCallerFrameSlot(-1 - stack_offset));
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+ -1 - stack_offset, msig->GetParam(i)));
stack_offset++;
}
}
@@ -229,7 +228,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
- LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
if (set_initialize_root_flag) {
flags |= CallDescriptor::kInitializeRootRegister;
@@ -239,7 +238,6 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
CallDescriptor::kCallAddress, // kind
target_type, // target MachineType
target_loc, // target location
- msig, // machine_sig
locations.Build(), // location_sig
0, // stack_parameter_count
Operator::kNoProperties, // properties
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
deleted file mode 100644
index 907b36a93b..0000000000
--- a/deps/v8/src/compiler/change-lowering.cc
+++ /dev/null
@@ -1,713 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/change-lowering.h"
-
-#include "src/address-map.h"
-#include "src/code-factory.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/operator-properties.h"
-#include "src/compiler/simplified-operator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-ChangeLowering::~ChangeLowering() {}
-
-
-Reduction ChangeLowering::Reduce(Node* node) {
- Node* control = graph()->start();
- switch (node->opcode()) {
- case IrOpcode::kChangeBitToBool:
- return ChangeBitToBool(node->InputAt(0), control);
- case IrOpcode::kChangeBoolToBit:
- return ChangeBoolToBit(node->InputAt(0));
- case IrOpcode::kChangeFloat64ToTagged:
- return ChangeFloat64ToTagged(node->InputAt(0), control);
- case IrOpcode::kChangeInt32ToTagged:
- return ChangeInt32ToTagged(node->InputAt(0), control);
- case IrOpcode::kChangeTaggedToFloat64:
- return ChangeTaggedToFloat64(node->InputAt(0), control);
- case IrOpcode::kChangeTaggedToInt32:
- return ChangeTaggedToUI32(node->InputAt(0), control, kSigned);
- case IrOpcode::kChangeTaggedToUint32:
- return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
- case IrOpcode::kChangeUint32ToTagged:
- return ChangeUint32ToTagged(node->InputAt(0), control);
- case IrOpcode::kLoadField:
- return LoadField(node);
- case IrOpcode::kStoreField:
- return StoreField(node);
- case IrOpcode::kLoadElement:
- return LoadElement(node);
- case IrOpcode::kStoreElement:
- return StoreElement(node);
- case IrOpcode::kAllocate:
- return Allocate(node);
- case IrOpcode::kObjectIsReceiver:
- return ObjectIsReceiver(node);
- case IrOpcode::kObjectIsSmi:
- return ObjectIsSmi(node);
- case IrOpcode::kObjectIsNumber:
- return ObjectIsNumber(node);
- case IrOpcode::kObjectIsUndetectable:
- return ObjectIsUndetectable(node);
- default:
- return NoChange();
- }
- UNREACHABLE();
- return NoChange();
-}
-
-
-Node* ChangeLowering::HeapNumberValueIndexConstant() {
- return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
-}
-
-
-Node* ChangeLowering::SmiMaxValueConstant() {
- return jsgraph()->Int32Constant(Smi::kMaxValue);
-}
-
-
-Node* ChangeLowering::SmiShiftBitsConstant() {
- return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
-
-Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
- // The AllocateHeapNumberStub does not use the context, so we can safely pass
- // in Smi zero here.
- Callable callable = CodeFactory::AllocateHeapNumber(isolate());
- Node* target = jsgraph()->HeapConstant(callable.code());
- Node* context = jsgraph()->NoContextConstant();
- Node* effect = graph()->NewNode(common()->BeginRegion(), graph()->start());
- if (!allocate_heap_number_operator_.is_set()) {
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
- allocate_heap_number_operator_.set(common()->Call(descriptor));
- }
- Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
- target, context, effect, control);
- Node* store = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier)),
- heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
- return graph()->NewNode(common()->FinishRegion(), heap_number, store);
-}
-
-
-Node* ChangeLowering::ChangeInt32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
-}
-
-
-Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
- if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
- }
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-
-Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
- return ChangeInt32ToFloat64(ChangeSmiToInt32(value));
-}
-
-
-Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
- value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
- if (machine()->Is64()) {
- value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
- }
- return value;
-}
-
-
-Node* ChangeLowering::ChangeUint32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
-}
-
-
-Node* ChangeLowering::ChangeUint32ToSmi(Node* value) {
- if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
- }
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-
-Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
- return graph()->NewNode(machine()->Load(MachineType::Float64()), value,
- HeapNumberValueIndexConstant(), graph()->start(),
- control);
-}
-
-
-Node* ChangeLowering::TestNotSmi(Node* value) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagMask == 1);
- return graph()->NewNode(machine()->WordAnd(), value,
- jsgraph()->IntPtrConstant(kSmiTagMask));
-}
-
-
-Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
- return Replace(
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged), value,
- jsgraph()->TrueConstant(), jsgraph()->FalseConstant()));
-}
-
-
-Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
- return Replace(graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TrueConstant()));
-}
-
-
-Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
- Type* const value_type = NodeProperties::GetType(value);
- Node* const value32 = graph()->NewNode(
- machine()->TruncateFloat64ToInt32(TruncationMode::kRoundToZero), value);
- // TODO(bmeurer): This fast case must be disabled until we kill the asm.js
- // support in the generic JavaScript pipeline, because LoadBuffer is lying
- // about its result.
- // if (value_type->Is(Type::Signed32())) {
- // return ChangeInt32ToTagged(value32, control);
- // }
- Node* check_same = graph()->NewNode(
- machine()->Float64Equal(), value,
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
- Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
-
- Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
- Node* vsmi;
- Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
- Node* vbox;
-
- // We only need to check for -0 if the {value} can potentially contain -0.
- if (value_type->Maybe(Type::MinusZero())) {
- Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
- jsgraph()->Int32Constant(0));
- Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_zero, if_smi);
-
- Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
- Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
-
- // In case of 0, we need to check the high bits for the IEEE -0 pattern.
- Node* check_negative = graph()->NewNode(
- machine()->Int32LessThan(),
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
- jsgraph()->Int32Constant(0));
- Node* branch_negative = graph()->NewNode(
- common()->Branch(BranchHint::kFalse), check_negative, if_zero);
-
- Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
- Node* if_notnegative =
- graph()->NewNode(common()->IfFalse(), branch_negative);
-
- // We need to create a box for negative 0.
- if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
- if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
- }
-
- // On 64-bit machines we can just wrap the 32-bit integer in a smi, for 32-bit
- // machines we need to deal with potential overflow and fallback to boxing.
- if (machine()->Is64() || value_type->Is(Type::SignedSmall())) {
- vsmi = ChangeInt32ToSmi(value32);
- } else {
- Node* smi_tag =
- graph()->NewNode(machine()->Int32AddWithOverflow(), value32, value32);
-
- Node* check_ovf = graph()->NewNode(common()->Projection(1), smi_tag);
- Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_ovf, if_smi);
-
- Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
- if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
-
- if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
- vsmi = graph()->NewNode(common()->Projection(0), smi_tag);
- }
-
- // Allocate the box for the {value}.
- vbox = AllocateHeapNumberWithValue(value, if_box);
-
- control = graph()->NewNode(common()->Merge(2), if_smi, if_box);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vsmi, vbox, control);
- return Replace(value);
-}
-
-
-Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
- if (machine()->Is64() ||
- NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
- return Replace(ChangeInt32ToSmi(value));
- }
-
- Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
-
- Node* ovf = graph()->NewNode(common()->Projection(1), add);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue =
- AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), if_true);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(common()->Projection(0), add);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
- Signedness signedness) {
- if (NodeProperties::GetType(value)->Is(Type::TaggedSigned())) {
- return Replace(ChangeSmiToInt32(value));
- }
-
- const Operator* op = (signedness == kSigned)
- ? machine()->ChangeFloat64ToInt32()
- : machine()->ChangeFloat64ToUint32();
-
- if (NodeProperties::GetType(value)->Is(Type::TaggedPointer())) {
- return Replace(graph()->NewNode(op, LoadHeapNumberValue(value, control)));
- }
-
- Node* check = TestNotSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = graph()->NewNode(op, LoadHeapNumberValue(value, if_true));
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = ChangeSmiToInt32(value);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-namespace {
-
-bool CanCover(Node* value, IrOpcode::Value opcode) {
- if (value->opcode() != opcode) return false;
- bool first = true;
- for (Edge const edge : value->use_edges()) {
- if (NodeProperties::IsControlEdge(edge)) continue;
- if (NodeProperties::IsEffectEdge(edge)) continue;
- DCHECK(NodeProperties::IsValueEdge(edge));
- if (!first) return false;
- first = false;
- }
- return true;
-}
-
-} // namespace
-
-
-Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
- if (CanCover(value, IrOpcode::kJSToNumber)) {
- // ChangeTaggedToFloat64(JSToNumber(x)) =>
- // if IsSmi(x) then ChangeSmiToFloat64(x)
- // else let y = JSToNumber(x) in
- // if IsSmi(y) then ChangeSmiToFloat64(y)
- // else LoadHeapNumberValue(y)
- Node* const object = NodeProperties::GetValueInput(value, 0);
- Node* const context = NodeProperties::GetContextInput(value);
- Node* const frame_state = NodeProperties::GetFrameStateInput(value, 0);
- Node* const effect = NodeProperties::GetEffectInput(value);
- Node* const control = NodeProperties::GetControlInput(value);
-
- const Operator* merge_op = common()->Merge(2);
- const Operator* ephi_op = common()->EffectPhi(2);
- const Operator* phi_op = common()->Phi(MachineRepresentation::kFloat64, 2);
-
- Node* check1 = TestNotSmi(object);
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
- effect, if_true1);
- Node* etrue1 = vtrue1;
-
- Node* check2 = TestNotSmi(vtrue1);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 = ChangeSmiToFloat64(vtrue1);
-
- if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
- vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1 = ChangeSmiToFloat64(object);
- Node* efalse1 = effect;
-
- Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
- Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
- Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
-
- // Wire the new diamond into the graph, {JSToNumber} can still throw.
- NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
-
- // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
- // the node and places it inside the diamond. Come up with a helper method!
- for (Node* use : etrue1->uses()) {
- if (use->opcode() == IrOpcode::kIfSuccess) {
- use->ReplaceUses(merge1);
- NodeProperties::ReplaceControlInput(branch2, use);
- }
- }
-
- return Replace(phi1);
- }
-
- Node* check = TestNotSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = LoadHeapNumberValue(value, if_true);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = ChangeSmiToFloat64(value);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
- if (NodeProperties::GetType(value)->Is(Type::UnsignedSmall())) {
- return Replace(ChangeUint32ToSmi(value));
- }
-
- Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
- SmiMaxValueConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = ChangeUint32ToSmi(value);
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse =
- AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);
-
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, merge);
-
- return Replace(phi);
-}
-
-
-namespace {
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineRepresentation representation,
- Type* field_type, Type* input_type) {
- if (field_type->Is(Type::TaggedSigned()) ||
- input_type->Is(Type::TaggedSigned())) {
- // Write barriers are only for writes of heap objects.
- return kNoWriteBarrier;
- }
- if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
- // Write barriers are not necessary when storing true, false, null or
- // undefined, because these special oddballs are always in the root set.
- return kNoWriteBarrier;
- }
- if (base_is_tagged == kTaggedBase &&
- representation == MachineRepresentation::kTagged) {
- if (input_type->IsConstant() &&
- input_type->AsConstant()->Value()->IsHeapObject()) {
- Handle<HeapObject> input =
- Handle<HeapObject>::cast(input_type->AsConstant()->Value());
- if (input->IsMap()) {
- // Write barriers for storing maps are cheaper.
- return kMapWriteBarrier;
- }
- Isolate* const isolate = input->GetIsolate();
- RootIndexMap root_index_map(isolate);
- int root_index = root_index_map.Lookup(*input);
- if (root_index != RootIndexMap::kInvalidRootIndex &&
- isolate->heap()->RootIsImmortalImmovable(root_index)) {
- // Write barriers are unnecessary for immortal immovable roots.
- return kNoWriteBarrier;
- }
- }
- if (field_type->Is(Type::TaggedPointer()) ||
- input_type->Is(Type::TaggedPointer())) {
- // Write barriers for heap objects don't need a Smi check.
- return kPointerWriteBarrier;
- }
- // Write barriers are only for writes into heap objects (i.e. tagged base).
- return kFullWriteBarrier;
- }
- return kNoWriteBarrier;
-}
-
-
-WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineRepresentation representation,
- int field_offset, Type* field_type,
- Type* input_type) {
- if (base_is_tagged == kTaggedBase && field_offset == HeapObject::kMapOffset) {
- // Write barriers for storing maps are cheaper.
- return kMapWriteBarrier;
- }
- return ComputeWriteBarrierKind(base_is_tagged, representation, field_type,
- input_type);
-}
-
-} // namespace
-
-
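The helper above encodes the write-barrier policy as a chain of early returns. A minimal standalone sketch of the same decision ladder follows; the InputKind classification and the folding of the machine representation into a single base_is_tagged flag are simplifications for this sketch, not the real Type lattice:

// Standalone sketch of the write-barrier decision ladder above. The
// InputKind enum is an assumed stand-in for V8's Type lattice, and the
// representation check is folded into base_is_tagged.
#include <cstdio>

enum WriteBarrierKind {
  kNoWriteBarrier,
  kMapWriteBarrier,
  kPointerWriteBarrier,
  kFullWriteBarrier
};

enum InputKind {
  kTaggedSignedInput,   // a Smi: no heap pointer, no barrier needed
  kOddballInput,        // true/false/null/undefined: always in the root set
  kImmortalRootInput,   // immortal immovable root: never moves
  kMapInput,            // a Map: cheaper dedicated barrier
  kHeapPointerInput,    // definitely a heap object: skip the Smi check
  kUnknownTaggedInput   // could be anything tagged: full barrier
};

WriteBarrierKind ComputeWriteBarrierKind(bool base_is_tagged, InputKind input) {
  if (input == kTaggedSignedInput) return kNoWriteBarrier;
  if (input == kOddballInput) return kNoWriteBarrier;
  if (!base_is_tagged) return kNoWriteBarrier;  // untagged base: no barrier
  switch (input) {
    case kMapInput:          return kMapWriteBarrier;
    case kImmortalRootInput: return kNoWriteBarrier;
    case kHeapPointerInput:  return kPointerWriteBarrier;
    default:                 return kFullWriteBarrier;
  }
}

int main() {
  std::printf("smi store     -> %d\n",
              ComputeWriteBarrierKind(true, kTaggedSignedInput));   // 0
  std::printf("unknown store -> %d\n",
              ComputeWriteBarrierKind(true, kUnknownTaggedInput));  // 3
}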
-Reduction ChangeLowering::LoadField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
- return Changed(node);
-}
-
-
-Reduction ChangeLowering::StoreField(Node* node) {
- const FieldAccess& access = FieldAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(1));
- WriteBarrierKind kind = ComputeWriteBarrierKind(
- access.base_is_tagged, access.machine_type.representation(),
- access.offset, access.type, type);
- Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
- node->InsertInput(graph()->zone(), 1, offset);
- NodeProperties::ChangeOp(node,
- machine()->Store(StoreRepresentation(
- access.machine_type.representation(), kind)));
- return Changed(node);
-}
-
-
-Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
- Node* const key) {
- Node* index = key;
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- if (element_size_shift) {
- index = graph()->NewNode(machine()->Word32Shl(), index,
- jsgraph()->Int32Constant(element_size_shift));
- }
- const int fixed_offset = access.header_size - access.tag();
- if (fixed_offset) {
- index = graph()->NewNode(machine()->Int32Add(), index,
- jsgraph()->Int32Constant(fixed_offset));
- }
- if (machine()->Is64()) {
- // TODO(turbofan): This is probably only correct for typed arrays, and only
- // if the typed arrays are at most 2GiB in size, which happens to match
- // exactly our current situation.
- index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
- }
- return index;
-}
-
-
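ComputeIndex folds two adjustments into the index: a left shift by the element size's log2 and a fixed header-minus-tag offset. A standalone restatement with one worked value; the layout constants in main() are illustrative assumptions, not values taken from the real headers:

// Standalone model of ComputeIndex: byte offset = (key << log2(size))
// + header_size - tag. The constants below are illustrative assumptions
// for a 64-bit tagged layout.
#include <cstdint>
#include <cstdio>

int32_t ComputeByteIndex(int32_t key, int element_size_shift,
                         int header_size, int tag) {
  int32_t index = key;
  if (element_size_shift) index <<= element_size_shift;  // scale by size
  int fixed_offset = header_size - tag;                  // untag the base
  if (fixed_offset) index += fixed_offset;
  return index;
}

int main() {
  // Element 3 of a float64-backed array: 3 * 8 + 16 - 1 = 39.
  const int kDoubleSizeLog2 = 3;  // 8-byte elements
  const int kHeaderSize = 16;     // assumed: map word + length word
  const int kHeapObjectTag = 1;
  std::printf("%d\n", ComputeByteIndex(3, kDoubleSizeLog2, kHeaderSize,
                                       kHeapObjectTag));  // prints 39
}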
-Reduction ChangeLowering::LoadElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
- return Changed(node);
-}
-
-
-Reduction ChangeLowering::StoreElement(Node* node) {
- const ElementAccess& access = ElementAccessOf(node->op());
- Type* type = NodeProperties::GetType(node->InputAt(2));
- node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(),
- ComputeWriteBarrierKind(access.base_is_tagged,
- access.machine_type.representation(),
- access.type, type))));
- return Changed(node);
-}
-
-
-Reduction ChangeLowering::Allocate(Node* node) {
- PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
- if (pretenure == NOT_TENURED) {
- Callable callable = CodeFactory::AllocateInNewSpace(isolate());
- Node* target = jsgraph()->HeapConstant(callable.code());
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
- const Operator* op = common()->Call(descriptor);
- node->InsertInput(graph()->zone(), 0, target);
- node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, op);
- } else {
- DCHECK_EQ(TENURED, pretenure);
- AllocationSpace space = OLD_SPACE;
- Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
- Operator::Properties props = node->op()->properties();
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph()->zone(), f, 2, props, CallDescriptor::kNeedsFrameState);
- ExternalReference ref(f, jsgraph()->isolate());
- int32_t flags = AllocateTargetSpace::encode(space);
- node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
- node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
- node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
- node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
- node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- }
- return Changed(node);
-}
-
-Node* ChangeLowering::IsSmi(Node* value) {
- return graph()->NewNode(
- machine()->WordEqual(),
- graph()->NewNode(machine()->WordAnd(), value,
- jsgraph()->IntPtrConstant(kSmiTagMask)),
- jsgraph()->IntPtrConstant(kSmiTag));
-}
-
-Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
- return graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), object,
- jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
- graph()->start(), control);
-}
-
-Node* ChangeLowering::LoadMapBitField(Node* map) {
- return graph()->NewNode(
- machine()->Load(MachineType::Uint8()), map,
- jsgraph()->IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag),
- graph()->start(), graph()->start());
-}
-
-Node* ChangeLowering::LoadMapInstanceType(Node* map) {
- return graph()->NewNode(
- machine()->Load(MachineType::Uint8()), map,
- jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
- graph()->start(), graph()->start());
-}
-
-Reduction ChangeLowering::ObjectIsNumber(Node* node) {
- Node* input = NodeProperties::GetValueInput(node, 0);
- // TODO(bmeurer): Optimize somewhat based on input type.
- Node* check = IsSmi(input);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->Int32Constant(1);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(
- machine()->WordEqual(), LoadHeapObjectMap(input, if_false),
- jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
- Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- node->ReplaceInput(0, vtrue);
- node->AppendInput(graph()->zone(), vfalse);
- node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
- return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsReceiver(Node* node) {
- Node* input = NodeProperties::GetValueInput(node, 0);
- // TODO(bmeurer): Optimize somewhat based on input type.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* check = IsSmi(input);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->Int32Constant(0);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse =
- graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
- LoadMapInstanceType(LoadHeapObjectMap(input, if_false)));
- Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- node->ReplaceInput(0, vtrue);
- node->AppendInput(graph()->zone(), vfalse);
- node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
- return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsUndetectable(Node* node) {
- Node* input = NodeProperties::GetValueInput(node, 0);
- // TODO(bmeurer): Optimize somewhat based on input type.
- Node* check = IsSmi(input);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->Int32Constant(0);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(),
- jsgraph()->Uint32Constant(1 << Map::kIsUndetectable),
- LoadMapBitField(LoadHeapObjectMap(input, if_false))),
- jsgraph()->Int32Constant(0)),
- jsgraph()->Int32Constant(0));
- Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- node->ReplaceInput(0, vtrue);
- node->AppendInput(graph()->zone(), vfalse);
- node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
- return Changed(node);
-}
-
-Reduction ChangeLowering::ObjectIsSmi(Node* node) {
- node->ReplaceInput(0,
- graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
- jsgraph()->IntPtrConstant(kSmiTagMask)));
- node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
- NodeProperties::ChangeOp(node, machine()->WordEqual());
- return Changed(node);
-}
-
-Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
-
-
-Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }
-
-
-CommonOperatorBuilder* ChangeLowering::common() const {
- return jsgraph()->common();
-}
-
-
-MachineOperatorBuilder* ChangeLowering::machine() const {
- return jsgraph()->machine();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
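Most of the small helpers above, IsSmi in particular, reduce to pointer-tag arithmetic. A minimal runnable sketch of that arithmetic, assuming the common 64-bit layout where kSmiTag is 0, kSmiTagMask is 1, and the Smi payload lives in the upper 32 bits:

// Minimal sketch of V8-style Smi tagging on a 64-bit word. The constants
// assume the x64 layout (kSmiTag == 0, kSmiTagMask == 1, 32-bit shift)
// and an arithmetic right shift on signed integers.
#include <cassert>
#include <cstdint>

constexpr intptr_t kSmiTag = 0;
constexpr intptr_t kSmiTagMask = 1;
constexpr int kSmiShift = 32;

bool IsSmi(intptr_t value) {
  // Mirrors ChangeLowering::IsSmi: (value & kSmiTagMask) == kSmiTag.
  return (value & kSmiTagMask) == kSmiTag;
}

intptr_t ChangeInt32ToSmi(int32_t value) {
  // Shift via unsigned to avoid UB on negative values pre-C++20.
  return static_cast<intptr_t>(static_cast<uintptr_t>(value) << kSmiShift);
}

int32_t ChangeSmiToInt32(intptr_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);
}

int main() {
  intptr_t smi = ChangeInt32ToSmi(-42);
  assert(IsSmi(smi));                    // low bit clear => Smi
  assert(ChangeSmiToInt32(smi) == -42);  // round-trips
}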
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
deleted file mode 100644
index 7e5078bf84..0000000000
--- a/deps/v8/src/compiler/change-lowering.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CHANGE_LOWERING_H_
-#define V8_COMPILER_CHANGE_LOWERING_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-struct ElementAccess;
-class JSGraph;
-class Linkage;
-class MachineOperatorBuilder;
-class Operator;
-
-class ChangeLowering final : public Reducer {
- public:
- explicit ChangeLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
- ~ChangeLowering() final;
-
- Reduction Reduce(Node* node) final;
-
- private:
- Node* HeapNumberValueIndexConstant();
- Node* SmiMaxValueConstant();
- Node* SmiShiftBitsConstant();
-
- Node* AllocateHeapNumberWithValue(Node* value, Node* control);
- Node* ChangeInt32ToFloat64(Node* value);
- Node* ChangeInt32ToSmi(Node* value);
- Node* ChangeSmiToFloat64(Node* value);
- Node* ChangeSmiToInt32(Node* value);
- Node* ChangeUint32ToFloat64(Node* value);
- Node* ChangeUint32ToSmi(Node* value);
- Node* LoadHeapNumberValue(Node* value, Node* control);
- Node* TestNotSmi(Node* value);
-
- Reduction ChangeBitToBool(Node* value, Node* control);
- Reduction ChangeBoolToBit(Node* value);
- Reduction ChangeFloat64ToTagged(Node* value, Node* control);
- Reduction ChangeInt32ToTagged(Node* value, Node* control);
- Reduction ChangeTaggedToFloat64(Node* value, Node* control);
- Reduction ChangeTaggedToUI32(Node* value, Node* control,
- Signedness signedness);
- Reduction ChangeUint32ToTagged(Node* value, Node* control);
-
- Reduction LoadField(Node* node);
- Reduction StoreField(Node* node);
- Reduction LoadElement(Node* node);
- Reduction StoreElement(Node* node);
- Reduction Allocate(Node* node);
-
- Node* IsSmi(Node* value);
- Node* LoadHeapObjectMap(Node* object, Node* control);
- Node* LoadMapBitField(Node* map);
- Node* LoadMapInstanceType(Node* map);
-
- Reduction ObjectIsNumber(Node* node);
- Reduction ObjectIsReceiver(Node* node);
- Reduction ObjectIsSmi(Node* node);
- Reduction ObjectIsUndetectable(Node* node);
-
- Node* ComputeIndex(const ElementAccess& access, Node* const key);
- Graph* graph() const;
- Isolate* isolate() const;
- JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
-
- JSGraph* const jsgraph_;
- SetOncePointer<const Operator> allocate_heap_number_operator_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif  // V8_COMPILER_CHANGE_LOWERING_H_
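The deleted header shows the standard reducer shape: one Reduce entry point that dispatches on opcode and reports back through a Reduction value. A toy standalone model of that contract; Reduction, Node, and the opcode set here are simplified stand-ins for the real classes:

// Toy model of the graph-reducer contract above. Reduction, Node and the
// opcode set are assumed stand-ins, not the real V8 classes.
#include <cstdio>

enum Opcode { kChangeBitToBool, kObjectIsSmi, kOther };

struct Node { Opcode opcode; };

struct Reduction {
  Node* replacement;  // nullptr means "no change"
  bool Changed() const { return replacement != nullptr; }
};

Reduction NoChange() { return {nullptr}; }
Reduction Replace(Node* node) { return {node}; }

struct ToyLowering {
  Reduction Reduce(Node* node) {
    switch (node->opcode) {
      case kChangeBitToBool: return ReduceChangeBitToBool(node);
      case kObjectIsSmi:     return ReduceObjectIsSmi(node);
      default:               return NoChange();
    }
  }
  // A real reducer would build a replacement subgraph here; returning
  // the input node keeps the sketch self-contained.
  Reduction ReduceChangeBitToBool(Node* node) { return Replace(node); }
  Reduction ReduceObjectIsSmi(Node* node) { return Replace(node); }
};

int main() {
  Node n{kOther};
  ToyLowering lowering;
  std::printf("changed: %d\n", lowering.Reduce(&n).Changed());  // 0
}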
diff --git a/deps/v8/src/compiler/checkpoint-elimination.cc b/deps/v8/src/compiler/checkpoint-elimination.cc
new file mode 100644
index 0000000000..d44dfdff48
--- /dev/null
+++ b/deps/v8/src/compiler/checkpoint-elimination.cc
@@ -0,0 +1,53 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/checkpoint-elimination.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CheckpointElimination::CheckpointElimination(Editor* editor)
+ : AdvancedReducer(editor) {}
+
+namespace {
+
+// The given checkpoint is redundant if it is effect-wise dominated by another
+// checkpoint and there is no observable write in between. For now we only
+// consider a linear effect chain instead of true effect-wise dominance.
+bool IsRedundantCheckpoint(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ while (effect->op()->HasProperty(Operator::kNoWrite) &&
+ effect->op()->EffectInputCount() == 1) {
+ if (effect->opcode() == IrOpcode::kCheckpoint) return true;
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ return false;
+}
+
+} // namespace
+
+Reduction CheckpointElimination::ReduceCheckpoint(Node* node) {
+ DCHECK_EQ(IrOpcode::kCheckpoint, node->opcode());
+ if (IsRedundantCheckpoint(node)) {
+ return Replace(NodeProperties::GetEffectInput(node));
+ }
+ return NoChange();
+}
+
+Reduction CheckpointElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kCheckpoint:
+ return ReduceCheckpoint(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
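IsRedundantCheckpoint is a pure walk up the effect chain: keep stepping through single-effect nodes that have the kNoWrite property, and succeed on the first Checkpoint encountered. The same walk over a toy effect node, which stands in for V8's Node and Operator:

// Standalone model of IsRedundantCheckpoint's effect-chain walk. ToyNode
// is an assumption standing in for V8's Node + Operator; the single
// effect pointer models the EffectInputCount() == 1 requirement.
#include <cassert>

struct ToyNode {
  bool is_checkpoint;
  bool no_write;    // Operator::kNoWrite in the real code
  ToyNode* effect;  // single effect input (nullptr at the chain start)
};

// True iff an earlier Checkpoint dominates this one on a linear effect
// chain with no observable write in between.
bool IsRedundantCheckpoint(const ToyNode* node) {
  const ToyNode* effect = node->effect;
  while (effect != nullptr && effect->no_write) {
    if (effect->is_checkpoint) return true;
    effect = effect->effect;
  }
  return false;
}

int main() {
  ToyNode start{false, false, nullptr};  // e.g. a store: writes
  ToyNode check1{true, true, &start};    // first checkpoint
  ToyNode load{false, true, &check1};    // pure load: kNoWrite
  ToyNode check2{true, true, &load};     // redundant: check1 covers it
  assert(IsRedundantCheckpoint(&check2));
  ToyNode store{false, false, &check1};  // a write breaks the chain
  ToyNode check3{true, true, &store};
  assert(!IsRedundantCheckpoint(&check3));
}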
diff --git a/deps/v8/src/compiler/checkpoint-elimination.h b/deps/v8/src/compiler/checkpoint-elimination.h
new file mode 100644
index 0000000000..edaa0e7734
--- /dev/null
+++ b/deps/v8/src/compiler/checkpoint-elimination.h
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHECKPOINT_ELIMINATION_H_
+#define V8_COMPILER_CHECKPOINT_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Performs elimination of redundant checkpoints within the graph.
+class CheckpointElimination final : public AdvancedReducer {
+ public:
+ explicit CheckpointElimination(Editor* editor);
+ ~CheckpointElimination() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceCheckpoint(Node* node);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CHECKPOINT_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.cc b/deps/v8/src/compiler/coalesced-live-ranges.cc
deleted file mode 100644
index 4ac3e2118d..0000000000
--- a/deps/v8/src/compiler/coalesced-live-ranges.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-LiveRangeConflictIterator::LiveRangeConflictIterator(const LiveRange* range,
- IntervalStore* storage)
- : query_(range->first_interval()),
- pos_(storage->end()),
- intervals_(storage) {
- MovePosAndQueryToFirstConflict();
-}
-
-
-LiveRange* LiveRangeConflictIterator::Current() const {
- if (IsFinished()) return nullptr;
- return pos_->range_;
-}
-
-
-void LiveRangeConflictIterator::MovePosToFirstConflictForQuery() {
- DCHECK_NOT_NULL(query_);
- auto end = intervals_->end();
- LifetimePosition q_start = query_->start();
- LifetimePosition q_end = query_->end();
-
- if (intervals_->empty() || intervals_->rbegin()->end_ <= q_start ||
- intervals_->begin()->start_ >= q_end) {
- pos_ = end;
- return;
- }
-
- pos_ = intervals_->upper_bound(AsAllocatedInterval(q_start));
-  // pos_ is now either at the end (no interval starts strictly after q_start)
-  // or at the first such position. In either case, the allocated interval
-  // just before it may still intersect our query: either it starts before the
-  // query's start but ends after it, or it starts exactly at the query start.
-  // So unless we're right at the beginning of the storage (meaning the first
-  // allocated interval also starts after this query's start), check the
-  // interval behind.
- if (pos_ != intervals_->begin()) {
- --pos_;
- if (!QueryIntersectsAllocatedInterval()) {
- // The interval behind wasn't intersecting, so move back.
- ++pos_;
- }
- }
- if (pos_ == end || !QueryIntersectsAllocatedInterval()) {
- pos_ = end;
- }
-}
-
-
-void LiveRangeConflictIterator::MovePosAndQueryToFirstConflict() {
- auto end = intervals_->end();
- for (; query_ != nullptr; query_ = query_->next()) {
- MovePosToFirstConflictForQuery();
- if (pos_ != end) {
- DCHECK(QueryIntersectsAllocatedInterval());
- return;
- }
- }
-
- Invalidate();
-}
-
-
-void LiveRangeConflictIterator::IncrementPosAndSkipOverRepetitions() {
- auto end = intervals_->end();
- DCHECK(pos_ != end);
- LiveRange* current_conflict = Current();
- while (pos_ != end && pos_->range_ == current_conflict) {
- ++pos_;
- }
-}
-
-
-LiveRange* LiveRangeConflictIterator::InternalGetNext(bool clean_behind) {
- if (IsFinished()) return nullptr;
-
- LiveRange* to_clear = Current();
- IncrementPosAndSkipOverRepetitions();
- // At this point, pos_ is either at the end, or on an interval that doesn't
- // correspond to the same range as to_clear. This interval may not even be
- // a conflict.
- if (clean_behind) {
- // Since we parked pos_ on an iterator that won't be affected by removal,
- // we can safely delete to_clear's intervals.
- for (auto interval = to_clear->first_interval(); interval != nullptr;
- interval = interval->next()) {
- AllocatedInterval erase_key(interval->start(), interval->end(), nullptr);
- intervals_->erase(erase_key);
- }
- }
- // We may have parked pos_ at the end, or on a non-conflict. In that case,
- // move to the next query and reinitialize pos and query. This may invalidate
- // the iterator, if no more conflicts are available.
- if (!QueryIntersectsAllocatedInterval()) {
- query_ = query_->next();
- MovePosAndQueryToFirstConflict();
- }
- return Current();
-}
-
-
-LiveRangeConflictIterator CoalescedLiveRanges::GetConflicts(
- const LiveRange* range) {
- return LiveRangeConflictIterator(range, &intervals());
-}
-
-
-void CoalescedLiveRanges::AllocateRange(LiveRange* range) {
- for (auto interval = range->first_interval(); interval != nullptr;
- interval = interval->next()) {
- AllocatedInterval to_insert(interval->start(), interval->end(), range);
- intervals().insert(to_insert);
- }
-}
-
-
-bool CoalescedLiveRanges::VerifyAllocationsAreValidForTesting() const {
- LifetimePosition last_end = LifetimePosition::GapFromInstructionIndex(0);
- for (auto i : intervals_) {
- if (i.start_ < last_end) {
- return false;
- }
- last_end = i.end_;
- }
- return true;
-}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
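MovePosToFirstConflictForQuery is the heart of the conflict search: an upper_bound on the query's start, then one step back in case the preceding interval straddles the query. The same idea over a plain std::set of [start, end) intervals sorted by start, assuming the non-overlap invariant the class maintains for allocated ranges:

// Standalone model of the first-conflict lookup: upper_bound on the
// query start, then peek one interval back. Intervals are [start, end),
// sorted by start and pairwise non-overlapping, as CoalescedLiveRanges
// guarantees for allocated ranges.
#include <cassert>
#include <iterator>
#include <set>

struct Interval {
  int start, end;
  bool operator<(const Interval& other) const { return start < other.start; }
};

bool Intersects(const Interval& a, int q_start, int q_end) {
  return a.start < q_end && q_start < a.end;
}

// Returns the first interval intersecting [q_start, q_end), or end().
std::set<Interval>::const_iterator FirstConflict(const std::set<Interval>& s,
                                                 int q_start, int q_end) {
  auto pos = s.upper_bound(Interval{q_start, 0});  // first start > q_start
  if (pos != s.begin()) {
    auto prev = std::prev(pos);
    // The interval starting at or before q_start may still reach past it.
    if (Intersects(*prev, q_start, q_end)) return prev;
  }
  if (pos != s.end() && Intersects(*pos, q_start, q_end)) return pos;
  return s.end();
}

int main() {
  std::set<Interval> s{{0, 4}, {6, 9}, {12, 20}};
  assert(FirstConflict(s, 3, 7)->start == 0);     // straddling predecessor
  assert(FirstConflict(s, 4, 6) == s.end());      // fits in the gap
  assert(FirstConflict(s, 10, 13)->start == 12);  // hits the next interval
}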
diff --git a/deps/v8/src/compiler/coalesced-live-ranges.h b/deps/v8/src/compiler/coalesced-live-ranges.h
deleted file mode 100644
index 54bbce2055..0000000000
--- a/deps/v8/src/compiler/coalesced-live-ranges.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COALESCED_LIVE_RANGES_H_
-#define V8_COALESCED_LIVE_RANGES_H_
-
-#include "src/compiler/register-allocator.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-// Implementation detail for CoalescedLiveRanges.
-struct AllocatedInterval {
- AllocatedInterval(LifetimePosition start, LifetimePosition end,
- LiveRange* range)
- : start_(start), end_(end), range_(range) {}
-
- LifetimePosition start_;
- LifetimePosition end_;
- LiveRange* range_;
- bool operator<(const AllocatedInterval& other) const {
- return start_ < other.start_;
- }
- bool operator>(const AllocatedInterval& other) const {
- return start_ > other.start_;
- }
-};
-typedef ZoneSet<AllocatedInterval> IntervalStore;
-
-
-// An iterator over conflicts of a live range, obtained from CoalescedLiveRanges.
-// The design supports two main scenarios (see GreedyAllocator):
-// (1) observing each conflicting range, without mutating the allocations, and
-// (2) observing each conflicting range, and then moving to the next, after
-// removing the current conflict.
-class LiveRangeConflictIterator {
- public:
- // Current conflict. nullptr if no conflicts, or if we reached the end of
- // conflicts.
- LiveRange* Current() const;
-
- // Get the next conflict. Caller should handle non-consecutive repetitions of
- // the same range.
- LiveRange* GetNext() { return InternalGetNext(false); }
-
- // Get the next conflict, after evicting the current one. Caller may expect
- // to never observe the same live range more than once.
- LiveRange* RemoveCurrentAndGetNext() { return InternalGetNext(true); }
-
- private:
- friend class CoalescedLiveRanges;
-
- typedef IntervalStore::const_iterator interval_iterator;
- LiveRangeConflictIterator(const LiveRange* range, IntervalStore* store);
-
- // Move the store iterator to first interval intersecting query. Since the
- // intervals are sorted, subsequent intervals intersecting query follow. May
- // leave the store iterator at "end", meaning that the current query does not
- // have an intersection.
- void MovePosToFirstConflictForQuery();
-
- // Move both query and store iterator to the first intersection, if any. If
-  // none, then it invalidates the iterator (IsFinished() == true).
- void MovePosAndQueryToFirstConflict();
-
- // Increment pos and skip over intervals belonging to the same range we
- // started with (i.e. Current() before the call). It is possible that range
- // will be seen again, but not consecutively.
- void IncrementPosAndSkipOverRepetitions();
-
-  // Common implementation used by both GetNext and
-  // RemoveCurrentAndGetNext.
- LiveRange* InternalGetNext(bool clean_behind);
-
- bool IsFinished() const { return query_ == nullptr; }
-
- static AllocatedInterval AsAllocatedInterval(LifetimePosition pos) {
- return AllocatedInterval(pos, LifetimePosition::Invalid(), nullptr);
- }
-
- // Intersection utilities.
- static bool Intersects(LifetimePosition a_start, LifetimePosition a_end,
- LifetimePosition b_start, LifetimePosition b_end) {
- return a_start < b_end && b_start < a_end;
- }
-
- bool QueryIntersectsAllocatedInterval() const {
- DCHECK_NOT_NULL(query_);
- return pos_ != intervals_->end() &&
- Intersects(query_->start(), query_->end(), pos_->start_, pos_->end_);
- }
-
- void Invalidate() {
- query_ = nullptr;
- pos_ = intervals_->end();
- }
-
- const UseInterval* query_;
- interval_iterator pos_;
- IntervalStore* intervals_;
-};
-
-// Collection of live ranges allocated to the same register.
-// It supports efficiently finding all conflicts for a given, non-allocated
-// range. See AllocatedInterval.
-// Allocated live ranges do not intersect. At most, individual use intervals
-// touch. We store, for a live range, an AllocatedInterval corresponding to each
-// of that range's UseIntervals. We keep the list of AllocatedIntervals sorted
-// by starts. Then, given the non-intersecting property, we know that
-// consecutive AllocatedIntervals have the property that the "smaller"'s end is
-// less than or equal to the "larger"'s start.
-// This allows for quick (logarithmic complexity) identification of the first
-// AllocatedInterval to conflict with a given LiveRange, and then for efficient
-// traversal of conflicts.
-class CoalescedLiveRanges : public ZoneObject {
- public:
- explicit CoalescedLiveRanges(Zone* zone) : intervals_(zone) {}
- void clear() { intervals_.clear(); }
-
- bool empty() const { return intervals_.empty(); }
-
- // Iterate over each live range conflicting with the provided one.
-  // The same live range may be observed multiple, non-consecutive times.
- LiveRangeConflictIterator GetConflicts(const LiveRange* range);
-
-
- // Allocates a range with a pre-calculated candidate weight.
- void AllocateRange(LiveRange* range);
-
- // Unit testing API, verifying that allocated intervals do not overlap.
- bool VerifyAllocationsAreValidForTesting() const;
-
- private:
- static const float kAllocatedRangeMultiplier;
-
- IntervalStore& intervals() { return intervals_; }
- const IntervalStore& intervals() const { return intervals_; }
-
- // Augment the weight of a range that is about to be allocated.
- static void UpdateWeightAtAllocation(LiveRange* range);
-
- // Reduce the weight of a range that has lost allocation.
- static void UpdateWeightAtEviction(LiveRange* range);
-
-
- IntervalStore intervals_;
- DISALLOW_COPY_AND_ASSIGN(CoalescedLiveRanges);
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-#endif // V8_COALESCED_LIVE_RANGES_H_
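The invariant that makes the lookup above work, intervals sorted by start that at most touch, is exactly what VerifyAllocationsAreValidForTesting checks in one pass. Restated over the same toy Interval type used in the previous sketch:

// One-pass restatement of VerifyAllocationsAreValidForTesting: each
// interval must start no earlier than the previous one ends.
#include <cassert>
#include <set>

struct Interval {
  int start, end;
  bool operator<(const Interval& other) const { return start < other.start; }
};

bool AllocationsAreValid(const std::set<Interval>& s) {
  int last_end = 0;  // models LifetimePosition::GapFromInstructionIndex(0)
  for (const Interval& i : s) {
    if (i.start < last_end) return false;  // overlap: invalid
    last_end = i.end;
  }
  return true;
}

int main() {
  assert(AllocationsAreValid({{0, 4}, {4, 9}}));   // touching is fine
  assert(!AllocationsAreValid({{0, 5}, {4, 9}}));  // overlapping is not
}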
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
new file mode 100644
index 0000000000..4dd7e790fa
--- /dev/null
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -0,0 +1,1081 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/utils.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name,
+ size_t result_size)
+ : CodeAssembler(
+ isolate, zone,
+ Linkage::GetStubCallDescriptor(
+ isolate, zone, descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size),
+ flags, name) {}
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name)
+ : CodeAssembler(isolate, zone,
+ Linkage::GetJSCallDescriptor(zone, false, parameter_count,
+ CallDescriptor::kNoFlags),
+ flags, name) {}
+
+CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor, Code::Flags flags,
+ const char* name)
+ : raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone), call_descriptor,
+ MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements())),
+ flags_(flags),
+ name_(name),
+ code_generated_(false),
+ variables_(zone) {}
+
+CodeAssembler::~CodeAssembler() {}
+
+void CodeAssembler::CallPrologue() {}
+
+void CodeAssembler::CallEpilogue() {}
+
+Handle<Code> CodeAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ Schedule* schedule = raw_assembler_->Export();
+ Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
+ isolate(), raw_assembler_->call_descriptor(), raw_assembler_->graph(),
+ schedule, flags_, name_);
+
+ code_generated_ = true;
+ return code;
+}
+
+bool CodeAssembler::Is64() const { return raw_assembler_->machine()->Is64(); }
+
+bool CodeAssembler::IsFloat64RoundUpSupported() const {
+ return raw_assembler_->machine()->Float64RoundUp().IsSupported();
+}
+
+bool CodeAssembler::IsFloat64RoundDownSupported() const {
+ return raw_assembler_->machine()->Float64RoundDown().IsSupported();
+}
+
+bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
+ return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
+}
+
+Node* CodeAssembler::Int32Constant(int32_t value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+Node* CodeAssembler::Int64Constant(int64_t value) {
+ return raw_assembler_->Int64Constant(value);
+}
+
+Node* CodeAssembler::IntPtrConstant(intptr_t value) {
+ return raw_assembler_->IntPtrConstant(value);
+}
+
+Node* CodeAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+Node* CodeAssembler::SmiConstant(Smi* value) {
+ return IntPtrConstant(bit_cast<intptr_t>(value));
+}
+
+Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+Node* CodeAssembler::BooleanConstant(bool value) {
+ return raw_assembler_->BooleanConstant(value);
+}
+
+Node* CodeAssembler::ExternalConstant(ExternalReference address) {
+ return raw_assembler_->ExternalConstant(address);
+}
+
+Node* CodeAssembler::Float64Constant(double value) {
+ return raw_assembler_->Float64Constant(value);
+}
+
+Node* CodeAssembler::NaNConstant() {
+ return LoadRoot(Heap::kNanValueRootIndex);
+}
+
+bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
+ Int64Matcher m(node);
+ if (m.HasValue() &&
+ m.IsInRange(std::numeric_limits<int32_t>::min(),
+ std::numeric_limits<int32_t>::max())) {
+ out_value = static_cast<int32_t>(m.Value());
+ return true;
+ }
+
+ return false;
+}
+
+bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
+ Int64Matcher m(node);
+ if (m.HasValue()) out_value = m.Value();
+ return m.HasValue();
+}
+
+bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
+ IntPtrMatcher m(node);
+ if (m.HasValue()) out_value = m.Value();
+ return m.HasValue();
+}
+
+Node* CodeAssembler::Parameter(int value) {
+ return raw_assembler_->Parameter(value);
+}
+
+void CodeAssembler::Return(Node* value) {
+ return raw_assembler_->Return(value);
+}
+
+void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
+
+void CodeAssembler::Comment(const char* format, ...) {
+ if (!FLAG_code_comments) return;
+ char buffer[4 * KB];
+ StringBuilder builder(buffer, arraysize(buffer));
+ va_list arguments;
+ va_start(arguments, format);
+ builder.AddFormattedList(format, arguments);
+ va_end(arguments);
+
+ // Copy the string before recording it in the assembler to avoid
+  // issues when the stack-allocated buffer goes out of scope.
+ const int prefix_len = 2;
+ int length = builder.position() + 1;
+ char* copy = reinterpret_cast<char*>(malloc(length + prefix_len));
+ MemCopy(copy + prefix_len, builder.Finalize(), length);
+ copy[0] = ';';
+ copy[1] = ' ';
+ raw_assembler_->Comment(copy);
+}
+
+void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
+
+Node* CodeAssembler::LoadFramePointer() {
+ return raw_assembler_->LoadFramePointer();
+}
+
+Node* CodeAssembler::LoadParentFramePointer() {
+ return raw_assembler_->LoadParentFramePointer();
+}
+
+Node* CodeAssembler::LoadStackPointer() {
+ return raw_assembler_->LoadStackPointer();
+}
+
+#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name) \
+ Node* CodeAssembler::name(Node* a, Node* b) { \
+ return raw_assembler_->name(a, b); \
+ }
+CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
+#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
+
+Node* CodeAssembler::WordShl(Node* value, int shift) {
+ return (shift != 0) ? raw_assembler_->WordShl(value, IntPtrConstant(shift))
+ : value;
+}
+
+Node* CodeAssembler::WordShr(Node* value, int shift) {
+ return (shift != 0) ? raw_assembler_->WordShr(value, IntPtrConstant(shift))
+ : value;
+}
+
+Node* CodeAssembler::Word32Shr(Node* value, int shift) {
+ return (shift != 0) ? raw_assembler_->Word32Shr(value, IntPtrConstant(shift))
+ : value;
+}
+
+Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
+ if (raw_assembler_->machine()->Is64()) {
+ value = raw_assembler_->ChangeUint32ToUint64(value);
+ }
+ return value;
+}
+
+Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
+ if (raw_assembler_->machine()->Is64()) {
+ value = raw_assembler_->ChangeInt32ToInt64(value);
+ }
+ return value;
+}
+
+#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
+ Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
+CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
+#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
+
+Node* CodeAssembler::Load(MachineType rep, Node* base) {
+ return raw_assembler_->Load(rep, base);
+}
+
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
+ return raw_assembler_->Load(rep, base, index);
+}
+
+Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
+ return raw_assembler_->AtomicLoad(rep, base, index);
+}
+
+Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
+ if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
+ Handle<Object> root = isolate()->heap()->root_handle(root_index);
+ if (root->IsSmi()) {
+ return SmiConstant(Smi::cast(*root));
+ } else {
+ return HeapConstant(Handle<HeapObject>::cast(root));
+ }
+ }
+
+ Node* roots_array_start =
+ ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ return Load(MachineType::AnyTagged(), roots_array_start,
+ IntPtrConstant(root_index * kPointerSize));
+}
+
+Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
+ return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
+ Node* value) {
+ return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+ Node* value) {
+ return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+}
+
+Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+ Node* index, Node* value) {
+ return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+}
+
+Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
+ Node* index, Node* value) {
+ return raw_assembler_->AtomicStore(rep, base, index, value);
+}
+
+Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(root_index));
+ Node* roots_array_start =
+ ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged, roots_array_start,
+ IntPtrConstant(root_index * kPointerSize), value);
+}
+
+Node* CodeAssembler::Projection(int index, Node* value) {
+ return raw_assembler_->Projection(index, value);
+}
+
+void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
+ Label if_condition_is_true(this), if_condition_is_false(this);
+ Branch(condition, &if_condition_is_true, &if_condition_is_false);
+ Bind(&if_condition_is_true);
+ Goto(if_true);
+ Bind(&if_condition_is_false);
+ Goto(if_false);
+}
+
+void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
+ Variable* exception_var) {
+ Label success(this), exception(this, Label::kDeferred);
+ success.MergeVariables();
+ exception.MergeVariables();
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+
+ raw_assembler_->Continuations(node, success.label_, exception.label_);
+
+ Bind(&exception);
+ const Operator* op = raw_assembler_->common()->IfException();
+ Node* exception_value = raw_assembler_->AddNode(op, node, node);
+ if (exception_var != nullptr) {
+ exception_var->Bind(exception_value);
+ }
+ Goto(if_exception);
+
+ Bind(&success);
+}
+
+Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
+ Node** args) {
+ return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2) {
+ CallPrologue();
+ Node* return_value =
+ raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3) {
+ CallPrologue();
+ Node* return_value =
+ raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
+ arg3, arg4, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context) {
+ return raw_assembler_->TailCallRuntime0(function_id, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1) {
+ return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2) {
+ return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3) {
+ return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
+ context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4) {
+ return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
+ context);
+}
+
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ return raw_assembler_->TailCallRuntime5(function_id, arg1, arg2, arg3, arg4,
+ arg5, context);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, result_size);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, arg2,
+ result_size);
+}
+
+Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+ result_size);
+}
+
+Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return CallStubN(callable.descriptor(), target, args, result_size);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(1);
+ args[0] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(2);
+ args[0] = arg1;
+ args[1] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(3);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(6);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = arg5;
+ args[5] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, const Arg& arg1,
+ const Arg& arg2, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ const int kArgsCount = 3;
+ Node** args = zone()->NewArray<Node*>(kArgsCount);
+ DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+ args[arg1.index] = arg1.value;
+ args[arg2.index] = arg2.value;
+ args[kArgsCount - 1] = context;
+ DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+ return CallN(call_descriptor, target, args);
+}
+
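Both DCHECKs in the Arg-based overloads above rely on a small idiom: the first wraps std::fill in a comma expression so the debug-only nullptr pre-fill disappears from release builds, and the second verifies that every slot was written. The idiom in isolation, with plain assert standing in for DCHECK:

// The debug-only pre-fill idiom from the Arg-based CallStub overloads,
// with assert standing in for DCHECK. With NDEBUG defined, the fill and
// the count both vanish along with the asserts.
#include <algorithm>
#include <cassert>

int main() {
  const int kArgsCount = 3;
  int* args[kArgsCount];
  // Comma expression: run std::fill for its side effect, then yield true
  // so the assert itself always passes. Compiles away under NDEBUG.
  assert((std::fill(&args[0], &args[kArgsCount], nullptr), true));

  int a = 1, b = 2, c = 3;
  args[0] = &a;
  args[1] = &b;
  args[2] = &c;

  // Every slot must have been written; none may remain nullptr.
  assert(std::count(&args[0], &args[kArgsCount], nullptr) == 0);
}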
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, const Arg& arg1,
+ const Arg& arg2, const Arg& arg3,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ const int kArgsCount = 4;
+ Node** args = zone()->NewArray<Node*>(kArgsCount);
+ DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+ args[arg1.index] = arg1.value;
+ args[arg2.index] = arg2.value;
+ args[arg3.index] = arg3.value;
+ args[kArgsCount - 1] = context;
+ DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, const Arg& arg1,
+ const Arg& arg2, const Arg& arg3, const Arg& arg4,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ const int kArgsCount = 5;
+ Node** args = zone()->NewArray<Node*>(kArgsCount);
+ DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+ args[arg1.index] = arg1.value;
+ args[arg2.index] = arg2.value;
+ args[arg3.index] = arg3.value;
+ args[arg4.index] = arg4.value;
+ args[kArgsCount - 1] = context;
+ DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, const Arg& arg1,
+ const Arg& arg2, const Arg& arg3, const Arg& arg4,
+ const Arg& arg5, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ const int kArgsCount = 6;
+ Node** args = zone()->NewArray<Node*>(kArgsCount);
+ DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+ args[arg1.index] = arg1.value;
+ args[arg2.index] = arg2.value;
+ args[arg3.index] = arg3.value;
+ args[arg4.index] = arg4.value;
+ args[arg5.index] = arg5.value;
+ args[kArgsCount - 1] = context;
+ DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node** args, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1,
+ result_size);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
+ result_size);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+ result_size);
+}
+
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+ arg4, result_size);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(2);
+ args[0] = arg1;
+ args[1] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(3);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, const Arg& arg1,
+ const Arg& arg2, const Arg& arg3,
+ const Arg& arg4, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ const int kArgsCount = 5;
+ Node** args = zone()->NewArray<Node*>(kArgsCount);
+ DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+ args[arg1.index] = arg1.value;
+ args[arg2.index] = arg2.value;
+ args[arg3.index] = arg3.value;
+ args[arg4.index] = arg4.value;
+ args[kArgsCount - 1] = context;
+ DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, const Arg& arg1,
+ const Arg& arg2, const Arg& arg3,
+ const Arg& arg4, const Arg& arg5,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ const int kArgsCount = 6;
+ Node** args = zone()->NewArray<Node*>(kArgsCount);
+ DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
+ args[arg1.index] = arg1.value;
+ args[arg2.index] = arg2.value;
+ args[arg3.index] = arg3.value;
+ args[arg4.index] = arg4.value;
+ args[arg5.index] = arg5.value;
+ args[kArgsCount - 1] = context;
+ DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& interface_descriptor,
+ Node* code_target_address, Node** args) {
+ CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
+ isolate(), zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount());
+ return raw_assembler_->TailCallN(descriptor, code_target_address, args);
+}
+
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+ Node* function, Node* receiver,
+ size_t result_size) {
+ const int argc = 0;
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), argc + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+ Node* target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(argc + 4);
+ args[0] = function;
+ args[1] = Int32Constant(argc);
+ args[2] = receiver;
+ args[3] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+ Node* function, Node* receiver, Node* arg1,
+ size_t result_size) {
+ const int argc = 1;
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), argc + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+ Node* target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(argc + 4);
+ args[0] = function;
+ args[1] = Int32Constant(argc);
+ args[2] = receiver;
+ args[3] = arg1;
+ args[4] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+ Node* function, Node* receiver, Node* arg1,
+ Node* arg2, size_t result_size) {
+ const int argc = 2;
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), argc + 1,
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+ Node* target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(argc + 4);
+ args[0] = function;
+ args[1] = Int32Constant(argc);
+ args[2] = receiver;
+ args[3] = arg1;
+ args[4] = arg2;
+ args[5] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
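All three CallJS overloads pack the same calling convention: callee function, argument count, receiver, the JS-level arguments, and the context last. A small standalone packer that makes the layout explicit; void* stands in for compiler::Node*:

// Standalone restatement of the CallJS argument layout: function, argc,
// receiver, arg1..argN, context. void* is an assumed stand-in for Node*.
#include <cassert>
#include <vector>

std::vector<void*> PackJSCallArgs(void* function, void* receiver,
                                  const std::vector<void*>& js_args,
                                  void* context) {
  std::vector<void*> packed;
  packed.push_back(function);
  packed.push_back(reinterpret_cast<void*>(js_args.size()));  // argc slot
  packed.push_back(receiver);
  for (void* arg : js_args) packed.push_back(arg);  // the JS-level args
  packed.push_back(context);                        // context always last
  return packed;
}

int main() {
  int f, r, a1, a2, cx;
  auto packed = PackJSCallArgs(&f, &r, {&a1, &a2}, &cx);
  assert(packed.size() == 2 + 4);  // argc + 4 slots, as in CallJS above
  assert(packed.back() == &cx);
}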
+void CodeAssembler::Goto(CodeAssembler::Label* label) {
+ label->MergeVariables();
+ raw_assembler_->Goto(label->label_);
+}
+
+void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
+ Label false_label(this);
+ Branch(condition, true_label, &false_label);
+ Bind(&false_label);
+}
+
+void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
+ Label true_label(this);
+ Branch(condition, &true_label, false_label);
+ Bind(&true_label);
+}
+
+void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
+ CodeAssembler::Label* false_label) {
+ true_label->MergeVariables();
+ false_label->MergeVariables();
+ return raw_assembler_->Branch(condition, true_label->label_,
+ false_label->label_);
+}
+
+void CodeAssembler::Switch(Node* index, Label* default_label,
+ const int32_t* case_values, Label** case_labels,
+ size_t case_count) {
+ RawMachineLabel** labels =
+ new (zone()->New(sizeof(RawMachineLabel*) * case_count))
+ RawMachineLabel*[case_count];
+ for (size_t i = 0; i < case_count; ++i) {
+ labels[i] = case_labels[i]->label_;
+ case_labels[i]->MergeVariables();
+ default_label->MergeVariables();
+ }
+ return raw_assembler_->Switch(index, default_label->label_, case_values,
+ labels, case_count);
+}
+
+Node* CodeAssembler::Select(Node* condition, Node* true_value,
+ Node* false_value, MachineRepresentation rep) {
+ Variable value(this, rep);
+ Label vtrue(this), vfalse(this), end(this);
+ Branch(condition, &vtrue, &vfalse);
+
+ Bind(&vtrue);
+ {
+ value.Bind(true_value);
+ Goto(&end);
+ }
+ Bind(&vfalse);
+ {
+ value.Bind(false_value);
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return value.value();
+}
+
+// RawMachineAssembler delegate helpers:
+Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
+
+Factory* CodeAssembler::factory() const { return isolate()->factory(); }
+
+Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
+
+// The core implementation of Variable is stored through an indirection so
+// that it can outlive the often block-scoped Variable declarations. This is
+// needed to ensure that variable binding and merging through phis can
+// properly be verified.
+class CodeAssembler::Variable::Impl : public ZoneObject {
+ public:
+ explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
+ Node* value_;
+ MachineRepresentation rep_;
+};
+
+CodeAssembler::Variable::Variable(CodeAssembler* assembler,
+ MachineRepresentation rep)
+ : impl_(new (assembler->zone()) Impl(rep)), assembler_(assembler) {
+ assembler->variables_.insert(impl_);
+}
+
+CodeAssembler::Variable::~Variable() { assembler_->variables_.erase(impl_); }
+
+void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeAssembler::Variable::value() const {
+ DCHECK_NOT_NULL(impl_->value_);
+ return impl_->value_;
+}
+
+MachineRepresentation CodeAssembler::Variable::rep() const {
+ return impl_->rep_;
+}
+
+bool CodeAssembler::Variable::IsBound() const {
+ return impl_->value_ != nullptr;
+}
+
+CodeAssembler::Label::Label(CodeAssembler* assembler, int merged_value_count,
+ CodeAssembler::Variable** merged_variables,
+ CodeAssembler::Label::Type type)
+ : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+ void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+ label_ = new (buffer)
+ RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
+ : RawMachineLabel::kNonDeferred);
+ for (int i = 0; i < merged_value_count; ++i) {
+ variable_phis_[merged_variables[i]->impl_] = nullptr;
+ }
+}
+
+void CodeAssembler::Label::MergeVariables() {
+ ++merge_count_;
+ for (auto var : assembler_->variables_) {
+ size_t count = 0;
+ Node* node = var->value_;
+ if (node != nullptr) {
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+ i->second.push_back(node);
+ count = i->second.size();
+ } else {
+ count = 1;
+ variable_merges_[var] = std::vector<Node*>(1, node);
+ }
+ }
+    // If the following assert fires, then you've jumped to this label along
+    // a path where a variable it expects to merge into a phi is not bound.
+ DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
+ count == merge_count_);
+ USE(count);
+
+ // If the label is already bound, we already know the set of variables to
+ // merge and phi nodes have already been created.
+ if (bound_) {
+ auto phi = variable_phis_.find(var);
+ if (phi != variable_phis_.end()) {
+ DCHECK_NOT_NULL(phi->second);
+ assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+ } else {
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+          // If the following assert fires, then you've declared a variable
+          // that has the same bound value along all paths up until the point
+          // you bound this label, but then later merged a path with a new
+          // value for the variable after the label bind. (It's not possible
+          // to add phis to the bound label after the fact; list the variable
+          // in the label constructor's list of merged variables instead.)
+ DCHECK(find_if(i->second.begin(), i->second.end(),
+ [node](Node* e) -> bool { return node != e; }) ==
+ i->second.end());
+ }
+ }
+ }
+ }
+}
+
+void CodeAssembler::Label::Bind() {
+ DCHECK(!bound_);
+ assembler_->raw_assembler_->Bind(label_);
+
+ // Make sure that all variables that have changed along any path up to this
+ // point are marked as merge variables.
+ for (auto var : assembler_->variables_) {
+ Node* shared_value = nullptr;
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+ for (auto value : i->second) {
+ DCHECK(value != nullptr);
+ if (value != shared_value) {
+ if (shared_value == nullptr) {
+ shared_value = value;
+ } else {
+ variable_phis_[var] = nullptr;
+ }
+ }
+ }
+ }
+ }
+
+ for (auto var : variable_phis_) {
+ CodeAssembler::Variable::Impl* var_impl = var.first;
+ auto i = variable_merges_.find(var_impl);
+ // If the following assert fires, then a variable that has been marked as
+ // being merged at the label--either by explicitly marking it so in the
+ // label constructor or by having seen different bound values at branches
+ // into the label--doesn't have a bound value along all of the paths that
+ // have been merged into the label up to this point.
+ DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
+ Node* phi = assembler_->raw_assembler_->Phi(
+ var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
+ variable_phis_[var_impl] = phi;
+ }
+
+ // Bind all variables to a merge phi, the common value along all paths or
+ // null.
+ for (auto var : assembler_->variables_) {
+ auto i = variable_phis_.find(var);
+ if (i != variable_phis_.end()) {
+ var->value_ = i->second;
+ } else {
+ auto j = variable_merges_.find(var);
+ if (j != variable_merges_.end() && j->second.size() == merge_count_) {
+ var->value_ = j->second.back();
+ } else {
+ var->value_ = nullptr;
+ }
+ }
+ }
+
+ bound_ = true;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
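
A minimal usage sketch (not part of this patch) of the Variable/Label merging
machinery implemented above: a hypothetical stub body that binds a variable
along two paths and lets Label::Bind() create the phi. The function name and
setup are illustrative assumptions; the sketch assumes the
v8::internal::compiler namespace and the CodeAssembler API declared in
code-assembler.h below.

    void GenerateIntPtrAbs(CodeAssembler* a) {
      typedef CodeAssembler::Label Label;
      typedef CodeAssembler::Variable Variable;

      Node* x = a->Parameter(0);
      Variable result(a, MachineType::PointerRepresentation());
      // Listing |result| in the label's constructor requests a phi for it.
      Label if_negative(a), done(a, &result);

      result.Bind(x);
      a->GotoIf(a->IntPtrLessThan(x, a->IntPtrConstant(0)), &if_negative);
      a->Goto(&done);  // First merge: result == x.

      a->Bind(&if_negative);
      result.Bind(a->IntPtrSub(a->IntPtrConstant(0), x));
      a->Goto(&done);  // Second merge: result == 0 - x.

      // Label::Bind() sees two different bound values and emits the phi.
      a->Bind(&done);
      a->Return(result.value());
    }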
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
new file mode 100644
index 0000000000..bea999b705
--- /dev/null
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -0,0 +1,487 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_ASSEMBLER_H_
+#define V8_COMPILER_CODE_ASSEMBLER_H_
+
+#include <map>
+#include <memory>
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/builtins/builtins.h"
+#include "src/heap/heap.h"
+#include "src/machine-type.h"
+#include "src/runtime/runtime.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class Callable;
+class CallInterfaceDescriptor;
+class Isolate;
+class Factory;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class Node;
+class RawMachineAssembler;
+class RawMachineLabel;
+
+#define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+ V(Float32Equal) \
+ V(Float32LessThan) \
+ V(Float32LessThanOrEqual) \
+ V(Float32GreaterThan) \
+ V(Float32GreaterThanOrEqual) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual) \
+ V(Float64GreaterThan) \
+ V(Float64GreaterThanOrEqual) \
+ V(Int32GreaterThan) \
+ V(Int32GreaterThanOrEqual) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(IntPtrLessThan) \
+ V(IntPtrLessThanOrEqual) \
+ V(IntPtrGreaterThan) \
+ V(IntPtrGreaterThanOrEqual) \
+ V(IntPtrEqual) \
+ V(Uint32LessThan) \
+ V(Uint32GreaterThanOrEqual) \
+ V(UintPtrLessThan) \
+ V(UintPtrGreaterThanOrEqual) \
+ V(WordEqual) \
+ V(WordNotEqual) \
+ V(Word32Equal) \
+ V(Word32NotEqual) \
+ V(Word64Equal) \
+ V(Word64NotEqual)
+
+#define CODE_ASSEMBLER_BINARY_OP_LIST(V) \
+ CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64Atan2) \
+ V(Float64Pow) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(IntPtrAdd) \
+ V(IntPtrAddWithOverflow) \
+ V(IntPtrSub) \
+ V(IntPtrSubWithOverflow) \
+ V(IntPtrMul) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(Int32MulWithOverflow) \
+ V(Int32Div) \
+ V(Int32Mod) \
+ V(WordOr) \
+ V(WordAnd) \
+ V(WordXor) \
+ V(WordShl) \
+ V(WordShr) \
+ V(WordSar) \
+ V(WordRor) \
+ V(Word32Or) \
+ V(Word32And) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Word64Or) \
+ V(Word64And) \
+ V(Word64Xor) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror)
+
+#define CODE_ASSEMBLER_UNARY_OP_LIST(V) \
+ V(Float64Abs) \
+ V(Float64Acos) \
+ V(Float64Acosh) \
+ V(Float64Asin) \
+ V(Float64Asinh) \
+ V(Float64Atan) \
+ V(Float64Atanh) \
+ V(Float64Cos) \
+ V(Float64Cosh) \
+ V(Float64Exp) \
+ V(Float64Expm1) \
+ V(Float64Log) \
+ V(Float64Log1p) \
+ V(Float64Log2) \
+ V(Float64Log10) \
+ V(Float64Cbrt) \
+ V(Float64Neg) \
+ V(Float64Sin) \
+ V(Float64Sinh) \
+ V(Float64Sqrt) \
+ V(Float64Tan) \
+ V(Float64Tanh) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(BitcastWordToTagged) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateFloat64ToWord32) \
+ V(TruncateInt64ToInt32) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(RoundFloat64ToInt32) \
+ V(Float64RoundDown) \
+ V(Float64RoundUp) \
+ V(Float64RoundTruncate) \
+ V(Word32Clz)
+
+// A "public" interface used by components outside of the compiler directory
+// to create code objects with TurboFan's backend. This class is mostly a thin
+// shim around the RawMachineAssembler, and its primary job is to ensure that
+// the innards of the RawMachineAssembler and other compiler implementation
+// details don't leak outside of the compiler directory.
+//
+// V8 components that need to generate low-level code using this interface
+// should include this header--and this header only--from the compiler directory
+// (this is actually enforced). Since all interesting data structures are
+// forward declared, it's not possible for clients to peek inside the compiler
+// internals.
+//
+// In addition to providing isolation between TurboFan and code generation
+// clients, CodeAssembler also provides an abstraction for creating variables
+// and enhanced Label functionality to merge variable values along paths where
+// they have differing values, including loops.
+class CodeAssembler {
+ public:
+ // Create with CallStub linkage.
+ // |result_size| specifies the number of results returned by the stub.
+ // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+ CodeAssembler(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor, Code::Flags flags,
+ const char* name, size_t result_size = 1);
+
+ // Create with JSCall linkage.
+ CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name);
+
+ virtual ~CodeAssembler();
+
+ Handle<Code> GenerateCode();
+
+ bool Is64() const;
+ bool IsFloat64RoundUpSupported() const;
+ bool IsFloat64RoundDownSupported() const;
+ bool IsFloat64RoundTruncateSupported() const;
+
+ class Label;
+ class Variable {
+ public:
+ explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
+ ~Variable();
+ void Bind(Node* value);
+ Node* value() const;
+ MachineRepresentation rep() const;
+ bool IsBound() const;
+
+ private:
+ friend class CodeAssembler;
+ class Impl;
+ Impl* impl_;
+ CodeAssembler* assembler_;
+ };
+
+ // ===========================================================================
+ // Base Assembler
+ // ===========================================================================
+
+ // Constants.
+ Node* Int32Constant(int32_t value);
+ Node* Int64Constant(int64_t value);
+ Node* IntPtrConstant(intptr_t value);
+ Node* NumberConstant(double value);
+ Node* SmiConstant(Smi* value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* BooleanConstant(bool value);
+ Node* ExternalConstant(ExternalReference address);
+ Node* Float64Constant(double value);
+ Node* NaNConstant();
+
+ bool ToInt32Constant(Node* node, int32_t& out_value);
+ bool ToInt64Constant(Node* node, int64_t& out_value);
+ bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+
+ Node* Parameter(int value);
+ void Return(Node* value);
+
+ void DebugBreak();
+ void Comment(const char* format, ...);
+
+ void Bind(Label* label);
+ void Goto(Label* label);
+ void GotoIf(Node* condition, Label* true_label);
+ void GotoUnless(Node* condition, Label* false_label);
+ void Branch(Node* condition, Label* true_label, Label* false_label);
+
+ void Switch(Node* index, Label* default_label, const int32_t* case_values,
+ Label** case_labels, size_t case_count);
+
+ Node* Select(Node* condition, Node* true_value, Node* false_value,
+ MachineRepresentation rep = MachineRepresentation::kTagged);
+
+ // Access to the frame pointer
+ Node* LoadFramePointer();
+ Node* LoadParentFramePointer();
+
+ // Access to the stack pointer
+ Node* LoadStackPointer();
+
+ // Load raw memory location.
+ Node* Load(MachineType rep, Node* base);
+ Node* Load(MachineType rep, Node* base, Node* index);
+ Node* AtomicLoad(MachineType rep, Node* base, Node* index);
+
+ // Load a value from the root array.
+ Node* LoadRoot(Heap::RootListIndex root_index);
+
+ // Store value to raw memory location.
+ Node* Store(MachineRepresentation rep, Node* base, Node* value);
+ Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+ Node* value);
+ Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* value);
+
+ // Store a value to the root array.
+ Node* StoreRoot(Heap::RootListIndex root_index, Node* value);
+
+// Basic arithmetic operations.
+#define DECLARE_CODE_ASSEMBLER_BINARY_OP(name) Node* name(Node* a, Node* b);
+ CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
+#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
+
+ Node* WordShl(Node* value, int shift);
+ Node* WordShr(Node* value, int shift);
+ Node* Word32Shr(Node* value, int shift);
+
+// Unary
+#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name) Node* name(Node* a);
+ CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
+#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
+
+ // No-op on 32-bit, otherwise zero extend.
+ Node* ChangeUint32ToWord(Node* value);
+ // No-op on 32-bit, otherwise sign extend.
+ Node* ChangeInt32ToIntPtr(Node* value);
+
+ // Projections
+ Node* Projection(int index, Node* value);
+
+ // Calls
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5);
+
+ // A pair of a zero-based argument index and a value.
+  // It makes it easier to write code that is independent of argument order.
+ struct Arg {
+ Arg(int index, Node* value) : index(index), value(value) {}
+
+ int const index;
+ Node* const value;
+ };
+
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ size_t result_size = 1);
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, size_t result_size = 1);
+ Node* CallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size = 1);
+ Node* CallStubN(Callable const& callable, Node** args,
+ size_t result_size = 1);
+
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, size_t result_size = 1);
+
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, const Arg& arg1, const Arg& arg2,
+ size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, const Arg& arg1, const Arg& arg2,
+ const Arg& arg3, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, const Arg& arg1, const Arg& arg2,
+ const Arg& arg3, const Arg& arg4, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, const Arg& arg1, const Arg& arg2,
+ const Arg& arg3, const Arg& arg4, const Arg& arg5,
+ size_t result_size = 1);
+
+ Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node** args, size_t result_size = 1);
+
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ size_t result_size = 1);
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, size_t result_size = 1);
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size = 1);
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2,
+ size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, size_t result_size = 1);
+
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, const Arg& arg1, const Arg& arg2,
+ const Arg& arg3, const Arg& arg4, size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, const Arg& arg1, const Arg& arg2,
+ const Arg& arg3, const Arg& arg4, const Arg& arg5,
+ size_t result_size = 1);
+
+ Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
+ Node* code_target_address, Node** args);
+
+ Node* CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, size_t result_size = 1);
+ Node* CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, Node* arg1, size_t result_size = 1);
+ Node* CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
+
+ // Exception handling support.
+ void GotoIfException(Node* node, Label* if_exception,
+ Variable* exception_var = nullptr);
+
+ // Branching helpers.
+ void BranchIf(Node* condition, Label* if_true, Label* if_false);
+
+#define BRANCH_HELPER(name) \
+ void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
+ BranchIf(name(a, b), if_true, if_false); \
+ }
+ CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
+#undef BRANCH_HELPER
+
+ // Helpers which delegate to RawMachineAssembler.
+ Factory* factory() const;
+ Isolate* isolate() const;
+ Zone* zone() const;
+
+ protected:
+ // Enables subclasses to perform operations before and after a call.
+ virtual void CallPrologue();
+ virtual void CallEpilogue();
+
+ private:
+ CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
+ Code::Flags flags, const char* name);
+
+ Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+
+ std::unique_ptr<RawMachineAssembler> raw_assembler_;
+ Code::Flags flags_;
+ const char* name_;
+ bool code_generated_;
+ ZoneSet<Variable::Impl*> variables_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
+};
+
+class CodeAssembler::Label {
+ public:
+ enum Type { kDeferred, kNonDeferred };
+
+ explicit Label(
+ CodeAssembler* assembler,
+ CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+ : CodeAssembler::Label(assembler, 0, nullptr, type) {}
+ Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
+ CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+ : CodeAssembler::Label(assembler, 1, &merged_variable, type) {}
+ Label(CodeAssembler* assembler, int merged_variable_count,
+ CodeAssembler::Variable** merged_variables,
+ CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
+ ~Label() {}
+
+ private:
+ friend class CodeAssembler;
+
+ void Bind();
+ void MergeVariables();
+
+ bool bound_;
+ size_t merge_count_;
+ CodeAssembler* assembler_;
+ RawMachineLabel* label_;
+ // Map of variables that need to be merged to their phi nodes (or placeholders
+ // for those phis).
+ std::map<Variable::Impl*, Node*> variable_phis_;
+ // Map of variables to the list of value nodes that have been added from each
+ // merge path in their order of merging.
+ std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CODE_ASSEMBLER_H_
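
The three fixed-arity CallJS overloads defined earlier in code-assembler.cc
all build the same argument buffer layout: [function, argc, receiver,
arg1..argN, context], i.e. argc + 4 slots in total. A hedged sketch of a
variadic variant following that layout (hypothetical, not part of this patch;
written as if it were another CodeAssembler member, since CallN() and zone()
are private):

    Node* CodeAssembler::CallJSN(Callable const& callable, Node* context,
                                 Node* function, Node* receiver,
                                 std::vector<Node*> const& js_args,
                                 size_t result_size) {
      const int argc = static_cast<int>(js_args.size());
      CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
          isolate(), zone(), callable.descriptor(), argc + 1,
          CallDescriptor::kNoFlags, Operator::kNoProperties,
          MachineType::AnyTagged(), result_size);
      Node* target = HeapConstant(callable.code());

      // Same layout as the fixed-arity overloads: argc + 4 slots.
      Node** args = zone()->NewArray<Node*>(argc + 4);
      args[0] = function;
      args[1] = Int32Constant(argc);
      args[2] = receiver;
      for (int i = 0; i < argc; ++i) args[3 + i] = js_args[i];
      args[3 + argc] = context;  // The context rides in the last slot.

      return CallN(call_descriptor, target, args);
    }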
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 7de32c5c91..4dccdc912c 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -31,10 +31,18 @@ class InstructionOperandConverter {
return ToRegister(instr_->InputAt(index));
}
+ FloatRegister InputFloatRegister(size_t index) {
+ return ToFloatRegister(instr_->InputAt(index));
+ }
+
DoubleRegister InputDoubleRegister(size_t index) {
return ToDoubleRegister(instr_->InputAt(index));
}
+ Simd128Register InputSimd128Register(size_t index) {
+ return ToSimd128Register(instr_->InputAt(index));
+ }
+
double InputDouble(size_t index) { return ToDouble(instr_->InputAt(index)); }
float InputFloat32(size_t index) { return ToFloat32(instr_->InputAt(index)); }
@@ -43,6 +51,10 @@ class InstructionOperandConverter {
return ToConstant(instr_->InputAt(index)).ToInt32();
}
+ uint32_t InputUint32(size_t index) {
+ return bit_cast<uint32_t>(InputInt32(index));
+ }
+
int64_t InputInt64(size_t index) {
return ToConstant(instr_->InputAt(index)).ToInt64();
}
@@ -85,10 +97,18 @@ class InstructionOperandConverter {
return ToRegister(instr_->TempAt(index));
}
+ FloatRegister OutputFloatRegister() {
+ return ToFloatRegister(instr_->Output());
+ }
+
DoubleRegister OutputDoubleRegister() {
return ToDoubleRegister(instr_->Output());
}
+ Simd128Register OutputSimd128Register() {
+ return ToSimd128Register(instr_->Output());
+ }
+
// -- Conversions for operands -----------------------------------------------
Label* ToLabel(InstructionOperand* op) {
@@ -103,10 +123,18 @@ class InstructionOperandConverter {
return LocationOperand::cast(op)->GetRegister();
}
+ FloatRegister ToFloatRegister(InstructionOperand* op) {
+ return LocationOperand::cast(op)->GetFloatRegister();
+ }
+
DoubleRegister ToDoubleRegister(InstructionOperand* op) {
return LocationOperand::cast(op)->GetDoubleRegister();
}
+ Simd128Register ToSimd128Register(InstructionOperand* op) {
+ return LocationOperand::cast(op)->GetSimd128Register();
+ }
+
Constant ToConstant(InstructionOperand* op) {
if (op->IsImmediate()) {
return gen_->code()->GetImmediate(ImmediateOperand::cast(op));
@@ -127,7 +155,7 @@ class InstructionOperandConverter {
return ToConstant(op).ToHeapObject();
}
- Frame* frame() const { return gen_->frame(); }
+ const Frame* frame() const { return gen_->frame(); }
FrameAccessState* frame_access_state() const {
return gen_->frame_access_state();
}
@@ -163,7 +191,7 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
- Frame* frame() const { return frame_; }
+ const Frame* frame() const { return frame_; }
Isolate* isolate() const { return masm()->isolate(); }
MacroAssembler* masm() const { return masm_; }
OutOfLineCode* next() const { return next_; }
@@ -171,7 +199,7 @@ class OutOfLineCode : public ZoneObject {
private:
Label entry_;
Label exit_;
- Frame* const frame_;
+ const Frame* const frame_;
MacroAssembler* const masm_;
OutOfLineCode* const next_;
};
@@ -182,8 +210,6 @@ class OutOfLineCode : public ZoneObject {
static inline void FinishCode(MacroAssembler* masm) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
masm->CheckConstPool(true, false);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
- masm->ud2();
#endif
}
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 086da560e4..4513c248fc 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -5,6 +5,7 @@
#include "src/compiler/code-generator.h"
#include "src/address-map.h"
+#include "src/base/adapters.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
@@ -33,14 +34,15 @@ class CodeGenerator::JumpTable final : public ZoneObject {
CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info)
- : frame_access_state_(new (code->zone()) FrameAccessState(frame)),
+ : frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
+ unwinding_info_writer_(zone()),
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
current_source_position_(SourcePosition::Unknown()),
- masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kYes),
+ masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
resolver_(this),
safepoints_(code->zone()),
handlers_(code->zone()),
@@ -52,10 +54,18 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
last_lazy_deopt_pc_(0),
jump_tables_(nullptr),
ools_(nullptr),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ source_position_table_builder_(code->zone(),
+ info->SourcePositionRecordingMode()) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
+ CreateFrameAccessState(frame);
+}
+
+void CodeGenerator::CreateFrameAccessState(Frame* frame) {
+ FinishFrame(frame);
+ frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}
Handle<Code> CodeGenerator::GenerateCode() {
@@ -66,10 +76,6 @@ Handle<Code> CodeGenerator::GenerateCode() {
// the frame (that is done in AssemblePrologue).
FrameScope frame_scope(masm(), StackFrame::MANUAL);
- // Emit a code line info recording start event.
- PositionsRecorder* recorder = masm()->positions_recorder();
- LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
-
// Place function entry hook if requested to do so.
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm());
@@ -96,9 +102,9 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
- // Finish the Frame
- frame()->AlignFrame(kFrameAlignmentInBytes);
- AssembleSetupStackPointer();
+ unwinding_info_writer_.SetNumberOfInstructionBlocks(
+ code()->InstructionBlockCount());
+
// Assemble all non-deferred blocks, followed by deferred ones.
for (int deferred = 0; deferred < 2; ++deferred) {
for (const InstructionBlock* block : code()->instruction_blocks()) {
@@ -111,6 +117,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
if (block->IsHandler()) EnsureSpaceForLazyDeopt();
// Bind a label for a block.
current_block_ = block->rpo_number();
+ unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
if (FLAG_code_comments) {
// TODO(titzer): these code comments are a giant memory leak.
Vector<char> buffer = Vector<char>::New(200);
@@ -143,7 +150,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
masm()->bind(GetLabel(current_block_));
if (block->must_construct_frame()) {
- AssemblePrologue();
+ AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
// avoid clobbering callee saved registers in case of C linkage and
// using the roots.
@@ -153,12 +160,15 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
+ CodeGenResult result;
if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- AssembleBlock(block);
+ result = AssembleBlock(block);
} else {
- AssembleBlock(block);
+ result = AssembleBlock(block);
}
+ if (result != kSuccess) return Handle<Code>();
+ unwinding_info_writer_.EndInstructionBlock(block);
}
}
@@ -199,11 +209,17 @@ Handle<Code> CodeGenerator::GenerateCode() {
safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
- Handle<Code> result =
- v8::internal::CodeGenerator::MakeCodeEpilogue(masm(), info);
+ unwinding_info_writer_.Finish(masm()->pc_offset());
+
+ Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
+ masm(), unwinding_info_writer_.eh_frame_writer(), info, Handle<Object>());
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetTotalFrameSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+ Handle<ByteArray> source_positions =
+ source_position_table_builder_.ToSourcePositionTable(
+ isolate(), Handle<AbstractCode>::cast(result));
+ result->set_source_position_table(*source_positions);
// Emit exception handler table.
if (!handlers_.empty()) {
@@ -212,12 +228,8 @@ Handle<Code> CodeGenerator::GenerateCode() {
HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
TENURED));
for (size_t i = 0; i < handlers_.size(); ++i) {
- int position = handlers_[i].handler->pos();
- HandlerTable::CatchPrediction prediction = handlers_[i].caught_locally
- ? HandlerTable::CAUGHT
- : HandlerTable::UNCAUGHT;
table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
- table->SetReturnHandler(static_cast<int>(i), position, prediction);
+ table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
}
result->set_handler_table(*table);
}
@@ -229,11 +241,6 @@ Handle<Code> CodeGenerator::GenerateCode() {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
}
- // Emit a code line info recording stop event.
- void* line_info = recorder->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(
- AbstractCode::cast(*result), line_info));
-
return result;
}
@@ -274,8 +281,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
bool CodeGenerator::IsMaterializableFromFrame(Handle<HeapObject> object,
int* slot_return) {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- if (info()->has_context() && object.is_identical_to(info()->context()) &&
- !info()->is_osr()) {
+ if (object.is_identical_to(info()->context()) && !info()->is_osr()) {
*slot_return = Frame::kContextSlot;
return true;
} else if (object.is_identical_to(info()->closure())) {
@@ -302,16 +308,105 @@ bool CodeGenerator::IsMaterializableFromRoot(
return false;
}
-void CodeGenerator::AssembleBlock(const InstructionBlock* block) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
+ const InstructionBlock* block) {
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code()->InstructionAt(i);
- AssembleInstruction(instr, block);
+ CodeGenResult result = AssembleInstruction(instr, block);
+ if (result != kSuccess) return result;
}
+ return kSuccess;
}
-void CodeGenerator::AssembleInstruction(Instruction* instr,
- const InstructionBlock* block) {
+bool CodeGenerator::IsValidPush(InstructionOperand source,
+ CodeGenerator::PushTypeFlags push_type) {
+ if (source.IsImmediate() &&
+ ((push_type & CodeGenerator::kImmediatePush) != 0)) {
+ return true;
+ }
+ if ((source.IsRegister() || source.IsStackSlot()) &&
+ ((push_type & CodeGenerator::kScalarPush) != 0)) {
+ return true;
+ }
+ if ((source.IsFloatRegister() || source.IsFloatStackSlot()) &&
+ ((push_type & CodeGenerator::kFloat32Push) != 0)) {
+ return true;
+ }
+ if ((source.IsDoubleRegister() || source.IsFloatStackSlot()) &&
+ ((push_type & CodeGenerator::kFloat64Push) != 0)) {
+ return true;
+ }
+ return false;
+}
+
+void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
+ PushTypeFlags push_type,
+ ZoneVector<MoveOperands*>* pushes) {
+ pushes->clear();
+ for (int i = Instruction::FIRST_GAP_POSITION;
+ i <= Instruction::LAST_GAP_POSITION; ++i) {
+ Instruction::GapPosition inner_pos =
+ static_cast<Instruction::GapPosition>(i);
+ ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
+ if (parallel_move != nullptr) {
+ for (auto move : *parallel_move) {
+ InstructionOperand source = move->source();
+ InstructionOperand destination = move->destination();
+ int first_push_compatible_index =
+ V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
+          // If there are any moves from slots that will be overridden by
+          // pushes, then the full gap resolver must be used, since moves
+          // optimized into pushes don't participate in the parallel move and
+          // might clobber values needed for the gap resolution.
+ if (source.IsStackSlot() &&
+ LocationOperand::cast(source).index() >=
+ first_push_compatible_index) {
+ pushes->clear();
+ return;
+ }
+ // TODO(danno): Right now, only consider moves from the FIRST gap for
+ // pushes. Theoretically, we could extract pushes for both gaps (there
+ // are cases where this happens), but the logic for that would also have
+ // to check to make sure that non-memory inputs to the pushes from the
+ // LAST gap don't get clobbered in the FIRST gap.
+ if (i == Instruction::FIRST_GAP_POSITION) {
+ if (destination.IsStackSlot() &&
+ LocationOperand::cast(destination).index() >=
+ first_push_compatible_index) {
+ int index = LocationOperand::cast(destination).index();
+ if (IsValidPush(source, push_type)) {
+ if (index >= static_cast<int>(pushes->size())) {
+ pushes->resize(index + 1);
+ }
+ (*pushes)[index] = move;
+ }
+ }
+ }
+ }
+ }
+ }
+
+  // For now, only support a set of contiguous pushes at the end of the list.
+ size_t push_count_upper_bound = pushes->size();
+ size_t push_begin = push_count_upper_bound;
+ for (auto move : base::Reversed(*pushes)) {
+ if (move == nullptr) break;
+ push_begin--;
+ }
+ size_t push_count = pushes->size() - push_begin;
+ std::copy(pushes->begin() + push_begin,
+ pushes->begin() + push_begin + push_count, pushes->begin());
+ pushes->resize(push_count);
+}
+
+CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
+ Instruction* instr, const InstructionBlock* block) {
+ int first_unused_stack_slot;
+ bool adjust_stack =
+ GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
+ if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
AssembleGaps(instr);
+ if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
DCHECK_IMPLIES(
block->must_deconstruct_frame(),
instr != code()->InstructionAt(block->last_instruction_index()) ||
@@ -321,7 +416,8 @@ void CodeGenerator::AssembleInstruction(Instruction* instr,
}
AssembleSourcePosition(instr);
// Assemble architecture-specific code for the instruction.
- AssembleArchInstruction(instr);
+ CodeGenResult result = AssembleArchInstruction(instr);
+ if (result != kSuccess) return result;
FlagsMode mode = FlagsModeField::decode(instr->opcode());
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
@@ -337,7 +433,7 @@ void CodeGenerator::AssembleInstruction(Instruction* instr,
if (!IsNextInAssemblyOrder(true_rpo)) {
AssembleArchJump(true_rpo);
}
- return;
+ return kSuccess;
}
if (IsNextInAssemblyOrder(true_rpo)) {
// true block is next, can fall through if condition negated.
@@ -379,6 +475,7 @@ void CodeGenerator::AssembleInstruction(Instruction* instr,
break;
}
}
+ return kSuccess;
}
@@ -389,11 +486,12 @@ void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
current_source_position_ = source_position;
if (source_position.IsUnknown()) return;
int code_pos = source_position.raw();
- masm()->positions_recorder()->RecordPosition(code_pos);
- masm()->positions_recorder()->WriteRecordedPositions();
+ source_position_table_builder_.AddPosition(masm()->pc_offset(), code_pos,
+ false);
if (FLAG_code_comments) {
- Vector<char> buffer = Vector<char>::New(256);
CompilationInfo* info = this->info();
+ if (!info->parse_info()) return;
+ Vector<char> buffer = Vector<char>::New(256);
int ln = Script::GetLineNumber(info->script(), code_pos);
int cn = Script::GetColumnNumber(info->script(), code_pos);
if (info->script()->name()->IsString()) {
@@ -408,6 +506,16 @@ void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
}
}
+bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
+ int* slot) {
+ if (instr->IsTailCall()) {
+ InstructionOperandConverter g(this, instr);
+ *slot = g.InputInt32(instr->InputCount() - 1);
+ return true;
+ } else {
+ return false;
+ }
+}
void CodeGenerator::AssembleGaps(Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
@@ -493,13 +601,8 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (flags & CallDescriptor::kHasExceptionHandler) {
InstructionOperandConverter i(this, instr);
- bool caught = flags & CallDescriptor::kHasLocalCatchHandler;
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
- handlers_.push_back({caught, GetLabel(handler_rpo), masm()->pc_offset()});
- }
-
- if (flags & CallDescriptor::kNeedsNopAfterCall) {
- AddNopForSmiCodeInlining();
+ handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
}
if (needs_frame_state) {
@@ -508,7 +611,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
// code address).
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
- GetFrameStateDescriptor(instr, frame_state_offset);
+ GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = masm()->pc_offset();
int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
descriptor->state_combine());
@@ -528,7 +631,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
// by calls.)
for (size_t i = 0; i < descriptor->GetSize(); i++) {
InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
- CHECK(op->IsStackSlot() || op->IsDoubleStackSlot() || op->IsImmediate());
+ CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
}
#endif
safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
@@ -545,15 +648,19 @@ int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
return result;
}
-
-FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
+DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
Instruction* instr, size_t frame_state_offset) {
InstructionOperandConverter i(this, instr);
- InstructionSequence::StateId state_id =
- InstructionSequence::StateId::FromInt(i.InputInt32(frame_state_offset));
- return code()->GetFrameStateDescriptor(state_id);
+ int const state_id = i.InputInt32(frame_state_offset);
+ return code()->GetDeoptimizationEntry(state_id);
}
+DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
+ int deoptimization_id) const {
+ size_t const index = static_cast<size_t>(deoptimization_id);
+ DCHECK_LT(index, deoptimization_states_.size());
+ return deoptimization_states_[index]->reason();
+}
void CodeGenerator::TranslateStateValueDescriptor(
StateValueDescriptor* desc, Translation* translation,
@@ -662,6 +769,12 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
+ case FrameStateType::kGetterStub:
+ translation->BeginGetterStubFrame(shared_info_id);
+ break;
+ case FrameStateType::kSetterStub:
+ translation->BeginSetterStubFrame(shared_info_id);
+ break;
}
TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
@@ -672,8 +785,9 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
size_t frame_state_offset,
OutputFrameStateCombine state_combine) {
- FrameStateDescriptor* descriptor =
- GetFrameStateDescriptor(instr, frame_state_offset);
+ DeoptimizationEntry const& entry =
+ GetDeoptimizationEntry(instr, frame_state_offset);
+ FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
Translation translation(
@@ -686,7 +800,8 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
int deoptimization_id = static_cast<int>(deoptimization_states_.size());
deoptimization_states_.push_back(new (zone()) DeoptimizationState(
- descriptor->bailout_id(), translation.index(), pc_offset));
+ descriptor->bailout_id(), translation.index(), pc_offset,
+ entry.reason()));
return deoptimization_id;
}
@@ -710,9 +825,13 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else {
CHECK(false);
}
- } else if (op->IsDoubleStackSlot()) {
- DCHECK(IsFloatingPoint(type.representation()));
- translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+ } else if (op->IsFPStackSlot()) {
+ if (type.representation() == MachineRepresentation::kFloat64) {
+ translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+ translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
+ }
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
if (type.representation() == MachineRepresentation::kBit) {
@@ -728,20 +847,47 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
} else {
CHECK(false);
}
- } else if (op->IsDoubleRegister()) {
- DCHECK(IsFloatingPoint(type.representation()));
+ } else if (op->IsFPRegister()) {
InstructionOperandConverter converter(this, instr);
- translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+ if (type.representation() == MachineRepresentation::kFloat64) {
+ translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+ translation->StoreFloatRegister(converter.ToFloatRegister(op));
+ }
} else if (op->IsImmediate()) {
InstructionOperandConverter converter(this, instr);
Constant constant = converter.ToConstant(op);
Handle<Object> constant_object;
switch (constant.type()) {
case Constant::kInt32:
- DCHECK(type == MachineType::Int32() || type == MachineType::Uint32() ||
- type.representation() == MachineRepresentation::kBit);
+ if (type.representation() == MachineRepresentation::kTagged) {
+ // When pointers are 4 bytes, we can use int32 constants to represent
+ // Smis.
+ DCHECK_EQ(4, kPointerSize);
+ constant_object =
+ handle(reinterpret_cast<Smi*>(constant.ToInt32()), isolate());
+ DCHECK(constant_object->IsSmi());
+ } else {
+ DCHECK(type == MachineType::Int32() ||
+ type == MachineType::Uint32() ||
+ type.representation() == MachineRepresentation::kBit ||
+ type.representation() == MachineRepresentation::kNone);
+ DCHECK(type.representation() != MachineRepresentation::kNone ||
+ constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
+
+ constant_object =
+ isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+ }
+ break;
+ case Constant::kInt64:
+ // When pointers are 8 bytes, we can use int64 constants to represent
+ // Smis.
+ DCHECK_EQ(type.representation(), MachineRepresentation::kTagged);
+ DCHECK_EQ(8, kPointerSize);
constant_object =
- isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+ handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
+ DCHECK(constant_object->IsSmi());
break;
case Constant::kFloat32:
DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
@@ -786,18 +932,6 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
return exit;
}
-int CodeGenerator::TailCallFrameStackSlotDelta(int stack_param_delta) {
- // Leave the PC on the stack on platforms that have that as part of their ABI
- int pc_slots = V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
- int sp_slot_delta = frame_access_state()->has_frame()
- ? (frame()->GetTotalFrameSlotCount() - pc_slots)
- : 0;
- // Discard only slots that won't be used by new parameters.
- sp_slot_delta += stack_param_delta;
- return sp_slot_delta;
-}
-
-
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
gen->ools_ = this;
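
To make the tail end of GetPushCompatibleMoves above concrete: candidates are
recorded at their destination slot index, so the vector may contain gaps
(nullptr entries), and only the contiguous run of candidates at the highest
indices is kept and shifted to the front. A self-contained sketch of just that
trimming step (plain C++ over std::vector with a hypothetical name, not V8
code):

    #include <algorithm>
    #include <vector>

    // Keep only the trailing run of non-null entries, moved to the front.
    // Mirrors the base::Reversed() loop plus std::copy/resize in
    // GetPushCompatibleMoves.
    template <typename T>
    void KeepTrailingRun(std::vector<T*>* pushes) {
      size_t push_begin = pushes->size();
      while (push_begin > 0 && (*pushes)[push_begin - 1] != nullptr) {
        push_begin--;  // Walk backwards until the first gap.
      }
      size_t push_count = pushes->size() - push_begin;
      std::copy(pushes->begin() + push_begin, pushes->end(), pushes->begin());
      pushes->resize(push_count);
    }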
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index b82181c331..8ad9431653 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -5,11 +5,14 @@
#ifndef V8_COMPILER_CODE_GENERATOR_H_
#define V8_COMPILER_CODE_GENERATOR_H_
+#include "src/compiler.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/unwinding-info-writer.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"
+#include "src/source-position-table.h"
namespace v8 {
namespace internal {
@@ -54,7 +57,7 @@ class CodeGenerator final : public GapResolver::Assembler {
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
- Frame* frame() const { return frame_access_state_->frame(); }
+ const Frame* frame() const { return frame_access_state_->frame(); }
Isolate* isolate() const { return info_->isolate(); }
Linkage* linkage() const { return linkage_; }
@@ -67,6 +70,12 @@ class CodeGenerator final : public GapResolver::Assembler {
Zone* zone() const { return code()->zone(); }
CompilationInfo* info() const { return info_; }
+ // Create the FrameAccessState object. The Frame is immutable from here on.
+ void CreateFrameAccessState(Frame* frame);
+
+  // Architecture-specific frame finalization.
+ void FinishFrame(Frame* frame);
+
// Checks if {block} will appear directly after {current_block_} when
// assembling code, in which case, a fall-through can be used.
bool IsNextInAssemblyOrder(RpoNumber block) const;
@@ -84,50 +93,87 @@ class CodeGenerator final : public GapResolver::Assembler {
bool IsMaterializableFromRoot(Handle<HeapObject> object,
Heap::RootListIndex* index_return);
+ enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };
+
// Assemble instructions for the specified block.
- void AssembleBlock(const InstructionBlock* block);
+ CodeGenResult AssembleBlock(const InstructionBlock* block);
// Assemble code for the specified instruction.
- void AssembleInstruction(Instruction* instr, const InstructionBlock* block);
+ CodeGenResult AssembleInstruction(Instruction* instr,
+ const InstructionBlock* block);
void AssembleSourcePosition(Instruction* instr);
void AssembleGaps(Instruction* instr);
+  // Returns true if an instruction is a tail call that needs to adjust the
+  // stack pointer before execution. The index of the empty stack slot above
+  // the adjusted stack pointer is returned in |slot|.
+ bool GetSlotAboveSPBeforeTailCall(Instruction* instr, int* slot);
+
// ===========================================================================
// ============= Architecture-specific code generation methods. ==============
// ===========================================================================
- void AssembleArchInstruction(Instruction* instr);
+ CodeGenResult AssembleArchInstruction(Instruction* instr);
void AssembleArchJump(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
- void AssembleDeoptimizerCall(int deoptimization_id,
- Deoptimizer::BailoutType bailout_type);
+ CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
+ Deoptimizer::BailoutType bailout_type);
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
- void AssemblePrologue();
-
- void AssembleSetupStackPointer();
+ void AssembleConstructFrame();
// Generates an architecture-specific, descriptor-specific return sequence
// to tear down a stack frame.
void AssembleReturn();
- // Generates code to deconstruct a the caller's frame, including arguments.
- void AssembleDeconstructActivationRecord(int stack_param_delta);
-
void AssembleDeconstructFrame();
// Generates code to manipulate the stack in preparation for a tail call.
- void AssemblePrepareTailCall(int stack_param_delta);
+ void AssemblePrepareTailCall();
// Generates code to pop current frame if it is an arguments adaptor frame.
void AssemblePopArgumentsAdaptorFrame(Register args_reg, Register scratch1,
Register scratch2, Register scratch3);
+ enum PushTypeFlag {
+ kImmediatePush = 0x1,
+ kScalarPush = 0x2,
+ kFloat32Push = 0x4,
+ kFloat64Push = 0x8,
+ kFloatPush = kFloat32Push | kFloat64Push
+ };
+
+ typedef base::Flags<PushTypeFlag> PushTypeFlags;
+
+ static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);
+
+  // Generate a list of moves from an instruction that are candidates to be
+  // turned into push instructions on platforms that support them. In general,
+  // the list of push candidates consists of moves to a set of contiguous
+  // destination InstructionOperand locations on the stack that don't clobber
+  // values needed to resolve the gap and don't use values generated by the
+  // gap, i.e. moves that can be hoisted together before the actual gap and
+  // assembled together.
+ static void GetPushCompatibleMoves(Instruction* instr,
+ PushTypeFlags push_type,
+ ZoneVector<MoveOperands*>* pushes);
+
+ // Called before a tail call |instr|'s gap moves are assembled and allows
+ // gap-specific pre-processing, e.g. adjustment of the sp for tail calls that
+ // need it before gap moves or conversion of certain gap moves into pushes.
+ void AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot);
+ // Called after a tail call |instr|'s gap moves are assembled and allows
+ // gap-specific post-processing, e.g. adjustment of the sp for tail calls that
+ // need it after gap moves.
+ void AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot);
+
// ===========================================================================
// ============== Architecture-specific gap resolver methods. ================
// ===========================================================================
@@ -157,8 +203,9 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
- FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
- size_t frame_state_offset);
+ DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
+ size_t frame_state_offset);
+ DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const;
int BuildTranslation(Instruction* instr, int pc_offset,
size_t frame_state_offset,
OutputFrameStateCombine state_combine);
@@ -174,40 +221,36 @@ class CodeGenerator final : public GapResolver::Assembler {
Translation* translation);
void AddTranslationForOperand(Translation* translation, Instruction* instr,
InstructionOperand* op, MachineType type);
- void AddNopForSmiCodeInlining();
void EnsureSpaceForLazyDeopt();
void MarkLazyDeoptSite();
DeoptimizationExit* AddDeoptimizationExit(Instruction* instr,
size_t frame_state_offset);
- // Converts the delta in the number of stack parameter passed from a tail
- // caller to the callee into the distance (in pointers) the SP must be
- // adjusted, taking frame elision and other relevant factors into
- // consideration.
- int TailCallFrameStackSlotDelta(int stack_param_delta);
-
// ===========================================================================
- struct DeoptimizationState : ZoneObject {
+ class DeoptimizationState final : public ZoneObject {
public:
+ DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset,
+ DeoptimizeReason reason)
+ : bailout_id_(bailout_id),
+ translation_id_(translation_id),
+ pc_offset_(pc_offset),
+ reason_(reason) {}
+
BailoutId bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
int pc_offset() const { return pc_offset_; }
-
- DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset)
- : bailout_id_(bailout_id),
- translation_id_(translation_id),
- pc_offset_(pc_offset) {}
+ DeoptimizeReason reason() const { return reason_; }
private:
BailoutId bailout_id_;
int translation_id_;
int pc_offset_;
+ DeoptimizeReason reason_;
};
struct HandlerInfo {
- bool caught_locally;
Label* handler;
int pc_offset;
};
@@ -217,6 +260,7 @@ class CodeGenerator final : public GapResolver::Assembler {
FrameAccessState* frame_access_state_;
Linkage* const linkage_;
InstructionSequence* const code_;
+ UnwindingInfoWriter unwinding_info_writer_;
CompilationInfo* const info_;
Label* const labels_;
Label return_label_;
@@ -235,6 +279,7 @@ class CodeGenerator final : public GapResolver::Assembler {
JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
+ SourcePositionTableBuilder source_position_table_builder_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-stub-assembler.cc b/deps/v8/src/compiler/code-stub-assembler.cc
deleted file mode 100644
index bbb4d6353b..0000000000
--- a/deps/v8/src/compiler/code-stub-assembler.cc
+++ /dev/null
@@ -1,1353 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/code-stub-assembler.h"
-
-#include <ostream>
-
-#include "src/code-factory.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size)
- : CodeStubAssembler(
- isolate, zone,
- Linkage::GetStubCallDescriptor(
- isolate, zone, descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size),
- flags, name) {}
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- int parameter_count, Code::Flags flags,
- const char* name)
- : CodeStubAssembler(isolate, zone, Linkage::GetJSCallDescriptor(
- zone, false, parameter_count,
- CallDescriptor::kNoFlags),
- flags, name) {}
-
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor,
- Code::Flags flags, const char* name)
- : raw_assembler_(new RawMachineAssembler(
- isolate, new (zone) Graph(zone), call_descriptor,
- MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags())),
- flags_(flags),
- name_(name),
- code_generated_(false),
- variables_(zone) {}
-
-CodeStubAssembler::~CodeStubAssembler() {}
-
-void CodeStubAssembler::CallPrologue() {}
-
-void CodeStubAssembler::CallEpilogue() {}
-
-Handle<Code> CodeStubAssembler::GenerateCode() {
- DCHECK(!code_generated_);
-
- Schedule* schedule = raw_assembler_->Export();
- Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
- isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
- name_);
-
- code_generated_ = true;
- return code;
-}
-
-
-Node* CodeStubAssembler::Int32Constant(int value) {
- return raw_assembler_->Int32Constant(value);
-}
-
-
-Node* CodeStubAssembler::IntPtrConstant(intptr_t value) {
- return raw_assembler_->IntPtrConstant(value);
-}
-
-
-Node* CodeStubAssembler::NumberConstant(double value) {
- return raw_assembler_->NumberConstant(value);
-}
-
-Node* CodeStubAssembler::SmiConstant(Smi* value) {
- return IntPtrConstant(bit_cast<intptr_t>(value));
-}
-
-Node* CodeStubAssembler::HeapConstant(Handle<HeapObject> object) {
- return raw_assembler_->HeapConstant(object);
-}
-
-
-Node* CodeStubAssembler::BooleanConstant(bool value) {
- return raw_assembler_->BooleanConstant(value);
-}
-
-Node* CodeStubAssembler::ExternalConstant(ExternalReference address) {
- return raw_assembler_->ExternalConstant(address);
-}
-
-Node* CodeStubAssembler::Float64Constant(double value) {
- return raw_assembler_->Float64Constant(value);
-}
-
-Node* CodeStubAssembler::BooleanMapConstant() {
- return HeapConstant(isolate()->factory()->boolean_map());
-}
-
-Node* CodeStubAssembler::HeapNumberMapConstant() {
- return HeapConstant(isolate()->factory()->heap_number_map());
-}
-
-Node* CodeStubAssembler::NullConstant() {
- return LoadRoot(Heap::kNullValueRootIndex);
-}
-
-Node* CodeStubAssembler::UndefinedConstant() {
- return LoadRoot(Heap::kUndefinedValueRootIndex);
-}
-
-Node* CodeStubAssembler::Parameter(int value) {
- return raw_assembler_->Parameter(value);
-}
-
-void CodeStubAssembler::Return(Node* value) {
- return raw_assembler_->Return(value);
-}
-
-void CodeStubAssembler::Bind(CodeStubAssembler::Label* label) {
- return label->Bind();
-}
-
-Node* CodeStubAssembler::LoadFramePointer() {
- return raw_assembler_->LoadFramePointer();
-}
-
-Node* CodeStubAssembler::LoadParentFramePointer() {
- return raw_assembler_->LoadParentFramePointer();
-}
-
-Node* CodeStubAssembler::LoadStackPointer() {
- return raw_assembler_->LoadStackPointer();
-}
-
-Node* CodeStubAssembler::SmiShiftBitsConstant() {
- return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
-Node* CodeStubAssembler::Float64Round(Node* x) {
- Node* one = Float64Constant(1.0);
- Node* one_half = Float64Constant(0.5);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this);
-
- // Round up {x} towards Infinity.
- var_x.Bind(Float64Ceil(x));
-
- GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
- &return_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Ceil(Node* x) {
- if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
- return raw_assembler_->Float64RoundUp(x);
- }
-
- Node* one = Float64Constant(1.0);
- Node* zero = Float64Constant(0.0);
- Node* two_52 = Float64Constant(4503599627370496.0E0);
- Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
-
- // Check if {x} is greater than zero.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- Bind(&if_xgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards Infinity.
- var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
- var_x.Bind(Float64Add(var_x.value(), one));
- Goto(&return_x);
- }
-
- Bind(&if_xnotgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]-2^52,0[
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
-
- // Round negated {x} towards Infinity and return the result negated.
- Node* minus_x = Float64Neg(x);
- var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_minus_x);
- }
-
- Bind(&return_minus_x);
- var_x.Bind(Float64Neg(var_x.value()));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
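
Float64Ceil above (and Float64Floor/Float64Trunc below) fall back to the same IEEE-754 trick when no native rounding instruction is available: for 0 < x < 2^52, computing (2^52 + x) - 2^52 discards the fraction bits, because doubles at that magnitude have a ULP of 1. A minimal scalar model of the ceiling case, using the same two_52 constant as the deleted code (assumes the default round-to-nearest mode; this is a sketch, not V8 code):

    #include <cassert>

    double CeilViaTwo52(double x) {
      const double two_52 = 4503599627370496.0;  // 2^52, as in the code above
      double rounded = (two_52 + x) - two_52;    // snaps x to a nearby integer
      if (rounded < x) rounded += 1.0;           // fix a round-down into a ceil
      return rounded;                            // valid only for 0 < x < 2^52
    }

    int main() {
      assert(CeilViaTwo52(2.1) == 3.0);
      assert(CeilViaTwo52(2.0) == 2.0);
      assert(CeilViaTwo52(2.5) == 3.0);
    }
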
-
-Node* CodeStubAssembler::Float64Floor(Node* x) {
- if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
- return raw_assembler_->Float64RoundDown(x);
- }
-
- Node* one = Float64Constant(1.0);
- Node* zero = Float64Constant(0.0);
- Node* two_52 = Float64Constant(4503599627370496.0E0);
- Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
-
- // Check if {x} is greater than zero.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- Bind(&if_xgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards -Infinity.
- var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_x);
- }
-
- Bind(&if_xnotgreaterthanzero);
- {
- // Just return {x} unless it's in the range ]-2^52,0[
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
-
- // Round negated {x} towards -Infinity and return the result negated.
- Node* minus_x = Float64Neg(x);
- var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
- var_x.Bind(Float64Add(var_x.value(), one));
- Goto(&return_minus_x);
- }
-
- Bind(&return_minus_x);
- var_x.Bind(Float64Neg(var_x.value()));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
-
-Node* CodeStubAssembler::Float64Trunc(Node* x) {
- if (raw_assembler_->machine()->Float64RoundTruncate().IsSupported()) {
- return raw_assembler_->Float64RoundTruncate(x);
- }
-
- Node* one = Float64Constant(1.0);
- Node* zero = Float64Constant(0.0);
- Node* two_52 = Float64Constant(4503599627370496.0E0);
- Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
-
- Variable var_x(this, MachineRepresentation::kFloat64);
- Label return_x(this), return_minus_x(this);
- var_x.Bind(x);
-
- // Check if {x} is greater than 0.
- Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
- Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
- &if_xnotgreaterthanzero);
-
- Bind(&if_xgreaterthanzero);
- {
- if (raw_assembler_->machine()->Float64RoundDown().IsSupported()) {
- var_x.Bind(raw_assembler_->Float64RoundDown(x));
- } else {
- // Just return {x} unless it's in the range ]0,2^52[.
- GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
-
- // Round positive {x} towards -Infinity.
- var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- }
- Goto(&return_x);
- }
-
- Bind(&if_xnotgreaterthanzero);
- {
- if (raw_assembler_->machine()->Float64RoundUp().IsSupported()) {
- var_x.Bind(raw_assembler_->Float64RoundUp(x));
- Goto(&return_x);
- } else {
-      // Just return {x} unless it's in the range ]-2^52,0[.
- GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
- GotoUnless(Float64LessThan(x, zero), &return_x);
-
- // Round negated {x} towards -Infinity and return result negated.
- Node* minus_x = Float64Neg(x);
- var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
- GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
- var_x.Bind(Float64Sub(var_x.value(), one));
- Goto(&return_minus_x);
- }
- }
-
- Bind(&return_minus_x);
- var_x.Bind(Float64Neg(var_x.value()));
- Goto(&return_x);
-
- Bind(&return_x);
- return var_x.value();
-}
-
-Node* CodeStubAssembler::SmiTag(Node* value) {
- return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
-}
-
-Node* CodeStubAssembler::SmiUntag(Node* value) {
- return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-}
-
-Node* CodeStubAssembler::SmiToWord32(Node* value) {
- Node* result = raw_assembler_->WordSar(value, SmiShiftBitsConstant());
- if (raw_assembler_->machine()->Is64()) {
- result = raw_assembler_->TruncateInt64ToInt32(result);
- }
- return result;
-}
-
-Node* CodeStubAssembler::SmiToFloat64(Node* value) {
- return ChangeInt32ToFloat64(SmiUntag(value));
-}
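
SmiTag, SmiUntag, and SmiToWord32 above compile to bare shifts. A standalone sketch of the 64-bit layout they assume (kSmiTagSize = 1 and kSmiShiftSize = 31, so the payload sits in the upper 32 bits with a zero low tag bit; two's-complement representation assumed, constants here illustrative):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiTagSize = 1;     // low bit: 0 = Smi, 1 = HeapObject
    constexpr int kSmiShiftSize = 31;  // 64-bit targets; 0 on 32-bit targets
    constexpr int kSmiShiftBits = kSmiShiftSize + kSmiTagSize;

    intptr_t SmiTag(int32_t value) {   // WordShl in the code above
      return static_cast<intptr_t>(static_cast<uintptr_t>(value) << kSmiShiftBits);
    }

    int32_t SmiToWord32(intptr_t smi) {  // WordSar, then truncate to 32 bits
      return static_cast<int32_t>(smi >> kSmiShiftBits);
    }

    int main() {
      assert(SmiToWord32(SmiTag(-7)) == -7);
      assert((SmiTag(123) & 1) == 0);  // tag bit clear marks a Smi
    }
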
-
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
-
-Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
- return IntPtrAddWithOverflow(a, b);
-}
-
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
-
-Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
- return IntPtrSubWithOverflow(a, b);
-}
-
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
-
-Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
- return IntPtrLessThan(a, b);
-}
-
-Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
- return IntPtrLessThanOrEqual(a, b);
-}
-
-Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
- // TODO(bmeurer): Consider using Select once available.
- Variable min(this, MachineRepresentation::kTagged);
- Label if_a(this), if_b(this), join(this);
- BranchIfSmiLessThan(a, b, &if_a, &if_b);
- Bind(&if_a);
- min.Bind(a);
- Goto(&join);
- Bind(&if_b);
- min.Bind(b);
- Goto(&join);
- Bind(&join);
- return min.value();
-}
-
-#define DEFINE_CODE_STUB_ASSEMBER_BINARY_OP(name) \
- Node* CodeStubAssembler::name(Node* a, Node* b) { \
- return raw_assembler_->name(a, b); \
- }
-CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_BINARY_OP)
-#undef DEFINE_CODE_STUB_ASSEMBER_BINARY_OP
-
-Node* CodeStubAssembler::WordShl(Node* value, int shift) {
- return raw_assembler_->WordShl(value, IntPtrConstant(shift));
-}
-
-#define DEFINE_CODE_STUB_ASSEMBER_UNARY_OP(name) \
- Node* CodeStubAssembler::name(Node* a) { return raw_assembler_->name(a); }
-CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBER_UNARY_OP)
-#undef DEFINE_CODE_STUB_ASSEMBER_UNARY_OP
-
-Node* CodeStubAssembler::WordIsSmi(Node* a) {
- return WordEqual(raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask)),
- IntPtrConstant(0));
-}
-
-Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
- return WordEqual(
- raw_assembler_->WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
- IntPtrConstant(0));
-}
-
-Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
- MachineType rep) {
- return raw_assembler_->Load(rep, buffer, IntPtrConstant(offset));
-}
-
-Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
- MachineType rep) {
- return raw_assembler_->Load(rep, object,
- IntPtrConstant(offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
- return Load(MachineType::Float64(), object,
- IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
- return StoreNoWriteBarrier(
- MachineRepresentation::kFloat64, object,
- IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag), value);
-}
-
-Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) {
- Node* value = LoadHeapNumberValue(object);
- return raw_assembler_->TruncateFloat64ToInt32(TruncationMode::kJavaScript,
- value);
-}
-
-Node* CodeStubAssembler::LoadMapBitField(Node* map) {
- return Load(MachineType::Uint8(), map,
- IntPtrConstant(Map::kBitFieldOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
- return Load(MachineType::Uint8(), map,
- IntPtrConstant(Map::kBitField2Offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
- return Load(MachineType::Uint32(), map,
- IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
- return Load(MachineType::Uint8(), map,
- IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
- return LoadObjectField(map, Map::kDescriptorsOffset);
-}
-
-Node* CodeStubAssembler::LoadNameHash(Node* name) {
- return Load(MachineType::Uint32(), name,
- IntPtrConstant(Name::kHashFieldOffset - kHeapObjectTag));
-}
-
-Node* CodeStubAssembler::LoadFixedArrayElementInt32Index(
- Node* object, Node* int32_index, int additional_offset) {
- Node* header_size = IntPtrConstant(additional_offset +
- FixedArray::kHeaderSize - kHeapObjectTag);
- Node* scaled_index = WordShl(int32_index, IntPtrConstant(kPointerSizeLog2));
- Node* offset = IntPtrAdd(scaled_index, header_size);
- return Load(MachineType::AnyTagged(), object, offset);
-}
-
-Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
- Node* smi_index,
- int additional_offset) {
- int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
- Node* header_size = IntPtrConstant(additional_offset +
- FixedArray::kHeaderSize - kHeapObjectTag);
- Node* scaled_index =
- (kSmiShiftBits > kPointerSizeLog2)
- ? WordSar(smi_index, IntPtrConstant(kSmiShiftBits - kPointerSizeLog2))
- : WordShl(smi_index,
- IntPtrConstant(kPointerSizeLog2 - kSmiShiftBits));
- Node* offset = IntPtrAdd(scaled_index, header_size);
- return Load(MachineType::AnyTagged(), object, offset);
-}
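
LoadFixedArrayElementSmiIndex above never untags the index: a 64-bit smi is already the element index shifted left by 32, and a byte offset needs the index shifted left by kPointerSizeLog2 (3), so a single arithmetic right shift by the difference does both steps at once. A sketch of that offset computation (the 16-byte FixedArray header size is an assumption for illustration, not taken from this diff):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShiftBits = 32;    // 64-bit smi layout, as sketched above
    constexpr int kPointerSizeLog2 = 3;  // 8-byte elements
    constexpr int kHeaderSize = 16;      // illustrative FixedArray header size
    constexpr int kHeapObjectTag = 1;

    intptr_t ElementOffset(intptr_t smi_index) {
      intptr_t scaled = smi_index >> (kSmiShiftBits - kPointerSizeLog2);
      return scaled + (kHeaderSize - kHeapObjectTag);
    }

    int main() {
      intptr_t smi_two = intptr_t{2} << kSmiShiftBits;   // SmiTag(2)
      assert(ElementOffset(smi_two) == 2 * 8 + 16 - 1);  // element #2
    }
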
-
-Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
- int index) {
- Node* offset = IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag +
- index * kPointerSize);
- return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
-}
-
-Node* CodeStubAssembler::StoreFixedArrayElementNoWriteBarrier(Node* object,
- Node* index,
- Node* value) {
- Node* offset =
- IntPtrAdd(WordShl(index, IntPtrConstant(kPointerSizeLog2)),
- IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag));
- return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
- value);
-}
-
-Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
- if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
- Handle<Object> root = isolate()->heap()->root_handle(root_index);
- if (root->IsSmi()) {
- return SmiConstant(Smi::cast(*root));
- } else {
- return HeapConstant(Handle<HeapObject>::cast(root));
- }
- }
-
- compiler::Node* roots_array_start =
- ExternalConstant(ExternalReference::roots_array_start(isolate()));
- USE(roots_array_start);
-
-  // TODO(danno): Implement the root-access case where the root is not constant
- // and must be loaded from the root array.
- UNIMPLEMENTED();
- return nullptr;
-}
-
-Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
- AllocationFlags flags,
- Node* top_address,
- Node* limit_address) {
- Node* top = Load(MachineType::Pointer(), top_address);
- Node* limit = Load(MachineType::Pointer(), limit_address);
-
- // If there's not enough space, call the runtime.
- RawMachineLabel runtime_call(RawMachineLabel::kDeferred), no_runtime_call,
- merge_runtime;
- raw_assembler_->Branch(
- raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
- &runtime_call, &no_runtime_call);
-
- raw_assembler_->Bind(&runtime_call);
- // AllocateInTargetSpace does not use the context.
- Node* context = IntPtrConstant(0);
- Node* runtime_flags = SmiTag(Int32Constant(
- AllocateDoubleAlignFlag::encode(false) |
- AllocateTargetSpace::encode(flags & kPretenured
- ? AllocationSpace::OLD_SPACE
- : AllocationSpace::NEW_SPACE)));
- Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
- SmiTag(size_in_bytes), runtime_flags);
- raw_assembler_->Goto(&merge_runtime);
-
- // When there is enough space, return `top' and bump it up.
- raw_assembler_->Bind(&no_runtime_call);
- Node* no_runtime_result = top;
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
- IntPtrAdd(top, size_in_bytes));
- no_runtime_result =
- IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
- raw_assembler_->Goto(&merge_runtime);
-
- raw_assembler_->Bind(&merge_runtime);
- return raw_assembler_->Phi(MachineType::PointerRepresentation(),
- runtime_result, no_runtime_result);
-}
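
AllocateRawUnaligned above is a standard bump-pointer fast path: if the request fits under the space's limit, bump top and return the old top with the heap-object tag added; otherwise branch to the runtime. A toy model of just that fast-path decision (Space and the zero return value are illustrative stand-ins, not V8's API):

    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t kHeapObjectTag = 1;

    struct Space { uintptr_t top, limit; };  // illustrative stand-in

    // Returns a tagged address, or 0 when the runtime slow path is required.
    uintptr_t TryAllocate(Space* space, size_t size_in_bytes) {
      if (space->limit - space->top < size_in_bytes) return 0;  // runtime call
      uintptr_t result = space->top + kHeapObjectTag;  // tag the raw address
      space->top += size_in_bytes;                     // bump the top pointer
      return result;
    }
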
-
-Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
- AllocationFlags flags,
- Node* top_address,
- Node* limit_address) {
- Node* top = Load(MachineType::Pointer(), top_address);
- Node* limit = Load(MachineType::Pointer(), limit_address);
- Node* adjusted_size = size_in_bytes;
- if (flags & kDoubleAlignment) {
- // TODO(epertoso): Simd128 alignment.
- RawMachineLabel aligned, not_aligned, merge;
- raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
- &not_aligned, &aligned);
-
- raw_assembler_->Bind(&not_aligned);
- Node* not_aligned_size =
- IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
- raw_assembler_->Goto(&merge);
-
- raw_assembler_->Bind(&aligned);
- raw_assembler_->Goto(&merge);
-
- raw_assembler_->Bind(&merge);
- adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
- not_aligned_size, adjusted_size);
- }
-
- Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
-
- RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
- raw_assembler_->Branch(
- raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
- &doesnt_need_filler, &needs_filler);
-
- raw_assembler_->Bind(&needs_filler);
- // Store a filler and increase the address by kPointerSize.
- // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
- // it when Simd128 alignment is supported.
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
- LoadRoot(Heap::kOnePointerFillerMapRootIndex));
- Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
- raw_assembler_->Goto(&merge_address);
-
- raw_assembler_->Bind(&doesnt_need_filler);
- Node* address_without_filler = address;
- raw_assembler_->Goto(&merge_address);
-
- raw_assembler_->Bind(&merge_address);
- address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
- address_with_filler, address_without_filler);
- // Update the top.
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
- IntPtrAdd(top, adjusted_size));
- return address;
-}
-
-Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
- bool const new_space = !(flags & kPretenured);
- Node* top_address = ExternalConstant(
- new_space
- ? ExternalReference::new_space_allocation_top_address(isolate())
- : ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = ExternalConstant(
- new_space
- ? ExternalReference::new_space_allocation_limit_address(isolate())
- : ExternalReference::old_space_allocation_limit_address(isolate()));
-
-#ifdef V8_HOST_ARCH_32_BIT
- if (flags & kDoubleAlignment) {
- return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
- limit_address);
- }
-#endif
-
- return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
- limit_address);
-}
-
-Node* CodeStubAssembler::AllocateHeapNumber() {
- Node* result = Allocate(HeapNumber::kSize, kNone);
- StoreMapNoWriteBarrier(result, HeapNumberMapConstant());
- return result;
-}
-
-Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value) {
- Node* result = AllocateHeapNumber();
- StoreHeapNumberValue(result, value);
- return result;
-}
-
-Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
- return raw_assembler_->Load(rep, base);
-}
-
-Node* CodeStubAssembler::Load(MachineType rep, Node* base, Node* index) {
- return raw_assembler_->Load(rep, base, index);
-}
-
-Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
- Node* value) {
- return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
-}
-
-Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
- Node* index, Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
-}
-
-Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
- Node* base, Node* value) {
- return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
-}
-
-Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
- Node* base, Node* index,
- Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
-}
-
-Node* CodeStubAssembler::Projection(int index, Node* value) {
- return raw_assembler_->Projection(index, value);
-}
-
-Node* CodeStubAssembler::LoadMap(Node* object) {
- return LoadObjectField(object, HeapObject::kMapOffset);
-}
-
-Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
- return StoreNoWriteBarrier(
- MachineRepresentation::kTagged, object,
- IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
-}
-
-Node* CodeStubAssembler::LoadInstanceType(Node* object) {
- return LoadMapInstanceType(LoadMap(object));
-}
-
-Node* CodeStubAssembler::LoadElements(Node* object) {
- return LoadObjectField(object, JSObject::kElementsOffset);
-}
-
-Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
- return LoadObjectField(array, FixedArrayBase::kLengthOffset);
-}
-
-Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
- uint32_t mask) {
- return raw_assembler_->Word32Shr(
- raw_assembler_->Word32And(word32, raw_assembler_->Int32Constant(mask)),
- raw_assembler_->Int32Constant(shift));
-}
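
BitFieldDecode above is the usual mask-then-shift idiom; it expects the mask to be pre-positioned at the field's shift, which is how V8's BitField<> encoders lay fields out. The same operation in plain C++:

    #include <cassert>
    #include <cstdint>

    uint32_t BitFieldDecode(uint32_t word32, uint32_t shift, uint32_t mask) {
      return (word32 & mask) >> shift;
    }

    int main() {
      // A 3-bit field at bit 4: mask = 0b111 << 4 = 0x70.
      assert(BitFieldDecode(0x50u, 4, 0x70u) == 5);
    }
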
-
-Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
- Node* value32 = raw_assembler_->TruncateFloat64ToInt32(
- TruncationMode::kRoundToZero, value);
- Node* value64 = ChangeInt32ToFloat64(value32);
-
- Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this);
-
- Label if_valueisequal(this), if_valueisnotequal(this);
- Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
- Bind(&if_valueisequal);
- {
- Label if_valueiszero(this), if_valueisnotzero(this);
- Branch(Float64Equal(value, Float64Constant(0.0)), &if_valueiszero,
- &if_valueisnotzero);
-
- Bind(&if_valueiszero);
- BranchIfInt32LessThan(raw_assembler_->Float64ExtractHighWord32(value),
- Int32Constant(0), &if_valueisheapnumber,
- &if_valueisint32);
-
- Bind(&if_valueisnotzero);
- Goto(&if_valueisint32);
- }
- Bind(&if_valueisnotequal);
- Goto(&if_valueisheapnumber);
-
- Variable var_result(this, MachineRepresentation::kTagged);
- Bind(&if_valueisint32);
- {
- if (raw_assembler_->machine()->Is64()) {
- Node* result = SmiTag(ChangeInt32ToInt64(value32));
- var_result.Bind(result);
- Goto(&if_join);
- } else {
- Node* pair = Int32AddWithOverflow(value32, value32);
- Node* overflow = Projection(1, pair);
- Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_overflow);
- Goto(&if_valueisheapnumber);
- Bind(&if_notoverflow);
- {
- Node* result = Projection(0, pair);
- var_result.Bind(result);
- Goto(&if_join);
- }
- }
- }
- Bind(&if_valueisheapnumber);
- {
- Node* result = AllocateHeapNumberWithValue(value);
- var_result.Bind(result);
- Goto(&if_join);
- }
- Bind(&if_join);
- return var_result.value();
-}
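
ChangeFloat64ToTagged above smi-tags a double only when truncating it to int32 and converting back reproduces the value exactly, with one subtlety: 0.0 and -0.0 compare equal, so the zero case inspects the sign in the high word and keeps -0.0 boxed as a HeapNumber. A scalar model of that predicate (the range guard and std::signbit stand in for the machine-level checks; V8 additionally requires the result to fit the target's smi range):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    bool Int32RoundTripsExactly(double value) {
      if (!(value >= -2147483648.0 && value < 2147483648.0)) return false;
      int32_t value32 = static_cast<int32_t>(value);            // truncate
      if (static_cast<double>(value32) != value) return false;  // fraction lost
      if (value == 0.0 && std::signbit(value)) return false;    // -0.0 boxes
      return true;
    }

    int main() {
      assert(Int32RoundTripsExactly(42.0));
      assert(!Int32RoundTripsExactly(2.5));
      assert(!Int32RoundTripsExactly(-0.0));
    }
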
-
-Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- return SmiTag(ChangeInt32ToInt64(value));
- }
- Variable var_result(this, MachineRepresentation::kTagged);
- Node* pair = Int32AddWithOverflow(value, value);
- Node* overflow = Projection(1, pair);
- Label if_overflow(this, Label::kDeferred), if_notoverflow(this),
- if_join(this);
- Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_overflow);
- {
- Node* value64 = ChangeInt32ToFloat64(value);
- Node* result = AllocateHeapNumberWithValue(value64);
- var_result.Bind(result);
- }
- Goto(&if_join);
- Bind(&if_notoverflow);
- {
- Node* result = Projection(0, pair);
- var_result.Bind(result);
- }
- Goto(&if_join);
- Bind(&if_join);
- return var_result.value();
-}
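
On 32-bit targets ChangeInt32ToTagged above tags by computing value + value: with a one-bit smi tag, that is exactly the left-shift-by-one a smi needs, and the add's overflow flag reports precisely the values that must be boxed in a HeapNumber instead. A sketch of the same check (uses the GCC/Clang __builtin_add_overflow intrinsic; the code above reads the overflow projection of Int32AddWithOverflow):

    #include <cstdint>

    // Returns true and writes the tagged smi; false means box as HeapNumber.
    bool TryTagAsSmi32(int32_t value, int32_t* out) {
      int32_t tagged;
      if (__builtin_add_overflow(value, value, &tagged)) return false;
      *out = tagged;  // value << 1: payload in the upper 31 bits, tag bit 0
      return true;
    }
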
-
-Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
- // We might need to loop once due to ToNumber conversion.
- Variable var_value(this, MachineRepresentation::kTagged),
- var_result(this, MachineRepresentation::kFloat64);
- Label loop(this, &var_value), done_loop(this, &var_result);
- var_value.Bind(value);
- Goto(&loop);
- Bind(&loop);
- {
- // Load the current {value}.
- value = var_value.value();
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- Bind(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToFloat64(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
-
- Bind(&if_valueisheapnumber);
- {
- // Load the floating point value.
- var_result.Bind(LoadHeapNumberValue(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotheapnumber);
- {
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- Goto(&loop);
- }
- }
- }
- Bind(&done_loop);
- return var_result.value();
-}
-
-Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
- // We might need to loop once due to ToNumber conversion.
- Variable var_value(this, MachineRepresentation::kTagged),
- var_result(this, MachineRepresentation::kWord32);
- Label loop(this, &var_value), done_loop(this, &var_result);
- var_value.Bind(value);
- Goto(&loop);
- Bind(&loop);
- {
- // Load the current {value}.
- value = var_value.value();
-
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
-
- Bind(&if_valueissmi);
- {
- // Convert the Smi {value}.
- var_result.Bind(SmiToWord32(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
-
- Bind(&if_valueisheapnumber);
- {
- // Truncate the floating point value.
- var_result.Bind(TruncateHeapNumberValueToWord32(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotheapnumber);
- {
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- Goto(&loop);
- }
- }
- }
- Bind(&done_loop);
- return var_result.value();
-}
-
-void CodeStubAssembler::BranchIf(Node* condition, Label* if_true,
- Label* if_false) {
- Label if_condition_is_true(this), if_condition_is_false(this);
- Branch(condition, &if_condition_is_true, &if_condition_is_false);
- Bind(&if_condition_is_true);
- Goto(if_true);
- Bind(&if_condition_is_false);
- Goto(if_false);
-}
-
-Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
- Node** args) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
- CallEpilogue();
- return return_value;
-}
-
-
-Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
- Node* code_target, Node** args) {
- return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
- arg3, arg4, context);
- CallEpilogue();
- return return_value;
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- return raw_assembler_->TailCallRuntime0(function_id, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1) {
- return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1,
- Node* arg2) {
- return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3) {
- return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
- context);
-}
-
-Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4) {
- return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
- context);
-}
-
-Node* CodeStubAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, result_size);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(2);
- args[0] = arg1;
- args[1] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(6);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
- result_size);
-}
-
-Node* CodeStubAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeStubAssembler::TailCall(
- const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
- Node** args, size_t result_size) {
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
- return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
-
-void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
- label->MergeVariables();
- raw_assembler_->Goto(label->label_);
-}
-
-void CodeStubAssembler::GotoIf(Node* condition, Label* true_label) {
- Label false_label(this);
- Branch(condition, true_label, &false_label);
- Bind(&false_label);
-}
-
-void CodeStubAssembler::GotoUnless(Node* condition, Label* false_label) {
- Label true_label(this);
- Branch(condition, &true_label, false_label);
- Bind(&true_label);
-}
-
-void CodeStubAssembler::Branch(Node* condition,
- CodeStubAssembler::Label* true_label,
- CodeStubAssembler::Label* false_label) {
- true_label->MergeVariables();
- false_label->MergeVariables();
- return raw_assembler_->Branch(condition, true_label->label_,
- false_label->label_);
-}
-
-void CodeStubAssembler::Switch(Node* index, Label* default_label,
- int32_t* case_values, Label** case_labels,
- size_t case_count) {
- RawMachineLabel** labels =
- new (zone()->New(sizeof(RawMachineLabel*) * case_count))
- RawMachineLabel*[case_count];
- for (size_t i = 0; i < case_count; ++i) {
- labels[i] = case_labels[i]->label_;
- case_labels[i]->MergeVariables();
- default_label->MergeVariables();
- }
- return raw_assembler_->Switch(index, default_label->label_, case_values,
- labels, case_count);
-}
-
-// RawMachineAssembler delegate helpers:
-Isolate* CodeStubAssembler::isolate() const {
- return raw_assembler_->isolate();
-}
-
-Factory* CodeStubAssembler::factory() const { return isolate()->factory(); }
-
-Graph* CodeStubAssembler::graph() const { return raw_assembler_->graph(); }
-
-Zone* CodeStubAssembler::zone() const { return raw_assembler_->zone(); }
-
-// The core implementation of Variable is stored through an indirection so
-// that it can outlive the often block-scoped Variable declarations. This is
-// needed to ensure that variable binding and merging through phis can
-// properly be verified.
-class CodeStubAssembler::Variable::Impl : public ZoneObject {
- public:
- explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
- Node* value_;
- MachineRepresentation rep_;
-};
-
-CodeStubAssembler::Variable::Variable(CodeStubAssembler* assembler,
- MachineRepresentation rep)
- : impl_(new (assembler->zone()) Impl(rep)) {
- assembler->variables_.push_back(impl_);
-}
-
-void CodeStubAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
-
-Node* CodeStubAssembler::Variable::value() const {
- DCHECK_NOT_NULL(impl_->value_);
- return impl_->value_;
-}
-
-MachineRepresentation CodeStubAssembler::Variable::rep() const {
- return impl_->rep_;
-}
-
-bool CodeStubAssembler::Variable::IsBound() const {
- return impl_->value_ != nullptr;
-}
-
-CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
- int merged_value_count,
- CodeStubAssembler::Variable** merged_variables,
- CodeStubAssembler::Label::Type type)
- : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
- void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
- label_ = new (buffer)
- RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
- : RawMachineLabel::kNonDeferred);
- for (int i = 0; i < merged_value_count; ++i) {
- variable_phis_[merged_variables[i]->impl_] = nullptr;
- }
-}
-
-void CodeStubAssembler::Label::MergeVariables() {
- ++merge_count_;
- for (auto var : assembler_->variables_) {
- size_t count = 0;
- Node* node = var->value_;
- if (node != nullptr) {
- auto i = variable_merges_.find(var);
- if (i != variable_merges_.end()) {
- i->second.push_back(node);
- count = i->second.size();
- } else {
- count = 1;
- variable_merges_[var] = std::vector<Node*>(1, node);
- }
- }
-    // If the following assert fires, then you've jumped to a label along a
-    // path where a variable that expects to merge its value into a phi is
-    // not bound.
- DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
- count == merge_count_);
- USE(count);
-
- // If the label is already bound, we already know the set of variables to
- // merge and phi nodes have already been created.
- if (bound_) {
- auto phi = variable_phis_.find(var);
- if (phi != variable_phis_.end()) {
- DCHECK_NOT_NULL(phi->second);
- assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
- } else {
- auto i = variable_merges_.find(var);
- if (i != variable_merges_.end()) {
- // If the following assert fires, then you've declared a variable that
- // has the same bound value along all paths up until the point you
- // bound this label, but then later merged a path with a new value for
- // the variable after the label bind (it's not possible to add phis to
-          // the bound label after the fact; just make sure to list the variable
- // in the label's constructor's list of merged variables).
- DCHECK(find_if(i->second.begin(), i->second.end(),
- [node](Node* e) -> bool { return node != e; }) ==
- i->second.end());
- }
- }
- }
- }
-}
-
-void CodeStubAssembler::Label::Bind() {
- DCHECK(!bound_);
- assembler_->raw_assembler_->Bind(label_);
-
- // Make sure that all variables that have changed along any path up to this
- // point are marked as merge variables.
- for (auto var : assembler_->variables_) {
- Node* shared_value = nullptr;
- auto i = variable_merges_.find(var);
- if (i != variable_merges_.end()) {
- for (auto value : i->second) {
- DCHECK(value != nullptr);
- if (value != shared_value) {
- if (shared_value == nullptr) {
- shared_value = value;
- } else {
- variable_phis_[var] = nullptr;
- }
- }
- }
- }
- }
-
- for (auto var : variable_phis_) {
- CodeStubAssembler::Variable::Impl* var_impl = var.first;
- auto i = variable_merges_.find(var_impl);
- // If the following assert fires, then a variable that has been marked as
- // being merged at the label--either by explicitly marking it so in the
- // label constructor or by having seen different bound values at branches
- // into the label--doesn't have a bound value along all of the paths that
- // have been merged into the label up to this point.
- DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
- Node* phi = assembler_->raw_assembler_->Phi(
- var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
- variable_phis_[var_impl] = phi;
- }
-
- // Bind all variables to a merge phi, the common value along all paths or
- // null.
- for (auto var : assembler_->variables_) {
- auto i = variable_phis_.find(var);
- if (i != variable_phis_.end()) {
- var->value_ = i->second;
- } else {
- auto j = variable_merges_.find(var);
- if (j != variable_merges_.end() && j->second.size() == merge_count_) {
- var->value_ = j->second.back();
- } else {
- var->value_ = nullptr;
- }
- }
- }
-
- bound_ = true;
-}
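
The scan at the top of Label::Bind above decides which variables get phis: a phi is needed exactly when the values merged along the incoming control paths disagree, while a single shared value is rebound directly. That decision, reduced to self-contained C++ (int stands in for Node*):

    #include <cassert>
    #include <vector>

    bool NeedsPhi(const std::vector<int>& merged_values) {
      for (int value : merged_values)
        if (value != merged_values.front()) return true;  // paths disagree
      return false;  // one shared value along every path: no phi required
    }

    int main() {
      assert(!NeedsPhi({7, 7, 7}));  // rebind the variable to 7
      assert(NeedsPhi({7, 8, 7}));   // merge point needs a phi node
    }
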
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/code-stub-assembler.h b/deps/v8/src/compiler/code-stub-assembler.h
deleted file mode 100644
index 9fcb890606..0000000000
--- a/deps/v8/src/compiler/code-stub-assembler.h
+++ /dev/null
@@ -1,475 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
-#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
-
-#include <map>
-
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
-#include "src/allocation.h"
-#include "src/builtins.h"
-#include "src/heap/heap.h"
-#include "src/machine-type.h"
-#include "src/runtime/runtime.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class Callable;
-class CallInterfaceDescriptor;
-class Isolate;
-class Factory;
-class Zone;
-
-namespace compiler {
-
-class CallDescriptor;
-class Graph;
-class Node;
-class Operator;
-class RawMachineAssembler;
-class RawMachineLabel;
-class Schedule;
-
-#define CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
- V(Float32Equal) \
- V(Float32LessThan) \
- V(Float32LessThanOrEqual) \
- V(Float32GreaterThan) \
- V(Float32GreaterThanOrEqual) \
- V(Float64Equal) \
- V(Float64LessThan) \
- V(Float64LessThanOrEqual) \
- V(Float64GreaterThan) \
- V(Float64GreaterThanOrEqual) \
- V(Int32GreaterThan) \
- V(Int32GreaterThanOrEqual) \
- V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
- V(IntPtrLessThan) \
- V(IntPtrLessThanOrEqual) \
- V(Uint32LessThan) \
- V(UintPtrGreaterThanOrEqual) \
- V(WordEqual) \
- V(WordNotEqual) \
- V(Word32Equal) \
- V(Word32NotEqual) \
- V(Word64Equal) \
- V(Word64NotEqual)
-
-#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
- CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float64InsertLowWord32) \
- V(Float64InsertHighWord32) \
- V(IntPtrAdd) \
- V(IntPtrAddWithOverflow) \
- V(IntPtrSub) \
- V(IntPtrSubWithOverflow) \
- V(Int32Add) \
- V(Int32AddWithOverflow) \
- V(Int32Sub) \
- V(Int32Mul) \
- V(WordOr) \
- V(WordAnd) \
- V(WordXor) \
- V(WordShl) \
- V(WordShr) \
- V(WordSar) \
- V(WordRor) \
- V(Word32Or) \
- V(Word32And) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Ror) \
- V(Word64Or) \
- V(Word64And) \
- V(Word64Xor) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Ror)
-
-#define CODE_STUB_ASSEMBLER_UNARY_OP_LIST(V) \
- V(Float64Neg) \
- V(Float64Sqrt) \
- V(ChangeFloat64ToUint32) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(Word32Clz)
-
-class CodeStubAssembler {
- public:
- // Create with CallStub linkage.
- // |result_size| specifies the number of results returned by the stub.
- // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
- CodeStubAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size = 1);
-
- // Create with JSCall linkage.
- CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name);
-
- virtual ~CodeStubAssembler();
-
- Handle<Code> GenerateCode();
-
- class Label;
- class Variable {
- public:
- explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep);
- void Bind(Node* value);
- Node* value() const;
- MachineRepresentation rep() const;
- bool IsBound() const;
-
- private:
- friend class CodeStubAssembler;
- class Impl;
- Impl* impl_;
- };
-
- enum AllocationFlag : uint8_t {
- kNone = 0,
- kDoubleAlignment = 1,
- kPretenured = 1 << 1
- };
-
- typedef base::Flags<AllocationFlag> AllocationFlags;
-
- // ===========================================================================
- // Base Assembler
- // ===========================================================================
-
- // Constants.
- Node* Int32Constant(int value);
- Node* IntPtrConstant(intptr_t value);
- Node* NumberConstant(double value);
- Node* SmiConstant(Smi* value);
- Node* HeapConstant(Handle<HeapObject> object);
- Node* BooleanConstant(bool value);
- Node* ExternalConstant(ExternalReference address);
- Node* Float64Constant(double value);
- Node* BooleanMapConstant();
- Node* HeapNumberMapConstant();
- Node* NullConstant();
- Node* UndefinedConstant();
-
- Node* Parameter(int value);
- void Return(Node* value);
-
- void Bind(Label* label);
- void Goto(Label* label);
- void GotoIf(Node* condition, Label* true_label);
- void GotoUnless(Node* condition, Label* false_label);
- void Branch(Node* condition, Label* true_label, Label* false_label);
-
- void Switch(Node* index, Label* default_label, int32_t* case_values,
- Label** case_labels, size_t case_count);
-
- // Access to the frame pointer
- Node* LoadFramePointer();
- Node* LoadParentFramePointer();
-
- // Access to the stack pointer
- Node* LoadStackPointer();
-
- // Load raw memory location.
- Node* Load(MachineType rep, Node* base);
- Node* Load(MachineType rep, Node* base, Node* index);
-
- // Store value to raw memory location.
- Node* Store(MachineRepresentation rep, Node* base, Node* value);
- Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
- Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
- Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
- Node* value);
-
-// Basic arithmetic operations.
-#define DECLARE_CODE_STUB_ASSEMBER_BINARY_OP(name) Node* name(Node* a, Node* b);
- CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_BINARY_OP)
-#undef DECLARE_CODE_STUB_ASSEMBER_BINARY_OP
-
- Node* WordShl(Node* value, int shift);
-
-// Unary
-#define DECLARE_CODE_STUB_ASSEMBER_UNARY_OP(name) Node* name(Node* a);
- CODE_STUB_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBER_UNARY_OP)
-#undef DECLARE_CODE_STUB_ASSEMBER_UNARY_OP
-
- // Projections
- Node* Projection(int index, Node* value);
-
- // Calls
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4);
-
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- size_t result_size = 1);
-
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size = 1);
-
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, size_t result_size = 1);
-
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2,
- size_t result_size = 1);
-
- Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
- Node** args, size_t result_size = 1);
-
- // ===========================================================================
- // Macros
- // ===========================================================================
-
- // Float64 operations.
- Node* Float64Ceil(Node* x);
- Node* Float64Floor(Node* x);
- Node* Float64Round(Node* x);
- Node* Float64Trunc(Node* x);
-
- // Tag a Word as a Smi value.
- Node* SmiTag(Node* value);
- // Untag a Smi value as a Word.
- Node* SmiUntag(Node* value);
-
- // Smi conversions.
- Node* SmiToFloat64(Node* value);
- Node* SmiToWord32(Node* value);
-
- // Smi operations.
- Node* SmiAdd(Node* a, Node* b);
- Node* SmiAddWithOverflow(Node* a, Node* b);
- Node* SmiSub(Node* a, Node* b);
- Node* SmiSubWithOverflow(Node* a, Node* b);
- Node* SmiEqual(Node* a, Node* b);
- Node* SmiLessThan(Node* a, Node* b);
- Node* SmiLessThanOrEqual(Node* a, Node* b);
- Node* SmiMin(Node* a, Node* b);
-
- // Load a value from the root array.
- Node* LoadRoot(Heap::RootListIndex root_index);
-
- // Check a value for smi-ness
- Node* WordIsSmi(Node* a);
-
- // Check that the value is a positive smi.
- Node* WordIsPositiveSmi(Node* a);
-
- // Load an object pointer from a buffer that isn't in the heap.
- Node* LoadBufferObject(Node* buffer, int offset,
- MachineType rep = MachineType::AnyTagged());
- // Load a field from an object on the heap.
- Node* LoadObjectField(Node* object, int offset,
- MachineType rep = MachineType::AnyTagged());
- // Load the floating point value of a HeapNumber.
- Node* LoadHeapNumberValue(Node* object);
- // Store the floating point value of a HeapNumber.
- Node* StoreHeapNumberValue(Node* object, Node* value);
- // Truncate the floating point value of a HeapNumber to an Int32.
- Node* TruncateHeapNumberValueToWord32(Node* object);
- // Load the bit field of a Map.
- Node* LoadMapBitField(Node* map);
- // Load bit field 2 of a map.
- Node* LoadMapBitField2(Node* map);
- // Load bit field 3 of a map.
- Node* LoadMapBitField3(Node* map);
- // Load the instance type of a map.
- Node* LoadMapInstanceType(Node* map);
- // Load the instance descriptors of a map.
- Node* LoadMapDescriptors(Node* map);
-
- // Load the hash field of a name.
- Node* LoadNameHash(Node* name);
-
- // Load an array element from a FixedArray.
- Node* LoadFixedArrayElementInt32Index(Node* object, Node* int32_index,
- int additional_offset = 0);
- Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
- int additional_offset = 0);
- Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
-
- // Allocate an object of the given size.
- Node* Allocate(int size, AllocationFlags flags = kNone);
- // Allocate a HeapNumber without initializing its value.
- Node* AllocateHeapNumber();
- // Allocate a HeapNumber with a specific value.
- Node* AllocateHeapNumberWithValue(Node* value);
-
- // Store an array element to a FixedArray.
- Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
- Node* value);
-  // Load the Map of a HeapObject.
-  Node* LoadMap(Node* object);
-  // Store the Map of a HeapObject.
-  Node* StoreMapNoWriteBarrier(Node* object, Node* map);
-  // Load the instance type of a HeapObject.
-  Node* LoadInstanceType(Node* object);
-
- // Load the elements backing store of a JSObject.
- Node* LoadElements(Node* object);
- // Load the length of a fixed array base instance.
- Node* LoadFixedArrayBaseLength(Node* array);
-
- // Returns a node that is true if the given bit is set in |word32|.
- template <typename T>
- Node* BitFieldDecode(Node* word32) {
- return BitFieldDecode(word32, T::kShift, T::kMask);
- }
-
- Node* BitFieldDecode(Node* word32, uint32_t shift, uint32_t mask);
-
- // Conversions.
- Node* ChangeFloat64ToTagged(Node* value);
- Node* ChangeInt32ToTagged(Node* value);
- Node* TruncateTaggedToFloat64(Node* context, Node* value);
- Node* TruncateTaggedToWord32(Node* context, Node* value);
-
- // Branching helpers.
- // TODO(danno): Can we be more cleverish wrt. edge-split?
- void BranchIf(Node* condition, Label* if_true, Label* if_false);
-
-#define BRANCH_HELPER(name) \
- void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
- BranchIf(name(a, b), if_true, if_false); \
- }
- CODE_STUB_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
-#undef BRANCH_HELPER
-
- void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
- BranchIf(SmiLessThan(a, b), if_true, if_false);
- }
-
- void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
- Label* if_false) {
- BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
- }
-
- void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
- BranchIfFloat64Equal(value, value, if_false, if_true);
- }
-
- // Helpers which delegate to RawMachineAssembler.
- Factory* factory() const;
- Isolate* isolate() const;
- Zone* zone() const;
-
- protected:
- // Protected helpers which delegate to RawMachineAssembler.
- Graph* graph() const;
-
- // Enables subclasses to perform operations before and after a call.
- virtual void CallPrologue();
- virtual void CallEpilogue();
-
- private:
- friend class CodeStubAssemblerTester;
-
- CodeStubAssembler(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor, Code::Flags flags,
- const char* name);
-
- Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
- Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-
- Node* SmiShiftBitsConstant();
-
- Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
- Node* top_address, Node* limit_address);
- Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
-                             Node* top_address, Node* limit_address);
-
- base::SmartPointer<RawMachineAssembler> raw_assembler_;
- Code::Flags flags_;
- const char* name_;
- bool code_generated_;
- ZoneVector<Variable::Impl*> variables_;
-
- DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
-
-class CodeStubAssembler::Label {
- public:
- enum Type { kDeferred, kNonDeferred };
-
- explicit Label(CodeStubAssembler* assembler,
- CodeStubAssembler::Label::Type type =
- CodeStubAssembler::Label::kNonDeferred)
- : CodeStubAssembler::Label(assembler, 0, nullptr, type) {}
- Label(CodeStubAssembler* assembler,
- CodeStubAssembler::Variable* merged_variable,
- CodeStubAssembler::Label::Type type =
- CodeStubAssembler::Label::kNonDeferred)
- : CodeStubAssembler::Label(assembler, 1, &merged_variable, type) {}
- Label(CodeStubAssembler* assembler, int merged_variable_count,
- CodeStubAssembler::Variable** merged_variables,
- CodeStubAssembler::Label::Type type =
- CodeStubAssembler::Label::kNonDeferred);
- ~Label() {}
-
- private:
- friend class CodeStubAssembler;
-
- void Bind();
- void MergeVariables();
-
- bool bound_;
- size_t merge_count_;
- CodeStubAssembler* assembler_;
- RawMachineLabel* label_;
- // Map of variables that need to be merged to their phi nodes (or placeholders
- // for those phis).
- std::map<Variable::Impl*, Node*> variable_phis_;
- // Map of variables to the list of value nodes that have been added from each
- // merge path in their order of merging.
- std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_CODE_STUB_ASSEMBLER_H_
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
index a0ae6e8ad7..fa4ca34468 100644
--- a/deps/v8/src/compiler/common-node-cache.cc
+++ b/deps/v8/src/compiler/common-node-cache.cc
@@ -17,7 +17,7 @@ Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
- return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
+ return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
}
@@ -29,6 +29,8 @@ void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
external_constants_.GetCachedNodes(nodes);
number_constants_.GetCachedNodes(nodes);
heap_constants_.GetCachedNodes(nodes);
+ relocatable_int32_constants_.GetCachedNodes(nodes);
+ relocatable_int64_constants_.GetCachedNodes(nodes);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index 720bc1531d..1f07703e72 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -52,6 +52,16 @@ class CommonNodeCache final {
Node** FindHeapConstant(Handle<HeapObject> value);
+ Node** FindRelocatableInt32Constant(int32_t value, RelocInfoMode rmode) {
+ return relocatable_int32_constants_.Find(zone(),
+ std::make_pair(value, rmode));
+ }
+
+ Node** FindRelocatableInt64Constant(int64_t value, RelocInfoMode rmode) {
+ return relocatable_int64_constants_.Find(zone(),
+ std::make_pair(value, rmode));
+ }
+
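+ // Note: both caches key on the (value, rmode) pair, so the same constant
+ // value with different relocation modes yields distinct cached nodes.
+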
// Return all nodes from the cache.
void GetCachedNodes(ZoneVector<Node*>* nodes);
@@ -65,6 +75,8 @@ class CommonNodeCache final {
IntPtrNodeCache external_constants_;
Int64NodeCache number_constants_;
IntPtrNodeCache heap_constants_;
+ RelocInt32NodeCache relocatable_int32_constants_;
+ RelocInt64NodeCache relocatable_int64_constants_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 22e16a27f2..9527c754e4 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -19,18 +19,12 @@ namespace compiler {
namespace {
-enum class Decision { kUnknown, kTrue, kFalse };
-
Decision DecideCondition(Node* const cond) {
switch (cond->opcode()) {
case IrOpcode::kInt32Constant: {
Int32Matcher mcond(cond);
return mcond.Value() ? Decision::kTrue : Decision::kFalse;
}
- case IrOpcode::kInt64Constant: {
- Int64Matcher mcond(cond);
- return mcond.Value() ? Decision::kTrue : Decision::kFalse;
- }
case IrOpcode::kHeapConstant: {
HeapObjectMatcher mcond(cond);
return mcond.Value()->BooleanValue() ? Decision::kTrue : Decision::kFalse;
@@ -70,8 +64,6 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return ReduceReturn(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
- case IrOpcode::kGuard:
- return ReduceGuard(node);
default:
break;
}
@@ -130,6 +122,7 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
node->opcode() == IrOpcode::kDeoptimizeUnless);
bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
+ DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
Node* condition = NodeProperties::GetValueInput(node, 0);
Node* frame_state = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -141,20 +134,22 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(node, condition_is_true
- ? common()->DeoptimizeIf()
- : common()->DeoptimizeUnless());
+ ? common()->DeoptimizeIf(reason)
+ : common()->DeoptimizeUnless(reason));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
if (decision == Decision::kUnknown) return NoChange();
if (condition_is_true == (decision == Decision::kTrue)) {
- return Replace(control);
+ ReplaceWithValue(node, dead(), effect, control);
+ } else {
+ control =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
}
- control = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), control);
- Revisit(graph()->end());
return Replace(dead());
}
@@ -252,17 +247,6 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
return Change(node, machine()->Float32Abs(), vtrue);
}
}
- if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->Float32Min().IsSupported()) {
- // We might now be able to further reduce the {merge} node.
- Revisit(merge);
- return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
- } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->Float32Max().IsSupported()) {
- // We might now be able to further reduce the {merge} node.
- Revisit(merge);
- return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
- }
} else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
Float64BinopMatcher mcond(cond);
if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
@@ -274,17 +258,6 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
return Change(node, machine()->Float64Abs(), vtrue);
}
}
- if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->Float64Min().IsSupported()) {
- // We might now be able to further reduce the {merge} node.
- Revisit(merge);
- return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
- } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->Float64Max().IsSupported()) {
- // We might now be able to further reduce the {merge} node.
- Revisit(merge);
- return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
- }
}
}
}
@@ -308,8 +281,16 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(IrOpcode::kReturn, node->opcode());
Node* const value = node->InputAt(0);
- Node* const effect = node->InputAt(1);
- Node* const control = node->InputAt(2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ bool changed = false;
+ if (effect->opcode() == IrOpcode::kCheckpoint) {
+ // A {Return} node can never be used to insert a deoptimization point,
+ // hence checkpoints can be cut out of the effect chain flowing into it.
+ effect = NodeProperties::GetEffectInput(effect);
+ NodeProperties::ReplaceEffectInput(node, effect);
+ changed = true;
+ }
if (value->opcode() == IrOpcode::kPhi &&
NodeProperties::GetControlInput(value) == control &&
effect->opcode() == IrOpcode::kEffectPhi &&
@@ -334,7 +315,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
Replace(control, dead());
return Replace(dead());
}
- return NoChange();
+ return changed ? Changed(node) : NoChange();
}
@@ -362,13 +343,6 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
return Change(node, machine()->Float32Abs(), vtrue);
}
}
- if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->Float32Min().IsSupported()) {
- return Change(node, machine()->Float32Min().op(), vtrue, vfalse);
- } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->Float32Max().IsSupported()) {
- return Change(node, machine()->Float32Max().op(), vtrue, vfalse);
- }
break;
}
case IrOpcode::kFloat64LessThan: {
@@ -380,13 +354,6 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
return Change(node, machine()->Float64Abs(), vtrue);
}
}
- if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
- machine()->Float64Min().IsSupported()) {
- return Change(node, machine()->Float64Min().op(), vtrue, vfalse);
- } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
- machine()->Float64Max().IsSupported()) {
- return Change(node, machine()->Float64Max().op(), vtrue, vfalse);
- }
break;
}
default:
@@ -396,16 +363,6 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
}
-Reduction CommonOperatorReducer::ReduceGuard(Node* node) {
- DCHECK_EQ(IrOpcode::kGuard, node->opcode());
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetTypeOrAny(input);
- Type* const guard_type = OpParameter<Type*>(node);
- if (input_type->Is(guard_type)) return Replace(input);
- return NoChange();
-}
-
-
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
node->ReplaceInput(0, a);
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 49d9f1dd8e..b7aeeb7e3e 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -36,7 +36,6 @@ class CommonOperatorReducer final : public AdvancedReducer {
Reduction ReducePhi(Node* node);
Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
- Reduction ReduceGuard(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 3bb1b34495..f732375a68 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -35,10 +35,14 @@ BranchHint BranchHintOf(const Operator* const op) {
return OpParameter<BranchHint>(op);
}
+DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
+ DCHECK(op->opcode() == IrOpcode::kDeoptimizeIf ||
+ op->opcode() == IrOpcode::kDeoptimizeUnless);
+ return OpParameter<DeoptimizeReason>(op);
+}
size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
-
std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
switch (kind) {
case DeoptimizeKind::kEager:
@@ -50,25 +54,25 @@ std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
return os;
}
-
-DeoptimizeKind DeoptimizeKindOf(const Operator* const op) {
- DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
- return OpParameter<DeoptimizeKind>(op);
+bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
+ return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason();
}
+bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
+ return !(lhs == rhs);
+}
-size_t hash_value(IfExceptionHint hint) { return static_cast<size_t>(hint); }
+size_t hash_value(DeoptimizeParameters p) {
+ return base::hash_combine(p.kind(), p.reason());
+}
+std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
+ return os << p.kind() << ":" << p.reason();
+}
-std::ostream& operator<<(std::ostream& os, IfExceptionHint hint) {
- switch (hint) {
- case IfExceptionHint::kLocallyCaught:
- return os << "Caught";
- case IfExceptionHint::kLocallyUncaught:
- return os << "Uncaught";
- }
- UNREACHABLE();
- return os;
+DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
+ DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+ return OpParameter<DeoptimizeParameters>(op);
}
@@ -98,6 +102,11 @@ SelectParameters const& SelectParametersOf(const Operator* const op) {
return OpParameter<SelectParameters>(op);
}
+CallDescriptor const* CallDescriptorOf(const Operator* const op) {
+ DCHECK(op->opcode() == IrOpcode::kCall ||
+ op->opcode() == IrOpcode::kTailCall);
+ return OpParameter<CallDescriptor const*>(op);
+}
size_t ProjectionIndexOf(const Operator* const op) {
DCHECK_EQ(IrOpcode::kProjection, op->opcode());
@@ -142,20 +151,82 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
return os;
}
-#define CACHED_OP_LIST(V) \
- V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
- V(DeoptimizeIf, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
- V(DeoptimizeUnless, Operator::kFoldable, 2, 1, 1, 0, 0, 1) \
- V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
- V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
- V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
- V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
- V(BeginRegion, Operator::kNoThrow, 0, 1, 0, 0, 1, 0) \
- V(FinishRegion, Operator::kNoThrow, 1, 1, 0, 1, 1, 0)
+bool operator==(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs) {
+ return lhs.rmode() == rhs.rmode() && lhs.value() == rhs.value() &&
+ lhs.type() == rhs.type();
+}
+
+bool operator!=(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(RelocatablePtrConstantInfo const& p) {
+ return base::hash_combine(p.value(), p.rmode(), p.type());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ RelocatablePtrConstantInfo const& p) {
+ return os << p.value() << "|" << p.rmode() << "|" << p.type();
+}
+
+size_t hash_value(RegionObservability observability) {
+ return static_cast<size_t>(observability);
+}
+
+std::ostream& operator<<(std::ostream& os, RegionObservability observability) {
+ switch (observability) {
+ case RegionObservability::kObservable:
+ return os << "observable";
+ case RegionObservability::kNotObservable:
+ return os << "not-observable";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+RegionObservability RegionObservabilityOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kBeginRegion, op->opcode());
+ return OpParameter<RegionObservability>(op);
+}
+
+Type* TypeGuardTypeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kTypeGuard, op->opcode());
+ return OpParameter<Type*>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const ZoneVector<MachineType>* types) {
+ // Print all the MachineTypes, separated by commas.
+ bool first = true;
+ for (MachineType elem : *types) {
+ if (!first) {
+ os << ", ";
+ }
+ first = false;
+ os << elem;
+ }
+ return os;
+}
+
+#define CACHED_OP_LIST(V) \
+ V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
+ V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfException, Operator::kKontrol, 0, 1, 1, 1, 1, 1) \
+ V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1) \
+ V(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1) \
+ V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
+ V(OsrLoopEntry, Operator::kFoldable | Operator::kNoThrow, 0, 1, 1, 0, 1, 1) \
+ V(LoopExit, Operator::kKontrol, 0, 0, 2, 0, 0, 1) \
+ V(LoopExitValue, Operator::kPure, 1, 0, 1, 1, 0, 0) \
+ V(LoopExitEffect, Operator::kNoThrow, 0, 1, 1, 0, 1, 0) \
+ V(Checkpoint, Operator::kKontrol, 0, 1, 1, 0, 1, 0) \
+ V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0) \
+ V(Retain, Operator::kKontrol, 1, 1, 0, 0, 1, 0)
#define CACHED_RETURN_LIST(V) \
V(1) \
@@ -182,6 +253,11 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
V(5) \
V(6)
+#define CACHED_INDUCTION_VARIABLE_PHI_LIST(V) \
+ V(4) \
+ V(5) \
+ V(6) \
+ V(7)
#define CACHED_LOOP_LIST(V) \
V(1) \
@@ -198,6 +274,30 @@ std::ostream& operator<<(std::ostream& os, ParameterInfo const& i) {
V(7) \
V(8)
+#define CACHED_DEOPTIMIZE_LIST(V) \
+ V(Eager, MinusZero) \
+ V(Eager, NoReason) \
+ V(Eager, WrongMap) \
+ V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
+ V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
+
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+ V(DivisionByZero) \
+ V(Hole) \
+ V(MinusZero) \
+ V(Overflow) \
+ V(Smi)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+ V(LostPrecision) \
+ V(LostPrecisionOrNaN) \
+ V(NoReason) \
+ V(NotAHeapNumber) \
+ V(NotAHeapNumberUndefinedBoolean) \
+ V(NotASmi) \
+ V(OutOfBounds) \
+ V(WrongInstanceType) \
+ V(WrongMap)
#define CACHED_PARAMETER_LIST(V) \
V(0) \
@@ -258,30 +358,6 @@ struct CommonOperatorGlobalCache final {
CACHED_OP_LIST(CACHED)
#undef CACHED
- template <DeoptimizeKind kKind>
- struct DeoptimizeOperator final : public Operator1<DeoptimizeKind> {
- DeoptimizeOperator()
- : Operator1<DeoptimizeKind>( // --
- IrOpcode::kDeoptimize, Operator::kNoThrow, // opcode
- "Deoptimize", // name
- 1, 1, 1, 0, 0, 1, // counts
- kKind) {} // parameter
- };
- DeoptimizeOperator<DeoptimizeKind::kEager> kDeoptimizeEagerOperator;
- DeoptimizeOperator<DeoptimizeKind::kSoft> kDeoptimizeSoftOperator;
-
- template <IfExceptionHint kCaughtLocally>
- struct IfExceptionOperator final : public Operator1<IfExceptionHint> {
- IfExceptionOperator()
- : Operator1<IfExceptionHint>( // --
- IrOpcode::kIfException, Operator::kKontrol, // opcode
- "IfException", // name
- 0, 1, 1, 1, 1, 1, // counts
- kCaughtLocally) {} // parameter
- };
- IfExceptionOperator<IfExceptionHint::kLocallyCaught> kIfExceptionCOperator;
- IfExceptionOperator<IfExceptionHint::kLocallyUncaught> kIfExceptionUOperator;
-
template <size_t kInputCount>
struct EndOperator final : public Operator {
EndOperator()
@@ -324,16 +400,30 @@ struct CommonOperatorGlobalCache final {
template <int kEffectInputCount>
struct EffectPhiOperator final : public Operator {
EffectPhiOperator()
- : Operator( // --
- IrOpcode::kEffectPhi, Operator::kPure, // opcode
- "EffectPhi", // name
- 0, kEffectInputCount, 1, 0, 1, 0) {} // counts
+ : Operator( // --
+ IrOpcode::kEffectPhi, Operator::kKontrol, // opcode
+ "EffectPhi", // name
+ 0, kEffectInputCount, 1, 0, 1, 0) {} // counts
};
#define CACHED_EFFECT_PHI(input_count) \
EffectPhiOperator<input_count> kEffectPhi##input_count##Operator;
CACHED_EFFECT_PHI_LIST(CACHED_EFFECT_PHI)
#undef CACHED_EFFECT_PHI
+ template <RegionObservability kRegionObservability>
+ struct BeginRegionOperator final : public Operator1<RegionObservability> {
+ BeginRegionOperator()
+ : Operator1<RegionObservability>( // --
+ IrOpcode::kBeginRegion, Operator::kKontrol, // opcode
+ "BeginRegion", // name
+ 0, 1, 0, 0, 1, 0, // counts
+ kRegionObservability) {} // parameter
+ };
+ BeginRegionOperator<RegionObservability::kObservable>
+ kBeginRegionObservableOperator;
+ BeginRegionOperator<RegionObservability::kNotObservable>
+ kBeginRegionNotObservableOperator;
+
template <size_t kInputCount>
struct LoopOperator final : public Operator {
LoopOperator()
@@ -360,6 +450,54 @@ struct CommonOperatorGlobalCache final {
CACHED_MERGE_LIST(CACHED_MERGE)
#undef CACHED_MERGE
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+ struct DeoptimizeOperator final : public Operator1<DeoptimizeParameters> {
+ DeoptimizeOperator()
+ : Operator1<DeoptimizeParameters>( // --
+ IrOpcode::kDeoptimize, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "Deoptimize", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ DeoptimizeParameters(kKind, kReason)) {} // parameter
+ };
+#define CACHED_DEOPTIMIZE(Kind, Reason) \
+ DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+ kDeoptimize##Kind##Reason##Operator;
+ CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
+#undef CACHED_DEOPTIMIZE
+
+ template <DeoptimizeReason kReason>
+ struct DeoptimizeIfOperator final : public Operator1<DeoptimizeReason> {
+ DeoptimizeIfOperator()
+ : Operator1<DeoptimizeReason>( // --
+ IrOpcode::kDeoptimizeIf, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "DeoptimizeIf", // name
+ 2, 1, 1, 0, 1, 1, // counts
+ kReason) {} // parameter
+ };
+#define CACHED_DEOPTIMIZE_IF(Reason) \
+ DeoptimizeIfOperator<DeoptimizeReason::k##Reason> \
+ kDeoptimizeIf##Reason##Operator;
+ CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
+#undef CACHED_DEOPTIMIZE_IF
+
+ template <DeoptimizeReason kReason>
+ struct DeoptimizeUnlessOperator final : public Operator1<DeoptimizeReason> {
+ DeoptimizeUnlessOperator()
+ : Operator1<DeoptimizeReason>( // --
+ IrOpcode::kDeoptimizeUnless, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "DeoptimizeUnless", // name
+ 2, 1, 1, 0, 1, 1, // counts
+ kReason) {} // parameter
+ };
+#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
+ DeoptimizeUnlessOperator<DeoptimizeReason::k##Reason> \
+ kDeoptimizeUnless##Reason##Operator;
+ CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
+#undef CACHED_DEOPTIMIZE_UNLESS
+
template <MachineRepresentation kRep, int kInputCount>
struct PhiOperator final : public Operator1<MachineRepresentation> {
PhiOperator()
@@ -375,6 +513,20 @@ struct CommonOperatorGlobalCache final {
CACHED_PHI_LIST(CACHED_PHI)
#undef CACHED_PHI
+ template <int kInputCount>
+ struct InductionVariablePhiOperator final : public Operator {
+ InductionVariablePhiOperator()
+ : Operator( // --
+ IrOpcode::kInductionVariablePhi, Operator::kPure, // opcode
+ "InductionVariablePhi", // name
+ kInputCount, 0, 1, 1, 0, 0) {} // counts
+ };
+#define CACHED_INDUCTION_VARIABLE_PHI(input_count) \
+ InductionVariablePhiOperator<input_count> \
+ kInductionVariablePhi##input_count##Operator;
+ CACHED_INDUCTION_VARIABLE_PHI_LIST(CACHED_INDUCTION_VARIABLE_PHI)
+#undef CACHED_INDUCTION_VARIABLE_PHI
+
template <int kIndex>
struct ParameterOperator final : public Operator1<ParameterInfo> {
ParameterOperator()
@@ -396,7 +548,7 @@ struct CommonOperatorGlobalCache final {
IrOpcode::kProjection, // opcode
Operator::kPure, // flags
"Projection", // name
- 1, 0, 0, 1, 0, 0, // counts,
+ 1, 0, 1, 1, 0, 0, // counts,
kIndex) {} // parameter
};
#define CACHED_PROJECTION(index) \
@@ -487,28 +639,62 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
return nullptr;
}
-
-const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind) {
- switch (kind) {
- case DeoptimizeKind::kEager:
- return &cache_.kDeoptimizeEagerOperator;
- case DeoptimizeKind::kSoft:
- return &cache_.kDeoptimizeSoftOperator;
+const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
+ DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason) { \
+ return &cache_.kDeoptimize##Kind##Reason##Operator; \
}
- UNREACHABLE();
- return nullptr;
+ CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
+#undef CACHED_DEOPTIMIZE
+ // Uncached
+ DeoptimizeParameters parameter(kind, reason);
+ return new (zone()) Operator1<DeoptimizeParameters>( // --
+ IrOpcode::kDeoptimize, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "Deoptimize", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ parameter); // parameter
}
+const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeReason reason) {
+ switch (reason) {
+#define CACHED_DEOPTIMIZE_IF(Reason) \
+ case DeoptimizeReason::k##Reason: \
+ return &cache_.kDeoptimizeIf##Reason##Operator;
+ CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
+#undef CACHED_DEOPTIMIZE_IF
+ default:
+ break;
+ }
+ // Uncached
+ return new (zone()) Operator1<DeoptimizeReason>( // --
+ IrOpcode::kDeoptimizeIf, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "DeoptimizeIf", // name
+ 2, 1, 1, 0, 1, 1, // counts
+ reason); // parameter
+}
-const Operator* CommonOperatorBuilder::IfException(IfExceptionHint hint) {
- switch (hint) {
- case IfExceptionHint::kLocallyCaught:
- return &cache_.kIfExceptionCOperator;
- case IfExceptionHint::kLocallyUncaught:
- return &cache_.kIfExceptionUOperator;
+const Operator* CommonOperatorBuilder::DeoptimizeUnless(
+ DeoptimizeReason reason) {
+ switch (reason) {
+#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
+ case DeoptimizeReason::k##Reason: \
+ return &cache_.kDeoptimizeUnless##Reason##Operator;
+ CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
+#undef CACHED_DEOPTIMIZE_UNLESS
+ default:
+ break;
}
- UNREACHABLE();
- return nullptr;
+ // Uncached
+ return new (zone()) Operator1<DeoptimizeReason>( // --
+ IrOpcode::kDeoptimizeUnless, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "DeoptimizeUnless", // name
+ 2, 1, 1, 0, 1, 1, // counts
+ reason); // parameter
}
@@ -530,10 +716,10 @@ const Operator* CommonOperatorBuilder::IfValue(int32_t index) {
const Operator* CommonOperatorBuilder::Start(int value_output_count) {
- return new (zone()) Operator( // --
- IrOpcode::kStart, Operator::kFoldable, // opcode
- "Start", // name
- 0, 0, 0, value_output_count, 1, 1); // counts
+ return new (zone()) Operator( // --
+ IrOpcode::kStart, Operator::kFoldable | Operator::kNoThrow, // opcode
+ "Start", // name
+ 0, 0, 0, value_output_count, 1, 1); // counts
}
@@ -668,6 +854,23 @@ const Operator* CommonOperatorBuilder::HeapConstant(
value); // parameter
}
+const Operator* CommonOperatorBuilder::RelocatableInt32Constant(
+ int32_t value, RelocInfo::Mode rmode) {
+ return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
+ IrOpcode::kRelocatableInt32Constant, Operator::kPure, // opcode
+ "RelocatableInt32Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ RelocatablePtrConstantInfo(value, rmode)); // parameter
+}
+
+const Operator* CommonOperatorBuilder::RelocatableInt64Constant(
+ int64_t value, RelocInfo::Mode rmode) {
+ return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
+ IrOpcode::kRelocatableInt64Constant, Operator::kPure, // opcode
+ "RelocatableInt64Constant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ RelocatablePtrConstantInfo(value, rmode)); // parameter
+}
const Operator* CommonOperatorBuilder::Select(MachineRepresentation rep,
BranchHint hint) {
@@ -697,6 +900,13 @@ const Operator* CommonOperatorBuilder::Phi(MachineRepresentation rep,
rep); // parameter
}
+const Operator* CommonOperatorBuilder::TypeGuard(Type* type) {
+ return new (zone()) Operator1<Type*>( // --
+ IrOpcode::kTypeGuard, Operator::kPure, // opcode
+ "TypeGuard", // name
+ 1, 0, 1, 1, 0, 0, // counts
+ type); // parameter
+}
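+
+// (TypeGuard takes over the role of the removed Guard operator: same shape,
+// one value input, one control input and one value output, but marked kPure
+// rather than kKontrol.)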
const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
DCHECK(effect_input_count > 0); // Disallow empty effect phis.
@@ -710,31 +920,43 @@ const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
break;
}
// Uncached.
- return new (zone()) Operator( // --
- IrOpcode::kEffectPhi, Operator::kPure, // opcode
- "EffectPhi", // name
- 0, effect_input_count, 1, 0, 1, 0); // counts
+ return new (zone()) Operator( // --
+ IrOpcode::kEffectPhi, Operator::kKontrol, // opcode
+ "EffectPhi", // name
+ 0, effect_input_count, 1, 0, 1, 0); // counts
}
-
-const Operator* CommonOperatorBuilder::Guard(Type* type) {
- return new (zone()) Operator1<Type*>( // --
- IrOpcode::kGuard, Operator::kKontrol, // opcode
- "Guard", // name
- 1, 0, 1, 1, 0, 0, // counts
- type); // parameter
+const Operator* CommonOperatorBuilder::InductionVariablePhi(int input_count) {
+ DCHECK(input_count >= 4); // There must always be the entry, backedge,
+ // increment and at least one bound.
+ switch (input_count) {
+#define CACHED_INDUCTION_VARIABLE_PHI(input_count) \
+ case input_count: \
+ return &cache_.kInductionVariablePhi##input_count##Operator;
+ CACHED_INDUCTION_VARIABLE_PHI_LIST(CACHED_INDUCTION_VARIABLE_PHI)
+#undef CACHED_INDUCTION_VARIABLE_PHI
+ default:
+ break;
+ }
+ // Uncached.
+ return new (zone()) Operator( // --
+ IrOpcode::kInductionVariablePhi, Operator::kPure, // opcode
+ "InductionVariablePhi", // name
+ input_count, 0, 1, 1, 0, 0); // counts
}
-
-const Operator* CommonOperatorBuilder::EffectSet(int arguments) {
- DCHECK(arguments > 1); // Disallow empty/singleton sets.
- return new (zone()) Operator( // --
- IrOpcode::kEffectSet, Operator::kPure, // opcode
- "EffectSet", // name
- 0, arguments, 0, 0, 1, 0); // counts
+const Operator* CommonOperatorBuilder::BeginRegion(
+ RegionObservability region_observability) {
+ switch (region_observability) {
+ case RegionObservability::kObservable:
+ return &cache_.kBeginRegionObservableOperator;
+ case RegionObservability::kNotObservable:
+ return &cache_.kBeginRegionNotObservableOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
}
-
const Operator* CommonOperatorBuilder::StateValues(int arguments) {
switch (arguments) {
#define CACHED_STATE_VALUES(arguments) \
@@ -795,7 +1017,7 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
Operator::ZeroIfPure(descriptor->properties()),
Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
- void PrintParameter(std::ostream& os) const override {
+ void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
@@ -809,11 +1031,12 @@ const Operator* CommonOperatorBuilder::TailCall(
public:
explicit TailCallOperator(const CallDescriptor* descriptor)
: Operator1<const CallDescriptor*>(
- IrOpcode::kTailCall, descriptor->properties(), "TailCall",
+ IrOpcode::kTailCall,
+ descriptor->properties() | Operator::kNoThrow, "TailCall",
descriptor->InputCount() + descriptor->FrameStateCount(), 1, 1, 0,
0, 1, descriptor) {}
- void PrintParameter(std::ostream& os) const override {
+ void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
os << "[" << *parameter() << "]";
}
};
@@ -832,12 +1055,12 @@ const Operator* CommonOperatorBuilder::Projection(size_t index) {
break;
}
// Uncached.
- return new (zone()) Operator1<size_t>( // --
- IrOpcode::kProjection, // opcode
- Operator::kFoldable | Operator::kNoThrow, // flags
- "Projection", // name
- 1, 0, 0, 1, 0, 0, // counts
- index); // parameter
+ return new (zone()) Operator1<size_t>( // --
+ IrOpcode::kProjection, // opcode
+ Operator::kPure, // flags
+ "Projection", // name
+ 1, 0, 1, 1, 0, 0, // counts
+ index); // parameter
}
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 7c59f47c34..9e4d259bc9 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -5,17 +5,14 @@
#ifndef V8_COMPILER_COMMON_OPERATOR_H_
#define V8_COMPILER_COMMON_OPERATOR_H_
+#include "src/assembler.h"
#include "src/compiler/frame-states.h"
+#include "src/deoptimize-reason.h"
#include "src/machine-type.h"
#include "src/zone-containers.h"
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class ExternalReference;
-class Type;
-
namespace compiler {
// Forward declarations.
@@ -46,6 +43,8 @@ std::ostream& operator<<(std::ostream&, BranchHint);
BranchHint BranchHintOf(const Operator* const);
+// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
+DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
// Deoptimize bailout kind.
enum class DeoptimizeKind : uint8_t { kEager, kSoft };
@@ -54,15 +53,28 @@ size_t hash_value(DeoptimizeKind kind);
std::ostream& operator<<(std::ostream&, DeoptimizeKind);
-DeoptimizeKind DeoptimizeKindOf(const Operator* const);
+// Parameters for the {Deoptimize} operator.
+class DeoptimizeParameters final {
+ public:
+ DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason)
+ : kind_(kind), reason_(reason) {}
+
+ DeoptimizeKind kind() const { return kind_; }
+ DeoptimizeReason reason() const { return reason_; }
+
+ private:
+ DeoptimizeKind const kind_;
+ DeoptimizeReason const reason_;
+};
+bool operator==(DeoptimizeParameters, DeoptimizeParameters);
+bool operator!=(DeoptimizeParameters, DeoptimizeParameters);
-// Prediction whether throw-site is surrounded by any local catch-scope.
-enum class IfExceptionHint { kLocallyUncaught, kLocallyCaught };
+size_t hash_value(DeoptimizeParameters p);
-size_t hash_value(IfExceptionHint hint);
+std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
-std::ostream& operator<<(std::ostream&, IfExceptionHint);
+DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const);
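+
+// A sketch of the intended use: an operator built as, e.g.,
+//   common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kMinusZero)
+// carries a DeoptimizeParameters payload, and DeoptimizeParametersOf()
+// recovers it when a reducer needs to thread the reason through.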
class SelectParameters final {
@@ -88,6 +100,7 @@ std::ostream& operator<<(std::ostream&, SelectParameters const& p);
SelectParameters const& SelectParametersOf(const Operator* const);
+CallDescriptor const* CallDescriptorOf(const Operator* const);
size_t ProjectionIndexOf(const Operator* const);
@@ -114,6 +127,49 @@ std::ostream& operator<<(std::ostream&, ParameterInfo const&);
int ParameterIndexOf(const Operator* const);
const ParameterInfo& ParameterInfoOf(const Operator* const);
+class RelocatablePtrConstantInfo final {
+ public:
+ enum Type { kInt32, kInt64 };
+
+ RelocatablePtrConstantInfo(int32_t value, RelocInfo::Mode rmode)
+ : value_(value), rmode_(rmode), type_(kInt32) {}
+ RelocatablePtrConstantInfo(int64_t value, RelocInfo::Mode rmode)
+ : value_(value), rmode_(rmode), type_(kInt64) {}
+
+ intptr_t value() const { return value_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+ Type type() const { return type_; }
+
+ private:
+ intptr_t value_;
+ RelocInfo::Mode rmode_;
+ Type type_;
+};
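+
+// (Equality, hashing and printing below take all three fields into account,
+// so constants that differ only in relocation mode or width are not unified.)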
+
+bool operator==(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs);
+bool operator!=(RelocatablePtrConstantInfo const& lhs,
+ RelocatablePtrConstantInfo const& rhs);
+
+std::ostream& operator<<(std::ostream&, RelocatablePtrConstantInfo const&);
+
+size_t hash_value(RelocatablePtrConstantInfo const& p);
+
+// Used to mark a region (as identified by BeginRegion/FinishRegion) as either
+// JavaScript-observable or not (i.e. allocations are not JavaScript observable
+// themselves, but transitioning stores are).
+enum class RegionObservability : uint8_t { kObservable, kNotObservable };
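+
+// A minimal sketch of how a lowering might use this to hide an allocation's
+// initializing stores from JavaScript (hypothetical graph-building code):
+//
+//   effect = graph()->NewNode(
+//       common()->BeginRegion(RegionObservability::kNotObservable), effect);
+//   // ... allocation and initializing stores ...
+//   value = graph()->NewNode(common()->FinishRegion(), object, effect);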
+
+size_t hash_value(RegionObservability);
+
+std::ostream& operator<<(std::ostream&, RegionObservability);
+
+RegionObservability RegionObservabilityOf(Operator const*) WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream& os,
+ const ZoneVector<MachineType>* types);
+
+Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
@@ -127,14 +183,14 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
- const Operator* IfException(IfExceptionHint hint);
+ const Operator* IfException();
const Operator* Switch(size_t control_output_count);
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
- const Operator* Deoptimize(DeoptimizeKind kind);
- const Operator* DeoptimizeIf();
- const Operator* DeoptimizeUnless();
+ const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
+ const Operator* DeoptimizeIf(DeoptimizeReason reason);
+ const Operator* DeoptimizeUnless(DeoptimizeReason reason);
const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
@@ -155,13 +211,21 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* NumberConstant(volatile double);
const Operator* HeapConstant(const Handle<HeapObject>&);
+ const Operator* RelocatableInt32Constant(int32_t value,
+ RelocInfo::Mode rmode);
+ const Operator* RelocatableInt64Constant(int64_t value,
+ RelocInfo::Mode rmode);
+
const Operator* Select(MachineRepresentation, BranchHint = BranchHint::kNone);
const Operator* Phi(MachineRepresentation representation,
int value_input_count);
const Operator* EffectPhi(int effect_input_count);
- const Operator* EffectSet(int arguments);
- const Operator* Guard(Type* type);
- const Operator* BeginRegion();
+ const Operator* InductionVariablePhi(int value_input_count);
+ const Operator* LoopExit();
+ const Operator* LoopExitValue();
+ const Operator* LoopExitEffect();
+ const Operator* Checkpoint();
+ const Operator* BeginRegion(RegionObservability);
const Operator* FinishRegion();
const Operator* StateValues(int arguments);
const Operator* ObjectState(int pointer_slots, int id);
@@ -172,6 +236,8 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Call(const CallDescriptor* descriptor);
const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
+ const Operator* Retain();
+ const Operator* TypeGuard(Type* type);
// Constructs a new merge or phi operator with the same opcode as {op}, but
// with {size} inputs.
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index 6905ef589f..b159bb2da7 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -36,6 +36,7 @@ void LoopBuilder::BeginLoop(BitVector* assigned, bool is_osr) {
loop_environment_ = environment()->CopyForLoop(assigned, is_osr);
continue_environment_ = environment()->CopyAsUnreachable();
break_environment_ = environment()->CopyAsUnreachable();
+ assigned_ = assigned;
}
@@ -60,6 +61,7 @@ void LoopBuilder::EndBody() {
void LoopBuilder::EndLoop() {
loop_environment_->Merge(environment());
set_environment(break_environment_);
+ ExitLoop();
}
@@ -82,6 +84,16 @@ void LoopBuilder::BreakWhen(Node* condition) {
control_if.End();
}
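+
+// Note: PrepareForLoopExit is expected to insert the LoopExit control marker
+// and rename values assigned inside the loop through LoopExitValue nodes;
+// pushing the extra value first lets it take part in that renaming.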
+void LoopBuilder::ExitLoop(Node** extra_value_to_rename) {
+ if (extra_value_to_rename) {
+ environment()->Push(*extra_value_to_rename);
+ }
+ environment()->PrepareForLoopExit(loop_environment_->GetControlDependency(),
+ assigned_);
+ if (extra_value_to_rename) {
+ *extra_value_to_rename = environment()->Pop();
+ }
+}
void SwitchBuilder::BeginSwitch() {
body_environment_ = environment()->CopyAsUnreachable();
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index 6ff00be596..a59dcb699a 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -63,7 +63,8 @@ class LoopBuilder final : public ControlBuilder {
: ControlBuilder(builder),
loop_environment_(nullptr),
continue_environment_(nullptr),
- break_environment_(nullptr) {}
+ break_environment_(nullptr),
+ assigned_(nullptr) {}
// Primitive control commands.
void BeginLoop(BitVector* assigned, bool is_osr = false);
@@ -74,6 +75,10 @@ class LoopBuilder final : public ControlBuilder {
// Primitive support for break.
void Break() final;
+ // Loop exit support. Used to introduce explicit loop exit control
+ // node and variable markers.
+ void ExitLoop(Node** extra_value_to_rename = nullptr);
+
// Compound control commands for conditional break.
void BreakUnless(Node* condition);
void BreakWhen(Node* condition);
@@ -82,6 +87,7 @@ class LoopBuilder final : public ControlBuilder {
Environment* loop_environment_; // Environment of the loop header.
Environment* continue_environment_; // Environment after the loop body.
Environment* break_environment_; // Environment after the loop exits.
+ BitVector* assigned_; // Assigned values in the environment.
};
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 3fc3bcefac..6027c8201c 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -63,146 +63,10 @@ void ControlFlowOptimizer::VisitNode(Node* node) {
void ControlFlowOptimizer::VisitBranch(Node* node) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
if (TryBuildSwitch(node)) return;
- if (TryCloneBranch(node)) return;
VisitNode(node);
}
-bool ControlFlowOptimizer::TryCloneBranch(Node* node) {
- DCHECK_EQ(IrOpcode::kBranch, node->opcode());
-
- // This optimization is a special case of (super)block cloning. It takes an
- // input graph as shown below and clones the Branch node for every predecessor
- // to the Merge, essentially removing the Merge completely. This avoids
- // materializing the bit for the Phi and may offer potential for further
- // branch folding optimizations (e.g. because one or more inputs to the Phi
- // are constants). Note that there may be more Phi nodes hanging off the Merge,
- // but we can only handle a certain subset of them currently (actually only Phi and
- // EffectPhi nodes whose uses have either the IfTrue or IfFalse as control
- // input).
-
- // Control1 ... ControlN
- // ^ ^
- // | | Cond1 ... CondN
- // +----+ +----+ ^ ^
- // | | | |
- // | | +----+ |
- // Merge<--+ | +------------+
- // ^ \|/
- // | Phi
- // | |
- // Branch----+
- // ^
- // |
- // +-----+-----+
- // | |
- // IfTrue IfFalse
- // ^ ^
- // | |
-
- // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
-
- // Control1 Cond1 ... ControlN CondN
- // ^ ^ ^ ^
- // \ / \ /
- // Branch ... Branch
- // ^ ^
- // | |
- // +---+---+ +---+----+
- // | | | |
- // IfTrue IfFalse ... IfTrue IfFalse
- // ^ ^ ^ ^
- // | | | |
- // +--+ +-------------+ |
- // | | +--------------+ +--+
- // | | | |
- // Merge Merge
- // ^ ^
- // | |
-
- Node* branch = node;
- Node* cond = NodeProperties::GetValueInput(branch, 0);
- if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return false;
- Node* merge = NodeProperties::GetControlInput(branch);
- if (merge->opcode() != IrOpcode::kMerge ||
- NodeProperties::GetControlInput(cond) != merge) {
- return false;
- }
- // Grab the IfTrue/IfFalse projections of the Branch.
- BranchMatcher matcher(branch);
- // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
- NodeVector phis(zone());
- for (Node* const use : merge->uses()) {
- if (use == branch || use == cond) continue;
- // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
- // Merge. Ideally, we would just clone the nodes (and everything that
- // depends on them to some distant join point), but that requires knowledge
- // about dominance/post-dominance.
- if (!NodeProperties::IsPhi(use)) return false;
- for (Edge edge : use->use_edges()) {
- // Right now we can only handle Phi/EffectPhi nodes whose uses are
- // directly control-dependent on either the IfTrue or the IfFalse
- // successor, because we know exactly how to update those uses.
- // TODO(turbofan): Generalize this to all Phi/EffectPhi nodes using
- // dominance/post-dominance on the sea of nodes.
- if (edge.from()->op()->ControlInputCount() != 1) return false;
- Node* control = NodeProperties::GetControlInput(edge.from());
- if (NodeProperties::IsPhi(edge.from())) {
- control = NodeProperties::GetControlInput(control, edge.index());
- }
- if (control != matcher.IfTrue() && control != matcher.IfFalse())
- return false;
- }
- phis.push_back(use);
- }
- BranchHint const hint = BranchHintOf(branch->op());
- int const input_count = merge->op()->ControlInputCount();
- DCHECK_LE(1, input_count);
- Node** const inputs = zone()->NewArray<Node*>(2 * input_count);
- Node** const merge_true_inputs = &inputs[0];
- Node** const merge_false_inputs = &inputs[input_count];
- for (int index = 0; index < input_count; ++index) {
- Node* cond1 = NodeProperties::GetValueInput(cond, index);
- Node* control1 = NodeProperties::GetControlInput(merge, index);
- Node* branch1 = graph()->NewNode(common()->Branch(hint), cond1, control1);
- merge_true_inputs[index] = graph()->NewNode(common()->IfTrue(), branch1);
- merge_false_inputs[index] = graph()->NewNode(common()->IfFalse(), branch1);
- Enqueue(branch1);
- }
- Node* const merge_true = graph()->NewNode(common()->Merge(input_count),
- input_count, merge_true_inputs);
- Node* const merge_false = graph()->NewNode(common()->Merge(input_count),
- input_count, merge_false_inputs);
- for (Node* const phi : phis) {
- for (int index = 0; index < input_count; ++index) {
- inputs[index] = phi->InputAt(index);
- }
- inputs[input_count] = merge_true;
- Node* phi_true = graph()->NewNode(phi->op(), input_count + 1, inputs);
- inputs[input_count] = merge_false;
- Node* phi_false = graph()->NewNode(phi->op(), input_count + 1, inputs);
- for (Edge edge : phi->use_edges()) {
- Node* control = NodeProperties::GetControlInput(edge.from());
- if (NodeProperties::IsPhi(edge.from())) {
- control = NodeProperties::GetControlInput(control, edge.index());
- }
- DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
- edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
- }
- phi->Kill();
- }
- // Fix up IfTrue and IfFalse and kill all dead nodes.
- matcher.IfFalse()->ReplaceUses(merge_false);
- matcher.IfTrue()->ReplaceUses(merge_true);
- matcher.IfFalse()->Kill();
- matcher.IfTrue()->Kill();
- branch->Kill();
- cond->Kill();
- merge->Kill();
- return true;
-}
-
-
bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 697d7f870e..81bf2997e6 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -28,6 +28,8 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
case IrOpcode::kLoop:
case IrOpcode::kMerge:
return ReduceLoopOrMerge(node);
+ case IrOpcode::kLoopExit:
+ return ReduceLoopExit(node);
default:
return ReduceNode(node);
}
@@ -96,6 +98,9 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
for (Node* const use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
Replace(use, use->InputAt(0));
+ } else if (use->opcode() == IrOpcode::kLoopExit &&
+ use->InputAt(1) == node) {
+ RemoveLoopExit(use);
} else if (use->opcode() == IrOpcode::kTerminate) {
DCHECK_EQ(IrOpcode::kLoop, node->opcode());
Replace(use, dead());
@@ -121,6 +126,18 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
return NoChange();
}
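+
+// Once a loop is dead its exit markers are redundant: each LoopExitValue and
+// LoopExitEffect just forwards its first (renamed) input, and the LoopExit
+// itself collapses onto its non-loop control input.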
+Reduction DeadCodeElimination::RemoveLoopExit(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
+ for (Node* const use : node->uses()) {
+ if (use->opcode() == IrOpcode::kLoopExitValue ||
+ use->opcode() == IrOpcode::kLoopExitEffect) {
+ Replace(use, use->InputAt(0));
+ }
+ }
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ Replace(node, control);
+ return Replace(control);
+}
Reduction DeadCodeElimination::ReduceNode(Node* node) {
// If {node} has exactly one control input and this is {Dead},
@@ -133,6 +150,15 @@ Reduction DeadCodeElimination::ReduceNode(Node* node) {
return NoChange();
}
+Reduction DeadCodeElimination::ReduceLoopExit(Node* node) {
+ Node* control = NodeProperties::GetControlInput(node, 0);
+ Node* loop = NodeProperties::GetControlInput(node, 1);
+ if (control->opcode() == IrOpcode::kDead ||
+ loop->opcode() == IrOpcode::kDead) {
+ return RemoveLoopExit(node);
+ }
+ return NoChange();
+}
void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
const Operator* const op = common()->ResizeMergeOrPhi(node->op(), size);
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index e5996c88ff..8e18561b4b 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -30,8 +30,11 @@ class DeadCodeElimination final : public AdvancedReducer {
private:
Reduction ReduceEnd(Node* node);
Reduction ReduceLoopOrMerge(Node* node);
+ Reduction ReduceLoopExit(Node* node);
Reduction ReduceNode(Node* node);
+ Reduction RemoveLoopExit(Node* node);
+
void TrimMergeOrPhi(Node* node, int size);
Graph* graph() const { return graph_; }
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
new file mode 100644
index 0000000000..9cc6ddc4f9
--- /dev/null
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -0,0 +1,3295 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/effect-control-linearizer.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+EffectControlLinearizer::EffectControlLinearizer(JSGraph* js_graph,
+ Schedule* schedule,
+ Zone* temp_zone)
+ : js_graph_(js_graph), schedule_(schedule), temp_zone_(temp_zone) {}
+
+Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
+CommonOperatorBuilder* EffectControlLinearizer::common() const {
+ return js_graph_->common();
+}
+SimplifiedOperatorBuilder* EffectControlLinearizer::simplified() const {
+ return js_graph_->simplified();
+}
+MachineOperatorBuilder* EffectControlLinearizer::machine() const {
+ return js_graph_->machine();
+}
+
+namespace {
+
+struct BlockEffectControlData {
+ Node* current_effect = nullptr; // New effect.
+ Node* current_control = nullptr; // New control.
+ Node* current_frame_state = nullptr; // New frame state.
+};
+
+class BlockEffectControlMap {
+ public:
+ explicit BlockEffectControlMap(Zone* temp_zone) : map_(temp_zone) {}
+
+ BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) {
+ return map_[std::make_pair(from->rpo_number(), to->rpo_number())];
+ }
+
+ const BlockEffectControlData& For(BasicBlock* from, BasicBlock* to) const {
+ return map_.at(std::make_pair(from->rpo_number(), to->rpo_number()));
+ }
+
+ private:
+ typedef std::pair<int32_t, int32_t> Key;
+ typedef ZoneMap<Key, BlockEffectControlData> Map;
+
+ Map map_;
+};
+
+// Effect phis that need to be updated after the first pass.
+struct PendingEffectPhi {
+ Node* effect_phi;
+ BasicBlock* block;
+
+ PendingEffectPhi(Node* effect_phi, BasicBlock* block)
+ : effect_phi(effect_phi), block(block) {}
+};
+
+void UpdateEffectPhi(Node* node, BasicBlock* block,
+ BlockEffectControlMap* block_effects) {
+ // Update all inputs to an effect phi with the effects from the given
+ // block->effect map.
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ DCHECK_EQ(node->op()->EffectInputCount(), block->PredecessorCount());
+ for (int i = 0; i < node->op()->EffectInputCount(); i++) {
+ Node* input = node->InputAt(i);
+ BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
+ const BlockEffectControlData& block_effect =
+ block_effects->For(predecessor, block);
+ if (input != block_effect.current_effect) {
+ node->ReplaceInput(i, block_effect.current_effect);
+ }
+ }
+}
+
+void UpdateBlockControl(BasicBlock* block,
+ BlockEffectControlMap* block_effects) {
+ Node* control = block->NodeAt(0);
+ DCHECK(NodeProperties::IsControl(control));
+
+ // Do not rewire the end node.
+ if (control->opcode() == IrOpcode::kEnd) return;
+
+ // Update all inputs to the given control node with the correct control.
+ DCHECK(control->opcode() == IrOpcode::kMerge ||
+ control->op()->ControlInputCount() == block->PredecessorCount());
+ if (control->op()->ControlInputCount() != block->PredecessorCount()) {
+ return; // We already re-wired the control inputs of this node.
+ }
+ for (int i = 0; i < control->op()->ControlInputCount(); i++) {
+ Node* input = NodeProperties::GetControlInput(control, i);
+ BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
+ const BlockEffectControlData& block_effect =
+ block_effects->For(predecessor, block);
+ if (input != block_effect.current_control) {
+ NodeProperties::ReplaceControlInput(control, block_effect.current_control,
+ i);
+ }
+ }
+}
+
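+// In reverse post-order every forward predecessor has a smaller rpo_number
+// than its successor, so a predecessor numbered at or above the block itself
+// can only reach it through a loop back edge.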
+bool HasIncomingBackEdges(BasicBlock* block) {
+ for (BasicBlock* pred : block->predecessors()) {
+ if (pred->rpo_number() >= block->rpo_number()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void RemoveRegionNode(Node* node) {
+ DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
+ IrOpcode::kBeginRegion == node->opcode());
+ // Update the value/context uses to the value input of the finish node and
+ // the effect uses to the effect input.
+ for (Edge edge : node->use_edges()) {
+ DCHECK(!edge.from()->IsDead());
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(NodeProperties::GetEffectInput(node));
+ } else {
+ DCHECK(!NodeProperties::IsControlEdge(edge));
+ DCHECK(!NodeProperties::IsFrameStateEdge(edge));
+ edge.UpdateTo(node->InputAt(0));
+ }
+ }
+ node->Kill();
+}
+
+void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
+ CommonOperatorBuilder* common,
+ BlockEffectControlMap* block_effects) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+
+ // This optimization is a special case of (super)block cloning. It takes an
+ // input graph as shown below and clones the Branch node for every predecessor
+ // to the Merge, essentially removing the Merge completely. This avoids
+ // materializing the bit for the Phi and may offer potential for further
+ // branch folding optimizations (e.g. because one or more inputs to the Phi
+ // are constants). Note that there may be more Phi nodes hanging off the Merge,
+ // but we can only handle a certain subset of them currently (actually only Phi and
+ // EffectPhi nodes whose uses have either the IfTrue or IfFalse as control
+ // input).
+
+ // Control1 ... ControlN
+ // ^ ^
+ // | | Cond1 ... CondN
+ // +----+ +----+ ^ ^
+ // | | | |
+ // | | +----+ |
+ // Merge<--+ | +------------+
+ // ^ \|/
+ // | Phi
+ // | |
+ // Branch----+
+ // ^
+ // |
+ // +-----+-----+
+ // | |
+ // IfTrue IfFalse
+ // ^ ^
+ // | |
+
+ // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
+
+ // Control1 Cond1 ... ControlN CondN
+ // ^ ^ ^ ^
+ // \ / \ /
+ // Branch ... Branch
+ // ^ ^
+ // | |
+ // +---+---+ +---+----+
+ // | | | |
+ // IfTrue IfFalse ... IfTrue IfFalse
+ // ^ ^ ^ ^
+ // | | | |
+ // +--+ +-------------+ |
+ // | | +--------------+ +--+
+ // | | | |
+ // Merge Merge
+ // ^ ^
+ // | |
+
+ Node* branch = node;
+ Node* cond = NodeProperties::GetValueInput(branch, 0);
+ if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return;
+ Node* merge = NodeProperties::GetControlInput(branch);
+ if (merge->opcode() != IrOpcode::kMerge ||
+ NodeProperties::GetControlInput(cond) != merge) {
+ return;
+ }
+ // Grab the IfTrue/IfFalse projections of the Branch.
+ BranchMatcher matcher(branch);
+ // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
+ NodeVector phis(graph->zone());
+ for (Node* const use : merge->uses()) {
+ if (use == branch || use == cond) continue;
+ // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
+ // Merge. Ideally, we would just clone the nodes (and everything that
+ // depends on them to some distant join point), but that requires knowledge
+ // about dominance/post-dominance.
+ if (!NodeProperties::IsPhi(use)) return;
+ for (Edge edge : use->use_edges()) {
+ // Right now we can only handle Phi/EffectPhi nodes whose uses are
+ // directly control-dependent on either the IfTrue or the IfFalse
+ // successor, because we know exactly how to update those uses.
+ if (edge.from()->op()->ControlInputCount() != 1) return;
+ Node* control = NodeProperties::GetControlInput(edge.from());
+ if (NodeProperties::IsPhi(edge.from())) {
+ control = NodeProperties::GetControlInput(control, edge.index());
+ }
+ if (control != matcher.IfTrue() && control != matcher.IfFalse()) return;
+ }
+ phis.push_back(use);
+ }
+ BranchHint const hint = BranchHintOf(branch->op());
+ int const input_count = merge->op()->ControlInputCount();
+ DCHECK_LE(1, input_count);
+ Node** const inputs = graph->zone()->NewArray<Node*>(2 * input_count);
+ Node** const merge_true_inputs = &inputs[0];
+ Node** const merge_false_inputs = &inputs[input_count];
+ for (int index = 0; index < input_count; ++index) {
+ Node* cond1 = NodeProperties::GetValueInput(cond, index);
+ Node* control1 = NodeProperties::GetControlInput(merge, index);
+ Node* branch1 = graph->NewNode(common->Branch(hint), cond1, control1);
+ merge_true_inputs[index] = graph->NewNode(common->IfTrue(), branch1);
+ merge_false_inputs[index] = graph->NewNode(common->IfFalse(), branch1);
+ }
+ Node* const merge_true = matcher.IfTrue();
+ Node* const merge_false = matcher.IfFalse();
+ merge_true->TrimInputCount(0);
+ merge_false->TrimInputCount(0);
+ for (int i = 0; i < input_count; ++i) {
+ merge_true->AppendInput(graph->zone(), merge_true_inputs[i]);
+ merge_false->AppendInput(graph->zone(), merge_false_inputs[i]);
+ }
+ DCHECK_EQ(2, block->SuccessorCount());
+ NodeProperties::ChangeOp(matcher.IfTrue(), common->Merge(input_count));
+ NodeProperties::ChangeOp(matcher.IfFalse(), common->Merge(input_count));
+ int const true_index =
+ block->SuccessorAt(0)->NodeAt(0) == matcher.IfTrue() ? 0 : 1;
+ BlockEffectControlData* true_block_data =
+ &block_effects->For(block, block->SuccessorAt(true_index));
+ BlockEffectControlData* false_block_data =
+ &block_effects->For(block, block->SuccessorAt(true_index ^ 1));
+ for (Node* const phi : phis) {
+ for (int index = 0; index < input_count; ++index) {
+ inputs[index] = phi->InputAt(index);
+ }
+ inputs[input_count] = merge_true;
+ Node* phi_true = graph->NewNode(phi->op(), input_count + 1, inputs);
+ inputs[input_count] = merge_false;
+ Node* phi_false = graph->NewNode(phi->op(), input_count + 1, inputs);
+ if (phi->UseCount() == 0) {
+ DCHECK_EQ(phi->opcode(), IrOpcode::kEffectPhi);
+ DCHECK_EQ(input_count, block->SuccessorCount());
+ } else {
+ for (Edge edge : phi->use_edges()) {
+ Node* control = NodeProperties::GetControlInput(edge.from());
+ if (NodeProperties::IsPhi(edge.from())) {
+ control = NodeProperties::GetControlInput(control, edge.index());
+ }
+ DCHECK(control == matcher.IfTrue() || control == matcher.IfFalse());
+ edge.UpdateTo((control == matcher.IfTrue()) ? phi_true : phi_false);
+ }
+ }
+ if (phi->opcode() == IrOpcode::kEffectPhi) {
+ true_block_data->current_effect = phi_true;
+ false_block_data->current_effect = phi_false;
+ }
+ phi->Kill();
+ }
+ // Fix up IfTrue and IfFalse and kill all dead nodes.
+ if (branch == block->control_input()) {
+ true_block_data->current_control = merge_true;
+ false_block_data->current_control = merge_false;
+ }
+ branch->Kill();
+ cond->Kill();
+ merge->Kill();
+}
+} // namespace
+
+void EffectControlLinearizer::Run() {
+ BlockEffectControlMap block_effects(temp_zone());
+ ZoneVector<PendingEffectPhi> pending_effect_phis(temp_zone());
+ ZoneVector<BasicBlock*> pending_block_controls(temp_zone());
+ NodeVector inputs_buffer(temp_zone());
+
+ for (BasicBlock* block : *(schedule()->rpo_order())) {
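+    // Reverse post-order guarantees that, except for loop back edges, all
+    // predecessors of {block} have already been visited and their effects
+    // recorded in {block_effects}.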
+ size_t instr = 0;
+
+ // The control node should be the first.
+ Node* control = block->NodeAt(instr);
+ DCHECK(NodeProperties::IsControl(control));
+ // Update the control inputs.
+ if (HasIncomingBackEdges(block)) {
+ // If there are back edges, we need to update later because we have not
+ // computed the control yet. This should only happen for loops.
+ DCHECK_EQ(IrOpcode::kLoop, control->opcode());
+ pending_block_controls.push_back(block);
+ } else {
+ // If there are no back edges, we can update now.
+ UpdateBlockControl(block, &block_effects);
+ }
+ instr++;
+
+ // Iterate over the phis and update the effect phis.
+ Node* effect = nullptr;
+ Node* terminate = nullptr;
+ for (; instr < block->NodeCount(); instr++) {
+ Node* node = block->NodeAt(instr);
+ // Only go through the phis and effect phis.
+ if (node->opcode() == IrOpcode::kEffectPhi) {
+ // There should be at most one effect phi in a block.
+ DCHECK_NULL(effect);
+ // IfException blocks should not have effect phis.
+ DCHECK_NE(IrOpcode::kIfException, control->opcode());
+ effect = node;
+
+ // Make sure we update the inputs to the incoming blocks' effects.
+ if (HasIncomingBackEdges(block)) {
+ // In case of loops, we do not update the effect phi immediately
+ // because the back predecessor has not been handled yet. We just
+ // record the effect phi for later processing.
+ pending_effect_phis.push_back(PendingEffectPhi(node, block));
+ } else {
+ UpdateEffectPhi(node, block, &block_effects);
+ }
+ } else if (node->opcode() == IrOpcode::kPhi) {
+ // Just skip phis.
+ } else if (node->opcode() == IrOpcode::kTerminate) {
+ DCHECK(terminate == nullptr);
+ terminate = node;
+ } else {
+ break;
+ }
+ }
+
+ if (effect == nullptr) {
+ // There was no effect phi.
+ DCHECK(!HasIncomingBackEdges(block));
+ if (block == schedule()->start()) {
+ // Start block => effect is start.
+ DCHECK_EQ(graph()->start(), control);
+ effect = graph()->start();
+ } else if (control->opcode() == IrOpcode::kEnd) {
+        // The end block is just a dummy; no effect is needed.
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(1u, block->size());
+ effect = nullptr;
+ } else {
+ // If all the predecessors have the same effect, we can use it as our
+ // current effect.
+ effect =
+ block_effects.For(block->PredecessorAt(0), block).current_effect;
+ for (size_t i = 1; i < block->PredecessorCount(); ++i) {
+ if (block_effects.For(block->PredecessorAt(i), block)
+ .current_effect != effect) {
+ effect = nullptr;
+ break;
+ }
+ }
+ if (effect == nullptr) {
+ DCHECK_NE(IrOpcode::kIfException, control->opcode());
+ // The input blocks do not have the same effect. We have
+ // to create an effect phi node.
+ inputs_buffer.clear();
+ inputs_buffer.resize(block->PredecessorCount(), jsgraph()->Dead());
+ inputs_buffer.push_back(control);
+ effect = graph()->NewNode(
+ common()->EffectPhi(static_cast<int>(block->PredecessorCount())),
+ static_cast<int>(inputs_buffer.size()), &(inputs_buffer.front()));
+ // For loops, we update the effect phi node later to break cycles.
+ if (control->opcode() == IrOpcode::kLoop) {
+ pending_effect_phis.push_back(PendingEffectPhi(effect, block));
+ } else {
+ UpdateEffectPhi(effect, block, &block_effects);
+ }
+ } else if (control->opcode() == IrOpcode::kIfException) {
+ // The IfException is connected into the effect chain, so we need
+ // to update the effect here.
+ NodeProperties::ReplaceEffectInput(control, effect);
+ effect = control;
+ }
+ }
+ }
+
+ // Fixup the Terminate node.
+ if (terminate != nullptr) {
+ NodeProperties::ReplaceEffectInput(terminate, effect);
+ }
+
+ // The frame state at block entry is determined by the frame states leaving
+ // all predecessors. In case there is no frame state dominating this block,
+ // we can rely on a checkpoint being present before the next deoptimization.
+ // TODO(mstarzinger): Eventually we will need to go hunt for a frame state
+ // once deoptimizing nodes roam freely through the schedule.
+ Node* frame_state = nullptr;
+ if (block != schedule()->start()) {
+      // If all the predecessors have the same frame state, we can use it
+      // as our current frame state.
+ frame_state =
+ block_effects.For(block->PredecessorAt(0), block).current_frame_state;
+ for (size_t i = 1; i < block->PredecessorCount(); i++) {
+ if (block_effects.For(block->PredecessorAt(i), block)
+ .current_frame_state != frame_state) {
+ frame_state = nullptr;
+ break;
+ }
+ }
+ }
+
+ // Process the ordinary instructions.
+ for (; instr < block->NodeCount(); instr++) {
+ Node* node = block->NodeAt(instr);
+ ProcessNode(node, &frame_state, &effect, &control);
+ }
+
+ switch (block->control()) {
+ case BasicBlock::kGoto:
+ case BasicBlock::kNone:
+ break;
+
+ case BasicBlock::kCall:
+ case BasicBlock::kTailCall:
+ case BasicBlock::kSwitch:
+ case BasicBlock::kReturn:
+ case BasicBlock::kDeoptimize:
+ case BasicBlock::kThrow:
+ ProcessNode(block->control_input(), &frame_state, &effect, &control);
+ break;
+
+ case BasicBlock::kBranch:
+ ProcessNode(block->control_input(), &frame_state, &effect, &control);
+ TryCloneBranch(block->control_input(), block, graph(), common(),
+ &block_effects);
+ break;
+ }
+
+ // Store the effect, control and frame state for later use.
+ for (BasicBlock* successor : block->successors()) {
+ BlockEffectControlData* data = &block_effects.For(block, successor);
+ if (data->current_effect == nullptr) {
+ data->current_effect = effect;
+ }
+ if (data->current_control == nullptr) {
+ data->current_control = control;
+ }
+ data->current_frame_state = frame_state;
+ }
+ }
+
+ // Update the incoming edges of the effect phis that could not be processed
+ // during the first pass (because they could have incoming back edges).
+ for (const PendingEffectPhi& pending_effect_phi : pending_effect_phis) {
+ UpdateEffectPhi(pending_effect_phi.effect_phi, pending_effect_phi.block,
+ &block_effects);
+ }
+ for (BasicBlock* pending_block_control : pending_block_controls) {
+ UpdateBlockControl(pending_block_control, &block_effects);
+ }
+}
+
+namespace {
+
+void TryScheduleCallIfSuccess(Node* node, Node** control) {
+ // Schedule the call's IfSuccess node if there is no exception use.
+ if (!NodeProperties::IsExceptionalCall(node)) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge) &&
+ edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ *control = edge.from();
+ }
+ }
+ }
+}
+
+} // namespace
+
+void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
+ Node** effect, Node** control) {
+ // If the node needs to be wired into the effect/control chain, do this
+ // here. Pass current frame state for lowering to eager deoptimization.
+ if (TryWireInStateEffect(node, *frame_state, effect, control)) {
+ return;
+ }
+
+ // If the node has a visible effect, then there must be a checkpoint in the
+ // effect chain before we are allowed to place another eager deoptimization
+ // point. We zap the frame state to ensure this invariant is maintained.
+ if (region_observability_ == RegionObservability::kObservable &&
+ !node->op()->HasProperty(Operator::kNoWrite)) {
+ *frame_state = nullptr;
+ }
+
+  // Remove the end markers of an 'atomic' allocation region, because the
+  // region should be wired into the effect chain by now.
+ if (node->opcode() == IrOpcode::kFinishRegion) {
+ // Reset the current region observability.
+ region_observability_ = RegionObservability::kObservable;
+ // Update the value uses to the value input of the finish node and
+ // the effect uses to the effect input.
+ return RemoveRegionNode(node);
+ }
+ if (node->opcode() == IrOpcode::kBeginRegion) {
+ // Determine the observability for this region and use that for all
+ // nodes inside the region (i.e. ignore the absence of kNoWrite on
+ // StoreField and other operators).
+ DCHECK_NE(RegionObservability::kNotObservable, region_observability_);
+ region_observability_ = RegionObservabilityOf(node->op());
+ // Update the value uses to the value input of the finish node and
+ // the effect uses to the effect input.
+ return RemoveRegionNode(node);
+ }
+
+ // Special treatment for checkpoint nodes.
+ if (node->opcode() == IrOpcode::kCheckpoint) {
+    // Unlink the checkpoint; effect uses will be updated to the incoming
+    // effect that is passed. The frame state is preserved for lowering.
+ DCHECK_EQ(RegionObservability::kObservable, region_observability_);
+ *frame_state = NodeProperties::GetFrameStateInput(node);
+ return;
+ }
+
+ if (node->opcode() == IrOpcode::kIfSuccess) {
+ // We always schedule IfSuccess with its call, so skip it here.
+ DCHECK_EQ(IrOpcode::kCall, node->InputAt(0)->opcode());
+ // The IfSuccess node should not belong to an exceptional call node
+ // because such IfSuccess nodes should only start a basic block (and
+ // basic block start nodes are not handled in the ProcessNode method).
+ DCHECK(!NodeProperties::IsExceptionalCall(node->InputAt(0)));
+ return;
+ }
+
+ // If the node takes an effect, replace with the current one.
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ Node* input_effect = NodeProperties::GetEffectInput(node);
+
+ if (input_effect != *effect) {
+ NodeProperties::ReplaceEffectInput(node, *effect);
+ }
+
+ // If the node produces an effect, update our current effect. (However,
+ // ignore new effect chains started with ValueEffect.)
+ if (node->op()->EffectOutputCount() > 0) {
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ *effect = node;
+ }
+ } else {
+ // New effect chain is only started with a Start or ValueEffect node.
+ DCHECK(node->op()->EffectOutputCount() == 0 ||
+ node->opcode() == IrOpcode::kStart);
+ }
+
+ // Rewire control inputs.
+ for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+ NodeProperties::ReplaceControlInput(node, *control, i);
+ }
+ // Update the current control and wire IfSuccess right after calls.
+ if (node->op()->ControlOutputCount() > 0) {
+ *control = node;
+ if (node->opcode() == IrOpcode::kCall) {
+ // Schedule the call's IfSuccess node (if there is no exception use).
+ TryScheduleCallIfSuccess(node, control);
+ }
+ }
+}
+
+bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
+ Node* frame_state,
+ Node** effect,
+ Node** control) {
+ ValueEffectControl state(nullptr, nullptr, nullptr);
+ switch (node->opcode()) {
+ case IrOpcode::kChangeBitToTagged:
+ state = LowerChangeBitToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeInt31ToTaggedSigned:
+ state = LowerChangeInt31ToTaggedSigned(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeInt32ToTagged:
+ state = LowerChangeInt32ToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeUint32ToTagged:
+ state = LowerChangeUint32ToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeFloat64ToTagged:
+ state = LowerChangeFloat64ToTagged(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedSignedToInt32:
+ state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToBit:
+ state = LowerChangeTaggedToBit(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToInt32:
+ state = LowerChangeTaggedToInt32(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToUint32:
+ state = LowerChangeTaggedToUint32(node, *effect, *control);
+ break;
+ case IrOpcode::kChangeTaggedToFloat64:
+ state = LowerChangeTaggedToFloat64(node, *effect, *control);
+ break;
+ case IrOpcode::kTruncateTaggedToFloat64:
+ state = LowerTruncateTaggedToFloat64(node, *effect, *control);
+ break;
+ case IrOpcode::kCheckBounds:
+ state = LowerCheckBounds(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckMaps:
+ state = LowerCheckMaps(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckNumber:
+ state = LowerCheckNumber(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckString:
+ state = LowerCheckString(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckIf:
+ state = LowerCheckIf(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckTaggedPointer:
+ state = LowerCheckTaggedPointer(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckTaggedSigned:
+ state = LowerCheckTaggedSigned(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedInt32Add:
+ state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedInt32Sub:
+ state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedInt32Div:
+ state = LowerCheckedInt32Div(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedInt32Mod:
+ state = LowerCheckedInt32Mod(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedUint32Div:
+ state = LowerCheckedUint32Div(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedUint32Mod:
+ state = LowerCheckedUint32Mod(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedInt32Mul:
+ state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedUint32ToInt32:
+ state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedFloat64ToInt32:
+ state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedTaggedSignedToInt32:
+ state =
+ LowerCheckedTaggedSignedToInt32(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedTaggedToInt32:
+ state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckedTaggedToFloat64:
+ state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kTruncateTaggedToWord32:
+ state = LowerTruncateTaggedToWord32(node, *effect, *control);
+ break;
+ case IrOpcode::kCheckedTruncateTaggedToWord32:
+ state = LowerCheckedTruncateTaggedToWord32(node, frame_state, *effect,
+ *control);
+ break;
+ case IrOpcode::kObjectIsCallable:
+ state = LowerObjectIsCallable(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsNumber:
+ state = LowerObjectIsNumber(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsReceiver:
+ state = LowerObjectIsReceiver(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsSmi:
+ state = LowerObjectIsSmi(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsString:
+ state = LowerObjectIsString(node, *effect, *control);
+ break;
+ case IrOpcode::kObjectIsUndetectable:
+ state = LowerObjectIsUndetectable(node, *effect, *control);
+ break;
+ case IrOpcode::kStringFromCharCode:
+ state = LowerStringFromCharCode(node, *effect, *control);
+ break;
+ case IrOpcode::kStringCharCodeAt:
+ state = LowerStringCharCodeAt(node, *effect, *control);
+ break;
+ case IrOpcode::kCheckFloat64Hole:
+ state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kCheckTaggedHole:
+ state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kConvertTaggedHoleToUndefined:
+ state = LowerConvertTaggedHoleToUndefined(node, *effect, *control);
+ break;
+ case IrOpcode::kPlainPrimitiveToNumber:
+ state = LowerPlainPrimitiveToNumber(node, *effect, *control);
+ break;
+ case IrOpcode::kPlainPrimitiveToWord32:
+ state = LowerPlainPrimitiveToWord32(node, *effect, *control);
+ break;
+ case IrOpcode::kPlainPrimitiveToFloat64:
+ state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
+ break;
+ case IrOpcode::kEnsureWritableFastElements:
+ state = LowerEnsureWritableFastElements(node, *effect, *control);
+ break;
+ case IrOpcode::kMaybeGrowFastElements:
+ state = LowerMaybeGrowFastElements(node, frame_state, *effect, *control);
+ break;
+ case IrOpcode::kTransitionElementsKind:
+ state = LowerTransitionElementsKind(node, *effect, *control);
+ break;
+ case IrOpcode::kLoadTypedElement:
+ state = LowerLoadTypedElement(node, *effect, *control);
+ break;
+ case IrOpcode::kStoreTypedElement:
+ state = LowerStoreTypedElement(node, *effect, *control);
+ break;
+ case IrOpcode::kFloat64RoundUp:
+ state = LowerFloat64RoundUp(node, *effect, *control);
+ break;
+ case IrOpcode::kFloat64RoundDown:
+ state = LowerFloat64RoundDown(node, *effect, *control);
+ break;
+ case IrOpcode::kFloat64RoundTruncate:
+ state = LowerFloat64RoundTruncate(node, *effect, *control);
+ break;
+ default:
+ return false;
+ }
+ NodeProperties::ReplaceUses(node, state.value, state.effect, state.control);
+ *effect = state.effect;
+ *control = state.control;
+ return true;
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
+ Node* control) {
+ CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ Node* value = node->InputAt(0);
+
+ Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
+ Node* check_same = graph()->NewNode(
+ machine()->Float64Equal(), value,
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+ Node* branch_same = graph()->NewNode(common()->Branch(), check_same, control);
+
+ Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_same);
+ Node* vsmi;
+ Node* if_box = graph()->NewNode(common()->IfFalse(), branch_same);
+
+ if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ // Check if {value} is -0.
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, if_smi);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
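+    // At this point {value} is +-0.0; the high word of -0.0 is 0x80000000,
+    // which is negative as an int32, while the high word of +0.0 is 0.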
+ Node* branch_negative = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_negative, if_zero);
+
+ Node* if_negative = graph()->NewNode(common()->IfTrue(), branch_negative);
+ Node* if_notnegative =
+ graph()->NewNode(common()->IfFalse(), branch_negative);
+
+ // We need to create a box for negative 0.
+ if_smi = graph()->NewNode(common()->Merge(2), if_notzero, if_notnegative);
+ if_box = graph()->NewNode(common()->Merge(2), if_box, if_negative);
+ }
+
+  // On 64-bit machines we can just wrap the 32-bit integer in a Smi; on
+  // 32-bit machines we need to deal with potential overflow and fall back
+  // to boxing.
+ if (machine()->Is64()) {
+ vsmi = ChangeInt32ToSmi(value32);
+ } else {
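+    // On 32-bit, a Smi is the value shifted left by one (the tag bit is 0),
+    // so {value32 + value32} computes the Smi and the overflow bit tells us
+    // when the value is outside the Smi range.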
+ Node* smi_tag = graph()->NewNode(machine()->Int32AddWithOverflow(), value32,
+ value32, if_smi);
+
+ Node* check_ovf =
+ graph()->NewNode(common()->Projection(1), smi_tag, if_smi);
+ Node* branch_ovf = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_ovf, if_smi);
+
+ Node* if_ovf = graph()->NewNode(common()->IfTrue(), branch_ovf);
+ if_box = graph()->NewNode(common()->Merge(2), if_ovf, if_box);
+
+ if_smi = graph()->NewNode(common()->IfFalse(), branch_ovf);
+ vsmi = graph()->NewNode(common()->Projection(0), smi_tag, if_smi);
+ }
+
+ // Allocate the box for the {value}.
+ ValueEffectControl box = AllocateHeapNumberWithValue(value, effect, if_box);
+
+ control = graph()->NewNode(common()->Merge(2), if_smi, box.control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vsmi, box.value, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, box.effect, control);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* branch = graph()->NewNode(common()->Branch(), value, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->TrueConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ value = ChangeInt32ToSmi(value);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ if (machine()->Is64()) {
+ return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
+ }
+
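+  // As in LowerChangeFloat64ToTagged: {value + value} is the 32-bit Smi
+  // representation (value << 1), and the overflow bit signals that the
+  // value must be boxed in a HeapNumber instead.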
+ Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
+ control);
+
+ Node* ovf = graph()->NewNode(common()->Projection(1), add, control);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ ValueEffectControl alloc =
+ AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(common()->Projection(0), add, if_false);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ alloc.value, vfalse, merge);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(2), alloc.effect, effect, merge);
+
+ return ValueEffectControl(phi, ephi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
+ SmiMaxValueConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = ChangeUint32ToSmi(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ ValueEffectControl alloc = AllocateHeapNumberWithValue(
+ ChangeUint32ToFloat64(value), effect, if_false);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, alloc.control);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, alloc.value, merge);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(2), effect, alloc.effect, merge);
+
+ return ValueEffectControl(phi, ephi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ value = ChangeSmiToInt32(value);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
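+  // The input is a tagged boolean here, so comparing the pointer against
+  // the canonical {true} value yields the bit.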
+ value = graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TrueConstant());
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
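+    // Thanks to the static assert above, this load also yields the cached
+    // numeric value when {value} is an Oddball rather than a HeapNumber.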
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->ChangeFloat64ToInt32(), vfalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->ChangeFloat64ToUint32(), vfalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
+ Node* control) {
+ return LowerTruncateTaggedToFloat64(node, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ vtrue = ChangeSmiToInt32(value);
+ vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* index = node->InputAt(0);
+ Node* limit = node->InputAt(1);
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
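+  // A single unsigned comparison subsumes the negative-index check: a
+  // negative {index} becomes a huge uint32 and thus fails the comparison
+  // for any valid {limit}.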
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check,
+ frame_state, effect, control);
+
+ return ValueEffectControl(index, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ // Load the current map of the {value}.
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+
+ int const map_count = node->op()->ValueInputCount() - 1;
+ Node** controls = temp_zone()->NewArray<Node*>(map_count);
+ Node** effects = temp_zone()->NewArray<Node*>(map_count + 1);
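+  // Compare {value_map} against each map in turn. All but the last
+  // comparison branch on to the next check; only a failing last comparison
+  // deoptimizes.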
+
+ for (int i = 0; i < map_count; ++i) {
+ Node* map = node->InputAt(1 + i);
+
+ Node* check = graph()->NewNode(machine()->WordEqual(), value_map, map);
+ if (i == map_count - 1) {
+ controls[i] = effects[i] = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kWrongMap), check,
+ frame_state, effect, control);
+ } else {
+ control = graph()->NewNode(common()->Branch(), check, control);
+ controls[i] = graph()->NewNode(common()->IfTrue(), control);
+ control = graph()->NewNode(common()->IfFalse(), control);
+ effects[i] = effect;
+ }
+ }
+
+ control = graph()->NewNode(common()->Merge(map_count), map_count, controls);
+ effects[map_count] = control;
+ effect =
+ graph()->NewNode(common()->EffectPhi(map_count), map_count + 1, effects);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check0 = ObjectIsSmi(value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ {
+ Node* value_map = efalse0 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse0, if_false0);
+ Node* check1 = graph()->NewNode(machine()->WordEqual(), value_map,
+ jsgraph()->HeapNumberMapConstant());
+ if_false0 = efalse0 = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check1,
+ frame_state, efalse0, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check0 = ObjectIsSmi(value);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check0,
+ frame_state, effect, control);
+
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ Node* value_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ effect, control);
+
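+  // String instance types are allocated below FIRST_NONSTRING_TYPE, so one
+  // unsigned comparison of the instance type suffices.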
+ Node* check1 =
+ graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
+ jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType), check1,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNoReason),
+ value, frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedPointer(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedSigned(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
+ check, frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Node* value =
+ graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
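+  // Projection(1) of the Int32AddWithOverflow is the overflow bit, while
+  // Projection(0) below is the arithmetic result.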
+
+ Node* check = graph()->NewNode(common()->Projection(1), value, control);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+ check, frame_state, effect, control);
+
+ value = graph()->NewNode(common()->Projection(0), value, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Node* value =
+ graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
+
+ Node* check = graph()->NewNode(common()->Projection(1), value, control);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+ check, frame_state, effect, control);
+
+ value = graph()->NewNode(common()->Projection(0), value, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* zero = jsgraph()->Int32Constant(0);
+ Node* minusone = jsgraph()->Int32Constant(-1);
+ Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
+
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ // Check if {rhs} is positive (and not zero).
+ Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+ // Fast case, no additional checking required.
+ vtrue0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ // Check if {rhs} is zero.
+ Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ if_false0 = efalse0 = graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+ frame_state, efalse0, if_false0);
+
+ // Check if {lhs} is zero, as that would produce minus zero.
+ check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
+ if_false0 = efalse0 =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+ check, frame_state, efalse0, if_false0);
+
+ // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
+ // to return -kMinInt, which is not representable.
+ Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ {
+ // Check if {rhs} is -1.
+ Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
+ if_true1 = etrue1 =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+ check, frame_state, etrue1, if_true1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+
+ // Perform the actual integer division.
+ vfalse0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
+ vfalse0, control);
+
+  // Check that the division was exact, i.e. deoptimize if {lhs} is not
+  // equal to {rhs} * {value}.
+ Node* check =
+ graph()->NewNode(machine()->Word32Equal(), lhs,
+ graph()->NewNode(machine()->Int32Mul(), rhs, value));
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* zero = jsgraph()->Int32Constant(0);
+ Node* one = jsgraph()->Int32Constant(1);
+
+ // General case for signed integer modulus, with optimization for (unknown)
+ // power of 2 right hand side.
+ //
+ // if rhs <= 0 then
+ // rhs = -rhs
+ // deopt if rhs == 0
+ // if lhs < 0 then
+ // let res = lhs % rhs in
+ // deopt if res == 0
+ // res
+ // else
+ // let msk = rhs - 1 in
+ // if rhs & msk == 0 then
+ // lhs & msk
+ // else
+ // lhs % rhs
+ //
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ // Check if {rhs} is not strictly positive.
+ Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), rhs, zero);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+    // Negate {rhs}; this might still produce a negative result in the case
+    // of -2^31, but that is handled safely below.
+ vtrue0 = graph()->NewNode(machine()->Int32Sub(), zero, rhs);
+
+ // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+ Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue0, zero);
+ if_true0 = etrue0 = graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+ frame_state, etrue0, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0 = rhs;
+
+  // At this point {rhs} is either greater than zero or -2^31; both are
+  // fine for the code that follows.
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ rhs = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue0, vfalse0, control);
+
+ // Check if {lhs} is negative.
+ Node* check1 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = effect;
+ Node* vtrue1;
+ {
+    // Compute the remainder using {lhs % rhs}.
+ vtrue1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
+
+ // Check if we would have to return -0.
+ Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue1, zero);
+ if_true1 = etrue1 =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+ check, frame_state, etrue1, if_true1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = effect;
+ Node* vfalse1;
+ {
+ Node* msk = graph()->NewNode(machine()->Int32Sub(), rhs, one);
+
+ // Check if {rhs} minus one is a valid mask.
+ Node* check2 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), rhs, msk), zero);
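+    // {rhs & msk} is zero iff {rhs} is a power of two, in which case the
+    // modulus reduces to a simple bit mask.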
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
+
+ // Compute the remainder using {lhs & msk}.
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+
+ // Compute the remainder using the generic {lhs % rhs}.
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2 =
+ graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false2);
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue1,
+ vfalse1, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* zero = jsgraph()->Int32Constant(0);
+
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+ Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+ frame_state, effect, control);
+
+ // Perform the actual unsigned integer division.
+ Node* value = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, control);
+
+  // Check that the division was exact, i.e. deoptimize if {lhs} is not
+  // equal to {rhs} * {value}.
+ check = graph()->NewNode(machine()->Word32Equal(), lhs,
+ graph()->NewNode(machine()->Int32Mul(), rhs, value));
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* zero = jsgraph()->Int32Constant(0);
+
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
+ Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
+ frame_state, effect, control);
+
+ // Perform the actual unsigned integer modulus.
+ Node* value = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ Node* zero = jsgraph()->Int32Constant(0);
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Node* projection =
+ graph()->NewNode(machine()->Int32MulWithOverflow(), lhs, rhs, control);
+
+ Node* check = graph()->NewNode(common()->Projection(1), projection, control);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
+ check, frame_state, effect, control);
+
+ Node* value = graph()->NewNode(common()->Projection(0), projection, control);
+
+ if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value, zero);
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, control);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* e_if_zero = effect;
+ {
+ // We may need to return negative zero.
+ Node* or_inputs = graph()->NewNode(machine()->Word32Or(), lhs, rhs);
+ Node* check_or =
+ graph()->NewNode(machine()->Int32LessThan(), or_inputs, zero);
+ if_zero = e_if_zero =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+ check_or, frame_state, e_if_zero, if_zero);
+ }
+
+ Node* if_not_zero = graph()->NewNode(common()->IfFalse(), branch_zero);
+ Node* e_if_not_zero = effect;
+
+ control = graph()->NewNode(common()->Merge(2), if_zero, if_not_zero);
+ effect = graph()->NewNode(common()->EffectPhi(2), e_if_zero, e_if_not_zero,
+ control);
+ }
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
+ Node* is_safe =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), is_safe,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+ Node* value,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
+ Node* check_same = graph()->NewNode(
+ machine()->Float64Equal(), value,
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN),
+ check_same, frame_state, effect, control);
+
+ if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
+ // Check if {value} is -0.
+ Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_zero, control);
+
+ Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine()->Int32LessThan(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
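+    // We already know that {value} is +-0.0 here, and only -0.0 has the
+    // sign bit set in its high word.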
+
+ Node* deopt_minus_zero =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
+ check_negative, frame_state, effect, if_zero);
+
+ control =
+ graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
+ effect = graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect,
+ control);
+ }
+
+ return ValueEffectControl(value32, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ Node* value = node->InputAt(0);
+
+ return BuildCheckedFloat64ToInt32(mode, value, frame_state, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
+ check, frame_state, effect, control);
+ value = ChangeSmiToInt32(value);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // In the Smi case, just convert to int32.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+  // In the non-Smi case, check that {value} is a heap number, load its
+  // value and convert that to int32.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
+ jsgraph()->HeapNumberMapConstant());
+ if_false = efalse = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check,
+ frame_state, efalse, if_false);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ ValueEffectControl state =
+ BuildCheckedFloat64ToInt32(mode, vfalse, frame_state, efalse, if_false);
+ if_false = state.control;
+ efalse = state.effect;
+ vfalse = state.value;
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
+ CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
+ Node* control) {
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+
+ Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
+ jsgraph()->HeapNumberMapConstant());
+
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber: {
+ control = effect = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber),
+ check_number, frame_state, effect, control);
+ break;
+ }
+ case CheckTaggedInputMode::kNumberOrOddball: {
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check_number, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      // Oddballs also contain the numeric value, so we just check that we
+      // have an oddball here.
+ Node* efalse = effect;
+ Node* instance_type = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ value_map, efalse, if_false);
+ Node* check_oddball =
+ graph()->NewNode(machine()->Word32Equal(), instance_type,
+ jsgraph()->Int32Constant(ODDBALL_TYPE));
+ if_false = efalse = graph()->NewNode(
+ common()->DeoptimizeUnless(
+ DeoptimizeReason::kNotAHeapNumberUndefinedBoolean),
+ check_oddball, frame_state, efalse, if_false);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ break;
+ }
+ }
+
+ value = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ effect, control);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // In the Smi case, just convert to int32 and then float64.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+ vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
+
+ // Otherwise, check heap numberness and load the number.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
+ mode, value, frame_state, effect, if_false);
+
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), if_true, number_state.control);
+ Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
+ number_state.effect, merge);
+ Node* result =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
+ number_state.value, merge);
+
+ return ValueEffectControl(result, effect_phi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // In the Smi case, just convert to int32.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = ChangeSmiToInt32(value);
+
+ // Otherwise, check that it's a heap number or oddball and truncate the value
+ // to int32.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ ValueEffectControl false_state = BuildCheckedHeapNumberOrOddballToFloat64(
+ CheckTaggedInputMode::kNumberOrOddball, value, frame_state, effect,
+ if_false);
+ false_state.value =
+ graph()->NewNode(machine()->TruncateFloat64ToWord32(), false_state.value);
+
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), if_true, false_state.control);
+ Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
+ false_state.effect, merge);
+ Node* result =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue,
+ false_state.value, merge);
+
+ return ValueEffectControl(result, effect_phi, merge);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
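+    // {value} is callable iff the callable bit is set and the undetectable
+    // bit is clear; masking with both bits and comparing against just the
+    // callable bit checks both at once, so undetectable callables (like
+    // document.all) are not reported as callable.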
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_bit_field = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(
+ machine()->Word32Equal(),
+ jsgraph()->Int32Constant(1 << Map::kIsCallable),
+ graph()->NewNode(
+ machine()->Word32And(), value_bit_field,
+ jsgraph()->Int32Constant((1 << Map::kIsCallable) |
+ (1 << Map::kIsUndetectable))));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsNumber(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(1);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ vfalse = graph()->NewNode(machine()->WordEqual(), value_map,
+ jsgraph()->HeapNumberMapConstant());
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsReceiver(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
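+    // JSReceiver instance types form the tail of the instance type range, so
+    // a single unsigned comparison against FIRST_JS_RECEIVER_TYPE suffices.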
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_instance_type = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+ value_instance_type);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsSmi(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ value = ObjectIsSmi(value);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsString(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
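+    // All string instance types precede FIRST_NONSTRING_TYPE, so a single
+    // unsigned comparison on the instance type suffices.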
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_instance_type = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
+ jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->Int32Constant(0);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
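+    // The result is !((bit_field & (1 << Map::kIsUndetectable)) == 0): the
+    // inner Word32Equal is 1 when the bit is clear, and the outer comparison
+    // with 0 negates that, yielding 1 iff the map is undetectable.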
+ Node* value_map = efalse =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ value, efalse, if_false);
+ Node* value_bit_field = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
+ efalse, if_false);
+ vfalse = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(
+ machine()->Word32Equal(), jsgraph()->Int32Constant(0),
+ graph()->NewNode(
+ machine()->Word32And(), value_bit_field,
+ jsgraph()->Int32Constant(1 << Map::kIsUndetectable))),
+ jsgraph()->Int32Constant(0));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
+ vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
+ Node* control) {
+ Node* subject = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ // We may need to loop several times for ConsString/SlicedString {subject}s.
+ Node* loop =
+ graph()->NewNode(common()->Loop(4), control, control, control, control);
+ Node* lsubject =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 4),
+ subject, subject, subject, subject, loop);
+ Node* lindex =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 4), index,
+ index, index, index, loop);
+ Node* leffect = graph()->NewNode(common()->EffectPhi(4), effect, effect,
+ effect, effect, loop);
+
+ control = loop;
+ effect = leffect;
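+
+  // The {loop} has four inputs: input 0 is the initial entry, inputs 1 and 2
+  // are wired up below to retry after unpacking or flattening a ConsString,
+  // and input 3 retries with the parent of a SlicedString.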
+
+ // Determine the instance type of {lsubject}.
+ Node* lsubject_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ lsubject, effect, control);
+ Node* lsubject_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ lsubject_map, effect, control);
+
+ // Check if {lsubject} is a SeqString.
+ Node* check0 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+ jsgraph()->Int32Constant(kStringRepresentationMask)),
+ jsgraph()->Int32Constant(kSeqStringTag));
+ Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+ // Check if the {lsubject} is a TwoByteSeqString or a OneByteSeqString.
+ Node* check1 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+ jsgraph()->Int32Constant(kStringEncodingMask)),
+ jsgraph()->Int32Constant(kTwoByteStringTag));
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = etrue0;
+ Node* vtrue1 = etrue1 =
+ graph()->NewNode(simplified()->LoadElement(
+ AccessBuilder::ForSeqTwoByteStringCharacter()),
+ lsubject, lindex, etrue1, if_true1);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = etrue0;
+ Node* vfalse1 = efalse1 =
+ graph()->NewNode(simplified()->LoadElement(
+ AccessBuilder::ForSeqOneByteStringCharacter()),
+ lsubject, lindex, efalse1, if_false1);
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ // Check if the {lsubject} is a ConsString.
+ Node* check1 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+ jsgraph()->Int32Constant(kStringRepresentationMask)),
+ jsgraph()->Int32Constant(kConsStringTag));
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ {
+ // Load the right hand side of the {lsubject} ConsString.
+ Node* lsubject_second = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForConsStringSecond()),
+ lsubject, etrue1, if_true1);
+
+      // Check whether the right-hand side is the empty string (i.e. if this
+      // is really a flat string stored in a cons string). If that is not the
+      // case, we flatten the string first.
+ Node* check2 = graph()->NewNode(machine()->WordEqual(), lsubject_second,
+ jsgraph()->EmptyStringConstant());
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check2, if_true1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = etrue1;
+ Node* vtrue2 = etrue2 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForConsStringFirst()),
+ lsubject, etrue2, if_true2);
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* efalse2 = etrue1;
+ Node* vfalse2;
+ {
+ // Flatten the {lsubject} ConsString first.
+ Operator::Properties properties =
+ Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kFlattenString;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+ vfalse2 = efalse2 = graph()->NewNode(
+ common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
+ jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+ jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(),
+ efalse2, if_false2);
+ }
+
+ // Retry the {loop} with the new subject.
+ loop->ReplaceInput(1, if_true2);
+ lindex->ReplaceInput(1, lindex);
+ leffect->ReplaceInput(1, etrue2);
+ lsubject->ReplaceInput(1, vtrue2);
+ loop->ReplaceInput(2, if_false2);
+ lindex->ReplaceInput(2, lindex);
+ leffect->ReplaceInput(2, efalse2);
+ lsubject->ReplaceInput(2, vfalse2);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ // Check if the {lsubject} is an ExternalString.
+ Node* check2 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+ jsgraph()->Int32Constant(kStringRepresentationMask)),
+ jsgraph()->Int32Constant(kExternalStringTag));
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = efalse1;
+ Node* vtrue2;
+ {
+ // Check if the {lsubject} is a short external string.
+ Node* check3 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(
+ machine()->Word32And(), lsubject_instance_type,
+ jsgraph()->Int32Constant(kShortExternalStringMask)),
+ jsgraph()->Int32Constant(0));
+ Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check3, if_true2);
+
+ Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+ Node* etrue3 = etrue2;
+ Node* vtrue3;
+ {
+ // Load the actual resource data from the {lsubject}.
+ Node* lsubject_resource_data = etrue3 = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForExternalStringResourceData()),
+ lsubject, etrue3, if_true3);
+
+ // Check if the {lsubject} is a TwoByteExternalString or a
+ // OneByteExternalString.
+ Node* check4 = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
+ jsgraph()->Int32Constant(kStringEncodingMask)),
+ jsgraph()->Int32Constant(kTwoByteStringTag));
+ Node* branch4 =
+ graph()->NewNode(common()->Branch(), check4, if_true3);
+
+ Node* if_true4 = graph()->NewNode(common()->IfTrue(), branch4);
+ Node* etrue4 = etrue3;
+ Node* vtrue4 = etrue4 = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForExternalTwoByteStringCharacter()),
+ lsubject_resource_data, lindex, etrue4, if_true4);
+
+ Node* if_false4 = graph()->NewNode(common()->IfFalse(), branch4);
+ Node* efalse4 = etrue3;
+ Node* vfalse4 = efalse4 = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForExternalOneByteStringCharacter()),
+ lsubject_resource_data, lindex, efalse4, if_false4);
+
+ if_true3 = graph()->NewNode(common()->Merge(2), if_true4, if_false4);
+ etrue3 = graph()->NewNode(common()->EffectPhi(2), etrue4, efalse4,
+ if_true3);
+ vtrue3 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue4, vfalse4, if_true3);
+ }
+
+ Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
+ Node* efalse3 = etrue2;
+ Node* vfalse3;
+ {
+        // The {lsubject} might be compressed; call the runtime.
+ Operator::Properties properties =
+ Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kExternalStringGetChar;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ vfalse3 = efalse3 = graph()->NewNode(
+ common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
+ ChangeInt32ToSmi(lindex),
+ jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+ jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(),
+ efalse3, if_false3);
+ vfalse3 = ChangeSmiToInt32(vfalse3);
+ }
+
+ if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
+ etrue2 =
+ graph()->NewNode(common()->EffectPhi(2), etrue3, efalse3, if_true2);
+ vtrue2 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue3, vfalse3, if_true2);
+ }
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* efalse2 = efalse1;
+ {
+ // The {lsubject} is a SlicedString, continue with its parent.
+ Node* lsubject_parent = efalse2 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForSlicedStringParent()),
+ lsubject, efalse2, if_false2);
+ Node* lsubject_offset = efalse2 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForSlicedStringOffset()),
+ lsubject, efalse2, if_false2);
+ Node* lsubject_index = graph()->NewNode(
+ machine()->Int32Add(), lindex, ChangeSmiToInt32(lsubject_offset));
+
+ // Retry the {loop} with the parent subject.
+ loop->ReplaceInput(3, if_false2);
+ leffect->ReplaceInput(3, efalse2);
+ lindex->ReplaceInput(3, lsubject_index);
+ lsubject->ReplaceInput(3, lsubject_parent);
+ }
+
+ if_false1 = if_true2;
+ efalse1 = etrue2;
+ vfalse1 = vtrue2;
+ }
+
+ if_false0 = if_false1;
+ efalse0 = efalse1;
+ vfalse0 = vfalse1;
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
+ vfalse0, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ // Compute the character code.
+ Node* code =
+ graph()->NewNode(machine()->Word32And(), value,
+ jsgraph()->Int32Constant(String::kMaxUtf16CodeUnit));
+
+ // Check if the {code} is a one-byte char code.
+ Node* check0 =
+ graph()->NewNode(machine()->Int32LessThanOrEqual(), code,
+ jsgraph()->Int32Constant(String::kMaxOneByteCharCode));
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+    // Load the isolate-wide single character string cache.
+ Node* cache =
+ jsgraph()->HeapConstant(factory()->single_character_string_cache());
+
+ // Compute the {cache} index for {code}.
+ Node* index =
+ machine()->Is32() ? code : graph()->NewNode(
+ machine()->ChangeUint32ToUint64(), code);
+
+ // Check if we have an entry for the {code} in the single character string
+ // cache already.
+ Node* entry = etrue0 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
+ index, etrue0, if_true0);
+
+ Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
+ jsgraph()->UndefinedConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = etrue0;
+ Node* vtrue1;
+ {
+ // Allocate a new SeqOneByteString for {code}.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue1,
+ if_true1);
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), vtrue1,
+ jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue1,
+ if_true1);
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue1,
+ jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue1, if_true1);
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue1,
+ jsgraph()->SmiConstant(1), etrue1, if_true1);
+ etrue1 = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
+ kNoWriteBarrier)),
+ vtrue1, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
+ kHeapObjectTag),
+ code, etrue1, if_true1);
+
+ // Remember it in the {cache}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
+ cache, index, vtrue1, etrue1, if_true1);
+ }
+
+ // Use the {entry} from the {cache}.
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = etrue0;
+ Node* vfalse1 = entry;
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ // Allocate a new SeqTwoByteString for {code}.
+ vfalse0 = efalse0 =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)),
+ efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
+ jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
+ jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
+ jsgraph()->SmiConstant(1), efalse0, if_false0);
+ efalse0 = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
+ kNoWriteBarrier)),
+ vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag),
+ code, efalse0, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+  // If we reach this point without having eliminated the {node} that's
+  // marked with allow-return-hole, we cannot do anything else, so we just
+  // deoptimize in case of the hole NaN (similar to Crankshaft).
+ Node* value = node->InputAt(0);
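+  // The hole is encoded as a NaN with a distinguished upper word
+  // (kHoleNanUpper32), so comparing just the high 32 bits of {value}
+  // suffices here.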
+ Node* check = graph()->NewNode(
+ machine()->Word32Equal(),
+ graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(kHoleNanUpper32));
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
+ Node* effect, Node* control) {
+ Node* value = node->InputAt(0);
+ Node* check = graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TheHoleConstant());
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* check = graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = value;
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
+ Node* control) {
+ Node* result = effect = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ result, jsgraph()->HeapNumberMapConstant(), effect,
+ control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
+ value, effect, control);
+ return ValueEffectControl(result, effect, control);
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+ }
+ return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+ }
+ return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+Node* EffectControlLinearizer::ChangeInt32ToFloat64(Node* value) {
+ return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
+}
+
+Node* EffectControlLinearizer::ChangeUint32ToFloat64(Node* value) {
+ return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+}
+
+Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
+ value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+ }
+ return value;
+}
+
+Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
+ return graph()->NewNode(
+ machine()->WordEqual(),
+ graph()->NewNode(machine()->WordAnd(), value,
+ jsgraph()->IntPtrConstant(kSmiTagMask)),
+ jsgraph()->IntPtrConstant(kSmiTag));
+}
+
+Node* EffectControlLinearizer::SmiMaxValueConstant() {
+ return jsgraph()->Int32Constant(Smi::kMaxValue);
+}
+
+Node* EffectControlLinearizer::SmiShiftBitsConstant() {
+ return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
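+
+// A worked example of the Smi helpers above, assuming V8's usual settings
+// (kSmiTag == 0, kSmiTagSize == 1, and kSmiShiftSize == 0 on 32-bit targets
+// resp. 31 on 64-bit targets):
+//
+//   32-bit: ChangeInt32ToSmi(5) == 5 << 1 == 10, low tag bit clear;
+//   64-bit: ChangeInt32ToSmi(5) == int64_t{5} << 32, value in the upper word;
+//   either: ChangeSmiToInt32(ChangeInt32ToSmi(5)) == 5.
+//
+// ObjectIsSmi then only has to test the low tag bit of the word.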
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ Node* result = effect =
+ graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
+ value, jsgraph()->NoContextConstant(), effect);
+ return ValueEffectControl(result, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check0 = ObjectIsSmi(value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = ChangeSmiToInt32(value);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ vfalse0 = efalse0 = graph()->NewNode(
+ ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
+ jsgraph()->NoContextConstant(), efalse0);
+
+ Node* check1 = ObjectIsSmi(vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1 = ChangeSmiToInt32(vfalse0);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+ efalse1, if_false1);
+ vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue0, vfalse0, control);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check0 = ObjectIsSmi(value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+ vtrue0 = ChangeSmiToInt32(value);
+ vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ vfalse0 = efalse0 = graph()->NewNode(
+ ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
+ jsgraph()->NoContextConstant(), efalse0);
+
+ Node* check1 = ObjectIsSmi(vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ vtrue1 = ChangeSmiToInt32(vfalse0);
+ vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+ efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, control);
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node,
+ Node* effect,
+ Node* control) {
+ Node* object = node->InputAt(0);
+ Node* elements = node->InputAt(1);
+
+ // Load the current map of {elements}.
+ Node* elements_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ elements, effect, control);
+
+ // Check if {elements} is not a copy-on-write FixedArray.
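+  // (Copy-on-write arrays use the distinct fixed_cow_array_map, so comparing
+  // the map against the plain fixed_array_map detects them.)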
+ Node* check = graph()->NewNode(machine()->WordEqual(), elements_map,
+ jsgraph()->FixedArrayMapConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Nothing to do if the {elements} are not copy-on-write.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = elements;
+
+  // Otherwise the {elements} are copy-on-write: take a copy and install it
+  // on the {object}.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // We need to create a copy of the {elements} for {object}.
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ properties);
+ vfalse = efalse = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()), object,
+ jsgraph()->NoContextConstant(), efalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* elements = node->InputAt(1);
+ Node* index = node->InputAt(2);
+ Node* length = node->InputAt(3);
+
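+  // Check if we might need to grow the {elements}: for holey elements any
+  // {index} at or beyond {length} is a candidate, otherwise only a direct
+  // append where {index} == {length}.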
+ Node* check0 = graph()->NewNode((flags & GrowFastElementsFlag::kHoleyElements)
+ ? machine()->Uint32LessThanOrEqual()
+ : machine()->Word32Equal(),
+ length, index);
+ Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = elements;
+ {
+ // Load the length of the {elements} backing store.
+ Node* elements_length = etrue0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
+ etrue0, if_true0);
+ elements_length = ChangeSmiToInt32(elements_length);
+
+    // Check whether {index} still fits into the current {elements} backing
+    // store; if not, we need to grow it.
+ Node* check1 =
+ graph()->NewNode(machine()->Uint32LessThan(), index, elements_length);
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = etrue0;
+ Node* vtrue1 = vtrue0;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = etrue0;
+ Node* vfalse1 = vtrue0;
+ {
+ // We need to grow the {elements} for {object}.
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable =
+ (flags & GrowFastElementsFlag::kDoubleElements)
+ ? CodeFactory::GrowFastDoubleElements(isolate())
+ : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ properties);
+ vfalse1 = efalse1 = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ object, ChangeInt32ToSmi(index), jsgraph()->NoContextConstant(),
+ efalse1);
+
+ // Ensure that we were able to grow the {elements}.
+ // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
+ // but maybe we should just introduce a reason that makes sense.
+ efalse1 = if_false1 = graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeReason::kSmi), ObjectIsSmi(vfalse1),
+ frame_state, efalse1, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ etrue0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_true0);
+
+ // For JSArray {object}s we also need to update the "length".
+ if (flags & GrowFastElementsFlag::kArrayObject) {
+ // Compute the new {length}.
+ Node* object_length = ChangeInt32ToSmi(graph()->NewNode(
+ machine()->Int32Add(), index, jsgraph()->Int32Constant(1)));
+
+ // Update the "length" property of the {object}.
+ etrue0 =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSArrayLength(FAST_ELEMENTS)),
+ object, object_length, etrue0, if_true0);
+ }
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0 = elements;
+ {
+    // For non-holey {elements} we still need to verify that the {index} is
+    // in bounds; for holey {elements} the check above already guards the
+    // index (and the operator forces {index} to be unsigned).
+ if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
+ Node* check1 =
+ graph()->NewNode(machine()->Uint32LessThan(), index, length);
+ efalse0 = if_false0 = graph()->NewNode(
+ common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check1,
+ frame_state, efalse0, if_false0);
+ }
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
+ vfalse0, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerTransitionElementsKind(Node* node, Node* effect,
+ Node* control) {
+ ElementsTransition const transition = ElementsTransitionOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* source_map = node->InputAt(1);
+ Node* target_map = node->InputAt(2);
+
+ // Load the current map of {object}.
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
+ effect, control);
+
+ // Check if {object_map} is the same as {source_map}.
+ Node* check =
+ graph()->NewNode(machine()->WordEqual(), object_map, source_map);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ // Migrate the {object} from {source_map} to {target_map}.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ {
+ switch (transition) {
+ case ElementsTransition::kFastTransition: {
+ // In-place migration of {object}, just store the {target_map}.
+ etrue =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ object, target_map, etrue, if_true);
+ break;
+ }
+ case ElementsTransition::kSlowTransition: {
+ // Instance migration, call out to the runtime for {object}.
+ Operator::Properties properties =
+ Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ etrue = graph()->NewNode(
+ common()->Call(desc), jsgraph()->CEntryStubConstant(1), object,
+ target_map,
+ jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+ jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(), etrue,
+ if_true);
+ break;
+ }
+ }
+ }
+
+ // Nothing to do if the {object} doesn't have the {source_map}.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ return ValueEffectControl(nullptr, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
+ Node* control) {
+ ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
+ Node* buffer = node->InputAt(0);
+ Node* base = node->InputAt(1);
+ Node* external = node->InputAt(2);
+ Node* index = node->InputAt(3);
+
+  // We need to keep the {buffer} alive so that the GC will not release the
+  // ArrayBuffer (if any) as long as we are still operating on it.
+ effect = graph()->NewNode(common()->Retain(), buffer, effect);
+
+ // Compute the effective storage pointer.
+ Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
+ external, effect, control);
+
+ // Perform the actual typed element access.
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForTypedArrayElement(array_type, true)),
+ storage, index, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
+ Node* control) {
+ ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
+ Node* buffer = node->InputAt(0);
+ Node* base = node->InputAt(1);
+ Node* external = node->InputAt(2);
+ Node* index = node->InputAt(3);
+ Node* value = node->InputAt(4);
+
+  // We need to keep the {buffer} alive so that the GC will not release the
+  // ArrayBuffer (if any) as long as we are still operating on it.
+ effect = graph()->NewNode(common()->Retain(), buffer, effect);
+
+ // Compute the effective storage pointer.
+ Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
+ external, effect, control);
+
+ // Perform the actual typed element access.
+ effect = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForTypedArrayElement(array_type, true)),
+ storage, index, value, effect, control);
+
+ return ValueEffectControl(nullptr, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
+ Node* control) {
+ // Nothing to be done if a fast hardware instruction is available.
+ if (machine()->Float64RoundUp().IsSupported()) {
+ return ValueEffectControl(node, effect, control);
+ }
+
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const zero = jsgraph()->Float64Constant(0.0);
+ Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+ Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+ Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+ Node* const input = node->InputAt(0);
+
+ // General case for ceil.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if temp1 < input then
+ // temp1 + 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+ // -0 - temp3
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
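+  //
+  // For example (informally, assuming round-to-nearest-even arithmetic):
+  // for input 2.5, temp1 = (2^52 + 2.5) - 2^52 == 2.0 (the addition rounds
+  // to even), temp1 < input, so the result is temp1 + 1 == 3.0 == ceil(2.5);
+  // for input -2.5, temp1 = 2.5 and temp2 = 2.0, temp1 < temp2 does not
+  // hold, so temp3 = 2.0 and the result is -0 - 2.0 == -2.0 == ceil(-2.5).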
+
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0;
+ {
+ Node* check1 =
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* temp1 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+ vfalse1 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp1, input),
+ graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ input, minus_two_52);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = input;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2;
+ {
+ Node* temp1 =
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+ Node* temp2 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+ Node* temp3 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+ graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+ vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+ }
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, merge0);
+ return ValueEffectControl(value, effect, merge0);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
+ Node* control) {
+ // Nothing to be done if a fast hardware instruction is available.
+ if (machine()->Float64RoundDown().IsSupported()) {
+ return ValueEffectControl(node, effect, control);
+ }
+
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const zero = jsgraph()->Float64Constant(0.0);
+ Node* const minus_one = jsgraph()->Float64Constant(-1.0);
+ Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+ Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+ Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+ Node* const input = node->InputAt(0);
+
+ // General case for floor.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if input < temp1 then
+ // temp1 - 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // if temp2 < temp1 then
+ // -1 - temp2
+ // else
+ // -0 - temp2
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
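+  //
+  // For example (informally, assuming round-to-nearest-even arithmetic):
+  // for input 2.7, temp1 = (2^52 + 2.7) - 2^52 == 3.0, input < temp1, so
+  // the result is temp1 - 1 == 2.0 == floor(2.7); for input -2.5,
+  // temp1 = 2.5 and temp2 = 2.0, temp2 < temp1, so the result is
+  // -1 - 2.0 == -3.0 == floor(-2.5).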
+
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0;
+ {
+ Node* check1 =
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* temp1 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+ vfalse1 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+ graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ input, minus_two_52);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = input;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2;
+ {
+ Node* temp1 =
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+ Node* temp2 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+ vfalse2 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
+ graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
+ }
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, merge0);
+ return ValueEffectControl(value, effect, merge0);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
+ Node* control) {
+ // Nothing to be done if a fast hardware instruction is available.
+ if (machine()->Float64RoundTruncate().IsSupported()) {
+ return ValueEffectControl(node, effect, control);
+ }
+
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const zero = jsgraph()->Float64Constant(0.0);
+ Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+ Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+ Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
+ Node* const input = node->InputAt(0);
+
+ // General case for trunc.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if input < temp1 then
+ // temp1 - 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
+ // -0 - temp3
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
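+  //
+  // For example (informally, assuming round-to-nearest-even arithmetic):
+  // for input 2.7, temp1 = (2^52 + 2.7) - 2^52 == 3.0, input < temp1, so
+  // the result is temp1 - 1 == 2.0 == trunc(2.7); for input -2.5,
+  // temp1 = 2.5 and temp2 = 2.0, temp1 < temp2 does not hold, so
+  // temp3 = temp2 == 2.0 and the result is -0 - 2.0 == -2.0 == trunc(-2.5).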
+
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0;
+ {
+ Node* check1 =
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* temp1 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+ vfalse1 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+ graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ input, minus_two_52);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = input;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2;
+ {
+ Node* temp1 =
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+ Node* temp2 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+ Node* temp3 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
+ graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
+ vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
+ }
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, merge0);
+ return ValueEffectControl(value, effect, merge0);
+}
+
+Factory* EffectControlLinearizer::factory() const {
+ return isolate()->factory();
+}
+
+Isolate* EffectControlLinearizer::isolate() const {
+ return jsgraph()->isolate();
+}
+
+Operator const* EffectControlLinearizer::ToNumberOperator() {
+ if (!to_number_operator_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ Operator::kEliminatable);
+ to_number_operator_.set(common()->Call(desc));
+ }
+ return to_number_operator_.get();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
new file mode 100644
index 0000000000..98f08c7b12
--- /dev/null
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -0,0 +1,203 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
+#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class CommonOperatorBuilder;
+class SimplifiedOperatorBuilder;
+class MachineOperatorBuilder;
+class JSGraph;
+class Graph;
+class Schedule;
+
+class EffectControlLinearizer {
+ public:
+ EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone);
+
+ void Run();
+
+ private:
+ void ProcessNode(Node* node, Node** frame_state, Node** effect,
+ Node** control);
+
+ struct ValueEffectControl {
+ Node* value;
+ Node* effect;
+ Node* control;
+ ValueEffectControl(Node* value, Node* effect, Node* control)
+ : value(value), effect(effect), control(control) {}
+ };
+
+ bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
+ Node** control);
+ ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeInt32ToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeUint32ToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedToInt32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckMaps(Node* node, Node* frame_state, Node* effect,
+ Node* control);
+ ValueEffectControl LowerCheckNumber(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckString(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
+ Node* control);
+ ValueEffectControl LowerCheckTaggedPointer(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckTaggedSigned(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32Div(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32Mod(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedUint32Div(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedUint32Mod(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control);
+ ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerCheckedTruncateTaggedToWord32(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsReceiver(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsSmi(Node* node, Node* effect, Node* control);
+ ValueEffectControl LowerObjectIsString(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerConvertTaggedHoleToUndefined(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerEnsureWritableFastElements(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerMaybeGrowFastElements(Node* node, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl LowerTransitionElementsKind(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerLoadTypedElement(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerStoreTypedElement(Node* node, Node* effect,
+ Node* control);
+
+ // Lowering of optional operators.
+ ValueEffectControl LowerFloat64RoundUp(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
+ Node* control);
+
+ ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
+ Node* control);
+ ValueEffectControl BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+ Node* value, Node* frame_state,
+ Node* effect, Node* control);
+ ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
+ CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
+ Node* control);
+
+ Node* ChangeInt32ToSmi(Node* value);
+ Node* ChangeUint32ToSmi(Node* value);
+ Node* ChangeInt32ToFloat64(Node* value);
+ Node* ChangeUint32ToFloat64(Node* value);
+ Node* ChangeSmiToInt32(Node* value);
+ Node* ObjectIsSmi(Node* value);
+
+ Node* SmiMaxValueConstant();
+ Node* SmiShiftBitsConstant();
+
+ Factory* factory() const;
+ Isolate* isolate() const;
+ JSGraph* jsgraph() const { return js_graph_; }
+ Graph* graph() const;
+ Schedule* schedule() const { return schedule_; }
+ Zone* temp_zone() const { return temp_zone_; }
+ CommonOperatorBuilder* common() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ MachineOperatorBuilder* machine() const;
+
+ Operator const* ToNumberOperator();
+
+ JSGraph* js_graph_;
+ Schedule* schedule_;
+ Zone* temp_zone_;
+ RegionObservability region_observability_ = RegionObservability::kObservable;
+
+ SetOncePointer<Operator const> to_number_operator_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
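
The header above fixes one calling convention for every lowering: take the node to rewrite plus the current effect and control edges, and return a ValueEffectControl triple so the linearizer can keep threading a single effect/control chain. A minimal standalone sketch of that convention, using toy Node/Graph types rather than the real V8 API (the shift-by-32 Smi encoding shown is the usual 64-bit scheme, assumed here purely for illustration):

#include <initializer_list>
#include <memory>
#include <vector>

// Toy stand-ins for v8::internal::compiler types; illustration only.
struct Node {
  const char* op;
  std::vector<Node*> inputs;
};

struct ValueEffectControl {  // mirrors the struct in the header above
  Node* value;
  Node* effect;
  Node* control;
};

struct Graph {
  std::vector<std::unique_ptr<Node>> nodes;
  Node* NewNode(const char* op, std::initializer_list<Node*> inputs) {
    nodes.push_back(std::unique_ptr<Node>(new Node{op, inputs}));
    return nodes.back().get();
  }
};

// A pure lowering in the style of LowerChangeInt31ToTaggedSigned: it only
// creates value nodes, so effect and control pass through unchanged.
ValueEffectControl LowerChangeInt31ToTaggedSigned(Graph* g, Node* value,
                                                  Node* effect,
                                                  Node* control) {
  Node* wide = g->NewNode("ChangeInt32ToInt64", {value});
  Node* bits = g->NewNode("Int64Constant<32>", {});  // SmiShiftBitsConstant()
  Node* smi = g->NewNode("Word64Shl", {wide, bits});
  return {smi, effect, control};
}

int main() {
  Graph g;
  Node* start = g.NewNode("Start", {});
  Node* one = g.NewNode("Int32Constant<1>", {});
  ValueEffectControl result =
      LowerChangeInt31ToTaggedSigned(&g, one, start, start);
  return result.value == result.effect;  // 0: distinct value/effect nodes
}

Checked lowerings (the LowerCheck* family) instead extend the chain: they append new effectful nodes and return the last one as the effect, which is why all three edges are passed and returned explicitly.
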
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 313b6396dd..c69b86c488 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/js-graph.h"
#include "src/counters.h"
@@ -28,8 +29,7 @@ EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
escape_analysis_(escape_analysis),
zone_(zone),
fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
- exists_virtual_allocate_(true) {}
-
+ exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
Reduction EscapeAnalysisReducer::Reduce(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
@@ -105,7 +105,7 @@ Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
fully_reduced_.Add(node->id());
}
if (Node* rep = escape_analysis()->GetReplacement(node)) {
- counters()->turbo_escape_loads_replaced()->Increment();
+ isolate()->counters()->turbo_escape_loads_replaced()->Increment();
TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
ReplaceWithValue(node, rep);
@@ -138,7 +138,7 @@ Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
}
if (escape_analysis()->IsVirtual(node)) {
RelaxEffectsAndControls(node);
- counters()->turbo_escape_allocs_replaced()->Increment();
+ isolate()->counters()->turbo_escape_allocs_replaced()->Increment();
TRACE("Removed allocate #%d from effect chain\n", node->id());
return Changed(node);
}
@@ -268,7 +268,7 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
}
}
if (node->opcode() == IrOpcode::kFrameState) {
- Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
if (Node* ret =
ReduceDeoptState(outer_frame_state, effect, multiple_users_rec)) {
@@ -277,7 +277,7 @@ Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
node = clone = jsgraph()->graph()->CloneNode(node);
TRACE(" to #%d\n", node->id());
}
- NodeProperties::ReplaceFrameStateInput(node, 0, ret);
+ NodeProperties::ReplaceFrameStateInput(node, ret);
}
}
}
@@ -328,40 +328,19 @@ Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
}
-Counters* EscapeAnalysisReducer::counters() const {
- return jsgraph_->isolate()->counters();
-}
-
-
-class EscapeAnalysisVerifier final : public AdvancedReducer {
- public:
- EscapeAnalysisVerifier(Editor* editor, EscapeAnalysis* escape_analysis)
- : AdvancedReducer(editor), escape_analysis_(escape_analysis) {}
-
- Reduction Reduce(Node* node) final {
- switch (node->opcode()) {
- case IrOpcode::kAllocate:
- CHECK(!escape_analysis_->IsVirtual(node));
- break;
- default:
- break;
- }
- return NoChange();
- }
-
- private:
- EscapeAnalysis* escape_analysis_;
-};
-
void EscapeAnalysisReducer::VerifyReplacement() const {
#ifdef DEBUG
- GraphReducer graph_reducer(zone(), jsgraph()->graph());
- EscapeAnalysisVerifier verifier(&graph_reducer, escape_analysis());
- graph_reducer.AddReducer(&verifier);
- graph_reducer.ReduceGraph();
+ AllNodes all(zone(), jsgraph()->graph());
+ for (Node* node : all.reachable) {
+ if (node->opcode() == IrOpcode::kAllocate) {
+ CHECK(!escape_analysis_->IsVirtual(node));
+ }
+ }
#endif // DEBUG
}
+Isolate* EscapeAnalysisReducer::isolate() const { return jsgraph_->isolate(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
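
The rewritten VerifyReplacement walks every node reachable from the graph end directly instead of running a GraphReducer with a one-off verifier class. A self-contained model of that walk, with toy types standing in for AllNodes and the opcode check (assumed shapes, not the real API):

#include <cassert>
#include <unordered_set>
#include <vector>

constexpr int kAllocate = 1;

struct Node {
  int opcode;
  std::vector<Node*> inputs;  // value/effect/control inputs
};

// Everything reachable from `end` via input edges, the same set that
// AllNodes(zone, graph).reachable enumerates.
std::vector<Node*> Reachable(Node* end) {
  std::vector<Node*> order{end};
  std::unordered_set<Node*> seen{end};
  for (size_t i = 0; i < order.size(); ++i) {
    for (Node* input : order[i]->inputs) {
      if (seen.insert(input).second) order.push_back(input);
    }
  }
  return order;
}

void VerifyNoVirtualAllocations(Node* end,
                                const std::unordered_set<Node*>& virtuals) {
  for (Node* node : Reachable(end)) {
    if (node->opcode == kAllocate) assert(virtuals.count(node) == 0);
  }
}

int main() {
  Node alloc{kAllocate, {}};
  Node end{0, {&alloc}};
  VerifyNoVirtualAllocations(&end, {});  // passes: no allocation is virtual
  return 0;
}
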
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 12487b1dcf..ad6747929c 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -9,29 +9,22 @@
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-reducer.h"
-
namespace v8 {
namespace internal {
-
-// Forward declarations.
-class Counters;
-
-
namespace compiler {
// Forward declarations.
class JSGraph;
-
class EscapeAnalysisReducer final : public AdvancedReducer {
public:
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysis* escape_analysis, Zone* zone);
Reduction Reduce(Node* node) final;
- void SetExistsVirtualAllocate(bool exists) {
- exists_virtual_allocate_ = exists;
- }
+
+ // Verifies that all virtual allocation nodes have been dealt with. Run it
+ // after this reducer has been applied. Has no effect in release mode.
void VerifyReplacement() const;
private:
@@ -50,12 +43,12 @@ class EscapeAnalysisReducer final : public AdvancedReducer {
JSGraph* jsgraph() const { return jsgraph_; }
EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
Zone* zone() const { return zone_; }
- Counters* counters() const;
+ Isolate* isolate() const;
JSGraph* const jsgraph_;
EscapeAnalysis* escape_analysis_;
Zone* const zone_;
- // _visited marks nodes we already processed (allocs, loads, stores)
+ // This bit vector marks nodes we already processed (allocs, loads, stores)
// and nodes that do not need a visit from ReduceDeoptState etc.
BitVector fully_reduced_;
bool exists_virtual_allocate_;
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index b1a12b201e..437c01fd15 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -24,7 +24,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-using Alias = EscapeStatusAnalysis::Alias;
+typedef NodeId Alias;
#ifdef DEBUG
#define TRACE(...) \
@@ -35,6 +35,90 @@ using Alias = EscapeStatusAnalysis::Alias;
#define TRACE(...)
#endif
+// EscapeStatusAnalysis determines for each allocation whether it escapes.
+class EscapeStatusAnalysis : public ZoneObject {
+ public:
+ enum Status {
+ kUnknown = 0u,
+ kTracked = 1u << 0,
+ kEscaped = 1u << 1,
+ kOnStack = 1u << 2,
+ kVisited = 1u << 3,
+ // A node is dangling if it is a load of some kind and does not have
+ // an effect successor.
+ kDanglingComputed = 1u << 4,
+ kDangling = 1u << 5,
+ // A node is an effect branch point if it has more than 2 non-dangling
+ // effect successors.
+ kBranchPointComputed = 1u << 6,
+ kBranchPoint = 1u << 7,
+ kInQueue = 1u << 8
+ };
+ typedef base::Flags<Status, uint16_t> StatusFlags;
+
+ void RunStatusAnalysis();
+
+ bool IsVirtual(Node* node);
+ bool IsEscaped(Node* node);
+ bool IsAllocation(Node* node);
+
+ bool IsInQueue(NodeId id);
+ void SetInQueue(NodeId id, bool on_stack);
+
+ void DebugPrint();
+
+ EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
+ Zone* zone);
+ void EnqueueForStatusAnalysis(Node* node);
+ bool SetEscaped(Node* node);
+ bool IsEffectBranchPoint(Node* node);
+ bool IsDanglingEffectNode(Node* node);
+ void ResizeStatusVector();
+ size_t GetStatusVectorSize();
+ bool IsVirtual(NodeId id);
+
+ Graph* graph() const { return graph_; }
+ void AssignAliases();
+ Alias GetAlias(NodeId id) const { return aliases_[id]; }
+ const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
+ Alias AliasCount() const { return next_free_alias_; }
+ static const Alias kNotReachable;
+ static const Alias kUntrackable;
+
+ bool IsNotReachable(Node* node);
+
+ private:
+ void Process(Node* node);
+ void ProcessAllocate(Node* node);
+ void ProcessFinishRegion(Node* node);
+ void ProcessStoreField(Node* node);
+ void ProcessStoreElement(Node* node);
+ bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
+ return CheckUsesForEscape(node, node, phi_escaping);
+ }
+ bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
+ void RevisitUses(Node* node);
+ void RevisitInputs(Node* node);
+
+ Alias NextAlias() { return next_free_alias_++; }
+
+ bool HasEntry(Node* node);
+
+ bool IsAllocationPhi(Node* node);
+
+ ZoneVector<Node*> stack_;
+ EscapeAnalysis* object_analysis_;
+ Graph* const graph_;
+ ZoneVector<StatusFlags> status_;
+ Alias next_free_alias_;
+ ZoneVector<Node*> status_stack_;
+ ZoneVector<Alias> aliases_;
+
+ DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
+
const Alias EscapeStatusAnalysis::kNotReachable =
std::numeric_limits<Alias>::max();
const Alias EscapeStatusAnalysis::kUntrackable =
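
StatusFlags packs the per-node state above into a uint16_t; base::Flags is a type-safe wrapper over exactly the bit arithmetic sketched below (flag values copied from the enum above, the usage itself hypothetical):

#include <cassert>
#include <cstdint>

enum Status : uint16_t {
  kUnknown = 0u,
  kTracked = 1u << 0,
  kEscaped = 1u << 1,
  kOnStack = 1u << 2,
};
typedef uint16_t StatusFlags;  // stand-in for base::Flags<Status, uint16_t>

int main() {
  StatusFlags s = kUnknown;
  s |= kTracked;                      // allocation becomes tracked
  assert((s & kTracked) && !(s & kEscaped));
  s |= kEscaped;                      // what SetEscaped(node) records
  assert(s & (kTracked | kEscaped));  // the HasEntry test in the next hunk
  s = static_cast<StatusFlags>(s & ~kOnStack);  // clearing a flag
  return 0;
}
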
@@ -475,14 +559,11 @@ EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
: stack_(zone),
object_analysis_(object_analysis),
graph_(graph),
- zone_(zone),
status_(zone),
next_free_alias_(0),
status_stack_(zone),
aliases_(zone) {}
-EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
-
bool EscapeStatusAnalysis::HasEntry(Node* node) {
return status_[node->id()] & (kTracked | kEscaped);
}
@@ -712,6 +793,12 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
}
break;
case IrOpcode::kSelect:
+ // TODO(mstarzinger): The following list of operators will eventually be
+ // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+ case IrOpcode::kObjectIsCallable:
+ case IrOpcode::kObjectIsNumber:
+ case IrOpcode::kObjectIsString:
+ case IrOpcode::kObjectIsUndetectable:
if (SetEscaped(rep)) {
TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
rep->id(), rep->op()->mnemonic(), use->id(),
@@ -721,7 +808,8 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
break;
default:
if (use->op()->EffectInputCount() == 0 &&
- uses->op()->EffectInputCount() > 0) {
+ uses->op()->EffectInputCount() > 0 &&
+ !IrOpcode::IsJsOpcode(use->opcode())) {
TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
use->op()->mnemonic());
UNREACHABLE();
@@ -759,8 +847,10 @@ void EscapeStatusAnalysis::DebugPrint() {
EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
Zone* zone)
- : status_analysis_(this, graph, zone),
+ : zone_(zone),
+ slot_not_analyzed_(graph->NewNode(common->NumberConstant(0x1c0debad))),
common_(common),
+ status_analysis_(new (zone) EscapeStatusAnalysis(this, graph, zone)),
virtual_states_(zone),
replacements_(zone),
cache_(nullptr) {}
@@ -769,13 +859,13 @@ EscapeAnalysis::~EscapeAnalysis() {}
void EscapeAnalysis::Run() {
replacements_.resize(graph()->NodeCount());
- status_analysis_.AssignAliases();
- if (status_analysis_.AliasCount() > 0) {
+ status_analysis_->AssignAliases();
+ if (status_analysis_->AliasCount() > 0) {
cache_ = new (zone()) MergeCache(zone());
replacements_.resize(graph()->NodeCount());
- status_analysis_.ResizeStatusVector();
+ status_analysis_->ResizeStatusVector();
RunObjectAnalysis();
- status_analysis_.RunStatusAnalysis();
+ status_analysis_->RunStatusAnalysis();
}
}
@@ -853,11 +943,11 @@ void EscapeAnalysis::RunObjectAnalysis() {
while (!queue.empty()) {
Node* node = queue.back();
queue.pop_back();
- status_analysis_.SetInQueue(node->id(), false);
+ status_analysis_->SetInQueue(node->id(), false);
if (Process(node)) {
for (Edge edge : node->use_edges()) {
Node* use = edge.from();
- if (IsNotReachable(use)) {
+ if (status_analysis_->IsNotReachable(use)) {
continue;
}
if (NodeProperties::IsEffectEdge(edge)) {
@@ -865,14 +955,14 @@ void EscapeAnalysis::RunObjectAnalysis() {
// We need DFS to avoid some duplication of VirtualStates and
// VirtualObjects, and we want to delay phis to improve performance.
if (use->opcode() == IrOpcode::kEffectPhi) {
- if (!status_analysis_.IsInQueue(use->id())) {
+ if (!status_analysis_->IsInQueue(use->id())) {
queue.push_front(use);
}
} else if ((use->opcode() != IrOpcode::kLoadField &&
use->opcode() != IrOpcode::kLoadElement) ||
- !IsDanglingEffectNode(use)) {
- if (!status_analysis_.IsInQueue(use->id())) {
- status_analysis_.SetInQueue(use->id(), true);
+ !status_analysis_->IsDanglingEffectNode(use)) {
+ if (!status_analysis_->IsInQueue(use->id())) {
+ status_analysis_->SetInQueue(use->id(), true);
queue.push_back(use);
}
} else {
@@ -1008,8 +1098,8 @@ void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
if (!obj->AllFieldsClear()) {
obj = CopyForModificationAt(obj, state, node);
obj->ClearAllFields();
- TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
- obj->id());
+ TRACE("Cleared all fields of @%d:#%d\n",
+ status_analysis_->GetAlias(obj->id()), obj->id());
}
}
break;
@@ -1035,7 +1125,7 @@ VirtualObject* EscapeAnalysis::CopyForModificationAt(VirtualObject* obj,
Node* node) {
if (obj->NeedCopyForModification()) {
state = CopyForModificationAt(state, node);
- return state->Copy(obj, GetAlias(obj->id()));
+ return state->Copy(obj, status_analysis_->GetAlias(obj->id()));
}
return obj;
}
@@ -1045,7 +1135,8 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
#ifdef DEBUG
if (node->opcode() != IrOpcode::kLoadField &&
node->opcode() != IrOpcode::kLoadElement &&
- node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
+ node->opcode() != IrOpcode::kLoad &&
+ status_analysis_->IsDanglingEffectNode(node)) {
PrintF("Dangeling effect node: #%d (%s)\n", node->id(),
node->op()->mnemonic());
UNREACHABLE();
@@ -1062,8 +1153,8 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
static_cast<void*>(virtual_states_[effect->id()]),
effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
node->id());
- if (IsEffectBranchPoint(effect) ||
- OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ if (status_analysis_->IsEffectBranchPoint(effect) ||
+ OperatorProperties::HasFrameStateInput(node->op())) {
virtual_states_[node->id()]->SetCopyRequired();
TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
effect->id());
@@ -1075,7 +1166,7 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
void EscapeAnalysis::ProcessStart(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kStart);
virtual_states_[node->id()] =
- new (zone()) VirtualState(node, zone(), AliasCount());
+ new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
}
bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
@@ -1084,7 +1175,8 @@ bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
VirtualState* mergeState = virtual_states_[node->id()];
if (!mergeState) {
- mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+ mergeState =
+ new (zone()) VirtualState(node, zone(), status_analysis_->AliasCount());
virtual_states_[node->id()] = mergeState;
changed = true;
TRACE("Effect Phi #%d got new virtual state %p.\n", node->id(),
@@ -1102,7 +1194,8 @@ bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
if (state) {
cache_->states().push_back(state);
if (state == mergeState) {
- mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+ mergeState = new (zone())
+ VirtualState(node, zone(), status_analysis_->AliasCount());
virtual_states_[node->id()] = mergeState;
changed = true;
}
@@ -1122,7 +1215,7 @@ bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
TRACE("Merge %s the node.\n", changed ? "changed" : "did not change");
if (changed) {
- status_analysis_.ResizeStatusVector();
+ status_analysis_->ResizeStatusVector();
}
return changed;
}
@@ -1131,7 +1224,7 @@ void EscapeAnalysis::ProcessAllocation(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
ForwardVirtualState(node);
VirtualState* state = virtual_states_[node->id()];
- Alias alias = GetAlias(node->id());
+ Alias alias = status_analysis_->GetAlias(node->id());
// Check if we have already processed this node.
if (state->VirtualObjectFromAlias(alias)) {
@@ -1163,19 +1256,16 @@ void EscapeAnalysis::ProcessFinishRegion(Node* node) {
Node* allocation = NodeProperties::GetValueInput(node, 0);
if (allocation->opcode() == IrOpcode::kAllocate) {
VirtualState* state = virtual_states_[node->id()];
- VirtualObject* obj = state->VirtualObjectFromAlias(GetAlias(node->id()));
+ VirtualObject* obj =
+ state->VirtualObjectFromAlias(status_analysis_->GetAlias(node->id()));
DCHECK_NOT_NULL(obj);
obj->SetInitialized();
}
}
-Node* EscapeAnalysis::replacement(NodeId id) {
- if (id >= replacements_.size()) return nullptr;
- return replacements_[id];
-}
-
Node* EscapeAnalysis::replacement(Node* node) {
- return replacement(node->id());
+ if (node->id() >= replacements_.size()) return nullptr;
+ return replacements_[node->id()];
}
bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
@@ -1206,41 +1296,25 @@ Node* EscapeAnalysis::ResolveReplacement(Node* node) {
}
Node* EscapeAnalysis::GetReplacement(Node* node) {
- return GetReplacement(node->id());
-}
-
-Node* EscapeAnalysis::GetReplacement(NodeId id) {
- Node* node = nullptr;
- while (replacement(id)) {
- node = replacement(id);
- id = node->id();
+ Node* result = nullptr;
+ while (replacement(node)) {
+ node = result = replacement(node);
}
- return node;
+ return result;
}
bool EscapeAnalysis::IsVirtual(Node* node) {
- if (node->id() >= status_analysis_.GetStatusVectorSize()) {
+ if (node->id() >= status_analysis_->GetStatusVectorSize()) {
return false;
}
- return status_analysis_.IsVirtual(node);
+ return status_analysis_->IsVirtual(node);
}
bool EscapeAnalysis::IsEscaped(Node* node) {
- if (node->id() >= status_analysis_.GetStatusVectorSize()) {
+ if (node->id() >= status_analysis_->GetStatusVectorSize()) {
return false;
}
- return status_analysis_.IsEscaped(node);
-}
-
-bool EscapeAnalysis::SetEscaped(Node* node) {
- return status_analysis_.SetEscaped(node);
-}
-
-VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
- if (VirtualState* states = virtual_states_[at->id()]) {
- return states->VirtualObjectFromAlias(GetAlias(id));
- }
- return nullptr;
+ return status_analysis_->IsEscaped(node);
}
bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
@@ -1253,11 +1327,24 @@ bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
return false;
}
-int EscapeAnalysis::OffsetFromAccess(Node* node) {
- DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
- return OpParameter<FieldAccess>(node).offset / kPointerSize;
+namespace {
+
+int OffsetForFieldAccess(Node* node) {
+ FieldAccess access = FieldAccessOf(node->op());
+ DCHECK_EQ(access.offset % kPointerSize, 0);
+ return access.offset / kPointerSize;
}
+int OffsetForElementAccess(Node* node, int index) {
+ ElementAccess access = ElementAccessOf(node->op());
+ DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
+ kPointerSizeLog2);
+ DCHECK_EQ(access.header_size % kPointerSize, 0);
+ return access.header_size / kPointerSize + index;
+}
+
+} // namespace
+
void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
VirtualState* state) {
TRACE("Load #%d from phi #%d", load->id(), from->id());
@@ -1269,7 +1356,7 @@ void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
}
cache_->LoadVirtualObjectsForFieldsFrom(state,
- status_analysis_.GetAliasMap());
+ status_analysis_->GetAliasMap());
if (cache_->objects().size() == cache_->fields().size()) {
cache_->GetFields(offset);
if (cache_->fields().size() == cache_->objects().size()) {
@@ -1280,7 +1367,7 @@ void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache_->fields().front());
- status_analysis_.ResizeStatusVector();
+ status_analysis_->ResizeStatusVector();
SetReplacement(load, phi);
TRACE(" got phi created.\n");
} else {
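
The OffsetForFieldAccess/OffsetForElementAccess helpers introduced above convert byte-based accesses into pointer-sized slot indices for the virtual object's field table. A worked example under an assumed 64-bit layout (kPointerSize == 8; the byte offsets are illustrative, not taken from any real object map):

#include <cassert>

constexpr int kPointerSize = 8;  // assumed 64-bit build

int OffsetForFieldAccess(int byte_offset) {
  assert(byte_offset % kPointerSize == 0);
  return byte_offset / kPointerSize;
}

int OffsetForElementAccess(int header_size, int index) {
  assert(header_size % kPointerSize == 0);
  return header_size / kPointerSize + index;
}

int main() {
  assert(OffsetForFieldAccess(24) == 3);       // field at byte 24 -> slot 3
  assert(OffsetForElementAccess(16, 2) == 4);  // 16-byte header + index 2
  return 0;
}
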
@@ -1300,11 +1387,9 @@ void EscapeAnalysis::ProcessLoadField(Node* node) {
Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
VirtualState* state = virtual_states_[node->id()];
if (VirtualObject* object = GetVirtualObject(state, from)) {
- int offset = OffsetFromAccess(node);
- if (!object->IsTracked() ||
- static_cast<size_t>(offset) >= object->field_count()) {
- return;
- }
+ if (!object->IsTracked()) return;
+ int offset = OffsetForFieldAccess(node);
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* value = object->GetField(offset);
if (value) {
value = ResolveReplacement(value);
@@ -1312,8 +1397,8 @@ void EscapeAnalysis::ProcessLoadField(Node* node) {
// Record that the load has this alias.
UpdateReplacement(state, node, value);
} else if (from->opcode() == IrOpcode::kPhi &&
- OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
- int offset = OffsetFromAccess(node);
+ FieldAccessOf(node->op()).offset % kPointerSize == 0) {
+ int offset = OffsetForFieldAccess(node);
// Only binary phis are supported for now.
ProcessLoadFromPhi(offset, from, node, state);
} else {
@@ -1332,19 +1417,11 @@ void EscapeAnalysis::ProcessLoadElement(Node* node) {
index_node->opcode() != IrOpcode::kInt64Constant &&
index_node->opcode() != IrOpcode::kFloat32Constant &&
index_node->opcode() != IrOpcode::kFloat64Constant);
- ElementAccess access = OpParameter<ElementAccess>(node);
if (index.HasValue()) {
- int offset = index.Value() + access.header_size / kPointerSize;
if (VirtualObject* object = GetVirtualObject(state, from)) {
- CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
- CHECK_EQ(access.header_size % kPointerSize, 0);
-
- if (!object->IsTracked() ||
- static_cast<size_t>(offset) >= object->field_count()) {
- return;
- }
-
+ if (!object->IsTracked()) return;
+ int offset = OffsetForElementAccess(node, index.Value());
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* value = object->GetField(offset);
if (value) {
value = ResolveReplacement(value);
@@ -1352,15 +1429,14 @@ void EscapeAnalysis::ProcessLoadElement(Node* node) {
// Record that the load has this alias.
UpdateReplacement(state, node, value);
} else if (from->opcode() == IrOpcode::kPhi) {
- ElementAccess access = OpParameter<ElementAccess>(node);
- int offset = index.Value() + access.header_size / kPointerSize;
+ int offset = OffsetForElementAccess(node, index.Value());
ProcessLoadFromPhi(offset, from, node, state);
} else {
UpdateReplacement(state, node, nullptr);
}
} else {
// We have a load from a non-const index, cannot eliminate object.
- if (SetEscaped(from)) {
+ if (status_analysis_->SetEscaped(from)) {
TRACE(
"Setting #%d (%s) to escaped because load element #%d from non-const "
"index #%d (%s)\n",
@@ -1375,14 +1451,23 @@ void EscapeAnalysis::ProcessStoreField(Node* node) {
ForwardVirtualState(node);
Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
VirtualState* state = virtual_states_[node->id()];
- VirtualObject* obj = GetVirtualObject(state, to);
- int offset = OffsetFromAccess(node);
- if (obj && obj->IsTracked() &&
- static_cast<size_t>(offset) < obj->field_count()) {
+ if (VirtualObject* object = GetVirtualObject(state, to)) {
+ if (!object->IsTracked()) return;
+ int offset = OffsetForFieldAccess(node);
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
- if (obj->GetField(offset) != val) {
- obj = CopyForModificationAt(obj, state, node);
- obj->SetField(offset, val);
+ // TODO(mstarzinger): The following is a workaround to not track the code
+ // entry field in virtual JSFunction objects. We only ever store the inner
+ // pointer into the compile lazy stub in this field and the deoptimizer has
+ // this assumption hard-coded in {TranslatedState::MaterializeAt} as well.
+ if (val->opcode() == IrOpcode::kInt32Constant ||
+ val->opcode() == IrOpcode::kInt64Constant) {
+ DCHECK_EQ(JSFunction::kCodeEntryOffset, FieldAccessOf(node->op()).offset);
+ val = slot_not_analyzed_;
+ }
+ if (object->GetField(offset) != val) {
+ object = CopyForModificationAt(object, state, node);
+ object->SetField(offset, val);
}
}
}
@@ -1397,37 +1482,34 @@ void EscapeAnalysis::ProcessStoreElement(Node* node) {
index_node->opcode() != IrOpcode::kInt64Constant &&
index_node->opcode() != IrOpcode::kFloat32Constant &&
index_node->opcode() != IrOpcode::kFloat64Constant);
- ElementAccess access = OpParameter<ElementAccess>(node);
VirtualState* state = virtual_states_[node->id()];
- VirtualObject* obj = GetVirtualObject(state, to);
if (index.HasValue()) {
- int offset = index.Value() + access.header_size / kPointerSize;
- if (obj && obj->IsTracked() &&
- static_cast<size_t>(offset) < obj->field_count()) {
- CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kPointerSizeLog2);
- CHECK_EQ(access.header_size % kPointerSize, 0);
+ if (VirtualObject* object = GetVirtualObject(state, to)) {
+ if (!object->IsTracked()) return;
+ int offset = OffsetForElementAccess(node, index.Value());
+ if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
- if (obj->GetField(offset) != val) {
- obj = CopyForModificationAt(obj, state, node);
- obj->SetField(offset, val);
+ if (object->GetField(offset) != val) {
+ object = CopyForModificationAt(object, state, node);
+ object->SetField(offset, val);
}
}
} else {
// We have a store to a non-const index, cannot eliminate object.
- if (SetEscaped(to)) {
+ if (status_analysis_->SetEscaped(to)) {
TRACE(
"Setting #%d (%s) to escaped because store element #%d to non-const "
"index #%d (%s)\n",
to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
index_node->op()->mnemonic());
}
- if (obj && obj->IsTracked()) {
- if (!obj->AllFieldsClear()) {
- obj = CopyForModificationAt(obj, state, node);
- obj->ClearAllFields();
- TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
- obj->id());
+ if (VirtualObject* object = GetVirtualObject(state, to)) {
+ if (!object->IsTracked()) return;
+ if (!object->AllFieldsClear()) {
+ object = CopyForModificationAt(object, state, node);
+ object->ClearAllFields();
+ TRACE("Cleared all fields of @%d:#%d\n",
+ status_analysis_->GetAlias(object->id()), object->id());
}
}
}
@@ -1475,21 +1557,17 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
return nullptr;
}
-void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
- PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
- object->field_count());
- for (size_t i = 0; i < object->field_count(); ++i) {
- if (Node* f = object->GetField(i)) {
- PrintF(" Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
- }
- }
-}
-
void EscapeAnalysis::DebugPrintState(VirtualState* state) {
PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
- for (Alias alias = 0; alias < AliasCount(); ++alias) {
+ for (Alias alias = 0; alias < status_analysis_->AliasCount(); ++alias) {
if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
- DebugPrintObject(object, alias);
+ PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
+ object->field_count());
+ for (size_t i = 0; i < object->field_count(); ++i) {
+ if (Node* f = object->GetField(i)) {
+ PrintF(" Field %zu = #%d (%s)\n", i, f->id(), f->op()->mnemonic());
+ }
+ }
}
}
}
@@ -1511,17 +1589,17 @@ void EscapeAnalysis::DebugPrint() {
VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
Node* node) {
- if (node->id() >= status_analysis_.GetAliasMap().size()) return nullptr;
- Alias alias = GetAlias(node->id());
+ if (node->id() >= status_analysis_->GetAliasMap().size()) return nullptr;
+ Alias alias = status_analysis_->GetAlias(node->id());
if (alias >= state->size()) return nullptr;
return state->VirtualObjectFromAlias(alias);
}
bool EscapeAnalysis::ExistsVirtualAllocate() {
- for (size_t id = 0; id < status_analysis_.GetAliasMap().size(); ++id) {
- Alias alias = GetAlias(static_cast<NodeId>(id));
+ for (size_t id = 0; id < status_analysis_->GetAliasMap().size(); ++id) {
+ Alias alias = status_analysis_->GetAlias(static_cast<NodeId>(id));
if (alias < EscapeStatusAnalysis::kUntrackable) {
- if (status_analysis_.IsVirtual(static_cast<int>(id))) {
+ if (status_analysis_->IsVirtual(static_cast<int>(id))) {
return true;
}
}
@@ -1529,6 +1607,8 @@ bool EscapeAnalysis::ExistsVirtualAllocate() {
return false;
}
+Graph* EscapeAnalysis::graph() const { return status_analysis_->graph(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index c3f236d556..839e54ccd3 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_ESCAPE_ANALYSIS_H_
#define V8_COMPILER_ESCAPE_ANALYSIS_H_
-#include "src/base/flags.h"
#include "src/compiler/graph.h"
namespace v8 {
@@ -14,107 +13,15 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
-class EscapeAnalysis;
+class EscapeStatusAnalysis;
+class MergeCache;
class VirtualState;
class VirtualObject;
-// EscapeStatusAnalysis determines for each allocation whether it escapes.
-class EscapeStatusAnalysis {
- public:
- typedef NodeId Alias;
- ~EscapeStatusAnalysis();
-
- enum Status {
- kUnknown = 0u,
- kTracked = 1u << 0,
- kEscaped = 1u << 1,
- kOnStack = 1u << 2,
- kVisited = 1u << 3,
- // A node is dangling, if it is a load of some kind, and does not have
- // an effect successor.
- kDanglingComputed = 1u << 4,
- kDangling = 1u << 5,
- // A node is is an effect branch point, if it has more than 2 non-dangling
- // effect successors.
- kBranchPointComputed = 1u << 6,
- kBranchPoint = 1u << 7,
- kInQueue = 1u << 8
- };
- typedef base::Flags<Status, uint16_t> StatusFlags;
-
- void RunStatusAnalysis();
-
- bool IsVirtual(Node* node);
- bool IsEscaped(Node* node);
- bool IsAllocation(Node* node);
-
- bool IsInQueue(NodeId id);
- void SetInQueue(NodeId id, bool on_stack);
-
- void DebugPrint();
-
- EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
- Zone* zone);
- void EnqueueForStatusAnalysis(Node* node);
- bool SetEscaped(Node* node);
- bool IsEffectBranchPoint(Node* node);
- bool IsDanglingEffectNode(Node* node);
- void ResizeStatusVector();
- size_t GetStatusVectorSize();
- bool IsVirtual(NodeId id);
-
- Graph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
- void AssignAliases();
- Alias GetAlias(NodeId id) const { return aliases_[id]; }
- const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
- Alias AliasCount() const { return next_free_alias_; }
- static const Alias kNotReachable;
- static const Alias kUntrackable;
-
- bool IsNotReachable(Node* node);
-
- private:
- void Process(Node* node);
- void ProcessAllocate(Node* node);
- void ProcessFinishRegion(Node* node);
- void ProcessStoreField(Node* node);
- void ProcessStoreElement(Node* node);
- bool CheckUsesForEscape(Node* node, bool phi_escaping = false) {
- return CheckUsesForEscape(node, node, phi_escaping);
- }
- bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
- void RevisitUses(Node* node);
- void RevisitInputs(Node* node);
-
- Alias NextAlias() { return next_free_alias_++; }
-
- bool HasEntry(Node* node);
-
- bool IsAllocationPhi(Node* node);
-
- ZoneVector<Node*> stack_;
- EscapeAnalysis* object_analysis_;
- Graph* const graph_;
- Zone* const zone_;
- ZoneVector<StatusFlags> status_;
- Alias next_free_alias_;
- ZoneVector<Node*> status_stack_;
- ZoneVector<Alias> aliases_;
-
- DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
-};
-
-DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
-
-// Forward Declaration.
-class MergeCache;
-
// EscapeObjectAnalysis simulates stores to determine values of loads if
// an object is virtual and eliminated.
class EscapeAnalysis {
public:
- using Alias = EscapeStatusAnalysis::Alias;
EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
~EscapeAnalysis();
@@ -144,17 +51,12 @@ class EscapeAnalysis {
VirtualState* states);
void ForwardVirtualState(Node* node);
- int OffsetFromAccess(Node* node);
VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
Node* node);
- VirtualObject* GetVirtualObject(Node* at, NodeId id);
- bool SetEscaped(Node* node);
- Node* replacement(NodeId id);
Node* replacement(Node* node);
Node* ResolveReplacement(Node* node);
- Node* GetReplacement(NodeId id);
bool SetReplacement(Node* node, Node* rep);
bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
@@ -162,25 +64,15 @@ class EscapeAnalysis {
void DebugPrint();
void DebugPrintState(VirtualState* state);
- void DebugPrintObject(VirtualObject* state, Alias id);
- Graph* graph() const { return status_analysis_.graph(); }
- Zone* zone() const { return status_analysis_.zone(); }
+ Graph* graph() const;
+ Zone* zone() const { return zone_; }
CommonOperatorBuilder* common() const { return common_; }
- bool IsEffectBranchPoint(Node* node) {
- return status_analysis_.IsEffectBranchPoint(node);
- }
- bool IsDanglingEffectNode(Node* node) {
- return status_analysis_.IsDanglingEffectNode(node);
- }
- bool IsNotReachable(Node* node) {
- return status_analysis_.IsNotReachable(node);
- }
- Alias GetAlias(NodeId id) const { return status_analysis_.GetAlias(id); }
- Alias AliasCount() const { return status_analysis_.AliasCount(); }
-
- EscapeStatusAnalysis status_analysis_;
+
+ Zone* const zone_;
+ Node* const slot_not_analyzed_;
CommonOperatorBuilder* const common_;
+ EscapeStatusAnalysis* status_analysis_;
ZoneVector<VirtualState*> virtual_states_;
ZoneVector<Node*> replacements_;
MergeCache* cache_;
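
The class comment ("simulates stores to determine values of loads") is the heart of the analysis: while an allocation stays virtual, each store just updates a per-object field table, and a later load at a known slot is answered from that table so the memory operation can disappear along with the object. A toy model of that store-to-load forwarding (slot-indexed fields, plain ints standing in for value nodes):

#include <cassert>
#include <map>
#include <optional>

struct VirtualObject {
  std::map<int, int> fields;  // slot index -> id of the last value stored
};

void ProcessStoreField(VirtualObject& obj, int slot, int value_node) {
  obj.fields[slot] = value_node;  // simulate the store, emit nothing
}

std::optional<int> ProcessLoadField(const VirtualObject& obj, int slot) {
  auto it = obj.fields.find(slot);
  if (it == obj.fields.end()) return std::nullopt;  // unknown: keep the load
  return it->second;  // known: replace the load with the stored value
}

int main() {
  VirtualObject obj;
  ProcessStoreField(obj, /*slot=*/1, /*value_node=*/42);
  assert(ProcessLoadField(obj, 1) == 42);  // load forwarded
  assert(!ProcessLoadField(obj, 2));       // untracked slot: load survives
  return 0;
}
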
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 91827d028e..a02fb0121c 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -64,6 +64,12 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kConstructStub:
os << "CONSTRUCT_STUB";
break;
+ case FrameStateType::kGetterStub:
+ os << "GETTER_STUB";
+ break;
+ case FrameStateType::kSetterStub:
+ os << "SETTER_STUB";
+ break;
}
return os;
}
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 2552bcb758..0d0ec47f88 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -80,7 +80,9 @@ enum class FrameStateType {
kInterpretedFunction, // Represents an InterpretedFrame.
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
kTailCallerFunction, // Represents a frame removed by tail call elimination.
- kConstructStub // Represents a ConstructStubFrame.
+ kConstructStub, // Represents a ConstructStubFrame.
+ kGetterStub, // Represents a GetterStubFrame.
+ kSetterStub // Represents a SetterStubFrame.
};
class FrameStateFunctionInfo {
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index 3d93e1528f..e0284c8ab4 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -12,15 +12,13 @@ namespace v8 {
namespace internal {
namespace compiler {
-Frame::Frame(int fixed_frame_size_in_slots, const CallDescriptor* descriptor)
+Frame::Frame(int fixed_frame_size_in_slots)
: frame_slot_count_(fixed_frame_size_in_slots),
- callee_saved_slot_count_(0),
spill_slot_count_(0),
allocated_registers_(nullptr),
allocated_double_registers_(nullptr) {}
int Frame::AlignFrame(int alignment) {
- DCHECK_EQ(0, callee_saved_slot_count_);
int alignment_slots = alignment / kPointerSize;
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index d413d3e033..8d463dfb78 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -78,14 +78,10 @@ class CallDescriptor;
//
class Frame : public ZoneObject {
public:
- explicit Frame(int fixed_frame_size_in_slots,
- const CallDescriptor* descriptor);
+ explicit Frame(int fixed_frame_size_in_slots);
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
- inline int GetSavedCalleeRegisterSlotCount() const {
- return callee_saved_slot_count_;
- }
inline int GetSpillSlotCount() const { return spill_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
@@ -102,23 +98,20 @@ class Frame : public ZoneObject {
return !allocated_double_registers_->IsEmpty();
}
- int AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
- DCHECK_EQ(0, callee_saved_slot_count_);
+ void AlignSavedCalleeRegisterSlots(int alignment = kDoubleSize) {
int alignment_slots = alignment / kPointerSize;
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
frame_slot_count_ += delta;
}
- return delta;
+ spill_slot_count_ += delta;
}
void AllocateSavedCalleeRegisterSlots(int count) {
frame_slot_count_ += count;
- callee_saved_slot_count_ += count;
}
int AllocateSpillSlot(int width) {
- DCHECK_EQ(0, callee_saved_slot_count_);
int frame_slot_count_before = frame_slot_count_;
int slot = AllocateAlignedFrameSlot(width);
spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
@@ -128,7 +121,6 @@ class Frame : public ZoneObject {
int AlignFrame(int alignment = kDoubleSize);
int ReserveSpillSlots(size_t slot_count) {
- DCHECK_EQ(0, callee_saved_slot_count_);
DCHECK_EQ(0, spill_slot_count_);
spill_slot_count_ += static_cast<int>(slot_count);
frame_slot_count_ += static_cast<int>(slot_count);
@@ -140,19 +132,26 @@ class Frame : public ZoneObject {
private:
int AllocateAlignedFrameSlot(int width) {
- DCHECK(width == 4 || width == 8);
- // Skip one slot if necessary.
- if (width > kPointerSize) {
- DCHECK(width == kPointerSize * 2);
- frame_slot_count_++;
- frame_slot_count_ |= 1;
+ DCHECK(width == 4 || width == 8 || width == 16);
+ if (kPointerSize == 4) {
+ // Skip one slot if necessary.
+ if (width > kPointerSize) {
+ frame_slot_count_++;
+ frame_slot_count_ |= 1;
+ // 2 extra slots if width == 16.
+ frame_slot_count_ += (width & 16) / 8;
+ }
+ } else {
+ // No alignment when slots are 8 bytes.
+ DCHECK_EQ(8, kPointerSize);
+ // 1 extra slot if width == 16.
+ frame_slot_count_ += (width & 16) / 16;
}
return frame_slot_count_++;
}
private:
int frame_slot_count_;
- int callee_saved_slot_count_;
int spill_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
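
The widened AllocateAlignedFrameSlot above now pads for 16-byte slots on both 32-bit and 64-bit targets. A standalone model of the arithmetic, with the pointer size passed in so both cases can be exercised (starting slot counts are illustrative):

#include <cassert>

// Mirrors Frame::AllocateAlignedFrameSlot: returns the allocated slot index
// and advances *slot_count past any alignment padding plus the slot itself.
int AllocateAlignedFrameSlot(int* slot_count, int width, int pointer_size) {
  assert(width == 4 || width == 8 || width == 16);
  if (pointer_size == 4) {
    if (width > pointer_size) {
      (*slot_count)++;
      *slot_count |= 1;                 // bump to an odd index for 8 bytes
      *slot_count += (width & 16) / 8;  // 2 extra slots when width == 16
    }
  } else {
    assert(pointer_size == 8);
    *slot_count += (width & 16) / 16;   // 1 extra slot when width == 16
  }
  return (*slot_count)++;
}

int main() {
  int slots = 3;
  assert(AllocateAlignedFrameSlot(&slots, 8, 8) == 3 && slots == 4);
  assert(AllocateAlignedFrameSlot(&slots, 16, 8) == 5 && slots == 6);
  int slots32 = 0;  // 32-bit: an 8-byte slot skips up to an odd index
  assert(AllocateAlignedFrameSlot(&slots32, 8, 4) == 1 && slots32 == 2);
  return 0;
}
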
@@ -191,13 +190,13 @@ class FrameOffset {
// current function's frame.
class FrameAccessState : public ZoneObject {
public:
- explicit FrameAccessState(Frame* const frame)
+ explicit FrameAccessState(const Frame* const frame)
: frame_(frame),
access_frame_with_fp_(false),
sp_delta_(0),
has_frame_(false) {}
- Frame* frame() const { return frame_; }
+ const Frame* frame() const { return frame_; }
void MarkHasFrame(bool state);
int sp_delta() const { return sp_delta_; }
@@ -229,7 +228,7 @@ class FrameAccessState : public ZoneObject {
FrameOffset GetFrameOffset(int spill_slot) const;
private:
- Frame* const frame_;
+ const Frame* const frame_;
bool access_frame_with_fp_;
int sp_delta_;
bool has_frame_;
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 35e91fa404..7b04198e81 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -34,7 +34,6 @@ void GapResolver::Resolve(ParallelMove* moves) const {
}
}
-
void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We mark a
@@ -75,7 +74,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
InstructionOperand source = move->source();
- if (source.EqualsCanonicalized(destination)) {
+ if (source.InterferesWith(destination)) {
move->Eliminate();
return;
}
@@ -94,7 +93,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
DCHECK((*blocker)->IsPending());
// Ensure source is a register or both are stack slots, to limit swap cases.
- if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
+ if (source.IsStackSlot() || source.IsFPStackSlot()) {
std::swap(source, destination);
}
assembler_->AssembleSwap(&source, &destination);
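
The comment at the top of PerformMove describes the classic parallel-move resolution: recursively perform any move that still reads this move's destination, then break a remaining cycle with a swap. A simplified standalone model over integer registers (no FP or stack operands; destinations assumed unique):

#include <cassert>
#include <utility>
#include <vector>

struct Move {
  int src, dst;
  bool pending = false;
  bool done = false;
};

void PerformMove(std::vector<Move>& moves, Move& move, std::vector<int>& reg) {
  move.pending = true;
  // First perform every other move that still reads our destination.
  for (Move& other : moves) {
    if (&other == &move || other.done || other.pending) continue;
    if (other.src == move.dst) PerformMove(moves, other, reg);
  }
  // Swaps below may have redirected our source; a now-trivial move dies,
  // which is the case the InterferesWith check in the hunk above handles.
  if (move.src == move.dst) {
    move.pending = false; move.done = true;
    return;
  }
  // A pending move reading our destination means we closed a cycle: emit a
  // swap and redirect every remaining reader of the two swapped locations.
  for (Move& other : moves) {
    if (&other != &move && other.pending && other.src == move.dst) {
      std::swap(reg[move.src], reg[move.dst]);  // AssembleSwap
      for (Move& m : moves) {
        if (m.done) continue;
        if (m.src == move.src) m.src = move.dst;
        else if (m.src == move.dst) m.src = move.src;
      }
      move.pending = false; move.done = true;
      return;
    }
  }
  reg[move.dst] = reg[move.src];  // the ordinary, acyclic case
  move.pending = false; move.done = true;
}

int main() {
  std::vector<int> reg = {7, 9};
  std::vector<Move> moves = {{0, 1}, {1, 0}};  // a two-move cycle
  for (Move& m : moves)
    if (!m.done) PerformMove(moves, m, reg);
  assert(reg[0] == 9 && reg[1] == 7);  // resolved with one swap, no temp
  return 0;
}
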
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 6f583d6b6a..b13b954714 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -168,6 +168,10 @@ void GraphReducer::Replace(Node* node, Node* replacement) {
void GraphReducer::Replace(Node* node, Node* replacement, NodeId max_id) {
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "- Replacing " << *node << " with " << *replacement << std::endl;
+ }
if (node == graph()->start()) graph()->SetStart(replacement);
if (node == graph()->end()) graph()->SetEnd(replacement);
if (replacement->id() <= max_id) {
@@ -222,7 +226,11 @@ void GraphReducer::ReplaceWithValue(Node* node, Node* value, Node* effect,
edge.UpdateTo(dead_);
Revisit(user);
} else {
- UNREACHABLE();
+ DCHECK_NOT_NULL(control);
+ edge.UpdateTo(control);
+ Revisit(user);
+ // TODO(jarin) Check that the node cannot throw (otherwise, it
+ // would have to be connected via IfSuccess/IfException).
}
} else if (NodeProperties::IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 683c345c14..2ac60a6d1d 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -74,8 +74,7 @@ class AdvancedReducer : public Reducer {
virtual void Revisit(Node* node) = 0;
// Replace value uses of {node} with {value} and effect uses of {node} with
// {effect}. If {effect == nullptr}, then use the effect input to {node}.
- // All
- // control uses will be relaxed assuming {node} cannot throw.
+ // All control uses will be relaxed assuming {node} cannot throw.
virtual void ReplaceWithValue(Node* node, Node* value, Node* effect,
Node* control) = 0;
};
diff --git a/deps/v8/src/compiler/graph-replay.cc b/deps/v8/src/compiler/graph-replay.cc
index cb775e96f3..352b171ffa 100644
--- a/deps/v8/src/compiler/graph-replay.cc
+++ b/deps/v8/src/compiler/graph-replay.cc
@@ -24,7 +24,7 @@ void GraphReplayPrinter::PrintReplay(Graph* graph) {
AllNodes nodes(&zone, graph);
// Allocate the nodes first.
- for (Node* node : nodes.live) {
+ for (Node* node : nodes.reachable) {
PrintReplayOpCreator(node->op());
PrintF(" Node* n%d = graph()->NewNode(op", node->id());
for (int i = 0; i < node->InputCount(); ++i) {
@@ -34,7 +34,7 @@ void GraphReplayPrinter::PrintReplay(Graph* graph) {
}
// Connect the nodes to their inputs.
- for (Node* node : nodes.live) {
+ for (Node* node : nodes.reachable) {
for (int i = 0; i < node->InputCount(); i++) {
PrintF(" n%d->ReplaceInput(%d, n%d);\n", node->id(), i,
node->InputAt(i)->id());
diff --git a/deps/v8/src/compiler/graph-trimmer.cc b/deps/v8/src/compiler/graph-trimmer.cc
index 75071c68b3..74626fe67f 100644
--- a/deps/v8/src/compiler/graph-trimmer.cc
+++ b/deps/v8/src/compiler/graph-trimmer.cc
@@ -33,7 +33,7 @@ void GraphTrimmer::TrimGraph() {
for (Edge edge : live->use_edges()) {
Node* const user = edge.from();
if (!IsLive(user)) {
- if (FLAG_trace_turbo_reduction) {
+ if (FLAG_trace_turbo_trimming) {
OFStream os(stdout);
os << "DeadLink: " << *user << "(" << edge.index() << ") -> " << *live
<< std::endl;
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 301e3900e1..9fd80ea488 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -4,17 +4,19 @@
#include "src/compiler/graph-visualizer.h"
+#include <memory>
#include <sstream>
#include <string>
#include "src/code-stubs.h"
+#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
@@ -25,11 +27,11 @@ namespace v8 {
namespace internal {
namespace compiler {
-
-FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
- const char* suffix, const char* mode) {
+std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
+ const char* phase,
+ const char* suffix) {
EmbeddedVector<char, 256> filename(0);
- base::SmartArrayPointer<char> debug_name = info->GetDebugName();
+ std::unique_ptr<char[]> debug_name = info->GetDebugName();
if (strlen(debug_name.get()) > 0) {
SNPrintF(filename, "turbo-%s", debug_name.get());
} else if (info->has_shared_info()) {
@@ -37,16 +39,40 @@ FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
} else {
SNPrintF(filename, "turbo-none-%s", phase);
}
+ EmbeddedVector<char, 256> source_file(0);
+ bool source_available = false;
+ if (FLAG_trace_file_names && info->parse_info()) {
+ Object* source_name = info->script()->name();
+ if (source_name->IsString()) {
+ String* str = String::cast(source_name);
+ if (str->length() > 0) {
+ SNPrintF(source_file, "%s", str->ToCString().get());
+ std::replace(source_file.start(),
+ source_file.start() + source_file.length(), '/', '_');
+ source_available = true;
+ }
+ }
+ }
std::replace(filename.start(), filename.start() + filename.length(), ' ',
'_');
EmbeddedVector<char, 256> full_filename;
- if (phase == nullptr) {
+ if (phase == nullptr && !source_available) {
SNPrintF(full_filename, "%s.%s", filename.start(), suffix);
- } else {
+ } else if (phase != nullptr && !source_available) {
SNPrintF(full_filename, "%s-%s.%s", filename.start(), phase, suffix);
+ } else if (phase == nullptr && source_available) {
+ SNPrintF(full_filename, "%s_%s.%s", filename.start(), source_file.start(),
+ suffix);
+ } else {
+ SNPrintF(full_filename, "%s_%s-%s.%s", filename.start(),
+ source_file.start(), phase, suffix);
}
- return base::OS::FOpen(full_filename.start(), mode);
+
+ char* buffer = new char[full_filename.length() + 1];
+ memcpy(buffer, full_filename.start(), full_filename.length());
+ buffer[full_filename.length()] = '\0';
+ return std::unique_ptr<char[]>(buffer);
}
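
GetVisualizerLogFileName now composes the name from up to four pieces: the debug name, an optional sanitized source file, an optional phase, and the suffix. A compact model of the four SNPrintF branches above (names illustrative):

#include <cassert>
#include <string>

std::string LogFileName(const std::string& base, const char* phase,
                        const std::string& source,  // "" when unavailable
                        const char* suffix) {
  std::string name = base;
  if (!source.empty()) name += "_" + source;               // turbo-foo_a.js
  if (phase != nullptr) name += std::string("-") + phase;  // ...-typer
  return name + "." + suffix;
}

int main() {
  assert(LogFileName("turbo-foo", nullptr, "", "json") == "turbo-foo.json");
  assert(LogFileName("turbo-foo", "typer", "", "json") ==
         "turbo-foo-typer.json");
  assert(LogFileName("turbo-foo", "typer", "a.js", "json") ==
         "turbo-foo_a.js-typer.json");
  return 0;
}
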
@@ -60,7 +86,7 @@ static const char* SafeMnemonic(Node* node) {
class Escaped {
public:
explicit Escaped(const std::ostringstream& os,
- const char* escaped_chars = "<>|{}")
+ const char* escaped_chars = "<>|{}\\")
: str_(os.str()), escaped_chars_(escaped_chars) {}
friend std::ostream& operator<<(std::ostream& os, const Escaped& e) {
@@ -88,10 +114,14 @@ class JSONGraphNodeWriter {
public:
JSONGraphNodeWriter(std::ostream& os, Zone* zone, const Graph* graph,
const SourcePositionTable* positions)
- : os_(os), all_(zone, graph), positions_(positions), first_node_(true) {}
+ : os_(os),
+ all_(zone, graph, false),
+ live_(zone, graph, true),
+ positions_(positions),
+ first_node_(true) {}
void Print() {
- for (Node* const node : all_.live) PrintNode(node);
+ for (Node* const node : all_.reachable) PrintNode(node);
os_ << "\n";
}
@@ -101,10 +131,15 @@ class JSONGraphNodeWriter {
} else {
os_ << ",\n";
}
- std::ostringstream label;
- label << *node->op();
- os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << Escaped(label, "\"")
- << "\"";
+ std::ostringstream label, title, properties;
+ node->op()->PrintTo(label, Operator::PrintVerbosity::kSilent);
+ node->op()->PrintTo(title, Operator::PrintVerbosity::kVerbose);
+ node->op()->PrintPropsTo(properties);
+ os_ << "{\"id\":" << SafeId(node) << ",\"label\":\""
+ << Escaped(label, "\"\\") << "\""
+ << ",\"title\":\"" << Escaped(title, "\"\\") << "\""
+ << ",\"live\": " << (live_.IsLive(node) ? "true" : "false")
+ << ",\"properties\":\"" << Escaped(properties, "\"\\") << "\"";
IrOpcode::Value opcode = node->opcode();
if (IrOpcode::IsPhiOpcode(opcode)) {
os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
@@ -126,11 +161,17 @@ class JSONGraphNodeWriter {
os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
: "false");
+ os_ << ",\"opinfo\":\"" << node->op()->ValueInputCount() << " v "
+ << node->op()->EffectInputCount() << " eff "
+ << node->op()->ControlInputCount() << " ctrl in, "
+ << node->op()->ValueOutputCount() << " v "
+ << node->op()->EffectOutputCount() << " eff "
+ << node->op()->ControlOutputCount() << " ctrl out\"";
if (NodeProperties::IsTyped(node)) {
Type* type = NodeProperties::GetType(node);
std::ostringstream type_out;
type->PrintTo(type_out);
- os_ << ",\"type\":\"" << Escaped(type_out, "\"") << "\"";
+ os_ << ",\"type\":\"" << Escaped(type_out, "\"\\") << "\"";
}
os_ << "}";
}
@@ -138,6 +179,7 @@ class JSONGraphNodeWriter {
private:
std::ostream& os_;
AllNodes all_;
+ AllNodes live_;
const SourcePositionTable* positions_;
bool first_node_;
@@ -148,10 +190,10 @@ class JSONGraphNodeWriter {
class JSONGraphEdgeWriter {
public:
JSONGraphEdgeWriter(std::ostream& os, Zone* zone, const Graph* graph)
- : os_(os), all_(zone, graph), first_edge_(true) {}
+ : os_(os), all_(zone, graph, false), first_edge_(true) {}
void Print() {
- for (Node* const node : all_.live) PrintEdges(node);
+ for (Node* const node : all_.reachable) PrintEdges(node);
os_ << "\n";
}
@@ -303,7 +345,7 @@ void GraphC1Visualizer::PrintIntProperty(const char* name, int value) {
void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
Tag tag(this, "compilation");
- base::SmartArrayPointer<char> name = info->GetDebugName();
+ std::unique_ptr<char[]> name = info->GetDebugName();
if (info->IsOptimizing()) {
PrintStringProperty("name", name.get());
PrintIndent();
@@ -491,9 +533,8 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
for (int j = instruction_block->first_instruction_index();
j <= instruction_block->last_instruction_index(); j++) {
PrintIndent();
- PrintableInstruction printable = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- instructions->InstructionAt(j)};
+ PrintableInstruction printable = {RegisterConfiguration::Turbofan(),
+ instructions->InstructionAt(j)};
os_ << j << " " << printable << " <|@\n";
}
}
@@ -536,13 +577,17 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
- if (op.IsDoubleRegister()) {
- DoubleRegister assigned_reg = op.GetDoubleRegister();
- os_ << " \"" << assigned_reg.ToString() << "\"";
+ const auto config = RegisterConfiguration::Turbofan();
+ if (op.IsRegister()) {
+ os_ << " \"" << config->GetGeneralRegisterName(op.register_code())
+ << "\"";
+ } else if (op.IsDoubleRegister()) {
+ os_ << " \"" << config->GetDoubleRegisterName(op.register_code())
+ << "\"";
} else {
- DCHECK(op.IsRegister());
- Register assigned_reg = op.GetRegister();
- os_ << " \"" << assigned_reg.ToString() << "\"";
+ DCHECK(op.IsFloatRegister());
+ os_ << " \"" << config->GetFloatRegisterName(op.register_code())
+ << "\"";
}
} else if (range->spilled()) {
const TopLevelLiveRange* top = range->TopLevel();
@@ -555,9 +600,9 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
<< "\"";
} else {
index = AllocatedOperand::cast(top->GetSpillOperand())->index();
- if (top->kind() == DOUBLE_REGISTERS) {
- os_ << " \"double_stack:" << index << "\"";
- } else if (top->kind() == GENERAL_REGISTERS) {
+ if (IsFloatingPoint(top->representation())) {
+ os_ << " \"fp_stack:" << index << "\"";
+ } else {
os_ << " \"stack:" << index << "\"";
}
}
@@ -615,6 +660,20 @@ const int kVisited = 2;
std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
base::AccountingAllocator allocator;
Zone local_zone(&allocator);
+
+ // Do a post-order depth-first search on the RPO graph. For every node,
+ // print:
+ //
+ // - the node id
+ // - the operator mnemonic
+ // - in square brackets its parameter (if present)
+ // - in parentheses the list of argument ids and their mnemonics
+ // - the node type (if it is typed)
+
+ // Post-order guarantees that all inputs of a node will be printed before
+ // the node itself, if there are no cycles. Any cycles are broken
+ // arbitrarily.
+
ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
ZoneStack<Node*> stack(&local_zone);
@@ -635,12 +694,20 @@ std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
state[n->id()] = kVisited;
stack.pop();
os << "#" << n->id() << ":" << *n->op() << "(";
+ // Print the inputs.
int j = 0;
for (Node* const i : n->inputs()) {
if (j++ > 0) os << ", ";
os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
}
- os << ")" << std::endl;
+ os << ")";
+ // Print the node type, if any.
+ if (NodeProperties::IsTyped(n)) {
+ os << " [Type: ";
+ NodeProperties::GetType(n)->PrintTo(os);
+ os << "]";
+ }
+ os << std::endl;
}
}
return os;
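
For reference, the traversal the new comment describes can be sketched standalone;
everything below is illustrative and not part of the patch:

  #include <stack>
  #include <vector>

  enum State { kUnvisited, kOnStack, kVisited };

  // Emits each node only after all of its inputs, pushing one unvisited
  // input at a time; inputs already on the stack belong to a cycle and are
  // skipped, which is what "broken arbitrarily" means above.
  void PostOrder(int root, const std::vector<std::vector<int>>& inputs,
                 std::vector<State>& state, std::vector<int>& order) {
    std::stack<int> stack;
    stack.push(root);
    state[root] = kOnStack;
    while (!stack.empty()) {
      int n = stack.top();
      bool pushed_input = false;
      for (int input : inputs[n]) {
        if (state[input] == kUnvisited) {
          state[input] = kOnStack;
          stack.push(input);
          pushed_input = true;
          break;
        }
      }
      if (pushed_input) continue;
      state[n] = kVisited;
      stack.pop();
      // Here operator<< prints a line such as
      //   #4:Int32Add(#2:Phi, #3:Int32Constant) [Type: Number]
      order.push_back(n);
    }
  }
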
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 1a971a55ed..700d7a75e0 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -7,6 +7,7 @@
#include <stdio.h>
#include <iosfwd>
+#include <memory>
namespace v8 {
namespace internal {
@@ -21,8 +22,9 @@ class RegisterAllocationData;
class Schedule;
class SourcePositionTable;
-FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
- const char* suffix, const char* mode);
+std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
+ const char* phase,
+ const char* suffix);
struct AsJSON {
AsJSON(const Graph& g, SourcePositionTable* p) : graph(g), positions(p) {}
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 958a15d282..a694a0b414 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -28,11 +28,30 @@ typedef uint32_t Mark;
// out-of-line data associated with each node.
typedef uint32_t NodeId;
-
-class Graph : public ZoneObject {
+class Graph final : public ZoneObject {
public:
explicit Graph(Zone* zone);
+ // Scope used when creating a subgraph for inlining. Automatically preserves
+ // the original start and end nodes of the graph, and resets them when you
+ // leave the scope.
+ class SubgraphScope final {
+ public:
+ explicit SubgraphScope(Graph* graph)
+ : graph_(graph), start_(graph->start()), end_(graph->end()) {}
+ ~SubgraphScope() {
+ graph_->SetStart(start_);
+ graph_->SetEnd(end_);
+ }
+
+ private:
+ Graph* const graph_;
+ Node* const start_;
+ Node* const end_;
+
+ DISALLOW_COPY_AND_ASSIGN(SubgraphScope);
+ };
+
// Base implementation used by all factory methods.
Node* NewNodeUnchecked(const Operator* op, int input_count,
Node* const* inputs, bool incomplete = false);
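
A usage sketch for the new SubgraphScope; the function and node names here are
hypothetical, only Graph, SetStart and SetEnd come from the patch:

  void BuildInlineeSubgraph(Graph* graph, Node* inlinee_start,
                            Node* inlinee_end) {
    Graph::SubgraphScope scope(graph);  // captures the current start()/end()
    graph->SetStart(inlinee_start);     // repoint the graph at the inlinee
    graph->SetEnd(inlinee_end);
    // ... build and reduce the subgraph ...
  }  // scope destructor restores the original start and end nodes
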
diff --git a/deps/v8/src/compiler/greedy-allocator.cc b/deps/v8/src/compiler/greedy-allocator.cc
deleted file mode 100644
index 683b75d49f..0000000000
--- a/deps/v8/src/compiler/greedy-allocator.cc
+++ /dev/null
@@ -1,629 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
- } while (false)
-
-
-const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
-
-
-namespace {
-
-void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
- int reg_id = range->assigned_register();
- range->SetUseHints(reg_id);
- if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
- data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
- }
-}
-
-
-void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
- range->UnsetUseHints();
- if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
- data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
- }
-}
-
-
-LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
- LifetimePosition pos) {
- DCHECK(range->Start() < pos && pos < range->End());
- DCHECK(pos.IsStart() || pos.IsGapPosition() ||
- (data->code()
- ->GetInstructionBlock(pos.ToInstructionIndex())
- ->last_instruction_index() != pos.ToInstructionIndex()));
- LiveRange* result = range->SplitAt(pos, data->allocation_zone());
- return result;
-}
-
-
-} // namespace
-
-
-AllocationCandidate AllocationScheduler::GetNext() {
- DCHECK(!queue_.empty());
- AllocationCandidate ret = queue_.top();
- queue_.pop();
- return ret;
-}
-
-
-void AllocationScheduler::Schedule(LiveRange* range) {
- TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
- range->relative_id());
- queue_.push(AllocationCandidate(range));
-}
-
-
-void AllocationScheduler::Schedule(LiveRangeGroup* group) {
- queue_.push(AllocationCandidate(group));
-}
-
-GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
- RegisterKind kind, Zone* local_zone)
- : RegisterAllocator(data, kind),
- local_zone_(local_zone),
- allocations_(local_zone),
- scheduler_(local_zone),
- groups_(local_zone) {}
-
-
-void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
- TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
- range->TopLevel()->vreg(), range->relative_id());
-
- DCHECK(!range->HasRegisterAssigned());
-
- AllocateRegisterToRange(reg_id, range);
-
- TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id),
- range->TopLevel()->vreg(), range->relative_id());
- range->set_assigned_register(reg_id);
- UpdateOperands(range, data());
-}
-
-
-void GreedyAllocator::PreallocateFixedRanges() {
- allocations_.resize(num_registers());
- for (int i = 0; i < num_registers(); i++) {
- allocations_[i] = new (local_zone()) CoalescedLiveRanges(local_zone());
- }
-
- for (LiveRange* fixed_range : GetFixedRegisters()) {
- if (fixed_range != nullptr) {
- DCHECK_EQ(mode(), fixed_range->kind());
- DCHECK(fixed_range->TopLevel()->IsFixed());
-
- int reg_nr = fixed_range->assigned_register();
- EnsureValidRangeWeight(fixed_range);
- AllocateRegisterToRange(reg_nr, fixed_range);
- }
- }
-}
-
-
-void GreedyAllocator::GroupLiveRanges() {
- CoalescedLiveRanges grouper(local_zone());
- for (TopLevelLiveRange* range : data()->live_ranges()) {
- grouper.clear();
- // Skip splinters, because we do not want to optimize for them, and moves
- // due to assigning them to different registers occur in deferred blocks.
- if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
- continue;
- }
-
- // A phi can't be a memory operand, so it couldn't have been split.
- DCHECK(!range->spilled());
-
- // Maybe this phi range is itself an input to another phi which was already
- // processed.
- LiveRangeGroup* latest_grp = range->group() != nullptr
- ? range->group()
- : new (local_zone())
- LiveRangeGroup(local_zone());
-
- // Populate the grouper.
- if (range->group() == nullptr) {
- grouper.AllocateRange(range);
- } else {
- for (LiveRange* member : range->group()->ranges()) {
- grouper.AllocateRange(member);
- }
- }
- for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
- // skip output also in input, which may happen for loops.
- if (j == range->vreg()) continue;
-
- TopLevelLiveRange* other_top = data()->live_ranges()[j];
-
- if (other_top->IsSplinter()) continue;
- // If the other was a memory operand, it might have been split.
- // So get the unsplit part.
- LiveRange* other =
- other_top->next() == nullptr ? other_top : other_top->next();
-
- if (other->spilled()) continue;
-
- LiveRangeGroup* other_group = other->group();
- if (other_group != nullptr) {
- bool can_merge = true;
- for (LiveRange* member : other_group->ranges()) {
- if (grouper.GetConflicts(member).Current() != nullptr) {
- can_merge = false;
- break;
- }
- }
- // If no member conflicts with the current group, then since the members
- // don't conflict with each other either, we can merge them.
- if (can_merge) {
- latest_grp->ranges().insert(latest_grp->ranges().end(),
- other_group->ranges().begin(),
- other_group->ranges().end());
- for (LiveRange* member : other_group->ranges()) {
- grouper.AllocateRange(member);
- member->set_group(latest_grp);
- }
- // Clear the other group, so we avoid scheduling it.
- other_group->ranges().clear();
- }
- } else if (grouper.GetConflicts(other).Current() == nullptr) {
- grouper.AllocateRange(other);
- latest_grp->ranges().push_back(other);
- other->set_group(latest_grp);
- }
- }
-
- if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
- latest_grp->ranges().push_back(range);
- DCHECK(latest_grp->ranges().size() > 1);
- groups().push_back(latest_grp);
- range->set_group(latest_grp);
- }
- }
-}
-
-
-void GreedyAllocator::ScheduleAllocationCandidates() {
- for (LiveRangeGroup* group : groups()) {
- if (group->ranges().size() > 0) {
- // We shouldn't have added single-range groups.
- DCHECK(group->ranges().size() != 1);
- scheduler().Schedule(group);
- }
- }
- for (LiveRange* range : data()->live_ranges()) {
- if (CanProcessRange(range)) {
- for (LiveRange* child = range; child != nullptr; child = child->next()) {
- if (!child->spilled() && child->group() == nullptr) {
- scheduler().Schedule(child);
- }
- }
- }
- }
-}
-
-
-void GreedyAllocator::TryAllocateCandidate(
- const AllocationCandidate& candidate) {
- if (candidate.is_group()) {
- TryAllocateGroup(candidate.group());
- } else {
- TryAllocateLiveRange(candidate.live_range());
- }
-}
-
-
-void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
- float group_weight = 0.0;
- for (LiveRange* member : group->ranges()) {
- EnsureValidRangeWeight(member);
- group_weight = Max(group_weight, member->weight());
- }
-
- float eviction_weight = group_weight;
- int eviction_reg = -1;
- int free_reg = -1;
- for (int i = 0; i < num_allocatable_registers(); ++i) {
- int reg = allocatable_register_code(i);
- float weight = GetMaximumConflictingWeight(reg, group, group_weight);
- if (weight == LiveRange::kInvalidWeight) {
- free_reg = reg;
- break;
- }
- if (weight < eviction_weight) {
- eviction_weight = weight;
- eviction_reg = reg;
- }
- }
- if (eviction_reg < 0 && free_reg < 0) {
- for (LiveRange* member : group->ranges()) {
- scheduler().Schedule(member);
- }
- return;
- }
- if (free_reg < 0) {
- DCHECK(eviction_reg >= 0);
- for (LiveRange* member : group->ranges()) {
- EvictAndRescheduleConflicts(eviction_reg, member);
- }
- free_reg = eviction_reg;
- }
-
- DCHECK(free_reg >= 0);
- for (LiveRange* member : group->ranges()) {
- AssignRangeToRegister(free_reg, member);
- }
-}
-
-
-void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
- // TODO(mtrofin): once we introduce groups, we'll want to first try and
- // allocate at the preferred register.
- TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
- range->relative_id());
- int free_reg = -1;
- int evictable_reg = -1;
- int hinted_reg = -1;
-
- EnsureValidRangeWeight(range);
- float competing_weight = range->weight();
- DCHECK(competing_weight != LiveRange::kInvalidWeight);
-
- // Can we allocate at the hinted register?
- if (range->FirstHintPosition(&hinted_reg) != nullptr) {
- DCHECK(hinted_reg >= 0);
- float max_conflict_weight =
- GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
- if (max_conflict_weight == LiveRange::kInvalidWeight) {
- free_reg = hinted_reg;
- } else if (max_conflict_weight < range->weight()) {
- evictable_reg = hinted_reg;
- }
- }
-
- if (free_reg < 0 && evictable_reg < 0) {
- // There was no hinted reg, or we cannot allocate there.
- float smallest_weight = LiveRange::kMaxWeight;
-
- // Seek either the first free register, or, from the set of registers
- // where the maximum conflict is lower than the candidate's weight, the one
- // with the smallest such weight.
- for (int i = 0; i < num_allocatable_registers(); i++) {
- int reg = allocatable_register_code(i);
- // Skip unnecessarily re-visiting the hinted register, if any.
- if (reg == hinted_reg) continue;
- float max_conflict_weight =
- GetMaximumConflictingWeight(reg, range, competing_weight);
- if (max_conflict_weight == LiveRange::kInvalidWeight) {
- free_reg = reg;
- break;
- }
- if (max_conflict_weight < range->weight() &&
- max_conflict_weight < smallest_weight) {
- smallest_weight = max_conflict_weight;
- evictable_reg = reg;
- }
- }
- }
-
- // We have a free register, so we use it.
- if (free_reg >= 0) {
- TRACE("Found free register %s for live range %d:%d.\n",
- RegisterName(free_reg), range->TopLevel()->vreg(),
- range->relative_id());
- AssignRangeToRegister(free_reg, range);
- return;
- }
-
- // We found a register to perform evictions, so we evict and allocate our
- // candidate.
- if (evictable_reg >= 0) {
- TRACE("Found evictable register %s for live range %d:%d.\n",
- RegisterName(evictable_reg), range->TopLevel()->vreg(),
- range->relative_id());
- EvictAndRescheduleConflicts(evictable_reg, range);
- AssignRangeToRegister(evictable_reg, range);
- return;
- }
-
- // The range needs to be split or spilled.
- SplitOrSpillBlockedRange(range);
-}
-
-
-void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
- const LiveRange* range) {
- auto conflicts = current_allocations(reg_id)->GetConflicts(range);
- for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
- conflict = conflicts.RemoveCurrentAndGetNext()) {
- DCHECK(conflict->HasRegisterAssigned());
- CHECK(!conflict->TopLevel()->IsFixed());
- conflict->UnsetAssignedRegister();
- UnsetOperands(conflict, data());
- UpdateWeightAtEviction(conflict);
- scheduler().Schedule(conflict);
- TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(),
- conflict->relative_id());
- }
-}
-
-
-void GreedyAllocator::AllocateRegisters() {
- CHECK(scheduler().empty());
- CHECK(allocations_.empty());
-
- TRACE("Begin allocating function %s with the Greedy Allocator\n",
- data()->debug_name());
-
- SplitAndSpillRangesDefinedByMemoryOperand(true);
- GroupLiveRanges();
- ScheduleAllocationCandidates();
- PreallocateFixedRanges();
- while (!scheduler().empty()) {
- AllocationCandidate candidate = scheduler().GetNext();
- TryAllocateCandidate(candidate);
- }
-
- for (size_t i = 0; i < allocations_.size(); ++i) {
- if (!allocations_[i]->empty()) {
- data()->MarkAllocated(mode(), static_cast<int>(i));
- }
- }
- allocations_.clear();
-
- TryReuseSpillRangesForGroups();
-
- TRACE("End allocating function %s with the Greedy Allocator\n",
- data()->debug_name());
-}
-
-
-void GreedyAllocator::TryReuseSpillRangesForGroups() {
- for (TopLevelLiveRange* top : data()->live_ranges()) {
- if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
- continue;
- }
-
- SpillRange* spill_range = nullptr;
- for (LiveRange* member : top->group()->ranges()) {
- if (!member->TopLevel()->HasSpillRange()) continue;
- SpillRange* member_range = member->TopLevel()->GetSpillRange();
- if (spill_range == nullptr) {
- spill_range = member_range;
- } else {
- // This may not always succeed, because we group non-conflicting ranges
- // that may have been splintered, and the splinters may cause conflicts
- // in the spill ranges.
- // TODO(mtrofin): should the splinters own their own spill ranges?
- spill_range->TryMerge(member_range);
- }
- }
- }
-}
-
-
-float GreedyAllocator::GetMaximumConflictingWeight(
- unsigned reg_id, const LiveRange* range, float competing_weight) const {
- float ret = LiveRange::kInvalidWeight;
-
- auto conflicts = current_allocations(reg_id)->GetConflicts(range);
- for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
- conflict = conflicts.GetNext()) {
- DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
- if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
- ret = Max(ret, conflict->weight());
- DCHECK(ret < LiveRange::kMaxWeight);
- }
-
- return ret;
-}
-
-
-float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
- const LiveRangeGroup* group,
- float group_weight) const {
- float ret = LiveRange::kInvalidWeight;
-
- for (LiveRange* member : group->ranges()) {
- float member_conflict_weight =
- GetMaximumConflictingWeight(reg_id, member, group_weight);
- if (member_conflict_weight == LiveRange::kMaxWeight) {
- return LiveRange::kMaxWeight;
- }
- if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
- ret = Max(member_conflict_weight, ret);
- }
-
- return ret;
-}
-
-
-void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
- // The live range weight will be invalidated when ranges are created or split.
- // Otherwise, it is consistently updated when the range is allocated or
- // unallocated.
- if (range->weight() != LiveRange::kInvalidWeight) return;
-
- if (range->TopLevel()->IsFixed()) {
- range->set_weight(LiveRange::kMaxWeight);
- return;
- }
- if (!IsProgressPossible(range)) {
- range->set_weight(LiveRange::kMaxWeight);
- return;
- }
-
- float use_count = 0.0;
- for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next()) {
- ++use_count;
- }
- range->set_weight(use_count / static_cast<float>(range->GetSize()));
-}
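
The heuristic deleted above scores a live range by use density, so short ranges
with many register uses outrank long, sparsely used ones. A standalone
restatement, assuming the deleted LiveRange accessors:

  float UseDensity(const LiveRange* range) {
    float use_count = 0.0f;
    for (UsePosition* pos = range->first_pos(); pos != nullptr;
         pos = pos->next()) {
      ++use_count;
    }
    return use_count / static_cast<float>(range->GetSize());
  }
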
-
-
-void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
- LifetimePosition start = range->Start();
- CHECK(range->CanBeSpilled(start));
-
- DCHECK(range->NextRegisterPosition(start) == nullptr);
- Spill(range);
-}
-
-
-LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
- LiveRange* range) {
- LiveRange* ret = range;
- for (UseInterval* interval = range->first_interval(); interval != nullptr;
- interval = interval->next()) {
- LifetimePosition start = interval->start();
- LifetimePosition end = interval->end();
- // If the interval starts at instruction end, then the first instruction
- // in the interval is the next one.
- int first_full_instruction = (start.IsGapPosition() || start.IsStart())
- ? start.ToInstructionIndex()
- : start.ToInstructionIndex() + 1;
- // If the interval ends in a gap or at instruction start, then the last
- // instruction is the previous one.
- int last_full_instruction = (end.IsGapPosition() || end.IsStart())
- ? end.ToInstructionIndex() - 1
- : end.ToInstructionIndex();
-
- for (int instruction_index = first_full_instruction;
- instruction_index <= last_full_instruction; ++instruction_index) {
- if (!code()->InstructionAt(instruction_index)->IsCall()) continue;
-
- LifetimePosition before =
- GetSplitPositionForInstruction(range, instruction_index);
- LiveRange* second_part =
- before.IsValid() ? Split(range, data(), before) : range;
-
- if (range != second_part) scheduler().Schedule(range);
-
- LifetimePosition after =
- FindSplitPositionAfterCall(second_part, instruction_index);
-
- if (after.IsValid()) {
- ret = Split(second_part, data(), after);
- } else {
- ret = nullptr;
- }
- Spill(second_part);
- return ret;
- }
- }
- return ret;
-}
-
-
-bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
- bool modified = false;
-
- while (range != nullptr) {
- LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
- // If we performed no modification, we're done.
- if (remainder == range) {
- break;
- }
- // We performed a modification.
- modified = true;
- range = remainder;
- }
- // If we have a remainder and we made modifications, it means the remainder
- // has no calls and we should schedule it for further processing. If we made
- // no modifications, we will just return false, because we want the algorithm
- // to make progress by trying some other heuristic.
- if (modified && range != nullptr) {
- DCHECK(!range->spilled());
- DCHECK(!range->HasRegisterAssigned());
- scheduler().Schedule(range);
- }
- return modified;
-}
-
-
-LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
- const LiveRange* range, int call_index) {
- LifetimePosition after_call =
- Max(range->Start(),
- LifetimePosition::GapFromInstructionIndex(call_index + 1));
- UsePosition* next_use = range->NextRegisterPosition(after_call);
- if (!next_use) return LifetimePosition::Invalid();
-
- LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
- split_pos =
- GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
- return split_pos;
-}
-
-
-LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
- LiveRange* range) {
- LifetimePosition end = range->End();
- if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
- end =
- LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
- }
- LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
- pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
- return pos;
-}
-
-
-void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
- if (TrySplitAroundCalls(range)) return;
-
- LifetimePosition pos = FindSplitPositionBeforeLoops(range);
-
- if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
- if (pos.IsValid()) {
- LiveRange* tail = Split(range, data(), pos);
- DCHECK(tail != range);
- scheduler().Schedule(tail);
- scheduler().Schedule(range);
- return;
- }
- SpillRangeAsLastResort(range);
-}
-
-
-// Basic heuristic for advancing the algorithm, if any other splitting heuristic
-// failed.
-LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
- const LiveRange* range) {
- LifetimePosition previous = range->Start();
- for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
- previous = previous.NextFullStart(),
- pos = range->NextRegisterPosition(previous)) {
- LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
- LifetimePosition before =
- GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
- if (before.IsValid()) return before;
- LifetimePosition after = GetSplitPositionForInstruction(
- range, pos->pos().ToInstructionIndex() + 1);
- if (after.IsValid()) return after;
- }
- return LifetimePosition::Invalid();
-}
-
-
-bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
- return range->CanBeSpilled(range->Start()) ||
- GetLastResortSplitPosition(range).IsValid();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/greedy-allocator.h b/deps/v8/src/compiler/greedy-allocator.h
deleted file mode 100644
index b61ba4242f..0000000000
--- a/deps/v8/src/compiler/greedy-allocator.h
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_GREEDY_ALLOCATOR_H_
-#define V8_GREEDY_ALLOCATOR_H_
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "src/compiler/register-allocator.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
- // The unit of allocation scheduling. At minimum this is a LiveRange, but
-// we may extend this to groups of LiveRanges. It has to be comparable.
-class AllocationCandidate {
- public:
- explicit AllocationCandidate(LiveRange* range)
- : is_group_(false), size_(range->GetSize()) {
- candidate_.range_ = range;
- }
-
- explicit AllocationCandidate(LiveRangeGroup* ranges)
- : is_group_(true), size_(CalculateGroupSize(ranges)) {
- candidate_.group_ = ranges;
- }
-
- // Strict ordering operators
- bool operator<(const AllocationCandidate& other) const {
- return size() < other.size();
- }
-
- bool operator>(const AllocationCandidate& other) const {
- return size() > other.size();
- }
-
- bool is_group() const { return is_group_; }
- LiveRange* live_range() const { return candidate_.range_; }
- LiveRangeGroup* group() const { return candidate_.group_; }
-
- private:
- unsigned CalculateGroupSize(LiveRangeGroup* group) {
- unsigned ret = 0;
- for (LiveRange* range : group->ranges()) {
- ret += range->GetSize();
- }
- return ret;
- }
-
- unsigned size() const { return size_; }
- bool is_group_;
- unsigned size_;
- union {
- LiveRange* range_;
- LiveRangeGroup* group_;
- } candidate_;
-};
-
-
-// Schedule processing (allocating) of AllocationCandidates.
-class AllocationScheduler final : ZoneObject {
- public:
- explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
- void Schedule(LiveRange* range);
- void Schedule(LiveRangeGroup* group);
- AllocationCandidate GetNext();
- bool empty() const { return queue_.empty(); }
-
- private:
- typedef ZonePriorityQueue<AllocationCandidate> ScheduleQueue;
- ScheduleQueue queue_;
-
- DISALLOW_COPY_AND_ASSIGN(AllocationScheduler);
-};
-
-
-// A variant of the LLVM Greedy Register Allocator. See
-// http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
-class GreedyAllocator final : public RegisterAllocator {
- public:
- explicit GreedyAllocator(RegisterAllocationData* data, RegisterKind kind,
- Zone* local_zone);
-
- void AllocateRegisters();
-
- private:
- static const float kAllocatedRangeMultiplier;
-
- static void UpdateWeightAtAllocation(LiveRange* range) {
- DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
- range->set_weight(range->weight() * kAllocatedRangeMultiplier);
- }
-
-
- static void UpdateWeightAtEviction(LiveRange* range) {
- DCHECK_NE(range->weight(), LiveRange::kInvalidWeight);
- range->set_weight(range->weight() / kAllocatedRangeMultiplier);
- }
-
- AllocationScheduler& scheduler() { return scheduler_; }
- CoalescedLiveRanges* current_allocations(unsigned i) {
- return allocations_[i];
- }
-
- CoalescedLiveRanges* current_allocations(unsigned i) const {
- return allocations_[i];
- }
-
- Zone* local_zone() const { return local_zone_; }
- ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
- const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
-
- // Insert fixed ranges.
- void PreallocateFixedRanges();
-
- void GroupLiveRanges();
-
- // Schedule unassigned live ranges for allocation.
- void ScheduleAllocationCandidates();
-
- void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
- UpdateWeightAtAllocation(range);
- current_allocations(reg_id)->AllocateRange(range);
- }
- // Evict and reschedule conflicts of a given range, at a given register.
- void EvictAndRescheduleConflicts(unsigned reg_id, const LiveRange* range);
-
- void TryAllocateCandidate(const AllocationCandidate& candidate);
- void TryAllocateLiveRange(LiveRange* range);
- void TryAllocateGroup(LiveRangeGroup* group);
-
- // Calculate the weight of a candidate for allocation.
- void EnsureValidRangeWeight(LiveRange* range);
-
- // Calculate the new weight of a range that is about to be allocated.
- float GetAllocatedRangeWeight(float candidate_weight);
-
- // Returns kInvalidWeight if there are no conflicts, or the largest weight of
- // a range conflicting with the given range, at the given register.
- float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
- float competing_weight) const;
-
- // Returns kInvalidWeight if there are no conflicts, or the largest weight of
- // a range conflicting with the given range, at the given register.
- float GetMaximumConflictingWeight(unsigned reg_id,
- const LiveRangeGroup* group,
- float group_weight) const;
-
- // This is the extension point for splitting heuristics.
- void SplitOrSpillBlockedRange(LiveRange* range);
-
- // Find a good position at which to refill, after a range was spilled around a call.
- LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
- int call_index);
- // Split a range around all calls it passes over. Returns true if any changes
- // were made, or false if no calls were found.
- bool TrySplitAroundCalls(LiveRange* range);
-
- // Find a split position at the outermost loop.
- LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
-
- // Finds the first call instruction in the path of this range. Splits before
- // and requeues that segment (if any), spills the section over the call, and
- // returns the section after the call. The return value is:
- // - the same range, if no call was found
- // - nullptr, if the range finished at the call and there's no "after the
- // call" portion.
- // - the portion after the call.
- LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
-
- // While we attempt to merge spill ranges later on in the allocation pipeline,
- // we want to ensure group elements get merged. Waiting until later may hinder
- // merge-ability, since the pipeline merger (being naive) may create conflicts
- // between spill ranges of group members.
- void TryReuseSpillRangesForGroups();
-
- LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
-
- bool IsProgressPossible(const LiveRange* range);
-
- // Necessary heuristic: spill when all else failed.
- void SpillRangeAsLastResort(LiveRange* range);
-
- void AssignRangeToRegister(int reg_id, LiveRange* range);
-
- Zone* local_zone_;
- ZoneVector<CoalescedLiveRanges*> allocations_;
- AllocationScheduler scheduler_;
- ZoneVector<LiveRangeGroup*> groups_;
-
- DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
-};
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-#endif // V8_GREEDY_ALLOCATOR_H_
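
One design note on the deleted scheduler: ZonePriorityQueue behaves like
std::priority_queue, a max-heap on operator<, so the size-based ordering of
AllocationCandidate made the allocator always pick the largest pending range or
group next. A self-contained illustration:

  #include <queue>

  struct Candidate {
    unsigned size;
    bool operator<(const Candidate& other) const { return size < other.size; }
  };

  int main() {
    std::priority_queue<Candidate> queue;
    queue.push({3u});
    queue.push({7u});
    queue.push({5u});
    return queue.top().size == 7u ? 0 : 1;  // biggest candidate comes out first
  }
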
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index ee05ad00b6..d4afd3789f 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -44,11 +44,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
if (op->IsRegister()) {
DCHECK(extra == 0);
return Operand(ToRegister(op));
- } else if (op->IsDoubleRegister()) {
+ } else if (op->IsFPRegister()) {
DCHECK(extra == 0);
return Operand(ToDoubleRegister(op));
}
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
@@ -59,12 +59,19 @@ class IA32OperandConverter : public InstructionOperandConverter {
}
Operand HighOperand(InstructionOperand* op) {
- DCHECK(op->IsDoubleStackSlot());
+ DCHECK(op->IsFPStackSlot());
return ToOperand(op, kPointerSize);
}
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kInt32 &&
+ (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
+ constant.rmode());
+ }
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
@@ -113,8 +120,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
}
case kMode_MRI: {
Register base = InputRegister(NextOffset(offset));
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, ctant.ToInt32(), ctant.rmode());
}
case kMode_MR1:
case kMode_MR2:
@@ -133,8 +140,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_M1:
case kMode_M2:
@@ -151,12 +158,12 @@ class IA32OperandConverter : public InstructionOperandConverter {
case kMode_M8I: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(index, scale, disp);
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_MI: {
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(Immediate(disp));
+ Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
+ return Operand(ctant.ToInt32(), ctant.rmode());
}
case kMode_None:
UNREACHABLE();
@@ -190,18 +197,33 @@ class OutOfLineLoadInteger final : public OutOfLineCode {
Register const result_;
};
-
-class OutOfLineLoadFloat final : public OutOfLineCode {
+class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
+ OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() final { __ pcmpeqd(result_, result_); }
+ void Generate() final {
+ __ xorps(result_, result_);
+ __ divss(result_, result_);
+ }
private:
XMMRegister const result_;
};
+class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ __ xorpd(result_, result_);
+ __ divsd(result_, result_);
+ }
+
+ private:
+ XMMRegister const result_;
+};
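
The new out-of-line code materializes NaN as 0.0 / 0.0, which yields the
hardware's default quiet NaN; the pcmpeqd sequence it replaces produced an
all-ones bit pattern, also a NaN but with a nonstandard payload. A host-side
analogue (illustrative only):

  float MakeFloat32NaN() {
    volatile float zero = 0.0f;  // volatile defeats constant folding
    return zero / zero;          // default quiet NaN, like xorps + divss
  }
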
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
@@ -264,23 +286,21 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto offset = i.InputRegister(0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ asm_instr(result, i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -357,28 +377,43 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* Pass two doubles as arguments on the stack. */ \
+ __ PrepareCallCFunction(4, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 4); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
+ } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(2, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 2); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
+ } while (false)
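
For orientation, the two macros above hand-roll the ia32 C calling convention;
a sketch of the stack set up by ASSEMBLE_IEEE754_BINOP(atan2), with slot sizes
assumed from kDoubleSize == 8:

  // esp + 0 : first double argument   (movsd from InputDoubleRegister(0))
  // esp + 8 : second double argument  (movsd from InputDoubleRegister(1))
  //
  // CallCFunction(..., 4) counts four pointer-sized argument slots, i.e.
  // two doubles. The callee returns in x87 st(0), so the result is spilled
  // with fstp_d and reloaded into the output XMM register via movsd.
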
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ add(esp, Immediate(sp_slot_delta * kPointerSize));
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ mov(ebp, MemOperand(ebp, 0));
}
@@ -423,8 +458,71 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
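
A worked example of the helper above, with illustrative constants: if
GetSPToFPSlotCount() returns 3 and kFixedSlotCountAboveFp is 2, then
current_sp_offset is 5. A tail call wanting its first unused slot at 7 gives
stack_slot_delta == 2, so the code emits sub esp, 2 * kPointerSize and records
the delta; a target of 4 gives -1 and, when shrinkage is allowed,
add esp, 1 * kPointerSize.
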
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+ ZoneVector<MoveOperands*> pushes(zone());
+ GetPushCompatibleMoves(instr, flags, &pushes);
+
+ if (!pushes.empty() &&
+ (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+ first_unused_stack_slot)) {
+ IA32OperandConverter g(this, instr);
+ for (auto move : pushes) {
+ LocationOperand destination_location(
+ LocationOperand::cast(move->destination()));
+ InstructionOperand source(move->source());
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ destination_location.index());
+ if (source.IsStackSlot()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ push(g.SlotToOperand(source_location.index()));
+ } else if (source.IsRegister()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ push(source_location.GetRegister());
+ } else if (source.IsImmediate()) {
+ __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
+ } else {
+ // Pushes of non-scalar data types are not supported.
+ UNIMPLEMENTED();
+ }
+ frame_access_state()->IncreaseSPDelta(1);
+ move->Eliminate();
+ }
+ }
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
IA32OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -445,8 +543,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
@@ -460,6 +556,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(reg);
}
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!HasImmediateInput(instr, 0));
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -483,14 +588,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
}
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -501,7 +605,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -525,6 +629,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
+ case kArchDebugBreak:
+ __ int3();
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -534,7 +649,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -593,6 +710,81 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ // TODO(bmeurer): Improve integration of the stub.
+ if (!i.InputDoubleRegister(1).is(xmm2)) {
+ __ movaps(xmm2, i.InputDoubleRegister(0));
+ __ movaps(xmm1, i.InputDoubleRegister(1));
+ } else {
+ __ movaps(xmm0, i.InputDoubleRegister(0));
+ __ movaps(xmm1, xmm2);
+ __ movaps(xmm2, xmm0);
+ }
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ __ movaps(i.OutputDoubleRegister(), xmm3);
+ break;
+ }
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -814,12 +1006,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kSSEFloat32Max:
- __ maxss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Min:
- __ minss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
case kSSEFloat32Sqrt:
__ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
@@ -862,12 +1048,117 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// when there is a (v)mulsd depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kSSEFloat64Max:
- __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ case kSSEFloat32Max: {
+ Label compare_nan, compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
+ __ movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ test(i.TempRegister(0), Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
+ break;
+ }
+
+ case kSSEFloat64Max: {
+ Label compare_nan, compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
+ __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ test(i.TempRegister(0), Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
+ break;
+ }
+ case kSSEFloat32Min: {
+ Label compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ movss(kScratchDoubleReg, i.InputOperand(1));
+ __ movmskps(i.TempRegister(0), kScratchDoubleReg);
+ }
+ __ test(i.TempRegister(0), Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
break;
- case kSSEFloat64Min:
- __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ case kSSEFloat64Min: {
+ Label compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ movsd(kScratchDoubleReg, i.InputOperand(1));
+ __ movmskpd(i.TempRegister(0), kScratchDoubleReg);
+ }
+ __ test(i.TempRegister(0), Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
break;
+ }
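
The expanded Max/Min sequences above encode JS-style semantics rather than raw
maxsd/minsd: NaN in either input produces NaN, and +0 beats -0 on equality. A
scalar C++ restatement for reference (Min mirrors it with the comparison and
the sign test flipped):

  #include <cmath>
  #include <limits>

  double Float64Max(double lhs, double rhs) {
    if (std::isnan(lhs) || std::isnan(rhs)) {
      // This is the OutOfLineLoadFloat64NaN path.
      return std::numeric_limits<double>::quiet_NaN();
    }
    if (lhs > rhs) return lhs;
    if (rhs > lhs) return rhs;
    // Equal operands: the movmskpd sign-bit test prefers +0 over -0.
    return std::signbit(lhs) ? rhs : lhs;
  }
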
case kSSEFloat64Mod: {
// TODO(dcarney): alignment is wrong.
__ sub(esp, Immediate(kDoubleSize));
@@ -965,14 +1256,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64ExtractLowWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
@@ -1014,18 +1305,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat32Max: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vmaxss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
- break;
- }
- case kAVXFloat32Min: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vminss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
- break;
- }
case kAVXFloat64Add: {
CpuFeatureScope avx_scope(masm(), AVX);
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1053,18 +1332,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat64Max: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
- break;
- }
- case kAVXFloat64Min: {
- CpuFeatureScope avx_scope(masm(), AVX);
- __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
- break;
- }
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
@@ -1097,6 +1364,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
+ case kSSEFloat64SilenceNaN:
+ __ xorpd(kScratchDoubleReg, kScratchDoubleReg);
+ __ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+ break;
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1161,7 +1432,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kIA32BitcastFI:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -1210,10 +1481,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kIA32PushFloat32:
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputDouble(0));
__ sub(esp, Immediate(kDoubleSize));
@@ -1227,7 +1498,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kIA32PushFloat64:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1244,10 +1515,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kIA32Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ sub(esp, Immediate(kFloatSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -1265,6 +1536,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kIA32Xchgb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_b(i.InputRegister(index), operand);
+ break;
+ }
+ case kIA32Xchgw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_w(i.InputRegister(index), operand);
+ break;
+ }
+ case kIA32Xchgl: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg(i.InputRegister(index), operand);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
@@ -1281,10 +1570,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -1311,7 +1600,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ UNREACHABLE(); // Won't be generated by instruction selector.
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1485,12 +1785,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ jmp(Operand::JumpTable(input, times_4, table));
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
@@ -1621,8 +1925,21 @@ void CodeGenerator::AssembleDeoptimizerCall(
// | RET | args | caller frame |
// ^ esp ^ ebp
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ ++pushed;
+ }
+ frame->AllocateSavedCalleeRegisterSlots(pushed);
+ }
+}
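
The counting loop in FinishFrame (and the matching push loop retained in
AssembleConstructFrame below) is a population count over the callee-saved
RegList; assuming V8's bit utilities, the same value could be written as:

  #include "src/base/bits.h"

  int pushed = base::bits::CountPopulation32(saves);
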
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1634,7 +1951,9 @@ void CodeGenerator::AssemblePrologue() {
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1645,12 +1964,12 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves = descriptor->CalleeSavedRegisters();
- if (stack_shrink_slots > 0) {
- __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(esp, Immediate(shrink_slots * kPointerSize));
}
if (saves != 0) { // Save callee-saved registers.
@@ -1661,7 +1980,6 @@ void CodeGenerator::AssemblePrologue() {
__ push(Register::from_code(i));
++pushed;
}
- frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
}
@@ -1756,11 +2074,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ Move(dst, Immediate(src));
}
@@ -1769,37 +2087,63 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.HighOperand(destination);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ movaps(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
- __ movsd(dst, src);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(dst, src);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ movss(dst, src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(dst, src);
+ }
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
Operand src = g.ToOperand(source);
- if (destination->IsDoubleRegister()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(dst, src);
+ if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(dst, src);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ movss(dst, src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(dst, src);
+ }
} else {
Operand dst = g.ToOperand(destination);
- __ movsd(kScratchDoubleReg, src);
- __ movsd(dst, kScratchDoubleReg);
+ if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, src);
+ __ movsd(dst, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, src);
+ __ movss(dst, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, src);
+ __ movups(dst, kScratchDoubleReg);
+ }
}
} else {
UNREACHABLE();
@@ -1841,32 +2185,62 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
frame_access_state()->IncreaseSPDelta(-1);
Operand src2 = g.ToOperand(source);
__ pop(src2);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ } else if (source->IsFPRegister() && destination->IsFPRegister()) {
// XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
__ movaps(kScratchDoubleReg, src);
__ movaps(src, dst);
__ movaps(dst, kScratchDoubleReg);
- } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
// XMM register-memory swap.
XMMRegister reg = g.ToDoubleRegister(source);
Operand other = g.ToOperand(destination);
- __ movsd(kScratchDoubleReg, other);
- __ movsd(other, reg);
- __ movaps(reg, kScratchDoubleReg);
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ movsd(kScratchDoubleReg, other);
+ __ movsd(other, reg);
+ __ movaps(reg, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, other);
+ __ movss(other, reg);
+ __ movaps(reg, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ movups(kScratchDoubleReg, other);
+ __ movups(other, reg);
+ __ movups(reg, kScratchDoubleReg);
+ }
+ } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
// Double-width memory-to-memory.
Operand src0 = g.ToOperand(source);
- Operand src1 = g.HighOperand(source);
Operand dst0 = g.ToOperand(destination);
- Operand dst1 = g.HighOperand(destination);
- __ movsd(kScratchDoubleReg, dst0); // Save destination in scratch register.
- __ push(src0); // Then use stack to copy source to destination.
- __ pop(dst0);
- __ push(src1);
- __ pop(dst1);
- __ movsd(src0, kScratchDoubleReg);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ Operand src1 = g.HighOperand(source);
+ Operand dst1 = g.HighOperand(destination);
+ __ movsd(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ push(src1);
+ __ pop(dst1);
+ __ movsd(src0, kScratchDoubleReg);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ movss(kScratchDoubleReg, dst0); // Save dst in scratch register.
+ __ push(src0); // Then use stack to copy src to destination.
+ __ pop(dst0);
+ __ movss(src0, kScratchDoubleReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ // Use the XOR trick to swap without a temporary.
+ __ movups(kScratchDoubleReg, src0);
+ __ xorps(kScratchDoubleReg, dst0); // scratch contains src ^ dst.
+ __ movups(src0, kScratchDoubleReg);
+ __ xorps(kScratchDoubleReg, dst0); // scratch contains src.
+ __ movups(dst0, kScratchDoubleReg);
+ __ xorps(kScratchDoubleReg, src0); // scratch contains dst.
+ __ movups(src0, kScratchDoubleReg);
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -1881,9 +2255,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
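
As background for the kSimd128 branch of AssembleSwap above: the three-XOR exchange swaps two 128-bit values through a single scratch register, with no second temporary. Below is a minimal standalone sketch of the same sequence using SSE intrinsics; the function name and raw-pointer parameters are illustrative assumptions, not part of V8.

#include <xmmintrin.h>  // SSE intrinsics

// Mirrors the movups/xorps sequence emitted for kSimd128 stack-slot swaps:
// after three XORs, *src holds the old *dst and *dst holds the old *src.
void SwapSimd128(float* src, float* dst) {
  __m128 scratch = _mm_loadu_ps(src);                // scratch = src
  scratch = _mm_xor_ps(scratch, _mm_loadu_ps(dst));  // scratch = src ^ dst
  _mm_storeu_ps(src, scratch);                       // src = src ^ dst
  scratch = _mm_xor_ps(scratch, _mm_loadu_ps(dst));  // scratch = original src
  _mm_storeu_ps(dst, scratch);                       // dst = original src
  scratch = _mm_xor_ps(scratch, _mm_loadu_ps(src));  // scratch = original dst
  _mm_storeu_ps(src, scratch);                       // src = original dst
}
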
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 3cf2094bdd..7cf0a11045 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -48,8 +48,6 @@ namespace compiler {
V(SSEFloat32Sub) \
V(SSEFloat32Mul) \
V(SSEFloat32Div) \
- V(SSEFloat32Max) \
- V(SSEFloat32Min) \
V(SSEFloat32Abs) \
V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
@@ -60,7 +58,9 @@ namespace compiler {
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat32Max) \
V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
V(SSEFloat64Min) \
V(SSEFloat64Abs) \
V(SSEFloat64Neg) \
@@ -81,18 +81,15 @@ namespace compiler {
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
V(AVXFloat32Add) \
V(AVXFloat32Sub) \
V(AVXFloat32Mul) \
V(AVXFloat32Div) \
- V(AVXFloat32Max) \
- V(AVXFloat32Min) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
- V(AVXFloat64Max) \
- V(AVXFloat64Min) \
V(AVXFloat64Abs) \
V(AVXFloat64Neg) \
V(AVXFloat32Abs) \
@@ -113,7 +110,10 @@ namespace compiler {
V(IA32PushFloat32) \
V(IA32PushFloat64) \
V(IA32Poke) \
- V(IA32StackCheck)
+ V(IA32StackCheck) \
+ V(IA32Xchgb) \
+ V(IA32Xchgw) \
+ V(IA32Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 803fdf6fd6..1c62de5792 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -51,8 +51,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat32Sub:
case kSSEFloat32Mul:
case kSSEFloat32Div:
- case kSSEFloat32Max:
- case kSSEFloat32Min:
case kSSEFloat32Abs:
case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
@@ -63,7 +61,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Mul:
case kSSEFloat64Div:
case kSSEFloat64Mod:
+ case kSSEFloat32Max:
case kSSEFloat64Max:
+ case kSSEFloat32Min:
case kSSEFloat64Min:
case kSSEFloat64Abs:
case kSSEFloat64Neg:
@@ -84,18 +84,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64InsertLowWord32:
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
+ case kSSEFloat64SilenceNaN:
case kAVXFloat32Add:
case kAVXFloat32Sub:
case kAVXFloat32Mul:
case kAVXFloat32Div:
- case kAVXFloat32Max:
- case kAVXFloat32Min:
case kAVXFloat64Add:
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
- case kAVXFloat64Max:
- case kAVXFloat64Min:
case kAVXFloat64Abs:
case kAVXFloat64Neg:
case kAVXFloat32Abs:
@@ -127,6 +124,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Poke:
return kHasSideEffect;
+ case kIA32Xchgb:
+ case kIA32Xchgw:
+ case kIA32Xchgl:
+ return kIsLoadOperation | kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 3eae18edcb..4a1e19bddd 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -60,13 +60,20 @@ class IA32OperandGenerator final : public OperandGenerator {
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
return true;
case IrOpcode::kHeapConstant: {
+// TODO(bmeurer): We must not dereference handles concurrently. If we
+// really have to do this here, then we need to find a way to put this
+// information on the HeapConstant node already.
+#if 0
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
+#endif
}
default:
return false;
@@ -75,12 +82,16 @@ class IA32OperandGenerator final : public OperandGenerator {
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
Node* displacement_node,
+ DisplacementMode displacement_mode,
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
+ if (displacement_mode == kNegativeDisplacement) {
+ displacement = -displacement;
+ }
if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
@@ -135,11 +146,12 @@ class IA32OperandGenerator final : public OperandGenerator {
AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
InstructionOperand inputs[],
size_t* input_count) {
- BaseWithIndexAndDisplacement32Matcher m(node, true);
+ BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
DCHECK(m.matches());
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
- return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
- m.displacement(), inputs, input_count);
+ return GenerateMemoryOperandInputs(
+ m.index(), m.scale(), m.base(), m.displacement(),
+ m.displacement_mode(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(node->InputAt(0));
inputs[(*input_count)++] = UseRegister(node->InputAt(1));
@@ -214,7 +226,9 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
break;
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
@@ -298,7 +312,9 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kIA32Movw;
break;
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
@@ -331,6 +347,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+// The architecture supports unaligned access, so VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, so VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -355,10 +376,12 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -402,10 +425,12 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -486,7 +511,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -570,12 +595,14 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
}
void EmitLea(InstructionSelector* selector, Node* result, Node* index,
- int scale, Node* base, Node* displacement) {
+ int scale, Node* base, Node* displacement,
+ DisplacementMode displacement_mode) {
IA32OperandGenerator g(selector);
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode mode = g.GenerateMemoryOperandInputs(
- index, scale, base, displacement, inputs, &input_count);
+ AddressingMode mode =
+ g.GenerateMemoryOperandInputs(index, scale, base, displacement,
+ displacement_mode, inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
@@ -596,7 +623,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr);
+ EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
return;
}
VisitShift(this, node, kIA32Shl);
@@ -719,6 +746,9 @@ void InstructionSelector::VisitWord32Ctz(Node* node) {
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) {
IA32OperandGenerator g(this);
@@ -736,7 +766,8 @@ void InstructionSelector::VisitInt32Add(Node* node) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
- m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
+ m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
+ inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
@@ -770,7 +801,7 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr);
+ EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
return;
}
IA32OperandGenerator g(this);
@@ -874,15 +905,12 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRO(this, node, kSSEFloat64ToFloat32);
}
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, node, kArchTruncateDoubleToI);
+}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, node, kArchTruncateDoubleToI);
- case TruncationMode::kRoundToZero:
- return VisitRO(this, node, kSSEFloat64ToInt32);
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToInt32);
}
@@ -909,41 +937,13 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
- IA32OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
- kSSEFloat32Neg);
- return;
- }
VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
- IA32OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- if (m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
- g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
- VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
- kSSEFloat64Neg);
- return;
- }
VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}
@@ -972,24 +972,36 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
temps);
}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitRROFloat(this, node, kAVXFloat32Max, kSSEFloat32Max);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+ arraysize(temps), temps);
}
-
void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitRROFloat(this, node, kAVXFloat64Max, kSSEFloat64Max);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+ arraysize(temps), temps);
}
-
void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitRROFloat(this, node, kAVXFloat32Min, kSSEFloat32Min);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+ arraysize(temps), temps);
}
-
void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitRROFloat(this, node, kAVXFloat64Min, kSSEFloat64Min);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
+ arraysize(temps), temps);
}
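
One piece of background on the Float32/Float64 Max and Min changes above (general x86 behavior, not something this diff states): maxss/maxsd and minss/minsd are asymmetric, returning the second operand whenever the inputs are unordered or compare equal, so NaN and signed-zero handling depends on operand order. A small self-contained illustration:

#include <cmath>
#include <cstdio>
#include <xmmintrin.h>

// _mm_max_ss(a, b) returns b when the comparison is unordered, so the
// result for NaN inputs depends on which operand the NaN occupies.
int main() {
  float r;
  _mm_store_ss(&r, _mm_max_ss(_mm_set_ss(std::nanf("")), _mm_set_ss(1.0f)));
  std::printf("max(NaN, 1) = %f\n", r);  // prints 1.000000
  _mm_store_ss(&r, _mm_max_ss(_mm_set_ss(1.0f), _mm_set_ss(std::nanf(""))));
  std::printf("max(1, NaN) = %f\n", r);  // prints nan
  return 0;
}
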
@@ -1004,7 +1016,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRO(this, node, kSSEFloat32Sqrt);
}
@@ -1059,6 +1070,28 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1070,7 +1103,7 @@ void InstructionSelector::EmitPrepareArguments(
InstructionOperand temps[] = {g.TempRegister()};
size_t const temp_count = arraysize(temps);
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
@@ -1093,7 +1126,7 @@ void InstructionSelector::EmitPrepareArguments(
g.CanBeImmediate(input.node())
? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input.node()))
+ sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node())
: g.Use(input.node());
if (input.type() == MachineType::Float32()) {
@@ -1134,7 +1167,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1152,7 +1185,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1175,10 +1208,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
- Node* right) {
- if (opcode != kIA32Cmp && opcode != kIA32Test) {
- return opcode;
- }
+ Node* right, FlagsContinuation* cont) {
// Currently, if one of the two operands is not a Load, we don't know what its
// machine representation is, so we bail out.
// TODO(epertoso): we can probably get some size information out of immediates
@@ -1188,19 +1218,39 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
}
// If the load representations don't match, both operands will be
 // zero/sign-extended to 32 bits.
- LoadRepresentation left_representation = LoadRepresentationOf(left->op());
- if (left_representation != LoadRepresentationOf(right->op())) {
- return opcode;
- }
- switch (left_representation.representation()) {
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8:
- return opcode == kIA32Cmp ? kIA32Cmp8 : kIA32Test8;
- case MachineRepresentation::kWord16:
- return opcode == kIA32Cmp ? kIA32Cmp16 : kIA32Test16;
- default:
- return opcode;
+ MachineType left_type = LoadRepresentationOf(left->op());
+ MachineType right_type = LoadRepresentationOf(right->op());
+ if (left_type == right_type) {
+ switch (left_type.representation()) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8: {
+ if (opcode == kIA32Test) return kIA32Test8;
+ if (opcode == kIA32Cmp) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kIA32Cmp8;
+ }
+ break;
+ }
+ case MachineRepresentation::kWord16:
+ if (opcode == kIA32Test) return kIA32Test16;
+ if (opcode == kIA32Cmp) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kIA32Cmp16;
+ }
+ break;
+ default:
+ break;
+ }
}
+ return opcode;
}
// Shared routine for multiple float32 compare operations (inputs commuted).
@@ -1227,7 +1277,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+ InstructionCode narrowed_opcode =
+ TryNarrowOpcodeSize(opcode, left, right, cont);
int effect_level = selector->GetEffectLevel(node);
if (cont->IsBranch()) {
@@ -1291,7 +1342,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1372,6 +1423,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kIA32Sub, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kIA32Imul, cont);
default:
break;
}
@@ -1402,14 +1456,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1496,6 +1550,14 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop(this, node, kIA32Sub, &cont);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kIA32Imul, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kIA32Imul, &cont);
+}
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
@@ -1573,15 +1635,63 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.Use(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kIA32Xchgb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kIA32Xchgw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Xchgl;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kFloat32Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kWord32Ctz;
if (CpuFeatures::IsSupported(POPCNT)) {
@@ -1600,6 +1710,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
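
For the VisitAtomicStore hunk above: on IA-32, an xchg instruction with a memory operand carries an implicit LOCK prefix, so a single exchange (kIA32Xchgb/w/l) yields a sequentially consistent store without a separate fence. A minimal C++ sketch of the equivalent semantics; the function name is illustrative only.

#include <atomic>
#include <cstdint>

// A seq_cst store like this is what the selector lowers to kIA32Xchgl;
// on x86, compilers typically emit it as `xchg [slot], value`.
void AtomicStoreWord32(std::atomic<uint32_t>* slot, uint32_t value) {
  slot->store(value, std::memory_order_seq_cst);
}
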
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index b005083a85..c6689d8e18 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -48,6 +48,7 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchCallJSFunction) \
V(ArchTailCallJSFunctionFromJSFunction) \
V(ArchTailCallJSFunction) \
+ V(ArchTailCallAddress) \
V(ArchPrepareCallCFunction) \
V(ArchCallCFunction) \
V(ArchPrepareTailCall) \
@@ -55,6 +56,9 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
+ V(ArchDebugBreak) \
+ V(ArchImpossible) \
+ V(ArchComment) \
V(ArchThrowTerminator) \
V(ArchDeoptimize) \
V(ArchRet) \
@@ -77,7 +81,36 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(CheckedStoreWord64) \
V(CheckedStoreFloat32) \
V(CheckedStoreFloat64) \
- V(ArchStackSlot)
+ V(ArchStackSlot) \
+ V(AtomicLoadInt8) \
+ V(AtomicLoadUint8) \
+ V(AtomicLoadInt16) \
+ V(AtomicLoadUint16) \
+ V(AtomicLoadWord32) \
+ V(AtomicStoreWord8) \
+ V(AtomicStoreWord16) \
+ V(AtomicStoreWord32) \
+ V(Ieee754Float64Acos) \
+ V(Ieee754Float64Acosh) \
+ V(Ieee754Float64Asin) \
+ V(Ieee754Float64Asinh) \
+ V(Ieee754Float64Atan) \
+ V(Ieee754Float64Atanh) \
+ V(Ieee754Float64Atan2) \
+ V(Ieee754Float64Cbrt) \
+ V(Ieee754Float64Cos) \
+ V(Ieee754Float64Cosh) \
+ V(Ieee754Float64Exp) \
+ V(Ieee754Float64Expm1) \
+ V(Ieee754Float64Log) \
+ V(Ieee754Float64Log1p) \
+ V(Ieee754Float64Log10) \
+ V(Ieee754Float64Log2) \
+ V(Ieee754Float64Pow) \
+ V(Ieee754Float64Sin) \
+ V(Ieee754Float64Sinh) \
+ V(Ieee754Float64Tan) \
+ V(Ieee754Float64Tanh)
#define ARCH_OPCODE_LIST(V) \
COMMON_ARCH_OPCODE_LIST(V) \
@@ -146,7 +179,9 @@ enum FlagsCondition {
kUnorderedEqual,
kUnorderedNotEqual,
kOverflow,
- kNotOverflow
+ kNotOverflow,
+ kPositiveOrZero,
+ kNegative
};
inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index b612cd1e9e..2e10794d69 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -82,8 +82,9 @@ InstructionScheduler::InstructionScheduler(Zone* zone,
graph_(zone),
last_side_effect_instr_(nullptr),
pending_loads_(zone),
- last_live_in_reg_marker_(nullptr) {
-}
+ last_live_in_reg_marker_(nullptr),
+ last_deopt_(nullptr),
+ operands_map_(zone) {}
void InstructionScheduler::StartBlock(RpoNumber rpo) {
@@ -91,6 +92,8 @@ void InstructionScheduler::StartBlock(RpoNumber rpo) {
DCHECK(last_side_effect_instr_ == nullptr);
DCHECK(pending_loads_.empty());
DCHECK(last_live_in_reg_marker_ == nullptr);
+ DCHECK(last_deopt_ == nullptr);
+ DCHECK(operands_map_.empty());
sequence()->StartBlock(rpo);
}
@@ -106,6 +109,8 @@ void InstructionScheduler::EndBlock(RpoNumber rpo) {
last_side_effect_instr_ = nullptr;
pending_loads_.clear();
last_live_in_reg_marker_ = nullptr;
+ last_deopt_ = nullptr;
+ operands_map_.clear();
}
@@ -128,6 +133,12 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_live_in_reg_marker_->AddSuccessor(new_node);
}
+ // Make sure that new instructions are not scheduled before the last
+ // deoptimization point.
+ if (last_deopt_ != nullptr) {
+ last_deopt_->AddSuccessor(new_node);
+ }
+
// Instructions with side effects and memory operations can't be
// reordered with respect to each other.
if (HasSideEffect(instr)) {
@@ -146,12 +157,36 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
+ } else if (instr->IsDeoptimizeCall()) {
+ // Ensure that deopts are not reordered with respect to side-effect
+ // instructions.
+ if (last_side_effect_instr_ != nullptr) {
+ last_side_effect_instr_->AddSuccessor(new_node);
+ }
+ last_deopt_ = new_node;
}
// Look for operand dependencies.
- for (ScheduleGraphNode* node : graph_) {
- if (HasOperandDependency(node->instruction(), instr)) {
- node->AddSuccessor(new_node);
+ for (size_t i = 0; i < instr->InputCount(); ++i) {
+ const InstructionOperand* input = instr->InputAt(i);
+ if (input->IsUnallocated()) {
+ int32_t vreg = UnallocatedOperand::cast(input)->virtual_register();
+ auto it = operands_map_.find(vreg);
+ if (it != operands_map_.end()) {
+ it->second->AddSuccessor(new_node);
+ }
+ }
+ }
+
+ // Record the virtual registers defined by this instruction.
+ for (size_t i = 0; i < instr->OutputCount(); ++i) {
+ const InstructionOperand* output = instr->OutputAt(i);
+ if (output->IsUnallocated()) {
+ operands_map_[UnallocatedOperand::cast(output)->virtual_register()] =
+ new_node;
+ } else if (output->IsConstant()) {
+ operands_map_[ConstantOperand::cast(output)->virtual_register()] =
+ new_node;
}
}
}
@@ -206,6 +241,30 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchParentFramePointer:
case kArchTruncateDoubleToI:
case kArchStackSlot:
+ case kArchDebugBreak:
+ case kArchImpossible:
+ case kArchComment:
+ case kIeee754Float64Acos:
+ case kIeee754Float64Acosh:
+ case kIeee754Float64Asin:
+ case kIeee754Float64Asinh:
+ case kIeee754Float64Atan:
+ case kIeee754Float64Atanh:
+ case kIeee754Float64Atan2:
+ case kIeee754Float64Cbrt:
+ case kIeee754Float64Cos:
+ case kIeee754Float64Cosh:
+ case kIeee754Float64Exp:
+ case kIeee754Float64Expm1:
+ case kIeee754Float64Log:
+ case kIeee754Float64Log1p:
+ case kIeee754Float64Log10:
+ case kIeee754Float64Log2:
+ case kIeee754Float64Pow:
+ case kIeee754Float64Sin:
+ case kIeee754Float64Sinh:
+ case kIeee754Float64Tan:
+ case kIeee754Float64Tanh:
return kNoOpcodeFlags;
case kArchStackPointer:
@@ -224,6 +283,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchTailCallCodeObject:
case kArchTailCallJSFunctionFromJSFunction:
case kArchTailCallJSFunction:
+ case kArchTailCallAddress:
return kHasSideEffect | kIsBlockTerminator;
case kArchDeoptimize:
@@ -253,6 +313,18 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ return kIsLoadOperation;
+
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
TARGET_ARCH_OPCODE_LIST(CASE)
#undef CASE
@@ -264,33 +336,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
}
-bool InstructionScheduler::HasOperandDependency(
- const Instruction* instr1, const Instruction* instr2) const {
- for (size_t i = 0; i < instr1->OutputCount(); ++i) {
- for (size_t j = 0; j < instr2->InputCount(); ++j) {
- const InstructionOperand* output = instr1->OutputAt(i);
- const InstructionOperand* input = instr2->InputAt(j);
-
- if (output->IsUnallocated() && input->IsUnallocated() &&
- (UnallocatedOperand::cast(output)->virtual_register() ==
- UnallocatedOperand::cast(input)->virtual_register())) {
- return true;
- }
-
- if (output->IsConstant() && input->IsUnallocated() &&
- (ConstantOperand::cast(output)->virtual_register() ==
- UnallocatedOperand::cast(input)->virtual_register())) {
- return true;
- }
- }
- }
-
- // TODO(bafsa): Do we need to look for anti-dependencies/output-dependencies?
-
- return false;
-}
-
-
bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
(instr->flags_mode() == kFlags_branch));
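
The scheduler changes above replace the quadratic HasOperandDependency scan with operands_map_, a map from virtual register to defining node that turns use-def edge discovery into one lookup per input, and add last_deopt_ as a barrier so no instruction is hoisted above a deoptimization point. A compact sketch of the map-based dependency recording, using simplified stand-in types rather than V8's classes:

#include <cstdint>
#include <map>
#include <vector>

struct GraphNode {
  std::vector<GraphNode*> successors;  // scheduling dependencies
};

std::map<int32_t, GraphNode*> operands_map;  // vreg -> defining node

void AddDependencies(GraphNode* node, const std::vector<int32_t>& input_vregs,
                     const std::vector<int32_t>& output_vregs) {
  for (int32_t vreg : input_vregs) {  // one lookup per input, not a full scan
    auto it = operands_map.find(vreg);
    if (it != operands_map.end()) it->second->successors.push_back(node);
  }
  for (int32_t vreg : output_vregs) operands_map[vreg] = node;  // record defs
}
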
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
index 104c0b97de..271aa0d0d7 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -156,10 +156,6 @@ class InstructionScheduler final : public ZoneObject {
int GetInstructionFlags(const Instruction* instr) const;
int GetTargetInstructionFlags(const Instruction* instr) const;
- // Return true if instr2 uses any value defined by instr1.
- bool HasOperandDependency(const Instruction* instr1,
- const Instruction* instr2) const;
-
// Return true if the instruction is a basic block terminator.
bool IsBlockTerminator(const Instruction* instr) const;
@@ -177,10 +173,12 @@ class InstructionScheduler final : public ZoneObject {
// Identify nops used as a definition point for live-in registers at
// function entry.
bool IsFixedRegisterParameter(const Instruction* instr) const {
- return (instr->arch_opcode() == kArchNop) &&
- (instr->OutputCount() == 1) &&
- (instr->OutputAt(0)->IsUnallocated()) &&
- UnallocatedOperand::cast(instr->OutputAt(0))->HasFixedRegisterPolicy();
+ return (instr->arch_opcode() == kArchNop) && (instr->OutputCount() == 1) &&
+ (instr->OutputAt(0)->IsUnallocated()) &&
+ (UnallocatedOperand::cast(instr->OutputAt(0))
+ ->HasFixedRegisterPolicy() ||
+ UnallocatedOperand::cast(instr->OutputAt(0))
+ ->HasFixedFPRegisterPolicy());
}
void ComputeTotalLatencies();
@@ -209,6 +207,13 @@ class InstructionScheduler final : public ZoneObject {
// All these nops are chained together and added as a predecessor of every
// other instructions in the basic block.
ScheduleGraphNode* last_live_in_reg_marker_;
+
+ // Last deoptimization instruction encountered while building the graph.
+ ScheduleGraphNode* last_deopt_;
+
+ // Keep track of definition points for virtual registers. This is used to
+ // record operand dependencies in the scheduling graph.
+ ZoneMap<int32_t, ScheduleGraphNode*> operands_map_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index e750aed19a..25d8a99e86 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -54,22 +54,26 @@ class OperandGenerator {
reg.code(), GetVReg(node)));
}
- InstructionOperand DefineAsFixed(Node* node, DoubleRegister reg) {
+ template <typename FPRegType>
+ InstructionOperand DefineAsFixed(Node* node, FPRegType reg) {
return Define(node,
- UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
reg.code(), GetVReg(node)));
}
InstructionOperand DefineAsConstant(Node* node) {
+ return DefineAsConstant(node, ToConstant(node));
+ }
+
+ InstructionOperand DefineAsConstant(Node* node, Constant constant) {
selector()->MarkAsDefined(node);
int virtual_register = GetVReg(node);
- sequence()->AddConstant(virtual_register, ToConstant(node));
+ sequence()->AddConstant(virtual_register, constant);
return ConstantOperand(virtual_register);
}
- InstructionOperand DefineAsLocation(Node* node, LinkageLocation location,
- MachineRepresentation rep) {
- return Define(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+ InstructionOperand DefineAsLocation(Node* node, LinkageLocation location) {
+ return Define(node, ToUnallocatedOperand(location, GetVReg(node)));
}
InstructionOperand DefineAsDualLocation(Node* node,
@@ -122,10 +126,10 @@ class OperandGenerator {
reg.code(), GetVReg(node)));
}
- InstructionOperand UseFixed(Node* node, DoubleRegister reg) {
- return Use(node,
- UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
- reg.code(), GetVReg(node)));
+ template <typename FPRegType>
+ InstructionOperand UseFixed(Node* node, FPRegType reg) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
+ reg.code(), GetVReg(node)));
}
InstructionOperand UseExplicit(LinkageLocation location) {
@@ -139,24 +143,30 @@ class OperandGenerator {
}
}
+ InstructionOperand UseImmediate(int immediate) {
+ return sequence()->AddImmediate(Constant(immediate));
+ }
+
InstructionOperand UseImmediate(Node* node) {
return sequence()->AddImmediate(ToConstant(node));
}
- InstructionOperand UseLocation(Node* node, LinkageLocation location,
- MachineRepresentation rep) {
- return Use(node, ToUnallocatedOperand(location, rep, GetVReg(node)));
+ InstructionOperand UseNegatedImmediate(Node* node) {
+ return sequence()->AddImmediate(ToNegatedConstant(node));
+ }
+
+ InstructionOperand UseLocation(Node* node, LinkageLocation location) {
+ return Use(node, ToUnallocatedOperand(location, GetVReg(node)));
}
// Used to force gap moves from the from_location to the to_location
// immediately before an instruction.
InstructionOperand UsePointerLocation(LinkageLocation to_location,
LinkageLocation from_location) {
- MachineRepresentation rep = MachineType::PointerRepresentation();
UnallocatedOperand casted_from_operand =
- UnallocatedOperand::cast(TempLocation(from_location, rep));
+ UnallocatedOperand::cast(TempLocation(from_location));
selector_->Emit(kArchNop, casted_from_operand);
- return ToUnallocatedOperand(to_location, rep,
+ return ToUnallocatedOperand(to_location,
casted_from_operand.virtual_register());
}
@@ -184,10 +194,8 @@ class OperandGenerator {
return sequence()->AddImmediate(Constant(imm));
}
- InstructionOperand TempLocation(LinkageLocation location,
- MachineRepresentation rep) {
- return ToUnallocatedOperand(location, rep,
- sequence()->NextVirtualRegister());
+ InstructionOperand TempLocation(LinkageLocation location) {
+ return ToUnallocatedOperand(location, sequence()->NextVirtualRegister());
}
InstructionOperand Label(BasicBlock* block) {
@@ -211,10 +219,14 @@ class OperandGenerator {
return Constant(OpParameter<int64_t>(node));
case IrOpcode::kFloat32Constant:
return Constant(OpParameter<float>(node));
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
+ return Constant(OpParameter<RelocatablePtrConstantInfo>(node));
case IrOpcode::kFloat64Constant:
case IrOpcode::kNumberConstant:
return Constant(OpParameter<double>(node));
case IrOpcode::kExternalConstant:
+ case IrOpcode::kComment:
return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
return Constant(OpParameter<Handle<HeapObject>>(node));
@@ -225,6 +237,19 @@ class OperandGenerator {
return Constant(static_cast<int32_t>(0));
}
+ static Constant ToNegatedConstant(const Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32Constant:
+ return Constant(-OpParameter<int32_t>(node));
+ case IrOpcode::kInt64Constant:
+ return Constant(-OpParameter<int64_t>(node));
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return Constant(static_cast<int32_t>(0));
+ }
+
UnallocatedOperand Define(Node* node, UnallocatedOperand operand) {
DCHECK_NOT_NULL(node);
DCHECK_EQ(operand.virtual_register(), GetVReg(node));
@@ -252,7 +277,6 @@ class OperandGenerator {
}
UnallocatedOperand ToUnallocatedOperand(LinkageLocation location,
- MachineRepresentation rep,
int virtual_register) {
if (location.IsAnyRegister()) {
// any machine register.
@@ -270,8 +294,8 @@ class OperandGenerator {
location.AsCalleeFrameSlot(), virtual_register);
}
// a fixed register.
- if (IsFloatingPoint(rep)) {
- return UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+ if (IsFloatingPoint(location.GetType().representation())) {
+ return UnallocatedOperand(UnallocatedOperand::FIXED_FP_REGISTER,
location.AsRegister(), virtual_register);
}
return UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
@@ -305,13 +329,14 @@ class FlagsContinuation final {
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+ DeoptimizeReason reason,
Node* frame_state) {
- return FlagsContinuation(kFlags_deoptimize, condition, frame_state);
+ return FlagsContinuation(condition, reason, frame_state);
}
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
- return FlagsContinuation(kFlags_set, condition, result);
+ return FlagsContinuation(condition, result);
}
bool IsNone() const { return mode_ == kFlags_none; }
@@ -322,6 +347,10 @@ class FlagsContinuation final {
DCHECK(!IsNone());
return condition_;
}
+ DeoptimizeReason reason() const {
+ DCHECK(IsDeoptimize());
+ return reason_;
+ }
Node* frame_state() const {
DCHECK(IsDeoptimize());
return frame_state_or_result_;
@@ -349,12 +378,33 @@ class FlagsContinuation final {
condition_ = CommuteFlagsCondition(condition_);
}
+ void Overwrite(FlagsCondition condition) { condition_ = condition; }
+
void OverwriteAndNegateIfEqual(FlagsCondition condition) {
bool negate = condition_ == kEqual;
condition_ = condition;
if (negate) Negate();
}
+ void OverwriteUnsignedIfSigned() {
+ switch (condition_) {
+ case kSignedLessThan:
+ condition_ = kUnsignedLessThan;
+ break;
+ case kSignedLessThanOrEqual:
+ condition_ = kUnsignedLessThanOrEqual;
+ break;
+ case kSignedGreaterThan:
+ condition_ = kUnsignedGreaterThan;
+ break;
+ case kSignedGreaterThanOrEqual:
+ condition_ = kUnsignedGreaterThanOrEqual;
+ break;
+ default:
+ break;
+ }
+ }
+
// Encodes this flags continuation into the given opcode.
InstructionCode Encode(InstructionCode opcode) {
opcode |= FlagsModeField::encode(mode_);
@@ -365,16 +415,24 @@ class FlagsContinuation final {
}
private:
- FlagsContinuation(FlagsMode mode, FlagsCondition condition,
- Node* frame_state_or_result)
- : mode_(mode),
+ FlagsContinuation(FlagsCondition condition, DeoptimizeReason reason,
+ Node* frame_state)
+ : mode_(kFlags_deoptimize),
+ condition_(condition),
+ reason_(reason),
+ frame_state_or_result_(frame_state) {
+ DCHECK_NOT_NULL(frame_state);
+ }
+ FlagsContinuation(FlagsCondition condition, Node* result)
+ : mode_(kFlags_set),
condition_(condition),
- frame_state_or_result_(frame_state_or_result) {
- DCHECK_NOT_NULL(frame_state_or_result);
+ frame_state_or_result_(result) {
+ DCHECK_NOT_NULL(result);
}
FlagsMode const mode_;
FlagsCondition condition_;
+ DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
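
OverwriteUnsignedIfSigned above pairs with TryNarrowOpcodeSize in the ia32 selector: when a 32-bit compare is narrowed to 8 or 16 bits over zero-extended (kUint32) loads, signed conditions must become their unsigned counterparts, since the narrowed operands' high bit is a value bit rather than a sign bit. A tiny illustration, with values chosen to show the divergence:

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t a = 0x80, b = 0x01;
  // Signed 8-bit view: 0x80 is -128, so a < b.
  std::printf("%d\n", static_cast<int8_t>(a) < static_cast<int8_t>(b));  // 1
  // Zero-extended (unsigned) view: 128 > 1, so a >= b.
  std::printf("%d\n", a < b);  // 0
  return 0;
}
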
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index b7162fe5dc..ac8e64a58a 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -87,7 +87,6 @@ void InstructionSelector::SelectInstructions() {
#endif
}
-
void InstructionSelector::StartBlock(RpoNumber rpo) {
if (FLAG_turbo_instruction_scheduling &&
InstructionScheduler::SchedulerSupported()) {
@@ -242,6 +241,20 @@ bool InstructionSelector::CanCover(Node* user, Node* node) const {
return true;
}
+bool InstructionSelector::IsOnlyUserOfNodeInSameBlock(Node* user,
+ Node* node) const {
+ BasicBlock* bb_user = schedule()->block(user);
+ BasicBlock* bb_node = schedule()->block(node);
+ if (bb_user != bb_node) return false;
+ for (Edge const edge : node->use_edges()) {
+ Node* from = edge.from();
+ if ((from != user) && (schedule()->block(from) == bb_user)) {
+ return false;
+ }
+ }
+ return true;
+}
+
int InstructionSelector::GetVirtualRegister(const Node* node) {
DCHECK_NOT_NULL(node);
size_t const id = node->id();
@@ -286,6 +299,9 @@ void InstructionSelector::MarkAsDefined(Node* node) {
bool InstructionSelector::IsUsed(Node* node) const {
DCHECK_NOT_NULL(node);
+ // TODO(bmeurer): This is a terrible monster hack, but we have to make sure
+ // that the Retain is actually emitted; otherwise the GC will mess up.
+ if (node->opcode() == IrOpcode::kRetain) return true;
if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
size_t const id = node->id();
DCHECK_LT(id, used_.size());
@@ -331,11 +347,12 @@ namespace {
enum class FrameStateInputKind { kAny, kStackSlot };
-
InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
- FrameStateInputKind kind) {
+ FrameStateInputKind kind,
+ MachineRepresentation rep) {
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
case IrOpcode::kNumberConstant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
@@ -345,11 +362,15 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
UNREACHABLE();
break;
default:
- switch (kind) {
- case FrameStateInputKind::kStackSlot:
- return g->UseUniqueSlot(input);
- case FrameStateInputKind::kAny:
- return g->UseAny(input);
+ if (rep == MachineRepresentation::kNone) {
+ return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
+ } else {
+ switch (kind) {
+ case FrameStateInputKind::kStackSlot:
+ return g->UseUniqueSlot(input);
+ case FrameStateInputKind::kAny:
+ return g->UseAny(input);
+ }
}
}
UNREACHABLE();
@@ -415,7 +436,7 @@ size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
break;
}
default: {
- inputs->push_back(OperandForDeopt(g, input, kind));
+ inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
return 1;
}
@@ -561,17 +582,17 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool output_is_live = buffer->output_nodes[i] != nullptr ||
i < outputs_needed_by_framestate;
if (output_is_live) {
- MachineType type =
- buffer->descriptor->GetReturnType(static_cast<int>(i));
+ MachineRepresentation rep =
+ buffer->descriptor->GetReturnType(static_cast<int>(i))
+ .representation();
LinkageLocation location =
buffer->descriptor->GetReturnLocation(static_cast<int>(i));
Node* output = buffer->output_nodes[i];
- InstructionOperand op =
- output == nullptr
- ? g.TempLocation(location, type.representation())
- : g.DefineAsLocation(output, location, type.representation());
- MarkAsRepresentation(type.representation(), op);
+ InstructionOperand op = output == nullptr
+ ? g.TempLocation(location)
+ : g.DefineAsLocation(output, location);
+ MarkAsRepresentation(rep, op);
buffer->outputs.push_back(op);
}
@@ -598,8 +619,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
break;
case CallDescriptor::kCallJSFunction:
buffer->instruction_args.push_back(
- g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
- buffer->descriptor->GetInputType(0).representation()));
+ g.UseLocation(callee, buffer->descriptor->GetInputLocation(0)));
break;
}
DCHECK_EQ(1u, buffer->instruction_args.size());
@@ -618,7 +638,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
// all the frames on top of it that are either an arguments adaptor frame
// or a tail caller frame.
if (buffer->descriptor->SupportsTailCalls()) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ frame_state = NodeProperties::GetFrameStateInput(frame_state);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
while (buffer->frame_state_descriptor != nullptr &&
@@ -626,15 +646,15 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
FrameStateType::kArgumentsAdaptor ||
buffer->frame_state_descriptor->type() ==
FrameStateType::kTailCallerFunction)) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ frame_state = NodeProperties::GetFrameStateInput(frame_state);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
}
}
- InstructionSequence::StateId state_id =
- sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
- buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
+ int const state_id = sequence()->AddDeoptimizationEntry(
+ buffer->frame_state_descriptor, DeoptimizeReason::kNoReason);
+ buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
@@ -666,9 +686,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
location = LinkageLocation::ConvertToTailCallerLocation(
location, stack_param_delta);
}
- InstructionOperand op =
- g.UseLocation(*iter, location,
- buffer->descriptor->GetInputType(index).representation());
+ InstructionOperand op = g.UseLocation(*iter, location);
if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
@@ -707,6 +725,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
int effect_level = 0;
for (Node* const node : *block) {
if (node->opcode() == IrOpcode::kStore ||
+ node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kCall) {
++effect_level;
@@ -823,9 +842,9 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
return VisitReturn(input);
}
case BasicBlock::kDeoptimize: {
- DeoptimizeKind kind = DeoptimizeKindOf(input->op());
+ DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
Node* value = input->InputAt(0);
- return VisitDeoptimize(kind, value);
+ return VisitDeoptimize(p.kind(), p.reason(), value);
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -865,8 +884,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsReference(node), VisitIfException(node);
case IrOpcode::kFinishRegion:
return MarkAsReference(node), VisitFinishRegion(node);
- case IrOpcode::kGuard:
- return MarkAsReference(node), VisitGuard(node);
case IrOpcode::kParameter: {
MachineType type =
linkage()->GetParameterType(ParameterIndexOf(node->op()));
@@ -877,6 +894,7 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsReference(node), VisitOsrValue(node);
case IrOpcode::kPhi: {
MachineRepresentation rep = PhiRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kNone) return;
MarkAsRepresentation(rep, node);
return VisitPhi(node);
}
@@ -885,6 +903,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kExternalConstant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
return VisitConstant(node);
case IrOpcode::kFloat32Constant:
return MarkAsFloat32(node), VisitConstant(node);
@@ -907,6 +927,15 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
+ case IrOpcode::kDebugBreak:
+ VisitDebugBreak(node);
+ return;
+ case IrOpcode::kComment:
+ VisitComment(node);
+ return;
+ case IrOpcode::kRetain:
+ VisitRetain(node);
+ return;
case IrOpcode::kLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -936,6 +965,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitWord32Ctz(node);
case IrOpcode::kWord32ReverseBits:
return MarkAsWord32(node), VisitWord32ReverseBits(node);
+ case IrOpcode::kWord32ReverseBytes:
+ return MarkAsWord32(node), VisitWord32ReverseBytes(node);
case IrOpcode::kWord32Popcnt:
return MarkAsWord32(node), VisitWord32Popcnt(node);
case IrOpcode::kWord64Popcnt:
@@ -960,6 +991,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64Ctz(node);
case IrOpcode::kWord64ReverseBits:
return MarkAsWord64(node), VisitWord64ReverseBits(node);
+ case IrOpcode::kWord64ReverseBytes:
+ return MarkAsWord64(node), VisitWord64ReverseBytes(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
@@ -972,6 +1005,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitInt32SubWithOverflow(node);
case IrOpcode::kInt32Mul:
return MarkAsWord32(node), VisitInt32Mul(node);
+ case IrOpcode::kInt32MulWithOverflow:
+ return MarkAsWord32(node), VisitInt32MulWithOverflow(node);
case IrOpcode::kInt32MulHigh:
return VisitInt32MulHigh(node);
case IrOpcode::kInt32Div:
@@ -1018,6 +1053,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitUint64LessThanOrEqual(node);
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
+ case IrOpcode::kBitcastWordToTagged:
+ return MarkAsReference(node), VisitBitcastWordToTagged(node);
case IrOpcode::kChangeFloat32ToFloat64:
return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
@@ -1028,6 +1065,26 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kImpossibleToWord32:
+ return MarkAsWord32(node), VisitImpossibleToWord32(node);
+ case IrOpcode::kImpossibleToWord64:
+ return MarkAsWord64(node), VisitImpossibleToWord64(node);
+ case IrOpcode::kImpossibleToFloat32:
+ return MarkAsFloat32(node), VisitImpossibleToFloat32(node);
+ case IrOpcode::kImpossibleToFloat64:
+ return MarkAsFloat64(node), VisitImpossibleToFloat64(node);
+ case IrOpcode::kImpossibleToTagged:
+ MarkAsRepresentation(MachineType::PointerRepresentation(), node);
+ return VisitImpossibleToTagged(node);
+ case IrOpcode::kImpossibleToBit:
+ return MarkAsWord32(node), VisitImpossibleToBit(node);
+ case IrOpcode::kFloat64SilenceNaN:
+ MarkAsFloat64(node);
+ if (CanProduceSignalingNaN(node->InputAt(0))) {
+ return VisitFloat64SilenceNaN(node);
+ } else {
+ return EmitIdentity(node);
+ }
case IrOpcode::kTruncateFloat64ToUint32:
return MarkAsWord32(node), VisitTruncateFloat64ToUint32(node);
case IrOpcode::kTruncateFloat32ToInt32:
@@ -1048,10 +1105,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
- case IrOpcode::kTruncateFloat64ToInt32:
- return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
+ case IrOpcode::kTruncateFloat64ToWord32:
+ return MarkAsWord32(node), VisitTruncateFloat64ToWord32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kRoundFloat64ToInt32:
+ return MarkAsWord32(node), VisitRoundFloat64ToInt32(node);
case IrOpcode::kRoundInt64ToFloat32:
return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
case IrOpcode::kRoundInt32ToFloat32:
@@ -1076,14 +1135,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
return MarkAsFloat32(node), VisitFloat32Sub(node);
+ case IrOpcode::kFloat32Neg:
+ return MarkAsFloat32(node), VisitFloat32Neg(node);
case IrOpcode::kFloat32Mul:
return MarkAsFloat32(node), VisitFloat32Mul(node);
case IrOpcode::kFloat32Div:
return MarkAsFloat32(node), VisitFloat32Div(node);
- case IrOpcode::kFloat32Min:
- return MarkAsFloat32(node), VisitFloat32Min(node);
- case IrOpcode::kFloat32Max:
- return MarkAsFloat32(node), VisitFloat32Max(node);
case IrOpcode::kFloat32Abs:
return MarkAsFloat32(node), VisitFloat32Abs(node);
case IrOpcode::kFloat32Sqrt:
@@ -1094,10 +1151,16 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitFloat32LessThan(node);
case IrOpcode::kFloat32LessThanOrEqual:
return VisitFloat32LessThanOrEqual(node);
+ case IrOpcode::kFloat32Max:
+ return MarkAsFloat32(node), VisitFloat32Max(node);
+ case IrOpcode::kFloat32Min:
+ return MarkAsFloat32(node), VisitFloat32Min(node);
case IrOpcode::kFloat64Add:
return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
return MarkAsFloat64(node), VisitFloat64Sub(node);
+ case IrOpcode::kFloat64Neg:
+ return MarkAsFloat64(node), VisitFloat64Neg(node);
case IrOpcode::kFloat64Mul:
return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
@@ -1110,8 +1173,50 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64Max(node);
case IrOpcode::kFloat64Abs:
return MarkAsFloat64(node), VisitFloat64Abs(node);
+ case IrOpcode::kFloat64Acos:
+ return MarkAsFloat64(node), VisitFloat64Acos(node);
+ case IrOpcode::kFloat64Acosh:
+ return MarkAsFloat64(node), VisitFloat64Acosh(node);
+ case IrOpcode::kFloat64Asin:
+ return MarkAsFloat64(node), VisitFloat64Asin(node);
+ case IrOpcode::kFloat64Asinh:
+ return MarkAsFloat64(node), VisitFloat64Asinh(node);
+ case IrOpcode::kFloat64Atan:
+ return MarkAsFloat64(node), VisitFloat64Atan(node);
+ case IrOpcode::kFloat64Atanh:
+ return MarkAsFloat64(node), VisitFloat64Atanh(node);
+ case IrOpcode::kFloat64Atan2:
+ return MarkAsFloat64(node), VisitFloat64Atan2(node);
+ case IrOpcode::kFloat64Cbrt:
+ return MarkAsFloat64(node), VisitFloat64Cbrt(node);
+ case IrOpcode::kFloat64Cos:
+ return MarkAsFloat64(node), VisitFloat64Cos(node);
+ case IrOpcode::kFloat64Cosh:
+ return MarkAsFloat64(node), VisitFloat64Cosh(node);
+ case IrOpcode::kFloat64Exp:
+ return MarkAsFloat64(node), VisitFloat64Exp(node);
+ case IrOpcode::kFloat64Expm1:
+ return MarkAsFloat64(node), VisitFloat64Expm1(node);
+ case IrOpcode::kFloat64Log:
+ return MarkAsFloat64(node), VisitFloat64Log(node);
+ case IrOpcode::kFloat64Log1p:
+ return MarkAsFloat64(node), VisitFloat64Log1p(node);
+ case IrOpcode::kFloat64Log10:
+ return MarkAsFloat64(node), VisitFloat64Log10(node);
+ case IrOpcode::kFloat64Log2:
+ return MarkAsFloat64(node), VisitFloat64Log2(node);
+ case IrOpcode::kFloat64Pow:
+ return MarkAsFloat64(node), VisitFloat64Pow(node);
+ case IrOpcode::kFloat64Sin:
+ return MarkAsFloat64(node), VisitFloat64Sin(node);
+ case IrOpcode::kFloat64Sinh:
+ return MarkAsFloat64(node), VisitFloat64Sinh(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsFloat64(node), VisitFloat64Sqrt(node);
+ case IrOpcode::kFloat64Tan:
+ return MarkAsFloat64(node), VisitFloat64Tan(node);
+ case IrOpcode::kFloat64Tanh:
+ return MarkAsFloat64(node), VisitFloat64Tanh(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
@@ -1152,6 +1257,14 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
return VisitLoadParentFramePointer(node);
+ case IrOpcode::kUnalignedLoad: {
+ UnalignedLoadRepresentation type =
+ UnalignedLoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
+ return VisitUnalignedLoad(node);
+ }
+ case IrOpcode::kUnalignedStore:
+ return VisitUnalignedStore(node);
case IrOpcode::kCheckedLoad: {
MachineRepresentation rep =
CheckedLoadRepresentationOf(node->op()).representation();
@@ -1184,6 +1297,16 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsWord32(NodeProperties::FindProjection(node, 0));
MarkAsWord32(NodeProperties::FindProjection(node, 1));
return VisitWord32PairSar(node);
+ case IrOpcode::kAtomicLoad: {
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
+ return VisitAtomicLoad(node);
+ }
+ case IrOpcode::kAtomicStore:
+ return VisitAtomicStore(node);
+ case IrOpcode::kUnsafePointerAdd:
+ MarkAsRepresentation(MachineType::PointerRepresentation(), node);
+ return VisitUnsafePointerAdd(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1191,13 +1314,47 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
+void InstructionSelector::VisitImpossibleToWord32(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
+}
+
+void InstructionSelector::VisitImpossibleToWord64(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchImpossible,
+ g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
+}
+
+void InstructionSelector::VisitImpossibleToFloat32(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0f)));
+}
+
+void InstructionSelector::VisitImpossibleToFloat64(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0.0)));
+}
+
+void InstructionSelector::VisitImpossibleToBit(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
+}
+
+void InstructionSelector::VisitImpossibleToTagged(Node* node) {
+ OperandGenerator g(this);
+#if V8_TARGET_ARCH_64_BIT
+ Emit(kArchImpossible,
+ g.DefineAsConstant(node, Constant(static_cast<int64_t>(0))));
+#else // V8_TARGET_ARCH_64_BIT
+ Emit(kArchImpossible, g.DefineAsConstant(node, Constant(0)));
+#endif // V8_TARGET_ARCH_64_BIT
+}
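The #if above exists because tagged values are pointer-sized, so the placeholder constant for an impossible tagged value must be 64-bit on 64-bit targets and 32-bit otherwise. A compile-time illustration of the same choice (type names here are illustrative, not V8's):

    #include <cstdint>
    #include <type_traits>

    // Pick a pointer-width integer at compile time, analogous to the
    // V8_TARGET_ARCH_64_BIT branch above.
    using TaggedStorage =
        std::conditional_t<sizeof(void*) == 8, int64_t, int32_t>;
    static_assert(sizeof(TaggedStorage) == sizeof(void*),
                  "tagged placeholder must be pointer-sized");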
void InstructionSelector::VisitLoadStackPointer(Node* node) {
OperandGenerator g(this);
Emit(kArchStackPointer, g.DefineAsRegister(node));
}
-
void InstructionSelector::VisitLoadFramePointer(Node* node) {
OperandGenerator g(this);
Emit(kArchFramePointer, g.DefineAsRegister(node));
@@ -1208,6 +1365,90 @@ void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
Emit(kArchParentFramePointer, g.DefineAsRegister(node));
}
+void InstructionSelector::VisitFloat64Acos(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
+}
+
+void InstructionSelector::VisitFloat64Acosh(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Acosh);
+}
+
+void InstructionSelector::VisitFloat64Asin(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Asin);
+}
+
+void InstructionSelector::VisitFloat64Asinh(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Asinh);
+}
+
+void InstructionSelector::VisitFloat64Atan(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Atan);
+}
+
+void InstructionSelector::VisitFloat64Atanh(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Atanh);
+}
+
+void InstructionSelector::VisitFloat64Atan2(Node* node) {
+ VisitFloat64Ieee754Binop(node, kIeee754Float64Atan2);
+}
+
+void InstructionSelector::VisitFloat64Cbrt(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Cbrt);
+}
+
+void InstructionSelector::VisitFloat64Cos(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Cos);
+}
+
+void InstructionSelector::VisitFloat64Cosh(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Cosh);
+}
+
+void InstructionSelector::VisitFloat64Exp(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Exp);
+}
+
+void InstructionSelector::VisitFloat64Expm1(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Expm1);
+}
+
+void InstructionSelector::VisitFloat64Log(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log);
+}
+
+void InstructionSelector::VisitFloat64Log1p(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log1p);
+}
+
+void InstructionSelector::VisitFloat64Log2(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log2);
+}
+
+void InstructionSelector::VisitFloat64Log10(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Log10);
+}
+
+void InstructionSelector::VisitFloat64Pow(Node* node) {
+ VisitFloat64Ieee754Binop(node, kIeee754Float64Pow);
+}
+
+void InstructionSelector::VisitFloat64Sin(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Sin);
+}
+
+void InstructionSelector::VisitFloat64Sinh(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Sinh);
+}
+
+void InstructionSelector::VisitFloat64Tan(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Tan);
+}
+
+void InstructionSelector::VisitFloat64Tanh(Node* node) {
+ VisitFloat64Ieee754Unop(node, kIeee754Float64Tanh);
+}
+
void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand& index_operand) {
OperandGenerator g(this);
@@ -1252,6 +1493,10 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
+void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
+ EmitIdentity(node);
+}
+
// 32 bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT
@@ -1308,7 +1553,6 @@ void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
UNIMPLEMENTED();
}
-
void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
@@ -1421,19 +1665,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-void InstructionSelector::VisitFinishRegion(Node* node) {
- OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-}
-
-
-void InstructionSelector::VisitGuard(Node* node) {
- OperandGenerator g(this);
- Node* value = node->InputAt(0);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
-}
-
+void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
OperandGenerator g(this);
@@ -1443,9 +1675,7 @@ void InstructionSelector::VisitParameter(Node* node) {
? g.DefineAsDualLocation(
node, linkage()->GetParameterLocation(index),
linkage()->GetParameterSecondaryLocation(index))
- : g.DefineAsLocation(
- node, linkage()->GetParameterLocation(index),
- linkage()->GetParameterType(index).representation());
+ : g.DefineAsLocation(node, linkage()->GetParameterLocation(index));
Emit(kArchNop, op);
}
@@ -1455,18 +1685,16 @@ void InstructionSelector::VisitIfException(Node* node) {
OperandGenerator g(this);
Node* call = node->InputAt(1);
DCHECK_EQ(IrOpcode::kCall, call->opcode());
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(call);
- Emit(kArchNop,
- g.DefineAsLocation(node, descriptor->GetReturnLocation(0),
- descriptor->GetReturnType(0).representation()));
+ const CallDescriptor* descriptor = CallDescriptorOf(call->op());
+ Emit(kArchNop, g.DefineAsLocation(node, descriptor->GetReturnLocation(0)));
}
void InstructionSelector::VisitOsrValue(Node* node) {
OperandGenerator g(this);
int index = OpParameter<int>(node);
- Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
- MachineRepresentation::kTagged));
+ Emit(kArchNop,
+ g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
}
@@ -1492,6 +1720,7 @@ void InstructionSelector::VisitProjection(Node* node) {
switch (value->opcode()) {
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32SubWithOverflow:
+ case IrOpcode::kInt32MulWithOverflow:
case IrOpcode::kInt64AddWithOverflow:
case IrOpcode::kInt64SubWithOverflow:
case IrOpcode::kTryTruncateFloat32ToInt64:
@@ -1527,7 +1756,7 @@ void InstructionSelector::VisitConstant(Node* node) {
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
OperandGenerator g(this);
- const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+ const CallDescriptor* descriptor = CallDescriptorOf(node->op());
FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
@@ -1551,10 +1780,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
CallDescriptor::Flags flags = descriptor->flags();
if (handler) {
DCHECK_EQ(IrOpcode::kIfException, handler->front()->opcode());
- IfExceptionHint hint = OpParameter<IfExceptionHint>(handler->front());
- if (hint == IfExceptionHint::kLocallyCaught) {
- flags |= CallDescriptor::kHasLocalCatchHandler;
- }
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
@@ -1574,7 +1799,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
case CallDescriptor::kCallAddress:
opcode =
kArchCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount()));
break;
case CallDescriptor::kCallCodeObject:
opcode = kArchCallCodeObject | MiscField::encode(flags);
@@ -1595,16 +1820,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
void InstructionSelector::VisitTailCall(Node* node) {
OperandGenerator g(this);
- CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ CallDescriptor const* descriptor = CallDescriptorOf(node->op());
DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
- DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
-
- // TODO(turbofan): Relax restriction for stack parameters.
- int stack_param_delta = 0;
- if (linkage()->GetIncomingDescriptor()->CanTailCall(node,
- &stack_param_delta)) {
+ CallDescriptor* caller = linkage()->GetIncomingDescriptor();
+ if (caller->CanTailCall(node)) {
+ const CallDescriptor* callee = CallDescriptorOf(node->op());
+ int stack_param_delta = callee->GetStackParameterDelta(caller);
CallBuffer buffer(zone(), descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
@@ -1641,6 +1863,9 @@ void InstructionSelector::VisitTailCall(Node* node) {
case CallDescriptor::kCallJSFunction:
opcode = kArchTailCallJSFunction;
break;
+ case CallDescriptor::kCallAddress:
+ opcode = kArchTailCallAddress;
+ break;
default:
UNREACHABLE();
return;
@@ -1648,10 +1873,12 @@ void InstructionSelector::VisitTailCall(Node* node) {
}
opcode |= MiscField::encode(descriptor->flags());
- buffer.instruction_args.push_back(g.TempImmediate(stack_param_delta));
+ Emit(kArchPrepareTailCall, g.NoOutput());
- Emit(kArchPrepareTailCall, g.NoOutput(),
- g.TempImmediate(stack_param_delta));
+ int first_unused_stack_slot =
+ (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
+ stack_param_delta;
+ buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
// Emit the tailcall instruction.
Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
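The rewritten tail-call sequence no longer passes the raw stack_param_delta to kArchPrepareTailCall; instead it records the first stack slot the tail call is free to clobber, shifted by one on targets that keep the return address on the stack. A sketch of that arithmetic (function and flag names are illustrative, not V8's):

    #include <iostream>

    // On targets that push the return address, slot 0 is occupied by it,
    // so the first clobberable slot is offset by one.
    int FirstUnusedStackSlot(int stack_param_delta, bool return_addr_on_stack) {
      return (return_addr_on_stack ? 1 : 0) + stack_param_delta;
    }

    int main() {
      // Callee needs two more stack parameters than the caller, e.g. on x64:
      std::cout << FirstUnusedStackSlot(2, /*return_addr_on_stack=*/true)
                << "\n";  // prints 3
    }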
@@ -1717,28 +1944,26 @@ void InstructionSelector::VisitReturn(Node* ret) {
auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
for (int i = 0; i < ret_count; ++i) {
value_locations[i] =
- g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i),
- linkage()->GetReturnType(i).representation());
+ g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i));
}
Emit(kArchRet, 0, nullptr, ret_count, value_locations);
}
}
-Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
- InstructionOperand output,
- InstructionOperand a,
- InstructionOperand b,
- Node* frame_state) {
+Instruction* InstructionSelector::EmitDeoptimize(
+ InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+ InstructionOperand b, DeoptimizeReason reason, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- frame_state);
+ reason, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
- size_t input_count, InstructionOperand* inputs, Node* frame_state) {
+ size_t input_count, InstructionOperand* inputs, DeoptimizeReason reason,
+ Node* frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
InstructionOperandVector args(instruction_zone());
@@ -1747,9 +1972,8 @@ Instruction* InstructionSelector::EmitDeoptimize(
args.push_back(inputs[i]);
}
opcode |= MiscField::encode(static_cast<int>(input_count));
- InstructionSequence::StateId const state_id =
- sequence()->AddFrameStateDescriptor(descriptor);
- args.push_back(g.TempImmediate(state_id.ToInt()));
+ int const state_id = sequence()->AddDeoptimizationEntry(descriptor, reason);
+ args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
&args, FrameStateInputKind::kAny,
@@ -1758,7 +1982,15 @@ Instruction* InstructionSelector::EmitDeoptimize(
nullptr);
}
-void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
+void InstructionSelector::EmitIdentity(Node* node) {
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
+ DeoptimizeReason reason,
+ Node* value) {
InstructionCode opcode = kArchDeoptimize;
switch (kind) {
case DeoptimizeKind::kEager:
@@ -1768,7 +2000,7 @@ void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
opcode |= MiscField::encode(Deoptimizer::SOFT);
break;
}
- EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, value);
+ EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, reason, value);
}
@@ -1777,6 +2009,39 @@ void InstructionSelector::VisitThrow(Node* value) {
Emit(kArchThrowTerminator, g.NoOutput());
}
+void InstructionSelector::VisitDebugBreak(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchDebugBreak, g.NoOutput());
+}
+
+void InstructionSelector::VisitComment(Node* node) {
+ OperandGenerator g(this);
+ InstructionOperand operand(g.UseImmediate(node));
+ Emit(kArchComment, 0, nullptr, 1, &operand);
+}
+
+void InstructionSelector::VisitUnsafePointerAdd(Node* node) {
+#if V8_TARGET_ARCH_64_BIT
+ VisitInt64Add(node);
+#else // V8_TARGET_ARCH_64_BIT
+ VisitInt32Add(node);
+#endif // V8_TARGET_ARCH_64_BIT
+}
+
+void InstructionSelector::VisitRetain(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchNop, g.NoOutput(), g.UseAny(node->InputAt(0)));
+}
+
+bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
+  // TODO(jarin): Improve the heuristic here.
+ if (node->opcode() == IrOpcode::kFloat64Add ||
+ node->opcode() == IrOpcode::kFloat64Sub ||
+ node->opcode() == IrOpcode::kFloat64Mul) {
+ return false;
+ }
+ return true;
+}
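This heuristic is sound because IEEE 754 arithmetic never produces a signaling NaN: any operation on an sNaN yields a quiet NaN. That is why the kFloat64SilenceNaN case above can degrade to EmitIdentity when the input comes from Float64Add/Sub/Mul. A small standalone demonstration:

    #include <cmath>
    #include <iostream>
    #include <limits>

    int main() {
      double snan = std::numeric_limits<double>::signaling_NaN();
      double sum = snan + 1.0;  // arithmetic quiets the NaN
      std::cout << std::boolalpha
                << std::isnan(sum) << "\n";  // true: still a NaN...
      // ...but a quiet one, so a subsequent silencing pass would be a no-op.
    }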
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
Node* state) {
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 9c1cd4ca0c..f9f43e9f35 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -105,10 +105,11 @@ class InstructionSelector final {
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
- Node* frame_state);
+ DeoptimizeReason reason, Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
- InstructionOperand* inputs, Node* frame_state);
+ InstructionOperand* inputs,
+ DeoptimizeReason reason, Node* frame_state);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
@@ -139,6 +140,8 @@ class InstructionSelector final {
// TODO(sigurds) This should take a CpuFeatures argument.
static MachineOperatorBuilder::Flags SupportedMachineOperatorFlags();
+ static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
+
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -149,6 +152,31 @@ class InstructionSelector final {
// edge and the two are in the same basic block.
bool CanCover(Node* user, Node* node) const;
+ // Used in pattern matching during code generation.
+ // This function checks that {node} and {user} are in the same basic block,
+ // and that {user} is the only user of {node} in this basic block. This
+ // check guarantees that there are no users of {node} scheduled between
+ // {node} and {user}, and thus we can select a single instruction for both
+ // nodes, if such an instruction exists. This check can be used for example
+ // when selecting instructions for:
+ // n = Int32Add(a, b)
+ // c = Word32Compare(n, 0, cond)
+ // Branch(c, true_label, false_label)
+ // Here we can generate a flag-setting add instruction, even if the add has
+ // uses in other basic blocks, since the flag-setting add instruction will
+ // still generate the result of the addition and not just set the flags.
+ // However, if we had uses of the add in the same basic block, we could have:
+ // n = Int32Add(a, b)
+ // o = OtherOp(n, ...)
+ // c = Word32Compare(n, 0, cond)
+ // Branch(c, true_label, false_label)
+ // where we cannot select the add and the compare together. If we were to
+ // select a flag-setting add instruction for Word32Compare and Int32Add while
+ // visiting Word32Compare, we would then have to select an instruction for
+ // OtherOp *afterwards*, which means we would attempt to use the result of
+ // the add before we have defined it.
+ bool IsOnlyUserOfNodeInSameBlock(Node* user, Node* node) const;
+
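A toy rendering of the check the comment above describes, with simplified stand-in types (V8's real implementation walks use edges on its Node graph):

    #include <vector>

    struct Node { int block; std::vector<Node*> uses; };

    // {user} may absorb {node} into one instruction only if they share a
    // block and {user} is the sole use of {node} within that block.
    bool IsOnlyUserOfNodeInSameBlock(const Node* user, const Node* node) {
      if (user->block != node->block) return false;
      for (const Node* use : node->uses) {
        if (use != user && use->block == node->block) return false;
      }
      return true;
    }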
// Checks if {node} was already defined, and therefore code was already
// generated for it.
bool IsDefined(Node* node) const;
@@ -222,7 +250,7 @@ class InstructionSelector final {
// {call_code_immediate} to generate immediate operands to calls of code.
// {call_address_immediate} to generate immediate operands to address calls.
void InitializeCallBuffer(Node* call, CallBuffer* buffer,
- CallBufferFlags flags, int stack_param_delta = 0);
+ CallBufferFlags flags, int stack_slot_delta = 0);
bool IsTailCallAddressImmediate();
int GetTempsCountForTailCallFromJSFunction();
@@ -242,12 +270,15 @@ class InstructionSelector final {
// Visit the node and generate code, if any.
void VisitNode(Node* node);
+ // Visit the node and generate code for IEEE 754 functions.
+ void VisitFloat64Ieee754Binop(Node*, InstructionCode code);
+ void VisitFloat64Ieee754Unop(Node*, InstructionCode code);
+
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
void VisitFinishRegion(Node* node);
- void VisitGuard(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
void VisitOsrValue(Node* node);
@@ -261,13 +292,18 @@ class InstructionSelector final {
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
- void VisitDeoptimize(DeoptimizeKind kind, Node* value);
+ void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
+ Node* value);
void VisitReturn(Node* ret);
void VisitThrow(Node* value);
+ void VisitRetain(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node);
+ void EmitIdentity(Node* node);
+ bool CanProduceSignalingNaN(Node* node);
+
// ===========================================================================
Schedule* schedule() const { return schedule_; }
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index c757557a0d..615b644334 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -12,6 +12,7 @@ namespace v8 {
namespace internal {
namespace compiler {
+const auto GetRegConfig = RegisterConfiguration::Turbofan;
FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
switch (condition) {
@@ -47,6 +48,10 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
return kFloatGreaterThanOrEqualOrUnordered;
case kFloatGreaterThan:
return kFloatLessThan;
+ case kPositiveOrZero:
+ case kNegative:
+ UNREACHABLE();
+ break;
case kEqual:
case kNotEqual:
case kOverflow:
@@ -59,6 +64,9 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
return condition;
}
+bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
+ return EqualsCanonicalized(that);
+}
void InstructionOperand::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
@@ -68,13 +76,7 @@ void InstructionOperand::Print(const RegisterConfiguration* config) const {
os << wrapper << std::endl;
}
-
-void InstructionOperand::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
-
+void InstructionOperand::Print() const { Print(GetRegConfig()); }
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
@@ -95,7 +97,7 @@ std::ostream& operator<<(std::ostream& os,
<< conf->GetGeneralRegisterName(
unalloc->fixed_register_index())
<< ")";
- case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+ case UnallocatedOperand::FIXED_FP_REGISTER:
return os << "(="
<< conf->GetDoubleRegisterName(
unalloc->fixed_register_index())
@@ -126,14 +128,25 @@ std::ostream& operator<<(std::ostream& os,
case InstructionOperand::ALLOCATED: {
LocationOperand allocated = LocationOperand::cast(op);
if (op.IsStackSlot()) {
- os << "[stack:" << LocationOperand::cast(op).index();
- } else if (op.IsDoubleStackSlot()) {
- os << "[double_stack:" << LocationOperand::cast(op).index();
+ os << "[stack:" << allocated.index();
+ } else if (op.IsFPStackSlot()) {
+ os << "[fp_stack:" << allocated.index();
} else if (op.IsRegister()) {
- os << "[" << LocationOperand::cast(op).GetRegister().ToString() << "|R";
+ os << "["
+ << GetRegConfig()->GetGeneralRegisterName(allocated.register_code())
+ << "|R";
+ } else if (op.IsDoubleRegister()) {
+ os << "["
+ << GetRegConfig()->GetDoubleRegisterName(allocated.register_code())
+ << "|R";
+ } else if (op.IsFloatRegister()) {
+ os << "["
+ << GetRegConfig()->GetFloatRegisterName(allocated.register_code())
+ << "|R";
} else {
- DCHECK(op.IsDoubleRegister());
- os << "[" << LocationOperand::cast(op).GetDoubleRegister().ToString()
+ DCHECK(op.IsSimd128Register());
+ os << "["
+ << GetRegConfig()->GetSimd128RegisterName(allocated.register_code())
<< "|R";
}
if (allocated.IsExplicit()) {
@@ -167,6 +180,12 @@ std::ostream& operator<<(std::ostream& os,
case MachineRepresentation::kSimd128:
os << "|s128";
break;
+ case MachineRepresentation::kTaggedSigned:
+ os << "|ts";
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ os << "|tp";
+ break;
case MachineRepresentation::kTagged:
os << "|t";
break;
@@ -180,7 +199,6 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-
void MoveOperands::Print(const RegisterConfiguration* config) const {
OFStream os(stdout);
PrintableInstructionOperand wrapper;
@@ -191,13 +209,7 @@ void MoveOperands::Print(const RegisterConfiguration* config) const {
os << wrapper << std::endl;
}
-
-void MoveOperands::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
-
+void MoveOperands::Print() const { Print(GetRegConfig()); }
std::ostream& operator<<(std::ostream& os,
const PrintableMoveOperands& printable) {
@@ -246,22 +258,23 @@ ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
int index)
: LocationOperand(EXPLICIT, kind, rep, index) {
DCHECK_IMPLIES(kind == REGISTER && !IsFloatingPoint(rep),
- Register::from_code(index).IsAllocatable());
- DCHECK_IMPLIES(kind == REGISTER && IsFloatingPoint(rep),
- DoubleRegister::from_code(index).IsAllocatable());
+ GetRegConfig()->IsAllocatableGeneralCode(index));
+ DCHECK_IMPLIES(kind == REGISTER && rep == MachineRepresentation::kFloat32,
+ GetRegConfig()->IsAllocatableFloatCode(index));
+ DCHECK_IMPLIES(kind == REGISTER && (rep == MachineRepresentation::kFloat64),
+ GetRegConfig()->IsAllocatableDoubleCode(index));
}
-
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
TempCountField::encode(0) | IsCallField::encode(false)),
- reference_map_(nullptr) {
+ reference_map_(nullptr),
+ block_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
}
-
Instruction::Instruction(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count,
@@ -271,7 +284,8 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
InputCountField::encode(input_count) |
TempCountField::encode(temp_count) |
IsCallField::encode(false)),
- reference_map_(nullptr) {
+ reference_map_(nullptr),
+ block_(nullptr) {
parallel_moves_[0] = nullptr;
parallel_moves_[1] = nullptr;
size_t offset = 0;
@@ -309,13 +323,7 @@ void Instruction::Print(const RegisterConfiguration* config) const {
os << wrapper << std::endl;
}
-
-void Instruction::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
-
+void Instruction::Print() const { Print(GetRegConfig()); }
std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
@@ -335,7 +343,7 @@ std::ostream& operator<<(std::ostream& os,
void ReferenceMap::RecordReference(const AllocatedOperand& op) {
// Do not record arguments as pointers.
if (op.IsStackSlot() && LocationOperand::cast(op).index() < 0) return;
- DCHECK(!op.IsDoubleRegister() && !op.IsDoubleStackSlot());
+ DCHECK(!op.IsFPRegister() && !op.IsFPStackSlot());
reference_operands_.push_back(op);
}
@@ -343,9 +351,7 @@ void ReferenceMap::RecordReference(const AllocatedOperand& op) {
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
bool first = true;
- PrintableInstructionOperand poi = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- InstructionOperand()};
+ PrintableInstructionOperand poi = {GetRegConfig(), InstructionOperand()};
for (const InstructionOperand& op : pm.reference_operands_) {
if (!first) {
os << ";";
@@ -449,6 +455,10 @@ std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
return os << "overflow";
case kNotOverflow:
return os << "not overflow";
+ case kPositiveOrZero:
+ return os << "positive or zero";
+ case kNegative:
+ return os << "negative";
}
UNREACHABLE();
return os;
@@ -504,6 +514,24 @@ std::ostream& operator<<(std::ostream& os,
Constant::Constant(int32_t v) : type_(kInt32), value_(v) {}
+Constant::Constant(RelocatablePtrConstantInfo info) {
+ if (info.type() == RelocatablePtrConstantInfo::kInt32) {
+ type_ = kInt32;
+ } else if (info.type() == RelocatablePtrConstantInfo::kInt64) {
+ type_ = kInt64;
+ } else {
+ UNREACHABLE();
+ }
+ value_ = info.value();
+ rmode_ = info.rmode();
+}
+
+Handle<HeapObject> Constant::ToHeapObject() const {
+ DCHECK_EQ(kHeapObject, type());
+ Handle<HeapObject> value(
+ bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
+ return value;
+}
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
@@ -603,7 +631,6 @@ static InstructionBlock* InstructionBlockFor(Zone* zone,
return instr_block;
}
-
InstructionBlocks* InstructionSequence::InstructionBlocksFor(
Zone* zone, const Schedule* schedule) {
InstructionBlocks* blocks = zone->NewArray<InstructionBlocks>(1);
@@ -620,7 +647,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
return blocks;
}
-void InstructionSequence::ValidateEdgeSplitForm() {
+void InstructionSequence::ValidateEdgeSplitForm() const {
// Validate blocks are in edge-split form: no block with multiple successors
// has an edge to a block (== a successor) with more than one predecessors.
for (const InstructionBlock* block : instruction_blocks()) {
@@ -635,7 +662,7 @@ void InstructionSequence::ValidateEdgeSplitForm() {
}
}
-void InstructionSequence::ValidateDeferredBlockExitPaths() {
+void InstructionSequence::ValidateDeferredBlockExitPaths() const {
// A deferred block with more than one successor must have all its successors
// deferred.
for (const InstructionBlock* block : instruction_blocks()) {
@@ -646,7 +673,21 @@ void InstructionSequence::ValidateDeferredBlockExitPaths() {
}
}
-void InstructionSequence::ValidateSSA() {
+void InstructionSequence::ValidateDeferredBlockEntryPaths() const {
+  // If a deferred block has multiple predecessors, they all have to
+  // be deferred. Otherwise, we can run into a situation where a range
+ // that spills only in deferred blocks inserts its spill in the block, but
+ // other ranges need moves inserted by ResolveControlFlow in the predecessors,
+ // which may clobber the register of this range.
+ for (const InstructionBlock* block : instruction_blocks()) {
+ if (!block->IsDeferred() || block->PredecessorCount() <= 1) continue;
+ for (RpoNumber predecessor_id : block->predecessors()) {
+ CHECK(InstructionBlockAt(predecessor_id)->IsDeferred());
+ }
+ }
+}
+
+void InstructionSequence::ValidateSSA() const {
// TODO(mtrofin): We could use a local zone here instead.
BitVector definitions(VirtualRegisterCount(), zone());
for (const Instruction* instruction : *this) {
@@ -675,7 +716,6 @@ void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
}
}
-
InstructionSequence::InstructionSequence(Isolate* isolate,
Zone* instruction_zone,
InstructionBlocks* instruction_blocks)
@@ -683,7 +723,6 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
zone_(instruction_zone),
instruction_blocks_(instruction_blocks),
source_positions_(zone()),
- block_starts_(zone()),
constants_(ConstantMap::key_compare(),
ConstantMap::allocator_type(zone())),
immediates_(zone()),
@@ -691,10 +730,8 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
next_virtual_register_(0),
reference_maps_(zone()),
representations_(zone()),
- deoptimization_entries_(zone()) {
- block_starts_.reserve(instruction_blocks_->size());
-}
-
+ deoptimization_entries_(zone()),
+ current_block_(nullptr) {}
int InstructionSequence::NextVirtualRegister() {
int virtual_register = next_virtual_register_++;
@@ -710,28 +747,31 @@ Instruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
void InstructionSequence::StartBlock(RpoNumber rpo) {
- DCHECK(block_starts_.size() == rpo.ToSize());
- InstructionBlock* block = InstructionBlockAt(rpo);
+ DCHECK_NULL(current_block_);
+ current_block_ = InstructionBlockAt(rpo);
int code_start = static_cast<int>(instructions_.size());
- block->set_code_start(code_start);
- block_starts_.push_back(code_start);
+ current_block_->set_code_start(code_start);
}
void InstructionSequence::EndBlock(RpoNumber rpo) {
int end = static_cast<int>(instructions_.size());
- InstructionBlock* block = InstructionBlockAt(rpo);
- if (block->code_start() == end) { // Empty block. Insert a nop.
+ DCHECK_EQ(current_block_->rpo_number(), rpo);
+ if (current_block_->code_start() == end) { // Empty block. Insert a nop.
AddInstruction(Instruction::New(zone(), kArchNop));
end = static_cast<int>(instructions_.size());
}
- DCHECK(block->code_start() >= 0 && block->code_start() < end);
- block->set_code_end(end);
+ DCHECK(current_block_->code_start() >= 0 &&
+ current_block_->code_start() < end);
+ current_block_->set_code_end(end);
+ current_block_ = nullptr;
}
int InstructionSequence::AddInstruction(Instruction* instr) {
+ DCHECK_NOT_NULL(current_block_);
int index = static_cast<int>(instructions_.size());
+ instr->set_block(current_block_);
instructions_.push_back(instr);
if (instr->NeedsReferenceMap()) {
DCHECK(instr->reference_map() == nullptr);
@@ -746,18 +786,7 @@ int InstructionSequence::AddInstruction(Instruction* instr) {
InstructionBlock* InstructionSequence::GetInstructionBlock(
int instruction_index) const {
- DCHECK(instruction_blocks_->size() == block_starts_.size());
- auto begin = block_starts_.begin();
- auto end = std::lower_bound(begin, block_starts_.end(), instruction_index);
- // Post condition of std::lower_bound:
- DCHECK(end == block_starts_.end() || *end >= instruction_index);
- if (end == block_starts_.end() || *end > instruction_index) --end;
- DCHECK(*end <= instruction_index);
- size_t index = std::distance(begin, end);
- InstructionBlock* block = instruction_blocks_->at(index);
- DCHECK(block->code_start() <= instruction_index &&
- instruction_index < block->code_end());
- return block;
+ return instructions()[instruction_index]->block();
}
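This hunk trades the old binary search over block_starts_ for a back-pointer stored on each Instruction (set in AddInstruction above), turning an O(log n) lookup into an O(1) load. A toy comparison of the two strategies:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Old strategy: binary search a sorted vector of block start indices.
    int BlockIndexByUpperBound(const std::vector<int>& block_starts, int instr) {
      auto it = std::upper_bound(block_starts.begin(), block_starts.end(), instr);
      return static_cast<int>(it - block_starts.begin()) - 1;  // O(log n)
    }

    int main() {
      std::vector<int> block_starts = {0, 4, 9};  // blocks cover [0,4), [4,9), ...
      assert(BlockIndexByUpperBound(block_starts, 5) == 1);
      // With a back-pointer, the same query becomes a single O(1) load,
      // at the cost of one pointer field per Instruction.
      return 0;
    }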
@@ -772,6 +801,8 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
return rep;
case MachineRepresentation::kNone:
@@ -806,22 +837,16 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
representations_[virtual_register] = rep;
}
-
-InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
- FrameStateDescriptor* descriptor) {
+int InstructionSequence::AddDeoptimizationEntry(
+ FrameStateDescriptor* descriptor, DeoptimizeReason reason) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
- deoptimization_entries_.push_back(descriptor);
- return StateId::FromInt(deoptimization_id);
+ deoptimization_entries_.push_back(DeoptimizationEntry(descriptor, reason));
+ return deoptimization_id;
}
-FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
- InstructionSequence::StateId state_id) {
- return deoptimization_entries_[state_id.ToInt()];
-}
-
-
-int InstructionSequence::GetFrameStateDescriptorCount() {
- return static_cast<int>(deoptimization_entries_.size());
+DeoptimizationEntry const& InstructionSequence::GetDeoptimizationEntry(
+ int state_id) {
+ return deoptimization_entries_[state_id];
}
@@ -858,12 +883,7 @@ void InstructionSequence::Print(const RegisterConfiguration* config) const {
os << wrapper << std::endl;
}
-
-void InstructionSequence::Print() const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config);
-}
+void InstructionSequence::Print() const { Print(GetRegConfig()); }
void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
int block_id) const {
@@ -917,9 +937,7 @@ void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
}
void InstructionSequence::PrintBlock(int block_id) const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- PrintBlock(config, block_id);
+ PrintBlock(GetRegConfig(), block_id);
}
FrameStateDescriptor::FrameStateDescriptor(
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index a1fe494761..b5aea707d2 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -66,9 +66,13 @@ class InstructionOperand {
inline bool IsAnyRegister() const;
inline bool IsRegister() const;
+ inline bool IsFPRegister() const;
+ inline bool IsFloatRegister() const;
inline bool IsDoubleRegister() const;
inline bool IsSimd128Register() const;
inline bool IsStackSlot() const;
+ inline bool IsFPStackSlot() const;
+ inline bool IsFloatStackSlot() const;
inline bool IsDoubleStackSlot() const;
inline bool IsSimd128StackSlot() const;
@@ -99,6 +103,8 @@ class InstructionOperand {
return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
}
+ bool InterferesWith(const InstructionOperand& that) const;
+
void Print(const RegisterConfiguration* config) const;
void Print() const;
@@ -151,7 +157,7 @@ class UnallocatedOperand : public InstructionOperand {
NONE,
ANY,
FIXED_REGISTER,
- FIXED_DOUBLE_REGISTER,
+ FIXED_FP_REGISTER,
MUST_HAVE_REGISTER,
MUST_HAVE_SLOT,
SAME_AS_FIRST_INPUT
@@ -188,7 +194,7 @@ class UnallocatedOperand : public InstructionOperand {
UnallocatedOperand(ExtendedPolicy policy, int index, int virtual_register)
: UnallocatedOperand(virtual_register) {
- DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+ DCHECK(policy == FIXED_REGISTER || policy == FIXED_FP_REGISTER);
value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
value_ |= ExtendedPolicyField::encode(policy);
value_ |= LifetimeField::encode(USED_AT_END);
@@ -216,7 +222,7 @@ class UnallocatedOperand : public InstructionOperand {
bool HasFixedPolicy() const {
return basic_policy() == FIXED_SLOT ||
extended_policy() == FIXED_REGISTER ||
- extended_policy() == FIXED_DOUBLE_REGISTER;
+ extended_policy() == FIXED_FP_REGISTER;
}
bool HasRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
@@ -235,9 +241,9 @@ class UnallocatedOperand : public InstructionOperand {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == FIXED_REGISTER;
}
- bool HasFixedDoubleRegisterPolicy() const {
+ bool HasFixedFPRegisterPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == FIXED_DOUBLE_REGISTER;
+ extended_policy() == FIXED_FP_REGISTER;
}
bool HasSecondaryStorage() const {
return basic_policy() == EXTENDED_POLICY &&
@@ -268,9 +274,9 @@ class UnallocatedOperand : public InstructionOperand {
FixedSlotIndexField::kShift);
}
- // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+ // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_FP_REGISTER.
int fixed_register_index() const {
- DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+ DCHECK(HasFixedRegisterPolicy() || HasFixedFPRegisterPolicy());
return FixedRegisterField::decode(value_);
}
@@ -413,26 +419,36 @@ class LocationOperand : public InstructionOperand {
}
int index() const {
- DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSimd128StackSlot());
+ DCHECK(IsStackSlot() || IsFPStackSlot());
+ return static_cast<int64_t>(value_) >> IndexField::kShift;
+ }
+
+ int register_code() const {
+ DCHECK(IsRegister() || IsFPRegister());
return static_cast<int64_t>(value_) >> IndexField::kShift;
}
Register GetRegister() const {
DCHECK(IsRegister());
- return Register::from_code(static_cast<int64_t>(value_) >>
- IndexField::kShift);
+ return Register::from_code(register_code());
+ }
+
+ FloatRegister GetFloatRegister() const {
+ DCHECK(IsFloatRegister());
+ return FloatRegister::from_code(register_code());
}
DoubleRegister GetDoubleRegister() const {
- DCHECK(IsDoubleRegister());
- return DoubleRegister::from_code(static_cast<int64_t>(value_) >>
- IndexField::kShift);
+ // On platforms where FloatRegister, DoubleRegister, and Simd128Register
+ // are all the same type, it's convenient to treat everything as a
+ // DoubleRegister, so be lax about type checking here.
+ DCHECK(IsFPRegister());
+ return DoubleRegister::from_code(register_code());
}
Simd128Register GetSimd128Register() const {
DCHECK(IsSimd128Register());
- return Simd128Register::from_code(static_cast<int64_t>(value_) >>
- IndexField::kShift);
+ return Simd128Register::from_code(register_code());
}
LocationKind location_kind() const {
@@ -450,6 +466,8 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
return true;
case MachineRepresentation::kBit:
@@ -526,11 +544,23 @@ bool InstructionOperand::IsRegister() const {
!IsFloatingPoint(LocationOperand::cast(this)->representation());
}
-bool InstructionOperand::IsDoubleRegister() const {
+bool InstructionOperand::IsFPRegister() const {
return IsAnyRegister() &&
IsFloatingPoint(LocationOperand::cast(this)->representation());
}
+bool InstructionOperand::IsFloatRegister() const {
+ return IsAnyRegister() &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat32;
+}
+
+bool InstructionOperand::IsDoubleRegister() const {
+ return IsAnyRegister() &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat64;
+}
+
bool InstructionOperand::IsSimd128Register() const {
return IsAnyRegister() &&
LocationOperand::cast(this)->representation() ==
@@ -544,13 +574,29 @@ bool InstructionOperand::IsStackSlot() const {
!IsFloatingPoint(LocationOperand::cast(this)->representation());
}
-bool InstructionOperand::IsDoubleStackSlot() const {
+bool InstructionOperand::IsFPStackSlot() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
LocationOperand::STACK_SLOT &&
IsFloatingPoint(LocationOperand::cast(this)->representation());
}
+bool InstructionOperand::IsFloatStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat32;
+}
+
+bool InstructionOperand::IsDoubleStackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kFloat64;
+}
+
bool InstructionOperand::IsSimd128StackSlot() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
@@ -561,20 +607,18 @@ bool InstructionOperand::IsSimd128StackSlot() const {
uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAllocated() || IsExplicit()) {
- // TODO(dcarney): put machine type last and mask.
- MachineRepresentation canonicalized_representation =
- IsFloatingPoint(LocationOperand::cast(this)->representation())
- ? MachineRepresentation::kFloat64
- : MachineRepresentation::kNone;
+ MachineRepresentation canonical = MachineRepresentation::kNone;
+ if (IsFPRegister()) {
+ // We treat all FP register operands the same for simple aliasing.
+ canonical = MachineRepresentation::kFloat64;
+ }
return InstructionOperand::KindField::update(
- LocationOperand::RepresentationField::update(
- this->value_, canonicalized_representation),
+ LocationOperand::RepresentationField::update(this->value_, canonical),
LocationOperand::EXPLICIT);
}
return this->value_;
}
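GetCanonicalizedValue now folds every floating-point register representation to kFloat64, so EqualsCanonicalized (and therefore the new InterferesWith) treats aliasing float, double, and SIMD registers as the same location. A compile-time sketch of that folding, with made-up enum values (V8 encodes these in operand bit fields):

    enum class Rep { kWord32, kFloat32, kFloat64, kSimd128 };

    constexpr bool IsFP(Rep r) { return r != Rep::kWord32; }
    // All FP representations collapse to kFloat64 for aliasing purposes.
    constexpr Rep Canonical(Rep r) { return IsFP(r) ? Rep::kFloat64 : r; }

    static_assert(Canonical(Rep::kFloat32) == Canonical(Rep::kSimd128),
                  "all FP reps compare equal after canonicalization");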
-
// Required for maps that don't care about machine type.
struct CompareOperandModuloType {
bool operator()(const InstructionOperand& a,
@@ -609,9 +653,9 @@ class MoveOperands final : public ZoneObject {
}
void SetPending() { destination_ = InstructionOperand(); }
- // True if this move a move into the given destination operand.
- bool Blocks(const InstructionOperand& operand) const {
- return !IsEliminated() && source().EqualsCanonicalized(operand);
+ // True if this move is a move into the given destination operand.
+ bool Blocks(const InstructionOperand& destination) const {
+ return !IsEliminated() && source().InterferesWith(destination);
}
// A move is redundant if it's been eliminated or if its source and
@@ -715,6 +759,8 @@ class ReferenceMap final : public ZoneObject {
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
+class InstructionBlock;
+
class Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
@@ -826,7 +872,8 @@ class Instruction final {
return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
- arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction;
+ arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction ||
+ arch_opcode() == ArchOpcode::kArchTailCallAddress;
}
bool IsThrow() const {
return arch_opcode() == ArchOpcode::kArchThrowTerminator;
@@ -859,6 +906,15 @@ class Instruction final {
ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
+ // The block_id may be invalidated in JumpThreading. It is only important for
+ // register allocation, to avoid searching for blocks from instruction
+ // indexes.
+ InstructionBlock* block() const { return block_; }
+ void set_block(InstructionBlock* block) {
+ DCHECK_NOT_NULL(block);
+ block_ = block;
+ }
+
void Print(const RegisterConfiguration* config) const;
void Print() const;
@@ -879,6 +935,7 @@ class Instruction final {
uint32_t bit_field_;
ParallelMove* parallel_moves_[2];
ReferenceMap* reference_map_;
+ InstructionBlock* block_;
InstructionOperand operands_[1];
DISALLOW_COPY_AND_ASSIGN(Instruction);
@@ -950,9 +1007,12 @@ class Constant final {
explicit Constant(Handle<HeapObject> obj)
: type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
+ explicit Constant(RelocatablePtrConstantInfo info);
Type type() const { return type_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
int32_t ToInt32() const {
DCHECK(type() == kInt32 || type() == kInt64);
const int32_t value = static_cast<int32_t>(value_);
@@ -987,14 +1047,16 @@ class Constant final {
return RpoNumber::FromInt(static_cast<int>(value_));
}
- Handle<HeapObject> ToHeapObject() const {
- DCHECK_EQ(kHeapObject, type());
- return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
- }
+ Handle<HeapObject> ToHeapObject() const;
private:
Type type_;
int64_t value_;
+#if V8_TARGET_ARCH_32_BIT
+ RelocInfo::Mode rmode_ = RelocInfo::NONE32;
+#else
+ RelocInfo::Mode rmode_ = RelocInfo::NONE64;
+#endif
};
@@ -1083,6 +1145,8 @@ class FrameStateDescriptor : public ZoneObject {
}
StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
+ static const int kImpossibleValue = 0xdead;
+
private:
FrameStateType type_;
BailoutId bailout_id_;
@@ -1095,9 +1159,23 @@ class FrameStateDescriptor : public ZoneObject {
FrameStateDescriptor* outer_state_;
};
+// A deoptimization entry is a pair of the reason why we deoptimize and the
+// frame state descriptor that we have to go back to.
+class DeoptimizationEntry final {
+ public:
+ DeoptimizationEntry() {}
+ DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeReason reason)
+ : descriptor_(descriptor), reason_(reason) {}
+
+ FrameStateDescriptor* descriptor() const { return descriptor_; }
+ DeoptimizeReason reason() const { return reason_; }
-typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
+ private:
+ FrameStateDescriptor* descriptor_ = nullptr;
+ DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
+};
+typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
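With the StateId wrapper class gone, a deoptimization entry is now addressed by a plain int index into this vector: AddDeoptimizationEntry returns the index and GetDeoptimizationEntry reads it back. A self-contained sketch of that round trip, using simplified stand-ins for the V8 types (assume C++14):

    #include <cassert>
    #include <vector>

    enum class DeoptimizeReason { kNoReason, kHole };
    struct FrameStateDescriptor {};
    struct DeoptimizationEntry {
      FrameStateDescriptor* descriptor = nullptr;
      DeoptimizeReason reason = DeoptimizeReason::kNoReason;
    };

    int main() {
      std::vector<DeoptimizationEntry> entries;
      FrameStateDescriptor fsd;
      int id = static_cast<int>(entries.size());      // AddDeoptimizationEntry
      entries.push_back({&fsd, DeoptimizeReason::kHole});
      assert(entries[id].descriptor == &fsd);         // GetDeoptimizationEntry
      return 0;
    }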
class PhiInstruction final : public ZoneObject {
public:
@@ -1268,9 +1346,17 @@ class InstructionSequence final : public ZoneObject {
return GetRepresentation(virtual_register) ==
MachineRepresentation::kTagged;
}
- bool IsFloat(int virtual_register) const {
+ bool IsFP(int virtual_register) const {
return IsFloatingPoint(GetRepresentation(virtual_register));
}
+ bool IsFloat(int virtual_register) const {
+ return GetRepresentation(virtual_register) ==
+ MachineRepresentation::kFloat32;
+ }
+ bool IsDouble(int virtual_register) const {
+ return GetRepresentation(virtual_register) ==
+ MachineRepresentation::kFloat64;
+ }
Instruction* GetBlockStart(RpoNumber rpo) const;
@@ -1316,7 +1402,8 @@ class InstructionSequence final : public ZoneObject {
Immediates& immediates() { return immediates_; }
ImmediateOperand AddImmediate(const Constant& constant) {
- if (constant.type() == Constant::kInt32) {
+ if (constant.type() == Constant::kInt32 &&
+ RelocInfo::IsNone(constant.rmode())) {
return ImmediateOperand(ImmediateOperand::INLINE, constant.ToInt32());
}
int index = static_cast<int>(immediates_.size());
@@ -1339,21 +1426,11 @@ class InstructionSequence final : public ZoneObject {
return Constant(static_cast<int32_t>(0));
}
- class StateId {
- public:
- static StateId FromInt(int id) { return StateId(id); }
- int ToInt() const { return id_; }
-
- private:
- explicit StateId(int id) : id_(id) {}
- int id_;
- };
-
- StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
- FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
- int GetFrameStateDescriptorCount();
- DeoptimizationVector const& frame_state_descriptors() const {
- return deoptimization_entries_;
+ int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
+ DeoptimizeReason reason);
+ DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
+ int GetDeoptimizationEntryCount() const {
+ return static_cast<int>(deoptimization_entries_.size());
}
RpoNumber InputRpo(Instruction* instr, size_t index);
@@ -1374,9 +1451,10 @@ class InstructionSequence final : public ZoneObject {
void PrintBlock(const RegisterConfiguration* config, int block_id) const;
void PrintBlock(int block_id) const;
- void ValidateEdgeSplitForm();
- void ValidateDeferredBlockExitPaths();
- void ValidateSSA();
+ void ValidateEdgeSplitForm() const;
+ void ValidateDeferredBlockExitPaths() const;
+ void ValidateDeferredBlockEntryPaths() const;
+ void ValidateSSA() const;
private:
friend std::ostream& operator<<(std::ostream& os,
@@ -1388,7 +1466,6 @@ class InstructionSequence final : public ZoneObject {
Zone* const zone_;
InstructionBlocks* const instruction_blocks_;
SourcePositionMap source_positions_;
- IntVector block_starts_;
ConstantMap constants_;
Immediates immediates_;
InstructionDeque instructions_;
@@ -1397,6 +1474,9 @@ class InstructionSequence final : public ZoneObject {
ZoneVector<MachineRepresentation> representations_;
DeoptimizationVector deoptimization_entries_;
+ // Used at construction time.
+ InstructionBlock* current_block_;
+
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
};
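
The rmode() addition above feeds the AddImmediate change later in this file: an int32 constant may be folded into the instruction stream as an inline immediate only when it carries no relocation information, since relocatable constants must stay reachable through the immediates table. A minimal standalone sketch of that policy, using illustrative types that are not V8's own:

#include <cstdint>

// Hypothetical stand-ins for RelocInfo::Mode and ImmediateOperand.
enum class RelocMode { kNone, kRelocatable };

struct Immediate {
  bool is_inline;   // true: value is encoded directly in the instruction
  int32_t payload;  // inline value, or an index into an immediates table
};

Immediate AddImmediate(int32_t value, RelocMode rmode) {
  if (rmode == RelocMode::kNone) {
    return {true, value};  // safe to inline: nothing will ever patch it
  }
  return {false, /*table index, assigned by the caller*/ 0};
}
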
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 8824a03dc9..737947aad0 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -32,6 +32,8 @@ Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
signature_(signature),
placeholder_(graph->NewNode(common->Parameter(-2, "placeholder"),
graph->start())) {
+ DCHECK_NOT_NULL(graph);
+ DCHECK_NOT_NULL(graph->end());
replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
}
@@ -79,8 +81,10 @@ static int GetParameterIndexAfterLowering(
return result;
}
-static int GetParameterCountAfterLowering(
+int Int64Lowering::GetParameterCountAfterLowering(
Signature<MachineRepresentation>* signature) {
+ // GetParameterIndexAfterLowering(parameter_count) returns the parameter count
+ // after lowering.
return GetParameterIndexAfterLowering(
signature, static_cast<int>(signature->parameter_count()));
}
@@ -96,6 +100,27 @@ static int GetReturnCountAfterLowering(
return result;
}
+void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
+ Node*& index_high) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ index_low = index;
+ index_high = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ index_low = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+ index_high = index;
+#endif
+}
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const int Int64Lowering::kLowerWordOffset = 0;
+const int Int64Lowering::kHigherWordOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const int Int64Lowering::kLowerWordOffset = 4;
+const int Int64Lowering::kHigherWordOffset = 0;
+#endif
+
void Int64Lowering::LowerNode(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt64Constant: {
@@ -107,17 +132,31 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
- case IrOpcode::kLoad: {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ case IrOpcode::kLoad:
+ case IrOpcode::kUnalignedLoad: {
+ MachineRepresentation rep;
+ if (node->opcode() == IrOpcode::kLoad) {
+ rep = LoadRepresentationOf(node->op()).representation();
+ } else {
+ DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
+ rep = UnalignedLoadRepresentationOf(node->op()).representation();
+ }
- if (load_rep.representation() == MachineRepresentation::kWord64) {
+ if (rep == MachineRepresentation::kWord64) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* index_high =
- graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
+ Node* index_low;
+ Node* index_high;
+ GetIndexNodes(index, index_low, index_high);
+ const Operator* load_op;
+
+ if (node->opcode() == IrOpcode::kLoad) {
+ load_op = machine()->Load(MachineType::Int32());
+ } else {
+ DCHECK(node->opcode() == IrOpcode::kUnalignedLoad);
+ load_op = machine()->UnalignedLoad(MachineType::Int32());
+ }
- const Operator* load_op = machine()->Load(MachineType::Int32());
Node* high_node;
if (node->InputCount() > 2) {
Node* effect_high = node->InputAt(2);
@@ -130,6 +169,7 @@ void Int64Lowering::LowerNode(Node* node) {
} else {
high_node = graph()->NewNode(load_op, base, index_high);
}
+ node->ReplaceInput(1, index_low);
NodeProperties::ChangeOp(node, load_op);
ReplaceNode(node, node, high_node);
} else {
@@ -137,27 +177,40 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
- case IrOpcode::kStore: {
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
- if (store_rep.representation() == MachineRepresentation::kWord64) {
+ case IrOpcode::kStore:
+ case IrOpcode::kUnalignedStore: {
+ MachineRepresentation rep;
+ if (node->opcode() == IrOpcode::kStore) {
+ rep = StoreRepresentationOf(node->op()).representation();
+ } else {
+ DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
+ rep = UnalignedStoreRepresentationOf(node->op());
+ }
+
+ if (rep == MachineRepresentation::kWord64) {
// We change the original store node to store the low word, and create
// a new store node to store the high word. The effect and control edges
// are copied from the original store to the new store node, the effect
// edge of the original store is redirected to the new store.
- WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
-
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* index_high =
- graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
-
+ Node* index_low;
+ Node* index_high;
+ GetIndexNodes(index, index_low, index_high);
Node* value = node->InputAt(2);
DCHECK(HasReplacementLow(value));
DCHECK(HasReplacementHigh(value));
- const Operator* store_op = machine()->Store(StoreRepresentation(
- MachineRepresentation::kWord32, write_barrier_kind));
+ const Operator* store_op;
+ if (node->opcode() == IrOpcode::kStore) {
+ WriteBarrierKind write_barrier_kind =
+ StoreRepresentationOf(node->op()).write_barrier_kind();
+ store_op = machine()->Store(StoreRepresentation(
+ MachineRepresentation::kWord32, write_barrier_kind));
+ } else {
+ DCHECK(node->opcode() == IrOpcode::kUnalignedStore);
+ store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
+ }
Node* high_node;
if (node->InputCount() > 3) {
@@ -173,11 +226,14 @@ void Int64Lowering::LowerNode(Node* node) {
GetReplacementHigh(value));
}
+ node->ReplaceInput(1, index_low);
node->ReplaceInput(2, GetReplacementLow(value));
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
} else {
- DefaultLowering(node);
+ if (HasReplacementLow(node->InputAt(2))) {
+ node->ReplaceInput(2, GetReplacementLow(node->InputAt(2)));
+ }
}
break;
}
@@ -223,7 +279,9 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kCall: {
- CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+ // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+ CallDescriptor* descriptor =
+ const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
(descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Int64())) {
@@ -235,8 +293,10 @@ void Int64Lowering::LowerNode(Node* node) {
if (descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Int64()) {
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
}
break;
@@ -262,9 +322,6 @@ void Int64Lowering::LowerNode(Node* node) {
node->NullAllInputs();
break;
}
- // todo(ahaas): I added a list of missing instructions here to make merging
- // easier when I do them one by one.
- // kExprI64Add:
case IrOpcode::kInt64Add: {
DCHECK(node->InputCount() == 2);
@@ -278,13 +335,13 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairAdd());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
-
- // kExprI64Sub:
case IrOpcode::kInt64Sub: {
DCHECK(node->InputCount() == 2);
@@ -298,12 +355,13 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairSub());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64Mul:
case IrOpcode::kInt64Mul: {
DCHECK(node->InputCount() == 2);
@@ -317,16 +375,13 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Int32PairMul());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64DivS:
- // kExprI64DivU:
- // kExprI64RemS:
- // kExprI64RemU:
- // kExprI64Ior:
case IrOpcode::kWord64Or: {
DCHECK(node->InputCount() == 2);
Node* left = node->InputAt(0);
@@ -341,8 +396,6 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
-
- // kExprI64Xor:
case IrOpcode::kWord64Xor: {
DCHECK(node->InputCount() == 2);
Node* left = node->InputAt(0);
@@ -357,7 +410,6 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64Shl:
case IrOpcode::kWord64Shl: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
@@ -375,12 +427,13 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairShl());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64ShrU:
case IrOpcode::kWord64Shr: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
@@ -398,12 +451,13 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairShr());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64ShrS:
case IrOpcode::kWord64Sar: {
// TODO(turbofan): if the shift count >= 32, then we can set the low word
// of the output to 0 and just calculate the high word.
@@ -421,12 +475,13 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, machine()->Word32PairSar());
// We access the additional return values through projections.
- Node* low_node = graph()->NewNode(common()->Projection(0), node);
- Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ Node* low_node =
+ graph()->NewNode(common()->Projection(0), node, graph()->start());
+ Node* high_node =
+ graph()->NewNode(common()->Projection(1), node, graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
- // kExprI64Eq:
case IrOpcode::kWord64Equal: {
DCHECK(node->InputCount() == 2);
Node* left = node->InputAt(0);
@@ -446,7 +501,6 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, replacement, nullptr);
break;
}
- // kExprI64LtS:
case IrOpcode::kInt64LessThan: {
LowerComparison(node, machine()->Int32LessThan(),
machine()->Uint32LessThan());
@@ -467,8 +521,6 @@ void Int64Lowering::LowerNode(Node* node) {
machine()->Uint32LessThanOrEqual());
break;
}
-
- // kExprI64SConvertI32:
case IrOpcode::kChangeInt32ToInt64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -483,7 +535,6 @@ void Int64Lowering::LowerNode(Node* node) {
node->NullAllInputs();
break;
}
- // kExprI64UConvertI32: {
case IrOpcode::kChangeUint32ToUint64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -494,7 +545,6 @@ void Int64Lowering::LowerNode(Node* node) {
node->NullAllInputs();
break;
}
- // kExprF64ReinterpretI64:
case IrOpcode::kBitcastInt64ToFloat64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -505,14 +555,16 @@ void Int64Lowering::LowerNode(Node* node) {
machine()->Store(
StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier)),
- stack_slot, graph()->NewNode(common()->Int32Constant(4)),
+ stack_slot,
+ graph()->NewNode(common()->Int32Constant(kHigherWordOffset)),
GetReplacementHigh(input), graph()->start(), graph()->start());
Node* store_low_word = graph()->NewNode(
machine()->Store(
StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier)),
- stack_slot, graph()->NewNode(common()->Int32Constant(0)),
+ stack_slot,
+ graph()->NewNode(common()->Int32Constant(kLowerWordOffset)),
GetReplacementLow(input), store_high_word, graph()->start());
Node* load =
@@ -523,7 +575,6 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, load, nullptr);
break;
}
- // kExprI64ReinterpretF64:
case IrOpcode::kBitcastFloat64ToInt64: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -539,15 +590,15 @@ void Int64Lowering::LowerNode(Node* node) {
stack_slot, graph()->NewNode(common()->Int32Constant(0)), input,
graph()->start(), graph()->start());
- Node* high_node =
- graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
- graph()->NewNode(common()->Int32Constant(4)), store,
- graph()->start());
+ Node* high_node = graph()->NewNode(
+ machine()->Load(MachineType::Int32()), stack_slot,
+ graph()->NewNode(common()->Int32Constant(kHigherWordOffset)), store,
+ graph()->start());
- Node* low_node =
- graph()->NewNode(machine()->Load(MachineType::Int32()), stack_slot,
- graph()->NewNode(common()->Int32Constant(0)), store,
- graph()->start());
+ Node* low_node = graph()->NewNode(
+ machine()->Load(MachineType::Int32()), stack_slot,
+ graph()->NewNode(common()->Int32Constant(kLowerWordOffset)), store,
+ graph()->start());
ReplaceNode(node, low_node, high_node);
break;
}
@@ -659,7 +710,6 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
- // kExprI64Clz:
case IrOpcode::kWord64Clz: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -678,7 +728,6 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
- // kExprI64Ctz:
case IrOpcode::kWord64Ctz: {
DCHECK(node->InputCount() == 1);
DCHECK(machine()->Word32Ctz().IsSupported());
@@ -698,7 +747,6 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
- // kExprI64Popcnt:
case IrOpcode::kWord64Popcnt: {
DCHECK(node->InputCount() == 1);
Node* input = node->InputAt(0);
@@ -730,6 +778,14 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kWord64ReverseBytes: {
+ Node* input = node->InputAt(0);
+ ReplaceNode(node, graph()->NewNode(machine()->Word32ReverseBytes().op(),
+ GetReplacementHigh(input)),
+ graph()->NewNode(machine()->Word32ReverseBytes().op(),
+ GetReplacementLow(input)));
+ break;
+ }
default: { DefaultLowering(node); }
}
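
The kLowerWordOffset/kHigherWordOffset constants and GetIndexNodes encode the byte layout memory itself exhibits: on little-endian targets the low 32 bits of an i64 sit at byte offset 0 and the high bits at offset 4, and big-endian swaps the two. A self-contained sketch (plain C++, not V8 code) that checks this invariant on the host:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t value = 0x1122334455667788ULL;
  unsigned char bytes[sizeof(value)];
  std::memcpy(bytes, &value, sizeof(value));

  uint32_t word_at_0, word_at_4;
  std::memcpy(&word_at_0, bytes + 0, sizeof(word_at_0));
  std::memcpy(&word_at_4, bytes + 4, sizeof(word_at_4));

  // Exactly one of the two layouts holds, matching the offsets the
  // lowering selects per target endianness.
  const bool little = word_at_0 == 0x55667788u && word_at_4 == 0x11223344u;
  const bool big = word_at_0 == 0x11223344u && word_at_4 == 0x55667788u;
  assert(little != big);
  return 0;
}
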
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 7f6ef9a297..4ec4e821eb 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -23,6 +23,12 @@ class Int64Lowering {
void LowerGraph();
+ static int GetParameterCountAfterLowering(
+ Signature<MachineRepresentation>* signature);
+
+ static const int kLowerWordOffset;
+ static const int kHigherWordOffset;
+
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
@@ -51,6 +57,7 @@ class Int64Lowering {
bool HasReplacementHigh(Node* node);
Node* GetReplacementHigh(Node* node);
void PreparePhiReplacement(Node* phi);
+ void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
struct NodeState {
Node* node;
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 41f9c30707..bbd5a92a7f 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/compiler/js-builtin-reducer.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -40,6 +43,10 @@ class JSCallReduction {
return function->shared()->builtin_function_id();
}
+ bool ReceiverMatches(Type* type) {
+ return NodeProperties::GetType(receiver())->Is(type);
+ }
+
// Determines whether the call takes zero inputs.
bool InputsMatchZero() { return GetJSCallArity() == 0; }
@@ -66,6 +73,7 @@ class JSCallReduction {
return true;
}
+ Node* receiver() { return NodeProperties::GetValueInput(node_, 1); }
Node* left() { return GetJSCallInput(0); }
Node* right() { return GetJSCallInput(1); }
@@ -86,46 +94,320 @@ class JSCallReduction {
Node* node_;
};
-JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
+JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
+ Flags flags,
+ CompilationDependencies* dependencies)
: AdvancedReducer(editor),
+ dependencies_(dependencies),
+ flags_(flags),
jsgraph_(jsgraph),
type_cache_(TypeCache::Get()) {}
-// ECMA-262, section 15.8.2.11.
-Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchZero()) {
- // Math.max() -> -Infinity
- return Replace(jsgraph()->Constant(-V8_INFINITY));
+namespace {
+
+MaybeHandle<Map> GetMapWitness(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Check if the {node} is dominated by a CheckMaps with a single map
+ // for the {receiver}, and if so use that map for the lowering below.
+ for (Node* dominator = effect;;) {
+ if (dominator->opcode() == IrOpcode::kCheckMaps &&
+ dominator->InputAt(0) == receiver) {
+ if (dominator->op()->ValueInputCount() == 2) {
+ HeapObjectMatcher m(dominator->InputAt(1));
+ if (m.HasValue()) return Handle<Map>::cast(m.Value());
+ }
+ return MaybeHandle<Map>();
+ }
+ if (dominator->op()->EffectInputCount() != 1) {
+ // Didn't find any appropriate CheckMaps node.
+ return MaybeHandle<Map>();
+ }
+ dominator = NodeProperties::GetEffectInput(dominator);
}
- if (r.InputsMatchOne(Type::Number())) {
- // Math.max(a:number) -> a
- return Replace(r.left());
+}
+
+// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
+ DCHECK(!jsarray_map->is_dictionary_map());
+ Isolate* isolate = jsarray_map->GetIsolate();
+ Handle<Name> length_string = isolate->factory()->length_string();
+ DescriptorArray* descriptors = jsarray_map->instance_descriptors();
+ int number =
+ descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
+ DCHECK_NE(DescriptorArray::kNotFound, number);
+ return descriptors->GetDetails(number).IsReadOnly();
+}
+
+// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
+bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
+ Isolate* const isolate = receiver_map->GetIsolate();
+ if (!receiver_map->prototype()->IsJSArray()) return false;
+ Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+ isolate);
+ return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(receiver_map->elements_kind()) &&
+ !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
+ (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
+ receiver_prototype->map()->is_stable() &&
+ isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+ isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
+ !IsReadOnlyLengthDescriptor(receiver_map);
+}
+
+} // namespace
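
GetMapWitness above is an instance of a pattern that recurs throughout this reducer: walk the effect chain upwards from the use until a dominating check on the same receiver is found, and bail out as soon as a node with more than one effect input makes dominance ambiguous. A hedged, dependency-free sketch of that traversal (names are illustrative, not V8's):

struct EffectNode {
  int opcode;                // e.g. a kCheckMaps-like marker
  const void* checked;       // the value this node guards, if any
  EffectNode* effect_input;  // single predecessor on the effect chain
  int effect_input_count;
};

const EffectNode* FindWitness(const EffectNode* effect, const void* receiver,
                              int wanted_opcode) {
  for (const EffectNode* n = effect; n != nullptr;) {
    if (n->opcode == wanted_opcode && n->checked == receiver) return n;
    if (n->effect_input_count != 1) return nullptr;  // chain becomes ambiguous
    n = n->effect_input;
  }
  return nullptr;  // no dominating check found
}
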
+
+// ES6 section 22.1.3.17 Array.prototype.pop ( )
+Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
+ Handle<Map> receiver_map;
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ // TODO(turbofan): Extend this to also handle fast (holey) double elements
+ // once we get the hole NaN mess sorted out in TurboFan/V8.
+ if (GetMapWitness(node).ToHandle(&receiver_map) &&
+ CanInlineArrayResizeOperation(receiver_map) &&
+ IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePrototypeMapsStable(receiver_map);
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+ receiver, effect, control);
+
+ // Check if the {receiver} has any elements.
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // Load the elements backing store from the {receiver}.
+ Node* elements = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, efalse, if_false);
+
+ // Ensure that we aren't popping from a copy-on-write backing store.
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
+ elements, efalse, if_false);
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
+
+ // Store the new {length} to the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+ receiver, length, efalse, if_false);
+
+ // Load the last entry from the {elements}.
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
+ receiver_map->elements_kind())),
+ elements, length, efalse, if_false);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
+ GetHoleyElementsKind(receiver_map->elements_kind()))),
+ elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ // Convert the hole to undefined. Do this last, so that we can optimize
+ // the conversion operator via some smart strength reduction in many cases.
+ if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
- if (r.InputsMatchAll(Type::Integral32())) {
- // Math.max(a:int32, b:int32, ...)
- Node* value = r.GetJSCallInput(0);
- for (int i = 1; i < r.GetJSCallArity(); i++) {
- Node* const input = r.GetJSCallInput(i);
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kNone),
- graph()->NewNode(simplified()->NumberLessThan(), input, value), value,
- input);
+ return NoChange();
+}
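
Stripped of graph plumbing, the diamond that ReduceArrayPop builds computes the following scalar routine; the sketch below is an analogy over a plain container, not the generated code itself:

#include <optional>
#include <vector>

template <typename T>
std::optional<T> ArrayPop(std::vector<T>& elements) {
  if (elements.empty()) return std::nullopt;  // if_true arm: undefined
  T value = elements.back();  // LoadElement at length - 1
  elements.pop_back();        // StoreField of the new length, hole the slot
  return value;               // if_false arm, merged via the Phi
}
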
+
+// ES6 section 22.1.3.18 Array.prototype.push ( )
+Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
+ Handle<Map> receiver_map;
+ // We need exactly target, receiver and value parameters.
+ if (node->op()->ValueInputCount() != 3) return NoChange();
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ if (GetMapWitness(node).ToHandle(&receiver_map) &&
+ CanInlineArrayResizeOperation(receiver_map)) {
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ dependencies()->AssumePrototypeMapsStable(receiver_map);
+
+ // TODO(turbofan): Perform type checks on the {value}. We are not guaranteed
+ // to learn from these checks in case they fail, as the witness (i.e. the
+ // map check from the LoadIC for a.push) might not be executed in baseline
+ // code (after we stored the value in the builtin and thereby changed the
+ // elements kind of a) before we decide to optimize this function again. We
+ // currently don't have a proper way to deal with this; the proper solution
+ // here is to learn on deopt, i.e. disable Array.prototype.push inlining
+ // for this function.
+ if (IsFastSmiElementsKind(receiver_map->elements_kind())) {
+ value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+ value, effect, control);
+ } else if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(), value, effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
}
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+ receiver, effect, control);
+
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ // TODO(turbofan): Check if we need to grow the {elements} backing store.
+ // This will deopt if we cannot grow the array further, and we currently
+ // don't necessarily learn from it. See the comment on the value type check
+ // above.
+ GrowFastElementsFlags flags = GrowFastElementsFlag::kArrayObject;
+ if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+ flags |= GrowFastElementsFlag::kDoubleElements;
+ }
+ elements = effect =
+ graph()->NewNode(simplified()->MaybeGrowFastElements(flags), receiver,
+ elements, length, length, effect, control);
+
+ // Append the value to the {elements}.
+ effect = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(receiver_map->elements_kind())),
+ elements, length, value, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
return NoChange();
}
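
The NumberSilenceNaN step in ReduceArrayPush exists because fast double arrays reserve a particular NaN bit pattern to represent holes, so a signaling NaN coming from user code must be canonicalized before it is stored. A standalone model of the silencing step (an assumption-level sketch, not the V8 operator itself):

#include <cmath>
#include <limits>

double SilenceNaN(double value) {
  // Any NaN, signaling or quiet, collapses to the canonical quiet NaN,
  // so user input can never alias the hole's bit pattern.
  return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
}
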
-// ES6 section 20.2.2.19 Math.imul ( x, y )
-Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+// ES6 section 20.2.2.1 Math.abs ( x )
+Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchTwo(Type::Number(), Type::Number())) {
- // Math.imul(a:number, b:number) -> NumberImul(NumberToUint32(a),
- // NumberToUint32(b))
- Node* a = graph()->NewNode(simplified()->NumberToUint32(), r.left());
- Node* b = graph()->NewNode(simplified()->NumberToUint32(), r.right());
- Node* value = graph()->NewNode(simplified()->NumberImul(), a, b);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.abs(a:plain-primitive) -> NumberAbs(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAbs(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.2 Math.acos ( x )
+Reduction JSBuiltinReducer::ReduceMathAcos(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.acos(a:plain-primitive) -> NumberAcos(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAcos(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.3 Math.acosh ( x )
+Reduction JSBuiltinReducer::ReduceMathAcosh(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.acosh(a:plain-primitive) -> NumberAcosh(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAcosh(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+Reduction JSBuiltinReducer::ReduceMathAsin(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.asin(a:plain-primitive) -> NumberAsin(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAsin(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.5 Math.asinh ( x )
+Reduction JSBuiltinReducer::ReduceMathAsinh(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.asinh(a:plain-primitive) -> NumberAsinh(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAsinh(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+Reduction JSBuiltinReducer::ReduceMathAtan(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.atan(a:plain-primitive) -> NumberAtan(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAtan(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+Reduction JSBuiltinReducer::ReduceMathAtanh(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.atanh(a:plain-primitive) -> NumberAtanh(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberAtanh(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+Reduction JSBuiltinReducer::ReduceMathAtan2(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+ // Math.atan2(a:plain-primitive,
+ // b:plain-primitive) -> NumberAtan2(ToNumber(a),
+ // ToNumber(b))
+ Node* left = ToNumber(r.left());
+ Node* right = ToNumber(r.right());
+ Node* value = graph()->NewNode(simplified()->NumberAtan2(), left, right);
return Replace(value);
}
return NoChange();
@@ -134,9 +416,10 @@ Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
// ES6 section 20.2.2.10 Math.ceil ( x )
Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.ceil(a:number) -> NumberCeil(a)
- Node* value = graph()->NewNode(simplified()->NumberCeil(), r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.ceil(a:plain-primitive) -> NumberCeil(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberCeil(), input);
return Replace(value);
}
return NoChange();
@@ -145,39 +428,194 @@ Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
// ES6 section 20.2.2.11 Math.clz32 ( x )
Reduction JSBuiltinReducer::ReduceMathClz32(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Unsigned32())) {
- // Math.clz32(a:unsigned32) -> NumberClz32(a)
- Node* value = graph()->NewNode(simplified()->NumberClz32(), r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.clz32(a:plain-primitive) -> NumberClz32(ToUint32(a))
+ Node* input = ToUint32(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberClz32(), input);
return Replace(value);
}
+ return NoChange();
+}
+
+// ES6 section 20.2.2.12 Math.cos ( x )
+Reduction JSBuiltinReducer::ReduceMathCos(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.cos(a:plain-primitive) -> NumberCos(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberCos(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.13 Math.cosh ( x )
+Reduction JSBuiltinReducer::ReduceMathCosh(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.cosh(a:plain-primitive) -> NumberCosh(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberCosh(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.14 Math.exp ( x )
+Reduction JSBuiltinReducer::ReduceMathExp(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.exp(a:plain-primitive) -> NumberExp(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberExp(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+Reduction JSBuiltinReducer::ReduceMathExpm1(Node* node) {
+ JSCallReduction r(node);
if (r.InputsMatchOne(Type::Number())) {
- // Math.clz32(a:number) -> NumberClz32(NumberToUint32(a))
- Node* value = graph()->NewNode(
- simplified()->NumberClz32(),
- graph()->NewNode(simplified()->NumberToUint32(), r.left()));
+ // Math.expm1(a:number) -> NumberExpm1(a)
+ Node* value = graph()->NewNode(simplified()->NumberExpm1(), r.left());
return Replace(value);
}
return NoChange();
}
-// ES6 draft 08-24-14, section 20.2.2.16.
+// ES6 section 20.2.2.16 Math.floor ( x )
Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.floor(a:number) -> NumberFloor(a)
- Node* value = graph()->NewNode(simplified()->NumberFloor(), r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.floor(a:plain-primitive) -> NumberFloor(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberFloor(), input);
return Replace(value);
}
return NoChange();
}
-// ES6 draft 08-24-14, section 20.2.2.17.
+// ES6 section 20.2.2.17 Math.fround ( x )
Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.fround(a:plain-primitive) -> NumberFround(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberFround(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.19 Math.imul ( x, y )
+Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+ // Math.imul(a:plain-primitive,
+ // b:plain-primitive) -> NumberImul(ToUint32(a),
+ // ToUint32(b))
+ Node* left = ToUint32(r.left());
+ Node* right = ToUint32(r.right());
+ Node* value = graph()->NewNode(simplified()->NumberImul(), left, right);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.20 Math.log ( x )
+Reduction JSBuiltinReducer::ReduceMathLog(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.log(a:plain-primitive) -> NumberLog(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberLog(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.21 Math.log1p ( x )
+Reduction JSBuiltinReducer::ReduceMathLog1p(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.log1p(a:plain-primitive) -> NumberLog1p(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberLog1p(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.22 Math.log10 ( x )
+Reduction JSBuiltinReducer::ReduceMathLog10(Node* node) {
+ JSCallReduction r(node);
if (r.InputsMatchOne(Type::Number())) {
- // Math.fround(a:number) -> TruncateFloat64ToFloat32(a)
- Node* value =
- graph()->NewNode(machine()->TruncateFloat64ToFloat32(), r.left());
+ // Math.log10(a:number) -> NumberLog10(a)
+ Node* value = graph()->NewNode(simplified()->NumberLog10(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.23 Math.log2 ( x )
+Reduction JSBuiltinReducer::ReduceMathLog2(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::Number())) {
+ // Math.log2(a:number) -> NumberLog2(a)
+ Node* value = graph()->NewNode(simplified()->NumberLog2(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values )
+Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchZero()) {
+ // Math.max() -> -Infinity
+ return Replace(jsgraph()->Constant(-V8_INFINITY));
+ }
+ if (r.InputsMatchAll(Type::PlainPrimitive())) {
+ // Math.max(a:plain-primitive, b:plain-primitive, ...)
+ Node* value = ToNumber(r.GetJSCallInput(0));
+ for (int i = 1; i < r.GetJSCallArity(); i++) {
+ Node* input = ToNumber(r.GetJSCallInput(i));
+ value = graph()->NewNode(simplified()->NumberMax(), value, input);
+ }
+ return Replace(value);
+ }
+ return NoChange();
+}
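
The rewritten ReduceMathMax no longer builds compare-and-select chains; it folds a single binary NumberMax over the converted inputs, seeded with -Infinity so the zero-argument case falls out naturally (ReduceMathMin below is the mirror image with +Infinity). A scalar model of that fold, in which the NaN propagation mirrors the JS semantics NumberMax provides:

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

double MathMax(const std::vector<double>& args) {
  double value = -std::numeric_limits<double>::infinity();  // Math.max()
  for (double a : args) {
    value = (std::isnan(value) || std::isnan(a))
                ? std::numeric_limits<double>::quiet_NaN()
                : std::max(value, a);
  }
  return value;
}
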
+
+// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
+Reduction JSBuiltinReducer::ReduceMathMin(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchZero()) {
+ // Math.min() -> Infinity
+ return Replace(jsgraph()->Constant(V8_INFINITY));
+ }
+ if (r.InputsMatchAll(Type::PlainPrimitive())) {
+ // Math.min(a:plain-primitive, b:plain-primitive, ...)
+ Node* value = ToNumber(r.GetJSCallInput(0));
+ for (int i = 1; i < r.GetJSCallArity(); i++) {
+ Node* input = ToNumber(r.GetJSCallInput(i));
+ value = graph()->NewNode(simplified()->NumberMin(), value, input);
+ }
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.26 Math.pow ( x, y )
+Reduction JSBuiltinReducer::ReduceMathPow(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchTwo(Type::PlainPrimitive(), Type::PlainPrimitive())) {
+ // Math.pow(a:plain-primitive,
+ // b:plain-primitive) -> NumberPow(ToNumber(a), ToNumber(b))
+ Node* left = ToNumber(r.left());
+ Node* right = ToNumber(r.right());
+ Node* value = graph()->NewNode(simplified()->NumberPow(), left, right);
return Replace(value);
}
return NoChange();
@@ -186,9 +624,57 @@ Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
// ES6 section 20.2.2.28 Math.round ( x )
Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.round(a:plain-primitive) -> NumberRound(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberRound(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.9 Math.cbrt ( x )
+Reduction JSBuiltinReducer::ReduceMathCbrt(Node* node) {
+ JSCallReduction r(node);
if (r.InputsMatchOne(Type::Number())) {
- // Math.round(a:number) -> NumberRound(a)
- Node* value = graph()->NewNode(simplified()->NumberRound(), r.left());
+ // Math.cbrt(a:number) -> NumberCbrt(a)
+ Node* value = graph()->NewNode(simplified()->NumberCbrt(), r.left());
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.29 Math.sign ( x )
+Reduction JSBuiltinReducer::ReduceMathSign(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.sign(a:plain-primitive) -> NumberSign(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberSign(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.30 Math.sin ( x )
+Reduction JSBuiltinReducer::ReduceMathSin(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.sin(a:plain-primitive) -> NumberSin(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberSin(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.31 Math.sinh ( x )
+Reduction JSBuiltinReducer::ReduceMathSinh(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.sinh(a:plain-primitive) -> NumberSinh(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberSinh(), input);
return Replace(value);
}
return NoChange();
@@ -197,9 +683,34 @@ Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
// ES6 section 20.2.2.32 Math.sqrt ( x )
Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.sqrt(a:number) -> Float64Sqrt(a)
- Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.sqrt(a:plain-primitive) -> NumberSqrt(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberSqrt(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.33 Math.tan ( x )
+Reduction JSBuiltinReducer::ReduceMathTan(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.tan(a:plain-primitive) -> NumberTan(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberTan(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.2.2.34 Math.tanh ( x )
+Reduction JSBuiltinReducer::ReduceMathTanh(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.tanh(a:plain-primitive) -> NumberTanh(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberTanh(), input);
return Replace(value);
}
return NoChange();
@@ -208,9 +719,240 @@ Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
// ES6 section 20.2.2.35 Math.trunc ( x )
Reduction JSBuiltinReducer::ReduceMathTrunc(Node* node) {
JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.trunc(a:number) -> NumberTrunc(a)
- Node* value = graph()->NewNode(simplified()->NumberTrunc(), r.left());
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // Math.trunc(a:plain-primitive) -> NumberTrunc(ToNumber(a))
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->NumberTrunc(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
+Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(type_cache_.kSafeInteger) ||
+ r.InputsMatchTwo(type_cache_.kSafeInteger,
+ type_cache_.kZeroOrUndefined) ||
+ r.InputsMatchTwo(type_cache_.kSafeInteger, type_cache_.kTenOrUndefined)) {
+ // Number.parseInt(a:safe-integer) -> NumberToInt32(a)
+ // Number.parseInt(a:safe-integer,b:#0\/undefined) -> NumberToInt32(a)
+ // Number.parseInt(a:safe-integer,b:#10\/undefined) -> NumberToInt32(a)
+ Node* input = r.GetJSCallInput(0);
+ Node* value = graph()->NewNode(simplified()->NumberToInt32(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(Type::PlainPrimitive())) {
+ // String.fromCharCode(a:plain-primitive) -> StringFromCharCode(a)
+ Node* input = ToNumber(r.GetJSCallInput(0));
+ Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+namespace {
+
+Node* GetStringWitness(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (receiver_type->Is(Type::String())) return receiver;
+ // Check if the {node} is dominated by a CheckString renaming for
+ // its {receiver}, and if so use that renaming as {receiver} for
+ // the lowering below.
+ for (Node* dominator = effect;;) {
+ if (dominator->opcode() == IrOpcode::kCheckString &&
+ dominator->InputAt(0) == receiver) {
+ return dominator;
+ }
+ if (dominator->op()->EffectInputCount() != 1) {
+ // Didn't find any appropriate CheckString node.
+ return nullptr;
+ }
+ dominator = NodeProperties::GetEffectInput(dominator);
+ }
+}
+
+} // namespace
+
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
+ // We need at least target, receiver and index parameters.
+ if (node->op()->ValueInputCount() >= 3) {
+ Node* index = NodeProperties::GetValueInput(node, 2);
+ Type* index_type = NodeProperties::GetType(index);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ if (index_type->Is(Type::Unsigned32())) {
+ if (Node* receiver = GetStringWitness(node)) {
+ // Determine the {receiver} length.
+ Node* receiver_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+ effect, control);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
+ receiver_length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue;
+ {
+ // Load the character from the {receiver}.
+ vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ index, if_true);
+
+ // Return it as a single character string.
+ vtrue = graph()->NewNode(simplified()->StringFromCharCode(), vtrue);
+ }
+
+ // Return the empty string otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->EmptyStringConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ }
+
+ return NoChange();
+}
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
+ // We need at least target, receiver and index parameters.
+ if (node->op()->ValueInputCount() >= 3) {
+ Node* index = NodeProperties::GetValueInput(node, 2);
+ Type* index_type = NodeProperties::GetType(index);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ if (index_type->Is(Type::Unsigned32())) {
+ if (Node* receiver = GetStringWitness(node)) {
+ // Determine the {receiver} length.
+ Node* receiver_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+ effect, control);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
+ receiver_length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
+
+ // Load the character from the {receiver}.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, if_true);
+
+ // Return NaN otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->NaNConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ }
+
+ return NoChange();
+}
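
Both string reducers guard the element load with the same unsigned bounds check before branching; in scalar form the charCodeAt fast path amounts to the sketch below, where -1 stands in for the NaN constant the graph produces on the out-of-range arm:

#include <cstdint>
#include <string>

int32_t CharCodeAt(const std::u16string& receiver, uint32_t index) {
  if (index < receiver.length()) {
    return receiver[index];  // if_true arm: StringCharCodeAt
  }
  return -1;  // if_false arm: NaN in the actual graph
}
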
+
+namespace {
+
+bool HasInstanceTypeWitness(Node* receiver, Node* effect,
+ InstanceType instance_type) {
+ for (Node* dominator = effect;;) {
+ if (dominator->opcode() == IrOpcode::kCheckMaps &&
+ dominator->InputAt(0) == receiver) {
+ // Check if all maps have the given {instance_type}.
+ for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
+ Node* const map = NodeProperties::GetValueInput(dominator, i);
+ Type* const map_type = NodeProperties::GetType(map);
+ if (!map_type->IsConstant()) return false;
+ Handle<Map> const map_value =
+ Handle<Map>::cast(map_type->AsConstant()->Value());
+ if (map_value->instance_type() != instance_type) return false;
+ }
+ return true;
+ }
+ switch (dominator->opcode()) {
+ case IrOpcode::kStoreField: {
+ FieldAccess const& access = FieldAccessOf(dominator->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ return false;
+ }
+ break;
+ }
+ case IrOpcode::kStoreElement:
+ break;
+ default: {
+ DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+ if (dominator->op()->EffectInputCount() != 1 ||
+ !dominator->op()->HasProperty(Operator::kNoWrite)) {
+ // Didn't find any appropriate CheckMaps node.
+ return false;
+ }
+ break;
+ }
+ }
+ dominator = NodeProperties::GetEffectInput(dominator);
+ }
+}
+
+} // namespace
+
+Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
+ Node* node, InstanceType instance_type, FieldAccess const& access) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
+ // Load the {receiver}s field.
+ Node* receiver_length = effect = graph()->NewNode(
+ simplified()->LoadField(access), receiver, effect, control);
+
+ // Check if the {receiver}s buffer was neutered.
+ Node* receiver_buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* receiver_buffer_bitfield = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ receiver_buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), receiver_buffer_bitfield,
+ jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
+ jsgraph()->ZeroConstant());
+
+ // Default to zero if the {receiver}s buffer was neutered.
+ Node* value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ check, receiver_length, jsgraph()->ZeroConstant());
+
+ ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
return NoChange();
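
The accessor lowering deliberately avoids deoptimizing for detached buffers: it always loads the stored length/offset and then selects between that and zero based on the WasNeutered bit, so a detached view simply reports 0. In scalar terms (illustrative struct and an assumed bit position, not V8's object layout):

#include <cstdint>

struct ArrayBufferView {
  uint32_t byte_length;
  uint32_t buffer_bit_field;  // holds the WasNeutered flag
  static const uint32_t kWasNeuteredMask = 1u << 0;  // assumed bit position
};

uint32_t ByteLength(const ArrayBufferView& view) {
  const bool live =
      (view.buffer_bit_field & ArrayBufferView::kWasNeuteredMask) == 0;
  return live ? view.byte_length : 0u;  // the Select node in the graph
}
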
@@ -223,33 +965,138 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
- case kMathMax:
- reduction = ReduceMathMax(node);
+ case kArrayPop:
+ return ReduceArrayPop(node);
+ case kArrayPush:
+ return ReduceArrayPush(node);
+ case kMathAbs:
+ reduction = ReduceMathAbs(node);
break;
- case kMathImul:
- reduction = ReduceMathImul(node);
+ case kMathAcos:
+ reduction = ReduceMathAcos(node);
break;
- case kMathClz32:
- reduction = ReduceMathClz32(node);
+ case kMathAcosh:
+ reduction = ReduceMathAcosh(node);
+ break;
+ case kMathAsin:
+ reduction = ReduceMathAsin(node);
+ break;
+ case kMathAsinh:
+ reduction = ReduceMathAsinh(node);
+ break;
+ case kMathAtan:
+ reduction = ReduceMathAtan(node);
+ break;
+ case kMathAtanh:
+ reduction = ReduceMathAtanh(node);
+ break;
+ case kMathAtan2:
+ reduction = ReduceMathAtan2(node);
+ break;
+ case kMathCbrt:
+ reduction = ReduceMathCbrt(node);
break;
case kMathCeil:
reduction = ReduceMathCeil(node);
break;
+ case kMathClz32:
+ reduction = ReduceMathClz32(node);
+ break;
+ case kMathCos:
+ reduction = ReduceMathCos(node);
+ break;
+ case kMathCosh:
+ reduction = ReduceMathCosh(node);
+ break;
+ case kMathExp:
+ reduction = ReduceMathExp(node);
+ break;
+ case kMathExpm1:
+ reduction = ReduceMathExpm1(node);
+ break;
case kMathFloor:
reduction = ReduceMathFloor(node);
break;
case kMathFround:
reduction = ReduceMathFround(node);
break;
+ case kMathImul:
+ reduction = ReduceMathImul(node);
+ break;
+ case kMathLog:
+ reduction = ReduceMathLog(node);
+ break;
+ case kMathLog1p:
+ reduction = ReduceMathLog1p(node);
+ break;
+ case kMathLog10:
+ reduction = ReduceMathLog10(node);
+ break;
+ case kMathLog2:
+ reduction = ReduceMathLog2(node);
+ break;
+ case kMathMax:
+ reduction = ReduceMathMax(node);
+ break;
+ case kMathMin:
+ reduction = ReduceMathMin(node);
+ break;
+ case kMathPow:
+ reduction = ReduceMathPow(node);
+ break;
case kMathRound:
reduction = ReduceMathRound(node);
break;
+ case kMathSign:
+ reduction = ReduceMathSign(node);
+ break;
+ case kMathSin:
+ reduction = ReduceMathSin(node);
+ break;
+ case kMathSinh:
+ reduction = ReduceMathSinh(node);
+ break;
case kMathSqrt:
reduction = ReduceMathSqrt(node);
break;
+ case kMathTan:
+ reduction = ReduceMathTan(node);
+ break;
+ case kMathTanh:
+ reduction = ReduceMathTanh(node);
+ break;
case kMathTrunc:
reduction = ReduceMathTrunc(node);
break;
+ case kNumberParseInt:
+ reduction = ReduceNumberParseInt(node);
+ break;
+ case kStringFromCharCode:
+ reduction = ReduceStringFromCharCode(node);
+ break;
+ case kStringCharAt:
+ return ReduceStringCharAt(node);
+ case kStringCharCodeAt:
+ return ReduceStringCharCodeAt(node);
+ case kDataViewByteLength:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_DATA_VIEW_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteLength());
+ case kDataViewByteOffset:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_DATA_VIEW_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case kTypedArrayByteLength:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_TYPED_ARRAY_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteLength());
+ case kTypedArrayByteOffset:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_TYPED_ARRAY_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case kTypedArrayLength:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_TYPED_ARRAY_TYPE, AccessBuilder::ForJSTypedArrayLength());
default:
break;
}
@@ -261,9 +1108,22 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return reduction;
}
+Node* JSBuiltinReducer::ToNumber(Node* input) {
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Number())) return input;
+ return graph()->NewNode(simplified()->PlainPrimitiveToNumber(), input);
+}
+
+Node* JSBuiltinReducer::ToUint32(Node* input) {
+ input = ToNumber(input);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Unsigned32())) return input;
+ return graph()->NewNode(simplified()->NumberToUint32(), input);
+}
Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
+Factory* JSBuiltinReducer::factory() const { return isolate()->factory(); }
Isolate* JSBuiltinReducer::isolate() const { return jsgraph()->isolate(); }
@@ -273,11 +1133,6 @@ CommonOperatorBuilder* JSBuiltinReducer::common() const {
}
-MachineOperatorBuilder* JSBuiltinReducer::machine() const {
- return jsgraph()->machine();
-}
-
-
SimplifiedOperatorBuilder* JSBuiltinReducer::simplified() const {
return jsgraph()->simplified();
}
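The new ToNumber/ToUint32 helpers earlier in this hunk only materialize a conversion node when the input's static type does not already guarantee the result (Type::Number(), Type::Unsigned32()). A sketch of the numeric semantics NumberToUint32 then has to implement, assuming the standard ECMAScript ToUint32 definition:

    #include <cmath>
    #include <cstdint>

    // ECMAScript-style ToUint32 on a value already converted to a number.
    uint32_t ToUint32(double number) {
      if (std::isnan(number) || std::isinf(number)) return 0u;
      double truncated = std::trunc(number);               // drop fraction
      double modulo = std::fmod(truncated, 4294967296.0);  // mod 2^32
      if (modulo < 0) modulo += 4294967296.0;              // into [0, 2^32)
      return static_cast<uint32_t>(modulo);
    }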
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index dfeb409291..2da834718c 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -5,53 +5,105 @@
#ifndef V8_COMPILER_JS_BUILTIN_REDUCER_H_
#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
+#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
// Forward declarations.
+class CompilationDependencies;
+class Factory;
class TypeCache;
namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+struct FieldAccess;
class JSGraph;
-class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
class JSBuiltinReducer final : public AdvancedReducer {
public:
- explicit JSBuiltinReducer(Editor* editor, JSGraph* jsgraph);
+ // Flags that control the mode of operation.
+ enum Flag {
+ kNoFlags = 0u,
+ kDeoptimizationEnabled = 1u << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ JSBuiltinReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
+ CompilationDependencies* dependencies);
~JSBuiltinReducer() final {}
Reduction Reduce(Node* node) final;
private:
- Reduction ReduceFunctionCall(Node* node);
- Reduction ReduceMathMax(Node* node);
- Reduction ReduceMathImul(Node* node);
+ Reduction ReduceArrayPop(Node* node);
+ Reduction ReduceArrayPush(Node* node);
+ Reduction ReduceMathAbs(Node* node);
+ Reduction ReduceMathAcos(Node* node);
+ Reduction ReduceMathAcosh(Node* node);
+ Reduction ReduceMathAsin(Node* node);
+ Reduction ReduceMathAsinh(Node* node);
+ Reduction ReduceMathAtan(Node* node);
+ Reduction ReduceMathAtanh(Node* node);
+ Reduction ReduceMathAtan2(Node* node);
+ Reduction ReduceMathCbrt(Node* node);
Reduction ReduceMathCeil(Node* node);
Reduction ReduceMathClz32(Node* node);
+ Reduction ReduceMathCos(Node* node);
+ Reduction ReduceMathCosh(Node* node);
+ Reduction ReduceMathExp(Node* node);
+ Reduction ReduceMathExpm1(Node* node);
Reduction ReduceMathFloor(Node* node);
Reduction ReduceMathFround(Node* node);
+ Reduction ReduceMathImul(Node* node);
+ Reduction ReduceMathLog(Node* node);
+ Reduction ReduceMathLog1p(Node* node);
+ Reduction ReduceMathLog10(Node* node);
+ Reduction ReduceMathLog2(Node* node);
+ Reduction ReduceMathMax(Node* node);
+ Reduction ReduceMathMin(Node* node);
+ Reduction ReduceMathPow(Node* node);
Reduction ReduceMathRound(Node* node);
+ Reduction ReduceMathSign(Node* node);
+ Reduction ReduceMathSin(Node* node);
+ Reduction ReduceMathSinh(Node* node);
Reduction ReduceMathSqrt(Node* node);
+ Reduction ReduceMathTan(Node* node);
+ Reduction ReduceMathTanh(Node* node);
Reduction ReduceMathTrunc(Node* node);
+ Reduction ReduceNumberParseInt(Node* node);
+ Reduction ReduceStringCharAt(Node* node);
+ Reduction ReduceStringCharCodeAt(Node* node);
+ Reduction ReduceStringFromCharCode(Node* node);
+ Reduction ReduceArrayBufferViewAccessor(Node* node,
+ InstanceType instance_type,
+ FieldAccess const& access);
+
+ Node* ToNumber(Node* value);
+ Node* ToUint32(Node* value);
+ Flags flags() const { return flags_; }
Graph* graph() const;
+ Factory* factory() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
CommonOperatorBuilder* common() const;
- MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ CompilationDependencies* const dependencies_;
+ Flags const flags_;
JSGraph* const jsgraph_;
TypeCache const& type_cache_;
};
+DEFINE_OPERATORS_FOR_FLAGS(JSBuiltinReducer::Flags)
+
} // namespace compiler
} // namespace internal
} // namespace v8
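The header now wraps the Flag enum in base::Flags, with DEFINE_OPERATORS_FOR_FLAGS generating the type-safe bitwise operators. A reduced stand-in, with one such operator written out by hand:

    #include <cstdint>

    enum Flag : uint32_t { kNoFlags = 0u, kDeoptimizationEnabled = 1u << 0 };

    // Hand-written version of an operator DEFINE_OPERATORS_FOR_FLAGS
    // would generate for base::Flags<Flag>.
    inline Flag operator|(Flag lhs, Flag rhs) {
      return static_cast<Flag>(static_cast<uint32_t>(lhs) |
                               static_cast<uint32_t>(rhs));
    }

    // Typical query, as a reducer would perform on flags() before
    // emitting speculative (deoptimizing) code.
    inline bool deoptimization_enabled(Flag flags) {
      return (flags & kDeoptimizationEnabled) != 0;
    }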
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 892dcc70ce..e39021412e 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -6,6 +6,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
#include "src/type-feedback-vector-inl.h"
@@ -71,7 +72,6 @@ Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
size_t const arity = p.arity() - 2;
NodeProperties::ReplaceValueInput(node, target, 0);
NodeProperties::ReplaceValueInput(node, target, 1);
- NodeProperties::RemoveFrameStateInput(node, 1);
// TODO(bmeurer): We might need to propagate the tail call mode to
// the JSCreateArray operator, because an Array call in tail call
// position must always properly consume the parent stack frame.
@@ -89,7 +89,6 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
DCHECK_LE(2u, p.arity());
Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
: NodeProperties::GetValueInput(node, 2);
- NodeProperties::RemoveFrameStateInput(node, 1);
NodeProperties::ReplaceValueInputs(node, value);
NodeProperties::ChangeOp(node, javascript()->ToNumber());
return Changed(node);
@@ -130,7 +129,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arg_array}).
CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
@@ -220,7 +219,6 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -233,7 +231,6 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
// Raise a TypeError if the {target} is a "classConstructor".
if (IsClassConstructor(shared->kind())) {
- NodeProperties::RemoveFrameStateInput(node, 0);
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
node, javascript()->CallRuntime(
@@ -272,7 +269,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
isolate());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
ConvertReceiverMode const convert_mode =
- (bound_this->IsNull() || bound_this->IsUndefined())
+ (bound_this->IsNull(isolate()) || bound_this->IsUndefined(isolate()))
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
size_t arity = p.arity();
@@ -326,16 +323,13 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
}
// Check that the {target} is still the {array_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, array_function,
- context, effect, control);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+ array_function);
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceValueInput(node, array_function, 0);
NodeProperties::ReplaceEffectInput(node, effect);
- NodeProperties::ReplaceControlInput(node, control);
return ReduceArrayConstructor(node);
} else if (feedback->IsWeakCell()) {
Handle<WeakCell> cell = Handle<WeakCell>::cast(feedback);
@@ -344,16 +338,14 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
jsgraph()->Constant(handle(cell->value(), isolate()));
// Check that the {target} is still the {target_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, target_function,
- context, effect, control);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+ target_function);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
// Specialize the JSCallFunction node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
NodeProperties::ReplaceEffectInput(node, effect);
- NodeProperties::ReplaceControlInput(node, control);
// Try to further reduce the JSCallFunction {node}.
Reduction const reduction = ReduceJSCallFunction(node);
@@ -372,7 +364,6 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -384,11 +375,6 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
// Raise a TypeError if the {target} is not a constructor.
if (!function->IsConstructor()) {
- // Drop the lazy bailout location and use the eager bailout point for
- // the runtime function (actually as lazy bailout point). It doesn't
- // really matter which bailout location we use since we never really
- // go back after throwing the exception.
- NodeProperties::RemoveFrameStateInput(node, 0);
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
@@ -408,7 +394,6 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
}
// Turn the {node} into a {JSCreateArray} call.
- NodeProperties::RemoveFrameStateInput(node, 1);
for (int i = arity; i > 0; --i) {
NodeProperties::ReplaceValueInput(
node, NodeProperties::GetValueInput(node, i), i + 1);
@@ -454,16 +439,12 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
}
// Check that the {target} is still the {array_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, array_function,
- context, effect, control);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+ array_function);
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceEffectInput(node, effect);
- NodeProperties::ReplaceControlInput(node, control);
- NodeProperties::RemoveFrameStateInput(node, 1);
for (int i = arity; i > 0; --i) {
NodeProperties::ReplaceValueInput(
node, NodeProperties::GetValueInput(node, i), i + 1);
@@ -478,16 +459,14 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
jsgraph()->Constant(handle(cell->value(), isolate()));
// Check that the {target} is still the {target_function}.
- Node* check = effect =
- graph()->NewNode(javascript()->StrictEqual(), target, target_function,
- context, effect, control);
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
+ target_function);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
// Specialize the JSCallConstruct node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
NodeProperties::ReplaceEffectInput(node, effect);
- NodeProperties::ReplaceControlInput(node, control);
if (target == new_target) {
NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
}
@@ -524,6 +503,10 @@ JSOperatorBuilder* JSCallReducer::javascript() const {
return jsgraph()->javascript();
}
+SimplifiedOperatorBuilder* JSCallReducer::simplified() const {
+ return jsgraph()->simplified();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
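Throughout this file the old StrictEqual + DeoptimizeUnless pair becomes a pure ReferenceEqual check consumed by CheckIf, which threads only the effect chain (the control input no longer changes) and deoptimizes when the guard fails. An illustrative plain-C++ analogue of the resulting speculative call specialization, with assert standing in for the deopt:

    #include <cassert>

    using Fn = int (*)(int);

    // The optimized code assumes target is still the function recorded
    // in the feedback; it bails out (in TurboFan: deoptimizes via
    // CheckIf) when the reference-equality guard fails.
    int CallSpecialized(Fn target, Fn feedback_target, int arg) {
      assert(target == feedback_target);  // CheckIf(ReferenceEqual(...))
      return feedback_target(arg);        // call the known target directly
    }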
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 9ffae152ac..8d9700a072 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -16,7 +16,7 @@ namespace compiler {
class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
-
+class SimplifiedOperatorBuilder;
// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
@@ -52,6 +52,7 @@ class JSCallReducer final : public Reducer {
MaybeHandle<Context> native_context() const { return native_context_; }
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
JSGraph* const jsgraph_;
Flags const flags_;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 4d9d1d9504..e02fc49de8 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -70,7 +70,7 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// before the function to which it belongs has initialized the slot.
// We must be conservative and check if the value in the slot is currently the
// hole or undefined. If it is neither of these, then it must be initialized.
- if (value->IsUndefined() || value->IsTheHole()) {
+ if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
return NoChange();
}
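The hunk above keeps the conservative rule spelled out in its comment: a context slot is folded only once its current value can no longer be an "uninitialized" sentinel. The shape of that guard, over a hypothetical Value type:

    // Hypothetical Value type; in V8 these are Handle<Object> queries.
    template <typename Value>
    bool CanSpecializeContextSlot(const Value& v) {
      return !v.IsUndefined() && !v.IsTheHole();
    }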
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 6dc5a4225a..f2c5edd630 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -37,7 +37,8 @@ class AllocationBuilder final {
// Primitive allocation of static size.
void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
- effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+ effect_ = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
graph()->NewNode(simplified()->Allocate(pretenure),
jsgraph()->Constant(size), effect_, control_);
@@ -101,7 +102,7 @@ class AllocationBuilder final {
// Retrieves the frame state holding actual argument values.
Node* GetArgumentsFrameState(Node* frame_state) {
- Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state);
FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
? outer_state
@@ -201,6 +202,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateArray:
return ReduceJSCreateArray(node);
+ case IrOpcode::kJSCreateClosure:
+ return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
case IrOpcode::kJSCreateLiteralArray:
@@ -276,8 +279,9 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node);
Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ Node* const control = graph()->start();
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
@@ -291,23 +295,24 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
shared_info->has_duplicate_parameters()) {
return NoChange();
}
- // TODO(bmeurer): Actually we don't need a frame state here.
Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNoFlags, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
+ node->RemoveInput(3); // Remove the frame state.
NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
case CreateArgumentsType::kUnmappedArguments: {
- // TODO(bmeurer): Actually we don't need a frame state here.
Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
@@ -315,11 +320,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- // TODO(bmeurer): Actually we don't need a frame state here.
Callable callable = CodeFactory::FastNewRestParameter(isolate());
+ Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
@@ -335,7 +340,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Handle<SharedFunctionInfo> shared;
if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
Node* const callee = NodeProperties::GetValueInput(node, 0);
- Node* const control = NodeProperties::GetControlInput(node);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// TODO(mstarzinger): Duplicate parameters are not handled yet.
@@ -376,7 +380,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
} else if (type == CreateArgumentsType::kUnmappedArguments) {
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- Node* const control = NodeProperties::GetControlInput(node);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
@@ -414,7 +417,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
int start_index = shared->internal_formal_parameter_count();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- Node* const control = NodeProperties::GetControlInput(node);
Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
@@ -507,12 +509,144 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
return Changed(node);
}
+Reduction JSCreateLowering::ReduceNewArrayToStubCall(
+ Node* node, Handle<AllocationSite> site) {
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ int const arity = static_cast<int>(p.arity());
+
+ ElementsKind elements_kind = site->GetElementsKind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (arity == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(0));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ } else if (arity == 1) {
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+
+ if (IsHoleyElementsKind(elements_kind)) {
+ ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* length = NodeProperties::GetValueInput(node, 2);
+ Node* equal = graph()->NewNode(simplified()->ReferenceEqual(), length,
+ jsgraph()->ZeroConstant());
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), equal, control);
+ Node* call_holey;
+ Node* call_packed;
+ Node* if_success_packed;
+ Node* if_success_holey;
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* if_equal = graph()->NewNode(common()->IfTrue(), branch);
+ {
+ ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+ CallDescriptor::kNeedsFrameState);
+
+ Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
+ node->InputAt(1),
+ jsgraph()->HeapConstant(site),
+ jsgraph()->Int32Constant(1),
+ jsgraph()->UndefinedConstant(),
+ length,
+ context,
+ frame_state,
+ effect,
+ if_equal};
+
+ call_holey =
+ graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
+ if_success_holey = graph()->NewNode(common()->IfSuccess(), call_holey);
+ }
+ Node* if_not_equal = graph()->NewNode(common()->IfFalse(), branch);
+ {
+ // Require elements kind to "go holey."
+ ArraySingleArgumentConstructorStub stub(
+ isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+ CallDescriptor::kNeedsFrameState);
+
+ Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
+ node->InputAt(1),
+ jsgraph()->HeapConstant(site),
+ jsgraph()->Int32Constant(1),
+ jsgraph()->UndefinedConstant(),
+ length,
+ context,
+ frame_state,
+ effect,
+ if_not_equal};
+
+ call_packed =
+ graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
+ if_success_packed = graph()->NewNode(common()->IfSuccess(), call_packed);
+ }
+ Node* merge = graph()->NewNode(common()->Merge(2), if_success_holey,
+ if_success_packed);
+ Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), call_holey,
+ call_packed, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ call_holey, call_packed, merge);
+
+ ReplaceWithValue(node, phi, effect_phi, merge);
+ return Changed(node);
+ }
+
+ DCHECK(arity > 1);
+ ArrayNArgumentsConstructorStub stub(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), arity + 1,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
+ // TODO(mstarzinger): Array constructor can throw. Hook up exceptional edges.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
// TODO(bmeurer): Optimize the subclassing case.
if (target != new_target) return NoChange();
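For the single-argument case, ReduceNewArrayToStubCall builds an explicit diamond: a ReferenceEqual(length, 0) branch, one ArraySingleArgumentConstructorStub call per arm (the requested elements kind when the length is zero, the holey variant otherwise, since unwritten slots are holes), and a Merge joined by an EffectPhi and a value Phi. The control-flow shape, as plain C++:

    // One constructor per arm; the ?: plays the role of the Branch,
    // Merge and Phi nodes in the graph above.
    using ArrayCtor = void* (*)(int length);

    void* NewSingleArgumentArray(int length, ArrayCtor requested_kind_ctor,
                                 ArrayCtor holey_kind_ctor) {
      return (length == 0) ? requested_kind_ctor(length)
                           : holey_kind_ctor(length);
    }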
@@ -531,16 +665,57 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
} else if (p.arity() == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
Type* length_type = NodeProperties::GetType(length);
- if (length_type->Is(Type::SignedSmall()) &&
- length_type->Min() >= 0 &&
- length_type->Max() <= kElementLoopUnrollLimit) {
+ if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
+ length_type->Max() <= kElementLoopUnrollLimit &&
+ length_type->Min() == length_type->Max()) {
int capacity = static_cast<int>(length_type->Max());
return ReduceNewArray(node, length, capacity, site);
}
}
}
- return NoChange();
+ return ReduceNewArrayToStubCall(node, site);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ Handle<SharedFunctionInfo> shared = p.shared_info();
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ int function_map_index =
+ Context::FunctionMapIndex(shared->language_mode(), shared->kind());
+ Node* function_map = effect =
+ graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
+ native_context, native_context, effect);
+ // Note that it is only safe to embed the raw entry point of the compile
+ // lazy stub into the code, because that stub is immortal and immovable.
+ Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
+ jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
+ Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+ Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
+ Node* the_hole = jsgraph()->TheHoleConstant();
+ Node* undefined = jsgraph()->UndefinedConstant();
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
+ a.Allocate(JSFunction::kSize, p.pretenure());
+ a.Store(AccessBuilder::ForMap(), function_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_literals_array);
+ a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
+ a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
+ a.Store(AccessBuilder::ForJSFunctionContext(), context);
+ a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
+ a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
}
Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
@@ -889,8 +1064,9 @@ Node* JSCreateLowering::AllocateFastLiteral(
Handle<Name> property_name(
boilerplate_map->instance_descriptors()->GetKey(i), isolate());
FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
- FieldAccess access = {kTaggedBase, index.offset(), property_name,
- Type::Tagged(), MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, index.offset(), property_name,
+ Type::Tagged(), MachineType::AnyTagged(), kFullWriteBarrier};
Node* value;
if (boilerplate->IsUnboxedDoubleField(index)) {
access.machine_type = MachineType::Float64();
@@ -908,21 +1084,25 @@ Node* JSCreateLowering::AllocateFastLiteral(
site_context->ExitScope(current_site, boilerplate_object);
} else if (property_details.representation().IsDouble()) {
// Allocate a mutable HeapNumber box and store the value into it.
- Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
value = effect = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- jsgraph()->NoContextConstant(), effect, control);
+ simplified()->Allocate(pretenure),
+ jsgraph()->Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), value,
+ jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+ effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
value, jsgraph()->Constant(
Handle<HeapNumber>::cast(boilerplate_value)->value()),
effect, control);
+ value = effect =
+ graph()->NewNode(common()->FinishRegion(), value, effect);
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
- value = boilerplate_value->IsUninitialized()
+ value = boilerplate_value->IsUninitialized(isolate())
? jsgraph()->ZeroConstant()
: jsgraph()->Constant(boilerplate_value);
} else {
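The final hunk replaces the AllocateMutableHeapNumber stub call with an inline allocation folded into a non-observable region: BeginRegion, Allocate, a StoreField for the map and one for the value, then FinishRegion. A plain-C++ analogue of the invariant the region enforces, assuming a simplified object layout:

    #include <cstdlib>
    #include <new>

    struct MutableHeapNumber {
      const void* map;  // written first, like StoreField(ForMap()) above
      double value;     // then the payload
    };

    // BeginRegion..FinishRegion keeps the allocation atomic for any
    // observer; the C++ analogue is to initialize every field before
    // the pointer escapes.
    MutableHeapNumber* AllocateMutableHeapNumber(const void* map, double v) {
      void* raw = std::malloc(sizeof(MutableHeapNumber));  // Allocate
      return new (raw) MutableHeapNumber{map, v};          // init, publish
    }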
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 52e7ec254a..2262e66ef2 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -45,6 +45,7 @@ class JSCreateLowering final : public AdvancedReducer {
Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateArray(Node* node);
+ Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
Reduction ReduceJSCreateLiteral(Node* node);
Reduction ReduceJSCreateFunctionContext(Node* node);
@@ -71,6 +72,8 @@ class JSCreateLowering final : public AdvancedReducer {
PretenureFlag pretenure,
AllocationSiteUsageContext* site_context);
+ Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
+
// Infers the LiteralsArray to use for a given {node}.
MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 1f12579ec7..69526cd7ea 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -16,22 +16,17 @@ namespace v8 {
namespace internal {
namespace compiler {
-static CallDescriptor::Flags AdjustFrameStatesForCall(Node* node) {
- int count = OperatorProperties::GetFrameStateInputCount(node->op());
- if (count > 1) {
- int index = NodeProperties::FirstFrameStateIndex(node) + 1;
- do {
- node->RemoveInput(index);
- } while (--count > 1);
- }
- return count > 0 ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
-}
+namespace {
+CallDescriptor::Flags FrameStateFlagForCall(Node* node) {
+ return OperatorProperties::HasFrameStateInput(node->op())
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
+}
-JSGenericLowering::JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph)
- : is_typing_enabled_(is_typing_enabled), jsgraph_(jsgraph) {}
+} // namespace
+JSGenericLowering::JSGenericLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
JSGenericLowering::~JSGenericLowering() {}
@@ -44,69 +39,44 @@ Reduction JSGenericLowering::Reduce(Node* node) {
break;
JS_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
- case IrOpcode::kBranch:
- case IrOpcode::kDeoptimizeIf:
- case IrOpcode::kDeoptimizeUnless:
- // TODO(mstarzinger): If typing is enabled then simplified lowering will
- // have inserted the correct ChangeBoolToBit, otherwise we need to perform
- // poor-man's representation inference here and insert manual change.
- if (!is_typing_enabled_) {
- Node* condition = node->InputAt(0);
- Node* test = graph()->NewNode(machine()->WordEqual(), condition,
- jsgraph()->TrueConstant());
- node->ReplaceInput(0, test);
- }
- // Fall-through.
default:
// Nothing to see.
return NoChange();
}
return Changed(node);
}
-
-#define REPLACE_BINARY_OP_IC_CALL(Op, token) \
- void JSGenericLowering::Lower##Op(Node* node) { \
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
- ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
- CallDescriptor::kPatchableCallSiteWithNop | flags); \
- }
-REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
-REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
-REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
-REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
-REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
-REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
-#undef REPLACE_BINARY_OP_IC_CALL
-
#define REPLACE_RUNTIME_CALL(op, fun) \
void JSGenericLowering::Lower##op(Node* node) { \
ReplaceWithRuntimeCall(node, fun); \
}
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
-REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
#undef REPLACE_RUNTIME_CALL
-#define REPLACE_STUB_CALL(Name) \
- void JSGenericLowering::LowerJS##Name(Node* node) { \
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
- Callable callable = CodeFactory::Name(isolate()); \
- ReplaceWithStubCall(node, callable, flags); \
+#define REPLACE_STUB_CALL(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
+ Callable callable = CodeFactory::Name(isolate()); \
+ ReplaceWithStubCall(node, callable, flags); \
}
REPLACE_STUB_CALL(Add)
REPLACE_STUB_CALL(Subtract)
+REPLACE_STUB_CALL(Multiply)
+REPLACE_STUB_CALL(Divide)
+REPLACE_STUB_CALL(Modulus)
REPLACE_STUB_CALL(BitwiseAnd)
REPLACE_STUB_CALL(BitwiseOr)
REPLACE_STUB_CALL(BitwiseXor)
+REPLACE_STUB_CALL(ShiftLeft)
+REPLACE_STUB_CALL(ShiftRight)
+REPLACE_STUB_CALL(ShiftRightLogical)
REPLACE_STUB_CALL(LessThan)
REPLACE_STUB_CALL(LessThanOrEqual)
REPLACE_STUB_CALL(GreaterThan)
REPLACE_STUB_CALL(GreaterThanOrEqual)
+REPLACE_STUB_CALL(HasProperty)
REPLACE_STUB_CALL(Equal)
REPLACE_STUB_CALL(NotEqual)
-REPLACE_STUB_CALL(StrictEqual)
-REPLACE_STUB_CALL(StrictNotEqual)
-REPLACE_STUB_CALL(ToBoolean)
REPLACE_STUB_CALL(ToInteger)
REPLACE_STUB_CALL(ToLength)
REPLACE_STUB_CALL(ToNumber)
@@ -117,7 +87,12 @@ REPLACE_STUB_CALL(ToString)
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags) {
- Operator::Properties properties = node->op()->properties();
+ ReplaceWithStubCall(node, callable, flags, node->op()->properties());
+}
+
+void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+ CallDescriptor::Flags flags,
+ Operator::Properties properties) {
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0, flags, properties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -129,7 +104,7 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
Runtime::FunctionId f,
int nargs_override) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Operator::Properties properties = node->op()->properties();
const Runtime::Function* fun = Runtime::FunctionForId(f);
int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
@@ -143,11 +118,32 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSStrictEqual(Node* node) {
+ Callable callable = CodeFactory::StrictEqual(isolate());
+ node->RemoveInput(4); // control
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
+}
+
+void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
+ Callable callable = CodeFactory::StrictNotEqual(isolate());
+ node->RemoveInput(4); // control
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
+ Operator::kEliminatable);
+}
+
+void JSGenericLowering::LowerJSToBoolean(Node* node) {
+ Callable callable = CodeFactory::ToBoolean(isolate());
+ node->AppendInput(zone(), graph()->start());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+ Operator::kEliminatable);
+}
void JSGenericLowering::LowerJSTypeOf(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
Callable callable = CodeFactory::Typeof(isolate());
- ReplaceWithStubCall(node, callable, flags);
+ node->AppendInput(zone(), graph()->start());
+ ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+ Operator::kEliminatable);
}
@@ -155,19 +151,17 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
Node* closure = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
- Callable callable =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
+ Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
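These lowerings now reach the feedback vector through the closure's LiteralsArray (JSFunction::kLiteralsOffset, then LiteralsArray::kFeedbackVectorOffset) instead of through the SharedFunctionInfo. The recurring offset - kHeapObjectTag constants encode V8's pointer tagging; a one-function sketch of that addressing:

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;  // heap pointers carry tag bit 1

    // A raw machine Load at (tagged_pointer + offset - kHeapObjectTag)
    // reads the field stored at `offset` of the pointed-to object.
    intptr_t FieldSlotAddress(intptr_t tagged_pointer, int field_offset) {
      return tagged_pointer + field_offset - kHeapObjectTag;
    }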
@@ -181,19 +175,17 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
Node* closure = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
- Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, UNINITIALIZED);
+ Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
@@ -206,115 +198,115 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
Node* closure = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
- Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), p.typeof_mode(), UNINITIALIZED);
+ Callable callable =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
- // Load global object from the context.
- Node* native_context = effect =
- graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
- jsgraph()->IntPtrConstant(
- Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
- effect, control);
- Node* global = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), native_context,
- jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
- effect, control);
- node->InsertInput(zone(), 0, global);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(3, vector);
- node->ReplaceInput(6, effect);
+ node->InsertInput(zone(), 0, jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(1, vector);
+ node->ReplaceInput(4, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* key = NodeProperties::GetValueInput(node, 1);
+ Node* value = NodeProperties::GetValueInput(node, 2);
Node* closure = NodeProperties::GetValueInput(node, 3);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
LanguageMode language_mode = p.language_mode();
- Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), language_mode, UNINITIALIZED);
+ Callable callable =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(4, vector);
+ typedef StoreWithVectorDescriptor Descriptor;
+ node->InsertInputs(zone(), 0, 1);
+ node->ReplaceInput(Descriptor::kReceiver, receiver);
+ node->ReplaceInput(Descriptor::kName, key);
+ node->ReplaceInput(Descriptor::kValue, value);
+ node->ReplaceInput(Descriptor::kSlot,
+ jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(Descriptor::kVector, vector);
node->ReplaceInput(7, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
Node* closure = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
+ Callable callable =
+ CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(4, vector);
+ typedef StoreWithVectorDescriptor Descriptor;
+ node->InsertInputs(zone(), 0, 2);
+ node->ReplaceInput(Descriptor::kReceiver, receiver);
+ node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
+ node->ReplaceInput(Descriptor::kValue, value);
+ node->ReplaceInput(Descriptor::kSlot,
+ jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(Descriptor::kVector, vector);
node->ReplaceInput(7, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
Node* closure = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
- Callable callable = CodeFactory::StoreICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
+ Callable callable =
+ CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
// Load the type feedback vector from the closure.
- Node* shared_info = effect = graph()->NewNode(
+ Node* literals = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag),
+ jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
effect, control);
Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), shared_info,
- jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ machine()->Load(MachineType::AnyTagged()), literals,
+ jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
kHeapObjectTag),
effect, control);
// Load global object from the context.
@@ -327,10 +319,14 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
machine()->Load(MachineType::AnyTagged()), native_context,
jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
effect, control);
- node->InsertInput(zone(), 0, global);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(4, vector);
+ typedef StoreWithVectorDescriptor Descriptor;
+ node->InsertInputs(zone(), 0, 3);
+ node->ReplaceInput(Descriptor::kReceiver, global);
+ node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
+ node->ReplaceInput(Descriptor::kValue, value);
+ node->ReplaceInput(Descriptor::kSlot,
+ jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(Descriptor::kVector, vector);
node->ReplaceInput(7, effect);
ReplaceWithStubCall(node, callable, flags);
}
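The three store lowerings above stop inserting inputs at hard-coded positions and instead rewire them through the named slots of StoreWithVectorDescriptor, so each input is placed by role. An illustrative stand-in for that slot naming (the indices shown match the order used above but are not authoritative):

    // Stand-in for StoreWithVectorDescriptor's parameter indices.
    struct StoreWithVectorSlots {
      enum : int { kReceiver = 0, kName = 1, kValue = 2, kSlot = 3, kVector = 4 };
    };

    // e.g. node->ReplaceInput(StoreWithVectorSlots::kValue, value);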
@@ -344,13 +340,8 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
}
-void JSGenericLowering::LowerJSHasProperty(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kHasProperty);
-}
-
-
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::InstanceOf(isolate());
ReplaceWithStubCall(node, callable, flags);
}
@@ -395,7 +386,7 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
void JSGenericLowering::LowerJSCreate(Node* node) {
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::FastNewObject(isolate());
ReplaceWithStubCall(node, callable, flags);
}
@@ -421,81 +412,25 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
Handle<AllocationSite> const site = p.site();
-
- // TODO(turbofan): We embed the AllocationSite from the Operator at this
- // point, which we should not do once we want to both consume the feedback
- // but at the same time shared the optimized code across native contexts,
- // as the AllocationSite is associated with a single native context (it's
- // stored in the type feedback vector after all). Once we go for cross
- // context code generation, we should somehow find a way to get to the
- // allocation site for the actual native context at runtime.
- if (!site.is_null()) {
- // Reduce {node} to the appropriate ArrayConstructorStub backend.
- // Note that these stubs "behave" like JSFunctions, which means they
- // expect a receiver on the stack, which they remove. We just push
- // undefined for the receiver.
- ElementsKind elements_kind = site->GetElementsKind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
- if (arity == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
- CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- } else if (arity == 1) {
- // TODO(bmeurer): Optimize for the 0 length non-holey case?
- ArraySingleArgumentConstructorStub stub(
- isolate(), GetHoleyElementsKind(elements_kind), override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- } else {
- ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
- arity + 1, CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- }
- } else {
- Node* new_target = node->InputAt(1);
- Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
- : jsgraph()->HeapConstant(site);
- node->RemoveInput(1);
- node->InsertInput(zone(), 1 + arity, new_target);
- node->InsertInput(zone(), 2 + arity, type_info);
- ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
- }
+ Node* new_target = node->InputAt(1);
+ Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
+ : jsgraph()->HeapConstant(site);
+ node->RemoveInput(1);
+ node->InsertInput(zone(), 1 + arity, new_target);
+ node->InsertInput(zone(), 2 + arity, type_info);
+ ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
}
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
- // Use the FastNewClosureStub that allocates in new space only for nested
- // functions that don't need literals cloning.
- if (p.pretenure() == NOT_TENURED && shared_info->num_literals() == 0) {
- Callable callable = CodeFactory::FastNewClosure(
- isolate(), shared_info->language_mode(), shared_info->kind());
+ // Use the FastNewClosureStub only for functions allocated in new space.
+ if (p.pretenure() == NOT_TENURED) {
+ Callable callable = CodeFactory::FastNewClosure(isolate());
ReplaceWithStubCall(node, callable, flags);
} else {
ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
@@ -507,15 +442,11 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
int const slot_count = OpParameter<int>(node->op());
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
-  // Use the FastNewContextStub only for function contexts up to the maximum size.
- if (slot_count <= FastNewContextStub::kMaximumSlots) {
- Callable callable = CodeFactory::FastNewContext(isolate(), slot_count);
- ReplaceWithStubCall(node, callable, flags);
- } else {
- ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
- }
+ Callable callable = CodeFactory::FastNewFunctionContext(isolate());
+ node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
+ ReplaceWithStubCall(node, callable, flags);
}
@@ -526,7 +457,7 @@ void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
@@ -545,7 +476,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -565,7 +496,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::FastCloneRegExp(isolate());
Node* literal_index = jsgraph()->SmiConstant(p.index());
Node* literal_flags = jsgraph()->SmiConstant(p.flags());
@@ -601,7 +532,7 @@ void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
void JSGenericLowering::LowerJSCallConstruct(Node* node) {
CallConstructParameters const& p = CallConstructParametersOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::Construct(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
@@ -623,7 +554,7 @@ void JSGenericLowering::LowerJSCallFunction(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
ConvertReceiverMode const mode = p.convert_mode();
Callable callable = CodeFactory::Call(isolate(), mode);
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
if (p.tail_call_mode() == TailCallMode::kAllow) {
flags |= CallDescriptor::kSupportsTailCalls;
}
@@ -639,7 +570,6 @@ void JSGenericLowering::LowerJSCallFunction(Node* node) {
void JSGenericLowering::LowerJSCallRuntime(Node* node) {
const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
- AdjustFrameStatesForCall(node);
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
@@ -685,9 +615,17 @@ void JSGenericLowering::LowerJSStoreMessage(Node* node) {
NodeProperties::ChangeOp(node, machine()->Store(representation));
}
+void JSGenericLowering::LowerJSGeneratorStore(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
-void JSGenericLowering::LowerJSYield(Node* node) { UNIMPLEMENTED(); }
+void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 5ee759bcc8..38ee431f15 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -24,7 +24,7 @@ class Linkage;
// Lowers JS-level operators to runtime and IC calls in the "generic" case.
class JSGenericLowering final : public Reducer {
public:
- JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph);
+ explicit JSGenericLowering(JSGraph* jsgraph);
~JSGenericLowering() final;
Reduction Reduce(Node* node) final;
@@ -37,6 +37,8 @@ class JSGenericLowering final : public Reducer {
// Helpers to replace existing nodes with a generic call.
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
+ void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
+ Operator::Properties properties);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
Zone* zone() const;
@@ -47,7 +49,6 @@ class JSGenericLowering final : public Reducer {
MachineOperatorBuilder* machine() const;
private:
- bool const is_typing_enabled_;
JSGraph* const jsgraph_;
};
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index d8c9f17fd4..5ced04e9c1 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -12,7 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/lookup.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects-inl.h"
#include "src/type-cache.h"
namespace v8 {
@@ -74,6 +74,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
// properties of the global object here (represented as PropertyCell).
LookupIterator it(global_object, name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
+ if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
PropertyDetails property_details = property_cell->property_details();
Handle<Object> property_cell_value(property_cell->value(), isolate());
@@ -130,7 +131,6 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
Node* value = NodeProperties::GetValueInput(node, 0);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -154,6 +154,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
// properties of the global object here (represented as PropertyCell).
LookupIterator it(global_object, name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
+ if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
PropertyDetails property_details = property_cell->property_details();
Handle<Object> property_cell_value(property_cell->value(), isolate());
@@ -168,37 +169,35 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
dependencies()->AssumePropertyCell(property_cell);
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
- jsgraph()->Constant(property_cell_value));
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
+ jsgraph()->Constant(property_cell_value));
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
break;
}
case PropertyCellType::kConstantType: {
// Record a code dependency on the cell, and just deoptimize if the new
      // value's type doesn't match the type of the previous value in the cell.
dependencies()->AssumePropertyCell(property_cell);
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
- Type* property_cell_value_type = Type::TaggedSigned();
+ Type* property_cell_value_type;
if (property_cell_value->IsHeapObject()) {
- // Deoptimize if the {value} is a Smi.
- control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- effect, control);
-
-          // Load the {value} map and check it against the {property_cell} map.
- Node* value_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, effect, control);
+ // Check that the {value} is a HeapObject.
+ value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+ value, effect, control);
+
+          // Check the {value} map against the {property_cell} map.
Handle<Map> property_cell_value_map(
Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
- check = graph()->NewNode(
- simplified()->ReferenceEqual(Type::Any()), value_map,
- jsgraph()->HeapConstant(property_cell_value_map));
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(1), value,
+ jsgraph()->HeapConstant(property_cell_value_map), effect, control);
property_cell_value_type = Type::TaggedPointer();
+ } else {
+ // Check that the {value} is a Smi.
+ value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+ value, effect, control);
+ property_cell_value_type = Type::TaggedSigned();
}
- control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, effect, control);
effect = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForPropertyCellValue(property_cell_value_type)),
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 98ca7aa3c3..cafd047e74 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -14,26 +14,77 @@ namespace compiler {
#define CACHED(name, expr) \
cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
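
The CACHED macro above is a lazily initialized per-graph cache: the node-building expression runs only on the first request for a given key, and every later request returns the memoized node. A minimal standalone version of the idiom, with hypothetical types (not the V8 API):

#include <array>

struct Node { double value; };

class ConstantCache {
 public:
  enum Key { kZero, kOne, kNumKeys };  // kNumKeys must remain last.

  Node* Zero() { return Cached(kZero, 0.0); }
  Node* One() { return Cached(kOne, 1.0); }

 private:
  // Build the node on first use, then hand out the memoized pointer.
  Node* Cached(Key key, double value) {
    if (nodes_[key] == nullptr) nodes_[key] = new Node{value};
    return nodes_[key];
  }
  std::array<Node*, kNumKeys> nodes_{};  // zero-initialized like cached_nodes_
};
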
+Node* JSGraph::AllocateInNewSpaceStubConstant() {
+ return CACHED(kAllocateInNewSpaceStubConstant,
+ HeapConstant(isolate()->builtins()->AllocateInNewSpace()));
+}
-Node* JSGraph::CEntryStubConstant(int result_size) {
- if (result_size == 1) {
- return CACHED(kCEntryStubConstant,
- HeapConstant(CEntryStub(isolate(), 1).GetCode()));
- }
- return HeapConstant(CEntryStub(isolate(), result_size).GetCode());
+Node* JSGraph::AllocateInOldSpaceStubConstant() {
+ return CACHED(kAllocateInOldSpaceStubConstant,
+ HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
}
+Node* JSGraph::ToNumberBuiltinConstant() {
+ return CACHED(kToNumberBuiltinConstant,
+ HeapConstant(isolate()->builtins()->ToNumber()));
+}
+
+Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
+ ArgvMode argv_mode, bool builtin_exit_frame) {
+ if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack &&
+ result_size == 1) {
+ CachedNode key = builtin_exit_frame
+ ? kCEntryStubWithBuiltinExitFrameConstant
+ : kCEntryStubConstant;
+ return CACHED(key,
+ HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
+ argv_mode, builtin_exit_frame)
+ .GetCode()));
+ }
+ CEntryStub stub(isolate(), result_size, save_doubles, argv_mode,
+ builtin_exit_frame);
+ return HeapConstant(stub.GetCode());
+}
Node* JSGraph::EmptyFixedArrayConstant() {
return CACHED(kEmptyFixedArrayConstant,
HeapConstant(factory()->empty_fixed_array()));
}
+Node* JSGraph::EmptyLiteralsArrayConstant() {
+ return CACHED(kEmptyLiteralsArrayConstant,
+ HeapConstant(factory()->empty_literals_array()));
+}
+
+Node* JSGraph::EmptyStringConstant() {
+ return CACHED(kEmptyStringConstant, HeapConstant(factory()->empty_string()));
+}
+
+Node* JSGraph::FixedArrayMapConstant() {
+ return CACHED(kFixedArrayMapConstant,
+ HeapConstant(factory()->fixed_array_map()));
+}
+
+Node* JSGraph::FixedDoubleArrayMapConstant() {
+ return CACHED(kFixedDoubleArrayMapConstant,
+ HeapConstant(factory()->fixed_double_array_map()));
+}
+
+Node* JSGraph::HeapNumberMapConstant() {
+ return CACHED(kHeapNumberMapConstant,
+ HeapConstant(factory()->heap_number_map()));
+}
+
Node* JSGraph::OptimizedOutConstant() {
return CACHED(kOptimizedOutConstant,
HeapConstant(factory()->optimized_out()));
}
+Node* JSGraph::StaleRegisterConstant() {
+ return CACHED(kStaleRegisterConstant,
+ HeapConstant(factory()->stale_register()));
+}
+
Node* JSGraph::UndefinedConstant() {
return CACHED(kUndefinedConstant, HeapConstant(factory()->undefined_value()));
}
@@ -63,7 +114,6 @@ Node* JSGraph::ZeroConstant() {
return CACHED(kZeroConstant, NumberConstant(0.0));
}
-
Node* JSGraph::OneConstant() {
return CACHED(kOneConstant, NumberConstant(1.0));
}
@@ -76,9 +126,6 @@ Node* JSGraph::NaNConstant() {
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
- if (value->IsConsString()) {
- value = String::Flatten(Handle<String>::cast(value), TENURED);
- }
Node** loc = cache_.FindHeapConstant(value);
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->HeapConstant(value));
@@ -92,15 +139,15 @@ Node* JSGraph::Constant(Handle<Object> value) {
// canonicalized node can be used.
if (value->IsNumber()) {
return Constant(value->Number());
- } else if (value->IsUndefined()) {
+ } else if (value->IsUndefined(isolate())) {
return UndefinedConstant();
- } else if (value->IsTrue()) {
+ } else if (value->IsTrue(isolate())) {
return TrueConstant();
- } else if (value->IsFalse()) {
+ } else if (value->IsFalse(isolate())) {
return FalseConstant();
- } else if (value->IsNull()) {
+ } else if (value->IsNull(isolate())) {
return NullConstant();
- } else if (value->IsTheHole()) {
+ } else if (value->IsTheHole(isolate())) {
return TheHoleConstant();
} else {
return HeapConstant(Handle<HeapObject>::cast(value));
@@ -121,6 +168,11 @@ Node* JSGraph::Constant(int32_t value) {
return NumberConstant(value);
}
+Node* JSGraph::Constant(uint32_t value) {
+ if (value == 0) return ZeroConstant();
+ if (value == 1) return OneConstant();
+ return NumberConstant(value);
+}
Node* JSGraph::Int32Constant(int32_t value) {
Node** loc = cache_.FindInt32Constant(value);
@@ -139,6 +191,30 @@ Node* JSGraph::Int64Constant(int64_t value) {
return *loc;
}
+Node* JSGraph::RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
+ Node** loc = cache_.FindRelocatableInt32Constant(
+ value, static_cast<RelocInfoMode>(rmode));
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->RelocatableInt32Constant(value, rmode));
+ }
+ return *loc;
+}
+
+Node* JSGraph::RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
+ Node** loc = cache_.FindRelocatableInt64Constant(
+ value, static_cast<RelocInfoMode>(rmode));
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->RelocatableInt64Constant(value, rmode));
+ }
+ return *loc;
+}
+
+Node* JSGraph::RelocatableIntPtrConstant(intptr_t value,
+ RelocInfo::Mode rmode) {
+ return kPointerSize == 8
+ ? RelocatableInt64Constant(value, rmode)
+ : RelocatableInt32Constant(static_cast<int>(value), rmode);
+}
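
RelocatableIntPtrConstant just dispatches on the target word size. The same decision in isolation, with sizeof(void*) standing in for V8's kPointerSize:

#include <cstdint>

// 64-bit targets get a 64-bit constant; 32-bit targets narrow to 32 bits.
int64_t WidenIntPtrForTarget(intptr_t value) {
  return sizeof(void*) == 8 ? static_cast<int64_t>(value)
                            : static_cast<int32_t>(value);
}
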
Node* JSGraph::NumberConstant(double value) {
Node** loc = cache_.FindNumberConstant(value);
@@ -180,22 +256,10 @@ Node* JSGraph::ExternalConstant(Runtime::FunctionId function_id) {
return ExternalConstant(ExternalReference(function_id, isolate()));
}
-
-Node* JSGraph::EmptyFrameState() {
- Node* empty_frame_state = cached_nodes_[kEmptyFrameState];
- if (!empty_frame_state || empty_frame_state->IsDead()) {
- Node* state_values = graph()->NewNode(common()->StateValues(0));
- empty_frame_state = graph()->NewNode(
- common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(), nullptr),
- state_values, state_values, state_values, NoContextConstant(),
- UndefinedConstant(), graph()->start());
- cached_nodes_[kEmptyFrameState] = empty_frame_state;
- }
- return empty_frame_state;
+Node* JSGraph::EmptyStateValues() {
+ return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(0)));
}
-
Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 06e8030164..9d6f27dbe6 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -39,9 +39,21 @@ class JSGraph : public ZoneObject {
}
// Canonicalized global constants.
- Node* CEntryStubConstant(int result_size);
+ Node* AllocateInNewSpaceStubConstant();
+ Node* AllocateInOldSpaceStubConstant();
+ Node* ToNumberBuiltinConstant();
+ Node* CEntryStubConstant(int result_size,
+ SaveFPRegsMode save_doubles = kDontSaveFPRegs,
+ ArgvMode argv_mode = kArgvOnStack,
+ bool builtin_exit_frame = false);
Node* EmptyFixedArrayConstant();
+ Node* EmptyLiteralsArrayConstant();
+ Node* EmptyStringConstant();
+ Node* FixedArrayMapConstant();
+ Node* FixedDoubleArrayMapConstant();
+ Node* HeapNumberMapConstant();
Node* OptimizedOutConstant();
+ Node* StaleRegisterConstant();
Node* UndefinedConstant();
Node* TheHoleConstant();
Node* TrueConstant();
@@ -66,6 +78,9 @@ class JSGraph : public ZoneObject {
// Creates a NumberConstant node, usually canonicalized.
Node* Constant(int32_t value);
+ // Creates a NumberConstant node, usually canonicalized.
+ Node* Constant(uint32_t value);
+
// Creates a Int32Constant node, usually canonicalized.
Node* Int32Constant(int32_t value);
Node* Uint32Constant(uint32_t value) {
@@ -96,6 +111,10 @@ class JSGraph : public ZoneObject {
return IntPtrConstant(bit_cast<intptr_t>(value));
}
+ Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode);
+ Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode);
+ Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
+
// Creates a Float32Constant node, usually canonicalized.
Node* Float32Constant(float value);
@@ -115,9 +134,9 @@ class JSGraph : public ZoneObject {
// stubs and runtime functions that do not require a context.
Node* NoContextConstant() { return ZeroConstant(); }
-  // Creates an empty frame state for cases where we know that a function
- // cannot deopt.
- Node* EmptyFrameState();
+ // Creates an empty StateValues node, used when we don't have any concrete
+ // values for a certain part of the frame state.
+ Node* EmptyStateValues();
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
@@ -135,9 +154,19 @@ class JSGraph : public ZoneObject {
private:
enum CachedNode {
+ kAllocateInNewSpaceStubConstant,
+ kAllocateInOldSpaceStubConstant,
+ kToNumberBuiltinConstant,
kCEntryStubConstant,
+ kCEntryStubWithBuiltinExitFrameConstant,
kEmptyFixedArrayConstant,
+ kEmptyLiteralsArrayConstant,
+ kEmptyStringConstant,
+ kFixedArrayMapConstant,
+ kFixedDoubleArrayMapConstant,
+ kHeapNumberMapConstant,
kOptimizedOutConstant,
+ kStaleRegisterConstant,
kUndefinedConstant,
kTheHoleConstant,
kTrueConstant,
@@ -146,7 +175,7 @@ class JSGraph : public ZoneObject {
kZeroConstant,
kOneConstant,
kNaNConstant,
- kEmptyFrameState,
+ kEmptyStateValues,
kDead,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 0e0508bcd4..ce7b33ba9f 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -67,21 +67,32 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
  // Stop inlining once the maximum allowed level is reached.
int level = 0;
- for (Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ for (Node* frame_state = NodeProperties::GetFrameStateInput(node);
frame_state->opcode() == IrOpcode::kFrameState;
- frame_state = NodeProperties::GetFrameStateInput(frame_state, 0)) {
+ frame_state = NodeProperties::GetFrameStateInput(frame_state)) {
if (++level > FLAG_max_inlining_levels) return NoChange();
}
// Gather feedback on how often this call site has been hit before.
int calls = -1; // Same default as CallICNexus::ExtractCallCount.
- // TODO(turbofan): We also want call counts for constructor calls.
if (node->opcode() == IrOpcode::kJSCallFunction) {
CallFunctionParameters p = CallFunctionParametersOf(node->op());
if (p.feedback().IsValid()) {
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
calls = nexus.ExtractCallCount();
}
+ } else {
+ DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
+ CallConstructParameters p = CallConstructParametersOf(node->op());
+ if (p.feedback().IsValid()) {
+ int const extra_index =
+ p.feedback().vector()->GetIndex(p.feedback().slot()) + 1;
+ Handle<Object> feedback_extra(p.feedback().vector()->get(extra_index),
+ function->GetIsolate());
+ if (feedback_extra->IsSmi()) {
+ calls = Handle<Smi>::cast(feedback_extra)->value();
+ }
+ }
}
// ---------------------------------------------------------------------------
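
The new else-branch reads construct call counts directly out of the feedback vector: the count, when it has been recorded as a Smi, lives in the entry immediately after the call site's feedback slot. A hypothetical mirror of that layout, with std::variant standing in for the tagged entry:

#include <variant>
#include <vector>

// std::monostate stands in for any non-Smi entry (e.g. cleared feedback).
using Entry = std::variant<std::monostate, int>;

int ExtractCallCount(const std::vector<Entry>& vector, int feedback_index) {
  const Entry& extra = vector[feedback_index + 1];  // count follows feedback
  if (const int* count = std::get_if<int>(&extra)) return *count;
  return -1;  // same "unknown" default as CallICNexus::ExtractCallCount
}
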
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index e3254bd077..635daa4d76 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -4,20 +4,22 @@
#include "src/compiler/js-inlining.h"
-#include "src/ast/ast.h"
#include "src/ast/ast-numbering.h"
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/compiler.h"
-#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-hint-analyzer.h"
#include "src/isolate-inl.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/rewriter.h"
namespace v8 {
@@ -54,13 +56,9 @@ class JSCallAccessor {
return call_->InputAt(formal_arguments() + 1);
}
- Node* frame_state_before() {
- return NodeProperties::GetFrameStateInput(call_, 1);
- }
-
- Node* frame_state_after() {
-    // Both {JSCallFunction} and {JSCallConstruct} have frame state after.
- return NodeProperties::GetFrameStateInput(call_, 0);
+ Node* frame_state() {
+    // Both {JSCallFunction} and {JSCallConstruct} have frame state.
+ return NodeProperties::GetFrameStateInput(call_);
}
int formal_arguments() {
@@ -75,63 +73,6 @@ class JSCallAccessor {
};
-class CopyVisitor {
- public:
- CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
- : sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "Sentinel", 0, 0,
- 0, 0, 0, 0),
- sentinel_(target_graph->NewNode(&sentinel_op_)),
- copies_(source_graph->NodeCount(), sentinel_, temp_zone),
- source_graph_(source_graph),
- target_graph_(target_graph),
- temp_zone_(temp_zone) {}
-
- Node* GetCopy(Node* orig) { return copies_[orig->id()]; }
-
- void CopyGraph() {
- NodeVector inputs(temp_zone_);
- // TODO(bmeurer): AllNodes should be turned into something like
- // Graph::CollectNodesReachableFromEnd() and the gray set stuff should be
- // removed since it's only needed by the visualizer.
- AllNodes all(temp_zone_, source_graph_);
- // Copy all nodes reachable from end.
- for (Node* orig : all.live) {
- Node* copy = GetCopy(orig);
- if (copy != sentinel_) {
- // Mapping already exists.
- continue;
- }
- // Copy the node.
- inputs.clear();
- for (Node* input : orig->inputs()) inputs.push_back(copies_[input->id()]);
- copy = target_graph_->NewNode(orig->op(), orig->InputCount(),
- inputs.empty() ? nullptr : &inputs[0]);
- copies_[orig->id()] = copy;
- }
- // For missing inputs.
- for (Node* orig : all.live) {
- Node* copy = copies_[orig->id()];
- for (int i = 0; i < copy->InputCount(); ++i) {
- Node* input = copy->InputAt(i);
- if (input == sentinel_) {
- copy->ReplaceInput(i, GetCopy(orig->InputAt(i)));
- }
- }
- }
- }
-
- const NodeVector& copies() const { return copies_; }
-
- private:
- Operator const sentinel_op_;
- Node* const sentinel_;
- NodeVector copies_;
- Graph* const source_graph_;
- Graph* const target_graph_;
- Zone* const temp_zone_;
-};
-
-
Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Node* frame_state, Node* start, Node* end) {
// The scheduler is smart enough to place our code; we just ensure {control}
@@ -166,13 +107,13 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Replace(use, new_target);
} else if (index == inlinee_arity_index) {
// The projection is requesting the number of arguments.
- Replace(use, jsgraph_->Int32Constant(inliner_inputs - 2));
+ Replace(use, jsgraph()->Int32Constant(inliner_inputs - 2));
} else if (index == inlinee_context_index) {
// The projection is requesting the inlinee function context.
Replace(use, context);
} else {
// Call has fewer arguments than required, fill with undefined.
- Replace(use, jsgraph_->UndefinedConstant());
+ Replace(use, jsgraph()->UndefinedConstant());
}
break;
}
@@ -203,9 +144,8 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
case IrOpcode::kDeoptimize:
case IrOpcode::kTerminate:
case IrOpcode::kThrow:
- NodeProperties::MergeControlToEnd(jsgraph_->graph(), jsgraph_->common(),
- input);
- Revisit(jsgraph_->graph()->end());
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
break;
default:
UNREACHABLE();
@@ -219,20 +159,20 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
// uses with said value or kill value uses if no value can be returned.
if (values.size() > 0) {
int const input_count = static_cast<int>(controls.size());
- Node* control_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Merge(input_count), input_count, &controls.front());
+ Node* control_output = graph()->NewNode(common()->Merge(input_count),
+ input_count, &controls.front());
values.push_back(control_output);
effects.push_back(control_output);
- Node* value_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Phi(MachineRepresentation::kTagged, input_count),
+ Node* value_output = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, input_count),
static_cast<int>(values.size()), &values.front());
- Node* effect_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->EffectPhi(input_count),
- static_cast<int>(effects.size()), &effects.front());
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(input_count),
+ static_cast<int>(effects.size()), &effects.front());
ReplaceWithValue(call, value_output, effect_output, control_output);
return Changed(value_output);
} else {
- ReplaceWithValue(call, call, call, jsgraph_->Dead());
+ ReplaceWithValue(call, call, call, jsgraph()->Dead());
return Changed(call);
}
}
@@ -243,24 +183,24 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
FrameStateType frame_state_type,
Handle<SharedFunctionInfo> shared) {
const FrameStateFunctionInfo* state_info =
- jsgraph_->common()->CreateFrameStateFunctionInfo(
- frame_state_type, parameter_count + 1, 0, shared);
+ common()->CreateFrameStateFunctionInfo(frame_state_type,
+ parameter_count + 1, 0, shared);
- const Operator* op = jsgraph_->common()->FrameState(
+ const Operator* op = common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = jsgraph_->common()->StateValues(0);
- Node* node0 = jsgraph_->graph()->NewNode(op0);
+ const Operator* op0 = common()->StateValues(0);
+ Node* node0 = graph()->NewNode(op0);
NodeVector params(local_zone_);
for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
params.push_back(node->InputAt(1 + parameter));
}
const Operator* op_param =
- jsgraph_->common()->StateValues(static_cast<int>(params.size()));
- Node* params_node = jsgraph_->graph()->NewNode(
+ common()->StateValues(static_cast<int>(params.size()));
+ Node* params_node = graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
- return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
- jsgraph_->UndefinedConstant(),
- node->InputAt(0), outer_frame_state);
+ return graph()->NewNode(op, params_node, node0, node0,
+ jsgraph()->UndefinedConstant(), node->InputAt(0),
+ outer_frame_state);
}
Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
@@ -272,25 +212,25 @@ Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
  // If we are inlining a tail call, drop the caller's frame state and the
  // arguments adaptor frame state if one exists.
- frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ frame_state = NodeProperties::GetFrameStateInput(frame_state);
if (frame_state->opcode() == IrOpcode::kFrameState) {
FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
if (frame_info.type() == FrameStateType::kArgumentsAdaptor) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ frame_state = NodeProperties::GetFrameStateInput(frame_state);
}
}
const FrameStateFunctionInfo* state_info =
- jsgraph_->common()->CreateFrameStateFunctionInfo(
+ common()->CreateFrameStateFunctionInfo(
FrameStateType::kTailCallerFunction, 0, 0, shared);
- const Operator* op = jsgraph_->common()->FrameState(
+ const Operator* op = common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = jsgraph_->common()->StateValues(0);
- Node* node0 = jsgraph_->graph()->NewNode(op0);
- return jsgraph_->graph()->NewNode(op, node0, node0, node0,
- jsgraph_->UndefinedConstant(), function,
- frame_state);
+ const Operator* op0 = common()->StateValues(0);
+ Node* node0 = graph()->NewNode(op0);
+ return graph()->NewNode(op, node0, node0, node0,
+ jsgraph()->UndefinedConstant(), function,
+ frame_state);
}
namespace {
@@ -390,7 +330,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
// not inlining recursive functions. We might want to relax that at some
// point.
- for (Node* frame_state = call.frame_state_after();
+ for (Node* frame_state = call.frame_state();
frame_state->opcode() == IrOpcode::kFrameState;
frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
@@ -414,8 +354,9 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
Zone zone(info_->isolate()->allocator());
ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
+ if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
if (!Compiler::ParseAndAnalyze(info.parse_info())) {
TRACE("Not inlining %s into %s because parsing failed\n",
@@ -433,6 +374,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
+
// Remember that we inlined this function. This needs to be called right
// after we ensure deoptimization support so that the code flusher
// does not remove the code with the deoptimization support.
@@ -446,59 +388,73 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
- // TODO(mstarzinger): We could use the temporary zone for the graph because
- // nodes are copied. This however leads to Zone-Types being allocated in the
- // wrong zone and makes the engine explode at high speeds. Explosion bad!
- Graph graph(jsgraph_->zone());
- JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
- jsgraph_->javascript(), jsgraph_->simplified(),
- jsgraph_->machine());
- AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
- graph_builder.CreateGraph(false);
-
- CopyVisitor visitor(&graph, jsgraph_->graph(), &zone);
- visitor.CopyGraph();
-
- Node* start = visitor.GetCopy(graph.start());
- Node* end = visitor.GetCopy(graph.end());
- Node* frame_state = call.frame_state_after();
- Node* new_target = jsgraph_->UndefinedConstant();
-
- // Insert nodes around the call that model the behavior required for a
- // constructor dispatch (allocate implicit receiver and check return value).
- // This models the behavior usually accomplished by our {JSConstructStub}.
-  // Note that the context has to be the caller's context (input to call node).
- Node* receiver = jsgraph_->UndefinedConstant(); // Implicit receiver.
- if (node->opcode() == IrOpcode::kJSCallConstruct &&
- NeedsImplicitReceiver(shared_info)) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* create = jsgraph_->graph()->NewNode(
- jsgraph_->javascript()->Create(), call.target(), call.new_target(),
- context, call.frame_state_before(), effect);
- NodeProperties::ReplaceEffectInput(node, create);
- // Insert a check of the return value to determine whether the return value
- // or the implicit receiver should be selected as a result of the call.
- Node* check = jsgraph_->graph()->NewNode(
- jsgraph_->javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1),
- node, context, node, start);
- Node* select = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Select(MachineRepresentation::kTagged), check, node,
- create);
- NodeProperties::ReplaceUses(node, select, check, node, node);
- NodeProperties::ReplaceValueInput(select, node, 1);
- NodeProperties::ReplaceValueInput(check, node, 0);
- NodeProperties::ReplaceEffectInput(check, node);
- receiver = create; // The implicit receiver.
+  // If the function was lazily compiled, its literals array may not yet be set up.
+ JSFunction::EnsureLiterals(function);
+
+ // Create the subgraph for the inlinee.
+ Node* start;
+ Node* end;
+ {
+ // Run the loop assignment analyzer on the inlinee.
+ AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
+ LoopAssignmentAnalysis* loop_assignment =
+ loop_assignment_analyzer.Analyze();
+
+ // Run the type hint analyzer on the inlinee.
+ TypeHintAnalyzer type_hint_analyzer(&zone);
+ TypeHintAnalysis* type_hint_analysis =
+ type_hint_analyzer.Analyze(handle(shared_info->code(), info.isolate()));
+
+ // Run the AstGraphBuilder to create the subgraph.
+ Graph::SubgraphScope scope(graph());
+ AstGraphBuilder graph_builder(&zone, &info, jsgraph(), loop_assignment,
+ type_hint_analysis);
+ graph_builder.CreateGraph(false);
+
+ // Extract the inlinee start/end nodes.
+ start = graph()->start();
+ end = graph()->end();
}
- // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
- // normal {JSCallFunction} node so that the rest of the inlining machinery
- // behaves as if we were dealing with a regular function invocation.
+ Node* frame_state = call.frame_state();
+ Node* new_target = jsgraph()->UndefinedConstant();
+
+  // Inlining {JSCallConstruct} requires some additional magic.
if (node->opcode() == IrOpcode::kJSCallConstruct) {
+ // Insert nodes around the call that model the behavior required for a
+ // constructor dispatch (allocate implicit receiver and check return value).
+ // This models the behavior usually accomplished by our {JSConstructStub}.
+    // Note that the context has to be the caller's context (input to call node).
+ Node* receiver = jsgraph()->UndefinedConstant(); // Implicit receiver.
+ if (NeedsImplicitReceiver(shared_info)) {
+ Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* create = graph()->NewNode(javascript()->Create(), call.target(),
+ call.new_target(), context,
+ frame_state_before, effect);
+ NodeProperties::ReplaceEffectInput(node, create);
+ // Insert a check of the return value to determine whether the return
+ // value or the implicit receiver should be selected as a result of the
+ // call.
+ Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ check, node, create);
+ NodeProperties::ReplaceUses(node, select, node, node, node);
+ // Fix-up inputs that have been mangled by the {ReplaceUses} call above.
+ NodeProperties::ReplaceValueInput(select, node, 1); // Fix-up input.
+ NodeProperties::ReplaceValueInput(check, node, 0); // Fix-up input.
+ receiver = create; // The implicit receiver.
+ }
+
+ // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
+ // normal {JSCallFunction} node so that the rest of the inlining machinery
+ // behaves as if we were dealing with a regular function invocation.
new_target = call.new_target(); // Retrieve new target value input.
node->RemoveInput(call.formal_arguments() + 1); // Drop new target.
- node->InsertInput(jsgraph_->graph()->zone(), 1, receiver);
+ node->InsertInput(graph()->zone(), 1, receiver);
+
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
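
The ObjectIsReceiver check and Select in the hunk above encode the ECMAScript construct rule: the value returned from a constructor body only replaces the freshly allocated implicit receiver when it is itself a receiver (an object). A standalone sketch of that selection, with a hypothetical Value type (not V8's):

struct Value {
  bool is_receiver;  // true for objects, false for primitives/undefined
};

// Result of `new F(...)`: the callee's return value wins only if it is a
// receiver; otherwise the implicit receiver allocated by JSCreate is used.
Value ConstructResult(Value callee_return, Value implicit_receiver) {
  return callee_return.is_receiver ? callee_return : implicit_receiver;
}
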
@@ -510,7 +466,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// TODO(turbofan): We might want to load the context from the JSFunction at
// runtime in case we only know the SharedFunctionInfo once we have dynamic
// type feedback in the compiler.
- Node* context = jsgraph_->Constant(handle(function->context()));
+ Node* context = jsgraph()->Constant(handle(function->context()));
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
  // passed into this node has to be the callee's context (loaded above). Note
  // also that the frame state passed to the JSConvertReceiver must be the
  // frame state_before of the call; it is not necessary to fiddle with the
  // receiver in that frame state though, as the conversion of the receiver can
  // be repeated any number of times, it's not observable.
// any number of times, it's not observable.
if (node->opcode() == IrOpcode::kJSCallFunction &&
- is_sloppy(info.language_mode()) && !shared_info->native()) {
+ is_sloppy(parse_info.language_mode()) && !shared_info->native()) {
const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+ Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* convert = jsgraph_->graph()->NewNode(
- jsgraph_->javascript()->ConvertReceiver(p.convert_mode()),
- call.receiver(), context, call.frame_state_before(), effect, start);
+ Node* convert = graph()->NewNode(
+ javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
+ context, frame_state_before, effect, start);
NodeProperties::ReplaceValueInput(node, convert, 1);
NodeProperties::ReplaceEffectInput(node, convert);
}
@@ -558,6 +515,18 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return InlineCall(node, new_target, context, frame_state, start, end);
}
+Graph* JSInliner::graph() const { return jsgraph()->graph(); }
+
+JSOperatorBuilder* JSInliner::javascript() const {
+ return jsgraph()->javascript();
+}
+
+CommonOperatorBuilder* JSInliner::common() const { return jsgraph()->common(); }
+
+SimplifiedOperatorBuilder* JSInliner::simplified() const {
+ return jsgraph()->simplified();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index d0ab7c0583..49487f5a0a 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -36,9 +36,15 @@ class JSInliner final : public AdvancedReducer {
Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
private:
- Zone* local_zone_;
+ CommonOperatorBuilder* common() const;
+ JSOperatorBuilder* javascript() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+
+ Zone* const local_zone_;
CompilationInfo* info_;
- JSGraph* jsgraph_;
+ JSGraph* const jsgraph_;
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 034ee6fd76..3324508559 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -30,16 +30,16 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
switch (f->function_id) {
- case Runtime::kInlineConstructDouble:
- return ReduceConstructDouble(node);
case Runtime::kInlineCreateIterResultObject:
return ReduceCreateIterResultObject(node);
case Runtime::kInlineDeoptimizeNow:
return ReduceDeoptimizeNow(node);
- case Runtime::kInlineDoubleHi:
- return ReduceDoubleHi(node);
- case Runtime::kInlineDoubleLo:
- return ReduceDoubleLo(node);
+ case Runtime::kInlineGeneratorClose:
+ return ReduceGeneratorClose(node);
+ case Runtime::kInlineGeneratorGetInputOrDebugPos:
+ return ReduceGeneratorGetInputOrDebugPos(node);
+ case Runtime::kInlineGeneratorGetResumeMode:
+ return ReduceGeneratorGetResumeMode(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
@@ -50,8 +50,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
- case Runtime::kInlineValueOf:
- return ReduceValueOf(node);
case Runtime::kInlineFixedArrayGet:
return ReduceFixedArrayGet(node);
case Runtime::kInlineFixedArraySet:
@@ -70,14 +68,10 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToInteger(node);
case Runtime::kInlineToLength:
return ReduceToLength(node);
- case Runtime::kInlineToName:
- return ReduceToName(node);
case Runtime::kInlineToNumber:
return ReduceToNumber(node);
case Runtime::kInlineToObject:
return ReduceToObject(node);
- case Runtime::kInlineToPrimitive:
- return ReduceToPrimitive(node);
case Runtime::kInlineToString:
return ReduceToString(node);
case Runtime::kInlineCall:
@@ -86,8 +80,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceNewObject(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
- case Runtime::kInlineGetOrdinaryHasInstance:
- return ReduceGetOrdinaryHasInstance(node);
default:
break;
}
@@ -105,29 +97,16 @@ Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
- Node* high = NodeProperties::GetValueInput(node, 0);
- Node* low = NodeProperties::GetValueInput(node, 1);
- Node* value =
- graph()->NewNode(machine()->Float64InsertHighWord32(),
- graph()->NewNode(machine()->Float64InsertLowWord32(),
- jsgraph()->Constant(0), low),
- high);
- ReplaceWithValue(node, value);
- return Replace(value);
-}
-
-
Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
if (mode() != kDeoptimizationEnabled) return NoChange();
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node);
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect, control);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -136,24 +115,39 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
return Changed(node);
}
+Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const closed = jsgraph()->Constant(JSGeneratorObject::kGeneratorClosed);
+ Node* const undefined = jsgraph()->UndefinedConstant();
+ Operator const* const op = simplified()->StoreField(
+ AccessBuilder::ForJSGeneratorObjectContinuation());
-Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
- // Tell the compiler to assume number input.
- Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
- node->InputAt(0), graph()->start());
- node->ReplaceInput(0, renamed);
- return Change(node, machine()->Float64ExtractHighWord32());
+ ReplaceWithValue(node, undefined, node);
+ NodeProperties::RemoveType(node);
+ return Change(node, op, generator, closed, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op = simplified()->LoadField(
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos());
-Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
- // Tell the compiler to assume number input.
- Node* renamed = graph()->NewNode(common()->Guard(Type::Number()),
- node->InputAt(0), graph()->start());
- node->ReplaceInput(0, renamed);
- return Change(node, machine()->Float64ExtractLowWord32());
+ return Change(node, op, generator, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectResumeMode());
+
+ return Change(node, op, generator, effect, control);
+}
Reduction JSIntrinsicLowering::ReduceIsInstanceType(
Node* node, InstanceType instance_type) {
@@ -179,8 +173,8 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), value,
effect, if_false),
effect, if_false);
- Node* vfalse = graph()->NewNode(machine()->Word32Equal(), efalse,
- jsgraph()->Int32Constant(instance_type));
+ Node* vfalse = graph()->NewNode(simplified()->NumberEqual(), efalse,
+ jsgraph()->Constant(instance_type));
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -204,70 +198,6 @@ Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
- // if (%_IsSmi(value)) {
- // return value;
- // } else if (%_GetInstanceType(%_GetMap(value)) == JS_VALUE_TYPE) {
- // return %_GetValue(value);
- // } else {
- // return value;
- // }
- const Operator* const merge_op = common()->Merge(2);
- const Operator* const ephi_op = common()->EffectPhi(2);
- const Operator* const phi_op =
- common()->Phi(MachineRepresentation::kTagged, 2);
-
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
- Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = value;
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0;
- Node* vfalse0;
- {
- Node* check1 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, effect, if_false0),
- effect, if_false0),
- jsgraph()->Int32Constant(JS_VALUE_TYPE));
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForValue()),
- value, effect, if_true1);
- Node* vtrue1 = etrue1;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = effect;
- Node* vfalse1 = value;
-
- Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
- efalse0 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
- vfalse0 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
- }
-
- Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
-
- // Replace all effect uses of {node} with the {ephi0}.
- Node* ephi0 = graph()->NewNode(ephi_op, etrue0, efalse0, merge0);
- ReplaceWithValue(node, node, ephi0);
-
- // Turn the {node} into a Phi.
- return Change(node, phi_op, vtrue0, vfalse0, merge0);
-}
-
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
// Replace all effect uses of {node} with the effect dependency.
RelaxEffectsAndControls(node);
@@ -346,12 +276,6 @@ Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceToName(Node* node) {
- NodeProperties::ChangeOp(node, javascript()->ToName());
- return Changed(node);
-}
-
-
Reduction JSIntrinsicLowering::ReduceToNumber(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToNumber());
return Changed(node);
@@ -370,17 +294,6 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceToPrimitive(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Type* value_type = NodeProperties::GetType(value);
- if (value_type->Is(Type::Primitive())) {
- ReplaceWithValue(node, value);
- return Replace(value);
- }
- return NoChange();
-}
-
-
Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
NodeProperties::ChangeOp(node, javascript()->ToString());
return Changed(node);
@@ -397,15 +310,7 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
}
Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
- Node* constructor = NodeProperties::GetValueInput(node, 0);
- Node* new_target = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* value = graph()->NewNode(javascript()->Create(), constructor,
- new_target, context, frame_state, effect);
- ReplaceWithValue(node, value, value);
- return Replace(value);
+ return Change(node, CodeFactory::FastNewObject(isolate()), 0);
}
Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
@@ -419,17 +324,6 @@ Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
active_function_map, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceGetOrdinaryHasInstance(Node* node) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- return Change(node, javascript()->LoadContext(
- 0, Context::ORDINARY_HAS_INSTANCE_INDEX, true),
- native_context, context, effect);
-}
-
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
@@ -466,12 +360,6 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
}
-Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
- ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect);
- return Changed(node);
-}
-
-
Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
int stack_parameter_count) {
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
@@ -498,12 +386,6 @@ JSOperatorBuilder* JSIntrinsicLowering::javascript() const {
return jsgraph_->javascript();
}
-
-MachineOperatorBuilder* JSIntrinsicLowering::machine() const {
- return jsgraph()->machine();
-}
-
-
SimplifiedOperatorBuilder* JSIntrinsicLowering::simplified() const {
return jsgraph()->simplified();
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index a43ed01166..6835a52c7e 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -21,7 +21,6 @@ namespace compiler {
class CommonOperatorBuilder;
class JSOperatorBuilder;
class JSGraph;
-class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
@@ -37,15 +36,14 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
- Reduction ReduceConstructDouble(Node* node);
Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
- Reduction ReduceDoubleHi(Node* node);
- Reduction ReduceDoubleLo(Node* node);
+ Reduction ReduceGeneratorClose(Node* node);
+ Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
+ Reduction ReduceGeneratorGetResumeMode(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
- Reduction ReduceValueOf(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);
Reduction ReduceRegExpConstructResult(Node* node);
@@ -55,22 +53,18 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceSubString(Node* node);
Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
- Reduction ReduceToName(Node* node);
Reduction ReduceToNumber(Node* node);
Reduction ReduceToObject(Node* node);
- Reduction ReduceToPrimitive(Node* node);
Reduction ReduceToString(Node* node);
Reduction ReduceCall(Node* node);
Reduction ReduceNewObject(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
- Reduction ReduceGetOrdinaryHasInstance(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
Node* d);
- Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
Reduction Change(Node* node, Callable const& callable,
int stack_parameter_count);
@@ -79,7 +73,6 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Isolate* isolate() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
- MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
DeoptimizationMode mode() const { return mode_; }
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index d1353d20be..b501b7ac25 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -15,7 +15,6 @@
#include "src/compiler/node-matchers.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/type-cache.h"
#include "src/type-feedback-vector.h"
@@ -23,6 +22,39 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+bool HasNumberMaps(MapList const& maps) {
+ for (auto map : maps) {
+ if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
+ }
+ return false;
+}
+
+bool HasOnlyJSArrayMaps(MapList const& maps) {
+ for (auto map : maps) {
+ if (!map->IsJSArrayMap()) return false;
+ }
+ return true;
+}
+
+bool HasOnlyNumberMaps(MapList const& maps) {
+ for (auto map : maps) {
+ if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
+ }
+ return true;
+}
+
+template <typename T>
+bool HasOnlyStringMaps(T const& maps) {
+ for (auto map : maps) {
+ if (!map->IsStringMap()) return false;
+ }
+ return true;
+}
+
+} // namespace
+
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, Flags flags,
MaybeHandle<Context> native_context, CompilationDependencies* dependencies,
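
// The helpers introduced in the hunk above are simple quantifiers over
// the receiver maps. A self-contained sketch of the same pattern, with
// a toy Map type standing in for V8's Map class:
#include <algorithm>
#include <vector>

enum InstanceType { HEAP_NUMBER_TYPE, STRING_TYPE, JS_ARRAY_TYPE };

struct Map {
  InstanceType instance_type;
  bool IsStringMap() const { return instance_type == STRING_TYPE; }
  bool IsJSArrayMap() const { return instance_type == JS_ARRAY_TYPE; }
};

using MapList = std::vector<Map>;

// "Some map is a HeapNumber map" -- existential quantifier.
bool HasNumberMaps(MapList const& maps) {
  return std::any_of(maps.begin(), maps.end(), [](Map const& m) {
    return m.instance_type == HEAP_NUMBER_TYPE;
  });
}

// "Every map is a String map" -- universal quantifier.
bool HasOnlyStringMaps(MapList const& maps) {
  return std::all_of(maps.begin(), maps.end(),
                     [](Map const& m) { return m.IsStringMap(); });
}
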
@@ -79,7 +111,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state_eager = NodeProperties::FindFrameStateBefore(node);
+ Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -99,357 +133,224 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
return NoChange();
}
- // Nothing to do if we have no non-deprecated maps.
- if (access_infos.empty()) return NoChange();
+ // TODO(turbofan): Add support for inlining into try blocks.
+ if (NodeProperties::IsExceptionalCall(node) ||
+ !(flags() & kAccessorInliningEnabled)) {
+ for (auto access_info : access_infos) {
+ if (access_info.IsAccessorConstant()) return NoChange();
+ }
+ }
- // The final states for every polymorphic branch. We join them with
- // Merge+Phi+EffectPhi at the bottom.
- ZoneVector<Node*> values(zone());
- ZoneVector<Node*> effects(zone());
- ZoneVector<Node*> controls(zone());
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) {
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
+ }
// Ensure that {index} matches the specified {name} (if {index} is given).
if (index != nullptr) {
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(Type::Name()),
- index, jsgraph()->HeapConstant(name));
- control = graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- effect, control);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), index,
+ jsgraph()->HeapConstant(name));
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
}
- // Check if {receiver} may be a number.
- bool receiverissmi_possible = false;
- for (PropertyAccessInfo const& access_info : access_infos) {
- if (access_info.receiver_type()->Is(Type::Number())) {
- receiverissmi_possible = true;
- break;
+ // Check for the monomorphic cases.
+ if (access_infos.size() == 1) {
+ PropertyAccessInfo access_info = access_infos.front();
+ if (HasOnlyStringMaps(access_info.receiver_maps())) {
+ // Monomorphic string access (ignoring the fact that there are multiple
+ // String maps).
+ receiver = effect = graph()->NewNode(simplified()->CheckString(),
+ receiver, effect, control);
+ } else if (HasOnlyNumberMaps(access_info.receiver_maps())) {
+ // Monomorphic number access (we also deal with Smis here).
+ receiver = effect = graph()->NewNode(simplified()->CheckNumber(),
+ receiver, effect, control);
+ } else {
+ // Monomorphic property access.
+ effect = BuildCheckTaggedPointer(receiver, effect, control);
+ effect = BuildCheckMaps(receiver, effect, control,
+ access_info.receiver_maps());
}
- }
- // Ensure that {receiver} is a heap object.
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* receiverissmi_control = nullptr;
- Node* receiverissmi_effect = effect;
- if (receiverissmi_possible) {
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
- control = graph()->NewNode(common()->IfFalse(), branch);
- receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
- receiverissmi_effect = effect;
+ // Generate the actual property access.
+ ValueEffectControl continuation = BuildPropertyAccess(
+ receiver, value, context, frame_state_lazy, effect, control, name,
+ native_context, access_info, access_mode);
+ value = continuation.value();
+ effect = continuation.effect();
+ control = continuation.control();
} else {
- control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- effect, control);
- }
-
- // Load the {receiver} map. The resulting effect is the dominating effect for
- // all (polymorphic) branches.
- Node* receiver_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
-
- // Generate code for the various different property access patterns.
- Node* fallthrough_control = control;
- for (size_t j = 0; j < access_infos.size(); ++j) {
- PropertyAccessInfo const& access_info = access_infos[j];
- Node* this_value = value;
- Node* this_receiver = receiver;
- Node* this_effect = effect;
- Node* this_control;
-
- // Perform map check on {receiver}.
- Type* receiver_type = access_info.receiver_type();
- if (receiver_type->Is(Type::String())) {
- // Emit an instance type check for strings.
- Node* receiver_instance_type = this_effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- receiver_map, this_effect, fallthrough_control);
- Node* check =
- graph()->NewNode(machine()->Uint32LessThan(), receiver_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- if (j == access_infos.size() - 1) {
- this_control =
- graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- this_effect, fallthrough_control);
- fallthrough_control = nullptr;
- } else {
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
- this_control = graph()->NewNode(common()->IfTrue(), branch);
- }
- } else {
- // Emit a (sequence of) map checks for other {receiver}s.
- ZoneVector<Node*> this_controls(zone());
- ZoneVector<Node*> this_effects(zone());
- int num_classes = access_info.receiver_type()->NumClasses();
- for (auto i = access_info.receiver_type()->Classes(); !i.Done();
- i.Advance()) {
- DCHECK_LT(0, num_classes);
- Handle<Map> map = i.Current();
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
- receiver_map, jsgraph()->Constant(map));
- if (--num_classes == 0 && j == access_infos.size() - 1) {
- this_controls.push_back(
- graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- this_effect, fallthrough_control));
- this_effects.push_back(this_effect);
- fallthrough_control = nullptr;
- } else {
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
- this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
- this_effects.push_back(this_effect);
- }
- }
-
- // The Number case requires special treatment to also deal with Smis.
- if (receiver_type->Is(Type::Number())) {
- // Join this check with the "receiver is smi" check above.
- DCHECK_NOT_NULL(receiverissmi_effect);
- DCHECK_NOT_NULL(receiverissmi_control);
- this_effects.push_back(receiverissmi_effect);
- this_controls.push_back(receiverissmi_control);
- receiverissmi_effect = receiverissmi_control = nullptr;
+ // The final states for every polymorphic branch. We join them with
+ // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // Check if {receiver} may be a number.
+ bool receiverissmi_possible = false;
+ for (PropertyAccessInfo const& access_info : access_infos) {
+ if (HasNumberMaps(access_info.receiver_maps())) {
+ receiverissmi_possible = true;
+ break;
}
-
- // Create dominating Merge+EffectPhi for this {receiver} type.
- int const this_control_count = static_cast<int>(this_controls.size());
- this_control =
- (this_control_count == 1)
- ? this_controls.front()
- : graph()->NewNode(common()->Merge(this_control_count),
- this_control_count, &this_controls.front());
- this_effects.push_back(this_control);
- int const this_effect_count = static_cast<int>(this_effects.size());
- this_effect =
- (this_control_count == 1)
- ? this_effects.front()
- : graph()->NewNode(common()->EffectPhi(this_control_count),
- this_effect_count, &this_effects.front());
}
- // Determine actual holder and perform prototype chain checks.
- Handle<JSObject> holder;
- if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(receiver_type, native_context, holder);
+ // Ensure that {receiver} is a heap object.
+ Node* receiverissmi_control = nullptr;
+ Node* receiverissmi_effect = effect;
+ if (receiverissmi_possible) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ control = graph()->NewNode(common()->IfFalse(), branch);
+ receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
+ receiverissmi_effect = effect;
+ } else {
+ effect = BuildCheckTaggedPointer(receiver, effect, control);
}
- // Generate the actual property access.
- if (access_info.IsNotFound()) {
- DCHECK_EQ(AccessMode::kLoad, access_mode);
- this_value = jsgraph()->UndefinedConstant();
- } else if (access_info.IsDataConstant()) {
- this_value = jsgraph()->Constant(access_info.constant());
- if (access_mode == AccessMode::kStore) {
- Node* check = graph()->NewNode(
- simplified()->ReferenceEqual(Type::Tagged()), value, this_value);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- }
- } else {
- DCHECK(access_info.IsDataField());
- FieldIndex const field_index = access_info.field_index();
- FieldCheck const field_check = access_info.field_check();
- Type* const field_type = access_info.field_type();
- switch (field_check) {
- case FieldCheck::kNone:
- break;
- case FieldCheck::kJSArrayBufferViewBufferNotNeutered: {
- Node* this_buffer = this_effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewBuffer()),
- this_receiver, this_effect, this_control);
- Node* this_buffer_bit_field = this_effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferBitField()),
- this_buffer, this_effect, this_control);
- Node* check = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), this_buffer_bit_field,
- jsgraph()->Int32Constant(
- 1 << JSArrayBuffer::WasNeutered::kShift)),
- jsgraph()->Int32Constant(0));
- this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- this_effect, this_control);
- break;
- }
- }
- if (access_mode == AccessMode::kLoad &&
- access_info.holder().ToHandle(&holder)) {
- this_receiver = jsgraph()->Constant(holder);
- }
- Node* this_storage = this_receiver;
- if (!field_index.is_inobject()) {
- this_storage = this_effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
- this_storage, this_effect, this_control);
- }
- FieldAccess field_access = {kTaggedBase, field_index.offset(), name,
- field_type, MachineType::AnyTagged()};
- if (access_mode == AccessMode::kLoad) {
- if (field_type->Is(Type::UntaggedFloat64())) {
- if (!field_index.is_inobject() || field_index.is_hidden_field() ||
- !FLAG_unbox_double_fields) {
- this_storage = this_effect =
- graph()->NewNode(simplified()->LoadField(field_access),
- this_storage, this_effect, this_control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- }
- field_access.machine_type = MachineType::Float64();
- }
- this_value = this_effect =
- graph()->NewNode(simplified()->LoadField(field_access),
- this_storage, this_effect, this_control);
- } else {
- DCHECK_EQ(AccessMode::kStore, access_mode);
- if (field_type->Is(Type::UntaggedFloat64())) {
+ // Load the {receiver} map. The resulting effect is the dominating effect
+ // for all (polymorphic) branches.
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+
+ // Generate code for the various different property access patterns.
+ Node* fallthrough_control = control;
+ for (size_t j = 0; j < access_infos.size(); ++j) {
+ PropertyAccessInfo const& access_info = access_infos[j];
+ Node* this_value = value;
+ Node* this_receiver = receiver;
+ Node* this_effect = effect;
+ Node* this_control;
+
+ // Perform map check on {receiver}.
+ MapList const& receiver_maps = access_info.receiver_maps();
+ {
+ // Emit a (sequence of) map checks for other {receiver}s.
+ ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
+ size_t num_classes = receiver_maps.size();
+ for (auto map : receiver_maps) {
+ DCHECK_LT(0u, num_classes);
Node* check =
- graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
- this_control =
- graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(Type::Number()),
- this_value, this_control);
-
- if (!field_index.is_inobject() || field_index.is_hidden_field() ||
- !FLAG_unbox_double_fields) {
- if (access_info.HasTransitionMap()) {
- // Allocate a MutableHeapNumber for the new property.
- Callable callable =
- CodeFactory::AllocateMutableHeapNumber(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kNoThrow);
- Node* this_box = this_effect = graph()->NewNode(
- common()->Call(desc),
- jsgraph()->HeapConstant(callable.code()),
- jsgraph()->NoContextConstant(), this_effect, this_control);
- this_effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
- this_box, this_value, this_effect, this_control);
- this_value = this_box;
-
- field_access.type = Type::TaggedPointer();
- } else {
- // We just store directly to the MutableHeapNumber.
- this_storage = this_effect =
- graph()->NewNode(simplified()->LoadField(field_access),
- this_storage, this_effect, this_control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- field_access.machine_type = MachineType::Float64();
- }
+ graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+ jsgraph()->Constant(map));
+ if (--num_classes == 0 && j == access_infos.size() - 1) {
+ check = graph()->NewNode(simplified()->CheckIf(), check,
+ this_effect, fallthrough_control);
+ this_controls.push_back(fallthrough_control);
+ this_effects.push_back(check);
+ fallthrough_control = nullptr;
} else {
- // Unboxed double field, we store directly to the field.
- field_access.machine_type = MachineType::Float64();
+ Node* branch = graph()->NewNode(common()->Branch(), check,
+ fallthrough_control);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
+ this_controls.push_back(
+ graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(this_effect);
}
- } else if (field_type->Is(Type::TaggedSigned())) {
- Node* check =
- graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- this_control =
- graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
- this_value, this_control);
- } else if (field_type->Is(Type::TaggedPointer())) {
- Node* check =
- graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- this_effect, this_control);
- if (field_type->NumClasses() == 1) {
- // Emit a map check for the value.
- Node* this_value_map = this_effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), this_value,
- this_effect, this_control);
- Node* check = graph()->NewNode(
- simplified()->ReferenceEqual(Type::Internal()), this_value_map,
- jsgraph()->Constant(field_type->Classes().Current()));
- this_control =
- graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- } else {
- DCHECK_EQ(0, field_type->NumClasses());
- }
- } else {
- DCHECK(field_type->Is(Type::Tagged()));
}
- Handle<Map> transition_map;
- if (access_info.transition_map().ToHandle(&transition_map)) {
- this_effect = graph()->NewNode(common()->BeginRegion(), this_effect);
- this_effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), this_receiver,
- jsgraph()->Constant(transition_map), this_effect, this_control);
+
+ // The Number case requires special treatment to also deal with Smis.
+ if (HasNumberMaps(receiver_maps)) {
+ // Join this check with the "receiver is smi" check above.
+ DCHECK_NOT_NULL(receiverissmi_effect);
+ DCHECK_NOT_NULL(receiverissmi_control);
+ this_effects.push_back(receiverissmi_effect);
+ this_controls.push_back(receiverissmi_control);
+ receiverissmi_effect = receiverissmi_control = nullptr;
}
- this_effect = graph()->NewNode(simplified()->StoreField(field_access),
- this_storage, this_value, this_effect,
- this_control);
- if (access_info.HasTransitionMap()) {
+
+ // Create single chokepoint for the control.
+ int const this_control_count = static_cast<int>(this_controls.size());
+ if (this_control_count == 1) {
+ this_control = this_controls.front();
+ this_effect = this_effects.front();
+ } else {
+ this_control =
+ graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ this_effect =
+ graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_control_count + 1, &this_effects.front());
+
+ // TODO(turbofan): The effect/control linearization will not find a
+ // FrameState after the EffectPhi that is generated above.
this_effect =
- graph()->NewNode(common()->FinishRegion(),
- jsgraph()->UndefinedConstant(), this_effect);
+ graph()->NewNode(common()->Checkpoint(), frame_state_eager,
+ this_effect, this_control);
}
}
- }
- // Remember the final state for this property access.
- values.push_back(this_value);
- effects.push_back(this_effect);
- controls.push_back(this_control);
- }
+ // Generate the actual property access.
+ ValueEffectControl continuation = BuildPropertyAccess(
+ this_receiver, this_value, context, frame_state_lazy, this_effect,
+ this_control, name, native_context, access_info, access_mode);
+ values.push_back(continuation.value());
+ effects.push_back(continuation.effect());
+ controls.push_back(continuation.control());
+ }
- DCHECK_NULL(fallthrough_control);
+ DCHECK_NULL(fallthrough_control);
- // Generate the final merge point for all (polymorphic) branches.
- int const control_count = static_cast<int>(controls.size());
- if (control_count == 0) {
- value = effect = control = jsgraph()->Dead();
- } else if (control_count == 1) {
- value = values.front();
- effect = effects.front();
- control = controls.front();
- } else {
- control = graph()->NewNode(common()->Merge(control_count), control_count,
- &controls.front());
- values.push_back(control);
- value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, control_count),
- control_count + 1, &values.front());
- effects.push_back(control);
- effect = graph()->NewNode(common()->EffectPhi(control_count),
- control_count + 1, &effects.front());
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count), control_count,
+ &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
+ }
}
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
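
// A self-contained sketch of the Merge+Phi+EffectPhi join used for the
// polymorphic case above. Node/NewNode are toy stand-ins for TurboFan's
// graph classes; the point is the wiring convention: Phi and EffectPhi
// take one input per branch plus the Merge as their final input. (The
// zero-branch case, which produces Dead nodes in the real code, is
// omitted here.)
#include <string>
#include <vector>

struct Node {
  std::string op;
  std::vector<Node*> inputs;
};

Node* NewNode(std::string op, std::vector<Node*> inputs) {
  return new Node{std::move(op), std::move(inputs)};
}

void JoinBranches(std::vector<Node*> values, std::vector<Node*> effects,
                  std::vector<Node*> controls, Node*& value, Node*& effect,
                  Node*& control) {
  if (controls.size() == 1) {  // single branch: nothing to merge
    value = values.front();
    effect = effects.front();
    control = controls.front();
    return;
  }
  control = NewNode("Merge", controls);
  values.push_back(control);   // value Phi's last input is the Merge
  value = NewNode("Phi", values);
  effects.push_back(control);  // EffectPhi's last input is the Merge
  effect = NewNode("EffectPhi", effects);
}
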
-
-Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
AccessMode access_mode, LanguageMode language_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed);
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
- // TODO(turbofan): Implement all eager bailout points correctly in
- // the graph builder.
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
- return ReduceSoftDeoptimize(node);
- }
+ return ReduceSoftDeoptimize(
+ node,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
}
return NoChange();
}
// Extract receiver maps from the IC using the {nexus}.
MapHandleList receiver_maps;
- if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
- DCHECK_LT(0, receiver_maps.length());
+ if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+ return NoChange();
+ } else if (receiver_maps.length() == 0) {
+ if ((flags() & kDeoptimizationEnabled) &&
+ (flags() & kBailoutOnUninitialized)) {
+ return ReduceSoftDeoptimize(
+ node,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
+ }
+ return NoChange();
+ }
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
@@ -460,15 +361,40 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
Node* const value = jsgraph()->Dead();
+ // Check if we have a constant receiver.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ // Optimize "prototype" property of functions.
+ if (m.Value()->IsJSFunction() &&
+ p.name().is_identical_to(factory()->prototype_string())) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ if (function->has_initial_map()) {
+ // We need to add a code dependency on the initial map of the
+ // {function} in order to be notified about changes to the
+ // "prototype" of {function}, so it doesn't make sense to
+ // continue unless deoptimization is enabled.
+ if (flags() & kDeoptimizationEnabled) {
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Handle<Object> prototype(initial_map->prototype(), isolate());
+ Node* value = jsgraph()->Constant(prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+ }
+ }
+
// Extract receiver maps from the LOAD_IC using the LoadICNexus.
if (!p.feedback().IsValid()) return NoChange();
LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kLoad,
- p.language_mode());
+ return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+ AccessMode::kLoad, p.language_mode());
}
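
// A toy model of the "prototype" fast path added above: when the
// receiver is a compile-time-known function whose initial map is
// already set, the load folds to a constant, guarded (in real V8) by a
// code dependency that deopts if the initial map ever changes. Types
// and names here are illustrative only.
#include <optional>

struct MapModel { const void* prototype; };

struct FunctionModel {
  bool has_initial_map;
  MapModel initial_map;
};

std::optional<const void*> FoldPrototypeLoad(const FunctionModel& f,
                                             bool deoptimization_enabled) {
  // Without deoptimization support we cannot register the dependency
  // that protects the folded constant, so we must not fold.
  if (!f.has_initial_map || !deoptimization_enabled) return std::nullopt;
  return f.initial_map.prototype;  // constant-folded f.prototype
}
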
@@ -482,8 +408,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
// Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kStore,
- p.language_mode());
+ return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+ AccessMode::kStore, p.language_mode());
}
@@ -494,404 +420,298 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
- // TODO(bmeurer): Add support for non-standard stores.
- if (store_mode != STANDARD_STORE) return NoChange();
+ // Check for keyed access to strings.
+ if (HasOnlyStringMaps(receiver_maps)) {
+ // Strings are immutable in JavaScript.
+ if (access_mode == AccessMode::kStore) return NoChange();
- // Retrieve the native context from the given {node}.
- Handle<Context> native_context;
- if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+ // Ensure that the {receiver} is actually a String.
+ receiver = effect = graph()->NewNode(simplified()->CheckString(), receiver,
+ effect, control);
- // Compute element access infos for the receiver maps.
- AccessInfoFactory access_info_factory(dependencies(), native_context,
- graph()->zone());
- ZoneVector<ElementAccessInfo> access_infos(zone());
- if (!access_info_factory.ComputeElementAccessInfos(receiver_maps, access_mode,
- &access_infos)) {
- return NoChange();
- }
+ // Determine the {receiver} length.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
+ effect, control);
- // Nothing to do if we have no non-deprecated maps.
- if (access_infos.empty()) return NoChange();
-
- // The final states for every polymorphic branch. We join them with
- // Merge+Phi+EffectPhi at the bottom.
- ZoneVector<Node*> values(zone());
- ZoneVector<Node*> effects(zone());
- ZoneVector<Node*> controls(zone());
-
- // Ensure that {receiver} is a heap object.
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- control = graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- effect, control);
-
- // Load the {receiver} map. The resulting effect is the dominating effect for
- // all (polymorphic) branches.
- Node* receiver_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
-
- // Generate code for the various different element access patterns.
- Node* fallthrough_control = control;
- for (size_t j = 0; j < access_infos.size(); ++j) {
- ElementAccessInfo const& access_info = access_infos[j];
- Node* this_receiver = receiver;
- Node* this_value = value;
- Node* this_index = index;
- Node* this_effect;
- Node* this_control;
-
- // Perform map check on {receiver}.
- Type* receiver_type = access_info.receiver_type();
- bool receiver_is_jsarray = true;
- {
- ZoneVector<Node*> this_controls(zone());
- ZoneVector<Node*> this_effects(zone());
- size_t num_transitions = access_info.transitions().size();
- int num_classes = access_info.receiver_type()->NumClasses();
- for (auto i = access_info.receiver_type()->Classes(); !i.Done();
- i.Advance()) {
- DCHECK_LT(0, num_classes);
- Handle<Map> map = i.Current();
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
- receiver_map, jsgraph()->Constant(map));
- if (--num_classes == 0 && num_transitions == 0 &&
- j == access_infos.size() - 1) {
- // Last map check on the fallthrough control path, do a conditional
- // eager deoptimization exit here.
- // TODO(turbofan): This is ugly as hell! We should probably introduce
- // macro-ish operators for property access that encapsulate this whole
- // mess.
- this_controls.push_back(graph()->NewNode(common()->DeoptimizeUnless(),
- check, frame_state, effect,
- fallthrough_control));
- fallthrough_control = nullptr;
- } else {
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- this_controls.push_back(graph()->NewNode(common()->IfTrue(), branch));
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
- }
- this_effects.push_back(effect);
- if (!map->IsJSArrayMap()) receiver_is_jsarray = false;
- }
-
- // Generate possible elements kind transitions.
- for (auto transition : access_info.transitions()) {
- DCHECK_LT(0u, num_transitions);
- Handle<Map> transition_source = transition.first;
- Handle<Map> transition_target = transition.second;
- Node* transition_control;
- Node* transition_effect = effect;
-
- // Check if {receiver} has the specified {transition_source} map.
- Node* check = graph()->NewNode(
- simplified()->ReferenceEqual(Type::Any()), receiver_map,
- jsgraph()->HeapConstant(transition_source));
- if (--num_transitions == 0 && j == access_infos.size() - 1) {
- transition_control =
- graph()->NewNode(common()->DeoptimizeUnless(), check, frame_state,
- transition_effect, fallthrough_control);
- fallthrough_control = nullptr;
- } else {
- Node* branch =
- graph()->NewNode(common()->Branch(), check, fallthrough_control);
- fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
- transition_control = graph()->NewNode(common()->IfTrue(), branch);
- }
+ // Ensure that {index} is less than {receiver} length.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ length, effect, control);
- // Migrate {receiver} from {transition_source} to {transition_target}.
- if (IsSimpleMapChangeTransition(transition_source->elements_kind(),
- transition_target->elements_kind())) {
- // In-place migration, just store the {transition_target} map.
- transition_effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->HeapConstant(transition_target), transition_effect,
- transition_control);
- } else {
- // Instance migration, let the stub deal with the {receiver}.
- TransitionElementsKindStub stub(isolate(),
- transition_source->elements_kind(),
- transition_target->elements_kind(),
- transition_source->IsJSArrayMap());
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 0,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
- transition_effect = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(stub.GetCode()),
- receiver, jsgraph()->HeapConstant(transition_target), context,
- frame_state, transition_effect, transition_control);
- }
- this_controls.push_back(transition_control);
- this_effects.push_back(transition_effect);
- }
-
- // Create single chokepoint for the control.
- int const this_control_count = static_cast<int>(this_controls.size());
- if (this_control_count == 1) {
- this_control = this_controls.front();
- this_effect = this_effects.front();
- } else {
- this_control =
- graph()->NewNode(common()->Merge(this_control_count),
- this_control_count, &this_controls.front());
- this_effects.push_back(this_control);
- this_effect =
- graph()->NewNode(common()->EffectPhi(this_control_count),
- this_control_count + 1, &this_effects.front());
- }
- }
+ // Load the character from the {receiver}.
+ value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver, index,
+ control);
- // Certain stores need a prototype chain check because shape changes
- // could allow callbacks on elements in the prototype chain that are
- // not compatible with (monomorphic) keyed stores.
- Handle<JSObject> holder;
- if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(receiver_type, native_context, holder);
+ // Return it as a single character string.
+ value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+ } else {
+ // Retrieve the native context from the given {node}.
+ Handle<Context> native_context;
+ if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
+
+ // Compute element access infos for the receiver maps.
+ AccessInfoFactory access_info_factory(dependencies(), native_context,
+ graph()->zone());
+ ZoneVector<ElementAccessInfo> access_infos(zone());
+ if (!access_info_factory.ComputeElementAccessInfos(
+ receiver_maps, access_mode, &access_infos)) {
+ return NoChange();
}
- // Check that the {index} is actually a Number.
- if (!NumberMatcher(this_index).HasValue()) {
- Node* check =
- graph()->NewNode(simplified()->ObjectIsNumber(), this_index);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- this_index = graph()->NewNode(common()->Guard(Type::Number()), this_index,
- this_control);
+ // Nothing to do if we have no non-deprecated maps.
+ if (access_infos.empty()) {
+ return ReduceSoftDeoptimize(
+ node,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
}
- // Convert the {index} to an unsigned32 value and check if the result is
- // equal to the original {index}.
- if (!NumberMatcher(this_index).IsInRange(0.0, kMaxUInt32)) {
- Node* this_index32 =
- graph()->NewNode(simplified()->NumberToUint32(), this_index);
- Node* check = graph()->NewNode(simplified()->NumberEqual(), this_index32,
- this_index);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- this_index = this_index32;
- }
+ // For holey stores or growing stores, we need to check that the prototype
+ // chain contains no setters for elements, and we need to guard those checks
+ // via code dependencies on the relevant prototype maps.
+ if (access_mode == AccessMode::kStore) {
+ // TODO(turbofan): We could have a fast path here that checks for the
+ // common case of Array or Object prototype only and therefore avoids
+ // the zone allocation of this vector.
+ ZoneVector<Handle<Map>> prototype_maps(zone());
+ for (ElementAccessInfo const& access_info : access_infos) {
+ for (Handle<Map> receiver_map : access_info.receiver_maps()) {
+ // If the {receiver_map} has a prototype and its elements backing
+ // store is either holey, or we have a potentially growing store,
+ // then we need to check that all prototypes have stable maps with
+ // fast elements (and we need to guard against changes to that below).
+ if (IsHoleyElementsKind(receiver_map->elements_kind()) ||
+ IsGrowStoreMode(store_mode)) {
+ // Make sure all prototypes are stable and have fast elements.
+ for (Handle<Map> map = receiver_map;;) {
+ Handle<Object> map_prototype(map->prototype(), isolate());
+ if (map_prototype->IsNull(isolate())) break;
+ if (!map_prototype->IsJSObject()) return NoChange();
+ map = handle(Handle<JSObject>::cast(map_prototype)->map(),
+ isolate());
+ if (!map->is_stable()) return NoChange();
+ if (!IsFastElementsKind(map->elements_kind())) return NoChange();
+ prototype_maps.push_back(map);
+ }
+ }
+ }
+ }
- // TODO(bmeurer): We currently specialize based on elements kind. We should
- // also be able to properly support strings and other JSObjects here.
- ElementsKind elements_kind = access_info.elements_kind();
-
- // Load the elements for the {receiver}.
- Node* this_elements = this_effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- this_receiver, this_effect, this_control);
-
- // Don't try to store to a copy-on-write backing store.
- if (access_mode == AccessMode::kStore &&
- IsFastSmiOrObjectElementsKind(elements_kind)) {
- Node* this_elements_map = this_effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- this_elements, this_effect, this_control);
- Node* check = graph()->NewNode(
- simplified()->ReferenceEqual(Type::Any()), this_elements_map,
- jsgraph()->HeapConstant(factory()->fixed_array_map()));
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
+ // Install dependencies on the relevant prototype maps.
+ for (Handle<Map> prototype_map : prototype_maps) {
+ dependencies()->AssumeMapStable(prototype_map);
+ }
}
- // Load the length of the {receiver}.
- Node* this_length = this_effect =
- receiver_is_jsarray
- ? graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(elements_kind)),
- this_receiver, this_effect, this_control)
- : graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- this_elements, this_effect, this_control);
-
- // Check that the {index} is in the valid range for the {receiver}.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), this_index,
- this_length);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
+ // Ensure that {receiver} is a heap object.
+ effect = BuildCheckTaggedPointer(receiver, effect, control);
- // Compute the element access.
- Type* element_type = Type::Any();
- MachineType element_machine_type = MachineType::AnyTagged();
- if (IsFastDoubleElementsKind(elements_kind)) {
- element_type = Type::Number();
- element_machine_type = MachineType::Float64();
- } else if (IsFastSmiElementsKind(elements_kind)) {
- element_type = type_cache_.kSmi;
- }
- ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type};
+ // Check for the monomorphic case.
+ if (access_infos.size() == 1) {
+ ElementAccessInfo access_info = access_infos.front();
- // Access the actual element.
- // TODO(bmeurer): Refactor this into separate methods or even a separate
- // class that deals with the elements access.
- if (access_mode == AccessMode::kLoad) {
- // Compute the real element access type, which includes the hole in case
- // of holey backing stores.
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
- element_access.type = Type::Union(
- element_type,
- Type::Constant(factory()->the_hole_value(), graph()->zone()),
- graph()->zone());
+ // Perform possible elements kind transitions.
+ for (auto transition : access_info.transitions()) {
+ Handle<Map> const transition_source = transition.first;
+ Handle<Map> const transition_target = transition.second;
+ effect = graph()->NewNode(
+ simplified()->TransitionElementsKind(
+ IsSimpleMapChangeTransition(transition_source->elements_kind(),
+ transition_target->elements_kind())
+ ? ElementsTransition::kFastTransition
+ : ElementsTransition::kSlowTransition),
+ receiver, jsgraph()->HeapConstant(transition_source),
+ jsgraph()->HeapConstant(transition_target), effect, control);
}
- // Perform the actual backing store access.
- this_value = this_effect = graph()->NewNode(
- simplified()->LoadElement(element_access), this_elements, this_index,
- this_effect, this_control);
- // Handle loading from holey backing stores correctly, by either mapping
- // the hole to undefined if possible, or deoptimizing otherwise.
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
- // Perform the hole check on the result.
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(element_access.type),
- this_value, jsgraph()->TheHoleConstant());
- // Check if we are allowed to turn the hole into undefined.
- Type* initial_holey_array_type = Type::Class(
- handle(isolate()->get_initial_js_array_map(elements_kind)),
- graph()->zone());
- if (receiver_type->NowIs(initial_holey_array_type) &&
- isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, this_control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- // Add a code dependency on the array protector cell.
- AssumePrototypesStable(receiver_type, native_context,
- isolate()->initial_object_prototype());
- dependencies()->AssumePropertyCell(factory()->array_protector());
- // Turn the hole into undefined.
- this_control =
- graph()->NewNode(common()->Merge(2), if_true, if_false);
- this_value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->UndefinedConstant(), this_value, this_control);
- element_type =
- Type::Union(element_type, Type::Undefined(), graph()->zone());
- } else {
- // Deoptimize in case of the hole.
- this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- this_effect, this_control);
+
+ // TODO(turbofan): The effect/control linearization will not find a
+ // FrameState after the StoreField or Call that is generated for the
+ // elements kind transition above. This is because those operators
+ // don't have the kNoWrite flag set, even though they are not
+ // observable by JavaScript.
+ effect = graph()->NewNode(common()->Checkpoint(), frame_state, effect,
+ control);
+
+ // Perform map check on the {receiver}.
+ effect = BuildCheckMaps(receiver, effect, control,
+ access_info.receiver_maps());
+
+ // Access the actual element.
+ ValueEffectControl continuation = BuildElementAccess(
+ receiver, index, value, effect, control, native_context, access_info,
+ access_mode, store_mode);
+ value = continuation.value();
+ effect = continuation.effect();
+ control = continuation.control();
+ } else {
+ // The final states for every polymorphic branch. We join them with
+ // Merge+Phi+EffectPhi at the bottom.
+ ZoneVector<Node*> values(zone());
+ ZoneVector<Node*> effects(zone());
+ ZoneVector<Node*> controls(zone());
+
+ // Generate code for the various different element access patterns.
+ Node* fallthrough_control = control;
+ for (size_t j = 0; j < access_infos.size(); ++j) {
+ ElementAccessInfo const& access_info = access_infos[j];
+ Node* this_receiver = receiver;
+ Node* this_value = value;
+ Node* this_index = index;
+ Node* this_effect = effect;
+ Node* this_control = fallthrough_control;
+
+ // Perform possible elements kind transitions.
+ for (auto transition : access_info.transitions()) {
+ Handle<Map> const transition_source = transition.first;
+ Handle<Map> const transition_target = transition.second;
+ this_effect = graph()->NewNode(
+ simplified()->TransitionElementsKind(
+ IsSimpleMapChangeTransition(
+ transition_source->elements_kind(),
+ transition_target->elements_kind())
+ ? ElementsTransition::kFastTransition
+ : ElementsTransition::kSlowTransition),
+ receiver, jsgraph()->HeapConstant(transition_source),
+ jsgraph()->HeapConstant(transition_target), this_effect,
+ this_control);
}
- // Rename the result to represent the actual type (not polluted by the
- // hole).
- this_value = graph()->NewNode(common()->Guard(element_type), this_value,
- this_control);
- } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
- // Perform the hole check on the result.
- Node* check =
- graph()->NewNode(simplified()->NumberIsHoleNaN(), this_value);
- // Check if we are allowed to return the hole directly.
- Type* initial_holey_array_type = Type::Class(
- handle(isolate()->get_initial_js_array_map(elements_kind)),
- graph()->zone());
- if (receiver_type->NowIs(initial_holey_array_type) &&
- isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
- // Add a code dependency on the array protector cell.
- AssumePrototypesStable(receiver_type, native_context,
- isolate()->initial_object_prototype());
- dependencies()->AssumePropertyCell(factory()->array_protector());
- // Turn the hole into undefined.
- this_value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged,
- BranchHint::kFalse),
- check, jsgraph()->UndefinedConstant(), this_value);
- } else {
- // Deoptimize in case of the hole.
- this_control =
- graph()->NewNode(common()->DeoptimizeIf(), check, frame_state,
- this_effect, this_control);
+
+ // Load the {receiver} map.
+ Node* receiver_map = this_effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, this_effect, this_control);
+
+ // Perform map check(s) on {receiver}.
+ MapList const& receiver_maps = access_info.receiver_maps();
+ {
+ ZoneVector<Node*> this_controls(zone());
+ ZoneVector<Node*> this_effects(zone());
+ size_t num_classes = receiver_maps.size();
+ for (Handle<Map> map : receiver_maps) {
+ DCHECK_LT(0u, num_classes);
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+ jsgraph()->Constant(map));
+ if (--num_classes == 0 && j == access_infos.size() - 1) {
+ // Last map check on the fallthrough control path, do a
+ // conditional eager deoptimization exit here.
+ // TODO(turbofan): This is ugly as hell! We should probably
+ // introduce macro-ish operators for property access that
+ // encapsulate this whole mess.
+ check = graph()->NewNode(simplified()->CheckIf(), check,
+ this_effect, this_control);
+ this_controls.push_back(this_control);
+ this_effects.push_back(check);
+ fallthrough_control = nullptr;
+ } else {
+ Node* branch = graph()->NewNode(common()->Branch(), check,
+ fallthrough_control);
+ this_controls.push_back(
+ graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(effect);
+ fallthrough_control =
+ graph()->NewNode(common()->IfFalse(), branch);
+ }
+ }
+
+ // Create single chokepoint for the control.
+ int const this_control_count = static_cast<int>(this_controls.size());
+ if (this_control_count == 1) {
+ this_control = this_controls.front();
+ this_effect = this_effects.front();
+ } else {
+ this_control =
+ graph()->NewNode(common()->Merge(this_control_count),
+ this_control_count, &this_controls.front());
+ this_effects.push_back(this_control);
+ this_effect =
+ graph()->NewNode(common()->EffectPhi(this_control_count),
+ this_control_count + 1, &this_effects.front());
+
+ // TODO(turbofan): The effect/control linearization will not find a
+ // FrameState after the EffectPhi that is generated above.
+ this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
+ this_effect, this_control);
+ }
}
+
+ // Access the actual element.
+ ValueEffectControl continuation = BuildElementAccess(
+ this_receiver, this_index, this_value, this_effect, this_control,
+ native_context, access_info, access_mode, store_mode);
+ values.push_back(continuation.value());
+ effects.push_back(continuation.effect());
+ controls.push_back(continuation.control());
}
- } else {
- DCHECK_EQ(AccessMode::kStore, access_mode);
- if (IsFastSmiElementsKind(elements_kind)) {
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), this_value);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(type_cache_.kSmi),
- this_value, this_control);
- } else if (IsFastDoubleElementsKind(elements_kind)) {
- Node* check =
- graph()->NewNode(simplified()->ObjectIsNumber(), this_value);
- this_control = graph()->NewNode(common()->DeoptimizeUnless(), check,
- frame_state, this_effect, this_control);
- this_value = graph()->NewNode(common()->Guard(Type::Number()),
- this_value, this_control);
+
+ DCHECK_NULL(fallthrough_control);
+
+ // Generate the final merge point for all (polymorphic) branches.
+ int const control_count = static_cast<int>(controls.size());
+ if (control_count == 0) {
+ value = effect = control = jsgraph()->Dead();
+ } else if (control_count == 1) {
+ value = values.front();
+ effect = effects.front();
+ control = controls.front();
+ } else {
+ control = graph()->NewNode(common()->Merge(control_count),
+ control_count, &controls.front());
+ values.push_back(control);
+ value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, control_count),
+ control_count + 1, &values.front());
+ effects.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(control_count),
+ control_count + 1, &effects.front());
}
- this_effect = graph()->NewNode(simplified()->StoreElement(element_access),
- this_elements, this_index, this_value,
- this_effect, this_control);
}
-
- // Remember the final state for this element access.
- values.push_back(this_value);
- effects.push_back(this_effect);
- controls.push_back(this_control);
}
- DCHECK_NULL(fallthrough_control);
-
- // Generate the final merge point for all (polymorphic) branches.
- int const control_count = static_cast<int>(controls.size());
- if (control_count == 0) {
- value = effect = control = jsgraph()->Dead();
- } else if (control_count == 1) {
- value = values.front();
- effect = effects.front();
- control = controls.front();
- } else {
- control = graph()->NewNode(common()->Merge(control_count), control_count,
- &controls.front());
- values.push_back(control);
- value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, control_count),
- control_count + 1, &values.front());
- effects.push_back(control);
- effect = graph()->NewNode(common()->EffectPhi(control_count),
- control_count + 1, &effects.front());
- }
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
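
// What the string fast path earlier in this function encodes, modeled
// at the JavaScript-semantics level: CheckBounds, then StringCharCodeAt,
// then StringFromCharCode -- i.e. s[i] on a string becomes a checked
// one-character substring (out of bounds triggers a deopt in the real
// code, modeled as nullopt here).
#include <cstddef>
#include <optional>
#include <string>

std::optional<std::string> StringKeyedLoad(const std::string& receiver,
                                           std::size_t index) {
  if (index >= receiver.size()) return std::nullopt;  // CheckBounds
  char code = receiver[index];                        // StringCharCodeAt
  return std::string(1, code);                        // StringFromCharCode
}
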
-
+template <typename KeyedICNexus>
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
- Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
+ Node* node, Node* index, Node* value, KeyedICNexus const& nexus,
AccessMode access_mode, LanguageMode language_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
if ((flags() & kDeoptimizationEnabled) &&
(flags() & kBailoutOnUninitialized)) {
- // TODO(turbofan): Implement all eager bailout points correctly in
- // the graph builder.
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
- return ReduceSoftDeoptimize(node);
- }
+ return ReduceSoftDeoptimize(
+ node,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
}
return NoChange();
}
// Extract receiver maps from the {nexus}.
MapHandleList receiver_maps;
- if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
- DCHECK_LT(0, receiver_maps.length());
+ if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+ return NoChange();
+ } else if (receiver_maps.length() == 0) {
+ if ((flags() & kDeoptimizationEnabled) &&
+ (flags() & kBailoutOnUninitialized)) {
+ return ReduceSoftDeoptimize(
+ node,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
+ }
+ return NoChange();
+ }
// Optimize access for constant {index}.
HeapObjectMatcher mindex(index);
@@ -919,6 +739,11 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
return ReduceNamedAccess(node, value, receiver_maps,
handle(name, isolate()), access_mode,
language_mode, index);
+ } else if (nexus.GetKeyType() != ELEMENT) {
+ // The KeyedLoad/StoreIC has seen non-element accesses, so we cannot assume
+ // that the {index} is a valid array index, thus we just let the IC continue
+ // to deal with this load/store.
+ return NoChange();
}
// Try to lower the element access based on the {receiver_maps}.
@@ -926,14 +751,14 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
language_mode, store_mode);
}
-
-Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
+ Node* node, DeoptimizeReason reason) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
- effect, control);
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -977,13 +802,529 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
p.language_mode(), store_mode);
}
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildPropertyAccess(
+ Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name, Handle<Context> native_context,
+ PropertyAccessInfo const& access_info, AccessMode access_mode) {
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ AssumePrototypesStable(access_info.receiver_maps(), native_context, holder);
+ }
+
+ // Generate the actual property access.
+ if (access_info.IsNotFound()) {
+ DCHECK_EQ(AccessMode::kLoad, access_mode);
+ value = jsgraph()->UndefinedConstant();
+ } else if (access_info.IsDataConstant()) {
+    Node* constant_value = jsgraph()->Constant(access_info.constant());
+    if (access_mode == AccessMode::kStore) {
+      // Deopt unless the incoming {value} is the data constant; comparing
+      // {value} against itself would make this check vacuously true.
+      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
+                                     constant_value);
+      effect =
+          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+    }
+    value = constant_value;
+ } else if (access_info.IsAccessorConstant()) {
+ // TODO(bmeurer): Properly rewire the IfException edge here if there's any.
+ Node* target = jsgraph()->Constant(access_info.constant());
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared_info =
+ frame_info.shared_info().ToHandleChecked();
+ switch (access_mode) {
+ case AccessMode::kLoad: {
+ // We need a FrameState for the getter stub to restore the correct
+ // context before returning to fullcodegen.
+ FrameStateFunctionInfo const* frame_info0 =
+ common()->CreateFrameStateFunctionInfo(FrameStateType::kGetterStub,
+ 1, 0, shared_info);
+ Node* frame_state0 = graph()->NewNode(
+ common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ frame_info0),
+ graph()->NewNode(common()->StateValues(1), receiver),
+ jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+ context, target, frame_state);
+
+ // Introduce the call to the getter function.
+ value = effect = graph()->NewNode(
+ javascript()->CallFunction(
+ 2, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, context, frame_state0, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), value);
+ break;
+ }
+ case AccessMode::kStore: {
+ // We need a FrameState for the setter stub to restore the correct
+ // context and return the appropriate value to fullcodegen.
+ FrameStateFunctionInfo const* frame_info0 =
+ common()->CreateFrameStateFunctionInfo(FrameStateType::kSetterStub,
+ 2, 0, shared_info);
+ Node* frame_state0 = graph()->NewNode(
+ common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ frame_info0),
+ graph()->NewNode(common()->StateValues(2), receiver, value),
+ jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
+ context, target, frame_state);
+
+ // Introduce the call to the setter function.
+ effect = graph()->NewNode(
+ javascript()->CallFunction(
+ 3, VectorSlotPair(), ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, value, context, frame_state0, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), effect);
+ break;
+ }
+ }
+ } else {
+ DCHECK(access_info.IsDataField());
+ FieldIndex const field_index = access_info.field_index();
+ Type* const field_type = access_info.field_type();
+ if (access_mode == AccessMode::kLoad &&
+ access_info.holder().ToHandle(&holder)) {
+ receiver = jsgraph()->Constant(holder);
+ }
+ Node* storage = receiver;
+ if (!field_index.is_inobject()) {
+ storage = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
+ storage, effect, control);
+ }
+ FieldAccess field_access = {
+ kTaggedBase, field_index.offset(), name,
+ field_type, MachineType::AnyTagged(), kFullWriteBarrier};
+ if (access_mode == AccessMode::kLoad) {
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ // TODO(turbofan): We remove the representation axis from the type to
+ // avoid uninhabited representation types. This is a workaround until
+ // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+ field_access.type = Type::Union(
+ field_type, Type::Representation(Type::Number(), zone()), zone());
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ storage = effect = graph()->NewNode(
+ simplified()->LoadField(field_access), storage, effect, control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ }
+ field_access.machine_type = MachineType::Float64();
+ }
+ value = effect = graph()->NewNode(simplified()->LoadField(field_access),
+ storage, effect, control);
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (field_type->Is(Type::UntaggedFloat64())) {
+ // TODO(turbofan): We remove the representation axis from the type to
+ // avoid uninhabited representation types. This is a workaround until
+ // the {PropertyAccessInfo} is using {MachineRepresentation} instead.
+ field_access.type = Type::Union(
+ field_type, Type::Representation(Type::Number(), zone()), zone());
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
+
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ if (access_info.HasTransitionMap()) {
+ // Allocate a MutableHeapNumber for the new property.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable),
+ effect);
+ Node* box = effect = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), box,
+ jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+ effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+ box, value, effect, control);
+ value = effect =
+ graph()->NewNode(common()->FinishRegion(), box, effect);
+
+ field_access.type = Type::TaggedPointer();
+ } else {
+ // We just store directly to the MutableHeapNumber.
+ storage = effect =
+ graph()->NewNode(simplified()->LoadField(field_access), storage,
+ effect, control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
+ }
+ } else {
+ // Unboxed double field, we store directly to the field.
+ field_access.machine_type = MachineType::Float64();
+ }
+ } else if (field_type->Is(Type::TaggedSigned())) {
+ value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+ value, effect, control);
+ } else if (field_type->Is(Type::TaggedPointer())) {
+ // Ensure that {value} is a HeapObject.
+ value = effect = graph()->NewNode(simplified()->CheckTaggedPointer(),
+ value, effect, control);
+ if (field_type->NumClasses() == 1) {
+ // Emit a map check for the value.
+ Node* field_map =
+ jsgraph()->Constant(field_type->Classes().Current());
+ effect = graph()->NewNode(simplified()->CheckMaps(1), value,
+ field_map, effect, control);
+ } else {
+ DCHECK_EQ(0, field_type->NumClasses());
+ }
+ } else {
+ DCHECK(field_type->Is(Type::Tagged()));
+ }
+ Handle<Map> transition_map;
+ if (access_info.transition_map().ToHandle(&transition_map)) {
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kObservable), effect);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), receiver,
+ jsgraph()->Constant(transition_map), effect, control);
+ }
+ effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
+ value, effect, control);
+ if (access_info.HasTransitionMap()) {
+ effect = graph()->NewNode(common()->FinishRegion(),
+ jsgraph()->UndefinedConstant(), effect);
+ }
+ }
+ }
+
+ return ValueEffectControl(value, effect, control);
+}
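
The data-field branch above hinges on where the field lives: in-object fields sit at a fixed offset inside the receiver, while everything else goes through the properties backing store loaded via ForJSObjectProperties(). A scalar C++ analogue of that indirection (ToyObject and all names here are illustrative stand-ins, not V8 types):

    #include <cstddef>
    #include <vector>

    // Illustrative stand-ins, not V8 types: two fields live inline in the
    // object, the rest spill into a separately allocated backing store.
    struct ToyObject {
      double inobject[2];
      std::vector<double> properties;
    };

    // In-object fields are one load at a fixed offset; out-of-object fields
    // pay an extra indirection, mirroring the extra
    // LoadField(ForJSObjectProperties()) emitted above before the access.
    double LoadToyField(ToyObject const& o, bool is_inobject,
                        std::size_t index) {
      if (is_inobject) return o.inobject[index];
      return o.properties[index];
    }
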
+
+namespace {
+
+ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
+ switch (kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return kExternal##Type##Array;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return kExternalInt8Array;
+}
+
+} // namespace
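
GetArrayTypeFromElementsKind relies on V8's TYPED_ARRAYS X-macro, which defines the list of typed-array kinds once and lets each expansion site supply its own V. A self-contained sketch of the same pattern, with an invented FRUIT_LIST standing in for TYPED_ARRAYS:

    #include <cstdio>

    // X-macro: the list is defined once; each expansion supplies its own V.
    #define FRUIT_LIST(V) \
      V(Apple, 1)         \
      V(Banana, 2)        \
      V(Cherry, 3)

    enum FruitId {
    #define DECLARE_ID(Name, id) k##Name = id,
      FRUIT_LIST(DECLARE_ID)
    #undef DECLARE_ID
    };

    const char* FruitName(FruitId id) {
      switch (id) {
    #define NAME_CASE(Name, id) \
      case k##Name:             \
        return #Name;
        FRUIT_LIST(NAME_CASE)
    #undef NAME_CASE
      }
      return "unknown";
    }

    int main() { std::printf("%s\n", FruitName(kBanana)); }  // prints Banana
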
+
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildElementAccess(
+ Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+ Handle<Context> native_context, ElementAccessInfo const& access_info,
+ AccessMode access_mode, KeyedAccessStoreMode store_mode) {
+ // TODO(bmeurer): We currently specialize based on elements kind. We should
+ // also be able to properly support strings and other JSObjects here.
+ ElementsKind elements_kind = access_info.elements_kind();
+ MapList const& receiver_maps = access_info.receiver_maps();
+
+ // Load the elements for the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ // Don't try to store to a copy-on-write backing store.
+ if (access_mode == AccessMode::kStore &&
+ IsFastSmiOrObjectElementsKind(elements_kind) &&
+ store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(1), elements,
+ jsgraph()->FixedArrayMapConstant(), effect, control);
+ }
+
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ // Load the {receiver}s length.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
+ receiver, effect, control);
+
+ // Check if the {receiver}s buffer was neutered.
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* buffer_bitfield = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bitfield,
+ jsgraph()->Constant(JSArrayBuffer::WasNeutered::kMask)),
+ jsgraph()->ZeroConstant());
+
+ // Default to zero if the {receiver}s buffer was neutered.
+ length = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ check, length, jsgraph()->ZeroConstant());
+
+ if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+      // Check that the {index} is a valid array index; we do the actual
+      // bounds check below and simply skip the store if the index is out
+      // of bounds for the {receiver}.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ jsgraph()->Constant(Smi::kMaxValue),
+ effect, control);
+ } else {
+ // Check that the {index} is in the valid range for the {receiver}.
+ DCHECK_EQ(STANDARD_STORE, store_mode);
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ length, effect, control);
+ }
+
+ // Load the base and external pointer for the {receiver}.
+ Node* base_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+ elements, effect, control);
+ Node* external_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+ elements, effect, control);
+
+ // Access the actual element.
+ ExternalArrayType external_array_type =
+ GetArrayTypeFromElementsKind(elements_kind);
+ switch (access_mode) {
+ case AccessMode::kLoad: {
+ value = effect = graph()->NewNode(
+ simplified()->LoadTypedElement(external_array_type), buffer,
+ base_pointer, external_pointer, index, effect, control);
+ break;
+ }
+ case AccessMode::kStore: {
+ // Ensure that the {value} is actually a Number.
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
+
+ // Check if we can skip the out-of-bounds store.
+ if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ {
+ // Perform the actual store.
+ etrue = graph()->NewNode(
+ simplified()->StoreTypedElement(external_array_type), buffer,
+ base_pointer, external_pointer, index, value, etrue, if_true);
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Just ignore the out-of-bounds write.
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ } else {
+          // Perform the actual store.
+ DCHECK_EQ(STANDARD_STORE, store_mode);
+ effect = graph()->NewNode(
+ simplified()->StoreTypedElement(external_array_type), buffer,
+ base_pointer, external_pointer, index, value, effect, control);
+ }
+ break;
+ }
+ }
+ } else {
+ // Check if the {receiver} is a JSArray.
+ bool receiver_is_jsarray = HasOnlyJSArrayMaps(receiver_maps);
+
+ // Load the length of the {receiver}.
+ Node* length = effect =
+ receiver_is_jsarray
+ ? graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(elements_kind)),
+ receiver, effect, control)
+ : graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ // Check if we might need to grow the {elements} backing store.
+ if (IsGrowStoreMode(store_mode)) {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+
+ // Check that the {index} is a valid array index; the actual checking
+ // happens below right before the element store.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ jsgraph()->Constant(Smi::kMaxValue),
+ effect, control);
+ } else {
+ // Check that the {index} is in the valid range for the {receiver}.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ length, effect, control);
+ }
+
+ // Compute the element access.
+ Type* element_type = Type::NonInternal();
+ MachineType element_machine_type = MachineType::AnyTagged();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ element_type = Type::Number();
+ element_machine_type = MachineType::Float64();
+ } else if (IsFastSmiElementsKind(elements_kind)) {
+ element_type = type_cache_.kSmi;
+ }
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type,
+ kFullWriteBarrier};
+
+ // Access the actual element.
+ if (access_mode == AccessMode::kLoad) {
+ // Compute the real element access type, which includes the hole in case
+ // of holey backing stores.
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ element_access.type =
+ Type::Union(element_type, Type::Hole(), graph()->zone());
+ }
+ // Perform the actual backing store access.
+ value = effect =
+ graph()->NewNode(simplified()->LoadElement(element_access), elements,
+ index, effect, control);
+      // Handle loading from holey backing stores correctly: either map the
+      // hole to undefined, if possible, or deoptimize otherwise.
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ // Check if we are allowed to turn the hole into undefined.
+ if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+ // Turn the hole into undefined.
+ value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+ value);
+ } else {
+ // Bailout if we see the hole.
+ value = effect = graph()->NewNode(simplified()->CheckTaggedHole(),
+ value, effect, control);
+ }
+ } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ // Perform the hole check on the result.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
+ // Check if we are allowed to return the hole directly.
+ if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+ // Return the signaling NaN hole directly if all uses are truncating.
+ mode = CheckFloat64HoleMode::kAllowReturnHole;
+ }
+ value = effect = graph()->NewNode(simplified()->CheckFloat64Hole(mode),
+ value, effect, control);
+ }
+ } else {
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ if (IsFastSmiElementsKind(elements_kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckTaggedSigned(),
+ value, effect, control);
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
+        // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+
+ // Ensure that copy-on-write backing store is writable.
+ if (IsFastSmiOrObjectElementsKind(elements_kind) &&
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
+ elements = effect =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, effect, control);
+ } else if (IsGrowStoreMode(store_mode)) {
+        // Grow the {elements} backing store if necessary. This also updates
+        // the "length" property for JSArray {receiver}s; hence there must
+        // not be any other check after this operation, as the write to the
+        // "length" property is observable.
+ GrowFastElementsFlags flags = GrowFastElementsFlag::kNone;
+ if (receiver_is_jsarray) {
+ flags |= GrowFastElementsFlag::kArrayObject;
+ }
+ if (IsHoleyElementsKind(elements_kind)) {
+ flags |= GrowFastElementsFlag::kHoleyElements;
+ }
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ flags |= GrowFastElementsFlag::kDoubleElements;
+ }
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(flags), receiver, elements,
+ index, length, effect, control);
+ }
+
+ // Perform the actual element access.
+ effect = graph()->NewNode(simplified()->StoreElement(element_access),
+ elements, index, value, effect, control);
+ }
+ }
+
+ return ValueEffectControl(value, effect, control);
+}
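
Two of the decisions encoded in the typed-array path above read naturally as scalar code: a neutered buffer forces the effective length to zero (the Select node), and STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS silently drops out-of-range writes (the empty IfFalse branch). A plain C++ sketch of that behavior, not V8 API:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // An out-of-range index in this mode is silently ignored rather than
    // trapping or deoptimizing; a neutered buffer behaves as if the view
    // had length zero.
    void StoreTypedElementSketch(std::vector<uint8_t>& backing,
                                 bool was_neutered, std::size_t index,
                                 uint8_t value) {
      std::size_t length = was_neutered ? 0 : backing.size();
      if (index < length) {
        backing[index] = value;  // the actual store, on the in-bounds path
      }
      // else: the empty IfFalse branch -- the write is simply dropped
    }
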
+
+Node* JSNativeContextSpecialization::BuildCheckMaps(
+ Node* receiver, Node* effect, Node* control,
+ std::vector<Handle<Map>> const& maps) {
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ Handle<Map> receiver_map(m.Value()->map(), isolate());
+ if (receiver_map->is_stable()) {
+ for (Handle<Map> map : maps) {
+ if (map.is_identical_to(receiver_map)) {
+ dependencies()->AssumeMapStable(receiver_map);
+ return effect;
+ }
+ }
+ }
+ }
+ int const map_input_count = static_cast<int>(maps.size());
+ int const input_count = 1 + map_input_count + 1 + 1;
+ Node** inputs = zone()->NewArray<Node*>(input_count);
+ inputs[0] = receiver;
+ for (int i = 0; i < map_input_count; ++i) {
+ inputs[1 + i] = jsgraph()->HeapConstant(maps[i]);
+ }
+ inputs[input_count - 2] = effect;
+ inputs[input_count - 1] = control;
+ return graph()->NewNode(simplified()->CheckMaps(map_input_count), input_count,
+ inputs);
+}
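
BuildCheckMaps assembles the variadic CheckMaps node by hand: the receiver first, then one constant per candidate map, then effect and control. The same layout as plain data, in a hedged sketch where Node is an opaque stand-in:

    #include <vector>

    struct Node;  // opaque stand-in for a compiler graph node

    // Input layout assembled above: [receiver, map_0 .. map_{n-1}, effect,
    // control], i.e. 1 + map_count + 1 + 1 inputs in total.
    std::vector<Node*> CheckMapsInputs(Node* receiver,
                                       std::vector<Node*> const& map_constants,
                                       Node* effect, Node* control) {
      std::vector<Node*> inputs;
      inputs.reserve(map_constants.size() + 3);
      inputs.push_back(receiver);
      inputs.insert(inputs.end(), map_constants.begin(), map_constants.end());
      inputs.push_back(effect);
      inputs.push_back(control);
      return inputs;
    }
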
+
+Node* JSNativeContextSpecialization::BuildCheckTaggedPointer(Node* receiver,
+ Node* effect,
+ Node* control) {
+ switch (receiver->opcode()) {
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSTypeOf: {
+ return effect;
+ }
+ default: {
+ return graph()->NewNode(simplified()->CheckTaggedPointer(), receiver,
+ effect, control);
+ }
+ }
+}
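
The elision above rests on a closed set of opcodes whose results are statically known to be heap objects, so re-checking would be redundant. The shape of that predicate in isolation, with an invented opcode enum:

    // Invented opcode enum for illustration; V8's IrOpcode list is larger.
    enum class Op { kHeapConstant, kCreateObject, kToString, kLoadProperty };

    // Producers that can only ever yield heap objects need no
    // CheckTaggedPointer; anything that may produce a Smi keeps the check.
    bool AlwaysProducesHeapObject(Op op) {
      switch (op) {
        case Op::kHeapConstant:
        case Op::kCreateObject:
        case Op::kToString:
          return true;
        default:
          return false;  // e.g. kLoadProperty may yield a Smi
      }
    }
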
void JSNativeContextSpecialization::AssumePrototypesStable(
- Type* receiver_type, Handle<Context> native_context,
- Handle<JSObject> holder) {
+ std::vector<Handle<Map>> const& receiver_maps,
+ Handle<Context> native_context, Handle<JSObject> holder) {
// Determine actual holder and perform prototype chain checks.
- for (auto i = receiver_type->Classes(); !i.Done(); i.Advance()) {
- Handle<Map> map = i.Current();
+ for (auto map : receiver_maps) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
Handle<JSFunction> constructor;
@@ -995,6 +1336,124 @@ void JSNativeContextSpecialization::AssumePrototypesStable(
}
}
+bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
+ std::vector<Handle<Map>> const& receiver_maps,
+ Handle<Context> native_context) {
+ // Check if the array prototype chain is intact.
+ if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
+
+ // Make sure both the initial Array and Object prototypes are stable.
+ Handle<JSObject> initial_array_prototype(
+ native_context->initial_array_prototype(), isolate());
+ Handle<JSObject> initial_object_prototype(
+ native_context->initial_object_prototype(), isolate());
+ if (!initial_array_prototype->map()->is_stable() ||
+ !initial_object_prototype->map()->is_stable()) {
+ return false;
+ }
+
+ // Check if all {receiver_maps} either have the initial Array.prototype
+ // or the initial Object.prototype as their prototype, as those are
+ // guarded by the array protector cell.
+ for (Handle<Map> map : receiver_maps) {
+ if (map->prototype() != *initial_array_prototype &&
+ map->prototype() != *initial_object_prototype) {
+ return false;
+ }
+ }
+
+ // Install code dependencies on the prototype maps.
+ for (Handle<Map> map : receiver_maps) {
+ dependencies()->AssumePrototypeMapsStable(map, initial_object_prototype);
+ }
+
+ // Install code dependency on the array protector cell.
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ return true;
+}
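
The reason the protector cell suffices: loading a hole from a holey array falls back to the prototype chain, and with unmodified Array.prototype/Object.prototype that lookup is a guaranteed miss, so the hole can be rewritten to undefined without walking the chain. A toy model of the authorized rewrite (std::nullopt standing in for undefined, a thrown exception for the deopt; not V8 API):

    #include <optional>
    #include <stdexcept>

    std::optional<double> LoadHoleyElementSketch(bool is_hole, double value,
                                                 bool protector_intact) {
      if (!is_hole) return value;
      if (protector_intact) return std::nullopt;  // hole -> undefined
      // Protector invalidated: a prototype may now supply indexed
      // properties, so the fast path must bail out (the CheckTaggedHole
      // deopt in the graph above).
      throw std::runtime_error("deopt: prototype chain must be consulted");
    }
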
+
+bool JSNativeContextSpecialization::ExtractReceiverMaps(
+ Node* receiver, Node* effect, FeedbackNexus const& nexus,
+ MapHandleList* receiver_maps) {
+ DCHECK_EQ(0, receiver_maps->length());
+ // See if we can infer a concrete type for the {receiver}.
+ Handle<Map> receiver_map;
+ if (InferReceiverMap(receiver, effect).ToHandle(&receiver_map)) {
+    // We can assume that the {receiver} still has the inferred {receiver_map}.
+ receiver_maps->Add(receiver_map);
+ return true;
+ }
+ // Try to extract some maps from the {nexus}.
+ if (nexus.ExtractMaps(receiver_maps) != 0) {
+    // Try to filter out impossible candidates based on the inferred root map.
+ if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
+ for (int i = receiver_maps->length(); --i >= 0;) {
+ if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
+ receiver_maps->Remove(i);
+ }
+ }
+ }
+ return true;
+ }
+ return false;
+}
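
The backwards Remove(i) loop above is the MapHandleList flavor of a standard filter; with plain ints standing in for map identities (an assumption for illustration), the same step is the erase-remove idiom:

    #include <algorithm>
    #include <vector>

    // Drop every candidate whose root map disagrees with the inferred root
    // map; such maps cannot describe the receiver.
    void FilterByRootMap(std::vector<int>& candidate_root_maps,
                         int inferred_root_map) {
      candidate_root_maps.erase(
          std::remove_if(candidate_root_maps.begin(),
                         candidate_root_maps.end(),
                         [&](int root) { return root != inferred_root_map; }),
          candidate_root_maps.end());
    }
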
+
+MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
+ Node* effect) {
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ Handle<Map> receiver_map(m.Value()->map(), isolate());
+ if (receiver_map->is_stable()) return receiver_map;
+ } else if (m.IsJSCreate()) {
+ HeapObjectMatcher mtarget(m.InputAt(0));
+ HeapObjectMatcher mnewtarget(m.InputAt(1));
+ if (mtarget.HasValue() && mnewtarget.HasValue()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(mtarget.Value());
+ if (constructor->has_initial_map()) {
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+ // Walk up the {effect} chain to see if the {receiver} is the
+ // dominating effect and there's no other observable write in
+ // between.
+ while (true) {
+ if (receiver == effect) return initial_map;
+ if (!effect->op()->HasProperty(Operator::kNoWrite) ||
+ effect->op()->EffectInputCount() != 1) {
+ break;
+ }
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ }
+ }
+ }
+ }
+ // TODO(turbofan): Go hunting for CheckMaps(receiver) in the effect chain?
+ return MaybeHandle<Map>();
+}
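
The while(true) walk above follows the effect chain toward its producer: as long as each operator has a single effect input and the kNoWrite property, no intervening node can have changed the receiver's map. A minimal model of that walk over a toy effect node (illustrative, not V8's Node):

    // Toy effect node: one effect predecessor and a "does not write" bit,
    // standing in for Operator::kNoWrite and a single effect input.
    struct EffectNode {
      bool no_write;
      EffectNode* effect_input;  // nullptr at the start of the chain
    };

    // True if walking up from {effect} reaches {receiver} (the allocation)
    // without crossing any potentially-writing operation.
    bool DominatedByAllocation(const EffectNode* receiver,
                               const EffectNode* effect) {
      while (true) {
        if (effect == receiver) return true;
        if (effect == nullptr || !effect->no_write) return false;
        effect = effect->effect_input;
      }
    }
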
+
+MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
+ Node* receiver) {
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ return handle(m.Value()->map()->FindRootMap(), isolate());
+ } else if (m.IsJSCreate()) {
+ HeapObjectMatcher mtarget(m.InputAt(0));
+ HeapObjectMatcher mnewtarget(m.InputAt(1));
+ if (mtarget.HasValue() && mnewtarget.HasValue()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(mtarget.Value());
+ if (constructor->has_initial_map()) {
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+ DCHECK_EQ(*initial_map, initial_map->FindRootMap());
+ return initial_map;
+ }
+ }
+ }
+ }
+ return MaybeHandle<Map>();
+}
MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
Node* node) {
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 5562c6e36e..549dc93575 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/deoptimize-reason.h"
namespace v8 {
namespace internal {
@@ -23,9 +24,11 @@ namespace compiler {
// Forward declarations.
enum class AccessMode;
class CommonOperatorBuilder;
+class ElementAccessInfo;
class JSGraph;
class JSOperatorBuilder;
class MachineOperatorBuilder;
+class PropertyAccessInfo;
class SimplifiedOperatorBuilder;
@@ -38,8 +41,9 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// Flags that control the mode of operation.
enum Flag {
kNoFlags = 0u,
- kBailoutOnUninitialized = 1u << 0,
- kDeoptimizationEnabled = 1u << 1,
+ kAccessorInliningEnabled = 1u << 0,
+ kBailoutOnUninitialized = 1u << 1,
+ kDeoptimizationEnabled = 1u << 2,
};
typedef base::Flags<Flag> Flags;
@@ -62,29 +66,88 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
+ template <typename KeyedICNexus>
Reduction ReduceKeyedAccess(Node* node, Node* index, Node* value,
- FeedbackNexus const& nexus,
- AccessMode access_mode,
+ KeyedICNexus const& nexus, AccessMode access_mode,
LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
- Reduction ReduceNamedAccess(Node* node, Node* value,
- FeedbackNexus const& nexus, Handle<Name> name,
- AccessMode access_mode,
- LanguageMode language_mode);
+ Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
+ FeedbackNexus const& nexus,
+ Handle<Name> name,
+ AccessMode access_mode,
+ LanguageMode language_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
LanguageMode language_mode,
Node* index = nullptr);
- Reduction ReduceSoftDeoptimize(Node* node);
+ Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
+
+ // A triple of nodes that represents a continuation.
+ class ValueEffectControl final {
+ public:
+ ValueEffectControl(Node* value, Node* effect, Node* control)
+ : value_(value), effect_(effect), control_(control) {}
+
+ Node* value() const { return value_; }
+ Node* effect() const { return effect_; }
+ Node* control() const { return control_; }
+
+ private:
+ Node* const value_;
+ Node* const effect_;
+ Node* const control_;
+ };
+
+ // Construct the appropriate subgraph for property access.
+ ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
+ Node* context, Node* frame_state,
+ Node* effect, Node* control,
+ Handle<Name> name,
+ Handle<Context> native_context,
+ PropertyAccessInfo const& access_info,
+ AccessMode access_mode);
+
+ // Construct the appropriate subgraph for element access.
+ ValueEffectControl BuildElementAccess(
+ Node* receiver, Node* index, Node* value, Node* effect, Node* control,
+ Handle<Context> native_context, ElementAccessInfo const& access_info,
+ AccessMode access_mode, KeyedAccessStoreMode store_mode);
+
+ // Construct an appropriate map check.
+ Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
+ std::vector<Handle<Map>> const& maps);
+
+ // Construct an appropriate heap object check.
+ Node* BuildCheckTaggedPointer(Node* receiver, Node* effect, Node* control);
  // Adds stability dependencies on all prototypes of every map in
  // {receiver_maps} up to (and including) the {holder}.
- void AssumePrototypesStable(Type* receiver_type,
+ void AssumePrototypesStable(std::vector<Handle<Map>> const& receiver_maps,
Handle<Context> native_context,
Handle<JSObject> holder);
+ // Checks if we can turn the hole into undefined when loading an element
+ // from an object with one of the {receiver_maps}; sets up appropriate
+ // code dependencies and might use the array protector cell.
+ bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps,
+ Handle<Context> native_context);
+
+ // Extract receiver maps from {nexus} and filter based on {receiver} if
+ // possible.
+ bool ExtractReceiverMaps(Node* receiver, Node* effect,
+ FeedbackNexus const& nexus,
+ MapHandleList* receiver_maps);
+
+ // Try to infer a map for the given {receiver} at the current {effect}.
+  // If a map is returned, the {receiver} is guaranteed to have that map at
+  // this point in the program (identified by {effect}).
+ MaybeHandle<Map> InferReceiverMap(Node* receiver, Node* effect);
+ // Try to infer a root map for the {receiver} independent of the current
+ // program location.
+ MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
+
// Retrieve the native context from the given {node} if known.
MaybeHandle<Context> GetNativeContext(Node* node);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 98e090b509..d19bb767b4 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -9,8 +9,8 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
-#include "src/type-feedback-vector-inl.h"
+#include "src/handles-inl.h"
+#include "src/type-feedback-vector.h"
namespace v8 {
namespace internal {
@@ -376,176 +376,189 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
-#define CACHED_OP_LIST(V) \
- V(Equal, Operator::kNoProperties, 2, 1) \
- V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kNoThrow, 2, 1) \
- V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
- V(LessThan, Operator::kNoProperties, 2, 1) \
- V(GreaterThan, Operator::kNoProperties, 2, 1) \
- V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(ToInteger, Operator::kNoProperties, 1, 1) \
- V(ToLength, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kNoProperties, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(Yield, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 2, 1) \
- V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kEliminatable, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(ForInDone, Operator::kPure, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(ForInStep, Operator::kPure, 1, 1) \
- V(LoadMessage, Operator::kNoThrow, 0, 1) \
- V(StoreMessage, Operator::kNoThrow, 1, 0) \
- V(StackCheck, Operator::kNoProperties, 0, 0) \
- V(CreateWithContext, Operator::kNoProperties, 2, 1) \
- V(CreateModuleContext, Operator::kNoProperties, 2, 1)
+const BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
+ op->opcode() == IrOpcode::kJSBitwiseXor ||
+ op->opcode() == IrOpcode::kJSBitwiseAnd ||
+ op->opcode() == IrOpcode::kJSShiftLeft ||
+ op->opcode() == IrOpcode::kJSShiftRight ||
+ op->opcode() == IrOpcode::kJSShiftRightLogical ||
+ op->opcode() == IrOpcode::kJSAdd ||
+ op->opcode() == IrOpcode::kJSSubtract ||
+ op->opcode() == IrOpcode::kJSMultiply ||
+ op->opcode() == IrOpcode::kJSDivide ||
+ op->opcode() == IrOpcode::kJSModulus);
+ return OpParameter<BinaryOperationHint>(op);
+}
+
+const CompareOperationHint CompareOperationHintOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSEqual ||
+ op->opcode() == IrOpcode::kJSNotEqual ||
+ op->opcode() == IrOpcode::kJSStrictEqual ||
+ op->opcode() == IrOpcode::kJSStrictNotEqual ||
+ op->opcode() == IrOpcode::kJSLessThan ||
+ op->opcode() == IrOpcode::kJSGreaterThan ||
+ op->opcode() == IrOpcode::kJSLessThanOrEqual ||
+ op->opcode() == IrOpcode::kJSGreaterThanOrEqual);
+ return OpParameter<CompareOperationHint>(op);
+}
+
+#define CACHED_OP_LIST(V) \
+ V(ToInteger, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kPure, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(ForInDone, Operator::kPure, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(ForInStep, Operator::kPure, 1, 1) \
+ V(LoadMessage, Operator::kNoThrow, 0, 1) \
+ V(StoreMessage, Operator::kNoThrow, 1, 0) \
+ V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(StackCheck, Operator::kNoWrite, 0, 0) \
+ V(CreateWithContext, Operator::kNoProperties, 2, 1)
+
+#define BINARY_OP_LIST(V) \
+ V(BitwiseOr) \
+ V(BitwiseXor) \
+ V(BitwiseAnd) \
+ V(ShiftLeft) \
+ V(ShiftRight) \
+ V(ShiftRightLogical) \
+ V(Add) \
+ V(Subtract) \
+ V(Multiply) \
+ V(Divide) \
+ V(Modulus)
+
+#define COMPARE_OP_LIST(V) \
+ V(Equal, Operator::kNoProperties) \
+ V(NotEqual, Operator::kNoProperties) \
+ V(StrictEqual, Operator::kPure) \
+ V(StrictNotEqual, Operator::kPure) \
+ V(LessThan, Operator::kNoProperties) \
+ V(GreaterThan, Operator::kNoProperties) \
+ V(LessThanOrEqual, Operator::kNoProperties) \
+ V(GreaterThanOrEqual, Operator::kNoProperties)
struct JSOperatorGlobalCache final {
-#define CACHED(Name, properties, value_input_count, value_output_count) \
- struct Name##Operator final : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::kJS##Name, properties, "JS" #Name, \
- value_input_count, Operator::ZeroIfPure(properties), \
- Operator::ZeroIfEliminatable(properties), \
- value_output_count, Operator::ZeroIfPure(properties), \
- Operator::ZeroIfNoThrow(properties)) {} \
- }; \
+#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::kJS##Name, properties, "JS" #Name, \
+ value_input_count, Operator::ZeroIfPure(properties), \
+ Operator::ZeroIfEliminatable(properties), \
+ value_output_count, Operator::ZeroIfPure(properties), \
+ Operator::ZeroIfNoThrow(properties)) {} \
+ }; \
Name##Operator k##Name##Operator;
- CACHED_OP_LIST(CACHED)
-#undef CACHED
+ CACHED_OP_LIST(CACHED_OP)
+#undef CACHED_OP
+
+#define BINARY_OP(Name) \
+ template <BinaryOperationHint kHint> \
+ struct Name##Operator final : public Operator1<BinaryOperationHint> { \
+ Name##Operator() \
+ : Operator1<BinaryOperationHint>(IrOpcode::kJS##Name, \
+ Operator::kNoProperties, "JS" #Name, \
+ 2, 1, 1, 1, 1, 2, kHint) {} \
+ }; \
+ Name##Operator<BinaryOperationHint::kNone> k##Name##NoneOperator; \
+ Name##Operator<BinaryOperationHint::kSignedSmall> \
+ k##Name##SignedSmallOperator; \
+ Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator; \
+ Name##Operator<BinaryOperationHint::kNumberOrOddball> \
+ k##Name##NumberOrOddballOperator; \
+ Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
+ BINARY_OP_LIST(BINARY_OP)
+#undef BINARY_OP
+
+#define COMPARE_OP(Name, properties) \
+ template <CompareOperationHint kHint> \
+ struct Name##Operator final : public Operator1<CompareOperationHint> { \
+ Name##Operator() \
+ : Operator1<CompareOperationHint>( \
+ IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
+ Operator::ZeroIfNoThrow(properties), kHint) {} \
+ }; \
+ Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator; \
+ Name##Operator<CompareOperationHint::kSignedSmall> \
+ k##Name##SignedSmallOperator; \
+ Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator; \
+ Name##Operator<CompareOperationHint::kNumberOrOddball> \
+ k##Name##NumberOrOddballOperator; \
+ Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
+ COMPARE_OP_LIST(COMPARE_OP)
+#undef COMPARE_OP
};
-
static base::LazyInstance<JSOperatorGlobalCache>::type kCache =
LAZY_INSTANCE_INITIALIZER;
-
JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
: cache_(kCache.Get()), zone_(zone) {}
-
-#define CACHED(Name, properties, value_input_count, value_output_count) \
- const Operator* JSOperatorBuilder::Name() { \
- return &cache_.k##Name##Operator; \
+#define CACHED_OP(Name, properties, value_input_count, value_output_count) \
+ const Operator* JSOperatorBuilder::Name() { \
+ return &cache_.k##Name##Operator; \
}
-CACHED_OP_LIST(CACHED)
-#undef CACHED
-
-const Operator* JSOperatorBuilder::BitwiseOr(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSBitwiseOr, Operator::kNoProperties, // opcode
- "JSBitwiseOr", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::BitwiseXor(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSBitwiseXor, Operator::kNoProperties, // opcode
- "JSBitwiseXor", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::BitwiseAnd(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSBitwiseAnd, Operator::kNoProperties, // opcode
- "JSBitwiseAnd", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::ShiftLeft(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSShiftLeft, Operator::kNoProperties, // opcode
- "JSShiftLeft", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::ShiftRight(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSShiftRight, Operator::kNoProperties, // opcode
- "JSShiftRight", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::ShiftRightLogical(
- BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSShiftRightLogical, Operator::kNoProperties, // opcode
- "JSShiftRightLogical", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::Add(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSAdd, Operator::kNoProperties, // opcode
- "JSAdd", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::Subtract(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSSubtract, Operator::kNoProperties, // opcode
- "JSSubtract", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::Multiply(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSMultiply, Operator::kNoProperties, // opcode
- "JSMultiply", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::Divide(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSDivide, Operator::kNoProperties, // opcode
- "JSDivide", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
-const Operator* JSOperatorBuilder::Modulus(BinaryOperationHints hints) {
- // TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<BinaryOperationHints>( //--
- IrOpcode::kJSModulus, Operator::kNoProperties, // opcode
- "JSModulus", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- hints); // parameter
-}
-
+CACHED_OP_LIST(CACHED_OP)
+#undef CACHED_OP
+
+#define BINARY_OP(Name) \
+ const Operator* JSOperatorBuilder::Name(BinaryOperationHint hint) { \
+ switch (hint) { \
+ case BinaryOperationHint::kNone: \
+ return &cache_.k##Name##NoneOperator; \
+ case BinaryOperationHint::kSignedSmall: \
+ return &cache_.k##Name##SignedSmallOperator; \
+ case BinaryOperationHint::kSigned32: \
+ return &cache_.k##Name##Signed32Operator; \
+ case BinaryOperationHint::kNumberOrOddball: \
+ return &cache_.k##Name##NumberOrOddballOperator; \
+ case BinaryOperationHint::kAny: \
+ return &cache_.k##Name##AnyOperator; \
+ } \
+ UNREACHABLE(); \
+ return nullptr; \
+ }
+BINARY_OP_LIST(BINARY_OP)
+#undef BINARY_OP
+
+#define COMPARE_OP(Name, ...) \
+ const Operator* JSOperatorBuilder::Name(CompareOperationHint hint) { \
+ switch (hint) { \
+ case CompareOperationHint::kNone: \
+ return &cache_.k##Name##NoneOperator; \
+ case CompareOperationHint::kSignedSmall: \
+ return &cache_.k##Name##SignedSmallOperator; \
+ case CompareOperationHint::kNumber: \
+ return &cache_.k##Name##NumberOperator; \
+ case CompareOperationHint::kNumberOrOddball: \
+ return &cache_.k##Name##NumberOrOddballOperator; \
+ case CompareOperationHint::kAny: \
+ return &cache_.k##Name##AnyOperator; \
+ } \
+ UNREACHABLE(); \
+ return nullptr; \
+ }
+COMPARE_OP_LIST(COMPARE_OP)
+#undef COMPARE_OP
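
The net effect of the BINARY_OP/COMPARE_OP machinery is to replace per-call zone allocation of hinted operators with one statically constructed instance per (operation, hint) pair, handed out by a switch. A self-contained sketch of the caching pattern with invented toy types:

    enum class Hint { kNone, kSignedSmall, kAny };

    struct ToyOperator {
      const char* name;
      Hint hint;
    };

    // One statically allocated instance per hint, as in JSOperatorGlobalCache.
    struct ToyCache {
      ToyOperator add_none{"ToyAdd", Hint::kNone};
      ToyOperator add_signed_small{"ToyAdd", Hint::kSignedSmall};
      ToyOperator add_any{"ToyAdd", Hint::kAny};
    };

    // The builder returns a pointer into the cache instead of allocating.
    const ToyOperator* ToyAdd(const ToyCache& cache, Hint hint) {
      switch (hint) {
        case Hint::kNone:        return &cache.add_none;
        case Hint::kSignedSmall: return &cache.add_signed_small;
        case Hint::kAny:         return &cache.add_any;
      }
      return nullptr;  // unreachable for well-formed hints, as UNREACHABLE()
    }
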
const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- return new (zone()) Operator1<ToBooleanHints>( //--
- IrOpcode::kJSToBoolean, Operator::kEliminatable, // opcode
- "JSToBoolean", // name
- 1, 1, 0, 1, 1, 0, // inputs/outputs
- hints); // parameter
+ return new (zone()) Operator1<ToBooleanHints>( //--
+ IrOpcode::kJSToBoolean, Operator::kPure, // opcode
+ "JSToBoolean", // name
+ 1, 0, 0, 1, 0, 0, // inputs/outputs
+ hints); // parameter
}
const Operator* JSOperatorBuilder::CallFunction(
@@ -599,11 +612,11 @@ const Operator* JSOperatorBuilder::CallConstruct(
const Operator* JSOperatorBuilder::ConvertReceiver(
ConvertReceiverMode convert_mode) {
- return new (zone()) Operator1<ConvertReceiverMode>( // --
- IrOpcode::kJSConvertReceiver, Operator::kNoThrow, // opcode
- "JSConvertReceiver", // name
- 1, 1, 1, 1, 1, 0, // counts
- convert_mode); // parameter
+ return new (zone()) Operator1<ConvertReceiverMode>( // --
+ IrOpcode::kJSConvertReceiver, Operator::kEliminatable, // opcode
+ "JSConvertReceiver", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ convert_mode); // parameter
}
const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
@@ -626,6 +639,21 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
+const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
+ "JSGeneratorStore", // name
+ 3 + register_count, 1, 1, 0, 1, 0, // counts
+ register_count); // parameter
+}
+
+const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSGeneratorRestoreRegister, Operator::kNoThrow, // opcode
+ "JSGeneratorRestoreRegister", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ index); // parameter
+}
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
Handle<Name> name,
@@ -707,11 +735,11 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
- return new (zone()) Operator1<CreateArgumentsType>( // --
- IrOpcode::kJSCreateArguments, Operator::kNoThrow, // opcode
- "JSCreateArguments", // name
- 1, 1, 1, 1, 1, 0, // counts
- type); // parameter
+ return new (zone()) Operator1<CreateArgumentsType>( // --
+ IrOpcode::kJSCreateArguments, Operator::kEliminatable, // opcode
+ "JSCreateArguments", // name
+ 1, 1, 0, 1, 1, 0, // counts
+ type); // parameter
}
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index eb323c9c12..19022fa881 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -344,7 +344,6 @@ std::ostream& operator<<(std::ostream&, CreateClosureParameters const&);
const CreateClosureParameters& CreateClosureParametersOf(const Operator* op);
-
// Defines shared information for the literal that should be created. This is
// used as parameter by JSCreateLiteralArray, JSCreateLiteralObject and
// JSCreateLiteralRegExp operators.
@@ -375,6 +374,9 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
+const BinaryOperationHint BinaryOperationHintOf(const Operator* op);
+
+const CompareOperationHint CompareOperationHintOf(const Operator* op);
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
@@ -383,25 +385,26 @@ class JSOperatorBuilder final : public ZoneObject {
public:
explicit JSOperatorBuilder(Zone* zone);
- const Operator* Equal();
- const Operator* NotEqual();
- const Operator* StrictEqual();
- const Operator* StrictNotEqual();
- const Operator* LessThan();
- const Operator* GreaterThan();
- const Operator* LessThanOrEqual();
- const Operator* GreaterThanOrEqual();
- const Operator* BitwiseOr(BinaryOperationHints hints);
- const Operator* BitwiseXor(BinaryOperationHints hints);
- const Operator* BitwiseAnd(BinaryOperationHints hints);
- const Operator* ShiftLeft(BinaryOperationHints hints);
- const Operator* ShiftRight(BinaryOperationHints hints);
- const Operator* ShiftRightLogical(BinaryOperationHints hints);
- const Operator* Add(BinaryOperationHints hints);
- const Operator* Subtract(BinaryOperationHints hints);
- const Operator* Multiply(BinaryOperationHints hints);
- const Operator* Divide(BinaryOperationHints hints);
- const Operator* Modulus(BinaryOperationHints hints);
+ const Operator* Equal(CompareOperationHint hint);
+ const Operator* NotEqual(CompareOperationHint hint);
+ const Operator* StrictEqual(CompareOperationHint hint);
+ const Operator* StrictNotEqual(CompareOperationHint hint);
+ const Operator* LessThan(CompareOperationHint hint);
+ const Operator* GreaterThan(CompareOperationHint hint);
+ const Operator* LessThanOrEqual(CompareOperationHint hint);
+ const Operator* GreaterThanOrEqual(CompareOperationHint hint);
+
+ const Operator* BitwiseOr(BinaryOperationHint hint);
+ const Operator* BitwiseXor(BinaryOperationHint hint);
+ const Operator* BitwiseAnd(BinaryOperationHint hint);
+ const Operator* ShiftLeft(BinaryOperationHint hint);
+ const Operator* ShiftRight(BinaryOperationHint hint);
+ const Operator* ShiftRightLogical(BinaryOperationHint hint);
+ const Operator* Add(BinaryOperationHint hint);
+ const Operator* Subtract(BinaryOperationHint hint);
+ const Operator* Multiply(BinaryOperationHint hint);
+ const Operator* Divide(BinaryOperationHint hint);
+ const Operator* Modulus(BinaryOperationHint hint);
const Operator* ToBoolean(ToBooleanHints hints);
const Operator* ToInteger();
@@ -410,7 +413,6 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* ToNumber();
const Operator* ToObject();
const Operator* ToString();
- const Operator* Yield();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
@@ -471,6 +473,13 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadMessage();
const Operator* StoreMessage();
+ // Used to implement Ignition's SuspendGenerator bytecode.
+ const Operator* GeneratorStore(int register_count);
+
+ // Used to implement Ignition's ResumeGenerator bytecode.
+ const Operator* GeneratorRestoreContinuation();
+ const Operator* GeneratorRestoreRegister(int index);
+
const Operator* StackCheck();
const Operator* CreateFunctionContext(int slot_count);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 7e1a0dc24e..89ab0de97a 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/js-typed-lowering.h"
+
+#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -27,7 +29,51 @@ class JSBinopReduction final {
JSBinopReduction(JSTypedLowering* lowering, Node* node)
: lowering_(lowering), node_(node) {}
- void ConvertInputsToNumber(Node* frame_state) {
+ bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_NE(0, node_->op()->ControlOutputCount());
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node_->op()));
+ switch (BinaryOperationHintOf(node_->op())) {
+ case BinaryOperationHint::kSignedSmall:
+ *hint = NumberOperationHint::kSignedSmall;
+ return true;
+ case BinaryOperationHint::kSigned32:
+ *hint = NumberOperationHint::kSigned32;
+ return true;
+ case BinaryOperationHint::kNumberOrOddball:
+ *hint = NumberOperationHint::kNumberOrOddball;
+ return true;
+ case BinaryOperationHint::kAny:
+ case BinaryOperationHint::kNone:
+ break;
+ }
+ }
+ return false;
+ }
+
+ bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ switch (CompareOperationHintOf(node_->op())) {
+ case CompareOperationHint::kSignedSmall:
+ *hint = NumberOperationHint::kSignedSmall;
+ return true;
+ case CompareOperationHint::kNumber:
+ *hint = NumberOperationHint::kNumber;
+ return true;
+ case CompareOperationHint::kNumberOrOddball:
+ *hint = NumberOperationHint::kNumberOrOddball;
+ return true;
+ case CompareOperationHint::kAny:
+ case CompareOperationHint::kNone:
+ break;
+ }
+ }
+ return false;
+ }
+
+ void ConvertInputsToNumber() {
// To convert the inputs to numbers, we have to provide frame states
// for lazy bailouts in the ToNumber conversions.
// We use a little hack here: we take the frame state before the binary
@@ -43,17 +89,17 @@ class JSBinopReduction final {
bool handles_exception = NodeProperties::IsExceptionalCall(node_);
if (!left_is_primitive && !right_is_primitive && handles_exception) {
- ConvertBothInputsToNumber(&left_input, &right_input, frame_state);
+ ConvertBothInputsToNumber(&left_input, &right_input);
} else {
left_input = left_is_primitive
? ConvertPlainPrimitiveToNumber(left())
: ConvertSingleInputToNumber(
- left(), CreateFrameStateForLeftInput(frame_state));
- right_input = right_is_primitive
- ? ConvertPlainPrimitiveToNumber(right())
- : ConvertSingleInputToNumber(
- right(), CreateFrameStateForRightInput(
- frame_state, left_input));
+ left(), CreateFrameStateForLeftInput());
+ right_input =
+ right_is_primitive
+ ? ConvertPlainPrimitiveToNumber(right())
+ : ConvertSingleInputToNumber(
+ right(), CreateFrameStateForRightInput(left_input));
}
node_->ReplaceInput(0, left_input);
@@ -107,23 +153,52 @@ class JSBinopReduction final {
return lowering_->Changed(node_);
}
- Reduction ChangeToStringComparisonOperator(const Operator* op,
- bool invert = false) {
- if (node_->op()->ControlInputCount() > 0) {
- lowering_->RelaxControls(node_);
+ Reduction ChangeToSpeculativeOperator(const Operator* op, bool invert,
+ Type* upper_bound) {
+ DCHECK_EQ(1, op->EffectInputCount());
+ DCHECK_EQ(1, op->EffectOutputCount());
+ DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+ DCHECK_EQ(1, op->ControlInputCount());
+ DCHECK_EQ(0, op->ControlOutputCount());
+ DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
+ DCHECK_EQ(2, op->ValueInputCount());
+
+ DCHECK_EQ(1, node_->op()->EffectInputCount());
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ DCHECK_EQ(1, node_->op()->ControlInputCount());
+ DCHECK_EQ(2, node_->op()->ValueInputCount());
+
+ // Reconnect the control output to bypass the IfSuccess node and
+ // possibly disconnect from the IfException node.
+ for (Edge edge : node_->use_edges()) {
+ Node* const user = edge.from();
+ DCHECK(!user->IsDead());
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (user->opcode() == IrOpcode::kIfSuccess) {
+ user->ReplaceUses(NodeProperties::GetControlInput(node_));
+ user->Kill();
+ } else {
+ DCHECK_EQ(user->opcode(), IrOpcode::kIfException);
+ edge.UpdateTo(jsgraph()->Dead());
+ }
+ }
}
- // String comparison operators need effect and control inputs, so copy them
- // over.
- Node* effect = NodeProperties::GetEffectInput(node_);
- Node* control = NodeProperties::GetControlInput(node_);
- node_->ReplaceInput(2, effect);
- node_->ReplaceInput(3, control);
-
- node_->TrimInputCount(4);
+
+ // Remove the frame state and the context.
+ if (OperatorProperties::HasFrameStateInput(node_->op())) {
+ node_->RemoveInput(NodeProperties::FirstFrameStateIndex(node_));
+ }
+ node_->RemoveInput(NodeProperties::FirstContextIndex(node_));
+
NodeProperties::ChangeOp(node_, op);
+ // Update the type to number.
+ Type* node_type = NodeProperties::GetType(node_);
+ NodeProperties::SetType(node_,
+ Type::Intersect(node_type, upper_bound, zone()));
+
if (invert) {
- // Insert a boolean-not to invert the value.
+      // Insert a boolean not to invert the value.
Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
node_->ReplaceUses(value);
// Note: ReplaceUses() smashes all uses, so smash it back here.
@@ -137,6 +212,72 @@ class JSBinopReduction final {
return ChangeToPureOperator(op, false, type);
}
+ Reduction ChangeToSpeculativeOperator(const Operator* op, Type* type) {
+ return ChangeToSpeculativeOperator(op, false, type);
+ }
+
+ const Operator* NumberOp() {
+ switch (node_->opcode()) {
+ case IrOpcode::kJSAdd:
+ return simplified()->NumberAdd();
+ case IrOpcode::kJSSubtract:
+ return simplified()->NumberSubtract();
+ case IrOpcode::kJSMultiply:
+ return simplified()->NumberMultiply();
+ case IrOpcode::kJSDivide:
+ return simplified()->NumberDivide();
+ case IrOpcode::kJSModulus:
+ return simplified()->NumberModulus();
+ case IrOpcode::kJSBitwiseAnd:
+ return simplified()->NumberBitwiseAnd();
+ case IrOpcode::kJSBitwiseOr:
+ return simplified()->NumberBitwiseOr();
+ case IrOpcode::kJSBitwiseXor:
+ return simplified()->NumberBitwiseXor();
+ case IrOpcode::kJSShiftLeft:
+ return simplified()->NumberShiftLeft();
+ case IrOpcode::kJSShiftRight:
+ return simplified()->NumberShiftRight();
+ case IrOpcode::kJSShiftRightLogical:
+ return simplified()->NumberShiftRightLogical();
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+ switch (node_->opcode()) {
+ case IrOpcode::kJSAdd:
+ return simplified()->SpeculativeNumberAdd(hint);
+ case IrOpcode::kJSSubtract:
+ return simplified()->SpeculativeNumberSubtract(hint);
+ case IrOpcode::kJSMultiply:
+ return simplified()->SpeculativeNumberMultiply(hint);
+ case IrOpcode::kJSDivide:
+ return simplified()->SpeculativeNumberDivide(hint);
+ case IrOpcode::kJSModulus:
+ return simplified()->SpeculativeNumberModulus(hint);
+ case IrOpcode::kJSBitwiseAnd:
+ return simplified()->SpeculativeNumberBitwiseAnd(hint);
+ case IrOpcode::kJSBitwiseOr:
+ return simplified()->SpeculativeNumberBitwiseOr(hint);
+ case IrOpcode::kJSBitwiseXor:
+ return simplified()->SpeculativeNumberBitwiseXor(hint);
+ case IrOpcode::kJSShiftLeft:
+ return simplified()->SpeculativeNumberShiftLeft(hint);
+ case IrOpcode::kJSShiftRight:
+ return simplified()->SpeculativeNumberShiftRight(hint);
+ case IrOpcode::kJSShiftRightLogical:
+ return simplified()->SpeculativeNumberShiftRightLogical(hint);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
bool LeftInputIs(Type* t) { return left_type()->Is(t); }
bool RightInputIs(Type* t) { return right_type()->Is(t); }
@@ -160,12 +301,12 @@ class JSBinopReduction final {
Node* right() { return NodeProperties::GetValueInput(node_, 1); }
Type* left_type() { return NodeProperties::GetType(node_->InputAt(0)); }
Type* right_type() { return NodeProperties::GetType(node_->InputAt(1)); }
+ Type* type() { return NodeProperties::GetType(node_); }
SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
Graph* graph() const { return lowering_->graph(); }
JSGraph* jsgraph() { return lowering_->jsgraph(); }
JSOperatorBuilder* javascript() { return lowering_->javascript(); }
- MachineOperatorBuilder* machine() { return lowering_->machine(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
Zone* zone() const { return graph()->zone(); }
@@ -173,73 +314,18 @@ class JSBinopReduction final {
JSTypedLowering* lowering_; // The containing lowering instance.
Node* node_; // The original node.
- Node* CreateFrameStateForLeftInput(Node* frame_state) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-
- if (state_info.bailout_id() == BailoutId::None()) {
- // Dummy frame state => just leave it as is.
- return frame_state;
- }
-
- // If the frame state is already the right one, just return it.
- if (state_info.state_combine().kind() == OutputFrameStateCombine::kPokeAt &&
- state_info.state_combine().GetOffsetToPokeAt() == 1) {
- return frame_state;
- }
-
- // Here, we smash the result of the conversion into the slot just below
- // the stack top. This is the slot that full code uses to store the
- // left operand.
- const Operator* op = jsgraph()->common()->FrameState(
- state_info.bailout_id(), OutputFrameStateCombine::PokeAt(1),
- state_info.function_info());
-
- return graph()->NewNode(op,
- frame_state->InputAt(kFrameStateParametersInput),
- frame_state->InputAt(kFrameStateLocalsInput),
- frame_state->InputAt(kFrameStateStackInput),
- frame_state->InputAt(kFrameStateContextInput),
- frame_state->InputAt(kFrameStateFunctionInput),
- frame_state->InputAt(kFrameStateOuterStateInput));
+ Node* CreateFrameStateForLeftInput() {
+ // Deoptimization is disabled => return dummy frame state instead.
+ Node* dummy_state = NodeProperties::GetFrameStateInput(node_);
+ DCHECK(OpParameter<FrameStateInfo>(dummy_state).bailout_id().IsNone());
+ return dummy_state;
}
- Node* CreateFrameStateForRightInput(Node* frame_state, Node* converted_left) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-
- if (state_info.bailout_id() == BailoutId::None()) {
- // Dummy frame state => just leave it as is.
- return frame_state;
- }
-
- // Create a frame state that stores the result of the operation to the
- // top of the stack (i.e., the slot used for the right operand).
- const Operator* op = jsgraph()->common()->FrameState(
- state_info.bailout_id(), OutputFrameStateCombine::PokeAt(0),
- state_info.function_info());
-
- // Change the left operand {converted_left} on the expression stack.
- Node* stack = frame_state->InputAt(2);
- DCHECK_EQ(stack->opcode(), IrOpcode::kStateValues);
- DCHECK_GE(stack->InputCount(), 2);
-
- // TODO(jarin) Allocate in a local zone or a reusable buffer.
- NodeVector new_values(stack->InputCount(), zone());
- for (int i = 0; i < stack->InputCount(); i++) {
- if (i == stack->InputCount() - 2) {
- new_values[i] = converted_left;
- } else {
- new_values[i] = stack->InputAt(i);
- }
- }
- Node* new_stack =
- graph()->NewNode(stack->op(), stack->InputCount(), &new_values.front());
-
- return graph()->NewNode(
- op, frame_state->InputAt(kFrameStateParametersInput),
- frame_state->InputAt(kFrameStateLocalsInput), new_stack,
- frame_state->InputAt(kFrameStateContextInput),
- frame_state->InputAt(kFrameStateFunctionInput),
- frame_state->InputAt(kFrameStateOuterStateInput));
+ Node* CreateFrameStateForRightInput(Node* converted_left) {
+ // Deoptimization is disabled => return dummy frame state instead.
+ Node* dummy_state = NodeProperties::GetFrameStateInput(node_);
+ DCHECK(OpParameter<FrameStateInfo>(dummy_state).bailout_id().IsNone());
+ return dummy_state;
}
Node* ConvertPlainPrimitiveToNumber(Node* node) {
@@ -247,45 +333,45 @@ class JSBinopReduction final {
// Avoid inserting too many eager ToNumber() operations.
Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
if (reduction.Changed()) return reduction.replacement();
- // TODO(jarin) Use PlainPrimitiveToNumber once we have it.
- return graph()->NewNode(
- javascript()->ToNumber(), node, jsgraph()->NoContextConstant(),
- jsgraph()->EmptyFrameState(), graph()->start(), graph()->start());
+ if (NodeProperties::GetType(node)->Is(Type::Number())) {
+ return node;
+ }
+ return graph()->NewNode(simplified()->PlainPrimitiveToNumber(), node);
}
Node* ConvertSingleInputToNumber(Node* node, Node* frame_state) {
DCHECK(!NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
Node* const n = graph()->NewNode(javascript()->ToNumber(), node, context(),
frame_state, effect(), control());
- NodeProperties::ReplaceUses(node_, node_, node_, n, n);
+ Node* const if_success = graph()->NewNode(common()->IfSuccess(), n);
+ NodeProperties::ReplaceControlInput(node_, if_success);
+ NodeProperties::ReplaceUses(node_, node_, node_, node_, n);
update_effect(n);
return n;
}
- void ConvertBothInputsToNumber(Node** left_result, Node** right_result,
- Node* frame_state) {
+ void ConvertBothInputsToNumber(Node** left_result, Node** right_result) {
Node* projections[2];
// Find {IfSuccess} and {IfException} continuations of the operation.
NodeProperties::CollectControlProjections(node_, projections, 2);
- IfExceptionHint hint = OpParameter<IfExceptionHint>(projections[1]);
Node* if_exception = projections[1];
Node* if_success = projections[0];
// Insert two ToNumber() operations that both potentially throw.
- Node* left_state = CreateFrameStateForLeftInput(frame_state);
+ Node* left_state = CreateFrameStateForLeftInput();
Node* left_conv =
graph()->NewNode(javascript()->ToNumber(), left(), context(),
left_state, effect(), control());
Node* left_success = graph()->NewNode(common()->IfSuccess(), left_conv);
- Node* right_state = CreateFrameStateForRightInput(frame_state, left_conv);
+ Node* right_state = CreateFrameStateForRightInput(left_conv);
Node* right_conv =
graph()->NewNode(javascript()->ToNumber(), right(), context(),
right_state, left_conv, left_success);
Node* left_exception =
- graph()->NewNode(common()->IfException(hint), left_conv, left_conv);
+ graph()->NewNode(common()->IfException(), left_conv, left_conv);
Node* right_exception =
- graph()->NewNode(common()->IfException(hint), right_conv, right_conv);
+ graph()->NewNode(common()->IfException(), right_conv, right_conv);
NodeProperties::ReplaceControlInput(if_success, right_conv);
update_effect(right_conv);
@@ -356,30 +442,47 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
}
}
-
Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
-
JSBinopReduction r(this, node);
+ NumberOperationHint hint;
+ if (r.GetBinaryNumberOperationHint(&hint)) {
+ if (hint == NumberOperationHint::kNumberOrOddball &&
+ r.BothInputsAre(Type::PlainPrimitive()) &&
+ r.NeitherInputCanBe(Type::StringOrReceiver())) {
+ // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+ }
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberAdd(hint), Type::Number());
+ }
if (r.BothInputsAre(Type::Number())) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
+ r.ConvertInputsToNumber();
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
- if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
+ if ((r.BothInputsAre(Type::PlainPrimitive()) ||
+ !(flags() & kDeoptimizationEnabled)) &&
+ r.NeitherInputCanBe(Type::StringOrReceiver())) {
// JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToNumber();
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
- if (r.BothInputsAre(Type::String())) {
- // JSAdd(x:string, y:string) => CallStub[StringAdd](x, y)
+ if (r.OneInputIs(Type::String())) {
+ StringAddFlags flags = STRING_ADD_CHECK_NONE;
+ if (!r.LeftInputIs(Type::String())) {
+ flags = STRING_ADD_CONVERT_LEFT;
+ } else if (!r.RightInputIs(Type::String())) {
+ flags = STRING_ADD_CONVERT_RIGHT;
+ }
+ // JSAdd(x:string, y) => CallStub[StringAdd](x, y)
+ // JSAdd(x, y:string) => CallStub[StringAdd](x, y)
Callable const callable =
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
- DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node->op()));
- node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
NodeProperties::ChangeOp(node, common()->Call(desc));
@@ -388,64 +491,62 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
return NoChange();
}
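
The string path above no longer requires both operands to be strings: when only one side is known to be a String, the StringAdd stub is asked to convert the other side first. That choice reduces to a small table; below is a minimal standalone sketch of it (the enum mirrors the names used above, but this is illustrative plain C++, not the V8 API):

    #include <cassert>

    enum StringAddFlags {
      STRING_ADD_CHECK_NONE,     // both sides are already strings
      STRING_ADD_CONVERT_LEFT,   // stub must ToString the left input
      STRING_ADD_CONVERT_RIGHT,  // stub must ToString the right input
    };

    // Mirrors the flag choice in ReduceJSAdd: at least one input is String.
    StringAddFlags PickFlags(bool left_is_string, bool right_is_string) {
      assert(left_is_string || right_is_string);
      if (!left_is_string) return STRING_ADD_CONVERT_LEFT;
      if (!right_is_string) return STRING_ADD_CONVERT_RIGHT;
      return STRING_ADD_CHECK_NONE;
    }

    int main() {
      assert(PickFlags(true, true) == STRING_ADD_CHECK_NONE);
      assert(PickFlags(false, true) == STRING_ADD_CONVERT_LEFT);
      assert(PickFlags(true, false) == STRING_ADD_CONVERT_RIGHT);
    }
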
-
-Reduction JSTypedLowering::ReduceJSModulus(Node* node) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
-
+Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Number())) {
- // JSModulus(x:number, y:number) => NumberModulus(x, y)
- return r.ChangeToPureOperator(simplified()->NumberModulus(),
- Type::Number());
+ NumberOperationHint hint;
+ if (r.GetBinaryNumberOperationHint(&hint)) {
+ if (hint == NumberOperationHint::kNumberOrOddball &&
+ r.BothInputsAre(Type::PlainPrimitive())) {
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
+ }
+ return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
+ Type::Number());
+ }
+ if (r.BothInputsAre(Type::PlainPrimitive()) ||
+ !(flags() & kDeoptimizationEnabled)) {
+ r.ConvertInputsToNumber();
+ return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
}
return NoChange();
}
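
ReduceNumberBinop and the reducers that follow all apply the same three-way ladder: prefer a speculative operator when type feedback recorded a number hint, fall back to a pure operator when both inputs are plain primitives (or deoptimization is unavailable, so speculation is pointless anyway), and otherwise leave the node for a later pass. A compact model of that ladder, ignoring the kNumberOrOddball refinement (illustrative names, not V8 types):

    #include <cassert>

    enum class Lowering { kSpeculative, kPure, kNoChange };

    // hint_available: binary-op feedback recorded a NumberOperationHint.
    // plain_primitive: both inputs are typed PlainPrimitive.
    // deopt_enabled:  the kDeoptimizationEnabled reducer flag is set.
    Lowering Classify(bool hint_available, bool plain_primitive,
                      bool deopt_enabled) {
      if (hint_available) return Lowering::kSpeculative;
      if (plain_primitive || !deopt_enabled) return Lowering::kPure;
      return Lowering::kNoChange;
    }

    int main() {
      assert(Classify(true, false, true) == Lowering::kSpeculative);
      assert(Classify(false, true, true) == Lowering::kPure);    // pure NumberOp
      assert(Classify(false, false, false) == Lowering::kPure);  // no deopt support
      assert(Classify(false, false, true) == Lowering::kNoChange);
    }
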
-
-Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
- const Operator* numberOp) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
-
+Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
JSBinopReduction r(this, node);
- if (numberOp == simplified()->NumberModulus()) {
- if (r.BothInputsAre(Type::Number())) {
- return r.ChangeToPureOperator(numberOp, Type::Number());
- }
- return NoChange();
+ NumberOperationHint hint;
+ if (r.GetBinaryNumberOperationHint(&hint)) {
+ return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
+ Type::Signed32());
}
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
- return r.ChangeToPureOperator(numberOp, Type::Number());
-}
-
-
-Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
-
- JSBinopReduction r(this, node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
- r.ConvertInputsToUI32(kSigned, kSigned);
- return r.ChangeToPureOperator(intOp, Type::Integral32());
+ if (r.BothInputsAre(Type::PlainPrimitive()) ||
+ !(flags() & kDeoptimizationEnabled)) {
+ r.ConvertInputsToNumber();
+ r.ConvertInputsToUI32(kSigned, kSigned);
+ return r.ChangeToPureOperator(r.NumberOp(), Type::Signed32());
+ }
+ return NoChange();
}
-
-Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
- Signedness left_signedness,
- const Operator* shift_op) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
-
+Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
JSBinopReduction r(this, node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
- r.ConvertInputsToUI32(left_signedness, kUnsigned);
- return r.ChangeToPureOperator(shift_op);
+ NumberOperationHint hint;
+ if (r.GetBinaryNumberOperationHint(&hint)) {
+ return r.ChangeToSpeculativeOperator(
+ r.SpeculativeNumberOp(hint),
+ signedness == kUnsigned ? Type::Unsigned32() : Type::Signed32());
+ }
+ if (r.BothInputsAre(Type::PlainPrimitive()) ||
+ !(flags() & kDeoptimizationEnabled)) {
+ r.ConvertInputsToNumber();
+ r.ConvertInputsToUI32(signedness, kUnsigned);
+ return r.ChangeToPureOperator(r.NumberOp(), signedness == kUnsigned
+ ? Type::Unsigned32()
+ : Type::Signed32());
+ }
+ return NoChange();
}
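
ConvertInputsToUI32 and the shift lowering above rely on JavaScript's int32 semantics: << and >> treat their left operand as Signed32, >>> treats it as Unsigned32, and all three use only the low five bits of the shift count. A self-contained check of those rules (assumes two's-complement arithmetic right shift, which C++20 guarantees and mainstream compilers have long provided):

    #include <cassert>
    #include <cstdint>

    int32_t ShiftLeft(int32_t x, int32_t n) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << (n & 31));
    }
    int32_t ShiftRightSigned(int32_t x, int32_t n) { return x >> (n & 31); }
    uint32_t ShiftRightUnsigned(uint32_t x, int32_t n) { return x >> (n & 31); }

    int main() {
      assert(ShiftRightSigned(-8, 1) == -4);         // -8 >> 1  === -4
      assert(ShiftRightUnsigned(0xFFFFFFF8u, 1) ==   // -8 >>> 1 === 2147483644,
             0x7FFFFFFCu);                           // hence Type::Unsigned32()
      assert(ShiftLeft(1, 33) == 2);                 // count taken mod 32
    }
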
-
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
-
JSBinopReduction r(this, node);
if (r.BothInputsAre(Type::String())) {
// If both inputs are definitely strings, perform a string comparison.
@@ -468,70 +569,109 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
default:
return NoChange();
}
- r.ChangeToStringComparisonOperator(stringOp);
+ r.ChangeToPureOperator(stringOp);
return Changed(node);
}
- if (r.OneInputCannotBe(Type::StringOrReceiver())) {
- const Operator* less_than;
- const Operator* less_than_or_equal;
- if (r.BothInputsAre(Type::Unsigned32())) {
- less_than = machine()->Uint32LessThan();
- less_than_or_equal = machine()->Uint32LessThanOrEqual();
- } else if (r.BothInputsAre(Type::Signed32())) {
- less_than = machine()->Int32LessThan();
- less_than_or_equal = machine()->Int32LessThanOrEqual();
+
+ NumberOperationHint hint;
+ const Operator* less_than;
+ const Operator* less_than_or_equal;
+ if (r.BothInputsAre(Type::Signed32()) ||
+ r.BothInputsAre(Type::Unsigned32())) {
+ less_than = simplified()->NumberLessThan();
+ less_than_or_equal = simplified()->NumberLessThanOrEqual();
+ } else if (r.GetCompareNumberOperationHint(&hint)) {
+ less_than = simplified()->SpeculativeNumberLessThan(hint);
+ less_than_or_equal = simplified()->SpeculativeNumberLessThanOrEqual(hint);
+ } else if (r.OneInputCannotBe(Type::StringOrReceiver()) &&
+ (r.BothInputsAre(Type::PlainPrimitive()) ||
+ !(flags() & kDeoptimizationEnabled))) {
+ r.ConvertInputsToNumber();
+ less_than = simplified()->NumberLessThan();
+ less_than_or_equal = simplified()->NumberLessThanOrEqual();
+ } else {
+ return NoChange();
+ }
+ const Operator* comparison;
+ switch (node->opcode()) {
+ case IrOpcode::kJSLessThan:
+ comparison = less_than;
+ break;
+ case IrOpcode::kJSGreaterThan:
+ comparison = less_than;
+ r.SwapInputs(); // a > b => b < a
+ break;
+ case IrOpcode::kJSLessThanOrEqual:
+ comparison = less_than_or_equal;
+ break;
+ case IrOpcode::kJSGreaterThanOrEqual:
+ comparison = less_than_or_equal;
+ r.SwapInputs(); // a >= b => b <= a
+ break;
+ default:
+ return NoChange();
+ }
+ if (comparison->EffectInputCount() > 0) {
+ return r.ChangeToSpeculativeOperator(comparison, Type::Boolean());
+ } else {
+ return r.ChangeToPureOperator(comparison);
+ }
+}
+
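
The switch above canonicalizes all four relational operators onto a single "less than" (or "less than or equal") by swapping operands: a > b becomes b < a and a >= b becomes b <= a. This is sound on the numeric paths because swapping only flips the ordering; in particular both sides of each identity are false whenever an operand is NaN. A small sketch of the mapping:

    #include <cassert>
    #include <utility>

    enum Op { kLessThan, kGreaterThan, kLessThanOrEqual, kGreaterThanOrEqual };

    // Returns {use_less_than_or_equal, swap_inputs}.
    std::pair<bool, bool> Canonicalize(Op op) {
      switch (op) {
        case kLessThan:           return {false, false};
        case kGreaterThan:        return {false, true};  // a > b  => b < a
        case kLessThanOrEqual:    return {true, false};
        case kGreaterThanOrEqual: return {true, true};   // a >= b => b <= a
      }
      return {false, false};
    }

    int main() {
      double a = 2.0, b = 1.0;
      auto [le, swapped] = Canonicalize(kGreaterThan);
      double l = swapped ? b : a, r = swapped ? a : b;
      assert((le ? l <= r : l < r) == (a > b));  // 1 < 2 agrees with 2 > 1
    }
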
+Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
+ HeapObjectBinopMatcher m(node);
+ if (m.left().IsJSTypeOf() && m.right().HasValue() &&
+ m.right().Value()->IsString()) {
+ Node* replacement;
+ Node* input = m.left().InputAt(0);
+ Handle<String> value = Handle<String>::cast(m.right().Value());
+ if (String::Equals(value, factory()->boolean_string())) {
+ replacement =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(),
+ input, jsgraph()->TrueConstant()),
+ jsgraph()->TrueConstant(),
+ graph()->NewNode(simplified()->ReferenceEqual(),
+ input, jsgraph()->FalseConstant()));
+ } else if (String::Equals(value, factory()->function_string())) {
+ replacement = graph()->NewNode(simplified()->ObjectIsCallable(), input);
+ } else if (String::Equals(value, factory()->number_string())) {
+ replacement = graph()->NewNode(simplified()->ObjectIsNumber(), input);
+ } else if (String::Equals(value, factory()->string_string())) {
+ replacement = graph()->NewNode(simplified()->ObjectIsString(), input);
+ } else if (String::Equals(value, factory()->undefined_string())) {
+ replacement = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->ReferenceEqual(), input,
+ jsgraph()->NullConstant()),
+ jsgraph()->FalseConstant(),
+ graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
} else {
- // TODO(turbofan): mixed signed/unsigned int32 comparisons.
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
- r.ConvertInputsToNumber(frame_state);
- less_than = simplified()->NumberLessThan();
- less_than_or_equal = simplified()->NumberLessThanOrEqual();
+ return NoChange();
}
- const Operator* comparison;
- switch (node->opcode()) {
- case IrOpcode::kJSLessThan:
- comparison = less_than;
- break;
- case IrOpcode::kJSGreaterThan:
- comparison = less_than;
- r.SwapInputs(); // a > b => b < a
- break;
- case IrOpcode::kJSLessThanOrEqual:
- comparison = less_than_or_equal;
- break;
- case IrOpcode::kJSGreaterThanOrEqual:
- comparison = less_than_or_equal;
- r.SwapInputs(); // a >= b => b <= a
- break;
- default:
- return NoChange();
+ if (invert) {
+ replacement = graph()->NewNode(simplified()->BooleanNot(), replacement);
}
- return r.ChangeToPureOperator(comparison);
+ ReplaceWithValue(node, replacement);
+ return Replace(replacement);
}
- // TODO(turbofan): relax/remove effects of this operator in other cases.
- return NoChange(); // Keep a generic comparison.
+ return NoChange();
}
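
ReduceJSEqualTypeOf turns a typeof-against-literal comparison into a cheap per-kind predicate: ObjectIsCallable for "function", ObjectIsNumber for "number", and so on. The subtle case is "undefined", which must report true for undetectable objects (e.g. document.all) while excluding null, hence the Select over ReferenceEqual(input, null). The table it implements, modeled over a toy tagged value (not V8's object model):

    #include <cassert>
    #include <string>

    struct Value {
      enum Kind { kUndefined, kNull, kBoolean, kNumber, kString,
                  kFunction, kObject } kind;
      bool undetectable = false;  // e.g. document.all
    };

    bool TypeOfEquals(const Value& v, const std::string& s) {
      if (s == "boolean")  return v.kind == Value::kBoolean;  // true/false constants
      if (s == "function") return v.kind == Value::kFunction; // ObjectIsCallable
      if (s == "number")   return v.kind == Value::kNumber;   // ObjectIsNumber
      if (s == "string")   return v.kind == Value::kString;   // ObjectIsString
      if (s == "undefined")                                   // not null, undetectable
        return v.kind == Value::kUndefined ||
               (v.kind != Value::kNull && v.undetectable);
      return false;  // other literals fall through to the generic path
    }

    int main() {
      assert(TypeOfEquals({Value::kNumber}, "number"));
      assert(!TypeOfEquals({Value::kNull}, "undefined"));
      assert(TypeOfEquals({Value::kObject, /*undetectable=*/true}, "undefined"));
    }
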
-
Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
+ Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
+ if (reduction.Changed()) return reduction;
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Number())) {
- return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
- }
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
- invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
if (r.BothInputsAre(Type::Boolean())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
- invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.BothInputsAre(Type::Receiver())) {
- return r.ChangeToPureOperator(
- simplified()->ReferenceEqual(Type::Receiver()), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.OneInputIs(Type::Undetectable())) {
RelaxEffectsAndControls(node);
@@ -548,13 +688,21 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
}
return Changed(node);
}
+
+ NumberOperationHint hint;
+ if (r.BothInputsAre(Type::Signed32()) ||
+ r.BothInputsAre(Type::Unsigned32())) {
+ return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ } else if (r.GetCompareNumberOperationHint(&hint)) {
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
+ } else if (r.BothInputsAre(Type::Number())) {
+ return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ }
return NoChange();
}
-
Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
- if (flags() & kDisableBinaryOpReduction) return NoChange();
-
JSBinopReduction r(this, node);
if (r.left() == r.right()) {
    // x === x is always true unless x is NaN
@@ -564,62 +712,63 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
return Replace(replacement);
}
}
- if (r.OneInputCannotBe(Type::NumberOrString())) {
- // For values with canonical representation (i.e. not string nor number) an
- // empty type intersection means the values cannot be strictly equal.
+ if (r.OneInputCannotBe(Type::NumberOrSimdOrString())) {
+ // For values with canonical representation (i.e. neither String, nor
+ // Simd128Value nor Number) an empty type intersection means the values
+ // cannot be strictly equal.
if (!r.left_type()->Maybe(r.right_type())) {
Node* replacement = jsgraph()->BooleanConstant(invert);
ReplaceWithValue(node, replacement);
return Replace(replacement);
}
}
+
+ Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
+ if (reduction.Changed()) return reduction;
+
if (r.OneInputIs(the_hole_type_)) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(the_hole_type_),
- invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.OneInputIs(Type::Undefined())) {
- return r.ChangeToPureOperator(
- simplified()->ReferenceEqual(Type::Undefined()), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.OneInputIs(Type::Null())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()),
- invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.OneInputIs(Type::Boolean())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
- invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.OneInputIs(Type::Object())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()),
- invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.OneInputIs(Type::Receiver())) {
- return r.ChangeToPureOperator(
- simplified()->ReferenceEqual(Type::Receiver()), invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.BothInputsAre(Type::Unique())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Unique()),
- invert);
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
- invert);
+ return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
- if (r.BothInputsAre(Type::Number())) {
+
+ NumberOperationHint hint;
+ if (r.BothInputsAre(Type::Signed32()) ||
+ r.BothInputsAre(Type::Unsigned32())) {
+ return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+ } else if (r.GetCompareNumberOperationHint(&hint)) {
+ return r.ChangeToSpeculativeOperator(
+ simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
+ } else if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
}
- // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
return NoChange();
}
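
The x === x shortcut at the top of ReduceJSStrictEqual needs its type guard because NaN is the one value for which the identity comparison fails; for every other value the reduction to a constant is sound. The underlying IEEE-754 fact, checkable in two lines:

    #include <cassert>
    #include <cmath>

    int main() {
      double x = 1.5, n = std::nan("");
      assert(x == x);     // ordinary values: x === x holds
      assert(!(n == n));  // NaN is the sole value not equal to itself
    }
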
-
Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
Node* const input = node->InputAt(0);
Type* const input_type = NodeProperties::GetType(input);
- Node* const effect = NodeProperties::GetEffectInput(node);
if (input_type->Is(Type::Boolean())) {
// JSToBoolean(x:boolean) => x
- ReplaceWithValue(node, input, effect);
return Replace(input);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
@@ -629,15 +778,22 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
node->TrimInputCount(1);
NodeProperties::ChangeOp(node, simplified()->BooleanNot());
return Changed(node);
+ } else if (input_type->Is(Type::Number())) {
+ // JSToBoolean(x:number) => NumberLessThan(#0,NumberAbs(x))
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, jsgraph()->ZeroConstant());
+ node->ReplaceInput(1, graph()->NewNode(simplified()->NumberAbs(), input));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
+ return Changed(node);
} else if (input_type->Is(Type::String())) {
// JSToBoolean(x:string) => NumberLessThan(#0,x.length)
FieldAccess const access = AccessBuilder::ForStringLength();
Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- effect, graph()->start());
+ graph()->start(), graph()->start());
ReplaceWithValue(node, node, length);
node->ReplaceInput(0, jsgraph()->ZeroConstant());
node->ReplaceInput(1, length);
- node->TrimInputCount(2);
NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
return Changed(node);
}
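
The new Number branch encodes ToBoolean(x:number) as the single comparison 0 < abs(x): abs maps both +0 and -0 to +0 (false), every comparison involving NaN is false, and all remaining numbers compare greater than zero (true), which exactly matches JavaScript truthiness for numbers. A quick check of the identity:

    #include <cassert>
    #include <cmath>

    // NumberLessThan(#0, NumberAbs(x))
    bool ToBooleanNumber(double x) { return 0.0 < std::fabs(x); }

    int main() {
      assert(!ToBooleanNumber(0.0));
      assert(!ToBooleanNumber(-0.0));
      assert(!ToBooleanNumber(std::nan("")));
      assert(ToBooleanNumber(-3.5));
      assert(ToBooleanNumber(1e-300));  // tiny but nonzero numbers are truthy
    }
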
@@ -691,27 +847,6 @@ Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
}
Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
- if (input->opcode() == IrOpcode::kJSToNumber) {
- // Recursively try to reduce the input first.
- Reduction result = ReduceJSToNumber(input);
- if (result.Changed()) return result;
- return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x)
- }
- // Check for ToNumber truncation of signaling NaN to undefined mapping.
- if (input->opcode() == IrOpcode::kSelect) {
- Node* check = NodeProperties::GetValueInput(input, 0);
- Node* vtrue = NodeProperties::GetValueInput(input, 1);
- Type* vtrue_type = NodeProperties::GetType(vtrue);
- Node* vfalse = NodeProperties::GetValueInput(input, 2);
- Type* vfalse_type = NodeProperties::GetType(vfalse);
- if (vtrue_type->Is(Type::Undefined()) && vfalse_type->Is(Type::Number())) {
- if (check->opcode() == IrOpcode::kNumberIsHoleNaN &&
- check->InputAt(0) == vfalse) {
- // JSToNumber(Select(NumberIsHoleNaN(x), y:undefined, x:number)) => x
- return Replace(vfalse);
- }
- }
- }
// Try constant-folding of JSToNumber with constant inputs.
Type* input_type = NodeProperties::GetType(input);
if (input_type->IsConstant()) {
@@ -736,18 +871,9 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
// JSToNumber(null) => #0
return Replace(jsgraph()->ZeroConstant());
}
- if (input_type->Is(Type::Boolean())) {
- // JSToNumber(x:boolean) => BooleanToNumber(x)
- return Replace(graph()->NewNode(simplified()->BooleanToNumber(), input));
- }
- if (input_type->Is(Type::String())) {
- // JSToNumber(x:string) => StringToNumber(x)
- return Replace(graph()->NewNode(simplified()->StringToNumber(), input));
- }
return NoChange();
}
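
The constant folds above implement the ES ToNumber mapping for plain primitives: undefined becomes NaN, null becomes 0, and so on (the boolean and string cases are removed here because they are now lowered elsewhere). A standalone model of the mapping, with string parsing reduced to the simple cases (real ToNumber also trims whitespace, accepts hex literals, etc.):

    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <string>
    #include <variant>

    struct Undefined {};
    struct Null {};
    using Primitive = std::variant<Undefined, Null, bool, double, std::string>;

    // ES ToNumber for plain primitives, as constant-folded above.
    double ToNumber(const Primitive& p) {
      if (std::holds_alternative<Undefined>(p)) return std::nan("");
      if (std::holds_alternative<Null>(p)) return 0.0;  // JSToNumber(null) => #0
      if (auto* b = std::get_if<bool>(&p)) return *b ? 1.0 : 0.0;
      if (auto* d = std::get_if<double>(&p)) return *d;
      const std::string& s = std::get<std::string>(p);
      if (s.empty()) return 0.0;  // ToNumber("") => 0
      try {
        std::size_t pos = 0;
        double d = std::stod(s, &pos);
        return pos == s.size() ? d : std::nan("");
      } catch (...) {
        return std::nan("");
      }
    }

    int main() {
      assert(ToNumber(Primitive{Null{}}) == 0.0);
      assert(std::isnan(ToNumber(Primitive{Undefined{}})));
      assert(ToNumber(Primitive{true}) == 1.0);
      assert(ToNumber(Primitive{std::string("2.5")}) == 2.5);
    }
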
-
Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
// Try to reduce the input first.
Node* const input = node->InputAt(0);
@@ -758,26 +884,14 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
}
Type* const input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::PlainPrimitive())) {
- if (NodeProperties::GetContextInput(node) !=
- jsgraph()->NoContextConstant() ||
- NodeProperties::GetEffectInput(node) != graph()->start() ||
- NodeProperties::GetControlInput(node) != graph()->start()) {
- // JSToNumber(x:plain-primitive,context,effect,control)
- // => JSToNumber(x,no-context,start,start)
- RelaxEffectsAndControls(node);
- NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- NodeProperties::ReplaceControlInput(node, graph()->start());
- NodeProperties::ReplaceEffectInput(node, graph()->start());
- DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
- NodeProperties::ReplaceFrameStateInput(node, 0,
- jsgraph()->EmptyFrameState());
- return Changed(node);
- }
+ RelaxEffectsAndControls(node);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
+ return Changed(node);
}
return NoChange();
}
-
Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (input->opcode() == IrOpcode::kJSToString) {
// Recursively try to reduce the input first.
@@ -805,7 +919,6 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
return NoChange();
}
-
Reduction JSTypedLowering::ReduceJSToString(Node* node) {
// Try to reduce the input first.
Node* const input = node->InputAt(0);
@@ -817,86 +930,63 @@ Reduction JSTypedLowering::ReduceJSToString(Node* node) {
return NoChange();
}
-
Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Type* receiver_type = NodeProperties::GetType(receiver);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!receiver_type->Is(Type::Receiver())) {
- // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
- if (receiver_type->Maybe(Type::NullOrUndefined()) &&
- NodeProperties::IsExceptionalCall(node)) {
- // ToObject throws for null or undefined inputs.
- return NoChange();
- }
+ if (receiver_type->Is(Type::Receiver())) {
+ ReplaceWithValue(node, receiver, effect, control);
+ return Replace(receiver);
+ }
- // Check whether {receiver} is a Smi.
- Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
-
- // Determine the instance type of {receiver}.
- Node* receiver_map = efalse0 =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, efalse0, if_false0);
- Node* receiver_instance_type = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- receiver_map, efalse0, if_false0);
-
- // Check whether {receiver} is a spec object.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Node* check1 =
- graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
- receiver_instance_type);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
+ // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
+ if (receiver_type->Maybe(Type::NullOrUndefined()) &&
+ NodeProperties::IsExceptionalCall(node)) {
+ // ToObject throws for null or undefined inputs.
+ return NoChange();
+ }
- // Convert {receiver} using the ToObjectStub.
- Node* if_convert =
- graph()->NewNode(common()->Merge(2), if_true0, if_false1);
- Node* econvert =
- graph()->NewNode(common()->EffectPhi(2), etrue0, efalse1, if_convert);
- Node* rconvert;
- {
- Callable callable = CodeFactory::ToObject(isolate());
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
- rconvert = econvert = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- receiver, context, frame_state, econvert, if_convert);
- }
+ // Check whether {receiver} is a spec object.
+ Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- // The {receiver} is already a spec object.
- Node* if_done = if_true1;
- Node* edone = etrue1;
- Node* rdone = receiver;
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* rtrue = receiver;
- control = graph()->NewNode(common()->Merge(2), if_convert, if_done);
- effect = graph()->NewNode(common()->EffectPhi(2), econvert, edone, control);
- receiver =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- rconvert, rdone, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* rfalse;
+ {
+ // Convert {receiver} using the ToObjectStub.
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ rfalse = efalse = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, frame_state, efalse, if_false);
+ if_false = graph()->NewNode(common()->IfSuccess(), rfalse);
}
- ReplaceWithValue(node, receiver, effect, control);
- return Changed(receiver);
-}
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, rtrue);
+ node->ReplaceInput(1, rfalse);
+ node->ReplaceInput(2, control);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 2));
+ return Changed(node);
+}
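
The rewritten ReduceJSToObject collapses the old Smi-plus-instance-type checks into one ObjectIsReceiver branch: receivers flow through unchanged, everything else is handed to the ToObjectStub, and the node itself is morphed into the Phi that merges the two results. In semantic terms (a sketch over a toy value type, not V8's heap layout; the stub's wrapper allocation and TypeError are only modeled):

    #include <cassert>
    #include <stdexcept>
    #include <variant>

    struct Receiver { int id; };  // anything that is already a JS object
    using Value = std::variant<std::monostate /*null or undefined*/,
                               double, Receiver>;

    // Models the lowered JSToObject: fast path for receivers, stub otherwise.
    Receiver ToObject(const Value& v) {
      if (auto* r = std::get_if<Receiver>(&v)) return *r;  // ObjectIsReceiver
      if (std::holds_alternative<std::monostate>(v))
        throw std::runtime_error("TypeError");             // stub throws here
      return Receiver{42};                                 // wrapper from the stub
    }

    int main() {
      assert(ToObject(Value{Receiver{7}}).id == 7);  // identity on receivers
      assert(ToObject(Value{1.0}).id == 42);         // primitives get wrapped
    }
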
Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
@@ -914,31 +1004,9 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
ReplaceWithValue(node, value, effect);
return Replace(value);
}
- // Optimize "prototype" property of functions.
- if (name.is_identical_to(factory()->prototype_string()) &&
- receiver_type->IsConstant() &&
- receiver_type->AsConstant()->Value()->IsJSFunction()) {
- // TODO(turbofan): This lowering might not kick in if we ever lower
- // the C++ accessor for "prototype" in an earlier optimization pass.
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(receiver_type->AsConstant()->Value());
- if (function->has_initial_map()) {
- // We need to add a code dependency on the initial map of the {function}
- // in order to be notified about changes to the "prototype" of {function},
- // so it doesn't make sense to continue unless deoptimization is enabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
- Node* value =
- jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
- ReplaceWithValue(node, value);
- return Replace(value);
- }
- }
return NoChange();
}
-
Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
@@ -972,7 +1040,10 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
return Replace(load);
}
// Compute byte offset.
- Node* offset = Word32Shl(key, static_cast<int>(k));
+ Node* offset =
+ (k == 0) ? key : graph()->NewNode(
+ simplified()->NumberShiftLeft(), key,
+ jsgraph()->Constant(static_cast<double>(k)));
Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
offset, length, effect, control);
ReplaceWithValue(node, load, load);
@@ -983,7 +1054,6 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
return NoChange();
}
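
In both the load path here and the store path below, the element index is scaled to a byte offset with a shift by k, where the external array's element size is 2^k; the k == 0 special case merely avoids emitting a useless shift for byte-sized elements. The computation:

    #include <cassert>

    // Byte offset for typed-array element {key} when the element size is 1 << k.
    int ByteOffset(int key, int k) {
      return (k == 0) ? key : key << k;  // NumberShiftLeft(key, k)
    }

    int main() {
      assert(ByteOffset(5, 0) == 5);   // Int8Array:    1-byte elements
      assert(ByteOffset(5, 1) == 10);  // Int16Array:   2-byte elements
      assert(ByteOffset(5, 2) == 20);  // Int32Array:   4-byte elements
      assert(ByteOffset(5, 3) == 40);  // Float64Array: 8-byte elements
    }
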
-
Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
@@ -1018,10 +1088,11 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
value = number_reduction.replacement();
} else {
Node* frame_state_for_to_number =
- NodeProperties::GetFrameStateInput(node, 1);
+ NodeProperties::FindFrameStateBefore(node);
value = effect =
graph()->NewNode(javascript()->ToNumber(), value, context,
frame_state_for_to_number, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), value);
}
}
// Check if we can avoid the bounds check.
@@ -1040,7 +1111,10 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
return Changed(node);
}
// Compute byte offset.
- Node* offset = Word32Shl(key, static_cast<int>(k));
+ Node* offset =
+ (k == 0) ? key : graph()->NewNode(
+ simplified()->NumberShiftLeft(), key,
+ jsgraph()->Constant(static_cast<double>(k)));
// Turn into a StoreBuffer operation.
RelaxControls(node);
node->ReplaceInput(0, buffer);
@@ -1058,17 +1132,13 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
return NoChange();
}
-
Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
Node* const context = NodeProperties::GetContextInput(node);
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node);
// If deoptimization is disabled, we cannot optimize.
- if (!(flags() & kDeoptimizationEnabled) ||
- (flags() & kDisableBinaryOpReduction)) {
- return NoChange();
- }
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
// If we are in a try block, don't optimize since the runtime call
// in the proxy case can throw.
@@ -1087,29 +1157,31 @@ Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
Handle<JSFunction>::cast(r.right_type()->AsConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
- if (!function->IsConstructor() ||
- function->map()->has_non_instance_prototype()) {
+ // Make sure the prototype of {function} is the %FunctionPrototype%, and it
+ // already has a meaningful initial map (i.e. we constructed at least one
+ // instance using the constructor {function}).
+ if (function->map()->prototype() != function->native_context()->closure() ||
+ function->map()->has_non_instance_prototype() ||
+ !function->has_initial_map()) {
return NoChange();
}
- JSFunction::EnsureHasInitialMap(function);
- DCHECK(function->has_initial_map());
+ // We can only use the fast case if @@hasInstance was not used so far.
+ if (!isolate()->IsHasInstanceLookupChainIntact()) return NoChange();
+ dependencies()->AssumePropertyCell(factory()->has_instance_protector());
+
Handle<Map> initial_map(function->initial_map(), isolate());
- this->dependencies()->AssumeInitialMapCantChange(initial_map);
+ dependencies()->AssumeInitialMapCantChange(initial_map);
Node* prototype =
jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
- Node* if_is_smi = nullptr;
- Node* e_is_smi = nullptr;
  // The left hand side may be a Smi, so check for that first (a Smi is
  // never an instance of anything).
- if (r.left_type()->Maybe(Type::TaggedSigned())) {
- Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
- Node* branch_is_smi =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
- if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
- e_is_smi = effect;
- control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
- }
+ Node* is_smi = graph()->NewNode(simplified()->ObjectIsSmi(), r.left());
+ Node* branch_is_smi =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), is_smi, control);
+ Node* if_is_smi = graph()->NewNode(common()->IfTrue(), branch_is_smi);
+ Node* e_is_smi = effect;
+ control = graph()->NewNode(common()->IfFalse(), branch_is_smi);
Node* object_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
@@ -1132,10 +1204,10 @@ Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
int is_access_check_needed_bit = 1 << Map::kIsAccessCheckNeeded;
Node* is_access_check_needed_num =
graph()->NewNode(simplified()->NumberBitwiseAnd(), map_bit_field,
- jsgraph()->Uint32Constant(is_access_check_needed_bit));
+ jsgraph()->Constant(is_access_check_needed_bit));
Node* is_access_check_needed =
- graph()->NewNode(machine()->Word32Equal(), is_access_check_needed_num,
- jsgraph()->Uint32Constant(is_access_check_needed_bit));
+ graph()->NewNode(simplified()->NumberEqual(), is_access_check_needed_num,
+ jsgraph()->Constant(is_access_check_needed_bit));
Node* branch_is_access_check_needed = graph()->NewNode(
common()->Branch(BranchHint::kFalse), is_access_check_needed, control);
@@ -1150,52 +1222,55 @@ Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
Node* map_instance_type = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
loop_object_map, loop_effect, control);
- Node* is_proxy = graph()->NewNode(machine()->Word32Equal(), map_instance_type,
- jsgraph()->Uint32Constant(JS_PROXY_TYPE));
+ Node* is_proxy =
+ graph()->NewNode(simplified()->NumberEqual(), map_instance_type,
+ jsgraph()->Constant(JS_PROXY_TYPE));
Node* branch_is_proxy =
graph()->NewNode(common()->Branch(BranchHint::kFalse), is_proxy, control);
Node* if_is_proxy = graph()->NewNode(common()->IfTrue(), branch_is_proxy);
Node* e_is_proxy = effect;
-
- Node* runtime_has_in_proto_chain = control = graph()->NewNode(
- common()->Merge(2), if_is_access_check_needed, if_is_proxy);
+ control = graph()->NewNode(common()->Merge(2), if_is_access_check_needed,
+ if_is_proxy);
effect = graph()->NewNode(common()->EffectPhi(2), e_is_access_check_needed,
e_is_proxy, control);
// If we need an access check or the object is a Proxy, make a runtime call
// to finish the lowering.
- Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
+ Node* runtimecall = graph()->NewNode(
javascript()->CallRuntime(Runtime::kHasInPrototypeChain), r.left(),
prototype, context, frame_state, effect, control);
+ Node* runtimecall_control =
+ graph()->NewNode(common()->IfSuccess(), runtimecall);
+
control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
Node* object_prototype = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForMapPrototype()),
loop_object_map, loop_effect, control);
- // Check if object prototype is equal to function prototype.
- Node* eq_proto =
- graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
- object_prototype, prototype);
- Node* branch_eq_proto =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
- Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
- Node* e_eq_proto = effect;
-
- control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
-
// If not, check if object prototype is the null prototype.
Node* null_proto =
- graph()->NewNode(simplified()->ReferenceEqual(r.right_type()),
- object_prototype, jsgraph()->NullConstant());
+ graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
+ jsgraph()->NullConstant());
Node* branch_null_proto = graph()->NewNode(
common()->Branch(BranchHint::kFalse), null_proto, control);
Node* if_null_proto = graph()->NewNode(common()->IfTrue(), branch_null_proto);
Node* e_null_proto = effect;
control = graph()->NewNode(common()->IfFalse(), branch_null_proto);
+
+ // Check if object prototype is equal to function prototype.
+ Node* eq_proto = graph()->NewNode(simplified()->ReferenceEqual(),
+ object_prototype, prototype);
+ Node* branch_eq_proto =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), eq_proto, control);
+ Node* if_eq_proto = graph()->NewNode(common()->IfTrue(), branch_eq_proto);
+ Node* e_eq_proto = effect;
+
+ control = graph()->NewNode(common()->IfFalse(), branch_eq_proto);
+
Node* load_object_map = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
object_prototype, effect, control);
@@ -1204,31 +1279,24 @@ Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
loop_object_map->ReplaceInput(1, load_object_map);
loop->ReplaceInput(1, control);
- control = graph()->NewNode(common()->Merge(3), runtime_has_in_proto_chain,
+ control = graph()->NewNode(common()->Merge(3), runtimecall_control,
if_eq_proto, if_null_proto);
- effect = graph()->NewNode(common()->EffectPhi(3),
- bool_result_runtime_has_in_proto_chain_case,
- e_eq_proto, e_null_proto, control);
+ effect = graph()->NewNode(common()->EffectPhi(3), runtimecall, e_eq_proto,
+ e_null_proto, control);
Node* result = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 3),
- bool_result_runtime_has_in_proto_chain_case, jsgraph()->TrueConstant(),
- jsgraph()->FalseConstant(), control);
+ common()->Phi(MachineRepresentation::kTagged, 3), runtimecall,
+ jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
- if (if_is_smi != nullptr) {
- DCHECK_NOT_NULL(e_is_smi);
- control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
- effect =
- graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
- result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->FalseConstant(), result, control);
- }
+ control = graph()->NewNode(common()->Merge(2), if_is_smi, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), e_is_smi, effect, control);
+ result = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), result, control);
ReplaceWithValue(node, result, effect, control);
return Changed(result);
}
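
Stripped of the two escapes to Runtime::kHasInPrototypeChain (maps that need access checks, and proxies), the loop built above is an ordinary prototype-chain walk: repeatedly load map->prototype until it reaches either the constructor's initial-map prototype (true) or null (false). A minimal standalone version of that walk:

    #include <cassert>

    struct Object { const Object* prototype; };  // stand-in for map->prototype

    // Models the fast path of JSInstanceOf: true iff {prototype} occurs on
    // the chain of {object}. (Access-checked objects and proxies bail out
    // to the runtime instead.)
    bool HasInPrototypeChain(const Object* object, const Object* prototype) {
      for (const Object* p = object->prototype; p != nullptr; p = p->prototype) {
        if (p == prototype) return true;
      }
      return false;
    }

    int main() {
      Object proto{nullptr};
      Object mid{&proto};
      Object obj{&mid};
      assert(HasInPrototypeChain(&obj, &proto));  // obj instanceof F
      assert(!HasInPrototypeChain(&proto, &obj));
    }
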
-
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
@@ -1249,7 +1317,6 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
return Changed(node);
}
-
Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
@@ -1270,7 +1337,6 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
return Changed(node);
}
-
Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
@@ -1278,94 +1344,229 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
Type* receiver_type = NodeProperties::GetType(receiver);
Node* context = NodeProperties::GetContextInput(node);
Type* context_type = NodeProperties::GetType(context);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (!receiver_type->Is(Type::Receiver())) {
- if (receiver_type->Is(Type::NullOrUndefined()) ||
- mode == ConvertReceiverMode::kNullOrUndefined) {
- if (context_type->IsConstant()) {
- Handle<JSObject> global_proxy(
- Handle<Context>::cast(context_type->AsConstant()->Value())
- ->global_proxy(),
- isolate());
- receiver = jsgraph()->Constant(global_proxy);
- } else {
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- receiver = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, native_context, effect);
- }
- } else if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
- mode == ConvertReceiverMode::kNotNullOrUndefined) {
- receiver = effect =
- graph()->NewNode(javascript()->ToObject(), receiver, context,
- frame_state, effect, control);
+
+ // Check if {receiver} is known to be a receiver.
+ if (receiver_type->Is(Type::Receiver())) {
+ ReplaceWithValue(node, receiver, effect, control);
+ return Replace(receiver);
+ }
+
+ // If the {receiver} is known to be null or undefined, we can just replace it
+ // with the global proxy unconditionally.
+ if (receiver_type->Is(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNullOrUndefined) {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ receiver = jsgraph()->Constant(global_proxy);
} else {
- // Check {receiver} for undefined.
- Node* check0 =
- graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
- receiver, jsgraph()->UndefinedConstant());
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check0, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-
- // Check {receiver} for null.
- Node* check1 =
- graph()->NewNode(simplified()->ReferenceEqual(receiver_type),
- receiver, jsgraph()->NullConstant());
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-
- // Convert {receiver} using ToObject.
- Node* if_convert = if_false1;
- Node* econvert = effect;
- Node* rconvert;
- {
- rconvert = econvert =
- graph()->NewNode(javascript()->ToObject(), receiver, context,
- frame_state, econvert, if_convert);
- }
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ receiver = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, effect);
+ }
+ ReplaceWithValue(node, receiver, effect, control);
+ return Replace(receiver);
+ }
- // Replace {receiver} with global proxy of {context}.
- Node* if_global =
- graph()->NewNode(common()->Merge(2), if_true0, if_true1);
- Node* eglobal = effect;
- Node* rglobal;
- {
- if (context_type->IsConstant()) {
- Handle<JSObject> global_proxy(
- Handle<Context>::cast(context_type->AsConstant()->Value())
- ->global_proxy(),
- isolate());
- rglobal = jsgraph()->Constant(global_proxy);
- } else {
- Node* native_context = eglobal = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, eglobal);
- rglobal = eglobal = graph()->NewNode(
- javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, native_context, eglobal);
- }
- }
+ // If {receiver} cannot be null or undefined we can skip a few checks.
+ if (!receiver_type->Maybe(Type::NullOrUndefined()) ||
+ mode == ConvertReceiverMode::kNotNullOrUndefined) {
+ Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- control = graph()->NewNode(common()->Merge(2), if_convert, if_global);
- effect =
- graph()->NewNode(common()->EffectPhi(2), econvert, eglobal, control);
- receiver =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- rconvert, rglobal, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* rtrue = receiver;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* rfalse;
+ {
+ // Convert {receiver} using the ToObjectStub.
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ rfalse = efalse = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, frame_state, efalse);
}
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, rtrue);
+ node->ReplaceInput(1, rfalse);
+ node->ReplaceInput(2, control);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 2));
+ return Changed(node);
}
- ReplaceWithValue(node, receiver, effect, control);
- return Changed(receiver);
+
+ // Check if {receiver} is already a JSReceiver.
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Check {receiver} for undefined.
+ Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
+ jsgraph()->UndefinedConstant());
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+
+ // Check {receiver} for null.
+ Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
+ jsgraph()->NullConstant());
+ Node* branch2 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check2, if_false1);
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+
+ // We just use {receiver} directly.
+ Node* if_noop = if_true0;
+ Node* enoop = effect;
+ Node* rnoop = receiver;
+
+ // Convert {receiver} using ToObject.
+ Node* if_convert = if_false2;
+ Node* econvert = effect;
+ Node* rconvert;
+ {
+ // Convert {receiver} using the ToObjectStub.
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, node->op()->properties());
+ rconvert = econvert = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ receiver, context, frame_state, econvert);
+ }
+
+ // Replace {receiver} with global proxy of {context}.
+ Node* if_global = graph()->NewNode(common()->Merge(2), if_true1, if_true2);
+ Node* eglobal = effect;
+ Node* rglobal;
+ {
+ if (context_type->IsConstant()) {
+ Handle<JSObject> global_proxy(
+ Handle<Context>::cast(context_type->AsConstant()->Value())
+ ->global_proxy(),
+ isolate());
+ rglobal = jsgraph()->Constant(global_proxy);
+ } else {
+ Node* native_context = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, eglobal);
+ rglobal = eglobal = graph()->NewNode(
+ javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
+ native_context, native_context, eglobal);
+ }
+ }
+
+ control =
+ graph()->NewNode(common()->Merge(3), if_noop, if_convert, if_global);
+ effect = graph()->NewNode(common()->EffectPhi(3), enoop, econvert, eglobal,
+ control);
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, rnoop);
+ node->ReplaceInput(1, rconvert);
+ node->ReplaceInput(2, rglobal);
+ node->ReplaceInput(3, control);
+ node->TrimInputCount(4);
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 3));
+ return Changed(node);
}
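
The general case thus ends in a three-way Phi: a receiver is used as-is, null or undefined picks up the global proxy of the context, and any other primitive goes through the ToObjectStub. As a decision function (sloppy-mode semantics; the enum values are illustrative, not V8 types):

    #include <cassert>

    enum class Kind { kReceiver, kNullOrUndefined, kOtherPrimitive };
    enum class Action { kUseAsIs, kUseGlobalProxy, kCallToObject };

    // Mirrors the rnoop / rglobal / rconvert arms of the lowering above.
    Action ConvertReceiver(Kind kind) {
      switch (kind) {
        case Kind::kReceiver:        return Action::kUseAsIs;
        case Kind::kNullOrUndefined: return Action::kUseGlobalProxy;
        case Kind::kOtherPrimitive:  return Action::kCallToObject;
      }
      return Action::kCallToObject;
    }

    int main() {
      assert(ConvertReceiver(Kind::kReceiver) == Action::kUseAsIs);
      assert(ConvertReceiver(Kind::kNullOrUndefined) == Action::kUseGlobalProxy);
      assert(ConvertReceiver(Kind::kOtherPrimitive) == Action::kCallToObject);
    }
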
+namespace {
+
+void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
+ int builtin_index, int arity, CallDescriptor::Flags flags) {
+ // Patch {node} to a direct CEntryStub call.
+ //
+ // ----------- A r g u m e n t s -----------
+ // -- 0: CEntryStub
+ // --- Stack args ---
+ // -- 1: receiver
+ // -- [2, 2 + n[: the n actual arguments passed to the builtin
+ // -- 2 + n: argc, including the receiver and implicit args (Smi)
+ // -- 2 + n + 1: target
+ // -- 2 + n + 2: new_target
+ // --- Register args ---
+ // -- 2 + n + 3: the C entry point
+ // -- 2 + n + 4: argc (Int32)
+ // -----------------------------------
+
+ // The logic contained here is mirrored in Builtins::Generate_Adaptor.
+ // Keep these in sync.
+
+ const bool is_construct = (node->opcode() == IrOpcode::kJSCallConstruct);
+
+ DCHECK(Builtins::HasCppImplementation(builtin_index));
+
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = is_construct
+ ? NodeProperties::GetValueInput(node, arity + 1)
+ : jsgraph->UndefinedConstant();
+
+ // API and CPP builtins are implemented in C++, and we can inline both.
+ // CPP builtins create a builtin exit frame, API builtins don't.
+ const bool has_builtin_exit_frame = Builtins::IsCpp(builtin_index);
+
+ Node* stub = jsgraph->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack,
+ has_builtin_exit_frame);
+ node->ReplaceInput(0, stub);
+
+ Zone* zone = jsgraph->zone();
+ if (is_construct) {
+ // Unify representations between construct and call nodes.
+ // Remove new target and add receiver as a stack parameter.
+ Node* receiver = jsgraph->UndefinedConstant();
+ node->RemoveInput(arity + 1);
+ node->InsertInput(zone, 1, receiver);
+ }
+
+ const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
+ Node* argc_node = jsgraph->Int32Constant(argc);
+
+ node->InsertInput(zone, arity + 2, argc_node);
+ node->InsertInput(zone, arity + 3, target);
+ node->InsertInput(zone, arity + 4, new_target);
+
+ Address entry = Builtins::CppEntryOf(builtin_index);
+ ExternalReference entry_ref(ExternalReference(entry, isolate));
+ Node* entry_node = jsgraph->ExternalConstant(entry_ref);
+
+ node->InsertInput(zone, arity + 5, entry_node);
+ node->InsertInput(zone, arity + 6, argc_node);
+
+ static const int kReturnCount = 1;
+ const char* debug_name = Builtins::name(builtin_index);
+ Operator::Properties properties = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetCEntryStubCallDescriptor(
+ zone, kReturnCount, argc, debug_name, properties, flags);
+
+ NodeProperties::ChangeOp(node, jsgraph->common()->Call(desc));
+}
+
+} // namespace
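
The input layout ReduceBuiltin constructs can be made concrete by listing the patched call-node inputs for a builtin of arity n, following the comment block above (the labels are illustrative; argc additionally counts the receiver and the implicit target/new_target arguments):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Reconstructs the patched input order of the CEntryStub call for a
    // builtin taking {arity} JS arguments (cf. the layout comment above).
    std::vector<std::string> BuiltinCallInputs(int arity) {
      std::vector<std::string> in;
      in.push_back("CEntryStub");                  // 0
      in.push_back("receiver");                    // 1 (stack)
      for (int i = 0; i < arity; ++i)              // [2, 2 + n)
        in.push_back("arg" + std::to_string(i));
      in.push_back("argc (Smi)");                  // 2 + n
      in.push_back("target");                      // 2 + n + 1
      in.push_back("new_target");                  // 2 + n + 2
      in.push_back("C entry point");               // 2 + n + 3 (register)
      in.push_back("argc (Int32)");                // 2 + n + 4 (register)
      return in;
    }

    int main() {
      int i = 0;
      for (const auto& s : BuiltinCallInputs(2))
        std::printf("%d: %s\n", i++, s.c_str());
    }
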
Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
@@ -1375,6 +1576,8 @@ Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Type* target_type = NodeProperties::GetType(target);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
// Check if {target} is a known JSFunction.
if (target_type->IsConstant() &&
@@ -1382,32 +1585,48 @@ Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(target_type->AsConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ const int builtin_index = shared->construct_stub()->builtin_index();
+ const bool is_builtin = (builtin_index != -1);
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- // Patch {node} to an indirect call via the {function}s construct stub.
- Callable callable(handle(shared->construct_stub(), isolate()),
- ConstructStubDescriptor(isolate()));
- node->RemoveInput(arity + 1);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(
- node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
- CallDescriptor::kNeedsFrameState)));
+ if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
+ (shared->internal_formal_parameter_count() == arity ||
+ shared->internal_formal_parameter_count() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
+ // Patch {node} to a direct CEntryStub call.
+
+ // Load the context from the {target}.
+ Node* context = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
+ target, effect, control);
+ NodeProperties::ReplaceContextInput(node, context);
+
+ // Update the effect dependency for the {node}.
+ NodeProperties::ReplaceEffectInput(node, effect);
+
+ ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
+ } else {
+ // Patch {node} to an indirect call via the {function}s construct stub.
+ Callable callable(handle(shared->construct_stub(), isolate()),
+ ConstructStubDescriptor(isolate()));
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(),
+ 1 + arity, flags)));
+ }
return Changed(node);
}
// Check if {target} is a JSFunction.
if (target_type->Is(Type::Function())) {
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
-
// Patch {node} to an indirect call via the ConstructFunction builtin.
Callable callable = CodeFactory::ConstructFunction(isolate());
node->RemoveInput(arity + 1);
@@ -1436,9 +1655,9 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Type* target_type = NodeProperties::GetType(target);
Node* receiver = NodeProperties::GetValueInput(node, 1);
Type* receiver_type = NodeProperties::GetType(receiver);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
// Try to infer receiver {convert_mode} from {receiver} type.
if (receiver_type->Is(Type::NullOrUndefined())) {
@@ -1453,6 +1672,8 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(target_type->AsConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
+ const int builtin_index = shared->code()->builtin_index();
+ const bool is_builtin = (builtin_index != -1);
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
@@ -1476,9 +1697,6 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
// Update the effect dependency for the {node}.
NodeProperties::ReplaceEffectInput(node, effect);
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
-
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
if (p.tail_call_mode() == TailCallMode::kAllow) {
@@ -1487,9 +1705,15 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Node* new_target = jsgraph()->UndefinedConstant();
Node* argument_count = jsgraph()->Int32Constant(arity);
- if (shared->internal_formal_parameter_count() == arity ||
- shared->internal_formal_parameter_count() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
+ (shared->internal_formal_parameter_count() == arity ||
+ shared->internal_formal_parameter_count() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel)) {
+ // Patch {node} to a direct CEntryStub call.
+ ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
+ } else if (shared->internal_formal_parameter_count() == arity ||
+ shared->internal_formal_parameter_count() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), arity + 2, new_target);
node->InsertInput(graph()->zone(), arity + 3, argument_count);
@@ -1516,9 +1740,6 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
// Check if {target} is a JSFunction.
if (target_type->Is(Type::Function())) {
- // Remove the eager bailout frame state.
- NodeProperties::RemoveFrameStateInput(node, 1);
-
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
if (p.tail_call_mode() == TailCallMode::kAllow) {
@@ -1564,7 +1785,7 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* cache_type = NodeProperties::GetValueInput(node, 2);
Node* index = NodeProperties::GetValueInput(node, 3);
Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1579,8 +1800,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
receiver, effect, control);
// Check if the expected map still matches that of the {receiver}.
- Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
- receiver_map, cache_type);
+ Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+ cache_type);
Node* branch0 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
@@ -1600,9 +1821,13 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
{
// Filter the {key} to check if it's still a valid property of the
// {receiver} (does the ToName conversion implicitly).
+ Callable const callable = CodeFactory::ForInFilter(isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
vfalse0 = efalse0 = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kForInFilter), receiver, key,
- context, frame_state, effect, if_false0);
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
+ receiver, context, frame_state, effect, if_false0);
if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
}
@@ -1626,6 +1851,84 @@ Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
return Changed(node);
}
+Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
+ Node* generator = NodeProperties::GetValueInput(node, 0);
+ Node* continuation = NodeProperties::GetValueInput(node, 1);
+ Node* offset = NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ int register_count = OpParameter<int>(node);
+
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
+ FieldAccess continuation_field =
+ AccessBuilder::ForJSGeneratorObjectContinuation();
+ FieldAccess input_or_debug_pos_field =
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+
+ Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
+ generator, effect, control);
+
+ for (int i = 0; i < register_count; ++i) {
+ Node* value = NodeProperties::GetValueInput(node, 3 + i);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
+ value, effect, control);
+ }
+
+ effect = graph()->NewNode(simplified()->StoreField(context_field), generator,
+ context, effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(continuation_field),
+ generator, continuation, effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(input_or_debug_pos_field),
+ generator, offset, effect, control);
+
+ ReplaceWithValue(node, effect, effect, control);
+ return Changed(effect);
+}
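
Note the repeated effect = graph()->NewNode(...) idiom in the hunk above: every field store consumes the previous effect and becomes the new effect dependency, which serializes the register spills, the context store, the continuation store, and the input store into program order. A toy model of such an effect chain, plain C++ rather than the real node representation:

    #include <deque>
    #include <iostream>
    #include <string>

    // A node records only its operation and its incoming effect edge.
    struct Node {
      std::string op;
      const Node* effect_in;
    };

    // Each store consumes the current effect and becomes the new one,
    // mirroring "effect = graph()->NewNode(StoreField(...), ...)" above.
    const Node* StoreField(std::deque<Node>& graph, const std::string& field,
                           const Node* effect) {
      graph.push_back(Node{"StoreField[" + field + "]", effect});
      return &graph.back();
    }

    int main() {
      std::deque<Node> graph;
      graph.push_back(Node{"Start", nullptr});
      const Node* effect = &graph.front();
      for (const char* f : {"register0", "context", "continuation", "input"}) {
        effect = StoreField(graph, f, effect);
      }
      // Walking the effect chain backwards recovers the store order.
      for (const Node* n = effect; n != nullptr; n = n->effect_in) {
        std::cout << n->op << '\n';
      }
    }
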
+
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreContinuation(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreContinuation, node->opcode());
+ Node* generator = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ FieldAccess continuation_field =
+ AccessBuilder::ForJSGeneratorObjectContinuation();
+
+ Node* continuation = effect = graph()->NewNode(
+ simplified()->LoadField(continuation_field), generator, effect, control);
+ Node* executing = jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting);
+ effect = graph()->NewNode(simplified()->StoreField(continuation_field),
+ generator, executing, effect, control);
+
+ ReplaceWithValue(node, continuation, effect, control);
+ return Changed(continuation);
+}
+
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreRegister, node->opcode());
+ Node* generator = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ int index = OpParameter<int>(node);
+
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
+
+ Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
+ generator, effect, control);
+ Node* element = effect = graph()->NewNode(
+ simplified()->LoadField(element_field), array, effect, control);
+ Node* stale = jsgraph()->StaleRegisterConstant();
+ effect = graph()->NewNode(simplified()->StoreField(element_field), array,
+ stale, effect, control);
+
+ ReplaceWithValue(node, element, effect, control);
+ return Changed(element);
+}
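
Restoring a register is deliberately a load/store pair: read the operand-stack slot, then clobber it with the stale-register sentinel so that reading the same slot twice is detectable. The same shape as a self-contained sketch; kStale is a made-up marker value:

    #include <array>
    #include <cassert>

    constexpr int kStale = -0x5717;  // hypothetical stale-register marker

    int RestoreRegister(std::array<int, 4>& operand_stack, int index) {
      int value = operand_stack[index];
      operand_stack[index] = kStale;  // the slot must not be read again
      return value;
    }

    int main() {
      std::array<int, 4> stack{10, 20, 30, 40};
      assert(RestoreRegister(stack, 2) == 30);
      assert(stack[2] == kStale);  // a second restore would yield the marker
    }
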
Reduction JSTypedLowering::ReduceSelect(Node* node) {
DCHECK_EQ(IrOpcode::kSelect, node->opcode());
@@ -1656,37 +1959,143 @@ Reduction JSTypedLowering::ReduceSelect(Node* node) {
return NoChange();
}
+namespace {
+
+MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
+ if (object_type->IsConstant() &&
+ object_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<Map> object_map(
+ Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+ if (object_map->is_stable()) return object_map;
+ } else if (object_type->IsClass()) {
+ Handle<Map> object_map = object_type->AsClass()->Map();
+ if (object_map->is_stable()) return object_map;
+ }
+ return MaybeHandle<Map>();
+}
+
+} // namespace
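
The helper answers one question for both Constant and Class types: does the static type already pin down a stable map? If not, it returns the empty handle. The calling pattern, modeled with std::optional in place of MaybeHandle and a stand-in Map struct:

    #include <cassert>
    #include <optional>

    struct Map {
      bool is_stable;
    };

    // Constant types yield the object's own map, Class types carry theirs;
    // anything unstable (or untyped) yields an empty handle.
    std::optional<Map> GetStableMap(std::optional<Map> from_constant,
                                    std::optional<Map> from_class) {
      if (from_constant && from_constant->is_stable) return from_constant;
      if (from_class && from_class->is_stable) return from_class;
      return std::nullopt;
    }

    int main() {
      Map stable{true};
      Map unstable{false};
      assert(GetStableMap(stable, std::nullopt).has_value());
      assert(!GetStableMap(unstable, std::nullopt).has_value());
      assert(GetStableMap(std::nullopt, stable).has_value());
    }
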
+
+Reduction JSTypedLowering::ReduceCheckMaps(Node* node) {
+ // TODO(bmeurer): Find a better home for this thing!
+ // The CheckMaps(o, ...map...) can be eliminated if map is stable and
+ // either
+ // (a) o has type Constant(object) and map == object->map, or
+ // (b) o has type Class(map),
+ // and either
+ // (1) map cannot transition further, or
+ // (2) we can add a code dependency on the stability of map
+ // (to guard the Constant type information).
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Type* const object_type = NodeProperties::GetType(object);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Handle<Map> object_map;
+ if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+ for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+ Node* const map = NodeProperties::GetValueInput(node, i);
+ Type* const map_type = NodeProperties::GetType(map);
+ if (map_type->IsConstant() &&
+ map_type->AsConstant()->Value().is_identical_to(object_map)) {
+ if (object_map->CanTransition()) {
+ DCHECK(flags() & kDeoptimizationEnabled);
+ dependencies()->AssumeMapStable(object_map);
+ }
+ return Replace(effect);
+ }
+ }
+ }
+ return NoChange();
+}
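
In words: a CheckMaps is redundant exactly when one of the checked maps is the map the type system already guarantees, and if that map could still transition, eliminating the check is only sound after recording a stability dependency that invalidates the code on a later transition. A sketch of that decision, toy map identities rather than the V8 API:

    #include <cassert>
    #include <vector>

    struct Outcome {
      bool eliminate;
      bool add_stability_dependency;
    };

    // Redundant iff the statically-known stable map is among the checked
    // maps; a still-transitionable map needs a stability dependency too.
    Outcome ReduceCheckMaps(int known_stable_map, bool can_transition,
                            const std::vector<int>& checked_maps) {
      for (int map : checked_maps) {
        if (map == known_stable_map) return {true, can_transition};
      }
      return {false, false};
    }

    int main() {
      assert(ReduceCheckMaps(7, false, {3, 7}).eliminate);
      assert(ReduceCheckMaps(7, true, {7}).add_stability_dependency);
      assert(!ReduceCheckMaps(7, false, {3}).eliminate);
    }
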
+
+Reduction JSTypedLowering::ReduceCheckString(Node* node) {
+ // TODO(bmeurer): Find a better home for this thing!
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::String())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceLoadField(Node* node) {
+ // TODO(bmeurer): Find a better home for this thing!
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Type* const object_type = NodeProperties::GetType(object);
+ FieldAccess const& access = FieldAccessOf(node->op());
+ if (access.base_is_tagged == kTaggedBase &&
+ access.offset == HeapObject::kMapOffset) {
+ // We can replace LoadField[Map](o) with map if map is stable and either
+ // (a) o has type Constant(object) and map == object->map, or
+ // (b) o has type Class(map),
+ // and either
+ // (1) map cannot transition further, or
+ // (2) deoptimization is enabled and we can add a code dependency on the
+ // stability of map (to guard the Constant type information).
+ Handle<Map> object_map;
+ if (GetStableMapFromObjectType(object_type).ToHandle(&object_map)) {
+ if (object_map->CanTransition()) {
+ if (flags() & kDeoptimizationEnabled) {
+ dependencies()->AssumeMapStable(object_map);
+ } else {
+ return NoChange();
+ }
+ }
+ Node* const value = jsgraph()->HeapConstant(object_map);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+ return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceNumberRoundop(Node* node) {
+ // TODO(bmeurer): Find a better home for this thing!
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ return Replace(input);
+ }
+ return NoChange();
+}
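
The fold is sound because ceil, floor, round, and trunc are the identity on values that are already integral, and they preserve -0 and NaN as well, which is exactly why the guard type is kIntegerOrMinusZeroOrNaN. A quick standalone check of those identities:

    #include <cassert>
    #include <cmath>

    int main() {
      // Identity on integers, however large.
      for (double x : {-2.0, 0.0, 5.0, 1e15}) {
        assert(std::ceil(x) == x);
        assert(std::floor(x) == x);
        assert(std::trunc(x) == x);
      }
      // NaN and -0 are preserved too, hence kIntegerOrMinusZeroOrNaN.
      assert(std::isnan(std::trunc(NAN)));
      assert(std::signbit(std::floor(-0.0)));
    }
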
Reduction JSTypedLowering::Reduce(Node* node) {
// Check if the output type is a singleton. In that case we already know the
// result value and can simply replace the node if it's eliminable.
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable)) {
+ // We can only constant-fold nodes here that are known not to cause any
+ // side effect, be it a JavaScript-observable side effect or a possible
+ // eager deoptimization exit (i.e. {node} has an operator that doesn't have
+ // the Operator::kNoDeopt property).
Type* upper = NodeProperties::GetType(node);
- if (upper->IsConstant()) {
- Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::MinusZero())) {
- Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::NaN())) {
- Node* replacement = jsgraph()->NaNConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::Null())) {
- Node* replacement = jsgraph()->NullConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::PlainNumber()) && upper->Min() == upper->Max()) {
- Node* replacement = jsgraph()->Constant(upper->Min());
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
- } else if (upper->Is(Type::Undefined())) {
- Node* replacement = jsgraph()->UndefinedConstant();
- ReplaceWithValue(node, replacement);
- return Changed(replacement);
+ if (upper->IsInhabited()) {
+ if (upper->IsConstant()) {
+ Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::MinusZero())) {
+ Node* replacement = jsgraph()->Constant(factory()->minus_zero_value());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::NaN())) {
+ Node* replacement = jsgraph()->NaNConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::Null())) {
+ Node* replacement = jsgraph()->NullConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::PlainNumber()) &&
+ upper->Min() == upper->Max()) {
+ Node* replacement = jsgraph()->Constant(upper->Min());
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ } else if (upper->Is(Type::Undefined())) {
+ Node* replacement = jsgraph()->UndefinedConstant();
+ ReplaceWithValue(node, replacement);
+ return Changed(replacement);
+ }
}
}
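
New in this version is the IsInhabited() guard: an uninhabited type means the node sits in unreachable code, so there is no value to materialize and folding must be skipped. The core singleton-type-folds-to-constant idea in isolation, with toy min/max ranges rather than V8's type lattice:

    #include <cassert>
    #include <optional>

    // Toy stand-in for a type: a numeric range plus an inhabitedness bit.
    struct Range {
      double min, max;
      bool inhabited = true;
    };

    std::optional<double> TryFold(const Range& type) {
      if (!type.inhabited) return std::nullopt;   // unreachable: nothing to fold
      if (type.min == type.max) return type.min;  // singleton: fold to constant
      return std::nullopt;
    }

    int main() {
      assert(TryFold({4.0, 4.0}) == 4.0);
      assert(!TryFold({0.0, 1.0}).has_value());
      assert(!TryFold({4.0, 4.0, false}).has_value());
    }
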
switch (node->opcode()) {
@@ -1704,28 +2113,21 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSGreaterThanOrEqual:
return ReduceJSComparison(node);
case IrOpcode::kJSBitwiseOr:
- return ReduceInt32Binop(node, simplified()->NumberBitwiseOr());
case IrOpcode::kJSBitwiseXor:
- return ReduceInt32Binop(node, simplified()->NumberBitwiseXor());
case IrOpcode::kJSBitwiseAnd:
- return ReduceInt32Binop(node, simplified()->NumberBitwiseAnd());
+ return ReduceInt32Binop(node);
case IrOpcode::kJSShiftLeft:
- return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftLeft());
case IrOpcode::kJSShiftRight:
- return ReduceUI32Shift(node, kSigned, simplified()->NumberShiftRight());
+ return ReduceUI32Shift(node, kSigned);
case IrOpcode::kJSShiftRightLogical:
- return ReduceUI32Shift(node, kUnsigned,
- simplified()->NumberShiftRightLogical());
+ return ReduceUI32Shift(node, kUnsigned);
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
case IrOpcode::kJSSubtract:
- return ReduceNumberBinop(node, simplified()->NumberSubtract());
case IrOpcode::kJSMultiply:
- return ReduceNumberBinop(node, simplified()->NumberMultiply());
case IrOpcode::kJSDivide:
- return ReduceNumberBinop(node, simplified()->NumberDivide());
case IrOpcode::kJSModulus:
- return ReduceJSModulus(node);
+ return ReduceNumberBinop(node);
case IrOpcode::kJSToBoolean:
return ReduceJSToBoolean(node);
case IrOpcode::kJSToInteger:
@@ -1762,8 +2164,25 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSForInNext(node);
case IrOpcode::kJSForInStep:
return ReduceJSForInStep(node);
+ case IrOpcode::kJSGeneratorStore:
+ return ReduceJSGeneratorStore(node);
+ case IrOpcode::kJSGeneratorRestoreContinuation:
+ return ReduceJSGeneratorRestoreContinuation(node);
+ case IrOpcode::kJSGeneratorRestoreRegister:
+ return ReduceJSGeneratorRestoreRegister(node);
case IrOpcode::kSelect:
return ReduceSelect(node);
+ case IrOpcode::kCheckMaps:
+ return ReduceCheckMaps(node);
+ case IrOpcode::kCheckString:
+ return ReduceCheckString(node);
+ case IrOpcode::kNumberCeil:
+ case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberRound:
+ case IrOpcode::kNumberTrunc:
+ return ReduceNumberRoundop(node);
+ case IrOpcode::kLoadField:
+ return ReduceLoadField(node);
default:
break;
}
@@ -1771,13 +2190,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
}
-Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
- if (rhs == 0) return lhs;
- return graph()->NewNode(machine()->Word32Shl(), lhs,
- jsgraph()->Int32Constant(rhs));
-}
-
-
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
@@ -1796,17 +2208,15 @@ CommonOperatorBuilder* JSTypedLowering::common() const {
return jsgraph()->common();
}
+MachineOperatorBuilder* JSTypedLowering::machine() const {
+ return jsgraph()->machine();
+}
SimplifiedOperatorBuilder* JSTypedLowering::simplified() const {
return jsgraph()->simplified();
}
-MachineOperatorBuilder* JSTypedLowering::machine() const {
- return jsgraph()->machine();
-}
-
-
CompilationDependencies* JSTypedLowering::dependencies() const {
return dependencies_;
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 151787106b..35c397fb88 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -35,7 +35,6 @@ class JSTypedLowering final : public AdvancedReducer {
enum Flag {
kNoFlags = 0u,
kDeoptimizationEnabled = 1u << 0,
- kDisableBinaryOpReduction = 1u << 1,
};
typedef base::Flags<Flag> Flags;
@@ -49,9 +48,6 @@ class JSTypedLowering final : public AdvancedReducer {
friend class JSBinopReduction;
Reduction ReduceJSAdd(Node* node);
- Reduction ReduceJSModulus(Node* node);
- Reduction ReduceJSBitwiseOr(Node* node);
- Reduction ReduceJSMultiply(Node* node);
Reduction ReduceJSComparison(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
@@ -59,6 +55,7 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSInstanceOf(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
+ Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
Reduction ReduceJSToBoolean(Node* node);
@@ -75,13 +72,17 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSForInDone(Node* node);
Reduction ReduceJSForInNext(Node* node);
Reduction ReduceJSForInStep(Node* node);
+ Reduction ReduceJSGeneratorStore(Node* node);
+ Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
+ Reduction ReduceJSGeneratorRestoreRegister(Node* node);
+ Reduction ReduceCheckMaps(Node* node);
+ Reduction ReduceCheckString(Node* node);
+ Reduction ReduceLoadField(Node* node);
+ Reduction ReduceNumberRoundop(Node* node);
Reduction ReduceSelect(Node* node);
- Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
- Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
- Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
- const Operator* shift_op);
-
- Node* Word32Shl(Node* const lhs, int32_t const rhs);
+ Reduction ReduceNumberBinop(Node* node);
+ Reduction ReduceInt32Binop(Node* node);
+ Reduction ReduceUI32Shift(Node* node, Signedness signedness);
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 105bd353fc..e4df58d0f7 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/linkage.h"
+
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-utils.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
-#include "src/compiler/linkage.h"
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
@@ -17,10 +19,10 @@ namespace internal {
namespace compiler {
namespace {
-LinkageLocation regloc(Register reg) {
- return LinkageLocation::ForRegister(reg.code());
-}
+LinkageLocation regloc(Register reg, MachineType type) {
+ return LinkageLocation::ForRegister(reg.code(), type);
+}
MachineType reptyp(Representation representation) {
switch (representation.kind()) {
@@ -49,6 +51,7 @@ MachineType reptyp(Representation representation) {
UNREACHABLE();
return MachineType::None();
}
+
} // namespace
@@ -75,6 +78,20 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
<< d.FrameStateCount() << "t" << d.SupportsTailCalls();
}
+MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
+ size_t param_count = ParameterCount();
+ size_t return_count = ReturnCount();
+ MachineType* types = reinterpret_cast<MachineType*>(
+ zone->New(sizeof(MachineType) * (param_count + return_count)));
+ int current = 0;
+ for (size_t i = 0; i < return_count; ++i) {
+ types[current++] = GetReturnType(i);
+ }
+ for (size_t i = 0; i < param_count; ++i) {
+ types[current++] = GetParameterType(i);
+ }
+ return new (zone) MachineSignature(return_count, param_count, types);
+}
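
With the machine signature removed from the descriptor itself (see the linkage.h hunk below), it is rebuilt on demand from the location signature, laid out returns first and parameters after. The same flattening with std::vector standing in for zone allocation:

    #include <cassert>
    #include <vector>

    enum class MachineType { kAnyTagged, kInt32, kPointer };

    // Returns-first, then parameters: the layout GetMachineSignature builds.
    std::vector<MachineType> BuildSignature(
        const std::vector<MachineType>& returns,
        const std::vector<MachineType>& params) {
      std::vector<MachineType> types;
      types.reserve(returns.size() + params.size());
      types.insert(types.end(), returns.begin(), returns.end());
      types.insert(types.end(), params.begin(), params.end());
      return types;
    }

    int main() {
      auto sig = BuildSignature({MachineType::kAnyTagged},
                                {MachineType::kInt32, MachineType::kPointer});
      assert(sig.size() == 3);
      assert(sig[0] == MachineType::kAnyTagged);  // return slot comes first
    }
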
bool CallDescriptor::HasSameReturnLocationsAs(
const CallDescriptor* other) const {
@@ -85,46 +102,42 @@ bool CallDescriptor::HasSameReturnLocationsAs(
return true;
}
-
-bool CallDescriptor::CanTailCall(const Node* node,
- int* stack_param_delta) const {
- CallDescriptor const* other = OpParameter<CallDescriptor const*>(node);
- size_t current_input = 0;
- size_t other_input = 0;
- *stack_param_delta = 0;
- bool more_other = true;
- bool more_this = true;
- while (more_other || more_this) {
- if (other_input < other->InputCount()) {
- if (!other->GetInputLocation(other_input).IsRegister()) {
- (*stack_param_delta)--;
+int CallDescriptor::GetStackParameterDelta(
+ CallDescriptor const* tail_caller) const {
+ int callee_slots_above_sp = 0;
+ for (size_t i = 0; i < InputCount(); ++i) {
+ LinkageLocation operand = GetInputLocation(i);
+ if (!operand.IsRegister()) {
+ int new_candidate =
+ -operand.GetLocation() + operand.GetSizeInPointers() - 1;
+ if (new_candidate > callee_slots_above_sp) {
+ callee_slots_above_sp = new_candidate;
}
- } else {
- more_other = false;
}
- if (current_input < InputCount()) {
- if (!GetInputLocation(current_input).IsRegister()) {
- (*stack_param_delta)++;
+ }
+ int tail_caller_slots_above_sp = 0;
+ if (tail_caller != nullptr) {
+ for (size_t i = 0; i < tail_caller->InputCount(); ++i) {
+ LinkageLocation operand = tail_caller->GetInputLocation(i);
+ if (!operand.IsRegister()) {
+ int new_candidate =
+ -operand.GetLocation() + operand.GetSizeInPointers() - 1;
+ if (new_candidate > tail_caller_slots_above_sp) {
+ tail_caller_slots_above_sp = new_candidate;
+ }
}
- } else {
- more_this = false;
}
- ++current_input;
- ++other_input;
}
- return HasSameReturnLocationsAs(OpParameter<CallDescriptor const*>(node));
+ return callee_slots_above_sp - tail_caller_slots_above_sp;
+}
+
+bool CallDescriptor::CanTailCall(const Node* node) const {
+ return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
}
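
The old CanTailCall computed a stack-parameter delta as a side effect; now GetStackParameterDelta is its own query: for each descriptor, find the deepest stack operand above SP (registers do not count, and multi-slot operands extend the reach via GetSizeInPointers), then subtract the tail caller's extent from the callee's. A standalone model with toy operands, negative ints denoting caller frame slots:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Toy operand: registers never occupy stack slots; a stack operand sits
    // at a negative slot index and covers size_in_pointers slots.
    struct Operand {
      bool is_register;
      int location;
      int size_in_pointers;
    };

    // Mirrors the loops above: the deepest extent of stack operands above SP.
    int SlotsAboveSp(const std::vector<Operand>& operands) {
      int slots = 0;
      for (const Operand& op : operands) {
        if (op.is_register) continue;
        slots = std::max(slots, -op.location + op.size_in_pointers - 1);
      }
      return slots;
    }

    int main() {
      std::vector<Operand> callee{{true, 0, 1}, {false, -2, 1}};
      std::vector<Operand> tail_caller{{false, -1, 1}};
      assert(SlotsAboveSp(callee) == 2);
      assert(SlotsAboveSp(tail_caller) == 1);
      // GetStackParameterDelta == callee slots minus tail-caller slots.
      assert(SlotsAboveSp(callee) - SlotsAboveSp(tail_caller) == 1);
    }
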
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
DCHECK(!info->IsStub());
- if (info->has_literal()) {
- // If we already have the function literal, use the number of parameters
- // plus the receiver.
- return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + info->literal()->parameter_count(),
- CallDescriptor::kNoFlags);
- }
if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
@@ -138,19 +151,19 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
// static
-int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
+bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
// Most runtime functions need a FrameState. A few chosen ones that we know
// not to call into arbitrary JavaScript, not to throw, and not to deoptimize
are whitelisted here and can be called without a FrameState.
switch (function) {
+ case Runtime::kAbort:
case Runtime::kAllocateInTargetSpace:
case Runtime::kCreateIterResultObject:
- case Runtime::kDefineDataPropertyInLiteral:
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
- case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
case Runtime::kForInDone:
case Runtime::kForInStep:
+ case Runtime::kGeneratorGetContinuation:
case Runtime::kGetSuperConstructor:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
@@ -166,28 +179,24 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kStringLessThanOrEqual:
case Runtime::kStringGreaterThan:
case Runtime::kStringGreaterThanOrEqual:
+ case Runtime::kToFastProperties: // TODO(conradw): Is it safe?
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
- return 0;
+ return false;
+ case Runtime::kInlineCall:
+ case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineGetPrototype:
case Runtime::kInlineNewObject:
case Runtime::kInlineRegExpConstructResult:
case Runtime::kInlineRegExpExec:
case Runtime::kInlineSubString:
+ case Runtime::kInlineThrowNotDateError:
case Runtime::kInlineToInteger:
case Runtime::kInlineToLength:
- case Runtime::kInlineToName:
case Runtime::kInlineToNumber:
case Runtime::kInlineToObject:
- case Runtime::kInlineToPrimitive:
- case Runtime::kInlineToPrimitive_Number:
- case Runtime::kInlineToPrimitive_String:
case Runtime::kInlineToString:
- return 1;
- case Runtime::kInlineCall:
- case Runtime::kInlineDeoptimizeNow:
- case Runtime::kInlineThrowNotDateError:
- return 2;
+ return true;
default:
break;
}
@@ -195,9 +204,9 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
// Most inlined runtime functions (except the ones listed above) can be called
// without a FrameState or will be lowered by JSIntrinsicLowering internally.
const Runtime::Function* const f = Runtime::FunctionForId(function);
- if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return 0;
+ if (f->intrinsic_type == Runtime::IntrinsicType::INLINE) return false;
- return 1;
+ return true;
}
@@ -215,6 +224,23 @@ bool CallDescriptor::UsesOnlyRegisters() const {
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
Operator::Properties properties, CallDescriptor::Flags flags) {
+ const Runtime::Function* function = Runtime::FunctionForId(function_id);
+ const int return_count = function->result_size;
+ const char* debug_name = function->name;
+
+ if (!Linkage::NeedsFrameStateInput(function_id)) {
+ flags = static_cast<CallDescriptor::Flags>(
+ flags & ~CallDescriptor::kNeedsFrameState);
+ }
+
+ return GetCEntryStubCallDescriptor(zone, return_count, js_parameter_count,
+ debug_name, properties, flags);
+}
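
GetRuntimeCallDescriptor is now a thin wrapper: it resolves the runtime function, clears kNeedsFrameState when NeedsFrameStateInput says the function can do without one, and delegates to the new GetCEntryStubCallDescriptor. The flag masking in isolation; AdjustFlags is a hypothetical helper name and the flag values are illustrative:

    #include <cassert>

    enum Flag : unsigned {
      kNoFlags = 0u,
      kNeedsFrameState = 1u << 0,
    };

    // Drop the frame-state requirement when the callee never needs one.
    unsigned AdjustFlags(unsigned flags, bool needs_frame_state) {
      if (!needs_frame_state) {
        flags &= ~static_cast<unsigned>(kNeedsFrameState);
      }
      return flags;
    }

    int main() {
      assert(AdjustFlags(kNeedsFrameState, false) == kNoFlags);
      assert(AdjustFlags(kNeedsFrameState, true) == kNeedsFrameState);
    }
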
+
+CallDescriptor* Linkage::GetCEntryStubCallDescriptor(
+ Zone* zone, int return_count, int js_parameter_count,
+ const char* debug_name, Operator::Properties properties,
+ CallDescriptor::Flags flags) {
const size_t function_count = 1;
const size_t num_args_count = 1;
const size_t context_count = 1;
@@ -222,67 +248,53 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
static_cast<size_t>(js_parameter_count) +
num_args_count + context_count;
- const Runtime::Function* function = Runtime::FunctionForId(function_id);
- const size_t return_count = static_cast<size_t>(function->result_size);
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
+ LocationSignature::Builder locations(zone, static_cast<size_t>(return_count),
+ static_cast<size_t>(parameter_count));
// Add returns.
if (locations.return_count_ > 0) {
- locations.AddReturn(regloc(kReturnRegister0));
+ locations.AddReturn(regloc(kReturnRegister0, MachineType::AnyTagged()));
}
if (locations.return_count_ > 1) {
- locations.AddReturn(regloc(kReturnRegister1));
+ locations.AddReturn(regloc(kReturnRegister1, MachineType::AnyTagged()));
}
if (locations.return_count_ > 2) {
- locations.AddReturn(regloc(kReturnRegister2));
- }
- for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(MachineType::AnyTagged());
+ locations.AddReturn(regloc(kReturnRegister2, MachineType::AnyTagged()));
}
// All parameters to the runtime call go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
- locations.AddParam(
- LinkageLocation::ForCallerFrameSlot(i - js_parameter_count));
- types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+ i - js_parameter_count, MachineType::AnyTagged()));
}
// Add runtime function itself.
- locations.AddParam(regloc(kRuntimeCallFunctionRegister));
- types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(
+ regloc(kRuntimeCallFunctionRegister, MachineType::Pointer()));
// Add runtime call argument count.
- locations.AddParam(regloc(kRuntimeCallArgCountRegister));
- types.AddParam(MachineType::Pointer());
+ locations.AddParam(
+ regloc(kRuntimeCallArgCountRegister, MachineType::Int32()));
// Add context.
- locations.AddParam(regloc(kContextRegister));
- types.AddParam(MachineType::AnyTagged());
-
- if (Linkage::FrameStateInputCount(function_id) == 0) {
- flags = static_cast<CallDescriptor::Flags>(
- flags & ~CallDescriptor::kNeedsFrameState);
- }
+ locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
// The target for runtime calls is a code object.
MachineType target_type = MachineType::AnyTagged();
- LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ LinkageLocation target_loc =
+ LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
- types.Build(), // machine_sig
locations.Build(), // location_sig
js_parameter_count, // stack_parameter_count
properties, // properties
kNoCalleeSaved, // callee-saved
kNoCalleeSaved, // callee-saved fp
flags, // flags
- function->name); // debug name
+ debug_name); // debug name
}
-
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
int js_parameter_count,
CallDescriptor::Flags flags) {
@@ -294,43 +306,39 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
js_parameter_count + new_target_count + num_args_count + context_count;
LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
// All JS calls have exactly one return value.
- locations.AddReturn(regloc(kReturnRegister0));
- types.AddReturn(MachineType::AnyTagged());
+ locations.AddReturn(regloc(kReturnRegister0, MachineType::AnyTagged()));
// All parameters to JS calls go on the stack.
for (int i = 0; i < js_parameter_count; i++) {
int spill_slot_index = i - js_parameter_count;
- locations.AddParam(LinkageLocation::ForCallerFrameSlot(spill_slot_index));
- types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+ spill_slot_index, MachineType::AnyTagged()));
}
// Add JavaScript call new target value.
- locations.AddParam(regloc(kJavaScriptCallNewTargetRegister));
- types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(
+ regloc(kJavaScriptCallNewTargetRegister, MachineType::AnyTagged()));
// Add JavaScript call argument count.
- locations.AddParam(regloc(kJavaScriptCallArgCountRegister));
- types.AddParam(MachineType::Int32());
+ locations.AddParam(
+ regloc(kJavaScriptCallArgCountRegister, MachineType::Int32()));
// Add context.
- locations.AddParam(regloc(kContextRegister));
- types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
// The target for JS function calls is the JSFunction object.
MachineType target_type = MachineType::AnyTagged();
// When entering into an OSR function from unoptimized code the JSFunction
// is not in a register, but it is on the stack in the marker spill slot.
- LinkageLocation target_loc = is_osr
- ? LinkageLocation::ForSavedCallerFunction()
- : regloc(kJSFunctionRegister);
+ LinkageLocation target_loc =
+ is_osr ? LinkageLocation::ForSavedCallerFunction()
+ : regloc(kJSFunctionRegister, MachineType::AnyTagged());
return new (zone) CallDescriptor( // --
CallDescriptor::kCallJSFunction, // kind
target_type, // target MachineType
target_loc, // target location
- types.Build(), // machine_sig
locations.Build(), // location_sig
js_parameter_count, // stack_parameter_count
Operator::kNoProperties, // properties
@@ -357,20 +365,16 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
static_cast<size_t>(js_parameter_count + context_count);
LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
// Add returns.
if (locations.return_count_ > 0) {
- locations.AddReturn(regloc(kReturnRegister0));
+ locations.AddReturn(regloc(kReturnRegister0, return_type));
}
if (locations.return_count_ > 1) {
- locations.AddReturn(regloc(kReturnRegister1));
+ locations.AddReturn(regloc(kReturnRegister1, return_type));
}
if (locations.return_count_ > 2) {
- locations.AddReturn(regloc(kReturnRegister2));
- }
- for (size_t i = 0; i < return_count; i++) {
- types.AddReturn(return_type);
+ locations.AddReturn(regloc(kReturnRegister2, return_type));
}
// Add parameters in registers and on the stack.
@@ -378,29 +382,27 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
if (i < register_parameter_count) {
// The first parameters go in registers.
Register reg = descriptor.GetRegisterParameter(i);
- Representation rep =
- RepresentationFromType(descriptor.GetParameterType(i));
- locations.AddParam(regloc(reg));
- types.AddParam(reptyp(rep));
+ MachineType type =
+ reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+ locations.AddParam(regloc(reg, type));
} else {
// The rest of the parameters go on the stack.
int stack_slot = i - register_parameter_count - stack_parameter_count;
- locations.AddParam(LinkageLocation::ForCallerFrameSlot(stack_slot));
- types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+ stack_slot, MachineType::AnyTagged()));
}
}
// Add context.
- locations.AddParam(regloc(kContextRegister));
- types.AddParam(MachineType::AnyTagged());
+ locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
// The target for stub calls is a code object.
MachineType target_type = MachineType::AnyTagged();
- LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ LinkageLocation target_loc =
+ LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
- types.Build(), // machine_sig
locations.Build(), // location_sig
stack_parameter_count, // stack_parameter_count
properties, // properties
@@ -411,6 +413,72 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
descriptor.DebugName(isolate));
}
+// static
+CallDescriptor* Linkage::GetAllocateCallDescriptor(Zone* zone) {
+ LocationSignature::Builder locations(zone, 1, 1);
+
+ locations.AddParam(regloc(kAllocateSizeRegister, MachineType::Int32()));
+
+ locations.AddReturn(regloc(kReturnRegister0, MachineType::AnyTagged()));
+
+ // The target for allocate calls is a code object.
+ MachineType target_type = MachineType::AnyTagged();
+ LinkageLocation target_loc =
+ LinkageLocation::ForAnyRegister(MachineType::AnyTagged());
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallCodeObject, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoThrow, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots, // flags
+ "Allocate");
+}
+
+// static
+CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count) {
+ const int register_parameter_count = descriptor.GetRegisterParameterCount();
+ const int parameter_count = register_parameter_count + stack_parameter_count;
+
+ LocationSignature::Builder locations(zone, 0, parameter_count);
+
+ // Add parameters in registers and on the stack.
+ for (int i = 0; i < parameter_count; i++) {
+ if (i < register_parameter_count) {
+ // The first parameters go in registers.
+ Register reg = descriptor.GetRegisterParameter(i);
+ MachineType type =
+ reptyp(RepresentationFromType(descriptor.GetParameterType(i)));
+ locations.AddParam(regloc(reg, type));
+ } else {
+ // The rest of the parameters go on the stack.
+ int stack_slot = i - register_parameter_count - stack_parameter_count;
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(
+ stack_slot, MachineType::AnyTagged()));
+ }
+ }
+
+ // The target for interpreter dispatches is a code entry address.
+ MachineType target_type = MachineType::Pointer();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ stack_parameter_count, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kNoCalleeSaved, // callee-saved registers
+ kNoCalleeSaved, // callee-saved fp
+ CallDescriptor::kCanUseRoots | // flags
+ CallDescriptor::kSupportsTailCalls, // flags
+ descriptor.DebugName(isolate));
+}
LinkageLocation Linkage::GetOsrValueLocation(int index) const {
CHECK(incoming_->IsJSFunctionCall());
@@ -427,7 +495,8 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
// Local variable stored in this (callee) stack.
int spill_index =
index - first_stack_slot + StandardFrameConstants::kFixedSlotCount;
- return LinkageLocation::ForCalleeFrameSlot(spill_index);
+ return LinkageLocation::ForCalleeFrameSlot(spill_index,
+ MachineType::AnyTagged());
} else {
// Parameter. Use the assigned location from the incoming call descriptor.
int parameter_index = 1 + index; // skip index 0, which is the target.
@@ -439,19 +508,21 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
bool Linkage::ParameterHasSecondaryLocation(int index) const {
if (!incoming_->IsJSFunctionCall()) return false;
LinkageLocation loc = GetParameterLocation(index);
- return (loc == regloc(kJSFunctionRegister) ||
- loc == regloc(kContextRegister));
+ return (loc == regloc(kJSFunctionRegister, MachineType::AnyTagged()) ||
+ loc == regloc(kContextRegister, MachineType::AnyTagged()));
}
LinkageLocation Linkage::GetParameterSecondaryLocation(int index) const {
DCHECK(ParameterHasSecondaryLocation(index));
LinkageLocation loc = GetParameterLocation(index);
- if (loc == regloc(kJSFunctionRegister)) {
- return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot);
+ if (loc == regloc(kJSFunctionRegister, MachineType::AnyTagged())) {
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kJSFunctionSlot,
+ MachineType::AnyTagged());
} else {
- DCHECK(loc == regloc(kContextRegister));
- return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot);
+ DCHECK(loc == regloc(kContextRegister, MachineType::AnyTagged()));
+ return LinkageLocation::ForCalleeFrameSlot(Frame::kContextSlot,
+ MachineType::AnyTagged());
}
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index a0434f8aff..1c025081c4 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -37,56 +37,63 @@ class LinkageLocation {
return !(*this == other);
}
- static LinkageLocation ForAnyRegister() {
- return LinkageLocation(REGISTER, ANY_REGISTER);
+ static LinkageLocation ForAnyRegister(
+ MachineType type = MachineType::None()) {
+ return LinkageLocation(REGISTER, ANY_REGISTER, type);
}
- static LinkageLocation ForRegister(int32_t reg) {
+ static LinkageLocation ForRegister(int32_t reg,
+ MachineType type = MachineType::None()) {
DCHECK(reg >= 0);
- return LinkageLocation(REGISTER, reg);
+ return LinkageLocation(REGISTER, reg, type);
}
- static LinkageLocation ForCallerFrameSlot(int32_t slot) {
+ static LinkageLocation ForCallerFrameSlot(int32_t slot, MachineType type) {
DCHECK(slot < 0);
- return LinkageLocation(STACK_SLOT, slot);
+ return LinkageLocation(STACK_SLOT, slot, type);
}
- static LinkageLocation ForCalleeFrameSlot(int32_t slot) {
+ static LinkageLocation ForCalleeFrameSlot(int32_t slot, MachineType type) {
// TODO(titzer): bailout instead of crashing here.
DCHECK(slot >= 0 && slot < LinkageLocation::MAX_STACK_SLOT);
- return LinkageLocation(STACK_SLOT, slot);
+ return LinkageLocation(STACK_SLOT, slot, type);
}
static LinkageLocation ForSavedCallerReturnAddress() {
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kCallerPCOffset) /
- kPointerSize);
+ kPointerSize,
+ MachineType::Pointer());
}
static LinkageLocation ForSavedCallerFramePtr() {
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kCallerFPOffset) /
- kPointerSize);
+ kPointerSize,
+ MachineType::Pointer());
}
static LinkageLocation ForSavedCallerConstantPool() {
DCHECK(V8_EMBEDDED_CONSTANT_POOL);
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kConstantPoolOffset) /
- kPointerSize);
+ kPointerSize,
+ MachineType::AnyTagged());
}
static LinkageLocation ForSavedCallerFunction() {
return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kFunctionOffset) /
- kPointerSize);
+ kPointerSize,
+ MachineType::AnyTagged());
}
static LinkageLocation ConvertToTailCallerLocation(
LinkageLocation caller_location, int stack_param_delta) {
if (!caller_location.IsRegister()) {
return LinkageLocation(STACK_SLOT,
- caller_location.GetLocation() - stack_param_delta);
+ caller_location.GetLocation() + stack_param_delta,
+ caller_location.GetType());
}
return caller_location;
}
@@ -103,9 +110,22 @@ class LinkageLocation {
static const int32_t ANY_REGISTER = -1;
static const int32_t MAX_STACK_SLOT = 32767;
- LinkageLocation(LocationType type, int32_t location) {
+ LinkageLocation(LocationType type, int32_t location,
+ MachineType machine_type) {
bit_field_ = TypeField::encode(type) |
((location << LocationField::kShift) & LocationField::kMask);
+ machine_type_ = machine_type;
+ }
+
+ MachineType GetType() const { return machine_type_; }
+
+ int GetSize() const {
+ return 1 << ElementSizeLog2Of(GetType().representation());
+ }
+
+ int GetSizeInPointers() const {
+ // Round up to whole pointer-sized slots.
+ return (GetSize() + kPointerSize - 1) / kPointerSize;
}
int32_t GetLocation() const {
@@ -134,6 +154,7 @@ class LinkageLocation {
}
int32_t bit_field_;
+ MachineType machine_type_;
};
typedef Signature<LinkageLocation> LocationSignature;
@@ -152,25 +173,22 @@ class CallDescriptor final : public ZoneObject {
enum Flag {
kNoFlags = 0u,
kNeedsFrameState = 1u << 0,
- kPatchableCallSite = 1u << 1,
- kNeedsNopAfterCall = 1u << 2,
- kHasExceptionHandler = 1u << 3,
- kHasLocalCatchHandler = 1u << 4,
- kSupportsTailCalls = 1u << 5,
- kCanUseRoots = 1u << 6,
+ kHasExceptionHandler = 1u << 1,
+ kSupportsTailCalls = 1u << 2,
+ kCanUseRoots = 1u << 3,
// (arm64 only) native stack should be used for arguments.
- kUseNativeStack = 1u << 7,
+ kUseNativeStack = 1u << 4,
// (arm64 only) call instruction has to restore JSSP or CSP.
- kRestoreJSSP = 1u << 8,
- kRestoreCSP = 1u << 9,
+ kRestoreJSSP = 1u << 5,
+ kRestoreCSP = 1u << 6,
// Causes the code generator to initialize the root register.
- kInitializeRootRegister = 1u << 10,
- kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
+ kInitializeRootRegister = 1u << 7,
+ // Does not ever try to allocate space on our heap.
+ kNoAllocate = 1u << 8
};
typedef base::Flags<Flag> Flags;
CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
- const MachineSignature* machine_sig,
LocationSignature* location_sig, size_t stack_param_count,
Operator::Properties properties,
RegList callee_saved_registers,
@@ -179,7 +197,6 @@ class CallDescriptor final : public ZoneObject {
: kind_(kind),
target_type_(target_type),
target_loc_(target_loc),
- machine_sig_(machine_sig),
location_sig_(location_sig),
stack_param_count_(stack_param_count),
properties_(properties),
@@ -187,8 +204,6 @@ class CallDescriptor final : public ZoneObject {
callee_saved_fp_registers_(callee_saved_fp_registers),
flags_(flags),
debug_name_(debug_name) {
- DCHECK(machine_sig->return_count() == location_sig->return_count());
- DCHECK(machine_sig->parameter_count() == location_sig->parameter_count());
}
// Returns the kind of this call.
@@ -205,10 +220,10 @@ class CallDescriptor final : public ZoneObject {
}
// The number of return values from this call.
- size_t ReturnCount() const { return machine_sig_->return_count(); }
+ size_t ReturnCount() const { return location_sig_->return_count(); }
// The number of C parameters to this call.
- size_t CParameterCount() const { return machine_sig_->parameter_count(); }
+ size_t ParameterCount() const { return location_sig_->parameter_count(); }
// The number of stack parameters to the call.
size_t StackParameterCount() const { return stack_param_count_; }
@@ -222,7 +237,7 @@ class CallDescriptor final : public ZoneObject {
// The total number of inputs to this call, which includes the target,
// receiver, context, etc.
// TODO(titzer): this should include the framestate input too.
- size_t InputCount() const { return 1 + machine_sig_->parameter_count(); }
+ size_t InputCount() const { return 1 + location_sig_->parameter_count(); }
size_t FrameStateCount() const { return NeedsFrameState() ? 1 : 0; }
@@ -244,15 +259,19 @@ class CallDescriptor final : public ZoneObject {
return location_sig_->GetParam(index - 1);
}
- const MachineSignature* GetMachineSignature() const { return machine_sig_; }
+ MachineSignature* GetMachineSignature(Zone* zone) const;
MachineType GetReturnType(size_t index) const {
- return machine_sig_->GetReturn(index);
+ return location_sig_->GetReturn(index).GetType();
}
MachineType GetInputType(size_t index) const {
if (index == 0) return target_type_;
- return machine_sig_->GetParam(index - 1);
+ return location_sig_->GetParam(index - 1).GetType();
+ }
+
+ MachineType GetParameterType(size_t index) const {
+ return location_sig_->GetParam(index).GetType();
}
// Operator properties describe how this call can be optimized, if at all.
@@ -270,7 +289,9 @@ class CallDescriptor final : public ZoneObject {
bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
- bool CanTailCall(const Node* call, int* stack_param_delta) const;
+ int GetStackParameterDelta(const CallDescriptor* tail_caller = nullptr) const;
+
+ bool CanTailCall(const Node* call) const;
private:
friend class Linkage;
@@ -278,7 +299,6 @@ class CallDescriptor final : public ZoneObject {
const Kind kind_;
const MachineType target_type_;
const LinkageLocation target_loc_;
- const MachineSignature* const machine_sig_;
const LocationSignature* const location_sig_;
const size_t stack_param_count_;
const Operator::Properties properties_;
@@ -304,10 +324,11 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
// representing the architecture-specific location. The following call node
// layouts are supported (where {n} is the number of value inputs):
//
-// #0 #1 #2 #3 [...] #n
-// Call[CodeStub] code, arg 1, arg 2, arg 3, [...], context
-// Call[JSFunction] function, rcvr, arg 1, arg 2, [...], new, #arg, context
-// Call[Runtime] CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
+// #0 #1 #2 [...] #n
+// Call[CodeStub] code, arg 1, arg 2, [...], context
+// Call[JSFunction] function, rcvr, arg 1, [...], new, #arg, context
+// Call[Runtime] CEntryStub, arg 1, arg 2, [...], fun, #arg, context
+// Call[BytecodeDispatch] address, arg 1, arg 2, [...]
class Linkage : public ZoneObject {
public:
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
@@ -322,9 +343,14 @@ class Linkage : public ZoneObject {
CallDescriptor::Flags flags);
static CallDescriptor* GetRuntimeCallDescriptor(
- Zone* zone, Runtime::FunctionId function, int parameter_count,
+ Zone* zone, Runtime::FunctionId function, int js_parameter_count,
Operator::Properties properties, CallDescriptor::Flags flags);
+ static CallDescriptor* GetCEntryStubCallDescriptor(
+ Zone* zone, int return_count, int js_parameter_count,
+ const char* debug_name, Operator::Properties properties,
+ CallDescriptor::Flags flags);
+
static CallDescriptor* GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
@@ -332,6 +358,11 @@ class Linkage : public ZoneObject {
MachineType return_type = MachineType::AnyTagged(),
size_t return_count = 1);
+ static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
+ static CallDescriptor* GetBytecodeDispatchCallDescriptor(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ int stack_parameter_count);
+
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
// integers and pointers of one word size each, i.e. no floating point,
@@ -363,7 +394,7 @@ class Linkage : public ZoneObject {
bool ParameterHasSecondaryLocation(int index) const;
LinkageLocation GetParameterSecondaryLocation(int index) const;
- static int FrameStateInputCount(Runtime::FunctionId function);
+ static bool NeedsFrameStateInput(Runtime::FunctionId function);
// Get the location where an incoming OSR value is stored.
LinkageLocation GetOsrValueLocation(int index) const;
@@ -394,6 +425,9 @@ class Linkage : public ZoneObject {
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
+ // A special {OsrValue} index to indicate the accumulator register.
+ static const int kOsrAccumulatorRegisterIndex = -1;
+
private:
CallDescriptor* const incoming_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index e19368d107..ad787f8092 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -1,104 +1,710 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/load-elimination.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
namespace compiler {
-LoadElimination::~LoadElimination() {}
+namespace {
+
+enum Aliasing { kNoAlias, kMayAlias, kMustAlias };
+
+Aliasing QueryAlias(Node* a, Node* b) {
+ if (a == b) return kMustAlias;
+ if (!NodeProperties::GetType(a)->Maybe(NodeProperties::GetType(b))) {
+ return kNoAlias;
+ }
+ if (b->opcode() == IrOpcode::kAllocate) {
+ switch (a->opcode()) {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kParameter:
+ return kNoAlias;
+ case IrOpcode::kFinishRegion:
+ return QueryAlias(a->InputAt(0), b);
+ default:
+ break;
+ }
+ }
+ if (a->opcode() == IrOpcode::kAllocate) {
+ switch (b->opcode()) {
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kParameter:
+ return kNoAlias;
+ case IrOpcode::kFinishRegion:
+ return QueryAlias(a, b->InputAt(0));
+ default:
+ break;
+ }
+ }
+ return kMayAlias;
+}
+
+bool MayAlias(Node* a, Node* b) { return QueryAlias(a, b) != kNoAlias; }
+
+bool MustAlias(Node* a, Node* b) { return QueryAlias(a, b) == kMustAlias; }
+
+} // namespace
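
QueryAlias is a three-valued micro alias analysis: the same node must alias itself, nodes whose types don't overlap cannot alias, and a fresh Allocate can never alias a parameter, a heap constant, or another distinct allocation (FinishRegion is looked through to the allocation it wraps). A reduced model of the lattice using node kinds only, with the type-based test omitted:

    #include <cassert>

    enum Aliasing { kNoAlias, kMayAlias, kMustAlias };
    enum Kind { kAllocate, kParameter, kHeapConstant, kOther };

    Aliasing QueryAlias(int a_id, Kind a_kind, int b_id, Kind b_kind) {
      if (a_id == b_id) return kMustAlias;  // literally the same node
      // Two distinct fresh allocations are distinct objects.
      if (a_kind == kAllocate && b_kind == kAllocate) return kNoAlias;
      // A fresh allocation cannot be an incoming parameter or a constant.
      auto fresh_vs_external = [](Kind x, Kind y) {
        return x == kAllocate && (y == kParameter || y == kHeapConstant);
      };
      if (fresh_vs_external(a_kind, b_kind) ||
          fresh_vs_external(b_kind, a_kind)) {
        return kNoAlias;
      }
      return kMayAlias;  // be conservative otherwise
    }

    int main() {
      assert(QueryAlias(1, kAllocate, 1, kAllocate) == kMustAlias);
      assert(QueryAlias(1, kAllocate, 2, kAllocate) == kNoAlias);
      assert(QueryAlias(1, kAllocate, 2, kParameter) == kNoAlias);
      assert(QueryAlias(1, kOther, 2, kOther) == kMayAlias);
    }
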
Reduction LoadElimination::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kCheckMaps:
+ return ReduceCheckMaps(node);
+ case IrOpcode::kEnsureWritableFastElements:
+ return ReduceEnsureWritableFastElements(node);
+ case IrOpcode::kMaybeGrowFastElements:
+ return ReduceMaybeGrowFastElements(node);
+ case IrOpcode::kTransitionElementsKind:
+ return ReduceTransitionElementsKind(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
- default:
+ case IrOpcode::kStoreField:
+ return ReduceStoreField(node);
+ case IrOpcode::kLoadElement:
+ return ReduceLoadElement(node);
+ case IrOpcode::kStoreElement:
+ return ReduceStoreElement(node);
+ case IrOpcode::kStoreTypedElement:
+ return ReduceStoreTypedElement(node);
+ case IrOpcode::kEffectPhi:
+ return ReduceEffectPhi(node);
+ case IrOpcode::kDead:
break;
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ return ReduceOtherNode(node);
}
return NoChange();
}
-Reduction LoadElimination::ReduceLoadField(Node* node) {
- DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
- FieldAccess const access = FieldAccessOf(node->op());
- Node* object = NodeProperties::GetValueInput(node, 0);
- for (Node* effect = NodeProperties::GetEffectInput(node);;
- effect = NodeProperties::GetEffectInput(effect)) {
- switch (effect->opcode()) {
- case IrOpcode::kLoadField: {
- if (object == NodeProperties::GetValueInput(effect, 0) &&
- access == FieldAccessOf(effect->op())) {
- Node* const value = effect;
- ReplaceWithValue(node, value);
- return Replace(value);
+Node* LoadElimination::AbstractElements::Lookup(Node* object,
+ Node* index) const {
+ for (Element const element : elements_) {
+ if (element.object == nullptr) continue;
+ DCHECK_NOT_NULL(element.index);
+ DCHECK_NOT_NULL(element.value);
+ if (MustAlias(object, element.object) && MustAlias(index, element.index)) {
+ return element.value;
+ }
+ }
+ return nullptr;
+}
+
+LoadElimination::AbstractElements const*
+LoadElimination::AbstractElements::Kill(Node* object, Node* index,
+ Zone* zone) const {
+ for (Element const element : this->elements_) {
+ if (element.object == nullptr) continue;
+ if (MayAlias(object, element.object)) {
+ AbstractElements* that = new (zone) AbstractElements(zone);
+ for (Element const element : this->elements_) {
+ if (element.object == nullptr) continue;
+ DCHECK_NOT_NULL(element.index);
+ DCHECK_NOT_NULL(element.value);
+ if (!MayAlias(object, element.object) ||
+ !MayAlias(index, element.index)) {
+ that->elements_[that->next_index_++] = element;
}
- break;
}
- case IrOpcode::kStoreField: {
- if (access == FieldAccessOf(effect->op())) {
- if (object == NodeProperties::GetValueInput(effect, 0)) {
- Node* const value = NodeProperties::GetValueInput(effect, 1);
- Type* stored_value_type = NodeProperties::GetType(value);
- Type* load_type = NodeProperties::GetType(node);
- // Make sure the replacement's type is a subtype of the node's
- // type. Otherwise we could confuse optimizations that were
- // based on the original type.
- if (stored_value_type->Is(load_type)) {
- ReplaceWithValue(node, value);
- return Replace(value);
- } else {
- Node* renamed = graph()->NewNode(
- common()->Guard(Type::Intersect(stored_value_type, load_type,
- graph()->zone())),
- value, NodeProperties::GetControlInput(node));
- ReplaceWithValue(node, renamed);
- return Replace(renamed);
- }
- }
- // TODO(turbofan): Alias analysis to the rescue?
- return NoChange();
- }
+ that->next_index_ %= arraysize(elements_);
+ return that;
+ }
+ }
+ return this;
+}
+
+bool LoadElimination::AbstractElements::Equals(
+ AbstractElements const* that) const {
+ if (this == that) return true;
+ for (size_t i = 0; i < arraysize(elements_); ++i) {
+ Element this_element = this->elements_[i];
+ if (this_element.object == nullptr) continue;
+ for (size_t j = 0;; ++j) {
+ if (j == arraysize(elements_)) return false;
+ Element that_element = that->elements_[j];
+ if (this_element.object == that_element.object &&
+ this_element.index == that_element.index &&
+ this_element.value == that_element.value) {
break;
}
- case IrOpcode::kBeginRegion:
- case IrOpcode::kStoreBuffer:
- case IrOpcode::kStoreElement: {
- // These can never interfere with field loads.
+ }
+ }
+ for (size_t i = 0; i < arraysize(elements_); ++i) {
+ Element that_element = that->elements_[i];
+ if (that_element.object == nullptr) continue;
+ for (size_t j = 0;; ++j) {
+ if (j == arraysize(elements_)) return false;
+ Element this_element = this->elements_[j];
+ if (that_element.object == this_element.object &&
+ that_element.index == this_element.index &&
+ that_element.value == this_element.value) {
break;
}
- case IrOpcode::kFinishRegion: {
- // "Look through" FinishRegion nodes to make LoadElimination capable
- // of looking into atomic regions.
- if (object == effect) object = NodeProperties::GetValueInput(effect, 0);
- break;
+ }
+ }
+ return true;
+}
+
+LoadElimination::AbstractElements const*
+LoadElimination::AbstractElements::Merge(AbstractElements const* that,
+ Zone* zone) const {
+ if (this->Equals(that)) return this;
+ AbstractElements* copy = new (zone) AbstractElements(zone);
+ for (Element const this_element : this->elements_) {
+ if (this_element.object == nullptr) continue;
+ for (Element const that_element : that->elements_) {
+ if (this_element.object == that_element.object &&
+ this_element.index == that_element.index &&
+ this_element.value == that_element.value) {
+ copy->elements_[copy->next_index_++] = this_element;
}
- case IrOpcode::kAllocate: {
- // Allocations don't interfere with field loads. In case we see the
- // actual allocation for the {object} we can abort.
- if (object == effect) return NoChange();
- break;
+ }
+ }
+ copy->next_index_ %= arraysize(elements_);
+ return copy;
+}
+
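+// Example (illustrative sketch, not part of this change): Merge() keeps the
+// intersection of two element states, e.g.
+//   this: {(a,i)->v, (b,j)->w}   that: {(a,i)->v, (c,k)->u}
+//   this->Merge(that, zone) == {(a,i)->v}
+// so only facts that hold on every incoming effect path remain.
+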
+Node* LoadElimination::AbstractField::Lookup(Node* object) const {
+ for (auto pair : info_for_node_) {
+ if (MustAlias(object, pair.first)) return pair.second;
+ }
+ return nullptr;
+}
+
+LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
+ Node* object, Zone* zone) const {
+ for (auto pair : this->info_for_node_) {
+ if (MayAlias(object, pair.first)) {
+ AbstractField* that = new (zone) AbstractField(zone);
+ for (auto pair : this->info_for_node_) {
+ if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
}
- default: {
- if (!effect->op()->HasProperty(Operator::kNoWrite) ||
- effect->op()->EffectInputCount() != 1) {
- return NoChange();
- }
- break;
+ return that;
+ }
+ }
+ return this;
+}
+
+bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
+ if (this->elements_) {
+ if (!that->elements_ || !that->elements_->Equals(this->elements_)) {
+ return false;
+ }
+ } else if (that->elements_) {
+ return false;
+ }
+ for (size_t i = 0u; i < arraysize(fields_); ++i) {
+ AbstractField const* this_field = this->fields_[i];
+ AbstractField const* that_field = that->fields_[i];
+ if (this_field) {
+ if (!that_field || !that_field->Equals(this_field)) return false;
+ } else if (that_field) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void LoadElimination::AbstractState::Merge(AbstractState const* that,
+ Zone* zone) {
+ // Merge the information we have about the elements.
+ if (this->elements_) {
+ this->elements_ = that->elements_
+ ? that->elements_->Merge(this->elements_, zone)
+ : that->elements_;
+ } else {
+ this->elements_ = that->elements_;
+ }
+
+ // Merge the information we have about the fields.
+ for (size_t i = 0; i < arraysize(fields_); ++i) {
+ if (this->fields_[i]) {
+ if (that->fields_[i]) {
+ this->fields_[i] = this->fields_[i]->Merge(that->fields_[i], zone);
+ } else {
+ this->fields_[i] = nullptr;
+ }
+ }
+ }
+}
+
+Node* LoadElimination::AbstractState::LookupElement(Node* object,
+ Node* index) const {
+ if (this->elements_) {
+ return this->elements_->Lookup(object, index);
+ }
+ return nullptr;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::AddElement(Node* object, Node* index,
+ Node* value, Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ if (that->elements_) {
+ that->elements_ = that->elements_->Extend(object, index, value, zone);
+ } else {
+ that->elements_ = new (zone) AbstractElements(object, index, value, zone);
+ }
+ return that;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::KillElement(Node* object, Node* index,
+ Zone* zone) const {
+ if (this->elements_) {
+ AbstractElements const* that_elements =
+ this->elements_->Kill(object, index, zone);
+ if (this->elements_ != that_elements) {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->elements_ = that_elements;
+ return that;
+ }
+ }
+ return this;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddField(
+ Node* object, size_t index, Node* value, Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ if (that->fields_[index]) {
+ that->fields_[index] = that->fields_[index]->Extend(object, value, zone);
+ } else {
+ that->fields_[index] = new (zone) AbstractField(object, value, zone);
+ }
+ return that;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
+ Node* object, size_t index, Zone* zone) const {
+ if (AbstractField const* this_field = this->fields_[index]) {
+ this_field = this_field->Kill(object, zone);
+ if (this->fields_[index] != this_field) {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->fields_[index] = this_field;
+ return that;
+ }
+ }
+ return this;
+}
+
+Node* LoadElimination::AbstractState::LookupField(Node* object,
+ size_t index) const {
+ if (AbstractField const* this_field = this->fields_[index]) {
+ return this_field->Lookup(object);
+ }
+ return nullptr;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractStateForEffectNodes::Get(Node* node) const {
+ size_t const id = node->id();
+ if (id < info_for_node_.size()) return info_for_node_[id];
+ return nullptr;
+}
+
+void LoadElimination::AbstractStateForEffectNodes::Set(
+ Node* node, AbstractState const* state) {
+ size_t const id = node->id();
+ if (id >= info_for_node_.size()) info_for_node_.resize(id + 1, nullptr);
+ info_for_node_[id] = state;
+}
+
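+// Illustrative usage (not part of this change; states/node42/unseen_node are
+// hypothetical names):
+//   AbstractStateForEffectNodes states(zone);
+//   states.Set(node42, s);    // grows the table to node42->id() + 1 slots
+//   states.Get(node42);       // == s
+//   states.Get(unseen_node);  // == nullptr, i.e. "not visited yet"
+// A nullptr state makes the reducers below bail out with NoChange().
+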
+Reduction LoadElimination::ReduceCheckMaps(Node* node) {
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ int const map_input_count = node->op()->ValueInputCount() - 1;
+ if (Node* const object_map = state->LookupField(object, 0)) {
+ for (int i = 0; i < map_input_count; ++i) {
+ Node* map = NodeProperties::GetValueInput(node, 1 + i);
+ if (map == object_map) return Replace(effect);
+ }
+ }
+ if (map_input_count == 1) {
+ Node* const map0 = NodeProperties::GetValueInput(node, 1);
+ state = state->AddField(object, 0, map0, zone());
+ }
+ return UpdateState(node, state);
+}
+
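+// Example (illustrative, not part of this change): field index 0 caches the
+// object's map, so in
+//   CheckMaps(o, m); ...no map-killing effects...; CheckMaps(o, m)
+// the second check finds m via LookupField(o, 0) and is replaced by its
+// effect input; a single-map check additionally records o's map in the
+// state for later checks.
+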
+Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const elements = NodeProperties::GetValueInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
+ if (Node* const elements_map = state->LookupField(elements, 0)) {
+ // Check if the {elements} already have the fixed array map.
+ if (elements_map == fixed_array_map) {
+ ReplaceWithValue(node, elements, effect);
+ return Replace(elements);
+ }
+ }
+ // We know that the resulting elements have the fixed array map.
+ state = state->AddField(node, 0, fixed_array_map, zone());
+ // Kill the previous elements on {object}.
+ state = state->KillField(object, 2, zone());
+ // Add the new elements on {object}.
+ state = state->AddField(object, 2, node, zone());
+ return UpdateState(node, state);
+}
+
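+// Illustrative note (assumes the standard 64-bit V8 object layout with
+// kPointerSize == 8; not part of this change): FieldIndexOf maps tagged
+// offsets to slots, so
+//   index 0 -> offset 0   (HeapObject::map)
+//   index 2 -> offset 16  (JSObject::elements)
+//   index 3 -> offset 24  (JSArray::length)
+// which is why KillField(object, 2, ...) above invalidates the cached
+// elements backing store.
+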
+Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
+ GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ if (flags & GrowFastElementsFlag::kDoubleElements) {
+ // We know that the resulting elements have the fixed double array map.
+ Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
+ state = state->AddField(node, 0, fixed_double_array_map, zone());
+ } else {
+ // We know that the resulting elements have the fixed array map.
+ Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
+ state = state->AddField(node, 0, fixed_array_map, zone());
+ }
+ if (flags & GrowFastElementsFlag::kArrayObject) {
+ // Kill the previous Array::length on {object}.
+ state = state->KillField(object, 3, zone());
+ }
+ // Kill the previous elements on {object}.
+ state = state->KillField(object, 2, zone());
+ // Add the new elements on {object}.
+ state = state->AddField(object, 2, node, zone());
+ return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const source_map = NodeProperties::GetValueInput(node, 1);
+ Node* const target_map = NodeProperties::GetValueInput(node, 2);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ if (Node* const object_map = state->LookupField(object, 0)) {
+ if (target_map == object_map) {
+ // The {object} already has the {target_map}, so this TransitionElements
+ // {node} is fully redundant (independent of what {source_map} is).
+ return Replace(effect);
+ }
+ state = state->KillField(object, 0, zone());
+ if (source_map == object_map) {
+ state = state->AddField(object, 0, target_map, zone());
+ }
+ } else {
+ state = state->KillField(object, 0, zone());
+ }
+ ElementsTransition transition = ElementsTransitionOf(node->op());
+ switch (transition) {
+ case ElementsTransition::kFastTransition:
+ break;
+ case ElementsTransition::kSlowTransition:
+ // Kill the elements as well.
+ state = state->KillField(object, 2, zone());
+ break;
+ }
+ return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceLoadField(Node* node) {
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ int field_index = FieldIndexOf(access);
+ if (field_index >= 0) {
+ if (Node* const replacement = state->LookupField(object, field_index)) {
+ // Make sure the {replacement} has a type at least as good as
+ // that of the original {node}.
+ if (!replacement->IsDead() &&
+ NodeProperties::GetType(replacement)
+ ->Is(NodeProperties::GetType(node))) {
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
}
}
+ state = state->AddField(object, field_index, node, zone());
+ }
+ return UpdateState(node, state);
+}
+
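+// Example (illustrative, assumes kPointerSize == 8; not part of this
+// change): along one effect chain,
+//   StoreField[+8](o, v); x = LoadField[+8](o)
+// the store records v at field index 1 of {o}, so the load is replaced by v
+// when v's type is a subtype of the load's type; otherwise the load itself
+// becomes the remembered value for later redundant loads.
+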
+Reduction LoadElimination::ReduceStoreField(Node* node) {
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const new_value = NodeProperties::GetValueInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ int field_index = FieldIndexOf(access);
+ if (field_index >= 0) {
+ Node* const old_value = state->LookupField(object, field_index);
+ if (old_value == new_value) {
+ // This store is fully redundant.
+ return Replace(effect);
+ }
+ // Kill all potentially aliasing fields and record the new value.
+ state = state->KillField(object, field_index, zone());
+ state = state->AddField(object, field_index, new_value, zone());
+ } else {
+ // Unsupported StoreField operator.
+ state = empty_state();
+ }
+ return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceLoadElement(Node* node) {
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ if (Node* const replacement = state->LookupElement(object, index)) {
+ // Make sure the {replacement} has a type at least as good as
+ // that of the original {node}.
+ if (!replacement->IsDead() &&
+ NodeProperties::GetType(replacement)
+ ->Is(NodeProperties::GetType(node))) {
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
+ }
+ }
+ state = state->AddElement(object, index, node, zone());
+ return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceStoreElement(Node* node) {
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const new_value = NodeProperties::GetValueInput(node, 2);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ Node* const old_value = state->LookupElement(object, index);
+ if (old_value == new_value) {
+ // This store is fully redundant.
+ return Replace(effect);
+ }
+ // Kill all potentially aliasing elements.
+ state = state->KillElement(object, index, zone());
+ // Only record the new value if the store doesn't have an implicit truncation.
+ switch (access.machine_type.representation()) {
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kBit:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ // TODO(turbofan): Add support for doing the truncations.
+ break;
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ state = state->AddElement(object, index, new_value, zone());
+ break;
+ }
+ return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceStoreTypedElement(Node* node) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceEffectPhi(Node* node) {
+ Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ AbstractState const* state0 = node_states_.Get(effect0);
+ if (state0 == nullptr) return NoChange();
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just take
+ // the state from the first input, and compute the loop state based on it.
+ AbstractState const* state = ComputeLoopState(node, state0);
+ return UpdateState(node, state);
+ }
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+ // Shortcut for the case when we do not know anything about some input.
+ int const input_count = node->op()->EffectInputCount();
+ for (int i = 1; i < input_count; ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (node_states_.Get(effect) == nullptr) return NoChange();
+ }
+
+ // Make a copy of the first input's state and merge with the state
+ // from other inputs.
+ AbstractState* state = new (zone()) AbstractState(*state0);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = NodeProperties::GetEffectInput(node, i);
+ state->Merge(node_states_.Get(input), zone());
+ }
+ return UpdateState(node, state);
+}
+
+Reduction LoadElimination::ReduceStart(Node* node) {
+ return UpdateState(node, empty_state());
+}
+
+Reduction LoadElimination::ReduceOtherNode(Node* node) {
+ if (node->op()->EffectInputCount() == 1) {
+ if (node->op()->EffectOutputCount() == 1) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate
+ // just yet because we will have to recompute anyway once we compute
+ // the predecessor.
+ if (state == nullptr) return NoChange();
+ // Check if this {node} has some uncontrolled side effects.
+ if (!node->op()->HasProperty(Operator::kNoWrite)) {
+ state = empty_state();
+ }
+ return UpdateState(node, state);
+ } else {
+ // Effect terminators should be handled specially.
+ return NoChange();
+ }
+ }
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+ return NoChange();
+}
+
+Reduction LoadElimination::UpdateState(Node* node, AbstractState const* state) {
+ AbstractState const* original = node_states_.Get(node);
+ // Only signal that the {node} has Changed if the information in {state}
+ // has changed with respect to the {original}.
+ if (state != original) {
+ if (original == nullptr || !state->Equals(original)) {
+ node_states_.Set(node, state);
+ return Changed(node);
+ }
}
- UNREACHABLE();
return NoChange();
}
+LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
+ Node* node, AbstractState const* state) const {
+ Node* const control = NodeProperties::GetControlInput(node);
+ ZoneQueue<Node*> queue(zone());
+ ZoneSet<Node*> visited(zone());
+ visited.insert(node);
+ for (int i = 1; i < control->InputCount(); ++i) {
+ queue.push(node->InputAt(i));
+ }
+ while (!queue.empty()) {
+ Node* const current = queue.front();
+ queue.pop();
+ if (visited.find(current) == visited.end()) {
+ visited.insert(current);
+ if (!current->op()->HasProperty(Operator::kNoWrite)) {
+ switch (current->opcode()) {
+ case IrOpcode::kEnsureWritableFastElements: {
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ state = state->KillField(object, 2, zone());
+ break;
+ }
+ case IrOpcode::kMaybeGrowFastElements: {
+ GrowFastElementsFlags flags =
+ GrowFastElementsFlagsOf(current->op());
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ state = state->KillField(object, 2, zone());
+ if (flags & GrowFastElementsFlag::kArrayObject) {
+ state = state->KillField(object, 3, zone());
+ }
+ break;
+ }
+ case IrOpcode::kTransitionElementsKind: {
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ state = state->KillField(object, 0, zone());
+ state = state->KillField(object, 2, zone());
+ break;
+ }
+ case IrOpcode::kStoreField: {
+ FieldAccess const& access = FieldAccessOf(current->op());
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ int field_index = FieldIndexOf(access);
+ if (field_index < 0) return empty_state();
+ state = state->KillField(object, field_index, zone());
+ break;
+ }
+ case IrOpcode::kStoreElement: {
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ Node* const index = NodeProperties::GetValueInput(current, 1);
+ state = state->KillElement(object, index, zone());
+ break;
+ }
+ case IrOpcode::kStoreBuffer:
+ case IrOpcode::kStoreTypedElement: {
+ // Doesn't affect anything we track with the state currently.
+ break;
+ }
+ default:
+ return empty_state();
+ }
+ }
+ for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
+ queue.push(NodeProperties::GetEffectInput(current, i));
+ }
+ }
+ }
+ return state;
+}
+
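+// Illustrative note (not part of this change): ComputeLoopState walks the
+// loop's effect backedges and conservatively kills whatever a write inside
+// the loop might clobber, e.g. a StoreField at offset 16 on {o} kills field
+// index 2 of {o} in the entry state, while any untracked write collapses
+// everything to empty_state().
+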
+// static
+int LoadElimination::FieldIndexOf(FieldAccess const& access) {
+ MachineRepresentation rep = access.machine_type.representation();
+ switch (rep) {
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kBit:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ if (rep != MachineType::PointerRepresentation()) {
+ return -1; // We currently only track pointer size fields.
+ }
+ break;
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kFloat32:
+ return -1; // Currently untracked.
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
+ return -1; // Currently untracked.
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ // TODO(bmeurer): Check that we never do overlapping load/stores of
+ // individual parts of Float64/Simd128 values.
+ break;
+ }
+ DCHECK_EQ(kTaggedBase, access.base_is_tagged);
+ DCHECK_EQ(0, access.offset % kPointerSize);
+ int field_index = access.offset / kPointerSize;
+ if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
+ return field_index;
+}
+
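+// Example (illustrative, assumes kPointerSize == 8; not part of this
+// change): a tagged field at offset 24 is tracked as 24 / 8 == 3, while a
+// field at offset 8 * kMaxTrackedFields or beyond, or with a representation
+// that is not pointer-sized, yields -1 and is simply not tracked.
+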
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 92c6dd01ba..2a4ee40500 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -1,4 +1,4 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,25 +11,174 @@ namespace v8 {
namespace internal {
namespace compiler {
-class CommonOperatorBuilder;
-class Graph;
+// Forward declarations.
+struct FieldAccess;
+class JSGraph;
class LoadElimination final : public AdvancedReducer {
public:
- explicit LoadElimination(Editor* editor, Graph* graph,
- CommonOperatorBuilder* common)
- : AdvancedReducer(editor), graph_(graph), common_(common) {}
- ~LoadElimination() final;
+ LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
+ ~LoadElimination() final {}
Reduction Reduce(Node* node) final;
private:
- CommonOperatorBuilder* common() const { return common_; }
- Graph* graph() { return graph_; }
+ static const size_t kMaxTrackedElements = 8;
+ // Abstract state to approximate the current state of an element along the
+ // effect paths through the graph.
+ class AbstractElements final : public ZoneObject {
+ public:
+ explicit AbstractElements(Zone* zone) {
+ for (size_t i = 0; i < arraysize(elements_); ++i) {
+ elements_[i] = Element();
+ }
+ }
+ AbstractElements(Node* object, Node* index, Node* value, Zone* zone)
+ : AbstractElements(zone) {
+ elements_[next_index_++] = Element(object, index, value);
+ }
+
+ AbstractElements const* Extend(Node* object, Node* index, Node* value,
+ Zone* zone) const {
+ AbstractElements* that = new (zone) AbstractElements(*this);
+ that->elements_[that->next_index_] = Element(object, index, value);
+ that->next_index_ = (that->next_index_ + 1) % arraysize(elements_);
+ return that;
+ }
+ Node* Lookup(Node* object, Node* index) const;
+ AbstractElements const* Kill(Node* object, Node* index, Zone* zone) const;
+ bool Equals(AbstractElements const* that) const;
+ AbstractElements const* Merge(AbstractElements const* that,
+ Zone* zone) const;
+
+ private:
+ struct Element {
+ Element() {}
+ Element(Node* object, Node* index, Node* value)
+ : object(object), index(index), value(value) {}
+
+ Node* object = nullptr;
+ Node* index = nullptr;
+ Node* value = nullptr;
+ };
+
+ Element elements_[kMaxTrackedElements];
+ size_t next_index_ = 0;
+ };
+
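+ // Illustrative note (not part of this change): elements_ above acts as a
+ // small ring buffer of kMaxTrackedElements entries; Extend() overwrites
+ // the oldest slot via next_index_, so only the most recently observed
+ // (object, index) -> value facts are remembered.
+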
+ // Abstract state to approximate the current state of a certain field along
+ // the effect paths through the graph.
+ class AbstractField final : public ZoneObject {
+ public:
+ explicit AbstractField(Zone* zone) : info_for_node_(zone) {}
+ AbstractField(Node* object, Node* value, Zone* zone)
+ : info_for_node_(zone) {
+ info_for_node_.insert(std::make_pair(object, value));
+ }
+
+ AbstractField const* Extend(Node* object, Node* value, Zone* zone) const {
+ AbstractField* that = new (zone) AbstractField(zone);
+ that->info_for_node_ = this->info_for_node_;
+ that->info_for_node_.insert(std::make_pair(object, value));
+ return that;
+ }
+ Node* Lookup(Node* object) const;
+ AbstractField const* Kill(Node* object, Zone* zone) const;
+ bool Equals(AbstractField const* that) const {
+ return this == that || this->info_for_node_ == that->info_for_node_;
+ }
+ AbstractField const* Merge(AbstractField const* that, Zone* zone) const {
+ if (this->Equals(that)) return this;
+ AbstractField* copy = new (zone) AbstractField(zone);
+ for (auto this_it : this->info_for_node_) {
+ Node* this_object = this_it.first;
+ Node* this_value = this_it.second;
+ auto that_it = that->info_for_node_.find(this_object);
+ if (that_it != that->info_for_node_.end() &&
+ that_it->second == this_value) {
+ copy->info_for_node_.insert(this_it);
+ }
+ }
+ return copy;
+ }
+
+ private:
+ ZoneMap<Node*, Node*> info_for_node_;
+ };
+
+ static size_t const kMaxTrackedFields = 32;
+
+ class AbstractState final : public ZoneObject {
+ public:
+ AbstractState() {
+ for (size_t i = 0; i < arraysize(fields_); ++i) {
+ fields_[i] = nullptr;
+ }
+ }
+
+ bool Equals(AbstractState const* that) const;
+ void Merge(AbstractState const* that, Zone* zone);
+
+ AbstractState const* AddField(Node* object, size_t index, Node* value,
+ Zone* zone) const;
+ AbstractState const* KillField(Node* object, size_t index,
+ Zone* zone) const;
+ Node* LookupField(Node* object, size_t index) const;
+
+ AbstractState const* AddElement(Node* object, Node* index, Node* value,
+ Zone* zone) const;
+ AbstractState const* KillElement(Node* object, Node* index,
+ Zone* zone) const;
+ Node* LookupElement(Node* object, Node* index) const;
+
+ private:
+ AbstractElements const* elements_ = nullptr;
+ AbstractField const* fields_[kMaxTrackedFields];
+ };
+
+ class AbstractStateForEffectNodes final : public ZoneObject {
+ public:
+ explicit AbstractStateForEffectNodes(Zone* zone) : info_for_node_(zone) {}
+ AbstractState const* Get(Node* node) const;
+ void Set(Node* node, AbstractState const* state);
+
+ Zone* zone() const { return info_for_node_.get_allocator().zone(); }
+
+ private:
+ ZoneVector<AbstractState const*> info_for_node_;
+ };
+
+ Reduction ReduceCheckMaps(Node* node);
+ Reduction ReduceEnsureWritableFastElements(Node* node);
+ Reduction ReduceMaybeGrowFastElements(Node* node);
+ Reduction ReduceTransitionElementsKind(Node* node);
Reduction ReduceLoadField(Node* node);
- Graph* graph_;
- CommonOperatorBuilder* common_;
+ Reduction ReduceStoreField(Node* node);
+ Reduction ReduceLoadElement(Node* node);
+ Reduction ReduceStoreElement(Node* node);
+ Reduction ReduceStoreTypedElement(Node* node);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceOtherNode(Node* node);
+
+ Reduction UpdateState(Node* node, AbstractState const* state);
+
+ AbstractState const* ComputeLoopState(Node* node,
+ AbstractState const* state) const;
+
+ static int FieldIndexOf(FieldAccess const& access);
+
+ AbstractState const* empty_state() const { return &empty_state_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Zone* zone() const { return node_states_.zone(); }
+
+ AbstractState const empty_state_;
+ AbstractStateForEffectNodes node_states_;
+ JSGraph* const jsgraph_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadElimination);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index d52c7c7742..2a81aee49b 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -29,6 +29,7 @@ struct NodeInfo {
struct LoopInfo {
Node* header;
NodeInfo* header_list;
+ NodeInfo* exit_list;
NodeInfo* body_list;
LoopTree::Loop* loop;
};
@@ -81,9 +82,9 @@ class LoopFinderImpl {
if (marked_forward && marked_backward) {
PrintF("X");
} else if (marked_forward) {
- PrintF("/");
+ PrintF(">");
} else if (marked_backward) {
- PrintF("\\");
+ PrintF("<");
} else {
PrintF(" ");
}
@@ -198,12 +199,22 @@ class LoopFinderImpl {
if (merge->opcode() == IrOpcode::kLoop) {
loop_num = CreateLoopInfo(merge);
}
+ } else if (node->opcode() == IrOpcode::kLoopExit) {
+ // Intentionally ignore return value. Loop exit node marks
+ // are propagated normally.
+ CreateLoopInfo(node->InputAt(1));
+ } else if (node->opcode() == IrOpcode::kLoopExitValue ||
+ node->opcode() == IrOpcode::kLoopExitEffect) {
+ Node* loop_exit = NodeProperties::GetControlInput(node);
+ // Intentionally ignore return value. Loop exit node marks
+ // are propagated normally.
+ CreateLoopInfo(loop_exit->InputAt(1));
}
// Propagate marks backwards from this node.
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
- if (loop_num > 0 && i != kAssumedLoopEntryIndex) {
+ if (IsBackedge(node, i)) {
// Only propagate the loop mark on backedges.
if (SetBackwardMark(input, loop_num)) Queue(input);
} else {
@@ -216,6 +227,7 @@ class LoopFinderImpl {
// Make a new loop if necessary for the given node.
int CreateLoopInfo(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoop, node->opcode());
int loop_num = LoopNum(node);
if (loop_num > 0) return loop_num;
@@ -223,21 +235,39 @@ class LoopFinderImpl {
if (INDEX(loop_num) >= width_) ResizeBackwardMarks();
// Create a new loop.
- loops_.push_back({node, nullptr, nullptr, nullptr});
+ loops_.push_back({node, nullptr, nullptr, nullptr, nullptr});
loop_tree_->NewLoop();
+ SetLoopMarkForLoopHeader(node, loop_num);
+ return loop_num;
+ }
+
+ void SetLoopMark(Node* node, int loop_num) {
+ info(node); // create the NodeInfo
SetBackwardMark(node, loop_num);
loop_tree_->node_to_loop_num_[node->id()] = loop_num;
+ }
- // Setup loop mark for phis attached to loop header.
+ void SetLoopMarkForLoopHeader(Node* node, int loop_num) {
+ DCHECK_EQ(IrOpcode::kLoop, node->opcode());
+ SetLoopMark(node, loop_num);
for (Node* use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
- info(use); // create the NodeInfo
- SetBackwardMark(use, loop_num);
- loop_tree_->node_to_loop_num_[use->id()] = loop_num;
+ SetLoopMark(use, loop_num);
}
- }
- return loop_num;
+ // Do not keep the loop alive if it does not have any backedges.
+ if (node->InputCount() <= 1) continue;
+
+ if (use->opcode() == IrOpcode::kLoopExit) {
+ SetLoopMark(use, loop_num);
+ for (Node* exit_use : use->uses()) {
+ if (exit_use->opcode() == IrOpcode::kLoopExitValue ||
+ exit_use->opcode() == IrOpcode::kLoopExitEffect) {
+ SetLoopMark(exit_use, loop_num);
+ }
+ }
+ }
+ }
}
void ResizeBackwardMarks() {
@@ -276,20 +306,33 @@ class LoopFinderImpl {
queued_.Set(node, false);
for (Edge edge : node->use_edges()) {
Node* use = edge.from();
- if (!IsBackedge(use, edge)) {
+ if (!IsBackedge(use, edge.index())) {
if (PropagateForwardMarks(node, use)) Queue(use);
}
}
}
}
- bool IsBackedge(Node* use, Edge& edge) {
+ bool IsLoopHeaderNode(Node* node) {
+ return node->opcode() == IrOpcode::kLoop || NodeProperties::IsPhi(node);
+ }
+
+ bool IsLoopExitNode(Node* node) {
+ return node->opcode() == IrOpcode::kLoopExit ||
+ node->opcode() == IrOpcode::kLoopExitValue ||
+ node->opcode() == IrOpcode::kLoopExitEffect;
+ }
+
+ bool IsBackedge(Node* use, int index) {
if (LoopNum(use) <= 0) return false;
- if (edge.index() == kAssumedLoopEntryIndex) return false;
if (NodeProperties::IsPhi(use)) {
- return !NodeProperties::IsControlEdge(edge);
+ return index != NodeProperties::FirstControlIndex(use) &&
+ index != kAssumedLoopEntryIndex;
+ } else if (use->opcode() == IrOpcode::kLoop) {
+ return index != kAssumedLoopEntryIndex;
}
- return true;
+ DCHECK(IsLoopExitNode(use));
+ return false;
}
int LoopNum(Node* node) { return loop_tree_->node_to_loop_num_[node->id()]; }
@@ -307,6 +350,22 @@ class LoopFinderImpl {
}
}
+ void AddNodeToLoop(NodeInfo* node_info, LoopInfo* loop, int loop_num) {
+ if (LoopNum(node_info->node) == loop_num) {
+ if (IsLoopHeaderNode(node_info->node)) {
+ node_info->next = loop->header_list;
+ loop->header_list = node_info;
+ } else {
+ DCHECK(IsLoopExitNode(node_info->node));
+ node_info->next = loop->exit_list;
+ loop->exit_list = node_info;
+ }
+ } else {
+ node_info->next = loop->body_list;
+ loop->body_list = node_info;
+ }
+ }
+
void FinishLoopTree() {
DCHECK(loops_found_ == static_cast<int>(loops_.size()));
DCHECK(loops_found_ == static_cast<int>(loop_tree_->all_loops_.size()));
@@ -342,13 +401,7 @@ class LoopFinderImpl {
}
}
if (innermost == nullptr) continue;
- if (LoopNum(ni.node) == innermost_index) {
- ni.next = innermost->header_list;
- innermost->header_list = &ni;
- } else {
- ni.next = innermost->body_list;
- innermost->body_list = &ni;
- }
+ AddNodeToLoop(&ni, innermost, innermost_index);
count++;
}
@@ -368,13 +421,7 @@ class LoopFinderImpl {
size_t count = 0;
for (NodeInfo& ni : info_) {
if (ni.node == nullptr || !IsInLoop(ni.node, 1)) continue;
- if (LoopNum(ni.node) == 1) {
- ni.next = li->header_list;
- li->header_list = &ni;
- } else {
- ni.next = li->body_list;
- li->body_list = &ni;
- }
+ AddNodeToLoop(&ni, li, 1);
count++;
}
@@ -406,7 +453,14 @@ class LoopFinderImpl {
// Serialize nested loops.
for (LoopTree::Loop* child : loop->children_) SerializeLoop(child);
- loop->body_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+ // Serialize the exits.
+ loop->exits_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+ for (NodeInfo* ni = li.exit_list; ni != nullptr; ni = ni->next) {
+ loop_tree_->loop_nodes_.push_back(ni->node);
+ loop_tree_->node_to_loop_num_[ni->node->id()] = loop_num;
+ }
+
+ loop->exits_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
}
// Connect the LoopTree loops to their parents recursively.
@@ -438,9 +492,12 @@ class LoopFinderImpl {
while (i < loop->body_start_) {
PrintF(" H#%d", loop_tree_->loop_nodes_[i++]->id());
}
- while (i < loop->body_end_) {
+ while (i < loop->exits_start_) {
PrintF(" B#%d", loop_tree_->loop_nodes_[i++]->id());
}
+ while (i < loop->exits_end_) {
+ PrintF(" E#%d", loop_tree_->loop_nodes_[i++]->id());
+ }
PrintF("\n");
for (LoopTree::Loop* child : loop->children_) PrintLoop(child);
}
@@ -452,7 +509,7 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone());
LoopFinderImpl finder(graph, loop_tree, zone);
finder.Run();
- if (FLAG_trace_turbo_graph) {
+ if (FLAG_trace_turbo_loop) {
finder.Print();
}
return loop_tree;
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index b8bc395acb..a8c3bca7d7 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -38,8 +38,9 @@ class LoopTree : public ZoneObject {
Loop* parent() const { return parent_; }
const ZoneVector<Loop*>& children() const { return children_; }
size_t HeaderSize() const { return body_start_ - header_start_; }
- size_t BodySize() const { return body_end_ - body_start_; }
- size_t TotalSize() const { return body_end_ - header_start_; }
+ size_t BodySize() const { return exits_start_ - body_start_; }
+ size_t ExitsSize() const { return exits_end_ - exits_start_; }
+ size_t TotalSize() const { return exits_end_ - header_start_; }
size_t depth() const { return static_cast<size_t>(depth_); }
private:
@@ -52,13 +53,15 @@ class LoopTree : public ZoneObject {
children_(zone),
header_start_(-1),
body_start_(-1),
- body_end_(-1) {}
+ exits_start_(-1),
+ exits_end_(-1) {}
Loop* parent_;
int depth_;
ZoneVector<Loop*> children_;
int header_start_;
int body_start_;
- int body_end_;
+ int exits_start_;
+ int exits_end_;
};
// Return the innermost nested loop, if any, that contains {node}.
@@ -97,13 +100,19 @@ class LoopTree : public ZoneObject {
// Return a range which can iterate over the body nodes of {loop}.
NodeRange BodyNodes(Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->body_start_,
- &loop_nodes_[0] + loop->body_end_);
+ &loop_nodes_[0] + loop->exits_start_);
+ }
+
+ // Return a range which can iterate over the exit nodes of {loop}.
+ NodeRange ExitNodes(Loop* loop) {
+ return NodeRange(&loop_nodes_[0] + loop->exits_start_,
+ &loop_nodes_[0] + loop->exits_end_);
}
// Return a range which can iterate over the nodes of {loop}.
NodeRange LoopNodes(Loop* loop) {
return NodeRange(&loop_nodes_[0] + loop->header_start_,
- &loop_nodes_[0] + loop->body_end_);
+ &loop_nodes_[0] + loop->exits_end_);
}
// Return the node that represents the control, i.e. the loop node itself.
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 53795961b3..9535df54ad 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -126,8 +126,14 @@ struct Peeling {
// Copy all the nodes first.
for (Node* node : nodes) {
inputs.clear();
- for (Node* input : node->inputs()) inputs.push_back(map(input));
- Insert(node, graph->NewNode(node->op(), node->InputCount(), &inputs[0]));
+ for (Node* input : node->inputs()) {
+ inputs.push_back(map(input));
+ }
+ Node* copy = graph->NewNode(node->op(), node->InputCount(), &inputs[0]);
+ if (NodeProperties::IsTyped(node)) {
+ NodeProperties::SetType(copy, NodeProperties::GetType(node));
+ }
+ Insert(node, copy);
}
// Fix remaining inputs of the copies.
@@ -160,56 +166,54 @@ Node* PeeledIteration::map(Node* node) {
return node;
}
-
-static void FindLoopExits(LoopTree* loop_tree, LoopTree::Loop* loop,
- NodeVector& exits, NodeVector& rets) {
+bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
// Look for returns and if projections that are outside the loop but whose
// control input is inside the loop.
+ Node* loop_node = loop_tree->GetLoopControl(loop);
for (Node* node : loop_tree->LoopNodes(loop)) {
for (Node* use : node->uses()) {
if (!loop_tree->Contains(loop, use)) {
- if (IrOpcode::IsIfProjectionOpcode(use->opcode())) {
- // This is a branch from inside the loop to outside the loop.
- exits.push_back(use);
- } else if (use->opcode() == IrOpcode::kReturn &&
- loop_tree->Contains(loop,
- NodeProperties::GetControlInput(use))) {
- // This is a return from inside the loop.
- rets.push_back(use);
+ bool unmarked_exit;
+ switch (node->opcode()) {
+ case IrOpcode::kLoopExit:
+ unmarked_exit = (node->InputAt(1) != loop_node);
+ break;
+ case IrOpcode::kLoopExitValue:
+ case IrOpcode::kLoopExitEffect:
+ unmarked_exit = (node->InputAt(1)->InputAt(1) != loop_node);
+ break;
+ default:
+ unmarked_exit = (use->opcode() != IrOpcode::kTerminate);
+ }
+ if (unmarked_exit) {
+ if (FLAG_trace_turbo_loop) {
+ Node* loop_node = loop_tree->GetLoopControl(loop);
+ PrintF(
+ "Cannot peel loop %i. Loop exit without explicit mark: Node %i "
+ "(%s) is inside "
+ "loop, but its use %i (%s) is outside.\n",
+ loop_node->id(), node->id(), node->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ }
+ return false;
}
}
}
}
-}
-
-
-bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
- Zone zone(loop_tree->zone()->allocator());
- NodeVector exits(&zone);
- NodeVector rets(&zone);
- FindLoopExits(loop_tree, loop, exits, rets);
- return exits.size() <= 1u;
+ return true;
}
PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
LoopTree* loop_tree, LoopTree::Loop* loop,
Zone* tmp_zone) {
- //============================================================================
- // Find the loop exit region to determine if this loop can be peeled.
- //============================================================================
- NodeVector exits(tmp_zone);
- NodeVector rets(tmp_zone);
- FindLoopExits(loop_tree, loop, exits, rets);
-
- if (exits.size() != 1) return nullptr; // not peelable currently.
+ if (!CanPeel(loop_tree, loop)) return nullptr;
//============================================================================
// Construct the peeled iteration.
//============================================================================
PeeledIterationImpl* iter = new (tmp_zone) PeeledIterationImpl(tmp_zone);
- size_t estimated_peeled_size =
- 5 + (loop->TotalSize() + exits.size() + rets.size()) * 2;
+ size_t estimated_peeled_size = 5 + (loop->TotalSize()) * 2;
Peeling peeling(graph, tmp_zone, estimated_peeled_size, &iter->node_pairs_);
Node* dead = graph->NewNode(common->Dead());
@@ -260,73 +264,126 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
// Only one backedge, simply replace the input to loop with output of
// peeling.
for (Node* node : loop_tree->HeaderNodes(loop)) {
- node->ReplaceInput(0, peeling.map(node->InputAt(0)));
+ node->ReplaceInput(0, peeling.map(node->InputAt(1)));
}
new_entry = peeling.map(loop_node->InputAt(1));
}
loop_node->ReplaceInput(0, new_entry);
//============================================================================
- // Duplicate the loop exit region and add a merge.
+ // Change the exit and exit markers to merge/phi/effect-phi.
//============================================================================
+ for (Node* exit : loop_tree->ExitNodes(loop)) {
+ switch (exit->opcode()) {
+ case IrOpcode::kLoopExit:
+ // Change the loop exit node to a merge node.
+ exit->ReplaceInput(1, peeling.map(exit->InputAt(0)));
+ NodeProperties::ChangeOp(exit, common->Merge(2));
+ break;
+ case IrOpcode::kLoopExitValue:
+ // Change exit marker to phi.
+ exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
+ NodeProperties::ChangeOp(
+ exit, common->Phi(MachineRepresentation::kTagged, 2));
+ break;
+ case IrOpcode::kLoopExitEffect:
+ // Change effect exit marker to effect phi.
+ exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
+ NodeProperties::ChangeOp(exit, common->EffectPhi(2));
+ break;
+ default:
+ break;
+ }
+ }
+ return iter;
+}
+
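+// Illustrative sketch (not part of this change): after peeling, the exit
+// region is rewritten as
+//   LoopExit(c, loop)        -> Merge(c, peeled_c)
+//   LoopExitValue(v, exit)   -> Phi(v, peeled_v, exit)
+//   LoopExitEffect(e, exit)  -> EffectPhi(e, peeled_e, exit)
+// joining each exit of the remaining loop with the corresponding exit of
+// the peeled iteration.
+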
+namespace {
- // Currently we are limited to peeling loops with a single exit. The exit is
- // the postdominator of the loop (ignoring returns).
- Node* postdom = exits[0];
- for (Node* node : rets) exits.push_back(node);
- for (Node* use : postdom->uses()) {
- if (NodeProperties::IsPhi(use)) exits.push_back(use);
+void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
+ LoopTree* loop_tree, LoopTree::Loop* loop,
+ Zone* temp_zone) {
+ // If the loop has nested loops, peel inside those.
+ if (!loop->children().empty()) {
+ for (LoopTree::Loop* inner_loop : loop->children()) {
+ PeelInnerLoops(graph, common, loop_tree, inner_loop, temp_zone);
+ }
+ return;
+ }
+ // Only peel small-enough loops.
+ if (loop->TotalSize() > LoopPeeler::kMaxPeeledNodes) return;
+ if (FLAG_trace_turbo_loop) {
+ PrintF("Peeling loop with header: ");
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ PrintF("%i ", node->id());
+ }
+ PrintF("\n");
}
- NodeRange exit_range(&exits[0], &exits[0] + exits.size());
- peeling.CopyNodes(graph, tmp_zone, dead, exit_range);
-
- Node* merge = graph->NewNode(common->Merge(2), postdom, peeling.map(postdom));
- postdom->ReplaceUses(merge);
- merge->ReplaceInput(0, postdom); // input 0 overwritten by above line.
-
- // Find and update all the edges into either the loop or exit region.
- for (int i = 0; i < 2; i++) {
- NodeRange range = i == 0 ? loop_tree->LoopNodes(loop) : exit_range;
- ZoneVector<Edge> value_edges(tmp_zone);
- ZoneVector<Edge> effect_edges(tmp_zone);
-
- for (Node* node : range) {
- // Gather value and effect edges from outside the region.
- for (Edge edge : node->use_edges()) {
- if (!peeling.Marked(edge.from())) {
- // Edge from outside the loop into the region.
- if (NodeProperties::IsValueEdge(edge) ||
- NodeProperties::IsContextEdge(edge)) {
- value_edges.push_back(edge);
- } else if (NodeProperties::IsEffectEdge(edge)) {
- effect_edges.push_back(edge);
- } else {
- // don't do anything for control edges.
- // TODO(titzer): should update control edges to peeled?
- }
- }
+ LoopPeeler::Peel(graph, common, loop_tree, loop, temp_zone);
+}
+
+void EliminateLoopExit(Node* node) {
+ DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
+ // The exit markers take the loop exit as input. We iterate over uses
+ // and remove all the markers from the graph.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ Node* marker = edge.from();
+ if (marker->opcode() == IrOpcode::kLoopExitValue) {
+ NodeProperties::ReplaceUses(marker, marker->InputAt(0));
+ marker->Kill();
+ } else if (marker->opcode() == IrOpcode::kLoopExitEffect) {
+ NodeProperties::ReplaceUses(marker, nullptr,
+ NodeProperties::GetEffectInput(marker));
+ marker->Kill();
}
+ }
+ }
+ NodeProperties::ReplaceUses(node, nullptr, nullptr,
+ NodeProperties::GetControlInput(node, 0));
+ node->Kill();
+}
+
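+// Illustrative sketch (not part of this change): eliminating
+//   LoopExit(inside_control, loop)
+// forwards LoopExitValue markers to their value input, LoopExitEffect
+// markers to their effect input, and control uses to {inside_control},
+// erasing the exit region once no pass needs the markers anymore.
+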
+} // namespace
- // Update all the value and effect edges at once.
- if (!value_edges.empty()) {
- // TODO(titzer): machine type is wrong here.
- Node* phi =
- graph->NewNode(common->Phi(MachineRepresentation::kTagged, 2), node,
- peeling.map(node), merge);
- for (Edge edge : value_edges) edge.UpdateTo(phi);
- value_edges.clear();
+// static
+void LoopPeeler::PeelInnerLoopsOfTree(Graph* graph,
+ CommonOperatorBuilder* common,
+ LoopTree* loop_tree, Zone* temp_zone) {
+ for (LoopTree::Loop* loop : loop_tree->outer_loops()) {
+ PeelInnerLoops(graph, common, loop_tree, loop, temp_zone);
+ }
+
+ EliminateLoopExits(graph, temp_zone);
+}
+
+// static
+void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* temp_zone) {
+ ZoneQueue<Node*> queue(temp_zone);
+ ZoneVector<bool> visited(graph->NodeCount(), false, temp_zone);
+ queue.push(graph->end());
+ while (!queue.empty()) {
+ Node* node = queue.front();
+ queue.pop();
+
+ if (node->opcode() == IrOpcode::kLoopExit) {
+ Node* control = NodeProperties::GetControlInput(node);
+ EliminateLoopExit(node);
+ if (!visited[control->id()]) {
+ visited[control->id()] = true;
+ queue.push(control);
}
- if (!effect_edges.empty()) {
- Node* effect_phi = graph->NewNode(common->EffectPhi(2), node,
- peeling.map(node), merge);
- for (Edge edge : effect_edges) edge.UpdateTo(effect_phi);
- effect_edges.clear();
+ } else {
+ for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+ Node* control = NodeProperties::GetControlInput(node, i);
+ if (!visited[control->id()]) {
+ visited[control->id()] = true;
+ queue.push(control);
+ }
}
}
}
-
- return iter;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index ea963b0f9c..8b38e2575c 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -33,6 +33,11 @@ class LoopPeeler {
static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
LoopTree* loop_tree, LoopTree::Loop* loop,
Zone* tmp_zone);
+ static void PeelInnerLoopsOfTree(Graph* graph, CommonOperatorBuilder* common,
+ LoopTree* loop_tree, Zone* tmp_zone);
+
+ static void EliminateLoopExits(Graph* graph, Zone* temp_zone);
+ static const size_t kMaxPeeledNodes = 1000;
};
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
new file mode 100644
index 0000000000..8331963a7d
--- /dev/null
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -0,0 +1,406 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/loop-variable-optimizer.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-marker.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Macro for outputting trace information from the loop variable optimizer.
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_loop) PrintF(__VA_ARGS__); \
+ } while (false)
+
+LoopVariableOptimizer::LoopVariableOptimizer(Graph* graph,
+ CommonOperatorBuilder* common,
+ Zone* zone)
+ : graph_(graph),
+ common_(common),
+ zone_(zone),
+ limits_(zone),
+ induction_vars_(zone) {}
+
+void LoopVariableOptimizer::Run() {
+ ZoneQueue<Node*> queue(zone());
+ queue.push(graph()->start());
+ NodeMarker<bool> queued(graph(), 2);
+ while (!queue.empty()) {
+ Node* node = queue.front();
+ queue.pop();
+ queued.Set(node, false);
+
+ DCHECK(limits_.find(node->id()) == limits_.end());
+ bool all_inputs_visited = true;
+ int inputs_end = (node->opcode() == IrOpcode::kLoop)
+ ? kFirstBackedge
+ : node->op()->ControlInputCount();
+ for (int i = 0; i < inputs_end; i++) {
+ auto input = limits_.find(NodeProperties::GetControlInput(node, i)->id());
+ if (input == limits_.end()) {
+ all_inputs_visited = false;
+ break;
+ }
+ }
+ if (!all_inputs_visited) continue;
+
+ VisitNode(node);
+ DCHECK(limits_.find(node->id()) != limits_.end());
+
+ // Queue control outputs.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge) &&
+ edge.from()->op()->ControlOutputCount() > 0) {
+ Node* use = edge.from();
+ if (use->opcode() == IrOpcode::kLoop &&
+ edge.index() != kAssumedLoopEntryIndex) {
+ VisitBackedge(node, use);
+ } else if (!queued.Get(use)) {
+ queue.push(use);
+ queued.Set(use, true);
+ }
+ }
+ }
+ }
+}
+
+class LoopVariableOptimizer::Constraint : public ZoneObject {
+ public:
+ InductionVariable::ConstraintKind kind() const { return kind_; }
+ Node* left() const { return left_; }
+ Node* right() const { return right_; }
+
+ const Constraint* next() const { return next_; }
+
+ Constraint(Node* left, InductionVariable::ConstraintKind kind, Node* right,
+ const Constraint* next)
+ : left_(left), right_(right), kind_(kind), next_(next) {}
+
+ private:
+ Node* left_;
+ Node* right_;
+ InductionVariable::ConstraintKind kind_;
+ const Constraint* next_;
+};
+
+class LoopVariableOptimizer::VariableLimits : public ZoneObject {
+ public:
+ static VariableLimits* Empty(Zone* zone) {
+ return new (zone) VariableLimits();
+ }
+
+ VariableLimits* Copy(Zone* zone) const {
+ return new (zone) VariableLimits(this);
+ }
+
+ void Add(Node* left, InductionVariable::ConstraintKind kind, Node* right,
+ Zone* zone) {
+ head_ = new (zone) Constraint(left, kind, right, head_);
+ limit_count_++;
+ }
+
+ void Merge(const VariableLimits* other) {
+ // Change the current condition list to the longest common tail
+ // of this condition list and the other list. (The common tail
+ // should correspond to the list from the common dominator.)
+
+ // First, we throw away the prefix of the longer list, so that
+ // we have lists of the same length.
+ size_t other_size = other->limit_count_;
+ const Constraint* other_limit = other->head_;
+ while (other_size > limit_count_) {
+ other_limit = other_limit->next();
+ other_size--;
+ }
+ while (limit_count_ > other_size) {
+ head_ = head_->next();
+ limit_count_--;
+ }
+
+ // Then we go through both lists in lock-step until we find
+ // the common tail.
+ while (head_ != other_limit) {
+ DCHECK(limit_count_ > 0);
+ limit_count_--;
+ other_limit = other_limit->next();
+ head_ = head_->next();
+ }
+ }
+
+ const Constraint* head() const { return head_; }
+
+ private:
+ VariableLimits() {}
+ explicit VariableLimits(const VariableLimits* other)
+ : head_(other->head_), limit_count_(other->limit_count_) {}
+
+ const Constraint* head_ = nullptr;
+ size_t limit_count_ = 0;
+};
+
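+// Example (illustrative, not part of this change): Merge() intersects two
+// constraint lists by trimming to their longest common tail, e.g.
+//   this:  C3 -> C2 -> C1 -> null
+//   other:       C4 -> C1 -> null
+// leaves head_ == C1, exactly the constraints established at the common
+// dominator of the two merged control paths.
+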
+void InductionVariable::AddUpperBound(Node* bound,
+ InductionVariable::ConstraintKind kind) {
+ if (FLAG_trace_turbo_loop) {
+ OFStream os(stdout);
+ os << "New upper bound for " << phi()->id() << " (loop "
+ << NodeProperties::GetControlInput(phi())->id() << "): " << *bound
+ << std::endl;
+ }
+ upper_bounds_.push_back(Bound(bound, kind));
+}
+
+void InductionVariable::AddLowerBound(Node* bound,
+ InductionVariable::ConstraintKind kind) {
+ if (FLAG_trace_turbo_loop) {
+ OFStream os(stdout);
+ os << "New lower bound for " << phi()->id() << " (loop "
+ << NodeProperties::GetControlInput(phi())->id() << "): " << *bound;
+ }
+ lower_bounds_.push_back(Bound(bound, kind));
+}
+
+void LoopVariableOptimizer::VisitBackedge(Node* from, Node* loop) {
+ if (loop->op()->ControlInputCount() != 2) return;
+
+ // Go through the constraints, and update the induction variables in
+ // this loop if they are involved in the constraint.
+ const VariableLimits* limits = limits_[from->id()];
+ for (const Constraint* constraint = limits->head(); constraint != nullptr;
+ constraint = constraint->next()) {
+ if (constraint->left()->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(constraint->left()) == loop) {
+ auto var = induction_vars_.find(constraint->left()->id());
+ if (var != induction_vars_.end()) {
+ var->second->AddUpperBound(constraint->right(), constraint->kind());
+ }
+ }
+ if (constraint->right()->opcode() == IrOpcode::kPhi &&
+ NodeProperties::GetControlInput(constraint->right()) == loop) {
+ auto var = induction_vars_.find(constraint->right()->id());
+ if (var != induction_vars_.end()) {
+ var->second->AddLowerBound(constraint->left(), constraint->kind());
+ }
+ }
+ }
+}
+
+void LoopVariableOptimizer::VisitNode(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kMerge:
+ return VisitMerge(node);
+ case IrOpcode::kLoop:
+ return VisitLoop(node);
+ case IrOpcode::kIfFalse:
+ return VisitIf(node, false);
+ case IrOpcode::kIfTrue:
+ return VisitIf(node, true);
+ case IrOpcode::kStart:
+ return VisitStart(node);
+ case IrOpcode::kLoopExit:
+ return VisitLoopExit(node);
+ default:
+ return VisitOtherControl(node);
+ }
+}
+
+void LoopVariableOptimizer::VisitMerge(Node* node) {
+ // Merge the limits of all incoming edges.
+ VariableLimits* merged = limits_[node->InputAt(0)->id()]->Copy(zone());
+ for (int i = 1; i < node->InputCount(); i++) {
+ merged->Merge(limits_[node->InputAt(i)->id()]);
+ }
+ limits_[node->id()] = merged;
+}
+
+void LoopVariableOptimizer::VisitLoop(Node* node) {
+ DetectInductionVariables(node);
+ // Conservatively take the limits from the loop entry here.
+ return TakeConditionsFromFirstControl(node);
+}
+
+void LoopVariableOptimizer::VisitIf(Node* node, bool polarity) {
+ Node* branch = node->InputAt(0);
+ Node* cond = branch->InputAt(0);
+ VariableLimits* limits = limits_[branch->id()]->Copy(zone());
+ // Normalize to less than comparison.
+ switch (cond->opcode()) {
+ case IrOpcode::kJSLessThan:
+ AddCmpToLimits(limits, cond, InductionVariable::kStrict, polarity);
+ break;
+ case IrOpcode::kJSGreaterThan:
+ AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, !polarity);
+ break;
+ case IrOpcode::kJSLessThanOrEqual:
+ AddCmpToLimits(limits, cond, InductionVariable::kNonStrict, polarity);
+ break;
+ case IrOpcode::kJSGreaterThanOrEqual:
+ AddCmpToLimits(limits, cond, InductionVariable::kStrict, !polarity);
+ break;
+ default:
+ break;
+ }
+ limits_[node->id()] = limits;
+}
+
+void LoopVariableOptimizer::AddCmpToLimits(
+ VariableLimits* limits, Node* node, InductionVariable::ConstraintKind kind,
+ bool polarity) {
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (FindInductionVariable(left) || FindInductionVariable(right)) {
+ if (polarity) {
+ limits->Add(left, kind, right, zone());
+ } else {
+ kind = (kind == InductionVariable::kStrict)
+ ? InductionVariable::kNonStrict
+ : InductionVariable::kStrict;
+ limits->Add(right, kind, left, zone());
+ }
+ }
+}
+
+void LoopVariableOptimizer::VisitStart(Node* node) {
+ limits_[node->id()] = VariableLimits::Empty(zone());
+}
+
+void LoopVariableOptimizer::VisitLoopExit(Node* node) {
+ return TakeConditionsFromFirstControl(node);
+}
+
+void LoopVariableOptimizer::VisitOtherControl(Node* node) {
+ DCHECK_EQ(1, node->op()->ControlInputCount());
+ return TakeConditionsFromFirstControl(node);
+}
+
+void LoopVariableOptimizer::TakeConditionsFromFirstControl(Node* node) {
+ const VariableLimits* limits =
+ limits_[NodeProperties::GetControlInput(node, 0)->id()];
+ DCHECK_NOT_NULL(limits);
+ limits_[node->id()] = limits;
+}
+
+const InductionVariable* LoopVariableOptimizer::FindInductionVariable(
+ Node* node) {
+ auto var = induction_vars_.find(node->id());
+ if (var != induction_vars_.end()) {
+ return var->second;
+ }
+ return nullptr;
+}
+
+InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
+ DCHECK_EQ(2, phi->op()->ValueInputCount());
+ DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(phi)->opcode());
+ Node* initial = phi->InputAt(0);
+ Node* arith = phi->InputAt(1);
+ InductionVariable::ArithmeticType arithmetic_type;
+ if (arith->opcode() == IrOpcode::kJSAdd) {
+ arithmetic_type = InductionVariable::ArithmeticType::kAddition;
+ } else if (arith->opcode() == IrOpcode::kJSSubtract) {
+ arithmetic_type = InductionVariable::ArithmeticType::kSubtraction;
+ } else {
+ return nullptr;
+ }
+
+ // TODO(jarin) Support both sides.
+ if (arith->InputAt(0) != phi) {
+ if (arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber ||
+ arith->InputAt(0)->InputAt(0) != phi) {
+ return nullptr;
+ }
+ }
+ Node* incr = arith->InputAt(1);
+ return new (zone())
+ InductionVariable(phi, arith, incr, initial, zone(), arithmetic_type);
+}
+
+void LoopVariableOptimizer::DetectInductionVariables(Node* loop) {
+ if (loop->op()->ControlInputCount() != 2) return;
+ TRACE("Loop variables for loop %i:", loop->id());
+ for (Edge edge : loop->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge) &&
+ edge.from()->opcode() == IrOpcode::kPhi) {
+ Node* phi = edge.from();
+ InductionVariable* induction_var = TryGetInductionVariable(phi);
+ if (induction_var) {
+ induction_vars_[phi->id()] = induction_var;
+ TRACE(" %i", induction_var->phi()->id());
+ }
+ }
+ }
+ TRACE("\n");
+}
+
+void LoopVariableOptimizer::ChangeToInductionVariablePhis() {
+ for (auto entry : induction_vars_) {
+    // It only makes sense to analyze an induction variable if
+    // there is at least one bound.
+ InductionVariable* induction_var = entry.second;
+ DCHECK_EQ(MachineRepresentation::kTagged,
+ PhiRepresentationOf(induction_var->phi()->op()));
+ if (induction_var->upper_bounds().size() == 0 &&
+ induction_var->lower_bounds().size() == 0) {
+ continue;
+ }
+    // Insert the increment value into the value inputs.
+ induction_var->phi()->InsertInput(graph()->zone(),
+ induction_var->phi()->InputCount() - 1,
+ induction_var->increment());
+    // Insert the bound inputs into the value inputs.
+ for (auto bound : induction_var->lower_bounds()) {
+ induction_var->phi()->InsertInput(
+ graph()->zone(), induction_var->phi()->InputCount() - 1, bound.bound);
+ }
+ for (auto bound : induction_var->upper_bounds()) {
+ induction_var->phi()->InsertInput(
+ graph()->zone(), induction_var->phi()->InputCount() - 1, bound.bound);
+ }
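+    // The phi's inputs are now, in order: entry value, backedge value,
+    // increment, lower bounds, upper bounds, and the control input.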
+ NodeProperties::ChangeOp(
+ induction_var->phi(),
+ common()->InductionVariablePhi(induction_var->phi()->InputCount() - 1));
+ }
+}
+
+void LoopVariableOptimizer::ChangeToPhisAndInsertGuards() {
+ for (auto entry : induction_vars_) {
+ InductionVariable* induction_var = entry.second;
+ if (induction_var->phi()->opcode() == IrOpcode::kInductionVariablePhi) {
+      // Turn the induction variable phi back into a normal phi.
+ int value_count = 2;
+ Node* control = NodeProperties::GetControlInput(induction_var->phi());
+ DCHECK_EQ(value_count, control->op()->ControlInputCount());
+ induction_var->phi()->TrimInputCount(value_count + 1);
+ induction_var->phi()->ReplaceInput(value_count, control);
+ NodeProperties::ChangeOp(
+ induction_var->phi(),
+ common()->Phi(MachineRepresentation::kTagged, value_count));
+
+      // If the backedge value's type is not a subtype of the phi's type,
+      // we insert a sigma (TypeGuard) to get the typing right.
+ Node* backedge_value = induction_var->phi()->InputAt(1);
+ Type* backedge_type = NodeProperties::GetType(backedge_value);
+ Type* phi_type = NodeProperties::GetType(induction_var->phi());
+ if (!backedge_type->Is(phi_type)) {
+ Node* backedge_control =
+ NodeProperties::GetControlInput(induction_var->phi())->InputAt(1);
+ Node* rename = graph()->NewNode(common()->TypeGuard(phi_type),
+ backedge_value, backedge_control);
+ induction_var->phi()->ReplaceInput(1, rename);
+ }
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.h b/deps/v8/src/compiler/loop-variable-optimizer.h
new file mode 100644
index 0000000000..a5c1ad448d
--- /dev/null
+++ b/deps/v8/src/compiler/loop-variable-optimizer.h
@@ -0,0 +1,117 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
+#define V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CommonOperatorBuilder;
+class Graph;
+class Node;
+
+class InductionVariable : public ZoneObject {
+ public:
+ Node* phi() const { return phi_; }
+ Node* arith() const { return arith_; }
+ Node* increment() const { return increment_; }
+ Node* init_value() const { return init_value_; }
+
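+  // kStrict encodes a "<" bound, kNonStrict a "<=" bound.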
+ enum ConstraintKind { kStrict, kNonStrict };
+ enum ArithmeticType { kAddition, kSubtraction };
+ struct Bound {
+ Bound(Node* bound, ConstraintKind kind) : bound(bound), kind(kind) {}
+
+ Node* bound;
+ ConstraintKind kind;
+ };
+
+ const ZoneVector<Bound>& lower_bounds() { return lower_bounds_; }
+ const ZoneVector<Bound>& upper_bounds() { return upper_bounds_; }
+
+ ArithmeticType Type() { return arithmeticType_; }
+
+ private:
+ friend class LoopVariableOptimizer;
+
+ InductionVariable(Node* phi, Node* arith, Node* increment, Node* init_value,
+ Zone* zone, ArithmeticType arithmeticType)
+ : phi_(phi),
+ arith_(arith),
+ increment_(increment),
+ init_value_(init_value),
+ lower_bounds_(zone),
+ upper_bounds_(zone),
+ arithmeticType_(arithmeticType) {}
+
+ void AddUpperBound(Node* bound, ConstraintKind kind);
+ void AddLowerBound(Node* bound, ConstraintKind kind);
+
+ Node* phi_;
+ Node* arith_;
+ Node* increment_;
+ Node* init_value_;
+ ZoneVector<Bound> lower_bounds_;
+ ZoneVector<Bound> upper_bounds_;
+ ArithmeticType arithmeticType_;
+};
+
+class LoopVariableOptimizer {
+ public:
+ void Run();
+
+ LoopVariableOptimizer(Graph* graph, CommonOperatorBuilder* common,
+ Zone* zone);
+
+ const ZoneMap<int, InductionVariable*>& induction_variables() {
+ return induction_vars_;
+ }
+
+ void ChangeToInductionVariablePhis();
+ void ChangeToPhisAndInsertGuards();
+
+ private:
+ const int kAssumedLoopEntryIndex = 0;
+ const int kFirstBackedge = 1;
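+  // Control input 0 of a loop is assumed to be the entry edge and input 1
+  // the single backedge; loops with more backedges are not analyzed.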
+
+ class Constraint;
+ class VariableLimits;
+
+ void VisitBackedge(Node* from, Node* loop);
+ void VisitNode(Node* node);
+ void VisitMerge(Node* node);
+ void VisitLoop(Node* node);
+ void VisitIf(Node* node, bool polarity);
+ void VisitStart(Node* node);
+ void VisitLoopExit(Node* node);
+ void VisitOtherControl(Node* node);
+
+ void AddCmpToLimits(VariableLimits* limits, Node* node,
+ InductionVariable::ConstraintKind kind, bool polarity);
+
+ void TakeConditionsFromFirstControl(Node* node);
+ const InductionVariable* FindInductionVariable(Node* node);
+ InductionVariable* TryGetInductionVariable(Node* phi);
+ void DetectInductionVariables(Node* loop);
+
+ Graph* graph() { return graph_; }
+ CommonOperatorBuilder* common() { return common_; }
+ Zone* zone() { return zone_; }
+
+ Graph* graph_;
+ CommonOperatorBuilder* common_;
+ Zone* zone_;
+ ZoneMap<int, const VariableLimits*> limits_;
+ ZoneMap<int, InductionVariable*> induction_vars_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_LOOP_VARIABLE_OPTIMIZER_H_
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 19ea062053..99044aa86d 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -6,6 +6,7 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
+#include "src/base/ieee754.h"
#include "src/codegen.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
@@ -42,6 +43,20 @@ Node* MachineOperatorReducer::Int64Constant(int64_t value) {
return graph()->NewNode(common()->Int64Constant(value));
}
+Node* MachineOperatorReducer::Float64Mul(Node* lhs, Node* rhs) {
+ return graph()->NewNode(machine()->Float64Mul(), lhs, rhs);
+}
+
+Node* MachineOperatorReducer::Float64PowHalf(Node* value) {
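+  // Adding 0.0 first canonicalizes -0 to +0 (so the result has a positive
+  // sign), and the select handles pow(-Infinity, 0.5) == +Infinity, which
+  // a plain sqrt would get wrong.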
+ value =
+ graph()->NewNode(machine()->Float64Add(), Float64Constant(0.0), value);
+ return graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64, BranchHint::kFalse),
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
+ Float64Constant(-V8_INFINITY)),
+ Float64Constant(V8_INFINITY),
+ graph()->NewNode(machine()->Float64Sqrt(), value));
+}
Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
Node* const node = graph()->NewNode(machine()->Word32And(), lhs, rhs);
@@ -152,16 +167,16 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kWord32Shl:
return ReduceWord32Shl(node);
- case IrOpcode::kWord32Shr: {
- Uint32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
- if (m.IsFoldable()) { // K >>> K => K
- return ReplaceInt32(m.left().Value() >> m.right().Value());
- }
- return ReduceWord32Shifts(node);
- }
+ case IrOpcode::kWord64Shl:
+ return ReduceWord64Shl(node);
+ case IrOpcode::kWord32Shr:
+ return ReduceWord32Shr(node);
+ case IrOpcode::kWord64Shr:
+ return ReduceWord64Shr(node);
case IrOpcode::kWord32Sar:
return ReduceWord32Sar(node);
+ case IrOpcode::kWord64Sar:
+ return ReduceWord64Sar(node);
case IrOpcode::kWord32Ror: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x ror 0 => x
@@ -203,8 +218,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kInt32Add:
return ReduceInt32Add(node);
+ case IrOpcode::kInt64Add:
+ return ReduceInt64Add(node);
case IrOpcode::kInt32Sub:
return ReduceInt32Sub(node);
+ case IrOpcode::kInt64Sub:
+ return ReduceInt64Sub(node);
case IrOpcode::kInt32Mul: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.right().node()); // x * 0 => 0
@@ -226,6 +245,21 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kInt32MulWithOverflow: {
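+        // Strength-reduce: x * 2 => x + x and x * -1 => 0 - x, using the
+        // overflow-checked add/sub so the overflow projection stays valid.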
+ Int32BinopMatcher m(node);
+ if (m.right().Is(2)) {
+ node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Int32AddWithOverflow());
+ return Changed(node);
+ }
+ if (m.right().Is(-1)) {
+ node->ReplaceInput(0, Int32Constant(0));
+ node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Int32SubWithOverflow());
+ return Changed(node);
+ }
+ break;
+ }
case IrOpcode::kInt32Div:
return ReduceInt32Div(node);
case IrOpcode::kUint32Div:
@@ -239,19 +273,15 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // K < K => K
return ReplaceBool(m.left().Value() < m.right().Value());
}
- if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y < 0 => x < y
- Int32BinopMatcher msub(m.left().node());
- node->ReplaceInput(0, msub.left().node());
- node->ReplaceInput(1, msub.right().node());
- return Changed(node);
- }
- if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 < x - y => y < x
- Int32BinopMatcher msub(m.right().node());
- node->ReplaceInput(0, msub.right().node());
- node->ReplaceInput(1, msub.left().node());
- return Changed(node);
- }
if (m.LeftEqualsRight()) return ReplaceBool(false); // x < x => false
+ if (m.left().IsWord32Or() && m.right().Is(0)) {
+ // (x | K) < 0 => true or (K | x) < 0 => true iff K < 0
+ Int32BinopMatcher mleftmatcher(m.left().node());
+ if (mleftmatcher.left().IsNegative() ||
+ mleftmatcher.right().IsNegative()) {
+ return ReplaceBool(true);
+ }
+ }
break;
}
case IrOpcode::kInt32LessThanOrEqual: {
@@ -259,18 +289,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsFoldable()) { // K <= K => K
return ReplaceBool(m.left().Value() <= m.right().Value());
}
- if (m.left().IsInt32Sub() && m.right().Is(0)) { // x - y <= 0 => x <= y
- Int32BinopMatcher msub(m.left().node());
- node->ReplaceInput(0, msub.left().node());
- node->ReplaceInput(1, msub.right().node());
- return Changed(node);
- }
- if (m.left().Is(0) && m.right().IsInt32Sub()) { // 0 <= x - y => y <= x
- Int32BinopMatcher msub(m.right().node());
- node->ReplaceInput(0, msub.right().node());
- node->ReplaceInput(1, msub.left().node());
- return Changed(node);
- }
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
break;
}
@@ -309,6 +327,39 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.LeftEqualsRight()) return ReplaceBool(true); // x <= x => true
break;
}
+ case IrOpcode::kFloat32Sub: {
+ Float32BinopMatcher m(node);
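+      // Only x - (+0) folds to x: for x == -0, x - (-0) would be +0, so
+      // the copysign check rules out a negative zero on the right.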
+ if (m.right().Is(0) && (copysign(1.0, m.right().Value()) > 0)) {
+ return Replace(m.left().node()); // x - 0 => x
+ }
+ if (m.right().IsNaN()) { // x - NaN => NaN
+ return Replace(m.right().node());
+ }
+ if (m.left().IsNaN()) { // NaN - x => NaN
+ return Replace(m.left().node());
+ }
+ if (m.IsFoldable()) { // L - R => (L - R)
+ return ReplaceFloat32(m.left().Value() - m.right().Value());
+ }
+ if (m.left().IsMinusZero()) {
+ // -0.0 - round_down(-0.0 - R) => round_up(R)
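+        // (by the identity -RoundDown(-x) == RoundUp(x))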
+ if (machine()->Float32RoundUp().IsSupported() &&
+ m.right().IsFloat32RoundDown()) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat32Sub) {
+ Float32BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ return Replace(graph()->NewNode(machine()->Float32RoundUp().op(),
+ mright0.right().node()));
+ }
+ }
+ }
+ // -0.0 - R => -R
+ node->RemoveInput(0);
+ NodeProperties::ChangeOp(node, machine()->Float32Neg());
+ return Changed(node);
+ }
+ break;
+ }
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
if (m.right().IsNaN()) { // x + NaN => NaN
@@ -330,9 +381,26 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.left().IsNaN()) { // NaN - x => NaN
return Replace(m.left().node());
}
- if (m.IsFoldable()) { // K - K => K
+ if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat64(m.left().Value() - m.right().Value());
}
+ if (m.left().IsMinusZero()) {
+ // -0.0 - round_down(-0.0 - R) => round_up(R)
+ if (machine()->Float64RoundUp().IsSupported() &&
+ m.right().IsFloat64RoundDown()) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ return Replace(graph()->NewNode(machine()->Float64RoundUp().op(),
+ mright0.right().node()));
+ }
+ }
+ }
+ // -0.0 - R => -R
+ node->RemoveInput(0);
+ NodeProperties::ChangeOp(node, machine()->Float64Neg());
+ return Changed(node);
+ }
break;
}
case IrOpcode::kFloat64Mul: {
@@ -382,6 +450,141 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kFloat64Acos: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::acos(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Acosh: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::acosh(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Asin: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::asin(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Asinh: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::asinh(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Atan: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::atan(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Atanh: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::atanh(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Atan2: {
+ Float64BinopMatcher m(node);
+ if (m.right().IsNaN()) {
+ return Replace(m.right().node());
+ }
+ if (m.left().IsNaN()) {
+ return Replace(m.left().node());
+ }
+ if (m.IsFoldable()) {
+ return ReplaceFloat64(
+ base::ieee754::atan2(m.left().Value(), m.right().Value()));
+ }
+ break;
+ }
+ case IrOpcode::kFloat64Cbrt: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::cbrt(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Cos: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::cos(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Cosh: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::cosh(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Exp: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::exp(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Expm1: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::expm1(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log1p: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log1p(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log10: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log10(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Log2: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::log2(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Pow: {
+ Float64BinopMatcher m(node);
+ // TODO(bmeurer): Constant fold once we have a unified pow implementation.
+ if (m.right().Is(0.0)) { // x ** +-0.0 => 1.0
+ return ReplaceFloat64(1.0);
+ } else if (m.right().Is(-2.0)) { // x ** -2.0 => 1 / (x * x)
+ node->ReplaceInput(0, Float64Constant(1.0));
+ node->ReplaceInput(1, Float64Mul(m.left().node(), m.left().node()));
+ NodeProperties::ChangeOp(node, machine()->Float64Div());
+ return Changed(node);
+ } else if (m.right().Is(2.0)) { // x ** 2.0 => x * x
+ node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Float64Mul());
+ return Changed(node);
+ } else if (m.right().Is(-0.5)) {
+        // x ** -0.5 => 1 / (if x <= -Infinity then Infinity else sqrt(0.0 + x))
+ node->ReplaceInput(0, Float64Constant(1.0));
+ node->ReplaceInput(1, Float64PowHalf(m.left().node()));
+ NodeProperties::ChangeOp(node, machine()->Float64Div());
+ return Changed(node);
+ } else if (m.right().Is(0.5)) {
+ // x ** 0.5 => if x <= -Infinity then Infinity else sqrt(0.0 + x)
+ return Replace(Float64PowHalf(m.left().node()));
+ }
+ break;
+ }
+ case IrOpcode::kFloat64Sin: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::sin(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Sinh: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::sinh(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Tan: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::tan(m.Value()));
+ break;
+ }
+ case IrOpcode::kFloat64Tanh: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceFloat64(base::ieee754::tanh(m.Value()));
+ break;
+ }
case IrOpcode::kChangeFloat32ToFloat64: {
Float32Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(m.Value());
@@ -419,8 +622,12 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
break;
}
- case IrOpcode::kTruncateFloat64ToInt32:
- return ReduceTruncateFloat64ToInt32(node);
+ case IrOpcode::kTruncateFloat64ToWord32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+ return NoChange();
+ }
case IrOpcode::kTruncateInt64ToInt32: {
Int64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
@@ -433,11 +640,19 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kRoundFloat64ToInt32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+ if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+ break;
+ }
case IrOpcode::kFloat64InsertLowWord32:
return ReduceFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
+ case IrOpcode::kUnalignedStore:
+ case IrOpcode::kCheckedStore:
return ReduceStore(node);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -449,7 +664,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return NoChange();
}
-
Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
Int32BinopMatcher m(node);
@@ -480,6 +694,16 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
+ DCHECK_EQ(IrOpcode::kInt64Add, node->opcode());
+ Int64BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+ if (m.IsFoldable()) {
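+    // Fold through uint64_t so the wrap-around is well-defined in C++.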
+ return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) +
+ bit_cast<uint64_t>(m.right().Value())));
+ }
+ return NoChange();
+}
Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
DCHECK_EQ(IrOpcode::kInt32Sub, node->opcode());
@@ -499,6 +723,23 @@ Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceInt64Sub(Node* node) {
+ DCHECK_EQ(IrOpcode::kInt64Sub, node->opcode());
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x - 0 => x
+ if (m.IsFoldable()) { // K - K => K
+ return Replace(Uint64Constant(bit_cast<uint64_t>(m.left().Value()) -
+ bit_cast<uint64_t>(m.right().Value())));
+ }
+ if (m.LeftEqualsRight()) return Replace(Int64Constant(0)); // x - x => 0
+ if (m.right().HasValue()) { // x - K => x + -K
+ node->ReplaceInput(1, Int64Constant(-m.right().Value()));
+ NodeProperties::ChangeOp(node, machine()->Int64Add());
+ Reduction const reduction = ReduceInt64Add(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ return NoChange();
+}
Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
Int32BinopMatcher m(node);
@@ -645,41 +886,24 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
}
-Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
- Float64Matcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
- if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
- if (m.IsPhi()) {
- Node* const phi = m.node();
- DCHECK_EQ(MachineRepresentation::kFloat64, PhiRepresentationOf(phi->op()));
- if (phi->OwnedBy(node)) {
- // TruncateFloat64ToInt32[mode](Phi[Float64](x1,...,xn))
- // => Phi[Int32](TruncateFloat64ToInt32[mode](x1),
- // ...,
- // TruncateFloat64ToInt32[mode](xn))
- const int value_input_count = phi->InputCount() - 1;
- for (int i = 0; i < value_input_count; ++i) {
- Node* input = graph()->NewNode(node->op(), phi->InputAt(i));
- // TODO(bmeurer): Reschedule input for reduction once we have Revisit()
- // instead of recursing into ReduceTruncateFloat64ToInt32() here.
- Reduction reduction = ReduceTruncateFloat64ToInt32(input);
- if (reduction.Changed()) input = reduction.replacement();
- phi->ReplaceInput(i, input);
- }
- NodeProperties::ChangeOp(
- phi,
- common()->Phi(MachineRepresentation::kWord32, value_input_count));
- return Replace(phi);
- }
+Reduction MachineOperatorReducer::ReduceStore(Node* node) {
+ NodeMatcher nm(node);
+ MachineRepresentation rep;
+ int value_input;
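+  // Checked stores take (base, offset, length, value), so the value is
+  // input 3; plain and unaligned stores take (base, index, value).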
+ if (nm.IsCheckedStore()) {
+ rep = CheckedStoreRepresentationOf(node->op());
+ value_input = 3;
+ } else if (nm.IsStore()) {
+ rep = StoreRepresentationOf(node->op()).representation();
+ value_input = 2;
+ } else {
+ DCHECK(nm.IsUnalignedStore());
+ rep = UnalignedStoreRepresentationOf(node->op());
+ value_input = 2;
}
- return NoChange();
-}
+ Node* const value = node->InputAt(value_input);
-Reduction MachineOperatorReducer::ReduceStore(Node* node) {
- MachineRepresentation const rep =
- StoreRepresentationOf(node->op()).representation();
- Node* const value = node->InputAt(2);
switch (value->opcode()) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
@@ -687,7 +911,7 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
(m.right().Value() & 0xff) == 0xff) ||
(rep == MachineRepresentation::kWord16 &&
(m.right().Value() & 0xffff) == 0xffff))) {
- node->ReplaceInput(2, m.left().node());
+ node->ReplaceInput(value_input, m.left().node());
return Changed(node);
}
break;
@@ -700,7 +924,7 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
m.right().IsInRange(1, 16)))) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
- node->ReplaceInput(2, mleft.left().node());
+ node->ReplaceInput(value_input, mleft.left().node());
return Changed(node);
}
}
@@ -722,10 +946,10 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
int32_t val;
bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
m.right().Value(), &val);
- return ReplaceInt32((index == 0) ? val : ovf);
+ return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
- return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+ return Replace(index == 0 ? m.left().node() : m.right().node());
}
break;
}
@@ -736,10 +960,27 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
int32_t val;
bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
m.right().Value(), &val);
- return ReplaceInt32((index == 0) ? val : ovf);
+ return ReplaceInt32(index == 0 ? val : ovf);
+ }
+ if (m.right().Is(0)) {
+ return Replace(index == 0 ? m.left().node() : m.right().node());
+ }
+ break;
+ }
+ case IrOpcode::kInt32MulWithOverflow: {
+ DCHECK(index == 0 || index == 1);
+ Int32BinopMatcher m(node);
+ if (m.IsFoldable()) {
+ int32_t val;
+ bool ovf = base::bits::SignedMulOverflow32(m.left().Value(),
+ m.right().Value(), &val);
+ return ReplaceInt32(index == 0 ? val : ovf);
}
if (m.right().Is(0)) {
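+        // x * 0 is 0 and never overflows, so the zero constant serves
+        // as both projections.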
- return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+ return Replace(m.right().node());
+ }
+ if (m.right().Is(1)) {
+ return index == 0 ? Replace(m.left().node()) : ReplaceInt32(0);
}
break;
}
@@ -795,6 +1036,45 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
return ReduceWord32Shifts(node);
}
+Reduction MachineOperatorReducer::ReduceWord64Shl(Node* node) {
+ DCHECK_EQ(IrOpcode::kWord64Shl, node->opcode());
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x << 0 => x
+ if (m.IsFoldable()) { // K << K => K
+ return ReplaceInt64(m.left().Value() << m.right().Value());
+ }
+ return NoChange();
+}
+
+Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
+ Uint32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
+ if (m.IsFoldable()) { // K >>> K => K
+ return ReplaceInt32(m.left().Value() >> m.right().Value());
+ }
+ if (m.left().IsWord32And() && m.right().HasValue()) {
+ Uint32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ uint32_t shift = m.right().Value() & 0x1f;
+ uint32_t mask = mleft.right().Value();
+ if ((mask >> shift) == 0) {
+ // (m >>> s) == 0 implies ((x & m) >>> s) == 0
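+          // e.g. (x & 0xff) >>> 8 is always 0, because 0xff >>> 8 == 0.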
+ return ReplaceInt32(0);
+ }
+ }
+ }
+ return ReduceWord32Shifts(node);
+}
+
+Reduction MachineOperatorReducer::ReduceWord64Shr(Node* node) {
+ DCHECK_EQ(IrOpcode::kWord64Shr, node->opcode());
+ Uint64BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >>> 0 => x
+  if (m.IsFoldable()) { // K >>> K => K
+ return ReplaceInt64(m.left().Value() >> m.right().Value());
+ }
+ return NoChange();
+}
Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
Int32BinopMatcher m(node);
@@ -831,6 +1111,14 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
return ReduceWord32Shifts(node);
}
+Reduction MachineOperatorReducer::ReduceWord64Sar(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
+ if (m.IsFoldable()) {
+ return ReplaceInt64(m.left().Value() >> m.right().Value());
+ }
+ return NoChange();
+}
Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 7f8ff1a5fd..167bf7efd3 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -32,8 +32,13 @@ class MachineOperatorReducer final : public Reducer {
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
Node* Uint32Constant(uint32_t value) {
- return Int32Constant(bit_cast<uint32_t>(value));
+ return Int32Constant(bit_cast<int32_t>(value));
}
+ Node* Uint64Constant(uint64_t value) {
+ return Int64Constant(bit_cast<int64_t>(value));
+ }
+ Node* Float64Mul(Node* lhs, Node* rhs);
+ Node* Float64PowHalf(Node* value);
Node* Word32And(Node* lhs, Node* rhs);
Node* Word32And(Node* lhs, uint32_t rhs) {
return Word32And(lhs, Uint32Constant(rhs));
@@ -65,17 +70,22 @@ class MachineOperatorReducer final : public Reducer {
}
Reduction ReduceInt32Add(Node* node);
+ Reduction ReduceInt64Add(Node* node);
Reduction ReduceInt32Sub(Node* node);
+ Reduction ReduceInt64Sub(Node* node);
Reduction ReduceInt32Div(Node* node);
Reduction ReduceUint32Div(Node* node);
Reduction ReduceInt32Mod(Node* node);
Reduction ReduceUint32Mod(Node* node);
- Reduction ReduceTruncateFloat64ToInt32(Node* node);
Reduction ReduceStore(Node* node);
Reduction ReduceProjection(size_t index, Node* node);
Reduction ReduceWord32Shifts(Node* node);
Reduction ReduceWord32Shl(Node* node);
+ Reduction ReduceWord64Shl(Node* node);
+ Reduction ReduceWord32Shr(Node* node);
+ Reduction ReduceWord64Shr(Node* node);
Reduction ReduceWord32Sar(Node* node);
+ Reduction ReduceWord64Sar(Node* node);
Reduction ReduceWord32And(Node* node);
Reduction ReduceWord32Or(Node* node);
Reduction ReduceFloat64InsertLowWord32(Node* node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 6a506d26ad..43c6202eb7 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -12,40 +12,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-std::ostream& operator<<(std::ostream& os, TruncationMode mode) {
- switch (mode) {
- case TruncationMode::kJavaScript:
- return os << "JavaScript";
- case TruncationMode::kRoundToZero:
- return os << "RoundToZero";
- }
- UNREACHABLE();
- return os;
-}
-
-
-TruncationMode TruncationModeOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, op->opcode());
- return OpParameter<TruncationMode>(op);
-}
-
-
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
- switch (kind) {
- case kNoWriteBarrier:
- return os << "NoWriteBarrier";
- case kMapWriteBarrier:
- return os << "MapWriteBarrier";
- case kPointerWriteBarrier:
- return os << "PointerWriteBarrier";
- case kFullWriteBarrier:
- return os << "FullWriteBarrier";
- }
- UNREACHABLE();
- return os;
-}
-
-
bool operator==(StoreRepresentation lhs, StoreRepresentation rhs) {
return lhs.representation() == rhs.representation() &&
lhs.write_barrier_kind() == rhs.write_barrier_kind();
@@ -69,7 +35,8 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kLoad, op->opcode());
+ DCHECK(IrOpcode::kLoad == op->opcode() ||
+ IrOpcode::kAtomicLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
@@ -79,6 +46,16 @@ StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
return OpParameter<StoreRepresentation>(op);
}
+UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kUnalignedLoad, op->opcode());
+ return OpParameter<UnalignedLoadRepresentation>(op);
+}
+
+UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
+ return OpParameter<UnalignedStoreRepresentation>(op);
+}
CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
@@ -96,125 +73,331 @@ MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
return OpParameter<MachineRepresentation>(op);
}
-#define PURE_OP_LIST(V) \
- V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word32Shl, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Shr, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
- V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
- V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
- V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
- V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
- V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
- V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
- 0, 2) \
- V(Int32Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int32SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
- V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int32Div, Operator::kNoProperties, 2, 1, 1) \
- V(Int32Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Int32LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32Div, Operator::kNoProperties, 2, 1, 1) \
- V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
- 0, 2) \
- V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Int64SubWithOverflow, Operator::kNoProperties, 2, 0, 2) \
- V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
- V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Int64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64Div, Operator::kNoProperties, 2, 1, 1) \
- V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
- V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
- V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
- V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
- V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Add, Operator::kCommutative, 2, 0, 1) \
- V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Add, Operator::kCommutative, 2, 0, 1) \
- V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
- V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
- V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
- V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
- V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
- V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
- V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
- V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
- V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
- V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
- V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
- V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
- V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
- V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)
+MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kAtomicStore, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
+
+#define PURE_OP_LIST(V) \
+ V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word32Shl, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Shr, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Sar, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int32Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int32Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Int32Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Int32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint32MulHigh, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Int64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Int64Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Int64LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64Div, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint64Mod, Operator::kNoProperties, 2, 1, 1) \
+ V(Uint64LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
+ V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64SilenceNaN, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
+ V(ImpossibleToWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(ImpossibleToWord64, Operator::kNoProperties, 1, 0, 1) \
+ V(ImpossibleToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(ImpossibleToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(ImpossibleToTagged, Operator::kNoProperties, 1, 0, 1) \
+ V(ImpossibleToBit, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float32Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Acos, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Acosh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Asin, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Asinh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Atan2, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Atanh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cbrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cos, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Cosh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Exp, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Expm1, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log1p, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log2, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Log10, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Max, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Min, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Float64Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Pow, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Sin, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Sinh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Tan, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64Tanh, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float32LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
+ V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
+ V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
+ V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+ V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4MinNum, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Float32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
+ V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1) \
+ V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
+ V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1) \
+ V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int16x8Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int16x8Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
+ V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
+ V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1) \
+ V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1) \
+ V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1) \
+ V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1) \
+ V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(Int8x16Add, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Mul, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Min, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16Max, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Int8x16Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
+ V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
+ V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16Min, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16Max, Operator::kCommutative, 2, 0, 1) \
+ V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1) \
+ V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1) \
+ V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1) \
+ V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1) \
+ V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1) \
+ V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1) \
+ V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Load, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load1, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load2, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Load3, Operator::kNoProperties, 2, 0, 1) \
+ V(Simd128Store, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store1, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store2, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128Store3, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
+ V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
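
// Reading one row of these operator tables: for example
//   V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
// declares a pure operator with one value input, zero control inputs and one
// output, matching the (Name, properties, value_input_count,
// control_input_count, output_count) parameters that the PURE macro below
// expects.
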
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1) \
- V(Float32Max, Operator::kNoProperties, 2, 0, 1) \
- V(Float32Min, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
- V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
V(Float32RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundUp, Operator::kNoProperties, 1, 0, 1) \
@@ -225,6 +408,13 @@ MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
+#define OVERFLOW_OP_LIST(V) \
+ V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
+ V(Int32SubWithOverflow, Operator::kNoProperties) \
+ V(Int32MulWithOverflow, Operator::kAssociative | Operator::kCommutative) \
+ V(Int64AddWithOverflow, Operator::kAssociative | Operator::kCommutative) \
+ V(Int64SubWithOverflow, Operator::kNoProperties)
+
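
// A minimal sketch of how these overflow operators are consumed, assuming
// the usual graph/common/machine builder objects of a compiler pass (the
// exact wiring is illustrative): each *WithOverflow node has two value
// outputs that are separated with Projection nodes.
//
//   Node* add =
//       graph->NewNode(machine->Int32AddWithOverflow(), lhs, rhs, control);
//   Node* value = graph->NewNode(common->Projection(0), add);
//   Node* check = graph->NewNode(common->Projection(1), add);  // 1 on overflow
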
#define MACHINE_TYPE_LIST(V) \
V(Float32) \
V(Float64) \
@@ -248,8 +438,23 @@ MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
V(kWord16) \
V(kWord32) \
V(kWord64) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
V(kTagged)
+#define ATOMIC_TYPE_LIST(V) \
+ V(Int8) \
+ V(Uint8) \
+ V(Int16) \
+ V(Uint16) \
+ V(Int32) \
+ V(Uint32)
+
+#define ATOMIC_REPRESENTATION_LIST(V) \
+ V(kWord8) \
+ V(kWord16) \
+ V(kWord32)
+
struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
@@ -264,46 +469,56 @@ struct MachineOperatorGlobalCache {
PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
- template <TruncationMode kMode>
- struct TruncateFloat64ToInt32Operator final
- : public Operator1<TruncationMode> {
- TruncateFloat64ToInt32Operator()
- : Operator1<TruncationMode>(IrOpcode::kTruncateFloat64ToInt32,
- Operator::kPure, "TruncateFloat64ToInt32",
- 1, 0, 0, 1, 0, 0, kMode) {}
- };
- TruncateFloat64ToInt32Operator<TruncationMode::kJavaScript>
- kTruncateFloat64ToInt32JavaScript;
- TruncateFloat64ToInt32Operator<TruncationMode::kRoundToZero>
- kTruncateFloat64ToInt32RoundToZero;
-
-#define LOAD(Type) \
- struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
- Load##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct CheckedLoad##Type##Operator final \
- : public Operator1<CheckedLoadRepresentation> { \
- CheckedLoad##Type##Operator() \
- : Operator1<CheckedLoadRepresentation>( \
- IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Load##Type##Operator kLoad##Type; \
+#define OVERFLOW_OP(Name, properties) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kEliminatable | Operator::kNoRead | properties, \
+ #Name, 2, 0, 1, 2, 0, 0) {} \
+ }; \
+ Name##Operator k##Name;
+ OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
+
+#define LOAD(Type) \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct UnalignedLoad##Type##Operator final \
+ : public Operator1<UnalignedLoadRepresentation> { \
+ UnalignedLoad##Type##Operator() \
+ : Operator1<UnalignedLoadRepresentation>( \
+ IrOpcode::kUnalignedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct CheckedLoad##Type##Operator final \
+ : public Operator1<CheckedLoadRepresentation> { \
+ CheckedLoad##Type##Operator() \
+ : Operator1<CheckedLoadRepresentation>( \
+ IrOpcode::kCheckedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
+ UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
CheckedLoad##Type##Operator kCheckedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define STACKSLOT(Type) \
- struct StackSlot##Type##Operator final \
- : public Operator1<MachineRepresentation> { \
- StackSlot##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kStackSlot, Operator::kNoThrow, "StackSlot", 0, 0, 0, \
- 1, 0, 0, MachineType::Type().representation()) {} \
- }; \
+#define STACKSLOT(Type) \
+ struct StackSlot##Type##Operator final \
+ : public Operator1<MachineRepresentation> { \
+ StackSlot##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, \
+ "StackSlot", 0, 0, 0, 1, 0, 0, \
+ MachineType::Type().representation()) {} \
+ }; \
StackSlot##Type##Operator kStackSlot##Type;
MACHINE_TYPE_LIST(STACKSLOT)
#undef STACKSLOT
@@ -312,7 +527,8 @@ struct MachineOperatorGlobalCache {
struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
: Operator1<StoreRepresentation>( \
- IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, \
+ IrOpcode::kStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Store", 3, 1, 1, 0, 1, 0, \
StoreRepresentation(MachineRepresentation::Type, \
write_barrier_kind)) {} \
@@ -337,11 +553,21 @@ struct MachineOperatorGlobalCache {
Store##Type##FullWriteBarrier##Operator() \
: Store##Type##Operator(kFullWriteBarrier) {} \
}; \
+ struct UnalignedStore##Type##Operator final \
+ : public Operator1<UnalignedStoreRepresentation> { \
+ UnalignedStore##Type##Operator() \
+ : Operator1<UnalignedStoreRepresentation>( \
+ IrOpcode::kUnalignedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "UnalignedStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
struct CheckedStore##Type##Operator final \
: public Operator1<CheckedStoreRepresentation> { \
CheckedStore##Type##Operator() \
: Operator1<CheckedStoreRepresentation>( \
- IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
+ IrOpcode::kCheckedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
} \
}; \
@@ -350,24 +576,100 @@ struct MachineOperatorGlobalCache {
Store##Type##PointerWriteBarrier##Operator \
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
+ UnalignedStore##Type##Operator kUnalignedStore##Type; \
CheckedStore##Type##Operator kCheckedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
+
+#define ATOMIC_LOAD(Type) \
+ struct AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kAtomicLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ AtomicLoad##Type##Operator kAtomicLoad##Type;
+ ATOMIC_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
+#define ATOMIC_STORE(Type) \
+ struct AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kAtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "AtomicStore", 3, 1, 1, 0, 1, 0, MachineRepresentation::Type) {} \
+ }; \
+ AtomicStore##Type##Operator kAtomicStore##Type;
+ ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
+
+ struct DebugBreakOperator : public Operator {
+ DebugBreakOperator()
+ : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
+ 0, 0, 0, 0, 0) {}
+ };
+ DebugBreakOperator kDebugBreak;
+
+ struct UnsafePointerAddOperator final : public Operator {
+ UnsafePointerAddOperator()
+ : Operator(IrOpcode::kUnsafePointerAdd, Operator::kKontrol,
+ "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
+ };
+ UnsafePointerAddOperator kUnsafePointerAdd;
};
+struct CommentOperator : public Operator1<const char*> {
+ explicit CommentOperator(const char* msg)
+ : Operator1<const char*>(IrOpcode::kComment, Operator::kNoThrow,
+ "Comment", 0, 0, 0, 0, 0, 0, msg) {}
+};
static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
LAZY_INSTANCE_INITIALIZER;
-
-MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
- MachineRepresentation word,
- Flags flags)
- : cache_(kCache.Get()), word_(word), flags_(flags) {
+MachineOperatorBuilder::MachineOperatorBuilder(
+ Zone* zone, MachineRepresentation word, Flags flags,
+ AlignmentRequirements alignmentRequirements)
+ : zone_(zone),
+ cache_(kCache.Get()),
+ word_(word),
+ flags_(flags),
+ alignment_requirements_(alignmentRequirements) {
DCHECK(word == MachineRepresentation::kWord32 ||
word == MachineRepresentation::kWord64);
}
+const Operator* MachineOperatorBuilder::UnalignedLoad(
+ UnalignedLoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kUnalignedLoad##Type; \
+ }
+ MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::UnalignedStore(
+ UnalignedStoreRepresentation rep) {
+ switch (rep) {
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kUnalignedStore##kRep;
+ MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
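
// A minimal sketch of emitting an unaligned access through these builders
// (base/index/effect/control are the usual load inputs; the alignment value
// is illustrative):
//
//   if (machine->UnalignedLoadSupported(MachineType::Float64(), 1)) {
//     load = graph->NewNode(machine->UnalignedLoad(MachineType::Float64()),
//                           base, index, effect, control);
//   }
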
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
@@ -375,27 +677,18 @@ MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone,
PURE_OP_LIST(PURE)
#undef PURE
-#define PURE(Name, properties, value_input_count, control_input_count, \
- output_count) \
- const OptionalOperator MachineOperatorBuilder::Name() { \
- return OptionalOperator(flags_ & k##Name ? &cache_.k##Name : nullptr); \
+#define PURE(Name, properties, value_input_count, control_input_count, \
+ output_count) \
+ const OptionalOperator MachineOperatorBuilder::Name() { \
+ return OptionalOperator(flags_ & k##Name, &cache_.k##Name); \
}
PURE_OPTIONAL_OP_LIST(PURE)
#undef PURE
-
-const Operator* MachineOperatorBuilder::TruncateFloat64ToInt32(
- TruncationMode mode) {
- switch (mode) {
- case TruncationMode::kJavaScript:
- return &cache_.kTruncateFloat64ToInt32JavaScript;
- case TruncationMode::kRoundToZero:
- return &cache_.kTruncateFloat64ToInt32RoundToZero;
- }
- UNREACHABLE();
- return nullptr;
-}
-
+#define OVERFLOW_OP(Name, properties) \
+ const Operator* MachineOperatorBuilder::Name() { return &cache_.k##Name; }
+OVERFLOW_OP_LIST(OVERFLOW_OP)
+#undef OVERFLOW_OP
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
#define LOAD(Type) \
@@ -444,6 +737,17 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
return nullptr;
}
+const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
+ return &cache_.kUnsafePointerAdd;
+}
+
+const Operator* MachineOperatorBuilder::DebugBreak() {
+ return &cache_.kDebugBreak;
+}
+
+const Operator* MachineOperatorBuilder::Comment(const char* msg) {
+ return new (zone_) CommentOperator(msg);
+}
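
// Unlike the cached singletons above, each Comment carries its own message,
// so it is zone-allocated per request; a sketch of use (message text
// illustrative):
//
//   Node* marker = graph->NewNode(machine->Comment("allocation fast path"));
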
const Operator* MachineOperatorBuilder::CheckedLoad(
CheckedLoadRepresentation rep) {
@@ -474,19 +778,28 @@ const Operator* MachineOperatorBuilder::CheckedStore(
return nullptr;
}
-// On 32 bit platforms we need to get a reference to optional operators of
-// 64-bit instructions for later Int64Lowering, even though 32 bit platforms
-// don't support the original 64-bit instruction.
-const Operator* MachineOperatorBuilder::Word64PopcntPlaceholder() {
- return &cache_.kWord64Popcnt;
+const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kAtomicLoad##Type; \
+ }
+ ATOMIC_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+ return nullptr;
}
-// On 32 bit platforms we need to get a reference to optional operators of
-// 64-bit instructions for later Int64Lowering, even though 32 bit platforms
-// don't support the original 64-bit instruction.
-const Operator* MachineOperatorBuilder::Word64CtzPlaceholder() {
- return &cache_.kWord64Ctz;
+const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kAtomicStore##kRep; \
+ }
+ ATOMIC_REPRESENTATION_LIST(STORE)
+#undef STORE
+ UNREACHABLE();
+ return nullptr;
}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 68e393aadd..611846a1db 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -20,45 +20,25 @@ class Operator;
// For operators that are not supported on all platforms.
class OptionalOperator final {
public:
- explicit OptionalOperator(const Operator* op) : op_(op) {}
+ OptionalOperator(bool supported, const Operator* op)
+ : supported_(supported), op_(op) {}
- bool IsSupported() const { return op_ != nullptr; }
+ bool IsSupported() const { return supported_; }
+ // Gets the operator only if it is supported.
const Operator* op() const {
- DCHECK_NOT_NULL(op_);
+ DCHECK(supported_);
return op_;
}
+  // Always gets the operator, even for unsupported operators; useful when
+  // the operator is needed as a placeholder in a graph, for instance.
+ const Operator* placeholder() const { return op_; }
private:
+ bool supported_;
const Operator* const op_;
};
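
// A minimal sketch of the intended use, assuming `machine` is a
// MachineOperatorBuilder: callers that can tolerate an unsupported operator
// take the placeholder (e.g. to keep a node in the graph for later
// Int64Lowering); everyone else checks IsSupported() first.
//
//   OptionalOperator ctz = machine->Word32Ctz();
//   const Operator* op =
//       ctz.IsSupported() ? ctz.op() : ctz.placeholder();
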
-// Supported float64 to int32 truncation modes.
-enum class TruncationMode : uint8_t {
- kJavaScript, // ES6 section 7.1.5
- kRoundToZero // Round towards zero. Implementation defined for NaN and ovf.
-};
-
-V8_INLINE size_t hash_value(TruncationMode mode) {
- return static_cast<uint8_t>(mode);
-}
-
-std::ostream& operator<<(std::ostream&, TruncationMode);
-
-TruncationMode TruncationModeOf(Operator const*);
-
-
-// Supported write barrier modes.
-enum WriteBarrierKind {
- kNoWriteBarrier,
- kMapWriteBarrier,
- kPointerWriteBarrier,
- kFullWriteBarrier
-};
-
-std::ostream& operator<<(std::ostream& os, WriteBarrierKind);
-
-
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
@@ -90,6 +70,15 @@ std::ostream& operator<<(std::ostream&, StoreRepresentation);
StoreRepresentation const& StoreRepresentationOf(Operator const*);
+typedef MachineType UnalignedLoadRepresentation;
+
+UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const*);
+
+// An UnalignedStore needs a MachineType.
+typedef MachineRepresentation UnalignedStoreRepresentation;
+
+UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
+ Operator const*);
// A CheckedLoad needs a MachineType.
typedef MachineType CheckedLoadRepresentation;
@@ -104,6 +93,8 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
MachineRepresentation StackSlotRepresentationOf(Operator const* op);
+MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -111,46 +102,110 @@ class MachineOperatorBuilder final : public ZoneObject {
public:
// Flags that specify which operations are available. This is useful
// for operations that are unsupported by some back-ends.
- enum Flag {
+ enum Flag : unsigned {
kNoFlags = 0u,
- // Note that Float*Max behaves like `(b < a) ? a : b`, not like Math.max().
- // Note that Float*Min behaves like `(a < b) ? a : b`, not like Math.min().
- kFloat32Max = 1u << 0,
- kFloat32Min = 1u << 1,
- kFloat64Max = 1u << 2,
- kFloat64Min = 1u << 3,
- kFloat32RoundDown = 1u << 4,
- kFloat64RoundDown = 1u << 5,
- kFloat32RoundUp = 1u << 6,
- kFloat64RoundUp = 1u << 7,
- kFloat32RoundTruncate = 1u << 8,
- kFloat64RoundTruncate = 1u << 9,
- kFloat32RoundTiesEven = 1u << 10,
- kFloat64RoundTiesEven = 1u << 11,
- kFloat64RoundTiesAway = 1u << 12,
- kInt32DivIsSafe = 1u << 13,
- kUint32DivIsSafe = 1u << 14,
- kWord32ShiftIsSafe = 1u << 15,
- kWord32Ctz = 1u << 16,
- kWord64Ctz = 1u << 17,
- kWord32Popcnt = 1u << 18,
- kWord64Popcnt = 1u << 19,
- kWord32ReverseBits = 1u << 20,
- kWord64ReverseBits = 1u << 21,
- kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
- kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
+ kFloat32RoundDown = 1u << 0,
+ kFloat64RoundDown = 1u << 1,
+ kFloat32RoundUp = 1u << 2,
+ kFloat64RoundUp = 1u << 3,
+ kFloat32RoundTruncate = 1u << 4,
+ kFloat64RoundTruncate = 1u << 5,
+ kFloat32RoundTiesEven = 1u << 6,
+ kFloat64RoundTiesEven = 1u << 7,
+ kFloat64RoundTiesAway = 1u << 8,
+ kInt32DivIsSafe = 1u << 9,
+ kUint32DivIsSafe = 1u << 10,
+ kWord32ShiftIsSafe = 1u << 11,
+ kWord32Ctz = 1u << 12,
+ kWord64Ctz = 1u << 13,
+ kWord32Popcnt = 1u << 14,
+ kWord64Popcnt = 1u << 15,
+ kWord32ReverseBits = 1u << 16,
+ kWord64ReverseBits = 1u << 17,
+ kWord32ReverseBytes = 1u << 18,
+ kWord64ReverseBytes = 1u << 19,
+ kAllOptionalOps = kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat32RoundTruncate |
kFloat64RoundTruncate | kFloat64RoundTiesAway |
kFloat32RoundTiesEven | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
- kWord32ReverseBits | kWord64ReverseBits
+ kWord32ReverseBits | kWord64ReverseBits |
+ kWord32ReverseBytes | kWord64ReverseBytes
};
typedef base::Flags<Flag, unsigned> Flags;
+ class AlignmentRequirements {
+ public:
+ enum UnalignedAccessSupport { kNoSupport, kSomeSupport, kFullSupport };
+
+ bool IsUnalignedLoadSupported(const MachineType& machineType,
+ uint8_t alignment) const {
+ return IsUnalignedSupported(unalignedLoadUnsupportedTypes_, machineType,
+ alignment);
+ }
+
+ bool IsUnalignedStoreSupported(const MachineType& machineType,
+ uint8_t alignment) const {
+ return IsUnalignedSupported(unalignedStoreUnsupportedTypes_, machineType,
+ alignment);
+ }
+
+ static AlignmentRequirements FullUnalignedAccessSupport() {
+ return AlignmentRequirements(kFullSupport);
+ }
+ static AlignmentRequirements NoUnalignedAccessSupport() {
+ return AlignmentRequirements(kNoSupport);
+ }
+ static AlignmentRequirements SomeUnalignedAccessUnsupported(
+ const Vector<MachineType>& unalignedLoadUnsupportedTypes,
+ const Vector<MachineType>& unalignedStoreUnsupportedTypes) {
+ return AlignmentRequirements(kSomeSupport, unalignedLoadUnsupportedTypes,
+ unalignedStoreUnsupportedTypes);
+ }
+
+ private:
+ explicit AlignmentRequirements(
+ AlignmentRequirements::UnalignedAccessSupport unalignedAccessSupport,
+ Vector<MachineType> unalignedLoadUnsupportedTypes =
+ Vector<MachineType>(NULL, 0),
+ Vector<MachineType> unalignedStoreUnsupportedTypes =
+ Vector<MachineType>(NULL, 0))
+ : unalignedSupport_(unalignedAccessSupport),
+ unalignedLoadUnsupportedTypes_(unalignedLoadUnsupportedTypes),
+ unalignedStoreUnsupportedTypes_(unalignedStoreUnsupportedTypes) {}
+
+ bool IsUnalignedSupported(const Vector<MachineType>& unsupported,
+ const MachineType& machineType,
+ uint8_t alignment) const {
+ if (unalignedSupport_ == kFullSupport) {
+ return true;
+ } else if (unalignedSupport_ == kNoSupport) {
+ return false;
+ } else {
+ for (MachineType m : unsupported) {
+ if (m == machineType) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }
+
+ const AlignmentRequirements::UnalignedAccessSupport unalignedSupport_;
+ const Vector<MachineType> unalignedLoadUnsupportedTypes_;
+ const Vector<MachineType> unalignedStoreUnsupportedTypes_;
+ };
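
// A minimal sketch of how a back-end with partial unaligned support might
// describe itself (the unsupported types chosen here are illustrative):
//
//   static MachineType kUnsupported[] = {MachineType::Float64()};
//   auto requirements = MachineOperatorBuilder::AlignmentRequirements::
//       SomeUnalignedAccessUnsupported(Vector<MachineType>(kUnsupported, 1),
//                                      Vector<MachineType>(kUnsupported, 1));
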
+
explicit MachineOperatorBuilder(
Zone* zone,
MachineRepresentation word = MachineType::PointerRepresentation(),
- Flags supportedOperators = kNoFlags);
+ Flags supportedOperators = kNoFlags,
+ AlignmentRequirements alignmentRequirements =
+ AlignmentRequirements::FullUnalignedAccessSupport());
+
+ const Operator* Comment(const char* msg);
+ const Operator* DebugBreak();
+ const Operator* UnsafePointerAdd();
const Operator* Word32And();
const Operator* Word32Or();
@@ -164,9 +219,10 @@ class MachineOperatorBuilder final : public ZoneObject {
const OptionalOperator Word32Ctz();
const OptionalOperator Word32Popcnt();
const OptionalOperator Word64Popcnt();
- const Operator* Word64PopcntPlaceholder();
const OptionalOperator Word32ReverseBits();
const OptionalOperator Word64ReverseBits();
+ const OptionalOperator Word32ReverseBytes();
+ const OptionalOperator Word64ReverseBytes();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
@@ -178,7 +234,6 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Word64Ror();
const Operator* Word64Clz();
const OptionalOperator Word64Ctz();
- const Operator* Word64CtzPlaceholder();
const Operator* Word64Equal();
const Operator* Int32PairAdd();
@@ -193,6 +248,7 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Int32Sub();
const Operator* Int32SubWithOverflow();
const Operator* Int32Mul();
+ const Operator* Int32MulWithOverflow();
const Operator* Int32MulHigh();
const Operator* Int32Div();
const Operator* Int32Mod();
@@ -220,6 +276,12 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
+ // This operator reinterprets the bits of a word as tagged pointer.
+ const Operator* BitcastWordToTagged();
+
+ // JavaScript float64 to int32/uint32 truncation.
+ const Operator* TruncateFloat64ToWord32();
+
// These operators change the representation of numbers while preserving the
// value of the number. Narrowing operators assume the input is representable
// in the target type and are *not* defined for other inputs.
@@ -240,11 +302,21 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeUint32ToFloat64();
const Operator* ChangeUint32ToUint64();
+ // These are changes from impossible values (for example a smi-checked
+ // string). They can safely emit an abort instruction, which should
+ // never be reached.
+ const Operator* ImpossibleToWord32();
+ const Operator* ImpossibleToWord64();
+ const Operator* ImpossibleToFloat32();
+ const Operator* ImpossibleToFloat64();
+ const Operator* ImpossibleToTagged();
+ const Operator* ImpossibleToBit();
+
// These operators truncate or round numbers, both changing the representation
// of the number and mapping multiple input values onto the same output value.
const Operator* TruncateFloat64ToFloat32();
- const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ const Operator* RoundFloat64ToInt32();
const Operator* RoundInt32ToFloat32();
const Operator* RoundInt64ToFloat32();
const Operator* RoundInt64ToFloat64();
@@ -286,13 +358,12 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* Float64LessThan();
const Operator* Float64LessThanOrEqual();
- // Floating point min/max complying to IEEE 754 (single-precision).
- const OptionalOperator Float32Max();
- const OptionalOperator Float32Min();
-
- // Floating point min/max complying to IEEE 754 (double-precision).
- const OptionalOperator Float64Max();
- const OptionalOperator Float64Min();
+  // Floating point min/max complying with ECMAScript 6 (double-precision).
+ const Operator* Float64Max();
+ const Operator* Float64Min();
+  // Floating point min/max complying with WebAssembly (single-precision).
+ const Operator* Float32Max();
+ const Operator* Float32Min();
// Floating point abs complying to IEEE 754 (single-precision).
const Operator* Float32Abs();
@@ -311,18 +382,245 @@ class MachineOperatorBuilder final : public ZoneObject {
const OptionalOperator Float32RoundTiesEven();
const OptionalOperator Float64RoundTiesEven();
+ // Floating point neg.
+ const Operator* Float32Neg();
+ const Operator* Float64Neg();
+
+ // Floating point trigonometric functions (double-precision).
+ const Operator* Float64Acos();
+ const Operator* Float64Acosh();
+ const Operator* Float64Asin();
+ const Operator* Float64Asinh();
+ const Operator* Float64Atan();
+ const Operator* Float64Atan2();
+ const Operator* Float64Atanh();
+ const Operator* Float64Cos();
+ const Operator* Float64Cosh();
+ const Operator* Float64Sin();
+ const Operator* Float64Sinh();
+ const Operator* Float64Tan();
+ const Operator* Float64Tanh();
+
+ // Floating point exponential functions (double-precision).
+ const Operator* Float64Exp();
+ const Operator* Float64Expm1();
+ const Operator* Float64Pow();
+
+ // Floating point logarithm (double-precision).
+ const Operator* Float64Log();
+ const Operator* Float64Log1p();
+ const Operator* Float64Log2();
+ const Operator* Float64Log10();
+
+ // Floating point cube root (double-precision).
+ const Operator* Float64Cbrt();
+
// Floating point bit representation.
const Operator* Float64ExtractLowWord32();
const Operator* Float64ExtractHighWord32();
const Operator* Float64InsertLowWord32();
const Operator* Float64InsertHighWord32();
+ // Change signalling NaN to quiet NaN.
+ // Identity for any input that is not signalling NaN.
+ const Operator* Float64SilenceNaN();
+
+ // SIMD operators.
+ const Operator* CreateFloat32x4();
+ const Operator* Float32x4ExtractLane();
+ const Operator* Float32x4ReplaceLane();
+ const Operator* Float32x4Abs();
+ const Operator* Float32x4Neg();
+ const Operator* Float32x4Sqrt();
+ const Operator* Float32x4RecipApprox();
+ const Operator* Float32x4RecipSqrtApprox();
+ const Operator* Float32x4Add();
+ const Operator* Float32x4Sub();
+ const Operator* Float32x4Mul();
+ const Operator* Float32x4Div();
+ const Operator* Float32x4Min();
+ const Operator* Float32x4Max();
+ const Operator* Float32x4MinNum();
+ const Operator* Float32x4MaxNum();
+ const Operator* Float32x4Equal();
+ const Operator* Float32x4NotEqual();
+ const Operator* Float32x4LessThan();
+ const Operator* Float32x4LessThanOrEqual();
+ const Operator* Float32x4GreaterThan();
+ const Operator* Float32x4GreaterThanOrEqual();
+ const Operator* Float32x4Select();
+ const Operator* Float32x4Swizzle();
+ const Operator* Float32x4Shuffle();
+ const Operator* Float32x4FromInt32x4();
+ const Operator* Float32x4FromUint32x4();
+
+ const Operator* CreateInt32x4();
+ const Operator* Int32x4ExtractLane();
+ const Operator* Int32x4ReplaceLane();
+ const Operator* Int32x4Neg();
+ const Operator* Int32x4Add();
+ const Operator* Int32x4Sub();
+ const Operator* Int32x4Mul();
+ const Operator* Int32x4Min();
+ const Operator* Int32x4Max();
+ const Operator* Int32x4ShiftLeftByScalar();
+ const Operator* Int32x4ShiftRightByScalar();
+ const Operator* Int32x4Equal();
+ const Operator* Int32x4NotEqual();
+ const Operator* Int32x4LessThan();
+ const Operator* Int32x4LessThanOrEqual();
+ const Operator* Int32x4GreaterThan();
+ const Operator* Int32x4GreaterThanOrEqual();
+ const Operator* Int32x4Select();
+ const Operator* Int32x4Swizzle();
+ const Operator* Int32x4Shuffle();
+ const Operator* Int32x4FromFloat32x4();
+
+ const Operator* Uint32x4Min();
+ const Operator* Uint32x4Max();
+ const Operator* Uint32x4ShiftLeftByScalar();
+ const Operator* Uint32x4ShiftRightByScalar();
+ const Operator* Uint32x4LessThan();
+ const Operator* Uint32x4LessThanOrEqual();
+ const Operator* Uint32x4GreaterThan();
+ const Operator* Uint32x4GreaterThanOrEqual();
+ const Operator* Uint32x4FromFloat32x4();
+
+ const Operator* CreateBool32x4();
+ const Operator* Bool32x4ExtractLane();
+ const Operator* Bool32x4ReplaceLane();
+ const Operator* Bool32x4And();
+ const Operator* Bool32x4Or();
+ const Operator* Bool32x4Xor();
+ const Operator* Bool32x4Not();
+ const Operator* Bool32x4AnyTrue();
+ const Operator* Bool32x4AllTrue();
+ const Operator* Bool32x4Swizzle();
+ const Operator* Bool32x4Shuffle();
+ const Operator* Bool32x4Equal();
+ const Operator* Bool32x4NotEqual();
+
+ const Operator* CreateInt16x8();
+ const Operator* Int16x8ExtractLane();
+ const Operator* Int16x8ReplaceLane();
+ const Operator* Int16x8Neg();
+ const Operator* Int16x8Add();
+ const Operator* Int16x8AddSaturate();
+ const Operator* Int16x8Sub();
+ const Operator* Int16x8SubSaturate();
+ const Operator* Int16x8Mul();
+ const Operator* Int16x8Min();
+ const Operator* Int16x8Max();
+ const Operator* Int16x8ShiftLeftByScalar();
+ const Operator* Int16x8ShiftRightByScalar();
+ const Operator* Int16x8Equal();
+ const Operator* Int16x8NotEqual();
+ const Operator* Int16x8LessThan();
+ const Operator* Int16x8LessThanOrEqual();
+ const Operator* Int16x8GreaterThan();
+ const Operator* Int16x8GreaterThanOrEqual();
+ const Operator* Int16x8Select();
+ const Operator* Int16x8Swizzle();
+ const Operator* Int16x8Shuffle();
+
+ const Operator* Uint16x8AddSaturate();
+ const Operator* Uint16x8SubSaturate();
+ const Operator* Uint16x8Min();
+ const Operator* Uint16x8Max();
+ const Operator* Uint16x8ShiftLeftByScalar();
+ const Operator* Uint16x8ShiftRightByScalar();
+ const Operator* Uint16x8LessThan();
+ const Operator* Uint16x8LessThanOrEqual();
+ const Operator* Uint16x8GreaterThan();
+ const Operator* Uint16x8GreaterThanOrEqual();
+
+ const Operator* CreateBool16x8();
+ const Operator* Bool16x8ExtractLane();
+ const Operator* Bool16x8ReplaceLane();
+ const Operator* Bool16x8And();
+ const Operator* Bool16x8Or();
+ const Operator* Bool16x8Xor();
+ const Operator* Bool16x8Not();
+ const Operator* Bool16x8AnyTrue();
+ const Operator* Bool16x8AllTrue();
+ const Operator* Bool16x8Swizzle();
+ const Operator* Bool16x8Shuffle();
+ const Operator* Bool16x8Equal();
+ const Operator* Bool16x8NotEqual();
+
+ const Operator* CreateInt8x16();
+ const Operator* Int8x16ExtractLane();
+ const Operator* Int8x16ReplaceLane();
+ const Operator* Int8x16Neg();
+ const Operator* Int8x16Add();
+ const Operator* Int8x16AddSaturate();
+ const Operator* Int8x16Sub();
+ const Operator* Int8x16SubSaturate();
+ const Operator* Int8x16Mul();
+ const Operator* Int8x16Min();
+ const Operator* Int8x16Max();
+ const Operator* Int8x16ShiftLeftByScalar();
+ const Operator* Int8x16ShiftRightByScalar();
+ const Operator* Int8x16Equal();
+ const Operator* Int8x16NotEqual();
+ const Operator* Int8x16LessThan();
+ const Operator* Int8x16LessThanOrEqual();
+ const Operator* Int8x16GreaterThan();
+ const Operator* Int8x16GreaterThanOrEqual();
+ const Operator* Int8x16Select();
+ const Operator* Int8x16Swizzle();
+ const Operator* Int8x16Shuffle();
+
+ const Operator* Uint8x16AddSaturate();
+ const Operator* Uint8x16SubSaturate();
+ const Operator* Uint8x16Min();
+ const Operator* Uint8x16Max();
+ const Operator* Uint8x16ShiftLeftByScalar();
+ const Operator* Uint8x16ShiftRightByScalar();
+ const Operator* Uint8x16LessThan();
+ const Operator* Uint8x16LessThanOrEqual();
+ const Operator* Uint8x16GreaterThan();
+ const Operator* Uint8x16GreaterThanOrEqual();
+
+ const Operator* CreateBool8x16();
+ const Operator* Bool8x16ExtractLane();
+ const Operator* Bool8x16ReplaceLane();
+ const Operator* Bool8x16And();
+ const Operator* Bool8x16Or();
+ const Operator* Bool8x16Xor();
+ const Operator* Bool8x16Not();
+ const Operator* Bool8x16AnyTrue();
+ const Operator* Bool8x16AllTrue();
+ const Operator* Bool8x16Swizzle();
+ const Operator* Bool8x16Shuffle();
+ const Operator* Bool8x16Equal();
+ const Operator* Bool8x16NotEqual();
+
+ const Operator* Simd128Load();
+ const Operator* Simd128Load1();
+ const Operator* Simd128Load2();
+ const Operator* Simd128Load3();
+ const Operator* Simd128Store();
+ const Operator* Simd128Store1();
+ const Operator* Simd128Store2();
+ const Operator* Simd128Store3();
+ const Operator* Simd128And();
+ const Operator* Simd128Or();
+ const Operator* Simd128Xor();
+ const Operator* Simd128Not();
+
// load [base + index]
const Operator* Load(LoadRepresentation rep);
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
+ // unaligned load [base + index]
+ const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
+
+ // unaligned store [base + index], value
+ const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
+
const Operator* StackSlot(MachineRepresentation rep);
// Access to the machine stack.
@@ -335,11 +633,28 @@ class MachineOperatorBuilder final : public ZoneObject {
// checked-store heap, index, length, value
const Operator* CheckedStore(CheckedStoreRepresentation);
+ // atomic-load [base + index]
+ const Operator* AtomicLoad(LoadRepresentation rep);
+ // atomic-store [base + index], value
+ const Operator* AtomicStore(MachineRepresentation rep);
+
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
bool Is64() const { return word() == MachineRepresentation::kWord64; }
MachineRepresentation word() const { return word_; }
+ bool UnalignedLoadSupported(const MachineType& machineType,
+ uint8_t alignment) {
+ return alignment_requirements_.IsUnalignedLoadSupported(machineType,
+ alignment);
+ }
+
+ bool UnalignedStoreSupported(const MachineType& machineType,
+ uint8_t alignment) {
+ return alignment_requirements_.IsUnalignedStoreSupported(machineType,
+ alignment);
+ }
+
// Pseudo operators that translate to 32/64-bit operators depending on the
// word-size of the target machine assumed by this builder.
#define PSEUDO_OP_LIST(V) \
@@ -371,9 +686,11 @@ class MachineOperatorBuilder final : public ZoneObject {
#undef PSEUDO_OP_LIST
private:
+ Zone* zone_;
MachineOperatorGlobalCache const& cache_;
MachineRepresentation const word_;
Flags const flags_;
+ AlignmentRequirements const alignment_requirements_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
new file mode 100644
index 0000000000..97c4362728
--- /dev/null
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -0,0 +1,503 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/memory-optimizer.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
+ : jsgraph_(jsgraph),
+ empty_state_(AllocationState::Empty(zone)),
+ pending_(zone),
+ tokens_(zone),
+ zone_(zone) {}
+
+void MemoryOptimizer::Optimize() {
+ EnqueueUses(graph()->start(), empty_state());
+ while (!tokens_.empty()) {
+ Token const token = tokens_.front();
+ tokens_.pop();
+ VisitNode(token.node, token.state);
+ }
+ DCHECK(pending_.empty());
+ DCHECK(tokens_.empty());
+}
+
+MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
+ PretenureFlag pretenure,
+ Zone* zone)
+ : node_ids_(zone), pretenure_(pretenure), size_(nullptr) {
+ node_ids_.insert(node->id());
+}
+
+MemoryOptimizer::AllocationGroup::AllocationGroup(Node* node,
+ PretenureFlag pretenure,
+ Node* size, Zone* zone)
+ : node_ids_(zone), pretenure_(pretenure), size_(size) {
+ node_ids_.insert(node->id());
+}
+
+void MemoryOptimizer::AllocationGroup::Add(Node* node) {
+ node_ids_.insert(node->id());
+}
+
+bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
+ return node_ids_.find(node->id()) != node_ids_.end();
+}
+
+MemoryOptimizer::AllocationState::AllocationState()
+ : group_(nullptr), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
+
+MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
+ : group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
+
+MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
+ int size, Node* top)
+ : group_(group), size_(size), top_(top) {}
+
+bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
+ return group() && group()->IsNewSpaceAllocation();
+}
+
+void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
+ DCHECK(!node->IsDead());
+ DCHECK_LT(0, node->op()->EffectInputCount());
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ return VisitAllocate(node, state);
+ case IrOpcode::kCall:
+ return VisitCall(node, state);
+ case IrOpcode::kLoadElement:
+ return VisitLoadElement(node, state);
+ case IrOpcode::kLoadField:
+ return VisitLoadField(node, state);
+ case IrOpcode::kStoreElement:
+ return VisitStoreElement(node, state);
+ case IrOpcode::kStoreField:
+ return VisitStoreField(node, state);
+ case IrOpcode::kCheckedLoad:
+ case IrOpcode::kCheckedStore:
+ case IrOpcode::kDeoptimizeIf:
+ case IrOpcode::kDeoptimizeUnless:
+ case IrOpcode::kIfException:
+ case IrOpcode::kLoad:
+ case IrOpcode::kStore:
+ case IrOpcode::kRetain:
+ case IrOpcode::kUnsafePointerAdd:
+ return VisitOtherEffect(node, state);
+ default:
+ break;
+ }
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+}
+
+void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
+ Node* value;
+ Node* size = node->InputAt(0);
+ Node* effect = node->InputAt(1);
+ Node* control = node->InputAt(2);
+ PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+
+ // Determine the top/limit addresses.
+ Node* top_address = jsgraph()->ExternalConstant(
+ pretenure == NOT_TENURED
+ ? ExternalReference::new_space_allocation_top_address(isolate())
+ : ExternalReference::old_space_allocation_top_address(isolate()));
+ Node* limit_address = jsgraph()->ExternalConstant(
+ pretenure == NOT_TENURED
+ ? ExternalReference::new_space_allocation_limit_address(isolate())
+ : ExternalReference::old_space_allocation_limit_address(isolate()));
+
+ // Check if we can fold this allocation into a previous allocation represented
+ // by the incoming {state}.
+ Int32Matcher m(size);
+ if (m.HasValue() && m.Value() < Page::kMaxRegularHeapObjectSize) {
+ int32_t const object_size = m.Value();
+ if (state->size() <= Page::kMaxRegularHeapObjectSize - object_size &&
+ state->group()->pretenure() == pretenure) {
+ // We can fold this Allocate {node} into the allocation {group}
+ // represented by the given {state}. Compute the upper bound for
+ // the new {state}.
+ int32_t const state_size = state->size() + object_size;
+
+ // Update the reservation check to the actual maximum upper bound.
+ AllocationGroup* const group = state->group();
+ if (OpParameter<int32_t>(group->size()) < state_size) {
+ NodeProperties::ChangeOp(group->size(),
+ common()->Int32Constant(state_size));
+ }
+
+ // Update the allocation top with the new object allocation.
+ // TODO(bmeurer): Defer writing back top as much as possible.
+ Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
+ jsgraph()->IntPtrConstant(object_size));
+ effect = graph()->NewNode(
+ machine()->Store(StoreRepresentation(
+ MachineType::PointerRepresentation(), kNoWriteBarrier)),
+ top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+
+ // Compute the effective inner allocated address.
+ value = graph()->NewNode(
+ machine()->BitcastWordToTagged(),
+ graph()->NewNode(machine()->IntAdd(), state->top(),
+ jsgraph()->IntPtrConstant(kHeapObjectTag)));
+
+ // Extend the allocation {group}.
+ group->Add(value);
+ state = AllocationState::Open(group, state_size, top, zone());
+ } else {
+      // Set up a mutable reservation size node; it will be patched as we fold
+ // additional allocations into this new group.
+ Node* size = graph()->NewNode(common()->Int32Constant(object_size));
+
+ // Load allocation top and limit.
+ Node* top = effect =
+ graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+ Node* limit = effect = graph()->NewNode(
+ machine()->Load(MachineType::Pointer()), limit_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+
+ // Check if we need to collect garbage before we can start bump pointer
+ // allocation (always done for folded allocations).
+ Node* check = graph()->NewNode(
+ machine()->UintLessThan(),
+ graph()->NewNode(
+ machine()->IntAdd(), top,
+ machine()->Is64()
+ ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
+ : size),
+ limit);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = top;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* target = pretenure == NOT_TENURED
+ ? jsgraph()->AllocateInNewSpaceStubConstant()
+ : jsgraph()->AllocateInOldSpaceStubConstant();
+ if (!allocate_operator_.is_set()) {
+ CallDescriptor* descriptor =
+ Linkage::GetAllocateCallDescriptor(graph()->zone());
+ allocate_operator_.set(common()->Call(descriptor));
+ }
+ vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
+ size, efalse, if_false);
+ vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
+ jsgraph()->IntPtrConstant(kHeapObjectTag));
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(
+ common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
+ control);
+
+ // Compute the new top and write it back.
+ top = graph()->NewNode(machine()->IntAdd(), value,
+ jsgraph()->IntPtrConstant(object_size));
+ effect = graph()->NewNode(
+ machine()->Store(StoreRepresentation(
+ MachineType::PointerRepresentation(), kNoWriteBarrier)),
+ top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+
+ // Compute the initial object address.
+ value = graph()->NewNode(
+ machine()->BitcastWordToTagged(),
+ graph()->NewNode(machine()->IntAdd(), value,
+ jsgraph()->IntPtrConstant(kHeapObjectTag)));
+
+ // Start a new allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, pretenure, size, zone());
+ state = AllocationState::Open(group, object_size, top, zone());
+ }
+ } else {
+ // Load allocation top and limit.
+ Node* top = effect =
+ graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+ Node* limit = effect =
+ graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
+ jsgraph()->IntPtrConstant(0), effect, control);
+
+ // Compute the new top.
+ Node* new_top = graph()->NewNode(
+ machine()->IntAdd(), top,
+ machine()->Is64()
+ ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
+ : size);
+
+ // Check if we can do bump pointer allocation here.
+ Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ etrue = graph()->NewNode(
+ machine()->Store(StoreRepresentation(
+ MachineType::PointerRepresentation(), kNoWriteBarrier)),
+ top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
+ vtrue = graph()->NewNode(
+ machine()->BitcastWordToTagged(),
+ graph()->NewNode(machine()->IntAdd(), top,
+ jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ }
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ Node* target = pretenure == NOT_TENURED
+ ? jsgraph()->AllocateInNewSpaceStubConstant()
+ : jsgraph()->AllocateInOldSpaceStubConstant();
+ if (!allocate_operator_.is_set()) {
+ CallDescriptor* descriptor =
+ Linkage::GetAllocateCallDescriptor(graph()->zone());
+ allocate_operator_.set(common()->Call(descriptor));
+ }
+ vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
+ efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+
+ // Create an unfoldable allocation group.
+ AllocationGroup* group =
+ new (zone()) AllocationGroup(value, pretenure, zone());
+ state = AllocationState::Closed(group, zone());
+ }
+
+ // Replace all effect uses of {node} with the {effect}, enqueue the
+ // effect uses for further processing, and replace all value uses of
+ // {node} with the {value}.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ EnqueueUse(edge.from(), edge.index(), state);
+ edge.UpdateTo(effect);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ edge.UpdateTo(value);
+ }
+ }
+
+ // Kill the {node} to make sure we don't leave dangling dead uses.
+ node->Kill();
+}
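
// The folding path above, on a small example (sizes illustrative): given two
// allocations on one effect chain,
//
//   a = Allocate(16);  b = Allocate(24);
//
// both end up in one AllocationGroup sharing a single bump-pointer
// reservation. The group's size node is patched from 16 to 40, `b` is placed
// at the old top + 16, and only one top/limit check remains for the pair.
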
+
+void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ // If the call can allocate, we start with a fresh state.
+ if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) {
+ state = empty_state();
+ }
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitLoadElement(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kLoadElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* index = node->InputAt(1);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreElement(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreElement, node->opcode());
+ ElementAccess const& access = ElementAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ WriteBarrierKind write_barrier_kind =
+ ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ node->ReplaceInput(1, ComputeIndex(access, index));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreField(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
+ FieldAccess const& access = FieldAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ WriteBarrierKind write_barrier_kind =
+ ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+ node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitOtherEffect(Node* node,
+ AllocationState const* state) {
+ EnqueueUses(node, state);
+}
+
+Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
+ Node* index;
+ if (machine()->Is64()) {
+ // On 64-bit platforms, we need to feed a Word64 index to the Load and
+ // Store operators. Since LoadElement or StoreElement don't do any bounds
+ // checking themselves, we can be sure that the {key} was already checked
+ // and is in valid range, so we can do the further address computation on
+ // Word64 below, which ideally allows us to fuse the address computation
+ // with the actual memory access operation on Intel platforms.
+ index = graph()->NewNode(machine()->ChangeUint32ToUint64(), key);
+ } else {
+ index = key;
+ }
+ int const element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ index = graph()->NewNode(machine()->WordShl(), index,
+ jsgraph()->IntPtrConstant(element_size_shift));
+ }
+ int const fixed_offset = access.header_size - access.tag();
+ if (fixed_offset) {
+ index = graph()->NewNode(machine()->IntAdd(), index,
+ jsgraph()->IntPtrConstant(fixed_offset));
+ }
+ return index;
+}
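
// A worked instance of the computation above, assuming a Float64 element
// access with a 16-byte header and kHeapObjectTag == 1 (values illustrative):
// a key k becomes
//
//   index = (k << 3) + (16 - 1)
//
// i.e. a raw byte offset from the tagged object pointer that the machine
// Load/Store can consume directly.
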
+
+WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
+ Node* object, AllocationState const* state,
+ WriteBarrierKind write_barrier_kind) {
+ if (state->IsNewSpaceAllocation() && state->group()->Contains(object)) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ return write_barrier_kind;
+}
+
+MemoryOptimizer::AllocationState const* MemoryOptimizer::MergeStates(
+ AllocationStates const& states) {
+ // Check if all states are the same; or at least if all allocation
+ // states belong to the same allocation group.
+ AllocationState const* state = states.front();
+ AllocationGroup* group = state->group();
+ for (size_t i = 1; i < states.size(); ++i) {
+ if (states[i] != state) state = nullptr;
+ if (states[i]->group() != group) group = nullptr;
+ }
+ if (state == nullptr) {
+ if (group != nullptr) {
+ // We cannot fold any more allocations into this group, but we can still
+ // eliminate write barriers on stores to this group.
+ // TODO(bmeurer): We could potentially just create a Phi here to merge
+ // the various tops; but we need to pay special attention not to create
+ // an unschedulable graph.
+ state = AllocationState::Closed(group, zone());
+ } else {
+ // The states are from different allocation groups.
+ state = empty_state();
+ }
+ }
+ return state;
+}
+
+void MemoryOptimizer::EnqueueMerge(Node* node, int index,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ int const input_count = node->InputCount() - 1;
+ DCHECK_LT(0, input_count);
+ Node* const control = node->InputAt(input_count);
+ if (control->opcode() == IrOpcode::kLoop) {
+ // For loops we always start with an empty state at the beginning.
+ if (index == 0) EnqueueUses(node, empty_state());
+ } else {
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+ // Check if we already know about this pending merge.
+ NodeId const id = node->id();
+ auto it = pending_.find(id);
+ if (it == pending_.end()) {
+ // Insert a new pending merge.
+ it = pending_.insert(std::make_pair(id, AllocationStates(zone()))).first;
+ }
+ // Add the next input state.
+ it->second.push_back(state);
+ // Check if states for all inputs are available by now.
+ if (it->second.size() == static_cast<size_t>(input_count)) {
+ // All inputs to this effect merge are done, merge the states given all
+ // input constraints, drop the pending merge and enqueue uses of the
+ // EffectPhi {node}.
+ state = MergeStates(it->second);
+ EnqueueUses(node, state);
+ pending_.erase(it);
+ }
+ }
+}
+
+void MemoryOptimizer::EnqueueUses(Node* node, AllocationState const* state) {
+ for (Edge const edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ EnqueueUse(edge.from(), edge.index(), state);
+ }
+ }
+}
+
+void MemoryOptimizer::EnqueueUse(Node* node, int index,
+ AllocationState const* state) {
+ if (node->opcode() == IrOpcode::kEffectPhi) {
+ // An EffectPhi represents a merge of different effect chains, which
+ // needs special handling depending on whether the merge is part of a
+ // loop or just a normal control join.
+ EnqueueMerge(node, index, state);
+ } else {
+ Token token = {node, state};
+ tokens_.push(token);
+ }
+}
+
+Graph* MemoryOptimizer::graph() const { return jsgraph()->graph(); }
+
+Isolate* MemoryOptimizer::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* MemoryOptimizer::common() const {
+ return jsgraph()->common();
+}
+
+MachineOperatorBuilder* MemoryOptimizer::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
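
// A minimal sketch of driving this pass, assuming a JSGraph and a temporary
// Zone are at hand as in a typical pipeline phase:
//
//   MemoryOptimizer optimizer(jsgraph, temp_zone);
//   optimizer.Optimize();
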
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
new file mode 100644
index 0000000000..f0cd546860
--- /dev/null
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -0,0 +1,149 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
+#define V8_COMPILER_MEMORY_OPTIMIZER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ElementAccess;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+class Node;
+class Operator;
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef uint32_t NodeId;
+
+// Lowers all simplified memory access and allocation related nodes (i.e.
+// Allocate, LoadField, StoreField and friends) to machine operators.
+// Performs allocation folding and store write barrier elimination
+// implicitly.
+class MemoryOptimizer final {
+ public:
+ MemoryOptimizer(JSGraph* jsgraph, Zone* zone);
+ ~MemoryOptimizer() {}
+
+ void Optimize();
+
+ private:
+ // An allocation group represents a set of allocations that have been folded
+ // together.
+ class AllocationGroup final : public ZoneObject {
+ public:
+ AllocationGroup(Node* node, PretenureFlag pretenure, Zone* zone);
+ AllocationGroup(Node* node, PretenureFlag pretenure, Node* size,
+ Zone* zone);
+ ~AllocationGroup() {}
+
+ void Add(Node* object);
+ bool Contains(Node* object) const;
+ bool IsNewSpaceAllocation() const { return pretenure() == NOT_TENURED; }
+
+ PretenureFlag pretenure() const { return pretenure_; }
+ Node* size() const { return size_; }
+
+ private:
+ ZoneSet<NodeId> node_ids_;
+ PretenureFlag const pretenure_;
+ Node* const size_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
+ };
+
+ // An allocation state is propagated on the effect paths through the graph.
+ class AllocationState final : public ZoneObject {
+ public:
+ static AllocationState const* Empty(Zone* zone) {
+ return new (zone) AllocationState();
+ }
+ static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
+ return new (zone) AllocationState(group);
+ }
+ static AllocationState const* Open(AllocationGroup* group, int size,
+ Node* top, Zone* zone) {
+ return new (zone) AllocationState(group, size, top);
+ }
+
+ bool IsNewSpaceAllocation() const;
+
+ AllocationGroup* group() const { return group_; }
+ Node* top() const { return top_; }
+ int size() const { return size_; }
+
+ private:
+ AllocationState();
+ explicit AllocationState(AllocationGroup* group);
+ AllocationState(AllocationGroup* group, int size, Node* top);
+
+ AllocationGroup* const group_;
+ // The upper bound of the combined allocated object size on the current path
+ // (max int if allocation folding is impossible on this path).
+ int const size_;
+ Node* const top_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationState);
+ };
+
+ // An array of allocation states used to collect states on merges.
+ typedef ZoneVector<AllocationState const*> AllocationStates;
+
+ // We thread through tokens to represent the current state on a given effect
+ // path through the graph.
+ struct Token {
+ Node* node;
+ AllocationState const* state;
+ };
+
+ void VisitNode(Node*, AllocationState const*);
+ void VisitAllocate(Node*, AllocationState const*);
+ void VisitCall(Node*, AllocationState const*);
+ void VisitLoadElement(Node*, AllocationState const*);
+ void VisitLoadField(Node*, AllocationState const*);
+ void VisitStoreElement(Node*, AllocationState const*);
+ void VisitStoreField(Node*, AllocationState const*);
+ void VisitOtherEffect(Node*, AllocationState const*);
+
+ Node* ComputeIndex(ElementAccess const&, Node*);
+ WriteBarrierKind ComputeWriteBarrierKind(Node* object,
+ AllocationState const* state,
+ WriteBarrierKind);
+
+ AllocationState const* MergeStates(AllocationStates const& states);
+
+ void EnqueueMerge(Node*, int, AllocationState const*);
+ void EnqueueUses(Node*, AllocationState const*);
+ void EnqueueUse(Node*, int, AllocationState const*);
+
+ AllocationState const* empty_state() const { return empty_state_; }
+ Graph* graph() const;
+ Isolate* isolate() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
+ Zone* zone() const { return zone_; }
+
+ SetOncePointer<const Operator> allocate_operator_;
+ JSGraph* const jsgraph_;
+ AllocationState const* const empty_state_;
+ ZoneMap<NodeId, AllocationStates> pending_;
+ ZoneQueue<Token> tokens_;
+ Zone* const zone_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MEMORY_OPTIMIZER_H_
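
The header above folds allocations and threads an AllocationState along each effect path: consecutive allocations extend an open group while their combined size stays statically known, and any potentially-GCing operation closes the group. A minimal standalone sketch of that bookkeeping, with illustrative names (not V8 code):

#include <cstdio>
#include <vector>

// One operation on a straight-line effect chain: either an allocation of a
// known size, or a call that may trigger GC and thus invalidates folding.
struct Op {
  bool is_call;
  int alloc_size;  // valid only when !is_call
};

int main() {
  std::vector<Op> chain = {{false, 16}, {false, 24}, {true, 0}, {false, 8}};
  int open_group_size = 0;  // combined size of the currently open group
  for (const Op& op : chain) {
    if (op.is_call) {
      // A call can collect garbage, so the open group must be closed; the
      // next allocation starts a fresh group (cf. AllocationState::Closed).
      if (open_group_size > 0)
        std::printf("closed group of %d bytes\n", open_group_size);
      open_group_size = 0;
    } else {
      // Folding: the allocation is served by bumping inside the open group
      // (cf. AllocationState::Open carrying the running size and top).
      open_group_size += op.alloc_size;
    }
  }
  if (open_group_size > 0)
    std::printf("closed group of %d bytes\n", open_group_size);
}
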
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 9b0d706327..ee9b40eb35 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -119,7 +119,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -472,28 +472,48 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(&done); \
}
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
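The two macros above bracket plain loads and stores with MIPS sync barriers: a trailing sync after an atomic load, and a sync on both sides of an atomic store. This is one conventional fence-based mapping for sequentially consistent accesses; a rough C++ rendering of the same barrier placement (an illustration of ordering only, not a substitute for std::atomic):

#include <atomic>

// Shape of ASSEMBLE_ATOMIC_LOAD_INTEGER: load, then a full barrier.
int AtomicLoadLike(const int* p) {
  int value = *const_cast<const volatile int*>(p);       // __ asm_instr(...)
  std::atomic_thread_fence(std::memory_order_seq_cst);   // __ sync()
  return value;
}

// Shape of ASSEMBLE_ATOMIC_STORE_INTEGER: barrier, store, barrier.
void AtomicStoreLike(int* p, int value) {
  std::atomic_thread_fence(std::memory_order_seq_cst);   // __ sync()
  *const_cast<volatile int*>(p) = value;                 // __ asm_instr(...)
  std::atomic_thread_fence(std::memory_order_seq_cst);   // __ sync()
}

int main() {
  int cell = 0;
  AtomicStoreLike(&cell, 42);
  return AtomicLoadLike(&cell) == 42 ? 0 : 1;
}
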
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(sp, fp);
__ Pop(ra, fp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ addiu(sp, sp, sp_slot_delta * kPointerSize);
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ Subu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -526,8 +546,41 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->Subu(sp, sp, stack_slot_delta * kPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
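AdjustStackPointerForTailCall above moves sp so that the tail callee's first unused stack slot lands where the calling convention expects it, growing before the gap moves run and (optionally) shrinking after. The delta arithmetic in isolation, with illustrative names (not V8's):

#include <cassert>

// stack_slot_delta > 0 means sp must move down (grow), < 0 up (shrink).
int StackSlotDelta(int sp_to_fp_slots, int fixed_slots_above_fp,
                   int first_unused_slot_above_sp) {
  int current_sp_offset = sp_to_fp_slots + fixed_slots_above_fp;
  return first_unused_slot_above_sp - current_sp_offset;
}

int main() {
  // sp sits 4 slots below fp and 2 fixed slots live above fp; if the tail
  // call needs its first unused slot 8 above sp, sp must drop by 2 slots.
  assert(StackSlotDelta(4, 2, 8) == 2);
  // If only 4 slots are needed, sp can come back up by 2 slots instead.
  assert(StackSlotDelta(4, 2, 4) == -2);
}
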
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -547,8 +600,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -562,6 +613,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(at);
}
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -577,6 +636,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(at);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallJSFunctionFromJSFunction:
@@ -588,8 +648,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -608,7 +666,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -632,6 +690,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -641,7 +710,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -688,6 +759,71 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Operand(offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Pow: {
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ break;
+ }
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -703,6 +839,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsMul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsMulOvf:
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ break;
case kMipsMulHigh:
__ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -839,6 +978,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
+ case kMipsShlPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
+ case kMipsShrPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
+ case kMipsSarPair: {
+ if (instr->InputAt(2)->IsRegister()) {
+ __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2));
+ } else {
+ uint32_t imm = i.InputOperand(2).immediate();
+ __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), imm);
+ }
+ } break;
case kMipsExt:
__ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
@@ -869,7 +1038,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ li(i.OutputRegister(), i.InputOperand(0));
}
break;
-
+ case kMipsLsa:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2));
+ break;
case kMipsCmpS:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
@@ -923,6 +1096,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsCmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
+ case kMipsAddPair:
+ __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+ break;
+ case kMipsSubPair:
+ __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
+ break;
case kMipsMulPair: {
__ Mulu(i.OutputRegister(1), i.OutputRegister(0), i.InputRegister(0),
i.InputRegister(2));
@@ -965,6 +1146,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsAbsD:
__ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kMipsNegS:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kMipsNegD:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kMipsSqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -1009,60 +1196,48 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
break;
}
- case kMipsFloat64Max: {
- // (b < a) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
- // Left operand is result, passthrough if false.
- __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat32Max: {
+ Label compare_nan, done_compare;
+ __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputSingleRegister(),
+ std::numeric_limits<float>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMipsFloat64Min: {
- // (a < b) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
- __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
- // Right operand is result, passthrough if false.
- __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat64Max: {
+ Label compare_nan, done_compare;
+ __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputDoubleRegister(),
+ std::numeric_limits<double>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMipsFloat32Max: {
- // (b < a) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
- // Left operand is result, passthrough if false.
- __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat32Min: {
+ Label compare_nan, done_compare;
+ __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputSingleRegister(),
+ std::numeric_limits<float>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMipsFloat32Min: {
- // (a < b) ? a : b
- if (IsMipsArchVariant(kMips32r6)) {
- __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
- __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
- // Right operand is result, passthrough if false.
- __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMipsFloat64Min: {
+ Label compare_nan, done_compare;
+ __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputDoubleRegister(),
+ std::numeric_limits<double>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
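
The rewritten min/max sequences above give the operations NaN-propagating semantics: if either input is NaN, the result is the canonical quiet NaN rather than whatever the comparison would pick (which is also why the old cmp/sel and c/movt variants could be dropped). A scalar model of the intended behavior, ignoring the ±0 ordering details the macro-assembler helpers also handle:

#include <cassert>
#include <cmath>
#include <limits>

double Float64MaxLike(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();  // the compare_nan path
  return a > b ? a : b;
}

int main() {
  assert(Float64MaxLike(1.0, 2.0) == 2.0);
  assert(std::isnan(Float64MaxLike(1.0, std::nan(""))));  // NaN propagates
}
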
case kMipsCvtSD: {
@@ -1143,6 +1318,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
FPURegister scratch = kScratchDoubleReg;
__ trunc_w_s(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
+ __ addiu(kScratchReg, i.OutputRegister(), 1);
+ __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
+ __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
break;
}
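
On overflow (and for NaN), the MIPS trunc instructions saturate the integer result to INT32_MAX; the three added instructions above rewrite that marker to INT32_MIN, which callers can range-check more cheaply. The addiu/slt/Movn triple works by signed wraparound, as this hypothetical model shows (the following hunk applies the same idea to the unsigned case, using 0 as the marker):

#include <cassert>
#include <cstdint>

int32_t CanonicalizeOverflowMarker(int32_t out) {
  // addiu kScratchReg, out, 1  -- wraps to INT32_MIN only when out is MAX.
  int32_t plus_one = static_cast<int32_t>(static_cast<uint32_t>(out) + 1u);
  // slt kScratchReg2, kScratchReg, out  -- true only on that wraparound.
  bool wrapped = plus_one < out;
  // Movn out, kScratchReg, kScratchReg2  -- conditionally take INT32_MIN.
  return wrapped ? plus_one : out;
}

int main() {
  assert(CanonicalizeOverflowMarker(INT32_MAX) == INT32_MIN);
  assert(CanonicalizeOverflowMarker(42) == 42);  // in-range values untouched
}
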
case kMipsTruncUwD: {
@@ -1155,6 +1335,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
__ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ addiu(kScratchReg, i.OutputRegister(), 1);
+ __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
break;
}
case kMipsFloat64ExtractLowWord32:
@@ -1169,6 +1353,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsFloat64InsertHighWord32:
__ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
break;
+ case kMipsFloat64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+
// ... more basic instructions ...
case kMipsLbu:
@@ -1183,36 +1371,67 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMipsUlhu:
+ __ Ulhu(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMipsUlh:
+ __ Ulh(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMipsSh:
__ sh(i.InputRegister(2), i.MemoryOperand());
break;
+ case kMipsUsh:
+ __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+ break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMipsUlw:
+ __ Ulw(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMipsSw:
__ sw(i.InputRegister(2), i.MemoryOperand());
break;
+ case kMipsUsw:
+ __ Usw(i.InputRegister(2), i.MemoryOperand());
+ break;
case kMipsLwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
+ case kMipsUlwc1: {
+ __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
+ break;
+ }
case kMipsSwc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
}
+ case kMipsUswc1: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+ break;
+ }
case kMipsLdc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
+ case kMipsUldc1:
+ __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
+ break;
case kMipsSdc1:
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
+ case kMipsUsdc1:
+ __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+ break;
case kMipsPush:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1227,13 +1446,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kMipsStoreToStackSlot: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
} else {
__ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
break;
}
+ case kMipsByteSwap32: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
break;
@@ -1274,7 +1503,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1355,6 +1609,20 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
break;
}
+ } else if (instr->arch_opcode() == kMipsMulOvf) {
+ switch (branch->condition) {
+ case kOverflow:
+ __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel);
+ break;
+ case kNotOverflow:
+ __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel);
+ break;
+ default:
+ UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1420,7 +1688,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
+ instr->arch_opcode() == kMipsSubOvf ||
+ instr->arch_opcode() == kMipsMulOvf) {
Label flabel, tlabel;
switch (instr->arch_opcode()) {
case kMipsAddOvf:
@@ -1432,6 +1701,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), &flabel);
break;
+ case kMipsMulOvf:
+ __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel);
+ break;
default:
UNREACHABLE();
break;
@@ -1569,18 +1842,43 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
});
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ }
+
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
-void CodeGenerator::AssemblePrologue() {
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- int stack_shrink_slots = frame()->GetSpillSlotCount();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
__ Push(ra, fp);
@@ -1592,6 +1890,8 @@ void CodeGenerator::AssemblePrologue() {
}
}
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1602,35 +1902,24 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
- if (saves_fpu != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ Subu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
}
// Save callee-saved FPU registers.
if (saves_fpu != 0) {
__ MultiPushFPU(saves_fpu);
- int count = base::bits::CountPopulation32(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == count);
- frame()->AllocateSavedCalleeRegisterSlots(count *
- (kDoubleSize / kPointerSize));
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- // kNumCalleeSaved includes the fp register, but the fp register
- // is saved separately in TF.
- int count = base::bits::CountPopulation32(saves);
- DCHECK(kNumCalleeSaved == count + 1);
- frame()->AllocateSavedCalleeRegisterSlots(count);
+ DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
}
}
@@ -1701,7 +1990,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ li(dst, Operand(src.ToInt32()));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ li(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ li(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kFloat32:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
@@ -1734,7 +2029,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ sw(at, dst);
@@ -1744,32 +2039,57 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ sdc1(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
- __ sdc1(src, g.ToMemOperand(destination));
+ DCHECK(destination->IsFPStackSlot());
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ sdc1(src, g.ToMemOperand(destination));
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ swc1(src, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ ldc1(g.ToDoubleRegister(destination), src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (destination->IsFPRegister()) {
+ if (rep == MachineRepresentation::kFloat64) {
+ __ ldc1(g.ToDoubleRegister(destination), src);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ lwc1(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
} else {
FPURegister temp = kScratchDoubleReg;
- __ ldc1(temp, src);
- __ sdc1(temp, g.ToMemOperand(destination));
+ if (rep == MachineRepresentation::kFloat64) {
+ __ ldc1(temp, src);
+ __ sdc1(temp, g.ToMemOperand(destination));
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ lwc1(temp, src);
+ __ swc1(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
}
} else {
UNREACHABLE();
@@ -1808,35 +2128,57 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ lw(temp_1, dst);
__ sw(temp_0, dst);
__ sw(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ ldc1(src, dst);
- __ sdc1(temp, dst);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ __ Move(temp, src);
+ __ ldc1(src, dst);
+ __ sdc1(temp, dst);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ Move(temp, src);
+ __ lwc1(src, dst);
+ __ swc1(temp, dst);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- __ ldc1(temp_1, dst0); // Save destination in temp_1.
- __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ sw(temp_0, dst0);
- __ lw(temp_0, src1);
- __ sw(temp_0, dst1);
- __ sdc1(temp_1, src0);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
+ __ ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ lw(temp_0, src1);
+ __ sw(temp_0, dst1);
+ __ sdc1(temp_1, src0);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ lwc1(temp_1, dst0); // Save destination in temp_1.
+ __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ sw(temp_0, dst0);
+ __ swc1(temp_1, src0);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ UNREACHABLE();
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
@@ -1850,13 +2192,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // Unused on 32-bit ARM. Still exists on 64-bit arm.
- // TODO(plind): Unclear when this is called now. Understand, fix if needed.
- __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index d85c2a7fe5..269ac0fed4 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -17,6 +17,7 @@ namespace compiler {
V(MipsSub) \
V(MipsSubOvf) \
V(MipsMul) \
+ V(MipsMulOvf) \
V(MipsMulHigh) \
V(MipsMulHighU) \
V(MipsDiv) \
@@ -30,9 +31,13 @@ namespace compiler {
V(MipsClz) \
V(MipsCtz) \
V(MipsPopcnt) \
+ V(MipsLsa) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
+ V(MipsShlPair) \
+ V(MipsShrPair) \
+ V(MipsSarPair) \
V(MipsExt) \
V(MipsIns) \
V(MipsRor) \
@@ -59,6 +64,10 @@ namespace compiler {
V(MipsSqrtD) \
V(MipsMaxD) \
V(MipsMinD) \
+ V(MipsNegS) \
+ V(MipsNegD) \
+ V(MipsAddPair) \
+ V(MipsSubPair) \
V(MipsMulPair) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
@@ -88,24 +97,35 @@ namespace compiler {
V(MipsLbu) \
V(MipsSb) \
V(MipsLh) \
+ V(MipsUlh) \
V(MipsLhu) \
+ V(MipsUlhu) \
V(MipsSh) \
+ V(MipsUsh) \
V(MipsLw) \
+ V(MipsUlw) \
V(MipsSw) \
+ V(MipsUsw) \
V(MipsLwc1) \
+ V(MipsUlwc1) \
V(MipsSwc1) \
+ V(MipsUswc1) \
V(MipsLdc1) \
+ V(MipsUldc1) \
V(MipsSdc1) \
+ V(MipsUsdc1) \
V(MipsFloat64ExtractLowWord32) \
V(MipsFloat64ExtractHighWord32) \
V(MipsFloat64InsertLowWord32) \
V(MipsFloat64InsertHighWord32) \
- V(MipsFloat64Max) \
- V(MipsFloat64Min) \
+ V(MipsFloat64SilenceNaN) \
V(MipsFloat32Max) \
+ V(MipsFloat64Max) \
V(MipsFloat32Min) \
+ V(MipsFloat64Min) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
+ V(MipsByteSwap32) \
V(MipsStackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index f86ffe7643..4c353694e8 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -104,7 +104,14 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -117,7 +124,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -152,6 +159,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsLw;
@@ -231,6 +240,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kMipsSh;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kMipsSw;
@@ -395,27 +406,71 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
VisitRRO(this, kMipsSar, node);
}
-void InstructionSelector::VisitInt32PairAdd(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitInt32PairSub(Node* node) { UNIMPLEMENTED(); }
+static void VisitInt32PairBinop(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
+ MipsOperandGenerator g(selector);
-void InstructionSelector::VisitInt32PairMul(Node* node) {
- MipsOperandGenerator g(this);
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // register.
InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)),
g.UseUniqueRegister(node->InputAt(2)),
g.UseUniqueRegister(node->InputAt(3))};
+
InstructionOperand outputs[] = {
g.DefineAsRegister(node),
g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- Emit(kMipsMulPair, 2, outputs, 4, inputs);
+ selector->Emit(opcode, 2, outputs, 4, inputs);
}
-void InstructionSelector::VisitWord32PairShl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairAdd(Node* node) {
+ VisitInt32PairBinop(this, kMipsAddPair, node);
+}
-void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairSub(Node* node) {
+ VisitInt32PairBinop(this, kMipsSubPair, node);
+}
-void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitInt32PairMul(Node* node) {
+ VisitInt32PairBinop(this, kMipsMulPair, node);
+}
+
+// Shared routine for multiple shift operations.
+static void VisitWord32PairShift(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
+ MipsOperandGenerator g(selector);
+ Int32Matcher m(node->InputAt(2));
+ InstructionOperand shift_operand;
+ if (m.HasValue()) {
+ shift_operand = g.UseImmediate(m.node());
+ } else {
+ shift_operand = g.UseUniqueRegister(m.node());
+ }
+
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // register.
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ shift_operand};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ selector->Emit(opcode, 2, outputs, 3, inputs);
+}
+
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitWord32PairShift(this, kMipsShlPair, node);
+}
+
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitWord32PairShift(this, kMipsShrPair, node);
+}
+
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitWord32PairShift(this, kMipsSarPair, node);
+}
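
The pair ops above implement 64-bit arithmetic on 32-bit MIPS with {low, high} register pairs; the shift selections all route through VisitWord32PairShift. A simplified model of what a pair shift-left has to compute (the real ShlPair macro-assembler routine also handles MIPS-specific corner cases such as variable shift amounts):

#include <cassert>
#include <cstdint>

void ShlPairLike(uint32_t* low, uint32_t* high, unsigned shift) {
  if (shift == 0) return;
  if (shift < 32) {
    // Bits shifted out of the low word carry into the high word.
    *high = (*high << shift) | (*low >> (32 - shift));
    *low <<= shift;
  } else {
    *high = *low << (shift - 32);
    *low = 0;
  }
}

int main() {
  uint32_t lo = 0x80000001u, hi = 0;
  ShlPairLike(&lo, &hi, 1);
  assert(lo == 2u && hi == 1u);  // bit 31 of low carried into high
}
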
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitRRO(this, kMipsRor, node);
@@ -429,6 +484,13 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitWord32Ctz(Node* node) {
MipsOperandGenerator g(this);
@@ -444,8 +506,32 @@ void InstructionSelector::VisitWord32Popcnt(Node* node) {
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
+ Int32BinopMatcher m(node);
+
+ // Select Lsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
+ // Select Lsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
- // TODO(plind): Consider multiply & add optimization from arm port.
VisitBinop(this, node, kMipsAdd);
}
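
Both patterns above, and the power-of-two-plus-one multiply rewrite in the next hunk, fold into kMipsLsa, whose selection implies the semantics result = base + (index << shift). Two quick checks of the algebra, under that assumption:

#include <cassert>
#include <cstdint>

uint32_t LsaLike(uint32_t base, uint32_t index, unsigned shift) {
  return base + (index << shift);
}

int main() {
  uint32_t a = 100, b = 7;
  // Add pattern: a + (b << 2) is a single Lsa instead of shift-then-add.
  assert(LsaLike(a, b, 2) == a + (b << 2));
  // Multiply strength reduction: x * 5 == x + (x << 2), i.e. Lsa(x, x, 2).
  assert(LsaLike(b, b, 2) == b * 5);
}
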
@@ -467,12 +553,9 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand temp = g.TempRegister();
- Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+ Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
- Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -654,17 +737,13 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kMipsCvtSD, node);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kMipsTruncWD, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kMipsTruncWD, node);
+}
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
VisitRR(this, kMipsFloat64ExtractLowWord32, node);
@@ -693,26 +772,10 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kMipsSubS, node);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
- MipsOperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
- g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
VisitRRR(this, kMipsSubD, node);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMipsMulS, node);
}
@@ -739,64 +802,28 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat32Max, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat32Max, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat64Max(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat64Max, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat64Max, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat32Min(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat32Min, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat32Min, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat64Min(Node* node) {
MipsOperandGenerator g(this);
- if (IsMipsArchVariant(kMips32r6)) {
- Emit(kMipsFloat64Min, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMipsFloat64Min, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMipsFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
@@ -809,7 +836,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kMipsAbsD, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMipsSqrtS, node);
}
@@ -864,6 +890,28 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, kMipsFloat64RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kMipsNegS, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kMipsNegD, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ MipsOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
+ g.UseFixed(node->InputAt(1), f4))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ MipsOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -873,7 +921,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -907,6 +955,104 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+ UnalignedLoadRepresentation load_rep =
+ UnalignedLoadRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsUlw;
+ break;
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsUlwc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsUldc1;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+
+ // TODO(mips): I guess this could be done in a better way.
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMipsUswc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMipsUsdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMipsUsh;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kMipsUsw;
+ break;
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
+
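VisitUnalignedLoad/VisitUnalignedStore exist because pre-R6 cores (see AlignmentRequirements at the end of this file) lack native unaligned access, so the Ulw/Usw-style macros must assemble each access from pieces. The observable semantics, in a portable sketch (byte order fixed to little-endian as on MIPS EL; the macro-assembler details differ):

#include <cassert>
#include <cstdint>

uint32_t UnalignedLoad32LE(const uint8_t* p) {
  // Reassemble four bytes, regardless of the pointer's alignment.
  return static_cast<uint32_t>(p[0]) | (static_cast<uint32_t>(p[1]) << 8) |
         (static_cast<uint32_t>(p[2]) << 16) |
         (static_cast<uint32_t>(p[3]) << 24);
}

int main() {
  uint8_t buf[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
  assert(UnalignedLoad32LE(buf + 1) == 0x12345678u);  // misaligned by 1
}
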
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
@@ -931,6 +1077,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
@@ -999,7 +1147,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
namespace {
-
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
@@ -1010,7 +1157,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1184,6 +1331,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMipsSubOvf, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMipsMulOvf, cont);
default:
break;
}
@@ -1207,7 +1357,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->frame_state());
+ g.TempImmediate(0), cont->reason(),
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
@@ -1224,14 +1375,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1318,6 +1469,14 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop(this, node, kMipsSubOvf, &cont);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kMipsMulOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMipsMulOvf, &cont);
+}
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -1388,6 +1547,81 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ InstructionOperand temps[] = {g.TempRegister()};
+ Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left),
+ arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ MipsOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
// static
MachineOperatorBuilder::Flags
@@ -1400,19 +1634,32 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven;
}
+
return flags | MachineOperatorBuilder::kWord32Ctz |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat32RoundUp |
MachineOperatorBuilder::kFloat32RoundTruncate |
- MachineOperatorBuilder::kFloat32RoundTiesEven;
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kWord32ReverseBytes |
+ MachineOperatorBuilder::kWord64ReverseBytes;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ if (IsMipsArchVariant(kMips32r6)) {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+ } else {
+ DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kMips32r2));
+ return MachineOperatorBuilder::AlignmentRequirements::
+ NoUnalignedAccessSupport();
+ }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index c6341b1210..3511ab8206 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -119,7 +119,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -359,7 +359,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} // namespace
-
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
do { \
auto result = i.Output##width##Register(); \
@@ -367,7 +366,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(2), offset); \
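+ /* Zero-extend the 32-bit offset before forming the 64-bit address; a */ \
+ /* sign-extended negative offset could otherwise escape the bounds */ \
+ /* check. The checked-store macros below use the same pattern. */ \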
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
__ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -377,7 +377,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -385,7 +384,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(2), offset); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
__ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -395,7 +395,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
do { \
Label done; \
@@ -403,7 +402,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
auto offset = i.InputRegister(0); \
auto value = i.Input##width##Register(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -414,7 +414,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(&done); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
Label done; \
@@ -422,7 +421,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
auto offset = i.InputRegister(0); \
auto value = i.InputRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
@@ -433,7 +433,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(&done); \
} while (0)
-
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (kArchVariant == kMips64r6) { \
__ cfc1(kScratchReg, FCSR); \
@@ -484,28 +483,48 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(&done); \
}
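+// A plain load followed by a full barrier ('sync') is assumed sufficient
+// for a sequentially consistent atomic load on MIPS; atomic stores are
+// bracketed by barriers on both sides.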
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
+ __ sync(); \
+ } while (0)
+
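+// The ieee754 helpers call into the C implementations of the math
+// functions: a manual FrameScope plus PrepareCallCFunction sets up the C
+// ABI, and the FP arguments and result are moved through the parameter
+// and result registers.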
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ /* Move the result into the double result register. */ \

+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ /* Move the result into the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(sp, fp);
__ Pop(ra, fp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ daddiu(sp, sp, sp_slot_delta * kPointerSize);
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -538,8 +557,41 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
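+// Emits a single sp adjustment from the current slot count to the one the
+// tail call expects. Shrinking can be suppressed (see the before-gap case
+// below) so that slots which pending gap moves may still read are not
+// freed early.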
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
MipsOperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -559,8 +611,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -574,6 +624,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(at);
}
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -598,8 +656,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -608,6 +664,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -618,7 +675,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -642,6 +699,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -651,7 +719,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -698,6 +768,71 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Operand(offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ break;
+ }
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -719,6 +854,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64MulOvf:
+ // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ break;
case kMips64MulHigh:
__ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -775,12 +913,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64DmodU:
__ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Dlsa:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2));
+ break;
+ case kMips64Lsa:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2));
+ break;
case kMips64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
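+ // In the 32-bit variants below, 'sll reg, reg, 0' sign-extends the low
+ // word, re-canonicalizing 32-bit values held in 64-bit registers before
+ // the bitwise operation.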
+ case kMips64And32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Or32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
case kMips64Nor:
if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -789,9 +957,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
}
break;
+ case kMips64Nor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK(i.InputOperand(1).immediate() == 0);
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
case kMips64Xor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Xor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ }
+ break;
case kMips64Clz:
__ Clz(i.OutputRegister(), i.InputRegister(0));
break;
@@ -945,18 +1134,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srl(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ sra(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
@@ -1097,6 +1290,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64AbsS:
__ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
+ case kMips64NegS:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
case kMips64SqrtS: {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -1146,6 +1342,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64AbsD:
__ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kMips64NegD:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -1190,62 +1389,53 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
break;
}
- case kMips64Float64Max: {
- // (b < a) ? a : b
- if (kArchVariant == kMips64r6) {
- __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
- // Left operand is result, passthrough if false.
- __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMips64Float32Max: {
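+ // The Max/Min NaN-check helpers are expected to branch to compare_nan
+ // when either input is NaN; that path materializes a canonical quiet
+ // NaN result.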
+ Label compare_nan, done_compare;
+ __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputSingleRegister(),
+ std::numeric_limits<float>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMips64Float64Min: {
- // (a < b) ? a : b
- if (kArchVariant == kMips64r6) {
- __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
- __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
- // Right operand is result, passthrough if false.
- __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMips64Float64Max: {
+ Label compare_nan, done_compare;
+ __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputDoubleRegister(),
+ std::numeric_limits<double>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMips64Float32Max: {
- // (b < a) ? a : b
- if (kArchVariant == kMips64r6) {
- __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1));
- // Left operand is result, passthrough if false.
- __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMips64Float32Min: {
+ Label compare_nan, done_compare;
+ __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
+ i.InputSingleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputSingleRegister(),
+ std::numeric_limits<float>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
- case kMips64Float32Min: {
- // (a < b) ? a : b
- if (kArchVariant == kMips64r6) {
- __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
- __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(0));
- } else {
- __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0));
- // Right operand is result, passthrough if false.
- __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
- }
+ case kMips64Float64Min: {
+ Label compare_nan, done_compare;
+ __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), &compare_nan);
+ __ Branch(&done_compare);
+ __ bind(&compare_nan);
+ __ Move(i.OutputDoubleRegister(),
+ std::numeric_limits<double>::quiet_NaN());
+ __ bind(&done_compare);
break;
}
+ case kMips64Float64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kMips64CvtSD:
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
@@ -1339,6 +1529,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
FPURegister scratch = kScratchDoubleReg;
__ trunc_w_s(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
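+ // out + 1 is less than out (signed) only when out == INT32_MAX, so the
+ // Movn below rewrites exactly the saturated result to INT32_MIN.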
+ __ addiu(kScratchReg, i.OutputRegister(), 1);
+ __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
+ __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
break;
}
case kMips64TruncLS: {
@@ -1407,6 +1602,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
__ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
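+ // out + 1 wraps to zero only when out == UINT32_MAX, so the Movz below
+ // rewrites exactly the saturated result to 0.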
+ __ addiu(kScratchReg, i.OutputRegister(), 1);
+ __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
break;
}
case kMips64TruncUlS: {
@@ -1457,42 +1656,85 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Lhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMips64Ulhu:
+ __ Ulhu(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMips64Lh:
__ lh(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMips64Ulh:
+ __ Ulh(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMips64Sh:
__ sh(i.InputRegister(2), i.MemoryOperand());
break;
+ case kMips64Ush:
+ __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+ break;
case kMips64Lw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMips64Ulw:
+ __ Ulw(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Lwu:
+ __ lwu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Ulwu:
+ __ Ulwu(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMips64Ld:
__ ld(i.OutputRegister(), i.MemoryOperand());
break;
+ case kMips64Uld:
+ __ Uld(i.OutputRegister(), i.MemoryOperand());
+ break;
case kMips64Sw:
__ sw(i.InputRegister(2), i.MemoryOperand());
break;
+ case kMips64Usw:
+ __ Usw(i.InputRegister(2), i.MemoryOperand());
+ break;
case kMips64Sd:
__ sd(i.InputRegister(2), i.MemoryOperand());
break;
+ case kMips64Usd:
+ __ Usd(i.InputRegister(2), i.MemoryOperand());
+ break;
case kMips64Lwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
+ case kMips64Ulwc1: {
+ __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
+ break;
+ }
case kMips64Swc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
}
+ case kMips64Uswc1: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+ break;
+ }
case kMips64Ldc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
+ case kMips64Uldc1:
+ __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
+ break;
case kMips64Sdc1:
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
+ case kMips64Usdc1:
+ __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+ break;
case kMips64Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1507,13 +1749,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kMips64StoreToStackSlot: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
__ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
break;
}
+ case kMips64ByteSwap64: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
+ break;
+ }
+ case kMips64ByteSwap32: {
+ __ ByteSwapUnsigned(i.OutputRegister(0), i.InputRegister(0), 4);
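+ // The swapped word ends up in the upper 32 bits, so shift it back down
+ // (and zero-extend) with dsrl32.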
+ __ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
break;
@@ -1556,7 +1807,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1643,6 +1919,20 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
break;
}
+ } else if (instr->arch_opcode() == kMips64MulOvf) {
+ switch (branch->condition) {
+ case kOverflow: {
+ __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), tlabel, flabel, kScratchReg);
+ } break;
+ case kNotOverflow: {
+ __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), flabel, tlabel, kScratchReg);
+ } break;
+ default:
+ UNSUPPORTED_COND(kMips64MulOvf, branch->condition);
+ break;
+ }
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(branch->condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -1718,7 +2008,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ xori(result, result, 1);
return;
} else if (instr->arch_opcode() == kMips64DaddOvf ||
- instr->arch_opcode() == kMips64DsubOvf) {
+ instr->arch_opcode() == kMips64DsubOvf ||
+ instr->arch_opcode() == kMips64MulOvf) {
Label flabel, tlabel;
switch (instr->arch_opcode()) {
case kMips64DaddOvf:
@@ -1730,6 +2021,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), &flabel);
break;
+ case kMips64MulOvf:
+ __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), &flabel, kScratchReg);
+ break;
default:
UNREACHABLE();
break;
@@ -1868,16 +2163,38 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
});
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
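+ // GetDeoptimizationEntry returns nullptr once the entry table is full;
+ // report this as a bailout so compilation can fail gracefully.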
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-void CodeGenerator::AssemblePrologue() {
+ const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation32(saves_fpu);
+ DCHECK(kNumCalleeSavedFPU == count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kPointerSize));
+ }
+
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation32(saves);
+ DCHECK(kNumCalleeSaved == count + 1);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1890,7 +2207,8 @@ void CodeGenerator::AssemblePrologue() {
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1901,32 +2219,25 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
- if (stack_shrink_slots > 0) {
- __ Dsubu(sp, sp, Operand(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
}
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
if (saves_fpu != 0) {
// Save callee-saved FPU registers.
__ MultiPushFPU(saves_fpu);
- int count = base::bits::CountPopulation32(saves_fpu);
- DCHECK(kNumCalleeSavedFPU == count);
- frame()->AllocateSavedCalleeRegisterSlots(count *
- (kDoubleSize / kPointerSize));
+ DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ MultiPush(saves);
- // kNumCalleeSaved includes the fp register, but the fp register
- // is saved separately in TF.
- int count = base::bits::CountPopulation32(saves);
- DCHECK(kNumCalleeSaved == count + 1);
- frame()->AllocateSavedCalleeRegisterSlots(count);
+ DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
}
}
@@ -1997,13 +2308,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ li(dst, Operand(src.ToInt32()));
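+ // Wasm size, memory and global references keep their relocation mode so
+ // that the embedded constant can be patched later, e.g. when the wasm
+ // memory grows or moves.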
+ if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ __ li(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ li(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kFloat32:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kInt64:
- __ li(dst, Operand(src.ToInt64()));
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ __ li(dst, Operand(src.ToInt64()));
+ }
break;
case Constant::kFloat64:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
@@ -2030,7 +2351,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ sw(at, dst);
@@ -2040,27 +2361,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ sdc1(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ sdc1(src, g.ToMemOperand(destination));
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ ldc1(g.ToDoubleRegister(destination), src);
} else {
FPURegister temp = kScratchDoubleReg;
@@ -2104,23 +2425,23 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ ld(temp_1, dst);
__ sd(temp_0, dst);
__ sd(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
FPURegister dst = g.ToDoubleRegister(destination);
__ Move(temp, src);
__ Move(src, dst);
__ Move(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
__ ldc1(src, dst);
__ sdc1(temp, dst);
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
@@ -2146,13 +2467,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // Unused on 32-bit ARM. Still exists on 64-bit arm.
- // TODO(plind): Unclear when this is called now. Understand, fix if needed.
- __ nop(); // Maybe PROPERTY_ACCESS_INLINED?
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 9e94c090cd..e3dedd1750 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -19,6 +19,7 @@ namespace compiler {
V(Mips64Dsub) \
V(Mips64DsubOvf) \
V(Mips64Mul) \
+ V(Mips64MulOvf) \
V(Mips64MulHigh) \
V(Mips64DMulHigh) \
V(Mips64MulHighU) \
@@ -32,10 +33,16 @@ namespace compiler {
V(Mips64ModU) \
V(Mips64DmodU) \
V(Mips64And) \
+ V(Mips64And32) \
V(Mips64Or) \
+ V(Mips64Or32) \
V(Mips64Nor) \
+ V(Mips64Nor32) \
V(Mips64Xor) \
+ V(Mips64Xor32) \
V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
V(Mips64Shl) \
V(Mips64Shr) \
V(Mips64Sar) \
@@ -63,6 +70,7 @@ namespace compiler {
V(Mips64DivS) \
V(Mips64ModS) \
V(Mips64AbsS) \
+ V(Mips64NegS) \
V(Mips64SqrtS) \
V(Mips64MaxS) \
V(Mips64MinS) \
@@ -73,6 +81,7 @@ namespace compiler {
V(Mips64DivD) \
V(Mips64ModD) \
V(Mips64AbsD) \
+ V(Mips64NegD) \
V(Mips64SqrtD) \
V(Mips64MaxD) \
V(Mips64MinD) \
@@ -112,28 +121,44 @@ namespace compiler {
V(Mips64Lbu) \
V(Mips64Sb) \
V(Mips64Lh) \
+ V(Mips64Ulh) \
V(Mips64Lhu) \
+ V(Mips64Ulhu) \
V(Mips64Sh) \
+ V(Mips64Ush) \
V(Mips64Ld) \
+ V(Mips64Uld) \
V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
V(Mips64Sw) \
+ V(Mips64Usw) \
V(Mips64Sd) \
+ V(Mips64Usd) \
V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
V(Mips64Swc1) \
+ V(Mips64Uswc1) \
V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
V(Mips64BitcastDL) \
V(Mips64BitcastLD) \
V(Mips64Float64ExtractLowWord32) \
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \
V(Mips64Float64InsertHighWord32) \
- V(Mips64Float64Max) \
- V(Mips64Float64Min) \
V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
V(Mips64Push) \
V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
V(Mips64StackClaim)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 5e2b5f2ad8..1167117d62 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -109,7 +109,14 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -122,7 +129,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -135,12 +142,29 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, &cont);
}
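+// Shared helper for VisitLoad and the sign-extending-load path of
+// VisitChangeInt32ToInt64: uses MRI mode when the index fits an
+// immediate, otherwise sums base and index into a temp register first.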
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ Node* output = nullptr) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ addr_reg, g.TempImmediate(0));
+ }
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
@@ -158,8 +182,10 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
- opcode = kMips64Lw;
+ opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Ld;
@@ -170,17 +196,7 @@ void InstructionSelector::VisitLoad(Node* node) {
return;
}
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
- } else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired load opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
- }
+ EmitLoad(this, node, opcode);
}
@@ -241,6 +257,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMips64Sw;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kMips64Sd;
@@ -312,7 +330,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
- VisitBinop(this, node, kMips64And);
+ VisitBinop(this, node, kMips64And32);
}
@@ -368,7 +386,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kMips64Or);
+ VisitBinop(this, node, kMips64Or32);
}
@@ -384,7 +402,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (!mleft.right().HasValue()) {
Mips64OperandGenerator g(this);
- Emit(kMips64Nor, g.DefineAsRegister(node),
+ Emit(kMips64Nor32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
@@ -393,11 +411,11 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
if (m.right().Is(-1)) {
// Use Nor for bit negation and eliminate constant loading for xori.
Mips64OperandGenerator g(this);
- Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(0));
return;
}
- VisitBinop(this, node, kMips64Xor);
+ VisitBinop(this, node, kMips64Xor32);
}
@@ -490,7 +508,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
Mips64OperandGenerator g(this);
Int64BinopMatcher m(node);
if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
- m.right().IsInRange(32, 63)) {
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
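+ // CanCover ensures the conversion is folded away only when this shift
+ // is its sole user; otherwise the extended value is still needed
+ // elsewhere.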
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kMips64Dshl, g.DefineSameAsFirst(node),
@@ -572,6 +590,17 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitWord32Ctz(Node* node) {
Mips64OperandGenerator g(this);
@@ -611,14 +640,66 @@ void InstructionSelector::VisitWord64Clz(Node* node) {
void InstructionSelector::VisitInt32Add(Node* node) {
Mips64OperandGenerator g(this);
- // TODO(plind): Consider multiply & add optimization from arm port.
+ Int32BinopMatcher m(node);
+
+ // Select Lsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
+ // Select Lsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ Emit(kMips64Lsa, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
VisitBinop(this, node, kMips64Add);
}
void InstructionSelector::VisitInt64Add(Node* node) {
Mips64OperandGenerator g(this);
- // TODO(plind): Consider multiply & add optimization from arm port.
+ Int64BinopMatcher m(node);
+
+ // Select Dlsa for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ if (mright.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mright.right().Value());
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
+ // Select Dlsa for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+ g.TempImmediate(shift_value));
+ return;
+ }
+ }
+
VisitBinop(this, node, kMips64Dadd);
}
@@ -645,12 +726,9 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand temp = g.TempRegister();
- Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+ Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
- Emit(kMips64Add | AddressingModeField::encode(kMode_None),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -705,12 +783,10 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
return;
}
if (base::bits::IsPowerOfTwo32(value - 1)) {
- InstructionOperand temp = g.TempRegister();
- Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
- g.UseRegister(m.left().node()),
+ // The Dlsa macro itself handles out-of-range shift amounts.
+ Emit(kMips64Dlsa, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
- g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
return;
}
if (base::bits::IsPowerOfTwo32(value + 1)) {
@@ -995,9 +1071,32 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.TempImmediate(0));
+ Node* value = node->InputAt(0);
+ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Lw;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
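+ // A 32-bit shift by zero sign-extends the low word on MIPS64, which is
+ // exactly the int32 -> int64 change required here.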
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+ }
}
@@ -1047,17 +1146,13 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kMips64CvtSD, node);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kMips64TruncWD, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kMips64TruncWD, node);
+}
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
VisitRR(this, kMips64CvtSL, node);
@@ -1116,26 +1211,10 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
VisitRRR(this, kMips64SubS, node);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
- Mips64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
- g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
VisitRRR(this, kMips64SubD, node);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kMips64MulS, node);
}
@@ -1163,64 +1242,28 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r6) {
- Emit(kMips64Float32Max, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMips64Float32Max, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMips64Float32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat64Max(Node* node) {
Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r6) {
- Emit(kMips64Float64Max, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMips64Float64Max, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMips64Float64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat32Min(Node* node) {
Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r6) {
- Emit(kMips64Float32Min, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMips64Float32Min, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMips64Float32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
-
void InstructionSelector::VisitFloat64Min(Node* node) {
Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r6) {
- Emit(kMips64Float64Min, g.DefineAsRegister(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)));
-
- } else {
- // Reverse operands, and use same reg. for result and right operand.
- Emit(kMips64Float64Min, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)));
- }
+ Emit(kMips64Float64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
@@ -1233,7 +1276,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kMips64AbsD, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMips64SqrtS, node);
}
@@ -1288,6 +1330,28 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, kMips64Float64RoundTiesEven, node);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kMips64NegS, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kMips64NegD, node);
+}
+
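+// The ieee754 ops are lowered to C calls, so operands and results are
+// pinned to fixed FP registers here and then moved into the actual C
+// parameter registers by the code generator.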
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Mips64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
+ g.UseFixed(node->InputAt(1), f4))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Mips64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
+ ->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1297,7 +1361,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1328,6 +1392,106 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
+void InstructionSelector::VisitUnalignedLoad(Node* node) {
+ UnalignedLoadRepresentation load_rep =
+ UnalignedLoadRepresentationOf(node->op());
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMips64Ulwc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMips64Uldc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Uld;
+ break;
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitUnalignedStore(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kMips64Uswc1;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kMips64Usdc1;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ UNREACHABLE();
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Ush;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Usw;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Usd;
+ break;
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
+
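For readers tracing the new unaligned opcodes (kMips64Ulwc1, kMips64Uld and friends): at the semantic level an UnalignedLoad/UnalignedStore is simply a load or store that must tolerate any address. A minimal, illustrative C++ sketch of that contract (not V8 code; portable compilers lower memcpy to the same kind of instruction sequence the selector emits here):

    #include <cstdint>
    #include <cstring>

    // Read/write a 64-bit value at a possibly unaligned address. This is the
    // behavior an UnalignedLoad/UnalignedStore of kWord64 must provide; the
    // selector above emits kMips64Uld/kMips64Usd for it.
    uint64_t LoadUnaligned64(const void* p) {
      uint64_t v;
      std::memcpy(&v, p, sizeof v);  // well-defined for any alignment
      return v;
    }

    void StoreUnaligned64(void* p, uint64_t v) {
      std::memcpy(p, &v, sizeof v);
    }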
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
@@ -1355,6 +1519,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit:
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
case MachineRepresentation::kNone:
@@ -1405,6 +1571,8 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
opcode = kCheckedStoreFloat64;
break;
case MachineRepresentation::kBit:
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
case MachineRepresentation::kNone:
@@ -1439,7 +1607,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1564,7 +1732,8 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->frame_state());
+ g.TempImmediate(0), cont->reason(),
+ cont->frame_state());
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -1664,6 +1833,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64Dsub, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kMips64MulOvf, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kMips64DaddOvf, cont);
@@ -1698,14 +1870,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1793,6 +1965,14 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop(this, node, kMips64Dsub, &cont);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kMips64MulOvf, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kMips64MulOvf, &cont);
+}
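Int32MulWithOverflow produces a (value, overflow) pair: the wrapped 32-bit product plus a flag that either feeds a branch/deoptimize continuation or, as in the default case above, is materialized through a projection. A rough C++ analogue using a compiler builtin (GCC/Clang only; purely illustrative):

    #include <cstdint>

    // Returns true on overflow; *out receives the wrapped 32-bit product,
    // mirroring the value/overflow pair of Int32MulWithOverflow.
    bool MulWithOverflow32(int32_t a, int32_t b, int32_t* out) {
      return __builtin_mul_overflow(a, b, out);
    }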
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -1898,6 +2078,9 @@ void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kMips64Float64SilenceNaN, node);
+}
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Mips64OperandGenerator g(this);
@@ -1916,21 +2099,85 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ Mips64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+ g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ }
+}
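VisitAtomicLoad/VisitAtomicStore map the kWord8/kWord16/kWord32 representations onto the generic kAtomicLoad*/kAtomicStore* opcodes; anything wider is unreachable here. The guarantee being implemented corresponds to sequentially consistent std::atomic accesses, roughly (illustrative sketch, not the V8 lowering):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> cell{0};

    // What kAtomicLoadWord32 / kAtomicStoreWord32 must provide.
    uint32_t AtomicLoadWord32() { return cell.load(std::memory_order_seq_cst); }
    void AtomicStoreWord32(uint32_t v) { cell.store(v, std::memory_order_seq_cst); }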
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kWord32Ctz |
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
+ return flags | MachineOperatorBuilder::kWord32Ctz |
MachineOperatorBuilder::kWord64Ctz |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
- MachineOperatorBuilder::kFloat64Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat64RoundUp |
@@ -1938,7 +2185,22 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat32RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven |
- MachineOperatorBuilder::kFloat32RoundTiesEven;
+ MachineOperatorBuilder::kFloat32RoundTiesEven |
+ MachineOperatorBuilder::kWord32ReverseBytes |
+ MachineOperatorBuilder::kWord64ReverseBytes;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ if (kArchVariant == kMips64r6) {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ return MachineOperatorBuilder::AlignmentRequirements::
+ NoUnalignedAccessSupport();
+ }
}
} // namespace compiler
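The new AlignmentRequirements() hook encodes the architectural split: MIPS64r6 handles unaligned accesses natively, while r2 must route them through the UnalignedLoad/UnalignedStore operators added above. The dispatch itself is a one-liner; a plain-C++ sketch of the same pattern (names illustrative):

    enum class ArchVariant { kMips64r2, kMips64r6 };
    enum class Alignment { kFullUnalignedSupport, kNoUnalignedSupport };

    // r6 can issue ordinary loads/stores at any address; r2 needs the
    // dedicated unaligned pseudo-instructions instead.
    Alignment RequirementsFor(ArchVariant v) {
      return v == ArchVariant::kMips64r6 ? Alignment::kFullUnalignedSupport
                                         : Alignment::kNoUnalignedSupport;
    }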
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index 477f139a14..482c254de1 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -24,16 +24,12 @@ struct MoveKeyCompare {
}
};
-struct OperandCompare {
- bool operator()(const InstructionOperand& a,
- const InstructionOperand& b) const {
- return a.CompareCanonicalized(b);
- }
-};
-
typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
+bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
+ return set.find(operand) != set.end();
+}
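The Blocks() helper introduced here is plain set membership, named so the call sites below read as intent ("this operand is blocked") rather than as iterator plumbing. Generic equivalent (sketch):

    #include <set>

    // Same shape as move-optimizer.cc's Blocks(), over a std::set.
    template <typename T>
    bool Blocks(const std::set<T>& s, const T& value) {
      return s.find(value) != s.end();
    }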
int FindFirstNonEmptySlot(const Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
@@ -123,7 +119,7 @@ void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
// The ret instruction makes any assignment before it unnecessary, except for
// the one for its input.
- if (instruction->opcode() == ArchOpcode::kArchRet) {
+ if (instruction->IsRet() || instruction->IsTailCall()) {
for (MoveOperands* move : *moves) {
if (inputs.find(move->destination()) == inputs.end()) {
move->Eliminate();
@@ -138,8 +134,8 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
ParallelMove* from_moves = from->parallel_moves()[0];
if (from_moves == nullptr || from_moves->empty()) return;
- ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
- ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());
+ OperandSet dst_cant_be(local_zone());
+ OperandSet src_cant_be(local_zone());
// If an operand is an input to the instruction, we cannot move assignments
// where it appears on the LHS.
@@ -172,7 +168,7 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
// destination operands are eligible for being moved down.
for (MoveOperands* move : *from_moves) {
if (move->IsRedundant()) continue;
- if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
+ if (!Blocks(dst_cant_be, move->destination())) {
MoveKey key = {move->source(), move->destination()};
move_candidates.insert(key);
}
@@ -187,7 +183,7 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
auto current = iter;
++iter;
InstructionOperand src = current->source;
- if (src_cant_be.find(src) != src_cant_be.end()) {
+ if (Blocks(src_cant_be, src)) {
src_cant_be.insert(current->destination);
move_candidates.erase(current);
changed = true;
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index 79c342b44e..061a3ae4f4 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -115,6 +115,9 @@ void NodeCache<Key, Hash, Pred>::GetCachedNodes(ZoneVector<Node*>* nodes) {
template class NodeCache<int32_t>;
template class NodeCache<int64_t>;
+template class NodeCache<RelocInt32Key>;
+template class NodeCache<RelocInt64Key>;
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/node-cache.h b/deps/v8/src/compiler/node-cache.h
index a8f9071af0..7063a3b0b4 100644
--- a/deps/v8/src/compiler/node-cache.h
+++ b/deps/v8/src/compiler/node-cache.h
@@ -63,6 +63,14 @@ class NodeCache final {
// Various default cache types.
typedef NodeCache<int32_t> Int32NodeCache;
typedef NodeCache<int64_t> Int64NodeCache;
+
+// All we want is the numeric value of the RelocInfo::Mode enum. We typedef
+// it below to avoid pulling in assembler.h.
+typedef char RelocInfoMode;
+typedef std::pair<int32_t, RelocInfoMode> RelocInt32Key;
+typedef std::pair<int64_t, RelocInfoMode> RelocInt64Key;
+typedef NodeCache<RelocInt32Key> RelocInt32NodeCache;
+typedef NodeCache<RelocInt64Key> RelocInt64NodeCache;
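The point of RelocInt32Key/RelocInt64Key is that the cache key becomes (constant, relocation mode), so two constants with identical bits but different RelocInfo modes stay distinct nodes. A stand-in with std::map that shows only the keying (V8's NodeCache is a custom fixed-size hash cache):

    #include <cstdint>
    #include <map>
    #include <utility>

    struct Node;  // opaque for this sketch

    // Identical values with different reloc modes occupy different slots.
    using RelocInt64Key = std::pair<int64_t, char>;
    std::map<RelocInt64Key, Node*> reloc_cache;

    Node** Find(int64_t value, char reloc_mode) {
      return &reloc_cache[RelocInt64Key(value, reloc_mode)];
    }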
#if V8_HOST_ARCH_32_BIT
typedef Int32NodeCache IntPtrNodeCache;
#else
diff --git a/deps/v8/src/compiler/node-marker.h b/deps/v8/src/compiler/node-marker.h
index 5ef2063f18..84666d5f07 100644
--- a/deps/v8/src/compiler/node-marker.h
+++ b/deps/v8/src/compiler/node-marker.h
@@ -42,9 +42,22 @@ class NodeMarkerBase {
DISALLOW_COPY_AND_ASSIGN(NodeMarkerBase);
};
-
-// A NodeMarker uses monotonically increasing marks to assign local "states"
-// to nodes. Only one NodeMarker per graph is valid at a given time.
+// A NodeMarker assigns a local "state" to every node of a graph in constant
+// memory. Only one NodeMarker per graph is valid at a given time, that is,
+// after you create a NodeMarker you should no longer use NodeMarkers that
+// were created earlier. Internally, the local state is stored in the Node
+// structure.
+//
+// When you initialize a NodeMarker, all the local states are conceptually
+// set to State(0) in constant time.
+//
+// In its current implementation, in debug mode NodeMarker will try to
+// (efficiently) detect invalid use of an older NodeMarker. Namely, if you get
+// or set a node with a NodeMarker, and then get or set that node
+// with an older NodeMarker, you will get a crash.
+//
+// GraphReducer uses a NodeMarker, so individual Reducers cannot use a
+// NodeMarker.
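The trick behind the "all states reset in constant time" claim in the new comment: each marker reserves a fresh range of mark values, and a node's stored mark counts as State(0) unless it falls inside the current marker's range. A minimal sketch under that assumption (dense node ids, marks stored per node; illustrative, not V8's layout):

    #include <cstdint>
    #include <vector>

    class Marker {
     public:
      // Reserves [*next_mark, *next_mark + num_states) for this marker;
      // creating a newer marker implicitly invalidates this one.
      Marker(uint32_t num_states, std::vector<uint64_t>* marks,
             uint64_t* next_mark)
          : base_(*next_mark), marks_(marks) {
        *next_mark = base_ + num_states;
      }
      uint32_t Get(size_t node_id) const {
        uint64_t m = (*marks_)[node_id];
        return m < base_ ? 0u : static_cast<uint32_t>(m - base_);  // stale -> 0
      }
      void Set(size_t node_id, uint32_t state) {
        (*marks_)[node_id] = base_ + state;
      }

     private:
      uint64_t base_;
      std::vector<uint64_t>* marks_;
    };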
template <typename State>
class NodeMarker : public NodeMarkerBase {
public:
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 37d0e1a561..10aed51a57 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -128,6 +128,7 @@ struct IntMatcher final : public ValueMatcher<T, kOpcode> {
return this->HasValue() && this->Value() < 0 &&
(-this->Value() & (-this->Value() - 1)) == 0;
}
+ bool IsNegative() const { return this->HasValue() && this->Value() < 0; }
};
typedef IntMatcher<int32_t, IrOpcode::kInt32Constant> Int32Matcher;
@@ -157,6 +158,7 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
bool IsMinusZero() const {
return this->Is(0.0) && std::signbit(this->Value());
}
+ bool IsNegative() const { return this->HasValue() && this->Value() < 0.0; }
bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
};
@@ -171,6 +173,10 @@ struct HeapObjectMatcher final
: public ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant> {
explicit HeapObjectMatcher(Node* node)
: ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>(node) {}
+
+ bool Is(Handle<HeapObject> const& value) const {
+ return this->HasValue() && this->Value().address() == value.address();
+ }
};
@@ -253,7 +259,8 @@ typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher;
typedef BinopMatcher<Float32Matcher, Float32Matcher> Float32BinopMatcher;
typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
-
+typedef BinopMatcher<HeapObjectMatcher, HeapObjectMatcher>
+ HeapObjectBinopMatcher;
template <class BinopMatcher, IrOpcode::Value kMulOpcode,
IrOpcode::Value kShiftOpcode>
@@ -312,11 +319,12 @@ typedef ScaleMatcher<Int32BinopMatcher, IrOpcode::kInt32Mul,
typedef ScaleMatcher<Int64BinopMatcher, IrOpcode::kInt64Mul,
IrOpcode::kWord64Shl> Int64ScaleMatcher;
-
-template <class BinopMatcher, IrOpcode::Value kAddOpcode,
- IrOpcode::Value kMulOpcode, IrOpcode::Value kShiftOpcode>
+template <class BinopMatcher, IrOpcode::Value AddOpcode,
+ IrOpcode::Value SubOpcode, IrOpcode::Value kMulOpcode,
+ IrOpcode::Value kShiftOpcode>
struct AddMatcher : public BinopMatcher {
- static const IrOpcode::Value kOpcode = kAddOpcode;
+ static const IrOpcode::Value kAddOpcode = AddOpcode;
+ static const IrOpcode::Value kSubOpcode = SubOpcode;
typedef ScaleMatcher<BinopMatcher, kMulOpcode, kShiftOpcode> Matcher;
AddMatcher(Node* node, bool allow_input_swap)
@@ -367,6 +375,9 @@ struct AddMatcher : public BinopMatcher {
if (this->right().opcode() == kAddOpcode &&
this->left().opcode() != kAddOpcode) {
this->SwapInputs();
+ } else if (this->right().opcode() == kSubOpcode &&
+ this->left().opcode() != kSubOpcode) {
+ this->SwapInputs();
}
}
@@ -374,21 +385,35 @@ struct AddMatcher : public BinopMatcher {
bool power_of_two_plus_one_;
};
-typedef AddMatcher<Int32BinopMatcher, IrOpcode::kInt32Add, IrOpcode::kInt32Mul,
- IrOpcode::kWord32Shl> Int32AddMatcher;
-typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Mul,
- IrOpcode::kWord64Shl> Int64AddMatcher;
+typedef AddMatcher<Int32BinopMatcher, IrOpcode::kInt32Add, IrOpcode::kInt32Sub,
+ IrOpcode::kInt32Mul, IrOpcode::kWord32Shl>
+ Int32AddMatcher;
+typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Sub,
+ IrOpcode::kInt64Mul, IrOpcode::kWord64Shl>
+ Int64AddMatcher;
+enum DisplacementMode { kPositiveDisplacement, kNegativeDisplacement };
+
+enum class AddressOption : uint8_t {
+ kAllowNone = 0u,
+ kAllowInputSwap = 1u << 0,
+ kAllowScale = 1u << 1,
+ kAllowAll = kAllowInputSwap | kAllowScale
+};
+
+typedef base::Flags<AddressOption, uint8_t> AddressOptions;
+DEFINE_OPERATORS_FOR_FLAGS(AddressOptions);
template <class AddMatcher>
struct BaseWithIndexAndDisplacementMatcher {
- BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
+ BaseWithIndexAndDisplacementMatcher(Node* node, AddressOptions options)
: matches_(false),
index_(nullptr),
scale_(0),
base_(nullptr),
- displacement_(nullptr) {
- Initialize(node, allow_input_swap);
+ displacement_(nullptr),
+ displacement_mode_(kPositiveDisplacement) {
+ Initialize(node, options);
}
explicit BaseWithIndexAndDisplacementMatcher(Node* node)
@@ -396,8 +421,12 @@ struct BaseWithIndexAndDisplacementMatcher {
index_(nullptr),
scale_(0),
base_(nullptr),
- displacement_(nullptr) {
- Initialize(node, node->op()->HasProperty(Operator::kCommutative));
+ displacement_(nullptr),
+ displacement_mode_(kPositiveDisplacement) {
+ Initialize(node, AddressOption::kAllowScale |
+ (node->op()->HasProperty(Operator::kCommutative)
+ ? AddressOption::kAllowInputSwap
+ : AddressOption::kAllowNone));
}
bool matches() const { return matches_; }
@@ -405,6 +434,7 @@ struct BaseWithIndexAndDisplacementMatcher {
int scale() const { return scale_; }
Node* base() const { return base_; }
Node* displacement() const { return displacement_; }
+ DisplacementMode displacement_mode() const { return displacement_mode_; }
private:
bool matches_;
@@ -412,8 +442,9 @@ struct BaseWithIndexAndDisplacementMatcher {
int scale_;
Node* base_;
Node* displacement_;
+ DisplacementMode displacement_mode_;
- void Initialize(Node* node, bool allow_input_swap) {
+ void Initialize(Node* node, AddressOptions options) {
// The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
// displacements and scale factors that are used as inputs, so instead of
// enumerating all possible patterns by brute force, checking for node
@@ -431,7 +462,7 @@ struct BaseWithIndexAndDisplacementMatcher {
// (B + D)
// (B + B)
if (node->InputCount() < 2) return;
- AddMatcher m(node, allow_input_swap);
+ AddMatcher m(node, options & AddressOption::kAllowInputSwap);
Node* left = m.left().node();
Node* right = m.right().node();
Node* displacement = nullptr;
@@ -439,83 +470,124 @@ struct BaseWithIndexAndDisplacementMatcher {
Node* index = nullptr;
Node* scale_expression = nullptr;
bool power_of_two_plus_one = false;
+ DisplacementMode displacement_mode = kPositiveDisplacement;
int scale = 0;
if (m.HasIndexInput() && left->OwnedBy(node)) {
index = m.IndexInput();
scale = m.scale();
scale_expression = left;
power_of_two_plus_one = m.power_of_two_plus_one();
- if (right->opcode() == AddMatcher::kOpcode && right->OwnedBy(node)) {
+ bool match_found = false;
+ if (right->opcode() == AddMatcher::kSubOpcode && right->OwnedBy(node)) {
AddMatcher right_matcher(right);
if (right_matcher.right().HasValue()) {
- // (S + (B + D))
+ // (S + (B - D))
base = right_matcher.left().node();
displacement = right_matcher.right().node();
+ displacement_mode = kNegativeDisplacement;
+ match_found = true;
+ }
+ }
+ if (!match_found) {
+ if (right->opcode() == AddMatcher::kAddOpcode && right->OwnedBy(node)) {
+ AddMatcher right_matcher(right);
+ if (right_matcher.right().HasValue()) {
+ // (S + (B + D))
+ base = right_matcher.left().node();
+ displacement = right_matcher.right().node();
+ } else {
+ // (S + (B + B))
+ base = right;
+ }
+ } else if (m.right().HasValue()) {
+ // (S + D)
+ displacement = right;
} else {
- // (S + (B + B))
+ // (S + B)
base = right;
}
- } else if (m.right().HasValue()) {
- // (S + D)
- displacement = right;
- } else {
- // (S + B)
- base = right;
}
} else {
- if (left->opcode() == AddMatcher::kOpcode && left->OwnedBy(node)) {
+ bool match_found = false;
+ if (left->opcode() == AddMatcher::kSubOpcode && left->OwnedBy(node)) {
AddMatcher left_matcher(left);
Node* left_left = left_matcher.left().node();
Node* left_right = left_matcher.right().node();
- if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
- if (left_matcher.right().HasValue()) {
- // ((S + D) + B)
+ if (left_matcher.right().HasValue()) {
+ if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
+ // ((S - D) + B)
index = left_matcher.IndexInput();
scale = left_matcher.scale();
scale_expression = left_left;
power_of_two_plus_one = left_matcher.power_of_two_plus_one();
displacement = left_right;
+ displacement_mode = kNegativeDisplacement;
base = right;
- } else if (m.right().HasValue()) {
- // ((S + B) + D)
- index = left_matcher.IndexInput();
- scale = left_matcher.scale();
- scale_expression = left_left;
- power_of_two_plus_one = left_matcher.power_of_two_plus_one();
- base = left_right;
- displacement = right;
} else {
- // (B + B)
- index = left;
- base = right;
- }
- } else {
- if (left_matcher.right().HasValue()) {
- // ((B + D) + B)
+ // ((B - D) + B)
index = left_left;
displacement = left_right;
+ displacement_mode = kNegativeDisplacement;
base = right;
- } else if (m.right().HasValue()) {
- // ((B + B) + D)
- index = left_left;
- base = left_right;
+ }
+ match_found = true;
+ }
+ }
+ if (!match_found) {
+ if (left->opcode() == AddMatcher::kAddOpcode && left->OwnedBy(node)) {
+ AddMatcher left_matcher(left);
+ Node* left_left = left_matcher.left().node();
+ Node* left_right = left_matcher.right().node();
+ if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
+ if (left_matcher.right().HasValue()) {
+ // ((S + D) + B)
+ index = left_matcher.IndexInput();
+ scale = left_matcher.scale();
+ scale_expression = left_left;
+ power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+ displacement = left_right;
+ base = right;
+ } else if (m.right().HasValue()) {
+ // ((S + B) + D)
+ index = left_matcher.IndexInput();
+ scale = left_matcher.scale();
+ scale_expression = left_left;
+ power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+ base = left_right;
+ displacement = right;
+ } else {
+ // (B + B)
+ index = left;
+ base = right;
+ }
+ } else {
+ if (left_matcher.right().HasValue()) {
+ // ((B + D) + B)
+ index = left_left;
+ displacement = left_right;
+ base = right;
+ } else if (m.right().HasValue()) {
+ // ((B + B) + D)
+ index = left_left;
+ base = left_right;
+ displacement = right;
+ } else {
+ // (B + B)
+ index = left;
+ base = right;
+ }
+ }
+ } else {
+ if (m.right().HasValue()) {
+ // (B + D)
+ base = left;
displacement = right;
} else {
// (B + B)
- index = left;
- base = right;
+ base = left;
+ index = right;
}
}
- } else {
- if (m.right().HasValue()) {
- // (B + D)
- base = left;
- displacement = right;
- } else {
- // (B + B)
- base = left;
- index = right;
- }
}
}
int64_t value = 0;
@@ -549,8 +621,13 @@ struct BaseWithIndexAndDisplacementMatcher {
base = index;
}
}
+ if (!(options & AddressOption::kAllowScale) && scale != 0) {
+ index = scale_expression;
+ scale = 0;
+ }
base_ = base;
displacement_ = displacement;
+ displacement_mode_ = displacement_mode;
index_ = index;
scale_ = scale;
matches_ = true;
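The net effect of the DisplacementMode plumbing: subtraction forms like (S + (B - D)) and ((S - D) + B) now match, with the matcher recording kNegativeDisplacement instead of bailing out. What a consumer ultimately computes from a match is the usual base/index/scale/displacement effective address; sketch:

    #include <cstdint>

    enum DisplacementMode { kPositiveDisplacement, kNegativeDisplacement };

    // base + (index << scale) +/- displacement, per the matcher's fields.
    int64_t EffectiveAddress(int64_t base, int64_t index, int scale,
                             int64_t displacement, DisplacementMode mode) {
      int64_t d = (mode == kNegativeDisplacement) ? -displacement : displacement;
      return base + (index << scale) + d;
    }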
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index ac9cc34dd9..22539cbfb4 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -62,9 +62,9 @@ Node* NodeProperties::GetContextInput(Node* node) {
// static
-Node* NodeProperties::GetFrameStateInput(Node* node, int index) {
- DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
- return node->InputAt(FirstFrameStateIndex(node) + index);
+Node* NodeProperties::GetFrameStateInput(Node* node) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ return node->InputAt(FirstFrameStateIndex(node));
}
@@ -158,8 +158,9 @@ void NodeProperties::ReplaceContextInput(Node* node, Node* context) {
// static
-void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
- node->ReplaceInput(FirstControlIndex(node), control);
+void NodeProperties::ReplaceControlInput(Node* node, Node* control, int index) {
+ DCHECK(index < node->op()->ControlInputCount());
+ node->ReplaceInput(FirstControlIndex(node) + index, control);
}
@@ -171,17 +172,9 @@ void NodeProperties::ReplaceEffectInput(Node* node, Node* effect, int index) {
// static
-void NodeProperties::ReplaceFrameStateInput(Node* node, int index,
- Node* frame_state) {
- DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
- node->ReplaceInput(FirstFrameStateIndex(node) + index, frame_state);
-}
-
-
-// static
-void NodeProperties::RemoveFrameStateInput(Node* node, int index) {
- DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
- node->RemoveInput(FirstFrameStateIndex(node) + index);
+void NodeProperties::ReplaceFrameStateInput(Node* node, Node* frame_state) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
}
@@ -221,7 +214,8 @@ void NodeProperties::ReplaceUses(Node* node, Node* value, Node* effect,
DCHECK_NOT_NULL(exception);
edge.UpdateTo(exception);
} else {
- UNREACHABLE();
+ DCHECK_NOT_NULL(success);
+ edge.UpdateTo(success);
}
} else if (IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
@@ -242,6 +236,18 @@ void NodeProperties::ChangeOp(Node* node, const Operator* new_op) {
// static
+Node* NodeProperties::FindFrameStateBefore(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ while (effect->opcode() != IrOpcode::kCheckpoint) {
+ if (effect->opcode() == IrOpcode::kDead) return effect;
+ DCHECK_EQ(1, effect->op()->EffectInputCount());
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ Node* frame_state = GetFrameStateInput(effect);
+ return frame_state;
+}
+
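FindFrameStateBefore walks the effect chain backwards (assumed linear, per the DCHECK) until it reaches a Checkpoint, then returns that checkpoint's frame state, short-circuiting on Dead. A toy version of the walk (hand-rolled node shape, illustrative only):

    enum class Op { kCheckpoint, kDead, kOther };

    struct ToyNode {
      Op op;
      ToyNode* effect_input;       // single effect input: linear chain
      ToyNode* frame_state_input;  // meaningful only for kCheckpoint
    };

    ToyNode* FindFrameStateBefore(ToyNode* node) {
      ToyNode* effect = node->effect_input;
      while (effect->op != Op::kCheckpoint) {
        if (effect->op == Op::kDead) return effect;  // dead code: give up
        effect = effect->effect_input;
      }
      return effect->frame_state_input;
    }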
+// static
Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
for (auto use : node->uses()) {
if (use->opcode() == IrOpcode::kProjection &&
@@ -349,7 +355,6 @@ MaybeHandle<Context> NodeProperties::GetSpecializationNativeContext(
case IrOpcode::kJSCreateBlockContext:
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateFunctionContext:
- case IrOpcode::kJSCreateModuleContext:
case IrOpcode::kJSCreateScriptContext:
case IrOpcode::kJSCreateWithContext: {
// Skip over the intermediate contexts, we're only interested in the
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 58005a7153..9812158c4e 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -41,7 +41,7 @@ class NodeProperties final {
static Node* GetValueInput(Node* node, int index);
static Node* GetContextInput(Node* node);
- static Node* GetFrameStateInput(Node* node, int index);
+ static Node* GetFrameStateInput(Node* node);
static Node* GetEffectInput(Node* node, int index = 0);
static Node* GetControlInput(Node* node, int index = 0);
@@ -81,10 +81,9 @@ class NodeProperties final {
static void ReplaceValueInput(Node* node, Node* value, int index);
static void ReplaceContextInput(Node* node, Node* context);
- static void ReplaceControlInput(Node* node, Node* control);
+ static void ReplaceControlInput(Node* node, Node* control, int index = 0);
static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
- static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
- static void RemoveFrameStateInput(Node* node, int index);
+ static void ReplaceFrameStateInput(Node* node, Node* frame_state);
static void RemoveNonValueInputs(Node* node);
static void RemoveValueInputs(Node* node);
@@ -109,6 +108,11 @@ class NodeProperties final {
// ---------------------------------------------------------------------------
// Miscellaneous utilities.
+ // Find the last frame state that is effect-wise before the given node. This
+ // assumes a linear effect chain up to a {Checkpoint} node in the graph.
+ static Node* FindFrameStateBefore(Node* node);
+
+ // Find the output-value projection for the given output index.
static Node* FindProjection(Node* node, size_t projection_index);
// Collect the branch-related projections from a node, such as IfTrue,
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 198c353084..f4e7b17ed2 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -193,6 +193,22 @@ void Node::InsertInput(Zone* zone, int index, Node* new_to) {
Verify();
}
+void Node::InsertInputs(Zone* zone, int index, int count) {
+ DCHECK_NOT_NULL(zone);
+ DCHECK_LE(0, index);
+ DCHECK_LT(0, count);
+ DCHECK_LT(index, InputCount());
+ for (int i = 0; i < count; i++) {
+ AppendInput(zone, InputAt(Max(InputCount() - count, 0)));
+ }
+ for (int i = InputCount() - count - 1; i >= Max(index, count); --i) {
+ ReplaceInput(i, InputAt(i - count));
+ }
+ for (int i = 0; i < count; i++) {
+ ReplaceInput(index + i, nullptr);
+ }
+ Verify();
+}
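Node::InsertInputs opens a gap of `count` null inputs at `index`: it appends placeholders, shifts the tail right by `count`, and nulls the gap for callers to fill via ReplaceInput. On a plain std::vector the whole operation collapses to one call (sketch; V8 cannot use this because node inputs live in inline/out-of-line storage):

    #include <vector>

    // Insert `count` null slots at `index`, preserving input order.
    template <typename T>
    void InsertInputs(std::vector<T*>* inputs, size_t index, size_t count) {
      inputs->insert(inputs->begin() + index, count, nullptr);
    }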
void Node::RemoveInput(int index) {
DCHECK_LE(0, index);
@@ -369,7 +385,11 @@ std::ostream& operator<<(std::ostream& os, const Node& n) {
os << "(";
for (int i = 0; i < n.InputCount(); ++i) {
if (i != 0) os << ", ";
- os << n.InputAt(i)->id();
+ if (n.InputAt(i)) {
+ os << n.InputAt(i)->id();
+ } else {
+ os << "null";
+ }
}
os << ")";
}
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index c73482fa69..493518712c 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -100,6 +100,7 @@ class Node final {
void AppendInput(Zone* zone, Node* new_to);
void InsertInput(Zone* zone, int index, Node* new_to);
+ void InsertInputs(Zone* zone, int index, int count);
void RemoveInput(int index);
void NullAllInputs();
void TrimInputCount(int new_input_count);
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index b038d154b7..c1b5945e60 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -32,31 +32,38 @@
V(End)
// Opcodes for constant operators.
-#define CONSTANT_OP_LIST(V) \
- V(Int32Constant) \
- V(Int64Constant) \
- V(Float32Constant) \
- V(Float64Constant) \
- V(ExternalConstant) \
- V(NumberConstant) \
- V(HeapConstant)
-
-#define INNER_OP_LIST(V) \
- V(Select) \
- V(Phi) \
- V(EffectSet) \
- V(EffectPhi) \
- V(Guard) \
- V(BeginRegion) \
- V(FinishRegion) \
- V(FrameState) \
- V(StateValues) \
- V(TypedStateValues) \
- V(ObjectState) \
- V(Call) \
- V(Parameter) \
- V(OsrValue) \
- V(Projection)
+#define CONSTANT_OP_LIST(V) \
+ V(Int32Constant) \
+ V(Int64Constant) \
+ V(Float32Constant) \
+ V(Float64Constant) \
+ V(ExternalConstant) \
+ V(NumberConstant) \
+ V(HeapConstant) \
+ V(RelocatableInt32Constant) \
+ V(RelocatableInt64Constant)
+
+#define INNER_OP_LIST(V) \
+ V(Select) \
+ V(Phi) \
+ V(EffectPhi) \
+ V(InductionVariablePhi) \
+ V(Checkpoint) \
+ V(BeginRegion) \
+ V(FinishRegion) \
+ V(FrameState) \
+ V(StateValues) \
+ V(TypedStateValues) \
+ V(ObjectState) \
+ V(Call) \
+ V(Parameter) \
+ V(OsrValue) \
+ V(LoopExit) \
+ V(LoopExitValue) \
+ V(LoopExitEffect) \
+ V(Projection) \
+ V(Retain) \
+ V(TypeGuard)
#define COMMON_OP_LIST(V) \
CONSTANT_OP_LIST(V) \
@@ -136,21 +143,22 @@
V(JSCreateCatchContext) \
V(JSCreateWithContext) \
V(JSCreateBlockContext) \
- V(JSCreateModuleContext) \
V(JSCreateScriptContext)
-#define JS_OTHER_OP_LIST(V) \
- V(JSCallConstruct) \
- V(JSCallFunction) \
- V(JSCallRuntime) \
- V(JSConvertReceiver) \
- V(JSForInDone) \
- V(JSForInNext) \
- V(JSForInPrepare) \
- V(JSForInStep) \
- V(JSLoadMessage) \
- V(JSStoreMessage) \
- V(JSYield) \
+#define JS_OTHER_OP_LIST(V) \
+ V(JSCallConstruct) \
+ V(JSCallFunction) \
+ V(JSCallRuntime) \
+ V(JSConvertReceiver) \
+ V(JSForInDone) \
+ V(JSForInNext) \
+ V(JSForInPrepare) \
+ V(JSForInStep) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
+ V(JSGeneratorStore) \
+ V(JSGeneratorRestoreContinuation) \
+ V(JSGeneratorRestoreRegister) \
V(JSStackCheck)
#define JS_OP_LIST(V) \
@@ -161,60 +169,155 @@
JS_OTHER_OP_LIST(V)
// Opcodes for virtual-machine-level operators.
+#define SIMPLIFIED_CHANGE_OP_LIST(V) \
+ V(ChangeTaggedSignedToInt32) \
+ V(ChangeTaggedToInt32) \
+ V(ChangeTaggedToUint32) \
+ V(ChangeTaggedToFloat64) \
+ V(ChangeInt31ToTaggedSigned) \
+ V(ChangeInt32ToTagged) \
+ V(ChangeUint32ToTagged) \
+ V(ChangeFloat64ToTagged) \
+ V(ChangeTaggedToBit) \
+ V(ChangeBitToTagged) \
+ V(TruncateTaggedToWord32) \
+ V(TruncateTaggedToFloat64)
+
+#define SIMPLIFIED_CHECKED_OP_LIST(V) \
+ V(CheckedInt32Add) \
+ V(CheckedInt32Sub) \
+ V(CheckedInt32Div) \
+ V(CheckedInt32Mod) \
+ V(CheckedUint32Div) \
+ V(CheckedUint32Mod) \
+ V(CheckedInt32Mul) \
+ V(CheckedUint32ToInt32) \
+ V(CheckedFloat64ToInt32) \
+ V(CheckedTaggedSignedToInt32) \
+ V(CheckedTaggedToInt32) \
+ V(CheckedTruncateTaggedToWord32) \
+ V(CheckedTaggedToFloat64)
+
#define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
V(NumberEqual) \
V(NumberLessThan) \
V(NumberLessThanOrEqual) \
+ V(SpeculativeNumberEqual) \
+ V(SpeculativeNumberLessThan) \
+ V(SpeculativeNumberLessThanOrEqual) \
V(ReferenceEqual) \
V(StringEqual) \
V(StringLessThan) \
V(StringLessThanOrEqual)
-#define SIMPLIFIED_OP_LIST(V) \
- SIMPLIFIED_COMPARE_BINOP_LIST(V) \
- V(BooleanNot) \
- V(BooleanToNumber) \
- V(NumberAdd) \
- V(NumberSubtract) \
- V(NumberMultiply) \
- V(NumberDivide) \
- V(NumberModulus) \
- V(NumberBitwiseOr) \
- V(NumberBitwiseXor) \
- V(NumberBitwiseAnd) \
- V(NumberShiftLeft) \
- V(NumberShiftRight) \
- V(NumberShiftRightLogical) \
- V(NumberImul) \
- V(NumberClz32) \
- V(NumberCeil) \
- V(NumberFloor) \
- V(NumberRound) \
- V(NumberTrunc) \
- V(NumberToInt32) \
- V(NumberToUint32) \
- V(NumberIsHoleNaN) \
- V(PlainPrimitiveToNumber) \
- V(StringToNumber) \
- V(ChangeTaggedToInt32) \
- V(ChangeTaggedToUint32) \
- V(ChangeTaggedToFloat64) \
- V(ChangeInt32ToTagged) \
- V(ChangeUint32ToTagged) \
- V(ChangeFloat64ToTagged) \
- V(ChangeBoolToBit) \
- V(ChangeBitToBool) \
- V(Allocate) \
- V(LoadField) \
- V(LoadBuffer) \
- V(LoadElement) \
- V(StoreField) \
- V(StoreBuffer) \
- V(StoreElement) \
- V(ObjectIsNumber) \
- V(ObjectIsReceiver) \
- V(ObjectIsSmi) \
- V(ObjectIsUndetectable)
+#define SIMPLIFIED_NUMBER_BINOP_LIST(V) \
+ V(NumberAdd) \
+ V(NumberSubtract) \
+ V(NumberMultiply) \
+ V(NumberDivide) \
+ V(NumberModulus) \
+ V(NumberBitwiseOr) \
+ V(NumberBitwiseXor) \
+ V(NumberBitwiseAnd) \
+ V(NumberShiftLeft) \
+ V(NumberShiftRight) \
+ V(NumberShiftRightLogical) \
+ V(NumberAtan2) \
+ V(NumberImul) \
+ V(NumberMax) \
+ V(NumberMin) \
+ V(NumberPow)
+
+#define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
+ V(SpeculativeNumberAdd) \
+ V(SpeculativeNumberSubtract) \
+ V(SpeculativeNumberMultiply) \
+ V(SpeculativeNumberDivide) \
+ V(SpeculativeNumberModulus) \
+ V(SpeculativeNumberBitwiseAnd) \
+ V(SpeculativeNumberBitwiseOr) \
+ V(SpeculativeNumberBitwiseXor) \
+ V(SpeculativeNumberShiftLeft) \
+ V(SpeculativeNumberShiftRight) \
+ V(SpeculativeNumberShiftRightLogical)
+
+#define SIMPLIFIED_NUMBER_UNOP_LIST(V) \
+ V(NumberAbs) \
+ V(NumberAcos) \
+ V(NumberAcosh) \
+ V(NumberAsin) \
+ V(NumberAsinh) \
+ V(NumberAtan) \
+ V(NumberAtanh) \
+ V(NumberCbrt) \
+ V(NumberCeil) \
+ V(NumberClz32) \
+ V(NumberCos) \
+ V(NumberCosh) \
+ V(NumberExp) \
+ V(NumberExpm1) \
+ V(NumberFloor) \
+ V(NumberFround) \
+ V(NumberLog) \
+ V(NumberLog1p) \
+ V(NumberLog2) \
+ V(NumberLog10) \
+ V(NumberRound) \
+ V(NumberSign) \
+ V(NumberSin) \
+ V(NumberSinh) \
+ V(NumberSqrt) \
+ V(NumberTan) \
+ V(NumberTanh) \
+ V(NumberTrunc) \
+ V(NumberToInt32) \
+ V(NumberToUint32) \
+ V(NumberSilenceNaN)
+
+#define SIMPLIFIED_OTHER_OP_LIST(V) \
+ V(PlainPrimitiveToNumber) \
+ V(PlainPrimitiveToWord32) \
+ V(PlainPrimitiveToFloat64) \
+ V(BooleanNot) \
+ V(StringCharCodeAt) \
+ V(StringFromCharCode) \
+ V(CheckBounds) \
+ V(CheckIf) \
+ V(CheckMaps) \
+ V(CheckNumber) \
+ V(CheckString) \
+ V(CheckTaggedPointer) \
+ V(CheckTaggedSigned) \
+ V(CheckFloat64Hole) \
+ V(CheckTaggedHole) \
+ V(ConvertTaggedHoleToUndefined) \
+ V(Allocate) \
+ V(LoadField) \
+ V(LoadBuffer) \
+ V(LoadElement) \
+ V(LoadTypedElement) \
+ V(StoreField) \
+ V(StoreBuffer) \
+ V(StoreElement) \
+ V(StoreTypedElement) \
+ V(ObjectIsCallable) \
+ V(ObjectIsNumber) \
+ V(ObjectIsReceiver) \
+ V(ObjectIsSmi) \
+ V(ObjectIsString) \
+ V(ObjectIsUndetectable) \
+ V(EnsureWritableFastElements) \
+ V(MaybeGrowFastElements) \
+ V(TransitionElementsKind)
+
+#define SIMPLIFIED_OP_LIST(V) \
+ SIMPLIFIED_CHANGE_OP_LIST(V) \
+ SIMPLIFIED_CHECKED_OP_LIST(V) \
+ SIMPLIFIED_COMPARE_BINOP_LIST(V) \
+ SIMPLIFIED_NUMBER_BINOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
+ SIMPLIFIED_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_OTHER_OP_LIST(V)
// Opcodes for Machine-level operators.
#define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -237,6 +340,8 @@
#define MACHINE_OP_LIST(V) \
MACHINE_COMPARE_BINOP_LIST(V) \
+ V(DebugBreak) \
+ V(Comment) \
V(Load) \
V(Store) \
V(StackSlot) \
@@ -250,6 +355,7 @@
V(Word32Clz) \
V(Word32Ctz) \
V(Word32ReverseBits) \
+ V(Word32ReverseBytes) \
V(Word32Popcnt) \
V(Word64Popcnt) \
V(Word64And) \
@@ -262,11 +368,13 @@
V(Word64Clz) \
V(Word64Ctz) \
V(Word64ReverseBits) \
+ V(Word64ReverseBytes) \
V(Int32Add) \
V(Int32AddWithOverflow) \
V(Int32Sub) \
V(Int32SubWithOverflow) \
V(Int32Mul) \
+ V(Int32MulWithOverflow) \
V(Int32MulHigh) \
V(Int32Div) \
V(Int32Mod) \
@@ -282,9 +390,12 @@
V(Int64Mod) \
V(Uint64Div) \
V(Uint64Mod) \
+ V(BitcastWordToTagged) \
+ V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(Float64SilenceNaN) \
V(TruncateFloat64ToUint32) \
V(TruncateFloat32ToInt32) \
V(TruncateFloat32ToUint32) \
@@ -296,9 +407,15 @@
V(ChangeInt32ToInt64) \
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
+ V(ImpossibleToBit) \
+ V(ImpossibleToWord32) \
+ V(ImpossibleToWord64) \
+ V(ImpossibleToFloat32) \
+ V(ImpossibleToFloat64) \
+ V(ImpossibleToTagged) \
V(TruncateFloat64ToFloat32) \
- V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
V(RoundInt32ToFloat32) \
V(RoundInt64ToFloat32) \
V(RoundInt64ToFloat64) \
@@ -311,22 +428,45 @@
V(BitcastInt64ToFloat64) \
V(Float32Add) \
V(Float32Sub) \
+ V(Float32Neg) \
V(Float32Mul) \
V(Float32Div) \
- V(Float32Max) \
- V(Float32Min) \
V(Float32Abs) \
V(Float32Sqrt) \
V(Float32RoundDown) \
+ V(Float32Max) \
+ V(Float32Min) \
V(Float64Add) \
V(Float64Sub) \
+ V(Float64Neg) \
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
V(Float64Max) \
V(Float64Min) \
V(Float64Abs) \
+ V(Float64Acos) \
+ V(Float64Acosh) \
+ V(Float64Asin) \
+ V(Float64Asinh) \
+ V(Float64Atan) \
+ V(Float64Atanh) \
+ V(Float64Atan2) \
+ V(Float64Cbrt) \
+ V(Float64Cos) \
+ V(Float64Cosh) \
+ V(Float64Exp) \
+ V(Float64Expm1) \
+ V(Float64Log) \
+ V(Float64Log1p) \
+ V(Float64Log10) \
+ V(Float64Log2) \
+ V(Float64Pow) \
+ V(Float64Sin) \
+ V(Float64Sinh) \
V(Float64Sqrt) \
+ V(Float64Tan) \
+ V(Float64Tanh) \
V(Float64RoundDown) \
V(Float32RoundUp) \
V(Float64RoundUp) \
@@ -344,17 +484,209 @@
V(LoadParentFramePointer) \
V(CheckedLoad) \
V(CheckedStore) \
+ V(UnalignedLoad) \
+ V(UnalignedStore) \
V(Int32PairAdd) \
V(Int32PairSub) \
V(Int32PairMul) \
V(Word32PairShl) \
V(Word32PairShr) \
- V(Word32PairSar)
-
-#define VALUE_OP_LIST(V) \
- COMMON_OP_LIST(V) \
- SIMPLIFIED_OP_LIST(V) \
- MACHINE_OP_LIST(V) \
+ V(Word32PairSar) \
+ V(AtomicLoad) \
+ V(AtomicStore) \
+ V(UnsafePointerAdd)
+
+#define MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
+ V(CreateFloat32x4) \
+ V(Float32x4ReplaceLane) \
+ V(Float32x4Abs) \
+ V(Float32x4Neg) \
+ V(Float32x4Sqrt) \
+ V(Float32x4RecipApprox) \
+ V(Float32x4RecipSqrtApprox) \
+ V(Float32x4Add) \
+ V(Float32x4Sub) \
+ V(Float32x4Mul) \
+ V(Float32x4Div) \
+ V(Float32x4Min) \
+ V(Float32x4Max) \
+ V(Float32x4MinNum) \
+ V(Float32x4MaxNum) \
+ V(Float32x4Equal) \
+ V(Float32x4NotEqual) \
+ V(Float32x4LessThan) \
+ V(Float32x4LessThanOrEqual) \
+ V(Float32x4GreaterThan) \
+ V(Float32x4GreaterThanOrEqual) \
+ V(Float32x4Select) \
+ V(Float32x4Swizzle) \
+ V(Float32x4Shuffle) \
+ V(Float32x4FromInt32x4) \
+ V(Float32x4FromUint32x4) \
+ V(CreateInt32x4) \
+ V(Int32x4ReplaceLane) \
+ V(Int32x4Neg) \
+ V(Int32x4Add) \
+ V(Int32x4Sub) \
+ V(Int32x4Mul) \
+ V(Int32x4Min) \
+ V(Int32x4Max) \
+ V(Int32x4ShiftLeftByScalar) \
+ V(Int32x4ShiftRightByScalar) \
+ V(Int32x4Equal) \
+ V(Int32x4NotEqual) \
+ V(Int32x4LessThan) \
+ V(Int32x4LessThanOrEqual) \
+ V(Int32x4GreaterThan) \
+ V(Int32x4GreaterThanOrEqual) \
+ V(Int32x4Select) \
+ V(Int32x4Swizzle) \
+ V(Int32x4Shuffle) \
+ V(Int32x4FromFloat32x4) \
+ V(Uint32x4Min) \
+ V(Uint32x4Max) \
+ V(Uint32x4ShiftLeftByScalar) \
+ V(Uint32x4ShiftRightByScalar) \
+ V(Uint32x4LessThan) \
+ V(Uint32x4LessThanOrEqual) \
+ V(Uint32x4GreaterThan) \
+ V(Uint32x4GreaterThanOrEqual) \
+ V(Uint32x4FromFloat32x4) \
+ V(CreateBool32x4) \
+ V(Bool32x4ReplaceLane) \
+ V(Bool32x4And) \
+ V(Bool32x4Or) \
+ V(Bool32x4Xor) \
+ V(Bool32x4Not) \
+ V(Bool32x4Swizzle) \
+ V(Bool32x4Shuffle) \
+ V(Bool32x4Equal) \
+ V(Bool32x4NotEqual) \
+ V(CreateInt16x8) \
+ V(Int16x8ReplaceLane) \
+ V(Int16x8Neg) \
+ V(Int16x8Add) \
+ V(Int16x8AddSaturate) \
+ V(Int16x8Sub) \
+ V(Int16x8SubSaturate) \
+ V(Int16x8Mul) \
+ V(Int16x8Min) \
+ V(Int16x8Max) \
+ V(Int16x8ShiftLeftByScalar) \
+ V(Int16x8ShiftRightByScalar) \
+ V(Int16x8Equal) \
+ V(Int16x8NotEqual) \
+ V(Int16x8LessThan) \
+ V(Int16x8LessThanOrEqual) \
+ V(Int16x8GreaterThan) \
+ V(Int16x8GreaterThanOrEqual) \
+ V(Int16x8Select) \
+ V(Int16x8Swizzle) \
+ V(Int16x8Shuffle) \
+ V(Uint16x8AddSaturate) \
+ V(Uint16x8SubSaturate) \
+ V(Uint16x8Min) \
+ V(Uint16x8Max) \
+ V(Uint16x8ShiftLeftByScalar) \
+ V(Uint16x8ShiftRightByScalar) \
+ V(Uint16x8LessThan) \
+ V(Uint16x8LessThanOrEqual) \
+ V(Uint16x8GreaterThan) \
+ V(Uint16x8GreaterThanOrEqual) \
+ V(CreateBool16x8) \
+ V(Bool16x8ReplaceLane) \
+ V(Bool16x8And) \
+ V(Bool16x8Or) \
+ V(Bool16x8Xor) \
+ V(Bool16x8Not) \
+ V(Bool16x8Swizzle) \
+ V(Bool16x8Shuffle) \
+ V(Bool16x8Equal) \
+ V(Bool16x8NotEqual) \
+ V(CreateInt8x16) \
+ V(Int8x16ReplaceLane) \
+ V(Int8x16Neg) \
+ V(Int8x16Add) \
+ V(Int8x16AddSaturate) \
+ V(Int8x16Sub) \
+ V(Int8x16SubSaturate) \
+ V(Int8x16Mul) \
+ V(Int8x16Min) \
+ V(Int8x16Max) \
+ V(Int8x16ShiftLeftByScalar) \
+ V(Int8x16ShiftRightByScalar) \
+ V(Int8x16Equal) \
+ V(Int8x16NotEqual) \
+ V(Int8x16LessThan) \
+ V(Int8x16LessThanOrEqual) \
+ V(Int8x16GreaterThan) \
+ V(Int8x16GreaterThanOrEqual) \
+ V(Int8x16Select) \
+ V(Int8x16Swizzle) \
+ V(Int8x16Shuffle) \
+ V(Uint8x16AddSaturate) \
+ V(Uint8x16SubSaturate) \
+ V(Uint8x16Min) \
+ V(Uint8x16Max) \
+ V(Uint8x16ShiftLeftByScalar) \
+ V(Uint8x16ShiftRightByScalar) \
+ V(Uint8x16LessThan) \
+ V(Uint8x16LessThanOrEqual) \
+ V(Uint8x16GreaterThan) \
+ V(Uint8x16GreaterThanOrEqual) \
+ V(CreateBool8x16) \
+ V(Bool8x16ReplaceLane) \
+ V(Bool8x16And) \
+ V(Bool8x16Or) \
+ V(Bool8x16Xor) \
+ V(Bool8x16Not) \
+ V(Bool8x16Swizzle) \
+ V(Bool8x16Shuffle) \
+ V(Bool8x16Equal) \
+ V(Bool8x16NotEqual)
+
+#define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
+ V(Float32x4ExtractLane) \
+ V(Int32x4ExtractLane) \
+ V(Int16x8ExtractLane) \
+ V(Int8x16ExtractLane)
+
+#define MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
+ V(Bool32x4ExtractLane) \
+ V(Bool32x4AnyTrue) \
+ V(Bool32x4AllTrue) \
+ V(Bool16x8ExtractLane) \
+ V(Bool16x8AnyTrue) \
+ V(Bool16x8AllTrue) \
+ V(Bool8x16ExtractLane) \
+ V(Bool8x16AnyTrue) \
+ V(Bool8x16AllTrue)
+
+#define MACHINE_SIMD_GENERIC_OP_LIST(V) \
+ V(Simd128Load) \
+ V(Simd128Load1) \
+ V(Simd128Load2) \
+ V(Simd128Load3) \
+ V(Simd128Store) \
+ V(Simd128Store1) \
+ V(Simd128Store2) \
+ V(Simd128Store3) \
+ V(Simd128And) \
+ V(Simd128Or) \
+ V(Simd128Xor) \
+ V(Simd128Not)
+
+#define MACHINE_SIMD_OP_LIST(V) \
+ MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
+ MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
+ MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
+ MACHINE_SIMD_GENERIC_OP_LIST(V)
+
+#define VALUE_OP_LIST(V) \
+ COMMON_OP_LIST(V) \
+ SIMPLIFIED_OP_LIST(V) \
+ MACHINE_OP_LIST(V) \
+ MACHINE_SIMD_OP_LIST(V) \
JS_OP_LIST(V)
// The combination of all operators at all levels and the common operators.
@@ -400,7 +732,7 @@ class IrOpcode {
// Returns true if opcode for constant operator.
static bool IsConstantOpcode(Value value) {
- return kInt32Constant <= value && value <= kHeapConstant;
+ return kInt32Constant <= value && value <= kRelocatableInt64Constant;
}
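All the *_OP_LIST macros above are X-macros: each takes a macro V and applies it to every opcode name, so a single list generates the enum, the printable names, and contiguity-dependent range checks like IsConstantOpcode in lockstep. A self-contained miniature of the technique:

    #define MY_OP_LIST(V) V(Int32Constant) V(Int64Constant) V(HeapConstant)

    #define DECLARE_ENUM(x) k##x,
    enum Opcode { MY_OP_LIST(DECLARE_ENUM) kLast };
    #undef DECLARE_ENUM

    #define DECLARE_NAME(x) #x,
    const char* const kOpcodeNames[] = {MY_OP_LIST(DECLARE_NAME)};
    #undef DECLARE_NAME

    // Contiguous enum values make range predicates cheap, as in
    // IsConstantOpcode above.
    bool IsConstant(Opcode op) {
      return kInt32Constant <= op && op <= kHeapConstant;
    }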
static bool IsPhiOpcode(Value value) {
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
new file mode 100644
index 0000000000..f3ef778dc0
--- /dev/null
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -0,0 +1,968 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/operation-typer.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/type-cache.h"
+#include "src/types.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
+ : zone_(zone), cache_(TypeCache::Get()) {
+ Factory* factory = isolate->factory();
+ infinity_ = Type::Constant(factory->infinity_value(), zone);
+ minus_infinity_ = Type::Constant(factory->minus_infinity_value(), zone);
+ // Unfortunately, the infinities created in other places might be different
+ // ones (e.g., the result of NewNumber in TypeNumberConstant).
+ Type* truncating_to_zero =
+ Type::Union(Type::Union(infinity_, minus_infinity_, zone),
+ Type::MinusZeroOrNaN(), zone);
+ DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
+
+ singleton_false_ = Type::Constant(factory->false_value(), zone);
+ singleton_true_ = Type::Constant(factory->true_value(), zone);
+ singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+ signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
+ unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
+}
+
+Type* OperationTyper::Merge(Type* left, Type* right) {
+ return Type::Union(left, right, zone());
+}
+
+Type* OperationTyper::WeakenRange(Type* previous_range, Type* current_range) {
+ static const double kWeakenMinLimits[] = {0.0,
+ -1073741824.0,
+ -2147483648.0,
+ -4294967296.0,
+ -8589934592.0,
+ -17179869184.0,
+ -34359738368.0,
+ -68719476736.0,
+ -137438953472.0,
+ -274877906944.0,
+ -549755813888.0,
+ -1099511627776.0,
+ -2199023255552.0,
+ -4398046511104.0,
+ -8796093022208.0,
+ -17592186044416.0,
+ -35184372088832.0,
+ -70368744177664.0,
+ -140737488355328.0,
+ -281474976710656.0,
+ -562949953421312.0};
+ static const double kWeakenMaxLimits[] = {0.0,
+ 1073741823.0,
+ 2147483647.0,
+ 4294967295.0,
+ 8589934591.0,
+ 17179869183.0,
+ 34359738367.0,
+ 68719476735.0,
+ 137438953471.0,
+ 274877906943.0,
+ 549755813887.0,
+ 1099511627775.0,
+ 2199023255551.0,
+ 4398046511103.0,
+ 8796093022207.0,
+ 17592186044415.0,
+ 35184372088831.0,
+ 70368744177663.0,
+ 140737488355327.0,
+ 281474976710655.0,
+ 562949953421311.0};
+ STATIC_ASSERT(arraysize(kWeakenMinLimits) == arraysize(kWeakenMaxLimits));
+
+ double current_min = current_range->Min();
+ double new_min = current_min;
+ // Find the closest lower entry in the list of allowed
+ // minima (or negative infinity if there is no such entry).
+ if (current_min != previous_range->Min()) {
+ new_min = -V8_INFINITY;
+ for (double const min : kWeakenMinLimits) {
+ if (min <= current_min) {
+ new_min = min;
+ break;
+ }
+ }
+ }
+
+ double current_max = current_range->Max();
+ double new_max = current_max;
+ // Find the closest greater entry in the list of allowed
+ // maxima (or infinity if there is no such entry).
+ if (current_max != previous_range->Max()) {
+ new_max = V8_INFINITY;
+ for (double const max : kWeakenMaxLimits) {
+ if (max >= current_max) {
+ new_max = max;
+ break;
+ }
+ }
+ }
+
+ return Type::Range(new_min, new_max, zone());
+}
+
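WeakenRange is what keeps the typer's fixpoint iteration from crawling one integer bound at a time: a bound that changed since the last iteration snaps outward to the nearest entry of the fixed, roughly exponentially spaced table, or to infinity beyond it. The lower-bound half in isolation (sketch with a shortened table):

    #include <cmath>

    // Descending table of permitted minima, as in kWeakenMinLimits above.
    const double kMinLimits[] = {0.0, -1073741824.0, -2147483648.0,
                                 -4294967296.0};

    double WeakenMin(double current_min) {
      for (double min : kMinLimits) {
        if (min <= current_min) return min;  // first table entry at or below
      }
      return -INFINITY;  // below everything in the table
    }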
+Type* OperationTyper::Rangify(Type* type) {
+ if (type->IsRange()) return type; // Shortcut.
+ if (!type->Is(cache_.kInteger)) {
+ return type; // Give up on non-integer types.
+ }
+ double min = type->Min();
+ double max = type->Max();
+ // Handle the degenerate case of empty bitset types (such as
+ // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
+ if (std::isnan(min)) {
+ DCHECK(std::isnan(max));
+ return type;
+ }
+ return Type::Range(min, max, zone());
+}
+
+namespace {
+
+// Returns the array's least element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+double array_min(double a[], size_t n) {
+ DCHECK(n != 0);
+ double x = +V8_INFINITY;
+ for (size_t i = 0; i < n; ++i) {
+ if (!std::isnan(a[i])) {
+ x = std::min(a[i], x);
+ }
+ }
+ DCHECK(!std::isnan(x));
+ return x == 0 ? 0 : x; // -0 -> 0
+}
+
+// Returns the array's greatest element, ignoring NaN.
+// There must be at least one non-NaN element.
+// Any -0 is converted to 0.
+double array_max(double a[], size_t n) {
+ DCHECK(n != 0);
+ double x = -V8_INFINITY;
+ for (size_t i = 0; i < n; ++i) {
+ if (!std::isnan(a[i])) {
+ x = std::max(a[i], x);
+ }
+ }
+ DCHECK(!std::isnan(x));
+ return x == 0 ? 0 : x; // -0 -> 0
+}
+
+} // namespace
+
+Type* OperationTyper::AddRanger(double lhs_min, double lhs_max, double rhs_min,
+ double rhs_max) {
+ double results[4];
+ results[0] = lhs_min + rhs_min;
+ results[1] = lhs_min + rhs_max;
+ results[2] = lhs_max + rhs_min;
+ results[3] = lhs_max + rhs_max;
+ // Since none of the inputs can be -0, the result cannot be -0 either.
+ // However, it can be nan (the sum of two infinities of opposite sign).
+ // On the other hand, if none of the "results" above is nan, then the
+ // actual result cannot be nan either.
+ int nans = 0;
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) ++nans;
+ }
+ if (nans == 4) return Type::NaN();
+ Type* type =
+ Type::Range(array_min(results, 4), array_max(results, 4), zone());
+ if (nans > 0) type = Type::Union(type, Type::NaN(), zone());
+ // Examples:
+ // [-inf, -inf] + [+inf, +inf] = NaN
+ // [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
+ // [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
+ // [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
+ return type;
+}
+
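AddRanger's corner analysis generalizes to a small helper: the four corner sums bound the interval result, NaN corners (only possible as inf + -inf) add NaN to the union, and four NaN corners collapse the type to NaN. Illustrative sketch (it ignores the -0 normalization that array_min/array_max perform):

    #include <algorithm>
    #include <cmath>

    struct RangeResult { double min, max; bool maybe_nan; };

    RangeResult AddRanges(double lmin, double lmax, double rmin, double rmax) {
      double c[4] = {lmin + rmin, lmin + rmax, lmax + rmin, lmax + rmax};
      RangeResult r{+INFINITY, -INFINITY, false};
      for (double v : c) {
        if (std::isnan(v)) { r.maybe_nan = true; continue; }
        r.min = std::min(r.min, v);
        r.max = std::max(r.max, v);
      }
      return r;  // all-NaN corners leave min/max untouched: pure NaN type
    }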
+Type* OperationTyper::SubtractRanger(double lhs_min, double lhs_max,
+ double rhs_min, double rhs_max) {
+ double results[4];
+ results[0] = lhs_min - rhs_min;
+ results[1] = lhs_min - rhs_max;
+ results[2] = lhs_max - rhs_min;
+ results[3] = lhs_max - rhs_max;
+ // Since none of the inputs can be -0, the result cannot be -0.
+ // However, it can be nan (the subtraction of two infinities of same sign).
+ // On the other hand, if none of the "results" above is nan, then the actual
+ // result cannot be nan either.
+ int nans = 0;
+ for (int i = 0; i < 4; ++i) {
+ if (std::isnan(results[i])) ++nans;
+ }
+ if (nans == 4) return Type::NaN(); // [inf..inf] - [inf..inf] (all same sign)
+ Type* type =
+ Type::Range(array_min(results, 4), array_max(results, 4), zone());
+ return nans == 0 ? type : Type::Union(type, Type::NaN(), zone());
+ // Examples:
+ // [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
+ // [-inf, -inf] - [-inf, -inf] = NaN
+ // [-inf, -inf] - [n, +inf] = [-inf, -inf] \/ NaN
+ // [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
+}
+
+Type* OperationTyper::MultiplyRanger(Type* lhs, Type* rhs) {
+ double results[4];
+ double lmin = lhs->AsRange()->Min();
+ double lmax = lhs->AsRange()->Max();
+ double rmin = rhs->AsRange()->Min();
+ double rmax = rhs->AsRange()->Max();
+ results[0] = lmin * rmin;
+ results[1] = lmin * rmax;
+ results[2] = lmax * rmin;
+ results[3] = lmax * rmax;
+ // If the result may be nan, we give up on calculating a precise type, because
+ // the discontinuity makes it too complicated. Note that even if none of the
+ // "results" above is nan, the actual result may still be, so we have to do a
+ // different check:
+ bool maybe_nan = (lhs->Maybe(cache_.kSingletonZero) &&
+ (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
+ (rhs->Maybe(cache_.kSingletonZero) &&
+ (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
+ if (maybe_nan) return cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
+ bool maybe_minuszero = (lhs->Maybe(cache_.kSingletonZero) && rmin < 0) ||
+ (rhs->Maybe(cache_.kSingletonZero) && lmin < 0);
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), zone());
+ return maybe_minuszero ? Type::Union(range, Type::MinusZero(), zone())
+ : range;
+}
+
+Type* OperationTyper::ToNumber(Type* type) {
+ if (type->Is(Type::Number())) return type;
+ if (type->Is(Type::NullOrUndefined())) {
+ if (type->Is(Type::Null())) return cache_.kSingletonZero;
+ if (type->Is(Type::Undefined())) return Type::NaN();
+ return Type::Union(Type::NaN(), cache_.kSingletonZero, zone());
+ }
+ if (type->Is(Type::Boolean())) {
+ if (type->Is(singleton_false_)) return cache_.kSingletonZero;
+ if (type->Is(singleton_true_)) return cache_.kSingletonOne;
+ return cache_.kZeroOrOne;
+ }
+ if (type->Is(Type::NumberOrOddball())) {
+ if (type->Is(Type::NumberOrUndefined())) {
+ type = Type::Union(type, Type::NaN(), zone());
+ } else if (type->Is(Type::NullOrNumber())) {
+ type = Type::Union(type, cache_.kSingletonZero, zone());
+ } else if (type->Is(Type::BooleanOrNullOrNumber())) {
+ type = Type::Union(type, cache_.kZeroOrOne, zone());
+ } else {
+ type = Type::Union(type, cache_.kZeroOrOneOrNaN, zone());
+ }
+ return Type::Intersect(type, Type::Number(), zone());
+ }
+ return Type::Number();
+}
+
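ToNumber here is the type-level image of JavaScript's ToNumber on the oddballs: null -> 0, undefined -> NaN, false/true -> 0/1, with unions widened accordingly. The underlying value-level mapping, for reference (sketch):

    #include <cmath>

    enum class Oddball { kNull, kUndefined, kFalse, kTrue };

    // Value-level ToNumber for oddballs, matching the type rules above.
    double ToNumber(Oddball v) {
      switch (v) {
        case Oddball::kNull: return 0.0;
        case Oddball::kUndefined: return NAN;
        case Oddball::kFalse: return 0.0;
        case Oddball::kTrue: return 1.0;
      }
      return NAN;  // unreachable
    }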
+Type* OperationTyper::NumberAbs(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+
+ if (!type->IsInhabited()) {
+ return Type::None();
+ }
+
+ bool const maybe_nan = type->Maybe(Type::NaN());
+ bool const maybe_minuszero = type->Maybe(Type::MinusZero());
+ type = Type::Intersect(type, Type::PlainNumber(), zone());
+ double const max = type->Max();
+ double const min = type->Min();
+ if (min < 0) {
+ if (type->Is(cache_.kInteger)) {
+ type = Type::Range(0.0, std::max(std::fabs(min), std::fabs(max)), zone());
+ } else {
+ type = Type::PlainNumber();
+ }
+ }
+ if (maybe_minuszero) {
+ type = Type::Union(type, cache_.kSingletonZero, zone());
+ }
+ if (maybe_nan) {
+ type = Type::Union(type, Type::NaN(), zone());
+ }
+ return type;
+}
+
+Type* OperationTyper::NumberAcos(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberAcosh(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberAsin(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberAsinh(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberAtan(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberAtanh(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberCbrt(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberCeil(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberClz32(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return cache_.kZeroToThirtyTwo;
+}
+
+Type* OperationTyper::NumberCos(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberCosh(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberExp(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
+}
+
+Type* OperationTyper::NumberExpm1(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Union(Type::PlainNumber(), Type::NaN(), zone());
+}
+
+Type* OperationTyper::NumberFloor(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberFround(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberLog(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberLog1p(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberLog2(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberLog10(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberRound(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberSign(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(cache_.kZeroish)) return type;
+ bool maybe_minuszero = type->Maybe(Type::MinusZero());
+ bool maybe_nan = type->Maybe(Type::NaN());
+ type = Type::Intersect(type, Type::PlainNumber(), zone());
+ if (type->Max() < 0.0) {
+ type = cache_.kSingletonMinusOne;
+ } else if (type->Max() <= 0.0) {
+ type = cache_.kMinusOneOrZero;
+ } else if (type->Min() > 0.0) {
+ type = cache_.kSingletonOne;
+ } else if (type->Min() >= 0.0) {
+ type = cache_.kZeroOrOne;
+ } else {
+ type = Type::Range(-1.0, 1.0, zone());
+ }
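+ // e.g. [2,8] yields the singleton 1, while [-3,7] yields the range [-1,1]
+ // (the only possible plain-number results being -1, 0 and 1).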
+ if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+ if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ return type;
+}
+
+Type* OperationTyper::NumberSin(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberSinh(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberSqrt(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberTan(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberTanh(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberTrunc(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
+ // TODO(bmeurer): We could infer a more precise type here.
+ return cache_.kIntegerOrMinusZeroOrNaN;
+}
+
+Type* OperationTyper::NumberToInt32(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+
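+ // ToInt32 maps -0, +0 and NaN all to 0, which is why zeroish inputs
+ // collapse to the zero singleton below.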
+ if (type->Is(Type::Signed32())) return type;
+ if (type->Is(cache_.kZeroish)) return cache_.kSingletonZero;
+ if (type->Is(signed32ish_)) {
+ return Type::Intersect(Type::Union(type, cache_.kSingletonZero, zone()),
+ Type::Signed32(), zone());
+ }
+ return Type::Signed32();
+}
+
+Type* OperationTyper::NumberToUint32(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+
+ if (type->Is(Type::Unsigned32())) return type;
+ if (type->Is(cache_.kZeroish)) return cache_.kSingletonZero;
+ if (type->Is(unsigned32ish_)) {
+ return Type::Intersect(Type::Union(type, cache_.kSingletonZero, zone()),
+ Type::Unsigned32(), zone());
+ }
+ return Type::Unsigned32();
+}
+
+Type* OperationTyper::NumberSilenceNaN(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+ // TODO(jarin): This is a terrible hack; we definitely need a dedicated type
+ // for the hole (tagged and/or double). Otherwise if the input is the hole
+ // NaN constant, we'd just eliminate this node in JSTypedLowering.
+ if (type->Maybe(Type::NaN())) return Type::Number();
+ return type;
+}
+
+Type* OperationTyper::NumberAdd(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+ return Type::None();
+ }
+
+ // Addition can return NaN if either input can be NaN or we try to compute
+ // the sum of two infinities of opposite sign.
+ bool maybe_nan = lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN());
+
+ // Addition can yield minus zero only if both inputs can be minus zero.
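+ // (In IEEE arithmetic, (-0) + (-0) is -0, but (-0) + 0 is +0.)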
+ bool maybe_minuszero = true;
+ if (lhs->Maybe(Type::MinusZero())) {
+ lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+ } else {
+ maybe_minuszero = false;
+ }
+ if (rhs->Maybe(Type::MinusZero())) {
+ rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+ } else {
+ maybe_minuszero = false;
+ }
+
+ // We can give more precise types for integers.
+ Type* type = Type::None();
+ lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
+ rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
+ if (lhs->IsInhabited() && rhs->IsInhabited()) {
+ if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+ type = AddRanger(lhs->Min(), lhs->Max(), rhs->Min(), rhs->Max());
+ } else {
+ if ((lhs->Maybe(minus_infinity_) && rhs->Maybe(infinity_)) ||
+ (rhs->Maybe(minus_infinity_) && lhs->Maybe(infinity_))) {
+ maybe_nan = true;
+ }
+ type = Type::PlainNumber();
+ }
+ }
+
+ // Take into account the -0 and NaN information computed earlier.
+ if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+ if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ return type;
+}
+
+Type* OperationTyper::NumberSubtract(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+ return Type::None();
+ }
+
+ // Subtraction can return NaN if either input can be NaN or we try to
+ // compute the difference of two infinities of the same sign.
+ bool maybe_nan = lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN());
+
+ // Subtraction can yield minus zero if {lhs} can be minus zero and {rhs}
+ // can be zero.
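+ // (e.g. (-0) - 0 is -0, whereas (-0) - (-0) is +0.)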
+ bool maybe_minuszero = false;
+ if (lhs->Maybe(Type::MinusZero())) {
+ lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+ maybe_minuszero = rhs->Maybe(cache_.kSingletonZero);
+ }
+ if (rhs->Maybe(Type::MinusZero())) {
+ rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+ }
+
+ // We can give more precise types for integers.
+ Type* type = Type::None();
+ lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
+ rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
+ if (lhs->IsInhabited() && rhs->IsInhabited()) {
+ if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+ type = SubtractRanger(lhs->Min(), lhs->Max(), rhs->Min(), rhs->Max());
+ } else {
+ if ((lhs->Maybe(infinity_) && rhs->Maybe(infinity_)) ||
+ (rhs->Maybe(minus_infinity_) && lhs->Maybe(minus_infinity_))) {
+ maybe_nan = true;
+ }
+ type = Type::PlainNumber();
+ }
+ }
+
+ // Take into account the -0 and NaN information computed earlier.
+ if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+ if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ return type;
+}
+
+Type* OperationTyper::NumberMultiply(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+ return Type::None();
+ }
+
+ lhs = Rangify(lhs);
+ rhs = Rangify(rhs);
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+ if (lhs->IsRange() && rhs->IsRange()) {
+ return MultiplyRanger(lhs, rhs);
+ }
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberDivide(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) {
+ return Type::None();
+ }
+
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
+ // Division is tricky, so all we do is try ruling out NaN.
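+ // (e.g. 0/0 and Infinity/Infinity are NaN, hence the conservative check on
+ // zeroish divisors and infinite operands below.)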
+ bool maybe_nan =
+ lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
+ ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
+ (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
+ return maybe_nan ? Type::Number() : Type::OrderedNumber();
+}
+
+Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ // Modulus can yield NaN if either {lhs} or {rhs} can be NaN, or {lhs} is
+ // not finite, or {rhs} can be a zero value.
+ bool maybe_nan = lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
+ lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY;
+
+ // Deal with -0 inputs; only the sign bit of {lhs} matters for the result.
+ bool maybe_minuszero = false;
+ if (lhs->Maybe(Type::MinusZero())) {
+ maybe_minuszero = true;
+ lhs = Type::Union(lhs, cache_.kSingletonZero, zone());
+ }
+ if (rhs->Maybe(Type::MinusZero())) {
+ rhs = Type::Union(rhs, cache_.kSingletonZero, zone());
+ }
+
+ // Rule out NaN and -0, and check what we can do with the remaining type info.
+ Type* type = Type::None();
+ lhs = Type::Intersect(lhs, Type::PlainNumber(), zone());
+ rhs = Type::Intersect(rhs, Type::PlainNumber(), zone());
+
+ // We can only derive a meaningful type if both {lhs} and {rhs} are
+ // inhabited and {rhs} is not 0; otherwise the result is NaN independent of
+ // {lhs}.
+ if (lhs->IsInhabited() && !rhs->Is(cache_.kSingletonZero)) {
+ // Determine the bounds of {lhs} and {rhs}.
+ double const lmin = lhs->Min();
+ double const lmax = lhs->Max();
+ double const rmin = rhs->Min();
+ double const rmax = rhs->Max();
+
+ // The sign of the result is the sign of the {lhs}.
+ if (lmin < 0.0) maybe_minuszero = true;
+
+ // For integer inputs {lhs} and {rhs} we can infer a precise type.
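+ // e.g. lhs = [-7,4] and rhs = [3,5] gives labs = 7, rabs = 4 and hence the
+ // range [-4,4], since |x % y| <= min(|x|, |y| - 1) for integral y != 0.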
+ if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+ double labs = std::max(std::abs(lmin), std::abs(lmax));
+ double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
+ double abs = std::min(labs, rabs);
+ double min = 0.0, max = 0.0;
+ if (lmin >= 0.0) {
+ // {lhs} positive.
+ min = 0.0;
+ max = abs;
+ } else if (lmax <= 0.0) {
+ // {lhs} negative.
+ min = 0.0 - abs;
+ max = 0.0;
+ } else {
+ // {lhs} positive or negative.
+ min = 0.0 - abs;
+ max = abs;
+ }
+ type = Type::Range(min, max, zone());
+ } else {
+ type = Type::PlainNumber();
+ }
+ }
+
+ // Take into account the -0 and NaN information computed earlier.
+ if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+ if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ return type;
+}
+
+Type* OperationTyper::NumberBitwiseOr(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+ lhs = NumberToInt32(lhs);
+ rhs = NumberToInt32(rhs);
+
+ double lmin = lhs->Min();
+ double rmin = rhs->Min();
+ double lmax = lhs->Max();
+ double rmax = rhs->Max();
+ // Or-ing any two values results in a value no smaller than their minimum,
+ // and even no smaller than their maximum if both values are non-negative.
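+ // (e.g. 5 | 3 == 7 >= 5, and -8 | 3 == -5 >= -8.)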
+ double min =
+ lmin >= 0 && rmin >= 0 ? std::max(lmin, rmin) : std::min(lmin, rmin);
+ double max = kMaxInt;
+
+ // Or-ing with 0 is essentially a conversion to int32.
+ if (rmin == 0 && rmax == 0) {
+ min = lmin;
+ max = lmax;
+ }
+ if (lmin == 0 && lmax == 0) {
+ min = rmin;
+ max = rmax;
+ }
+
+ if (lmax < 0 || rmax < 0) {
+ // Or-ing two values of which at least one is negative results in a negative
+ // value.
+ max = std::min(max, -1.0);
+ }
+ return Type::Range(min, max, zone());
+}
+
+Type* OperationTyper::NumberBitwiseAnd(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+ lhs = NumberToInt32(lhs);
+ rhs = NumberToInt32(rhs);
+
+ double lmin = lhs->Min();
+ double rmin = rhs->Min();
+ double lmax = lhs->Max();
+ double rmax = rhs->Max();
+ double min = kMinInt;
+ // And-ing any two values results in a value no larger than their maximum,
+ // and even no larger than their minimum if both values are non-negative.
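+ // (e.g. 5 & 3 == 1 <= 3, and -8 & 3 == 0 <= 3.)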
+ double max =
+ lmin >= 0 && rmin >= 0 ? std::min(lmax, rmax) : std::max(lmax, rmax);
+ // And-ing with a non-negative value x causes the result to be between
+ // zero and x.
+ if (lmin >= 0) {
+ min = 0;
+ max = std::min(max, lmax);
+ }
+ if (rmin >= 0) {
+ min = 0;
+ max = std::min(max, rmax);
+ }
+ return Type::Range(min, max, zone());
+}
+
+Type* OperationTyper::NumberBitwiseXor(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+ lhs = NumberToInt32(lhs);
+ rhs = NumberToInt32(rhs);
+
+ double lmin = lhs->Min();
+ double rmin = rhs->Min();
+ double lmax = lhs->Max();
+ double rmax = rhs->Max();
+ if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
+ // Xor-ing two values of the same sign (both negative or both non-negative)
+ // results in a non-negative value.
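+ // (e.g. -8 ^ -3 == 5 and 5 ^ 3 == 6.)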
+ return Type::Unsigned31();
+ }
+ if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
+ // Xor-ing a negative and a non-negative value results in a negative value.
+ // TODO(jarin) Use a range here.
+ return Type::Negative32();
+ }
+ return Type::Signed32();
+}
+
+Type* OperationTyper::NumberShiftLeft(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ // TODO(turbofan): Infer a better type here.
+ return Type::Signed32();
+}
+
+Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+ lhs = NumberToInt32(lhs);
+ rhs = NumberToUint32(rhs);
+
+ double min = kMinInt;
+ double max = kMaxInt;
+ if (lhs->Min() >= 0) {
+ // Right-shifting a non-negative value cannot make it negative, nor larger.
+ min = std::max(min, 0.0);
+ max = std::min(max, lhs->Max());
+ if (rhs->Min() > 0 && rhs->Max() <= 31) {
+ max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
+ }
+ }
+ if (lhs->Max() < 0) {
+ // Right-shifting a negative value cannot make it non-negative, nor smaller.
+ min = std::max(min, lhs->Min());
+ max = std::min(max, -1.0);
+ if (rhs->Min() > 0 && rhs->Max() <= 31) {
+ min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
+ }
+ }
+ if (rhs->Min() > 0 && rhs->Max() <= 31) {
+ // Right-shifting by a positive amount narrows the result range accordingly.
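+ // (e.g. for rhs = [1,31] every result lies in [kMinInt >> 1, kMaxInt >> 1].)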
+ double shift_min = kMinInt >> static_cast<int>(rhs->Min());
+ double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
+ min = std::max(min, shift_min);
+ max = std::min(max, shift_max);
+ }
+ // TODO(jarin) Ideally, the following micro-optimization should be performed
+ // by the type constructor.
+ if (max == kMaxInt && min == kMinInt) return Type::Signed32();
+ return Type::Range(min, max, zone());
+}
+
+Type* OperationTyper::NumberShiftRightLogical(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+
+ if (!lhs->IsInhabited()) return Type::None();
+
+ lhs = NumberToUint32(lhs);
+
+ // Logical right-shifting any value cannot make it larger.
+ return Type::Range(0.0, lhs->Max(), zone());
+}
+
+Type* OperationTyper::NumberAtan2(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+ return Type::Number();
+}
+
+Type* OperationTyper::NumberImul(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+ // TODO(turbofan): We should be able to do better here.
+ return Type::Signed32();
+}
+
+Type* OperationTyper::NumberMax(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
+ return Type::NaN();
+ }
+ Type* type = Type::None();
+ // TODO(turbofan): Improve minus zero handling here.
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+ type = Type::Union(type, Type::NaN(), zone());
+ }
+ lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+ rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+ if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+ double max = std::max(lhs->Max(), rhs->Max());
+ double min = std::max(lhs->Min(), rhs->Min());
+ type = Type::Union(type, Type::Range(min, max, zone()), zone());
+ } else {
+ type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
+ }
+ return type;
+}
+
+Type* OperationTyper::NumberMin(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+ if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) {
+ return Type::NaN();
+ }
+ Type* type = Type::None();
+ // TODO(turbofan): Improve minus zero handling here.
+ if (lhs->Maybe(Type::NaN()) || rhs->Maybe(Type::NaN())) {
+ type = Type::Union(type, Type::NaN(), zone());
+ }
+ lhs = Type::Intersect(lhs, Type::OrderedNumber(), zone());
+ rhs = Type::Intersect(rhs, Type::OrderedNumber(), zone());
+ if (lhs->Is(cache_.kInteger) && rhs->Is(cache_.kInteger)) {
+ double max = std::min(lhs->Max(), rhs->Max());
+ double min = std::min(lhs->Min(), rhs->Min());
+ type = Type::Union(type, Type::Range(min, max, zone()), zone());
+ } else {
+ type = Type::Union(type, Type::Union(lhs, rhs, zone()), zone());
+ }
+ return type;
+}
+
+Type* OperationTyper::NumberPow(Type* lhs, Type* rhs) {
+ DCHECK(lhs->Is(Type::Number()));
+ DCHECK(rhs->Is(Type::Number()));
+ // TODO(turbofan): We should be able to do better here.
+ return Type::Number();
+}
+
+#define SPECULATIVE_NUMBER_BINOP(Name) \
+ Type* OperationTyper::Speculative##Name(Type* lhs, Type* rhs) { \
+ lhs = ToNumber(Type::Intersect(lhs, Type::NumberOrOddball(), zone())); \
+ rhs = ToNumber(Type::Intersect(rhs, Type::NumberOrOddball(), zone())); \
+ return Name(lhs, rhs); \
+ }
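+// As an illustration, SPECULATIVE_NUMBER_BINOP(NumberAdd) below expands to:
+//
+//   Type* OperationTyper::SpeculativeNumberAdd(Type* lhs, Type* rhs) {
+//     lhs = ToNumber(Type::Intersect(lhs, Type::NumberOrOddball(), zone()));
+//     rhs = ToNumber(Type::Intersect(rhs, Type::NumberOrOddball(), zone()));
+//     return NumberAdd(lhs, rhs);
+//   }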
+SPECULATIVE_NUMBER_BINOP(NumberAdd)
+SPECULATIVE_NUMBER_BINOP(NumberSubtract)
+SPECULATIVE_NUMBER_BINOP(NumberMultiply)
+SPECULATIVE_NUMBER_BINOP(NumberDivide)
+SPECULATIVE_NUMBER_BINOP(NumberModulus)
+SPECULATIVE_NUMBER_BINOP(NumberBitwiseOr)
+SPECULATIVE_NUMBER_BINOP(NumberBitwiseAnd)
+SPECULATIVE_NUMBER_BINOP(NumberBitwiseXor)
+SPECULATIVE_NUMBER_BINOP(NumberShiftLeft)
+SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
+SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
+#undef SPECULATIVE_NUMBER_BINOP
+
+Type* OperationTyper::ToPrimitive(Type* type) {
+ if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
+ return type;
+ }
+ return Type::Primitive();
+}
+
+Type* OperationTyper::Invert(Type* type) {
+ DCHECK(type->Is(Type::Boolean()));
+ DCHECK(type->IsInhabited());
+ if (type->Is(singleton_false())) return singleton_true();
+ if (type->Is(singleton_true())) return singleton_false();
+ return type;
+}
+
+OperationTyper::ComparisonOutcome OperationTyper::Invert(
+ ComparisonOutcome outcome) {
+ ComparisonOutcome result(0);
+ if ((outcome & kComparisonUndefined) != 0) result |= kComparisonUndefined;
+ if ((outcome & kComparisonTrue) != 0) result |= kComparisonFalse;
+ if ((outcome & kComparisonFalse) != 0) result |= kComparisonTrue;
+ return result;
+}
+
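+// Maps a comparison outcome to a Boolean type: e.g. an outcome that may be
+// false or undefined maps to the false singleton, while one that may be true
+// or false can only be widened to Boolean.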
+Type* OperationTyper::FalsifyUndefined(ComparisonOutcome outcome) {
+ if ((outcome & kComparisonFalse) != 0 ||
+ (outcome & kComparisonUndefined) != 0) {
+ return (outcome & kComparisonTrue) != 0 ? Type::Boolean()
+ : singleton_false();
+ }
+ // The outcome should be non-empty, so we know it must include true here.
+ DCHECK((outcome & kComparisonTrue) != 0);
+ return singleton_true();
+}
+
+Type* OperationTyper::TypeTypeGuard(const Operator* sigma_op, Type* input) {
+ return Type::Intersect(input, TypeGuardTypeOf(sigma_op), zone());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
new file mode 100644
index 0000000000..dcfe0c45ea
--- /dev/null
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -0,0 +1,92 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATION_TYPER_H_
+#define V8_COMPILER_OPERATION_TYPER_H_
+
+#include "src/base/flags.h"
+#include "src/compiler/opcodes.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class RangeType;
+class Type;
+class TypeCache;
+class Zone;
+
+namespace compiler {
+
+class Operator;
+
+class OperationTyper {
+ public:
+ OperationTyper(Isolate* isolate, Zone* zone);
+
+ // Typing Phi.
+ Type* Merge(Type* left, Type* right);
+
+ Type* ToPrimitive(Type* type);
+
+ // Helpers for number operation typing.
+ Type* ToNumber(Type* type);
+ Type* WeakenRange(Type* current_range, Type* previous_range);
+
+// Number unary operators.
+#define DECLARE_METHOD(Name) Type* Name(Type* type);
+ SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
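+ // Each list entry declares one unary method, e.g. the NumberAbs entry
+ // expands to: Type* NumberAbs(Type* type);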
+
+// Number binary operators.
+#define DECLARE_METHOD(Name) Type* Name(Type* lhs, Type* rhs);
+ SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+ Type* TypeTypeGuard(const Operator* sigma_op, Type* input);
+
+ enum ComparisonOutcomeFlags {
+ kComparisonTrue = 1,
+ kComparisonFalse = 2,
+ kComparisonUndefined = 4
+ };
+
+ Type* singleton_false() const { return singleton_false_; }
+ Type* singleton_true() const { return singleton_true_; }
+ Type* singleton_the_hole() const { return singleton_the_hole_; }
+
+ private:
+ typedef base::Flags<ComparisonOutcomeFlags> ComparisonOutcome;
+
+ ComparisonOutcome Invert(ComparisonOutcome);
+ Type* Invert(Type*);
+ Type* FalsifyUndefined(ComparisonOutcome);
+
+ Type* Rangify(Type*);
+ Type* AddRanger(double lhs_min, double lhs_max, double rhs_min,
+ double rhs_max);
+ Type* SubtractRanger(double lhs_min, double lhs_max, double rhs_min,
+ double rhs_max);
+ Type* MultiplyRanger(Type* lhs, Type* rhs);
+
+ Zone* zone() const { return zone_; }
+
+ Zone* const zone_;
+ TypeCache const& cache_;
+
+ Type* infinity_;
+ Type* minus_infinity_;
+ Type* singleton_false_;
+ Type* singleton_true_;
+ Type* singleton_the_hole_;
+ Type* signed32ish_;
+ Type* unsigned32ish_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_OPERATION_TYPER_H_
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 7f38ca7299..68d884d62d 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -20,29 +20,45 @@ bool OperatorProperties::HasContextInput(const Operator* op) {
// static
-int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
+bool OperatorProperties::HasFrameStateInput(const Operator* op) {
switch (op->opcode()) {
+ case IrOpcode::kCheckpoint:
case IrOpcode::kFrameState:
- return 1;
+ return true;
case IrOpcode::kJSCallRuntime: {
const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
- return Linkage::FrameStateInputCount(p.id());
+ return Linkage::NeedsFrameStateInput(p.id());
}
// Strict equality cannot lazily deoptimize.
case IrOpcode::kJSStrictEqual:
case IrOpcode::kJSStrictNotEqual:
- return 0;
+ return false;
- // We record the frame state immediately before and immediately after every
- // construct/function call.
- case IrOpcode::kJSCallConstruct:
- case IrOpcode::kJSCallFunction:
- return 2;
+ // Binary operations
+ case IrOpcode::kJSAdd:
+ case IrOpcode::kJSSubtract:
+ case IrOpcode::kJSMultiply:
+ case IrOpcode::kJSDivide:
+ case IrOpcode::kJSModulus:
+
+ // Bitwise operations
+ case IrOpcode::kJSBitwiseOr:
+ case IrOpcode::kJSBitwiseXor:
+ case IrOpcode::kJSBitwiseAnd:
+
+ // Shift operations
+ case IrOpcode::kJSShiftLeft:
+ case IrOpcode::kJSShiftRight:
+ case IrOpcode::kJSShiftRightLogical:
// Compare operations
case IrOpcode::kJSEqual:
case IrOpcode::kJSNotEqual:
+ case IrOpcode::kJSGreaterThan:
+ case IrOpcode::kJSGreaterThanOrEqual:
+ case IrOpcode::kJSLessThan:
+ case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSHasProperty:
case IrOpcode::kJSInstanceOf:
@@ -54,6 +70,15 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
+ // Property access operations
+ case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSStoreNamed:
+ case IrOpcode::kJSLoadProperty:
+ case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSLoadGlobal:
+ case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSDeleteProperty:
+
// Context operations
case IrOpcode::kJSCreateScriptContext:
@@ -65,51 +90,19 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
+ // Call operations
+ case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallFunction:
+
// Misc operations
case IrOpcode::kJSConvertReceiver:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
- case IrOpcode::kJSDeleteProperty:
- return 1;
-
- // We record the frame state immediately before and immediately after
- // every property or global variable access.
- case IrOpcode::kJSLoadNamed:
- case IrOpcode::kJSStoreNamed:
- case IrOpcode::kJSLoadProperty:
- case IrOpcode::kJSStoreProperty:
- case IrOpcode::kJSLoadGlobal:
- case IrOpcode::kJSStoreGlobal:
- return 2;
-
- // Binary operators that can deopt in the middle the operation (e.g.,
- // as a result of lazy deopt in ToNumber conversion) need a second frame
- // state so that we can resume before the operation.
- case IrOpcode::kJSMultiply:
- case IrOpcode::kJSAdd:
- case IrOpcode::kJSBitwiseAnd:
- case IrOpcode::kJSBitwiseOr:
- case IrOpcode::kJSBitwiseXor:
- case IrOpcode::kJSDivide:
- case IrOpcode::kJSModulus:
- case IrOpcode::kJSShiftLeft:
- case IrOpcode::kJSShiftRight:
- case IrOpcode::kJSShiftRightLogical:
- case IrOpcode::kJSSubtract:
- return 2;
-
- // Compare operators that can deopt in the middle the operation (e.g.,
- // as a result of lazy deopt in ToNumber conversion) need a second frame
- // state so that we can resume before the operation.
- case IrOpcode::kJSGreaterThan:
- case IrOpcode::kJSGreaterThanOrEqual:
- case IrOpcode::kJSLessThan:
- case IrOpcode::kJSLessThanOrEqual:
- return 2;
+ return true;
default:
- return 0;
+ return false;
}
}
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index e7ecd931ea..4fe5f59d74 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -14,14 +14,17 @@ namespace compiler {
// Forward declarations.
class Operator;
-
class OperatorProperties final {
public:
static bool HasContextInput(const Operator* op);
static int GetContextInputCount(const Operator* op) {
return HasContextInput(op) ? 1 : 0;
}
- static int GetFrameStateInputCount(const Operator* op);
+
+ static bool HasFrameStateInput(const Operator* op);
+ static int GetFrameStateInputCount(const Operator* op) {
+ return HasFrameStateInput(op) ? 1 : 0;
+ }
static int GetTotalInputCount(const Operator* op);
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index ae10348422..fa1b2d89ca 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -44,8 +44,22 @@ std::ostream& operator<<(std::ostream& os, const Operator& op) {
return os;
}
+void Operator::PrintToImpl(std::ostream& os, PrintVerbosity verbose) const {
+ os << mnemonic();
+}
+
+void Operator::PrintPropsTo(std::ostream& os) const {
+ std::string separator = "";
-void Operator::PrintTo(std::ostream& os) const { os << mnemonic(); }
+#define PRINT_PROP_IF_SET(name) \
+ if (HasProperty(Operator::k##name)) { \
+ os << separator; \
+ os << #name; \
+ separator = ", "; \
+ }
+ OPERATOR_PROPERTY_LIST(PRINT_PROP_IF_SET)
+#undef PRINT_PROP_IF_SET
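+ // The macro expansion above prints the set property bits in list order,
+ // e.g. "Idempotent, NoRead, NoWrite, NoThrow, NoDeopt" for a kPure operator.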
+}
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index fa85d599cd..b6ec2c618c 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -36,20 +36,27 @@ class Operator : public ZoneObject {
// transformations for nodes that have this operator.
enum Property {
kNoProperties = 0,
- kReducible = 1 << 0, // Participates in strength reduction.
- kCommutative = 1 << 1, // OP(a, b) == OP(b, a) for all inputs.
- kAssociative = 1 << 2, // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
- kIdempotent = 1 << 3, // OP(a); OP(a) == OP(a).
- kNoRead = 1 << 4, // Has no scheduling dependency on Effects
- kNoWrite = 1 << 5, // Does not modify any Effects and thereby
+ kCommutative = 1 << 0, // OP(a, b) == OP(b, a) for all inputs.
+ kAssociative = 1 << 1, // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
+ kIdempotent = 1 << 2, // OP(a); OP(a) == OP(a).
+ kNoRead = 1 << 3, // Has no scheduling dependency on Effects
+ kNoWrite = 1 << 4, // Does not modify any Effects and thereby
// create new scheduling dependencies.
- kNoThrow = 1 << 6, // Can never generate an exception.
+ kNoThrow = 1 << 5, // Can never generate an exception.
+ kNoDeopt = 1 << 6, // Can never generate an eager deoptimization exit.
kFoldable = kNoRead | kNoWrite,
- kKontrol = kFoldable | kNoThrow,
- kEliminatable = kNoWrite | kNoThrow,
- kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
+ kKontrol = kNoDeopt | kFoldable | kNoThrow,
+ kEliminatable = kNoDeopt | kNoWrite | kNoThrow,
+ kPure = kNoDeopt | kNoRead | kNoWrite | kNoThrow | kIdempotent
};
+
+// List of all bits, for the visualizer.
+#define OPERATOR_PROPERTY_LIST(V) \
+ V(Commutative) \
+ V(Associative) V(Idempotent) V(NoRead) V(NoWrite) V(NoThrow) V(NoDeopt)
+
typedef base::Flags<Property, uint8_t> Properties;
+ enum class PrintVerbosity { kVerbose, kSilent };
// Constructor.
Operator(Opcode opcode, Properties properties, const char* mnemonic,
@@ -111,11 +118,20 @@ class Operator : public ZoneObject {
}
// TODO(titzer): API for input and output types, for typechecking graph.
- protected:
+
// Print the full operator into the given stream, including any
// static parameters. Useful for debugging and visualizing the IR.
- virtual void PrintTo(std::ostream& os) const;
- friend std::ostream& operator<<(std::ostream& os, const Operator& op);
+ void PrintTo(std::ostream& os,
+ PrintVerbosity verbose = PrintVerbosity::kVerbose) const {
+ // We cannot make PrintTo virtual, because default arguments to virtual
+ // methods are banned in the style guide.
+ return PrintToImpl(os, verbose);
+ }
+
+ void PrintPropsTo(std::ostream& os) const;
+
+ protected:
+ virtual void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const;
private:
Opcode opcode_;
@@ -172,14 +188,19 @@ class Operator1 : public Operator {
size_t HashCode() const final {
return base::hash_combine(this->opcode(), this->hash_(this->parameter()));
}
- virtual void PrintParameter(std::ostream& os) const {
- os << "[" << this->parameter() << "]";
+ // For most parameter types, we have only a verbose way to print them,
+ // namely ostream << parameter. But for some types it is particularly useful
+ // to have a shorter way to print them for the node labels in Turbolizer.
+ // The following method can be overridden to provide both a concise and a
+ // verbose way of printing a parameter.
+
+ virtual void PrintParameter(std::ostream& os, PrintVerbosity verbose) const {
+ os << "[" << parameter() << "]";
}
- protected:
- void PrintTo(std::ostream& os) const final {
+ virtual void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const {
os << mnemonic();
- PrintParameter(os);
+ PrintParameter(os, verbose);
}
private:
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 55431c201c..187e61230c 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -24,10 +24,16 @@ namespace internal {
namespace compiler {
OsrHelper::OsrHelper(CompilationInfo* info)
- : parameter_count_(info->scope()->num_parameters()),
- stack_slot_count_(info->scope()->num_stack_slots() +
- info->osr_expr_stack_height()) {}
-
+ : parameter_count_(
+ info->is_optimizing_from_bytecode()
+ ? info->shared_info()->bytecode_array()->parameter_count()
+ : info->scope()->num_parameters()),
+ stack_slot_count_(
+ info->is_optimizing_from_bytecode()
+ ? info->shared_info()->bytecode_array()->register_count() +
+ InterpreterFrameConstants::kExtraSlotCount
+ : info->scope()->num_stack_slots() +
+ info->osr_expr_stack_height()) {}
#ifdef DEBUG
#define TRACE_COND (FLAG_trace_turbo_graph && FLAG_trace_osr)
@@ -78,8 +84,8 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
// Copy all nodes.
- for (size_t i = 0; i < all.live.size(); i++) {
- Node* orig = all.live[i];
+ for (size_t i = 0; i < all.reachable.size(); i++) {
+ Node* orig = all.reachable[i];
Node* copy = mapping->at(orig->id());
if (copy != sentinel) {
// Mapping already exists.
@@ -107,7 +113,7 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
// Fix missing inputs.
- for (Node* orig : all.live) {
+ for (Node* orig : all.reachable) {
Node* copy = mapping->at(orig->id());
for (int j = 0; j < copy->InputCount(); j++) {
if (copy->InputAt(j) == sentinel) {
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index b98f837ee9..5b97abe5eb 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "src/compiler.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/zone-pool.h"
@@ -12,8 +14,8 @@ namespace compiler {
void PipelineStatistics::CommonStats::Begin(
PipelineStatistics* pipeline_stats) {
- DCHECK(scope_.is_empty());
- scope_.Reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
+ DCHECK(!scope_);
+ scope_.reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
timer_.Start();
outer_zone_initial_size_ = pipeline_stats->OuterZoneSize();
allocated_bytes_at_start_ =
@@ -26,7 +28,7 @@ void PipelineStatistics::CommonStats::Begin(
void PipelineStatistics::CommonStats::End(
PipelineStatistics* pipeline_stats,
CompilationStatistics::BasicStats* diff) {
- DCHECK(!scope_.is_empty());
+ DCHECK(scope_);
diff->function_name_ = pipeline_stats->function_name_;
diff->delta_ = timer_.Elapsed();
size_t outer_zone_diff =
@@ -36,7 +38,7 @@ void PipelineStatistics::CommonStats::End(
diff->max_allocated_bytes_ + allocated_bytes_at_start_;
diff->total_allocated_bytes_ =
outer_zone_diff + scope_->GetTotalAllocatedBytes();
- scope_.Reset(nullptr);
+ scope_.reset();
timer_.Stop();
}
@@ -52,7 +54,7 @@ PipelineStatistics::PipelineStatistics(CompilationInfo* info,
phase_name_(nullptr) {
if (info->has_shared_info()) {
source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
- base::SmartArrayPointer<char> name =
+ std::unique_ptr<char[]> name =
info->shared_info()->DebugName()->ToCString();
function_name_ = name.get();
}
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 2b6563da40..a9931ebed7 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_PIPELINE_STATISTICS_H_
#define V8_COMPILER_PIPELINE_STATISTICS_H_
+#include <memory>
#include <string>
+#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-statistics.h"
#include "src/compiler/zone-pool.h"
@@ -22,6 +24,7 @@ class PipelineStatistics : public Malloced {
~PipelineStatistics();
void BeginPhaseKind(const char* phase_kind_name);
+ void EndPhaseKind();
private:
size_t OuterZoneSize() {
@@ -36,17 +39,19 @@ class PipelineStatistics : public Malloced {
void End(PipelineStatistics* pipeline_stats,
CompilationStatistics::BasicStats* diff);
- base::SmartPointer<ZonePool::StatsScope> scope_;
+ std::unique_ptr<ZonePool::StatsScope> scope_;
base::ElapsedTimer timer_;
size_t outer_zone_initial_size_;
size_t allocated_bytes_at_start_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CommonStats);
};
- bool InPhaseKind() { return !phase_kind_stats_.scope_.is_empty(); }
- void EndPhaseKind();
+ bool InPhaseKind() { return !!phase_kind_stats_.scope_; }
friend class PhaseScope;
- bool InPhase() { return !phase_stats_.scope_.is_empty(); }
+ bool InPhase() { return !!phase_stats_.scope_; }
void BeginPhase(const char* name);
void EndPhase();
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 1d7e967cc7..ba7aa96085 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -5,6 +5,7 @@
#include "src/compiler/pipeline.h"
#include <fstream> // NOLINT(readability/streams)
+#include <memory>
#include <sstream>
#include "src/base/adapters.h"
@@ -14,20 +15,20 @@
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
-#include "src/compiler/change-lowering.h"
+#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
-#include "src/compiler/escape-analysis.h"
+#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/escape-analysis.h"
#include "src/compiler/frame-elider.h"
#include "src/compiler/graph-replay.h"
#include "src/compiler/graph-trimmer.h"
#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/greedy-allocator.h"
-#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
+#include "src/compiler/instruction.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-context-specialization.h"
@@ -44,25 +45,31 @@
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
+#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/memory-optimizer.h"
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
-#include "src/compiler/register-allocator.h"
+#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/register-allocator-verifier.h"
+#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/store-store-elimination.h"
#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
#include "src/compiler/zone-pool.h"
+#include "src/isolate-inl.h"
#include "src/ostreams.h"
+#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
#include "src/type-info.h"
#include "src/utils.h"
@@ -78,97 +85,74 @@ class PipelineData {
PipelineStatistics* pipeline_statistics)
: isolate_(info->isolate()),
info_(info),
+ debug_name_(info_->GetDebugName()),
outer_zone_(info_->zone()),
zone_pool_(zone_pool),
pipeline_statistics_(pipeline_statistics),
- compilation_failed_(false),
- code_(Handle<Code>::null()),
graph_zone_scope_(zone_pool_),
graph_zone_(graph_zone_scope_.zone()),
- graph_(nullptr),
- loop_assignment_(nullptr),
- simplified_(nullptr),
- machine_(nullptr),
- common_(nullptr),
- javascript_(nullptr),
- jsgraph_(nullptr),
- schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
- sequence_(nullptr),
- frame_(nullptr),
register_allocation_zone_scope_(zone_pool_),
- register_allocation_zone_(register_allocation_zone_scope_.zone()),
- register_allocation_data_(nullptr) {
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {
PhaseScope scope(pipeline_statistics, "init pipeline data");
graph_ = new (graph_zone_) Graph(graph_zone_);
- source_positions_.Reset(new SourcePositionTable(graph_));
+ source_positions_ = new (graph_zone_) SourcePositionTable(graph_);
simplified_ = new (graph_zone_) SimplifiedOperatorBuilder(graph_zone_);
machine_ = new (graph_zone_) MachineOperatorBuilder(
graph_zone_, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags());
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
common_ = new (graph_zone_) CommonOperatorBuilder(graph_zone_);
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
}
+ // For WASM compile entry point.
+ PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+ SourcePositionTable* source_positions)
+ : isolate_(info->isolate()),
+ info_(info),
+ debug_name_(info_->GetDebugName()),
+ zone_pool_(zone_pool),
+ graph_zone_scope_(zone_pool_),
+ graph_(graph),
+ source_positions_(source_positions),
+ instruction_zone_scope_(zone_pool_),
+ instruction_zone_(instruction_zone_scope_.zone()),
+ register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+
// For machine graph testing entry point.
PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
Schedule* schedule)
: isolate_(info->isolate()),
info_(info),
- outer_zone_(nullptr),
+ debug_name_(info_->GetDebugName()),
zone_pool_(zone_pool),
- pipeline_statistics_(nullptr),
- compilation_failed_(false),
- code_(Handle<Code>::null()),
graph_zone_scope_(zone_pool_),
- graph_zone_(nullptr),
graph_(graph),
- source_positions_(new SourcePositionTable(graph_)),
- loop_assignment_(nullptr),
- simplified_(nullptr),
- machine_(nullptr),
- common_(nullptr),
- javascript_(nullptr),
- jsgraph_(nullptr),
+ source_positions_(new (info->zone()) SourcePositionTable(graph_)),
schedule_(schedule),
instruction_zone_scope_(zone_pool_),
instruction_zone_(instruction_zone_scope_.zone()),
- sequence_(nullptr),
- frame_(nullptr),
register_allocation_zone_scope_(zone_pool_),
- register_allocation_zone_(register_allocation_zone_scope_.zone()),
- register_allocation_data_(nullptr) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
// For register allocation testing entry point.
PipelineData(ZonePool* zone_pool, CompilationInfo* info,
InstructionSequence* sequence)
: isolate_(info->isolate()),
info_(info),
- outer_zone_(nullptr),
+ debug_name_(info_->GetDebugName()),
zone_pool_(zone_pool),
- pipeline_statistics_(nullptr),
- compilation_failed_(false),
- code_(Handle<Code>::null()),
graph_zone_scope_(zone_pool_),
- graph_zone_(nullptr),
- graph_(nullptr),
- loop_assignment_(nullptr),
- simplified_(nullptr),
- machine_(nullptr),
- common_(nullptr),
- javascript_(nullptr),
- jsgraph_(nullptr),
- schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
instruction_zone_(sequence->zone()),
sequence_(sequence),
- frame_(nullptr),
register_allocation_zone_scope_(zone_pool_),
- register_allocation_zone_(register_allocation_zone_scope_.zone()),
- register_allocation_data_(nullptr) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
~PipelineData() {
DeleteRegisterAllocationZone();
@@ -193,9 +177,7 @@ class PipelineData {
Zone* graph_zone() const { return graph_zone_; }
Graph* graph() const { return graph_; }
- SourcePositionTable* source_positions() const {
- return source_positions_.get();
- }
+ SourcePositionTable* source_positions() const { return source_positions_; }
MachineOperatorBuilder* machine() const { return machine_; }
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
@@ -224,6 +206,7 @@ class PipelineData {
DCHECK(!schedule_);
schedule_ = schedule;
}
+ void reset_schedule() { schedule_ = nullptr; }
Zone* instruction_zone() const { return instruction_zone_; }
InstructionSequence* sequence() const { return sequence_; }
@@ -234,14 +217,24 @@ class PipelineData {
return register_allocation_data_;
}
+ BasicBlockProfiler::Data* profiler_data() const { return profiler_data_; }
+ void set_profiler_data(BasicBlockProfiler::Data* profiler_data) {
+ profiler_data_ = profiler_data;
+ }
+
+ std::string const& source_position_output() const {
+ return source_position_output_;
+ }
+ void set_source_position_output(std::string const& source_position_output) {
+ source_position_output_ = source_position_output;
+ }
+
void DeleteGraphZone() {
- // Destroy objects with destructors first.
- source_positions_.Reset(nullptr);
if (graph_zone_ == nullptr) return;
- // Destroy zone and clear pointers.
graph_zone_scope_.Destroy();
graph_zone_ = nullptr;
graph_ = nullptr;
+ source_positions_ = nullptr;
loop_assignment_ = nullptr;
type_hint_analysis_ = nullptr;
simplified_ = nullptr;
@@ -288,42 +281,53 @@ class PipelineData {
if (descriptor != nullptr) {
fixed_frame_size = CalculateFixedFrameSize(descriptor);
}
- frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+ frame_ = new (instruction_zone()) Frame(fixed_frame_size);
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
- CallDescriptor* descriptor,
- const char* debug_name) {
+ CallDescriptor* descriptor) {
DCHECK(register_allocation_data_ == nullptr);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), debug_name);
+ sequence(), debug_name_.get());
+ }
+
+ void BeginPhaseKind(const char* phase_kind_name) {
+ if (pipeline_statistics() != nullptr) {
+ pipeline_statistics()->BeginPhaseKind(phase_kind_name);
+ }
+ }
+
+ void EndPhaseKind() {
+ if (pipeline_statistics() != nullptr) {
+ pipeline_statistics()->EndPhaseKind();
+ }
}
private:
- Isolate* isolate_;
- CompilationInfo* info_;
- Zone* outer_zone_;
+ Isolate* const isolate_;
+ CompilationInfo* const info_;
+ std::unique_ptr<char[]> debug_name_;
+ Zone* outer_zone_ = nullptr;
ZonePool* const zone_pool_;
- PipelineStatistics* pipeline_statistics_;
- bool compilation_failed_;
- Handle<Code> code_;
+ PipelineStatistics* pipeline_statistics_ = nullptr;
+ bool compilation_failed_ = false;
+ Handle<Code> code_ = Handle<Code>::null();
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
ZonePool::Scope graph_zone_scope_;
- Zone* graph_zone_;
- Graph* graph_;
- // TODO(dcarney): make this into a ZoneObject.
- base::SmartPointer<SourcePositionTable> source_positions_;
- LoopAssignmentAnalysis* loop_assignment_;
+ Zone* graph_zone_ = nullptr;
+ Graph* graph_ = nullptr;
+ SourcePositionTable* source_positions_ = nullptr;
+ LoopAssignmentAnalysis* loop_assignment_ = nullptr;
TypeHintAnalysis* type_hint_analysis_ = nullptr;
- SimplifiedOperatorBuilder* simplified_;
- MachineOperatorBuilder* machine_;
- CommonOperatorBuilder* common_;
- JSOperatorBuilder* javascript_;
- JSGraph* jsgraph_;
- Schedule* schedule_;
+ SimplifiedOperatorBuilder* simplified_ = nullptr;
+ MachineOperatorBuilder* machine_ = nullptr;
+ CommonOperatorBuilder* common_ = nullptr;
+ JSOperatorBuilder* javascript_ = nullptr;
+ JSGraph* jsgraph_ = nullptr;
+ Schedule* schedule_ = nullptr;
// All objects in the following group of fields are allocated in
// instruction_zone_. They are all set to nullptr when the instruction_zone_
@@ -331,15 +335,21 @@ class PipelineData {
// destroyed.
ZonePool::Scope instruction_zone_scope_;
Zone* instruction_zone_;
- InstructionSequence* sequence_;
- Frame* frame_;
+ InstructionSequence* sequence_ = nullptr;
+ Frame* frame_ = nullptr;
// All objects in the following group of fields are allocated in
// register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
ZonePool::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
- RegisterAllocationData* register_allocation_data_;
+ RegisterAllocationData* register_allocation_data_ = nullptr;
+
+ // Basic block profiling support.
+ BasicBlockProfiler::Data* profiler_data_ = nullptr;
+
+ // Source position output for --trace-turbo.
+ std::string source_position_output_;
int CalculateFixedFrameSize(CallDescriptor* descriptor) {
if (descriptor->IsJSFunctionCall()) {
@@ -354,6 +364,38 @@ class PipelineData {
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
+class PipelineImpl final {
+ public:
+ explicit PipelineImpl(PipelineData* data) : data_(data) {}
+
+ // Helpers for executing pipeline phases.
+ template <typename Phase>
+ void Run();
+ template <typename Phase, typename Arg0>
+ void Run(Arg0 arg_0);
+ template <typename Phase, typename Arg0, typename Arg1>
+ void Run(Arg0 arg_0, Arg1 arg_1);
+
+ // Run the graph creation and initial optimization passes.
+ bool CreateGraph();
+
+ // Run the concurrent optimization passes.
+ bool OptimizeGraph(Linkage* linkage);
+
+ // Perform the actual code generation and return handle to a code object.
+ Handle<Code> GenerateCode(Linkage* linkage);
+
+ bool ScheduleAndSelectInstructions(Linkage* linkage);
+ void RunPrintAndVerify(const char* phase, bool untyped = false);
+ Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
+ void AllocateRegisters(const RegisterConfiguration* config,
+ CallDescriptor* descriptor, bool run_verifier);
+
+ CompilationInfo* info() const;
+ Isolate* isolate() const;
+
+ PipelineData* const data_;
+};
namespace {
@@ -363,26 +405,30 @@ struct TurboCfgFile : public std::ofstream {
std::ios_base::app) {}
};
+struct TurboJsonFile : public std::ofstream {
+ TurboJsonFile(CompilationInfo* info, std::ios_base::openmode mode)
+ : std::ofstream(GetVisualizerLogFileName(info, nullptr, "json").get(),
+ mode) {}
+};
void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
- json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
- std::stringstream schedule_stream;
- schedule_stream << *schedule;
- std::string schedule_string(schedule_stream.str());
- for (const auto& c : schedule_string) {
- json_of << AsEscapedUC16ForJSON(c);
- }
- json_of << "\"},\n";
- fclose(json_file);
+ AllowHandleDereference allow_deref;
+ TurboJsonFile json_of(info, std::ios_base::app);
+ json_of << "{\"name\":\"Schedule\",\"type\":\"schedule\",\"data\":\"";
+ std::stringstream schedule_stream;
+ schedule_stream << *schedule;
+ std::string schedule_string(schedule_stream.str());
+ for (const auto& c : schedule_string) {
+ json_of << AsEscapedUC16ForJSON(c);
}
+ json_of << "\"},\n";
+ }
+ if (FLAG_trace_turbo_graph || FLAG_trace_turbo_scheduler) {
+ AllowHandleDereference allow_deref;
+ OFStream os(stdout);
+ os << "-- Schedule --------------------------------------\n" << *schedule;
}
- if (!FLAG_trace_turbo_graph && !FLAG_trace_turbo_scheduler) return;
- OFStream os(stdout);
- os << "-- Schedule --------------------------------------\n" << *schedule;
}
@@ -476,32 +522,218 @@ class PipelineRunScope {
ZonePool::Scope zone_scope_;
};
+PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
+ ZonePool* zone_pool) {
+ PipelineStatistics* pipeline_statistics = nullptr;
+
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics = new PipelineStatistics(info, zone_pool);
+ pipeline_statistics->BeginPhaseKind("initializing");
+ }
+
+ if (FLAG_trace_turbo) {
+ TurboJsonFile json_of(info, std::ios_base::trunc);
+ Handle<Script> script = info->script();
+ std::unique_ptr<char[]> function_name = info->GetDebugName();
+ int pos = info->shared_info()->start_position();
+ json_of << "{\"function\":\"" << function_name.get()
+ << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
+ Isolate* isolate = info->isolate();
+ if (!script->IsUndefined(isolate) &&
+ !script->source()->IsUndefined(isolate)) {
+ DisallowHeapAllocation no_allocation;
+ int start = info->shared_info()->start_position();
+ int len = info->shared_info()->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start, len);
+ for (const auto& c : source) {
+ json_of << AsEscapedUC16ForJSON(c);
+ }
+ }
+ json_of << "\",\n\"phases\":[";
+ }
+
+ return pipeline_statistics;
+}
+
} // namespace
+class PipelineCompilationJob final : public CompilationJob {
+ public:
+ PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
+ // Note that the CompilationInfo is not initialized at the time we pass it
+ // to the CompilationJob constructor, but it is not dereferenced there.
+ : CompilationJob(&info_, "TurboFan"),
+ zone_(isolate->allocator()),
+ zone_pool_(isolate->allocator()),
+ parse_info_(&zone_, function),
+ info_(&parse_info_, function),
+ pipeline_statistics_(CreatePipelineStatistics(info(), &zone_pool_)),
+ data_(&zone_pool_, info(), pipeline_statistics_.get()),
+ pipeline_(&data_),
+ linkage_(nullptr) {}
+
+ protected:
+ Status PrepareJobImpl() final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl() final;
+
+ private:
+ Zone zone_;
+ ZonePool zone_pool_;
+ ParseInfo parse_info_;
+ CompilationInfo info_;
+ std::unique_ptr<PipelineStatistics> pipeline_statistics_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+ Linkage* linkage_;
+
+ DISALLOW_COPY_AND_ASSIGN(PipelineCompilationJob);
+};
+
+PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
+ if (info()->shared_info()->asm_function()) {
+ if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
+ info()->MarkAsFunctionContextSpecializing();
+ } else {
+ if (!FLAG_always_opt) {
+ info()->MarkAsBailoutOnUninitialized();
+ }
+ if (FLAG_native_context_specialization) {
+ info()->MarkAsNativeContextSpecializing();
+ }
+ }
+ if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
+ info()->MarkAsDeoptimizationEnabled();
+ }
+ if (!info()->is_optimizing_from_bytecode()) {
+ if (FLAG_inline_accessors) {
+ info()->MarkAsAccessorInliningEnabled();
+ }
+ if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
+ info()->MarkAsTypeFeedbackEnabled();
+ }
+ if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
+ }
+
+ // TODO(mstarzinger): Hack to ensure that certain call descriptors are
+ // initialized on the main thread, since they are needed off-thread by the
+ // effect control linearizer.
+ CodeFactory::CopyFastSmiOrObjectElements(info()->isolate());
+ CodeFactory::GrowFastDoubleElements(info()->isolate());
+ CodeFactory::GrowFastSmiOrObjectElements(info()->isolate());
+ CodeFactory::ToNumber(info()->isolate());
+
+ linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
+
+ if (!pipeline_.CreateGraph()) {
+ if (isolate()->has_pending_exception()) return FAILED; // Stack overflowed.
+ return AbortOptimization(kGraphBuildingFailed);
+ }
+
+ return SUCCEEDED;
+}
+
+PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
+ if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
+ return SUCCEEDED;
+}
+
+PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
+ Handle<Code> code = pipeline_.GenerateCode(linkage_);
+ if (code.is_null()) {
+ if (info()->bailout_reason() == kNoReason) {
+ return AbortOptimization(kCodeGenerationFailed);
+ }
+ return FAILED;
+ }
+ info()->dependencies()->Commit(code);
+ info()->SetCode(code);
+ if (info()->is_deoptimization_enabled()) {
+ info()->context()->native_context()->AddOptimizedCode(*code);
+ RegisterWeakObjectsInOptimizedCode(code);
+ }
+ return SUCCEEDED;
+}
+
+class PipelineWasmCompilationJob final : public CompilationJob {
+ public:
+ explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
+ CallDescriptor* descriptor,
+ SourcePositionTable* source_positions)
+ : CompilationJob(info, "TurboFan", State::kReadyToExecute),
+ zone_pool_(info->isolate()->allocator()),
+ data_(&zone_pool_, info, graph, source_positions),
+ pipeline_(&data_),
+ linkage_(descriptor) {}
+
+ protected:
+ Status PrepareJobImpl() final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl() final;
+
+ private:
+ ZonePool zone_pool_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+ Linkage linkage_;
+};
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::PrepareJobImpl() {
+ UNREACHABLE(); // Prepare should always be skipped for WasmCompilationJob.
+ return SUCCEEDED;
+}
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::ExecuteJobImpl() {
+ if (FLAG_trace_turbo) {
+ TurboJsonFile json_of(info(), std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info()->GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+
+ pipeline_.RunPrintAndVerify("Machine", true);
+
+ if (!pipeline_.ScheduleAndSelectInstructions(&linkage_)) return FAILED;
+ return SUCCEEDED;
+}
+
+PipelineWasmCompilationJob::Status
+PipelineWasmCompilationJob::FinalizeJobImpl() {
+ pipeline_.GenerateCode(&linkage_);
+ return SUCCEEDED;
+}
template <typename Phase>
-void Pipeline::Run() {
+void PipelineImpl::Run() {
PipelineRunScope scope(this->data_, Phase::phase_name());
Phase phase;
phase.Run(this->data_, scope.zone());
}
-
template <typename Phase, typename Arg0>
-void Pipeline::Run(Arg0 arg_0) {
+void PipelineImpl::Run(Arg0 arg_0) {
PipelineRunScope scope(this->data_, Phase::phase_name());
Phase phase;
phase.Run(this->data_, scope.zone(), arg_0);
}
+template <typename Phase, typename Arg0, typename Arg1>
+void PipelineImpl::Run(Arg0 arg_0, Arg1 arg_1) {
+ PipelineRunScope scope(this->data_, Phase::phase_name());
+ Phase phase;
+ phase.Run(this->data_, scope.zone(), arg_0, arg_1);
+}
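Each phase is a plain struct with a phase_name() and a Run() taking the PipelineData plus a per-phase temporary zone; the Run<Phase>() helpers above wrap the call in a PipelineRunScope so statistics and zone lifetime are handled uniformly. A minimal sketch of a conforming phase (the name and body are hypothetical, modeled on the trimming phases below):

    struct MyCleanupPhase {
      static const char* phase_name() { return "my cleanup"; }
      void Run(PipelineData* data, Zone* temp_zone) {
        // temp_zone is discarded when the PipelineRunScope ends, so it is
        // only suitable for scratch allocations such as these work lists.
        GraphTrimmer trimmer(temp_zone, data->graph());
        NodeVector roots(temp_zone);
        data->jsgraph()->GetCachedNodes(&roots);
        trimmer.TrimGraph(roots.begin(), roots.end());
      }
    };
    // Invoked from a PipelineImpl method as: Run<MyCleanupPhase>();
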
struct LoopAssignmentAnalysisPhase {
static const char* phase_name() { return "loop assignment analysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
- LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
- data->set_loop_assignment(loop_assignment);
+ if (!data->info()->is_optimizing_from_bytecode()) {
+ AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
+ LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
+ data->set_loop_assignment(loop_assignment);
+ }
}
};
@@ -510,10 +742,12 @@ struct TypeHintAnalysisPhase {
static const char* phase_name() { return "type hint analysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- TypeHintAnalyzer analyzer(data->graph_zone());
- Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
- TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
- data->set_type_hint_analysis(type_hint_analysis);
+ if (data->info()->is_type_feedback_enabled()) {
+ TypeHintAnalyzer analyzer(data->graph_zone());
+ Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
+ TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
+ data->set_type_hint_analysis(type_hint_analysis);
+ }
}
};
@@ -525,7 +759,7 @@ struct GraphBuilderPhase {
bool stack_check = !data->info()->IsStub();
bool succeeded = false;
- if (data->info()->shared_info()->HasBytecodeArray()) {
+ if (data->info()->is_optimizing_from_bytecode()) {
BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
data->jsgraph());
succeeded = graph_builder.CreateGraph();
@@ -569,6 +803,9 @@ struct InliningPhase {
data->info()->dependencies());
JSNativeContextSpecialization::Flags flags =
JSNativeContextSpecialization::kNoFlags;
+ if (data->info()->is_accessor_inlining_enabled()) {
+ flags |= JSNativeContextSpecialization::kAccessorInliningEnabled;
+ }
if (data->info()->is_bailout_on_uninitialized()) {
flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
}
@@ -583,6 +820,11 @@ struct InliningPhase {
? JSInliningHeuristic::kGeneralInlining
: JSInliningHeuristic::kRestrictedInlining,
temp_zone, data->info(), data->jsgraph());
+ JSIntrinsicLowering intrinsic_lowering(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSIntrinsicLowering::kDeoptimizationEnabled
+ : JSIntrinsicLowering::kDeoptimizationDisabled);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
if (data->info()->is_frame_specializing()) {
@@ -593,8 +835,11 @@ struct InliningPhase {
}
AddReducer(data, &graph_reducer, &native_context_specialization);
AddReducer(data, &graph_reducer, &context_specialization);
+ AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &call_reducer);
- AddReducer(data, &graph_reducer, &inlining);
+ if (!data->info()->is_optimizing_from_bytecode()) {
+ AddReducer(data, &graph_reducer, &inlining);
+ }
graph_reducer.ReduceGraph();
}
};
@@ -606,10 +851,38 @@ struct TyperPhase {
void Run(PipelineData* data, Zone* temp_zone, Typer* typer) {
NodeVector roots(temp_zone);
data->jsgraph()->GetCachedNodes(&roots);
- typer->Run(roots);
+ LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
+ data->common(), temp_zone);
+ if (FLAG_turbo_loop_variable) induction_vars.Run();
+ typer->Run(roots, &induction_vars);
}
};
+#ifdef DEBUG
+
+struct UntyperPhase {
+ static const char* phase_name() { return "untyper"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ class RemoveTypeReducer final : public Reducer {
+ public:
+ Reduction Reduce(Node* node) final {
+ if (NodeProperties::IsTyped(node)) {
+ NodeProperties::RemoveType(node);
+ return Changed(node);
+ }
+ return NoChange();
+ }
+ };
+
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ RemoveTypeReducer remove_type_reducer;
+ AddReducer(data, &graph_reducer, &remove_type_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+#endif // DEBUG
struct OsrDeconstructionPhase {
static const char* phase_name() { return "OSR deconstruction"; }
@@ -628,9 +901,12 @@ struct TypedLoweringPhase {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
- LoadElimination load_elimination(&graph_reducer, data->graph(),
- data->common());
- JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+ JSBuiltinReducer builtin_reducer(
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_deoptimization_enabled()
+ ? JSBuiltinReducer::kDeoptimizationEnabled
+ : JSBuiltinReducer::kNoFlags,
+ data->info()->dependencies());
MaybeHandle<LiteralsArray> literals_array =
data->info()->is_native_context_specializing()
? handle(data->info()->closure()->literals(), data->isolate())
@@ -642,18 +918,11 @@ struct TypedLoweringPhase {
if (data->info()->is_deoptimization_enabled()) {
typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
}
- if (data->info()->shared_info()->HasBytecodeArray()) {
- typed_lowering_flags |= JSTypedLowering::kDisableBinaryOpReduction;
- }
JSTypedLowering typed_lowering(&graph_reducer, data->info()->dependencies(),
typed_lowering_flags, data->jsgraph(),
temp_zone);
- JSIntrinsicLowering intrinsic_lowering(
- &graph_reducer, data->jsgraph(),
- data->info()->is_deoptimization_enabled()
- ? JSIntrinsicLowering::kDeoptimizationEnabled
- : JSIntrinsicLowering::kDeoptimizationDisabled);
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+ SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+ CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -662,31 +931,14 @@ struct TypedLoweringPhase {
AddReducer(data, &graph_reducer, &create_lowering);
}
AddReducer(data, &graph_reducer, &typed_lowering);
- AddReducer(data, &graph_reducer, &intrinsic_lowering);
- AddReducer(data, &graph_reducer, &load_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
graph_reducer.ReduceGraph();
}
};
-struct BranchEliminationPhase {
- static const char* phase_name() { return "branch condition elimination"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
- AddReducer(data, &graph_reducer, &branch_condition_elimination);
- AddReducer(data, &graph_reducer, &dead_code_elimination);
- graph_reducer.ReduceGraph();
- }
-};
-
-
struct EscapeAnalysisPhase {
static const char* phase_name() { return "escape analysis"; }
@@ -697,39 +949,73 @@ struct EscapeAnalysisPhase {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
&escape_analysis, temp_zone);
- escape_reducer.SetExistsVirtualAllocate(
- escape_analysis.ExistsVirtualAllocate());
AddReducer(data, &graph_reducer, &escape_reducer);
graph_reducer.ReduceGraph();
escape_reducer.VerifyReplacement();
}
};
-
-struct SimplifiedLoweringPhase {
- static const char* phase_name() { return "simplified lowering"; }
+struct RepresentationSelectionPhase {
+ static const char* phase_name() { return "representation selection"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), temp_zone,
data->source_positions());
lowering.LowerAllNodes();
+ }
+};
- // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
- if (lowering.abort_compilation_) {
- data->set_compilation_failed();
- return;
- }
+struct LoopPeelingPhase {
+ static const char* phase_name() { return "loop peeling"; }
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
+ LoopTree* loop_tree =
+ LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
+ LoopPeeler::PeelInnerLoopsOfTree(data->graph(), data->common(), loop_tree,
+ temp_zone);
+ }
+};
+
+struct LoopExitEliminationPhase {
+ static const char* phase_name() { return "loop exit elimination"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ LoopPeeler::EliminateLoopExits(data->graph(), temp_zone);
+ }
+};
+
+struct GenericLoweringPhase {
+ static const char* phase_name() { return "generic lowering"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ JSGenericLowering generic_lowering(data->jsgraph());
+ AddReducer(data, &graph_reducer, &generic_lowering);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+struct EarlyOptimizationPhase {
+ static const char* phase_name() { return "early optimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- ValueNumberingReducer value_numbering(temp_zone);
+ SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph());
+ RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &simple_reducer);
+ AddReducer(data, &graph_reducer, &redundancy_elimination);
AddReducer(data, &graph_reducer, &value_numbering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -737,7 +1023,6 @@ struct SimplifiedLoweringPhase {
}
};
-
struct ControlFlowOptimizationPhase {
static const char* phase_name() { return "control flow optimization"; }
@@ -748,31 +1033,138 @@ struct ControlFlowOptimizationPhase {
}
};
+struct EffectControlLinearizationPhase {
+ static const char* phase_name() { return "effect linearization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ // The scheduler requires the graphs to be trimmed, so trim now.
+ // TODO(jarin) Remove the trimming once the scheduler can handle untrimmed
+ // graphs.
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
+ // Schedule the graph without node splitting so that we can
+ // fix the effect and control flow for nodes with low-level side
+ // effects (such as changing representation to tagged or
+ // 'floating' allocation regions).
+ Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
+ Scheduler::kNoFlags);
+ if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
+ TraceSchedule(data->info(), schedule);
+
+ // Post-pass for wiring the control/effects
+ // - connect allocating representation changes into the control&effect
+ // chains and lower them,
+ // - get rid of the region markers,
+ // - introduce effect phis and rewire effects to get SSA again.
+ EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone);
+ linearizer.Run();
+ }
+};
+
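Concretely, inferring only from the comment above: after a diamond in which both arms perform a store, say for source like `if (c) o.f = 1; else o.f = 2;`, the two arms carry separate effect chains; the linearizer's post-pass inserts an EffectPhi at the merge so that every later effectful node once again has a single effect input, restoring SSA form along the effect dimension.
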
+// The store-store elimination greatly benefits from running a common operator
+// reducer (together with dead code elimination) just before it, to eliminate
+// conditional deopts with a constant condition.
+
+struct DeadCodeEliminationPhase {
+ static const char* phase_name() { return "common operator reducer"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ // Run the common operator reducer.
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+struct StoreStoreEliminationPhase {
+ static const char* phase_name() { return "store-store elimination"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
+ StoreStoreElimination::Run(data->jsgraph(), temp_zone);
+ }
+};
+
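A small worked example of the redundancy this phase targets: in `o.f = 1; o.f = 2;` with no intervening read, call, or anything else that could observe `o.f`, the first store is dead and is removed, keeping only `o.f = 2`. The graph trimming beforehand mirrors the other phases above that require a trimmed graph.
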
+struct LoadEliminationPhase {
+ static const char* phase_name() { return "load elimination"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
+ LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &redundancy_elimination);
+ AddReducer(data, &graph_reducer, &load_elimination);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
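A hedged illustration of what the combined reducers achieve: for source like `var a = o.f; var b = o.f;` with no store, call, or other effect on `o.f` in between, load elimination lets the second load reuse the first one's value, redundancy elimination drops the duplicated checks guarding it, and value numbering plus the common operator reducer fold the remaining identical nodes.
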
+struct MemoryOptimizationPhase {
+ static const char* phase_name() { return "memory optimization"; }
-struct ChangeLoweringPhase {
- static const char* phase_name() { return "change lowering"; }
+ void Run(PipelineData* data, Zone* temp_zone) {
+ // The memory optimizer requires the graphs to be trimmed, so trim now.
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
+ // Optimize allocations and load/store operations.
+ MemoryOptimizer optimizer(data->jsgraph(), temp_zone);
+ optimizer.Optimize();
+ }
+};
+
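In rough terms (summarizing MemoryOptimizer's role rather than anything spelled out in this hunk): it walks the effect chain, groups eligible consecutive allocations so they can be performed as a single bump-pointer allocation, and lowers the abstract allocation and field/element access operators to raw machine loads and stores, deciding on write-barrier kinds as it goes.
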
+struct LateOptimizationPhase {
+ static const char* phase_name() { return "late optimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
- SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- ValueNumberingReducer value_numbering(temp_zone);
- ChangeLowering lowering(data->jsgraph());
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
+ SelectLowering select_lowering(data->jsgraph()->graph(),
+ data->jsgraph()->common());
+ TailCallOptimization tco(data->common(), data->graph());
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
- AddReducer(data, &graph_reducer, &lowering);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &select_lowering);
+ AddReducer(data, &graph_reducer, &tco);
graph_reducer.ReduceGraph();
}
};
-
struct EarlyGraphTrimmingPhase {
static const char* phase_name() { return "early graph trimming"; }
void Run(PipelineData* data, Zone* temp_zone) {
@@ -810,30 +1202,6 @@ struct StressLoopPeelingPhase {
};
-struct GenericLoweringPhase {
- static const char* phase_name() { return "generic lowering"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common());
- CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
- data->common(), data->machine());
- JSGenericLowering generic_lowering(data->info()->is_typing_enabled(),
- data->jsgraph());
- SelectLowering select_lowering(data->jsgraph()->graph(),
- data->jsgraph()->common());
- TailCallOptimization tco(data->common(), data->graph());
- AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &common_reducer);
- AddReducer(data, &graph_reducer, &generic_lowering);
- AddReducer(data, &graph_reducer, &select_lowering);
- AddReducer(data, &graph_reducer, &tco);
- graph_reducer.ReduceGraph();
- }
-};
-
-
struct ComputeSchedulePhase {
static const char* phase_name() { return "scheduling"; }
@@ -915,13 +1283,14 @@ struct AllocateGeneralRegistersPhase {
}
};
-
template <typename RegAllocator>
-struct AllocateDoubleRegistersPhase {
- static const char* phase_name() { return "allocate double registers"; }
+struct AllocateFPRegistersPhase {
+ static const char* phase_name() {
+ return "allocate floating point registers";
+ }
void Run(PipelineData* data, Zone* temp_zone) {
- RegAllocator allocator(data->register_allocation_data(), DOUBLE_REGISTERS,
+ RegAllocator allocator(data->register_allocation_data(), FP_REGISTERS,
temp_zone);
allocator.AllocateRegisters();
}
@@ -1049,15 +1418,14 @@ struct PrintGraphPhase {
Graph* graph = data->graph();
{ // Print JSON.
- FILE* json_file = OpenVisualizerLogFile(info, nullptr, "json", "a+");
- if (json_file == nullptr) return;
- OFStream json_of(json_file);
+ AllowHandleDereference allow_deref;
+ TurboJsonFile json_of(info, std::ios_base::app);
json_of << "{\"name\":\"" << phase << "\",\"type\":\"graph\",\"data\":"
<< AsJSON(*graph, data->source_positions()) << "},\n";
- fclose(json_file);
}
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ AllowHandleDereference allow_deref;
OFStream os(stdout);
os << "-- Graph after " << phase << " -- " << std::endl;
os << AsRPO(*graph);
@@ -1069,22 +1437,14 @@ struct PrintGraphPhase {
struct VerifyGraphPhase {
static const char* phase_name() { return nullptr; }
- void Run(PipelineData* data, Zone* temp_zone, const bool untyped) {
- Verifier::Run(data->graph(), FLAG_turbo_types && !untyped
- ? Verifier::TYPED
- : Verifier::UNTYPED);
+ void Run(PipelineData* data, Zone* temp_zone, const bool untyped,
+ bool values_only = false) {
+ Verifier::Run(data->graph(), !untyped ? Verifier::TYPED : Verifier::UNTYPED,
+ values_only ? Verifier::kValuesOnly : Verifier::kAll);
}
};
-
-void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
- if (data_->pipeline_statistics() != nullptr) {
- data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
- }
-}
-
-
-void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
+void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
if (FLAG_trace_turbo) {
Run<PrintGraphPhase>(phase);
}
@@ -1093,46 +1453,10 @@ void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
}
}
+bool PipelineImpl::CreateGraph() {
+ PipelineData* data = this->data_;
-Handle<Code> Pipeline::GenerateCode() {
- ZonePool zone_pool(isolate()->allocator());
- base::SmartPointer<PipelineStatistics> pipeline_statistics;
-
- if (FLAG_turbo_stats) {
- pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
- pipeline_statistics->BeginPhaseKind("initializing");
- }
-
- if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "w+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
- Handle<Script> script = info()->script();
- base::SmartArrayPointer<char> function_name = info()->GetDebugName();
- int pos = info()->shared_info()->start_position();
- json_of << "{\"function\":\"" << function_name.get()
- << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
- if (info()->has_literal() && !script->IsUndefined() &&
- !script->source()->IsUndefined()) {
- DisallowHeapAllocation no_allocation;
- FunctionLiteral* function = info()->literal();
- int start = function->start_position();
- int len = function->end_position() - start;
- String::SubStringRange source(String::cast(script->source()), start,
- len);
- for (const auto& c : source) {
- json_of << AsEscapedUC16ForJSON(c);
- }
- }
- json_of << "\",\n\"phases\":[";
- fclose(json_file);
- }
- }
-
- PipelineData data(&zone_pool, info(), pipeline_statistics.get());
- this->data_ = &data;
-
- BeginPhaseKind("graph creation");
+ data->BeginPhaseKind("graph creation");
if (FLAG_trace_turbo) {
OFStream os(stdout);
@@ -1143,18 +1467,19 @@ Handle<Code> Pipeline::GenerateCode() {
tcf << AsC1VCompilation(info());
}
- data.source_positions()->AddDecorator();
+ data->source_positions()->AddDecorator();
if (FLAG_loop_assignment_analysis) {
Run<LoopAssignmentAnalysisPhase>();
}
- if (info()->is_typing_enabled()) {
- Run<TypeHintAnalysisPhase>();
- }
+ Run<TypeHintAnalysisPhase>();
Run<GraphBuilderPhase>();
- if (data.compilation_failed()) return Handle<Code>::null();
+ if (data->compilation_failed()) {
+ data->EndPhaseKind();
+ return false;
+ }
RunPrintAndVerify("Initial untyped", true);
// Perform OSR deconstruction.
@@ -1173,28 +1498,32 @@ Handle<Code> Pipeline::GenerateCode() {
if (FLAG_print_turbo_replay) {
// Print a replay of the initial graph.
- GraphReplayPrinter::PrintReplay(data.graph());
- }
-
- base::SmartPointer<Typer> typer;
- if (info()->is_typing_enabled()) {
- // Type the graph.
- typer.Reset(new Typer(isolate(), data.graph(),
- info()->is_deoptimization_enabled()
- ? Typer::kDeoptimizationEnabled
- : Typer::kNoFlags,
- info()->dependencies()));
- Run<TyperPhase>(typer.get());
- RunPrintAndVerify("Typed");
+ GraphReplayPrinter::PrintReplay(data->graph());
}
- BeginPhaseKind("lowering");
+ // Run the type-sensitive lowerings and optimizations on the graph.
+ {
+ // Type the graph and keep the Typer running on newly created nodes within
+ // this scope; the Typer is automatically unlinked from the Graph once we
+ // leave this scope below.
+ Typer typer(isolate(), data->graph());
+ Run<TyperPhase>(&typer);
+ RunPrintAndVerify("Typed");
+
+ data->BeginPhaseKind("lowering");
- if (info()->is_typing_enabled()) {
// Lower JSOperators where we can determine types.
Run<TypedLoweringPhase>();
RunPrintAndVerify("Lowered typed");
+ if (FLAG_turbo_loop_peeling) {
+ Run<LoopPeelingPhase>();
+ RunPrintAndVerify("Loops peeled", true);
+ } else {
+ Run<LoopExitEliminationPhase>();
+ RunPrintAndVerify("Loop exits eliminated", true);
+ }
+
if (FLAG_turbo_stress_loop_peeling) {
Run<StressLoopPeelingPhase>();
RunPrintAndVerify("Loop peeled");
@@ -1205,84 +1534,137 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("Escape Analysed");
}
- // Lower simplified operators and insert changes.
- Run<SimplifiedLoweringPhase>();
- RunPrintAndVerify("Lowered simplified");
+ if (!info()->shared_info()->asm_function() && FLAG_turbo_load_elimination) {
+ Run<LoadEliminationPhase>();
+ RunPrintAndVerify("Load eliminated");
+ }
+ }
- Run<BranchEliminationPhase>();
- RunPrintAndVerify("Branch conditions eliminated");
+ // Select representations. This has to run w/o the Typer decorator, because
+ // we cannot compute meaningful types anyway, and the computed types might
+ // even conflict with the representation/truncation logic.
+ Run<RepresentationSelectionPhase>();
+ RunPrintAndVerify("Representations selected", true);
- // Optimize control flow.
- if (FLAG_turbo_cf_optimization) {
- Run<ControlFlowOptimizationPhase>();
- RunPrintAndVerify("Control flow optimized");
- }
+#ifdef DEBUG
+ // From now on it is invalid to look at types on the nodes, because:
+ //
+ // (a) The remaining passes (might) run concurrent to the main thread and
+ // therefore must not access the Heap or the Isolate in an uncontrolled
+ // way (as done by the type system), and
+ // (b) the types on the nodes might not make sense after representation
+ // selection due to the way we handle truncations; if we'd want to look
+ // at types afterwards we'd essentially need to re-type (large portions
+ // of) the graph.
+ //
+ // In order to catch bugs related to type access after this point we remove
+ // the types from the nodes at this point (currently only in Debug builds).
+ Run<UntyperPhase>();
+ RunPrintAndVerify("Untyped", true);
+#endif
+
+ // Run generic lowering pass.
+ Run<GenericLoweringPhase>();
+ RunPrintAndVerify("Generic lowering", true);
+
+ data->EndPhaseKind();
+
+ return true;
+}
+
+bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("block building");
- // Lower changes that have been inserted before.
- Run<ChangeLoweringPhase>();
- // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
- RunPrintAndVerify("Lowered changes", true);
+ // Run early optimization pass.
+ Run<EarlyOptimizationPhase>();
+ RunPrintAndVerify("Early optimized", true);
+
+ Run<EffectControlLinearizationPhase>();
+ RunPrintAndVerify("Effect and control linearized", true);
+
+ Run<DeadCodeEliminationPhase>();
+ RunPrintAndVerify("Common operator reducer", true);
+
+ if (FLAG_turbo_store_elimination) {
+ Run<StoreStoreEliminationPhase>();
+ RunPrintAndVerify("Store-store elimination", true);
}
- // Lower any remaining generic JSOperators.
- Run<GenericLoweringPhase>();
+ // Optimize control flow.
+ if (FLAG_turbo_cf_optimization) {
+ Run<ControlFlowOptimizationPhase>();
+ RunPrintAndVerify("Control flow optimized", true);
+ }
+
+ // Optimize memory access and allocation operations.
+ Run<MemoryOptimizationPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
- RunPrintAndVerify("Lowered generic", true);
+ RunPrintAndVerify("Memory optimized", true);
+
+ // Lower changes that have been inserted before.
+ Run<LateOptimizationPhase>();
+ // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+ RunPrintAndVerify("Late optimized", true);
Run<LateGraphTrimmingPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late trimmed", true);
- BeginPhaseKind("block building");
-
- data.source_positions()->RemoveDecorator();
+ data->source_positions()->RemoveDecorator();
- // Kill the Typer and thereby uninstall the decorator (if any).
- typer.Reset(nullptr);
-
- // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
- if (data.compilation_failed()) return Handle<Code>::null();
-
- return ScheduleAndGenerateCode(
- Linkage::ComputeIncoming(data.instruction_zone(), info()));
+ return ScheduleAndSelectInstructions(linkage);
}
-
Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule,
Code::Flags flags,
const char* debug_name) {
- CompilationInfo info(debug_name, isolate, graph->zone(), flags);
+ CompilationInfo info(CStrVector(debug_name), isolate, graph->zone(), flags);
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool(isolate->allocator());
PipelineData data(&zone_pool, &info, graph, schedule);
- base::SmartPointer<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats) {
- pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(&info, &zone_pool));
pipeline_statistics->BeginPhaseKind("stub codegen");
}
- Pipeline pipeline(&info);
- pipeline.data_ = &data;
+ PipelineImpl pipeline(&data);
DCHECK_NOT_NULL(data.schedule());
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(&info, nullptr, "json", "w+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
+ {
+ TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
- fclose(json_file);
}
pipeline.Run<PrintGraphPhase>("Machine");
}
+ pipeline.Run<VerifyGraphPhase>(false, true);
return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
+// static
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
+ ZonePool zone_pool(info->isolate()->allocator());
+ std::unique_ptr<PipelineStatistics> pipeline_statistics(
+ CreatePipelineStatistics(info, &zone_pool));
+ PipelineData data(&zone_pool, info, pipeline_statistics.get());
+ PipelineImpl pipeline(&data);
+
+ Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
+
+ if (!pipeline.CreateGraph()) return Handle<Code>::null();
+ if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
+ return pipeline.GenerateCode(&linkage);
+}
+// static
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Graph* graph,
Schedule* schedule) {
@@ -1291,7 +1673,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
}
-
+// static
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
Graph* graph,
@@ -1299,39 +1681,53 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool(info->isolate()->allocator());
PipelineData data(&zone_pool, info, graph, schedule);
- base::SmartPointer<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats) {
- pipeline_statistics.Reset(new PipelineStatistics(info, &zone_pool));
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(info, &zone_pool));
pipeline_statistics->BeginPhaseKind("test codegen");
}
- Pipeline pipeline(info);
- pipeline.data_ = &data;
- if (data.schedule() == nullptr) {
- // TODO(rossberg): Should this really be untyped?
- pipeline.RunPrintAndVerify("Machine", true);
+ PipelineImpl pipeline(&data);
+
+ if (FLAG_trace_turbo) {
+ TurboJsonFile json_of(info, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info->GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
}
+ // TODO(rossberg): Should this really be untyped?
+ pipeline.RunPrintAndVerify("Machine", true);
return pipeline.ScheduleAndGenerateCode(call_descriptor);
}
+// static
+CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
+ return new PipelineCompilationJob(function->GetIsolate(), function);
+}
+
+// static
+CompilationJob* Pipeline::NewWasmCompilationJob(
+ CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions) {
+ return new PipelineWasmCompilationJob(info, graph, descriptor,
+ source_positions);
+}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
- CompilationInfo info("testing", sequence->isolate(), sequence->zone());
+ CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
+ sequence->zone());
ZonePool zone_pool(sequence->isolate()->allocator());
PipelineData data(&zone_pool, &info, sequence);
- Pipeline pipeline(&info);
- pipeline.data_ = &data;
+ PipelineImpl pipeline(&data);
pipeline.data_->InitializeFrameData(nullptr);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
-
-Handle<Code> Pipeline::ScheduleAndGenerateCode(
- CallDescriptor* call_descriptor) {
+bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage) {
+ CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
PipelineData* data = this->data_;
DCHECK_NOT_NULL(data->graph());
@@ -1339,48 +1735,47 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());
- BasicBlockProfiler::Data* profiler_data = nullptr;
if (FLAG_turbo_profiling) {
- profiler_data = BasicBlockInstrumentor::Instrument(info(), data->graph(),
- data->schedule());
+ data->set_profiler_data(BasicBlockInstrumentor::Instrument(
+ info(), data->graph(), data->schedule()));
}
data->InitializeInstructionSequence(call_descriptor);
data->InitializeFrameData(call_descriptor);
// Select and schedule instructions covering the scheduled graph.
- Linkage linkage(call_descriptor);
- Run<InstructionSelectionPhase>(&linkage);
+ Run<InstructionSelectionPhase>(linkage);
if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
+ AllowHandleDereference allow_deref;
TurboCfgFile tcf(isolate());
tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
data->sequence());
}
- std::ostringstream source_position_output;
if (FLAG_trace_turbo) {
+ std::ostringstream source_position_output;
// Output source position information before the graph is deleted.
data_->source_positions()->Print(source_position_output);
+ data_->set_source_position_output(source_position_output.str());
}
data->DeleteGraphZone();
- BeginPhaseKind("register allocation");
+ data->BeginPhaseKind("register allocation");
bool run_verifier = FLAG_turbo_verify_allocation;
// Allocate registers.
- AllocateRegisters(
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- call_descriptor, run_verifier);
+ AllocateRegisters(RegisterConfiguration::Turbofan(), call_descriptor,
+ run_verifier);
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
- return Handle<Code>();
+ data->EndPhaseKind();
+ return false;
}
- BeginPhaseKind("code generation");
// TODO(mtrofin): move this off to the register allocator.
bool generate_frame_at_start =
data_->sequence()->instruction_blocks().front()->must_construct_frame();
@@ -1389,15 +1784,25 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
Run<JumpThreadingPhase>(generate_frame_at_start);
}
+ data->EndPhaseKind();
+
+ return true;
+}
+
+Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ data->BeginPhaseKind("code generation");
+
// Generate final machine code.
- Run<GenerateCodePhase>(&linkage);
+ Run<GenerateCodePhase>(linkage);
Handle<Code> code = data->code();
- if (profiler_data != nullptr) {
+ if (data->profiler_data()) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
code->Disassemble(nullptr, os);
- profiler_data->SetCode(&os);
+ data->profiler_data()->SetCode(&os);
#endif
}
@@ -1405,25 +1810,21 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
v8::internal::CodeGenerator::PrintCode(code, info());
if (FLAG_trace_turbo) {
- FILE* json_file = OpenVisualizerLogFile(info(), nullptr, "json", "a+");
- if (json_file != nullptr) {
- OFStream json_of(json_file);
- json_of
- << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
+ TurboJsonFile json_of(info(), std::ios_base::app);
+ json_of << "{\"name\":\"disassembly\",\"type\":\"disassembly\",\"data\":\"";
#if ENABLE_DISASSEMBLER
- std::stringstream disassembly_stream;
- code->Disassemble(nullptr, disassembly_stream);
- std::string disassembly_string(disassembly_stream.str());
- for (const auto& c : disassembly_string) {
- json_of << AsEscapedUC16ForJSON(c);
- }
-#endif // ENABLE_DISASSEMBLER
- json_of << "\"}\n],\n";
- json_of << "\"nodePositions\":";
- json_of << source_position_output.str();
- json_of << "}";
- fclose(json_file);
+ std::stringstream disassembly_stream;
+ code->Disassemble(nullptr, disassembly_stream);
+ std::string disassembly_string(disassembly_stream.str());
+ for (const auto& c : disassembly_string) {
+ json_of << AsEscapedUC16ForJSON(c);
}
+#endif // ENABLE_DISASSEMBLER
+ json_of << "\"}\n],\n";
+ json_of << "\"nodePositions\":";
+ json_of << data->source_position_output();
+ json_of << "}";
+
OFStream os(stdout);
os << "---------------------------------------------------\n"
<< "Finished compiling method " << info()->GetDebugName().get()
@@ -1433,30 +1834,39 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
return code;
}
+Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
+ CallDescriptor* call_descriptor) {
+ Linkage linkage(call_descriptor);
-void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor,
- bool run_verifier) {
- PipelineData* data = this->data_;
+ // Schedule the graph, perform instruction selection and register allocation.
+ if (!ScheduleAndSelectInstructions(&linkage)) return Handle<Code>();
+ // Generate the final machine code.
+ return GenerateCode(&linkage);
+}
+
+void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
+ CallDescriptor* descriptor,
+ bool run_verifier) {
+ PipelineData* data = this->data_;
// Don't track usage for this zone in compiler stats.
- base::SmartPointer<Zone> verifier_zone;
+ std::unique_ptr<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.Reset(new Zone(isolate()->allocator()));
+ verifier_zone.reset(new Zone(isolate()->allocator()));
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
- base::SmartArrayPointer<char> debug_name;
#ifdef DEBUG
- debug_name = info()->GetDebugName();
data_->sequence()->ValidateEdgeSplitForm();
+ data_->sequence()->ValidateDeferredBlockEntryPaths();
data_->sequence()->ValidateDeferredBlockExitPaths();
#endif
- data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
+ data->InitializeRegisterAllocationData(config, descriptor);
if (info()->is_osr()) {
+ AllowHandleDereference allow_deref;
OsrHelper osr_helper(info());
osr_helper.SetupFrame(data->frame());
}
@@ -1465,10 +1875,10 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
Run<ResolvePhisPhase>();
Run<BuildLiveRangesPhase>();
if (FLAG_trace_turbo_graph) {
+ AllowHandleDereference allow_deref;
OFStream os(stdout);
- PrintableInstructionSequence printable = {config, data->sequence()};
os << "----- Instruction sequence before register allocation -----\n"
- << printable;
+ << PrintableInstructionSequence({config, data->sequence()});
}
if (verifier != nullptr) {
CHECK(!data->register_allocation_data()->ExistsUseWithoutDefinition());
@@ -1480,13 +1890,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
Run<SplinterLiveRangesPhase>();
}
- if (FLAG_turbo_greedy_regalloc) {
- Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
- Run<AllocateDoubleRegistersPhase<GreedyAllocator>>();
- } else {
- Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
- Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
- }
+ Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+ Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
if (FLAG_turbo_preprocess_ranges) {
Run<MergeSplintersPhase>();
@@ -1505,10 +1910,10 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
Run<LocateSpillSlotsPhase>();
if (FLAG_trace_turbo_graph) {
+ AllowHandleDereference allow_deref;
OFStream os(stdout);
- PrintableInstructionSequence printable = {config, data->sequence()};
os << "----- Instruction sequence after register allocation -----\n"
- << printable;
+ << PrintableInstructionSequence({config, data->sequence()});
}
if (verifier != nullptr) {
@@ -1525,7 +1930,9 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
data->DeleteRegisterAllocationZone();
}
-Isolate* Pipeline::isolate() const { return info()->isolate(); }
+CompilationInfo* PipelineImpl::info() const { return data_->info(); }
+
+Isolate* PipelineImpl::isolate() const { return info()->isolate(); }
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index edb8191862..64befbfe06 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
class CompilationInfo;
+class CompilationJob;
class RegisterConfiguration;
namespace compiler {
@@ -20,16 +21,18 @@ namespace compiler {
class CallDescriptor;
class Graph;
class InstructionSequence;
-class Linkage;
-class PipelineData;
class Schedule;
+class SourcePositionTable;
-class Pipeline {
+class Pipeline : public AllStatic {
public:
- explicit Pipeline(CompilationInfo* info) : info_(info) {}
+ // Returns a new compilation job for the given function.
+ static CompilationJob* NewCompilationJob(Handle<JSFunction> function);
- // Run the entire pipeline and generate a handle to a code object.
- Handle<Code> GenerateCode();
+ // Returns a new compilation job for the WebAssembly compilation info.
+ static CompilationJob* NewWasmCompilationJob(
+ CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
@@ -39,6 +42,10 @@ class Pipeline {
Code::Flags flags,
const char* debug_name);
+ // Run the entire pipeline and generate a handle to a code object suitable for
+ // testing.
+ static Handle<Code> GenerateCodeForTesting(CompilationInfo* info);
+
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
@@ -58,27 +65,7 @@ class Pipeline {
Schedule* schedule = nullptr);
private:
- // Helpers for executing pipeline phases.
- template <typename Phase>
- void Run();
- template <typename Phase, typename Arg0>
- void Run(Arg0 arg_0);
- template <typename Phase, typename Arg0, typename Arg1>
- void Run(Arg0 arg_0, Arg1 arg_1);
-
- void BeginPhaseKind(const char* phase_kind);
- void RunPrintAndVerify(const char* phase, bool untyped = false);
- Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
- void AllocateRegisters(const RegisterConfiguration* config,
- CallDescriptor* descriptor, bool run_verifier);
-
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() const;
-
- CompilationInfo* const info_;
- PipelineData* data_;
-
- DISALLOW_COPY_AND_ASSIGN(Pipeline);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/ppc/OWNERS b/deps/v8/src/compiler/ppc/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/compiler/ppc/OWNERS
+++ b/deps/v8/src/compiler/ppc/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 6f1e5881eb..8561c2360a 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -103,7 +103,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -175,7 +175,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -215,7 +216,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
DCHECK_EQ(0, offset_immediate_);
__ add(scratch1_, object_, offset_);
}
- __ CallStub(&stub);
+ if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ __ CallStub(&stub);
+ } else {
+ __ CallStub(&stub);
+ }
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Pop(scratch1_);
@@ -259,15 +265,10 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#if V8_TARGET_ARCH_PPC64
case kPPC_Add:
case kPPC_Sub:
- return lt;
#endif
case kPPC_AddWithOverflow32:
case kPPC_SubWithOverflow32:
-#if V8_TARGET_ARCH_PPC64
- return ne;
-#else
return lt;
-#endif
default:
break;
}
@@ -277,15 +278,10 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#if V8_TARGET_ARCH_PPC64
case kPPC_Add:
case kPPC_Sub:
- return ge;
#endif
case kPPC_AddWithOverflow32:
case kPPC_SubWithOverflow32:
-#if V8_TARGET_ARCH_PPC64
- return eq;
-#else
return ge;
-#endif
default:
break;
}
@@ -378,17 +374,16 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#if V8_TARGET_ARCH_PPC64
-#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(add, addi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_ADD_WITH_OVERFLOW(); \
+ __ extsw(kScratchReg, kScratchReg, SetRC); \
} while (0)
-
-#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(sub, subi); \
- __ TestIfInt32(i.OutputRegister(), r0, cr0); \
+#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
+ do { \
+ ASSEMBLE_SUB_WITH_OVERFLOW(); \
+ __ extsw(kScratchReg, kScratchReg, SetRC); \
} while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
@@ -446,20 +441,117 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-#define ASSEMBLE_FLOAT_MAX(scratch_reg) \
- do { \
- __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
- __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
-#define ASSEMBLE_FLOAT_MIN(scratch_reg) \
+#define ASSEMBLE_FLOAT_MAX() \
do { \
- __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
- __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(1), \
- i.InputDoubleRegister(0)); \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ fcmpu(left_reg, right_reg); \
+ __ bunordered(&check_nan_left); \
+ __ beq(&check_zero); \
+ __ bge(&return_left); \
+ __ b(&return_right); \
+ \
+ __ bind(&check_zero); \
+ __ fcmpu(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ __ fadd(result_reg, left_reg, right_reg); \
+ __ b(&done); \
+ \
+ __ bind(&check_nan_left); \
+ __ fcmpu(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left); \
+ __ bind(&return_right); \
+ if (!right_reg.is(result_reg)) { \
+ __ fmr(result_reg, right_reg); \
+ } \
+ __ b(&done); \
+ \
+ __ bind(&return_left); \
+ if (!left_reg.is(result_reg)) { \
+ __ fmr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
+ } while (0)
+
+#define ASSEMBLE_FLOAT_MIN() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ fcmpu(left_reg, right_reg); \
+ __ bunordered(&check_nan_left); \
+ __ beq(&check_zero); \
+ __ ble(&return_left); \
+ __ b(&return_right); \
+ \
+ __ bind(&check_zero); \
+ __ fcmpu(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R being */\
+ /* different registers is most efficiently expressed as -((-L) - R). */ \
+ __ fneg(left_reg, left_reg); \
+ if (left_reg.is(right_reg)) { \
+ __ fadd(result_reg, left_reg, right_reg); \
+ } else { \
+ __ fsub(result_reg, left_reg, right_reg); \
+ } \
+ __ fneg(result_reg, result_reg); \
+ __ b(&done); \
+ \
+ __ bind(&check_nan_left); \
+ __ fcmpu(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left); \
+ \
+ __ bind(&return_right); \
+ if (!right_reg.is(result_reg)) { \
+ __ fmr(result_reg, right_reg); \
+ } \
+ __ b(&done); \
+ \
+ __ bind(&return_left); \
+ if (!left_reg.is(result_reg)) { \
+ __ fmr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
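A worked check of the signed-zero handling in both macros: when the operands compare equal they may still be +0 and -0. For max, `fadd(result_reg, left_reg, right_reg)` is correct because (+0) + (-0) = +0 under the default rounding mode, and +0 is the right maximum. For min, -((-L) + (-R)) with L = +0 and R = -0 gives -((-0) + (+0)) = -(+0) = -0, the right minimum; the code computes it as -((-L) - R), which is the same value since subtracting R equals adding -R. NaN operands never reach these paths, because `fcmpu` reports them as unordered earlier.
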
@@ -536,8 +628,13 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
+#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
+#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))
+#else
+#define CleanUInt32(x)
+#endif
+
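On the 64-bit path, `ClearLeftImm(x, x, Operand(32))` presumably expands to a rotate-and-mask (`rldicl`-style) instruction that clears the 32 high bits, i.e. a zero-extension of the 32-bit offset, so the indexed-form loads and stores below cannot be fed garbage in the upper half; the exact MacroAssembler expansion is not shown in this diff. On 32-bit PPC the macro is empty, since there are no upper 32 bits to clean.
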
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
@@ -546,7 +643,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -557,14 +653,13 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
if (mode == kMode_MRI) { \
__ asm_instr(result, operand); \
} else { \
+ CleanUInt32(offset); \
__ asm_instrx(result, operand); \
} \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Register result = i.OutputRegister(); \
@@ -573,7 +668,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -584,14 +678,13 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
if (mode == kMode_MRI) { \
__ asm_instr(result, operand); \
} else { \
+ CleanUInt32(offset); \
__ asm_instrx(result, operand); \
} \
__ bind(ool->exit()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
do { \
Label done; \
@@ -600,7 +693,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -612,14 +704,13 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
if (mode == kMode_MRI) { \
__ stfs(kScratchDoubleReg, operand); \
} else { \
+ CleanUInt32(offset); \
__ stfsx(kScratchDoubleReg, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
do { \
Label done; \
@@ -628,7 +719,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -639,14 +729,13 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
if (mode == kMode_MRI) { \
__ stfd(value, operand); \
} else { \
+ CleanUInt32(offset); \
__ stfdx(value, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
do { \
Label done; \
@@ -655,7 +744,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ extsw(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ cmplw(offset, i.InputRegister(2)); \
} else { \
@@ -666,33 +754,50 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
if (mode == kMode_MRI) { \
__ asm_instr(value, operand); \
} else { \
+ CleanUInt32(offset); \
__ asm_instrx(value, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ __ sync(); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ bind(&done); \
+ __ cmp(result, result); \
+ __ bne(&done); \
+ __ isync(); \
+ } while (0)
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ __ sync(); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+ } while (0)
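
The new atomic macros implement sequentially consistent accesses with plain PowerPC barriers: a full sync precedes both the load and the store, and the load is followed by the classic compare/branch/isync idiom that acts as an acquire fence. Conceptual expansion of ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx) for the register+register addressing mode (a sketch, not literal assembler output):

  sync                    // full barrier before the access
  lwzx   result, ra, rb   // the atomic load itself
done:
  cmp    result, result   // compare the loaded value with itself: always equal
  bne    done             // never taken, but depends on the loaded value
  isync                   // bne + isync: later accesses cannot start earlier

The branch is never taken (done is bound before the compare, so it would target the compare itself), but because it depends on the loaded value, the following isync orders every subsequent memory access after the load completes.
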
+
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ Add(sp, sp, sp_slot_delta * kPointerSize, r0);
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ RestoreFrameStateForTailCall();
}
@@ -724,8 +829,119 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void FlushPendingPushRegisters(MacroAssembler* masm,
+ FrameAccessState* frame_access_state,
+ ZoneVector<Register>* pending_pushes) {
+ switch (pending_pushes->size()) {
+ case 0:
+ break;
+ case 1:
+ masm->Push((*pending_pushes)[0]);
+ break;
+ case 2:
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ break;
+ case 3:
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ (*pending_pushes)[2]);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ frame_access_state->IncreaseSPDelta(pending_pushes->size());
+ pending_pushes->resize(0);
+}
+
+void AddPendingPushRegister(MacroAssembler* masm,
+ FrameAccessState* frame_access_state,
+ ZoneVector<Register>* pending_pushes,
+ Register reg) {
+ pending_pushes->push_back(reg);
+ if (pending_pushes->size() == 3 || reg.is(ip)) {
+ FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ }
+}
+
+void AdjustStackPointerForTailCall(
+ MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ ZoneVector<Register>* pending_pushes = nullptr,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ if (pending_pushes != nullptr) {
+ FlushPendingPushRegisters(masm, state, pending_pushes);
+ }
+ masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ if (pending_pushes != nullptr) {
+ FlushPendingPushRegisters(masm, state, pending_pushes);
+ }
+ masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
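
AssembleTailCallBeforeGap below queues registers that are headed for the stack instead of pushing them one at a time; FlushPendingPushRegisters then emits a single one-, two-, or three-register Push and bumps the SP delta once. The ip register forces an immediate flush because it doubles as the scratch register for stack-slot and immediate sources, so a second queued use would clobber the first. A minimal standalone model of the batching rule (names hypothetical):

#include <vector>

void FlushBatch(std::vector<int>* pending) {
  // Emit one multi-register push for everything queued, then reset.
  pending->clear();
}

void QueuePush(std::vector<int>* pending, int reg, bool is_scratch) {
  pending->push_back(reg);
  if (pending->size() == 3 || is_scratch) FlushBatch(pending);
}
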
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+ ZoneVector<MoveOperands*> pushes(zone());
+ GetPushCompatibleMoves(instr, flags, &pushes);
+
+ if (!pushes.empty() &&
+ (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+ first_unused_stack_slot)) {
+ PPCOperandConverter g(this, instr);
+ ZoneVector<Register> pending_pushes(zone());
+ for (auto move : pushes) {
+ LocationOperand destination_location(
+ LocationOperand::cast(move->destination()));
+ InstructionOperand source(move->source());
+ AdjustStackPointerForTailCall(
+ masm(), frame_access_state(),
+ destination_location.index() - pending_pushes.size(),
+ &pending_pushes);
+ if (source.IsStackSlot()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ ip);
+ } else if (source.IsRegister()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ source_location.GetRegister());
+ } else if (source.IsImmediate()) {
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ ip);
+ } else {
+ // Pushes of non-scalar data types are not supported.
+ UNIMPLEMENTED();
+ }
+ move->Eliminate();
+ }
+ FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ }
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, nullptr, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
PPCOperandConverter i(this, instr);
ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
@@ -749,8 +965,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -769,6 +983,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -800,8 +1022,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -811,6 +1031,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Jump(ip);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -821,8 +1042,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (instr->InputAt(0)->IsImmediate()) {
@@ -848,6 +1074,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -858,7 +1090,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -1143,6 +1377,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
LeaveOE, i.OutputRCBit());
break;
#endif
+
+ case kPPC_Mul32WithHigh32:
+ if (i.OutputRegister(0).is(i.InputRegister(0)) ||
+ i.OutputRegister(0).is(i.InputRegister(1)) ||
+ i.OutputRegister(1).is(i.InputRegister(0)) ||
+ i.OutputRegister(1).is(i.InputRegister(1))) {
+ __ mullw(kScratchReg,
+ i.InputRegister(0), i.InputRegister(1)); // low
+ __ mulhw(i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1)); // high
+ __ mr(i.OutputRegister(0), kScratchReg);
+ } else {
+ __ mullw(i.OutputRegister(0),
+ i.InputRegister(0), i.InputRegister(1)); // low
+ __ mulhw(i.OutputRegister(1),
+ i.InputRegister(0), i.InputRegister(1)); // high
+ }
+ break;
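
kPPC_Mul32WithHigh32 materializes both halves of the 64-bit product of two 32-bit operands; the scratch-register copy above exists only so that mulhw still reads the original operands when an output register aliases an input. The semantics in plain C++ (a sketch):

int64_t full = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
int32_t low  = static_cast<int32_t>(full);        // mullw
int32_t high = static_cast<int32_t>(full >> 32);  // mulhw
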
case kPPC_MulHigh32:
__ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.OutputRCBit());
@@ -1198,14 +1450,80 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// and generate a CallAddress instruction instead.
ASSEMBLE_FLOAT_MODULO();
break;
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ __ Move(d1, d3);
+ break;
+ }
case kPPC_Neg:
__ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
break;
case kPPC_MaxDouble:
- ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
+ ASSEMBLE_FLOAT_MAX();
break;
case kPPC_MinDouble:
- ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
+ ASSEMBLE_FLOAT_MIN();
break;
case kPPC_AbsDouble:
ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
@@ -1280,8 +1598,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(SetRC, i.OutputRCBit());
break;
#endif
+ case kPPC_Float64SilenceNaN: {
+ DoubleRegister value = i.InputDoubleRegister(0);
+ DoubleRegister result = i.OutputDoubleRegister();
+ __ CanonicalizeNaN(result, value);
+ break;
+ }
case kPPC_Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1292,21 +1616,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kPPC_PushFrame: {
int num_slots = i.InputInt32(1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ stfdu(i.InputDoubleRegister(0),
- MemOperand(sp, -num_slots * kPointerSize));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDoubleU(i.InputDoubleRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize), r0);
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreSingleU(i.InputDoubleRegister(0),
+ MemOperand(sp, -num_slots * kPointerSize), r0);
+ }
} else {
__ StorePU(i.InputRegister(0),
- MemOperand(sp, -num_slots * kPointerSize));
+ MemOperand(sp, -num_slots * kPointerSize), r0);
}
break;
}
case kPPC_StoreToStackSlot: {
int slot = i.InputInt32(1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ stfd(i.InputDoubleRegister(0), MemOperand(sp, slot * kPointerSize));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize), r0);
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreSingle(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize), r0);
+ }
} else {
- __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
}
break;
}
@@ -1492,6 +1831,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
break;
+ case kPPC_LoadWordU32:
+ ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
+ break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
break;
@@ -1540,7 +1882,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
break;
case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
+ ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
break;
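
Switching kCheckedLoadWord32 from lwa to lwz (together with the new kPPC_LoadWordU32 above) makes 32-bit word loads zero-extend instead of sign-extend on 64-bit targets; kPPC_LoadWordS32 remains for explicitly signed loads. The difference in C++ terms (a sketch):

uint64_t load_lwa(int32_t mem) { return static_cast<int64_t>(mem); }   // sign-extends
uint64_t load_lwz(int32_t mem) { return static_cast<uint32_t>(mem); }  // zero-extends
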
case kCheckedLoadWord64:
#if V8_TARGET_ARCH_PPC64
@@ -1577,10 +1919,38 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_DOUBLE();
break;
+
+ case kAtomicLoadInt8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
+ __ extsb(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
+ break;
+ case kAtomicLoadInt16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
+ break;
+
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
+ break;
default:
UNREACHABLE();
break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1676,7 +2046,7 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
PPCOperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
+ __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0);
__ beq(GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
@@ -1700,19 +2070,48 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Jump(kScratchReg);
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
-void CodeGenerator::AssemblePrologue() {
+ // Save callee-saved Double registers.
+ if (double_saves != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ DCHECK(kNumCalleeSavedDoubles ==
+ base::bits::CountPopulation32(double_saves));
+ frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+ (kDoubleSize / kPointerSize));
+ }
+ // Save callee-saved registers.
+ const RegList saves =
+ FLAG_enable_embedded_constant_pool
+ ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
+ : descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // register save area does not include the fp or constant pool pointer.
+ const int num_saves =
+ kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
+ DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ frame->AllocateSavedCalleeRegisterSlots(num_saves);
+ }
+}
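
FinishFrame takes over the frame-slot accounting that AssemblePrologue used to interleave with code emission: the callee-saved double and general-purpose slots are reserved on the Frame up front, and AssembleConstructFrame below only emits the actual pushes. The double-register reservation works out as:

// slots = kNumCalleeSavedDoubles * (kDoubleSize / kPointerSize)
//       = kNumCalleeSavedDoubles * (8 / 8) = kNumCalleeSavedDoubles      on PPC64
//       = kNumCalleeSavedDoubles * (8 / 4) = 2 * kNumCalleeSavedDoubles  on 32-bit PPC
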
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -1736,7 +2135,7 @@ void CodeGenerator::AssemblePrologue() {
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1747,15 +2146,12 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
- if (double_saves != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ Add(sp, sp, -stack_shrink_slots * kPointerSize, r0);
+ if (shrink_slots > 0) {
+ __ Add(sp, sp, -shrink_slots * kPointerSize, r0);
}
// Save callee-saved Double registers.
@@ -1763,8 +2159,6 @@ void CodeGenerator::AssemblePrologue() {
__ MultiPushDoubles(double_saves);
DCHECK(kNumCalleeSavedDoubles ==
base::bits::CountPopulation32(double_saves));
- frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
- (kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
@@ -1775,10 +2169,6 @@ void CodeGenerator::AssemblePrologue() {
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
- const int num_saves =
- kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
- DCHECK(num_saves == base::bits::CountPopulation32(saves));
- frame()->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
@@ -1848,10 +2238,30 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ mov(dst, Operand(src.ToInt32()));
+#if V8_TARGET_ARCH_PPC64
+ if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#else
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#endif
+ __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ mov(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kInt64:
- __ mov(dst, Operand(src.ToInt64()));
+#if V8_TARGET_ARCH_PPC64
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ __ mov(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+#endif
+ __ mov(dst, Operand(src.ToInt64()));
+#if V8_TARGET_ARCH_PPC64
+ }
+#endif
break;
case Constant::kFloat32:
__ Move(dst,
@@ -1885,34 +2295,50 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ StoreP(dst, g.ToMemOperand(destination), r0);
}
} else {
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
: src.ToFloat64();
__ LoadDoubleLiteral(dst, value, kScratchReg);
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination), r0);
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
- __ StoreDouble(src, g.ToMemOperand(destination), r0);
+ DCHECK(destination->IsFPStackSlot());
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(src, g.ToMemOperand(destination), r0);
+ } else {
+ __ StoreSingle(src, g.ToMemOperand(destination), r0);
+ }
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+ if (destination->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
+ } else {
+ __ LoadSingle(g.ToDoubleRegister(destination), src, r0);
+ }
} else {
+ LocationOperand* op = LocationOperand::cast(source);
DoubleRegister temp = kScratchDoubleReg;
- __ LoadDouble(temp, src, r0);
- __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(temp, src, r0);
+ __ StoreDouble(temp, g.ToMemOperand(destination), r0);
+ } else {
+ __ LoadSingle(temp, src, r0);
+ __ StoreSingle(temp, g.ToMemOperand(destination), r0);
+ }
}
} else {
UNREACHABLE();
@@ -1942,7 +2368,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ StoreP(temp, dst);
}
#if V8_TARGET_ARCH_PPC64
- } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
@@ -1955,24 +2381,24 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ LoadP(temp_1, dst);
__ StoreP(temp_0, dst);
__ StoreP(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister temp = kScratchDoubleReg;
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ fmr(temp, src);
__ fmr(src, dst);
__ fmr(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ fmr(temp, src);
__ lfd(src, dst);
__ stfd(temp, dst);
}
#if !V8_TARGET_ARCH_PPC64
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
DoubleRegister temp_0 = kScratchDoubleReg;
DoubleRegister temp_1 = d0;
MemOperand src = g.ToMemOperand(source);
@@ -1996,11 +2422,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // We do not insert nops for inlined Smi code.
-}
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 66c2e9980b..9198bcb00c 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -42,6 +42,7 @@ namespace compiler {
V(PPC_SubPair) \
V(PPC_SubDouble) \
V(PPC_Mul32) \
+ V(PPC_Mul32WithHigh32) \
V(PPC_Mul64) \
V(PPC_MulHigh32) \
V(PPC_MulHighU32) \
@@ -93,6 +94,7 @@ namespace compiler {
V(PPC_Uint32ToFloat32) \
V(PPC_Uint32ToDouble) \
V(PPC_Float32ToDouble) \
+ V(PPC_Float64SilenceNaN) \
V(PPC_DoubleToInt32) \
V(PPC_DoubleToUint32) \
V(PPC_DoubleToInt64) \
@@ -112,6 +114,7 @@ namespace compiler {
V(PPC_LoadWordS16) \
V(PPC_LoadWordU16) \
V(PPC_LoadWordS32) \
+ V(PPC_LoadWordU32) \
V(PPC_LoadWord64) \
V(PPC_LoadFloat32) \
V(PPC_LoadDouble) \
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index e7d7719f5b..dee84943fa 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -44,6 +44,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_SubPair:
case kPPC_SubDouble:
case kPPC_Mul32:
+ case kPPC_Mul32WithHigh32:
case kPPC_Mul64:
case kPPC_MulHigh32:
case kPPC_MulHighU32:
@@ -92,6 +93,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_Uint32ToFloat32:
case kPPC_Uint32ToDouble:
case kPPC_Float32ToDouble:
+ case kPPC_Float64SilenceNaN:
case kPPC_DoubleToInt32:
case kPPC_DoubleToUint32:
case kPPC_DoubleToInt64:
@@ -113,6 +115,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadWordS16:
case kPPC_LoadWordU16:
case kPPC_LoadWordS32:
+ case kPPC_LoadWordU32:
case kPPC_LoadWord64:
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 5abb5f1476..bad8ded131 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -134,7 +134,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
@@ -147,7 +154,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -187,16 +194,16 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsSigned() ? kPPC_LoadWordS16 : kPPC_LoadWordU16;
break;
#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
- opcode = kPPC_LoadWordS32;
-#if V8_TARGET_ARCH_PPC64
- // TODO(mbrandy): this applies to signed loads only (lwa)
- mode = kInt16Imm_4ByteAligned;
-#endif
+ opcode = kPPC_LoadWordU32;
break;
#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kPPC_LoadWord64;
@@ -292,12 +299,16 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kPPC_StoreWord16;
break;
#if !V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kPPC_StoreWord32;
break;
#if V8_TARGET_ARCH_PPC64
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kPPC_StoreWord64;
@@ -324,6 +335,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+// The architecture supports unaligned access, so VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, so VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -354,6 +370,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#if !V8_TARGET_ARCH_PPC64
case MachineRepresentation::kWord64: // Fall through.
@@ -400,6 +418,8 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
opcode = kCheckedStoreFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#if !V8_TARGET_ARCH_PPC64
case MachineRepresentation::kWord64: // Fall through.
@@ -869,7 +889,8 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
m.right().Is(32)) {
// Just load and sign-extend the interesting 4 bytes instead. This happens,
// for example, when we're loading and untagging SMIs.
- BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+ BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
+ AddressOption::kAllowAll);
if (mleft.matches() && mleft.index() == nullptr) {
int64_t offset = 0;
Node* displacement = mleft.displacement();
@@ -951,6 +972,9 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
@@ -986,6 +1010,36 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
}
#endif
+namespace {
+
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont);
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ PPCOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand result_operand = g.DefineAsRegister(node);
+ InstructionOperand high32_operand = g.TempRegister();
+ InstructionOperand temp_operand = g.TempRegister();
+ {
+ InstructionOperand outputs[] = {result_operand, high32_operand};
+ InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node())};
+ selector->Emit(kPPC_Mul32WithHigh32, 2, outputs, 2, inputs);
+ }
+ {
+ InstructionOperand shift_31 = g.UseImmediate(31);
+ InstructionOperand outputs[] = {temp_operand};
+ InstructionOperand inputs[] = {result_operand, shift_31};
+ selector->Emit(kPPC_ShiftRightAlg32, 1, outputs, 2, inputs);
+ }
+
+ VisitCompare(selector, kPPC_Cmp32, high32_operand, temp_operand, cont);
+}
+
+} // namespace
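
EmitInt32MulWithOverflow detects signed overflow without an overflow flag: a 32x32 multiply overflows exactly when the high word of the 64-bit product differs from the sign-extension of the low word, which is what comparing the mulhw result against the low word shifted right arithmetically by 31 tests (hence the kNotEqual continuations below). The same check in plain C++ (a sketch; assumes arithmetic right shift, as on PPC):

bool Int32MulOverflows(int32_t a, int32_t b) {
  int64_t full = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  int32_t low  = static_cast<int32_t>(full);
  int32_t high = static_cast<int32_t>(full >> 32);
  return high != (low >> 31);  // high word must be the low word's sign bits
}
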
+
void InstructionSelector::VisitInt32Mul(Node* node) {
VisitRRR(this, kPPC_Mul32, node);
@@ -1137,15 +1191,12 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kPPC_DoubleToFloat32, node);
}
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kPPC_DoubleToInt32, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kPPC_DoubleToInt32, node);
}
@@ -1223,43 +1274,14 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
- PPCOperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- Emit(kPPC_NegDouble | MiscField::encode(1), g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- return;
- }
VisitRRR(this, kPPC_SubDouble | MiscField::encode(1), node);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract
- PPCOperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- if (m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- // -floor(-x) = ceil(x)
- Emit(kPPC_CeilDouble, g.DefineAsRegister(node),
- g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
- Emit(kPPC_NegDouble, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- return;
- }
VisitRRR(this, kPPC_SubDouble, node);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitRRR(this, kPPC_MulDouble | MiscField::encode(1), node);
}
@@ -1288,17 +1310,26 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
}
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRR(this, kPPC_MaxDouble | MiscField::encode(1), node);
+}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRRR(this, kPPC_MaxDouble, node);
+}
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kPPC_Float64SilenceNaN, node);
+}
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRR(this, kPPC_MinDouble | MiscField::encode(1), node);
+}
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRRR(this, kPPC_MinDouble, node);
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
@@ -1310,11 +1341,24 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitRR(this, kPPC_AbsDouble, node);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kPPC_SqrtDouble | MiscField::encode(1), node);
}
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ PPCOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ PPCOperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d1),
+ g.UseFixed(node->InputAt(0), d1),
+ g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
+}
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kPPC_SqrtDouble, node);
@@ -1365,6 +1409,13 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kPPC_NegDouble, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kPPC_NegDouble, node);
+}
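
Float32Neg and Float64Neg are now first-class machine operators that PPC lowers to kPPC_NegDouble, a single fneg, replacing the generic expansion as a subtraction from minus zero (removed from RawMachineAssembler later in this diff). In RawMachineAssembler terms (m hypothetical):

// Old expansion: Float64Sub(Float64Constant(-0.0), a)
Node* neg = m.Float64Neg(a);  // now one machine op; fneg on PPC
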
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
@@ -1441,7 +1492,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1599,6 +1650,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitBinop<Int32BinopMatcher>(selector, node,
kPPC_SubWithOverflow32,
kInt16Imm_Negate, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kNotEqual);
+ return EmitInt32MulWithOverflow(selector, node, cont);
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1681,14 +1735,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1793,6 +1847,15 @@ void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
}
#endif
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+ return EmitInt32MulWithOverflow(this, node, &cont);
+ }
+ FlagsContinuation cont;
+ EmitInt32MulWithOverflow(this, node, &cont);
+}
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
@@ -1840,7 +1903,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1921,6 +1984,60 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ PPCOperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ 0, nullptr, input_count, inputs);
+}
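
Both atomic visitors force the kMode_MRR addressing mode so the code-generator macros added earlier always receive a register+register operand, and the store takes base, index, and value in unique registers so nothing is reused across the sync. Driving these through the RawMachineAssembler API added later in this diff (a sketch; m, base, index, value hypothetical):

Node* loaded = m.AtomicLoad(MachineType::Int32(), base, index);     // kAtomicLoadWord32
m.AtomicStore(MachineRepresentation::kWord32, base, index, value);  // kAtomicStoreWord32
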
// static
MachineOperatorBuilder::Flags
@@ -1937,6 +2054,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
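
FullUnalignedAccessSupport is what makes VisitUnalignedLoad and VisitUnalignedStore unreachable earlier in this file: RawMachineAssembler (see the header hunk below) checks UnalignedLoadSupported/UnalignedStoreSupported and emits ordinary Load/Store nodes when the target handles misaligned addresses natively, so the PPC selector never sees an unaligned node. Usage sketch (m and the misaligned pointer hypothetical):

// Lowers to a plain load on PPC even though the address is misaligned.
Node* v = m.UnalignedLoadFromPointer(misaligned_ptr, MachineType::Int32());
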
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 728d79af5b..ae40f55c12 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -13,14 +13,14 @@ namespace v8 {
namespace internal {
namespace compiler {
-RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
- CallDescriptor* call_descriptor,
- MachineRepresentation word,
- MachineOperatorBuilder::Flags flags)
+RawMachineAssembler::RawMachineAssembler(
+ Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
+ MachineRepresentation word, MachineOperatorBuilder::Flags flags,
+ MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
: isolate_(isolate),
graph_(graph),
schedule_(new (zone()) Schedule(zone())),
- machine_(zone(), word, flags),
+ machine_(zone(), word, flags, alignment_requirements),
common_(zone()),
call_descriptor_(call_descriptor),
parameters_(parameter_count(), zone()),
@@ -35,6 +35,12 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
graph->SetEnd(graph->NewNode(common_.End(0)));
}
+Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
+ RelocInfo::Mode rmode) {
+ return kPointerSize == 8
+ ? RelocatableInt64Constant(value, rmode)
+ : RelocatableInt32Constant(static_cast<int>(value), rmode);
+}
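
RelocatableIntPtrConstant selects the pointer-width relocatable constant. This is what lets WebAssembly code embed memory addresses and sizes that the runtime patches in place when an instance moves or its memory grows; the rmode-tagged mov instructions in the PPC code-generator hunk above are the consumers. Usage sketch (values hypothetical):

Node* mem_start = m.RelocatableIntPtrConstant(
    reinterpret_cast<intptr_t>(memory_start), RelocInfo::WASM_MEMORY_REFERENCE);
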
Schedule* RawMachineAssembler::Export() {
// Compute the correct codegen order.
@@ -44,7 +50,7 @@ Schedule* RawMachineAssembler::Export() {
PrintF("--- RAW SCHEDULE -------------------------------------------\n");
os << *schedule_;
}
- schedule_->EnsureSplitEdgeForm();
+ schedule_->EnsureCFGWellFormedness();
schedule_->PropagateDeferredMark();
if (FLAG_trace_turbo_scheduler) {
PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
@@ -79,9 +85,16 @@ void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
current_block_ = nullptr;
}
+void RawMachineAssembler::Continuations(Node* call, RawMachineLabel* if_success,
+ RawMachineLabel* if_exception) {
+ DCHECK_NOT_NULL(schedule_);
+ DCHECK_NOT_NULL(current_block_);
+ schedule()->AddCall(CurrentBlock(), call, Use(if_success), Use(if_exception));
+ current_block_ = nullptr;
+}
void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
- int32_t* case_values,
+ const int32_t* case_values,
RawMachineLabel** case_labels,
size_t case_count) {
DCHECK_NE(schedule()->end(), current_block_);
@@ -106,10 +119,8 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
current_block_ = nullptr;
}
-
void RawMachineAssembler::Return(Node* value) {
Node* ret = MakeNode(common()->Return(), 1, &value);
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -118,7 +129,6 @@ void RawMachineAssembler::Return(Node* value) {
void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {v1, v2};
Node* ret = MakeNode(common()->Return(2), 2, values);
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -127,16 +137,19 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) {
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 3, values);
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
+void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
+
+void RawMachineAssembler::Comment(const char* msg) {
+ AddNode(machine()->Comment(msg));
+}
Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
Node** args) {
- int param_count =
- static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int param_count = static_cast<int>(desc->ParameterCount());
int input_count = param_count + 1;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
@@ -152,8 +165,7 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
Node* function, Node** args,
Node* frame_state) {
DCHECK(desc->NeedsFrameState());
- int param_count =
- static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int param_count = static_cast<int>(desc->ParameterCount());
int input_count = param_count + 2;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
@@ -244,8 +256,7 @@ Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
Node** args) {
- int param_count =
- static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int param_count = static_cast<int>(desc->ParameterCount());
int input_count = param_count + 1;
Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
@@ -254,7 +265,6 @@ Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
buffer[index++] = args[i];
}
Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -276,7 +286,6 @@ Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
Node* nodes[] = {centry, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -298,7 +307,6 @@ Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -322,7 +330,6 @@ Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -345,7 +352,6 @@ Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
@@ -368,7 +374,29 @@ Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
- NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
+Node* RawMachineAssembler::TailCallRuntime5(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* arg5,
+ Node* context) {
+ const int kArity = 5;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, arg2, arg3, arg4, arg5, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index f3445aceea..c7d42369b9 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -40,7 +40,10 @@ class RawMachineAssembler {
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
MachineRepresentation word = MachineType::PointerRepresentation(),
MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::Flag::kNoFlags);
+ MachineOperatorBuilder::Flag::kNoFlags,
+ MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
+ MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport());
~RawMachineAssembler() {}
Isolate* isolate() const { return isolate_; }
@@ -76,6 +79,7 @@ class RawMachineAssembler {
return kPointerSize == 8 ? Int64Constant(value)
: Int32Constant(static_cast<int>(value));
}
+ Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
Node* Int32Constant(int32_t value) {
return AddNode(common()->Int32Constant(value));
}
@@ -104,6 +108,12 @@ class RawMachineAssembler {
Node* ExternalConstant(ExternalReference address) {
return AddNode(common()->ExternalConstant(address));
}
+ Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode) {
+ return AddNode(common()->RelocatableInt32Constant(value, rmode));
+ }
+ Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode) {
+ return AddNode(common()->RelocatableInt64Constant(value, rmode));
+ }
Node* Projection(int index, Node* a) {
return AddNode(common()->Projection(index), a);
@@ -126,6 +136,43 @@ class RawMachineAssembler {
base, index, value);
}
+ // Unaligned memory operations
+ Node* UnalignedLoad(MachineType rep, Node* base) {
+ return UnalignedLoad(rep, base, IntPtrConstant(0));
+ }
+ Node* UnalignedLoad(MachineType rep, Node* base, Node* index) {
+ if (machine()->UnalignedLoadSupported(rep, 1)) {
+ return AddNode(machine()->Load(rep), base, index);
+ } else {
+ return AddNode(machine()->UnalignedLoad(rep), base, index);
+ }
+ }
+ Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* value) {
+ return UnalignedStore(rep, base, IntPtrConstant(0), value);
+ }
+ Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* value) {
+ MachineType t = MachineType::TypeForRepresentation(rep);
+ if (machine()->UnalignedStoreSupported(t, 1)) {
+ return AddNode(machine()->Store(StoreRepresentation(
+ rep, WriteBarrierKind::kNoWriteBarrier)),
+ base, index, value);
+ } else {
+ return AddNode(
+ machine()->UnalignedStore(UnalignedStoreRepresentation(rep)), base,
+ index, value);
+ }
+ }
+
+ // Atomic memory operations.
+ Node* AtomicLoad(MachineType rep, Node* base, Node* index) {
+ return AddNode(machine()->AtomicLoad(rep), base, index);
+ }
+ Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* value) {
+ return AddNode(machine()->AtomicStore(rep), base, index, value);
+ }
+
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return AddNode(machine()->WordAnd(), a, b);
@@ -239,6 +286,9 @@ class RawMachineAssembler {
Node* Int32MulHigh(Node* a, Node* b) {
return AddNode(machine()->Int32MulHigh(), a, b);
}
+ Node* Int32MulWithOverflow(Node* a, Node* b) {
+ return AddNode(machine()->Int32MulWithOverflow(), a, b);
+ }
Node* Int32Div(Node* a, Node* b) {
return AddNode(machine()->Int32Div(), a, b);
}
@@ -353,6 +403,8 @@ class RawMachineAssembler {
INTPTR_BINOP(Int, AddWithOverflow);
INTPTR_BINOP(Int, Sub);
INTPTR_BINOP(Int, SubWithOverflow);
+ INTPTR_BINOP(Int, Mul);
+ INTPTR_BINOP(Int, Div);
INTPTR_BINOP(Int, LessThan);
INTPTR_BINOP(Int, LessThanOrEqual);
INTPTR_BINOP(Word, Equal);
@@ -387,14 +439,8 @@ class RawMachineAssembler {
Node* Float32Div(Node* a, Node* b) {
return AddNode(machine()->Float32Div(), a, b);
}
- Node* Float32Max(Node* a, Node* b) {
- return AddNode(machine()->Float32Max().op(), a, b);
- }
- Node* Float32Min(Node* a, Node* b) {
- return AddNode(machine()->Float32Min().op(), a, b);
- }
Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
- Node* Float32Neg(Node* a) { return Float32Sub(Float32Constant(-0.0f), a); }
+ Node* Float32Neg(Node* a) { return AddNode(machine()->Float32Neg(), a); }
Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
Node* Float32Equal(Node* a, Node* b) {
return AddNode(machine()->Float32Equal(), a, b);
@@ -412,7 +458,12 @@ class RawMachineAssembler {
Node* Float32GreaterThanOrEqual(Node* a, Node* b) {
return Float32LessThanOrEqual(b, a);
}
-
+ Node* Float32Max(Node* a, Node* b) {
+ return AddNode(machine()->Float32Max(), a, b);
+ }
+ Node* Float32Min(Node* a, Node* b) {
+ return AddNode(machine()->Float32Min(), a, b);
+ }
Node* Float64Add(Node* a, Node* b) {
return AddNode(machine()->Float64Add(), a, b);
}
@@ -429,14 +480,39 @@ class RawMachineAssembler {
return AddNode(machine()->Float64Mod(), a, b);
}
Node* Float64Max(Node* a, Node* b) {
- return AddNode(machine()->Float64Max().op(), a, b);
+ return AddNode(machine()->Float64Max(), a, b);
}
Node* Float64Min(Node* a, Node* b) {
- return AddNode(machine()->Float64Min().op(), a, b);
+ return AddNode(machine()->Float64Min(), a, b);
}
Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
- Node* Float64Neg(Node* a) { return Float64Sub(Float64Constant(-0.0), a); }
+ Node* Float64Neg(Node* a) { return AddNode(machine()->Float64Neg(), a); }
+ Node* Float64Acos(Node* a) { return AddNode(machine()->Float64Acos(), a); }
+ Node* Float64Acosh(Node* a) { return AddNode(machine()->Float64Acosh(), a); }
+ Node* Float64Asin(Node* a) { return AddNode(machine()->Float64Asin(), a); }
+ Node* Float64Asinh(Node* a) { return AddNode(machine()->Float64Asinh(), a); }
+ Node* Float64Atan(Node* a) { return AddNode(machine()->Float64Atan(), a); }
+ Node* Float64Atanh(Node* a) { return AddNode(machine()->Float64Atanh(), a); }
+ Node* Float64Atan2(Node* a, Node* b) {
+ return AddNode(machine()->Float64Atan2(), a, b);
+ }
+ Node* Float64Cbrt(Node* a) { return AddNode(machine()->Float64Cbrt(), a); }
+ Node* Float64Cos(Node* a) { return AddNode(machine()->Float64Cos(), a); }
+ Node* Float64Cosh(Node* a) { return AddNode(machine()->Float64Cosh(), a); }
+ Node* Float64Exp(Node* a) { return AddNode(machine()->Float64Exp(), a); }
+ Node* Float64Expm1(Node* a) { return AddNode(machine()->Float64Expm1(), a); }
+ Node* Float64Log(Node* a) { return AddNode(machine()->Float64Log(), a); }
+ Node* Float64Log1p(Node* a) { return AddNode(machine()->Float64Log1p(), a); }
+ Node* Float64Log10(Node* a) { return AddNode(machine()->Float64Log10(), a); }
+ Node* Float64Log2(Node* a) { return AddNode(machine()->Float64Log2(), a); }
+ Node* Float64Pow(Node* a, Node* b) {
+ return AddNode(machine()->Float64Pow(), a, b);
+ }
+ Node* Float64Sin(Node* a) { return AddNode(machine()->Float64Sin(), a); }
+ Node* Float64Sinh(Node* a) { return AddNode(machine()->Float64Sinh(), a); }
Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
+ Node* Float64Tan(Node* a) { return AddNode(machine()->Float64Tan(), a); }
+ Node* Float64Tanh(Node* a) { return AddNode(machine()->Float64Tanh(), a); }
Node* Float64Equal(Node* a, Node* b) {
return AddNode(machine()->Float64Equal(), a, b);
}
@@ -455,6 +531,12 @@ class RawMachineAssembler {
}
// Conversions.
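+  // Reinterprets a machine word as a tagged value without changing the bit
+  // pattern.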
+ Node* BitcastWordToTagged(Node* a) {
+ return AddNode(machine()->BitcastWordToTagged(), a);
+ }
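+  // Truncates a float64 to a word32 with JavaScript ToInt32 (modulo 2^32)
+  // semantics.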
+ Node* TruncateFloat64ToWord32(Node* a) {
+ return AddNode(machine()->TruncateFloat64ToWord32(), a);
+ }
Node* ChangeFloat32ToFloat64(Node* a) {
return AddNode(machine()->ChangeFloat32ToFloat64(), a);
}
@@ -500,12 +582,12 @@ class RawMachineAssembler {
Node* TruncateFloat64ToFloat32(Node* a) {
return AddNode(machine()->TruncateFloat64ToFloat32(), a);
}
- Node* TruncateFloat64ToInt32(TruncationMode mode, Node* a) {
- return AddNode(machine()->TruncateFloat64ToInt32(mode), a);
- }
Node* TruncateInt64ToInt32(Node* a) {
return AddNode(machine()->TruncateInt64ToInt32(), a);
}
+ Node* RoundFloat64ToInt32(Node* a) {
+ return AddNode(machine()->RoundFloat64ToInt32(), a);
+ }
Node* RoundInt32ToFloat32(Node* a) {
return AddNode(machine()->RoundInt32ToFloat32(), a);
}
@@ -595,6 +677,14 @@ class RawMachineAssembler {
Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
}
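+  // Unaligned variants of the pointer accessors above, for data that may not
+  // honor the natural alignment of its type.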
+ Node* UnalignedLoadFromPointer(void* address, MachineType rep,
+ int32_t offset = 0) {
+ return UnalignedLoad(rep, PointerConstant(address), Int32Constant(offset));
+ }
+ Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
+ Node* node) {
+ return UnalignedStore(rep, PointerConstant(address), node);
+ }
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
@@ -651,6 +741,9 @@ class RawMachineAssembler {
// Tail call to a runtime function with four arguments.
Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* context);
+ // Tail call to a runtime function with five arguments.
+ Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* context);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
@@ -660,13 +753,21 @@ class RawMachineAssembler {
void Goto(RawMachineLabel* label);
void Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val);
- void Switch(Node* index, RawMachineLabel* default_label, int32_t* case_values,
- RawMachineLabel** case_labels, size_t case_count);
+ void Switch(Node* index, RawMachineLabel* default_label,
+ const int32_t* case_values, RawMachineLabel** case_labels,
+ size_t case_count);
void Return(Node* value);
void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
+ void DebugBreak();
+ void Comment(const char* msg);
+
+  // Adds success / exception successor blocks and ends the current block,
+  // which must end in a potentially throwing call node.
+ void Continuations(Node* call, RawMachineLabel* if_success,
+ RawMachineLabel* if_exception);
// Variables.
Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
@@ -705,10 +806,7 @@ class RawMachineAssembler {
BasicBlock* CurrentBlock();
Schedule* schedule() { return schedule_; }
- size_t parameter_count() const { return machine_sig()->parameter_count(); }
- const MachineSignature* machine_sig() const {
- return call_descriptor_->GetMachineSignature();
- }
+ size_t parameter_count() const { return call_descriptor_->ParameterCount(); }
Isolate* isolate_;
Graph* graph_;
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
new file mode 100644
index 0000000000..c671fc23b8
--- /dev/null
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -0,0 +1,239 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/redundancy-elimination.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+RedundancyElimination::RedundancyElimination(Editor* editor, Zone* zone)
+ : AdvancedReducer(editor), node_checks_(zone), zone_(zone) {}
+
+RedundancyElimination::~RedundancyElimination() {}
+
+Reduction RedundancyElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckFloat64Hole:
+ case IrOpcode::kCheckIf:
+ case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckTaggedHole:
+ case IrOpcode::kCheckTaggedPointer:
+ case IrOpcode::kCheckTaggedSigned:
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedInt32Add:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedInt32Div:
+ case IrOpcode::kCheckedInt32Mod:
+ case IrOpcode::kCheckedInt32Mul:
+ case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedUint32ToInt32:
+ return ReduceCheckNode(node);
+ case IrOpcode::kEffectPhi:
+ return ReduceEffectPhi(node);
+ case IrOpcode::kDead:
+ break;
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ return ReduceOtherNode(node);
+ }
+ return NoChange();
+}
+
+// static
+RedundancyElimination::EffectPathChecks*
+RedundancyElimination::EffectPathChecks::Copy(Zone* zone,
+ EffectPathChecks const* checks) {
+ return new (zone->New(sizeof(EffectPathChecks))) EffectPathChecks(*checks);
+}
+
+// static
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::EffectPathChecks::Empty(Zone* zone) {
+ return new (zone->New(sizeof(EffectPathChecks))) EffectPathChecks(nullptr, 0);
+}
+
+bool RedundancyElimination::EffectPathChecks::Equals(
+ EffectPathChecks const* that) const {
+ if (this->size_ != that->size_) return false;
+ Check* this_head = this->head_;
+ Check* that_head = that->head_;
+ while (this_head != that_head) {
+ if (this_head->node != that_head->node) return false;
+ this_head = this_head->next;
+ that_head = that_head->next;
+ }
+ return true;
+}
+
+void RedundancyElimination::EffectPathChecks::Merge(
+ EffectPathChecks const* that) {
+ // Change the current check list to a longest common tail of this check
+ // list and the other list.
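+  //
+  // For example, merging {a, b, x, y} (size 4) with {c, x, y} (size 3) first
+  // drops {a} from the longer list, then advances both lists in lock-step
+  // past {b} and {c}, leaving the common tail {x, y}.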
+
+ // First, we throw away the prefix of the longer list, so that
+ // we have lists of the same length.
+ Check* that_head = that->head_;
+ size_t that_size = that->size_;
+ while (that_size > size_) {
+ that_head = that_head->next;
+ that_size--;
+ }
+ while (size_ > that_size) {
+ head_ = head_->next;
+ size_--;
+ }
+
+ // Then we go through both lists in lock-step until we find
+ // the common tail.
+ while (head_ != that_head) {
+ DCHECK_LT(0u, size_);
+ DCHECK_NOT_NULL(head_);
+ size_--;
+ head_ = head_->next;
+ that_head = that_head->next;
+ }
+}
+
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
+ Node* node) const {
+ Check* head = new (zone->New(sizeof(Check))) Check(node, head_);
+ return new (zone->New(sizeof(EffectPathChecks)))
+ EffectPathChecks(head, size_ + 1);
+}
+
+namespace {
+
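+// Two checks are compatible if they have the same operator and the same value
+// inputs; their effect and control inputs may differ.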
+bool IsCompatibleCheck(Node const* a, Node const* b) {
+ if (a->op() != b->op()) return false;
+ for (int i = a->op()->ValueInputCount(); --i >= 0;) {
+ if (a->InputAt(i) != b->InputAt(i)) return false;
+ }
+ return true;
+}
+
+} // namespace
+
+Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
+ for (Check const* check = head_; check != nullptr; check = check->next) {
+ if (IsCompatibleCheck(check->node, node)) {
+ DCHECK(!check->node->IsDead());
+ return check->node;
+ }
+ }
+ return nullptr;
+}
+
+RedundancyElimination::EffectPathChecks const*
+RedundancyElimination::PathChecksForEffectNodes::Get(Node* node) const {
+ size_t const id = node->id();
+ if (id < info_for_node_.size()) return info_for_node_[id];
+ return nullptr;
+}
+
+void RedundancyElimination::PathChecksForEffectNodes::Set(
+ Node* node, EffectPathChecks const* checks) {
+ size_t const id = node->id();
+ if (id >= info_for_node_.size()) info_for_node_.resize(id + 1, nullptr);
+ info_for_node_[id] = checks;
+}
+
+Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+ // See if we have another check that dominates us.
+ if (Node* check = checks->LookupCheck(node)) {
+ ReplaceWithValue(node, check);
+ return Replace(check);
+ }
+ // Learn from this check.
+ return UpdateChecks(node, checks->AddCheck(zone(), node));
+}
+
+Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
+ Node* const control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just use
+ // the information from the loop entry edge.
+ return TakeChecksFromFirstEffect(node);
+ }
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+ // Shortcut for the case when we do not know anything about some input.
+ int const input_count = node->op()->EffectInputCount();
+ for (int i = 0; i < input_count; ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (node_checks_.Get(effect) == nullptr) return NoChange();
+ }
+
+ // Make a copy of the first input's checks and merge with the checks
+ // from other inputs.
+ EffectPathChecks* checks = EffectPathChecks::Copy(
+ zone(), node_checks_.Get(NodeProperties::GetEffectInput(node, 0)));
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = NodeProperties::GetEffectInput(node, i);
+ checks->Merge(node_checks_.Get(input));
+ }
+ return UpdateChecks(node, checks);
+}
+
+Reduction RedundancyElimination::ReduceStart(Node* node) {
+ return UpdateChecks(node, EffectPathChecks::Empty(zone()));
+}
+
+Reduction RedundancyElimination::ReduceOtherNode(Node* node) {
+ if (node->op()->EffectInputCount() == 1) {
+ if (node->op()->EffectOutputCount() == 1) {
+ return TakeChecksFromFirstEffect(node);
+ } else {
+ // Effect terminators should be handled specially.
+ return NoChange();
+ }
+ }
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+ return NoChange();
+}
+
+Reduction RedundancyElimination::TakeChecksFromFirstEffect(Node* node) {
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+  // We just propagate the information from the effect input (ideally,
+  // we would only revisit effect uses if there is a change).
+ return UpdateChecks(node, checks);
+}
+
+Reduction RedundancyElimination::UpdateChecks(Node* node,
+ EffectPathChecks const* checks) {
+ EffectPathChecks const* original = node_checks_.Get(node);
+  // Only signal that the {node} has Changed if the information about {checks}
+  // has changed with respect to the {original}.
+ if (checks != original) {
+ if (original == nullptr || !checks->Equals(original)) {
+ node_checks_.Set(node, checks);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
new file mode 100644
index 0000000000..88f9032a84
--- /dev/null
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REDUNDANCY_ELIMINATION_H_
+#define V8_COMPILER_REDUNDANCY_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
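+// Eliminates redundant checks (e.g. CheckBounds, CheckedInt32Add) by tracking
+// the checks already performed along each effect path and replacing a check
+// with an earlier compatible check that dominates it.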
+class RedundancyElimination final : public AdvancedReducer {
+ public:
+ RedundancyElimination(Editor* editor, Zone* zone);
+ ~RedundancyElimination() final;
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct Check {
+ Check(Node* node, Check* next) : node(node), next(next) {}
+ Node* node;
+ Check* next;
+ };
+
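+  // The checks performed along one effect path, kept as a linked list whose
+  // nodes are shared between paths; Merge reduces a list to the longest
+  // common tail of two paths.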
+ class EffectPathChecks final {
+ public:
+ static EffectPathChecks* Copy(Zone* zone, EffectPathChecks const* checks);
+ static EffectPathChecks const* Empty(Zone* zone);
+ bool Equals(EffectPathChecks const* that) const;
+ void Merge(EffectPathChecks const* that);
+
+ EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
+ Node* LookupCheck(Node* node) const;
+
+ private:
+ EffectPathChecks(Check* head, size_t size) : head_(head), size_(size) {}
+
+ // We keep track of the list length so that we can find the longest
+ // common tail easily.
+ Check* head_;
+ size_t size_;
+ };
+
+ class PathChecksForEffectNodes final {
+ public:
+ explicit PathChecksForEffectNodes(Zone* zone) : info_for_node_(zone) {}
+ EffectPathChecks const* Get(Node* node) const;
+ void Set(Node* node, EffectPathChecks const* checks);
+
+ private:
+ ZoneVector<EffectPathChecks const*> info_for_node_;
+ };
+
+ Reduction ReduceCheckNode(Node* node);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceOtherNode(Node* node);
+
+ Reduction TakeChecksFromFirstEffect(Node* node);
+ Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
+
+ Zone* zone() const { return zone_; }
+
+ PathChecksForEffectNodes node_checks_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(RedundancyElimination);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_REDUNDANCY_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index f2160f52ce..cefd04af1f 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -44,39 +44,15 @@ void VerifyAllocatedGaps(const Instruction* instr) {
} // namespace
-
-void RegisterAllocatorVerifier::VerifyInput(
- const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
- if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
- CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
- }
-}
-
-
-void RegisterAllocatorVerifier::VerifyTemp(
- const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
- CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
- CHECK_NE(kConstant, constraint.type_);
-}
-
-
-void RegisterAllocatorVerifier::VerifyOutput(
- const OperandConstraint& constraint) {
- CHECK_NE(kImmediate, constraint.type_);
- CHECK_NE(kExplicit, constraint.type_);
- CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
- constraint.virtual_register_);
-}
-
-
RegisterAllocatorVerifier::RegisterAllocatorVerifier(
Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence)
- : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
+ : zone_(zone),
+ config_(config),
+ sequence_(sequence),
+ constraints_(zone),
+ assessments_(zone),
+ outstanding_assessments_(zone) {
constraints_.reserve(sequence->instructions().size());
// TODO(dcarney): model unique constraints.
// Construct OperandConstraints for all InstructionOperands, eliminating
@@ -111,6 +87,30 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
}
}
+void RegisterAllocatorVerifier::VerifyInput(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kSameAsFirst, constraint.type_);
+ if (constraint.type_ != kImmediate && constraint.type_ != kExplicit) {
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+ constraint.virtual_register_);
+ }
+}
+
+void RegisterAllocatorVerifier::VerifyTemp(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kSameAsFirst, constraint.type_);
+ CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
+ CHECK_NE(kConstant, constraint.type_);
+}
+
+void RegisterAllocatorVerifier::VerifyOutput(
+ const OperandConstraint& constraint) {
+ CHECK_NE(kImmediate, constraint.type_);
+ CHECK_NE(kExplicit, constraint.type_);
+ CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
+ constraint.virtual_register_);
+}
void RegisterAllocatorVerifier::VerifyAssignment() {
CHECK(sequence()->instructions().size() == constraints()->size());
@@ -138,7 +138,6 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
}
}
-
void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
OperandConstraint* constraint) {
constraint->value_ = kMinInt;
@@ -161,14 +160,14 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
int vreg = unallocated->virtual_register();
constraint->virtual_register_ = vreg;
if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
- constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
+ constraint->type_ = kFixedSlot;
constraint->value_ = unallocated->fixed_slot_index();
} else {
switch (unallocated->extended_policy()) {
case UnallocatedOperand::ANY:
case UnallocatedOperand::NONE:
- if (sequence()->IsFloat(vreg)) {
- constraint->type_ = kNoneDouble;
+ if (sequence()->IsFP(vreg)) {
+ constraint->type_ = kNoneFP;
} else {
constraint->type_ = kNone;
}
@@ -182,19 +181,21 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
}
constraint->value_ = unallocated->fixed_register_index();
break;
- case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
- constraint->type_ = kFixedDoubleRegister;
+ case UnallocatedOperand::FIXED_FP_REGISTER:
+ constraint->type_ = kFixedFPRegister;
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::MUST_HAVE_REGISTER:
- if (sequence()->IsFloat(vreg)) {
- constraint->type_ = kDoubleRegister;
+ if (sequence()->IsFP(vreg)) {
+ constraint->type_ = kFPRegister;
} else {
constraint->type_ = kRegister;
}
break;
case UnallocatedOperand::MUST_HAVE_SLOT:
- constraint->type_ = sequence()->IsFloat(vreg) ? kDoubleSlot : kSlot;
+ constraint->type_ = kSlot;
+ constraint->value_ =
+ ElementSizeLog2Of(sequence()->GetRepresentation(vreg));
break;
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
constraint->type_ = kSameAsFirst;
@@ -204,7 +205,6 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
}
}
-
void RegisterAllocatorVerifier::CheckConstraint(
const InstructionOperand* op, const OperandConstraint* constraint) {
switch (constraint->type_) {
@@ -225,8 +225,8 @@ void RegisterAllocatorVerifier::CheckConstraint(
case kRegister:
CHECK(op->IsRegister());
return;
- case kDoubleRegister:
- CHECK(op->IsDoubleRegister());
+ case kFPRegister:
+ CHECK(op->IsFPRegister());
return;
case kExplicit:
CHECK(op->IsExplicit());
@@ -234,29 +234,26 @@ void RegisterAllocatorVerifier::CheckConstraint(
case kFixedRegister:
case kRegisterAndSlot:
CHECK(op->IsRegister());
- CHECK_EQ(LocationOperand::cast(op)->GetRegister().code(),
- constraint->value_);
+ CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
- case kFixedDoubleRegister:
- CHECK(op->IsDoubleRegister());
- CHECK_EQ(LocationOperand::cast(op)->GetDoubleRegister().code(),
- constraint->value_);
+ case kFixedFPRegister:
+ CHECK(op->IsFPRegister());
+ CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
case kFixedSlot:
- CHECK(op->IsStackSlot());
+ CHECK(op->IsStackSlot() || op->IsFPStackSlot());
CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
return;
case kSlot:
- CHECK(op->IsStackSlot());
- return;
- case kDoubleSlot:
- CHECK(op->IsDoubleStackSlot());
+ CHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ CHECK_EQ(ElementSizeLog2Of(LocationOperand::cast(op)->representation()),
+ constraint->value_);
return;
case kNone:
CHECK(op->IsRegister() || op->IsStackSlot());
return;
- case kNoneDouble:
- CHECK(op->IsDoubleRegister() || op->IsDoubleStackSlot());
+ case kNoneFP:
+ CHECK(op->IsFPRegister() || op->IsFPStackSlot());
return;
case kSameAsFirst:
CHECK(false);
@@ -264,457 +261,235 @@ void RegisterAllocatorVerifier::CheckConstraint(
}
}
-namespace {
-
-typedef RpoNumber Rpo;
-
-static const int kInvalidVreg = InstructionOperand::kInvalidVirtualRegister;
-
-struct PhiData : public ZoneObject {
- PhiData(Rpo definition_rpo, const PhiInstruction* phi, int first_pred_vreg,
- const PhiData* first_pred_phi, Zone* zone)
- : definition_rpo(definition_rpo),
- virtual_register(phi->virtual_register()),
- first_pred_vreg(first_pred_vreg),
- first_pred_phi(first_pred_phi),
- operands(zone) {
- operands.reserve(phi->operands().size());
- operands.insert(operands.begin(), phi->operands().begin(),
- phi->operands().end());
- }
- const Rpo definition_rpo;
- const int virtual_register;
- const int first_pred_vreg;
- const PhiData* first_pred_phi;
- IntVector operands;
-};
-
-class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
- public:
- explicit PhiMap(Zone* zone) : ZoneMap<int, PhiData*>(zone) {}
-};
-
-struct OperandLess {
- bool operator()(const InstructionOperand* a,
- const InstructionOperand* b) const {
- return a->CompareCanonicalized(*b);
- }
-};
-
-class OperandMap : public ZoneObject {
- public:
- struct MapValue : public ZoneObject {
- MapValue()
- : incoming(nullptr),
- define_vreg(kInvalidVreg),
- use_vreg(kInvalidVreg),
- succ_vreg(kInvalidVreg) {}
- MapValue* incoming; // value from first predecessor block.
- int define_vreg; // valid if this value was defined in this block.
- int use_vreg; // valid if this value was used in this block.
- int succ_vreg; // valid if propagated back from successor block.
- };
-
- class Map
- : public ZoneMap<const InstructionOperand*, MapValue*, OperandLess> {
- public:
- explicit Map(Zone* zone)
- : ZoneMap<const InstructionOperand*, MapValue*, OperandLess>(zone) {}
-
- // Remove all entries with keys not in other.
- void Intersect(const Map& other) {
- if (this->empty()) return;
- auto it = this->begin();
- OperandLess less;
- for (const std::pair<const InstructionOperand*, MapValue*>& o : other) {
- while (less(it->first, o.first)) {
- this->erase(it++);
- if (it == this->end()) return;
- }
- if (it->first->EqualsCanonicalized(*o.first)) {
- ++it;
- if (it == this->end()) return;
- } else {
- CHECK(less(o.first, it->first));
- }
- }
- }
- };
-
- explicit OperandMap(Zone* zone) : map_(zone) {}
-
- Map& map() { return map_; }
+void BlockAssessments::PerformMoves(const Instruction* instruction) {
+ const ParallelMove* first =
+ instruction->GetParallelMove(Instruction::GapPosition::START);
+ PerformParallelMoves(first);
+ const ParallelMove* last =
+ instruction->GetParallelMove(Instruction::GapPosition::END);
+ PerformParallelMoves(last);
+}
- void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
- // Compute outgoing mappings.
- Map to_insert(zone);
- for (const MoveOperands* move : *moves) {
- if (move->IsEliminated()) continue;
- auto cur = map().find(&move->source());
- CHECK(cur != map().end());
- auto res =
- to_insert.insert(std::make_pair(&move->destination(), cur->second));
- // Ensure injectivity of moves.
- CHECK(res.second);
- }
- // Drop current mappings.
- for (const MoveOperands* move : *moves) {
- if (move->IsEliminated()) continue;
- auto cur = map().find(&move->destination());
- if (cur != map().end()) map().erase(cur);
- }
- // Insert new values.
- map().insert(to_insert.begin(), to_insert.end());
+void BlockAssessments::PerformParallelMoves(const ParallelMove* moves) {
+ if (moves == nullptr) return;
+
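+  // Parallel moves have copy semantics: every source must be read before any
+  // destination is written, so assessments are first staged in
+  // {map_for_moves_} and only committed to {map_} afterwards.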
+ CHECK(map_for_moves_.empty());
+ for (MoveOperands* move : *moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ auto it = map_.find(move->source());
+    // The RHS of a parallel move should already have been assessed.
+ CHECK(it != map_.end());
+ // The LHS of a parallel move should not have been assigned in this
+ // parallel move.
+ CHECK(map_for_moves_.find(move->destination()) == map_for_moves_.end());
+ // Copy the assessment to the destination.
+ map_for_moves_[move->destination()] = it->second;
}
-
- void RunGaps(Zone* zone, const Instruction* instr) {
- for (int i = Instruction::FIRST_GAP_POSITION;
- i <= Instruction::LAST_GAP_POSITION; i++) {
- Instruction::GapPosition inner_pos =
- static_cast<Instruction::GapPosition>(i);
- const ParallelMove* move = instr->GetParallelMove(inner_pos);
- if (move == nullptr) continue;
- RunParallelMoves(zone, move);
- }
+ for (auto pair : map_for_moves_) {
+ map_[pair.first] = pair.second;
}
+ map_for_moves_.clear();
+}
- void Drop(const InstructionOperand* op) {
- auto it = map().find(op);
- if (it != map().end()) map().erase(it);
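+// Calls clobber all registers, so any assessment attached to a register is
+// stale after a call and must be dropped; stack slots are unaffected.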
+void BlockAssessments::DropRegisters() {
+ for (auto iterator = map().begin(), end = map().end(); iterator != end;) {
+ auto current = iterator;
+ ++iterator;
+ InstructionOperand op = current->first;
+ if (op.IsAnyRegister()) map().erase(current);
}
+}
- void DropRegisters(const RegisterConfiguration* config) {
- // TODO(dcarney): sort map by kind and drop range.
- for (auto it = map().begin(); it != map().end();) {
- const InstructionOperand* op = it->first;
- if (op->IsRegister() || op->IsDoubleRegister()) {
- map().erase(it++);
- } else {
- ++it;
+BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
+ const InstructionBlock* block) {
+ RpoNumber current_block_id = block->rpo_number();
+
+ BlockAssessments* ret = new (zone()) BlockAssessments(zone());
+ if (block->PredecessorCount() == 0) {
+    // TODO(mtrofin): the following check should hold; however, in certain
+    // unit tests it is invalidated by the last block. Investigate and
+    // normalize the CFG.
+ // CHECK(current_block_id.ToInt() == 0);
+ // The phi size test below is because we can, technically, have phi
+ // instructions with one argument. Some tests expose that, too.
+ } else if (block->PredecessorCount() == 1 && block->phis().size() == 0) {
+ const BlockAssessments* prev_block = assessments_[block->predecessors()[0]];
+ ret->CopyFrom(prev_block);
+ } else {
+ for (RpoNumber pred_id : block->predecessors()) {
+      // For every operand coming from any of the predecessors, create a
+      // pending assessment.
+ auto iterator = assessments_.find(pred_id);
+ if (iterator == assessments_.end()) {
+        // This block is the head of a loop, and this predecessor is the
+        // loopback arc.
+        // Validate this is a loop case; otherwise the CFG is malformed.
+ CHECK(pred_id >= current_block_id);
+ CHECK(block->IsLoopHeader());
+ continue;
+ }
+ const BlockAssessments* pred_assessments = iterator->second;
+ CHECK_NOT_NULL(pred_assessments);
+ for (auto pair : pred_assessments->map()) {
+ InstructionOperand operand = pair.first;
+ if (ret->map().find(operand) == ret->map().end()) {
+ ret->map().insert(std::make_pair(
+ operand, new (zone()) PendingAssessment(block, operand)));
+ }
}
}
}
+ return ret;
+}
- MapValue* Define(Zone* zone, const InstructionOperand* op,
- int virtual_register) {
- MapValue* value = new (zone) MapValue();
- value->define_vreg = virtual_register;
- auto res = map().insert(std::make_pair(op, value));
- if (!res.second) res.first->second = value;
- return value;
- }
-
- void Use(const InstructionOperand* op, int use_vreg, bool initial_pass) {
- auto it = map().find(op);
- CHECK(it != map().end());
- MapValue* v = it->second;
- if (v->define_vreg != kInvalidVreg) {
- CHECK_EQ(v->define_vreg, use_vreg);
- }
- // Already used this vreg in this block.
- if (v->use_vreg != kInvalidVreg) {
- CHECK_EQ(v->use_vreg, use_vreg);
- return;
- }
- if (!initial_pass) {
- // A value may be defined and used in this block or the use must have
- // propagated up.
- if (v->succ_vreg != kInvalidVreg) {
- CHECK_EQ(v->succ_vreg, use_vreg);
- } else {
- CHECK_EQ(v->define_vreg, use_vreg);
+void RegisterAllocatorVerifier::ValidatePendingAssessment(
+ RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments, const PendingAssessment* assessment,
+ int virtual_register) {
+  // When validating a pending assessment, it is possible that some of the
+  // assessments for the original operand (the one the assessment was first
+  // created for) are also pending. To avoid recursion, we use a work list.
+  // To deal with cycles, we keep a set of seen nodes.
+ ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
+ ZoneSet<RpoNumber> seen(zone());
+ worklist.push(std::make_pair(assessment, virtual_register));
+ seen.insert(block_id);
+
+ while (!worklist.empty()) {
+ auto work = worklist.front();
+ const PendingAssessment* current_assessment = work.first;
+ int current_virtual_register = work.second;
+ InstructionOperand current_operand = current_assessment->operand();
+ worklist.pop();
+
+ const InstructionBlock* origin = current_assessment->origin();
+ CHECK(origin->PredecessorCount() > 1 || origin->phis().size() > 0);
+
+ // Check if the virtual register is a phi first, instead of relying on
+ // the incoming assessments. In particular, this handles the case
+ // v1 = phi v0 v0, which structurally is identical to v0 having been
+ // defined at the top of a diamond, and arriving at the node joining the
+ // diamond's branches.
+ const PhiInstruction* phi = nullptr;
+ for (const PhiInstruction* candidate : origin->phis()) {
+ if (candidate->virtual_register() == current_virtual_register) {
+ phi = candidate;
+ break;
}
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
- }
- // Go up block list and ensure the correct definition is reached.
- for (; v != nullptr; v = v->incoming) {
- // Value unused in block.
- if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
- continue;
- }
- // Found correct definition or use.
- CHECK(v->define_vreg == use_vreg || v->use_vreg == use_vreg);
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
}
- // Use of a non-phi value without definition.
- CHECK(false);
- }
- void UsePhi(const InstructionOperand* op, const PhiData* phi,
- bool initial_pass) {
- auto it = map().find(op);
- CHECK(it != map().end());
- MapValue* v = it->second;
- int use_vreg = phi->virtual_register;
- // Phis are not defined.
- CHECK_EQ(kInvalidVreg, v->define_vreg);
- // Already used this vreg in this block.
- if (v->use_vreg != kInvalidVreg) {
- CHECK_EQ(v->use_vreg, use_vreg);
- return;
- }
- if (!initial_pass) {
- // A used phi must have propagated its use to a predecessor.
- CHECK_EQ(v->succ_vreg, use_vreg);
- // Mark the use.
- v->use_vreg = use_vreg;
- return;
- }
- // Go up the block list starting at the first predecessor and ensure this
- // phi has a correct use or definition.
- for (v = v->incoming; v != nullptr; v = v->incoming) {
- // Value unused in block.
- if (v->define_vreg == kInvalidVreg && v->use_vreg == kInvalidVreg) {
- continue;
- }
- // Found correct definition or use.
- if (v->define_vreg != kInvalidVreg) {
- CHECK(v->define_vreg == phi->first_pred_vreg);
- } else if (v->use_vreg != phi->first_pred_vreg) {
- // Walk the phi chain, hunting for a matching phi use.
- const PhiData* p = phi;
- for (; p != nullptr; p = p->first_pred_phi) {
- if (p->virtual_register == v->use_vreg) break;
+ int op_index = 0;
+ for (RpoNumber pred : origin->predecessors()) {
+ int expected =
+ phi != nullptr ? phi->operands()[op_index] : current_virtual_register;
+
+ ++op_index;
+ auto pred_assignment = assessments_.find(pred);
+ if (pred_assignment == assessments_.end()) {
+ CHECK(origin->IsLoopHeader());
+ auto todo_iter = outstanding_assessments_.find(pred);
+ DelayedAssessments* set = nullptr;
+ if (todo_iter == outstanding_assessments_.end()) {
+ set = new (zone()) DelayedAssessments(zone());
+ outstanding_assessments_.insert(std::make_pair(pred, set));
+ } else {
+ set = todo_iter->second;
}
- CHECK(p);
+ set->AddDelayedAssessment(current_operand, expected);
+ continue;
}
- // Mark the use.
- it->second->use_vreg = use_vreg;
- return;
- }
- // Use of a phi value without definition.
- UNREACHABLE();
- }
-
- private:
- Map map_;
- DISALLOW_COPY_AND_ASSIGN(OperandMap);
-};
-
-} // namespace
+ const BlockAssessments* pred_assessments = pred_assignment->second;
+ auto found_contribution = pred_assessments->map().find(current_operand);
+ CHECK(found_contribution != pred_assessments->map().end());
+ Assessment* contribution = found_contribution->second;
-class RegisterAllocatorVerifier::BlockMaps {
- public:
- BlockMaps(Zone* zone, const InstructionSequence* sequence)
- : zone_(zone),
- sequence_(sequence),
- phi_map_guard_(sequence->VirtualRegisterCount(), zone),
- phi_map_(zone),
- incoming_maps_(zone),
- outgoing_maps_(zone) {
- InitializePhis();
- InitializeOperandMaps();
- }
-
- bool IsPhi(int virtual_register) {
- return phi_map_guard_.Contains(virtual_register);
- }
-
- const PhiData* GetPhi(int virtual_register) {
- auto it = phi_map_.find(virtual_register);
- CHECK(it != phi_map_.end());
- return it->second;
- }
-
- OperandMap* InitializeIncoming(size_t block_index, bool initial_pass) {
- return initial_pass ? InitializeFromFirstPredecessor(block_index)
- : InitializeFromIntersection(block_index);
- }
-
- void PropagateUsesBackwards() {
- typedef std::set<size_t, std::greater<size_t>, zone_allocator<size_t>>
- BlockIds;
- BlockIds block_ids((BlockIds::key_compare()),
- zone_allocator<size_t>(zone()));
- // First ensure that incoming contains only keys in all predecessors.
- for (const InstructionBlock* block : sequence()->instruction_blocks()) {
- size_t index = block->rpo_number().ToSize();
- block_ids.insert(index);
- OperandMap::Map& succ_map = incoming_maps_[index]->map();
- for (size_t i = 0; i < block->PredecessorCount(); ++i) {
- RpoNumber pred_rpo = block->predecessors()[i];
- succ_map.Intersect(outgoing_maps_[pred_rpo.ToSize()]->map());
- }
- }
- // Back propagation fixpoint.
- while (!block_ids.empty()) {
- // Pop highest block_id.
- auto block_id_it = block_ids.begin();
- const size_t succ_index = *block_id_it;
- block_ids.erase(block_id_it);
- // Propagate uses back to their definition blocks using succ_vreg.
- const InstructionBlock* block =
- sequence()->instruction_blocks()[succ_index];
- OperandMap::Map& succ_map = incoming_maps_[succ_index]->map();
- for (size_t i = 0; i < block->PredecessorCount(); ++i) {
- for (auto& succ_val : succ_map) {
- // An incoming map contains no defines.
- CHECK_EQ(kInvalidVreg, succ_val.second->define_vreg);
- // Compute succ_vreg.
- int succ_vreg = succ_val.second->succ_vreg;
- if (succ_vreg == kInvalidVreg) {
- succ_vreg = succ_val.second->use_vreg;
- // Initialize succ_vreg in back propagation chain.
- succ_val.second->succ_vreg = succ_vreg;
- }
- if (succ_vreg == kInvalidVreg) continue;
- // May need to transition phi.
- if (IsPhi(succ_vreg)) {
- const PhiData* phi = GetPhi(succ_vreg);
- if (phi->definition_rpo.ToSize() == succ_index) {
- // phi definition block, transition to pred value.
- succ_vreg = phi->operands[i];
- }
- }
- // Push succ_vreg up to all predecessors.
- RpoNumber pred_rpo = block->predecessors()[i];
- OperandMap::Map& pred_map = outgoing_maps_[pred_rpo.ToSize()]->map();
- auto& pred_val = *pred_map.find(succ_val.first);
- if (pred_val.second->use_vreg != kInvalidVreg) {
- CHECK_EQ(succ_vreg, pred_val.second->use_vreg);
- }
- if (pred_val.second->define_vreg != kInvalidVreg) {
- CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
- }
- if (pred_val.second->succ_vreg != kInvalidVreg) {
- if (succ_vreg != pred_val.second->succ_vreg) {
- // When a block introduces 2 identical phis A and B, and both are
- // operands to other phis C and D, and we optimized the moves
- // defining A or B such that they now appear in the block defining
- // A and B, the back propagation will get confused when visiting
- // upwards from C and D. The operand in the block defining A and B
- // will be attributed to C (or D, depending which of these is
- // visited first).
- CHECK(IsPhi(pred_val.second->succ_vreg));
- CHECK(IsPhi(succ_vreg));
- const PhiData* current_phi = GetPhi(succ_vreg);
- const PhiData* assigned_phi = GetPhi(pred_val.second->succ_vreg);
- CHECK_EQ(current_phi->operands.size(),
- assigned_phi->operands.size());
- CHECK_EQ(current_phi->definition_rpo,
- assigned_phi->definition_rpo);
- for (size_t i = 0; i < current_phi->operands.size(); ++i) {
- CHECK_EQ(current_phi->operands[i], assigned_phi->operands[i]);
- }
- }
- } else {
- pred_val.second->succ_vreg = succ_vreg;
- block_ids.insert(pred_rpo.ToSize());
+ switch (contribution->kind()) {
+ case Final:
+ ValidateFinalAssessment(
+ block_id, current_operand, current_assessments,
+ FinalAssessment::cast(contribution), expected);
+ break;
+ case Pending: {
+        // This happens if we have a diamond feeding into another one, with
+        // the inner one never being used other than for carrying the value.
+ const PendingAssessment* next = PendingAssessment::cast(contribution);
+ if (seen.find(pred) == seen.end()) {
+ worklist.push({next, expected});
+ seen.insert(pred);
}
+          // Note that we do not want to finalize pending assessments at the
+          // beginning of a block - which is the information we'd have
+          // available here - because this operand may be reused to define
+          // duplicate phis.
+ break;
}
}
}
- // Clear uses and back links for second pass.
- for (OperandMap* operand_map : incoming_maps_) {
- for (auto& succ_val : operand_map->map()) {
- succ_val.second->incoming = nullptr;
- succ_val.second->use_vreg = kInvalidVreg;
- }
- }
- }
-
- private:
- OperandMap* InitializeFromFirstPredecessor(size_t block_index) {
- OperandMap* to_init = outgoing_maps_[block_index];
- CHECK(to_init->map().empty());
- const InstructionBlock* block =
- sequence()->instruction_blocks()[block_index];
- if (block->predecessors().empty()) return to_init;
- size_t predecessor_index = block->predecessors()[0].ToSize();
- // Ensure not a backedge.
- CHECK(predecessor_index < block->rpo_number().ToSize());
- OperandMap* incoming = outgoing_maps_[predecessor_index];
- // Copy map and replace values.
- to_init->map() = incoming->map();
- for (auto& it : to_init->map()) {
- OperandMap::MapValue* incoming = it.second;
- it.second = new (zone()) OperandMap::MapValue();
- it.second->incoming = incoming;
- }
- // Copy to incoming map for second pass.
- incoming_maps_[block_index]->map() = to_init->map();
- return to_init;
- }
-
- OperandMap* InitializeFromIntersection(size_t block_index) {
- return incoming_maps_[block_index];
}
+ // If everything checks out, we may make the assessment.
+ current_assessments->map()[op] =
+ new (zone()) FinalAssessment(virtual_register, assessment);
+}
- void InitializeOperandMaps() {
- size_t block_count = sequence()->instruction_blocks().size();
- incoming_maps_.reserve(block_count);
- outgoing_maps_.reserve(block_count);
- for (size_t i = 0; i < block_count; ++i) {
- incoming_maps_.push_back(new (zone()) OperandMap(zone()));
- outgoing_maps_.push_back(new (zone()) OperandMap(zone()));
- }
- }
+void RegisterAllocatorVerifier::ValidateFinalAssessment(
+ RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments, const FinalAssessment* assessment,
+ int virtual_register) {
+ if (assessment->virtual_register() == virtual_register) return;
+  // If we have two phis with the exact same operand list, and the first phi
+  // is used before the second one, via the operand incoming to the block,
+  // and the second one's operand is defined (via a parallel move) after the
+  // use, then the original operand will be assigned to the first phi. We
+  // then look at the original pending assessment to ascertain whether {op}
+  // matches {virtual_register}.
+ const PendingAssessment* old = assessment->original_pending_assessment();
+ CHECK_NOT_NULL(old);
+ ValidatePendingAssessment(block_id, op, current_assessments, old,
+ virtual_register);
+}
- void InitializePhis() {
- const size_t block_count = sequence()->instruction_blocks().size();
- for (size_t block_index = 0; block_index < block_count; ++block_index) {
- const InstructionBlock* block =
- sequence()->instruction_blocks()[block_index];
- for (const PhiInstruction* phi : block->phis()) {
- int first_pred_vreg = phi->operands()[0];
- const PhiData* first_pred_phi = nullptr;
- if (IsPhi(first_pred_vreg)) {
- first_pred_phi = GetPhi(first_pred_vreg);
- first_pred_vreg = first_pred_phi->first_pred_vreg;
- }
- CHECK(!IsPhi(first_pred_vreg));
- PhiData* phi_data = new (zone()) PhiData(
- block->rpo_number(), phi, first_pred_vreg, first_pred_phi, zone());
- auto res =
- phi_map_.insert(std::make_pair(phi->virtual_register(), phi_data));
- CHECK(res.second);
- phi_map_guard_.Add(phi->virtual_register());
- }
+void RegisterAllocatorVerifier::ValidateUse(
+ RpoNumber block_id, BlockAssessments* current_assessments,
+ InstructionOperand op, int virtual_register) {
+ auto iterator = current_assessments->map().find(op);
+ // We should have seen this operand before.
+ CHECK(iterator != current_assessments->map().end());
+ Assessment* assessment = iterator->second;
+
+ switch (assessment->kind()) {
+ case Final:
+ ValidateFinalAssessment(block_id, op, current_assessments,
+ FinalAssessment::cast(assessment),
+ virtual_register);
+ break;
+ case Pending: {
+ const PendingAssessment* pending = PendingAssessment::cast(assessment);
+ ValidatePendingAssessment(block_id, op, current_assessments, pending,
+ virtual_register);
+ break;
}
}
-
- typedef ZoneVector<OperandMap*> OperandMaps;
- typedef ZoneVector<PhiData*> PhiVector;
-
- Zone* zone() const { return zone_; }
- const InstructionSequence* sequence() const { return sequence_; }
-
- Zone* const zone_;
- const InstructionSequence* const sequence_;
- BitVector phi_map_guard_;
- PhiMap phi_map_;
- OperandMaps incoming_maps_;
- OperandMaps outgoing_maps_;
-};
-
-
-void RegisterAllocatorVerifier::VerifyGapMoves() {
- BlockMaps block_maps(zone(), sequence());
- VerifyGapMoves(&block_maps, true);
- block_maps.PropagateUsesBackwards();
- VerifyGapMoves(&block_maps, false);
}
-
-// Compute and verify outgoing values for every block.
-void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
- bool initial_pass) {
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+ CHECK(assessments_.empty());
+ CHECK(outstanding_assessments_.empty());
const size_t block_count = sequence()->instruction_blocks().size();
for (size_t block_index = 0; block_index < block_count; ++block_index) {
- OperandMap* current =
- block_maps->InitializeIncoming(block_index, initial_pass);
const InstructionBlock* block =
sequence()->instruction_blocks()[block_index];
+ BlockAssessments* block_assessments = CreateForBlock(block);
+
for (int instr_index = block->code_start(); instr_index < block->code_end();
++instr_index) {
const InstructionConstraint& instr_constraint = constraints_[instr_index];
const Instruction* instr = instr_constraint.instruction_;
- current->RunGaps(zone(), instr);
+ block_assessments->PerformMoves(instr);
+
const OperandConstraint* op_constraints =
instr_constraint.operand_constraints_;
size_t count = 0;
@@ -724,24 +499,19 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
continue;
}
int virtual_register = op_constraints[count].virtual_register_;
- const InstructionOperand* op = instr->InputAt(i);
- if (!block_maps->IsPhi(virtual_register)) {
- current->Use(op, virtual_register, initial_pass);
- } else {
- const PhiData* phi = block_maps->GetPhi(virtual_register);
- current->UsePhi(op, phi, initial_pass);
- }
+ InstructionOperand op = *instr->InputAt(i);
+ ValidateUse(block->rpo_number(), block_assessments, op,
+ virtual_register);
}
for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
- current->Drop(instr->TempAt(i));
+ block_assessments->Drop(*instr->TempAt(i));
}
if (instr->IsCall()) {
- current->DropRegisters(config());
+ block_assessments->DropRegisters();
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
int virtual_register = op_constraints[count].virtual_register_;
- OperandMap::MapValue* value =
- current->Define(zone(), instr->OutputAt(i), virtual_register);
+ block_assessments->AddDefinition(*instr->OutputAt(i), virtual_register);
if (op_constraints[count].type_ == kRegisterAndSlot) {
const AllocatedOperand* reg_op =
AllocatedOperand::cast(instr->OutputAt(i));
@@ -749,13 +519,38 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
const AllocatedOperand* stack_op = AllocatedOperand::New(
zone(), LocationOperand::LocationKind::STACK_SLOT, rep,
op_constraints[i].spilled_slot_);
- auto insert_result =
- current->map().insert(std::make_pair(stack_op, value));
- DCHECK(insert_result.second);
- USE(insert_result);
+ block_assessments->AddDefinition(*stack_op, virtual_register);
}
}
}
+ // Now commit the assessments for this block. If there are any delayed
+ // assessments, ValidatePendingAssessment should see this block, too.
+ assessments_[block->rpo_number()] = block_assessments;
+
+ auto todo_iter = outstanding_assessments_.find(block->rpo_number());
+ if (todo_iter == outstanding_assessments_.end()) continue;
+ DelayedAssessments* todo = todo_iter->second;
+ for (auto pair : todo->map()) {
+ InstructionOperand op = pair.first;
+ int vreg = pair.second;
+ auto found_op = block_assessments->map().find(op);
+ CHECK(found_op != block_assessments->map().end());
+ switch (found_op->second->kind()) {
+ case Final:
+ ValidateFinalAssessment(block->rpo_number(), op, block_assessments,
+ FinalAssessment::cast(found_op->second),
+ vreg);
+ break;
+ case Pending:
+ const PendingAssessment* pending =
+ PendingAssessment::cast(found_op->second);
+ ValidatePendingAssessment(block->rpo_number(), op, block_assessments,
+ pending, vreg);
+ block_assessments->map()[op] =
+ new (zone()) FinalAssessment(vreg, pending);
+ break;
+ }
+ }
}
}
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index f3ab54f018..2db8af5728 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -14,6 +14,153 @@ namespace compiler {
class InstructionOperand;
class InstructionSequence;
+// The register allocator validator traverses instructions in the instruction
+// sequence, and verifies the correctness of machine operand substitutions of
+// virtual registers. It collects the virtual register instruction signatures
+// before register allocation. Then, after the register allocation pipeline
+// completes, it compares the operand substitutions against the pre-allocation
+// data.
+// At a high level, validation works as follows: we iterate through each block,
+// and, in a block, through each instruction; then:
+// - when an operand is the output of an instruction, we associate it to the
+// virtual register that the instruction sequence declares as its output. We
+// use the concept of "FinalAssessment" to model this.
+// - when an operand is used in an instruction, we check that the assessment
+// matches the expectation of the instruction
+// - moves simply copy the assessment over to the new operand
+// - blocks with more than one predecessor associate to each operand a "Pending"
+// assessment. The pending assessment remembers the operand and block where it
+// was created. Then, when the value is used (which may be as a different
+// operand, because of moves), we check that the virtual register at the use
+// site matches the definition of this pending operand: either the phi inputs
+// match, or, if it's not a phi, all the predecessors at the point the pending
+// assessment was defined have that operand assigned to the given virtual
+// register.
+// If a block is a loop header - so one or more of its predecessors are the
+// block itself or blocks that come after it - we still treat uses of operands
+// as above, but we record which operand assessments haven't been made yet and
+// what virtual register they must correspond to, then verify them once we are
+// done with the respective predecessor blocks.
+// This way, the algorithm always makes a final decision about the operands
+// in an instruction, ensuring convergence.
+// Operand assessments are recorded per block, as the result at the exit from
+// the block. When moving to a new block, we copy the assessments from its
+// single predecessor or, if the block has multiple predecessors, apply the
+// multi-predecessor mechanism described above.
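+//
+// For example, at a merge block B3 with predecessors B1 and B2, an incoming
+// operand is given a PendingAssessment remembering B3 and the operand. When
+// that operand is later used as some virtual register v1, validation checks
+// that B1 and B2 each exit with the operand assessed as v1 or, if v1 is a phi
+// defined in B3, as the corresponding phi input.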
+
+enum AssessmentKind { Final, Pending };
+
+class Assessment : public ZoneObject {
+ public:
+ AssessmentKind kind() const { return kind_; }
+
+ protected:
+ explicit Assessment(AssessmentKind kind) : kind_(kind) {}
+ AssessmentKind kind_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Assessment);
+};
+
+// PendingAssessments are associated to operands coming from the multiple
+// predecessors of a block. We only record the operand and the block, and
+// will determine if the way the operand is defined (from the predecessors)
+// matches a particular use. This handles scenarios where multiple phis are
+// defined with identical operands, and the move optimizer has moved down the
+// moves separating the two phis in the block defining them.
+class PendingAssessment final : public Assessment {
+ public:
+ explicit PendingAssessment(const InstructionBlock* origin,
+ InstructionOperand operand)
+ : Assessment(Pending), origin_(origin), operand_(operand) {}
+
+ static const PendingAssessment* cast(const Assessment* assessment) {
+ CHECK(assessment->kind() == Pending);
+ return static_cast<const PendingAssessment*>(assessment);
+ }
+
+ const InstructionBlock* origin() const { return origin_; }
+ InstructionOperand operand() const { return operand_; }
+
+ private:
+ const InstructionBlock* const origin_;
+ InstructionOperand operand_;
+
+ DISALLOW_COPY_AND_ASSIGN(PendingAssessment);
+};
+
+// FinalAssessments are associated to operands that we know to be a certain
+// virtual register.
+class FinalAssessment final : public Assessment {
+ public:
+ explicit FinalAssessment(int virtual_register,
+ const PendingAssessment* original_pending = nullptr)
+ : Assessment(Final),
+ virtual_register_(virtual_register),
+ original_pending_assessment_(original_pending) {}
+
+ int virtual_register() const { return virtual_register_; }
+ static const FinalAssessment* cast(const Assessment* assessment) {
+ CHECK(assessment->kind() == Final);
+ return static_cast<const FinalAssessment*>(assessment);
+ }
+
+ const PendingAssessment* original_pending_assessment() const {
+ return original_pending_assessment_;
+ }
+
+ private:
+ int virtual_register_;
+ const PendingAssessment* original_pending_assessment_;
+
+ DISALLOW_COPY_AND_ASSIGN(FinalAssessment);
+};
+
+struct OperandAsKeyLess {
+ bool operator()(const InstructionOperand& a,
+ const InstructionOperand& b) const {
+ return a.CompareCanonicalized(b);
+ }
+};
+
+// Assessments associated with a basic block.
+class BlockAssessments : public ZoneObject {
+ public:
+ typedef ZoneMap<InstructionOperand, Assessment*, OperandAsKeyLess> OperandMap;
+ explicit BlockAssessments(Zone* zone)
+ : map_(zone), map_for_moves_(zone), zone_(zone) {}
+ void Drop(InstructionOperand operand) { map_.erase(operand); }
+ void DropRegisters();
+ void AddDefinition(InstructionOperand operand, int virtual_register) {
+ auto existent = map_.find(operand);
+ if (existent != map_.end()) {
+ // Drop the assignment
+ map_.erase(existent);
+ }
+ map_.insert(
+ std::make_pair(operand, new (zone_) FinalAssessment(virtual_register)));
+ }
+
+ void PerformMoves(const Instruction* instruction);
+ void PerformParallelMoves(const ParallelMove* moves);
+ void CopyFrom(const BlockAssessments* other) {
+ CHECK(map_.empty());
+ CHECK_NOT_NULL(other);
+ map_.insert(other->map_.begin(), other->map_.end());
+ }
+
+ OperandMap& map() { return map_; }
+ const OperandMap& map() const { return map_; }
+ void Print() const;
+
+ private:
+ OperandMap map_;
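+  // Staging area for the destinations of an in-flight parallel move; see
+  // PerformParallelMoves.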
+ OperandMap map_for_moves_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockAssessments);
+};
+
class RegisterAllocatorVerifier final : public ZoneObject {
public:
RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
@@ -28,13 +175,12 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kImmediate,
kRegister,
kFixedRegister,
- kDoubleRegister,
- kFixedDoubleRegister,
+ kFPRegister,
+ kFixedFPRegister,
kSlot,
- kDoubleSlot,
kFixedSlot,
kNone,
- kNoneDouble,
+ kNoneFP,
kExplicit,
kSameAsFirst,
kRegisterAndSlot
@@ -42,7 +188,9 @@ class RegisterAllocatorVerifier final : public ZoneObject {
struct OperandConstraint {
ConstraintType type_;
- int value_; // subkind index when relevant
+ // Constant or immediate value, register code, slot index, or slot size
+ // when relevant.
+ int value_;
int spilled_slot_;
int virtual_register_;
};
@@ -53,10 +201,29 @@ class RegisterAllocatorVerifier final : public ZoneObject {
OperandConstraint* operand_constraints_;
};
- class BlockMaps;
-
typedef ZoneVector<InstructionConstraint> Constraints;
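+  // Assessments that could not be validated when first encountered because a
+  // loop back-edge had not been processed yet; they record, per operand, the
+  // virtual register the operand must hold, and are checked once the
+  // back-edge predecessor block has been processed.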
+ class DelayedAssessments : public ZoneObject {
+ public:
+ explicit DelayedAssessments(Zone* zone) : map_(zone) {}
+
+ const ZoneMap<InstructionOperand, int, OperandAsKeyLess>& map() const {
+ return map_;
+ }
+
+ void AddDelayedAssessment(InstructionOperand op, int vreg) {
+ auto it = map_.find(op);
+ if (it == map_.end()) {
+ map_.insert(std::make_pair(op, vreg));
+ } else {
+ CHECK_EQ(it->second, vreg);
+ }
+ }
+
+ private:
+ ZoneMap<InstructionOperand, int, OperandAsKeyLess> map_;
+ };
+
Zone* zone() const { return zone_; }
const RegisterConfiguration* config() { return config_; }
const InstructionSequence* sequence() const { return sequence_; }
@@ -70,13 +237,25 @@ class RegisterAllocatorVerifier final : public ZoneObject {
OperandConstraint* constraint);
void CheckConstraint(const InstructionOperand* op,
const OperandConstraint* constraint);
+ BlockAssessments* CreateForBlock(const InstructionBlock* block);
- void VerifyGapMoves(BlockMaps* outgoing_mappings, bool initial_pass);
+ void ValidatePendingAssessment(RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments,
+ const PendingAssessment* assessment,
+ int virtual_register);
+ void ValidateFinalAssessment(RpoNumber block_id, InstructionOperand op,
+ BlockAssessments* current_assessments,
+ const FinalAssessment* assessment,
+ int virtual_register);
+ void ValidateUse(RpoNumber block_id, BlockAssessments* current_assessments,
+ InstructionOperand op, int virtual_register);
Zone* const zone_;
const RegisterConfiguration* config_;
const InstructionSequence* const sequence_;
Constraints constraints_;
+ ZoneMap<RpoNumber, BlockAssessments*> assessments_;
+ ZoneMap<RpoNumber, DelayedAssessments*> outstanding_assessments_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
};
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 82faf75242..5b55b0224c 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -26,23 +26,22 @@ void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
}
int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
- return kind == DOUBLE_REGISTERS ? cfg->num_double_registers()
- : cfg->num_general_registers();
+ return kind == FP_REGISTERS ? cfg->num_double_registers()
+ : cfg->num_general_registers();
}
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
- return kind == DOUBLE_REGISTERS
- ? cfg->num_allocatable_aliased_double_registers()
- : cfg->num_allocatable_general_registers();
+ return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
+ : cfg->num_allocatable_general_registers();
}
const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg,
RegisterKind kind) {
- return kind == DOUBLE_REGISTERS ? cfg->allocatable_double_codes()
- : cfg->allocatable_general_codes();
+ return kind == FP_REGISTERS ? cfg->allocatable_double_codes()
+ : cfg->allocatable_general_codes();
}
@@ -65,31 +64,6 @@ Instruction* GetLastInstruction(InstructionSequence* code,
return code->InstructionAt(block->last_instruction_index());
}
-
-bool IsOutputRegisterOf(Instruction* instr, Register reg) {
- for (size_t i = 0; i < instr->OutputCount(); i++) {
- InstructionOperand* output = instr->OutputAt(i);
- if (output->IsRegister() &&
- LocationOperand::cast(output)->GetRegister().is(reg)) {
- return true;
- }
- }
- return false;
-}
-
-
-bool IsOutputDoubleRegisterOf(Instruction* instr, DoubleRegister reg) {
- for (size_t i = 0; i < instr->OutputCount(); i++) {
- InstructionOperand* output = instr->OutputAt(i);
- if (output->IsDoubleRegister() &&
- LocationOperand::cast(output)->GetDoubleRegister().is(reg)) {
- return true;
- }
- }
- return false;
-}
-
-
// TODO(dcarney): fix frame to allow frame accesses to half size location.
int GetByteWidth(MachineRepresentation rep) {
switch (rep) {
@@ -97,14 +71,22 @@ int GetByteWidth(MachineRepresentation rep) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
return kPointerSize;
case MachineRepresentation::kFloat32:
+// TODO(bbudge) Eliminate this when FP register aliasing works.
+#if V8_TARGET_ARCH_ARM
+ return kDoubleSize;
+#else
+ return kPointerSize;
+#endif
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat64:
- return 8;
+ return kDoubleSize;
case MachineRepresentation::kSimd128:
- return 16;
+ return kSimd128Size;
case MachineRepresentation::kNone:
break;
}
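
The widened GetByteWidth above decides how many bytes a spill slot needs for each machine representation, with a temporary ARM-only widening of float32 slots until FP register aliasing is supported. A standalone sketch of the same mapping, with the platform constants spelled out for a 64-bit target (an assumption; on 32-bit targets kPointerSize would be 4):

constexpr int kPointerSize = 8;
constexpr int kDoubleSize = 8;
constexpr int kSimd128Size = 16;

enum class Rep { kWord32, kTagged, kFloat32, kFloat64, kWord64, kSimd128 };

int GetByteWidthSketch(Rep rep, bool arm_fp_aliasing_workaround) {
  switch (rep) {
    case Rep::kWord32:
    case Rep::kTagged:
      return kPointerSize;
    case Rep::kFloat32:
      // Mirrors the V8_TARGET_ARCH_ARM branch above: float32 slots are
      // widened to double width until FP register aliasing works.
      return arm_fp_aliasing_workaround ? kDoubleSize : kPointerSize;
    case Rep::kWord64:
    case Rep::kFloat64:
      return kDoubleSize;
    case Rep::kSimd128:
      return kSimd128Size;
  }
  return 0;  // unreachable for the representations modeled here
}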
@@ -320,11 +302,7 @@ bool UsePosition::HintRegister(int* register_code) const {
case UsePositionHintType::kOperand: {
InstructionOperand* operand =
reinterpret_cast<InstructionOperand*>(hint_);
- int assigned_register =
- operand->IsRegister()
- ? LocationOperand::cast(operand)->GetRegister().code()
- : LocationOperand::cast(operand)->GetDoubleRegister().code();
- *register_code = assigned_register;
+ *register_code = LocationOperand::cast(operand)->register_code();
return true;
}
case UsePositionHintType::kPhi: {
@@ -351,10 +329,10 @@ UsePositionHintType UsePosition::HintTypeForOperand(
case InstructionOperand::UNALLOCATED:
return UsePositionHintType::kUnresolved;
case InstructionOperand::ALLOCATED:
- if (op.IsRegister() || op.IsDoubleRegister()) {
+ if (op.IsRegister() || op.IsFPRegister()) {
return UsePositionHintType::kOperand;
} else {
- DCHECK(op.IsStackSlot() || op.IsDoubleStackSlot());
+ DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
return UsePositionHintType::kNone;
}
case InstructionOperand::INVALID:
@@ -414,11 +392,6 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
return os;
}
-
-const float LiveRange::kInvalidWeight = -1;
-const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
-
-
LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level)
: relative_id_(relative_id),
@@ -431,10 +404,7 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr),
- splitting_pointer_(nullptr),
- size_(kInvalidSize),
- weight_(kInvalidWeight),
- group_(nullptr) {
+ splitting_pointer_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
RepresentationField::encode(rep);
@@ -489,8 +459,7 @@ void LiveRange::Spill() {
RegisterKind LiveRange::kind() const {
- return IsFloatingPoint(representation()) ? DOUBLE_REGISTERS
- : GENERAL_REGISTERS;
+ return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS;
}
@@ -701,10 +670,6 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
last_processed_use_ = nullptr;
current_interval_ = nullptr;
- // Invalidate size and weight of this range. The child range has them
- // invalid at construction.
- size_ = kInvalidSize;
- weight_ = kInvalidWeight;
#ifdef DEBUG
VerifyChildStructure();
result->VerifyChildStructure();
@@ -728,11 +693,11 @@ void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
if (!pos->HasOperand()) continue;
switch (pos->type()) {
case UsePositionType::kRequiresSlot:
- DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot());
+ DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
break;
case UsePositionType::kRequiresRegister:
- DCHECK(op.IsRegister() || op.IsDoubleRegister());
+ DCHECK(op.IsRegister() || op.IsFPRegister());
// Fall through.
case UsePositionType::kAny:
InstructionOperand::ReplaceWith(pos->operand(), &op);
@@ -820,20 +785,6 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
return LifetimePosition::Invalid();
}
-
-unsigned LiveRange::GetSize() {
- if (size_ == kInvalidSize) {
- size_ = 0;
- for (const UseInterval* interval = first_interval(); interval != nullptr;
- interval = interval->next()) {
- size_ += (interval->end().value() - interval->start().value());
- }
- }
-
- return static_cast<unsigned>(size_);
-}
-
-
void LiveRange::Print(const RegisterConfiguration* config,
bool with_children) const {
OFStream os(stdout);
@@ -848,9 +799,7 @@ void LiveRange::Print(const RegisterConfiguration* config,
void LiveRange::Print(bool with_children) const {
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
- Print(config, with_children);
+ Print(RegisterConfiguration::Turbofan(), with_children);
}
@@ -1250,12 +1199,10 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-
SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
- byte_width_(GetByteWidth(parent->representation())),
- kind_(parent->kind()) {
+ byte_width_(GetByteWidth(parent->representation())) {
// Spill ranges are created for top level, non-splintered ranges. This is so
// that, when merging decisions are made, we consider the full extent of the
// virtual register, and avoid clobbering it.
@@ -1282,12 +1229,6 @@ SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
parent->SetSpillRange(this);
}
-
-int SpillRange::ByteWidth() const {
- return GetByteWidth(live_ranges_[0]->representation());
-}
-
-
bool SpillRange::IsIntersectingWith(SpillRange* other) const {
if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
this->End() <= other->use_interval_->start() ||
@@ -1300,11 +1241,8 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
bool SpillRange::TryMerge(SpillRange* other) {
if (HasSlot() || other->HasSlot()) return false;
- // TODO(dcarney): byte widths should be compared here not kinds.
- if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
- IsIntersectingWith(other)) {
+ if (byte_width() != other->byte_width() || IsIntersectingWith(other))
return false;
- }
LifetimePosition max = LifetimePosition::MaxPosition();
if (End() < other->End() && other->End() != max) {
@@ -1390,7 +1328,6 @@ void RegisterAllocationData::PhiMapValue::CommitAssignment(
}
}
-
RegisterAllocationData::RegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, const char* debug_name)
@@ -1400,10 +1337,6 @@ RegisterAllocationData::RegisterAllocationData(
debug_name_(debug_name),
config_(config),
phi_map_(allocation_zone()),
- allocatable_codes_(this->config()->num_general_registers(), -1,
- allocation_zone()),
- allocatable_double_codes_(this->config()->num_double_registers(), -1,
- allocation_zone()),
live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
@@ -1418,10 +1351,6 @@ RegisterAllocationData::RegisterAllocationData(
assigned_double_registers_(nullptr),
virtual_register_count_(code->VirtualRegisterCount()),
preassigned_slot_ranges_(zone) {
- DCHECK(this->config()->num_general_registers() <=
- RegisterConfiguration::kMaxGeneralRegisters);
- DCHECK(this->config()->num_double_registers() <=
- RegisterConfiguration::kMaxDoubleRegisters);
assigned_registers_ = new (code_zone())
BitVector(this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
@@ -1589,17 +1518,21 @@ SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
return spill_range;
}
-
-void RegisterAllocationData::MarkAllocated(RegisterKind kind, int index) {
- if (kind == DOUBLE_REGISTERS) {
- assigned_double_registers_->Add(index);
- } else {
- DCHECK(kind == GENERAL_REGISTERS);
- assigned_registers_->Add(index);
+void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
+ int index) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
+ assigned_double_registers_->Add(index);
+ break;
+ default:
+ DCHECK(!IsFloatingPoint(rep));
+ assigned_registers_->Add(index);
+ break;
}
}
-
bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const {
return pos.IsFullStart() &&
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
@@ -1628,7 +1561,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
DCHECK(!IsFloatingPoint(rep));
allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
operand->fixed_register_index());
- } else if (operand->HasFixedDoubleRegisterPolicy()) {
+ } else if (operand->HasFixedFPRegisterPolicy()) {
DCHECK(IsFloatingPoint(rep));
DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
@@ -1913,42 +1846,59 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
}
}
-
-int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - config()->num_general_registers();
+int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
+ int result = -index - 1;
+ switch (rep) {
+ case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ result -= config()->num_general_registers();
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return result;
}
-
TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
TopLevelLiveRange* result = data()->fixed_live_ranges()[index];
if (result == nullptr) {
- result = data()->NewLiveRange(FixedLiveRangeID(index),
- InstructionSequence::DefaultRepresentation());
+ MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
+ result = data()->NewLiveRange(FixedLiveRangeID(index), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
- data()->MarkAllocated(GENERAL_REGISTERS, index);
+ data()->MarkAllocated(rep, index);
data()->fixed_live_ranges()[index] = result;
}
return result;
}
-
-TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
- DCHECK(index < config()->num_double_registers());
- TopLevelLiveRange* result = data()->fixed_double_live_ranges()[index];
- if (result == nullptr) {
- result = data()->NewLiveRange(FixedDoubleLiveRangeID(index),
- MachineRepresentation::kFloat64);
- DCHECK(result->IsFixed());
- result->set_assigned_register(index);
- data()->MarkAllocated(DOUBLE_REGISTERS, index);
- data()->fixed_double_live_ranges()[index] = result;
+TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
+ int index, MachineRepresentation rep) {
+ TopLevelLiveRange* result = nullptr;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
+ DCHECK(index < config()->num_double_registers());
+ result = data()->fixed_double_live_ranges()[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+ DCHECK(result->IsFixed());
+ result->set_assigned_register(index);
+ data()->MarkAllocated(rep, index);
+ data()->fixed_double_live_ranges()[index] = result;
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
return result;
}
-
TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
if (operand->IsUnallocated()) {
return data()->GetOrCreateLiveRangeFor(
@@ -1959,9 +1909,9 @@ TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
} else if (operand->IsRegister()) {
return FixedLiveRangeFor(
LocationOperand::cast(operand)->GetRegister().code());
- } else if (operand->IsDoubleRegister()) {
- return FixedDoubleLiveRangeFor(
- LocationOperand::cast(operand)->GetDoubleRegister().code());
+ } else if (operand->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(operand);
+ return FixedFPLiveRangeFor(op->register_code(), op->representation());
} else {
return nullptr;
}
@@ -2056,24 +2006,27 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
if (instr->ClobbersRegisters()) {
for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
+ // Create a UseInterval at this instruction for all fixed registers,
+ // (including the instruction outputs). Adding another UseInterval here
+ // is OK because AddUseInterval will just merge it with the existing
+ // one at the end of the range.
int code = config()->GetAllocatableGeneralCode(i);
- if (!IsOutputRegisterOf(instr, Register::from_code(code))) {
- TopLevelLiveRange* range = FixedLiveRangeFor(code);
- range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
- }
+ TopLevelLiveRange* range = FixedLiveRangeFor(code);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone());
}
}
if (instr->ClobbersDoubleRegisters()) {
for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
++i) {
+ // Add a UseInterval for all DoubleRegisters. See comment above for
+ // general registers.
int code = config()->GetAllocatableDoubleCode(i);
- if (!IsOutputDoubleRegisterOf(instr, DoubleRegister::from_code(code))) {
- TopLevelLiveRange* range = FixedDoubleLiveRangeFor(code);
- range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
- }
+ TopLevelLiveRange* range =
+ FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone());
}
}
@@ -2194,23 +2147,24 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
// block.
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
- InstructionOperand* hint = nullptr;
+  // Select the hint from the first predecessor block that precedes this block
+ // in the rpo ordering. Prefer non-deferred blocks. The enforcement of
+ // hinting in rpo order is required because hint resolution that happens
+ // later in the compiler pipeline visits instructions in reverse rpo,
+ // relying on the fact that phis are encountered before their hints.
+ const Instruction* instr = nullptr;
const InstructionBlock::Predecessors& predecessors = block->predecessors();
- const InstructionBlock* predecessor_block =
- code()->InstructionBlockAt(predecessors[0]);
- const Instruction* instr = GetLastInstruction(code(), predecessor_block);
- if (predecessor_block->IsDeferred()) {
- // "Prefer the hint from the first non-deferred predecessor, if any.
- for (size_t i = 1; i < predecessors.size(); ++i) {
- predecessor_block = code()->InstructionBlockAt(predecessors[i]);
- if (!predecessor_block->IsDeferred()) {
- instr = GetLastInstruction(code(), predecessor_block);
- break;
- }
+ for (size_t i = 0; i < predecessors.size(); ++i) {
+ const InstructionBlock* predecessor_block =
+ code()->InstructionBlockAt(predecessors[i]);
+ if (predecessor_block->rpo_number() < block->rpo_number()) {
+ instr = GetLastInstruction(code(), predecessor_block);
+ if (!predecessor_block->IsDeferred()) break;
}
}
DCHECK_NOT_NULL(instr);
+ InstructionOperand* hint = nullptr;
for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
InstructionOperand& to = move->destination();
if (to.IsUnallocated() &&
@@ -2418,7 +2372,6 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
allocatable_register_codes_(
GetAllocatableRegisterCodes(data->config(), kind)) {}
-
LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
const LiveRange* range, int instruction_index) {
LifetimePosition ret = LifetimePosition::Invalid();
@@ -2587,14 +2540,6 @@ void RegisterAllocator::Spill(LiveRange* range) {
range->Spill();
}
-
-const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
- const {
- return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
- : data()->fixed_live_ranges();
-}
-
-
const char* RegisterAllocator::RegisterName(int register_code) const {
if (mode() == GENERAL_REGISTERS) {
return data()->config()->GetGeneralRegisterName(register_code);
@@ -2616,7 +2561,7 @@ LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data,
inactive_live_ranges().reserve(8);
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
- DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
+ DCHECK(RegisterConfiguration::kMaxFPRegisters >=
this->data()->config()->num_general_registers());
}
@@ -2641,11 +2586,13 @@ void LinearScanAllocator::AllocateRegisters() {
SortUnhandled();
DCHECK(UnhandledIsSorted());
- auto& fixed_ranges = GetFixedRegisters();
- for (TopLevelLiveRange* current : fixed_ranges) {
- if (current != nullptr) {
- DCHECK_EQ(mode(), current->kind());
- AddToInactive(current);
+ if (mode() == GENERAL_REGISTERS) {
+ for (TopLevelLiveRange* current : data()->fixed_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
+ }
+ } else {
+ for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
}
}
@@ -2699,7 +2646,7 @@ void LinearScanAllocator::AllocateRegisters() {
void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
int reg) {
- data()->MarkAllocated(range->kind(), reg);
+ data()->MarkAllocated(range->representation(), reg);
range->set_assigned_register(reg);
range->SetUseHints(reg);
if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
@@ -2813,17 +2760,19 @@ void LinearScanAllocator::InactiveToActive(LiveRange* range) {
bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[RegisterConfiguration::kMaxDoubleRegisters];
+ int num_regs = num_registers();
+ int num_codes = num_allocatable_registers();
+ const int* codes = allocatable_register_codes();
- for (int i = 0; i < num_registers(); i++) {
+ LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
+ for (int i = 0; i < num_regs; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
for (LiveRange* cur_active : active_live_ranges()) {
- free_until_pos[cur_active->assigned_register()] =
- LifetimePosition::GapFromInstructionIndex(0);
- TRACE("Register %s is free until pos %d (1)\n",
- RegisterName(cur_active->assigned_register()),
+ int cur_reg = cur_active->assigned_register();
+ free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+ TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
LifetimePosition::GapFromInstructionIndex(0).value());
}
@@ -2857,9 +2806,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
}
// Find the register which stays free for the longest time.
- int reg = allocatable_register_code(0);
- for (int i = 1; i < num_allocatable_registers(); ++i) {
- int code = allocatable_register_code(i);
+ int reg = codes[0];
+ for (int i = 1; i < num_codes; ++i) {
+ int code = codes[i];
if (free_until_pos[code] > free_until_pos[reg]) {
reg = code;
}
@@ -2879,8 +2828,8 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
AddToUnhandledSorted(tail);
}
- // Register reg is available at the range start and is free until
- // the range end.
+ // Register reg is available at the range start and is free until the range
+ // end.
DCHECK(pos >= current->End());
TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
current->TopLevel()->vreg(), current->relative_id());
@@ -2899,17 +2848,21 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
return;
}
- LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
- LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
+ int num_regs = num_registers();
+ int num_codes = num_allocatable_registers();
+ const int* codes = allocatable_register_codes();
- for (int i = 0; i < num_registers(); i++) {
+ LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
+ LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
+ for (int i = 0; i < num_regs; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
for (LiveRange* range : active_live_ranges()) {
int cur_reg = range->assigned_register();
- if (range->TopLevel()->IsFixed() ||
- !range->CanBeSpilled(current->Start())) {
+ bool is_fixed_or_cant_spill =
+ range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
+ if (is_fixed_or_cant_spill) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
@@ -2928,7 +2881,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
- if (range->TopLevel()->IsFixed()) {
+ bool is_fixed = range->TopLevel()->IsFixed();
+ if (is_fixed) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
@@ -2936,9 +2890,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
- int reg = allocatable_register_code(0);
- for (int i = 1; i < num_allocatable_registers(); ++i) {
- int code = allocatable_register_code(i);
+ int reg = codes[0];
+ for (int i = 1; i < num_codes; ++i) {
+ int code = codes[i];
if (use_pos[code] > use_pos[reg]) {
reg = code;
}
@@ -2947,9 +2901,13 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition pos = use_pos[reg];
if (pos < register_use->pos()) {
- // All registers are blocked before the first use that requires a register.
- // Spill starting part of live range up to that use.
- SpillBetween(current, current->Start(), register_use->pos());
+ if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
+ register_use->pos())) {
+ SpillBetween(current, current->Start(), register_use->pos());
+ } else {
+ SetLiveRangeAssignedRegister(current, reg);
+ SplitAndSpillIntersecting(current);
+ }
return;
}
@@ -2980,43 +2938,46 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
LifetimePosition split_pos = current->Start();
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
LiveRange* range = active_live_ranges()[i];
- if (range->assigned_register() == reg) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
- if (next_pos == nullptr) {
- SpillAfter(range, spill_pos);
- } else {
- // When spilling between spill_pos and next_pos ensure that the range
- // remains spilled at least until the start of the current live range.
- // This guarantees that we will not introduce new unhandled ranges that
- // start before the current range as this violates allocation invariant
- // and will lead to an inconsistent state of active and inactive
- // live-ranges: ranges are allocated in order of their start positions,
- // ranges are retired from active/inactive when the start of the
- // current live-range is larger than their end.
- SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
- }
- ActiveToHandled(range);
- --i;
- }
+ if (range->assigned_register() != reg) continue;
+
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+ if (next_pos == nullptr) {
+ SpillAfter(range, spill_pos);
+ } else {
+ // When spilling between spill_pos and next_pos ensure that the range
+ // remains spilled at least until the start of the current live range.
+ // This guarantees that we will not introduce new unhandled ranges that
+ // start before the current range as this violates allocation invariants
+ // and will lead to an inconsistent state of active and inactive
+ // live-ranges: ranges are allocated in order of their start positions,
+ // ranges are retired from active/inactive when the start of the
+ // current live-range is larger than their end.
+ DCHECK(LifetimePosition::ExistsGapPositionBetween(current->Start(),
+ next_pos->pos()));
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+ }
+ ActiveToHandled(range);
+ --i;
}
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
LiveRange* range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
- if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (next_intersection.IsValid()) {
- UsePosition* next_pos = range->NextRegisterPosition(current->Start());
- if (next_pos == nullptr) {
- SpillAfter(range, split_pos);
- } else {
- next_intersection = Min(next_intersection, next_pos->pos());
- SpillBetween(range, split_pos, next_intersection);
- }
- InactiveToHandled(range);
- --i;
+ if (range->TopLevel()->IsFixed()) continue;
+ if (range->assigned_register() != reg) continue;
+
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (next_intersection.IsValid()) {
+ UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+ if (next_pos == nullptr) {
+ SpillAfter(range, split_pos);
+ } else {
+ next_intersection = Min(next_intersection, next_pos->pos());
+ SpillBetween(range, split_pos, next_intersection);
}
+ InactiveToHandled(range);
+ --i;
}
}
}
@@ -3092,7 +3053,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
bool merged = first_op_spill->TryMerge(spill_range);
- CHECK(merged);
+ if (!merged) return false;
Spill(range);
return true;
} else if (pos->pos() > range->Start().NextStart()) {
@@ -3101,7 +3062,7 @@ bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
? range->TopLevel()->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(range->TopLevel());
bool merged = first_op_spill->TryMerge(spill_range);
- CHECK(merged);
+ if (!merged) return false;
SpillBetween(range, range->Start(), pos->pos());
DCHECK(UnhandledIsSorted());
return true;
@@ -3196,8 +3157,7 @@ void OperandAssigner::AssignSpillSlots() {
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
if (!range->HasSlot()) {
- int byte_width = range->ByteWidth();
- int index = data()->frame()->AllocateSpillSlot(byte_width);
+ int index = data()->frame()->AllocateSpillSlot(range->byte_width());
range->set_assigned_slot(index);
}
}
@@ -3405,7 +3365,8 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
- LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+ int vreg = iterator.Current();
+ LiveRangeBoundArray* array = finder.ArrayFor(vreg);
for (const RpoNumber& pred : block->predecessors()) {
FindResult result;
const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
@@ -3622,6 +3583,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
worklist.push(iterator.Current());
}
+ ZoneSet<std::pair<RpoNumber, int>> done_moves(temp_zone);
// Seek the deferred blocks that dominate locations requiring spill operands,
// and spill there. We only need to spill at the start of such blocks.
BitVector done_blocks(
@@ -3648,10 +3610,15 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
InstructionOperand pred_op = bound->range_->GetAssignedOperand();
- data()->AddGapMove(spill_block->first_instruction_index(),
- Instruction::GapPosition::START, pred_op,
- spill_operand);
- spill_block->mark_needs_frame();
+ RpoNumber spill_block_number = spill_block->rpo_number();
+ if (done_moves.find(std::make_pair(
+ spill_block_number, range->vreg())) == done_moves.end()) {
+ data()->AddGapMove(spill_block->first_instruction_index(),
+ Instruction::GapPosition::START, pred_op,
+ spill_operand);
+ done_moves.insert(std::make_pair(spill_block_number, range->vreg()));
+ spill_block->mark_needs_frame();
+ }
}
}
}
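
The done_moves set threaded through CommitSpillsInDeferredBlocks above guards against emitting the same spill gap move twice for one block. A minimal sketch of that guard, with std::set standing in for ZoneSet and plain ints standing in for RpoNumber (assumptions for illustration):

#include <set>
#include <utility>

// Emit at most one spill move per (spill block, virtual register) pair.
std::set<std::pair<int, int>> done_moves;

bool ShouldEmitSpillMove(int spill_block_number, int vreg) {
  auto key = std::make_pair(spill_block_number, vreg);
  if (done_moves.find(key) != done_moves.end()) return false;  // already handled
  done_moves.insert(key);  // remember so later predecessors skip this block
  return true;
}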
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index d6ed005270..6bfc6c410a 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -14,11 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-enum RegisterKind {
- GENERAL_REGISTERS,
- DOUBLE_REGISTERS
-};
-
+enum RegisterKind { GENERAL_REGISTERS, FP_REGISTERS };
// This class represents a single point of a InstructionOperand's lifetime. For
// each instruction there are four lifetime positions:
@@ -46,6 +42,14 @@ class LifetimePosition final {
return LifetimePosition(index * kStep + kHalfStep);
}
+ static bool ExistsGapPositionBetween(LifetimePosition pos1,
+ LifetimePosition pos2) {
+ if (pos1 > pos2) std::swap(pos1, pos2);
+ LifetimePosition next(pos1.value_ + 1);
+ if (next.IsGapPosition()) return next < pos2;
+ return next.NextFullStart() < pos2;
+ }
+
// Returns a numeric representation of this lifetime position.
int value() const { return value_; }
@@ -238,11 +242,9 @@ enum class UsePositionHintType : uint8_t {
static const int32_t kUnassignedRegister =
RegisterConfiguration::kMaxGeneralRegisters;
-
-static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxDoubleRegisters,
+static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxFPRegisters,
"kUnassignedRegister too small");
-
// Representation of a use position.
class UsePosition final : public ZoneObject {
public:
@@ -410,19 +412,9 @@ class LiveRange : public ZoneObject {
void SetUseHints(int register_index);
void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
- // Used solely by the Greedy Allocator:
- unsigned GetSize();
- float weight() const { return weight_; }
- void set_weight(float weight) { weight_ = weight; }
- LiveRangeGroup* group() const { return group_; }
- void set_group(LiveRangeGroup* group) { group_ = group; }
void Print(const RegisterConfiguration* config, bool with_children) const;
void Print(bool with_children) const;
- static const int kInvalidSize = -1;
- static const float kInvalidWeight;
- static const float kMaxWeight;
-
private:
friend class TopLevelLiveRange;
explicit LiveRange(int relative_id, MachineRepresentation rep,
@@ -459,17 +451,6 @@ class LiveRange : public ZoneObject {
mutable UsePosition* current_hint_position_;
// Cache the last position splintering stopped at.
mutable UsePosition* splitting_pointer_;
- // greedy: the number of LifetimePositions covered by this range. Used to
- // prioritize selecting live ranges for register assignment, as well as
- // in weight calculations.
- int size_;
-
- // greedy: a metric for resolving conflicts between ranges with an assigned
- // register and ranges that intersect them and need a register.
- float weight_;
-
- // greedy: groupping
- LiveRangeGroup* group_;
DISALLOW_COPY_AND_ASSIGN(LiveRange);
};
@@ -481,7 +462,6 @@ class LiveRangeGroup final : public ZoneObject {
ZoneVector<LiveRange*>& ranges() { return ranges_; }
const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
- // TODO(mtrofin): populate assigned register and use in weight calculation.
int assigned_register() const { return assigned_register_; }
void set_assigned_register(int reg) { assigned_register_ = reg; }
@@ -698,8 +678,7 @@ class SpillRange final : public ZoneObject {
SpillRange(TopLevelLiveRange* range, Zone* zone);
UseInterval* interval() const { return use_interval_; }
- // Currently, only 4 or 8 byte slots are supported.
- int ByteWidth() const;
+
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
@@ -716,8 +695,8 @@ class SpillRange final : public ZoneObject {
return live_ranges_;
}
ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
+ // Spill slots can be 4, 8, or 16 bytes wide.
int byte_width() const { return byte_width_; }
- RegisterKind kind() const { return kind_; }
void Print() const;
private:
@@ -731,7 +710,6 @@ class SpillRange final : public ZoneObject {
LifetimePosition end_position_;
int assigned_slot_;
int byte_width_;
- RegisterKind kind_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
@@ -799,7 +777,7 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
DelayedReferences& delayed_references() { return delayed_references_; }
InstructionSequence* code() const { return code_; }
- // This zone is for datastructures only needed during register allocation
+ // This zone is for data structures only needed during register allocation
// phases.
Zone* allocation_zone() const { return allocation_zone_; }
// This zone is for InstructionOperands and moves that live beyond register
@@ -830,7 +808,7 @@ class RegisterAllocationData final : public ZoneObject {
bool ExistsUseWithoutDefinition();
bool RangesDefinedInDeferredStayInDeferred();
- void MarkAllocated(RegisterKind kind, int index);
+ void MarkAllocated(MachineRepresentation rep, int index);
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi);
@@ -851,8 +829,6 @@ class RegisterAllocationData final : public ZoneObject {
const char* const debug_name_;
const RegisterConfiguration* const config_;
PhiMap phi_map_;
- ZoneVector<int> allocatable_codes_;
- ZoneVector<int> allocatable_double_codes_;
ZoneVector<BitVector*> live_in_sets_;
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
@@ -933,9 +909,9 @@ class LiveRangeBuilder final : public ZoneObject {
void ProcessLoopHeader(const InstructionBlock* block, BitVector* live);
static int FixedLiveRangeID(int index) { return -index - 1; }
- int FixedDoubleLiveRangeID(int index);
+ int FixedFPLiveRangeID(int index, MachineRepresentation rep);
TopLevelLiveRange* FixedLiveRangeFor(int index);
- TopLevelLiveRange* FixedDoubleLiveRangeFor(int index);
+ TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep);
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
@@ -969,7 +945,7 @@ class LiveRangeBuilder final : public ZoneObject {
class RegisterAllocator : public ZoneObject {
public:
- explicit RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
+ RegisterAllocator(RegisterAllocationData* data, RegisterKind kind);
protected:
RegisterAllocationData* data() const { return data_; }
@@ -977,8 +953,8 @@ class RegisterAllocator : public ZoneObject {
RegisterKind mode() const { return mode_; }
int num_registers() const { return num_registers_; }
int num_allocatable_registers() const { return num_allocatable_registers_; }
- int allocatable_register_code(int allocatable_index) const {
- return allocatable_register_codes_[allocatable_index];
+ const int* allocatable_register_codes() const {
+ return allocatable_register_codes_;
}
// TODO(mtrofin): explain why splitting in gap START is always OK.
@@ -1031,6 +1007,9 @@ class RegisterAllocator : public ZoneObject {
int num_allocatable_registers_;
const int* allocatable_register_codes_;
+ private:
+ bool no_combining_;
+
DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
};
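
ExistsGapPositionBetween, added to LifetimePosition above, answers whether a spill can be scheduled strictly between two positions. A standalone sketch on the raw encoding, assuming the four-positions-per-instruction layout used by LifetimePosition (kStep == 4, with offsets 0 and 1 being the gap positions; these constants are assumptions for this illustration):

#include <utility>

constexpr int kStep = 4;  // four lifetime positions per instruction

bool IsGapPosition(int value) { return (value & 0x2) == 0; }

int NextFullStart(int value) { return (value / kStep + 1) * kStep; }

// True when some gap position lies strictly between pos1 and pos2.
bool ExistsGapPositionBetween(int pos1, int pos2) {
  if (pos1 > pos2) std::swap(pos1, pos2);
  int next = pos1 + 1;
  if (IsGapPosition(next)) return next < pos2;
  return NextFullStart(next) < pos2;
}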
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index f59c8bc909..5427bdb1cd 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -63,6 +63,11 @@ Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
LessGeneral(rep2, TruncationKind::kFloat64)) {
return TruncationKind::kFloat64;
}
+ // Handle the generalization of any-representable values.
+ if (LessGeneral(rep1, TruncationKind::kAny) &&
+ LessGeneral(rep2, TruncationKind::kAny)) {
+ return TruncationKind::kAny;
+ }
// All other combinations are illegal.
FATAL("Tried to combine incompatible truncations");
return TruncationKind::kNone;
@@ -105,46 +110,59 @@ bool IsWord(MachineRepresentation rep) {
} // namespace
-
// Changes representation from {output_rep} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
// out signedness for the word32->float64 conversion, then we check that the
// uses truncate to word32 (so they do not care about signedness).
Node* RepresentationChanger::GetRepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
- MachineRepresentation use_rep, Truncation truncation) {
- if (output_rep == MachineRepresentation::kNone) {
- // The output representation should be set.
- return TypeError(node, output_rep, output_type, use_rep);
+ Node* use_node, UseInfo use_info) {
+ if (output_rep == MachineRepresentation::kNone &&
+ output_type->IsInhabited()) {
+ // The output representation should be set if the type is inhabited (i.e.,
+ // if the value is possible).
+ return TypeError(node, output_rep, output_type, use_info.representation());
}
- if (use_rep == output_rep) {
- // Representations are the same. That's a no-op.
- return node;
- }
- if (IsWord(use_rep) && IsWord(output_rep)) {
- // Both are words less than or equal to 32-bits.
- // Since loads of integers from memory implicitly sign or zero extend the
- // value to the full machine word size and stores implicitly truncate,
- // no representation change is necessary.
- return node;
+
+ // Handle the no-op shortcuts when no checking is necessary.
+ if (use_info.type_check() == TypeCheckKind::kNone ||
+ output_rep != MachineRepresentation::kWord32) {
+ if (use_info.representation() == output_rep) {
+ // Representations are the same. That's a no-op.
+ return node;
+ }
+ if (IsWord(use_info.representation()) && IsWord(output_rep)) {
+ // Both are words less than or equal to 32-bits.
+ // Since loads of integers from memory implicitly sign or zero extend the
+ // value to the full machine word size and stores implicitly truncate,
+ // no representation change is necessary.
+ return node;
+ }
}
- switch (use_rep) {
+
+ switch (use_info.representation()) {
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetTaggedRepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kFloat32:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetFloat32RepresentationFor(node, output_rep, output_type,
- truncation);
+ use_info.truncation());
case MachineRepresentation::kFloat64:
return GetFloat64RepresentationFor(node, output_rep, output_type,
- truncation);
+ use_node, use_info);
case MachineRepresentation::kBit:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetBitRepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- return GetWord32RepresentationFor(node, output_rep, output_type,
- truncation);
+ return GetWord32RepresentationFor(node, output_rep, output_type, use_node,
+ use_info);
case MachineRepresentation::kWord64:
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetWord64RepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kSimd128: // Fall through.
// TODO(bbudge) Handle conversions between tagged and untagged.
@@ -156,7 +174,6 @@ Node* RepresentationChanger::GetRepresentationFor(
return nullptr;
}
-
Node* RepresentationChanger::GetTaggedRepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
// Eagerly fold representation changes for constants.
@@ -171,7 +188,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_type->Is(Type::Unsigned32())) {
uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
return jsgraph()->Constant(static_cast<double>(value));
- } else if (output_rep == MachineRepresentation::kBit) {
+ } else if (output_type->Is(Type::Boolean())) {
return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
: jsgraph()->TrueConstant();
} else {
@@ -187,13 +204,24 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
}
// Select the correct X -> Tagged operator.
const Operator* op;
- if (output_rep == MachineRepresentation::kBit) {
- op = simplified()->ChangeBitToBool();
+ if (output_rep == MachineRepresentation::kNone) {
+    // We should only assign this representation if the type is empty.
+ CHECK(!output_type->IsInhabited());
+ op = machine()->ImpossibleToTagged();
+ } else if (output_rep == MachineRepresentation::kBit) {
+ if (output_type->Is(Type::Boolean())) {
+ op = simplified()->ChangeBitToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
} else if (IsWord(output_rep)) {
- if (output_type->Is(Type::Unsigned32())) {
- op = simplified()->ChangeUint32ToTagged();
+ if (output_type->Is(Type::Signed31())) {
+ op = simplified()->ChangeInt31ToTaggedSigned();
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeInt32ToTagged();
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeUint32ToTagged();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
@@ -201,9 +229,28 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_rep ==
MachineRepresentation::kFloat32) { // float32 -> float64 -> tagged
node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTagged();
+ op = simplified()->ChangeFloat64ToTagged(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
} else if (output_rep == MachineRepresentation::kFloat64) {
- op = simplified()->ChangeFloat64ToTagged();
+ if (output_type->Is(Type::Signed31())) { // float64 -> int32 -> tagged
+ node = InsertChangeFloat64ToInt32(node);
+ op = simplified()->ChangeInt31ToTaggedSigned();
+ } else if (output_type->Is(
+ Type::Signed32())) { // float64 -> int32 -> tagged
+ node = InsertChangeFloat64ToInt32(node);
+ op = simplified()->ChangeInt32ToTagged();
+ } else if (output_type->Is(
+ Type::Unsigned32())) { // float64 -> uint32 -> tagged
+ node = InsertChangeFloat64ToUint32(node);
+ op = simplified()->ChangeUint32ToTagged();
+ } else {
+ op = simplified()->ChangeFloat64ToTagged(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ }
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
@@ -236,14 +283,18 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
}
// Select the correct X -> Float32 operator.
const Operator* op = nullptr;
- if (IsWord(output_rep)) {
+ if (output_rep == MachineRepresentation::kNone) {
+ // We should only use kNone representation if the type is empty.
+ CHECK(!output_type->IsInhabited());
+ op = machine()->ImpossibleToFloat32();
+ } else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
// int32 -> float64 -> float32
op = machine()->ChangeInt32ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
op = machine()->TruncateFloat64ToFloat32();
} else if (output_type->Is(Type::Unsigned32()) ||
- truncation.TruncatesToWord32()) {
+ truncation.IsUsedAsWord32()) {
// Either the output is uint32 or the uses only care about the
// low 32 bits (so we can pick uint32 safely).
@@ -253,9 +304,13 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
op = machine()->TruncateFloat64ToFloat32();
}
} else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::Number())) {
- op = simplified()
- ->ChangeTaggedToFloat64(); // tagged -> float64 -> float32
+ if (output_type->Is(Type::NumberOrOddball())) {
+ // tagged -> float64 -> float32
+ if (output_type->Is(Type::Number())) {
+ op = simplified()->ChangeTaggedToFloat64();
+ } else {
+ op = simplified()->TruncateTaggedToFloat64();
+ }
node = jsgraph()->graph()->NewNode(op, node);
op = machine()->TruncateFloat64ToFloat32();
}
@@ -269,44 +324,68 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
return jsgraph()->graph()->NewNode(op, node);
}
-
Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
- Truncation truncation) {
+ Node* use_node, UseInfo use_info) {
// Eagerly fold representation changes for constants.
- switch (node->opcode()) {
- case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(OpParameter<double>(node));
- case IrOpcode::kInt32Constant:
- if (output_type->Is(Type::Signed32())) {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float64Constant(value);
- } else {
- DCHECK(output_type->Is(Type::Unsigned32()));
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float64Constant(static_cast<double>(value));
- }
- case IrOpcode::kFloat64Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return jsgraph()->Float64Constant(OpParameter<float>(node));
- default:
- break;
+ if ((use_info.type_check() == TypeCheckKind::kNone)) {
+ // TODO(jarin) Handle checked constant conversions.
+ switch (node->opcode()) {
+ case IrOpcode::kNumberConstant:
+ return jsgraph()->Float64Constant(OpParameter<double>(node));
+ case IrOpcode::kInt32Constant:
+ if (output_type->Is(Type::Signed32())) {
+ int32_t value = OpParameter<int32_t>(node);
+ return jsgraph()->Float64Constant(value);
+ } else {
+ DCHECK(output_type->Is(Type::Unsigned32()));
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ return jsgraph()->Float64Constant(static_cast<double>(value));
+ }
+ case IrOpcode::kFloat64Constant:
+ return node; // No change necessary.
+ case IrOpcode::kFloat32Constant:
+ return jsgraph()->Float64Constant(OpParameter<float>(node));
+ default:
+ break;
+ }
}
// Select the correct X -> Float64 operator.
const Operator* op = nullptr;
- if (IsWord(output_rep)) {
+ if (output_rep == MachineRepresentation::kNone) {
+ // We should only use kNone representation if the type is empty.
+ CHECK(!output_type->IsInhabited());
+ op = machine()->ImpossibleToFloat64();
+ } else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
} else if (output_type->Is(Type::Unsigned32()) ||
- truncation.TruncatesToWord32()) {
+ use_info.truncation().IsUsedAsWord32()) {
// Either the output is uint32 or the uses only care about the
// low 32 bits (so we can pick uint32 safely).
op = machine()->ChangeUint32ToFloat64();
}
+ } else if (output_rep == MachineRepresentation::kBit) {
+ op = machine()->ChangeUint32ToFloat64();
} else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::Number())) {
+ if (output_type->Is(Type::Undefined())) {
+ return jsgraph()->Float64Constant(
+ std::numeric_limits<double>::quiet_NaN());
+ } else if (output_type->Is(Type::TaggedSigned())) {
+ node = InsertChangeTaggedSignedToInt32(node);
+ op = machine()->ChangeInt32ToFloat64();
+ } else if (output_type->Is(Type::Number())) {
op = simplified()->ChangeTaggedToFloat64();
+ } else if (output_type->Is(Type::NumberOrOddball())) {
+ // TODO(jarin) Here we should check that truncation is Number.
+ op = simplified()->TruncateTaggedToFloat64();
+ } else if (use_info.type_check() == TypeCheckKind::kNumber ||
+ (use_info.type_check() == TypeCheckKind::kNumberOrOddball &&
+ !output_type->Maybe(Type::BooleanOrNullOrNumber()))) {
+ op = simplified()->CheckedTaggedToFloat64(CheckTaggedInputMode::kNumber);
+ } else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
+ op = simplified()->CheckedTaggedToFloat64(
+ CheckTaggedInputMode::kNumberOrOddball);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
@@ -315,40 +394,66 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kFloat64);
}
- return jsgraph()->graph()->NewNode(op, node);
+ return InsertConversion(node, op, use_node);
}
-
Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
return jsgraph()->Int32Constant(DoubleToInt32(value));
}
Node* RepresentationChanger::GetWord32RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
- Truncation truncation) {
+ Node* use_node, UseInfo use_info) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
return node; // No change necessary.
- case IrOpcode::kFloat32Constant:
- return MakeTruncatedInt32Constant(OpParameter<float>(node));
+ case IrOpcode::kFloat32Constant: {
+ float const fv = OpParameter<float>(node);
+ if (use_info.type_check() == TypeCheckKind::kNone ||
+ ((use_info.type_check() == TypeCheckKind::kSignedSmall ||
+ use_info.type_check() == TypeCheckKind::kSigned32) &&
+ IsInt32Double(fv))) {
+ return MakeTruncatedInt32Constant(fv);
+ }
+ break;
+ }
case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant:
- return MakeTruncatedInt32Constant(OpParameter<double>(node));
+ case IrOpcode::kFloat64Constant: {
+ double const fv = OpParameter<double>(node);
+ if (use_info.type_check() == TypeCheckKind::kNone ||
+ ((use_info.type_check() == TypeCheckKind::kSignedSmall ||
+ use_info.type_check() == TypeCheckKind::kSigned32) &&
+ IsInt32Double(fv))) {
+ return MakeTruncatedInt32Constant(fv);
+ }
+ break;
+ }
default:
break;
}
+
// Select the correct X -> Word32 operator.
const Operator* op = nullptr;
- if (output_rep == MachineRepresentation::kBit) {
+ if (output_rep == MachineRepresentation::kNone) {
+ // We should only use kNone representation if the type is empty.
+ CHECK(!output_type->IsInhabited());
+ op = machine()->ImpossibleToWord32();
+ } else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (truncation.TruncatesToWord32()) {
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
+ use_info.type_check() == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedFloat64ToInt32(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
@@ -356,23 +461,76 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = machine()->ChangeFloat64ToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (truncation.TruncatesToWord32()) {
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
+ use_info.type_check() == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedFloat64ToInt32(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
}
} else if (output_rep == MachineRepresentation::kTagged) {
- if (output_type->Is(Type::Unsigned32())) {
+ if (output_type->Is(Type::TaggedSigned())) {
+ op = simplified()->ChangeTaggedSignedToInt32();
+ } else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
- } else if (truncation.TruncatesToWord32()) {
- node = InsertChangeTaggedToFloat64(node);
- op = machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript);
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ if (use_info.type_check() != TypeCheckKind::kNone) {
+ op = simplified()->CheckedTruncateTaggedToWord32();
+ } else {
+ op = simplified()->TruncateTaggedToWord32();
+ }
+ } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedTaggedSignedToInt32();
+ } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
+ op = simplified()->CheckedTaggedToInt32(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ }
+ } else if (output_rep == MachineRepresentation::kWord32) {
+    // Only the checked case should get here; the non-checked case is
+    // handled in GetRepresentationFor.
+ if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
+ use_info.type_check() == TypeCheckKind::kSigned32) {
+ if (output_type->Is(Type::Signed32())) {
+ return node;
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->CheckedUint32ToInt32();
+ }
+ } else {
+ DCHECK_EQ(TypeCheckKind::kNumberOrOddball, use_info.type_check());
+ return node;
}
+ } else if (output_rep == MachineRepresentation::kWord8 ||
+ output_rep == MachineRepresentation::kWord16) {
+ DCHECK(use_info.representation() == MachineRepresentation::kWord32);
+ DCHECK(use_info.type_check() == TypeCheckKind::kSignedSmall ||
+ use_info.type_check() == TypeCheckKind::kSigned32);
+ return node;
}
+
if (op == nullptr) {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
}
+ return InsertConversion(node, op, use_node);
+}
+
+Node* RepresentationChanger::InsertConversion(Node* node, const Operator* op,
+ Node* use_node) {
+ if (op->ControlInputCount() > 0) {
+ // If the operator can deoptimize (which means it has control
+ // input), we need to connect it to the effect and control chains.
+ Node* effect = NodeProperties::GetEffectInput(use_node);
+ Node* control = NodeProperties::GetControlInput(use_node);
+ Node* conversion = jsgraph()->graph()->NewNode(op, node, effect, control);
+ NodeProperties::ReplaceEffectInput(use_node, conversion);
+ return conversion;
+ }
return jsgraph()->graph()->NewNode(op, node);
}
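
InsertConversion above distinguishes pure conversions from checked ones: an operator with a control input can deoptimize, so it must observe the effects its use has already seen and become the use's new effect dependency. A toy sketch of that rewiring, with a deliberately simplified node structure (an assumption; V8's Node/NodeProperties API is richer):

// Simplified node: one effect and one control dependency.
struct Node {
  Node* effect = nullptr;
  Node* control = nullptr;
};

Node* InsertConversionSketch(Node* conversion, Node* use, bool can_deopt) {
  if (can_deopt) {
    conversion->effect = use->effect;    // inherit the use's old effect input
    conversion->control = use->control;  // anchor in the use's control flow
    use->effect = conversion;            // the use now depends on the check
  }
  // Pure conversions need no effect/control wiring.
  return conversion;
}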
@@ -393,8 +551,12 @@ Node* RepresentationChanger::GetBitRepresentationFor(
}
// Select the correct X -> Bit operator.
const Operator* op;
- if (output_rep == MachineRepresentation::kTagged) {
- op = simplified()->ChangeBoolToBit();
+ if (output_rep == MachineRepresentation::kNone) {
+ // We should only use kNone representation if the type is empty.
+ CHECK(!output_type->IsInhabited());
+ op = machine()->ImpossibleToBit();
+ } else if (output_rep == MachineRepresentation::kTagged) {
+ op = simplified()->ChangeTaggedToBit();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kBit);
@@ -402,10 +564,13 @@ Node* RepresentationChanger::GetBitRepresentationFor(
return jsgraph()->graph()->NewNode(op, node);
}
-
Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
- if (output_rep == MachineRepresentation::kBit) {
+ if (output_rep == MachineRepresentation::kNone) {
+ // We should only use kNone representation if the type is empty.
+ CHECK(!output_type->IsInhabited());
+ return jsgraph()->graph()->NewNode(machine()->ImpossibleToFloat64(), node);
+ } else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
}
// Can't really convert Word64 to anything else. Purported to be internal.
@@ -413,31 +578,41 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
MachineRepresentation::kWord64);
}
-
const Operator* RepresentationChanger::Int32OperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
+ case IrOpcode::kSpeculativeNumberAdd: // Fall through.
case IrOpcode::kNumberAdd:
return machine()->Int32Add();
+ case IrOpcode::kSpeculativeNumberSubtract: // Fall through.
case IrOpcode::kNumberSubtract:
return machine()->Int32Sub();
+ case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kNumberMultiply:
return machine()->Int32Mul();
+ case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kNumberDivide:
return machine()->Int32Div();
+ case IrOpcode::kSpeculativeNumberModulus:
case IrOpcode::kNumberModulus:
return machine()->Int32Mod();
+ case IrOpcode::kSpeculativeNumberBitwiseOr: // Fall through.
case IrOpcode::kNumberBitwiseOr:
return machine()->Word32Or();
+ case IrOpcode::kSpeculativeNumberBitwiseXor: // Fall through.
case IrOpcode::kNumberBitwiseXor:
return machine()->Word32Xor();
+ case IrOpcode::kSpeculativeNumberBitwiseAnd: // Fall through.
case IrOpcode::kNumberBitwiseAnd:
return machine()->Word32And();
case IrOpcode::kNumberEqual:
+ case IrOpcode::kSpeculativeNumberEqual:
return machine()->Word32Equal();
case IrOpcode::kNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThan:
return machine()->Int32LessThan();
case IrOpcode::kNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return machine()->Int32LessThanOrEqual();
default:
UNREACHABLE();
@@ -445,6 +620,22 @@ const Operator* RepresentationChanger::Int32OperatorFor(
}
}
+const Operator* RepresentationChanger::Int32OverflowOperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kSpeculativeNumberAdd:
+ return simplified()->CheckedInt32Add();
+ case IrOpcode::kSpeculativeNumberSubtract:
+ return simplified()->CheckedInt32Sub();
+ case IrOpcode::kSpeculativeNumberDivide:
+ return simplified()->CheckedInt32Div();
+ case IrOpcode::kSpeculativeNumberModulus:
+ return simplified()->CheckedInt32Mod();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
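
(For intuition, a sketch of the overflow discipline the CheckedInt32*
operators above enforce; CheckedInt32Add is the named operator, the C++
helper is made up for illustration:)

    #include <cstdint>
    // A checked 32-bit add: widen to 64 bits, detect overflow, and signal a
    // deopt instead of wrapping the way machine Int32Add would.
    bool CheckedInt32AddSketch(int32_t a, int32_t b, int32_t* result) {
      int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
      if (wide < INT32_MIN || wide > INT32_MAX) return false;  // deopt path
      *result = static_cast<int32_t>(wide);
      return true;
    }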
const Operator* RepresentationChanger::Uint32OperatorFor(
IrOpcode::Value opcode) {
@@ -453,17 +644,23 @@ const Operator* RepresentationChanger::Uint32OperatorFor(
return machine()->Int32Add();
case IrOpcode::kNumberSubtract:
return machine()->Int32Sub();
+ case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kNumberMultiply:
return machine()->Int32Mul();
+ case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kNumberDivide:
return machine()->Uint32Div();
+ case IrOpcode::kSpeculativeNumberModulus:
case IrOpcode::kNumberModulus:
return machine()->Uint32Mod();
case IrOpcode::kNumberEqual:
+ case IrOpcode::kSpeculativeNumberEqual:
return machine()->Word32Equal();
case IrOpcode::kNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThan:
return machine()->Uint32LessThan();
case IrOpcode::kNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return machine()->Uint32LessThanOrEqual();
case IrOpcode::kNumberClz32:
return machine()->Word32Clz();
@@ -475,26 +672,106 @@ const Operator* RepresentationChanger::Uint32OperatorFor(
}
}
+const Operator* RepresentationChanger::Uint32OverflowOperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kSpeculativeNumberDivide:
+ return simplified()->CheckedUint32Div();
+ case IrOpcode::kSpeculativeNumberModulus:
+ return simplified()->CheckedUint32Mod();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
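
(Similarly hedged: the unsigned checked operators only need to guard what a
machine Uint32Div/Uint32Mod cannot express, chiefly a zero divisor:)

    #include <cstdint>
    // Illustrative semantics of CheckedUint32Div.
    bool CheckedUint32DivSketch(uint32_t a, uint32_t b, uint32_t* result) {
      if (b == 0) return false;  // deopt rather than divide by zero
      *result = a / b;
      return true;
    }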
const Operator* RepresentationChanger::Float64OperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
+ case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kNumberAdd:
return machine()->Float64Add();
+ case IrOpcode::kSpeculativeNumberSubtract:
case IrOpcode::kNumberSubtract:
return machine()->Float64Sub();
+ case IrOpcode::kSpeculativeNumberMultiply:
case IrOpcode::kNumberMultiply:
return machine()->Float64Mul();
+ case IrOpcode::kSpeculativeNumberDivide:
case IrOpcode::kNumberDivide:
return machine()->Float64Div();
+ case IrOpcode::kSpeculativeNumberModulus:
case IrOpcode::kNumberModulus:
return machine()->Float64Mod();
case IrOpcode::kNumberEqual:
+ case IrOpcode::kSpeculativeNumberEqual:
return machine()->Float64Equal();
case IrOpcode::kNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThan:
return machine()->Float64LessThan();
case IrOpcode::kNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return machine()->Float64LessThanOrEqual();
+ case IrOpcode::kNumberAbs:
+ return machine()->Float64Abs();
+ case IrOpcode::kNumberAcos:
+ return machine()->Float64Acos();
+ case IrOpcode::kNumberAcosh:
+ return machine()->Float64Acosh();
+ case IrOpcode::kNumberAsin:
+ return machine()->Float64Asin();
+ case IrOpcode::kNumberAsinh:
+ return machine()->Float64Asinh();
+ case IrOpcode::kNumberAtan:
+ return machine()->Float64Atan();
+ case IrOpcode::kNumberAtanh:
+ return machine()->Float64Atanh();
+ case IrOpcode::kNumberAtan2:
+ return machine()->Float64Atan2();
+ case IrOpcode::kNumberCbrt:
+ return machine()->Float64Cbrt();
+ case IrOpcode::kNumberCeil:
+ return machine()->Float64RoundUp().placeholder();
+ case IrOpcode::kNumberCos:
+ return machine()->Float64Cos();
+ case IrOpcode::kNumberCosh:
+ return machine()->Float64Cosh();
+ case IrOpcode::kNumberExp:
+ return machine()->Float64Exp();
+ case IrOpcode::kNumberExpm1:
+ return machine()->Float64Expm1();
+ case IrOpcode::kNumberFloor:
+ return machine()->Float64RoundDown().placeholder();
+ case IrOpcode::kNumberFround:
+ return machine()->TruncateFloat64ToFloat32();
+ case IrOpcode::kNumberLog:
+ return machine()->Float64Log();
+ case IrOpcode::kNumberLog1p:
+ return machine()->Float64Log1p();
+ case IrOpcode::kNumberLog2:
+ return machine()->Float64Log2();
+ case IrOpcode::kNumberLog10:
+ return machine()->Float64Log10();
+ case IrOpcode::kNumberMax:
+ return machine()->Float64Max();
+ case IrOpcode::kNumberMin:
+ return machine()->Float64Min();
+ case IrOpcode::kNumberPow:
+ return machine()->Float64Pow();
+ case IrOpcode::kNumberSin:
+ return machine()->Float64Sin();
+ case IrOpcode::kNumberSinh:
+ return machine()->Float64Sinh();
+ case IrOpcode::kNumberSqrt:
+ return machine()->Float64Sqrt();
+ case IrOpcode::kNumberTan:
+ return machine()->Float64Tan();
+ case IrOpcode::kNumberTanh:
+ return machine()->Float64Tanh();
+ case IrOpcode::kNumberTrunc:
+ return machine()->Float64RoundTruncate().placeholder();
+ case IrOpcode::kNumberSilenceNaN:
+ return machine()->Float64SilenceNaN();
default:
UNREACHABLE();
return nullptr;
@@ -530,6 +807,18 @@ Node* RepresentationChanger::InsertChangeFloat32ToFloat64(Node* node) {
return jsgraph()->graph()->NewNode(machine()->ChangeFloat32ToFloat64(), node);
}
+Node* RepresentationChanger::InsertChangeFloat64ToUint32(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToUint32(), node);
+}
+
+Node* RepresentationChanger::InsertChangeFloat64ToInt32(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToInt32(), node);
+}
+
+Node* RepresentationChanger::InsertChangeTaggedSignedToInt32(Node* node) {
+ return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(),
+ node);
+}
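
(What ChangeTaggedSignedToInt32 amounts to under a 32-bit Smi encoding -- a
sketch only, since the tagging scheme differs between 32- and 64-bit builds:)

    #include <cstdint>
    // With a one-bit Smi tag of 0 in the low bit, untagging a signed small
    // integer is an arithmetic shift right by one.
    int32_t UntagSmi32(int32_t tagged) { return tagged >> 1; }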
Node* RepresentationChanger::InsertChangeTaggedToFloat64(Node* node) {
return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 24e28f399c..fac328072a 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -28,14 +28,18 @@ class Truncation final {
}
// Queries.
- bool TruncatesToWord32() const {
+ bool IsUnused() const { return kind_ == TruncationKind::kNone; }
+ bool IsUsedAsWord32() const {
return LessGeneral(kind_, TruncationKind::kWord32);
}
- bool TruncatesNaNToZero() {
+ bool IsUsedAsFloat64() const {
+ return LessGeneral(kind_, TruncationKind::kFloat64);
+ }
+ bool IdentifiesNaNAndZero() {
return LessGeneral(kind_, TruncationKind::kWord32) ||
LessGeneral(kind_, TruncationKind::kBool);
}
- bool TruncatesUndefinedToZeroOrNaN() {
+ bool IdentifiesUndefinedAndNaNAndZero() {
return LessGeneral(kind_, TruncationKind::kFloat64) ||
LessGeneral(kind_, TruncationKind::kWord64);
}
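
(Why a word32 truncation may identify NaN and zero: JavaScript's ToInt32 maps
NaN to 0, so a use that keeps only the low 32 bits cannot tell them apart. A
simplified sketch, with the modulo-2^32 wrapping of large finite values
elided:)

    #include <cmath>
    #include <cstdint>
    // NaN, +0 and -0 all land on 0 under ToInt32.
    int32_t ToInt32Sketch(double value) {
      if (std::isnan(value) || value == 0.0) return 0;
      return static_cast<int32_t>(value);  // valid for in-range values
    }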
@@ -70,6 +74,120 @@ class Truncation final {
static bool LessGeneral(TruncationKind rep1, TruncationKind rep2);
};
+enum class TypeCheckKind : uint8_t {
+ kNone,
+ kSignedSmall,
+ kSigned32,
+ kNumber,
+ kNumberOrOddball
+};
+
+inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
+ switch (type_check) {
+ case TypeCheckKind::kNone:
+ return os << "None";
+ case TypeCheckKind::kSignedSmall:
+ return os << "SignedSmall";
+ case TypeCheckKind::kSigned32:
+ return os << "Signed32";
+ case TypeCheckKind::kNumber:
+ return os << "Number";
+ case TypeCheckKind::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+// The {UseInfo} class is used to describe a use of an input of a node.
+//
+// This information is used in two different ways, based on the phase:
+//
+// 1. During propagation, the use info is used to inform the input node
+// about what part of the input is used (we call this truncation) and what
+// the preferred representation is.
+//
+// 2. During lowering, the use info is used to properly convert the input
+// to the preferred representation. The preferred representation might be
+// insufficient to do the conversion (e.g. a word32->float64 conversion),
+// so we also need the signedness information to produce the correct value.
+// (A short usage sketch follows the class definition below.)
+class UseInfo {
+ public:
+ UseInfo(MachineRepresentation representation, Truncation truncation,
+ TypeCheckKind type_check = TypeCheckKind::kNone)
+ : representation_(representation),
+ truncation_(truncation),
+ type_check_(type_check) {}
+ static UseInfo TruncatingWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
+ }
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo Bool() {
+ return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
+ }
+ static UseInfo TruncatingFloat32() {
+ return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+ }
+ static UseInfo TruncatingFloat64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
+ }
+ static UseInfo PointerInt() {
+ return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
+ }
+ static UseInfo AnyTagged() {
+ return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
+ }
+
+ // Possibly deoptimizing conversions.
+ static UseInfo CheckedSignedSmallAsWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
+ TypeCheckKind::kSignedSmall);
+ }
+ static UseInfo CheckedSigned32AsWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
+ TypeCheckKind::kSigned32);
+ }
+ static UseInfo CheckedNumberAsFloat64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
+ TypeCheckKind::kNumber);
+ }
+ static UseInfo CheckedNumberAsWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
+ TypeCheckKind::kNumber);
+ }
+ static UseInfo CheckedNumberOrOddballAsFloat64() {
+ return UseInfo(MachineRepresentation::kFloat64, Truncation::Any(),
+ TypeCheckKind::kNumberOrOddball);
+ }
+ static UseInfo CheckedNumberOrOddballAsWord32() {
+ return UseInfo(MachineRepresentation::kWord32, Truncation::Word32(),
+ TypeCheckKind::kNumberOrOddball);
+ }
+
+ // Undetermined representation.
+ static UseInfo Any() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Any());
+ }
+ static UseInfo AnyTruncatingToBool() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
+ }
+
+ // Value not used.
+ static UseInfo None() {
+ return UseInfo(MachineRepresentation::kNone, Truncation::None());
+ }
+
+ MachineRepresentation representation() const { return representation_; }
+ Truncation truncation() const { return truncation_; }
+ TypeCheckKind type_check() const { return type_check_; }
+
+ private:
+ MachineRepresentation representation_;
+ Truncation truncation_;
+ TypeCheckKind type_check_;
+};
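
(A hedged usage fragment for the factories above, as a lowering step might
request a possibly-deoptimizing conversion for an input:)

    // The use wants a word32 value and is prepared to deoptimize if the
    // input is not a signed small integer.
    UseInfo use = UseInfo::CheckedSignedSmallAsWord32();
    // use.representation() == MachineRepresentation::kWord32
    // use.truncation() is Truncation::Any()
    // use.type_check() == TypeCheckKind::kSignedSmall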
// Contains logic related to changing the representation of values for constants
// and other nodes, as well as lowering Simplified->Machine operators.
@@ -87,10 +205,12 @@ class RepresentationChanger final {
// out signedness for the word32->float64 conversion, then we check that the
// uses truncate to word32 (so they do not care about signedness).
Node* GetRepresentationFor(Node* node, MachineRepresentation output_rep,
- Type* output_type, MachineRepresentation use_rep,
- Truncation truncation = Truncation::None());
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
const Operator* Int32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
+ const Operator* Uint32OverflowOperatorFor(IrOpcode::Value opcode);
const Operator* Float64OperatorFor(IrOpcode::Value opcode);
MachineType TypeForBasePointer(const FieldAccess& access) {
@@ -119,9 +239,11 @@ class RepresentationChanger final {
Type* output_type, Truncation truncation);
Node* GetFloat64RepresentationFor(Node* node,
MachineRepresentation output_rep,
- Type* output_type, Truncation truncation);
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
Node* GetWord32RepresentationFor(Node* node, MachineRepresentation output_rep,
- Type* output_type, Truncation truncation);
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
Node* GetBitRepresentationFor(Node* node, MachineRepresentation output_rep,
Type* output_type);
Node* GetWord64RepresentationFor(Node* node, MachineRepresentation output_rep,
@@ -130,8 +252,13 @@ class RepresentationChanger final {
Type* output_type, MachineRepresentation use);
Node* MakeTruncatedInt32Constant(double value);
Node* InsertChangeFloat32ToFloat64(Node* node);
+ Node* InsertChangeFloat64ToInt32(Node* node);
+ Node* InsertChangeFloat64ToUint32(Node* node);
+ Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
+ Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
+
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return isolate_; }
Factory* factory() const { return isolate()->factory(); }
diff --git a/deps/v8/src/compiler/s390/OWNERS b/deps/v8/src/compiler/s390/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/compiler/s390/OWNERS
+++ b/deps/v8/src/compiler/s390/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 1d9685668e..c0119cdd6c 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -27,6 +27,16 @@ class S390OperandConverter final : public InstructionOperandConverter {
size_t OutputCount() { return instr_->OutputCount(); }
+ bool Is64BitOperand(int index) {
+ return LocationOperand::cast(instr_->InputAt(index))->representation() ==
+ MachineRepresentation::kWord64;
+ }
+
+ bool Is32BitOperand(int index) {
+ return LocationOperand::cast(instr_->InputAt(index))->representation() ==
+ MachineRepresentation::kWord32;
+ }
+
bool CompareLogical() const {
switch (instr_->flags_condition()) {
case kUnsignedLessThan:
@@ -67,28 +77,36 @@ class S390OperandConverter final : public InstructionOperandConverter {
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
const size_t index = *first_index;
- *mode = AddressingModeField::decode(instr_->opcode());
- switch (*mode) {
+ if (mode) *mode = AddressingModeField::decode(instr_->opcode());
+ switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
+ case kMode_MR:
+ *first_index += 1;
+ return MemOperand(InputRegister(index + 0), 0);
case kMode_MRI:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
case kMode_MRR:
*first_index += 2;
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ case kMode_MRRI:
+ *first_index += 3;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+ InputInt32(index + 2));
}
UNREACHABLE();
return MemOperand(r0);
}
- MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
+ MemOperand MemoryOperand(AddressingMode* mode = NULL,
+ size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK_NOT_NULL(op);
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
@@ -96,12 +114,25 @@ class S390OperandConverter final : public InstructionOperandConverter {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
+
+ MemOperand InputStackSlot(size_t index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
};
static inline bool HasRegisterInput(Instruction* instr, int index) {
return instr->InputAt(index)->IsRegister();
}
+static inline bool HasImmediateInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsImmediate();
+}
+
+static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsStackSlot();
+}
+
namespace {
class OutOfLineLoadNAN32 final : public OutOfLineCode {
@@ -155,7 +186,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ must_save_lr_(!gen->frame_access_state()->has_frame()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -233,36 +265,22 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kOverflow:
// Overflow checked for AddP/SubP only.
switch (op) {
-#if V8_TARGET_ARCH_S390X
- case kS390_Add:
- case kS390_Sub:
- return lt;
-#endif
- case kS390_AddWithOverflow32:
- case kS390_SubWithOverflow32:
-#if V8_TARGET_ARCH_S390X
- return ne;
-#else
- return lt;
-#endif
+ case kS390_Add32:
+ case kS390_Add64:
+ case kS390_Sub32:
+ case kS390_Sub64:
+ return overflow;
default:
break;
}
break;
case kNotOverflow:
switch (op) {
-#if V8_TARGET_ARCH_S390X
- case kS390_Add:
- case kS390_Sub:
- return ge;
-#endif
- case kS390_AddWithOverflow32:
- case kS390_SubWithOverflow32:
-#if V8_TARGET_ARCH_S390X
- return eq;
-#else
- return ge;
-#endif
+ case kS390_Add32:
+ case kS390_Add64:
+ case kS390_Sub32:
+ case kS390_Sub64:
+ return nooverflow;
default:
break;
}
@@ -287,67 +305,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
i.InputDoubleRegister(1)); \
} while (0)
-#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
- do { \
- if (HasRegisterInput(instr, 1)) { \
- __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
- i.InputRegister(1)); \
- } else { \
- __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
- i.InputImmediate(1)); \
- } \
- } while (0)
-
-#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm) \
- do { \
- if (HasRegisterInput(instr, 1)) { \
- __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
- i.InputRegister(1)); \
- } else { \
- __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
- i.InputInt32(1)); \
- } \
- } while (0)
-
-#define ASSEMBLE_ADD_WITH_OVERFLOW() \
- do { \
- if (HasRegisterInput(instr, 1)) { \
- __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
- i.InputRegister(1), kScratchReg, r0); \
- } else { \
- __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
- i.InputInt32(1), kScratchReg, r0); \
- } \
- } while (0)
-
-#define ASSEMBLE_SUB_WITH_OVERFLOW() \
- do { \
- if (HasRegisterInput(instr, 1)) { \
- __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
- i.InputRegister(1), kScratchReg, r0); \
- } else { \
- __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
- -i.InputInt32(1), kScratchReg, r0); \
- } \
- } while (0)
-
-#if V8_TARGET_ARCH_S390X
-#define ASSEMBLE_ADD_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(AddP, AddP); \
- __ TestIfInt32(i.OutputRegister(), r0); \
+#define ASSEMBLE_BINOP(asm_instr) \
+ do { \
+ if (HasRegisterInput(instr, 1)) { \
+ __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
+ i.InputRegister(1)); \
+ } else if (HasImmediateInput(instr, 1)) { \
+ __ asm_instr(i.OutputRegister(), i.InputRegister(0), \
+ i.InputImmediate(1)); \
+ } else { \
+ UNIMPLEMENTED(); \
+ } \
} while (0)
-#define ASSEMBLE_SUB_WITH_OVERFLOW32() \
- do { \
- ASSEMBLE_BINOP(SubP, SubP); \
- __ TestIfInt32(i.OutputRegister(), r0); \
- } while (0)
-#else
-#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
-#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
-#endif
-
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
do { \
if (HasRegisterInput(instr, 1)) { \
@@ -393,30 +363,212 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
-#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
- do { \
- Label ge, done; \
- __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
- __ bge(&ge, Label::kNear); \
- __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
- __ b(&done, Label::kNear); \
- __ bind(&ge); \
- __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ bind(&done); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 1); \
+    /* Move the result into the double result register. */                    \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* TODO(bmeurer): We should really get rid of this special instruction, */ \
+ /* and generate a CallAddress instruction instead. */ \
+ FrameScope scope(masm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 0, 2); \
+    /* Move the result into the double result register. */                    \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ } while (0)
+
+#define ASSEMBLE_DOUBLE_MAX() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ cdbr(left_reg, right_reg); \
+ __ bunordered(&check_nan_left, Label::kNear); \
+ __ beq(&check_zero); \
+ __ bge(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cdbr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For max we want logical-and of sign bit: (L + R) */ \
+ __ ldr(result_reg, left_reg); \
+ __ adbr(result_reg, right_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&check_nan_left); \
+ __ cdbr(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (!right_reg.is(result_reg)) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (!left_reg.is(result_reg)) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
+ } while (0)
+
+#define ASSEMBLE_DOUBLE_MIN() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ cdbr(left_reg, right_reg); \
+ __ bunordered(&check_nan_left, Label::kNear); \
+ __ beq(&check_zero); \
+ __ ble(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cdbr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For min we want logical-or of sign bit: -(-L + -R) */ \
+ __ lcdbr(left_reg, left_reg); \
+ __ ldr(result_reg, left_reg); \
+ if (left_reg.is(right_reg)) { \
+ __ adbr(result_reg, right_reg); \
+ } else { \
+ __ sdbr(result_reg, right_reg); \
+ } \
+ __ lcdbr(result_reg, result_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&check_nan_left); \
+ __ cdbr(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (!right_reg.is(result_reg)) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (!left_reg.is(result_reg)) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
-#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
- do { \
- Label ge, done; \
- __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
- __ bge(&ge, Label::kNear); \
- __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ b(&done, Label::kNear); \
- __ bind(&ge); \
- __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); \
- __ bind(&done); \
+#define ASSEMBLE_FLOAT_MAX() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ cebr(left_reg, right_reg); \
+ __ bunordered(&check_nan_left, Label::kNear); \
+ __ beq(&check_zero); \
+ __ bge(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cebr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For max we want logical-and of sign bit: (L + R) */ \
+ __ ldr(result_reg, left_reg); \
+ __ aebr(result_reg, right_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&check_nan_left); \
+ __ cebr(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (!right_reg.is(result_reg)) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (!left_reg.is(result_reg)) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
+#define ASSEMBLE_FLOAT_MIN() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_nan_left, check_zero, return_left, return_right, done; \
+ __ cebr(left_reg, right_reg); \
+ __ bunordered(&check_nan_left, Label::kNear); \
+ __ beq(&check_zero); \
+ __ ble(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cebr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For min we want logical-or of sign bit: -(-L + -R) */ \
+ __ lcebr(left_reg, left_reg); \
+ __ ldr(result_reg, left_reg); \
+ if (left_reg.is(right_reg)) { \
+ __ aebr(result_reg, right_reg); \
+ } else { \
+ __ sebr(result_reg, right_reg); \
+ } \
+ __ lcebr(result_reg, result_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&check_nan_left); \
+ __ cebr(left_reg, left_reg); \
+ /* left == NaN. */ \
+ __ bunordered(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (!right_reg.is(result_reg)) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (!left_reg.is(result_reg)) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
+ } while (0)
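
(The +-0 handling in the four min/max macros above leans on IEEE-754 sign
arithmetic; a small standalone check of the identities the comments cite:)

    #include <cstdio>
    int main() {
      double pz = 0.0, nz = -0.0;
      // max(+0, -0): adding the operands ANDs the sign bits, giving +0.
      std::printf("%g\n", pz + nz);       // prints 0
      // min(+0, -0): -(-L + -R) ORs the sign bits, giving -0.
      std::printf("%g\n", -(-pz + -nz));  // prints -0
      return 0;
    }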
// Only MRI mode for these instructions available
#define ASSEMBLE_LOAD_FLOAT(asm_instr) \
do { \
@@ -461,7 +613,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ asm_instr(value, operand); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
@@ -469,7 +620,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -477,11 +627,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
__ bge(ool->entry()); \
+ __ CleanUInt32(offset); \
__ asm_instr(result, operand); \
__ bind(ool->exit()); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
Register result = i.OutputRegister(); \
@@ -489,7 +639,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -497,11 +646,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
auto ool = new (zone()) OutOfLineLoadZero(this, result); \
__ bge(ool->entry()); \
+ __ CleanUInt32(offset); \
__ asm_instr(result, operand); \
__ bind(ool->exit()); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
do { \
Label done; \
@@ -509,7 +658,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -517,11 +665,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
+ __ CleanUInt32(offset); \
__ StoreFloat32(value, operand); \
__ bind(&done); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
do { \
Label done; \
@@ -530,7 +678,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -538,11 +685,11 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
+ __ CleanUInt32(offset); \
__ StoreDouble(value, operand); \
__ bind(&done); \
} while (0)
-// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
Label done; \
@@ -550,7 +697,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, index); \
Register offset = operand.rb(); \
- __ lgfr(offset, offset); \
if (HasRegisterInput(instr, 2)) { \
__ CmpLogical32(offset, i.InputRegister(2)); \
} else { \
@@ -558,6 +704,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
__ bge(&done); \
Register value = i.InputRegister(3); \
+ __ CleanUInt32(offset); \
__ asm_instr(value, operand); \
__ bind(&done); \
} while (0)
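
(What the new CleanUInt32 step in the checked load/store macros guards
against: on 64-bit targets the 32-bit offset register may carry garbage in
its upper word, so it is zero-extended before address formation. A model, not
the MacroAssembler implementation:)

    #include <cstdint>
    // Keep only the low 32 bits of a 64-bit register.
    uint64_t CleanUInt32Model(uint64_t reg) { return reg & 0xFFFFFFFFull; }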
@@ -566,22 +713,7 @@ void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ RestoreFrameStateForTailCall();
}
@@ -613,12 +745,126 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void FlushPendingPushRegisters(MacroAssembler* masm,
+ FrameAccessState* frame_access_state,
+ ZoneVector<Register>* pending_pushes) {
+ switch (pending_pushes->size()) {
+ case 0:
+ break;
+ case 1:
+ masm->Push((*pending_pushes)[0]);
+ break;
+ case 2:
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ break;
+ case 3:
+ masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ (*pending_pushes)[2]);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ frame_access_state->IncreaseSPDelta(pending_pushes->size());
+ pending_pushes->resize(0);
+}
+
+void AddPendingPushRegister(MacroAssembler* masm,
+ FrameAccessState* frame_access_state,
+ ZoneVector<Register>* pending_pushes,
+ Register reg) {
+ pending_pushes->push_back(reg);
+ if (pending_pushes->size() == 3 || reg.is(ip)) {
+ FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ }
+}
+void AdjustStackPointerForTailCall(
+ MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ ZoneVector<Register>* pending_pushes = nullptr,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ if (pending_pushes != nullptr) {
+ FlushPendingPushRegisters(masm, state, pending_pushes);
+ }
+ masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ if (pending_pushes != nullptr) {
+ FlushPendingPushRegisters(masm, state, pending_pushes);
+ }
+ masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
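
(A worked example of the slot arithmetic in AdjustStackPointerForTailCall,
under assumed numbers: if GetSPToFPSlotCount() is 4 and the fixed slot count
above fp is 2, current_sp_offset is 6; a tail call whose first unused slot is
4 gives stack_slot_delta = -2, so AddP raises sp by 2 * kPointerSize and
IncreaseSPDelta records the two-slot shrinkage.)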
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+ ZoneVector<MoveOperands*> pushes(zone());
+ GetPushCompatibleMoves(instr, flags, &pushes);
+
+ if (!pushes.empty() &&
+ (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+ first_unused_stack_slot)) {
+ S390OperandConverter g(this, instr);
+ ZoneVector<Register> pending_pushes(zone());
+ for (auto move : pushes) {
+ LocationOperand destination_location(
+ LocationOperand::cast(move->destination()));
+ InstructionOperand source(move->source());
+ AdjustStackPointerForTailCall(
+ masm(), frame_access_state(),
+ destination_location.index() - pending_pushes.size(),
+ &pending_pushes);
+ if (source.IsStackSlot()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ ip);
+ } else if (source.IsRegister()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ source_location.GetRegister());
+ } else if (source.IsImmediate()) {
+ AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ ip);
+ } else {
+        // Pushes of non-scalar data types are not supported.
+ UNIMPLEMENTED();
+ }
+ move->Eliminate();
+ }
+ FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ }
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, nullptr, false);
+}
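
(The pending-push batching above exists because the Push overloads dispatched
in FlushPendingPushRegisters take at most three registers, and ip doubles as
the scratch register for stack-slot and immediate sources; a schematic of the
policy, in comments only:)

    // pending = [r2]          -> wait for more
    // pending = [r2, r3]      -> wait for more
    // pending = [r2, r3, r4]  -> Push(r2, r3, r4); pending = []
    // pending = [.., ip]      -> flush immediately, ip is about to be reused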
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
S390OperandConverter i(this, instr);
ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
switch (opcode) {
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
@@ -635,8 +881,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -654,6 +898,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RelocInfo::CODE_TARGET);
}
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ __ Jump(i.InputRegister(0));
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -682,8 +934,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ CmpP(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -692,6 +942,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -702,7 +953,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -726,6 +977,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchDebugBreak:
+ __ stop("kArchDebugBreak");
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -735,7 +992,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -794,22 +1053,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Operand(offset.offset()));
break;
}
- case kS390_And:
- ASSEMBLE_BINOP(AndP, AndP);
+ case kS390_And32:
+ ASSEMBLE_BINOP(And);
break;
- case kS390_AndComplement:
- __ NotP(i.InputRegister(1));
- __ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ case kS390_And64:
+ ASSEMBLE_BINOP(AndP);
break;
- case kS390_Or:
- ASSEMBLE_BINOP(OrP, OrP);
+    case kS390_Or32:
+      ASSEMBLE_BINOP(Or);
+      break;
+    case kS390_Or64:
+      ASSEMBLE_BINOP(OrP);
break;
- case kS390_OrComplement:
- __ NotP(i.InputRegister(1));
- __ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ case kS390_Xor32:
+ ASSEMBLE_BINOP(Xor);
break;
- case kS390_Xor:
- ASSEMBLE_BINOP(XorP, XorP);
+ case kS390_Xor64:
+ ASSEMBLE_BINOP(XorP);
break;
case kS390_ShiftLeft32:
if (HasRegisterInput(instr, 1)) {
@@ -818,16 +1077,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
- ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+ ASSEMBLE_BINOP(ShiftLeft);
}
} else {
- ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
+ ASSEMBLE_BINOP(ShiftLeft);
}
__ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftLeft64:
- ASSEMBLE_BINOP(sllg, sllg);
+ ASSEMBLE_BINOP(sllg);
break;
#endif
case kS390_ShiftRight32:
@@ -837,16 +1096,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ LoadRR(kScratchReg, i.InputRegister(1));
__ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
} else {
- ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+ ASSEMBLE_BINOP(ShiftRight);
}
} else {
- ASSEMBLE_BINOP(ShiftRight, ShiftRight);
+ ASSEMBLE_BINOP(ShiftRight);
}
__ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRight64:
- ASSEMBLE_BINOP(srlg, srlg);
+ ASSEMBLE_BINOP(srlg);
break;
#endif
case kS390_ShiftRightArith32:
@@ -857,16 +1116,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
kScratchReg);
} else {
- ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+ ASSEMBLE_BINOP(ShiftRightArith);
}
} else {
- ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
+ ASSEMBLE_BINOP(ShiftRightArith);
}
__ LoadlW(i.OutputRegister(), i.OutputRegister());
break;
#if V8_TARGET_ARCH_S390X
case kS390_ShiftRightArith64:
- ASSEMBLE_BINOP(srag, srag);
+ ASSEMBLE_BINOP(srag);
break;
#endif
#if !V8_TARGET_ARCH_S390X
@@ -957,9 +1216,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
#endif
- case kS390_Not:
- __ LoadRR(i.OutputRegister(), i.InputRegister(0));
- __ NotP(i.OutputRegister());
+ case kS390_Not32:
+ __ Not32(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_Not64:
+ __ Not64(i.OutputRegister(), i.InputRegister(0));
break;
case kS390_RotLeftAndMask32:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -1015,19 +1276,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
#endif
- case kS390_Add:
-#if V8_TARGET_ARCH_S390X
- if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- ASSEMBLE_ADD_WITH_OVERFLOW();
- } else {
-#endif
- ASSEMBLE_BINOP(AddP, AddP);
-#if V8_TARGET_ARCH_S390X
- }
-#endif
+ case kS390_Add32:
+ ASSEMBLE_BINOP(Add32);
+ __ LoadW(i.OutputRegister(), i.OutputRegister());
break;
- case kS390_AddWithOverflow32:
- ASSEMBLE_ADD_WITH_OVERFLOW32();
+ case kS390_Add64:
+ ASSEMBLE_BINOP(AddP);
break;
case kS390_AddFloat:
// Ensure we don't clobber right/InputReg(1)
@@ -1049,19 +1303,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
}
break;
- case kS390_Sub:
-#if V8_TARGET_ARCH_S390X
- if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- ASSEMBLE_SUB_WITH_OVERFLOW();
- } else {
-#endif
- ASSEMBLE_BINOP(SubP, SubP);
-#if V8_TARGET_ARCH_S390X
- }
-#endif
+ case kS390_Sub32:
+ ASSEMBLE_BINOP(Sub32);
+ __ LoadW(i.OutputRegister(), i.OutputRegister());
break;
- case kS390_SubWithOverflow32:
- ASSEMBLE_SUB_WITH_OVERFLOW32();
+ case kS390_Sub64:
+ ASSEMBLE_BINOP(SubP);
break;
case kS390_SubFloat:
// OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
@@ -1090,19 +1337,80 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kS390_Mul32:
-#if V8_TARGET_ARCH_S390X
- case kS390_Mul64:
+ if (HasRegisterInput(instr, 1)) {
+ __ Mul32(i.InputRegister(0), i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ __ Mul32(i.InputRegister(0), i.InputImmediate(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+#ifdef V8_TARGET_ARCH_S390X
+        // Avoid an endianness issue here:
+ // stg r1, 0(fp)
+ // ...
+ // msy r2, 0(fp) <-- This will read the upper 32 bits
+ __ lg(kScratchReg, i.InputStackSlot(1));
+ __ Mul32(i.InputRegister(0), kScratchReg);
+#else
+ __ Mul32(i.InputRegister(0), i.InputStackSlot(1));
#endif
- __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case kS390_Mul64:
+ if (HasRegisterInput(instr, 1)) {
+ __ Mul64(i.InputRegister(0), i.InputRegister(1));
+ } else if (HasImmediateInput(instr, 1)) {
+ __ Mul64(i.InputRegister(0), i.InputImmediate(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+ __ Mul64(i.InputRegister(0), i.InputStackSlot(1));
+ } else {
+ UNIMPLEMENTED();
+ }
break;
case kS390_MulHigh32:
__ LoadRR(r1, i.InputRegister(0));
- __ mr_z(r0, i.InputRegister(1));
+ if (HasRegisterInput(instr, 1)) {
+ __ mr_z(r0, i.InputRegister(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+#ifdef V8_TARGET_ARCH_S390X
+        // Avoid an endianness issue here:
+ // stg r1, 0(fp)
+ // ...
+ // mfy r2, 0(fp) <-- This will read the upper 32 bits
+ __ lg(kScratchReg, i.InputStackSlot(1));
+ __ mr_z(r0, kScratchReg);
+#else
+ __ mfy(r0, i.InputStackSlot(1));
+#endif
+ } else {
+ UNIMPLEMENTED();
+ }
__ LoadW(i.OutputRegister(), r0);
break;
+ case kS390_Mul32WithHigh32:
+ __ LoadRR(r1, i.InputRegister(0));
+ __ mr_z(r0, i.InputRegister(1));
+ __ LoadW(i.OutputRegister(0), r1); // low
+ __ LoadW(i.OutputRegister(1), r0); // high
+ break;
case kS390_MulHighU32:
__ LoadRR(r1, i.InputRegister(0));
- __ mlr(r0, i.InputRegister(1));
+ if (HasRegisterInput(instr, 1)) {
+ __ mlr(r0, i.InputRegister(1));
+ } else if (HasStackSlotInput(instr, 1)) {
+#ifdef V8_TARGET_ARCH_S390X
+        // Avoid an endianness issue here:
+ // stg r1, 0(fp)
+ // ...
+ // mfy r2, 0(fp) <-- This will read the upper 32 bits
+ __ lg(kScratchReg, i.InputStackSlot(1));
+ __ mlr(r0, kScratchReg);
+#else
+ __ ml(r0, i.InputStackSlot(1));
+#endif
+ } else {
+ UNIMPLEMENTED();
+ }
__ LoadlW(i.OutputRegister(), r0);
break;
case kS390_MulFloat:
@@ -1220,14 +1528,90 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kS390_ModDouble:
ASSEMBLE_FLOAT_MODULO();
break;
- case kS390_Neg:
- __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ __ Move(d1, d3);
+ break;
+ }
+ case kS390_Neg32:
+ __ lcr(i.OutputRegister(), i.InputRegister(0));
+ __ LoadW(i.OutputRegister(), i.OutputRegister());
+ break;
+ case kS390_Neg64:
+ __ lcgr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_MaxFloat:
+ ASSEMBLE_FLOAT_MAX();
break;
case kS390_MaxDouble:
- ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
+ ASSEMBLE_DOUBLE_MAX();
+ break;
+ case kS390_MinFloat:
+ ASSEMBLE_FLOAT_MIN();
break;
case kS390_MinDouble:
- ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
+ ASSEMBLE_DOUBLE_MIN();
break;
case kS390_AbsDouble:
__ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -1251,6 +1635,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
break;
+ case kS390_NegFloat:
+ ASSEMBLE_FLOAT_UNOP(lcebr);
+ break;
case kS390_NegDouble:
ASSEMBLE_FLOAT_UNOP(lcdbr);
break;
@@ -1305,8 +1692,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
#endif
+ case kS390_Float64SilenceNaN: {
+ DoubleRegister value = i.InputDoubleRegister(0);
+ DoubleRegister result = i.OutputDoubleRegister();
+ __ CanonicalizeNaN(result, value);
+ break;
+ }
case kS390_Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ lay(sp, MemOperand(sp, -kDoubleSize));
__ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
@@ -1318,9 +1711,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kS390_PushFrame: {
int num_slots = i.InputInt32(1);
__ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ StoreDouble(i.InputDoubleRegister(0),
- MemOperand(sp));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
+ }
} else {
__ StoreP(i.InputRegister(0),
MemOperand(sp));
@@ -1329,9 +1727,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kS390_StoreToStackSlot: {
int slot = i.InputInt32(1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
- __ StoreDouble(i.InputDoubleRegister(0),
- MemOperand(sp, slot * kPointerSize));
+ if (instr->InputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize));
+ } else {
+ DCHECK(op->representation() == MachineRepresentation::kFloat32);
+ __ StoreFloat32(i.InputDoubleRegister(0),
+ MemOperand(sp, slot * kPointerSize));
+ }
} else {
__ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
}
@@ -1418,7 +1823,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kS390_Float32ToInt32: {
bool check_conversion = (i.OutputCount() > 1);
__ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
- kScratchDoubleReg);
+ kScratchDoubleReg, kRoundToZero);
if (check_conversion) {
Label conversion_done;
__ LoadImmP(i.OutputRegister(1), Operand::Zero());
@@ -1555,9 +1960,31 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
break;
+ case kS390_LoadWordU32:
+ ASSEMBLE_LOAD_INTEGER(LoadlW);
+ break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadW);
break;
+ case kS390_LoadReverse16:
+ ASSEMBLE_LOAD_INTEGER(lrvh);
+ break;
+ case kS390_LoadReverse32:
+ ASSEMBLE_LOAD_INTEGER(lrv);
+ break;
+ case kS390_LoadReverse64:
+ ASSEMBLE_LOAD_INTEGER(lrvg);
+ break;
+ case kS390_LoadReverse16RR:
+ __ lrvr(i.OutputRegister(), i.InputRegister(0));
+ __ rll(i.OutputRegister(), i.OutputRegister(), Operand(16));
+ break;
+ case kS390_LoadReverse32RR:
+ __ lrvr(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kS390_LoadReverse64RR:
+ __ lrvgr(i.OutputRegister(), i.InputRegister(0));
+ break;
#if V8_TARGET_ARCH_S390X
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
@@ -1583,6 +2010,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_STORE_INTEGER(StoreP);
break;
#endif
+ case kS390_StoreReverse16:
+ ASSEMBLE_STORE_INTEGER(strvh);
+ break;
+ case kS390_StoreReverse32:
+ ASSEMBLE_STORE_INTEGER(strv);
+ break;
+ case kS390_StoreReverse64:
+ ASSEMBLE_STORE_INTEGER(strvg);
+ break;
case kS390_StoreFloat32:
ASSEMBLE_STORE_FLOAT32();
break;
@@ -1607,7 +2043,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
break;
case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadW);
+ ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
break;
case kCheckedLoadWord64:
#if V8_TARGET_ARCH_S390X
@@ -1644,10 +2080,35 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_DOUBLE();
break;
+ case kAtomicLoadInt8:
+ __ LoadB(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadUint8:
+ __ LoadlB(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadInt16:
+ __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadUint16:
+ __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicLoadWord32:
+ __ LoadlW(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kAtomicStoreWord8:
+ __ StoreByte(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ break;
+ case kAtomicStoreWord16:
+ __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ break;
+ case kAtomicStoreWord32:
+ __ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
+ break;
default:
UNREACHABLE();
break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
// Assembles branches after an instruction.
@@ -1680,63 +2141,29 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
S390OperandConverter i(this, instr);
- Label done;
ArchOpcode op = instr->arch_opcode();
- bool check_unordered = (op == kS390_CmpDouble || kS390_CmpFloat);
+ bool check_unordered = (op == kS390_CmpDouble || op == kS390_CmpFloat);
// Overflow checked for add/sub only.
DCHECK((condition != kOverflow && condition != kNotOverflow) ||
- (op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32) ||
- (op == kS390_Add || op == kS390_Sub));
+         (op == kS390_Add32 || op == kS390_Add64 || op == kS390_Sub32 ||
+ op == kS390_Sub64));
// Materialize a full 32-bit 1 or 0 value. The result register is always the
// last output of the instruction.
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cond = FlagsConditionToCondition(condition, op);
- switch (cond) {
- case ne:
- case ge:
- case gt:
- if (check_unordered) {
- __ LoadImmP(reg, Operand(1));
- __ LoadImmP(kScratchReg, Operand::Zero());
- __ bunordered(&done);
- Label cond_true;
- __ b(cond, &cond_true, Label::kNear);
- __ LoadRR(reg, kScratchReg);
- __ bind(&cond_true);
- } else {
- Label cond_true, done_here;
- __ LoadImmP(reg, Operand(1));
- __ b(cond, &cond_true, Label::kNear);
- __ LoadImmP(reg, Operand::Zero());
- __ bind(&cond_true);
- }
- break;
- case eq:
- case lt:
- case le:
- if (check_unordered) {
- __ LoadImmP(reg, Operand::Zero());
- __ LoadImmP(kScratchReg, Operand(1));
- __ bunordered(&done);
- Label cond_false;
- __ b(NegateCondition(cond), &cond_false, Label::kNear);
- __ LoadRR(reg, kScratchReg);
- __ bind(&cond_false);
- } else {
- __ LoadImmP(reg, Operand::Zero());
- Label cond_false;
- __ b(NegateCondition(cond), &cond_false, Label::kNear);
- __ LoadImmP(reg, Operand(1));
- __ bind(&cond_false);
- }
- break;
- default:
- UNREACHABLE();
- break;
+ Label done;
+ if (check_unordered) {
+ __ LoadImmP(reg, (cond == eq || cond == le || cond == lt) ? Operand::Zero()
+ : Operand(1));
+ __ bunordered(&done);
}
+ __ LoadImmP(reg, Operand::Zero());
+ __ LoadImmP(kScratchReg, Operand(1));
+  // locr is sufficient since reg's upper 32 bits are guaranteed to be 0.
+ __ locr(cond, reg, kScratchReg);
__ bind(&done);
}
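The rewrite above collapses the old per-condition branching into one pattern: preset the result, keep the other candidate in a scratch register, and let locr (load-on-condition) select between them without a taken branch. A minimal C++ sketch of the same selection idea, assuming nothing about V8's macro assembler (a compiler typically lowers the conditional below to a conditional move, or locr on s390):

  #include <cstdint>

  // Branchless boolean materialization, mirroring the sequence above:
  //   LoadImmP(reg, 0); LoadImmP(scratch, 1); locr(cond, reg, scratch);
  uint32_t MaterializeBool(int32_t a, int32_t b) {
    uint32_t reg = 0;          // result preset to 0
    uint32_t scratch = 1;      // candidate value 1
    if (a < b) reg = scratch;  // locr: copy only if the condition holds
    return reg;
  }

For floating-point compares, the preset issued before bunordered already supplies the correct answer for NaN operands, so the unordered case needs no extra branching either.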
@@ -1744,7 +2171,7 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
S390OperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ CmpP(input, Operand(i.InputInt32(index + 0)));
+ __ Cmp32(input, Operand(i.InputInt32(index + 0)));
__ beq(GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
@@ -1767,17 +2194,44 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Jump(kScratchReg);
}
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
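AssembleDeoptimizerCall now reports failure through the new CodeGenResult return type rather than emitting a call through a null entry, matching the `return kSuccess;` added to AssembleArchInstruction above. A sketch of the pattern only, with everything around it assumed:

  enum CodeGenResult { kSuccess, kTooManyDeoptimizationBailouts };

  // Sketch: a null table entry means the fixed-size deoptimization entry
  // table is exhausted, so surface an error code instead of crashing.
  CodeGenResult AssembleDeoptCall(const void* deopt_entry) {
    if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
    // ... record the deopt reason and emit the runtime call ...
    return kSuccess;
  }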
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList double_saves = descriptor->CalleeSavedFPRegisters();
+
+ // Save callee-saved Double registers.
+ if (double_saves != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ DCHECK(kNumCalleeSavedDoubles ==
+ base::bits::CountPopulation32(double_saves));
+ frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
+ (kDoubleSize / kPointerSize));
+ }
+ // Save callee-saved registers.
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ // register save area does not include the fp or constant pool pointer.
+ const int num_saves = kNumCalleeSaved - 1;
+ DCHECK(num_saves == base::bits::CountPopulation32(saves));
+ frame->AllocateSavedCalleeRegisterSlots(num_saves);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
@@ -1794,7 +2248,7 @@ void CodeGenerator::AssemblePrologue() {
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1805,15 +2259,12 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
- if (double_saves != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ lay(sp, MemOperand(sp, -stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
}
// Save callee-saved Double registers.
@@ -1821,8 +2272,6 @@ void CodeGenerator::AssemblePrologue() {
__ MultiPushDoubles(double_saves);
DCHECK(kNumCalleeSavedDoubles ==
base::bits::CountPopulation32(double_saves));
- frame()->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
- (kDoubleSize / kPointerSize));
}
// Save callee-saved registers.
@@ -1830,10 +2279,6 @@ void CodeGenerator::AssemblePrologue() {
if (saves != 0) {
__ MultiPush(saves);
// register save area does not include the fp or constant pool pointer.
- const int num_saves =
- kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
- DCHECK(num_saves == base::bits::CountPopulation32(saves));
- frame()->AllocateSavedCalleeRegisterSlots(num_saves);
}
}
@@ -1898,10 +2343,30 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- __ mov(dst, Operand(src.ToInt32()));
+#if V8_TARGET_ARCH_S390X
+ if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#else
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+#endif
+ __ mov(dst, Operand(src.ToInt32(), src.rmode()));
+ } else {
+ __ mov(dst, Operand(src.ToInt32()));
+ }
break;
case Constant::kInt64:
+#if V8_TARGET_ARCH_S390X
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ __ mov(dst, Operand(src.ToInt64(), src.rmode()));
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ __ mov(dst, Operand(src.ToInt64()));
+ }
+#else
__ mov(dst, Operand(src.ToInt64()));
+#endif // V8_TARGET_ARCH_S390X
break;
case Constant::kFloat32:
__ Move(dst,
@@ -1935,7 +2400,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ StoreP(dst, g.ToMemOperand(destination), r0);
}
} else {
- DoubleRegister dst = destination->IsDoubleRegister()
+ DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
@@ -1946,28 +2411,44 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ LoadDoubleLiteral(dst, value, kScratchReg);
}
- if (destination->IsDoubleStackSlot()) {
+ if (destination->IsFPStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination));
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
- __ StoreDouble(src, g.ToMemOperand(destination));
+ DCHECK(destination->IsFPStackSlot());
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ StoreDouble(src, g.ToMemOperand(destination));
+ } else {
+ __ StoreFloat32(src, g.ToMemOperand(destination));
+ }
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
- if (destination->IsDoubleRegister()) {
- __ LoadDouble(g.ToDoubleRegister(destination), src);
+ if (destination->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(source);
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(g.ToDoubleRegister(destination), src);
+ } else {
+ __ LoadFloat32(g.ToDoubleRegister(destination), src);
+ }
} else {
+ LocationOperand* op = LocationOperand::cast(source);
DoubleRegister temp = kScratchDoubleReg;
- __ LoadDouble(temp, src);
- __ StoreDouble(temp, g.ToMemOperand(destination));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ LoadDouble(temp, src);
+ __ StoreDouble(temp, g.ToMemOperand(destination));
+ } else {
+ __ LoadFloat32(temp, src);
+ __ StoreFloat32(temp, g.ToMemOperand(destination));
+ }
}
} else {
UNREACHABLE();
@@ -1996,7 +2477,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ StoreP(temp, dst);
}
#if V8_TARGET_ARCH_S390X
- } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+ } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
@@ -2009,24 +2490,24 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ LoadP(temp_1, dst);
__ StoreP(temp_0, dst);
__ StoreP(temp_1, src);
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
DoubleRegister temp = kScratchDoubleReg;
DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
DoubleRegister dst = g.ToDoubleRegister(destination);
__ ldr(temp, src);
__ ldr(src, dst);
__ ldr(dst, temp);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ ldr(temp, src);
__ LoadDouble(src, dst);
__ StoreDouble(temp, dst);
}
#if !V8_TARGET_ARCH_S390X
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
DoubleRegister temp_0 = kScratchDoubleReg;
DoubleRegister temp_1 = d0;
MemOperand src = g.ToMemOperand(source);
@@ -2049,10 +2530,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
}
-void CodeGenerator::AddNopForSmiCodeInlining() {
- // We do not insert nops for inlined Smi code.
-}
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index a32f8753f3..80e1532adb 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -12,11 +12,12 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
- V(S390_And) \
- V(S390_AndComplement) \
- V(S390_Or) \
- V(S390_OrComplement) \
- V(S390_Xor) \
+ V(S390_And32) \
+ V(S390_And64) \
+ V(S390_Or32) \
+ V(S390_Or64) \
+ V(S390_Xor32) \
+ V(S390_Xor64) \
V(S390_ShiftLeft32) \
V(S390_ShiftLeft64) \
V(S390_ShiftLeftPair) \
@@ -28,23 +29,25 @@ namespace compiler {
V(S390_ShiftRightArithPair) \
V(S390_RotRight32) \
V(S390_RotRight64) \
- V(S390_Not) \
+ V(S390_Not32) \
+ V(S390_Not64) \
V(S390_RotLeftAndMask32) \
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
- V(S390_Add) \
- V(S390_AddWithOverflow32) \
+ V(S390_Add32) \
+ V(S390_Add64) \
V(S390_AddPair) \
V(S390_AddFloat) \
V(S390_AddDouble) \
- V(S390_Sub) \
- V(S390_SubWithOverflow32) \
+ V(S390_Sub32) \
+ V(S390_Sub64) \
V(S390_SubFloat) \
V(S390_SubDouble) \
V(S390_SubPair) \
V(S390_MulPair) \
V(S390_Mul32) \
+ V(S390_Mul32WithHigh32) \
V(S390_Mul64) \
V(S390_MulHigh32) \
V(S390_MulHighU32) \
@@ -61,8 +64,10 @@ namespace compiler {
V(S390_ModU32) \
V(S390_ModU64) \
V(S390_ModDouble) \
- V(S390_Neg) \
+ V(S390_Neg32) \
+ V(S390_Neg64) \
V(S390_NegDouble) \
+ V(S390_NegFloat) \
V(S390_SqrtFloat) \
V(S390_FloorFloat) \
V(S390_CeilFloat) \
@@ -73,7 +78,9 @@ namespace compiler {
V(S390_CeilDouble) \
V(S390_TruncateDouble) \
V(S390_RoundDouble) \
+ V(S390_MaxFloat) \
V(S390_MaxDouble) \
+ V(S390_MinFloat) \
V(S390_MinDouble) \
V(S390_AbsDouble) \
V(S390_Cntlz32) \
@@ -107,6 +114,7 @@ namespace compiler {
V(S390_Float32ToInt32) \
V(S390_Float32ToUint32) \
V(S390_Float32ToDouble) \
+ V(S390_Float64SilenceNaN) \
V(S390_DoubleToInt32) \
V(S390_DoubleToUint32) \
V(S390_DoubleToInt64) \
@@ -126,6 +134,13 @@ namespace compiler {
V(S390_LoadWordS16) \
V(S390_LoadWordU16) \
V(S390_LoadWordS32) \
+ V(S390_LoadWordU32) \
+ V(S390_LoadReverse16RR) \
+ V(S390_LoadReverse32RR) \
+ V(S390_LoadReverse64RR) \
+ V(S390_LoadReverse16) \
+ V(S390_LoadReverse32) \
+ V(S390_LoadReverse64) \
V(S390_LoadWord64) \
V(S390_LoadFloat32) \
V(S390_LoadDouble) \
@@ -133,6 +148,9 @@ namespace compiler {
V(S390_StoreWord16) \
V(S390_StoreWord32) \
V(S390_StoreWord64) \
+ V(S390_StoreReverse16) \
+ V(S390_StoreReverse32) \
+ V(S390_StoreReverse64) \
V(S390_StoreFloat32) \
V(S390_StoreDouble)
@@ -150,8 +168,10 @@ namespace compiler {
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1] */
+ V(MR) /* [%r0 ] */ \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1 ] */ \
+ V(MRRI) /* [%r0 + %r1 + K] */
} // namespace compiler
} // namespace internal
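The new MR and MRRI entries widen the operand shapes the selector can describe, lining up with z/Architecture's base, base+displacement, base+index, and base+index+displacement memory operands. Addressing modes travel inside the InstructionCode word; a simplified sketch of that encoding, with the field position assumed for illustration (the real layout is AddressingModeField in instruction-codes.h):

  #include <cstdint>

  enum AddressingMode { kMode_None, kMode_MR, kMode_MRI, kMode_MRR, kMode_MRRI };

  // Stand-in for AddressingModeField::encode(): pack the mode into spare
  // bits of the opcode word so the code generator can decode it later.
  constexpr int kModeShift = 24;  // assumed position, illustration only
  constexpr uint32_t Encode(uint32_t opcode, AddressingMode mode) {
    return opcode | (static_cast<uint32_t>(mode) << kModeShift);
  }
  constexpr AddressingMode Decode(uint32_t code) {
    return static_cast<AddressingMode>(code >> kModeShift);
  }

  static_assert(Decode(Encode(42, kMode_MRRI)) == kMode_MRRI, "round-trips");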
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 2d98e1109d..5ebe489e39 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -13,11 +13,12 @@ bool InstructionScheduler::SchedulerSupported() { return true; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
switch (instr->arch_opcode()) {
- case kS390_And:
- case kS390_AndComplement:
- case kS390_Or:
- case kS390_OrComplement:
- case kS390_Xor:
+ case kS390_And32:
+ case kS390_And64:
+ case kS390_Or32:
+ case kS390_Or64:
+ case kS390_Xor32:
+ case kS390_Xor64:
case kS390_ShiftLeft32:
case kS390_ShiftLeft64:
case kS390_ShiftLeftPair:
@@ -29,23 +30,25 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_ShiftRightArithPair:
case kS390_RotRight32:
case kS390_RotRight64:
- case kS390_Not:
+ case kS390_Not32:
+ case kS390_Not64:
case kS390_RotLeftAndMask32:
case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64:
case kS390_RotLeftAndClearRight64:
- case kS390_Add:
- case kS390_AddWithOverflow32:
+ case kS390_Add32:
+ case kS390_Add64:
case kS390_AddPair:
case kS390_AddFloat:
case kS390_AddDouble:
- case kS390_Sub:
- case kS390_SubWithOverflow32:
+ case kS390_Sub32:
+ case kS390_Sub64:
case kS390_SubPair:
case kS390_MulPair:
case kS390_SubFloat:
case kS390_SubDouble:
case kS390_Mul32:
+ case kS390_Mul32WithHigh32:
case kS390_Mul64:
case kS390_MulHigh32:
case kS390_MulHighU32:
@@ -62,8 +65,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_ModU32:
case kS390_ModU64:
case kS390_ModDouble:
- case kS390_Neg:
+ case kS390_Neg32:
+ case kS390_Neg64:
case kS390_NegDouble:
+ case kS390_NegFloat:
case kS390_SqrtFloat:
case kS390_FloorFloat:
case kS390_CeilFloat:
@@ -74,7 +79,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_CeilDouble:
case kS390_TruncateDouble:
case kS390_RoundDouble:
+ case kS390_MaxFloat:
case kS390_MaxDouble:
+ case kS390_MinFloat:
case kS390_MinDouble:
case kS390_AbsDouble:
case kS390_Cntlz32:
@@ -104,6 +111,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Float32ToUint32:
case kS390_Float32ToUint64:
case kS390_Float32ToDouble:
+ case kS390_Float64SilenceNaN:
case kS390_DoubleToInt32:
case kS390_DoubleToUint32:
case kS390_Float32ToInt64:
@@ -119,6 +127,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_BitcastFloat32ToInt32:
case kS390_BitcastInt64ToDouble:
case kS390_BitcastDoubleToInt64:
+ case kS390_LoadReverse16RR:
+ case kS390_LoadReverse32RR:
+ case kS390_LoadReverse64RR:
return kNoOpcodeFlags;
case kS390_LoadWordS8:
@@ -126,15 +137,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadWordS16:
case kS390_LoadWordU16:
case kS390_LoadWordS32:
+ case kS390_LoadWordU32:
case kS390_LoadWord64:
case kS390_LoadFloat32:
case kS390_LoadDouble:
+ case kS390_LoadReverse16:
+ case kS390_LoadReverse32:
+ case kS390_LoadReverse64:
return kIsLoadOperation;
case kS390_StoreWord8:
case kS390_StoreWord16:
case kS390_StoreWord32:
case kS390_StoreWord64:
+ case kS390_StoreReverse16:
+ case kS390_StoreReverse32:
+ case kS390_StoreReverse64:
case kS390_StoreFloat32:
case kS390_StoreDouble:
case kS390_Push:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 8a4af5e65c..6fc8a4d9f0 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -13,12 +13,12 @@ namespace internal {
namespace compiler {
enum ImmediateMode {
- kInt16Imm,
- kInt16Imm_Unsigned,
- kInt16Imm_Negate,
- kInt16Imm_4ByteAligned,
kShift32Imm,
kShift64Imm,
+ kInt32Imm,
+ kInt32Imm_Negate,
+ kUint32Imm,
+ kInt20Imm,
kNoImmediate
};
@@ -35,6 +35,16 @@ class S390OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
+ int64_t GetImmediate(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ return OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ return OpParameter<int64_t>(node);
+ else
+ UNIMPLEMENTED();
+ return 0L;
+ }
+
bool CanBeImmediate(Node* node, ImmediateMode mode) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
@@ -48,23 +58,102 @@ class S390OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(int64_t value, ImmediateMode mode) {
switch (mode) {
- case kInt16Imm:
- return is_int16(value);
- case kInt16Imm_Unsigned:
- return is_uint16(value);
- case kInt16Imm_Negate:
- return is_int16(-value);
- case kInt16Imm_4ByteAligned:
- return is_int16(value) && !(value & 3);
case kShift32Imm:
return 0 <= value && value < 32;
case kShift64Imm:
return 0 <= value && value < 64;
+ case kInt32Imm:
+ return is_int32(value);
+ case kInt32Imm_Negate:
+ return is_int32(-value);
+ case kUint32Imm:
+ return is_uint32(value);
+ case kInt20Imm:
+ return is_int20(value);
case kNoImmediate:
return false;
}
return false;
}
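The replacement modes track real s390 encodings: kInt32Imm/kUint32Imm cover the 32-bit immediates of the extended-immediate arithmetic and logical instructions, and kInt20Imm covers the signed 20-bit displacement of RXY-format memory operands. A quick sketch of the 20-bit predicate, assuming is_int20 has the usual fits-in-N-signed-bits meaning:

  #include <cstdint>

  // Signed 20-bit range [-2^19, 2^19 - 1]: the RXY displacement field
  // that kInt20Imm guards above.
  constexpr bool IsInt20(int64_t value) {
    return value >= -(int64_t{1} << 19) && value < (int64_t{1} << 19);
  }

  static_assert(IsInt20(524287) && !IsInt20(524288), "upper bound is 2^19 - 1");
  static_assert(IsInt20(-524288) && !IsInt20(-524289), "lower bound is -2^19");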
+
+ AddressingMode GenerateMemoryOperandInputs(Node* index, Node* base,
+ Node* displacement,
+ DisplacementMode displacement_mode,
+ InstructionOperand inputs[],
+ size_t* input_count) {
+ AddressingMode mode = kMode_MRI;
+ if (base != nullptr) {
+ inputs[(*input_count)++] = UseRegister(base);
+ if (index != nullptr) {
+ inputs[(*input_count)++] = UseRegister(index);
+ if (displacement != nullptr) {
+          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                         ? UseNegatedImmediate(displacement)
+                                         : UseImmediate(displacement);
+ mode = kMode_MRRI;
+ } else {
+ mode = kMode_MRR;
+ }
+ } else {
+ if (displacement == nullptr) {
+ mode = kMode_MR;
+ } else {
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
+ mode = kMode_MRI;
+ }
+ }
+ } else {
+ DCHECK_NOT_NULL(index);
+ inputs[(*input_count)++] = UseRegister(index);
+ if (displacement != nullptr) {
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
+ mode = kMode_MRI;
+ } else {
+ mode = kMode_MR;
+ }
+ }
+ return mode;
+ }
+
+ AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
+ InstructionOperand inputs[],
+ size_t* input_count) {
+#if V8_TARGET_ARCH_S390X
+ BaseWithIndexAndDisplacement64Matcher m(operand,
+ AddressOption::kAllowInputSwap);
+#else
+ BaseWithIndexAndDisplacement32Matcher m(operand,
+ AddressOption::kAllowInputSwap);
+#endif
+ DCHECK(m.matches());
+ if ((m.displacement() == nullptr ||
+ CanBeImmediate(m.displacement(), kInt20Imm))) {
+ DCHECK(m.scale() == 0);
+ return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
+ m.displacement_mode(), inputs,
+ input_count);
+ } else {
+ inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
+ inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
+ return kMode_MRR;
+ }
+ }
+
+ bool CanBeBetterLeftOperand(Node* node) const {
+ return !selector()->IsLive(node);
+ }
+
+ MachineRepresentation GetRepresentation(Node* node) {
+ return sequence()->GetRepresentation(selector()->GetVirtualRegister(node));
+ }
+
+ bool Is64BitOperand(Node* node) {
+ return MachineRepresentation::kWord64 == GetRepresentation(node);
+ }
};
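CanBeBetterLeftOperand feeds the commutative-swap logic used below: on a two-address machine the left operand's register is clobbered by the result, so it is cheaper to clobber a value with no remaining uses. A toy model of the decision, with liveness reduced to a single flag (V8 queries selector()->IsLive(node) instead):

  #include <utility>

  struct Input {
    int vreg;
    bool live_after;  // does anything still read this value later?
  };

  // Prefer a dead value on the (clobbered) left side of a commutative op,
  // saving the copy that would otherwise preserve the live operand.
  void OrderCommutativeInputs(Input& left, Input& right) {
    if (left.live_after && !right.live_after) std::swap(left, right);
  }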
namespace {
@@ -115,20 +204,50 @@ void VisitBinop(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
S390OperandGenerator g(selector);
Matcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+ // TODO(turbofan): match complex addressing modes.
+ if (left == right) {
+ // If both inputs refer to the same operand, enforce allocating a register
+ // for both of them to ensure that we don't end up generating code like
+ // this:
+ //
+ // mov rax, [rbp-0x10]
+ // add rax, [rbp-0x10]
+ // jo label
+ InstructionOperand const input = g.UseRegister(left);
+ inputs[input_count++] = input;
+ inputs[input_count++] = input;
+ } else if (g.CanBeImmediate(right, operand_mode)) {
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.UseImmediate(right);
+ } else {
+ if (node->op()->HasProperty(Operator::kCommutative) &&
+ g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ inputs[input_count++] = g.UseRegister(left);
+ inputs[input_count++] = g.UseRegister(right);
+ }
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
}
- outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsDeoptimize()) {
+ // If we can deoptimize as a result of the binop, we need to make sure that
+ // the deopt inputs are not overwritten by the binop result. One way
+ // to achieve that is to declare the output register as same-as-first.
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ } else {
+ outputs[output_count++] = g.DefineAsRegister(node);
+ }
if (cont->IsSet()) {
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
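Two allocator-facing details in this hunk deserve emphasis: identical inputs must share one operand so the register allocator cannot satisfy the two uses differently (producing the memory-operand pattern shown in the comment), and a deoptimizing binop ties its output to the first input so the result cannot land in a register that still feeds the frame state. A reduced sketch of the shared-operand case, with stand-in types:

  #include <vector>

  struct InstructionOperand { int vreg; };

  // When both inputs are the same node, emit one operand twice: both uses
  // then resolve to the same register instead of, say, one register and
  // one independently spilled stack slot.
  void AddBinopInputs(std::vector<InstructionOperand>& inputs, int left,
                      int right) {
    if (left == right) {
      InstructionOperand const shared{left};
      inputs.push_back(shared);
      inputs.push_back(shared);
    } else {
      inputs.push_back(InstructionOperand{left});
      inputs.push_back(InstructionOperand{right});
    }
  }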
@@ -141,7 +260,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -160,10 +279,7 @@ void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
S390OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* offset = node->InputAt(1);
ArchOpcode opcode = kArchNop;
- ImmediateMode mode = kInt16Imm;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
opcode = kS390_LoadFloat32;
@@ -179,20 +295,19 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
break;
#if !V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
- opcode = kS390_LoadWordS32;
-#if V8_TARGET_ARCH_S390X
- // TODO(john.yan): Remove this mode since s390 do not has this restriction
- mode = kInt16Imm_4ByteAligned;
-#endif
+ opcode = kS390_LoadWordU32;
break;
#if V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kS390_LoadWord64;
- mode = kInt16Imm_4ByteAligned;
break;
#else
case MachineRepresentation::kWord64: // Fall through.
@@ -202,16 +317,14 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset));
- } else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset));
- }
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitStore(Node* node) {
@@ -232,11 +345,7 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(base);
// OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
// for the store itself, so we must check compatibility with both.
- if (g.CanBeImmediate(offset, kInt16Imm)
-#if V8_TARGET_ARCH_S390X
- && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
-#endif
- ) {
+ if (g.CanBeImmediate(offset, kInt20Imm)) {
inputs[input_count++] = g.UseImmediate(offset);
addressing_mode = kMode_MRI;
} else {
@@ -267,7 +376,7 @@ void InstructionSelector::VisitStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode = kArchNop;
- ImmediateMode mode = kInt16Imm;
+ NodeMatcher m(value);
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kS390_StoreFloat32;
@@ -283,16 +392,27 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kS390_StoreWord16;
break;
#if !V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#endif
case MachineRepresentation::kWord32:
opcode = kS390_StoreWord32;
+ if (m.IsWord32ReverseBytes()) {
+ opcode = kS390_StoreReverse32;
+ value = value->InputAt(0);
+ }
break;
#if V8_TARGET_ARCH_S390X
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kS390_StoreWord64;
- mode = kInt16Imm_4ByteAligned;
+ if (m.IsWord64ReverseBytes()) {
+ opcode = kS390_StoreReverse64;
+ value = value->InputAt(0);
+ }
break;
#else
case MachineRepresentation::kWord64: // Fall through.
@@ -302,19 +422,25 @@ void InstructionSelector::VisitStore(Node* node) {
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value));
- } else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value));
- }
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code =
+ opcode | AddressingModeField::encode(addressing_mode);
+ InstructionOperand value_operand = g.UseRegister(value);
+ inputs[input_count++] = value_operand;
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
+ inputs);
}
}
+// The architecture supports unaligned access, so VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, so VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
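Both visitors are unreachable because z/Architecture tolerates unaligned data accesses in hardware, the same property that AlignmentRequirements() advertises as FullUnalignedAccessSupport() at the end of this file. For comparison, the portable C++ spelling of an unaligned load (memcpy is the standard-sanctioned idiom, and on such targets it compiles to a single load):

  #include <cstdint>
  #include <cstring>

  // Well-defined unaligned 32-bit load: no pointer-cast UB, and on s390
  // or x86 the memcpy lowers to one ordinary load instruction.
  uint32_t LoadUnaligned32(const void* p) {
    uint32_t v;
    std::memcpy(&v, p, sizeof(v));
    return v;
  }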
+
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
S390OperandGenerator g(this);
@@ -344,6 +470,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
opcode = kCheckedLoadFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kWord64: // Fall through.
@@ -356,7 +484,7 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned));
+ g.UseOperand(length, kUint32Imm));
}
void InstructionSelector::VisitCheckedStore(Node* node) {
@@ -389,6 +517,8 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
opcode = kCheckedStoreFloat64;
break;
case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
#if !V8_TARGET_ARCH_S390X
case MachineRepresentation::kWord64: // Fall through.
@@ -401,53 +531,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
AddressingMode addressingMode = kMode_MRR;
Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
-}
-
-template <typename Matcher>
-static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
- ArchOpcode opcode, bool left_can_cover,
- bool right_can_cover, ImmediateMode imm_mode) {
- S390OperandGenerator g(selector);
-
- // Map instruction to equivalent operation with inverted right input.
- ArchOpcode inv_opcode = opcode;
- switch (opcode) {
- case kS390_And:
- inv_opcode = kS390_AndComplement;
- break;
- case kS390_Or:
- inv_opcode = kS390_OrComplement;
- break;
- default:
- UNREACHABLE();
- }
-
- // Select Logical(y, ~x) for Logical(Xor(x, -1), y).
- if ((m->left().IsWord32Xor() || m->left().IsWord64Xor()) && left_can_cover) {
- Matcher mleft(m->left().node());
- if (mleft.right().Is(-1)) {
- selector->Emit(inv_opcode, g.DefineAsRegister(node),
- g.UseRegister(m->right().node()),
- g.UseRegister(mleft.left().node()));
- return;
- }
- }
-
- // Select Logical(x, ~y) for Logical(x, Xor(y, -1)).
- if ((m->right().IsWord32Xor() || m->right().IsWord64Xor()) &&
- right_can_cover) {
- Matcher mright(m->right().node());
- if (mright.right().Is(-1)) {
- // TODO(all): support shifted operand on right.
- selector->Emit(inv_opcode, g.DefineAsRegister(node),
- g.UseRegister(m->left().node()),
- g.UseRegister(mright.left().node()));
- return;
- }
- }
-
- VisitBinop<Matcher>(selector, node, opcode, imm_mode);
+ g.UseOperand(length, kUint32Imm), g.UseRegister(value));
}
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
@@ -505,9 +589,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
- VisitLogical<Int32BinopMatcher>(
- this, node, &m, kS390_And, CanCover(node, m.left().node()),
- CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
}
#if V8_TARGET_ARCH_S390X
@@ -559,25 +641,19 @@ void InstructionSelector::VisitWord64And(Node* node) {
}
}
}
- VisitLogical<Int64BinopMatcher>(
- this, node, &m, kS390_And, CanCover(node, m.left().node()),
- CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kUint32Imm);
}
#endif
void InstructionSelector::VisitWord32Or(Node* node) {
Int32BinopMatcher m(node);
- VisitLogical<Int32BinopMatcher>(
- this, node, &m, kS390_Or, CanCover(node, m.left().node()),
- CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Or32, kUint32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitWord64Or(Node* node) {
Int64BinopMatcher m(node);
- VisitLogical<Int64BinopMatcher>(
- this, node, &m, kS390_Or, CanCover(node, m.left().node()),
- CanCover(node, m.right().node()), kInt16Imm_Unsigned);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64, kUint32Imm);
}
#endif
@@ -585,9 +661,9 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().Is(-1)) {
- Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ Emit(kS390_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor32, kUint32Imm);
}
}
@@ -596,9 +672,9 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.right().Is(-1)) {
- Emit(kS390_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ Emit(kS390_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor, kInt16Imm_Unsigned);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64, kUint32Imm);
}
}
#endif
@@ -884,13 +960,38 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kS390_LoadReverse64RR, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ S390OperandGenerator g(this);
+ NodeMatcher input(node->InputAt(0));
+ if (CanCover(node, input.node()) && input.IsLoad()) {
+ LoadRepresentation load_rep = LoadRepresentationOf(input.node()->op());
+ if (load_rep.representation() == MachineRepresentation::kWord32) {
+ Node* base = input.node()->InputAt(0);
+ Node* offset = input.node()->InputAt(1);
+ Emit(kS390_LoadReverse32 | AddressingModeField::encode(kMode_MRR),
+ // TODO(john.yan): one of the base and offset can be imm.
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseRegister(offset));
+ return;
+ }
+ }
+ Emit(kS390_LoadReverse32RR, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
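When the byte swap's input is a covering 32-bit load, the selector fuses the pair into kS390_LoadReverse32 (the byte-reversing load, lrv family); otherwise it falls back to the register-to-register form. A portable sketch of what the fused instruction computes:

  #include <cstdint>
  #include <cstring>

  // Semantics of the fused load-reverse: load 32 bits, then swap byte
  // order. s390's lrv performs both steps in a single instruction.
  uint32_t LoadReversed32(const void* p) {
    uint32_t v;
    std::memcpy(&v, p, sizeof(v));
    return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
           ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
  }

The CanCover guard matters: if other nodes also consume the plain load, it must be materialized anyway, and fusing would only duplicate the memory access.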
+
void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm);
}
#endif
@@ -898,9 +999,10 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
S390OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.left().Is(0)) {
- Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ Emit(kS390_Neg32, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
} else {
- VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate);
}
}
@@ -909,33 +1011,125 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
S390OperandGenerator g(this);
Int64BinopMatcher m(node);
if (m.left().Is(0)) {
- Emit(kS390_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+ Emit(kS390_Neg64, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
} else {
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate);
}
}
#endif
+namespace {
+
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont);
+void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ S390OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand result_operand = g.DefineAsRegister(node);
+ InstructionOperand high32_operand = g.TempRegister();
+ InstructionOperand temp_operand = g.TempRegister();
+ {
+ InstructionOperand outputs[] = {result_operand, high32_operand};
+ InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node())};
+ selector->Emit(kS390_Mul32WithHigh32, 2, outputs, 2, inputs);
+ }
+ {
+ InstructionOperand shift_31 = g.UseImmediate(31);
+ InstructionOperand outputs[] = {temp_operand};
+ InstructionOperand inputs[] = {result_operand, shift_31};
+ selector->Emit(kS390_ShiftRightArith32, 1, outputs, 2, inputs);
+ }
+
+ VisitCompare(selector, kS390_Cmp32, high32_operand, temp_operand, cont);
+}
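The overflow test emitted here is the standard high-word check: a 32x32 multiply overflows exactly when the upper 32 bits of the 64-bit product differ from the sign-extension of the lower 32, i.e. from low >> 31 (arithmetic shift). A self-contained version of the identity, assuming the two's-complement semantics all V8 targets provide:

  #include <cstdint>

  // Mirrors the Mul32WithHigh32 / ShiftRightArith32 / Cmp32 sequence
  // above: overflow iff the high word is not the low word's sign bit
  // replicated.
  bool Mul32Overflows(int32_t a, int32_t b) {
    int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
    int32_t lo = static_cast<int32_t>(wide);
    int32_t hi = static_cast<int32_t>(wide >> 32);
    return hi != (lo >> 31);  // the kNotEqual condition used by the code
  }
  // Mul32Overflows(0x10000, 0x10000) == true; Mul32Overflows(3, 5) == false.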
+
+void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+ S390OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeImmediate(right, kInt32Imm)) {
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(right));
+ } else {
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.Use(right));
+ }
+}
+
+} // namespace
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
+ return EmitInt32MulWithOverflow(this, node, &cont);
+ }
+ VisitMul(this, node, kS390_Mul32);
+}
+
void InstructionSelector::VisitInt32Mul(Node* node) {
- VisitRRR(this, kS390_Mul32, node);
+ S390OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeImmediate(right, kInt32Imm) &&
+ base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
+ int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
+ Emit(kS390_ShiftLeft32, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(power));
+ return;
+ }
+ VisitMul(this, node, kS390_Mul32);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64Mul(Node* node) {
- VisitRRR(this, kS390_Mul64, node);
+ S390OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeImmediate(right, kInt32Imm) &&
+ base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
+    int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
+ Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseImmediate(power));
+ return;
+ }
+ VisitMul(this, node, kS390_Mul64);
}
#endif
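Both multiply visitors strength-reduce a power-of-two right operand into a shift; the shift amount is the base-2 logarithm, computed as width-1 minus the count of leading zeros (31 - clz32 here, 63 - clz64 in the 64-bit version). A quick check of the identity, with GCC/Clang builtins standing in for base::bits:

  #include <cstdint>

  // For a power of two x, (width - 1) - clz(x) is exactly log2(x).
  int Log2OfPowerOfTwo32(uint32_t x) { return 31 - __builtin_clz(x); }
  int Log2OfPowerOfTwo64(uint64_t x) { return 63 - __builtin_clzll(x); }
  // Log2OfPowerOfTwo32(8) == 3, so x * 8 becomes x << 3;
  // Log2OfPowerOfTwo64(1ull << 40) == 40.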
void InstructionSelector::VisitInt32MulHigh(Node* node) {
S390OperandGenerator g(this);
- Emit(kS390_MulHigh32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ Emit(kS390_MulHigh32, g.DefineAsRegister(node), g.UseRegister(left),
+ g.Use(right));
}
void InstructionSelector::VisitUint32MulHigh(Node* node) {
S390OperandGenerator g(this);
- Emit(kS390_MulHighU32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+ Int32BinopMatcher m(node);
+ Node* left = m.left().node();
+ Node* right = m.right().node();
+ if (g.CanBeBetterLeftOperand(right)) {
+ std::swap(left, right);
+ }
+ Emit(kS390_MulHighU32, g.DefineAsRegister(node), g.UseRegister(left),
+ g.Use(right));
}
void InstructionSelector::VisitInt32Div(Node* node) {
@@ -1042,14 +1236,12 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRR(this, kS390_DoubleToFloat32, node);
}
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, kArchTruncateDoubleToI, node);
- case TruncationMode::kRoundToZero:
- return VisitRR(this, kS390_DoubleToInt32, node);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kS390_DoubleToInt32, node);
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
@@ -1113,38 +1305,11 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
- S390OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- Emit(kS390_NegDouble, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- return;
- }
VisitRRR(this, kS390_SubFloat, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract
- S390OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- if (m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- // -floor(-x) = ceil(x)
- Emit(kS390_CeilDouble, g.DefineAsRegister(node),
- g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
- Emit(kS390_NegDouble, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
- return;
- }
VisitRRR(this, kS390_SubDouble, node);
}
@@ -1172,13 +1337,25 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
->MarkAsCall();
}
-void InstructionSelector::VisitFloat32Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRR(this, kS390_MaxFloat, node);
+}
-void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRRR(this, kS390_MaxDouble, node);
+}
-void InstructionSelector::VisitFloat32Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kS390_Float64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRR(this, kS390_MinFloat, node);
+}
-void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRRR(this, kS390_MinDouble, node);
+}
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitRR(this, kS390_AbsFloat, node);
@@ -1192,6 +1369,21 @@ void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kS390_SqrtFloat, node);
}
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ S390OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ S390OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, d1), g.UseFixed(node->InputAt(0), d1),
+ g.UseFixed(node->InputAt(1), d2))
+ ->MarkAsCall();
+}
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRR(this, kS390_SqrtDouble, node);
}
@@ -1232,47 +1424,55 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kS390_NegFloat, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kS390_NegDouble, node);
+}
+
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32,
- kInt16Imm, &cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm,
+ &cont);
}
FlagsContinuation cont;
- VisitBinop<Int32BinopMatcher>(this, node, kS390_AddWithOverflow32, kInt16Imm,
- &cont);
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm, &cont);
}
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
- kInt16Imm_Negate, &cont);
+ return VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32,
+ kInt32Imm_Negate, &cont);
}
FlagsContinuation cont;
- VisitBinop<Int32BinopMatcher>(this, node, kS390_SubWithOverflow32,
- kInt16Imm_Negate, &cont);
+ VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate,
+ &cont);
}
#if V8_TARGET_ARCH_S390X
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm,
+ return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm,
&cont);
}
FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Add, kInt16Imm, &cont);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm, &cont);
}
void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub,
- kInt16Imm_Negate, &cont);
+ return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+ kInt32Imm_Negate, &cont);
}
FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub, kInt16Imm_Negate, &cont);
+ VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate,
+ &cont);
}
#endif
@@ -1302,7 +1502,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1334,14 +1534,14 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+ ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
}
#if V8_TARGET_ARCH_S390X
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
+  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
}
#endif
@@ -1448,21 +1648,23 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop<Int32BinopMatcher>(
- selector, node, kS390_AddWithOverflow32, kInt16Imm, cont);
+ selector, node, kS390_Add32, kInt32Imm, cont);
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int32BinopMatcher>(selector, node,
- kS390_SubWithOverflow32,
- kInt16Imm_Negate, cont);
+ return VisitBinop<Int32BinopMatcher>(
+ selector, node, kS390_Sub32, kInt32Imm_Negate, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kNotEqual);
+ return EmitInt32MulWithOverflow(selector, node, cont);
#if V8_TARGET_ARCH_S390X
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Add,
- kInt16Imm, cont);
+ return VisitBinop<Int64BinopMatcher>(
+ selector, node, kS390_Add64, kInt32Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kS390_Sub,
- kInt16Imm_Negate, cont);
+ return VisitBinop<Int64BinopMatcher>(
+ selector, node, kS390_Sub64, kInt32Imm_Negate, cont);
#endif
default:
break;
@@ -1474,7 +1676,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord32Compare(selector, value, cont);
case IrOpcode::kWord32And:
return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
- kInt16Imm_Unsigned);
+ kUint32Imm);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt32Add:
// case IrOpcode::kWord32Or:
@@ -1488,7 +1690,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord64Compare(selector, value, cont);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
- kInt16Imm_Unsigned);
+ kUint32Imm);
// TODO(mbrandy): Handle?
// case IrOpcode::kInt64Add:
// case IrOpcode::kWord64Or:
@@ -1531,14 +1733,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1558,7 +1760,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
- Emit(kS390_Sub, index_operand, value_operand,
+ Emit(kS390_Sub32, index_operand, value_operand,
g.TempImmediate(sw.min_value));
}
// Generate a table lookup.
@@ -1673,7 +1875,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1750,6 +1952,61 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ S390OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kAtomicStoreWord32;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, input_count,
+ inputs);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -1761,9 +2018,18 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord32ReverseBytes |
+ MachineOperatorBuilder::kWord64ReverseBytes |
MachineOperatorBuilder::kWord64Popcnt;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 4ac65e5ae4..6bd1a17be0 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -199,11 +199,28 @@ void Schedule::AddGoto(BasicBlock* block, BasicBlock* succ) {
AddSuccessor(block, succ);
}
+#if DEBUG
+namespace {
+
+bool IsPotentiallyThrowingCall(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+ JS_OP_LIST(BUILD_BLOCK_JS_CASE)
+#undef BUILD_BLOCK_JS_CASE
+ case IrOpcode::kCall:
+ return true;
+ default:
+ return false;
+ }
+}
+
+} // namespace
+#endif // DEBUG
void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
BasicBlock* exception_block) {
DCHECK_EQ(BasicBlock::kNone, block->control());
- DCHECK_EQ(IrOpcode::kCall, call->opcode());
+ DCHECK(IsPotentiallyThrowingCall(call->opcode()));
block->set_control(BasicBlock::kCall);
AddSuccessor(block, success_block);
AddSuccessor(block, exception_block);
@@ -298,41 +315,87 @@ void Schedule::InsertSwitch(BasicBlock* block, BasicBlock* end, Node* sw,
SetControlInput(block, sw);
}
-void Schedule::EnsureSplitEdgeForm() {
+void Schedule::EnsureCFGWellFormedness() {
// Make a copy of all the blocks for the iteration, since adding the split
// edges will allocate new blocks.
BasicBlockVector all_blocks_copy(all_blocks_);
// Insert missing split edge blocks.
for (auto block : all_blocks_copy) {
- if (block->PredecessorCount() > 1 && block != end_) {
- for (auto current_pred = block->predecessors().begin();
- current_pred != block->predecessors().end(); ++current_pred) {
- BasicBlock* pred = *current_pred;
- if (pred->SuccessorCount() > 1) {
- // Found a predecessor block with multiple successors.
- BasicBlock* split_edge_block = NewBasicBlock();
- split_edge_block->set_control(BasicBlock::kGoto);
- split_edge_block->successors().push_back(block);
- split_edge_block->predecessors().push_back(pred);
- split_edge_block->set_deferred(pred->deferred());
- *current_pred = split_edge_block;
- // Find a corresponding successor in the previous block, replace it
- // with the split edge block... but only do it once, since we only
- // replace the previous blocks in the current block one at a time.
- for (auto successor = pred->successors().begin();
- successor != pred->successors().end(); ++successor) {
- if (*successor == block) {
- *successor = split_edge_block;
- break;
- }
- }
+ if (block->PredecessorCount() > 1) {
+ if (block != end_) {
+ EnsureSplitEdgeForm(block);
+ }
+ if (block->deferred()) {
+ EnsureDeferredCodeSingleEntryPoint(block);
+ }
+ }
+ }
+}
+
+void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
+ DCHECK(block->PredecessorCount() > 1 && block != end_);
+ for (auto current_pred = block->predecessors().begin();
+ current_pred != block->predecessors().end(); ++current_pred) {
+ BasicBlock* pred = *current_pred;
+ if (pred->SuccessorCount() > 1) {
+ // Found a predecessor block with multiple successors.
+ BasicBlock* split_edge_block = NewBasicBlock();
+ split_edge_block->set_control(BasicBlock::kGoto);
+ split_edge_block->successors().push_back(block);
+ split_edge_block->predecessors().push_back(pred);
+ split_edge_block->set_deferred(pred->deferred());
+ *current_pred = split_edge_block;
+ // Find a corresponding successor in the previous block, replace it
+ // with the split edge block... but only do it once, since we only
+ // replace the previous blocks in the current block one at a time.
+ for (auto successor = pred->successors().begin();
+ successor != pred->successors().end(); ++successor) {
+ if (*successor == block) {
+ *successor = split_edge_block;
+ break;
}
}
}
}
}
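+
+// Split-edge form, as established by EnsureSplitEdgeForm above (sketch): an
+// edge from a block P with several successors to a block B with several
+// predecessors is routed through a fresh forwarding block S:
+//
+//   P (branch)            P (branch)
+//   |        \            |        \
+//   |         ...   ==>   S         ...
+//   B (merge)             |
+//                         B (merge)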
+void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
+  // If a deferred block has multiple predecessors, they must all be
+  // deferred. Otherwise, we can run into a situation where a range that
+  // spills only in deferred blocks inserts its spill in the block, but
+  // other ranges need moves inserted by ResolveControlFlow in the
+  // predecessors, which may clobber the register of this range.
+  // To ensure this, when a deferred block has multiple predecessors and some
+  // of them are not deferred, we add a non-deferred block to collect all
+  // such edges.
+
+ DCHECK(block->deferred() && block->PredecessorCount() > 1);
+ bool all_deferred = true;
+ for (auto current_pred = block->predecessors().begin();
+ current_pred != block->predecessors().end(); ++current_pred) {
+ BasicBlock* pred = *current_pred;
+ if (!pred->deferred()) {
+ all_deferred = false;
+ break;
+ }
+ }
+
+ if (all_deferred) return;
+ BasicBlock* merger = NewBasicBlock();
+ merger->set_control(BasicBlock::kGoto);
+ merger->successors().push_back(block);
+ for (auto current_pred = block->predecessors().begin();
+ current_pred != block->predecessors().end(); ++current_pred) {
+ BasicBlock* pred = *current_pred;
+ merger->predecessors().push_back(pred);
+ pred->successors().clear();
+ pred->successors().push_back(merger);
+ }
+ merger->set_deferred(false);
+ block->predecessors().clear();
+ block->predecessors().push_back(merger);
+}
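+// Sketch (illustrative): all predecessors of the deferred block D are routed
+// through one new non-deferred merge block M:
+//
+//   P1   P2   P3          P1   P2   P3
+//    \   |   /             \   |   /
+//     \  |  /      ==>      M (kGoto)
+//      \ | /                    |
+//   D (deferred)          D (deferred)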
+
void Schedule::PropagateDeferredMark() {
// Push forward the deferred block marks through newly inserted blocks and
// other improperly marked blocks until a fixed point is reached.
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index c99a0fc525..74ba835518 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -257,8 +257,12 @@ class Schedule final : public ZoneObject {
friend class BasicBlockInstrumentor;
friend class RawMachineAssembler;
+ // Ensure properties of the CFG assumed by further stages.
+ void EnsureCFGWellFormedness();
// Ensure split-edge form for a hand-assembled schedule.
- void EnsureSplitEdgeForm();
+ void EnsureSplitEdgeForm(BasicBlock* block);
+ // Ensure entry into a deferred block happens from a single hot block.
+ void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block);
// Copy deferred block markers down as far as possible
void PropagateDeferredMark();
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index b04ba6f926..58c01ccf03 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -324,6 +324,10 @@ class CFGBuilder : public ZoneObject {
case IrOpcode::kSwitch:
BuildBlocksForSuccessors(node);
break;
+#define BUILD_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+ JS_OP_LIST(BUILD_BLOCK_JS_CASE)
+// JS opcodes are just like calls => fall through.
+#undef BUILD_BLOCK_JS_CASE
case IrOpcode::kCall:
if (NodeProperties::IsExceptionalCall(node)) {
BuildBlocksForSuccessors(node);
@@ -364,6 +368,10 @@ class CFGBuilder : public ZoneObject {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectThrow(node);
break;
+#define CONNECT_BLOCK_JS_CASE(Name) case IrOpcode::k##Name:
+ JS_OP_LIST(CONNECT_BLOCK_JS_CASE)
+// JS opcodes are just like calls => fall through.
+#undef CONNECT_BLOCK_JS_CASE
case IrOpcode::kCall:
if (NodeProperties::IsExceptionalCall(node)) {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 88931f5df7..de64de3e1f 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -6,17 +6,21 @@
#include <limits>
+#include "src/address-map.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/operation-typer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
+#include "src/conversions-inl.h"
#include "src/objects.h"
#include "src/type-cache.h"
@@ -47,7 +51,10 @@ enum Phase {
// the next phase can begin.
PROPAGATE,
- // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
+ // 2.) RETYPE: Propagate types from type feedback forwards.
+ RETYPE,
+
+ // 3.) LOWER: perform lowering for all {Simplified} nodes by replacing some
// operators for some nodes, expanding some nodes to multiple nodes, or
// removing some (redundant) nodes.
// During this phase, use the {RepresentationChanger} to insert
@@ -56,81 +63,77 @@ enum Phase {
LOWER
};
-
namespace {
-// The {UseInfo} class is used to describe a use of an input of a node.
-//
-// This information is used in two different ways, based on the phase:
-//
-// 1. During propagation, the use info is used to inform the input node
-// about what part of the input is used (we call this truncation) and what
-// is the preferred representation.
-//
-// 2. During lowering, the use info is used to properly convert the input
-// to the preferred representation. The preferred representation might be
-// insufficient to do the conversion (e.g. word32->float64 conv), so we also
-// need the signedness information to produce the correct value.
-class UseInfo {
- public:
- UseInfo(MachineRepresentation preferred, Truncation truncation)
- : preferred_(preferred), truncation_(truncation) {}
- static UseInfo TruncatingWord32() {
- return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
- }
- static UseInfo TruncatingWord64() {
- return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
- }
- static UseInfo Bool() {
- return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
- }
- static UseInfo Float32() {
- return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
- }
- static UseInfo Float64() {
- return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
- }
- static UseInfo PointerInt() {
- return kPointerSize == 4 ? TruncatingWord32() : TruncatingWord64();
- }
- static UseInfo AnyTagged() {
- return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
+MachineRepresentation MachineRepresentationFromArrayType(
+ ExternalArrayType array_type) {
+ switch (array_type) {
+ case kExternalUint8Array:
+ case kExternalUint8ClampedArray:
+ case kExternalInt8Array:
+ return MachineRepresentation::kWord8;
+ case kExternalUint16Array:
+ case kExternalInt16Array:
+ return MachineRepresentation::kWord16;
+ case kExternalUint32Array:
+ case kExternalInt32Array:
+ return MachineRepresentation::kWord32;
+ case kExternalFloat32Array:
+ return MachineRepresentation::kFloat32;
+ case kExternalFloat64Array:
+ return MachineRepresentation::kFloat64;
}
+ UNREACHABLE();
+ return MachineRepresentation::kNone;
+}
- // Undetermined representation.
- static UseInfo Any() {
- return UseInfo(MachineRepresentation::kNone, Truncation::Any());
- }
- static UseInfo None() {
- return UseInfo(MachineRepresentation::kNone, Truncation::None());
- }
- static UseInfo AnyTruncatingToBool() {
- return UseInfo(MachineRepresentation::kNone, Truncation::Bool());
+UseInfo CheckedUseInfoAsWord32FromHint(NumberOperationHint hint) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ return UseInfo::CheckedSignedSmallAsWord32();
+ case NumberOperationHint::kSigned32:
+ return UseInfo::CheckedSigned32AsWord32();
+ case NumberOperationHint::kNumber:
+ return UseInfo::CheckedNumberAsWord32();
+ case NumberOperationHint::kNumberOrOddball:
+ return UseInfo::CheckedNumberOrOddballAsWord32();
}
+ UNREACHABLE();
+ return UseInfo::None();
+}
- MachineRepresentation preferred() const { return preferred_; }
- Truncation truncation() const { return truncation_; }
-
- private:
- MachineRepresentation preferred_;
- Truncation truncation_;
-};
-
+UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ case NumberOperationHint::kSigned32:
+ // Not used currently.
+ UNREACHABLE();
+ break;
+ case NumberOperationHint::kNumber:
+ return UseInfo::CheckedNumberAsFloat64();
+ case NumberOperationHint::kNumberOrOddball:
+ return UseInfo::CheckedNumberOrOddballAsFloat64();
+ }
+ UNREACHABLE();
+ return UseInfo::None();
+}
UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
switch (rep) {
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
return UseInfo::AnyTagged();
case MachineRepresentation::kFloat64:
- return UseInfo::Float64();
+ return UseInfo::TruncatingFloat64();
case MachineRepresentation::kFloat32:
- return UseInfo::Float32();
+ return UseInfo::TruncatingFloat32();
case MachineRepresentation::kWord64:
- return UseInfo::TruncatingWord64();
+ return UseInfo::TruncatingWord64();
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- return UseInfo::TruncatingWord32();
+ return UseInfo::TruncatingWord32();
case MachineRepresentation::kBit:
return UseInfo::Bool();
case MachineRepresentation::kSimd128: // Fall through.
@@ -151,57 +154,34 @@ UseInfo UseInfoForBasePointer(const ElementAccess& access) {
return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::PointerInt();
}
-
-#ifdef DEBUG
-// Helpers for monotonicity checking.
-bool MachineRepresentationIsSubtype(MachineRepresentation r1,
- MachineRepresentation r2) {
- switch (r1) {
- case MachineRepresentation::kNone:
- return true;
- case MachineRepresentation::kBit:
- return r2 == MachineRepresentation::kBit ||
- r2 == MachineRepresentation::kTagged;
- case MachineRepresentation::kWord8:
- return r2 == MachineRepresentation::kWord8 ||
- r2 == MachineRepresentation::kWord16 ||
- r2 == MachineRepresentation::kWord32 ||
- r2 == MachineRepresentation::kWord64 ||
- r2 == MachineRepresentation::kFloat32 ||
- r2 == MachineRepresentation::kFloat64 ||
- r2 == MachineRepresentation::kTagged;
- case MachineRepresentation::kWord16:
- return r2 == MachineRepresentation::kWord16 ||
- r2 == MachineRepresentation::kWord32 ||
- r2 == MachineRepresentation::kWord64 ||
- r2 == MachineRepresentation::kFloat32 ||
- r2 == MachineRepresentation::kFloat64 ||
- r2 == MachineRepresentation::kTagged;
- case MachineRepresentation::kWord32:
- return r2 == MachineRepresentation::kWord32 ||
- r2 == MachineRepresentation::kWord64 ||
- r2 == MachineRepresentation::kFloat64 ||
- r2 == MachineRepresentation::kTagged;
- case MachineRepresentation::kWord64:
- return r2 == MachineRepresentation::kWord64;
- case MachineRepresentation::kFloat32:
- return r2 == MachineRepresentation::kFloat32 ||
- r2 == MachineRepresentation::kFloat64 ||
- r2 == MachineRepresentation::kTagged;
- case MachineRepresentation::kFloat64:
- return r2 == MachineRepresentation::kFloat64 ||
- r2 == MachineRepresentation::kTagged;
- case MachineRepresentation::kSimd128:
- return r2 == MachineRepresentation::kSimd128 ||
- r2 == MachineRepresentation::kTagged;
- case MachineRepresentation::kTagged:
- return r2 == MachineRepresentation::kTagged;
+void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ }
}
- UNREACHABLE();
- return false;
}
+void ChangeToPureOp(Node* node, const Operator* new_op) {
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ // Disconnect the node from effect and control chains.
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ ReplaceEffectControlUses(node, effect, control);
+ node->TrimInputCount(new_op->ValueInputCount());
+ } else {
+ DCHECK_EQ(0, node->op()->ControlInputCount());
+ }
+ NodeProperties::ChangeOp(node, new_op);
+}
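+
+// Rewiring sketch for ChangeToPureOp (illustrative):
+//
+//   effect:   e0 -> node -> effect uses         e0 -> effect uses
+//   control:  c0 -> node -> control uses   ==>  c0 -> control uses
+//   value:          node -> value uses          node(new_op) -> value uses
+//
+// Effect and control users are pointed past the node, which keeps only its
+// value inputs before the operator is swapped.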
+#ifdef DEBUG
+// Helpers for monotonicity checking.
class InputUseInfos {
public:
explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}
@@ -220,8 +200,7 @@ class InputUseInfos {
ZoneVector<UseInfo> input_use_infos_;
static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
- return MachineRepresentationIsSubtype(use1.preferred(), use2.preferred()) &&
- use1.truncation().IsLessGeneralThan(use2.truncation());
+ return use1.truncation().IsLessGeneralThan(use2.truncation());
}
};
@@ -233,7 +212,7 @@ class InputUseInfos {
class RepresentationSelector {
public:
// Information for each node tracked during the fixpoint.
- class NodeInfo {
+ class NodeInfo final {
public:
// Adds new use to the node. Returns true if something has changed
// and the node has to be requeued.
@@ -243,27 +222,43 @@ class RepresentationSelector {
return truncation_ != old_truncation;
}
- void set_queued(bool value) { queued_ = value; }
- bool queued() const { return queued_; }
- void set_visited() { visited_ = true; }
- bool visited() const { return visited_; }
+ void set_queued() { state_ = kQueued; }
+ void set_visited() { state_ = kVisited; }
+ void set_pushed() { state_ = kPushed; }
+ void reset_state() { state_ = kUnvisited; }
+ bool visited() const { return state_ == kVisited; }
+ bool queued() const { return state_ == kQueued; }
+ bool unvisited() const { return state_ == kUnvisited; }
Truncation truncation() const { return truncation_; }
void set_output(MachineRepresentation output) { representation_ = output; }
MachineRepresentation representation() const { return representation_; }
+ // Helpers for feedback typing.
+ void set_feedback_type(Type* type) { feedback_type_ = type; }
+ Type* feedback_type() const { return feedback_type_; }
+ void set_weakened() { weakened_ = true; }
+ bool weakened() const { return weakened_; }
+ void set_restriction_type(Type* type) { restriction_type_ = type; }
+ Type* restriction_type() const { return restriction_type_; }
+
private:
- bool queued_ = false; // Bookkeeping for the traversal.
- bool visited_ = false; // Bookkeeping for the traversal.
+ enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued };
+ State state_ = kUnvisited;
MachineRepresentation representation_ =
MachineRepresentation::kNone; // Output representation.
Truncation truncation_ = Truncation::None(); // Information about uses.
+
+ Type* restriction_type_ = Type::Any();
+ Type* feedback_type_ = nullptr;
+ bool weakened_ = false;
};
RepresentationSelector(JSGraph* jsgraph, Zone* zone,
RepresentationChanger* changer,
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
+ zone_(zone),
count_(jsgraph->graph()->NodeCount()),
info_(count_, zone),
#ifdef DEBUG
@@ -274,11 +269,258 @@ class RepresentationSelector {
phase_(PROPAGATE),
changer_(changer),
queue_(zone),
+ typing_stack_(zone),
source_positions_(source_positions),
- type_cache_(TypeCache::Get()) {
+ type_cache_(TypeCache::Get()),
+ op_typer_(jsgraph->isolate(), graph_zone()) {
+ }
+
+ // Forward propagation of types from type feedback.
+ void RunTypePropagationPhase() {
+ // Run type propagation.
+ TRACE("--{Type propagation phase}--\n");
+ phase_ = RETYPE;
+ ResetNodeInfoState();
+
+ DCHECK(typing_stack_.empty());
+ typing_stack_.push({graph()->end(), 0});
+ GetInfo(graph()->end())->set_pushed();
+ while (!typing_stack_.empty()) {
+ NodeState& current = typing_stack_.top();
+
+ // If there is an unvisited input, push it and continue.
+ bool pushed_unvisited = false;
+ while (current.input_index < current.node->InputCount()) {
+ Node* input = current.node->InputAt(current.input_index);
+ NodeInfo* input_info = GetInfo(input);
+ current.input_index++;
+ if (input_info->unvisited()) {
+ input_info->set_pushed();
+ typing_stack_.push({input, 0});
+ pushed_unvisited = true;
+ break;
+ }
+ }
+ if (pushed_unvisited) continue;
+
+ // Process the top of the stack.
+ Node* node = current.node;
+ typing_stack_.pop();
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ bool updated = UpdateFeedbackType(node);
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+ VisitNode(node, info->truncation(), nullptr);
+ if (updated) {
+ for (Node* const user : node->uses()) {
+ if (GetInfo(user)->visited()) {
+ GetInfo(user)->set_queued();
+ queue_.push(user);
+ }
+ }
+ }
+ }
+
+ // Process the revisit queue.
+ while (!queue_.empty()) {
+ Node* node = queue_.front();
+ queue_.pop();
+ NodeInfo* info = GetInfo(node);
+ info->set_visited();
+ bool updated = UpdateFeedbackType(node);
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+ VisitNode(node, info->truncation(), nullptr);
+ if (updated) {
+ for (Node* const user : node->uses()) {
+ if (GetInfo(user)->visited()) {
+ GetInfo(user)->set_queued();
+ queue_.push(user);
+ }
+ }
+ }
+ }
}
- void Run(SimplifiedLowering* lowering) {
+ void ResetNodeInfoState() {
+ // Clean up for the next phase.
+ for (NodeInfo& info : info_) {
+ info.reset_state();
+ }
+ }
+
+ Type* TypeOf(Node* node) {
+ Type* type = GetInfo(node)->feedback_type();
+ return type == nullptr ? NodeProperties::GetType(node) : type;
+ }
+
+ Type* FeedbackTypeOf(Node* node) {
+ Type* type = GetInfo(node)->feedback_type();
+ return type == nullptr ? Type::None() : type;
+ }
+
+ Type* TypePhi(Node* node) {
+ int arity = node->op()->ValueInputCount();
+ Type* type = FeedbackTypeOf(node->InputAt(0));
+ for (int i = 1; i < arity; ++i) {
+ type = op_typer_.Merge(type, FeedbackTypeOf(node->InputAt(i)));
+ }
+ return type;
+ }
+
+ Type* TypeSelect(Node* node) {
+ return op_typer_.Merge(FeedbackTypeOf(node->InputAt(1)),
+ FeedbackTypeOf(node->InputAt(2)));
+ }
+
+ bool UpdateFeedbackType(Node* node) {
+ if (node->op()->ValueOutputCount() == 0) return false;
+
+ NodeInfo* info = GetInfo(node);
+ Type* type = info->feedback_type();
+ Type* new_type = type;
+
+ // For any non-phi node just wait until we get all inputs typed. We only
+ // allow untyped inputs for phi nodes because phis are the only places
+ // where cycles need to be broken.
+ if (node->opcode() != IrOpcode::kPhi) {
+ for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+ if (GetInfo(node->InputAt(i))->feedback_type() == nullptr) {
+ return false;
+ }
+ }
+ }
+
+ switch (node->opcode()) {
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = op_typer_.Name(FeedbackTypeOf(node->InputAt(0)), \
+ FeedbackTypeOf(node->InputAt(1))); \
+ break; \
+ }
+ SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = \
+ Type::Intersect(op_typer_.Name(FeedbackTypeOf(node->InputAt(0)), \
+ FeedbackTypeOf(node->InputAt(1))), \
+ info->restriction_type(), graph_zone()); \
+ break; \
+ }
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(Name) \
+ case IrOpcode::k##Name: { \
+ new_type = op_typer_.Name(FeedbackTypeOf(node->InputAt(0))); \
+ break; \
+ }
+ SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+ case IrOpcode::kPlainPrimitiveToNumber:
+ new_type = op_typer_.ToNumber(FeedbackTypeOf(node->InputAt(0)));
+ break;
+
+ case IrOpcode::kPhi: {
+ new_type = TypePhi(node);
+ if (type != nullptr) {
+ new_type = Weaken(node, type, new_type);
+ }
+ break;
+ }
+
+ case IrOpcode::kTypeGuard: {
+ new_type = op_typer_.TypeTypeGuard(node->op(),
+ FeedbackTypeOf(node->InputAt(0)));
+ break;
+ }
+
+ case IrOpcode::kSelect: {
+ new_type = TypeSelect(node);
+ break;
+ }
+
+ default:
+ // Shortcut for operations that we do not handle.
+ if (type == nullptr) {
+ GetInfo(node)->set_feedback_type(NodeProperties::GetType(node));
+ return true;
+ }
+ return false;
+ }
+ // We need to guarantee that the feedback type is a subtype of the upper
+ // bound. Naively that should hold, but weakening can actually produce
+ // a bigger type if we are unlucky with ordering of phi typing. To be
+ // really sure, just intersect the upper bound with the feedback type.
+ new_type = Type::Intersect(GetUpperBound(node), new_type, graph_zone());
+
+ if (type != nullptr && new_type->Is(type)) return false;
+ GetInfo(node)->set_feedback_type(new_type);
+ if (FLAG_trace_representation) {
+ PrintNodeFeedbackType(node);
+ }
+ return true;
+ }
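+  // Note on termination (informal): users are requeued only while a node's
+  // feedback type keeps changing, and Weaken below forces phi types to climb
+  // a short, pre-determined chain of ranges rather than growing one step per
+  // loop iteration, so the fixpoint above converges quickly.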
+
+ void PrintNodeFeedbackType(Node* n) {
+ OFStream os(stdout);
+ os << "#" << n->id() << ":" << *n->op() << "(";
+ int j = 0;
+ for (Node* const i : n->inputs()) {
+ if (j++ > 0) os << ", ";
+ os << "#" << i->id() << ":" << i->op()->mnemonic();
+ }
+ os << ")";
+ if (NodeProperties::IsTyped(n)) {
+ os << " [Static type: ";
+ Type* static_type = NodeProperties::GetType(n);
+ static_type->PrintTo(os);
+ Type* feedback_type = GetInfo(n)->feedback_type();
+ if (feedback_type != nullptr && feedback_type != static_type) {
+ os << ", Feedback type: ";
+ feedback_type->PrintTo(os);
+ }
+ os << "]";
+ }
+ os << std::endl;
+ }
+
+ Type* Weaken(Node* node, Type* previous_type, Type* current_type) {
+ // If the types have nothing to do with integers, return the types.
+ Type* const integer = type_cache_.kInteger;
+ if (!previous_type->Maybe(integer)) {
+ return current_type;
+ }
+ DCHECK(current_type->Maybe(integer));
+
+ Type* current_integer =
+ Type::Intersect(current_type, integer, graph_zone());
+ Type* previous_integer =
+ Type::Intersect(previous_type, integer, graph_zone());
+
+ // Once we start weakening a node, we should always weaken.
+ if (!GetInfo(node)->weakened()) {
+ // Only weaken if there is range involved; we should converge quickly
+ // for all other types (the exception is a union of many constants,
+ // but we currently do not increase the number of constants in unions).
+ Type* previous = previous_integer->GetRange();
+ Type* current = current_integer->GetRange();
+ if (current == nullptr || previous == nullptr) {
+ return current_type;
+ }
+ // Range is involved => we are weakening.
+ GetInfo(node)->set_weakened();
+ }
+
+ return Type::Union(current_type,
+ op_typer_.WeakenRange(previous_integer, current_integer),
+ graph_zone());
+ }
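+  // Worked example (illustrative): a loop counter phi might otherwise see its
+  // feedback type grow Range(0,0) -> Range(0,1) -> Range(0,2) -> ..., one
+  // revisit per step. WeakenRange instead jumps to a much wider range right
+  // away, after which new_type->Is(type) holds and requeueing stops.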
+
+ // Backward propagation of truncations.
+ void RunTruncationPropagationPhase() {
// Run propagation phase to a fixpoint.
TRACE("--{Propagation phase}--\n");
phase_ = PROPAGATE;
@@ -288,13 +530,20 @@ class RepresentationSelector {
Node* node = queue_.front();
NodeInfo* info = GetInfo(node);
queue_.pop();
- info->set_queued(false);
- TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
+ info->set_visited();
+ TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(),
+ info->truncation().description());
VisitNode(node, info->truncation(), nullptr);
TRACE(" ==> output ");
PrintOutputInfo(info);
TRACE("\n");
}
+ }
+
+ void Run(SimplifiedLowering* lowering) {
+ RunTruncationPropagationPhase();
+
+ RunTypePropagationPhase();
// Run lowering and change insertion phase.
TRACE("--{Simplified lowering phase}--\n");
@@ -316,6 +565,7 @@ class RepresentationSelector {
Node* node = *i;
Node* replacement = *(++i);
node->ReplaceUses(replacement);
+ node->Kill();
// We also need to replace the node in the rest of the vector.
for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) {
++j;
@@ -326,8 +576,7 @@ class RepresentationSelector {
void EnqueueInitial(Node* node) {
NodeInfo* info = GetInfo(node);
- info->set_visited();
- info->set_queued(true);
+ info->set_queued();
nodes_.push_back(node);
queue_.push(node);
}
@@ -345,24 +594,23 @@ class RepresentationSelector {
node_input_use_infos_[use_node->id()].SetAndCheckInput(use_node, index,
use_info);
#endif // DEBUG
- if (!info->visited()) {
+ if (info->unvisited()) {
// First visit of this node.
- info->set_visited();
- info->set_queued(true);
+ info->set_queued();
nodes_.push_back(node);
queue_.push(node);
- TRACE(" initial: ");
+ TRACE(" initial #%i: ", node->id());
info->AddUse(use_info);
PrintTruncation(info->truncation());
return;
}
- TRACE(" queue?: ");
+ TRACE(" queue #%i?: ", node->id());
PrintTruncation(info->truncation());
if (info->AddUse(use_info)) {
// New usage information for the node is available.
if (!info->queued()) {
queue_.push(node);
- info->set_queued(true);
+ info->set_queued();
TRACE(" added: ");
} else {
TRACE(" inqueue: ");
@@ -371,61 +619,72 @@ class RepresentationSelector {
}
}
- bool lower() { return phase_ == LOWER; }
+ bool lower() const { return phase_ == LOWER; }
+ bool retype() const { return phase_ == RETYPE; }
+ bool propagate() const { return phase_ == PROPAGATE; }
- void EnqueueUses(Node* node) {
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsValueEdge(edge)) {
- Node* const user = edge.from();
- if (user->id() < count_) {
- // New type information for the node is available.
- NodeInfo* info = GetInfo(user);
- // Enqueue the node only if we are sure it is reachable from
- // the end and it has not been queued yet.
- if (info->visited() && !info->queued()) {
- queue_.push(user);
- info->set_queued(true);
- }
- }
- }
+ void SetOutput(Node* node, MachineRepresentation representation,
+ Type* restriction_type = Type::Any()) {
+ NodeInfo* const info = GetInfo(node);
+ switch (phase_) {
+ case PROPAGATE:
+ info->set_restriction_type(restriction_type);
+ break;
+ case RETYPE:
+ DCHECK(info->restriction_type()->Is(restriction_type));
+ DCHECK(restriction_type->Is(info->restriction_type()));
+ info->set_output(representation);
+ break;
+ case LOWER:
+ DCHECK_EQ(info->representation(), representation);
+ DCHECK(info->restriction_type()->Is(restriction_type));
+ DCHECK(restriction_type->Is(info->restriction_type()));
+ break;
}
}
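+
+  // Phase protocol for SetOutput (summary): PROPAGATE records the restriction
+  // type, RETYPE commits the output representation, and LOWER only re-asserts
+  // that both are unchanged, so call sites can pass the same arguments in
+  // every phase.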
- void SetOutput(Node* node, MachineRepresentation representation) {
- NodeInfo* info = GetInfo(node);
- DCHECK(
- MachineRepresentationIsSubtype(info->representation(), representation));
- info->set_output(representation);
+ Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
+
+ bool InputCannotBe(Node* node, Type* type) {
+ DCHECK_EQ(1, node->op()->ValueInputCount());
+ return !GetUpperBound(node->InputAt(0))->Maybe(type);
}
- Type* GetUpperBound(Node* node) { return NodeProperties::GetType(node); }
+ bool InputIs(Node* node, Type* type) {
+ DCHECK_EQ(1, node->op()->ValueInputCount());
+ return GetUpperBound(node->InputAt(0))->Is(type);
+ }
bool BothInputsAreSigned32(Node* node) {
- DCHECK_EQ(2, node->InputCount());
- return GetUpperBound(node->InputAt(0))->Is(Type::Signed32()) &&
- GetUpperBound(node->InputAt(1))->Is(Type::Signed32());
+ return BothInputsAre(node, Type::Signed32());
}
bool BothInputsAreUnsigned32(Node* node) {
- DCHECK_EQ(2, node->InputCount());
- return GetUpperBound(node->InputAt(0))->Is(Type::Unsigned32()) &&
- GetUpperBound(node->InputAt(1))->Is(Type::Unsigned32());
+ return BothInputsAre(node, Type::Unsigned32());
}
bool BothInputsAre(Node* node, Type* type) {
- DCHECK_EQ(2, node->InputCount());
+ DCHECK_EQ(2, node->op()->ValueInputCount());
return GetUpperBound(node->InputAt(0))->Is(type) &&
GetUpperBound(node->InputAt(1))->Is(type);
}
+ bool OneInputCannotBe(Node* node, Type* type) {
+ DCHECK_EQ(2, node->op()->ValueInputCount());
+ return !GetUpperBound(node->InputAt(0))->Maybe(type) ||
+ !GetUpperBound(node->InputAt(1))->Maybe(type);
+ }
+
void ConvertInput(Node* node, int index, UseInfo use) {
Node* input = node->InputAt(index);
// In the change phase, insert a change before the use if necessary.
- if (use.preferred() == MachineRepresentation::kNone)
+ if (use.representation() == MachineRepresentation::kNone)
return; // No input requirement on the use.
+ DCHECK_NOT_NULL(input);
NodeInfo* input_info = GetInfo(input);
MachineRepresentation input_rep = input_info->representation();
- if (input_rep != use.preferred()) {
+ if (input_rep != use.representation() ||
+ use.type_check() != TypeCheckKind::kNone) {
// Output representation doesn't match usage.
TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
index, input->id(), input->op()->mnemonic());
@@ -435,17 +694,21 @@ class RepresentationSelector {
PrintUseInfo(use);
TRACE("\n");
Node* n = changer_->GetRepresentationFor(
- input, input_info->representation(), GetUpperBound(input),
- use.preferred(), use.truncation());
+ input, input_info->representation(), TypeOf(input), node, use);
node->ReplaceInput(index, n);
}
}
void ProcessInput(Node* node, int index, UseInfo use) {
- if (phase_ == PROPAGATE) {
- EnqueueInput(node, index, use);
- } else {
- ConvertInput(node, index, use);
+ switch (phase_) {
+ case PROPAGATE:
+ EnqueueInput(node, index, use);
+ break;
+ case RETYPE:
+ break;
+ case LOWER:
+ ConvertInput(node, index, use);
+ break;
}
}
@@ -468,38 +731,65 @@ class RepresentationSelector {
// values {kTypeAny}.
void VisitInputs(Node* node) {
int tagged_count = node->op()->ValueInputCount() +
- OperatorProperties::GetContextInputCount(node->op());
- // Visit value and context inputs as tagged.
+ OperatorProperties::GetContextInputCount(node->op()) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+ // Visit value, context and frame state inputs as tagged.
for (int i = 0; i < tagged_count; i++) {
ProcessInput(node, i, UseInfo::AnyTagged());
}
- // Only enqueue other inputs (framestates, effects, control).
+ // Only enqueue other inputs (effects, control).
for (int i = tagged_count; i < node->InputCount(); i++) {
EnqueueInput(node, i);
}
}
+ // Helper for an unused node.
+ void VisitUnused(Node* node) {
+ int value_count = node->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(node->op()) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+ for (int i = 0; i < value_count; i++) {
+ ProcessInput(node, i, UseInfo::None());
+ }
+ ProcessRemainingInputs(node, value_count);
+ if (lower()) Kill(node);
+ }
+
// Helper for binops of the R x L -> O variety.
void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
- MachineRepresentation output) {
+ MachineRepresentation output,
+ Type* restriction_type = Type::Any()) {
DCHECK_EQ(2, node->op()->ValueInputCount());
ProcessInput(node, 0, left_use);
ProcessInput(node, 1, right_use);
for (int i = 2; i < node->InputCount(); i++) {
EnqueueInput(node, i);
}
- SetOutput(node, output);
+ SetOutput(node, output, restriction_type);
}
// Helper for binops of the I x I -> O variety.
- void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output) {
- VisitBinop(node, input_use, input_use, output);
+ void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output,
+ Type* restriction_type = Type::Any()) {
+ VisitBinop(node, input_use, input_use, output, restriction_type);
+ }
+
+ void VisitSpeculativeInt32Binop(Node* node) {
+ DCHECK_EQ(2, node->op()->ValueInputCount());
+ if (BothInputsAre(node, Type::NumberOrOddball())) {
+ return VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ }
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ return VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32);
}
// Helper for unops of the I -> O variety.
void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output) {
- DCHECK_EQ(1, node->InputCount());
+ DCHECK_EQ(1, node->op()->ValueInputCount());
ProcessInput(node, 0, input_use);
+ ProcessRemainingInputs(node, 1);
SetOutput(node, output);
}
@@ -511,7 +801,8 @@ class RepresentationSelector {
// Helpers for specific types of binops.
void VisitFloat64Binop(Node* node) {
- VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ VisitBinop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
}
void VisitInt32Binop(Node* node) {
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -534,7 +825,7 @@ class RepresentationSelector {
MachineRepresentation::kWord64);
}
void VisitFloat64Cmp(Node* node) {
- VisitBinop(node, UseInfo::Float64(), MachineRepresentation::kBit);
+ VisitBinop(node, UseInfo::TruncatingFloat64(), MachineRepresentation::kBit);
}
void VisitInt32Cmp(Node* node) {
VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kBit);
@@ -550,17 +841,29 @@ class RepresentationSelector {
}
// Infer representation for phi-like nodes.
- MachineRepresentation GetOutputInfoForPhi(Node* node, Truncation use) {
+ // The {node} parameter is only used to decide on the int64 representation.
+ // Once the type system supports an external pointer type, the {node}
+ // parameter can be removed.
+ MachineRepresentation GetOutputInfoForPhi(Node* node, Type* type,
+ Truncation use) {
// Compute the representation.
- Type* type = GetUpperBound(node);
if (type->Is(Type::None())) {
return MachineRepresentation::kNone;
} else if (type->Is(Type::Signed32()) || type->Is(Type::Unsigned32())) {
return MachineRepresentation::kWord32;
- } else if (use.TruncatesToWord32()) {
+ } else if (type->Is(Type::NumberOrOddball()) && use.IsUsedAsWord32()) {
return MachineRepresentation::kWord32;
} else if (type->Is(Type::Boolean())) {
return MachineRepresentation::kBit;
+ } else if (type->Is(Type::NumberOrOddball()) && use.IsUsedAsFloat64()) {
+ return MachineRepresentation::kFloat64;
+ } else if (type->Is(
+ Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) {
+ // TODO(turbofan): For Phis that return either NaN or some Smi, it's
+ // beneficial to not go all the way to double, unless the uses are
+ // double uses. For tagging that just means some potentially expensive
+ // allocation code; we might want to do the same for -0 as well?
+ return MachineRepresentation::kTagged;
} else if (type->Is(Type::Number())) {
return MachineRepresentation::kFloat64;
} else if (type->Is(Type::Internal())) {
@@ -573,6 +876,7 @@ class RepresentationSelector {
MachineRepresentation::kWord64;
#ifdef DEBUG
// Check that all the inputs agree on being Word64.
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode()); // This only works for phis.
for (int i = 1; i < node->op()->ValueInputCount(); i++) {
DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
MachineRepresentation::kWord64);
@@ -589,7 +893,8 @@ class RepresentationSelector {
SimplifiedLowering* lowering) {
ProcessInput(node, 0, UseInfo::Bool());
- MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
+ MachineRepresentation output =
+ GetOutputInfoForPhi(node, TypeOf(node), truncation);
SetOutput(node, output);
if (lower()) {
@@ -610,7 +915,10 @@ class RepresentationSelector {
// Helper for handling phis.
void VisitPhi(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- MachineRepresentation output = GetOutputInfoForPhi(node, truncation);
+ MachineRepresentation output =
+ GetOutputInfoForPhi(node, TypeOf(node), truncation);
+ // Only set the output representation if not running with type
+ // feedback. (Feedback typing will set the representation.)
SetOutput(node, output);
int values = node->op()->ValueInputCount();
@@ -622,7 +930,7 @@ class RepresentationSelector {
}
// Convert inputs to the output representation of this phi, pass the
- // truncation truncation along.
+ // truncation along.
UseInfo input_use(output, truncation);
for (int i = 0; i < node->InputCount(); i++) {
ProcessInput(node, i, i < values ? input_use : UseInfo::None());
@@ -630,32 +938,31 @@ class RepresentationSelector {
}
void VisitCall(Node* node, SimplifiedLowering* lowering) {
- const CallDescriptor* desc = OpParameter<const CallDescriptor*>(node->op());
- const MachineSignature* sig = desc->GetMachineSignature();
- int params = static_cast<int>(sig->parameter_count());
+ const CallDescriptor* desc = CallDescriptorOf(node->op());
+ int params = static_cast<int>(desc->ParameterCount());
+ int value_input_count = node->op()->ValueInputCount();
// Propagate representation information from call descriptor.
- for (int i = 0; i < node->InputCount(); i++) {
+ for (int i = 0; i < value_input_count; i++) {
if (i == 0) {
// The target of the call.
- ProcessInput(node, i, UseInfo::None());
+ ProcessInput(node, i, UseInfo::Any());
} else if ((i - 1) < params) {
ProcessInput(node, i, TruncatingUseInfoFromRepresentation(
- sig->GetParam(i - 1).representation()));
+ desc->GetInputType(i).representation()));
} else {
- ProcessInput(node, i, UseInfo::None());
+ ProcessInput(node, i, UseInfo::AnyTagged());
}
}
+ ProcessRemainingInputs(node, value_input_count);
- if (sig->return_count() > 0) {
- SetOutput(node,
- desc->GetMachineSignature()->GetReturn().representation());
+ if (desc->ReturnCount() > 0) {
+ SetOutput(node, desc->GetReturnType(0).representation());
} else {
SetOutput(node, MachineRepresentation::kTagged);
}
}
MachineSemantic DeoptValueSemanticOf(Type* type) {
- CHECK(!type->Is(Type::None()));
// We only need signedness to do deopt correctly.
if (type->Is(Type::Signed32())) {
return MachineSemantic::kInt32;
@@ -667,11 +974,11 @@ class RepresentationSelector {
}
void VisitStateValues(Node* node) {
- if (phase_ == PROPAGATE) {
+ if (propagate()) {
for (int i = 0; i < node->InputCount(); i++) {
EnqueueInput(node, i, UseInfo::Any());
}
- } else {
+ } else if (lower()) {
Zone* zone = jsgraph_->zone();
ZoneVector<MachineType>* types =
new (zone->New(sizeof(ZoneVector<MachineType>)))
@@ -680,7 +987,7 @@ class RepresentationSelector {
Node* input = node->InputAt(i);
NodeInfo* input_info = GetInfo(input);
MachineType machine_type(input_info->representation(),
- DeoptValueSemanticOf(GetUpperBound(input)));
+ DeoptValueSemanticOf(TypeOf(input)));
DCHECK(machine_type.representation() !=
MachineRepresentation::kWord32 ||
machine_type.semantic() == MachineSemantic::kInt32 ||
@@ -697,23 +1004,197 @@ class RepresentationSelector {
return changer_->Int32OperatorFor(node->opcode());
}
+ const Operator* Int32OverflowOp(Node* node) {
+ return changer_->Int32OverflowOperatorFor(node->opcode());
+ }
+
const Operator* Uint32Op(Node* node) {
return changer_->Uint32OperatorFor(node->opcode());
}
+ const Operator* Uint32OverflowOp(Node* node) {
+ return changer_->Uint32OverflowOperatorFor(node->opcode());
+ }
+
const Operator* Float64Op(Node* node) {
return changer_->Float64OperatorFor(node->opcode());
}
+ WriteBarrierKind WriteBarrierKindFor(
+ BaseTaggedness base_taggedness,
+ MachineRepresentation field_representation, Type* field_type,
+ Node* value) {
+ if (base_taggedness == kTaggedBase &&
+ field_representation == MachineRepresentation::kTagged) {
+ Type* value_type = NodeProperties::GetType(value);
+ if (field_type->Is(Type::TaggedSigned()) ||
+ value_type->Is(Type::TaggedSigned())) {
+ // Write barriers are only for stores of heap objects.
+ return kNoWriteBarrier;
+ }
+ if (field_type->Is(Type::BooleanOrNullOrUndefined()) ||
+ value_type->Is(Type::BooleanOrNullOrUndefined())) {
+ // Write barriers are not necessary when storing true, false, null or
+ // undefined, because these special oddballs are always in the root set.
+ return kNoWriteBarrier;
+ }
+ if (value_type->IsConstant() &&
+ value_type->AsConstant()->Value()->IsHeapObject()) {
+ Handle<HeapObject> value_object =
+ Handle<HeapObject>::cast(value_type->AsConstant()->Value());
+ RootIndexMap root_index_map(jsgraph_->isolate());
+ int root_index = root_index_map.Lookup(*value_object);
+ if (root_index != RootIndexMap::kInvalidRootIndex &&
+ jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
+ if (value_object->IsMap()) {
+ // Write barriers for storing maps are cheaper.
+ return kMapWriteBarrier;
+ }
+ }
+ if (field_type->Is(Type::TaggedPointer()) ||
+ value_type->Is(Type::TaggedPointer())) {
+ // Write barriers for heap objects are cheaper.
+ return kPointerWriteBarrier;
+ }
+ NumberMatcher m(value);
+ if (m.HasValue()) {
+ if (IsSmiDouble(m.Value())) {
+ // Storing a smi doesn't need a write barrier.
+ return kNoWriteBarrier;
+ }
+ // The NumberConstant will be represented as HeapNumber.
+ return kPointerWriteBarrier;
+ }
+ return kFullWriteBarrier;
+ }
+ return kNoWriteBarrier;
+ }
+
+ WriteBarrierKind WriteBarrierKindFor(
+ BaseTaggedness base_taggedness,
+ MachineRepresentation field_representation, int field_offset,
+ Type* field_type, Node* value) {
+ if (base_taggedness == kTaggedBase &&
+ field_offset == HeapObject::kMapOffset) {
+ return kMapWriteBarrier;
+ }
+ return WriteBarrierKindFor(base_taggedness, field_representation,
+ field_type, value);
+ }
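+  // Examples (illustrative): a value statically known to be a Smi, or one of
+  // the immortal immovable roots (e.g. undefined), needs no barrier; a store
+  // to the map slot, or of a known Map, can use the cheaper kMapWriteBarrier;
+  // other heap-object stores use kPointerWriteBarrier or, when nothing is
+  // known, kFullWriteBarrier.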
+
+ Graph* graph() const { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return jsgraph_->simplified();
+ }
+
+ void LowerToCheckedInt32Mul(Node* node, Truncation truncation,
+ Type* input0_type, Type* input1_type) {
+ // If one of the inputs is positive and/or truncation is being applied,
+ // there is no need to return -0.
+ CheckForMinusZeroMode mz_mode =
+ truncation.IsUsedAsWord32() ||
+ (input0_type->Is(Type::OrderedNumber()) &&
+ input0_type->Min() > 0) ||
+ (input1_type->Is(Type::OrderedNumber()) &&
+ input1_type->Min() > 0)
+ ? CheckForMinusZeroMode::kDontCheckForMinusZero
+ : CheckForMinusZeroMode::kCheckForMinusZero;
+
+ NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
+ }
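+  // Why this is safe (sketch): an integer multiplication can only produce -0
+  // when the result is zero and one factor is negative, e.g. -3 * 0. If
+  // either input is known positive, or the uses truncate to word32 (where -0
+  // and 0 are indistinguishable), the minus-zero check may be omitted.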
+
+ void ChangeToInt32OverflowOp(Node* node) {
+ NodeProperties::ChangeOp(node, Int32OverflowOp(node));
+ }
+
+ void ChangeToUint32OverflowOp(Node* node) {
+ NodeProperties::ChangeOp(node, Uint32OverflowOp(node));
+ }
+
+ void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
+ SimplifiedLowering* lowering) {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
+ // only eliminate an unused speculative number operation if we know that
+  // the inputs are PlainPrimitive, which excludes everything that might
+  // have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ (GetUpperBound(node)->Is(Type::Signed32()) ||
+ GetUpperBound(node)->Is(Type::Unsigned32()) ||
+ truncation.IsUsedAsWord32())) {
+ // => Int32Add/Sub
+ VisitWord32TruncatingBinop(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ }
+
+ // Try to use type feedback.
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+
+ // Handle the case when no int32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAre(node, Type::Signed32()) ||
+ (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
+ NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
+      // If both the inputs and the feedback are int32, use the overflow op.
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
+ return;
+ }
+ }
+
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
+ return;
+ }
+
+ // default case => Float64Add/Sub
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower()) {
+ ChangeToPureOp(node, Float64Op(node));
+ }
+ return;
+ }
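+  // End-to-end sketch: for `a + b` with kSignedSmall/kSigned32 feedback and
+  // Signed32 inputs, the node becomes a checked Int32 add that deopts on
+  // overflow; with kNumber or kNumberOrOddball feedback it becomes a pure
+  // Float64Add after checked float64 conversions of the inputs.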
+
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
+ // Unconditionally eliminate unused pure nodes (only relevant if there's
+ // a pure operation in between two effectful ones, where the last one
+ // is unused).
+ // Note: We must not do this for constants, as they are cached and we
+ // would thus kill the cached {node} during lowering (i.e. replace all
+ // uses with Dead), but at that point some node lowering might have
+      // already taken the constant {node} from the cache (while it was still
+      // in a sane state) and we would afterwards replace that use with
+ // Dead as well.
+ if (node->op()->ValueInputCount() > 0 &&
+ node->op()->HasProperty(Operator::kPure)) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
switch (node->opcode()) {
//------------------------------------------------------------------
// Common operators.
//------------------------------------------------------------------
case IrOpcode::kStart:
+ // We use Start as a terminator for the frame state chain, so even
+        // though Start doesn't really produce a value, we have to say Tagged
+ // here, otherwise the input conversion will fail.
+ return VisitLeaf(node, MachineRepresentation::kTagged);
case IrOpcode::kDead:
return VisitLeaf(node, MachineRepresentation::kNone);
case IrOpcode::kParameter: {
@@ -742,15 +1223,15 @@ class RepresentationSelector {
ProcessInput(node, 0, UseInfo::Bool());
ProcessInput(node, 1, UseInfo::AnyTagged());
ProcessRemainingInputs(node, 2);
- break;
+ return;
case IrOpcode::kBranch:
ProcessInput(node, 0, UseInfo::Bool());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
- break;
+ return;
case IrOpcode::kSwitch:
ProcessInput(node, 0, UseInfo::TruncatingWord32());
EnqueueInput(node, NodeProperties::FirstControlIndex(node));
- break;
+ return;
case IrOpcode::kSelect:
return VisitSelect(node, truncation, lowering);
case IrOpcode::kPhi:
@@ -758,19 +1239,23 @@ class RepresentationSelector {
case IrOpcode::kCall:
return VisitCall(node, lowering);
-//------------------------------------------------------------------
-// JavaScript operators.
-//------------------------------------------------------------------
-// For now, we assume that all JS operators were too complex to lower
-// to Simplified and that they will always require tagged value inputs
-// and produce tagged value outputs.
-// TODO(turbofan): it might be possible to lower some JSOperators here,
-// but that responsibility really lies in the typed lowering phase.
-#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
- JS_OP_LIST(DEFINE_JS_CASE)
-#undef DEFINE_JS_CASE
+ //------------------------------------------------------------------
+ // JavaScript operators.
+ //------------------------------------------------------------------
+ case IrOpcode::kJSToNumber: {
VisitInputs(node);
- return SetOutput(node, MachineRepresentation::kTagged);
+ // TODO(bmeurer): Optimize somewhat based on input type?
+ if (truncation.IsUsedAsWord32()) {
+ SetOutput(node, MachineRepresentation::kWord32);
+ if (lower()) lowering->DoJSToNumberTruncatesToWord32(node, this);
+ } else if (truncation.IsUsedAsFloat64()) {
+ SetOutput(node, MachineRepresentation::kFloat64);
+ if (lower()) lowering->DoJSToNumberTruncatesToFloat64(node, this);
+ } else {
+ SetOutput(node, MachineRepresentation::kTagged);
+ }
+ return;
+ }
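+      // Example for the kJSToNumber case above (illustrative): in code like
+      // `(+x)|0` the conversion result is only used word32-truncated, so it
+      // can be lowered to a fast path yielding an untagged word32 instead of
+      // allocating a HeapNumber for the tagged result.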
//------------------------------------------------------------------
// Simplified operators.
@@ -792,149 +1277,474 @@ class RepresentationSelector {
ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
SetOutput(node, MachineRepresentation::kBit);
}
- break;
+ return;
}
- case IrOpcode::kBooleanToNumber: {
- if (lower()) {
- NodeInfo* input_info = GetInfo(node->InputAt(0));
- if (input_info->representation() == MachineRepresentation::kBit) {
- // BooleanToNumber(x: kRepBit) => x
- DeferReplacement(node, node->InputAt(0));
- } else {
- // BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
- node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
- NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
- }
- } else {
- // No input representation requirement; adapt during lowering.
- ProcessInput(node, 0, UseInfo::AnyTruncatingToBool());
- SetOutput(node, MachineRepresentation::kWord32);
+ case IrOpcode::kNumberEqual: {
+ Type* const lhs_type = TypeOf(node->InputAt(0));
+ Type* const rhs_type = TypeOf(node->InputAt(1));
+ // Number comparisons reduce to integer comparisons for integer inputs.
+ if ((lhs_type->Is(Type::Unsigned32()) &&
+ rhs_type->Is(Type::Unsigned32())) ||
+ (lhs_type->Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
+ rhs_type->Is(Type::Unsigned32OrMinusZeroOrNaN()) &&
+ OneInputCannotBe(node, type_cache_.kZeroish))) {
+ // => unsigned Int32Cmp
+ VisitUint32Cmp(node);
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ return;
}
- break;
+ if ((lhs_type->Is(Type::Signed32()) &&
+ rhs_type->Is(Type::Signed32())) ||
+ (lhs_type->Is(Type::Signed32OrMinusZeroOrNaN()) &&
+ rhs_type->Is(Type::Signed32OrMinusZeroOrNaN()) &&
+ OneInputCannotBe(node, type_cache_.kZeroish))) {
+ // => signed Int32Cmp
+ VisitInt32Cmp(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ return;
+ }
+ // => Float64Cmp
+ VisitFloat64Cmp(node);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
}
- case IrOpcode::kNumberEqual:
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual: {
// Number comparisons reduce to integer comparisons for integer inputs.
- if (BothInputsAreSigned32(node)) {
- // => signed Int32Cmp
- VisitInt32Cmp(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (BothInputsAreUnsigned32(node)) {
+ if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
// => unsigned Int32Cmp
VisitUint32Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
+ // => signed Int32Cmp
+ VisitInt32Cmp(node);
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else {
// => Float64Cmp
VisitFloat64Cmp(node);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
- break;
+ return;
+ }
+
+ case IrOpcode::kSpeculativeNumberAdd:
+ case IrOpcode::kSpeculativeNumberSubtract:
+ return VisitSpeculativeAdditiveOp(node, truncation, lowering);
+
+ case IrOpcode::kSpeculativeNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ case IrOpcode::kSpeculativeNumberEqual: {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ // Number comparisons reduce to integer comparisons for integer inputs.
+ if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
+ // => unsigned Int32Cmp
+ VisitUint32Cmp(node);
+ if (lower()) ChangeToPureOp(node, Uint32Op(node));
+ return;
+ } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Signed32())) {
+ // => signed Int32Cmp
+ VisitInt32Cmp(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ }
+ // Try to use type feedback.
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ case NumberOperationHint::kSigned32:
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kBit);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ case NumberOperationHint::kNumber:
+ case NumberOperationHint::kNumberOrOddball:
+ VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
+ MachineRepresentation::kBit);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ UNREACHABLE();
+ return;
}
+
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract: {
- if (BothInputsAre(node, Type::Signed32()) &&
- NodeProperties::GetType(node)->Is(Type::Signed32())) {
- // int32 + int32 = int32
- // => signed Int32Add/Sub
- VisitInt32Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- } else if (BothInputsAre(node, type_cache_.kAdditiveSafeInteger) &&
- truncation.TruncatesToWord32()) {
- // safe-int + safe-int = x (truncated to int32)
- // => signed Int32Add/Sub (truncated)
+ if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
+ (GetUpperBound(node)->Is(Type::Signed32()) ||
+ GetUpperBound(node)->Is(Type::Unsigned32()) ||
+ truncation.IsUsedAsWord32())) {
+ // => Int32Add/Sub
VisitWord32TruncatingBinop(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
} else {
// => Float64Add/Sub
VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
}
- break;
+ return;
}
- case IrOpcode::kNumberMultiply: {
- if (BothInputsAreSigned32(node)) {
- if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
- // Multiply reduces to Int32Mul if the inputs and the output
- // are integers.
- VisitInt32Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- break;
+ case IrOpcode::kSpeculativeNumberMultiply: {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ if (BothInputsAre(node, Type::Integral32()) &&
+ (NodeProperties::GetType(node)->Is(Type::Signed32()) ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()) ||
+ (truncation.IsUsedAsWord32() &&
+ NodeProperties::GetType(node)->Is(
+ type_cache_.kSafeIntegerOrMinusZero)))) {
+ // Multiply reduces to Int32Mul if the inputs are integers, and
+ // (a) the output is either known to be Signed32, or
+ // (b) the output is known to be Unsigned32, or
+ // (c) the uses are truncating and the result is in the safe
+ // integer range.
+ VisitWord32TruncatingBinop(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
+ }
+ // Try to use type feedback.
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ Type* input0_type = TypeOf(node->InputAt(0));
+ Type* input1_type = TypeOf(node->InputAt(1));
+
+ // Handle the case when no int32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAre(node, Type::Signed32())) {
+          // If both the inputs and the feedback are int32, use the overflow op.
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) {
+ LowerToCheckedInt32Mul(node, truncation, input0_type,
+ input1_type);
+ }
+ return;
}
- if (truncation.TruncatesToWord32() &&
- NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger)) {
- // Multiply reduces to Int32Mul if the inputs are integers,
- // the uses are truncating and the result is in the safe
- // integer range.
- VisitWord32TruncatingBinop(node);
- if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- break;
+ }
+
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) {
+ LowerToCheckedInt32Mul(node, truncation, input0_type, input1_type);
}
+ return;
+ }
+
+ // Checked float64 x float64 => float64
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kNumberMultiply: {
+ if (BothInputsAre(node, Type::Integral32()) &&
+ (NodeProperties::GetType(node)->Is(Type::Signed32()) ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()) ||
+ (truncation.IsUsedAsWord32() &&
+ NodeProperties::GetType(node)->Is(
+ type_cache_.kSafeIntegerOrMinusZero)))) {
+ // Multiply reduces to Int32Mul if the inputs are integers, and
+ // (a) the output is either known to be Signed32, or
+ // (b) the output is known to be Unsigned32, or
+ // (c) the uses are truncating and the result is in the safe
+ // integer range.
+ VisitWord32TruncatingBinop(node);
+ if (lower()) ChangeToPureOp(node, Int32Op(node));
+ return;
}
- // => Float64Mul
+ // Number x Number => Float64Mul
VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
- break;
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
}
- case IrOpcode::kNumberDivide: {
+ case IrOpcode::kSpeculativeNumberDivide: {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
+ // => unsigned Uint32Div
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
+ return;
+ }
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
// => signed Int32Div
VisitInt32Binop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
- break;
+ return;
}
- if (truncation.TruncatesToWord32()) {
+ if (truncation.IsUsedAsWord32()) {
// => signed Int32Div
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Int32Div(node));
- break;
+ return;
+ }
+ }
+
+ // Try to use type feedback.
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+
+ // Handle the case when no uint32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAreUnsigned32(node)) {
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower()) ChangeToUint32OverflowOp(node);
+ return;
+ }
+ }
+
+ // Handle the case when no int32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAreSigned32(node)) {
+          // If both the inputs and the feedback are int32, use the
+          // overflow op.
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
+ return;
+ }
+ }
+
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ // If the result is truncated, we only need to check the inputs.
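+          // E.g. for `(a / b) | 0` the Word32 truncation absorbs rounding
+          // and overflow of the quotient, so no deopt check on the output
+          // is required.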
+ if (truncation.IsUsedAsWord32()) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ } else {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
}
+ return;
}
- if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
+
+ // default case => Float64Div
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kNumberDivide: {
+ if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
// => unsigned Uint32Div
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
- break;
+ return;
}
- // => Float64Div
- VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
- break;
- }
- case IrOpcode::kNumberModulus: {
if (BothInputsAreSigned32(node)) {
if (NodeProperties::GetType(node)->Is(Type::Signed32())) {
- // => signed Int32Mod
+ // => signed Int32Div
VisitInt32Binop(node);
- if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- break;
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ return;
}
- if (truncation.TruncatesToWord32()) {
- // => signed Int32Mod
+ if (truncation.IsUsedAsWord32()) {
+ // => signed Int32Div
VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Div(node));
+ return;
+ }
+ }
+ // Number x Number => Float64Div
+ if (BothInputsAre(node, Type::NumberOrUndefined())) {
+ VisitFloat64Binop(node);
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ // Checked float64 x float64 => float64
+ DCHECK_EQ(IrOpcode::kSpeculativeNumberDivide, node->opcode());
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kSpeculativeNumberModulus: {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+ // => unsigned Uint32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ return;
+ }
+ if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+ // => signed Int32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ return;
+ }
+
+ // Try to use type feedback.
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+
+ // Handle the case when no uint32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAreUnsigned32(node)) {
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower()) ChangeToUint32OverflowOp(node);
+ return;
+ }
+ }
+
+ // Handle the case when no int32 checks on inputs are necessary
+ // (but an overflow check is needed on the output).
+ if (BothInputsAre(node, Type::Signed32())) {
+          // If both the inputs and the feedback are int32, use the
+          // overflow op.
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
+ return;
+ }
+ }
+
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ // If the result is truncated, we only need to check the inputs.
+ if (truncation.IsUsedAsWord32()) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
- break;
+ } else {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) ChangeToInt32OverflowOp(node);
}
+ return;
+ }
+
+ if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+ // We can only promise Float64 truncation here, as the decision is
+ // based on the feedback types of the inputs.
+ VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+ Truncation::Float64()),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ return;
+ }
+ if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+ // We can only promise Float64 truncation here, as the decision is
+ // based on the feedback types of the inputs.
+ VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+ Truncation::Float64()),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ return;
}
- if (BothInputsAreUnsigned32(node) && truncation.TruncatesToWord32()) {
+ // default case => Float64Mod
+ VisitBinop(node, UseInfo::CheckedNumberOrOddballAsFloat64(),
+ MachineRepresentation::kFloat64, Type::Number());
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kNumberModulus: {
+ if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
// => unsigned Uint32Mod
VisitWord32TruncatingBinop(node);
if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
- break;
+ return;
+ }
+ if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+ // => signed Int32Mod
+ VisitWord32TruncatingBinop(node);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ return;
+ }
+ if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Unsigned32()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
+ // We can only promise Float64 truncation here, as the decision is
+ // based on the feedback types of the inputs.
+ VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+ Truncation::Float64()),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
+ return;
+ }
+ if (TypeOf(node->InputAt(0))->Is(Type::Signed32()) &&
+ TypeOf(node->InputAt(1))->Is(Type::Signed32()) &&
+ (truncation.IsUsedAsWord32() ||
+ NodeProperties::GetType(node)->Is(Type::Signed32()))) {
+ // We can only promise Float64 truncation here, as the decision is
+ // based on the feedback types of the inputs.
+ VisitBinop(node, UseInfo(MachineRepresentation::kWord32,
+ Truncation::Float64()),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
+ return;
}
- // => Float64Mod
+ // default case => Float64Mod
VisitFloat64Binop(node);
- if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
- break;
+ if (lower()) ChangeToPureOp(node, Float64Op(node));
+ return;
}
case IrOpcode::kNumberBitwiseOr:
case IrOpcode::kNumberBitwiseXor:
case IrOpcode::kNumberBitwiseAnd: {
VisitInt32Binop(node);
if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
- break;
+ return;
}
+ case IrOpcode::kSpeculativeNumberBitwiseOr:
+ case IrOpcode::kSpeculativeNumberBitwiseXor:
+ case IrOpcode::kSpeculativeNumberBitwiseAnd:
+ VisitSpeculativeInt32Binop(node);
+ if (lower()) {
+ ChangeToPureOp(node, Int32Op(node));
+ }
+ return;
case IrOpcode::kNumberShiftLeft: {
Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -942,7 +1752,34 @@ class RepresentationSelector {
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
}
- break;
+ return;
+ }
+ case IrOpcode::kSpeculativeNumberShiftLeft: {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
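+        // JS shift counts use only the low five bits of the rhs, i.e.
+        // `x << y` is `x << (y & 31)`, so DoShift can mask the rhs unless
+        // rhs_type already proves the range [0, 31].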
+ if (BothInputsAre(node, Type::NumberOrOddball())) {
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+ }
+ return;
+ }
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shl(), rhs_type);
+ }
+ return;
}
case IrOpcode::kNumberShiftRight: {
Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -951,7 +1788,34 @@ class RepresentationSelector {
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
}
- break;
+ return;
+ }
+ case IrOpcode::kSpeculativeNumberShiftRight: {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ if (BothInputsAre(node, Type::NumberOrOddball())) {
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ }
+ return;
+ }
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Signed32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Sar(), rhs_type);
+ }
+ return;
}
case IrOpcode::kNumberShiftRightLogical: {
Type* rhs_type = GetUpperBound(node->InputAt(1));
@@ -960,98 +1824,211 @@ class RepresentationSelector {
if (lower()) {
lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
}
- break;
+ return;
}
- case IrOpcode::kNumberImul: {
- VisitBinop(node, UseInfo::TruncatingWord32(),
- UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
- if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- break;
+ case IrOpcode::kSpeculativeNumberShiftRightLogical: {
+ // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
+ // can only eliminate an unused speculative number operation if we know
+        // that the inputs are PlainPrimitive, which excludes everything that
+        // might have side effects or throw during a ToNumber conversion.
+ if (BothInputsAre(node, Type::PlainPrimitive())) {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ }
+ if (BothInputsAre(node, Type::NumberOrOddball())) {
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ }
+ return;
+ }
+ NumberOperationHint hint = NumberOperationHintOf(node->op());
+ Type* rhs_type = GetUpperBound(node->InputAt(1));
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kWord32, Type::Unsigned32());
+ if (lower()) {
+ lowering->DoShift(node, lowering->machine()->Word32Shr(), rhs_type);
+ }
+ return;
+ }
+ case IrOpcode::kNumberAbs: {
+ if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if (TypeOf(node->InputAt(0))->Is(Type::Signed32())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Int32Abs(node));
+ } else if (TypeOf(node->InputAt(0))
+ ->Is(type_cache_.kPositiveIntegerOrMinusZeroOrNaN)) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ }
+ return;
}
case IrOpcode::kNumberClz32: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
- break;
+ return;
}
- case IrOpcode::kNumberCeil: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, lowering->Float64Ceil(node));
- break;
+ case IrOpcode::kNumberImul: {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
+ return;
}
- case IrOpcode::kNumberFloor: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, lowering->Float64Floor(node));
- break;
+ case IrOpcode::kNumberFround: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat32);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kNumberMax: {
+ // TODO(turbofan): We should consider feedback types here as well.
+ if (BothInputsAreUnsigned32(node)) {
+ VisitUint32Binop(node);
+ if (lower()) {
+ lowering->DoMax(node, lowering->machine()->Uint32LessThan(),
+ MachineRepresentation::kWord32);
+ }
+ } else if (BothInputsAreSigned32(node)) {
+ VisitInt32Binop(node);
+ if (lower()) {
+ lowering->DoMax(node, lowering->machine()->Int32LessThan(),
+ MachineRepresentation::kWord32);
+ }
+ } else if (BothInputsAre(node, Type::PlainNumber())) {
+ VisitFloat64Binop(node);
+ if (lower()) {
+ lowering->DoMax(node, lowering->machine()->Float64LessThan(),
+ MachineRepresentation::kFloat64);
+ }
+ } else {
+ VisitFloat64Binop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ }
+ return;
+ }
+ case IrOpcode::kNumberMin: {
+ // TODO(turbofan): We should consider feedback types here as well.
+ if (BothInputsAreUnsigned32(node)) {
+ VisitUint32Binop(node);
+ if (lower()) {
+ lowering->DoMin(node, lowering->machine()->Uint32LessThan(),
+ MachineRepresentation::kWord32);
+ }
+ } else if (BothInputsAreSigned32(node)) {
+ VisitInt32Binop(node);
+ if (lower()) {
+ lowering->DoMin(node, lowering->machine()->Int32LessThan(),
+ MachineRepresentation::kWord32);
+ }
+ } else if (BothInputsAre(node, Type::PlainNumber())) {
+ VisitFloat64Binop(node);
+ if (lower()) {
+ lowering->DoMin(node, lowering->machine()->Float64LessThan(),
+ MachineRepresentation::kFloat64);
+ }
+ } else {
+ VisitFloat64Binop(node);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ }
+ return;
+ }
+ case IrOpcode::kNumberAtan2:
+ case IrOpcode::kNumberPow: {
+ VisitBinop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
+ }
+ case IrOpcode::kNumberAcos:
+ case IrOpcode::kNumberAcosh:
+ case IrOpcode::kNumberAsin:
+ case IrOpcode::kNumberAsinh:
+ case IrOpcode::kNumberAtan:
+ case IrOpcode::kNumberAtanh:
+ case IrOpcode::kNumberCeil:
+ case IrOpcode::kNumberCos:
+ case IrOpcode::kNumberCosh:
+ case IrOpcode::kNumberExp:
+ case IrOpcode::kNumberExpm1:
+ case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberLog:
+ case IrOpcode::kNumberLog1p:
+ case IrOpcode::kNumberLog2:
+ case IrOpcode::kNumberLog10:
+ case IrOpcode::kNumberCbrt:
+ case IrOpcode::kNumberSin:
+ case IrOpcode::kNumberSinh:
+ case IrOpcode::kNumberTan:
+ case IrOpcode::kNumberTanh:
+ case IrOpcode::kNumberTrunc: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
}
case IrOpcode::kNumberRound: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, lowering->Float64Round(node));
- break;
+ return;
}
- case IrOpcode::kNumberTrunc: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kFloat64);
- if (lower()) DeferReplacement(node, lowering->Float64Trunc(node));
- break;
+ case IrOpcode::kNumberSign: {
+ if (InputIs(node, Type::Signed32())) {
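+          // Signed32 inputs exclude NaN and -0, so Math.sign reduces to a
+          // pure integer sign function with results in {-1, 0, 1}.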
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, lowering->Int32Sign(node));
+ } else {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, lowering->Float64Sign(node));
+ }
+ return;
+ }
+ case IrOpcode::kNumberSqrt: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
}
case IrOpcode::kNumberToInt32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
- break;
+ return;
}
case IrOpcode::kNumberToUint32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
- break;
- }
- case IrOpcode::kNumberIsHoleNaN: {
- VisitUnop(node, UseInfo::Float64(), MachineRepresentation::kBit);
- if (lower()) {
- // NumberIsHoleNaN(x) => Word32Equal(Float64ExtractLowWord32(x),
- // #HoleNaNLower32)
- node->ReplaceInput(0,
- jsgraph_->graph()->NewNode(
- lowering->machine()->Float64ExtractLowWord32(),
- node->InputAt(0)));
- node->AppendInput(jsgraph_->zone(),
- jsgraph_->Int32Constant(kHoleNanLower32));
- NodeProperties::ChangeOp(node, jsgraph_->machine()->Word32Equal());
- }
- break;
- }
- case IrOpcode::kPlainPrimitiveToNumber: {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) {
- // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::ToNumber(jsgraph_->isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
- flags, properties);
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
- }
- break;
+ return;
}
case IrOpcode::kReferenceEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower()) {
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
- break;
+ return;
}
case IrOpcode::kStringEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringEqual(x, y) => Call(StringEqualStub, x, y, no-context)
- Operator::Properties properties = node->op()->properties();
+ Operator::Properties properties =
+ Operator::kCommutative | Operator::kEliminatable;
Callable callable = CodeFactory::StringEqual(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1059,16 +2036,17 @@ class RepresentationSelector {
flags, properties);
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
- node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
- break;
+ return;
}
case IrOpcode::kStringLessThan: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringLessThan(x, y) => Call(StringLessThanStub, x, y, no-context)
- Operator::Properties properties = node->op()->properties();
+ Operator::Properties properties = Operator::kEliminatable;
Callable callable = CodeFactory::StringLessThan(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -1076,17 +2054,18 @@ class RepresentationSelector {
flags, properties);
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
- node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
- break;
+ return;
}
case IrOpcode::kStringLessThanOrEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
if (lower()) {
// StringLessThanOrEqual(x, y)
// => Call(StringLessThanOrEqualStub, x, y, no-context)
- Operator::Properties properties = node->op()->properties();
+ Operator::Properties properties = Operator::kEliminatable;
Callable callable =
CodeFactory::StringLessThanOrEqual(jsgraph_->isolate());
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
@@ -1095,51 +2074,132 @@ class RepresentationSelector {
flags, properties);
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
- node->InsertInput(jsgraph_->zone(), 3, jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ node->AppendInput(jsgraph_->zone(), jsgraph_->graph()->start());
NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
- break;
+ return;
}
- case IrOpcode::kStringToNumber: {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ case IrOpcode::kStringCharCodeAt: {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ return;
+ }
+ case IrOpcode::kStringFromCharCode: {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTagged);
+ return;
+ }
+
+ case IrOpcode::kCheckBounds: {
+ Type* index_type = TypeOf(node->InputAt(0));
+ if (index_type->Is(Type::Unsigned32())) {
+ VisitBinop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ } else {
+ VisitBinop(node, UseInfo::CheckedSigned32AsWord32(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ }
if (lower()) {
- // StringToNumber(x) => Call(StringToNumberStub, x, no-context)
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::StringToNumber(jsgraph_->isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
- flags, properties);
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
+ // The bounds check is redundant if we already know that
+          // the index is within the bounds of [0.0, length).
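+          // E.g. an index already typed Range(0, 9) checked against length
+          // 10: the restricted output type equals the input type, so the
+          // check can never fail and folds to the index itself.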
+ if (index_type->Is(NodeProperties::GetType(node))) {
+ DeferReplacement(node, node->InputAt(0));
+ }
}
- break;
+ return;
+ }
+ case IrOpcode::kCheckIf: {
+ ProcessInput(node, 0, UseInfo::Bool());
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
+ case IrOpcode::kCheckNumber: {
+ if (InputIs(node, Type::Number())) {
+ if (truncation.IsUsedAsWord32()) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ } else {
+ // TODO(jarin,bmeurer): We need to go to Tagged here, because
+ // otherwise we cannot distinguish the hole NaN (which might need to
+ // be treated as undefined). We should have a dedicated Type for
+ // that at some point, and maybe even a dedicated truncation.
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
+ }
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ }
+ return;
+ }
+ case IrOpcode::kCheckString: {
+ if (InputIs(node, Type::String())) {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ }
+ return;
+ }
+ case IrOpcode::kCheckTaggedPointer: {
+ if (InputCannotBe(node, Type::SignedSmall())) {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ }
+ return;
}
+ case IrOpcode::kCheckTaggedSigned: {
+ if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
+ VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ }
+ return;
+ }
+
case IrOpcode::kAllocate: {
- ProcessInput(node, 0, UseInfo::AnyTagged());
+ ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
SetOutput(node, MachineRepresentation::kTagged);
- break;
+ return;
}
case IrOpcode::kLoadField: {
+ if (truncation.IsUnused()) return VisitUnused(node);
FieldAccess access = FieldAccessOf(node->op());
- ProcessInput(node, 0, UseInfoForBasePointer(access));
- ProcessRemainingInputs(node, 1);
- SetOutput(node, access.machine_type.representation());
- break;
+ MachineRepresentation const representation =
+ access.machine_type.representation();
+ // TODO(bmeurer): Introduce an appropriate tagged-signed machine rep.
+ VisitUnop(node, UseInfoForBasePointer(access), representation);
+ return;
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
+ WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+ access.base_is_tagged, access.machine_type.representation(),
+ access.offset, access.type, node->InputAt(1));
ProcessInput(node, 0, UseInfoForBasePointer(access));
ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
access.machine_type.representation()));
ProcessRemainingInputs(node, 2);
SetOutput(node, MachineRepresentation::kNone);
- break;
+ if (lower()) {
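+          // The barrier is only ever weakened here; e.g. a stored value
+          // statically known to be a Smi needs no write barrier at all.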
+ if (write_barrier_kind < access.write_barrier_kind) {
+ access.write_barrier_kind = write_barrier_kind;
+ NodeProperties::ChangeOp(
+ node, jsgraph_->simplified()->StoreField(access));
+ }
+ }
+ return;
}
case IrOpcode::kLoadBuffer: {
+ if (truncation.IsUnused()) return VisitUnused(node);
BufferAccess access = BufferAccessOf(node->op());
ProcessInput(node, 0, UseInfo::PointerInt()); // buffer
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // offset
@@ -1147,8 +2207,8 @@ class RepresentationSelector {
ProcessRemainingInputs(node, 3);
MachineRepresentation output;
- if (truncation.TruncatesUndefinedToZeroOrNaN()) {
- if (truncation.TruncatesNaNToZero()) {
+ if (truncation.IdentifiesUndefinedAndNaNAndZero()) {
+ if (truncation.IdentifiesNaNAndZero()) {
// If undefined is truncated to a non-NaN number, we can use
// the load's representation.
output = access.machine_type().representation();
@@ -1160,25 +2220,17 @@ class RepresentationSelector {
MachineRepresentation::kFloat32) {
output = access.machine_type().representation();
} else {
- if (access.machine_type().representation() !=
- MachineRepresentation::kFloat64) {
- // TODO(bmeurer): See comment on abort_compilation_.
- if (lower()) lowering->abort_compilation_ = true;
- }
output = MachineRepresentation::kFloat64;
}
}
} else {
- // TODO(bmeurer): See comment on abort_compilation_.
- if (lower()) lowering->abort_compilation_ = true;
-
// If undefined is not truncated away, we need to have the tagged
// representation.
output = MachineRepresentation::kTagged;
}
SetOutput(node, output);
if (lower()) lowering->DoLoadBuffer(node, output, changer_);
- break;
+ return;
}
case IrOpcode::kStoreBuffer: {
BufferAccess access = BufferAccessOf(node->op());
@@ -1191,18 +2243,21 @@ class RepresentationSelector {
ProcessRemainingInputs(node, 4);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) lowering->DoStoreBuffer(node);
- break;
+ return;
}
case IrOpcode::kLoadElement: {
+ if (truncation.IsUnused()) return VisitUnused(node);
ElementAccess access = ElementAccessOf(node->op());
- ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
- ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
- ProcessRemainingInputs(node, 2);
- SetOutput(node, access.machine_type.representation());
- break;
+ VisitBinop(node, UseInfoForBasePointer(access),
+ UseInfo::TruncatingWord32(),
+ access.machine_type.representation());
+ return;
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
+ WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
+ access.base_is_tagged, access.machine_type.representation(),
+ access.type, node->InputAt(2));
ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 2,
@@ -1210,15 +2265,142 @@ class RepresentationSelector {
access.machine_type.representation())); // value
ProcessRemainingInputs(node, 3);
SetOutput(node, MachineRepresentation::kNone);
- break;
+ if (lower()) {
+ if (write_barrier_kind < access.write_barrier_kind) {
+ access.write_barrier_kind = write_barrier_kind;
+ NodeProperties::ChangeOp(
+ node, jsgraph_->simplified()->StoreElement(access));
+ }
+ }
+ return;
+ }
+ case IrOpcode::kLoadTypedElement: {
+ MachineRepresentation const rep =
+ MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
+ ProcessInput(node, 2, UseInfo::PointerInt()); // external pointer
+ ProcessInput(node, 3, UseInfo::TruncatingWord32()); // index
+ ProcessRemainingInputs(node, 4);
+ SetOutput(node, rep);
+ return;
+ }
+ case IrOpcode::kStoreTypedElement: {
+ MachineRepresentation const rep =
+ MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
+ ProcessInput(node, 2, UseInfo::PointerInt()); // external pointer
+ ProcessInput(node, 3, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 4,
+ TruncatingUseInfoFromRepresentation(rep)); // value
+ ProcessRemainingInputs(node, 5);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
+ case IrOpcode::kPlainPrimitiveToNumber: {
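+        // Pick the cheapest conversion that the static input type and the
+        // uses allow: e.g. `+true` is already a 0/1 word, while `+"42"`
+        // needs a real string-to-number conversion.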
+ if (InputIs(node, Type::Boolean())) {
+ VisitUnop(node, UseInfo::Bool(), MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if (InputIs(node, Type::String())) {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) lowering->DoStringToNumber(node);
+ } else if (truncation.IsUsedAsWord32()) {
+ if (InputIs(node, Type::NumberOrOddball())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ simplified()->PlainPrimitiveToWord32());
+ }
+ }
+ } else if (truncation.IsUsedAsFloat64()) {
+ if (InputIs(node, Type::NumberOrOddball())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kFloat64);
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ simplified()->PlainPrimitiveToFloat64());
+ }
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ }
+ return;
}
+ case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
+ case IrOpcode::kObjectIsString:
case IrOpcode::kObjectIsUndetectable: {
ProcessInput(node, 0, UseInfo::AnyTagged());
SetOutput(node, MachineRepresentation::kBit);
- break;
+ return;
+ }
+ case IrOpcode::kCheckFloat64Hole: {
+ if (truncation.IsUnused()) return VisitUnused(node);
+ CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
+ ProcessInput(node, 0, UseInfo::TruncatingFloat64());
+ ProcessRemainingInputs(node, 1);
+ SetOutput(node, MachineRepresentation::kFloat64);
+ if (truncation.IsUsedAsFloat64() &&
+ mode == CheckFloat64HoleMode::kAllowReturnHole) {
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ }
+ return;
+ }
+ case IrOpcode::kCheckTaggedHole: {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ return;
+ }
+ case IrOpcode::kConvertTaggedHoleToUndefined: {
+ if (InputIs(node, Type::NumberOrOddball()) &&
+ truncation.IsUsedAsWord32()) {
+ // Propagate the Word32 truncation.
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if (InputIs(node, Type::NumberOrOddball()) &&
+ truncation.IsUsedAsFloat64()) {
+ // Propagate the Float64 truncation.
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if (InputIs(node, Type::NonInternal())) {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ // TODO(turbofan): Add a (Tagged) truncation that identifies hole
+ // and undefined, i.e. for a[i] === obj cases.
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ }
+ return;
+ }
+ case IrOpcode::kCheckMaps:
+ case IrOpcode::kTransitionElementsKind: {
+ VisitInputs(node);
+ return SetOutput(node, MachineRepresentation::kNone);
+ }
+ case IrOpcode::kEnsureWritableFastElements:
+ return VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
+ case IrOpcode::kMaybeGrowFastElements: {
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // object
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // elements
+ ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 3, UseInfo::TruncatingWord32()); // length
+ ProcessRemainingInputs(node, 4);
+ SetOutput(node, MachineRepresentation::kTagged);
+ return;
}
//------------------------------------------------------------------
@@ -1231,8 +2413,7 @@ class RepresentationSelector {
ProcessInput(node, 0, UseInfo::AnyTagged()); // tagged pointer
ProcessInput(node, 1, UseInfo::PointerInt()); // index
ProcessRemainingInputs(node, 2);
- SetOutput(node, rep.representation());
- break;
+ return SetOutput(node, rep.representation());
}
case IrOpcode::kStore: {
// TODO(jarin) Eventually, we should get rid of all machine stores
@@ -1243,8 +2424,7 @@ class RepresentationSelector {
ProcessInput(node, 2,
TruncatingUseInfoFromRepresentation(rep.representation()));
ProcessRemainingInputs(node, 3);
- SetOutput(node, MachineRepresentation::kNone);
- break;
+ return SetOutput(node, MachineRepresentation::kNone);
}
case IrOpcode::kWord32Shr:
         // We output unsigned int32 for shift right because JavaScript's
         // >>> operator produces an unsigned result.
@@ -1323,15 +2503,12 @@ class RepresentationSelector {
return VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord64);
case IrOpcode::kTruncateFloat64ToFloat32:
- return VisitUnop(node, UseInfo::Float64(),
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat32);
- case IrOpcode::kTruncateFloat64ToInt32:
- return VisitUnop(node, UseInfo::Float64(),
+ case IrOpcode::kTruncateFloat64ToWord32:
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kWord32);
- case IrOpcode::kChangeFloat32ToFloat64:
- return VisitUnop(node, UseInfo::Float32(),
- MachineRepresentation::kFloat64);
case IrOpcode::kChangeInt32ToFloat64:
return VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kFloat64);
@@ -1351,7 +2528,10 @@ class RepresentationSelector {
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
case IrOpcode::kFloat64RoundUp:
- return VisitUnop(node, UseInfo::Float64(),
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ case IrOpcode::kFloat64SilenceNaN:
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -1359,19 +2539,35 @@ class RepresentationSelector {
return VisitFloat64Cmp(node);
case IrOpcode::kFloat64ExtractLowWord32:
case IrOpcode::kFloat64ExtractHighWord32:
- return VisitUnop(node, UseInfo::Float64(),
+ return VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kWord32);
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
- return VisitBinop(node, UseInfo::Float64(), UseInfo::TruncatingWord32(),
+ return VisitBinop(node, UseInfo::TruncatingFloat64(),
+ UseInfo::TruncatingWord32(),
MachineRepresentation::kFloat64);
+ case IrOpcode::kNumberSilenceNaN:
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
+ return;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
return VisitLeaf(node, MachineType::PointerRepresentation());
case IrOpcode::kStateValues:
- VisitStateValues(node);
- break;
+ return VisitStateValues(node);
+ case IrOpcode::kTypeGuard: {
+ // We just get rid of the sigma here. In principle, it should be
+ // possible to refine the truncation and representation based on
+ // the sigma's type.
+ MachineRepresentation output =
+ GetOutputInfoForPhi(node, TypeOf(node->InputAt(0)), truncation);
+
+ VisitUnop(node, UseInfo(output, truncation), output);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ return;
+ }
// The following opcodes are not produced before representation
// inference runs, so we do not have any real test coverage.
@@ -1379,14 +2575,27 @@ class RepresentationSelector {
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kChangeFloat32ToFloat64:
+ case IrOpcode::kCheckedInt32Add:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kPlainPrimitiveToWord32:
+ case IrOpcode::kPlainPrimitiveToFloat64:
+ case IrOpcode::kLoopExit:
+ case IrOpcode::kLoopExitValue:
+ case IrOpcode::kLoopExitEffect:
FATAL("Representation inference: unsupported opcodes.");
+ break;
default:
VisitInputs(node);
// Assume the output is tagged.
- SetOutput(node, MachineRepresentation::kTagged);
- break;
+ return SetOutput(node, MachineRepresentation::kTagged);
}
+ UNREACHABLE();
}
void DeferReplacement(Node* node, Node* replacement) {
@@ -1394,22 +2603,41 @@ class RepresentationSelector {
node->op()->mnemonic(), replacement->id(),
replacement->op()->mnemonic());
- if (replacement->id() < count_ &&
- GetUpperBound(node)->Is(GetUpperBound(replacement))) {
- // Replace with a previously existing node eagerly only if the type is the
- // same.
- node->ReplaceUses(replacement);
- } else {
- // Otherwise, we are replacing a node with a representation change.
- // Such a substitution must be done after all lowering is done, because
- // changing the type could confuse the representation change
- // insertion for uses of the node.
- replacements_.push_back(node);
- replacements_.push_back(replacement);
+ // Disconnect the node from effect and control chains, if necessary.
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ // Disconnect the node from effect and control chains.
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ ReplaceEffectControlUses(node, effect, control);
}
+
+ replacements_.push_back(node);
+ replacements_.push_back(replacement);
+
node->NullAllInputs(); // Node is now dead.
}
+ void Kill(Node* node) {
+ TRACE("killing #%d:%s\n", node->id(), node->op()->mnemonic());
+
+ if (node->op()->EffectInputCount() == 1) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ // Disconnect the node from effect and control chains.
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ ReplaceEffectControlUses(node, effect, control);
+ } else {
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ DCHECK_EQ(0, node->op()->ControlOutputCount());
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+ }
+
+ node->ReplaceUses(jsgraph_->Dead());
+
+ node->NullAllInputs(); // The {node} is now dead.
+ }
+
void PrintOutputInfo(NodeInfo* info) {
if (FLAG_trace_representation) {
OFStream os(stdout);
@@ -1427,19 +2655,20 @@ class RepresentationSelector {
void PrintTruncation(Truncation truncation) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << truncation.description();
+ os << truncation.description() << std::endl;
}
}
void PrintUseInfo(UseInfo info) {
if (FLAG_trace_representation) {
OFStream os(stdout);
- os << info.preferred() << ":" << info.truncation().description();
+ os << info.representation() << ":" << info.truncation().description();
}
}
private:
JSGraph* jsgraph_;
+ Zone* zone_; // Temporary zone.
size_t const count_; // number of nodes in the graph
ZoneVector<NodeInfo> info_; // node id -> usage information
#ifdef DEBUG
@@ -1451,6 +2680,12 @@ class RepresentationSelector {
Phase phase_; // current phase of algorithm
RepresentationChanger* changer_; // for inserting representation changes
ZoneQueue<Node*> queue_; // queue for traversing the graph
+
+ struct NodeState {
+ Node* node;
+ int input_index;
+ };
+ ZoneStack<NodeState> typing_stack_; // stack for graph typing.
// TODO(danno): RepresentationSelector shouldn't know anything about the
// source positions table, but must for now since there currently is no other
// way to pass down source position information to nodes created during
@@ -1458,15 +2693,16 @@ class RepresentationSelector {
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
TypeCache const& type_cache_;
+ OperationTyper op_typer_; // helper for the feedback typer
NodeInfo* GetInfo(Node* node) {
- DCHECK(node->id() >= 0);
DCHECK(node->id() < count_);
return &info_[node->id()];
}
+ Zone* zone() { return zone_; }
+ Zone* graph_zone() { return jsgraph_->zone(); }
};
-
SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
SourcePositionTable* source_positions)
: jsgraph_(jsgraph),
@@ -1474,7 +2710,6 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
type_cache_(TypeCache::Get()),
source_positions_(source_positions) {}
-
void SimplifiedLowering::LowerAllNodes() {
RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
RepresentationSelector selector(jsgraph(), zone_, &changer,
@@ -1482,6 +2717,166 @@ void SimplifiedLowering::LowerAllNodes() {
selector.Run(this);
}
+void SimplifiedLowering::DoJSToNumberTruncatesToFloat64(
+ Node* node, RepresentationSelector* selector) {
+ DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+ Node* value = node->InputAt(0);
+ Node* context = node->InputAt(1);
+ Node* frame_state = node->InputAt(2);
+ Node* effect = node->InputAt(3);
+ Node* control = node->InputAt(4);
+ Node* throwing;
+
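+  // In pseudocode, the graph built below computes:
+  //   if ObjectIsSmi(value) then
+  //     ChangeInt32ToFloat64(ChangeTaggedSignedToInt32(value))
+  //   else
+  //     number = ToNumber(value)                 // may throw
+  //     if ObjectIsSmi(number) then
+  //       ChangeInt32ToFloat64(ChangeTaggedSignedToInt32(number))
+  //     else
+  //       LoadField[HeapNumberValue](number)
+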
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0;
+ {
+ vtrue0 = graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
+ vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ throwing = vfalse0 = efalse0 =
+ graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
+ frame_state, efalse0, if_false0);
+ if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+ Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ vtrue1 =
+ graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
+ vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+ efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, control);
+
+ // Replace effect and control uses appropriately.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ edge.from()->ReplaceUses(control);
+ edge.from()->Kill();
+ } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(throwing);
+ } else {
+ UNREACHABLE();
+ }
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ }
+ }
+
+ selector->DeferReplacement(node, value);
+}
+
+void SimplifiedLowering::DoJSToNumberTruncatesToWord32(
+ Node* node, RepresentationSelector* selector) {
+ DCHECK_EQ(IrOpcode::kJSToNumber, node->opcode());
+ Node* value = node->InputAt(0);
+ Node* context = node->InputAt(1);
+ Node* frame_state = node->InputAt(2);
+ Node* effect = node->InputAt(3);
+ Node* control = node->InputAt(4);
+ Node* throwing;
+
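+  // Same shape as the Float64 variant above, but the result stays Word32:
+  //   if ObjectIsSmi(value) then ChangeTaggedSignedToInt32(value)
+  //   else
+  //     number = ToNumber(value)                 // may throw
+  //     if ObjectIsSmi(number) then ChangeTaggedSignedToInt32(number)
+  //     else TruncateFloat64ToWord32(LoadField[HeapNumberValue](number))
+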
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 =
+ graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), value);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ throwing = vfalse0 = efalse0 =
+ graph()->NewNode(ToNumberOperator(), ToNumberCode(), value, context,
+ frame_state, efalse0, if_false0);
+ if_false0 = graph()->NewNode(common()->IfSuccess(), throwing);
+
+ Node* check1 = graph()->NewNode(simplified()->ObjectIsSmi(), vfalse0);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1 =
+ graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(), vfalse0);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ vfalse1 = efalse1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
+ efalse1, if_false1);
+ vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ vtrue0, vfalse0, control);
+
+ // Replace effect and control uses appropriately.
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ edge.from()->ReplaceUses(control);
+ edge.from()->Kill();
+ } else if (edge.from()->opcode() == IrOpcode::kIfException) {
+ edge.UpdateTo(throwing);
+ } else {
+ UNREACHABLE();
+ }
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ }
+ }
+
+ selector->DeferReplacement(node, value);
+}
void SimplifiedLowering::DoLoadBuffer(Node* node,
MachineRepresentation output_rep,
@@ -1507,9 +2902,11 @@ void SimplifiedLowering::DoLoadBuffer(Node* node,
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = graph()->NewNode(machine()->Load(access_type), buffer, index,
effect, if_true);
+ Type* element_type =
+ Type::Intersect(NodeProperties::GetType(node), Type::Number(), zone());
Node* vtrue = changer->GetRepresentationFor(
- etrue, access_type.representation(), NodeProperties::GetType(node),
- output_rep, Truncation::None());
+ etrue, access_type.representation(), element_type, node,
+ UseInfo(output_rep, Truncation::None()));
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* efalse = effect;
@@ -1551,262 +2948,14 @@ void SimplifiedLowering::DoStoreBuffer(Node* node) {
NodeProperties::ChangeOp(node, machine()->CheckedStore(rep));
}
-Node* SimplifiedLowering::Float64Ceil(Node* const node) {
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
- Node* const input = node->InputAt(0);
-
- // Use fast hardware instruction if available.
- if (machine()->Float64RoundUp().IsSupported()) {
- return graph()->NewNode(machine()->Float64RoundUp().op(), input);
- }
-
- // General case for ceil.
- //
- // if 0.0 < input then
- // if 2^52 <= input then
- // input
- // else
- // let temp1 = (2^52 + input) - 2^52 in
- // if temp1 < input then
- // temp1 + 1
- // else
- // temp1
- // else
- // if input == 0 then
- // input
- // else
- // if input <= -2^52 then
- // input
- // else
- // let temp1 = -0 - input in
- // let temp2 = (2^52 + temp1) - 2^52 in
- // let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
- // -0 - temp3
- //
- // Note: We do not use the Diamond helper class here, because it really hurts
- // readability with nested diamonds.
-
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
- graph()->start());
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, input),
- graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
- }
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
- {
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- Node* temp3 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
- graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
- vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
- }
-
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
-}
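The software fallback removed above rests on a classic IEEE-754 identity: every double >= 2^52 has no fraction bits, so for 0 < x < 2^52 the expression (2^52 + x) - 2^52 snaps x to a nearby integer under the FPU's current rounding mode, and a compare-and-adjust fixes the rounding direction. A standalone sketch of the ceil variant (illustrative only, not V8 code; assumes the default round-to-nearest mode and no fast-math folding):

    #include <cstdio>

    // Ceil for 0.0 < x < 2^52 via the add/subtract-2^52 trick.
    double CeilViaTwo52(double x) {
      const double two_52 = 4503599627370496.0;      // 2^52
      double rounded = (two_52 + x) - two_52;        // x rounded to an integer
      return rounded < x ? rounded + 1.0 : rounded;  // bump up if we rounded down
    }

    int main() {
      std::printf("%.1f %.1f\n", CeilViaTwo52(2.3), CeilViaTwo52(7.0));  // 3.0 7.0
      return 0;
    }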
-
-Node* SimplifiedLowering::Float64Floor(Node* const node) {
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_one = jsgraph()->Float64Constant(-1.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
- Node* const input = node->InputAt(0);
-
- // Use fast hardware instruction if available.
- if (machine()->Float64RoundDown().IsSupported()) {
- return graph()->NewNode(machine()->Float64RoundDown().op(), input);
- }
-
- // General case for floor.
- //
- // if 0.0 < input then
- // if 2^52 <= input then
- // input
- // else
- // let temp1 = (2^52 + input) - 2^52 in
- // if input < temp1 then
- // temp1 - 1
- // else
- // temp1
- // else
- // if input == 0 then
- // input
- // else
- // if input <= -2^52 then
- // input
- // else
- // let temp1 = -0 - input in
- // let temp2 = (2^52 + temp1) - 2^52 in
- // if temp2 < temp1 then
- // -1 - temp2
- // else
- // -0 - temp2
- //
- // Note: We do not use the Diamond helper class here, because it really hurts
- // readability with nested diamonds.
-
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
- graph()->start());
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), input, temp1),
- graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
- }
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
- {
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- vfalse2 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
- graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
- graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
- }
-
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
-}
-
Node* SimplifiedLowering::Float64Round(Node* const node) {
Node* const one = jsgraph()->Float64Constant(1.0);
Node* const one_half = jsgraph()->Float64Constant(0.5);
Node* const input = node->InputAt(0);
// Round up towards Infinity, and adjust if the difference exceeds 0.5.
- Node* result = Float64Ceil(node);
+ Node* result = graph()->NewNode(machine()->Float64RoundUp().placeholder(),
+ node->InputAt(0));
return graph()->NewNode(
common()->Select(MachineRepresentation::kFloat64),
graph()->NewNode(
@@ -1815,127 +2964,35 @@ Node* SimplifiedLowering::Float64Round(Node* const node) {
result, graph()->NewNode(machine()->Float64Sub(), result, one));
}
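Float64Round is now lowered as "ceil, then adjust": it takes the machine-level Float64RoundUp result and subtracts one when the ceiling overshot the input by more than 0.5. A scalar sketch of the resulting select, assuming the condition elided by the hunk compares result - 0.5 against the input as the surrounding code suggests:

    #include <cmath>
    #include <cstdio>

    // Round-half-toward-+infinity expressed via ceil (JS Math.round semantics).
    double RoundViaCeil(double x) {
      double result = std::ceil(x);
      return (result - 0.5 <= x) ? result : result - 1.0;
    }

    int main() {
      std::printf("%.1f %.1f %.1f\n", RoundViaCeil(0.5), RoundViaCeil(2.4),
                  RoundViaCeil(-0.5));  // 1.0 2.0 -0.0
      return 0;
    }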
-Node* SimplifiedLowering::Float64Trunc(Node* const node) {
- Node* const one = jsgraph()->Float64Constant(1.0);
+Node* SimplifiedLowering::Float64Sign(Node* const node) {
+ Node* const minus_one = jsgraph()->Float64Constant(-1.0);
Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
- Node* const input = node->InputAt(0);
-
- // Use fast hardware instruction if available.
- if (machine()->Float64RoundTruncate().IsSupported()) {
- return graph()->NewNode(machine()->Float64RoundTruncate().op(), input);
- }
-
- // General case for trunc.
- //
- // if 0.0 < input then
- // if 2^52 <= input then
- // input
- // else
- // let temp1 = (2^52 + input) - 2^52 in
- // if input < temp1 then
- // temp1 - 1
- // else
- // temp1
- // else
- // if input == 0 then
- // input
- // else
- // if input <= -2^52 then
- // input
- // else
- // let temp1 = -0 - input in
- // let temp2 = (2^52 + temp1) - 2^52 in
- // let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
- // -0 - temp3
- //
- // Note: We do not use the Diamond helper class here, because it really hurts
- // readability with nested diamonds.
-
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
- graph()->start());
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ Node* const one = jsgraph()->Float64Constant(1.0);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const input = node->InputAt(0);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
+ return graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, zero), minus_one,
+ graph()->NewNode(
common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), input, temp1),
- graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
- }
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
- {
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- Node* temp3 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
- graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
- vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
- }
-
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
- }
+ graph()->NewNode(machine()->Float64LessThan(), zero, input), one,
+ zero));
+}
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
- }
+Node* SimplifiedLowering::Int32Abs(Node* const node) {
+ Node* const input = node->InputAt(0);
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- return graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
+  // General case for absolute integer value.
+ //
+ // let sign = input >> 31 in
+ // (input ^ sign) - sign
+
+ Node* sign = graph()->NewNode(machine()->Word32Sar(), input,
+ jsgraph()->Int32Constant(31));
+ return graph()->NewNode(machine()->Int32Sub(),
+ graph()->NewNode(machine()->Word32Xor(), input, sign),
+ sign);
}
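Int32Abs emits the standard branch-free absolute value. A scalar sketch (it relies on an arithmetic right shift of negative values, which is implementation-defined before C++20 but universal on the targets involved):

    #include <cstdint>
    #include <cstdio>

    int32_t Abs32(int32_t input) {
      int32_t sign = input >> 31;    // 0 for non-negative input, -1 otherwise
      return (input ^ sign) - sign;  // x ^ -1 == ~x, and ~x - (-1) == -x
    }

    int main() {
      // Note: Abs32(INT32_MIN) wraps back to INT32_MIN, matching the machine op.
      std::printf("%d %d\n", Abs32(-42), Abs32(42));  // 42 42
      return 0;
    }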
Node* SimplifiedLowering::Int32Div(Node* const node) {
@@ -2110,6 +3167,21 @@ Node* SimplifiedLowering::Int32Mod(Node* const node) {
return graph()->NewNode(phi_op, true0, false0, merge0);
}
+Node* SimplifiedLowering::Int32Sign(Node* const node) {
+ Node* const minus_one = jsgraph()->Int32Constant(-1);
+ Node* const zero = jsgraph()->Int32Constant(0);
+ Node* const one = jsgraph()->Int32Constant(1);
+
+ Node* const input = node->InputAt(0);
+
+ return graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32),
+ graph()->NewNode(machine()->Int32LessThan(), input, zero), minus_one,
+ graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32),
+ graph()->NewNode(machine()->Int32LessThan(), zero, input), one,
+ zero));
+}
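Float64Sign above and Int32Sign here both lower to two nested selects; the scalar equivalent is a pair of chained conditionals (sketch only; note that as written, a NaN input to the float variant fails both comparisons and takes the final zero branch):

    #include <cstdint>
    #include <cstdio>

    double SignF64(double input) {
      return input < 0.0 ? -1.0 : (0.0 < input ? 1.0 : 0.0);
    }

    int32_t SignI32(int32_t input) {
      return input < 0 ? -1 : (0 < input ? 1 : 0);
    }

    int main() {
      std::printf("%.1f %d\n", SignF64(-3.5), SignI32(7));  // -1.0 1
      return 0;
    }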
Node* SimplifiedLowering::Uint32Div(Node* const node) {
Uint32BinopMatcher m(node);
@@ -2189,6 +3261,27 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
return graph()->NewNode(phi_op, true0, false0, merge0);
}
+void SimplifiedLowering::DoMax(Node* node, Operator const* op,
+ MachineRepresentation rep) {
+ Node* const lhs = node->InputAt(0);
+ Node* const rhs = node->InputAt(1);
+
+ node->ReplaceInput(0, graph()->NewNode(op, lhs, rhs));
+ DCHECK_EQ(rhs, node->InputAt(1));
+ node->AppendInput(graph()->zone(), lhs);
+ NodeProperties::ChangeOp(node, common()->Select(rep));
+}
+
+void SimplifiedLowering::DoMin(Node* node, Operator const* op,
+ MachineRepresentation rep) {
+ Node* const lhs = node->InputAt(0);
+ Node* const rhs = node->InputAt(1);
+
+ node->InsertInput(graph()->zone(), 0, graph()->NewNode(op, lhs, rhs));
+ DCHECK_EQ(lhs, node->InputAt(1));
+ DCHECK_EQ(rhs, node->InputAt(2));
+ NodeProperties::ChangeOp(node, common()->Select(rep));
+}
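DoMax rewires the two-input node in place into Select(op(lhs, rhs), rhs, lhs), and DoMin into Select(op(lhs, rhs), lhs, rhs). Assuming op is a less-than comparison, the scalar reading is simply:

    // Sketch: DoMax -> (lhs < rhs) ? rhs : lhs, DoMin -> (lhs < rhs) ? lhs : rhs.
    double Max(double lhs, double rhs) { return lhs < rhs ? rhs : lhs; }
    double Min(double lhs, double rhs) { return lhs < rhs ? lhs : rhs; }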
void SimplifiedLowering::DoShift(Node* node, Operator const* op,
Type* rhs_type) {
@@ -2197,7 +3290,41 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
jsgraph()->Int32Constant(0x1f)));
}
- NodeProperties::ChangeOp(node, op);
+ DCHECK(op->HasProperty(Operator::kPure));
+ ChangeToPureOp(node, op);
+}
+
+void SimplifiedLowering::DoStringToNumber(Node* node) {
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable = CodeFactory::StringToNumber(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
+ node->AppendInput(graph()->zone(), graph()->start());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
+Node* SimplifiedLowering::ToNumberCode() {
+ if (!to_number_code_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(isolate());
+ to_number_code_.set(jsgraph()->HeapConstant(callable.code()));
+ }
+ return to_number_code_.get();
+}
+
+Operator const* SimplifiedLowering::ToNumberOperator() {
+ if (!to_number_operator_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ Operator::kNoProperties);
+ to_number_operator_.set(common()->Call(desc));
+ }
+ return to_number_operator_.get();
}
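ToNumberCode and ToNumberOperator memoize their result in the new SetOncePointer fields, so the HeapConstant and the Call operator are built once per graph and shared by every lowered JSToNumber. A minimal sketch of the idiom, with a hypothetical SetOnce<T> standing in for V8's SetOncePointer<T>:

    template <typename T>
    class SetOnce {
     public:
      bool is_set() const { return value_ != nullptr; }
      void set(T* value) { value_ = value; }  // expected to be called only once
      T* get() const { return value_; }

     private:
      T* value_ = nullptr;
    };

    // Usage mirrors ToNumberCode(): build on first request, reuse afterwards.
    //   if (!cache.is_set()) cache.set(BuildExpensiveThing());
    //   return cache.get();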
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 8b711a9659..18c7331219 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -21,6 +21,7 @@ namespace compiler {
// Forward declarations.
class RepresentationChanger;
+class RepresentationSelector;
class SourcePositionTable;
class SimplifiedLowering final {
@@ -31,22 +32,26 @@ class SimplifiedLowering final {
void LowerAllNodes();
+ void DoMax(Node* node, Operator const* op, MachineRepresentation rep);
+ void DoMin(Node* node, Operator const* op, MachineRepresentation rep);
+ void DoJSToNumberTruncatesToFloat64(Node* node,
+ RepresentationSelector* selector);
+ void DoJSToNumberTruncatesToWord32(Node* node,
+ RepresentationSelector* selector);
// TODO(turbofan): The representation can be removed once the result of the
// representation analysis is stored in the node bounds.
void DoLoadBuffer(Node* node, MachineRepresentation rep,
RepresentationChanger* changer);
void DoStoreBuffer(Node* node);
void DoShift(Node* node, Operator const* op, Type* rhs_type);
-
- // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
- // typing hack to support the gigantic "asm.js should be fast without proper
- // verifier"-hack, ... Kill this! Soon! Really soon! I'm serious!
- bool abort_compilation_ = false;
+ void DoStringToNumber(Node* node);
private:
JSGraph* const jsgraph_;
Zone* const zone_;
TypeCache const& type_cache_;
+ SetOncePointer<Node> to_number_code_;
+ SetOncePointer<Operator const> to_number_operator_;
// TODO(danno): SimplifiedLowering shouldn't know anything about the source
// positions table, but must for now since there currently is no other way to
@@ -55,15 +60,18 @@ class SimplifiedLowering final {
// position information via the SourcePositionWrapper like all other reducers.
SourcePositionTable* source_positions_;
- Node* Float64Ceil(Node* const node);
- Node* Float64Floor(Node* const node);
Node* Float64Round(Node* const node);
- Node* Float64Trunc(Node* const node);
+ Node* Float64Sign(Node* const node);
+ Node* Int32Abs(Node* const node);
Node* Int32Div(Node* const node);
Node* Int32Mod(Node* const node);
+ Node* Int32Sign(Node* const node);
Node* Uint32Div(Node* const node);
Node* Uint32Mod(Node* const node);
+ Node* ToNumberCode();
+ Operator const* ToNumberOperator();
+
friend class RepresentationSelector;
Isolate* isolate() { return jsgraph_->isolate(); }
@@ -72,6 +80,7 @@ class SimplifiedLowering final {
Graph* graph() { return jsgraph()->graph(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 012004a8af..d8bd1e0232 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -8,6 +8,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/conversions-inl.h"
#include "src/type-cache.h"
@@ -15,8 +16,25 @@ namespace v8 {
namespace internal {
namespace compiler {
-SimplifiedOperatorReducer::SimplifiedOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph), type_cache_(TypeCache::Get()) {}
+namespace {
+
+Decision DecideObjectIsSmi(Node* const input) {
+ NumberMatcher m(input);
+ if (m.HasValue()) {
+ return IsSmiDouble(m.Value()) ? Decision::kTrue : Decision::kFalse;
+ }
+ if (m.IsAllocate()) return Decision::kFalse;
+ if (m.IsChangeBitToTagged()) return Decision::kFalse;
+ if (m.IsChangeInt31ToTaggedSigned()) return Decision::kTrue;
+ if (m.IsHeapConstant()) return Decision::kFalse;
+ return Decision::kUnknown;
+}
+
+} // namespace
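DecideObjectIsSmi is a tri-state oracle: a number constant in Smi range decides kTrue, a fresh allocation, bit-to-tagged change, or heap constant decides kFalse, and anything else stays kUnknown so the check survives in the graph. A bool could not express the third outcome; a minimal sketch of the shape the consumers below follow:

    #include <cstdio>

    enum class Decision { kUnknown, kTrue, kFalse };

    // Fold when the oracle is decisive, otherwise keep the node (sketch).
    const char* Fold(Decision d) {
      switch (d) {
        case Decision::kTrue:    return "replace with true";
        case Decision::kFalse:   return "replace with false";
        case Decision::kUnknown: return "keep the check";
      }
      return "";
    }

    int main() {
      std::printf("%s\n", Fold(Decision::kUnknown));  // keep the check
      return 0;
    }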
+
+SimplifiedOperatorReducer::SimplifiedOperatorReducer(Editor* editor,
+ JSGraph* jsgraph)
+ : AdvancedReducer(editor), jsgraph_(jsgraph) {}
SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
@@ -25,40 +43,45 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
HeapObjectMatcher m(node->InputAt(0));
- if (m.HasValue()) {
- return Replace(jsgraph()->BooleanConstant(!m.Value()->BooleanValue()));
- }
+ if (m.Is(factory()->true_value())) return ReplaceBoolean(false);
+ if (m.Is(factory()->false_value())) return ReplaceBoolean(true);
if (m.IsBooleanNot()) return Replace(m.InputAt(0));
break;
}
- case IrOpcode::kChangeBitToBool: {
+ case IrOpcode::kChangeBitToTagged: {
Int32Matcher m(node->InputAt(0));
if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
- if (m.IsChangeBoolToBit()) return Replace(m.InputAt(0));
+ if (m.IsChangeTaggedToBit()) return Replace(m.InputAt(0));
break;
}
- case IrOpcode::kChangeBoolToBit: {
+ case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(m.Value()->BooleanValue());
- if (m.IsChangeBitToBool()) return Replace(m.InputAt(0));
+ if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToTagged: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.IsChangeTaggedToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kChangeInt31ToTaggedSigned:
case IrOpcode::kChangeInt32ToTagged: {
Int32Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceNumber(m.Value());
+ if (m.IsChangeTaggedToInt32() || m.IsChangeTaggedSignedToInt32()) {
+ return Replace(m.InputAt(0));
+ }
break;
}
- case IrOpcode::kChangeTaggedToFloat64: {
+ case IrOpcode::kChangeTaggedToFloat64:
+ case IrOpcode::kTruncateTaggedToFloat64: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(m.Value());
if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
- if (m.IsChangeInt32ToTagged()) {
+ if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
return Change(node, machine()->ChangeInt32ToFloat64(), m.InputAt(0));
}
if (m.IsChangeUint32ToTagged()) {
@@ -72,7 +95,9 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.IsChangeFloat64ToTagged()) {
return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
- if (m.IsChangeInt32ToTagged()) return Replace(m.InputAt(0));
+ if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
+ return Replace(m.InputAt(0));
+ }
break;
}
case IrOpcode::kChangeTaggedToUint32: {
@@ -89,42 +114,83 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
break;
}
- case IrOpcode::kNumberCeil:
- case IrOpcode::kNumberFloor:
- case IrOpcode::kNumberRound:
- case IrOpcode::kNumberTrunc: {
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetType(input);
- if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ case IrOpcode::kTruncateTaggedToWord32: {
+ NumberMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+ if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged() ||
+ m.IsChangeUint32ToTagged()) {
+ return Replace(m.InputAt(0));
+ }
+ if (m.IsChangeFloat64ToTagged()) {
+ return Change(node, machine()->TruncateFloat64ToWord32(), m.InputAt(0));
+ }
+ break;
+ }
+ case IrOpcode::kCheckIf: {
+ HeapObjectMatcher m(node->InputAt(0));
+ if (m.Is(factory()->true_value())) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ return Replace(effect);
+ }
+ break;
+ }
+ case IrOpcode::kCheckNumber: {
+ NodeMatcher m(node->InputAt(0));
+ if (m.IsConvertTaggedHoleToUndefined()) {
+ node->ReplaceInput(0, m.InputAt(0));
+ return Changed(node);
+ }
+ break;
+ }
+ case IrOpcode::kCheckTaggedPointer: {
+ Node* const input = node->InputAt(0);
+ if (DecideObjectIsSmi(input) == Decision::kFalse) {
+ ReplaceWithValue(node, input);
return Replace(input);
}
break;
}
- case IrOpcode::kReferenceEqual:
- return ReduceReferenceEqual(node);
- default:
+ case IrOpcode::kCheckTaggedSigned: {
+ Node* const input = node->InputAt(0);
+ if (DecideObjectIsSmi(input) == Decision::kTrue) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ NodeMatcher m(input);
+ if (m.IsConvertTaggedHoleToUndefined()) {
+ node->ReplaceInput(0, m.InputAt(0));
+ return Changed(node);
+ }
+ break;
+ }
+ case IrOpcode::kObjectIsSmi: {
+ Node* const input = node->InputAt(0);
+ switch (DecideObjectIsSmi(input)) {
+ case Decision::kTrue:
+ return ReplaceBoolean(true);
+ case Decision::kFalse:
+ return ReplaceBoolean(false);
+ case Decision::kUnknown:
+ break;
+ }
break;
- }
- return NoChange();
-}
-
-Reduction SimplifiedOperatorReducer::ReduceReferenceEqual(Node* node) {
- DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
- Node* const left = NodeProperties::GetValueInput(node, 0);
- Node* const right = NodeProperties::GetValueInput(node, 1);
- HeapObjectMatcher match_left(left);
- HeapObjectMatcher match_right(right);
- if (match_left.HasValue() && match_right.HasValue()) {
- if (match_left.Value().is_identical_to(match_right.Value())) {
- return Replace(jsgraph()->TrueConstant());
- } else {
- return Replace(jsgraph()->FalseConstant());
}
+ case IrOpcode::kNumberAbs: {
+ NumberMatcher m(node->InputAt(0));
+ if (m.HasValue()) return ReplaceNumber(std::fabs(m.Value()));
+ break;
+ }
+ case IrOpcode::kReferenceEqual: {
+ HeapObjectBinopMatcher m(node);
+ if (m.left().node() == m.right().node()) return ReplaceBoolean(true);
+ break;
+ }
+ default:
+ break;
}
return NoChange();
}
-
Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
Node* a) {
DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
@@ -134,6 +200,9 @@ Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
return Changed(node);
}
+Reduction SimplifiedOperatorReducer::ReplaceBoolean(bool value) {
+ return Replace(jsgraph()->BooleanConstant(value));
+}
Reduction SimplifiedOperatorReducer::ReplaceFloat64(double value) {
return Replace(jsgraph()->Float64Constant(value));
@@ -154,9 +223,15 @@ Reduction SimplifiedOperatorReducer::ReplaceNumber(int32_t value) {
return Replace(jsgraph()->Constant(value));
}
+Factory* SimplifiedOperatorReducer::factory() const {
+ return isolate()->factory();
+}
Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
+Isolate* SimplifiedOperatorReducer::isolate() const {
+ return jsgraph()->isolate();
+}
MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
return jsgraph()->machine();
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 13301c2af5..44bfdff3e3 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -11,7 +11,8 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class TypeCache;
+class Factory;
+class Isolate;
namespace compiler {
@@ -20,10 +21,9 @@ class JSGraph;
class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
-
-class SimplifiedOperatorReducer final : public Reducer {
+class SimplifiedOperatorReducer final : public AdvancedReducer {
public:
- explicit SimplifiedOperatorReducer(JSGraph* jsgraph);
+ SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
~SimplifiedOperatorReducer() final;
Reduction Reduce(Node* node) final;
@@ -32,6 +32,7 @@ class SimplifiedOperatorReducer final : public Reducer {
Reduction ReduceReferenceEqual(Node* node);
Reduction Change(Node* node, const Operator* op, Node* a);
+ Reduction ReplaceBoolean(bool value);
Reduction ReplaceFloat64(double value);
Reduction ReplaceInt32(int32_t value);
Reduction ReplaceUint32(uint32_t value) {
@@ -40,13 +41,14 @@ class SimplifiedOperatorReducer final : public Reducer {
Reduction ReplaceNumber(double value);
Reduction ReplaceNumber(int32_t value);
+ Factory* factory() const;
Graph* graph() const;
+ Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* const jsgraph_;
- TypeCache const& type_cache_;
DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
};
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index daa9501b8c..cf0c3deb56 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -13,6 +13,10 @@ namespace v8 {
namespace internal {
namespace compiler {
+size_t hash_value(BaseTaggedness base_taggedness) {
+ return static_cast<uint8_t>(base_taggedness);
+}
+
std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
switch (base_taggedness) {
case kUntaggedBase:
@@ -84,6 +88,9 @@ BufferAccess const BufferAccessOf(const Operator* op) {
bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
+  // We deliberately exclude the write barrier kind here: this method only
+  // matters for load elimination, and load elimination doesn't care about
+  // the write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
lhs.machine_type == rhs.machine_type;
}
@@ -95,6 +102,9 @@ bool operator!=(FieldAccess const& lhs, FieldAccess const& rhs) {
size_t hash_value(FieldAccess const& access) {
+  // We deliberately exclude the write barrier kind here: this method only
+  // matters for load elimination, and load elimination doesn't care about
+  // the write barrier mode.
return base::hash_combine(access.base_is_tagged, access.offset,
access.machine_type);
}
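The comments above encode a standard invariant: operator== and hash_value must inspect exactly the same fields, otherwise equal keys could hash differently and operator deduplication would break. A self-contained sketch of the pattern with a hypothetical Access struct:

    #include <cstddef>
    #include <functional>

    struct Access {
      int offset;
      int machine_type;
      int write_barrier_kind;  // deliberately not compared and not hashed
    };

    bool operator==(const Access& a, const Access& b) {
      return a.offset == b.offset && a.machine_type == b.machine_type;
    }

    size_t hash_value(const Access& a) {
      // Combines exactly the fields operator== compares, preserving the
      // "a == b implies hash(a) == hash(b)" invariant.
      return std::hash<int>()(a.offset) ^ (std::hash<int>()(a.machine_type) << 1);
    }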
@@ -110,12 +120,24 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
}
#endif
access.type->PrintTo(os);
- os << ", " << access.machine_type << "]";
+ os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
return os;
}
+template <>
+void Operator1<FieldAccess>::PrintParameter(std::ostream& os,
+ PrintVerbosity verbose) const {
+ if (verbose == PrintVerbosity::kVerbose) {
+ os << parameter();
+ } else {
+ os << "[+" << parameter().offset << "]";
+ }
+}
bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
+  // We deliberately exclude the write barrier kind here: this method only
+  // matters for load elimination, and load elimination doesn't care about
+  // the write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged &&
lhs.header_size == rhs.header_size &&
lhs.machine_type == rhs.machine_type;
@@ -128,6 +150,9 @@ bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs) {
size_t hash_value(ElementAccess const& access) {
+  // We deliberately exclude the write barrier kind here: this method only
+  // matters for load elimination, and load elimination doesn't care about
+  // the write barrier mode.
return base::hash_combine(access.base_is_tagged, access.header_size,
access.machine_type);
}
@@ -136,7 +161,7 @@ size_t hash_value(ElementAccess const& access) {
std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", ";
access.type->PrintTo(os);
- os << ", " << access.machine_type;
+ os << ", " << access.machine_type << ", " << access.write_barrier_kind;
return os;
}
@@ -156,87 +181,420 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
return OpParameter<ElementAccess>(op);
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1) \
- V(BooleanToNumber, Operator::kNoProperties, 1) \
- V(NumberEqual, Operator::kCommutative, 2) \
- V(NumberLessThan, Operator::kNoProperties, 2) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
- V(NumberAdd, Operator::kCommutative, 2) \
- V(NumberSubtract, Operator::kNoProperties, 2) \
- V(NumberMultiply, Operator::kCommutative, 2) \
- V(NumberDivide, Operator::kNoProperties, 2) \
- V(NumberModulus, Operator::kNoProperties, 2) \
- V(NumberBitwiseOr, Operator::kCommutative, 2) \
- V(NumberBitwiseXor, Operator::kCommutative, 2) \
- V(NumberBitwiseAnd, Operator::kCommutative, 2) \
- V(NumberShiftLeft, Operator::kNoProperties, 2) \
- V(NumberShiftRight, Operator::kNoProperties, 2) \
- V(NumberShiftRightLogical, Operator::kNoProperties, 2) \
- V(NumberImul, Operator::kNoProperties, 2) \
- V(NumberClz32, Operator::kNoProperties, 1) \
- V(NumberCeil, Operator::kNoProperties, 1) \
- V(NumberFloor, Operator::kNoProperties, 1) \
- V(NumberRound, Operator::kNoProperties, 1) \
- V(NumberTrunc, Operator::kNoProperties, 1) \
- V(NumberToInt32, Operator::kNoProperties, 1) \
- V(NumberToUint32, Operator::kNoProperties, 1) \
- V(NumberIsHoleNaN, Operator::kNoProperties, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
- V(StringToNumber, Operator::kNoProperties, 1) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1) \
- V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
- V(ChangeBoolToBit, Operator::kNoProperties, 1) \
- V(ChangeBitToBool, Operator::kNoProperties, 1) \
- V(ObjectIsNumber, Operator::kNoProperties, 1) \
- V(ObjectIsReceiver, Operator::kNoProperties, 1) \
- V(ObjectIsSmi, Operator::kNoProperties, 1) \
- V(ObjectIsUndetectable, Operator::kNoProperties, 1)
-
-#define NO_THROW_OP_LIST(V) \
- V(StringEqual, Operator::kCommutative, 2) \
- V(StringLessThan, Operator::kNoThrow, 2) \
- V(StringLessThanOrEqual, Operator::kNoThrow, 2)
+ExternalArrayType ExternalArrayTypeOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kLoadTypedElement ||
+ op->opcode() == IrOpcode::kStoreTypedElement);
+ return OpParameter<ExternalArrayType>(op);
+}
+
+size_t hash_value(CheckFloat64HoleMode mode) {
+ return static_cast<size_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream& os, CheckFloat64HoleMode mode) {
+ switch (mode) {
+ case CheckFloat64HoleMode::kAllowReturnHole:
+ return os << "allow-return-hole";
+ case CheckFloat64HoleMode::kNeverReturnHole:
+ return os << "never-return-hole";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kCheckFloat64Hole, op->opcode());
+ return OpParameter<CheckFloat64HoleMode>(op);
+}
+
+CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
+ op->opcode() == IrOpcode::kCheckedInt32Mul ||
+ op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
+ op->opcode() == IrOpcode::kCheckedTaggedToInt32);
+ return OpParameter<CheckForMinusZeroMode>(op);
+}
+
+size_t hash_value(CheckForMinusZeroMode mode) {
+ return static_cast<size_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream& os, CheckForMinusZeroMode mode) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return os << "check-for-minus-zero";
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return os << "dont-check-for-minus-zero";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+size_t hash_value(CheckTaggedInputMode mode) {
+ return static_cast<size_t>(mode);
+}
+
+std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return os << "Number";
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kCheckedTaggedToFloat64, op->opcode());
+ return OpParameter<CheckTaggedInputMode>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, GrowFastElementsFlags flags) {
+ bool empty = true;
+ if (flags & GrowFastElementsFlag::kArrayObject) {
+ os << "ArrayObject";
+ empty = false;
+ }
+ if (flags & GrowFastElementsFlag::kDoubleElements) {
+ if (!empty) os << "|";
+ os << "DoubleElements";
+ empty = false;
+ }
+ if (flags & GrowFastElementsFlag::kHoleyElements) {
+ if (!empty) os << "|";
+ os << "HoleyElements";
+ empty = false;
+ }
+ if (empty) os << "None";
+ return os;
+}
+
+GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kMaybeGrowFastElements, op->opcode());
+ return OpParameter<GrowFastElementsFlags>(op);
+}
+
+size_t hash_value(ElementsTransition transition) {
+ return static_cast<uint8_t>(transition);
+}
+
+std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
+ switch (transition) {
+ case ElementsTransition::kFastTransition:
+ return os << "fast-transition";
+ case ElementsTransition::kSlowTransition:
+ return os << "slow-transition";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+ElementsTransition ElementsTransitionOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kTransitionElementsKind, op->opcode());
+ return OpParameter<ElementsTransition>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
+ switch (hint) {
+ case NumberOperationHint::kSignedSmall:
+ return os << "SignedSmall";
+ case NumberOperationHint::kSigned32:
+ return os << "Signed32";
+ case NumberOperationHint::kNumber:
+ return os << "Number";
+ case NumberOperationHint::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+size_t hash_value(NumberOperationHint hint) {
+ return static_cast<uint8_t>(hint);
+}
+
+NumberOperationHint NumberOperationHintOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ op->opcode() == IrOpcode::kSpeculativeNumberSubtract ||
+ op->opcode() == IrOpcode::kSpeculativeNumberMultiply ||
+ op->opcode() == IrOpcode::kSpeculativeNumberDivide ||
+ op->opcode() == IrOpcode::kSpeculativeNumberModulus ||
+ op->opcode() == IrOpcode::kSpeculativeNumberShiftLeft ||
+ op->opcode() == IrOpcode::kSpeculativeNumberShiftRight ||
+ op->opcode() == IrOpcode::kSpeculativeNumberShiftRightLogical ||
+ op->opcode() == IrOpcode::kSpeculativeNumberBitwiseAnd ||
+ op->opcode() == IrOpcode::kSpeculativeNumberBitwiseOr ||
+ op->opcode() == IrOpcode::kSpeculativeNumberBitwiseXor ||
+ op->opcode() == IrOpcode::kSpeculativeNumberEqual ||
+ op->opcode() == IrOpcode::kSpeculativeNumberLessThan ||
+ op->opcode() == IrOpcode::kSpeculativeNumberLessThanOrEqual);
+ return OpParameter<NumberOperationHint>(op);
+}
+
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1, 0) \
+ V(NumberEqual, Operator::kCommutative, 2, 0) \
+ V(NumberLessThan, Operator::kNoProperties, 2, 0) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(NumberAdd, Operator::kCommutative, 2, 0) \
+ V(NumberSubtract, Operator::kNoProperties, 2, 0) \
+ V(NumberMultiply, Operator::kCommutative, 2, 0) \
+ V(NumberDivide, Operator::kNoProperties, 2, 0) \
+ V(NumberModulus, Operator::kNoProperties, 2, 0) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
+ V(NumberImul, Operator::kCommutative, 2, 0) \
+ V(NumberAbs, Operator::kNoProperties, 1, 0) \
+ V(NumberClz32, Operator::kNoProperties, 1, 0) \
+ V(NumberCeil, Operator::kNoProperties, 1, 0) \
+ V(NumberFloor, Operator::kNoProperties, 1, 0) \
+ V(NumberFround, Operator::kNoProperties, 1, 0) \
+ V(NumberAcos, Operator::kNoProperties, 1, 0) \
+ V(NumberAcosh, Operator::kNoProperties, 1, 0) \
+ V(NumberAsin, Operator::kNoProperties, 1, 0) \
+ V(NumberAsinh, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan2, Operator::kNoProperties, 2, 0) \
+ V(NumberAtanh, Operator::kNoProperties, 1, 0) \
+ V(NumberCbrt, Operator::kNoProperties, 1, 0) \
+ V(NumberCos, Operator::kNoProperties, 1, 0) \
+ V(NumberCosh, Operator::kNoProperties, 1, 0) \
+ V(NumberExp, Operator::kNoProperties, 1, 0) \
+ V(NumberExpm1, Operator::kNoProperties, 1, 0) \
+ V(NumberLog, Operator::kNoProperties, 1, 0) \
+ V(NumberLog1p, Operator::kNoProperties, 1, 0) \
+ V(NumberLog10, Operator::kNoProperties, 1, 0) \
+ V(NumberLog2, Operator::kNoProperties, 1, 0) \
+ V(NumberMax, Operator::kNoProperties, 2, 0) \
+ V(NumberMin, Operator::kNoProperties, 2, 0) \
+ V(NumberPow, Operator::kNoProperties, 2, 0) \
+ V(NumberRound, Operator::kNoProperties, 1, 0) \
+ V(NumberSign, Operator::kNoProperties, 1, 0) \
+ V(NumberSin, Operator::kNoProperties, 1, 0) \
+ V(NumberSinh, Operator::kNoProperties, 1, 0) \
+ V(NumberSqrt, Operator::kNoProperties, 1, 0) \
+ V(NumberTan, Operator::kNoProperties, 1, 0) \
+ V(NumberTanh, Operator::kNoProperties, 1, 0) \
+ V(NumberTrunc, Operator::kNoProperties, 1, 0) \
+ V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint32, Operator::kNoProperties, 1, 0) \
+ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsString, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+ V(ReferenceEqual, Operator::kCommutative, 2, 0) \
+ V(StringEqual, Operator::kCommutative, 2, 0) \
+ V(StringLessThan, Operator::kNoProperties, 2, 0) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0)
+
+#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
+ V(SpeculativeNumberEqual) \
+ V(SpeculativeNumberLessThan) \
+ V(SpeculativeNumberLessThanOrEqual)
+
+#define CHECKED_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckIf, 1, 0) \
+ V(CheckNumber, 1, 1) \
+ V(CheckString, 1, 1) \
+ V(CheckTaggedHole, 1, 1) \
+ V(CheckTaggedPointer, 1, 1) \
+ V(CheckTaggedSigned, 1, 1) \
+ V(CheckedInt32Add, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
+ V(CheckedInt32Div, 2, 1) \
+ V(CheckedInt32Mod, 2, 1) \
+ V(CheckedUint32Div, 2, 1) \
+ V(CheckedUint32Mod, 2, 1) \
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTruncateTaggedToWord32, 1, 1)
struct SimplifiedOperatorGlobalCache final {
-#define PURE(Name, properties, input_count) \
+#define PURE(Name, properties, value_input_count, control_input_count) \
struct Name##Operator final : public Operator { \
Name##Operator() \
: Operator(IrOpcode::k##Name, Operator::kPure | properties, #Name, \
- input_count, 0, 0, 1, 0, 0) {} \
+ value_input_count, 0, control_input_count, 1, 0, 0) {} \
}; \
Name##Operator k##Name;
PURE_OP_LIST(PURE)
#undef PURE
-#define NO_THROW(Name, properties, input_count) \
- struct Name##Operator final : public Operator { \
- Name##Operator() \
- : Operator(IrOpcode::k##Name, Operator::kNoThrow | properties, #Name, \
- input_count, 1, 1, 1, 1, 0) {} \
- }; \
+#define CHECKED(Name, value_input_count, value_output_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, \
+ Operator::kFoldable | Operator::kNoThrow, #Name, \
+ value_input_count, 1, 1, value_output_count, 1, 0) {} \
+ }; \
Name##Operator k##Name;
- NO_THROW_OP_LIST(NO_THROW)
-#undef NO_THROW
+ CHECKED_OP_LIST(CHECKED)
+#undef CHECKED
+
+ template <CheckForMinusZeroMode kMode>
+ struct ChangeFloat64ToTaggedOperator final
+ : public Operator1<CheckForMinusZeroMode> {
+ ChangeFloat64ToTaggedOperator()
+ : Operator1<CheckForMinusZeroMode>(
+ IrOpcode::kChangeFloat64ToTagged, Operator::kPure,
+ "ChangeFloat64ToTagged", 1, 0, 0, 1, 0, 0, kMode) {}
+ };
+ ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kCheckForMinusZero>
+ kChangeFloat64ToTaggedCheckForMinusZeroOperator;
+ ChangeFloat64ToTaggedOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+ kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+
+ template <CheckForMinusZeroMode kMode>
+ struct CheckedInt32MulOperator final
+ : public Operator1<CheckForMinusZeroMode> {
+ CheckedInt32MulOperator()
+ : Operator1<CheckForMinusZeroMode>(
+ IrOpcode::kCheckedInt32Mul,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedInt32Mul", 2, 1,
+ 1, 1, 1, 0, kMode) {}
+ };
+ CheckedInt32MulOperator<CheckForMinusZeroMode::kCheckForMinusZero>
+ kCheckedInt32MulCheckForMinusZeroOperator;
+ CheckedInt32MulOperator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+ kCheckedInt32MulDontCheckForMinusZeroOperator;
+
+ template <CheckForMinusZeroMode kMode>
+ struct CheckedFloat64ToInt32Operator final
+ : public Operator1<CheckForMinusZeroMode> {
+ CheckedFloat64ToInt32Operator()
+ : Operator1<CheckForMinusZeroMode>(
+ IrOpcode::kCheckedFloat64ToInt32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32",
+ 1, 1, 1, 1, 1, 0, kMode) {}
+ };
+ CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
+ kCheckedFloat64ToInt32CheckForMinusZeroOperator;
+ CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+ kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+
+ template <CheckForMinusZeroMode kMode>
+ struct CheckedTaggedToInt32Operator final
+ : public Operator1<CheckForMinusZeroMode> {
+ CheckedTaggedToInt32Operator()
+ : Operator1<CheckForMinusZeroMode>(
+ IrOpcode::kCheckedTaggedToInt32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt32",
+ 1, 1, 1, 1, 1, 0, kMode) {}
+ };
+ CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
+ kCheckedTaggedToInt32CheckForMinusZeroOperator;
+ CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kDontCheckForMinusZero>
+ kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+
+ template <CheckTaggedInputMode kMode>
+ struct CheckedTaggedToFloat64Operator final
+ : public Operator1<CheckTaggedInputMode> {
+ CheckedTaggedToFloat64Operator()
+ : Operator1<CheckTaggedInputMode>(
+ IrOpcode::kCheckedTaggedToFloat64,
+ Operator::kFoldable | Operator::kNoThrow,
+ "CheckedTaggedToFloat64", 1, 1, 1, 1, 1, 0, kMode) {}
+ };
+ CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumber>
+ kCheckedTaggedToFloat64NumberOperator;
+ CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrOddball>
+ kCheckedTaggedToFloat64NumberOrOddballOperator;
+
+ template <CheckFloat64HoleMode kMode>
+ struct CheckFloat64HoleNaNOperator final
+ : public Operator1<CheckFloat64HoleMode> {
+ CheckFloat64HoleNaNOperator()
+ : Operator1<CheckFloat64HoleMode>(
+ IrOpcode::kCheckFloat64Hole,
+ Operator::kFoldable | Operator::kNoThrow, "CheckFloat64Hole", 1,
+ 1, 1, 1, 1, 0, kMode) {}
+ };
+ CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kAllowReturnHole>
+ kCheckFloat64HoleAllowReturnHoleOperator;
+ CheckFloat64HoleNaNOperator<CheckFloat64HoleMode::kNeverReturnHole>
+ kCheckFloat64HoleNeverReturnHoleOperator;
+
+ template <PretenureFlag kPretenure>
+ struct AllocateOperator final : public Operator1<PretenureFlag> {
+ AllocateOperator()
+ : Operator1<PretenureFlag>(
+ IrOpcode::kAllocate,
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ "Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
+ };
+ AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
+ AllocateOperator<TENURED> kAllocateTenuredOperator;
+
+ struct EnsureWritableFastElementsOperator final : public Operator {
+ EnsureWritableFastElementsOperator()
+ : Operator( // --
+ IrOpcode::kEnsureWritableFastElements, // opcode
+ Operator::kNoDeopt | Operator::kNoThrow, // flags
+ "EnsureWritableFastElements", // name
+ 2, 1, 1, 1, 1, 0) {} // counts
+ };
+ EnsureWritableFastElementsOperator kEnsureWritableFastElements;
+
+#define SPECULATIVE_NUMBER_BINOP(Name) \
+ template <NumberOperationHint kHint> \
+ struct Name##Operator final : public Operator1<NumberOperationHint> { \
+ Name##Operator() \
+ : Operator1<NumberOperationHint>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
+ #Name, 2, 1, 1, 1, 1, 0, kHint) {} \
+ }; \
+ Name##Operator<NumberOperationHint::kSignedSmall> \
+ k##Name##SignedSmallOperator; \
+ Name##Operator<NumberOperationHint::kSigned32> k##Name##Signed32Operator; \
+ Name##Operator<NumberOperationHint::kNumber> k##Name##NumberOperator; \
+ Name##Operator<NumberOperationHint::kNumberOrOddball> \
+ k##Name##NumberOrOddballOperator;
+ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+#undef SPECULATIVE_NUMBER_BINOP
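The global cache instantiates one statically constructed operator per (opcode, parameter) combination, so the builder methods further down can hand out pointers to shared, immutable singletons instead of zone-allocating a fresh operator on every call, and operators with equal parameters compare equal by identity. A hypothetical miniature of the pattern:

    #include <cstdio>

    enum class Hint { kSignedSmall, kSigned32, kNumber, kNumberOrOddball };

    struct Op {
      const char* name;
      Hint hint;
    };

    struct Cache {
      Op add_signed_small{"SpeculativeNumberAdd", Hint::kSignedSmall};
      Op add_signed32{"SpeculativeNumberAdd", Hint::kSigned32};
      // ...one member per hint, as the macro expansion generates...
    };

    const Op* SpeculativeNumberAdd(const Cache& cache, Hint hint) {
      switch (hint) {
        case Hint::kSignedSmall: return &cache.add_signed_small;
        case Hint::kSigned32:    return &cache.add_signed32;
        default:                 return nullptr;  // remaining hints elided
      }
    }

    int main() {
      Cache cache;
      std::printf("%s\n", SpeculativeNumberAdd(cache, Hint::kSigned32)->name);
      return 0;
    }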
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
- : Operator1<BufferAccess>(IrOpcode::kLoadBuffer, \
- Operator::kNoThrow | Operator::kNoWrite, \
- "LoadBuffer", 3, 1, 1, 1, 1, 0, \
- BufferAccess(kExternal##Type##Array)) {} \
+ : Operator1<BufferAccess>( \
+ IrOpcode::kLoadBuffer, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "LoadBuffer", 3, 1, 1, 1, 1, 0, \
+ BufferAccess(kExternal##Type##Array)) {} \
}; \
struct StoreBuffer##Type##Operator final : public Operator1<BufferAccess> { \
StoreBuffer##Type##Operator() \
- : Operator1<BufferAccess>(IrOpcode::kStoreBuffer, \
- Operator::kNoRead | Operator::kNoThrow, \
- "StoreBuffer", 4, 1, 1, 0, 1, 0, \
- BufferAccess(kExternal##Type##Array)) {} \
+ : Operator1<BufferAccess>( \
+ IrOpcode::kStoreBuffer, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "StoreBuffer", 4, 1, 1, 0, 1, 0, \
+ BufferAccess(kExternal##Type##Array)) {} \
}; \
LoadBuffer##Type##Operator kLoadBuffer##Type; \
StoreBuffer##Type##Operator kStoreBuffer##Type;
@@ -252,25 +610,129 @@ static base::LazyInstance<SimplifiedOperatorGlobalCache>::type kCache =
SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
: cache_(kCache.Get()), zone_(zone) {}
-
-#define GET_FROM_CACHE(Name, properties, input_count) \
+#define GET_FROM_CACHE(Name, ...) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
PURE_OP_LIST(GET_FROM_CACHE)
-NO_THROW_OP_LIST(GET_FROM_CACHE)
+CHECKED_OP_LIST(GET_FROM_CACHE)
#undef GET_FROM_CACHE
+const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
+ CheckForMinusZeroMode mode) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kChangeFloat64ToTaggedCheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
+ CheckForMinusZeroMode mode) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedInt32MulCheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedInt32MulDontCheckForMinusZeroOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
+ CheckForMinusZeroMode mode) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
+ CheckForMinusZeroMode mode) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
-const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
- return new (zone()) Operator(IrOpcode::kReferenceEqual,
- Operator::kCommutative | Operator::kPure,
- "ReferenceEqual", 2, 0, 0, 1, 0, 0);
+const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
+ CheckTaggedInputMode mode) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTaggedToFloat64NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
}
+const Operator* SimplifiedOperatorBuilder::CheckMaps(int map_input_count) {
+ // TODO(bmeurer): Cache the most important versions of this operator.
+ DCHECK_LT(0, map_input_count);
+ int const value_input_count = 1 + map_input_count;
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kCheckMaps, // opcode
+ Operator::kNoThrow | Operator::kNoWrite, // flags
+ "CheckMaps", // name
+ value_input_count, 1, 1, 0, 1, 0, // counts
+ map_input_count); // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
+ CheckFloat64HoleMode mode) {
+ switch (mode) {
+ case CheckFloat64HoleMode::kAllowReturnHole:
+ return &cache_.kCheckFloat64HoleAllowReturnHoleOperator;
+ case CheckFloat64HoleMode::kNeverReturnHole:
+ return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
+ return &cache_.kEnsureWritableFastElements;
+}
+
+const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements(
+ GrowFastElementsFlags flags) {
+ return new (zone()) Operator1<GrowFastElementsFlags>( // --
+ IrOpcode::kMaybeGrowFastElements, // opcode
+ Operator::kNoThrow, // flags
+ "MaybeGrowFastElements", // name
+ 4, 1, 1, 1, 1, 0, // counts
+ flags); // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
+ ElementsTransition transition) {
+ return new (zone()) Operator1<ElementsTransition>( // --
+ IrOpcode::kTransitionElementsKind, // opcode
+ Operator::kNoDeopt | Operator::kNoThrow, // flags
+ "TransitionElementsKind", // name
+ 3, 1, 1, 0, 1, 0, // counts
+ transition); // parameter
+}
const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
- return new (zone())
- Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
- "Allocate", 1, 1, 1, 1, 1, 0, pretenure);
+ switch (pretenure) {
+ case NOT_TENURED:
+ return &cache_.kAllocateNotTenuredOperator;
+ case TENURED:
+ return &cache_.kAllocateTenuredOperator;
+ }
+ UNREACHABLE();
+ return nullptr;
}
@@ -299,19 +761,38 @@ const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
return nullptr;
}
+#define SPECULATIVE_NUMBER_BINOP(Name) \
+ const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \
+ switch (hint) { \
+ case NumberOperationHint::kSignedSmall: \
+ return &cache_.k##Name##SignedSmallOperator; \
+ case NumberOperationHint::kSigned32: \
+ return &cache_.k##Name##Signed32Operator; \
+ case NumberOperationHint::kNumber: \
+ return &cache_.k##Name##NumberOperator; \
+ case NumberOperationHint::kNumberOrOddball: \
+ return &cache_.k##Name##NumberOrOddballOperator; \
+ } \
+ UNREACHABLE(); \
+ return nullptr; \
+ }
+SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
+#undef SPECULATIVE_NUMBER_BINOP
-#define ACCESS_OP_LIST(V) \
- V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
- V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
- V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
- V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
-
+#define ACCESS_OP_LIST(V) \
+ V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1) \
+ V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0) \
+ V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
+ V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
+ V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+ V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
output_count) \
const Operator* SimplifiedOperatorBuilder::Name(const Type& access) { \
return new (zone()) \
- Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties, \
+ Operator1<Type>(IrOpcode::k##Name, \
+ Operator::kNoDeopt | Operator::kNoThrow | properties, \
#Name, value_input_count, 1, control_input_count, \
output_count, 1, 0, access); \
}
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index a39d864914..5e7fa75827 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -7,6 +7,7 @@
#include <iosfwd>
+#include "src/compiler/operator.h"
#include "src/handles.h"
#include "src/machine-type.h"
#include "src/objects.h"
@@ -25,8 +26,9 @@ namespace compiler {
class Operator;
struct SimplifiedOperatorGlobalCache;
-enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+enum BaseTaggedness : uint8_t { kUntaggedBase, kTaggedBase };
+size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
@@ -63,6 +65,7 @@ struct FieldAccess {
MaybeHandle<Name> name; // debugging only.
Type* type; // type of the field.
MachineType machine_type; // machine type of the field.
+ WriteBarrierKind write_barrier_kind; // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -76,6 +79,9 @@ std::ostream& operator<<(std::ostream&, FieldAccess const&);
FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+template <>
+void Operator1<FieldAccess>::PrintParameter(std::ostream& os,
+ PrintVerbosity verbose) const;
// An access descriptor for loads/stores of indexed structures like characters
// in strings or off-heap backing stores. Accesses from either tagged or
@@ -86,6 +92,7 @@ struct ElementAccess {
int header_size; // size of the header, without tag.
Type* type; // type of the element.
MachineType machine_type; // machine type of the element.
+ WriteBarrierKind write_barrier_kind; // write barrier hint.
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -99,6 +106,83 @@ std::ostream& operator<<(std::ostream&, ElementAccess const&);
ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
+
+enum class CheckFloat64HoleMode : uint8_t {
+ kNeverReturnHole, // Never return the hole (deoptimize instead).
+ kAllowReturnHole // Allow returning the hole (signaling NaN).
+};
+
+size_t hash_value(CheckFloat64HoleMode);
+
+std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode);
+
+CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*) WARN_UNUSED_RESULT;
+
+enum class CheckTaggedInputMode : uint8_t {
+ kNumber,
+ kNumberOrOddball,
+};
+
+size_t hash_value(CheckTaggedInputMode);
+
+std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
+
+CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*) WARN_UNUSED_RESULT;
+
+enum class CheckForMinusZeroMode : uint8_t {
+ kCheckForMinusZero,
+ kDontCheckForMinusZero,
+};
+
+size_t hash_value(CheckForMinusZeroMode);
+
+std::ostream& operator<<(std::ostream&, CheckForMinusZeroMode);
+
+CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+
+// A descriptor for growing elements backing stores.
+enum class GrowFastElementsFlag : uint8_t {
+ kNone = 0u,
+ kArrayObject = 1u << 0, // Update JSArray::length field.
+ kHoleyElements = 1u << 1, // Backing store is holey.
+ kDoubleElements = 1u << 2, // Backing store contains doubles.
+};
+typedef base::Flags<GrowFastElementsFlag> GrowFastElementsFlags;
+
+DEFINE_OPERATORS_FOR_FLAGS(GrowFastElementsFlags)
+
+std::ostream& operator<<(std::ostream&, GrowFastElementsFlags);
+
+GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator*)
+ WARN_UNUSED_RESULT;
+
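+// For illustration, the flags combine with the bitwise operators provided by
+// DEFINE_OPERATORS_FOR_FLAGS above, e.g. (hypothetical call site):
+//
+// GrowFastElementsFlags flags = GrowFastElementsFlag::kArrayObject |
+// GrowFastElementsFlag::kHoleyElements;
+// const Operator* op = builder->MaybeGrowFastElements(flags);
+//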
+// A descriptor for elements kind transitions.
+enum class ElementsTransition : uint8_t {
+ kFastTransition, // simple transition, just updating the map.
+ kSlowTransition // full transition, round-trip to the runtime.
+};
+
+size_t hash_value(ElementsTransition);
+
+std::ostream& operator<<(std::ostream&, ElementsTransition);
+
+ElementsTransition ElementsTransitionOf(const Operator* op) WARN_UNUSED_RESULT;
+
+// A hint for speculative number operations.
+enum class NumberOperationHint : uint8_t {
+ kSignedSmall, // Inputs were always Smi so far, output was in Smi range.
+ kSigned32, // Inputs and output were Signed32 so far.
+ kNumber, // Inputs were Number, output was Number.
+ kNumberOrOddball, // Inputs were Number or Oddball, output was Number.
+};
+
+size_t hash_value(NumberOperationHint);
+
+std::ostream& operator<<(std::ostream&, NumberOperationHint);
+
+NumberOperationHint NumberOperationHintOf(const Operator* op)
+ WARN_UNUSED_RESULT;
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
@@ -127,7 +211,6 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
explicit SimplifiedOperatorBuilder(Zone* zone);
const Operator* BooleanNot();
- const Operator* BooleanToNumber();
const Operator* NumberEqual();
const Operator* NumberLessThan();
@@ -144,38 +227,126 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* NumberShiftRight();
const Operator* NumberShiftRightLogical();
const Operator* NumberImul();
+ const Operator* NumberAbs();
const Operator* NumberClz32();
const Operator* NumberCeil();
const Operator* NumberFloor();
+ const Operator* NumberFround();
+ const Operator* NumberAcos();
+ const Operator* NumberAcosh();
+ const Operator* NumberAsin();
+ const Operator* NumberAsinh();
+ const Operator* NumberAtan();
+ const Operator* NumberAtan2();
+ const Operator* NumberAtanh();
+ const Operator* NumberCbrt();
+ const Operator* NumberCos();
+ const Operator* NumberCosh();
+ const Operator* NumberExp();
+ const Operator* NumberExpm1();
+ const Operator* NumberLog();
+ const Operator* NumberLog1p();
+ const Operator* NumberLog10();
+ const Operator* NumberLog2();
+ const Operator* NumberMax();
+ const Operator* NumberMin();
+ const Operator* NumberPow();
const Operator* NumberRound();
+ const Operator* NumberSign();
+ const Operator* NumberSin();
+ const Operator* NumberSinh();
+ const Operator* NumberSqrt();
+ const Operator* NumberTan();
+ const Operator* NumberTanh();
const Operator* NumberTrunc();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
- const Operator* NumberIsHoleNaN();
- const Operator* PlainPrimitiveToNumber();
+ const Operator* NumberSilenceNaN();
- const Operator* ReferenceEqual(Type* type);
+ const Operator* SpeculativeNumberAdd(NumberOperationHint hint);
+ const Operator* SpeculativeNumberSubtract(NumberOperationHint hint);
+ const Operator* SpeculativeNumberMultiply(NumberOperationHint hint);
+ const Operator* SpeculativeNumberDivide(NumberOperationHint hint);
+ const Operator* SpeculativeNumberModulus(NumberOperationHint hint);
+ const Operator* SpeculativeNumberShiftLeft(NumberOperationHint hint);
+ const Operator* SpeculativeNumberShiftRight(NumberOperationHint hint);
+ const Operator* SpeculativeNumberShiftRightLogical(NumberOperationHint hint);
+ const Operator* SpeculativeNumberBitwiseAnd(NumberOperationHint hint);
+ const Operator* SpeculativeNumberBitwiseOr(NumberOperationHint hint);
+ const Operator* SpeculativeNumberBitwiseXor(NumberOperationHint hint);
+
+ const Operator* SpeculativeNumberLessThan(NumberOperationHint hint);
+ const Operator* SpeculativeNumberLessThanOrEqual(NumberOperationHint hint);
+ const Operator* SpeculativeNumberEqual(NumberOperationHint hint);
+
+ const Operator* ReferenceEqual();
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
- const Operator* StringToNumber();
+ const Operator* StringCharCodeAt();
+ const Operator* StringFromCharCode();
+ const Operator* PlainPrimitiveToNumber();
+ const Operator* PlainPrimitiveToWord32();
+ const Operator* PlainPrimitiveToFloat64();
+
+ const Operator* ChangeTaggedSignedToInt32();
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToUint32();
const Operator* ChangeTaggedToFloat64();
+ const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeUint32ToTagged();
- const Operator* ChangeFloat64ToTagged();
- const Operator* ChangeBoolToBit();
- const Operator* ChangeBitToBool();
-
+ const Operator* ChangeFloat64ToTagged(CheckForMinusZeroMode);
+ const Operator* ChangeTaggedToBit();
+ const Operator* ChangeBitToTagged();
+ const Operator* TruncateTaggedToWord32();
+ const Operator* TruncateTaggedToFloat64();
+
+ const Operator* CheckIf();
+ const Operator* CheckBounds();
+ const Operator* CheckMaps(int map_input_count);
+ const Operator* CheckNumber();
+ const Operator* CheckString();
+ const Operator* CheckTaggedPointer();
+ const Operator* CheckTaggedSigned();
+
+ const Operator* CheckedInt32Add();
+ const Operator* CheckedInt32Sub();
+ const Operator* CheckedInt32Div();
+ const Operator* CheckedInt32Mod();
+ const Operator* CheckedUint32Div();
+ const Operator* CheckedUint32Mod();
+ const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
+ const Operator* CheckedUint32ToInt32();
+ const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode);
+ const Operator* CheckedTaggedSignedToInt32();
+ const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
+ const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
+ const Operator* CheckedTruncateTaggedToWord32();
+
+ const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
+ const Operator* CheckTaggedHole();
+ const Operator* ConvertTaggedHoleToUndefined();
+
+ const Operator* ObjectIsCallable();
const Operator* ObjectIsNumber();
const Operator* ObjectIsReceiver();
const Operator* ObjectIsSmi();
+ const Operator* ObjectIsString();
const Operator* ObjectIsUndetectable();
+ // ensure-writable-fast-elements object, elements
+ const Operator* EnsureWritableFastElements();
+
+ // maybe-grow-fast-elements object, elements, index, length
+ const Operator* MaybeGrowFastElements(GrowFastElementsFlags flags);
+
+ // transition-elements-kind object, from-map, to-map
+ const Operator* TransitionElementsKind(ElementsTransition transition);
+
const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
const Operator* LoadField(FieldAccess const&);
@@ -187,12 +358,18 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
// store-buffer buffer, offset, length, value
const Operator* StoreBuffer(BufferAccess);
- // load-element [base + index], length
+ // load-element [base + index]
const Operator* LoadElement(ElementAccess const&);
- // store-element [base + index], length, value
+ // store-element [base + index], value
const Operator* StoreElement(ElementAccess const&);
+ // load-typed-element buffer, [base + external + index]
+ const Operator* LoadTypedElement(ExternalArrayType const&);
+
+ // store-typed-element buffer, [base + external + index], value
+ const Operator* StoreTypedElement(ExternalArrayType const&);
+
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/source-position.cc b/deps/v8/src/compiler/source-position.cc
index 48361ecac7..80f180076d 100644
--- a/deps/v8/src/compiler/source-position.cc
+++ b/deps/v8/src/compiler/source-position.cc
@@ -16,7 +16,8 @@ class SourcePositionTable::Decorator final : public GraphDecorator {
: source_positions_(source_positions) {}
void Decorate(Node* node) final {
- source_positions_->table_.Set(node, source_positions_->current_position_);
+ source_positions_->SetSourcePosition(node,
+ source_positions_->current_position_);
}
private:
@@ -49,6 +50,10 @@ SourcePosition SourcePositionTable::GetSourcePosition(Node* node) const {
return table_.Get(node);
}
+void SourcePositionTable::SetSourcePosition(Node* node,
+ SourcePosition position) {
+ table_.Set(node, position);
+}
void SourcePositionTable::Print(std::ostream& os) const {
os << "{";
diff --git a/deps/v8/src/compiler/source-position.h b/deps/v8/src/compiler/source-position.h
index 81db1d2e3e..d4df7835ef 100644
--- a/deps/v8/src/compiler/source-position.h
+++ b/deps/v8/src/compiler/source-position.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_SOURCE_POSITION_H_
#define V8_COMPILER_SOURCE_POSITION_H_
-#include "src/assembler.h"
#include "src/compiler/node-aux-data.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -25,7 +25,7 @@ class SourcePosition final {
int raw() const { return raw_; }
private:
- static const int kUnknownPosition = RelocInfo::kNoPosition;
+ static const int kUnknownPosition = kNoSourcePosition;
int raw_;
};
@@ -38,8 +38,7 @@ inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
return !(lhs == rhs);
}
-
-class SourcePositionTable final {
+class SourcePositionTable final : public ZoneObject {
public:
class Scope final {
public:
@@ -66,14 +65,12 @@ class SourcePositionTable final {
};
explicit SourcePositionTable(Graph* graph);
- ~SourcePositionTable() {
- if (decorator_) RemoveDecorator();
- }
void AddDecorator();
void RemoveDecorator();
SourcePosition GetSourcePosition(Node* node) const;
+ void SetSourcePosition(Node* node, SourcePosition position);
void Print(std::ostream& os) const;
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
new file mode 100644
index 0000000000..98904b05b5
--- /dev/null
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -0,0 +1,570 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iterator>
+
+#include "src/compiler/store-store-elimination.h"
+
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(fmt, ...) \
+ do { \
+ if (FLAG_trace_store_elimination) { \
+ PrintF("RedundantStoreFinder: " fmt "\n", ##__VA_ARGS__); \
+ } \
+ } while (false)
+
+// CHECK_EXTRA is like CHECK, but takes two or more arguments: a boolean
+// expression, a format string, and any number of extra arguments. The boolean
+// expression is evaluated at runtime. If it evaluates to false, an error
+// message is shown containing the condition, as well as the extra info
+// formatted printf-style.
+#define CHECK_EXTRA(condition, fmt, ...) \
+ do { \
+ if (V8_UNLIKELY(!(condition))) { \
+ V8_Fatal(__FILE__, __LINE__, "Check failed: %s. Extra info: " fmt, \
+ #condition, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#ifdef DEBUG
+#define DCHECK_EXTRA(condition, fmt, ...) \
+ CHECK_EXTRA(condition, fmt, ##__VA_ARGS__)
+#else
+#define DCHECK_EXTRA(condition, fmt, ...) ((void)0)
+#endif
+
+// Store-store elimination.
+//
+// The aim of this optimization is to detect the following pattern in the
+// effect graph:
+//
+// - StoreField[+24, kRepTagged](263, ...)
+//
+// ... lots of nodes from which the field at offset 24 of the object
+// returned by node #263 cannot be observed ...
+//
+// - StoreField[+24, kRepTagged](263, ...)
+//
+// In such situations, the earlier StoreField cannot be observed, and can be
+// eliminated. This optimization should work for any offset and input node, of
+// course.
+//
+// The optimization also works across splits. It currently does not work for
+// loops, because we tend to put a stack check in loops, and like deopts,
+// stack checks can observe anything.
+
+// Assumption: every byte of a JS object is only ever accessed through one
+// offset. For instance, byte 15 of a given object may be accessed using a
+// two-byte read at offset 14, or a four-byte read at offset 12, but never
+// both in the same program.
+//
+// This implementation needs all dead nodes removed from the graph, and the
+// graph should be trimmed.
+
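+// For an illustrative source-level example, the pattern above arises for
+// JavaScript like
+//
+// function f(o) {
+// o.x = 1; // earlier StoreField, never observed
+// o.x = 2; // later StoreField to the same offset
+// }
+//
+// where the first store can be eliminated, because no node between the two
+// stores can observe the field.
+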
+namespace {
+
+// 16 bits was chosen fairly arbitrarily; it seems enough now. 8 bits is too
+// few.
+typedef uint16_t StoreOffset;
+
+struct UnobservableStore {
+ NodeId id_;
+ StoreOffset offset_;
+
+ bool operator==(const UnobservableStore) const;
+ bool operator!=(const UnobservableStore) const;
+ bool operator<(const UnobservableStore) const;
+};
+
+} // namespace
+
+namespace {
+
+// Instances of UnobservablesSet are immutable. They represent either a set of
+// UnobservableStores, or the "unvisited empty set".
+//
+// We apply some sharing to save memory: the class UnobservablesSet is only a
+// pointer wide, and copying an UnobservablesSet allocates no heap (or
+// temp_zone) memory. Operations that produce a changed set, however, may
+// allocate in the temp_zone, so a non-unvisited instance costs one pointer
+// plus the shared storage backing it in the zone.
+class UnobservablesSet final {
+ public:
+ static UnobservablesSet Unvisited();
+ static UnobservablesSet VisitedEmpty(Zone* zone);
+ UnobservablesSet(); // unvisited
+ UnobservablesSet(const UnobservablesSet& other) : set_(other.set_) {}
+
+ UnobservablesSet Intersect(UnobservablesSet other, Zone* zone) const;
+ UnobservablesSet Add(UnobservableStore obs, Zone* zone) const;
+ UnobservablesSet RemoveSameOffset(StoreOffset off, Zone* zone) const;
+
+ const ZoneSet<UnobservableStore>* set() const { return set_; }
+
+ bool IsUnvisited() const { return set_ == nullptr; }
+ bool IsEmpty() const { return set_ == nullptr || set_->empty(); }
+ bool Contains(UnobservableStore obs) const {
+ return set_ != nullptr && (set_->find(obs) != set_->end());
+ }
+
+ bool operator==(const UnobservablesSet&) const;
+ bool operator!=(const UnobservablesSet&) const;
+
+ private:
+ explicit UnobservablesSet(const ZoneSet<UnobservableStore>* set)
+ : set_(set) {}
+ const ZoneSet<UnobservableStore>* set_;
+};
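+
+// Usage note (illustrative): Add() returns the receiver unchanged, and
+// allocates nothing, when the store is already present; Intersect() returns
+// the shared Unvisited() representation when either side is empty, and
+// otherwise allocates a fresh set in the temp_zone.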
+
+} // namespace
+
+namespace {
+
+class RedundantStoreFinder final {
+ public:
+ RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone);
+
+ void Find();
+
+ const ZoneSet<Node*>& to_remove_const() { return to_remove_; }
+
+ void Visit(Node* node);
+
+ private:
+ static bool IsEffectful(Node* node);
+ void VisitEffectfulNode(Node* node);
+ UnobservablesSet RecomputeUseIntersection(Node* node);
+ UnobservablesSet RecomputeSet(Node* node, UnobservablesSet uses);
+ static bool CannotObserveStoreField(Node* node);
+
+ void MarkForRevisit(Node* node);
+ bool HasBeenVisited(Node* node);
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Zone* temp_zone() const { return temp_zone_; }
+ ZoneVector<UnobservablesSet>& unobservable() { return unobservable_; }
+ UnobservablesSet& unobservable_for_id(NodeId id) {
+ DCHECK_LT(id, unobservable().size());
+ return unobservable()[id];
+ }
+ ZoneSet<Node*>& to_remove() { return to_remove_; }
+
+ JSGraph* const jsgraph_;
+ Zone* const temp_zone_;
+
+ ZoneStack<Node*> revisit_;
+ ZoneVector<bool> in_revisit_;
+ // Maps node IDs to UnobservableNodeSets.
+ ZoneVector<UnobservablesSet> unobservable_;
+ ZoneSet<Node*> to_remove_;
+ const UnobservablesSet unobservables_visited_empty_;
+};
+
+// Safely narrows an offset taken from a FieldAccess, which has a wider
+// range (namely int), to a StoreOffset.
+StoreOffset ToOffset(int offset) {
+ CHECK(0 <= offset && offset < (1 << 8 * sizeof(StoreOffset)));
+ return (StoreOffset)offset;
+}
+
+StoreOffset ToOffset(const FieldAccess& access) {
+ return ToOffset(access.offset);
+}
+
+unsigned int RepSizeOf(MachineRepresentation rep) {
+ return 1u << ElementSizeLog2Of(rep);
+}
+unsigned int RepSizeOf(FieldAccess access) {
+ return RepSizeOf(access.machine_type.representation());
+}
+
+bool AtMostTagged(FieldAccess access) {
+ return RepSizeOf(access) <= RepSizeOf(MachineRepresentation::kTagged);
+}
+
+bool AtLeastTagged(FieldAccess access) {
+ return RepSizeOf(access) >= RepSizeOf(MachineRepresentation::kTagged);
+}
+
+} // namespace
+
+void RedundantStoreFinder::Find() {
+ Visit(jsgraph()->graph()->end());
+
+ while (!revisit_.empty()) {
+ Node* next = revisit_.top();
+ revisit_.pop();
+ DCHECK_LT(next->id(), in_revisit_.size());
+ in_revisit_[next->id()] = false;
+ Visit(next);
+ }
+
+#ifdef DEBUG
+ // Check that we visited all the StoreFields
+ AllNodes all(temp_zone(), jsgraph()->graph());
+ for (Node* node : all.reachable) {
+ if (node->op()->opcode() == IrOpcode::kStoreField) {
+ DCHECK_EXTRA(HasBeenVisited(node), "#%d:%s", node->id(),
+ node->op()->mnemonic());
+ }
+ }
+#endif
+}
+
+void RedundantStoreFinder::MarkForRevisit(Node* node) {
+ DCHECK_LT(node->id(), in_revisit_.size());
+ if (!in_revisit_[node->id()]) {
+ revisit_.push(node);
+ in_revisit_[node->id()] = true;
+ }
+}
+
+bool RedundantStoreFinder::HasBeenVisited(Node* node) {
+ return !unobservable_for_id(node->id()).IsUnvisited();
+}
+
+void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) {
+ // Find superfluous nodes
+ RedundantStoreFinder finder(js_graph, temp_zone);
+ finder.Find();
+
+ // Remove superfluous nodes
+
+ for (Node* node : finder.to_remove_const()) {
+ if (FLAG_trace_store_elimination) {
+ PrintF("StoreStoreElimination::Run: Eliminating node #%d:%s\n",
+ node->id(), node->op()->mnemonic());
+ }
+ Node* previous_effect = NodeProperties::GetEffectInput(node);
+ NodeProperties::ReplaceUses(node, nullptr, previous_effect, nullptr,
+ nullptr);
+ node->Kill();
+ }
+}
+
+bool RedundantStoreFinder::IsEffectful(Node* node) {
+ return (node->op()->EffectInputCount() >= 1);
+}
+
+// Recomputes the unobservables-set for a node, and marks superfluous nodes
+// for removal.
+
+UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
+ UnobservablesSet uses) {
+ switch (node->op()->opcode()) {
+ case IrOpcode::kStoreField: {
+ Node* stored_to = node->InputAt(0);
+ FieldAccess access = OpParameter<FieldAccess>(node->op());
+ StoreOffset offset = ToOffset(access);
+
+ UnobservableStore observation = {stored_to->id(), offset};
+ bool isNotObservable = uses.Contains(observation);
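+
+ // Note on the size guards below (inferred from AtMostTagged /
+ // AtLeastTagged): only stores of at most tagged size may be eliminated,
+ // and only stores of at least tagged size are recorded as unobservable,
+ // so an eliminated store is always fully covered by the later store that
+ // made it unobservable.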
+
+ if (isNotObservable && AtMostTagged(access)) {
+ TRACE(" #%d is StoreField[+%d,%s](#%d), unobservable", node->id(),
+ offset, MachineReprToString(access.machine_type.representation()),
+ stored_to->id());
+ to_remove().insert(node);
+ return uses;
+ } else if (isNotObservable && !AtMostTagged(access)) {
+ TRACE(
+ " #%d is StoreField[+%d,%s](#%d), repeated in future but too "
+ "big to optimize away",
+ node->id(), offset,
+ MachineReprToString(access.machine_type.representation()),
+ stored_to->id());
+ return uses;
+ } else if (!isNotObservable && AtLeastTagged(access)) {
+ TRACE(" #%d is StoreField[+%d,%s](#%d), observable, recording in set",
+ node->id(), offset,
+ MachineReprToString(access.machine_type.representation()),
+ stored_to->id());
+ return uses.Add(observation, temp_zone());
+ } else if (!isNotObservable && !AtLeastTagged(access)) {
+ TRACE(
+ " #%d is StoreField[+%d,%s](#%d), observable but too small to "
+ "record",
+ node->id(), offset,
+ MachineReprToString(access.machine_type.representation()),
+ stored_to->id());
+ return uses;
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case IrOpcode::kLoadField: {
+ Node* loaded_from = node->InputAt(0);
+ FieldAccess access = OpParameter<FieldAccess>(node->op());
+ StoreOffset offset = ToOffset(access);
+
+ TRACE(
+ " #%d is LoadField[+%d,%s](#%d), removing all offsets [+%d] from "
+ "set",
+ node->id(), offset,
+ MachineReprToString(access.machine_type.representation()),
+ loaded_from->id(), offset);
+
+ return uses.RemoveSameOffset(offset, temp_zone());
+ break;
+ }
+ default:
+ if (CannotObserveStoreField(node)) {
+ TRACE(" #%d:%s can observe nothing, set stays unchanged", node->id(),
+ node->op()->mnemonic());
+ return uses;
+ } else {
+ TRACE(" #%d:%s might observe anything, recording empty set",
+ node->id(), node->op()->mnemonic());
+ return unobservables_visited_empty_;
+ }
+ }
+ UNREACHABLE();
+ return UnobservablesSet::Unvisited();
+}
+
+bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
+ return node->opcode() == IrOpcode::kCheckedLoad ||
+ node->opcode() == IrOpcode::kLoadElement ||
+ node->opcode() == IrOpcode::kLoad ||
+ node->opcode() == IrOpcode::kStore ||
+ node->opcode() == IrOpcode::kEffectPhi ||
+ node->opcode() == IrOpcode::kStoreElement ||
+ node->opcode() == IrOpcode::kCheckedStore ||
+ node->opcode() == IrOpcode::kUnsafePointerAdd ||
+ node->opcode() == IrOpcode::kRetain;
+}
+
+// Initialize unobservable_ with js_graph->graph()->NodeCount() unvisited sets.
+RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone)
+ : jsgraph_(js_graph),
+ temp_zone_(temp_zone),
+ revisit_(temp_zone),
+ in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
+ unobservable_(js_graph->graph()->NodeCount(),
+ UnobservablesSet::Unvisited(), temp_zone),
+ to_remove_(temp_zone),
+ unobservables_visited_empty_(UnobservablesSet::VisitedEmpty(temp_zone)) {}
+
+void RedundantStoreFinder::Visit(Node* node) {
+ // All effectful nodes should be reachable from End via a sequence of
+ // control, then a sequence of effect edges. In VisitEffectfulNode we mark
+ // all effect inputs for revisiting (if they might have stale state); here
+ // we mark all control inputs at least once.
+
+ if (!HasBeenVisited(node)) {
+ for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+ Node* control_input = NodeProperties::GetControlInput(node, i);
+ if (!HasBeenVisited(control_input)) {
+ MarkForRevisit(control_input);
+ }
+ }
+ }
+
+ bool isEffectful = (node->op()->EffectInputCount() >= 1);
+ if (isEffectful) {
+ VisitEffectfulNode(node);
+ DCHECK(HasBeenVisited(node));
+ }
+
+ if (!HasBeenVisited(node)) {
+ // Mark as visited.
+ unobservable_for_id(node->id()) = unobservables_visited_empty_;
+ }
+}
+
+void RedundantStoreFinder::VisitEffectfulNode(Node* node) {
+ if (HasBeenVisited(node)) {
+ TRACE("- Revisiting: #%d:%s", node->id(), node->op()->mnemonic());
+ }
+ UnobservablesSet after_set = RecomputeUseIntersection(node);
+ UnobservablesSet before_set = RecomputeSet(node, after_set);
+ DCHECK(!before_set.IsUnvisited());
+
+ UnobservablesSet stored_for_node = unobservable_for_id(node->id());
+ bool cur_set_changed =
+ (stored_for_node.IsUnvisited() || stored_for_node != before_set);
+ if (!cur_set_changed) {
+ // We will not be able to update the part of this chain above any more.
+ // Exit.
+ TRACE("+ No change: stabilized. Not visiting effect inputs.");
+ } else {
+ unobservable_for_id(node->id()) = before_set;
+
+ // Mark effect inputs for visiting.
+ for (int i = 0; i < node->op()->EffectInputCount(); i++) {
+ Node* input = NodeProperties::GetEffectInput(node, i);
+ if (!HasBeenVisited(input)) {
+ TRACE(" marking #%d:%s for revisit", input->id(),
+ input->op()->mnemonic());
+ MarkForRevisit(input);
+ }
+ }
+ }
+}
+
+// Compute the intersection of the UnobservablesSets of all effect uses and
+// return it. This function only works if {node} has an effect use.
+//
+// The result UnobservablesSet will always be visited.
+UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) {
+ // {first} == true indicates that we haven't looked at any elements yet.
+ // {first} == false indicates that {cur_set} is the intersection of the
+ // sets of at least one effect use seen so far.
+
+ bool first = true;
+ UnobservablesSet cur_set = UnobservablesSet::Unvisited(); // initial value is irrelevant
+
+ for (Edge edge : node->use_edges()) {
+ // Skip non-effect edges
+ if (!NodeProperties::IsEffectEdge(edge)) {
+ continue;
+ }
+
+ Node* use = edge.from();
+ UnobservablesSet new_set = unobservable_for_id(use->id());
+ // Include new_set in the intersection.
+ if (first) {
+ // Intersection of a one-element set is that one element
+ first = false;
+ cur_set = new_set;
+ } else {
+ // Take the intersection of cur_set and new_set.
+ cur_set = cur_set.Intersect(new_set, temp_zone());
+ }
+ }
+
+ if (first) {
+ // There were no effect uses.
+ auto opcode = node->op()->opcode();
+ // List of opcodes that may end this effect chain. The opcodes are not
+ // important to the soundness of this optimization; this serves as a
+ // general sanity check. Add opcodes to this list as it suits you.
+ //
+ // Everything is observable after these opcodes; return the empty set.
+ DCHECK_EXTRA(
+ opcode == IrOpcode::kReturn || opcode == IrOpcode::kTerminate ||
+ opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow,
+ "for #%d:%s", node->id(), node->op()->mnemonic());
+ USE(opcode); // silence warning about unused variable in release mode
+
+ return unobservables_visited_empty_;
+ } else {
+ if (cur_set.IsUnvisited()) {
+ cur_set = unobservables_visited_empty_;
+ }
+
+ return cur_set;
+ }
+}
+
+UnobservablesSet UnobservablesSet::Unvisited() { return UnobservablesSet(); }
+
+UnobservablesSet::UnobservablesSet() : set_(nullptr) {}
+
+UnobservablesSet UnobservablesSet::VisitedEmpty(Zone* zone) {
+ // Create a new empty UnobservablesSet. This allocates in the zone, and
+ // can probably be optimized to use a global singleton.
+ ZoneSet<UnobservableStore>* empty_set =
+ new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+ ZoneSet<UnobservableStore>(zone);
+ return UnobservablesSet(empty_set);
+}
+
+// Computes the intersection of two UnobservablesSets. May return
+// UnobservablesSet::Unvisited() instead of an empty UnobservablesSet for
+// speed.
+UnobservablesSet UnobservablesSet::Intersect(UnobservablesSet other,
+ Zone* zone) const {
+ if (IsEmpty() || other.IsEmpty()) {
+ return Unvisited();
+ } else {
+ ZoneSet<UnobservableStore>* intersection =
+ new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+ ZoneSet<UnobservableStore>(zone);
+ // Put the intersection of set() and other.set() in intersection.
+ set_intersection(set()->begin(), set()->end(), other.set()->begin(),
+ other.set()->end(),
+ std::inserter(*intersection, intersection->end()));
+
+ return UnobservablesSet(intersection);
+ }
+}
+
+UnobservablesSet UnobservablesSet::Add(UnobservableStore obs,
+ Zone* zone) const {
+ bool present = (set()->find(obs) != set()->end());
+ if (present) {
+ return *this;
+ } else {
+ // Make a new empty set.
+ ZoneSet<UnobservableStore>* new_set =
+ new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+ ZoneSet<UnobservableStore>(zone);
+ // Copy the old elements over.
+ *new_set = *set();
+ // Add the new element.
+ bool inserted = new_set->insert(obs).second;
+ DCHECK(inserted);
+ USE(inserted); // silence warning about unused variable
+
+ return UnobservablesSet(new_set);
+ }
+}
+
+UnobservablesSet UnobservablesSet::RemoveSameOffset(StoreOffset offset,
+ Zone* zone) const {
+ // Make a new empty set.
+ ZoneSet<UnobservableStore>* new_set =
+ new (zone->New(sizeof(ZoneSet<UnobservableStore>)))
+ ZoneSet<UnobservableStore>(zone);
+ // Copy over all elements that have a different offset.
+ for (auto obs : *set()) {
+ if (obs.offset_ != offset) {
+ new_set->insert(obs);
+ }
+ }
+
+ return UnobservablesSet(new_set);
+}
+
+// Used for debugging.
+bool UnobservablesSet::operator==(const UnobservablesSet& other) const {
+ if (IsUnvisited() || other.IsUnvisited()) {
+ return IsEmpty() && other.IsEmpty();
+ } else {
+ // Both pointers guaranteed not to be nullptrs.
+ return *set() == *other.set();
+ }
+}
+
+bool UnobservablesSet::operator!=(const UnobservablesSet& other) const {
+ return !(*this == other);
+}
+
+bool UnobservableStore::operator==(const UnobservableStore other) const {
+ return (id_ == other.id_) && (offset_ == other.offset_);
+}
+
+bool UnobservableStore::operator!=(const UnobservableStore other) const {
+ return !(*this == other);
+}
+
+bool UnobservableStore::operator<(const UnobservableStore other) const {
+ return (id_ < other.id_) || (id_ == other.id_ && offset_ < other.offset_);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
new file mode 100644
index 0000000000..07ae2c25d1
--- /dev/null
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STORE_STORE_ELIMINATION_H_
+#define V8_COMPILER_STORE_STORE_ELIMINATION_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class StoreStoreElimination final {
+ public:
+ static void Run(JSGraph* js_graph, Zone* temp_zone);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_STORE_STORE_ELIMINATION_H_
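
A minimal sketch of how a compiler pipeline phase might drive this pass, assuming a JSGraph and a short-lived temporary Zone are available (the wrapper itself is hypothetical; only StoreStoreElimination::Run comes from this patch):

    #include "src/compiler/store-store-elimination.h"

    // Hypothetical phase wrapper. The pass expects a trimmed graph with all
    // dead nodes removed (see the comment in store-store-elimination.cc).
    void RunStoreStoreElimination(JSGraph* jsgraph, Zone* temp_zone) {
      StoreStoreElimination::Run(jsgraph, temp_zone);
    }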
diff --git a/deps/v8/src/compiler/tail-call-optimization.cc b/deps/v8/src/compiler/tail-call-optimization.cc
index 6635fb982b..7e1623aeca 100644
--- a/deps/v8/src/compiler/tail-call-optimization.cc
+++ b/deps/v8/src/compiler/tail-call-optimization.cc
@@ -20,7 +20,7 @@ Reduction TailCallOptimization::Reduce(Node* node) {
// other effect between the Call and the Return nodes.
Node* const call = NodeProperties::GetValueInput(node, 0);
if (call->opcode() == IrOpcode::kCall &&
- OpParameter<CallDescriptor const*>(call)->SupportsTailCalls() &&
+ CallDescriptorOf(call->op())->SupportsTailCalls() &&
NodeProperties::GetEffectInput(node) == call &&
!NodeProperties::IsExceptionalCall(call)) {
Node* const control = NodeProperties::GetControlInput(node);
@@ -71,7 +71,7 @@ Reduction TailCallOptimization::Reduce(Node* node) {
NodeProperties::GetValueInput(call, index));
}
NodeProperties::ChangeOp(
- node, common()->TailCall(OpParameter<CallDescriptor const*>(call)));
+ node, common()->TailCall(CallDescriptorOf(call->op())));
return Changed(node);
}
}
diff --git a/deps/v8/src/compiler/type-hint-analyzer.cc b/deps/v8/src/compiler/type-hint-analyzer.cc
index da4f2683a3..8e7a0f3eae 100644
--- a/deps/v8/src/compiler/type-hint-analyzer.cc
+++ b/deps/v8/src/compiler/type-hint-analyzer.cc
@@ -15,32 +15,71 @@ namespace compiler {
namespace {
-// TODO(bmeurer): This detour via types is ugly.
-BinaryOperationHints::Hint ToHint(Type* type) {
- if (type->Is(Type::None())) return BinaryOperationHints::kNone;
- if (type->Is(Type::SignedSmall())) return BinaryOperationHints::kSignedSmall;
- if (type->Is(Type::Signed32())) return BinaryOperationHints::kSigned32;
- if (type->Is(Type::Number())) return BinaryOperationHints::kNumber;
- if (type->Is(Type::String())) return BinaryOperationHints::kString;
- return BinaryOperationHints::kAny;
+BinaryOperationHint ToBinaryOperationHint(BinaryOpICState::Kind kind) {
+ switch (kind) {
+ case BinaryOpICState::NONE:
+ return BinaryOperationHint::kNone;
+ case BinaryOpICState::SMI:
+ return BinaryOperationHint::kSignedSmall;
+ case BinaryOpICState::INT32:
+ return BinaryOperationHint::kSigned32;
+ case BinaryOpICState::NUMBER:
+ return BinaryOperationHint::kNumberOrOddball;
+ case BinaryOpICState::STRING:
+ case BinaryOpICState::GENERIC:
+ return BinaryOperationHint::kAny;
+ }
+ UNREACHABLE();
+ return BinaryOperationHint::kNone;
}
-} // namespace
+CompareOperationHint ToCompareOperationHint(Token::Value op,
+ CompareICState::State state) {
+ switch (state) {
+ case CompareICState::UNINITIALIZED:
+ return CompareOperationHint::kNone;
+ case CompareICState::SMI:
+ return CompareOperationHint::kSignedSmall;
+ case CompareICState::NUMBER:
+ return Token::IsOrderedRelationalCompareOp(op)
+ ? CompareOperationHint::kNumberOrOddball
+ : CompareOperationHint::kNumber;
+ case CompareICState::STRING:
+ case CompareICState::INTERNALIZED_STRING:
+ case CompareICState::UNIQUE_NAME:
+ case CompareICState::RECEIVER:
+ case CompareICState::KNOWN_RECEIVER:
+ case CompareICState::BOOLEAN:
+ case CompareICState::GENERIC:
+ return CompareOperationHint::kAny;
+ }
+ UNREACHABLE();
+ return CompareOperationHint::kNone;
+}
+} // namespace
-bool TypeHintAnalysis::GetBinaryOperationHints(
- TypeFeedbackId id, BinaryOperationHints* hints) const {
+bool TypeHintAnalysis::GetBinaryOperationHint(TypeFeedbackId id,
+ BinaryOperationHint* hint) const {
auto i = infos_.find(id);
if (i == infos_.end()) return false;
Handle<Code> code = i->second;
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
- *hints = BinaryOperationHints(ToHint(state.GetLeftType()),
- ToHint(state.GetRightType()),
- ToHint(state.GetResultType()));
+ *hint = ToBinaryOperationHint(state.kind());
return true;
}
+bool TypeHintAnalysis::GetCompareOperationHint(
+ TypeFeedbackId id, CompareOperationHint* hint) const {
+ auto i = infos_.find(id);
+ if (i == infos_.end()) return false;
+ Handle<Code> code = i->second;
+ DCHECK_EQ(Code::COMPARE_IC, code->kind());
+ CompareICStub stub(code->stub_key(), code->GetIsolate());
+ *hint = ToCompareOperationHint(stub.op(), stub.state());
+ return true;
+}
bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
ToBooleanHints* hints) const {
@@ -67,7 +106,6 @@ bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
return true;
}
-
TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
DisallowHeapAllocation no_gc;
TypeHintAnalysis::Infos infos(zone());
@@ -79,6 +117,7 @@ TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
Code* target = Code::GetCodeFromTargetAddress(target_address);
switch (target->kind()) {
case Code::BINARY_OP_IC:
+ case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC: {
// Add this feedback to the {infos}.
TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
@@ -90,7 +129,22 @@ TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
break;
}
}
- return new (zone()) TypeHintAnalysis(infos);
+ return new (zone()) TypeHintAnalysis(infos, zone());
+}
+
+// Helper function to translate type feedback into a BinaryOperationHint.
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
+ switch (type_feedback) {
+ case BinaryOperationFeedback::kSignedSmall:
+ return BinaryOperationHint::kSignedSmall;
+ case BinaryOperationFeedback::kNumber:
+ return BinaryOperationHint::kNumberOrOddball;
+ case BinaryOperationFeedback::kAny:
+ default:
+ return BinaryOperationHint::kAny;
+ }
+ UNREACHABLE();
+ return BinaryOperationHint::kNone;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/type-hint-analyzer.h b/deps/v8/src/compiler/type-hint-analyzer.h
index 1a79905633..e48938a976 100644
--- a/deps/v8/src/compiler/type-hint-analyzer.h
+++ b/deps/v8/src/compiler/type-hint-analyzer.h
@@ -18,14 +18,20 @@ class TypeHintAnalysis final : public ZoneObject {
public:
typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
- explicit TypeHintAnalysis(Infos const& infos) : infos_(infos) {}
+ explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
+ : infos_(infos), zone_(zone) {}
- bool GetBinaryOperationHints(TypeFeedbackId id,
- BinaryOperationHints* hints) const;
+ bool GetBinaryOperationHint(TypeFeedbackId id,
+ BinaryOperationHint* hint) const;
+ bool GetCompareOperationHint(TypeFeedbackId id,
+ CompareOperationHint* hint) const;
bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
private:
+ Zone* zone() const { return zone_; }
+
Infos const infos_;
+ Zone* zone_;
};
@@ -44,6 +50,8 @@ class TypeHintAnalyzer final {
DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
};
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
+
} // namespace compiler
} // namespace internal
} // namespace v8
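
A hedged sketch of querying the renamed API from lowering code (the surrounding variables are assumed; only the GetBinaryOperationHint signature comes from this patch):

    // Ask the analysis for feedback recorded at a particular IC site, and
    // fall back to kAny when nothing was recorded.
    BinaryOperationHint hint = BinaryOperationHint::kAny;
    if (type_hint_analysis->GetBinaryOperationHint(feedback_id, &hint)) {
      // e.g. choose a speculative simplified operator based on {hint}.
    }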
diff --git a/deps/v8/src/compiler/type-hints.cc b/deps/v8/src/compiler/type-hints.cc
index 06abad6380..a07a8707b1 100644
--- a/deps/v8/src/compiler/type-hints.cc
+++ b/deps/v8/src/compiler/type-hints.cc
@@ -8,31 +8,40 @@ namespace v8 {
namespace internal {
namespace compiler {
-std::ostream& operator<<(std::ostream& os, BinaryOperationHints::Hint hint) {
+std::ostream& operator<<(std::ostream& os, BinaryOperationHint hint) {
switch (hint) {
- case BinaryOperationHints::kNone:
+ case BinaryOperationHint::kNone:
return os << "None";
- case BinaryOperationHints::kSignedSmall:
+ case BinaryOperationHint::kSignedSmall:
return os << "SignedSmall";
- case BinaryOperationHints::kSigned32:
+ case BinaryOperationHint::kSigned32:
return os << "Signed32";
- case BinaryOperationHints::kNumber:
- return os << "Number";
- case BinaryOperationHints::kString:
- return os << "String";
- case BinaryOperationHints::kAny:
+ case BinaryOperationHint::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ case BinaryOperationHint::kAny:
return os << "Any";
}
UNREACHABLE();
return os;
}
-
-std::ostream& operator<<(std::ostream& os, BinaryOperationHints hints) {
- return os << hints.left() << "*" << hints.right() << "->" << hints.result();
+std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
+ switch (hint) {
+ case CompareOperationHint::kNone:
+ return os << "None";
+ case CompareOperationHint::kSignedSmall:
+ return os << "SignedSmall";
+ case CompareOperationHint::kNumber:
+ return os << "Number";
+ case CompareOperationHint::kNumberOrOddball:
+ return os << "NumberOrOddball";
+ case CompareOperationHint::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
}
-
std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
switch (hint) {
case ToBooleanHint::kNone:
@@ -62,12 +71,11 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
return os;
}
-
std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
if (hints == ToBooleanHint::kAny) return os << "Any";
if (hints == ToBooleanHint::kNone) return os << "None";
bool first = true;
- for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * CHAR_BIT; ++i) {
+ for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * 8; ++i) {
ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
if (hints & hint) {
if (!first) os << "|";
diff --git a/deps/v8/src/compiler/type-hints.h b/deps/v8/src/compiler/type-hints.h
index f1cc64036c..ad94491511 100644
--- a/deps/v8/src/compiler/type-hints.h
+++ b/deps/v8/src/compiler/type-hints.h
@@ -13,45 +13,34 @@ namespace internal {
namespace compiler {
// Type hints for a binary operation.
-class BinaryOperationHints final {
- public:
- enum Hint { kNone, kSignedSmall, kSigned32, kNumber, kString, kAny };
-
- BinaryOperationHints() : BinaryOperationHints(kNone, kNone, kNone) {}
- BinaryOperationHints(Hint left, Hint right, Hint result)
- : bit_field_(LeftField::encode(left) | RightField::encode(right) |
- ResultField::encode(result)) {}
-
- static BinaryOperationHints Any() {
- return BinaryOperationHints(kAny, kAny, kAny);
- }
-
- Hint left() const { return LeftField::decode(bit_field_); }
- Hint right() const { return RightField::decode(bit_field_); }
- Hint result() const { return ResultField::decode(bit_field_); }
-
- bool operator==(BinaryOperationHints const& that) const {
- return this->bit_field_ == that.bit_field_;
- }
- bool operator!=(BinaryOperationHints const& that) const {
- return !(*this == that);
- }
-
- friend size_t hash_value(BinaryOperationHints const& hints) {
- return hints.bit_field_;
- }
-
- private:
- typedef BitField<Hint, 0, 3> LeftField;
- typedef BitField<Hint, 3, 3> RightField;
- typedef BitField<Hint, 6, 3> ResultField;
-
- uint32_t bit_field_;
+enum class BinaryOperationHint : uint8_t {
+ kNone,
+ kSignedSmall,
+ kSigned32,
+ kNumberOrOddball,
+ kAny
};
-std::ostream& operator<<(std::ostream&, BinaryOperationHints::Hint);
-std::ostream& operator<<(std::ostream&, BinaryOperationHints);
+inline size_t hash_value(BinaryOperationHint hint) {
+ return static_cast<unsigned>(hint);
+}
+std::ostream& operator<<(std::ostream&, BinaryOperationHint);
+
+// Type hints for a compare operation.
+enum class CompareOperationHint : uint8_t {
+ kNone,
+ kSignedSmall,
+ kNumber,
+ kNumberOrOddball,
+ kAny
+};
+
+inline size_t hash_value(CompareOperationHint hint) {
+ return static_cast<unsigned>(hint);
+}
+
+std::ostream& operator<<(std::ostream&, CompareOperationHint);
// Type hints for the ToBoolean type conversion.
enum class ToBooleanHint : uint16_t {
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 81c3d3d928..6733bd6aff 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -4,14 +4,17 @@
#include "src/compiler/typer.h"
+#include <iomanip>
+
#include "src/base/flags.h"
#include "src/bootstrapper.h"
-#include "src/compilation-dependencies.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
+#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operation-typer.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
#include "src/type-cache.h"
@@ -29,32 +32,18 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
- CompilationDependencies* dependencies, FunctionType* function_type)
+Typer::Typer(Isolate* isolate, Graph* graph)
: isolate_(isolate),
graph_(graph),
- flags_(flags),
- dependencies_(dependencies),
- function_type_(function_type),
decorator_(nullptr),
- cache_(TypeCache::Get()) {
+ cache_(TypeCache::Get()),
+ operation_typer_(isolate, zone()) {
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
- Type* infinity = Type::Constant(factory->infinity_value(), zone);
- Type* minus_infinity = Type::Constant(factory->minus_infinity_value(), zone);
- // TODO(neis): Unfortunately, the infinities created in other places might
- // be different ones (eg the result of NewNumber in TypeNumberConstant).
- Type* truncating_to_zero =
- Type::Union(Type::Union(infinity, minus_infinity, zone),
- Type::MinusZeroOrNaN(), zone);
- DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
-
singleton_false_ = Type::Constant(factory->false_value(), zone);
singleton_true_ = Type::Constant(factory->true_value(), zone);
singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
- signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
- unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
falsish_ = Type::Union(
Type::Undetectable(),
Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
@@ -76,8 +65,10 @@ Typer::~Typer() {
class Typer::Visitor : public Reducer {
public:
- explicit Visitor(Typer* typer)
- : typer_(typer), weakened_nodes_(typer->zone()) {}
+ explicit Visitor(Typer* typer, LoopVariableOptimizer* induction_vars)
+ : typer_(typer),
+ induction_vars_(induction_vars),
+ weakened_nodes_(typer->zone()) {}
Reduction Reduce(Node* node) override {
if (node->op()->ValueOutputCount() == 0) return NoChange();
@@ -95,14 +86,29 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(IfException)
// VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
COMMON_OP_LIST(DECLARE_CASE)
- SIMPLIFIED_OP_LIST(DECLARE_CASE)
+ SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
MACHINE_OP_LIST(DECLARE_CASE)
+ MACHINE_SIMD_OP_LIST(DECLARE_CASE)
JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
JS_OBJECT_OP_LIST(DECLARE_CASE)
JS_CONTEXT_OP_LIST(DECLARE_CASE)
JS_OTHER_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
+#define DECLARE_CASE(x) \
+ case IrOpcode::k##x: \
+ return UpdateType(node, TypeBinaryOp(node, x));
+ SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) \
+ case IrOpcode::k##x: \
+ return UpdateType(node, TypeUnaryOp(node, x));
+ SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
#define DECLARE_CASE(x) case IrOpcode::k##x:
DECLARE_CASE(Loop)
DECLARE_CASE(Branch)
@@ -123,6 +129,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(OsrLoopEntry)
DECLARE_CASE(Throw)
DECLARE_CASE(End)
+ SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
+ SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
break;
}
@@ -141,14 +149,29 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(IfException)
// VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
COMMON_OP_LIST(DECLARE_CASE)
- SIMPLIFIED_OP_LIST(DECLARE_CASE)
+ SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_OTHER_OP_LIST(DECLARE_CASE)
MACHINE_OP_LIST(DECLARE_CASE)
+ MACHINE_SIMD_OP_LIST(DECLARE_CASE)
JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
JS_OBJECT_OP_LIST(DECLARE_CASE)
JS_CONTEXT_OP_LIST(DECLARE_CASE)
JS_OTHER_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
+#define DECLARE_CASE(x) \
+ case IrOpcode::k##x: \
+ return TypeBinaryOp(node, x);
+ SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) \
+ case IrOpcode::k##x: \
+ return TypeUnaryOp(node, x);
+ SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
#define DECLARE_CASE(x) case IrOpcode::k##x:
DECLARE_CASE(Loop)
DECLARE_CASE(Branch)
@@ -169,6 +192,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(OsrLoopEntry)
DECLARE_CASE(Throw)
DECLARE_CASE(End)
+ SIMPLIFIED_CHANGE_OP_LIST(DECLARE_CASE)
+ SIMPLIFIED_CHECKED_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
break;
}
@@ -180,12 +205,18 @@ class Typer::Visitor : public Reducer {
private:
Typer* typer_;
+ LoopVariableOptimizer* induction_vars_;
ZoneSet<NodeId> weakened_nodes_;
#define DECLARE_METHOD(x) inline Type* Type##x(Node* node);
DECLARE_METHOD(Start)
DECLARE_METHOD(IfException)
- VALUE_OP_LIST(DECLARE_METHOD)
+ COMMON_OP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_COMPARE_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_OTHER_OP_LIST(DECLARE_METHOD)
+ MACHINE_OP_LIST(DECLARE_METHOD)
+ MACHINE_SIMD_OP_LIST(DECLARE_METHOD)
+ JS_OP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
Type* TypeOrNone(Node* node) {
@@ -204,10 +235,6 @@ class Typer::Visitor : public Reducer {
Zone* zone() { return typer_->zone(); }
Isolate* isolate() { return typer_->isolate(); }
Graph* graph() { return typer_->graph(); }
- Typer::Flags flags() const { return typer_->flags(); }
- CompilationDependencies* dependencies() const {
- return typer_->dependencies();
- }
void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
bool IsWeakened(NodeId node_id) {
@@ -230,7 +257,6 @@ class Typer::Visitor : public Reducer {
static ComparisonOutcome Invert(ComparisonOutcome, Typer*);
static Type* Invert(Type*, Typer*);
static Type* FalsifyUndefined(ComparisonOutcome, Typer*);
- static Type* Rangify(Type*, Typer*);
static Type* ToPrimitive(Type*, Typer*);
static Type* ToBoolean(Type*, Typer*);
@@ -240,23 +266,27 @@ class Typer::Visitor : public Reducer {
static Type* ToNumber(Type*, Typer*);
static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
- static Type* NumberCeil(Type*, Typer*);
- static Type* NumberFloor(Type*, Typer*);
- static Type* NumberRound(Type*, Typer*);
- static Type* NumberTrunc(Type*, Typer*);
- static Type* NumberToInt32(Type*, Typer*);
- static Type* NumberToUint32(Type*, Typer*);
+#define DECLARE_METHOD(Name) \
+ static Type* Name(Type* type, Typer* t) { \
+ return t->operation_typer_.Name(type); \
+ }
+ SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+#define DECLARE_METHOD(Name) \
+ static Type* Name(Type* lhs, Type* rhs, Typer* t) { \
+ return t->operation_typer_.Name(lhs, rhs); \
+ }
+ SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+ static Type* ObjectIsCallable(Type*, Typer*);
static Type* ObjectIsNumber(Type*, Typer*);
static Type* ObjectIsReceiver(Type*, Typer*);
static Type* ObjectIsSmi(Type*, Typer*);
+ static Type* ObjectIsString(Type*, Typer*);
static Type* ObjectIsUndetectable(Type*, Typer*);
- static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
- static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
- static Type* JSDivideRanger(RangeType*, RangeType*, Typer*);
- static Type* JSModulusRanger(RangeType*, RangeType*, Typer*);
-
static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
#define DECLARE_METHOD(x) static Type* x##Typer(Type*, Type*, Typer*);
@@ -264,16 +294,17 @@ class Typer::Visitor : public Reducer {
#undef DECLARE_METHOD
static Type* JSTypeOfTyper(Type*, Typer*);
- static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
+ static Type* StringFromCharCodeTyper(Type*, Typer*);
Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
// Widen the type of a previously typed node.
Type* previous = NodeProperties::GetType(node);
- if (node->opcode() == IrOpcode::kPhi) {
+ if (node->opcode() == IrOpcode::kPhi ||
+ node->opcode() == IrOpcode::kInductionVariablePhi) {
// Speed up termination in the presence of range types:
current = Weaken(node, current, previous);
}
@@ -294,18 +325,23 @@ class Typer::Visitor : public Reducer {
}
};
-void Typer::Run() { Run(NodeVector(zone())); }
-
-
-void Typer::Run(const NodeVector& roots) {
- Visitor visitor(this);
+void Typer::Run() { Run(NodeVector(zone()), nullptr); }
+
+void Typer::Run(const NodeVector& roots,
+ LoopVariableOptimizer* induction_vars) {
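+ // When induction variable analysis is available, loop phis are
+ // temporarily turned into InductionVariablePhi nodes so that the typing
+ // fixpoint can compute range-based types for them; afterwards they are
+ // turned back into ordinary phis and type guards are inserted.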
+ if (induction_vars != nullptr) {
+ induction_vars->ChangeToInductionVariablePhis();
+ }
+ Visitor visitor(this, induction_vars);
GraphReducer graph_reducer(zone(), graph());
graph_reducer.AddReducer(&visitor);
for (Node* const root : roots) graph_reducer.ReduceNode(root);
graph_reducer.ReduceGraph();
-}
+ if (induction_vars != nullptr) {
+ induction_vars->ChangeToPhisAndInsertGuards();
+ }
+}
void Typer::Decorator::Decorate(Node* node) {
if (node->op()->ValueOutputCount() > 0) {
@@ -313,7 +349,7 @@ void Typer::Decorator::Decorate(Node* node) {
// Other cases will generally require a proper fixpoint iteration with Run.
bool is_typed = NodeProperties::IsTyped(node);
if (is_typed || NodeProperties::AllValueInputsAreTyped(node)) {
- Visitor typing(typer_);
+ Visitor typing(typer_, nullptr);
Type* type = typing.TypeNode(node);
if (is_typed) {
type = Type::Intersect(type, NodeProperties::GetType(node),
@@ -376,27 +412,8 @@ Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
return t->singleton_true_;
}
-
-Type* Typer::Visitor::Rangify(Type* type, Typer* t) {
- if (type->IsRange()) return type; // Shortcut.
- if (!type->Is(t->cache_.kInteger)) {
- return type; // Give up on non-integer types.
- }
- double min = type->Min();
- double max = type->Max();
- // Handle the degenerate case of empty bitset types (such as
- // OtherUnsigned31 and OtherSigned32 on 64-bit architectures).
- if (std::isnan(min)) {
- DCHECK(std::isnan(max));
- return type;
- }
- return Type::Range(min, max, t->zone());
-}
-
-
// Type conversion.
-
Type* Typer::Visitor::ToPrimitive(Type* type, Typer* t) {
if (type->Is(Type::Primitive()) && !type->Maybe(Type::Receiver())) {
return type;
@@ -455,24 +472,7 @@ Type* Typer::Visitor::ToName(Type* type, Typer* t) {
// static
Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
- if (type->Is(Type::Number())) return type;
- if (type->Is(Type::NullOrUndefined())) {
- if (type->Is(Type::Null())) return t->cache_.kSingletonZero;
- if (type->Is(Type::Undefined())) return Type::NaN();
- return Type::Union(Type::NaN(), t->cache_.kSingletonZero, t->zone());
- }
- if (type->Is(Type::NumberOrUndefined())) {
- return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
- Type::NaN(), t->zone());
- }
- if (type->Is(t->singleton_false_)) return t->cache_.kSingletonZero;
- if (type->Is(t->singleton_true_)) return t->cache_.kSingletonOne;
- if (type->Is(Type::Boolean())) return t->cache_.kZeroOrOne;
- if (type->Is(Type::BooleanOrNumber())) {
- return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
- t->cache_.kZeroOrOne, t->zone());
- }
- return Type::Number();
+ return t->operation_typer_.ToNumber(type);
}
@@ -496,66 +496,13 @@ Type* Typer::Visitor::ToString(Type* type, Typer* t) {
return Type::String();
}
-// static
-Type* Typer::Visitor::NumberCeil(Type* type, Typer* t) {
- DCHECK(type->Is(Type::Number()));
- if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-// static
-Type* Typer::Visitor::NumberFloor(Type* type, Typer* t) {
- DCHECK(type->Is(Type::Number()));
- if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-// static
-Type* Typer::Visitor::NumberRound(Type* type, Typer* t) {
- DCHECK(type->Is(Type::Number()));
- if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-// static
-Type* Typer::Visitor::NumberTrunc(Type* type, Typer* t) {
- DCHECK(type->Is(Type::Number()));
- if (type->Is(t->cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return t->cache_.kIntegerOrMinusZeroOrNaN;
-}
-
-Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
- // TODO(neis): DCHECK(type->Is(Type::Number()));
- if (type->Is(Type::Signed32())) return type;
- if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
- if (type->Is(t->signed32ish_)) {
- return Type::Intersect(
- Type::Union(type, t->cache_.kSingletonZero, t->zone()),
- Type::Signed32(), t->zone());
- }
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
- // TODO(neis): DCHECK(type->Is(Type::Number()));
- if (type->Is(Type::Unsigned32())) return type;
- if (type->Is(t->cache_.kZeroish)) return t->cache_.kSingletonZero;
- if (type->Is(t->unsigned32ish_)) {
- return Type::Intersect(
- Type::Union(type, t->cache_.kSingletonZero, t->zone()),
- Type::Unsigned32(), t->zone());
- }
- return Type::Unsigned32();
-}
-
-
// Type checks.
+Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
+ if (type->Is(Type::Function())) return t->singleton_true_;
+ if (type->Is(Type::Primitive())) return t->singleton_false_;
+ return Type::Boolean();
+}
Type* Typer::Visitor::ObjectIsNumber(Type* type, Typer* t) {
if (type->Is(Type::Number())) return t->singleton_true_;
@@ -577,6 +524,11 @@ Type* Typer::Visitor::ObjectIsSmi(Type* type, Typer* t) {
return Type::Boolean();
}
+Type* Typer::Visitor::ObjectIsString(Type* type, Typer* t) {
+ if (type->Is(Type::String())) return t->singleton_true_;
+ if (!type->Maybe(Type::String())) return t->singleton_false_;
+ return Type::Boolean();
+}
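The new ObjectIs* typers all follow the same three-valued pattern: fold to a singleton when the input type settles the question, otherwise stay Boolean. A minimal standalone model, where the two flags stand in for type->Is(Type::String()) and type->Maybe(Type::String()) on the real lattice:

#include <cassert>
#include <string>

std::string ObjectIsStringType(bool is_string, bool maybe_string) {
  if (is_string) return "true";        // definitely a string
  if (!maybe_string) return "false";   // cannot be a string
  return "Boolean";                    // statically unknown
}

int main() {
  assert(ObjectIsStringType(true, true) == "true");
  assert(ObjectIsStringType(false, false) == "false");
  assert(ObjectIsStringType(false, true) == "Boolean");
}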
Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
if (type->Is(Type::Undetectable())) return t->singleton_true_;
@@ -592,22 +544,13 @@ Type* Typer::Visitor::ObjectIsUndetectable(Type* type, Typer* t) {
Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(); }
-Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
-
+Type* Typer::Visitor::TypeIfException(Node* node) {
+ return Type::NonInternal();
+}
// Common operators.
-
-Type* Typer::Visitor::TypeParameter(Node* node) {
- if (FunctionType* function_type = typer_->function_type()) {
- int const index = ParameterIndexOf(node->op());
- if (index >= 0 && index < function_type->Arity()) {
- return function_type->Parameter(index);
- }
- }
- return Type::Any();
-}
-
+Type* Typer::Visitor::TypeParameter(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
@@ -624,6 +567,14 @@ Type* Typer::Visitor::TypeInt64Constant(Node* node) {
return Type::Internal(); // TODO(rossberg): Add int64 bitset type?
}
+// TODO(gdeepti): Fix this to do something meaningful.
+Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
+ return Type::Internal();
+}
+
+Type* Typer::Visitor::TypeRelocatableInt64Constant(Node* node) {
+ return Type::Internal();
+}
Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
return Type::Intersect(Type::Of(OpParameter<float>(node), zone()),
@@ -661,7 +612,6 @@ Type* Typer::Visitor::TypeSelect(Node* node) {
return Type::Union(Operand(node, 1), Operand(node, 2), zone());
}
-
Type* Typer::Visitor::TypePhi(Node* node) {
int arity = node->op()->ValueInputCount();
Type* type = Operand(node, 0);
@@ -671,25 +621,144 @@ Type* Typer::Visitor::TypePhi(Node* node) {
return type;
}
+Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
+ int arity = NodeProperties::GetControlInput(node)->op()->ControlInputCount();
+ DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(node)->opcode());
+ DCHECK_EQ(2, NodeProperties::GetControlInput(node)->InputCount());
+
+ Type* initial_type = Operand(node, 0);
+ Type* increment_type = Operand(node, 2);
+
+ // We only handle integer induction variables (otherwise ranges
+ // do not apply and we cannot do anything).
+ if (!initial_type->Is(typer_->cache_.kInteger) ||
+ !increment_type->Is(typer_->cache_.kInteger)) {
+ // Fall back to normal phi typing.
+ Type* type = Operand(node, 0);
+ for (int i = 1; i < arity; ++i) {
+ type = Type::Union(type, Operand(node, i), zone());
+ }
+ return type;
+ }
+ // If we do not have enough type information for the initial value or
+ // the increment, just return the initial value's type.
+ if (!initial_type->IsInhabited() || !increment_type->IsInhabited()) {
+ return initial_type;
+ }
+
+ // Now process the bounds.
+ auto res = induction_vars_->induction_variables().find(node->id());
+ DCHECK(res != induction_vars_->induction_variables().end());
+ InductionVariable* induction_var = res->second;
+
+ InductionVariable::ArithmeticType arithmetic_type = induction_var->Type();
+
+ double min = -V8_INFINITY;
+ double max = V8_INFINITY;
+
+ double increment_min;
+ double increment_max;
+ if (arithmetic_type == InductionVariable::ArithmeticType::kAddition) {
+ increment_min = increment_type->Min();
+ increment_max = increment_type->Max();
+ } else {
+ DCHECK(arithmetic_type == InductionVariable::ArithmeticType::kSubtraction);
+ increment_min = -increment_type->Max();
+ increment_max = -increment_type->Min();
+ }
+
+ if (increment_min >= 0) {
+ // increasing sequence
+ min = initial_type->Min();
+ for (auto bound : induction_var->upper_bounds()) {
+ Type* bound_type = TypeOrNone(bound.bound);
+ // If the type is not an integer, just skip the bound.
+ if (!bound_type->Is(typer_->cache_.kInteger)) continue;
+ // If the type is not inhabited, then we can take the initial value.
+ if (!bound_type->IsInhabited()) {
+ max = initial_type->Max();
+ break;
+ }
+ double bound_max = bound_type->Max();
+ if (bound.kind == InductionVariable::kStrict) {
+ bound_max -= 1;
+ }
+ max = std::min(max, bound_max + increment_max);
+ }
+ // The upper bound must be at least the initial value's upper bound.
+ max = std::max(max, initial_type->Max());
+ } else if (increment_max <= 0) {
+ // decreasing sequence
+ max = initial_type->Max();
+ for (auto bound : induction_var->lower_bounds()) {
+ Type* bound_type = TypeOrNone(bound.bound);
+ // If the type is not an integer, just skip the bound.
+ if (!bound_type->Is(typer_->cache_.kInteger)) continue;
+ // If the type is not inhabited, then we can take the initial value.
+ if (!bound_type->IsInhabited()) {
+ min = initial_type->Min();
+ break;
+ }
+ double bound_min = bound_type->Min();
+ if (bound.kind == InductionVariable::kStrict) {
+ bound_min += 1;
+ }
+ min = std::max(min, bound_min + increment_min);
+ }
+ // The lower bound must be at most the initial value's lower bound.
+ min = std::min(min, initial_type->Min());
+ } else {
+ // Shortcut: If the increment can be both positive and negative,
+ // the variable can go arbitrarily far, so just return integer.
+ return typer_->cache_.kInteger;
+ }
+ if (FLAG_trace_turbo_loop) {
+ OFStream os(stdout);
+ os << std::setprecision(10);
+ os << "Loop (" << NodeProperties::GetControlInput(node)->id()
+ << ") variable bounds in "
+ << (arithmetic_type == InductionVariable::ArithmeticType::kAddition
+ ? "addition"
+ : "subtraction")
+ << " for phi " << node->id() << ": (" << min << ", " << max << ")\n";
+ }
+ return Type::Range(min, max, typer_->zone());
+}
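A worked instance of the interval arithmetic above, with hypothetical numbers: an additive induction variable starting at 0, incremented by 1, with a strict upper bound whose type tops out at 100 (i.e. i = 0; while (i < n) i += 1; with n in [0, 100]):

#include <algorithm>
#include <cassert>
#include <limits>

int main() {
  const double kInf = std::numeric_limits<double>::infinity();
  double initial_min = 0, initial_max = 0;
  double increment_min = 1, increment_max = 1;  // kAddition
  assert(increment_min >= 0);                   // increasing sequence

  double min = initial_min;
  double max = kInf;
  double bound_max = 100 - 1;                   // kStrict bound: i < n
  max = std::min(max, bound_max + increment_max);
  max = std::max(max, initial_max);             // never below the initial max
  assert(min == 0 && max == 100);               // phi typed Range(0, 100)
}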
Type* Typer::Visitor::TypeEffectPhi(Node* node) {
UNREACHABLE();
return nullptr;
}
+Type* Typer::Visitor::TypeLoopExit(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Type* Typer::Visitor::TypeLoopExitValue(Node* node) { return Operand(node, 0); }
-Type* Typer::Visitor::TypeEffectSet(Node* node) {
+Type* Typer::Visitor::TypeLoopExitEffect(Node* node) {
UNREACHABLE();
return nullptr;
}
+Type* Typer::Visitor::TypeEnsureWritableFastElements(Node* node) {
+ return Operand(node, 1);
+}
+
+Type* Typer::Visitor::TypeMaybeGrowFastElements(Node* node) {
+ return Operand(node, 1);
+}
-Type* Typer::Visitor::TypeGuard(Node* node) {
- Type* input_type = Operand(node, 0);
- Type* guard_type = OpParameter<Type*>(node);
- return Type::Intersect(input_type, guard_type, zone());
+Type* Typer::Visitor::TypeTransitionElementsKind(Node* node) {
+ UNREACHABLE();
+ return nullptr;
}
+Type* Typer::Visitor::TypeCheckpoint(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
Type* Typer::Visitor::TypeBeginRegion(Node* node) {
UNREACHABLE();
@@ -727,9 +796,12 @@ Type* Typer::Visitor::TypeProjection(Node* node) {
return Type::Any();
}
+Type* Typer::Visitor::TypeTypeGuard(Node* node) {
+ Type* const type = Operand(node, 0);
+ return typer_->operation_typer()->TypeTypeGuard(node->op(), type);
+}
-Type* Typer::Visitor::TypeDead(Node* node) { return Type::Any(); }
-
+Type* Typer::Visitor::TypeDead(Node* node) { return Type::None(); }
// JS comparison operators.
@@ -746,7 +818,6 @@ Type* Typer::Visitor::JSEqualTyper(Type* lhs, Type* rhs, Typer* t) {
if (lhs->IsConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
- // TODO(neis): Extend this to Range(x,x), MinusZero, ...?
return t->singleton_true_;
}
return Type::Boolean();
@@ -857,198 +928,41 @@ Type* Typer::Visitor::JSGreaterThanOrEqualTyper(
return FalsifyUndefined(Invert(JSCompareTyper(lhs, rhs, t), t), t);
}
-
// JS bitwise operators.
Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = NumberToInt32(ToNumber(lhs, t), t);
- rhs = NumberToInt32(ToNumber(rhs, t), t);
- double lmin = lhs->Min();
- double rmin = rhs->Min();
- double lmax = lhs->Max();
- double rmax = rhs->Max();
- // Or-ing any two values results in a value no smaller than their minimum.
- // Even no smaller than their maximum if both values are non-negative.
- double min =
- lmin >= 0 && rmin >= 0 ? std::max(lmin, rmin) : std::min(lmin, rmin);
- double max = Type::Signed32()->Max();
-
- // Or-ing with 0 is essentially a conversion to int32.
- if (rmin == 0 && rmax == 0) {
- min = lmin;
- max = lmax;
- }
- if (lmin == 0 && lmax == 0) {
- min = rmin;
- max = rmax;
- }
-
- if (lmax < 0 || rmax < 0) {
- // Or-ing two values of which at least one is negative results in a negative
- // value.
- max = std::min(max, -1.0);
- }
- return Type::Range(min, max, t->zone());
- // TODO(neis): Be precise for singleton inputs, here and elsewhere.
+ return NumberBitwiseOr(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = NumberToInt32(ToNumber(lhs, t), t);
- rhs = NumberToInt32(ToNumber(rhs, t), t);
- double lmin = lhs->Min();
- double rmin = rhs->Min();
- double lmax = lhs->Max();
- double rmax = rhs->Max();
- double min = Type::Signed32()->Min();
- // And-ing any two values results in a value no larger than their maximum.
- // Even no larger than their minimum if both values are non-negative.
- double max =
- lmin >= 0 && rmin >= 0 ? std::min(lmax, rmax) : std::max(lmax, rmax);
- // And-ing with a non-negative value x causes the result to be between
- // zero and x.
- if (lmin >= 0) {
- min = 0;
- max = std::min(max, lmax);
- }
- if (rmin >= 0) {
- min = 0;
- max = std::min(max, rmax);
- }
- return Type::Range(min, max, t->zone());
+ return NumberBitwiseAnd(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = NumberToInt32(ToNumber(lhs, t), t);
- rhs = NumberToInt32(ToNumber(rhs, t), t);
- double lmin = lhs->Min();
- double rmin = rhs->Min();
- double lmax = lhs->Max();
- double rmax = rhs->Max();
- if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
- // Xor-ing negative or non-negative values results in a non-negative value.
- return Type::Unsigned31();
- }
- if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
- // Xor-ing a negative and a non-negative value results in a negative value.
- // TODO(jarin) Use a range here.
- return Type::Negative32();
- }
- return Type::Signed32();
+ return NumberBitwiseXor(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
- return Type::Signed32();
+ return NumberShiftLeft(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = NumberToInt32(ToNumber(lhs, t), t);
- rhs = NumberToUint32(ToNumber(rhs, t), t);
- double min = kMinInt;
- double max = kMaxInt;
- if (lhs->Min() >= 0) {
- // Right-shifting a non-negative value cannot make it negative, nor larger.
- min = std::max(min, 0.0);
- max = std::min(max, lhs->Max());
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
- }
- }
- if (lhs->Max() < 0) {
- // Right-shifting a negative value cannot make it non-negative, nor smaller.
- min = std::max(min, lhs->Min());
- max = std::min(max, -1.0);
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
- }
- }
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- // Right-shifting by a positive value yields a small integer value.
- double shift_min = kMinInt >> static_cast<int>(rhs->Min());
- double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
- min = std::max(min, shift_min);
- max = std::min(max, shift_max);
- }
- // TODO(jarin) Ideally, the following micro-optimization should be performed
- // by the type constructor.
- if (max != Type::Signed32()->Max() || min != Type::Signed32()->Min()) {
- return Type::Range(min, max, t->zone());
- }
- return Type::Signed32();
+ return NumberShiftRight(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
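The delegated NumberShiftRight keeps the same narrowing the removed code performed for a non-negative left operand and a known small shift count; a standalone sketch with hypothetical ranges:

#include <algorithm>
#include <cassert>

int main() {
  int lhs_min = 0, lhs_max = 100;  // lhs : Range(0, 100)
  int rhs_min = 2;                 // rhs : Range(2, 2), with 0 < rhs <= 31
  // A non-negative value stays non-negative and shrinks under >>:
  int min = std::max(lhs_min, 0);
  int max = lhs_max >> rhs_min;    // 100 >> 2 == 25
  assert(min == 0 && max == 25);   // result typed Range(0, 25)
}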
Type* Typer::Visitor::JSShiftRightLogicalTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = NumberToUint32(ToNumber(lhs, t), t);
- // Logical right-shifting any value cannot make it larger.
- return Type::Range(0.0, lhs->Max(), t->zone());
+ return NumberShiftRightLogical(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
// JS arithmetic operators.
-
-// Returns the array's least element, ignoring NaN.
-// There must be at least one non-NaN element.
-// Any -0 is converted to 0.
-static double array_min(double a[], size_t n) {
- DCHECK(n != 0);
- double x = +V8_INFINITY;
- for (size_t i = 0; i < n; ++i) {
- if (!std::isnan(a[i])) {
- x = std::min(a[i], x);
- }
- }
- DCHECK(!std::isnan(x));
- return x == 0 ? 0 : x; // -0 -> 0
-}
-
-
-// Returns the array's greatest element, ignoring NaN.
-// There must be at least one non-NaN element.
-// Any -0 is converted to 0.
-static double array_max(double a[], size_t n) {
- DCHECK(n != 0);
- double x = -V8_INFINITY;
- for (size_t i = 0; i < n; ++i) {
- if (!std::isnan(a[i])) {
- x = std::max(a[i], x);
- }
- }
- DCHECK(!std::isnan(x));
- return x == 0 ? 0 : x; // -0 -> 0
-}
-
-Type* Typer::Visitor::JSAddRanger(RangeType* lhs, RangeType* rhs, Typer* t) {
- double results[4];
- results[0] = lhs->Min() + rhs->Min();
- results[1] = lhs->Min() + rhs->Max();
- results[2] = lhs->Max() + rhs->Min();
- results[3] = lhs->Max() + rhs->Max();
- // Since none of the inputs can be -0, the result cannot be -0 either.
- // However, it can be nan (the sum of two infinities of opposite sign).
- // On the other hand, if none of the "results" above is nan, then the actual
- // result cannot be nan either.
- int nans = 0;
- for (int i = 0; i < 4; ++i) {
- if (std::isnan(results[i])) ++nans;
- }
- if (nans == 4) return Type::NaN(); // [-inf..-inf] + [inf..inf] or vice versa
- Type* range =
- Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
- return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
- // Examples:
- // [-inf, -inf] + [+inf, +inf] = NaN
- // [-inf, -inf] + [n, +inf] = [-inf, -inf] \/ NaN
- // [-inf, +inf] + [n, +inf] = [-inf, +inf] \/ NaN
- // [-inf, m] + [n, +inf] = [-inf, +inf] \/ NaN
-}
-
-
Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
lhs = ToPrimitive(lhs, t);
rhs = ToPrimitive(rhs, t);
@@ -1059,154 +973,24 @@ Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
return Type::NumberOrString();
}
}
- lhs = Rangify(ToNumber(lhs, t), t);
- rhs = Rangify(ToNumber(rhs, t), t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->IsRange() && rhs->IsRange()) {
- return JSAddRanger(lhs->AsRange(), rhs->AsRange(), t);
- }
- // TODO(neis): Deal with numeric bitsets here and elsewhere.
- return Type::Number();
+ // The addition must be numeric.
+ return NumberAdd(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
-Type* Typer::Visitor::JSSubtractRanger(RangeType* lhs, RangeType* rhs,
- Typer* t) {
- double results[4];
- results[0] = lhs->Min() - rhs->Min();
- results[1] = lhs->Min() - rhs->Max();
- results[2] = lhs->Max() - rhs->Min();
- results[3] = lhs->Max() - rhs->Max();
- // Since none of the inputs can be -0, the result cannot be -0.
- // However, it can be nan (the subtraction of two infinities of same sign).
- // On the other hand, if none of the "results" above is nan, then the actual
- // result cannot be nan either.
- int nans = 0;
- for (int i = 0; i < 4; ++i) {
- if (std::isnan(results[i])) ++nans;
- }
- if (nans == 4) return Type::NaN(); // [inf..inf] - [inf..inf] (all same sign)
- Type* range =
- Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
- return nans == 0 ? range : Type::Union(range, Type::NaN(), t->zone());
- // Examples:
- // [-inf, +inf] - [-inf, +inf] = [-inf, +inf] \/ NaN
- // [-inf, -inf] - [-inf, -inf] = NaN
- // [-inf, -inf] - [n, +inf] = [-inf, -inf] \/ NaN
- // [m, +inf] - [-inf, n] = [-inf, +inf] \/ NaN
-}
-
-
Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = Rangify(ToNumber(lhs, t), t);
- rhs = Rangify(ToNumber(rhs, t), t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->IsRange() && rhs->IsRange()) {
- return JSSubtractRanger(lhs->AsRange(), rhs->AsRange(), t);
- }
- return Type::Number();
+ return NumberSubtract(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
-
Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = Rangify(ToNumber(lhs, t), t);
- rhs = Rangify(ToNumber(rhs, t), t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- if (lhs->IsRange() && rhs->IsRange()) {
- double results[4];
- double lmin = lhs->AsRange()->Min();
- double lmax = lhs->AsRange()->Max();
- double rmin = rhs->AsRange()->Min();
- double rmax = rhs->AsRange()->Max();
- results[0] = lmin * rmin;
- results[1] = lmin * rmax;
- results[2] = lmax * rmin;
- results[3] = lmax * rmax;
- // If the result may be nan, we give up on calculating a precise type,
- // because
- // the discontinuity makes it too complicated. Note that even if none of
- // the
- // "results" above is nan, the actual result may still be, so we have to do
- // a
- // different check:
- bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
- (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
- (rhs->Maybe(t->cache_.kSingletonZero) &&
- (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
- bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
- (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
- Type* range =
- Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
- return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
- : range;
- }
- return Type::Number();
+ return NumberMultiply(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
-
Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumber(lhs, t);
- rhs = ToNumber(rhs, t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- // Division is tricky, so all we do is try ruling out nan.
- // TODO(neis): try ruling out -0 as well?
- bool maybe_nan =
- lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
- ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
- (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
- return maybe_nan ? Type::Number() : Type::OrderedNumber();
-}
-
-Type* Typer::Visitor::JSModulusRanger(RangeType* lhs, RangeType* rhs,
- Typer* t) {
- double lmin = lhs->Min();
- double lmax = lhs->Max();
- double rmin = rhs->Min();
- double rmax = rhs->Max();
-
- double labs = std::max(std::abs(lmin), std::abs(lmax));
- double rabs = std::max(std::abs(rmin), std::abs(rmax)) - 1;
- double abs = std::min(labs, rabs);
- bool maybe_minus_zero = false;
- double omin = 0;
- double omax = 0;
- if (lmin >= 0) { // {lhs} positive.
- omin = 0;
- omax = abs;
- } else if (lmax <= 0) { // {lhs} negative.
- omin = 0 - abs;
- omax = 0;
- maybe_minus_zero = true;
- } else {
- omin = 0 - abs;
- omax = abs;
- maybe_minus_zero = true;
- }
-
- Type* result = Type::Range(omin, omax, t->zone());
- if (maybe_minus_zero)
- result = Type::Union(result, Type::MinusZero(), t->zone());
- return result;
+ return NumberDivide(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
-
Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumber(lhs, t);
- rhs = ToNumber(rhs, t);
- if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-
- if (lhs->Maybe(Type::NaN()) || rhs->Maybe(t->cache_.kZeroish) ||
- lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) {
- // Result maybe NaN.
- return Type::Number();
- }
-
- lhs = Rangify(lhs, t);
- rhs = Rangify(rhs, t);
- if (lhs->IsRange() && rhs->IsRange()) {
- return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
- }
- return Type::OrderedNumber();
+ return NumberModulus(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
@@ -1315,56 +1099,18 @@ Type* Typer::Visitor::TypeJSCreateLiteralRegExp(Node* node) {
}
-Type* Typer::Visitor::JSLoadPropertyTyper(Type* object, Type* name, Typer* t) {
- // TODO(rossberg): Use range types and sized array types to filter undefined.
- if (object->IsArray() && name->Is(Type::Integral32())) {
- return Type::Union(
- object->AsArray()->Element(), Type::Undefined(), t->zone());
- }
- return Type::Any();
-}
-
-
Type* Typer::Visitor::TypeJSLoadProperty(Node* node) {
- return TypeBinaryOp(node, JSLoadPropertyTyper);
+ return Type::NonInternal();
}
Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
- Factory* const f = isolate()->factory();
- Handle<Name> name = NamedAccessOf(node->op()).name();
- if (name.is_identical_to(f->prototype_string())) {
- Type* receiver = Operand(node, 0);
- if (receiver->Is(Type::None())) return Type::None();
- if (receiver->IsConstant() &&
- receiver->AsConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(receiver->AsConstant()->Value());
- if (function->has_prototype()) {
- // We need to add a code dependency on the initial map of the {function}
- // in order to be notified about changes to "prototype" of {function},
- // so we can only infer a constant type if deoptimization is enabled.
- if (flags() & kDeoptimizationEnabled) {
- JSFunction::EnsureHasInitialMap(function);
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
- return Type::Constant(handle(initial_map->prototype(), isolate()),
- zone());
- }
- }
- } else if (receiver->IsClass() &&
- receiver->AsClass()->Map()->IsJSFunctionMap()) {
- Handle<Map> map = receiver->AsClass()->Map();
- return map->has_non_instance_prototype() ? Type::Primitive()
- : Type::Receiver();
- }
- }
- return Type::Any();
+ return Type::NonInternal();
}
-
-Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) { return Type::Any(); }
-
+Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) {
+ return Type::NonInternal();
+}
// Returns a somewhat larger range if we previously assigned
// a (smaller) range to this node. This is used to speed up
@@ -1497,7 +1243,7 @@ Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
if (outer->Is(Type::None())) {
return Type::None();
} else {
- DCHECK(outer->Maybe(Type::Internal()));
+ DCHECK(outer->Maybe(Type::OtherInternal()));
return Type::Context(outer, zone());
}
}
@@ -1523,12 +1269,6 @@ Type* Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
}
-Type* Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
- // TODO(rossberg): this is probably incorrect
- return WrapContextTypeForInput(node);
-}
-
-
Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
return WrapContextTypeForInput(node);
}
@@ -1537,9 +1277,6 @@ Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
// JS other operators.
-Type* Typer::Visitor::TypeJSYield(Node* node) { return Type::Any(); }
-
-
Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
return Type::Receiver();
}
@@ -1563,17 +1300,28 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return t->cache_.kIntegerOrMinusZeroOrNaN;
// Unary math functions.
case kMathAbs:
- case kMathLog:
case kMathExp:
- case kMathSqrt:
- case kMathCos:
- case kMathSin:
- case kMathTan:
+ case kMathExpm1:
+ return Type::Union(Type::PlainNumber(), Type::NaN(), t->zone());
case kMathAcos:
+ case kMathAcosh:
case kMathAsin:
+ case kMathAsinh:
case kMathAtan:
+ case kMathAtanh:
+ case kMathCbrt:
+ case kMathCos:
case kMathFround:
+ case kMathLog:
+ case kMathLog1p:
+ case kMathLog10:
+ case kMathLog2:
+ case kMathSin:
+ case kMathSqrt:
+ case kMathTan:
return Type::Number();
+ case kMathSign:
+ return t->cache_.kMinusOneToOne;
// Binary math functions.
case kMathAtan2:
case kMathPow:
@@ -1584,6 +1332,11 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return Type::Signed32();
case kMathClz32:
return t->cache_.kZeroToThirtyTwo;
+ // Number functions.
+ case kNumberParseInt:
+ return t->cache_.kIntegerOrMinusZeroOrNaN;
+ case kNumberToString:
+ return Type::String();
// String functions.
case kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
@@ -1591,19 +1344,31 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kStringCharAt:
case kStringConcat:
case kStringFromCharCode:
+ case kStringSubstr:
case kStringToLowerCase:
case kStringToUpperCase:
return Type::String();
// Array functions.
case kArrayIndexOf:
case kArrayLastIndexOf:
- return Type::Number();
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ // Object functions.
+ case kObjectHasOwnProperty:
+ return Type::Boolean();
+ // Global functions.
+ case kGlobalDecodeURI:
+ case kGlobalDecodeURIComponent:
+ case kGlobalEncodeURI:
+ case kGlobalEncodeURIComponent:
+ case kGlobalEscape:
+ case kGlobalUnescape:
+ return Type::String();
default:
break;
}
}
}
- return Type::Any();
+ return Type::NonInternal();
}
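Two spot checks of the refined builtin result types, modeled standalone (the helpers below are illustrative stand-ins, not the builtins themselves): Math.sign results now fall in kMinusOneToOne, and indexOf/lastIndexOf results in Range(-1, kMaxSafeInteger):

#include <cassert>
#include <vector>

double IndexOf(const std::vector<double>& a, double v) {
  for (size_t i = 0; i < a.size(); ++i)
    if (a[i] == v) return static_cast<double>(i);
  return -1;  // not found
}

int main() {
  const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
  std::vector<double> a = {2, 3, 5};
  for (double v : {3.0, 7.0}) {
    double r = IndexOf(a, v);
    assert(r >= -1 && r <= kMaxSafeInteger);  // Range(-1, kMaxSafeInteger)
  }
  // Models Math.sign for non-NaN input; results lie in [-1, 1].
  auto sign = [](double x) { return double((x > 0) - (x < 0)); };
  for (double x : {-3.5, 0.0, 7.0}) assert(sign(x) >= -1 && sign(x) <= 1);
}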
@@ -1625,12 +1390,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsTypedArray:
case Runtime::kInlineIsRegExp:
return Type::Boolean();
- case Runtime::kInlineDoubleLo:
- case Runtime::kInlineDoubleHi:
- return Type::Signed32();
- case Runtime::kInlineConstructDouble:
- case Runtime::kInlineMathAtan2:
- return Type::Number();
case Runtime::kInlineCreateIterResultObject:
case Runtime::kInlineRegExpConstructResult:
return Type::OtherObject();
@@ -1641,16 +1400,10 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToInteger);
case Runtime::kInlineToLength:
return TypeUnaryOp(node, ToLength);
- case Runtime::kInlineToName:
- return TypeUnaryOp(node, ToName);
case Runtime::kInlineToNumber:
return TypeUnaryOp(node, ToNumber);
case Runtime::kInlineToObject:
return TypeUnaryOp(node, ToObject);
- case Runtime::kInlineToPrimitive:
- case Runtime::kInlineToPrimitive_Number:
- case Runtime::kInlineToPrimitive_String:
- return TypeUnaryOp(node, ToPrimitive);
case Runtime::kInlineToString:
return TypeUnaryOp(node, ToString);
case Runtime::kHasInPrototypeChain:
@@ -1658,6 +1411,9 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
default:
break;
}
+ // TODO(turbofan): This should be Type::NonInternal(), but unfortunately we
+ // have a few weird runtime calls that return the hole or even FixedArrays;
+ // change this once those weird runtime calls have been removed.
return Type::Any();
}
@@ -1698,18 +1454,25 @@ Type* Typer::Visitor::TypeJSStoreMessage(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeJSGeneratorStore(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
+ return typer_->cache_.kSmi;
+}
-Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
+ return Type::Any();
+}
+Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
// Simplified operators.
Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeBooleanToNumber(Node* node) {
- return TypeUnaryOp(node, ToNumber);
-}
-
Type* Typer::Visitor::TypeNumberEqual(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeNumberLessThan(Node* node) { return Type::Boolean(); }
@@ -1718,86 +1481,29 @@ Type* Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
return Type::Boolean();
}
-Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberSubtract(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberMultiply(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberDivide(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberModulus(Node* node) { return Type::Number(); }
-
-Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberShiftRight(Node* node) {
- return Type::Signed32();
-}
-
-
-Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
- return Type::Unsigned32();
-}
-
-Type* Typer::Visitor::TypeNumberImul(Node* node) { return Type::Signed32(); }
-
-Type* Typer::Visitor::TypeNumberClz32(Node* node) {
- return typer_->cache_.kZeroToThirtyTwo;
-}
-
-Type* Typer::Visitor::TypeNumberCeil(Node* node) {
- return TypeUnaryOp(node, NumberCeil);
-}
-
-Type* Typer::Visitor::TypeNumberFloor(Node* node) {
- return TypeUnaryOp(node, NumberFloor);
-}
-
-Type* Typer::Visitor::TypeNumberRound(Node* node) {
- return TypeUnaryOp(node, NumberRound);
-}
-
-Type* Typer::Visitor::TypeNumberTrunc(Node* node) {
- return TypeUnaryOp(node, NumberTrunc);
-}
-
-Type* Typer::Visitor::TypeNumberToInt32(Node* node) {
- return TypeUnaryOp(node, NumberToInt32);
+Type* Typer::Visitor::TypeSpeculativeNumberEqual(Node* node) {
+ return Type::Boolean();
}
-
-Type* Typer::Visitor::TypeNumberToUint32(Node* node) {
- return TypeUnaryOp(node, NumberToUint32);
+Type* Typer::Visitor::TypeSpeculativeNumberLessThan(Node* node) {
+ return Type::Boolean();
}
-
-Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
+Type* Typer::Visitor::TypeSpeculativeNumberLessThanOrEqual(Node* node) {
return Type::Boolean();
}
-
Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
+Type* Typer::Visitor::TypePlainPrimitiveToWord32(Node* node) {
+ return Type::Integral32();
+}
+
+Type* Typer::Visitor::TypePlainPrimitiveToFloat64(Node* node) {
+ return Type::Number();
+}
// static
Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
@@ -1820,138 +1526,103 @@ Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
return Type::Boolean();
}
-Type* Typer::Visitor::TypeStringToNumber(Node* node) {
- return TypeUnaryOp(node, ToNumber);
+Type* Typer::Visitor::StringFromCharCodeTyper(Type* type, Typer* t) {
+ type = NumberToUint32(ToNumber(type, t), t);
+ Factory* f = t->isolate()->factory();
+ double min = type->Min();
+ double max = type->Max();
+ if (min == max) {
+ uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
+ Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
+ return Type::Constant(string, t->zone());
+ }
+ return Type::String();
}
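The constant-folding case above: when the uint32-converted input range collapses to a single code unit, the result type is the singleton one-character string. A standalone model (ASCII-only for simplicity; the mask mirrors String::kMaxUtf16CodeUnitU):

#include <cassert>
#include <string>

const unsigned kMaxUtf16CodeUnit = 0xFFFF;

std::string TypeStringFromCharCode(double min, double max) {
  if (min == max) {
    unsigned code = static_cast<unsigned>(min) & kMaxUtf16CodeUnit;
    // Singleton constant type for the one-character string.
    return std::string("Constant(\"") + static_cast<char>(code) + "\")";
  }
  return "String";
}

int main() {
  assert(TypeStringFromCharCode(65, 65) == "Constant(\"A\")");  // code 65 = 'A'
  assert(TypeStringFromCharCode(65, 90) == "String");
}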
-namespace {
-
-Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
- return Type::Union(Type::Semantic(type, zone),
- Type::Representation(rep, zone), zone);
+Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
+ // TODO(bmeurer): We could do better here based on inputs.
+ return Type::Range(0, kMaxUInt16, zone());
}
-} // namespace
-
-
-Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
- Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
- return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
+ return TypeUnaryOp(node, StringFromCharCodeTyper);
}
-
-Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
- Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
- return ChangeRepresentation(arg, Type::UntaggedIntegral32(), zone());
+Type* Typer::Visitor::TypeCheckBounds(Node* node) {
+ Type* index = Operand(node, 0);
+ Type* length = Operand(node, 1);
+ index = Type::Intersect(index, Type::Integral32(), zone());
+ if (!index->IsInhabited() || !length->IsInhabited()) return Type::None();
+ double min = std::max(index->Min(), 0.0);
+ double max = std::min(index->Max(), length->Min() - 1);
+ if (max < min) return Type::None();
+ return Type::Range(min, max, zone());
}
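A worked instance of the CheckBounds typing above, with hypothetical ranges: an index in [-5, 20] checked against a length known to be at least 10:

#include <algorithm>
#include <cassert>

int main() {
  double index_min = -5, index_max = 20;
  double length_min = 10;                            // length : Range(10, ...)
  double min = std::max(index_min, 0.0);             // clamp away negatives
  double max = std::min(index_max, length_min - 1);  // index must be < length
  assert(min == 0 && max == 9);                      // node typed Range(0, 9)
  // If max < min the check can never succeed, and the type is None.
}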
-
-Type* Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
- Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Number()));
- return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
+Type* Typer::Visitor::TypeCheckMaps(Node* node) {
+ UNREACHABLE();
+ return nullptr;
}
-
-Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
+Type* Typer::Visitor::TypeCheckNumber(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
- Type* rep =
- arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
- return ChangeRepresentation(arg, rep, zone());
+ return Type::Intersect(arg, Type::Number(), zone());
}
-
-Type* Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
+Type* Typer::Visitor::TypeCheckString(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
- return ChangeRepresentation(arg, Type::Tagged(), zone());
+ return Type::Intersect(arg, Type::String(), zone());
}
-
-Type* Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
- Type* arg = Operand(node, 0);
- // TODO(neis): CHECK(arg.upper->Is(Type::Number()));
- return ChangeRepresentation(arg, Type::Tagged(), zone());
+Type* Typer::Visitor::TypeCheckIf(Node* node) {
+ UNREACHABLE();
+ return nullptr;
}
-
-Type* Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+Type* Typer::Visitor::TypeCheckTaggedPointer(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
- return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
+ return Type::Intersect(arg, Type::TaggedPointer(), zone());
}
-
-Type* Typer::Visitor::TypeChangeBitToBool(Node* node) {
+Type* Typer::Visitor::TypeCheckTaggedSigned(Node* node) {
Type* arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
- return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
+ return Type::Intersect(arg, typer_->cache_.kSmi, zone());
}
+Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
+ Type* type = Operand(node, 0);
+ return type;
+}
-Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
-
-
-namespace {
+Type* Typer::Visitor::TypeCheckTaggedHole(Node* node) {
+ Type* type = Operand(node, 0);
+ type = Type::Intersect(type, Type::NonInternal(), zone());
+ return type;
+}
-MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
- if (object_type->IsConstant() &&
- object_type->AsConstant()->Value()->IsHeapObject()) {
- Handle<Map> object_map(
- Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
- if (object_map->is_stable()) return object_map;
- } else if (object_type->IsClass()) {
- Handle<Map> object_map = object_type->AsClass()->Map();
- if (object_map->is_stable()) return object_map;
+Type* Typer::Visitor::TypeConvertTaggedHoleToUndefined(Node* node) {
+ Type* type = Operand(node, 0);
+ if (type->Maybe(Type::Hole())) {
+ // Turn "the hole" into undefined.
+ type = Type::Intersect(type, Type::NonInternal(), zone());
+ type = Type::Union(type, Type::Undefined(), zone());
}
- return MaybeHandle<Map>();
+ return type;
}
-} // namespace
-
+Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
Type* Typer::Visitor::TypeLoadField(Node* node) {
- FieldAccess const& access = FieldAccessOf(node->op());
- if (access.base_is_tagged == kTaggedBase &&
- access.offset == HeapObject::kMapOffset) {
- // The type of LoadField[Map](o) is Constant(map) if map is stable and
- // either
- // (a) o has type Constant(object) and map == object->map, or
- // (b) o has type Class(map),
- // and either
- // (1) map cannot transition further, or
- // (2) deoptimization is enabled and we can add a code dependency on the
- // stability of map (to guard the Constant type information).
- Type* const object = Operand(node, 0);
- if (object->Is(Type::None())) return Type::None();
- Handle<Map> object_map;
- if (GetStableMapFromObjectType(object).ToHandle(&object_map)) {
- if (object_map->CanTransition()) {
- if (flags() & kDeoptimizationEnabled) {
- dependencies()->AssumeMapStable(object_map);
- } else {
- return access.type;
- }
- }
- Type* object_map_type = Type::Constant(object_map, zone());
- DCHECK(object_map_type->Is(access.type));
- return object_map_type;
- }
- }
- return access.type;
+ return FieldAccessOf(node->op()).type;
}
-
Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
// TODO(bmeurer): This typing is not yet correct. Since we can still access
// out of bounds, the type in the general case has to include Undefined.
switch (BufferAccessOf(node->op()).external_array_type()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return typer_->cache_.k##Type;
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
+ case kExternal##ElemType##Array: \
+ return Type::Union(typer_->cache_.k##ElemType, Type::Undefined(), zone());
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
@@ -1964,6 +1635,17 @@ Type* Typer::Visitor::TypeLoadElement(Node* node) {
return ElementAccessOf(node->op()).type;
}
+Type* Typer::Visitor::TypeLoadTypedElement(Node* node) {
+ switch (ExternalArrayTypeOf(node->op())) {
+#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
+ case kExternal##ElemType##Array: \
+ return typer_->cache_.k##ElemType;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ }
+ UNREACHABLE();
+ return nullptr;
+}
Type* Typer::Visitor::TypeStoreField(Node* node) {
UNREACHABLE();
@@ -1982,6 +1664,14 @@ Type* Typer::Visitor::TypeStoreElement(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeStoreTypedElement(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
+ return TypeUnaryOp(node, ObjectIsCallable);
+}
Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
return TypeUnaryOp(node, ObjectIsNumber);
@@ -1997,6 +1687,9 @@ Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
return TypeUnaryOp(node, ObjectIsSmi);
}
+Type* Typer::Visitor::TypeObjectIsString(Node* node) {
+ return TypeUnaryOp(node, ObjectIsString);
+}
Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
return TypeUnaryOp(node, ObjectIsUndetectable);
@@ -2005,6 +1698,17 @@ Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
// Machine operators.
+Type* Typer::Visitor::TypeDebugBreak(Node* node) { return Type::None(); }
+
+Type* Typer::Visitor::TypeComment(Node* node) { return Type::None(); }
+
+Type* Typer::Visitor::TypeRetain(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Type* Typer::Visitor::TypeUnsafePointerAdd(Node* node) { return Type::None(); }
+
Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
@@ -2049,6 +1753,9 @@ Type* Typer::Visitor::TypeWord32ReverseBits(Node* node) {
return Type::Integral32();
}
+Type* Typer::Visitor::TypeWord32ReverseBytes(Node* node) {
+ return Type::Integral32();
+}
Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
return Type::Integral32();
@@ -2086,6 +1793,9 @@ Type* Typer::Visitor::TypeWord64ReverseBits(Node* node) {
return Type::Internal();
}
+Type* Typer::Visitor::TypeWord64ReverseBytes(Node* node) {
+ return Type::Internal();
+}
Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
@@ -2111,6 +1821,9 @@ Type* Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
Type* Typer::Visitor::TypeInt32Mul(Node* node) { return Type::Integral32(); }
+Type* Typer::Visitor::TypeInt32MulWithOverflow(Node* node) {
+ return Type::Internal();
+}
Type* Typer::Visitor::TypeInt32MulHigh(Node* node) { return Type::Signed32(); }
@@ -2166,7 +1879,6 @@ Type* Typer::Visitor::TypeInt64SubWithOverflow(Node* node) {
Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
-
Type* Typer::Visitor::TypeInt64Div(Node* node) { return Type::Internal(); }
@@ -2194,6 +1906,9 @@ Type* Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
Type* Typer::Visitor::TypeUint64Mod(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeBitcastWordToTagged(Node* node) {
+ return Type::TaggedPointer();
+}
Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
return Type::Intersect(Type::Number(), Type::UntaggedFloat64(), zone());
@@ -2204,7 +1919,6 @@ Type* Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
-
Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
zone());
@@ -2250,36 +1964,60 @@ Type* Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
}
+Type* Typer::Visitor::TypeFloat64SilenceNaN(Node* node) {
+ return Type::UntaggedFloat64();
+}
Type* Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
return Type::Internal();
}
-
Type* Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
return Type::Intersect(Type::Unsigned32(), Type::UntaggedFloat64(), zone());
}
-
Type* Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
return Type::Internal();
}
+Type* Typer::Visitor::TypeImpossibleToWord32(Node* node) {
+ return Type::None();
+}
-Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
- return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
+Type* Typer::Visitor::TypeImpossibleToWord64(Node* node) {
+ return Type::None();
}
+Type* Typer::Visitor::TypeImpossibleToFloat32(Node* node) {
+ return Type::None();
+}
-Type* Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
- return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+Type* Typer::Visitor::TypeImpossibleToFloat64(Node* node) {
+ return Type::None();
+}
+
+Type* Typer::Visitor::TypeImpossibleToTagged(Node* node) {
+ return Type::None();
+}
+
+Type* Typer::Visitor::TypeImpossibleToBit(Node* node) { return Type::None(); }
+
+Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
+ return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
}
+Type* Typer::Visitor::TypeTruncateFloat64ToWord32(Node* node) {
+ return Type::Intersect(Type::Integral32(), Type::UntaggedIntegral32(),
+ zone());
+}
Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
}
+Type* Typer::Visitor::TypeRoundFloat64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
@@ -2336,6 +2074,7 @@ Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat32Neg(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
@@ -2343,12 +2082,6 @@ Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat32Div(Node* node) { return Type::Number(); }
-Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
-
-
-Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
-
-
Type* Typer::Visitor::TypeFloat32Abs(Node* node) {
// TODO(turbofan): We should be able to infer a better type here.
return Type::Number();
@@ -2370,12 +2103,16 @@ Type* Typer::Visitor::TypeFloat32LessThanOrEqual(Node* node) {
return Type::Boolean();
}
+Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat64Neg(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
@@ -2397,9 +2134,49 @@ Type* Typer::Visitor::TypeFloat64Abs(Node* node) {
return Type::Number();
}
+Type* Typer::Visitor::TypeFloat64Acos(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Acosh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Asin(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Asinh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atan(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atanh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Atan2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cbrt(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cos(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Cosh(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Exp(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Expm1(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log1p(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log10(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Log2(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Pow(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Sin(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Sinh(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeFloat64Tan(Node* node) { return Type::Number(); }
+
+Type* Typer::Visitor::TypeFloat64Tanh(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
@@ -2501,14 +2278,27 @@ Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
return Type::Internal();
}
-Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeUnalignedLoad(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeUnalignedStore(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeCheckedStore(Node* node) {
UNREACHABLE();
return nullptr;
}
+Type* Typer::Visitor::TypeAtomicLoad(Node* node) { return Type::Any(); }
+
+Type* Typer::Visitor::TypeAtomicStore(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
Type* Typer::Visitor::TypeInt32PairAdd(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeInt32PairSub(Node* node) { return Type::Internal(); }
@@ -2521,8 +2311,25 @@ Type* Typer::Visitor::TypeWord32PairShr(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeWord32PairSar(Node* node) { return Type::Internal(); }
-// Heap constants.
+// SIMD type methods.
+
+#define SIMD_RETURN_SIMD(Name) \
+ Type* Typer::Visitor::Type##Name(Node* node) { return Type::Simd(); }
+MACHINE_SIMD_RETURN_SIMD_OP_LIST(SIMD_RETURN_SIMD)
+MACHINE_SIMD_GENERIC_OP_LIST(SIMD_RETURN_SIMD)
+#undef SIMD_RETURN_SIMD
+#define SIMD_RETURN_NUM(Name) \
+ Type* Typer::Visitor::Type##Name(Node* node) { return Type::Number(); }
+MACHINE_SIMD_RETURN_NUM_OP_LIST(SIMD_RETURN_NUM)
+#undef SIMD_RETURN_NUM
+
+#define SIMD_RETURN_BOOL(Name) \
+ Type* Typer::Visitor::Type##Name(Node* node) { return Type::Boolean(); }
+MACHINE_SIMD_RETURN_BOOL_OP_LIST(SIMD_RETURN_BOOL)
+#undef SIMD_RETURN_BOOL
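Each list macro stamps out one trivial visitor per opcode. A standalone demo of the same stamping pattern, with hypothetical op names (the real op lists live in the machine operator headers):

#include <cassert>
#include <string>

#define SIMD_OP_LIST(V) V(Float32x4Add) V(Int32x4Mul)  // hypothetical ops
#define SIMD_RETURN_SIMD(Name) \
  std::string Type##Name() { return "Simd"; }
SIMD_OP_LIST(SIMD_RETURN_SIMD)  // defines TypeFloat32x4Add, TypeInt32x4Mul
#undef SIMD_RETURN_SIMD

int main() {
  assert(TypeFloat32x4Add() == "Simd");
  assert(TypeInt32x4Mul() == "Simd");
}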
+
+// Heap constants.
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
if (value->IsJSTypedArray()) {
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 0982b28ade..d4d5744a6e 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -5,37 +5,30 @@
#ifndef V8_COMPILER_TYPER_H_
#define V8_COMPILER_TYPER_H_
-#include "src/base/flags.h"
#include "src/compiler/graph.h"
+#include "src/compiler/operation-typer.h"
#include "src/types.h"
namespace v8 {
namespace internal {
// Forward declarations.
-class CompilationDependencies;
class TypeCache;
namespace compiler {
+class LoopVariableOptimizer;
+class OperationTyper;
class Typer {
public:
- // Flags that control the mode of operation.
- enum Flag {
- kNoFlags = 0u,
- kDeoptimizationEnabled = 1u << 0,
- };
- typedef base::Flags<Flag> Flags;
-
- Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
- CompilationDependencies* dependencies = nullptr,
- FunctionType* function_type = nullptr);
+ Typer(Isolate* isolate, Graph* graph);
~Typer();
void Run();
// TODO(bmeurer,jarin): Remove this once we have a notion of "roots" on Graph.
- void Run(const ZoneVector<Node*>& roots);
+ void Run(const ZoneVector<Node*>& roots,
+ LoopVariableOptimizer* induction_vars);
private:
class Visitor;
@@ -44,31 +37,23 @@ class Typer {
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
Isolate* isolate() const { return isolate_; }
- Flags flags() const { return flags_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
- FunctionType* function_type() const { return function_type_; }
+ OperationTyper* operation_typer() { return &operation_typer_; }
Isolate* const isolate_;
Graph* const graph_;
- Flags const flags_;
- CompilationDependencies* const dependencies_;
- FunctionType* function_type_;
Decorator* decorator_;
TypeCache const& cache_;
+ OperationTyper operation_typer_;
Type* singleton_false_;
Type* singleton_true_;
Type* singleton_the_hole_;
- Type* signed32ish_;
- Type* unsigned32ish_;
Type* falsish_;
Type* truish_;
DISALLOW_COPY_AND_ASSIGN(Typer);
};
-DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags)
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/unwinding-info-writer.h b/deps/v8/src/compiler/unwinding-info-writer.h
new file mode 100644
index 0000000000..86f5e9e800
--- /dev/null
+++ b/deps/v8/src/compiler/unwinding-info-writer.h
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_UNWINDING_INFO_WRITER_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/compiler/arm/unwinding-info-writer-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/compiler/arm64/unwinding-info-writer-arm64.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/compiler/x64/unwinding-info-writer-x64.h"
+#else
+
+// Placeholder for unsupported architectures.
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+class EhFrameWriter;
+
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+ explicit UnwindingInfoWriter(Zone* zone) {}
+
+ void SetNumberOfInstructionBlocks(int number) {
+ if (FLAG_perf_prof_unwinding_info) UNIMPLEMENTED();
+ }
+
+ void BeginInstructionBlock(int pc_offset, const InstructionBlock* block) {
+ if (FLAG_perf_prof_unwinding_info) UNIMPLEMENTED();
+ }
+ void EndInstructionBlock(const InstructionBlock* block) {
+ if (FLAG_perf_prof_unwinding_info) UNIMPLEMENTED();
+ }
+
+ void Finish(int code_size) {}
+
+ EhFrameWriter* eh_frame_writer() { return nullptr; }
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif
+
+#endif
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index 555570d220..4769cb0c8b 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -7,6 +7,7 @@
#include <cstring>
#include "src/base/functional.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
namespace v8 {
@@ -41,10 +42,12 @@ bool Equals(Node* a, Node* b) {
} // namespace
-
-ValueNumberingReducer::ValueNumberingReducer(Zone* zone)
- : entries_(nullptr), capacity_(0), size_(0), zone_(zone) {}
-
+ValueNumberingReducer::ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone)
+ : entries_(nullptr),
+ capacity_(0),
+ size_(0),
+ temp_zone_(temp_zone),
+ graph_zone_(graph_zone) {}
ValueNumberingReducer::~ValueNumberingReducer() {}
@@ -58,7 +61,7 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
DCHECK(capacity_ == 0);
// Allocate the initial entries and insert the first entry.
capacity_ = kInitialCapacity;
- entries_ = zone()->NewArray<Node*>(kInitialCapacity);
+ entries_ = temp_zone()->NewArray<Node*>(kInitialCapacity);
memset(entries_, 0, sizeof(*entries_) * kInitialCapacity);
entries_[hash & (kInitialCapacity - 1)] = node;
size_ = 1;
@@ -123,6 +126,25 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
continue;
}
if (Equals(entry, node)) {
+ // Make sure the replacement's type is at least as good as the
+ // original node's type.
+ if (NodeProperties::IsTyped(entry) && NodeProperties::IsTyped(node)) {
+ Type* entry_type = NodeProperties::GetType(entry);
+ Type* node_type = NodeProperties::GetType(node);
+ if (!entry_type->Is(node_type)) {
+ // Ideally, we would set an intersection of {entry_type} and
+ // {node_type} here. However, typing of NumberConstants assigns
+ // different types to constants with the same value (it creates
+ // a fresh heap number), which would make the intersection empty.
+ // To be safe, we use the smaller type if the types are comparable.
+ if (node_type->Is(entry_type)) {
+ NodeProperties::SetType(entry, node_type);
+ } else {
+ // Types are not comparable => do not replace.
+ return NoChange();
+ }
+ }
+ }
return Replace(entry);
}
}
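The guard above replaces a node only when the two types are comparable, and keeps the smaller of the two on the surviving entry. A standalone model of that decision on a toy two-point lattice (the Is relation is illustrative):

#include <cassert>
#include <string>

// Toy subtype test: "Signed32" is a subtype of "Number".
bool Is(const std::string& a, const std::string& b) {
  return a == b || (a == "Signed32" && b == "Number");
}

const char* Decide(std::string& entry_type, const std::string& node_type) {
  if (Is(entry_type, node_type)) return "Replace";  // entry already precise
  if (Is(node_type, entry_type)) {                  // narrow entry's type
    entry_type = node_type;
    return "Replace";
  }
  return "NoChange";                                // incomparable types
}

int main() {
  std::string entry = "Number";
  assert(std::string(Decide(entry, "Signed32")) == "Replace");
  assert(entry == "Signed32");  // entry narrowed to the smaller type
  std::string other = "String";
  assert(std::string(Decide(other, "Signed32")) == "NoChange");
}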
@@ -135,7 +157,7 @@ void ValueNumberingReducer::Grow() {
Node** const old_entries = entries_;
size_t const old_capacity = capacity_;
capacity_ *= kCapacityToSizeRatio;
- entries_ = zone()->NewArray<Node*>(capacity_);
+ entries_ = temp_zone()->NewArray<Node*>(capacity_);
memset(entries_, 0, sizeof(*entries_) * capacity_);
size_ = 0;
size_t const mask = capacity_ - 1;
diff --git a/deps/v8/src/compiler/value-numbering-reducer.h b/deps/v8/src/compiler/value-numbering-reducer.h
index 822b6075c5..f700c85bc0 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.h
+++ b/deps/v8/src/compiler/value-numbering-reducer.h
@@ -13,7 +13,7 @@ namespace compiler {
class ValueNumberingReducer final : public Reducer {
public:
- explicit ValueNumberingReducer(Zone* zone);
+ explicit ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone);
~ValueNumberingReducer();
Reduction Reduce(Node* node) override;
@@ -22,12 +22,14 @@ class ValueNumberingReducer final : public Reducer {
enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
void Grow();
- Zone* zone() const { return zone_; }
+ Zone* temp_zone() const { return temp_zone_; }
+ Zone* graph_zone() const { return graph_zone_; }
Node** entries_;
size_t capacity_;
size_t size_;
- Zone* zone_;
+ Zone* temp_zone_;
+ Zone* graph_zone_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index a69ace9480..eb42b39569 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -28,26 +28,16 @@ namespace internal {
namespace compiler {
-static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
- const Node::Uses uses = def->uses();
- return std::find(uses.begin(), uses.end(), use) != uses.end();
-}
-
-
-static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
- const Node::Inputs inputs = use->inputs();
- return std::find(inputs.begin(), inputs.end(), def) != inputs.end();
-}
-
-
class Verifier::Visitor {
public:
- Visitor(Zone* z, Typing typed) : zone(z), typing(typed) {}
+ Visitor(Zone* z, Typing typed, CheckInputs check_inputs)
+ : zone(z), typing(typed), check_inputs(check_inputs) {}
void Check(Node* node);
Zone* zone;
Typing typing;
+ CheckInputs check_inputs;
private:
void CheckNotTyped(Node* node) {
@@ -114,27 +104,25 @@ void Verifier::Visitor::Check(Node* node) {
int control_count = node->op()->ControlInputCount();
// Verify number of inputs matches up.
- int input_count = value_count + context_count + frame_state_count +
- effect_count + control_count;
+ int input_count = value_count + context_count + frame_state_count;
+ if (check_inputs == kAll) {
+ input_count += effect_count + control_count;
+ }
CHECK_EQ(input_count, node->InputCount());
// Verify that frame state has been inserted for the nodes that need it.
for (int i = 0; i < frame_state_count; i++) {
- Node* frame_state = NodeProperties::GetFrameStateInput(node, i);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
// kFrameState uses Start as a sentinel.
(node->opcode() == IrOpcode::kFrameState &&
frame_state->opcode() == IrOpcode::kStart));
- CHECK(IsDefUseChainLinkPresent(frame_state, node));
- CHECK(IsUseDefChainLinkPresent(frame_state, node));
}
// Verify all value inputs actually produce a value.
for (int i = 0; i < value_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, i);
CheckOutput(value, node, value->op()->ValueOutputCount(), "value");
- CHECK(IsDefUseChainLinkPresent(value, node));
- CHECK(IsUseDefChainLinkPresent(value, node));
// Verify that only parameters and projections can have input nodes with
// multiple outputs.
CHECK(node->opcode() == IrOpcode::kParameter ||
@@ -146,24 +134,45 @@ void Verifier::Visitor::Check(Node* node) {
for (int i = 0; i < context_count; ++i) {
Node* context = NodeProperties::GetContextInput(node);
CheckOutput(context, node, context->op()->ValueOutputCount(), "context");
- CHECK(IsDefUseChainLinkPresent(context, node));
- CHECK(IsUseDefChainLinkPresent(context, node));
}
- // Verify all effect inputs actually have an effect.
- for (int i = 0; i < effect_count; ++i) {
- Node* effect = NodeProperties::GetEffectInput(node);
- CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
- CHECK(IsDefUseChainLinkPresent(effect, node));
- CHECK(IsUseDefChainLinkPresent(effect, node));
- }
+ if (check_inputs == kAll) {
+ // Verify all effect inputs actually have an effect.
+ for (int i = 0; i < effect_count; ++i) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ CheckOutput(effect, node, effect->op()->EffectOutputCount(), "effect");
+ }
+
+ // Verify all control inputs are control nodes.
+ for (int i = 0; i < control_count; ++i) {
+ Node* control = NodeProperties::GetControlInput(node, i);
+ CheckOutput(control, node, control->op()->ControlOutputCount(),
+ "control");
+ }
- // Verify all control inputs are control nodes.
- for (int i = 0; i < control_count; ++i) {
- Node* control = NodeProperties::GetControlInput(node, i);
- CheckOutput(control, node, control->op()->ControlOutputCount(), "control");
- CHECK(IsDefUseChainLinkPresent(control, node));
- CHECK(IsUseDefChainLinkPresent(control, node));
+      // Verify that nodes which are not marked kNoThrow (i.e. may throw)
+      // only have IfSuccess/IfException control uses.
+ if (!node->op()->HasProperty(Operator::kNoThrow)) {
+ int count_success = 0, count_exception = 0;
+ for (Edge edge : node->use_edges()) {
+ if (!NodeProperties::IsControlEdge(edge)) {
+ continue;
+ }
+ Node* control_use = edge.from();
+ if (control_use->opcode() != IrOpcode::kIfSuccess &&
+ control_use->opcode() != IrOpcode::kIfException) {
+ V8_Fatal(__FILE__, __LINE__,
+ "#%d:%s should be followed by IfSuccess/IfException, but is "
+ "followed by #%d:%s",
+ node->id(), node->op()->mnemonic(), control_use->id(),
+ control_use->op()->mnemonic());
+ }
+ if (control_use->opcode() == IrOpcode::kIfSuccess) ++count_success;
+ if (control_use->opcode() == IrOpcode::kIfException) ++count_exception;
+ CHECK_LE(count_success, 1);
+ CHECK_LE(count_exception, 1);
+ }
+ }
}
switch (node->opcode()) {
@@ -345,6 +354,10 @@ void Verifier::Visitor::Check(Node* node) {
// Type is a number.
CheckUpperIs(node, Type::Number());
break;
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
+ CHECK_EQ(0, input_count);
+ break;
case IrOpcode::kHeapConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
@@ -397,6 +410,11 @@ void Verifier::Visitor::Check(Node* node) {
*/
break;
}
+ case IrOpcode::kInductionVariablePhi: {
+ // This is only a temporary node for the typer.
+ UNREACHABLE();
+ break;
+ }
case IrOpcode::kEffectPhi: {
// EffectPhi input count matches parent control node.
CHECK_EQ(0, value_count);
@@ -406,14 +424,27 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(input_count, 1 + effect_count);
break;
}
- case IrOpcode::kEffectSet: {
- CHECK_EQ(0, value_count);
- CHECK_EQ(0, control_count);
- CHECK_LT(1, effect_count);
+ case IrOpcode::kLoopExit: {
+ CHECK_EQ(2, control_count);
+ Node* loop = NodeProperties::GetControlInput(node, 1);
+ CHECK_EQ(IrOpcode::kLoop, loop->opcode());
break;
}
- case IrOpcode::kGuard:
- // TODO(bmeurer): what are the constraints on these?
+ case IrOpcode::kLoopExitValue: {
+ CHECK_EQ(1, control_count);
+ Node* loop_exit = NodeProperties::GetControlInput(node, 0);
+ CHECK_EQ(IrOpcode::kLoopExit, loop_exit->opcode());
+ break;
+ }
+ case IrOpcode::kLoopExitEffect: {
+ CHECK_EQ(1, control_count);
+ Node* loop_exit = NodeProperties::GetControlInput(node, 0);
+ CHECK_EQ(IrOpcode::kLoopExit, loop_exit->opcode());
+ break;
+ }
+ case IrOpcode::kCheckpoint:
+ // Type is empty.
+ CheckNotTyped(node);
break;
case IrOpcode::kBeginRegion:
// TODO(rossberg): what are the constraints on these?
@@ -578,7 +609,6 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateWithContext:
case IrOpcode::kJSCreateBlockContext:
- case IrOpcode::kJSCreateModuleContext:
case IrOpcode::kJSCreateScriptContext: {
// Type is Context, and operand is Internal.
Node* context = NodeProperties::GetContextInput(node);
@@ -596,7 +626,6 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSCallFunction:
case IrOpcode::kJSCallRuntime:
- case IrOpcode::kJSYield:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -628,11 +657,30 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSStoreMessage:
break;
+ case IrOpcode::kJSGeneratorStore:
+ CheckNotTyped(node);
+ break;
+
+ case IrOpcode::kJSGeneratorRestoreContinuation:
+ CheckUpperIs(node, Type::SignedSmall());
+ break;
+
+ case IrOpcode::kJSGeneratorRestoreRegister:
+ CheckUpperIs(node, Type::Any());
+ break;
+
case IrOpcode::kJSStackCheck:
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kComment:
+ case IrOpcode::kDebugBreak:
+ case IrOpcode::kRetain:
+ case IrOpcode::kUnsafePointerAdd:
+ CheckNotTyped(node);
+ break;
+
// Simplified operators
// -------------------------------
case IrOpcode::kBooleanNot:
@@ -640,12 +688,12 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Boolean());
CheckUpperIs(node, Type::Boolean());
break;
- case IrOpcode::kBooleanToNumber:
- // Boolean -> Number
- CheckValueInputIs(node, 0, Type::Boolean());
- CheckUpperIs(node, Type::Number());
- break;
case IrOpcode::kNumberEqual:
+ // (Number, Number) -> Boolean
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 1, Type::Number());
+ CheckUpperIs(node, Type::Boolean());
+ break;
case IrOpcode::kNumberLessThan:
case IrOpcode::kNumberLessThanOrEqual:
// (Number, Number) -> Boolean
@@ -653,16 +701,32 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::Number());
CheckUpperIs(node, Type::Boolean());
break;
+ case IrOpcode::kSpeculativeNumberAdd:
+ case IrOpcode::kSpeculativeNumberSubtract:
+ case IrOpcode::kSpeculativeNumberMultiply:
+ case IrOpcode::kSpeculativeNumberDivide:
+ case IrOpcode::kSpeculativeNumberModulus:
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kSpeculativeNumberEqual:
+ case IrOpcode::kSpeculativeNumberLessThan:
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ CheckUpperIs(node, Type::Boolean());
+ break;
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract:
case IrOpcode::kNumberMultiply:
case IrOpcode::kNumberDivide:
+ // (Number, Number) -> Number
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 1, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
case IrOpcode::kNumberModulus:
// (Number, Number) -> Number
CheckValueInputIs(node, 0, Type::Number());
CheckValueInputIs(node, 1, Type::Number());
- // TODO(rossberg): activate once we retype after opcode changes.
- // CheckUpperIs(node, Type::Number());
+ CheckUpperIs(node, Type::Number());
break;
case IrOpcode::kNumberBitwiseOr:
case IrOpcode::kNumberBitwiseXor:
@@ -672,6 +736,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::Signed32());
CheckUpperIs(node, Type::Signed32());
break;
+ case IrOpcode::kSpeculativeNumberBitwiseOr:
+ case IrOpcode::kSpeculativeNumberBitwiseXor:
+ case IrOpcode::kSpeculativeNumberBitwiseAnd:
+ CheckUpperIs(node, Type::Signed32());
+ break;
case IrOpcode::kNumberShiftLeft:
case IrOpcode::kNumberShiftRight:
// (Signed32, Unsigned32) -> Signed32
@@ -679,12 +748,19 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckUpperIs(node, Type::Signed32());
break;
+ case IrOpcode::kSpeculativeNumberShiftLeft:
+ case IrOpcode::kSpeculativeNumberShiftRight:
+ CheckUpperIs(node, Type::Signed32());
+ break;
case IrOpcode::kNumberShiftRightLogical:
// (Unsigned32, Unsigned32) -> Unsigned32
CheckValueInputIs(node, 0, Type::Unsigned32());
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kSpeculativeNumberShiftRightLogical:
+ CheckUpperIs(node, Type::Unsigned32());
+ break;
case IrOpcode::kNumberImul:
// (Unsigned32, Unsigned32) -> Signed32
CheckValueInputIs(node, 0, Type::Unsigned32());
@@ -696,9 +772,41 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Unsigned32());
CheckUpperIs(node, Type::Unsigned32());
break;
+ case IrOpcode::kNumberAtan2:
+ case IrOpcode::kNumberMax:
+ case IrOpcode::kNumberMin:
+ case IrOpcode::kNumberPow:
+ // (Number, Number) -> Number
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckValueInputIs(node, 1, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kNumberAbs:
case IrOpcode::kNumberCeil:
case IrOpcode::kNumberFloor:
+ case IrOpcode::kNumberFround:
+ case IrOpcode::kNumberAcos:
+ case IrOpcode::kNumberAcosh:
+ case IrOpcode::kNumberAsin:
+ case IrOpcode::kNumberAsinh:
+ case IrOpcode::kNumberAtan:
+ case IrOpcode::kNumberAtanh:
+ case IrOpcode::kNumberCos:
+ case IrOpcode::kNumberCosh:
+ case IrOpcode::kNumberExp:
+ case IrOpcode::kNumberExpm1:
+ case IrOpcode::kNumberLog:
+ case IrOpcode::kNumberLog1p:
+ case IrOpcode::kNumberLog2:
+ case IrOpcode::kNumberLog10:
+ case IrOpcode::kNumberCbrt:
case IrOpcode::kNumberRound:
+ case IrOpcode::kNumberSign:
+ case IrOpcode::kNumberSin:
+ case IrOpcode::kNumberSinh:
+ case IrOpcode::kNumberSqrt:
+ case IrOpcode::kNumberTan:
+ case IrOpcode::kNumberTanh:
case IrOpcode::kNumberTrunc:
// Number -> Number
CheckValueInputIs(node, 0, Type::Number());
@@ -714,16 +822,21 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Number());
CheckUpperIs(node, Type::Unsigned32());
break;
- case IrOpcode::kNumberIsHoleNaN:
- // Number -> Boolean
- CheckValueInputIs(node, 0, Type::Number());
- CheckUpperIs(node, Type::Boolean());
- break;
case IrOpcode::kPlainPrimitiveToNumber:
// PlainPrimitive -> Number
CheckValueInputIs(node, 0, Type::PlainPrimitive());
CheckUpperIs(node, Type::Number());
break;
+ case IrOpcode::kPlainPrimitiveToWord32:
+ // PlainPrimitive -> Integral32
+ CheckValueInputIs(node, 0, Type::PlainPrimitive());
+ CheckUpperIs(node, Type::Integral32());
+ break;
+ case IrOpcode::kPlainPrimitiveToFloat64:
+ // PlainPrimitive -> Number
+ CheckValueInputIs(node, 0, Type::PlainPrimitive());
+ CheckUpperIs(node, Type::Number());
+ break;
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
@@ -732,10 +845,16 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::String());
CheckUpperIs(node, Type::Boolean());
break;
- case IrOpcode::kStringToNumber:
- // String -> Number
+ case IrOpcode::kStringCharCodeAt:
+ // (String, Unsigned32) -> UnsignedSmall
CheckValueInputIs(node, 0, Type::String());
- CheckUpperIs(node, Type::Number());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckUpperIs(node, Type::UnsignedSmall());
+ break;
+ case IrOpcode::kStringFromCharCode:
+ // Number -> String
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::String());
break;
case IrOpcode::kReferenceEqual: {
// (Unique, Any) -> Boolean and
@@ -743,9 +862,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckUpperIs(node, Type::Boolean());
break;
}
+ case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
+ case IrOpcode::kObjectIsString:
case IrOpcode::kObjectIsUndetectable:
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
@@ -754,7 +875,34 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::PlainNumber());
CheckUpperIs(node, Type::TaggedPointer());
break;
+ case IrOpcode::kEnsureWritableFastElements:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Internal());
+ CheckUpperIs(node, Type::Internal());
+ break;
+ case IrOpcode::kMaybeGrowFastElements:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Internal());
+ CheckValueInputIs(node, 2, Type::Unsigned31());
+ CheckValueInputIs(node, 3, Type::Unsigned31());
+ CheckUpperIs(node, Type::Internal());
+ break;
+ case IrOpcode::kTransitionElementsKind:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Internal());
+ CheckValueInputIs(node, 2, Type::Internal());
+ CheckNotTyped(node);
+ break;
+ case IrOpcode::kChangeTaggedSignedToInt32: {
+ // Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from = Type::Intersect(Type::Signed32(), Type::Tagged());
+ // Type* to = Type::Intersect(Type::Signed32(), Type::UntaggedInt32());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
case IrOpcode::kChangeTaggedToInt32: {
// Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -774,7 +922,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
case IrOpcode::kChangeTaggedToFloat64: {
- // Number /\ Tagged -> Number /\ UntaggedFloat64
+ // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
// TODO(neis): Activate once ChangeRepresentation works in typer.
// Type* from = Type::Intersect(Type::Number(), Type::Tagged());
// Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
@@ -782,6 +930,25 @@ void Verifier::Visitor::Check(Node* node) {
// CheckUpperIs(node, to));
break;
}
+ case IrOpcode::kTruncateTaggedToFloat64: {
+ // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from = Type::Intersect(Type::NumberOrUndefined(),
+ // Type::Tagged());
+ // Type* to = Type::Intersect(Type::Number(), Type::UntaggedFloat64());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
+ case IrOpcode::kChangeInt31ToTaggedSigned: {
+ // Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from =Type::Intersect(Type::Signed31(), Type::UntaggedInt32());
+ // Type* to = Type::Intersect(Type::Signed31(), Type::Tagged());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
case IrOpcode::kChangeInt32ToTagged: {
// Signed32 /\ UntaggedInt32 -> Signed32 /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -809,7 +976,7 @@ void Verifier::Visitor::Check(Node* node) {
// CheckUpperIs(node, to));
break;
}
- case IrOpcode::kChangeBoolToBit: {
+ case IrOpcode::kChangeTaggedToBit: {
// Boolean /\ TaggedPtr -> Boolean /\ UntaggedInt1
// TODO(neis): Activate once ChangeRepresentation works in typer.
// Type* from = Type::Intersect(Type::Boolean(), Type::TaggedPtr());
@@ -818,7 +985,7 @@ void Verifier::Visitor::Check(Node* node) {
// CheckUpperIs(node, to));
break;
}
- case IrOpcode::kChangeBitToBool: {
+ case IrOpcode::kChangeBitToTagged: {
// Boolean /\ UntaggedInt1 -> Boolean /\ TaggedPtr
// TODO(neis): Activate once ChangeRepresentation works in typer.
// Type* from = Type::Intersect(Type::Boolean(), Type::UntaggedInt1());
@@ -827,6 +994,84 @@ void Verifier::Visitor::Check(Node* node) {
// CheckUpperIs(node, to));
break;
}
+ case IrOpcode::kTruncateTaggedToWord32: {
+ // Number /\ Tagged -> Signed32 /\ UntaggedInt32
+ // TODO(neis): Activate once ChangeRepresentation works in typer.
+ // Type* from = Type::Intersect(Type::Number(), Type::Tagged());
+ // Type* to = Type::Intersect(Type::Number(), Type::UntaggedInt32());
+ // CheckValueInputIs(node, 0, from));
+ // CheckUpperIs(node, to));
+ break;
+ }
+ case IrOpcode::kImpossibleToWord32:
+ case IrOpcode::kImpossibleToWord64:
+ case IrOpcode::kImpossibleToFloat32:
+ case IrOpcode::kImpossibleToFloat64:
+ case IrOpcode::kImpossibleToTagged:
+ case IrOpcode::kImpossibleToBit:
+ break;
+
+ case IrOpcode::kCheckBounds:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckValueInputIs(node, 1, Type::Unsigned31());
+ CheckUpperIs(node, Type::Unsigned31());
+ break;
+ case IrOpcode::kCheckMaps:
+ // (Any, Internal, ..., Internal) -> Any
+ CheckValueInputIs(node, 0, Type::Any());
+ for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
+ CheckValueInputIs(node, i, Type::Internal());
+ }
+ CheckNotTyped(node);
+ break;
+ case IrOpcode::kCheckNumber:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kCheckString:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::String());
+ break;
+ case IrOpcode::kCheckIf:
+ CheckValueInputIs(node, 0, Type::Boolean());
+ CheckNotTyped(node);
+ break;
+ case IrOpcode::kCheckTaggedSigned:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::TaggedSigned());
+ break;
+ case IrOpcode::kCheckTaggedPointer:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::TaggedPointer());
+ break;
+
+ case IrOpcode::kCheckedInt32Add:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedInt32Div:
+ case IrOpcode::kCheckedInt32Mod:
+ case IrOpcode::kCheckedUint32Div:
+ case IrOpcode::kCheckedUint32Mod:
+ case IrOpcode::kCheckedInt32Mul:
+ case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedTruncateTaggedToWord32:
+ break;
+
+ case IrOpcode::kCheckFloat64Hole:
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kCheckTaggedHole:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::NonInternal());
+ break;
+ case IrOpcode::kConvertTaggedHoleToUndefined:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckUpperIs(node, Type::NonInternal());
+ break;
case IrOpcode::kLoadField:
// Object -> fieldtype
@@ -842,6 +1087,8 @@ void Verifier::Visitor::Check(Node* node) {
// CheckValueInputIs(node, 0, Type::Object());
// CheckUpperIs(node, ElementAccessOf(node->op()).type));
break;
+ case IrOpcode::kLoadTypedElement:
+ break;
case IrOpcode::kStoreField:
// (Object, fieldtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
@@ -858,6 +1105,16 @@ void Verifier::Visitor::Check(Node* node) {
// CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
CheckNotTyped(node);
break;
+ case IrOpcode::kStoreTypedElement:
+ CheckNotTyped(node);
+ break;
+ case IrOpcode::kNumberSilenceNaN:
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckUpperIs(node, Type::Number());
+ break;
+ case IrOpcode::kTypeGuard:
+ CheckUpperIs(node, TypeGuardTypeOf(node->op()));
+ break;
// Machine operators
// -----------------------
@@ -875,6 +1132,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord32Clz:
case IrOpcode::kWord32Ctz:
case IrOpcode::kWord32ReverseBits:
+ case IrOpcode::kWord32ReverseBytes:
case IrOpcode::kWord32Popcnt:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
@@ -887,12 +1145,14 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord64Popcnt:
case IrOpcode::kWord64Ctz:
case IrOpcode::kWord64ReverseBits:
+ case IrOpcode::kWord64ReverseBytes:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32AddWithOverflow:
case IrOpcode::kInt32Sub:
case IrOpcode::kInt32SubWithOverflow:
case IrOpcode::kInt32Mul:
+ case IrOpcode::kInt32MulWithOverflow:
case IrOpcode::kInt32MulHigh:
case IrOpcode::kInt32Div:
case IrOpcode::kInt32Mod:
@@ -918,24 +1178,47 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kUint64LessThanOrEqual:
case IrOpcode::kFloat32Add:
case IrOpcode::kFloat32Sub:
+ case IrOpcode::kFloat32Neg:
case IrOpcode::kFloat32Mul:
case IrOpcode::kFloat32Div:
- case IrOpcode::kFloat32Max:
- case IrOpcode::kFloat32Min:
case IrOpcode::kFloat32Abs:
case IrOpcode::kFloat32Sqrt:
case IrOpcode::kFloat32Equal:
case IrOpcode::kFloat32LessThan:
case IrOpcode::kFloat32LessThanOrEqual:
+ case IrOpcode::kFloat32Max:
+ case IrOpcode::kFloat32Min:
case IrOpcode::kFloat64Add:
case IrOpcode::kFloat64Sub:
+ case IrOpcode::kFloat64Neg:
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Max:
case IrOpcode::kFloat64Min:
case IrOpcode::kFloat64Abs:
+ case IrOpcode::kFloat64Acos:
+ case IrOpcode::kFloat64Acosh:
+ case IrOpcode::kFloat64Asin:
+ case IrOpcode::kFloat64Asinh:
+ case IrOpcode::kFloat64Atan:
+ case IrOpcode::kFloat64Atan2:
+ case IrOpcode::kFloat64Atanh:
+ case IrOpcode::kFloat64Cbrt:
+ case IrOpcode::kFloat64Cos:
+ case IrOpcode::kFloat64Cosh:
+ case IrOpcode::kFloat64Exp:
+ case IrOpcode::kFloat64Expm1:
+ case IrOpcode::kFloat64Log:
+ case IrOpcode::kFloat64Log1p:
+ case IrOpcode::kFloat64Log10:
+ case IrOpcode::kFloat64Log2:
+ case IrOpcode::kFloat64Pow:
+ case IrOpcode::kFloat64Sin:
+ case IrOpcode::kFloat64Sinh:
case IrOpcode::kFloat64Sqrt:
+ case IrOpcode::kFloat64Tan:
+ case IrOpcode::kFloat64Tanh:
case IrOpcode::kFloat32RoundDown:
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat32RoundUp:
@@ -949,6 +1232,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kRoundFloat64ToInt32:
case IrOpcode::kRoundInt32ToFloat32:
case IrOpcode::kRoundInt64ToFloat32:
case IrOpcode::kRoundInt64ToFloat64:
@@ -956,11 +1240,12 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kRoundUint64ToFloat64:
case IrOpcode::kRoundUint64ToFloat32:
case IrOpcode::kTruncateFloat64ToFloat32:
- case IrOpcode::kTruncateFloat64ToInt32:
+ case IrOpcode::kTruncateFloat64ToWord32:
case IrOpcode::kBitcastFloat32ToInt32:
case IrOpcode::kBitcastFloat64ToInt64:
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
+ case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:
@@ -968,6 +1253,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kFloat64SilenceNaN:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat32ToInt32:
case IrOpcode::kTruncateFloat32ToUint32:
@@ -988,24 +1274,32 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
+ case IrOpcode::kUnalignedLoad:
+ case IrOpcode::kUnalignedStore:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
+ case IrOpcode::kAtomicLoad:
+ case IrOpcode::kAtomicStore:
+
+#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
+ MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
+#undef SIMD_MACHINE_OP_CASE
+
// TODO(rossberg): Check.
break;
}
} // NOLINT(readability/fn_size)
-
-void Verifier::Run(Graph* graph, Typing typing) {
+void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
CHECK_NOT_NULL(graph->start());
CHECK_NOT_NULL(graph->end());
Zone zone(graph->zone()->allocator());
- Visitor visitor(&zone, typing);
+ Visitor visitor(&zone, typing, check_inputs);
AllNodes all(&zone, graph);
- for (Node* node : all.live) visitor.Check(node);
+ for (Node* node : all.reachable) visitor.Check(node);
// Check the uniqueness of projections.
- for (Node* proj : all.live) {
+ for (Node* proj : all.reachable) {
if (proj->opcode() != IrOpcode::kProjection) continue;
Node* node = proj->InputAt(0);
for (Node* other : node->uses()) {
@@ -1281,10 +1575,9 @@ void Verifier::VerifyNode(Node* node) {
}
}
}
- // Frame state inputs should be frame states (or sentinels).
- for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(node->op());
- i++) {
- Node* input = NodeProperties::GetFrameStateInput(node, i);
+ // Frame state input should be a frame state (or sentinel).
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ Node* input = NodeProperties::GetFrameStateInput(node);
CHECK(input->opcode() == IrOpcode::kFrameState ||
input->opcode() == IrOpcode::kStart ||
input->opcode() == IrOpcode::kDead);
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 428558d42d..60849e0238 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -21,8 +21,10 @@ class Schedule;
class Verifier {
public:
enum Typing { TYPED, UNTYPED };
+ enum CheckInputs { kValuesOnly, kAll };
- static void Run(Graph* graph, Typing typing = TYPED);
+ static void Run(Graph* graph, Typing typing = TYPED,
+ CheckInputs check_inputs = kAll);
#ifdef DEBUG
// Verifies consistency of node inputs and uses:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 93d5a084b9..e92a434d1d 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -4,36 +4,33 @@
#include "src/compiler/wasm-compiler.h"
+#include <memory>
+
#include "src/isolate-inl.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
#include "src/compiler/access-builder.h"
-#include "src/compiler/change-lowering.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/diamond.h"
-#include "src/compiler/graph.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/int64-lowering.h"
-#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
-#include "src/compiler/typer.h"
+#include "src/compiler/zone-pool.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/factory.h"
#include "src/log-inl.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/wasm-module.h"
@@ -52,17 +49,11 @@ namespace compiler {
namespace {
const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
- if (wasm::WasmOpcodes::IsSupported(opcode)) {
- V8_Fatal(__FILE__, __LINE__,
- "Unsupported opcode #%d:%s reported as supported", opcode,
- wasm::WasmOpcodes::OpcodeName(opcode));
- }
V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
wasm::WasmOpcodes::OpcodeName(opcode));
return nullptr;
}
-
void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
Graph* g = jsgraph->graph();
if (g->end()) {
@@ -72,6 +63,39 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
+Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
+ Handle<Context> context, Node** parameters,
+ int parameter_count, Node** effect_ptr,
+ Node* control) {
+ // At the moment we only allow 2 parameters. If more parameters are needed,
+ // then the size of {inputs} below has to be increased accordingly.
+ DCHECK(parameter_count <= 2);
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ // CEntryStubConstant nodes have to be created and cached in the main
+ // thread. At the moment this is only done for CEntryStubConstant(1).
+ DCHECK_EQ(1, fun->result_size);
+ Node* inputs[8];
+ int count = 0;
+ inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
+ for (int i = 0; i < parameter_count; i++) {
+ inputs[count++] = parameters[i];
+ }
+ inputs[count++] = jsgraph->ExternalConstant(
+ ExternalReference(f, jsgraph->isolate())); // ref
+ inputs[count++] = jsgraph->Int32Constant(fun->nargs); // arity
+ inputs[count++] = jsgraph->HeapConstant(context); // context
+ inputs[count++] = *effect_ptr;
+ inputs[count++] = control;
+
+ Node* node =
+ jsgraph->graph()->NewNode(jsgraph->common()->Call(desc), count, inputs);
+ *effect_ptr = node;
+ return node;
+}
+
} // namespace
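The new BuildCallToRuntime helper centralizes the CEntry calling convention: callee stub, the parameters, the external reference, arity, context, then effect and control. A hedged usage sketch, mirroring the trap code later in this patch (the two Smi parameter node names are illustrative):

    // Sketch: raising a wasm error through the helper.
    Node* parameters[] = {trap_reason_smi,     // message id, as Smi
                          trap_position_smi};  // byte offset, as Smi
    BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph, context, parameters,
                       arraysize(parameters), effect_ptr, control);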
// A helper that handles building graph fragments for trapping.
@@ -83,62 +107,72 @@ class WasmTrapHelper : public ZoneObject {
explicit WasmTrapHelper(WasmGraphBuilder* builder)
: builder_(builder),
jsgraph_(builder->jsgraph()),
- graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {
- for (int i = 0; i < wasm::kTrapCount; i++) traps_[i] = nullptr;
- }
+ graph_(builder->jsgraph() ? builder->jsgraph()->graph() : nullptr) {}
// Make the current control path trap to unreachable.
- void Unreachable() { ConnectTrap(wasm::kTrapUnreachable); }
+ void Unreachable(wasm::WasmCodePosition position) {
+ ConnectTrap(wasm::kTrapUnreachable, position);
+ }
// Always trap with the given reason.
- void TrapAlways(wasm::TrapReason reason) { ConnectTrap(reason); }
+ void TrapAlways(wasm::TrapReason reason, wasm::WasmCodePosition position) {
+ ConnectTrap(reason, position);
+ }
// Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val) {
+ Node* TrapIfEq32(wasm::TrapReason reason, Node* node, int32_t val,
+ wasm::WasmCodePosition position) {
Int32Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
if (val == 0) {
- AddTrapIfFalse(reason, node);
+ AddTrapIfFalse(reason, node, position);
} else {
AddTrapIfTrue(reason,
graph()->NewNode(jsgraph()->machine()->Word32Equal(), node,
- jsgraph()->Int32Constant(val)));
+ jsgraph()->Int32Constant(val)),
+ position);
}
return builder_->Control();
}
// Add a check that traps if {node} is zero.
- Node* ZeroCheck32(wasm::TrapReason reason, Node* node) {
- return TrapIfEq32(reason, node, 0);
+ Node* ZeroCheck32(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ return TrapIfEq32(reason, node, 0, position);
}
// Add a check that traps if {node} is equal to {val}.
- Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val) {
+ Node* TrapIfEq64(wasm::TrapReason reason, Node* node, int64_t val,
+ wasm::WasmCodePosition position) {
Int64Matcher m(node);
if (m.HasValue() && !m.Is(val)) return graph()->start();
- AddTrapIfTrue(reason,
- graph()->NewNode(jsgraph()->machine()->Word64Equal(), node,
- jsgraph()->Int64Constant(val)));
+ AddTrapIfTrue(reason, graph()->NewNode(jsgraph()->machine()->Word64Equal(),
+ node, jsgraph()->Int64Constant(val)),
+ position);
return builder_->Control();
}
// Add a check that traps if {node} is zero.
- Node* ZeroCheck64(wasm::TrapReason reason, Node* node) {
- return TrapIfEq64(reason, node, 0);
+ Node* ZeroCheck64(wasm::TrapReason reason, Node* node,
+ wasm::WasmCodePosition position) {
+ return TrapIfEq64(reason, node, 0, position);
}
// Add a trap if {cond} is true.
- void AddTrapIfTrue(wasm::TrapReason reason, Node* cond) {
- AddTrapIf(reason, cond, true);
+ void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
+ AddTrapIf(reason, cond, true, position);
}
// Add a trap if {cond} is false.
- void AddTrapIfFalse(wasm::TrapReason reason, Node* cond) {
- AddTrapIf(reason, cond, false);
+ void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
+ wasm::WasmCodePosition position) {
+ AddTrapIf(reason, cond, false, position);
}
// Add a trap if {cond} is true or false according to {iftrue}.
- void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue) {
+ void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
+ wasm::WasmCodePosition position) {
Node** effect_ptr = builder_->effect_;
Node** control_ptr = builder_->control_;
Node* before = *effect_ptr;
@@ -148,7 +182,7 @@ class WasmTrapHelper : public ZoneObject {
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
*control_ptr = iftrue ? if_true : if_false;
- ConnectTrap(reason);
+ ConnectTrap(reason, position);
*control_ptr = iftrue ? if_false : if_true;
*effect_ptr = before;
}
@@ -179,56 +213,57 @@ class WasmTrapHelper : public ZoneObject {
WasmGraphBuilder* builder_;
JSGraph* jsgraph_;
Graph* graph_;
- Node* traps_[wasm::kTrapCount];
- Node* effects_[wasm::kTrapCount];
+ Node* trap_merge_ = nullptr;
+ Node* trap_effect_;
+ Node* trap_reason_;
+ Node* trap_position_;
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph_->graph(); }
CommonOperatorBuilder* common() { return jsgraph()->common(); }
- void ConnectTrap(wasm::TrapReason reason) {
- if (traps_[reason] == nullptr) {
- // Create trap code for the first time this trap is used.
- return BuildTrapCode(reason);
+ void ConnectTrap(wasm::TrapReason reason, wasm::WasmCodePosition position) {
+    DCHECK_NE(wasm::kNoCodePosition, position);
+ Node* reason_node = builder_->Int32Constant(
+ wasm::WasmOpcodes::TrapReasonToMessageId(reason));
+ Node* position_node = builder_->Int32Constant(position);
+ if (trap_merge_ == nullptr) {
+ // Create trap code for the first time.
+ return BuildTrapCode(reason_node, position_node);
}
// Connect the current control and effect to the existing trap code.
- builder_->AppendToMerge(traps_[reason], builder_->Control());
- builder_->AppendToPhi(traps_[reason], effects_[reason], builder_->Effect());
+ builder_->AppendToMerge(trap_merge_, builder_->Control());
+ builder_->AppendToPhi(trap_effect_, builder_->Effect());
+ builder_->AppendToPhi(trap_reason_, reason_node);
+ builder_->AppendToPhi(trap_position_, position_node);
}
- void BuildTrapCode(wasm::TrapReason reason) {
- Node* exception =
- builder_->String(wasm::WasmOpcodes::TrapReasonName(reason));
+ void BuildTrapCode(Node* reason_node, Node* position_node) {
Node* end;
Node** control_ptr = builder_->control_;
Node** effect_ptr = builder_->effect_;
wasm::ModuleEnv* module = builder_->module_;
- *control_ptr = traps_[reason] =
+    DCHECK_NULL(trap_merge_);
+ *control_ptr = trap_merge_ =
graph()->NewNode(common()->Merge(1), *control_ptr);
- *effect_ptr = effects_[reason] =
+ *effect_ptr = trap_effect_ =
graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
+ trap_reason_ =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
+ reason_node, *control_ptr);
+ trap_position_ =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 1),
+ position_node, *control_ptr);
+
+ Node* trap_reason_smi = builder_->BuildChangeInt32ToSmi(trap_reason_);
+ Node* trap_position_smi = builder_->BuildChangeInt32ToSmi(trap_position_);
if (module && !module->instance->context.is_null()) {
- // Use the module context to call the runtime to throw an exception.
- Runtime::FunctionId f = Runtime::kThrow;
- const Runtime::Function* fun = Runtime::FunctionForId(f);
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
- CallDescriptor::kNoFlags);
- Node* inputs[] = {
- jsgraph()->CEntryStubConstant(fun->result_size), // C entry
- exception, // exception
- jsgraph()->ExternalConstant(
- ExternalReference(f, jsgraph()->isolate())), // ref
- jsgraph()->Int32Constant(fun->nargs), // arity
- jsgraph()->Constant(module->instance->context), // context
- *effect_ptr,
- *control_ptr};
-
- Node* node = graph()->NewNode(
- common()->Call(desc), static_cast<int>(arraysize(inputs)), inputs);
- *control_ptr = node;
- *effect_ptr = node;
+ Node* parameters[] = {trap_reason_smi, // message id
+ trap_position_smi}; // byte position
+ BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(),
+ module->instance->context, parameters,
+ arraysize(parameters), effect_ptr, *control_ptr);
}
if (false) {
// End the control flow with a throw
@@ -247,45 +282,42 @@ class WasmTrapHelper : public ZoneObject {
}
};
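Structurally, the refactored helper funnels every trap site into one shared block instead of one block per TrapReason: a single Merge plus an EffectPhi and two kWord32 Phis carrying the trap reason and source position. Roughly, after two trap sites have been connected the graph looks like this (schematic, not literal node output):

    // trap_merge_    = Merge(control_site_1, control_site_2)
    // trap_effect_   = EffectPhi(effect_1, effect_2, trap_merge_)
    // trap_reason_   = Phi[kWord32](reason_1, reason_2, trap_merge_)
    // trap_position_ = Phi[kWord32](position_1, position_2, trap_merge_)
    // ...both phis go through BuildChangeInt32ToSmi and feed the
    //    Runtime::kThrowWasmError call built above...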
-WasmGraphBuilder::WasmGraphBuilder(Zone* zone, JSGraph* jsgraph,
- wasm::FunctionSig* function_signature)
+WasmGraphBuilder::WasmGraphBuilder(
+ Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* function_signature,
+ compiler::SourcePositionTable* source_position_table)
: zone_(zone),
jsgraph_(jsgraph),
module_(nullptr),
mem_buffer_(nullptr),
mem_size_(nullptr),
- function_table_(nullptr),
+ function_tables_(zone),
control_(nullptr),
effect_(nullptr),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
trap_(new (zone) WasmTrapHelper(this)),
- function_signature_(function_signature) {
+ function_signature_(function_signature),
+ source_position_table_(source_position_table) {
DCHECK_NOT_NULL(jsgraph_);
}
-
Node* WasmGraphBuilder::Error() { return jsgraph()->Dead(); }
-
Node* WasmGraphBuilder::Start(unsigned params) {
Node* start = graph()->NewNode(jsgraph()->common()->Start(params));
graph()->SetStart(start);
return start;
}
-
Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
return graph()->NewNode(jsgraph()->common()->Parameter(index),
graph()->start());
}
-
Node* WasmGraphBuilder::Loop(Node* entry) {
return graph()->NewNode(jsgraph()->common()->Loop(1), entry);
}
-
Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
Node* terminate =
graph()->NewNode(jsgraph()->common()->Terminate(), effect, control);
@@ -293,18 +325,15 @@ Node* WasmGraphBuilder::Terminate(Node* effect, Node* control) {
return terminate;
}
-
unsigned WasmGraphBuilder::InputCount(Node* node) {
return static_cast<unsigned>(node->InputCount());
}
-
bool WasmGraphBuilder::IsPhiWithMerge(Node* phi, Node* merge) {
return phi && IrOpcode::IsPhiOpcode(phi->opcode()) &&
NodeProperties::GetControlInput(phi) == merge;
}
-
void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
merge->AppendInput(jsgraph()->zone(), from);
@@ -313,22 +342,18 @@ void WasmGraphBuilder::AppendToMerge(Node* merge, Node* from) {
merge, jsgraph()->common()->ResizeMergeOrPhi(merge->op(), new_size));
}
-
-void WasmGraphBuilder::AppendToPhi(Node* merge, Node* phi, Node* from) {
+void WasmGraphBuilder::AppendToPhi(Node* phi, Node* from) {
DCHECK(IrOpcode::IsPhiOpcode(phi->opcode()));
- DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
int new_size = phi->InputCount();
phi->InsertInput(jsgraph()->zone(), phi->InputCount() - 1, from);
NodeProperties::ChangeOp(
phi, jsgraph()->common()->ResizeMergeOrPhi(phi->op(), new_size));
}
-
Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
}
-
Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
@@ -338,7 +363,6 @@ Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
buf);
}
-
Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
@@ -348,19 +372,58 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
buf);
}
+Node* WasmGraphBuilder::NumberConstant(int32_t value) {
+ return jsgraph()->Constant(value);
+}
+
+Node* WasmGraphBuilder::Uint32Constant(uint32_t value) {
+ return jsgraph()->Uint32Constant(value);
+}
Node* WasmGraphBuilder::Int32Constant(int32_t value) {
return jsgraph()->Int32Constant(value);
}
-
Node* WasmGraphBuilder::Int64Constant(int64_t value) {
return jsgraph()->Int64Constant(value);
}
+void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
+ // We do not generate stack checks for cctests.
+ if (module_ && !module_->instance->context.is_null()) {
+ Node* limit = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::Pointer()),
+ jsgraph()->ExternalConstant(
+ ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
+ jsgraph()->IntPtrConstant(0), *effect_, *control_);
+ Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
+
+ Node* check =
+ graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
+
+ Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
+
+ Node* effect_true = *effect_;
+
+ Node* effect_false;
+ // Generate a call to the runtime if there is a stack check failure.
+ {
+ Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
+ module_->instance->context, nullptr, 0,
+ effect_, stack_check.if_false);
+ effect_false = node;
+ }
+
+ Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
+ effect_true, effect_false, stack_check.merge);
-Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
- Node* right) {
+ *control_ = stack_check.merge;
+ *effect_ = ephi;
+ }
+}
+
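The generated stack check compares the isolate's stack limit against the current stack pointer and only calls into the runtime on the unlikely slow path. In pseudo-C the emitted logic is roughly:

    // Pseudo-C sketch of StackCheck; BranchHint::kTrue marks the fast path.
    if (!(stack_limit < stack_pointer)) {
      Runtime_StackGuard();  // may service interrupts or throw
    }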
+Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
+ wasm::WasmCodePosition position) {
const Operator* op;
MachineOperatorBuilder* m = jsgraph()->machine();
switch (opcode) {
@@ -374,13 +437,13 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
op = m->Int32Mul();
break;
case wasm::kExprI32DivS:
- return BuildI32DivS(left, right);
+ return BuildI32DivS(left, right, position);
case wasm::kExprI32DivU:
- return BuildI32DivU(left, right);
+ return BuildI32DivU(left, right, position);
case wasm::kExprI32RemS:
- return BuildI32RemS(left, right);
+ return BuildI32RemS(left, right, position);
case wasm::kExprI32RemU:
- return BuildI32RemU(left, right);
+ return BuildI32RemU(left, right, position);
case wasm::kExprI32And:
op = m->Word32And();
break;
@@ -445,62 +508,46 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
case wasm::kExprI64And:
op = m->Word64And();
break;
- // todo(ahaas): I added a list of missing instructions here to make merging
- // easier when I do them one by one.
- // kExprI64Add:
case wasm::kExprI64Add:
op = m->Int64Add();
break;
- // kExprI64Sub:
case wasm::kExprI64Sub:
op = m->Int64Sub();
break;
- // kExprI64Mul:
case wasm::kExprI64Mul:
op = m->Int64Mul();
break;
- // kExprI64DivS:
case wasm::kExprI64DivS:
- return BuildI64DivS(left, right);
- // kExprI64DivU:
+ return BuildI64DivS(left, right, position);
case wasm::kExprI64DivU:
- return BuildI64DivU(left, right);
- // kExprI64RemS:
+ return BuildI64DivU(left, right, position);
case wasm::kExprI64RemS:
- return BuildI64RemS(left, right);
- // kExprI64RemU:
+ return BuildI64RemS(left, right, position);
case wasm::kExprI64RemU:
- return BuildI64RemU(left, right);
+ return BuildI64RemU(left, right, position);
case wasm::kExprI64Ior:
op = m->Word64Or();
break;
-// kExprI64Xor:
case wasm::kExprI64Xor:
op = m->Word64Xor();
break;
-// kExprI64Shl:
case wasm::kExprI64Shl:
op = m->Word64Shl();
right = MaskShiftCount64(right);
break;
- // kExprI64ShrU:
case wasm::kExprI64ShrU:
op = m->Word64Shr();
right = MaskShiftCount64(right);
break;
- // kExprI64ShrS:
case wasm::kExprI64ShrS:
op = m->Word64Sar();
right = MaskShiftCount64(right);
break;
- // kExprI64Eq:
case wasm::kExprI64Eq:
op = m->Word64Equal();
break;
-// kExprI64Ne:
case wasm::kExprI64Ne:
return Invert(Binop(wasm::kExprI64Eq, left, right));
-// kExprI64LtS:
case wasm::kExprI64LtS:
op = m->Int64LessThan();
break;
@@ -602,30 +649,50 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
std::swap(left, right);
break;
case wasm::kExprF32Min:
- return BuildF32Min(left, right);
+ op = m->Float32Min();
+ break;
case wasm::kExprF64Min:
- return BuildF64Min(left, right);
+ op = m->Float64Min();
+ break;
case wasm::kExprF32Max:
- return BuildF32Max(left, right);
+ op = m->Float32Max();
+ break;
case wasm::kExprF64Max:
- return BuildF64Max(left, right);
- case wasm::kExprF64Pow: {
+ op = m->Float64Max();
+ break;
+ case wasm::kExprF64Pow:
return BuildF64Pow(left, right);
- }
- case wasm::kExprF64Atan2: {
- return BuildF64Atan2(left, right);
- }
- case wasm::kExprF64Mod: {
+ case wasm::kExprF64Atan2:
+ op = m->Float64Atan2();
+ break;
+ case wasm::kExprF64Mod:
return BuildF64Mod(left, right);
- }
+ case wasm::kExprI32AsmjsDivS:
+ return BuildI32AsmjsDivS(left, right);
+ case wasm::kExprI32AsmjsDivU:
+ return BuildI32AsmjsDivU(left, right);
+ case wasm::kExprI32AsmjsRemS:
+ return BuildI32AsmjsRemS(left, right);
+ case wasm::kExprI32AsmjsRemU:
+ return BuildI32AsmjsRemU(left, right);
+ case wasm::kExprI32AsmjsStoreMem8:
+ return BuildAsmjsStoreMem(MachineType::Int8(), left, right);
+ case wasm::kExprI32AsmjsStoreMem16:
+ return BuildAsmjsStoreMem(MachineType::Int16(), left, right);
+ case wasm::kExprI32AsmjsStoreMem:
+ return BuildAsmjsStoreMem(MachineType::Int32(), left, right);
+ case wasm::kExprF32AsmjsStoreMem:
+ return BuildAsmjsStoreMem(MachineType::Float32(), left, right);
+ case wasm::kExprF64AsmjsStoreMem:
+ return BuildAsmjsStoreMem(MachineType::Float64(), left, right);
default:
op = UnsupportedOpcode(opcode);
}
return graph()->NewNode(op, left, right);
}
-
-Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
+Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
+ wasm::WasmCodePosition position) {
const Operator* op;
MachineOperatorBuilder* m = jsgraph()->machine();
switch (opcode) {
@@ -635,23 +702,31 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
case wasm::kExprF32Abs:
op = m->Float32Abs();
break;
- case wasm::kExprF32Neg:
- return BuildF32Neg(input);
+ case wasm::kExprF32Neg: {
+ op = m->Float32Neg();
+ break;
+ }
case wasm::kExprF32Sqrt:
op = m->Float32Sqrt();
break;
case wasm::kExprF64Abs:
op = m->Float64Abs();
break;
- case wasm::kExprF64Neg:
- return BuildF64Neg(input);
+ case wasm::kExprF64Neg: {
+ op = m->Float64Neg();
+ break;
+ }
case wasm::kExprF64Sqrt:
op = m->Float64Sqrt();
break;
case wasm::kExprI32SConvertF64:
- return BuildI32SConvertF64(input);
+ return BuildI32SConvertF64(input, position);
case wasm::kExprI32UConvertF64:
- return BuildI32UConvertF64(input);
+ return BuildI32UConvertF64(input, position);
+ case wasm::kExprI32AsmjsSConvertF64:
+ return BuildI32AsmjsSConvertF64(input);
+ case wasm::kExprI32AsmjsUConvertF64:
+ return BuildI32AsmjsUConvertF64(input);
case wasm::kExprF32ConvertF64:
op = m->TruncateFloat64ToFloat32();
break;
@@ -668,9 +743,13 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
op = m->RoundUint32ToFloat32();
break;
case wasm::kExprI32SConvertF32:
- return BuildI32SConvertF32(input);
+ return BuildI32SConvertF32(input, position);
case wasm::kExprI32UConvertF32:
- return BuildI32UConvertF32(input);
+ return BuildI32UConvertF32(input, position);
+ case wasm::kExprI32AsmjsSConvertF32:
+ return BuildI32AsmjsSConvertF32(input);
+ case wasm::kExprI32AsmjsUConvertF32:
+ return BuildI32AsmjsUConvertF32(input);
case wasm::kExprF64ConvertF32:
op = m->ChangeFloat32ToFloat64();
break;
@@ -751,55 +830,53 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
case wasm::kExprF64Asin: {
return BuildF64Asin(input);
}
- case wasm::kExprF64Atan: {
- return BuildF64Atan(input);
- }
+ case wasm::kExprF64Atan:
+ op = m->Float64Atan();
+ break;
case wasm::kExprF64Cos: {
- return BuildF64Cos(input);
+ op = m->Float64Cos();
+ break;
}
case wasm::kExprF64Sin: {
- return BuildF64Sin(input);
+ op = m->Float64Sin();
+ break;
}
case wasm::kExprF64Tan: {
- return BuildF64Tan(input);
+ op = m->Float64Tan();
+ break;
}
case wasm::kExprF64Exp: {
- return BuildF64Exp(input);
- }
- case wasm::kExprF64Log: {
- return BuildF64Log(input);
+ op = m->Float64Exp();
+ break;
}
- // kExprI32ConvertI64:
+ case wasm::kExprF64Log:
+ op = m->Float64Log();
+ break;
case wasm::kExprI32ConvertI64:
op = m->TruncateInt64ToInt32();
break;
- // kExprI64SConvertI32:
case wasm::kExprI64SConvertI32:
op = m->ChangeInt32ToInt64();
break;
- // kExprI64UConvertI32:
case wasm::kExprI64UConvertI32:
op = m->ChangeUint32ToUint64();
break;
- // kExprF64ReinterpretI64:
case wasm::kExprF64ReinterpretI64:
op = m->BitcastInt64ToFloat64();
break;
- // kExprI64ReinterpretF64:
case wasm::kExprI64ReinterpretF64:
op = m->BitcastFloat64ToInt64();
break;
- // kExprI64Clz:
case wasm::kExprI64Clz:
op = m->Word64Clz();
break;
- // kExprI64Ctz:
case wasm::kExprI64Ctz: {
- if (m->Word64Ctz().IsSupported()) {
- op = m->Word64Ctz().op();
+ OptionalOperator ctz64 = m->Word64Ctz();
+ if (ctz64.IsSupported()) {
+ op = ctz64.op();
break;
} else if (m->Is32() && m->Word32Ctz().IsSupported()) {
- op = m->Word64CtzPlaceholder();
+ op = ctz64.placeholder();
break;
} else if (m->Word64ReverseBits().IsSupported()) {
Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
@@ -809,18 +886,17 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
return BuildI64Ctz(input);
}
}
- // kExprI64Popcnt:
case wasm::kExprI64Popcnt: {
- if (m->Word64Popcnt().IsSupported()) {
- op = m->Word64Popcnt().op();
+ OptionalOperator popcnt64 = m->Word64Popcnt();
+ if (popcnt64.IsSupported()) {
+ op = popcnt64.op();
} else if (m->Is32() && m->Word32Popcnt().IsSupported()) {
- op = m->Word64PopcntPlaceholder();
+ op = popcnt64.placeholder();
} else {
return BuildI64Popcnt(input);
}
break;
}
- // kExprF32SConvertI64:
case wasm::kExprI64Eqz:
op = m->Word64Equal();
return graph()->NewNode(op, input, jsgraph()->Int64Constant(0));
@@ -830,65 +906,66 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
}
op = m->RoundInt64ToFloat32();
break;
- // kExprF32UConvertI64:
case wasm::kExprF32UConvertI64:
if (m->Is32()) {
return BuildF32UConvertI64(input);
}
op = m->RoundUint64ToFloat32();
break;
- // kExprF64SConvertI64:
case wasm::kExprF64SConvertI64:
if (m->Is32()) {
return BuildF64SConvertI64(input);
}
op = m->RoundInt64ToFloat64();
break;
- // kExprF64UConvertI64:
case wasm::kExprF64UConvertI64:
if (m->Is32()) {
return BuildF64UConvertI64(input);
}
op = m->RoundUint64ToFloat64();
break;
-// kExprI64SConvertF32:
- case wasm::kExprI64SConvertF32: {
- return BuildI64SConvertF32(input);
- }
- // kExprI64SConvertF64:
- case wasm::kExprI64SConvertF64: {
- return BuildI64SConvertF64(input);
- }
- // kExprI64UConvertF32:
- case wasm::kExprI64UConvertF32: {
- return BuildI64UConvertF32(input);
- }
- // kExprI64UConvertF64:
- case wasm::kExprI64UConvertF64: {
- return BuildI64UConvertF64(input);
- }
+ case wasm::kExprI64SConvertF32:
+ return BuildI64SConvertF32(input, position);
+ case wasm::kExprI64SConvertF64:
+ return BuildI64SConvertF64(input, position);
+ case wasm::kExprI64UConvertF32:
+ return BuildI64UConvertF32(input, position);
+ case wasm::kExprI64UConvertF64:
+ return BuildI64UConvertF64(input, position);
+ case wasm::kExprGrowMemory:
+ return BuildGrowMemory(input);
+ case wasm::kExprI32AsmjsLoadMem8S:
+ return BuildAsmjsLoadMem(MachineType::Int8(), input);
+ case wasm::kExprI32AsmjsLoadMem8U:
+ return BuildAsmjsLoadMem(MachineType::Uint8(), input);
+ case wasm::kExprI32AsmjsLoadMem16S:
+ return BuildAsmjsLoadMem(MachineType::Int16(), input);
+ case wasm::kExprI32AsmjsLoadMem16U:
+ return BuildAsmjsLoadMem(MachineType::Uint16(), input);
+ case wasm::kExprI32AsmjsLoadMem:
+ return BuildAsmjsLoadMem(MachineType::Int32(), input);
+ case wasm::kExprF32AsmjsLoadMem:
+ return BuildAsmjsLoadMem(MachineType::Float32(), input);
+ case wasm::kExprF64AsmjsLoadMem:
+ return BuildAsmjsLoadMem(MachineType::Float64(), input);
default:
op = UnsupportedOpcode(opcode);
}
return graph()->NewNode(op, input);
}
-
Node* WasmGraphBuilder::Float32Constant(float value) {
return jsgraph()->Float32Constant(value);
}
-
Node* WasmGraphBuilder::Float64Constant(double value) {
return jsgraph()->Float64Constant(value);
}
-
-Node* WasmGraphBuilder::Constant(Handle<Object> value) {
- return jsgraph()->Constant(value);
+Node* WasmGraphBuilder::HeapConstant(Handle<HeapObject> value) {
+ return jsgraph()->HeapConstant(value);
}
-
Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
Node** false_node) {
DCHECK_NOT_NULL(cond);
@@ -900,24 +977,20 @@ Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
return branch;
}
-
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
}
-
Node* WasmGraphBuilder::IfValue(int32_t value, Node* sw) {
DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
return graph()->NewNode(jsgraph()->common()->IfValue(value), sw);
}
-
Node* WasmGraphBuilder::IfDefault(Node* sw) {
DCHECK_EQ(IrOpcode::kSwitch, sw->opcode());
return graph()->NewNode(jsgraph()->common()->IfDefault(), sw);
}
-
Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
@@ -937,12 +1010,10 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
return ret;
}
-
Node* WasmGraphBuilder::ReturnVoid() { return Return(0, Buffer(0)); }
-
-Node* WasmGraphBuilder::Unreachable() {
- trap_->Unreachable();
+Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
+ trap_->Unreachable(position);
return nullptr;
}
@@ -978,35 +1049,156 @@ Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
return node;
}
-Node* WasmGraphBuilder::BuildF32Neg(Node* input) {
- Node* result =
- Unop(wasm::kExprF32ReinterpretI32,
- Binop(wasm::kExprI32Xor, Unop(wasm::kExprI32ReinterpretF32, input),
- jsgraph()->Int32Constant(0x80000000)));
-
- return result;
+static bool ReverseBytesSupported(MachineOperatorBuilder* m,
+ size_t size_in_bytes) {
+ switch (size_in_bytes) {
+ case 4:
+ return m->Word32ReverseBytes().IsSupported();
+ case 8:
+ return m->Word64ReverseBytes().IsSupported();
+ default:
+ break;
+ }
+ return false;
}
+Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
+ wasm::LocalType wasmtype) {
+ Node* result;
+ Node* value = node;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ int valueSizeInBytes = 1 << ElementSizeLog2Of(memtype.representation());
+ int valueSizeInBits = 8 * valueSizeInBytes;
+ bool isFloat = false;
+
+ switch (memtype.representation()) {
+ case MachineRepresentation::kFloat64:
+ value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+ isFloat = true;
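+      // Intentional fall-through into the kWord64 case below.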
+ case MachineRepresentation::kWord64:
+ result = jsgraph()->Int64Constant(0);
+ break;
+ case MachineRepresentation::kFloat32:
+ value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+ isFloat = true;
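+      // Intentional fall-through into the word-sized cases below.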
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord16:
+ result = jsgraph()->Int32Constant(0);
+ break;
+ case MachineRepresentation::kWord8:
+      // A single-byte value needs no endianness change; return it unchanged.
+      return node;
+ default:
+ UNREACHABLE();
+ break;
+ }
-Node* WasmGraphBuilder::BuildF64Neg(Node* input) {
-#if WASM_64
- Node* result =
- Unop(wasm::kExprF64ReinterpretI64,
- Binop(wasm::kExprI64Xor, Unop(wasm::kExprI64ReinterpretF64, input),
- jsgraph()->Int64Constant(0x8000000000000000)));
+ int i;
+ uint32_t shiftCount;
- return result;
-#else
- MachineOperatorBuilder* m = jsgraph()->machine();
+ if (ReverseBytesSupported(m, valueSizeInBytes < 4 ? 4 : valueSizeInBytes)) {
+ switch (valueSizeInBytes) {
+ case 2:
+ result =
+ graph()->NewNode(m->Word32ReverseBytes().op(),
+ graph()->NewNode(m->Word32Shl(), value,
+ jsgraph()->Int32Constant(16)));
+ break;
+ case 4:
+ result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+ break;
+ case 8:
+ result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ for (i = 0, shiftCount = valueSizeInBits - 8; i < valueSizeInBits / 2;
+ i += 8, shiftCount -= 16) {
+ Node* shiftLower;
+ Node* shiftHigher;
+ Node* lowerByte;
+ Node* higherByte;
+
+ DCHECK(shiftCount > 0);
+ DCHECK((shiftCount + 8) % 16 == 0);
+
+ if (valueSizeInBits > 32) {
+ shiftLower = graph()->NewNode(m->Word64Shl(), value,
+ jsgraph()->Int64Constant(shiftCount));
+ shiftHigher = graph()->NewNode(m->Word64Shr(), value,
+ jsgraph()->Int64Constant(shiftCount));
+ lowerByte = graph()->NewNode(
+ m->Word64And(), shiftLower,
+ jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = graph()->NewNode(
+ m->Word64And(), shiftHigher,
+ jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
+ result = graph()->NewNode(m->Word64Or(), result, lowerByte);
+ result = graph()->NewNode(m->Word64Or(), result, higherByte);
+ } else {
+ shiftLower = graph()->NewNode(m->Word32Shl(), value,
+ jsgraph()->Int32Constant(shiftCount));
+ shiftHigher = graph()->NewNode(m->Word32Shr(), value,
+ jsgraph()->Int32Constant(shiftCount));
+ lowerByte = graph()->NewNode(
+ m->Word32And(), shiftLower,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = graph()->NewNode(
+ m->Word32And(), shiftHigher,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
+ result = graph()->NewNode(m->Word32Or(), result, lowerByte);
+ result = graph()->NewNode(m->Word32Or(), result, higherByte);
+ }
+ }
+ }
- Node* old_high_word = graph()->NewNode(m->Float64ExtractHighWord32(), input);
- Node* new_high_word = Binop(wasm::kExprI32Xor, old_high_word,
- jsgraph()->Int32Constant(0x80000000));
+ if (isFloat) {
+ switch (memtype.representation()) {
+ case MachineRepresentation::kFloat64:
+ result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+ break;
+ case MachineRepresentation::kFloat32:
+ result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
- return graph()->NewNode(m->Float64InsertHighWord32(), input, new_high_word);
-#endif
-}
+  // We need to sign-extend the value.
+ if (memtype.IsSigned()) {
+ DCHECK(!isFloat);
+ if (valueSizeInBits < 32) {
+ Node* shiftBitCount;
+      // Perform sign extension using the following trick:
+      //   result = (x << (machine_width - type_width)) >>
+      //            (machine_width - type_width)
+      // (a standalone sketch of this follows the function below)
+ if (wasmtype == wasm::kAstI64) {
+ shiftBitCount = jsgraph()->Int32Constant(64 - valueSizeInBits);
+ result = graph()->NewNode(
+ m->Word64Sar(),
+ graph()->NewNode(m->Word64Shl(),
+ graph()->NewNode(m->ChangeInt32ToInt64(), result),
+ shiftBitCount),
+ shiftBitCount);
+ } else if (wasmtype == wasm::kAstI32) {
+ shiftBitCount = jsgraph()->Int32Constant(32 - valueSizeInBits);
+ result = graph()->NewNode(
+ m->Word32Sar(),
+ graph()->NewNode(m->Word32Shl(), result, shiftBitCount),
+ shiftBitCount);
+ }
+ }
+ }
+ return result;
+}
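
A standalone sketch of the two techniques above, in plain C++ and with
illustrative names (not V8 API): the shift-and-mask byte swap used when
Word32ReverseBytes is unavailable, and the shift-left/arithmetic-shift-right
sign-extension trick, here for a 16-bit payload in a 32-bit value.

#include <cstdint>

// Byte-swap a 16-bit value with shifts only, as the fallback loop must do
// when the machine has no reverse-bytes operator.
uint16_t SwapBytes16(uint16_t x) {
  return static_cast<uint16_t>((x << 8) | (x >> 8));
}

// Sign-extend the low |type_width| bits of |x| to 32 bits. Relies on
// arithmetic right shift of signed values, which all V8 targets provide.
int32_t SignExtend(uint32_t x, int type_width) {
  int shift = 32 - type_width;
  return static_cast<int32_t>(x << shift) >> shift;
}

For example, SwapBytes16(0x1234) yields 0x3412, and SignExtend(0xFFFF, 16)
yields -1.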
Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
Node* result = Unop(
@@ -1020,7 +1212,6 @@ Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
return result;
}
-
Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
#if WASM_64
Node* result = Unop(
@@ -1049,100 +1240,9 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
#endif
}
-
-Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
- Diamond left_le_right(graph(), jsgraph()->common(),
- Binop(wasm::kExprF32Le, left, right));
-
- Diamond right_lt_left(graph(), jsgraph()->common(),
- Binop(wasm::kExprF32Lt, right, left));
-
- Diamond left_is_not_nan(graph(), jsgraph()->common(),
- Binop(wasm::kExprF32Eq, left, left));
-
- return left_le_right.Phi(
- wasm::kAstF32, left,
- right_lt_left.Phi(
- wasm::kAstF32, right,
- left_is_not_nan.Phi(
- wasm::kAstF32,
- Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
- Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
-}
-
-
-Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
- Diamond left_ge_right(graph(), jsgraph()->common(),
- Binop(wasm::kExprF32Ge, left, right));
-
- Diamond right_gt_left(graph(), jsgraph()->common(),
- Binop(wasm::kExprF32Gt, right, left));
-
- Diamond left_is_not_nan(graph(), jsgraph()->common(),
- Binop(wasm::kExprF32Eq, left, left));
-
- return left_ge_right.Phi(
- wasm::kAstF32, left,
- right_gt_left.Phi(
- wasm::kAstF32, right,
- left_is_not_nan.Phi(
- wasm::kAstF32,
- Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
- Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
-}
-
-
-Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
- Diamond left_le_right(graph(), jsgraph()->common(),
- Binop(wasm::kExprF64Le, left, right));
-
- Diamond right_lt_left(graph(), jsgraph()->common(),
- Binop(wasm::kExprF64Lt, right, left));
-
- Diamond left_is_not_nan(graph(), jsgraph()->common(),
- Binop(wasm::kExprF64Eq, left, left));
-
- return left_le_right.Phi(
- wasm::kAstF64, left,
- right_lt_left.Phi(
- wasm::kAstF64, right,
- left_is_not_nan.Phi(
- wasm::kAstF64,
- Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
- Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
-}
-
-
-Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
- Diamond left_ge_right(graph(), jsgraph()->common(),
- Binop(wasm::kExprF64Ge, left, right));
-
- Diamond right_gt_left(graph(), jsgraph()->common(),
- Binop(wasm::kExprF64Lt, right, left));
-
- Diamond left_is_not_nan(graph(), jsgraph()->common(),
- Binop(wasm::kExprF64Eq, left, left));
-
- return left_ge_right.Phi(
- wasm::kAstF64, left,
- right_gt_left.Phi(
- wasm::kAstF64, right,
- left_is_not_nan.Phi(
- wasm::kAstF64,
- Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
- Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
-}
-
-
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
-
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
@@ -1151,19 +1251,14 @@ Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32SConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
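
The trap condition here is a round-trip check: truncate, convert to int,
convert back, and compare. A minimal C++ model of the same idea (the trap is
a boolean return; the range check exists only because an out-of-range cast
is undefined behavior in source-level C++, while the generated machine code
can rely on the target's defined truncation):

#include <cmath>
#include <cstdint>

bool TruncF32ToI32(float input, int32_t* out) {
  float truncated = std::trunc(input);  // kExprF32Trunc
  if (!(truncated >= -2147483648.0f && truncated < 2147483648.0f)) {
    return false;  // kTrapFloatUnrepresentable (also catches NaN)
  }
  int32_t result = static_cast<int32_t>(truncated);
  // The check built above: f32(result) != truncated means overflow.
  if (static_cast<float>(result) != truncated) return false;
  *out = result;
  return true;
}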
-
-Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF64Trunc, input);
Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
@@ -1172,21 +1267,14 @@ Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64SConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
-
-Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
-
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
@@ -1195,19 +1283,14 @@ Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF32UConvertI32, result);
Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
-
-Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js must use the wacky JS semantics.
- return graph()->NewNode(
- m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
- }
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF64Trunc, input);
Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
@@ -1216,185 +1299,82 @@ Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
// truncated input value, then there has been an overflow and we trap.
Node* check = Unop(wasm::kExprF64UConvertI32, result);
Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow);
+ trap_->AddTrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
+Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
-Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
- //// Implement the following code as TF graph.
- // value = value | (value << 1);
- // value = value | (value << 2);
- // value = value | (value << 4);
- // value = value | (value << 8);
- // value = value | (value << 16);
- // return CountPopulation32(0xffffffff XOR value);
+Node* WasmGraphBuilder::BuildI32AsmjsSConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
- Node* result =
- Binop(wasm::kExprI32Ior, input,
- Binop(wasm::kExprI32Shl, input, jsgraph()->Int32Constant(1)));
+Node* WasmGraphBuilder::BuildI32AsmjsUConvertF32(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ input = graph()->NewNode(m->ChangeFloat32ToFloat64(), input);
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
- result = Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(2)));
+Node* WasmGraphBuilder::BuildI32AsmjsUConvertF64(Node* input) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js must use the wacky JS semantics.
+ return graph()->NewNode(m->TruncateFloat64ToWord32(), input);
+}
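
All four Asmjs variants funnel into TruncateFloat64ToWord32, i.e. the
ECMAScript ToInt32 operation: NaN and infinities become 0, everything else
is truncated and reduced modulo 2^32. A self-contained model:

#include <cmath>
#include <cstdint>

int32_t JsToInt32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  double t = std::trunc(d);
  double m = std::fmod(t, 4294967296.0);  // reduce modulo 2^32
  if (m < 0) m += 4294967296.0;           // shift into [0, 2^32)
  // The final cast is the usual two's-complement reinterpretation.
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

For example, JsToInt32(-1.5) == -1, JsToInt32(4294967297.0) == 1, and
JsToInt32(NAN) == 0; the trapping wasm conversions above would instead trap
on the last two inputs.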
- result = Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(4)));
+Node* WasmGraphBuilder::BuildBitCountingCall(Node* input, ExternalReference ref,
+ MachineRepresentation input_type) {
+ Node* stack_slot_param =
+ graph()->NewNode(jsgraph()->machine()->StackSlot(input_type));
- result = Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(8)));
+ const Operator* store_op = jsgraph()->machine()->Store(
+ StoreRepresentation(input_type, kNoWriteBarrier));
+ *effect_ =
+ graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+ input, *effect_, *control_);
- result =
- Binop(wasm::kExprI32Ior, result,
- Binop(wasm::kExprI32Shl, result, jsgraph()->Int32Constant(16)));
+ MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 1);
+ sig_builder.AddReturn(MachineType::Int32());
+ sig_builder.AddParam(MachineType::Pointer());
- result = BuildI32Popcnt(
- Binop(wasm::kExprI32Xor, jsgraph()->Int32Constant(0xffffffff), result));
+ Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+ Node* args[] = {function, stack_slot_param};
- return result;
+ return BuildCCall(sig_builder.Build(), args);
}
+Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
+ return BuildBitCountingCall(
+ input, ExternalReference::wasm_word32_ctz(jsgraph()->isolate()),
+ MachineRepresentation::kWord32);
+}
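
BuildBitCountingCall spills the operand to a stack slot and calls an
out-of-line C helper through an ExternalReference, since not every target
has ctz/popcnt instructions. The helper only has to compute the plain bit
count; a portable sketch of what a 32-bit count-trailing-zeros helper
computes (the actual V8 runtime function may be implemented differently):

#include <cstdint>

// Count trailing zeros; 32 for input 0, matching wasm i32.ctz semantics.
uint32_t Word32Ctz(uint32_t value) {
  if (value == 0) return 32;
  uint32_t count = 0;
  while ((value & 1) == 0) {
    value >>= 1;
    ++count;
  }
  return count;
}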
Node* WasmGraphBuilder::BuildI64Ctz(Node* input) {
- //// Implement the following code as TF graph.
- // value = value | (value << 1);
- // value = value | (value << 2);
- // value = value | (value << 4);
- // value = value | (value << 8);
- // value = value | (value << 16);
- // value = value | (value << 32);
- // return CountPopulation64(0xffffffffffffffff XOR value);
-
- Node* result =
- Binop(wasm::kExprI64Ior, input,
- Binop(wasm::kExprI64Shl, input, jsgraph()->Int64Constant(1)));
-
- result = Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(2)));
-
- result = Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(4)));
-
- result = Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(8)));
-
- result =
- Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(16)));
-
- result =
- Binop(wasm::kExprI64Ior, result,
- Binop(wasm::kExprI64Shl, result, jsgraph()->Int64Constant(32)));
-
- result = BuildI64Popcnt(Binop(
- wasm::kExprI64Xor, jsgraph()->Int64Constant(0xffffffffffffffff), result));
-
- return result;
+ return Unop(wasm::kExprI64UConvertI32,
+ BuildBitCountingCall(input, ExternalReference::wasm_word64_ctz(
+ jsgraph()->isolate()),
+ MachineRepresentation::kWord64));
}
-
Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
- //// Implement the following code as a TF graph.
- // value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
- // value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
- // value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
- // value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
- // value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
-
- Node* result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, input, jsgraph()->Int32Constant(1)),
- jsgraph()->Int32Constant(0x55555555)),
- Binop(wasm::kExprI32And, input, jsgraph()->Int32Constant(0x55555555)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(2)),
- jsgraph()->Int32Constant(0x33333333)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x33333333)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(4)),
- jsgraph()->Int32Constant(0x0f0f0f0f)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0f0f0f0f)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(8)),
- jsgraph()->Int32Constant(0x00ff00ff)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x00ff00ff)));
-
- result = Binop(
- wasm::kExprI32Add,
- Binop(wasm::kExprI32And,
- Binop(wasm::kExprI32ShrU, result, jsgraph()->Int32Constant(16)),
- jsgraph()->Int32Constant(0x0000ffff)),
- Binop(wasm::kExprI32And, result, jsgraph()->Int32Constant(0x0000ffff)));
-
- return result;
+ return BuildBitCountingCall(
+ input, ExternalReference::wasm_word32_popcnt(jsgraph()->isolate()),
+ MachineRepresentation::kWord32);
}
-
Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
- //// Implement the following code as a TF graph.
- // value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
- // value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
- // value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
- // value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
- // value = ((value >> 16) & 0x0000ffff0000ffff) + (value &
- // 0x0000ffff0000ffff);
- // value = ((value >> 32) & 0x00000000ffffffff) + (value &
- // 0x00000000ffffffff);
-
- Node* result =
- Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And,
- Binop(wasm::kExprI64ShrU, input, jsgraph()->Int64Constant(1)),
- jsgraph()->Int64Constant(0x5555555555555555)),
- Binop(wasm::kExprI64And, input,
- jsgraph()->Int64Constant(0x5555555555555555)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(2)),
- jsgraph()->Int64Constant(0x3333333333333333)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x3333333333333333)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(4)),
- jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x0f0f0f0f0f0f0f0f)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(8)),
- jsgraph()->Int64Constant(0x00ff00ff00ff00ff)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x00ff00ff00ff00ff)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(16)),
- jsgraph()->Int64Constant(0x0000ffff0000ffff)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x0000ffff0000ffff)));
-
- result = Binop(wasm::kExprI64Add,
- Binop(wasm::kExprI64And, Binop(wasm::kExprI64ShrU, result,
- jsgraph()->Int64Constant(32)),
- jsgraph()->Int64Constant(0x00000000ffffffff)),
- Binop(wasm::kExprI64And, result,
- jsgraph()->Int64Constant(0x00000000ffffffff)));
-
- return result;
+ return Unop(wasm::kExprI64UConvertI32,
+ BuildBitCountingCall(input, ExternalReference::wasm_word64_popcnt(
+ jsgraph()->isolate()),
+ MachineRepresentation::kWord64));
}
Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
@@ -1468,59 +1448,10 @@ Node* WasmGraphBuilder::BuildF64Asin(Node* input) {
return BuildCFuncInstruction(ref, type, input);
}
-Node* WasmGraphBuilder::BuildF64Atan(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_atan_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Cos(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_cos_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Sin(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_sin_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Tan(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_tan_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Exp(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_exp_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Log(Node* input) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_log_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, input);
-}
-
-Node* WasmGraphBuilder::BuildF64Atan2(Node* left, Node* right) {
- MachineType type = MachineType::Float64();
- ExternalReference ref =
- ExternalReference::f64_atan2_wrapper_function(jsgraph()->isolate());
- return BuildCFuncInstruction(ref, type, left, right);
-}
-
Node* WasmGraphBuilder::BuildF64Pow(Node* left, Node* right) {
MachineType type = MachineType::Float64();
ExternalReference ref =
- ExternalReference::f64_pow_wrapper_function(jsgraph()->isolate());
+ ExternalReference::wasm_float64_pow(jsgraph()->isolate());
return BuildCFuncInstruction(ref, type, left, right);
}
@@ -1635,66 +1566,74 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
return load;
}
-Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI64SConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float32_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64());
+ MachineRepresentation::kFloat32, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat32ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
-Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input) {
+Node* WasmGraphBuilder::BuildI64UConvertF32(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float32_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat32, MachineType::Int64());
+ MachineRepresentation::kFloat32, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat32ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
-Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI64SConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float64_to_int64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64());
+ MachineRepresentation::kFloat64, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat64ToInt64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
-Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input) {
+Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildFloatToIntConversionInstruction(
input, ExternalReference::wasm_float64_to_uint64(jsgraph()->isolate()),
- MachineRepresentation::kFloat64, MachineType::Int64());
+ MachineRepresentation::kFloat64, MachineType::Int64(), position);
} else {
Node* trunc = graph()->NewNode(
jsgraph()->machine()->TryTruncateFloat64ToUint64(), input);
- Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc);
- Node* overflow =
- graph()->NewNode(jsgraph()->common()->Projection(1), trunc);
- trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow);
+ Node* result = graph()->NewNode(jsgraph()->common()->Projection(0), trunc,
+ graph()->start());
+ Node* overflow = graph()->NewNode(jsgraph()->common()->Projection(1), trunc,
+ graph()->start());
+ trap_->ZeroCheck64(wasm::kTrapFloatUnrepresentable, overflow, position);
return result;
}
}
@@ -1702,7 +1641,7 @@ Node* WasmGraphBuilder::BuildI64UConvertF64(Node* input) {
Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
Node* input, ExternalReference ref,
MachineRepresentation parameter_representation,
- const MachineType result_type) {
+ const MachineType result_type, wasm::WasmCodePosition position) {
Node* stack_slot_param = graph()->NewNode(
jsgraph()->machine()->StackSlot(parameter_representation));
Node* stack_slot_result = graph()->NewNode(
@@ -1719,7 +1658,7 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
Node* args[] = {function, stack_slot_param, stack_slot_result};
trap_->ZeroCheck32(wasm::kTrapFloatUnrepresentable,
- BuildCCall(sig_builder.Build(), args));
+ BuildCCall(sig_builder.Build(), args), position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
@@ -1728,37 +1667,35 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
return load;
}
-Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- if (m->Int32DivIsSafe()) {
- // The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
- }
-
- // Check denominator for zero.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
-
- // Check numerator for -1. (avoid minint / -1 case).
- Diamond n(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
-
- Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
- Node* neg =
- graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
-
- return n.Phi(MachineRepresentation::kWord32, neg,
- z.Phi(MachineRepresentation::kWord32,
- jsgraph()->Int32Constant(0), div));
- }
+Node* WasmGraphBuilder::BuildGrowMemory(Node* input) {
+ Runtime::FunctionId function_id = Runtime::kWasmGrowMemory;
+ const Runtime::Function* function = Runtime::FunctionForId(function_id);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
+ CallDescriptor::kNoFlags);
+ Node** control_ptr = control_;
+ Node** effect_ptr = effect_;
+ wasm::ModuleEnv* module = module_;
+ input = BuildChangeUint32ToSmi(input);
+ Node* inputs[] = {
+ jsgraph()->CEntryStubConstant(function->result_size), input, // C entry
+ jsgraph()->ExternalConstant(
+ ExternalReference(function_id, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(function->nargs), // arity
+ jsgraph()->HeapConstant(module->instance->context), // context
+ *effect_ptr,
+ *control_ptr};
+ Node* node = graph()->NewNode(jsgraph()->common()->Call(desc),
+ static_cast<int>(arraysize(inputs)), inputs);
+ *effect_ptr = node;
+ node = BuildChangeSmiToInt32(node);
+ return node;
+}
- trap_->ZeroCheck32(wasm::kTrapDivByZero, right);
+Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position);
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
@@ -1766,7 +1703,7 @@ Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
- trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt);
+ trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, left, kMinInt, position);
if (*control_ != denom_is_m1) {
*control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
*control_);
@@ -1776,30 +1713,11 @@ Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right) {
return graph()->NewNode(m->Int32Div(), left, right, *control_);
}
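
The branch structure above isolates the one signed division that cannot be
represented: kMinInt / -1 is +2^31, which does not fit in an int32 (and
faults on x86 idiv). As straight-line C++, the trap conditions are:

#include <cstdint>

// Trapping wasm i32.div_s: two trap cases, then an ordinary division.
bool I32DivS(int32_t lhs, int32_t rhs, int32_t* out) {
  if (rhs == 0) return false;                       // kTrapDivByZero
  if (lhs == INT32_MIN && rhs == -1) return false;  // kTrapDivUnrepresentable
  *out = lhs / rhs;
  return true;
}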
-Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
- // Explicit check for x % -1.
- Diamond d(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
- d.Chain(z.if_false);
-
- return z.Phi(
- MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
- }
-
- trap_->ZeroCheck32(wasm::kTrapRemByZero, right);
+ trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position);
Diamond d(
graph(), jsgraph()->common(),
@@ -1811,56 +1729,115 @@ Node* WasmGraphBuilder::BuildI32RemS(Node* left, Node* right) {
graph()->NewNode(m->Int32Mod(), left, right, d.if_false));
}
-Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32DivU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- if (m->Uint32DivIsSafe()) {
- // The hardware instruction does the right thing (e.g. arm).
- return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
- }
+ return graph()->NewNode(
+ m->Uint32Div(), left, right,
+ trap_->ZeroCheck32(wasm::kTrapDivByZero, right, position));
+}
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ return graph()->NewNode(
+ m->Uint32Mod(), left, right,
+ trap_->ZeroCheck32(wasm::kTrapRemByZero, right, position));
+}
- return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- graph()->NewNode(jsgraph()->machine()->Uint32Div(), left,
- right, z.if_false));
+Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js semantics return 0 on divide or mod by zero.
+ if (m->Int32DivIsSafe()) {
+ // The hardware instruction does the right thing (e.g. arm).
+ return graph()->NewNode(m->Int32Div(), left, right, graph()->start());
}
- return graph()->NewNode(m->Uint32Div(), left, right,
- trap_->ZeroCheck32(wasm::kTrapDivByZero, right));
+
+ // Check denominator for zero.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+  // Check the denominator for -1 (to avoid the kMinInt / -1 case).
+ Diamond n(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ BranchHint::kFalse);
+
+ Node* div = graph()->NewNode(m->Int32Div(), left, right, z.if_false);
+ Node* neg =
+ graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+
+ return n.Phi(
+ MachineRepresentation::kWord32, neg,
+ z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0), div));
}
-Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
MachineOperatorBuilder* m = jsgraph()->machine();
- if (module_ && module_->asm_js()) {
- // asm.js semantics return 0 on divide or mod by zero.
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ // asm.js semantics return 0 on divide or mod by zero.
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
- Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
- z.if_false);
- return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- rem);
+ // Explicit check for x % -1.
+ Diamond d(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
+ BranchHint::kFalse);
+ d.Chain(z.if_false);
+
+ return z.Phi(
+ MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js semantics return 0 on divide or mod by zero.
+ if (m->Uint32DivIsSafe()) {
+ // The hardware instruction does the right thing (e.g. arm).
+ return graph()->NewNode(m->Uint32Div(), left, right, graph()->start());
}
- return graph()->NewNode(m->Uint32Mod(), left, right,
- trap_->ZeroCheck32(wasm::kTrapRemByZero, right));
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ graph()->NewNode(jsgraph()->machine()->Uint32Div(), left, right,
+ z.if_false));
+}
+
+Node* WasmGraphBuilder::BuildI32AsmjsRemU(Node* left, Node* right) {
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ // asm.js semantics return 0 on divide or mod by zero.
+ // Explicit check for x % 0.
+ Diamond z(
+ graph(), jsgraph()->common(),
+ graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
+ BranchHint::kFalse);
+
+ Node* rem = graph()->NewNode(jsgraph()->machine()->Uint32Mod(), left, right,
+ z.if_false);
+ return z.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
+ rem);
}
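
A compact model of the asm.js semantics these four builders implement,
assuming two's-complement int32_t (the graph versions express the same
checks as Diamonds so that no division instruction ever faults):

#include <cstdint>

// asm.js (x / y) | 0 never traps: zero divisors yield 0, and
// INT32_MIN / -1 wraps back to INT32_MIN, computed here as an unsigned
// negation to avoid the overflowing division.
int32_t I32AsmjsDivS(int32_t lhs, int32_t rhs) {
  if (rhs == 0) return 0;
  if (rhs == -1) return static_cast<int32_t>(0u - static_cast<uint32_t>(lhs));
  return lhs / rhs;
}

// asm.js (x % y) | 0: both the zero and the -1 divisor cases yield 0.
int32_t I32AsmjsRemS(int32_t lhs, int32_t rhs) {
  if (rhs == 0 || rhs == -1) return 0;
  return lhs % rhs;
}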
-Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_int64_div(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapDivByZero);
+ MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- trap_->ZeroCheck64(wasm::kTrapDivByZero, right);
+ trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position);
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
@@ -1869,7 +1846,7 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
&denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
- std::numeric_limits<int64_t>::min());
+ std::numeric_limits<int64_t>::min(), position);
if (*control_ != denom_is_m1) {
*control_ = graph()->NewNode(jsgraph()->common()->Merge(2), denom_is_not_m1,
*control_);
@@ -1880,13 +1857,14 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right) {
*control_);
}
-Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_int64_mod(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapRemByZero);
+ MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- trap_->ZeroCheck64(wasm::kTrapRemByZero, right);
+ trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position);
Diamond d(jsgraph()->graph(), jsgraph()->common(),
graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
jsgraph()->Int64Constant(-1)));
@@ -1898,28 +1876,33 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right) {
rem);
}
-Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64DivU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_uint64_div(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapDivByZero);
+ MachineType::Int64(), wasm::kTrapDivByZero, position);
}
- return graph()->NewNode(jsgraph()->machine()->Uint64Div(), left, right,
- trap_->ZeroCheck64(wasm::kTrapDivByZero, right));
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint64Div(), left, right,
+ trap_->ZeroCheck64(wasm::kTrapDivByZero, right, position));
}
-Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right) {
+Node* WasmGraphBuilder::BuildI64RemU(Node* left, Node* right,
+ wasm::WasmCodePosition position) {
if (jsgraph()->machine()->Is32()) {
return BuildDiv64Call(
left, right, ExternalReference::wasm_uint64_mod(jsgraph()->isolate()),
- MachineType::Int64(), wasm::kTrapRemByZero);
+ MachineType::Int64(), wasm::kTrapRemByZero, position);
}
- return graph()->NewNode(jsgraph()->machine()->Uint64Mod(), left, right,
- trap_->ZeroCheck64(wasm::kTrapRemByZero, right));
+ return graph()->NewNode(
+ jsgraph()->machine()->Uint64Mod(), left, right,
+ trap_->ZeroCheck64(wasm::kTrapRemByZero, right, position));
}
Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
ExternalReference ref,
- MachineType result_type, int trap_zero) {
+ MachineType result_type, int trap_zero,
+ wasm::WasmCodePosition position) {
Node* stack_slot_dst = graph()->NewNode(
jsgraph()->machine()->StackSlot(MachineRepresentation::kWord64));
Node* stack_slot_src = graph()->NewNode(
@@ -1946,8 +1929,8 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
// TODO(wasm): This can get simpler if we have a specialized runtime call to
// throw WASM exceptions by trap code instead of by string.
- trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call);
- trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1);
+ trap_->ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
+ trap_->TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
Node* load =
graph()->NewNode(load_op, stack_slot_dst, jsgraph()->Int32Constant(0),
@@ -1977,7 +1960,8 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
+Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+ wasm::WasmCodePosition position) {
const size_t params = sig->parameter_count();
const size_t extra = 2; // effect and control inputs.
const size_t count = 1 + params + extra;
@@ -1993,32 +1977,36 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
const Operator* op = jsgraph()->common()->Call(descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+ SetSourcePosition(call, position);
*effect_ = call;
return call;
}
-Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
// Add code object as constant.
- args[0] = Constant(module_->GetFunctionCode(index));
+ args[0] = HeapConstant(module_->GetCodeOrPlaceholder(index));
wasm::FunctionSig* sig = module_->GetFunctionSignature(index);
- return BuildWasmCall(sig, args);
+ return BuildWasmCall(sig, args, position);
}
-Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args,
+ wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
// Add code object as constant.
- args[0] = Constant(module_->GetImportCode(index));
+ args[0] = HeapConstant(module_->GetImportCode(index));
wasm::FunctionSig* sig = module_->GetImportSignature(index);
- return BuildWasmCall(sig, args);
+ return BuildWasmCall(sig, args, position);
}
-Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
+Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(args[0]);
DCHECK(module_ && module_->instance);
@@ -2027,19 +2015,22 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
// Compute the code object by loading it from the function table.
Node* key = args[0];
+ // Assume only one table for now.
+ DCHECK_LE(module_->instance->function_tables.size(), 1u);
// Bounds check the index.
- int table_size = static_cast<int>(module_->FunctionTableSize());
+ uint32_t table_size =
+ module_->IsValidTable(0) ? module_->GetTable(0)->max_size : 0;
if (table_size > 0) {
// Bounds check against the table size.
- Node* size = Int32Constant(static_cast<int>(table_size));
+ Node* size = Uint32Constant(table_size);
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
- trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds);
+ trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
} else {
// No function table. Generate a trap and return a constant.
- trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0));
+ trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
return trap_->GetTrapValue(module_->GetSignature(index));
}
- Node* table = FunctionTable();
+ Node* table = FunctionTable(0);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
@@ -2054,44 +2045,169 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
Int32Constant(kPointerSizeLog2)),
Int32Constant(fixed_offset)),
*effect_, *control_);
- Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
- jsgraph()->SmiConstant(index));
- trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match);
+ Node* sig_match =
+ graph()->NewNode(machine->Word32Equal(),
+ BuildChangeSmiToInt32(load_sig), Int32Constant(index));
+ trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
}
// Load code object from the table.
- int offset = fixed_offset + kPointerSize * table_size;
+ uint32_t offset = fixed_offset + kPointerSize * table_size;
Node* load_code = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
graph()->NewNode(machine->Int32Add(),
graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2)),
- Int32Constant(offset)),
+ Uint32Constant(offset)),
*effect_, *control_);
args[0] = load_code;
wasm::FunctionSig* sig = module_->GetSignature(index);
- return BuildWasmCall(sig, args);
+ return BuildWasmCall(sig, args, position);
}
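
CallIndirect performs three checks before the call: bounds-check the key
against the table size, compare the signature id stored in the first half
of the table, then load the code object from the second half. A sketch of
that layout with illustrative types (the real table is a single FixedArray,
not two vectors):

#include <cstdint>
#include <vector>

struct FunctionTable {
  std::vector<int32_t> sig_ids;  // signature id per slot
  std::vector<void*> code;       // code object per slot
};

void* LookupIndirect(const FunctionTable& table, uint32_t key,
                     int32_t expected_sig, bool* trap) {
  if (key >= table.sig_ids.size()) {         // kTrapFuncInvalid
    *trap = true;
    return nullptr;
  }
  if (table.sig_ids[key] != expected_sig) {  // kTrapFuncSigMismatch
    *trap = true;
    return nullptr;
  }
  *trap = false;
  return table.code[key];
}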
+Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
+ // Implement Rol by Ror since TurboFan does not have Rol opcode.
+ // TODO(weiliang): support Word32Rol opcode in TurboFan.
+ Int32Matcher m(right);
+ if (m.HasValue()) {
+ return Binop(wasm::kExprI32Ror, left,
+ jsgraph()->Int32Constant(32 - m.Value()));
+ } else {
+ return Binop(wasm::kExprI32Ror, left,
+ Binop(wasm::kExprI32Sub, jsgraph()->Int32Constant(32), right));
+ }
+}
-Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
- SimplifiedOperatorBuilder simplified(jsgraph()->zone());
+Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
+ // Implement Rol by Ror since TurboFan does not have Rol opcode.
+ // TODO(weiliang): support Word64Rol opcode in TurboFan.
+ Int64Matcher m(right);
+ if (m.HasValue()) {
+ return Binop(wasm::kExprI64Ror, left,
+ jsgraph()->Int64Constant(64 - m.Value()));
+ } else {
+ return Binop(wasm::kExprI64Ror, left,
+ Binop(wasm::kExprI64Sub, jsgraph()->Int64Constant(64), right));
+ }
+}
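
Both rotate-left builders rely on the identity rol(x, n) == ror(x, width - n)
taken modulo the width. A self-contained check of that identity for 32 bits:

#include <cstdint>

uint32_t Ror32(uint32_t x, uint32_t n) {
  n &= 31;
  return (x >> n) | (x << ((32 - n) & 31));
}

// rol via ror with the complemented shift count, as above.
uint32_t Rol32(uint32_t x, uint32_t n) { return Ror32(x, 32 - (n & 31)); }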
+
+Node* WasmGraphBuilder::Invert(Node* node) {
+ return Unop(wasm::kExprI32Eqz, node);
+}
+
+Node* WasmGraphBuilder::BuildChangeInt32ToTagged(Node* value) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+
+ if (machine->Is64()) {
+ return BuildChangeInt32ToSmi(value);
+ }
+
+ Node* add = graph()->NewNode(machine->Int32AddWithOverflow(), value, value,
+ graph()->start());
+
+ Node* ovf = graph()->NewNode(common->Projection(1), add, graph()->start());
+ Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), ovf,
+ graph()->start());
+
+ Node* if_true = graph()->NewNode(common->IfTrue(), branch);
+ Node* vtrue = BuildAllocateHeapNumberWithValue(
+ graph()->NewNode(machine->ChangeInt32ToFloat64(), value), if_true);
+
+ Node* if_false = graph()->NewNode(common->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(common->Projection(0), add, if_false);
+
+ Node* merge = graph()->NewNode(common->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
+ return phi;
+}
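
On 32-bit targets a smi is the payload shifted left by one bit, so
value + value both performs the tagging and flags exactly the payloads that
need more than 31 bits; overflow falls back to heap-number boxing. A sketch
of the check, with the addition widened so it is well defined in C++:

#include <cstdint>

bool TryTagSmi32(int32_t value, int32_t* smi) {
  int64_t sum = static_cast<int64_t>(value) + value;   // value << 1
  if (sum != static_cast<int32_t>(sum)) return false;  // overflow: box it
  *smi = static_cast<int32_t>(sum);
  return true;
}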
+
+Node* WasmGraphBuilder::BuildChangeFloat64ToTagged(Node* value) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+
+ Node* value32 = graph()->NewNode(machine->RoundFloat64ToInt32(), value);
+ Node* check_same = graph()->NewNode(
+ machine->Float64Equal(), value,
+ graph()->NewNode(machine->ChangeInt32ToFloat64(), value32));
+ Node* branch_same =
+ graph()->NewNode(common->Branch(), check_same, graph()->start());
+
+ Node* if_smi = graph()->NewNode(common->IfTrue(), branch_same);
+ Node* vsmi;
+ Node* if_box = graph()->NewNode(common->IfFalse(), branch_same);
+ Node* vbox;
+
+ // We only need to check for -0 if the {value} can potentially contain -0.
+ Node* check_zero = graph()->NewNode(machine->Word32Equal(), value32,
+ jsgraph()->Int32Constant(0));
+ Node* branch_zero =
+ graph()->NewNode(common->Branch(BranchHint::kFalse), check_zero, if_smi);
+
+ Node* if_zero = graph()->NewNode(common->IfTrue(), branch_zero);
+ Node* if_notzero = graph()->NewNode(common->IfFalse(), branch_zero);
+
+ // In case of 0, we need to check the high bits for the IEEE -0 pattern.
+ Node* check_negative = graph()->NewNode(
+ machine->Int32LessThan(),
+ graph()->NewNode(machine->Float64ExtractHighWord32(), value),
+ jsgraph()->Int32Constant(0));
+ Node* branch_negative = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ check_negative, if_zero);
+
+ Node* if_negative = graph()->NewNode(common->IfTrue(), branch_negative);
+ Node* if_notnegative = graph()->NewNode(common->IfFalse(), branch_negative);
+
+ // We need to create a box for negative 0.
+ if_smi = graph()->NewNode(common->Merge(2), if_notzero, if_notnegative);
+ if_box = graph()->NewNode(common->Merge(2), if_box, if_negative);
+
+  // On 64-bit machines we can just wrap the 32-bit integer in a smi; on
+  // 32-bit machines we need to deal with potential overflow and fall back
+  // to boxing.
+ if (machine->Is64()) {
+ vsmi = BuildChangeInt32ToSmi(value32);
+ } else {
+ Node* smi_tag = graph()->NewNode(machine->Int32AddWithOverflow(), value32,
+ value32, if_smi);
+
+ Node* check_ovf = graph()->NewNode(common->Projection(1), smi_tag, if_smi);
+ Node* branch_ovf =
+ graph()->NewNode(common->Branch(BranchHint::kFalse), check_ovf, if_smi);
+
+ Node* if_ovf = graph()->NewNode(common->IfTrue(), branch_ovf);
+ if_box = graph()->NewNode(common->Merge(2), if_ovf, if_box);
+
+ if_smi = graph()->NewNode(common->IfFalse(), branch_ovf);
+ vsmi = graph()->NewNode(common->Projection(0), smi_tag, if_smi);
+ }
+
+ // Allocate the box for the {value}.
+ vbox = BuildAllocateHeapNumberWithValue(value, if_box);
+
+ Node* control = graph()->NewNode(common->Merge(2), if_smi, if_box);
+ value = graph()->NewNode(common->Phi(MachineRepresentation::kTagged, 2), vsmi,
+ vbox, control);
+ return value;
+}
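
The -0 handling works because a double that compares equal to 0 can still
be IEEE -0, distinguishable only by the sign bit in the high word, which is
what the Float64ExtractHighWord32 comparison above tests:

#include <cstdint>
#include <cstring>

bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int32_t high_word = static_cast<int32_t>(bits >> 32);
  return value == 0.0 && high_word < 0;  // sign bit set on a zero
}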
+
+Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
switch (type) {
case wasm::kAstI32:
- return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ return BuildChangeInt32ToTagged(node);
case wasm::kAstI64:
- // TODO(titzer): i64->JS has no good solution right now. Using lower 32
- // bits.
- node =
- graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), node);
- return graph()->NewNode(simplified.ChangeInt32ToTagged(), node);
+ DCHECK(module_ && !module_->instance->context.is_null());
+ // Throw a TypeError.
+ return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+ module_->instance->context, nullptr, 0, effect_,
+ *control_);
case wasm::kAstF32:
node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
node);
- return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ return BuildChangeFloat64ToTagged(node);
case wasm::kAstF64:
- return graph()->NewNode(simplified.ChangeFloat64ToTagged(), node);
+ return BuildChangeFloat64ToTagged(node);
case wasm::kAstStmt:
return jsgraph()->UndefinedConstant();
default:
@@ -2100,33 +2216,157 @@ Node* WasmGraphBuilder::ToJS(Node* node, Node* context, wasm::LocalType type) {
}
}
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
+ Node* effect, Node* control) {
+ Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+
+ Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
+ node, context, effect, control);
+
+ *effect_ = result;
+
+ return result;
+}
+
+bool CanCover(Node* value, IrOpcode::Value opcode) {
+ if (value->opcode() != opcode) return false;
+ bool first = true;
+ for (Edge const edge : value->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) continue;
+ if (NodeProperties::IsEffectEdge(edge)) continue;
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ if (!first) return false;
+ first = false;
+ }
+ return true;
+}
+
+Node* WasmGraphBuilder::BuildChangeTaggedToFloat64(Node* value) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+
+ if (CanCover(value, IrOpcode::kJSToNumber)) {
+ // ChangeTaggedToFloat64(JSToNumber(x)) =>
+ // if IsSmi(x) then ChangeSmiToFloat64(x)
+ // else let y = JSToNumber(x) in
+ // if IsSmi(y) then ChangeSmiToFloat64(y)
+ // else BuildLoadHeapNumberValue(y)
+ Node* object = NodeProperties::GetValueInput(value, 0);
+ Node* context = NodeProperties::GetContextInput(value);
+ Node* frame_state = NodeProperties::GetFrameStateInput(value);
+ Node* effect = NodeProperties::GetEffectInput(value);
+ Node* control = NodeProperties::GetControlInput(value);
+
+ const Operator* merge_op = common->Merge(2);
+ const Operator* ephi_op = common->EffectPhi(2);
+ const Operator* phi_op = common->Phi(MachineRepresentation::kFloat64, 2);
+
+ Node* check1 = BuildTestNotSmi(object);
+ Node* branch1 =
+ graph()->NewNode(common->Branch(BranchHint::kFalse), check1, control);
+
+ Node* if_true1 = graph()->NewNode(common->IfTrue(), branch1);
+ Node* vtrue1 = graph()->NewNode(value->op(), object, context, frame_state,
+ effect, if_true1);
+ Node* etrue1 = vtrue1;
+
+ Node* check2 = BuildTestNotSmi(vtrue1);
+ Node* branch2 = graph()->NewNode(common->Branch(), check2, if_true1);
+
+ Node* if_true2 = graph()->NewNode(common->IfTrue(), branch2);
+ Node* vtrue2 = BuildLoadHeapNumberValue(vtrue1, if_true2);
+
+ Node* if_false2 = graph()->NewNode(common->IfFalse(), branch2);
+ Node* vfalse2 = BuildChangeSmiToFloat64(vtrue1);
+
+ if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
+
+ Node* if_false1 = graph()->NewNode(common->IfFalse(), branch1);
+ Node* vfalse1 = BuildChangeSmiToFloat64(object);
+ Node* efalse1 = effect;
+
+ Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
+ Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
+ Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
+
+    // Wire the new diamond into the graph; {JSToNumber} can still throw.
+ NodeProperties::ReplaceUses(value, phi1, ephi1, etrue1, etrue1);
+
+ // TODO(mstarzinger): This iteration cuts out the IfSuccess projection from
+ // the node and places it inside the diamond. Come up with a helper method!
+ for (Node* use : etrue1->uses()) {
+ if (use->opcode() == IrOpcode::kIfSuccess) {
+ use->ReplaceUses(merge1);
+ NodeProperties::ReplaceControlInput(branch2, use);
+ }
+ }
+ return phi1;
+ }
+
+ Node* check = BuildTestNotSmi(value);
+ Node* branch = graph()->NewNode(common->Branch(BranchHint::kFalse), check,
+ graph()->start());
+
+ Node* if_not_smi = graph()->NewNode(common->IfTrue(), branch);
+
+ Node* vnot_smi;
+ Node* check_undefined = graph()->NewNode(machine->WordEqual(), value,
+ jsgraph()->UndefinedConstant());
+ Node* branch_undefined = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ check_undefined, if_not_smi);
+
+ Node* if_undefined = graph()->NewNode(common->IfTrue(), branch_undefined);
+ Node* vundefined =
+ jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
+
+ Node* if_not_undefined =
+ graph()->NewNode(common->IfFalse(), branch_undefined);
+ Node* vheap_number = BuildLoadHeapNumberValue(value, if_not_undefined);
+
+ if_not_smi =
+ graph()->NewNode(common->Merge(2), if_undefined, if_not_undefined);
+ vnot_smi = graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
+ vundefined, vheap_number, if_not_smi);
+
+ Node* if_smi = graph()->NewNode(common->IfFalse(), branch);
+ Node* vfrom_smi = BuildChangeSmiToFloat64(value);
+
+ Node* merge = graph()->NewNode(common->Merge(2), if_not_smi, if_smi);
+ Node* phi = graph()->NewNode(common->Phi(MachineRepresentation::kFloat64, 2),
+ vnot_smi, vfrom_smi, merge);
+
+ return phi;
+}
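
Flattened, the slow path above is a three-way dispatch. A rough model with
illustrative parameters standing in for what the graph loads (a real tagged
value is a smi or a heap pointer; the undefined check and the heap-number
load are modeled as plain inputs here):

#include <cstdint>
#include <limits>

double TaggedToFloat64(intptr_t tagged, bool is_undefined,
                       double heap_number_value) {
  if ((tagged & 1) == 0) {  // smi: untag and convert
    return static_cast<double>(static_cast<int32_t>(tagged >> 1));
  }
  if (is_undefined) return std::numeric_limits<double>::quiet_NaN();
  return heap_number_value;  // BuildLoadHeapNumberValue
}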
Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
wasm::LocalType type) {
// Do a JavaScript ToNumber.
- Node* num =
- graph()->NewNode(jsgraph()->javascript()->ToNumber(), node, context,
- jsgraph()->EmptyFrameState(), *effect_, *control_);
- *control_ = num;
- *effect_ = num;
+ Node* num = BuildJavaScriptToNumber(node, context, *effect_, *control_);
// Change representation.
SimplifiedOperatorBuilder simplified(jsgraph()->zone());
- num = graph()->NewNode(simplified.ChangeTaggedToFloat64(), num);
+ num = BuildChangeTaggedToFloat64(num);
switch (type) {
case wasm::kAstI32: {
- num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript),
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
num);
break;
}
case wasm::kAstI64:
// TODO(titzer): JS->i64 has no good solution right now. Using 32 bits.
- num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript),
+ num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
num);
- num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
+ if (jsgraph()->machine()->Is64()) {
+        // On a 32-bit platform we cannot change an int32 to an int64;
+        // instead, the parameter node will be split later.
+ num = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), num);
+ }
break;
case wasm::kAstF32:
num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
@@ -2144,61 +2384,123 @@ Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
return num;
}
-Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
- // Implement Rol by Ror since TurboFan does not have Rol opcode.
- // TODO(weiliang): support Word32Rol opcode in TurboFan.
- Int32Matcher m(right);
- if (m.HasValue()) {
- return Binop(wasm::kExprI32Ror, left,
- jsgraph()->Int32Constant(32 - m.Value()));
- } else {
- return Binop(wasm::kExprI32Ror, left,
- Binop(wasm::kExprI32Sub, jsgraph()->Int32Constant(32), right));
+Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
+ if (jsgraph()->machine()->Is64()) {
+ value = graph()->NewNode(jsgraph()->machine()->ChangeInt32ToInt64(), value);
}
+ return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
+ BuildSmiShiftBitsConstant());
}
-Node* WasmGraphBuilder::BuildI64Rol(Node* left, Node* right) {
- // Implement Rol by Ror since TurboFan does not have Rol opcode.
- // TODO(weiliang): support Word64Rol opcode in TurboFan.
- Int64Matcher m(right);
- if (m.HasValue()) {
- return Binop(wasm::kExprI64Ror, left,
- jsgraph()->Int64Constant(64 - m.Value()));
- } else {
- return Binop(wasm::kExprI64Ror, left,
- Binop(wasm::kExprI64Sub, jsgraph()->Int64Constant(64), right));
+Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
+ value = graph()->NewNode(jsgraph()->machine()->WordSar(), value,
+ BuildSmiShiftBitsConstant());
+ if (jsgraph()->machine()->Is64()) {
+ value =
+ graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(), value);
}
+ return value;
}
-Node* WasmGraphBuilder::Invert(Node* node) {
- return Unop(wasm::kExprI32Eqz, node);
+Node* WasmGraphBuilder::BuildChangeUint32ToSmi(Node* value) {
+ if (jsgraph()->machine()->Is64()) {
+ value =
+ graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), value);
+ }
+ return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
+ BuildSmiShiftBitsConstant());
}
+Node* WasmGraphBuilder::BuildChangeSmiToFloat64(Node* value) {
+ return graph()->NewNode(jsgraph()->machine()->ChangeInt32ToFloat64(),
+ BuildChangeSmiToInt32(value));
+}
+
+Node* WasmGraphBuilder::BuildTestNotSmi(Node* value) {
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagMask == 1);
+ return graph()->NewNode(jsgraph()->machine()->WordAnd(), value,
+ jsgraph()->IntPtrConstant(kSmiTagMask));
+}
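+// With kSmiTag == 0 and kSmiTagMask == 1 (asserted above), (value & 1) is 0
+// for a Smi and 1 for a heap object pointer, so the WordAnd doubles as the
+// "is not a Smi" predicate.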
+
+Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
+ return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
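+// For reference: on 64-bit targets kSmiShiftSize + kSmiTagSize is 32, so a
+// Smi keeps its 32-bit payload in the upper word; on 32-bit targets the
+// shift is 1, e.g. the int32 value 5 is encoded as the bit pattern 10
+// (tag bit 0).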
+
+Node* WasmGraphBuilder::BuildAllocateHeapNumberWithValue(Node* value,
+ Node* control) {
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ CommonOperatorBuilder* common = jsgraph()->common();
+ // The AllocateHeapNumberStub does not use the context, so we can safely pass
+ // in Smi zero here.
+ Callable callable = CodeFactory::AllocateHeapNumber(jsgraph()->isolate());
+ Node* target = jsgraph()->HeapConstant(callable.code());
+ Node* context = jsgraph()->NoContextConstant();
+ Node* effect =
+ graph()->NewNode(common->BeginRegion(RegionObservability::kNotObservable),
+ graph()->start());
+ if (!allocate_heap_number_operator_.is_set()) {
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ allocate_heap_number_operator_.set(common->Call(descriptor));
+ }
+ Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
+ target, context, effect, control);
+ Node* store =
+ graph()->NewNode(machine->Store(StoreRepresentation(
+ MachineRepresentation::kFloat64, kNoWriteBarrier)),
+ heap_number, BuildHeapNumberValueIndexConstant(), value,
+ heap_number, control);
+ return graph()->NewNode(common->FinishRegion(), heap_number, store);
+}
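+// The BeginRegion/FinishRegion pair above groups the allocation and its
+// initializing store into one non-observable effect region, preventing other
+// effects from being scheduled between the raw allocation and the store that
+// makes it a valid HeapNumber.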
+
+Node* WasmGraphBuilder::BuildLoadHeapNumberValue(Node* value, Node* control) {
+ return graph()->NewNode(jsgraph()->machine()->Load(MachineType::Float64()),
+ value, BuildHeapNumberValueIndexConstant(),
+ graph()->start(), control);
+}
+
+Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
+ return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
+}
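+// Subtracting kHeapObjectTag folds the pointer untagging into the field
+// offset, so the float64 payload can be read with a single tagged-pointer
+// plus constant-index load.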
void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
wasm::FunctionSig* sig) {
- int params = static_cast<int>(sig->parameter_count());
- int count = params + 3;
+ int wasm_count = static_cast<int>(sig->parameter_count());
+ int param_count;
+ if (jsgraph()->machine()->Is64()) {
+ param_count = static_cast<int>(sig->parameter_count());
+ } else {
+ param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
+ }
+ int count = param_count + 3;
Node** args = Buffer(count);
// Build the start and the JS parameter nodes.
- Node* start = Start(params + 5);
+ Node* start = Start(param_count + 5);
*control_ = start;
*effect_ = start;
// Create the context parameter
Node* context = graph()->NewNode(
jsgraph()->common()->Parameter(
- Linkage::GetJSCallContextParamIndex(params + 1), "%context"),
+ Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
graph()->start());
int pos = 0;
- args[pos++] = Constant(wasm_code);
+ args[pos++] = HeapConstant(wasm_code);
// Convert JS parameters to WASM numbers.
- for (int i = 0; i < params; i++) {
+ for (int i = 0; i < wasm_count; ++i) {
Node* param =
graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
- args[pos++] = FromJS(param, context, sig->GetParam(i));
+ Node* wasm_param = FromJS(param, context, sig->GetParam(i));
+ args[pos++] = wasm_param;
+ if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+ // We make up the high word with SAR to get the proper sign extension.
+ args[pos++] = graph()->NewNode(jsgraph()->machine()->Word32Sar(),
+ wasm_param, jsgraph()->Int32Constant(31));
+ }
}
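+  // For example, on a 32-bit target a single kAstI64 parameter occupies two
+  // argument slots: the low word from FromJS plus the Word32Sar sign
+  // extension as the high word, matching the lowered call descriptor chosen
+  // below.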
args[pos++] = *effect_;
@@ -2207,132 +2509,181 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
// Call the WASM code.
CallDescriptor* desc =
wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ if (jsgraph()->machine()->Is32()) {
+ desc = wasm::ModuleEnv::GetI32WasmCallDescriptor(jsgraph()->zone(), desc);
+ }
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
- Node* jsval =
- ToJS(call, context,
- sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+ Node* retval = call;
+ if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+ sig->GetReturn(0) == wasm::kAstI64) {
+    // The return value comes as two values; we pick the low word.
+ retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
+ graph()->start());
+ }
+ Node* jsval = ToJS(
+ retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
Node* ret =
graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
MergeControlToEnd(jsgraph(), ret);
}
-
-void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
+void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
wasm::FunctionSig* sig) {
- int js_count = function->shared()->internal_formal_parameter_count();
+ DCHECK(target->IsCallable());
+
int wasm_count = static_cast<int>(sig->parameter_count());
+ int param_count;
+ if (jsgraph()->machine()->Is64()) {
+ param_count = wasm_count;
+ } else {
+ param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
+ }
// Build the start and the parameter nodes.
Isolate* isolate = jsgraph()->isolate();
CallDescriptor* desc;
- Node* start = Start(wasm_count + 3);
+ Node* start = Start(param_count + 3);
*effect_ = start;
*control_ = start;
- // JS context is the last parameter.
- Node* context = Constant(Handle<Context>(function->context(), isolate));
Node** args = Buffer(wasm_count + 7);
- bool arg_count_before_args = false;
- bool add_new_target_undefined = false;
+ // The default context of the target.
+ Handle<Context> target_context = isolate->native_context();
+ // Optimization: check if the target is a JSFunction with the right arity so
+ // that we can call it directly.
+ bool call_direct = false;
int pos = 0;
- if (js_count == wasm_count) {
- // exact arity match, just call the function directly.
- desc = Linkage::GetJSCallDescriptor(graph()->zone(), false, wasm_count + 1,
- CallDescriptor::kNoFlags);
- arg_count_before_args = false;
- add_new_target_undefined = true;
- } else {
- // Use the Call builtin.
+ if (target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+ if (function->shared()->internal_formal_parameter_count() == wasm_count) {
+ call_direct = true;
+
+ args[pos++] = jsgraph()->Constant(target); // target callable.
+ // Receiver.
+ if (is_sloppy(function->shared()->language_mode()) &&
+ !function->shared()->native()) {
+ args[pos++] =
+ HeapConstant(handle(function->context()->global_proxy(), isolate));
+ } else {
+ args[pos++] = jsgraph()->Constant(
+ handle(isolate->heap()->undefined_value(), isolate));
+ }
+
+ desc = Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
+
+ // For a direct call we have to use the context of the JSFunction.
+ target_context = handle(function->context());
+ }
+ }
+
+  // If we cannot call the target directly, we have to use the Call builtin.
+ if (!call_direct) {
Callable callable = CodeFactory::Call(isolate);
args[pos++] = jsgraph()->HeapConstant(callable.code());
+ args[pos++] = jsgraph()->Constant(target); // target callable
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = jsgraph()->Constant(
+ handle(isolate->heap()->undefined_value(), isolate)); // receiver
+
desc = Linkage::GetStubCallDescriptor(isolate, graph()->zone(),
callable.descriptor(), wasm_count + 1,
CallDescriptor::kNoFlags);
- arg_count_before_args = true;
}
- args[pos++] = jsgraph()->Constant(function); // JS function.
- if (arg_count_before_args) {
- args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
- }
- // JS receiver.
- Handle<Object> global(function->context()->global_object(), isolate);
- args[pos++] = jsgraph()->Constant(global);
-
// Convert WASM numbers to JS values.
- for (int i = 0; i < wasm_count; i++) {
- Node* param = graph()->NewNode(jsgraph()->common()->Parameter(i), start);
- args[pos++] = ToJS(param, context, sig->GetParam(i));
+ int param_index = 0;
+ for (int i = 0; i < wasm_count; ++i) {
+ Node* param =
+ graph()->NewNode(jsgraph()->common()->Parameter(param_index++), start);
+ args[pos++] = ToJS(param, sig->GetParam(i));
+ if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
+      // On 32-bit platforms we have to skip the high word of int64 parameters.
+ param_index++;
+ }
}
- if (add_new_target_undefined) {
+ if (call_direct) {
args[pos++] = jsgraph()->UndefinedConstant(); // new target
- }
-
- if (!arg_count_before_args) {
args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
}
- args[pos++] = context;
+
+ args[pos++] = HeapConstant(target_context);
args[pos++] = *effect_;
args[pos++] = *control_;
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
// Convert the return value back.
+ Node* ret;
Node* val =
- FromJS(call, context,
+ FromJS(call, HeapConstant(isolate->native_context()),
sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
- Node* ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+ if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+ sig->GetReturn() == wasm::kAstI64) {
+ ret = graph()->NewNode(jsgraph()->common()->Return(), val,
+ graph()->NewNode(jsgraph()->machine()->Word32Sar(),
+ val, jsgraph()->Int32Constant(31)),
+ call, start);
+ } else {
+ ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+ }
MergeControlToEnd(jsgraph(), ret);
}
-
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
DCHECK(module_ && module_->instance);
if (offset == 0) {
if (!mem_buffer_) {
- mem_buffer_ = jsgraph()->IntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->mem_start));
+ mem_buffer_ = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<uintptr_t>(module_->instance->mem_start),
+ RelocInfo::WASM_MEMORY_REFERENCE);
}
return mem_buffer_;
} else {
- return jsgraph()->IntPtrConstant(
- reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset));
+ return jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset),
+ RelocInfo::WASM_MEMORY_REFERENCE);
}
}
-
Node* WasmGraphBuilder::MemSize(uint32_t offset) {
DCHECK(module_ && module_->instance);
uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
if (offset == 0) {
- if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
+    if (!mem_size_) {
+      mem_size_ = jsgraph()->RelocatableInt32Constant(
+          size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+    }
return mem_size_;
} else {
- return jsgraph()->Int32Constant(size + offset);
+ return jsgraph()->RelocatableInt32Constant(
+ size + offset, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
}
}
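+// Note that MemBuffer and MemSize embed relocatable constants: the
+// WASM_MEMORY_REFERENCE / WASM_MEMORY_SIZE_REFERENCE reloc entries allow the
+// runtime to patch the baked-in buffer address and size when the instance
+// memory grows or moves.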
-
-Node* WasmGraphBuilder::FunctionTable() {
+Node* WasmGraphBuilder::FunctionTable(uint32_t index) {
DCHECK(module_ && module_->instance &&
- !module_->instance->function_table.is_null());
- if (!function_table_) {
- function_table_ = jsgraph()->Constant(module_->instance->function_table);
+ index < module_->instance->function_tables.size());
+ if (!function_tables_.size()) {
+ for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
+ DCHECK(!module_->instance->function_tables[i].is_null());
+ function_tables_.push_back(
+ HeapConstant(module_->instance->function_tables[i]));
+ }
}
- return function_table_;
+ return function_tables_[index];
}
-
-Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
- DCHECK(module_ && module_->instance && module_->instance->globals_start);
- MachineType mem_type = module_->GetGlobalType(index);
- Node* addr = jsgraph()->IntPtrConstant(
+Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
+ MachineType mem_type =
+ wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
+ Node* addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<uintptr_t>(module_->instance->globals_start +
- module_->module->globals[index].offset));
+ module_->module->globals[index].offset),
+ RelocInfo::WASM_GLOBAL_REFERENCE);
const Operator* op = jsgraph()->machine()->Load(mem_type);
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
*control_);
@@ -2340,13 +2691,13 @@ Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
return node;
}
-
-Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
- DCHECK(module_ && module_->instance && module_->instance->globals_start);
- MachineType mem_type = module_->GetGlobalType(index);
- Node* addr = jsgraph()->IntPtrConstant(
+Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
+ MachineType mem_type =
+ wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
+ Node* addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<uintptr_t>(module_->instance->globals_start +
- module_->module->globals[index].offset));
+ module_->module->globals[index].offset),
+ RelocInfo::WASM_GLOBAL_REFERENCE);
const Operator* op = jsgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -2355,49 +2706,65 @@ Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
return node;
}
-
void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
- uint32_t offset) {
- // TODO(turbofan): fold bounds checks for constant indexes.
+ uint32_t offset,
+ wasm::WasmCodePosition position) {
DCHECK(module_ && module_->instance);
- size_t size = module_->instance->mem_size;
+ uint32_t size = module_->instance->mem_size;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
- Node* cond;
+
+ // Check against the effective size.
+ size_t effective_size;
if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
- // The access will always throw.
- cond = jsgraph()->Int32Constant(0);
+ effective_size = 0;
} else {
- // Check against the limit.
- size_t limit = size - offset - memsize;
- CHECK(limit <= kMaxUInt32);
- cond = graph()->NewNode(
- jsgraph()->machine()->Uint32LessThanOrEqual(), index,
- jsgraph()->Int32Constant(static_cast<uint32_t>(limit)));
+ effective_size = size - offset - memsize + 1;
}
+ CHECK(effective_size <= kMaxUInt32);
- trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond);
+ Uint32Matcher m(index);
+ if (m.HasValue()) {
+ uint32_t value = m.Value();
+ if (value < effective_size) {
+ // The bounds check will always succeed.
+ return;
+ }
+ }
+
+ Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(), index,
+ jsgraph()->RelocatableInt32Constant(
+ static_cast<uint32_t>(effective_size),
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
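+// Worked example: with mem_size 0x10000, offset 8 and a 4-byte access,
+// effective_size is 0x10000 - 8 - 4 + 1 = 0xFFF5, and any index < 0xFFF5
+// keeps index + offset + 4 <= mem_size, so the Uint32LessThan above is the
+// exact in-bounds predicate.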
Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
- Node* index, uint32_t offset) {
+ Node* index, uint32_t offset,
+ uint32_t alignment,
+ wasm::WasmCodePosition position) {
Node* load;
- if (module_ && module_->asm_js()) {
- // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
- DCHECK_EQ(0, offset);
- const Operator* op = jsgraph()->machine()->CheckedLoad(memtype);
- load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
- *control_);
- } else {
- // WASM semantics throw on OOB. Introduce explicit bounds check.
- BoundsCheckMem(memtype, index, offset);
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset, position);
+ bool aligned = static_cast<int>(alignment) >=
+ ElementSizeLog2Of(memtype.representation());
+
+ if (aligned ||
+ jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
MemBuffer(offset), index, *effect_, *control_);
+ } else {
+ load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
+ MemBuffer(offset), index, *effect_, *control_);
}
*effect_ = load;
+#if defined(V8_TARGET_BIG_ENDIAN)
+ load = BuildChangeEndianness(load, memtype, type);
+#endif
+
if (type == wasm::kAstI64 &&
ElementSizeLog2Of(memtype.representation()) < 3) {
// TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
@@ -2416,39 +2783,70 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
- uint32_t offset, Node* val) {
+ uint32_t offset, uint32_t alignment, Node* val,
+ wasm::WasmCodePosition position) {
Node* store;
- if (module_ && module_->asm_js()) {
- // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
- DCHECK_EQ(0, offset);
- const Operator* op =
- jsgraph()->machine()->CheckedStore(memtype.representation());
- store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val, *effect_,
- *control_);
- } else {
- // WASM semantics throw on OOB. Introduce explicit bounds check.
- BoundsCheckMem(memtype, index, offset);
+
+ // WASM semantics throw on OOB. Introduce explicit bounds check.
+ BoundsCheckMem(memtype, index, offset, position);
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+
+ bool aligned = static_cast<int>(alignment) >=
+ ElementSizeLog2Of(memtype.representation());
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+ val = BuildChangeEndianness(val, memtype);
+#endif
+
+ if (aligned ||
+ jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
+ } else {
+ UnalignedStoreRepresentation rep(memtype.representation());
+ store =
+ graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
+ MemBuffer(offset), index, val, *effect_, *control_);
}
+
*effect_ = store;
+
return store;
}
+Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
+ // TODO(turbofan): fold bounds checks for constant asm.js loads.
+ // asm.js semantics use CheckedLoad (i.e. OOB reads return 0ish).
+ const Operator* op = jsgraph()->machine()->CheckedLoad(type);
+ Node* load = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), *effect_,
+ *control_);
+ *effect_ = load;
+ return load;
+}
+
+Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
+ Node* val) {
+ // TODO(turbofan): fold bounds checks for constant asm.js stores.
+ // asm.js semantics use CheckedStore (i.e. ignore OOB writes).
+ const Operator* op =
+ jsgraph()->machine()->CheckedStore(type.representation());
+ Node* store = graph()->NewNode(op, MemBuffer(0), index, MemSize(0), val,
+ *effect_, *control_);
+ *effect_ = store;
+ return val;
+}
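+// Unlike the trapping LoadMem/StoreMem paths above, the asm.js variants use
+// CheckedLoad/CheckedStore and never trap: out-of-bounds reads yield a
+// zero-ish value and out-of-bounds writes are ignored. BuildAsmjsStoreMem
+// returns the stored value, matching asm.js assignment-expression semantics.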
void WasmGraphBuilder::PrintDebugName(Node* node) {
PrintF("#%d:%s", node->id(), node->op()->mnemonic());
}
-
Node* WasmGraphBuilder::String(const char* string) {
return jsgraph()->Constant(
jsgraph()->isolate()->factory()->NewStringFromAsciiChecked(string));
}
-
Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
void WasmGraphBuilder::Int64LoweringForTesting() {
@@ -2460,45 +2858,52 @@ void WasmGraphBuilder::Int64LoweringForTesting() {
}
}
-static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
- CompilationInfo* info,
- const char* message, uint32_t index,
- wasm::WasmName func_name) {
- Isolate* isolate = info->isolate();
- if (isolate->logger()->is_logging_code_events() ||
- isolate->cpu_profiler()->is_profiling()) {
- ScopedVector<char> buffer(128);
- SNPrintF(buffer, "%s#%d:%.*s", message, index, func_name.length,
- func_name.name);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.start());
- Handle<String> script_str =
- isolate->factory()->NewStringFromAsciiChecked("(WASM)");
- Handle<Code> code = info->code();
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
- PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
- info, *script_str, 0, 0));
- }
-}
-
-Handle<JSFunction> CompileJSToWasmWrapper(
- Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
- Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index) {
- wasm::WasmFunction* func = &module->module->functions[index];
+void WasmGraphBuilder::SetSourcePosition(Node* node,
+ wasm::WasmCodePosition position) {
+ DCHECK_NE(position, wasm::kNoCodePosition);
+ compiler::SourcePosition pos(position);
+  if (source_position_table_) {
+    source_position_table_->SetSourcePosition(node, pos);
+  }
+}
- //----------------------------------------------------------------------------
- // Create the JSFunction object.
- //----------------------------------------------------------------------------
+Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
+ const NodeVector& inputs) {
+ switch (opcode) {
+ case wasm::kExprI32x4ExtractLane:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(),
+ inputs[0], inputs[1]);
+ case wasm::kExprI32x4Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
+                              inputs[0], inputs[0], inputs[0], inputs[0]);
+ default:
+ return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ }
+}
+
+static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+ Isolate* isolate, Handle<Code> code,
+ const char* message, uint32_t index,
+ const wasm::WasmName& module_name,
+ const wasm::WasmName& func_name) {
+ DCHECK(isolate->logger()->is_logging_code_events() ||
+ isolate->is_profiling());
+
+ ScopedVector<char> buffer(128);
+ SNPrintF(buffer, "%s#%d:%.*s:%.*s", message, index, module_name.length(),
+ module_name.start(), func_name.length(), func_name.start());
+ Handle<String> name_str =
+ isolate->factory()->NewStringFromAsciiChecked(buffer.start());
+ Handle<String> script_str =
+ isolate->factory()->NewStringFromAsciiChecked("(WASM)");
Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, wasm_code, false);
- int params = static_cast<int>(func->sig->parameter_count());
- shared->set_length(params);
- shared->set_internal_formal_parameter_count(params);
- Handle<JSFunction> function = isolate->factory()->NewFunction(
- isolate->wasm_function_map(), name, MaybeHandle<Code>());
- function->SetInternalField(0, *module_object);
- function->set_shared(*shared);
+ isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
+ PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
+ *script_str, 0, 0));
+}
+
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<Code> wasm_code, uint32_t index) {
+ const wasm::WasmFunction* func = &module->module->functions[index];
//----------------------------------------------------------------------------
// Create the Graph
@@ -2506,9 +2911,8 @@ Handle<JSFunction> CompileJSToWasmWrapper(
Zone zone(isolate->allocator());
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- JSOperatorBuilder javascript(&zone);
MachineOperatorBuilder machine(&zone);
- JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
@@ -2522,85 +2926,67 @@ Handle<JSFunction> CompileJSToWasmWrapper(
//----------------------------------------------------------------------------
// Run the compilation pipeline.
//----------------------------------------------------------------------------
- {
- // Changes lowering requires types.
- Typer typer(isolate, &graph);
- NodeVector roots(&zone);
- jsgraph.GetCachedNodes(&roots);
- typer.Run(roots);
-
- // Run generic and change lowering.
- JSGenericLowering generic(true, &jsgraph);
- ChangeLowering changes(&jsgraph);
- GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
- graph_reducer.AddReducer(&changes);
- graph_reducer.AddReducer(&generic);
- graph_reducer.ReduceGraph();
-
- if (FLAG_trace_turbo_graph) { // Simple textual RPO.
- OFStream os(stdout);
- os << "-- Graph after change lowering -- " << std::endl;
- os << AsRPO(graph);
- }
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Graph after change lowering -- " << std::endl;
+ os << AsRPO(graph);
+ }
- // Schedule and compile to machine code.
- int params = static_cast<int>(
- module->GetFunctionSignature(index)->parameter_count());
- CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
- &zone, false, params + 1, CallDescriptor::kNoFlags);
- Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
- bool debugging =
+ // Schedule and compile to machine code.
+ int params =
+ static_cast<int>(module->GetFunctionSignature(index)->parameter_count());
+ CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
+ &zone, false, params + 1, CallDescriptor::kNoFlags);
+ Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
+ bool debugging =
#if DEBUG
- true;
+ true;
#else
- FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+ FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
#endif
- const char* func_name = "js-to-wasm";
+ Vector<const char> func_name = ArrayVector("js-to-wasm");
- static unsigned id = 0;
- Vector<char> buffer;
- if (debugging) {
- buffer = Vector<char>::New(128);
- SNPrintF(buffer, "js-to-wasm#%d", id);
- func_name = buffer.start();
- }
+ static unsigned id = 0;
+ Vector<char> buffer;
+ if (debugging) {
+ buffer = Vector<char>::New(128);
+ int chars = SNPrintF(buffer, "js-to-wasm#%d", id);
+ func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
+ }
- CompilationInfo info(func_name, isolate, &zone, flags);
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+ CompilationInfo info(func_name, isolate, &zone, flags);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code && !code.is_null()) {
- OFStream os(stdout);
- code->Disassemble(buffer.start(), os);
- }
+ if (FLAG_print_opt_code && !code.is_null()) {
+ OFStream os(stdout);
+ code->Disassemble(buffer.start(), os);
+ }
#endif
- if (debugging) {
- buffer.Dispose();
- }
+ if (debugging) {
+ buffer.Dispose();
+ }
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
RecordFunctionCompilation(
- Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+ CodeEventListener::FUNCTION_TAG, isolate, code, "js-to-wasm", index,
+ wasm::WasmName("export"),
module->module->GetName(func->name_offset, func->name_length));
- // Set the JSFunction's machine code.
- function->set_code(*code);
}
- return function;
+ return code;
}
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
- Handle<JSFunction> function,
- wasm::FunctionSig* sig,
- wasm::WasmName module_name,
- wasm::WasmName function_name) {
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
+ wasm::FunctionSig* sig, uint32_t index,
+ Handle<String> import_module,
+ MaybeHandle<String> import_function) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
Zone zone(isolate->allocator());
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- JSOperatorBuilder javascript(&zone);
MachineOperatorBuilder machine(&zone);
- JSGraph jsgraph(isolate, &graph, &common, &javascript, nullptr, &machine);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
@@ -2608,25 +2994,10 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
WasmGraphBuilder builder(&zone, &jsgraph, sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.set_module(module);
- builder.BuildWasmToJSWrapper(function, sig);
+ builder.BuildWasmToJSWrapper(target, sig);
Handle<Code> code = Handle<Code>::null();
{
- // Changes lowering requires types.
- Typer typer(isolate, &graph);
- NodeVector roots(&zone);
- jsgraph.GetCachedNodes(&roots);
- typer.Run(roots);
-
- // Run generic and change lowering.
- JSGenericLowering generic(true, &jsgraph);
- ChangeLowering changes(&jsgraph);
- GraphReducer graph_reducer(&zone, &graph, jsgraph.Dead());
- graph_reducer.AddReducer(&changes);
- graph_reducer.AddReducer(&generic);
- graph_reducer.ReduceGraph();
-
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
os << "-- Graph after change lowering -- " << std::endl;
@@ -2636,6 +3007,9 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
// Schedule and compile to machine code.
CallDescriptor* incoming =
wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ if (machine.Is32()) {
+ incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ }
Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
bool debugging =
#if DEBUG
@@ -2643,13 +3017,13 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
#else
FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
#endif
- const char* func_name = "wasm-to-js";
+ Vector<const char> func_name = ArrayVector("wasm-to-js");
static unsigned id = 0;
Vector<char> buffer;
if (debugging) {
buffer = Vector<char>::New(128);
- SNPrintF(buffer, "wasm-to-js#%d", id);
- func_name = buffer.start();
+ int chars = SNPrintF(buffer, "wasm-to-js#%d", id);
+ func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
}
CompilationInfo info(func_name, isolate, &zone, flags);
@@ -2663,125 +3037,201 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
if (debugging) {
buffer.Dispose();
}
-
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
- module_name);
}
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+ const char* function_name = nullptr;
+    int function_name_size = 0;
+    // Keep the C string alive until RecordFunctionCompilation below has run;
+    // the temporary returned by ToCString() would otherwise dangle. (Assumes
+    // String::ToCString() returns base::SmartArrayPointer<char> here.)
+    base::SmartArrayPointer<char> function_name_cstr;
+    if (!import_function.is_null()) {
+      Handle<String> handle = import_function.ToHandleChecked();
+      function_name_cstr = handle->ToCString();
+      function_name = function_name_cstr.get();
+      function_name_size = handle->length();
+    }
+ RecordFunctionCompilation(
+ CodeEventListener::FUNCTION_TAG, isolate, code, "wasm-to-js", index,
+ {import_module->ToCString().get(), import_module->length()},
+ {function_name, function_name_size});
+ }
+
return code;
}
-
-// Helper function to compile a single function.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
- const wasm::WasmFunction& function) {
- if (FLAG_trace_wasm_compiler) {
- OFStream os(stdout);
- os << "Compiling WASM function "
- << wasm::WasmFunctionName(&function, module_env) << std::endl;
- os << std::endl;
- }
-
- double decode_ms = 0;
+SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
+ double* decode_ms) {
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
}
-
// Create a TF graph during decoding.
- Zone zone(isolate->allocator());
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
- WasmGraphBuilder builder(&zone, &jsgraph, function.sig);
+
+ Graph* graph = jsgraph_->graph();
+ CommonOperatorBuilder* common = jsgraph_->common();
+ MachineOperatorBuilder* machine = jsgraph_->machine();
+ SourcePositionTable* source_position_table =
+ new (jsgraph_->zone()) SourcePositionTable(graph);
+ WasmGraphBuilder builder(jsgraph_->zone(), jsgraph_, function_->sig,
+ source_position_table);
wasm::FunctionBody body = {
- module_env, function.sig, module_env->module->module_start,
- module_env->module->module_start + function.code_start_offset,
- module_env->module->module_start + function.code_end_offset};
- wasm::TreeResult result =
- wasm::BuildTFGraph(isolate->allocator(), &builder, body);
+ module_env_, function_->sig, module_env_->module->module_start,
+ module_env_->module->module_start + function_->code_start_offset,
+ module_env_->module->module_start + function_->code_end_offset};
+ graph_construction_result_ =
+ wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
- if (result.failed()) {
+ if (graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
- os << "Compilation failed: " << result << std::endl;
+ os << "Compilation failed: " << graph_construction_result_ << std::endl;
}
- // Add the function as another context for the exception
- ScopedVector<char> buffer(128);
- wasm::WasmName name =
- module_env->module->GetName(function.name_offset, function.name_length);
- SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
- function.func_index, name.length, name.name);
- thrower.Failed(buffer.start(), result);
- return Handle<Code>::null();
+ return nullptr;
}
- int index = static_cast<int>(function.func_index);
- if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
- PrintAst(isolate->allocator(), body);
+ if (machine->Is32()) {
+ Int64Lowering r(graph, machine, common, jsgraph_->zone(), function_->sig);
+ r.LowerGraph();
}
+ int index = static_cast<int>(function_->func_index);
+
+ if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
+ OFStream os(stdout);
+ PrintAst(isolate_->allocator(), body, os, nullptr);
+ }
if (FLAG_trace_wasm_decode_time) {
- decode_ms = decode_timer.Elapsed().InMillisecondsF();
+ *decode_ms = decode_timer.Elapsed().InMillisecondsF();
+ }
+ return source_position_table;
+}
+
+WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
+ Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function,
+ uint32_t index)
+ : thrower_(thrower),
+ isolate_(isolate),
+ module_env_(module_env),
+ function_(function),
+ graph_zone_(new Zone(isolate->allocator())),
+ jsgraph_(new (graph_zone()) JSGraph(
+ isolate, new (graph_zone()) Graph(graph_zone()),
+ new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
+ nullptr, new (graph_zone()) MachineOperatorBuilder(
+ graph_zone(), MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()))),
+ compilation_zone_(isolate->allocator()),
+ info_(function->name_length != 0
+ ? module_env->module->GetNameOrNull(function->name_offset,
+ function->name_length)
+ : ArrayVector("wasm"),
+ isolate, &compilation_zone_,
+ Code::ComputeFlags(Code::WASM_FUNCTION)),
+ job_(),
+ index_(index),
+ ok_(true) {
+  // Create and cache this node on the main thread.
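+  // ExecuteCompilation may later run on a background thread, where creating
+  // new heap constants would not be safe.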
+ jsgraph_->CEntryStubConstant(1);
+}
+
+void WasmCompilationUnit::ExecuteCompilation() {
+ // TODO(ahaas): The counters are not thread-safe at the moment.
+ // HistogramTimerScope wasm_compile_function_time_scope(
+ // isolate_->counters()->wasm_compile_function_time());
+ if (FLAG_trace_wasm_compiler) {
+ OFStream os(stdout);
+ os << "Compiling WASM function "
+ << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+ os << std::endl;
}
- base::ElapsedTimer compile_timer;
+ double decode_ms = 0;
+ size_t node_count = 0;
+
+ std::unique_ptr<Zone> graph_zone(graph_zone_.release());
+ SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
+
+ if (graph_construction_result_.failed()) {
+ ok_ = false;
+ return;
+ }
+
+ base::ElapsedTimer pipeline_timer;
if (FLAG_trace_wasm_decode_time) {
- compile_timer.Start();
+ node_count = jsgraph_->graph()->NodeCount();
+ pipeline_timer.Start();
}
+
// Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor =
- wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
- if (machine.Is32()) {
- descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
+ CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
+ &compilation_zone_, function_->sig);
+ if (jsgraph_->machine()->Is32()) {
+ descriptor =
+ module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+ }
+ job_.reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
+ descriptor, source_positions));
+ ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ // TODO(ahaas): The counters are not thread-safe at the moment.
+ // isolate_->counters()->wasm_compile_function_peak_memory_bytes()
+ // ->AddSample(
+ // static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
+
+ if (FLAG_trace_wasm_decode_time) {
+ double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+ "%0.3f ms pipeline\n",
+ static_cast<int>(function_->code_end_offset -
+ function_->code_start_offset),
+ decode_ms, node_count, pipeline_ms);
+ }
+}
+
+Handle<Code> WasmCompilationUnit::FinishCompilation() {
+ if (!ok_) {
+ if (graph_construction_result_.failed()) {
+      // Add the function as another context for the exception.
+ ScopedVector<char> buffer(128);
+ wasm::WasmName name = module_env_->module->GetName(
+ function_->name_offset, function_->name_length);
+ SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
+ function_->func_index, name.length(), name.start());
+ thrower_->Failed(buffer.start(), graph_construction_result_);
+ }
+
+ return Handle<Code>::null();
}
- Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
- // add flags here if a meaningful name is helpful for debugging.
- bool debugging =
-#if DEBUG
- true;
-#else
- FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
-#endif
- const char* func_name = "wasm";
- Vector<char> buffer;
- if (debugging) {
- buffer = Vector<char>::New(128);
- wasm::WasmName name =
- module_env->module->GetName(function.name_offset, function.name_length);
- SNPrintF(buffer, "WASM_function_#%d:%.*s", function.func_index, name.length,
- name.name);
- func_name = buffer.start();
+ if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
+ return Handle<Code>::null();
}
- CompilationInfo info(func_name, isolate, &zone, flags);
-
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
- if (debugging) {
- buffer.Dispose();
+ base::ElapsedTimer compile_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ compile_timer.Start();
}
- if (!code.is_null()) {
- RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "WASM_function",
- function.func_index,
- module_env->module->GetName(
- function.name_offset, function.name_length));
+ Handle<Code> code = info_.code();
+ DCHECK(!code.is_null());
+
+ if (isolate_->logger()->is_logging_code_events() ||
+ isolate_->is_profiling()) {
+ RecordFunctionCompilation(
+ CodeEventListener::FUNCTION_TAG, isolate_, code, "WASM_function",
+ function_->func_index, wasm::WasmName("module"),
+ module_env_->module->GetName(function_->name_offset,
+ function_->name_length));
}
if (FLAG_trace_wasm_decode_time) {
double compile_ms = compile_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compile ok: %d bytes, %0.3f ms decode, %d nodes, %0.3f ms "
- "compile\n",
- static_cast<int>(function.code_end_offset - function.code_start_offset),
- decode_ms, static_cast<int>(graph.NodeCount()), compile_ms);
+ PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
+ static_cast<int>(function_->code_end_offset -
+ function_->code_start_offset),
+ compile_ms);
}
+
return code;
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index bbcafa7296..487ddcb760 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -5,9 +5,13 @@
#ifndef V8_COMPILER_WASM_COMPILER_H_
#define V8_COMPILER_WASM_COMPILER_H_
+#include <memory>
+
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/compiler.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
#include "src/zone.h"
namespace v8 {
@@ -18,44 +22,83 @@ namespace compiler {
class Node;
class JSGraph;
class Graph;
-}
+class Operator;
+class SourcePositionTable;
+} // namespace compiler
namespace wasm {
// Forward declarations for some WASM data structures.
struct ModuleEnv;
struct WasmFunction;
class ErrorThrower;
+struct DecodeStruct;
// Expose {Node} and {Graph} opaquely as {wasm::TFNode} and {wasm::TFGraph}.
typedef compiler::Node TFNode;
typedef compiler::JSGraph TFGraph;
-}
+} // namespace wasm
namespace compiler {
-// Compiles a single function, producing a code object.
-Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
- const wasm::WasmFunction& function);
+class WasmCompilationUnit final {
+ public:
+ WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function, uint32_t index);
+
+ Zone* graph_zone() { return graph_zone_.get(); }
+ int index() const { return index_; }
+
+ void ExecuteCompilation();
+ Handle<Code> FinishCompilation();
+
+ static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
+ Isolate* isolate,
+ wasm::ModuleEnv* module_env,
+ const wasm::WasmFunction* function) {
+ WasmCompilationUnit unit(thrower, isolate, module_env, function, 0);
+ unit.ExecuteCompilation();
+ return unit.FinishCompilation();
+ }
+
+ private:
+ SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+
+ wasm::ErrorThrower* thrower_;
+ Isolate* isolate_;
+ wasm::ModuleEnv* module_env_;
+ const wasm::WasmFunction* function_;
+ // The graph zone is deallocated at the end of ExecuteCompilation.
+ std::unique_ptr<Zone> graph_zone_;
+ JSGraph* jsgraph_;
+ Zone compilation_zone_;
+ CompilationInfo info_;
+ std::unique_ptr<CompilationJob> job_;
+ uint32_t index_;
+ wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
+ bool ok_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
+};
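+// Intended two-phase usage: ExecuteCompilation() runs the TurboFan pipeline
+// for the function body (and is kept free of main-thread-only work, see the
+// thread-safety TODOs in the implementation), while FinishCompilation()
+// finalizes the job and materializes the code object on the main thread.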
// Wraps a JS function, producing a code object that can be called from WASM.
-Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
- Handle<JSFunction> function,
- wasm::FunctionSig* sig,
- wasm::WasmName module_name,
- wasm::WasmName function_name);
-
-// Wraps a given wasm code object, producing a JSFunction that can be called
-// from JavaScript.
-Handle<JSFunction> CompileJSToWasmWrapper(
- Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
- Handle<Code> wasm_code, Handle<JSObject> module_object, uint32_t index);
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
+ wasm::FunctionSig* sig, uint32_t index,
+ Handle<String> import_module,
+ MaybeHandle<String> import_function);
+
+// Wraps a given wasm code object, producing a code object that can be
+// called from JavaScript.
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+ Handle<Code> wasm_code, uint32_t index);
// Abstracts details of building TurboFan graph nodes for WASM to separate
// the WASM decoder from the internal details of TurboFan.
class WasmTrapHelper;
+typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
- WasmGraphBuilder(Zone* z, JSGraph* g, wasm::FunctionSig* function_signature);
+ WasmGraphBuilder(
+ Zone* z, JSGraph* g, wasm::FunctionSig* function_signature,
+ compiler::SourcePositionTable* source_position_table = nullptr);
Node** Buffer(size_t count) {
if (count > cur_bufsize_) {
@@ -78,17 +121,23 @@ class WasmGraphBuilder {
Node* Merge(unsigned count, Node** controls);
Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
Node* EffectPhi(unsigned count, Node** effects, Node* control);
+ Node* NumberConstant(int32_t value);
+ Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
Node* Float32Constant(float value);
Node* Float64Constant(double value);
- Node* Constant(Handle<Object> value);
- Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right);
- Node* Unop(wasm::WasmOpcode opcode, Node* input);
+ Node* HeapConstant(Handle<HeapObject> value);
+ Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
+ wasm::WasmCodePosition position = wasm::kNoCodePosition);
+ Node* Unop(wasm::WasmOpcode opcode, Node* input,
+ wasm::WasmCodePosition position = wasm::kNoCodePosition);
unsigned InputCount(Node* node);
bool IsPhiWithMerge(Node* phi, Node* merge);
void AppendToMerge(Node* merge, Node* from);
- void AppendToPhi(Node* merge, Node* phi, Node* from);
+ void AppendToPhi(Node* phi, Node* from);
+
+ void StackCheck(wasm::WasmCodePosition position);
//-----------------------------------------------------------------------
// Operations that read and/or write {control} and {effect}.
@@ -99,28 +148,34 @@ class WasmGraphBuilder {
Node* IfDefault(Node* sw);
Node* Return(unsigned count, Node** vals);
Node* ReturnVoid();
- Node* Unreachable();
-
- Node* CallDirect(uint32_t index, Node** args);
- Node* CallImport(uint32_t index, Node** args);
- Node* CallIndirect(uint32_t index, Node** args);
+ Node* Unreachable(wasm::WasmCodePosition position);
+
+ Node* CallDirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position);
+ Node* CallImport(uint32_t index, Node** args,
+ wasm::WasmCodePosition position);
+ Node* CallIndirect(uint32_t index, Node** args,
+ wasm::WasmCodePosition position);
void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
- void BuildWasmToJSWrapper(Handle<JSFunction> function,
- wasm::FunctionSig* sig);
- Node* ToJS(Node* node, Node* context, wasm::LocalType type);
+ void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
+
+ Node* ToJS(Node* node, wasm::LocalType type);
Node* FromJS(Node* node, Node* context, wasm::LocalType type);
Node* Invert(Node* node);
- Node* FunctionTable();
+ Node* FunctionTable(uint32_t index);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
//-----------------------------------------------------------------------
Node* MemSize(uint32_t offset);
- Node* LoadGlobal(uint32_t index);
- Node* StoreGlobal(uint32_t index, Node* val);
+ Node* GetGlobal(uint32_t index);
+ Node* SetGlobal(uint32_t index, Node* val);
Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
- uint32_t offset);
- Node* StoreMem(MachineType type, Node* index, uint32_t offset, Node* val);
+ uint32_t offset, uint32_t alignment,
+ wasm::WasmCodePosition position);
+ Node* StoreMem(MachineType type, Node* index, uint32_t offset,
+ uint32_t alignment, Node* val,
+ wasm::WasmCodePosition position);
static void PrintDebugName(Node* node);
@@ -137,6 +192,10 @@ class WasmGraphBuilder {
void Int64LoweringForTesting();
+ void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
+
+ Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
+
private:
static const int kDefaultBufferSize = 16;
friend class WasmTrapHelper;
@@ -146,7 +205,7 @@ class WasmGraphBuilder {
wasm::ModuleEnv* module_;
Node* mem_buffer_;
Node* mem_size_;
- Node* function_table_;
+ NodeVector function_tables_;
Node** control_;
Node** effect_;
Node** cur_buffer_;
@@ -155,6 +214,9 @@ class WasmGraphBuilder {
WasmTrapHelper* trap_;
wasm::FunctionSig* function_signature_;
+ SetOncePointer<const Operator> allocate_heap_number_operator_;
+
+ compiler::SourcePositionTable* source_position_table_ = nullptr;
// Internal helper methods.
JSGraph* jsgraph() { return jsgraph_; }
@@ -162,30 +224,32 @@ class WasmGraphBuilder {
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
- void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+ void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
+ wasm::WasmCodePosition position);
+
+ Node* BuildChangeEndianness(Node* node, MachineType type,
+ wasm::LocalType wasmtype = wasm::kAstStmt);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
Node* BuildCCall(MachineSignature* sig, Node** args);
- Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
+ Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args,
+ wasm::WasmCodePosition position);
- Node* BuildF32Neg(Node* input);
- Node* BuildF64Neg(Node* input);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
- Node* BuildF32Min(Node* left, Node* right);
- Node* BuildF32Max(Node* left, Node* right);
- Node* BuildF64Min(Node* left, Node* right);
- Node* BuildF64Max(Node* left, Node* right);
- Node* BuildI32SConvertF32(Node* input);
- Node* BuildI32SConvertF64(Node* input);
- Node* BuildI32UConvertF32(Node* input);
- Node* BuildI32UConvertF64(Node* input);
+ Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position);
Node* BuildI32Ctz(Node* input);
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
Node* BuildI64Popcnt(Node* input);
+ Node* BuildBitCountingCall(Node* input, ExternalReference ref,
+ MachineRepresentation input_type);
+
Node* BuildCFuncInstruction(ExternalReference ref, MachineType type,
Node* input0, Node* input1 = nullptr);
Node* BuildF32Trunc(Node* input);
@@ -201,14 +265,7 @@ class WasmGraphBuilder {
Node* BuildF64Acos(Node* input);
Node* BuildF64Asin(Node* input);
- Node* BuildF64Atan(Node* input);
- Node* BuildF64Cos(Node* input);
- Node* BuildF64Sin(Node* input);
- Node* BuildF64Tan(Node* input);
- Node* BuildF64Exp(Node* input);
- Node* BuildF64Log(Node* input);
Node* BuildF64Pow(Node* left, Node* right);
- Node* BuildF64Atan2(Node* left, Node* right);
Node* BuildF64Mod(Node* left, Node* right);
Node* BuildIntToFloatConversionInstruction(
@@ -223,23 +280,54 @@ class WasmGraphBuilder {
Node* BuildFloatToIntConversionInstruction(
Node* input, ExternalReference ref,
MachineRepresentation parameter_representation,
- const MachineType result_type);
- Node* BuildI64SConvertF32(Node* input);
- Node* BuildI64UConvertF32(Node* input);
- Node* BuildI64SConvertF64(Node* input);
- Node* BuildI64UConvertF64(Node* input);
-
- Node* BuildI32DivS(Node* left, Node* right);
- Node* BuildI32RemS(Node* left, Node* right);
- Node* BuildI32DivU(Node* left, Node* right);
- Node* BuildI32RemU(Node* left, Node* right);
-
- Node* BuildI64DivS(Node* left, Node* right);
- Node* BuildI64RemS(Node* left, Node* right);
- Node* BuildI64DivU(Node* left, Node* right);
- Node* BuildI64RemU(Node* left, Node* right);
+ const MachineType result_type, wasm::WasmCodePosition position);
+ Node* BuildI64SConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI64UConvertF32(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI64SConvertF64(Node* input, wasm::WasmCodePosition position);
+ Node* BuildI64UConvertF64(Node* input, wasm::WasmCodePosition position);
+
+ Node* BuildI32DivS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI32RemS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI32DivU(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI32RemU(Node* left, Node* right, wasm::WasmCodePosition position);
+
+ Node* BuildI64DivS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI64RemS(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI64DivU(Node* left, Node* right, wasm::WasmCodePosition position);
+ Node* BuildI64RemU(Node* left, Node* right, wasm::WasmCodePosition position);
Node* BuildDiv64Call(Node* left, Node* right, ExternalReference ref,
- MachineType result_type, int trap_zero);
+ MachineType result_type, int trap_zero,
+ wasm::WasmCodePosition position);
+
+ Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
+ Node* control);
+ Node* BuildChangeInt32ToTagged(Node* value);
+ Node* BuildChangeFloat64ToTagged(Node* value);
+ Node* BuildChangeTaggedToFloat64(Node* value);
+
+ Node* BuildChangeInt32ToSmi(Node* value);
+ Node* BuildChangeSmiToInt32(Node* value);
+ Node* BuildChangeUint32ToSmi(Node* value);
+ Node* BuildChangeSmiToFloat64(Node* value);
+ Node* BuildTestNotSmi(Node* value);
+ Node* BuildSmiShiftBitsConstant();
+
+ Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control);
+ Node* BuildLoadHeapNumberValue(Node* value, Node* control);
+ Node* BuildHeapNumberValueIndexConstant();
+ Node* BuildGrowMemory(Node* input);
+
+ // Asm.js specific functionality.
+ Node* BuildI32AsmjsSConvertF32(Node* input);
+ Node* BuildI32AsmjsSConvertF64(Node* input);
+ Node* BuildI32AsmjsUConvertF32(Node* input);
+ Node* BuildI32AsmjsUConvertF64(Node* input);
+ Node* BuildI32AsmjsDivS(Node* left, Node* right);
+ Node* BuildI32AsmjsRemS(Node* left, Node* right);
+ Node* BuildI32AsmjsDivU(Node* left, Node* right);
+ Node* BuildI32AsmjsRemU(Node* left, Node* right);
+ Node* BuildAsmjsLoadMem(MachineType type, Node* index);
+ Node* BuildAsmjsStoreMem(MachineType type, Node* index, Node* val);
Node** Realloc(Node** buffer, size_t old_count, size_t new_count) {
Node** buf = Buffer(new_count);
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index f0e14ce3e9..c50f643910 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/assembler.h"
+#include "src/base/lazy-instance.h"
#include "src/macro-assembler.h"
+#include "src/register-configuration.h"
#include "src/wasm/wasm-module.h"
@@ -21,6 +23,7 @@ using compiler::CallDescriptor;
using compiler::LinkageLocation;
namespace {
+
MachineType MachineTypeFor(LocalType type) {
switch (type) {
case kAstI32:
@@ -31,26 +34,24 @@ MachineType MachineTypeFor(LocalType type) {
return MachineType::Float64();
case kAstF32:
return MachineType::Float32();
+ case kAstS128:
+ return MachineType::Simd128();
default:
UNREACHABLE();
return MachineType::AnyTagged();
}
}
-
-// Platform-specific configuration for C calling convention.
-LinkageLocation regloc(Register reg) {
- return LinkageLocation::ForRegister(reg.code());
+LinkageLocation regloc(Register reg, MachineType type) {
+ return LinkageLocation::ForRegister(reg.code(), type);
}
-
-LinkageLocation regloc(DoubleRegister reg) {
- return LinkageLocation::ForRegister(reg.code());
+LinkageLocation regloc(DoubleRegister reg, MachineType type) {
+ return LinkageLocation::ForRegister(reg.code(), type);
}
-
-LinkageLocation stackloc(int i) {
- return LinkageLocation::ForCallerFrameSlot(i);
+LinkageLocation stackloc(int i, MachineType type) {
+ return LinkageLocation::ForCallerFrameSlot(i, type);
}
@@ -58,7 +59,7 @@ LinkageLocation stackloc(int i) {
// ===========================================================================
// == ia32 ===================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -176,20 +177,21 @@ struct Allocator {
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
- return regloc(fp_regs[fp_offset++]);
+ DoubleRegister reg = fp_regs[fp_offset++];
+ return regloc(reg, MachineTypeFor(type));
} else {
int offset = -1 - stack_offset;
stack_offset += Words(type);
- return stackloc(offset);
+ return stackloc(offset, MachineTypeFor(type));
}
} else {
// Allocate a general purpose register/stack location.
if (gp_offset < gp_count) {
- return regloc(gp_regs[gp_offset++]);
+ return regloc(gp_regs[gp_offset++], MachineTypeFor(type));
} else {
int offset = -1 - stack_offset;
stack_offset += Words(type);
- return stackloc(offset);
+ return stackloc(offset, MachineTypeFor(type));
}
}
}
@@ -197,11 +199,7 @@ struct Allocator {
return type == kAstF32 || type == kAstF64;
}
int Words(LocalType type) {
- // The code generation for pushing parameters on the stack does not
- // distinguish between float32 and float64. Therefore also float32 needs
- // two words.
- if (kPointerSize < 8 &&
- (type == kAstI64 || type == kAstF64 || type == kAstF32)) {
+ if (kPointerSize < 8 && (type == kAstI64 || type == kAstF64)) {
return 2;
}
return 1;
@@ -209,81 +207,83 @@ struct Allocator {
};
} // namespace
-static Allocator GetReturnRegisters() {
-#ifdef GP_RETURN_REGISTERS
- static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
- static const int kGPReturnRegistersCount =
- static_cast<int>(arraysize(kGPReturnRegisters));
+struct ParameterRegistersCreateTrait {
+ static void Construct(Allocator* allocated_ptr) {
+#ifdef GP_PARAM_REGISTERS
+ static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
+ static const int kGPParamRegistersCount =
+ static_cast<int>(arraysize(kGPParamRegisters));
#else
- static const Register* kGPReturnRegisters = nullptr;
- static const int kGPReturnRegistersCount = 0;
+ static const Register* kGPParamRegisters = nullptr;
+ static const int kGPParamRegistersCount = 0;
#endif
-#ifdef FP_RETURN_REGISTERS
- static const DoubleRegister kFPReturnRegisters[] = {FP_RETURN_REGISTERS};
- static const int kFPReturnRegistersCount =
- static_cast<int>(arraysize(kFPReturnRegisters));
+#ifdef FP_PARAM_REGISTERS
+ static const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
+ static const int kFPParamRegistersCount =
+ static_cast<int>(arraysize(kFPParamRegisters));
#else
- static const DoubleRegister* kFPReturnRegisters = nullptr;
- static const int kFPReturnRegistersCount = 0;
+ static const DoubleRegister* kFPParamRegisters = nullptr;
+ static const int kFPParamRegistersCount = 0;
#endif
- Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
- kFPReturnRegisters, kFPReturnRegistersCount);
+ new (allocated_ptr) Allocator(kGPParamRegisters, kGPParamRegistersCount,
+ kFPParamRegisters, kFPParamRegistersCount);
+ }
+};
- return rets;
-}
+static base::LazyInstance<Allocator, ParameterRegistersCreateTrait>::type
+ parameter_registers = LAZY_INSTANCE_INITIALIZER;
-static Allocator GetParameterRegisters() {
-#ifdef GP_PARAM_REGISTERS
- static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
- static const int kGPParamRegistersCount =
- static_cast<int>(arraysize(kGPParamRegisters));
+struct ReturnRegistersCreateTrait {
+ static void Construct(Allocator* allocated_ptr) {
+#ifdef GP_RETURN_REGISTERS
+ static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
+ static const int kGPReturnRegistersCount =
+ static_cast<int>(arraysize(kGPReturnRegisters));
#else
- static const Register* kGPParamRegisters = nullptr;
- static const int kGPParamRegistersCount = 0;
+ static const Register* kGPReturnRegisters = nullptr;
+ static const int kGPReturnRegistersCount = 0;
#endif
-#ifdef FP_PARAM_REGISTERS
- static const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
- static const int kFPParamRegistersCount =
- static_cast<int>(arraysize(kFPParamRegisters));
+#ifdef FP_RETURN_REGISTERS
+ static const DoubleRegister kFPReturnRegisters[] = {FP_RETURN_REGISTERS};
+ static const int kFPReturnRegistersCount =
+ static_cast<int>(arraysize(kFPReturnRegisters));
#else
- static const DoubleRegister* kFPParamRegisters = nullptr;
- static const int kFPParamRegistersCount = 0;
+ static const DoubleRegister* kFPReturnRegisters = nullptr;
+ static const int kFPReturnRegistersCount = 0;
#endif
- Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
- kFPParamRegistersCount);
+ new (allocated_ptr) Allocator(kGPReturnRegisters, kGPReturnRegistersCount,
+ kFPReturnRegisters, kFPReturnRegistersCount);
+ }
+};
- return params;
-}
+static base::LazyInstance<Allocator, ReturnRegistersCreateTrait>::type
+ return_registers = LAZY_INSTANCE_INITIALIZER;
// General code uses the above configuration data.
CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
FunctionSig* fsig) {
- MachineSignature::Builder msig(zone, fsig->return_count(),
- fsig->parameter_count());
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count());
- Allocator rets = GetReturnRegisters();
+ Allocator rets = return_registers.Get();
// Add return location(s).
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
LocalType ret = fsig->GetReturn(i);
- msig.AddReturn(MachineTypeFor(ret));
locations.AddReturn(rets.Next(ret));
}
- Allocator params = GetParameterRegisters();
+ Allocator params = parameter_registers.Get();
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
LocalType param = fsig->GetParam(i);
- msig.AddParam(MachineTypeFor(param));
locations.AddParam(params.Next(param));
}
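
The hunk above swaps the old GetParameterRegisters()/GetReturnRegisters() helpers, which rebuilt an Allocator on every call, for base::LazyInstance values constructed exactly once through the *CreateTrait structs. A sketch of the same once-only pattern using a plain C++11 function-local static (illustrative types, not V8's API):

    struct Allocator {
      const int* gp_regs;  // platform register codes
      int gp_count;
      Allocator(const int* regs, int count) : gp_regs(regs), gp_count(count) {}
    };

    const Allocator& ParameterRegisters() {
      static const int kGPParams[] = {0, 2, 1};      // placeholder codes
      static const Allocator instance(kGPParams, 3);
      return instance;  // constructed on first use, thread-safe in C++11
    }
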
@@ -292,13 +292,12 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
// The target for WASM calls is always a code object.
MachineType target_type = MachineType::AnyTagged();
- LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
- msig.Build(), // machine_sig
locations.Build(), // location_sig
params.stack_offset, // stack_parameter_count
compiler::Operator::kNoProperties, // properties
@@ -310,58 +309,52 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
Zone* zone, CallDescriptor* descriptor) {
- const MachineSignature* signature = descriptor->GetMachineSignature();
- size_t parameter_count = signature->parameter_count();
- size_t return_count = signature->return_count();
- for (size_t i = 0; i < signature->parameter_count(); i++) {
- if (signature->GetParam(i) == MachineType::Int64()) {
+ size_t parameter_count = descriptor->ParameterCount();
+ size_t return_count = descriptor->ReturnCount();
+ for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
+ if (descriptor->GetParameterType(i) == MachineType::Int64()) {
// For each int64 input we get two int32 inputs.
parameter_count++;
}
}
- for (size_t i = 0; i < signature->return_count(); i++) {
- if (signature->GetReturn(i) == MachineType::Int64()) {
+ for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+ if (descriptor->GetReturnType(i) == MachineType::Int64()) {
// For each int64 return we get two int32 returns.
return_count++;
}
}
- if (parameter_count == signature->parameter_count() &&
- return_count == signature->return_count()) {
+ if (parameter_count == descriptor->ParameterCount() &&
+ return_count == descriptor->ReturnCount()) {
// If there is no int64 parameter or return value, we can just return the
// original descriptor.
return descriptor;
}
- MachineSignature::Builder msig(zone, return_count, parameter_count);
LocationSignature::Builder locations(zone, return_count, parameter_count);
- Allocator rets = GetReturnRegisters();
+ Allocator rets = return_registers.Get();
- for (size_t i = 0; i < signature->return_count(); i++) {
- if (signature->GetReturn(i) == MachineType::Int64()) {
+ for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+ if (descriptor->GetReturnType(i) == MachineType::Int64()) {
// For each int64 return we get two int32 returns.
- msig.AddReturn(MachineType::Int32());
- msig.AddReturn(MachineType::Int32());
locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
} else {
- msig.AddReturn(signature->GetReturn(i));
- locations.AddReturn(rets.Next(signature->GetReturn(i).representation()));
+ locations.AddReturn(
+ rets.Next(descriptor->GetReturnType(i).representation()));
}
}
- Allocator params = GetParameterRegisters();
+ Allocator params = parameter_registers.Get();
- for (size_t i = 0; i < signature->parameter_count(); i++) {
- if (signature->GetParam(i) == MachineType::Int64()) {
+ for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
+ if (descriptor->GetParameterType(i) == MachineType::Int64()) {
// For each int64 input we get two int32 inputs.
- msig.AddParam(MachineType::Int32());
- msig.AddParam(MachineType::Int32());
locations.AddParam(params.Next(MachineRepresentation::kWord32));
locations.AddParam(params.Next(MachineRepresentation::kWord32));
} else {
- msig.AddParam(signature->GetParam(i));
- locations.AddParam(params.Next(signature->GetParam(i).representation()));
+ locations.AddParam(
+ params.Next(descriptor->GetParameterType(i).representation()));
}
}
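
The loops above mirror the earlier counting pass: on 32-bit targets every Int64 parameter or return occupies two Int32 slots. A compact sketch of that expansion rule (simplified enum standing in for MachineType):

    #include <cstddef>
    #include <vector>

    enum class MType { kInt32, kInt64, kFloat32, kFloat64 };

    size_t ExpandedSlotCount(const std::vector<MType>& slots) {
      size_t count = slots.size();
      for (MType t : slots) {
        if (t == MType::kInt64) ++count;  // each i64 adds one extra i32 slot
      }
      return count;
    }
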
@@ -369,7 +362,6 @@ CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
descriptor->kind(), // kind
descriptor->GetInputType(0), // target MachineType
descriptor->GetInputLocation(0), // target location
- msig.Build(), // machine_sig
locations.Build(), // location_sig
params.stack_offset, // stack_parameter_count
descriptor->properties(), // properties
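
Throughout this file the separate MachineSignature is dropped and each LinkageLocation now carries its MachineType directly. A rough sketch of the shape of that change (illustrative struct, not V8's LinkageLocation):

    enum class MType { kInt32, kInt64, kFloat32, kFloat64, kSimd128 };

    struct Location {
      enum Kind { kRegister, kCallerFrameSlot } kind;
      int index;   // register code, or caller frame slot for stack locations
      MType type;  // the type travels with the location itself
    };

    Location RegLoc(int code, MType type) {
      return {Location::kRegister, code, type};
    }
    Location StackLoc(int slot, MType type) {
      return {Location::kCallerFrameSlot, slot, type};
    }
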
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 2e4eccb483..5e1ef6ba1a 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -18,10 +18,6 @@ namespace compiler {
#define __ masm()->
-
-#define kScratchDoubleReg xmm0
-
-
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
public:
@@ -44,11 +40,16 @@ class X64OperandConverter : public InstructionOperandConverter {
DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
return Immediate(0);
}
+ if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ return Immediate(constant.ToInt32(), constant.rmode());
+ }
return Immediate(constant.ToInt32());
}
Operand ToOperand(InstructionOperand* op, int extra = 0) {
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
@@ -163,35 +164,59 @@ class OutOfLineLoadZero final : public OutOfLineCode {
Register const result_;
};
-
-class OutOfLineLoadNaN final : public OutOfLineCode {
+class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
- OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
+ OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
- void Generate() final { __ Pcmpeqd(result_, result_); }
+ void Generate() final {
+ __ Xorps(result_, result_);
+ __ Divss(result_, result_);
+ }
private:
XMMRegister const result_;
};
+class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() final {
+ __ Xorpd(result_, result_);
+ __ Divsd(result_, result_);
+ }
+
+ private:
+ XMMRegister const result_;
+};
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
- XMMRegister input)
- : OutOfLineCode(gen), result_(result), input_(input) {}
+ XMMRegister input,
+ UnwindingInfoWriter* unwinding_info_writer)
+ : OutOfLineCode(gen),
+ result_(result),
+ input_(input),
+ unwinding_info_writer_(unwinding_info_writer) {}
void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
+ unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
__ SlowTruncateToI(result_, rsp, 0);
__ addp(rsp, Immediate(kDoubleSize));
+ unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kDoubleSize);
}
private:
Register const result_;
XMMRegister const input_;
+ UnwindingInfoWriter* const unwinding_info_writer_;
};
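
The new OutOfLineLoadFloat32NaN/OutOfLineLoadFloat64NaN classes above materialize a NaN by emitting xorps/divss (resp. xorpd/divsd), i.e. computing 0/0 in the target width. A sketch of the IEEE 754 identity they rely on:

    #include <cassert>
    #include <cmath>

    float QuietNaN32() {
      volatile float zero = 0.0f;  // volatile keeps the division at runtime
      float nan = zero / zero;     // IEEE 754: 0/0 produces a quiet NaN
      assert(std::isnan(nan));
      return nan;
    }
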
@@ -341,31 +366,28 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
-
#define ASSEMBLE_SSE_BINOP(asm_instr) \
do { \
- if (instr->InputAt(1)->IsDoubleRegister()) { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
__ asm_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
} else { \
__ asm_instr(i.InputDoubleRegister(0), i.InputOperand(1)); \
} \
} while (0)
-
#define ASSEMBLE_SSE_UNOP(asm_instr) \
do { \
- if (instr->InputAt(0)->IsDoubleRegister()) { \
+ if (instr->InputAt(0)->IsFPRegister()) { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
} else { \
__ asm_instr(i.OutputDoubleRegister(), i.InputOperand(0)); \
} \
} while (0)
-
#define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \
CpuFeatureScope avx_scope(masm(), AVX); \
- if (instr->InputAt(1)->IsDoubleRegister()) { \
+ if (instr->InputAt(1)->IsFPRegister()) { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
} else { \
@@ -374,13 +396,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
do { \
auto result = i.OutputDoubleRegister(); \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
OutOfLineCode* ool; \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
@@ -388,25 +409,27 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ cmpl(index1, length); \
ool = new (zone()) OutOfLineLoadNaN(this, result); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineLoadFloat final : public OutOfLineCode { \
public: \
OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
Register buffer, Register index1, int32_t index2, \
- int32_t length) \
+ int32_t length, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
result_(result), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
- length_(length) {} \
+ length_(length), \
+ rmode_(rmode) {} \
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
__ Pcmpeqd(result_, result_); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, exit()); \
__ asm_instr(result_, \
Operand(buffer_, kScratchRegister, times_1, 0)); \
@@ -418,22 +441,22 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const index1_; \
int32_t const index2_; \
int32_t const length_; \
+ RelocInfo::Mode rmode_; \
}; \
- ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length); \
+ ool = new (zone()) OutOfLineLoadFloat(this, result, buffer, index1, \
+ index2, length, rmode); \
} \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
__ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
OutOfLineCode* ool; \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
@@ -441,25 +464,27 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ cmpl(index1, length); \
ool = new (zone()) OutOfLineLoadZero(this, result); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineLoadInteger final : public OutOfLineCode { \
public: \
OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
Register buffer, Register index1, int32_t index2, \
- int32_t length) \
+ int32_t length, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
result_(result), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
- length_(length) {} \
+ length_(length), \
+ rmode_(rmode) {} \
\
void Generate() final { \
Label oob; \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, &oob, Label::kNear); \
__ asm_instr(result_, \
Operand(buffer_, kScratchRegister, times_1, 0)); \
@@ -474,21 +499,21 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const index1_; \
int32_t const index2_; \
int32_t const length_; \
+ RelocInfo::Mode const rmode_; \
}; \
- ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length); \
+ ool = new (zone()) OutOfLineLoadInteger(this, result, buffer, index1, \
+ index2, length, rmode); \
} \
__ j(above_equal, ool->entry()); \
__ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
__ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
do { \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
auto value = i.InputDoubleRegister(4); \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
@@ -499,24 +524,26 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(&done); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineStoreFloat final : public OutOfLineCode { \
public: \
OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
Register index1, int32_t index2, int32_t length, \
- XMMRegister value) \
+ XMMRegister value, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
length_(length), \
- value_(value) {} \
+ value_(value), \
+ rmode_(rmode) {} \
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, exit()); \
__ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
value_); \
@@ -528,21 +555,21 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
int32_t const index2_; \
int32_t const length_; \
XMMRegister const value_; \
+ RelocInfo::Mode rmode_; \
}; \
- auto ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value); \
+ auto ool = new (zone()) OutOfLineStoreFloat( \
+ this, buffer, index1, index2, length, value, rmode); \
__ j(above_equal, ool->entry()); \
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(ool->exit()); \
} \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
do { \
auto buffer = i.InputRegister(0); \
auto index1 = i.InputRegister(1); \
- auto index2 = i.InputInt32(2); \
+ auto index2 = i.InputUint32(2); \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
DCHECK_EQ(0, index2); \
@@ -552,24 +579,26 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(&done); \
} else { \
- auto length = i.InputInt32(3); \
+ auto length = i.InputUint32(3); \
+ RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
DCHECK_LE(index2, length); \
- __ cmpq(index1, Immediate(length - index2)); \
+ __ cmpl(index1, Immediate(length - index2, rmode)); \
class OutOfLineStoreInteger final : public OutOfLineCode { \
public: \
OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
Register index1, int32_t index2, int32_t length, \
- Value value) \
+ Value value, RelocInfo::Mode rmode) \
: OutOfLineCode(gen), \
buffer_(buffer), \
index1_(index1), \
index2_(index2), \
length_(length), \
- value_(value) {} \
+ value_(value), \
+ rmode_(rmode) {} \
\
void Generate() final { \
__ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_)); \
+ __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
__ j(above_equal, exit()); \
__ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
value_); \
@@ -581,16 +610,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
int32_t const index2_; \
int32_t const length_; \
Value const value_; \
+ RelocInfo::Mode rmode_; \
}; \
- auto ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value); \
+ auto ool = new (zone()) OutOfLineStoreInteger( \
+ this, buffer, index1, index2, length, value, rmode); \
__ j(above_equal, ool->entry()); \
__ asm_instr(Operand(buffer, index1, times_1, index2), value); \
__ bind(ool->exit()); \
} \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
if (instr->InputAt(4)->IsRegister()) { \
@@ -602,28 +631,27 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (false)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ __ PrepareCallCFunction(2); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 2); \
+ } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ __ PrepareCallCFunction(1); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 1); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
+ unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
__ popq(rbp);
}
-void CodeGenerator::AssembleSetupStackPointer() {}
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ addq(rsp, Immediate(sp_slot_delta * kPointerSize));
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ subq(rsp, Immediate(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ movq(rbp, MemOperand(rbp, 0));
}
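
AssemblePrepareTailCall no longer takes a stack delta; the adjustment moves into AdjustStackPointerForTailCall below, which compares the callee's first unused slot against the current SP offset. A sketch of that arithmetic (x64 pointer size assumed):

    #include <cstdint>

    constexpr int kPointerSize = 8;  // x64

    // Positive result -> subq rsp, imm (grow); negative -> addq rsp (shrink).
    int64_t TailCallSpAdjustment(int new_slot_above_sp, int current_sp_offset,
                                 bool allow_shrinkage = true) {
      int delta = new_slot_above_sp - current_sp_offset;
      if (delta > 0 || (allow_shrinkage && delta < 0)) {
        return static_cast<int64_t>(delta) * kPointerSize;
      }
      return 0;
    }
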
@@ -655,8 +683,71 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->subq(rsp, Immediate(stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->addq(rsp, Immediate(-stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+ ZoneVector<MoveOperands*> pushes(zone());
+ GetPushCompatibleMoves(instr, flags, &pushes);
+
+ if (!pushes.empty() &&
+ (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+ first_unused_stack_slot)) {
+ X64OperandConverter g(this, instr);
+ for (auto move : pushes) {
+ LocationOperand destination_location(
+ LocationOperand::cast(move->destination()));
+ InstructionOperand source(move->source());
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ destination_location.index());
+ if (source.IsStackSlot()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ Push(g.SlotToOperand(source_location.index()));
+ } else if (source.IsRegister()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ Push(source_location.GetRegister());
+ } else if (source.IsImmediate()) {
+ __ Push(Immediate(ImmediateOperand::cast(source).inline_value()));
+ } else {
+        // Pushes of non-scalar data types are not supported.
+ UNIMPLEMENTED();
+ }
+ frame_access_state()->IncreaseSPDelta(1);
+ move->Eliminate();
+ }
+ }
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
X64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
@@ -677,8 +768,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -692,7 +781,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
+ unwinding_info_writer_.MarkBlockWillExit();
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!HasImmediateInput(instr, 0));
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -716,8 +816,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
@@ -725,6 +823,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -735,7 +834,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
@@ -759,6 +858,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
+ case kArchDebugBreak:
+ __ int3();
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -768,7 +878,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -790,11 +902,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
- auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+ auto ool = new (zone()) OutOfLineTruncateDoubleToI(
+ this, result, input, &unwinding_info_writer_);
+        // We use Cvttsd2siq instead of Cvttsd2si for performance reasons. The
+        // use of Cvttsd2siq requires the movl below to avoid sign extension.
__ Cvttsd2siq(result, input);
__ cmpq(result, Immediate(1));
__ j(overflow, ool->entry());
__ bind(ool->exit());
+ __ movl(result, result);
break;
}
case kArchStoreWithWriteBarrier: {
@@ -827,6 +943,74 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ leaq(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ // TODO(bmeurer): Improve integration of the stub.
+ __ Movsd(xmm2, xmm0);
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ __ Movsd(xmm0, xmm3);
+ break;
+ }
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -1030,12 +1214,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEFloat32Sqrt:
ASSEMBLE_SSE_UNOP(sqrtss);
break;
- case kSSEFloat32Max:
- ASSEMBLE_SSE_BINOP(maxss);
- break;
- case kSSEFloat32Min:
- ASSEMBLE_SSE_BINOP(minss);
- break;
case kSSEFloat32ToFloat64:
ASSEMBLE_SSE_UNOP(Cvtss2sd);
break;
@@ -1047,14 +1225,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kSSEFloat32ToInt32:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat32ToUint32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1081,6 +1259,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kSSEFloat64Mod: {
__ subq(rsp, Immediate(kDoubleSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kDoubleSize);
// Move values to st(0) and st(1).
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(rsp, 0));
@@ -1101,7 +1281,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ shrl(rax, Immediate(8));
__ andl(rax, Immediate(0xFF));
__ pushq(rax);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
__ popfq();
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
}
__ j(parity_even, &mod_loop);
// Move output to stack and clean up.
@@ -1109,14 +1293,120 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fstp_d(Operand(rsp, 0));
__ Movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kDoubleSize);
+ break;
+ }
+ case kSSEFloat32Max: {
+ Label compare_nan, compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
+ __ Movmskps(kScratchRegister, i.InputDoubleRegister(0));
+ __ testl(kScratchRegister, Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
break;
}
- case kSSEFloat64Max:
- ASSEMBLE_SSE_BINOP(maxsd);
+ case kSSEFloat32Min: {
+ Label compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Movmskps(kScratchRegister, i.InputDoubleRegister(1));
+ } else {
+ __ Movss(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskps(kScratchRegister, kScratchDoubleReg);
+ }
+ __ testl(kScratchRegister, Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
break;
- case kSSEFloat64Min:
- ASSEMBLE_SSE_BINOP(minsd);
+ }
+ case kSSEFloat64Max: {
+ Label compare_nan, compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
+ __ Movmskpd(kScratchRegister, i.InputDoubleRegister(0));
+ __ testl(kScratchRegister, Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
break;
+ }
+ case kSSEFloat64Min: {
+ Label compare_swap, done_compare;
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Movmskpd(kScratchRegister, i.InputDoubleRegister(1));
+ } else {
+ __ Movsd(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskpd(kScratchRegister, kScratchDoubleReg);
+ }
+ __ testl(kScratchRegister, Immediate(1));
+ __ j(zero, &done_compare, Label::kNear);
+ __ bind(&compare_swap);
+ if (instr->InputAt(1)->IsFPRegister()) {
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ } else {
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ }
+ __ bind(&done_compare);
+ __ bind(ool->exit());
+ break;
+ }
case kSSEFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
@@ -1145,14 +1435,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_SSE_UNOP(Cvtsd2ss);
break;
case kSSEFloat64ToInt32:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
}
break;
case kSSEFloat64ToUint32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1163,7 +1453,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kSSEFloat32ToInt64:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1173,7 +1463,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Ucomiss(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ Ucomiss(kScratchDoubleReg, i.InputOperand(0));
@@ -1192,7 +1482,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kSSEFloat64ToInt64:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2siq(i.OutputRegister(0), i.InputDoubleRegister(0));
} else {
__ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
@@ -1202,7 +1492,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Ucomisd(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ Ucomisd(kScratchDoubleReg, i.InputOperand(0));
@@ -1228,7 +1518,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
// There does not exist a Float32ToUint64 instruction, so we have to use
// the Float32ToInt64 instruction.
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1241,7 +1531,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// input value was not within the positive int64 range. We subtract 2^64
// and convert it again to see if it is within the uint64 range.
__ Move(kScratchDoubleReg, -9223372036854775808.0f);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ addss(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ addss(kScratchDoubleReg, i.InputOperand(0));
@@ -1271,7 +1561,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
// There does not exist a Float64ToUint64 instruction, so we have to use
// the Float64ToInt64 instruction.
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0));
} else {
__ Cvttsd2siq(i.OutputRegister(), i.InputOperand(0));
@@ -1284,7 +1574,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// input value was not within the positive int64 range. We subtract 2^64
// and convert it again to see if it is within the uint64 range.
__ Move(kScratchDoubleReg, -9223372036854775808.0);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ addsd(kScratchDoubleReg, i.InputDoubleRegister(0));
} else {
__ addsd(kScratchDoubleReg, i.InputOperand(0));
@@ -1369,14 +1659,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
break;
case kSSEFloat64ExtractLowWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
__ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
@@ -1405,7 +1695,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kAVXFloat32Cmp: {
CpuFeatureScope avx_scope(masm(), AVX);
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ vucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
@@ -1427,15 +1717,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// when there is a (v)mulss depending on the result.
__ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kAVXFloat32Max:
- ASSEMBLE_AVX_BINOP(vmaxss);
- break;
- case kAVXFloat32Min:
- ASSEMBLE_AVX_BINOP(vminss);
- break;
case kAVXFloat64Cmp: {
CpuFeatureScope avx_scope(masm(), AVX);
- if (instr->InputAt(1)->IsDoubleRegister()) {
+ if (instr->InputAt(1)->IsFPRegister()) {
__ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ vucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
@@ -1457,18 +1741,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// when there is a (v)mulsd depending on the result.
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kAVXFloat64Max:
- ASSEMBLE_AVX_BINOP(vmaxsd);
- break;
- case kAVXFloat64Min:
- ASSEMBLE_AVX_BINOP(vminsd);
- break;
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1482,7 +1760,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1496,7 +1774,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1510,7 +1788,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
CpuFeatureScope avx_scope(masm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
i.InputDoubleRegister(0));
} else {
@@ -1519,6 +1797,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kSSEFloat64SilenceNaN:
+ __ Xorpd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+ break;
case kX64Movsxbl:
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
@@ -1527,6 +1809,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
+ case kX64Movsxbq:
+ ASSEMBLE_MOVX(movsxbq);
+ break;
+ case kX64Movzxbq:
+ ASSEMBLE_MOVX(movzxbq);
+ __ AssertZeroExtended(i.OutputRegister());
+ break;
case kX64Movb: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -1545,6 +1834,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
+ case kX64Movsxwq:
+ ASSEMBLE_MOVX(movsxwq);
+ break;
+ case kX64Movzxwq:
+ ASSEMBLE_MOVX(movzxwq);
+ __ AssertZeroExtended(i.OutputRegister());
+ break;
case kX64Movw: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -1612,14 +1908,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kX64BitcastFI:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
} else {
__ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kX64BitcastDL:
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ movq(i.OutputRegister(), i.InputOperand(0));
} else {
__ Movq(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -1686,18 +1982,26 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (HasImmediateInput(instr, 0)) {
__ pushq(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
} else {
if (instr->InputAt(0)->IsRegister()) {
__ pushq(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ } else if (instr->InputAt(0)->IsFPRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kDoubleSize);
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else {
__ pushq(i.InputOperand(0));
frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
}
}
break;
@@ -1710,6 +2014,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kX64Xchgb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchgb(i.InputRegister(index), operand);
+ break;
+ }
+ case kX64Xchgw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchgw(i.InputRegister(index), operand);
+ break;
+ }
+ case kX64Xchgl: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchgl(i.InputRegister(index), operand);
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -1729,10 +2051,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movss);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movss, OutOfLineLoadFloat32NaN);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd, OutOfLineLoadFloat64NaN);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(movb);
@@ -1755,7 +2077,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ UNREACHABLE(); // Won't be generated by instruction selector.
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
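
The kSSEFloat32Max/Min and kSSEFloat64Max/Min cases above replace single maxss/minss instructions with compare-and-branch sequences because raw SSE min/max do not implement the JavaScript semantics: NaN must propagate, and -0 must order below +0 (hence the Movmskps/Movmskpd sign-bit tests). A scalar sketch of the intended behavior:

    #include <cmath>

    double JsStyleMax(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return std::nan("");  // NaN wins
      if (a == b) return std::signbit(a) ? b : a;  // max(-0, +0) == +0
      return a > b ? a : b;
    }

    double JsStyleMin(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return std::nan("");
      if (a == b) return std::signbit(a) ? a : b;  // min(-0, +0) == -0
      return a < b ? a : b;
    }
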
@@ -1918,12 +2251,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ jmp(Operand(kScratchRegister, input, times_8, 0));
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
@@ -1933,10 +2270,35 @@ static const int kQuadWordSize = 16;
} // namespace
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-void CodeGenerator::AssemblePrologue() {
+ const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
+ if (saves_fp != 0) {
+ frame->AlignSavedCalleeRegisterSlots();
+ if (saves_fp != 0) { // Save callee-saved XMM registers.
+ const uint32_t saves_fp_count = base::bits::CountPopulation32(saves_fp);
+ frame->AllocateSavedCalleeRegisterSlots(saves_fp_count *
+ (kQuadWordSize / kPointerSize));
+ }
+ }
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ int count = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (((1 << i) & saves)) {
+ ++count;
+ }
+ }
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
+ int pc_base = __ pc_offset();
+
if (descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
@@ -1945,8 +2307,13 @@ void CodeGenerator::AssemblePrologue() {
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
+
+ if (!descriptor->IsJSFunctionCall() || !info()->GeneratePreagedPrologue()) {
+ unwinding_info_writer_.MarkFrameConstructed(pc_base);
+ }
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -1957,16 +2324,12 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -=
- static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
+ shrink_slots -= static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
- stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
- }
- if (stack_shrink_slots > 0) {
- __ subq(rsp, Immediate(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ subq(rsp, Immediate(shrink_slots * kPointerSize));
}
if (saves_fp != 0) { // Save callee-saved XMM registers.
@@ -1982,8 +2345,6 @@ void CodeGenerator::AssemblePrologue() {
XMMRegister::from_code(i));
slot_idx++;
}
- frame()->AllocateSavedCalleeRegisterSlots(saves_fp_count *
- (kQuadWordSize / kPointerSize));
}
const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1991,7 +2352,6 @@ void CodeGenerator::AssemblePrologue() {
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (!((1 << i) & saves)) continue;
__ pushq(Register::from_code(i));
- frame()->AllocateSavedCalleeRegisterSlots(1);
}
}
}
@@ -2024,6 +2384,8 @@ void CodeGenerator::AssembleReturn() {
__ addp(rsp, Immediate(stack_size));
}
+ unwinding_info_writer_.MarkBlockWillExit();
+
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
@@ -2077,12 +2439,29 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: kScratchRegister;
switch (src.type()) {
- case Constant::kInt32:
- // TODO(dcarney): don't need scratch in this case.
- __ Set(dst, src.ToInt32());
+ case Constant::kInt32: {
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ // TODO(dcarney): don't need scratch in this case.
+ int32_t value = src.ToInt32();
+ if (value == 0) {
+ __ xorl(dst, dst);
+ } else {
+ __ movl(dst, Immediate(value));
+ }
+ }
break;
+ }
case Constant::kInt64:
- __ Set(dst, src.ToInt64());
+ if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ __ movq(dst, src.ToInt64(), src.rmode());
+ } else {
+ DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ __ Set(dst, src.ToInt64());
+ }
break;
case Constant::kFloat32:
__ Move(dst,
@@ -2118,45 +2497,60 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (src.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src_const = bit_cast<uint32_t>(src.ToFloat32());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ movl(dst, Immediate(src_const));
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
__ movq(kScratchRegister, src_const);
__ movq(g.ToOperand(destination), kScratchRegister);
}
}
- } else if (source->IsDoubleRegister()) {
+ } else if (source->IsFPRegister()) {
XMMRegister src = g.ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Movapd(dst, src);
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
- __ Movsd(dst, src);
+ MachineRepresentation rep =
+ LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(dst, src);
+ } else {
+ __ Movups(dst, src);
+ }
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
Operand src = g.ToOperand(source);
- if (destination->IsDoubleRegister()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movsd(dst, src);
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(dst, src);
+ } else {
+ __ Movups(dst, src);
+ }
} else {
- // We rely on having xmm0 available as a fixed scratch register.
Operand dst = g.ToOperand(destination);
- __ Movsd(xmm0, src);
- __ Movsd(dst, xmm0);
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(dst, kScratchDoubleReg);
+ } else {
+ __ Movups(kScratchDoubleReg, src);
+ __ Movups(dst, kScratchDoubleReg);
+ }
}
} else {
UNREACHABLE();
@@ -2180,42 +2574,66 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Register src = g.ToRegister(source);
__ pushq(src);
frame_access_state()->IncreaseSPDelta(1);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
Operand dst = g.ToOperand(destination);
__ movq(src, dst);
frame_access_state()->IncreaseSPDelta(-1);
dst = g.ToOperand(destination);
__ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
- (source->IsDoubleStackSlot() &&
- destination->IsDoubleStackSlot())) {
+ (source->IsFPStackSlot() && destination->IsFPStackSlot())) {
// Memory-memory.
- Register tmp = kScratchRegister;
Operand src = g.ToOperand(source);
Operand dst = g.ToOperand(destination);
- __ movq(tmp, dst);
- __ pushq(src);
- frame_access_state()->IncreaseSPDelta(1);
- src = g.ToOperand(source);
- __ movq(src, tmp);
- frame_access_state()->IncreaseSPDelta(-1);
- dst = g.ToOperand(destination);
- __ popq(dst);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
- // XMM register-register swap. We rely on having xmm0
- // available as a fixed scratch register.
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ Register tmp = kScratchRegister;
+ __ movq(tmp, dst);
+ __ pushq(src);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kPointerSize);
+ frame_access_state()->IncreaseSPDelta(1);
+ src = g.ToOperand(source);
+ __ movq(src, tmp);
+ frame_access_state()->IncreaseSPDelta(-1);
+ dst = g.ToOperand(destination);
+ __ popq(dst);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ -kPointerSize);
+ } else {
+ // Use the XOR trick to swap without a temporary.
+ __ Movups(kScratchDoubleReg, src);
+ __ Xorps(kScratchDoubleReg, dst); // scratch contains src ^ dst.
+ __ Movups(src, kScratchDoubleReg);
+ __ Xorps(kScratchDoubleReg, dst); // scratch contains src.
+ __ Movups(dst, kScratchDoubleReg);
+ __ Xorps(kScratchDoubleReg, src); // scratch contains dst.
+ __ Movups(src, kScratchDoubleReg);
+ }
+ } else if (source->IsFPRegister() && destination->IsFPRegister()) {
+ // XMM register-register swap.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
- __ Movapd(xmm0, src);
+ __ Movapd(kScratchDoubleReg, src);
__ Movapd(src, dst);
- __ Movapd(dst, xmm0);
- } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
- // XMM register-memory swap. We rely on having xmm0
- // available as a fixed scratch register.
+ __ Movapd(dst, kScratchDoubleReg);
+ } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
+ // XMM register-memory swap.
XMMRegister src = g.ToDoubleRegister(source);
Operand dst = g.ToOperand(destination);
- __ Movsd(xmm0, src);
- __ Movsd(src, dst);
- __ Movsd(dst, xmm0);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep != MachineRepresentation::kSimd128) {
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(src, dst);
+ __ Movsd(dst, kScratchDoubleReg);
+ } else {
+ __ Movups(kScratchDoubleReg, src);
+ __ Movups(src, dst);
+ __ Movups(dst, kScratchDoubleReg);
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
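
Aside: the memory-to-memory SIMD swap above routes the classic three-XOR identity through kScratchDoubleReg, so a single scratch XMM register suffices for two 128-bit memory operands. A minimal standalone C++ sketch of the same identity on plain integers (illustrative only, not V8 code):

#include <cassert>
#include <cstdint>

// Three XORs swap a and b in place without a temporary:
// after (1) a == a0 ^ b0; after (2) b == a0; after (3) a == b0.
void XorSwap(uint64_t& a, uint64_t& b) {
  a ^= b;  // (1)
  b ^= a;  // (2)
  a ^= b;  // (3)
}

int main() {
  uint64_t x = 0x1111, y = 0x2222;
  XorSwap(x, y);
  assert(x == 0x2222 && y == 0x1111);
  return 0;
}
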
@@ -2230,9 +2648,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
@@ -2241,7 +2656,7 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = __ pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index bd19386d6a..7ab1097428 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -64,8 +64,6 @@ namespace compiler {
V(SSEFloat32Abs) \
V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
- V(SSEFloat32Max) \
- V(SSEFloat32Min) \
V(SSEFloat32ToFloat64) \
V(SSEFloat32ToInt32) \
V(SSEFloat32ToUint32) \
@@ -80,7 +78,9 @@ namespace compiler {
V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
V(SSEFloat64Min) \
V(SSEFloat64ToFloat32) \
V(SSEFloat64ToInt32) \
@@ -102,29 +102,30 @@ namespace compiler {
V(SSEFloat64InsertLowWord32) \
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
V(AVXFloat32Cmp) \
V(AVXFloat32Add) \
V(AVXFloat32Sub) \
V(AVXFloat32Mul) \
V(AVXFloat32Div) \
- V(AVXFloat32Max) \
- V(AVXFloat32Min) \
V(AVXFloat64Cmp) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
- V(AVXFloat64Max) \
- V(AVXFloat64Min) \
V(AVXFloat64Abs) \
V(AVXFloat64Neg) \
V(AVXFloat32Abs) \
V(AVXFloat32Neg) \
V(X64Movsxbl) \
V(X64Movzxbl) \
+ V(X64Movsxbq) \
+ V(X64Movzxbq) \
V(X64Movb) \
V(X64Movsxwl) \
V(X64Movzxwl) \
+ V(X64Movsxwq) \
+ V(X64Movzxwq) \
V(X64Movw) \
V(X64Movl) \
V(X64Movsxlq) \
@@ -141,7 +142,10 @@ namespace compiler {
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
- V(X64StackCheck)
+ V(X64StackCheck) \
+ V(X64Xchgb) \
+ V(X64Xchgw) \
+ V(X64Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index 3c31965d72..fb4b74914d 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -67,8 +67,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
- case kSSEFloat32Max:
- case kSSEFloat32Min:
case kSSEFloat32ToFloat64:
case kSSEFloat64Cmp:
case kSSEFloat64Add:
@@ -80,7 +78,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
+ case kSSEFloat32Max:
case kSSEFloat64Max:
+ case kSSEFloat32Min:
case kSSEFloat64Min:
case kSSEFloat64ToFloat32:
case kSSEFloat32ToInt32:
@@ -104,20 +104,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64InsertLowWord32:
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
+ case kSSEFloat64SilenceNaN:
case kAVXFloat32Cmp:
case kAVXFloat32Add:
case kAVXFloat32Sub:
case kAVXFloat32Mul:
case kAVXFloat32Div:
- case kAVXFloat32Max:
- case kAVXFloat32Min:
case kAVXFloat64Cmp:
case kAVXFloat64Add:
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
- case kAVXFloat64Max:
- case kAVXFloat64Min:
case kAVXFloat64Abs:
case kAVXFloat64Neg:
case kAVXFloat32Abs:
@@ -136,8 +133,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movsxbl:
case kX64Movzxbl:
+ case kX64Movsxbq:
+ case kX64Movzxbq:
case kX64Movsxwl:
case kX64Movzxwl:
+ case kX64Movsxwq:
+ case kX64Movzxwq:
case kX64Movsxlq:
DCHECK(instr->InputCount() >= 1);
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
@@ -168,6 +169,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Poke:
return kHasSideEffect;
+ case kX64Xchgb:
+ case kX64Xchgw:
+ case kX64Xchgl:
+ return kIsLoadOperation | kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index f46ff5946d..798d438e25 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -22,6 +22,7 @@ class X64OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
+ case IrOpcode::kRelocatableInt32Constant:
return true;
case IrOpcode::kInt64Constant: {
const int64_t value = OpParameter<int64_t>(node);
@@ -36,6 +37,15 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
+ int32_t GetImmediateIntegerValue(Node* node) {
+ DCHECK(CanBeImmediate(node));
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node);
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return static_cast<int32_t>(OpParameter<int64_t>(node));
+ }
+
bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
int effect_level) {
if (input->opcode() != IrOpcode::kLoad ||
@@ -69,6 +79,7 @@ class X64OperandGenerator final : public OperandGenerator {
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
Node* base, Node* displacement,
+ DisplacementMode displacement_mode,
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
@@ -78,7 +89,9 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = UseImmediate(displacement);
+        inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                       ? UseNegatedImmediate(displacement)
+                                       : UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
kMode_MR4I, kMode_MR8I};
mode = kMRnI_modes[scale_exponent];
@@ -91,7 +104,9 @@ class X64OperandGenerator final : public OperandGenerator {
if (displacement == nullptr) {
mode = kMode_MR;
} else {
- inputs[(*input_count)++] = UseImmediate(displacement);
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
mode = kMode_MRI;
}
}
@@ -100,7 +115,9 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = UseImmediate(displacement);
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
kMode_M4I, kMode_M8I};
mode = kMnI_modes[scale_exponent];
@@ -120,11 +137,12 @@ class X64OperandGenerator final : public OperandGenerator {
AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
InstructionOperand inputs[],
size_t* input_count) {
- BaseWithIndexAndDisplacement64Matcher m(operand, true);
+ BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
DCHECK(m.matches());
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
- return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
- m.displacement(), inputs, input_count);
+ return GenerateMemoryOperandInputs(
+ m.index(), m.scale(), m.base(), m.displacement(),
+ m.displacement_mode(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
@@ -160,6 +178,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kX64Movq;
@@ -245,6 +265,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kX64Movq;
@@ -268,6 +290,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+// The architecture supports unaligned access; VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access; VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -297,6 +324,8 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -350,6 +379,8 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
@@ -431,7 +462,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -541,16 +572,16 @@ void VisitWord64Shift(InstructionSelector* selector, Node* node,
}
}
-
void EmitLea(InstructionSelector* selector, InstructionCode opcode,
Node* result, Node* index, int scale, Node* base,
- Node* displacement) {
+ Node* displacement, DisplacementMode displacement_mode) {
X64OperandGenerator g(selector);
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode mode = g.GenerateMemoryOperandInputs(
- index, scale, base, displacement, inputs, &input_count);
+ AddressingMode mode =
+ g.GenerateMemoryOperandInputs(index, scale, base, displacement,
+ displacement_mode, inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
@@ -571,7 +602,8 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
+ kPositiveDisplacement);
return;
}
VisitWord32Shift(this, node, kX64Shl32);
@@ -580,15 +612,25 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord64Shl(Node* node) {
X64OperandGenerator g(this);
- Int64BinopMatcher m(node);
- if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
- m.right().IsInRange(32, 63)) {
- // There's no need to sign/zero-extend to 64-bit if we shift out the upper
- // 32 bits anyway.
- Emit(kX64Shl, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()->InputAt(0)),
- g.UseImmediate(m.right().node()));
+ Int64ScaleMatcher m(node, true);
+ if (m.matches()) {
+ Node* index = node->InputAt(0);
+ Node* base = m.power_of_two_plus_one() ? index : nullptr;
+ EmitLea(this, kX64Lea, node, index, m.scale(), base, nullptr,
+ kPositiveDisplacement);
return;
+ } else {
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() ||
+ m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63)) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kX64Shl, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
}
VisitWord64Shift(this, node, kX64Shl);
}
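
The fallback above keeps the older rewrite: when a 32-to-64-bit extension feeds a shift left by 32..63, every bit the extension added is shifted out, so the extension can be skipped. A hedged C++ check of that equivalence (the garbage high half stands in for an unextended register):

#include <cassert>
#include <cstdint>

uint64_t WithExtend(uint32_t x) { return static_cast<uint64_t>(x) << 40; }

uint64_t SkipExtend(uint32_t x) {
  // Operate on a 64-bit register whose low 32 bits are x; the arbitrary
  // high bits are shifted out anyway.
  uint64_t reg = (0xdeadbeefull << 32) | x;
  return reg << 40;
}

int main() {
  assert(WithExtend(0x12345678u) == SkipExtend(0x12345678u));
  return 0;
}
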
@@ -598,37 +640,19 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
VisitWord32Shift(this, node, kX64Shr32);
}
-
-void InstructionSelector::VisitWord64Shr(Node* node) {
- VisitWord64Shift(this, node, kX64Shr);
-}
-
-
-void InstructionSelector::VisitWord32Sar(Node* node) {
- X64OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(16) && m.right().Is(16)) {
- Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
- return;
- } else if (mleft.right().Is(24) && m.right().Is(24)) {
- Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
- return;
- }
- }
- VisitWord32Shift(this, node, kX64Sar32);
-}
-
-
-void InstructionSelector::VisitWord64Sar(Node* node) {
- X64OperandGenerator g(this);
+namespace {
+bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ DCHECK(IrOpcode::kWord64Sar == node->opcode() ||
+ IrOpcode::kWord64Shr == node->opcode());
+ X64OperandGenerator g(selector);
Int64BinopMatcher m(node);
- if (CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
+ if (selector->CanCover(m.node(), m.left().node()) && m.left().IsLoad() &&
m.right().Is(32)) {
// Just load and sign-extend the interesting 4 bytes instead. This happens,
// for example, when we're loading and untagging SMIs.
- BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(), true);
+ BaseWithIndexAndDisplacement64Matcher mleft(m.left().node(),
+ AddressOption::kAllowAll);
if (mleft.matches() && (mleft.displacement() == nullptr ||
g.CanBeImmediate(mleft.displacement()))) {
size_t input_count = 0;
@@ -681,16 +705,43 @@ void InstructionSelector::VisitWord64Sar(Node* node) {
}
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
} else {
- ImmediateOperand* op = ImmediateOperand::cast(&inputs[input_count - 1]);
- int32_t displacement = sequence()->GetImmediate(op).ToInt32();
- *op = ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
+ int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
+ inputs[input_count - 1] =
+ ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
}
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
- InstructionCode code = kX64Movsxlq | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ selector->Emit(code, 1, outputs, input_count, inputs);
+ return true;
+ }
+ }
+ return false;
+}
+} // namespace
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movl)) return;
+ VisitWord64Shift(this, node, kX64Shr);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ X64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().Is(16) && m.right().Is(16)) {
+ Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
+ return;
+ } else if (mleft.right().Is(24) && m.right().Is(24)) {
+ Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
return;
}
}
+ VisitWord32Shift(this, node, kX64Sar32);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryMatchLoadWord64AndShiftRight(this, node, kX64Movsxlq)) return;
VisitWord64Shift(this, node, kX64Sar);
}
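
TryMatchLoadWord64AndShiftRight above replaces "Load64(p) >> 32" with a 4-byte load at p + 4, valid because x64 is little-endian: the high word lives at the higher address. The shift's kind picks the extension (kX64Movsxlq for sar, kX64Movl for shr). A standalone sketch of the equivalence, with memcpy standing in for the raw load (not V8 API):

#include <cassert>
#include <cstdint>
#include <cstring>

int64_t ViaFullLoadAndSar(const unsigned char* p) {
  int64_t v;
  std::memcpy(&v, p, 8);
  return v >> 32;  // arithmetic shift on the compilers V8 targets
}

int64_t ViaNarrowLoad(const unsigned char* p) {
  int32_t hi;
  std::memcpy(&hi, p + 4, 4);       // the "displacement + 4" in the selector
  return static_cast<int64_t>(hi);  // movsxlq-style sign extension
}

int main() {
  const unsigned char bytes[8] = {1, 2, 3, 4, 5, 6, 7, 0x80};
  assert(ViaFullLoadAndSar(bytes) == ViaNarrowLoad(bytes));
  return 0;
}
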
@@ -734,6 +785,9 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) {
X64OperandGenerator g(this);
@@ -755,7 +809,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.matches() &&
(m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
- m.displacement());
+ m.displacement(), m.displacement_mode());
return;
}
@@ -765,6 +819,18 @@ void InstructionSelector::VisitInt32Add(Node* node) {
void InstructionSelector::VisitInt64Add(Node* node) {
+ X64OperandGenerator g(this);
+
+  // Try to match the Add to a leaq pattern.
+ BaseWithIndexAndDisplacement64Matcher m(node);
+ if (m.matches() &&
+ (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
+ EmitLea(this, kX64Lea, node, m.index(), m.scale(), m.base(),
+ m.displacement(), m.displacement_mode());
+ return;
+ }
+
+  // No leaq pattern matched; use addq.
VisitBinop(this, node, kX64Add);
}
@@ -804,6 +870,14 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else {
+ if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+ // Turn subtractions of constant values into immediate "leaq" instructions
+ // by negating the value.
+ Emit(kX64Lea | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(-static_cast<int32_t>(m.right().Value())));
+ return;
+ }
VisitBinop(this, node, kX64Sub);
}
}
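
The constant-subtraction rewrite above relies on x - c == x + (-c) under two's-complement wraparound, which lets leaq write the result into a fresh register instead of the two-operand subq that clobbers its input. A small illustration (a sketch, not V8 code):

#include <cassert>
#include <cstdint>

int64_t SubViaLea(int64_t x, int32_t c) {
  int64_t neg = -static_cast<int64_t>(c);  // widen before negating
  return x + neg;                          // leaq dst, [x + neg]
}

int main() {
  assert(SubViaLea(100, 42) == 58);
  assert(SubViaLea(0, -7) == 7);
  return 0;
}
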
@@ -838,7 +912,6 @@ void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
}
}
-
void VisitMulHigh(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X64OperandGenerator g(selector);
@@ -880,18 +953,27 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr);
+ EmitLea(this, kX64Lea32, node, index, m.scale(), base, nullptr,
+ kPositiveDisplacement);
return;
}
VisitMul(this, node, kX64Imul32);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ // TODO(mvstanton): Use Int32ScaleMatcher somehow.
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kX64Imul32, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX64Imul32, &cont);
+}
void InstructionSelector::VisitInt64Mul(Node* node) {
VisitMul(this, node, kX64Imul);
}
-
void InstructionSelector::VisitInt32MulHigh(Node* node) {
VisitMulHigh(this, node, kX64ImulHigh32);
}
@@ -1056,7 +1138,36 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ Node* const value = node->InputAt(0);
+ if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ MachineRepresentation rep = load_rep.representation();
+ InstructionCode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kX64Movsxbq : kX64Movzxbq;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ size_t input_count = 0;
+ InstructionOperand inputs[3];
+ AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+ node->InputAt(0), inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
+ } else {
+ Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ }
}
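
The load folding above picks the extending load straight from the loaded representation and its signedness, so ChangeInt32ToInt64(Load(...)) becomes a single instruction. The opcode table, restated as plain C++ conversions (a sketch of the extension each opcode performs):

#include <cstdint>

int64_t  FromInt8(int8_t v)     { return v; }  // kX64Movsxbq: sign-extend
uint64_t FromUint8(uint8_t v)   { return v; }  // kX64Movzxbq: zero-extend
int64_t  FromInt16(int16_t v)   { return v; }  // kX64Movsxwq: sign-extend
uint64_t FromUint16(uint16_t v) { return v; }  // kX64Movzxwq: zero-extend
int64_t  FromInt32(int32_t v)   { return v; }  // kX64Movsxlq: sign-extend
uint64_t FromUint32(uint32_t v) { return v; }  // kX64Movl: implicit zero-extend
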
@@ -1113,6 +1224,12 @@ void VisitRR(InstructionSelector* selector, Node* node,
g.UseRegister(node->InputAt(0)));
}
+void VisitRRO(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+}
void VisitFloatBinop(InstructionSelector* selector, Node* node,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
@@ -1144,15 +1261,8 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
VisitRO(this, node, kSSEFloat64ToFloat32);
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- return VisitRR(this, node, kArchTruncateDoubleToI);
- case TruncationMode::kRoundToZero:
- return VisitRO(this, node, kSSEFloat64ToInt32);
- }
- UNREACHABLE();
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, node, kArchTruncateDoubleToI);
}
@@ -1165,6 +1275,10 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
case IrOpcode::kWord64Shr: {
Int64BinopMatcher m(value);
if (m.right().Is(32)) {
+ if (TryMatchLoadWord64AndShiftRight(this, value, kX64Movl)) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
Emit(kX64Shr, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()), g.TempImmediate(32));
return;
@@ -1178,6 +1292,9 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRO(this, node, kSSEFloat64ToInt32);
+}
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
X64OperandGenerator g(this);
@@ -1249,17 +1366,9 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
- X64OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
- kSSEFloat32Neg);
- return;
- }
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
}
@@ -1270,16 +1379,6 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
}
-void InstructionSelector::VisitFloat32Max(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat32Max, kSSEFloat32Max);
-}
-
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat32Min, kSSEFloat32Min);
-}
-
-
void InstructionSelector::VisitFloat32Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
@@ -1289,6 +1388,13 @@ void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRO(this, node, kSSEFloat32Sqrt);
}
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ VisitRRO(this, node, kSSEFloat32Max);
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ VisitRRO(this, node, kSSEFloat32Min);
+}
void InstructionSelector::VisitFloat64Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Add, kSSEFloat64Add);
@@ -1296,29 +1402,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
- X64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsMinusZero()) {
- if (m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
- g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
- return;
- }
- }
- }
- VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
- kSSEFloat64Neg);
- return;
- }
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
-
void InstructionSelector::VisitFloat64Mul(Node* node) {
VisitFloatBinop(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
}
@@ -1339,12 +1425,12 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
void InstructionSelector::VisitFloat64Max(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat64Max, kSSEFloat64Max);
+ VisitRRO(this, node, kSSEFloat64Max);
}
void InstructionSelector::VisitFloat64Min(Node* node) {
- VisitFloatBinop(this, node, kAVXFloat64Min, kSSEFloat64Min);
+ VisitRRO(this, node, kSSEFloat64Min);
}
@@ -1352,7 +1438,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
-
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRO(this, node, kSSEFloat64Sqrt);
}
@@ -1402,6 +1487,28 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0),
+ g.UseFixed(node->InputAt(1), xmm1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, xmm0), g.UseFixed(node->InputAt(0), xmm0))
+ ->MarkAsCall();
+}
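
VisitFloat64Ieee754Binop/Unop above lower to genuine C calls: the operands are pinned to xmm0/xmm1 and the result to xmm0, which lines up with the System V AMD64 convention for doubles, and MarkAsCall tells the register allocator that caller-saved registers are clobbered across the instruction. At the C level the callee is shaped like this (a sketch; std::pow stands in for the ieee754 runtime routine):

#include <cmath>

// a arrives in xmm0, b in xmm1, and the result is returned in xmm0.
double Ieee754Binop(double a, double b) { return std::pow(a, b); }
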
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1411,7 +1518,7 @@ void InstructionSelector::EmitPrepareArguments(
// Prepare for C function call.
if (descriptor->IsCFunctionCall()) {
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr);
// Poke any stack arguments.
@@ -1434,7 +1541,7 @@ void InstructionSelector::EmitPrepareArguments(
g.CanBeImmediate(input.node())
? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input.node()))
+ sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node())
: g.Use(input.node());
Emit(kX64Push, g.NoOutput(), value);
@@ -1469,7 +1576,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1487,7 +1594,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1510,10 +1617,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
- Node* right) {
- if (opcode != kX64Cmp32 && opcode != kX64Test32) {
- return opcode;
- }
+ Node* right, FlagsContinuation* cont) {
// Currently, if one of the two operands is not a Load, we don't know what its
// machine representation is, so we bail out.
// TODO(epertoso): we can probably get some size information out of immediates
@@ -1523,19 +1627,39 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
}
// If the load representations don't match, both operands will be
// zero/sign-extended to 32bit.
- LoadRepresentation left_representation = LoadRepresentationOf(left->op());
- if (left_representation != LoadRepresentationOf(right->op())) {
- return opcode;
- }
- switch (left_representation.representation()) {
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8:
- return opcode == kX64Cmp32 ? kX64Cmp8 : kX64Test8;
- case MachineRepresentation::kWord16:
- return opcode == kX64Cmp32 ? kX64Cmp16 : kX64Test16;
- default:
- return opcode;
+ MachineType left_type = LoadRepresentationOf(left->op());
+ MachineType right_type = LoadRepresentationOf(right->op());
+ if (left_type == right_type) {
+ switch (left_type.representation()) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8: {
+ if (opcode == kX64Test32) return kX64Test8;
+ if (opcode == kX64Cmp32) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kX64Cmp8;
+ }
+ break;
+ }
+ case MachineRepresentation::kWord16:
+ if (opcode == kX64Test32) return kX64Test16;
+ if (opcode == kX64Cmp32) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kX64Cmp16;
+ }
+ break;
+ default:
+ break;
+ }
}
+ return opcode;
}
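
TryNarrowOpcodeSize above only narrows a 32-bit compare when both operands are loads of the same narrow type, and for unsigned loads it rewrites signed conditions to unsigned via OverwriteUnsignedIfSigned(): a zero-extended byte compared with signed 32-bit semantics behaves exactly like an unsigned 8-bit compare. A quick exhaustive check of that claim:

#include <cassert>
#include <cstdint>

bool Widened(uint8_t a, uint8_t b) {  // cmp32 on zero-extended loads
  return static_cast<int32_t>(a) < static_cast<int32_t>(b);
}
bool Narrow(uint8_t a, uint8_t b) {  // cmp8 with an unsigned condition
  return a < b;
}

int main() {
  for (int a = 0; a < 256; ++a) {
    for (int b = 0; b < 256; ++b) {
      assert(Widened(static_cast<uint8_t>(a), static_cast<uint8_t>(b)) ==
             Narrow(static_cast<uint8_t>(a), static_cast<uint8_t>(b)));
    }
  }
  return 0;
}
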
// Shared routine for multiple word compare operations.
@@ -1545,7 +1669,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- opcode = TryNarrowOpcodeSize(opcode, left, right);
+ opcode = TryNarrowOpcodeSize(opcode, left, right, cont);
// If one of the two inputs is an immediate, make sure it's on the right, or
// if one of the two inputs is a memory operand, make sure it's on the left.
@@ -1604,7 +1728,7 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1746,6 +1870,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kX64Sub32, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX64Imul32, cont);
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kX64Add, cont);
@@ -1785,14 +1912,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2033,15 +2160,63 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.Use(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitAtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kX64Xchgb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX64Xchgw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX64Xchgl;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
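
VisitAtomicStore above emits xchg rather than a plain mov: on x86 an exchange with a memory operand is implicitly locked, which makes the store sequentially consistent instead of merely release-ordered. This mirrors the choice a C++ compiler makes for seq_cst stores:

#include <atomic>
#include <cstdint>

void SeqCstStore(std::atomic<uint32_t>& cell, uint32_t v) {
  // Typically compiles to xchgl on x86-64 (kX64Xchgl here); a relaxed or
  // release store would be a plain movl.
  cell.store(v, std::memory_order_seq_cst);
}
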
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kFloat32Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
if (CpuFeatures::IsSupported(POPCNT)) {
@@ -2061,6 +2236,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
new file mode 100644
index 0000000000..4efba3254f
--- /dev/null
+++ b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
@@ -0,0 +1,102 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/x64/unwinding-info-writer-x64.h"
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
+ const InstructionBlock* block) {
+ if (!enabled()) return;
+
+ block_will_exit_ = false;
+
+ DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+ const BlockInitialState* initial_state =
+ block_initial_states_[block->rpo_number().ToInt()];
+ if (initial_state) {
+ if (!initial_state->register_.is(eh_frame_writer_.base_register()) &&
+ initial_state->offset_ != eh_frame_writer_.base_offset()) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegisterAndOffset(initial_state->register_,
+ initial_state->offset_);
+ } else if (!initial_state->register_.is(eh_frame_writer_.base_register())) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressRegister(initial_state->register_);
+ } else if (initial_state->offset_ != eh_frame_writer_.base_offset()) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.SetBaseAddressOffset(initial_state->offset_);
+ }
+
+ tracking_fp_ = initial_state->tracking_fp_;
+ } else {
+    // The entry block always lacks an explicit initial state.
+    // The exit block may also lack one if it is only reached by blocks
+    // ending in a ret.
+    // All other blocks must have an explicit initial state.
+ DCHECK(block->predecessors().empty() || block->successors().empty());
+ }
+}
+
+void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
+ if (!enabled() || block_will_exit_) return;
+
+ for (const RpoNumber& successor : block->successors()) {
+ int successor_index = successor.ToInt();
+ DCHECK_LT(successor_index, block_initial_states_.size());
+ const BlockInitialState* existing_state =
+ block_initial_states_[successor_index];
+    // If we already had an entry for this block, check that it matches the
+    // values we are about to insert.
+ if (existing_state) {
+ DCHECK(existing_state->register_.is(eh_frame_writer_.base_register()));
+ DCHECK_EQ(existing_state->offset_, eh_frame_writer_.base_offset());
+ DCHECK_EQ(existing_state->tracking_fp_, tracking_fp_);
+ } else {
+ block_initial_states_[successor_index] = new (zone_)
+ BlockInitialState(eh_frame_writer_.base_register(),
+ eh_frame_writer_.base_offset(), tracking_fp_);
+ }
+ }
+}
+
+void UnwindingInfoWriter::MarkFrameConstructed(int pc_base) {
+ if (!enabled()) return;
+
+ // push rbp
+ eh_frame_writer_.AdvanceLocation(pc_base + 1);
+ eh_frame_writer_.IncreaseBaseAddressOffset(kInt64Size);
+ // <base address> points at the bottom of the current frame on x64 and
+ // <base register> is rsp, which points to the top of the frame by definition.
+ // Thus, the distance between <base address> and the top is -<base offset>.
+ int top_of_stack = -eh_frame_writer_.base_offset();
+ eh_frame_writer_.RecordRegisterSavedToStack(rbp, top_of_stack);
+
+ // mov rbp, rsp
+ eh_frame_writer_.AdvanceLocation(pc_base + 4);
+ eh_frame_writer_.SetBaseAddressRegister(rbp);
+
+ tracking_fp_ = true;
+}
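
The pc_base offsets above track the standard x64 prologue encoding: push rbp is one byte (0x55) and mov rbp, rsp is three (48 89 e5), so each CFA update is recorded immediately after its instruction. As an annotated sketch (byte encodings per the usual x64 forms):

// pc_base + 0: 55        push rbp
// pc_base + 1:           AdvanceLocation; base offset += kInt64Size and
//                        rbp is recorded in the slot just pushed.
// pc_base + 1: 48 89 e5  mov rbp, rsp
// pc_base + 4:           AdvanceLocation; the base register becomes rbp,
//                        so later rsp adjustments need no tracking.
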
+
+void UnwindingInfoWriter::MarkFrameDeconstructed(int pc_base) {
+ if (!enabled()) return;
+
+ // mov rsp, rbp
+ eh_frame_writer_.AdvanceLocation(pc_base + 3);
+ eh_frame_writer_.SetBaseAddressRegister(rsp);
+
+ // pop rbp
+ eh_frame_writer_.AdvanceLocation(pc_base + 4);
+ eh_frame_writer_.IncreaseBaseAddressOffset(-kInt64Size);
+
+ tracking_fp_ = false;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
new file mode 100644
index 0000000000..8bb5903e54
--- /dev/null
+++ b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.h
@@ -0,0 +1,79 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
+#define V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionBlock;
+
+class UnwindingInfoWriter {
+ public:
+ explicit UnwindingInfoWriter(Zone* zone)
+ : zone_(zone),
+ eh_frame_writer_(zone),
+ tracking_fp_(false),
+ block_will_exit_(false),
+ block_initial_states_(zone) {
+ if (enabled()) eh_frame_writer_.Initialize();
+ }
+
+ void MaybeIncreaseBaseOffsetAt(int pc_offset, int base_delta) {
+ if (enabled() && !tracking_fp_) {
+ eh_frame_writer_.AdvanceLocation(pc_offset);
+ eh_frame_writer_.IncreaseBaseAddressOffset(base_delta);
+ }
+ }
+
+ void SetNumberOfInstructionBlocks(int number) {
+ if (enabled()) block_initial_states_.resize(number);
+ }
+
+ void BeginInstructionBlock(int pc_offset, const InstructionBlock* block);
+ void EndInstructionBlock(const InstructionBlock* block);
+
+ void MarkFrameConstructed(int pc_base);
+ void MarkFrameDeconstructed(int pc_base);
+
+ void MarkBlockWillExit() { block_will_exit_ = true; }
+
+ void Finish(int code_size) {
+ if (enabled()) eh_frame_writer_.Finish(code_size);
+ }
+
+ EhFrameWriter* eh_frame_writer() {
+ return enabled() ? &eh_frame_writer_ : nullptr;
+ }
+
+ private:
+ bool enabled() const { return FLAG_perf_prof_unwinding_info; }
+
+ class BlockInitialState : public ZoneObject {
+ public:
+ BlockInitialState(Register reg, int offset, bool tracking_fp)
+ : register_(reg), offset_(offset), tracking_fp_(tracking_fp) {}
+
+ Register register_;
+ int offset_;
+ bool tracking_fp_;
+ };
+
+ Zone* zone_;
+ EhFrameWriter eh_frame_writer_;
+ bool tracking_fp_;
+ bool block_will_exit_;
+
+ ZoneVector<const BlockInitialState*> block_initial_states_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_X64_UNWINDING_INFO_WRITER_H_
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index da7fdb481b..1064e622eb 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -42,7 +42,7 @@ class X87OperandConverter : public InstructionOperandConverter {
DCHECK(extra == 0);
return Operand(ToRegister(op));
}
- DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
@@ -53,12 +53,19 @@ class X87OperandConverter : public InstructionOperandConverter {
}
Operand HighOperand(InstructionOperand* op) {
- DCHECK(op->IsDoubleStackSlot());
+ DCHECK(op->IsFPStackSlot());
return ToOperand(op, kPointerSize);
}
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
+ if (constant.type() == Constant::kInt32 &&
+ (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
+ constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
+ constant.rmode());
+ }
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
@@ -107,8 +114,8 @@ class X87OperandConverter : public InstructionOperandConverter {
}
case kMode_MRI: {
Register base = InputRegister(NextOffset(offset));
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, disp);
+        Constant disp = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(base, disp.ToInt32(), disp.rmode());
}
case kMode_MR1:
case kMode_MR2:
@@ -127,8 +134,8 @@ class X87OperandConverter : public InstructionOperandConverter {
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(base, index, scale, disp);
+        Constant disp = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(base, index, scale, disp.ToInt32(), disp.rmode());
}
case kMode_M1:
case kMode_M2:
@@ -145,12 +152,12 @@ class X87OperandConverter : public InstructionOperandConverter {
case kMode_M8I: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1I, mode);
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(index, scale, disp);
+        Constant disp = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(index, scale, disp.ToInt32(), disp.rmode());
}
case kMode_MI: {
- int32_t disp = InputInt32(NextOffset(offset));
- return Operand(Immediate(disp));
+        Constant disp = ToConstant(instr_->InputAt(NextOffset(offset)));
+        return Operand(disp.ToInt32(), disp.rmode());
}
case kMode_None:
UNREACHABLE();
@@ -184,21 +191,35 @@ class OutOfLineLoadInteger final : public OutOfLineCode {
Register const result_;
};
+class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
+ public:
+ OutOfLineLoadFloat32NaN(CodeGenerator* gen, X87Register result)
+ : OutOfLineCode(gen), result_(result) {}
-class OutOfLineLoadFloat final : public OutOfLineCode {
+ void Generate() final {
+ DCHECK(result_.code() == 0);
+ USE(result_);
+ __ fstp(0);
+ __ push(Immediate(0xffc00000));
+ __ fld_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+ }
+
+ private:
+ X87Register const result_;
+};
+
+class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
public:
- OutOfLineLoadFloat(CodeGenerator* gen, X87Register result)
+ OutOfLineLoadFloat64NaN(CodeGenerator* gen, X87Register result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
DCHECK(result_.code() == 0);
USE(result_);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
__ fstp(0);
- __ push(Immediate(0xffffffff));
- __ push(Immediate(0x7fffffff));
+ __ push(Immediate(0xfff80000));
+ __ push(Immediate(0x00000000));
__ fld_d(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
}
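
The immediates above are the canonical quiet-NaN encodings: 0xffc00000 for float32, and the pair 0xfff80000 / 0x00000000 for float64 (the low word is pushed last so it lands at the lower address for fld_d on little-endian). A standalone check of both bit patterns:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t bits32 = 0xffc00000u;
  float f;
  std::memcpy(&f, &bits32, sizeof(f));
  uint64_t bits64 = 0xfff8000000000000ull;  // high word 0xfff80000, low 0
  double d;
  std::memcpy(&d, &bits64, sizeof(d));
  assert(std::isnan(f) && std::isnan(d));
  return 0;
}
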
@@ -207,7 +228,6 @@ class OutOfLineLoadFloat final : public OutOfLineCode {
X87Register const result_;
};
-
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
@@ -268,25 +288,23 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto offset = i.InputRegister(0); \
- DCHECK(result.code() == 0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
- __ j(above_equal, ool->entry()); \
- __ fstp(0); \
- __ asm_instr(i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
+ do { \
+ auto result = i.OutputDoubleRegister(); \
+ auto offset = i.InputRegister(0); \
+ DCHECK(result.code() == 0); \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ cmp(offset, i.InputRegister(1)); \
+ } else { \
+ __ cmp(offset, i.InputImmediate(1)); \
+ } \
+ OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
+ __ j(above_equal, ool->entry()); \
+ __ fstp(0); \
+ __ asm_instr(i.MemoryOperand(2)); \
+ __ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
do { \
auto result = i.OutputRegister(); \
@@ -364,31 +382,56 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+    /* Save esp into ebx. */                                                  \
+ __ push(ebx); \
+ __ mov(ebx, esp); \
+    /* Pass two doubles as arguments on the stack. */                         \
+ __ PrepareCallCFunction(4, eax); \
+ __ fstp(0); \
+ /* Load first operand from original stack */ \
+ __ fld_d(MemOperand(ebx, 4 + kDoubleSize)); \
+ /* Put first operand into stack for function call */ \
+ __ fstp_d(Operand(esp, 0 * kDoubleSize)); \
+ /* Load second operand from original stack */ \
+ __ fld_d(MemOperand(ebx, 4)); \
+ /* Put second operand into stack for function call */ \
+ __ fstp_d(Operand(esp, 1 * kDoubleSize)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 4); \
+    /* Restore ebx. */                                                        \
+ __ pop(ebx); \
+ /* Return value is in st(0) on x87. */ \
+ __ lea(esp, Operand(esp, 2 * kDoubleSize)); \
+ } while (false)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+    /* Save esp into ebx. */                                                  \
+ __ push(ebx); \
+ __ mov(ebx, esp); \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(2, eax); \
+ __ fstp(0); \
+ /* Load operand from original stack */ \
+ __ fld_d(MemOperand(ebx, 4)); \
+ /* Put operand into stack for function call */ \
+ __ fstp_d(Operand(esp, 0)); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
+ 2); \
+    /* Restore ebx. */                                                        \
+ __ pop(ebx); \
+ /* Return value is in st(0) on x87. */ \
+ __ lea(esp, Operand(esp, kDoubleSize)); \
+ } while (false)
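
Both macros implement ordinary ia32 cdecl calls: each double occupies two 4-byte stack words, hence PrepareCallCFunction(4, eax) for the binop (two doubles) and (2, eax) for the unop (one double), with the result coming back on the x87 stack in st(0). A sketch of the callee's C-level shape (std::acos stands in for the ieee754 routine):

#include <cmath>

// On ia32 the double argument arrives on the stack and the result is
// returned in st(0), matching the fstp/fld dance in the macros above.
double Ieee754Acos(double x) { return std::acos(x); }
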
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
}
-// For insert fninit/fld1 instructions after the Prologue
-thread_local bool is_block_0 = false;
-
-void CodeGenerator::AssembleSetupStackPointer() { is_block_0 = true; }
-
-void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta > 0) {
- __ add(esp, Immediate(sp_slot_delta * kPointerSize));
- }
- frame_access_state()->SetFrameAccessToDefault();
-}
-
-
-void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
- int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
- if (sp_slot_delta < 0) {
- __ sub(esp, Immediate(-sp_slot_delta * kPointerSize));
- frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
- }
+void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ mov(ebp, MemOperand(ebp, 0));
}
@@ -433,19 +476,75 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
__ bind(&done);
}
+namespace {
+
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
+ ZoneVector<MoveOperands*> pushes(zone());
+ GetPushCompatibleMoves(instr, flags, &pushes);
+
+ if (!pushes.empty() &&
+ (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
+ first_unused_stack_slot)) {
+ X87OperandConverter g(this, instr);
+ for (auto move : pushes) {
+ LocationOperand destination_location(
+ LocationOperand::cast(move->destination()));
+ InstructionOperand source(move->source());
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ destination_location.index());
+ if (source.IsStackSlot()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ push(g.SlotToOperand(source_location.index()));
+ } else if (source.IsRegister()) {
+ LocationOperand source_location(LocationOperand::cast(source));
+ __ push(source_location.GetRegister());
+ } else if (source.IsImmediate()) {
+ __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
+ } else {
+        // Pushes of non-scalar data types are not supported.
+ UNIMPLEMENTED();
+ }
+ frame_access_state()->IncreaseSPDelta(1);
+ move->Eliminate();
+ }
+ }
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_stack_slot) {
+ AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ first_unused_stack_slot);
+}
+
// Assembles an instruction after register allocation, producing machine code.
-void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
X87OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
- // Workaround for CL #35139 (https://codereview.chromium.org/1775323002)
- if (is_block_0) {
- __ fninit();
- __ fld1();
- is_block_0 = false;
- }
-
switch (arch_opcode) {
case kArchCallCodeObject: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -463,7 +562,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
RecordCallPosition(instr);
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -484,8 +583,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
@@ -499,6 +596,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ jmp(reg);
}
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!HasImmediateInput(instr, 0));
+ Register reg = i.InputRegister(0);
+ __ jmp(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
@@ -516,7 +622,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
RecordCallPosition(instr);
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -543,14 +649,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
- int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
- AssembleDeconstructActivationRecord(stack_param_delta);
if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
}
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchPrepareCallCFunction: {
@@ -561,7 +666,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArchPrepareTailCall:
- AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
+ AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -577,7 +682,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ CallCFunction(func, num_parameters);
}
bool double_result =
- instr->HasOutput() && instr->Output()->IsDoubleRegister();
+ instr->HasOutput() && instr->Output()->IsFPRegister();
if (double_result) {
__ lea(esp, Operand(esp, -kDoubleSize));
__ fstp_d(Operand(esp, 0));
@@ -602,6 +707,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
+ case kArchComment: {
+ Address comment_string = i.InputExternalReference(0).address();
+ __ RecordComment(reinterpret_cast<const char*>(comment_string));
+ break;
+ }
+ case kArchDebugBreak:
+ __ int3();
+ break;
+ case kArchImpossible:
+ __ Abort(kConversionFromImpossibleValue);
+ break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
@@ -612,7 +728,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
int double_register_param_count = 0;
int x87_layout = 0;
for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsDoubleRegister()) {
+ if (instr->InputAt(i)->IsFPRegister()) {
double_register_param_count++;
}
}
@@ -630,7 +746,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Deoptimizer::BailoutType bailout_type =
Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
- AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ CodeGenResult result =
+ AssembleDeoptimizerCall(deopt_state_id, bailout_type);
+ if (result != kSuccess) return result;
break;
}
case kArchRet:
@@ -650,11 +768,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kArchTruncateDoubleToI: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister());
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
@@ -689,6 +807,84 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Cos:
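+ // 0x027F switches the x87 to 53-bit (double) precision for the ieee754
+ // helper; 0x037F restores the default 64-bit extended precision.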
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(cos);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Expm1:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow: {
+ // Keep the x87 FPU stack empty before calling stub code.
+ __ fstp(0);
+ // Call MathPowStub and leave the return value in stX_0.
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+ __ CallStub(&stub);
+ // Return value is in st(0) on x87.
+ __ lea(esp, Operand(esp, 2 * kDoubleSize));
+ break;
+ }
+ case kIeee754Float64Sin:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(sin);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ __ X87SetFPUCW(0x027F);
+ ASSEMBLE_IEEE754_UNOP(tan);
+ __ X87SetFPUCW(0x037F);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -900,7 +1096,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -979,110 +1175,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ X87SetFPUCW(0x037F);
break;
}
- case kX87Float32Max: {
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = below;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
- // At least one NaN.
- // Return the second operands if one of the two operands is NaN
- __ j(parity_even, &return_right, Label::kNear);
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- __ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
-
- __ fadd(1);
- __ jmp(&return_left, Label::kNear);
-
- __ bind(&return_right);
- __ fxch();
-
- __ bind(&return_left);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
- case kX87Float32Min: {
- Label check_nan_left, check_zero, return_left, return_right;
- Condition condition = above;
+ case kX87Float32Sqrt: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
- // At least one NaN.
- // Return the second operands if one of the two operands is NaN
- __ j(parity_even, &return_right, Label::kNear);
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
-
- __ bind(&check_zero);
- __ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
- // At this point, both left and right are either 0 or -0.
- // Push st0 and st1 to stack, then pop them to temp registers and OR them,
- // load it to left.
- __ push(eax);
- __ fld(1);
- __ fld(1);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fstp_s(MemOperand(esp, kPointerSize));
- __ pop(eax);
- __ xor_(MemOperand(esp, 0), eax);
- __ fstp(0);
__ fld_s(MemOperand(esp, 0));
- __ pop(eax); // restore esp
- __ pop(eax); // restore esp
- __ jmp(&return_left, Label::kNear);
-
-
- __ bind(&return_right);
- __ fxch();
-
- __ bind(&return_left);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
+ __ fsqrt();
+ __ lea(esp, Operand(esp, kFloatSize));
break;
}
- case kX87Float32Sqrt: {
+ case kX87Float32Abs: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
- __ fsqrt();
+ __ fabs();
__ lea(esp, Operand(esp, kFloatSize));
break;
}
- case kX87Float32Abs: {
+ case kX87Float32Neg: {
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
__ fstp(0);
__ fld_s(MemOperand(esp, 0));
- __ fabs();
+ __ fchs();
__ lea(esp, Operand(esp, kFloatSize));
break;
}
@@ -1092,10 +1212,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Set the correct round mode in x87 control register
__ X87SetRC((mode << 10));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1180,9 +1300,44 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
+ case kX87Float32Max: {
+ Label compare_swap, done_compare;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
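+ // Operands compare equal, possibly +0 vs -0: break the tie on the sign
+ // bit so Max returns +0.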
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ fld(1);
+ __ fstp_s(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 0));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kFloatSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
+
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
+
+ __ bind(&done_compare);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
case kX87Float64Max: {
- Label check_zero, return_left, return_right;
- Condition condition = below;
+ Label compare_swap, done_compare;
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1192,29 +1347,69 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &return_right,
- Label::kNear); // At least one NaN, Return right.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
- __ bind(&check_zero);
- __ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(below, &done_compare, Label::kNear);
+ __ j(above, &compare_swap, Label::kNear);
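+ // Equal operands: same +0/-0 tie-break as kX87Float32Max, on the
+ // double's high word.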
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ fld(1);
+ __ fstp_d(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 4));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
- __ bind(&return_right);
- __ fxch();
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
- __ bind(&return_left);
+ __ bind(&done_compare);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
+ case kX87Float32Min: {
+ Label compare_swap, done_compare;
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 0));
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+
+ auto ool =
+ new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
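+ // Operands compare equal, possibly +0 vs -0: use the sign bit so Min
+ // returns -0.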
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ fld(0);
+ __ fstp_s(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 0));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kFloatSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
+
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
+
+ __ bind(&done_compare);
+ __ fstp(0);
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
+ break;
+ }
case kX87Float64Min: {
- Label check_zero, return_left, return_right;
- Condition condition = above;
+ Label compare_swap, done_compare;
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1224,22 +1419,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ fld(1);
__ fld(1);
__ FCmp();
- __ j(parity_even, &return_right,
- Label::kNear); // At least one NaN, return right value.
- __ j(equal, &check_zero, Label::kNear); // left == right.
- __ j(condition, &return_left, Label::kNear);
- __ jmp(&return_right, Label::kNear);
- __ bind(&check_zero);
+ auto ool =
+ new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
+ __ j(parity_even, ool->entry());
+ __ j(above, &done_compare, Label::kNear);
+ __ j(below, &compare_swap, Label::kNear);
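+ // Equal operands: same +0/-0 tie-break as kX87Float32Min, on the
+ // double's high word.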
+ __ push(eax);
+ __ lea(esp, Operand(esp, -kDoubleSize));
__ fld(0);
- __ fldz();
- __ FCmp();
- __ j(not_equal, &return_left, Label::kNear); // left == right != 0.
+ __ fstp_d(Operand(esp, 0));
+ __ mov(eax, MemOperand(esp, 4));
+ __ and_(eax, Immediate(0x80000000));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ pop(eax);
+ __ j(zero, &done_compare, Label::kNear);
- __ bind(&return_right);
- __ fxch();
+ __ bind(&compare_swap);
+ __ bind(ool->exit());
+ __ fxch(1);
- __ bind(&return_left);
+ __ bind(&done_compare);
__ fstp(0);
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
@@ -1254,6 +1454,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, kDoubleSize));
break;
}
+ case kX87Float64Neg: {
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ fchs();
+ __ lea(esp, Operand(esp, kDoubleSize));
+ break;
+ }
case kX87Int32ToFloat32: {
InstructionOperand* input = instr->InputAt(0);
DCHECK(input->IsRegister() || input->IsStackSlot());
@@ -1333,13 +1543,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kX87Float32ToFloat64: {
InstructionOperand* input = instr->InputAt(0);
- if (input->IsDoubleRegister()) {
+ if (input->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1357,54 +1567,58 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Float32ToInt32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_s(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float32ToUint32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_s(i.InputOperand(0));
}
Label success;
__ TruncateX87TOSToI(i.OutputRegister(0));
__ test(i.OutputRegister(0), i.OutputRegister(0));
__ j(positive, &success);
+ // Need to preserve the input float32 data.
+ __ fld(0);
__ push(Immediate(INT32_MIN));
__ fild_s(Operand(esp, 0));
__ lea(esp, Operand(esp, kPointerSize));
__ faddp();
__ TruncateX87TOSToI(i.OutputRegister(0));
__ or_(i.OutputRegister(0), Immediate(0x80000000));
+ // Keep only the input float32 data on the x87 stack when returning.
+ __ fstp(0);
__ bind(&success);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ToInt32: {
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ToFloat32: {
InstructionOperand* input = instr->InputAt(0);
- if (input->IsDoubleRegister()) {
+ if (input->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fstp_s(MemOperand(esp, 0));
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1419,7 +1633,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kX87Float64ToUint32: {
__ push_imm32(-2147483648);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fld_d(i.InputOperand(0));
}
__ fild_s(Operand(esp, 0));
@@ -1429,13 +1643,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(esp, Immediate(kInt32Size));
__ add(i.OutputRegister(), Immediate(0x80000000));
__ fstp(0);
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
__ fstp(0);
}
break;
}
case kX87Float64ExtractHighWord32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
@@ -1443,13 +1657,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
}
break;
}
case kX87Float64ExtractLowWord32: {
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(MemOperand(esp, 0));
__ mov(i.OutputRegister(), MemOperand(esp, 0));
@@ -1457,7 +1671,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
__ mov(i.OutputRegister(), i.InputOperand(0));
}
break;
@@ -1496,10 +1710,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Set the correct round mode in x87 control register
__ X87SetRC((mode << 10));
- if (!instr->InputAt(0)->IsDoubleRegister()) {
+ if (!instr->InputAt(0)->IsFPRegister()) {
InstructionOperand* input = instr->InputAt(0);
USE(input);
- DCHECK(input->IsDoubleStackSlot());
+ DCHECK(input->IsFPStackSlot());
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
__ VerifyX87StackDepth(1);
}
@@ -1517,6 +1731,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, 2 * kDoubleSize));
break;
}
+ case kX87Float64SilenceNaN: {
+ Label end, return_qnan;
+ __ fstp(0);
+ __ push(ebx);
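+ // With ebx pushed, the input double sits at esp + kInt32Size and its
+ // high word at esp + 2 * kInt32Size.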
+ // Load the high word of HoleNaN (a signaling NaN) into ebx.
+ __ mov(ebx, MemOperand(esp, 2 * kInt32Size));
+ __ cmp(ebx, Immediate(kHoleNanUpper32));
+ // Check whether the input is HoleNaN (sNaN).
+ __ j(equal, &return_qnan, Label::kNear);
+ // If the input isn't HoleNaN, just load it and return.
+ __ fld_d(MemOperand(esp, 1 * kInt32Size));
+ __ jmp(&end);
+ __ bind(&return_qnan);
+ // The input is HoleNaN; return a quiet NaN instead.
+ __ push(Immediate(0xffffffff));
+ __ push(Immediate(0xfff7ffff));
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ __ bind(&end);
+ __ pop(ebx);
+ // Clear stack.
+ __ lea(esp, Operand(esp, 1 * kDoubleSize));
+ break;
+ }
case kX87Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
@@ -1652,30 +1890,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kX87Push:
- if (instr->InputAt(0)->IsDoubleRegister()) {
+ if (instr->InputAt(0)->IsFPRegister()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ fst_s(Operand(esp, 0));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else {
DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fst_d(Operand(esp, 0));
- }
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- } else if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ }
+ } else if (instr->InputAt(0)->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kFloatSize));
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else {
DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
__ sub(esp, Immediate(kDoubleSize));
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
- }
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ }
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
@@ -1693,12 +1933,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kX87Xchgb: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_b(i.InputRegister(index), operand);
+ break;
+ }
+ case kX87Xchgw: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg_w(i.InputRegister(index), operand);
+ break;
+ }
+ case kX87Xchgl: {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ xchg(i.InputRegister(index), operand);
+ break;
+ }
case kX87PushFloat32:
__ lea(esp, Operand(esp, -kFloatSize));
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ fld_s(i.InputOperand(0));
__ fstp_s(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ fst_s(MemOperand(esp, 0));
} else {
UNREACHABLE();
@@ -1706,10 +1964,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
case kX87PushFloat64:
__ lea(esp, Operand(esp, -kDoubleSize));
- if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ if (instr->InputAt(0)->IsFPStackSlot()) {
__ fld_d(i.InputOperand(0));
__ fstp_d(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsDoubleRegister()) {
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ fst_d(MemOperand(esp, 0));
} else {
UNREACHABLE();
@@ -1731,10 +1989,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s, OutOfLineLoadFloat32NaN);
break;
case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d);
+ ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d, OutOfLineLoadFloat64NaN);
break;
case kCheckedStoreWord8:
ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -1761,7 +2019,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord64:
UNREACHABLE(); // currently unsupported checked int64 load/store.
break;
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
+ UNREACHABLE(); // Won't be generated by instruction selector.
+ break;
}
+ return kSuccess;
} // NOLINT(readability/fn_size)
@@ -1837,7 +2106,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
int double_register_param_count = 0;
int x87_layout = 0;
for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsDoubleRegister()) {
+ if (instr->InputAt(i)->IsFPRegister()) {
double_register_param_count++;
}
}
@@ -1971,12 +2240,16 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ jmp(Operand::JumpTable(input, times_4, table));
}
-
-void CodeGenerator::AssembleDeoptimizerCall(
+CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
+ if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
+ DeoptimizeReason deoptimization_reason =
+ GetDeoptimizationReason(deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, 0, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+ return kSuccess;
}
@@ -2107,8 +2380,25 @@ void CodeGenerator::AssembleDeoptimizerCall(
// | RET | args | caller frame |
// ^ esp ^ ebp
+void CodeGenerator::FinishFrame(Frame* frame) {
+ CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const RegList saves = descriptor->CalleeSavedRegisters();
+ if (saves != 0) { // Save callee-saved registers.
+ DCHECK(!info()->is_osr());
+ int pushed = 0;
+ for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+ if (!((1 << i) & saves)) continue;
+ ++pushed;
+ }
+ frame->AllocateSavedCalleeRegisterSlots(pushed);
+ }
+
+ // Initialize FPU state.
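+ // fninit resets the FPU; fld1 pushes one value so the stack matches the
+ // backend's expected depth of one (cf. VerifyX87StackDepth(1)).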
+ __ fninit();
+ __ fld1();
+}
-void CodeGenerator::AssemblePrologue() {
+void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
@@ -2120,7 +2410,9 @@ void CodeGenerator::AssemblePrologue() {
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int stack_shrink_slots = frame()->GetSpillSlotCount();
+
+ int shrink_slots = frame()->GetSpillSlotCount();
+
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2131,7 +2423,7 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
// Initialize FPU state.
__ fninit();
@@ -2139,8 +2431,8 @@ void CodeGenerator::AssemblePrologue() {
}
const RegList saves = descriptor->CalleeSavedRegisters();
- if (stack_shrink_slots > 0) {
- __ sub(esp, Immediate(stack_shrink_slots * kPointerSize));
+ if (shrink_slots > 0) {
+ __ sub(esp, Immediate(shrink_slots * kPointerSize));
}
if (saves != 0) { // Save callee-saved registers.
@@ -2151,7 +2443,6 @@ void CodeGenerator::AssemblePrologue() {
__ push(Register::from_code(i));
++pushed;
}
- frame()->AllocateSavedCalleeRegisterSlots(pushed);
}
}
@@ -2263,7 +2554,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kInt32Size));
__ mov(MemOperand(esp, 0), Immediate(src));
// Always push only one value onto the x87 stack.
@@ -2271,7 +2562,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ fld_s(MemOperand(esp, 0));
__ add(esp, Immediate(kInt32Size));
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
__ Move(dst, Immediate(src));
}
@@ -2280,7 +2571,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ mov(MemOperand(esp, 0), Immediate(lower));
__ mov(MemOperand(esp, kInt32Size), Immediate(upper));
@@ -2289,15 +2580,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ fld_d(MemOperand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
} else {
- DCHECK(destination->IsDoubleStackSlot());
+ DCHECK(destination->IsFPStackSlot());
Operand dst0 = g.ToOperand(destination);
Operand dst1 = g.HighOperand(destination);
__ Move(dst0, Immediate(lower));
__ Move(dst1, Immediate(upper));
}
}
- } else if (source->IsDoubleRegister()) {
- DCHECK(destination->IsDoubleStackSlot());
+ } else if (source->IsFPRegister()) {
+ DCHECK(destination->IsFPStackSlot());
Operand dst = g.ToOperand(destination);
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
@@ -2310,11 +2601,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
default:
UNREACHABLE();
}
- } else if (source->IsDoubleStackSlot()) {
- DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
Operand src = g.ToOperand(source);
auto allocated = AllocatedOperand::cast(*source);
- if (destination->IsDoubleRegister()) {
+ if (destination->IsFPRegister()) {
// Always push only one value onto the x87 stack.
__ fstp(0);
switch (allocated.representation()) {
@@ -2373,9 +2664,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
frame_access_state()->IncreaseSPDelta(-1);
Operand src2 = g.ToOperand(source);
__ pop(src2);
- } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ } else if (source->IsFPRegister() && destination->IsFPRegister()) {
UNREACHABLE();
- } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
case MachineRepresentation::kFloat32:
@@ -2391,7 +2682,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
default:
UNREACHABLE();
}
- } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
auto allocated = AllocatedOperand::cast(*source);
switch (allocated.representation()) {
case MachineRepresentation::kFloat32:
@@ -2423,9 +2714,6 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
-
-
void CodeGenerator::EnsureSpaceForLazyDeopt() {
if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
return;
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
index d70a737023..5f527fd43f 100644
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ b/deps/v8/src/compiler/x87/instruction-codes-x87.h
@@ -49,9 +49,8 @@ namespace compiler {
V(X87Float32Sub) \
V(X87Float32Mul) \
V(X87Float32Div) \
- V(X87Float32Max) \
- V(X87Float32Min) \
V(X87Float32Abs) \
+ V(X87Float32Neg) \
V(X87Float32Sqrt) \
V(X87Float32Round) \
V(X87LoadFloat64Constant) \
@@ -60,9 +59,12 @@ namespace compiler {
V(X87Float64Mul) \
V(X87Float64Div) \
V(X87Float64Mod) \
+ V(X87Float32Max) \
V(X87Float64Max) \
+ V(X87Float32Min) \
V(X87Float64Min) \
V(X87Float64Abs) \
+ V(X87Float64Neg) \
V(X87Int32ToFloat32) \
V(X87Uint32ToFloat32) \
V(X87Int32ToFloat64) \
@@ -80,6 +82,7 @@ namespace compiler {
V(X87Float64Sqrt) \
V(X87Float64Round) \
V(X87Float64Cmp) \
+ V(X87Float64SilenceNaN) \
V(X87Movsxbl) \
V(X87Movzxbl) \
V(X87Movb) \
@@ -96,7 +99,10 @@ namespace compiler {
V(X87PushFloat64) \
V(X87PushFloat32) \
V(X87Poke) \
- V(X87StackCheck)
+ V(X87StackCheck) \
+ V(X87Xchgb) \
+ V(X87Xchgw) \
+ V(X87Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index f5376bc3d4..0fe6a4b704 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -64,13 +64,20 @@ class X87OperandGenerator final : public OperandGenerator {
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
return true;
case IrOpcode::kHeapConstant: {
+// TODO(bmeurer): We must not dereference handles concurrently. If we
+// really have to do this here, then we need to find a way to put this
+// information on the HeapConstant node already.
+#if 0
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
+#endif
}
default:
return false;
@@ -79,12 +86,16 @@ class X87OperandGenerator final : public OperandGenerator {
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
Node* displacement_node,
+ DisplacementMode displacement_mode,
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == nullptr)
? 0
: OpParameter<int32_t>(displacement_node);
+ if (displacement_mode == kNegativeDisplacement) {
+ displacement = -displacement;
+ }
if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
displacement += OpParameter<int32_t>(base);
@@ -139,11 +150,12 @@ class X87OperandGenerator final : public OperandGenerator {
AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
InstructionOperand inputs[],
size_t* input_count) {
- BaseWithIndexAndDisplacement32Matcher m(node, true);
+ BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
DCHECK(m.matches());
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
- return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
- m.displacement(), inputs, input_count);
+ return GenerateMemoryOperandInputs(
+ m.index(), m.scale(), m.base(), m.displacement(),
+ m.displacement_mode(), inputs, input_count);
} else {
inputs[(*input_count)++] = UseRegister(node->InputAt(0));
inputs[(*input_count)++] = UseRegister(node->InputAt(1));
@@ -175,7 +187,9 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kX87Movsxwl : kX87Movzxwl;
break;
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kX87Movl;
break;
@@ -259,7 +273,9 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord16:
opcode = kX87Movw;
break;
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
opcode = kX87Movl;
break;
@@ -292,6 +308,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+// The architecture supports unaligned access, therefore VisitLoad is used instead.
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+// The architecture supports unaligned access, therefore VisitStore is used instead.
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitCheckedLoad(Node* node) {
CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
@@ -316,10 +337,12 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -363,10 +386,12 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -447,7 +472,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -531,12 +556,14 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
}
void EmitLea(InstructionSelector* selector, Node* result, Node* index,
- int scale, Node* base, Node* displacement) {
+ int scale, Node* base, Node* displacement,
+ DisplacementMode displacement_mode) {
X87OperandGenerator g(selector);
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode mode = g.GenerateMemoryOperandInputs(
- index, scale, base, displacement, inputs, &input_count);
+ AddressingMode mode =
+ g.GenerateMemoryOperandInputs(index, scale, base, displacement,
+ displacement_mode, inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
@@ -557,7 +584,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr);
+ EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
return;
}
VisitShift(this, node, kX87Shl);
@@ -677,6 +704,9 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) {
X87OperandGenerator g(this);
@@ -694,7 +724,8 @@ void InstructionSelector::VisitInt32Add(Node* node) {
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode = g.GenerateMemoryOperandInputs(
- m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
+ m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
+ inputs, &input_count);
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
@@ -728,7 +759,7 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
if (m.matches()) {
Node* index = node->InputAt(0);
Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr);
+ EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
return;
}
X87OperandGenerator g(this);
@@ -846,21 +877,15 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
g.Use(node->InputAt(0)));
}
-
-void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
X87OperandGenerator g(this);
+ Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
- switch (TruncationModeOf(node->op())) {
- case TruncationMode::kJavaScript:
- Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
- return;
- case TruncationMode::kRoundToZero:
- Emit(kX87Float64ToInt32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
- return;
- }
- UNREACHABLE();
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
@@ -900,7 +925,6 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
-
void InstructionSelector::VisitFloat64Sub(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -908,7 +932,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
-
void InstructionSelector::VisitFloat32Mul(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -949,7 +972,6 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
Emit(kX87Float64Mod, g.DefineAsFixed(node, stX_0), 1, temps)->MarkAsCall();
}
-
void InstructionSelector::VisitFloat32Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -957,7 +979,6 @@ void InstructionSelector::VisitFloat32Max(Node* node) {
Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
-
void InstructionSelector::VisitFloat64Max(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -965,7 +986,6 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
-
void InstructionSelector::VisitFloat32Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -973,7 +993,6 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
-
void InstructionSelector::VisitFloat64Min(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -995,7 +1014,6 @@ void InstructionSelector::VisitFloat64Abs(Node* node) {
Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
}
-
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
@@ -1070,6 +1088,32 @@ void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float32Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float64Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
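+  // Operands travel via the x87 stack and the helper is a genuine call,
+  // hence MarkAsCall on the emitted instruction.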
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
+ Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
+}
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
@@ -1081,7 +1125,7 @@ void InstructionSelector::EmitPrepareArguments(
InstructionOperand temps[] = {g.TempRegister()};
size_t const temp_count = arraysize(temps);
Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
0, nullptr, 0, nullptr, temp_count, temps);
// Poke any stack arguments.
@@ -1104,7 +1148,7 @@ void InstructionSelector::EmitPrepareArguments(
g.CanBeImmediate(input.node())
? g.UseImmediate(input.node())
: IsSupported(ATOM) ||
- sequence()->IsFloat(GetVirtualRegister(input.node()))
+ sequence()->IsFP(GetVirtualRegister(input.node()))
? g.UseRegister(input.node())
: g.Use(input.node());
Emit(kX87Push, g.NoOutput(), value);
@@ -1139,7 +1183,7 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
InstructionOperand output = g.DefineAsRegister(cont->result());
@@ -1157,7 +1201,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
selector->Emit(opcode, g.NoOutput(), left, right,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right,
+ selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1180,10 +1224,7 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
- Node* right) {
- if (opcode != kX87Cmp && opcode != kX87Test) {
- return opcode;
- }
+ Node* right, FlagsContinuation* cont) {
// Currently, if one of the two operands is not a Load, we don't know what its
// machine representation is, so we bail out.
// TODO(epertoso): we can probably get some size information out of immediates
@@ -1193,19 +1234,39 @@ InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
}
// If the load representations don't match, both operands will be
// zero/sign-extended to 32 bits.
- LoadRepresentation left_representation = LoadRepresentationOf(left->op());
- if (left_representation != LoadRepresentationOf(right->op())) {
- return opcode;
- }
- switch (left_representation.representation()) {
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8:
- return opcode == kX87Cmp ? kX87Cmp8 : kX87Test8;
- case MachineRepresentation::kWord16:
- return opcode == kX87Cmp ? kX87Cmp16 : kX87Test16;
- default:
- return opcode;
+ MachineType left_type = LoadRepresentationOf(left->op());
+ MachineType right_type = LoadRepresentationOf(right->op());
+ if (left_type == right_type) {
+ switch (left_type.representation()) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8: {
+ if (opcode == kX87Test) return kX87Test8;
+ if (opcode == kX87Cmp) {
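+ // Once the compare is narrowed to 8 bits, zero-extended (Uint32)
+ // operands must use unsigned flag conditions.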
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kX87Cmp8;
+ }
+ break;
+ }
+ case MachineRepresentation::kWord16:
+ if (opcode == kX87Test) return kX87Test16;
+ if (opcode == kX87Cmp) {
+ if (left_type.semantic() == MachineSemantic::kUint32) {
+ cont->OverwriteUnsignedIfSigned();
+ } else {
+ CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
+ }
+ return kX87Cmp16;
+ }
+ break;
+ default:
+ break;
+ }
}
+ return opcode;
}
// Shared routine for multiple float32 compare operations (inputs commuted).
@@ -1220,7 +1281,7 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float32Cmp),
@@ -1241,7 +1302,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->frame_state());
+ cont->reason(), cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kX87Float64Cmp),
@@ -1256,7 +1317,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- InstructionCode narrowed_opcode = TryNarrowOpcodeSize(opcode, left, right);
+ InstructionCode narrowed_opcode =
+ TryNarrowOpcodeSize(opcode, left, right, cont);
int effect_level = selector->GetEffectLevel(node);
if (cont->IsBranch()) {
@@ -1320,7 +1382,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr,
+ selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
} else {
DCHECK(cont->IsSet());
@@ -1401,6 +1463,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kInt32SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitBinop(selector, node, kX87Sub, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(selector, node, kX87Imul, cont);
default:
break;
}
@@ -1432,14 +1497,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kNotEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForDeoptimize(kEqual, node->InputAt(1));
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1526,6 +1591,14 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
VisitBinop(this, node, kX87Sub, &cont);
}
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kX87Imul, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kX87Imul, &cont);
+}
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
@@ -1598,15 +1671,63 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87Float64SilenceNaN, g.DefineAsFixed(node, stX_0), 0, nullptr);
+}
+
+void InstructionSelector::VisitAtomicLoad(Node* node) {
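+  // Aligned loads up to word size are naturally atomic on IA-32, so the
+  // regular load sequence suffices.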
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
+ load_rep.representation() == MachineRepresentation::kWord16 ||
+ load_rep.representation() == MachineRepresentation::kWord32);
+ USE(load_rep);
+ VisitLoad(node);
+}
+
+void InstructionSelector::VisitAtomicStore(Node* node) {
+ X87OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
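+ // xchg with a memory operand carries an implicit lock prefix on IA-32,
+ // so a plain exchange serves as a sequentially consistent atomic store.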
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kX87Xchgb;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX87Xchgw;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX87Xchgl;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, input_count, inputs);
+}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kFloat32Max |
- MachineOperatorBuilder::kFloat32Min |
- MachineOperatorBuilder::kFloat64Max |
- MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt;
@@ -1623,6 +1744,13 @@ InstructionSelector::SupportedMachineOperatorFlags() {
return flags;
}
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/context-measure.cc b/deps/v8/src/context-measure.cc
index 342362983d..00c11eebc2 100644
--- a/deps/v8/src/context-measure.cc
+++ b/deps/v8/src/context-measure.cc
@@ -18,7 +18,7 @@ ContextMeasure::ContextMeasure(Context* context)
count_(0),
size_(0) {
DCHECK(context_->IsNativeContext());
- Object* next_link = context_->get(Context::NEXT_CONTEXT_LINK);
+ Object* next_link = context_->next_context_link();
MeasureObject(context_);
MeasureDeferredObjects();
context_->set(Context::NEXT_CONTEXT_LINK, next_link);
@@ -37,10 +37,10 @@ bool ContextMeasure::IsShared(HeapObject* object) {
void ContextMeasure::MeasureObject(HeapObject* object) {
- if (back_reference_map_.Lookup(object).is_valid()) return;
+ if (reference_map_.Lookup(object).is_valid()) return;
if (root_index_map_.Lookup(object) != RootIndexMap::kInvalidRootIndex) return;
if (IsShared(object)) return;
- back_reference_map_.Add(object, BackReference::DummyReference());
+ reference_map_.Add(object, SerializerReference::DummyReference());
recursion_depth_++;
if (recursion_depth_ > kMaxRecursion) {
deferred_objects_.Add(object);
diff --git a/deps/v8/src/context-measure.h b/deps/v8/src/context-measure.h
index 665c547912..7e94f2c00c 100644
--- a/deps/v8/src/context-measure.h
+++ b/deps/v8/src/context-measure.h
@@ -29,7 +29,7 @@ class ContextMeasure : public ObjectVisitor {
Context* context_;
- BackReferenceMap back_reference_map_;
+ SerializerReferenceMap reference_map_;
RootIndexMap root_index_map_;
static const int kMaxRecursion = 16;
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 344d5db578..ce2c97be75 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -56,8 +56,9 @@ Context* Context::previous() {
}
void Context::set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
+Object* Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
-bool Context::has_extension() { return !extension()->IsTheHole(); }
+bool Context::has_extension() { return !extension()->IsTheHole(GetIsolate()); }
HeapObject* Context::extension() {
return HeapObject::cast(get(EXTENSION_INDEX));
}
@@ -66,10 +67,6 @@ void Context::set_extension(HeapObject* object) {
}
-JSModule* Context::module() { return JSModule::cast(get(EXTENSION_INDEX)); }
-void Context::set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
-
-
Context* Context::native_context() {
Object* result = get(NATIVE_CONTEXT_INDEX);
DCHECK(IsBootstrappingOrNativeContext(this->GetIsolate(), result));
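A pattern that recurs throughout this update, visible in has_extension() above: oddball predicates such as IsTheHole() and IsUndefined() now take the Isolate* explicitly instead of looking it up internally. A hedged within-tree sketch of a call site (assumes V8-internal headers; not compilable standalone):

    // Before: object->IsTheHole();           // isolate derived per call
    // After:  object->IsTheHole(isolate);    // isolate threaded by caller
    Isolate* isolate = context->GetIsolate(); // fetch once...
    if (!context->extension()->IsTheHole(isolate)) {
      // ...then reuse it for every predicate in the scope.
    }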
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 67a9fea8b8..b3cf255736 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -4,7 +4,6 @@
#include "src/contexts.h"
-#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
#include "src/isolate-inl.h"
@@ -92,7 +91,7 @@ Context* Context::closure_context() {
JSObject* Context::extension_object() {
DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
HeapObject* object = extension();
- if (object->IsTheHole()) return nullptr;
+ if (object->IsTheHole(GetIsolate())) return nullptr;
if (IsBlockContext()) {
if (!object->IsSloppyBlockWithEvalContextExtension()) return nullptr;
object = SloppyBlockWithEvalContextExtension::cast(object)->extension();
@@ -177,58 +176,15 @@ static Maybe<bool> UnscopableLookup(LookupIterator* it) {
return Just(!blacklist->BooleanValue());
}
-static void GetAttributesAndBindingFlags(VariableMode mode,
- InitializationFlag init_flag,
- PropertyAttributes* attributes,
- BindingFlags* binding_flags) {
- switch (mode) {
- case VAR:
- *attributes = NONE;
- *binding_flags = MUTABLE_IS_INITIALIZED;
- break;
- case LET:
- *attributes = NONE;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? MUTABLE_CHECK_INITIALIZED
- : MUTABLE_IS_INITIALIZED;
- break;
- case CONST_LEGACY:
- *attributes = READ_ONLY;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? IMMUTABLE_CHECK_INITIALIZED
- : IMMUTABLE_IS_INITIALIZED;
- break;
- case CONST:
- *attributes = READ_ONLY;
- *binding_flags = (init_flag == kNeedsInitialization)
- ? IMMUTABLE_CHECK_INITIALIZED_HARMONY
- : IMMUTABLE_IS_INITIALIZED_HARMONY;
- break;
- case IMPORT:
- // TODO(ES6)
- UNREACHABLE();
- break;
- case DYNAMIC:
- case DYNAMIC_GLOBAL:
- case DYNAMIC_LOCAL:
- case TEMPORARY:
- // Note: Fixed context slots are statically allocated by the compiler.
- // Statically allocated variables always have a statically known mode,
- // which is the mode with which they were declared when added to the
- // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
- // declared variables that were introduced through declaration nodes)
- // must not appear here.
- UNREACHABLE();
- break;
- }
+static PropertyAttributes GetAttributesForMode(VariableMode mode) {
+ DCHECK(IsDeclaredVariableMode(mode));
+ return IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
}
-
-Handle<Object> Context::Lookup(Handle<String> name,
- ContextLookupFlags flags,
- int* index,
- PropertyAttributes* attributes,
- BindingFlags* binding_flags) {
+Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
+ int* index, PropertyAttributes* attributes,
+ InitializationFlag* init_flag,
+ VariableMode* variable_mode) {
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
@@ -236,7 +192,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
bool failed_whitelist = false;
*index = kNotFound;
*attributes = ABSENT;
- *binding_flags = MISSING_BINDING;
+ *init_flag = kCreatedInitialized;
+ *variable_mode = VAR;
if (FLAG_trace_contexts) {
PrintF("Context::Lookup(");
@@ -275,8 +232,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
r.context_index, reinterpret_cast<void*>(*c));
}
*index = r.slot_index;
- GetAttributesAndBindingFlags(r.mode, r.init_flag, attributes,
- binding_flags);
+ *variable_mode = r.mode;
+ *init_flag = r.init_flag;
+ *attributes = GetAttributesForMode(r.mode);
return ScriptContextTable::GetContext(script_contexts,
r.context_index);
}
@@ -331,12 +289,10 @@ Handle<Object> Context::Lookup(Handle<String> name,
? context->closure()->shared()->scope_info()
: context->scope_info());
VariableMode mode;
- InitializationFlag init_flag;
- // TODO(sigurds) Figure out whether maybe_assigned_flag should
- // be used to compute binding_flags.
+ InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
- int slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
+ int slot_index = ScopeInfo::ContextSlotIndex(scope_info, name, &mode,
+ &flag, &maybe_assigned_flag);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
if (FLAG_trace_contexts) {
@@ -344,8 +300,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
slot_index, mode);
}
*index = slot_index;
- GetAttributesAndBindingFlags(mode, init_flag, attributes,
- binding_flags);
+ *variable_mode = mode;
+ *init_flag = flag;
+ *attributes = GetAttributesForMode(mode);
return context;
}
@@ -362,8 +319,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
*index = function_index;
*attributes = READ_ONLY;
DCHECK(mode == CONST_LEGACY || mode == CONST);
- *binding_flags = (mode == CONST_LEGACY)
- ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
+ *init_flag = kCreatedInitialized;
+ *variable_mode = mode;
return context;
}
}
@@ -376,7 +333,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
}
*index = Context::THROWN_OBJECT_INDEX;
*attributes = NONE;
- *binding_flags = MUTABLE_IS_INITIALIZED;
+ *init_flag = kCreatedInitialized;
+ *variable_mode = VAR;
return context;
}
} else if (context->IsDebugEvaluateContext()) {
@@ -394,8 +352,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Check the original context, but do not follow its context chain.
obj = context->get(WRAPPED_CONTEXT_INDEX);
if (obj->IsContext()) {
- Handle<Object> result = Context::cast(obj)->Lookup(
- name, DONT_FOLLOW_CHAINS, index, attributes, binding_flags);
+ Handle<Object> result =
+ Context::cast(obj)->Lookup(name, DONT_FOLLOW_CHAINS, index,
+ attributes, init_flag, variable_mode);
if (!result.is_null()) return result;
}
// Check whitelist. Names that do not pass whitelist shall only resolve
@@ -449,10 +408,11 @@ void Context::InitializeGlobalSlots() {
void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
+ Isolate* isolate = GetIsolate();
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
- while (!element->IsUndefined()) {
+ while (!element->IsUndefined(isolate)) {
CHECK(element != function);
element = JSFunction::cast(element)->next_function_link();
}
@@ -460,25 +420,25 @@ void Context::AddOptimizedFunction(JSFunction* function) {
// Check that the context belongs to the weak native contexts list.
bool found = false;
- Object* context = GetHeap()->native_contexts_list();
- while (!context->IsUndefined()) {
+ Object* context = isolate->heap()->native_contexts_list();
+ while (!context->IsUndefined(isolate)) {
if (context == this) {
found = true;
break;
}
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ context = Context::cast(context)->next_context_link();
}
CHECK(found);
#endif
// If the function link field is already used then the function was
// enqueued as a code flushing candidate and we remove it now.
- if (!function->next_function_link()->IsUndefined()) {
+ if (!function->next_function_link()->IsUndefined(isolate)) {
CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
flusher->EvictCandidate(function);
}
- DCHECK(function->next_function_link()->IsUndefined());
+ DCHECK(function->next_function_link()->IsUndefined(isolate));
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST),
UPDATE_WEAK_WRITE_BARRIER);
@@ -490,9 +450,10 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
JSFunction* prev = NULL;
- while (!element->IsUndefined()) {
+ Isolate* isolate = function->GetIsolate();
+ while (!element->IsUndefined(isolate)) {
JSFunction* element_function = JSFunction::cast(element);
- DCHECK(element_function->next_function_link()->IsUndefined() ||
+ DCHECK(element_function->next_function_link()->IsUndefined(isolate) ||
element_function->next_function_link()->IsJSFunction());
if (element_function == function) {
if (prev == NULL) {
@@ -528,7 +489,7 @@ Object* Context::OptimizedFunctionsListHead() {
void Context::AddOptimizedCode(Code* code) {
DCHECK(IsNativeContext());
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(code->next_code_link()->IsUndefined());
+ DCHECK(code->next_code_link()->IsUndefined(GetIsolate()));
code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
}
@@ -561,7 +522,7 @@ Object* Context::DeoptimizedCodeListHead() {
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
Isolate* isolate = GetIsolate();
Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
- if (!result->IsUndefined()) return result;
+ if (!result->IsUndefined(isolate)) return result;
return isolate->factory()->NewStringFromStaticChars(
"Code generation from strings disallowed for this context");
}
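Context::Lookup now reports the raw VariableMode and InitializationFlag instead of the retired BindingFlags enum, and GetAttributesForMode shows that the attributes fall directly out of the mode. A minimal sketch of how a caller can recover the old semantics from the new out-parameters (helper names are illustrative, not V8's):

    // LET/CONST bindings start out as the hole and must be checked before
    // use; kCreatedInitialized bindings never need the check.
    bool NeedsHoleCheck(InitializationFlag init_flag) {
      return init_flag == kNeedsInitialization;
    }

    // Mirrors GetAttributesForMode above: immutability implies READ_ONLY.
    PropertyAttributes AttributesFor(VariableMode mode) {
      return IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
    }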
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 90fb9a4278..d73135f7a4 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -25,49 +25,6 @@ enum ContextLookupFlags {
};
-// ES5 10.2 defines lexical environments with mutable and immutable bindings.
-// Immutable bindings have two states, initialized and uninitialized, and
-// their state is changed by the InitializeImmutableBinding method. The
-// BindingFlags enum represents information if a binding has definitely been
-// initialized. A mutable binding does not need to be checked and thus has
-// the BindingFlag MUTABLE_IS_INITIALIZED.
-//
-// There are two possibilities for immutable bindings
-// * 'const' declared variables. They are initialized when evaluating the
-// corresponding declaration statement. They need to be checked for being
-// initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
-// * The function name of a named function literal. The binding is immediately
-// initialized when entering the function and thus does not need to be
-// checked. it gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
-// Accessing an uninitialized binding produces the undefined value.
-//
-// The harmony proposal for block scoped bindings also introduces the
-// uninitialized state for mutable bindings.
-// * A 'let' declared variable. They are initialized when evaluating the
-// corresponding declaration statement. They need to be checked for being
-// initialized and thus get the flag MUTABLE_CHECK_INITIALIZED.
-// * A 'var' declared variable. It is initialized immediately upon creation
-// and thus doesn't need to be checked. It gets the flag
-// MUTABLE_IS_INITIALIZED.
-// * Catch bound variables, function parameters and variables introduced by
-// function declarations are initialized immediately and do not need to be
-// checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
-// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
-// an uninitialized binding produces a reference error.
-//
-// In V8 uninitialized bindings are set to the hole value upon creation and set
-// to a different value upon initialization.
-enum BindingFlags {
- MUTABLE_IS_INITIALIZED,
- MUTABLE_CHECK_INITIALIZED,
- IMMUTABLE_IS_INITIALIZED,
- IMMUTABLE_CHECK_INITIALIZED,
- IMMUTABLE_IS_INITIALIZED_HARMONY,
- IMMUTABLE_CHECK_INITIALIZED_HARMONY,
- MISSING_BINDING
-};
-
-
// Heap-allocated activation contexts.
//
// Contexts are implemented as FixedArray objects; the Context
@@ -77,79 +34,69 @@ enum BindingFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
- V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
- V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
- V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(OBJECT_FREEZE, JSFunction, object_freeze) \
- V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
- V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
- V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
- V(OBJECT_KEYS, JSFunction, object_keys) \
- V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
- V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
- V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
- V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
- V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance) \
- V(MATH_FLOOR, JSFunction, math_floor) \
- V(MATH_SQRT, JSFunction, math_sqrt)
-
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
- V(ARRAY_POP_INDEX, JSFunction, array_pop) \
- V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
- V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
- V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
- V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
- V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter) \
- V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function) \
- V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
- V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
- V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
- V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
- V(MATH_POW_METHOD_INDEX, JSFunction, math_pow) \
- V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number) \
- V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number) \
- V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line) \
- V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, native_object_get_notifier) \
- V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \
- native_object_notifier_perform_change) \
- V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe) \
- V(NO_SIDE_EFFECTS_TO_STRING_FUN_INDEX, JSFunction, \
- no_side_effects_to_string_fun) \
- V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
- V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \
- V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice) \
- V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
- V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
- V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
- V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
- promise_has_user_defined_reject_handler) \
- V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
- V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
- V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
- V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
- V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
+ V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
+ V(MAKE_ERROR_INDEX, JSFunction, make_error) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
+ V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
+ V(OBJECT_FREEZE, JSFunction, object_freeze) \
+ V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
+ V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
+ V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
+ V(MATH_POW_INDEX, JSFunction, math_pow)
+
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
+ V(ARRAY_POP_INDEX, JSFunction, array_pop) \
+ V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
+ V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
+ V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
+ V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
+ V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(ASYNC_FUNCTION_AWAIT_INDEX, JSFunction, async_function_await) \
+ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
+ V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
+ V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
+ promise_has_user_defined_reject_handler) \
+ V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_CREATE_RESOLVED_INDEX, JSFunction, promise_create_resolved) \
+ V(PROMISE_CREATE_REJECTED_INDEX, JSFunction, promise_create_rejected) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
#define NATIVE_CONTEXT_FIELDS(V) \
@@ -162,6 +109,7 @@ enum BindingFlags {
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+ V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor) \
V(BOOL16X8_FUNCTION_INDEX, JSFunction, bool16x8_function) \
V(BOOL32X4_FUNCTION_INDEX, JSFunction, bool32x4_function) \
V(BOOL8X16_FUNCTION_INDEX, JSFunction, bool8x16_function) \
@@ -173,6 +121,7 @@ enum BindingFlags {
V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
call_as_constructor_delegate) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
+ V(CALLSITE_FUNCTION_INDEX, JSFunction, callsite_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
@@ -186,13 +135,16 @@ enum BindingFlags {
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
- V(TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
- template_instantiations_cache) \
+ V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray, \
+ fast_template_instantiations_cache) \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
+ slow_template_instantiations_cache) \
V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
generator_function_function) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
+ V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function) \
@@ -222,11 +174,12 @@ enum BindingFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
- V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
+ V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \
V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+ V(OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, object_with_null_prototype_map) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
@@ -249,8 +202,14 @@ enum BindingFlags {
V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
sloppy_function_with_readonly_prototype_map) \
V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
+ V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
+ V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
+ V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym) \
+ V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym) \
+ V(SLOPPY_ASYNC_FUNCTION_MAP_INDEX, Map, sloppy_async_function_map) \
V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
+ V(STRICT_ASYNC_FUNCTION_MAP_INDEX, Map, strict_async_function_map) \
V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
@@ -259,6 +218,8 @@ enum BindingFlags {
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
+ V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
+ V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
V(UINT16X8_FUNCTION_INDEX, JSFunction, uint16x8_function) \
V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
@@ -335,9 +296,7 @@ class ScriptContextTable : public FixedArray {
// statically allocated context slots. The names are needed
// for dynamic lookups in the presence of 'with' or 'eval'.
//
-// [ previous ] A pointer to the previous context. It is NULL for
-// function contexts, and non-NULL for 'with' contexts.
-// Used to implement the 'with' statement.
+// [ previous ] A pointer to the previous context.
//
// [ extension ] A pointer to an extension JSObject, or "the hole". Used to
// implement 'with' statements and dynamic declarations
@@ -351,11 +310,8 @@ class ScriptContextTable : public FixedArray {
// block scopes, it may also be a struct being a
// SloppyBlockWithEvalContextExtension, pairing the ScopeInfo
// with an extension object.
-// For module contexts, points back to the respective JSModule.
//
-// [ global_object ] A pointer to the global object. Provided for quick
-// access to the global object from inside the code (since
-// we always have a context pointer).
+// [ native_context ] A pointer to the native context.
//
// In addition, function contexts may have statically allocated context slots
// to store local variables/functions that are accessed from inner functions
@@ -421,6 +377,8 @@ class Context: public FixedArray {
inline Context* previous();
inline void set_previous(Context* context);
+ inline Object* next_context_link();
+
inline bool has_extension();
inline HeapObject* extension();
inline void set_extension(HeapObject* object);
@@ -429,9 +387,6 @@ class Context: public FixedArray {
ScopeInfo* scope_info();
String* catch_name();
- inline JSModule* module();
- inline void set_module(JSModule* module);
-
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
Context* declaration_context();
@@ -514,11 +469,10 @@ class Context: public FixedArray {
// 3) result.is_null():
// There was no binding found, *index is always -1 and *attributes is
// always ABSENT.
- Handle<Object> Lookup(Handle<String> name,
- ContextLookupFlags flags,
- int* index,
- PropertyAttributes* attributes,
- BindingFlags* binding_flags);
+ Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags,
+ int* index, PropertyAttributes* attributes,
+ InitializationFlag* init_flag,
+ VariableMode* variable_mode);
// Code generation support.
static int SlotOffset(int index) {
@@ -526,11 +480,17 @@ class Context: public FixedArray {
}
static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
+ // Note: Must be kept in sync with FastNewClosureStub::Generate.
if (IsGeneratorFunction(kind)) {
return is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
: SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
}
+ if (IsAsyncFunction(kind)) {
+ return is_strict(language_mode) ? STRICT_ASYNC_FUNCTION_MAP_INDEX
+ : SLOPPY_ASYNC_FUNCTION_MAP_INDEX;
+ }
+
if (IsClassConstructor(kind)) {
// Use strict function map (no own "caller" / "arguments")
return STRICT_FUNCTION_MAP_INDEX;
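FunctionMapIndex gains an async-function branch that mirrors the existing generator branch: strictness picks between the sloppy and strict map slots. A runnable toy model of the dispatch (simplified flags; the real code consults LanguageMode and FunctionKind):

    #include <iostream>

    enum MapIndex {
      SLOPPY_FUNCTION, STRICT_FUNCTION,
      SLOPPY_GENERATOR, STRICT_GENERATOR,
      SLOPPY_ASYNC, STRICT_ASYNC
    };

    MapIndex FunctionMapIndexToy(bool is_strict, bool is_generator,
                                 bool is_async) {
      if (is_generator) return is_strict ? STRICT_GENERATOR : SLOPPY_GENERATOR;
      if (is_async) return is_strict ? STRICT_ASYNC : SLOPPY_ASYNC;
      return is_strict ? STRICT_FUNCTION : SLOPPY_FUNCTION;
    }

    int main() {
      // A strict async function selects the strict async map slot (5).
      std::cout << FunctionMapIndexToy(true, false, true) << "\n";
    }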
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 730e6477cb..427a67d109 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -97,6 +97,13 @@ int32_t DoubleToInt32(double x) {
}
}
+bool DoubleToSmiInteger(double value, int* smi_int_value) {
+ if (IsMinusZero(value)) return false;
+ int i = FastD2IChecked(value);
+ if (value != i || !Smi::IsValid(i)) return false;
+ *smi_int_value = i;
+ return true;
+}
bool IsSmiDouble(double value) {
return !IsMinusZero(value) && value >= Smi::kMinValue &&
@@ -132,8 +139,9 @@ int64_t NumberToInt64(Object* number) {
return static_cast<int64_t>(number->Number());
}
-bool TryNumberToSize(Isolate* isolate, Object* number, size_t* result) {
- SealHandleScope shs(isolate);
+bool TryNumberToSize(Object* number, size_t* result) {
+ // Do not create handles in this function! Don't use SealHandleScope because
+ // the function can be used concurrently.
if (number->IsSmi()) {
int value = Smi::cast(number)->value();
DCHECK(static_cast<unsigned>(Smi::kMaxValue) <=
@@ -155,10 +163,9 @@ bool TryNumberToSize(Isolate* isolate, Object* number, size_t* result) {
}
}
-
-size_t NumberToSize(Isolate* isolate, Object* number) {
+size_t NumberToSize(Object* number) {
size_t result = 0;
- bool is_valid = TryNumberToSize(isolate, number, &result);
+ bool is_valid = TryNumberToSize(number, &result);
CHECK(is_valid);
return result;
}
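The new DoubleToSmiInteger above rejects -0.0, non-integral values, and anything outside Smi range. A standalone sketch of the same contract, assuming a 31-bit Smi payload (32-bit tagging); V8's FastD2IChecked and Smi::IsValid are not reproduced:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    bool DoubleToSmiIntegerSketch(double value, int* smi_int_value) {
      // Reject -0.0 by bit pattern; it compares equal to 0.0 arithmetically.
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      if (bits == 0x8000000000000000ULL) return false;
      const double kSmiMin = -(1 << 30), kSmiMax = (1 << 30) - 1;
      // Range check first (also rejects NaN: comparisons with NaN fail).
      if (!(value >= kSmiMin && value <= kSmiMax)) return false;
      int i = static_cast<int>(value);
      if (static_cast<double>(i) != value) return false;  // not integral
      *smi_int_value = i;
      return true;
    }

    int main() {
      int v = 0;
      std::cout << DoubleToSmiIntegerSketch(42.0, &v) << " " << v << "\n";  // 1 42
      std::cout << DoubleToSmiIntegerSketch(-0.0, &v) << "\n";             // 0
      std::cout << DoubleToSmiIntegerSketch(1.5, &v) << "\n";              // 0
    }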
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 29262e5306..2dd91d9319 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -147,28 +147,26 @@ char* DoubleToExponentialCString(double value, int f);
char* DoubleToPrecisionCString(double value, int f);
char* DoubleToRadixCString(double value, int radix);
-
static inline bool IsMinusZero(double value) {
- static const DoubleRepresentation minus_zero(-0.0);
- return DoubleRepresentation(value) == minus_zero;
+ return bit_cast<int64_t>(value) == bit_cast<int64_t>(-0.0);
}
+// Returns true if value can be converted to a SMI, and returns the resulting
+// integer value of the SMI in |smi_int_value|.
+inline bool DoubleToSmiInteger(double value, int* smi_int_value);
inline bool IsSmiDouble(double value);
-
// Integer32 is an integer that can be represented as a signed 32-bit
// integer. It has to be in the range [-2^31, 2^31 - 1].
// We also have to check for negative 0 as it is not an Integer32.
inline bool IsInt32Double(double value);
-
// UInteger32 is an integer that can be represented as an unsigned 32-bit
// integer. It has to be in the range [0, 2^32 - 1].
// We also have to check for negative 0 as it is not a UInteger32.
inline bool IsUint32Double(double value);
-
// Convert from Number object to C integer.
inline int32_t NumberToInt32(Object* number);
inline uint32_t NumberToUint32(Object* number);
@@ -177,13 +175,10 @@ inline int64_t NumberToInt64(Object* number);
double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
int flags, double empty_string_val = 0.0);
-
-inline bool TryNumberToSize(Isolate* isolate, Object* number, size_t* result);
-
+inline bool TryNumberToSize(Object* number, size_t* result);
// Converts a number into size_t.
-inline size_t NumberToSize(Isolate* isolate, Object* number);
-
+inline size_t NumberToSize(Object* number);
// returns DoubleToString(StringToDouble(string)) == string
bool IsSpecialIndex(UnicodeCache* unicode_cache, String* string);
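The rewritten IsMinusZero compares bit patterns because IEEE-754 defines -0.0 == 0.0, so an arithmetic comparison cannot distinguish the two. A standalone equivalent using memcpy in place of V8's bit_cast:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    bool IsMinusZeroSketch(double value) {
      uint64_t bits, minus_zero;
      const double mz = -0.0;
      std::memcpy(&bits, &value, sizeof bits);
      std::memcpy(&minus_zero, &mz, sizeof minus_zero);
      // Only -0.0 has the sign bit set and every other bit clear.
      return bits == minus_zero;
    }

    int main() {
      std::cout << IsMinusZeroSketch(-0.0) << " "  // 1
                << IsMinusZeroSketch(0.0) << " "   // 0
                << (-0.0 == 0.0) << "\n";          // 1: arithmetic can't tell
    }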
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
new file mode 100644
index 0000000000..c8c06d2950
--- /dev/null
+++ b/deps/v8/src/counters-inl.h
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COUNTERS_INL_H_
+#define V8_COUNTERS_INL_H_
+
+#include "src/counters.h"
+
+namespace v8 {
+namespace internal {
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(
+ HeapObject* heap_object, RuntimeCallStats::CounterId counter_id) {
+ if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
+ isolate_ = heap_object->GetIsolate();
+ RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COUNTERS_INL_H_
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 4f5c251a0c..8a5908c9af 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/isolate.h"
#include "src/log-inl.h"
+#include "src/log.h"
namespace v8 {
namespace internal {
@@ -200,18 +201,21 @@ class RuntimeCallStatEntries {
void Print(std::ostream& os) {
if (total_call_count == 0) return;
std::sort(entries.rbegin(), entries.rend());
- os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(10)
+ os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(12)
<< "Time" << std::setw(18) << "Count" << std::endl
- << std::string(86, '=') << std::endl;
+ << std::string(88, '=') << std::endl;
for (Entry& entry : entries) {
entry.SetTotal(total_time, total_call_count);
entry.Print(os);
}
- os << std::string(86, '-') << std::endl;
+ os << std::string(88, '-') << std::endl;
Entry("Total", total_time, total_call_count).Print(os);
}
- void Add(RuntimeCallCounter* counter) {
+ // By default, the compiler will usually inline this, which results in a large
+ // binary size increase: std::vector::push_back expands to a large number of
+ // instructions, and this function is invoked repeatedly by macros.
+ V8_NOINLINE void Add(RuntimeCallCounter* counter) {
if (counter->count == 0) return;
entries.push_back(Entry(counter->name, counter->time, counter->count));
total_time += counter->time;
@@ -223,7 +227,7 @@ class RuntimeCallStatEntries {
public:
Entry(const char* name, base::TimeDelta time, uint64_t count)
: name_(name),
- time_(time.InMilliseconds()),
+ time_(time.InMicroseconds()),
count_(count),
time_percent_(100),
count_percent_(100) {}
@@ -236,9 +240,9 @@ class RuntimeCallStatEntries {
void Print(std::ostream& os) {
os.precision(2);
- os << std::fixed;
+ os << std::fixed << std::setprecision(2);
os << std::setw(50) << name_;
- os << std::setw(8) << time_ << "ms ";
+ os << std::setw(10) << static_cast<double>(time_) / 1000 << "ms ";
os << std::setw(6) << time_percent_ << "%";
os << std::setw(10) << count_ << " ";
os << std::setw(6) << count_percent_ << "%";
@@ -246,10 +250,10 @@ class RuntimeCallStatEntries {
}
void SetTotal(base::TimeDelta total_time, uint64_t total_count) {
- if (total_time.InMilliseconds() == 0) {
+ if (total_time.InMicroseconds() == 0) {
time_percent_ = 0;
} else {
- time_percent_ = 100.0 * time_ / total_time.InMilliseconds();
+ time_percent_ = 100.0 * time_ / total_time.InMicroseconds();
}
count_percent_ = 100.0 * count_ / total_count;
}
@@ -272,68 +276,92 @@ void RuntimeCallCounter::Reset() {
time = base::TimeDelta();
}
-void RuntimeCallStats::Enter(RuntimeCallCounter* counter) {
- RuntimeCallTimer* timer = new RuntimeCallTimer();
- timer->Initialize(counter, current_timer_);
- Enter(timer);
+void RuntimeCallCounter::Dump(std::stringstream& out) {
+ out << "\"" << name << "\":[" << count << "," << time.InMicroseconds()
+ << "],";
}
-void RuntimeCallStats::Enter(RuntimeCallTimer* timer_) {
- current_timer_ = timer_;
- current_timer_->Start();
+// static
+void RuntimeCallStats::Enter(Isolate* isolate, RuntimeCallTimer* timer,
+ CounterId counter_id) {
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+ RuntimeCallCounter* counter = &(stats->*counter_id);
+ timer->Start(counter, stats->current_timer_);
+ stats->current_timer_ = timer;
}
-void RuntimeCallStats::Leave() {
- RuntimeCallTimer* timer = current_timer_;
- Leave(timer);
- delete timer;
+// static
+void RuntimeCallStats::Leave(Isolate* isolate, RuntimeCallTimer* timer) {
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+
+ if (stats->current_timer_ == timer) {
+ stats->current_timer_ = timer->Stop();
+ } else {
+ // Must be a Threading cctest. Walk the chain of Timers to find the
+ // buried one that's leaving. We don't care about keeping nested timings
+ // accurate, just avoid crashing by keeping the chain intact.
+ RuntimeCallTimer* next = stats->current_timer_;
+ while (next->parent_ != timer) next = next->parent_;
+ next->parent_ = timer->Stop();
+ }
}
-void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
- current_timer_ = timer->Stop();
+// static
+void RuntimeCallStats::CorrectCurrentCounterId(Isolate* isolate,
+ CounterId counter_id) {
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
+ DCHECK_NOT_NULL(stats->current_timer_);
+ RuntimeCallCounter* counter = &(stats->*counter_id);
+ stats->current_timer_->counter_ = counter;
}
void RuntimeCallStats::Print(std::ostream& os) {
RuntimeCallStatEntries entries;
+#define PRINT_COUNTER(name) entries.Add(&this->name);
+ FOR_EACH_MANUAL_COUNTER(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
#define PRINT_COUNTER(name, nargs, ressize) entries.Add(&this->Runtime_##name);
FOR_EACH_INTRINSIC(PRINT_COUNTER)
#undef PRINT_COUNTER
-#define PRINT_COUNTER(name, type) entries.Add(&this->Builtin_##name);
+#define PRINT_COUNTER(name) entries.Add(&this->Builtin_##name);
BUILTIN_LIST_C(PRINT_COUNTER)
#undef PRINT_COUNTER
- entries.Add(&this->ExternalCallback);
- entries.Add(&this->GC);
- entries.Add(&this->UnexpectedStubMiss);
+#define PRINT_COUNTER(name) entries.Add(&this->API_##name);
+ FOR_EACH_API_COUNTER(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
+#define PRINT_COUNTER(name) entries.Add(&this->Handler_##name);
+ FOR_EACH_HANDLER_COUNTER(PRINT_COUNTER)
+#undef PRINT_COUNTER
entries.Print(os);
}
void RuntimeCallStats::Reset() {
if (!FLAG_runtime_call_stats) return;
-#define RESET_COUNTER(name, nargs, ressize) this->Runtime_##name.Reset();
+#define RESET_COUNTER(name) this->name.Reset();
+ FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
+
+#define RESET_COUNTER(name, nargs, result_size) this->Runtime_##name.Reset();
FOR_EACH_INTRINSIC(RESET_COUNTER)
#undef RESET_COUNTER
-#define RESET_COUNTER(name, type) this->Builtin_##name.Reset();
+
+#define RESET_COUNTER(name) this->Builtin_##name.Reset();
BUILTIN_LIST_C(RESET_COUNTER)
#undef RESET_COUNTER
- this->ExternalCallback.Reset();
- this->GC.Reset();
- this->UnexpectedStubMiss.Reset();
-}
-void RuntimeCallTimerScope::Enter(Isolate* isolate,
- RuntimeCallCounter* counter) {
- isolate_ = isolate;
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
- timer_.Initialize(counter, stats->current_timer());
- stats->Enter(&timer_);
-}
+#define RESET_COUNTER(name) this->API_##name.Reset();
+ FOR_EACH_API_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
-void RuntimeCallTimerScope::Leave() {
- isolate_->counters()->runtime_call_stats()->Leave(&timer_);
+#define RESET_COUNTER(name) this->Handler_##name.Reset();
+ FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
}
} // namespace internal
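The Enter/Leave rewrite above keeps timers on an intrusive parent chain; when a timer stops, it charges its elapsed time to its own counter and subtracts that time from the parent's, so every counter accumulates own time rather than inclusive time. A runnable toy model of that bookkeeping (integer microseconds stand in for base::TimeDelta):

    #include <cstdint>
    #include <iostream>

    struct Counter { const char* name; int64_t time_us = 0; };

    struct Timer {
      Counter* counter = nullptr;
      Timer* parent = nullptr;
      void Start(Counter* c, Timer* p) { counter = c; parent = p; }
      Timer* Stop(int64_t elapsed_us) {
        counter->time_us += elapsed_us;
        // Subtract the child's time so the parent records own time only.
        if (parent != nullptr) parent->counter->time_us -= elapsed_us;
        return parent;
      }
    };

    int main() {
      Counter outer{"outer"}, inner{"inner"};
      Timer t_outer, t_inner;
      t_outer.Start(&outer, nullptr);
      t_inner.Start(&inner, &t_outer);
      t_inner.Stop(30);    // inner scope ran for 30us
      t_outer.Stop(100);   // outer's inclusive time was 100us
      std::cout << outer.name << "=" << outer.time_us << "us "    // outer=70us
                << inner.name << "=" << inner.time_us << "us\n";  // inner=30us
    }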
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 7183d0e52e..59627f13f6 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -9,7 +9,7 @@
#include "src/allocation.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
#include "src/globals.h"
#include "src/objects.h"
#include "src/runtime/runtime.h"
@@ -482,6 +482,7 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
struct RuntimeCallCounter {
explicit RuntimeCallCounter(const char* name) : name(name) {}
void Reset();
+ V8_NOINLINE void Dump(std::stringstream& out);
const char* name;
int64_t count = 0;
@@ -492,88 +493,334 @@ struct RuntimeCallCounter {
// timers used for properly measuring the own time of a RuntimeCallCounter.
class RuntimeCallTimer {
public:
- inline void Initialize(RuntimeCallCounter* counter,
- RuntimeCallTimer* parent) {
+ RuntimeCallTimer() {}
+ RuntimeCallCounter* counter() { return counter_; }
+ base::ElapsedTimer timer() { return timer_; }
+
+ private:
+ friend class RuntimeCallStats;
+
+ inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent) {
counter_ = counter;
parent_ = parent;
- }
-
- inline void Start() {
timer_.Start();
- counter_->count++;
}
inline RuntimeCallTimer* Stop() {
base::TimeDelta delta = timer_.Elapsed();
+ timer_.Stop();
+ counter_->count++;
counter_->time += delta;
if (parent_ != NULL) {
- parent_->AdjustForSubTimer(delta);
+ // Adjust the parent timer so that it does not include the sub-timer's time.
+ parent_->counter_->time -= delta;
}
return parent_;
}
- inline void AdjustForSubTimer(base::TimeDelta delta) {
- counter_->time -= delta;
- }
-
- private:
- RuntimeCallCounter* counter_;
- RuntimeCallTimer* parent_;
+ RuntimeCallCounter* counter_ = nullptr;
+ RuntimeCallTimer* parent_ = nullptr;
base::ElapsedTimer timer_;
};
-struct RuntimeCallStats {
- // Dummy counter for the unexpected stub miss.
- RuntimeCallCounter UnexpectedStubMiss =
- RuntimeCallCounter("UnexpectedStubMiss");
- // Counter for runtime callbacks into JavaScript.
- RuntimeCallCounter ExternalCallback = RuntimeCallCounter("ExternalCallback");
- RuntimeCallCounter GC = RuntimeCallCounter("GC");
+#define FOR_EACH_API_COUNTER(V) \
+ V(ArrayBuffer_Cast) \
+ V(ArrayBuffer_Neuter) \
+ V(ArrayBuffer_New) \
+ V(Array_CloneElementAt) \
+ V(Array_New) \
+ V(BooleanObject_BooleanValue) \
+ V(BooleanObject_New) \
+ V(Context_New) \
+ V(Context_NewRemoteContext) \
+ V(DataView_New) \
+ V(Date_DateTimeConfigurationChangeNotification) \
+ V(Date_New) \
+ V(Date_NumberValue) \
+ V(Debug_Call) \
+ V(Debug_GetMirror) \
+ V(Error_New) \
+ V(External_New) \
+ V(Float32Array_New) \
+ V(Float64Array_New) \
+ V(Function_Call) \
+ V(Function_New) \
+ V(Function_NewInstance) \
+ V(FunctionTemplate_GetFunction) \
+ V(FunctionTemplate_New) \
+ V(FunctionTemplate_NewRemoteInstance) \
+ V(FunctionTemplate_NewWithFastHandler) \
+ V(Int16Array_New) \
+ V(Int32Array_New) \
+ V(Int8Array_New) \
+ V(JSON_Parse) \
+ V(JSON_Stringify) \
+ V(Map_AsArray) \
+ V(Map_Clear) \
+ V(Map_Delete) \
+ V(Map_Get) \
+ V(Map_Has) \
+ V(Map_New) \
+ V(Map_Set) \
+ V(Message_GetEndColumn) \
+ V(Message_GetLineNumber) \
+ V(Message_GetSourceLine) \
+ V(Message_GetStartColumn) \
+ V(NumberObject_New) \
+ V(NumberObject_NumberValue) \
+ V(Object_CallAsConstructor) \
+ V(Object_CallAsFunction) \
+ V(Object_CreateDataProperty) \
+ V(Object_DefineOwnProperty) \
+ V(Object_Delete) \
+ V(Object_DeleteProperty) \
+ V(Object_ForceSet) \
+ V(Object_Get) \
+ V(Object_GetOwnPropertyDescriptor) \
+ V(Object_GetOwnPropertyNames) \
+ V(Object_GetPropertyAttributes) \
+ V(Object_GetPropertyNames) \
+ V(Object_GetRealNamedProperty) \
+ V(Object_GetRealNamedPropertyAttributes) \
+ V(Object_GetRealNamedPropertyAttributesInPrototypeChain) \
+ V(Object_GetRealNamedPropertyInPrototypeChain) \
+ V(Object_HasOwnProperty) \
+ V(Object_HasRealIndexedProperty) \
+ V(Object_HasRealNamedCallbackProperty) \
+ V(Object_HasRealNamedProperty) \
+ V(Object_Int32Value) \
+ V(Object_IntegerValue) \
+ V(Object_New) \
+ V(Object_NumberValue) \
+ V(Object_ObjectProtoToString) \
+ V(Object_Set) \
+ V(Object_SetAccessor) \
+ V(Object_SetIntegrityLevel) \
+ V(Object_SetPrivate) \
+ V(Object_SetPrototype) \
+ V(ObjectTemplate_New) \
+ V(ObjectTemplate_NewInstance) \
+ V(Object_ToArrayIndex) \
+ V(Object_ToDetailString) \
+ V(Object_ToInt32) \
+ V(Object_ToInteger) \
+ V(Object_ToNumber) \
+ V(Object_ToObject) \
+ V(Object_ToString) \
+ V(Object_ToUint32) \
+ V(Object_Uint32Value) \
+ V(Persistent_New) \
+ V(Private_New) \
+ V(Promise_Catch) \
+ V(Promise_Chain) \
+ V(Promise_HasRejectHandler) \
+ V(Promise_Resolver_New) \
+ V(Promise_Resolver_Resolve) \
+ V(Promise_Then) \
+ V(Proxy_New) \
+ V(RangeError_New) \
+ V(ReferenceError_New) \
+ V(RegExp_New) \
+ V(ScriptCompiler_Compile) \
+ V(ScriptCompiler_CompileFunctionInContext) \
+ V(ScriptCompiler_CompileUnbound) \
+ V(Script_Run) \
+ V(Set_Add) \
+ V(Set_AsArray) \
+ V(Set_Clear) \
+ V(Set_Delete) \
+ V(Set_Has) \
+ V(Set_New) \
+ V(SharedArrayBuffer_New) \
+ V(String_Concat) \
+ V(String_NewExternalOneByte) \
+ V(String_NewExternalTwoByte) \
+ V(String_NewFromOneByte) \
+ V(String_NewFromTwoByte) \
+ V(String_NewFromUtf8) \
+ V(StringObject_New) \
+ V(StringObject_StringValue) \
+ V(String_Write) \
+ V(String_WriteUtf8) \
+ V(Symbol_New) \
+ V(SymbolObject_New) \
+ V(SymbolObject_SymbolValue) \
+ V(SyntaxError_New) \
+ V(TryCatch_StackTrace) \
+ V(TypeError_New) \
+ V(Uint16Array_New) \
+ V(Uint32Array_New) \
+ V(Uint8Array_New) \
+ V(Uint8ClampedArray_New) \
+ V(UnboundScript_GetId) \
+ V(UnboundScript_GetLineNumber) \
+ V(UnboundScript_GetName) \
+ V(UnboundScript_GetSourceMappingURL) \
+ V(UnboundScript_GetSourceURL) \
+ V(Value_TypeOf)
+
+#define FOR_EACH_MANUAL_COUNTER(V) \
+ V(AccessorGetterCallback) \
+ V(AccessorNameGetterCallback) \
+ V(AccessorNameSetterCallback) \
+ V(Compile) \
+ V(CompileCode) \
+ V(CompileCodeLazy) \
+ V(CompileDeserialize) \
+ V(CompileEval) \
+ V(CompileFullCode) \
+ V(CompileIgnition) \
+ V(CompileSerialize) \
+ V(DeoptimizeCode) \
+ V(FunctionCallback) \
+ V(GC) \
+ V(GenericNamedPropertyDeleterCallback) \
+ V(GenericNamedPropertyQueryCallback) \
+ V(GenericNamedPropertySetterCallback) \
+ V(IndexedPropertyDeleterCallback) \
+ V(IndexedPropertyGetterCallback) \
+ V(IndexedPropertyQueryCallback) \
+ V(IndexedPropertySetterCallback) \
+ V(InvokeFunctionCallback) \
+ V(JS_Execution) \
+ V(Map_SetPrototype) \
+ V(Map_TransitionToAccessorProperty) \
+ V(Map_TransitionToDataProperty) \
+ V(Object_DeleteProperty) \
+ V(OptimizeCode) \
+ V(Parse) \
+ V(ParseLazy) \
+ V(PropertyCallback) \
+ V(PrototypeMap_TransitionToAccessorProperty) \
+ V(PrototypeMap_TransitionToDataProperty) \
+ V(PrototypeObject_DeleteProperty) \
+ V(RecompileConcurrent) \
+ V(RecompileSynchronous) \
+ /* Dummy counter for the unexpected stub miss. */ \
+ V(UnexpectedStubMiss)
+
+#define FOR_EACH_HANDLER_COUNTER(V) \
+ V(IC_HandlerCacheHit) \
+ V(KeyedLoadIC_LoadIndexedStringStub) \
+ V(KeyedLoadIC_LoadIndexedInterceptorStub) \
+ V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
+ V(KeyedLoadIC_LoadFastElementStub) \
+ V(KeyedLoadIC_LoadDictionaryElementStub) \
+ V(KeyedLoadIC_SlowStub) \
+ V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
+ V(KeyedStoreIC_StoreFastElementStub) \
+ V(KeyedStoreIC_StoreElementStub) \
+ V(KeyedStoreIC_Polymorphic) \
+ V(LoadIC_FunctionPrototypeStub) \
+ V(LoadIC_LoadApiGetterStub) \
+ V(LoadIC_LoadCallback) \
+ V(LoadIC_LoadConstant) \
+ V(LoadIC_LoadConstantStub) \
+ V(LoadIC_LoadField) \
+ V(LoadIC_LoadFieldStub) \
+ V(LoadIC_LoadGlobal) \
+ V(LoadIC_LoadInterceptor) \
+ V(LoadIC_LoadNonexistent) \
+ V(LoadIC_LoadNormal) \
+ V(LoadIC_LoadScriptContextFieldStub) \
+ V(LoadIC_LoadViaGetter) \
+ V(LoadIC_SlowStub) \
+ V(LoadIC_StringLengthStub) \
+ V(StoreIC_SlowStub) \
+ V(StoreIC_StoreCallback) \
+ V(StoreIC_StoreField) \
+ V(StoreIC_StoreFieldStub) \
+ V(StoreIC_StoreGlobal) \
+ V(StoreIC_StoreGlobalTransition) \
+ V(StoreIC_StoreInterceptorStub) \
+ V(StoreIC_StoreNormal) \
+ V(StoreIC_StoreScriptContextFieldStub) \
+ V(StoreIC_StoreTransition) \
+ V(StoreIC_StoreViaSetter)
+
+class RuntimeCallStats {
+ public:
+ typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
+
+#define CALL_RUNTIME_COUNTER(name) \
+ RuntimeCallCounter name = RuntimeCallCounter(#name);
+ FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name, type) \
+#define CALL_BUILTIN_COUNTER(name) \
RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
-
- // Counter to track recursive time events.
- RuntimeCallTimer* current_timer_ = NULL;
+#define CALL_BUILTIN_COUNTER(name) \
+ RuntimeCallCounter API_##name = RuntimeCallCounter("API_" #name);
+ FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) \
+ RuntimeCallCounter Handler_##name = RuntimeCallCounter(#name);
+ FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
// Start measuring the time for a function. This will establish the
// connection to the parent counter for properly calculating own time.
- void Enter(RuntimeCallCounter* counter);
- void Enter(RuntimeCallTimer* timer);
+ static void Enter(Isolate* isolate, RuntimeCallTimer* timer,
+ CounterId counter_id);
+
// Leave a scope for a measured runtime function. This will properly add
// the time delta to the current_counter and subtract the delta from its
// parent.
- void Leave();
- void Leave(RuntimeCallTimer* timer);
+ static void Leave(Isolate* isolate, RuntimeCallTimer* timer);
- RuntimeCallTimer* current_timer() { return current_timer_; }
+ // Set counter id for the innermost measurement. It can be used to refine
+ // event kind when a runtime entry counter is too generic.
+ static void CorrectCurrentCounterId(Isolate* isolate, CounterId counter_id);
void Reset();
void Print(std::ostream& os);
RuntimeCallStats() { Reset(); }
+ RuntimeCallTimer* current_timer() { return current_timer_; }
+
+ private:
+ // Counter to track recursive time events.
+ RuntimeCallTimer* current_timer_ = NULL;
};
+#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name) \
+ do { \
+ if (FLAG_runtime_call_stats) { \
+ RuntimeCallStats::CorrectCurrentCounterId( \
+ isolate, &RuntimeCallStats::counter_name); \
+ } \
+ } while (false)
+
+#define TRACE_HANDLER_STATS(isolate, counter_name) \
+ TRACE_RUNTIME_CALL_STATS(isolate, Handler_##counter_name)
+
// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
// time of a C++ scope.
class RuntimeCallTimerScope {
public:
- inline explicit RuntimeCallTimerScope(Isolate* isolate,
- RuntimeCallCounter* counter) {
- if (FLAG_runtime_call_stats) Enter(isolate, counter);
+ inline RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallStats::CounterId counter_id) {
+ if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
+ isolate_ = isolate;
+ RuntimeCallStats::Enter(isolate_, &timer_, counter_id);
+ }
}
+ // This constructor is here just to avoid calling GetIsolate() when the
+ // stats are disabled and the isolate is not directly available.
+ inline RuntimeCallTimerScope(HeapObject* heap_object,
+ RuntimeCallStats::CounterId counter_id);
+
inline ~RuntimeCallTimerScope() {
- if (FLAG_runtime_call_stats) Leave();
+ if (V8_UNLIKELY(FLAG_runtime_call_stats)) {
+ RuntimeCallStats::Leave(isolate_, &timer_);
+ }
}
- void Enter(Isolate* isolate, RuntimeCallCounter* counter);
- void Leave();
-
private:
Isolate* isolate_;
RuntimeCallTimer timer_;
@@ -588,44 +835,57 @@ class RuntimeCallTimerScope {
101) \
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
- HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7)
-
-#define HISTOGRAM_TIMER_LIST(HT) \
- /* Garbage collection timers. */ \
- HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
- HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
- HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
- MILLISECOND) \
- HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
- HT(gc_context, V8.GCContext, 10000, \
- MILLISECOND) /* GC context cleanup time */ \
- HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
- HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
- HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000, \
- MILLISECOND) \
- HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000, \
- MILLISECOND) \
- HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
- MILLISECOND) \
- /* Parsing timers. */ \
- HT(parse, V8.ParseMicroSeconds, 1000000, MICROSECOND) \
- HT(parse_lazy, V8.ParseLazyMicroSeconds, 1000000, MICROSECOND) \
- HT(pre_parse, V8.PreParseMicroSeconds, 1000000, MICROSECOND) \
- /* Compilation times. */ \
- HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
- HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
- /* Serialization as part of compilation (code caching) */ \
- HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND) \
- HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000, \
- MICROSECOND) \
- /* Total compilation time incl. caching/parsing */ \
- HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND)
-
+ HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
+ /* Asm/Wasm. */ \
+ HR(wasm_functions_per_module, V8.WasmFunctionsPerModule, 1, 10000, 51)
+
+#define HISTOGRAM_TIMER_LIST(HT) \
+ /* Garbage collection timers. */ \
+ HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
+ HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
+ HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
+ MILLISECOND) \
+ HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
+ HT(gc_context, V8.GCContext, 10000, \
+ MILLISECOND) /* GC context cleanup time */ \
+ HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
+ HT(gc_incremental_marking, V8.GCIncrementalMarking, 10000, MILLISECOND) \
+ HT(gc_incremental_marking_start, V8.GCIncrementalMarkingStart, 10000, \
+ MILLISECOND) \
+ HT(gc_incremental_marking_finalize, V8.GCIncrementalMarkingFinalize, 10000, \
+ MILLISECOND) \
+ HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
+ MILLISECOND) \
+ /* Parsing timers. */ \
+ HT(parse, V8.ParseMicroSeconds, 1000000, MICROSECOND) \
+ HT(parse_lazy, V8.ParseLazyMicroSeconds, 1000000, MICROSECOND) \
+ HT(pre_parse, V8.PreParseMicroSeconds, 1000000, MICROSECOND) \
+ /* Compilation times. */ \
+ HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
+ HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
+ /* Serialization as part of compilation (code caching) */ \
+ HT(compile_serialize, V8.CompileSerializeMicroSeconds, 100000, MICROSECOND) \
+ HT(compile_deserialize, V8.CompileDeserializeMicroSeconds, 1000000, \
+ MICROSECOND) \
+ /* Total compilation time incl. caching/parsing */ \
+ HT(compile_script, V8.CompileScriptMicroSeconds, 1000000, MICROSECOND) \
+ /* Total JavaScript execution time (including callbacks and runtime calls) */ \
+ HT(execute, V8.Execute, 1000000, MICROSECOND) \
+ /* Asm/Wasm */ \
+ HT(wasm_instantiate_module_time, V8.WasmInstantiateModuleMicroSeconds, \
+ 1000000, MICROSECOND) \
+ HT(wasm_decode_module_time, V8.WasmDecodeModuleMicroSeconds, 1000000, \
+ MICROSECOND) \
+ HT(wasm_decode_function_time, V8.WasmDecodeFunctionMicroSeconds, 1000000, \
+ MICROSECOND) \
+ HT(wasm_compile_module_time, V8.WasmCompileModuleMicroSeconds, 1000000, \
+ MICROSECOND) \
+ HT(wasm_compile_function_time, V8.WasmCompileFunctionMicroSeconds, 1000000, \
+ MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
-
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
/* Heap fragmentation. */ \
HP(external_fragmentation_total, V8.MemoryExternalFragmentationTotal) \
@@ -639,10 +899,7 @@ class RuntimeCallTimerScope {
HP(heap_fraction_old_space, V8.MemoryHeapFractionOldSpace) \
HP(heap_fraction_code_space, V8.MemoryHeapFractionCodeSpace) \
HP(heap_fraction_map_space, V8.MemoryHeapFractionMapSpace) \
- HP(heap_fraction_lo_space, V8.MemoryHeapFractionLoSpace) \
- /* Percentage of crankshafted codegen. */ \
- HP(codegen_fraction_crankshaft, V8.CodegenFractionCrankshaft)
-
+ HP(heap_fraction_lo_space, V8.MemoryHeapFractionLoSpace)
#define HISTOGRAM_LEGACY_MEMORY_LIST(HM) \
HM(heap_sample_total_committed, V8.MemoryHeapSampleTotalCommitted) \
@@ -651,10 +908,17 @@ class RuntimeCallTimerScope {
HM(heap_sample_code_space_committed, V8.MemoryHeapSampleCodeSpaceCommitted) \
HM(heap_sample_maximum_committed, V8.MemoryHeapSampleMaximumCommitted)
-#define HISTOGRAM_MEMORY_LIST(HM) \
- HM(memory_heap_committed, V8.MemoryHeapCommitted) \
- HM(memory_heap_used, V8.MemoryHeapUsed)
-
+#define HISTOGRAM_MEMORY_LIST(HM) \
+ HM(memory_heap_committed, V8.MemoryHeapCommitted) \
+ HM(memory_heap_used, V8.MemoryHeapUsed) \
+ /* Asm/Wasm */ \
+ HM(wasm_decode_module_peak_memory_bytes, V8.WasmDecodeModulePeakMemoryBytes) \
+ HM(wasm_compile_function_peak_memory_bytes, \
+ V8.WasmCompileFunctionPeakMemoryBytes) \
+ HM(wasm_min_mem_pages_count, V8.WasmMinMemPagesCount) \
+ HM(wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount) \
+ HM(wasm_function_size_bytes, V8.WasmFunctionSizeBytes) \
+ HM(wasm_module_size_bytes, V8.WasmModuleSizeBytes)
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
// Intellisense to crash. It was broken into two macros (each of length 40
@@ -703,7 +967,6 @@ class RuntimeCallTimerScope {
/* The store-buffer implementation of the write barrier. */ \
SC(store_buffer_overflows, V8.StoreBufferOverflows)
-
#define STATS_COUNTER_LIST_2(SC) \
/* Number of code stubs. */ \
SC(code_stubs, V8.CodeStubs) \
@@ -743,8 +1006,6 @@ class RuntimeCallTimerScope {
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
- SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \
- SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
SC(string_add_runtime, V8.StringAddRuntime) \
SC(string_add_native, V8.StringAddNative) \
SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte) \
@@ -756,14 +1017,12 @@ class RuntimeCallTimerScope {
SC(regexp_entry_native, V8.RegExpEntryNative) \
SC(number_to_string_native, V8.NumberToStringNative) \
SC(number_to_string_runtime, V8.NumberToStringRuntime) \
- SC(math_atan2_runtime, V8.MathAtan2Runtime) \
- SC(math_clz32_runtime, V8.MathClz32Runtime) \
SC(math_exp_runtime, V8.MathExpRuntime) \
SC(math_log_runtime, V8.MathLogRuntime) \
SC(math_pow_runtime, V8.MathPowRuntime) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
- SC(runtime_calls, V8.RuntimeCalls) \
+ SC(runtime_calls, V8.RuntimeCalls) \
SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
@@ -794,7 +1053,9 @@ class RuntimeCallTimerScope {
/* Total code size (including metadata) of baseline code or bytecode. */ \
SC(total_baseline_code_size, V8.TotalBaselineCodeSize) \
/* Total count of functions compiled using the baseline compiler. */ \
- SC(total_baseline_compile_count, V8.TotalBaselineCompileCount)
+ SC(total_baseline_compile_count, V8.TotalBaselineCompileCount) \
+ SC(wasm_generated_code_size, V8.WasmGeneratedCodeBytes) \
+ SC(wasm_reloc_size, V8.WasmRelocBytes)
// This file contains all the v8 counters that are in use.
class Counters {
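
The HT, HM and SC lists in this file are X-macros: each list takes another
macro as its argument and applies it to every (counter_name, caption, ...)
tuple, which lets the Counters class expand one list several times, once
into member declarations, once into accessors, and so on. A minimal,
self-contained sketch of the pattern with a hypothetical two-entry list:

    #include <cstdio>

    // Hypothetical list mirroring the shape of HISTOGRAM_MEMORY_LIST.
    #define DEMO_MEMORY_LIST(HM)                        \
      HM(memory_heap_committed, V8.MemoryHeapCommitted) \
      HM(memory_heap_used, V8.MemoryHeapUsed)

    struct DemoCounters {
      // First expansion: one member per list entry.
    #define HM(name, caption) int name = 0;
      DEMO_MEMORY_LIST(HM)
    #undef HM

      // Second expansion: a dump routine over the same entries.
      void Dump() {
    #define HM(name, caption) std::printf(#name " = %d\n", name);
        DEMO_MEMORY_LIST(HM)
    #undef HM
      }
    };

    int main() {
      DemoCounters c;
      c.memory_heap_used = 42;
      c.Dump();  // prints both counters
      return 0;
    }
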
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index 4072982513..324dcfefa8 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -877,7 +877,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -934,17 +934,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1080,6 +1069,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DoMathAbs(instr);
case kMathLog:
return DoMathLog(instr);
+ case kMathCos:
+ return DoMathCos(instr);
+ case kMathSin:
+ return DoMathSin(instr);
case kMathExp:
return DoMathExp(instr);
case kMathSqrt:
@@ -1145,16 +1138,25 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
+}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = TempDoubleRegister();
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
}
@@ -1978,20 +1980,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2023,14 +2011,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -2074,10 +2057,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0);
@@ -2149,10 +2129,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -2214,12 +2191,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result =
new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2308,12 +2281,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
@@ -2351,13 +2320,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
+ if (instr->IsAllocationFolded()) {
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
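
The DoAllocate split above is the heart of allocation folding: when several
allocations have been folded into one dominating group, only the group
leader still needs the deferred runtime fallback (the LAllocate path with a
pointer map), while folded members take the new LFastAllocate path, a plain
bump within space that is already reserved. A hedged sketch of the run-time
idea; names and layout are illustrative only, not V8's actual heap code:

    #include <cstddef>
    #include <cstdint>

    struct NewSpace {
      uintptr_t top;
      uintptr_t limit;
    };

    // Group leader: reserves space for the whole folded group, and may have
    // to fall back to the runtime when new space is exhausted.
    void* AllocateFolded(NewSpace* space, size_t group_size) {
      if (space->top + group_size > space->limit) return nullptr;  // deferred
      void* result = reinterpret_cast<void*>(space->top);
      space->top += group_size;
      return result;
    }

    // Folded member: the space is already reserved, so this cannot fail and
    // needs no context operand or deopt bookkeeping.
    void* FastAllocate(uintptr_t* folded_cursor, size_t size) {
      void* result = reinterpret_cast<void*>(*folded_cursor);
      *folded_cursor += size;
      return result;
    }
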
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index 60fe79d402..80fbe81a0f 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -53,7 +53,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -61,12 +60,12 @@ class LCodeGen;
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
- V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -78,7 +77,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -98,6 +96,8 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
+ V(MathCos) \
+ V(MathSin) \
V(MathExp) \
V(MathFloor) \
V(MathFround) \
@@ -151,7 +151,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -906,24 +905,29 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
@@ -1135,22 +1139,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -1584,18 +1572,14 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2174,6 +2158,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2343,33 +2329,6 @@ class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LAllocate final : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2391,6 +2350,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
@@ -2547,6 +2521,8 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
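
Throughout this header the LTemplateInstruction<R, I, T> parameters count
results, inputs and temps, which is why dropping the scratch registers
shrinks LMathExp from <1, 1, 3> to <1, 1, 0> above. A simplified sketch of
the shape; the real class stores LOperand* and adds behavior:

    template <int kResults, int kInputs, int kTemps>
    struct LTemplateInstructionSketch {
      void* results_[kResults == 0 ? 1 : kResults];  // avoid zero-size arrays
      void* inputs_[kInputs == 0 ? 1 : kInputs];
      void* temps_[kTemps == 0 ? 1 : kTemps];
    };

    // One result, one input, no temps: the new LMathExp shape.
    using MathExpShape = LTemplateInstructionSketch<1, 1, 0>;
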
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index c64aac3cc8..072215d5fc 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -11,7 +11,6 @@
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -164,14 +163,12 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -180,10 +177,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ mov(cp, r0);
__ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -329,8 +327,6 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ bl(&call_deopt_entry);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
masm()->CheckConstPool(false, false);
}
@@ -769,9 +765,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
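
From here on, every deopt site in this file swaps Deoptimizer::kFoo for
DeoptimizeReason::kFoo: the bailout reasons move out of the Deoptimizer
class into a standalone scoped enum. A minimal sketch of the new shape; the
member list is abbreviated, and the real enum is generated from a reason
list elsewhere in the tree:

    enum class DeoptimizeReason {
      kMinusZero,
      kOverflow,
      kDivisionByZero,
      kHole,
      // ... many more
    };

    // Call sites now read
    //   DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
    // instead of
    //   DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
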
@@ -823,7 +818,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ stop("trap_on_deopt", condition);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
@@ -832,13 +827,12 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -847,9 +841,8 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@@ -910,13 +903,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -989,7 +975,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done);
}
@@ -1007,7 +993,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1022,7 +1008,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1042,7 +1028,7 @@ void LCodeGen::DoModI(LModI* instr) {
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
@@ -1053,7 +1039,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
@@ -1074,7 +1060,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
@@ -1099,7 +1085,7 @@ void LCodeGen::DoModI(LModI* instr) {
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
__ Move(result_reg, left_reg);
@@ -1129,7 +1115,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
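
The kMinusZero deopts in DoModI encode a JavaScript subtlety: an int32
remainder of 0 with a negative dividend actually stands for -0, which int32
cannot represent, so optimized code must bail out when that case can be
observed. A worked sketch of the condition; it assumes the kDivisionByZero
check has already fired for rhs == 0:

    #include <cstdint>
    #include <limits>

    // True when lhs % rhs would have to produce -0 under JS semantics.
    bool ModWouldBeMinusZero(int32_t lhs, int32_t rhs) {
      // kMinInt % -1 is special-cased first: the C++ expression overflows,
      // and ARM sdiv returns kMinInt for kMinInt / -1, which is why the
      // generated code above tests this pair explicitly.
      if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1) return true;
      return lhs < 0 && lhs % rhs == 0;
    }

    // ModWouldBeMinusZero(-8, 2) -> true   (deopt: kMinusZero)
    // ModWouldBeMinusZero(8, 2)  -> false  (result is +0)
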
@@ -1147,19 +1133,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1187,7 +1173,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1195,7 +1181,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1205,7 +1191,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1220,7 +1206,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1232,7 +1218,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&positive);
}
@@ -1244,7 +1230,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1267,7 +1253,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1318,13 +1304,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
return;
}
@@ -1347,7 +1333,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1355,7 +1341,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1396,7 +1382,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1408,7 +1394,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&positive);
}
@@ -1420,7 +1406,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1466,14 +1452,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ rsb(result, left, Operand::Zero());
}
@@ -1483,7 +1469,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ mov(result, Operand::Zero());
break;
@@ -1533,7 +1519,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1549,7 +1535,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -1612,7 +1598,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kNegativeValue);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
@@ -1649,7 +1635,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
}
__ Move(result, left);
}
@@ -1664,7 +1650,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
@@ -1696,7 +1682,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1717,7 +1703,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1858,7 +1844,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -2097,7 +2083,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
const Register map = scratch0();
@@ -2159,7 +2145,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -2498,16 +2484,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(r0));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2533,16 +2509,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ldrb(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, eq);
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq);
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
__ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ b(&loop);
}
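
Note the reordering in the loop above: the null check now runs before the
prototype comparison, so hitting the end of the chain wins even when the
prototype being searched for is itself null. The loop logic, sketched with
illustrative types:

    struct Obj { const Obj* proto; };  // stand-in for map/prototype links

    bool HasInPrototypeChain(const Obj* object, const Obj* prototype) {
      for (const Obj* p = object->proto; ; p = p->proto) {
        if (p == nullptr) return false;  // end of chain, checked first now
        if (p == prototype) return true;
      }
    }
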
@@ -2636,15 +2612,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(r0));
- __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2657,7 +2630,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
@@ -2678,7 +2651,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ b(ne, &skip_assignment);
}
@@ -2739,10 +2712,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2759,7 +2729,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -2873,7 +2843,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(cs, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -2928,7 +2898,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
}
@@ -2962,11 +2932,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -2981,7 +2951,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
- DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
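
The protector-cell check above guards hole-to-undefined conversion: a hole
loaded from a FAST_HOLEY_ELEMENTS backing store may only be turned into
undefined while the array protector cell is still valid, meaning nobody has
installed indexed properties on the Array or Object prototypes that the
hole could otherwise resolve to. A hedged sketch of the decision, with 0
standing in for the JS undefined value:

    #include <cstdlib>

    [[noreturn]] void Deopt() { std::abort(); }  // stands in for DeoptimizeIf

    int LoadElementOrUndefined(const int* elements, int index, bool is_hole,
                               bool protector_cell_valid) {
      if (!is_hole) return elements[index];
      // An invalidated protector means the hole might shadow a real
      // prototype element, so the optimized code deopts (kHole above).
      if (!protector_cell_valid) Deopt();
      return 0;
    }
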
@@ -3036,13 +3006,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3130,9 +3096,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3166,7 +3132,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3251,6 +3217,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
+ __ Move(scratch0(), instr->hydrogen()->feedback_vector());
+ __ push(scratch0());
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3318,7 +3286,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@@ -3386,7 +3354,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
@@ -3433,7 +3401,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3441,7 +3409,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@@ -3467,7 +3435,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
@@ -3481,7 +3449,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&done);
}
@@ -3545,7 +3513,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3559,26 +3527,32 @@ void LCodeGen::DoPower(LPower* instr) {
}
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DwVfpRegister input = ToDoubleRegister(instr->value());
- DwVfpRegister result = ToDoubleRegister(instr->result());
- DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DwVfpRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
void LCodeGen::DoMathLog(LMathLog* instr) {
__ PrepareCallCFunction(0, 1, scratch0());
__ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
- 0, 1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
__ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
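
DoMathCos, DoMathSin, DoMathExp and DoMathLog now share one shape: move the
operand into the FP parameter register, call the fdlibm-derived ieee754
routine through an ExternalReference, and move the FP result back. This
replaces the inline code that MathExpGenerator used to emit. Expressed as
plain C++ the whole sequence collapses to an indirect call; std::cos below
stands in for the real ieee754 target:

    #include <cmath>

    // PrepareCallCFunction(0, 1) / MovToFloatParameter / CallCFunction /
    // MovFromFloatResult, written directly: zero integer args, one double.
    double CallIeee754(double (*fn)(double), double input) {
      return fn(input);
    }

    // e.g. double c = CallIeee754(std::cos, 1.0);
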
@@ -3601,7 +3575,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3728,14 +3704,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
__ mov(r0, Operand(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ Move(r2, instr->hydrogen()->site());
- } else {
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- }
+ __ Move(r2, instr->hydrogen()->site());
+
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3768,7 +3738,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -3889,14 +3859,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3919,7 +3887,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -4031,11 +3999,6 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
// Force a canonical NaN.
- if (masm()->emit_debug_code()) {
- __ vmrs(ip);
- __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
- __ Assert(ne, kDefaultNaNModeNotSet);
- }
__ VFPCanonicalizeNaN(double_scratch, value);
__ vstr(double_scratch, scratch, 0);
} else {
@@ -4108,13 +4071,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4193,14 +4154,21 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
+ LConstantOperand* constant_key = LConstantOperand::cast(key);
+ int32_t int_key = ToInteger32(constant_key);
+ if (Smi::IsValid(int_key)) {
+ __ mov(r3, Operand(Smi::FromInt(int_key)));
+ } else {
+ // We should never get here at runtime because there is a smi check on
+ // the key before this point.
+ __ stop("expected smi");
+ }
} else {
__ Move(r3, ToRegister(key));
__ SmiTag(r3);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -4209,7 +4177,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
@@ -4242,8 +4210,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(object_reg.is(r0));
PushSafepointRegistersScope scope(this);
__ Move(r1, to_map);
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -4257,7 +4224,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4494,7 +4461,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
if (FLAG_inline_new) {
__ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
__ b(&done);
}
@@ -4508,25 +4475,20 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!dst.is(cp)) {
+ __ mov(cp, Operand::Zero());
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, dst);
}
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
- __ add(dst, dst, Operand(kHeapObjectTag));
+ __ vstr(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
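
The hunk above retires the old untagged-pointer dance: AllocateHeapNumber
used to hand back a raw address (DONT_TAG_RESULT), the double was stored at
a plain offset, and kHeapObjectTag was added at the end, with matching
sub/add pairs around the runtime fallback. The result now stays tagged and
FieldMemOperand folds the tag into the store offset instead. Sketched
address arithmetic; the offset value is illustrative, kHeapObjectTag really
is 1:

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;

    // New scheme: keep the tagged pointer and subtract the tag inside every
    // field address computation, which is what FieldMemOperand does.
    double* HeapNumberValueSlot(uint8_t* tagged, int value_offset) {
      return reinterpret_cast<double*>(tagged + value_offset - kHeapObjectTag);
    }

    // Old scheme (removed above): store through the raw pointer at
    // raw + value_offset, then tag it with raw + kHeapObjectTag afterwards.
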
@@ -4551,16 +4513,12 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ vstr(input_reg, reg, HeapNumber::kValueOffset);
- // Now that we have finished with the object's real address tag it
- __ add(reg, reg, Operand(kHeapObjectTag));
+ __ vstr(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}
@@ -4572,16 +4530,13 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, Operand::Zero());
PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(cp)) {
+ __ mov(cp, Operand::Zero());
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ sub(r0, r0, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(r0, reg);
}
@@ -4593,12 +4548,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ SmiTag(output, input);
}
@@ -4612,7 +4567,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(cs, instr, DeoptimizeReason::kNotASmi);
} else {
__ SmiUntag(result, input);
}
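
DoSmiTag and DoSmiUntag lean on the 32-bit Smi encoding: a Smi is the
integer shifted left one bit with a 0 tag bit, while heap object pointers
carry tag bit 1 (the STATIC_ASSERT above), so untagging with an arithmetic
shift drops the tag bit into the carry flag, set exactly when the input was
a heap object. The uint32 test against 0xc0000000 works because a uint32
fits the 31-bit Smi payload only below 2^30, i.e. when its top two bits are
clear. Sketched in C++:

    #include <cstdint>

    bool Uint32FitsSmi(uint32_t v) { return (v & 0xc0000000u) == 0; }

    int32_t SmiTag(int32_t v) {
      // In the generated code this is v + v with SetCC, so the V flag
      // reports overflow for signed inputs outside [-2^30, 2^30 - 1].
      return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    }

    int32_t SmiUntag(int32_t smi) {
      // asr #1 with SetCC shifts the low (tag) bit into the carry flag,
      // which is why a HeapObject (tag == 1) trips the kNotASmi deopt.
      return smi >> 1;
    }
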
@@ -4640,7 +4595,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -4650,7 +4605,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
@@ -4658,7 +4613,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
@@ -4726,22 +4681,22 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ mov(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
}
}
__ bind(&done);
@@ -4810,14 +4765,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -4835,26 +4790,26 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
}
@@ -4862,7 +4817,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
}
@@ -4875,7 +4830,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
}
@@ -4895,13 +4850,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
 // If there is only one type in the interval, check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else {
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@@ -4912,11 +4867,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr,
+ DeoptimizeReason::kWrongInstanceType);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -4935,7 +4891,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@@ -4950,7 +4906,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@@ -5008,7 +4964,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@@ -5047,7 +5003,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
@@ -5065,26 +5021,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- DwVfpRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ VmovHigh(result_reg, value_reg);
- } else {
- __ VmovLow(result_reg, value_reg);
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- DwVfpRegister result_reg = ToDoubleRegister(instr->result());
- __ VmovHigh(result_reg, hi_reg);
- __ VmovLow(result_reg, lo_reg);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
@@ -5105,7 +5041,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register scratch2 = ToRegister(instr->temp2());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -5114,6 +5050,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5181,6 +5122,49 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(r0, result);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ // If the allocation folding dominator's allocation triggered a GC, the
+ // allocation happened in the runtime. We have to reset the top pointer to
+ // virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ Register top_address = scratch0();
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ mov(top_address, Operand(allocation_top));
+ __ str(r0, MemOperand(top_address));
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
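+ // A folded allocation draws from space the dominating allocation already
+ // reserved, so FastAllocate can bump the allocation top directly, with no
+ // limit check and no deferred runtime fall-back.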
+ Register result = ToRegister(instr->result());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ }
}
@@ -5193,8 +5177,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ mov(r0, Operand(isolate()->factory()->number_string()));
__ jmp(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5452,7 +5436,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
__ bind(&done);
}
@@ -5463,7 +5447,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
index 8bbacc3c58..533f4c8cca 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -135,8 +135,6 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
Scope* scope() const { return scope_; }
Register scratch0() { return r9; }
@@ -233,10 +231,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -255,7 +253,7 @@ class LCodeGen: public LCodeGenBase {
void EmitIntegerMathAbs(LMathAbs* instr);
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -266,8 +264,6 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index 6cfc846548..8067a6ae28 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -726,7 +726,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -841,14 +841,20 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
- LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2, temp3);
- return AssignPointerMap(DefineAsRegister(result));
+ if (instr->IsAllocationFolded()) {
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
+ LAllocate* result =
+ new (zone()) LAllocate(context, size, temp1, temp2, temp3);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
@@ -1461,17 +1467,6 @@ LInstruction* LChunkBuilder::DoInnerAllocatedObject(
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1558,15 +1553,9 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, x0), instr);
}
@@ -1626,10 +1615,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -1648,10 +1634,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0);
@@ -1929,20 +1912,6 @@ LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegisterAndClobber(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -2251,12 +2220,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result = new (zone())
LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2299,12 +2264,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
@@ -2481,17 +2442,26 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return result;
}
}
+ case kMathCos: {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ LMathCos* result = new (zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ }
+ case kMathSin: {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ LMathSin* result = new (zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
+ }
case kMathExp: {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- LOperand* double_temp1 = TempDoubleRegister();
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* temp3 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(input, double_temp1,
- temp1, temp2, temp3);
- return DefineAsRegister(result);
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ LMathExp* result = new (zone()) LMathExp(input);
+ return MarkAsCall(DefineFixedDouble(result, d0), instr);
}
case kMathFloor: {
DCHECK(instr->value()->representation().IsDouble());
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index 237487ff88..782da09546 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -57,7 +57,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -65,11 +64,11 @@ class LCodeGen;
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
- V(DoubleBits) \
V(DoubleToIntOrSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -81,7 +80,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -104,6 +102,8 @@ class LCodeGen;
V(MathAbs) \
V(MathAbsTagged) \
V(MathClz32) \
+ V(MathCos) \
+ V(MathSin) \
V(MathExp) \
V(MathFloorD) \
V(MathFloorI) \
@@ -163,7 +163,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -626,6 +625,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 3> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
public:
@@ -966,33 +980,6 @@ class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -1363,22 +1350,6 @@ class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 2> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
@@ -1568,18 +1539,14 @@ class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -1765,26 +1732,23 @@ class LMathAbsTagged: public LTemplateInstruction<1, 2, 3> {
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
+class LMathCos final : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathCos(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
-class LMathExp final : public LUnaryMathOperation<4> {
+class LMathSin final : public LUnaryMathOperation<0> {
public:
- LMathExp(LOperand* value,
- LOperand* double_temp1,
- LOperand* temp1,
- LOperand* temp2,
- LOperand* temp3)
- : LUnaryMathOperation<4>(value) {
- temps_[0] = double_temp1;
- temps_[1] = temp1;
- temps_[2] = temp2;
- temps_[3] = temp3;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathSin(LOperand* value) : LUnaryMathOperation<0>(value) {}
- LOperand* double_temp1() { return temps_[0]; }
- LOperand* temp1() { return temps_[1]; }
- LOperand* temp2() { return temps_[2]; }
- LOperand* temp3() { return temps_[3]; }
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LUnaryMathOperation<0> {
+ public:
+ explicit LMathExp(LOperand* value) : LUnaryMathOperation<0>(value) {}
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
@@ -2470,6 +2434,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
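+ // Growing the backing store may call into the runtime, which does not
+ // preserve double registers.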
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 9bbc8b87e8..b5e1245f3a 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -12,7 +12,6 @@
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -365,15 +364,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->constructor()).is(x1));
__ Mov(x0, Operand(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ Mov(x2, instr->hydrogen()->site());
- } else {
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- }
-
+ __ Mov(x2, instr->hydrogen()->site());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
@@ -406,7 +397,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
@@ -447,20 +438,13 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
- LoadContextFromDeferred(context);
+ if (context != nullptr) LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
@@ -599,7 +583,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Allocate a local context if needed.
- if (info()->num_heap_slots() > 0) {
+ if (info()->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in x1.
@@ -610,14 +594,12 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
// Context is returned in x0. It replaces the context passed to us. It's
@@ -625,10 +607,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Mov(cp, x0);
__ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
Register value = x0;
Register scratch = x3;
@@ -775,8 +758,6 @@ bool LCodeGen::GenerateJumpTable() {
// table.
__ Bl(&call_deopt_entry);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
masm()->CheckConstPool(false, false);
}
@@ -839,11 +820,9 @@ void LCodeGen::FinishCode(Handle<Code> code) {
PopulateDeoptimizationData(code);
}
-
void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
- BranchType branch_type, Register reg, int bit,
- Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, DeoptimizeReason deopt_reason, BranchType branch_type,
+ Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
@@ -892,7 +871,7 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
@@ -900,14 +879,13 @@ void LCodeGen::DeoptimizeBranch(
frame_is_built_ && !info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry* table_entry =
new (zone()) Deoptimizer::JumpTableEntry(
entry, deopt_info, bailout_type, !frame_is_built_);
 // We often have several deopts to the same entry; reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry->IsEquivalentTo(*jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -916,70 +894,59 @@ void LCodeGen::DeoptimizeBranch(
}
}
-
-void LCodeGen::Deoptimize(LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+void LCodeGen::Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type) {
DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
override_bailout_type);
}
-
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}
-
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}
-
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}
-
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
__ CompareRoot(rt, index);
DeoptimizeIf(eq, instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
__ CompareRoot(rt, index);
DeoptimizeIf(ne, instr, deopt_reason);
}
-
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
__ TestForMinusZero(input);
DeoptimizeIf(vs, instr, deopt_reason);
}
@@ -987,18 +954,16 @@ void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
-
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}
-
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}
@@ -1377,7 +1342,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1391,7 +1356,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, right);
}
@@ -1416,7 +1381,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register temp2 = ToRegister(instr->temp2());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -1426,6 +1391,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -1466,6 +1436,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Mov(ToRegister(instr->result()), Smi::FromInt(0));
PushSafepointRegistersScope scope(this);
+ LoadContextFromDeferred(instr->context());
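+ // The context is loaded eagerly here, so the deferred runtime call below
+ // can pass nullptr instead of reloading it.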
// We're in a SafepointRegistersScope so we can use any scratch registers.
Register size = x0;
if (instr->size()->IsConstantOperand()) {
@@ -1484,9 +1455,51 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Mov(x10, Smi::FromInt(flags));
__ Push(size, x10);
- CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, nullptr);
__ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ // If the allocation folding dominator's allocation triggered a GC, the
+ // allocation happened in the runtime. We have to reset the top pointer to
+ // virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ Register top_address = x10;
+ __ Sub(x0, x0, Operand(kHeapObjectTag));
+ __ Mov(top_address, Operand(allocation_top));
+ __ Str(x0, MemOperand(top_address));
+ __ Add(x0, x0, Operand(kHeapObjectTag));
+ }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
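+ // Space for folded allocations was reserved up front by the dominator, so
+ // a plain bump allocation without a limit check suffices here.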
+ Register result = ToRegister(instr->result());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ }
}
@@ -1506,7 +1519,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1698,7 +1711,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cond, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -1778,7 +1791,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
}
Register map = NoReg;
@@ -1845,7 +1858,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr, Deoptimizer::kUnexpectedObject);
+ Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -1981,7 +1994,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIfSmi(temp, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@@ -2036,7 +2049,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ Bind(&success);
@@ -2045,7 +2058,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, DeoptimizeReason::kSmi);
}
}
@@ -2053,7 +2066,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(value, instr, DeoptimizeReason::kNotASmi);
}
@@ -2066,7 +2079,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
}
@@ -2084,15 +2097,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ Cmp(scratch, first);
if (first == last) {
 // If there is only one type in the interval, check for equality.
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
}
} else {
uint8_t mask;
@@ -2103,10 +2116,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeReason::kWrongInstanceType);
} else {
DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeReason::kWrongInstanceType);
}
} else {
if (tag == 0) {
@@ -2115,7 +2128,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -2155,7 +2168,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
 // Check for undefined. Undefined is converted to zero for clamping conversion.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeReason::kNotAHeapNumberUndefined);
__ Mov(result, 0);
__ B(&done);
@@ -2170,30 +2183,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ Fmov(result_reg, value_reg);
- __ Lsr(result_reg, result_reg, 32);
- } else {
- __ Fmov(result_reg.W(), value_reg.S());
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
-
- // Insert the least significant 32 bits of hi_reg into the most significant
- // 32 bits of lo_reg, and move to a floating point register.
- __ Bfi(lo_reg, hi_reg, 32, 32);
- __ Fmov(result_reg, lo_reg);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Handle<String> class_name = instr->hydrogen()->class_name();
Label* true_label = instr->TrueLabel(chunk_);
@@ -2432,7 +2421,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@@ -2469,21 +2458,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -2511,14 +2500,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2530,7 +2519,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIfNotZero(temp, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -2553,7 +2542,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2565,7 +2554,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
@@ -2577,13 +2566,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// -1. If overflow is clear, set the flags for condition ne, as the
// dividend isn't -1, and thus we shouldn't deopt.
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Compute remainder and deopt if it's not zero.
Register remainder = ToRegister32(instr->temp());
__ Msub(remainder, result, divisor, dividend);
- DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIfNotZero(remainder, instr, DeoptimizeReason::kLostPrecision);
}
@@ -2592,11 +2581,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
Register result = ToRegister32(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
}
__ TryRepresentDoubleAsInt32(result, input, double_scratch());
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->tag_result()) {
__ SmiTag(result.X());
@@ -2636,7 +2625,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadInstanceDescriptors(map, result);
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
+ DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache);
__ Bind(&done);
}
@@ -2758,16 +2747,6 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(x0));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2792,16 +2771,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ Ldrb(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Cmp(object_prototype, prototype);
- __ B(eq, instr->TrueLabel(chunk_));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
__ B(eq, instr->FalseLabel(chunk_));
+ __ Cmp(object_prototype, prototype);
+ __ B(eq, instr->TrueLabel(chunk_));
__ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ B(&loop);
}
@@ -2830,7 +2809,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -2981,7 +2962,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3003,7 +2984,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3051,14 +3032,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).Is(x0));
- __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3152,7 +3131,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3248,7 +3227,7 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
Register scratch = ToRegister(instr->temp());
__ Fmov(scratch, result);
__ Eor(scratch, scratch, kHoleNanInt64);
- DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
+ DeoptimizeIfZero(scratch, instr, DeoptimizeReason::kHole);
}
}
@@ -3286,10 +3265,10 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(result, instr, DeoptimizeReason::kNotASmi);
} else {
DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -3303,7 +3282,7 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
- DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ Bind(&done);
@@ -3316,13 +3295,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3376,10 +3351,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -3404,7 +3376,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -3527,19 +3499,25 @@ void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
__ Bind(&done);
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToDoubleRegister(instr->value()).is(d0));
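+ // ieee754_cos is a plain C call: zero integer arguments and one double
+ // argument, which the register allocator has already placed in d0.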
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+ DCHECK(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+ DCHECK(ToDoubleRegister(instr->result()).Is(d0));
+}
void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
- DoubleRegister double_temp2 = double_scratch();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
- Register temp3 = ToRegister(instr->temp3());
-
- MathExpGenerator::EmitMathExp(masm(), input, result,
- double_temp1, double_temp2,
- temp1, temp2, temp3);
+ DCHECK(instr->IsMarkedAsCall());
+ DCHECK(ToDoubleRegister(instr->value()).is(d0));
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+ DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}
@@ -3556,7 +3534,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
}
__ Fcvtms(result, input);
@@ -3566,7 +3544,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
}
@@ -3592,13 +3570,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
return;
}
@@ -3621,14 +3599,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -3671,14 +3649,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Check for (0 / -x) that will produce negative zero.
@@ -3688,7 +3666,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
Label done;
@@ -3708,8 +3686,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToDoubleRegister(instr->value()).is(d0));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
- 0, 1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
DCHECK(ToDoubleRegister(instr->result()).Is(d0));
}
@@ -3847,18 +3824,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kOverflow);
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(result, instr, DeoptimizeReason::kMinusZero);
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kNaN);
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -3936,7 +3913,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ B(&done);
}
@@ -3955,7 +3932,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -3969,7 +3946,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -3984,12 +3961,12 @@ void LCodeGen::DoModI(LModI* instr) {
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
}
__ Bind(&done);
}
@@ -4012,10 +3989,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(left, instr, DeoptimizeReason::kMinusZero);
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(left, instr, DeoptimizeReason::kMinusZero);
}
}
@@ -4025,7 +4002,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Neg(result, left);
}
@@ -4041,7 +4018,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Add(result, left, left);
}
@@ -4060,7 +4037,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow);
}
if (right >= 0) {
@@ -4070,7 +4047,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4128,13 +4105,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
__ Mul(result, left, right);
}
@@ -4158,7 +4135,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4166,7 +4143,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@@ -4199,12 +4176,10 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ Mov(result, 0);
PushSafepointRegistersScope scope(this);
- // NumberTagU and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!result.is(cp)) {
+ __ Mov(cp, 0);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4264,13 +4239,10 @@ void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
{
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagU and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!dst.is(cp)) {
+ __ Mov(cp, 0);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4342,14 +4314,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(result, instr, DeoptimizeReason::kMinusZero);
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4537,7 +4509,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
+ DeoptimizeIfNegative(input.W(), instr, DeoptimizeReason::kOverflow);
}
__ SmiTag(output, input);
}
@@ -4549,7 +4521,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(input, instr, DeoptimizeReason::kNotASmi);
}
__ Bind(&untag);
@@ -4574,7 +4546,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -4584,7 +4556,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@@ -4637,7 +4609,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
}
break;
default: UNREACHABLE();
@@ -4647,7 +4619,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
}
__ Mov(result, left);
} else {
@@ -4691,6 +4663,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
__ Push(scratch1, scratch2);
+ __ LoadHeapObject(scratch1, instr->hydrogen()->feedback_vector());
+ __ Push(scratch1);
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -4776,7 +4750,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeReason::kHole);
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -4960,13 +4934,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5043,8 +5015,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
__ SmiTag(x3);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5052,7 +5023,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
}
// Deopt on smi, which means the elements array changed to dictionary mode.
- DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(result, instr, DeoptimizeReason::kSmi);
}
@@ -5160,14 +5131,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5296,7 +5265,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5310,7 +5279,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ Sub(result, left, right);
}
@@ -5351,7 +5320,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// Output contains zero, undefined is converted to zero for truncating
// conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5362,13 +5331,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(scratch1, instr, DeoptimizeReason::kMinusZero);
}
}
__ Bind(&done);
@@ -5442,8 +5411,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(this);
__ Mov(x1, Operand(to_map));
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -5459,7 +5427,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ Bind(&no_memento_found);
}
@@ -5483,8 +5451,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ Mov(x0, Immediate(isolate()->factory()->number_string()));
__ B(&end);
__ Bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ Bind(&end);
}
@@ -5605,7 +5573,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
@@ -5639,10 +5607,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(receiver, instr, DeoptimizeReason::kSmi);
__ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
+ Deoptimize(instr, DeoptimizeReason::kNotAJavaScriptObject);
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index f67ad5ab5d..2fc6f96d7a 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -199,38 +199,35 @@ class LCodeGen: public LCodeGenBase {
Register temp,
LOperand* index,
String::Encoding encoding);
- void DeoptimizeBranch(LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ void DeoptimizeBranch(LInstruction* instr, DeoptimizeReason deopt_reason,
BranchType branch_type, Register reg = NoReg,
int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL);
- void Deoptimize(LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
+ void Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType* override_bailout_type = NULL);
void DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfNegative(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ LInstruction* instr, DeoptimizeReason deopt_reason);
void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ LInstruction* instr, DeoptimizeReason deopt_reason);
void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
@@ -331,8 +328,7 @@ class LCodeGen: public LCodeGenBase {
int formal_parameter_count, int arity,
bool is_tail_call, LInstruction* instr);
- // Support for recording safepoint and position information.
- void RecordAndWritePosition(int position) override;
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
diff --git a/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
index 4f5eb223d4..acac4e19ff 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
@@ -66,7 +66,8 @@ class LGapResolver BASE_EMBEDDED {
// Registers used to solve cycles.
const Register& SavedValueRegister() {
- DCHECK(!masm_.ScratchRegister().IsAllocatable());
+ DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ masm_.ScratchRegister().code()));
return masm_.ScratchRegister();
}
// The scratch register is used to break cycles and to store constants.
@@ -77,7 +78,8 @@ class LGapResolver BASE_EMBEDDED {
// We use the Crankshaft floating-point scratch register to break a cycle
// involving double values as the MacroAssembler will not need it for the
// operations performed by the gap resolver.
- DCHECK(!crankshaft_fp_scratch.IsAllocatable());
+ DCHECK(!RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ crankshaft_fp_scratch.code()));
return crankshaft_fp_scratch;
}
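
Both DCHECKs in this file state the same invariant through the new RegisterConfiguration::Crankshaft() API: the registers the gap resolver leans on must never be allocatable, or a parallel move could clobber a live value. The reason a reserved scratch is needed at all is cycle breaking; a sketch with ints standing in for registers:

    #include <cassert>

    int main() {
      // A gap-move cycle a <-> b cannot be serialized without a third location.
      int a = 1, b = 2;
      int scratch = a;  // SavedValueRegister(): guaranteed free by the DCHECK
      a = b;
      b = scratch;
      assert(a == 2 && b == 1);
      return 0;
    }
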
diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.cc b/deps/v8/src/crankshaft/hydrogen-gvn.cc
index 07bfabc79a..e6ddd7526b 100644
--- a/deps/v8/src/crankshaft/hydrogen-gvn.cc
+++ b/deps/v8/src/crankshaft/hydrogen-gvn.cc
@@ -637,17 +637,12 @@ void HGlobalValueNumberingPhase::ProcessLoopBlock(
}
-bool HGlobalValueNumberingPhase::AllowCodeMotion() {
- return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
-}
-
-
bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
// If we've disabled code motion or we're in a block that unconditionally
// deoptimizes, don't move any instructions.
- return AllowCodeMotion() && !instr->block()->IsDeoptimizing() &&
- instr->block()->IsReachable();
+ return graph()->allow_code_motion() && !instr->block()->IsDeoptimizing() &&
+ instr->block()->IsReachable();
}
diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.h b/deps/v8/src/crankshaft/hydrogen-gvn.h
index a5e2168603..9a8d40710f 100644
--- a/deps/v8/src/crankshaft/hydrogen-gvn.h
+++ b/deps/v8/src/crankshaft/hydrogen-gvn.h
@@ -126,7 +126,6 @@ class HGlobalValueNumberingPhase final : public HPhase {
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
SideEffects loop_kills);
- bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
TrackedEffects Print(SideEffects side_effects) {
return TrackedEffects(&side_effects_tracker_, side_effects);
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index b57bebd8fc..b8020c7270 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -5,6 +5,7 @@
#include "src/crankshaft/hydrogen-instructions.h"
#include "src/base/bits.h"
+#include "src/base/ieee754.h"
#include "src/base/safe_math.h"
#include "src/crankshaft/hydrogen-infer-representation.h"
#include "src/double.h"
@@ -784,11 +785,9 @@ bool HInstruction::CanDeoptimize() {
case HValue::kCompareNumericAndBranch:
case HValue::kCompareObjectEqAndBranch:
case HValue::kConstant:
- case HValue::kConstructDouble:
case HValue::kContext:
case HValue::kDebugBreak:
case HValue::kDeclareGlobals:
- case HValue::kDoubleBits:
case HValue::kDummyUse:
case HValue::kEnterInlined:
case HValue::kEnvironmentMarker:
@@ -798,7 +797,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kHasCachedArrayIndexAndBranch:
case HValue::kHasInstanceTypeAndBranch:
case HValue::kInnerAllocatedObject:
- case HValue::kInstanceOf:
case HValue::kIsSmiAndBranch:
case HValue::kIsStringAndBranch:
case HValue::kIsUndetectableAndBranch:
@@ -1115,10 +1113,14 @@ const char* HUnaryMathOperation::OpName() const {
return "round";
case kMathAbs:
return "abs";
+ case kMathCos:
+ return "cos";
case kMathLog:
return "log";
case kMathExp:
return "exp";
+ case kMathSin:
+ return "sin";
case kMathSqrt:
return "sqrt";
case kMathPowHalf:
@@ -1554,6 +1556,9 @@ void HCheckInstanceType::GetCheckInterval(InstanceType* first,
case IS_JS_ARRAY:
*first = *last = JS_ARRAY_TYPE;
return;
+ case IS_JS_FUNCTION:
+ *first = *last = JS_FUNCTION_TYPE;
+ return;
case IS_JS_DATE:
*first = *last = JS_DATE_TYPE;
return;
@@ -1626,6 +1631,8 @@ const char* HCheckInstanceType::GetCheckName() const {
switch (check_) {
case IS_JS_RECEIVER: return "object";
case IS_JS_ARRAY: return "array";
+ case IS_JS_FUNCTION:
+ return "function";
case IS_JS_DATE:
return "date";
case IS_STRING: return "string";
@@ -1652,12 +1659,6 @@ std::ostream& HUnknownOSRValue::PrintDataTo(std::ostream& os) const { // NOLINT
}
-std::ostream& HInstanceOf::PrintDataTo(std::ostream& os) const { // NOLINT
- return os << NameOf(left()) << " " << NameOf(right()) << " "
- << NameOf(context());
-}
-
-
Range* HValue::InferRange(Zone* zone) {
Range* result;
if (representation().IsSmi() || type().IsSmi()) {
@@ -2171,6 +2172,32 @@ HConstant::HConstant(Handle<Object> object, Representation r)
BooleanValueField::encode(object->BooleanValue()) |
IsUndetectableField::encode(false) | IsCallableField::encode(false) |
InstanceTypeField::encode(kUnknownInstanceType)) {
+ if (object->IsNumber()) {
+ double n = object->Number();
+ bool has_int32_value = IsInteger32(n);
+ bit_field_ = HasInt32ValueField::update(bit_field_, has_int32_value);
+ int32_value_ = DoubleToInt32(n);
+ bit_field_ = HasSmiValueField::update(
+ bit_field_, has_int32_value && Smi::IsValid(int32_value_));
+ if (std::isnan(n)) {
+ double_value_ = std::numeric_limits<double>::quiet_NaN();
+ // Canonicalize object with NaN value.
+ DCHECK(object->IsHeapObject()); // NaN can't be a Smi.
+ Isolate* isolate = HeapObject::cast(*object)->GetIsolate();
+ object = isolate->factory()->nan_value();
+ object_ = Unique<Object>::CreateUninitialized(object);
+ } else {
+ double_value_ = n;
+ // Canonicalize object with -0.0 value.
+ if (bit_cast<int64_t>(n) == bit_cast<int64_t>(-0.0)) {
+ DCHECK(object->IsHeapObject()); // -0.0 can't be a Smi.
+ Isolate* isolate = HeapObject::cast(*object)->GetIsolate();
+ object = isolate->factory()->minus_zero_value();
+ object_ = Unique<Object>::CreateUninitialized(object);
+ }
+ }
+ bit_field_ = HasDoubleValueField::update(bit_field_, true);
+ }
if (object->IsHeapObject()) {
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
Isolate* isolate = heap_object->GetIsolate();
@@ -2186,16 +2213,6 @@ HConstant::HConstant(Handle<Object> object, Representation r)
bit_field_,
HasMapValue() && Handle<Map>::cast(heap_object)->is_stable());
}
- if (object->IsNumber()) {
- double n = object->Number();
- bool has_int32_value = IsInteger32(n);
- bit_field_ = HasInt32ValueField::update(bit_field_, has_int32_value);
- int32_value_ = DoubleToInt32(n);
- bit_field_ = HasSmiValueField::update(
- bit_field_, has_int32_value && Smi::IsValid(int32_value_));
- double_value_ = n;
- bit_field_ = HasDoubleValueField::update(bit_field_, true);
- }
Initialize(r);
}
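
The relocated block above canonicalizes number-valued constants as the HConstant is built: every NaN payload collapses to the one quiet NaN, and -0.0 is redirected to the canonical minus-zero heap object. The bit_cast comparison is required because -0.0 == 0.0 under IEEE comparison rules; only the bit pattern tells them apart. A self-contained sketch of that test, with memcpy playing the role of bit_cast:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static uint64_t Bits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // bit_cast<int64_t>(d) in the diff
      return bits;
    }

    int main() {
      assert(0.0 == -0.0);              // comparison cannot distinguish them
      assert(Bits(0.0) != Bits(-0.0));  // the sign bit can
      assert(Bits(-0.0) == (1ULL << 63));
      return 0;
    }
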
@@ -2246,7 +2263,6 @@ HConstant::HConstant(int32_t integer_value, Representation r,
Initialize(r);
}
-
HConstant::HConstant(double double_value, Representation r,
bool is_not_in_new_space, Unique<Object> object)
: object_(object),
@@ -2260,8 +2276,7 @@ HConstant::HConstant(double double_value, Representation r,
!std::isnan(double_value)) |
IsUndetectableField::encode(false) |
InstanceTypeField::encode(kUnknownInstanceType)),
- int32_value_(DoubleToInt32(double_value)),
- double_value_(double_value) {
+ int32_value_(DoubleToInt32(double_value)) {
bit_field_ = HasSmiValueField::update(
bit_field_, HasInteger32Value() && Smi::IsValid(int32_value_));
// It's possible to create a constant with a value in Smi-range but stored
@@ -2269,6 +2284,11 @@ HConstant::HConstant(double double_value, Representation r,
bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
bool is_smi = HasSmiValue() && !could_be_heapobject;
set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
+ if (std::isnan(double_value)) {
+ double_value_ = std::numeric_limits<double>::quiet_NaN();
+ } else {
+ double_value_ = double_value;
+ }
Initialize(r);
}
@@ -2419,9 +2439,9 @@ Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Isolate* isolate,
if (handle->IsBoolean()) {
res = handle->BooleanValue() ?
new(zone) HConstant(1) : new(zone) HConstant(0);
- } else if (handle->IsUndefined()) {
+ } else if (handle->IsUndefined(isolate)) {
res = new (zone) HConstant(std::numeric_limits<double>::quiet_NaN());
- } else if (handle->IsNull()) {
+ } else if (handle->IsNull(isolate)) {
res = new(zone) HConstant(0);
} else if (handle->IsString()) {
res = new(zone) HConstant(String::ToNumber(Handle<String>::cast(handle)));
@@ -3126,6 +3146,7 @@ Representation HUnaryMathOperation::RepresentationFromInputs() {
bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) {
DCHECK(side_effect == kNewSpacePromotion);
+ DCHECK(!IsAllocationFolded());
Zone* zone = block()->zone();
Isolate* isolate = block()->isolate();
if (!FLAG_use_allocation_folding) return false;
@@ -3153,7 +3174,8 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
HValue* current_size = size();
// TODO(hpayer): Add support for non-constant allocation in dominator.
- if (!dominator_size->IsInteger32Constant()) {
+ if (!current_size->IsInteger32Constant() ||
+ !dominator_size->IsInteger32Constant()) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s), "
"dynamic allocation size in dominator\n",
@@ -3171,32 +3193,6 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}
- if (!has_size_upper_bound()) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), "
- "can't estimate total allocation size\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
- }
- return false;
- }
-
- if (!current_size->IsInteger32Constant()) {
- // If it's not constant then it is a size_in_bytes calculation graph
- // like this: (const_header_size + const_element_size * size).
- DCHECK(current_size->IsInstruction());
-
- HInstruction* current_instr = HInstruction::cast(current_size);
- if (!current_instr->Dominates(dominator_allocate)) {
- if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), dynamic size "
- "value does not dominate target allocation\n",
- id(), Mnemonic(), dominator_allocate->id(),
- dominator_allocate->Mnemonic());
- }
- return false;
- }
- }
-
DCHECK(
(IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) ||
(IsOldSpaceAllocation() && dominator_allocate->IsOldSpaceAllocation()));
@@ -3213,7 +3209,7 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- int32_t current_size_max_value = size_upper_bound()->GetInteger32Constant();
+ int32_t current_size_max_value = size()->GetInteger32Constant();
int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
// Since we clear the first word after folded memory, we cannot use the
@@ -3227,27 +3223,9 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
return false;
}
- HInstruction* new_dominator_size_value;
-
- if (current_size->IsInteger32Constant()) {
- new_dominator_size_value = HConstant::CreateAndInsertBefore(
- isolate, zone, context(), new_dominator_size, Representation::None(),
- dominator_allocate);
- } else {
- HValue* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
- isolate, zone, context(), dominator_size_constant,
- Representation::Integer32(), dominator_allocate);
-
- // Add old and new size together and insert.
- current_size->ChangeRepresentation(Representation::Integer32());
-
- new_dominator_size_value = HAdd::New(
- isolate, zone, context(), new_dominator_size_constant, current_size);
- new_dominator_size_value->ClearFlag(HValue::kCanOverflow);
- new_dominator_size_value->ChangeRepresentation(Representation::Integer32());
-
- new_dominator_size_value->InsertBefore(dominator_allocate);
- }
+ HInstruction* new_dominator_size_value = HConstant::CreateAndInsertBefore(
+ isolate, zone, context(), new_dominator_size, Representation::None(),
+ dominator_allocate);
dominator_allocate->UpdateSize(new_dominator_size_value);
@@ -3257,103 +3235,45 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- bool keep_heap_iterable = FLAG_log_gc || FLAG_heap_stats;
-#ifdef VERIFY_HEAP
- keep_heap_iterable = keep_heap_iterable || FLAG_verify_heap;
-#endif
-
- if (keep_heap_iterable) {
- dominator_allocate->MakePrefillWithFiller();
- } else {
- // TODO(hpayer): This is a short-term hack to make allocation mementos
- // work again in new space.
- dominator_allocate->ClearNextMapWord(original_object_size);
+ if (IsAllocationFoldingDominator()) {
+ DeleteAndReplaceWith(dominator_allocate);
+ if (FLAG_trace_allocation_folding) {
+ PrintF(
+ "#%d (%s) folded dominator into #%d (%s), new dominator size: %d\n",
+ id(), Mnemonic(), dominator_allocate->id(),
+ dominator_allocate->Mnemonic(), new_dominator_size);
+ }
+ return true;
}
- dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord());
+ if (!dominator_allocate->IsAllocationFoldingDominator()) {
+ HAllocate* first_alloc =
+ HAllocate::New(isolate, zone, dominator_allocate->context(),
+ dominator_size, dominator_allocate->type(),
+ IsNewSpaceAllocation() ? NOT_TENURED : TENURED,
+ JS_OBJECT_TYPE, block()->graph()->GetConstant0());
+ first_alloc->InsertAfter(dominator_allocate);
+ dominator_allocate->ReplaceAllUsesWith(first_alloc);
+ dominator_allocate->MakeAllocationFoldingDominator();
+ first_alloc->MakeFoldedAllocation(dominator_allocate);
+ if (FLAG_trace_allocation_folding) {
+ PrintF("#%d (%s) inserted for dominator #%d (%s)\n", first_alloc->id(),
+ first_alloc->Mnemonic(), dominator_allocate->id(),
+ dominator_allocate->Mnemonic());
+ }
+ }
- // After that replace the dominated allocate instruction.
- HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
- isolate, zone, context(), dominator_size_constant, Representation::None(),
- this);
+ MakeFoldedAllocation(dominator_allocate);
- HInstruction* dominated_allocate_instr = HInnerAllocatedObject::New(
- isolate, zone, context(), dominator_allocate, inner_offset, type());
- dominated_allocate_instr->InsertBefore(this);
- DeleteAndReplaceWith(dominated_allocate_instr);
if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) folded into #%d (%s)\n",
- id(), Mnemonic(), dominator_allocate->id(),
- dominator_allocate->Mnemonic());
+ PrintF("#%d (%s) folded into #%d (%s), new dominator size: %d\n", id(),
+ Mnemonic(), dominator_allocate->id(), dominator_allocate->Mnemonic(),
+ new_dominator_size);
}
return true;
}
-void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
- DCHECK(filler_free_space_size_ != NULL);
- Zone* zone = block()->zone();
- // We must explicitly force Smi representation here because on x64 we
- // would otherwise automatically choose int32, but the actual store
- // requires a Smi-tagged value.
- HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
- block()->isolate(), zone, context(),
- filler_free_space_size_->value()->GetInteger32Constant() +
- free_space_size,
- Representation::Smi(), filler_free_space_size_);
- filler_free_space_size_->UpdateValue(new_free_space_size);
-}
-
-
-void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
- DCHECK(filler_free_space_size_ == NULL);
- Isolate* isolate = block()->isolate();
- Zone* zone = block()->zone();
- HInstruction* free_space_instr =
- HInnerAllocatedObject::New(isolate, zone, context(), dominating_allocate_,
- dominating_allocate_->size(), type());
- free_space_instr->InsertBefore(this);
- HConstant* filler_map = HConstant::CreateAndInsertAfter(
- zone, Unique<Map>::CreateImmovable(isolate->factory()->free_space_map()),
- true, free_space_instr);
- HInstruction* store_map =
- HStoreNamedField::New(isolate, zone, context(), free_space_instr,
- HObjectAccess::ForMap(), filler_map);
- store_map->SetFlag(HValue::kHasNoObservableSideEffects);
- store_map->InsertAfter(filler_map);
-
- // We must explicitly force Smi representation here because on x64 we
- // would otherwise automatically choose int32, but the actual store
- // requires a Smi-tagged value.
- HConstant* filler_size =
- HConstant::CreateAndInsertAfter(isolate, zone, context(), free_space_size,
- Representation::Smi(), store_map);
- // Must force Smi representation for x64 (see comment above).
- HObjectAccess access = HObjectAccess::ForMapAndOffset(
- isolate->factory()->free_space_map(), FreeSpace::kSizeOffset,
- Representation::Smi());
- HStoreNamedField* store_size = HStoreNamedField::New(
- isolate, zone, context(), free_space_instr, access, filler_size);
- store_size->SetFlag(HValue::kHasNoObservableSideEffects);
- store_size->InsertAfter(filler_size);
- filler_free_space_size_ = store_size;
-}
-
-
-void HAllocate::ClearNextMapWord(int offset) {
- if (MustClearNextMapWord()) {
- Zone* zone = block()->zone();
- HObjectAccess access =
- HObjectAccess::ForObservableJSObjectOffset(offset);
- HStoreNamedField* clear_next_map =
- HStoreNamedField::New(block()->isolate(), zone, context(), this, access,
- block()->graph()->GetConstant0());
- clear_next_map->ClearAllSideEffects();
- clear_next_map->InsertAfter(this);
- }
-}
-
-
std::ostream& HAllocate::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(size()) << " (";
if (IsNewSpaceAllocation()) os << "N";
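
The rewritten HandleSideEffectDominator above, together with the free-space-filler machinery deleted after it, moves allocation folding to a simpler scheme: folding now requires both sizes to be Integer32 constants, the dominating HAllocate is grown to the combined size (demoting the original dominator to a pure size reservation whose uses are redirected to a folded allocation at offset 0), and each dominated allocation is marked folded so codegen turns it into a fixed offset into the single chunk, instead of stamping filler objects over the slack. A toy sketch of the size and offset bookkeeping under those constant-size assumptions:

    #include <cstdio>

    struct Folded { int offset, size; };

    int main() {
      const int sizes[] = {16, 24, 8};  // dominated allocation sizes, in bytes
      Folded folded[3];
      int dominator_size = 0;
      for (int i = 0; i < 3; ++i) {     // MakeFoldedAllocation, in spirit
        folded[i] = {dominator_size, sizes[i]};
        dominator_size += sizes[i];     // UpdateSize on the dominator
      }
      std::printf("one allocation of %d bytes\n", dominator_size);  // 48
      return 0;
    }
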
@@ -3387,13 +3307,11 @@ bool HStoreKeyed::NeedsCanonicalization() {
Representation from = HChange::cast(value())->from();
return from.IsTagged() || from.IsHeapObject();
}
- case kLoadNamedField:
- case kPhi: {
- // Better safe than sorry...
- return true;
- }
- default:
+ case kConstant:
+ // Double constants are canonicalized upon construction.
return false;
+ default:
+ return !value()->IsBinaryOperation();
}
}
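
The flipped default in NeedsCanonicalization builds on the HConstant change above: double constants are canonicalized at construction, so a constant store never needs another pass, and only values that might carry an arbitrary NaN payload still do. Canonicalization matters for double stores because V8 reserves one NaN bit pattern as the hole marker in FixedDoubleArrays, so an uncanonicalized payload written raw could be misread later. A sketch of the normalization itself:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    static double Canonicalize(double d) {
      // Any NaN (d != d) collapses to the single canonical quiet NaN.
      return d != d ? std::numeric_limits<double>::quiet_NaN() : d;
    }

    int main() {
      const uint64_t odd_payload = 0x7FF0000000000001ULL;  // non-canonical NaN
      double d;
      std::memcpy(&d, &odd_payload, sizeof d);
      const double c = Canonicalize(d);
      uint64_t out;
      std::memcpy(&out, &c, sizeof out);
      std::printf("%016llx\n", static_cast<unsigned long long>(out));
      return 0;
    }
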
@@ -3502,6 +3420,9 @@ HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone,
}
if (std::isinf(d)) { // +Infinity and -Infinity.
switch (op) {
+ case kMathCos:
+ case kMathSin:
+ return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
case kMathExp:
return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
case kMathLog:
@@ -3523,11 +3444,14 @@ HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone,
}
}
switch (op) {
+ case kMathCos:
+ return H_CONSTANT_DOUBLE(base::ieee754::cos(d));
case kMathExp:
- lazily_initialize_fast_exp(isolate);
- return H_CONSTANT_DOUBLE(fast_exp(d, isolate));
+ return H_CONSTANT_DOUBLE(base::ieee754::exp(d));
case kMathLog:
- return H_CONSTANT_DOUBLE(std::log(d));
+ return H_CONSTANT_DOUBLE(base::ieee754::log(d));
+ case kMathSin:
+ return H_CONSTANT_DOUBLE(base::ieee754::sin(d));
case kMathSqrt:
lazily_initialize_fast_sqrt(isolate);
return H_CONSTANT_DOUBLE(fast_sqrt(d, isolate));
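
Folding kMathCos/kMathExp/kMathLog/kMathSin through base::ieee754 rather than std::log or the fast_exp generator keeps compile-time constant folding bit-identical to what the generated code's C calls (see the DoMathExp and DoMathLog hunks earlier) produce at runtime. The invariant being preserved, in sketch form, with std::exp as a stand-in for the shared routine:

    #include <cassert>
    #include <cmath>

    // One routine serves both constant folding and runtime execution, which
    // guarantees the two paths agree on every bit of the result.
    static double (*const shared_exp)(double) = std::exp;

    int main() {
      const double folded = shared_exp(1.0);  // "compile-time" result
      double x = 1.0;
      const double at_runtime = shared_exp(x);
      assert(folded == at_runtime);
      return 0;
    }
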
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 196a14fc70..98c7275f85 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -16,6 +16,7 @@
#include "src/crankshaft/hydrogen-types.h"
#include "src/crankshaft/unique.h"
#include "src/deoptimizer.h"
+#include "src/globals.h"
#include "src/small-pointer-list.h"
#include "src/utils.h"
#include "src/zone.h"
@@ -77,13 +78,11 @@ class LChunkBuilder;
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(Constant) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
V(Div) \
- V(DoubleBits) \
V(DummyUse) \
V(EnterInlined) \
V(EnvironmentMarker) \
@@ -95,7 +94,6 @@ class LChunkBuilder;
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InvokeFunction) \
V(HasInPrototypeChainAndBranch) \
V(IsStringAndBranch) \
@@ -1115,7 +1113,7 @@ class HInstruction : public HValue {
: HValue(type),
next_(NULL),
previous_(NULL),
- position_(RelocInfo::kNoPosition) {
+ position_(kNoSourcePosition) {
SetDependsOnFlag(kOsrEntries);
}
@@ -1303,7 +1301,7 @@ class HGoto final : public HTemplateControlInstruction<1, 0> {
class HDeoptimize final : public HTemplateControlInstruction<1, 0> {
public:
static HDeoptimize* New(Isolate* isolate, Zone* zone, HValue* context,
- Deoptimizer::DeoptReason reason,
+ DeoptimizeReason reason,
Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation) {
return new(zone) HDeoptimize(reason, type, unreachable_continuation);
@@ -1318,20 +1316,19 @@ class HDeoptimize final : public HTemplateControlInstruction<1, 0> {
return Representation::None();
}
- Deoptimizer::DeoptReason reason() const { return reason_; }
+ DeoptimizeReason reason() const { return reason_; }
Deoptimizer::BailoutType type() { return type_; }
DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
private:
- explicit HDeoptimize(Deoptimizer::DeoptReason reason,
- Deoptimizer::BailoutType type,
+ explicit HDeoptimize(DeoptimizeReason reason, Deoptimizer::BailoutType type,
HBasicBlock* unreachable_continuation)
: reason_(reason), type_(type) {
SetSuccessorAt(0, unreachable_continuation);
}
- Deoptimizer::DeoptReason reason_;
+ DeoptimizeReason reason_;
Deoptimizer::BailoutType type_;
};
@@ -1661,65 +1658,6 @@ class HClampToUint8 final : public HUnaryOperation {
};
-class HDoubleBits final : public HUnaryOperation {
- public:
- enum Bits { HIGH, LOW };
- DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Double();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits)
-
- Bits bits() { return bits_; }
-
- protected:
- bool DataEquals(HValue* other) override {
- return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
- }
-
- private:
- HDoubleBits(HValue* value, Bits bits)
- : HUnaryOperation(value), bits_(bits) {
- set_representation(Representation::Integer32());
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return true; }
-
- Bits bits_;
-};
-
-
-class HConstructDouble final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Integer32();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble)
-
- HValue* hi() { return OperandAt(0); }
- HValue* lo() { return OperandAt(1); }
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HConstructDouble(HValue* hi, HValue* lo) {
- set_representation(Representation::Double());
- SetFlag(kUseGVN);
- SetOperandAt(0, hi);
- SetOperandAt(1, lo);
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
enum RemovableSimulate {
REMOVABLE_SIMULATE,
FIXED_SIMULATE
@@ -2123,13 +2061,16 @@ class HThisFunction final : public HTemplateInstruction<0> {
class HDeclareGlobals final : public HUnaryOperation {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
- Handle<FixedArray>,
- int);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HDeclareGlobals,
+ Handle<FixedArray>, int,
+ Handle<TypeFeedbackVector>);
HValue* context() { return OperandAt(0); }
Handle<FixedArray> pairs() const { return pairs_; }
int flags() const { return flags_; }
+ Handle<TypeFeedbackVector> feedback_vector() const {
+ return feedback_vector_;
+ }
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
@@ -2138,17 +2079,18 @@ class HDeclareGlobals final : public HUnaryOperation {
}
private:
- HDeclareGlobals(HValue* context,
- Handle<FixedArray> pairs,
- int flags)
+ HDeclareGlobals(HValue* context, Handle<FixedArray> pairs, int flags,
+ Handle<TypeFeedbackVector> feedback_vector)
: HUnaryOperation(context),
pairs_(pairs),
+ feedback_vector_(feedback_vector),
flags_(flags) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
Handle<FixedArray> pairs_;
+ Handle<TypeFeedbackVector> feedback_vector_;
int flags_;
};
@@ -2162,8 +2104,6 @@ class HCall : public HTemplateInstruction<V> {
this->SetAllSideEffects();
}
- HType CalculateInferredType() final { return HType::Tagged(); }
-
virtual int argument_count() const {
return argument_count_;
}
@@ -2241,8 +2181,6 @@ class HCallWithDescriptor final : public HInstruction {
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
- HType CalculateInferredType() final { return HType::Tagged(); }
-
// Defines whether this instruction corresponds to a JS call at tail position.
TailCallMode syntactic_tail_call_mode() const {
return SyntacticTailCallModeField::decode(bit_field_);
@@ -2458,9 +2396,11 @@ class HUnaryMathOperation final : public HTemplateInstruction<2> {
return Representation::Tagged();
} else {
switch (op_) {
+ case kMathCos:
case kMathFloor:
case kMathRound:
case kMathFround:
+ case kMathSin:
case kMathSqrt:
case kMathPowHalf:
case kMathLog:
@@ -2498,7 +2438,7 @@ class HUnaryMathOperation final : public HTemplateInstruction<2> {
// Indicates if we support a double (and int32) output for Math.floor and
// Math.round.
bool SupportsFlexibleFloorAndRound() const {
-#if V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
return true;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
return CpuFeatures::IsSupported(SSE4_1);
@@ -2529,9 +2469,11 @@ class HUnaryMathOperation final : public HTemplateInstruction<2> {
// is tagged, and not when it is an unboxed double or unboxed integer.
SetChangesFlag(kNewSpacePromotion);
break;
+ case kMathCos:
case kMathFround:
case kMathLog:
case kMathExp:
+ case kMathSin:
case kMathSqrt:
case kMathPowHalf:
set_representation(Representation::Double());
@@ -2791,6 +2733,7 @@ class HCheckInstanceType final : public HUnaryOperation {
enum Check {
IS_JS_RECEIVER,
IS_JS_ARRAY,
+ IS_JS_FUNCTION,
IS_JS_DATE,
IS_STRING,
IS_INTERNALIZED_STRING,
@@ -2809,6 +2752,8 @@ class HCheckInstanceType final : public HUnaryOperation {
switch (check_) {
case IS_JS_RECEIVER: return HType::JSReceiver();
case IS_JS_ARRAY: return HType::JSArray();
+ case IS_JS_FUNCTION:
+ return HType::JSObject();
case IS_JS_DATE: return HType::JSObject();
case IS_STRING: return HType::String();
case IS_INTERNALIZED_STRING: return HType::String();
@@ -3767,6 +3712,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
: HBinaryOperation(context, left, right, type) {
SetFlag(kFlexibleRepresentation);
SetFlag(kTruncatingToInt32);
+ SetFlag(kAllowUndefinedAsNaN);
SetAllSideEffects();
}
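
Setting kAllowUndefinedAsNaN on every bitwise binary operation is sound because these operations truncate to int32: JavaScript coerces undefined to NaN, and NaN truncates to 0, so expressions like undefined | 0 need no deopt. A simplified sketch of the coercion chain (real ToInt32 also wraps finite values modulo 2^32, which is omitted here):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    static int32_t TruncateToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;  // NaN and Inf become 0
      return static_cast<int32_t>(d);                // modulo-2^32 wrap omitted
    }

    int main() {
      const double undefined_as_nan = std::nan("");  // ToNumber(undefined)
      assert((TruncateToInt32(undefined_as_nan) | 0) == 0);  // undefined | 0
      return 0;
    }
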
@@ -4271,27 +4217,6 @@ class HTypeofIsAndBranch final : public HUnaryControlInstruction {
};
-class HInstanceOf final : public HBinaryOperation {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
-
- private:
- HInstanceOf(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, HType::Boolean()) {
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-};
-
-
class HHasInPrototypeChainAndBranch final
: public HTemplateControlInstruction<2, 2> {
public:
@@ -4396,6 +4321,11 @@ class HAdd final : public HArithmeticBinaryOperation {
SetChangesFlag(kNewSpacePromotion);
ClearFlag(kAllowUndefinedAsNaN);
}
+ if (!right()->type().IsTaggedNumber() &&
+ !right()->representation().IsDouble() &&
+ !right()->representation().IsSmiOrInteger32()) {
+ ClearFlag(kAllowUndefinedAsNaN);
+ }
}
Representation RepresentationFromInputs() override;
@@ -4894,26 +4824,20 @@ class HUnknownOSRValue final : public HTemplateInstruction<0> {
HPhi* incoming_value_;
};
-
-class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
+class HLoadGlobalGeneric final : public HTemplateInstruction<1> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
- Handle<String>, TypeofMode);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadGlobalGeneric,
+ Handle<String>, TypeofMode,
+ Handle<TypeFeedbackVector>,
+ FeedbackVectorSlot);
HValue* context() { return OperandAt(0); }
- HValue* global_object() { return OperandAt(1); }
Handle<String> name() const { return name_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const { return true; }
- void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot) {
- feedback_vector_ = vector;
- slot_ = slot;
- }
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
@@ -4924,11 +4848,14 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
private:
- HLoadGlobalGeneric(HValue* context, HValue* global_object,
- Handle<String> name, TypeofMode typeof_mode)
- : name_(name), typeof_mode_(typeof_mode) {
+ HLoadGlobalGeneric(HValue* context, Handle<String> name,
+ TypeofMode typeof_mode, Handle<TypeFeedbackVector> vector,
+ FeedbackVectorSlot slot)
+ : name_(name),
+ typeof_mode_(typeof_mode),
+ feedback_vector_(vector),
+ slot_(slot) {
SetOperandAt(0, context);
- SetOperandAt(1, global_object);
set_representation(Representation::Tagged());
SetAllSideEffects();
}
@@ -4939,8 +4866,7 @@ class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
FeedbackVectorSlot slot_;
};
-
-class HAllocate final : public HTemplateInstruction<2> {
+class HAllocate final : public HTemplateInstruction<3> {
public:
static bool CompatibleInstanceTypes(InstanceType type1,
InstanceType type2) {
@@ -4951,9 +4877,10 @@ class HAllocate final : public HTemplateInstruction<2> {
static HAllocate* New(
Isolate* isolate, Zone* zone, HValue* context, HValue* size, HType type,
PretenureFlag pretenure_flag, InstanceType instance_type,
+ HValue* dominator,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null()) {
- return new(zone) HAllocate(context, size, type, pretenure_flag,
- instance_type, allocation_site);
+ return new (zone) HAllocate(context, size, type, pretenure_flag,
+ instance_type, dominator, allocation_site);
}
// Maximum instance size for which allocations will be inlined.
@@ -4961,13 +4888,7 @@ class HAllocate final : public HTemplateInstruction<2> {
HValue* context() const { return OperandAt(0); }
HValue* size() const { return OperandAt(1); }
-
- bool has_size_upper_bound() { return size_upper_bound_ != NULL; }
- HConstant* size_upper_bound() { return size_upper_bound_; }
- void set_size_upper_bound(HConstant* value) {
- DCHECK(size_upper_bound_ == NULL);
- size_upper_bound_ = value;
- }
+ HValue* allocation_folding_dominator() const { return OperandAt(2); }
Representation RequiredInputRepresentation(int index) override {
if (index == 0) {
@@ -5005,14 +4926,28 @@ class HAllocate final : public HTemplateInstruction<2> {
flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
}
- bool MustClearNextMapWord() const {
- return (flags_ & CLEAR_NEXT_MAP_WORD) != 0;
- }
-
void MakeDoubleAligned() {
flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
}
+ void MakeAllocationFoldingDominator() {
+ flags_ =
+ static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDING_DOMINATOR);
+ }
+
+ bool IsAllocationFoldingDominator() {
+ return (flags_ & ALLOCATION_FOLDING_DOMINATOR) != 0;
+ }
+
+ void MakeFoldedAllocation(HAllocate* dominator) {
+ flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATION_FOLDED);
+ ClearFlag(kTrackSideEffectDominators);
+ ClearChangesFlag(kNewSpacePromotion);
+ SetOperandAt(2, dominator);
+ }
+
+ bool IsAllocationFolded() { return (flags_ & ALLOCATION_FOLDED) != 0; }
+
bool HandleSideEffectDominator(GVNFlag side_effect,
HValue* dominator) override;
@@ -5026,23 +4961,19 @@ class HAllocate final : public HTemplateInstruction<2> {
ALLOCATE_IN_OLD_SPACE = 1 << 2,
ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
PREFILL_WITH_FILLER = 1 << 4,
- CLEAR_NEXT_MAP_WORD = 1 << 5
+ ALLOCATION_FOLDING_DOMINATOR = 1 << 5,
+ ALLOCATION_FOLDED = 1 << 6
};
- HAllocate(HValue* context,
- HValue* size,
- HType type,
- PretenureFlag pretenure_flag,
- InstanceType instance_type,
- Handle<AllocationSite> allocation_site =
- Handle<AllocationSite>::null())
- : HTemplateInstruction<2>(type),
- flags_(ComputeFlags(pretenure_flag, instance_type)),
- dominating_allocate_(NULL),
- filler_free_space_size_(NULL),
- size_upper_bound_(NULL) {
+ HAllocate(
+ HValue* context, HValue* size, HType type, PretenureFlag pretenure_flag,
+ InstanceType instance_type, HValue* dominator,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null())
+ : HTemplateInstruction<3>(type),
+ flags_(ComputeFlags(pretenure_flag, instance_type)) {
SetOperandAt(0, context);
UpdateSize(size);
+ SetOperandAt(2, dominator);
set_representation(Representation::Tagged());
SetFlag(kTrackSideEffectDominators);
SetChangesFlag(kNewSpacePromotion);
@@ -5072,46 +5003,20 @@ class HAllocate final : public HTemplateInstruction<2> {
if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
flags = static_cast<Flags>(flags | PREFILL_WITH_FILLER);
}
- if (pretenure_flag == NOT_TENURED &&
- AllocationSite::CanTrack(instance_type)) {
- flags = static_cast<Flags>(flags | CLEAR_NEXT_MAP_WORD);
- }
return flags;
}
- void UpdateClearNextMapWord(bool clear_next_map_word) {
- flags_ = static_cast<Flags>(clear_next_map_word
- ? flags_ | CLEAR_NEXT_MAP_WORD
- : flags_ & ~CLEAR_NEXT_MAP_WORD);
- }
-
void UpdateSize(HValue* size) {
SetOperandAt(1, size);
- if (size->IsInteger32Constant()) {
- size_upper_bound_ = HConstant::cast(size);
- } else {
- size_upper_bound_ = NULL;
- }
}
- HAllocate* GetFoldableDominator(HAllocate* dominator);
-
- void UpdateFreeSpaceFiller(int32_t filler_size);
-
- void CreateFreeSpaceFiller(int32_t filler_size);
-
bool IsFoldable(HAllocate* allocate) {
return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
(IsOldSpaceAllocation() && allocate->IsOldSpaceAllocation());
}
- void ClearNextMapWord(int offset);
-
Flags flags_;
Handle<Map> known_initial_map_;
- HAllocate* dominating_allocate_;
- HStoreNamedField* filler_free_space_size_;
- HConstant* size_upper_bound_;
};
@@ -5183,9 +5088,23 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* value,
HValue* dominator) {
+ // There may be multiple inner allocates dominated by one allocate.
while (object->IsInnerAllocatedObject()) {
object = HInnerAllocatedObject::cast(object)->base_object();
}
+
+ if (object->IsAllocate()) {
+ HAllocate* allocate = HAllocate::cast(object);
+ if (allocate->IsAllocationFolded()) {
+ HValue* dominator = allocate->allocation_folding_dominator();
+ // There is no guarantee that all allocations are folded together because
+ // GVN performs a fixpoint.
+ if (HAllocate::cast(dominator)->IsAllocationFoldingDominator()) {
+ object = dominator;
+ }
+ }
+ }
+
if (object->IsConstant() &&
HConstant::cast(object)->HasExternalReferenceValue()) {
// Stores to external references require no write barriers
@@ -5226,10 +5145,7 @@ class HLoadContextSlot final : public HUnaryOperation {
// hole value. This is used for checking for loading of uninitialized
// harmony bindings where we deoptimize into full-codegen generated code
// which will subsequently throw a reference error.
- kCheckDeoptimize,
- // Load and check the value of the context slot. Return undefined if it's
- // the hole value. This is used for non-harmony const assignments
- kCheckReturnUndefined
+ kCheckDeoptimize
};
HLoadContextSlot(HValue* context, int slot_index, Mode mode)
@@ -5282,9 +5198,7 @@ class HStoreContextSlot final : public HTemplateInstruction<2> {
// hole value. This is used for checking for assignments to uninitialized
// harmony bindings where we deoptimize into full-codegen generated code
// which will subsequently throw a reference error.
- kCheckDeoptimize,
- // Check the previous value and ignore assignment if it isn't a hole value
- kCheckIgnoreAssignment
+ kCheckDeoptimize
};
DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int,
@@ -5604,6 +5518,19 @@ class HObjectAccess final {
Handle<Name>::null(), false, false);
}
+ static HObjectAccess ForBoundTargetFunction() {
+ return HObjectAccess(kInobject,
+ JSBoundFunction::kBoundTargetFunctionOffset);
+ }
+
+ static HObjectAccess ForBoundThis() {
+ return HObjectAccess(kInobject, JSBoundFunction::kBoundThisOffset);
+ }
+
+ static HObjectAccess ForBoundArguments() {
+ return HObjectAccess(kInobject, JSBoundFunction::kBoundArgumentsOffset);
+ }
+
// Create an access to an offset in a fixed array header.
static HObjectAccess ForFixedArrayHeader(int offset);
@@ -5925,26 +5852,19 @@ class HLoadNamedField final : public HTemplateInstruction<2> {
class HLoadNamedGeneric final : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadNamedGeneric, HValue*,
- Handle<Name>, InlineCacheState);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
+ Handle<Name>,
+ Handle<TypeFeedbackVector>,
+ FeedbackVectorSlot);
HValue* context() const { return OperandAt(0); }
HValue* object() const { return OperandAt(1); }
Handle<Name> name() const { return name_; }
- InlineCacheState initialization_state() const {
- return initialization_state_;
- }
FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const { return true; }
- void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot) {
- feedback_vector_ = vector;
- slot_ = slot;
- }
Representation RequiredInputRepresentation(int index) override {
return Representation::Tagged();
@@ -5956,9 +5876,8 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
private:
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
- InlineCacheState initialization_state)
- : name_(name),
- initialization_state_(initialization_state) {
+ Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+ : name_(name), feedback_vector_(vector), slot_(slot) {
SetOperandAt(0, context);
SetOperandAt(1, object);
set_representation(Representation::Tagged());
@@ -5968,7 +5887,6 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
Handle<Name> name_;
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorSlot slot_;
- InlineCacheState initialization_state_;
};
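
This rewrite is the recurring theme of the hunks that follow: the mutable SetVectorAndSlot/HasVectorAndSlot protocol is replaced by constructor parameters, so a generic load carries its feedback vector and slot from birth and can never be observed half-initialized. A standalone sketch of the resulting shape, using stand-in types for Handle<TypeFeedbackVector> and FeedbackVectorSlot; the keyed and store variants below follow the same pattern:

    #include <cassert>
    #include <memory>
    #include <string>

    // Hypothetical stand-ins for Handle<TypeFeedbackVector> / FeedbackVectorSlot.
    using FeedbackVector = std::shared_ptr<std::string>;
    struct FeedbackSlot { int index = -1; };

    // After the change: vector and slot are constructor arguments, so every
    // instance is fully initialized and the HasVectorAndSlot()/is_null()
    // checks at the use sites become unnecessary.
    class LoadNamedGeneric {
     public:
      LoadNamedGeneric(std::string name, FeedbackVector vector, FeedbackSlot slot)
          : name_(std::move(name)), vector_(std::move(vector)), slot_(slot) {
        assert(vector_ != nullptr);  // invariant holds from construction on
      }
      const std::string& name() const { return name_; }
      FeedbackSlot slot() const { return slot_; }

     private:
      std::string name_;
      FeedbackVector vector_;
      FeedbackSlot slot_;
    };

    int main() {
      LoadNamedGeneric load("x", std::make_shared<std::string>("vector"), {3});
      assert(load.slot().index == 3);
    }
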
@@ -6210,27 +6128,17 @@ class HLoadKeyed final : public HTemplateInstruction<4>,
class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadKeyedGeneric, HValue*,
- HValue*, InlineCacheState);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
+ HValue*,
+ Handle<TypeFeedbackVector>,
+ FeedbackVectorSlot);
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
- InlineCacheState initialization_state() const {
- return initialization_state_;
- }
FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const {
- DCHECK(initialization_state_ == MEGAMORPHIC || !feedback_vector_.is_null());
- return !feedback_vector_.is_null();
- }
- void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot) {
- feedback_vector_ = vector;
- slot_ = slot;
- }
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
@@ -6245,8 +6153,8 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
private:
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
- InlineCacheState initialization_state)
- : initialization_state_(initialization_state) {
+ Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+ : feedback_vector_(vector), slot_(slot) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
SetOperandAt(1, key);
@@ -6256,7 +6164,6 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorSlot slot_;
- InlineCacheState initialization_state_;
};
@@ -6418,20 +6325,18 @@ class HStoreNamedField final : public HTemplateInstruction<3> {
uint32_t bit_field_;
};
-
class HStoreNamedGeneric final : public HTemplateInstruction<3> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreNamedGeneric, HValue*,
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreNamedGeneric, HValue*,
Handle<Name>, HValue*,
- LanguageMode, InlineCacheState);
+ LanguageMode,
+ Handle<TypeFeedbackVector>,
+ FeedbackVectorSlot);
HValue* object() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
Handle<Name> name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
- InlineCacheState initialization_state() const {
- return initialization_state_;
- }
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
@@ -6443,22 +6348,17 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const { return true; }
- void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot) {
- feedback_vector_ = vector;
- slot_ = slot;
- }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
private:
HStoreNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
HValue* value, LanguageMode language_mode,
- InlineCacheState initialization_state)
+ Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: name_(name),
- language_mode_(language_mode),
- initialization_state_(initialization_state) {
+ feedback_vector_(vector),
+ slot_(slot),
+ language_mode_(language_mode) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -6469,10 +6369,8 @@ class HStoreNamedGeneric final : public HTemplateInstruction<3> {
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorSlot slot_;
LanguageMode language_mode_;
- InlineCacheState initialization_state_;
};
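
One detail worth noting in the constructor above: the initializer list (name_, feedback_vector_, slot_, language_mode_) matches the order in which the members are declared. C++ always initializes members in declaration order regardless of the list's order, so keeping the two in sync avoids -Wreorder warnings and subtle bugs when one initializer reads another member. A minimal illustration:

    #include <cassert>

    class Ordered {
     public:
      // Initializer order matches declaration order below; even if it did
      // not, the compiler would still initialize a_ before b_, and a
      // -Wreorder build would warn about the mismatch.
      explicit Ordered(int a) : a_(a), b_(a_ + 1) {}  // b_ may safely read a_
      int a() const { return a_; }
      int b() const { return b_; }

     private:
      int a_;  // initialized first, by declaration order
      int b_;  // initialized second
    };

    int main() {
      Ordered o(1);
      assert(o.a() == 1 && o.b() == 2);
    }
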
-
class HStoreKeyed final : public HTemplateInstruction<4>,
public ArrayInstructionInterface {
public:
@@ -6655,21 +6553,18 @@ class HStoreKeyed final : public HTemplateInstruction<4>,
HValue* dominator_;
};
-
class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreKeyedGeneric, HValue*,
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HStoreKeyedGeneric, HValue*,
HValue*, HValue*, LanguageMode,
- InlineCacheState);
+ Handle<TypeFeedbackVector>,
+ FeedbackVectorSlot);
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* value() const { return OperandAt(2); }
HValue* context() const { return OperandAt(3); }
LanguageMode language_mode() const { return language_mode_; }
- InlineCacheState initialization_state() const {
- return initialization_state_;
- }
Representation RequiredInputRepresentation(int index) override {
// tagged[tagged] = tagged
@@ -6680,14 +6575,6 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
- bool HasVectorAndSlot() const {
- return !feedback_vector_.is_null();
- }
- void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot) {
- feedback_vector_ = vector;
- slot_ = slot;
- }
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
@@ -6696,9 +6583,8 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
private:
HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
HValue* value, LanguageMode language_mode,
- InlineCacheState initialization_state)
- : language_mode_(language_mode),
- initialization_state_(initialization_state) {
+ Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+ : feedback_vector_(vector), slot_(slot), language_mode_(language_mode) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -6709,10 +6595,8 @@ class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorSlot slot_;
LanguageMode language_mode_;
- InlineCacheState initialization_state_;
};
-
class HTransitionElementsKind final : public HTemplateInstruction<2> {
public:
inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone,
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.h b/deps/v8/src/crankshaft/hydrogen-osr.h
index e2cfd30428..0610b4284f 100644
--- a/deps/v8/src/crankshaft/hydrogen-osr.h
+++ b/deps/v8/src/crankshaft/hydrogen-osr.h
@@ -5,13 +5,14 @@
#ifndef V8_CRANKSHAFT_HYDROGEN_OSR_H_
#define V8_CRANKSHAFT_HYDROGEN_OSR_H_
-#include "src/ast/ast.h"
#include "src/crankshaft/hydrogen.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
+class IterationStatement;
+
// Responsible for building graph parts related to OSR and otherwise
// setting up the graph to do an OSR compile.
class HOsrBuilder : public ZoneObject {
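
Replacing the src/ast/ast.h include with a forward declaration works because this header only ever names IterationStatement through pointers; only the .cc file needs the complete type. A self-contained sketch of the technique, with the two hypothetical files collapsed into one translation unit:

    // osr.h (sketch): the header never dereferences IterationStatement, so a
    // forward declaration suffices and the heavy ast.h include can be dropped.
    class IterationStatement;  // forward declaration, not #include "ast.h"

    class OsrBuilder {
     public:
      // Pointers to an incomplete type are fine in a header.
      bool HasOsrEntryAt(IterationStatement* statement);
    };

    // osr.cc (sketch): only the implementation needs the complete type.
    class IterationStatement {
     public:
      bool is_osr_target() const { return true; }
    };

    bool OsrBuilder::HasOsrEntryAt(IterationStatement* statement) {
      return statement != nullptr && statement->is_osr_target();
    }

    int main() {
      IterationStatement stmt;
      OsrBuilder builder;
      return builder.HasOsrEntryAt(&stmt) ? 0 : 1;
    }
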
diff --git a/deps/v8/src/crankshaft/hydrogen-range-analysis.h b/deps/v8/src/crankshaft/hydrogen-range-analysis.h
index cff7026e14..eeac690e62 100644
--- a/deps/v8/src/crankshaft/hydrogen-range-analysis.h
+++ b/deps/v8/src/crankshaft/hydrogen-range-analysis.h
@@ -5,6 +5,7 @@
#ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
#define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
+#include "src/base/compiler-specific.h"
#include "src/crankshaft/hydrogen.h"
namespace v8 {
@@ -21,7 +22,7 @@ class HRangeAnalysisPhase : public HPhase {
void Run();
private:
- void TraceRange(const char* msg, ...);
+ PRINTF_FORMAT(2, 3) void TraceRange(const char* msg, ...);
void InferControlFlowRange(HCompareNumericAndBranch* test,
HBasicBlock* dest);
void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
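
PRINTF_FORMAT(2, 3) annotates TraceRange so the compiler type-checks the format string against the variadic arguments; the indices start at 2 rather than 1 because the implicit this pointer of a non-static member function occupies argument slot 1. A reduced sketch of how such a macro is commonly defined (the macro body below is an assumption, not V8's exact definition):

    #include <cstdarg>
    #include <cstdio>

    // On GCC/Clang, tell the compiler which argument is the printf-style
    // format string and where the variadic arguments begin.
    #if defined(__GNUC__)
    #define PRINTF_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
    #else
    #define PRINTF_FORMAT(fmt, args)
    #endif

    class Tracer {
     public:
      // `this` is argument 1, so the format string is argument 2 and the
      // variadic arguments start at 3 -- hence PRINTF_FORMAT(2, 3).
      PRINTF_FORMAT(2, 3) void TraceRange(const char* msg, ...) {
        va_list args;
        va_start(args, msg);
        std::vfprintf(stderr, msg, args);
        va_end(args);
      }
    };

    int main() {
      Tracer tracer;
      tracer.TraceRange("range [%d, %d]\n", 0, 42);
      // tracer.TraceRange("range [%d]\n", "oops");  // would now warn at compile time
    }
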
diff --git a/deps/v8/src/crankshaft/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
index 4266e28da0..20d50d897c 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-types.cc
@@ -34,23 +34,25 @@ HType HType::FromFieldType(Handle<FieldType> type, Zone* temp_zone) {
// static
HType HType::FromValue(Handle<Object> value) {
- if (value->IsSmi()) return HType::Smi();
- if (value->IsNull()) return HType::Null();
- if (value->IsHeapNumber()) {
+ Object* raw_value = *value;
+ if (raw_value->IsSmi()) return HType::Smi();
+ DCHECK(raw_value->IsHeapObject());
+ Isolate* isolate = HeapObject::cast(*value)->GetIsolate();
+ if (raw_value->IsNull(isolate)) return HType::Null();
+ if (raw_value->IsHeapNumber()) {
double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
}
- if (value->IsString()) return HType::String();
- if (value->IsBoolean()) return HType::Boolean();
- if (value->IsUndefined()) return HType::Undefined();
- if (value->IsJSArray()) {
- DCHECK(!value->IsUndetectable());
+ if (raw_value->IsString()) return HType::String();
+ if (raw_value->IsBoolean()) return HType::Boolean();
+ if (raw_value->IsUndefined(isolate)) return HType::Undefined();
+ if (raw_value->IsJSArray()) {
+ DCHECK(!raw_value->IsUndetectable());
return HType::JSArray();
}
- if (value->IsJSObject() && !value->IsUndetectable()) {
+ if (raw_value->IsJSObject() && !raw_value->IsUndetectable()) {
return HType::JSObject();
}
- DCHECK(value->IsHeapObject());
return HType::HeapObject();
}
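
The rewritten FromValue dereferences the handle once into raw_value and threads an Isolate through the null/undefined checks, reflecting that those oddballs are per-isolate singletons rather than globals. A simplified standalone sketch of that shape (the types are stand-ins, not V8's):

    #include <cassert>

    // Hypothetical stand-ins: each isolate owns its oddball singletons, so
    // "is this value undefined?" becomes a pointer comparison against that
    // isolate's singleton instead of an ambient global lookup.
    struct Object {};

    struct Isolate {
      Object null_value;
      Object undefined_value;
    };

    bool IsNull(const Object* value, const Isolate* isolate) {
      return value == &isolate->null_value;
    }

    bool IsUndefined(const Object* value, const Isolate* isolate) {
      return value == &isolate->undefined_value;
    }

    int main() {
      Isolate isolate;
      // Dereference once up front (the raw_value pattern in the hunk above),
      // then classify against the owning isolate's singletons.
      const Object* raw_value = &isolate.undefined_value;
      assert(!IsNull(raw_value, &isolate));
      assert(IsUndefined(raw_value, &isolate));
    }
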
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index 295b2c9455..240101eeeb 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -4,11 +4,12 @@
#include "src/crankshaft/hydrogen.h"
+#include <memory>
#include <sstream>
#include "src/allocation-site-scopes.h"
#include "src/ast/ast-numbering.h"
-#include "src/ast/scopeinfo.h"
+#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/crankshaft/hydrogen-bce.h"
#include "src/crankshaft/hydrogen-canonicalize.h"
@@ -35,6 +36,7 @@
#include "src/crankshaft/typing.h"
#include "src/field-type.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/globals.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
// GetRootConstructor
@@ -68,6 +70,178 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
+class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
+ public:
+ explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
+ : HOptimizedGraphBuilder(info) {}
+
+#define DEF_VISIT(type) \
+ void Visit##type(type* node) override { \
+ SourcePosition old_position = SourcePosition::Unknown(); \
+ if (node->position() != kNoSourcePosition) { \
+ old_position = source_position(); \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ if (!old_position.IsUnknown()) { \
+ set_source_position(old_position); \
+ } \
+ }
+ EXPRESSION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ void Visit##type(type* node) override { \
+ SourcePosition old_position = SourcePosition::Unknown(); \
+ if (node->position() != kNoSourcePosition) { \
+ old_position = source_position(); \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ if (!old_position.IsUnknown()) { \
+ set_source_position(old_position); \
+ } \
+ }
+ STATEMENT_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+#define DEF_VISIT(type) \
+ void Visit##type(type* node) override { \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ }
+ DECLARATION_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+};
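
The subclass above uses the node lists as X-macros to stamp out one override per AST node type, each saving the current source position, visiting, and restoring it afterwards. A compact sketch of the same technique with an invented two-entry node list:

    #include <cassert>

    // Invented node list for illustration; V8's EXPRESSION_NODE_LIST and
    // STATEMENT_NODE_LIST play this role.
    #define NODE_LIST(V) V(Literal) V(BinaryOp)

    struct Literal { int position = 10; };
    struct BinaryOp { int position = 20; };

    class Builder {
     public:
      virtual ~Builder() = default;
    #define DECLARE_VISIT(type)                \
      virtual void Visit##type(type* node) {   \
        (void)node;                            \
        last_position_ = position_;            \
      }
      NODE_LIST(DECLARE_VISIT)
    #undef DECLARE_VISIT
      int position_ = 0;
      int last_position_ = -1;
    };

    // One override per node type, generated from the list: set the position,
    // run the base visit, then restore the previous position.
    class BuilderWithPositions : public Builder {
     public:
    #define DEF_VISIT(type)                    \
      void Visit##type(type* node) override {  \
        int old_position = position_;          \
        position_ = node->position;            \
        Builder::Visit##type(node);            \
        position_ = old_position;              \
      }
      NODE_LIST(DEF_VISIT)
    #undef DEF_VISIT
    };

    int main() {
      BuilderWithPositions builder;
      Literal literal;
      builder.VisitLiteral(&literal);
      assert(builder.last_position_ == 10);  // base visit saw the node position
      assert(builder.position_ == 0);        // old position was restored
    }
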
+
+HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
+ if (!isolate()->use_crankshaft() ||
+ info()->shared_info()->dont_crankshaft()) {
+ // Crankshaft is entirely disabled.
+ return FAILED;
+ }
+
+ // Optimization requires a version of fullcode with deoptimization support.
+ // Recompile the unoptimized version of the code if the current version
+ // doesn't have deoptimization support already.
+ // Otherwise, if we are gathering compilation time and space statistics
+ // for hydrogen, gather baseline statistics for a fullcode compilation.
+ bool should_recompile = !info()->shared_info()->has_deoptimization_support();
+ if (should_recompile || FLAG_hydrogen_stats) {
+ base::ElapsedTimer timer;
+ if (FLAG_hydrogen_stats) {
+ timer.Start();
+ }
+ if (!Compiler::EnsureDeoptimizationSupport(info())) {
+ return FAILED;
+ }
+ if (FLAG_hydrogen_stats) {
+ isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
+ }
+ }
+ DCHECK(info()->shared_info()->has_deoptimization_support());
+ DCHECK(!info()->shared_info()->never_compiled());
+
+ // Check the whitelist for Crankshaft.
+ if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
+ return AbortOptimization(kHydrogenFilter);
+ }
+
+ Scope* scope = info()->scope();
+ if (LUnallocated::TooManyParameters(scope->num_parameters())) {
+ // Crankshaft would require too many Lithium operands.
+ return AbortOptimization(kTooManyParameters);
+ }
+
+ if (info()->is_osr() &&
+ LUnallocated::TooManyParametersOrStackSlots(scope->num_parameters(),
+ scope->num_stack_slots())) {
+ // Crankshaft would require too many Lithium operands.
+ return AbortOptimization(kTooManyParametersLocals);
+ }
+
+ if (IsGeneratorFunction(info()->shared_info()->kind())) {
+ // Crankshaft does not support generators.
+ return AbortOptimization(kGenerator);
+ }
+
+ if (FLAG_trace_hydrogen) {
+ isolate()->GetHTracer()->TraceCompilation(info());
+ }
+
+ // Optimization could have been disabled by the parser. Note that this check
+ // is only needed because the Hydrogen graph builder is missing some bailouts.
+ if (info()->shared_info()->optimization_disabled()) {
+ return AbortOptimization(
+ info()->shared_info()->disable_optimization_reason());
+ }
+
+ HOptimizedGraphBuilder* graph_builder =
+ (info()->is_tracking_positions() || FLAG_trace_ic)
+ ? new (info()->zone()) HOptimizedGraphBuilderWithPositions(info())
+ : new (info()->zone()) HOptimizedGraphBuilder(info());
+
+ // Type-check the function.
+ AstTyper(info()->isolate(), info()->zone(), info()->closure(),
+ info()->scope(), info()->osr_ast_id(), info()->literal(),
+ graph_builder->bounds())
+ .Run();
+
+ graph_ = graph_builder->CreateGraph();
+
+ if (isolate()->has_pending_exception()) {
+ return FAILED;
+ }
+
+ if (graph_ == NULL) return FAILED;
+
+ if (info()->dependencies()->HasAborted()) {
+ // Dependency has changed during graph creation. Let's try again later.
+ return RetryOptimization(kBailedOutDueToDependencyChange);
+ }
+
+ return SUCCEEDED;
+}
+
+HCompilationJob::Status HCompilationJob::ExecuteJobImpl() {
+ DCHECK(graph_ != NULL);
+ BailoutReason bailout_reason = kNoReason;
+
+ if (graph_->Optimize(&bailout_reason)) {
+ chunk_ = LChunk::NewChunk(graph_);
+ if (chunk_ != NULL) return SUCCEEDED;
+ } else if (bailout_reason != kNoReason) {
+ info()->AbortOptimization(bailout_reason);
+ }
+
+ return FAILED;
+}
+
+HCompilationJob::Status HCompilationJob::FinalizeJobImpl() {
+ DCHECK(chunk_ != NULL);
+ DCHECK(graph_ != NULL);
+ {
+ // Deferred handles reference objects that were accessible during
+ // graph creation. To make sure that we don't encounter inconsistencies
+ // between graph creation and code generation, we disallow accessing
+ // objects through deferred handles during the latter, with exceptions.
+ DisallowDeferredHandleDereference no_deferred_handle_deref;
+ Handle<Code> optimized_code = chunk_->Codegen();
+ if (optimized_code.is_null()) {
+ if (info()->bailout_reason() == kNoReason) {
+ return AbortOptimization(kCodeGenerationFailed);
+ }
+ return FAILED;
+ }
+ RegisterWeakObjectsInOptimizedCode(optimized_code);
+ info()->SetCode(optimized_code);
+ }
+ // Add to the weak list of optimized code objects.
+ info()->context()->native_context()->AddOptimizedCode(*info()->code());
+ return SUCCEEDED;
+}
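
These three methods implement the Prepare, Execute, Finalize protocol of the new compilation-job interface: Prepare and Finalize run on the main thread and may touch the heap, Execute is written so it could run on a background thread, and every phase reports SUCCEEDED or FAILED. A stripped-down sketch of that state machine (simplified names, not the real interface):

    #include <cassert>

    // Minimal three-phase compilation job. A failed phase parks the job in a
    // failed state; each phase may only run after the previous one succeeded.
    class CompilationJob {
     public:
      enum Status { SUCCEEDED, FAILED };
      virtual ~CompilationJob() = default;

      Status PrepareJob() { return UpdateState(PrepareJobImpl(), kReadyToExecute); }
      Status ExecuteJob() {
        assert(state_ == kReadyToExecute);
        return UpdateState(ExecuteJobImpl(), kReadyToFinalize);
      }
      Status FinalizeJob() {
        assert(state_ == kReadyToFinalize);
        return UpdateState(FinalizeJobImpl(), kSucceeded);
      }

     protected:
      virtual Status PrepareJobImpl() = 0;
      virtual Status ExecuteJobImpl() = 0;
      virtual Status FinalizeJobImpl() = 0;

     private:
      enum State { kReadyToPrepare, kReadyToExecute, kReadyToFinalize,
                   kSucceeded, kFailed };
      Status UpdateState(Status status, State next) {
        state_ = (status == SUCCEEDED) ? next : kFailed;
        return status;
      }
      State state_ = kReadyToPrepare;
    };

    // Trivial job used to exercise the protocol.
    class NopJob : public CompilationJob {
     protected:
      Status PrepareJobImpl() override { return SUCCEEDED; }
      Status ExecuteJobImpl() override { return SUCCEEDED; }
      Status FinalizeJobImpl() override { return SUCCEEDED; }
    };

    int main() {
      NopJob job;
      assert(job.PrepareJob() == CompilationJob::SUCCEEDED);
      assert(job.ExecuteJob() == CompilationJob::SUCCEEDED);
      assert(job.FinalizeJob() == CompilationJob::SUCCEEDED);
    }
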
+
HBasicBlock::HBasicBlock(HGraph* graph)
: block_id_(graph->GetNextBlockID()),
graph_(graph),
@@ -932,8 +1106,7 @@ void HGraphBuilder::IfBuilder::Else() {
did_else_ = true;
}
-
-void HGraphBuilder::IfBuilder::Deopt(Deoptimizer::DeoptReason reason) {
+void HGraphBuilder::IfBuilder::Deopt(DeoptimizeReason reason) {
DCHECK(did_then_);
builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
AddMergeAtJoinBlock(true);
@@ -1186,8 +1359,12 @@ void HGraphBuilder::LoopBuilder::EndBody() {
HGraph* HGraphBuilder::CreateGraph() {
+ DCHECK(!FLAG_minimal);
graph_ = new (zone()) HGraph(info_, descriptor_);
if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
+ if (!info_->IsStub() && info_->is_tracking_positions()) {
+ TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown());
+ }
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
@@ -1195,6 +1372,53 @@ HGraph* HGraphBuilder::CreateGraph() {
return graph_;
}
+int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ SourcePosition position) {
+ DCHECK(info_->is_tracking_positions());
+
+ int inline_id = static_cast<int>(graph()->inlined_function_infos().size());
+ HInlinedFunctionInfo info(shared->start_position());
+ if (!shared->script()->IsUndefined(isolate())) {
+ Handle<Script> script(Script::cast(shared->script()), isolate());
+
+ if (FLAG_hydrogen_track_positions &&
+ !script->source()->IsUndefined(isolate())) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ Object* source_name = script->name();
+ OFStream os(tracing_scope.file());
+ os << "--- FUNCTION SOURCE (";
+ if (source_name->IsString()) {
+ os << String::cast(source_name)->ToCString().get() << ":";
+ }
+ os << shared->DebugName()->ToCString().get() << ") id{";
+ os << info_->optimization_id() << "," << inline_id << "} ---\n";
+ {
+ DisallowHeapAllocation no_allocation;
+ int start = shared->start_position();
+ int len = shared->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start,
+ len);
+ for (const auto& c : source) {
+ os << AsReversiblyEscapedUC16(c);
+ }
+ }
+
+ os << "\n--- END ---\n";
+ }
+ }
+
+ graph()->inlined_function_infos().push_back(info);
+
+ if (FLAG_hydrogen_track_positions && inline_id != 0) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
+ << info_->optimization_id() << "," << inline_id << "} AS " << inline_id
+ << " AT " << position << std::endl;
+ }
+
+ return inline_id;
+}
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
DCHECK(current_block() != NULL);
@@ -1289,9 +1513,7 @@ HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
return Add<HCheckHeapObject>(obj);
}
-
-void HGraphBuilder::FinishExitWithHardDeoptimization(
- Deoptimizer::DeoptReason reason) {
+void HGraphBuilder::FinishExitWithHardDeoptimization(DeoptimizeReason reason) {
Add<HDeoptimize>(reason, Deoptimizer::EAGER);
FinishExitCurrentBlock(New<HAbnormalExit>());
}
@@ -1609,7 +1831,7 @@ void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
IfBuilder if_global_object(this);
if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
Token::EQ);
- if_global_object.ThenDeopt(Deoptimizer::kReceiverWasAGlobalObject);
+ if_global_object.ThenDeopt(DeoptimizeReason::kReceiverWasAGlobalObject);
if_global_object.End();
}
@@ -1822,7 +2044,7 @@ HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
// Allocate the JSIteratorResult object.
HValue* result =
Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
- NOT_TENURED, JS_OBJECT_TYPE);
+ NOT_TENURED, JS_OBJECT_TYPE, graph()->GetConstant0());
// Initialize the JSIteratorResult object.
HValue* native_context = BuildGetNativeContext();
@@ -1859,9 +2081,9 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HValue* size = BuildCalculateElementsSize(elements_kind, length);
// Allocate the JSRegExpResult and the FixedArray in one step.
- HValue* result = Add<HAllocate>(
- Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
- NOT_TENURED, JS_ARRAY_TYPE);
+ HValue* result =
+ Add<HAllocate>(Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
+ NOT_TENURED, JS_ARRAY_TYPE, graph()->GetConstant0());
// Initialize the JSRegExpResult header.
HValue* native_context = Add<HLoadNamedField>(
@@ -1895,12 +2117,6 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HAllocate* elements = BuildAllocateElements(elements_kind, size);
BuildInitializeElementsHeader(elements, elements_kind, length);
- if (!elements->has_size_upper_bound()) {
- HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
- elements_kind, max_length->Integer32Value());
- elements->set_size_upper_bound(size_in_bytes_upper_bound);
- }
-
Add<HStoreNamedField>(
result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
elements);
@@ -1965,7 +2181,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
if_objectissmi.Else();
{
if (type->Is(Type::SignedSmall())) {
- if_objectissmi.Deopt(Deoptimizer::kExpectedSmi);
+ if_objectissmi.Deopt(DeoptimizeReason::kExpectedSmi);
} else {
// Check if the object is a heap number.
IfBuilder if_objectisnumber(this);
@@ -2021,7 +2237,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
if_objectisnumber.Else();
{
if (type->Is(Type::Number())) {
- if_objectisnumber.Deopt(Deoptimizer::kExpectedHeapNumber);
+ if_objectisnumber.Deopt(DeoptimizeReason::kExpectedHeapNumber);
}
}
if_objectisnumber.JoinContinuation(&found);
@@ -2062,9 +2278,8 @@ HValue* HGraphBuilder::BuildToNumber(HValue* input) {
Callable callable = CodeFactory::ToNumber(isolate());
HValue* stub = Add<HConstant>(callable.code());
HValue* values[] = {context(), input};
- HCallWithDescriptor* instr =
- Add<HCallWithDescriptor>(stub, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
+ HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
instr->set_type(HType::TaggedNumber());
return instr;
}
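
This hunk is one of several in the file replacing Vector<HValue*>(values, arraysize(values)) with ArrayVector(values), a helper that deduces the array length from a reference-to-array parameter so the count can never drift out of sync with the array definition. A sketch of how such a helper works, over a simplified Vector type:

    #include <cassert>
    #include <cstddef>

    // Simplified stand-in for V8's Vector<T>: a pointer plus a length.
    template <typename T>
    class Vector {
     public:
      Vector(T* data, size_t length) : data_(data), length_(length) {}
      size_t length() const { return length_; }
      T& operator[](size_t i) { return data_[i]; }

     private:
      T* data_;
      size_t length_;
    };

    // The helper: a reference-to-array parameter lets the compiler deduce N,
    // so callers never spell out (or mis-spell) the element count.
    template <typename T, size_t N>
    Vector<T> ArrayVector(T (&array)[N]) {
      return Vector<T>(array, N);
    }

    int main() {
      int values[] = {1, 2, 3};
      Vector<int> vec = ArrayVector(values);  // length deduced as 3
      assert(vec.length() == 3);
      assert(vec[2] == 3);
    }
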
@@ -2115,7 +2330,7 @@ HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
constructor_function_index,
Add<HConstant>(Map::kNoConstructorFunctionIndex), Token::EQ);
constructor_function_index_is_invalid.ThenDeopt(
- Deoptimizer::kUndefinedOrNullInToObject);
+ DeoptimizeReason::kUndefinedOrNullInToObject);
constructor_function_index_is_invalid.End();
// Use the global constructor function.
@@ -2177,8 +2392,8 @@ HAllocate* HGraphBuilder::BuildAllocate(
// Perform the actual allocation.
HAllocate* object = Add<HAllocate>(
- size, type, allocation_mode.GetPretenureMode(),
- instance_type, allocation_mode.feedback_site());
+ size, type, allocation_mode.GetPretenureMode(), instance_type,
+ graph()->GetConstant0(), allocation_mode.feedback_site());
// Setup the allocation memento.
if (allocation_mode.CreateAllocationMementos()) {
@@ -2196,7 +2411,20 @@ HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
HValue* length = AddUncasted<HAdd>(left_length, right_length);
// Check that length <= kMaxLength <=> length < MaxLength + 1.
HValue* max_length = Add<HConstant>(String::kMaxLength + 1);
- Add<HBoundsCheck>(length, max_length);
+ if (top_info()->IsStub()) {
+ // This is a mitigation for crbug.com/627934; the real fix
+ // will be to migrate the StringAddStub to TurboFan one day.
+ IfBuilder if_invalid(this);
+ if_invalid.If<HCompareNumericAndBranch>(length, max_length, Token::GT);
+ if_invalid.Then();
+ {
+ Add<HCallRuntime>(
+ Runtime::FunctionForId(Runtime::kThrowInvalidStringLength), 0);
+ }
+ if_invalid.End();
+ } else {
+ Add<HBoundsCheck>(length, max_length);
+ }
return length;
}
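
In stub code the unconditional HBoundsCheck is replaced by an explicit runtime throw when the combined length exceeds String::kMaxLength, since a stub cannot deoptimize the way an optimized JavaScript frame can. A simplified sketch of the control shape in plain C++ (the length constant below is a stand-in, not V8's exact value):

    #include <cstdint>
    #include <stdexcept>

    // Stand-in for String::kMaxLength; the real constant differs per build.
    constexpr int32_t kMaxStringLength = (1 << 28) - 16;

    // Stub-style check: on overflow, take an explicit throw path instead of
    // relying on a deoptimizing bounds check.
    int32_t AddStringLengths(int32_t left_length, int32_t right_length) {
      int64_t length = static_cast<int64_t>(left_length) + right_length;
      if (length > kMaxStringLength) {
        throw std::length_error("Invalid string length");
      }
      return static_cast<int32_t>(length);
    }

    int main() {
      int32_t ok = AddStringLengths(5, 7);  // 12, within the limit
      (void)ok;
      try {
        AddStringLengths(kMaxStringLength, 1);  // exceeds the limit
        return 1;
      } catch (const std::length_error&) {
        return 0;  // throw path taken, as in the stub mitigation above
      }
    }
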
@@ -2610,7 +2838,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HInstruction* result = AddElementAccess(
backing_store, key, val, bounds_check, checked_object->ActualValue(),
elements_kind, access_type);
- negative_checker.ElseDeopt(Deoptimizer::kNegativeKeyEncountered);
+ negative_checker.ElseDeopt(DeoptimizeReason::kNegativeKeyEncountered);
negative_checker.End();
length_checker.End();
return result;
@@ -2663,53 +2891,6 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
-HValue* HGraphBuilder::BuildAllocateArrayFromLength(
- JSArrayBuilder* array_builder,
- HValue* length_argument) {
- if (length_argument->IsConstant() &&
- HConstant::cast(length_argument)->HasSmiValue()) {
- int array_length = HConstant::cast(length_argument)->Integer32Value();
- if (array_length == 0) {
- return array_builder->AllocateEmptyArray();
- } else {
- return array_builder->AllocateArray(length_argument,
- array_length,
- length_argument);
- }
- }
-
- HValue* constant_zero = graph()->GetConstant0();
- HConstant* max_alloc_length =
- Add<HConstant>(JSArray::kInitialMaxFastElementArray);
- HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
- max_alloc_length);
- IfBuilder if_builder(this);
- if_builder.If<HCompareNumericAndBranch>(checked_length, constant_zero,
- Token::EQ);
- if_builder.Then();
- const int initial_capacity = JSArray::kPreallocatedArrayElements;
- HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
- Push(initial_capacity_node); // capacity
- Push(constant_zero); // length
- if_builder.Else();
- if (!(top_info()->IsStub()) &&
- IsFastPackedElementsKind(array_builder->kind())) {
- // We'll come back later with better (holey) feedback.
- if_builder.Deopt(
- Deoptimizer::kHoleyArrayDespitePackedElements_kindFeedback);
- } else {
- Push(checked_length); // capacity
- Push(checked_length); // length
- }
- if_builder.End();
-
- // Figure out total size
- HValue* length = Pop();
- HValue* capacity = Pop();
- return array_builder->AllocateArray(capacity, max_alloc_length, length);
-}
-
-
HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind,
HValue* capacity) {
int elements_size = IsFastDoubleElementsKind(kind)
@@ -2738,8 +2919,8 @@ HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) {
base_size += AllocationMemento::kSize;
}
HConstant* size_in_bytes = Add<HConstant>(base_size);
- return Add<HAllocate>(
- size_in_bytes, HType::JSArray(), NOT_TENURED, JS_OBJECT_TYPE);
+ return Add<HAllocate>(size_in_bytes, HType::JSArray(), NOT_TENURED,
+ JS_OBJECT_TYPE, graph()->GetConstant0());
}
@@ -2761,7 +2942,7 @@ HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
: FIXED_ARRAY_TYPE;
return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED,
- instance_type);
+ instance_type, graph()->GetConstant0());
}
@@ -2800,15 +2981,13 @@ void HGraphBuilder::BuildJSArrayHeader(HValue* array,
HValue* length_field) {
Add<HStoreNamedField>(array, HObjectAccess::ForMap(), array_map);
- HConstant* empty_fixed_array =
- Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
Add<HStoreNamedField>(
array, HObjectAccess::ForPropertiesPointer(), empty_fixed_array);
- Add<HStoreNamedField>(
- array, HObjectAccess::ForElementsPointer(),
- elements != NULL ? elements : empty_fixed_array);
+ Add<HStoreNamedField>(array, HObjectAccess::ForElementsPointer(),
+ elements != nullptr ? elements : empty_fixed_array);
Add<HStoreNamedField>(
array, HObjectAccess::ForArrayLength(elements_kind), length_field);
@@ -3149,14 +3328,6 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
HAllocate* elements = BuildAllocateElements(kind, elements_size);
- // This function implicitly relies on the fact that the
- // FastCloneShallowArrayStub is called only for literals shorter than
- // JSArray::kInitialMaxFastElementArray.
- // Can't add HBoundsCheck here because otherwise the stub will eager a frame.
- HConstant* size_upper_bound = EstablishElementsAllocationSize(
- kind, JSArray::kInitialMaxFastElementArray);
- elements->set_size_upper_bound(size_upper_bound);
-
Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
// The allocation for the cloned array above causes register pressure on
@@ -3327,170 +3498,6 @@ HValue* HGraphBuilder::BuildArrayBufferViewFieldAccessor(HValue* object,
return Pop();
}
-
-HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
- ElementsKind kind,
- HValue* allocation_site_payload,
- HValue* constructor_function,
- AllocationSiteOverrideMode override_mode) :
- builder_(builder),
- kind_(kind),
- allocation_site_payload_(allocation_site_payload),
- constructor_function_(constructor_function) {
- DCHECK(!allocation_site_payload->IsConstant() ||
- HConstant::cast(allocation_site_payload)->handle(
- builder_->isolate())->IsAllocationSite());
- mode_ = override_mode == DISABLE_ALLOCATION_SITES
- ? DONT_TRACK_ALLOCATION_SITE
- : AllocationSite::GetMode(kind);
-}
-
-
-HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
- ElementsKind kind,
- HValue* constructor_function) :
- builder_(builder),
- kind_(kind),
- mode_(DONT_TRACK_ALLOCATION_SITE),
- allocation_site_payload_(NULL),
- constructor_function_(constructor_function) {
-}
-
-
-HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
- if (!builder()->top_info()->IsStub()) {
- // A constant map is fine.
- Handle<Map> map(builder()->isolate()->get_initial_js_array_map(kind_),
- builder()->isolate());
- return builder()->Add<HConstant>(map);
- }
-
- if (constructor_function_ != NULL && kind_ == GetInitialFastElementsKind()) {
- // No need for a context lookup if the kind_ matches the initial
- // map, because we can just load the map in that case.
- HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->Add<HLoadNamedField>(constructor_function_, nullptr,
- access);
- }
-
- // TODO(mvstanton): we should always have a constructor function if we
- // are creating a stub.
- HInstruction* native_context = constructor_function_ != NULL
- ? builder()->BuildGetNativeContext(constructor_function_)
- : builder()->BuildGetNativeContext();
-
- HObjectAccess access =
- HObjectAccess::ForContextSlot(Context::ArrayMapIndex(kind_));
- return builder()->Add<HLoadNamedField>(native_context, nullptr, access);
-}
-
-
-HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
- // Find the map near the constructor function
- HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
- return builder()->Add<HLoadNamedField>(constructor_function_, nullptr,
- access);
-}
-
-
-HAllocate* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
- HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
- return AllocateArray(capacity,
- capacity,
- builder()->graph()->GetConstant0());
-}
-
-
-HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
- HValue* capacity,
- HConstant* capacity_upper_bound,
- HValue* length_field,
- FillMode fill_mode) {
- return AllocateArray(capacity,
- capacity_upper_bound->GetInteger32Constant(),
- length_field,
- fill_mode);
-}
-
-
-HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
- HValue* capacity,
- int capacity_upper_bound,
- HValue* length_field,
- FillMode fill_mode) {
- HConstant* elememts_size_upper_bound = capacity->IsInteger32Constant()
- ? HConstant::cast(capacity)
- : builder()->EstablishElementsAllocationSize(kind_, capacity_upper_bound);
-
- HAllocate* array = AllocateArray(capacity, length_field, fill_mode);
- if (!elements_location_->has_size_upper_bound()) {
- elements_location_->set_size_upper_bound(elememts_size_upper_bound);
- }
- return array;
-}
-
-
-HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
- HValue* capacity,
- HValue* length_field,
- FillMode fill_mode) {
- // These HForceRepresentations are because we store these as fields in the
- // objects we construct, and an int32-to-smi HChange could deopt. Accept
- // the deopt possibility now, before allocation occurs.
- capacity =
- builder()->AddUncasted<HForceRepresentation>(capacity,
- Representation::Smi());
- length_field =
- builder()->AddUncasted<HForceRepresentation>(length_field,
- Representation::Smi());
-
- // Generate size calculation code here in order to make it dominate
- // the JSArray allocation.
- HValue* elements_size =
- builder()->BuildCalculateElementsSize(kind_, capacity);
-
- // Bail out for large objects.
- HValue* max_regular_heap_object_size =
- builder()->Add<HConstant>(Page::kMaxRegularHeapObjectSize);
- builder()->Add<HBoundsCheck>(elements_size, max_regular_heap_object_size);
-
- // Allocate (dealing with failure appropriately)
- HAllocate* array_object = builder()->AllocateJSArrayObject(mode_);
-
- // Fill in the fields: map, properties, length
- HValue* map;
- if (allocation_site_payload_ == NULL) {
- map = EmitInternalMapCode();
- } else {
- map = EmitMapCode();
- }
-
- builder()->BuildJSArrayHeader(array_object,
- map,
- NULL, // set elements to empty fixed array
- mode_,
- kind_,
- allocation_site_payload_,
- length_field);
-
- // Allocate and initialize the elements
- elements_location_ = builder()->BuildAllocateElements(kind_, elements_size);
-
- builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
-
- // Set the elements
- builder()->Add<HStoreNamedField>(
- array_object, HObjectAccess::ForElementsPointer(), elements_location_);
-
- if (fill_mode == FILL_WITH_HOLE) {
- builder()->BuildFillElementsWithHole(elements_location_, kind_,
- graph()->GetConstant0(), capacity);
- }
-
- return array_object;
-}
-
-
HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
HValue* native_context = BuildGetNativeContext();
HObjectAccess function_access = HObjectAccess::ForContextSlot(context_index);
@@ -3506,7 +3513,8 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
break_scope_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- osr_(new (info->zone()) HOsrBuilder(this)) {
+ osr_(new (info->zone()) HOsrBuilder(this)),
+ bounds_(info->zone()) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
@@ -3534,13 +3542,13 @@ HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
}
}
-
HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+ BailoutId continue_id,
HBasicBlock* exit_block,
HBasicBlock* continue_block) {
if (continue_block != NULL) {
if (exit_block != NULL) Goto(exit_block, continue_block);
- continue_block->SetJoinId(statement->ContinueId());
+ continue_block->SetJoinId(continue_id);
return continue_block;
}
return exit_block;
@@ -3599,7 +3607,6 @@ std::ostream& operator<<(std::ostream& os, const HBasicBlock& b) {
return os << "B" << b.block_id();
}
-
HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
: isolate_(info->isolate()),
next_block_id_(0),
@@ -3612,21 +3619,19 @@ HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
info_(info),
descriptor_(descriptor),
zone_(info->zone()),
+ allow_code_motion_(false),
use_optimistic_licm_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false) {
+ disallow_adding_new_values_(false),
+ inlined_function_infos_(info->zone()) {
if (info->IsStub()) {
// For stubs, explicitly add the context to the environment.
start_environment_ = new (zone_)
HEnvironment(zone_, descriptor.GetRegisterParameterCount() + 1);
} else {
- if (info->is_tracking_positions()) {
- info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown(),
- InlinedFunctionInfo::kNoParentId);
- }
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -3655,7 +3660,8 @@ void HGraph::FinalizeUniqueness() {
int HGraph::SourcePositionToScriptPosition(SourcePosition pos) {
return (FLAG_hydrogen_track_positions && !pos.IsUnknown())
- ? info()->start_position_for(pos.inlining_id()) + pos.position()
+ ? inlined_function_infos_.at(pos.inlining_id()).start_position +
+ pos.position()
: pos.raw();
}
@@ -4394,7 +4400,7 @@ bool HOptimizedGraphBuilder::BuildGraph() {
return false;
}
- Scope* scope = current_info()->scope();
+ DeclarationScope* scope = current_info()->scope();
SetUpScope(scope);
// Add an edge to the body entry. This is warty: the graph's start
@@ -4445,6 +4451,11 @@ bool HOptimizedGraphBuilder::BuildGraph() {
!type_info->matches_inlined_type_change_checksum(composite_checksum));
type_info->set_inlined_type_change_checksum(composite_checksum);
+ // Set this predicate early to avoid handle deref during graph optimization.
+ graph()->set_allow_code_motion(
+ current_info()->IsStub() ||
+ current_info()->shared_info()->opt_count() + 1 < FLAG_max_opt_count);
+
// Perform any necessary OSR-specific cleanups or changes to the graph.
osr()->FinishGraph();
@@ -4594,8 +4605,7 @@ HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
return call;
}
-
-void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
+void HOptimizedGraphBuilder::SetUpScope(DeclarationScope* scope) {
HEnvironment* prolog_env = environment();
int parameter_count = environment()->parameter_count();
ZoneList<HValue*> parameters(parameter_count, zone());
@@ -4681,7 +4691,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
if (scope != NULL) {
if (scope->NeedsContext()) {
// Load the function object.
- Scope* declaration_scope = scope->DeclarationScope();
+ DeclarationScope* declaration_scope = scope->GetDeclarationScope();
HInstruction* function;
HValue* outer_context = environment()->context();
if (declaration_scope->is_script_scope() ||
@@ -4991,7 +5001,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
CHECK_ALIVE(VisitForValue(stmt->tag()));
Add<HSimulate>(stmt->EntryId());
HValue* tag_value = Top();
- Type* tag_type = stmt->tag()->bounds().lower;
+ Type* tag_type = bounds_.get(stmt->tag()).lower;
// 1. Build all the tests, with dangling true branches
BailoutId default_id = BailoutId::None();
@@ -5008,7 +5018,7 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
HValue* label_value = Pop();
- Type* label_type = clause->label()->bounds().lower;
+ Type* label_type = bounds_.get(clause->label()).lower;
Type* combined_type = clause->compare_type();
HControlInstruction* compare = BuildCompareInstruction(
Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
@@ -5084,10 +5094,10 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
}
-
void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+ BailoutId stack_check_id,
HBasicBlock* loop_entry) {
- Add<HSimulate>(stmt->StackCheckId());
+ Add<HSimulate>(stack_check_id);
HStackCheck* stack_check =
HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
DCHECK(loop_entry->IsLoopHeader());
@@ -5106,10 +5116,10 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
BreakAndContinueInfo break_info(stmt, scope());
{
BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+ CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
}
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
+ HBasicBlock* body_exit = JoinContinue(
+ stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
HBasicBlock* loop_successor = NULL;
if (body_exit != NULL) {
set_current_block(body_exit);
@@ -5169,10 +5179,10 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
BreakAndContinueInfo break_info(stmt, scope());
if (current_block() != NULL) {
BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+ CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
}
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
+ HBasicBlock* body_exit = JoinContinue(
+ stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
HBasicBlock* loop_exit = CreateLoop(stmt,
loop_entry,
body_exit,
@@ -5218,10 +5228,10 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
BreakAndContinueInfo break_info(stmt, scope());
if (current_block() != NULL) {
BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+ CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
}
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
+ HBasicBlock* body_exit = JoinContinue(
+ stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
if (stmt->next() != NULL && body_exit != NULL) {
set_current_block(body_exit);
@@ -5259,7 +5269,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
if_undefined_or_null.Or();
if_undefined_or_null.If<HCompareObjectEqAndBranch>(
enumerable, graph()->GetConstantNull());
- if_undefined_or_null.ThenDeopt(Deoptimizer::kUndefinedOrNullInForIn);
+ if_undefined_or_null.ThenDeopt(DeoptimizeReason::kUndefinedOrNullInForIn);
if_undefined_or_null.End();
BuildForInBody(stmt, each_var, enumerable);
}
@@ -5389,10 +5399,12 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
}
set_current_block(if_slow);
{
- // Check if key is still valid for enumerable.
- Add<HPushArguments>(enumerable, key);
- Runtime::FunctionId function_id = Runtime::kForInFilter;
- Push(Add<HCallRuntime>(Runtime::FunctionForId(function_id), 2));
+ ForInFilterStub stub(isolate());
+ HValue* values[] = {context(), key, enumerable};
+ HConstant* stub_value = Add<HConstant>(stub.GetCode());
+ Push(Add<HCallWithDescriptor>(stub_value, 0,
+ stub.GetCallInterfaceDescriptor(),
+ ArrayVector(values)));
Add<HSimulate>(stmt->FilterId());
FinishCurrentBlock(New<HCompareObjectEqAndBranch>(
Top(), graph()->GetConstantUndefined(), if_slow_skip, if_slow_pass));
@@ -5417,11 +5429,11 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
break_info.set_continue_block(continue_block);
{
BreakAndContinueScope push(&break_info, this);
- CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+ CHECK_BAILOUT(VisitLoopBody(stmt, stmt->StackCheckId(), loop_entry));
}
- HBasicBlock* body_exit =
- JoinContinue(stmt, current_block(), break_info.continue_block());
+ HBasicBlock* body_exit = JoinContinue(
+ stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
if (body_exit != NULL) {
set_current_block(body_exit);
@@ -5491,17 +5503,16 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
// Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
+ // space for nested functions that don't need pretenuring.
HConstant* shared_info_value = Add<HConstant>(shared_info);
HInstruction* instr;
- if (!expr->pretenure() && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), shared_info->language_mode(),
- shared_info->kind());
+ if (!expr->pretenure()) {
+ FastNewClosureStub stub(isolate());
FastNewClosureDescriptor descriptor(isolate());
HValue* values[] = {context(), shared_info_value};
HConstant* stub_value = Add<HConstant>(stub.GetCode());
- instr = New<HCallWithDescriptor>(
- stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
+ instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
+ ArrayVector(values));
} else {
Add<HPushArguments>(shared_info_value);
Runtime::FunctionId function_id =
@@ -5593,6 +5604,7 @@ HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
return kUseGeneric;
case LookupIterator::DATA:
if (access_type == STORE && it->IsReadOnly()) return kUseGeneric;
+ if (!it->GetHolder<JSObject>()->IsJSGlobalObject()) return kUseGeneric;
return kUseCell;
case LookupIterator::JSPROXY:
case LookupIterator::TRANSITION:
@@ -5653,7 +5665,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
// If the values is not the hole, it will stay initialized,
// so no need to generate a check.
- if (*current_value == *isolate()->factory()->the_hole_value()) {
+ if (current_value->IsTheHole(isolate())) {
return Bailout(kReferenceToUninitializedVariable);
}
HInstruction* result = New<HLoadNamedField>(
@@ -5716,13 +5728,10 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
- HValue* global_object = Add<HLoadNamedField>(
- BuildGetNativeContext(), nullptr,
- HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
+ Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
- global_object, variable->name(), ast_context()->typeof_mode());
- instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
- expr->VariableFeedbackSlot());
+ variable->name(), ast_context()->typeof_mode(), vector,
+ expr->VariableFeedbackSlot());
return ast_context()->ReturnInstruction(instr, expr->id());
}
}
@@ -5746,9 +5755,6 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case CONST:
mode = HLoadContextSlot::kCheckDeoptimize;
break;
- case CONST_LEGACY:
- mode = HLoadContextSlot::kCheckReturnUndefined;
- break;
default:
mode = HLoadContextSlot::kNoCheck;
break;
@@ -5760,6 +5766,9 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case VariableLocation::LOOKUP:
return Bailout(kReferenceToAVariableWhichRequiresDynamicLookup);
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -5782,9 +5791,8 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
HConstant* stub_value = Add<HConstant>(callable.code());
- HInstruction* instr =
- New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
+ HInstruction* instr = New<HCallWithDescriptor>(
+ stub_value, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5880,7 +5888,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
closure->literals()->literal(expr->literal_index()), isolate());
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
- if (!literals_cell->IsUndefined()) {
+ if (!literals_cell->IsUndefined(isolate())) {
// Retrieve the boilerplate
site = Handle<AllocationSite>::cast(literals_cell);
boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
@@ -5925,7 +5933,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
CHECK_ALIVE(VisitForValue(value));
HValue* value = Pop();
@@ -5998,18 +6007,13 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<Object> literals_cell(literals->literal(expr->literal_index()),
isolate());
Handle<JSObject> boilerplate_object;
- if (!literals_cell->IsUndefined()) {
+ if (!literals_cell->IsUndefined(isolate())) {
DCHECK(literals_cell->IsAllocationSite());
site = Handle<AllocationSite>::cast(literals_cell);
boilerplate_object = Handle<JSObject>(
JSObject::cast(site->transition_info()), isolate());
}
- ElementsKind boilerplate_elements_kind = expr->constant_elements_kind();
- if (!boilerplate_object.is_null()) {
- boilerplate_elements_kind = boilerplate_object->GetElementsKind();
- }
-
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
if (!boilerplate_object.is_null() &&
@@ -6060,20 +6064,28 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* key = Add<HConstant>(i);
- switch (boilerplate_elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- Add<HStoreKeyed>(elements, key, value, nullptr,
- boilerplate_elements_kind);
- break;
+ if (!boilerplate_object.is_null()) {
+ ElementsKind boilerplate_elements_kind =
+ boilerplate_object->GetElementsKind();
+ switch (boilerplate_elements_kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ Add<HStoreKeyed>(elements, key, value, nullptr,
+ boilerplate_elements_kind);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- default:
- UNREACHABLE();
- break;
+ } else {
+ HInstruction* instr = BuildKeyedGeneric(
+ STORE, expr, expr->LiteralFeedbackSlot(), literal, key, value);
+ AddInstruction(instr);
}
Add<HSimulate>(expr->GetIdForElement(i));
@@ -6152,10 +6164,9 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
// TODO(hpayer): Allocation site pretenuring support.
- HInstruction* heap_number = Add<HAllocate>(heap_number_size,
- HType::HeapObject(),
- NOT_TENURED,
- MUTABLE_HEAP_NUMBER_TYPE);
+ HInstruction* heap_number =
+ Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
+ MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
AddStoreMapConstant(
heap_number, isolate()->factory()->mutable_heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
@@ -6385,7 +6396,6 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsIntegerIndexedExotic() {
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
if (!CanInlinePropertyAccess(map_)) return false;
if (IsJSObjectFieldAccessor()) return IsLoad();
- if (IsJSArrayBufferViewFieldAccessor()) return IsLoad();
if (map_->IsJSFunctionMap() && map_->is_constructor() &&
!map_->has_non_instance_prototype() &&
name_.is_identical_to(isolate()->factory()->prototype_string())) {
@@ -6433,17 +6443,6 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
}
return true;
}
- if (GetJSArrayBufferViewFieldAccess(&access)) {
- for (int i = 1; i < maps->length(); ++i) {
- PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
- HObjectAccess test_access = HObjectAccess::ForMap(); // bogus default
- if (!test_info.GetJSArrayBufferViewFieldAccess(&test_access)) {
- return false;
- }
- if (!access.Equals(test_access)) return false;
- }
- return true;
- }
// Currently only handle numbers as a polymorphic case.
// TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
@@ -6497,12 +6496,6 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
return New<HLoadNamedField>(object, checked_object, access);
}
- if (info->GetJSArrayBufferViewFieldAccess(&access)) {
- DCHECK(info->IsLoad());
- checked_object = Add<HCheckArrayBufferNotNeutered>(checked_object);
- return New<HLoadNamedField>(object, checked_object, access);
- }
-
if (info->name().is_identical_to(isolate()->factory()->prototype_string()) &&
info->map()->IsJSFunctionMap() && info->map()->is_constructor()) {
DCHECK(!info->map()->has_non_instance_prototype());
@@ -6694,7 +6687,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
// use a generic IC.
if (count == maps->length() && FLAG_deoptimize_uncommon_cases) {
FinishExitWithHardDeoptimization(
- Deoptimizer::kUnknownMapInPolymorphicAccess);
+ DeoptimizeReason::kUnknownMapInPolymorphicAccess);
} else {
HInstruction* instr =
BuildNamedGeneric(access_type, expr, slot, object, name, value);
@@ -6727,7 +6720,7 @@ static bool ComputeReceiverTypes(Expression* expr, HValue* receiver,
SmallMapList* maps = expr->GetReceiverTypes();
*t = maps;
bool monomorphic = expr->IsMonomorphic();
- if (maps != NULL && receiver->HasMonomorphicJSObjectType()) {
+ if (maps != nullptr && receiver->HasMonomorphicJSObjectType()) {
if (maps->length() > 0) {
Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
maps->FilterForPossibleTransitions(root_map);
@@ -6737,7 +6730,6 @@ static bool ComputeReceiverTypes(Expression* expr, HValue* receiver,
// possible if the receiver had a known map at some point, and no
// map-changing stores have happened to it since.
Handle<Map> candidate_map = receiver->GetMonomorphicJSObjectMap();
- if (candidate_map->is_observed()) return false;
for (HInstruction* current = builder->current_block()->last();
current != nullptr; current = current->previous()) {
if (current->IsBlockEntry()) break;
@@ -6774,7 +6766,6 @@ static bool AreStringTypes(SmallMapList* maps) {
return true;
}
-
void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
FeedbackVectorSlot slot,
BailoutId ast_id, BailoutId return_id,
@@ -6856,7 +6847,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
// If the values is not the hole, it will stay initialized,
// so no need to generate a check.
- if (*current_value == *isolate()->factory()->the_hole_value()) {
+ if (current_value->IsTheHole(isolate())) {
return Bailout(kReferenceToUninitializedVariable);
}
@@ -6882,7 +6873,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
if (value->IsConstant()) {
HConstant* c_value = HConstant::cast(value);
if (!constant.is_identical_to(c_value->handle(isolate()))) {
- Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
+ Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
Deoptimizer::EAGER);
}
} else {
@@ -6895,7 +6886,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
builder.Then();
builder.Else();
- Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
+ Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
Deoptimizer::EAGER);
builder.End();
}
@@ -6928,12 +6919,11 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* global_object = Add<HLoadNamedField>(
BuildGetNativeContext(), nullptr,
HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
- HStoreNamedGeneric* instr =
- Add<HStoreNamedGeneric>(global_object, var->name(), value,
- function_language_mode(), PREMONOMORPHIC);
Handle<TypeFeedbackVector> vector =
handle(current_feedback_vector(), isolate());
- instr->SetVectorAndSlot(vector, slot);
+ HStoreNamedGeneric* instr =
+ Add<HStoreNamedGeneric>(global_object, var->name(), value,
+ function_language_mode(), vector, slot);
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7002,7 +6992,11 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case CONST:
return Bailout(kNonInitializerAssignmentToConst);
case CONST_LEGACY:
- return ast_context()->ReturnValue(Pop());
+ if (is_strict(function_language_mode())) {
+ return Bailout(kNonInitializerAssignmentToConst);
+ } else {
+ return ast_context()->ReturnValue(Pop());
+ }
default:
mode = HStoreContextSlot::kNoCheck;
}
@@ -7018,6 +7012,9 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
case VariableLocation::LOOKUP:
return Bailout(kCompoundAssignmentToLookupSlot);
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
return ast_context()->ReturnValue(Pop());
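
This hunk and the VisitAssignment hunk below tighten CONST_LEGACY handling the same way: a non-initializing assignment to a legacy const is silently ignored in sloppy mode (the expression still yields its value), but in strict mode it refuses optimization through the kNonInitializerAssignmentToConst bailout, leaving the throwing path to baseline code. A tiny sketch of that decision:

    #include <cassert>

    enum class LanguageMode { kSloppy, kStrict };
    enum class AssignResult { kIgnoreAssignment, kBailout };

    // Sloppy-mode writes to a legacy const are dropped on the floor;
    // strict-mode writes give up on optimization (a bailout), mirroring the
    // two branches in the hunks above.
    AssignResult NonInitializerAssignToLegacyConst(LanguageMode mode) {
      return mode == LanguageMode::kStrict ? AssignResult::kBailout
                                           : AssignResult::kIgnoreAssignment;
    }

    int main() {
      assert(NonInitializerAssignToLegacyConst(LanguageMode::kSloppy) ==
             AssignResult::kIgnoreAssignment);
      assert(NonInitializerAssignToLegacyConst(LanguageMode::kStrict) ==
             AssignResult::kBailout);
    }
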
@@ -7071,10 +7068,16 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
}
} else if (var->mode() == CONST_LEGACY) {
if (expr->op() != Token::INIT) {
- CHECK_ALIVE(VisitForValue(expr->value()));
- return ast_context()->ReturnValue(Pop());
+ if (is_strict(function_language_mode())) {
+ return Bailout(kNonInitializerAssignmentToConst);
+ } else {
+ CHECK_ALIVE(VisitForValue(expr->value()));
+ return ast_context()->ReturnValue(Pop());
+ }
}
+ // TODO(adamk): Is this required? Legacy const variables are always
+ // initialized before use.
if (var->IsStackAllocated()) {
// We insert a use of the old value to detect unsupported uses of const
// variables (e.g. initialization inside a loop).
@@ -7083,7 +7086,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
}
}
- if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
+ if (var->is_arguments()) return Bailout(kAssignmentToArguments);
// Handle the assignment.
switch (var->location()) {
@@ -7146,11 +7149,7 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
}
} else {
DCHECK_EQ(Token::INIT, expr->op());
- if (var->mode() == CONST_LEGACY) {
- mode = HStoreContextSlot::kCheckIgnoreAssignment;
- } else {
- mode = HStoreContextSlot::kNoCheck;
- }
+ mode = HStoreContextSlot::kNoCheck;
}
HValue* context = BuildContextChainWalk(var);
@@ -7164,6 +7163,9 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
case VariableLocation::LOOKUP:
return Bailout(kAssignmentToLOOKUPVariable);
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
} else {
return Bailout(kInvalidLeftHandSideInAssignment);
@@ -7239,7 +7241,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForGenericNamedAccess,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess,
Deoptimizer::SOFT);
}
if (access_type == LOAD) {
@@ -7253,16 +7255,17 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
// it has to share information with full code.
HConstant* key = Add<HConstant>(name);
HLoadKeyedGeneric* result =
- New<HLoadKeyedGeneric>(object, key, PREMONOMORPHIC);
- result->SetVectorAndSlot(vector, slot);
+ New<HLoadKeyedGeneric>(object, key, vector, slot);
return result;
}
HLoadNamedGeneric* result =
- New<HLoadNamedGeneric>(object, name, PREMONOMORPHIC);
- result->SetVectorAndSlot(vector, slot);
+ New<HLoadNamedGeneric>(object, name, vector, slot);
return result;
} else {
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
+
if (current_feedback_vector()->GetKind(slot) ==
FeedbackVectorSlotKind::KEYED_STORE_IC) {
// It's possible that a keyed store of a constant string was converted
@@ -7271,18 +7274,12 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
// it has to share information with full code.
HConstant* key = Add<HConstant>(name);
HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
- object, key, value, function_language_mode(), PREMONOMORPHIC);
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
+ object, key, value, function_language_mode(), vector, slot);
return result;
}
HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
- object, name, value, function_language_mode(), PREMONOMORPHIC);
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
+ object, name, value, function_language_mode(), vector, slot);
return result;
}
}
@@ -7291,25 +7288,15 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
HValue* object, HValue* key, HValue* value) {
+ Handle<TypeFeedbackVector> vector =
+ handle(current_feedback_vector(), isolate());
if (access_type == LOAD) {
- InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
HLoadKeyedGeneric* result =
- New<HLoadKeyedGeneric>(object, key, initial_state);
- // HLoadKeyedGeneric with vector ics benefits from being encoded as
- // MEGAMORPHIC because the vector/slot combo becomes unnecessary.
- if (initial_state != MEGAMORPHIC) {
- // We need to pass vector information.
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
- }
+ New<HLoadKeyedGeneric>(object, key, vector, slot);
return result;
} else {
HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
- object, key, value, function_language_mode(), PREMONOMORPHIC);
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
- result->SetVectorAndSlot(vector, slot);
+ object, key, value, function_language_mode(), vector, slot);
return result;
}
}
@@ -7612,12 +7599,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// Deopt if none of the cases matched.
NoObservableSideEffectsScope scope(this);
FinishExitWithHardDeoptimization(
- Deoptimizer::kUnknownMapInPolymorphicElementAccess);
+ DeoptimizeReason::kUnknownMapInPolymorphicElementAccess);
set_current_block(join);
return access_type == STORE ? val : Pop();
}
-
HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HValue* obj, HValue* key, HValue* val, Expression* expr,
FeedbackVectorSlot slot, BailoutId ast_id, BailoutId return_id,
@@ -7730,13 +7716,15 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
if (access_type == STORE) {
if (expr->IsAssignment() &&
expr->AsAssignment()->HasNoTypeInformation()) {
- Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedStore,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess,
+ Deoptimizer::SOFT);
}
} else {
if (expr->AsProperty()->HasNoTypeInformation()) {
- Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedLoad,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess,
+ Deoptimizer::SOFT);
}
}
instr = AddInstruction(
@@ -7790,6 +7778,11 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
return false;
}
+ // Make sure we visit the arguments object so that the liveness analysis
+ // still records the access.
+ CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
+ Drop(1);
+
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
result = New<HArgumentsLength>(elements);
@@ -7825,7 +7818,6 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
return true;
}
-
HValue* HOptimizedGraphBuilder::BuildNamedAccess(
PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
Expression* expr, FeedbackVectorSlot slot, HValue* object,
@@ -7943,8 +7935,7 @@ HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant) {
HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder) {
- PrototypeIterator iter(isolate(), prototype,
- PrototypeIterator::START_AT_RECEIVER);
+ PrototypeIterator iter(isolate(), prototype, kStartAtReceiver);
while (holder.is_null() ||
!PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
@@ -8005,7 +7996,7 @@ HInstruction* HOptimizedGraphBuilder::NewCallFunction(
HConstant* stub = Add<HConstant>(callable.code());
return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals)),
+ ArrayVector(op_vals),
syntactic_tail_call_mode);
}
@@ -8030,7 +8021,7 @@ HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
HConstant* stub = Add<HConstant>(callable.code());
return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals)),
+ ArrayVector(op_vals),
syntactic_tail_call_mode);
}
@@ -8173,7 +8164,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
if (FLAG_trace_inlining && try_inline) {
Handle<JSFunction> caller = current_info()->closure();
- base::SmartArrayPointer<char> caller_name =
+ std::unique_ptr<char[]> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
name->ToCString().get(),
@@ -8210,7 +8201,8 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == maps->length() && FLAG_deoptimize_uncommon_cases) {
- FinishExitWithHardDeoptimization(Deoptimizer::kUnknownMapInPolymorphicCall);
+ FinishExitWithHardDeoptimization(
+ DeoptimizeReason::kUnknownMapInPolymorphicCall);
} else {
Property* prop = expr->expression()->AsProperty();
HInstruction* function =
@@ -8259,9 +8251,9 @@ void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
const char* reason,
TailCallMode tail_call_mode) {
if (FLAG_trace_inlining) {
- base::SmartArrayPointer<char> target_name =
+ std::unique_ptr<char[]> target_name =
target->shared()->DebugName()->ToCString();
- base::SmartArrayPointer<char> caller_name =
+ std::unique_ptr<char[]> caller_name =
caller->shared()->DebugName()->ToCString();
if (reason == NULL) {
const char* call_mode =
@@ -8382,7 +8374,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
top_info()->parse_info()->ast_value_factory());
parse_info.set_ast_value_factory_owned(false);
- CompilationInfo target_info(&parse_info);
+ CompilationInfo target_info(&parse_info, target);
Handle<SharedFunctionInfo> target_shared(target->shared());
if (inlining_kind != CONSTRUCT_CALL_RETURN &&
@@ -8456,7 +8448,8 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
ZoneList<Declaration*>* decls = target_info.scope()->declarations();
int decl_count = decls->length();
for (int i = 0; i < decl_count; ++i) {
- if (!decls->at(i)->IsInlineable()) {
+ if (decls->at(i)->IsFunctionDeclaration() ||
+ !decls->at(i)->proxy()->var()->IsStackAllocated()) {
TraceInline(target, caller, "target has non-trivial declaration");
return false;
}
@@ -8468,6 +8461,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
TraceInline(target, caller, "could not generate deoptimization info");
return false;
}
+
// Remember that we inlined this function. This needs to be called right
// after the EnsureDeoptimizationSupport call so that the code flusher
// does not remove the code with the deoptimization support.
@@ -8477,16 +8471,19 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// After this point, we've made a decision to inline this function (so
// TryInline should always return true).
+ // If target was lazily compiled, its literals array may not yet be set up.
+ JSFunction::EnsureLiterals(target);
+
// Type-check the inlined function.
DCHECK(target_shared->has_deoptimization_support());
AstTyper(target_info.isolate(), target_info.zone(), target_info.closure(),
- target_info.scope(), target_info.osr_ast_id(), target_info.literal())
+ target_info.scope(), target_info.osr_ast_id(), target_info.literal(),
+ &bounds_)
.Run();
int inlining_id = 0;
if (top_info()->is_tracking_positions()) {
- inlining_id = top_info()->TraceInlinedFunction(
- target_shared, source_position(), function_state()->inlining_id());
+ inlining_id = TraceInlinedFunction(target_shared, source_position());
}
// Save the pending call context. Set up new one for the inlined function.
@@ -8662,9 +8659,13 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
BailoutId ast_id,
BailoutId return_id) {
if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
- return getter->IsJSFunction() &&
- TryInline(Handle<JSFunction>::cast(getter), 0, NULL, ast_id, return_id,
- GETTER_CALL_RETURN, TailCallMode::kDisallow);
+ if (getter->IsJSFunction()) {
+ Handle<JSFunction> getter_function = Handle<JSFunction>::cast(getter);
+ return TryInlineBuiltinGetterCall(getter_function, receiver_map, ast_id) ||
+ TryInline(getter_function, 0, NULL, ast_id, return_id,
+ GETTER_CALL_RETURN, TailCallMode::kDisallow);
+ }
+ return false;
}
bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
@@ -8694,13 +8695,13 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
// We intentionally ignore expr->tail_call_mode() here because builtins
// we inline here do not observe if they were tail called or not.
switch (id) {
+ case kMathCos:
case kMathExp:
- if (!FLAG_fast_math) break;
- // Fall through if FLAG_fast_math.
case kMathRound:
case kMathFround:
case kMathFloor:
case kMathAbs:
+ case kMathSin:
case kMathSqrt:
case kMathLog:
case kMathClz32:
@@ -8751,15 +8752,67 @@ bool HOptimizedGraphBuilder::CanInlineArrayResizeOperation(
return !receiver_map.is_null() && receiver_map->prototype()->IsJSObject() &&
receiver_map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(receiver_map->elements_kind()) &&
- !receiver_map->is_dictionary_map() && !receiver_map->is_observed() &&
- receiver_map->is_extensible() &&
+ !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
(!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
!IsReadOnlyLengthDescriptor(receiver_map);
}
+bool HOptimizedGraphBuilder::TryInlineBuiltinGetterCall(
+ Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id) {
+ if (!function->shared()->HasBuiltinFunctionId()) return false;
+ BuiltinFunctionId id = function->shared()->builtin_function_id();
+
+ // Try to inline getter calls like DataView.prototype.byteLength/byteOffset
+ // as operations in the calling function.
+ switch (id) {
+ case kDataViewBuffer: {
+ if (!receiver_map->IsJSDataViewMap()) return false;
+ HObjectAccess access = HObjectAccess::ForMapAndOffset(
+ receiver_map, JSDataView::kBufferOffset);
+ HValue* object = Pop(); // receiver
+ HInstruction* result = New<HLoadNamedField>(object, object, access);
+ ast_context()->ReturnInstruction(result, ast_id);
+ return true;
+ }
+ case kDataViewByteLength:
+ case kDataViewByteOffset: {
+ if (!receiver_map->IsJSDataViewMap()) return false;
+ int offset = (id == kDataViewByteLength) ? JSDataView::kByteLengthOffset
+ : JSDataView::kByteOffsetOffset;
+ HObjectAccess access =
+ HObjectAccess::ForMapAndOffset(receiver_map, offset);
+ HValue* object = Pop(); // receiver
+ HValue* checked_object = Add<HCheckArrayBufferNotNeutered>(object);
+ HInstruction* result =
+ New<HLoadNamedField>(object, checked_object, access);
+ ast_context()->ReturnInstruction(result, ast_id);
+ return true;
+ }
+ case kTypedArrayByteLength:
+ case kTypedArrayByteOffset:
+ case kTypedArrayLength: {
+ if (!receiver_map->IsJSTypedArrayMap()) return false;
+ int offset = (id == kTypedArrayLength)
+ ? JSTypedArray::kLengthOffset
+ : (id == kTypedArrayByteLength)
+ ? JSTypedArray::kByteLengthOffset
+ : JSTypedArray::kByteOffsetOffset;
+ HObjectAccess access =
+ HObjectAccess::ForMapAndOffset(receiver_map, offset);
+ HValue* object = Pop(); // receiver
+ HValue* checked_object = Add<HCheckArrayBufferNotNeutered>(object);
+ HInstruction* result =
+ New<HLoadNamedField>(object, checked_object, access);
+ ast_context()->ReturnInstruction(result, ast_id);
+ return true;
+ }
+ default:
+ return false;
+ }
+}
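
The typed-array arm of this new getter inliner picks one of three field offsets from the builtin id. Restated as a standalone illustration (the enum and offsets below are stand-ins, not V8's real BuiltinFunctionId values or JSTypedArray header offsets), the selection reduces to a small pure function:

  enum class TypedArrayGetter { kLength, kByteLength, kByteOffset };

  // Stand-in offsets; the real values come from JSTypedArray's field layout.
  constexpr int kLengthOffset = 8;
  constexpr int kByteLengthOffset = 16;
  constexpr int kByteOffsetOffset = 24;

  constexpr int FieldOffsetFor(TypedArrayGetter id) {
    switch (id) {
      case TypedArrayGetter::kLength:     return kLengthOffset;
      case TypedArrayGetter::kByteLength: return kByteLengthOffset;
      case TypedArrayGetter::kByteOffset: return kByteOffsetOffset;
    }
    return 0;  // unreachable for valid ids
  }

Note that every length/offset path inserts an HCheckArrayBufferNotNeutered guard before the load, so a detached buffer deoptimizes rather than reading a stale field; only the DataView buffer getter itself loads unguarded.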
bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
- Call* expr, Handle<JSFunction> function, Handle<Map> receiver_map,
+ Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id,
int args_count_no_receiver) {
if (!function->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = function->shared()->builtin_function_id();
@@ -8804,12 +8857,12 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HInstruction* char_code =
BuildStringCharCodeAt(string, index);
if (id == kStringCharCodeAt) {
- ast_context()->ReturnInstruction(char_code, expr->id());
+ ast_context()->ReturnInstruction(char_code, ast_id);
return true;
}
AddInstruction(char_code);
HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
- ast_context()->ReturnInstruction(result, expr->id());
+ ast_context()->ReturnInstruction(result, ast_id);
return true;
}
break;
@@ -8821,17 +8874,17 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
argument, Representation::Integer32());
argument->SetFlag(HValue::kTruncatingToInt32);
HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
- ast_context()->ReturnInstruction(result, expr->id());
+ ast_context()->ReturnInstruction(result, ast_id);
return true;
}
break;
+ case kMathCos:
case kMathExp:
- if (!FLAG_fast_math) break;
- // Fall through if FLAG_fast_math.
case kMathRound:
case kMathFround:
case kMathFloor:
case kMathAbs:
+ case kMathSin:
case kMathSqrt:
case kMathLog:
case kMathClz32:
@@ -8839,7 +8892,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* argument = Pop();
Drop(2); // Receiver and function.
HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
- ast_context()->ReturnInstruction(op, expr->id());
+ ast_context()->ReturnInstruction(op, ast_id);
return true;
}
break;
@@ -8870,7 +8923,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (result == NULL) {
result = NewUncasted<HPower>(left, right);
}
- ast_context()->ReturnInstruction(result, expr->id());
+ ast_context()->ReturnInstruction(result, ast_id);
return true;
}
break;
@@ -8883,7 +8936,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
- ast_context()->ReturnInstruction(result, expr->id());
+ ast_context()->ReturnInstruction(result, ast_id);
return true;
}
break;
@@ -8894,7 +8947,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Drop(2); // Receiver and function.
HInstruction* result =
HMul::NewImul(isolate(), zone(), context(), left, right);
- ast_context()->ReturnInstruction(result, expr->id());
+ ast_context()->ReturnInstruction(result, ast_id);
return true;
}
break;
@@ -8950,7 +9003,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
length_checker.End();
}
result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
ast_context()->ReturnValue(result);
@@ -9003,7 +9056,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
if (!ast_context()->IsEffect()) Push(new_size);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
}
@@ -9031,16 +9084,16 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HConstant* inline_threshold = Add<HConstant>(static_cast<int32_t>(16));
Drop(args_count_no_receiver);
- HValue* receiver = Pop();
- Drop(1); // Function.
HValue* result;
+ HValue* receiver = Pop();
+ HValue* checked_object = AddCheckMap(receiver, receiver_map);
+ HValue* length = Add<HLoadNamedField>(
+ receiver, checked_object, HObjectAccess::ForArrayLength(kind));
+ Drop(1); // Function.
{
NoObservableSideEffectsScope scope(this);
- HValue* length = Add<HLoadNamedField>(
- receiver, nullptr, HObjectAccess::ForArrayLength(kind));
-
IfBuilder if_lengthiszero(this);
HValue* lengthiszero = if_lengthiszero.If<HCompareNumericAndBranch>(
length, graph()->GetConstant0(), Token::EQ);
@@ -9117,7 +9170,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if_lengthiszero.End();
}
result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
ast_context()->ReturnValue(result);
return true;
@@ -9129,7 +9182,6 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if (!receiver_map->prototype()->IsJSObject()) return false;
ElementsKind kind = receiver_map->elements_kind();
if (!IsFastElementsKind(kind)) return false;
- if (receiver_map->is_observed()) return false;
if (argument_count != 2) return false;
if (!receiver_map->is_extensible()) return false;
@@ -9155,7 +9207,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
HValue* index = BuildArrayIndexOf(receiver, search_element, kind, mode);
if (!ast_context()->IsEffect()) Push(index);
- Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+ Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
if (!ast_context()->IsEffect()) Drop(1);
ast_context()->ReturnValue(index);
return true;
@@ -9306,7 +9358,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
}
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data_obj(api_call_info->data(), isolate());
- bool call_data_undefined = call_data_obj->IsUndefined();
+ bool call_data_undefined = call_data_obj->IsUndefined(isolate());
HValue* call_data = Add<HConstant>(call_data_obj);
ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
ExternalReference ref = ExternalReference(&fun,
@@ -9329,7 +9381,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
Vector<HValue*>(op_vals, arraysize(op_vals) - 1),
syntactic_tail_call_mode);
} else {
- CallApiCallbackStub stub(isolate(), argc, call_data_undefined);
+ CallApiCallbackStub stub(isolate(), argc, call_data_undefined, false);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
call = New<HCallWithDescriptor>(
@@ -9352,7 +9404,7 @@ void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
known_function =
Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
- if (TryInlineBuiltinMethodCall(expr, known_function, Handle<Map>(),
+ if (TryInlineBuiltinMethodCall(known_function, Handle<Map>(), expr->id(),
args_count_no_receiver)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
@@ -9493,26 +9545,6 @@ HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
}
-void HOptimizedGraphBuilder::BuildArrayCall(Expression* expression,
- int arguments_count,
- HValue* function,
- Handle<AllocationSite> site) {
- Add<HCheckValue>(function, array_function());
-
- if (IsCallArrayInlineable(arguments_count, site)) {
- BuildInlinedCallArray(expression, arguments_count, site);
- return;
- }
-
- HInstruction* call = PreProcessCall(New<HCallNewArray>(
- function, arguments_count + 1, site->GetElementsKind(), site));
- if (expression->IsCall()) {
- Drop(1);
- }
- ast_context()->ReturnInstruction(call, expression->id());
-}
-
-
HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
HValue* search_element,
ElementsKind kind,
@@ -9659,8 +9691,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
return Pop();
}
-
-bool HOptimizedGraphBuilder::TryHandleArrayCall(Call* expr, HValue* function) {
+template <class T>
+bool HOptimizedGraphBuilder::TryHandleArrayCall(T* expr, HValue* function) {
if (!array_function().is_identical_to(expr->target())) {
return false;
}
@@ -9668,24 +9700,16 @@ bool HOptimizedGraphBuilder::TryHandleArrayCall(Call* expr, HValue* function) {
Handle<AllocationSite> site = expr->allocation_site();
if (site.is_null()) return false;
- BuildArrayCall(expr,
- expr->arguments()->length(),
- function,
- site);
- return true;
-}
-
+ Add<HCheckValue>(function, array_function());
-bool HOptimizedGraphBuilder::TryHandleArrayCallNew(CallNew* expr,
- HValue* function) {
- if (!array_function().is_identical_to(expr->target())) {
- return false;
- }
+ int arguments_count = expr->arguments()->length();
+ if (TryInlineArrayCall(expr, arguments_count, site)) return true;
- Handle<AllocationSite> site = expr->allocation_site();
- if (site.is_null()) return false;
+ HInstruction* call = PreProcessCall(New<HCallNewArray>(
+ function, arguments_count + 1, site->GetElementsKind(), site));
+ if (expr->IsCall()) Drop(1);
+ ast_context()->ReturnInstruction(call, expr->id());
- BuildArrayCall(expr, expr->arguments()->length(), function, site);
return true;
}
@@ -9753,7 +9777,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<Map> map = maps->length() == 1 ? maps->first() : Handle<Map>();
- if (TryInlineBuiltinMethodCall(expr, known_function, map,
+ if (TryInlineBuiltinMethodCall(known_function, map, expr->id(),
expr->arguments()->length())) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
@@ -9788,7 +9812,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// We have to use EAGER deoptimization here because Deoptimizer::SOFT
// gets ignored by the always-opt flag, which leads to incorrect code.
Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForCallWithArguments,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCallWithArguments,
Deoptimizer::EAGER);
arguments_flag = ARGUMENTS_FAKED;
}
@@ -9805,8 +9829,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
PushArgumentsFromEnvironment(argument_count);
} else {
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (expr->is_possibly_eval()) {
return Bailout(kPossibleDirectCallToEval);
}
@@ -9851,8 +9874,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
syntactic_tail_call_mode, tail_call_mode);
} else {
PushArgumentsFromEnvironment(argument_count);
- if (expr->is_uninitialized() &&
- expr->IsUsingCallFeedbackICSlot(isolate())) {
+ if (expr->is_uninitialized() && expr->IsUsingCallFeedbackICSlot()) {
// We've never seen this call before, so let's have Crankshaft learn
// through the type vector.
call = NewCallFunctionViaIC(function, argument_count,
@@ -9871,50 +9893,108 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
return ast_context()->ReturnInstruction(call, expr->id());
}
+bool HOptimizedGraphBuilder::TryInlineArrayCall(Expression* expression,
+ int argument_count,
+ Handle<AllocationSite> site) {
+ Handle<JSFunction> caller = current_info()->closure();
+ Handle<JSFunction> target = array_function();
-void HOptimizedGraphBuilder::BuildInlinedCallArray(
- Expression* expression,
- int argument_count,
- Handle<AllocationSite> site) {
- DCHECK(!site.is_null());
- DCHECK(argument_count >= 0 && argument_count <= 1);
- NoObservableSideEffectsScope no_effects(this);
+ if (!site->CanInlineCall()) {
+ TraceInline(target, caller, "AllocationSite requested no inlining.");
+ return false;
+ }
- // We should at least have the constructor on the expression stack.
- HValue* constructor = environment()->ExpressionStackAt(argument_count);
+ if (argument_count > 1) {
+ TraceInline(target, caller, "Too many arguments to inline.");
+ return false;
+ }
- // Register on the site for deoptimization if the transition feedback changes.
- top_info()->dependencies()->AssumeTransitionStable(site);
- ElementsKind kind = site->GetElementsKind();
- HInstruction* site_instruction = Add<HConstant>(site);
+ int array_length = 0;
+ // Do not inline if the constant length argument is not a smi or outside the
+ // valid range for unrolled loop initialization.
+ if (argument_count == 1) {
+ HValue* argument = Top();
+ if (!argument->IsConstant()) {
+ TraceInline(target, caller,
+ "Don't inline [new] Array(n) where n isn't constant.");
+ return false;
+ }
- // In the single constant argument case, we may have to adjust elements kind
- // to avoid creating a packed non-empty array.
- if (argument_count == 1 && !IsHoleyElementsKind(kind)) {
- HValue* argument = environment()->Top();
- if (argument->IsConstant()) {
- HConstant* constant_argument = HConstant::cast(argument);
- DCHECK(constant_argument->HasSmiValue());
- int constant_array_size = constant_argument->Integer32Value();
- if (constant_array_size != 0) {
- kind = GetHoleyElementsKind(kind);
- }
+ HConstant* constant_argument = HConstant::cast(argument);
+ if (!constant_argument->HasSmiValue()) {
+ TraceInline(target, caller,
+ "Constant length outside of valid inlining range.");
+ return false;
+ }
+ array_length = constant_argument->Integer32Value();
+ if (array_length < 0 || array_length > kElementLoopUnrollThreshold) {
+ TraceInline(target, caller,
+ "Constant length outside of valid inlining range.");
+ return false;
}
}
+ TraceInline(target, caller, NULL);
+
+ NoObservableSideEffectsScope no_effects(this);
+
+ // Register on the site for deoptimization if the transition feedback changes.
+ top_info()->dependencies()->AssumeTransitionStable(site);
+
// Build the array.
- JSArrayBuilder array_builder(this,
- kind,
- site_instruction,
- constructor,
- DISABLE_ALLOCATION_SITES);
- HValue* new_object = argument_count == 0
- ? array_builder.AllocateEmptyArray()
- : BuildAllocateArrayFromLength(&array_builder, Top());
+ ElementsKind kind = site->GetElementsKind();
+ HValue* capacity;
+ HValue* length;
+ if (array_length == 0) {
+ STATIC_ASSERT(0 < JSArray::kPreallocatedArrayElements);
+ const int initial_capacity = JSArray::kPreallocatedArrayElements;
+ capacity = Add<HConstant>(initial_capacity);
+ length = graph()->GetConstant0();
+ } else {
+ length = Top();
+ capacity = length;
+ kind = GetHoleyElementsKind(kind);
+ }
+
+ // These HForceRepresentations are because we store these as fields in the
+ // objects we construct, and an int32-to-smi HChange could deopt. Accept
+ // the deopt possibility now, before allocation occurs.
+ length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
+ capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
+
+ // Generate size calculation code here in order to make it dominate
+ // the JSArray allocation.
+ HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
+
+ // Bail out for large objects.
+ HValue* max_size = Add<HConstant>(Page::kMaxRegularHeapObjectSize);
+ Add<HBoundsCheck>(elements_size, max_size);
+
+ // Allocate (dealing with failure appropriately).
+ AllocationSiteMode mode = DONT_TRACK_ALLOCATION_SITE;
+ HAllocate* new_object = AllocateJSArrayObject(mode);
+
+ // Fill in the fields: map, properties, length.
+ Handle<Map> map_constant(isolate()->get_initial_js_array_map(kind));
+ HValue* map = Add<HConstant>(map_constant);
+
+ BuildJSArrayHeader(new_object, map,
+ nullptr, // set elements to empty fixed array
+ mode, kind, nullptr, length);
+
+ // Allocate and initialize the elements.
+ HAllocate* elements = BuildAllocateElements(kind, elements_size);
+ BuildInitializeElementsHeader(elements, kind, capacity);
+ BuildFillElementsWithHole(elements, kind, graph()->GetConstant0(), capacity);
+
+ // Set the elements.
+ Add<HStoreNamedField>(new_object, HObjectAccess::ForElementsPointer(),
+ elements);
int args_to_drop = argument_count + (expression->IsCall() ? 2 : 1);
Drop(args_to_drop);
ast_context()->ReturnValue(new_object);
+ return true;
}
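
Collapsed into one predicate, the gates the rewritten TryInlineArrayCall applies before inlining look roughly like this (a sketch only; the threshold value below is a placeholder, not the real kElementLoopUnrollThreshold constant from the graph builder):

  constexpr int kElementLoopUnrollThreshold = 8;  // placeholder value

  bool CanInlineArrayCall(bool site_allows_inlining, int argument_count,
                          bool length_is_constant_smi, int length) {
    if (!site_allows_inlining) return false;  // AllocationSite opted out
    if (argument_count > 1) return false;     // only Array() and Array(n)
    if (argument_count == 0) return true;     // empty array, always inlinable
    // Array(n): n must be a compile-time Smi in the unrolled-loop range.
    return length_is_constant_smi && length >= 0 &&
           length <= kElementLoopUnrollThreshold;
  }

Also worth noting: any non-zero constant length switches the allocation to a holey elements kind, since the freshly allocated slots start out as holes.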
@@ -9927,53 +10007,6 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
HAllocate::kMaxInlineSize;
}
-
-bool HOptimizedGraphBuilder::IsCallArrayInlineable(
- int argument_count,
- Handle<AllocationSite> site) {
- Handle<JSFunction> caller = current_info()->closure();
- Handle<JSFunction> target = array_function();
- // We should have the function plus array arguments on the environment stack.
- DCHECK(environment()->length() >= (argument_count + 1));
- DCHECK(!site.is_null());
-
- bool inline_ok = false;
- if (site->CanInlineCall()) {
- // We also want to avoid inlining in certain 1 argument scenarios.
- if (argument_count == 1) {
- HValue* argument = Top();
- if (argument->IsConstant()) {
- // Do not inline if the constant length argument is not a smi or
- // outside the valid range for unrolled loop initialization.
- HConstant* constant_argument = HConstant::cast(argument);
- if (constant_argument->HasSmiValue()) {
- int value = constant_argument->Integer32Value();
- inline_ok = value >= 0 && value <= kElementLoopUnrollThreshold;
- if (!inline_ok) {
- TraceInline(target, caller,
- "Constant length outside of valid inlining range.");
- }
- }
- } else {
- TraceInline(target, caller,
- "Dont inline [new] Array(n) where n isn't constant.");
- }
- } else if (argument_count == 0) {
- inline_ok = true;
- } else {
- TraceInline(target, caller, "Too many arguments to inline.");
- }
- } else {
- TraceInline(target, caller, "AllocationSite requested no inlining.");
- }
-
- if (inline_ok) {
- TraceInline(target, caller, NULL);
- }
- return inline_ok;
-}
-
-
void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
@@ -10067,7 +10100,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
- if (TryHandleArrayCallNew(expr, function)) return;
+ if (TryHandleArrayCall(expr, function)) return;
}
HValue* arity = Add<HConstant>(argument_count - 1);
@@ -10075,9 +10108,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
Callable callable = CodeFactory::Construct(isolate());
HConstant* stub = Add<HConstant>(callable.code());
PushArgumentsFromEnvironment(argument_count);
- HInstruction* construct =
- New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
- Vector<HValue*>(op_vals, arraysize(op_vals)));
+ HInstruction* construct = New<HCallWithDescriptor>(
+ stub, argument_count, callable.descriptor(), ArrayVector(op_vals));
return ast_context()->ReturnInstruction(construct, expr->id());
}
@@ -10186,7 +10218,8 @@ HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
HValue* elements = Add<HAllocate>(
Add<HConstant>(FixedTypedArrayBase::kHeaderSize), HType::HeapObject(),
- NOT_TENURED, external_array_map->instance_type());
+ NOT_TENURED, external_array_map->instance_type(),
+ graph()->GetConstant0());
AddStoreMapConstant(elements, external_array_map);
Add<HStoreNamedField>(elements,
@@ -10242,9 +10275,9 @@ HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
Handle<Map> fixed_typed_array_map(
isolate()->heap()->MapForFixedTypedArray(array_type));
- HAllocate* elements =
- Add<HAllocate>(total_size, HType::HeapObject(), NOT_TENURED,
- fixed_typed_array_map->instance_type());
+ HAllocate* elements = Add<HAllocate>(
+ total_size, HType::HeapObject(), NOT_TENURED,
+ fixed_typed_array_map->instance_type(), graph()->GetConstant0());
#ifndef V8_HOST_ARCH_64_BIT
if (array_type == kExternalFloat64Array) {
@@ -10567,8 +10600,8 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
// Result of deleting non-global variables is false. 'this' is not really
// a variable, though we implement it as one. The subexpression does not
// have side effects.
- HValue* value = var->HasThisName(isolate()) ? graph()->GetConstantTrue()
- : graph()->GetConstantFalse();
+ HValue* value = var->is_this() ? graph()->GetConstantTrue()
+ : graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
Bailout(kDeleteWithNonGlobalVariable);
@@ -10689,7 +10722,6 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
return instr;
}
-
void HOptimizedGraphBuilder::BuildStoreForEffect(
Expression* expr, Property* prop, FeedbackVectorSlot slot, BailoutId ast_id,
BailoutId return_id, HValue* object, HValue* key, HValue* value) {
@@ -10778,6 +10810,9 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
case VariableLocation::LOOKUP:
return Bailout(kLookupVariableInCountOperation);
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
Drop(returns_original_input ? 2 : 1);
@@ -10956,9 +10991,9 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
HValue* left,
HValue* right,
PushBeforeSimulateBehavior push_sim_result) {
- Type* left_type = expr->left()->bounds().lower;
- Type* right_type = expr->right()->bounds().lower;
- Type* result_type = expr->bounds().lower;
+ Type* left_type = bounds_.get(expr->left()).lower;
+ Type* right_type = bounds_.get(expr->right()).lower;
+ Type* result_type = bounds_.get(expr).lower;
Maybe<int> fixed_right_arg = expr->fixed_right_arg();
Handle<AllocationSite> allocation_site = expr->allocation_site();
@@ -11016,7 +11051,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
if (!left_type->IsInhabited()) {
Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
+ DeoptimizeReason::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
Deoptimizer::SOFT);
left_type = Type::Any();
left_rep = RepresentationFor(left_type);
@@ -11025,7 +11060,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
if (!right_type->IsInhabited()) {
Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
+ DeoptimizeReason::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
Deoptimizer::SOFT);
right_type = Type::Any();
right_rep = RepresentationFor(right_type);
@@ -11143,46 +11178,53 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
// inline several instructions (including the two pushes) for every tagged
// operation in optimized code, which is more expensive than a stub call.
if (graph()->info()->IsStub() && is_non_primitive) {
- Runtime::FunctionId function_id;
+ HValue* values[] = {context(), left, right};
+#define GET_STUB(Name) \
+ do { \
+ Callable callable = CodeFactory::Name(isolate()); \
+ HValue* stub = Add<HConstant>(callable.code()); \
+ instr = AddUncasted<HCallWithDescriptor>(stub, 0, callable.descriptor(), \
+ ArrayVector(values)); \
+ } while (false)
+
switch (op) {
default:
UNREACHABLE();
case Token::ADD:
- function_id = Runtime::kAdd;
+ GET_STUB(Add);
break;
case Token::SUB:
- function_id = Runtime::kSubtract;
+ GET_STUB(Subtract);
break;
case Token::MUL:
- function_id = Runtime::kMultiply;
+ GET_STUB(Multiply);
break;
case Token::DIV:
- function_id = Runtime::kDivide;
+ GET_STUB(Divide);
break;
case Token::MOD:
- function_id = Runtime::kModulus;
+ GET_STUB(Modulus);
break;
case Token::BIT_OR:
- function_id = Runtime::kBitwiseOr;
+ GET_STUB(BitwiseOr);
break;
case Token::BIT_AND:
- function_id = Runtime::kBitwiseAnd;
+ GET_STUB(BitwiseAnd);
break;
case Token::BIT_XOR:
- function_id = Runtime::kBitwiseXor;
+ GET_STUB(BitwiseXor);
break;
case Token::SAR:
- function_id = Runtime::kShiftRight;
+ GET_STUB(ShiftRight);
break;
case Token::SHR:
- function_id = Runtime::kShiftRightLogical;
+ GET_STUB(ShiftRightLogical);
break;
case Token::SHL:
- function_id = Runtime::kShiftLeft;
+ GET_STUB(ShiftLeft);
break;
}
- Add<HPushArguments>(left, right);
- instr = AddUncasted<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
+#undef GET_STUB
} else {
switch (op) {
case Token::ADD:
@@ -11202,7 +11244,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
IfBuilder if_same(this);
if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
if_same.Then();
- if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
+ if_same.ElseDeopt(DeoptimizeReason::kUnexpectedRHSOfBinaryOperation);
right = fixed_right;
}
instr = AddUncasted<HMod>(left, right);
@@ -11440,18 +11482,22 @@ void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
return ast_context()->ReturnControl(instr, expr->id());
}
+namespace {
-static bool IsLiteralCompareBool(Isolate* isolate,
- HValue* left,
- Token::Value op,
- HValue* right) {
+bool IsLiteralCompareStrict(Isolate* isolate, HValue* left, Token::Value op,
+ HValue* right) {
return op == Token::EQ_STRICT &&
- ((left->IsConstant() &&
- HConstant::cast(left)->handle(isolate)->IsBoolean()) ||
- (right->IsConstant() &&
- HConstant::cast(right)->handle(isolate)->IsBoolean()));
+ ((left->IsConstant() &&
+ !HConstant::cast(left)->handle(isolate)->IsNumber() &&
+ !HConstant::cast(left)->handle(isolate)->IsSimd128Value() &&
+ !HConstant::cast(left)->handle(isolate)->IsString()) ||
+ (right->IsConstant() &&
+ !HConstant::cast(right)->handle(isolate)->IsNumber() &&
+ !HConstant::cast(right)->handle(isolate)->IsSimd128Value() &&
+ !HConstant::cast(right)->handle(isolate)->IsString()));
}
+} // namespace
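
The renamed predicate widens the old boolean-only check: under strict equality, any constant that is not a number, string, or SIMD value compares by reference, so the builder can emit a plain HCompareObjectEqAndBranch. A minimal sketch of the idea (the tag enum is illustrative, not a real V8 type):

  enum class Tag { kNumber, kString, kSimd128, kOther };

  bool ComparesByIdentity(Tag tag) {
    // Numbers, strings, and SIMD values have content-based === semantics;
    // everything else (objects, booleans, null, undefined, symbols) is
    // compared by reference, so pointer equality suffices.
    return tag != Tag::kNumber && tag != Tag::kString && tag != Tag::kSimd128;
  }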
void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
DCHECK(!HasStackOverflow());
@@ -11486,8 +11532,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- Type* left_type = expr->left()->bounds().lower;
- Type* right_type = expr->right()->bounds().lower;
+ Type* left_type = bounds_.get(expr->left()).lower;
+ Type* right_type = bounds_.get(expr->right()).lower;
Type* combined_type = expr->combined_type();
CHECK_ALIVE(VisitForValue(expr->left()));
@@ -11497,25 +11543,31 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
- if (IsLiteralCompareBool(isolate(), left, op, right)) {
+ if (IsLiteralCompareStrict(isolate(), left, op, right)) {
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
return ast_context()->ReturnControl(result, expr->id());
}
if (op == Token::INSTANCEOF) {
- DCHECK(!FLAG_harmony_instanceof);
// Check to see if the rhs of the instanceof is a known function.
if (right->IsConstant() &&
HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
- Handle<JSFunction> constructor =
+ Handle<JSFunction> function =
Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
- if (constructor->IsConstructor() &&
- !constructor->map()->has_non_instance_prototype()) {
- JSFunction::EnsureHasInitialMap(constructor);
- DCHECK(constructor->has_initial_map());
- Handle<Map> initial_map(constructor->initial_map(), isolate());
+ // Make sure the prototype of {function} is the %FunctionPrototype%, and
+ // it already has a meaningful initial map (i.e. we constructed at least
+ // one instance using the constructor {function}).
+ // We can only use the fast case if @@hasInstance was not used so far.
+ if (function->has_initial_map() &&
+ function->map()->prototype() ==
+ function->native_context()->closure() &&
+ !function->map()->has_non_instance_prototype() &&
+ isolate()->IsHasInstanceLookupChainIntact()) {
+ Handle<Map> initial_map(function->initial_map(), isolate());
top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
+ top_info()->dependencies()->AssumePropertyCell(
+ isolate()->factory()->has_instance_protector());
HInstruction* prototype =
Add<HConstant>(handle(initial_map->prototype(), isolate()));
HHasInPrototypeChainAndBranch* result =
@@ -11524,13 +11576,21 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
}
- HInstanceOf* result = New<HInstanceOf>(left, right);
+ Callable callable = CodeFactory::InstanceOf(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), left, right};
+ HCallWithDescriptor* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
+ result->set_type(HType::Boolean());
return ast_context()->ReturnInstruction(result, expr->id());
} else if (op == Token::IN) {
- Add<HPushArguments>(left, right);
+ Callable callable = CodeFactory::HasProperty(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), left, right};
HInstruction* result =
- New<HCallRuntime>(Runtime::FunctionForId(Runtime::kHasProperty), 2);
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
return ast_context()->ReturnInstruction(result, expr->id());
}
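
For instanceof, the former HInstanceOf instruction is gone: the fast path survives only under the conjunction of guards visible in the hunk above, and everything else now goes through the InstanceOf code stub. The guards, restated as a standalone predicate (the field names are descriptive stand-ins for the checks in the diff):

  struct FunctionFacts {
    bool has_initial_map;
    bool prototype_is_function_prototype;  // prototype is %FunctionPrototype%
    bool has_non_instance_prototype;
    bool has_instance_chain_intact;        // no custom @@hasInstance installed
  };

  bool CanUseFastInstanceOf(const FunctionFacts& f) {
    return f.has_initial_map && f.prototype_is_function_prototype &&
           !f.has_non_instance_prototype && f.has_instance_chain_intact;
  }

When all four hold, the compare lowers to an HHasInPrototypeChainAndBranch against the initial map's prototype, with dependencies registered so a later change to the map or to the @@hasInstance protector cell deoptimizes the code.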
@@ -11556,7 +11616,8 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
// soft deoptimize when there is no type feedback.
if (!combined_type->IsInhabited()) {
Add<HDeoptimize>(
- Deoptimizer::kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
+ DeoptimizeReason::
+ kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
Deoptimizer::SOFT);
combined_type = left_type = right_type = Type::Any();
}
@@ -11573,8 +11634,9 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HConstant::cast(left)->HasNumberValue()) ||
(right->IsConstant() &&
HConstant::cast(right)->HasNumberValue())) {
- Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ DeoptimizeReason::kTypeMismatchBetweenFeedbackAndConstant,
+ Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
}
@@ -11656,8 +11718,9 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
!HConstant::cast(left)->HasInternalizedStringValue()) ||
(right->IsConstant() &&
!HConstant::cast(right)->HasInternalizedStringValue())) {
- Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
- Deoptimizer::SOFT);
+ Add<HDeoptimize>(
+ DeoptimizeReason::kTypeMismatchBetweenFeedbackAndConstant,
+ Deoptimizer::SOFT);
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
}
@@ -11811,8 +11874,9 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
}
top_info()->dependencies()->AssumeTransitionStable(current_site);
- HInstruction* object = Add<HAllocate>(
- object_size_constant, type, pretenure_flag, instance_type, top_site);
+ HInstruction* object =
+ Add<HAllocate>(object_size_constant, type, pretenure_flag, instance_type,
+ graph()->GetConstant0(), top_site);
// If allocation folding reaches Page::kMaxRegularHeapObjectSize the
// elements array may not get folded into the object. Hence, we set the
@@ -11853,7 +11917,8 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
InstanceType instance_type = boilerplate_object->HasFastDoubleElements()
? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
object_elements = Add<HAllocate>(object_elements_size, HType::HeapObject(),
- pretenure_flag, instance_type, top_site);
+ pretenure_flag, instance_type,
+ graph()->GetConstant0(), top_site);
BuildEmitElements(boilerplate_object, elements, object_elements,
site_context);
Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
@@ -11954,9 +12019,9 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
- HInstruction* double_box =
- Add<HAllocate>(heap_number_constant, HType::HeapObject(),
- pretenure_flag, MUTABLE_HEAP_NUMBER_TYPE);
+ HInstruction* double_box = Add<HAllocate>(
+ heap_number_constant, HType::HeapObject(), pretenure_flag,
+ MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
AddStoreMapConstant(double_box,
isolate()->factory()->mutable_heap_number_map());
// Unwrap the mutable heap number from the boilerplate.
@@ -11966,9 +12031,9 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
double_box, HObjectAccess::ForHeapNumberValue(), double_value);
value_instruction = double_box;
} else if (representation.IsSmi()) {
- value_instruction = value->IsUninitialized()
- ? graph()->GetConstant0()
- : Add<HConstant>(value);
+ value_instruction = value->IsUninitialized(isolate())
+ ? graph()->GetConstant0()
+ : Add<HConstant>(value);
// Ensure that value is stored as smi.
access = access.WithRepresentation(representation);
} else {
@@ -12092,16 +12157,14 @@ void HOptimizedGraphBuilder::VisitSuperCallReference(SuperCallReference* expr) {
void HOptimizedGraphBuilder::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
DCHECK(globals_.is_empty());
- AstVisitor::VisitDeclarations(declarations);
+ AstVisitor<HOptimizedGraphBuilder>::VisitDeclarations(declarations);
if (!globals_.is_empty()) {
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
- int flags =
- DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
- Add<HDeclareGlobals>(array, flags);
+ int flags = current_info()->GetDeclareGlobalsFlags();
+ Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+ Add<HDeclareGlobals>(array, flags, vector);
globals_.Rewind(0);
}
}
@@ -12110,26 +12173,26 @@ void HOptimizedGraphBuilder::VisitDeclarations(
void HOptimizedGraphBuilder::VisitVariableDeclaration(
VariableDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_.Add(variable->name(), zone());
- globals_.Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(), zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_.Add(isolate()->factory()->undefined_value(), zone());
return;
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
HValue* value = graph()->GetConstantHole();
environment()->Bind(variable, value);
}
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
HValue* value = graph()->GetConstantHole();
HValue* context = environment()->context();
HStoreContextSlot* store = Add<HStoreContextSlot>(
@@ -12141,6 +12204,8 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
break;
case VariableLocation::LOOKUP:
return Bailout(kUnsupportedLookupSlotInDeclaration);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -12152,7 +12217,9 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_.Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
declaration->fun(), current_info()->script(), top_info());
// Check for stack-overflow exception.
@@ -12180,22 +12247,12 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
}
case VariableLocation::LOOKUP:
return Bailout(kUnsupportedLookupSlotInDeclaration);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
-void HOptimizedGraphBuilder::VisitImportDeclaration(
- ImportDeclaration* declaration) {
- UNREACHABLE();
-}
-
-
-void HOptimizedGraphBuilder::VisitExportDeclaration(
- ExportDeclaration* declaration) {
- UNREACHABLE();
-}
-
-
void HOptimizedGraphBuilder::VisitRewritableExpression(
RewritableExpression* node) {
CHECK_ALIVE(Visit(node->expression()));
@@ -12275,33 +12332,8 @@ void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
Callable callable = CodeFactory::ToInteger(isolate());
HValue* stub = Add<HConstant>(callable.code());
HValue* values[] = {context(), input};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
- return ast_context()->ReturnInstruction(result, call->id());
- }
-}
-
-
-void HOptimizedGraphBuilder::GenerateToName(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* input = Pop();
- if (input->type().IsSmi()) {
- HValue* result = BuildNumberToString(input, Type::SignedSmall());
- return ast_context()->ReturnValue(result);
- } else if (input->type().IsTaggedNumber()) {
- HValue* result = BuildNumberToString(input, Type::Number());
- return ast_context()->ReturnValue(result);
- } else if (input->type().IsString()) {
- return ast_context()->ReturnValue(input);
- } else {
- Callable callable = CodeFactory::ToName(isolate());
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), input};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
}
@@ -12326,9 +12358,8 @@ void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
Callable callable = CodeFactory::ToString(isolate());
HValue* stub = Add<HConstant>(callable.code());
HValue* values[] = {context(), input};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
}
@@ -12341,9 +12372,8 @@ void HOptimizedGraphBuilder::GenerateToLength(CallRuntime* call) {
HValue* input = Pop();
HValue* stub = Add<HConstant>(callable.code());
HValue* values[] = {context(), input};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12412,66 +12442,6 @@ void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* object = Pop();
-
- IfBuilder if_objectisvalue(this);
- HValue* objectisvalue = if_objectisvalue.If<HHasInstanceTypeAndBranch>(
- object, JS_VALUE_TYPE);
- if_objectisvalue.Then();
- {
- // Return the actual value.
- Push(Add<HLoadNamedField>(
- object, objectisvalue,
- HObjectAccess::ForObservableJSObjectOffset(
- JSValue::kValueOffset)));
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_objectisvalue.Else();
- {
- // If the object is not a value return the object.
- Push(object);
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_objectisvalue.End();
- return ast_context()->ReturnValue(Pop());
-}
-
-
-void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
- CallRuntime* call) {
- DCHECK(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* string = Pop();
- HValue* value = Pop();
- HValue* index = Pop();
- Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
- index, value);
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
-void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
- CallRuntime* call) {
- DCHECK(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* string = Pop();
- HValue* value = Pop();
- HValue* index = Pop();
- Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
- index, value);
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- return ast_context()->ReturnValue(graph()->GetConstantUndefined());
-}
-
-
// Fast support for charCodeAt(n).
void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
@@ -12494,20 +12464,6 @@ void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
}
-// Fast support for string.charAt(n) and string[n].
-void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* index = Pop();
- HValue* string = Pop();
- HInstruction* char_code = BuildStringCharCodeAt(string, index);
- AddInstruction(char_code);
- HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
DCHECK_EQ(3, call->arguments()->length());
@@ -12516,9 +12472,9 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
Callable callable = CodeFactory::SubString(isolate());
HValue* stub = Add<HConstant>(callable.code());
HValue* values[] = {context()};
- HInstruction* result = New<HCallWithDescriptor>(
- stub, call->arguments()->length(), callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, call->arguments()->length(),
+ callable.descriptor(), ArrayVector(values));
result->set_type(HType::String());
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12531,8 +12487,8 @@ void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
FastNewObjectDescriptor descriptor(isolate());
HValue* values[] = {context(), Pop(), Pop()};
HConstant* stub_value = Add<HConstant>(stub.GetCode());
- HInstruction* result = New<HCallWithDescriptor>(
- stub_value, 0, descriptor, Vector<HValue*>(values, arraysize(values)));
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub_value, 0, descriptor, ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12544,9 +12500,9 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
Callable callable = CodeFactory::RegExpExec(isolate());
HValue* stub = Add<HConstant>(callable.code());
HValue* values[] = {context()};
- HInstruction* result = New<HCallWithDescriptor>(
- stub, call->arguments()->length(), callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, call->arguments()->length(),
+ callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12571,35 +12527,6 @@ void HOptimizedGraphBuilder::GenerateRegExpSource(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* lo = Pop();
- HValue* hi = Pop();
- HInstruction* result = NewUncasted<HConstructDouble>(hi, lo);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
// Construct a RegExp exec result with two in-object properties.
void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
DCHECK_EQ(3, call->arguments()->length());
@@ -12628,75 +12555,15 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
DCHECK_LE(2, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
-
- // Try and customize ES6 instanceof here.
- // We should at least have the constructor on the expression stack.
- if (FLAG_harmony_instanceof && FLAG_harmony_instanceof_opt &&
- call->arguments()->length() == 3) {
- HValue* target = environment()->ExpressionStackAt(2);
- if (target->IsConstant()) {
- HConstant* constant_function = HConstant::cast(target);
- if (constant_function->handle(isolate())->IsJSFunction()) {
- Handle<JSFunction> func =
- Handle<JSFunction>::cast(constant_function->handle(isolate()));
- if (*func == isolate()->native_context()->ordinary_has_instance()) {
- // Look at the function, which will be argument 1.
- HValue* right = environment()->ExpressionStackAt(1);
- if (right->IsConstant() &&
- HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
- Handle<JSFunction> constructor = Handle<JSFunction>::cast(
- HConstant::cast(right)->handle(isolate()));
- if (constructor->IsConstructor() &&
- !constructor->map()->has_non_instance_prototype()) {
- JSFunction::EnsureHasInitialMap(constructor);
- DCHECK(constructor->has_initial_map());
- Handle<Map> initial_map(constructor->initial_map(), isolate());
- top_info()->dependencies()->AssumeInitialMapCantChange(
- initial_map);
- HInstruction* prototype =
- Add<HConstant>(handle(initial_map->prototype(), isolate()));
- HValue* left = environment()->ExpressionStackAt(0);
- HHasInPrototypeChainAndBranch* result =
- New<HHasInPrototypeChainAndBranch>(left, prototype);
- Drop(3);
- return ast_context()->ReturnControl(result, call->id());
- }
- }
- }
- }
- }
- }
-
CallTrampolineDescriptor descriptor(isolate());
PushArgumentsFromEnvironment(call->arguments()->length() - 1);
HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
HValue* target = Pop();
HValue* values[] = {context(), target,
Add<HConstant>(call->arguments()->length() - 2)};
- HInstruction* result = New<HCallWithDescriptor>(
- trampoline, call->arguments()->length() - 1, descriptor,
- Vector<HValue*>(values, arraysize(values)));
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast call to math functions.
-void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* right = Pop();
- HValue* left = Pop();
- HInstruction* result = NewUncasted<HPower>(left, right);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
+ HInstruction* result =
+ New<HCallWithDescriptor>(trampoline, call->arguments()->length() - 1,
+ descriptor, ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12777,7 +12644,7 @@ HValue* HOptimizedGraphBuilder::BuildAllocateOrderedHashTable() {
// Allocate the table and add the proper map.
HValue* table =
Add<HAllocate>(Add<HConstant>(kSizeInBytes), HType::HeapObject(),
- NOT_TENURED, FIXED_ARRAY_TYPE);
+ NOT_TENURED, FIXED_ARRAY_TYPE, graph()->GetConstant0());
AddStoreMapConstant(table, isolate()->factory()->ordered_hash_table_map());
// Initialize the FixedArray...
@@ -12907,13 +12774,6 @@ void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
return ast_context()->ReturnValue(value);
}
-void HOptimizedGraphBuilder::GenerateGetOrdinaryHasInstance(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 0);
- // ordinary_has_instance is immutable so we can treat it as a constant.
- HValue* value = Add<HConstant>(isolate()->ordinary_has_instance());
- return ast_context()->ReturnValue(value);
-}
-
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
@@ -12934,7 +12794,7 @@ HEnvironment::HEnvironment(HEnvironment* outer,
push_count_(0),
ast_id_(BailoutId::None()),
zone_(zone) {
- Scope* declaration_scope = scope->DeclarationScope();
+ DeclarationScope* declaration_scope = scope->GetDeclarationScope();
Initialize(declaration_scope->num_parameters() + 1,
declaration_scope->num_stack_slots(), 0);
}
@@ -13235,13 +13095,26 @@ std::ostream& operator<<(std::ostream& os, const HEnvironment& env) {
void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
- base::SmartArrayPointer<char> name = info->GetDebugName();
+ std::string name;
+ if (info->parse_info()) {
+ Object* source_name = info->script()->name();
+ if (source_name->IsString()) {
+ String* str = String::cast(source_name);
+ if (str->length() > 0) {
+ name.append(str->ToCString().get());
+ name.append(":");
+ }
+ }
+ }
+ std::unique_ptr<char[]> method_name = info->GetDebugName();
+ name.append(method_name.get());
if (info->IsOptimizing()) {
- PrintStringProperty("name", name.get());
+ PrintStringProperty("name", name.c_str());
PrintIndent();
- trace_.Add("method \"%s:%d\"\n", name.get(), info->optimization_id());
+ trace_.Add("method \"%s:%d\"\n", method_name.get(),
+ info->optimization_id());
} else {
- PrintStringProperty("name", name.get());
+ PrintStringProperty("name", name.c_str());
PrintStringProperty("method", "stub");
}
PrintLongProperty("date",
@@ -13423,10 +13296,11 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type,
int assigned_reg = op->index();
if (op->IsDoubleRegister()) {
trace_.Add(" \"%s\"",
- DoubleRegister::from_code(assigned_reg).ToString());
+ GetRegConfig()->GetDoubleRegisterName(assigned_reg));
} else {
DCHECK(op->IsRegister());
- trace_.Add(" \"%s\"", Register::from_code(assigned_reg).ToString());
+ trace_.Add(" \"%s\"",
+ GetRegConfig()->GetGeneralRegisterName(assigned_reg));
}
} else if (range->IsSpilled()) {
LOperand* op = range->TopLevel()->GetSpillOperand();
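
[Note on the tracer hunks above] The HTracer changes adjust how hydrogen.cfg output is labeled: the compilation entry is now named script:method when the script has a name, and live-range register names come from the shared RegisterConfiguration instead of the per-architecture Register/DoubleRegister classes. The resulting header looks roughly like this; file and method names are invented for illustration:

begin_compilation
  name "app.js:compute"
  method "compute:17"
  ...
end_compilation
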
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index 10c0baa29d..931dd01dcb 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -7,12 +7,13 @@
#include "src/accessors.h"
#include "src/allocation.h"
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
+#include "src/ast/ast-type-bounds.h"
#include "src/bailout-reason.h"
#include "src/compiler.h"
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/hydrogen-instructions.h"
+#include "src/globals.h"
+#include "src/parsing/parse-info.h"
#include "src/zone.h"
namespace v8 {
@@ -29,7 +30,30 @@ class HTracer;
class LAllocator;
class LChunk;
class LiveRange;
+class Scope;
+class HCompilationJob final : public CompilationJob {
+ public:
+ explicit HCompilationJob(Handle<JSFunction> function)
+ : CompilationJob(&info_, "Crankshaft"),
+ zone_(function->GetIsolate()->allocator()),
+ parse_info_(&zone_, function),
+ info_(&parse_info_, function),
+ graph_(nullptr),
+ chunk_(nullptr) {}
+
+ protected:
+ virtual Status PrepareJobImpl();
+ virtual Status ExecuteJobImpl();
+ virtual Status FinalizeJobImpl();
+
+ private:
+ Zone zone_;
+ ParseInfo parse_info_;
+ CompilationInfo info_;
+ HGraph* graph_;
+ LChunk* chunk_;
+};
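
[Note on HCompilationJob] The new class plugs Crankshaft into the generic CompilationJob interface, which splits a compile into three phases: prepare (main thread), execute (no heap access, so it may run on a background thread), and finalize (main thread, installs the generated code). A hedged sketch of the driver side of that protocol; the Run() driver and Status values here are illustrative, only the three *JobImpl hooks come from the class above:

// Sketch: a three-phase compilation job. Subclasses such as
// HCompilationJob override the *JobImpl hooks; a driver runs them
// in order and stops at the first failure.
enum class Status { SUCCEEDED, FAILED };

class CompilationJob {
 public:
  virtual ~CompilationJob() = default;
  Status Run() {
    if (PrepareJobImpl() != Status::SUCCEEDED) return Status::FAILED;
    if (ExecuteJobImpl() != Status::SUCCEEDED) return Status::FAILED;
    return FinalizeJobImpl();
  }

 protected:
  virtual Status PrepareJobImpl() = 0;   // main thread: set up graph building
  virtual Status ExecuteJobImpl() = 0;   // may run off the main thread
  virtual Status FinalizeJobImpl() = 0;  // main thread: install generated code
};
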
class HBasicBlock final : public ZoneObject {
public:
@@ -293,6 +317,11 @@ class HLoopInformation final : public ZoneObject {
HStackCheck* stack_check_;
};
+struct HInlinedFunctionInfo {
+ explicit HInlinedFunctionInfo(int start_position)
+ : start_position(start_position) {}
+ int start_position;
+};
class HGraph final : public ZoneObject {
public:
@@ -392,13 +421,11 @@ class HGraph final : public ZoneObject {
}
int maximum_environment_size() { return maximum_environment_size_; }
- bool use_optimistic_licm() {
- return use_optimistic_licm_;
- }
+ bool allow_code_motion() const { return allow_code_motion_; }
+ void set_allow_code_motion(bool value) { allow_code_motion_ = value; }
- void set_use_optimistic_licm(bool value) {
- use_optimistic_licm_ = value;
- }
+ bool use_optimistic_licm() const { return use_optimistic_licm_; }
+ void set_use_optimistic_licm(bool value) { use_optimistic_licm_ = value; }
void MarkDependsOnEmptyArrayProtoElements() {
// Add map dependency if not already added.
@@ -444,6 +471,10 @@ class HGraph final : public ZoneObject {
// the corresponding script.
int SourcePositionToScriptPosition(SourcePosition position);
+ ZoneVector<HInlinedFunctionInfo>& inlined_function_infos() {
+ return inlined_function_infos_;
+ }
+
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -480,6 +511,7 @@ class HGraph final : public ZoneObject {
CallInterfaceDescriptor descriptor_;
Zone* zone_;
+ bool allow_code_motion_;
bool use_optimistic_licm_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
@@ -487,6 +519,8 @@ class HGraph final : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
+ ZoneVector<HInlinedFunctionInfo> inlined_function_infos_;
+
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -1299,7 +1333,7 @@ class HGraphBuilder {
class P7, class P8, class P9>
HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7,
P8 p8, P9 p9) {
- return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p8));
+ return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8, p9));
}
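
[Note on the one-character hunk above] This fixes a genuine bug: the nine-argument AddUncasted overload forwarded p8 twice and silently dropped p9. Because the parameters often share a type, the compiler cannot catch the slip; a tiny self-contained illustration of why it goes unnoticed:

#include <iostream>

// The buggy forwarding compiles fine whenever p8 and p9 have the same
// type; the mistake only shows up in the computed result.
int Sum3(int a, int b, int c) { return a + b + c; }
int ForwardBuggy(int p7, int p8, int p9) { return Sum3(p7, p8, p8); }  // drops p9
int ForwardFixed(int p7, int p8, int p9) { return Sum3(p7, p8, p9); }

int main() {
  std::cout << ForwardBuggy(1, 2, 3) << " vs " << ForwardFixed(1, 2, 3)
            << "\n";  // prints "5 vs 6"
}
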
template <class I, class P1, class P2, class P3, class P4, class P5, class P6,
@@ -1482,7 +1516,7 @@ class HGraphBuilder {
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
- void FinishExitWithHardDeoptimization(Deoptimizer::DeoptReason reason);
+ void FinishExitWithHardDeoptimization(DeoptimizeReason reason);
void AddIncrementCounter(StatsCounter* counter);
@@ -1631,12 +1665,12 @@ class HGraphBuilder {
void End();
void EndUnreachable();
- void Deopt(Deoptimizer::DeoptReason reason);
- void ThenDeopt(Deoptimizer::DeoptReason reason) {
+ void Deopt(DeoptimizeReason reason);
+ void ThenDeopt(DeoptimizeReason reason) {
Then();
Deopt(reason);
}
- void ElseDeopt(Deoptimizer::DeoptReason reason) {
+ void ElseDeopt(DeoptimizeReason reason) {
Else();
Deopt(reason);
}
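
[Note on DeoptimizeReason] From here on, the patch replaces the nested Deoptimizer::DeoptReason enum with a standalone DeoptimizeReason type, used pervasively in the ia32 code generator below. A sketch of the usual V8 macro-list shape for such an enum; the reason list and helper shown here are assumptions, not the contents of the real deoptimize-reason.h:

#include <cstdint>

// Hypothetical reason list; the real header enumerates every reason
// used by calls like DeoptimizeIf(..., DeoptimizeReason::kMinusZero).
#define DEOPTIMIZE_REASON_LIST(V)       \
  V(DivisionByZero, "division by zero") \
  V(MinusZero, "minus zero")            \
  V(Overflow, "overflow")

enum class DeoptimizeReason : uint8_t {
#define DECLARE_REASON(Name, message) k##Name,
  DEOPTIMIZE_REASON_LIST(DECLARE_REASON)
#undef DECLARE_REASON
};

inline const char* DeoptimizeReasonToString(DeoptimizeReason reason) {
  static const char* const kMessages[] = {
#define GET_MESSAGE(Name, message) message,
      DEOPTIMIZE_REASON_LIST(GET_MESSAGE)
#undef GET_MESSAGE
  };
  return kMessages[static_cast<uint8_t>(reason)];
}
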
@@ -1744,69 +1778,6 @@ class HGraphBuilder {
HValue* BuildNewElementsCapacity(HValue* old_capacity);
- class JSArrayBuilder final {
- public:
- JSArrayBuilder(HGraphBuilder* builder,
- ElementsKind kind,
- HValue* allocation_site_payload,
- HValue* constructor_function,
- AllocationSiteOverrideMode override_mode);
-
- JSArrayBuilder(HGraphBuilder* builder,
- ElementsKind kind,
- HValue* constructor_function = NULL);
-
- enum FillMode {
- DONT_FILL_WITH_HOLE,
- FILL_WITH_HOLE
- };
-
- ElementsKind kind() { return kind_; }
- HAllocate* elements_location() { return elements_location_; }
-
- HAllocate* AllocateEmptyArray();
- HAllocate* AllocateArray(HValue* capacity,
- HValue* length_field,
- FillMode fill_mode = FILL_WITH_HOLE);
- // Use these allocators when capacity could be unknown at compile time
- // but its limit is known. For constant |capacity| the value of
- // |capacity_upper_bound| is ignored and the actual |capacity|
- // value is used as an upper bound.
- HAllocate* AllocateArray(HValue* capacity,
- int capacity_upper_bound,
- HValue* length_field,
- FillMode fill_mode = FILL_WITH_HOLE);
- HAllocate* AllocateArray(HValue* capacity,
- HConstant* capacity_upper_bound,
- HValue* length_field,
- FillMode fill_mode = FILL_WITH_HOLE);
- HValue* GetElementsLocation() { return elements_location_; }
- HValue* EmitMapCode();
-
- private:
- Zone* zone() const { return builder_->zone(); }
- int elements_size() const {
- return IsFastDoubleElementsKind(kind_) ? kDoubleSize : kPointerSize;
- }
- HGraphBuilder* builder() { return builder_; }
- HGraph* graph() { return builder_->graph(); }
- int initial_capacity() {
- STATIC_ASSERT(JSArray::kPreallocatedArrayElements > 0);
- return JSArray::kPreallocatedArrayElements;
- }
-
- HValue* EmitInternalMapCode();
-
- HGraphBuilder* builder_;
- ElementsKind kind_;
- AllocationSiteMode mode_;
- HValue* allocation_site_payload_;
- HValue* constructor_function_;
- HAllocate* elements_location_;
- };
-
- HValue* BuildAllocateArrayFromLength(JSArrayBuilder* array_builder,
- HValue* length_argument);
HValue* BuildCalculateElementsSize(ElementsKind kind,
HValue* capacity);
HAllocate* AllocateJSArrayObject(AllocationSiteMode mode);
@@ -1901,7 +1872,7 @@ class HGraphBuilder {
protected:
void SetSourcePosition(int position) {
- if (position != RelocInfo::kNoPosition) {
+ if (position != kNoSourcePosition) {
position_.set_position(position - start_position_);
}
// Otherwise position remains unknown.
@@ -1918,7 +1889,7 @@ class HGraphBuilder {
// the SourcePosition assuming that this position corresponds to the
// same function as current position_.
SourcePosition ScriptPositionToSourcePosition(int position) {
- if (position == RelocInfo::kNoPosition) {
+ if (position == kNoSourcePosition) {
return SourcePosition::Unknown();
}
SourcePosition pos = position_;
@@ -1929,6 +1900,9 @@ class HGraphBuilder {
SourcePosition source_position() { return position_; }
void set_source_position(SourcePosition position) { position_ = position; }
+ int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ SourcePosition position);
+
HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
template <typename ViewClass>
void BuildArrayBufferViewInitialization(HValue* obj,
@@ -1953,10 +1927,9 @@ class HGraphBuilder {
int start_position_;
};
-
template <>
inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
- Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
+ DeoptimizeReason reason, Deoptimizer::BailoutType type) {
if (type == Deoptimizer::SOFT) {
isolate()->counters()->soft_deopts_requested()->Increment();
if (FLAG_always_opt) return NULL;
@@ -1973,10 +1946,9 @@ inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
return instr;
}
-
template <>
inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
- Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
+ DeoptimizeReason reason, Deoptimizer::BailoutType type) {
return Add<HDeoptimize>(reason, type);
}
@@ -2087,8 +2059,10 @@ inline HContext* HGraphBuilder::New<HContext>() {
return HContext::New(zone());
}
-
-class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
+// This AstVisitor is not final, and provides the AstVisitor methods as virtual
+// methods so they can be specialized by subclasses.
+class HOptimizedGraphBuilder : public HGraphBuilder,
+ public AstVisitor<HOptimizedGraphBuilder> {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
@@ -2168,7 +2142,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
FunctionState* function_state() const { return function_state_; }
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
+ AstTypeBounds* bounds() { return &bounds_; }
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
void operator delete(void* pointer, Zone* zone) { }
@@ -2214,17 +2190,20 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
TestContext* inlined_test_context() const {
return function_state()->test_context();
}
+ Handle<JSFunction> current_closure() const {
+ return current_info()->closure();
+ }
Handle<SharedFunctionInfo> current_shared_info() const {
return current_info()->shared_info();
}
TypeFeedbackVector* current_feedback_vector() const {
- return current_shared_info()->feedback_vector();
+ return current_closure()->feedback_vector();
}
void ClearInlinedTestContext() {
function_state()->ClearInlinedTestContext();
}
LanguageMode function_language_mode() {
- return function_state()->compilation_info()->language_mode();
+ return function_state()->compilation_info()->parse_info()->language_mode();
}
#define FOR_EACH_HYDROGEN_INTRINSIC(F) \
@@ -2235,19 +2214,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsJSProxy) \
F(Call) \
F(NewObject) \
- F(ValueOf) \
F(StringCharFromCode) \
- F(StringCharAt) \
- F(OneByteSeqStringSetChar) \
- F(TwoByteSeqStringSetChar) \
F(ToInteger) \
- F(ToName) \
F(ToObject) \
F(ToString) \
F(ToLength) \
F(ToNumber) \
F(IsJSReceiver) \
- F(MathPow) \
F(HasCachedArrayIndex) \
F(GetCachedArrayIndex) \
F(DebugBreakInOptimizedCode) \
@@ -2259,7 +2232,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(RegExpSource) \
F(NumberToString) \
F(DebugIsActive) \
- F(GetOrdinaryHasInstance) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
F(MaxSmi) \
@@ -2269,11 +2241,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(TypedArrayGetLength) \
/* ArrayBuffer */ \
F(ArrayBufferGetByteLength) \
- /* Maths */ \
- F(ConstructDouble) \
- F(DoubleHi) \
- F(DoubleLo) \
- F(MathLogRT) \
/* ES6 Collections */ \
F(MapClear) \
F(MapInitialize) \
@@ -2302,7 +2269,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void VisitLogicalExpression(BinaryOperation* expr);
void VisitArithmeticExpression(BinaryOperation* expr);
- void VisitLoopBody(IterationStatement* stmt,
+ void VisitLoopBody(IterationStatement* stmt, BailoutId stack_check_id,
HBasicBlock* loop_entry);
void BuildForInBody(ForInStatement* stmt, Variable* each_var,
@@ -2328,7 +2295,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HBasicBlock* BuildLoopEntry(IterationStatement* statement);
HBasicBlock* JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
+ BailoutId continue_id, HBasicBlock* exit_block,
HBasicBlock* continue_block);
HValue* Top() const { return environment()->Top(); }
@@ -2336,21 +2303,19 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
bool IsEligibleForEnvironmentLivenessAnalysis(Variable* var,
int index,
- HValue* value,
HEnvironment* env) {
if (!FLAG_analyze_environment_liveness) return false;
// |this| and |arguments| are always live; zapping parameters isn't
// safe because function.arguments can inspect them at any time.
return !var->is_this() &&
!var->is_arguments() &&
- !value->IsArgumentsObject() &&
env->is_local_index(index);
}
void BindIfLive(Variable* var, HValue* value) {
HEnvironment* env = environment();
int index = env->IndexFor(var);
env->Bind(index, value);
- if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+ if (IsEligibleForEnvironmentLivenessAnalysis(var, index, env)) {
HEnvironmentMarker* bind =
Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index);
USE(bind);
@@ -2362,8 +2327,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* LookupAndMakeLive(Variable* var) {
HEnvironment* env = environment();
int index = env->IndexFor(var);
- HValue* value = env->Lookup(index);
- if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+ if (IsEligibleForEnvironmentLivenessAnalysis(var, index, env)) {
HEnvironmentMarker* lookup =
Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index);
USE(lookup);
@@ -2371,7 +2335,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
lookup->set_closure(env->closure());
#endif
}
- return value;
+ return env->Lookup(index);
}
// The value of the arguments object is allowed in some but not most value
@@ -2386,7 +2350,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HBasicBlock* false_block);
// Visit a list of expressions from left to right, each in a value context.
- void VisitExpressions(ZoneList<Expression*>* exprs) override;
+ void VisitExpressions(ZoneList<Expression*>* exprs);
void VisitExpressions(ZoneList<Expression*>* exprs,
ArgumentsAllowedFlag flag);
@@ -2395,10 +2359,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
void PushArgumentsFromEnvironment(int count);
- void SetUpScope(Scope* scope);
- void VisitStatements(ZoneList<Statement*>* statements) override;
+ void SetUpScope(DeclarationScope* scope);
+ void VisitStatements(ZoneList<Statement*>* statements);
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -2422,10 +2386,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void BuildFunctionApply(Call* expr);
void BuildFunctionCall(Call* expr);
- bool TryHandleArrayCall(Call* expr, HValue* function);
- bool TryHandleArrayCallNew(CallNew* expr, HValue* function);
- void BuildArrayCall(Expression* expr, int arguments_count, HValue* function,
- Handle<AllocationSite> cell);
+ template <class T>
+ bool TryHandleArrayCall(T* expr, HValue* function);
enum ArrayIndexOfMode { kFirstIndexOf, kLastIndexOf };
HValue* BuildArrayIndexOf(HValue* receiver,
@@ -2451,8 +2413,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* implicit_return_value);
bool TryInlineIndirectCall(Handle<JSFunction> function, Call* expr,
int arguments_count);
- bool TryInlineBuiltinMethodCall(Call* expr, Handle<JSFunction> function,
- Handle<Map> receiver_map,
+ bool TryInlineBuiltinGetterCall(Handle<JSFunction> function,
+ Handle<Map> receiver_map, BailoutId ast_id);
+ bool TryInlineBuiltinMethodCall(Handle<JSFunction> function,
+ Handle<Map> receiver_map, BailoutId ast_id,
int args_count_no_receiver);
bool TryInlineBuiltinFunctionCall(Call* expr);
enum ApiCallType {
@@ -2535,9 +2499,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
return handle(isolate()->native_context()->array_function());
}
- bool IsCallArrayInlineable(int argument_count, Handle<AllocationSite> site);
- void BuildInlinedCallArray(Expression* expression, int argument_count,
- Handle<AllocationSite> site);
+ bool TryInlineArrayCall(Expression* expression, int argument_count,
+ Handle<AllocationSite> site);
void BuildInitializeInobjectProperties(HValue* receiver,
Handle<Map> initial_map);
@@ -2596,20 +2559,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
return false;
}
- bool IsJSArrayBufferViewFieldAccessor() {
- int offset; // unused
- return Accessors::IsJSArrayBufferViewFieldAccessor(map_, name_, &offset);
- }
-
- bool GetJSArrayBufferViewFieldAccess(HObjectAccess* access) {
- int offset;
- if (Accessors::IsJSArrayBufferViewFieldAccessor(map_, name_, &offset)) {
- *access = HObjectAccess::ForMapAndOffset(map_, offset);
- return true;
- }
- return false;
- }
-
bool has_holder() { return !holder_.is_null(); }
bool IsLoad() const { return access_type_ == LOAD; }
@@ -2903,6 +2852,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HOsrBuilder* osr_;
+ AstTypeBounds bounds_;
+
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
friend class KeyedLoadFastElementStub;
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index d8b20c87a7..8233659ddb 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -15,7 +15,6 @@
#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -165,25 +164,24 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(edi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
+ Immediate(slots));
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -193,10 +191,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
// Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -274,8 +273,6 @@ bool LCodeGen::GenerateJumpTable() {
if (info()->saves_caller_doubles()) RestoreCallerDoubles();
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
@@ -682,9 +679,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
}
-
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -725,19 +721,18 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -750,9 +745,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@@ -811,13 +805,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -890,7 +877,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@@ -907,7 +894,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -922,7 +909,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -944,7 +931,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -955,7 +942,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@@ -974,7 +961,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -994,19 +981,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1027,7 +1014,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1035,7 +1022,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1045,7 +1032,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1065,7 +1052,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1074,7 +1061,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1084,7 +1071,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1095,7 +1082,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1117,13 +1104,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
return;
}
@@ -1150,7 +1137,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1158,7 +1145,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1205,7 +1192,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1214,7 +1201,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1224,7 +1211,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1302,7 +1289,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1312,15 +1299,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@@ -1393,7 +1380,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@@ -1410,7 +1397,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1425,7 +1412,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@@ -1436,7 +1423,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
__ shl(ToRegister(left), shift_count);
}
@@ -1462,7 +1449,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1634,7 +1621,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
}
@@ -1893,7 +1880,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
@@ -1956,7 +1943,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -2291,16 +2278,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(eax));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2324,16 +2301,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
- DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, equal);
__ cmp(object_prototype, factory()->null_value());
EmitFalseBranch(instr, equal);
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, equal);
__ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
__ jmp(&loop);
}
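
[Note on the loop reorder above] DoHasInPrototypeChainAndBranch now runs the end-of-chain null test before the comparison with the searched-for prototype. The only input where the order is observable is a search for the null value itself, which now falls out of the loop as false instead of matching as true. A C++-level sketch of the new walk, using a hypothetical minimal object model in place of the map loads:

// Minimal object model for the sketch: each object holds its prototype,
// and a distinguished sentinel plays the role of the null value.
struct Object { Object* prototype; };

Object* Null() { static Object sentinel{&sentinel}; return &sentinel; }
Object* GetPrototype(Object* o) { return o->prototype; }

bool HasInPrototypeChain(Object* object, Object* prototype) {
  for (Object* p = GetPrototype(object);; p = GetPrototype(p)) {
    if (p == Null()) return false;  // end-of-chain test now comes first
    if (p == prototype) return true;
  }
}
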
@@ -2431,15 +2408,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2452,7 +2426,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -2473,7 +2447,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@@ -2554,10 +2528,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2573,7 +2544,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -2657,7 +2628,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -2689,7 +2660,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -2716,10 +2687,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -2728,12 +2699,12 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ j(not_equal, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
- // it needs to bail out.
- __ mov(result, isolate()->factory()->array_protector());
+ // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+ // Otherwise it needs to bail out.
+ __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ cmp(FieldOperand(result, PropertyCell::kValueOffset),
Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ mov(result, isolate()->factory()->undefined_value());
__ bind(&done);
@@ -2788,13 +2759,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2887,9 +2854,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
- DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -2913,7 +2880,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
__ push(receiver);
__ mov(receiver, length);
@@ -2990,6 +2957,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+ __ push(Immediate(instr->hydrogen()->feedback_vector()));
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3088,7 +3056,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
Label slow, allocated, done;
uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
@@ -3146,7 +3114,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@@ -3211,20 +3179,20 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ j(not_equal, &non_zero, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ bind(&non_zero);
}
__ roundsd(xmm_scratch, input_reg, kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3233,7 +3201,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kNear);
__ bind(&positive_sign);
@@ -3243,7 +3211,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3254,7 +3222,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ bind(&done);
}
@@ -3297,7 +3265,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3312,7 +3280,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ cvttsd2si(output_reg, Operand(input_temp));
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmp(output_reg, 0x1);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ Cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3328,7 +3296,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ Move(output_reg, Immediate(0));
__ bind(&done);
@@ -3404,7 +3372,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!ecx.is(tagged_exponent));
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3420,31 +3388,18 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
- DCHECK(instr->value()->Equals(instr->result()));
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = double_scratch0();
- Label positive, done, zero;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
- __ j(above, &positive, Label::kNear);
- __ j(not_carry, &zero, Label::kNear);
- __ pcmpeqd(input_reg, input_reg);
- __ jmp(&done, Label::kNear);
- __ bind(&zero);
- ExternalReference ninf =
- ExternalReference::address_of_negative_infinity();
- __ movsd(input_reg, Operand::StaticVariable(ninf));
- __ jmp(&done, Label::kNear);
- __ bind(&positive);
- __ fldln2();
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), input_reg);
- __ fld_d(Operand(esp, 0));
- __ fyl2x();
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ // Pass one double as argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ movsd(Operand(esp, 0 * kDoubleSize), input);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ // Store it into the result register.
+ __ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movsd(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
- __ bind(&done);
+ __ movsd(result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
}
@@ -3455,15 +3410,49 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Lzcnt(result, input);
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+  // Pass the double argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ movsd(Operand(esp, 0 * kDoubleSize), input);
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ // Store it into the result register.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+}
+
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+  // Pass the double argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ movsd(Operand(esp, 0 * kDoubleSize), input);
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ // Store it into the result register.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+}
void LCodeGen::DoMathExp(LMathExp* instr) {
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
- XMMRegister temp0 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
+  // Pass the double argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ movsd(Operand(esp, 0 * kDoubleSize), input);
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ // Store it into the result register.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
}
void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
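The four math hunks above (log, cos, sin, exp) all switch from hand-rolled code generation to calling a shared C implementation, and they all repeat the same marshalling dance: on ia32 the cdecl convention returns doubles in the x87 register st(0), while Crankshaft keeps doubles in XMM registers, so the return value is stored with fstp_d and reloaded with movsd through a stack slot. The argument count of 2 passed to PrepareCallCFunction/CallCFunction is in 4-byte stack words, which a single double fills. A minimal standalone sketch of the same round trip, assuming only the C calling convention (my_ieee754_log is an illustrative stand-in, not V8's ExternalReference):

#include <cmath>

// Stand-in for the ieee754_* routines the hunks call; any cdecl function
// taking and returning a double exhibits the same st(0)-vs-XMM mismatch.
extern "C" double my_ieee754_log(double x) { return std::log(x); }

double CallAndFetchResult(double input) {
  // A compiler targeting ia32 emits essentially the hand-written sequence
  // above: spill the argument to two stack slots, call, then move the
  // st(0) return value through memory into an SSE register.
  return my_ieee754_log(input);
}

The hand-written version exists only because Crankshaft schedules registers itself and cannot rely on a C++ compiler to do this marshalling for it.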
@@ -3478,7 +3467,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3548,14 +3539,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
__ Move(eax, Immediate(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ mov(ebx, instr->hydrogen()->site());
- } else {
- __ mov(ebx, isolate()->factory()->undefined_value());
- }
+ __ mov(ebx, instr->hydrogen()->site());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
@@ -3589,7 +3573,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -3719,14 +3703,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3751,7 +3733,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -3896,13 +3878,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3912,7 +3892,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -3986,14 +3966,21 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- __ mov(ebx, ToImmediate(key, Representation::Smi()));
+ LConstantOperand* constant_key = LConstantOperand::cast(key);
+ int32_t int_key = ToInteger32(constant_key);
+ if (Smi::IsValid(int_key)) {
+ __ mov(ebx, Immediate(Smi::FromInt(int_key)));
+ } else {
+ // We should never get here at runtime because there is a smi check on
+ // the key before this point.
+ __ int3();
+ }
} else {
__ Move(ebx, ToRegister(key));
__ SmiTag(ebx);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
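The constant-key path above now guards ToInteger32 with Smi::IsValid and traps with int3 when the constant cannot be represented. For reference, 32-bit Smis reserve one tag bit and carry a signed 31-bit payload; a minimal sketch of the validity rule under those assumptions (illustrative constants, not the real v8::internal::Smi):

#include <cstdint>

constexpr int kSmiTagSize = 1;                   // low bit 0 marks a Smi
constexpr int kSmiValueBits = 32 - kSmiTagSize;  // 31 payload bits on ia32

bool SmiIsValid(int32_t value) {
  // Valid iff the value fits the signed 31-bit payload, [-2^30, 2^30 - 1],
  // i.e. it survives the tag/untag round trip.
  return value >= -(1 << (kSmiValueBits - 1)) &&
         value <= (1 << (kSmiValueBits - 1)) - 1;
}

int32_t SmiFromInt(int32_t value) {
  // Shift in the zero tag bit (unsigned shift avoids UB on negatives).
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
}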
@@ -4002,7 +3989,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
}
@@ -4035,8 +4022,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(object_reg.is(eax));
PushSafepointRegistersScope scope(this);
__ mov(ebx, to_map);
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -4273,13 +4259,10 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(esi)) {
+ __ Move(esi, Immediate(0));
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4329,12 +4312,10 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ Move(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(esi)) {
+ __ Move(esi, Immediate(0));
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4348,12 +4329,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -4364,7 +4345,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
} else {
__ AssertSmi(result);
}
@@ -4391,7 +4372,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
}
// Heap number to XMM conversion.
@@ -4404,7 +4385,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
@@ -4413,9 +4394,11 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr,
+ DeoptimizeReason::kNotAHeapNumberUndefined);
- __ pcmpeqd(result_reg, result_reg);
+ __ xorpd(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
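The convert-undefined branch above changes how the NaN is produced: pcmpeqd set the register to all one bits, which is a NaN with an all-ones payload, while the new xorpd + divsd computes 0.0/0.0 and therefore yields the canonical quiet NaN. A small self-contained program confirming that both bit patterns are NaN:

#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  volatile double zero = 0.0;        // volatile: keep the division at runtime
  double canonical = zero / zero;    // what xorpd + divsd produces

  uint64_t all_ones = ~uint64_t{0};  // what pcmpeqd produced
  double payload_nan;
  std::memcpy(&payload_nan, &all_ones, sizeof payload_nan);

  // Both values are NaN; only their bit patterns differ.
  return (std::isnan(canonical) && std::isnan(payload_nan)) ? 0 : 1;
}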
@@ -4466,26 +4449,26 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
DeoptimizeIf(not_equal, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ Move(input_reg, Immediate(0));
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ Cvtsi2sd(scratch, Operand(input_reg));
__ ucomisd(xmm0, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
+ DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
}
}
@@ -4565,11 +4548,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -4591,21 +4574,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
}
@@ -4613,7 +4596,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
}
@@ -4626,7 +4609,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}
@@ -4645,13 +4628,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
} else {
- DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
- DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@@ -4663,12 +4646,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeReason::kWrongInstanceType);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -4684,7 +4667,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}
@@ -4699,7 +4682,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@@ -4753,7 +4736,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@@ -4792,7 +4775,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
@@ -4810,43 +4793,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- XMMRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope2(masm(), SSE4_1);
- __ pextrd(result_reg, value_reg, 1);
- } else {
- XMMRegister xmm_scratch = double_scratch0();
- __ pshufd(xmm_scratch, value_reg, 1);
- __ movd(result_reg, xmm_scratch);
- }
- } else {
- __ movd(result_reg, value_reg);
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- XMMRegister result_reg = ToDoubleRegister(instr->result());
-
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope2(masm(), SSE4_1);
- __ movd(result_reg, lo_reg);
- __ pinsrd(result_reg, hi_reg, 1);
- } else {
- XMMRegister xmm_scratch = double_scratch0();
- __ movd(result_reg, hi_reg);
- __ psllq(result_reg, 32);
- __ movd(xmm_scratch, lo_reg);
- __ orps(result_reg, xmm_scratch);
- }
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
@@ -4865,7 +4811,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register temp = ToRegister(instr->temp());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -4873,6 +4819,10 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
@@ -4903,6 +4853,29 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
}
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, temp, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, temp, flags);
+ }
+}
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
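DoFastAllocate above and the deferred-allocation hunk below are the two halves of allocation folding: a dominator performs one limit check for a whole group of allocations, each folded member is then an unchecked pointer bump with no GC safepoint of its own, and if the dominator ends up in the runtime the top pointer is rewound so the group's reservation stays intact. A rough model of that protocol, with illustrative names rather than V8's actual Heap/MacroAssembler API:

#include <cstddef>

struct Space {
  char* top;    // next free byte
  char* limit;  // end of the linear allocation area
};

// Dominator: one limit check covers the whole folded group.
char* AllocateDominator(Space* s, size_t own_size, size_t group_size) {
  if (s->top + group_size > s->limit) return nullptr;  // slow/GC path
  char* result = s->top;
  s->top += own_size;  // the rest of the group is known to fit
  return result;
}

// Folded member: pure bump, no limit check and no GC safepoint.
char* FastAllocate(Space* s, size_t object_size) {
  char* result = s->top;
  s->top += object_size;
  return result;
}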
@@ -4942,6 +4915,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+    // If the allocation folding dominator allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ __ sub(eax, Immediate(kHeapObjectTag));
+ __ mov(Operand::StaticVariable(allocation_top), eax);
+ __ add(eax, Immediate(kHeapObjectTag));
+ }
}
@@ -4954,8 +4943,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ mov(eax, Immediate(isolate()->factory()->number_string()));
__ jmp(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5211,7 +5200,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
}
@@ -5219,7 +5208,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index bc61c96339..38a493dbb4 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -115,8 +115,6 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
Scope* scope() const { return scope_; }
XMMRegister double_scratch0() const { return xmm0; }
@@ -206,10 +204,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -241,7 +239,7 @@ class LCodeGen: public LCodeGenBase {
void EmitIntegerMathAbs(LMathAbs* instr);
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -252,8 +250,6 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
index c3284df882..be8251cffb 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
@@ -167,8 +167,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : reg.code();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
@@ -183,8 +182,7 @@ Register LGapResolver::GetFreeRegisterNot(Register reg) {
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] != 0) return false;
@@ -238,8 +236,7 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index 4afeef5d68..67942241e6 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -910,7 +910,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -966,17 +966,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1104,6 +1093,8 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
switch (instr->op()) {
+ case kMathCos:
+ return DoMathCos(instr);
case kMathFloor:
return DoMathFloor(instr);
case kMathRound:
@@ -1122,6 +1113,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DoMathPowHalf(instr);
case kMathClz32:
return DoMathClz32(instr);
+ case kMathSin:
+ return DoMathSin(instr);
default:
UNREACHABLE();
return NULL;
@@ -1188,15 +1181,25 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr);
+}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr);
}
@@ -1984,20 +1987,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2029,15 +2018,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2085,10 +2068,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2158,10 +2138,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key, vector);
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2245,12 +2222,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result = new (zone())
LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2364,12 +2337,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
@@ -2406,14 +2375,19 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* size = instr->size()->IsConstant()
- ? UseConstant(instr->size())
- : UseTempRegister(instr->size());
- LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
+ LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
+ : UseRegister(instr->size());
+ if (instr->IsAllocationFolded()) {
+ LOperand* temp = TempRegister();
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new (zone()) LAllocate(context, size, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index 68541a48c4..e525341ca0 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -57,7 +57,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -65,12 +64,12 @@ class LCodeGen;
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
- V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -82,7 +81,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -102,6 +100,7 @@ class LCodeGen;
V(LoadRoot) \
V(MathAbs) \
V(MathClz32) \
+ V(MathCos) \
V(MathExp) \
V(MathFloorD) \
V(MathFloorI) \
@@ -111,6 +110,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRoundD) \
V(MathRoundI) \
+ V(MathSin) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
@@ -912,21 +912,29 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
@@ -1139,22 +1147,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
@@ -1605,18 +1597,14 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2183,6 +2171,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2358,33 +2348,6 @@ class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LAllocate final : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2401,6 +2364,19 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp) {
+ inputs_[0] = size;
+ temps_[0] = temp;
+ }
+
+ LOperand* size() const { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
@@ -2555,6 +2531,8 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/crankshaft/lithium-allocator.cc b/deps/v8/src/crankshaft/lithium-allocator.cc
index 6155dc0f23..d17cd27c10 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.cc
+++ b/deps/v8/src/crankshaft/lithium-allocator.cc
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
return a.Value() < b.Value() ? a : b;
}
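GetRegConfig above binds a static member function to a file-scope const function pointer so call sites read like an ordinary function. A standalone illustration of the idiom (Config is a made-up stand-in for RegisterConfiguration):

struct Config {
  static const Config* Crankshaft() {
    static const Config instance{8};
    return &instance;
  }
  int num_allocatable_general_registers;
};

const auto GetRegConfig = Config::Crankshaft;  // function-pointer alias

int NumAllocatable() {
  return GetRegConfig()->num_allocatable_general_registers;
}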
@@ -940,7 +942,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (instr->ClobbersRegisters()) {
for (int i = 0; i < Register::kNumRegisters; ++i) {
- if (Register::from_code(i).IsAllocatable()) {
+ if (GetRegConfig()->IsAllocatableGeneralCode(i)) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
LiveRange* range = FixedLiveRangeFor(i);
@@ -953,7 +955,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
if (instr->ClobbersDoubleRegisters(isolate())) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
- if (DoubleRegister::from_code(i).IsAllocatable()) {
+ if (GetRegConfig()->IsAllocatableDoubleCode(i)) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
@@ -1460,12 +1462,8 @@ void LAllocator::PopulatePointerMaps() {
void LAllocator::AllocateGeneralRegisters() {
LAllocatorPhase phase("L_Allocate general registers", this);
- num_registers_ =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->num_allocatable_general_registers();
- allocatable_register_codes_ =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->allocatable_general_codes();
+ num_registers_ = GetRegConfig()->num_allocatable_general_registers();
+ allocatable_register_codes_ = GetRegConfig()->allocatable_general_codes();
mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
@@ -1473,12 +1471,8 @@ void LAllocator::AllocateGeneralRegisters() {
void LAllocator::AllocateDoubleRegisters() {
LAllocatorPhase phase("L_Allocate double registers", this);
- num_registers_ =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->num_allocatable_double_registers();
- allocatable_register_codes_ =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->allocatable_double_codes();
+ num_registers_ = GetRegConfig()->num_allocatable_double_registers();
+ allocatable_register_codes_ = GetRegConfig()->allocatable_double_codes();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1596,9 +1590,9 @@ void LAllocator::AllocateRegisters() {
const char* LAllocator::RegisterName(int allocation_index) {
if (mode_ == GENERAL_REGISTERS) {
- return Register::from_code(allocation_index).ToString();
+ return GetRegConfig()->GetGeneralRegisterName(allocation_index);
} else {
- return DoubleRegister::from_code(allocation_index).ToString();
+ return GetRegConfig()->GetDoubleRegisterName(allocation_index);
}
}
diff --git a/deps/v8/src/crankshaft/lithium-allocator.h b/deps/v8/src/crankshaft/lithium-allocator.h
index b648bd80c6..ce0e56560b 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.h
+++ b/deps/v8/src/crankshaft/lithium-allocator.h
@@ -6,6 +6,7 @@
#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
#include "src/allocation.h"
+#include "src/base/compiler-specific.h"
#include "src/crankshaft/compilation-phase.h"
#include "src/crankshaft/lithium.h"
#include "src/zone.h"
@@ -327,7 +328,7 @@ class LAllocator BASE_EMBEDDED {
public:
LAllocator(int first_virtual_register, HGraph* graph);
- static void TraceAlloc(const char* msg, ...);
+ static PRINTF_FORMAT(1, 2) void TraceAlloc(const char* msg, ...);
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index 53fedcf1df..5041de6451 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -37,6 +37,8 @@
#error Unsupported target architecture.
#endif
+#include "src/globals.h"
+
namespace v8 {
namespace internal {
@@ -45,7 +47,6 @@ HGraph* LCodeGenBase::graph() const {
return chunk()->graph();
}
-
LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
CompilationInfo* info)
: chunk_(static_cast<LPlatformChunk*>(chunk)),
@@ -61,8 +62,9 @@ LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
translations_(info->zone()),
inlined_function_count_(0),
last_lazy_deopt_pc_(0),
- osr_pc_offset_(-1) {}
-
+ osr_pc_offset_(-1),
+ source_position_table_builder_(info->zone(),
+ info->SourcePositionRecordingMode()) {}
bool LCodeGenBase::GenerateBody() {
DCHECK(is_generating());
@@ -137,6 +139,10 @@ void LCodeGenBase::CheckEnvironmentUsage() {
#endif
}
+void LCodeGenBase::RecordAndWritePosition(int pos) {
+ if (pos == kNoSourcePosition) return;
+ source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
+}
void LCodeGenBase::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
@@ -158,8 +164,9 @@ void LCodeGenBase::Comment(const char* format, ...) {
void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
SourcePosition position = deopt_info.position;
+ int deopt_id = deopt_info.deopt_id;
int raw_position = position.IsUnknown() ? 0 : position.raw();
- masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position);
+ masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position, deopt_id);
}
@@ -364,14 +371,12 @@ void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
}
-
Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
- LInstruction* instr, Deoptimizer::DeoptReason deopt_reason) {
+ LInstruction* instr, DeoptimizeReason deopt_reason, int deopt_id) {
Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
- instr->Mnemonic(), deopt_reason);
- HEnterInlined* enter_inlined = instr->environment()->entry();
- deopt_info.inlining_id = enter_inlined ? enter_inlined->inlining_id() : 0;
+ deopt_reason, deopt_id);
return deopt_info;
}
+
} // namespace internal
} // namespace v8
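The lithium-codegen.cc hunks above reroute source positions from the relocation recorder into a SourcePositionTableBuilder that is flushed into a ByteArray after codegen. At its core such a table is a monotone map from code offsets to script positions, queried by the deoptimizer and stack walkers; a minimal sketch of that behavior (not V8's delta-compressed encoding):

#include <iterator>
#include <map>

class SourcePositionSketch {
 public:
  static constexpr int kNoSourcePosition = -1;

  // Same spirit as AddPosition(pc_offset, position, is_statement) above.
  void AddPosition(int pc_offset, int source_position) {
    table_[pc_offset] = source_position;
  }

  // Position of the last entry at or before pc_offset, as a stack walker
  // or the deoptimizer would query it.
  int Lookup(int pc_offset) const {
    auto it = table_.upper_bound(pc_offset);
    if (it == table_.begin()) return kNoSourcePosition;
    return std::prev(it)->second;
  }

 private:
  std::map<int, int> table_;
};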
diff --git a/deps/v8/src/crankshaft/lithium-codegen.h b/deps/v8/src/crankshaft/lithium-codegen.h
index b1f7dac2e5..fbf96924ee 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.h
+++ b/deps/v8/src/crankshaft/lithium-codegen.h
@@ -8,10 +8,13 @@
#include "src/bailout-reason.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
+#include "src/source-position-table.h"
namespace v8 {
namespace internal {
+class HGraph;
+class LChunk;
class LEnvironment;
class LInstruction;
class LPlatformChunk;
@@ -32,23 +35,25 @@ class LCodeGenBase BASE_EMBEDDED {
Zone* zone() const { return zone_; }
LPlatformChunk* chunk() const { return chunk_; }
HGraph* graph() const;
+ SourcePositionTableBuilder* source_position_table_builder() {
+ return &source_position_table_builder_;
+ }
- void FPRINTF_CHECKING Comment(const char* format, ...);
+ void PRINTF_FORMAT(2, 3) Comment(const char* format, ...);
void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
- static Deoptimizer::DeoptInfo MakeDeoptInfo(
- LInstruction* instr, Deoptimizer::DeoptReason deopt_reason);
+ static Deoptimizer::DeoptInfo MakeDeoptInfo(LInstruction* instr,
+ DeoptimizeReason deopt_reason,
+ int deopt_id);
bool GenerateBody();
virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
- virtual void RecordAndWritePosition(int position) = 0;
+ void RecordAndWritePosition(int position);
int GetNextEmittedBlock() const;
- void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
-
void WriteTranslationFrame(LEnvironment* environment,
Translation* translation);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -83,6 +88,7 @@ class LCodeGenBase BASE_EMBEDDED {
int inlined_function_count_;
int last_lazy_deopt_pc_;
int osr_pc_offset_;
+ SourcePositionTableBuilder source_position_table_builder_;
bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index d34b04f5da..8cf3a3f0e6 100644
--- a/deps/v8/src/crankshaft/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -40,6 +40,7 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
void LOperand::PrintTo(StringStream* stream) {
LUnallocated* unalloc = NULL;
@@ -63,7 +64,7 @@ void LOperand::PrintTo(StringStream* stream) {
stream->Add("(=invalid_reg#%d)", reg_index);
} else {
const char* register_name =
- Register::from_code(reg_index).ToString();
+ GetRegConfig()->GetGeneralRegisterName(reg_index);
stream->Add("(=%s)", register_name);
}
break;
@@ -74,7 +75,7 @@ void LOperand::PrintTo(StringStream* stream) {
stream->Add("(=invalid_double_reg#%d)", reg_index);
} else {
const char* double_register_name =
- DoubleRegister::from_code(reg_index).ToString();
+ GetRegConfig()->GetDoubleRegisterName(reg_index);
stream->Add("(=%s)", double_register_name);
}
break;
@@ -110,7 +111,8 @@ void LOperand::PrintTo(StringStream* stream) {
if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
stream->Add("(=invalid_reg#%d|R)", reg_index);
} else {
- stream->Add("[%s|R]", Register::from_code(reg_index).ToString());
+ stream->Add("[%s|R]",
+ GetRegConfig()->GetGeneralRegisterName(reg_index));
}
break;
}
@@ -119,7 +121,7 @@ void LOperand::PrintTo(StringStream* stream) {
if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
stream->Add("(=invalid_double_reg#%d|R)", reg_index);
} else {
- stream->Add("[%s|R]", DoubleRegister::from_code(reg_index).ToString());
+ stream->Add("[%s|R]", GetRegConfig()->GetDoubleRegisterName(reg_index));
}
break;
}
@@ -446,9 +448,6 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
Handle<Code> LChunk::Codegen() {
MacroAssembler assembler(info()->isolate(), NULL, 0,
CodeObjectRequired::kYes);
- LOG_CODE_EVENT(info()->isolate(),
- CodeStartLinePosInfoRecordEvent(
- assembler.positions_recorder()));
// Code serializer only takes unoptimized code.
DCHECK(!info()->will_serialize());
LCodeGen generator(this, &assembler, info());
@@ -458,19 +457,18 @@ Handle<Code> LChunk::Codegen() {
if (generator.GenerateCode()) {
generator.CheckEnvironmentUsage();
CodeGenerator::MakeCodePrologue(info(), "optimized");
- Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&assembler, info());
+ Handle<Code> code = CodeGenerator::MakeCodeEpilogue(
+ &assembler, nullptr, info(), assembler.CodeObject());
generator.FinishCode(code);
CommitDependencies(code);
+ Handle<ByteArray> source_positions =
+ generator.source_position_table_builder()->ToSourcePositionTable(
+ info()->isolate(), Handle<AbstractCode>::cast(code));
+ code->set_source_position_table(*source_positions);
code->set_is_crankshafted(true);
- void* jit_handler_data =
- assembler.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(info()->isolate(),
- CodeEndLinePosInfoRecordEvent(AbstractCode::cast(*code),
- jit_handler_data));
CodeGenerator::PrintCode(code, info());
- DCHECK(!(info()->isolate()->serializer_enabled() &&
- info()->GetMustNotHaveEagerFrame() &&
+ DCHECK(!(info()->GetMustNotHaveEagerFrame() &&
generator.NeedsEagerFrame()));
return code;
}
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index f1717ca474..fa345e5173 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -34,8 +34,6 @@
#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
-
namespace v8 {
namespace internal {
@@ -184,14 +182,12 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ li(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -200,10 +196,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ mov(cp, v0);
__ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -330,8 +327,6 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ Call(&call_deopt_entry);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
@@ -745,9 +740,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1, const Operand& src2) {
LEnvironment* environment = instr->environment();
@@ -789,7 +783,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ bind(&skip);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
@@ -798,13 +792,12 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -813,10 +806,9 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
- Register src1, const Operand& src2) {
+ DeoptimizeReason deopt_reason, Register src1,
+ const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@@ -877,13 +869,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -955,7 +940,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ subu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
@@ -988,7 +973,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
__ bind(&remainder_not_zero);
}
@@ -1008,7 +993,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
Operand(zero_reg));
}
@@ -1018,7 +1003,8 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
+ Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@@ -1030,7 +1016,7 @@ void LCodeGen::DoModI(LModI* instr) {
// If we care about -0, test if the dividend is <0 and the result is 0.
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
Operand(zero_reg));
}
__ bind(&done);
@@ -1047,19 +1033,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
+ Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
+ Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1096,7 +1084,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@@ -1106,7 +1094,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Mul(scratch0(), result, Operand(divisor));
__ Subu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
Operand(zero_reg));
}
}
@@ -1126,7 +1114,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@@ -1134,7 +1122,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1144,12 +1132,12 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
Operand(zero_reg));
}
}
@@ -1196,14 +1184,15 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Subu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
+ Operand(zero_reg));
}
// Dividing by -1 is basically negation, unless we overflow.
__ Xor(scratch, scratch, result);
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+ DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
Operand(zero_reg));
}
return;
@@ -1239,7 +1228,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@@ -1284,7 +1273,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@@ -1292,7 +1281,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1302,7 +1291,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1333,7 +1322,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
+ Operand(zero_reg));
}
switch (constant) {
@@ -1351,7 +1341,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
__ mov(result, zero_reg);
@@ -1403,7 +1393,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Mul(scratch, result, left, right);
}
__ sra(at, result, 31);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
+ Operand(at));
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1418,7 +1409,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
__ bind(&done);
}
@@ -1483,7 +1474,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
Operand(zero_reg));
}
break;
@@ -1519,7 +1510,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
Operand(zero_reg));
}
__ Move(result, left);
@@ -1535,7 +1526,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch,
Operand(zero_reg));
} else {
__ sll(result, left, shift_count);
@@ -1728,13 +1719,13 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ Register scratch = scratch1();
if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
Register left_reg = ToRegister(left);
Register right_reg = EmitLoadRegister(right, scratch0());
Register result_reg = ToRegister(instr->result());
Label return_right, done;
- Register scratch = scratch1();
__ Slt(scratch, left_reg, Operand(right_reg));
if (condition == ge) {
__ Movz(result_reg, left_reg, scratch);
@@ -1749,43 +1740,19 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
FPURegister left_reg = ToDoubleRegister(left);
FPURegister right_reg = ToDoubleRegister(right);
FPURegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
- __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
- __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
- __ Branch(&return_right);
-
- __ bind(&check_zero);
- // left == right != 0.
- __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- // The algorithm is: -((-L) + (-R)), which in case of L and R being
- // different registers is most efficiently expressed as -((-L) - R).
- __ neg_d(left_reg, left_reg);
- if (left_reg.is(right_reg)) {
- __ add_d(result_reg, left_reg, right_reg);
- } else {
- __ sub_d(result_reg, left_reg, right_reg);
- }
- __ neg_d(result_reg, result_reg);
+ Label nan, done;
+ if (operation == HMathMinMax::kMathMax) {
+ __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
} else {
- __ add_d(result_reg, left_reg, right_reg);
+ DCHECK(operation == HMathMinMax::kMathMin);
+ __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
}
__ Branch(&done);
- __ bind(&check_nan_left);
- // left == NaN.
- __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
- __ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ mov_d(result_reg, right_reg);
- }
- __ Branch(&done);
+ __ bind(&nan);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ mov_d(result_reg, left_reg);
- }
__ bind(&done);
}
}
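// A sketch of the double min/max semantics that the new MinNaNCheck_d /
// MaxNaNCheck_d macro-assembler helpers are assumed to provide (plain C++,
// not MIPS code): NaN in either operand diverts to the NaN path, and zeros
// are ordered with -0 below +0, which is what the deleted hand-rolled
// -((-L) + (-R)) sequence computed.
#include <cmath>
static double MinNaNCheck(double a, double b, bool* is_nan) {
  *is_nan = std::isnan(a) || std::isnan(b);
  if (*is_nan) return 0.0;  // the caller loads the canonical NaN, as above
  if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;  // prefer -0
  return a < b ? a : b;
}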
@@ -1992,7 +1959,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
@@ -2056,7 +2023,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
+ DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
Operand(zero_reg));
}
}
@@ -2409,16 +2376,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(v0));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2445,18 +2402,18 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ And(object_instance_type, object_instance_type,
Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
Operand(zero_reg));
// Deoptimize for proxies.
__ lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
Operand(JS_PROXY_TYPE));
__ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ LoadRoot(at, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+ EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ Branch(USE_DELAY_SLOT, &loop);
__ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
}
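// A plain-C++ sketch of the reordered walk above; the Obj type is
// hypothetical. Reaching null now classifies as false before the match test
// runs, so one visible consequence of swapping EmitTrueBranch and
// EmitFalseBranch is that a null target prototype no longer matches the end
// of the chain.
struct Obj { const Obj* proto; };
static bool HasInPrototypeChain(const Obj* object, const Obj* prototype) {
  for (const Obj* p = object->proto;; p = p->proto) {
    if (p == nullptr) return false;   // null check first (EmitFalseBranch)
    if (p == prototype) return true;  // match check second (EmitTrueBranch)
  }
}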
@@ -2552,15 +2509,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2574,7 +2528,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@@ -2598,7 +2552,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@@ -2659,10 +2613,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2678,7 +2629,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@@ -2798,7 +2749,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
+ DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
result, Operand(0x80000000));
}
break;
@@ -2853,7 +2804,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
Operand(kHoleNanUpper32));
}
}
@@ -2888,11 +2839,12 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
+ Operand(scratch));
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -2905,7 +2857,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ lw(result, FieldMemOperand(result, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -2967,13 +2919,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3059,10 +3007,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
@@ -3096,7 +3044,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
@@ -3182,6 +3130,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
+ __ li(scratch0(), instr->hydrogen()->feedback_vector());
+ __ Push(scratch0());
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3247,7 +3197,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
+ Operand(at));
Label done;
Register exponent = scratch0();
@@ -3314,7 +3265,8 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
+ Operand(zero_reg));
__ bind(&done);
}
@@ -3369,7 +3321,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3378,7 +3330,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ Mfhc1(scratch1, input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@@ -3412,7 +3364,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+ DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@@ -3427,7 +3379,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
+ Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@@ -3446,7 +3399,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3455,7 +3408,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&check_sign_on_zero);
__ Mfhc1(scratch, input);
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(zero_reg));
}
__ bind(&done);
@@ -3489,10 +3442,8 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
// Math.sqrt(-Infinity) == NaN
Label done;
__ Move(temp, static_cast<double>(-V8_INFINITY));
- __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
- // Set up Infinity in the delay slot.
- // result is overwritten if the branch is not taken.
- __ neg_d(result, temp);
+ __ Neg_d(result, temp);
+ __ BranchF(&done, NULL, eq, temp, input);
// Add +0 to convert -0 to +0.
__ add_d(result, input, kDoubleRegZero);
@@ -3522,7 +3473,7 @@ void LCodeGen::DoPower(LPower* instr) {
DCHECK(!t3.is(tagged_exponent));
__ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, t3, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3536,26 +3487,32 @@ void LCodeGen::DoPower(LPower* instr) {
}
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
void LCodeGen::DoMathLog(LMathLog* instr) {
__ PrepareCallCFunction(0, 1, scratch0());
__ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
- 0, 1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
__ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
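// A sketch of the lowering shape now shared by DoMathCos/DoMathSin/DoMathExp/
// DoMathLog above: marshal the double into the FP parameter register, call a
// C routine through the platform ABI, read the FP result back. std::exp here
// is a stand-in for the fdlibm-derived routine behind
// ExternalReference::ieee754_exp_function; only the call shape mirrors the
// diff.
#include <cmath>
static double ieee754_exp_stand_in(double x) { return std::exp(x); }
double LoweredMathExp(double input) {
  // PrepareCallCFunction / MovToFloatParameter / CallCFunction /
  // MovFromFloatResult collapse into an ordinary call in C++.
  return ieee754_exp_stand_in(input);
}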
@@ -3578,7 +3535,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3685,14 +3644,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ li(a2, instr->hydrogen()->site());
- } else {
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- }
+ __ li(a2, instr->hydrogen()->site());
+
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3724,7 +3677,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -3846,14 +3799,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3876,7 +3827,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
}
}
@@ -4074,13 +4025,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4159,14 +4108,21 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+ LConstantOperand* constant_key = LConstantOperand::cast(key);
+ int32_t int_key = ToInteger32(constant_key);
+ if (Smi::IsValid(int_key)) {
+ __ li(a3, Operand(Smi::FromInt(int_key)));
+ } else {
+ // We should never get here at runtime because there is a smi check on
+ // the key before this point.
+ __ stop("expected smi");
+ }
} else {
__ mov(a3, ToRegister(key));
__ SmiTag(a3);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ mov(a0, result);
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
@@ -4176,7 +4132,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
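// A sketch of the Smi::IsValid guard used a few lines above, assuming the
// 32-bit tagging scheme of this port: one tag bit leaves a 31-bit signed
// payload, so any int32 outside that range cannot be represented as a Smi.
#include <cstdint>
constexpr int kSmiTagSize = 1;  // assumption: one tag bit on mips32
constexpr int32_t kSmiMaxValue = INT32_MAX >> kSmiTagSize;
constexpr int32_t kSmiMinValue = -kSmiMaxValue - 1;
inline bool SmiIsValid(int32_t value) {
  return value >= kSmiMinValue && value <= kSmiMaxValue;
}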
@@ -4208,8 +4164,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(this);
__ li(a1, Operand(to_map));
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -4456,7 +4411,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
if (FLAG_inline_new) {
__ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
__ Branch(&done);
}
@@ -4470,26 +4425,20 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!dst.is(cp)) {
+ __ mov(cp, zero_reg);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, dst);
}
-
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
- __ Addu(dst, dst, kHeapObjectTag);
+ __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
@@ -4514,16 +4463,13 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
} else {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+ __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Now that we have finished with the object's real address tag it
- __ Addu(reg, reg, kHeapObjectTag);
}
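// The addressing identity relied on in the two hunks above: with
// AllocateHeapNumber now returning a tagged pointer, the sdc1 store switches
// from MemOperand on an untagged base (plus a later re-tagging Addu) to
// FieldMemOperand, which folds the tag into the displacement. kHeapObjectTag
// is 1 in V8; the helper itself is illustrative.
constexpr int kHeapObjectTag = 1;
constexpr int FieldOffset(int untagged_offset) {
  return untagged_offset - kHeapObjectTag;  // same byte, tagged base register
}
static_assert(FieldOffset(8) == 7, "tagged and untagged addressing agree");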
@@ -4535,16 +4481,13 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, zero_reg);
PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(cp)) {
+ __ mov(cp, zero_reg);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4556,12 +4499,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@@ -4577,7 +4520,8 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
+ Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
@@ -4602,7 +4546,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
}
// Load heap number.
@@ -4611,7 +4555,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ mfc1(at, result_reg.low());
__ Branch(&done, ne, at, Operand(zero_reg));
__ Mfhc1(scratch, result_reg);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
@@ -4619,8 +4563,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
- Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
+ input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
@@ -4684,12 +4628,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
Operand(at));
// Load the double value.
@@ -4705,7 +4649,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -4713,7 +4657,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ Mfhc1(scratch1, double_scratch);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
}
}
@@ -4790,7 +4734,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -4798,7 +4742,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@@ -4825,7 +4769,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -4833,20 +4777,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ Mfhc1(scratch1, double_input);
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
}
__ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, scratch1,
+ Operand(zero_reg));
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
}
@@ -4854,7 +4799,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
}
@@ -4867,7 +4812,8 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
+ Operand(zero_reg));
}
@@ -4884,14 +4830,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
} else {
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(last));
}
}
@@ -4903,11 +4849,11 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr,
+ DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(tag));
}
}
@@ -4923,9 +4869,10 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(cell));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
+ Operand(object));
}
}
@@ -4941,7 +4888,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
Operand(zero_reg));
}
@@ -4996,7 +4943,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
}
__ bind(&success);
@@ -5034,7 +4981,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@@ -5053,25 +5000,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ FmoveHigh(result_reg, value_reg);
- } else {
- __ FmoveLow(result_reg, value_reg);
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- __ Move(result_reg, lo_reg, hi_reg);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
@@ -5092,7 +5020,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register scratch2 = ToRegister(instr->temp2());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -5100,6 +5028,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5168,6 +5102,49 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+    // If the allocation folding dominator allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ Register top_address = scratch0();
+ __ Subu(v0, v0, Operand(kHeapObjectTag));
+ __ li(top_address, Operand(allocation_top));
+ __ sw(v0, MemOperand(top_address));
+ __ Addu(v0, v0, Operand(kHeapObjectTag));
+ }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+ Register result = ToRegister(instr->result());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ }
}
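// A plain-C++ sketch of the allocation-folding contract the paths above
// implement: the dominator performs the one limit-checked reservation for
// the whole group, each folded LFastAllocate bump-allocates inside it with
// no limit check, and the deferred path rewinds the top pointer after a GC
// so folded allocations still land in freshly reserved space. The struct is
// hypothetical.
#include <cstddef>
struct FoldedRegion { char* top; char* limit; };
inline void* FastAllocate(FoldedRegion& r, std::size_t size) {
  char* result = r.top;
  r.top += size;  // no limit check: the dominator reserved the group total
  return result;
}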
@@ -5180,8 +5157,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ li(v0, Operand(isolate()->factory()->number_string()));
__ jmp(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5470,7 +5447,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
+ Operand(zero_reg));
__ bind(&done);
}
@@ -5480,7 +5458,8 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
+ Operand(scratch0()));
}
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index 7a316e5957..d51f62c90f 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -134,8 +134,6 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratchReg; }
@@ -227,14 +225,14 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(
- Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
- Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -253,7 +251,7 @@ class LCodeGen: public LCodeGenBase {
void EmitIntegerMathAbs(LMathAbs* instr);
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -264,8 +262,6 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index 71c34df516..a7880eee87 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -887,7 +887,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -939,17 +939,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1085,6 +1074,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DoMathAbs(instr);
case kMathLog:
return DoMathLog(instr);
+ case kMathCos:
+ return DoMathCos(instr);
+ case kMathSin:
+ return DoMathSin(instr);
case kMathExp:
return DoMathExp(instr);
case kMathSqrt:
@@ -1114,16 +1107,25 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr);
+}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = TempDoubleRegister();
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr);
}
@@ -1925,20 +1927,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -1970,14 +1958,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -2021,10 +2004,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
@@ -2096,10 +2076,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -2161,12 +2138,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result =
new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2255,12 +2228,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
@@ -2298,13 +2267,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
+ if (instr->IsAllocationFolded()) {
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index 7d41093be1..9711c9a6db 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -53,7 +53,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -62,11 +61,11 @@ class LCodeGen;
V(DivByPowerOf2I) \
V(DivI) \
V(DoubleToI) \
- V(DoubleBits) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -78,7 +77,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -97,6 +95,8 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(MathAbs) \
+ V(MathCos) \
+ V(MathSin) \
V(MathExp) \
V(MathClz32) \
V(MathFloor) \
@@ -882,24 +882,29 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
@@ -1113,22 +1118,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -1547,18 +1536,14 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2131,6 +2116,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2302,33 +2289,6 @@ class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LAllocate final : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2350,6 +2310,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
@@ -2504,6 +2479,8 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index c7bbe9f07a..65e922848b 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -10,7 +10,6 @@
#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -159,14 +158,12 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ li(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -175,10 +172,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ mov(cp, v0);
__ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -315,8 +313,6 @@ bool LCodeGen::GenerateJumpTable() {
__ BranchAndLink(&call_deopt_entry);
}
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
@@ -732,9 +728,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1, const Operand& src2) {
LEnvironment* environment = instr->environment();
@@ -776,7 +771,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ bind(&skip);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
@@ -785,14 +780,13 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry* table_entry =
new (zone()) Deoptimizer::JumpTableEntry(
entry, deopt_info, bailout_type, !frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry->IsEquivalentTo(*jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -801,10 +795,9 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
- Register src1, const Operand& src2) {
+ DeoptimizeReason deopt_reason, Register src1,
+ const Operand& src2) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@@ -865,13 +858,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -943,7 +929,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ dsubu(dividend, zero_reg, dividend);
__ And(dividend, dividend, Operand(mask));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
__ Branch(USE_DELAY_SLOT, &done);
@@ -963,7 +949,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -976,7 +962,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
__ bind(&remainder_not_zero);
}
@@ -996,7 +982,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0, we have to deopt in this case because we can't return a
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, right_reg,
Operand(zero_reg));
}
@@ -1006,7 +992,8 @@ void LCodeGen::DoModI(LModI* instr) {
Label no_overflow_possible;
__ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, right_reg,
+ Operand(-1));
} else {
__ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
__ Branch(USE_DELAY_SLOT, &done);
@@ -1019,7 +1006,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ Branch(&done, ge, left_reg, Operand(zero_reg));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result_reg,
Operand(zero_reg));
}
__ bind(&done);
@@ -1036,19 +1023,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, dividend,
+ Operand(kMinInt));
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ And(at, dividend, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, at,
+ Operand(zero_reg));
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1078,14 +1067,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@@ -1095,7 +1084,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ Dmul(scratch0(), result, Operand(divisor));
__ Dsubu(scratch0(), scratch0(), dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, scratch0(),
Operand(zero_reg));
}
}
@@ -1114,7 +1103,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@@ -1122,7 +1111,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1132,7 +1121,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1144,7 +1133,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
} else {
__ dmod(remainder, dividend, divisor);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, remainder,
Operand(zero_reg));
}
}
@@ -1170,7 +1159,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
DCHECK(!result.is(dividend) || !scratch.is(dividend));
// If the divisor is 1, return the dividend.
- if (divisor == 1) {
+  if (divisor == 0 || divisor == 1) {
__ Move(result, dividend);
return;
}
@@ -1190,14 +1179,16 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ Dsubu(result, zero_reg, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
+ Operand(zero_reg));
}
__ Xor(scratch, scratch, result);
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
+ DeoptimizeIf(gt, instr, DeoptimizeReason::kOverflow, result,
+ Operand(kMaxInt));
}
return;
}
@@ -1225,14 +1216,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, dividend,
Operand(zero_reg));
}
@@ -1277,7 +1268,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero, divisor,
Operand(zero_reg));
}
@@ -1285,7 +1276,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, divisor,
Operand(zero_reg));
__ bind(&left_not_zero);
}
@@ -1295,7 +1286,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow, divisor, Operand(-1));
__ bind(&left_not_min_int);
}
@@ -1332,7 +1323,8 @@ void LCodeGen::DoMulS(LMulS* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
+ Operand(zero_reg));
}
switch (constant) {
@@ -1350,7 +1342,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
__ mov(result, zero_reg);
@@ -1398,7 +1390,8 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ dsra32(scratch, result, 0);
__ sra(at, result, 31);
__ SmiTag(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
+ Operand(at));
} else {
__ SmiUntag(result, left);
__ dmul(result, result, right);
@@ -1409,7 +1402,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
__ bind(&done);
}
@@ -1434,7 +1427,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, left,
+ Operand(zero_reg));
}
switch (constant) {
@@ -1452,7 +1446,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, left,
Operand(zero_reg));
}
__ mov(result, zero_reg);
@@ -1501,7 +1495,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ dsra32(scratch, result, 0);
__ sra(at, result, 31);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, scratch,
+ Operand(at));
} else {
__ mul(result, left, right);
}
@@ -1511,7 +1506,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Xor(at, left, right);
__ Branch(&done, ge, at, Operand(zero_reg));
// Bail out if the result is minus zero.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, result,
Operand(zero_reg));
__ bind(&done);
}
@@ -1576,10 +1571,10 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ srlv(result, left, ToRegister(right_op));
if (instr->can_deopt()) {
// TODO(yy): (-1) >>> 0. anything else?
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
- Operand(zero_reg));
- DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
- Operand(kMaxInt));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, result,
+ Operand(zero_reg));
+ DeoptimizeIf(gt, instr, DeoptimizeReason::kNegativeValue, result,
+ Operand(kMaxInt));
}
break;
case Token::SHL:
@@ -1614,7 +1609,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ And(at, left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue, at,
Operand(zero_reg));
}
__ Move(result, left);
@@ -1846,13 +1841,13 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+ Register scratch = scratch1();
if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+ Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
Register left_reg = ToRegister(left);
Register right_reg = EmitLoadRegister(right, scratch0());
Register result_reg = ToRegister(instr->result());
Label return_right, done;
- Register scratch = scratch1();
__ Slt(scratch, left_reg, Operand(right_reg));
if (condition == ge) {
__ Movz(result_reg, left_reg, scratch);
@@ -1867,43 +1862,19 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
FPURegister left_reg = ToDoubleRegister(left);
FPURegister right_reg = ToDoubleRegister(right);
FPURegister result_reg = ToDoubleRegister(instr->result());
- Label check_nan_left, check_zero, return_left, return_right, done;
- __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
- __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
- __ Branch(&return_right);
-
- __ bind(&check_zero);
- // left == right != 0.
- __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
- // At this point, both left and right are either 0 or -0.
- if (operation == HMathMinMax::kMathMin) {
- // The algorithm is: -((-L) + (-R)), which in case of L and R being
- // different registers is most efficiently expressed as -((-L) - R).
- __ neg_d(left_reg, left_reg);
- if (left_reg.is(right_reg)) {
- __ add_d(result_reg, left_reg, right_reg);
- } else {
- __ sub_d(result_reg, left_reg, right_reg);
- }
- __ neg_d(result_reg, result_reg);
+ Label nan, done;
+ if (operation == HMathMinMax::kMathMax) {
+ __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
} else {
- __ add_d(result_reg, left_reg, right_reg);
+ DCHECK(operation == HMathMinMax::kMathMin);
+ __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
}
__ Branch(&done);
- __ bind(&check_nan_left);
- // left == NaN.
- __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
- __ bind(&return_right);
- if (!right_reg.is(result_reg)) {
- __ mov_d(result_reg, right_reg);
- }
- __ Branch(&done);
+ __ bind(&nan);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&return_left);
- if (!left_reg.is(result_reg)) {
- __ mov_d(result_reg, left_reg);
- }
__ bind(&done);
}
}
@@ -2110,7 +2081,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
@@ -2174,7 +2145,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
+ DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
Operand(zero_reg));
}
}
@@ -2529,18 +2500,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- Label true_label, done;
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2567,17 +2526,17 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ And(object_instance_type, object_instance_type,
Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
Operand(zero_reg));
__ lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
Operand(JS_PROXY_TYPE));
__ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
- EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ LoadRoot(at, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+ EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ Branch(&loop, USE_DELAY_SLOT);
__ ld(object_map, FieldMemOperand(object_prototype,
HeapObject::kMapOffset)); // In delay slot.
@@ -2674,15 +2633,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(v0));
- __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2696,7 +2652,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
} else {
Label is_not_hole;
__ Branch(&is_not_hole, ne, result, Operand(at));
@@ -2720,7 +2676,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch, Operand(at));
} else {
__ Branch(&skip_assignment, ne, scratch, Operand(at));
}
@@ -2796,10 +2752,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2815,7 +2768,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result, Operand(at));
// If the function does not have an initial map, we're done.
Label done;
@@ -2945,7 +2898,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case UINT32_ELEMENTS:
__ lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
+ DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
result, Operand(0x80000000));
}
break;
@@ -3008,7 +2961,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ FmoveHigh(scratch, result);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, scratch,
Operand(static_cast<int32_t>(kHoleNanUpper32)));
}
}
@@ -3062,11 +3015,12 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (hinstr->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
Operand(zero_reg));
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole, result,
+ Operand(scratch));
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -3080,7 +3034,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
// The comparison only needs LS bits of value, which is a smi.
__ ld(result, FieldMemOperand(result, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -3148,13 +3102,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3244,10 +3194,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, scratch, Operand(zero_reg));
__ GetObjectType(receiver, scratch, scratch);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject, scratch,
Operand(FIRST_JS_RECEIVER_TYPE));
__ Branch(&result_in_receiver);
@@ -3281,7 +3231,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments, length,
Operand(kArgumentsLimit));
// Push the receiver and use the register to keep the original
@@ -3367,6 +3317,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ li(scratch0(), instr->hydrogen()->pairs());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
+ __ li(scratch0(), instr->hydrogen()->feedback_vector());
+ __ Push(scratch0());
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3432,7 +3384,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
+ Operand(at));
Label done;
Register exponent = scratch0();
@@ -3499,7 +3452,8 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ subu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000.
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
+ Operand(zero_reg));
__ bind(&done);
}
@@ -3513,7 +3467,8 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
__ mov(result, input);
__ dsubu(result, zero_reg, input);
// Overflow if result is still negative, i.e. 0x80000000 00000000.
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, result,
+ Operand(zero_reg));
__ bind(&done);
}
@@ -3570,7 +3525,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
except_flag);
// Deopt if the operation did not succeed.
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3579,7 +3534,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ Branch(&done, ne, result, Operand(zero_reg));
__ mfhc1(scratch1, input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@@ -3613,7 +3568,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// The following conversion will not work with numbers
// outside of ]-2^32, 2^32[.
- DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+ DeoptimizeIf(ge, instr, DeoptimizeReason::kOverflow, scratch,
Operand(HeapNumber::kExponentBias + 32));
// Save the original sign for later comparison.
@@ -3631,7 +3586,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero, result,
+ Operand(zero_reg));
} else {
Label skip2;
// ARM uses 'mi' here, which is 'lt'
@@ -3650,7 +3606,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
double_scratch1,
except_flag);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3659,7 +3615,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ bind(&check_sign_on_zero);
__ mfhc1(scratch, input); // Get exponent/sign bits.
__ And(scratch, scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(zero_reg));
}
__ bind(&done);
@@ -3693,10 +3649,8 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
// Math.sqrt(-Infinity) == NaN
Label done;
__ Move(temp, static_cast<double>(-V8_INFINITY));
- __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
- // Set up Infinity in the delay slot.
- // result is overwritten if the branch is not taken.
- __ neg_d(result, temp);
+ __ Neg_d(result, temp);
+ __ BranchF(&done, NULL, eq, temp, input);
// Add +0 to convert -0 to +0.
__ add_d(result, input, kDoubleRegZero);
@@ -3726,7 +3680,7 @@ void LCodeGen::DoPower(LPower* instr) {
DCHECK(!a7.is(tagged_exponent));
   __ ld(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at));
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3740,26 +3694,32 @@ void LCodeGen::DoPower(LPower* instr) {
}
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
void LCodeGen::DoMathLog(LMathLog* instr) {
__ PrepareCallCFunction(0, 1, scratch0());
__ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
- 0, 1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
__ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
@@ -3782,7 +3742,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3886,14 +3848,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(v0));
__ li(a0, Operand(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ li(a2, instr->hydrogen()->site());
- } else {
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- }
+ __ li(a2, instr->hydrogen()->site());
+
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3925,7 +3881,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4056,14 +4012,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4086,7 +4040,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds, reg, operand);
}
}
@@ -4306,13 +4260,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4391,14 +4343,21 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+ LConstantOperand* constant_key = LConstantOperand::cast(key);
+ int32_t int_key = ToInteger32(constant_key);
+ if (Smi::IsValid(int_key)) {
+ __ li(a3, Operand(Smi::FromInt(int_key)));
+ } else {
+ // We should never get here at runtime because there is a smi check on
+ // the key before this point.
+ __ stop("expected smi");
+ }
} else {
__ mov(a3, ToRegister(key));
__ SmiTag(a3);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ mov(a0, result);
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
@@ -4408,7 +4367,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result, at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
@@ -4440,8 +4399,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
PushSafepointRegistersScope scope(this);
__ li(a1, Operand(to_map));
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kLazyDeopt);
@@ -4455,7 +4413,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4662,7 +4620,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
if (FLAG_inline_new) {
__ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
__ Branch(&done);
}
@@ -4675,13 +4633,10 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
__ mov(dst, zero_reg);
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!dst.is(cp)) {
+ __ mov(cp, zero_reg);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4717,15 +4672,12 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
// We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
} else {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
- // Now that we have finished with the object's real address tag it
- __ Daddu(reg, reg, kHeapObjectTag);
+ __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}
@@ -4737,16 +4689,13 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ mov(reg, zero_reg);
PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(cp)) {
+ __ mov(cp, zero_reg);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- __ Dsubu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4758,12 +4707,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ And(at, input, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, at);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, at, Operand(zero_reg));
} else {
__ SmiTag(output, input);
}
@@ -4779,7 +4728,8 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ And(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, scratch,
+ Operand(zero_reg));
} else {
__ SmiUntag(result, input);
}
@@ -4804,7 +4754,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
}
// Load heap number.
@@ -4813,7 +4763,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
__ mfhc1(scratch, result_reg); // Get exponent/sign bits.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, scratch,
Operand(HeapNumber::kSignMask));
}
__ Branch(&done);
@@ -4821,8 +4771,8 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
- Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
+ input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
@@ -4886,12 +4836,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
scratch2, Operand(at));
__ Branch(USE_DELAY_SLOT, &done);
__ mov(input_reg, zero_reg); // In delay slot.
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
Operand(at));
// Load the double value.
@@ -4907,7 +4857,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
except_flag,
kCheckForInexactConversion);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -4915,7 +4865,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ mfhc1(scratch1, double_scratch); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
}
}
@@ -4992,7 +4942,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -5000,7 +4950,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@@ -5027,7 +4977,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN, except_flag,
Operand(zero_reg));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -5035,7 +4985,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ Branch(&done, ne, result_reg, Operand(zero_reg));
__ mfhc1(scratch1, double_input); // Get exponent/sign bits.
__ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero, scratch1,
Operand(zero_reg));
__ bind(&done);
}
@@ -5047,7 +4997,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, at, Operand(zero_reg));
}
@@ -5055,7 +5005,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input), at);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
}
@@ -5068,7 +5018,8 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
+ Operand(zero_reg));
}
@@ -5085,14 +5036,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
} else {
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(first));
// Omit check for the last type.
if (last != LAST_TYPE) {
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(last));
}
}
@@ -5104,11 +5055,11 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ And(at, scratch, mask);
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
- at, Operand(zero_reg));
+ DeoptimizeIf(tag == 0 ? ne : eq, instr,
+ DeoptimizeReason::kWrongInstanceType, at, Operand(zero_reg));
} else {
__ And(scratch, scratch, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType, scratch,
Operand(tag));
}
}
@@ -5124,9 +5075,10 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(cell));
__ ld(at, FieldMemOperand(at, Cell::kValueOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
+ Operand(object));
}
}
@@ -5142,7 +5094,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(v0, scratch0());
}
__ SmiTst(scratch0(), at);
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
Operand(zero_reg));
}
@@ -5197,7 +5149,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ Branch(deferred->entry(), ne, map_reg, Operand(map));
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map_reg, Operand(map));
}
__ bind(&success);
@@ -5235,7 +5187,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined, input_reg,
Operand(factory()->undefined_value()));
__ mov(result_reg, zero_reg);
__ jmp(&done);
@@ -5254,25 +5206,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ FmoveHigh(result_reg, value_reg);
- } else {
- __ FmoveLow(result_reg, value_reg);
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- __ Move(result_reg, lo_reg, hi_reg);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
@@ -5293,7 +5226,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register scratch2 = ToRegister(instr->temp2());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -5301,6 +5234,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
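
Two things happen in the hunk above: TAG_OBJECT disappears as the default (NO_ALLOCATION_FLAGS takes its place), and the allocation-folding bit is OR-ed in with the usual static_cast dance, which is needed because operator| on an unscoped enum yields plain int. A small sketch of that idiom; the enumerators mirror the names in the diff, but the bit values are illustrative:

    #include <cstdio>

    enum AllocationFlags {
      NO_ALLOCATION_FLAGS          = 0,
      DOUBLE_ALIGNMENT             = 1 << 0,
      PRETENURE                    = 1 << 1,
      ALLOCATION_FOLDING_DOMINATOR = 1 << 2,
      ALLOCATION_FOLDED            = 1 << 3,
    };

    int main() {
      AllocationFlags flags = NO_ALLOCATION_FLAGS;
      // operator| on an unscoped enum yields int, hence the cast back.
      flags = static_cast<AllocationFlags>(flags | PRETENURE);
      flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
      std::printf("flags = %d\n", flags);  // prints 6 with these sketch values
      return 0;
    }
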
@@ -5371,6 +5310,49 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(v0, result);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ // If the allocation folding dominator allocation triggered a GC, the
+ // allocation happened in the runtime. We have to reset the top pointer to
+ // virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ Register top_address = scratch0();
+ __ Dsubu(v0, v0, Operand(kHeapObjectTag));
+ __ li(top_address, Operand(allocation_top));
+ __ sd(v0, MemOperand(top_address));
+ __ Daddu(v0, v0, Operand(kHeapObjectTag));
+ }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+ Register result = ToRegister(instr->result());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ }
}
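
DoFastAllocate is the new fast path for allocations whose memory was already reserved by their allocation-folding dominator: a plain bump of the allocation top with no deferred runtime fallback, which is also why it needs no pointer map. A toy model of the idea, deliberately simplified (the real reservation bookkeeping, and the runtime-fallback top rewind in the hunk above, are subtler):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Space {
      uintptr_t top;
      uintptr_t limit;
    };

    // Dominator path (sketch): limit-check once for the whole folded group,
    // hand out the first object, and leave top inside the reservation.
    bool Allocate(Space* s, size_t object_size, size_t group_size,
                  uintptr_t* result) {
      if (s->top + group_size > s->limit) return false;  // runtime fallback
      *result = s->top;
      s->top += object_size;
      return true;
    }

    // Folded path (sketch of FastAllocate): unconditional bump, valid only
    // because the dominator already reserved the memory.
    uintptr_t FastAllocate(Space* s, size_t size) {
      uintptr_t result = s->top;
      s->top += size;
      assert(result + size <= s->limit);
      return result;
    }
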
@@ -5383,8 +5365,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ li(v0, Operand(isolate()->factory()->number_string()));
__ jmp(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5674,7 +5656,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ld(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
+ Operand(zero_reg));
__ bind(&done);
}
@@ -5684,7 +5667,8 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
+ Operand(scratch0()));
}
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index 4a700bd66c..41d8b2c031 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -136,8 +136,6 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratchReg; }
@@ -229,14 +227,14 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
Register src1 = zero_reg,
const Operand& src2 = Operand(zero_reg));
- void DeoptimizeIf(
- Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
- Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
+ void DeoptimizeIf(Condition condition, LInstruction* instr,
+ DeoptimizeReason deopt_reason = DeoptimizeReason::kNoReason,
+ Register src1 = zero_reg,
+ const Operand& src2 = Operand(zero_reg));
void AddToTranslation(LEnvironment* environment,
Translation* translation,
@@ -256,7 +254,7 @@ class LCodeGen: public LCodeGenBase {
void EmitIntegerMathAbs(LMathAbs* instr);
void EmitSmiMathAbs(LMathAbs* instr);
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -267,8 +265,6 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index bcfbc249d2..922f12ada8 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -887,7 +887,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -939,17 +939,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1085,6 +1074,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DoMathAbs(instr);
case kMathLog:
return DoMathLog(instr);
+ case kMathCos:
+ return DoMathCos(instr);
+ case kMathSin:
+ return DoMathSin(instr);
case kMathExp:
return DoMathExp(instr);
case kMathSqrt:
@@ -1114,16 +1107,25 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), f4), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), f4), instr);
+}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = TempDoubleRegister();
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), f4), instr);
}
@@ -1928,20 +1930,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), cp)
@@ -1973,14 +1961,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -2024,10 +2007,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
@@ -2100,10 +2080,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
@@ -2166,12 +2143,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result =
new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2260,12 +2233,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
@@ -2303,13 +2272,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
+ if (instr->IsAllocationFolded()) {
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index 41cf93c2a4..f8b5c48885 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -55,7 +55,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -64,11 +63,11 @@ class LCodeGen;
V(DivByPowerOf2I) \
V(DivI) \
V(DoubleToI) \
- V(DoubleBits) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -80,7 +79,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -99,6 +97,8 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(MathAbs) \
+ V(MathCos) \
+ V(MathSin) \
V(MathExp) \
V(MathClz32) \
V(MathFloor) \
@@ -900,24 +900,29 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
@@ -1131,22 +1136,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -1609,18 +1598,14 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2177,6 +2162,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2348,33 +2335,6 @@ class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LAllocate final : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context,
@@ -2396,6 +2356,21 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
@@ -2550,6 +2525,8 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
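
The LTemplateInstruction<R, I, T> parameters visible throughout the header fix the number of results, inputs, and temps at compile time, so LFastAllocate's <1, 1, 2> means one result, the size input, and two scratch registers. An illustrative skeleton of that pattern, not the real class hierarchy:

    struct LOperand {};

    template <int R, int I, int T>
    struct LTemplateInstruction {
      LOperand* results_[R == 0 ? 1 : R];
      LOperand* inputs_[I == 0 ? 1 : I];
      LOperand* temps_[T == 0 ? 1 : T];
    };

    struct LFastAllocate : LTemplateInstruction<1, 1, 2> {
      LFastAllocate(LOperand* size, LOperand* t1, LOperand* t2) {
        inputs_[0] = size;
        temps_[0] = t1;
        temps_[1] = t2;
      }
      LOperand* size() { return inputs_[0]; }
    };

    int main() {
      LOperand size, t1, t2;
      LFastAllocate alloc(&size, &t1, &t2);
      return alloc.size() == &size ? 0 : 1;
    }
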
diff --git a/deps/v8/src/crankshaft/ppc/OWNERS b/deps/v8/src/crankshaft/ppc/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/crankshaft/ppc/OWNERS
+++ b/deps/v8/src/crankshaft/ppc/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index d5d01043dd..fa1f430c58 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -11,7 +11,6 @@
#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -170,14 +169,12 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ push(r4);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
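
The prologue change above replaces the per-slot-count FastNewContextStub, which fell back to the runtime past kMaximumSlots, with a single FastNewFunctionContextStub that receives the slot count in a register. A toy illustration of the two shapes, not the actual stub interface:

    #include <cstdio>
    #include <vector>

    // Old shape: one specialized fast path per slot count, compiled separately.
    template <int kSlots>
    std::vector<int> NewContextSpecialized() { return std::vector<int>(kSlots); }

    // New shape: a single fast path taking the count as an ordinary argument,
    // the way FastNewFunctionContextStub now takes it in SlotsRegister().
    std::vector<int> NewFunctionContext(int slot_count) {
      return std::vector<int>(slot_count);
    }

    int main() {
      std::printf("%zu %zu\n", NewContextSpecialized<10>().size(),
                  NewFunctionContext(10).size());
      return 0;
    }
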
@@ -186,10 +183,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ mr(cp, r3);
__ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -326,8 +324,6 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ b(&call_deopt_entry, SetLK);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
@@ -713,9 +709,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
-
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
CRegister cr) {
LEnvironment* environment = instr->environment();
@@ -755,7 +750,7 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
__ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
@@ -763,13 +758,12 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -778,10 +772,8 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
- CRegister cr) {
+ DeoptimizeReason deopt_reason, CRegister cr) {
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
@@ -837,13 +829,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -910,12 +895,12 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ ExtractBitRange(dividend, dividend, shift - 1, 0);
__ neg(dividend, dividend, LeaveOE, SetRC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
}
} else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ li(dividend, Operand::Zero());
} else {
- DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done);
}
@@ -937,7 +922,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -952,7 +937,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ bne(&remainder_not_zero, cr0);
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -977,14 +962,14 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, divw will return undefined, which is not what we
// want. We have to deopt if we care about -0, because we can't return that.
if (can_overflow) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero, cr0);
} else {
if (CpuFeatures::IsSupported(ISELECT)) {
__ isel(overflow, result_reg, r0, result_reg, cr0);
@@ -1006,7 +991,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bne(&done, cr0);
__ cmpwi(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
@@ -1024,13 +1009,13 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1038,7 +1023,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
__ TestBitRange(dividend, shift - 1, 0, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1068,7 +1053,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1076,7 +1061,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1087,7 +1072,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ mullw(scratch, result, ip);
__ cmpw(scratch, dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1113,7 +1098,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1122,14 +1107,14 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
if (CpuFeatures::IsSupported(ISELECT)) {
@@ -1143,12 +1128,16 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
}
+#if V8_TARGET_ARCH_PPC64
+ __ extsw(result, result);
+#endif
+
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
Register scratch = scratch0();
__ mullw(scratch, divisor, result);
__ cmpw(dividend, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1176,7 +1165,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
if (divisor == -1 && can_overflow) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
#else
if (can_overflow) {
@@ -1188,7 +1177,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
__ neg(result, dividend, oe, SetRC);
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
}
// If the negation could not overflow, simply shifting is OK.
@@ -1204,7 +1193,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
return;
}
@@ -1226,7 +1215,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1234,7 +1223,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1286,7 +1275,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1295,14 +1284,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
if (can_overflow) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
if (CpuFeatures::IsSupported(ISELECT)) {
@@ -1336,6 +1325,9 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// We performed a truncating division. Correct the result.
__ subi(result, result, Operand(1));
__ bind(&done);
+#if V8_TARGET_ARCH_PPC64
+ __ extsw(result, result);
+#endif
}
@@ -1377,7 +1369,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmpi(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
switch (constant) {
@@ -1389,12 +1381,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
__ neg(result, left, SetOE, SetRC);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
#if V8_TARGET_ARCH_PPC64
} else {
__ neg(result, left);
__ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@@ -1414,7 +1406,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ cmpwi(left, Operand::Zero());
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ li(result, Operand::Zero());
break;
@@ -1467,7 +1459,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Mul(result, left, right);
}
__ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
@@ -1482,7 +1474,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ mullw(result, left, right);
}
__ TestIfInt32(scratch, result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
#endif
} else {
if (instr->hydrogen()->representation().IsSmi()) {
@@ -1509,7 +1501,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
#endif
// Bail out if the result is minus zero.
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -1595,7 +1587,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
#if V8_TARGET_ARCH_PPC64
__ extsw(result, result, SetRC);
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
} else {
__ srw(result, left, scratch);
}
@@ -1635,7 +1627,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ cmpwi(left, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
}
__ Move(result, left);
}
@@ -1654,7 +1646,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
} else {
__ slwi(result, left, Operand(shift_count));
@@ -1693,7 +1685,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
#if V8_TARGET_ARCH_PPC64
if (can_overflow) {
__ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@@ -1704,7 +1696,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
}
}
@@ -1870,7 +1862,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
#if V8_TARGET_ARCH_PPC64
if (can_overflow) {
__ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@@ -1881,7 +1873,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
scratch0(), r0);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
}
}
@@ -2132,7 +2124,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
const Register map = scratch0();
@@ -2196,7 +2188,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -2555,16 +2547,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(r3));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2590,16 +2572,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ lbz(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, eq);
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq);
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
__ LoadP(object_map,
FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ b(&loop);
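
The reordering in DoHasInPrototypeChainAndBranch tests for the end of the chain (null) before comparing against the searched-for prototype. A plain C++ sketch of the loop; the null-first order is the point, and the object layout is illustrative:

    struct Object {
      Object* prototype;  // null terminates the chain
    };

    bool HasInPrototypeChain(Object* object, Object* target) {
      for (Object* p = object->prototype; ; p = p->prototype) {
        if (p == nullptr) return false;  // chain end tested first, as above
        if (p == target) return true;
      }
    }
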
@@ -2705,15 +2687,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(r3));
- __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
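
DoLoadGlobalGeneric no longer materializes a receiver or the name register: the new LoadGlobalIC is driven by its feedback-vector slot, which already identifies the global being loaded. A toy model of that lookup, illustrative only (a real IC misses to the runtime when its cached state is stale):

    #include <cstdio>
    #include <vector>

    struct Slot { const char* name; int cached_value; };

    int LoadGlobalIC(const std::vector<Slot>& feedback_vector, int slot_index) {
      const Slot& s = feedback_vector[slot_index];
      return s.cached_value;
    }

    int main() {
      std::vector<Slot> vec = {{"x", 1}, {"y", 2}};
      std::printf("%d\n", LoadGlobalIC(vec, 1));  // 2
      return 0;
    }
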
@@ -2726,7 +2705,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
if (CpuFeatures::IsSupported(ISELECT)) {
Register scratch = scratch0();
@@ -2758,7 +2737,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ bne(&skip_assignment);
}
@@ -2828,10 +2807,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r5.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2848,7 +2824,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
if (CpuFeatures::IsSupported(ISELECT)) {
@@ -3000,7 +2976,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmplw(result, r0);
- DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3065,7 +3041,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
}
__ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
}
@@ -3116,11 +3092,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
__ TestIfSmi(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -3135,7 +3111,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
__ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@@ -3189,14 +3165,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3290,9 +3261,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3326,7 +3297,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpli(length, Operand(kArgumentsLimit));
- DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3411,6 +3382,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
__ push(scratch0());
+ __ Move(scratch0(), instr->hydrogen()->feedback_vector());
+ __ push(scratch0());
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3478,7 +3451,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@@ -3548,7 +3521,7 @@ void LCodeGen::EmitMathAbs(LMathAbs* instr) {
__ mtxer(r0);
__ neg(result, result, SetOE, SetRC);
// Deoptimize on overflow.
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
__ bind(&done);
}
@@ -3565,7 +3538,7 @@ void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
// Deoptimize on overflow.
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(input, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
__ neg(result, result);
__ bind(&done);
@@ -3614,8 +3587,13 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
}
}
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DoubleRegister output_reg = ToDoubleRegister(instr->result());
+ __ frim(output_reg, input_reg);
+}
-void LCodeGen::DoMathFloor(LMathFloor* instr) {
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register input_high = scratch0();
@@ -3624,7 +3602,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
&exact);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3632,13 +3610,35 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmpi(result, Operand::Zero());
__ bne(&done);
__ cmpwi(input_high, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DoubleRegister output_reg = ToDoubleRegister(instr->result());
+ DoubleRegister dot_five = double_scratch0();
+ Label done;
+
+ __ frin(output_reg, input_reg);
+ __ fcmpu(input_reg, kDoubleRegZero);
+ __ bge(&done);
+ __ fcmpu(output_reg, input_reg);
+ __ beq(&done);
+
+ // Negative, non-integer case
+ __ LoadDoubleLiteral(dot_five, 0.5, r0);
+ __ fadd(output_reg, input_reg, dot_five);
+ __ frim(output_reg, output_reg);
+ // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
+ __ fabs(output_reg, output_reg);
+ __ fneg(output_reg, output_reg);
+
+ __ bind(&done);
+}
-void LCodeGen::DoMathRound(LMathRound* instr) {
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
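
DoMathRoundD implements JavaScript Math.round semantics on double registers: round half toward positive infinity, and keep -0.0 for inputs in [-0.5, -0.0), which is what the fabs/fneg pair is for. A hedged C++ restatement of the rule; it ignores the large-magnitude edge cases the frin fast path above handles:

    #include <cmath>
    #include <cstdio>

    double JsRound(double x) {
      if (x >= 0.0) return std::floor(x + 0.5);
      double r = std::floor(x + 0.5);
      // floor(-0.3 + 0.5) is +0.0; force the negative sign back, like the
      // fabs/fneg pair in the assembly.
      return -std::fabs(r);
    }

    int main() {
      std::printf("%g %g %g\n", JsRound(2.5), JsRound(-2.5), JsRound(-0.3));
      // prints: 3 -2 -0
      return 0;
    }
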
@@ -3651,7 +3651,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ LoadDoubleLiteral(dot_five, 0.5, r0);
__ fabs(double_scratch1, input);
__ fcmpu(double_scratch1, dot_five);
- DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
// If input is in [-0.5, -0], the result is -0.
// If input is in [+0, +0.5[, the result is +0.
// If the input is +0.5, the result is 1.
@@ -3659,7 +3659,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// [-0.5, -0] (negative) yields minus zero.
__ TestDoubleSign(input, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ fcmpu(input, dot_five);
if (CpuFeatures::IsSupported(ISELECT)) {
@@ -3683,7 +3683,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
double_scratch0(), &done, &done);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&done);
}
@@ -3748,7 +3748,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r10, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3762,29 +3762,34 @@ void LCodeGen::DoPower(LPower* instr) {
}
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
- double_scratch2, temp1, temp2, scratch0());
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
void LCodeGen::DoMathLog(LMathLog* instr) {
__ PrepareCallCFunction(0, 1, scratch0());
__ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
- 1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
__ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
-
void LCodeGen::DoMathClz32(LMathClz32* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
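
DoMathCos, DoMathSin, DoMathExp, and DoMathLog now all collapse into one pattern: move the argument to the float parameter location, call one of V8's shared ieee754 C routines, and move the result back, replacing per-architecture generators such as MathExpGenerator. A sketch of that calling shape, with a <cmath> wrapper standing in for the ieee754 routine:

    #include <cmath>
    #include <cstdio>

    typedef double (*UnaryMathFunction)(double);

    double Ieee754Exp(double x) { return std::exp(x); }  // stand-in routine

    // Mirrors MovToFloatParameter / CallCFunction / MovFromFloatResult.
    double CallUnary(UnaryMathFunction f, double x) { return f(x); }

    int main() {
      std::printf("%f\n", CallUnary(&Ieee754Exp, 1.0));  // 2.718282
      return 0;
    }
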
@@ -3803,7 +3808,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3909,14 +3916,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(r3));
__ mov(r3, Operand(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ Move(r5, instr->hydrogen()->site());
- } else {
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- }
+ __ Move(r5, instr->hydrogen()->site());
+
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3948,7 +3949,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4083,14 +4084,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4133,7 +4132,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -4345,13 +4344,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4430,13 +4427,20 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
+ LConstantOperand* constant_key = LConstantOperand::cast(key);
+ int32_t int_key = ToInteger32(constant_key);
+ if (Smi::IsValid(int_key)) {
+ __ LoadSmiLiteral(r6, Smi::FromInt(int_key));
+ } else {
+ // We should never get here at runtime because there is a smi check on
+ // the key before this point.
+ __ stop("expected smi");
+ }
} else {
__ SmiTag(r6, ToRegister(key));
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
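// Sketch, not V8 code: the new guard only materializes the constant key as
// a Smi when it fits the Smi payload; otherwise the __ stop() documents an
// unreachable path. A simplified 32-bit model of Smi::IsValid/Smi::FromInt,
// where a Smi is a 31-bit payload shifted over a zero tag bit:
#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;                  // low bit 0 marks a Smi
const int32_t kSmiMaxValue = (1 << 30) - 1;
const int32_t kSmiMinValue = -(1 << 30);

bool SmiIsValid(int32_t value) {
  return value >= kSmiMinValue && value <= kSmiMaxValue;
}

int32_t SmiFromInt(int32_t value) {
  assert(SmiIsValid(value));
  return value << kSmiTagSize;              // LoadSmiLiteral stores this form
}

int main() {
  assert(SmiIsValid(42) && SmiFromInt(42) == 84);
  assert(!SmiIsValid(INT32_MAX));           // would hit __ stop("expected smi")
  return 0;
}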
@@ -4445,7 +4449,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ TestIfSmi(result, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
@@ -4476,8 +4480,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(object_reg.is(r3));
PushSafepointRegistersScope scope(this);
__ Move(r4, to_map);
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(instr->pointer_map(), 0,
Safepoint::kLazyDeopt);
@@ -4492,7 +4495,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp2 = ToRegister(instr->temp2());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4730,13 +4733,10 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!dst.is(cp)) {
+ __ li(cp, Operand::Zero());
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(instr->pointer_map(), 0,
Safepoint::kNoLazyDeopt);
@@ -4788,12 +4788,10 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ li(reg, Operand::Zero());
PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(cp)) {
+ __ li(cp, Operand::Zero());
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(instr->pointer_map(), 0,
Safepoint::kNoLazyDeopt);
@@ -4808,13 +4806,13 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ TestUnsignedSmiCandidate(input, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
}
#if !V8_TARGET_ARCH_PPC64
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
} else {
#endif
__ SmiTag(output, input);
@@ -4832,7 +4830,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
// If the input is a HeapObject, value of scratch won't be zero.
__ andi(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
} else {
__ SmiUntag(result, input);
}
@@ -4862,13 +4860,13 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ bne(&convert);
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
__ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ TestDoubleIsMinusZero(result_reg, scratch, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done);
if (can_convert_undefined_to_nan) {
@@ -4876,7 +4874,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done);
@@ -4938,10 +4936,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ li(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ lfd(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -4951,13 +4949,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(input_reg, Operand::Zero());
__ bne(&done);
__ TestHeapNumberSign(scratch2, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
}
__ bind(&done);
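// Sketch, not V8 code: the kLostPrecisionOrNaN deopts above fire whenever a
// double is not exactly representable as an int32. A portable model of
// TryDoubleToInt32Exact (the separate minus-zero check in the hunk is the
// TestHeapNumberSign path):
#include <cassert>
#include <cmath>
#include <cstdint>

bool TryDoubleToInt32Exact(double d, int32_t* out) {
  if (!(d >= INT32_MIN && d <= INT32_MAX)) return false;  // NaN and range fail
  int32_t i = static_cast<int32_t>(d);                    // truncating convert
  if (static_cast<double>(i) != d) return false;          // fraction was lost
  *out = i;
  return true;
}

int main() {
  int32_t v;
  assert(TryDoubleToInt32Exact(7.0, &v) && v == 7);
  assert(!TryDoubleToInt32Exact(7.5, &v));            // lost precision
  assert(!TryDoubleToInt32Exact(std::nan(""), &v));   // NaN
  return 0;
}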
@@ -5026,13 +5024,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
   // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ bne(&done);
__ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -5051,13 +5049,13 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
   // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ bne(&done);
__ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -5065,7 +5063,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ SmiTag(result_reg);
#else
__ SmiTagCheckOverflow(result_reg, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
}
@@ -5073,7 +5071,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
}
@@ -5081,7 +5079,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
}
@@ -5094,7 +5092,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
}
@@ -5114,13 +5112,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else {
- DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpli(scratch, Operand(last));
- DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@@ -5131,12 +5129,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ andi(r0, scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
- cr0);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr,
+ DeoptimizeReason::kWrongInstanceType, cr0);
} else {
__ andi(scratch, scratch, Operand(mask));
__ cmpi(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
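// Sketch, not V8 code: the branch above picks a cheaper instance-type test
// when the mask is a single bit, since tag must then be 0 or the mask
// itself and one AND decides the outcome. A model of both paths:
#include <cassert>
#include <cstdint>

bool IsPowerOfTwo32(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

bool InstanceTypeMatches(uint8_t type_field, uint8_t mask, uint8_t tag) {
  if (IsPowerOfTwo32(mask)) {
    // Single-bit fast path: andi, then branch on zero/non-zero.
    return ((type_field & mask) == 0) == (tag == 0);
  }
  // General path: andi, then compare against the tag.
  return (type_field & mask) == tag;
}

int main() {
  assert(InstanceTypeMatches(0x04, 0x04, 0x04));   // fast path, bit set
  assert(InstanceTypeMatches(0x03, 0x04, 0x00));   // fast path, bit clear
  assert(!InstanceTypeMatches(0x06, 0x07, 0x05));  // general path mismatch
  return 0;
}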
@@ -5155,7 +5153,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ Cmpi(reg, Operand(object), r0);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@@ -5171,7 +5169,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r3, temp);
}
__ TestIfSmi(temp, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
}
@@ -5226,7 +5224,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ bne(deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@@ -5265,7 +5263,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ li(result_reg, Operand::Zero());
__ b(&done);
@@ -5283,35 +5281,11 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
-
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ MovDoubleHighToInt(result_reg, value_reg);
- } else {
- __ MovDoubleLowToInt(result_reg, value_reg);
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
-#if V8_TARGET_ARCH_PPC64
- __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
-#else
- __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
-#endif
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
- : LDeferredCode(codegen), instr_(instr) {}
+ : LDeferredCode(codegen), instr_(instr) { }
void Generate() override { codegen()->DoDeferredAllocate(instr_); }
LInstruction* instr() override { return instr_; }
@@ -5319,14 +5293,15 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
LAllocate* instr_;
};
- DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp1());
Register scratch2 = ToRegister(instr->temp2());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -5335,6 +5310,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
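// Sketch, not V8 code: AllocationFlags is a plain enum, so every flag
// composition needs the static_cast seen above. A minimal model with
// assumed bit values, adding ALLOCATION_FOLDING_DOMINATOR the same way:
#include <cassert>

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  DOUBLE_ALIGNMENT = 1 << 0,
  PRETENURE = 1 << 1,
  ALLOCATION_FOLDING_DOMINATOR = 1 << 2,
};

int main() {
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  // operator| yields int, so each step casts back to the enum type.
  flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  assert((flags & DOUBLE_ALIGNMENT) != 0);
  assert((flags & PRETENURE) == 0);
  return 0;
}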
@@ -5406,6 +5387,49 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
instr->context());
__ StoreToSafepointRegisterSlot(r3, result);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+    // If the allocation folding dominator's allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ Register top_address = scratch0();
+ __ subi(r3, r3, Operand(kHeapObjectTag));
+ __ mov(top_address, Operand(allocation_top));
+ __ StoreP(r3, MemOperand(top_address));
+ __ addi(r3, r3, Operand(kHeapObjectTag));
+ }
+}
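// Sketch, not V8 code: when the folding dominator had to allocate through
// the runtime, the code above rewinds the space's top pointer onto the
// freshly returned object so the folded successor allocations sit directly
// behind it. A model of the tag arithmetic, assuming kHeapObjectTag == 1:
#include <cassert>
#include <cstdint>

const uintptr_t kHeapObjectTag = 1;

int main() {
  uintptr_t raw = 0x1000;                    // where the runtime put the object
  uintptr_t tagged = raw + kHeapObjectTag;   // runtime returns tagged pointers
  uintptr_t top = tagged - kHeapObjectTag;   // __ subi(r3, r3, kHeapObjectTag)
  // top is stored to the allocation-top external reference...
  assert(top == raw);
  // ...and the tag is restored before handing the object onward.
  assert(top + kHeapObjectTag == tagged);    // __ addi(r3, r3, kHeapObjectTag)
  return 0;
}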
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+ Register result = ToRegister(instr->result());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ }
}
@@ -5670,7 +5694,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
__ bind(&done);
}
@@ -5681,7 +5705,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index 28f168036c..fe212d4034 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -128,8 +128,6 @@ class LCodeGen : public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratch; }
@@ -209,10 +207,10 @@ class LCodeGen : public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
+ DeoptimizeReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,
@@ -230,7 +228,7 @@ class LCodeGen : public LCodeGenBase {
void EmitInteger32MathAbs(LMathAbs* instr);
#endif
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
int arguments, Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
@@ -238,8 +236,6 @@ class LCodeGen : public LCodeGenBase {
void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
-
static Condition TokenToCondition(Token::Value op);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index b7397869bb..958620c38a 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -892,7 +892,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -949,17 +949,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1091,6 +1080,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DoMathAbs(instr);
case kMathLog:
return DoMathLog(instr);
+ case kMathCos:
+ return DoMathCos(instr);
+ case kMathSin:
+ return DoMathSin(instr);
case kMathExp:
return DoMathExp(instr);
case kMathSqrt:
@@ -1107,20 +1100,32 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
- LMathFloor* result = new (zone()) LMathFloor(input);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ if (instr->representation().IsInteger32()) {
+ LMathFloorI* result = new (zone()) LMathFloorI(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathFloorD* result = new (zone()) LMathFloorD(input);
+ return DefineAsRegister(result);
+ }
}
-
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ DCHECK(instr->value()->representation().IsDouble());
LOperand* input = UseRegister(instr->value());
- LOperand* temp = TempDoubleRegister();
- LMathRound* result = new (zone()) LMathRound(input, temp);
- return AssignEnvironment(DefineAsRegister(result));
+ if (instr->representation().IsInteger32()) {
+ LOperand* temp = TempDoubleRegister();
+ LMathRoundI* result = new (zone()) LMathRoundI(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ } else {
+ DCHECK(instr->representation().IsDouble());
+ LMathRoundD* result = new (zone()) LMathRoundD(input);
+ return DefineAsRegister(result);
+ }
}
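// Sketch, not V8 code: the split above gives floor/round a double-result
// form that can never fail (so it needs no environment) and an integer-
// result form that must deopt when the value does not fit an int32 or is
// -0. A model of why the integer form is fallible:
#include <cassert>
#include <cmath>
#include <cstdint>

bool FloorToInt32(double d, int32_t* out) {
  double f = std::floor(d);                               // the MathFloorD result
  if (!(f >= INT32_MIN && f <= INT32_MAX)) return false;  // NaN / out of range
  if (f == 0.0 && std::signbit(f)) return false;          // -0 must stay a double
  *out = static_cast<int32_t>(f);
  return true;
}

int main() {
  int32_t v;
  assert(FloorToInt32(2.9, &v) && v == 2);
  assert(!FloorToInt32(-0.0, &v));   // result is -0 -> MathFloorI deopts
  assert(!FloorToInt32(1e20, &v));   // out of int32 range -> deopt
  return 0;
}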
-
LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathFround* result = new (zone()) LMathFround(input);
@@ -1145,8 +1150,8 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d1);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr);
}
@@ -1156,16 +1161,25 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
+}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = TempDoubleRegister();
- LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
}
@@ -1943,20 +1957,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1986,14 +1986,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
- LLoadGlobalGeneric* result =
- new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, r3), instr);
}
@@ -2037,10 +2032,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r3);
@@ -2110,10 +2102,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result = DefineFixed(
new (zone()) LLoadKeyedGeneric(context, object, key, vector), r3);
@@ -2172,12 +2161,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result =
new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2265,13 +2250,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
-
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
@@ -2307,13 +2287,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
+ if (instr->IsAllocationFolded()) {
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index c39f6204f8..f26bfc5e87 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -53,7 +53,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -61,12 +60,12 @@ class LCodeGen;
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
- V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -78,7 +77,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -98,13 +96,17 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
+ V(MathCos) \
+ V(MathSin) \
V(MathExp) \
- V(MathFloor) \
+ V(MathFloorD) \
+ V(MathFloorI) \
V(MathFround) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
- V(MathRound) \
+ V(MathRoundD) \
+ V(MathRoundI) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
@@ -151,7 +153,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -807,21 +808,43 @@ class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
void PrintDataTo(StringStream* stream) override;
};
+// Math.floor with a double result.
+class LMathFloorD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloorD(LOperand* value) { inputs_[0] = value; }
-class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.floor with an integer result.
+class LMathFloorI final : public LTemplateInstruction<1, 1, 0> {
public:
- explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+ explicit LMathFloorI(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
+// Math.round with a double result.
+class LMathRoundD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRoundD(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+// Math.round with an integer result.
+class LMathRoundI final : public LTemplateInstruction<1, 1, 1> {
public:
- LMathRound(LOperand* value, LOperand* temp) {
+ LMathRoundI(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -829,7 +852,7 @@ class LMathRound final : public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
@@ -878,22 +901,31 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
@@ -1092,22 +1124,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -1528,18 +1544,14 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2091,6 +2103,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2250,31 +2264,6 @@ class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LAllocate final : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
@@ -2294,6 +2283,22 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
@@ -2441,6 +2446,8 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/crankshaft/s390/OWNERS b/deps/v8/src/crankshaft/s390/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/crankshaft/s390/OWNERS
+++ b/deps/v8/src/crankshaft/s390/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
index 689f4bc1ae..7bb718df7e 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -12,7 +12,6 @@
#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -160,14 +159,12 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ push(r3);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -176,10 +173,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ LoadRR(cp, r2);
__ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -312,8 +310,6 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ b(r14, &call_deopt_entry);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
@@ -675,7 +671,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type,
CRegister cr) {
LEnvironment* environment = instr->environment();
@@ -746,20 +742,19 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
__ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -769,8 +764,7 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
- CRegister cr) {
+ DeoptimizeReason deopt_reason, CRegister cr) {
Deoptimizer::BailoutType bailout_type =
info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
@@ -820,12 +814,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -885,12 +873,12 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ ExtractBitRange(dividend, dividend, shift - 1, 0);
__ LoadComplementRR(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
} else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ mov(dividend, Operand::Zero());
} else {
- DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done, Label::kNear);
}
@@ -911,7 +899,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -926,7 +914,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
__ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -941,7 +929,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for x % 0.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ Cmp32(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, dr will return undefined, which is not what we
@@ -952,7 +940,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bne(&no_overflow_possible, Label::kNear);
__ Cmp32(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
} else {
__ b(ne, &no_overflow_possible, Label::kNear);
__ mov(result_reg, Operand::Zero());
@@ -976,7 +964,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bne(&done, Label::kNear);
__ Cmp32(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
@@ -993,12 +981,12 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ Cmp32(dividend, Operand(0x80000000));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1006,7 +994,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
__ TestBitRange(dividend, shift - 1, 0, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1038,7 +1026,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1046,7 +1034,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1057,7 +1045,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ Mul(scratch, result, ip);
__ Cmp32(scratch, dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
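// Sketch, not V8 code: DoDivByConstI divides with a multiply-based
// TruncatingDiv and, when the uses are not truncating, multiplies back to
// prove there was no remainder, deopting with kLostPrecision otherwise. A
// model using plain integer ops in place of the magic-number multiply:
#include <cassert>
#include <cstdint>
#include <cstdlib>

bool ConstDivExact(int32_t dividend, int32_t divisor, int32_t* out) {
  int32_t result = dividend / std::abs(divisor);   // TruncatingDiv(Abs(divisor))
  if (divisor < 0) result = -result;
  if (result * divisor != dividend) return false;  // remainder -> deopt
  *out = result;
  return true;
}

int main() {
  int32_t q;
  assert(ConstDivExact(12, -3, &q) && q == -4);
  assert(!ConstDivExact(13, 3, &q));   // 13 % 3 != 0 -> kLostPrecision
  return 0;
}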
@@ -1074,7 +1062,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1083,7 +1071,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ Cmp32(dividend, Operand::Zero());
__ bne(&dividend_not_zero, Label::kNear);
__ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1093,7 +1081,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ Cmp32(dividend, Operand(kMinInt));
__ bne(&dividend_not_min_int, Label::kNear);
__ Cmp32(divisor, Operand(-1));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1106,7 +1094,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ Cmp32(r0, Operand::Zero());
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1134,13 +1122,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
#if V8_TARGET_ARCH_S390X
if (divisor == -1 && can_overflow) {
__ Cmp32(dividend, Operand(0x80000000));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
#endif
__ LoadComplementRR(result, dividend);
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
}
// If the negation could not overflow, simply shifting is OK.
@@ -1156,7 +1144,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
return;
}
@@ -1180,7 +1168,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1188,7 +1176,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ Cmp32(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1231,7 +1219,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1240,7 +1228,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ Cmp32(dividend, Operand::Zero());
__ bne(&dividend_not_zero, Label::kNear);
__ Cmp32(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1251,7 +1239,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ bne(&no_overflow_possible, Label::kNear);
__ Cmp32(divisor, Operand(-1));
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
} else {
__ bne(&no_overflow_possible, Label::kNear);
__ LoadRR(result, dividend);
@@ -1327,7 +1315,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ CmpP(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
switch (constant) {
@@ -1337,12 +1325,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (instr->hydrogen()->representation().IsSmi()) {
#endif
__ LoadComplementRR(result, left);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
#if V8_TARGET_ARCH_S390X
} else {
__ LoadComplementRR(result, left);
__ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
#endif
} else {
@@ -1362,7 +1350,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ Cmp32(left, Operand::Zero());
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ LoadImmP(result, Operand::Zero());
break;
@@ -1416,7 +1404,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ msgr(result, right);
}
__ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
@@ -1433,7 +1421,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ LoadRR(result, scratch);
}
__ TestIfInt32(r0, result, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
#endif
} else {
if (instr->hydrogen()->representation().IsSmi()) {
@@ -1461,7 +1449,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
#endif
// Bail out if the result is minus zero.
__ CmpP(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
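// Sketch, not V8 code: on the 64-bit path above the product is computed in
// 64 bits (msgr) and TestIfInt32 deopts unless it still fits an int32. A
// model of that overflow check:
#include <cassert>
#include <cstdint>

bool MulCheckOverflow(int32_t a, int32_t b, int32_t* out) {
  int64_t wide = static_cast<int64_t>(a) * b;            // 64-bit product
  if (wide != static_cast<int32_t>(wide)) return false;  // kOverflow deopt
  *out = static_cast<int32_t>(wide);
  return true;
}

int main() {
  int32_t r;
  assert(MulCheckOverflow(-3, 7, &r) && r == -21);
  assert(!MulCheckOverflow(70000, 70000, &r));  // 4.9e9 overflows int32
  return 0;
}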
@@ -1562,7 +1550,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
#else
__ ltr(result, result); // Set the <,==,> condition
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
}
break;
case Token::SHL:
@@ -1609,7 +1597,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ Cmp32(left, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
}
__ Move(result, left);
}
@@ -1631,7 +1619,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
} else {
__ ShiftLeft(result, left, Operand(shift_count));
@@ -1707,7 +1695,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ lgfr(ToRegister(result), ToRegister(result));
#endif
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1895,7 +1883,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
#endif
   // Deoptimize on overflow
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -2138,7 +2126,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
const Register map = scratch0();
@@ -2202,7 +2190,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -2541,15 +2529,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
EmitBranch(instr, eq);
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(r2));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2574,16 +2553,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ LoadlB(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ LoadP(object_prototype,
FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ CmpP(object_prototype, prototype);
- EmitTrueBranch(instr, eq);
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq);
+ __ CmpP(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
__ LoadP(object_map,
FieldMemOperand(object_prototype, HeapObject::kMapOffset));
__ b(&loop);
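// Sketch, not V8 code: the reorder above tests for the null sentinel before
// comparing against the searched-for prototype, so walking off the end of
// the chain now answers false before any match can fire. A model of the
// loop the assembly implements:
#include <cassert>
#include <cstddef>

struct Object {
  const Object* prototype;   // stands in for map->prototype
};

bool HasInPrototypeChain(const Object* object, const Object* prototype) {
  for (const Object* cur = object->prototype; ; cur = cur->prototype) {
    if (cur == nullptr) return false;   // null check first (EmitFalseBranch)
    if (cur == prototype) return true;  // then the match (EmitTrueBranch)
  }
}

int main() {
  Object base{nullptr};
  Object mid{&base};
  Object leaf{&mid};
  assert(HasInPrototypeChain(&leaf, &base));
  assert(!HasInPrototypeChain(&base, &leaf));
  return 0;
}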
@@ -2682,15 +2661,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(r2));
- __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
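+  // LoadGlobalIC obtains the global object and the variable name through the
+  // feedback vector, so only the slot and vector registers need to be set up.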
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2701,7 +2677,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
Label skip;
__ bne(&skip, Label::kNear);
@@ -2723,7 +2699,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadP(scratch, target);
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ bne(&skip_assignment);
}
@@ -2791,10 +2767,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r4.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2809,7 +2782,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -2884,6 +2857,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
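+  // A dehoisted key can be negative, so the index-to-offset conversion below
+  // must sign-extend rather than zero-extend it.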
+ bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
int base_offset = instr->base_offset();
bool use_scratch = false;
@@ -2897,7 +2871,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
use_scratch = true;
}
} else {
- __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi);
+ __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi,
+ keyMaybeNegative);
use_scratch = true;
}
if (elements_kind == FLOAT32_ELEMENTS) {
@@ -2917,7 +2892,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Register result = ToRegister(instr->result());
MemOperand mem_operand =
PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
- constant_key, element_size_shift, base_offset);
+ constant_key, element_size_shift, base_offset,
+ keyMaybeNegative);
switch (elements_kind) {
case INT8_ELEMENTS:
__ LoadB(result, mem_operand);
@@ -2939,7 +2915,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ LoadlW(result, mem_operand, r0);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ CmpLogical32(result, Operand(0x80000000));
- DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -2971,6 +2947,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@@ -2985,7 +2962,8 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
if (!key_is_constant) {
use_scratch = true;
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
+ keyMaybeNegative);
}
  // Memory references support up to a 20-bit signed displacement in RXY form
@@ -3016,7 +2994,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
base_offset + Register::kExponentOffset));
}
__ Cmp32(r0, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
}
@@ -3068,10 +3046,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
__ TestIfSmi(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -3086,7 +3064,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
__ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@@ -3107,7 +3085,8 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
bool key_is_constant, bool key_is_smi,
int constant_key,
int element_size_shift,
- int base_offset) {
+ int base_offset,
+ bool keyMaybeNegative) {
Register scratch = scratch0();
if (key_is_constant) {
@@ -3125,7 +3104,8 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
(element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
if (needs_shift) {
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
+ keyMaybeNegative);
} else {
scratch = key;
}
@@ -3141,14 +3121,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3233,9 +3208,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ b(&result_in_receiver, Label::kNear);
__ bind(&global_object);
@@ -3268,7 +3243,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ CmpLogicalP(length, Operand(kArgumentsLimit));
- DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3346,6 +3321,8 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(scratch0());
__ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
__ push(scratch0());
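+  // Runtime::kDeclareGlobals consumes three stack arguments: the declaration
+  // pairs, the flags, and the closure's feedback vector.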
+ __ Move(scratch0(), instr->hydrogen()->feedback_vector());
+ __ push(scratch0());
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3411,7 +3388,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Deoptimize if not a heap number.
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@@ -3479,7 +3456,7 @@ void LCodeGen::EmitMathAbs(LMathAbs* instr) {
__ bge(&done, Label::kNear);
__ LoadComplementRR(result, result);
// Deoptimize on overflow.
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
__ bind(&done);
}
@@ -3494,7 +3471,7 @@ void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
// Deoptimize on overflow.
__ Cmp32(input, Operand(0x80000000));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
__ LoadComplementRR(result, result);
__ bind(&done);
@@ -3551,7 +3528,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
&exact);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3559,7 +3536,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ CmpP(result, Operand::Zero());
__ bne(&done, Label::kNear);
__ Cmp32(input_high, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@@ -3577,7 +3554,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ LoadDoubleLiteral(dot_five, 0.5, r0);
__ lpdbr(double_scratch1, input);
__ cdbr(double_scratch1, dot_five);
- DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
// If input is in [-0.5, -0], the result is -0.
// If input is in [+0, +0.5[, the result is +0.
// If the input is +0.5, the result is 1.
@@ -3585,7 +3562,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// [-0.5, -0] (negative) yields minus zero.
__ TestDoubleSign(input, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
Label return_zero;
__ cdbr(input, dot_five);
@@ -3604,7 +3581,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
double_scratch0(), &done, &done);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&done);
}
@@ -3669,7 +3646,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ JumpIfSmi(tagged_exponent, &no_deopt);
__ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3683,23 +3660,31 @@ void LCodeGen::DoPower(LPower* instr) {
}
}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+void LCodeGen::DoMathCos(LMathCos* instr) {
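+  // A plain C call into the ieee754 library: no general-purpose arguments,
+  // one double argument, and a double result.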
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
- MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
- double_scratch2, temp1, temp2, scratch0());
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ __ PrepareCallCFunction(0, 1, scratch0());
+ __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
+ __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
void LCodeGen::DoMathLog(LMathLog* instr) {
__ PrepareCallCFunction(0, 1, scratch0());
__ MovToFloatParameter(ToDoubleRegister(instr->value()));
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
- 1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
__ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
@@ -3728,7 +3713,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3832,14 +3819,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(r2));
__ mov(r2, Operand(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ Move(r4, instr->hydrogen()->site());
- } else {
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- }
+ __ Move(r4, instr->hydrogen()->site());
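+  // The constructor stubs read the elements kind from the AllocationSite,
+  // so r4 always carries the site.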
+
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3871,7 +3852,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4002,15 +3983,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4052,7 +4030,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -4072,6 +4050,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
int base_offset = instr->base_offset();
if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
@@ -4091,7 +4070,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
address = external_pointer;
}
} else {
- __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi);
+ __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi,
+ keyMaybeNegative);
__ AddP(address, external_pointer);
}
if (elements_kind == FLOAT32_ELEMENTS) {
@@ -4104,7 +4084,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
Register value(ToRegister(instr->value()));
MemOperand mem_operand =
PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
- constant_key, element_size_shift, base_offset);
+ constant_key, element_size_shift, base_offset,
+ keyMaybeNegative);
switch (elements_kind) {
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
@@ -4172,6 +4153,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
}
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+ bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
int base_offset = instr->base_offset() + constant_key * kDoubleSize;
bool use_scratch = false;
intptr_t address_offset = base_offset;
@@ -4185,7 +4167,8 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
}
} else {
use_scratch = true;
- __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
+ keyMaybeNegative);
    // Memory references support up to a 20-bit signed displacement in RXY form
if (!is_int20((address_offset))) {
__ AddP(scratch, Operand(address_offset));
@@ -4303,13 +4286,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state())
+ isolate(), instr->language_mode())
.code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4387,13 +4367,20 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- __ LoadSmiLiteral(r5, ToSmi(LConstantOperand::cast(key)));
+ LConstantOperand* constant_key = LConstantOperand::cast(key);
+ int32_t int_key = ToInteger32(constant_key);
+ if (Smi::IsValid(int_key)) {
+ __ LoadSmiLiteral(r5, Smi::FromInt(int_key));
+ } else {
+ // We should never get here at runtime because there is a smi check on
+ // the key before this point.
+ __ stop("expected smi");
+ }
} else {
__ SmiTag(r5, ToRegister(key));
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -4402,7 +4389,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ TestIfSmi(result);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
@@ -4431,8 +4418,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(object_reg.is(r2));
PushSafepointRegistersScope scope(this);
__ Move(r3, to_map);
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithRegisters(instr->pointer_map(), 0,
Safepoint::kLazyDeopt);
@@ -4446,7 +4432,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp2 = ToRegister(instr->temp2());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4674,13 +4660,10 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
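+  // Runtime::kAllocateHeapNumber ignores the context; zero is a smi, so cp
+  // stays GC-safe across the call.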
+ if (!dst.is(cp)) {
+ __ LoadImmP(cp, Operand::Zero());
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(instr->pointer_map(), 0,
Safepoint::kNoLazyDeopt);
@@ -4730,12 +4713,10 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ LoadImmP(reg, Operand::Zero());
PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(cp)) {
+ __ LoadImmP(cp, Operand::Zero());
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(instr->pointer_map(), 0,
Safepoint::kNoLazyDeopt);
@@ -4749,13 +4730,13 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ TestUnsignedSmiCandidate(input, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
}
#if !V8_TARGET_ARCH_S390X
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
} else {
#endif
__ SmiTag(output, input);
@@ -4769,7 +4750,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register result = ToRegister(instr->result());
if (instr->needs_check()) {
__ tmll(input, Operand(kHeapObjectTag));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
__ SmiUntag(result, input);
} else {
__ SmiUntag(result, input);
@@ -4799,20 +4780,20 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ bne(&convert, Label::kNear);
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
__ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ TestDoubleIsMinusZero(result_reg, scratch, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done, Label::kNear);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
// Convert undefined (and hole) to NaN.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done, Label::kNear);
@@ -4869,11 +4850,11 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ LoadImmP(input_reg, Operand::Zero());
} else {
// Deoptimize if we don't have a heap number.
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ ld(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -4883,13 +4864,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ CmpP(input_reg, Operand::Zero());
__ bne(&done, Label::kNear);
__ TestHeapNumberSign(scratch2, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
}
__ bind(&done);
@@ -4955,13 +4936,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
  // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ CmpP(result_reg, Operand::Zero());
__ bne(&done, Label::kNear);
__ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -4979,13 +4960,13 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
  // Deoptimize if the input wasn't an int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ CmpP(result_reg, Operand::Zero());
__ bne(&done, Label::kNear);
__ TestDoubleSign(double_input, scratch1);
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -4993,21 +4974,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
__ SmiTag(result_reg);
#else
__ SmiTagCheckOverflow(result_reg, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input));
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
}
@@ -5019,7 +5000,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
@@ -5038,14 +5019,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else {
- DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
Operand(last));
- DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@@ -5058,11 +5039,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ AndP(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr,
+ DeoptimizeReason::kWrongInstanceType);
} else {
__ AndP(scratch, Operand(mask));
__ CmpP(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -5079,7 +5061,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ CmpP(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
@@ -5094,7 +5076,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r2, temp);
}
__ TestIfSmi(temp);
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
@@ -5147,7 +5129,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ bne(deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@@ -5183,7 +5165,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ CmpP(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadImmP(result_reg, Operand::Zero());
__ b(&done, Label::kNear);
@@ -5200,31 +5182,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
__ bind(&done);
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- __ lgdr(result_reg, value_reg);
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ srlg(result_reg, result_reg, Operand(32));
- } else {
- __ llgfr(result_reg, result_reg);
- }
-}
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
- Register scratch = scratch0();
-
- // Combine hi_reg:lo_reg into a single 64-bit register.
- __ sllg(scratch, hi_reg, Operand(32));
- __ lr(scratch, lo_reg);
-
- // Bitwise convert from GPR to FPR
- __ ldgr(result_reg, scratch);
-}
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
@@ -5244,7 +5201,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register scratch2 = ToRegister(instr->temp2());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -5253,6 +5210,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
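+  // The dominator reserves memory for its entire folding group; the folded
+  // allocations themselves are emitted via DoFastAllocate below.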
+
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5331,6 +5294,49 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
instr->context());
__ StoreToSafepointRegisterSlot(r2, result);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags = static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+ }
+    // If the allocation-folding dominator's allocate triggered a GC, the
+    // allocation happened in the runtime. Reset the top pointer to virtually
+    // undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ Register top_address = scratch0();
+ __ SubP(r2, r2, Operand(kHeapObjectTag));
+ __ mov(top_address, Operand(allocation_top));
+ __ StoreP(r2, MemOperand(top_address));
+ __ AddP(r2, r2, Operand(kHeapObjectTag));
+ }
+}
+
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
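+  // A folded allocation merely bumps the top pointer within memory that its
+  // dominator already reserved, so no GC check or deferred path is needed.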
+ Register result = ToRegister(instr->result());
+ Register scratch1 = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, scratch1, scratch2, flags);
+ }
}
void LCodeGen::DoTypeof(LTypeof* instr) {
@@ -5580,7 +5586,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ CmpP(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
__ bind(&done);
}
@@ -5590,7 +5596,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ CmpP(map, scratch0());
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
index 6d364cbe11..e5df255f4d 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
@@ -116,7 +116,8 @@ class LCodeGen : public LCodeGenBase {
MemOperand PrepareKeyedOperand(Register key, Register base,
bool key_is_constant, bool key_is_tagged,
int constant_key, int element_size_shift,
- int base_offset);
+ int base_offset,
+ bool keyMaybeNegative = true);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -127,8 +128,6 @@ class LCodeGen : public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
Scope* scope() const { return scope_; }
Register scratch0() { return kLithiumScratch; }
@@ -208,10 +207,10 @@ class LCodeGen : public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
void DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
+ DeoptimizeReason deopt_reason, CRegister cr = cr7);
void AddToTranslation(LEnvironment* environment, Translation* translation,
LOperand* op, bool is_tagged, bool is_uint32,
@@ -229,7 +228,7 @@ class LCodeGen : public LCodeGenBase {
void EmitInteger32MathAbs(LMathAbs* instr);
#endif
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
int arguments, Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
@@ -237,8 +236,6 @@ class LCodeGen : public LCodeGenBase {
void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
-
static Condition TokenToCondition(Token::Value op);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.cc b/deps/v8/src/crankshaft/s390/lithium-s390.cc
index a18f877187..3048e4c8b5 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.cc
@@ -815,7 +815,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -865,16 +865,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
return DefineAsRegister(new (zone()) LArgumentsElements);
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -995,6 +985,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DoMathAbs(instr);
case kMathLog:
return DoMathLog(instr);
+ case kMathCos:
+ return DoMathCos(instr);
+ case kMathSin:
+ return DoMathSin(instr);
case kMathExp:
return DoMathExp(instr);
case kMathSqrt:
@@ -1044,8 +1038,8 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseFixedDouble(instr->value(), d1);
- return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
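+  // d0 is the first double argument/result register in the s390 C ABI, so
+  // the operand is pinned there for the C library call.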
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d0), instr);
}
LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
@@ -1054,15 +1048,25 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), d0), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), d0), instr);
+}
+
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = TempDoubleRegister();
- LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
+ LOperand* input = UseFixedDouble(instr->value(), d0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), d0), instr);
}
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
@@ -1783,18 +1787,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
-}
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
-}
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1822,14 +1814,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
- LLoadGlobalGeneric* result =
- new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, r2), instr);
}
@@ -1869,10 +1856,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result =
DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r2);
@@ -1938,10 +1922,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LInstruction* result = DefineFixed(
new (zone()) LLoadKeyedGeneric(context, object, key, vector), r2);
@@ -1998,12 +1979,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result =
new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
@@ -2086,13 +2063,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
-
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
@@ -2124,13 +2096,18 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
LOperand* size = UseRegisterOrConstant(instr->size());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
- return AssignPointerMap(DefineAsRegister(result));
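+  // A folded allocation never calls the runtime, so it needs neither a
+  // context nor a pointer map.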
+ if (instr->IsAllocationFolded()) {
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp1, temp2);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.h b/deps/v8/src/crankshaft/s390/lithium-s390.h
index b6a161411d..1f1e520067 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.h
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.h
@@ -53,7 +53,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -61,12 +60,12 @@ class LCodeGen;
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
- V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -78,7 +77,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -98,6 +96,8 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
+ V(MathCos) \
+ V(MathSin) \
V(MathExp) \
V(MathFloor) \
V(MathFround) \
@@ -836,21 +836,29 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
-class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
@@ -1035,21 +1043,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
Token::Value op() const { return hydrogen()->token(); }
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -1445,17 +1438,14 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -1977,6 +1967,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
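+  // Growing the backing store may call into the runtime, which does not
+  // preserve double registers.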
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2123,29 +2115,6 @@ class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
class LAllocate final : public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
@@ -2165,6 +2134,22 @@ class LAllocate final : public LTemplateInstruction<1, 2, 2> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
LTypeof(LOperand* context, LOperand* value) {
@@ -2302,6 +2287,8 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
index 69d7efed63..5961838711 100644
--- a/deps/v8/src/crankshaft/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -14,9 +14,9 @@
namespace v8 {
namespace internal {
-
AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
- Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root)
+ DeclarationScope* scope, BailoutId osr_ast_id,
+ FunctionLiteral* root, AstTypeBounds* bounds)
: isolate_(isolate),
zone_(zone),
closure_(closure),
@@ -24,9 +24,10 @@ AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
osr_ast_id_(osr_ast_id),
root_(root),
oracle_(isolate, zone, handle(closure->shared()->code()),
- handle(closure->shared()->feedback_vector()),
+ handle(closure->feedback_vector()),
handle(closure->context()->native_context())),
- store_(zone) {
+ store_(zone),
+ bounds_(bounds) {
InitializeAstVisitor(isolate);
}
@@ -304,14 +305,7 @@ void AstTyper::VisitForInStatement(ForInStatement* stmt) {
store_.Forget(); // Control may transfer here via 'break'.
}
-
-void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
- RECURSE(Visit(stmt->iterable()));
- store_.Forget(); // Control may transfer here via looping or 'continue'.
- RECURSE(Visit(stmt->body()));
- store_.Forget(); // Control may transfer here via 'break'.
-}
-
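+// Crankshaft bails out on for-of loops, so the typer records nothing here.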
+void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {}
void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
Effects try_effects = EnterEffects();
@@ -353,7 +347,7 @@ void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
void AstTyper::VisitDoExpression(DoExpression* expr) {
RECURSE(VisitBlock(expr->block()));
RECURSE(VisitVariableProxy(expr->result()));
- NarrowType(expr, expr->result()->bounds());
+ NarrowType(expr, bounds_->get(expr->result()));
}
@@ -371,9 +365,9 @@ void AstTyper::VisitConditional(Conditional* expr) {
then_effects.Alt(else_effects);
store_.Seq(then_effects);
- NarrowType(expr, Bounds::Either(
- expr->then_expression()->bounds(),
- expr->else_expression()->bounds(), zone()));
+ NarrowType(expr,
+ Bounds::Either(bounds_->get(expr->then_expression()),
+ bounds_->get(expr->else_expression()), zone()));
}
@@ -464,11 +458,11 @@ void AstTyper::VisitAssignment(Assignment* expr) {
expr->is_compound() ? expr->binary_operation() : expr->value();
RECURSE(Visit(expr->target()));
RECURSE(Visit(rhs));
- NarrowType(expr, rhs->bounds());
+ NarrowType(expr, bounds_->get(rhs));
VariableProxy* proxy = expr->target()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
- store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+ store_.Seq(variable_index(proxy->var()), Effect(bounds_->get(expr)));
}
}
@@ -521,7 +515,7 @@ void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
bool is_uninitialized = true;
- if (expr->IsUsingCallFeedbackICSlot(isolate_)) {
+ if (expr->IsUsingCallFeedbackICSlot()) {
FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
is_uninitialized = oracle()->CallIsUninitialized(slot);
if (!expr->expression()->IsProperty() &&
@@ -540,8 +534,7 @@ void AstTyper::VisitCall(Call* expr) {
RECURSE(Visit(arg));
}
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate_)) {
+ if (expr->is_possibly_eval()) {
store_.Forget(); // Eval could do whatever to local variables.
}
@@ -628,7 +621,7 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
- store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+ store_.Seq(variable_index(proxy->var()), Effect(bounds_->get(expr)));
}
}
@@ -656,7 +649,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::COMMA:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, expr->right()->bounds());
+ NarrowType(expr, bounds_->get(expr->right()));
break;
case Token::OR:
case Token::AND: {
@@ -669,16 +662,16 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
left_effects.Alt(right_effects);
store_.Seq(left_effects);
- NarrowType(expr, Bounds::Either(
- expr->left()->bounds(), expr->right()->bounds(), zone()));
+ NarrowType(expr, Bounds::Either(bounds_->get(expr->left()),
+ bounds_->get(expr->right()), zone()));
break;
}
case Token::BIT_OR:
case Token::BIT_AND: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Type* upper = Type::Union(
- expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
+ Type* upper = Type::Union(bounds_->get(expr->left()).upper,
+ bounds_->get(expr->right()).upper, zone());
if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
Type* lower = Type::Intersect(Type::SignedSmall(), upper, zone());
NarrowType(expr, Bounds(lower, upper));
@@ -702,8 +695,8 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::ADD: {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- Bounds l = expr->left()->bounds();
- Bounds r = expr->right()->bounds();
+ Bounds l = bounds_->get(expr->left());
+ Bounds r = bounds_->get(expr->right());
Type* lower =
!l.lower->IsInhabited() || !r.lower->IsInhabited()
? Type::None()
@@ -792,13 +785,5 @@ void AstTyper::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
}
-void AstTyper::VisitImportDeclaration(ImportDeclaration* declaration) {
-}
-
-
-void AstTyper::VisitExportDeclaration(ExportDeclaration* declaration) {
-}
-
-
} // namespace internal
} // namespace v8
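The typing.cc hunks above all apply one pattern: expression bounds move off the AST nodes (expr->bounds() / set_bounds()) and into an external AstTypeBounds side table reached through the new bounds_ member. A minimal sketch of that side-table shape (the stand-in structs and the plain map are illustrative assumptions; the real AstTypeBounds is zone-allocated):

#include <unordered_map>

// Hypothetical stand-ins for V8's Expression and Bounds types.
struct Expression {};
struct Bounds {};

// Side table keyed by AST node: the typer reads and writes bounds here
// instead of mutating fields on the Expression nodes themselves.
class AstTypeBoundsSketch {
 public:
  Bounds get(Expression* e) const {
    auto it = map_.find(e);
    return it == map_.end() ? Bounds{} : it->second;
  }
  void set(Expression* e, Bounds b) { map_[e] = b; }

 private:
  std::unordered_map<Expression*, Bounds> map_;
};

Keeping bounds in a side table means the AST itself no longer carries Crankshaft-only typing state, which is presumably why the constructor now threads an AstTypeBounds* through to the typer.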
diff --git a/deps/v8/src/crankshaft/typing.h b/deps/v8/src/crankshaft/typing.h
index 40b538aef3..94340c5a74 100644
--- a/deps/v8/src/crankshaft/typing.h
+++ b/deps/v8/src/crankshaft/typing.h
@@ -6,8 +6,9 @@
#define V8_CRANKSHAFT_TYPING_H_
#include "src/allocation.h"
-#include "src/ast/ast.h"
+#include "src/ast/ast-type-bounds.h"
#include "src/ast/scopes.h"
+#include "src/ast/variables.h"
#include "src/effects.h"
#include "src/type-info.h"
#include "src/types.h"
@@ -16,11 +17,13 @@
namespace v8 {
namespace internal {
+class FunctionLiteral;
-class AstTyper: public AstVisitor {
+class AstTyper final : public AstVisitor<AstTyper> {
public:
AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
- Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root);
+ DeclarationScope* scope, BailoutId osr_ast_id, FunctionLiteral* root,
+ AstTypeBounds* bounds);
void Run();
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -36,20 +39,21 @@ class AstTyper: public AstVisitor {
Isolate* isolate_;
Zone* zone_;
Handle<JSFunction> closure_;
- Scope* scope_;
+ DeclarationScope* scope_;
BailoutId osr_ast_id_;
FunctionLiteral* root_;
TypeFeedbackOracle oracle_;
Store store_;
+ AstTypeBounds* bounds_;
Zone* zone() const { return zone_; }
TypeFeedbackOracle* oracle() { return &oracle_; }
void NarrowType(Expression* e, Bounds b) {
- e->set_bounds(Bounds::Both(e->bounds(), b, zone()));
+ bounds_->set(e, Bounds::Both(bounds_->get(e), b, zone()));
}
void NarrowLowerType(Expression* e, Type* t) {
- e->set_bounds(Bounds::NarrowLower(e->bounds(), t, zone()));
+ bounds_->set(e, Bounds::NarrowLower(bounds_->get(e), t, zone()));
}
Effects EnterEffects() {
@@ -69,10 +73,10 @@ class AstTyper: public AstVisitor {
var->IsParameter() ? parameter_index(var->index()) : kNoVar;
}
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
- void VisitStatements(ZoneList<Statement*>* statements) override;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitStatements(ZoneList<Statement*>* statements);
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
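The other structural change in this header: AstTyper now derives from AstVisitor<AstTyper> and the visit methods lose their override keywords, i.e. the visitor moved from virtual dispatch to the curiously recurring template pattern, where the base calls into the subclass at compile time. A stripped-down sketch with placeholder names (not V8's real hierarchy):

// CRTP visitor: the base resolves the call against Subclass statically,
// so per-node virtual dispatch (and the override keyword) goes away.
struct AstNode {};

template <class Subclass>
class AstVisitorSketch {
 public:
  void Visit(AstNode* node) {
    static_cast<Subclass*>(this)->VisitNode(node);
  }
};

class TyperSketch final : public AstVisitorSketch<TyperSketch> {
 public:
  void VisitNode(AstNode* /*node*/) { /* narrow types here */ }
};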
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 28dfe8a8dd..e417eaaeb1 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -12,7 +12,6 @@
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -168,25 +167,23 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
- int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ Push(rdi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ Push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -196,10 +193,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -299,8 +297,6 @@ bool LCodeGen::GenerateJumpTable() {
}
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
@@ -702,9 +698,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
-
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -748,7 +743,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
@@ -757,13 +752,12 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
@@ -776,9 +770,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@@ -839,13 +832,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -918,7 +904,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@@ -935,7 +921,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -950,7 +936,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -972,7 +958,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -983,7 +969,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
@@ -1003,7 +989,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1029,13 +1015,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
return;
}
@@ -1062,7 +1048,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1070,7 +1056,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1117,7 +1103,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1126,7 +1112,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1136,7 +1122,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1165,19 +1151,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1198,7 +1184,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1206,7 +1192,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1216,7 +1202,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1236,7 +1222,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1245,7 +1231,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1255,7 +1241,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1266,7 +1252,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1343,7 +1329,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1362,10 +1348,10 @@ void LCodeGen::DoMulI(LMulI* instr) {
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1373,7 +1359,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToOperand(right));
}
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1381,7 +1367,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
__ orl(kScratchRegister, ToRegister(right));
}
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@@ -1494,7 +1480,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@@ -1523,7 +1509,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@@ -1538,7 +1524,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
@@ -1581,7 +1567,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1756,7 +1742,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
}
@@ -1782,7 +1768,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
: SmiValuesAre31Bits());
__ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
- __ movp(left_reg, right_imm);
+ __ movl(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1890,13 +1876,12 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ Movapd(result, result);
break;
case Token::MOD: {
- XMMRegister xmm_scratch = double_scratch0();
- __ PrepareCallCFunction(2);
- __ Movapd(xmm_scratch, left);
+ DCHECK(left.is(xmm0));
DCHECK(right.is(xmm1));
+ DCHECK(result.is(xmm0));
+ __ PrepareCallCFunction(2);
__ CallCFunction(
ExternalReference::mod_two_doubles_operation(isolate()), 2);
- __ Movapd(result, xmm_scratch);
break;
}
default:
@@ -2035,7 +2020,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
const Register map = kScratchRegister;
@@ -2095,7 +2080,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -2441,16 +2426,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(rax));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2471,20 +2446,19 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
Label loop;
__ bind(&loop);
-
// Deoptimize if the object needs to be access checked.
__ testb(FieldOperand(object_map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
- DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
__ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmpp(object_prototype, prototype);
- EmitTrueBranch(instr, equal);
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, equal);
+ __ cmpp(object_prototype, prototype);
+ EmitTrueBranch(instr, equal);
__ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
__ jmp(&loop);
}
@@ -2576,15 +2550,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(rax));
- __ Move(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2596,7 +2567,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -2617,7 +2588,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
__ j(not_equal, &skip_assignment);
}
@@ -2699,10 +2670,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
__ Move(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2717,7 +2685,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -2819,7 +2787,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movl(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -2860,7 +2828,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -2917,10 +2885,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(NegateCondition(smi), instr, DeoptimizeReason::kNotASmi);
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
} else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -2934,7 +2902,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Cmp(FieldOperand(result, Cell::kValueOffset),
Smi::FromInt(Isolate::kArrayProtectorValid));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ Move(result, isolate()->factory()->undefined_value());
__ bind(&done);
@@ -2986,13 +2954,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3087,9 +3051,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
- DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
- DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3114,7 +3078,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
- DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
__ Push(receiver);
__ movp(receiver, length);
@@ -3187,6 +3151,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
+ __ Push(instr->hydrogen()->feedback_vector());
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3284,7 +3249,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
Label slow, allocated, done;
uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
@@ -3341,7 +3306,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negl(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@@ -3352,7 +3317,7 @@ void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@@ -3414,18 +3379,18 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
// Deoptimize if minus zero.
__ Movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero);
}
__ Roundsd(xmm_scratch, input_reg, kRoundDown);
__ Cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ Xorpd(xmm_scratch, xmm_scratch); // Zero the register.
__ Ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3434,7 +3399,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ j(above, &positive_sign, Label::kNear);
__ Movmskpd(output_reg, input_reg);
__ testl(output_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
@@ -3444,7 +3409,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3455,7 +3420,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
__ Ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ bind(&done);
}
@@ -3497,7 +3462,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ Cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3513,7 +3478,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
__ Cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ Ucomisd(xmm_scratch, input_temp);
@@ -3528,7 +3493,7 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
- DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kMinusZero);
}
__ Set(output_reg, 0);
__ bind(&done);
@@ -3607,7 +3572,7 @@ void LCodeGen::DoPower(LPower* instr) {
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3621,45 +3586,32 @@ void LCodeGen::DoPower(LPower* instr) {
}
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+ DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 1);
+}
void LCodeGen::DoMathExp(LMathExp* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- XMMRegister temp0 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
+ DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+ DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 1);
}
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+ DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 1);
+}
void LCodeGen::DoMathLog(LMathLog* instr) {
- DCHECK(instr->value()->Equals(instr->result()));
- XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = double_scratch0();
- Label positive, done, zero;
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Ucomisd(input_reg, xmm_scratch);
- __ j(above, &positive, Label::kNear);
- __ j(not_carry, &zero, Label::kNear);
- __ Pcmpeqd(input_reg, input_reg);
- __ jmp(&done, Label::kNear);
- __ bind(&zero);
- ExternalReference ninf =
- ExternalReference::address_of_negative_infinity();
- Operand ninf_operand = masm()->ExternalOperand(ninf);
- __ Movsd(input_reg, ninf_operand);
- __ jmp(&done, Label::kNear);
- __ bind(&positive);
- __ fldln2();
- __ subp(rsp, Immediate(kDoubleSize));
- __ Movsd(Operand(rsp, 0), input_reg);
- __ fld_d(Operand(rsp, 0));
- __ fyl2x();
- __ fstp_d(Operand(rsp, 0));
- __ Movsd(input_reg, Operand(rsp, 0));
- __ addp(rsp, Immediate(kDoubleSize));
- __ bind(&done);
+ DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
+ DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
+ __ PrepareCallCFunction(1);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 1);
}
@@ -3682,7 +3634,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3751,14 +3705,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ Move(rbx, instr->hydrogen()->site());
- } else {
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- }
+ __ Move(rbx, instr->hydrogen()->site());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
@@ -3792,7 +3739,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -3955,14 +3902,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4023,7 +3968,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -4214,13 +4159,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4315,8 +4258,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
__ Integer32ToSmi(rbx, rbx);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
__ StoreToSafepointRegisterSlot(result, result);
@@ -4324,7 +4266,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
Condition is_smi = __ CheckSmi(result);
- DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
}
@@ -4351,8 +4293,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
__ Move(rbx, to_map);
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
}
@@ -4365,7 +4306,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4608,13 +4549,10 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagIU uses the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(rsi)) {
+ __ Set(rsi, 0);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4664,12 +4602,10 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
{
PushSafepointRegistersScope scope(this);
- // NumberTagD uses the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(rsi)) {
+ __ Move(rsi, 0);
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4686,12 +4622,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kOverflow);
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -4701,7 +4637,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
- DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kNotASmi);
} else {
__ AssertSmi(input);
}
@@ -4732,7 +4668,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
}
if (deoptimize_on_minus_zero) {
@@ -4742,7 +4678,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ j(not_equal, &done, Label::kNear);
__ Movmskpd(kScratchRegister, result_reg);
__ testl(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
@@ -4751,9 +4687,11 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN. Compute NaN as 0/0.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr,
+ DeoptimizeReason::kNotAHeapNumberUndefined);
- __ Pcmpeqd(result_reg, result_reg);
+ __ Xorpd(result_reg, result_reg);
+ __ Divsd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
@@ -4798,26 +4736,27 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
DeoptimizeIf(not_equal, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ Set(input_reg, 0);
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
- DCHECK(!scratch.is(xmm0));
+ DCHECK(!scratch.is(double_scratch0()));
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
- __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ Cvttsd2si(input_reg, xmm0);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
+ __ Movsd(double_scratch0(),
+ FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ Cvttsd2si(input_reg, double_scratch0());
__ Cvtlsi2sd(scratch, input_reg);
- __ Ucomisd(xmm0, scratch);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ __ Ucomisd(double_scratch0(), scratch);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
+ DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
- __ Movmskpd(input_reg, xmm0);
+ __ Movmskpd(input_reg, double_scratch0());
__ andl(input_reg, Immediate(1));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
}
}
@@ -4888,11 +4827,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -4915,21 +4854,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
__ Integer32ToSmi(result_reg, result_reg);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(NegateCondition(cc), instr, DeoptimizeReason::kNotASmi);
}
@@ -4937,7 +4876,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
- DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kSmi);
}
}
@@ -4950,7 +4889,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}
@@ -4969,14 +4908,14 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
} else {
- DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
- DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@@ -4989,13 +4928,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeReason::kWrongInstanceType);
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -5004,7 +4943,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}
@@ -5019,7 +4958,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ testp(rax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@@ -5073,7 +5012,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@@ -5112,7 +5051,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
@@ -5131,29 +5070,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- XMMRegister value_reg = ToDoubleRegister(instr->value());
- Register result_reg = ToRegister(instr->result());
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ Movq(result_reg, value_reg);
- __ shrq(result_reg, Immediate(32));
- } else {
- __ Movd(result_reg, value_reg);
- }
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- XMMRegister result_reg = ToDoubleRegister(instr->result());
- __ movl(kScratchRegister, hi_reg);
- __ shlq(kScratchRegister, Immediate(32));
- __ orq(kScratchRegister, lo_reg);
- __ Movq(result_reg, kScratchRegister);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
@@ -5173,7 +5089,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register temp = ToRegister(instr->temp());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -5182,6 +5098,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5211,6 +5132,29 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
}
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, temp, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, temp, flags);
+ }
+}
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
@@ -5243,6 +5187,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ allocation_flags = static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+ }
+ // If the allocation-folding dominator's allocation triggered a GC, the
+ // allocation happened in the runtime. We have to reset the top pointer to
+ // virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ __ subp(rax, Immediate(kHeapObjectTag));
+ __ Store(allocation_top, rax);
+ __ addp(rax, Immediate(kHeapObjectTag));
+ }
}
@@ -5255,8 +5215,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ Move(rax, isolate()->factory()->number_string());
__ jmp(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5522,7 +5482,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
- DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kNoCache);
}
@@ -5530,7 +5490,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
index 139645e6cd..22c39ad088 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -111,13 +111,11 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk()->graph(); }
- XMMRegister double_scratch0() const { return xmm0; }
+ XMMRegister double_scratch0() const { return kScratchDoubleReg; }
void EmitClassOfTest(Label* if_true,
Label* if_false,
@@ -207,10 +205,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -240,7 +238,7 @@ class LCodeGen: public LCodeGenBase {
void EmitIntegerMathAbs(LMathAbs* instr);
void EmitSmiMathAbs(LMathAbs* instr);
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -250,7 +248,6 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
index 3808c377dc..94dffb333a 100644
--- a/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -223,8 +223,8 @@ void LGapResolver::EmitMove(int index) {
__ Movsd(cgen_->ToDoubleRegister(destination), src);
} else {
DCHECK(destination->IsDoubleStackSlot());
- __ Movsd(xmm0, src);
- __ Movsd(cgen_->ToOperand(destination), xmm0);
+ __ Movsd(kScratchDoubleReg, src);
+ __ Movsd(cgen_->ToOperand(destination), kScratchDoubleReg);
}
} else {
UNREACHABLE();
@@ -264,18 +264,18 @@ void LGapResolver::EmitSwap(int index) {
// Swap two stack slots or two double stack slots.
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
- __ Movsd(xmm0, src);
+ __ Movsd(kScratchDoubleReg, src);
__ movp(kScratchRegister, dst);
- __ Movsd(dst, xmm0);
+ __ Movsd(dst, kScratchDoubleReg);
__ movp(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
- __ Movapd(xmm0, source_reg);
+ __ Movapd(kScratchDoubleReg, source_reg);
__ Movapd(source_reg, destination_reg);
- __ Movapd(destination_reg, xmm0);
+ __ Movapd(destination_reg, kScratchDoubleReg);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
@@ -287,9 +287,9 @@ void LGapResolver::EmitSwap(int index) {
LOperand* other = source->IsDoubleRegister() ? destination : source;
DCHECK(other->IsDoubleStackSlot());
Operand other_operand = cgen_->ToOperand(other);
- __ Movapd(xmm0, reg);
+ __ Movapd(kScratchDoubleReg, reg);
__ Movsd(reg, other_operand);
- __ Movsd(other_operand, xmm0);
+ __ Movsd(other_operand, kScratchDoubleReg);
} else {
// No other combinations are possible.
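// Editor's note: the hunks above replace the hard-coded xmm0 scratch with
// the dedicated kScratchDoubleReg. A hedged sketch of the swap pattern the
// gap resolver uses: two slots exchanged through two scratch temporaries
// (one double, one general-purpose), never spilling to a third memory slot.
#include <cstdint>
#include <cstring>

void SwapStackSlots(uint64_t* src, uint64_t* dst) {
  double scratch_double;                                     // kScratchDoubleReg
  std::memcpy(&scratch_double, src, sizeof scratch_double);  // Movsd(scratch, src)
  uint64_t scratch_gp = *dst;                                // movp(kScratchRegister, dst)
  std::memcpy(dst, &scratch_double, sizeof scratch_double);  // Movsd(dst, scratch)
  *src = scratch_gp;                                         // movp(src, kScratchRegister)
}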
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index e86b90c838..42451690af 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -714,10 +714,10 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
DCHECK(instr->left()->representation().IsDouble());
DCHECK(instr->right()->representation().IsDouble());
if (op == Token::MOD) {
- LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+ LOperand* left = UseFixedDouble(instr->BetterLeftOperand(), xmm0);
LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
- return MarkAsCall(DefineSameAsFirst(result), instr);
+ return MarkAsCall(DefineFixedDouble(result, xmm0), instr);
} else {
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
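// Editor's note: there is no SSE instruction for a double modulus, so the
// MOD case above is lowered to a call, with both operands pinned to fixed
// XMM registers (xmm0 in/out, xmm1 for the divisor) because a call clobbers
// every allocatable register. A hedged sketch of the equivalent semantics:
#include <cmath>

double LowerDoubleMod(double left /* fixed to xmm0 */,
                      double right /* fixed to xmm1 */) {
  return std::fmod(left, right);  // MarkAsCall: result returns in xmm0
}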
@@ -907,7 +907,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -957,17 +957,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), rsi);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1101,10 +1090,14 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DoMathFround(instr);
case kMathAbs:
return DoMathAbs(instr);
+ case kMathCos:
+ return DoMathCos(instr);
case kMathLog:
return DoMathLog(instr);
case kMathExp:
return DoMathExp(instr);
+ case kMathSin:
+ return DoMathSin(instr);
case kMathSqrt:
return DoMathSqrt(instr);
case kMathPowHalf:
@@ -1166,8 +1159,9 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* input = UseRegisterAtStart(instr->value());
- return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
+ LOperand* input = UseFixedDouble(instr->value(), xmm0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), xmm0),
+ instr);
}
@@ -1177,17 +1171,29 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), xmm0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathCos(input), xmm0),
+ instr);
+}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
+ LOperand* input = UseFixedDouble(instr->value(), xmm0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathExp(input), xmm0),
+ instr);
}
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseFixedDouble(instr->value(), xmm0);
+ return MarkAsCall(DefineFixedDouble(new (zone()) LMathSin(input), xmm0),
+ instr);
+}
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LOperand* input = UseAtStart(instr->value());
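// Editor's note: DoMathCos, DoMathExp, and DoMathSin above now share one
// lowering shape: input fixed to xmm0, node marked as a call, result read
// back from xmm0 after the ieee754 C implementation runs. A hedged sketch
// of that shared pattern (names hypothetical):
#include <cmath>

enum class UnaryMathOp { kCos, kExp, kSin };

double LowerUnaryMathCall(UnaryMathOp op, double value /* fixed to xmm0 */) {
  switch (op) {
    case UnaryMathOp::kCos: return std::cos(value);  // ieee754_cos_function
    case UnaryMathOp::kExp: return std::exp(value);  // ieee754_exp_function
    case UnaryMathOp::kSin: return std::sin(value);  // ieee754_sin_function
  }
  return 0.0;  // unreachable
}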
@@ -1961,20 +1967,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2004,15 +1996,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -2069,10 +2055,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
return MarkAsCall(DefineFixed(result, rax), instr);
@@ -2170,10 +2153,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key, vector);
@@ -2254,12 +2234,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result = new (zone())
LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2366,12 +2342,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
@@ -2408,14 +2380,19 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* size = instr->size()->IsConstant()
- ? UseConstant(instr->size())
- : UseTempRegister(instr->size());
- LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
+ LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
+ : UseRegister(instr->size());
+ if (instr->IsAllocationFolded()) {
+ LOperand* temp = TempRegister();
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new (zone()) LAllocate(context, size, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index 1feba4bf20..5c0ce04a8a 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -53,7 +53,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -61,12 +60,12 @@ class LCodeGen;
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
- V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(DummyUse) \
V(Dummy) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -78,7 +77,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -98,6 +96,7 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
+ V(MathCos) \
V(MathExp) \
V(MathFloorD) \
V(MathFloorI) \
@@ -107,6 +106,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRoundD) \
V(MathRoundI) \
+ V(MathSin) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
@@ -909,23 +909,32 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
public:
@@ -1136,22 +1145,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
@@ -1597,13 +1590,10 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
LOperand* temp_vector() { return temps_[0]; }
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ explicit LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
@@ -1611,7 +1601,6 @@ class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
@@ -2172,6 +2161,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2342,33 +2333,6 @@ class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LAllocate final : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2385,6 +2349,19 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp) {
+ inputs_[0] = size;
+ temps_[0] = temp;
+ }
+
+ LOperand* size() const { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
@@ -2542,8 +2519,10 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
LInstruction* DoMathClz32(HUnaryMathOperation* instr);
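// Editor's note: with LFastAllocate added above, DoAllocate chooses between
// two instructions: LAllocate, which may defer to a runtime call and thus
// needs a context and a pointer map, and LFastAllocate, a folded allocation
// that can never call out. A hedged sketch of the selection rule:
struct AllocationRequest {
  bool is_folded;  // already covered by a dominating allocation's reservation
};

enum class AllocationPath { kFastOnly, kFastWithRuntimeFallback };

AllocationPath SelectAllocationPath(const AllocationRequest& request) {
  // Folded allocations never need a safepoint or a deferred runtime call;
  // their dominator reserved the memory and owns the slow path.
  return request.is_folded ? AllocationPath::kFastOnly
                           : AllocationPath::kFastWithRuntimeFallback;
}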
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 1ca3a99271..f6aa9639b3 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -14,7 +14,6 @@
#include "src/deoptimizer.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/x87/frames-x87.h"
namespace v8 {
@@ -135,25 +134,24 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
Comment(";;; Prologue begin");
// Possibly allocate a local context.
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
if (info()->scope()->is_script_scope()) {
__ push(edi);
__ Push(info()->scope()->GetScopeInfo(info()->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
- } else if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
+ } else {
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
+ Immediate(slots));
__ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
+ // Result of FastNewFunctionContextStub is always in new space.
need_write_barrier = false;
- } else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext);
}
RecordSafepoint(deopt_mode);
@@ -163,10 +161,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
// Copy parameters into context if necessary.
- int num_parameters = scope()->num_parameters();
- int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+ int num_parameters = info()->scope()->num_parameters();
+ int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var = (i == -1) ? info()->scope()->receiver()
+ : info()->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
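// Editor's note: the prologue above copies context-allocated parameters from
// the caller's stack into the freshly allocated context. A hedged sketch of
// the offset arithmetic it relies on, with constants restated for ia32 and
// the receiver treated as parameter -1, as in the loop above:
const int kPointerSize = 4;                    // ia32 pointer width
const int kCallerSPOffset = 2 * kPointerSize;  // assumption: ret addr + saved ebp

int ParameterOffset(int num_parameters, int i) {
  // Parameter i sits (num_parameters - 1 - i) slots above the caller SP,
  // so the last parameter is the one closest to it.
  return kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
}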
@@ -201,6 +200,13 @@ void LCodeGen::GenerateOsrPrologue() {
osr_pc_offset_ = masm()->pc_offset();
+ // The interpreter is the first-tier compiler now. It runs the code
+ // generated by the TurboFan compiler, which always leaves "1" on the x87
+ // FPU stack. That would trip Crankshaft's x87 FPU stack depth check in
+ // debug mode, so reset the FPU stack here.
+ __ fninit();
+
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
@@ -268,8 +274,6 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
- info()->LogDeoptCallPosition(masm()->pc_offset(),
- table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
@@ -945,9 +949,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
}
}
-
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -1009,19 +1012,18 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
- info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
- if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+ if (FLAG_trace_deopt || isolate()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
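// Editor's note: the check above now asks isolate()->is_profiling() instead
// of poking the CPU profiler directly. The surrounding logic deduplicates
// deopt jump-table entries; a hedged sketch of that reuse rule:
#include <vector>

struct JumpTableEntry {
  const void* entry;
  int deopt_reason;
  bool IsEquivalentTo(const JumpTableEntry& other) const {
    return entry == other.entry && deopt_reason == other.deopt_reason;
  }
};

// Consecutive deopts to the same target share one table slot, unless tracing
// or profiling wants each deopt individually attributable.
void AddDeoptEntry(std::vector<JumpTableEntry>* table, const JumpTableEntry& e,
                   bool trace_or_profile) {
  if (trace_or_profile || table->empty() ||
      !e.IsEquivalentTo(table->back())) {
    table->push_back(e);
  }
}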
@@ -1034,9 +1036,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@@ -1095,13 +1096,6 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
}
-void LCodeGen::RecordAndWritePosition(int position) {
- if (position == RelocInfo::kNoPosition) return;
- masm()->positions_recorder()->RecordPosition(position);
- masm()->positions_recorder()->WriteRecordedPositions();
-}
-
-
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
@@ -1184,7 +1178,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, mask);
__ neg(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
@@ -1201,7 +1195,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1216,7 +1210,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmp(dividend, Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1238,7 +1232,7 @@ void LCodeGen::DoModI(LModI* instr) {
// deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, Operand(right_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1249,7 +1243,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ cmp(right_reg, -1);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Move(result_reg, Immediate(0));
@@ -1268,7 +1262,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ j(not_sign, &positive_left, Label::kNear);
__ idiv(right_reg);
__ test(result_reg, Operand(result_reg));
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
@@ -1288,19 +1282,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ test(dividend, Immediate(mask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1321,7 +1315,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1329,7 +1323,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
@@ -1339,7 +1333,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(eax, edx);
__ imul(eax, eax, divisor);
__ sub(eax, dividend);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1359,7 +1353,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1368,7 +1362,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1378,7 +1372,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1389,7 +1383,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ test(remainder, remainder);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1411,13 +1405,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ neg(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
return;
}
@@ -1444,7 +1438,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(ToRegister(instr->result()).is(edx));
if (divisor == 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1452,7 +1446,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ test(dividend, dividend);
- DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1499,7 +1493,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(divisor, divisor);
- DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1508,7 +1502,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ test(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ test(divisor, divisor);
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
__ bind(&dividend_not_zero);
}
@@ -1518,7 +1512,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ cmp(dividend, kMinInt);
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmp(divisor, -1);
- DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
__ bind(&dividend_not_min_int);
}
@@ -1596,7 +1590,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1606,15 +1600,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ j(not_zero, &done);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmp(ToRegister(instr->temp()), Immediate(0));
- DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
- DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@@ -1687,7 +1681,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr_cl(ToRegister(left));
if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@@ -1704,7 +1698,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::ROR:
if (shift_count == 0 && instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
} else {
__ ror(ToRegister(left), shift_count);
}
@@ -1719,7 +1713,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shr(ToRegister(left), shift_count);
} else if (instr->can_deopt()) {
__ test(ToRegister(left), ToRegister(left));
- DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
}
break;
case Token::SHL:
@@ -1730,7 +1724,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
__ shl(ToRegister(left), shift_count - 1);
}
__ SmiTag(ToRegister(left));
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
__ shl(ToRegister(left), shift_count);
}
@@ -1756,7 +1750,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
__ sub(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1904,7 +1898,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
__ add(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
}
@@ -1972,8 +1966,8 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ pop(scratch_reg); // restore esp
} else {
// Since we operate on +0 and/or -0, addsd and andsd have the same effect.
- X87Fxch(left_reg);
- __ fadd(1);
+ // This should leave the result in stX_0.
+ __ fadd_i(1);
}
__ jmp(&return_left, Label::kNear);
@@ -1984,7 +1978,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ j(parity_even, &return_left, Label::kNear); // left == NaN.
__ bind(&return_right);
- X87Fxch(left_reg);
X87Mov(left_reg, right_reg);
__ bind(&return_left);
@@ -2158,7 +2151,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
@@ -2221,7 +2214,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -2358,9 +2351,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ add(esp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
- // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
- // so we check the upper with 0xffffffff for hole as a temporary fix.
- __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+ __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
EmitBranch(instr, equal);
}
@@ -2572,16 +2563,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
- DCHECK(ToRegister(instr->result()).is(eax));
- InstanceOfStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoHasInPrototypeChainAndBranch(
LHasInPrototypeChainAndBranch* instr) {
Register const object = ToRegister(instr->object());
@@ -2605,16 +2586,16 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
// Deoptimize if the object needs to be access checked.
__ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CmpInstanceType(object_map, JS_PROXY_TYPE);
- DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
__ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object_prototype, prototype);
- EmitTrueBranch(instr, equal);
__ cmp(object_prototype, factory()->null_value());
EmitFalseBranch(instr, equal);
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, equal);
__ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
__ jmp(&loop);
}
@@ -2711,15 +2692,12 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->global_object())
- .is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(eax));
- __ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), instr->typeof_mode(), PREMONOMORPHIC)
- .code();
+ Handle<Code> ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2732,7 +2710,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
@@ -2753,7 +2731,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(target, factory()->the_hole_value());
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
} else {
__ j(not_equal, &skip_assignment, Label::kNear);
}
@@ -2828,10 +2806,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF,
- instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2847,7 +2822,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(factory()->the_hole_value()));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -2929,7 +2904,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ mov(result, operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ test(result, Operand(result));
- DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -2961,7 +2936,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
@@ -2987,10 +2962,10 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
} else {
__ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -2999,12 +2974,12 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ j(not_equal, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
- // it needs to bail out.
- __ mov(result, isolate()->factory()->array_protector());
+ // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+ // Otherwise it needs to bail out.
+ __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ cmp(FieldOperand(result, PropertyCell::kValueOffset),
Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ mov(result, isolate()->factory()->undefined_value());
__ bind(&done);
@@ -3059,13 +3034,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- }
+ EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->initialization_state())
- .code();
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3158,9 +3129,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
- DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
@@ -3184,7 +3155,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, kArgumentsLimit);
- DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
__ push(receiver);
__ mov(receiver, length);
@@ -3261,6 +3232,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
__ push(Immediate(instr->hydrogen()->pairs()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+ __ push(Immediate(instr->hydrogen()->feedback_vector()));
CallRuntime(Runtime::kDeclareGlobals, instr);
}
@@ -3359,7 +3331,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
Label slow, allocated, done;
uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
@@ -3417,7 +3389,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ neg(input_reg); // Sets flags.
- DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
__ bind(&is_positive);
}
@@ -3470,7 +3442,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ fldz();
__ fld(1);
__ FCmp();
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
__ j(below, &not_minus_zero, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3479,7 +3451,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// +- 0.0.
__ fld(0);
__ FXamSign();
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
__ Move(output_reg, Immediate(0));
__ jmp(&done, Label::kFar);
}
@@ -3494,7 +3466,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ pop(output_reg);
__ X87SetRC(0x0000);
__ X87CheckIA();
- DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kOverflow);
__ fnclex();
__ X87SetRC(0x0000);
__ bind(&done);
@@ -3530,7 +3502,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check overflow.
__ X87CheckIA();
__ pop(result);
- DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
@@ -3547,7 +3519,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// If the sign is positive, we return +0.
__ fld(0);
__ FXamSign();
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ Move(result, Immediate(0));
__ jmp(&done);
@@ -3566,7 +3538,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Check overflow.
__ X87CheckIA();
__ pop(result);
- DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kConversionOverflow);
__ fnclex();
// Restore round mode.
__ X87SetRC(0x0000);
@@ -3644,7 +3616,7 @@ void LCodeGen::DoPower(LPower* instr) {
X87LoadForUsage(base);
__ JumpIfSmi(exponent, &no_deopt);
__ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
// Heap number(double)
__ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -3696,40 +3668,17 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
DCHECK(instr->value()->Equals(instr->result()));
+ X87Register result = ToX87Register(instr->result());
X87Register input_reg = ToX87Register(instr->value());
X87Fxch(input_reg);
- Label positive, done, zero, nan_result;
- __ fldz();
- __ fld(1);
- __ FCmp();
- __ j(below, &nan_result, Label::kNear);
- __ j(equal, &zero, Label::kNear);
- // Positive input.
- // {input, ln2}.
- __ fldln2();
- // {ln2, input}.
- __ fxch();
- // {result}.
- __ fyl2x();
- __ jmp(&done, Label::kNear);
-
- __ bind(&nan_result);
- X87PrepareToWrite(input_reg);
- __ push(Immediate(0xffffffff));
- __ push(Immediate(0x7fffffff));
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- X87CommitWrite(input_reg);
- __ jmp(&done, Label::kNear);
-
- __ bind(&zero);
- ExternalReference ninf = ExternalReference::address_of_negative_infinity();
- X87PrepareToWrite(input_reg);
- __ fld_d(Operand::StaticVariable(ninf));
- X87CommitWrite(input_reg);
-
- __ bind(&done);
+ // Pass one double as argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ fstp_d(MemOperand(esp, 0));
+ X87PrepareToWrite(result);
+ __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
}
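// Editor's note: DoMathLog above (and DoMathCos/DoMathSin/DoMathExp below)
// now share one shape on ia32/x87: spill the operand from the FPU stack into
// the outgoing argument slot, call the ieee754 C routine, and read the
// result back from st(0). A hedged sketch of that contract seen from C,
// where cdecl passes doubles on the stack and returns them in st(0):
#include <cmath>

// Stand-in for ExternalReference::ieee754_log_function: a plain C function;
// the generated code's PrepareCallCFunction/fstp_d pair fills its argument
// slot, and its return value arrives in st(0).
extern "C" double ieee754_log_stub(double x) { return std::log(x); }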
@@ -3740,67 +3689,46 @@ void LCodeGen::DoMathClz32(LMathClz32* instr) {
__ Lzcnt(result, input);
}
+void LCodeGen::DoMathCos(LMathCos* instr) {
+ X87Register result = ToX87Register(instr->result());
+ X87Register input_reg = ToX87Register(instr->value());
+ __ fld(x87_stack_.st(input_reg));
-void LCodeGen::DoMathExp(LMathExp* instr) {
- X87Register input = ToX87Register(instr->value());
- X87Register result_reg = ToX87Register(instr->result());
- Register temp_result = ToRegister(instr->temp1());
- Register temp = ToRegister(instr->temp2());
- Label slow, done, smi, finish;
- DCHECK(result_reg.is(input));
-
- // Store input into Heap number and call runtime function kMathExpRT.
- if (FLAG_inline_new) {
- __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
- __ jmp(&done, Label::kNear);
- }
-
- // Slow case: Call the runtime system to do the number allocation.
- __ bind(&slow);
- {
- // TODO(3095996): Put a valid pointer value in the stack slot where the
- // result register is stored, as this register is in the pointer map, but
- // contains an integer value.
- __ Move(temp_result, Immediate(0));
+ // Pass one double as argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ fstp_d(MemOperand(esp, 0));
+ X87PrepareToWrite(result);
+ __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+}
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
+void LCodeGen::DoMathSin(LMathSin* instr) {
+ X87Register result = ToX87Register(instr->result());
+ X87Register input_reg = ToX87Register(instr->value());
+ __ fld(x87_stack_.st(input_reg));
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(instr->pointer_map(), 0,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(temp_result, eax);
- }
- __ bind(&done);
- X87LoadForUsage(input);
- __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+ // Pass one double as argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ fstp_d(MemOperand(esp, 0));
+ X87PrepareToWrite(result);
+ __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
+}
- {
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ X87Register result = ToX87Register(instr->result());
+ X87Register input_reg = ToX87Register(instr->value());
+ __ fld(x87_stack_.st(input_reg));
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ push(temp_result);
- __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
- RecordSafepointWithRegisters(instr->pointer_map(), 1,
- Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(temp_result, eax);
- }
- X87PrepareToWrite(result_reg);
- // return value of MathExpRT is Smi or Heap Number.
- __ JumpIfSmi(temp_result, &smi);
- // Heap number(double)
- __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
- __ jmp(&finish);
- // SMI
- __ bind(&smi);
- __ SmiUntag(temp_result);
- __ push(temp_result);
- __ fild_s(MemOperand(esp, 0));
- __ pop(temp_result);
- __ bind(&finish);
- X87CommitWrite(result_reg);
+ // Pass one double as argument on the stack.
+ __ PrepareCallCFunction(2, eax);
+ __ fstp_d(MemOperand(esp, 0));
+ X87PrepareToWrite(result);
+ __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
+ // Return value is in st(0) on ia32.
+ X87CommitWrite(result);
}
void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
@@ -3815,7 +3743,9 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
#endif
if (FLAG_code_comments) {
if (actual.is_reg()) {
- Comment(";;; PrepareForTailCall, actual: %s {", actual.reg().ToString());
+ Comment(";;; PrepareForTailCall, actual: %s {",
+ RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+ actual.reg().code()));
} else {
Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
}
@@ -3885,14 +3815,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->result()).is(eax));
__ Move(eax, Immediate(instr->arity()));
- if (instr->arity() == 1) {
- // We only need the allocation site for the case we have a length argument.
- // The case may bail out to the runtime, which will determine the correct
- // elements kind with the site.
- __ mov(ebx, instr->hydrogen()->site());
- } else {
- __ mov(ebx, isolate()->factory()->undefined_value());
- }
+ __ mov(ebx, instr->hydrogen()->site());
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
@@ -3926,7 +3849,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&done);
} else {
- ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+ ArrayNArgumentsConstructorStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4053,14 +3976,12 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic =
+ CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4085,7 +4006,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ int3();
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -4122,9 +4043,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ fst_d(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
- // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
- // so we check the upper with 0xffffffff for hole as a temporary fix.
- __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+ __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
__ j(not_equal, &no_special_nan_handling, Label::kNear);
__ mov(operand, Immediate(lower));
__ mov(operand2, Immediate(upper));
@@ -4210,9 +4129,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ fst_d(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
- // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
- // so we check the upper with 0xffffffff for hole as a temporary fix.
- __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+ __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
__ j(not_equal, &no_special_nan_handling, Label::kNear);
__ mov(double_store_operand, Immediate(lower));
__ mov(double_store_operand2, Immediate(upper));
@@ -4283,13 +4200,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- if (instr->hydrogen()->HasVectorAndSlot()) {
- EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
- }
+ EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
- isolate(), instr->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->language_mode())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4299,7 +4214,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4375,14 +4290,21 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- __ mov(ebx, ToImmediate(key, Representation::Smi()));
+ LConstantOperand* constant_key = LConstantOperand::cast(key);
+ int32_t int_key = ToInteger32(constant_key);
+ if (Smi::IsValid(int_key)) {
+ __ mov(ebx, Immediate(Smi::FromInt(int_key)));
+ } else {
+ // We should never get here at runtime because there is a smi check on
+ // the key before this point.
+ __ int3();
+ }
} else {
__ Move(ebx, ToRegister(key));
__ SmiTag(ebx);
}
- GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
- instr->hydrogen()->kind());
+ GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -4391,7 +4313,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
}
@@ -4423,8 +4345,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
DCHECK(object_reg.is(eax));
PushSafepointRegistersScope scope(this);
__ mov(ebx, to_map);
- bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
- TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -4683,13 +4604,10 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
-
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(esi)) {
+ __ Move(esi, Immediate(0));
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
@@ -4744,12 +4662,10 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ Move(reg, Immediate(0));
PushSafepointRegistersScope scope(this);
- // NumberTagI and NumberTagD use the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Reset the context register.
+ if (!reg.is(esi)) {
+ __ Move(esi, Immediate(0));
+ }
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
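Both NumberTag deferred paths now clear esi rather than reloading the frame context. Immediate(0) is Smi zero, so the safepoint that follows still sees a valid tagged value in the context register while Runtime::kAllocateHeapNumber runs; the reg.is(esi) guard skips the store when the result register already occupies esi (a reading of the change, not text from the commit).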
@@ -4763,12 +4679,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ test(input, Immediate(0xc0000000));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
}
__ SmiTag(input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
}
@@ -4779,7 +4695,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
} else {
__ AssertSmi(result);
}
@@ -4805,18 +4721,19 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
if (!can_convert_undefined_to_nan) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
} else {
Label heap_number, convert;
__ j(equal, &heap_number);
// Convert undefined (or hole) to NaN.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr,
+ DeoptimizeReason::kNotAHeapNumberUndefined);
__ bind(&convert);
- __ push(Immediate(0xffffffff));
- __ push(Immediate(0x7fffffff));
+ __ push(Immediate(0xfff80000));
+ __ push(Immediate(0x00000000));
__ fld_d(MemOperand(esp, 0));
__ lea(esp, Operand(esp, kDoubleSize));
__ jmp(&done, Label::kNear);
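The two immediates build the double 0xfff8000000000000 on the stack (low dword pushed last, at esp), which fld_d then loads. That is the default quiet-NaN encoding (sign and quiet bits set, zero payload), replacing the previous 0xffffffff7fffffff pattern.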
@@ -4838,7 +4755,7 @@ void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
// Pop FPU stack before deoptimizing.
__ fstp(0);
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done, Label::kNear);
} else {
@@ -4892,14 +4809,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ bind(&check_false);
__ cmp(input_reg, factory()->false_value());
DeoptimizeIf(not_equal, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ Move(input_reg, Immediate(0));
} else {
// TODO(olivf) Converting a number on the fpu is actually quite slow. We
// should first try a fast conversion and then bailout to this slow case.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
__ sub(esp, Immediate(kPointerSize));
__ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
@@ -4915,12 +4832,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ j(equal, &no_precision_lost, Label::kNear);
__ fstp(0);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&no_precision_lost);
__ j(parity_odd, &not_nan);
__ fstp(0);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&not_nan);
__ test(input_reg, Operand(input_reg));
@@ -4935,14 +4852,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ fstp_s(Operand(esp, 0));
__ pop(input_reg);
__ test(input_reg, Operand(input_reg));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
} else {
__ fist_s(MemOperand(esp, 0));
__ fild_s(MemOperand(esp, 0));
__ FCmp();
__ pop(input_reg);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
- DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
+ DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
}
}
}
@@ -5023,11 +4940,11 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -5047,21 +4964,21 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
&lost_precision, &is_nan, &minus_zero);
__ jmp(&done);
__ bind(&lost_precision);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
__ bind(&is_nan);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
__ bind(&minus_zero);
- DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
__ SmiTag(result_reg);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
}
@@ -5069,7 +4986,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ test(ToOperand(input), Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
}
@@ -5082,7 +4999,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}
@@ -5101,13 +5018,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
} else {
- DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
- DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@@ -5119,12 +5036,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
- Deoptimizer::kWrongInstanceType);
+ DeoptimizeReason::kWrongInstanceType);
} else {
__ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
__ and_(temp, mask);
__ cmp(temp, tag);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -5140,7 +5057,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Operand operand = ToOperand(instr->value());
__ cmp(operand, object);
}
- DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}
@@ -5155,7 +5072,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ test(eax, Immediate(kSmiTagMask));
}
- DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@@ -5212,7 +5129,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@@ -5253,7 +5170,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ jmp(&zero_result, Label::kNear);
// Heap number
@@ -5356,36 +5273,6 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
}
-void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
- X87Register value_reg = ToX87Register(instr->value());
- Register result_reg = ToRegister(instr->result());
- X87Fxch(value_reg);
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(Operand(esp, 0));
- if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
- __ mov(result_reg, Operand(esp, kPointerSize));
- } else {
- __ mov(result_reg, Operand(esp, 0));
- }
- __ add(esp, Immediate(kDoubleSize));
-}
-
-
-void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
- Register hi_reg = ToRegister(instr->hi());
- Register lo_reg = ToRegister(instr->lo());
- X87Register result_reg = ToX87Register(instr->result());
- // Follow below pattern to write a x87 fp register.
- X87PrepareToWrite(result_reg);
- __ sub(esp, Immediate(kDoubleSize));
- __ mov(Operand(esp, 0), lo_reg);
- __ mov(Operand(esp, kPointerSize), hi_reg);
- __ fld_d(Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- X87CommitWrite(result_reg);
-}
-
-
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
@@ -5407,7 +5294,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
Register temp = ToRegister(instr->temp());
// Allocate memory for the object.
- AllocationFlags flags = TAG_OBJECT;
+ AllocationFlags flags = NO_ALLOCATION_FLAGS;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
@@ -5416,6 +5303,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
+ }
+ DCHECK(!instr->hydrogen()->IsAllocationFolded());
+
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
CHECK(size <= Page::kMaxRegularHeapObjectSize);
@@ -5445,6 +5337,29 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
}
}
+void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
+ DCHECK(instr->hydrogen()->IsAllocationFolded());
+ DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ AllocationFlags flags = ALLOCATION_FOLDED;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+ flags = static_cast<AllocationFlags>(flags | PRETENURE);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
+ __ FastAllocate(size, result, temp, flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ FastAllocate(size, result, temp, flags);
+ }
+}
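LFastAllocate is the folded counterpart of the ALLOCATION_FOLDING_DOMINATOR flag added to DoAllocate above: the dominator performs one checked allocation sized for the whole folded group, and each folded member then carves its object out of that reservation via __ FastAllocate(size, result, temp, flags) with ALLOCATION_FOLDED set, performing no limit check of its own (a reading of the flag names and the DCHECKs, not commit text).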
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
@@ -5484,6 +5399,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
+
+ if (instr->hydrogen()->IsAllocationFoldingDominator()) {
+ AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
+ if (instr->hydrogen()->IsOldSpaceAllocation()) {
+ DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+      allocation_flags =
+          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
+    }
+    // If the allocation folding dominator's allocation triggered a GC, the
+    // allocation happened in the runtime. We have to reset the top pointer
+    // to virtually undo the allocation.
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
+ __ sub(eax, Immediate(kHeapObjectTag));
+ __ mov(Operand::StaticVariable(allocation_top), eax);
+ __ add(eax, Immediate(kHeapObjectTag));
+ }
}
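The sub/add pair around the store strips and restores kHeapObjectTag: eax holds the tagged pointer returned by the runtime, while the allocation-top external reference stores untagged addresses.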
@@ -5496,8 +5427,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ mov(eax, Immediate(isolate()->factory()->number_string()));
__ jmp(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5755,7 +5686,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
__ test(result, result);
- DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
}
@@ -5763,7 +5694,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
index 3719236a40..cdf02f3f8c 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -150,8 +150,6 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- LanguageMode language_mode() const { return info()->language_mode(); }
-
Scope* scope() const { return scope_; }
void EmitClassOfTest(Label* if_true,
@@ -234,10 +232,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason);
+ DeoptimizeReason deopt_reason);
bool DeoptEveryNTimes() {
return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -269,7 +267,7 @@ class LCodeGen: public LCodeGenBase {
void EmitIntegerMathAbs(LMathAbs* instr);
- // Support for recording safepoint and position information.
+ // Support for recording safepoint information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
@@ -280,8 +278,6 @@ class LCodeGen: public LCodeGenBase {
int arguments,
Safepoint::DeoptMode mode);
- void RecordAndWritePosition(int position) override;
-
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
diff --git a/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
index aa9183541f..6bfc2e2a07 100644
--- a/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-gap-resolver-x87.cc
@@ -168,8 +168,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : reg.code();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
@@ -184,8 +183,7 @@ Register LGapResolver::GetFreeRegisterNot(Register reg) {
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] != 0) return false;
@@ -239,8 +237,7 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index 163d2c9cfb..f614b93c9c 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -925,7 +925,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
LInstruction* result = new (zone()) LPrologue();
- if (info_->num_heap_slots() > 0) {
+ if (info_->scope()->num_heap_slots() > 0) {
result = MarkAsCall(result, instr);
}
return result;
@@ -983,17 +983,6 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
}
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left =
- UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
- LOperand* right =
- UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
- LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
HHasInPrototypeChainAndBranch* instr) {
LOperand* object = UseRegister(instr->object());
@@ -1121,15 +1110,28 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
switch (instr->op()) {
- case kMathFloor: return DoMathFloor(instr);
- case kMathRound: return DoMathRound(instr);
- case kMathFround: return DoMathFround(instr);
- case kMathAbs: return DoMathAbs(instr);
- case kMathLog: return DoMathLog(instr);
- case kMathExp: return DoMathExp(instr);
- case kMathSqrt: return DoMathSqrt(instr);
- case kMathPowHalf: return DoMathPowHalf(instr);
- case kMathClz32: return DoMathClz32(instr);
+ case kMathCos:
+ return DoMathCos(instr);
+ case kMathFloor:
+ return DoMathFloor(instr);
+ case kMathRound:
+ return DoMathRound(instr);
+ case kMathFround:
+ return DoMathFround(instr);
+ case kMathAbs:
+ return DoMathAbs(instr);
+ case kMathLog:
+ return DoMathLog(instr);
+ case kMathExp:
+ return DoMathExp(instr);
+ case kMathSqrt:
+ return DoMathSqrt(instr);
+ case kMathPowHalf:
+ return DoMathPowHalf(instr);
+ case kMathClz32:
+ return DoMathClz32(instr);
+ case kMathSin:
+ return DoMathSin(instr);
default:
UNREACHABLE();
return NULL;
@@ -1184,15 +1186,25 @@ LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
}
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new (zone()) LMathCos(input)), instr);
+}
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ DCHECK(instr->representation().IsDouble());
+ DCHECK(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new (zone()) LMathSin(input)), instr);
+}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
DCHECK(instr->representation().IsDouble());
DCHECK(instr->value()->representation().IsDouble());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp1 = FixedTemp(ecx);
- LOperand* temp2 = FixedTemp(edx);
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return MarkAsCall(DefineSameAsFirst(result), instr);
+ LOperand* input = UseRegisterAtStart(instr->value());
+ return MarkAsCall(DefineSameAsFirst(new (zone()) LMathExp(input)), instr);
}
@@ -1973,20 +1985,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
-LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
- HValue* value = instr->value();
- DCHECK(value->representation().IsDouble());
- return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
-}
-
-
-LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
- LOperand* lo = UseRegister(instr->lo());
- LOperand* hi = UseRegister(instr->hi());
- return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
-}
-
-
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -2016,15 +2014,9 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
- LOperand* global_object =
- UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadGlobalGeneric* result =
- new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+ LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -2072,10 +2064,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
context, object, vector);
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2145,10 +2134,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- }
+ LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
LLoadKeyedGeneric* result =
new(zone()) LLoadKeyedGeneric(context, object, key, vector);
return MarkAsCall(DefineFixed(result, eax), instr);
@@ -2238,12 +2224,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreKeyedGeneric* result = new (zone())
LStoreKeyedGeneric(context, object, key, value, slot, vector);
@@ -2357,12 +2339,8 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* object =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
- LOperand* slot = NULL;
- LOperand* vector = NULL;
- if (instr->HasVectorAndSlot()) {
- slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
- vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
- }
+ LOperand* slot = FixedTemp(StoreWithVectorDescriptor::SlotRegister());
+ LOperand* vector = FixedTemp(StoreWithVectorDescriptor::VectorRegister());
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
@@ -2399,14 +2377,19 @@ LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
- info()->MarkAsDeferredCalling();
- LOperand* context = UseAny(instr->context());
- LOperand* size = instr->size()->IsConstant()
- ? UseConstant(instr->size())
- : UseTempRegister(instr->size());
- LOperand* temp = TempRegister();
- LAllocate* result = new(zone()) LAllocate(context, size, temp);
- return AssignPointerMap(DefineAsRegister(result));
+ LOperand* size = instr->size()->IsConstant() ? UseConstant(instr->size())
+ : UseRegister(instr->size());
+ if (instr->IsAllocationFolded()) {
+ LOperand* temp = TempRegister();
+ LFastAllocate* result = new (zone()) LFastAllocate(size, temp);
+ return DefineAsRegister(result);
+ } else {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new (zone()) LAllocate(context, size, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+ }
}
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index d83322acd3..3ef8f75523 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -58,7 +58,6 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
- V(ConstructDouble) \
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
@@ -66,12 +65,12 @@ class LCodeGen;
V(DivByConstI) \
V(DivByPowerOf2I) \
V(DivI) \
- V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(Dummy) \
V(DummyUse) \
+ V(FastAllocate) \
V(FlooringDivByConstI) \
V(FlooringDivByPowerOf2I) \
V(FlooringDivI) \
@@ -83,7 +82,6 @@ class LCodeGen;
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
- V(InstanceOf) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
@@ -103,6 +101,7 @@ class LCodeGen;
V(LoadRoot) \
V(MathAbs) \
V(MathClz32) \
+ V(MathCos) \
V(MathExp) \
V(MathFloor) \
V(MathFround) \
@@ -112,6 +111,7 @@ class LCodeGen;
V(MathRound) \
V(MathSqrt) \
V(MaybeGrowElements) \
+ V(MathSin) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@@ -153,7 +153,6 @@ class LCodeGen;
V(UnknownOSRValue) \
V(WrapReceiver)
-
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
Opcode opcode() const final { return LInstruction::k##type; } \
void CompileToNative(LCodeGen* generator) final; \
@@ -905,21 +904,29 @@ class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
};
+class LMathCos final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
-class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+class LMathSin final : public LTemplateInstruction<1, 1, 0> {
public:
- LMathExp(LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
+ explicit LMathSin(LOperand* value) { inputs_[0] = value; }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+class LMathExp final : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathExp(LOperand* value) { inputs_[0] = value; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
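LMathExp drops both fixed temps and the InitializeMathExpData() call; together with the new LMathCos/LMathSin above, this suggests exp, sin and cos now share a single call-based lowering on x87 instead of the old inline temp-register sequence (an inference from the diff, not commit text).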
@@ -1134,22 +1141,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
- public:
- LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
- inputs_[0] = context;
- inputs_[1] = left;
- inputs_[2] = right;
- }
-
- LOperand* context() const { return inputs_[0]; }
- LOperand* left() const { return inputs_[1]; }
- LOperand* right() const { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
public:
LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
@@ -1596,18 +1587,14 @@ class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
public:
- LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
- LOperand* vector) {
+ LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
inputs_[0] = context;
- inputs_[1] = global_object;
temps_[0] = vector;
}
LOperand* context() { return inputs_[0]; }
- LOperand* global_object() { return inputs_[1]; }
LOperand* temp_vector() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -2170,6 +2157,8 @@ class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
+ bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
@@ -2355,33 +2344,6 @@ class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
};
-class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LDoubleBits(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
- DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
-};
-
-
-class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
- public:
- LConstructDouble(LOperand* hi, LOperand* lo) {
- inputs_[0] = hi;
- inputs_[1] = lo;
- }
-
- LOperand* hi() { return inputs_[0]; }
- LOperand* lo() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
-};
-
-
class LAllocate final : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2398,6 +2360,19 @@ class LAllocate final : public LTemplateInstruction<1, 2, 1> {
DECLARE_HYDROGEN_ACCESSOR(Allocate)
};
+class LFastAllocate final : public LTemplateInstruction<1, 1, 1> {
+ public:
+ LFastAllocate(LOperand* size, LOperand* temp) {
+ inputs_[0] = size;
+ temps_[0] = temp;
+ }
+
+ LOperand* size() const { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FastAllocate, "fast-allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
class LTypeof final : public LTemplateInstruction<1, 2, 0> {
public:
@@ -2552,6 +2527,8 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMathFround(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index 36d83b53cf..d2cf573d4c 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -7,6 +7,7 @@
#include <signal.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/select.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -15,10 +16,6 @@
#include "src/d8.h"
-#if !V8_OS_NACL
-#include <sys/select.h>
-#endif
-
namespace v8 {
@@ -105,16 +102,11 @@ static bool WaitOnFD(int fd,
}
timeout.tv_usec = (read_timeout % 1000) * 1000;
timeout.tv_sec = read_timeout / 1000;
-#if V8_OS_NACL
- // PNaCL has no support for select.
- int number_of_fds_ready = -1;
-#else
int number_of_fds_ready = select(fd + 1,
&readfds,
&writefds,
&exceptfds,
read_timeout != -1 ? &timeout : NULL);
-#endif
return number_of_fds_ready == 1;
}
@@ -531,21 +523,18 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
- if (!ChildLaunchedOK(args.GetIsolate(), exec_error_fds)) return;
+ Isolate* isolate = args.GetIsolate();
+ if (!ChildLaunchedOK(isolate, exec_error_fds)) return;
- Local<Value> accumulator = GetStdout(args.GetIsolate(), stdout_fds[kReadFD],
- start_time, read_timeout, total_timeout);
+ Local<Value> accumulator = GetStdout(isolate, stdout_fds[kReadFD], start_time,
+ read_timeout, total_timeout);
if (accumulator->IsUndefined()) {
kill(pid, SIGINT); // On timeout, kill the subprocess.
args.GetReturnValue().Set(accumulator);
return;
}
- if (!WaitForChild(args.GetIsolate(),
- pid,
- child_waiter,
- start_time,
- read_timeout,
+ if (!WaitForChild(isolate, pid, child_waiter, start_time, read_timeout,
total_timeout)) {
return;
}
@@ -588,13 +577,8 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
if (args[0]->IsNumber()) {
-#if V8_OS_NACL
- // PNaCL has no support for umask.
- int previous = 0;
-#else
int previous = umask(
args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust());
-#endif
args.GetReturnValue().Set(previous);
return;
} else {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 967d1e4fd4..a8af9de2d1 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -19,6 +19,7 @@
#ifndef V8_SHARED
#include <algorithm>
+#include <fstream>
#include <vector>
#endif // !V8_SHARED
@@ -34,13 +35,16 @@
#include "src/ostreams.h"
#include "include/libplatform/libplatform.h"
+#include "include/libplatform/v8-tracing.h"
#ifndef V8_SHARED
#include "src/api.h"
#include "src/base/cpu.h"
+#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/basic-block-profiler.h"
+#include "src/interpreter/interpreter.h"
#include "src/snapshot/natives.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -133,7 +137,7 @@ class PredictablePlatform : public Platform {
}
uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
- const char* name, uint64_t id,
+ const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int numArgs, const char** argNames,
const uint8_t* argTypes, const uint64_t* argValues,
unsigned int flags) override {
@@ -203,6 +207,129 @@ Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
} // namespace
+namespace tracing {
+
+namespace {
+
+// String options that can be used to initialize TraceOptions.
+const char kRecordUntilFull[] = "record-until-full";
+const char kRecordContinuously[] = "record-continuously";
+const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
+
+const char kRecordModeParam[] = "record_mode";
+const char kEnableSamplingParam[] = "enable_sampling";
+const char kEnableSystraceParam[] = "enable_systrace";
+const char kEnableArgumentFilterParam[] = "enable_argument_filter";
+const char kIncludedCategoriesParam[] = "included_categories";
+const char kExcludedCategoriesParam[] = "excluded_categories";
+
+class TraceConfigParser {
+ public:
+ static void FillTraceConfig(v8::Isolate* isolate,
+ platform::tracing::TraceConfig* trace_config,
+ const char* json_str) {
+ HandleScope outer_scope(isolate);
+ Local<Context> context = Context::New(isolate);
+ Context::Scope context_scope(context);
+ HandleScope inner_scope(isolate);
+
+ Local<String> source =
+ String::NewFromUtf8(isolate, json_str, NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<Value> result = JSON::Parse(context, source).ToLocalChecked();
+ Local<v8::Object> trace_config_object = Local<v8::Object>::Cast(result);
+
+ trace_config->SetTraceRecordMode(
+ GetTraceRecordMode(isolate, context, trace_config_object));
+ if (GetBoolean(isolate, context, trace_config_object,
+ kEnableSamplingParam)) {
+ trace_config->EnableSampling();
+ }
+ if (GetBoolean(isolate, context, trace_config_object,
+ kEnableSystraceParam)) {
+ trace_config->EnableSystrace();
+ }
+ if (GetBoolean(isolate, context, trace_config_object,
+ kEnableArgumentFilterParam)) {
+ trace_config->EnableArgumentFilter();
+ }
+ UpdateCategoriesList(isolate, context, trace_config_object,
+ kIncludedCategoriesParam, trace_config);
+ UpdateCategoriesList(isolate, context, trace_config_object,
+ kExcludedCategoriesParam, trace_config);
+ }
+
+ private:
+ static bool GetBoolean(v8::Isolate* isolate, Local<Context> context,
+ Local<v8::Object> object, const char* property) {
+ Local<Value> value = GetValue(isolate, context, object, property);
+ if (value->IsNumber()) {
+ Local<Boolean> v8_boolean = value->ToBoolean(context).ToLocalChecked();
+ return v8_boolean->Value();
+ }
+ return false;
+ }
+
+ static int UpdateCategoriesList(
+ v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object,
+ const char* property, platform::tracing::TraceConfig* trace_config) {
+ Local<Value> value = GetValue(isolate, context, object, property);
+ if (value->IsArray()) {
+ Local<Array> v8_array = Local<Array>::Cast(value);
+ for (int i = 0, length = v8_array->Length(); i < length; ++i) {
+ Local<Value> v = v8_array->Get(context, i)
+ .ToLocalChecked()
+ ->ToString(context)
+ .ToLocalChecked();
+ String::Utf8Value str(v->ToString(context).ToLocalChecked());
+ if (kIncludedCategoriesParam == property) {
+ trace_config->AddIncludedCategory(*str);
+ } else {
+ trace_config->AddExcludedCategory(*str);
+ }
+ }
+ return v8_array->Length();
+ }
+ return 0;
+ }
+
+ static platform::tracing::TraceRecordMode GetTraceRecordMode(
+ v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object) {
+ Local<Value> value = GetValue(isolate, context, object, kRecordModeParam);
+ if (value->IsString()) {
+ Local<String> v8_string = value->ToString(context).ToLocalChecked();
+ String::Utf8Value str(v8_string);
+ if (strcmp(kRecordUntilFull, *str) == 0) {
+ return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
+ } else if (strcmp(kRecordContinuously, *str) == 0) {
+ return platform::tracing::TraceRecordMode::RECORD_CONTINUOUSLY;
+ } else if (strcmp(kRecordAsMuchAsPossible, *str) == 0) {
+ return platform::tracing::TraceRecordMode::RECORD_AS_MUCH_AS_POSSIBLE;
+ }
+ }
+ return platform::tracing::TraceRecordMode::RECORD_UNTIL_FULL;
+ }
+
+ static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context,
+ Local<v8::Object> object, const char* property) {
+ Local<String> v8_str =
+ String::NewFromUtf8(isolate, property, NewStringType::kNormal)
+ .ToLocalChecked();
+ return object->Get(context, v8_str).ToLocalChecked();
+ }
+};
+
+} // namespace
+
+static platform::tracing::TraceConfig* CreateTraceConfigFromJSON(
+ v8::Isolate* isolate, const char* json_str) {
+ platform::tracing::TraceConfig* trace_config =
+ new platform::tracing::TraceConfig();
+ TraceConfigParser::FillTraceConfig(isolate, trace_config, json_str);
+ return trace_config;
+}
+
+} // namespace tracing
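Assembling the constants above, a config passed via --trace-config could look like the following (category names illustrative). Note that GetBoolean() only honours numeric values, so the boolean options are written as 0/1 rather than true/false:

    {
      "record_mode": "record-continuously",
      "enable_sampling": 1,
      "enable_systrace": 0,
      "enable_argument_filter": 0,
      "included_categories": ["v8"],
      "excluded_categories": ["v8.runtime"]
    }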
class PerIsolateData {
public:
@@ -336,7 +463,9 @@ MaybeLocal<Script> Shell::CompileString(
ScriptCompiler::CompileOptions compile_options, SourceType source_type) {
Local<Context> context(isolate->GetCurrentContext());
ScriptOrigin origin(name);
- if (compile_options == ScriptCompiler::kNoCompileOptions) {
+ // TODO(adamk): Make use of compile options for Modules.
+ if (compile_options == ScriptCompiler::kNoCompileOptions ||
+ source_type == MODULE) {
ScriptCompiler::Source script_source(source, origin);
return source_type == SCRIPT
? ScriptCompiler::Compile(context, &script_source,
@@ -356,11 +485,9 @@ MaybeLocal<Script> Shell::CompileString(
DCHECK(false); // A new compile option?
}
if (data == NULL) compile_options = ScriptCompiler::kNoCompileOptions;
+ DCHECK_EQ(SCRIPT, source_type);
MaybeLocal<Script> result =
- source_type == SCRIPT
- ? ScriptCompiler::Compile(context, &cached_source, compile_options)
- : ScriptCompiler::CompileModule(context, &cached_source,
- compile_options);
+ ScriptCompiler::Compile(context, &cached_source, compile_options);
CHECK(data == NULL || !data->rejected);
return result;
}
@@ -521,9 +648,8 @@ void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
}
-
-// Realm.create() creates a new realm and returns its index.
-void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+MaybeLocal<Context> Shell::CreateRealm(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
TryCatch try_catch(isolate);
PerIsolateData* data = PerIsolateData::Get(isolate);
@@ -540,12 +666,29 @@ void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (context.IsEmpty()) {
DCHECK(try_catch.HasCaught());
try_catch.ReThrow();
- return;
+ return MaybeLocal<Context>();
}
data->realms_[index].Reset(isolate, context);
args.GetReturnValue().Set(index);
+ return context;
}
+// Realm.create() creates a new realm with a distinct security token
+// and returns its index.
+void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CreateRealm(args);
+}
+
+// Realm.createAllowCrossRealmAccess() creates a new realm with the same
+// security token as the current realm.
+void Shell::RealmCreateAllowCrossRealmAccess(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Local<Context> context;
+ if (CreateRealm(args).ToLocal(&context)) {
+ context->SetSecurityToken(
+ args.GetIsolate()->GetEnteredContext()->GetSecurityToken());
+ }
+}
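A hypothetical d8 session showing the difference between the two constructors (variable names illustrative):

    var open = Realm.createAllowCrossRealmAccess();  // caller's security token
    var closed = Realm.create();                     // fresh security token
    Realm.global(open).x = 1;    // same token: access succeeds
    Realm.global(closed).x = 1;  // different token: fails the access check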
// Realm.dispose(i) disposes the reference to the realm i.
void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -907,25 +1050,28 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
const char* filename_string = ToCString(filename);
- int linenum =
- message->GetLineNumber(isolate->GetCurrentContext()).FromJust();
+ Maybe<int> maybeline = message->GetLineNumber(isolate->GetCurrentContext());
+ int linenum = maybeline.IsJust() ? maybeline.FromJust() : -1;
printf("%s:%i: %s\n", filename_string, linenum, exception_string);
- // Print line of source code.
- v8::String::Utf8Value sourceline(
- message->GetSourceLine(isolate->GetCurrentContext()).ToLocalChecked());
- const char* sourceline_string = ToCString(sourceline);
- printf("%s\n", sourceline_string);
- // Print wavy underline (GetUnderline is deprecated).
- int start =
- message->GetStartColumn(isolate->GetCurrentContext()).FromJust();
- for (int i = 0; i < start; i++) {
- printf(" ");
- }
- int end = message->GetEndColumn(isolate->GetCurrentContext()).FromJust();
- for (int i = start; i < end; i++) {
- printf("^");
+ Local<String> sourceline;
+ if (message->GetSourceLine(isolate->GetCurrentContext())
+ .ToLocal(&sourceline)) {
+ // Print line of source code.
+ v8::String::Utf8Value sourcelinevalue(sourceline);
+ const char* sourceline_string = ToCString(sourcelinevalue);
+ printf("%s\n", sourceline_string);
+ // Print wavy underline (GetUnderline is deprecated).
+ int start =
+ message->GetStartColumn(isolate->GetCurrentContext()).FromJust();
+ for (int i = 0; i < start; i++) {
+ printf(" ");
+ }
+ int end = message->GetEndColumn(isolate->GetCurrentContext()).FromJust();
+ for (int i = start; i < end; i++) {
+ printf("^");
+ }
+ printf("\n");
}
- printf("\n");
Local<Value> stack_trace_string;
if (try_catch->StackTrace(isolate->GetCurrentContext())
.ToLocal(&stack_trace_string) &&
@@ -1067,8 +1213,7 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
Local<Function> fun = Local<Function>::New(isolate, stringify_function_);
Local<Value> argv[1] = {value};
v8::TryCatch try_catch(isolate);
- MaybeLocal<Value> result =
- fun->Call(context, Undefined(isolate), 1, argv).ToLocalChecked();
+ MaybeLocal<Value> result = fun->Call(context, Undefined(isolate), 1, argv);
if (result.IsEmpty()) return String::Empty(isolate);
return result.ToLocalChecked().As<String>();
}
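This hunk fixes an ordering bug visible in the removed lines: ToLocalChecked() was applied to the Call() result before the IsEmpty() test, so a stringify function that threw would abort the shell instead of falling through to String::Empty().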
@@ -1114,6 +1259,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
String::NewFromUtf8(isolate, "version", NewStringType::kNormal)
.ToLocalChecked(),
FunctionTemplate::New(isolate, Version));
+ global_template->Set(
+ Symbol::GetToStringTag(isolate),
+ String::NewFromUtf8(isolate, "global", NewStringType::kNormal)
+ .ToLocalChecked());
// Bind the Realm object.
Local<ObjectTemplate> realm_template = ObjectTemplate::New(isolate);
@@ -1134,6 +1283,11 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
.ToLocalChecked(),
FunctionTemplate::New(isolate, RealmCreate));
realm_template->Set(
+ String::NewFromUtf8(isolate, "createAllowCrossRealmAccess",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmCreateAllowCrossRealmAccess));
+ realm_template->Set(
String::NewFromUtf8(isolate, "dispose", NewStringType::kNormal)
.ToLocalChecked(),
FunctionTemplate::New(isolate, RealmDispose));
@@ -1275,6 +1429,21 @@ struct CounterAndKey {
inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
return strcmp(lhs.key, rhs.key) < 0;
}
+
+void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
+ HandleScope handle_scope(isolate);
+ Local<Context> context = Context::New(isolate);
+ Context::Scope context_scope(context);
+
+ Local<Object> dispatch_counters = reinterpret_cast<i::Isolate*>(isolate)
+ ->interpreter()
+ ->GetDispatchCountersObject();
+ std::ofstream dispatch_counters_stream(
+ i::FLAG_trace_ignition_dispatches_output_file);
+ dispatch_counters_stream << *String::Utf8Value(
+ JSON::Stringify(context, dispatch_counters).ToLocalChecked());
+}
+
#endif // !V8_SHARED
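With the corresponding flags set, d8 now writes the interpreter's dispatch counters as JSON on exit; a hypothetical invocation (output file name illustrative):

    d8 --ignition --trace-ignition-dispatches \
       --trace-ignition-dispatches-output-file=dispatch_counters.json script.js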
@@ -1312,6 +1481,7 @@ void Shell::OnExit(v8::Isolate* isolate) {
"-------------+\n");
delete [] counters;
}
+
delete counters_file_;
delete counter_map_;
#endif // !V8_SHARED
@@ -1439,11 +1609,6 @@ void Shell::RunShell(Isolate* isolate) {
while (true) {
HandleScope inner_scope(isolate);
printf("d8> ");
-#if defined(__native_client__)
- // Native Client libc is used to being embedded in Chrome and
- // has trouble recognizing when to flush.
- fflush(stdout);
-#endif
Local<String> input = Shell::ReadFromStdin(isolate);
if (input.IsEmpty()) break;
ExecuteString(isolate, input, name, true, true);
@@ -1951,6 +2116,12 @@ bool Shell::SetOptions(int argc, char* argv[]) {
return false;
}
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--enable-tracing") == 0) {
+ options.trace_enabled = true;
+ argv[i] = NULL;
+ } else if (strncmp(argv[i], "--trace-config=", 15) == 0) {
+ options.trace_config = argv[i] + 15;
+ argv[i] = NULL;
}
}
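The two new flags combine as, for example (file names illustrative):

    d8 --enable-tracing --trace-config=traceconfig.json script.js

With --enable-tracing alone, a default trace config is used; either way the output is written to v8_trace.json (see Shell::Main below).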
@@ -2367,6 +2538,10 @@ static void DumpHeapConstants(i::Isolate* isolate) {
int Shell::Main(int argc, char* argv[]) {
+ std::ofstream trace_file;
+#ifndef V8_SHARED
+ v8::base::debug::EnableInProcessStackDumping();
+#endif
#if (defined(_WIN32) || defined(_WIN64))
UINT new_flags =
SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
@@ -2383,7 +2558,7 @@ int Shell::Main(int argc, char* argv[]) {
#endif // defined(_MSC_VER)
#endif // defined(_WIN32) || defined(_WIN64)
if (!SetOptions(argc, argv)) return 1;
- v8::V8::InitializeICU(options.icu_data_file);
+ v8::V8::InitializeICUDefaultLocation(argv[0], options.icu_data_file);
#ifndef V8_SHARED
g_platform = i::FLAG_verify_predictable
? new PredictablePlatform()
@@ -2434,6 +2609,38 @@ int Shell::Main(int argc, char* argv[]) {
Initialize(isolate);
PerIsolateData data(isolate);
+ if (options.trace_enabled) {
+ trace_file.open("v8_trace.json");
+ platform::tracing::TracingController* tracing_controller =
+ new platform::tracing::TracingController();
+ platform::tracing::TraceBuffer* trace_buffer =
+ platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
+ platform::tracing::TraceBuffer::kRingBufferChunks,
+ platform::tracing::TraceWriter::CreateJSONTraceWriter(
+ trace_file));
+ platform::tracing::TraceConfig* trace_config;
+ if (options.trace_config) {
+ int size = 0;
+ char* trace_config_json_str =
+ ReadChars(nullptr, options.trace_config, &size);
+ trace_config =
+ tracing::CreateTraceConfigFromJSON(isolate, trace_config_json_str);
+ delete[] trace_config_json_str;
+ } else {
+ trace_config =
+ platform::tracing::TraceConfig::CreateDefaultTraceConfig();
+ }
+ tracing_controller->Initialize(trace_buffer);
+ tracing_controller->StartTracing(trace_config);
+#ifndef V8_SHARED
+ if (!i::FLAG_verify_predictable) {
+ platform::SetTracingController(g_platform, tracing_controller);
+ }
+#else
+ platform::SetTracingController(g_platform, tracing_controller);
+#endif
+ }
+
#ifndef V8_SHARED
if (options.dump_heap_constants) {
DumpHeapConstants(reinterpret_cast<i::Isolate*>(isolate));
@@ -2476,6 +2683,13 @@ int Shell::Main(int argc, char* argv[]) {
RunShell(isolate);
}
+#ifndef V8_SHARED
+ if (i::FLAG_ignition && i::FLAG_trace_ignition_dispatches &&
+ i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
+ WriteIgnitionDispatchCountersFile(isolate);
+ }
+#endif
+
// Shut down contexts and collect garbage.
evaluation_context_.Reset();
#ifndef V8_SHARED
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index f249a78856..cc65a5b75a 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -31,20 +31,20 @@
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 1,
- 'v8_toolset_for_d8%': 'target',
},
- 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
+ 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
'targets': [
{
'target_name': 'd8',
'type': 'executable',
'dependencies': [
- '../tools/gyp/v8.gyp:v8',
- '../tools/gyp/v8.gyp:v8_libplatform',
+ 'v8.gyp:v8',
+ 'v8.gyp:v8_libplatform',
],
# Generated source files need this explicitly:
'include_dirs+': [
'..',
+ '<(DEPTH)',
],
'sources': [
'd8.h',
@@ -52,7 +52,7 @@
],
'conditions': [
[ 'want_separate_host_toolset==1', {
- 'toolsets': [ '<(v8_toolset_for_d8)', ],
+ 'toolsets': [ 'target', ],
}],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android" \
@@ -141,7 +141,7 @@
},
],
'conditions': [
- ['test_isolation_mode != "noop" and v8_toolset_for_d8 == "target"', {
+ ['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'd8_run',
@@ -150,7 +150,7 @@
'd8',
],
'includes': [
- '../build/isolate.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'd8.isolate',
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 321d9c1770..0e365a52dd 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -7,8 +7,8 @@
#ifndef V8_SHARED
#include "src/allocation.h"
+#include "src/base/hashmap.h"
#include "src/base/platform/time.h"
-#include "src/hashmap.h"
#include "src/list.h"
#else
#include "include/v8.h"
@@ -61,13 +61,13 @@ class CounterMap {
public:
CounterMap(): hash_map_(Match) { }
Counter* Lookup(const char* name) {
- i::HashMap::Entry* answer =
+ base::HashMap::Entry* answer =
hash_map_.Lookup(const_cast<char*>(name), Hash(name));
if (!answer) return NULL;
return reinterpret_cast<Counter*>(answer->value);
}
void Set(const char* name, Counter* value) {
- i::HashMap::Entry* answer =
+ base::HashMap::Entry* answer =
hash_map_.LookupOrInsert(const_cast<char*>(name), Hash(name));
DCHECK(answer != NULL);
answer->value = value;
@@ -81,14 +81,14 @@ class CounterMap {
const char* CurrentKey() { return static_cast<const char*>(entry_->key); }
Counter* CurrentValue() { return static_cast<Counter*>(entry_->value); }
private:
- i::HashMap* map_;
- i::HashMap::Entry* entry_;
+ base::HashMap* map_;
+ base::HashMap::Entry* entry_;
};
private:
static int Hash(const char* name);
static bool Match(void* key1, void* key2);
- i::HashMap hash_map_;
+ base::HashMap hash_map_;
};
#endif // !V8_SHARED
@@ -290,7 +290,9 @@ class ShellOptions {
isolate_sources(NULL),
icu_data_file(NULL),
natives_blob(NULL),
- snapshot_blob(NULL) {}
+ snapshot_blob(NULL),
+ trace_enabled(false),
+ trace_config(NULL) {}
~ShellOptions() {
delete[] isolate_sources;
@@ -318,6 +320,8 @@ class ShellOptions {
const char* icu_data_file;
const char* natives_blob;
const char* snapshot_blob;
+ bool trace_enabled;
+ const char* trace_config;
};
#ifdef V8_SHARED
@@ -350,7 +354,7 @@ class Shell : public i::AllStatic {
#ifndef V8_SHARED
// TODO(binji): stupid implementation for now. Is there an easy way to hash an
- // object for use in i::HashMap? By pointer?
+ // object for use in base::HashMap? By pointer?
typedef i::List<Local<Object>> ObjectList;
static bool SerializeValue(Isolate* isolate, Local<Value> value,
const ObjectList& to_transfer,
@@ -375,6 +379,8 @@ class Shell : public i::AllStatic {
static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmCreateAllowCrossRealmAccess(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -461,6 +467,7 @@ class Shell : public i::AllStatic {
static i::List<Worker*> workers_;
static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;
+ static void WriteIgnitionDispatchCountersFile(v8::Isolate* isolate);
static Counter* GetCounter(const char* name, bool is_histogram);
static Local<String> Stringify(Isolate* isolate, Local<Value> value);
#endif // !V8_SHARED
@@ -468,6 +475,8 @@ class Shell : public i::AllStatic {
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
+ static MaybeLocal<Context> CreateRealm(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
};
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index 7e5c4e355e..47a7c6e7ff 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -13,9 +13,8 @@ namespace v8 {
namespace internal {
template <typename Char>
-bool DateParser::Parse(Vector<Char> str,
- FixedArray* out,
- UnicodeCache* unicode_cache) {
+bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray* out) {
+ UnicodeCache* unicode_cache = isolate->unicode_cache();
DCHECK(out->length() >= OUTPUT_SIZE);
InputReader<Char> in(unicode_cache, str);
DateStringTokenizer<Char> scanner(&in);
@@ -76,10 +75,12 @@ bool DateParser::Parse(Vector<Char> str,
if (next_unhandled_token.IsInvalid()) return false;
bool has_read_number = !day.IsEmpty();
// If there's anything left, continue with the legacy parser.
+ bool legacy_parser = false;
for (DateToken token = next_unhandled_token;
!token.IsEndOfInput();
token = scanner.Next()) {
if (token.IsNumber()) {
+ legacy_parser = true;
has_read_number = true;
int n = token.number();
if (scanner.SkipSymbol(':')) {
@@ -115,6 +116,7 @@ bool DateParser::Parse(Vector<Char> str,
scanner.SkipSymbol('-');
}
} else if (token.IsKeyword()) {
+ legacy_parser = true;
// Parse a "word" (sequence of chars. >= 'A').
KeywordType type = token.keyword_type();
int value = token.keyword_value();
@@ -133,6 +135,7 @@ bool DateParser::Parse(Vector<Char> str,
if (scanner.Peek().IsNumber()) return false;
}
} else if (token.IsAsciiSign() && (tz.IsUTC() || !time.IsEmpty())) {
+ legacy_parser = true;
// Parse UTC offset (only after UTC or time).
tz.SetSign(token.ascii_sign());
// The following number may be empty.
@@ -170,7 +173,13 @@ bool DateParser::Parse(Vector<Char> str,
}
}
- return day.Write(out) && time.Write(out) && tz.Write(out);
+ bool success = day.Write(out) && time.Write(out) && tz.Write(out);
+
+ if (legacy_parser && success) {
+ isolate->CountUsage(v8::Isolate::kLegacyDateParser);
+ }
+
+ return success;
}
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index 533173984c..d7676cbe08 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -26,7 +26,7 @@ class DateParser : public AllStatic {
// [7]: UTC offset in seconds, or null value if no timezone specified
// If parsing fails, return false (content of output array is not defined).
template <typename Char>
- static bool Parse(Vector<Char> str, FixedArray* output, UnicodeCache* cache);
+ static bool Parse(Isolate* isolate, Vector<Char> str, FixedArray* output);
enum {
YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
diff --git a/deps/v8/src/debug/OWNERS b/deps/v8/src/debug/OWNERS
index cf18bd8a3b..4e493cdc6e 100644
--- a/deps/v8/src/debug/OWNERS
+++ b/deps/v8/src/debug/OWNERS
@@ -1,6 +1,7 @@
set noparent
bmeurer@chromium.org
+jgruber@chromium.org
mvstanton@chromium.org
ulan@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index fa3540e53b..29e4827be4 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -41,7 +41,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from
// mov r2, r2
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index cd017219d2..bf7964a7be 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -43,7 +43,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
PatchingAssembler patcher(isolate, reinterpret_cast<Instruction*>(pc),
Assembler::kDebugBreakSlotInstructions);
// Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index dae1348322..fb2df312b8 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -5,11 +5,13 @@
#include "src/debug/debug-evaluate.h"
#include "src/accessors.h"
+#include "src/compiler.h"
#include "src/contexts.h"
-#include "src/debug/debug.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
+#include "src/debug/debug.h"
#include "src/frames-inl.h"
+#include "src/globals.h"
#include "src/isolate-inl.h"
namespace v8 {
@@ -54,8 +56,9 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
DisableBreak disable_break_scope(isolate->debug(), disable_break);
// Get the frame where the debugging is performed.
- JavaScriptFrameIterator it(isolate, frame_id);
- JavaScriptFrame* frame = it.frame();
+ StackTraceFrameIterator it(isolate, frame_id);
+ if (!it.is_javascript()) return isolate->factory()->undefined_value();
+ JavaScriptFrame* frame = it.javascript_frame();
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -95,11 +98,12 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
}
Handle<JSFunction> eval_fun;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, eval_fun,
- Compiler::GetFunctionFromEval(
- source, outer_info, context, SLOPPY,
- NO_PARSE_RESTRICTION, RelocInfo::kNoPosition),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, eval_fun,
+ Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
+ NO_PARSE_RESTRICTION, kNoSourcePosition,
+ kNoSourcePosition),
+ Object);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -125,8 +129,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
frame_(frame),
inlined_jsframe_index_(inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> local_function =
- Handle<JSFunction>::cast(frame_inspector.GetFunction());
+ Handle<JSFunction> local_function = frame_inspector.GetFunction();
Handle<Context> outer_context(local_function->context());
evaluation_context_ = outer_context;
outer_info_ = handle(local_function->shared());
@@ -182,7 +185,8 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
context_chain_element.wrapped_context = current_context;
}
context_chain_.Add(context_chain_element);
- } else if (scope_type == ScopeIterator::ScopeTypeBlock) {
+ } else if (scope_type == ScopeIterator::ScopeTypeBlock ||
+ scope_type == ScopeIterator::ScopeTypeEval) {
Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
frame_inspector.MaterializeStackLocals(materialized,
it.CurrentScopeInfo());
@@ -248,7 +252,7 @@ void DebugEvaluate::ContextBuilder::MaterializeReceiver(
// referenced by the current function, so it can be correctly resolved.
return;
} else if (local_function->shared()->scope_info()->HasReceiver() &&
- !frame_->receiver()->IsTheHole()) {
+ !frame_->receiver()->IsTheHole(isolate_)) {
recv = handle(frame_->receiver(), isolate_);
}
JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index a7956ff417..c98f911f75 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -9,45 +9,55 @@
namespace v8 {
namespace internal {
-FrameInspector::FrameInspector(JavaScriptFrame* frame,
- int inlined_jsframe_index, Isolate* isolate)
+FrameInspector::FrameInspector(StandardFrame* frame, int inlined_jsframe_index,
+ Isolate* isolate)
: frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
- has_adapted_arguments_ = frame_->has_adapted_arguments();
+ JavaScriptFrame* js_frame =
+ frame->is_java_script() ? javascript_frame() : nullptr;
+ DCHECK(js_frame || frame->is_wasm());
+ has_adapted_arguments_ = js_frame && js_frame->has_adapted_arguments();
is_bottommost_ = inlined_jsframe_index == 0;
is_optimized_ = frame_->is_optimized();
is_interpreted_ = frame_->is_interpreted();
// Calculate the deoptimized frame.
if (frame->is_optimized()) {
+ DCHECK(js_frame != nullptr);
// TODO(turbofan): Revisit once we support deoptimization.
- if (frame->LookupCode()->is_turbofanned() &&
- frame->function()->shared()->asm_function() &&
+ if (js_frame->LookupCode()->is_turbofanned() &&
+ js_frame->function()->shared()->asm_function() &&
!FLAG_turbo_asm_deoptimization) {
is_optimized_ = false;
return;
}
deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
- frame, inlined_jsframe_index, isolate);
+ js_frame, inlined_jsframe_index, isolate);
}
}
-
FrameInspector::~FrameInspector() {
// Get rid of the calculated deoptimized frame if any.
- if (deoptimized_frame_ != NULL) {
- Deoptimizer::DeleteDebuggerInspectableFrame(deoptimized_frame_, isolate_);
+ if (deoptimized_frame_ != nullptr) {
+ delete deoptimized_frame_;
}
}
-
int FrameInspector::GetParametersCount() {
return is_optimized_ ? deoptimized_frame_->parameters_count()
: frame_->ComputeParametersCount();
}
-Handle<Object> FrameInspector::GetFunction() {
+Handle<Script> FrameInspector::GetScript() {
+ Object* script = is_optimized_
+ ? deoptimized_frame_->GetFunction()->shared()->script()
+ : frame_->script();
+ return handle(Script::cast(script), isolate_);
+}
+
+Handle<JSFunction> FrameInspector::GetFunction() {
+ DCHECK(!frame_->is_wasm());
return is_optimized_ ? deoptimized_frame_->GetFunction()
- : handle(frame_->function(), isolate_);
+ : handle(javascript_frame()->function(), isolate_);
}
Handle<Object> FrameInspector::GetParameter(int index) {
@@ -57,8 +67,9 @@ Handle<Object> FrameInspector::GetParameter(int index) {
Handle<Object> FrameInspector::GetExpression(int index) {
// TODO(turbofan): Revisit once we support deoptimization.
- if (frame_->LookupCode()->is_turbofanned() &&
- frame_->function()->shared()->asm_function() &&
+ if (frame_->is_java_script() &&
+ javascript_frame()->LookupCode()->is_turbofanned() &&
+ javascript_frame()->function()->shared()->asm_function() &&
!FLAG_turbo_asm_deoptimization) {
return isolate_->factory()->undefined_value();
}
@@ -68,18 +79,18 @@ Handle<Object> FrameInspector::GetExpression(int index) {
int FrameInspector::GetSourcePosition() {
- if (is_optimized_) {
- return deoptimized_frame_->GetSourcePosition();
- } else if (is_interpreted_) {
+ if (is_optimized_) return deoptimized_frame_->GetSourcePosition();
+ AbstractCode* code;
+ int code_offset;
+ if (is_interpreted_) {
InterpretedFrame* frame = reinterpret_cast<InterpretedFrame*>(frame_);
- BytecodeArray* bytecode_array =
- frame->function()->shared()->bytecode_array();
- return bytecode_array->SourcePosition(frame->GetBytecodeOffset());
+ code = AbstractCode::cast(frame->GetBytecodeArray());
+ code_offset = frame->GetBytecodeOffset();
} else {
- Code* code = frame_->LookupCode();
- int offset = static_cast<int>(frame_->pc() - code->instruction_start());
- return code->SourcePosition(offset);
+ code = AbstractCode::cast(frame_->LookupCode());
+ code_offset = static_cast<int>(frame_->pc() - code->instruction_start());
}
+ return code->SourcePosition(code_offset);
}
@@ -97,8 +108,9 @@ Handle<Object> FrameInspector::GetContext() {
// To inspect all the provided arguments the frame might need to be
// replaced with the arguments frame.
-void FrameInspector::SetArgumentsFrame(JavaScriptFrame* frame) {
+void FrameInspector::SetArgumentsFrame(StandardFrame* frame) {
DCHECK(has_adapted_arguments_);
+ DCHECK(frame->is_arguments_adaptor());
frame_ = frame;
is_optimized_ = frame_->is_optimized();
is_interpreted_ = frame_->is_interpreted();
@@ -117,26 +129,31 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
// TODO(yangguo): check whether this is necessary, now that we materialize
// context locals as well.
Handle<String> name(scope_info->ParameterName(i));
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
Handle<Object> value =
i < GetParametersCount()
? GetParameter(i)
: Handle<Object>::cast(isolate_->factory()->undefined_value());
- DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsTheHole(isolate_));
JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
// Second fill all stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->StackLocalName(i));
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
// TODO(yangguo): We convert optimized out values to {undefined} when they
// are passed to the debugger. Eventually we should handle them somehow.
- if (value->IsTheHole()) value = isolate_->factory()->undefined_value();
- if (value->IsOptimizedOut()) value = isolate_->factory()->undefined_value();
+ if (value->IsTheHole(isolate_)) {
+ value = isolate_->factory()->undefined_value();
+ }
+ if (value->IsOptimizedOut(isolate_)) {
+ value = isolate_->factory()->undefined_value();
+ }
JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
}
}
@@ -152,10 +169,8 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
void FrameInspector::UpdateStackLocalsFromMaterializedObject(
Handle<JSObject> target, Handle<ScopeInfo> scope_info) {
- if (is_optimized_) {
- // Optimized frames are not supported. Simply give up.
- return;
- }
+ // Optimized frames and wasm frames are not supported. Simply give up.
+ if (is_optimized_ || frame_->is_wasm()) return;
HandleScope scope(isolate_);
@@ -163,23 +178,23 @@ void FrameInspector::UpdateStackLocalsFromMaterializedObject(
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
// Shadowed parameters were not materialized.
Handle<String> name(scope_info->ParameterName(i));
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
- DCHECK(!frame_->GetParameter(i)->IsTheHole());
+ DCHECK(!javascript_frame()->GetParameter(i)->IsTheHole(isolate_));
Handle<Object> value =
Object::GetPropertyOrElement(target, name).ToHandleChecked();
- frame_->SetParameterValue(i, *value);
+ javascript_frame()->SetParameterValue(i, *value);
}
// Stack locals.
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- if (scope_info->LocalIsSynthetic(i)) continue;
+ Handle<String> name(scope_info->StackLocalName(i));
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
int index = scope_info->StackLocalIndex(i);
- if (frame_->GetExpression(index)->IsTheHole()) continue;
+ if (frame_->GetExpression(index)->IsTheHole(isolate_)) continue;
Handle<Object> value =
- Object::GetPropertyOrElement(
- target, handle(scope_info->StackLocalName(i), isolate_))
- .ToHandleChecked();
+ Object::GetPropertyOrElement(target, name).ToHandleChecked();
frame_->SetExpression(index, *value);
}
}
@@ -194,9 +209,8 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
&maybe_assigned_flag) != -1;
}
-
-SaveContext* DebugFrameHelper::FindSavedContextForFrame(
- Isolate* isolate, JavaScriptFrame* frame) {
+SaveContext* DebugFrameHelper::FindSavedContextForFrame(Isolate* isolate,
+ StandardFrame* frame) {
SaveContext* save = isolate->save_context();
while (save != NULL && !save->IsBelowFrame(frame)) {
save = save->prev();
@@ -205,13 +219,16 @@ SaveContext* DebugFrameHelper::FindSavedContextForFrame(
return save;
}
-
-int DebugFrameHelper::FindIndexedNonNativeFrame(JavaScriptFrameIterator* it,
+int DebugFrameHelper::FindIndexedNonNativeFrame(StackTraceFrameIterator* it,
int index) {
int count = -1;
for (; !it->done(); it->Advance()) {
+ if (it->is_wasm()) {
+ if (++count == index) return 0;
+ continue;
+ }
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- it->frame()->Summarize(&frames);
+ it->javascript_frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
// Omit functions from native and extension scripts.
if (!frames[i].function()->shared()->IsSubjectToDebugging()) continue;
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index c04fd2b6bf..e8698e70ae 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -15,21 +15,28 @@ namespace internal {
class FrameInspector {
public:
- FrameInspector(JavaScriptFrame* frame, int inlined_jsframe_index,
+ FrameInspector(StandardFrame* frame, int inlined_jsframe_index,
Isolate* isolate);
~FrameInspector();
int GetParametersCount();
- Handle<Object> GetFunction();
+ Handle<JSFunction> GetFunction();
+ Handle<Script> GetScript();
Handle<Object> GetParameter(int index);
Handle<Object> GetExpression(int index);
int GetSourcePosition();
bool IsConstructor();
Handle<Object> GetContext();
- JavaScriptFrame* GetArgumentsFrame() { return frame_; }
- void SetArgumentsFrame(JavaScriptFrame* frame);
+ inline JavaScriptFrame* javascript_frame() {
+ return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
+ : JavaScriptFrame::cast(frame_);
+ }
+ inline WasmFrame* wasm_frame() { return WasmFrame::cast(frame_); }
+
+ JavaScriptFrame* GetArgumentsFrame() { return javascript_frame(); }
+ void SetArgumentsFrame(StandardFrame* frame);
void MaterializeStackLocals(Handle<JSObject> target,
Handle<ScopeInfo> scope_info);
@@ -44,7 +51,7 @@ class FrameInspector {
bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
Handle<String> parameter_name);
- JavaScriptFrame* frame_;
+ StandardFrame* frame_;
DeoptimizedFrameInfo* deoptimized_frame_;
Isolate* isolate_;
bool is_optimized_;
@@ -59,10 +66,10 @@ class FrameInspector {
class DebugFrameHelper : public AllStatic {
public:
static SaveContext* FindSavedContextForFrame(Isolate* isolate,
- JavaScriptFrame* frame);
+ StandardFrame* frame);
// Advances the iterator to the frame that matches the index and returns the
// inlined frame index, or -1 if not found. Skips native JS functions.
- static int FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index);
+ static int FindIndexedNonNativeFrame(StackTraceFrameIterator* it, int index);
// Helper functions for wrapping and unwrapping stack frame ids.
static Smi* WrapFrameId(StackFrame::Id id) {
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index d9c615b01b..55108bb96e 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -4,12 +4,17 @@
#include "src/debug/debug-scopes.h"
+#include <memory>
+
#include "src/ast/scopes.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/isolate-inl.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
+#include "src/parsing/rewriter.h"
namespace v8 {
namespace internal {
@@ -21,19 +26,21 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
nested_scope_chain_(4),
seen_script_scope_(false),
failed_(false) {
- if (!frame_inspector->GetContext()->IsContext() ||
- !frame_inspector->GetFunction()->IsJSFunction()) {
+ if (!frame_inspector->GetContext()->IsContext()) {
// Optimized frame, context or function cannot be materialized. Give up.
return;
}
context_ = Handle<Context>::cast(frame_inspector->GetContext());
+ // We should not instantiate a ScopeIterator for wasm frames.
+ DCHECK(frame_inspector->GetScript()->type() != Script::TYPE_WASM);
+
// Catch the case when the debugger stops in an internal function.
Handle<JSFunction> function = GetFunction();
Handle<SharedFunctionInfo> shared_info(function->shared());
Handle<ScopeInfo> scope_info(shared_info->scope_info());
- if (shared_info->script() == isolate->heap()->undefined_value()) {
+ if (shared_info->script()->IsUndefined(isolate)) {
while (context_->closure() == *function) {
context_ = Handle<Context>(context_->previous(), isolate_);
}
@@ -80,33 +87,47 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
}
// Reparse the code and analyze the scopes.
- Scope* scope = NULL;
// Check whether we are in global, eval or function code.
Zone zone(isolate->allocator());
+ std::unique_ptr<ParseInfo> info;
if (scope_info->scope_type() != FUNCTION_SCOPE) {
// Global or eval code.
Handle<Script> script(Script::cast(shared_info->script()));
- ParseInfo info(&zone, script);
+ info.reset(new ParseInfo(&zone, script));
+ info->set_toplevel();
if (scope_info->scope_type() == SCRIPT_SCOPE) {
- info.set_global();
+ info->set_global();
} else {
DCHECK(scope_info->scope_type() == EVAL_SCOPE);
- info.set_eval();
- info.set_context(Handle<Context>(function->context()));
- }
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.literal()->scope();
+ info->set_eval();
+ info->set_context(Handle<Context>(function->context()));
+ // Language mode may be inherited from the eval caller.
+ // Retrieve it from shared function info.
+ info->set_language_mode(shared_info->language_mode());
}
- if (!ignore_nested_scopes) RetrieveScopeChain(scope);
- if (collect_non_locals) CollectNonLocals(scope);
} else {
- // Function code
- ParseInfo info(&zone, function);
- if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
- scope = info.literal()->scope();
+ // Inner function.
+ info.reset(new ParseInfo(&zone, function));
+ }
+ if (Parser::ParseStatic(info.get()) && Rewriter::Rewrite(info.get())) {
+ DeclarationScope* scope = info->literal()->scope();
+ if (!ignore_nested_scopes || collect_non_locals) {
+ CollectNonLocals(info.get(), scope);
+ }
+ if (!ignore_nested_scopes) {
+ AstNodeFactory ast_node_factory(info.get()->ast_value_factory());
+ scope->AllocateVariables(info.get(), &ast_node_factory);
+ RetrieveScopeChain(scope);
}
- if (!ignore_nested_scopes) RetrieveScopeChain(scope);
- if (collect_non_locals) CollectNonLocals(scope);
+ } else if (!ignore_nested_scopes) {
+ // A failed reparse indicates that the preparser has diverged from the
+ // parser or that the preparse data given to the initial parse has been
+ // faulty. We fail in debug mode but in release mode we only provide the
+ // information we get from the context chain but nothing about
+ // completely stack allocated scopes or stack allocated locals.
+ // Or it could be due to stack overflow.
+ DCHECK(isolate_->has_pending_exception());
+ failed_ = true;
}
UnwrapEvaluationContext();
}
@@ -122,12 +143,23 @@ ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
UnwrapEvaluationContext();
}
+ScopeIterator::ScopeIterator(Isolate* isolate,
+ Handle<JSGeneratorObject> generator)
+ : isolate_(isolate),
+ frame_inspector_(NULL),
+ context_(generator->context()),
+ seen_script_scope_(false),
+ failed_(false) {
+ if (!generator->function()->shared()->IsSubjectToDebugging()) {
+ context_ = Handle<Context>();
+ }
+ UnwrapEvaluationContext();
+}
+
void ScopeIterator::UnwrapEvaluationContext() {
while (true) {
if (context_.is_null()) return;
if (!context_->IsDebugEvaluateContext()) return;
- // An existing debug-evaluate context can only be outside the local scope.
- DCHECK(nested_scope_chain_.is_empty());
Handle<Object> wrapped(context_->get(Context::WRAPPED_CONTEXT_INDEX),
isolate_);
if (wrapped->IsContext()) {
@@ -201,11 +233,15 @@ void ScopeIterator::Next() {
} else if (nested_scope_chain_.is_empty()) {
context_ = Handle<Context>(context_->previous(), isolate_);
} else {
- if (nested_scope_chain_.last().scope_info->HasContext()) {
- DCHECK(context_->previous() != NULL);
- context_ = Handle<Context>(context_->previous(), isolate_);
- }
- nested_scope_chain_.RemoveLast();
+ do {
+ if (nested_scope_chain_.last().scope_info->HasContext()) {
+ DCHECK(context_->previous() != NULL);
+ context_ = Handle<Context>(context_->previous(), isolate_);
+ }
+ nested_scope_chain_.RemoveLast();
+ if (nested_scope_chain_.is_empty()) break;
+ // Repeat to skip hidden scopes.
+ } while (nested_scope_chain_.last().is_hidden());
}
UnwrapEvaluationContext();
}
@@ -236,8 +272,10 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
return ScopeTypeBlock;
case EVAL_SCOPE:
- UNREACHABLE();
+ DCHECK(!scope_info->HasContext() || context_->IsFunctionContext());
+ return ScopeTypeEval;
}
+ UNREACHABLE();
}
if (context_->IsNativeContext()) {
DCHECK(context_->global_object()->IsJSGlobalObject());
@@ -284,7 +322,8 @@ MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
// Materialize the content of the closure scope into a JSObject.
return MaterializeClosure();
case ScopeIterator::ScopeTypeBlock:
- return MaterializeBlockScope();
+ case ScopeIterator::ScopeTypeEval:
+ return MaterializeInnerScope();
case ScopeIterator::ScopeTypeModule:
return MaterializeModuleScope();
}
@@ -295,7 +334,8 @@ MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
bool ScopeIterator::HasContext() {
ScopeType type = Type();
- if (type == ScopeTypeBlock || type == ScopeTypeLocal) {
+ if (type == ScopeTypeBlock || type == ScopeTypeLocal ||
+ type == ScopeTypeEval) {
if (!nested_scope_chain_.is_empty()) {
return nested_scope_chain_.last().scope_info->HasContext();
}
@@ -321,7 +361,8 @@ bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
case ScopeIterator::ScopeTypeScript:
return SetScriptVariableValue(variable_name, new_value);
case ScopeIterator::ScopeTypeBlock:
- return SetBlockVariableValue(variable_name, new_value);
+ case ScopeIterator::ScopeTypeEval:
+ return SetInnerScopeVariableValue(variable_name, new_value);
case ScopeIterator::ScopeTypeModule:
// TODO(2399): should we implement it?
break;
@@ -421,29 +462,16 @@ void ScopeIterator::DebugPrint() {
}
#endif
-
-void ScopeIterator::RetrieveScopeChain(Scope* scope) {
- if (scope != NULL) {
- int source_position = frame_inspector_->GetSourcePosition();
- GetNestedScopeChain(isolate_, scope, source_position);
- } else {
- // A failed reparse indicates that the preparser has diverged from the
- // parser or that the preparse data given to the initial parse has been
- // faulty. We fail in debug mode but in release mode we only provide the
- // information we get from the context chain but nothing about
- // completely stack allocated scopes or stack allocated locals.
- // Or it could be due to stack overflow.
- DCHECK(isolate_->has_pending_exception());
- failed_ = true;
- }
+void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) {
+ DCHECK_NOT_NULL(scope);
+ int source_position = frame_inspector_->GetSourcePosition();
+ GetNestedScopeChain(isolate_, scope, source_position);
}
-
-void ScopeIterator::CollectNonLocals(Scope* scope) {
- if (scope != NULL) {
- DCHECK(non_locals_.is_null());
- non_locals_ = scope->CollectNonLocals(StringSet::New(isolate_));
- }
+void ScopeIterator::CollectNonLocals(ParseInfo* info, DeclarationScope* scope) {
+ DCHECK_NOT_NULL(scope);
+ DCHECK(non_locals_.is_null());
+ non_locals_ = scope->CollectNonLocals(info, StringSet::New(isolate_));
}
@@ -453,7 +481,7 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
global->native_context()->script_context_table());
Handle<JSObject> script_scope =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+ isolate_->factory()->NewJSObjectWithNullProto();
for (int context_index = 0; context_index < script_contexts->used();
context_index++) {
@@ -470,7 +498,7 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
Handle<JSFunction> function = GetFunction();
Handle<JSObject> local_scope =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+ isolate_->factory()->NewJSObjectWithNullProto();
frame_inspector_->MaterializeStackLocals(local_scope, function);
Handle<Context> frame_context =
@@ -482,19 +510,16 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
if (!scope_info->HasContext()) return local_scope;
- // Third fill all context locals.
+ // Fill all context locals.
Handle<Context> function_context(frame_context->closure_context());
CopyContextLocalsToScopeObject(scope_info, function_context, local_scope);
// Finally copy any properties from the function context extension.
// These will be variables introduced by eval.
if (function_context->closure() == *function &&
- function_context->has_extension() &&
!function_context->IsNativeContext()) {
- bool success = CopyContextExtensionToScopeObject(
- handle(function_context->extension_object(), isolate_), local_scope,
- INCLUDE_PROTOS);
- if (!success) return MaybeHandle<JSObject>();
+ CopyContextExtensionToScopeObject(function_context, local_scope,
+ KeyCollectionMode::kIncludePrototypes);
}
return local_scope;
@@ -513,19 +538,15 @@ Handle<JSObject> ScopeIterator::MaterializeClosure() {
// Allocate and initialize a JSObject with all the content of this function
// closure.
Handle<JSObject> closure_scope =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+ isolate_->factory()->NewJSObjectWithNullProto();
// Fill all context locals to the context extension.
CopyContextLocalsToScopeObject(scope_info, context, closure_scope);
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
- if (context->has_extension()) {
- bool success = CopyContextExtensionToScopeObject(
- handle(context->extension_object(), isolate_), closure_scope, OWN_ONLY);
- DCHECK(success);
- USE(success);
- }
+ CopyContextExtensionToScopeObject(context, closure_scope,
+ KeyCollectionMode::kOwnOnly);
return closure_scope;
}
@@ -540,7 +561,7 @@ Handle<JSObject> ScopeIterator::MaterializeCatchScope() {
Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
isolate_);
Handle<JSObject> catch_scope =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+ isolate_->factory()->NewJSObjectWithNullProto();
JSObject::SetOwnPropertyIgnoreAttributes(catch_scope, name, thrown_object,
NONE)
.Check();
@@ -560,14 +581,14 @@ Handle<JSObject> ScopeIterator::WithContextExtension() {
// Create a plain JSObject which materializes the block scope for the specified
// block context.
-Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
- Handle<JSObject> block_scope =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+Handle<JSObject> ScopeIterator::MaterializeInnerScope() {
+ Handle<JSObject> inner_scope =
+ isolate_->factory()->NewJSObjectWithNullProto();
Handle<Context> context = Handle<Context>::null();
if (!nested_scope_chain_.is_empty()) {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
- frame_inspector_->MaterializeStackLocals(block_scope, scope_info);
+ frame_inspector_->MaterializeStackLocals(inner_scope, scope_info);
if (scope_info->HasContext()) context = CurrentContext();
} else {
context = CurrentContext();
@@ -575,17 +596,11 @@ Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
if (!context.is_null()) {
// Fill all context locals.
- CopyContextLocalsToScopeObject(handle(context->scope_info()),
- context, block_scope);
- // Fill all extension variables.
- if (context->extension_object() != nullptr) {
- bool success = CopyContextExtensionToScopeObject(
- handle(context->extension_object()), block_scope, OWN_ONLY);
- DCHECK(success);
- USE(success);
- }
+ CopyContextLocalsToScopeObject(CurrentScopeInfo(), context, inner_scope);
+ CopyContextExtensionToScopeObject(context, inner_scope,
+ KeyCollectionMode::kOwnOnly);
}
- return block_scope;
+ return inner_scope;
}
@@ -599,7 +614,7 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
// Allocate and initialize a JSObject with all the members of the debugged
// module.
Handle<JSObject> module_scope =
- isolate_->factory()->NewJSObject(isolate_->object_function());
+ isolate_->factory()->NewJSObjectWithNullProto();
// Fill all context locals.
CopyContextLocalsToScopeObject(scope_info, context, module_scope);
@@ -607,161 +622,121 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
return module_scope;
}
-
-// Set the context local variable value.
-bool ScopeIterator::SetContextLocalValue(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
- Handle<String> next_name(scope_info->ContextLocalName(i));
- if (String::Equals(variable_name, next_name)) {
- VariableMode mode;
- InitializationFlag init_flag;
- MaybeAssignedFlag maybe_assigned_flag;
- int context_index = ScopeInfo::ContextSlotIndex(
- scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag);
- context->set(context_index, *new_value);
+bool ScopeIterator::SetParameterValue(Handle<ScopeInfo> scope_info,
+ JavaScriptFrame* frame,
+ Handle<String> parameter_name,
+ Handle<Object> new_value) {
+ // Setting stack locals of optimized frames is not supported.
+ if (frame->is_optimized()) return false;
+ HandleScope scope(isolate_);
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ if (String::Equals(handle(scope_info->ParameterName(i)), parameter_name)) {
+ frame->SetParameterValue(i, *new_value);
return true;
}
}
-
return false;
}
-
-bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
+bool ScopeIterator::SetStackVariableValue(Handle<ScopeInfo> scope_info,
+ Handle<String> variable_name,
Handle<Object> new_value) {
+ if (frame_inspector_ == nullptr) return false;
JavaScriptFrame* frame = GetFrame();
- // Optimized frames are not supported.
+ // Setting stack locals of optimized frames is not supported.
if (frame->is_optimized()) return false;
-
- Handle<JSFunction> function(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- bool default_result = false;
-
- // Parameters.
- for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- HandleScope scope(isolate_);
- if (String::Equals(handle(scope_info->ParameterName(i)), variable_name)) {
- frame->SetParameterValue(i, *new_value);
- // Argument might be shadowed in heap context, don't stop here.
- default_result = true;
- }
- }
-
- // Stack locals.
+ HandleScope scope(isolate_);
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- HandleScope scope(isolate_);
if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
return true;
}
}
+ return false;
+}
- if (scope_info->HasContext()) {
- // Context locals.
- Handle<Context> frame_context(Context::cast(frame->context()));
- Handle<Context> function_context(frame_context->declaration_context());
- if (SetContextLocalValue(scope_info, function_context, variable_name,
- new_value)) {
+bool ScopeIterator::SetContextVariableValue(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ HandleScope scope(isolate_);
+ for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+ Handle<String> next_name(scope_info->ContextLocalName(i));
+ if (String::Equals(variable_name, next_name)) {
+ VariableMode mode;
+ InitializationFlag init_flag;
+ MaybeAssignedFlag maybe_assigned_flag;
+ int context_index = ScopeInfo::ContextSlotIndex(
+ scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag);
+ context->set(context_index, *new_value);
return true;
}
+ }
- // Function context extension. These are variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(function_context->extension_object());
-
- Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- // We don't expect this to do anything except replacing
- // property value.
- Runtime::SetObjectProperty(isolate_, ext, variable_name, new_value,
- SLOPPY)
- .Assert();
- return true;
- }
- }
+ if (context->has_extension()) {
+ Handle<JSObject> ext(context->extension_object());
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
+ // We don't expect this to do anything except replacing property value.
+ JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
+ NONE)
+ .Check();
+ return true;
}
}
- return default_result;
+ return false;
}
-
-bool ScopeIterator::SetBlockVariableValue(Handle<String> variable_name,
+bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- Handle<ScopeInfo> scope_info = CurrentScopeInfo();
JavaScriptFrame* frame = GetFrame();
+ Handle<ScopeInfo> scope_info(frame->function()->shared()->scope_info());
- for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
- HandleScope scope(isolate_);
- if (String::Equals(handle(scope_info->StackLocalName(i)), variable_name)) {
- frame->SetExpression(scope_info->StackLocalIndex(i), *new_value);
- return true;
- }
- }
+ // Parameter might be shadowed in context. Don't stop here.
+ bool result = SetParameterValue(scope_info, frame, variable_name, new_value);
- if (HasContext()) {
- Handle<Context> context = CurrentContext();
- if (SetContextLocalValue(scope_info, context, variable_name, new_value)) {
- return true;
- }
+ // Stack locals.
+ if (SetStackVariableValue(scope_info, variable_name, new_value)) {
+ return true;
+ }
- Handle<JSObject> ext(context->extension_object(), isolate_);
- if (!ext.is_null()) {
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- // We don't expect this to do anything except replacing property value.
- JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
- NONE)
- .Check();
- return true;
- }
- }
+ if (scope_info->HasContext() &&
+ SetContextVariableValue(scope_info, CurrentContext(), variable_name,
+ new_value)) {
+ return true;
}
- return false;
+ return result;
}
+bool ScopeIterator::SetInnerScopeVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ Handle<ScopeInfo> scope_info = CurrentScopeInfo();
+ DCHECK(scope_info->scope_type() == BLOCK_SCOPE ||
+ scope_info->scope_type() == EVAL_SCOPE);
-// This method copies structure of MaterializeClosure method above.
-bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- Handle<Context> context = CurrentContext();
- DCHECK(context->IsFunctionContext());
-
- // Context locals to the context extension.
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
- if (SetContextLocalValue(scope_info, context, variable_name, new_value)) {
+ // Setting stack locals of optimized frames is not supported.
+ if (SetStackVariableValue(scope_info, variable_name, new_value)) {
return true;
}
- // Properties from the function context extension. This will
- // be variables introduced by eval.
- if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension_object()));
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
- DCHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- // We don't expect this to do anything except replacing property value.
- JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
- NONE)
- .Check();
- return true;
- }
+ if (HasContext() && SetContextVariableValue(scope_info, CurrentContext(),
+ variable_name, new_value)) {
+ return true;
}
return false;
}
+// This method copies the structure of the MaterializeClosure method above.
+bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ DCHECK(CurrentContext()->IsFunctionContext());
+ return SetContextVariableValue(CurrentScopeInfo(), CurrentContext(),
+ variable_name, new_value);
+}
bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
@@ -780,7 +755,6 @@ bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
return false;
}
-
bool ScopeIterator::SetCatchVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
Handle<Context> context = CurrentContext();
@@ -801,57 +775,61 @@ void ScopeIterator::CopyContextLocalsToScopeObject(
int local_count = scope_info->ContextLocalCount();
if (local_count == 0) return;
// Fill all context locals to the context extension.
- int first_context_var = scope_info->StackLocalCount();
- int start = scope_info->ContextLocalNameEntriesIndex();
for (int i = 0; i < local_count; ++i) {
- if (scope_info->LocalIsSynthetic(first_context_var + i)) continue;
+ Handle<String> name(scope_info->ContextLocalName(i));
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
int context_index = Context::MIN_CONTEXT_SLOTS + i;
Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
// Reflect variables under TDZ as undefined in scope object.
- if (value->IsTheHole()) continue;
+ if (value->IsTheHole(isolate)) continue;
// This should always succeed.
// TODO(verwaest): Use AddDataProperty instead.
- JSObject::SetOwnPropertyIgnoreAttributes(
- scope_object, handle(String::cast(scope_info->get(i + start))), value,
- NONE)
+ JSObject::SetOwnPropertyIgnoreAttributes(scope_object, name, value, NONE)
.Check();
}
}
-bool ScopeIterator::CopyContextExtensionToScopeObject(
- Handle<JSObject> extension, Handle<JSObject> scope_object,
- KeyCollectionType type) {
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, keys, JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS),
- false);
+void ScopeIterator::CopyContextExtensionToScopeObject(
+ Handle<Context> context, Handle<JSObject> scope_object,
+ KeyCollectionMode mode) {
+ if (context->extension_object() == nullptr) return;
+ Handle<JSObject> extension(context->extension_object());
+ Handle<FixedArray> keys =
+ KeyAccumulator::GetKeys(extension, mode, ENUMERABLE_STRINGS)
+ .ToHandleChecked();
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
DCHECK(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, value, Object::GetPropertyOrElement(extension, key), false);
- RETURN_ON_EXCEPTION_VALUE(
- isolate_, JSObject::SetOwnPropertyIgnoreAttributes(
- scope_object, key, value, NONE), false);
+ Handle<Object> value =
+ Object::GetPropertyOrElement(extension, key).ToHandleChecked();
+ JSObject::SetOwnPropertyIgnoreAttributes(scope_object, key, value, NONE)
+ .Check();
}
- return true;
}
void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
int position) {
- if (!scope->is_eval_scope()) {
+ if (scope->is_function_scope()) {
+ // Do not collect scopes of nested inner functions inside the current one.
+ Handle<JSFunction> function = frame_inspector_->GetFunction();
+ if (scope->end_position() < function->shared()->end_position()) return;
+ }
+ if (scope->is_hidden()) {
+ // We need to add this chain element in case the scope has a context
+ // associated. We need to keep the scope chain and context chain in sync.
+ nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate)));
+ } else {
nested_scope_chain_.Add(ExtendedScopeInfo(scope->GetScopeInfo(isolate),
scope->start_position(),
scope->end_position()));
}
- for (int i = 0; i < scope->inner_scopes()->length(); i++) {
- Scope* inner_scope = scope->inner_scopes()->at(i);
+ for (Scope* inner_scope = scope->inner_scope(); inner_scope != nullptr;
+ inner_scope = inner_scope->sibling()) {
int beg_pos = inner_scope->start_position();
int end_pos = inner_scope->end_position();
- DCHECK(beg_pos >= 0 && end_pos >= 0);
+ DCHECK((beg_pos >= 0 && end_pos >= 0) || inner_scope->is_hidden());
if (beg_pos <= position && position < end_pos) {
GetNestedScopeChain(isolate, inner_scope, position);
return;
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 4e95fc4ba4..0491d73c74 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -25,6 +25,7 @@ class ScopeIterator {
ScopeTypeCatch,
ScopeTypeBlock,
ScopeTypeScript,
+ ScopeTypeEval,
ScopeTypeModule
};
@@ -42,6 +43,7 @@ class ScopeIterator {
Option options = DEFAULT);
ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
+ ScopeIterator(Isolate* isolate, Handle<JSGeneratorObject> generator);
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
@@ -85,9 +87,12 @@ class ScopeIterator {
struct ExtendedScopeInfo {
ExtendedScopeInfo(Handle<ScopeInfo> info, int start, int end)
: scope_info(info), start_position(start), end_position(end) {}
+ explicit ExtendedScopeInfo(Handle<ScopeInfo> info)
+ : scope_info(info), start_position(-1), end_position(-1) {}
Handle<ScopeInfo> scope_info;
int start_position;
int end_position;
+ bool is_hidden() { return start_position == -1 && end_position == -1; }
};
Isolate* isolate_;
@@ -103,12 +108,12 @@ class ScopeIterator {
}
inline Handle<JSFunction> GetFunction() {
- return Handle<JSFunction>::cast(frame_inspector_->GetFunction());
+ return frame_inspector_->GetFunction();
}
- void RetrieveScopeChain(Scope* scope);
+ void RetrieveScopeChain(DeclarationScope* scope);
- void CollectNonLocals(Scope* scope);
+ void CollectNonLocals(ParseInfo* info, DeclarationScope* scope);
void UnwrapEvaluationContext();
@@ -117,30 +122,38 @@ class ScopeIterator {
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
Handle<JSObject> MaterializeClosure();
Handle<JSObject> MaterializeCatchScope();
- Handle<JSObject> MaterializeBlockScope();
+ Handle<JSObject> MaterializeInnerScope();
Handle<JSObject> WithContextExtension();
bool SetLocalVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
- bool SetBlockVariableValue(Handle<String> variable_name,
- Handle<Object> new_value);
+ bool SetInnerScopeVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value);
bool SetClosureVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
bool SetScriptVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
bool SetCatchVariableValue(Handle<String> variable_name,
Handle<Object> new_value);
- bool SetContextLocalValue(Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value);
+
+ // Helper functions.
+ bool SetParameterValue(Handle<ScopeInfo> scope_info, JavaScriptFrame* frame,
+ Handle<String> parameter_name,
+ Handle<Object> new_value);
+ bool SetStackVariableValue(Handle<ScopeInfo> scope_info,
+ Handle<String> variable_name,
+ Handle<Object> new_value);
+ bool SetContextVariableValue(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value);
void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<JSObject> scope_object);
- bool CopyContextExtensionToScopeObject(Handle<JSObject> extension,
+ void CopyContextExtensionToScopeObject(Handle<Context> context,
Handle<JSObject> scope_object,
- KeyCollectionType type);
+ KeyCollectionMode mode);
// Get the chain of nested scopes within this scope for the source statement
// position. The scopes will be added to the list from the outermost scope to
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index cb2d8648aa..e04695771b 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -4,24 +4,30 @@
#include "src/debug/debug.h"
+#include <memory>
+
#include "src/api.h"
#include "src/arguments.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
+#include "src/globals.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/list.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/snapshot/natives.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-module.h"
#include "include/v8-debug.h"
@@ -49,63 +55,136 @@ Debug::Debug(Isolate* isolate)
ThreadInit();
}
+BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
+ JavaScriptFrame* frame) {
+ FrameSummary summary = FrameSummary::GetFirst(frame);
+ int offset = summary.code_offset();
+ Handle<AbstractCode> abstract_code = summary.abstract_code();
+ if (abstract_code->IsCode()) offset = offset - 1;
+ auto it = BreakIterator::GetIterator(debug_info, abstract_code);
+ it->SkipTo(BreakIndexFromCodeOffset(debug_info, abstract_code, offset));
+ return it->GetBreakLocation();
+}
-static v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
- Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
- // Isolate::context() may have been NULL when the "script collected" event
- // occurred.
- if (context.is_null()) return v8::Local<v8::Context>();
- Handle<Context> native_context(context->native_context());
- return v8::Utils::ToLocal(native_context);
+void BreakLocation::AllAtCurrentStatement(Handle<DebugInfo> debug_info,
+ JavaScriptFrame* frame,
+ List<BreakLocation>* result_out) {
+ FrameSummary summary = FrameSummary::GetFirst(frame);
+ int offset = summary.code_offset();
+ Handle<AbstractCode> abstract_code = summary.abstract_code();
+ if (abstract_code->IsCode()) offset = offset - 1;
+ int statement_position;
+ {
+ auto it = BreakIterator::GetIterator(debug_info, abstract_code);
+ it->SkipTo(BreakIndexFromCodeOffset(debug_info, abstract_code, offset));
+ statement_position = it->statement_position();
+ }
+ for (auto it = BreakIterator::GetIterator(debug_info, abstract_code);
+ !it->Done(); it->Next()) {
+ if (it->statement_position() == statement_position) {
+ result_out->Add(it->GetBreakLocation());
+ }
+ }
}
-BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
- int code_offset, int position,
- int statement_position)
- : debug_info_(debug_info),
- code_offset_(code_offset),
- type_(type),
- position_(position),
- statement_position_(statement_position) {}
+int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
+ Handle<AbstractCode> abstract_code,
+ int offset) {
+ // Run through all break points to locate the one closest to the address.
+ int closest_break = 0;
+ int distance = kMaxInt;
+ DCHECK(0 <= offset && offset < abstract_code->Size());
+ for (auto it = BreakIterator::GetIterator(debug_info, abstract_code);
+ !it->Done(); it->Next()) {
+ // Check if this break point is closer than what was previously found.
+ if (it->code_offset() <= offset && offset - it->code_offset() < distance) {
+ closest_break = it->break_index();
+ distance = offset - it->code_offset();
+ // Check whether we can't get any closer.
+ if (distance == 0) break;
+ }
+ }
+ return closest_break;
+}
-BreakLocation::Iterator* BreakLocation::GetIterator(
- Handle<DebugInfo> debug_info, BreakLocatorType type) {
- if (debug_info->abstract_code()->IsBytecodeArray()) {
- return new BytecodeArrayIterator(debug_info, type);
+bool BreakLocation::HasBreakPoint(Handle<DebugInfo> debug_info) const {
+ // First check whether there is a break point with the same source position.
+ if (!debug_info->HasBreakPoint(position_)) return false;
+ // Then check whether a break point at that source position would have
+ // the same code offset. Otherwise it's just a break location that we can
+ // step to, but not actually a location where we can put a break point.
+ if (abstract_code_->IsCode()) {
+ DCHECK_EQ(debug_info->DebugCode(), abstract_code_->GetCode());
+ CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
+ return it.code_offset() == code_offset_;
} else {
- return new CodeIterator(debug_info, type);
+ DCHECK(abstract_code_->IsBytecodeArray());
+ BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
+ return it.code_offset() == code_offset_;
}
}
-BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info)
- : debug_info_(debug_info),
- break_index_(-1),
- position_(1),
- statement_position_(1) {}
-
-int BreakLocation::Iterator::ReturnPosition() {
- if (debug_info_->shared()->HasSourceCode()) {
- return debug_info_->shared()->end_position() -
- debug_info_->shared()->start_position() - 1;
+std::unique_ptr<BreakIterator> BreakIterator::GetIterator(
+ Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code,
+ BreakLocatorType type) {
+ if (abstract_code->IsBytecodeArray()) {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ return std::unique_ptr<BreakIterator>(
+ new BytecodeArrayBreakIterator(debug_info, type));
} else {
- return 0;
+ DCHECK(abstract_code->IsCode());
+ DCHECK(debug_info->HasDebugCode());
+ return std::unique_ptr<BreakIterator>(
+ new CodeBreakIterator(debug_info, type));
}
}
-BreakLocation::CodeIterator::CodeIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type)
- : Iterator(debug_info),
- reloc_iterator_(debug_info->abstract_code()->GetCode(),
- GetModeMask(type)) {
+BreakIterator::BreakIterator(Handle<DebugInfo> debug_info,
+ BreakLocatorType type)
+ : debug_info_(debug_info), break_index_(-1), break_locator_type_(type) {
+ position_ = debug_info->shared()->start_position();
+ statement_position_ = position_;
+}
+
+int BreakIterator::BreakIndexFromPosition(int source_position,
+ BreakPositionAlignment alignment) {
+ int distance = kMaxInt;
+ int closest_break = break_index();
+ while (!Done()) {
+ int next_position;
+ if (alignment == STATEMENT_ALIGNED) {
+ next_position = statement_position();
+ } else {
+ DCHECK(alignment == BREAK_POSITION_ALIGNED);
+ next_position = position();
+ }
+ if (source_position <= next_position &&
+ next_position - source_position < distance) {
+ closest_break = break_index();
+ distance = next_position - source_position;
+ // Check whether we can't get any closer.
+ if (distance == 0) break;
+ }
+ Next();
+ }
+ return closest_break;
+}
+
+CodeBreakIterator::CodeBreakIterator(Handle<DebugInfo> debug_info,
+ BreakLocatorType type)
+ : BreakIterator(debug_info, type),
+ reloc_iterator_(debug_info->DebugCode(), GetModeMask(type)),
+ source_position_iterator_(
+ debug_info->DebugCode()->source_position_table()) {
// There is at least one break location.
DCHECK(!Done());
Next();
}
-int BreakLocation::CodeIterator::GetModeMask(BreakLocatorType type) {
+int CodeBreakIterator::GetModeMask(BreakLocatorType type) {
int mask = 0;
- mask |= RelocInfo::ModeMask(RelocInfo::POSITION);
- mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
if (isolate()->is_tail_call_elimination_enabled()) {
@@ -118,81 +197,97 @@ int BreakLocation::CodeIterator::GetModeMask(BreakLocatorType type) {
return mask;
}
-void BreakLocation::CodeIterator::Next() {
+void CodeBreakIterator::Next() {
DisallowHeapAllocation no_gc;
DCHECK(!Done());
// Iterate through reloc info stopping at each breakable code target.
bool first = break_index_ == -1;
- while (!Done()) {
- if (!first) reloc_iterator_.next();
- first = false;
- if (Done()) return;
-
- // Whenever a statement position or (plain) position is passed update the
- // current value of these.
- if (RelocInfo::IsPosition(rmode())) {
- if (RelocInfo::IsStatementPosition(rmode())) {
- statement_position_ = static_cast<int>(
- rinfo()->data() - debug_info_->shared()->start_position());
- }
- // Always update the position as we don't want that to be before the
- // statement position.
- position_ = static_cast<int>(rinfo()->data() -
- debug_info_->shared()->start_position());
- DCHECK(position_ >= 0);
- DCHECK(statement_position_ >= 0);
- continue;
- }
- DCHECK(RelocInfo::IsDebugBreakSlot(rmode()) ||
- RelocInfo::IsDebuggerStatement(rmode()));
+ if (!first) reloc_iterator_.next();
+ first = false;
+ if (Done()) return;
- if (RelocInfo::IsDebugBreakSlotAtReturn(rmode())) {
- // Set the positions to the end of the function.
- statement_position_ = position_ = ReturnPosition();
+ int offset = code_offset();
+ while (!source_position_iterator_.done() &&
+ source_position_iterator_.code_offset() <= offset) {
+ position_ = source_position_iterator_.source_position();
+ if (source_position_iterator_.is_statement()) {
+ statement_position_ = position_;
}
-
- break;
+ source_position_iterator_.Advance();
}
+
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode()) ||
+ RelocInfo::IsDebuggerStatement(rmode()));
break_index_++;
}
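
Next() now steps the reloc iterator to the following break slot and then consumes every source-position entry at or before that slot's code offset, so position_ and statement_position_ always reflect the latest entry not past the slot. A sketch of that lockstep advance (simplified stand-in types, not the V8 iterator):

    #include <cstddef>
    #include <vector>

    struct PositionEntry {
      int code_offset;
      int source_position;
      bool is_statement;
    };

    void AdvanceToOffset(const std::vector<PositionEntry>& table,
                         std::size_t* index, int current_offset, int* position,
                         int* statement_position) {
      // Consume entries up to and including the current break slot's offset.
      while (*index < table.size() &&
             table[*index].code_offset <= current_offset) {
        *position = table[*index].source_position;
        if (table[*index].is_statement) *statement_position = *position;
        ++*index;
      }
    }
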
-BreakLocation BreakLocation::CodeIterator::GetBreakLocation() {
- DebugBreakType type;
+DebugBreakType CodeBreakIterator::GetDebugBreakType() {
if (RelocInfo::IsDebugBreakSlotAtReturn(rmode())) {
- type = DEBUG_BREAK_SLOT_AT_RETURN;
+ return DEBUG_BREAK_SLOT_AT_RETURN;
} else if (RelocInfo::IsDebugBreakSlotAtCall(rmode())) {
- type = DEBUG_BREAK_SLOT_AT_CALL;
+ return DEBUG_BREAK_SLOT_AT_CALL;
} else if (RelocInfo::IsDebugBreakSlotAtTailCall(rmode())) {
- type = isolate()->is_tail_call_elimination_enabled()
+ return isolate()->is_tail_call_elimination_enabled()
? DEBUG_BREAK_SLOT_AT_TAIL_CALL
: DEBUG_BREAK_SLOT_AT_CALL;
} else if (RelocInfo::IsDebuggerStatement(rmode())) {
- type = DEBUGGER_STATEMENT;
+ return DEBUGGER_STATEMENT;
} else if (RelocInfo::IsDebugBreakSlot(rmode())) {
- type = DEBUG_BREAK_SLOT;
+ return DEBUG_BREAK_SLOT;
} else {
- type = NOT_DEBUG_BREAK;
+ return NOT_DEBUG_BREAK;
}
- return BreakLocation(debug_info_, type, code_offset(), position(),
- statement_position());
}
-BreakLocation::BytecodeArrayIterator::BytecodeArrayIterator(
+void CodeBreakIterator::SkipToPosition(int position,
+ BreakPositionAlignment alignment) {
+ CodeBreakIterator it(debug_info_, break_locator_type_);
+ SkipTo(it.BreakIndexFromPosition(position, alignment));
+}
+
+void CodeBreakIterator::SetDebugBreak() {
+ DebugBreakType debug_break_type = GetDebugBreakType();
+ if (debug_break_type == DEBUGGER_STATEMENT) return;
+ DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ Builtins* builtins = isolate()->builtins();
+ Handle<Code> target = debug_break_type == DEBUG_BREAK_SLOT_AT_RETURN
+ ? builtins->Return_DebugBreak()
+ : builtins->Slot_DebugBreak();
+ DebugCodegen::PatchDebugBreakSlot(isolate(), rinfo()->pc(), target);
+}
+
+void CodeBreakIterator::ClearDebugBreak() {
+ DebugBreakType debug_break_type = GetDebugBreakType();
+ if (debug_break_type == DEBUGGER_STATEMENT) return;
+ DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ DebugCodegen::ClearDebugBreakSlot(isolate(), rinfo()->pc());
+}
+
+bool CodeBreakIterator::IsDebugBreak() {
+ DebugBreakType debug_break_type = GetDebugBreakType();
+ if (debug_break_type == DEBUGGER_STATEMENT) return false;
+ DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ return DebugCodegen::DebugBreakSlotIsPatched(rinfo()->pc());
+}
+
+BreakLocation CodeBreakIterator::GetBreakLocation() {
+ Handle<AbstractCode> code(AbstractCode::cast(debug_info_->DebugCode()));
+ return BreakLocation(code, GetDebugBreakType(), code_offset(), position_);
+}
+
+BytecodeArrayBreakIterator::BytecodeArrayBreakIterator(
Handle<DebugInfo> debug_info, BreakLocatorType type)
- : Iterator(debug_info),
- source_position_iterator_(debug_info->abstract_code()
- ->GetBytecodeArray()
- ->source_position_table()),
- break_locator_type_(type),
- start_position_(debug_info->shared()->start_position()) {
+ : BreakIterator(debug_info, type),
+ source_position_iterator_(
+ debug_info->DebugBytecodeArray()->source_position_table()) {
// There is at least one break location.
DCHECK(!Done());
Next();
}
-void BreakLocation::BytecodeArrayIterator::Next() {
+void BytecodeArrayBreakIterator::Next() {
DisallowHeapAllocation no_gc;
DCHECK(!Done());
bool first = break_index_ == -1;
@@ -200,32 +295,27 @@ void BreakLocation::BytecodeArrayIterator::Next() {
if (!first) source_position_iterator_.Advance();
first = false;
if (Done()) return;
- position_ = source_position_iterator_.source_position() - start_position_;
+ position_ = source_position_iterator_.source_position();
if (source_position_iterator_.is_statement()) {
statement_position_ = position_;
}
DCHECK(position_ >= 0);
DCHECK(statement_position_ >= 0);
- enum DebugBreakType type = GetDebugBreakType();
+ DebugBreakType type = GetDebugBreakType();
if (type == NOT_DEBUG_BREAK) continue;
if (break_locator_type_ == ALL_BREAK_LOCATIONS) break;
DCHECK_EQ(CALLS_AND_RETURNS, break_locator_type_);
if (type == DEBUG_BREAK_SLOT_AT_CALL) break;
- if (type == DEBUG_BREAK_SLOT_AT_RETURN) {
- DCHECK_EQ(ReturnPosition(), position_);
- DCHECK_EQ(ReturnPosition(), statement_position_);
- break;
- }
+ if (type == DEBUG_BREAK_SLOT_AT_RETURN) break;
}
break_index_++;
}
-BreakLocation::DebugBreakType
-BreakLocation::BytecodeArrayIterator::GetDebugBreakType() {
- BytecodeArray* bytecode_array = debug_info_->original_bytecode_array();
+DebugBreakType BytecodeArrayBreakIterator::GetDebugBreakType() {
+ BytecodeArray* bytecode_array = debug_info_->OriginalBytecodeArray();
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
@@ -246,225 +336,52 @@ BreakLocation::BytecodeArrayIterator::GetDebugBreakType() {
}
}
-BreakLocation BreakLocation::BytecodeArrayIterator::GetBreakLocation() {
- return BreakLocation(debug_info_, GetDebugBreakType(), code_offset(),
- position(), statement_position());
-}
-
-// Find the break point at the supplied address, or the closest one before
-// the address.
-BreakLocation BreakLocation::FromCodeOffset(Handle<DebugInfo> debug_info,
- int offset) {
- base::SmartPointer<Iterator> it(GetIterator(debug_info));
- it->SkipTo(BreakIndexFromCodeOffset(debug_info, offset));
- return it->GetBreakLocation();
-}
-
-FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- frame->Summarize(&frames);
- return frames.first();
-}
-
-int CallOffsetFromCodeOffset(int code_offset, bool is_interpreted) {
- // Code offset points to the instruction after the call. Subtract 1 to
- // exclude that instruction from the search. For bytecode, the code offset
- // still points to the call.
- return is_interpreted ? code_offset : code_offset - 1;
-}
-
-BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
- JavaScriptFrame* frame) {
- FrameSummary summary = GetFirstFrameSummary(frame);
- int call_offset =
- CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
- return FromCodeOffset(debug_info, call_offset);
-}
-
-void BreakLocation::AllForStatementPosition(Handle<DebugInfo> debug_info,
- int statement_position,
- List<BreakLocation>* result_out) {
- for (base::SmartPointer<Iterator> it(GetIterator(debug_info)); !it->Done();
- it->Next()) {
- if (it->statement_position() == statement_position) {
- result_out->Add(it->GetBreakLocation());
- }
- }
-}
-
-int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
- int offset) {
- // Run through all break points to locate the one closest to the address.
- int closest_break = 0;
- int distance = kMaxInt;
- DCHECK(0 <= offset && offset < debug_info->abstract_code()->Size());
- for (base::SmartPointer<Iterator> it(GetIterator(debug_info)); !it->Done();
- it->Next()) {
- // Check if this break point is closer that what was previously found.
- if (it->code_offset() <= offset && offset - it->code_offset() < distance) {
- closest_break = it->break_index();
- distance = offset - it->code_offset();
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- }
- return closest_break;
-}
-
-
-BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
- int position,
- BreakPositionAlignment alignment) {
- // Run through all break points to locate the one closest to the source
- // position.
- int distance = kMaxInt;
- base::SmartPointer<Iterator> it(GetIterator(debug_info));
- BreakLocation closest_break = it->GetBreakLocation();
- while (!it->Done()) {
- int next_position;
- if (alignment == STATEMENT_ALIGNED) {
- next_position = it->statement_position();
- } else {
- DCHECK(alignment == BREAK_POSITION_ALIGNED);
- next_position = it->position();
- }
- if (position <= next_position && next_position - position < distance) {
- closest_break = it->GetBreakLocation();
- distance = next_position - position;
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- it->Next();
- }
- return closest_break;
-}
-
-
-void BreakLocation::SetBreakPoint(Handle<Object> break_point_object) {
- // If there is not already a real break point here patch code with debug
- // break.
- if (!HasBreakPoint()) SetDebugBreak();
- DCHECK(IsDebugBreak() || IsDebuggerStatement());
- // Set the break point information.
- DebugInfo::SetBreakPoint(debug_info_, code_offset_, position_,
- statement_position_, break_point_object);
-}
-
-
-void BreakLocation::ClearBreakPoint(Handle<Object> break_point_object) {
- // Clear the break point information.
- DebugInfo::ClearBreakPoint(debug_info_, code_offset_, break_point_object);
- // If there are no more break points here remove the debug break.
- if (!HasBreakPoint()) {
- ClearDebugBreak();
- DCHECK(!IsDebugBreak());
- }
-}
-
-
-void BreakLocation::SetOneShot() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) return;
-
- // If there is a real break point here no more to do.
- if (HasBreakPoint()) {
- DCHECK(IsDebugBreak());
- return;
- }
-
- // Patch code with debug break.
- SetDebugBreak();
-}
-
-
-void BreakLocation::ClearOneShot() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) return;
-
- // If there is a real break point here no more to do.
- if (HasBreakPoint()) {
- DCHECK(IsDebugBreak());
- return;
- }
-
- // Patch code removing debug break.
- ClearDebugBreak();
- DCHECK(!IsDebugBreak());
-}
-
-
-void BreakLocation::SetDebugBreak() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) return;
-
- // If there is already a break point here just return. This might happen if
- // the same code is flooded with break points twice. Flooding the same
- // function twice might happen when stepping in a function with an exception
- // handler as the handler and the function is the same.
- if (IsDebugBreak()) return;
-
- DCHECK(IsDebugBreakSlot());
- if (abstract_code()->IsCode()) {
- Code* code = abstract_code()->GetCode();
- DCHECK(code->kind() == Code::FUNCTION);
- Builtins* builtins = isolate()->builtins();
- Handle<Code> target = IsReturn() ? builtins->Return_DebugBreak()
- : builtins->Slot_DebugBreak();
- Address pc = code->instruction_start() + code_offset();
- DebugCodegen::PatchDebugBreakSlot(isolate(), pc, target);
- } else {
- BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
- interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
- interpreter::Bytecode debugbreak =
- interpreter::Bytecodes::GetDebugBreak(bytecode);
- bytecode_array->set(code_offset(),
- interpreter::Bytecodes::ToByte(debugbreak));
- }
- DCHECK(IsDebugBreak());
+void BytecodeArrayBreakIterator::SkipToPosition(
+ int position, BreakPositionAlignment alignment) {
+ BytecodeArrayBreakIterator it(debug_info_, break_locator_type_);
+ SkipTo(it.BreakIndexFromPosition(position, alignment));
}
-
-void BreakLocation::ClearDebugBreak() {
- // Debugger statement always calls debugger. No need to modify it.
- if (IsDebuggerStatement()) return;
-
- DCHECK(IsDebugBreakSlot());
- if (abstract_code()->IsCode()) {
- Code* code = abstract_code()->GetCode();
- DCHECK(code->kind() == Code::FUNCTION);
- Address pc = code->instruction_start() + code_offset();
- DebugCodegen::ClearDebugBreakSlot(isolate(), pc);
- } else {
- BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
- BytecodeArray* original = debug_info_->original_bytecode_array();
- bytecode_array->set(code_offset(), original->get(code_offset()));
- }
- DCHECK(!IsDebugBreak());
+void BytecodeArrayBreakIterator::SetDebugBreak() {
+ DebugBreakType debug_break_type = GetDebugBreakType();
+ if (debug_break_type == DEBUGGER_STATEMENT) return;
+ DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ BytecodeArray* bytecode_array = debug_info_->DebugBytecodeArray();
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+ if (interpreter::Bytecodes::IsDebugBreak(bytecode)) return;
+ interpreter::Bytecode debugbreak =
+ interpreter::Bytecodes::GetDebugBreak(bytecode);
+ bytecode_array->set(code_offset(),
+ interpreter::Bytecodes::ToByte(debugbreak));
+}
+
+void BytecodeArrayBreakIterator::ClearDebugBreak() {
+ DebugBreakType debug_break_type = GetDebugBreakType();
+ if (debug_break_type == DEBUGGER_STATEMENT) return;
+ DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ BytecodeArray* bytecode_array = debug_info_->DebugBytecodeArray();
+ BytecodeArray* original = debug_info_->OriginalBytecodeArray();
+ bytecode_array->set(code_offset(), original->get(code_offset()));
+}
+
+bool BytecodeArrayBreakIterator::IsDebugBreak() {
+ DebugBreakType debug_break_type = GetDebugBreakType();
+ if (debug_break_type == DEBUGGER_STATEMENT) return false;
+ DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+ BytecodeArray* bytecode_array = debug_info_->DebugBytecodeArray();
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+ return interpreter::Bytecodes::IsDebugBreak(bytecode);
}
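
For bytecode, setting and clearing a break never touches the original array: SetDebugBreak patches the debug copy with a debug-break bytecode, and ClearDebugBreak restores the byte from the pristine original. A sketch of the scheme with a made-up opcode value (the real Bytecodes::GetDebugBreak picks a debug-break variant matching the original bytecode):

    #include <cstdint>
    #include <vector>

    const uint8_t kDebugBreak = 0xFF;  // hypothetical debug-break opcode

    void SetBreak(std::vector<uint8_t>* debug_copy, int offset) {
      (*debug_copy)[offset] = kDebugBreak;  // patch the copy only
    }

    void ClearBreak(std::vector<uint8_t>* debug_copy,
                    const std::vector<uint8_t>& original, int offset) {
      (*debug_copy)[offset] = original[offset];  // restore from the original
    }
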
-
-bool BreakLocation::IsDebugBreak() const {
- if (IsDebuggerStatement()) return false;
- DCHECK(IsDebugBreakSlot());
- if (abstract_code()->IsCode()) {
- Code* code = abstract_code()->GetCode();
- DCHECK(code->kind() == Code::FUNCTION);
- Address pc = code->instruction_start() + code_offset();
- return DebugCodegen::DebugBreakSlotIsPatched(pc);
- } else {
- BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
- interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
- return interpreter::Bytecodes::IsDebugBreak(bytecode);
- }
+BreakLocation BytecodeArrayBreakIterator::GetBreakLocation() {
+ Handle<AbstractCode> code(
+ AbstractCode::cast(debug_info_->DebugBytecodeArray()));
+ return BreakLocation(code, GetDebugBreakType(), code_offset(), position_);
}
-Handle<Object> BreakLocation::BreakPointObjects() const {
- return debug_info_->GetBreakPointObjects(code_offset_);
-}
-
void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
uint32_t mask = 1 << feature;
// Only count one sample per feature and isolate.
@@ -480,11 +397,11 @@ void Debug::ThreadInit() {
thread_local_.break_id_ = 0;
thread_local_.break_frame_id_ = StackFrame::NO_ID;
thread_local_.last_step_action_ = StepNone;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+ thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_fp_ = 0;
thread_local_.target_fp_ = 0;
- thread_local_.step_in_enabled_ = false;
thread_local_.return_value_ = Handle<Object>();
+ clear_suspended_generator();
// TODO(isolates): frames_are_dropped_?
base::NoBarrier_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
@@ -492,25 +409,24 @@ void Debug::ThreadInit() {
char* Debug::ArchiveDebug(char* storage) {
- char* to = storage;
- MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ // Simply reset state. Don't archive anything.
ThreadInit();
return storage + ArchiveSpacePerThread();
}
char* Debug::RestoreDebug(char* storage) {
- char* from = storage;
- MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ // Simply reset state. Don't restore anything.
+ ThreadInit();
return storage + ArchiveSpacePerThread();
}
+int Debug::ArchiveSpacePerThread() { return 0; }
-int Debug::ArchiveSpacePerThread() {
- return sizeof(ThreadLocal);
+void Debug::Iterate(ObjectVisitor* v) {
+ v->VisitPointer(&thread_local_.suspended_generator_);
}
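
Archiving debug state across thread switches is now a reset rather than a save/restore: both ArchiveDebug and RestoreDebug just call ThreadInit(), and ArchiveSpacePerThread() reports zero bytes. A sketch of the idea with a stand-in state struct (not the V8 ThreadLocal):

    struct ThreadLocalDebugState {
      int last_step_action = 0;
      void Reset() { last_step_action = 0; }
    };

    char* ArchiveDebug(ThreadLocalDebugState* state, char* storage) {
      state->Reset();  // drop per-thread stepping state instead of saving it
      return storage;  // zero bytes consumed, mirroring the patch above
    }
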
-
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
// Globalize the request debug info object and make it weak.
GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
@@ -543,9 +459,13 @@ bool Debug::Load() {
// Create the debugger context.
HandleScope scope(isolate_);
ExtensionConfiguration no_extensions;
+  // TODO(yangguo): we rely on the fact that the first context snapshot is
+  // usable as debug context. This dependency will be gone once we remove the
+  // debug context completely.
+ static const int kFirstContextSnapshotIndex = 0;
Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment(
MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(), &no_extensions,
- DEBUG_CONTEXT);
+ kFirstContextSnapshotIndex, DEBUG_CONTEXT);
// Fail if no context could be created.
if (context.is_null()) return false;
@@ -594,14 +514,14 @@ void Debug::Break(JavaScriptFrame* frame) {
// Return if we failed to retrieve the debug info.
return;
}
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
// Find the break location where execution has stopped.
BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
// Find actual break points, if any, and trigger debug break event.
- Handle<Object> break_points_hit = CheckBreakPoints(&location);
- if (!break_points_hit->IsUndefined()) {
+ Handle<Object> break_points_hit = CheckBreakPoints(debug_info, &location);
+ if (!break_points_hit->IsUndefined(isolate_)) {
// Clear all current stepping setup.
ClearStepping();
// Notify the debug event listeners.
@@ -631,12 +551,12 @@ void Debug::Break(JavaScriptFrame* frame) {
step_break = location.IsTailCall();
// Fall through.
case StepIn: {
- FrameSummary summary = GetFirstFrameSummary(frame);
+ FrameSummary summary = FrameSummary::GetFirst(frame);
int offset = summary.code_offset();
step_break = step_break || location.IsReturn() ||
(current_fp != last_fp) ||
(thread_local_.last_statement_position_ !=
- location.abstract_code()->SourceStatementPosition(offset));
+ summary.abstract_code()->SourceStatementPosition(offset));
break;
}
case StepFrame:
@@ -659,20 +579,22 @@ void Debug::Break(JavaScriptFrame* frame) {
// Find break point objects for this location, if any, and evaluate them.
// Return an array of break point objects that evaluated true.
-Handle<Object> Debug::CheckBreakPoints(BreakLocation* location,
+Handle<Object> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
+ BreakLocation* location,
bool* has_break_points) {
Factory* factory = isolate_->factory();
bool has_break_points_to_check =
- break_points_active_ && location->HasBreakPoint();
+ break_points_active_ && location->HasBreakPoint(debug_info);
if (has_break_points) *has_break_points = has_break_points_to_check;
if (!has_break_points_to_check) return factory->undefined_value();
- Handle<Object> break_point_objects = location->BreakPointObjects();
+ Handle<Object> break_point_objects =
+ debug_info->GetBreakPointObjects(location->position());
// Count the number of break points hit. If there are multiple break points
// they are in a FixedArray.
Handle<FixedArray> break_points_hit;
int break_points_hit_count = 0;
- DCHECK(!break_point_objects->IsUndefined());
+ DCHECK(!break_point_objects->IsUndefined(isolate_));
if (break_point_objects->IsFixedArray()) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
break_points_hit = factory->NewFixedArray(array->length());
@@ -710,17 +632,15 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
// Enter the debugger.
DebugScope debug_scope(this);
if (debug_scope.failed()) return false;
- BreakLocation current_position = BreakLocation::FromFrame(debug_info, frame);
List<BreakLocation> break_locations;
- BreakLocation::AllForStatementPosition(
- debug_info, current_position.statement_position(), &break_locations);
+ BreakLocation::AllAtCurrentStatement(debug_info, frame, &break_locations);
bool has_break_points_at_all = false;
for (int i = 0; i < break_locations.length(); i++) {
bool has_break_points;
Handle<Object> check_result =
- CheckBreakPoints(&break_locations[i], &has_break_points);
+ CheckBreakPoints(debug_info, &break_locations[i], &has_break_points);
has_break_points_at_all |= has_break_points;
- if (has_break_points && !check_result->IsUndefined()) return false;
+ if (has_break_points && !check_result->IsUndefined(isolate_)) return false;
}
return has_break_points_at_all;
}
@@ -759,7 +679,7 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
}
// Return whether the break point is triggered.
- return result->IsTrue();
+ return result->IsTrue(isolate_);
}
@@ -780,15 +700,17 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
DCHECK(*source_position >= 0);
// Find the break point and change it.
- BreakLocation location = BreakLocation::FromPosition(
- debug_info, *source_position, STATEMENT_ALIGNED);
- *source_position = location.statement_position();
- location.SetBreakPoint(break_point_object);
+ *source_position =
+ FindBreakablePosition(debug_info, *source_position, STATEMENT_ALIGNED);
+ DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
+ // At least one active break point now.
+ DCHECK(debug_info->GetBreakPointCount() > 0);
- feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+ ClearBreakPoints(debug_info);
+ ApplyBreakPoints(debug_info);
- // At least one active break point now.
- return debug_info->GetBreakPointCount() > 0;
+ feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+ return true;
}
@@ -796,12 +718,16 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
Handle<Object> break_point_object,
int* source_position,
BreakPositionAlignment alignment) {
+ if (script->type() == Script::TYPE_WASM) {
+ // TODO(clemensh): set breakpoint for wasm.
+ return false;
+ }
HandleScope scope(isolate_);
// Obtain shared function info for the function.
Handle<Object> result =
FindSharedFunctionInfoInScript(script, *source_position);
- if (result->IsUndefined()) return false;
+ if (result->IsUndefined(isolate_)) return false;
// Make sure the function has set up the debug info.
Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
@@ -812,76 +738,111 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
// Find position within function. The script position might be before the
// source position of the first function.
- int position;
if (shared->start_position() > *source_position) {
- position = 0;
- } else {
- position = *source_position - shared->start_position();
+ *source_position = shared->start_position();
}
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- // Source positions starts with zero.
- DCHECK(position >= 0);
// Find the break point and change it.
- BreakLocation location =
- BreakLocation::FromPosition(debug_info, position, alignment);
- location.SetBreakPoint(break_point_object);
+ *source_position =
+ FindBreakablePosition(debug_info, *source_position, alignment);
+ DebugInfo::SetBreakPoint(debug_info, *source_position, break_point_object);
+ // At least one active break point now.
+ DCHECK(debug_info->GetBreakPointCount() > 0);
- feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+ ClearBreakPoints(debug_info);
+ ApplyBreakPoints(debug_info);
- position = (alignment == STATEMENT_ALIGNED) ? location.statement_position()
- : location.position();
+ feature_tracker()->Track(DebugFeatureTracker::kBreakPoint);
+ return true;
+}
- *source_position = position + shared->start_position();
+int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
+ int source_position,
+ BreakPositionAlignment alignment) {
+ int statement_position;
+ int position;
+ if (debug_info->HasDebugCode()) {
+ CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(source_position, alignment);
+ statement_position = it.statement_position();
+ position = it.position();
+ } else {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(source_position, alignment);
+ statement_position = it.statement_position();
+ position = it.position();
+ }
+ return alignment == STATEMENT_ALIGNED ? statement_position : position;
+}
- // At least one active break point now.
- DCHECK(debug_info->GetBreakPointCount() > 0);
- return true;
+void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
+ DisallowHeapAllocation no_gc;
+ if (debug_info->break_points()->IsUndefined(isolate_)) return;
+ FixedArray* break_points = debug_info->break_points();
+ for (int i = 0; i < break_points->length(); i++) {
+ if (break_points->get(i)->IsUndefined(isolate_)) continue;
+ BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
+ if (info->GetBreakPointCount() == 0) continue;
+ if (debug_info->HasDebugCode()) {
+ CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
+ it.SetDebugBreak();
+ }
+ if (debug_info->HasDebugBytecodeArray()) {
+ BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
+ it.SetDebugBreak();
+ }
+ }
}
+void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
+ DisallowHeapAllocation no_gc;
+ if (debug_info->HasDebugCode()) {
+ for (CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done();
+ it.Next()) {
+ it.ClearDebugBreak();
+ }
+ }
+ if (debug_info->HasDebugBytecodeArray()) {
+ for (BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ !it.Done(); it.Next()) {
+ it.ClearDebugBreak();
+ }
+ }
+}
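
Every break point mutation now funnels through the same clear-then-apply idiom: wipe all patched slots, then re-patch exactly one slot per surviving break point, so the patched code can never drift out of sync with the break point list. A compact sketch (hypothetical debug-break byte, simplified signatures):

    #include <cstdint>
    #include <set>
    #include <vector>

    void UpdateBreakSlots(std::vector<uint8_t>* code,
                          const std::vector<uint8_t>& original,
                          const std::set<int>& breakpoint_offsets) {
      *code = original;                        // ClearBreakPoints
      for (int offset : breakpoint_offsets) {  // ApplyBreakPoints
        (*code)[offset] = 0xFF;                // hypothetical debug-break byte
      }
    }
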
void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
HandleScope scope(isolate_);
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
+ for (DebugInfoListNode* node = debug_info_list_; node != NULL;
+ node = node->next()) {
Handle<Object> result =
DebugInfo::FindBreakPointInfo(node->debug_info(), break_point_object);
- if (!result->IsUndefined()) {
- // Get information in the break point.
- Handle<BreakPointInfo> break_point_info =
- Handle<BreakPointInfo>::cast(result);
- Handle<DebugInfo> debug_info = node->debug_info();
-
- BreakLocation location = BreakLocation::FromCodeOffset(
- debug_info, break_point_info->code_offset());
- location.ClearBreakPoint(break_point_object);
-
- // If there are no more break points left remove the debug info for this
- // function.
+ if (result->IsUndefined(isolate_)) continue;
+ Handle<DebugInfo> debug_info = node->debug_info();
+ if (DebugInfo::ClearBreakPoint(debug_info, break_point_object)) {
+ ClearBreakPoints(debug_info);
if (debug_info->GetBreakPointCount() == 0) {
RemoveDebugInfoAndClearFromShared(debug_info);
+ } else {
+ ApplyBreakPoints(debug_info);
}
-
return;
}
- node = node->next();
}
}
-
// Clear out all the debug break code. This is ONLY supposed to be used when
// shutting down the debugger as it will leave the break point information in
// DebugInfo even though the code is patched back to the non break point state.
void Debug::ClearAllBreakPoints() {
for (DebugInfoListNode* node = debug_info_list_; node != NULL;
node = node->next()) {
- for (base::SmartPointer<BreakLocation::Iterator> it(
- BreakLocation::GetIterator(node->debug_info()));
- !it->Done(); it->Next()) {
- it->GetBreakLocation().ClearDebugBreak();
- }
+ ClearBreakPoints(node->debug_info());
}
// Remove all debug info.
while (debug_info_list_ != NULL) {
@@ -889,7 +850,6 @@ void Debug::ClearAllBreakPoints() {
}
}
-
void Debug::FloodWithOneShot(Handle<JSFunction> function,
BreakLocatorType type) {
// Debug utility functions are not subject to debugging.
@@ -911,14 +871,19 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function,
// Flood the function with break points.
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- for (base::SmartPointer<BreakLocation::Iterator> it(
- BreakLocation::GetIterator(debug_info, type));
- !it->Done(); it->Next()) {
- it->GetBreakLocation().SetOneShot();
+ if (debug_info->HasDebugCode()) {
+ for (CodeBreakIterator it(debug_info, type); !it.Done(); it.Next()) {
+ it.SetDebugBreak();
+ }
+ }
+ if (debug_info->HasDebugBytecodeArray()) {
+ for (BytecodeArrayBreakIterator it(debug_info, type); !it.Done();
+ it.Next()) {
+ it.SetDebugBreak();
+ }
}
}
-
void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
if (type == BreakUncaughtException) {
break_on_uncaught_exception_ = enable;
@@ -938,14 +903,22 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
void Debug::PrepareStepIn(Handle<JSFunction> function) {
+ CHECK(last_step_action() >= StepIn);
if (!is_active()) return;
- if (last_step_action() < StepIn) return;
if (in_debug_scope()) return;
- if (thread_local_.step_in_enabled_) {
- FloodWithOneShot(function);
- }
+ FloodWithOneShot(function);
}
+void Debug::PrepareStepInSuspendedGenerator() {
+ CHECK(has_suspended_generator());
+ if (!is_active()) return;
+ if (in_debug_scope()) return;
+ thread_local_.last_step_action_ = StepIn;
+ Handle<JSFunction> function(
+ JSGeneratorObject::cast(thread_local_.suspended_generator_)->function());
+ FloodWithOneShot(function);
+ clear_suspended_generator();
+}
void Debug::PrepareStepOnThrow() {
if (!is_active()) return;
@@ -1000,10 +973,7 @@ void Debug::PrepareStep(StepAction step_action) {
feature_tracker()->Track(DebugFeatureTracker::kStepping);
- // Remember this step action and count.
thread_local_.last_step_action_ = step_action;
- STATIC_ASSERT(StepFrame > StepIn);
- thread_local_.step_in_enabled_ = (step_action >= StepIn);
// If the function on the top frame is unresolved perform step out. This will
// be the case when calling unknown function and having the debugger stopped
@@ -1019,7 +989,7 @@ void Debug::PrepareStep(StepAction step_action) {
}
// Get the debug info (create it if it does not exist).
- FrameSummary summary = GetFirstFrameSummary(frame);
+ FrameSummary summary = FrameSummary::GetFirst(frame);
Handle<JSFunction> function(summary.function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
@@ -1028,15 +998,7 @@ void Debug::PrepareStep(StepAction step_action) {
}
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- // Refresh frame summary if the code has been recompiled for debugging.
- if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
- summary = GetFirstFrameSummary(frame);
- }
-
- int call_offset =
- CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
- BreakLocation location =
- BreakLocation::FromCodeOffset(debug_info, call_offset);
+ BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
// Any step at a return is a step-out.
if (location.IsReturn()) step_action = StepOut;
@@ -1044,9 +1006,10 @@ void Debug::PrepareStep(StepAction step_action) {
if (location.IsTailCall() && step_action == StepNext) step_action = StepOut;
thread_local_.last_statement_position_ =
- debug_info->abstract_code()->SourceStatementPosition(
- summary.code_offset());
+ summary.abstract_code()->SourceStatementPosition(summary.code_offset());
thread_local_.last_fp_ = frame->UnpaddedFP();
+ // No longer perform the current async step.
+ clear_suspended_generator();
switch (step_action) {
case StepNone:
@@ -1063,18 +1026,14 @@ void Debug::PrepareStep(StepAction step_action) {
Deoptimizer::DeoptimizeFunction(frames_it.frame()->function());
frames_it.Advance();
}
- if (frames_it.done()) {
- // Stepping out to the embedder. Disable step-in to avoid stepping into
- // the next (unrelated) call that the embedder makes.
- thread_local_.step_in_enabled_ = false;
- } else {
+ if (!frames_it.done()) {
// Fill the caller function to return to with one-shot break points.
Handle<JSFunction> caller_function(frames_it.frame()->function());
FloodWithOneShot(caller_function);
thread_local_.target_fp_ = frames_it.frame()->UnpaddedFP();
}
// Clear last position info. For stepping out it does not matter.
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+ thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_fp_ = 0;
break;
case StepNext:
@@ -1092,37 +1051,44 @@ void Debug::PrepareStep(StepAction step_action) {
}
}
-
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
BreakPositionAlignment position_alignment) {
Isolate* isolate = shared->GetIsolate();
- Heap* heap = isolate->heap();
if (!shared->HasDebugInfo()) {
- return Handle<Object>(heap->undefined_value(), isolate);
+ return isolate->factory()->undefined_value();
}
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(heap->undefined_value(), isolate);
+ return isolate->factory()->undefined_value();
}
Handle<FixedArray> locations =
isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
int count = 0;
for (int i = 0; i < debug_info->break_points()->length(); ++i) {
- if (!debug_info->break_points()->get(i)->IsUndefined()) {
+ if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
BreakPointInfo* break_point_info =
BreakPointInfo::cast(debug_info->break_points()->get(i));
int break_points = break_point_info->GetBreakPointCount();
if (break_points == 0) continue;
Smi* position = NULL;
- switch (position_alignment) {
- case STATEMENT_ALIGNED:
- position = Smi::FromInt(break_point_info->statement_position());
- break;
- case BREAK_POSITION_ALIGNED:
- position = Smi::FromInt(break_point_info->source_position());
- break;
+ if (position_alignment == STATEMENT_ALIGNED) {
+ if (debug_info->HasDebugCode()) {
+ CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(break_point_info->source_position(),
+ BREAK_POSITION_ALIGNED);
+ position = Smi::FromInt(it.statement_position());
+ } else {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ it.SkipToPosition(break_point_info->source_position(),
+ BREAK_POSITION_ALIGNED);
+ position = Smi::FromInt(it.statement_position());
+ }
+ } else {
+ DCHECK_EQ(BREAK_POSITION_ALIGNED, position_alignment);
+ position = Smi::FromInt(break_point_info->source_position());
}
for (int j = 0; j < break_points; ++j) locations->set(count++, position);
}
@@ -1130,14 +1096,12 @@ Handle<Object> Debug::GetSourceBreakLocations(
return locations;
}
-
void Debug::ClearStepping() {
// Clear the various stepping setup.
ClearOneShot();
thread_local_.last_step_action_ = StepNone;
- thread_local_.step_in_enabled_ = false;
- thread_local_.last_statement_position_ = RelocInfo::kNoPosition;
+ thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_fp_ = 0;
thread_local_.target_fp_ = 0;
}
@@ -1152,21 +1116,13 @@ void Debug::ClearOneShot() {
// removed from the list.
for (DebugInfoListNode* node = debug_info_list_; node != NULL;
node = node->next()) {
- for (base::SmartPointer<BreakLocation::Iterator> it(
- BreakLocation::GetIterator(node->debug_info()));
- !it->Done(); it->Next()) {
- it->GetBreakLocation().ClearOneShot();
- }
+ Handle<DebugInfo> debug_info = node->debug_info();
+ ClearBreakPoints(debug_info);
+ ApplyBreakPoints(debug_info);
}
}
-void Debug::EnableStepIn() {
- STATIC_ASSERT(StepFrame > StepIn);
- thread_local_.step_in_enabled_ = (last_step_action() >= StepIn);
-}
-
-
bool MatchingCodeTargets(Code* target1, Code* target2) {
if (target1 == target2) return true;
if (target1->kind() != target2->kind()) return false;
@@ -1266,7 +1222,7 @@ class RedirectActiveFunctions : public ThreadVisitor {
InterpretedFrame* interpreted_frame =
reinterpret_cast<InterpretedFrame*>(frame);
BytecodeArray* debug_copy =
- shared_->GetDebugInfo()->abstract_code()->GetBytecodeArray();
+ shared_->GetDebugInfo()->DebugBytecodeArray();
interpreted_frame->PatchBytecodeArray(debug_copy);
continue;
}
@@ -1319,9 +1275,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
{
SharedFunctionInfo::Iterator iterator(isolate_);
while (SharedFunctionInfo* shared = iterator.Next()) {
- if (!shared->OptimizedCodeMapIsCleared()) {
- shared->ClearOptimizedCodeMap();
- }
+ shared->ClearCodeFromOptimizedCodeMap();
}
}
@@ -1329,7 +1283,8 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"prepare for break points");
- bool is_interpreted = shared->HasBytecodeArray();
+ DCHECK(shared->is_compiled());
+ bool baseline_exists = shared->HasBaselineCode();
{
// TODO(yangguo): with bytecode, we still walk the heap to find all
@@ -1337,7 +1292,8 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
// smarter here and avoid the heap walk.
HeapIterator iterator(isolate_->heap());
HeapObject* obj;
- bool include_generators = !is_interpreted && shared->is_generator();
+    // Continuations of old-style generators need to be recomputed.
+ bool find_resumables = baseline_exists && shared->is_resumable();
while ((obj = iterator.next())) {
if (obj->IsJSFunction()) {
@@ -1346,9 +1302,12 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
Deoptimizer::DeoptimizeFunction(function);
}
- if (is_interpreted) continue;
- if (function->shared() == *shared) functions.Add(handle(function));
- } else if (include_generators && obj->IsJSGeneratorObject()) {
+ if (baseline_exists && function->shared() == *shared) {
+ functions.Add(handle(function));
+ }
+ } else if (find_resumables && obj->IsJSGeneratorObject()) {
+ // This case handles async functions as well, as they use generator
+ // objects for in-progress async function execution.
JSGeneratorObject* generator_obj = JSGeneratorObject::cast(obj);
if (!generator_obj->is_suspended()) continue;
JSFunction* function = generator_obj->function();
@@ -1363,17 +1322,18 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
}
// We do not need to replace code to debug bytecode.
- DCHECK(!is_interpreted || functions.length() == 0);
- DCHECK(!is_interpreted || suspended_generators.length() == 0);
+ DCHECK(baseline_exists || functions.is_empty());
+ DCHECK(baseline_exists || suspended_generators.is_empty());
// We do not need to recompile to debug bytecode.
- if (!is_interpreted && !shared->HasDebugCode()) {
+ if (baseline_exists && !shared->code()->has_debug_break_slots()) {
DCHECK(functions.length() > 0);
if (!Compiler::CompileDebugCode(functions.first())) return false;
}
for (Handle<JSFunction> const function : functions) {
function->ReplaceCode(shared->code());
+ JSFunction::EnsureLiterals(function);
}
for (Handle<JSGeneratorObject> const generator_obj : suspended_generators) {
@@ -1390,19 +1350,26 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
return true;
}
+void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
+ if (last_step_action() <= StepOut) return;
+ if (!generator_object->function()->shared()->is_async()) return;
+ DCHECK(!has_suspended_generator());
+ thread_local_.suspended_generator_ = *generator_object;
+ ClearStepping();
+}
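
RecordAsyncFunction parks the generator of a suspending async function so that PrepareStepInSuspendedGenerator can later re-arm StepIn from it; ClearStepping() runs immediately because no break should fire until the generator resumes. A sketch of the bookkeeping with stand-in types (not the V8 API):

    struct AsyncStepState {
      const void* suspended_generator = nullptr;
      bool has_suspended_generator() const {
        return suspended_generator != nullptr;
      }
    };

    void RecordAsync(AsyncStepState* state, const void* generator_object,
                     bool stepping_in_or_finer, bool is_async) {
      if (!stepping_in_or_finer || !is_async) return;
      state->suspended_generator = generator_object;  // picked up on resume
    }
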
class SharedFunctionInfoFinder {
public:
explicit SharedFunctionInfoFinder(int target_position)
: current_candidate_(NULL),
current_candidate_closure_(NULL),
- current_start_position_(RelocInfo::kNoPosition),
+ current_start_position_(kNoSourcePosition),
target_position_(target_position) {}
void NewCandidate(SharedFunctionInfo* shared, JSFunction* closure = NULL) {
if (!shared->IsSubjectToDebugging()) return;
int start_position = shared->function_token_position();
- if (start_position == RelocInfo::kNoPosition) {
+ if (start_position == kNoSourcePosition) {
start_position = shared->start_position();
}
@@ -1538,23 +1505,17 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
return false;
}
- if (shared->HasBytecodeArray()) {
- // To prepare bytecode for debugging, we already need to have the debug
- // info (containing the debug copy) upfront, but since we do not recompile,
- // preparing for break points cannot fail.
- CreateDebugInfo(shared);
- CHECK(PrepareFunctionForBreakPoints(shared));
- } else {
- if (!PrepareFunctionForBreakPoints(shared)) return false;
- CreateDebugInfo(shared);
- }
+ // To prepare bytecode for debugging, we already need to have the debug
+ // info (containing the debug copy) upfront, but since we do not recompile,
+ // preparing for break points cannot fail.
+ CreateDebugInfo(shared);
+ CHECK(PrepareFunctionForBreakPoints(shared));
return true;
}
void Debug::CreateDebugInfo(Handle<SharedFunctionInfo> shared) {
// Create the debug info object.
- DCHECK(shared->HasDebugCode());
Handle<DebugInfo> debug_info = isolate_->factory()->NewDebugInfo(shared);
// Add debug info to the list.
@@ -1600,23 +1561,18 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
}
}
-
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
- Handle<JSFunction> function(JSFunction::cast(frame->function()));
- Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<SharedFunctionInfo> shared(frame->function()->shared());
// With no debug info there are no break points, so we can't be at a return.
if (!shared->HasDebugInfo()) return false;
DCHECK(!frame->is_optimized());
- FrameSummary summary = GetFirstFrameSummary(frame);
-
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- BreakLocation location =
- BreakLocation::FromCodeOffset(debug_info, summary.code_offset());
+ BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
return location.IsReturn() || location.IsTailCall();
}
@@ -1664,21 +1620,6 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
}
-void Debug::RecordEvalCaller(Handle<Script> script) {
- script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
- // For eval scripts add information on the function from which eval was
- // called.
- StackTraceFrameIterator it(script->GetIsolate());
- if (!it.done()) {
- script->set_eval_from_shared(it.frame()->function()->shared());
- Code* code = it.frame()->LookupCode();
- int offset = static_cast<int>(
- it.frame()->pc() - code->instruction_start());
- script->set_eval_from_instructions_offset(offset);
- }
-}
-
-
MaybeHandle<Object> Debug::MakeExecutionState() {
// Create the execution state object.
Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) };
@@ -1716,13 +1657,6 @@ MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
}
-MaybeHandle<Object> Debug::MakePromiseEvent(Handle<JSObject> event_data) {
- // Create the promise event object.
- Handle<Object> argv[] = { event_data };
- return CallFunction("MakePromiseEvent", arraysize(argv), argv);
-}
-
-
MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
// Create the async task event object.
Handle<Object> argv[] = { task_event };
@@ -1753,7 +1687,7 @@ void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
HandleScope scope(isolate_);
// Check whether the promise has been marked as having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
- if (JSReceiver::GetDataProperty(promise, key)->IsUndefined()) {
+ if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate_)) {
OnException(value, promise);
}
}
@@ -1767,8 +1701,11 @@ MaybeHandle<Object> Debug::PromiseHasUserDefinedRejectHandler(
void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
- // In our prediction, try-finally is not considered to catch.
Isolate::CatchType catch_type = isolate_->PredictExceptionCatcher();
+
+ // Don't notify listener of exceptions that are internal to a desugaring.
+ if (catch_type == Isolate::CAUGHT_BY_DESUGARING) return;
+
bool uncaught = (catch_type == Isolate::NOT_CAUGHT);
if (promise->IsJSObject()) {
Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
@@ -1780,7 +1717,7 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, has_reject_handler,
PromiseHasUserDefinedRejectHandler(jspromise), /* void */);
- uncaught = has_reject_handler->IsFalse();
+ uncaught = has_reject_handler->IsFalse(isolate_);
}
// Bail out if exception breaks are not active
if (uncaught) {
@@ -1853,25 +1790,6 @@ void Debug::OnAfterCompile(Handle<Script> script) {
}
-void Debug::OnPromiseEvent(Handle<JSObject> data) {
- if (in_debug_scope() || ignore_events()) return;
-
- HandleScope scope(isolate_);
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
-
- // Create the script collected state object.
- Handle<Object> event_data;
- // Bail out and don't call debugger if exception.
- if (!MakePromiseEvent(data).ToHandle(&event_data)) return;
-
- // Process debug event.
- ProcessDebugEvent(v8::PromiseEvent,
- Handle<JSObject>::cast(event_data),
- true);
-}
-
-
void Debug::OnAsyncTaskEvent(Handle<JSObject> data) {
if (in_debug_scope() || ignore_events()) return;
@@ -1945,7 +1863,7 @@ void Debug::CallEventCallback(v8::DebugEvent event,
exec_state,
event_data,
event_listener_data_ };
- Handle<JSReceiver> global(isolate_->global_proxy());
+ Handle<JSReceiver> global = isolate_->global_proxy();
Execution::TryCall(isolate_, Handle<JSFunction>::cast(event_listener_),
global, arraysize(argv), argv);
}
@@ -2021,7 +1939,6 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
case v8::NewFunction:
case v8::BeforeCompile:
case v8::CompileError:
- case v8::PromiseEvent:
case v8::AsyncTaskEvent:
break;
case v8::Exception:
@@ -2099,7 +2016,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
request_args, &maybe_exception);
if (maybe_result.ToHandle(&answer_value)) {
- if (answer_value->IsUndefined()) {
+ if (answer_value->IsUndefined(isolate_)) {
answer = isolate_->factory()->empty_string();
} else {
answer = Handle<String>::cast(answer_value);
@@ -2116,7 +2033,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
isolate_, is_running, cmd_processor, 1, is_running_args);
Handle<Object> result;
if (!maybe_result.ToHandle(&result)) break;
- running = result->IsTrue();
+ running = result->IsTrue(isolate_);
} else {
Handle<Object> exception;
if (!maybe_exception.ToHandle(&exception)) break;
@@ -2150,7 +2067,7 @@ void Debug::SetEventListener(Handle<Object> callback,
event_listener_data_ = Handle<Object>();
// Set new entry.
- if (!callback->IsUndefined() && !callback->IsNull()) {
+ if (!callback->IsUndefined(isolate_) && !callback->IsNull(isolate_)) {
event_listener_ = global_handles->Create(*callback);
if (data.is_null()) data = isolate_->factory()->undefined_value();
event_listener_data_ = global_handles->Create(*data);
@@ -2295,7 +2212,7 @@ void Debug::PrintBreakLocation() {
JavaScriptFrameIterator iterator(isolate_);
if (iterator.done()) return;
JavaScriptFrame* frame = iterator.frame();
- FrameSummary summary = GetFirstFrameSummary(frame);
+ FrameSummary summary = FrameSummary::GetFirst(frame);
int source_position =
summary.abstract_code()->SourcePosition(summary.code_offset());
Handle<Object> script_obj(summary.function()->shared()->script(), isolate_);
@@ -2306,8 +2223,10 @@ void Debug::PrintBreakLocation() {
Handle<Script> script = Handle<Script>::cast(script_obj);
Handle<String> source(String::cast(script->source()));
Script::InitLineEnds(script);
- int line = Script::GetLineNumber(script, source_position);
- int column = Script::GetColumnNumber(script, source_position);
+ int line =
+ Script::GetLineNumber(script, source_position) - script->line_offset();
+ int column = Script::GetColumnNumber(script, source_position) -
+ (line == 0 ? script->column_offset() : 0);
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
int line_start =
line == 0 ? 0 : Smi::cast(line_ends->get(line - 1))->value() + 1;
@@ -2342,12 +2261,14 @@ DebugScope::DebugScope(Debug* debug)
break_frame_id_ = debug_->break_frame_id();
return_value_ = debug_->return_value();
- // Create the new break info. If there is no JavaScript frames there is no
- // break frame id.
- JavaScriptFrameIterator it(isolate());
- bool has_js_frames = !it.done();
- debug_->thread_local_.break_frame_id_ = has_js_frames ? it.frame()->id()
- : StackFrame::NO_ID;
+  // Create the new break info. If there are no proper frames there is no break
+ // frame id.
+ StackTraceFrameIterator it(isolate());
+ bool has_frames = !it.done();
+  // We don't currently support breaking inside wasm frames.
+ DCHECK(!has_frames || !it.is_wasm());
+ debug_->thread_local_.break_frame_id_ =
+ has_frames ? it.frame()->id() : StackFrame::NO_ID;
debug_->SetNextBreakId();
debug_->UpdateState();
@@ -2482,6 +2403,16 @@ v8::Local<v8::String> MessageImpl::GetJSON() const {
}
}
+namespace {
+v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
+ Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
+ // Isolate::context() may have been NULL when "script collected" event
+  // occurred.
+ if (context.is_null()) return v8::Local<v8::Context>();
+ Handle<Context> native_context(context->native_context());
+ return v8::Utils::ToLocal(native_context);
+}
+} // anonymous namespace
v8::Local<v8::Context> MessageImpl::GetEventContext() const {
Isolate* isolate = event_data_->GetIsolate();
@@ -2538,6 +2469,9 @@ v8::Debug::ClientData* EventDetailsImpl::GetClientData() const {
return client_data_;
}
+v8::Isolate* EventDetailsImpl::GetIsolate() const {
+ return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
+}
CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
client_data_(NULL) {
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 35aa8db049..36f973c500 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -9,15 +9,15 @@
#include "src/arguments.h"
#include "src/assembler.h"
#include "src/base/atomicops.h"
+#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
#include "src/debug/liveedit.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/flags.h"
#include "src/frames.h"
-#include "src/hashmap.h"
-#include "src/interpreter/source-position-table.h"
#include "src/runtime/runtime.h"
+#include "src/source-position-table.h"
#include "src/string-stream.h"
#include "src/v8threads.h"
@@ -38,9 +38,10 @@ enum StepAction : int8_t {
StepNext = 1, // Step to the next statement in the current function.
StepIn = 2, // Step into new functions invoked or the next statement
// in the current function.
- StepFrame = 3 // Step into a new frame or return to previous frame.
-};
+ StepFrame = 3, // Step into a new frame or return to previous frame.
+ LastStepAction = StepFrame
+};
// Type of exception break. NOTE: These values are in macros.py as well.
enum ExceptionBreakType {
@@ -60,24 +61,23 @@ enum BreakPositionAlignment {
BREAK_POSITION_ALIGNED = 1
};
+enum DebugBreakType {
+ NOT_DEBUG_BREAK,
+ DEBUGGER_STATEMENT,
+ DEBUG_BREAK_SLOT,
+ DEBUG_BREAK_SLOT_AT_CALL,
+ DEBUG_BREAK_SLOT_AT_RETURN,
+ DEBUG_BREAK_SLOT_AT_TAIL_CALL,
+};
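
// The comparisons in debug.cc (debug_break_type >= DEBUG_BREAK_SLOT) depend
// on this declaration order: every patchable slot kind sorts at or after
// DEBUG_BREAK_SLOT. A sketch of that guard, assuming the enum above:
//
//   bool IsPatchableSlot(DebugBreakType type) {
//     return type >= DEBUG_BREAK_SLOT;
//   }
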
class BreakLocation {
public:
- // Find the break point at the supplied address, or the closest one before
- // the address.
- static BreakLocation FromCodeOffset(Handle<DebugInfo> debug_info, int offset);
-
static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame);
- static void AllForStatementPosition(Handle<DebugInfo> debug_info,
- int statement_position,
- List<BreakLocation>* result_out);
-
- static BreakLocation FromPosition(Handle<DebugInfo> debug_info, int position,
- BreakPositionAlignment alignment);
-
- bool IsDebugBreak() const;
+ static void AllAtCurrentStatement(Handle<DebugInfo> debug_info,
+ JavaScriptFrame* frame,
+ List<BreakLocation>* result_out);
inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
@@ -88,138 +88,137 @@ class BreakLocation {
inline bool IsDebuggerStatement() const {
return type_ == DEBUGGER_STATEMENT;
}
- inline bool HasBreakPoint() const {
- return debug_info_->HasBreakPoint(code_offset_);
+
+ bool HasBreakPoint(Handle<DebugInfo> debug_info) const;
+
+ inline int position() const { return position_; }
+
+ private:
+ BreakLocation(Handle<AbstractCode> abstract_code, DebugBreakType type,
+ int code_offset, int position)
+ : abstract_code_(abstract_code),
+ code_offset_(code_offset),
+ type_(type),
+ position_(position) {
+ DCHECK_NE(NOT_DEBUG_BREAK, type_);
}
- Handle<Object> BreakPointObjects() const;
+ static int BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
+ Handle<AbstractCode> abstract_code,
+ int offset);
- void SetBreakPoint(Handle<Object> break_point_object);
- void ClearBreakPoint(Handle<Object> break_point_object);
+ void SetDebugBreak();
+ void ClearDebugBreak();
- void SetOneShot();
- void ClearOneShot();
+ Handle<AbstractCode> abstract_code_;
+ int code_offset_;
+ DebugBreakType type_;
+ int position_;
- inline int position() const { return position_; }
- inline int statement_position() const { return statement_position_; }
+ friend class CodeBreakIterator;
+ friend class BytecodeArrayBreakIterator;
+};
+
+class BreakIterator {
+ public:
+ static std::unique_ptr<BreakIterator> GetIterator(
+ Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code,
+ BreakLocatorType type = ALL_BREAK_LOCATIONS);
+
+ virtual ~BreakIterator() {}
- inline int code_offset() const { return code_offset_; }
- inline Isolate* isolate() { return debug_info_->GetIsolate(); }
+ virtual BreakLocation GetBreakLocation() = 0;
+ virtual bool Done() const = 0;
+ virtual void Next() = 0;
- inline AbstractCode* abstract_code() const {
- return debug_info_->abstract_code();
+ void SkipTo(int count) {
+ while (count-- > 0) Next();
}
- protected:
- enum DebugBreakType {
- NOT_DEBUG_BREAK,
- DEBUGGER_STATEMENT,
- DEBUG_BREAK_SLOT,
- DEBUG_BREAK_SLOT_AT_CALL,
- DEBUG_BREAK_SLOT_AT_RETURN,
- DEBUG_BREAK_SLOT_AT_TAIL_CALL,
- };
+ virtual int code_offset() = 0;
+ int break_index() const { return break_index_; }
+ inline int position() const { return position_; }
+ inline int statement_position() const { return statement_position_; }
- BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
- int code_offset, int position, int statement_position);
+ virtual bool IsDebugBreak() = 0;
+ virtual void ClearDebugBreak() = 0;
+ virtual void SetDebugBreak() = 0;
- class Iterator {
- public:
- virtual ~Iterator() {}
+ protected:
+ explicit BreakIterator(Handle<DebugInfo> debug_info,
+ BreakLocatorType break_locator_type);
- virtual BreakLocation GetBreakLocation() = 0;
- virtual bool Done() const = 0;
- virtual void Next() = 0;
+ int BreakIndexFromPosition(int position, BreakPositionAlignment alignment);
- void SkipTo(int count) {
- while (count-- > 0) Next();
- }
+ Isolate* isolate() { return debug_info_->GetIsolate(); }
- virtual int code_offset() = 0;
- int break_index() const { return break_index_; }
- inline int position() const { return position_; }
- inline int statement_position() const { return statement_position_; }
+ Handle<DebugInfo> debug_info_;
+ int break_index_;
+ int position_;
+ int statement_position_;
+ BreakLocatorType break_locator_type_;
- protected:
- explicit Iterator(Handle<DebugInfo> debug_info);
- int ReturnPosition();
+ private:
+ DisallowHeapAllocation no_gc_;
+ DISALLOW_COPY_AND_ASSIGN(BreakIterator);
+};
- Isolate* isolate() { return debug_info_->GetIsolate(); }
+class CodeBreakIterator : public BreakIterator {
+ public:
+ CodeBreakIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+ ~CodeBreakIterator() override {}
- Handle<DebugInfo> debug_info_;
- int break_index_;
- int position_;
- int statement_position_;
+ BreakLocation GetBreakLocation() override;
+ bool Done() const override { return reloc_iterator_.done(); }
+ void Next() override;
- private:
- DisallowHeapAllocation no_gc_;
- DISALLOW_COPY_AND_ASSIGN(Iterator);
- };
+ bool IsDebugBreak() override;
+ void ClearDebugBreak() override;
+ void SetDebugBreak() override;
- class CodeIterator : public Iterator {
- public:
- CodeIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
- ~CodeIterator() override {}
-
- BreakLocation GetBreakLocation() override;
- bool Done() const override { return reloc_iterator_.done(); }
- void Next() override;
-
- int code_offset() override {
- return static_cast<int>(
- rinfo()->pc() -
- debug_info_->abstract_code()->GetCode()->instruction_start());
- }
-
- private:
- int GetModeMask(BreakLocatorType type);
- RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
- RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
-
- RelocIterator reloc_iterator_;
- DISALLOW_COPY_AND_ASSIGN(CodeIterator);
- };
+ void SkipToPosition(int position, BreakPositionAlignment alignment);
- class BytecodeArrayIterator : public Iterator {
- public:
- BytecodeArrayIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
- ~BytecodeArrayIterator() override {}
+ int code_offset() override {
+ return static_cast<int>(rinfo()->pc() -
+ debug_info_->DebugCode()->instruction_start());
+ }
- BreakLocation GetBreakLocation() override;
- bool Done() const override { return source_position_iterator_.done(); }
- void Next() override;
+ private:
+ int GetModeMask(BreakLocatorType type);
+ DebugBreakType GetDebugBreakType();
- int code_offset() override {
- return source_position_iterator_.bytecode_offset();
- }
+ RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
+ RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
- private:
- DebugBreakType GetDebugBreakType();
+ RelocIterator reloc_iterator_;
+ SourcePositionTableIterator source_position_iterator_;
+ DISALLOW_COPY_AND_ASSIGN(CodeBreakIterator);
+};
- interpreter::SourcePositionTableIterator source_position_iterator_;
- BreakLocatorType break_locator_type_;
- int start_position_;
- DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
- };
+class BytecodeArrayBreakIterator : public BreakIterator {
+ public:
+ BytecodeArrayBreakIterator(Handle<DebugInfo> debug_info,
+ BreakLocatorType type);
+ ~BytecodeArrayBreakIterator() override {}
- static Iterator* GetIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type = ALL_BREAK_LOCATIONS);
+ BreakLocation GetBreakLocation() override;
+ bool Done() const override { return source_position_iterator_.done(); }
+ void Next() override;
- private:
- friend class Debug;
+ bool IsDebugBreak() override;
+ void ClearDebugBreak() override;
+ void SetDebugBreak() override;
- static int BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info, int offset);
+ void SkipToPosition(int position, BreakPositionAlignment alignment);
- void SetDebugBreak();
- void ClearDebugBreak();
+ int code_offset() override { return source_position_iterator_.code_offset(); }
- Handle<DebugInfo> debug_info_;
- int code_offset_;
- DebugBreakType type_;
- int position_;
- int statement_position_;
-};
+ private:
+ DebugBreakType GetDebugBreakType();
+ SourcePositionTableIterator source_position_iterator_;
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBreakIterator);
+};
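
The split above replaces the former BreakLocation::Iterator hierarchy: CodeBreakIterator walks full-codegen code through its relocation info, while BytecodeArrayBreakIterator walks interpreted code through the source position table. The effect is only indirectly visible from JavaScript; a minimal sketch, assuming a shell with natives syntax enabled and the debug context exposed:

    function f(a) { return a + 1; }
    // %GetBreakLocations consults the BreakIterator matching f's code kind
    // (relocation info for full-codegen code, the source position table for
    // bytecode) and returns the breakable source positions.
    var locations = %GetBreakLocations(f, Debug.BreakPositionAlignment.Statement);
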
// Linked list holding debug info objects. The debug info objects are kept as
// weak handles to avoid a debug info object keeping a function alive.
@@ -241,7 +240,6 @@ class DebugInfoListNode {
};
-
// Message delivered to the message handler callback. This is either a debugger
// event or the response to a command.
class MessageImpl: public v8::Debug::Message {
@@ -305,6 +303,8 @@ class EventDetailsImpl : public v8::Debug::EventDetails {
virtual v8::Local<v8::Context> GetEventContext() const;
virtual v8::Local<v8::Value> GetCallbackData() const;
virtual v8::Debug::ClientData* GetClientData() const;
+ virtual v8::Isolate* GetIsolate() const;
+
private:
DebugEvent event_; // Debug event causing the break.
Handle<JSObject> exec_state_; // Current execution state.
@@ -417,7 +417,6 @@ class Debug {
void OnCompileError(Handle<Script> script);
void OnBeforeCompile(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- void OnPromiseEvent(Handle<JSObject> data);
void OnAsyncTaskEvent(Handle<JSObject> data);
// API facing.
@@ -448,22 +447,21 @@ class Debug {
int* source_position,
BreakPositionAlignment alignment);
void ClearBreakPoint(Handle<Object> break_point_object);
- void ClearAllBreakPoints();
- void FloodWithOneShot(Handle<JSFunction> function,
- BreakLocatorType type = ALL_BREAK_LOCATIONS);
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
// Stepping handling.
void PrepareStep(StepAction step_action);
void PrepareStepIn(Handle<JSFunction> function);
+ void PrepareStepInSuspendedGenerator();
void PrepareStepOnThrow();
void ClearStepping();
void ClearStepOut();
- void EnableStepIn();
bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
+ void RecordAsyncFunction(Handle<JSGeneratorObject> generator_object);
+
// Returns whether the operation succeeded. Compilation can only be triggered
// if a valid closure is passed as the second argument, otherwise the shared
// function needs to be compiled already.
@@ -498,9 +496,7 @@ class Debug {
char* RestoreDebug(char* from);
static int ArchiveSpacePerThread();
void FreeThreadResources() { }
-
- // Record function from which eval was called.
- static void RecordEvalCaller(Handle<Script> script);
+ void Iterate(ObjectVisitor* v);
bool CheckExecutionState(int id) {
return is_active() && !debug_context().is_null() && break_id() != 0 &&
@@ -544,8 +540,12 @@ class Debug {
return reinterpret_cast<Address>(&after_break_target_);
}
- Address step_in_enabled_address() {
- return reinterpret_cast<Address>(&thread_local_.step_in_enabled_);
+ Address last_step_action_address() {
+ return reinterpret_cast<Address>(&thread_local_.last_step_action_);
+ }
+
+ Address suspended_generator_address() {
+ return reinterpret_cast<Address>(&thread_local_.suspended_generator_);
}
StepAction last_step_action() { return thread_local_.last_step_action_; }
@@ -568,6 +568,14 @@ class Debug {
return break_disabled_ || in_debug_event_listener_;
}
+ void clear_suspended_generator() {
+ thread_local_.suspended_generator_ = Smi::FromInt(0);
+ }
+
+ bool has_suspended_generator() const {
+ return thread_local_.suspended_generator_ != Smi::FromInt(0);
+ }
+
void OnException(Handle<Object> exception, Handle<Object> promise);
// Constructors for debug event objects.
@@ -580,8 +588,6 @@ class Debug {
Handle<Object> promise);
MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
Handle<Script> script, v8::DebugEvent type);
- MUST_USE_RESULT MaybeHandle<Object> MakePromiseEvent(
- Handle<JSObject> promise_event);
MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
Handle<JSObject> task_event);
@@ -605,10 +611,25 @@ class Debug {
bool auto_continue);
void InvokeMessageHandler(MessageImpl message);
+ // Find the closest breakable source position for the given position.
+ int FindBreakablePosition(Handle<DebugInfo> debug_info, int source_position,
+ BreakPositionAlignment alignment);
+ // Instrument code to break at break points.
+ void ApplyBreakPoints(Handle<DebugInfo> debug_info);
+ // Remove break point instrumentation from code.
+ void ClearBreakPoints(Handle<DebugInfo> debug_info);
+ // Remove break point instrumentation from all code.
+ void ClearAllBreakPoints();
+ // Instrument a function with one-shots.
+ void FloodWithOneShot(Handle<JSFunction> function,
+ BreakLocatorType type = ALL_BREAK_LOCATIONS);
+ // Clear all one-shot instrumentations, but restore break points.
void ClearOneShot();
+
void ActivateStepOut(StackFrame* frame);
void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
- Handle<Object> CheckBreakPoints(BreakLocation* location,
+ Handle<Object> CheckBreakPoints(Handle<DebugInfo> debug_info,
+ BreakLocation* location,
bool* has_break_points = nullptr);
bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
bool CheckBreakPoint(Handle<Object> break_point_object);
@@ -681,11 +702,6 @@ class Debug {
// Frame pointer of the target frame we want to arrive at.
Address target_fp_;
- // Whether functions are flooded on entry for step-in and step-frame.
- // If we stepped out to the embedder, disable flooding to spill stepping
- // to the next call that the embedder makes.
- bool step_in_enabled_;
-
// Stores how LiveEdit has patched the stack. It is used when the
// debugger returns control back to the user script.
LiveEdit::FrameDropMode frame_drop_mode_;
@@ -693,6 +709,8 @@ class Debug {
// Value of accumulator in interpreter frames. In non-interpreter frames
// this value will be the hole.
Handle<Object> return_value_;
+
+ Object* suspended_generator_;
};
// Storage location for registers when handling debug break calls
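
The suspended_generator_ bookkeeping above exists so stepping can survive a generator or async-function suspension: RecordAsyncFunction stores the generator object when execution suspends, and PrepareStepInSuspendedGenerator later turns its resumption into a step-in. A minimal JavaScript sketch of the scenario, assuming an embedder-driven debug session issuing the step commands:

    function* gen() {
      yield 1;  // execution suspends here; the generator object is recorded
      yield 2;  // a later step-in across the resumption should stop here
    }
    var it = gen();
    it.next();  // runs to the first yield and suspends
    it.next();  // with has_suspended_generator(), step-in lands inside gen
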
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 6849bf5345..b2111eb8d7 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -15,8 +15,6 @@ var IsNaN = global.isNaN;
var JSONParse = global.JSON.parse;
var JSONStringify = global.JSON.stringify;
var LookupMirror = global.LookupMirror;
-var MakeError;
-var MakeTypeError;
var MakeMirror = global.MakeMirror;
var MakeMirrorSerializer = global.MakeMirrorSerializer;
var MathMin = global.Math.min;
@@ -26,8 +24,6 @@ var ParseInt = global.parseInt;
var ValueMirror = global.ValueMirror;
utils.Import(function(from) {
- MakeError = from.MakeError;
- MakeTypeError = from.MakeTypeError;
MirrorType = from.MirrorType;
});
@@ -51,8 +47,7 @@ Debug.DebugEvent = { Break: 1,
BeforeCompile: 4,
AfterCompile: 5,
CompileError: 6,
- PromiseEvent: 7,
- AsyncTaskEvent: 8 };
+ AsyncTaskEvent: 7 };
// Types of exceptions that can be broken upon.
Debug.ExceptionBreak = { Caught : 0,
@@ -67,7 +62,8 @@ Debug.StepAction = { StepOut: 0,
// The different types of scripts matching enum ScriptType in objects.h.
Debug.ScriptType = { Native: 0,
Extension: 1,
- Normal: 2 };
+ Normal: 2,
+ Wasm: 3 };
// The different types of script compilations matching enum
// Script::CompilationType in objects.h.
@@ -247,7 +243,7 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
} else if (type == Debug.ScriptBreakPointType.ScriptRegExp) {
this.script_regexp_object_ = new GlobalRegExp(script_id_or_name);
} else {
- throw MakeError(kDebugger, "Unexpected breakpoint type " + type);
+ throw %make_error(kDebugger, "Unexpected breakpoint type " + type);
}
this.line_ = opt_line || 0;
this.column_ = opt_column;
@@ -362,7 +358,7 @@ ScriptBreakPoint.prototype.matchesScript = function(script) {
} else {
// We might want to account for columns here as well.
if (!(script.line_offset <= this.line_ &&
- this.line_ < script.line_offset + script.lineCount())) {
+ this.line_ < script.line_offset + %ScriptLineCount(script))) {
return false;
}
if (this.type_ == Debug.ScriptBreakPointType.ScriptName) {
@@ -370,7 +366,7 @@ ScriptBreakPoint.prototype.matchesScript = function(script) {
} else if (this.type_ == Debug.ScriptBreakPointType.ScriptRegExp) {
return this.script_regexp_object_.test(script.nameOrSourceURL());
} else {
- throw MakeError(kDebugger, "Unexpected breakpoint type " + this.type_);
+ throw %make_error(kDebugger, "Unexpected breakpoint type " + this.type_);
}
}
};
@@ -384,11 +380,11 @@ ScriptBreakPoint.prototype.set = function (script) {
// first piece of breakable code on the line try to find the column on the
// line which contains some source.
if (IS_UNDEFINED(column)) {
- var source_line = script.sourceLine(this.line());
+ var source_line = %ScriptSourceLine(script, line || script.line_offset);
// Allocate array for caching the columns where the actual source starts.
if (!script.sourceColumnStart_) {
- script.sourceColumnStart_ = new GlobalArray(script.lineCount());
+ script.sourceColumnStart_ = new GlobalArray(%ScriptLineCount(script));
}
// Fill cache if needed and get column where the actual source starts.
@@ -466,19 +462,12 @@ function GetScriptBreakPoints(script) {
Debug.setListener = function(listener, opt_data) {
if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
- throw MakeTypeError(kDebuggerType);
+ throw %make_type_error(kDebuggerType);
}
%SetDebugEventListener(listener, opt_data);
};
-Debug.breakLocations = function(f, opt_position_aligment) {
- if (!IS_FUNCTION(f)) throw MakeTypeError(kDebuggerType);
- var position_aligment = IS_UNDEFINED(opt_position_aligment)
- ? Debug.BreakPositionAlignment.Statement : opt_position_aligment;
- return %GetBreakLocations(f, position_aligment);
-};
-
// Returns a Script object. If the parameter is a function the return value
// is the script in which the function is defined. If the parameter is a string
// the return value is the script for which the script name has that string
@@ -488,7 +477,7 @@ Debug.findScript = function(func_or_script_name) {
if (IS_FUNCTION(func_or_script_name)) {
return %FunctionGetScript(func_or_script_name);
} else if (IS_REGEXP(func_or_script_name)) {
- var scripts = Debug.scripts();
+ var scripts = this.scripts();
var last_result = null;
var result_count = 0;
for (var i in scripts) {
@@ -523,13 +512,13 @@ Debug.scriptSource = function(func_or_script_name) {
Debug.source = function(f) {
- if (!IS_FUNCTION(f)) throw MakeTypeError(kDebuggerType);
+ if (!IS_FUNCTION(f)) throw %make_type_error(kDebuggerType);
return %FunctionGetSourceCode(f);
};
Debug.sourcePosition = function(f) {
- if (!IS_FUNCTION(f)) throw MakeTypeError(kDebuggerType);
+ if (!IS_FUNCTION(f)) throw %make_type_error(kDebuggerType);
return %FunctionGetScriptSourcePosition(f);
};
@@ -537,14 +526,14 @@ Debug.sourcePosition = function(f) {
Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) {
var script = %FunctionGetScript(func);
var script_offset = %FunctionGetScriptSourcePosition(func);
- return script.locationFromLine(opt_line, opt_column, script_offset);
+ return %ScriptLocationFromLine(script, opt_line, opt_column, script_offset);
};
// Returns the character position in a script based on a line number and an
// optional position within that line.
Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
- var location = script.locationFromLine(opt_line, opt_column);
+ var location = %ScriptLocationFromLine(script, opt_line, opt_column, 0);
return location ? location.position : null;
};
@@ -583,26 +572,23 @@ Debug.findBreakPointActualLocations = function(break_point_number) {
};
Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
- if (!IS_FUNCTION(func)) throw MakeTypeError(kDebuggerType);
+ if (!IS_FUNCTION(func)) throw %make_type_error(kDebuggerType);
// Break points in API functions are not supported.
if (%FunctionIsAPIFunction(func)) {
- throw MakeError(kDebugger, 'Cannot set break point in native code.');
+ throw %make_error(kDebugger, 'Cannot set break point in native code.');
}
- // Find source position relative to start of the function
- var break_position =
+ // Find source position.
+ var source_position =
this.findFunctionSourceLocation(func, opt_line, opt_column).position;
- var source_position = break_position - this.sourcePosition(func);
// Find the script for the function.
var script = %FunctionGetScript(func);
// Break in builtin JavaScript code is not supported.
if (script.type == Debug.ScriptType.Native) {
- throw MakeError(kDebugger, 'Cannot set break point in native code.');
+ throw %make_error(kDebugger, 'Cannot set break point in native code.');
}
// If the script for the function has a name, convert this to a script break
// point.
if (script && script.id) {
- // Adjust the source position to be script relative.
- source_position += %FunctionGetScriptSourcePosition(func);
// Find line and column for the position in the script and set a script
// break point from that.
var location = script.locationFromPosition(source_position, false);
@@ -614,7 +600,6 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
var break_point = MakeBreakPoint(source_position);
var actual_position =
%SetFunctionBreakPoint(func, source_position, break_point);
- actual_position += this.sourcePosition(func);
var actual_location = script.locationFromPosition(actual_position, true);
break_point.actual_location = { line: actual_location.line,
column: actual_location.column,
@@ -634,15 +619,12 @@ Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
if (!enabled) {
break_point.disable();
}
- var scripts = this.scripts();
- var position_alignment = IS_UNDEFINED(opt_position_alignment)
- ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
- for (var i = 0; i < scripts.length; i++) {
- if (script_id == scripts[i].id) {
- break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
- position_alignment, break_point);
- break;
- }
+ var script = scriptById(script_id);
+ if (script) {
+ var position_alignment = IS_UNDEFINED(opt_position_alignment)
+ ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
+ break_point.actual_position = %SetScriptBreakPoint(script, position,
+ position_alignment, break_point);
}
return break_point;
};
@@ -678,7 +660,7 @@ Debug.clearBreakPoint = function(break_point_number) {
return %ClearBreakPoint(break_point);
} else {
break_point = this.findScriptBreakPoint(break_point_number, true);
- if (!break_point) throw MakeError(kDebugger, 'Invalid breakpoint');
+ if (!break_point) throw %make_error(kDebugger, 'Invalid breakpoint');
}
};
@@ -830,10 +812,12 @@ Debug.isBreakOnUncaughtException = function() {
};
Debug.showBreakPoints = function(f, full, opt_position_alignment) {
- if (!IS_FUNCTION(f)) throw MakeError(kDebuggerType);
+ if (!IS_FUNCTION(f)) throw %make_error(kDebuggerType);
var source = full ? this.scriptSource(f) : this.source(f);
- var offset = full ? this.sourcePosition(f) : 0;
- var locations = this.breakLocations(f, opt_position_alignment);
+ var offset = full ? 0 : this.sourcePosition(f);
+ var position_alignment = IS_UNDEFINED(opt_position_alignment)
+ ? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
+ var locations = %GetBreakLocations(f, position_alignment);
if (!locations) return source;
locations.sort(function(x, y) { return x - y; });
var result = "";
@@ -859,10 +843,31 @@ Debug.scripts = function() {
};
+// Get a specific script currently loaded. This is based on scanning the heap.
+// TODO(clemensh): Create a runtime function for this.
+function scriptById(scriptId) {
+ var scripts = Debug.scripts();
+ for (var script of scripts) {
+ if (script.id == scriptId) return script;
+ }
+ return UNDEFINED;
+};
+
+
Debug.debuggerFlags = function() {
return debugger_flags;
};
+Debug.getWasmFunctionOffsetTable = function(scriptId) {
+ var script = scriptById(scriptId);
+ return script ? %GetWasmFunctionOffsetTable(script) : UNDEFINED;
+};
+
+Debug.disassembleWasmFunction = function(scriptId) {
+ var script = scriptById(scriptId);
+ return script ? %DisassembleWasmFunction(script) : UNDEFINED;
+};
+
Debug.MakeMirror = MakeMirror;
function MakeExecutionState(break_id) {
@@ -881,7 +886,7 @@ ExecutionState.prototype.prepareStep = function(action) {
action === Debug.StepAction.StepFrame) {
return %PrepareStep(this.break_id, action);
}
- throw MakeTypeError(kDebuggerType);
+ throw %make_type_error(kDebuggerType);
};
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
@@ -895,15 +900,11 @@ ExecutionState.prototype.frameCount = function() {
return %GetFrameCount(this.break_id);
};
-ExecutionState.prototype.threadCount = function() {
- return %GetThreadCount(this.break_id);
-};
-
ExecutionState.prototype.frame = function(opt_index) {
// If no index is supplied, return the selected frame.
if (opt_index == null) opt_index = this.selected_frame;
if (opt_index < 0 || opt_index >= this.frameCount()) {
- throw MakeTypeError(kDebuggerFrame);
+ throw %make_type_error(kDebuggerFrame);
}
return new FrameMirror(this.break_id, opt_index);
};
@@ -911,7 +912,7 @@ ExecutionState.prototype.frame = function(opt_index) {
ExecutionState.prototype.setSelectedFrame = function(index) {
var i = TO_NUMBER(index);
if (i < 0 || i >= this.frameCount()) {
- throw MakeTypeError(kDebuggerFrame);
+ throw %make_type_error(kDebuggerFrame);
}
this.selected_frame = i;
};
@@ -1141,39 +1142,6 @@ function MakeScriptObject_(script, include_source) {
}
-function MakePromiseEvent(event_data) {
- return new PromiseEvent(event_data);
-}
-
-
-function PromiseEvent(event_data) {
- this.promise_ = event_data.promise;
- this.parentPromise_ = event_data.parentPromise;
- this.status_ = event_data.status;
- this.value_ = event_data.value;
-}
-
-
-PromiseEvent.prototype.promise = function() {
- return MakeMirror(this.promise_);
-}
-
-
-PromiseEvent.prototype.parentPromise = function() {
- return MakeMirror(this.parentPromise_);
-}
-
-
-PromiseEvent.prototype.status = function() {
- return this.status_;
-}
-
-
-PromiseEvent.prototype.value = function() {
- return MakeMirror(this.value_);
-}
-
-
function MakeAsyncTaskEvent(event_data) {
return new AsyncTaskEvent(event_data);
}
@@ -1318,16 +1286,16 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
response = this.createResponse(request);
if (!request.type) {
- throw MakeError(kDebugger, 'Type not specified');
+ throw %make_error(kDebugger, 'Type not specified');
}
if (request.type != 'request') {
- throw MakeError(kDebugger,
+ throw %make_error(kDebugger,
"Illegal type '" + request.type + "' in request");
}
if (!request.command) {
- throw MakeError(kDebugger, 'Command not specified');
+ throw %make_error(kDebugger, 'Command not specified');
}
if (request.arguments) {
@@ -1347,7 +1315,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
if (IS_FUNCTION(handler)) {
%_Call(handler, this, request, response);
} else {
- throw MakeError(kDebugger,
+ throw %make_error(kDebugger,
'Unknown command "' + request.command + '" in request');
}
} catch (e) {
@@ -1399,7 +1367,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
} else if (stepaction == 'out') {
action = Debug.StepAction.StepOut;
} else {
- throw MakeError(kDebugger,
+ throw %make_error(kDebugger,
'Invalid stepaction argument "' + stepaction + '".');
}
}
@@ -1519,7 +1487,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
response.body.type = 'scriptRegExp';
response.body.script_regexp = break_point.script_regexp_object().source;
} else {
- throw MakeError(kDebugger,
+ throw %make_error(kDebugger,
"Unexpected breakpoint type: " + break_point.type());
}
response.body.line = break_point.line();
@@ -1653,7 +1621,7 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
description.type = 'scriptRegExp';
description.script_regexp = break_point.script_regexp_object().source;
} else {
- throw MakeError(kDebugger,
+ throw %make_error(kDebugger,
"Unexpected breakpoint type: " + break_point.type());
}
array.push(description);
@@ -1802,7 +1770,7 @@ DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
var frame_index = scope_description.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
- throw MakeTypeError(kDebuggerFrame);
+ throw %make_type_error(kDebuggerFrame);
}
return this.exec_state_.frame(frame_index);
} else {
@@ -1818,21 +1786,21 @@ DebugCommandProcessor.prototype.resolveScopeHolder_ =
function(scope_description) {
if (scope_description && "functionHandle" in scope_description) {
if (!IS_NUMBER(scope_description.functionHandle)) {
- throw MakeError(kDebugger, 'Function handle must be a number');
+ throw %make_error(kDebugger, 'Function handle must be a number');
}
var function_mirror = LookupMirror(scope_description.functionHandle);
if (!function_mirror) {
- throw MakeError(kDebugger, 'Failed to find function object by handle');
+ throw %make_error(kDebugger, 'Failed to find function object by handle');
}
if (!function_mirror.isFunction()) {
- throw MakeError(kDebugger,
+ throw %make_error(kDebugger,
'Value of non-function type is found by handle');
}
return function_mirror;
} else {
// No frames no scopes.
if (this.exec_state_.frameCount() == 0) {
- throw MakeError(kDebugger, 'No scopes');
+ throw %make_error(kDebugger, 'No scopes');
}
// Get the frame for which the scopes are requested.
@@ -1885,7 +1853,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
if ("handle" in value_description) {
var value_mirror = LookupMirror(value_description.handle);
if (!value_mirror) {
- throw MakeError(kDebugger, "Failed to resolve value by handle, ' #" +
+ throw %make_error(kDebugger, "Failed to resolve value by handle, ' #" +
value_description.handle + "# not found");
}
return value_mirror.value();
@@ -1897,7 +1865,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
} else if (value_description.type == MirrorType.STRING_TYPE) {
return TO_STRING(value_description.stringDescription);
} else {
- throw MakeError(kDebugger, "Unknown type");
+ throw %make_error(kDebugger, "Unknown type");
}
} else if ("value" in value_description) {
return value_description.value;
@@ -1906,7 +1874,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) {
} else if (value_description.type == MirrorType.NULL_TYPE) {
return null;
} else {
- throw MakeError(kDebugger, "Failed to parse value description");
+ throw %make_error(kDebugger, "Failed to parse value description");
}
};
@@ -2123,18 +2091,34 @@ DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
return response.failed('No source');
}
- // Get the source slice and fill it into the response.
- var slice = script.sourceSlice(from_line, to_line);
- if (!slice) {
+ var raw_script = script.value();
+
+ // Sanitize arguments and remove line offset.
+ var line_offset = raw_script.line_offset;
+ var line_count = %ScriptLineCount(raw_script);
+ from_line = IS_UNDEFINED(from_line) ? 0 : from_line - line_offset;
+ to_line = IS_UNDEFINED(to_line) ? line_count : to_line - line_offset;
+
+ if (from_line < 0) from_line = 0;
+ if (to_line > line_count) to_line = line_count;
+
+ if (from_line >= line_count || to_line < 0 || from_line > to_line) {
return response.failed('Invalid line interval');
}
+
+ // Fill in the response.
+
response.body = {};
- response.body.source = slice.sourceText();
- response.body.fromLine = slice.from_line;
- response.body.toLine = slice.to_line;
- response.body.fromPosition = slice.from_position;
- response.body.toPosition = slice.to_position;
- response.body.totalLines = script.lineCount();
+ response.body.fromLine = from_line + line_offset;
+ response.body.toLine = to_line + line_offset;
+ response.body.fromPosition = %ScriptLineStartPosition(raw_script, from_line);
+ response.body.toPosition =
+ (to_line == 0) ? 0 : %ScriptLineEndPosition(raw_script, to_line - 1);
+ response.body.totalLines = %ScriptLineCount(raw_script);
+
+ response.body.source = %_SubString(raw_script.source,
+ response.body.fromPosition,
+ response.body.toPosition);
};
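
The sanitized line arithmetic above is easiest to check with concrete numbers; a worked sketch in comments, with illustrative values only:

    // Script: line_offset == 10, %ScriptLineCount == 5 (external lines 10..14).
    // Request: fromLine == 12, toLine == 99.
    // Remove the offset: from_line = 12 - 10 = 2; to_line = 99 - 10 = 89.
    // Clamp:             to_line = 5 (line_count); from_line stays 2.
    // Response:          fromLine = 12, toLine = 15, and source is the
    //                    substring from the start of line 2 to the end of
    //                    line 4 (to_line - 1) of the raw script.
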
@@ -2177,7 +2161,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
}
// Collect all scripts in the heap.
- var scripts = %DebugGetLoadedScripts();
+ var scripts = Debug.scripts();
response.body = [];
@@ -2207,28 +2191,6 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) {
- // Get the number of threads.
- var total_threads = this.exec_state_.threadCount();
-
- // Get information for all threads.
- var threads = [];
- for (var i = 0; i < total_threads; i++) {
- var details = %GetThreadDetails(this.exec_state_.break_id, i);
- var thread_info = { current: details[0],
- id: details[1]
- };
- threads.push(thread_info);
- }
-
- // Create the response body.
- response.body = {
- totalThreads: total_threads,
- threads: threads
- };
-};
-
-
DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
response.running = false;
};
@@ -2249,14 +2211,7 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
var script_id = request.arguments.script_id;
var preview_only = !!request.arguments.preview_only;
- var scripts = %DebugGetLoadedScripts();
-
- var the_script = null;
- for (var i = 0; i < scripts.length; i++) {
- if (scripts[i].id == script_id) {
- the_script = scripts[i];
- }
- }
+ var the_script = scriptById(script_id);
if (!the_script) {
response.failed('Script not found');
return;
@@ -2394,7 +2349,6 @@ DebugCommandProcessor.prototype.dispatch_ = (function() {
"references": proto.referencesRequest_,
"source": proto.sourceRequest_,
"scripts": proto.scriptsRequest_,
- "threads": proto.threadsRequest_,
"suspend": proto.suspendRequest_,
"version": proto.versionRequest_,
"changelive": proto.changeLiveRequest_,
@@ -2517,7 +2471,6 @@ utils.InstallFunctions(utils, DONT_ENUM, [
"MakeExceptionEvent", MakeExceptionEvent,
"MakeBreakEvent", MakeBreakEvent,
"MakeCompileEvent", MakeCompileEvent,
- "MakePromiseEvent", MakePromiseEvent,
"MakeAsyncTaskEvent", MakeAsyncTaskEvent,
"IsBreakPointTriggered", IsBreakPointTriggered,
"UpdateScriptBreakPoints", UpdateScriptBreakPoints,
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 056407f29a..8e4dee7797 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -38,7 +38,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
static const int kSize = Assembler::kDebugBreakSlotLength;
CodePatcher patcher(isolate, pc, kSize);
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 78ed6f157a..b51bb1a1c4 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -4,7 +4,6 @@
#include "src/debug/liveedit.h"
-#include "src/ast/scopeinfo.h"
#include "src/ast/scopes.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
@@ -16,6 +15,7 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/parsing/parser.h"
+#include "src/source-position-table.h"
#include "src/v8.h"
#include "src/v8memory.h"
@@ -620,54 +620,20 @@ void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
-
-void FunctionInfoWrapper::SetFunctionCode(Handle<Code> function_code,
- Handle<HeapObject> code_scope_info) {
- Handle<JSValue> code_wrapper = WrapInJSValue(function_code);
- this->SetField(kCodeOffset_, code_wrapper);
-
- Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info);
- this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
-}
-
-
void FunctionInfoWrapper::SetSharedFunctionInfo(
Handle<SharedFunctionInfo> info) {
Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedFunctionInfoOffset_, info_holder);
}
-
-Handle<Code> FunctionInfoWrapper::GetFunctionCode() {
- Handle<Object> element = this->GetField(kCodeOffset_);
+Handle<SharedFunctionInfo> FunctionInfoWrapper::GetSharedFunctionInfo() {
+ Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
- CHECK(raw_result->IsCode());
- return Handle<Code>::cast(raw_result);
+ CHECK(raw_result->IsSharedFunctionInfo());
+ return Handle<SharedFunctionInfo>::cast(raw_result);
}
-
-MaybeHandle<TypeFeedbackVector> FunctionInfoWrapper::GetFeedbackVector() {
- Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
- if (element->IsJSValue()) {
- Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
- Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
- Handle<SharedFunctionInfo> shared =
- Handle<SharedFunctionInfo>::cast(raw_result);
- return Handle<TypeFeedbackVector>(shared->feedback_vector(), isolate());
- } else {
- // Scripts may never have a SharedFunctionInfo created.
- return MaybeHandle<TypeFeedbackVector>();
- }
-}
-
-
-Handle<Object> FunctionInfoWrapper::GetCodeScopeInfo() {
- Handle<Object> element = this->GetField(kCodeScopeInfoOffset_);
- return UnwrapJSValue(Handle<JSValue>::cast(element));
-}
-
-
void SharedInfoWrapper::SetProperties(Handle<String> name,
int start_position,
int end_position,
@@ -688,115 +654,6 @@ Handle<SharedFunctionInfo> SharedInfoWrapper::GetInfo() {
}
-class FunctionInfoListener {
- public:
- explicit FunctionInfoListener(Isolate* isolate) {
- current_parent_index_ = -1;
- len_ = 0;
- result_ = isolate->factory()->NewJSArray(10);
- }
-
- void FunctionStarted(FunctionLiteral* fun) {
- HandleScope scope(isolate());
- FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate());
- info.SetInitialProperties(fun->name(), fun->start_position(),
- fun->end_position(), fun->parameter_count(),
- fun->materialized_literal_count(),
- current_parent_index_);
- current_parent_index_ = len_;
- SetElementSloppy(result_, len_, info.GetJSArray());
- len_++;
- }
-
- void FunctionDone() {
- HandleScope scope(isolate());
- FunctionInfoWrapper info = FunctionInfoWrapper::cast(
- *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
- .ToHandleChecked());
- current_parent_index_ = info.GetParentIndex();
- }
-
- // Saves only function code, because for a script function we
- // may never create a SharedFunctionInfo object.
- void FunctionCode(Handle<Code> function_code) {
- FunctionInfoWrapper info = FunctionInfoWrapper::cast(
- *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
- .ToHandleChecked());
- info.SetFunctionCode(function_code,
- Handle<HeapObject>(isolate()->heap()->null_value()));
- }
-
- // Saves full information about a function: its code, its scope info
- // and a SharedFunctionInfo object.
- void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope,
- Zone* zone) {
- if (!shared->IsSharedFunctionInfo()) {
- return;
- }
- FunctionInfoWrapper info = FunctionInfoWrapper::cast(
- *JSReceiver::GetElement(isolate(), result_, current_parent_index_)
- .ToHandleChecked());
- info.SetFunctionCode(Handle<Code>(shared->code()),
- Handle<HeapObject>(shared->scope_info()));
- info.SetSharedFunctionInfo(shared);
-
- Handle<Object> scope_info_list = SerializeFunctionScope(scope, zone);
- info.SetFunctionScopeInfo(scope_info_list);
- }
-
- Handle<JSArray> GetResult() { return result_; }
-
- private:
- Isolate* isolate() const { return result_->GetIsolate(); }
-
- Handle<Object> SerializeFunctionScope(Scope* scope, Zone* zone) {
- Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10);
- int scope_info_length = 0;
-
- // Saves some description of scope. It stores name and indexes of
- // variables in the whole scope chain. Null-named slots delimit
- // scopes of this chain.
- Scope* current_scope = scope;
- while (current_scope != NULL) {
- HandleScope handle_scope(isolate());
- ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone);
- ZoneList<Variable*> context_list(
- current_scope->ContextLocalCount(), zone);
- ZoneList<Variable*> globals_list(current_scope->ContextGlobalCount(),
- zone);
- current_scope->CollectStackAndContextLocals(&stack_list, &context_list,
- &globals_list);
- context_list.Sort(&Variable::CompareIndex);
-
- for (int i = 0; i < context_list.length(); i++) {
- SetElementSloppy(scope_info_list,
- scope_info_length,
- context_list[i]->name());
- scope_info_length++;
- SetElementSloppy(
- scope_info_list,
- scope_info_length,
- Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
- scope_info_length++;
- }
- SetElementSloppy(scope_info_list,
- scope_info_length,
- Handle<Object>(isolate()->heap()->null_value(),
- isolate()));
- scope_info_length++;
-
- current_scope = current_scope->outer_scope();
- }
-
- return scope_info_list;
- }
-
- Handle<JSArray> result_;
- int len_;
- int current_parent_index_;
-};
-
-
void LiveEdit::InitializeThreadLocal(Debug* debug) {
debug->thread_local_.frame_drop_mode_ = LiveEdit::FRAMES_UNTOUCHED;
}
@@ -832,11 +689,10 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
Isolate* isolate = script->GetIsolate();
- FunctionInfoListener listener(isolate);
+ MaybeHandle<JSArray> infos;
Handle<Object> original_source =
Handle<Object>(script->source(), isolate);
script->set_source(*source);
- isolate->set_active_function_info_listener(&listener);
{
// Creating verbose TryCatch from public API is currently the only way to
@@ -845,7 +701,7 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
try_catch.SetVerbose(true);
// A logical 'try' section.
- Compiler::CompileForLiveEdit(script);
+ infos = Compiler::CompileForLiveEdit(script);
}
// A logical 'catch' section.
@@ -883,11 +739,10 @@ MaybeHandle<JSArray> LiveEdit::GatherCompileInfo(Handle<Script> script,
}
// A logical 'finally' section.
- isolate->set_active_function_info_listener(NULL);
script->set_source(*original_source);
if (rethrow_exception.is_null()) {
- return listener.GetResult();
+ return infos.ToHandleChecked();
} else {
return isolate->Throw<JSArray>(rethrow_exception);
}
@@ -971,11 +826,11 @@ class LiteralFixer {
public:
static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate) {
+ bool feedback_metadata_changed, Isolate* isolate) {
int new_literal_count = compile_info_wrapper->GetLiteralCount();
int old_literal_count = shared_info->num_literals();
- if (old_literal_count == new_literal_count) {
+ if (old_literal_count == new_literal_count && !feedback_metadata_changed) {
// If literal count didn't change, simply go over all functions
// and clear literal arrays.
ClearValuesVisitor visitor;
@@ -986,12 +841,15 @@ class LiteralFixer {
// collect all functions and fix their literal arrays.
Handle<FixedArray> function_instances =
CollectJSFunctions(shared_info, isolate);
- Handle<TypeFeedbackVector> vector(shared_info->feedback_vector());
+ Handle<TypeFeedbackMetadata> feedback_metadata(
+ shared_info->feedback_metadata());
for (int i = 0; i < function_instances->length(); i++) {
Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
+ Handle<TypeFeedbackVector> vector =
+ TypeFeedbackVector::New(isolate, feedback_metadata);
Handle<LiteralsArray> new_literals =
- LiteralsArray::New(isolate, vector, new_literal_count, TENURED);
+ LiteralsArray::New(isolate, vector, new_literal_count);
fun->set_literals(*new_literals);
}
@@ -1037,10 +895,10 @@ class LiteralFixer {
class ClearValuesVisitor {
public:
void visit(JSFunction* fun) {
- FixedArray* literals = fun->literals();
- int len = literals->length();
+ LiteralsArray* literals = fun->literals();
+ int len = literals->literals_count();
for (int j = 0; j < len; j++) {
- literals->set_undefined(j);
+ literals->set_literal_undefined(j);
}
}
};
@@ -1115,21 +973,57 @@ void LiveEdit::ReplaceFunctionCode(
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+ Handle<SharedFunctionInfo> new_shared_info =
+ compile_info_wrapper.GetSharedFunctionInfo();
+ bool feedback_metadata_changed = false;
+
+ if (shared_info->is_compiled()) {
+ // Take whatever code we can get from the new shared function info. We
+ // expect activations of neither the old bytecode nor old FCG code, since
+ // the lowest activation is going to be restarted.
+ Handle<Code> old_code(shared_info->code());
+ Handle<Code> new_code(new_shared_info->code());
+ // Clear old bytecode. This will trigger self-healing if we do not install
+ // new bytecode.
+ shared_info->ClearBytecodeArray();
+ if (!shared_info->HasBaselineCode()) {
+ // Every function from this SFI is interpreted.
+ if (!new_shared_info->HasBaselineCode()) {
+ // We have newly compiled bytecode. Simply replace the old one.
+ shared_info->set_bytecode_array(new_shared_info->bytecode_array());
+ } else {
+ // Rely on self-healing for places that used to run bytecode.
+ shared_info->ReplaceCode(*new_code);
+ }
+ } else {
+ // Functions from this SFI can be either interpreted or running FCG.
+ DCHECK(old_code->kind() == Code::FUNCTION);
+ if (new_shared_info->HasBytecodeArray()) {
+ // Start using new bytecode everywhere.
+ shared_info->set_bytecode_array(new_shared_info->bytecode_array());
+ ReplaceCodeObject(old_code,
+ isolate->builtins()->InterpreterEntryTrampoline());
+ } else {
+ // Start using new FCG code everywhere.
+ // Rely on self-healing for places that used to run bytecode.
+ DCHECK(new_code->kind() == Code::FUNCTION);
+ ReplaceCodeObject(old_code, new_code);
+ }
+ }
- if (shared_info->code()->kind() == Code::FUNCTION) {
- Handle<Code> code = compile_info_wrapper.GetFunctionCode();
- ReplaceCodeObject(Handle<Code>(shared_info->code()), code);
- Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
- if (code_scope_info->IsFixedArray()) {
- shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
+ if (shared_info->HasDebugInfo()) {
+ // Existing break points will be re-applied. Reset the debug info here.
+ isolate->debug()->RemoveDebugInfoAndClearFromShared(
+ handle(shared_info->GetDebugInfo()));
}
+ shared_info->set_scope_info(new_shared_info->scope_info());
shared_info->DisableOptimization(kLiveEdit);
// Update the type feedback metadata, if needed.
- MaybeHandle<TypeFeedbackVector> feedback_vector =
- compile_info_wrapper.GetFeedbackVector();
- if (!feedback_vector.is_null()) {
- shared_info->set_feedback_vector(*feedback_vector.ToHandleChecked());
- }
+ Handle<TypeFeedbackMetadata> new_feedback_metadata(
+ new_shared_info->feedback_metadata());
+ feedback_metadata_changed =
+ new_feedback_metadata->DiffersFrom(shared_info->feedback_metadata());
+ shared_info->set_feedback_metadata(*new_feedback_metadata);
}
int start_position = compile_info_wrapper.GetStartPosition();
@@ -1137,7 +1031,8 @@ void LiveEdit::ReplaceFunctionCode(
shared_info->set_start_position(start_position);
shared_info->set_end_position(end_position);
- LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
+ LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info,
+ feedback_metadata_changed, isolate);
DeoptimizeDependentFunctions(*shared_info);
isolate->compilation_cache()->Remove(shared_info);
@@ -1157,14 +1052,15 @@ void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Handle<Object> script_handle) {
Handle<SharedFunctionInfo> shared_info =
UnwrapSharedFunctionInfoFromJSValue(function_wrapper);
- CHECK(script_handle->IsScript() || script_handle->IsUndefined());
+ Isolate* isolate = function_wrapper->GetIsolate();
+ CHECK(script_handle->IsScript() || script_handle->IsUndefined(isolate));
SharedFunctionInfo::SetScript(shared_info, script_handle);
shared_info->DisableOptimization(kLiveEdit);
function_wrapper->GetIsolate()->compilation_cache()->Remove(shared_info);
}
-
+namespace {
// For a script text change (defined as position_change_array), translates
// position in unchanged text to position in changed text.
// Text change is a set of non-overlapping regions in text that have changed
@@ -1206,128 +1102,26 @@ static int TranslatePosition(int original_position,
return original_position + position_diff;
}
-
-// Auto-growing buffer for writing relocation info code section. This buffer
-// is a simplified version of buffer from Assembler. Unlike Assembler, this
-// class is platform-independent and it works without dealing with instructions.
-// As specified by RelocInfo format, the buffer is filled in reversed order:
-// from upper to lower addresses.
-// It uses NewArray/DeleteArray for memory management.
-class RelocInfoBuffer {
- public:
- RelocInfoBuffer(int buffer_initial_capicity, byte* pc) {
- buffer_size_ = buffer_initial_capicity + kBufferGap;
- buffer_ = NewArray<byte>(buffer_size_);
-
- reloc_info_writer_.Reposition(buffer_ + buffer_size_, pc);
- }
- ~RelocInfoBuffer() {
- DeleteArray(buffer_);
- }
-
- // As specified by RelocInfo format, the buffer is filled in reversed order:
- // from upper to lower addresses.
- void Write(const RelocInfo* rinfo) {
- if (buffer_ + kBufferGap >= reloc_info_writer_.pos()) {
- Grow();
- }
- reloc_info_writer_.Write(rinfo);
- }
-
- Vector<byte> GetResult() {
- // Return the bytes from pos up to end of buffer.
- int result_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer_.pos());
- return Vector<byte>(reloc_info_writer_.pos(), result_size);
- }
-
- private:
- void Grow() {
- // Compute new buffer size.
- int new_buffer_size;
- if (buffer_size_ < 2 * KB) {
- new_buffer_size = 4 * KB;
- } else {
- new_buffer_size = 2 * buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if (new_buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
- }
-
- // Set up new buffer.
- byte* new_buffer = NewArray<byte>(new_buffer_size);
-
- // Copy the data.
- int curently_used_size =
- static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- MemMove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
-
- reloc_info_writer_.Reposition(
- new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.last_pc());
-
- DeleteArray(buffer_);
- buffer_ = new_buffer;
- buffer_size_ = new_buffer_size;
- }
-
- RelocInfoWriter reloc_info_writer_;
- byte* buffer_;
- int buffer_size_;
-
- static const int kBufferGap = RelocInfoWriter::kMaxSize;
- static const int kMaximalBufferSize = 512*MB;
-};
-
-
-// Patch positions in code (changes relocation info section) and possibly
-// returns new instance of code.
-static Handle<Code> PatchPositionsInCode(
- Handle<Code> code,
- Handle<JSArray> position_change_array) {
+void TranslateSourcePositionTable(Handle<AbstractCode> code,
+ Handle<JSArray> position_change_array) {
Isolate* isolate = code->GetIsolate();
+ Zone zone(isolate->allocator());
+ SourcePositionTableBuilder builder(&zone);
- RelocInfoBuffer buffer_writer(code->relocation_size(),
- code->instruction_start());
-
- {
- for (RelocIterator it(*code); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (RelocInfo::IsPosition(rinfo->rmode())) {
- int position = static_cast<int>(rinfo->data());
- int new_position = TranslatePosition(position,
- position_change_array);
- if (position != new_position) {
- RelocInfo info_copy(rinfo->isolate(), rinfo->pc(), rinfo->rmode(),
- new_position, NULL);
- buffer_writer.Write(&info_copy);
- continue;
- }
- }
- if (RelocInfo::IsRealRelocMode(rinfo->rmode())) {
- buffer_writer.Write(it.rinfo());
- }
- }
+ Handle<ByteArray> source_position_table(code->source_position_table());
+ for (SourcePositionTableIterator iterator(*source_position_table);
+ !iterator.done(); iterator.Advance()) {
+ int position = iterator.source_position();
+ int new_position = TranslatePosition(position, position_change_array);
+ builder.AddPosition(iterator.code_offset(), new_position,
+ iterator.is_statement());
}
- Vector<byte> buffer = buffer_writer.GetResult();
-
- if (buffer.length() == code->relocation_size()) {
- // Simply patch relocation area of code.
- MemCopy(code->relocation_start(), buffer.start(), buffer.length());
- return code;
- } else {
- // Relocation info section now has different size. We cannot simply
- // rewrite it inside code object. Instead we have to create a new
- // code object.
- Handle<Code> result(isolate->factory()->CopyCode(code, buffer));
- return result;
- }
+ Handle<ByteArray> new_source_position_table(
+ builder.ToSourcePositionTable(isolate, code));
+ code->set_source_position_table(*new_source_position_table);
}
-
+} // namespace
void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<JSArray> position_change_array) {
@@ -1346,18 +1140,20 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
info->set_end_position(new_function_end);
info->set_function_token_position(new_function_token_pos);
+ if (info->HasBytecodeArray()) {
+ TranslateSourcePositionTable(
+ Handle<AbstractCode>(AbstractCode::cast(info->bytecode_array())),
+ position_change_array);
+ }
if (info->code()->kind() == Code::FUNCTION) {
- // Patch relocation info section of the code.
- Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
- position_change_array);
- if (*patched_code != info->code()) {
- // Replace all references to the code across the heap. In particular,
- // some stubs may refer to this code and this code may be being executed
- // on stack (it is safe to substitute the code object on stack, because
- // we only change the structure of rinfo and leave instructions
- // untouched).
- ReplaceCodeObject(Handle<Code>(info->code()), patched_code);
- }
+ TranslateSourcePositionTable(
+ Handle<AbstractCode>(AbstractCode::cast(info->code())),
+ position_change_array);
+ }
+ if (info->HasDebugInfo()) {
+ // Existing break points will be re-applied. Reset the debug info here.
+ info->GetIsolate()->debug()->RemoveDebugInfoAndClearFromShared(
+ handle(info->GetDebugInfo()));
}
}
@@ -1374,8 +1170,7 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_type(original->type());
copy->set_context_data(original->context_data());
copy->set_eval_from_shared(original->eval_from_shared());
- copy->set_eval_from_instructions_offset(
- original->eval_from_instructions_offset());
+ copy->set_eval_from_position(original->eval_from_position());
// Copy all the flags, but clear compilation state.
copy->set_flags(original->flags());
@@ -1555,6 +1350,13 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
top_frame = frames[top_frame_index - 2];
*mode = LiveEdit::CURRENTLY_SET_MODE;
frame_has_padding = false;
+ } else if (pre_top_frame_code->kind() == Code::BYTECODE_HANDLER) {
+ // Interpreted bytecode takes up two stack frames, one for the bytecode
+ // handler and one for the interpreter entry trampoline. Therefore we shift
+ // up by one frame.
+ *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
+ pre_top_frame = frames[top_frame_index - 2];
+ top_frame = frames[top_frame_index - 1];
} else {
return "Unknown structure of stack above changing function";
}
@@ -1669,7 +1471,7 @@ class MultipleFunctionTarget {
Handle<Object> new_element =
JSReceiver::GetElement(isolate, new_shared_array_, i)
.ToHandleChecked();
- if (new_element->IsUndefined()) return false;
+ if (new_element->IsUndefined(isolate)) return false;
Handle<SharedFunctionInfo> new_shared =
UnwrapSharedFunctionInfoFromJSValue(
Handle<JSValue>::cast(new_element));
@@ -1687,6 +1489,21 @@ class MultipleFunctionTarget {
return false;
}
+ void set_status(LiveEdit::FunctionPatchabilityStatus status) {
+ Isolate* isolate = old_shared_array_->GetIsolate();
+ int len = GetArrayLength(old_shared_array_);
+ for (int i = 0; i < len; ++i) {
+ Handle<Object> old_element =
+ JSReceiver::GetElement(isolate, result_, i).ToHandleChecked();
+ if (!old_element->IsSmi() ||
+ Smi::cast(*old_element)->value() ==
+ LiveEdit::FUNCTION_AVAILABLE_FOR_PATCH) {
+ SetElementSloppy(result_, i,
+ Handle<Smi>(Smi::FromInt(status), isolate));
+ }
+ }
+ }
+
private:
Handle<JSArray> old_shared_array_;
Handle<JSArray> new_shared_array_;
@@ -1732,7 +1549,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
- if (frame->is_exit()) {
+ if (frame->is_exit() || frame->is_builtin_exit()) {
non_droppable_frame_found = true;
non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE;
break;
@@ -1740,7 +1557,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
if (frame->is_java_script()) {
SharedFunctionInfo* shared =
JavaScriptFrame::cast(frame)->function()->shared();
- if (shared->is_generator()) {
+ if (shared->is_resumable()) {
non_droppable_frame_found = true;
non_droppable_reason = LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR;
break;
@@ -1764,6 +1581,13 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
// Fail.
return NULL;
}
+ if (non_droppable_reason ==
+ LiveEdit::FUNCTION_BLOCKED_UNDER_GENERATOR &&
+ !target_frame_found) {
+ // Fail.
+ target.set_status(non_droppable_reason);
+ return NULL;
+ }
}
}
}
@@ -1792,7 +1616,8 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
// Adjust break_frame after some frames have been dropped.
StackFrame::Id new_id = StackFrame::NO_ID;
for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
- if (frames[i]->type() == StackFrame::JAVA_SCRIPT) {
+ if (frames[i]->type() == StackFrame::JAVA_SCRIPT ||
+ frames[i]->type() == StackFrame::INTERPRETED) {
new_id = frames[i]->id();
break;
}
@@ -1906,6 +1731,7 @@ Handle<JSArray> LiveEdit::CheckAndDropActivations(
FixedArray::cast(old_shared_array->elements()));
Handle<JSArray> result = isolate->factory()->NewJSArray(len);
+ result->set_length(Smi::FromInt(len));
JSObject::EnsureWritableFastElements(result);
Handle<FixedArray> result_elements =
handle(FixedArray::cast(result->elements()), isolate);
@@ -2004,40 +1830,101 @@ const char* LiveEdit::RestartFrame(JavaScriptFrame* frame) {
return NULL;
}
-
-LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
- FunctionLiteral* fun)
- : isolate_(isolate) {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionStarted(fun);
- }
+Handle<JSArray> LiveEditFunctionTracker::Collect(FunctionLiteral* node,
+ Handle<Script> script,
+ Zone* zone, Isolate* isolate) {
+ LiveEditFunctionTracker visitor(script, zone, isolate);
+ visitor.VisitFunctionLiteral(node);
+ return visitor.result_;
}
-
-LiveEditFunctionTracker::~LiveEditFunctionTracker() {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionDone();
- }
+LiveEditFunctionTracker::LiveEditFunctionTracker(Handle<Script> script,
+ Zone* zone, Isolate* isolate)
+ : AstTraversalVisitor<LiveEditFunctionTracker>(isolate) {
+ current_parent_index_ = -1;
+ isolate_ = isolate;
+ len_ = 0;
+ result_ = isolate->factory()->NewJSArray(10);
+ script_ = script;
+ zone_ = zone;
}
+void LiveEditFunctionTracker::VisitFunctionLiteral(FunctionLiteral* node) {
+ // FunctionStarted is called in pre-order.
+ FunctionStarted(node);
+ // Recurse using the regular traversal.
+ AstTraversalVisitor::VisitFunctionLiteral(node);
+ // FunctionDone are called in post-order.
+ // TODO(jgruber): If required, replace the (linear cost)
+ // FindSharedFunctionInfo call with a more efficient implementation.
+ Handle<SharedFunctionInfo> info =
+ script_->FindSharedFunctionInfo(node).ToHandleChecked();
+ FunctionDone(info, node->scope());
+}
-void LiveEditFunctionTracker::RecordFunctionInfo(
- Handle<SharedFunctionInfo> info, FunctionLiteral* lit,
- Zone* zone) {
- if (isolate_->active_function_info_listener() != NULL) {
- isolate_->active_function_info_listener()->FunctionInfo(info, lit->scope(),
- zone);
- }
+void LiveEditFunctionTracker::FunctionStarted(FunctionLiteral* fun) {
+ HandleScope handle_scope(isolate_);
+ FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate_);
+ info.SetInitialProperties(fun->name(), fun->start_position(),
+ fun->end_position(), fun->parameter_count(),
+ fun->materialized_literal_count(),
+ current_parent_index_);
+ current_parent_index_ = len_;
+ SetElementSloppy(result_, len_, info.GetJSArray());
+ len_++;
}
+// Saves full information about a function: its scope info and its
+// SharedFunctionInfo object.
+void LiveEditFunctionTracker::FunctionDone(Handle<SharedFunctionInfo> shared,
+ Scope* scope) {
+ HandleScope handle_scope(isolate_);
+ FunctionInfoWrapper info = FunctionInfoWrapper::cast(
+ *JSReceiver::GetElement(isolate_, result_, current_parent_index_)
+ .ToHandleChecked());
+ info.SetSharedFunctionInfo(shared);
+
+ Handle<Object> scope_info_list = SerializeFunctionScope(scope);
+ info.SetFunctionScopeInfo(scope_info_list);
-void LiveEditFunctionTracker::RecordRootFunctionInfo(Handle<Code> code) {
- isolate_->active_function_info_listener()->FunctionCode(code);
+ current_parent_index_ = info.GetParentIndex();
}
+Handle<Object> LiveEditFunctionTracker::SerializeFunctionScope(Scope* scope) {
+ Handle<JSArray> scope_info_list = isolate_->factory()->NewJSArray(10);
+ int scope_info_length = 0;
+
+ // Saves a description of the scope. It stores the names and indexes of
+ // variables in the whole scope chain. Null-named slots delimit the
+ // scopes of this chain.
+ Scope* current_scope = scope;
+ while (current_scope != NULL) {
+ HandleScope handle_scope(isolate_);
+ ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone_);
+ ZoneList<Variable*> context_list(current_scope->ContextLocalCount(), zone_);
+ ZoneList<Variable*> globals_list(current_scope->ContextGlobalCount(),
+ zone_);
+ current_scope->CollectStackAndContextLocals(&stack_list, &context_list,
+ &globals_list);
+ context_list.Sort(&Variable::CompareIndex);
+
+ for (int i = 0; i < context_list.length(); i++) {
+ SetElementSloppy(scope_info_list, scope_info_length,
+ context_list[i]->name());
+ scope_info_length++;
+ SetElementSloppy(
+ scope_info_list, scope_info_length,
+ Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate_));
+ scope_info_length++;
+ }
+ SetElementSloppy(scope_info_list, scope_info_length,
+ Handle<Object>(isolate_->heap()->null_value(), isolate_));
+ scope_info_length++;
+
+ current_scope = current_scope->outer_scope();
+ }
-bool LiveEditFunctionTracker::IsActive(Isolate* isolate) {
- return isolate->active_function_info_listener() != NULL;
+ return scope_info_list;
}
} // namespace internal
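The serialization above flattens the whole scope chain into a single JSArray: each context variable contributes a (name, index) pair, a null slot terminates one scope, and the next outer scope follows until the chain ends. A standalone C++ sketch of reading that layout back (the Slot alias and the sample data are stand-ins for V8's handle-based values, purely for illustration):

#include <iostream>
#include <optional>
#include <string>
#include <utility>
#include <vector>

// nullopt plays the role of the null-named delimiter slot.
using Slot = std::optional<std::pair<std::string, int>>;

int main() {
  // Hypothetical serialized chain: inner scope {a: 0, b: 1}, outer scope {x: 0}.
  std::vector<Slot> list = {std::make_pair("a", 0), std::make_pair("b", 1),
                            std::nullopt, std::make_pair("x", 0), std::nullopt};
  int scope = 0;
  for (const Slot& slot : list) {
    if (!slot) {  // end of one scope in the chain
      ++scope;
      continue;
    }
    std::cout << "scope " << scope << ": variable " << slot->first
              << " at context index " << slot->second << "\n";
  }
  return 0;
}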
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 67be70e00a..784f828162 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -26,32 +26,47 @@
#include "src/allocation.h"
-#include "src/compiler.h"
+#include "src/ast/ast-traversal-visitor.h"
namespace v8 {
namespace internal {
// This class collects some specific information on structure of functions
-// in a particular script. It gets called from compiler all the time, but
-// actually records any data only when liveedit operation is in process;
-// in any other time this class is very cheap.
+// in a particular script.
//
// The primary interest of the Tracker is to record function scope structures
-// in order to analyze whether function code maybe safely patched (with new
+// in order to analyze whether function code may be safely patched (with new
 // code successfully reading existing data from function scopes).
-class LiveEditFunctionTracker {
+class LiveEditFunctionTracker
+ : public AstTraversalVisitor<LiveEditFunctionTracker> {
public:
- explicit LiveEditFunctionTracker(Isolate* isolate, FunctionLiteral* fun);
- ~LiveEditFunctionTracker();
- void RecordFunctionInfo(Handle<SharedFunctionInfo> info,
- FunctionLiteral* lit, Zone* zone);
- void RecordRootFunctionInfo(Handle<Code> code);
+  // Traverses the entire AST and records information about all
+ // FunctionLiterals for further use by LiveEdit code patching. The collected
+ // information is returned as a serialized array.
+ static Handle<JSArray> Collect(FunctionLiteral* node, Handle<Script> script,
+ Zone* zone, Isolate* isolate);
- static bool IsActive(Isolate* isolate);
+ protected:
+ friend AstTraversalVisitor<LiveEditFunctionTracker>;
+ void VisitFunctionLiteral(FunctionLiteral* node);
private:
+ LiveEditFunctionTracker(Handle<Script> script, Zone* zone, Isolate* isolate);
+
+ void FunctionStarted(FunctionLiteral* fun);
+ void FunctionDone(Handle<SharedFunctionInfo> shared, Scope* scope);
+ Handle<Object> SerializeFunctionScope(Scope* scope);
+
+ Handle<Script> script_;
+ Zone* zone_;
Isolate* isolate_;
+
+ Handle<JSArray> result_;
+ int len_;
+ int current_parent_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveEditFunctionTracker);
};
@@ -279,15 +294,14 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
int end_position, int param_num, int literal_count,
int parent_index);
- void SetFunctionCode(Handle<Code> function_code,
- Handle<HeapObject> code_scope_info);
-
void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
}
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info);
+ Handle<SharedFunctionInfo> GetSharedFunctionInfo();
+
int GetLiteralCount() {
return this->GetSmiValueField(kLiteralNumOffset_);
}
@@ -296,12 +310,6 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
return this->GetSmiValueField(kParentIndexOffset_);
}
- Handle<Code> GetFunctionCode();
-
- MaybeHandle<TypeFeedbackVector> GetFeedbackVector();
-
- Handle<Object> GetCodeScopeInfo();
-
int GetStartPosition() {
return this->GetSmiValueField(kStartPositionOffset_);
}
@@ -313,13 +321,11 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kStartPositionOffset_ = 1;
static const int kEndPositionOffset_ = 2;
static const int kParamNumOffset_ = 3;
- static const int kCodeOffset_ = 4;
- static const int kCodeScopeInfoOffset_ = 5;
- static const int kFunctionScopeInfoOffset_ = 6;
- static const int kParentIndexOffset_ = 7;
- static const int kSharedFunctionInfoOffset_ = 8;
- static const int kLiteralNumOffset_ = 9;
- static const int kSize_ = 10;
+ static const int kFunctionScopeInfoOffset_ = 4;
+ static const int kParentIndexOffset_ = 5;
+ static const int kSharedFunctionInfoOffset_ = 6;
+ static const int kLiteralNumOffset_ = 7;
+ static const int kSize_ = 8;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
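The new tracker is a CRTP visitor: Collect instantiates it and visits the root literal, VisitFunctionLiteral runs FunctionStarted pre-order, hands recursion back to AstTraversalVisitor, then runs FunctionDone post-order, restoring the parent index on the way out. A self-contained sketch of that hook pattern, assuming a toy Node type rather than V8's AST:

#include <iostream>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> children;
};

template <typename Derived>
struct TraversalVisitor {
  void VisitNode(Node* node) {
    for (Node* child : node->children) {
      static_cast<Derived*>(this)->VisitNode(child);  // dispatch to Derived
    }
  }
};

struct Tracker : public TraversalVisitor<Tracker> {
  int current_parent_ = -1;
  void VisitNode(Node* node) {
    int saved_parent = current_parent_;
    std::cout << "started " << node->id << " (parent " << saved_parent << ")\n";
    current_parent_ = node->id;                  // pre-order, like FunctionStarted
    TraversalVisitor<Tracker>::VisitNode(node);  // recurse through the base class
    std::cout << "done " << node->id << "\n";    // post-order, like FunctionDone
    current_parent_ = saved_parent;
  }
};

int main() {
  Node leaf{2, {}};
  Node child{1, {&leaf}};
  Node root{0, {&child}};
  Tracker tracker;
  tracker.VisitNode(&root);
  return 0;
}

Running it prints the started/done lines in pre- and post-order, mirroring how current_parent_index_ threads parent links through the real traversal.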
diff --git a/deps/v8/src/debug/liveedit.js b/deps/v8/src/debug/liveedit.js
index 85e55c4c18..e9ee8092a2 100644
--- a/deps/v8/src/debug/liveedit.js
+++ b/deps/v8/src/debug/liveedit.js
@@ -842,11 +842,9 @@
this.start_position = raw_array[1];
this.end_position = raw_array[2];
this.param_num = raw_array[3];
- this.code = raw_array[4];
- this.code_scope_info = raw_array[5];
- this.scope_info = raw_array[6];
- this.outer_index = raw_array[7];
- this.shared_function_info = raw_array[8];
+ this.scope_info = raw_array[4];
+ this.outer_index = raw_array[5];
+ this.shared_function_info = raw_array[6];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
@@ -1014,11 +1012,6 @@
details.position = position_struct;
}
- // A testing entry.
- function GetPcFromSourcePos(func, source_pos) {
- return %GetFunctionCodePositionFromSource(func, source_pos);
- }
-
// LiveEdit main entry point: changes a script text to a new string.
function SetScriptSource(script, new_source, preview_only, change_log) {
var old_source = script.source;
@@ -1119,7 +1112,6 @@
LiveEdit.SetScriptSource = SetScriptSource;
LiveEdit.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
LiveEdit.Failure = Failure;
- LiveEdit.GetPcFromSourcePos = GetPcFromSourcePos;
LiveEdit.TestApi = {
PosTranslator: PosTranslator,
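The JS constructor above must stay slot-for-slot compatible with the C++ writer: raw_array[4..6] here mirror kFunctionScopeInfoOffset_ (4), kParentIndexOffset_ (5) and kSharedFunctionInfoOffset_ (6) from liveedit.h now that the two code fields are gone. A hedged sketch of such an array-backed record (the field names and int payloads are illustrative, not the actual JSArrayBasedStruct machinery):

#include <array>
#include <cassert>

// Offsets mirror the liveedit.h constants after the code fields were dropped.
struct FunctionInfoRecord {
  static constexpr int kFunctionName = 0;
  static constexpr int kStartPosition = 1;
  static constexpr int kEndPosition = 2;
  static constexpr int kParamNum = 3;
  static constexpr int kScopeInfo = 4;   // raw_array[4] on the JS side
  static constexpr int kOuterIndex = 5;  // raw_array[5]
  static constexpr int kSharedInfo = 6;  // raw_array[6]
  static constexpr int kLiteralNum = 7;
  static constexpr int kSize = 8;

  std::array<int, kSize> slots{};  // ints stand in for tagged values

  void SetField(int offset, int value) {
    assert(offset >= 0 && offset < kSize);
    slots[offset] = value;
  }
  int GetField(int offset) const {
    assert(offset >= 0 && offset < kSize);
    return slots[offset];
  }
};

int main() {
  FunctionInfoRecord record;
  record.SetField(FunctionInfoRecord::kOuterIndex, -1);  // top-level function
  return record.GetField(FunctionInfoRecord::kOuterIndex) == -1 ? 0 : 1;
}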
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 8e00d61ef5..49320d8a81 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -41,7 +41,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index aad095b64d..2e967d7b8e 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -40,7 +40,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 0696ec988e..165e172449 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -8,28 +8,21 @@
// ----------------------------------------------------------------------------
// Imports
-var ErrorToString;
var GlobalArray = global.Array;
var IsNaN = global.isNaN;
var JSONStringify = global.JSON.stringify;
-var MakeError;
var MapEntries;
var MapIteratorNext;
-var MathMin = global.Math.min;
-var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
-var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
+var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
var SetIteratorNext;
var SetValues;
-var SymbolToString;
utils.Import(function(from) {
- ErrorToString = from.ErrorToString;
- MakeError = from.MakeError;
MapEntries = from.MapEntries;
MapIteratorNext = from.MapIteratorNext;
SetIteratorNext = from.SetIteratorNext;
SetValues = from.SetValues;
- SymbolToString = from.SymbolToString;
});
// ----------------------------------------------------------------------------
@@ -115,14 +108,14 @@ function ClearMirrorCache(value) {
function ObjectIsPromise(value) {
return IS_RECEIVER(value) &&
- !IS_UNDEFINED(%DebugGetProperty(value, promiseStatusSymbol));
+ !IS_UNDEFINED(%DebugGetProperty(value, promiseStateSymbol));
}
/**
* Returns the mirror for a specified value or object.
*
- * @param {value or Object} value the value or object to retreive the mirror for
+ * @param {value or Object} value the value or object to retrieve the mirror for
* @param {boolean} transient indicate whether this object is transient and
* should not be added to the mirror cache. The default is not transient.
* @returns {Mirror} the mirror reflects the passed value or object
@@ -197,7 +190,7 @@ function MakeMirror(value, opt_transient) {
*/
function LookupMirror(handle) {
if (!mirror_cache_enabled_) {
- throw MakeError(kDebugger, "Mirror cache is disabled");
+ throw %make_error(kDebugger, "Mirror cache is disabled");
}
return mirror_cache_[handle];
}
@@ -256,13 +249,15 @@ PropertyAttribute.DontDelete = DONT_DELETE;
// A copy of the scope types from runtime-debug.cc.
// NOTE: these constants should be backward-compatible, so
// add new ones to the end of this list.
-var ScopeType = { Global: 0,
- Local: 1,
- With: 2,
+var ScopeType = { Global: 0,
+ Local: 1,
+ With: 2,
Closure: 3,
- Catch: 4,
- Block: 5,
- Script: 6 };
+ Catch: 4,
+ Block: 5,
+ Script: 6,
+ Eval: 7,
+ };
/**
* Base class for all mirror objects.
@@ -685,12 +680,12 @@ inherits(SymbolMirror, ValueMirror);
SymbolMirror.prototype.description = function() {
- return %SymbolDescription(%_ValueOf(this.value_));
+ return %SymbolDescription(%ValueOf(this.value_));
}
SymbolMirror.prototype.toText = function() {
- return %_Call(SymbolToString, this.value_);
+ return %SymbolDescriptiveString(%ValueOf(this.value_));
}
@@ -786,7 +781,7 @@ ObjectMirror.prototype.internalProperties = function() {
ObjectMirror.prototype.property = function(name) {
- var details = %DebugGetPropertyDetails(this.value_, TO_NAME(name));
+ var details = %DebugGetPropertyDetails(this.value_, name);
if (details) {
return new PropertyMirror(this, name, details);
}
@@ -1027,7 +1022,7 @@ FunctionMirror.prototype.scopeCount = function() {
FunctionMirror.prototype.scope = function(index) {
if (this.resolved()) {
- return new ScopeMirror(UNDEFINED, this, index);
+ return new ScopeMirror(UNDEFINED, this, UNDEFINED, index);
}
};
@@ -1089,6 +1084,11 @@ UnresolvedFunctionMirror.prototype.name = function() {
};
+UnresolvedFunctionMirror.prototype.debugName = function() {
+ return this.value_;
+};
+
+
UnresolvedFunctionMirror.prototype.inferredName = function() {
return UNDEFINED;
};
@@ -1251,7 +1251,7 @@ ErrorMirror.prototype.toText = function() {
// Use the same text representation as in messages.js.
var text;
try {
- text = %_Call(ErrorToString, this.value_);
+ text = %ErrorToString(this.value_);
} catch (e) {
text = '#<Error>';
}
@@ -1272,7 +1272,7 @@ inherits(PromiseMirror, ObjectMirror);
function PromiseGetStatus_(value) {
- var status = %DebugGetProperty(value, promiseStatusSymbol);
+ var status = %DebugGetProperty(value, promiseStateSymbol);
if (status == 0) return "pending";
if (status == 1) return "resolved";
return "rejected";
@@ -1280,7 +1280,7 @@ function PromiseGetStatus_(value) {
function PromiseGetValue_(value) {
- return %DebugGetProperty(value, promiseValueSymbol);
+ return %DebugGetProperty(value, promiseResultSymbol);
}
@@ -1408,8 +1408,8 @@ inherits(GeneratorMirror, ObjectMirror);
function GeneratorGetStatus_(value) {
var continuation = %GeneratorGetContinuation(value);
- if (continuation < 0) return "running";
- if (continuation == 0) return "closed";
+ if (continuation < -1) return "running";
+ if (continuation == -1) return "closed";
return "suspended";
}
@@ -1451,6 +1451,27 @@ GeneratorMirror.prototype.receiver = function() {
};
+GeneratorMirror.prototype.scopeCount = function() {
+ // This value can change over time as the underlying generator is suspended
+ // at different locations.
+ return %GetGeneratorScopeCount(this.value());
+};
+
+
+GeneratorMirror.prototype.scope = function(index) {
+ return new ScopeMirror(UNDEFINED, UNDEFINED, this, index);
+};
+
+
+GeneratorMirror.prototype.allScopes = function() {
+ var scopes = [];
+ for (let i = 0; i < this.scopeCount(); i++) {
+ scopes.push(this.scope(i));
+ }
+ return scopes;
+};
+
+
/**
* Base mirror object for properties.
* @param {ObjectMirror} mirror The mirror object having this property
@@ -1535,11 +1556,6 @@ PropertyMirror.prototype.propertyType = function() {
};
-PropertyMirror.prototype.insertionIndex = function() {
- return %DebugPropertyIndexFromDetails(this.details_);
-};
-
-
/**
* Returns whether this property has a getter defined through __defineGetter__.
 * @return {boolean} True if this property has a getter
@@ -1629,13 +1645,14 @@ InternalPropertyMirror.prototype.value = function() {
var kFrameDetailsFrameIdIndex = 0;
var kFrameDetailsReceiverIndex = 1;
var kFrameDetailsFunctionIndex = 2;
-var kFrameDetailsArgumentCountIndex = 3;
-var kFrameDetailsLocalCountIndex = 4;
-var kFrameDetailsSourcePositionIndex = 5;
-var kFrameDetailsConstructCallIndex = 6;
-var kFrameDetailsAtReturnIndex = 7;
-var kFrameDetailsFlagsIndex = 8;
-var kFrameDetailsFirstDynamicIndex = 9;
+var kFrameDetailsScriptIndex = 3;
+var kFrameDetailsArgumentCountIndex = 4;
+var kFrameDetailsLocalCountIndex = 5;
+var kFrameDetailsSourcePositionIndex = 6;
+var kFrameDetailsConstructCallIndex = 7;
+var kFrameDetailsAtReturnIndex = 8;
+var kFrameDetailsFlagsIndex = 9;
+var kFrameDetailsFirstDynamicIndex = 10;
var kFrameDetailsNameIndex = 0;
var kFrameDetailsValueIndex = 1;
@@ -1652,12 +1669,13 @@ var kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
* 0: Id
* 1: Receiver
* 2: Function
- * 3: Argument count
- * 4: Local count
- * 5: Source position
- * 6: Construct call
- * 7: Is at return
- * 8: Flags (debugger frame, optimized frame, inlined frame index)
+ * 3: Script
+ * 4: Argument count
+ * 5: Local count
+ * 6: Source position
+ * 7: Construct call
+ * 8: Is at return
+ * 9: Flags (debugger frame, optimized frame, inlined frame index)
* Arguments name, value
* Locals name, value
* Return value if any
@@ -1689,6 +1707,12 @@ FrameDetails.prototype.func = function() {
};
+FrameDetails.prototype.script = function() {
+ %CheckExecutionState(this.break_id_);
+ return this.details_[kFrameDetailsScriptIndex];
+};
+
+
FrameDetails.prototype.isConstructCall = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kFrameDetailsConstructCallIndex];
@@ -1959,7 +1983,7 @@ FrameMirror.prototype.sourceColumn = function() {
FrameMirror.prototype.sourceLineText = function() {
var location = this.sourceLocation();
if (location) {
- return location.sourceText();
+ return location.sourceText;
}
};
@@ -1970,7 +1994,7 @@ FrameMirror.prototype.scopeCount = function() {
FrameMirror.prototype.scope = function(index) {
- return new ScopeMirror(this, UNDEFINED, index);
+ return new ScopeMirror(this, UNDEFINED, UNDEFINED, index);
};
@@ -1981,7 +2005,8 @@ FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
!!opt_ignore_nested_scopes);
var result = [];
for (var i = 0; i < scopeDetails.length; ++i) {
- result.push(new ScopeMirror(this, UNDEFINED, i, scopeDetails[i]));
+ result.push(new ScopeMirror(this, UNDEFINED, UNDEFINED, i,
+ scopeDetails[i]));
}
return result;
};
@@ -2160,7 +2185,7 @@ var kScopeDetailsStartPositionIndex = 3;
var kScopeDetailsEndPositionIndex = 4;
var kScopeDetailsFunctionIndex = 5;
-function ScopeDetails(frame, fun, index, opt_details) {
+function ScopeDetails(frame, fun, gen, index, opt_details) {
if (frame) {
this.break_id_ = frame.break_id_;
this.details_ = opt_details ||
@@ -2170,10 +2195,15 @@ function ScopeDetails(frame, fun, index, opt_details) {
index);
this.frame_id_ = frame.details_.frameId();
this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
- } else {
+ } else if (fun) {
this.details_ = opt_details || %GetFunctionScopeDetails(fun.value(), index);
this.fun_value_ = fun.value();
this.break_id_ = UNDEFINED;
+ } else {
+ this.details_ =
+ opt_details || %GetGeneratorScopeDetails(gen.value(), index);
+ this.gen_value_ = gen.value();
+ this.break_id_ = UNDEFINED;
}
this.index_ = index;
}
@@ -2232,11 +2262,14 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
%CheckExecutionState(this.break_id_);
raw_res = %SetScopeVariableValue(this.break_id_, this.frame_id_,
this.inlined_frame_id_, this.index_, name, new_value);
- } else {
+ } else if (!IS_UNDEFINED(this.fun_value_)) {
raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_,
name, new_value);
+ } else {
+ raw_res = %SetScopeVariableValue(this.gen_value_, null, null, this.index_,
+ name, new_value);
}
- if (!raw_res) throw MakeError(kDebugger, "Failed to set variable value");
+ if (!raw_res) throw %make_error(kDebugger, "Failed to set variable value");
};
@@ -2245,12 +2278,13 @@ ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
* be specified.
* @param {FrameMirror} frame The frame this scope is a part of
* @param {FunctionMirror} function The function this scope is a part of
+ * @param {GeneratorMirror} gen The generator this scope is a part of
* @param {number} index The scope index in the frame
* @param {Array=} opt_details Raw scope details data
* @constructor
* @extends Mirror
*/
-function ScopeMirror(frame, fun, index, opt_details) {
+function ScopeMirror(frame, fun, gen, index, opt_details) {
%_Call(Mirror, this, MirrorType.SCOPE_TYPE);
if (frame) {
this.frame_index_ = frame.index_;
@@ -2258,7 +2292,7 @@ function ScopeMirror(frame, fun, index, opt_details) {
this.frame_index_ = UNDEFINED;
}
this.scope_index_ = index;
- this.details_ = new ScopeDetails(frame, fun, index, opt_details);
+ this.details_ = new ScopeDetails(frame, fun, gen, index, opt_details);
}
inherits(ScopeMirror, Mirror);
@@ -2335,6 +2369,7 @@ ScriptMirror.prototype.source = function() {
ScriptMirror.prototype.setSource = function(source) {
+ if (!IS_STRING(source)) throw %make_error(kDebugger, "Source is not a string");
%DebugSetScriptSource(this.script_, source);
};
@@ -2365,7 +2400,7 @@ ScriptMirror.prototype.compilationType = function() {
ScriptMirror.prototype.lineCount = function() {
- return this.script_.lineCount();
+ return %ScriptLineCount(this.script_);
};
@@ -2375,11 +2410,6 @@ ScriptMirror.prototype.locationFromPosition = function(
};
-ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
- return this.script_.sourceSlice(opt_from_line, opt_to_line);
-};
-
-
ScriptMirror.prototype.context = function() {
return this.context_;
};
@@ -2659,7 +2689,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
case MirrorType.PROPERTY_TYPE:
case MirrorType.INTERNAL_PROPERTY_TYPE:
- throw MakeError(kDebugger,
+ throw %make_error(kDebugger,
'PropertyMirror cannot be serialized independently');
break;
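ScopeDetails and ScopeMirror now take a scope source in one of three shapes, frame, function or generator, with the unused positions passed as UNDEFINED and the branch choosing the matching runtime call. A C++ analogue of that dispatch using a tagged union (purely illustrative; the JS above branches on argument presence instead):

#include <iostream>
#include <string>
#include <variant>

struct Frame { int id; };
struct Function { std::string name; };
struct Generator { std::string name; };

using ScopeSource = std::variant<Frame, Function, Generator>;

// Pick the scope-details provider based on which source is present,
// mirroring the frame / fun / gen branches in ScopeDetails.
std::string DescribeScope(const ScopeSource& source, int index) {
  if (const Frame* frame = std::get_if<Frame>(&source)) {
    return "frame " + std::to_string(frame->id) + ", scope " + std::to_string(index);
  }
  if (const Function* fun = std::get_if<Function>(&source)) {
    return "function " + fun->name + ", scope " + std::to_string(index);
  }
  const Generator& gen = std::get<Generator>(source);
  return "generator " + gen.name + ", scope " + std::to_string(index);
}

int main() {
  std::cout << DescribeScope(Frame{1}, 0) << "\n";
  std::cout << DescribeScope(Generator{"gen"}, 2) << "\n";
  return 0;
}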
diff --git a/deps/v8/src/debug/ppc/OWNERS b/deps/v8/src/debug/ppc/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/debug/ppc/OWNERS
+++ b/deps/v8/src/debug/ppc/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index a160bc2e91..7facf9526a 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -41,7 +41,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotInstructions);
// Patch the code changing the debug break slot code from
//
diff --git a/deps/v8/src/debug/s390/OWNERS b/deps/v8/src/debug/s390/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/debug/s390/OWNERS
+++ b/deps/v8/src/debug/s390/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index c6764c2dca..9c33b95e3b 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -45,7 +45,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
CodePatcher patcher(isolate, pc, Assembler::kDebugBreakSlotLength);
// Patch the code changing the debug break slot code from
//
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index a85ddb3093..910d1ca001 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -39,7 +39,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
static const int kSize = Assembler::kDebugBreakSlotLength;
CodePatcher patcher(isolate, pc, kSize);
Label check_codesize;
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
index 029a00415f..1cbdf45b8c 100644
--- a/deps/v8/src/debug/x87/debug-x87.cc
+++ b/deps/v8/src/debug/x87/debug-x87.cc
@@ -38,7 +38,7 @@ void DebugCodegen::ClearDebugBreakSlot(Isolate* isolate, Address pc) {
void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code) {
- DCHECK_EQ(Code::BUILTIN, code->kind());
+ DCHECK(code->is_debug_stub());
static const int kSize = Assembler::kDebugBreakSlotLength;
CodePatcher patcher(isolate, pc, kSize);
diff --git a/deps/v8/src/deoptimize-reason.cc b/deps/v8/src/deoptimize-reason.cc
new file mode 100644
index 0000000000..87c8905ff8
--- /dev/null
+++ b/deps/v8/src/deoptimize-reason.cc
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimize-reason.h"
+
+namespace v8 {
+namespace internal {
+
+std::ostream& operator<<(std::ostream& os, DeoptimizeReason reason) {
+ switch (reason) {
+#define DEOPTIMIZE_REASON(Name, message) \
+ case DeoptimizeReason::k##Name: \
+ return os << #Name;
+ DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
+#undef DEOPTIMIZE_REASON
+ }
+ UNREACHABLE();
+ return os;
+}
+
+size_t hash_value(DeoptimizeReason reason) {
+ return static_cast<uint8_t>(reason);
+}
+
+char const* const DeoptimizeReasonToString(DeoptimizeReason reason) {
+ static char const* kDeoptimizeReasonStrings[] = {
+#define DEOPTIMIZE_REASON(Name, message) message,
+ DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
+#undef DEOPTIMIZE_REASON
+ };
+ size_t const index = static_cast<size_t>(reason);
+ DCHECK_LT(index, arraysize(kDeoptimizeReasonStrings));
+ return kDeoptimizeReasonStrings[index];
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
new file mode 100644
index 0000000000..60e0a59c5a
--- /dev/null
+++ b/deps/v8/src/deoptimize-reason.h
@@ -0,0 +1,98 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEOPTIMIZE_REASON_H_
+#define V8_DEOPTIMIZE_REASON_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEOPTIMIZE_REASON_LIST(V) \
+ V(AccessCheck, "Access check needed") \
+ V(NoReason, "no reason") \
+ V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
+ V(ConversionOverflow, "conversion overflow") \
+ V(DivisionByZero, "division by zero") \
+ V(ElementsKindUnhandledInKeyedLoadGenericStub, \
+ "ElementsKind unhandled in KeyedLoadGenericStub") \
+ V(ExpectedHeapNumber, "Expected heap number") \
+ V(ExpectedSmi, "Expected smi") \
+ V(ForcedDeoptToRuntime, "Forced deopt to runtime") \
+ V(Hole, "hole") \
+ V(InstanceMigrationFailed, "instance migration failed") \
+ V(InsufficientTypeFeedbackForCallWithArguments, \
+ "Insufficient type feedback for call with arguments") \
+ V(FastPathFailed, "Falling off the fast path") \
+ V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
+ "Insufficient type feedback for combined type of binary operation") \
+ V(InsufficientTypeFeedbackForGenericNamedAccess, \
+ "Insufficient type feedback for generic named access") \
+ V(InsufficientTypeFeedbackForGenericKeyedAccess, \
+ "Insufficient type feedback for generic keyed access") \
+ V(InsufficientTypeFeedbackForLHSOfBinaryOperation, \
+ "Insufficient type feedback for LHS of binary operation") \
+ V(InsufficientTypeFeedbackForRHSOfBinaryOperation, \
+ "Insufficient type feedback for RHS of binary operation") \
+ V(KeyIsNegative, "key is negative") \
+ V(LostPrecision, "lost precision") \
+ V(LostPrecisionOrNaN, "lost precision or NaN") \
+ V(MementoFound, "memento found") \
+ V(MinusZero, "minus zero") \
+ V(NaN, "NaN") \
+ V(NegativeKeyEncountered, "Negative key encountered") \
+ V(NegativeValue, "negative value") \
+ V(NoCache, "no cache") \
+ V(NonStrictElementsInKeyedLoadGenericStub, \
+ "non-strict elements in KeyedLoadGenericStub") \
+ V(NotAHeapNumber, "not a heap number") \
+ V(NotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
+ V(NotAHeapNumberUndefined, "not a heap number/undefined") \
+ V(NotAJavaScriptObject, "not a JavaScript object") \
+ V(NotASmi, "not a Smi") \
+ V(OutOfBounds, "out of bounds") \
+ V(OutsideOfRange, "Outside of range") \
+ V(Overflow, "overflow") \
+ V(Proxy, "proxy") \
+ V(ReceiverWasAGlobalObject, "receiver was a global object") \
+ V(Smi, "Smi") \
+ V(TooManyArguments, "too many arguments") \
+ V(TracingElementsTransitions, "Tracing elements transitions") \
+ V(TypeMismatchBetweenFeedbackAndConstant, \
+ "Type mismatch between feedback and constant") \
+ V(UnexpectedCellContentsInConstantGlobalStore, \
+ "Unexpected cell contents in constant global store") \
+ V(UnexpectedCellContentsInGlobalStore, \
+ "Unexpected cell contents in global store") \
+ V(UnexpectedObject, "unexpected object") \
+ V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
+ V(UninitializedBoilerplateLiterals, "Uninitialized boilerplate literals") \
+ V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
+ V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
+ V(UnknownMapInPolymorphicElementAccess, \
+ "Unknown map in polymorphic element access") \
+ V(UnknownMap, "Unknown map") \
+ V(ValueMismatch, "value mismatch") \
+ V(WrongInstanceType, "wrong instance type") \
+ V(WrongMap, "wrong map") \
+ V(UndefinedOrNullInForIn, "null or undefined in for-in") \
+ V(UndefinedOrNullInToObject, "null or undefined in ToObject")
+
+enum class DeoptimizeReason : uint8_t {
+#define DEOPTIMIZE_REASON(Name, message) k##Name,
+ DEOPTIMIZE_REASON_LIST(DEOPTIMIZE_REASON)
+#undef DEOPTIMIZE_REASON
+};
+
+std::ostream& operator<<(std::ostream&, DeoptimizeReason);
+
+size_t hash_value(DeoptimizeReason reason);
+
+char const* const DeoptimizeReasonToString(DeoptimizeReason reason);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_DEOPTIMIZE_REASON_H_
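The new header is organized around an X-macro: DEOPTIMIZE_REASON_LIST is expanded once into enum constants, once into switch cases, and once into the message table, so the three views cannot drift apart. A minimal standalone rendition of the pattern (REASON_LIST and its entries are invented for illustration):

#include <cstddef>
#include <iostream>

#define REASON_LIST(V)     \
  V(NoReason, "no reason") \
  V(Overflow, "overflow")  \
  V(NaN, "NaN")

enum class Reason : unsigned char {
#define DEFINE_ENUM(Name, message) k##Name,
  REASON_LIST(DEFINE_ENUM)
#undef DEFINE_ENUM
};

const char* ReasonToString(Reason reason) {
  // Same list, expanded a second time into the parallel message table.
  static const char* const kMessages[] = {
#define DEFINE_MESSAGE(Name, message) message,
      REASON_LIST(DEFINE_MESSAGE)
#undef DEFINE_MESSAGE
  };
  return kMessages[static_cast<std::size_t>(reason)];
}

int main() {
  std::cout << ReasonToString(Reason::kOverflow) << "\n";  // prints "overflow"
  return 0;
}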
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index b2c5d42df4..d4756ff183 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -4,6 +4,8 @@
#include "src/deoptimizer.h"
+#include <memory>
+
#include "src/accessors.h"
#include "src/ast/prettyprinter.h"
#include "src/codegen.h"
@@ -13,7 +15,6 @@
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -24,13 +25,7 @@ namespace internal {
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
base::OS::CommitPageSize(),
-#if defined(__native_client__)
- // The Native Client port of V8 uses an interpreter,
- // so code pages don't need PROT_EXEC.
- NOT_EXECUTABLE,
-#else
EXECUTABLE,
-#endif
NULL);
}
@@ -47,7 +42,7 @@ DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
DeoptimizerData::~DeoptimizerData() {
for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
- allocator_->Free(deopt_entry_code_[i]);
+ allocator_->Free<MemoryAllocator::kFull>(deopt_entry_code_[i]);
deopt_entry_code_[i] = NULL;
}
}
@@ -56,9 +51,10 @@ DeoptimizerData::~DeoptimizerData() {
Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_->IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
+ Isolate* isolate = function_->GetIsolate();
Context* native_context = function_->context()->native_context();
Object* element = native_context->DeoptimizedCodeListHead();
- while (!element->IsUndefined()) {
+ while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
if (code->contains(addr)) return code;
@@ -159,13 +155,6 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
return info;
}
-
-void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
- Isolate* isolate) {
- delete info;
-}
-
-
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
int count,
BailoutType type) {
@@ -173,7 +162,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
generator.Generate();
}
-
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
DisallowHeapAllocation no_allocation;
@@ -186,7 +174,8 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
// no longer refer to optimized code.
JSFunction* prev = NULL;
Object* element = context->OptimizedFunctionsListHead();
- while (!element->IsUndefined()) {
+ Isolate* isolate = context->GetIsolate();
+ while (!element->IsUndefined(isolate)) {
JSFunction* function = JSFunction::cast(element);
Object* next = function->next_function_link();
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
@@ -226,9 +215,9 @@ void Deoptimizer::VisitAllOptimizedFunctions(
// Run through the list of all native contexts.
Object* context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined()) {
+ while (!context->IsUndefined(isolate)) {
VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ context = Context::cast(context)->next_context_link();
}
}
@@ -296,7 +285,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
!FLAG_turbo_asm_deoptimization;
bool safe_to_deopt =
deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
- CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
+ bool builtin = code->kind() == Code::BUILTIN;
+ CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned ||
+ builtin);
if (topmost_optimized_code == NULL) {
topmost_optimized_code = code;
safe_to_deopt_topmost_optimized_code = safe_to_deopt;
@@ -313,7 +304,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Walk over all optimized code objects in this native context.
Code* prev = NULL;
Object* element = context->OptimizedCodeListHead();
- while (!element->IsUndefined()) {
+ while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
Object* next = code->next_code_link();
@@ -372,8 +363,11 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
@@ -381,18 +375,21 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
Object* context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined()) {
+ while (!context->IsUndefined(isolate)) {
Context* native_context = Context::cast(context);
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
- context = native_context->get(Context::NEXT_CONTEXT_LINK);
+ context = native_context->next_context_link();
}
}
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
- TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
@@ -400,17 +397,18 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
// For all contexts, deoptimize code already marked.
Object* context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined()) {
+ while (!context->IsUndefined(isolate)) {
Context* native_context = Context::cast(context);
DeoptimizeMarkedCodeForContext(native_context);
- context = native_context->get(Context::NEXT_CONTEXT_LINK);
+ context = native_context->next_context_link();
}
}
void Deoptimizer::MarkAllCodeForContext(Context* context) {
Object* element = context->OptimizedCodeListHead();
- while (!element->IsUndefined()) {
+ Isolate* isolate = context->GetIsolate();
+ while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
code->set_marked_for_deoptimization(true);
@@ -420,8 +418,12 @@ void Deoptimizer::MarkAllCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- TimerEventScope<TimerEventDeoptimizeCode> timer(function->GetIsolate());
- TRACE_EVENT0("v8", "V8.DeoptimizeCode");
+ Isolate* isolate = function->GetIsolate();
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::DeoptimizeCode);
+ TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::DeoptimizeCode);
Code* code = function->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
@@ -556,7 +558,7 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
void Deoptimizer::PrintFunctionName() {
- if (function_->IsJSFunction()) {
+ if (function_ != nullptr && function_->IsJSFunction()) {
function_->ShortPrint(trace_scope_->file());
} else {
PrintF(trace_scope_->file(),
@@ -651,16 +653,16 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
// Count all entries in the deoptimizing code list of every context.
Object* context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined()) {
+ while (!context->IsUndefined(isolate)) {
Context* native_context = Context::cast(context);
Object* element = native_context->DeoptimizedCodeListHead();
- while (!element->IsUndefined()) {
+ while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
length++;
element = code->next_code_link();
}
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ context = Context::cast(context)->next_context_link();
}
return length;
}
@@ -839,9 +841,8 @@ void Deoptimizer::DoComputeOutputFrames() {
" @%d => node=%d, pc=0x%08" V8PRIxPTR ", caller sp=0x%08" V8PRIxPTR
", state=%s, took %0.3f ms]\n",
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
- caller_frame_top_, FullCodeGenerator::State2String(
- static_cast<FullCodeGenerator::State>(
- output_[index]->GetState()->value())),
+ caller_frame_top_, BailoutStateToString(static_cast<BailoutState>(
+ output_[index]->GetState()->value())),
ms);
}
}
@@ -872,7 +873,7 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating frame ");
- base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
height_in_bytes, goto_catch_handler ? " (throw)" : "");
@@ -981,7 +982,7 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
}
// Read the context from the translations.
Object* context = context_pos->GetRawValue();
- if (context == isolate_->heap()->undefined_value()) {
+ if (context->IsUndefined(isolate_)) {
// If the context was optimized away, just use the context from
// the activation. This should only apply to Crankshaft code.
CHECK(!compiled_code_->is_turbofanned());
@@ -1053,10 +1054,11 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
// If we are going to the catch handler, then the exception lives in
// the accumulator.
- FullCodeGenerator::State state =
- goto_catch_handler ? FullCodeGenerator::TOS_REG
- : FullCodeGenerator::StateField::decode(pc_and_state);
- output_frame->SetState(Smi::FromInt(state));
+ BailoutState state =
+ goto_catch_handler
+ ? BailoutState::TOS_REGISTER
+ : FullCodeGenerator::BailoutStateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(static_cast<int>(state)));
// Set the continuation for the topmost frame.
if (is_topmost) {
@@ -1090,7 +1092,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating interpreted frame ");
- base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
bytecode_offset, height_in_bytes,
@@ -1232,7 +1234,9 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Set the bytecode array pointer.
output_offset -= kPointerSize;
- Object* bytecode_array = shared->bytecode_array();
+ Object* bytecode_array = shared->HasDebugInfo()
+ ? shared->GetDebugInfo()->DebugBytecodeArray()
+ : shared->bytecode_array();
WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
"bytecode array ");
@@ -1272,7 +1276,9 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
Code* dispatch_builtin =
builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
- output_frame->SetState(0);
+ // Restore accumulator (TOS) register.
+ output_frame->SetState(
+ Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
@@ -1288,14 +1294,11 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
if (is_topmost) {
- Code* continuation =
- builtins->builtin(Builtins::kInterpreterNotifyDeoptimized);
+ Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
if (bailout_type_ == LAZY) {
- continuation =
- builtins->builtin(Builtins::kInterpreterNotifyLazyDeoptimized);
+ continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
} else if (bailout_type_ == SOFT) {
- continuation =
- builtins->builtin(Builtins::kInterpreterNotifySoftDeoptimized);
+ continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
} else {
CHECK_EQ(bailout_type_, EAGER);
}
@@ -1436,7 +1439,7 @@ void Deoptimizer::DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating tail caller frame ");
- base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s\n", name.get());
}
@@ -1509,7 +1512,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// value of result register is preserved during continuation execution.
// We do this here by "pushing" the result of the constructor function to the
// top of the reconstructed stack and then using the
- // FullCodeGenerator::TOS_REG machinery.
+ // BailoutState::TOS_REGISTER machinery.
if (is_topmost) {
height_in_bytes += kPointerSize;
}
@@ -1630,7 +1633,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
DebugPrintOutputSlot(value, frame_index, output_offset,
"constructor result\n");
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+ output_frame->SetState(
+ Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
}
CHECK_EQ(0u, output_offset);
@@ -1684,7 +1688,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
// value of result register is preserved during continuation execution.
// We do this here by "pushing" the result of the accessor function to the
// top of the reconstructed stack and then using the
- // FullCodeGenerator::TOS_REG machinery.
+ // BailoutState::TOS_REGISTER machinery.
// We don't need to restore the result in case of a setter call because we
// have to return the stored value but not the result of the setter function.
bool should_preserve_result = is_topmost && !is_setter_stub_frame;
@@ -1803,9 +1807,11 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
DebugPrintOutputSlot(value, frame_index, output_offset,
"accessor result\n");
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::TOS_REG));
+ output_frame->SetState(
+ Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
} else {
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetState(
+ Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
}
CHECK_EQ(0u, output_offset);
@@ -2060,7 +2066,8 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
output_frame->SetConstantPool(constant_pool_value);
output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
- output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetState(
+ Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
Code* notify_failure =
isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
output_frame->SetContinuation(
@@ -2439,6 +2446,10 @@ void Translation::StoreBoolRegister(Register reg) {
buffer_->Add(reg.code(), zone());
}
+void Translation::StoreFloatRegister(FloatRegister reg) {
+ buffer_->Add(FLOAT_REGISTER, zone());
+ buffer_->Add(reg.code(), zone());
+}
void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(DOUBLE_REGISTER, zone());
@@ -2469,6 +2480,10 @@ void Translation::StoreBoolStackSlot(int index) {
buffer_->Add(index, zone());
}
+void Translation::StoreFloatStackSlot(int index) {
+ buffer_->Add(FLOAT_STACK_SLOT, zone());
+ buffer_->Add(index, zone());
+}
void Translation::StoreDoubleStackSlot(int index) {
buffer_->Add(DOUBLE_STACK_SLOT, zone());
@@ -2509,11 +2524,13 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case INT32_REGISTER:
case UINT32_REGISTER:
case BOOL_REGISTER:
+ case FLOAT_REGISTER:
case DOUBLE_REGISTER:
case STACK_SLOT:
case INT32_STACK_SLOT:
case UINT32_STACK_SLOT:
case BOOL_STACK_SLOT:
+ case FLOAT_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
case COMPILED_STUB_FRAME:
@@ -2639,23 +2656,6 @@ Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
return it->GetValue();
}
-int ComputeSourcePosition(Handle<SharedFunctionInfo> shared,
- BailoutId node_id) {
- if (shared->HasBytecodeArray()) {
- BytecodeArray* bytecodes = shared->bytecode_array();
- // BailoutId points to the next bytecode in the bytecode aray. Subtract
- // 1 to get the end of current bytecode.
- return bytecodes->SourcePosition(node_id.ToInt() - 1);
- } else {
- Code* non_optimized_code = shared->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, *shared);
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- return non_optimized_code->SourcePosition(pc_offset);
- }
-}
-
} // namespace
DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
@@ -2685,8 +2685,14 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
parameter_frame != state->begin() &&
(parameter_frame - 1)->kind() == TranslatedFrame::kConstructStub;
- source_position_ =
- ComputeSourcePosition(frame_it->shared_info(), frame_it->node_id());
+ if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
+ source_position_ = Deoptimizer::ComputeSourcePositionFromBytecodeArray(
+ *frame_it->shared_info(), frame_it->node_id());
+ } else {
+ DCHECK_EQ(TranslatedFrame::kFunction, frame_it->kind());
+ source_position_ = Deoptimizer::ComputeSourcePositionFromBaselineCode(
+ *frame_it->shared_info(), frame_it->node_id());
+ }
TranslatedFrame::iterator value_it = frame_it->begin();
// Get the function. Note that this might materialize the function.
@@ -2737,35 +2743,55 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
}
-const char* Deoptimizer::GetDeoptReason(DeoptReason deopt_reason) {
- DCHECK(deopt_reason < kLastDeoptReason);
-#define DEOPT_MESSAGES_TEXTS(C, T) T,
- static const char* deopt_messages_[] = {
- DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_TEXTS)};
-#undef DEOPT_MESSAGES_TEXTS
- return deopt_messages_[deopt_reason];
-}
-
-
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
SourcePosition last_position = SourcePosition::Unknown();
- Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
+ DeoptimizeReason last_reason = DeoptimizeReason::kNoReason;
+ int last_deopt_id = kNoDeoptimizationId;
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
- RelocInfo::ModeMask(RelocInfo::POSITION);
+ RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
+ RelocInfo::ModeMask(RelocInfo::DEOPT_POSITION);
for (RelocIterator it(code, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
- if (info->pc() >= pc) return DeoptInfo(last_position, NULL, last_reason);
- if (info->rmode() == RelocInfo::POSITION) {
+ if (info->pc() >= pc) {
+ return DeoptInfo(last_position, last_reason, last_deopt_id);
+ }
+ if (info->rmode() == RelocInfo::DEOPT_POSITION) {
int raw_position = static_cast<int>(info->data());
last_position = raw_position ? SourcePosition::FromRaw(raw_position)
: SourcePosition::Unknown();
+ } else if (info->rmode() == RelocInfo::DEOPT_ID) {
+ last_deopt_id = static_cast<int>(info->data());
} else if (info->rmode() == RelocInfo::DEOPT_REASON) {
- last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
+ last_reason = static_cast<DeoptimizeReason>(info->data());
}
}
- return DeoptInfo(SourcePosition::Unknown(), NULL, Deoptimizer::kNoReason);
+ return DeoptInfo(SourcePosition::Unknown(), DeoptimizeReason::kNoReason, -1);
+}
+
+
+// static
+int Deoptimizer::ComputeSourcePositionFromBaselineCode(
+ SharedFunctionInfo* shared, BailoutId node_id) {
+ DCHECK(shared->HasBaselineCode());
+ Code* code = shared->code();
+ FixedArray* raw_data = code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, shared);
+ int code_offset =
+ static_cast<int>(FullCodeGenerator::PcField::decode(pc_and_state));
+ return AbstractCode::cast(code)->SourcePosition(code_offset);
}
+// static
+int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
+ SharedFunctionInfo* shared, BailoutId node_id) {
+ DCHECK(shared->HasBytecodeArray());
+  // BailoutId points to the next bytecode in the bytecode array. Subtract
+  // 1 to get the end of the current bytecode.
+ int code_offset = node_id.ToInt() - 1;
+ return AbstractCode::cast(shared->bytecode_array())
+ ->SourcePosition(code_offset);
+}
// static
TranslatedValue TranslatedValue::NewArgumentsObject(TranslatedState* container,
@@ -2797,6 +2823,14 @@ TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
// static
+TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
+ float value) {
+ TranslatedValue slot(container, kFloat);
+ slot.float_value_ = value;
+ return slot;
+}
+
+// static
TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
double value) {
TranslatedValue slot(container, kDouble);
@@ -2867,6 +2901,10 @@ uint32_t TranslatedValue::uint32_value() const {
return uint32_value_;
}
+float TranslatedValue::float_value() const {
+ DCHECK_EQ(kFloat, kind());
+ return float_value_;
+}
double TranslatedValue::double_value() const {
DCHECK_EQ(kDouble, kind());
@@ -2945,6 +2983,7 @@ Handle<Object> TranslatedValue::GetValue() {
case TranslatedValue::kInt32:
case TranslatedValue::kUInt32:
case TranslatedValue::kBoolBit:
+ case TranslatedValue::kFloat:
case TranslatedValue::kDouble: {
MaterializeSimple();
return value_.ToHandleChecked();
@@ -2986,6 +3025,10 @@ void TranslatedValue::MaterializeSimple() {
value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
return;
+ case kFloat:
+ value_ = Handle<Object>(isolate()->factory()->NewNumber(float_value()));
+ return;
+
case kDouble:
value_ = Handle<Object>(isolate()->factory()->NewNumber(double_value()));
return;
@@ -3154,8 +3197,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- base::SmartArrayPointer<char> name =
- shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading input frame %s", name.get());
int arg_count = shared_info->internal_formal_parameter_count() + 1;
PrintF(trace_file, " => node=%d, args=%d, height=%d; inputs:\n",
@@ -3170,8 +3212,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- base::SmartArrayPointer<char> name =
- shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading input frame %s", name.get());
int arg_count = shared_info->internal_formal_parameter_count() + 1;
PrintF(trace_file,
@@ -3187,8 +3228,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- base::SmartArrayPointer<char> name =
- shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
PrintF(trace_file, " => height=%d; inputs:\n", height);
}
@@ -3199,8 +3239,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
if (trace_file != nullptr) {
- base::SmartArrayPointer<char> name =
- shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading tail caller frame marker %s\n",
name.get());
}
@@ -3212,8 +3251,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- base::SmartArrayPointer<char> name =
- shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading construct stub frame %s", name.get());
PrintF(trace_file, " => height=%d; inputs:\n", height);
}
@@ -3224,8 +3262,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
if (trace_file != nullptr) {
- base::SmartArrayPointer<char> name =
- shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading getter frame %s; inputs:\n", name.get());
}
return TranslatedFrame::AccessorFrame(TranslatedFrame::kGetter,
@@ -3236,8 +3273,7 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
SharedFunctionInfo* shared_info =
SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
if (trace_file != nullptr) {
- base::SmartArrayPointer<char> name =
- shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
PrintF(trace_file, " reading setter frame %s; inputs:\n", name.get());
}
return TranslatedFrame::AccessorFrame(TranslatedFrame::kSetter,
@@ -3262,11 +3298,13 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::INT32_REGISTER:
case Translation::UINT32_REGISTER:
case Translation::BOOL_REGISTER:
+ case Translation::FLOAT_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::STACK_SLOT:
case Translation::INT32_STACK_SLOT:
case Translation::UINT32_STACK_SLOT:
case Translation::BOOL_STACK_SLOT:
+ case Translation::FLOAT_STACK_SLOT:
case Translation::DOUBLE_STACK_SLOT:
case Translation::LITERAL:
break;
@@ -3393,13 +3431,26 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
return TranslatedValue::NewBool(this, static_cast<uint32_t>(value));
}
+ case Translation::FLOAT_REGISTER: {
+ int input_reg = iterator->Next();
+ if (registers == nullptr) return TranslatedValue::NewInvalid(this);
+ float value = registers->GetFloatRegister(input_reg);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; %s (float)", value,
+ RegisterConfiguration::Crankshaft()->GetFloatRegisterName(
+ input_reg));
+ }
+ return TranslatedValue::NewFloat(this, value);
+ }
+
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
if (registers == nullptr) return TranslatedValue::NewInvalid(this);
double value = registers->GetDoubleRegister(input_reg);
if (trace_file != nullptr) {
- PrintF(trace_file, "%e ; %s (bool)", value,
- DoubleRegister::from_code(input_reg).ToString());
+ PrintF(trace_file, "%e ; %s (double)", value,
+ RegisterConfiguration::Crankshaft()->GetDoubleRegisterName(
+ input_reg));
}
return TranslatedValue::NewDouble(this, value);
}
@@ -3450,6 +3501,17 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
return TranslatedValue::NewBool(this, value);
}
+ case Translation::FLOAT_STACK_SLOT: {
+ int slot_offset =
+ OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
+ float value = ReadFloatValue(fp + slot_offset);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "%e ; (float) [fp %c %d] ", value,
+ slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
+ }
+ return TranslatedValue::NewFloat(this, value);
+ }
+
case Translation::DOUBLE_STACK_SLOT: {
int slot_offset =
OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
@@ -3486,6 +3548,7 @@ TranslatedState::TranslatedState(JavaScriptFrame* frame)
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+ DCHECK(data != nullptr && deopt_index != Safepoint::kNoDeoptimizationIndex);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Init(frame->fp(), &it, data->LiteralArray(), nullptr /* registers */,
@@ -3596,6 +3659,7 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
case TranslatedValue::kInt32:
case TranslatedValue::kUInt32:
case TranslatedValue::kBoolBit:
+ case TranslatedValue::kFloat:
case TranslatedValue::kDouble: {
slot->MaterializeSimple();
Handle<Object> value = slot->GetValue();
@@ -3664,7 +3728,9 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
}
return object;
}
- case JS_OBJECT_TYPE: {
+ case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE: {
Handle<JSObject> object =
isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
slot->value_ = object;
@@ -3691,6 +3757,35 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
object->set_length(*length);
return object;
}
+ case JS_FUNCTION_TYPE: {
+ Handle<JSFunction> object =
+ isolate_->factory()->NewFunctionFromSharedFunctionInfo(
+ handle(isolate_->object_function()->shared()),
+ handle(isolate_->context()));
+ slot->value_ = object;
+ // We temporarily allocated a JSFunction for the {Object} function
+ // within the current context, to break cycles in the object graph.
+ // The correct function and context will be set below once available.
+ Handle<Object> properties = MaterializeAt(frame_index, value_index);
+ Handle<Object> elements = MaterializeAt(frame_index, value_index);
+ Handle<Object> prototype = MaterializeAt(frame_index, value_index);
+ Handle<Object> shared = MaterializeAt(frame_index, value_index);
+ Handle<Object> context = MaterializeAt(frame_index, value_index);
+ Handle<Object> literals = MaterializeAt(frame_index, value_index);
+ Handle<Object> entry = MaterializeAt(frame_index, value_index);
+ Handle<Object> next_link = MaterializeAt(frame_index, value_index);
+ object->ReplaceCode(*isolate_->builtins()->CompileLazy());
+ object->set_map(*map);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_prototype_or_initial_map(*prototype);
+ object->set_shared(SharedFunctionInfo::cast(*shared));
+ object->set_context(Context::cast(*context));
+ object->set_literals(LiteralsArray::cast(*literals));
+ CHECK(entry->IsNumber()); // Entry to compile lazy stub.
+ CHECK(next_link->IsUndefined(isolate_));
+ return object;
+ }
case FIXED_ARRAY_TYPE: {
Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
int32_t length = 0;
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 21ca84ed52..7822d1cf50 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -6,8 +6,9 @@
#define V8_DEOPTIMIZER_H_
#include "src/allocation.h"
+#include "src/deoptimize-reason.h"
#include "src/macro-assembler.h"
-
+#include "src/source-position.h"
namespace v8 {
namespace internal {
@@ -39,6 +40,7 @@ class TranslatedValue {
kInt32,
kUInt32,
kBoolBit,
+ kFloat,
kDouble,
kCapturedObject, // Object captured by the escape analysis.
// The number of nested objects can be obtained
@@ -61,6 +63,7 @@ class TranslatedValue {
static TranslatedValue NewDeferredObject(TranslatedState* container,
int length, int object_index);
static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
+ static TranslatedValue NewFloat(TranslatedState* container, float value);
static TranslatedValue NewDouble(TranslatedState* container, double value);
static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
@@ -93,6 +96,8 @@ class TranslatedValue {
uint32_t uint32_value_;
// kind is kInt32.
int32_t int32_value_;
+ // kind is kFloat
+ float float_value_;
// kind is kDouble
double double_value_;
// kind is kDuplicatedObject or kArgumentsObject or kCapturedObject.
@@ -103,6 +108,7 @@ class TranslatedValue {
Object* raw_literal() const;
int32_t int32_value() const;
uint32_t uint32_value() const;
+ float float_value() const;
double double_value() const;
int object_length() const;
int object_index() const;
@@ -317,110 +323,45 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
virtual void LeaveContext(Context* context) = 0;
};
-#define DEOPT_MESSAGES_LIST(V) \
- V(kAccessCheck, "Access check needed") \
- V(kNoReason, "no reason") \
- V(kConstantGlobalVariableAssignment, "Constant global variable assignment") \
- V(kConversionOverflow, "conversion overflow") \
- V(kDivisionByZero, "division by zero") \
- V(kElementsKindUnhandledInKeyedLoadGenericStub, \
- "ElementsKind unhandled in KeyedLoadGenericStub") \
- V(kExpectedHeapNumber, "Expected heap number") \
- V(kExpectedSmi, "Expected smi") \
- V(kForcedDeoptToRuntime, "Forced deopt to runtime") \
- V(kHole, "hole") \
- V(kHoleyArrayDespitePackedElements_kindFeedback, \
- "Holey array despite packed elements_kind feedback") \
- V(kInstanceMigrationFailed, "instance migration failed") \
- V(kInsufficientTypeFeedbackForCallWithArguments, \
- "Insufficient type feedback for call with arguments") \
- V(kFastArrayPushFailed, "Falling off the fast path for FastArrayPush") \
- V(kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
- "Insufficient type feedback for combined type of binary operation") \
- V(kInsufficientTypeFeedbackForGenericNamedAccess, \
- "Insufficient type feedback for generic named access") \
- V(kInsufficientTypeFeedbackForKeyedLoad, \
- "Insufficient type feedback for keyed load") \
- V(kInsufficientTypeFeedbackForKeyedStore, \
- "Insufficient type feedback for keyed store") \
- V(kInsufficientTypeFeedbackForLHSOfBinaryOperation, \
- "Insufficient type feedback for LHS of binary operation") \
- V(kInsufficientTypeFeedbackForRHSOfBinaryOperation, \
- "Insufficient type feedback for RHS of binary operation") \
- V(kKeyIsNegative, "key is negative") \
- V(kLiteralsWereDisposed, "literals have been disposed") \
- V(kLostPrecision, "lost precision") \
- V(kLostPrecisionOrNaN, "lost precision or NaN") \
- V(kMementoFound, "memento found") \
- V(kMinusZero, "minus zero") \
- V(kNaN, "NaN") \
- V(kNegativeKeyEncountered, "Negative key encountered") \
- V(kNegativeValue, "negative value") \
- V(kNoCache, "no cache") \
- V(kNonStrictElementsInKeyedLoadGenericStub, \
- "non-strict elements in KeyedLoadGenericStub") \
- V(kNotADateObject, "not a date object") \
- V(kNotAHeapNumber, "not a heap number") \
- V(kNotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
- V(kNotAHeapNumberUndefined, "not a heap number/undefined") \
- V(kNotAJavaScriptObject, "not a JavaScript object") \
- V(kNotASmi, "not a Smi") \
- V(kNull, "null") \
- V(kOutOfBounds, "out of bounds") \
- V(kOutsideOfRange, "Outside of range") \
- V(kOverflow, "overflow") \
- V(kProxy, "proxy") \
- V(kReceiverWasAGlobalObject, "receiver was a global object") \
- V(kSmi, "Smi") \
- V(kTooManyArguments, "too many arguments") \
- V(kTooManyUndetectableTypes, "Too many undetectable types") \
- V(kTracingElementsTransitions, "Tracing elements transitions") \
- V(kTypeMismatchBetweenFeedbackAndConstant, \
- "Type mismatch between feedback and constant") \
- V(kUndefined, "undefined") \
- V(kUnexpectedCellContentsInConstantGlobalStore, \
- "Unexpected cell contents in constant global store") \
- V(kUnexpectedCellContentsInGlobalStore, \
- "Unexpected cell contents in global store") \
- V(kUnexpectedObject, "unexpected object") \
- V(kUnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
- V(kUninitializedBoilerplateInFastClone, \
- "Uninitialized boilerplate in fast clone") \
- V(kUninitializedBoilerplateLiterals, "Uninitialized boilerplate literals") \
- V(kUnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
- V(kUnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
- V(kUnknownMapInPolymorphicElementAccess, \
- "Unknown map in polymorphic element access") \
- V(kUnknownMap, "Unknown map") \
- V(kValueMismatch, "value mismatch") \
- V(kWrongInstanceType, "wrong instance type") \
- V(kWrongMap, "wrong map") \
- V(kUndefinedOrNullInForIn, "null or undefined in for-in") \
- V(kUndefinedOrNullInToObject, "null or undefined in ToObject")
-
class Deoptimizer : public Malloced {
public:
enum BailoutType { EAGER, LAZY, SOFT, kLastBailoutType = SOFT };
-#define DEOPT_MESSAGES_CONSTANTS(C, T) C,
- enum DeoptReason {
- DEOPT_MESSAGES_LIST(DEOPT_MESSAGES_CONSTANTS) kLastDeoptReason
+ enum class BailoutState {
+ NO_REGISTERS,
+ TOS_REGISTER,
};
-#undef DEOPT_MESSAGES_CONSTANTS
- static const char* GetDeoptReason(DeoptReason deopt_reason);
+
+ static const char* BailoutStateToString(BailoutState state) {
+ switch (state) {
+ case BailoutState::NO_REGISTERS:
+ return "NO_REGISTERS";
+ case BailoutState::TOS_REGISTER:
+ return "TOS_REGISTER";
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
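The deleted DEOPT_MESSAGES_LIST used the X-macro idiom: one list macro expanded twice, once to generate the enum constants and once to generate the message strings (the list now lives behind the new src/deoptimize-reason.h include as DeoptimizeReason). A minimal sketch of the idiom, with made-up entries:

    #define REASON_LIST(V)   \
      V(kNaN, "NaN")         \
      V(kOverflow, "overflow")

    #define DECLARE_ENUM(Constant, Text) Constant,
    enum class Reason { REASON_LIST(DECLARE_ENUM) kLastReason };
    #undef DECLARE_ENUM

    #define DECLARE_TEXT(Constant, Text) Text,
    static const char* kReasonTexts[] = {REASON_LIST(DECLARE_TEXT)};
    #undef DECLARE_TEXT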
struct DeoptInfo {
- DeoptInfo(SourcePosition position, const char* m, DeoptReason d)
- : position(position), mnemonic(m), deopt_reason(d), inlining_id(0) {}
+ DeoptInfo(SourcePosition position, DeoptimizeReason deopt_reason,
+ int deopt_id)
+ : position(position), deopt_reason(deopt_reason), deopt_id(deopt_id) {}
SourcePosition position;
- const char* mnemonic;
- DeoptReason deopt_reason;
- int inlining_id;
+ DeoptimizeReason deopt_reason;
+ int deopt_id;
+
+ static const int kNoDeoptId = -1;
};
static DeoptInfo GetDeoptInfo(Code* code, byte* from);
+ static int ComputeSourcePositionFromBaselineCode(SharedFunctionInfo* shared,
+ BailoutId node_id);
+ static int ComputeSourcePositionFromBytecodeArray(SharedFunctionInfo* shared,
+ BailoutId node_id);
+
struct JumpTableEntry : public ZoneObject {
inline JumpTableEntry(Address entry, const DeoptInfo& deopt_info,
Deoptimizer::BailoutType type, bool frame)
@@ -468,8 +409,6 @@ class Deoptimizer : public Malloced {
static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
int jsframe_index,
Isolate* isolate);
- static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
- Isolate* isolate);
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
@@ -718,6 +657,11 @@ class RegisterValues {
return registers_[n];
}
+ float GetFloatRegister(unsigned n) const {
+ DCHECK(n < arraysize(float_registers_));
+ return float_registers_[n];
+ }
+
double GetDoubleRegister(unsigned n) const {
DCHECK(n < arraysize(double_registers_));
return double_registers_[n];
@@ -728,12 +672,18 @@ class RegisterValues {
registers_[n] = value;
}
+ void SetFloatRegister(unsigned n, float value) {
+ DCHECK(n < arraysize(float_registers_));
+ float_registers_[n] = value;
+ }
+
void SetDoubleRegister(unsigned n, double value) {
DCHECK(n < arraysize(double_registers_));
double_registers_[n] = value;
}
intptr_t registers_[Register::kNumRegisters];
+ float float_registers_[FloatRegister::kMaxNumRegisters];
double double_registers_[DoubleRegister::kMaxNumRegisters];
};
@@ -957,11 +907,13 @@ class TranslationIterator BASE_EMBEDDED {
V(INT32_REGISTER) \
V(UINT32_REGISTER) \
V(BOOL_REGISTER) \
+ V(FLOAT_REGISTER) \
V(DOUBLE_REGISTER) \
V(STACK_SLOT) \
V(INT32_STACK_SLOT) \
V(UINT32_STACK_SLOT) \
V(BOOL_STACK_SLOT) \
+ V(FLOAT_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
V(LITERAL)
@@ -1003,11 +955,13 @@ class Translation BASE_EMBEDDED {
void StoreInt32Register(Register reg);
void StoreUint32Register(Register reg);
void StoreBoolRegister(Register reg);
+ void StoreFloatRegister(FloatRegister reg);
void StoreDoubleRegister(DoubleRegister reg);
void StoreStackSlot(int index);
void StoreInt32StackSlot(int index);
void StoreUint32StackSlot(int index);
void StoreBoolStackSlot(int index);
+ void StoreFloatStackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
void StoreArgumentsObject(bool args_known, int args_index, int args_length);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index ed9ca9ac66..1da917167f 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -4,11 +4,14 @@
#include "src/disassembler.h"
+#include <memory>
+
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/disasm.h"
+#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/snapshot/serializer-common.h"
#include "src/string-stream.h"
@@ -36,7 +39,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
code_ == NULL ? NULL : code_->GetIsolate()->builtins()->Lookup(pc);
if (name != NULL) {
- SNPrintF(v8_buffer_, "%s (%p)", name, pc);
+ SNPrintF(v8_buffer_, "%s (%p)", name, static_cast<void*>(pc));
return v8_buffer_.start();
}
@@ -44,7 +47,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
- SNPrintF(v8_buffer_, "%d (%p)", offs, pc);
+ SNPrintF(v8_buffer_, "%d (%p)", offs, static_cast<void*>(pc));
return v8_buffer_.start();
}
}
@@ -109,10 +112,9 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
// raw pointer embedded in code stream, e.g., jump table
byte* ptr = *reinterpret_cast<byte**>(pc);
- SNPrintF(decode_buffer,
- "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
- reinterpret_cast<intptr_t>(ptr),
- ptr - begin);
+ SNPrintF(
+ decode_buffer, "%08" V8PRIxPTR " jump table entry %4" PRIuS,
+ reinterpret_cast<intptr_t>(ptr), static_cast<size_t>(ptr - begin));
pc += sizeof(ptr);
} else {
decode_buffer[0] = '\0';
@@ -147,7 +149,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
// Instruction address and instruction offset.
- out.AddFormatted("%p %4d ", prev_pc, prev_pc - begin);
+ out.AddFormatted("%p %4" V8PRIdPTRDIFF " ", static_cast<void*>(prev_pc),
+ prev_pc - begin);
// Instruction.
out.AddFormatted("%s", decode_buffer.start());
@@ -169,22 +172,22 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
RelocInfo::Mode rmode = relocinfo.rmode();
- if (RelocInfo::IsPosition(rmode)) {
- if (RelocInfo::IsStatementPosition(rmode)) {
- out.AddFormatted(" ;; debug: statement %d", relocinfo.data());
- } else {
- out.AddFormatted(" ;; debug: position %d", relocinfo.data());
- }
+ if (rmode == RelocInfo::DEOPT_POSITION) {
+ out.AddFormatted(" ;; debug: deopt position '%d'",
+ static_cast<int>(relocinfo.data()));
} else if (rmode == RelocInfo::DEOPT_REASON) {
- Deoptimizer::DeoptReason reason =
- static_cast<Deoptimizer::DeoptReason>(relocinfo.data());
+ DeoptimizeReason reason =
+ static_cast<DeoptimizeReason>(relocinfo.data());
out.AddFormatted(" ;; debug: deopt reason '%s'",
- Deoptimizer::GetDeoptReason(reason));
+ DeoptimizeReasonToString(reason));
+ } else if (rmode == RelocInfo::DEOPT_ID) {
+ out.AddFormatted(" ;; debug: deopt index %d",
+ static_cast<int>(relocinfo.data()));
} else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
relocinfo.target_object()->ShortPrint(&accumulator);
- base::SmartArrayPointer<const char> obj_name = accumulator.ToCString();
+ std::unique_ptr<char[]> obj_name = accumulator.ToCString();
out.AddFormatted(" ;; object: %s", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
const char* reference_name = ref_encoder.NameOfAddress(
@@ -195,17 +198,15 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
Code::Kind kind = code->kind();
if (code->is_inline_cache_stub()) {
- if (kind == Code::LOAD_IC &&
- LoadICState::GetTypeofMode(code->extra_ic_state()) ==
- NOT_INSIDE_TYPEOF) {
- out.AddFormatted(" contextual,");
+ if (kind == Code::LOAD_GLOBAL_IC &&
+ LoadGlobalICState::GetTypeofMode(code->extra_ic_state()) ==
+ INSIDE_TYPEOF) {
+ out.AddFormatted(" inside typeof,");
}
- InlineCacheState ic_state = code->ic_state();
- out.AddFormatted(" %s, %s", Code::Kind2String(kind),
- Code::ICState2String(ic_state));
- if (ic_state == MONOMORPHIC) {
- Code::StubType type = code->type();
- out.AddFormatted(", %s", Code::StubType2String(type));
+ out.AddFormatted(" %s", Code::Kind2String(kind));
+ if (!IC::ICUseVector(kind)) {
+ InlineCacheState ic_state = IC::StateFromCode(code);
+ out.AddFormatted(" %s", Code::ICState2String(ic_state));
}
} else if (kind == Code::STUB || kind == Code::HANDLER) {
// Get the STUB key and extract major and minor key.
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/eh-frame.cc
new file mode 100644
index 0000000000..5f0f1c1b35
--- /dev/null
+++ b/deps/v8/src/eh-frame.cc
@@ -0,0 +1,629 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+
+#include <iomanip>
+#include <ostream>
+
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM) && \
+ !defined(V8_TARGET_ARCH_ARM64)
+
+// Placeholders for unsupported architectures.
+
+namespace v8 {
+namespace internal {
+
+const int EhFrameConstants::kCodeAlignmentFactor = 1;
+const int EhFrameConstants::kDataAlignmentFactor = 1;
+
+void EhFrameWriter::WriteReturnAddressRegisterCode() { UNIMPLEMENTED(); }
+
+void EhFrameWriter::WriteInitialStateInCie() { UNIMPLEMENTED(); }
+
+int EhFrameWriter::RegisterToDwarfCode(Register) {
+ UNIMPLEMENTED();
+ return -1;
+}
+
+#ifdef ENABLE_DISASSEMBLER
+
+const char* EhFrameDisassembler::DwarfRegisterCodeToString(int) {
+ UNIMPLEMENTED();
+ return nullptr;
+}
+
+#endif
+
+} // namespace internal
+} // namespace v8
+
+#endif
+
+namespace v8 {
+namespace internal {
+
+STATIC_CONST_MEMBER_DEFINITION const int
+ EhFrameConstants::kEhFrameTerminatorSize;
+STATIC_CONST_MEMBER_DEFINITION const int EhFrameConstants::kEhFrameHdrVersion;
+STATIC_CONST_MEMBER_DEFINITION const int EhFrameConstants::kEhFrameHdrSize;
+
+STATIC_CONST_MEMBER_DEFINITION const uint32_t EhFrameWriter::kInt32Placeholder;
+
+// static
+void EhFrameWriter::WriteEmptyEhFrame(std::ostream& stream) { // NOLINT
+ stream.put(EhFrameConstants::kEhFrameHdrVersion);
+
+ // .eh_frame pointer encoding specifier.
+ stream.put(EhFrameConstants::kSData4 | EhFrameConstants::kPcRel);
+
+ // Lookup table size encoding.
+ stream.put(EhFrameConstants::kUData4);
+
+ // Lookup table entries encoding.
+ stream.put(EhFrameConstants::kSData4 | EhFrameConstants::kDataRel);
+
+ // Dummy pointers and 0 entries in the lookup table.
+ char dummy_data[EhFrameConstants::kEhFrameHdrSize - 4] = {0};
+ stream.write(&dummy_data[0], sizeof(dummy_data));
+}
+
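A quick sanity check of the byte budget above, using kEhFrameHdrSize == 20 from eh-frame.h: one version byte, three encoding-specifier bytes, and 16 zeroed bytes covering the .eh_frame pointer, the table size, and the empty lookup table.

    // 1 (version) + 3 (encoding specifiers) + (20 - 4) dummy bytes == 20.
    static_assert(1 + 3 + 16 == 20, "empty .eh_frame_hdr fills kEhFrameHdrSize");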
+EhFrameWriter::EhFrameWriter(Zone* zone)
+ : cie_size_(0),
+ last_pc_offset_(0),
+ writer_state_(InternalState::kUndefined),
+ base_register_(no_reg),
+ base_offset_(0),
+ eh_frame_buffer_(zone) {}
+
+void EhFrameWriter::Initialize() {
+ DCHECK(writer_state_ == InternalState::kUndefined);
+ eh_frame_buffer_.reserve(128);
+ writer_state_ = InternalState::kInitialized;
+ WriteCie();
+ WriteFdeHeader();
+}
+
+void EhFrameWriter::WriteCie() {
+ static const int kCIEIdentifier = 0;
+ static const int kCIEVersion = 3;
+ static const int kAugmentationDataSize = 2;
+ static const byte kAugmentationString[] = {'z', 'L', 'R', 0};
+
+ // Placeholder for the size of the CIE.
+ int size_offset = eh_frame_offset();
+ WriteInt32(kInt32Placeholder);
+
+ // CIE identifier and version.
+ int record_start_offset = eh_frame_offset();
+ WriteInt32(kCIEIdentifier);
+ WriteByte(kCIEVersion);
+
+ // Augmentation data contents descriptor: LSDA and FDE encoding.
+ WriteBytes(&kAugmentationString[0], sizeof(kAugmentationString));
+
+ // Alignment factors.
+ WriteSLeb128(EhFrameConstants::kCodeAlignmentFactor);
+ WriteSLeb128(EhFrameConstants::kDataAlignmentFactor);
+
+ WriteReturnAddressRegisterCode();
+
+ // Augmentation data.
+ WriteULeb128(kAugmentationDataSize);
+ // No language-specific data area (LSDA).
+ WriteByte(EhFrameConstants::kOmit);
+ // FDE pointers encoding.
+ WriteByte(EhFrameConstants::kSData4 | EhFrameConstants::kPcRel);
+
+ // Write directives to build the initial state of the unwinding table.
+ DCHECK_EQ(eh_frame_offset() - size_offset,
+ EhFrameConstants::kInitialStateOffsetInCie);
+ WriteInitialStateInCie();
+
+ WritePaddingToAlignedSize(eh_frame_offset() - record_start_offset);
+
+ int record_end_offset = eh_frame_offset();
+ int encoded_cie_size = record_end_offset - record_start_offset;
+ cie_size_ = record_end_offset - size_offset;
+
+ // Patch the size of the CIE now that we know it.
+ PatchInt32(size_offset, encoded_cie_size);
+}
+
+void EhFrameWriter::WriteFdeHeader() {
+ DCHECK_NE(cie_size_, 0);
+
+ // Placeholder for size of the FDE. Will be filled in Finish().
+ DCHECK_EQ(eh_frame_offset(), fde_offset());
+ WriteInt32(kInt32Placeholder);
+
+ // Backwards offset to the CIE.
+ WriteInt32(cie_size_ + kInt32Size);
+
+ // Placeholder for pointer to procedure. Will be filled in Finish().
+ DCHECK_EQ(eh_frame_offset(), GetProcedureAddressOffset());
+ WriteInt32(kInt32Placeholder);
+
+ // Placeholder for size of the procedure. Will be filled in Finish().
+ DCHECK_EQ(eh_frame_offset(), GetProcedureSizeOffset());
+ WriteInt32(kInt32Placeholder);
+
+ // No augmentation data.
+ WriteByte(0);
+}
+
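Both WriteCie() and WriteFdeHeader() reserve length fields with kInt32Placeholder and backfill them via PatchInt32() once the record size is known. A standalone sketch of that reserve-and-patch idiom, using a plain byte vector rather than the V8 types:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    void AppendSizedRecord(std::vector<uint8_t>* buf) {
      size_t size_offset = buf->size();
      buf->insert(buf->end(), 4, 0xde);  // placeholder for the 32-bit size field
      buf->push_back(0x2a);              // ... stream the record body ...
      uint32_t size = static_cast<uint32_t>(buf->size() - size_offset - 4);
      std::memcpy(buf->data() + size_offset, &size, sizeof(size));  // patch it
    }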
+void EhFrameWriter::WriteEhFrameHdr(int code_size) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+
+ //
+ // In order to calculate offsets in the .eh_frame_hdr, we must know the layout
+ // of the DSO generated by perf inject, which is assumed to be the following:
+ //
+ // | ... | |
+ // +---------------+ <-- (F) --- | Larger offsets in file
+ // | | ^ |
+ // | Instructions | | .text v
+ // | | v
+ // +---------------+ <-- (E) ---
+ // |///////////////|
+ // |////Padding////|
+ // |///////////////|
+ // +---------------+ <-- (D) ---
+ // | | ^
+ // | CIE | |
+ // | | |
+ // +---------------+ <-- (C) |
+ // | | | .eh_frame
+ // | FDE | |
+ // | | |
+ // +---------------+ |
+ // | terminator | v
+ // +---------------+ <-- (B) ---
+ // | version | ^
+ // +---------------+ |
+ // | encoding | |
+ // | specifiers | |
+ // +---------------+ <---(A) | .eh_frame_hdr
+ // | offset to | |
+ // | .eh_frame | |
+ // +---------------+ |
+ // | ... | ...
+ //
+ // (F) is aligned to a 16-byte boundary.
+ // (D) is aligned to an 8-byte boundary.
+ // (B) is aligned to a 4-byte boundary.
+ // (C), (E) and (A) have no alignment requirements.
+ //
+ // The distance between (A) and (B) is 4 bytes.
+ //
+ // The size of the FDE is required to be a multiple of the pointer size, which
+ // means that (B) will be naturally aligned to a 4-byte boundary on all the
+ // architectures we support.
+ //
+ // Because (E) has no alignment requirements, there is padding between (E) and
+ // (D). (F) is aligned to a 16-byte boundary, and thus to an 8-byte one as well.
+ //
+
+ int eh_frame_size = eh_frame_offset();
+
+ WriteByte(EhFrameConstants::kEhFrameHdrVersion);
+
+ // .eh_frame pointer encoding specifier.
+ WriteByte(EhFrameConstants::kSData4 | EhFrameConstants::kPcRel);
+ // Lookup table size encoding specifier.
+ WriteByte(EhFrameConstants::kUData4);
+ // Lookup table entries encoding specifier.
+ WriteByte(EhFrameConstants::kSData4 | EhFrameConstants::kDataRel);
+
+ // Pointer to .eh_frame, relative to this offset (A -> D in the diagram).
+ WriteInt32(-(eh_frame_size + EhFrameConstants::kFdeVersionSize +
+ EhFrameConstants::kFdeEncodingSpecifiersSize));
+
+ // Number of entries in the LUT, one for the only routine.
+ WriteInt32(1);
+
+ // Pointer to the start of the routine, relative to the beginning of the
+ // .eh_frame_hdr (B -> F in the diagram).
+ WriteInt32(-(RoundUp(code_size, 8) + eh_frame_size));
+
+ // Pointer to the start of the associated FDE, relative to the start of the
+ // .eh_frame_hdr (B -> C in the diagram).
+ WriteInt32(-(eh_frame_size - cie_size_));
+
+ DCHECK_EQ(eh_frame_offset() - eh_frame_size,
+ EhFrameConstants::kEhFrameHdrSize);
+}
+
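A worked example of the relative offsets computed above, under assumed sizes (eh_frame_size = 64 and code_size = 100 are illustrative only):

    const int eh_frame_size = 64, code_size = 100;  // assumed for the example
    // A -> D: back over the version/specifier prefix plus the whole .eh_frame.
    const int a_to_d = -(eh_frame_size + 1 + 3);    // -(64 + 1 + 3) == -68
    // B -> F: back over the padded code; RoundUp(100, 8) == 104.
    const int b_to_f = -(104 + eh_frame_size);      // -(104 + 64) == -168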
+void EhFrameWriter::WritePaddingToAlignedSize(int unpadded_size) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_GE(unpadded_size, 0);
+
+ int padding_size = RoundUp(unpadded_size, kPointerSize) - unpadded_size;
+
+ byte nop = static_cast<byte>(EhFrameConstants::DwarfOpcodes::kNop);
+ static const byte kPadding[] = {nop, nop, nop, nop, nop, nop, nop, nop};
+ DCHECK_LE(padding_size, static_cast<int>(sizeof(kPadding)));
+ WriteBytes(&kPadding[0], padding_size);
+}
+
+void EhFrameWriter::AdvanceLocation(int pc_offset) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_GE(pc_offset, last_pc_offset_);
+ uint32_t delta = pc_offset - last_pc_offset_;
+
+ DCHECK_EQ(delta % EhFrameConstants::kCodeAlignmentFactor, 0);
+ uint32_t factored_delta = delta / EhFrameConstants::kCodeAlignmentFactor;
+
+ if (factored_delta <= EhFrameConstants::kLocationMask) {
+ WriteByte((EhFrameConstants::kLocationTag
+ << EhFrameConstants::kLocationMaskSize) |
+ (factored_delta & EhFrameConstants::kLocationMask));
+ } else if (factored_delta <= kMaxUInt8) {
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1);
+ WriteByte(factored_delta);
+ } else if (factored_delta <= kMaxUInt16) {
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kAdvanceLoc2);
+ WriteInt16(factored_delta);
+ } else {
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kAdvanceLoc4);
+ WriteInt32(factored_delta);
+ }
+
+ last_pc_offset_ = pc_offset;
+}
+
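AdvanceLocation() picks the most compact DWARF advance encoding that fits the factored delta. With kLocationTag == 1 and the 6-bit mask from eh-frame.h, a delta of 0x2a packs into a single byte:

    const uint32_t factored_delta = 0x2a;                       // fits in 6 bits
    const uint8_t packed = (1 << 6) | (factored_delta & 0x3f);  // == 0x6a
    // Larger deltas fall back to kAdvanceLoc1 (<= 0xff), kAdvanceLoc2
    // (<= 0xffff), or kAdvanceLoc4 with an explicit operand.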
+void EhFrameWriter::SetBaseAddressOffset(int base_offset) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_GE(base_offset, 0);
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kDefCfaOffset);
+ WriteULeb128(base_offset);
+ base_offset_ = base_offset;
+}
+
+void EhFrameWriter::SetBaseAddressRegister(Register base_register) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ int code = RegisterToDwarfCode(base_register);
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kDefCfaRegister);
+ WriteULeb128(code);
+ base_register_ = base_register;
+}
+
+void EhFrameWriter::SetBaseAddressRegisterAndOffset(Register base_register,
+ int base_offset) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_GE(base_offset, 0);
+ int code = RegisterToDwarfCode(base_register);
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kDefCfa);
+ WriteULeb128(code);
+ WriteULeb128(base_offset);
+ base_offset_ = base_offset;
+ base_register_ = base_register;
+}
+
+void EhFrameWriter::RecordRegisterSavedToStack(int register_code, int offset) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_EQ(offset % EhFrameConstants::kDataAlignmentFactor, 0);
+ int factored_offset = offset / EhFrameConstants::kDataAlignmentFactor;
+ if (factored_offset >= 0) {
+ DCHECK_LE(register_code, EhFrameConstants::kSavedRegisterMask);
+ WriteByte((EhFrameConstants::kSavedRegisterTag
+ << EhFrameConstants::kSavedRegisterMaskSize) |
+ (register_code & EhFrameConstants::kSavedRegisterMask));
+ WriteULeb128(factored_offset);
+ } else {
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kOffsetExtendedSf);
+ WriteULeb128(register_code);
+ WriteSLeb128(factored_offset);
+ }
+}
+
+void EhFrameWriter::RecordRegisterNotModified(Register name) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ WriteOpcode(EhFrameConstants::DwarfOpcodes::kSameValue);
+ WriteULeb128(RegisterToDwarfCode(name));
+}
+
+void EhFrameWriter::RecordRegisterFollowsInitialRule(Register name) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ int code = RegisterToDwarfCode(name);
+ DCHECK_LE(code, EhFrameConstants::kFollowInitialRuleMask);
+ WriteByte((EhFrameConstants::kFollowInitialRuleTag
+ << EhFrameConstants::kFollowInitialRuleMaskSize) |
+ (code & EhFrameConstants::kFollowInitialRuleMask));
+}
+
+void EhFrameWriter::Finish(int code_size) {
+ DCHECK(writer_state_ == InternalState::kInitialized);
+ DCHECK_GE(eh_frame_offset(), cie_size_);
+
+ DCHECK_GE(eh_frame_offset(), fde_offset() + kInt32Size);
+ WritePaddingToAlignedSize(eh_frame_offset() - fde_offset() - kInt32Size);
+
+ // Write the size of the FDE now that we know it.
+ // The encoded size does not include the size field itself.
+ int encoded_fde_size = eh_frame_offset() - fde_offset() - kInt32Size;
+ PatchInt32(fde_offset(), encoded_fde_size);
+
+ // Write size and offset to procedure.
+ PatchInt32(GetProcedureAddressOffset(),
+ -(RoundUp(code_size, 8) + GetProcedureAddressOffset()));
+ PatchInt32(GetProcedureSizeOffset(), code_size);
+
+ // Terminate the .eh_frame.
+ static const byte kTerminator[EhFrameConstants::kEhFrameTerminatorSize] = {0};
+ WriteBytes(&kTerminator[0], EhFrameConstants::kEhFrameTerminatorSize);
+
+ WriteEhFrameHdr(code_size);
+
+ writer_state_ = InternalState::kFinalized;
+}
+
+void EhFrameWriter::GetEhFrame(CodeDesc* desc) {
+ DCHECK(writer_state_ == InternalState::kFinalized);
+ desc->unwinding_info_size = static_cast<int>(eh_frame_buffer_.size());
+ desc->unwinding_info = eh_frame_buffer_.data();
+}
+
+void EhFrameWriter::WriteULeb128(uint32_t value) {
+ do {
+ byte chunk = value & 0x7f;
+ value >>= 7;
+ if (value != 0) chunk |= 0x80;
+ WriteByte(chunk);
+ } while (value != 0);
+}
+
+void EhFrameWriter::WriteSLeb128(int32_t value) {
+ static const int kSignBitMask = 0x40;
+ bool done;
+ do {
+ byte chunk = value & 0x7f;
+ value >>= 7;
+ done = ((value == 0) && ((chunk & kSignBitMask) == 0)) ||
+ ((value == -1) && ((chunk & kSignBitMask) != 0));
+ if (!done) chunk |= 0x80;
+ WriteByte(chunk);
+ } while (!done);
+}
+
+uint32_t EhFrameIterator::GetNextULeb128() {
+ int size = 0;
+ uint32_t result = DecodeULeb128(next_, &size);
+ DCHECK_LE(next_ + size, end_);
+ next_ += size;
+ return result;
+}
+
+int32_t EhFrameIterator::GetNextSLeb128() {
+ int size = 0;
+ int32_t result = DecodeSLeb128(next_, &size);
+ DCHECK_LE(next_ + size, end_);
+ next_ += size;
+ return result;
+}
+
+// static
+uint32_t EhFrameIterator::DecodeULeb128(const byte* encoded,
+ int* encoded_size) {
+ const byte* current = encoded;
+ uint32_t result = 0;
+ int shift = 0;
+
+ do {
+ DCHECK_LT(shift, 8 * static_cast<int>(sizeof(result)));
+ result |= (*current & 0x7f) << shift;
+ shift += 7;
+ } while (*current++ >= 128);
+
+ DCHECK_NOT_NULL(encoded_size);
+ *encoded_size = static_cast<int>(current - encoded);
+
+ return result;
+}
+
+// static
+int32_t EhFrameIterator::DecodeSLeb128(const byte* encoded, int* encoded_size) {
+ static const byte kSignBitMask = 0x40;
+
+ const byte* current = encoded;
+ int32_t result = 0;
+ int shift = 0;
+ byte chunk;
+
+ do {
+ chunk = *current++;
+ DCHECK_LT(shift, 8 * static_cast<int>(sizeof(result)));
+ result |= (chunk & 0x7f) << shift;
+ shift += 7;
+ } while (chunk >= 128);
+
+ // Sign extend the result if the last chunk has the sign bit set.
+ if (chunk & kSignBitMask) result |= (~0ull) << shift;
+
+ DCHECK_NOT_NULL(encoded_size);
+ *encoded_size = static_cast<int>(current - encoded);
+
+ return result;
+}
+
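A worked ULEB128 example for the encoder/decoder pair above, using the classic DWARF test value 624485: seven bits per byte, least-significant group first, continuation bit set on every byte but the last.

    // 624485 == 0b100110'0001110'1100101 -> groups 0x26, 0x0e, 0x65 (MSB first).
    const uint8_t encoded[] = {0xe5, 0x8e, 0x26};  // LSB group first, high bits set
    // Decoding: 0x65 | (0x0e << 7) | (0x26 << 14) == 101 + 1792 + 622592 == 624485.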
+#ifdef ENABLE_DISASSEMBLER
+
+namespace {
+
+class StreamModifiersScope final {
+ public:
+ explicit StreamModifiersScope(std::ostream* stream)
+ : stream_(stream), flags_(stream->flags()) {}
+ ~StreamModifiersScope() { stream_->flags(flags_); }
+
+ private:
+ std::ostream* stream_;
+ std::ios::fmtflags flags_;
+};
+
+} // namespace
+
+// static
+void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
+ const byte* start,
+ const byte* end) {
+ StreamModifiersScope modifiers_scope(&stream);
+
+ EhFrameIterator eh_frame_iterator(start, end);
+ uint32_t offset_in_procedure = 0;
+
+ while (!eh_frame_iterator.Done()) {
+ stream << eh_frame_iterator.current_address() << " ";
+
+ byte bytecode = eh_frame_iterator.GetNextByte();
+
+ if (((bytecode >> EhFrameConstants::kLocationMaskSize) & 0xff) ==
+ EhFrameConstants::kLocationTag) {
+ int value = (bytecode & EhFrameConstants::kLocationMask) *
+ EhFrameConstants::kCodeAlignmentFactor;
+ offset_in_procedure += value;
+ stream << "| pc_offset=" << offset_in_procedure << " (delta=" << value
+ << ")\n";
+ continue;
+ }
+
+ if (((bytecode >> EhFrameConstants::kSavedRegisterMaskSize) & 0xff) ==
+ EhFrameConstants::kSavedRegisterTag) {
+ int32_t decoded_offset = eh_frame_iterator.GetNextULeb128();
+ stream << "| " << DwarfRegisterCodeToString(
+ bytecode & EhFrameConstants::kLocationMask)
+ << " saved at base" << std::showpos
+ << decoded_offset * EhFrameConstants::kDataAlignmentFactor
+ << std::noshowpos << '\n';
+ continue;
+ }
+
+ if (((bytecode >> EhFrameConstants::kFollowInitialRuleMaskSize) & 0xff) ==
+ EhFrameConstants::kFollowInitialRuleTag) {
+ stream << "| " << DwarfRegisterCodeToString(
+ bytecode & EhFrameConstants::kLocationMask)
+ << " follows rule in CIE\n";
+ continue;
+ }
+
+ switch (static_cast<EhFrameConstants::DwarfOpcodes>(bytecode)) {
+ case EhFrameConstants::DwarfOpcodes::kOffsetExtendedSf: {
+ stream << "| "
+ << DwarfRegisterCodeToString(eh_frame_iterator.GetNextULeb128());
+ int32_t decoded_offset = eh_frame_iterator.GetNextSLeb128();
+ stream << " saved at base" << std::showpos
+ << decoded_offset * EhFrameConstants::kDataAlignmentFactor
+ << std::noshowpos << '\n';
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kAdvanceLoc1: {
+ int value = eh_frame_iterator.GetNextByte() *
+ EhFrameConstants::kCodeAlignmentFactor;
+ offset_in_procedure += value;
+ stream << "| pc_offset=" << offset_in_procedure << " (delta=" << value
+ << ")\n";
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kAdvanceLoc2: {
+ int value = eh_frame_iterator.GetNextUInt16() *
+ EhFrameConstants::kCodeAlignmentFactor;
+ offset_in_procedure += value;
+ stream << "| pc_offset=" << offset_in_procedure << " (delta=" << value
+ << ")\n";
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kAdvanceLoc4: {
+ int value = eh_frame_iterator.GetNextUInt32() *
+ EhFrameConstants::kCodeAlignmentFactor;
+ offset_in_procedure += value;
+ stream << "| pc_offset=" << offset_in_procedure << " (delta=" << value
+ << ")\n";
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kDefCfa: {
+ uint32_t base_register = eh_frame_iterator.GetNextULeb128();
+ uint32_t base_offset = eh_frame_iterator.GetNextULeb128();
+ stream << "| base_register=" << DwarfRegisterCodeToString(base_register)
+ << ", base_offset=" << base_offset << '\n';
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kDefCfaOffset: {
+ stream << "| base_offset=" << eh_frame_iterator.GetNextULeb128()
+ << '\n';
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kDefCfaRegister: {
+ stream << "| base_register="
+ << DwarfRegisterCodeToString(eh_frame_iterator.GetNextULeb128())
+ << '\n';
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kSameValue: {
+ stream << "| "
+ << DwarfRegisterCodeToString(eh_frame_iterator.GetNextULeb128())
+ << " not modified from previous frame\n";
+ break;
+ }
+ case EhFrameConstants::DwarfOpcodes::kNop:
+ stream << "| nop\n";
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ }
+}
+
+void EhFrameDisassembler::DisassembleToStream(std::ostream& stream) { // NOLINT
+ // The encoded CIE size does not include the size field itself.
+ const int cie_size = ReadUnalignedUInt32(start_) + kInt32Size;
+ const int fde_offset = cie_size;
+
+ const byte* cie_directives_start =
+ start_ + EhFrameConstants::kInitialStateOffsetInCie;
+ const byte* cie_directives_end = start_ + cie_size;
+ DCHECK_LE(cie_directives_start, cie_directives_end);
+
+ stream << reinterpret_cast<const void*>(start_) << " .eh_frame: CIE\n";
+ DumpDwarfDirectives(stream, cie_directives_start, cie_directives_end);
+
+ const byte* procedure_offset_address =
+ start_ + fde_offset + EhFrameConstants::kProcedureAddressOffsetInFde;
+ int32_t procedure_offset =
+ ReadUnalignedValue<int32_t>(procedure_offset_address);
+
+ const byte* procedure_size_address =
+ start_ + fde_offset + EhFrameConstants::kProcedureSizeOffsetInFde;
+ uint32_t procedure_size = ReadUnalignedUInt32(procedure_size_address);
+
+ const byte* fde_start = start_ + fde_offset;
+ stream << reinterpret_cast<const void*>(fde_start) << " .eh_frame: FDE\n"
+ << reinterpret_cast<const void*>(procedure_offset_address)
+ << " | procedure_offset=" << procedure_offset << '\n'
+ << reinterpret_cast<const void*>(procedure_size_address)
+ << " | procedure_size=" << procedure_size << '\n';
+
+ const int fde_directives_offset = fde_offset + 4 * kInt32Size + 1;
+
+ const byte* fde_directives_start = start_ + fde_directives_offset;
+ const byte* fde_directives_end = end_ - EhFrameConstants::kEhFrameHdrSize -
+ EhFrameConstants::kEhFrameTerminatorSize;
+ DCHECK_LE(fde_directives_start, fde_directives_end);
+
+ DumpDwarfDirectives(stream, fde_directives_start, fde_directives_end);
+
+ const byte* fde_terminator_start = fde_directives_end;
+ stream << reinterpret_cast<const void*>(fde_terminator_start)
+ << " .eh_frame: terminator\n";
+
+ const byte* eh_frame_hdr_start =
+ fde_terminator_start + EhFrameConstants::kEhFrameTerminatorSize;
+ stream << reinterpret_cast<const void*>(eh_frame_hdr_start)
+ << " .eh_frame_hdr\n";
+}
+
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/eh-frame.h
new file mode 100644
index 0000000000..6e703d429a
--- /dev/null
+++ b/deps/v8/src/eh-frame.h
@@ -0,0 +1,297 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EH_FRAME_H_
+#define V8_EH_FRAME_H_
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class EhFrameConstants final : public AllStatic {
+ public:
+ enum class DwarfOpcodes : byte {
+ kNop = 0x00,
+ kAdvanceLoc1 = 0x02,
+ kAdvanceLoc2 = 0x03,
+ kAdvanceLoc4 = 0x04,
+ kSameValue = 0x08,
+ kDefCfa = 0x0c,
+ kDefCfaRegister = 0x0d,
+ kDefCfaOffset = 0x0e,
+ kOffsetExtendedSf = 0x11,
+ };
+
+ enum DwarfEncodingSpecifiers : byte {
+ kUData4 = 0x03,
+ kSData4 = 0x0b,
+ kPcRel = 0x10,
+ kDataRel = 0x30,
+ kOmit = 0xff,
+ };
+
+ static const int kLocationTag = 1;
+ static const int kLocationMask = 0x3f;
+ static const int kLocationMaskSize = 6;
+
+ static const int kSavedRegisterTag = 2;
+ static const int kSavedRegisterMask = 0x3f;
+ static const int kSavedRegisterMaskSize = 6;
+
+ static const int kFollowInitialRuleTag = 3;
+ static const int kFollowInitialRuleMask = 0x3f;
+ static const int kFollowInitialRuleMaskSize = 6;
+
+ static const int kProcedureAddressOffsetInFde = 2 * kInt32Size;
+ static const int kProcedureSizeOffsetInFde = 3 * kInt32Size;
+
+ static const int kInitialStateOffsetInCie = 19;
+ static const int kEhFrameTerminatorSize = 4;
+
+ // Defined in eh-frame-<arch>.cc
+ static const int kCodeAlignmentFactor;
+ static const int kDataAlignmentFactor;
+
+ static const int kFdeVersionSize = 1;
+ static const int kFdeEncodingSpecifiersSize = 3;
+
+ static const int kEhFrameHdrVersion = 1;
+ static const int kEhFrameHdrSize = 20;
+};
+
+class EhFrameWriter {
+ public:
+ explicit EhFrameWriter(Zone* zone);
+
+ // The empty frame is a hack to trigger fp-based unwinding in Linux perf when
+ // it has been compiled with libunwind support and processes DWARF-based call
+ // graphs.
+ //
+ // It is effectively a valid eh_frame_hdr with an empty lookup table.
+ //
+ static void WriteEmptyEhFrame(std::ostream& stream); // NOLINT
+
+ // Write the CIE and FDE header. Call it before any other method.
+ void Initialize();
+
+ void AdvanceLocation(int pc_offset);
+
+ // The <base_address> is the one to which all <offset>s in
+ // RecordRegisterSavedToStack directives are relative. It is given by
+ // <base_register> + <base_offset>.
+ //
+ // The <base_offset> must be positive or 0.
+ //
+ void SetBaseAddressRegister(Register base_register);
+ void SetBaseAddressOffset(int base_offset);
+ void IncreaseBaseAddressOffset(int base_delta) {
+ SetBaseAddressOffset(base_offset_ + base_delta);
+ }
+ void SetBaseAddressRegisterAndOffset(Register base_register, int base_offset);
+
+ // Register saved at location <base_address> + <offset>.
+ // The <offset> must be a multiple of EhFrameConstants::kDataAlignmentFactor.
+ void RecordRegisterSavedToStack(Register name, int offset) {
+ RecordRegisterSavedToStack(RegisterToDwarfCode(name), offset);
+ }
+
+ // The register has not been modified from the previous frame.
+ void RecordRegisterNotModified(Register name);
+
+ // The register follows the rule defined in the CIE.
+ void RecordRegisterFollowsInitialRule(Register name);
+
+ void Finish(int code_size);
+
+ // Remember to call Finish() before GetEhFrame().
+ //
+ // The EhFrameWriter instance owns the buffer pointed to by
+ // CodeDesc::unwinding_info, and must outlive any use of the CodeDesc.
+ //
+ void GetEhFrame(CodeDesc* desc);
+
+ int last_pc_offset() const { return last_pc_offset_; }
+ Register base_register() const { return base_register_; }
+ int base_offset() const { return base_offset_; }
+
+ private:
+ enum class InternalState { kUndefined, kInitialized, kFinalized };
+
+ static const uint32_t kInt32Placeholder = 0xdeadc0de;
+
+ void WriteSLeb128(int32_t value);
+ void WriteULeb128(uint32_t value);
+
+ void WriteByte(byte value) { eh_frame_buffer_.push_back(value); }
+ void WriteOpcode(EhFrameConstants::DwarfOpcodes opcode) {
+ WriteByte(static_cast<byte>(opcode));
+ }
+ void WriteBytes(const byte* start, int size) {
+ eh_frame_buffer_.insert(eh_frame_buffer_.end(), start, start + size);
+ }
+ void WriteInt16(uint16_t value) {
+ WriteBytes(reinterpret_cast<const byte*>(&value), sizeof(value));
+ }
+ void WriteInt32(uint32_t value) {
+ WriteBytes(reinterpret_cast<const byte*>(&value), sizeof(value));
+ }
+ void PatchInt32(int base_offset, uint32_t value) {
+ DCHECK_EQ(ReadUnalignedUInt32(eh_frame_buffer_.data() + base_offset),
+ kInt32Placeholder);
+ DCHECK_LT(base_offset + kInt32Size, eh_frame_offset());
+ WriteUnalignedUInt32(eh_frame_buffer_.data() + base_offset, value);
+ }
+
+ // Write the common information entry, which includes encoding specifiers,
+ // alignment factors, the return address (pseudo) register code and the
+ // directives to construct the initial state of the unwinding table.
+ void WriteCie();
+
+ // Write the header of the frame description entry (FDE), containing a
+ // pointer to the corresponding CIE and the position and size of the
+ // associated routine.
+ void WriteFdeHeader();
+
+ // Write the contents of the .eh_frame_hdr section, including encoding
+ // specifiers and the routine => FDE lookup table.
+ void WriteEhFrameHdr(int code_size);
+
+ // Write nops until the size reaches a multiple of kPointerSize bytes.
+ void WritePaddingToAlignedSize(int unpadded_size);
+
+ // Internal version that directly accepts a DWARF register code, needed for
+ // handling pseudo-registers on some platforms.
+ void RecordRegisterSavedToStack(int register_code, int offset);
+
+ int GetProcedureAddressOffset() const {
+ return fde_offset() + EhFrameConstants::kProcedureAddressOffsetInFde;
+ }
+
+ int GetProcedureSizeOffset() const {
+ return fde_offset() + EhFrameConstants::kProcedureSizeOffsetInFde;
+ }
+
+ int eh_frame_offset() const {
+ return static_cast<int>(eh_frame_buffer_.size());
+ }
+
+ int fde_offset() const { return cie_size_; }
+
+ // Platform specific functions implemented in eh-frame-<arch>.cc
+
+ static int RegisterToDwarfCode(Register name);
+
+ // Write directives to build the initial state in the CIE.
+ void WriteInitialStateInCie();
+
+ // Write the return address (pseudo) register code.
+ void WriteReturnAddressRegisterCode();
+
+ int cie_size_;
+ int last_pc_offset_;
+ InternalState writer_state_;
+ Register base_register_;
+ int base_offset_;
+ ZoneVector<byte> eh_frame_buffer_;
+
+ DISALLOW_COPY_AND_ASSIGN(EhFrameWriter);
+};
+
+class EhFrameIterator {
+ public:
+ EhFrameIterator(const byte* start, const byte* end)
+ : start_(start), next_(start), end_(end) {
+ DCHECK_LE(start, end);
+ }
+
+ void SkipCie() {
+ DCHECK_EQ(next_, start_);
+ next_ += ReadUnalignedUInt32(next_) + kInt32Size;
+ }
+
+ void SkipToFdeDirectives() {
+ SkipCie();
+ // Skip the FDE header.
+ Skip(kDirectivesOffsetInFde);
+ }
+
+ void Skip(int how_many) {
+ DCHECK_GE(how_many, 0);
+ next_ += how_many;
+ DCHECK_LE(next_, end_);
+ }
+
+ uint32_t GetNextUInt32() { return GetNextValue<uint32_t>(); }
+ uint16_t GetNextUInt16() { return GetNextValue<uint16_t>(); }
+ byte GetNextByte() { return GetNextValue<byte>(); }
+ EhFrameConstants::DwarfOpcodes GetNextOpcode() {
+ return static_cast<EhFrameConstants::DwarfOpcodes>(GetNextByte());
+ }
+
+ uint32_t GetNextULeb128();
+ int32_t GetNextSLeb128();
+
+ bool Done() const {
+ DCHECK_LE(next_, end_);
+ return next_ == end_;
+ }
+
+ int GetCurrentOffset() const {
+ DCHECK_GE(next_, start_);
+ return static_cast<int>(next_ - start_);
+ }
+
+ int GetBufferSize() { return static_cast<int>(end_ - start_); }
+
+ const void* current_address() const {
+ return reinterpret_cast<const void*>(next_);
+ }
+
+ private:
+ static const int kDirectivesOffsetInFde = 4 * kInt32Size + 1;
+
+ static uint32_t DecodeULeb128(const byte* encoded, int* encoded_size);
+ static int32_t DecodeSLeb128(const byte* encoded, int* encoded_size);
+
+ template <typename T>
+ T GetNextValue() {
+ T result;
+ DCHECK_LE(next_ + sizeof(result), end_);
+ result = ReadUnalignedValue<T>(next_);
+ next_ += sizeof(result);
+ return result;
+ }
+
+ const byte* start_;
+ const byte* next_;
+ const byte* end_;
+};
+
+#ifdef ENABLE_DISASSEMBLER
+
+class EhFrameDisassembler final {
+ public:
+ EhFrameDisassembler(const byte* start, const byte* end)
+ : start_(start), end_(end) {
+ DCHECK_LT(start, end);
+ }
+
+ void DisassembleToStream(std::ostream& stream); // NOLINT
+
+ private:
+ static void DumpDwarfDirectives(std::ostream& stream, // NOLINT
+ const byte* start, const byte* end);
+
+ static const char* DwarfRegisterCodeToString(int code);
+
+ const byte* start_;
+ const byte* end_;
+
+ DISALLOW_COPY_AND_ASSIGN(EhFrameDisassembler);
+};
+
+#endif
+
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 288c60e305..56d800168d 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -140,14 +140,11 @@ void CopyObjectToObjectElements(FixedArrayBase* from_base,
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedArray* to = FixedArray::cast(to_base);
- DCHECK(IsFastSmiOrObjectElementsKind(from_kind) ||
- from_kind == FAST_STRING_WRAPPER_ELEMENTS);
+ DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
WriteBarrierMode write_barrier_mode =
- ((IsFastObjectElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) ||
- from_kind == FAST_STRING_WRAPPER_ELEMENTS)
+ (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind))
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
for (int i = 0; i < copy_size; i++) {
@@ -192,7 +189,7 @@ static void CopyDictionaryToObjectElements(
int entry = from->FindEntry(i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
Object* value = from->ValueAt(entry);
- DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsTheHole(from->GetIsolate()));
to->set(i + to_start, value, write_barrier_mode);
} else {
to->set_the_hole(i + to_start);
@@ -355,7 +352,7 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
- DCHECK(!smi->IsTheHole());
+ DCHECK(!smi->IsTheHole(from->GetIsolate()));
to->set(to_start, Smi::cast(smi)->value());
}
}
@@ -448,6 +445,69 @@ static void TraceTopFrame(Isolate* isolate) {
JavaScriptFrame::PrintTop(isolate, stdout, false, true);
}
+static void SortIndices(
+ Handle<FixedArray> indices, uint32_t sort_size,
+ WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
+ struct {
+ bool operator()(Object* a, Object* b) {
+ if (a->IsSmi() || !a->IsUndefined(HeapObject::cast(a)->GetIsolate())) {
+ if (!b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate())) {
+ return true;
+ }
+ return a->Number() < b->Number();
+ }
+ return !b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate());
+ }
+ } cmp;
+ Object** start =
+ reinterpret_cast<Object**>(indices->GetFirstElementAddress());
+ std::sort(start, start + sort_size, cmp);
+ if (write_barrier_mode != SKIP_WRITE_BARRIER) {
+ FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(indices->GetIsolate()->heap(), *indices,
+ 0, sort_size);
+ }
+}
+
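The comparator above amounts to "numbers ascending, undefined last". A standalone analogue, using NaN as a stand-in for the undefined sentinel (purely illustrative; the real code inspects tagged Object pointers):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    void SortNumbersUndefinedLast(std::vector<double>* v) {
      std::sort(v->begin(), v->end(), [](double a, double b) {
        if (!std::isnan(a)) return std::isnan(b) || a < b;  // numbers come first
        return false;  // the undefined stand-in never precedes anything
      });
    }
    // {3, NaN, 1, 2} sorts to {1, 2, 3, NaN}.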
+static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ bool search_for_hole = value->IsUndefined(isolate);
+ for (uint32_t k = start_from; k < length; ++k) {
+ LookupIterator it(isolate, receiver, k);
+ if (!it.IsFound()) {
+ if (search_for_hole) return Just(true);
+ continue;
+ }
+ Handle<Object> element_k;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetProperty(&it), Nothing<bool>());
+
+ if (value->SameValueZero(*element_k)) return Just(true);
+ }
+
+ return Just(false);
+}
+
+static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from,
+ uint32_t length) {
+ for (uint32_t k = start_from; k < length; ++k) {
+ LookupIterator it(isolate, receiver, k);
+ if (!it.IsFound()) {
+ continue;
+ }
+ Handle<Object> element_k;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_k, Object::GetProperty(&it), Nothing<int64_t>());
+
+ if (value->StrictEquals(*element_k)) return Just<int64_t>(k);
+ }
+
+ return Just<int64_t>(-1);
+}
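The two slow paths differ exactly where Array.prototype.includes and indexOf differ: IncludesValueSlowPath compares with SameValueZero (NaN matches NaN, and a missing property matches undefined via search_for_hole), while IndexOfValueSlowPath uses strict equality. A numeric sketch of the two predicates, simplified to doubles (the real ones operate on Objects):

    #include <cmath>

    bool StrictEqualsNum(double a, double b) {
      return a == b;  // NaN == NaN is false, so indexOf never finds NaN
    }

    bool SameValueZeroNum(double a, double b) {
      if (std::isnan(a) && std::isnan(b)) return true;  // includes finds NaN
      return a == b;  // otherwise like strict equality; +0 and -0 still match
    }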
// Base class for element handler implementations. Contains the common logic
// for objects with different ElementsKinds.
@@ -466,8 +526,7 @@ static void TraceTopFrame(Isolate* isolate) {
// http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). We use
// CRTP to guarantee aggressive compile time optimizations (i.e. inlining and
// specialization of SomeElementsAccessor methods).
-template <typename ElementsAccessorSubclass,
- typename ElementsTraitsParam>
+template <typename Subclass, typename ElementsTraitsParam>
class ElementsAccessorBase : public ElementsAccessor {
public:
explicit ElementsAccessorBase(const char* name)
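The rename from ElementsAccessorSubclass to Subclass keeps the CRTP dispatch described in the comment above. Reduced to a minimal sketch: the base class forwards to static methods on the derived class, so the compiler resolves the call at compile time and can inline it, with no virtual dispatch.

    template <typename Subclass>
    class AccessorBase {
     public:
      int Get(int entry) { return Subclass::GetImpl(entry); }  // static dispatch
    };

    class FastAccessor : public AccessorBase<FastAccessor> {
     public:
      static int GetImpl(int entry) { return entry * 2; }  // example implementation
    };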
@@ -495,12 +554,12 @@ class ElementsAccessorBase : public ElementsAccessor {
} else {
length = fixed_array_base->length();
}
- ElementsAccessorSubclass::ValidateContents(holder, length);
+ Subclass::ValidateContents(holder, length);
}
void Validate(Handle<JSObject> holder) final {
DisallowHeapAllocation no_gc;
- ElementsAccessorSubclass::ValidateImpl(holder);
+ Subclass::ValidateImpl(holder);
}
static bool IsPackedImpl(Handle<JSObject> holder,
@@ -508,8 +567,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t end) {
if (IsFastPackedElementsKind(kind())) return true;
for (uint32_t i = start; i < end; i++) {
- if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store,
- ALL_PROPERTIES)) {
+ if (!Subclass::HasElementImpl(holder, i, backing_store, ALL_PROPERTIES)) {
return false;
}
}
@@ -520,8 +578,7 @@ class ElementsAccessorBase : public ElementsAccessor {
if (!IsHoleyElementsKind(kind())) return;
int length = Smi::cast(array->length())->value();
Handle<FixedArrayBase> backing_store(array->elements());
- if (!ElementsAccessorSubclass::IsPackedImpl(array, backing_store, 0,
- length)) {
+ if (!Subclass::IsPackedImpl(array, backing_store, 0, length)) {
return;
}
ElementsKind packed_kind = GetPackedElementsKind(kind());
@@ -537,20 +594,18 @@ class ElementsAccessorBase : public ElementsAccessor {
bool HasElement(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store,
PropertyFilter filter) final {
- return ElementsAccessorSubclass::HasElementImpl(holder, index,
- backing_store, filter);
+ return Subclass::HasElementImpl(holder, index, backing_store, filter);
}
static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store,
PropertyFilter filter) {
- return ElementsAccessorSubclass::GetEntryForIndexImpl(
- *holder, *backing_store, index, filter) != kMaxUInt32;
+ return Subclass::GetEntryForIndexImpl(*holder, *backing_store, index,
+ filter) != kMaxUInt32;
}
bool HasAccessors(JSObject* holder) final {
- return ElementsAccessorSubclass::HasAccessorsImpl(holder,
- holder->elements());
+ return Subclass::HasAccessorsImpl(holder, holder->elements());
}
static bool HasAccessorsImpl(JSObject* holder,
@@ -559,11 +614,11 @@ class ElementsAccessorBase : public ElementsAccessor {
}
Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
- return ElementsAccessorSubclass::GetImpl(holder, entry);
+ return Subclass::GetImpl(holder, entry);
}
static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
- return ElementsAccessorSubclass::GetImpl(holder->elements(), entry);
+ return Subclass::GetImpl(holder->elements(), entry);
}
static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
@@ -573,14 +628,13 @@ class ElementsAccessorBase : public ElementsAccessor {
}
void Set(Handle<JSObject> holder, uint32_t entry, Object* value) final {
- ElementsAccessorSubclass::SetImpl(holder, entry, value);
+ Subclass::SetImpl(holder, entry, value);
}
void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
uint32_t entry, Handle<Object> value,
PropertyAttributes attributes) final {
- ElementsAccessorSubclass::ReconfigureImpl(object, store, entry, value,
- attributes);
+ Subclass::ReconfigureImpl(object, store, entry, value, attributes);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -592,8 +646,7 @@ class ElementsAccessorBase : public ElementsAccessor {
void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
PropertyAttributes attributes, uint32_t new_capacity) final {
- ElementsAccessorSubclass::AddImpl(object, index, value, attributes,
- new_capacity);
+ Subclass::AddImpl(object, index, value, attributes, new_capacity);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -604,7 +657,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t Push(Handle<JSArray> receiver, Arguments* args,
uint32_t push_size) final {
- return ElementsAccessorSubclass::PushImpl(receiver, args, push_size);
+ return Subclass::PushImpl(receiver, args, push_size);
}
static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
@@ -615,7 +668,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
uint32_t unshift_size) final {
- return ElementsAccessorSubclass::UnshiftImpl(receiver, args, unshift_size);
+ return Subclass::UnshiftImpl(receiver, args, unshift_size);
}
static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
@@ -626,7 +679,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<JSArray> Slice(Handle<JSObject> receiver, uint32_t start,
uint32_t end) final {
- return ElementsAccessorSubclass::SliceImpl(receiver, start, end);
+ return Subclass::SliceImpl(receiver, start, end);
}
static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
@@ -638,8 +691,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
uint32_t delete_count, Arguments* args,
uint32_t add_count) final {
- return ElementsAccessorSubclass::SpliceImpl(receiver, start, delete_count,
- args, add_count);
+ return Subclass::SpliceImpl(receiver, start, delete_count, args, add_count);
}
static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
@@ -650,7 +702,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
Handle<Object> Pop(Handle<JSArray> receiver) final {
- return ElementsAccessorSubclass::PopImpl(receiver);
+ return Subclass::PopImpl(receiver);
}
static Handle<Object> PopImpl(Handle<JSArray> receiver) {
@@ -659,7 +711,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
Handle<Object> Shift(Handle<JSArray> receiver) final {
- return ElementsAccessorSubclass::ShiftImpl(receiver);
+ return Subclass::ShiftImpl(receiver);
}
static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
@@ -668,8 +720,8 @@ class ElementsAccessorBase : public ElementsAccessor {
}
void SetLength(Handle<JSArray> array, uint32_t length) final {
- ElementsAccessorSubclass::SetLengthImpl(array->GetIsolate(), array, length,
- handle(array->elements()));
+ Subclass::SetLengthImpl(array->GetIsolate(), array, length,
+ handle(array->elements()));
}
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
@@ -713,21 +765,25 @@ class ElementsAccessorBase : public ElementsAccessor {
} else {
// Check whether the backing store should be expanded.
capacity = Max(length, JSObject::NewElementsCapacity(capacity));
- ElementsAccessorSubclass::GrowCapacityAndConvertImpl(array, capacity);
+ Subclass::GrowCapacityAndConvertImpl(array, capacity);
}
array->set_length(Smi::FromInt(length));
JSObject::ValidateElements(array);
}
- static uint32_t GetIterationLength(JSObject* receiver,
- FixedArrayBase* elements) {
+ static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
if (receiver->IsJSArray()) {
DCHECK(JSArray::cast(receiver)->length()->IsSmi());
return static_cast<uint32_t>(
Smi::cast(JSArray::cast(receiver)->length())->value());
}
- return ElementsAccessorSubclass::GetCapacityImpl(receiver, elements);
+ return Subclass::GetCapacityImpl(receiver, elements);
+ }
+
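+ // By default, the number of entries is bounded by the maximum index;
+ // dictionary and sloppy-arguments kinds override this below with a
+ // tighter estimate.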
+ static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
+ FixedArrayBase* elements) {
+ return Subclass::GetMaxIndex(receiver, elements);
}
static Handle<FixedArrayBase> ConvertElementsWithCapacity(
@@ -762,13 +818,51 @@ class ElementsAccessorBase : public ElementsAccessor {
packed_size = Smi::cast(JSArray::cast(*object)->length())->value();
}
- ElementsAccessorSubclass::CopyElementsImpl(
- *old_elements, src_index, *new_elements, from_kind, dst_index,
- packed_size, copy_size);
+ Subclass::CopyElementsImpl(*old_elements, src_index, *new_elements,
+ from_kind, dst_index, packed_size, copy_size);
return new_elements;
}
+ static void TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> to_map) {
+ Handle<Map> from_map = handle(object->map());
+ ElementsKind from_kind = from_map->elements_kind();
+ ElementsKind to_kind = to_map->elements_kind();
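+ // A transition from a holey kind must stay holey in the target kind.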
+ if (IsFastHoleyElementsKind(from_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ }
+ if (from_kind != to_kind) {
+ // This method should never be called for any other case.
+ DCHECK(IsFastElementsKind(from_kind));
+ DCHECK(IsFastElementsKind(to_kind));
+ DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);
+
+ Handle<FixedArrayBase> from_elements(object->elements());
+ if (object->elements() == object->GetHeap()->empty_fixed_array() ||
+ IsFastDoubleElementsKind(from_kind) ==
+ IsFastDoubleElementsKind(to_kind)) {
+ // No change is needed to the elements() buffer; the transition
+ // only requires a map change.
+ JSObject::MigrateToMap(object, to_map);
+ } else {
+ DCHECK((IsFastSmiElementsKind(from_kind) &&
+ IsFastDoubleElementsKind(to_kind)) ||
+ (IsFastDoubleElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)));
+ uint32_t capacity = static_cast<uint32_t>(object->elements()->length());
+ Handle<FixedArrayBase> elements = ConvertElementsWithCapacity(
+ object, from_elements, from_kind, capacity);
+ JSObject::SetMapAndElements(object, to_map, elements);
+ }
+ if (FLAG_trace_elements_transitions) {
+ JSObject::PrintElementsTransition(stdout, object, from_kind,
+ from_elements, to_kind,
+ handle(object->elements()));
+ }
+ }
+ }
+
static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
uint32_t capacity) {
ElementsKind from_kind = object->GetElementsKind();
@@ -784,12 +878,17 @@ class ElementsAccessorBase : public ElementsAccessor {
DCHECK(IsFastDoubleElementsKind(from_kind) !=
IsFastDoubleElementsKind(kind()) ||
IsDictionaryElementsKind(from_kind) ||
- from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
static_cast<uint32_t>(old_elements->length()) < capacity);
+ Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
+ kind(), capacity);
+ }
+
+ static void BasicGrowCapacityAndConvertImpl(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, ElementsKind to_kind, uint32_t capacity) {
Handle<FixedArrayBase> elements =
ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
- ElementsKind to_kind = kind();
if (IsHoleyElementsKind(from_kind)) to_kind = GetHoleyElementsKind(to_kind);
Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, to_kind);
JSObject::SetMapAndElements(object, new_map, elements);
@@ -803,13 +902,17 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
+ void TransitionElementsKind(Handle<JSObject> object, Handle<Map> map) final {
+ Subclass::TransitionElementsKindImpl(object, map);
+ }
+
void GrowCapacityAndConvert(Handle<JSObject> object,
uint32_t capacity) final {
- ElementsAccessorSubclass::GrowCapacityAndConvertImpl(object, capacity);
+ Subclass::GrowCapacityAndConvertImpl(object, capacity);
}
void Delete(Handle<JSObject> obj, uint32_t entry) final {
- ElementsAccessorSubclass::DeleteImpl(obj, entry);
+ Subclass::DeleteImpl(obj, entry);
}
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
@@ -833,7 +936,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
FixedArrayBase* from = from_holder->elements();
- // NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
+ // NOTE: the Subclass::CopyElementsImpl() methods
// violate the handlified function signature convention:
// raw pointer parameters in the function that allocates. This is done
// intentionally to avoid ArrayConcat() builtin performance degradation.
@@ -842,13 +945,12 @@ class ElementsAccessorBase : public ElementsAccessor {
// copying from object with fast double elements to object with object
// elements. In all the other cases there are no allocations performed and
// handle creation causes noticeable performance degradation of the builtin.
- ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, *to, from_kind, to_start, packed_size, copy_size);
+ Subclass::CopyElementsImpl(from, from_start, *to, from_kind, to_start,
+ packed_size, copy_size);
}
Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
- return ElementsAccessorSubclass::NormalizeImpl(object,
- handle(object->elements()));
+ return Subclass::NormalizeImpl(object, handle(object->elements()));
}
static Handle<SeededNumberDictionary> NormalizeImpl(
@@ -861,7 +963,7 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<FixedArray> values_or_entries,
bool get_entries, int* nof_items,
PropertyFilter filter) {
- return ElementsAccessorSubclass::CollectValuesOrEntriesImpl(
+ return Subclass::CollectValuesOrEntriesImpl(
isolate, object, values_or_entries, get_entries, nof_items, filter);
}
@@ -870,11 +972,10 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
PropertyFilter filter) {
int count = 0;
- KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
- accumulator.NextPrototype();
- ElementsAccessorSubclass::CollectElementIndicesImpl(
- object, handle(object->elements(), isolate), &accumulator, kMaxUInt32,
- ALL_PROPERTIES, 0);
+ KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES);
+ Subclass::CollectElementIndicesImpl(
+ object, handle(object->elements(), isolate), &accumulator);
Handle<FixedArray> keys = accumulator.GetKeys();
for (int i = 0; i < keys->length(); ++i) {
@@ -883,15 +984,14 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t index;
if (!key->ToUint32(&index)) continue;
- uint32_t entry = ElementsAccessorSubclass::GetEntryForIndexImpl(
+ uint32_t entry = Subclass::GetEntryForIndexImpl(
*object, object->elements(), index, filter);
if (entry == kMaxUInt32) continue;
- PropertyDetails details =
- ElementsAccessorSubclass::GetDetailsImpl(*object, entry);
+ PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
if (details.kind() == kData) {
- value = ElementsAccessorSubclass::GetImpl(object, entry);
+ value = Subclass::GetImpl(object, entry);
} else {
LookupIterator it(isolate, object, index, LookupIterator::OWN);
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -909,26 +1009,22 @@ class ElementsAccessorBase : public ElementsAccessor {
void CollectElementIndices(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
- KeyAccumulator* keys, uint32_t range,
- PropertyFilter filter, uint32_t offset) final {
- if (filter & ONLY_ALL_CAN_READ) return;
- ElementsAccessorSubclass::CollectElementIndicesImpl(
- object, backing_store, keys, range, filter, offset);
+ KeyAccumulator* keys) final {
+ if (keys->filter() & ONLY_ALL_CAN_READ) return;
+ Subclass::CollectElementIndicesImpl(object, backing_store, keys);
}
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
- KeyAccumulator* keys, uint32_t range,
- PropertyFilter filter,
- uint32_t offset) {
+ KeyAccumulator* keys) {
DCHECK_NE(DICTIONARY_ELEMENTS, kind());
// Non-dictionary elements can't have all-can-read accessors.
- uint32_t length = GetIterationLength(*object, *backing_store);
- if (range < length) length = range;
- for (uint32_t i = offset; i < length; i++) {
- if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
- filter)) {
- keys->AddKey(i);
+ uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
+ PropertyFilter filter = keys->filter();
+ Factory* factory = keys->isolate()->factory();
+ for (uint32_t i = 0; i < length; i++) {
+ if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
+ keys->AddKey(factory->NewNumberFromUint(i));
}
}
}
@@ -938,12 +1034,10 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<FixedArrayBase> backing_store, GetKeysConversion convert,
PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices,
uint32_t insertion_index = 0) {
- uint32_t length =
- ElementsAccessorSubclass::GetIterationLength(*object, *backing_store);
+ uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
for (uint32_t i = 0; i < length; i++) {
- if (ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
- filter)) {
- if (convert == CONVERT_TO_STRING) {
+ if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
+ if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
} else {
@@ -956,68 +1050,51 @@ class ElementsAccessorBase : public ElementsAccessor {
return list;
}
- Handle<FixedArray> PrependElementIndices(Handle<JSObject> object,
- Handle<FixedArrayBase> backing_store,
- Handle<FixedArray> keys,
- GetKeysConversion convert,
- PropertyFilter filter) final {
- return ElementsAccessorSubclass::PrependElementIndicesImpl(
- object, backing_store, keys, convert, filter);
+ MaybeHandle<FixedArray> PrependElementIndices(
+ Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
+ Handle<FixedArray> keys, GetKeysConversion convert,
+ PropertyFilter filter) final {
+ return Subclass::PrependElementIndicesImpl(object, backing_store, keys,
+ convert, filter);
}
- static Handle<FixedArray> PrependElementIndicesImpl(
+ static MaybeHandle<FixedArray> PrependElementIndicesImpl(
Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
Handle<FixedArray> keys, GetKeysConversion convert,
PropertyFilter filter) {
Isolate* isolate = object->GetIsolate();
uint32_t nof_property_keys = keys->length();
uint32_t initial_list_length =
- ElementsAccessorSubclass::GetCapacityImpl(*object, *backing_store);
+ Subclass::GetMaxNumberOfEntries(*object, *backing_store);
initial_list_length += nof_property_keys;
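+ // Guard against unsigned overflow of the estimated length as well as
+ // exceeding the FixedArray size limit.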
+ if (initial_list_length > FixedArray::kMaxLength ||
+ initial_list_length < nof_property_keys) {
+ return isolate->Throw<FixedArray>(isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ }
+
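+ // Dictionary and sloppy-arguments stores hand out indices in an
+ // unspecified order, so they are collected as numbers first and sorted
+ // before any conversion to strings.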
+ bool needs_sorting =
+ IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind());
// Collect the element indices into a new list.
uint32_t nof_indices = 0;
Handle<FixedArray> combined_keys =
isolate->factory()->NewFixedArray(initial_list_length);
- combined_keys = ElementsAccessorSubclass::DirectCollectElementIndicesImpl(
- isolate, object, backing_store, convert, filter, combined_keys,
- &nof_indices);
-
- // Sort the indices list if necessary.
- if (IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind())) {
- struct {
- bool operator()(Object* a, Object* b) {
- if (!a->IsUndefined()) {
- if (b->IsUndefined()) return true;
- return a->Number() < b->Number();
- }
- return !b->IsUndefined();
- }
- } cmp;
- Object** start =
- reinterpret_cast<Object**>(combined_keys->GetFirstElementAddress());
- std::sort(start, start + nof_indices, cmp);
- uint32_t array_length = 0;
+ combined_keys = Subclass::DirectCollectElementIndicesImpl(
+ isolate, object, backing_store,
+ needs_sorting ? GetKeysConversion::kKeepNumbers : convert, filter,
+ combined_keys, &nof_indices);
+
+ if (needs_sorting) {
+ SortIndices(combined_keys, nof_indices);
// Indices from dictionary elements should only be converted after
// sorting.
- if (convert == CONVERT_TO_STRING) {
+ if (convert == GetKeysConversion::kConvertToString) {
for (uint32_t i = 0; i < nof_indices; i++) {
Handle<Object> index_string = isolate->factory()->Uint32ToString(
- combined_keys->get(i)->Number());
+ combined_keys->get(i)->Number());
combined_keys->set(i, *index_string);
}
- } else if (!(object->IsJSArray() &&
- JSArray::cast(*object)->length()->ToArrayLength(
- &array_length) &&
- array_length <= Smi::kMaxValue)) {
- // Since we use std::sort above, the GC will no longer know where the
- // HeapNumbers are, hence we have to write them again.
- // For Arrays with valid Smi length, we are sure to have no HeapNumber
- // indices and thus we can skip this step.
- for (uint32_t i = 0; i < nof_indices; i++) {
- Object* index = combined_keys->get(i);
- combined_keys->set(i, index);
- }
}
}
@@ -1025,7 +1102,9 @@ class ElementsAccessorBase : public ElementsAccessor {
CopyObjectToObjectElements(*keys, FAST_ELEMENTS, 0, *combined_keys,
FAST_ELEMENTS, nof_indices, nof_property_keys);
- if (IsHoleyElementsKind(kind())) {
+ // For holey elements and arguments we might have to shrink the collected
+ // keys since the estimates might be off.
+ if (IsHoleyElementsKind(kind()) || IsSloppyArgumentsElements(kind())) {
// Shrink combined_keys to the final size.
int final_size = nof_indices + nof_property_keys;
DCHECK_LE(final_size, combined_keys->length());
@@ -1038,8 +1117,7 @@ class ElementsAccessorBase : public ElementsAccessor {
void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) final {
- ElementsAccessorSubclass::AddElementsToKeyAccumulatorImpl(
- receiver, accumulator, convert);
+ Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator, convert);
}
static uint32_t GetCapacityImpl(JSObject* holder,
@@ -1048,7 +1126,35 @@ class ElementsAccessorBase : public ElementsAccessor {
}
uint32_t GetCapacity(JSObject* holder, FixedArrayBase* backing_store) final {
- return ElementsAccessorSubclass::GetCapacityImpl(holder, backing_store);
+ return Subclass::GetCapacityImpl(holder, backing_store);
+ }
+
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ return IncludesValueSlowPath(isolate, receiver, value, start_from, length);
+ }
+
+ Maybe<bool> IncludesValue(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) final {
+ return Subclass::IncludesValueImpl(isolate, receiver, value, start_from,
+ length);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ return IndexOfValueSlowPath(isolate, receiver, value, start_from, length);
+ }
+
+ Maybe<int64_t> IndexOfValue(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length) final {
+ return Subclass::IndexOfValueImpl(isolate, receiver, value, start_from,
+ length);
}
static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
@@ -1060,21 +1166,20 @@ class ElementsAccessorBase : public ElementsAccessor {
FixedArrayBase* backing_store,
uint32_t index, PropertyFilter filter) {
if (IsHoleyElementsKind(kind())) {
- return index < ElementsAccessorSubclass::GetCapacityImpl(holder,
- backing_store) &&
+ return index < Subclass::GetCapacityImpl(holder, backing_store) &&
!BackingStore::cast(backing_store)->is_the_hole(index)
? index
: kMaxUInt32;
} else {
- uint32_t length = GetIterationLength(holder, backing_store);
+ uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
return index < length ? index : kMaxUInt32;
}
}
uint32_t GetEntryForIndex(JSObject* holder, FixedArrayBase* backing_store,
uint32_t index) final {
- return ElementsAccessorSubclass::GetEntryForIndexImpl(
- holder, backing_store, index, ALL_PROPERTIES);
+ return Subclass::GetEntryForIndexImpl(holder, backing_store, index,
+ ALL_PROPERTIES);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -1087,7 +1192,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
- return ElementsAccessorSubclass::GetDetailsImpl(holder, entry);
+ return Subclass::GetDetailsImpl(holder, entry);
}
private:
@@ -1103,17 +1208,15 @@ class DictionaryElementsAccessor
: ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
- static uint32_t GetIterationLength(JSObject* receiver,
- FixedArrayBase* elements) {
- uint32_t length;
- if (receiver->IsJSArray()) {
- // Special-case GetIterationLength for dictionary elements since the
- // length of the array might be a HeapNumber.
- JSArray::cast(receiver)->length()->ToArrayLength(&length);
- } else {
- length = GetCapacityImpl(receiver, elements);
- }
- return length;
+ static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
+ // We cannot properly estimate this for dictionaries.
+ UNREACHABLE();
+ }
+
+ static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
+ FixedArrayBase* backing_store) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
+ return dict->NumberOfElements();
}
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
@@ -1184,7 +1287,7 @@ class DictionaryElementsAccessor
uint32_t index = GetIndexForEntryImpl(*dict, entry);
Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
USE(result);
- DCHECK(result->IsTrue());
+ DCHECK(result->IsTrue(dict->GetIsolate()));
Handle<FixedArray> new_elements =
SeededNumberDictionary::Shrink(dict, index);
obj->set_elements(*new_elements);
@@ -1196,12 +1299,10 @@ class DictionaryElementsAccessor
SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
if (!dict->requires_slow_elements()) return false;
int capacity = dict->Capacity();
- Heap* heap = holder->GetHeap();
- Object* undefined = heap->undefined_value();
- Object* the_hole = heap->the_hole_value();
+ Isolate* isolate = dict->GetIsolate();
for (int i = 0; i < capacity; i++) {
Object* key = dict->KeyAt(i);
- if (key == the_hole || key == undefined) continue;
+ if (!dict->IsKey(isolate, key)) continue;
DCHECK(!dict->IsDeleted(i));
PropertyDetails details = dict->DetailsAt(i);
if (details.type() == ACCESSOR_CONSTANT) return true;
@@ -1266,7 +1367,7 @@ class DictionaryElementsAccessor
DisallowHeapAllocation no_gc;
SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Object* index = dict->KeyAt(entry);
- return !index->IsTheHole();
+ return !index->IsTheHole(dict->GetIsolate());
}
static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) {
@@ -1311,46 +1412,42 @@ class DictionaryElementsAccessor
return static_cast<uint32_t>(raw_key->Number());
}
- static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
+ static uint32_t GetKeyForEntryImpl(Isolate* isolate,
+ Handle<SeededNumberDictionary> dictionary,
int entry, PropertyFilter filter) {
DisallowHeapAllocation no_gc;
Object* raw_key = dictionary->KeyAt(entry);
- if (!dictionary->IsKey(raw_key)) return kMaxUInt32;
- return FilterKey(dictionary, entry, raw_key, filter);
- }
-
- static uint32_t GetKeyForEntryImpl(Handle<SeededNumberDictionary> dictionary,
- int entry, PropertyFilter filter,
- Object* undefined, Object* the_hole) {
- DisallowHeapAllocation no_gc;
- Object* raw_key = dictionary->KeyAt(entry);
- // Replace the IsKey check with a direct comparison which is much faster.
- if (raw_key == undefined || raw_key == the_hole) {
- return kMaxUInt32;
- }
+ if (!dictionary->IsKey(isolate, raw_key)) return kMaxUInt32;
return FilterKey(dictionary, entry, raw_key, filter);
}
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
- KeyAccumulator* keys, uint32_t range,
- PropertyFilter filter,
- uint32_t offset) {
- if (filter & SKIP_STRINGS) return;
+ KeyAccumulator* keys) {
+ if (keys->filter() & SKIP_STRINGS) return;
Isolate* isolate = keys->isolate();
- Handle<Object> undefined = isolate->factory()->undefined_value();
- Handle<Object> the_hole = isolate->factory()->the_hole_value();
Handle<SeededNumberDictionary> dictionary =
Handle<SeededNumberDictionary>::cast(backing_store);
int capacity = dictionary->Capacity();
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(
+ GetMaxNumberOfEntries(*object, *backing_store));
+ int insertion_index = 0;
+ PropertyFilter filter = keys->filter();
for (int i = 0; i < capacity; i++) {
- uint32_t key =
- GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
- if (key == kMaxUInt32) continue;
- keys->AddKey(key);
+ Object* raw_key = dictionary->KeyAt(i);
+ if (!dictionary->IsKey(isolate, raw_key)) continue;
+ uint32_t key = FilterKey(dictionary, i, raw_key, filter);
+ if (key == kMaxUInt32) {
+ keys->AddShadowingKey(raw_key);
+ continue;
+ }
+ elements->set(insertion_index, raw_key);
+ insertion_index++;
+ }
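+ // Dictionary iteration order is unspecified; sort the collected indices
+ // so that keys are added in ascending numeric order.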
+ SortIndices(elements, insertion_index);
+ for (int i = 0; i < insertion_index; i++) {
+ keys->AddKey(elements->get(i));
}
-
- keys->SortCurrentElementsList();
}
static Handle<FixedArray> DirectCollectElementIndicesImpl(
@@ -1361,14 +1458,11 @@ class DictionaryElementsAccessor
if (filter & SKIP_STRINGS) return list;
if (filter & ONLY_ALL_CAN_READ) return list;
- Handle<Object> undefined = isolate->factory()->undefined_value();
- Handle<Object> the_hole = isolate->factory()->the_hole_value();
Handle<SeededNumberDictionary> dictionary =
Handle<SeededNumberDictionary>::cast(backing_store);
uint32_t capacity = dictionary->Capacity();
for (uint32_t i = 0; i < capacity; i++) {
- uint32_t key =
- GetKeyForEntryImpl(dictionary, i, filter, *undefined, *the_hole);
+ uint32_t key = GetKeyForEntryImpl(isolate, dictionary, i, filter);
if (key == kMaxUInt32) continue;
Handle<Object> index = isolate->factory()->NewNumberFromUint(key);
list->set(insertion_index, *index);
@@ -1393,31 +1487,202 @@ class DictionaryElementsAccessor
if (k == *the_hole) continue;
if (dictionary->IsDeleted(i)) continue;
Object* value = dictionary->ValueAt(i);
- DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsTheHole(isolate));
DCHECK(!value->IsAccessorPair());
DCHECK(!value->IsAccessorInfo());
accumulator->AddKey(value, convert);
}
}
+
+ static bool IncludesValueFastPath(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start_from,
+ uint32_t length, Maybe<bool>* result) {
+ DisallowHeapAllocation no_gc;
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(receiver->elements());
+ int capacity = dictionary->Capacity();
+ Object* the_hole = isolate->heap()->the_hole_value();
+ Object* undefined = isolate->heap()->undefined_value();
+
+ // Scan for accessor properties. If accessors are present, then elements
+ // must be accessed in order via the slow path.
+ bool found = false;
+ for (int i = 0; i < capacity; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (k == the_hole) continue;
+ if (k == undefined) continue;
+
+ uint32_t index;
+ if (!k->ToArrayIndex(&index) || index < start_from || index >= length) {
+ continue;
+ }
+
+ if (dictionary->DetailsAt(i).type() == ACCESSOR_CONSTANT) {
+ // Restart from beginning in slow path, otherwise we may observably
+ // access getters out of order
+ return false;
+ } else if (!found) {
+ Object* element_k = dictionary->ValueAt(i);
+ if (value->SameValueZero(element_k)) found = true;
+ }
+ }
+
+ *result = Just(found);
+ return true;
+ }
+
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ bool search_for_hole = value->IsUndefined(isolate);
+
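+ // An index with no dictionary entry reads back as undefined, so when
+ // searching for undefined the loop below treats absent indices (holes)
+ // as matches.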
+ if (!search_for_hole) {
+ Maybe<bool> result = Nothing<bool>();
+ if (DictionaryElementsAccessor::IncludesValueFastPath(
+ isolate, receiver, value, start_from, length, &result)) {
+ return result;
+ }
+ }
+
+ Handle<SeededNumberDictionary> dictionary(
+ SeededNumberDictionary::cast(receiver->elements()), isolate);
+ // Iterate through entire range, as accessing elements out of order is
+ // observable.
+ for (uint32_t k = start_from; k < length; ++k) {
+ int entry = dictionary->FindEntry(k);
+ if (entry == SeededNumberDictionary::kNotFound) {
+ if (search_for_hole) return Just(true);
+ continue;
+ }
+
+ PropertyDetails details = GetDetailsImpl(*dictionary, entry);
+ switch (details.kind()) {
+ case kData: {
+ Object* element_k = dictionary->ValueAt(entry);
+ if (value->SameValueZero(element_k)) return Just(true);
+ break;
+ }
+ case kAccessor: {
+ LookupIterator it(isolate, receiver, k,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ Handle<Object> element_k;
+
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_k, JSObject::GetPropertyWithAccessor(&it),
+ Nothing<bool>());
+
+ if (value->SameValueZero(*element_k)) return Just(true);
+
+ // Bail out to the slow path if elements on prototype changed.
+ if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) {
+ return IncludesValueSlowPath(isolate, receiver, value, k + 1,
+ length);
+ }
+
+ // Continue if elements unchanged.
+ if (*dictionary == receiver->elements()) continue;
+
+ // Otherwise, bail out or update elements.
+ if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) {
+ if (receiver->map()->GetInitialElements() == receiver->elements()) {
+ // If switched to initial elements, return true if searching for
+ // undefined, and false otherwise.
+ return Just(search_for_hole);
+ }
+ // Otherwise, switch to slow path.
+ return IncludesValueSlowPath(isolate, receiver, value, k + 1,
+ length);
+ }
+ dictionary = handle(
+ SeededNumberDictionary::cast(receiver->elements()), isolate);
+ break;
+ }
+ }
+ }
+ return Just(false);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+
+ Handle<SeededNumberDictionary> dictionary(
+ SeededNumberDictionary::cast(receiver->elements()), isolate);
+ // Iterate through entire range, as accessing elements out of order is
+ // observable.
+ for (uint32_t k = start_from; k < length; ++k) {
+ int entry = dictionary->FindEntry(k);
+ if (entry == SeededNumberDictionary::kNotFound) {
+ continue;
+ }
+
+ PropertyDetails details = GetDetailsImpl(*dictionary, entry);
+ switch (details.kind()) {
+ case kData: {
+ Object* element_k = dictionary->ValueAt(entry);
+ if (value->StrictEquals(element_k)) {
+ return Just<int64_t>(k);
+ }
+ break;
+ }
+ case kAccessor: {
+ LookupIterator it(isolate, receiver, k,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ Handle<Object> element_k;
+
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_k, JSObject::GetPropertyWithAccessor(&it),
+ Nothing<int64_t>());
+
+ if (value->StrictEquals(*element_k)) return Just<int64_t>(k);
+
+ // Bail out to the slow path if elements on prototype changed.
+ if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) {
+ return IndexOfValueSlowPath(isolate, receiver, value, k + 1,
+ length);
+ }
+
+ // Continue if elements unchanged.
+ if (*dictionary == receiver->elements()) continue;
+
+ // Otherwise, bail out or update elements.
+ if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) {
+ // Otherwise, switch to slow path.
+ return IndexOfValueSlowPath(isolate, receiver, value, k + 1,
+ length);
+ }
+ dictionary = handle(
+ SeededNumberDictionary::cast(receiver->elements()), isolate);
+ break;
+ }
+ }
+ }
+ return Just<int64_t>(-1);
+ }
};
// Super class for all fast element arrays.
-template<typename FastElementsAccessorSubclass,
- typename KindTraits>
-class FastElementsAccessor
- : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
+template <typename Subclass, typename KindTraits>
+class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
public:
explicit FastElementsAccessor(const char* name)
- : ElementsAccessorBase<FastElementsAccessorSubclass,
- KindTraits>(name) {}
+ : ElementsAccessorBase<Subclass, KindTraits>(name) {}
typedef typename KindTraits::BackingStore BackingStore;
static Handle<SeededNumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> store) {
Isolate* isolate = store->GetIsolate();
- ElementsKind kind = FastElementsAccessorSubclass::kind();
+ ElementsKind kind = Subclass::kind();
// Ensure that notifications fire if the array or object prototypes are
// normalizing.
@@ -1436,7 +1701,7 @@ class FastElementsAccessor
if (IsHoleyElementsKind(kind)) {
if (BackingStore::cast(*store)->is_the_hole(i)) continue;
}
- Handle<Object> value = FastElementsAccessorSubclass::GetImpl(*store, i);
+ Handle<Object> value = Subclass::GetImpl(*store, i);
dictionary = SeededNumberDictionary::AddNumberEntry(
dictionary, i, value, details, used_as_prototype);
j++;
@@ -1453,7 +1718,9 @@ class FastElementsAccessor
}
if (entry == 0) {
FixedArray* empty = heap->empty_fixed_array();
- if (obj->HasFastArgumentsElements()) {
+ // Dynamically ask for the elements kind here since we manually redirect
+ // the operations for argument backing stores.
+ if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
FixedArray::cast(obj->elements())->set(1, empty);
} else {
obj->set_elements(empty);
@@ -1538,14 +1805,13 @@ class FastElementsAccessor
uint32_t new_capacity) {
DCHECK_EQ(NONE, attributes);
ElementsKind from_kind = object->GetElementsKind();
- ElementsKind to_kind = FastElementsAccessorSubclass::kind();
+ ElementsKind to_kind = Subclass::kind();
if (IsDictionaryElementsKind(from_kind) ||
IsFastDoubleElementsKind(from_kind) !=
IsFastDoubleElementsKind(to_kind) ||
- FastElementsAccessorSubclass::GetCapacityImpl(
- *object, object->elements()) != new_capacity) {
- FastElementsAccessorSubclass::GrowCapacityAndConvertImpl(object,
- new_capacity);
+ Subclass::GetCapacityImpl(*object, object->elements()) !=
+ new_capacity) {
+ Subclass::GrowCapacityAndConvertImpl(object, new_capacity);
} else {
if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
JSObject::TransitionElementsKind(object, to_kind);
@@ -1555,7 +1821,7 @@ class FastElementsAccessor
JSObject::EnsureWritableFastElements(object);
}
}
- FastElementsAccessorSubclass::SetImpl(object, index, *value);
+ Subclass::SetImpl(object, index, *value);
}
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -1577,14 +1843,12 @@ class FastElementsAccessor
KeyAccumulator* accumulator,
AddKeyConversion convert) {
Handle<FixedArrayBase> elements(receiver->elements(),
- receiver->GetIsolate());
- uint32_t length =
- FastElementsAccessorSubclass::GetIterationLength(*receiver, *elements);
+ accumulator->isolate());
+ uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
if (IsFastPackedElementsKind(KindTraits::Kind) ||
HasEntryImpl(*elements, i)) {
- accumulator->AddKey(FastElementsAccessorSubclass::GetImpl(*elements, i),
- convert);
+ accumulator->AddKey(Subclass::GetImpl(*elements, i), convert);
}
}
}
@@ -1592,16 +1856,20 @@ class FastElementsAccessor
static void ValidateContents(Handle<JSObject> holder, int length) {
#if DEBUG
Isolate* isolate = holder->GetIsolate();
+ Heap* heap = isolate->heap();
HandleScope scope(isolate);
Handle<FixedArrayBase> elements(holder->elements(), isolate);
Map* map = elements->map();
- DCHECK((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
- (map == isolate->heap()->fixed_array_map() ||
- map == isolate->heap()->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(KindTraits::Kind) ==
- ((map == isolate->heap()->fixed_array_map() && length == 0) ||
- map == isolate->heap()->fixed_double_array_map())));
+ if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+ DCHECK_NE(map, heap->fixed_double_array_map());
+ } else if (IsFastDoubleElementsKind(KindTraits::Kind)) {
+ DCHECK_NE(map, heap->fixed_cow_array_map());
+ if (map == heap->fixed_array_map()) DCHECK_EQ(0, length);
+ } else {
+ UNREACHABLE();
+ }
if (length == 0) return; // nothing to do!
+#if ENABLE_SLOW_DCHECKS
DisallowHeapAllocation no_gc;
Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
if (IsFastSmiElementsKind(KindTraits::Kind)) {
@@ -1610,30 +1878,38 @@ class FastElementsAccessor
(IsFastHoleyElementsKind(KindTraits::Kind) &&
backing_store->is_the_hole(i)));
}
+ } else if (KindTraits::Kind == FAST_ELEMENTS ||
+ KindTraits::Kind == FAST_DOUBLE_ELEMENTS) {
+ for (int i = 0; i < length; i++) {
+ DCHECK(!backing_store->is_the_hole(i));
+ }
+ } else {
+ DCHECK(IsFastHoleyElementsKind(KindTraits::Kind));
}
#endif
+#endif
}
static Handle<Object> PopImpl(Handle<JSArray> receiver) {
- return FastElementsAccessorSubclass::RemoveElement(receiver, AT_END);
+ return Subclass::RemoveElement(receiver, AT_END);
}
static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
- return FastElementsAccessorSubclass::RemoveElement(receiver, AT_START);
+ return Subclass::RemoveElement(receiver, AT_START);
}
static uint32_t PushImpl(Handle<JSArray> receiver,
Arguments* args, uint32_t push_size) {
Handle<FixedArrayBase> backing_store(receiver->elements());
- return FastElementsAccessorSubclass::AddArguments(receiver, backing_store,
- args, push_size, AT_END);
+ return Subclass::AddArguments(receiver, backing_store, args, push_size,
+ AT_END);
}
static uint32_t UnshiftImpl(Handle<JSArray> receiver,
Arguments* args, uint32_t unshift_size) {
Handle<FixedArrayBase> backing_store(receiver->elements());
- return FastElementsAccessorSubclass::AddArguments(
- receiver, backing_store, args, unshift_size, AT_START);
+ return Subclass::AddArguments(receiver, backing_store, args, unshift_size,
+ AT_START);
}
static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
@@ -1644,11 +1920,10 @@ class FastElementsAccessor
Handle<JSArray> result_array = isolate->factory()->NewJSArray(
KindTraits::Kind, result_len, result_len);
DisallowHeapAllocation no_gc;
- FastElementsAccessorSubclass::CopyElementsImpl(
- *backing_store, start, result_array->elements(), KindTraits::Kind, 0,
- kPackedSizeNotKnown, result_len);
- FastElementsAccessorSubclass::TryTransitionResultArrayToPacked(
- result_array);
+ Subclass::CopyElementsImpl(*backing_store, start, result_array->elements(),
+ KindTraits::Kind, 0, kPackedSizeNotKnown,
+ result_len);
+ Subclass::TryTransitionResultArrayToPacked(result_array);
return result_array;
}
@@ -1681,29 +1956,26 @@ class FastElementsAccessor
KindTraits::Kind, delete_count, delete_count);
if (delete_count > 0) {
DisallowHeapAllocation no_gc;
- FastElementsAccessorSubclass::CopyElementsImpl(
- *backing_store, start, deleted_elements->elements(), KindTraits::Kind,
- 0, kPackedSizeNotKnown, delete_count);
+ Subclass::CopyElementsImpl(*backing_store, start,
+ deleted_elements->elements(), KindTraits::Kind,
+ 0, kPackedSizeNotKnown, delete_count);
}
// Delete and move elements to make space for add_count new elements.
if (add_count < delete_count) {
- FastElementsAccessorSubclass::SpliceShrinkStep(
- isolate, receiver, backing_store, start, delete_count, add_count,
- length, new_length);
+ Subclass::SpliceShrinkStep(isolate, receiver, backing_store, start,
+ delete_count, add_count, length, new_length);
} else if (add_count > delete_count) {
- backing_store = FastElementsAccessorSubclass::SpliceGrowStep(
- isolate, receiver, backing_store, start, delete_count, add_count,
- length, new_length);
+ backing_store =
+ Subclass::SpliceGrowStep(isolate, receiver, backing_store, start,
+ delete_count, add_count, length, new_length);
}
// Copy over the arguments.
- FastElementsAccessorSubclass::CopyArguments(args, backing_store, add_count,
- 3, start);
+ Subclass::CopyArguments(args, backing_store, add_count, 3, start);
receiver->set_length(Smi::FromInt(new_length));
- FastElementsAccessorSubclass::TryTransitionResultArrayToPacked(
- deleted_elements);
+ Subclass::TryTransitionResultArrayToPacked(deleted_elements);
return deleted_elements;
}
@@ -1715,8 +1987,7 @@ class FastElementsAccessor
uint32_t length = object->elements()->length();
for (uint32_t index = 0; index < length; ++index) {
if (!HasEntryImpl(object->elements(), index)) continue;
- Handle<Object> value =
- FastElementsAccessorSubclass::GetImpl(object->elements(), index);
+ Handle<Object> value = Subclass::GetImpl(object->elements(), index);
if (get_entries) {
value = MakeEntryPair(isolate, index, value);
}
@@ -1756,6 +2027,156 @@ class FastElementsAccessor
}
}
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> search_value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements_base = receiver->elements();
+ Object* the_hole = isolate->heap()->the_hole_value();
+ Object* undefined = isolate->heap()->undefined_value();
+ Object* value = *search_value;
+
+ // Elements beyond the capacity of the backing store are treated as undefined.
+ if (value == undefined &&
+ static_cast<uint32_t>(elements_base->length()) < length) {
+ return Just(true);
+ }
+
+ if (start_from >= length) return Just(false);
+
+ length = std::min(static_cast<uint32_t>(elements_base->length()), length);
+
+ if (!value->IsNumber()) {
+ if (value == undefined) {
+ // Only FAST_ELEMENTS, FAST_HOLEY_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, and
+ // FAST_HOLEY_DOUBLE_ELEMENTS can have `undefined` as a value.
+ if (!IsFastObjectElementsKind(Subclass::kind()) &&
+ !IsFastHoleyElementsKind(Subclass::kind())) {
+ return Just(false);
+ }
+
+ // Search for `undefined` or The Hole in FAST_ELEMENTS,
+ // FAST_HOLEY_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS
+ if (IsFastSmiOrObjectElementsKind(Subclass::kind())) {
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ Object* element_k = elements->get(k);
+
+ if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ element_k == the_hole) {
+ return Just(true);
+ }
+ if (IsFastObjectElementsKind(Subclass::kind()) &&
+ element_k == undefined) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ } else {
+ // Search for The Hole in FAST_HOLEY_DOUBLE_ELEMENTS
+ DCHECK_EQ(Subclass::kind(), FAST_HOLEY_DOUBLE_ELEMENTS);
+ auto elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ elements->is_the_hole(k)) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ }
+ } else if (!IsFastObjectElementsKind(Subclass::kind())) {
+ // Search for non-number, non-Undefined value, with either
+ // FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS or
+ // FAST_HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these
+ // elements kinds can only contain Number values or undefined.
+ return Just(false);
+ } else {
+ // Search for non-number, non-Undefined value with either
+ // FAST_ELEMENTS or FAST_HOLEY_ELEMENTS.
+ DCHECK(IsFastObjectElementsKind(Subclass::kind()));
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ Object* element_k = elements->get(k);
+ if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ element_k == the_hole) {
+ continue;
+ }
+
+ if (value->SameValueZero(element_k)) return Just(true);
+ }
+ return Just(false);
+ }
+ } else {
+ if (!value->IsNaN()) {
+ double search_value = value->Number();
+ if (IsFastDoubleElementsKind(Subclass::kind())) {
+ // Search for non-NaN Number in FAST_DOUBLE_ELEMENTS or
+ // FAST_HOLEY_DOUBLE_ELEMENTS --- Skip The Hole, and trust UCOMISD or
+ // similar operation for result.
+ auto elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ elements->is_the_hole(k)) {
+ continue;
+ }
+ if (elements->get_scalar(k) == search_value) return Just(true);
+ }
+ return Just(false);
+ } else {
+ // Search for non-NaN Number in FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
+ // FAST_SMI_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS --- Skip non-Numbers,
+ // and trust UCOMISD or similar operation for result.
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ Object* element_k = elements->get(k);
+ if (element_k->IsNumber() && element_k->Number() == search_value) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ }
+ } else {
+ // Search for NaN --- NaN cannot be represented with Smi elements, so
+ // abort if ElementsKind is FAST_SMI_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS
+ if (IsFastSmiElementsKind(Subclass::kind())) return Just(false);
+
+ if (IsFastDoubleElementsKind(Subclass::kind())) {
+ // Search for NaN in FAST_DOUBLE_ELEMENTS or
+ // FAST_HOLEY_DOUBLE_ELEMENTS --- Skip The Hole and trust
+ // std::isnan(elementK) for result
+ auto elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (IsFastHoleyElementsKind(Subclass::kind()) &&
+ elements->is_the_hole(k)) {
+ continue;
+ }
+ if (std::isnan(elements->get_scalar(k))) return Just(true);
+ }
+ return Just(false);
+ } else {
+ // Search for NaN in FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
+ // FAST_SMI_ELEMENTS or FAST_HOLEY_SMI_ELEMENTS. Return true if
+ // elementK->IsHeapNumber() && std::isnan(elementK->Number())
+ DCHECK(IsFastSmiOrObjectElementsKind(Subclass::kind()));
+ auto elements = FixedArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (elements->get(k)->IsNaN()) return Just(true);
+ }
+ return Just(false);
+ }
+ }
+ }
+ }
+
private:
// SpliceShrinkStep might modify the backing_store.
static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver,
@@ -1765,9 +2186,9 @@ class FastElementsAccessor
uint32_t new_length) {
const int move_left_count = len - delete_count - start;
const int move_left_dst_index = start + add_count;
- FastElementsAccessorSubclass::MoveElements(
- isolate, receiver, backing_store, move_left_dst_index,
- start + delete_count, move_left_count, new_length, len);
+ Subclass::MoveElements(isolate, receiver, backing_store,
+ move_left_dst_index, start + delete_count,
+ move_left_count, new_length, len);
}
// SpliceGrowStep might modify the backing_store.
@@ -1780,23 +2201,22 @@ class FastElementsAccessor
DCHECK((add_count - delete_count) <= (Smi::kMaxValue - length));
// Check if backing_store is big enough.
if (new_length <= static_cast<uint32_t>(backing_store->length())) {
- FastElementsAccessorSubclass::MoveElements(
- isolate, receiver, backing_store, start + add_count,
- start + delete_count, (length - delete_count - start), 0, 0);
+ Subclass::MoveElements(isolate, receiver, backing_store,
+ start + add_count, start + delete_count,
+ (length - delete_count - start), 0, 0);
// MoveElements updates the backing_store in-place.
return backing_store;
}
// New backing storage is needed.
int capacity = JSObject::NewElementsCapacity(new_length);
// Partially copy all elements up to start.
- Handle<FixedArrayBase> new_elms =
- FastElementsAccessorSubclass::ConvertElementsWithCapacity(
- receiver, backing_store, KindTraits::Kind, capacity, start);
+ Handle<FixedArrayBase> new_elms = Subclass::ConvertElementsWithCapacity(
+ receiver, backing_store, KindTraits::Kind, capacity, start);
// Copy the trailing elements after start + delete_count
- FastElementsAccessorSubclass::CopyElementsImpl(
- *backing_store, start + delete_count, *new_elms, KindTraits::Kind,
- start + add_count, kPackedSizeNotKnown,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
+ Subclass::CopyElementsImpl(*backing_store, start + delete_count, *new_elms,
+ KindTraits::Kind, start + add_count,
+ kPackedSizeNotKnown,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
receiver->set_elements(*new_elms);
return new_elms;
}
@@ -1815,16 +2235,14 @@ class FastElementsAccessor
DCHECK(length > 0);
int new_length = length - 1;
int remove_index = remove_position == AT_START ? 0 : new_length;
- Handle<Object> result =
- FastElementsAccessorSubclass::GetImpl(*backing_store, remove_index);
+ Handle<Object> result = Subclass::GetImpl(*backing_store, remove_index);
if (remove_position == AT_START) {
- FastElementsAccessorSubclass::MoveElements(
- isolate, receiver, backing_store, 0, 1, new_length, 0, 0);
+ Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
+ 0, 0);
}
- FastElementsAccessorSubclass::SetLengthImpl(isolate, receiver, new_length,
- backing_store);
+ Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
- if (IsHoleyElementsKind(kind) && result->IsTheHole()) {
+ if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
}
return result;
@@ -1833,7 +2251,7 @@ class FastElementsAccessor
static uint32_t AddArguments(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
Arguments* args, uint32_t add_size,
- Where remove_position) {
+ Where add_position) {
uint32_t length = Smi::cast(receiver->length())->value();
DCHECK(0 < add_size);
uint32_t elms_len = backing_store->length();
@@ -1845,24 +2263,23 @@ class FastElementsAccessor
// New backing storage is needed.
uint32_t capacity = JSObject::NewElementsCapacity(new_length);
// If we add arguments to the start we have to shift the existing objects.
- int copy_dst_index = remove_position == AT_START ? add_size : 0;
+ int copy_dst_index = add_position == AT_START ? add_size : 0;
// Copy over all objects to a new backing_store.
- backing_store = FastElementsAccessorSubclass::ConvertElementsWithCapacity(
+ backing_store = Subclass::ConvertElementsWithCapacity(
receiver, backing_store, KindTraits::Kind, capacity, 0,
copy_dst_index, ElementsAccessor::kCopyToEndAndInitializeToHole);
receiver->set_elements(*backing_store);
- } else if (remove_position == AT_START) {
+ } else if (add_position == AT_START) {
// If the backing store has enough capacity and we add elements to the
// start we have to shift the existing objects.
Isolate* isolate = receiver->GetIsolate();
- FastElementsAccessorSubclass::MoveElements(
- isolate, receiver, backing_store, add_size, 0, length, 0, 0);
+ Subclass::MoveElements(isolate, receiver, backing_store, add_size, 0,
+ length, 0, 0);
}
- int insertion_index = remove_position == AT_START ? 0 : length;
+ int insertion_index = add_position == AT_START ? 0 : length;
// Copy the arguments to the start.
- FastElementsAccessorSubclass::CopyArguments(args, backing_store, add_size,
- 1, insertion_index);
+ Subclass::CopyArguments(args, backing_store, add_size, 1, insertion_index);
// Set the length.
receiver->set_length(Smi::FromInt(new_length));
return new_length;
@@ -1876,22 +2293,19 @@ class FastElementsAccessor
FixedArrayBase* raw_backing_store = *dst_store;
WriteBarrierMode mode = raw_backing_store->GetWriteBarrierMode(no_gc);
for (uint32_t i = 0; i < copy_size; i++) {
- Object* argument = (*args)[i + src_index];
- FastElementsAccessorSubclass::SetImpl(raw_backing_store, i + dst_index,
- argument, mode);
+ Object* argument = (*args)[src_index + i];
+ DCHECK(!argument->IsTheHole(raw_backing_store->GetIsolate()));
+ Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
}
}
};
-
-template<typename FastElementsAccessorSubclass,
- typename KindTraits>
+template <typename Subclass, typename KindTraits>
class FastSmiOrObjectElementsAccessor
- : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
+ : public FastElementsAccessor<Subclass, KindTraits> {
public:
explicit FastSmiOrObjectElementsAccessor(const char* name)
- : FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits>(name) {}
+ : FastElementsAccessor<Subclass, KindTraits>(name) {}
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
Object* value) {
@@ -1909,8 +2323,7 @@ class FastSmiOrObjectElementsAccessor
}
static Object* GetRaw(FixedArray* backing_store, uint32_t entry) {
- uint32_t index = FastElementsAccessorSubclass::GetIndexForEntryImpl(
- backing_store, entry);
+ uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry);
return backing_store->get(index);
}
@@ -1931,7 +2344,6 @@ class FastSmiOrObjectElementsAccessor
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS:
CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
to_start, copy_size);
break;
@@ -1943,12 +2355,13 @@ class FastSmiOrObjectElementsAccessor
break;
}
case DICTIONARY_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS:
CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
copy_size);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
@@ -1960,6 +2373,33 @@ class FastSmiOrObjectElementsAccessor
break; // Nothing to do.
}
}
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> search_value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements_base = receiver->elements();
+ Object* value = *search_value;
+
+ if (start_from >= length) return Just<int64_t>(-1);
+
+ length = std::min(static_cast<uint32_t>(elements_base->length()), length);
+
+ // Only FAST_{,HOLEY_}ELEMENTS can store non-numbers.
+ if (!value->IsNumber() && !IsFastObjectElementsKind(Subclass::kind())) {
+ return Just<int64_t>(-1);
+ }
+ // NaN can never be found by strict equality.
+ if (value->IsNaN()) return Just<int64_t>(-1);
+
+ FixedArray* elements = FixedArray::cast(receiver->elements());
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (value->StrictEquals(elements->get(k))) return Just<int64_t>(k);
+ }
+ return Just<int64_t>(-1);
+ }
};
@@ -2010,15 +2450,12 @@ class FastHoleyObjectElementsAccessor
ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
};
-
-template<typename FastElementsAccessorSubclass,
- typename KindTraits>
+template <typename Subclass, typename KindTraits>
class FastDoubleElementsAccessor
- : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
+ : public FastElementsAccessor<Subclass, KindTraits> {
public:
explicit FastDoubleElementsAccessor(const char* name)
- : FastElementsAccessor<FastElementsAccessorSubclass,
- KindTraits>(name) {}
+ : FastElementsAccessor<Subclass, KindTraits>(name) {}
static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
return GetImpl(holder->elements(), entry);
@@ -2084,6 +2521,39 @@ class FastDoubleElementsAccessor
break;
}
}
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> search_value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements_base = receiver->elements();
+ Object* value = *search_value;
+
+ if (start_from >= length) return Just<int64_t>(-1);
+
+ length = std::min(static_cast<uint32_t>(elements_base->length()), length);
+
+ if (!value->IsNumber()) {
+ return Just<int64_t>(-1);
+ }
+ if (value->IsNaN()) {
+ return Just<int64_t>(-1);
+ }
+ double numeric_search_value = value->Number();
+ FixedDoubleArray* elements = FixedDoubleArray::cast(receiver->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ if (elements->is_the_hole(k)) {
+ continue;
+ }
+ if (elements->get_scalar(k) == numeric_search_value) {
+ return Just<int64_t>(k);
+ }
+ }
+ return Just<int64_t>(-1);
+ }
};
@@ -2112,17 +2582,17 @@ class FastHoleyDoubleElementsAccessor
// Super class for all external element arrays.
-template<ElementsKind Kind>
+template <ElementsKind Kind, typename ctype>
class TypedElementsAccessor
- : public ElementsAccessorBase<TypedElementsAccessor<Kind>,
- ElementsKindTraits<Kind> > {
+ : public ElementsAccessorBase<TypedElementsAccessor<Kind, ctype>,
+ ElementsKindTraits<Kind>> {
public:
explicit TypedElementsAccessor(const char* name)
: ElementsAccessorBase<AccessorClass,
ElementsKindTraits<Kind> >(name) {}
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
- typedef TypedElementsAccessor<Kind> AccessorClass;
+ typedef TypedElementsAccessor<Kind, ctype> AccessorClass;
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
Object* value) {
@@ -2228,27 +2698,116 @@ class TypedElementsAccessor
*nof_items = count;
return Just(true);
}
-};
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+
+ BackingStore* elements = BackingStore::cast(receiver->elements());
+ if (value->IsUndefined(isolate) &&
+ length > static_cast<uint32_t>(elements->length())) {
+ return Just(true);
+ }
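+ // Typed array elements are always numbers, so a non-number search value
+ // can only match as undefined beyond the backing store length, which is
+ // handled above.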
+ if (!value->IsNumber()) return Just(false);
+
+ double search_value = value->Number();
+
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (AccessorClass::kind() < FLOAT32_ELEMENTS ||
+ AccessorClass::kind() > FLOAT64_ELEMENTS) {
+ return Just(false);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+ // Return false if value can't be represented in this space
+ return Just(false);
+ }
+
+ // Prototype has no elements, and not searching for the hole --- limit
+ // search to backing store length.
+ if (static_cast<uint32_t>(elements->length()) < length) {
+ length = elements->length();
+ }
+
+ if (!std::isnan(search_value)) {
+ for (uint32_t k = start_from; k < length; ++k) {
+ double element_k = elements->get_scalar(k);
+ if (element_k == search_value) return Just(true);
+ }
+ return Just(false);
+ } else {
+ for (uint32_t k = start_from; k < length; ++k) {
+ double element_k = elements->get_scalar(k);
+ if (std::isnan(element_k)) return Just(true);
+ }
+ return Just(false);
+ }
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
+ DisallowHeapAllocation no_gc;
+
+ BackingStore* elements = BackingStore::cast(receiver->elements());
+ if (!value->IsNumber()) return Just<int64_t>(-1);
+ double search_value = value->Number();
+
+ if (!std::isfinite(search_value)) {
+ // Integral types cannot represent +Inf or NaN.
+ if (AccessorClass::kind() < FLOAT32_ELEMENTS ||
+ AccessorClass::kind() > FLOAT64_ELEMENTS) {
+ return Just<int64_t>(-1);
+ }
+ } else if (search_value < std::numeric_limits<ctype>::lowest() ||
+ search_value > std::numeric_limits<ctype>::max()) {
+ // Return false if value can't be represented in this ElementsKind.
+ return Just<int64_t>(-1);
+ }
+
+ // Prototype has no elements, and not searching for the hole --- limit
+ // search to backing store length.
+ if (static_cast<uint32_t>(elements->length()) < length) {
+ length = elements->length();
+ }
-#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
- typedef TypedElementsAccessor<TYPE##_ELEMENTS > \
+ if (std::isnan(search_value)) {
+ return Just<int64_t>(-1);
+ }
+
+ ctype typed_search_value = static_cast<ctype>(search_value);
+ if (static_cast<double>(typed_search_value) != search_value) {
+ return Just<int64_t>(-1); // Loss of precision.
+ }
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ ctype element_k = elements->get_scalar(k);
+ if (element_k == typed_search_value) return Just<int64_t>(k);
+ }
+ return Just<int64_t>(-1);
+ }
+};
+
+#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
+ typedef TypedElementsAccessor<TYPE##_ELEMENTS, ctype> \
Fixed##Type##ElementsAccessor;
TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
#undef FIXED_ELEMENTS_ACCESSOR
-
-template <typename SloppyArgumentsElementsAccessorSubclass,
- typename ArgumentsAccessor, typename KindTraits>
+template <typename Subclass, typename ArgumentsAccessor, typename KindTraits>
class SloppyArgumentsElementsAccessor
- : public ElementsAccessorBase<SloppyArgumentsElementsAccessorSubclass,
- KindTraits> {
+ : public ElementsAccessorBase<Subclass, KindTraits> {
public:
explicit SloppyArgumentsElementsAccessor(const char* name)
- : ElementsAccessorBase<SloppyArgumentsElementsAccessorSubclass,
- KindTraits>(name) {
+ : ElementsAccessorBase<Subclass, KindTraits>(name) {
USE(KindTraits::Kind);
}
@@ -2265,7 +2824,7 @@ class SloppyArgumentsElementsAccessor
Object* probe = parameter_map->get(entry + 2);
Context* context = Context::cast(parameter_map->get(0));
int context_entry = Smi::cast(probe)->value();
- DCHECK(!context->get(context_entry)->IsTheHole());
+ DCHECK(!context->get(context_entry)->IsTheHole(isolate));
return handle(context->get(context_entry), isolate);
} else {
// Object is not mapped, defer to the arguments.
@@ -2277,13 +2836,18 @@ class SloppyArgumentsElementsAccessor
AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(*result);
Context* context = Context::cast(parameter_map->get(0));
int context_entry = alias->aliased_context_slot();
- DCHECK(!context->get(context_entry)->IsTheHole());
+ DCHECK(!context->get(context_entry)->IsTheHole(isolate));
return handle(context->get(context_entry), isolate);
}
return result;
}
}
+ static void TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> map) {
+ UNREACHABLE();
+ }
+
static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
uint32_t capacity) {
UNREACHABLE();
@@ -2302,7 +2866,7 @@ class SloppyArgumentsElementsAccessor
Object* probe = parameter_map->get(entry + 2);
Context* context = Context::cast(parameter_map->get(0));
int context_entry = Smi::cast(probe)->value();
- DCHECK(!context->get(context_entry)->IsTheHole());
+ DCHECK(!context->get(context_entry)->IsTheHole(store->GetIsolate()));
context->set(context_entry, value);
} else {
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -2311,7 +2875,7 @@ class SloppyArgumentsElementsAccessor
AliasedArgumentsEntry* alias = AliasedArgumentsEntry::cast(current);
Context* context = Context::cast(parameter_map->get(0));
int context_entry = alias->aliased_context_slot();
- DCHECK(!context->get(context_entry)->IsTheHole());
+ DCHECK(!context->get(context_entry)->IsTheHole(store->GetIsolate()));
context->set(context_entry, value);
} else {
ArgumentsAccessor::SetImpl(arguments, entry - length, value);
@@ -2334,6 +2898,14 @@ class SloppyArgumentsElementsAccessor
ArgumentsAccessor::GetCapacityImpl(holder, arguments);
}
+ static uint32_t GetMaxNumberOfEntries(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ return parameter_map->length() - 2 +
+ ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments);
+ }
+
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
@@ -2350,7 +2922,8 @@ class SloppyArgumentsElementsAccessor
FixedArray* parameter_map = FixedArray::cast(parameters);
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
- return !GetParameterMapArg(parameter_map, entry)->IsTheHole();
+ return !GetParameterMapArg(parameter_map, entry)
+ ->IsTheHole(parameter_map->GetIsolate());
}
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
@@ -2379,12 +2952,12 @@ class SloppyArgumentsElementsAccessor
uint32_t index, PropertyFilter filter) {
FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(parameter_map, index);
- if (!probe->IsTheHole()) return index;
+ if (!probe->IsTheHole(holder->GetIsolate())) return index;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
index, filter);
- if (entry == kMaxUInt32) return entry;
+ if (entry == kMaxUInt32) return kMaxUInt32;
return (parameter_map->length() - 2) + entry;
}
@@ -2414,32 +2987,23 @@ class SloppyArgumentsElementsAccessor
// would enable GC of the context.
parameter_map->set_the_hole(entry + 2);
} else {
- SloppyArgumentsElementsAccessorSubclass::DeleteFromArguments(
- obj, entry - length);
+ Subclass::DeleteFromArguments(obj, entry - length);
}
}
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
- KeyAccumulator* keys, uint32_t range,
- PropertyFilter filter,
- uint32_t offset) {
- FixedArray* parameter_map = FixedArray::cast(*backing_store);
- uint32_t length = parameter_map->length() - 2;
- if (range < length) length = range;
-
- for (uint32_t i = offset; i < length; ++i) {
- if (!parameter_map->get(i + 2)->IsTheHole()) {
- keys->AddKey(i);
- }
- }
-
- Handle<FixedArrayBase> store(FixedArrayBase::cast(parameter_map->get(1)));
- ArgumentsAccessor::CollectElementIndicesImpl(object, store, keys, range,
- filter, offset);
- if (SloppyArgumentsElementsAccessorSubclass::kind() ==
- FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
- keys->SortCurrentElementsList();
+ KeyAccumulator* keys) {
+ Isolate* isolate = keys->isolate();
+ uint32_t nof_indices = 0;
+ Handle<FixedArray> indices = isolate->factory()->NewFixedArray(
+ GetCapacityImpl(*object, *backing_store));
+ DirectCollectElementIndicesImpl(isolate, object, backing_store,
+ GetKeysConversion::kKeepNumbers,
+ ENUMERABLE_STRINGS, indices, &nof_indices);
+ SortIndices(indices, nof_indices);
+ for (uint32_t i = 0; i < nof_indices; i++) {
+ keys->AddKey(indices->get(i));
}
}
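
[editor's note] The rewritten `CollectElementIndicesImpl` gathers candidate indices from both the parameter map and the arguments store into one buffer, sorts, and only then feeds the accumulator. The collect-then-sort shape, sketched standalone with `std::vector` in place of `FixedArray`:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Merge indices from two backing sources and emit them in ascending order,
// as the key accumulator expects.
std::vector<uint32_t> CollectSortedIndices(
    const std::vector<uint32_t>& mapped_indices,
    const std::vector<uint32_t>& stored_indices) {
  std::vector<uint32_t> indices;
  indices.reserve(mapped_indices.size() + stored_indices.size());
  indices.insert(indices.end(), mapped_indices.begin(), mapped_indices.end());
  indices.insert(indices.end(), stored_indices.begin(), stored_indices.end());
  std::sort(indices.begin(), indices.end());  // the SortIndices step
  return indices;
}
```
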
@@ -2452,8 +3016,8 @@ class SloppyArgumentsElementsAccessor
uint32_t length = parameter_map->length() - 2;
for (uint32_t i = 0; i < length; ++i) {
- if (parameter_map->get(i + 2)->IsTheHole()) continue;
- if (convert == CONVERT_TO_STRING) {
+ if (parameter_map->get(i + 2)->IsTheHole(isolate)) continue;
+ if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
} else {
@@ -2467,6 +3031,86 @@ class SloppyArgumentsElementsAccessor
isolate, object, store, convert, filter, list, nof_indices,
insertion_index);
}
+
+ static Maybe<bool> IncludesValueImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
+ Handle<Map> original_map = handle(object->map(), isolate);
+ FixedArray* parameter_map = FixedArray::cast(object->elements());
+ bool search_for_hole = value->IsUndefined(isolate);
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ uint32_t entry =
+ GetEntryForIndexImpl(*object, parameter_map, k, ALL_PROPERTIES);
+ if (entry == kMaxUInt32) {
+ if (search_for_hole) return Just(true);
+ continue;
+ }
+
+ Handle<Object> element_k = GetImpl(parameter_map, entry);
+
+ if (element_k->IsAccessorPair()) {
+ LookupIterator it(isolate, object, k, LookupIterator::OWN);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<bool>());
+
+ if (value->SameValueZero(*element_k)) return Just(true);
+
+ if (object->map() != *original_map) {
+          // Some mutation occurred in accessor. Abort "fast" path.
+ return IncludesValueSlowPath(isolate, object, value, k + 1, length);
+ }
+ } else if (value->SameValueZero(*element_k)) {
+ return Just(true);
+ }
+ }
+ return Just(false);
+ }
+
+ static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object> value,
+ uint32_t start_from, uint32_t length) {
+ DCHECK(JSObject::PrototypeHasNoElements(isolate, *object));
+ Handle<Map> original_map = handle(object->map(), isolate);
+ FixedArray* parameter_map = FixedArray::cast(object->elements());
+
+ for (uint32_t k = start_from; k < length; ++k) {
+ uint32_t entry =
+ GetEntryForIndexImpl(*object, parameter_map, k, ALL_PROPERTIES);
+ if (entry == kMaxUInt32) {
+ continue;
+ }
+
+ Handle<Object> element_k = GetImpl(parameter_map, entry);
+
+ if (element_k->IsAccessorPair()) {
+ LookupIterator it(isolate, object, k, LookupIterator::OWN);
+ DCHECK(it.IsFound());
+ DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
+ Object::GetPropertyWithAccessor(&it),
+ Nothing<int64_t>());
+
+ if (value->StrictEquals(*element_k)) {
+ return Just<int64_t>(k);
+ }
+
+ if (object->map() != *original_map) {
+ // Some mutation occurred in accessor. Abort "fast" path.
+ return IndexOfValueSlowPath(isolate, object, value, k + 1, length);
+ }
+ } else if (value->StrictEquals(*element_k)) {
+ return Just<int64_t>(k);
+ }
+ }
+ return Just<int64_t>(-1);
+ }
};
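
[editor's note] Both methods above snapshot the receiver's map before iterating and fall back to a slow path if running a user-visible accessor changed it. The same guard, sketched standalone: a version counter stands in for the map pointer, and the helpers are placeholders, not V8 APIs.

```cpp
#include <cstdint>
#include <vector>

struct Versioned {
  std::vector<int> data;
  uint64_t version = 0;  // bumped on any structural change
};

// Placeholder for a read that may run user code (a JS getter) and mutate v.
int ReadElement(Versioned& v, uint32_t k) { return v.data[k]; }

int64_t IndexOfSlow(Versioned& v, int value, uint32_t start) {
  for (uint32_t k = start; k < v.data.size(); ++k) {
    if (v.data[k] == value) return static_cast<int64_t>(k);
  }
  return -1;
}

int64_t IndexOfFast(Versioned& v, int value) {
  const uint64_t original_version = v.version;  // like handle(object->map())
  for (uint32_t k = 0; k < v.data.size(); ++k) {
    int element = ReadElement(v, k);  // side effects possible here
    if (element == value) return static_cast<int64_t>(k);
    if (v.version != original_version) {
      // Shape changed under us: resume conservatively on the slow path.
      return IndexOfSlow(v, value, k + 1);
    }
  }
  return -1;
}
```
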
@@ -2488,7 +3132,7 @@ class SlowSloppyArgumentsElementsAccessor
uint32_t index = GetIndexForEntryImpl(*dict, entry);
Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
USE(result);
- DCHECK(result->IsTrue());
+ DCHECK(result->IsTrue(dict->GetIsolate()));
Handle<FixedArray> new_elements =
SeededNumberDictionary::Shrink(dict, index);
parameter_map->set(1, *new_elements);
@@ -2521,25 +3165,25 @@ class SlowSloppyArgumentsElementsAccessor
PropertyAttributes attributes) {
Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(store);
uint32_t length = parameter_map->length() - 2;
+ Isolate* isolate = store->GetIsolate();
if (entry < length) {
Object* probe = parameter_map->get(entry + 2);
- DCHECK(!probe->IsTheHole());
+ DCHECK(!probe->IsTheHole(isolate));
Context* context = Context::cast(parameter_map->get(0));
int context_entry = Smi::cast(probe)->value();
- DCHECK(!context->get(context_entry)->IsTheHole());
+ DCHECK(!context->get(context_entry)->IsTheHole(isolate));
context->set(context_entry, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
parameter_map->set_the_hole(entry + 2);
// For elements that are still writable we re-establish slow aliasing.
if ((attributes & READ_ONLY) == 0) {
- Isolate* isolate = store->GetIsolate();
value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
}
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> arguments(
- SeededNumberDictionary::cast(parameter_map->get(1)));
+ SeededNumberDictionary::cast(parameter_map->get(1)), isolate);
arguments = SeededNumberDictionary::AddNumberEntry(
arguments, entry, value, details, object->map()->is_prototype_map());
// If the attributes were NONE, we would have called set rather than
@@ -2549,7 +3193,7 @@ class SlowSloppyArgumentsElementsAccessor
parameter_map->set(1, *arguments);
} else {
Handle<FixedArrayBase> arguments(
- FixedArrayBase::cast(parameter_map->get(1)));
+ FixedArrayBase::cast(parameter_map->get(1)), isolate);
DictionaryElementsAccessor::ReconfigureImpl(
object, arguments, entry - length, value, attributes);
}
@@ -2568,16 +3212,45 @@ class FastSloppyArgumentsElementsAccessor
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
+ static Handle<FixedArray> GetArguments(Isolate* isolate,
+ FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
+ return Handle<FixedArray>(FixedArray::cast(parameter_map->get(1)), isolate);
+ }
+
+ static Handle<JSArray> SliceImpl(Handle<JSObject> receiver, uint32_t start,
+ uint32_t end) {
+ Isolate* isolate = receiver->GetIsolate();
+ uint32_t result_len = end < start ? 0u : end - start;
+ Handle<JSArray> result_array = isolate->factory()->NewJSArray(
+ FAST_HOLEY_ELEMENTS, result_len, result_len);
+ DisallowHeapAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(result_array->elements());
+ FixedArray* parameters = FixedArray::cast(receiver->elements());
+ uint32_t insertion_index = 0;
+ for (uint32_t i = start; i < end; i++) {
+ uint32_t entry =
+ GetEntryForIndexImpl(*receiver, parameters, i, ALL_PROPERTIES);
+ if (entry != kMaxUInt32 && HasEntryImpl(parameters, entry)) {
+ elements->set(insertion_index, *GetImpl(parameters, entry));
+ } else {
+ elements->set_the_hole(insertion_index);
+ }
+ insertion_index++;
+ }
+ return result_array;
+ }
+
static Handle<SeededNumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> elements) {
- FixedArray* parameter_map = FixedArray::cast(*elements);
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
+ Handle<FixedArray> arguments =
+ GetArguments(elements->GetIsolate(), *elements);
return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
}
static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
- FixedArray* parameter_map = FixedArray::cast(obj->elements());
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
+ Handle<FixedArray> arguments =
+ GetArguments(obj->GetIsolate(), obj->elements());
FastHoleyObjectElementsAccessor::DeleteCommon(obj, entry, arguments);
}
@@ -2650,15 +3323,12 @@ class FastSloppyArgumentsElementsAccessor
}
};
-template <typename StringWrapperElementsAccessorSubclass,
- typename BackingStoreAccessor, typename KindTraits>
+template <typename Subclass, typename BackingStoreAccessor, typename KindTraits>
class StringWrapperElementsAccessor
- : public ElementsAccessorBase<StringWrapperElementsAccessorSubclass,
- KindTraits> {
+ : public ElementsAccessorBase<Subclass, KindTraits> {
public:
explicit StringWrapperElementsAccessor(const char* name)
- : ElementsAccessorBase<StringWrapperElementsAccessorSubclass, KindTraits>(
- name) {
+ : ElementsAccessorBase<Subclass, KindTraits>(name) {
USE(KindTraits::Kind);
}
@@ -2722,8 +3392,7 @@ class StringWrapperElementsAccessor
(object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS ||
BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) !=
new_capacity)) {
- StringWrapperElementsAccessorSubclass::GrowCapacityAndConvertImpl(
- object, new_capacity);
+ GrowCapacityAndConvertImpl(object, new_capacity);
}
BackingStoreAccessor::AddImpl(object, index, value, attributes,
new_capacity);
@@ -2760,23 +3429,42 @@ class StringWrapperElementsAccessor
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
- KeyAccumulator* keys, uint32_t range,
- PropertyFilter filter,
- uint32_t offset) {
+ KeyAccumulator* keys) {
uint32_t length = GetString(*object)->length();
+ Factory* factory = keys->isolate()->factory();
for (uint32_t i = 0; i < length; i++) {
- keys->AddKey(i);
+ keys->AddKey(factory->NewNumberFromUint(i));
}
- BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store, keys,
- range, filter, offset);
+ BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store,
+ keys);
+ }
+
+ static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
+ Handle<FixedArrayBase> old_elements(object->elements());
+ ElementsKind from_kind = object->GetElementsKind();
+ // This method should only be called if there's a reason to update the
+ // elements.
+ DCHECK(from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
+ static_cast<uint32_t>(old_elements->length()) < capacity);
+ Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
+ FAST_STRING_WRAPPER_ELEMENTS,
+ capacity);
}
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
uint32_t to_start, int packed_size,
int copy_size) {
- BackingStoreAccessor::CopyElementsImpl(from, from_start, to, from_kind,
- to_start, packed_size, copy_size);
+ DCHECK(!to->IsDictionary());
+ if (from_kind == SLOW_STRING_WRAPPER_ELEMENTS) {
+ CopyDictionaryToObjectElements(from, from_start, to, FAST_HOLEY_ELEMENTS,
+ to_start, copy_size);
+ } else {
+ DCHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, from_kind);
+ CopyObjectToObjectElements(from, FAST_HOLEY_ELEMENTS, from_start, to,
+ FAST_HOLEY_ELEMENTS, to_start, copy_size);
+ }
}
private:
@@ -2914,7 +3602,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
}
// Fill in the content
- switch (array->GetElementsKind()) {
+ switch (elements_kind) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
Handle<FixedArray> smi_elms = Handle<FixedArray>::cast(elms);
@@ -2975,31 +3663,17 @@ void ElementsAccessor::TearDown() {
elements_accessors_ = NULL;
}
-
Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
- uint32_t concat_size) {
- const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
- STATIC_ASSERT(FixedDoubleArray::kMaxLength < kHalfOfMaxInt);
- USE(kHalfOfMaxInt);
- uint32_t result_len = 0;
- bool has_raw_doubles = false;
+ uint32_t concat_size,
+ uint32_t result_len) {
ElementsKind result_elements_kind = GetInitialFastElementsKind();
+ bool has_raw_doubles = false;
{
DisallowHeapAllocation no_gc;
bool is_holey = false;
- // Iterate through all the arguments performing checks
- // and calculating total length.
for (uint32_t i = 0; i < concat_size; i++) {
- JSArray* array = JSArray::cast((*args)[i]);
- uint32_t len = 0;
- array->length()->ToArrayLength(&len);
-
- // We shouldn't overflow when adding another len.
- result_len += len;
- DCHECK(0 <= result_len);
- DCHECK(result_len <= FixedDoubleArray::kMaxLength);
-
- ElementsKind arg_kind = array->GetElementsKind();
+ Object* arg = (*args)[i];
+ ElementsKind arg_kind = JSArray::cast(arg)->GetElementsKind();
has_raw_doubles = has_raw_doubles || IsFastDoubleElementsKind(arg_kind);
is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
result_elements_kind =
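
[editor's note] The loop above folds every argument's `ElementsKind` into the most general kind the concatenated result needs. The fold over a simplified three-state lattice (hypothetical enum; the real kind lattice is larger and also tracks holeyness):

```cpp
// Widening fold: the result kind only ever becomes more general.
enum SimpleKind { kSmi = 0, kDouble = 1, kObject = 2 };

SimpleKind MostGeneralKind(const SimpleKind* kinds, int n) {
  SimpleKind result = kSmi;  // GetInitialFastElementsKind() analogue
  for (int i = 0; i < n; ++i) {
    if (kinds[i] > result) result = kinds[i];
  }
  return result;
}
// {kSmi, kDouble, kSmi} yields kDouble: one double argument is enough to
// force a double-capable result store.
```
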
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 2b18ab07d1..1ffd4d996f 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -81,18 +81,12 @@ class ElementsAccessor {
// whose PropertyAttribute match |filter|.
virtual void CollectElementIndices(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
- KeyAccumulator* keys,
- uint32_t range = kMaxUInt32,
- PropertyFilter filter = ALL_PROPERTIES,
- uint32_t offset = 0) = 0;
+ KeyAccumulator* keys) = 0;
inline void CollectElementIndices(Handle<JSObject> object,
- KeyAccumulator* keys,
- uint32_t range = kMaxUInt32,
- PropertyFilter filter = ALL_PROPERTIES,
- uint32_t offset = 0) {
- CollectElementIndices(object, handle(object->elements()), keys, range,
- filter, offset);
+ KeyAccumulator* keys) {
+ CollectElementIndices(object, handle(object->elements(), keys->isolate()),
+ keys);
}
virtual Maybe<bool> CollectValuesOrEntries(
@@ -100,13 +94,12 @@ class ElementsAccessor {
Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
PropertyFilter filter = ALL_PROPERTIES) = 0;
- //
- virtual Handle<FixedArray> PrependElementIndices(
+ virtual MaybeHandle<FixedArray> PrependElementIndices(
Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
Handle<FixedArray> keys, GetKeysConversion convert,
PropertyFilter filter = ALL_PROPERTIES) = 0;
- inline Handle<FixedArray> PrependElementIndices(
+ inline MaybeHandle<FixedArray> PrependElementIndices(
Handle<JSObject> object, Handle<FixedArray> keys,
GetKeysConversion convert, PropertyFilter filter = ALL_PROPERTIES) {
return PrependElementIndices(object, handle(object->elements()), keys,
@@ -117,6 +110,8 @@ class ElementsAccessor {
KeyAccumulator* accumulator,
AddKeyConversion convert) = 0;
+ virtual void TransitionElementsKind(Handle<JSObject> object,
+ Handle<Map> map) = 0;
virtual void GrowCapacityAndConvert(Handle<JSObject> object,
uint32_t capacity) = 0;
@@ -135,7 +130,7 @@ class ElementsAccessor {
uint32_t new_capacity) = 0;
static Handle<JSArray> Concat(Isolate* isolate, Arguments* args,
- uint32_t concat_size);
+ uint32_t concat_size, uint32_t result_length);
virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
uint32_t push_size) = 0;
@@ -159,6 +154,19 @@ class ElementsAccessor {
virtual uint32_t GetCapacity(JSObject* holder,
FixedArrayBase* backing_store) = 0;
+  // Check an Object's own elements for an element (using SameValueZero
+  // semantics).
+ virtual Maybe<bool> IncludesValue(Isolate* isolate, Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start,
+ uint32_t length) = 0;
+
+  // Check an Object's own elements for the index of an element (using strict
+  // equality semantics, matching the StrictEquals calls in the
+  // implementations).
+ virtual Maybe<int64_t> IndexOfValue(Isolate* isolate,
+ Handle<JSObject> receiver,
+ Handle<Object> value, uint32_t start,
+ uint32_t length) = 0;
+
protected:
friend class LookupIterator;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index a092a8a06c..c42d164603 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -6,8 +6,10 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/runtime-profiler.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -59,6 +61,43 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
Handle<Object> new_target) {
DCHECK(!receiver->IsJSGlobalObject());
+#ifdef USE_SIMULATOR
+ // Simulators use separate stacks for C++ and JS. JS stack overflow checks
+ // are performed whenever a JS function is called. However, it can be the case
+ // that the C++ stack grows faster than the JS stack, resulting in an overflow
+ // there. Add a check here to make that less likely.
+ StackLimitCheck check(isolate);
+ if (check.HasOverflowed()) {
+ isolate->StackOverflow();
+ isolate->ReportPendingMessages();
+ return MaybeHandle<Object>();
+ }
+#endif
+
+  // API callbacks can be called directly.
+ if (target->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+ if ((!is_construct || function->IsConstructor()) &&
+ function->shared()->IsApiFunction()) {
+ SaveContext save(isolate);
+ isolate->set_context(function->context());
+ DCHECK(function->context()->global_object()->IsJSGlobalObject());
+ if (is_construct) receiver = isolate->factory()->the_hole_value();
+ auto value = Builtins::InvokeApiFunction(
+ isolate, is_construct, function, receiver, argc, args,
+ Handle<HeapObject>::cast(new_target));
+ bool has_exception = value.is_null();
+ DCHECK(has_exception == isolate->has_pending_exception());
+ if (has_exception) {
+ isolate->ReportPendingMessages();
+ return MaybeHandle<Object>();
+ } else {
+ isolate->clear_pending_message();
+ }
+ return value;
+ }
+ }
+
// Entering JavaScript.
VMState<JS> state(isolate);
CHECK(AllowJavascriptExecution::IsAllowed(isolate));
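
[editor's note] The `USE_SIMULATOR` block added above checks C++ stack headroom before entering JS, because the simulator keeps JS on a separate stack and the JS-side checks never see the C++ one. Such checks are typically implemented by comparing a local variable's address against a precomputed limit; a rough sketch, not V8's actual `StackLimitCheck`:

```cpp
#include <cstdint>

// Assumes the stack grows downward; the address of a fresh local is a cheap
// approximation of the current stack pointer.
bool HasStackOverflowed(uintptr_t stack_limit) {
  int dummy;
  return reinterpret_cast<uintptr_t>(&dummy) < stack_limit;
}
```
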
@@ -86,6 +125,8 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
SealHandleScope shs(isolate);
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+ if (FLAG_clear_exceptions_on_js_entry) isolate->clear_pending_exception();
+
// Call the function through the right JS entry stub.
Object* orig_func = *new_target;
Object* func = *target;
@@ -94,6 +135,9 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
if (FLAG_profile_deserialization && target->IsJSFunction()) {
PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
}
+ RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::JS_Execution);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::JS_Execution);
value = CALL_GENERATED_CODE(isolate, stub_entry, orig_func, func, recv,
argc, argv);
}
@@ -105,7 +149,7 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
#endif
// Update the pending exception flag and return the value.
- bool has_exception = value->IsException();
+ bool has_exception = value->IsException(isolate);
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
isolate->ReportPendingMessages();
@@ -131,25 +175,6 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
receiver =
handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
}
-
- // api callbacks can be called directly.
- if (callable->IsJSFunction() &&
- Handle<JSFunction>::cast(callable)->shared()->IsApiFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
- SaveContext save(isolate);
- isolate->set_context(function->context());
- DCHECK(function->context()->global_object()->IsJSGlobalObject());
- auto value = Builtins::InvokeApiFunction(function, receiver, argc, argv);
- bool has_exception = value.is_null();
- DCHECK(has_exception == isolate->has_pending_exception());
- if (has_exception) {
- isolate->ReportPendingMessages();
- return MaybeHandle<Object>();
- } else {
- isolate->clear_pending_message();
- }
- return value;
- }
return Invoke(isolate, false, callable, receiver, argc, argv,
isolate->factory()->undefined_value());
}
@@ -416,16 +441,23 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<Object> pos,
Handle<Object> is_global) {
Isolate* isolate = fun->GetIsolate();
- Handle<Object> args[] = { recv, fun, pos, is_global };
- MaybeHandle<Object> maybe_result =
- TryCall(isolate, isolate->get_stack_trace_line_fun(),
- isolate->factory()->undefined_value(), arraysize(args), args);
- Handle<Object> result;
- if (!maybe_result.ToHandle(&result) || !result->IsString()) {
+ Handle<Object> strict_mode = isolate->factory()->ToBoolean(false);
+
+ MaybeHandle<Object> maybe_callsite =
+ CallSiteUtils::Construct(isolate, recv, fun, pos, strict_mode);
+ if (maybe_callsite.is_null()) {
+ isolate->clear_pending_exception();
+ return isolate->factory()->empty_string();
+ }
+
+ MaybeHandle<String> maybe_to_string =
+ CallSiteUtils::ToString(isolate, maybe_callsite.ToHandleChecked());
+ if (maybe_to_string.is_null()) {
+ isolate->clear_pending_exception();
return isolate->factory()->empty_string();
}
- return Handle<String>::cast(result);
+ return maybe_to_string.ToHandleChecked();
}
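
[editor's note] The rewritten `GetStackTraceLine` chains two fallible steps and, if either fails, clears the pending exception and returns the empty string. The control shape in a standalone sketch, with `std::optional` standing in for `MaybeHandle` and function pointers for the `CallSiteUtils` calls:

```cpp
#include <optional>
#include <string>

std::string StackTraceLineOrEmpty(
    std::optional<int> (*construct_callsite)(),
    std::optional<std::string> (*to_string)(int callsite)) {
  std::optional<int> callsite = construct_callsite();
  if (!callsite) return std::string();  // clear_pending_exception() analogue
  std::optional<std::string> line = to_string(*callsite);
  if (!line) return std::string();      // same fallback on the second step
  return *line;
}
```
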
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.cc b/deps/v8/src/extensions/ignition-statistics-extension.cc
new file mode 100644
index 0000000000..bab738f0f3
--- /dev/null
+++ b/deps/v8/src/extensions/ignition-statistics-extension.cc
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/extensions/ignition-statistics-extension.h"
+
+#include "src/base/logging.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Local<v8::FunctionTemplate>
+IgnitionStatisticsExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
+ DCHECK_EQ(strcmp(*v8::String::Utf8Value(name), "getIgnitionDispatchCounters"),
+ 0);
+ return v8::FunctionTemplate::New(
+ isolate, IgnitionStatisticsExtension::GetIgnitionDispatchCounters);
+}
+
+const char* const IgnitionStatisticsExtension::kSource =
+ "native function getIgnitionDispatchCounters();";
+
+void IgnitionStatisticsExtension::GetIgnitionDispatchCounters(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_EQ(args.Length(), 0);
+ DCHECK(FLAG_trace_ignition_dispatches);
+ args.GetReturnValue().Set(reinterpret_cast<Isolate*>(args.GetIsolate())
+ ->interpreter()
+ ->GetDispatchCountersObject());
+}
+
+} // namespace internal
+} // namespace v8
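
[editor's note] For context: a `v8::Extension` such as this one is registered globally and then enabled per context by name. V8 wires its internal extensions up in the bootstrapper; the equivalent embedder-side flow through the public API looks roughly like this sketch:

```cpp
#include "include/v8.h"

void CreateContextWithExtension(v8::Isolate* isolate, v8::Extension* ext) {
  // Global, usually done once at startup; V8 takes ownership of the pointer.
  v8::RegisterExtension(ext);

  // A context opts in by name: the Extension constructor's first argument
  // ("v8/ignition-statistics" for the class above).
  const char* names[] = {"v8/ignition-statistics"};
  v8::ExtensionConfiguration config(1, names);
  v8::HandleScope scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
  // Scripts in |context| can now call getIgnitionDispatchCounters(), which
  // only has data when --trace-ignition-dispatches is set (see the DCHECK).
  (void)context;
}
```
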
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.h b/deps/v8/src/extensions/ignition-statistics-extension.h
new file mode 100644
index 0000000000..fee55f6128
--- /dev/null
+++ b/deps/v8/src/extensions/ignition-statistics-extension.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
+#define V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class IgnitionStatisticsExtension : public v8::Extension {
+ public:
+ IgnitionStatisticsExtension()
+ : v8::Extension("v8/ignition-statistics", kSource) {}
+
+ v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) override;
+
+ static void GetIgnitionDispatchCounters(
+ const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ private:
+ static const char* const kSource;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 76dcd433af..5aafb7a974 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -116,7 +116,7 @@ void StatisticsExtension::GetCounters(
};
const StatisticNumber numbers[] = {
- {isolate->memory_allocator()->Size(), "total_committed_bytes"},
+ {heap->memory_allocator()->Size(), "total_committed_bytes"},
{heap->new_space()->Size(), "new_space_live_bytes"},
{heap->new_space()->Available(), "new_space_available_bytes"},
{heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
@@ -135,10 +135,32 @@ void StatisticsExtension::GetCounters(
AddNumber(args.GetIsolate(), result, numbers[i].number, numbers[i].name);
}
- AddNumber64(args.GetIsolate(), result,
- heap->amount_of_external_allocated_memory(),
+ AddNumber64(args.GetIsolate(), result, heap->external_memory(),
"amount_of_external_allocated_memory");
args.GetReturnValue().Set(result);
+
+ HeapIterator iterator(reinterpret_cast<Isolate*>(args.GetIsolate())->heap());
+ HeapObject* obj;
+ int reloc_info_total = 0;
+ int source_position_table_total = 0;
+ while ((obj = iterator.next())) {
+ if (obj->IsCode()) {
+ Code* code = Code::cast(obj);
+ reloc_info_total += code->relocation_info()->Size();
+ ByteArray* source_position_table = code->source_position_table();
+ if (source_position_table->length() > 0) {
+ source_position_table_total += code->source_position_table()->Size();
+ }
+ } else if (obj->IsBytecodeArray()) {
+ source_position_table_total +=
+ BytecodeArray::cast(obj)->source_position_table()->Size();
+ }
+ }
+
+ AddNumber(args.GetIsolate(), result, reloc_info_total,
+ "reloc_info_total_size");
+ AddNumber(args.GetIsolate(), result, source_position_table_total,
+ "source_position_table_total_size");
}
} // namespace internal
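
[editor's note] The added heap walk computes two totals in a single pass, dispatching on object type. The accounting shape, sketched with plain structs standing in for the heap object hierarchy:

```cpp
#include <cstdio>
#include <vector>

struct HeapObj {
  enum Kind { kCode, kBytecodeArray, kOther } kind;
  int reloc_size;      // meaningful for kCode only
  int pos_table_size;  // meaningful for kCode and kBytecodeArray
};

void ReportSizes(const std::vector<HeapObj>& heap) {
  int reloc_info_total = 0;
  int source_position_table_total = 0;
  for (const HeapObj& obj : heap) {
    if (obj.kind == HeapObj::kCode) {
      reloc_info_total += obj.reloc_size;
      if (obj.pos_table_size > 0)  // mirrors the length() > 0 check above
        source_position_table_total += obj.pos_table_size;
    } else if (obj.kind == HeapObj::kBytecodeArray) {
      source_position_table_total += obj.pos_table_size;
    }
  }
  std::printf("reloc_info_total_size: %d\n", reloc_info_total);
  std::printf("source_position_table_total_size: %d\n",
              source_position_table_total);
}
```
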
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 29a2474b09..5833eef4b7 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/assembler.h"
+#include "src/builtins/builtins.h"
#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/ic/stub-cache.h"
@@ -13,6 +14,12 @@
namespace v8 {
namespace internal {
+// Forward declarations for C++ builtins.
+#define FORWARD_DECLARE(Name) \
+ Object* Builtin_##Name(int argc, Object** args, Isolate* isolate);
+BUILTIN_LIST_C(FORWARD_DECLARE)
+#undef FORWARD_DECLARE
+
ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
ExternalReferenceTable* external_reference_table =
isolate->external_reference_table();
@@ -24,6 +31,18 @@ ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
}
ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
+ AddReferences(isolate);
+ AddBuiltins(isolate);
+ AddRuntimeFunctions(isolate);
+ AddStatCounters(isolate);
+ AddIsolateAddresses(isolate);
+ AddAccessors(isolate);
+ AddStubCache(isolate);
+ AddDeoptEntries(isolate);
+ AddApiReferences(isolate);
+}
+
+void ExternalReferenceTable::AddReferences(Isolate* isolate) {
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
"Heap::roots_array_start()");
@@ -59,14 +78,52 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(ExternalReference::isolate_address(isolate).address(), "isolate");
Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
"Interpreter::dispatch_table_address");
+ Add(ExternalReference::interpreter_dispatch_counters(isolate).address(),
+ "Interpreter::interpreter_dispatch_counters");
Add(ExternalReference::address_of_negative_infinity().address(),
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function(isolate).address(),
"power_double_double_function");
- Add(ExternalReference::power_double_int_function(isolate).address(),
- "power_double_int_function");
- Add(ExternalReference::math_log_double_function(isolate).address(),
- "std::log");
+ Add(ExternalReference::ieee754_acos_function(isolate).address(),
+ "base::ieee754::acos");
+ Add(ExternalReference::ieee754_acosh_function(isolate).address(),
+ "base::ieee754::acosh");
+ Add(ExternalReference::ieee754_asin_function(isolate).address(),
+ "base::ieee754::asin");
+ Add(ExternalReference::ieee754_asinh_function(isolate).address(),
+ "base::ieee754::asinh");
+ Add(ExternalReference::ieee754_atan_function(isolate).address(),
+ "base::ieee754::atan");
+ Add(ExternalReference::ieee754_atanh_function(isolate).address(),
+ "base::ieee754::atanh");
+ Add(ExternalReference::ieee754_atan2_function(isolate).address(),
+ "base::ieee754::atan2");
+ Add(ExternalReference::ieee754_cbrt_function(isolate).address(),
+ "base::ieee754::cbrt");
+ Add(ExternalReference::ieee754_cos_function(isolate).address(),
+ "base::ieee754::cos");
+ Add(ExternalReference::ieee754_cosh_function(isolate).address(),
+ "base::ieee754::cosh");
+ Add(ExternalReference::ieee754_exp_function(isolate).address(),
+ "base::ieee754::exp");
+ Add(ExternalReference::ieee754_expm1_function(isolate).address(),
+ "base::ieee754::expm1");
+ Add(ExternalReference::ieee754_log_function(isolate).address(),
+ "base::ieee754::log");
+ Add(ExternalReference::ieee754_log1p_function(isolate).address(),
+ "base::ieee754::log1p");
+ Add(ExternalReference::ieee754_log10_function(isolate).address(),
+ "base::ieee754::log10");
+ Add(ExternalReference::ieee754_log2_function(isolate).address(),
+ "base::ieee754::log2");
+ Add(ExternalReference::ieee754_sin_function(isolate).address(),
+ "base::ieee754::sin");
+ Add(ExternalReference::ieee754_sinh_function(isolate).address(),
+ "base::ieee754::sinh");
+ Add(ExternalReference::ieee754_tan_function(isolate).address(),
+ "base::ieee754::tan");
+ Add(ExternalReference::ieee754_tanh_function(isolate).address(),
+ "base::ieee754::tanh");
Add(ExternalReference::store_buffer_top(isolate).address(),
"store_buffer_top");
Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
@@ -89,7 +146,7 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
"Code::MarkCodeAsExecuted");
Add(ExternalReference::is_profiling_address(isolate).address(),
- "CpuProfiler::is_profiling");
+ "Isolate::is_profiling");
Add(ExternalReference::scheduled_exception_address(isolate).address(),
"Isolate::scheduled_exception");
Add(ExternalReference::invoke_function_callback(isolate).address(),
@@ -128,32 +185,26 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"wasm::float64_to_int64_wrapper");
Add(ExternalReference::wasm_float64_to_uint64(isolate).address(),
"wasm::float64_to_uint64_wrapper");
+ Add(ExternalReference::wasm_float64_pow(isolate).address(),
+ "wasm::float64_pow");
Add(ExternalReference::wasm_int64_div(isolate).address(), "wasm::int64_div");
Add(ExternalReference::wasm_int64_mod(isolate).address(), "wasm::int64_mod");
Add(ExternalReference::wasm_uint64_div(isolate).address(),
"wasm::uint64_div");
Add(ExternalReference::wasm_uint64_mod(isolate).address(),
"wasm::uint64_mod");
+ Add(ExternalReference::wasm_word32_ctz(isolate).address(),
+ "wasm::word32_ctz");
+ Add(ExternalReference::wasm_word64_ctz(isolate).address(),
+ "wasm::word64_ctz");
+ Add(ExternalReference::wasm_word32_popcnt(isolate).address(),
+ "wasm::word32_popcnt");
+ Add(ExternalReference::wasm_word64_popcnt(isolate).address(),
+ "wasm::word64_popcnt");
Add(ExternalReference::f64_acos_wrapper_function(isolate).address(),
"f64_acos_wrapper");
Add(ExternalReference::f64_asin_wrapper_function(isolate).address(),
"f64_asin_wrapper");
- Add(ExternalReference::f64_atan_wrapper_function(isolate).address(),
- "f64_atan_wrapper");
- Add(ExternalReference::f64_cos_wrapper_function(isolate).address(),
- "f64_cos_wrapper");
- Add(ExternalReference::f64_sin_wrapper_function(isolate).address(),
- "f64_sin_wrapper");
- Add(ExternalReference::f64_tan_wrapper_function(isolate).address(),
- "f64_tan_wrapper");
- Add(ExternalReference::f64_exp_wrapper_function(isolate).address(),
- "f64_exp_wrapper");
- Add(ExternalReference::f64_log_wrapper_function(isolate).address(),
- "f64_log_wrapper");
- Add(ExternalReference::f64_pow_wrapper_function(isolate).address(),
- "f64_pow_wrapper");
- Add(ExternalReference::f64_atan2_wrapper_function(isolate).address(),
- "f64_atan2_wrapper");
Add(ExternalReference::f64_mod_wrapper_function(isolate).address(),
"f64_mod_wrapper");
Add(ExternalReference::log_enter_external_function(isolate).address(),
@@ -173,14 +224,24 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(ExternalReference::is_tail_call_elimination_enabled_address(isolate)
.address(),
"Isolate::is_tail_call_elimination_enabled_address()");
+ Add(ExternalReference::address_of_float_abs_constant().address(),
+ "float_absolute_constant");
+ Add(ExternalReference::address_of_float_neg_constant().address(),
+ "float_negate_constant");
+ Add(ExternalReference::address_of_double_abs_constant().address(),
+ "double_absolute_constant");
+ Add(ExternalReference::address_of_double_neg_constant().address(),
+ "double_negate_constant");
// Debug addresses
Add(ExternalReference::debug_after_break_target_address(isolate).address(),
"Debug::after_break_target_address()");
Add(ExternalReference::debug_is_active_address(isolate).address(),
"Debug::is_active_address()");
- Add(ExternalReference::debug_step_in_enabled_address(isolate).address(),
+ Add(ExternalReference::debug_last_step_action_address(isolate).address(),
"Debug::step_in_enabled_address()");
+ Add(ExternalReference::debug_suspended_generator_address(isolate).address(),
+ "Debug::step_suspended_generator_address()");
#ifndef V8_INTERPRETED_REGEXP
Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
@@ -202,57 +263,68 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"OffsetsVector::static_offsets_vector");
#endif // V8_INTERPRETED_REGEXP
- // The following populates all of the different type of external references
- // into the ExternalReferenceTable.
- //
- // NOTE: This function was originally 100k of code. It has since been
- // rewritten to be mostly table driven, as the callback macro style tends to
- // very easily cause code bloat. Please be careful in the future when adding
- // new references.
+ // Runtime entries
+ Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
+ "HandleScope::DeleteExtensions");
+ Add(ExternalReference::incremental_marking_record_write_function(isolate)
+ .address(),
+ "IncrementalMarking::RecordWrite");
+ Add(ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate)
+ .address(),
+ "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
+ Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
+ "StoreBuffer::StoreBufferOverflow");
+}
- struct RefTableEntry {
- uint16_t id;
+void ExternalReferenceTable::AddBuiltins(Isolate* isolate) {
+ struct CBuiltinEntry {
+ Address address;
const char* name;
};
-
- static const RefTableEntry c_builtins[] = {
-#define DEF_ENTRY_C(name, ignored) {Builtins::c_##name, "Builtins::" #name},
- BUILTIN_LIST_C(DEF_ENTRY_C)
-#undef DEF_ENTRY_C
+ static const CBuiltinEntry c_builtins[] = {
+#define DEF_ENTRY(Name, ...) {FUNCTION_ADDR(&Builtin_##Name), "Builtin_" #Name},
+ BUILTIN_LIST_C(DEF_ENTRY)
+#undef DEF_ENTRY
};
-
for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
- ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
- isolate);
- Add(ref.address(), c_builtins[i].name);
+ Add(ExternalReference(c_builtins[i].address, isolate).address(),
+ c_builtins[i].name);
}
- static const RefTableEntry builtins[] = {
-#define DEF_ENTRY_C(name, ignored) {Builtins::k##name, "Builtins::" #name},
-#define DEF_ENTRY_A(name, i1, i2, i3) {Builtins::k##name, "Builtins::" #name},
- BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
- BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
-#undef DEF_ENTRY_C
-#undef DEF_ENTRY_A
+ struct BuiltinEntry {
+ Builtins::Name id;
+ const char* name;
+ };
+ static const BuiltinEntry builtins[] = {
+#define DEF_ENTRY(Name, ...) {Builtins::k##Name, "Builtin_" #Name},
+ BUILTIN_LIST_C(DEF_ENTRY) BUILTIN_LIST_A(DEF_ENTRY)
+#undef DEF_ENTRY
};
-
for (unsigned i = 0; i < arraysize(builtins); ++i) {
- ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
- Add(ref.address(), builtins[i].name);
+ Add(isolate->builtins()->builtin_address(builtins[i].id), builtins[i].name);
}
+}
+
+void ExternalReferenceTable::AddRuntimeFunctions(Isolate* isolate) {
+ struct RuntimeEntry {
+ Runtime::FunctionId id;
+ const char* name;
+ };
- static const RefTableEntry runtime_functions[] = {
+ static const RuntimeEntry runtime_functions[] = {
#define RUNTIME_ENTRY(name, i1, i2) {Runtime::k##name, "Runtime::" #name},
FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY
};
for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
- ExternalReference ref(
- static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
+ ExternalReference ref(runtime_functions[i].id, isolate);
Add(ref.address(), runtime_functions[i].name);
}
+}
+void ExternalReferenceTable::AddStatCounters(Isolate* isolate) {
// Stat counters
struct StatsRefTableEntry {
StatsCounter* (Counters::*counter)();
@@ -276,7 +348,9 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
}
Add(address, stats_ref_table[i].name);
}
+}
+void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate) {
// Top addresses
static const char* address_names[] = {
#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
@@ -288,58 +362,76 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
address_names[i]);
}
+}
+void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
// Accessors
struct AccessorRefTable {
Address address;
const char* name;
};
- static const AccessorRefTable accessors[] = {
+ static const AccessorRefTable getters[] = {
#define ACCESSOR_INFO_DECLARATION(name) \
{FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
+ };
+ static const AccessorRefTable setters[] = {
#define ACCESSOR_SETTER_DECLARATION(name) \
{FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
- ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+ ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
#undef ACCESSOR_SETTER_DECLARATION
};
- for (unsigned i = 0; i < arraysize(accessors); ++i) {
- Add(accessors[i].address, accessors[i].name);
+ for (unsigned i = 0; i < arraysize(getters); ++i) {
+ Add(getters[i].address, getters[i].name);
+ Add(AccessorInfo::redirect(isolate, getters[i].address, ACCESSOR_GETTER),
+ "");
}
- StubCache* stub_cache = isolate->stub_cache();
+ for (unsigned i = 0; i < arraysize(setters); ++i) {
+ Add(setters[i].address, setters[i].name);
+ }
+}
+
+void ExternalReferenceTable::AddStubCache(Isolate* isolate) {
+ StubCache* load_stub_cache = isolate->load_stub_cache();
// Stub cache tables
- Add(stub_cache->key_reference(StubCache::kPrimary).address(),
- "StubCache::primary_->key");
- Add(stub_cache->value_reference(StubCache::kPrimary).address(),
- "StubCache::primary_->value");
- Add(stub_cache->map_reference(StubCache::kPrimary).address(),
- "StubCache::primary_->map");
- Add(stub_cache->key_reference(StubCache::kSecondary).address(),
- "StubCache::secondary_->key");
- Add(stub_cache->value_reference(StubCache::kSecondary).address(),
- "StubCache::secondary_->value");
- Add(stub_cache->map_reference(StubCache::kSecondary).address(),
- "StubCache::secondary_->map");
+ Add(load_stub_cache->key_reference(StubCache::kPrimary).address(),
+ "Load StubCache::primary_->key");
+ Add(load_stub_cache->value_reference(StubCache::kPrimary).address(),
+ "Load StubCache::primary_->value");
+ Add(load_stub_cache->map_reference(StubCache::kPrimary).address(),
+ "Load StubCache::primary_->map");
+ Add(load_stub_cache->key_reference(StubCache::kSecondary).address(),
+ "Load StubCache::secondary_->key");
+ Add(load_stub_cache->value_reference(StubCache::kSecondary).address(),
+ "Load StubCache::secondary_->value");
+ Add(load_stub_cache->map_reference(StubCache::kSecondary).address(),
+ "Load StubCache::secondary_->map");
- // Runtime entries
- Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
- "HandleScope::DeleteExtensions");
- Add(ExternalReference::incremental_marking_record_write_function(isolate)
- .address(),
- "IncrementalMarking::RecordWrite");
- Add(ExternalReference::incremental_marking_record_write_code_entry_function(
- isolate)
- .address(),
- "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
- Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
- "StoreBuffer::StoreBufferOverflow");
+ StubCache* store_stub_cache = isolate->store_stub_cache();
- // Add a small set of deopt entry addresses to encoder without generating the
+ // Stub cache tables
+ Add(store_stub_cache->key_reference(StubCache::kPrimary).address(),
+ "Store StubCache::primary_->key");
+ Add(store_stub_cache->value_reference(StubCache::kPrimary).address(),
+ "Store StubCache::primary_->value");
+ Add(store_stub_cache->map_reference(StubCache::kPrimary).address(),
+ "Store StubCache::primary_->map");
+ Add(store_stub_cache->key_reference(StubCache::kSecondary).address(),
+ "Store StubCache::secondary_->key");
+ Add(store_stub_cache->value_reference(StubCache::kSecondary).address(),
+ "Store StubCache::secondary_->value");
+ Add(store_stub_cache->map_reference(StubCache::kSecondary).address(),
+ "Store StubCache::secondary_->map");
+}
+
+void ExternalReferenceTable::AddDeoptEntries(Isolate* isolate) {
+  // Add a small set of deopt entry addresses to the encoder without
+  // generating the
// deopt table code, which isn't possible at deserialization time.
HandleScope scope(isolate);
for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
@@ -350,5 +442,17 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
}
}
+void ExternalReferenceTable::AddApiReferences(Isolate* isolate) {
+ // Add external references provided by the embedder (a null-terminated
+ // array).
+ intptr_t* api_external_references = isolate->api_external_references();
+ if (api_external_references != nullptr) {
+ while (*api_external_references != 0) {
+ Add(reinterpret_cast<Address>(*api_external_references), "<embedder>");
+ api_external_references++;
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
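
[editor's note] `AddApiReferences` walks a null-terminated address list the embedder hands over at isolate creation, which is what lets serialized code resolve embedder addresses. A sketch of the supplying side, assuming the `CreateParams::external_references` field that feeds `api_external_references()`; `MyNativeFunction` is a hypothetical callback, and the trailing 0 matches the termination condition in the loop above:

```cpp
#include "include/v8.h"

void MyNativeFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {}

v8::Isolate* NewIsolateWithExternalRefs(v8::Isolate::CreateParams params) {
  // Addresses external code may reference; the list must end with 0.
  static intptr_t refs[] = {
      reinterpret_cast<intptr_t>(&MyNativeFunction),
      0,
  };
  params.external_references = refs;  // surfaces as api_external_references()
  return v8::Isolate::New(params);
}
```
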
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index 2ea4b14cf3..dc30dabd7c 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -40,6 +40,16 @@ class ExternalReferenceTable {
refs_.Add(entry);
}
+ void AddReferences(Isolate* isolate);
+ void AddBuiltins(Isolate* isolate);
+ void AddRuntimeFunctions(Isolate* isolate);
+ void AddStatCounters(Isolate* isolate);
+ void AddIsolateAddresses(Isolate* isolate);
+ void AddAccessors(Isolate* isolate);
+ void AddStubCache(Isolate* isolate);
+ void AddDeoptEntries(Isolate* isolate);
+ void AddApiReferences(Isolate* isolate);
+
List<ExternalReferenceEntry> refs_;
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 41c3cb5eaa..bedcb9b61a 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -4,9 +4,11 @@
#include "src/factory.h"
+#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
+#include "src/compiler.h"
#include "src/conversions.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
@@ -96,6 +98,7 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
result->set_prototype_users(WeakFixedArray::Empty());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_validity_cell(Smi::FromInt(0));
+ result->set_bit_field(0);
return result;
}
@@ -113,11 +116,10 @@ Factory::NewSloppyBlockWithEvalContextExtension(
}
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
- Handle<Object> to_number, bool to_boolean,
+ Handle<Object> to_number,
const char* type_of, byte kind) {
Handle<Oddball> oddball = New<Oddball>(map, OLD_SPACE);
- Oddball::Initialize(isolate(), oddball, to_string, to_number, to_boolean,
- type_of, kind);
+ Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
return oddball;
}
@@ -190,8 +192,8 @@ Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
Handle<AccessorPair> Factory::NewAccessorPair() {
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE));
- accessors->set_getter(*the_hole_value(), SKIP_WRITE_BARRIER);
- accessors->set_setter(*the_hole_value(), SKIP_WRITE_BARRIER);
+ accessors->set_getter(*null_value(), SKIP_WRITE_BARRIER);
+ accessors->set_setter(*null_value(), SKIP_WRITE_BARRIER);
return accessors;
}
@@ -291,12 +293,10 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
return result;
}
-
-MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
+ int length,
PretenureFlag pretenure) {
- int length = string.length();
- const uc16* start = string.start();
- if (String::IsOneByte(start, length)) {
+ if (String::IsOneByte(string, length)) {
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -304,7 +304,7 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
result,
NewRawOneByteString(length, pretenure),
String);
- CopyChars(result->GetChars(), start, length);
+ CopyChars(result->GetChars(), string, length);
return result;
} else {
Handle<SeqTwoByteString> result;
@@ -313,11 +313,21 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
result,
NewRawTwoByteString(length, pretenure),
String);
- CopyChars(result->GetChars(), start, length);
+ CopyChars(result->GetChars(), string, length);
return result;
}
}
+MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
+ PretenureFlag pretenure) {
+ return NewStringFromTwoByte(string.start(), string.length(), pretenure);
+}
+
+MaybeHandle<String> Factory::NewStringFromTwoByte(
+ const ZoneVector<uc16>* string, PretenureFlag pretenure) {
+ return NewStringFromTwoByte(string->data(), static_cast<int>(string->size()),
+ pretenure);
+}
Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
int chars,
@@ -704,6 +714,21 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
return external_string;
}
+Handle<ExternalOneByteString> Factory::NewNativeSourceString(
+ const ExternalOneByteString::Resource* resource) {
+ size_t length = resource->length();
+ DCHECK_LE(length, static_cast<size_t>(String::kMaxLength));
+
+ Handle<Map> map = native_source_string_map();
+ Handle<ExternalOneByteString> external_string =
+ New<ExternalOneByteString>(map, OLD_SPACE);
+ external_string->set_length(static_cast<int>(length));
+ external_string->set_hash_field(String::kEmptyHashField);
+ external_string->set_resource(resource);
+
+ return external_string;
+}
+
Handle<Symbol> Factory::NewSymbol() {
CALL_HEAP_FUNCTION(
@@ -736,6 +761,7 @@ Handle<Context> Factory::NewNativeContext() {
Handle<Context> Factory::NewScriptContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info) {
+ DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
Handle<FixedArray> array =
NewFixedArray(scope_info->ContextLength(), TENURED);
array->set_map_no_write_barrier(*script_context_map());
@@ -760,6 +786,7 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
+ DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
Handle<FixedArray> array =
NewFixedArray(scope_info->ContextLength(), TENURED);
array->set_map_no_write_barrier(*module_context_map());
@@ -772,6 +799,7 @@ Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
Handle<Context> Factory::NewFunctionContext(int length,
Handle<JSFunction> function) {
+ DCHECK(function->shared()->scope_info()->scope_type() == FUNCTION_SCOPE);
DCHECK(length >= Context::MIN_CONTEXT_SLOTS);
Handle<FixedArray> array = NewFixedArray(length);
array->set_map_no_write_barrier(*function_context_map());
@@ -834,6 +862,7 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
+ DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
Handle<FixedArray> array = NewFixedArray(scope_info->ContextLength());
array->set_map_no_write_barrier(*block_context_map());
Handle<Context> context = Handle<Context>::cast(array);
@@ -853,15 +882,6 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
}
-Handle<CodeCache> Factory::NewCodeCache() {
- Handle<CodeCache> code_cache =
- Handle<CodeCache>::cast(NewStruct(CODE_CACHE_TYPE));
- code_cache->set_default_cache(*empty_fixed_array(), SKIP_WRITE_BARRIER);
- code_cache->set_normal_type_cache(*undefined_value(), SKIP_WRITE_BARRIER);
- return code_cache;
-}
-
-
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
@@ -894,7 +914,7 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_wrapper(heap->undefined_value());
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
- script->set_eval_from_instructions_offset(0);
+ script->set_eval_from_position(0);
script->set_shared_function_infos(Smi::FromInt(0));
script->set_flags(0);
@@ -1079,13 +1099,9 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<Object> Factory::NewNumber(double value,
PretenureFlag pretenure) {
- // We need to distinguish the minus zero value and this cannot be
- // done after conversion to int. Doing this by comparing bit
- // patterns is faster than using fpclassify() et al.
- if (IsMinusZero(value)) return NewHeapNumber(-0.0, IMMUTABLE, pretenure);
-
- int int_value = FastD2IChecked(value);
- if (value == int_value && Smi::IsValid(int_value)) {
+  // Materialize the value as a Smi if possible.
+ int32_t int_value;
+ if (DoubleToSmiInteger(value, &int_value)) {
return handle(Smi::FromInt(int_value), isolate());
}
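
[editor's note] `NewNumber` now delegates the Smi-or-HeapNumber decision entirely to `DoubleToSmiInteger`, so that helper has to reject fractional values, out-of-range values, and -0 (whose sign only a heap number can preserve, which is why the old code special-cased it). A sketch of such a predicate, assuming a 31-bit Smi payload:

```cpp
#include <cmath>
#include <cstdint>

bool DoubleToSmiInteger31(double value, int32_t* out) {
  if (!std::isfinite(value)) return false;
  if (std::trunc(value) != value) return false;  // fractional part present
  if (value == 0.0 && std::signbit(value)) return false;  // -0: keep the sign
  const double kMin = -(1 << 30);          // 31-bit Smi range:
  const double kMax = (1 << 30) - 1;       // [-2^30, 2^30 - 1]
  if (value < kMin || value > kMax) return false;
  *out = static_cast<int32_t>(value);
  return true;
}
```
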
@@ -1143,47 +1159,41 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
MessageTemplate::TemplateString(template_index)));
}
- Handle<JSFunction> fun = isolate()->make_error_function();
- Handle<Object> message_type(Smi::FromInt(template_index), isolate());
if (arg0.is_null()) arg0 = undefined_value();
if (arg1.is_null()) arg1 = undefined_value();
if (arg2.is_null()) arg2 = undefined_value();
- Handle<Object> argv[] = {constructor, message_type, arg0, arg1, arg2};
- // Invoke the JavaScript factory method. If an exception is thrown while
- // running the factory method, use the exception as the result.
Handle<Object> result;
- MaybeHandle<Object> exception;
- if (!Execution::TryCall(isolate(), fun, undefined_value(), arraysize(argv),
- argv, &exception)
+ if (!ErrorUtils::MakeGenericError(isolate(), constructor, template_index,
+ arg0, arg1, arg2, SKIP_NONE)
.ToHandle(&result)) {
- Handle<Object> exception_obj;
- if (exception.ToHandle(&exception_obj)) {
- result = exception_obj;
- } else {
- result = undefined_value();
- }
+    // If an exception is thrown while running the factory method, use the
+    // exception as the result.
+ DCHECK(isolate()->has_pending_exception());
+ result = handle(isolate()->pending_exception(), isolate());
+ isolate()->clear_pending_exception();
}
+
return scope.CloseAndEscape(result);
}
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
Handle<String> message) {
- Handle<Object> argv[] = { message };
-
- // Invoke the JavaScript factory method. If an exception is thrown while
- // running the factory method, use the exception as the result.
- Handle<Object> result;
- MaybeHandle<Object> exception;
- if (!Execution::TryCall(isolate(), constructor, undefined_value(),
- arraysize(argv), argv, &exception)
- .ToHandle(&result)) {
- Handle<Object> exception_obj;
- if (exception.ToHandle(&exception_obj)) return exception_obj;
- return undefined_value();
+ // Construct a new error object. If an exception is thrown, use the exception
+ // as the result.
+
+ Handle<Object> no_caller;
+ MaybeHandle<Object> maybe_error =
+ ErrorUtils::Construct(isolate(), constructor, constructor, message,
+ SKIP_NONE, no_caller, false);
+ if (maybe_error.is_null()) {
+ DCHECK(isolate()->has_pending_exception());
+ maybe_error = handle(isolate()->pending_exception(), isolate());
+ isolate()->clear_pending_exception();
}
- return result;
+
+ return maybe_error.ToHandleChecked();
}
@@ -1202,21 +1212,22 @@ DEFINE_ERROR(SyntaxError, syntax_error)
DEFINE_ERROR(TypeError, type_error)
#undef DEFINE_ERROR
-
Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<SharedFunctionInfo> info,
- Handle<Context> context,
+ Handle<Object> context_or_undefined,
PretenureFlag pretenure) {
AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
Handle<JSFunction> function = New<JSFunction>(map, space);
+ DCHECK(context_or_undefined->IsContext() ||
+ context_or_undefined->IsUndefined(isolate()));
function->initialize_properties();
function->initialize_elements();
function->set_shared(*info);
function->set_code(info->code());
- function->set_context(*context);
+ function->set_context(*context_or_undefined);
function->set_prototype_or_initial_map(*the_hole_value());
- function->set_literals(LiteralsArray::cast(*empty_fixed_array()));
+ function->set_literals(LiteralsArray::cast(*empty_literals_array()));
function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
return function;
@@ -1230,13 +1241,14 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<SharedFunctionInfo> info =
NewSharedFunctionInfo(name, code, map->is_constructor());
DCHECK(is_sloppy(info->language_mode()));
- DCHECK(!map->IsUndefined());
+ DCHECK(!map->IsUndefined(isolate()));
DCHECK(
map.is_identical_to(isolate()->sloppy_function_map()) ||
map.is_identical_to(isolate()->sloppy_function_without_prototype_map()) ||
map.is_identical_to(
isolate()->sloppy_function_with_readonly_prototype_map()) ||
map.is_identical_to(isolate()->strict_function_map()) ||
+ map.is_identical_to(isolate()->strict_function_without_prototype_map()) ||
// TODO(titzer): wasm_function_map() could be undefined here. ugly.
(*map == context->get(Context::WASM_FUNCTION_MAP_INDEX)) ||
map.is_identical_to(isolate()->proxy_function_map()));
@@ -1262,16 +1274,9 @@ Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
- bool read_only_prototype,
bool is_strict) {
- // In strict mode, readonly strict map is only available during bootstrap
- DCHECK(!is_strict || !read_only_prototype ||
- isolate()->bootstrapper()->IsActive());
- Handle<Map> map =
- is_strict ? isolate()->strict_function_map()
- : read_only_prototype
- ? isolate()->sloppy_function_with_readonly_prototype_map()
- : isolate()->sloppy_function_map();
+ Handle<Map> map = is_strict ? isolate()->strict_function_map()
+ : isolate()->sloppy_function_map();
Handle<JSFunction> result = NewFunction(map, name, code);
result->set_prototype_or_initial_map(*prototype);
return result;
@@ -1281,22 +1286,19 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
InstanceType type, int instance_size,
- bool read_only_prototype,
- bool install_constructor,
bool is_strict) {
// Allocate the function
- Handle<JSFunction> function =
- NewFunction(name, code, prototype, read_only_prototype, is_strict);
+ Handle<JSFunction> function = NewFunction(name, code, prototype, is_strict);
ElementsKind elements_kind =
type == JS_ARRAY_TYPE ? FAST_SMI_ELEMENTS : FAST_HOLEY_SMI_ELEMENTS;
Handle<Map> initial_map = NewMap(type, instance_size, elements_kind);
- if (!function->shared()->is_generator()) {
- if (prototype->IsTheHole()) {
+ // TODO(littledan): Why do we have this is_generator test when
+ // NewFunctionPrototype already handles finding an appropriately
+ // shared prototype?
+ if (!function->shared()->is_resumable()) {
+ if (prototype->IsTheHole(isolate())) {
prototype = NewFunctionPrototype(function);
- } else if (install_constructor) {
- JSObject::AddProperty(Handle<JSObject>::cast(prototype),
- constructor_string(), function, DONT_ENUM);
}
}
@@ -1320,11 +1322,12 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// can be from a different context.
Handle<Context> native_context(function->context()->native_context());
Handle<Map> new_map;
- if (function->shared()->is_generator()) {
- // Generator prototypes can share maps since they don't have "constructor"
- // properties.
+ if (function->shared()->is_resumable()) {
+ // Generator and async function prototypes can share maps since they
+ // don't have "constructor" properties.
new_map = handle(native_context->generator_object_prototype_map());
} else {
+ CHECK(!function->shared()->is_async());
// Each function prototype gets a fresh map to avoid unwanted sharing of
// maps between prototypes of different constructors.
Handle<JSFunction> object_function(native_context->object_function());
@@ -1335,7 +1338,7 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
DCHECK(!new_map->is_prototype_map());
Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
- if (!function->shared()->is_generator()) {
+ if (!function->shared()->is_resumable()) {
JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM);
}
@@ -1355,20 +1358,21 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
pretenure);
}
-
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Context> context, PretenureFlag pretenure) {
+ Handle<Object> context_or_undefined, PretenureFlag pretenure) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
Handle<JSFunction> result =
- NewFunction(initial_map, info, context, pretenure);
+ NewFunction(initial_map, info, context_or_undefined, pretenure);
if (info->ic_age() != isolate()->heap()->global_ic_age()) {
info->ResetForNewContext(isolate()->heap()->global_ic_age());
}
- // Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result, pretenure);
+ if (context_or_undefined->IsContext()) {
+ // Give compiler a chance to pre-initialize.
+ Compiler::PostInstantiation(result, pretenure);
+ }
return result;
}
@@ -1406,13 +1410,23 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
bool is_debug) {
Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ bool has_unwinding_info = desc.unwinding_info != nullptr;
+ DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
+ (!has_unwinding_info && desc.unwinding_info_size == 0));
+
// Compute size.
- int body_size = RoundUp(desc.instr_size, kObjectAlignment);
- int obj_size = Code::SizeFor(body_size);
+ int body_size = desc.instr_size;
+ int unwinding_info_size_field_size = kInt64Size;
+ if (has_unwinding_info) {
+ body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
+ unwinding_info_size_field_size;
+ }
+ int obj_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
Handle<Code> code = NewCodeRaw(obj_size, immovable);
- DCHECK(isolate()->code_range() == NULL || !isolate()->code_range()->valid() ||
- isolate()->code_range()->contains(code->address()) ||
+ DCHECK(!isolate()->heap()->memory_allocator()->code_range()->valid() ||
+ isolate()->heap()->memory_allocator()->code_range()->contains(
+ code->address()) ||
obj_size <= isolate()->heap()->code_space()->AreaSize());
// The code object has not been fully initialized yet. We rely on the
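A worked instance of the size computation above, assuming kInt64Size == 8 and an 8-byte kObjectAlignment (typical 64-bit values; both are assumptions here):

constexpr int RoundUpTo(int x, int align) { return (x + align - 1) & ~(align - 1); }
static_assert(RoundUpTo(100, 8) + 16 + 8 == 128,
              "100 instruction bytes plus 16 bytes of unwinding info plus the "
              "8-byte unwinding_info_size field give a 128-byte body");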
@@ -1423,15 +1437,18 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(*reloc_info);
code->set_flags(flags);
+ code->set_has_unwinding_info(has_unwinding_info);
code->set_raw_kind_specific_flags1(0);
code->set_raw_kind_specific_flags2(0);
code->set_is_crankshafted(crankshafted);
code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_raw_type_feedback_info(Smi::FromInt(0));
- code->set_next_code_link(*undefined_value());
+ code->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_source_position_table(*empty_byte_array(), SKIP_WRITE_BARRIER);
code->set_prologue_offset(prologue_offset);
code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+ code->set_builtin_index(-1);
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
@@ -1467,12 +1484,6 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
}
-Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyCode(*code, reloc_info),
- Code);
-}
-
Handle<BytecodeArray> Factory::CopyBytecodeArray(
Handle<BytecodeArray> bytecode_array) {
CALL_HEAP_FUNCTION(isolate(),
@@ -1508,19 +1519,6 @@ Handle<JSObject> Factory::NewJSObjectWithNullProto() {
return result;
}
-Handle<JSModule> Factory::NewJSModule(Handle<Context> context,
- Handle<ScopeInfo> scope_info) {
- // Allocate a fresh map. Modules do not have a prototype.
- Handle<Map> map = NewMap(JS_MODULE_TYPE, JSModule::kSize);
- // Allocate the object based on the map.
- Handle<JSModule> module =
- Handle<JSModule>::cast(NewJSObjectFromMap(map, TENURED));
- module->set_context(*context);
- module->set_scope_info(*scope_info);
- return module;
-}
-
-
Handle<JSGlobalObject> Factory::NewJSGlobalObject(
Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
@@ -1668,7 +1666,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
Handle<JSFunction> function) {
- DCHECK(function->shared()->is_generator());
+ DCHECK(function->shared()->is_resumable());
JSFunction::EnsureHasInitialMap(function);
Handle<Map> map(function->initial_map());
DCHECK_EQ(JS_GENERATOR_OBJECT_TYPE, map->instance_type());
@@ -1968,13 +1966,9 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
}
// Setup the map for the JSBoundFunction instance.
- Handle<Map> map = handle(
- target_function->IsConstructor()
- ? isolate()->native_context()->bound_function_with_constructor_map()
- : isolate()
- ->native_context()
- ->bound_function_without_constructor_map(),
- isolate());
+ Handle<Map> map = target_function->IsConstructor()
+ ? isolate()->bound_function_with_constructor_map()
+ : isolate()->bound_function_without_constructor_map();
if (map->prototype() != *prototype) {
map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
}
@@ -1986,8 +1980,6 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
result->set_bound_target_function(*target_function);
result->set_bound_this(*bound_this);
result->set_bound_arguments(*bound_arguments);
- result->set_length(Smi::FromInt(0));
- result->set_name(*undefined_value(), SKIP_WRITE_BARRIER);
return result;
}
@@ -2006,7 +1998,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
} else {
map = Handle<Map>(isolate()->proxy_map());
}
- DCHECK(map->prototype()->IsNull());
+ DCHECK(map->prototype()->IsNull(isolate()));
Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
result->initialize_properties();
result->set_target(*target);
@@ -2078,7 +2070,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
shared->set_num_literals(number_of_literals);
if (IsGeneratorFunction(kind)) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
- shared->DisableOptimization(kGenerator);
}
return shared;
}
@@ -2124,7 +2115,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<Code> construct_stub =
is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
: isolate()->builtins()->ConstructedNonConstructable();
- share->set_construct_stub(*construct_stub);
+ share->SetConstructStub(*construct_stub);
share->set_instance_class_name(*Object_string());
share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
@@ -2133,9 +2124,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
StaticFeedbackVectorSpec empty_spec;
Handle<TypeFeedbackMetadata> feedback_metadata =
TypeFeedbackMetadata::New(isolate(), &empty_spec);
- Handle<TypeFeedbackVector> feedback_vector =
- TypeFeedbackVector::New(isolate(), feedback_metadata);
- share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
+ share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
#if TRACE_MAPS
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -2170,9 +2159,8 @@ static inline int NumberCacheHash(Handle<FixedArray> cache,
if (number->IsSmi()) {
return Handle<Smi>::cast(number)->value() & mask;
} else {
- DoubleRepresentation rep(number->Number());
- return
- (static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) & mask;
+ int64_t bits = bit_cast<int64_t>(number->Number());
+ return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
}
}
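Restated outside the heap, the cache hash folds the double's 64-bit pattern into 32 bits by XORing its halves and masking to the cache size. A self-contained sketch:

#include <cstdint>
#include <cstring>

int NumberCacheHashExample(double number, int mask) {
  std::int64_t bits;
  std::memcpy(&bits, &number, sizeof(bits));  // portable stand-in for bit_cast
  return (static_cast<int>(bits) ^ static_cast<int>(bits >> 32)) & mask;
}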
@@ -2211,7 +2199,7 @@ Handle<String> Factory::NumberToString(Handle<Object> number,
isolate()->counters()->number_to_string_runtime()->Increment();
if (check_number_string_cache) {
Handle<Object> cached = GetNumberStringCache(number);
- if (!cached->IsUndefined()) return Handle<String>::cast(cached);
+ if (!cached->IsUndefined(isolate())) return Handle<String>::cast(cached);
}
char arr[100];
@@ -2240,20 +2228,20 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<FixedArray> break_points(
NewFixedArray(DebugInfo::kEstimatedNofBreakPointsInFunction));
+ // Make a copy of the bytecode array if available.
+ Handle<Object> maybe_debug_bytecode_array = undefined_value();
+ if (shared->HasBytecodeArray()) {
+ Handle<BytecodeArray> original(shared->bytecode_array());
+ maybe_debug_bytecode_array = CopyBytecodeArray(original);
+ }
+
// Create and set up the debug info object. Debug info contains the function,
// a copy of the original code, the executing code, and an initial fixed array
// for active break points.
Handle<DebugInfo> debug_info =
Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
debug_info->set_shared(*shared);
- if (shared->HasBytecodeArray()) {
- // Create a copy for debugging.
- Handle<BytecodeArray> original(shared->bytecode_array(), isolate());
- Handle<BytecodeArray> copy = CopyBytecodeArray(original);
- debug_info->set_abstract_code(AbstractCode::cast(*copy));
- } else {
- debug_info->set_abstract_code(AbstractCode::cast(shared->code()));
- }
+ debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
debug_info->set_break_points(*break_points);
// Link debug info to function.
@@ -2312,7 +2300,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
int cache_index = number_of_properties - 1;
Handle<Object> maybe_cache(context->map_cache(), isolate());
- if (maybe_cache->IsUndefined()) {
+ if (maybe_cache->IsUndefined(isolate())) {
// Allocate the new map cache for the native context.
maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
context->set_map_cache(*maybe_cache);
@@ -2368,6 +2356,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
store->set(JSRegExp::kIrregexpCaptureCountIndex,
Smi::FromInt(capture_count));
+ store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
regexp->set_data(*store);
}
@@ -2384,6 +2373,134 @@ Handle<Object> Factory::ToBoolean(bool value) {
return value ? true_value() : false_value();
}
+Handle<String> Factory::ToPrimitiveHintString(ToPrimitiveHint hint) {
+ switch (hint) {
+ case ToPrimitiveHint::kDefault:
+ return default_string();
+ case ToPrimitiveHint::kNumber:
+ return number_string();
+ case ToPrimitiveHint::kString:
+ return string_string();
+ }
+ UNREACHABLE();
+ return Handle<String>::null();
+}
+
+Handle<Map> Factory::CreateSloppyFunctionMap(FunctionMode function_mode) {
+ Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ SetFunctionInstanceDescriptor(map, function_mode);
+ map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ map->set_is_callable();
+ return map;
+}
+
+void Factory::SetFunctionInstanceDescriptor(Handle<Map> map,
+ FunctionMode function_mode) {
+ int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
+ Map::EnsureDescriptorSlack(map, size);
+
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+
+ STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
+ { // Add length.
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), ro_attribs);
+ { // Add name.
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+ roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+ Handle<AccessorInfo> args =
+ Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
+ { // Add arguments.
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(args->name())), args,
+ ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+ Handle<AccessorInfo> caller =
+ Accessors::FunctionCallerInfo(isolate(), ro_attribs);
+ { // Add caller.
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(caller->name())),
+ caller, ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+ if (IsFunctionModeWithPrototype(function_mode)) {
+ if (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE) {
+ ro_attribs = static_cast<PropertyAttributes>(ro_attribs & ~READ_ONLY);
+ }
+ Handle<AccessorInfo> prototype =
+ Accessors::FunctionPrototypeInfo(isolate(), ro_attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
+ prototype, ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+}
+
+Handle<Map> Factory::CreateStrictFunctionMap(
+ FunctionMode function_mode, Handle<JSFunction> empty_function) {
+ Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ SetStrictFunctionInstanceDescriptor(map, function_mode);
+ map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ map->set_is_callable();
+ Map::SetPrototype(map, empty_function);
+ return map;
+}
+
+void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
+ FunctionMode function_mode) {
+ int size = IsFunctionModeWithPrototype(function_mode) ? 3 : 2;
+ Map::EnsureDescriptorSlack(map, size);
+
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+
+ DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
+ function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
+ function_mode == FUNCTION_WITHOUT_PROTOTYPE);
+ STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+ { // Add length.
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(handle(Name::cast(length->name())), length,
+ roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
+ { // Add name.
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), roc_attribs);
+ AccessorConstantDescriptor d(handle(Name::cast(name->name())), name,
+ roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+ if (IsFunctionModeWithPrototype(function_mode)) {
+ // Add prototype.
+ PropertyAttributes attribs =
+ function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ? rw_attribs
+ : ro_attribs;
+ Handle<AccessorInfo> prototype =
+ Accessors::FunctionPrototypeInfo(isolate(), attribs);
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
+ prototype, attribs);
+ map->AppendDescriptor(&d);
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 2fa2901437..4908d5fad8 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -12,12 +12,20 @@
namespace v8 {
namespace internal {
+enum FunctionMode {
+ // With prototype.
+ FUNCTION_WITH_WRITEABLE_PROTOTYPE,
+ FUNCTION_WITH_READONLY_PROTOTYPE,
+ // Without prototype.
+ FUNCTION_WITHOUT_PROTOTYPE
+};
+
// Interface for handle based allocation.
class Factory final {
public:
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
- Handle<Object> to_number, bool to_boolean,
- const char* type_of, byte kind);
+ Handle<Object> to_number, const char* type_of,
+ byte kind);
// Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(
@@ -163,6 +171,9 @@ class Factory final {
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+ const ZoneVector<uc16>* str, PretenureFlag pretenure = NOT_TENURED);
+
// Allocates an internalized string in old space based on the character
// stream.
Handle<String> NewInternalizedStringFromUtf8(Vector<const char> str,
@@ -224,6 +235,10 @@ class Factory final {
const ExternalOneByteString::Resource* resource);
MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
+ // Create a new external string object for one-byte encoded native script.
+ // It does not cache the resource data pointer.
+ Handle<ExternalOneByteString> NewNativeSourceString(
+ const ExternalOneByteString::Resource* resource);
// Create a symbol.
Handle<Symbol> NewSymbol();
@@ -270,8 +285,6 @@ class Factory final {
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
- Handle<CodeCache> NewCodeCache();
-
Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
int aliased_context_slot);
@@ -367,6 +380,16 @@ class Factory final {
}
return NewNumber(static_cast<double>(value), pretenure);
}
+ Handle<Object> NewNumberFromInt64(int64_t value,
+ PretenureFlag pretenure = NOT_TENURED) {
+ if (value <= std::numeric_limits<int32_t>::max() &&
+ value >= std::numeric_limits<int32_t>::min() &&
+ Smi::IsValid(static_cast<int32_t>(value))) {
+ return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)),
+ isolate());
+ }
+ return NewNumber(static_cast<double>(value), pretenure);
+ }
Handle<HeapNumber> NewHeapNumber(double value,
MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
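The explicit bounds test in NewNumberFromInt64 above matters because the narrowing cast alone would wrap silently. A small illustration (two's-complement wrap assumed, as on mainstream targets):

#include <cstdint>

static_assert(static_cast<std::int32_t>(INT64_C(4294967297)) == 1,
              "2^32 + 1 narrows to 1, so an unchecked cast could smuggle a "
              "huge value past the Smi check");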
@@ -407,10 +430,6 @@ class Factory final {
PretenureFlag pretenure = NOT_TENURED,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
- // JS modules are pretenured.
- Handle<JSModule> NewJSModule(Handle<Context> context,
- Handle<ScopeInfo> scope_info);
-
// JS arrays are pretenured when allocated by the parser.
// Create a JSArray with a specified length and elements initialized
@@ -501,9 +520,12 @@ class Factory final {
Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy();
+ Handle<JSFunction> NewFunction(Handle<Map> map,
+ Handle<SharedFunctionInfo> info,
+ Handle<Object> context_or_undefined,
+ PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype,
- bool read_only_prototype = false,
bool is_strict = false);
Handle<JSFunction> NewFunction(Handle<String> name);
Handle<JSFunction> NewFunctionWithoutPrototype(Handle<String> name,
@@ -512,7 +534,7 @@ class Factory final {
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
- Handle<Context> context, PretenureFlag pretenure = TENURED);
+ Handle<Object> context_or_undefined, PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
@@ -521,8 +543,6 @@ class Factory final {
Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
Handle<Object> prototype, InstanceType type,
int instance_size,
- bool read_only_prototype = false,
- bool install_constructor = false,
bool is_strict = false);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Code> code,
@@ -550,8 +570,6 @@ class Factory final {
Handle<Code> CopyCode(Handle<Code> code);
- Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
-
Handle<BytecodeArray> CopyBytecodeArray(Handle<BytecodeArray>);
// Interface for creating error objects.
@@ -562,6 +580,11 @@ class Factory final {
return NewRangeError(MessageTemplate::kInvalidStringLength);
}
+ Handle<Object> NewURIError() {
+ return NewError(isolate()->uri_error_function(),
+ MessageTemplate::kURIMalformed);
+ }
+
Handle<Object> NewError(Handle<JSFunction> constructor,
MessageTemplate::Template template_index,
Handle<Object> arg0 = Handle<Object>(),
@@ -639,6 +662,16 @@ class Factory final {
MaybeHandle<Code> code,
bool is_constructor);
+ static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
+ return (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
+ function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
+ }
+
+ Handle<Map> CreateSloppyFunctionMap(FunctionMode function_mode);
+
+ Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
+ Handle<JSFunction> empty_function);
+
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
Handle<Object> argument,
@@ -679,6 +712,9 @@ class Factory final {
// Converts the given boolean condition to JavaScript boolean value.
Handle<Object> ToBoolean(bool value);
+ // Converts the given ToPrimitive hint to its string representation.
+ Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
+
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
@@ -693,6 +729,9 @@ class Factory final {
AllocationSpace space,
Handle<AllocationSite> allocation_site);
+ MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
+ PretenureFlag pretenure);
+
// Creates a code object that is not yet fully initialized.
inline Handle<Code> NewCodeRaw(int object_size, bool immovable);
@@ -703,15 +742,15 @@ class Factory final {
// Update the cache with a new number-string pair.
void SetNumberStringCache(Handle<Object> number, Handle<String> string);
- // Creates a function initialized with a shared part.
- Handle<JSFunction> NewFunction(Handle<Map> map,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context,
- PretenureFlag pretenure = TENURED);
-
// Create a JSArray with no elements and no length.
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
+
+ void SetFunctionInstanceDescriptor(Handle<Map> map,
+ FunctionMode function_mode);
+
+ void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
+ FunctionMode function_mode);
};
} // namespace internal
diff --git a/deps/v8/src/fast-accessor-assembler.cc b/deps/v8/src/fast-accessor-assembler.cc
index cd2910ca13..ebaab9a529 100644
--- a/deps/v8/src/fast-accessor-assembler.cc
+++ b/deps/v8/src/fast-accessor-assembler.cc
@@ -5,12 +5,12 @@
#include "src/fast-accessor-assembler.h"
#include "src/base/logging.h"
+#include "src/code-stub-assembler.h"
#include "src/code-stubs.h" // For CallApiCallbackStub.
-#include "src/compiler/code-stub-assembler.h"
#include "src/handles-inl.h"
#include "src/objects.h" // For FAA::LoadInternalField impl.
-using v8::internal::compiler::CodeStubAssembler;
+using v8::internal::CodeStubAssembler;
using v8::internal::compiler::Node;
namespace v8 {
@@ -43,40 +43,23 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
ValueId value, int field_no) {
CHECK_EQ(kBuilding, state_);
- // Determine the 'value' object's instance type.
- Node* object_map = assembler_->LoadObjectField(
- FromId(value), Internals::kHeapObjectMapOffset, MachineType::Pointer());
- Node* instance_type = assembler_->WordAnd(
- assembler_->LoadObjectField(object_map,
- Internals::kMapInstanceTypeAndBitFieldOffset,
- MachineType::Uint16()),
- assembler_->IntPtrConstant(0xff));
-
- // Check whether we have a proper JSObject.
CodeStubAssembler::Variable result(assembler_.get(),
MachineRepresentation::kTagged);
- CodeStubAssembler::Label is_jsobject(assembler_.get());
- CodeStubAssembler::Label is_not_jsobject(assembler_.get());
+ LabelId is_not_jsobject = MakeLabel();
CodeStubAssembler::Label merge(assembler_.get(), &result);
- assembler_->Branch(
- assembler_->WordEqual(
- instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
- &is_jsobject, &is_not_jsobject);
- // JSObject? Then load the internal field field_no.
- assembler_->Bind(&is_jsobject);
+ CheckIsJSObjectOrJump(value, is_not_jsobject);
+
Node* internal_field = assembler_->LoadObjectField(
FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
MachineType::Pointer());
+
result.Bind(internal_field);
assembler_->Goto(&merge);
- // No JSObject? Return undefined.
- // TODO(vogelheim): Check whether this is the appropriate action, or whether
- // the method should take a label instead.
- assembler_->Bind(&is_not_jsobject);
- Node* fail_value = assembler_->UndefinedConstant();
- result.Bind(fail_value);
+ // Return null, mimicking the C++ counterpart.
+ SetLabel(is_not_jsobject);
+ result.Bind(assembler_->NullConstant());
assembler_->Goto(&merge);
// Return.
@@ -84,6 +67,31 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
return FromRaw(result.value());
}
+FastAccessorAssembler::ValueId
+FastAccessorAssembler::LoadInternalFieldUnchecked(ValueId value, int field_no) {
+ CHECK_EQ(kBuilding, state_);
+
+ // Defensive debug checks.
+ if (FLAG_debug_code) {
+ LabelId is_jsobject = MakeLabel();
+ LabelId is_not_jsobject = MakeLabel();
+ CheckIsJSObjectOrJump(value, is_not_jsobject);
+ assembler_->Goto(FromId(is_jsobject));
+
+ SetLabel(is_not_jsobject);
+ assembler_->DebugBreak();
+ assembler_->Goto(FromId(is_jsobject));
+
+ SetLabel(is_jsobject);
+ }
+
+ Node* result = assembler_->LoadObjectField(
+ FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
+ MachineType::Pointer());
+
+ return FromRaw(result);
+}
+
FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
int offset) {
CHECK_EQ(kBuilding, state_);
@@ -100,6 +108,11 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
0, MachineType::AnyTagged()));
}
+FastAccessorAssembler::ValueId FastAccessorAssembler::ToSmi(ValueId value) {
+ CHECK_EQ(kBuilding, state_);
+ return FromRaw(assembler_->SmiTag(FromId(value)));
+}
+
void FastAccessorAssembler::ReturnValue(ValueId value) {
CHECK_EQ(kBuilding, state_);
assembler_->Return(FromId(value));
@@ -113,7 +126,7 @@ void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
assembler_->Word32Equal(
assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
assembler_->Int32Constant(0)),
- &pass, &fail);
+ &fail, &pass);
assembler_->Bind(&fail);
assembler_->Return(assembler_->NullConstant());
assembler_->Bind(&pass);
@@ -141,13 +154,18 @@ void FastAccessorAssembler::SetLabel(LabelId label_id) {
assembler_->Bind(FromId(label_id));
}
+void FastAccessorAssembler::Goto(LabelId label_id) {
+ CHECK_EQ(kBuilding, state_);
+ assembler_->Goto(FromId(label_id));
+}
+
void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {
CHECK_EQ(kBuilding, state_);
CodeStubAssembler::Label pass(assembler_.get());
assembler_->Branch(
assembler_->WordEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
- &pass, FromId(label_id));
+ FromId(label_id), &pass);
assembler_->Bind(&pass);
}
@@ -161,23 +179,23 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
ExternalReference::DIRECT_API_CALL, isolate());
// Create & call API callback via stub.
- CallApiCallbackStub stub(isolate(), 1, true);
+ CallApiCallbackStub stub(isolate(), 1, true, true);
DCHECK_EQ(5, stub.GetCallInterfaceDescriptor().GetParameterCount());
DCHECK_EQ(1, stub.GetCallInterfaceDescriptor().GetStackParameterCount());
// TODO(vogelheim): There is currently no clean way to retrieve the context
// parameter for a stub and the implementation details are hidden in
// compiler/*. The context_parameter is computed as:
// Linkage::GetJSCallContextParamIndex(descriptor->JSParameterCount())
- const int context_parameter = 2;
+ const int context_parameter = 3;
Node* call = assembler_->CallStub(
stub.GetCallInterfaceDescriptor(),
assembler_->HeapConstant(stub.GetCode()),
assembler_->Parameter(context_parameter),
// Stub/register parameters:
- assembler_->Parameter(0), /* receiver (use accessor's) */
- assembler_->UndefinedConstant(), /* call_data (undefined) */
- assembler_->NullConstant(), /* holder (null) */
+ assembler_->UndefinedConstant(), /* callee (there's no JSFunction) */
+ assembler_->UndefinedConstant(), /* call_data (undefined) */
+ assembler_->Parameter(0), /* receiver (same as holder in this case) */
assembler_->ExternalConstant(callback), /* API callback function */
// JS arguments, on stack:
@@ -186,6 +204,40 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
return FromRaw(call);
}
+void FastAccessorAssembler::CheckIsJSObjectOrJump(ValueId value_id,
+ LabelId label_id) {
+ CHECK_EQ(kBuilding, state_);
+
+ // Determine the 'value' object's instance type.
+ Node* object_map = assembler_->LoadObjectField(
+ FromId(value_id), Internals::kHeapObjectMapOffset,
+ MachineType::Pointer());
+
+ Node* instance_type = assembler_->WordAnd(
+ assembler_->LoadObjectField(object_map,
+ Internals::kMapInstanceTypeAndBitFieldOffset,
+ MachineType::Uint16()),
+ assembler_->IntPtrConstant(0xff));
+
+ CodeStubAssembler::Label is_jsobject(assembler_.get());
+
+ // Check whether we have a proper JSObject.
+ assembler_->GotoIf(
+ assembler_->WordEqual(
+ instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
+ &is_jsobject);
+
+ // JSApiObject?
+ assembler_->GotoUnless(
+ assembler_->WordEqual(instance_type, assembler_->IntPtrConstant(
+ Internals::kJSApiObjectType)),
+ FromId(label_id));
+
+ // Continue.
+ assembler_->Goto(&is_jsobject);
+ assembler_->Bind(&is_jsobject);
+}
+
MaybeHandle<Code> FastAccessorAssembler::Build() {
CHECK_EQ(kBuilding, state_);
Handle<Code> code = assembler_->GenerateCode();
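For reference, a plain-C++ analogue of CheckIsJSObjectOrJump's type extraction, with the two offsets standing in for the Internals constants used above (their real values are not assumed here):

#include <cstdint>
#include <cstring>

std::uint8_t InstanceTypeOfExample(const char* object, int map_offset,
                                   int type_offset) {
  const char* map;  // the object's header word points at its map
  std::memcpy(&map, object + map_offset, sizeof(map));
  std::uint16_t type_and_bits;  // 16-bit "instance type and bit field" word
  std::memcpy(&type_and_bits, map + type_offset, sizeof(type_and_bits));
  return static_cast<std::uint8_t>(type_and_bits & 0xff);  // low byte = type
}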
diff --git a/deps/v8/src/fast-accessor-assembler.h b/deps/v8/src/fast-accessor-assembler.h
index 57e72e8eb1..9468d8603a 100644
--- a/deps/v8/src/fast-accessor-assembler.h
+++ b/deps/v8/src/fast-accessor-assembler.h
@@ -6,15 +6,15 @@
#define V8_FAST_ACCESSOR_ASSEMBLER_H_
#include <stdint.h>
+#include <memory>
#include <vector>
#include "include/v8-experimental.h"
#include "src/base/macros.h"
-#include "src/base/smart-pointers.h"
#include "src/handles.h"
// For CodeStubAssembler::Label. (We cannot forward-declare inner classes.)
-#include "src/compiler/code-stub-assembler.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -54,15 +54,28 @@ class FastAccessorAssembler {
ValueId IntegerConstant(int int_constant);
ValueId GetReceiver();
ValueId LoadInternalField(ValueId value_id, int field_no);
+
+ // Loads an internal field and assumes the object is indeed a valid API object
+ // with the proper internal fields present.
+ // The intended use is to call this on an object whose structure has already
+ // been checked previously, e.g. the accessor's receiver, which is map-checked
+ // before the fast accessor is called on it. Using this on an arbitrary object
+ // will result in unsafe memory accesses.
+ ValueId LoadInternalFieldUnchecked(ValueId value_id, int field_no);
+
ValueId LoadValue(ValueId value_id, int offset);
ValueId LoadObject(ValueId value_id, int offset);
+ // Converts a machine integer to a Smi.
+ ValueId ToSmi(ValueId value_id);
+
// Builder / assembler functions for control flow.
void ReturnValue(ValueId value_id);
void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
void CheckNotZeroOrReturnNull(ValueId value_id);
LabelId MakeLabel();
void SetLabel(LabelId label_id);
+ void Goto(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
// C++ callback.
@@ -73,9 +86,11 @@ class FastAccessorAssembler {
private:
ValueId FromRaw(compiler::Node* node);
- LabelId FromRaw(compiler::CodeStubAssembler::Label* label);
+ LabelId FromRaw(CodeStubAssembler::Label* label);
compiler::Node* FromId(ValueId value) const;
- compiler::CodeStubAssembler::Label* FromId(LabelId value) const;
+ CodeStubAssembler::Label* FromId(LabelId value) const;
+
+ void CheckIsJSObjectOrJump(ValueId value, LabelId label_id);
void Clear();
Zone* zone() { return &zone_; }
@@ -83,13 +98,13 @@ class FastAccessorAssembler {
Zone zone_;
Isolate* isolate_;
- base::SmartPointer<compiler::CodeStubAssembler> assembler_;
+ std::unique_ptr<CodeStubAssembler> assembler_;
// To prevent exposing the RMA internals to the outside world, we'll map
// Node and Label pointers to integers wrapped in ValueId and LabelId instances.
// These vectors maintain this mapping.
std::vector<compiler::Node*> nodes_;
- std::vector<compiler::CodeStubAssembler::Label*> labels_;
+ std::vector<CodeStubAssembler::Label*> labels_;
// Remember the current state for easy error checking. (We prefer to be
// strict as this class will be exposed in the API.)
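A minimal sketch of that wrapping scheme, independent of the assembler types: opaque integer ids index into a private vector, so raw compiler pointers never cross the API boundary.

#include <vector>

template <typename T>
class IdMapExample {
 public:
  int FromRaw(T* ptr) {  // wrap: remember the pointer, hand out its index
    items_.push_back(ptr);
    return static_cast<int>(items_.size()) - 1;
  }
  T* FromId(int id) const { return items_.at(id); }  // unwrap
 private:
  std::vector<T*> items_;
};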
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index 2e6693ce38..c2f25bb7f0 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -6,6 +6,7 @@
#define V8_FIELD_INDEX_INL_H_
#include "src/field-index.h"
+#include "src/ic/handler-configuration.h"
namespace v8 {
namespace internal {
@@ -39,8 +40,7 @@ inline FieldIndex FieldIndex::ForPropertyIndex(Map* map,
is_double, inobject_properties, first_inobject_offset);
}
-
-// Takes an index as computed by GetLoadFieldByIndex and reconstructs a
+// Takes an index as computed by GetLoadByFieldIndex and reconstructs a
// FieldIndex object from it.
inline FieldIndex FieldIndex::ForLoadByFieldIndex(Map* map, int orig_index) {
int field_index = orig_index;
@@ -85,6 +85,38 @@ inline int FieldIndex::GetLoadByFieldIndex() const {
return is_double() ? (result | 1) : result;
}
+// Takes an offset as computed by GetLoadByFieldOffset and reconstructs a
+// FieldIndex object from it.
+// static
+inline FieldIndex FieldIndex::ForLoadByFieldOffset(Map* map, int offset) {
+ DCHECK(LoadHandlerTypeBit::decode(offset) == kLoadICHandlerForProperties);
+ bool is_inobject = FieldOffsetIsInobject::decode(offset);
+ bool is_double = FieldOffsetIsDouble::decode(offset);
+ int field_index = FieldOffsetOffset::decode(offset) >> kPointerSizeLog2;
+ int first_inobject_offset = 0;
+ if (is_inobject) {
+ first_inobject_offset =
+ map->IsJSObjectMap() ? map->GetInObjectPropertyOffset(0) : 0;
+ } else {
+ first_inobject_offset = FixedArray::kHeaderSize;
+ }
+ int inobject_properties =
+ map->IsJSObjectMap() ? map->GetInObjectProperties() : 0;
+ FieldIndex result(is_inobject, field_index, is_double, inobject_properties,
+ first_inobject_offset);
+ DCHECK(result.GetLoadByFieldOffset() == offset);
+ return result;
+}
+
+// Returns the offset format consumed by TurboFan stubs:
+// (offset << 3) | (is_double << 2) | (is_inobject << 1) | is_property
+// where |offset| is relative to the object start or the FixedArray start,
+// respectively.
+inline int FieldIndex::GetLoadByFieldOffset() const {
+ return FieldOffsetIsInobject::encode(is_inobject()) |
+ FieldOffsetIsDouble::encode(is_double()) |
+ FieldOffsetOffset::encode(index() << kPointerSizeLog2) |
+ LoadHandlerTypeBit::encode(kLoadICHandlerForProperties);
+}
inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
PropertyDetails details =
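A worked instance of the handler encoding documented in GetLoadByFieldOffset above: an in-object, non-double property at byte offset 24 becomes (24 << 3) | (1 << 1) | 1 = 195.

constexpr int EncodeLoadByFieldOffsetExample(int byte_offset, bool is_double,
                                             bool is_inobject) {
  return (byte_offset << 3) | (static_cast<int>(is_double) << 2) |
         (static_cast<int>(is_inobject) << 1) | 1;  // low bit: "is a property"
}
static_assert(EncodeLoadByFieldOffsetExample(24, false, true) == 195,
              "24 << 3 is 192, plus the in-object bit (2) and property bit (1)");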
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 2862d36bdb..404c0f613f 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -27,10 +27,12 @@ class FieldIndex final {
static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
static FieldIndex ForDescriptor(Map* map, int descriptor_index);
static FieldIndex ForLoadByFieldIndex(Map* map, int index);
+ static FieldIndex ForLoadByFieldOffset(Map* map, int index);
static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index);
static FieldIndex FromFieldAccessStubKey(int key);
int GetLoadByFieldIndex() const;
+ int GetLoadByFieldOffset() const;
bool is_inobject() const {
return IsInObjectBits::decode(bit_field_);
@@ -74,6 +76,11 @@ class FieldIndex final {
(IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask);
}
+ bool operator==(FieldIndex const& other) const {
+ return bit_field_ == other.bit_field_;
+ }
+ bool operator!=(FieldIndex const& other) const { return !(*this == other); }
+
private:
FieldIndex(bool is_inobject, int local_index, bool is_double,
int inobject_properties, int first_inobject_property_offset,
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
index 76d694c132..2e4cbfbedd 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/field-type.cc
@@ -13,7 +13,9 @@ namespace internal {
// static
FieldType* FieldType::None() {
- return reinterpret_cast<FieldType*>(Smi::FromInt(0));
+ // Do not use Smi::FromInt(0) here or for Any(), as that may translate
+ // to `nullptr`, which is not a valid value for `this`.
+ return reinterpret_cast<FieldType*>(Smi::FromInt(2));
}
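The hazard the new comment describes comes from Smi tagging. Taking the 32-bit layout (value << 1, low tag bit 0) as an assumed model, payload 0 is exactly the null pointer pattern:

#include <cstdint>

constexpr std::intptr_t SmiBitsExample(int value) {
  return static_cast<std::intptr_t>(value) << 1;  // low bit 0 marks a Smi
}
static_assert(SmiBitsExample(0) == 0, "Smi 0 aliases the nullptr bit pattern");
static_assert(SmiBitsExample(2) == 4, "Smi 2 yields a harmless nonzero value");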
// static
@@ -70,7 +72,7 @@ bool FieldType::NowIs(FieldType* other) {
bool FieldType::NowIs(Handle<FieldType> other) { return NowIs(*other); }
Type* FieldType::Convert(Zone* zone) {
- if (IsAny()) return Type::Any();
+ if (IsAny()) return Type::NonInternal();
if (IsNone()) return Type::None();
DCHECK(IsClass());
return Type::Class(AsClass(), zone);
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 8450c0c0f9..e5ddbadd2c 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -172,9 +172,6 @@ struct MaybeBoolFlag {
//
#define FLAG FLAG_FULL
-DEFINE_BOOL(warn_template_set, true,
- "warn on deprecated v8::Template::Set() use")
-
DEFINE_BOOL(experimental_extras, false,
"enable code compiled in via v8_experimental_extra_library_files")
@@ -187,9 +184,9 @@ DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
-DEFINE_BOOL(promise_extra, true, "additional V8 Promise functions")
-// Removing extra Promise functions is staged
-DEFINE_NEG_IMPLICATION(harmony, promise_extra)
+DEFINE_BOOL(intl_extra, false, "additional V8 Intl functions")
+// Removing extra Intl functions is shipped
+DEFINE_NEG_VALUE_IMPLICATION(harmony_shipping, intl_extra, true)
// Activate on ClusterFuzz.
DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
@@ -198,37 +195,41 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS(V) \
V(harmony_array_prototype_values, "harmony Array.prototype.values") \
- V(harmony_object_observe, "harmony Object.observe") \
V(harmony_function_sent, "harmony function.sent") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_simd, "harmony simd") \
+ V(harmony_explicit_tailcalls, "harmony explicit tail calls") \
V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_restrictive_generators, \
+ "harmony restrictions on generator declarations") \
+ V(harmony_regexp_named_captures, "harmony regexp named captures") \
V(harmony_regexp_property, "harmony unicode regexp property classes") \
- V(harmony_string_padding, "harmony String-padding methods")
+ V(harmony_for_in, "harmony for-in syntax") \
+ V(harmony_trailing_commas, \
+ "harmony trailing commas in function parameter lists")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
+#define HARMONY_STAGED_BASE(V) \
V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
V(harmony_tailcalls, "harmony tail calls") \
- V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
- V(harmony_object_own_property_descriptors, \
- "harmony Object.getOwnPropertyDescriptors()") \
- V(harmony_exponentiation_operator, "harmony exponentiation operator `**`")
+ V(harmony_async_await, "harmony async-await") \
+ V(harmony_string_padding, "harmony String-padding methods")
+
+#ifdef V8_I18N_SUPPORT
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
+#else
+#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
+#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_function_name, "harmony Function name inference") \
- V(harmony_instanceof, "harmony instanceof support") \
- V(harmony_iterator_close, "harmony iterator finalization") \
- V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_regexp_exec, "harmony RegExp exec override behavior") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
- V(harmony_sloppy_let, "harmony let in sloppy mode") \
- V(harmony_sloppy_function, "harmony sloppy function block scoping") \
- V(harmony_regexp_subclass, "harmony regexp subclassing") \
- V(harmony_restrictive_declarations, \
- "harmony limitations on sloppy mode function declarations") \
- V(harmony_species, "harmony Symbol.species")
+#define HARMONY_SHIPPING(V) \
+ V(harmony_restrictive_declarations, \
+ "harmony limitations on sloppy mode function declarations") \
+ V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
+ V(harmony_object_own_property_descriptors, \
+ "harmony Object.getOwnPropertyDescriptors()")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -253,16 +254,14 @@ HARMONY_STAGED(FLAG_STAGED_FEATURES)
HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
#undef FLAG_SHIPPING_FEATURES
-
-// Feature dependencies.
-DEFINE_IMPLICATION(harmony_sloppy_let, harmony_sloppy)
-DEFINE_IMPLICATION(harmony_sloppy_function, harmony_sloppy)
-
// Flags for experimental implementation features.
DEFINE_BOOL(compiled_keyed_generic_loads, false,
"use optimizing compiler to generate keyed generic load stubs")
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
+DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
+DEFINE_INT(page_promotion_threshold, 70,
+ "min percentage of live bytes on a page to enable fast evacuation")
DEFINE_BOOL(trace_pretenuring, false,
"trace pretenuring decisions of HAllocate instructions")
DEFINE_BOOL(trace_pretenuring_statistics, false,
@@ -271,7 +270,6 @@ DEFINE_BOOL(track_fields, true, "track fields with only smi values")
DEFINE_BOOL(track_double_fields, true, "track fields with double values")
DEFINE_BOOL(track_heap_object_fields, true, "track fields with heap values")
DEFINE_BOOL(track_computed_fields, true, "track computed boilerplate fields")
-DEFINE_BOOL(harmony_instanceof_opt, true, "optimize ES6 instanceof support")
DEFINE_IMPLICATION(track_double_fields, track_fields)
DEFINE_IMPLICATION(track_heap_object_fields, track_fields)
DEFINE_IMPLICATION(track_computed_fields, track_fields)
@@ -293,14 +291,34 @@ DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition, false, "use ignition interpreter")
-DEFINE_BOOL(ignition_eager, true, "eagerly compile and parse with ignition")
+DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
+DEFINE_IMPLICATION(ignition_staging, ignition)
+DEFINE_IMPLICATION(ignition_staging, ignition_osr)
+DEFINE_IMPLICATION(ignition_staging, turbo_from_bytecode)
+DEFINE_IMPLICATION(ignition_staging, ignition_preserve_bytecode)
+DEFINE_BOOL(ignition_eager, false, "eagerly compile and parse with ignition")
DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
+DEFINE_BOOL(ignition_deadcode, true,
+ "use ignition dead code elimination optimizer")
+DEFINE_BOOL(ignition_osr, false, "enable support for OSR from ignition code")
+DEFINE_BOOL(ignition_peephole, true, "use ignition peephole optimizer")
+DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
+DEFINE_BOOL(ignition_filter_expression_positions, true,
+ "filter expression positions before the bytecode pipeline")
+DEFINE_BOOL(ignition_preserve_bytecode, false,
+ "preserve generated bytecode even when switching tiers")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
DEFINE_BOOL(trace_ignition, false,
"trace the bytecodes executed by the ignition interpreter")
DEFINE_BOOL(trace_ignition_codegen, false,
"trace the codegen of ignition interpreter bytecode handlers")
+DEFINE_BOOL(trace_ignition_dispatches, false,
+ "traces the dispatches to bytecode handlers by the ignition "
+ "interpreter")
+DEFINE_STRING(trace_ignition_dispatches_output_file, nullptr,
+ "the file to which the bytecode handler dispatch table is "
+ "written (by default, the table is not written to a file)")
// Flags for Crankshaft.
DEFINE_BOOL(crankshaft, true, "use crankshaft")
@@ -337,6 +355,8 @@ DEFINE_STRING(trace_phase, "HLZ", "trace generated IR for specified phases")
DEFINE_BOOL(trace_inlining, false, "trace inlining decisions")
DEFINE_BOOL(trace_load_elimination, false, "trace load elimination")
DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
+DEFINE_BOOL(turbo_verify_store_elimination, false,
+ "verify store elimination more rigorously")
DEFINE_BOOL(trace_alloc, false, "trace register allocator")
DEFINE_BOOL(trace_all_uses, false, "trace all use positions")
DEFINE_BOOL(trace_range, false, "trace range analysis")
@@ -406,8 +426,8 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
// Flags for TurboFan.
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
-DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
-DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
+DEFINE_IMPLICATION(turbo, turbo_loop_peeling)
+DEFINE_BOOL(turbo_from_bytecode, false, "enable building graphs from bytecode")
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
@@ -422,15 +442,20 @@ DEFINE_STRING(trace_turbo_cfg_file, NULL,
DEFINE_BOOL(trace_turbo_types, true, "trace TurboFan's types")
DEFINE_BOOL(trace_turbo_scheduler, false, "trace TurboFan's scheduler")
DEFINE_BOOL(trace_turbo_reduction, false, "trace TurboFan's various reducers")
+DEFINE_BOOL(trace_turbo_trimming, false, "trace TurboFan's graph trimmer")
DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
DEFINE_BOOL(trace_turbo_ceq, false, "trace TurboFan's control equivalence")
+DEFINE_BOOL(trace_turbo_loop, false, "trace TurboFan's loop optimizations")
DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
DEFINE_BOOL(turbo_asm_deoptimization, false,
"enable deoptimization in TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
+DEFINE_BOOL(turbo_stats_nvp, false,
+ "print TurboFan statistics in machine-readable format")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
-DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
+DEFINE_BOOL(turbo_type_feedback, true,
+ "use typed feedback for representation inference in Turbofan")
DEFINE_BOOL(turbo_source_positions, false,
"track source code positions when building TurboFan IR")
DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
@@ -440,15 +465,19 @@ DEFINE_BOOL(native_context_specialization, true,
"enable native context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
+DEFINE_BOOL(turbo_load_elimination, true, "enable load elimination in TurboFan")
+DEFINE_BOOL(trace_turbo_load_elimination, false,
+ "trace TurboFan load elimination")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
"verify register allocation in TurboFan")
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
-DEFINE_BOOL(turbo_osr, true, "enable OSR in TurboFan")
DEFINE_BOOL(turbo_stress_loop_peeling, false,
"stress loop peeling optimization")
+DEFINE_BOOL(turbo_loop_peeling, false, "Turbofan loop peeling")
+DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
@@ -458,13 +487,27 @@ DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
"randomly schedule instructions to stress dependency tracking")
+DEFINE_BOOL(turbo_store_elimination, false,
+ "enable store-store elimination in TurboFan")
+DEFINE_IMPLICATION(turbo, turbo_store_elimination)
+
+// Flags to help platform porters
+DEFINE_BOOL(minimal, false,
+ "simplifies execution model to make porting "
+ "easier (e.g. always use Ignition, never use Crankshaft")
+DEFINE_IMPLICATION(minimal, ignition)
+DEFINE_NEG_IMPLICATION(minimal, crankshaft)
+DEFINE_NEG_IMPLICATION(minimal, use_ic)
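The new porter-oriented flag relies on implications. Modeled outside the real flag machinery, they behave like this (a simplified sketch, not V8's implementation):

struct FlagsModel {
  bool minimal = false;
  bool ignition = false;
  bool crankshaft = true;
  bool use_ic = true;
};

void ApplyMinimalImplications(FlagsModel* f) {
  if (f->minimal) {
    f->ignition = true;     // DEFINE_IMPLICATION(minimal, ignition)
    f->crankshaft = false;  // DEFINE_NEG_IMPLICATION(minimal, crankshaft)
    f->use_ic = false;      // DEFINE_NEG_IMPLICATION(minimal, use_ic)
  }
}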
// Flags for native WebAssembly.
DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
+DEFINE_INT(wasm_num_compilation_tasks, 10,
+ "number of parallel compilation tasks for wasm")
DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
+DEFINE_BOOL(trace_wasm_interpreter, false, "trace interpretation of wasm code")
DEFINE_INT(trace_wasm_ast_start, 0,
"start function for WASM AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for WASM AST trace (exclusive)")
@@ -474,7 +517,7 @@ DEFINE_BOOL(wasm_break_on_decoder_error, false,
DEFINE_BOOL(wasm_loop_assignment_analysis, true,
"perform loop assignment analysis for WASM")
-DEFINE_BOOL(enable_simd_asmjs, false, "enable SIMD.js in asm.js stdlib")
+DEFINE_BOOL(validate_asm, false, "validate asm.js modules before compiling")
DEFINE_BOOL(dump_wasm_module, false, "dump WASM module bytes")
DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
@@ -482,6 +525,11 @@ DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
+DEFINE_BOOL(wasm_simd_prototype, false,
+ "enable prototype simd opcodes for wasm")
+DEFINE_BOOL(wasm_eh_prototype, false,
+ "enable prototype exception handling opcodes for wasm")
+
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
// 0x1800 fits in the immediate field of an ARM instruction.
@@ -521,13 +569,9 @@ DEFINE_BOOL(enable_neon, ENABLE_NEON_DEFAULT,
"enable use of NEON instructions if available (ARM only)")
DEFINE_BOOL(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
-DEFINE_BOOL(enable_mls, true,
- "enable use of MLS instructions if available (ARM only)")
DEFINE_BOOL(enable_movw_movt, false,
"enable loading 32-bit constant by means of movw/movt "
"instruction pairs (ARM only)")
-DEFINE_BOOL(enable_unaligned_accesses, true,
- "enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT,
"enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_BOOL(enable_vldr_imm, false,
@@ -536,11 +580,14 @@ DEFINE_BOOL(force_long_branches, false,
"force all emitted branches to be in long mode (MIPS/PPC only)")
DEFINE_STRING(mcpu, "auto", "enable optimization for specific cpu")
+// regexp-macro-assembler-*.cc
+DEFINE_BOOL(enable_regexp_unaligned_accesses, true,
+ "enable unaligned accesses for the regexp engine")
+
DEFINE_IMPLICATION(enable_armv8, enable_vfp3)
DEFINE_IMPLICATION(enable_armv8, enable_neon)
DEFINE_IMPLICATION(enable_armv8, enable_32dregs)
DEFINE_IMPLICATION(enable_armv8, enable_sudiv)
-DEFINE_IMPLICATION(enable_armv8, enable_mls)
// bootstrapper.cc
DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
@@ -556,7 +603,10 @@ DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_INT(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_BOOL(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
-DEFINE_BOOL(disable_native_files, false, "disable builtin natives files")
+
+// builtins.cc
+DEFINE_BOOL(allow_unsafe_function_constructor, false,
+ "allow invoking the function constructor without security checks")
// builtins-ia32.cc
DEFINE_BOOL(inline_new, true, "use fast inline allocation")
@@ -572,6 +622,8 @@ DEFINE_BOOL(mask_constants_with_cookie, true,
DEFINE_BOOL(lazy, true, "use lazy compilation")
DEFINE_BOOL(trace_opt, false, "trace lazy optimization")
DEFINE_BOOL(trace_opt_stats, false, "trace lazy optimization statistics")
+DEFINE_BOOL(trace_file_names, false,
+ "include file names in trace-opt/trace-deopt output")
DEFINE_BOOL(opt, true, "use adaptive optimizations")
DEFINE_BOOL(always_opt, false, "always try to optimize functions")
DEFINE_BOOL(always_osr, false, "always try to OSR functions")
@@ -684,12 +736,14 @@ DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
+DEFINE_BOOL(incremental_marking_wrappers, true,
+ "use incremental marking for marking wrappers")
DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
"keep finalizing incremental marking as long as we discover at "
"least this many unmarked objects")
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
-DEFINE_BOOL(black_allocation, true, "use black allocation")
+DEFINE_BOOL(black_allocation, false, "use black allocation")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(parallel_pointer_update, true,
@@ -701,6 +755,7 @@ DEFINE_BOOL(track_gc_object_stats, false,
DEFINE_BOOL(trace_gc_object_stats, false,
"trace object counts and memory usage")
DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
+DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
DEFINE_BOOL(track_detached_contexts, true,
"track native contexts that are expected to be garbage collected")
DEFINE_BOOL(trace_detached_contexts, false,
@@ -716,6 +771,10 @@ DEFINE_BOOL(scavenge_reclaim_unmodified_objects, true,
DEFINE_INT(heap_growing_percent, 0,
"specifies heap growing factor as (1 + heap_growing_percent/100)")
+// execution.cc, messages.cc
+DEFINE_BOOL(clear_exceptions_on_js_entry, false,
+ "clear pending exceptions when entering JavaScript")
+
// counters.cc
DEFINE_INT(histogram_interval, 600000,
"time interval in ms for aggregating memory histograms")
@@ -740,6 +799,7 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
+DEFINE_BOOL(tf_load_ic_stub, true, "use TF LoadIC stub")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -764,13 +824,8 @@ DEFINE_INT(random_seed, 0,
// objects.cc
DEFINE_BOOL(trace_weak_arrays, false, "Trace WeakFixedArray usage")
-DEFINE_BOOL(track_prototype_users, false,
- "Keep track of which maps refer to a given prototype object")
DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
-DEFINE_BOOL(eliminate_prototype_chain_checks, true,
- "Collapse prototype chain checks into single-cell checks")
-DEFINE_IMPLICATION(eliminate_prototype_chain_checks, track_prototype_users)
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
#if TRACE_MAPS
@@ -819,6 +874,7 @@ DEFINE_BOOL(randomize_hashes, true,
DEFINE_INT(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
+DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -839,13 +895,6 @@ DEFINE_INT(testing_int_flag, 13, "testing_int_flag")
DEFINE_FLOAT(testing_float_flag, 2.5, "float-flag")
DEFINE_STRING(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness")
-#ifdef _WIN32
-DEFINE_STRING(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
- "file in which to testing_serialize heap")
-#else
-DEFINE_STRING(testing_serialization_file, "/tmp/serdes",
- "file in which to serialize heap")
-#endif
// mksnapshot.cc
DEFINE_STRING(startup_src, NULL,
@@ -935,9 +984,6 @@ DEFINE_BOOL(enable_slow_asserts, false,
#endif
// codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
-DEFINE_BOOL(print_source, false, "pretty print source code")
-DEFINE_BOOL(print_builtin_source, false,
- "pretty print source code for builtins")
DEFINE_BOOL(print_ast, false, "print source AST")
DEFINE_BOOL(print_builtin_ast, false, "print source AST for builtins")
DEFINE_BOOL(trap_on_abort, false, "replace aborts by breakpoints")
@@ -1028,6 +1074,8 @@ DEFINE_BOOL(perf_prof, false,
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
DEFINE_BOOL(perf_prof_debug_info, false,
"Enable debug info for perf linux profiler (experimental).")
+DEFINE_BOOL(perf_prof_unwinding_info, false,
+ "Enable unwinding info for perf linux profiler (experimental).")
DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index f67defd5a9..f7ae004ac2 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -429,6 +429,10 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
+ if (is_bool_type) {
+ PrintF(stderr,
+ "To set or unset a boolean flag, use --flag or --no-flag.\n");
+ }
return_code = j;
break;
}
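
The extra hint fires only for boolean flags, whose accepted spellings are --flag and --no-flag rather than --flag=<value>. A simplified standalone model of that error path (hypothetical helper; the real parsing lives in FlagList::SetFlagsFromCommandLine):

#include <cstdio>
#include <string>

// Simplified model of the boolean-flag path: "--flag" sets, "--no-flag"
// clears, and "--flag=<value>" is rejected with the new hint.
static bool ParseBoolFlag(const std::string& arg, const std::string& name,
                          bool* value) {
  if (arg == "--" + name) { *value = true; return true; }
  if (arg == "--no-" + name) { *value = false; return true; }
  if (arg.rfind("--" + name + "=", 0) == 0) {  // prefix check
    std::fprintf(stderr, "Error: illegal value for flag --%s of type bool\n",
                 name.c_str());
    std::fprintf(stderr,
                 "To set or unset a boolean flag, use --flag or --no-flag.\n");
  }
  return false;
}

int main() {
  bool opt = true;
  ParseBoolFlag("--no-opt", "opt", &opt);   // opt is now false
  ParseBoolFlag("--opt=yes", "opt", &opt);  // prints the error and the hint
}
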
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 5ecbd4567e..77784b8234 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -98,6 +98,35 @@ inline ExitFrame::ExitFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
}
+inline BuiltinExitFrame::BuiltinExitFrame(StackFrameIteratorBase* iterator)
+ : ExitFrame(iterator) {}
+
+inline Object* BuiltinExitFrame::receiver_slot_object() const {
+ // The receiver is the first argument on the frame.
+ // fp[1]: return address.
+  // fp[2]: the last argument (new target).
+  // fp[3]: the target.
+  // fp[4]: argc.
+ // fp[2 + argc - 1]: receiver.
+ Object* argc_slot = argc_slot_object();
+ DCHECK(argc_slot->IsSmi());
+ int argc = Smi::cast(argc_slot)->value();
+
+ const int receiverOffset =
+ BuiltinExitFrameConstants::kNewTargetOffset + (argc - 1) * kPointerSize;
+ return Memory::Object_at(fp() + receiverOffset);
+}
+
+inline Object* BuiltinExitFrame::argc_slot_object() const {
+ return Memory::Object_at(fp() + BuiltinExitFrameConstants::kArgcOffset);
+}
+
+inline Object* BuiltinExitFrame::target_slot_object() const {
+ return Memory::Object_at(fp() + BuiltinExitFrameConstants::kTargetOffset);
+}
+
+inline Object* BuiltinExitFrame::new_target_slot_object() const {
+ return Memory::Object_at(fp() + BuiltinExitFrameConstants::kNewTargetOffset);
+}
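
The offset arithmetic above can be checked by hand. A standalone sketch with the constants spelled out for a 64-bit target (values mirror the BuiltinExitFrameConstants introduced in frames.h further down; helper names are hypothetical):

#include <cassert>
#include <cstdio>

// Mirrors BuiltinExitFrameConstants on a 64-bit target (kPointerSize == 8):
// fp[1] return address, fp[2] new.target, fp[3] target, fp[4] argc,
// fp[5..] arguments, fp[2 + argc - 1] receiver.
static const int kPointerSize = 8;
static const int kNewTargetOffset = 2 * kPointerSize;

// argc as stored in the frame counts receiver, target, new.target and the
// argc slot itself in addition to the real arguments.
static int ReceiverOffset(int argc_in_frame) {
  return kNewTargetOffset + (argc_in_frame - 1) * kPointerSize;
}

int main() {
  // A call like Math.max(1, 2, 3) has 3 real arguments, so the frame's
  // argc slot holds 3 + 4 == 7 ...
  int argc_in_frame = 7;
  // ... and the receiver sits at fp[2 + 7 - 1] == fp[8].
  assert(ReceiverOffset(argc_in_frame) == 8 * kPointerSize);
  std::printf("receiver at fp[%d]\n",
              ReceiverOffset(argc_in_frame) / kPointerSize);
}
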
inline StandardFrame::StandardFrame(StackFrameIteratorBase* iterator)
: StackFrame(iterator) {
@@ -114,14 +143,6 @@ inline void StandardFrame::SetExpression(int index, Object* value) {
}
-inline Object* StandardFrame::context() const {
- const int offset = StandardFrameConstants::kContextOffset;
- Object* maybe_result = Memory::Object_at(fp() + offset);
- DCHECK(!maybe_result->IsSmi());
- return maybe_result;
-}
-
-
inline Address StandardFrame::caller_fp() const {
return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
}
@@ -165,12 +186,6 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
return caller_sp() + parameter_offset;
}
-
-Object* JavaScriptFrame::GetParameter(int index) const {
- return Memory::Object_at(GetParameterSlot(index));
-}
-
-
inline Address JavaScriptFrame::GetOperandSlot(int index) const {
Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
DCHECK(IsAddressAligned(base, kPointerSize));
@@ -199,11 +214,6 @@ inline int JavaScriptFrame::ComputeOperandsCount() const {
}
-inline Object* JavaScriptFrame::receiver() const {
- return GetParameter(-1);
-}
-
-
inline void JavaScriptFrame::set_receiver(Object* value) {
Memory::Object_at(GetParameterSlot(-1)) = value;
}
@@ -214,17 +224,11 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
}
-inline JSFunction* JavaScriptFrame::function() const {
- return JSFunction::cast(function_slot_object());
-}
-
-
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
-
inline StubFrame::StubFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {
}
@@ -243,6 +247,9 @@ inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIteratorBase* iterator) : JavaScriptFrame(iterator) {
}
+inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
+ : JavaScriptFrame(iterator) {}
+
inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {}
@@ -288,10 +295,33 @@ inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
return static_cast<JavaScriptFrame*>(frame);
}
+inline StandardFrame* StackTraceFrameIterator::frame() const {
+ StackFrame* frame = iterator_.frame();
+ DCHECK(frame->is_java_script() || frame->is_arguments_adaptor() ||
+ frame->is_wasm());
+ return static_cast<StandardFrame*>(frame);
+}
+
+bool StackTraceFrameIterator::is_javascript() const {
+ return frame()->is_java_script();
+}
+
+bool StackTraceFrameIterator::is_wasm() const { return frame()->is_wasm(); }
+
+JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
+ DCHECK(is_javascript());
+ return static_cast<JavaScriptFrame*>(frame());
+}
+
+WasmFrame* StackTraceFrameIterator::wasm_frame() const {
+ DCHECK(is_wasm());
+ return static_cast<WasmFrame*>(frame());
+}
inline StackFrame* SafeStackFrameIterator::frame() const {
DCHECK(!done());
- DCHECK(frame_->is_java_script() || frame_->is_exit());
+ DCHECK(frame_->is_java_script() || frame_->is_exit() ||
+ frame_->is_builtin_exit());
return frame_;
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 0e57429ea3..f0fa58d27b 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -4,10 +4,9 @@
#include "src/frames.h"
+#include <memory>
#include <sstream>
-#include "src/ast/ast.h"
-#include "src/ast/scopeinfo.h"
#include "src/base/bits.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
@@ -16,6 +15,8 @@
#include "src/safepoint-table.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -63,19 +64,14 @@ StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate,
}
#undef INITIALIZE_SINGLETON
-
StackFrameIterator::StackFrameIterator(Isolate* isolate)
- : StackFrameIteratorBase(isolate, true) {
- Reset(isolate->thread_local_top());
-}
-
+ : StackFrameIterator(isolate, isolate->thread_local_top()) {}
StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
: StackFrameIteratorBase(isolate, true) {
Reset(t);
}
-
void StackFrameIterator::Advance() {
DCHECK(!done());
// Compute the state of the calling frame before restoring
@@ -104,17 +100,15 @@ void StackFrameIterator::Reset(ThreadLocalTop* top) {
StackFrame::Type type = ExitFrame::GetStateForFramePointer(
Isolate::c_entry_fp(top), &state);
handler_ = StackHandler::FromAddress(Isolate::handler(top));
- if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
}
StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
StackFrame::State* state) {
- if (type == StackFrame::NONE) return NULL;
StackFrame* result = SingletonFor(type);
- DCHECK(result != NULL);
- result->state_ = *state;
+ DCHECK((!result) == (type == StackFrame::NONE));
+ if (result) result->state_ = *state;
return result;
}
@@ -162,30 +156,41 @@ void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
// -------------------------------------------------------------------------
-
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
- : JavaScriptFrameIterator(isolate) {
- if (!done() && !IsValidFrame()) Advance();
+ : iterator_(isolate) {
+ if (!done() && !IsValidFrame(iterator_.frame())) Advance();
}
+StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate,
+ StackFrame::Id id)
+ : StackTraceFrameIterator(isolate) {
+ while (!done() && frame()->id() != id) Advance();
+}
void StackTraceFrameIterator::Advance() {
- while (true) {
- JavaScriptFrameIterator::Advance();
- if (done()) return;
- if (IsValidFrame()) return;
- }
+ do {
+ iterator_.Advance();
+ } while (!done() && !IsValidFrame(iterator_.frame()));
}
-
-bool StackTraceFrameIterator::IsValidFrame() {
- if (!frame()->function()->IsJSFunction()) return false;
- Object* script = frame()->function()->shared()->script();
+bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
+ if (frame->is_java_script()) {
+ JavaScriptFrame* jsFrame = static_cast<JavaScriptFrame*>(frame);
+ if (!jsFrame->function()->IsJSFunction()) return false;
+ Object* script = jsFrame->function()->shared()->script();
// Don't show functions from native scripts to user.
return (script->IsScript() &&
Script::TYPE_NATIVE != Script::cast(script)->type());
+ }
+  // Apart from JavaScript frames, only WASM frames are valid.
+ return frame->is_wasm();
}
+void StackTraceFrameIterator::AdvanceToArgumentsFrame() {
+ if (!is_javascript() || !javascript_frame()->has_adapted_arguments()) return;
+ iterator_.Advance();
+ DCHECK(iterator_.frame()->is_arguments_adaptor());
+}
// -------------------------------------------------------------------------
@@ -230,10 +235,8 @@ SafeStackFrameIterator::SafeStackFrameIterator(
} else {
return;
}
- if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
- DCHECK(frame_);
- Advance();
+ if (frame_) Advance();
}
@@ -261,12 +264,8 @@ void SafeStackFrameIterator::AdvanceOneFrame() {
// Advance to the previous frame.
StackFrame::State state;
StackFrame::Type type = frame_->GetCallerState(&state);
- if (SingletonFor(type) == NULL) {
- frame_ = NULL;
- return;
- }
frame_ = SingletonFor(type, &state);
- DCHECK(frame_);
+ if (!frame_) return;
// Check that we have actually moved to the previous frame in the stack.
if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
@@ -311,7 +310,8 @@ bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
if (!IsValidStackAddress(sp)) return false;
StackFrame::State state;
ExitFrame::FillState(fp, sp, &state);
- return *state.pc_address != NULL;
+ MSAN_MEMORY_IS_INITIALIZED(state.pc_address, sizeof(state.pc_address));
+ return *state.pc_address != nullptr;
}
@@ -331,7 +331,7 @@ void SafeStackFrameIterator::Advance() {
external_callback_scope_ = external_callback_scope_->previous();
}
if (frame_->is_java_script()) break;
- if (frame_->is_exit()) {
+ if (frame_->is_exit() || frame_->is_builtin_exit()) {
// Some of the EXIT frames may have ExternalCallbackScope allocated on
// top of them. In that case the scope corresponds to the first EXIT
// frame beneath it. There may be other EXIT frames on top of the
@@ -406,22 +406,24 @@ static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
Code* interpreter_bytecode_dispatch =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+ Code* interpreter_baseline_on_return =
+ isolate->builtins()->builtin(Builtins::kInterpreterMarkBaselineOnReturn);
return (pc >= interpreter_entry_trampoline->instruction_start() &&
pc < interpreter_entry_trampoline->instruction_end()) ||
(pc >= interpreter_bytecode_dispatch->instruction_start() &&
- pc < interpreter_bytecode_dispatch->instruction_end());
+ pc < interpreter_bytecode_dispatch->instruction_end()) ||
+ (pc >= interpreter_baseline_on_return->instruction_start() &&
+ pc < interpreter_baseline_on_return->instruction_end());
}
StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
State* state) {
DCHECK(state->fp != NULL);
-#if defined(USE_SIMULATOR)
MSAN_MEMORY_IS_INITIALIZED(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
kPointerSize);
-#endif
Object* marker = Memory::Object_at(
state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
if (!iterator->can_access_heap_objects_) {
@@ -430,10 +432,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// the VM with a signal at any arbitrary instruction, with essentially
// anything on the stack. So basically none of these checks are 100%
// reliable.
-#if defined(USE_SIMULATOR)
MSAN_MEMORY_IS_INITIALIZED(
state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
-#endif
Object* maybe_function =
Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
if (!marker->IsSmi()) {
@@ -451,17 +451,20 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
Code* code_obj =
GetContainingCode(iterator->isolate(), *(state->pc_address));
if (code_obj != nullptr) {
- if (code_obj->is_interpreter_entry_trampoline() ||
- code_obj->is_interpreter_enter_bytecode_dispatch()) {
- return INTERPRETED;
- }
switch (code_obj->kind()) {
case Code::BUILTIN:
if (marker->IsSmi()) break;
- // We treat frames for BUILTIN Code objects as OptimizedFrame for now
- // (all the builtins with JavaScript linkage are actually generated
- // with TurboFan currently, so this is sound).
- return OPTIMIZED;
+ if (code_obj->is_interpreter_trampoline_builtin()) {
+ return INTERPRETED;
+ }
+ if (code_obj->is_turbofanned()) {
+ // TODO(bmeurer): We treat frames for BUILTIN Code objects as
+ // OptimizedFrame for now (all the builtins with JavaScript
+ // linkage are actually generated with TurboFan currently, so
+ // this is sound).
+ return OPTIMIZED;
+ }
+ return BUILTIN;
case Code::FUNCTION:
return JAVA_SCRIPT;
case Code::OPTIMIZED_FUNCTION:
@@ -488,15 +491,16 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case ENTRY:
case ENTRY_CONSTRUCT:
case EXIT:
+ case BUILTIN_EXIT:
case STUB:
case STUB_FAILURE_TRAMPOLINE:
case INTERNAL:
case CONSTRUCT:
case ARGUMENTS_ADAPTOR:
- return candidate;
- case JS_TO_WASM:
case WASM_TO_JS:
case WASM:
+ return candidate;
+ case JS_TO_WASM:
case JAVA_SCRIPT:
case OPTIMIZED:
case INTERPRETED:
@@ -561,7 +565,6 @@ Object*& ExitFrame::code_slot() const {
return Memory::Object_at(fp() + offset);
}
-
Code* ExitFrame::unchecked_code() const {
return reinterpret_cast<Code*>(code_slot());
}
@@ -603,15 +606,34 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
Address sp = ComputeStackPointer(fp);
FillState(fp, sp, state);
DCHECK(*state->pc_address != NULL);
- return EXIT;
+
+ return ComputeFrameType(fp);
}
+StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
+  // Distinguish between regular and builtin exit frames.
+  // Default to EXIT in all hairy cases (e.g., when called from the profiler).
+ const int offset = ExitFrameConstants::kFrameTypeOffset;
+ Object* marker = Memory::Object_at(fp + offset);
+
+ if (!marker->IsSmi()) {
+ return EXIT;
+ }
+
+ StackFrame::Type frame_type =
+ static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ if (frame_type == EXIT || frame_type == BUILTIN_EXIT) {
+ return frame_type;
+ }
+
+ return EXIT;
+}
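
The marker is only trusted when it is a Smi carrying one of the two expected frame types; everything else falls back to EXIT. A standalone model of that decision (the Smi encoding and enum values here are illustrative, not V8's real ones):

#include <cstdint>
#include <cstdio>

// Illustrative frame types; the real values come from StackFrame::Type.
enum FrameType { EXIT = 1, BUILTIN_EXIT = 2 };

// Illustrative 32-bit-style Smi check: words with a clear low bit are Smis,
// and the payload lives in the upper bits.
static bool IsSmi(intptr_t word) { return (word & 1) == 0; }
static intptr_t SmiValue(intptr_t word) { return word >> 1; }

// Default to EXIT in all hairy cases, e.g. when the marker slot holds a
// tagged heap pointer or an unexpected value (possible when called from the
// profiler at an arbitrary pc).
static FrameType ComputeFrameType(intptr_t marker_word) {
  if (!IsSmi(marker_word)) return EXIT;
  intptr_t type = SmiValue(marker_word);
  if (type == EXIT || type == BUILTIN_EXIT) return static_cast<FrameType>(type);
  return EXIT;
}

int main() {
  std::printf("%d\n", ComputeFrameType(BUILTIN_EXIT << 1));  // 2 (BUILTIN_EXIT)
  std::printf("%d\n", ComputeFrameType(0x1003));             // 1 (pointer -> EXIT)
}
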
Address ExitFrame::ComputeStackPointer(Address fp) {
+ MSAN_MEMORY_IS_INITIALIZED(fp + ExitFrameConstants::kSPOffset, kPointerSize);
return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
-
void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
state->fp = fp;
@@ -624,6 +646,55 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->constant_pool_address = NULL;
}
+JSFunction* BuiltinExitFrame::function() const {
+ return JSFunction::cast(target_slot_object());
+}
+
+Object* BuiltinExitFrame::receiver() const { return receiver_slot_object(); }
+
+bool BuiltinExitFrame::IsConstructor() const {
+ return !new_target_slot_object()->IsUndefined(isolate());
+}
+
+Object* BuiltinExitFrame::GetParameter(int i) const {
+ DCHECK(i >= 0 && i < ComputeParametersCount());
+ int offset = BuiltinExitFrameConstants::kArgcOffset + (i + 1) * kPointerSize;
+ return Memory::Object_at(fp() + offset);
+}
+
+int BuiltinExitFrame::ComputeParametersCount() const {
+ Object* argc_slot = argc_slot_object();
+ DCHECK(argc_slot->IsSmi());
+ // Argc also counts the receiver, target, new target, and argc itself as args,
+ // therefore the real argument count is argc - 4.
+ int argc = Smi::cast(argc_slot)->value() - 4;
+ DCHECK(argc >= 0);
+ return argc;
+}
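
Continuing the worked example from frames-inl.h: a call with three real arguments stores 3 + 4 == 7 in the argc slot, so ComputeParametersCount returns 3 and GetParameter finds the arguments directly above the argc slot. A short standalone check of that accounting (hypothetical helpers):

#include <cassert>

static const int kPointerSize = 8;
static const int kArgcOffset = 4 * kPointerSize;  // fp[4], see frames.h below

// Real argument count: the frame's argc also counts receiver, target,
// new.target and the argc slot itself.
static int ComputeParametersCount(int argc_in_frame) { return argc_in_frame - 4; }

// Parameter i sits (i + 1) slots above the argc slot.
static int ParameterOffset(int i) { return kArgcOffset + (i + 1) * kPointerSize; }

int main() {
  assert(ComputeParametersCount(7) == 3);          // e.g. Math.max(1, 2, 3)
  assert(ParameterOffset(0) == 5 * kPointerSize);  // parameter 0 at fp[5]
  assert(ParameterOffset(2) == 7 * kPointerSize);  // parameter 2 at fp[7]
}
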
+
+void BuiltinExitFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
+ DisallowHeapAllocation no_gc;
+ Object* receiver = this->receiver();
+ JSFunction* function = this->function();
+
+ accumulator->PrintSecurityTokenIfChanged(function);
+ PrintIndex(accumulator, mode, index);
+ accumulator->Add("builtin exit frame: ");
+ Code* code = NULL;
+ if (IsConstructor()) accumulator->Add("new ");
+ accumulator->PrintFunction(function, receiver, &code);
+
+ accumulator->Add("(this=%o", receiver);
+
+ // Print the parameters.
+ int parameters_count = ComputeParametersCount();
+ for (int i = 0; i < parameters_count; i++) {
+ accumulator->Add(",%o", GetParameter(i));
+ }
+
+ accumulator->Add(")\n\n");
+}
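
For a hypothetical Math.pow(2, 10) call, the accumulator output produced by this method comes out roughly as follows (illustrative; receiver formatting abbreviated, and PrintIndex and PrintFunction contribute the surrounding detail):

builtin exit frame: pow(this=<the global object>,2,10)
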
Address StandardFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
@@ -635,6 +706,20 @@ Address InterpretedFrame::GetExpressionAddress(int n) const {
return fp() + offset - n * kPointerSize;
}
+Script* StandardFrame::script() const {
+ // This should only be called on frames which override this method.
+ DCHECK(false);
+ return nullptr;
+}
+
+Object* StandardFrame::receiver() const {
+ return isolate()->heap()->undefined_value();
+}
+
+Object* StandardFrame::context() const {
+ return isolate()->heap()->undefined_value();
+}
+
int StandardFrame::ComputeExpressionsCount() const {
Address base = GetExpressionAddress(0);
Address limit = sp() - kPointerSize;
@@ -643,6 +728,13 @@ int StandardFrame::ComputeExpressionsCount() const {
return static_cast<int>((base - limit) / kPointerSize);
}
+Object* StandardFrame::GetParameter(int index) const {
+ // StandardFrame does not define any parameters.
+ UNREACHABLE();
+ return nullptr;
+}
+
+int StandardFrame::ComputeParametersCount() const { return 0; }
void StandardFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
@@ -659,6 +751,7 @@ void StandardFrame::SetCallerFp(Address caller_fp) {
caller_fp;
}
+bool StandardFrame::IsConstructor() const { return false; }
void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
@@ -683,6 +776,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
case ENTRY:
case ENTRY_CONSTRUCT:
case EXIT:
+ case BUILTIN_EXIT:
case STUB_FAILURE_TRAMPOLINE:
case ARGUMENTS_ADAPTOR:
case STUB:
@@ -696,6 +790,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
case JAVA_SCRIPT:
case OPTIMIZED:
case INTERPRETED:
+ case BUILTIN:
// These frame types have a context, but they are actually stored
// in the place on the stack that one finds the frame type.
UNREACHABLE();
@@ -711,7 +806,8 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
(frame_header_size + StandardFrameConstants::kFixedFrameSizeAboveFp);
Object** frame_header_base = &Memory::Object_at(fp() - frame_header_size);
- Object** frame_header_limit = &Memory::Object_at(fp());
+ Object** frame_header_limit =
+ &Memory::Object_at(fp() - StandardFrameConstants::kCPSlotSize);
Object** parameters_base = &Memory::Object_at(sp());
Object** parameters_limit = frame_header_base - slot_space / kPointerSize;
@@ -726,10 +822,9 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
if (safepoint_entry.has_doubles()) {
// Number of doubles not known at snapshot time.
DCHECK(!isolate()->serializer_enabled());
- parameters_base +=
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
- ->num_allocatable_double_registers() *
- kDoubleSize / kPointerSize;
+ parameters_base += RegisterConfiguration::Crankshaft()
+ ->num_allocatable_double_registers() *
+ kDoubleSize / kPointerSize;
}
// Visit the registers that contain pointers if any.
@@ -854,27 +949,42 @@ void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) const {
functions->Add(function());
}
-
-void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
+void JavaScriptFrame::Summarize(List<FrameSummary>* functions,
+ FrameSummary::Mode mode) const {
DCHECK(functions->length() == 0);
Code* code = LookupCode();
int offset = static_cast<int>(pc() - code->instruction_start());
AbstractCode* abstract_code = AbstractCode::cast(code);
FrameSummary summary(receiver(), function(), abstract_code, offset,
- IsConstructor());
+ IsConstructor(), mode);
functions->Add(summary);
}
+JSFunction* JavaScriptFrame::function() const {
+ return JSFunction::cast(function_slot_object());
+}
+
+Object* JavaScriptFrame::receiver() const { return GetParameter(-1); }
+
+Script* JavaScriptFrame::script() const {
+ return Script::cast(function()->shared()->script());
+}
+
+Object* JavaScriptFrame::context() const {
+ const int offset = StandardFrameConstants::kContextOffset;
+ Object* maybe_result = Memory::Object_at(fp() + offset);
+ DCHECK(!maybe_result->IsSmi());
+ return maybe_result;
+}
+
int JavaScriptFrame::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
Code* code = LookupCode();
DCHECK(!code->is_optimized_code());
- HandlerTable* table = HandlerTable::cast(code->handler_table());
int pc_offset = static_cast<int>(pc() - code->entry());
- return table->LookupRange(pc_offset, stack_depth, prediction);
+ return code->LookupRangeInHandlerTable(pc_offset, stack_depth, prediction);
}
-
void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
Address pc, FILE* file,
bool print_line_number) {
@@ -884,7 +994,7 @@ void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
PrintF(file, "+%d", code_offset);
if (print_line_number) {
SharedFunctionInfo* shared = function->shared();
- int source_pos = code->SourcePosition(code_offset);
+ int source_pos = AbstractCode::cast(code)->SourcePosition(code_offset);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
Script* script = Script::cast(maybe_script);
@@ -892,7 +1002,7 @@ void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
Object* script_name_raw = script->name();
if (script_name_raw->IsString()) {
String* script_name = String::cast(script->name());
- base::SmartArrayPointer<char> c_script_name =
+ std::unique_ptr<char[]> c_script_name =
script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
PrintF(file, " at %s:%d", c_script_name.get(), line);
} else {
@@ -944,14 +1054,12 @@ void JavaScriptFrame::SaveOperandStack(FixedArray* store) const {
}
}
+Object* JavaScriptFrame::GetParameter(int index) const {
+ return Memory::Object_at(GetParameterSlot(index));
+}
-void JavaScriptFrame::RestoreOperandStack(FixedArray* store) {
- int operands_count = store->length();
- DCHECK_LE(operands_count, ComputeOperandsCount());
- for (int i = 0; i < operands_count; i++) {
- DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
- Memory::Object_at(GetOperandSlot(i)) = store->get(i);
- }
+int JavaScriptFrame::ComputeParametersCount() const {
+ return GetNumberOfIncomingArguments();
}
namespace {
@@ -965,7 +1073,7 @@ bool CannotDeoptFromAsmCode(Code* code, JSFunction* function) {
FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
AbstractCode* abstract_code, int code_offset,
- bool is_constructor)
+ bool is_constructor, Mode mode)
: receiver_(receiver, function->GetIsolate()),
function_(function),
abstract_code_(abstract_code),
@@ -973,7 +1081,14 @@ FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
is_constructor_(is_constructor) {
DCHECK(abstract_code->IsBytecodeArray() ||
Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION ||
- CannotDeoptFromAsmCode(Code::cast(abstract_code), function));
+ CannotDeoptFromAsmCode(Code::cast(abstract_code), function) ||
+ mode == kApproximateSummary);
+}
+
+FrameSummary FrameSummary::GetFirst(JavaScriptFrame* frame) {
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ return frames.first();
}
void FrameSummary::Print() {
@@ -987,8 +1102,12 @@ void FrameSummary::Print() {
Code* code = abstract_code_->GetCode();
if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
- DCHECK(CannotDeoptFromAsmCode(code, *function()));
- PrintF(" ASM ");
+ if (function()->shared()->asm_function()) {
+ DCHECK(CannotDeoptFromAsmCode(code, *function()));
+ PrintF(" ASM ");
+ } else {
+ PrintF(" OPT (approximate)");
+ }
}
} else {
PrintF(" BYTECODE ");
@@ -996,8 +1115,8 @@ void FrameSummary::Print() {
PrintF("\npc: %d\n", code_offset_);
}
-
-void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
+void OptimizedFrame::Summarize(List<FrameSummary>* frames,
+ FrameSummary::Mode mode) const {
DCHECK(frames->length() == 0);
DCHECK(is_optimized());
@@ -1012,6 +1131,13 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+ if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
+ DCHECK(data == nullptr);
+ if (mode == FrameSummary::kApproximateSummary) {
+ return JavaScriptFrame::Summarize(frames, mode);
+ }
+ FATAL("Missing deoptimization information for OptimizedFrame::Summarize.");
+ }
FixedArray* const literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
@@ -1107,11 +1233,15 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
int OptimizedFrame::LookupExceptionHandlerInTable(
int* stack_slots, HandlerTable::CatchPrediction* prediction) {
+ // We cannot perform exception prediction on optimized code. Instead, we need
+ // to use FrameSummary to find the corresponding code offset in unoptimized
+ // code to perform prediction there.
+ DCHECK_NULL(prediction);
Code* code = LookupCode();
HandlerTable* table = HandlerTable::cast(code->handler_table());
int pc_offset = static_cast<int>(pc() - code->entry());
if (stack_slots) *stack_slots = code->stack_slots();
- return table->LookupReturn(pc_offset, prediction);
+ return table->LookupReturn(pc_offset);
}
@@ -1134,9 +1264,10 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
*deopt_index = safepoint_entry.deoptimization_index();
- DCHECK(*deopt_index != Safepoint::kNoDeoptimizationIndex);
-
- return DeoptimizationInputData::cast(code->deoptimization_data());
+ if (*deopt_index != Safepoint::kNoDeoptimizationIndex) {
+ return DeoptimizationInputData::cast(code->deoptimization_data());
+ }
+ return nullptr;
}
@@ -1155,6 +1286,8 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* const data = GetDeoptimizationData(&deopt_index);
+ DCHECK_NOT_NULL(data);
+ DCHECK_NE(Safepoint::kNoDeoptimizationIndex, deopt_index);
FixedArray* const literal_array = data->LiteralArray();
TranslationIterator it(data->TranslationByteArray(),
@@ -1205,9 +1338,8 @@ Object* OptimizedFrame::StackSlotAt(int index) const {
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
BytecodeArray* bytecode = function()->shared()->bytecode_array();
- HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- int pc_offset = GetBytecodeOffset() + 1; // Point after current bytecode.
- return table->LookupRange(pc_offset, context_register, prediction);
+ return bytecode->LookupRangeInHandlerTable(GetBytecodeOffset(),
+ context_register, prediction);
}
int InterpretedFrame::GetBytecodeOffset() const {
@@ -1228,15 +1360,15 @@ void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
SetExpression(index, Smi::FromInt(raw_offset));
}
-Object* InterpretedFrame::GetBytecodeArray() const {
+BytecodeArray* InterpretedFrame::GetBytecodeArray() const {
const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
DCHECK_EQ(
InterpreterFrameConstants::kBytecodeArrayFromFp,
InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
- return GetExpression(index);
+ return BytecodeArray::cast(GetExpression(index));
}
-void InterpretedFrame::PatchBytecodeArray(Object* bytecode_array) {
+void InterpretedFrame::PatchBytecodeArray(BytecodeArray* bytecode_array) {
const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
DCHECK_EQ(
InterpreterFrameConstants::kBytecodeArrayFromFp,
@@ -1244,15 +1376,25 @@ void InterpretedFrame::PatchBytecodeArray(Object* bytecode_array) {
SetExpression(index, bytecode_array);
}
-Object* InterpretedFrame::GetInterpreterRegister(int register_index) const {
+Object* InterpretedFrame::ReadInterpreterRegister(int register_index) const {
const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
DCHECK_EQ(
- InterpreterFrameConstants::kRegisterFilePointerFromFp,
+ InterpreterFrameConstants::kRegisterFileFromFp,
InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
return GetExpression(index + register_index);
}
-void InterpretedFrame::Summarize(List<FrameSummary>* functions) {
+void InterpretedFrame::WriteInterpreterRegister(int register_index,
+ Object* value) {
+ const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
+ DCHECK_EQ(
+ InterpreterFrameConstants::kRegisterFileFromFp,
+ InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ return SetExpression(index + register_index, value);
+}
+
+void InterpretedFrame::Summarize(List<FrameSummary>* functions,
+ FrameSummary::Mode mode) const {
DCHECK(functions->length() == 0);
AbstractCode* abstract_code =
AbstractCode::cast(function()->shared()->bytecode_array());
@@ -1265,11 +1407,6 @@ int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
}
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
int ArgumentsAdaptorFrame::GetLength(Address fp) {
const int offset = ArgumentsAdaptorFrameConstants::kLengthOffset;
return Smi::cast(Memory::Object_at(fp + offset))->value();
@@ -1280,6 +1417,14 @@ Code* ArgumentsAdaptorFrame::unchecked_code() const {
Builtins::kArgumentsAdaptorTrampoline);
}
+int BuiltinFrame::GetNumberOfIncomingArguments() const {
+ return Smi::cast(GetExpression(0))->value();
+}
+
+void BuiltinFrame::PrintFrameKind(StringStream* accumulator) const {
+ accumulator->Add("builtin frame: ");
+}
+
Address InternalFrame::GetCallerStackPointer() const {
// Internal frames have no arguments. The stack pointer of the
// caller is at a fixed offset from the frame pointer.
@@ -1315,6 +1460,24 @@ Address WasmFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
+Object* WasmFrame::wasm_obj() const {
+ FixedArray* deopt_data = LookupCode()->deoptimization_data();
+ DCHECK(deopt_data->length() == 2);
+ return deopt_data->get(0);
+}
+
+uint32_t WasmFrame::function_index() const {
+ FixedArray* deopt_data = LookupCode()->deoptimization_data();
+ DCHECK(deopt_data->length() == 2);
+ return Smi::cast(deopt_data->get(1))->value();
+}
+
+Script* WasmFrame::script() const {
+ Handle<JSObject> wasm(JSObject::cast(wasm_obj()), isolate());
+ Handle<wasm::WasmDebugInfo> debug_info = wasm::GetDebugInfo(wasm);
+ return wasm::WasmDebugInfo::GetFunctionScript(debug_info, function_index());
+}
+
namespace {
@@ -1342,6 +1505,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
+ PrintFrameKind(accumulator);
Code* code = NULL;
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
@@ -1362,16 +1526,22 @@ void JavaScriptFrame::Print(StringStream* accumulator,
if (code != NULL && code->kind() == Code::FUNCTION &&
pc >= code->instruction_start() && pc < code->instruction_end()) {
int offset = static_cast<int>(pc - code->instruction_start());
- int source_pos = code->SourcePosition(offset);
+ int source_pos = AbstractCode::cast(code)->SourcePosition(offset);
+ int line = script->GetLineNumber(source_pos) + 1;
+ accumulator->Add(":%d] [pc=%p]", line, pc);
+ } else if (is_interpreted()) {
+ const InterpretedFrame* iframe =
+ reinterpret_cast<const InterpretedFrame*>(this);
+ BytecodeArray* bytecodes = iframe->GetBytecodeArray();
+ int offset = iframe->GetBytecodeOffset();
+ int source_pos = AbstractCode::cast(bytecodes)->SourcePosition(offset);
int line = script->GetLineNumber(source_pos) + 1;
- accumulator->Add(":%d", line);
+ accumulator->Add(":%d] [bytecode=%p offset=%d]", line, bytecodes, offset);
} else {
int function_start_pos = shared->start_position();
int line = script->GetLineNumber(function_start_pos) + 1;
- accumulator->Add(":~%d", line);
+ accumulator->Add(":~%d] [pc=%p]", line, pc);
}
-
- accumulator->Add("] [pc=%p] ", pc);
}
accumulator->Add("(this=%o", receiver);
@@ -1632,7 +1802,8 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Page* page = Page::FromAddress(inner_pointer);
DCHECK_EQ(page->owner(), heap->code_space());
- heap->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(page);
+ heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
+ page);
Address addr = page->skip_list()->StartFor(inner_pointer);
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index f6806d7563..12770231cf 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -111,7 +111,9 @@ class StackHandler BASE_EMBEDDED {
V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+ V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) \
+ V(BUILTIN, BuiltinFrame) \
+ V(BUILTIN_EXIT, BuiltinExitFrame)
// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
// two slots.
@@ -280,6 +282,14 @@ class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(2);
};
+class BuiltinFrameConstants : public TypedFrameConstants {
+ public:
+ // FP-relative.
+ static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
+};
+
class InternalFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
@@ -314,41 +324,46 @@ class StubFailureTrampolineFrameConstants : public InternalFrameConstants {
DEFINE_TYPED_FRAME_SIZES(3);
};
+// Behaves like an exit frame but with target and new target args.
+class BuiltinExitFrameConstants : public CommonFrameConstants {
+ public:
+ static const int kNewTargetOffset = kCallerPCOffset + 1 * kPointerSize;
+ static const int kTargetOffset = kNewTargetOffset + 1 * kPointerSize;
+ static const int kArgcOffset = kTargetOffset + 1 * kPointerSize;
+};
class InterpreterFrameConstants : public AllStatic {
public:
- // Fixed frame includes new.target and bytecode offset.
+ // Fixed frame includes new.target, bytecode array, and bytecode offset.
static const int kFixedFrameSize =
StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
static const int kFixedFrameSizeFromFp =
StandardFrameConstants::kFixedFrameSizeFromFp + 3 * kPointerSize;
// FP-relative.
+ static const int kLastParamFromFp = StandardFrameConstants::kCallerSPOffset;
+ static const int kCallerPCOffsetFromFp =
+ StandardFrameConstants::kCallerPCOffset;
static const int kNewTargetFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kPointerSize;
static const int kBytecodeArrayFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 2 * kPointerSize;
static const int kBytecodeOffsetFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
- static const int kRegisterFilePointerFromFp =
+ static const int kRegisterFileFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 4 * kPointerSize;
- static const int kExpressionsOffset = kRegisterFilePointerFromFp;
+ static const int kExpressionsOffset = kRegisterFileFromFp;
+
+ // Number of fixed slots in addition to a {StandardFrame}.
+ static const int kExtraSlotCount =
+ InterpreterFrameConstants::kFixedFrameSize / kPointerSize -
+ StandardFrameConstants::kFixedFrameSize / kPointerSize;
// Expression index for {StandardFrame::GetExpressionAddress}.
static const int kBytecodeArrayExpressionIndex = -2;
static const int kBytecodeOffsetExpressionIndex = -1;
static const int kRegisterFileExpressionIndex = 0;
-
- // Register file pointer relative.
- static const int kLastParamFromRegisterPointer =
- StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
-
- static const int kBytecodeOffsetFromRegisterPointer = 1 * kPointerSize;
- static const int kBytecodeArrayFromRegisterPointer = 2 * kPointerSize;
- static const int kNewTargetFromRegisterPointer = 3 * kPointerSize;
- static const int kFunctionFromRegisterPointer = 4 * kPointerSize;
- static const int kContextFromRegisterPointer = 5 * kPointerSize;
};
inline static int FPOffsetToFrameSlot(int frame_offset) {
@@ -418,17 +433,19 @@ class StackFrame BASE_EMBEDDED {
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
+ bool is_builtin() const { return type() == BUILTIN; }
bool is_internal() const { return type() == INTERNAL; }
bool is_stub_failure_trampoline() const {
return type() == STUB_FAILURE_TRAMPOLINE;
}
bool is_construct() const { return type() == CONSTRUCT; }
+ bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }
virtual bool is_standard() const { return false; }
bool is_java_script() const {
Type type = this->type();
return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
- (type == INTERPRETED);
+ (type == INTERPRETED) || (type == BUILTIN);
}
// Accessors.
@@ -627,6 +644,7 @@ class ExitFrame: public StackFrame {
// iterator and the frames following entry frames.
static Type GetStateForFramePointer(Address fp, State* state);
static Address ComputeStackPointer(Address fp);
+ static StackFrame::Type ComputeFrameType(Address fp);
static void FillState(Address fp, Address sp, State* state);
protected:
@@ -640,22 +658,98 @@ class ExitFrame: public StackFrame {
friend class StackFrameIteratorBase;
};
+// Builtin exit frames are a special case of exit frames; they are used
+// whenever C++ builtins (e.g., Math.acos) are called. Their main purpose is
+// to allow such builtins to appear in stack traces.
+class BuiltinExitFrame : public ExitFrame {
+ public:
+ Type type() const override { return BUILTIN_EXIT; }
+
+ static BuiltinExitFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_builtin_exit());
+ return static_cast<BuiltinExitFrame*>(frame);
+ }
+
+ JSFunction* function() const;
+ Object* receiver() const;
+
+ bool IsConstructor() const;
+
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
+
+ protected:
+ inline explicit BuiltinExitFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ Object* GetParameter(int i) const;
+ int ComputeParametersCount() const;
-class StandardFrame: public StackFrame {
+ inline Object* receiver_slot_object() const;
+ inline Object* argc_slot_object() const;
+ inline Object* target_slot_object() const;
+ inline Object* new_target_slot_object() const;
+
+ friend class StackFrameIteratorBase;
+};
+
+class JavaScriptFrame;
+
+class FrameSummary BASE_EMBEDDED {
+ public:
+ // Mode for JavaScriptFrame::Summarize. Exact summary is required to produce
+ // an exact stack trace. It will trigger an assertion failure if that is not
+ // possible, e.g., because of missing deoptimization information. The
+ // approximate mode should produce a summary even without deoptimization
+ // information, but it might miss frames.
+ enum Mode { kExactSummary, kApproximateSummary };
+
+ FrameSummary(Object* receiver, JSFunction* function,
+ AbstractCode* abstract_code, int code_offset,
+ bool is_constructor, Mode mode = kExactSummary);
+
+ static FrameSummary GetFirst(JavaScriptFrame* frame);
+
+ Handle<Object> receiver() const { return receiver_; }
+ Handle<JSFunction> function() const { return function_; }
+ Handle<AbstractCode> abstract_code() const { return abstract_code_; }
+ int code_offset() const { return code_offset_; }
+ bool is_constructor() const { return is_constructor_; }
+
+ void Print();
+
+ private:
+ Handle<Object> receiver_;
+ Handle<JSFunction> function_;
+ Handle<AbstractCode> abstract_code_;
+ int code_offset_;
+ bool is_constructor_;
+};
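
Given these declarations, a hedged usage fragment (assumes a JavaScriptFrame* frame inside V8, e.g. obtained from a stack walk):

// Collect summaries for the frame and all frames inlined into it. The
// approximate mode tolerates missing deoptimization data (see
// OptimizedFrame::Summarize in frames.cc above) at the cost of possibly
// missing inlined frames.
List<FrameSummary> summaries(FLAG_max_inlining_levels + 1);
frame->Summarize(&summaries, FrameSummary::kApproximateSummary);
for (int i = 0; i < summaries.length(); i++) {
  summaries[i].Print();
}

// Or, when only the top-most summary matters:
FrameSummary::GetFirst(frame).Print();
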
+
+class StandardFrame : public StackFrame {
public:
// Testers.
bool is_standard() const override { return true; }
// Accessors.
- inline Object* context() const;
+ virtual Object* receiver() const;
+ virtual Script* script() const;
+ virtual Object* context() const;
// Access the expressions in the stack frame including locals.
inline Object* GetExpression(int index) const;
inline void SetExpression(int index, Object* value);
int ComputeExpressionsCount() const;
+ // Access the parameters.
+ virtual Object* GetParameter(int index) const;
+ virtual int ComputeParametersCount() const;
+
void SetCallerFp(Address caller_fp) override;
+ // Check if this frame is a constructor frame invoked through 'new'.
+ virtual bool IsConstructor() const;
+
static StandardFrame* cast(StackFrame* frame) {
DCHECK(frame->is_standard());
return static_cast<StandardFrame*>(frame);
@@ -701,44 +795,27 @@ class StandardFrame: public StackFrame {
friend class SafeStackFrameIterator;
};
-
-class FrameSummary BASE_EMBEDDED {
- public:
- FrameSummary(Object* receiver, JSFunction* function,
- AbstractCode* abstract_code, int code_offset,
- bool is_constructor);
-
- Handle<Object> receiver() { return receiver_; }
- Handle<JSFunction> function() { return function_; }
- Handle<AbstractCode> abstract_code() { return abstract_code_; }
- int code_offset() { return code_offset_; }
- bool is_constructor() { return is_constructor_; }
-
- void Print();
-
- private:
- Handle<Object> receiver_;
- Handle<JSFunction> function_;
- Handle<AbstractCode> abstract_code_;
- int code_offset_;
- bool is_constructor_;
-};
-
class JavaScriptFrame : public StandardFrame {
public:
Type type() const override { return JAVA_SCRIPT; }
+ // Build a list with summaries for this frame including all inlined frames.
+ virtual void Summarize(
+ List<FrameSummary>* frames,
+ FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
+
// Accessors.
- inline JSFunction* function() const;
- inline Object* receiver() const;
+ virtual JSFunction* function() const;
+ Object* receiver() const override;
+ Object* context() const override;
+ Script* script() const override;
+
inline void set_receiver(Object* value);
// Access the parameters.
inline Address GetParameterSlot(int index) const;
- inline Object* GetParameter(int index) const;
- inline int ComputeParametersCount() const {
- return GetNumberOfIncomingArguments();
- }
+ Object* GetParameter(int index) const override;
+ int ComputeParametersCount() const override;
// Access the operand stack.
inline Address GetOperandSlot(int index) const;
@@ -747,13 +824,12 @@ class JavaScriptFrame : public StandardFrame {
// Generator support to preserve operand stack.
void SaveOperandStack(FixedArray* store) const;
- void RestoreOperandStack(FixedArray* store);
// Debugger access.
void SetParameterValue(int index, Object* value) const;
// Check if this frame is a constructor frame invoked through 'new'.
- bool IsConstructor() const;
+ bool IsConstructor() const override;
// Determines whether this frame includes inlined activations. To get details
// about the inlined frames use {GetFunctions} and {Summarize}.
@@ -778,9 +854,6 @@ class JavaScriptFrame : public StandardFrame {
// Return a list with JSFunctions of this frame.
virtual void GetFunctions(List<JSFunction*>* functions) const;
- // Build a list with summaries for this frame including all inlined frames.
- virtual void Summarize(List<FrameSummary>* frames);
-
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns data associated with the handler site specific to the frame type:
// - JavaScriptFrame : Data is the stack depth at entry of the try-block.
@@ -817,6 +890,8 @@ class JavaScriptFrame : public StandardFrame {
// receiver, and any callee-saved registers.
void IterateArguments(ObjectVisitor* v) const;
+ virtual void PrintFrameKind(StringStream* accumulator) const {}
+
private:
inline Object* function_slot_object() const;
@@ -857,7 +932,9 @@ class OptimizedFrame : public JavaScriptFrame {
// is the top-most activation)
void GetFunctions(List<JSFunction*>* functions) const override;
- void Summarize(List<FrameSummary>* frames) override;
+ void Summarize(
+ List<FrameSummary>* frames,
+ FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
// Lookup exception handler for current {pc}, returns -1 if none found.
int LookupExceptionHandlerInTable(
@@ -893,17 +970,20 @@ class InterpretedFrame : public JavaScriptFrame {
void PatchBytecodeOffset(int new_offset);
// Returns the frame's current bytecode array.
- Object* GetBytecodeArray() const;
+ BytecodeArray* GetBytecodeArray() const;
// Updates the frame's BytecodeArray with |bytecode_array|. Used by the
// debugger to swap execution onto a BytecodeArray patched with breakpoints.
- void PatchBytecodeArray(Object* bytecode_array);
+ void PatchBytecodeArray(BytecodeArray* bytecode_array);
// Access to the interpreter register file for this frame.
- Object* GetInterpreterRegister(int register_index) const;
+ Object* ReadInterpreterRegister(int register_index) const;
+ void WriteInterpreterRegister(int register_index, Object* value);
// Build a list with summaries for this frame including all inlined frames.
- void Summarize(List<FrameSummary>* frames) override;
+ void Summarize(
+ List<FrameSummary>* frames,
+ FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
protected:
inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
@@ -941,7 +1021,26 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int GetNumberOfIncomingArguments() const override;
- Address GetCallerStackPointer() const override;
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+// Builtin frames are built for builtins with JavaScript linkage, such as
+// various standard library functions (e.g., Math.asin, Math.floor).
+class BuiltinFrame final : public JavaScriptFrame {
+ public:
+ Type type() const final { return BUILTIN; }
+
+ static BuiltinFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_builtin());
+ return static_cast<BuiltinFrame*>(frame);
+ }
+
+ protected:
+ inline explicit BuiltinFrame(StackFrameIteratorBase* iterator);
+
+ int GetNumberOfIncomingArguments() const final;
+ void PrintFrameKind(StringStream* accumulator) const override;
private:
friend class StackFrameIteratorBase;
@@ -961,6 +1060,11 @@ class WasmFrame : public StandardFrame {
// Determine the code for the frame.
Code* unchecked_code() const override;
+ // Accessors.
+ Object* wasm_obj() const;
+ uint32_t function_index() const;
+ Script* script() const override;
+
static WasmFrame* cast(StackFrame* frame) {
DCHECK(frame->is_wasm());
return static_cast<WasmFrame*>(frame);
@@ -1143,17 +1247,31 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
StackFrameIterator iterator_;
};
-// NOTE: The stack trace frame iterator is an iterator that only
-// traverse proper JavaScript frames; that is JavaScript frames that
-// have proper JavaScript functions. This excludes the problematic
-// functions in runtime.js.
-class StackTraceFrameIterator: public JavaScriptFrameIterator {
+// NOTE: The stack trace frame iterator is an iterator that only traverses
+// proper JavaScript frames (i.e. JavaScript frames that have proper
+// JavaScript functions) and WASM frames. This excludes the problematic
+// functions in runtime.js.
+class StackTraceFrameIterator BASE_EMBEDDED {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
+ StackTraceFrameIterator(Isolate* isolate, StackFrame::Id id);
+ bool done() const { return iterator_.done(); }
void Advance();
+ inline StandardFrame* frame() const;
+
+ inline bool is_javascript() const;
+ inline bool is_wasm() const;
+ inline JavaScriptFrame* javascript_frame() const;
+ inline WasmFrame* wasm_frame() const;
+
+  // Advance to the frame holding the arguments for the current
+  // frame. This only affects the current frame if it is a JavaScript frame
+  // and has adapted arguments.
+ void AdvanceToArgumentsFrame();
+
private:
- bool IsValidFrame();
+ StackFrameIterator iterator_;
+ bool IsValidFrame(StackFrame* frame) const;
};
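
Based only on the interface above, a typical walk over trace-relevant frames now looks roughly like this (fragment; assumes an Isolate* isolate in scope inside V8):

for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
  if (it.is_javascript()) {
    JavaScriptFrame* frame = it.javascript_frame();
    // ... summarize or print the JS frame ...
  } else {
    DCHECK(it.is_wasm());
    WasmFrame* frame = it.wasm_frame();
    // ... handle the WASM frame, e.g. via frame->script() ...
  }
}
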
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 81c5ff2ae7..e25a0441d8 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -176,22 +176,19 @@ void FullCodeGenerator::Generate() {
__ push(r1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ push(r3); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(r1);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ pop(r3); // Preserve new target.
}
@@ -205,7 +202,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -232,11 +230,12 @@ void FullCodeGenerator::Generate() {
// Register holding this function and new target are both trashed in case we
// bailout here. But since that can happen only when new target is not used
// and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register_r1) {
@@ -247,7 +246,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, r3, r0, r2);
@@ -255,7 +254,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_r1) {
@@ -267,7 +266,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, r0, r1, r2);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -294,7 +293,8 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -307,7 +307,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
@@ -405,11 +406,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -468,6 +469,9 @@ void FullCodeGenerator::EmitReturnSequence() {
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
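RestoreContext() becomes the one place that reloads cp from the frame; the
hunks below replace every open-coded context reload with a call to it:

// Before, repeated at each call site that can clobber cp:
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// After:
RestoreContext();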
@@ -533,10 +537,12 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ b(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -728,7 +734,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
@@ -755,26 +761,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ str(r0, StackOperand(variable));
@@ -782,35 +783,29 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ str(r0, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ mov(r2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- } else {
- __ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
- }
- __ Push(r2, r0);
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ Push(r2);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -822,7 +817,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -854,7 +851,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -864,10 +861,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
PushOperand(r2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -876,20 +876,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ mov(r1, Operand(pairs));
__ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(r1, r0);
+ __ EmitLoadTypeFeedbackVector(r2);
+ __ Push(r1, r0, r2);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -897,7 +890,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -946,7 +939,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ b(&skip);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
__ b(ne, &next_test);
@@ -975,12 +968,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -1012,16 +1005,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(r0);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+  // Check cache validity in generated code. If we cannot guarantee cache
+  // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so they will always take the slow path.
Label call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -1035,7 +1027,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1076,7 +1068,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ Push(r1); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r0); // Initial index.
@@ -1089,10 +1081,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r0, r1); // Compare to the array length.
__ b(hs, loop_statement.break_label());
- // Get the current entry of the array into register r3.
+ // Get the current entry of the array into register r0.
__ ldr(r2, MemOperand(sp, 2 * kPointerSize));
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0));
+ __ ldr(r0, MemOperand::PointerAddressFromSmiKey(r2, r0));
   // Get the expected map from the stack, or a smi in the
   // permanent slow case, into register r2.
@@ -1108,34 +1100,32 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // We need to filter the key; record the slow path here.
int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(r0);
+ __ EmitLoadTypeFeedbackVector(r3);
__ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ str(r2, FieldMemOperand(r0, FixedArray::OffsetOfElementAt(vector_index)));
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ push(r1); // Enumerable.
- __ push(r3); // Current entry.
- __ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ mov(r3, Operand(r0));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
+ __ str(r2, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)));
+
+  // r0 contains the key. The receiver in r1 is the second argument to the
+  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // have the key, and the name-converted key otherwise.
+ ForInFilterStub filter_stub(isolate());
+ __ CallStub(&filter_stub);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
+ __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
__ b(eq, loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
- // entry in register r3.
+ // entry in register r0.
__ bind(&update_each);
- __ mov(result_register(), r3);
+
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
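The filter step now goes through a stub instead of Runtime::kForInFilter. The
contract the code above relies on, as an illustration (HasProperty and ToName
stand in for the real operations):

// ForInFilter(key in r0, receiver in r1) -> result in r0:
//   if (!HasProperty(receiver, key)) return undefined;  // deleted, skip it
//   return ToName(key);                                 // e.g. 0 -> "0"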
@@ -1154,7 +1144,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1194,43 +1184,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register next = r1;
Register temp = r2;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- __ bind(&loop);
- // Terminate at native context.
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- __ cmp(temp, ip);
- __ b(eq, &fast);
- // Check that extension is "the hole".
- __ ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ b(&loop);
- __ bind(&fast);
+ __ ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
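The open-ended scope walk (plus its separate eval-scope loop) is replaced by a
counted walk. A plausible model of the bound it consumes -- hedged, since the
actual definition lives in the scope code outside this hunk:

// Number of context-allocating scopes from here up to and including the
// outermost scope that calls sloppy eval; past that point no context
// extension can exist, so only 'to_check' contexts need checking.
int Scope::ContextChainLengthUntilOutermostSloppyEval() const {
  int result = 0;
  int length = 0;
  for (const Scope* s = this; s != nullptr; s = s->outer_scope()) {
    if (!s->NeedsContext()) continue;
    length++;
    if (s->calls_sloppy_eval()) result = length;
  }
  return result;
}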
@@ -1284,33 +1250,29 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- } else { // LET || CONST
- __ b(ne, done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ b(ne, done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ jmp(done);
}
- __ jmp(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
+#endif
+ __ mov(LoadGlobalDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
@@ -1318,7 +1280,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1339,23 +1301,16 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
- // Let and const need a read barrier.
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&done);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
+ __ b(ne, &done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&done);
context()->Plug(r0);
break;
}
@@ -1378,20 +1333,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r0);
+ break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->pattern()));
- __ mov(r0, Operand(Smi::FromInt(expr->flags())));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r0);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1427,8 +1374,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r0.
@@ -1456,7 +1404,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r0));
@@ -1464,7 +1413,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1498,17 +1447,21 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1527,6 +1480,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r0, Operand(Smi::FromInt(NONE)));
PushOperand(r0);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1556,7 +1510,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1572,6 +1526,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1629,7 +1585,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1659,7 +1615,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1679,7 +1636,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1694,7 +1652,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1763,23 +1720,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1799,7 +1760,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1811,7 +1772,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
break;
case NAMED_PROPERTY:
@@ -1840,21 +1801,27 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ jmp(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, r0 holds the generator object.
__ RecordGeneratorContinuation();
- __ pop(r1);
- __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ b(ne, &resume);
- __ push(result_register());
+ __ ldr(r1, FieldMemOperand(r0, JSGeneratorObject::kResumeModeOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSGeneratorObject::kInputOrDebugPosOffset));
+ STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+ STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+ __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
+ __ b(lt, &resume);
+ __ Push(result_register());
+ __ b(gt, &exception);
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
@@ -1870,7 +1837,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ b(eq, &post_runtime);
__ push(r0); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
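The resume-mode dispatch above leans on the asserted enum ordering so a single
comparison against kReturn serves three targets. A standalone model (the
numeric values are an assumption; only the ordering is asserted by the
STATIC_ASSERTs):

enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };

const char* Dispatch(ResumeMode mode) {
  if (mode < kReturn) return "resume";     // b(lt, &resume): kNext
  if (mode > kReturn) return "exception";  // b(gt, &exception): kThrow
  return "iterator-result";                // fall through: kReturn
}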
@@ -1879,113 +1846,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in r0, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // r1 will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(r1);
-
- // Store input value into generator object.
- __ str(result_register(),
- FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
- __ mov(r2, result_register());
- __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r2, r3,
- kLRHasBeenSaved, kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
- __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
- __ push(r2);
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
- __ b(mi, &push_frame);
- __ push(r2);
- __ jmp(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ bl(&resume_frame);
- __ jmp(&done);
- __ bind(&resume_frame);
- // lr = return address.
- // fp = caller's frame pointer.
- // pp = caller's constant pool (if FLAG_enable_embedded_constant_pool),
- // cp = callee's context,
- // r4 = callee's JS function.
- __ PushStandardFrame(r4);
-
- // Load the operand stack size.
- __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
- __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ SmiUntag(r3);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ cmp(r3, Operand(0));
- __ b(ne, &slow_resume);
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
-
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- if (FLAG_enable_embedded_constant_pool) {
- // Load the new code object's constant pool pointer.
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
- }
-
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r2);
- __ add(r3, r3, r2);
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Jump(r3);
- }
- __ bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(mi, &call_resume);
- __ push(r2);
- __ b(&push_operand_holes);
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- DCHECK(!result_register().is(r1));
- __ Push(r1, result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ stop("not-reached");
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -2009,7 +1869,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &allocate,
+ NO_ALLOCATION_FLAGS);
__ b(&done_allocate);
__ bind(&allocate);
@@ -2291,37 +2152,26 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &assign);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- // Perform the assignment.
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &const_error);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ ldr(r3, location);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &assign);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
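The separate LET and CONST assignment paths above collapse into one
lexical-mode path; in outline (illustrative pseudocode mirroring the emitted
sequence, not V8 source):

if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
  if (var->binding_needs_init()) {   // TDZ check
    if (Load(var) == the_hole) ThrowReferenceError(var->name());
  }
  if (var->mode() == CONST) {
    ThrowConstAssignError();         // assigning to const always throws
  } else {
    Store(var, value);               // LET: ordinary store
  }
}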
@@ -2336,8 +2186,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2359,25 +2208,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(r0);
- __ mov(r0, Operand(var->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2400,7 +2230,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
}
@@ -2445,44 +2275,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), r0);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), r0);
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
}
@@ -2506,7 +2299,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2519,7 +2312,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ ldr(ip, MemOperand(sp, 0));
PushOperand(ip);
@@ -2558,6 +2352,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2582,7 +2377,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), r0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ ldr(ip, MemOperand(sp, 0));
@@ -2618,6 +2414,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2637,7 +2434,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
@@ -2658,13 +2455,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, r0);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
// r4: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
@@ -2681,8 +2477,11 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // r1: the start position of the scope the call resides in.
__ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
+ // r0: the source position of the eval call.
+ __ mov(r0, Operand(Smi::FromInt(expr->position())));
+
// Do the runtime call.
- __ Push(r4, r3, r2, r1);
+ __ Push(r4, r3, r2, r1, r0);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
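EmitResolvePossiblyDirectEval now receives the whole call expression rather
than a bare argument count, so it can also pass the eval call's own source
position (expr->position(), pushed in r0) alongside the scope's start position
in r1 -- five runtime arguments instead of four.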
@@ -2703,7 +2502,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ Push(callee->name());
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperands(r0, r1); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@@ -2731,7 +2530,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// In a call to eval, we first call
- // RuntimeHidden_asResolvePossiblyDirectEval to resolve the function we need
+ // Runtime_ResolvePossiblyDirectEval to resolve the function we need
// to call. Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2747,12 +2546,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// resolve eval.
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(r1);
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
@@ -2763,8 +2562,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, r0);
}
@@ -2803,9 +2601,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(r0);
}
@@ -2848,9 +2645,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(r0);
}
@@ -3038,90 +2833,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(r0, &done);
- // If the object is not a value type, return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset), eq);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r0;
- Register index = r1;
- Register value = r2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
- __ SmiUntag(index, index);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ add(ip,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r0;
- Register index = r1;
- Register value = r2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
- __ SmiUntag(index, index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ add(ip,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ strh(value, MemOperand(ip, index));
- context()->Plug(string);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3155,13 +2866,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ jmp(&done);
@@ -3185,53 +2891,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3239,7 +2898,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to r1.
int const argc = args->length() - 2;
__ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3247,8 +2906,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ mov(r0, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, r0);
}
@@ -3298,12 +2956,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(r0);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r0);
- context()->Plug(r0);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3323,7 +2975,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime,
+ NO_ALLOCATION_FLAGS);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r1);
__ pop(r3);
__ pop(r2);
@@ -3365,9 +3018,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3389,7 +3040,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadGlobalObject(r2);
@@ -3451,12 +3102,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(r0);
__ jmp(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(r0, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(r0);
__ bind(&done);
@@ -3558,9 +3211,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3607,9 +3260,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3656,7 +3309,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(r0);
}
   // For all contexts except EffectContext we have the result on
@@ -3667,7 +3321,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(r0);
}
break;
@@ -3677,7 +3332,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3689,6 +3344,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3700,6 +3356,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3716,7 +3373,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3813,7 +3470,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3833,7 +3489,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -3841,6 +3498,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
PopOperand(r1);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
@@ -3852,6 +3510,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cond = CompareIC::ComputeCondition(op);
PopOperand(r1);
@@ -3937,7 +3596,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index aa67117a7f..3330325df4 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -179,22 +179,19 @@ void FullCodeGenerator::Generate() {
__ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
      // The new target value is not used, so clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ Push(x3); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ Push(x1);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ Pop(x3); // Restore new target.
}
@@ -208,7 +205,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -235,11 +233,12 @@ void FullCodeGenerator::Generate() {
  // Registers holding this function and the new target are both trashed in case
  // we bail out here. But since that can happen only when the new target is not
  // used and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register_x1) {
@@ -250,7 +249,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, x3, x0, x2);
@@ -258,7 +257,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_x1) {
@@ -270,7 +269,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, x0, x1, x2);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -297,7 +296,8 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -310,7 +310,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
@@ -393,11 +394,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ Bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -460,6 +461,9 @@ void FullCodeGenerator::EmitReturnSequence() {
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -524,10 +528,12 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ B(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ B(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -725,7 +731,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
if (should_normalize) {
__ B(&skip);
}
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, NULL);
@@ -751,27 +757,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
-
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
__ Str(x10, StackOperand(variable));
@@ -779,36 +779,29 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
__ Str(x10, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ Mov(x2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
- __ Push(x2, x0);
- } else {
- // Pushing 0 (xzr) indicates no initial value.
- __ Push(x2, xzr);
- }
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ Push(x2);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -820,7 +813,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack overflow exception.
@@ -852,7 +847,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -862,10 +857,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
PushOperand(x2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -878,20 +876,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
flags = x10;
__ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
}
- __ Push(x11, flags);
+ __ EmitLoadTypeFeedbackVector(x12);
+ __ Push(x11, flags, x12);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
Comment cmnt(masm_, "[ SwitchStatement");
@@ -900,7 +891,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -947,7 +938,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ B(&skip);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
__ Drop(1);
__ B(clause->body_target());
@@ -973,12 +964,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ Bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ Bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -1010,16 +1001,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ Bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ Push(x0);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+  // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so they will always take the slow path.
Label call_runtime;
__ CheckEnumCache(x0, x15, x10, x11, x12, x13, &call_runtime);
@@ -1033,7 +1023,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1069,7 +1059,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
__ Push(x1, x0, x2); // Smi and array, fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ Push(xzr); // Initial index.
// Generate code for doing the condition check.
@@ -1081,13 +1071,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Cmp(x0, x1); // Compare to the array length.
__ B(hs, loop_statement.break_label());
- // Get the current entry of the array into register r3.
+ // Get the current entry of the array into register x0.
__ Peek(x10, 2 * kXRegSize);
__ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
- __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Ldr(x0, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
// Get the expected map from the stack or a smi in the
- // permanent slow case into register x10.
+ // permanent slow case into register x2.
__ Peek(x2, 3 * kXRegSize);
// Check if the expected map still matches that of the enumerable.
@@ -1100,32 +1090,31 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We need to filter the key, record slow-path here.
int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(x0);
+ __ EmitLoadTypeFeedbackVector(x3);
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ Str(x10, FieldMemOperand(x0, FixedArray::OffsetOfElementAt(vector_index)));
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ Push(x1, x3);
- __ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ Mov(x3, x0);
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex,
- loop_statement.continue_label());
+ __ Str(x10, FieldMemOperand(x3, FixedArray::OffsetOfElementAt(vector_index)));
+
+ // x0 contains the key. The receiver in x1 is the second argument to the
+  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // have the key, and the name-converted key otherwise.
+ ForInFilterStub filter_stub(isolate());
+ __ CallStub(&filter_stub);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
+ __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ __ B(eq, loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
- // entry in register x3.
+ // entry in register x0.
__ Bind(&update_each);
- __ Mov(result_register(), x3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1145,7 +1134,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ Bind(&exit);
decrement_loop_depth();
}
@@ -1183,40 +1172,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register next = x10;
Register temp = x11;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- __ Mov(next, current);
-
- __ Bind(&loop);
- // Terminate at native context.
- __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
- // Check that extension is "the hole".
- __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ B(&loop);
- __ Bind(&fast);
+ __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
@@ -1270,32 +1238,28 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ B(done);
}
- __ B(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
- __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ Mov(LoadDescriptor::SlotRegister(),
+#endif
+ __ Mov(LoadGlobalDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
@@ -1303,7 +1267,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1325,23 +1289,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
? "Context variable"
: "Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
- // Let and const need a read barrier.
- GetVar(x0, var);
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
Label done;
+ GetVar(x0, var);
__ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ Bind(&done);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- }
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ Bind(&done);
context()->Plug(x0);
break;
}
@@ -1366,19 +1322,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
context()->Plug(x0);
break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Mov(x2, Smi::FromInt(expr->literal_index()));
- __ Mov(x1, Operand(expr->pattern()));
- __ Mov(x0, Smi::FromInt(expr->flags()));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(x0);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1414,8 +1361,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in x0.
@@ -1443,7 +1391,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(x0));
@@ -1451,7 +1400,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1484,16 +1433,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1504,14 +1457,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ Peek(x10, 0); // Duplicate receiver.
- PushOperand(x10);
- VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitAccessor(it->second->setter);
- __ Mov(x10, Smi::FromInt(NONE));
- PushOperand(x10);
- CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ __ Peek(x10, 0); // Duplicate receiver.
+ PushOperand(x10);
+ VisitForStackValue(it->first);
+ EmitAccessor(it->second->getter);
+ EmitAccessor(it->second->setter);
+ __ Mov(x10, Smi::FromInt(NONE));
+ PushOperand(x10);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1541,7 +1495,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1557,6 +1511,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1612,7 +1568,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1642,7 +1598,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1662,7 +1619,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1677,7 +1635,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1743,23 +1700,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1779,7 +1740,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1791,7 +1752,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
break;
case NAMED_PROPERTY:
@@ -2085,35 +2046,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
- __ Mov(x10, Operand(var->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError);
- // Perform the assignment.
- __ Bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &const_error);
- __ Mov(x10, Operand(var->name()));
- __ Push(x10);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ Bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ Bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2127,8 +2078,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2149,23 +2099,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ Mov(x1, Operand(var->name()));
- __ Push(x0, cp, x1);
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ Bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2189,7 +2122,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
}
@@ -2237,43 +2170,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), x0);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), x0);
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
}
@@ -2297,7 +2194,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2314,7 +2211,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PopOperand(x10);
PushOperands(x0, x10);
@@ -2354,6 +2252,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2379,7 +2278,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
__ Move(LoadDescriptor::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PopOperand(x10);
@@ -2415,6 +2315,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2435,7 +2336,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
@@ -2456,13 +2357,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, x0);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
// Prepare to push a copy of the first argument or undefined if it doesn't
// exist.
@@ -2478,9 +2378,11 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
__ Mov(x11, Smi::FromInt(language_mode()));
  // Prepare to push the start position of the scope the call resides in.
__ Mov(x12, Smi::FromInt(scope()->start_position()));
+ // Prepare to push the source position of the eval call.
+ __ Mov(x13, Smi::FromInt(expr->position()));
// Push.
- __ Push(x9, x10, x11, x12);
+ __ Push(x9, x10, x11, x12, x13);
// Do the runtime call.
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
@@ -2503,7 +2405,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ Push(callee->name());
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperands(x0, x1); // Receiver, function.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@@ -2530,7 +2432,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
ASM_LOCATION("FullCodeGenerator::EmitPossiblyEvalCall");
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2547,12 +2449,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// resolve eval.
__ Peek(x10, (arg_count + 1) * kPointerSize);
__ Push(x10);
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ Poke(x0, (arg_count + 1) * kPointerSize);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
@@ -2565,8 +2467,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, x0);
}
@@ -2605,9 +2506,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(x0);
}
@@ -2651,9 +2551,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(x0);
}
@@ -2843,84 +2741,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitValueOf");
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(x0, &done);
- // If the object is not a value type, return the object.
- __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done);
- __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset));
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = x0;
- Register index = x1;
- Register value = x2;
- Register scratch = x10;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(value, index);
-
- if (FLAG_debug_code) {
- __ AssertSmi(value, kNonSmiValue);
- __ AssertSmi(index, kNonSmiIndex);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
- one_byte_seq_type);
- }
-
- __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(value);
- __ SmiUntag(index);
- __ Strb(value, MemOperand(scratch, index));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = x0;
- Register index = x1;
- Register value = x2;
- Register scratch = x10;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(value, index);
-
- if (FLAG_debug_code) {
- __ AssertSmi(value, kNonSmiValue);
- __ AssertSmi(index, kNonSmiIndex);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch,
- two_byte_seq_type);
- }
-
- __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(value);
- __ SmiUntag(index);
- __ Strh(value, MemOperand(scratch, index, LSL, 1));
- context()->Plug(string);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2959,13 +2779,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ B(&done);
@@ -2988,52 +2803,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = x1;
- Register index = x0;
- Register result = x0;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- x3,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ B(&done);
-
- __ Bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ B(&done);
-
- __ Bind(&need_conversion);
- // Move smi zero into the result register, which will trigger conversion.
- __ Mov(result, Smi::FromInt(0));
- __ B(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitCall");
ZoneList<Expression*>* args = expr->arguments();
@@ -3042,7 +2811,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to x1.
int const argc = args->length() - 2;
__ Peek(x1, (argc + 1) * kXRegSize);
@@ -3050,8 +2819,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ Mov(x0, argc);
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, x0);
}
@@ -3101,12 +2869,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(x0);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, x0);
- context()->Plug(x0);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3127,7 +2889,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
Register result = x0;
- __ Allocate(JSIteratorResult::kSize, result, x10, x11, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, result, x10, x11, &runtime,
+ NO_ALLOCATION_FLAGS);
Register map_reg = x1;
Register result_value = x2;
Register boolean_done = x3;
@@ -3179,9 +2942,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3203,7 +2964,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadGlobalObject(x12);
@@ -3263,12 +3024,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ Bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
__ B(&done);
__ Bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
__ B(&done);
@@ -3368,9 +3131,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
  // in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3417,9 +3180,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3468,7 +3231,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(x0);
}
      // For all contexts except EffectContext we have the result on
@@ -3479,7 +3243,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(x0);
}
break;
@@ -3489,7 +3254,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3501,6 +3266,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3512,6 +3278,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3528,7 +3295,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3631,7 +3398,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// Try to generate an optimized comparison with a literal value.
// TODO(jbramley): This only checks common values like NaN or undefined.
@@ -3654,7 +3420,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -3662,6 +3429,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
PopOperand(x1);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
@@ -3673,6 +3441,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cond = CompareIC::ComputeCondition(op);
// Pop the stack value.
@@ -3745,24 +3514,30 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// and suchlike. The implementation changes a little by bleeding_edge so I
// don't want to spend too much time on it now.
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ B(&suspend);
// TODO(jbramley): This label is bound here because the following code
// looks at its pos(). Is it possible to do something more efficient here,
// perhaps using Adr?
__ Bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, x0 holds the generator object.
__ RecordGeneratorContinuation();
- __ Pop(x1);
- __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
- __ B(ne, &resume);
+ __ Ldr(x1, FieldMemOperand(x0, JSGeneratorObject::kResumeModeOffset));
+ __ Ldr(x0, FieldMemOperand(x0, JSGeneratorObject::kInputOrDebugPosOffset));
+ STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+ STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+ __ Cmp(x1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
+ __ B(lt, &resume);
__ Push(result_register());
+ __ B(gt, &exception);
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ Bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ Bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
@@ -3778,7 +3553,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ B(eq, &post_runtime);
__ Push(x0); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ Bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
@@ -3787,110 +3562,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
- Register generator_object = x1;
- Register the_hole = x2;
- Register operand_stack_size = w3;
- Register function = x4;
-
- // The value stays in x0, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed. x1
- // will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(generator_object);
-
- // Store input value into generator object.
- __ Str(result_register(),
- FieldMemOperand(x1, JSGeneratorObject::kInputOffset));
- __ Mov(x2, result_register());
- __ RecordWriteField(x1, JSGeneratorObject::kInputOffset, x2, x3,
- kLRHasBeenSaved, kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ Ldr(cp, FieldMemOperand(generator_object,
- JSGeneratorObject::kContextOffset));
- __ Ldr(function, FieldMemOperand(generator_object,
- JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kReceiverOffset));
- __ Push(x10);
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-
- // The number of arguments is stored as an int32_t, and -1 is a marker
- // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
- // extension to correctly handle it. However, in this case, we operate on
- // 32-bit W registers, so extension isn't required.
- __ Ldr(w10, FieldMemOperand(x10,
- SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ PushMultipleTimes(the_hole, w10);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ Bl(&resume_frame);
- __ B(&done);
-
- __ Bind(&resume_frame);
- __ Push(lr, // Return address.
- fp, // Caller's frame pointer.
- cp, // Callee's context.
- function); // Callee's JS Function.
- __ Add(fp, __ StackPointer(), kPointerSize * 2);
-
- // Load and untag the operand stack size.
- __ Ldr(x10, FieldMemOperand(generator_object,
- JSGeneratorObject::kOperandStackOffset));
- __ Ldr(operand_stack_size,
- UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ Cbnz(operand_stack_size, &slow_resume);
- __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- __ Ldrsw(x11,
- UntagSmiFieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- __ Add(x10, x10, x11);
- __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ Str(x12, FieldMemOperand(generator_object,
- JSGeneratorObject::kContinuationOffset));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Br(x10);
-
- __ Bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- __ PushMultipleTimes(the_hole, operand_stack_size);
-
- __ Mov(x10, Smi::FromInt(resume_mode));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Push(generator_object, result_register(), x10);
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ Unreachable();
-
- __ Bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -3923,7 +3594,8 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
// Allocate and populate an object with this form: { value: VAL, done: DONE }
Register result = x0;
- __ Allocate(JSIteratorResult::kSize, result, x10, x11, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, result, x10, x11, &allocate,
+ NO_ALLOCATION_FLAGS);
__ B(&done_allocate);
__ Bind(&allocate);
@@ -3990,7 +3662,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
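
Note: the rewritten continuation earlier in this file replaces the old
push-and-compare resume protocol with a single compare against kReturn,
relying on the enum ordering kNext < kReturn < kThrow that the two
STATIC_ASSERTs pin down. A minimal standalone sketch of that dispatch (not
V8 code; Dispatch and the printed strings are illustrative stand-ins):

    #include <cstdio>

    enum ResumeMode { kNext, kReturn, kThrow };  // ordering matters, as asserted

    // One comparison against kReturn classifies all three modes, mirroring
    // the Cmp / B(lt) / B(gt) sequence emitted for arm64 above.
    void Dispatch(ResumeMode mode) {
      if (mode < kReturn) {
        std::puts("next: deliver the input value to the suspended generator");
      } else if (mode > kReturn) {
        std::puts("throw: (re)throw the input value at the yield point");
      } else {
        std::puts("return: wrap the input in {value, done: true} and unwind");
      }
    }

    int main() {
      Dispatch(kNext);
      Dispatch(kReturn);
      Dispatch(kThrow);
    }
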
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index af5dd41885..d83a23b3f8 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -7,7 +7,6 @@
#include "src/ast/ast-numbering.h"
#include "src/ast/ast.h"
#include "src/ast/prettyprinter.h"
-#include "src/ast/scopeinfo.h"
#include "src/ast/scopes.h"
#include "src/code-factory.h"
#include "src/codegen.h"
@@ -15,6 +14,7 @@
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/globals.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/snapshot/snapshot.h"
@@ -28,11 +28,16 @@ namespace internal {
bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Isolate* isolate = info->isolate();
+ DCHECK(!FLAG_minimal);
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::CompileFullCode);
TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
- TRACE_EVENT0("v8", "V8.CompileFullCode");
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::CompileFullCode);
Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ if (!script->IsUndefined(isolate) &&
+ !script->source()->IsUndefined(isolate)) {
int len = String::cast(script->source())->length();
isolate->counters()->total_full_codegen_source_size()->Increment(len);
}
@@ -42,9 +47,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
CodeObjectRequired::kYes);
if (info->will_serialize()) masm.enable_serializer();
- LOG_CODE_EVENT(isolate,
- CodeStartLinePosInfoRecordEvent(masm.positions_recorder()));
-
FullCodeGenerator cgen(&masm, info);
cgen.Generate();
if (cgen.HasStackOverflow()) {
@@ -53,7 +55,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
}
unsigned table_offset = cgen.EmitBackEdgeTable();
- Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, info);
+ Handle<Code> code =
+ CodeGenerator::MakeCodeEpilogue(&masm, nullptr, info, masm.CodeObject());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateHandlerTable(code);
@@ -62,11 +65,12 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
code->set_back_edge_table_offset(table_offset);
+ Handle<ByteArray> source_positions =
+ cgen.source_position_table_builder_.ToSourcePositionTable(
+ isolate, Handle<AbstractCode>::cast(code));
+ code->set_source_position_table(*source_positions);
CodeGenerator::PrintCode(code, info);
info->SetCode(code);
- void* line_info = masm.positions_recorder()->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(
- AbstractCode::cast(*code), line_info));
#ifdef DEBUG
// Check that no context-specific object has been embedded.
@@ -122,12 +126,10 @@ void FullCodeGenerator::PopulateHandlerTable(Handle<Code> code) {
Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
HandlerTable::LengthForRange(handler_table_size), TENURED));
for (int i = 0; i < handler_table_size; ++i) {
- HandlerTable::CatchPrediction prediction =
- handler_table_[i].try_catch_depth > 0 ? HandlerTable::CAUGHT
- : HandlerTable::UNCAUGHT;
table->SetRangeStart(i, handler_table_[i].range_start);
table->SetRangeEnd(i, handler_table_[i].range_end);
- table->SetRangeHandler(i, handler_table_[i].handler_offset, prediction);
+ table->SetRangeHandler(i, handler_table_[i].handler_offset,
+ handler_table_[i].catch_prediction);
table->SetRangeData(i, handler_table_[i].stack_depth);
}
code->set_handler_table(*table);
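
Note: the handler table no longer derives CAUGHT/UNCAUGHT from a global
try_catch_depth_ counter; each entry now carries the catch prediction
recorded when the try block is entered. A rough sketch of the new entry
shape, assuming the field names used in this diff:

    enum CatchPrediction { UNCAUGHT, CAUGHT };  // stand-in for HandlerTable's enum

    struct HandlerTableEntry {
      unsigned range_start;
      unsigned range_end;
      unsigned handler_offset;
      int stack_depth;
      CatchPrediction catch_prediction;  // replaces the old try_catch_depth field
    };

    // New entries default to UNCAUGHT (see NewHandlerTableEntry below); the
    // real prediction is filled in by EnterTryBlock.
    HandlerTableEntry NewEntry() { return {0, 0, 0, 0, UNCAUGHT}; }
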
@@ -136,7 +138,7 @@ void FullCodeGenerator::PopulateHandlerTable(Handle<Code> code) {
int FullCodeGenerator::NewHandlerTableEntry() {
int index = static_cast<int>(handler_table_.size());
- HandlerTableEntry entry = {0, 0, 0, 0, 0};
+ HandlerTableEntry entry = {0, 0, 0, 0, HandlerTable::UNCAUGHT};
handler_table_.push_back(entry);
return index;
}
@@ -144,13 +146,8 @@ int FullCodeGenerator::NewHandlerTableEntry() {
bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
ObjectLiteral* expr) const {
- // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
- // support copy-on-write (COW) elements for now.
- // TODO(mvstanton): make object literals support COW elements.
- return masm()->serializer_enabled() || !expr->fast_elements() ||
- !expr->has_shallow_properties() ||
- expr->properties_count() >
- FastCloneShallowObjectStub::kMaximumClonedProperties;
+ return masm()->serializer_enabled() ||
+ !FastCloneShallowObjectStub::IsSupported(expr);
}
@@ -167,18 +164,22 @@ void FullCodeGenerator::Initialize() {
masm_->set_predictable_code_size(true);
}
-
-void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
+void FullCodeGenerator::PrepareForBailout(Expression* node,
+ BailoutState state) {
PrepareForBailoutForId(node->id(), state);
}
-
-void FullCodeGenerator::CallLoadIC(TypeofMode typeof_mode,
- TypeFeedbackId id) {
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), typeof_mode).code();
+void FullCodeGenerator::CallLoadIC(TypeFeedbackId id) {
+ Handle<Code> ic = CodeFactory::LoadIC(isolate()).code();
CallIC(ic, id);
+ if (FLAG_tf_load_ic_stub) RestoreContext();
}
+void FullCodeGenerator::CallLoadGlobalIC(TypeofMode typeof_mode,
+ TypeFeedbackId id) {
+ Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
+ CallIC(ic, id);
+}
void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
@@ -191,9 +192,9 @@ void FullCodeGenerator::RecordJSReturnSite(Call* call) {
// if the function was inlined, i.e., this is the return address in the
// inlined function's frame.
//
- // The state is ignored. We defensively set it to TOS_REG, which is the
- // real state of the unoptimized code at the return site.
- PrepareForBailoutForId(call->ReturnId(), TOS_REG);
+ // The bailout state is ignored. We defensively set it to TOS_REGISTER, which
+ // is the real state of the unoptimized code at the return site.
+ PrepareForBailoutForId(call->ReturnId(), BailoutState::TOS_REGISTER);
#ifdef DEBUG
// In debug builds, mark the return so we can verify that this function
// was called.
@@ -202,13 +203,13 @@ void FullCodeGenerator::RecordJSReturnSite(Call* call) {
#endif
}
-
-void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(BailoutId id,
+ BailoutState state) {
// There's no need to prepare this code for bailouts from already optimized
// code or code that can't be optimized.
if (!info_->HasDeoptimizationSupport()) return;
unsigned pc_and_state =
- StateField::encode(state) | PcField::encode(masm_->pc_offset());
+ BailoutStateField::encode(state) | PcField::encode(masm_->pc_offset());
DCHECK(Smi::IsValid(pc_and_state));
#ifdef DEBUG
for (int i = 0; i < bailout_entries_.length(); ++i) {
@@ -224,7 +225,7 @@ void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
DCHECK(masm_->pc_offset() > 0);
DCHECK(loop_depth() > 0);
- uint8_t depth = Min(loop_depth(), Code::kMaxLoopNestingMarker);
+ uint8_t depth = Min(loop_depth(), AbstractCode::kMaxLoopNestingMarker);
BackEdgeEntry entry =
{ ast_id, static_cast<unsigned>(masm_->pc_offset()), depth };
back_edges_.Add(entry, zone());
@@ -389,7 +390,7 @@ void FullCodeGenerator::VisitDeclarations(
ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
- AstVisitor::VisitDeclarations(declarations);
+ AstVisitor<FullCodeGenerator>::VisitDeclarations(declarations);
if (!globals_->is_empty()) {
// Invoke the platform-dependent code generator to do the actual
@@ -405,35 +406,6 @@ void FullCodeGenerator::VisitDeclarations(
}
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- switch (variable->location()) {
- case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- // TODO(rossberg)
- break;
-
- case VariableLocation::CONTEXT: {
- Comment cmnt(masm_, "[ ImportDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- // TODO(rossberg)
- break;
- }
-
- case VariableLocation::PARAMETER:
- case VariableLocation::LOCAL:
- case VariableLocation::LOOKUP:
- UNREACHABLE();
- }
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
- // TODO(rossberg)
-}
-
-
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr);
@@ -447,10 +419,7 @@ void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
int FullCodeGenerator::DeclareGlobalsFlags() {
- DCHECK(DeclareGlobalsLanguageMode::is_valid(language_mode()));
- return DeclareGlobalsEvalFlag::encode(is_eval()) |
- DeclareGlobalsNativeFlag::encode(is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ return info_->GetDeclareGlobalsFlags();
}
void FullCodeGenerator::PushOperand(Handle<Object> handle) {
@@ -524,19 +493,6 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
- // Load the arguments on the stack and call the runtime function.
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
- __ CallStub(&stub);
- OperandStackDepthDecrement(2);
- context()->Plug(result_register());
-}
-
-
void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
const Callable& callable) {
ZoneList<Expression*>* args = expr->arguments();
@@ -582,11 +538,6 @@ void FullCodeGenerator::EmitToString(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::ToName(isolate()));
-}
-
-
void FullCodeGenerator::EmitToLength(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
}
@@ -609,24 +560,27 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::RegExpConstructResult(isolate()));
}
-
-bool RecordStatementPosition(MacroAssembler* masm, int pos) {
- if (pos == RelocInfo::kNoPosition) return false;
- masm->positions_recorder()->RecordStatementPosition(pos);
- masm->positions_recorder()->RecordPosition(pos);
- return masm->positions_recorder()->WriteRecordedPositions();
+void FullCodeGenerator::EmitHasProperty() {
+ Callable callable = CodeFactory::HasProperty(isolate());
+ PopOperand(callable.descriptor().GetRegisterParameter(1));
+ PopOperand(callable.descriptor().GetRegisterParameter(0));
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ RestoreContext();
}
+void FullCodeGenerator::RecordStatementPosition(int pos) {
+ DCHECK_NE(kNoSourcePosition, pos);
+ source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, true);
+}
-bool RecordPosition(MacroAssembler* masm, int pos) {
- if (pos == RelocInfo::kNoPosition) return false;
- masm->positions_recorder()->RecordPosition(pos);
- return masm->positions_recorder()->WriteRecordedPositions();
+void FullCodeGenerator::RecordPosition(int pos) {
+ DCHECK_NE(kNoSourcePosition, pos);
+ source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
}
void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
- RecordPosition(masm_, fun->start_position());
+ RecordPosition(fun->start_position());
}
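
Note: position tracking moves from the assembler's positions recorder to a
SourcePositionTableBuilder owned by the code generator; each call records
the current pc offset, the source position, and whether it marks a
statement. A simplified sketch of that contract, under the assumption that
the builder just accumulates triples (the real one also encodes them
compactly):

    #include <vector>

    struct PositionEntry {
      int code_offset;      // masm->pc_offset() at record time
      int source_position;  // offset into the script source
      bool is_statement;    // true for RecordStatementPosition
    };

    class SourcePositionTableBuilderSketch {
     public:
      void AddPosition(int code_offset, int source_position, bool is_statement) {
        entries_.push_back({code_offset, source_position, is_statement});
      }
      const std::vector<PositionEntry>& entries() const { return entries_; }

     private:
      std::vector<PositionEntry> entries_;
    };

The accumulated table is serialized once compilation finishes and attached
to the code object via set_source_position_table (see the MakeCode hunk
above).
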
@@ -634,7 +588,7 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
// For default constructors, start position equals end position, and there
// is no source code besides the class literal.
int pos = std::max(fun->start_position(), fun->end_position() - 1);
- RecordStatementPosition(masm_, pos);
+ RecordStatementPosition(pos);
if (info_->is_debug()) {
// Always emit a debug break slot before a return.
DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
@@ -644,37 +598,32 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetStatementPosition(
Statement* stmt, FullCodeGenerator::InsertBreak insert_break) {
- if (stmt->position() == RelocInfo::kNoPosition) return;
- bool recorded = RecordStatementPosition(masm_, stmt->position());
- if (recorded && insert_break == INSERT_BREAK && info_->is_debug() &&
+ if (stmt->position() == kNoSourcePosition) return;
+ RecordStatementPosition(stmt->position());
+ if (insert_break == INSERT_BREAK && info_->is_debug() &&
!stmt->IsDebuggerStatement()) {
DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
}
}
-
-void FullCodeGenerator::SetExpressionPosition(
- Expression* expr, FullCodeGenerator::InsertBreak insert_break) {
- if (expr->position() == RelocInfo::kNoPosition) return;
- bool recorded = RecordPosition(masm_, expr->position());
- if (recorded && insert_break == INSERT_BREAK && info_->is_debug()) {
- DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
- }
+void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
+ if (expr->position() == kNoSourcePosition) return;
+ RecordPosition(expr->position());
}
void FullCodeGenerator::SetExpressionAsStatementPosition(Expression* expr) {
- if (expr->position() == RelocInfo::kNoPosition) return;
- bool recorded = RecordStatementPosition(masm_, expr->position());
- if (recorded && info_->is_debug()) {
+ if (expr->position() == kNoSourcePosition) return;
+ RecordStatementPosition(expr->position());
+ if (info_->is_debug()) {
DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
}
}
void FullCodeGenerator::SetCallPosition(Expression* expr,
TailCallMode tail_call_mode) {
- if (expr->position() == RelocInfo::kNoPosition) return;
- RecordPosition(masm_, expr->position());
+ if (expr->position() == kNoSourcePosition) return;
+ RecordPosition(expr->position());
if (info_->is_debug()) {
RelocInfo::Mode mode = (tail_call_mode == TailCallMode::kAllow)
? RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL
@@ -688,32 +637,15 @@ void FullCodeGenerator::SetCallPosition(Expression* expr,
void FullCodeGenerator::VisitSuperPropertyReference(
SuperPropertyReference* super) {
__ CallRuntime(Runtime::kThrowUnsupportedSuperError);
+ // Even though this expression doesn't produce a value, we need to simulate
+ // plugging of the value context to ensure stack depth tracking is in sync.
+ if (context()->IsStackValue()) OperandStackDepthIncrement(1);
}
void FullCodeGenerator::VisitSuperCallReference(SuperCallReference* super) {
- __ CallRuntime(Runtime::kThrowUnsupportedSuperError);
-}
-
-
-void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::NEXT);
-}
-
-
-void FullCodeGenerator::EmitGeneratorReturn(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::RETURN);
-}
-
-
-void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::THROW);
+ // Handled by VisitCall
+ UNREACHABLE();
}
@@ -773,7 +705,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
} else {
VisitForControl(left, test->true_label(), &eval_right, &eval_right);
}
- PrepareForBailoutForId(right_id, NO_REGISTERS);
+ PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
__ bind(&eval_right);
} else if (context()->IsAccumulatorValue()) {
@@ -792,7 +724,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
__ jmp(&done);
__ bind(&discard);
__ Drop(1);
- PrepareForBailoutForId(right_id, NO_REGISTERS);
+ PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
} else if (context()->IsStackValue()) {
VisitForAccumulatorValue(left);
@@ -807,7 +739,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
}
__ bind(&discard);
__ Drop(1);
- PrepareForBailoutForId(right_id, NO_REGISTERS);
+ PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
} else {
DCHECK(context()->IsEffect());
@@ -817,7 +749,7 @@ void FullCodeGenerator::VisitLogicalExpression(BinaryOperation* expr) {
} else {
VisitForControl(left, &done, &eval_right, &eval_right);
}
- PrepareForBailoutForId(right_id, NO_REGISTERS);
+ PrepareForBailoutForId(right_id, BailoutState::NO_REGISTERS);
__ bind(&eval_right);
}
@@ -843,6 +775,41 @@ void FullCodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
}
}
+void FullCodeGenerator::VisitProperty(Property* expr) {
+ Comment cmnt(masm_, "[ Property");
+ SetExpressionPosition(expr);
+
+ Expression* key = expr->key();
+
+ if (key->IsPropertyName()) {
+ if (!expr->IsSuperAccess()) {
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+ EmitNamedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
+ EmitNamedSuperPropertyLoad(expr);
+ }
+ } else {
+ if (!expr->IsSuperAccess()) {
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadDescriptor::NameRegister(), result_register());
+ PopOperand(LoadDescriptor::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
+ } else {
+ VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
+ VisitForStackValue(
+ expr->obj()->AsSuperPropertyReference()->home_object());
+ VisitForStackValue(expr->key());
+ EmitKeyedSuperPropertyLoad(expr);
+ }
+ }
+ PrepareForBailoutForId(expr->LoadId(), BailoutState::TOS_REGISTER);
+ context()->Plug(result_register());
+}
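
Note: VisitProperty, now hoisted into the shared file, dispatches on two
independent bits: whether the key is a property name and whether the
receiver is a super access. A tiny sketch of that classification (the
names here are illustrative, not V8 API):

    #include <cstdio>

    enum class LoadKind { Named, NamedSuper, Keyed, KeyedSuper };

    LoadKind Classify(bool key_is_property_name, bool is_super_access) {
      if (key_is_property_name) {
        return is_super_access ? LoadKind::NamedSuper : LoadKind::Named;
      }
      return is_super_access ? LoadKind::KeyedSuper : LoadKind::Keyed;
    }

    int main() {
      // o.x -> Named, super.x -> NamedSuper, o[k] -> Keyed, super[k] -> KeyedSuper
      std::printf("o.x classifies as %d\n",
                  static_cast<int>(Classify(true, false)));
    }
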
void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
@@ -852,7 +819,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && (proxy->var()->IsUnallocatedOrGlobalSlot() ||
proxy->var()->IsLookupSlot())) {
EmitVariableLoad(proxy, INSIDE_TYPEOF);
- PrepareForBailout(proxy, TOS_REG);
+ PrepareForBailout(proxy, BailoutState::TOS_REGISTER);
} else {
// This expression cannot throw a reference error at the top level.
VisitInDuplicateContext(expr);
@@ -875,10 +842,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
void FullCodeGenerator::VisitDoExpression(DoExpression* expr) {
Comment cmnt(masm_, "[ Do Expression");
- NestedStatement nested_block(this);
SetExpressionPosition(expr);
VisitBlock(expr->block());
- EmitVariableLoad(expr->result());
+ VisitInDuplicateContext(expr->result());
}
@@ -901,24 +867,24 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
if (stmt->HasElseStatement()) {
VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ThenId(), BailoutState::NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
__ jmp(&done);
- PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ElseId(), BailoutState::NO_REGISTERS);
__ bind(&else_part);
Visit(stmt->else_statement());
} else {
VisitForControl(stmt->condition(), &then_part, &done, &then_part);
- PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ThenId(), BailoutState::NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
- PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ElseId(), BailoutState::NO_REGISTERS);
}
__ bind(&done);
- PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->IfId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitContinue(Statement* target) {
@@ -1019,18 +985,13 @@ void FullCodeGenerator::EmitUnwindAndReturn() {
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning. If
- // we're running with the --always-opt or the --prepare-always-opt
+ // If we're running with the --always-opt or the --prepare-always-opt
// flag, we need to use the runtime function so that the new function
// we are creating here gets a chance to have its code optimized and
// doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
- FastNewClosureStub stub(isolate(), info->language_mode(), info->kind());
+ if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
+ scope()->is_function_scope()) {
+ FastNewClosureStub stub(isolate());
__ Move(stub.GetCallInterfaceDescriptor().GetRegisterParameter(0), info);
__ CallStub(&stub);
} else {
@@ -1050,7 +1011,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ Move(LoadDescriptor::NameRegister(), key->value());
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ CallLoadIC();
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
@@ -1070,6 +1031,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
CallIC(ic);
+ if (FLAG_tf_load_ic_stub) RestoreContext();
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
@@ -1082,13 +1044,13 @@ void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
BailoutId bailout_id) {
VisitForStackValue(property->key());
CallRuntimeWithOperands(Runtime::kToName);
- PrepareForBailoutForId(bailout_id, NO_REGISTERS);
+ PrepareForBailoutForId(bailout_id, BailoutState::TOS_REGISTER);
PushOperand(result_register());
}
void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
DCHECK(!slot.IsInvalid());
- __ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
+ __ Move(StoreDescriptor::SlotRegister(), SmiFromSlot(slot));
}
void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
@@ -1108,12 +1070,13 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
Callable callable = CodeFactory::ToObject(isolate());
__ Move(callable.descriptor().GetRegisterParameter(0), result_register());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
- PrepareForBailoutForId(stmt->ToObjectId(), NO_REGISTERS);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
PushOperand(result_register());
PushFunctionArgumentForContextAllocation();
CallRuntimeWithOperands(Runtime::kPushWithContext);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
Scope* saved_scope = scope();
scope_ = stmt->scope();
@@ -1145,7 +1108,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// Record the position of the do-while condition and make sure it is
// possible to break on the condition.
__ bind(loop_statement.continue_label());
- PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ContinueId(), BailoutState::NO_REGISTERS);
// Here is the actual 'while' keyword.
SetExpressionAsStatementPosition(stmt->cond());
@@ -1155,12 +1118,12 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
&book_keeping);
// Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BackEdgeId(), BailoutState::NO_REGISTERS);
__ bind(&book_keeping);
EmitBackEdgeBookkeeping(stmt, &body);
__ jmp(&body);
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
@@ -1181,7 +1144,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
loop_statement.break_label(),
&body);
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
@@ -1191,7 +1154,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
@@ -1214,11 +1177,11 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
- PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ContinueId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.continue_label());
if (stmt->next() != NULL) {
SetStatementPosition(stmt->next());
@@ -1239,7 +1202,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
__ jmp(&body);
}
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
@@ -1252,6 +1215,7 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
increment_loop_depth();
// var iterator = iterable[Symbol.iterator]();
+ SetExpressionAsStatementPosition(stmt->assign_iterator());
VisitForEffect(stmt->assign_iterator());
// Loop entry.
@@ -1274,12 +1238,12 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Visit(stmt->body());
// Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BackEdgeId(), BailoutState::NO_REGISTERS);
EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
__ jmp(loop_statement.continue_label());
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
@@ -1331,15 +1295,13 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Try block code. Sets up the exception handler chain.
__ bind(&try_entry);
- try_catch_depth_++;
int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &handler_entry);
+ EnterTryBlock(handler_index, &handler_entry, stmt->catch_prediction());
{
Comment cmnt_try(masm(), "[ Try block");
Visit(stmt->try_block());
}
ExitTryBlock(handler_index);
- try_catch_depth_--;
__ bind(&exit);
}
@@ -1383,7 +1345,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Set up try handler.
__ bind(&try_entry);
int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &handler_entry);
+ EnterTryBlock(handler_index, &handler_entry, stmt->catch_prediction());
{
Comment cmnt_try(masm(), "[ Try block");
TryFinally try_body(this, &deferred);
@@ -1423,7 +1385,7 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
__ DebugBreak();
// Ignore the return value.
- PrepareForBailoutForId(stmt->DebugBreakId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->DebugBreakId(), BailoutState::NO_REGISTERS);
}
@@ -1438,7 +1400,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
int original_stack_depth = operand_stack_depth_;
- PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->ThenId(), BailoutState::NO_REGISTERS);
__ bind(&true_case);
SetExpressionPosition(expr->then_expression());
if (context()->IsTest()) {
@@ -1453,7 +1415,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
}
operand_stack_depth_ = original_stack_depth;
- PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->ElseId(), BailoutState::NO_REGISTERS);
__ bind(&false_case);
SetExpressionPosition(expr->else_expression());
VisitInDuplicateContext(expr->else_expression());
@@ -1487,49 +1449,61 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
Comment cmnt(masm_, "[ ClassLiteral");
- {
- NestedClassLiteral nested_class_literal(this, lit);
- EnterBlockScopeIfNeeded block_scope_state(
- this, lit->scope(), lit->EntryId(), lit->DeclsId(), lit->ExitId());
-
- if (lit->extends() != NULL) {
- VisitForStackValue(lit->extends());
- } else {
- PushOperand(isolate()->factory()->the_hole_value());
- }
+ if (lit->extends() != NULL) {
+ VisitForStackValue(lit->extends());
+ } else {
+ PushOperand(isolate()->factory()->the_hole_value());
+ }
- VisitForStackValue(lit->constructor());
+ VisitForStackValue(lit->constructor());
- PushOperand(Smi::FromInt(lit->start_position()));
- PushOperand(Smi::FromInt(lit->end_position()));
+ PushOperand(Smi::FromInt(lit->start_position()));
+ PushOperand(Smi::FromInt(lit->end_position()));
- CallRuntimeWithOperands(Runtime::kDefineClass);
- PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
- PushOperand(result_register());
+ CallRuntimeWithOperands(Runtime::kDefineClass);
+ PrepareForBailoutForId(lit->CreateLiteralId(), BailoutState::TOS_REGISTER);
+ PushOperand(result_register());
- // Load the "prototype" from the constructor.
- __ Move(LoadDescriptor::ReceiverRegister(), result_register());
- __ LoadRoot(LoadDescriptor::NameRegister(),
- Heap::kprototype_stringRootIndex);
- __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF);
- PrepareForBailoutForId(lit->PrototypeId(), TOS_REG);
- PushOperand(result_register());
+ // Load the "prototype" from the constructor.
+ __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+ __ LoadRoot(LoadDescriptor::NameRegister(), Heap::kprototype_stringRootIndex);
+ __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
+ CallLoadIC();
+ PrepareForBailoutForId(lit->PrototypeId(), BailoutState::TOS_REGISTER);
+ PushOperand(result_register());
- EmitClassDefineProperties(lit);
+ EmitClassDefineProperties(lit);
+ DropOperands(1);
- // Set both the prototype and constructor to have fast properties.
- CallRuntimeWithOperands(Runtime::kFinalizeClassDefinition);
+ // Set the constructor to have fast properties.
+ CallRuntimeWithOperands(Runtime::kToFastProperties);
- if (lit->class_variable_proxy() != nullptr) {
- EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
- lit->ProxySlot());
- }
+ if (lit->class_variable_proxy() != nullptr) {
+ EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
+ lit->ProxySlot());
}
context()->Plug(result_register());
}
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+ Comment cmnt(masm_, "[ RegExpLiteral");
+ Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ CallInterfaceDescriptor descriptor = callable.descriptor();
+ LoadFromFrameField(JavaScriptFrameConstants::kFunctionOffset,
+ descriptor.GetRegisterParameter(0));
+ __ Move(descriptor.GetRegisterParameter(1),
+ Smi::FromInt(expr->literal_index()));
+ __ Move(descriptor.GetRegisterParameter(2), expr->pattern());
+ __ Move(descriptor.GetRegisterParameter(3), Smi::FromInt(expr->flags()));
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+
+  // Reload the context register after the call since, e.g., TurboFan code
+ // won't preserve the context register.
+ LoadFromFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ context()->Plug(result_register());
+}
void FullCodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
@@ -1552,13 +1526,14 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
if (context()->IsStackValue()) OperandStackDepthIncrement(1);
}
-
-void FullCodeGenerator::EnterTryBlock(int handler_index, Label* handler) {
+void FullCodeGenerator::EnterTryBlock(
+ int handler_index, Label* handler,
+ HandlerTable::CatchPrediction catch_prediction) {
HandlerTableEntry* entry = &handler_table_[handler_index];
entry->range_start = masm()->pc_offset();
entry->handler_offset = handler->pos();
- entry->try_catch_depth = try_catch_depth_;
entry->stack_depth = operand_stack_depth_;
+ entry->catch_prediction = catch_prediction;
// We are using the operand stack depth, check for accuracy.
EmitOperandStackDepthCheck();
@@ -1589,7 +1564,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
? "[ TailCall"
: "[ Call");
Expression* callee = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
+ Call::CallType call_type = expr->GetCallType();
switch (call_type) {
case Call::POSSIBLY_EVAL_CALL:
@@ -1653,7 +1628,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
EmitCallJSRuntimeFunction(expr);
context()->DropAndPlug(1, result_register());
@@ -1675,7 +1650,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Call the C runtime function.
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
OperandStackDepthDecrement(arg_count);
context()->Plug(result_register());
@@ -1768,16 +1743,19 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression* sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+ SetExpressionPosition(expr);
EmitLiteralCompareTypeof(expr, sub_expr, check);
return true;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+ SetExpressionPosition(expr);
EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
+ SetExpressionPosition(expr);
EmitLiteralCompareNil(expr, sub_expr, kNullValue);
return true;
}
@@ -1794,7 +1772,7 @@ void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
// to find the matching loops to patch the interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level() + 1;
- if (loop_nesting_level > Code::kMaxLoopNestingMarker) return;
+ if (loop_nesting_level > AbstractCode::kMaxLoopNestingMarker) return;
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
@@ -1841,7 +1819,7 @@ bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
uint32_t loop_depth = back_edges.loop_depth(i);
- CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
+ CHECK_LE(static_cast<int>(loop_depth), AbstractCode::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
@@ -1861,7 +1839,7 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
saved_scope_ = codegen_->scope();
if (scope == NULL) {
- codegen_->PrepareForBailoutForId(entry_id, NO_REGISTERS);
+ codegen_->PrepareForBailoutForId(entry_id, BailoutState::NO_REGISTERS);
needs_block_context_ = false;
} else {
needs_block_context_ = scope->NeedsContext();
@@ -1878,12 +1856,13 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
codegen_->context_register());
}
CHECK_EQ(0, scope->num_stack_slots());
- codegen_->PrepareForBailoutForId(entry_id, NO_REGISTERS);
+ codegen_->PrepareForBailoutForId(entry_id, BailoutState::NO_REGISTERS);
}
{
Comment cmnt(masm(), "[ Declarations");
codegen_->VisitDeclarations(scope->declarations());
- codegen_->PrepareForBailoutForId(declarations_id, NO_REGISTERS);
+ codegen_->PrepareForBailoutForId(declarations_id,
+ BailoutState::NO_REGISTERS);
}
}
}
@@ -1897,7 +1876,7 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::~EnterBlockScopeIfNeeded() {
codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
codegen_->context_register());
}
- codegen_->PrepareForBailoutForId(exit_id_, NO_REGISTERS);
+ codegen_->PrepareForBailoutForId(exit_id_, BailoutState::NO_REGISTERS);
codegen_->scope_ = saved_scope_;
}
@@ -1941,7 +1920,7 @@ bool FullCodeGenerator::NeedsHoleCheckForLoad(VariableProxy* proxy) {
// switch (1) { case 0: let x = 2; case 1: f(x); }
// The scope of the variable needs to be checked, in case the use is
// in a sub-block which may be linear.
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ if (var->scope()->GetDeclarationScope() != scope()->GetDeclarationScope()) {
return true;
}
@@ -1953,10 +1932,10 @@ bool FullCodeGenerator::NeedsHoleCheckForLoad(VariableProxy* proxy) {
}
// Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
+ DCHECK(var->initializer_position() != kNoSourcePosition);
+ DCHECK(proxy->position() != kNoSourcePosition);
- return var->mode() == CONST_LEGACY || var->scope()->is_nonlinear() ||
+ return var->scope()->is_nonlinear() ||
var->initializer_position() >= proxy->position();
}
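
Note: with CONST_LEGACY gone, the hole-check (TDZ) predicate in
NeedsHoleCheckForLoad reduces to two conditions. Restated as plain C++
under the same names as the diff:

    // A binding load needs a hole check only if the enclosing scope is
    // nonlinear (e.g. the switch example quoted above) or the use appears
    // at or before the initializer in the source.
    bool NeedsHoleCheck(bool scope_is_nonlinear, int initializer_position,
                        int use_position) {
      return scope_is_nonlinear || initializer_position >= use_position;
    }
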
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 0c12937149..71f065b092 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -14,6 +14,7 @@
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
+#include "src/deoptimizer.h"
#include "src/globals.h"
#include "src/objects.h"
@@ -26,13 +27,8 @@ class JumpPatchSite;
// -----------------------------------------------------------------------------
// Full code generator.
-class FullCodeGenerator: public AstVisitor {
+class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
public:
- enum State {
- NO_REGISTERS,
- TOS_REG
- };
-
FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
: masm_(masm),
info_(info),
@@ -41,7 +37,6 @@ class FullCodeGenerator: public AstVisitor {
scope_(info->scope()),
nesting_stack_(NULL),
loop_depth_(0),
- try_catch_depth_(0),
operand_stack_depth_(0),
globals_(NULL),
context_(NULL),
@@ -51,6 +46,8 @@ class FullCodeGenerator: public AstVisitor {
info->zone()),
back_edges_(2, info->zone()),
handler_table_(info->zone()),
+ source_position_table_builder_(info->zone(),
+ info->SourcePositionRecordingMode()),
ic_total_count_(0) {
DCHECK(!info->IsStub());
Initialize();
@@ -60,19 +57,10 @@ class FullCodeGenerator: public AstVisitor {
static bool MakeCode(CompilationInfo* info);
- // Encode state and pc-offset as a BitField<type, start, size>.
+ // Encode bailout state and pc-offset as a BitField<type, start, size>.
// Only use 30 bits because we encode the result as a smi.
- class StateField : public BitField<State, 0, 1> { };
- class PcField : public BitField<unsigned, 1, 30-1> { };
-
- static const char* State2String(State state) {
- switch (state) {
- case NO_REGISTERS: return "NO_REGISTERS";
- case TOS_REG: return "TOS_REG";
- }
- UNREACHABLE();
- return NULL;
- }
+ class BailoutStateField : public BitField<Deoptimizer::BailoutState, 0, 1> {};
+ class PcField : public BitField<unsigned, 1, 30 - 1> {};
static const int kMaxBackEdgeWeight = 127;
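
Note: the renamed BailoutStateField still packs one state bit plus a
29-bit pc offset into 30 bits so the result fits in a Smi. A standalone
sketch of that encoding (Encode/DecodeState/DecodePc are illustrative
helpers, not V8 API):

    #include <cassert>
    #include <cstdint>

    enum class BailoutState : uint32_t { NO_REGISTERS = 0, TOS_REGISTER = 1 };

    uint32_t Encode(BailoutState state, uint32_t pc_offset) {
      assert(pc_offset < (1u << 29));  // PcField is 30 - 1 = 29 bits wide
      return static_cast<uint32_t>(state) | (pc_offset << 1);
    }

    BailoutState DecodeState(uint32_t packed) {
      return static_cast<BailoutState>(packed & 1u);
    }

    uint32_t DecodePc(uint32_t packed) { return packed >> 1; }
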
@@ -106,6 +94,8 @@ class FullCodeGenerator: public AstVisitor {
static Register result_register();
private:
+ typedef Deoptimizer::BailoutState BailoutState;
+
class Breakable;
class Iteration;
class TryFinally;
@@ -211,23 +201,6 @@ class FullCodeGenerator: public AstVisitor {
}
};
- // A class literal expression
- class NestedClassLiteral : public NestedStatement {
- public:
- NestedClassLiteral(FullCodeGenerator* codegen, ClassLiteral* lit)
- : NestedStatement(codegen),
- needs_context_(lit->scope() != nullptr &&
- lit->scope()->NeedsContext()) {}
-
- NestedStatement* Exit(int* context_length) override {
- if (needs_context_) ++(*context_length);
- return previous_;
- }
-
- private:
- const bool needs_context_;
- };
-
class DeferredCommands {
public:
enum Command { kReturn, kThrow, kBreak, kContinue };
@@ -366,21 +339,21 @@ class FullCodeGenerator: public AstVisitor {
if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
EffectContext context(this);
Visit(expr);
- PrepareForBailout(expr, NO_REGISTERS);
+ PrepareForBailout(expr, BailoutState::NO_REGISTERS);
}
void VisitForAccumulatorValue(Expression* expr) {
if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
AccumulatorValueContext context(this);
Visit(expr);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
}
void VisitForStackValue(Expression* expr) {
if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
StackValueContext context(this);
Visit(expr);
- PrepareForBailout(expr, NO_REGISTERS);
+ PrepareForBailout(expr, BailoutState::NO_REGISTERS);
}
void VisitForControl(Expression* expr,
@@ -397,8 +370,7 @@ class FullCodeGenerator: public AstVisitor {
void VisitInDuplicateContext(Expression* expr);
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
- void DeclareModules(Handle<FixedArray> descriptions);
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
@@ -452,8 +424,8 @@ class FullCodeGenerator: public AstVisitor {
NilValue nil);
// Bailout support.
- void PrepareForBailout(Expression* node, State state);
- void PrepareForBailoutForId(BailoutId id, State state);
+ void PrepareForBailout(Expression* node, Deoptimizer::BailoutState state);
+ void PrepareForBailoutForId(BailoutId id, Deoptimizer::BailoutState state);
// Returns a smi for the index into the FixedArray that backs the feedback
// vector
@@ -521,19 +493,11 @@ class FullCodeGenerator: public AstVisitor {
F(IsJSProxy) \
F(Call) \
F(NewObject) \
- F(ValueOf) \
F(StringCharFromCode) \
- F(StringCharAt) \
- F(OneByteSeqStringSetChar) \
- F(TwoByteSeqStringSetChar) \
F(IsJSReceiver) \
- F(MathPow) \
F(HasCachedArrayIndex) \
F(GetCachedArrayIndex) \
F(GetSuperConstructor) \
- F(GeneratorNext) \
- F(GeneratorReturn) \
- F(GeneratorThrow) \
F(DebugBreakInOptimizedCode) \
F(ClassOf) \
F(StringCharCodeAt) \
@@ -545,10 +509,8 @@ class FullCodeGenerator: public AstVisitor {
F(ToString) \
F(ToLength) \
F(ToNumber) \
- F(ToName) \
F(ToObject) \
F(DebugIsActive) \
- F(GetOrdinaryHasInstance) \
F(CreateIterResultObject)
#define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
@@ -557,10 +519,11 @@ class FullCodeGenerator: public AstVisitor {
void EmitIntrinsicAsStubCall(CallRuntime* expr, const Callable& callable);
- // Platform-specific code for resuming generators.
- void EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode);
+ // Emits call to respective code stub.
+ void EmitHasProperty();
+
+ // Platform-specific code for restoring context from current JS frame.
+ void RestoreContext();
// Platform-specific code for loading variables.
void EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
@@ -577,7 +540,7 @@ class FullCodeGenerator: public AstVisitor {
bool NeedsHoleCheckForLoad(VariableProxy* proxy);
// Expects the arguments and the function already pushed.
- void EmitResolvePossiblyDirectEval(int arg_count);
+ void EmitResolvePossiblyDirectEval(Call* expr);
// Platform-specific support for allocating a new closure based on
// the given function info.
@@ -671,9 +634,10 @@ class FullCodeGenerator: public AstVisitor {
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
+ void CallLoadIC(TypeFeedbackId id = TypeFeedbackId::None());
// Inside typeof reference errors are never thrown.
- void CallLoadIC(TypeofMode typeof_mode,
- TypeFeedbackId id = TypeFeedbackId::None());
+ void CallLoadGlobalIC(TypeofMode typeof_mode,
+ TypeFeedbackId id = TypeFeedbackId::None());
void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
@@ -687,8 +651,7 @@ class FullCodeGenerator: public AstVisitor {
// otherwise.
void SetStatementPosition(Statement* stmt,
InsertBreak insert_break = INSERT_BREAK);
- void SetExpressionPosition(Expression* expr,
- InsertBreak insert_break = SKIP_BREAK);
+ void SetExpressionPosition(Expression* expr);
// Consider an expression a statement. As such, we also insert a break.
// This is used in loop headers where we want to break for each iteration.
@@ -702,8 +665,12 @@ class FullCodeGenerator: public AstVisitor {
SetCallPosition(expr);
}
+ void RecordStatementPosition(int pos);
+ void RecordPosition(int pos);
+
// Non-local control flow support.
- void EnterTryBlock(int handler_index, Label* handler);
+ void EnterTryBlock(int handler_index, Label* handler,
+ HandlerTable::CatchPrediction catch_prediction);
void ExitTryBlock(int handler_index);
void EnterFinallyBlock();
void ExitFinallyBlock();
@@ -729,8 +696,6 @@ class FullCodeGenerator: public AstVisitor {
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
Handle<Script> script() { return info_->script(); }
- bool is_eval() { return info_->is_eval(); }
- bool is_native() { return info_->is_native(); }
LanguageMode language_mode() { return scope()->language_mode(); }
bool has_simple_parameters() { return info_->has_simple_parameters(); }
FunctionLiteral* literal() const { return info_->literal(); }
@@ -756,7 +721,7 @@ class FullCodeGenerator: public AstVisitor {
void PushCalleeAndWithBaseObject(Call* expr);
// AST node visit functions.
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
@@ -792,7 +757,7 @@ class FullCodeGenerator: public AstVisitor {
unsigned range_end;
unsigned handler_offset;
int stack_depth;
- int try_catch_depth;
+ HandlerTable::CatchPrediction catch_prediction;
};
class ExpressionContext BASE_EMBEDDED {
@@ -987,15 +952,13 @@ class FullCodeGenerator: public AstVisitor {
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
- int try_catch_depth_;
int operand_stack_depth_;
ZoneList<Handle<Object> >* globals_;
- Handle<FixedArray> modules_;
- int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BackEdgeEntry> back_edges_;
ZoneVector<HandlerTableEntry> handler_table_;
+ SourcePositionTableBuilder source_position_table_builder_;
int ic_total_count_;
Handle<Cell> profiling_counter_;
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index f1945c897c..0a00eeade8 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -168,22 +168,20 @@ void FullCodeGenerator::Generate() {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ push(edx); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
+ Immediate(slots));
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ pop(edx); // Restore new target.
}
@@ -198,7 +196,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -227,11 +226,12 @@ void FullCodeGenerator::Generate() {
// Registers holding this function and new target are both trashed in case we
// bail out here. But since that can happen only when new target is not used
// and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register) {
@@ -242,7 +242,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, edx, ebx, ecx);
@@ -250,7 +250,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register) {
@@ -262,7 +262,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, eax, ebx, edx);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Arguments object must be allocated after the context object, in
// case the "arguments" or ".arguments" variables are in the context.
@@ -289,10 +289,11 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
+ VisitDeclarations(info->scope()->declarations());
}
// Assert that the declarations do not use ICs. Otherwise the debugger
@@ -302,7 +303,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
@@ -369,11 +371,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -423,6 +425,9 @@ void FullCodeGenerator::EmitReturnSequence() {
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
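
The new RestoreContext() helper above is what the rest of this patch substitutes for the repeated inline reloads of esi from the frame. The invariant it encodes: any call can clobber the context register, so after the call the context must be re-read from its frame slot. A standalone model of that invariant (names and types are illustrative, not V8's):

    #include <cassert>

    struct Frame { void* context_slot; };   // models [ebp + kContextOffset]
    static void* context_reg = nullptr;     // models esi

    void CallThatClobbers() { context_reg = nullptr; }  // callee trashes esi
    void RestoreContext(const Frame& f) { context_reg = f.context_slot; }

    int main() {
      int ctx = 0;
      Frame frame{&ctx};
      context_reg = frame.context_slot;
      CallThatClobbers();      // context register no longer valid
      RestoreContext(frame);   // reload from the frame, as the helper does
      assert(context_reg == &ctx);
    }
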
@@ -483,10 +488,12 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -677,7 +684,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, NULL);
@@ -702,25 +709,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(), zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
@@ -728,35 +731,28 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ push(Immediate(variable->name()));
- // VariableDeclaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ push(Immediate(isolate()->factory()->the_hole_value()));
- } else {
- __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
- }
- __ push(
- Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
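
The rewritten VisitVariableDeclaration above replaces the old LET/CONST/CONST_LEGACY mode test with Variable::binding_needs_init(): bindings that need initialization get the hole value written into their slot, and a later read of the hole is a ReferenceError (the temporal dead zone). A rough standalone model of that contract, with std::nullopt standing in for the hole:

    #include <optional>
    #include <stdexcept>

    struct Binding {
      bool needs_init = false;      // models Variable::binding_needs_init()
      std::optional<int> slot;      // std::nullopt models the_hole_value
    };

    int Load(const Binding& b) {
      if (b.needs_init && !b.slot.has_value())
        throw std::runtime_error("ReferenceError: uninitialized binding");
      return b.slot.value_or(0);    // 0 stands in for undefined here
    }

    int main() {
      Binding let_x{true, std::nullopt};   // declared, hole-initialized
      try { Load(let_x); } catch (const std::runtime_error&) {}
      let_x.slot = 1;                      // initialization overwrites the hole
      return Load(let_x) - 1;              // now reads normally
    }
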
@@ -768,7 +764,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -798,7 +796,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -806,10 +804,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
PushOperand(variable->name());
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -818,19 +819,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
+ __ EmitLoadTypeFeedbackVector(eax);
+ __ Push(eax);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
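
Taken together, the declaration changes above alter what DeclareGlobals receives: each global entry in globals_ is now a (feedback slot, value) pair rather than a (name, value) pair, and the type feedback vector is pushed alongside so the runtime can recover each name from its slot. Sketched as plain data (not the real runtime signature):

    #include <string>
    #include <vector>

    struct FeedbackVector { std::vector<std::string> names; };

    // Flat (slot, value) pairs, as built by VisitVariableDeclaration /
    // VisitFunctionDeclaration for GLOBAL and UNALLOCATED variables.
    struct GlobalPair { int slot; int value; };

    void DeclareGlobals(const std::vector<GlobalPair>& pairs,
                        const FeedbackVector& vector) {
      for (const GlobalPair& p : pairs) {
        const std::string& name = vector.names[p.slot];  // name via the slot
        (void)name;  // ... define the global property 'name' with p.value
      }
    }

    int main() {
      FeedbackVector v{{"x", "f"}};
      DeclareGlobals({{0, 42}, {1, 0}}, v);
    }
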
@@ -838,7 +833,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -887,7 +882,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ jmp(&skip, Label::kNear);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ cmp(eax, isolate()->factory()->true_value());
__ j(not_equal, &next_test);
__ Drop(1);
@@ -915,12 +910,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -952,16 +947,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(eax);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+ // property names in a fixed array. Note: Proxies never have an enum cache,
+ // so will always take the slow path.
Label call_runtime, use_cache, fixed_array;
__ CheckEnumCache(&call_runtime);
@@ -972,7 +966,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(eax);
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
__ j(not_equal, &fixed_array);
@@ -1008,7 +1002,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
@@ -1019,9 +1013,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
- // Get the current entry of the array into register ebx.
+ // Get the current entry of the array into register eax.
__ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
+ __ mov(eax, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
// Get the expected map from the stack or a smi in the
// permanent slow case into register edx.
@@ -1030,8 +1024,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ebx, Operand(esp, 4 * kPointerSize));
+ __ cmp(edx, FieldOperand(ebx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// We need to filter the key, record slow-path here.
@@ -1040,29 +1034,27 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(ecx); // Enumerable.
- __ push(ebx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, loop_statement.continue_label());
- __ mov(ebx, eax);
+ // eax contains the key. The receiver in ebx is the second argument to the
+ // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // have the key or returns the name-converted key.
+ ForInFilterStub filter_stub(isolate());
+ __ CallStub(&filter_stub);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
+ __ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
+ loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
- // entry in register ebx.
+ // entry in register eax.
__ bind(&update_each);
- __ mov(result_register(), ebx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
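
The filter step above moves from Runtime::kForInFilter to ForInFilterStub, but the contract is unchanged: given the current key and the receiver, the filter yields undefined when the property has vanished mid-iteration (the loop then skips it) and the name-converted key otherwise. A standalone model (illustrative types):

    #include <optional>
    #include <set>
    #include <string>

    // Returns std::nullopt (modelling undefined) if the receiver no longer
    // has the property; otherwise the key, name-converted.
    std::optional<std::string> ForInFilter(const std::set<std::string>& receiver,
                                           const std::string& key) {
      if (receiver.count(key) == 0) return std::nullopt;
      return key;
    }

    int main() {
      std::set<std::string> obj = {"a", "b"};
      obj.erase("b");                    // property deleted while iterating
      return ForInFilter(obj, "b").has_value() ? 1 : 0;  // skipped: returns 0
    }
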
@@ -1079,7 +1071,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1116,45 +1108,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register context = esi;
Register temp = edx;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(temp)) {
- __ mov(temp, context);
- }
- __ bind(&next);
- // Terminate at native context.
- __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->native_context_map()));
- __ j(equal, &fast, Label::kNear);
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
- __ jmp(&next);
- __ bind(&fast);
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering esi.
+ context = temp;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
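
The rewritten walk above bounds the loop with ContextChainLengthUntilOutermostSloppyEval() instead of scanning the chain open-endedly and then special-casing eval scopes; only scopes that actually need a context consume a step. A compact model of the resulting check (illustrative):

    #include <vector>

    struct Ctx { bool extension_is_hole; };  // models Context::EXTENSION_INDEX

    // True if every extension slot between here and the outermost
    // sloppy-eval scope is still the hole; false means: take the slow path.
    bool ExtensionsAllEmpty(const std::vector<Ctx>& chain, int to_check) {
      for (int i = 0; i < to_check; ++i)
        if (!chain[i].extension_is_hole) return false;
      return true;
    }

    int main() {
      std::vector<Ctx> chain = {{true}, {true}, {false}};
      return ExtensionsAllEmpty(chain, 2) ? 0 : 1;  // only 2 scopes to check
    }
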
@@ -1207,42 +1173,35 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- if (local->mode() == CONST_LEGACY) {
- __ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET || CONST
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ jmp(done);
}
- __ jmp(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
- __ mov(LoadDescriptor::ReceiverRegister(),
- ContextOperand(LoadDescriptor::ReceiverRegister(),
- Context::EXTENSION_INDEX));
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
+#endif
+ __ mov(LoadGlobalDescriptor::SlotRegister(),
Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
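
EmitGlobalVariableLoad above now targets the dedicated LoadGlobalIC, which is keyed by the feedback slot alone; the call site no longer materializes receiver and name registers because the IC can recover the name from the feedback vector. A standalone model of the slot-only interface (assumed shapes, not V8's):

    #include <map>
    #include <string>
    #include <vector>

    struct FeedbackVector { std::vector<std::string> names; };

    std::map<std::string, int> globals = {{"answer", 42}};
    FeedbackVector feedback{{"answer"}};

    int LoadGlobalIC(int slot) {
      // Only the slot arrives in a register; the name comes from feedback.
      return globals.at(feedback.names[slot]);
    }

    int main() { return LoadGlobalIC(0) == 42 ? 0 : 1; }
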
@@ -1264,21 +1223,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
: "[ Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
- // Let and const need a read barrier.
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
Label done;
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ mov(eax, isolate()->factory()->undefined_value());
- }
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1304,19 +1256,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
context()->Plug(eax);
break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
- __ Move(ecx, Immediate(expr->pattern()));
- __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1356,8 +1299,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(edx, Immediate(Smi::FromInt(flags)));
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in eax.
@@ -1385,7 +1329,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(eax));
@@ -1393,7 +1338,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
@@ -1421,16 +1366,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
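
The getter/setter bookkeeping above grows a per-entry bailout id: besides recording which properties form an accessor pair, the table now remembers the deopt point to attach to the later kDefineAccessorPropertyUnchecked call. Modeled as plain data (illustrative):

    #include <map>
    #include <string>

    struct AccessorEntry {
      int bailout_id = -1;          // new: deopt point for the define call
      const void* getter = nullptr;
      const void* setter = nullptr;
    };

    int main() {
      std::map<std::string, AccessorEntry> accessor_table;
      AccessorEntry& e = accessor_table["x"];  // lookup() creates on demand
      e.bailout_id = 42;                       // GetIdForPropertySet(...)
      e.getter = &e;                           // the getter property
      return accessor_table["x"].bailout_id == 42 ? 0 : 1;
    }
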
@@ -1449,6 +1398,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1477,7 +1427,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1493,6 +1443,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1550,7 +1502,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1580,7 +1532,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1600,7 +1553,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1615,7 +1569,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1680,23 +1633,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1715,7 +1672,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1727,7 +1684,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
break;
case NAMED_PROPERTY:
@@ -1756,21 +1713,27 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ jmp(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, eax holds the generator object.
__ RecordGeneratorContinuation();
- __ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ j(not_equal, &resume);
- __ push(result_register());
+ __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
+ __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
+ STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+ STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+ __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
+ __ j(less, &resume);
+ __ Push(result_register());
+ __ j(greater, &exception);
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
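
The new continuation sequence above loads the resume mode and input value straight from the generator object and splits three ways with two conditional jumps, which is why the STATIC_ASSERTs pin the ordering kNext < kReturn < kThrow. A minimal model of that dispatch (illustrative values):

    enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };

    const char* Dispatch(ResumeMode mode) {
      if (mode < kReturn) return "resume";   // kNext: deliver input, resume
      if (mode > kReturn) return "throw";    // kThrow: throw the input value
      return "return";                       // kReturn: box as iterator result
    }

    int main() { return Dispatch(kNext)[0] == 'r' ? 0 : 1; }
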
@@ -1786,8 +1749,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ j(equal, &post_runtime);
__ push(eax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
@@ -1796,101 +1758,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in eax, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // ebx will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(ebx);
-
- // Store input value into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
- __ mov(ecx, result_register());
- __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
- kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
-
- // Push receiver.
- __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(ecx, isolate()->factory()->the_hole_value());
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ sub(edx, Immediate(Smi::FromInt(1)));
- __ j(carry, &push_frame);
- __ push(ecx);
- __ jmp(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ call(&resume_frame);
- __ jmp(&done);
- __ bind(&resume_frame);
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- // Load the operand stack size.
- __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
- __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
- __ SmiUntag(edx);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ cmp(edx, Immediate(0));
- __ j(not_zero, &slow_resume);
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(ecx);
- __ add(edx, ecx);
- __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ jmp(edx);
- __ bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ sub(edx, Immediate(1));
- __ j(carry, &call_resume);
- __ push(ecx);
- __ jmp(&push_operand_holes);
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ push(ebx);
- __ push(result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ Abort(kGeneratorFailedToResume);
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperand(MemOperand operand) {
OperandStackDepthIncrement(1);
__ Push(operand);
@@ -1910,7 +1777,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate,
+ NO_ALLOCATION_FLAGS);
__ jmp(&done_allocate, Label::kNear);
__ bind(&allocate);
@@ -2191,34 +2059,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &assign, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &const_error, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &assign, Label::kNear);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
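
The assignment rewrite above collapses the formerly separate LET and CONST paths into one: an optional hole check for bindings that need initialization, after which assignments to const always throw and everything else stores. A standalone model (illustrative):

    #include <optional>
    #include <stdexcept>

    struct Binding {
      bool is_const = false;
      bool needs_init = false;       // hole-initialized lexical binding
      std::optional<int> slot;       // std::nullopt models the_hole_value
    };

    void Assign(Binding& b, int value) {
      if (b.needs_init && !b.slot.has_value())
        throw std::runtime_error("ReferenceError");  // assign-before-init
      if (b.is_const)
        throw std::runtime_error("TypeError: assignment to constant");
      b.slot = value;                // ordinary store for let bindings
    }

    int main() {
      Binding let_x{false, true, 0};
      Assign(let_x, 7);
      return *let_x.slot == 7 ? 0 : 1;
    }
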
@@ -2232,8 +2091,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(Immediate(var->name()));
@@ -2255,25 +2113,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(eax);
- __ push(esi);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip, Label::kNear);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2296,7 +2135,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2342,44 +2181,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), result_register());
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- PopOperand(LoadDescriptor::ReceiverRegister()); // Object.
- __ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2400,7 +2202,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2412,7 +2214,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
@@ -2447,6 +2250,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2471,7 +2275,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(LoadDescriptor::NameRegister(), eax);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(esp, 0));
@@ -2503,6 +2308,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2522,7 +2328,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
@@ -2543,15 +2349,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
+ RestoreContext();
context()->DropAndPlug(1, eax);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
@@ -2568,6 +2371,9 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the start position of the scope the call resides in.
__ push(Immediate(Smi::FromInt(scope()->start_position())));
+ // Push the source position of the eval call.
+ __ push(Immediate(Smi::FromInt(expr->position())));
+
// Do the runtime call.
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
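
EmitResolvePossiblyDirectEval now receives the whole Call expression so it can also push the source position of the eval call itself. The bundle the runtime ends up with, sketched as a plain struct (field names are illustrative; the language-mode field is assumed from the elided context):

    struct ResolveEvalArgs {
      void* callee;              // copy of the function being called
      void* first_argument;      // or undefined if there are no arguments
      int language_mode;
      int scope_start_position;  // start of the scope the call resides in
      int eval_position;         // new: position of the eval call itself
    };

    int main() {
      ResolveEvalArgs args{nullptr, nullptr, 0, 0, 17};
      return args.eval_position == 17 ? 0 : 1;
    }
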
@@ -2590,7 +2396,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperand(eax); // Function.
PushOperand(edx); // Receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2614,7 +2420,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2630,12 +2436,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2645,8 +2451,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, eax);
}
@@ -2685,9 +2490,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(eax);
}
@@ -2728,9 +2532,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(eax);
}
@@ -2916,94 +2718,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(eax, &done, Label::kNear);
- // If the object is not a value type, return the object.
- __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
- __ j(not_equal, &done, Label::kNear);
- __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = eax;
- Register index = ebx;
- Register value = ecx;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
-
- PopOperand(value);
- PopOperand(index);
-
- if (FLAG_debug_code) {
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- }
-
- __ SmiUntag(value);
- __ SmiUntag(index);
-
- if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- }
-
- __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
- value);
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = eax;
- Register index = ebx;
- Register value = ecx;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperand(value);
- PopOperand(index);
-
- if (FLAG_debug_code) {
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- __ SmiUntag(index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
- __ SmiTag(index);
- }
-
- __ SmiUntag(value);
- // No need to untag a smi for two-byte addressing.
- __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
- value);
- context()->Plug(string);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3039,13 +2753,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ jmp(&done);
@@ -3069,54 +2778,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register scratch = edx;
- Register result = eax;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Move(result, Immediate(isolate()->factory()->empty_string()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Immediate(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3124,7 +2785,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to edi.
int const argc = args->length() - 2;
__ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3132,8 +2793,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ mov(eax, Immediate(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, eax);
}
@@ -3187,13 +2847,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(eax);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ mov(eax, NativeContextOperand());
- __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
- context()->Plug(eax);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3212,7 +2865,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime,
+ NO_ALLOCATION_FLAGS);
__ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
@@ -3253,9 +2907,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3277,7 +2929,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ mov(eax, NativeContextOperand());
@@ -3340,7 +2992,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->true_value());
} else {
@@ -3348,7 +3001,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->false_value());
} else {
@@ -3447,9 +3101,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3502,9 +3156,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3552,7 +3206,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(eax);
}
// For all contexts except EffectContext we have the result on
@@ -3564,7 +3219,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
break;
@@ -3574,7 +3230,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3586,6 +3242,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3597,6 +3254,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3613,7 +3271,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
// Result is on the stack
if (!context()->IsEffect()) {
@@ -3709,7 +3367,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3729,7 +3386,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -3737,6 +3395,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
PopOperand(edx);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
@@ -3748,6 +3407,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cc = CompareIC::ComputeCondition(op);
PopOperand(edx);
@@ -3835,7 +3495,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index f329a23d00..917474ae88 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -186,22 +186,19 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ push(a3); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ li(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ pop(a3); // Restore new target.
}
@@ -215,7 +212,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -242,11 +240,12 @@ void FullCodeGenerator::Generate() {
// Register holding this function and new target are both trashed in case we
// bailout here. But since that can happen only when new target is not used
// and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register_a1) {
@@ -257,7 +256,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, a3, a0, a2);
@@ -265,7 +264,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_a1) {
@@ -277,7 +276,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, v0, a1, a2);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -305,7 +304,8 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -318,7 +318,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
@@ -397,11 +398,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -446,19 +447,20 @@ void FullCodeGenerator::EmitReturnSequence() {
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
- masm_->mov(sp, fp);
- masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
- masm_->Addu(sp, sp, Operand(sp_delta));
- masm_->Jump(ra);
+ __ mov(sp, fp);
+ __ MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+ __ Addu(sp, sp, Operand(sp_delta));
+ __ Jump(ra);
}
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
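
The RestoreContext() helper added above replaces the open-coded reload of cp
from the frame after stub and runtime calls, which later hunks delete at each
call site. A toy sketch of the refactor (not V8's assembler API):

    struct Frame { void* context; };

    struct CodeGenerator {
      void* cp = nullptr;   // context register stand-in
      Frame* fp = nullptr;  // frame pointer stand-in

      // Was open-coded at every call site as:
      //   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
      void RestoreContext() { cp = fp->context; }
    };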
@@ -524,10 +526,12 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ Branch(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ Branch(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -724,10 +728,10 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ Branch(&skip);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
- Split(eq, a0, Operand(t0), if_true, if_false, NULL);
+ Split(eq, v0, Operand(t0), if_true, if_false, NULL);
__ bind(&skip);
}
}
@@ -752,26 +756,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ sw(t0, StackOperand(variable));
@@ -779,36 +778,29 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ sw(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ li(a2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
- } else {
- DCHECK(Smi::FromInt(0) == 0);
- __ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
- }
- __ Push(a2, a0);
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ Push(a2);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
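
VisitVariableDeclaration now keys hole-initialization off a single
Variable::binding_needs_init() predicate instead of re-deriving it from the
LET/CONST/CONST_LEGACY modes, and slots for such bindings are pre-filled with
the hole. A toy model of the temporal-dead-zone behavior the emitted code
implements; none of these types are V8's:

    #include <stdexcept>
    #include <string>

    constexpr int kTheHole = -1;  // stand-in for Heap::kTheHoleValueRootIndex

    struct Slot { int value = 0; };

    // Declaration: lexical bindings that need initialization start as the hole.
    void Declare(Slot& slot, bool binding_needs_init) {
      if (binding_needs_init) slot.value = kTheHole;  // __ sw(the_hole, slot)
    }

    // Load: reading the hole throws, matching Runtime::kThrowReferenceError.
    int Load(const Slot& slot, const std::string& name) {
      if (slot.value == kTheHole)
        throw std::runtime_error("ReferenceError: " + name +
                                 " is not initialized");
      return slot.value;
    }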
@@ -820,7 +812,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -852,7 +846,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -862,10 +856,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
PushOperand(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -874,20 +871,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(a1, a0);
+ __ EmitLoadTypeFeedbackVector(a2);
+ __ Push(a1, a0, a2);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -895,7 +885,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -945,7 +935,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ Branch(&skip);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(&next_test, ne, v0, Operand(at));
__ Drop(1);
@@ -972,12 +962,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -1011,17 +1001,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ mov(a0, v0);
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(a0);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+ // property names in a fixed array. Note: Proxies never have an enum cache,
+ // so will always take the slow path.
Label call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -1035,7 +1024,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1073,7 +1062,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ li(a0, Operand(Smi::FromInt(0)));
__ Push(a0); // Initial index.
@@ -1086,11 +1075,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
__ Branch(loop_statement.break_label(), hs, a0, Operand(a1));
- // Get the current entry of the array into register a3.
+ // Get the current entry of the array into result_register.
__ lw(a2, MemOperand(sp, 2 * kPointerSize));
__ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Lsa(t0, a2, a0, kPointerSizeLog2 - kSmiTagSize);
- __ lw(a3, MemOperand(t0)); // Current entry.
+ __ lw(result_register(), MemOperand(t0)); // Current entry.
// Get the expected map from the stack or a smi in the
// permanent slow case into register a2.
@@ -1105,32 +1094,33 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We need to filter the key, record slow-path here.
int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(a0);
+ __ EmitLoadTypeFeedbackVector(a3);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ sw(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ Push(a1, a3); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ mov(a3, result_register());
+ __ sw(a2, FieldMemOperand(a3, FixedArray::OffsetOfElementAt(vector_index)));
+
+ __ mov(a0, result_register());
+ // a0 contains the key. The receiver in a1 is the second argument to the
+  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // have the key, and the name-converted key otherwise.
+ ForInFilterStub filter_stub(isolate());
+ __ CallStub(&filter_stub);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(loop_statement.continue_label(), eq, a3, Operand(at));
+ __ Branch(loop_statement.continue_label(), eq, result_register(),
+ Operand(at));
// Update the 'each' property or variable from the possibly filtered
- // entry in register a3.
+ // entry in the result_register.
__ bind(&update_each);
- __ mov(result_register(), a3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1149,7 +1139,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
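
The for-in hunks above replace the Runtime::kForInFilter call with a
ForInFilterStub whose contract, per the new comments, is: return the
(name-converted) key if the receiver still has the property, or undefined so
the loop skips it. A sketch of that contract with toy types:

    #include <map>
    #include <optional>
    #include <string>

    // nullopt plays the role of undefined: the loop branches to the
    // continue label when the property was removed during iteration.
    std::optional<std::string> ForInFilter(
        const std::map<std::string, int>& receiver, const std::string& key) {
      if (receiver.count(key) == 0) return std::nullopt;  // property deleted
      return key;  // name-converted key
    }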
@@ -1189,42 +1179,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register next = a1;
Register temp = a2;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ lw(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ lw(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ lw(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at native context.
- __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kNativeContextMapRootIndex);
- __ Branch(&fast, eq, temp, Operand(t0));
- // Check that extension is "the hole".
- __ lw(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ lw(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ Branch(&loop);
- __ bind(&fast);
+ __ lw(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
@@ -1278,35 +1245,30 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
- } else { // LET || CONST
- __ Branch(done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ Branch(done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ Branch(done);
}
- __ Branch(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
+#endif
+ __ li(LoadGlobalDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
@@ -1314,7 +1276,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1335,25 +1297,17 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
- // Let and const need a read barrier.
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&done);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
- }
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&done);
context()->Plug(v0);
break;
}
@@ -1376,20 +1330,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
+ break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a1, Operand(expr->pattern()));
- __ li(a0, Operand(Smi::FromInt(expr->flags())));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1424,8 +1370,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in v0.
@@ -1453,7 +1400,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -1462,7 +1410,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1496,16 +1444,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1524,6 +1476,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(a0, Operand(Smi::FromInt(NONE)));
PushOperand(a0);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1553,7 +1506,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1569,6 +1522,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1625,7 +1580,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1657,7 +1612,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1677,7 +1633,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1692,7 +1649,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1724,18 +1680,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY: {
- const Register scratch = a1;
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Move(scratch, result_register());
VisitForAccumulatorValue(property->key());
- PushOperands(scratch, result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch1 = t0;
+ const Register scratch2 = a1;
__ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ __ lw(scratch2, MemOperand(sp, 1 * kPointerSize));
+ PushOperands(scratch1, scratch2, result_register());
}
break;
}
@@ -1761,23 +1717,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1797,7 +1757,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1809,7 +1769,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
break;
case NAMED_PROPERTY:
@@ -1838,20 +1798,25 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ jmp(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, v0 holds the generator object.
__ RecordGeneratorContinuation();
- __ pop(a1);
- __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ push(result_register());
+ __ lw(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
+ __ lw(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
+ __ Push(result_register());
+ __ Branch(&exception, eq, a1,
+ Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
@@ -1866,7 +1831,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Branch(&post_runtime, eq, sp, Operand(a1));
__ push(v0); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
@@ -1875,103 +1840,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in a0, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // a1 will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(a1);
-
- // Store input value into generator object.
- __ sw(result_register(),
- FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
- __ mov(a2, result_register());
- __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
- kRAHasBeenSaved, kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
- __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
- __ push(a2);
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ Subu(a3, a3, Operand(Smi::FromInt(1)));
- __ Branch(&push_frame, lt, a3, Operand(zero_reg));
- __ push(a2);
- __ jmp(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ Call(&resume_frame);
- __ jmp(&done);
- __ bind(&resume_frame);
- // ra = return address.
- // fp = caller's frame pointer.
- // cp = callee's context,
- // t0 = callee's JS function.
- __ PushStandardFrame(t0);
-
- // Load the operand stack size.
- __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
- __ lw(a3, FieldMemOperand(a3, FixedArray::kLengthOffset));
- __ SmiUntag(a3);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ Branch(&slow_resume, ne, a3, Operand(zero_reg));
- __ lw(a3, FieldMemOperand(t0, JSFunction::kCodeEntryOffset));
- __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(a2);
- __ Addu(a3, a3, Operand(a2));
- __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Jump(a3);
- __ bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ Subu(a3, a3, Operand(1));
- __ Branch(&call_resume, lt, a3, Operand(zero_reg));
- __ push(a2);
- __ Branch(&push_operand_holes);
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- DCHECK(!result_register().is(a1));
- __ Push(a1, result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ stop("not-reached");
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
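
The rewritten yield continuation above no longer pops a resume mode from the
stack: the generator object arriving in v0 carries both the resume mode and
the input value, and the whole EmitGeneratorResume path is deleted. A toy
dispatch over that protocol; kResumeModeOffset/kInputOrDebugPosOffset are the
fields named in the diff, everything else here is illustrative:

    #include <stdexcept>

    enum class ResumeMode { kNext, kReturn, kThrow };

    struct Generator {
      ResumeMode resume_mode;  // stand-in for kResumeModeOffset
      int input;               // stand-in for kInputOrDebugPosOffset
    };

    int OnContinuation(const Generator& gen) {
      switch (gen.resume_mode) {
        case ResumeMode::kNext:
          return gen.input;  // jump to &resume
        case ResumeMode::kThrow:
          throw std::runtime_error("generator throw");  // Runtime::kThrow path
        case ResumeMode::kReturn:
          return gen.input;  // wrapped in an iterator result with done == true
      }
      return 0;  // unreachable
    }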
@@ -2006,7 +1874,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate,
+ NO_ALLOCATION_FLAGS);
__ jmp(&done_allocate);
__ bind(&allocate);
@@ -2288,37 +2157,26 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
MemOperand location = VarOperand(var, a1);
- __ lw(a3, location);
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Branch(&assign, ne, a3, Operand(t0));
- __ li(a3, Operand(var->name()));
- __ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- // Perform the assignment.
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
- MemOperand location = VarOperand(var, a1);
- __ lw(a3, location);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&const_error, ne, a3, Operand(at));
- __ li(a3, Operand(var->name()));
- __ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ lw(a3, location);
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Branch(&assign, ne, a3, Operand(t0));
+ __ li(a3, Operand(var->name()));
+ __ push(a3);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
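
EmitVariableAssignment above folds the separate LET and CONST paths into one
IsLexicalVariableMode branch: an optional initialization (hole) check, then
either the const-assignment error or the store. The same logic as
straight-line toy code, with stand-in types:

    #include <stdexcept>
    #include <string>

    constexpr int kHole = -1;  // uninitialized-binding sentinel

    void AssignLexical(int& slot, bool binding_needs_init, bool is_const,
                       int value, const std::string& name) {
      if (binding_needs_init && slot == kHole)  // Runtime::kThrowReferenceError
        throw std::runtime_error("ReferenceError: " + name);
      if (is_const)  // Runtime::kThrowConstAssignError
        throw std::runtime_error("TypeError: Assignment to constant variable.");
      slot = value;  // EmitStoreToStackLocalOrContextSlot
    }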
@@ -2333,8 +2191,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2356,24 +2213,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ li(a0, Operand(var->name()));
- __ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, a1);
- __ lw(a2, location);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a2, Operand(at));
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2397,7 +2236,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
@@ -2448,44 +2287,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), v0);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), v0);
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
@@ -2506,7 +2308,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2519,7 +2321,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ lw(at, MemOperand(sp, 0));
PushOperand(at);
@@ -2556,6 +2359,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2580,7 +2384,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), v0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ lw(at, MemOperand(sp, 0));
@@ -2614,6 +2419,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2633,7 +2439,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Record source position of the IC call.
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
@@ -2655,31 +2461,33 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, v0);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // t3: copy of the first argument or undefined if it doesn't exist.
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
+ // t4: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ lw(t3, MemOperand(sp, arg_count * kPointerSize));
+ __ lw(t4, MemOperand(sp, arg_count * kPointerSize));
} else {
- __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
}
- // t2: the receiver of the enclosing function.
- __ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // t3: the receiver of the enclosing function.
+ __ lw(t3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // t1: the language mode.
- __ li(t1, Operand(Smi::FromInt(language_mode())));
+ // t2: the language mode.
+ __ li(t2, Operand(Smi::FromInt(language_mode())));
- // t0: the start position of the scope the calls resides in.
- __ li(t0, Operand(Smi::FromInt(scope()->start_position())));
+  // t1: the start position of the scope the call resides in.
+ __ li(t1, Operand(Smi::FromInt(scope()->start_position())));
+
+ // t0: the source position of the eval call.
+ __ li(t0, Operand(Smi::FromInt(expr->position())));
// Do the runtime call.
- __ Push(t3, t2, t1, t0);
+ __ Push(t4, t3, t2, t1, t0);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2701,7 +2509,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ Push(callee->name());
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperands(v0, v1); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@@ -2728,7 +2536,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2744,12 +2552,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// resolve eval.
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2759,8 +2567,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, v0);
}
@@ -2799,9 +2606,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(v0);
}
@@ -2844,9 +2650,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(v0);
}
@@ -3034,100 +2838,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(v0, &done);
- // If the object is not a value type, return the object.
- __ GetObjectType(v0, a1, a1);
- __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
-
- __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = v0;
- Register index = a1;
- Register value = a2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value, at);
- __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
- __ SmiTst(index, at);
- __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
- __ SmiUntag(index, index);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- Register scratch = t5;
- __ EmitSeqStringSetCharCheck(
- string, index, value, scratch, one_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ Addu(at,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(index);
- __ Addu(at, at, index);
- __ sb(value, MemOperand(at));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = v0;
- Register index = a1;
- Register value = a2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value, at);
- __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
- __ SmiTst(index, at);
- __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
- __ SmiUntag(index, index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- Register scratch = t5;
- __ EmitSeqStringSetCharCheck(
- string, index, value, scratch, two_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ Addu(at,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ Addu(at, at, index);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ sh(value, MemOperand(at));
- context()->Plug(string);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3164,13 +2874,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ jmp(&done);
@@ -3194,55 +2899,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ mov(a0, result_register());
-
- Register object = a1;
- Register index = a0;
- Register scratch = a3;
- Register result = v0;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ li(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3250,7 +2906,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ lw(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3258,8 +2914,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ li(a0, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, v0);
}
@@ -3310,12 +2965,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(v0);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
- context()->Plug(v0);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3335,7 +2984,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime,
+ NO_ALLOCATION_FLAGS);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ Pop(a2, a3);
__ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
@@ -3376,9 +3026,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3400,7 +3048,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadGlobalObject(a2);
@@ -3462,12 +3110,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ jmp(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ bind(&done);
@@ -3526,25 +3176,23 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
const Register scratch = a1;
- __ lw(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
+ __ lw(scratch, MemOperand(sp, 0)); // this
+ PushOperands(result_register(), scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = a1;
- const Register scratch1 = t0;
- __ Move(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- PushOperands(scratch, result_register());
- __ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ const Register scratch1 = a1;
+ const Register scratch2 = t0;
+ __ lw(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
+ __ lw(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
+ PushOperands(result_register(), scratch1, scratch2, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -3567,9 +3215,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
 // in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3618,9 +3266,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3666,7 +3314,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(v0);
}
// For all contexts except EffectConstant we have the result on
@@ -3677,7 +3326,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
break;
@@ -3688,7 +3338,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3700,6 +3350,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3711,6 +3362,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3728,7 +3380,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3825,7 +3477,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3845,7 +3496,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
@@ -3853,6 +3505,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
__ mov(a0, result_register());
PopOperand(a1);
InstanceOfStub stub(isolate());
@@ -3865,6 +3518,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cc = CompareIC::ComputeCondition(op);
__ mov(a0, result_register());
PopOperand(a1);
@@ -3948,7 +3602,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 681abd1230..0c09bdf176 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -185,22 +185,19 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
 // The new target value is not used, so clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ push(a3); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(a1);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ li(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ pop(a3); // Restore new target.
}
@@ -214,7 +211,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -241,11 +239,12 @@ void FullCodeGenerator::Generate() {
 // Registers holding this function and new target are both trashed if we
 // bail out here. But since that can happen only when new target is not used
 // and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
 // Possibly set up a local binding to the this function, which is used in
 // derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register_a1) {
@@ -255,7 +254,7 @@ void FullCodeGenerator::Generate() {
SetVar(this_function_var, a1, a0, a2);
}
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, a3, a0, a2);
@@ -263,7 +262,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_a1) {
@@ -275,7 +274,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, v0, a1, a2);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -302,7 +301,8 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -315,7 +315,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(at));
@@ -396,11 +397,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -445,19 +446,20 @@ void FullCodeGenerator::EmitReturnSequence() {
 // Make sure that the constant pool is not emitted inside the return
// sequence.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
- masm_->mov(sp, fp);
- masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
- masm_->Daddu(sp, sp, Operand(sp_delta));
- masm_->Jump(ra);
+ __ mov(sp, fp);
+ __ MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+ __ Daddu(sp, sp, Operand(sp_delta));
+ __ Jump(ra);
}
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
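
This small helper replaces the repeated two-instruction reload of the context register that used to follow every cp-clobbering call in this file. An illustrative call site under that assumption (some_stub is hypothetical, standing in for any stub or runtime call that may clobber cp):

    __ CallStub(&some_stub);  // any call that may clobber cp
    RestoreContext();         // reload cp from StandardFrameConstants::kContextOffset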
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -523,10 +525,12 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ Branch(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ Branch(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -723,10 +727,10 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ Branch(&skip);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
- Split(eq, a0, Operand(a4), if_true, if_false, NULL);
+ Split(eq, v0, Operand(a4), if_true, if_false, NULL);
__ bind(&skip);
}
}
@@ -751,26 +755,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ sd(a4, StackOperand(variable));
@@ -778,36 +777,29 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ sd(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ li(a2, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
- } else {
- DCHECK(Smi::FromInt(0) == 0);
- __ mov(a0, zero_reg); // Smi::FromInt(0) indicates no initial value.
- }
- __ Push(a2, a0);
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ Push(a2);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
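
The declaration paths above stop recomputing hole-initialization from the declaration mode and instead ask the Variable itself. A condensed before/after sketch using only names from the hunk:

    // Before: recomputed at every declaration site.
    bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
    // After: decided once during scope analysis and stored on the variable.
    if (variable->binding_needs_init()) {
      // store the_hole so a read before initialization can throw
    }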
@@ -819,7 +811,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -851,7 +845,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -861,10 +855,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
PushOperand(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -873,20 +870,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ li(a1, Operand(pairs));
__ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(a1, a0);
+ __ EmitLoadTypeFeedbackVector(a2);
+ __ Push(a1, a0, a2);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
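
With this change each global declaration contributes a (feedback slot, initial value) pair, and DeclareGlobals hands the feedback vector to the runtime alongside those pairs. A hedged sketch of the layout the runtime side would decode, assuming two entries per declaration as assembled in the Visit*Declaration hunks above:

    // globals_ layout per declaration i (assumed):
    //   [2 * i + 0]  Smi::FromInt(slot.ToInt())             // feedback vector slot
    //   [2 * i + 1]  undefined_value or SharedFunctionInfo  // initial value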
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -894,7 +884,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -944,7 +934,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ Branch(&skip);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(&next_test, ne, v0, Operand(at));
__ Drop(1);
@@ -971,12 +961,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -1011,17 +1001,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ mov(a0, v0);
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(a0);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+ // property names in a fixed array. Note: Proxies never have an enum cache,
+  // so they will always take the slow path.
Label call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -1035,7 +1024,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1073,7 +1062,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(a1, v0); // Smi and array
__ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ li(a0, Operand(Smi::FromInt(0)));
__ Push(a0); // Initial index.
@@ -1091,7 +1080,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ SmiScale(a4, a0, kPointerSizeLog2);
__ daddu(a4, a2, a4); // Array base + scaled (smi) index.
- __ ld(a3, MemOperand(a4)); // Current entry.
+ __ ld(result_register(), MemOperand(a4)); // Current entry.
// Get the expected map from the stack or a smi in the
// permanent slow case into register a2.
@@ -1106,32 +1095,33 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We need to filter the key, record slow-path here.
int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(a0);
+ __ EmitLoadTypeFeedbackVector(a3);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- __ sd(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
-
- // Convert the entry to a string or (smi) 0 if it isn't a property
- // any more. If the property has been removed while iterating, we
- // just skip it.
- __ Push(a1, a3); // Enumerable and current entry.
- __ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ mov(a3, result_register());
+ __ sd(a2, FieldMemOperand(a3, FixedArray::OffsetOfElementAt(vector_index)));
+
+ __ mov(a0, result_register());
+ // a0 contains the key. The receiver in a1 is the second argument to the
+  // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+  // have the key, or the name-converted key otherwise.
+ ForInFilterStub filter_stub(isolate());
+ __ CallStub(&filter_stub);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(loop_statement.continue_label(), eq, a3, Operand(at));
+ __ Branch(loop_statement.continue_label(), eq, result_register(),
+ Operand(at));
// Update the 'each' property or variable from the possibly filtered
- // entry in register a3.
+ // entry in the result_register.
__ bind(&update_each);
- __ mov(result_register(), a3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1150,7 +1140,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
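
The for-in filter step moves from Runtime::kForInFilter to a stub call. A sketch of the calling convention the hunk implies for this port (key in a0, receiver already in a1, filtered result in v0):

    __ mov(a0, result_register());  // key taken from the enum cache
    ForInFilterStub filter_stub(isolate());
    __ CallStub(&filter_stub);      // v0: name-converted key, or undefined
    RestoreContext();               // the stub may clobber cp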
@@ -1190,42 +1180,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register next = a1;
Register temp = a2;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ ld(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ ld(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ ld(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
}
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at native context.
- __ ld(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(a4, Heap::kNativeContextMapRootIndex);
- __ Branch(&fast, eq, temp, Operand(a4));
- // Check that extension is "the hole".
- __ ld(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ ld(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ Branch(&loop);
- __ bind(&fast);
+ __ ld(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
@@ -1279,35 +1246,30 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ld(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ dsubu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole.
- } else { // LET || CONST
- __ Branch(done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ Branch(done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ Branch(done);
}
- __ Branch(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
- __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ li(LoadDescriptor::SlotRegister(),
+#endif
+ __ li(LoadGlobalDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
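
Loading a global no longer feeds receiver and name registers into the generic LoadIC; the dedicated LoadGlobalIC takes only the feedback slot. A comment-level sketch of the register traffic, with the recovery step an assumption rather than something the hunk shows:

    // Before: LoadIC consumed receiver, name, and slot registers.
    // After:  LoadGlobalIC consumes only SlotRegister(); the name and
    //         receiver are presumably recovered from the feedback metadata
    //         inside the IC itself.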
@@ -1315,7 +1277,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1336,25 +1298,17 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
- // Let and const need a read barrier.
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
GetVar(v0, var);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ dsubu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&done);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
- }
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&done);
context()->Plug(v0);
break;
}
@@ -1377,20 +1331,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
+ break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a1, Operand(expr->pattern()));
- __ li(a0, Operand(Smi::FromInt(expr->flags())));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(v0);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1425,8 +1371,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in v0.
@@ -1454,7 +1401,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -1463,7 +1411,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1497,16 +1445,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1525,6 +1477,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(a0, Operand(Smi::FromInt(NONE)));
PushOperand(a0);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1554,7 +1507,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1570,6 +1523,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1626,7 +1581,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1658,7 +1613,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1678,7 +1634,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1693,7 +1650,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1725,18 +1681,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY: {
- const Register scratch = a1;
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Move(scratch, result_register());
VisitForAccumulatorValue(property->key());
- PushOperands(scratch, result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch1 = a4;
+ const Register scratch2 = a1;
__ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ __ ld(scratch2, MemOperand(sp, 1 * kPointerSize));
+ PushOperands(scratch1, scratch2, result_register());
}
break;
}
@@ -1762,23 +1718,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1798,7 +1758,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1810,7 +1770,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
break;
case NAMED_PROPERTY:
@@ -1839,20 +1799,25 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ jmp(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, v0 holds the generator object.
__ RecordGeneratorContinuation();
- __ pop(a1);
- __ Branch(&resume, ne, a1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ push(result_register());
+ __ ld(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
+ __ ld(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
+ __ Push(result_register());
+ __ Branch(&exception, eq, a1,
+ Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
@@ -1867,7 +1832,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Branch(&post_runtime, eq, sp, Operand(a1));
__ push(v0); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
@@ -1876,105 +1841,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
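
The rewritten continuation no longer pops the resume mode off the operand stack; it reads mode and input straight from the generator object and dispatches three ways. A condensed sketch of that dispatch (kReturn is inferred as the remaining mode; only kNext and kThrow appear verbatim in the hunk):

    // a1 = resume mode, v0 = input value, both loaded from the generator object
    if (mode == JSGeneratorObject::kNext) {
      // jump to &resume and continue the generator body
    } else if (mode == JSGeneratorObject::kThrow) {
      // Runtime::kThrow / kReThrow on the input value
    } else {  // assumed kReturn
      // box the input in an iterator result and unwind
    }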
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in a0, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // a1 will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(a1);
-
- // Store input value into generator object.
- __ sd(result_register(),
- FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
- __ mov(a2, result_register());
- __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
- kRAHasBeenSaved, kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
- __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
- __ push(a2);
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- // The argument count is stored as int32_t on 64-bit platforms.
- // TODO(plind): Smi on 32-bit platforms.
- __ lw(a3,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ Dsubu(a3, a3, Operand(1));
- __ Branch(&push_frame, lt, a3, Operand(zero_reg));
- __ push(a2);
- __ jmp(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ Call(&resume_frame);
- __ jmp(&done);
- __ bind(&resume_frame);
- // ra = return address.
- // fp = caller's frame pointer.
- // cp = callee's context,
- // a4 = callee's JS function.
- __ PushStandardFrame(a4);
-
- // Load the operand stack size.
- __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
- __ ld(a3, FieldMemOperand(a3, FixedArray::kLengthOffset));
- __ SmiUntag(a3);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ Branch(&slow_resume, ne, a3, Operand(zero_reg));
- __ ld(a3, FieldMemOperand(a4, JSFunction::kCodeEntryOffset));
- __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(a2);
- __ Daddu(a3, a3, Operand(a2));
- __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Jump(a3);
- __ bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ Dsubu(a3, a3, Operand(1));
- __ Branch(&call_resume, lt, a3, Operand(zero_reg));
- __ push(a2);
- __ Branch(&push_operand_holes);
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- DCHECK(!result_register().is(a1));
- __ Push(a1, result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ stop("not-reached");
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -2009,7 +1875,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate,
+ NO_ALLOCATION_FLAGS);
__ jmp(&done_allocate);
__ bind(&allocate);
@@ -2290,37 +2157,26 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
MemOperand location = VarOperand(var, a1);
- __ ld(a3, location);
- __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
- __ Branch(&assign, ne, a3, Operand(a4));
- __ li(a3, Operand(var->name()));
- __ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- // Perform the assignment.
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
- MemOperand location = VarOperand(var, a1);
- __ ld(a3, location);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&const_error, ne, a3, Operand(at));
- __ li(a3, Operand(var->name()));
- __ push(a3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ ld(a3, location);
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ Branch(&assign, ne, a3, Operand(a4));
+ __ li(a3, Operand(var->name()));
+ __ push(a3);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2335,8 +2191,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
__ Push(var->name());
__ Push(v0);
@@ -2357,24 +2212,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ li(a0, Operand(var->name()));
- __ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, a1);
- __ ld(a2, location);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a2, Operand(at));
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2398,7 +2235,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
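
The EmitVariableAssignment hunks above collapse the separate LET and CONST error paths into one branch keyed on IsLexicalVariableMode, with the TDZ check gated on binding_needs_init. The resulting control flow, condensed from the hunk:

    if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
      if (var->binding_needs_init()) {
        // TDZ check: throw ReferenceError while the slot still holds the_hole
      }
      if (var->mode() == CONST) {
        // every non-INIT write to a const binding throws
      } else {
        // let/class bindings: plain store
      }
    }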
@@ -2449,44 +2286,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), v0);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), v0);
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
@@ -2507,7 +2307,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2520,7 +2320,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ ld(at, MemOperand(sp, 0));
PushOperand(at);
@@ -2557,6 +2358,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2581,7 +2383,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), v0);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ ld(at, MemOperand(sp, 0));
@@ -2615,6 +2418,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2634,7 +2438,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Record source position of the IC call.
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
@@ -2656,13 +2460,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, v0);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
// a6: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ ld(a6, MemOperand(sp, arg_count * kPointerSize));
@@ -2679,8 +2482,11 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
 // a1: the start position of the scope the call resides in.
__ li(a1, Operand(Smi::FromInt(scope()->start_position())));
+ // a0: the source position of the eval call.
+ __ li(a0, Operand(Smi::FromInt(expr->position())));
+
// Do the runtime call.
- __ Push(a6, a5, a4, a1);
+ __ Push(a6, a5, a4, a1, a0);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2702,7 +2508,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ Push(callee->name());
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperands(v0, v1); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
@@ -2729,7 +2535,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2745,12 +2551,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// resolve eval.
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ push(a1);
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
@@ -2760,8 +2566,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, v0);
}
@@ -2800,9 +2605,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(v0);
}
@@ -2845,9 +2649,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(v0);
}
@@ -3035,101 +2837,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(v0, &done);
- // If the object is not a value type, return the object.
- __ GetObjectType(v0, a1, a1);
- __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
-
- __ ld(v0, FieldMemOperand(v0, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = v0;
- Register index = a1;
- Register value = a2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value, at);
- __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
- __ SmiTst(index, at);
- __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
- __ SmiUntag(index, index);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- Register scratch = t1;
- __ EmitSeqStringSetCharCheck(
- string, index, value, scratch, one_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ Daddu(at,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(index);
- __ Daddu(at, at, index);
- __ sb(value, MemOperand(at));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = v0;
- Register index = a1;
- Register value = a2;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ SmiTst(value, at);
- __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
- __ SmiTst(index, at);
- __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
- __ SmiUntag(index, index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- Register scratch = t1;
- __ EmitSeqStringSetCharCheck(
- string, index, value, scratch, two_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value, value);
- __ Daddu(at,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ dsra(index, index, 32 - 1);
- __ Daddu(at, at, index);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ sh(value, MemOperand(at));
- context()->Plug(string);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
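
The deleted EmitTwoByteSeqStringSetChar above leaned on the mips64 smi layout, where the 32-bit payload sits in the upper word: one arithmetic shift right by 31 (dsra(index, index, 32 - 1)) both untags the index and scales it to a two-byte element offset. A standalone check of that arithmetic, with the encoding assumed from the deleted code:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Assumed mips64 smi layout: 32-bit payload in the upper word.
      int64_t index = 5;
      int64_t smi = index << 32;         // tag the index as a smi
      int64_t offset = smi >> (32 - 1);  // untag and scale by sizeof(uint16_t)
      assert(offset == index * 2);
      return 0;
    }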
@@ -3166,13 +2873,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ jmp(&done);
@@ -3196,55 +2898,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
- __ mov(a0, result_register());
-
- Register object = a1;
- Register index = a0;
- Register scratch = a3;
- Register result = v0;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ li(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3252,7 +2905,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
__ ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3260,8 +2913,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ li(a0, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, v0);
}
@@ -3312,12 +2964,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(v0);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, v0);
- context()->Plug(v0);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3337,7 +2983,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime,
+ NO_ALLOCATION_FLAGS);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ Pop(a2, a3);
__ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
@@ -3378,9 +3025,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3402,7 +3047,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadGlobalObject(a2);
@@ -3465,12 +3110,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ jmp(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(v0);
__ bind(&done);
@@ -3529,25 +3176,23 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
const Register scratch = a1;
- __ ld(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
+ __ ld(scratch, MemOperand(sp, 0)); // this
+ PushOperands(result_register(), scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = a1;
- const Register scratch1 = a4;
- __ Move(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- PushOperands(scratch, result_register());
- __ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ const Register scratch1 = a1;
+ const Register scratch2 = a4;
+ __ ld(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
+ __ ld(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
+ PushOperands(result_register(), scratch1, scratch2, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -3570,9 +3215,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
  // in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3621,9 +3266,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3669,7 +3314,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(v0);
}
// For all contexts except EffectConstant we have the result on
@@ -3680,7 +3326,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
break;
@@ -3691,7 +3338,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3703,6 +3350,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3714,6 +3362,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3731,7 +3380,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3828,7 +3477,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3848,7 +3496,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
@@ -3856,6 +3505,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
__ mov(a0, result_register());
PopOperand(a1);
InstanceOfStub stub(isolate());
@@ -3868,6 +3518,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cc = CompareIC::ComputeCondition(op);
__ mov(a0, result_register());
PopOperand(a1);
@@ -3955,7 +3606,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
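
The last mips64 hunk narrows the closure-scope type: GetClosureScope() now promises a DeclarationScope, the subclass that models functions, scripts, and modules. A hypothetical sketch of that relationship; the class shapes are assumptions, not the real declarations:

    class Scope {
     public:
      virtual ~Scope() = default;
      // block scopes, with scopes, catch scopes, ...
    };

    class DeclarationScope : public Scope {
     public:
      // Only these scopes carry receiver(), parameter(i), new_target_var(),
      // rest_parameter(), and friends, which is what the caller needs here.
      bool is_script_scope() const;
      bool is_module_scope() const;
    };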
diff --git a/deps/v8/src/full-codegen/ppc/OWNERS b/deps/v8/src/full-codegen/ppc/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/full-codegen/ppc/OWNERS
+++ b/deps/v8/src/full-codegen/ppc/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 301ccf53cc..6bac8b15a3 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -182,22 +182,19 @@ void FullCodeGenerator::Generate() {
__ push(r4);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
     // The new target value is not used, so clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ push(r6); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(r4);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ pop(r6); // Preserve new target.
}
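
This hunk replaces the per-slot-count FastNewContextStub family, which fell back to Runtime::kNewFunctionContext past kMaximumSlots, with a single FastNewFunctionContextStub that reads the slot count from a register; with no slow path left, the result is always in new space and the write barrier can be skipped unconditionally. A toy model of the simplification, all names illustrative:

    #include <cstdio>

    // Toy model only. Before: one specialized fast path per slot count plus
    // a runtime fallback. After: one parameterized fast path whose result is
    // always a new-space object, so no write barrier is needed.
    void NewFunctionContext(int slots) {
      std::printf("fast-path allocate %d slots in new space\n", slots);
    }

    int main() {
      NewFunctionContext(4);
      NewFunctionContext(4096);  // no slow path anymore
      return 0;
    }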
@@ -211,7 +208,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -238,11 +236,12 @@ void FullCodeGenerator::Generate() {
   // Registers holding this function and new target are both trashed in case
   // we bail out here. But since that can happen only when new target is not used
// and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register_r4) {
@@ -253,7 +252,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, r6, r3, r5);
@@ -261,7 +260,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register_r4) {
@@ -273,7 +272,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, r3, r4, r5);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -300,7 +299,8 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -313,7 +313,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmpl(sp, ip);
@@ -390,11 +391,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -450,6 +451,9 @@ void FullCodeGenerator::EmitReturnSequence() {
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -509,10 +513,12 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ b(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -690,7 +696,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r3, ip);
@@ -717,26 +723,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ StoreP(ip, StackOperand(variable));
@@ -744,35 +745,29 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ StoreP(ip, ContextMemOperand(cp, variable->index()), r0);
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ mov(r5, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- } else {
- __ LoadSmiLiteral(r3, Smi::FromInt(0)); // Indicates no initial value.
- }
- __ Push(r5, r3);
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ Push(r5);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
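
Variable declarations are lowered differently on both sides of this hunk: globals now record a (feedback-vector slot, initial value) pair instead of a (name, initial value) pair, and LOOKUP declarations shrink to a plain Runtime::kDeclareEvalVar call since only VAR bindings can reach that path. A toy model of the new globals payload, which Runtime::kDeclareGlobals consumes together with the feedback vector:

    #include <utility>
    #include <vector>

    // Toy model: the name is recovered via the feedback slot at declaration
    // time, so only the slot index travels in the pairs list.
    int main() {
      std::vector<std::pair<int, const char*>> pairs;
      pairs.emplace_back(3, "undefined");  // plain `var x;`
      pairs.emplace_back(7, "<closure>");  // function declaration
      return pairs.size() == 2 ? 0 : 1;
    }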
@@ -784,7 +779,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -812,7 +809,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ RecordWriteContextSlot(cp, offset, result_register(), r5,
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -822,10 +819,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
PushOperand(r5);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -834,20 +834,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ mov(r4, Operand(pairs));
__ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
- __ Push(r4, r3);
+ __ EmitLoadTypeFeedbackVector(r5);
+ __ Push(r4, r3, r5);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -855,7 +848,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -904,7 +897,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ b(&skip);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r3, ip);
__ bne(&next_test);
@@ -933,12 +926,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -970,16 +963,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(r3);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+ // property names in a fixed array. Note: Proxies never have an enum cache,
+ // so will always take the slow path.
Label call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -993,7 +985,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(r3); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1035,7 +1027,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r4, r3); // Smi and array
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ Push(r4); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ LoadSmiLiteral(r3, Smi::FromInt(0));
__ Push(r3); // Initial index.
@@ -1079,7 +1071,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ Push(r4, r6); // Enumerable and current entry.
__ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ mr(r6, r3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ cmp(r3, r0);
@@ -1093,11 +1085,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
{
EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1116,7 +1108,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1156,43 +1148,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register next = r4;
Register temp = r5;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at native context.
- __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- __ cmp(temp, ip);
- __ beq(&fast);
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ }
// Load next context in chain.
- __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ b(&loop);
- __ bind(&fast);
+ __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
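
The rewritten extension walk relies on scope analysis to precompute how many context links can harbor a sloppy-eval extension, turning the old two-phase, open-ended loop into a counted walk. A toy model of the new control flow, with structure names assumed:

    #include <cassert>

    // Toy model: check at most `to_check` links for an extension object;
    // the real generated code branches to a slow path instead of
    // returning false.
    struct Ctx {
      bool has_extension;
      const Ctx* previous;
    };

    bool AllExtensionsEmpty(const Ctx* current, int to_check) {
      for (; to_check > 0; current = current->previous, --to_check) {
        if (current->has_extension) return false;
      }
      return true;
    }

    int main() {
      Ctx native{false, nullptr};
      Ctx function{false, &native};
      Ctx inner{false, &function};
      assert(AllExtensionsEmpty(&inner, 2));
      return 0;
    }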
@@ -1246,33 +1214,29 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
__ bne(done);
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ b(done);
}
- __ b(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
+#endif
+ __ mov(LoadGlobalDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
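
EmitGlobalVariableLoad drops two register moves here: the new LoadGlobalIC is keyed on the feedback slot alone and recovers the global object and the variable name from the slot's metadata, so only the slot register is populated (the old operands survive only under #ifdef DEBUG). A toy record of the shrunken request; field names are assumptions:

    // Toy model: what each global-load site must materialize.
    struct LoadGlobalBefore { void* receiver; const char* name; int slot; };
    struct LoadGlobalAfter  { int slot; };  // receiver and name implied

    int main() {
      return sizeof(LoadGlobalAfter) < sizeof(LoadGlobalBefore) ? 0 : 1;
    }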
@@ -1280,7 +1244,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1301,22 +1265,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
Label done;
- // Let and const need a read barrier.
GetVar(r3, var);
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
__ bne(&done);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- }
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
context()->Plug(r3);
break;
@@ -1340,20 +1297,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r3);
+ break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
- __ mov(r4, Operand(expr->pattern()));
- __ LoadSmiLiteral(r3, Smi::FromInt(expr->flags()));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r3);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1389,8 +1338,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r3.
@@ -1418,7 +1368,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r3));
@@ -1426,7 +1377,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1460,16 +1411,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1487,6 +1442,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
PushOperand(r3);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1516,7 +1472,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1532,6 +1488,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1589,7 +1547,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1619,7 +1577,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1639,7 +1598,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1654,7 +1614,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1686,18 +1645,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY: {
- const Register scratch = r4;
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ mr(scratch, result_register());
VisitForAccumulatorValue(property->key());
- PushOperands(scratch, result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch1 = r5;
+ const Register scratch2 = r4;
__ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ __ LoadP(scratch2, MemOperand(sp, 1 * kPointerSize));
+ PushOperands(scratch1, scratch2, result_register());
}
break;
}
@@ -1723,23 +1682,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1757,7 +1720,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1769,7 +1732,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
break;
case NAMED_PROPERTY:
@@ -1798,21 +1761,27 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ b(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, r3 holds the generator object.
__ RecordGeneratorContinuation();
- __ pop(r4);
- __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
- __ bne(&resume);
- __ push(result_register());
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset));
+ STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+ STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+ __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kReturn), r0);
+ __ blt(&resume);
+ __ Push(result_register());
+ __ bgt(&exception);
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
@@ -1829,7 +1798,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ beq(&post_runtime);
__ push(r3); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
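
The yield continuation changes protocol: instead of popping the resume mode from the operand stack, it reads both the mode (kResumeModeOffset) and the input value (kInputOrDebugPosOffset) from the generator object, and a single compare against kReturn dispatches all three modes, which is what the two STATIC_ASSERTs guarantee. A toy model of that dispatch:

    #include <cstdio>

    // Toy model: the ordering is exactly what the STATIC_ASSERTs pin down.
    enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };

    const char* Dispatch(ResumeMode mode) {
      if (mode < kReturn) return "resume the body";        // __ blt(&resume)
      if (mode > kReturn) return "throw the input value";  // __ bgt(&exception)
      return "return {value: input, done: true}";          // iterator result
    }

    int main() {
      std::printf("%s\n", Dispatch(kReturn));
      return 0;
    }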
@@ -1838,120 +1807,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
-
-void FullCodeGenerator::EmitGeneratorResume(
- Expression* generator, Expression* value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in r3, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // r4 will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(r4);
-
- // Store input value into generator object.
- __ StoreP(result_register(),
- FieldMemOperand(r4, JSGeneratorObject::kInputOffset), r0);
- __ mr(r5, result_register());
- __ RecordWriteField(r4, JSGeneratorObject::kInputOffset, r5, r6,
- kLRHasBeenSaved, kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
- __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
- __ push(r5);
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- Label argument_loop, push_frame;
-#if V8_TARGET_ARCH_PPC64
- __ cmpi(r6, Operand::Zero());
- __ beq(&push_frame);
-#else
- __ SmiUntag(r6, SetRC);
- __ beq(&push_frame, cr0);
-#endif
- __ mtctr(r6);
- __ bind(&argument_loop);
- __ push(r5);
- __ bdnz(&argument_loop);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ b(&resume_frame, SetLK);
- __ b(&done);
- __ bind(&resume_frame);
- // lr = return address.
- // fp = caller's frame pointer.
- // cp = callee's context,
- // r7 = callee's JS function.
- __ PushStandardFrame(r7);
-
- // Load the operand stack size.
- __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
- __ LoadP(r6, FieldMemOperand(r6, FixedArray::kLengthOffset));
- __ SmiUntag(r6, SetRC);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- Label call_resume;
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ bne(&slow_resume, cr0);
- __ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
- }
- __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r5);
- __ add(ip, ip, r5);
- __ LoadSmiLiteral(r5,
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
- r0);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Jump(ip);
- __ bind(&slow_resume);
- }
- } else {
- __ beq(&call_resume, cr0);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label operand_loop;
- __ mtctr(r6);
- __ bind(&operand_loop);
- __ push(r5);
- __ bdnz(&operand_loop);
-
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- DCHECK(!result_register().is(r4));
- __ Push(r4, result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ stop("not-reached");
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -1979,7 +1834,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
operand_stack_depth_ * kPointerSize;
__ sub(r3, fp, sp);
- __ cmpi(r3, Operand(expected_diff));
+ __ mov(ip, Operand(expected_diff));
+ __ cmp(r3, ip);
__ Assert(eq, kUnexpectedStackDepth);
}
}
@@ -1987,7 +1843,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &allocate,
+ NO_ALLOCATION_FLAGS);
__ b(&done_allocate);
__ bind(&allocate);
@@ -2301,37 +2158,26 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, r4);
- __ LoadP(r6, location);
- __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
- __ bne(&assign);
- __ mov(r6, Operand(var->name()));
- __ push(r6);
- __ CallRuntime(Runtime::kThrowReferenceError);
- // Perform the assignment.
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
MemOperand location = VarOperand(var, r4);
- __ LoadP(r6, location);
- __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
- __ bne(&const_error);
- __ mov(r6, Operand(var->name()));
- __ push(r6);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ LoadP(r6, location);
+ __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ bne(&assign);
+ __ mov(r6, Operand(var->name()));
+ __ push(r6);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
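
The separate LET and CONST assignment paths above merge into one: a hole (temporal-dead-zone) check for any lexical binding that needs initialization, then a const-ness check that decides between throwing and storing. A toy model of the merged logic:

    #include <stdexcept>
    #include <string>

    enum class Mode { kLet, kConst };

    // Toy model of the unified store path for lexical bindings.
    void AssignLexical(Mode mode, bool needs_init, bool slot_is_hole,
                       std::string* slot, const std::string& value) {
      if (needs_init && slot_is_hole)
        throw std::runtime_error("ReferenceError");  // kThrowReferenceError
      if (mode == Mode::kConst)
        throw std::runtime_error("TypeError");       // kThrowConstAssignError
      *slot = value;                                 // store + write barrier
    }

    int main() {
      std::string slot;
      AssignLexical(Mode::kLet, true, false, &slot, "v");
      return slot == "v" ? 0 : 1;
    }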
@@ -2346,8 +2192,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2368,25 +2213,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(r3);
- __ mov(r3, Operand(var->name()));
- __ Push(cp, r3); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, r4);
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
- __ bne(&skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2409,7 +2235,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
}
@@ -2454,44 +2280,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), r3);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), r3);
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
}
@@ -2512,7 +2301,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
{
StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2525,7 +2314,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(r0, MemOperand(sp, 0));
PushOperand(r0);
@@ -2562,6 +2352,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2585,7 +2376,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), r3);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
@@ -2619,6 +2411,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2638,7 +2431,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
@@ -2659,13 +2452,13 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, r3);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
// r7: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ LoadP(r7, MemOperand(sp, arg_count * kPointerSize), r0);
@@ -2682,8 +2475,11 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
  // r4: the start position of the scope the call resides in.
__ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
+ // r3: the source position of the eval call.
+ __ LoadSmiLiteral(r3, Smi::FromInt(expr->position()));
+
// Do the runtime call.
- __ Push(r7, r6, r5, r4);
+ __ Push(r7, r6, r5, r4, r3);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
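The hunk above widens EmitResolvePossiblyDirectEval from a plain argument count to the Call expression itself, so it can push one extra operand: the source position of the eval call. A hypothetical record of the runtime arguments, keeping only the operands the comments name (the r6/r5 operands are left out because the diff does not describe them):

    // Illustration only; not V8's actual declaration.
    struct ResolveEvalArgs {
      const void* first_arg_or_undefined;  // r7: copy of the first argument
      int scope_start_position;            // r4: pushed as a Smi
      int eval_source_position;            // r3: the new operand in this patch
    };

    int main() {
      ResolveEvalArgs args{nullptr, 0, 0};
      (void)args;
    }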
@@ -2704,7 +2500,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ Push(callee->name());
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperands(r3, r4); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2731,9 +2527,9 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
+ // In a call to eval, we first call
+ // Runtime_ResolvePossiblyDirectEval to resolve the function we need
+ // to call. Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2748,12 +2544,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// resolve eval.
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ push(r4);
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
@@ -2764,8 +2560,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, r3);
}
@@ -2804,9 +2599,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(r3);
}
@@ -2849,9 +2643,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(r3);
}
@@ -3040,88 +2832,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(r3, &done);
- // If the object is not a value type, return the object.
- __ CompareObjectType(r3, r4, r4, JS_VALUE_TYPE);
- __ bne(&done);
- __ LoadP(r3, FieldMemOperand(r3, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r3;
- Register index = r4;
- Register value = r5;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ TestIfSmi(value, r0);
- __ Check(eq, kNonSmiValue, cr0);
- __ TestIfSmi(index, r0);
- __ Check(eq, kNonSmiIndex, cr0);
- __ SmiUntag(index, index);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value);
- __ addi(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ SmiToByteArrayOffset(r0, index);
- __ stbx(value, MemOperand(ip, r0));
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r3;
- Register index = r4;
- Register value = r5;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ TestIfSmi(value, r0);
- __ Check(eq, kNonSmiValue, cr0);
- __ TestIfSmi(index, r0);
- __ Check(eq, kNonSmiIndex, cr0);
- __ SmiUntag(index, index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value);
- __ addi(ip, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ SmiToShortArrayOffset(r0, index);
- __ sthx(value, MemOperand(ip, r0));
- context()->Plug(string);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3156,8 +2866,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label index_out_of_range;
Label done;
StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
- &need_conversion, &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ b(&done);
@@ -3181,48 +2890,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r4;
- Register index = r3;
- Register scratch = r6;
- Register result = r3;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object, index, scratch, result,
- &need_conversion, &need_conversion,
- &index_out_of_range, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ b(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ b(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ LoadSmiLiteral(result, Smi::FromInt(0));
- __ b(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3230,7 +2897,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to r4.
int const argc = args->length() - 2;
__ LoadP(r4, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3238,8 +2905,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ mov(r3, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, r3);
}
@@ -3291,12 +2957,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(r3);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r3);
- context()->Plug(r3);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3316,7 +2976,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime,
+ NO_ALLOCATION_FLAGS);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
__ Pop(r5, r6);
__ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
@@ -3357,9 +3018,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3381,7 +3040,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadGlobalObject(r5);
@@ -3439,12 +3098,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true, &materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(r3);
__ b(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(r3, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(r3);
__ bind(&done);
@@ -3503,25 +3164,23 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
const Register scratch = r4;
- __ LoadP(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
+ __ LoadP(scratch, MemOperand(sp, 0)); // this
+ PushOperands(result_register(), scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = r4;
- const Register scratch1 = r5;
- __ mr(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- PushOperands(scratch, result_register());
- __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ const Register scratch1 = r4;
+ const Register scratch2 = r5;
+ __ LoadP(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
+ __ LoadP(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
+ PushOperands(result_register(), scratch1, scratch2, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -3544,9 +3203,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
  // in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3596,9 +3255,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3645,7 +3304,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(r3);
}
  // For all contexts except EffectContext we have the result on
@@ -3656,7 +3316,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(r3);
}
break;
@@ -3666,7 +3327,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3678,6 +3339,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3689,6 +3351,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3705,7 +3368,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3804,7 +3467,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3824,7 +3486,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -3832,6 +3495,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
PopOperand(r4);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
@@ -3843,6 +3507,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cond = CompareIC::ComputeCondition(op);
PopOperand(r4);
@@ -3924,7 +3589,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
diff --git a/deps/v8/src/full-codegen/s390/OWNERS b/deps/v8/src/full-codegen/s390/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/full-codegen/s390/OWNERS
+++ b/deps/v8/src/full-codegen/s390/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
index 88bec4cab6..003c9312e4 100644
--- a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -187,22 +187,19 @@ void FullCodeGenerator::Generate() {
__ push(r3);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ push(r5); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(r3);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), Operand(slots));
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ pop(r5); // Preserve new target.
}
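The context-allocation hunk above retires the slot-count-templated FastNewContextStub (which fell back to Runtime::kNewFunctionContext past kMaximumSlots) in favor of FastNewFunctionContextStub, which receives the slot count in a dedicated register. A toy contrast of the two shapes (the constant is illustrative, not V8's real limit):

    constexpr int kOldMaximumSlots = 64;  // assumed value for the demo

    // Old path: stub only up to a cap; the runtime fallback could allocate
    // outside new space, so the write barrier stayed on.
    bool NeedsWriteBarrierOld(int slots) { return slots > kOldMaximumSlots; }
    // New path: the slot count is an operand and the result is always in new
    // space, so the write barrier is skipped unconditionally.
    bool NeedsWriteBarrierNew(int) { return false; }

    int main() {
      return (NeedsWriteBarrierOld(128) && !NeedsWriteBarrierNew(128)) ? 0 : 1;
    }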
@@ -216,7 +213,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -243,11 +241,12 @@ void FullCodeGenerator::Generate() {
  // Registers holding this function and new target are both trashed in case we
  // bail out here. But since that can happen only when new target is not used
// and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register_r3) {
@@ -258,7 +257,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, r5, r2, r4);
@@ -266,7 +265,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
@@ -280,7 +279,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, r2, r3, r4);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
@@ -307,7 +306,8 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -320,7 +320,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ CmpLogicalP(sp, ip);
@@ -392,11 +393,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -442,8 +443,6 @@ void FullCodeGenerator::EmitReturnSequence() {
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
@@ -454,6 +453,10 @@ void FullCodeGenerator::EmitReturnSequence() {
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
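RestoreContext(), defined above, names the cp reload that previously appeared inline after every call and stub return in this file; the rest of the diff rewrites those sites to use it. A toy model of the refactor, with stand-in types in place of the MacroAssembler and frame registers:

    // Stand-ins; the real code loads cp from
    // MemOperand(fp, StandardFrameConstants::kContextOffset).
    struct ToyFrame { long context; };
    struct ToyCodegen {
      ToyFrame* fp;  // frame pointer
      long cp;       // context register
      void RestoreContext() { cp = fp->context; }
    };

    int main() {
      ToyFrame frame{42};
      ToyCodegen cg{&frame, 0};
      cg.RestoreContext();
      return cg.cp == 42 ? 0 : 1;
    }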
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
@@ -504,10 +507,12 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ b(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -670,7 +675,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ b(&skip);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, NULL);
@@ -694,26 +699,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ StoreP(ip, StackOperand(variable));
@@ -721,35 +721,29 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ StoreP(ip, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ mov(r4, Operand(variable->name()));
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- } else {
- __ LoadSmiLiteral(r2, Smi::FromInt(0)); // Indicates no initial value.
- }
- __ Push(r4, r2);
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ Push(r4);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -760,7 +754,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -787,7 +783,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ RecordWriteContextSlot(cp, offset, result_register(), r4,
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -797,10 +793,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
PushOperand(r4);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -808,18 +807,12 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ mov(r3, Operand(pairs));
__ LoadSmiLiteral(r2, Smi::FromInt(DeclareGlobalsFlags()));
- __ Push(r3, r2);
+ __ EmitLoadTypeFeedbackVector(r4);
+ __ Push(r3, r2, r4);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -827,7 +820,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -877,7 +870,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ b(&skip);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
__ bne(&next_test);
__ Drop(1);
@@ -905,12 +898,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
@@ -941,16 +934,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(r2);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+ // property names in a fixed array. Note: Proxies never have an enum cache,
+ // so will always take the slow path.
Label call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -964,7 +956,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(r2); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1005,7 +997,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(r3, r2); // Smi and array
__ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ Push(r3); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ LoadSmiLiteral(r2, Smi::FromInt(0));
__ Push(r2); // Initial index.
@@ -1049,7 +1041,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// just skip it.
__ Push(r3, r5); // Enumerable and current entry.
__ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRR(r5, r2);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ CmpP(r2, r0);
@@ -1063,11 +1055,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
{
EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1086,7 +1078,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1123,42 +1115,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register next = r3;
Register temp = r4;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- }
- // If no outer scope calls eval, we do not need to check more
- // context extensions.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s->is_eval_scope()) {
- Label loop, fast;
- if (!current.is(next)) {
- __ Move(next, current);
- }
- __ bind(&loop);
- // Terminate at native context.
- __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
- __ beq(&fast, Label::kNear);
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
+ __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
+ }
// Load next context in chain.
- __ LoadP(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
- __ b(&loop);
- __ bind(&fast);
+ __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering cp.
+ current = next;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
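The rewritten walk above drops the open-ended scope scan and the separate eval-scope loop: scope analysis now precomputes, via ContextChainLengthUntilOutermostSloppyEval(), how many contexts up the chain can carry a sloppy-eval extension, and the generated code checks exactly that many. A compilable sketch of the control flow, with an assumed stand-in Scope:

    struct Scope {
      const Scope* outer;
      bool needs_context;
      bool calls_sloppy_eval;
    };

    // Mirrors the new loop: visit at most to_check context-bearing scopes,
    // performing the extension check only for those that call sloppy eval.
    int CheckExtensions(const Scope* s, int to_check) {
      int checked = 0;
      for (; to_check > 0; s = s->outer) {
        if (!s->needs_context) continue;
        if (s->calls_sloppy_eval) ++checked;  // JumpIfNotRoot(...) goes here
        --to_check;
      }
      return checked;
    }

    int main() {
      Scope global{nullptr, true, false};
      Scope fn{&global, true, true};
      return CheckExtensions(&fn, 2) == 1 ? 0 : 1;
    }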
@@ -1210,39 +1179,35 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ bne(done);
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ b(done);
}
- __ b(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
- __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
- __ mov(LoadDescriptor::SlotRegister(),
+#endif
+ __ mov(LoadGlobalDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1263,22 +1228,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
Label done;
- // Let and const need a read barrier.
GetVar(r2, var);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ bne(&done);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- }
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
context()->Plug(r2);
break;
@@ -1302,19 +1260,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r2);
+ break;
}
- }
-}
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
- __ mov(r3, Operand(expr->pattern()));
- __ LoadSmiLiteral(r2, Smi::FromInt(expr->flags()));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(r2);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
@@ -1348,8 +1299,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in r2.
@@ -1377,7 +1329,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(r2));
@@ -1385,7 +1338,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1419,16 +1372,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1446,6 +1403,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ LoadSmiLiteral(r2, Smi::FromInt(NONE));
PushOperand(r2);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1475,7 +1433,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1491,6 +1449,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1547,7 +1507,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1577,7 +1537,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1597,7 +1558,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1611,7 +1573,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1643,18 +1604,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
case KEYED_SUPER_PROPERTY: {
- const Register scratch = r3;
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ LoadRR(scratch, result_register());
VisitForAccumulatorValue(property->key());
- PushOperands(scratch, result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch1 = r4;
+ const Register scratch2 = r3;
__ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ __ LoadP(scratch2, MemOperand(sp, 1 * kPointerSize));
+ PushOperands(scratch1, scratch2, result_register());
}
break;
}
@@ -1680,23 +1641,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1714,7 +1679,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1726,7 +1691,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
break;
case NAMED_PROPERTY:
@@ -1754,21 +1719,27 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ b(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, r2 holds the generator object.
__ RecordGeneratorContinuation();
- __ pop(r3);
- __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
- __ bne(&resume);
- __ push(result_register());
+ __ LoadP(r3, FieldMemOperand(r2, JSGeneratorObject::kResumeModeOffset));
+ __ LoadP(r2, FieldMemOperand(r2, JSGeneratorObject::kInputOrDebugPosOffset));
+ STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+ STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+ __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::kReturn), r0);
+ __ blt(&resume);
+ __ Push(result_register());
+ __ bgt(&exception);
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
@@ -1785,7 +1756,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ beq(&post_runtime);
__ push(r2); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
@@ -1794,113 +1765,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
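The new continuation sequence above no longer pops the resume mode from the operand stack; it reads both the mode and the input value out of the generator object and dispatches three ways off a single comparison against kReturn. The STATIC_ASSERTs pin the ordering (kNext < kReturn < kThrow) that makes the blt/bgt pair sufficient. The same dispatch in portable form, with the enum values assumed consistent with those asserts:

    enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };
    static_assert(kNext < kReturn, "blt takes the resume path");
    static_assert(kThrow > kReturn, "bgt takes the exception path");

    const char* Dispatch(ResumeMode mode) {
      if (mode < kReturn) return "resume";       // __ blt(&resume)
      if (mode > kReturn) return "throw";        // __ bgt(&exception)
      return "iterator-result";                  // fall through: done = true
    }

    int main() { return Dispatch(kNext)[0] == 'r' ? 0 : 1; }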
-void FullCodeGenerator::EmitGeneratorResume(
- Expression* generator, Expression* value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in r2, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // r3 will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(r3);
-
- // Store input value into generator object.
- __ StoreP(result_register(),
- FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
- __ LoadRR(r4, result_register());
- __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r4, r5,
- kLRHasBeenSaved, kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
- __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
-
- // Load receiver and store as the first argument.
- __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
- __ push(r4);
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(
- r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- Label argument_loop, push_frame;
-#if V8_TARGET_ARCH_S390X
- __ CmpP(r5, Operand::Zero());
- __ beq(&push_frame, Label::kNear);
-#else
- __ SmiUntag(r5);
- __ beq(&push_frame, Label::kNear);
-#endif
- __ LoadRR(r0, r5);
- __ bind(&argument_loop);
- __ push(r4);
- __ SubP(r0, Operand(1));
- __ bne(&argument_loop);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ b(r14, &resume_frame); // brasl
- __ b(&done);
- __ bind(&resume_frame);
- // lr = return address.
- // fp = caller's frame pointer.
- // cp = callee's context,
- // r6 = callee's JS function.
- __ PushStandardFrame(r6);
-
- // Load the operand stack size.
- __ LoadP(r5, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
- __ LoadP(r5, FieldMemOperand(r5, FixedArray::kLengthOffset));
- __ SmiUntag(r5);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- Label call_resume;
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ bne(&slow_resume, Label::kNear);
- __ LoadP(ip, FieldMemOperand(r6, JSFunction::kCodeEntryOffset));
- __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r4);
- __ AddP(ip, ip, r4);
- __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Jump(ip);
- __ bind(&slow_resume);
- } else {
- __ beq(&call_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label operand_loop;
- __ LoadRR(r0, r5);
- __ bind(&operand_loop);
- __ push(r4);
- __ SubP(r0, Operand(1));
- __ bne(&operand_loop);
-
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- DCHECK(!result_register().is(r3));
- __ Push(r3, result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ stop("not-reached");
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
OperandStackDepthIncrement(2);
__ Push(reg1, reg2);
@@ -1936,7 +1800,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate,
+ NO_ALLOCATION_FLAGS);
__ b(&done_allocate);
__ bind(&allocate);
@@ -2015,14 +1880,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD: {
- __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ BranchOnOverflow(&stub_call);
+ __ AddP(scratch1, left, right);
+ __ b(overflow, &stub_call);
__ LoadRR(right, scratch1);
break;
}
case Token::SUB: {
- __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
- __ BranchOnOverflow(&stub_call);
+ __ SubP(scratch1, left, right);
+ __ b(overflow, &stub_call);
__ LoadRR(right, scratch1);
break;
}
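The ADD and SUB fast paths above drop the AddAndCheckForOverflow / SubAndCheckForOverflow macro sequences (and their scratch registers) in favor of a plain AddP/SubP followed by a branch on the overflow condition code. A hedged model of what that branch tests, written with GCC/Clang builtins rather than s390 condition codes:

    #include <cstdint>

    // Returns false exactly when the generated code would branch to the
    // stub_call slow path.
    bool SmiAddFast(intptr_t left, intptr_t right, intptr_t* result) {
      return !__builtin_add_overflow(left, right, result);
    }

    int main() {
      intptr_t r;
      return (SmiAddFast(1, 2, &r) && r == 3) ? 0 : 1;
    }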
@@ -2247,37 +2112,27 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, r3);
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
- __ bne(&assign);
- __ mov(r5, Operand(var->name()));
- __ push(r5);
- __ CallRuntime(Runtime::kThrowReferenceError);
- // Perform the assignment.
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
MemOperand location = VarOperand(var, r3);
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
- __ bne(&const_error, Label::kNear);
- __ mov(r5, Operand(var->name()));
- __ push(r5);
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&assign);
+ __ mov(r5, Operand(var->name()));
+ __ push(r5);
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
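The store path rewritten above folds the previously separate LET and CONST branches into one branch for all lexical modes: an optional hole check when binding_needs_init() is set, then either the const-assignment throw or the actual store. In outline, with assumed stand-ins for the V8 helpers:

    #include <stdexcept>

    enum class Mode { LET, CONST };
    struct Binding {
      Mode mode;
      bool needs_init;
      bool is_hole;  // models the Heap::kTheHoleValueRootIndex comparison
    };

    void StoreLexical(Binding& b) {
      if (b.needs_init && b.is_hole)
        throw std::runtime_error("ReferenceError");    // kThrowReferenceError
      if (b.mode == Mode::CONST)
        throw std::runtime_error("const assignment");  // kThrowConstAssignError
      b.is_hole = false;  // EmitStoreToStackLocalOrContextSlot
    }

    int main() {
      Binding b{Mode::LET, true, false};
      StoreLexical(b);
    }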
@@ -2292,8 +2147,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2314,25 +2168,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(r2);
- __ mov(r2, Operand(var->name()));
- __ Push(cp, r2); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, r3);
- __ LoadP(r4, location);
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ bne(&skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2354,7 +2189,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
}
@@ -2396,43 +2231,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r2);
-}
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), r2);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), r2);
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
}
@@ -2451,7 +2250,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
{
StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2464,7 +2263,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(r1, MemOperand(sp, 0));
PushOperand(r1);
@@ -2500,6 +2300,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ StoreP(r2, MemOperand(sp, kPointerSize));
@@ -2522,7 +2323,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), r2);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
@@ -2555,6 +2357,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ StoreP(r2, MemOperand(sp, kPointerSize));
@@ -2573,7 +2376,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
@@ -2594,12 +2397,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, r2);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
// r6: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
@@ -2616,8 +2419,11 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// r3: the start position of the scope the call resides in.
__ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
+ // r2: the source position of the eval call.
+ __ LoadSmiLiteral(r2, Smi::FromInt(expr->position()));
+
// Do the runtime call.
- __ Push(r6, r5, r4, r3);
+ __ Push(r6, r5, r4, r3, r2);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
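The resolver now receives one extra argument: the source position of the eval call itself (r2), alongside the scope's start position (r3). A sketch of the widened argument record; the field names, and the two pushes not annotated in the code above, are assumptions:

    struct Smi { int value; };

    // Stack arguments to Runtime_ResolvePossiblyDirectEval, top of stack
    // last; illustrative layout only.
    struct ResolveEvalArgs {
      const void* first_arg;       // r6: first argument, or undefined
      const void* unannotated_r5;  // purpose not shown in this hunk
      const void* unannotated_r4;  // purpose not shown in this hunk
      Smi scope_start_position;    // r3
      Smi eval_position;           // r2: newly added by this change
    };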
@@ -2637,7 +2443,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ Push(callee->name());
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperands(r2, r3); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2663,9 +2469,9 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
}
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
+ // In a call to eval, we first call
+ // Runtime_ResolvePossiblyDirectEval to resolve the function we need
+ // to call. Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2680,12 +2486,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// resolve eval.
__ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ push(r3);
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
@@ -2696,8 +2502,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, r2);
}
@@ -2735,9 +2540,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(r2);
}
@@ -2779,9 +2583,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(r2);
}
@@ -2962,85 +2764,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
context()->Plug(r2);
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(r2, &done);
- // If the object is not a value type, return the object.
- __ CompareObjectType(r2, r3, r3, JS_VALUE_TYPE);
- __ bne(&done, Label::kNear);
- __ LoadP(r2, FieldMemOperand(r2, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(r2);
-}
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r2;
- Register index = r3;
- Register value = r4;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ TestIfSmi(value);
- __ Check(eq, kNonSmiValue, cr0);
- __ TestIfSmi(index);
- __ Check(eq, kNonSmiIndex, cr0);
- __ SmiUntag(index);
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- __ SmiTag(index);
- }
-
- __ SmiUntag(value);
- __ AddP(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ SmiToByteArrayOffset(r1, index);
- __ StoreByte(value, MemOperand(ip, r1));
- context()->Plug(string);
-}
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = r2;
- Register index = r3;
- Register value = r4;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperands(index, value);
-
- if (FLAG_debug_code) {
- __ TestIfSmi(value);
- __ Check(eq, kNonSmiValue, cr0);
- __ TestIfSmi(index);
- __ Check(eq, kNonSmiIndex, cr0);
- __ SmiUntag(index, index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
- __ SmiTag(index, index);
- }
-
- __ SmiUntag(value);
- __ SmiToShortArrayOffset(r1, index);
- __ StoreHalfWord(value, MemOperand(r1, string, SeqTwoByteString::kHeaderSize -
- kHeapObjectTag));
- context()->Plug(string);
-}
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3074,8 +2797,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label index_out_of_range;
Label done;
StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
- &need_conversion, &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ b(&done);
@@ -3098,47 +2820,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
context()->Plug(result);
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = r3;
- Register index = r2;
- Register scratch = r5;
- Register result = r2;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object, index, scratch, result,
- &need_conversion, &need_conversion,
- &index_out_of_range, STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ b(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ b(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ LoadSmiLiteral(result, Smi::FromInt(0));
- __ b(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3146,7 +2827,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to r3.
int const argc = args->length() - 2;
__ LoadP(r3, MemOperand(sp, (argc + 1) * kPointerSize));
@@ -3154,8 +2835,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ mov(r2, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, r2);
}
@@ -3202,12 +2882,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(r2);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, r2);
- context()->Plug(r2);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3226,7 +2900,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime,
+ NO_ALLOCATION_FLAGS);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
__ Pop(r4, r5);
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
@@ -3265,9 +2940,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
@@ -3288,7 +2961,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ LoadGlobalObject(r4);
@@ -3346,12 +3019,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true, &materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(r2, Heap::kTrueValueRootIndex);
if (context()->IsStackValue()) __ push(r2);
__ b(&done);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
__ LoadRoot(r2, Heap::kFalseValueRootIndex);
if (context()->IsStackValue()) __ push(r2);
__ bind(&done);
@@ -3409,25 +3084,23 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
const Register scratch = r3;
- __ LoadP(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
+ __ LoadP(scratch, MemOperand(sp, 0)); // this
+ PushOperands(result_register(), scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
+ VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = r3;
- const Register scratch1 = r4;
- __ LoadRR(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- PushOperands(scratch, result_register());
- __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- PushOperands(scratch1, scratch, result_register());
+ const Register scratch1 = r3;
+ const Register scratch2 = r4;
+ __ LoadP(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
+ __ LoadP(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
+ PushOperands(result_register(), scratch1, scratch2, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -3450,9 +3123,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3493,18 +3166,18 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Register scratch1 = r3;
Register scratch2 = r4;
__ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
- __ AddAndCheckForOverflow(r2, r2, scratch1, scratch2, r0);
- __ BranchOnNoOverflow(&done);
+ __ AddP(scratch2, r2, scratch1);
+ __ LoadOnConditionP(nooverflow, r2, scratch2);
+ __ b(nooverflow, &done);
// Call stub. Undo operation first.
- __ SubP(r2, r2, scratch1);
__ b(&stub_call);
__ bind(&slow);
}
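LoadOnConditionP writes r2 only when the add did not overflow, so the pre-increment value survives on the overflow path and the old compensating SubP before the stub call is no longer needed. A portable sketch of compute-into-scratch, commit-conditionally (builtin as a stand-in for the condition code):

    #include <cstdint>

    // Increments v by delta; on overflow leaves v unchanged and reports it,
    // so the caller can enter the slow path with no undo step.
    std::int64_t IncrementOrKeep(std::int64_t v, std::int64_t delta,
                                 bool* overflowed) {
      std::int64_t next;
      *overflowed = __builtin_add_overflow(v, delta, &next);
      return *overflowed ? v : next;
    }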
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3551,7 +3224,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(r2);
}
// For all contexts except EffectContext, we have the result on
@@ -3562,7 +3236,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(r2);
}
break;
@@ -3572,7 +3247,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3584,6 +3259,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3595,6 +3271,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3611,7 +3288,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3705,7 +3382,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3725,7 +3401,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -3733,6 +3410,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
PopOperand(r3);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
@@ -3744,6 +3422,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cond = CompareIC::ComputeCondition(op);
PopOperand(r3);
@@ -3819,7 +3498,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
}
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() || closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 992e7fe4f7..4b0e43c9b2 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -91,6 +91,7 @@ class JumpPatchSite BASE_EMBEDDED {
// frames-x64.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
+ DCHECK_EQ(scope(), info->scope());
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(literal());
@@ -166,22 +167,19 @@ void FullCodeGenerator::Generate() {
__ Push(rdi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
// The new target value is not used, so clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ Push(rdx); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ Push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ Pop(rdx); // Restore new target.
}
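FastNewFunctionContextStub takes the slot count in a register, so the old split between one FastNewContextStub specialization per count (valid only up to kMaximumSlots) and a runtime fallback collapses into a single stub whose result never needs a write barrier. The shape of that change, as a sketch:

    #include <cstddef>

    struct Context { std::size_t slots; };

    // Old: one stub per compile-time count, bounded by a maximum;
    // larger counts fell back to the runtime.
    template <std::size_t kSlots>
    Context FastNewContext() { return Context{kSlots}; }

    // New: a single stub reads the count from a register at run time.
    Context FastNewFunctionContext(std::size_t slots) { return Context{slots}; }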
@@ -196,7 +194,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -222,11 +221,12 @@ void FullCodeGenerator::Generate() {
// Registers holding this function and new target are both trashed in case we
// bailout here. But since that can happen only when new target is not used
// and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register) {
@@ -237,7 +237,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, rdx, rbx, rcx);
@@ -245,7 +245,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register) {
@@ -258,7 +258,8 @@ void FullCodeGenerator::Generate() {
}
// Possibly allocate an arguments object.
- Variable* arguments = scope()->arguments();
+ DCHECK_EQ(scope(), info->scope());
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Arguments object must be allocated after the context object, in
// case the "arguments" or ".arguments" variables are in the context.
@@ -286,10 +287,11 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope()->declarations());
+ VisitDeclarations(info->scope()->declarations());
}
// Assert that the declarations do not use ICs. Otherwise the debugger
@@ -299,7 +301,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
@@ -372,11 +375,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
}
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -425,6 +428,9 @@ void FullCodeGenerator::EmitReturnSequence() {
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
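RestoreContext names a pattern this file previously spelled out at every call site: after any call that can clobber the context register, reload it from its fixed slot in the frame. A stand-alone sketch of the frame-slot idea; the struct layout is illustrative, not x64's actual frame:

    struct Frame {
      const Frame* caller_fp;  // saved frame pointer
      const void* context;     // the slot kContextOffset addresses off rbp
    };

    // The callee may have left anything in rsi; the canonical context for
    // this activation lives in the frame and can always be reloaded.
    const void* RestoreContext(const Frame* fp) { return fp->context; }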
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -497,10 +503,12 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -689,7 +697,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, NULL);
@@ -714,26 +722,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(),
- zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movp(StackOperand(variable), kScratchRegister);
@@ -741,34 +744,28 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movp(ContextOperand(rsi, variable->index()), kScratchRegister);
// No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ Push(variable->name());
- // Declaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else {
- __ Push(Smi::FromInt(0)); // Indicates no initial value.
- }
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -780,7 +777,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -811,7 +810,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -819,10 +818,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
PushOperand(variable->name());
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -831,19 +833,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
+ __ EmitLoadTypeFeedbackVector(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
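DeclareGlobals gains the closure's type feedback vector as a third argument because, per the declaration changes below, the pairs array now records a feedback slot index for each global rather than its name. A sketch of the assumed encoding:

    #include <cstdint>
    #include <vector>

    // Each declared global contributes two entries to the pairs array: its
    // feedback slot (as a Smi) and the initial value, either undefined or
    // the SharedFunctionInfo for a function declaration.
    struct GlobalsBuilder {
      std::vector<std::intptr_t> pairs;  // stand-in for a FixedArray
      void Add(int feedback_slot, std::intptr_t initial_value) {
        pairs.push_back(feedback_slot << 1);  // Smi: low tag bit stays clear
        pairs.push_back(initial_value);
      }
    };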
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -851,7 +847,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -901,7 +897,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ jmp(&skip, Label::kNear);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(not_equal, &next_test);
__ Drop(1);
@@ -929,12 +925,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -966,16 +962,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ Push(rax);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+ // property names in a fixed array. Note: Proxies never have an enum cache,
+ // so will always take the slow path.
Label call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -989,7 +984,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
@@ -1031,7 +1026,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Push(rax); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
@@ -1042,13 +1037,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
- // Get the current entry of the array into register rbx.
+ // Get the current entry of the array into register rax.
__ movp(rbx, Operand(rsp, 2 * kPointerSize));
SmiIndex index = masm()->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movp(rbx, FieldOperand(rbx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
+ __ movp(rax,
+ FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize));
// Get the expected map from the stack, or a smi in the
// permanent slow case, into register rdx.
@@ -1057,8 +1050,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ movp(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movp(rbx, Operand(rsp, 4 * kPointerSize));
+ __ cmpp(rdx, FieldOperand(rbx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// We need to filter the key, record slow-path here.
@@ -1067,29 +1060,27 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ Push(rcx); // Enumerable.
- __ Push(rbx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, loop_statement.continue_label());
- __ movp(rbx, rax);
+ // rax contains the key. The receiver in rbx is the second argument to the
+ // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // have the key, and the name-converted key otherwise.
+ ForInFilterStub has_stub(isolate());
+ __ CallStub(&has_stub);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
+ __ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
+ loop_statement.continue_label());
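The kForInFilter runtime call becomes ForInFilterStub with a narrow contract: given the key in rax and the receiver in rbx, produce the (possibly name-converted) key if the receiver still has the property, or undefined so the loop skips it. A portable sketch of that contract:

    #include <map>
    #include <optional>
    #include <string>

    // A property can be deleted while for-in iterates; nullopt stands in
    // for undefined, i.e. jump to the loop's continue label.
    std::optional<std::string> ForInFilter(
        const std::map<std::string, int>& receiver, const std::string& key) {
      if (receiver.count(key) != 0) return key;
      return std::nullopt;
    }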
// Update the 'each' property or variable from the possibly filtered
- // entry in register rbx.
+ // entry in register rax.
__ bind(&update_each);
- __ movp(result_register(), rbx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1106,7 +1097,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1145,46 +1136,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register context = rsi;
Register temp = rdx;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(temp)) {
- __ movp(temp, context);
- }
- // Load map for comparison into register, outside loop.
- __ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
- __ bind(&next);
- // Terminate at native context.
- __ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
- __ j(equal, &fast, Label::kNear);
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
- __ jmp(&next);
- __ bind(&fast);
+ __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering rsi.
+ context = temp;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
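The rewritten fast path no longer walks open-endedly toward the native context: ContextChainLengthUntilOutermostSloppyEval bounds the loop up front, on the reasoning that only scopes up to the outermost sloppy eval can carry a context extension that shadows a global. A simplified model of the bounded walk, with assumed semantics:

    struct Scope {
      Scope* outer;
      bool needs_context;
      bool calls_sloppy_eval;
      bool extension_is_hole;  // no eval-introduced bindings present
    };

    // True when every checked extension is empty, so the global load may use
    // the fast path; any populated extension forces the slow path.
    bool GlobalLoadFastPathOk(Scope* s, int to_check) {
      for (; to_check > 0; s = s->outer) {
        if (!s->needs_context) continue;
        if (s->calls_sloppy_eval && !s->extension_is_hole) return false;
        --to_check;
      }
      return true;
    }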
@@ -1237,32 +1201,28 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ jmp(done);
}
- __ jmp(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ Move(LoadDescriptor::NameRegister(), var->name());
- __ LoadGlobalObject(LoadDescriptor::ReceiverRegister());
- __ Move(LoadDescriptor::SlotRegister(),
+#endif
+ __ Move(LoadGlobalDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
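Global loads move from the generic LoadIC, which needed the receiver, the name, and a slot, to a dedicated LoadGlobalIC keyed by the feedback slot alone; the name and global object are implied by the slot's metadata. The narrowed interface, sketched with stand-in signatures:

    struct Object;

    // Old shape: receiver and name passed explicitly.
    Object* LoadIC(Object* receiver, Object* name, int feedback_slot);

    // New shape: for globals, the slot alone identifies the load.
    Object* LoadGlobalIC(int feedback_slot);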
@@ -1270,7 +1230,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1291,21 +1251,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
: "[ Stack slot");
if (NeedsHoleCheckForLoad(proxy)) {
- // Let and const need a read barrier.
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ DCHECK(IsLexicalVariableMode(var->mode()));
Label done;
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- }
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1331,19 +1285,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
context()->Plug(rax);
break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ Move(rax, Smi::FromInt(expr->literal_index()));
- __ Move(rcx, expr->pattern());
- __ Move(rdx, Smi::FromInt(expr->flags()));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(rax);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1382,8 +1327,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(rdx, Smi::FromInt(flags));
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in rax.
@@ -1411,7 +1357,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(rax));
@@ -1419,7 +1366,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1448,16 +1395,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1474,6 +1425,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitAccessor(it->second->setter);
PushOperand(Smi::FromInt(NONE));
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1502,7 +1454,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1518,6 +1470,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1575,7 +1529,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1605,7 +1559,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1625,7 +1580,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1640,7 +1596,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1704,23 +1659,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1739,7 +1698,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitBinaryOp(expr->binary_operation(), op);
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1751,7 +1710,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
break;
case NAMED_PROPERTY:
@@ -1780,21 +1739,27 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ jmp(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, rax holds the generator object.
__ RecordGeneratorContinuation();
- __ Pop(rbx);
- __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
- __ j(not_equal, &resume);
+ __ movp(rbx, FieldOperand(rax, JSGeneratorObject::kResumeModeOffset));
+ __ movp(rax, FieldOperand(rax, JSGeneratorObject::kInputOrDebugPosOffset));
+ STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+ STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+ __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::kReturn));
+ __ j(less, &resume);
__ Push(result_register());
+ __ j(greater, &exception);
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
@@ -1810,8 +1775,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ j(equal, &post_runtime);
__ Push(rax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ movp(context_register(),
- Operand(rbp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
@@ -1821,102 +1785,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
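The continuation now reads the resume mode and the input value straight off the generator object, and the STATIC_ASSERTs pin the ordering kNext < kReturn < kThrow so that a single comparison against kReturn classifies all three modes. A sketch of that three-way dispatch, with the enum values assumed:

    enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };
    static_assert(kNext < kReturn, "one compare handles next");
    static_assert(kThrow > kReturn, "and throw");

    const char* Dispatch(ResumeMode mode) {
      if (mode < kReturn) return "resume execution";         // kNext
      if (mode > kReturn) return "throw/rethrow the input";  // kThrow
      return "build {value, done: true} and return";         // kReturn
    }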
-
-void FullCodeGenerator::EmitGeneratorResume(
- Expression* generator, Expression* value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in rax, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // rbx will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(rbx);
-
- // Store input value into generator object.
- __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOffset),
- result_register());
- __ movp(rcx, result_register());
- __ RecordWriteField(rbx, JSGeneratorObject::kInputOffset, rcx, rdx,
- kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
- __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
-
- // Push receiver.
- __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(rdx, rdx,
- SharedFunctionInfo::kFormalParameterCountOffset);
- __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ subp(rdx, Immediate(1));
- __ j(carry, &push_frame);
- __ Push(rcx);
- __ jmp(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ call(&resume_frame);
- __ jmp(&done);
- __ bind(&resume_frame);
- __ pushq(rbp); // Caller's frame pointer.
- __ movp(rbp, rsp);
- __ Push(rsi); // Callee's context.
- __ Push(rdi); // Callee's JS Function.
-
- // Load the operand stack size.
- __ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
- __ movp(rdx, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rdx, rdx);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ cmpp(rdx, Immediate(0));
- __ j(not_zero, &slow_resume);
- __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ SmiToInteger64(rcx,
- FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ addp(rdx, rcx);
- __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ jmp(rdx);
- __ bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ subp(rdx, Immediate(1));
- __ j(carry, &call_resume);
- __ Push(rcx);
- __ jmp(&push_operand_holes);
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Push(rbx);
- __ Push(result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ Abort(kGeneratorFailedToResume);
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperand(MemOperand operand) {
OperandStackDepthIncrement(1);
__ Push(operand);
@@ -1936,7 +1804,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &allocate,
+ NO_ALLOCATION_FLAGS);
__ jmp(&done_allocate, Label::kNear);
__ bind(&allocate);
@@ -2181,33 +2050,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
MemOperand location = VarOperand(var, rcx);
- __ movp(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &assign, Label::kNear);
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
- MemOperand location = VarOperand(var, rcx);
- __ movp(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &const_error, Label::kNear);
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ movp(rdx, location);
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &assign, Label::kNear);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
@@ -2222,8 +2083,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
@@ -2245,25 +2105,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ Push(rax);
- __ Push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, rcx);
- __ movp(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2285,7 +2126,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
}
@@ -2329,45 +2170,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- DCHECK(!rax.is(LoadDescriptor::ReceiverRegister()));
- __ movp(LoadDescriptor::ReceiverRegister(), rax);
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), rax);
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
}
@@ -2388,7 +2191,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the Call builtin if it
// is a sloppy mode method.
@@ -2400,7 +2203,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2436,6 +2240,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2460,7 +2265,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
__ Move(LoadDescriptor::NameRegister(), rax);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(rsp, 0));
@@ -2493,6 +2299,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2512,7 +2319,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
@@ -2533,15 +2340,13 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, rax);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ Push(Operand(rsp, arg_count * kPointerSize));
@@ -2558,6 +2363,9 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the start position of the scope the calls resides in.
__ Push(Smi::FromInt(scope()->start_position()));
+ // Push the source position of the eval call.
+ __ Push(Smi::FromInt(expr->position()));
+
// Do the runtime call.
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2579,7 +2387,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperand(rax); // Function.
PushOperand(rdx); // Receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2605,7 +2413,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2620,12 +2428,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Push a copy of the function (found below the arguments) and resolve
// eval.
__ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the callee.
__ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
@@ -2635,8 +2443,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, rax);
}
@@ -2675,9 +2482,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(rax);
}
@@ -2718,10 +2524,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
+ RestoreContext();
context()->Plug(rax);
}
@@ -2908,91 +2711,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(rax, &done);
- // If the object is not a value type, return the object.
- __ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
- __ j(not_equal, &done);
- __ movp(rax, FieldOperand(rax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = rax;
- Register index = rbx;
- Register value = rcx;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperand(value);
- PopOperand(index);
-
- if (FLAG_debug_code) {
- __ Check(__ CheckSmi(value), kNonSmiValue);
- __ Check(__ CheckSmi(index), kNonSmiValue);
- }
-
- __ SmiToInteger32(value, value);
- __ SmiToInteger32(index, index);
-
- if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- }
-
- __ movb(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
- value);
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = rax;
- Register index = rbx;
- Register value = rcx;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperand(value);
- PopOperand(index);
-
- if (FLAG_debug_code) {
- __ Check(__ CheckSmi(value), kNonSmiValue);
- __ Check(__ CheckSmi(index), kNonSmiValue);
- }
-
- __ SmiToInteger32(value, value);
- __ SmiToInteger32(index, index);
-
- if (FLAG_debug_code) {
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
- }
-
- __ movw(FieldOperand(string, index, times_2, SeqTwoByteString::kHeaderSize),
- value);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3028,13 +2746,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ jmp(&done);
@@ -3058,54 +2771,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = rbx;
- Register index = rax;
- Register scratch = rdx;
- Register result = rax;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Smi::FromInt(0));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3113,7 +2778,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to rdi.
int const argc = args->length() - 2;
__ movp(rdi, Operand(rsp, (argc + 1) * kPointerSize));
@@ -3121,8 +2786,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ Set(rax, argc);
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, rax);
}
@@ -3176,12 +2840,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(rax);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ LoadNativeContextSlot(Context::ORDINARY_HAS_INSTANCE_INDEX, rax);
- context()->Plug(rax);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3201,7 +2859,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime,
+ NO_ALLOCATION_FLAGS);
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, rbx);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
@@ -3241,9 +2900,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3265,7 +2922,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ movp(rax, NativeContextOperand());
@@ -3328,7 +2985,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ LoadRoot(rax, Heap::kTrueValueRootIndex);
} else {
@@ -3336,7 +2994,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ LoadRoot(rax, Heap::kFalseValueRootIndex);
} else {
@@ -3435,9 +3094,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load my have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3488,9 +3147,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3538,7 +3197,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(rax);
}
// For all contexts except kEffect: We have the result on
@@ -3550,7 +3210,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(rax);
}
break;
@@ -3560,7 +3221,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3572,6 +3233,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3583,6 +3245,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3599,7 +3262,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3695,7 +3358,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3715,7 +3377,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
@@ -3723,6 +3386,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
PopOperand(rdx);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
@@ -3734,6 +3398,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cc = CompareIC::ComputeCondition(op);
PopOperand(rdx);
@@ -3820,7 +3485,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index f14aaf69b0..0ccf63f9f0 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -168,22 +168,20 @@ void FullCodeGenerator::Generate() {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
- PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
+ PrepareForBailoutForId(BailoutId::ScriptContext(),
+ BailoutState::TOS_REGISTER);
// The new target value is not used, clobbering is safe.
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
__ push(edx); // Preserve new target.
}
- if (slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), slots);
- __ CallStub(&stub);
- // Result of FastNewContextStub is always in new space.
- need_write_barrier = false;
- } else {
- __ push(edi);
- __ CallRuntime(Runtime::kNewFunctionContext);
- }
+ FastNewFunctionContextStub stub(isolate());
+ __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
+ Immediate(slots));
+ __ CallStub(&stub);
+ // Result of FastNewFunctionContextStub is always in new space.
+ need_write_barrier = false;
if (info->scope()->new_target_var() != nullptr) {
__ pop(edx); // Restore new target.
}
@@ -198,7 +196,8 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
- Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+ Variable* var =
+ (i == -1) ? info->scope()->receiver() : info->scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@@ -224,11 +223,12 @@ void FullCodeGenerator::Generate() {
// Register holding this function and new target are both trashed in case we
// bailout here. But since that can happen only when new target is not used
// and we allocate a context, the value of |function_in_register| is correct.
- PrepareForBailoutForId(BailoutId::FunctionContext(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionContext(),
+ BailoutState::NO_REGISTERS);
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
- Variable* this_function_var = scope()->this_function_var();
+ Variable* this_function_var = info->scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
if (!function_in_register) {
@@ -239,7 +239,7 @@ void FullCodeGenerator::Generate() {
}
// Possibly set up a local binding to the new target value.
- Variable* new_target_var = scope()->new_target_var();
+ Variable* new_target_var = info->scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
SetVar(new_target_var, edx, ebx, ecx);
@@ -247,7 +247,7 @@ void FullCodeGenerator::Generate() {
// Possibly allocate RestParameters
int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
+ Variable* rest_param = info->scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
if (!function_in_register) {
@@ -259,7 +259,7 @@ void FullCodeGenerator::Generate() {
SetVar(rest_param, eax, ebx, edx);
}
- Variable* arguments = scope()->arguments();
+ Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
// Arguments object must be allocated after the context object, in
// case the "arguments" or ".arguments" variables are in the context.
@@ -286,7 +286,8 @@ void FullCodeGenerator::Generate() {
}
// Visit the declarations and body.
- PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::FunctionEntry(),
+ BailoutState::NO_REGISTERS);
{
Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(scope()->declarations());
@@ -299,7 +300,8 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Stack check");
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+ PrepareForBailoutForId(BailoutId::Declarations(),
+ BailoutState::NO_REGISTERS);
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
@@ -366,11 +368,11 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
EmitProfilingCounterReset();
__ bind(&ok);
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
- PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->OsrEntryId(), BailoutState::NO_REGISTERS);
}
void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
@@ -420,6 +422,9 @@ void FullCodeGenerator::EmitReturnSequence() {
}
}
+void FullCodeGenerator::RestoreContext() {
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -480,10 +485,12 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
- if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+ DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
+ !lit->IsUndetectable());
+ if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
+ lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
- } else if (lit->IsTrue() || lit->IsJSObject()) {
+ } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ jmp(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
@@ -674,7 +681,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
- PrepareForBailout(expr, TOS_REG);
+ PrepareForBailout(expr, BailoutState::TOS_REGISTER);
if (should_normalize) {
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, NULL);
@@ -699,25 +706,21 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
- // If it was not possible to allocate the variable at compile time, we
- // need to "declare" it at runtime to make sure it actually exists in the
- // local context.
VariableProxy* proxy = declaration->proxy();
- VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case VariableLocation::GLOBAL:
- case VariableLocation::UNALLOCATED:
- globals_->Add(variable->name(), zone());
- globals_->Add(variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value(), zone());
+ case VariableLocation::UNALLOCATED: {
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+ globals_->Add(isolate()->factory()->undefined_value(), zone());
break;
-
+ }
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
@@ -725,35 +728,28 @@ void FullCodeGenerator::VisitVariableDeclaration(
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
// No write barrier since the hole value is in old space.
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
break;
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
__ push(Immediate(variable->name()));
- // VariableDeclaration nodes are always introduced in one of four modes.
- DCHECK(IsDeclaredVariableMode(mode));
- // Push initial value, if any.
- // Note: For variables we must not push an initial value (such as
- // 'undefined') because we may have a (legal) redeclaration and we
- // must not destroy the current value.
- if (hole_init) {
- __ push(Immediate(isolate()->factory()->the_hole_value()));
- } else {
- __ push(Immediate(Smi::FromInt(0))); // Indicates no initial value.
- }
- __ push(
- Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ __ CallRuntime(Runtime::kDeclareEvalVar);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -764,7 +760,9 @@ void FullCodeGenerator::VisitFunctionDeclaration(
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- globals_->Add(variable->name(), zone());
+ FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+ DCHECK(!slot.IsInvalid());
+ globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
Handle<SharedFunctionInfo> function =
Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
// Check for stack-overflow exception.
@@ -790,7 +788,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
__ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()),
result_register(), ecx, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
@@ -798,10 +796,13 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
PushOperand(variable->name());
VisitForStackValue(declaration->fun());
- PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
+ CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
+ PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
break;
}
+
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
@@ -810,19 +811,13 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
+ __ EmitLoadTypeFeedbackVector(eax);
+ __ Push(eax);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -830,7 +825,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
- PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@@ -879,7 +874,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ jmp(&skip, Label::kNear);
- PrepareForBailout(clause, TOS_REG);
+ PrepareForBailout(clause, BailoutState::TOS_REGISTER);
__ cmp(eax, isolate()->factory()->true_value());
__ j(not_equal, &next_test);
__ Drop(1);
@@ -907,12 +902,12 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ bind(clause->body_target());
- PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+ PrepareForBailoutForId(clause->EntryId(), BailoutState::NO_REGISTERS);
VisitStatements(clause->statements());
}
__ bind(nested_statement.break_label());
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
}
@@ -944,16 +939,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
+ RestoreContext();
__ bind(&done_convert);
- PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+ PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
__ push(eax);
- // Check cache validity in generated code. This is a fast case for
- // the JSObject::IsSimpleEnum cache validity checks. If we cannot
- // guarantee cache validity, call the runtime system to check cache
- // validity or get the property names in a fixed array.
- // Note: Proxies never have an enum cache, so will always take the
- // slow path.
+ // Check cache validity in generated code. If we cannot guarantee cache
+ // validity, call the runtime system to check cache validity or get the
+ // property names in a fixed array. Note: Proxies never have an enum cache,
+ // so will always take the slow path.
Label call_runtime, use_cache, fixed_array;
__ CheckEnumCache(&call_runtime);
@@ -964,7 +958,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&call_runtime);
__ push(eax);
__ CallRuntime(Runtime::kForInEnumerate);
- PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+ PrepareForBailoutForId(stmt->EnumId(), BailoutState::TOS_REGISTER);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
__ j(not_equal, &fixed_array);
@@ -996,11 +990,11 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register eax. Iterate through that.
__ bind(&fixed_array);
- __ push(Immediate(Smi::FromInt(1))); // Smi(1) undicates slow check
+ __ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
- PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
@@ -1011,9 +1005,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
- // Get the current entry of the array into register ebx.
+ // Get the current entry of the array into register eax.
__ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
+ __ mov(eax, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
// Get the expected map from the stack or a smi in the
// permanent slow case into register edx.
@@ -1022,8 +1016,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ebx, Operand(esp, 4 * kPointerSize));
+ __ cmp(edx, FieldOperand(ebx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// We need to filter the key, record slow-path here.
@@ -1032,29 +1026,27 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- // Convert the entry to a string or null if it isn't a property
- // anymore. If the property has been removed while iterating, we
- // just skip it.
- __ push(ecx); // Enumerable.
- __ push(ebx); // Current entry.
- __ CallRuntime(Runtime::kForInFilter);
- PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, loop_statement.continue_label());
- __ mov(ebx, eax);
+ // eax contains the key. The receiver in ebx is the second argument to the
+ // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // have the key or returns the name-converted key.
+ ForInFilterStub filter_stub(isolate());
+ __ CallStub(&filter_stub);
+ RestoreContext();
+ PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
+ __ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
+ loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
- // entry in register ebx.
+ // entry in register eax.
__ bind(&update_each);
- __ mov(result_register(), ebx);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->EachFeedbackSlot());
- PrepareForBailoutForId(stmt->AssignmentId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->AssignmentId(), BailoutState::NO_REGISTERS);
}
// Both Crankshaft and Turbofan expect BodyId to be right before stmt->body().
- PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->BodyId(), BailoutState::NO_REGISTERS);
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1071,7 +1063,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
DropOperands(5);
// Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+ PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@@ -1108,45 +1100,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
Register context = esi;
Register temp = edx;
- Scope* s = scope();
- while (s != NULL) {
- if (s->num_heap_slots() > 0) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
+ int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
+ for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
+ if (!s->NeedsContext()) continue;
+ if (s->calls_sloppy_eval()) {
+ // Check that extension is "the hole".
+ __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
+ Heap::kTheHoleValueRootIndex, slow);
}
- // If no outer scope calls eval, we do not need to check more
- // context extensions. If we have reached an eval scope, we check
- // all extensions from this point.
- if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
- s = s->outer_scope();
- }
-
- if (s != NULL && s->is_eval_scope()) {
- // Loop up the context chain. There is no frame effect so it is
- // safe to use raw labels here.
- Label next, fast;
- if (!context.is(temp)) {
- __ mov(temp, context);
- }
- __ bind(&next);
- // Terminate at native context.
- __ cmp(FieldOperand(temp, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->native_context_map()));
- __ j(equal, &fast, Label::kNear);
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(temp, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
// Load next context in chain.
- __ mov(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
- __ jmp(&next);
- __ bind(&fast);
+ __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
+ // Walk the rest of the chain without clobbering esi.
+ context = temp;
+ to_check--;
}
// All extension objects were empty and it is safe to use a normal global
@@ -1199,42 +1165,35 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET || local->mode() == CONST ||
- local->mode() == CONST_LEGACY) {
+ if (local->binding_needs_init()) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
- if (local->mode() == CONST_LEGACY) {
- __ mov(eax, isolate()->factory()->undefined_value());
- } else { // LET || CONST
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- }
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ } else {
+ __ jmp(done);
}
- __ jmp(done);
}
}
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
+#ifdef DEBUG
Variable* var = proxy->var();
DCHECK(var->IsUnallocatedOrGlobalSlot() ||
(var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
- __ mov(LoadDescriptor::ReceiverRegister(), NativeContextOperand());
- __ mov(LoadDescriptor::ReceiverRegister(),
- ContextOperand(LoadDescriptor::ReceiverRegister(),
- Context::EXTENSION_INDEX));
- __ mov(LoadDescriptor::NameRegister(), var->name());
- __ mov(LoadDescriptor::SlotRegister(),
+#endif
+ __ mov(LoadGlobalDescriptor::SlotRegister(),
Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
- CallLoadIC(typeof_mode);
+ CallLoadGlobalIC(typeof_mode);
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
- PrepareForBailoutForId(proxy->BeforeId(), NO_REGISTERS);
+ PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
@@ -1256,21 +1215,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
: "[ Stack variable");
if (NeedsHoleCheckForLoad(proxy)) {
- // Let and const need a read barrier.
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
Label done;
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- // Uninitialized legacy const bindings are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ mov(eax, isolate()->factory()->undefined_value());
- }
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&done);
context()->Plug(eax);
break;
@@ -1296,19 +1248,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
context()->Plug(eax);
break;
}
- }
-}
-
-void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
- Comment cmnt(masm_, "[ RegExpLiteral");
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ Move(eax, Immediate(Smi::FromInt(expr->literal_index())));
- __ Move(ecx, Immediate(expr->pattern()));
- __ Move(edx, Immediate(Smi::FromInt(expr->flags())));
- FastCloneRegExpStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(eax);
+ case VariableLocation::MODULE:
+ UNREACHABLE();
+ }
}
@@ -1348,8 +1291,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(edx, Immediate(Smi::FromInt(flags)));
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
__ CallStub(&stub);
+ RestoreContext();
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in eax.
@@ -1377,7 +1321,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(eax));
@@ -1385,7 +1330,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
- PrepareForBailoutForId(key->id(), NO_REGISTERS);
+ PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
if (NeedsHomeObject(value)) {
EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
@@ -1413,16 +1358,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property;
+ AccessorTable::Iterator it = accessor_table.lookup(key);
+ it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->setter = property;
}
break;
}
@@ -1441,6 +1390,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
+ PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1469,7 +1419,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- NO_REGISTERS);
+ BailoutState::NO_REGISTERS);
} else {
EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
VisitForStackValue(value);
@@ -1485,6 +1435,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(Smi::FromInt(NONE));
PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
+ PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ BailoutState::NO_REGISTERS);
} else {
DropOperands(3);
}
@@ -1542,7 +1494,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
- PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+ PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
bool result_saved = false; // Is the result saved to the stack?
ZoneList<Expression*>* subexprs = expr->values();
@@ -1572,7 +1524,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
CallIC(ic);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
// In case the array literal contains spread expressions it has two parts. The
@@ -1592,7 +1545,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
- PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
+ PrepareForBailoutForId(expr->GetIdForElement(array_index),
+ BailoutState::NO_REGISTERS);
}
if (result_saved) {
@@ -1607,7 +1561,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Comment cmnt(masm_, "[ Assignment");
- SetExpressionPosition(expr, INSERT_BREAK);
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
@@ -1672,23 +1625,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
- PrepareForBailout(expr->target(), TOS_REG);
+ PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(), TOS_REG);
+ PrepareForBailoutForId(property->LoadId(),
+ BailoutState::TOS_REGISTER);
break;
}
}
@@ -1707,7 +1664,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
// Deoptimization point in case the binary operation may have side effects.
- PrepareForBailout(expr->binary_operation(), TOS_REG);
+ PrepareForBailout(expr->binary_operation(), BailoutState::TOS_REGISTER);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1719,7 +1676,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
break;
case NAMED_PROPERTY:
@@ -1748,21 +1705,27 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
+ Label suspend, continuation, post_runtime, resume, exception;
__ jmp(&suspend);
__ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
+ // When we arrive here, eax holds the generator object.
__ RecordGeneratorContinuation();
- __ pop(ebx);
- __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
- __ j(not_equal, &resume);
- __ push(result_register());
+ __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
+ __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
+ STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
+ STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
+ __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
+ __ j(less, &resume);
+ __ Push(result_register());
+ __ j(greater, &exception);
EmitCreateIteratorResult(true);
EmitUnwindAndReturn();
+ __ bind(&exception);
+ __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
+ : Runtime::kThrow);
+
__ bind(&suspend);
OperandStackDepthIncrement(1); // Not popped on this path.
VisitForAccumulatorValue(expr->generator_object());
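
Note: the rewritten continuation above no longer pops a resume mode from the stack; it reads the mode from the generator object and dispatches on the enum order pinned down by the two STATIC_ASSERTs. A minimal standalone sketch of that three-way dispatch (the numeric enum values here are assumptions; only their ordering is guaranteed by the asserts):

#include <cstdio>

enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };  // assumed values

void OnContinuation(ResumeMode mode) {
  static_assert(kNext < kReturn, "dispatch relies on enum order");
  static_assert(kThrow > kReturn, "dispatch relies on enum order");
  if (mode < kReturn) {         // kNext: jump back into the generator body.
    std::puts("resume");
  } else if (mode > kReturn) {  // kThrow: rethrow the input value.
    std::puts("throw");
  } else {                      // kReturn: wrap the value in {value, done}.
    std::puts("return");
  }
}

int main() {
  OnContinuation(kNext);
  OnContinuation(kReturn);
  OnContinuation(kThrow);
  return 0;
}
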
@@ -1778,8 +1741,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ j(equal, &post_runtime);
__ push(eax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
__ bind(&post_runtime);
PopOperand(result_register());
EmitReturnSequence();
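
Note: several call sites in this file replace the same two-instruction context reload with a RestoreContext() helper. Judging from the removed lines above, the helper should expand to exactly that reload on ia32 (a sketch based on the removed code, not the helper's actual definition):

void FullCodeGenerator::RestoreContext() {
  __ mov(context_register(),
         Operand(ebp, StandardFrameConstants::kContextOffset));
}
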
@@ -1788,101 +1750,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
context()->Plug(result_register());
}
-
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
- JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in eax, and is ultimately read by the resumed generator, as
- // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
- // is read to throw the value when the resumed generator is already closed.
- // ebx will hold the generator object until the activation has been resumed.
- VisitForStackValue(generator);
- VisitForAccumulatorValue(value);
- PopOperand(ebx);
-
- // Store input value into generator object.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
- __ mov(ecx, result_register());
- __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
- kDontSaveFPRegs);
-
- // Load suspended function and context.
- __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
- __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
-
- // Push receiver.
- __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset));
-
- // Push holes for arguments to generator function. Since the parser forced
- // context allocation for any variables in generators, the actual argument
- // values have already been copied into the context and these dummy values
- // will never be used.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(ecx, isolate()->factory()->the_hole_value());
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ sub(edx, Immediate(Smi::FromInt(1)));
- __ j(carry, &push_frame);
- __ push(ecx);
- __ jmp(&push_argument_holes);
-
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- Label resume_frame, done;
- __ bind(&push_frame);
- __ call(&resume_frame);
- __ jmp(&done);
- __ bind(&resume_frame);
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS Function.
-
- // Load the operand stack size.
- __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
- __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset));
- __ SmiUntag(edx);
-
- // If we are sending a value and there is no operand stack, we can jump back
- // in directly.
- if (resume_mode == JSGeneratorObject::NEXT) {
- Label slow_resume;
- __ cmp(edx, Immediate(0));
- __ j(not_zero, &slow_resume);
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(ecx);
- __ add(edx, ecx);
- __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ jmp(edx);
- __ bind(&slow_resume);
- }
-
- // Otherwise, we push holes for the operand stack and call the runtime to fix
- // up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ sub(edx, Immediate(1));
- __ j(carry, &call_resume);
- __ push(ecx);
- __ jmp(&push_operand_holes);
- __ bind(&call_resume);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ push(ebx);
- __ push(result_register());
- __ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject);
- // Not reached: the runtime call returns elsewhere.
- __ Abort(kGeneratorFailedToResume);
-
- __ bind(&done);
- context()->Plug(result_register());
-}
-
void FullCodeGenerator::PushOperand(MemOperand operand) {
OperandStackDepthIncrement(1);
__ Push(operand);
@@ -1902,7 +1769,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate,
+ NO_ALLOCATION_FLAGS);
__ jmp(&done_allocate, Label::kNear);
__ bind(&allocate);
@@ -2183,34 +2051,25 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitLoadStoreICSlot(slot);
CallStoreIC();
- } else if (var->mode() == LET && op != Token::INIT) {
- // Non-initializing assignment to let variable needs a write barrier.
- DCHECK(!var->IsLookupSlot());
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label assign;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &assign, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&assign);
- EmitStoreToStackLocalOrContextSlot(var, location);
-
- } else if (var->mode() == CONST && op != Token::INIT) {
- // Assignment to const variable needs a write barrier.
+ } else if (IsLexicalVariableMode(var->mode()) && op != Token::INIT) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label const_error;
MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &const_error, Label::kNear);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- __ bind(&const_error);
- __ CallRuntime(Runtime::kThrowConstAssignError);
-
+ // Perform an initialization check for lexically declared variables.
+ if (var->binding_needs_init()) {
+ Label assign;
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &assign, Label::kNear);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError);
+ __ bind(&assign);
+ }
+ if (var->mode() == CONST) {
+ __ CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ }
} else if (var->is_this() && var->mode() == CONST && op == Token::INIT) {
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
@@ -2224,8 +2083,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
- } else if (!var->is_const_mode() ||
- (var->mode() == CONST && op == Token::INIT)) {
+ } else if (!var->is_const_mode() || op == Token::INIT) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(Immediate(var->name()));
@@ -2247,25 +2105,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (var->mode() == CONST_LEGACY && op == Token::INIT) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(eax);
- __ push(esi);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip, Label::kNear);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else {
DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT);
if (is_strict(language_mode())) {
@@ -2288,7 +2127,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2334,44 +2173,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::VisitProperty(Property* expr) {
- Comment cmnt(masm_, "[ Property");
- SetExpressionPosition(expr);
-
- Expression* key = expr->key();
-
- if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), result_register());
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
- } else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- PopOperand(LoadDescriptor::ReceiverRegister()); // Object.
- __ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
- }
- PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
@@ -2392,7 +2194,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
if (callee->IsVariableProxy()) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
- PrepareForBailout(callee, NO_REGISTERS);
+ PrepareForBailout(callee, BailoutState::NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
@@ -2404,7 +2206,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
@@ -2439,6 +2242,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2463,7 +2267,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
__ mov(LoadDescriptor::NameRegister(), eax);
EmitKeyedPropertyLoad(callee->AsProperty());
- PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+ PrepareForBailoutForId(callee->AsProperty()->LoadId(),
+ BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
PushOperand(Operand(esp, 0));
@@ -2495,6 +2300,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - home_object
// - key
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2514,7 +2320,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
VisitForStackValue(args->at(i));
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr, expr->tail_call_mode());
if (expr->tail_call_mode() == TailCallMode::kAllow) {
if (FLAG_trace) {
@@ -2535,15 +2341,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
+ RestoreContext();
context()->DropAndPlug(1, eax);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
+ int arg_count = expr->arguments()->length();
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
@@ -2560,6 +2363,9 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push the start position of the scope the call resides in.
__ push(Immediate(Smi::FromInt(scope()->start_position())));
+ // Push the source position of the eval call.
+ __ push(Immediate(Smi::FromInt(expr->position())));
+
// Do the runtime call.
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
@@ -2582,7 +2388,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
PushOperand(eax); // Function.
PushOperand(edx); // Receiver.
- PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2606,7 +2412,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
// function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2622,12 +2428,12 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(arg_count);
+ EmitResolvePossiblyDirectEval(expr);
// Touch up the stack with the resolved function.
__ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
- PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
@@ -2637,8 +2443,7 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->DropAndPlug(1, eax);
}
@@ -2677,9 +2482,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
- PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
+ RestoreContext();
context()->Plug(eax);
}
@@ -2720,9 +2524,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
context()->Plug(eax);
}
@@ -2908,94 +2710,6 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0)); // Load the object.
-
- Label done;
- // If the object is a smi return the object.
- __ JumpIfSmi(eax, &done, Label::kNear);
- // If the object is not a value type, return the object.
- __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
- __ j(not_equal, &done, Label::kNear);
- __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = eax;
- Register index = ebx;
- Register value = ecx;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
-
- PopOperand(value);
- PopOperand(index);
-
- if (FLAG_debug_code) {
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- }
-
- __ SmiUntag(value);
- __ SmiUntag(index);
-
- if (FLAG_debug_code) {
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- }
-
- __ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
- value);
- context()->Plug(string);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(3, args->length());
-
- Register string = eax;
- Register index = ebx;
- Register value = ecx;
-
- VisitForStackValue(args->at(0)); // index
- VisitForStackValue(args->at(1)); // value
- VisitForAccumulatorValue(args->at(2)); // string
- PopOperand(value);
- PopOperand(index);
-
- if (FLAG_debug_code) {
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, kNonSmiValue);
- __ SmiUntag(index);
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
- __ SmiTag(index);
- }
-
- __ SmiUntag(value);
- // No need to untag a smi for two-byte addressing.
- __ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
- value);
- context()->Plug(string);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3031,13 +2745,8 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range);
generator.GenerateFast(masm_);
__ jmp(&done);
@@ -3061,54 +2770,6 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Register object = ebx;
- Register index = eax;
- Register scratch = edx;
- Register result = eax;
-
- PopOperand(object);
-
- Label need_conversion;
- Label index_out_of_range;
- Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
- generator.GenerateFast(masm_);
- __ jmp(&done);
-
- __ bind(&index_out_of_range);
- // When the index is out of range, the spec requires us to return
- // the empty string.
- __ Move(result, Immediate(isolate()->factory()->empty_string()));
- __ jmp(&done);
-
- __ bind(&need_conversion);
- // Move smi zero into the result register, which will trigger
- // conversion.
- __ Move(result, Immediate(Smi::FromInt(0)));
- __ jmp(&done);
-
- NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
-
- __ bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3116,7 +2777,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
for (Expression* const arg : *args) {
VisitForStackValue(arg);
}
- PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to edi.
int const argc = args->length() - 2;
__ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
@@ -3124,8 +2785,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
__ mov(eax, Immediate(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
// Discard the function left on TOS.
context()->DropAndPlug(1, eax);
}
@@ -3179,13 +2839,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
context()->Plug(eax);
}
-void FullCodeGenerator::EmitGetOrdinaryHasInstance(CallRuntime* expr) {
- DCHECK_EQ(0, expr->arguments()->length());
- __ mov(eax, NativeContextOperand());
- __ mov(eax, ContextOperand(eax, Context::ORDINARY_HAS_INSTANCE_INDEX));
- context()->Plug(eax);
-}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3204,7 +2857,8 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime,
+ NO_ALLOCATION_FLAGS);
__ mov(ebx, NativeContextOperand());
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
@@ -3245,9 +2899,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
-
- // Restore context register.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ RestoreContext();
}
@@ -3269,7 +2921,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
// "delete this" is allowed.
- bool is_this = var->HasThisName(isolate());
+ bool is_this = var->is_this();
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
__ mov(eax, NativeContextOperand());
@@ -3332,7 +2984,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_true);
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
- PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeTrueId(),
+ BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->true_value());
} else {
@@ -3340,7 +2993,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
- PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+ PrepareForBailoutForId(expr->MaterializeFalseId(),
+ BailoutState::NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->false_value());
} else {
@@ -3439,9 +3093,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// We need a second deoptimization point after loading the value
// in case evaluating the property load may have a side effect.
if (assign_type == VARIABLE) {
- PrepareForBailout(expr->expression(), TOS_REG);
+ PrepareForBailout(expr->expression(), BailoutState::TOS_REGISTER);
} else {
- PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+ PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
}
// Inline smi case if we are in a loop.
@@ -3494,9 +3148,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Convert old value into a number.
- ToNumberStub convert_stub(isolate());
- __ CallStub(&convert_stub);
- PrepareForBailoutForId(expr->ToNumberId(), TOS_REG);
+ __ Call(isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
+ RestoreContext();
+ PrepareForBailoutForId(expr->ToNumberId(), BailoutState::TOS_REGISTER);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3544,7 +3198,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context.Plug(eax);
}
// For all contexts except EffectContext we have the result on
@@ -3556,7 +3211,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(),
+ BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
break;
@@ -3566,7 +3222,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3578,6 +3234,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_SUPER_PROPERTY: {
EmitNamedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3589,6 +3246,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_SUPER_PROPERTY: {
EmitKeyedSuperPropertyStore(prop);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3605,7 +3263,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
CallIC(ic);
- PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+ PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
if (expr->is_postfix()) {
// Result is on the stack
if (!context()->IsEffect()) {
@@ -3701,7 +3359,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- SetExpressionPosition(expr);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
@@ -3721,7 +3378,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- CallRuntimeWithOperands(Runtime::kHasProperty);
+ SetExpressionPosition(expr);
+ EmitHasProperty();
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -3729,6 +3387,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
PopOperand(edx);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
@@ -3740,6 +3399,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
+ SetExpressionPosition(expr);
Condition cc = CompareIC::ComputeCondition(op);
PopOperand(edx);
@@ -3827,7 +3487,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
- Scope* closure_scope = scope()->ClosureScope();
+ DeclarationScope* closure_scope = scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index 991e4c3711..2d18488a78 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -75,7 +75,7 @@ void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
Object* FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
- DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
+ DCHECK(addr < NumberToSize(array_buffer->byte_length()));
void* backing_store = array_buffer->backing_store();
int32_t* p =
@@ -84,7 +84,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
if (*p != value) {
- return Smi::FromInt(Result::kNotEqual);
+ return isolate->heap()->not_equal();
}
FutexWaitListNode* node = isolate->futex_wait_list_node();
@@ -142,7 +142,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
// be false, so we'll loop and then check interrupts.
if (interrupted) {
Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
- if (interrupt_object->IsException()) {
+ if (interrupt_object->IsException(isolate)) {
result = interrupt_object;
mutex_.Pointer()->Lock();
break;
@@ -157,7 +157,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
}
if (!node->waiting_) {
- result = Smi::FromInt(Result::kOk);
+ result = isolate->heap()->ok();
break;
}
@@ -165,7 +165,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
if (use_timeout) {
current_time = base::TimeTicks::Now();
if (current_time >= timeout_time) {
- result = Smi::FromInt(Result::kTimedOut);
+ result = isolate->heap()->timed_out();
break;
}
@@ -191,7 +191,7 @@ Object* FutexEmulation::Wait(Isolate* isolate,
Object* FutexEmulation::Wake(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int num_waiters_to_wake) {
- DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
+ DCHECK(addr < NumberToSize(array_buffer->byte_length()));
int waiters_woken = 0;
void* backing_store = array_buffer->backing_store();
@@ -213,48 +213,10 @@ Object* FutexEmulation::Wake(Isolate* isolate,
}
-Object* FutexEmulation::WakeOrRequeue(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- size_t addr, int num_waiters_to_wake,
- int32_t value, size_t addr2) {
- DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
- DCHECK(addr2 < NumberToSize(isolate, array_buffer->byte_length()));
-
- void* backing_store = array_buffer->backing_store();
- int32_t* p =
- reinterpret_cast<int32_t*>(static_cast<int8_t*>(backing_store) + addr);
-
- base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
- if (*p != value) {
- return Smi::FromInt(Result::kNotEqual);
- }
-
- // Wake |num_waiters_to_wake|
- int waiters_woken = 0;
- FutexWaitListNode* node = wait_list_.Pointer()->head_;
- while (node) {
- if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
- if (num_waiters_to_wake > 0) {
- node->waiting_ = false;
- node->cond_.NotifyOne();
- --num_waiters_to_wake;
- waiters_woken++;
- } else {
- node->wait_addr_ = addr2;
- }
- }
-
- node = node->next_;
- }
-
- return Smi::FromInt(waiters_woken);
-}
-
-
Object* FutexEmulation::NumWaitersForTesting(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t addr) {
- DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
+ DCHECK(addr < NumberToSize(array_buffer->byte_length()));
void* backing_store = array_buffer->backing_store();
base::LockGuard<base::Mutex> lock_guard(mutex_.Pointer());
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
index 9949bdf44f..a0e2b18bdc 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/futex-emulation.h
@@ -21,7 +21,7 @@
// variables for consistency.
//
// This is used by the Futex API defined in the SharedArrayBuffer draft spec,
-// found here: https://github.com/lars-t-hansen/ecmascript_sharedmem
+// found here: https://github.com/tc39/ecmascript_sharedmem
namespace v8 {
@@ -81,18 +81,11 @@ class FutexWaitList {
class FutexEmulation : public AllStatic {
public:
- // These must match the values in src/harmony-atomics.js
- enum Result {
- kOk = 0,
- kNotEqual = -1,
- kTimedOut = -2,
- };
-
- // Check that array_buffer[addr] == value, and return kNotEqual if not. If
+ // Check that array_buffer[addr] == value, and return "not-equal" if not. If
// they are equal, block execution on |isolate|'s thread until woken via
// |Wake|, or when the time given in |rel_timeout_ms| elapses. Note that
// |rel_timeout_ms| can be Infinity.
- // If woken, return kOk, otherwise return kTimedOut. The initial check and
+ // If woken, return "ok", otherwise return "timed-out". The initial check and
// the decision to wait happen atomically.
static Object* Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int32_t value, double rel_timeout_ms);
@@ -103,16 +96,6 @@ class FutexEmulation : public AllStatic {
static Object* Wake(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int num_waiters_to_wake);
- // Check that array_buffer[addr] == value, and return kNotEqual if not. If
- // they are equal, wake |num_waiters_to_wake| threads that are waiting on the
- // given |addr|. The rest of the waiters will continue to wait, but will now
- // be waiting on |addr2| instead of |addr|. The return value is the number of
- // woken waiters or kNotEqual as described above.
- static Object* WakeOrRequeue(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer, size_t addr,
- int num_waiters_to_wake, int32_t value,
- size_t addr2);
-
// Return the number of threads waiting on |addr|. Should only be used for
// testing.
static Object* NumWaitersForTesting(Isolate* isolate,
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 0df5975b54..a3af1846db 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -4,6 +4,8 @@
#include "src/gdb-jit.h"
+#include <memory>
+
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
@@ -1015,7 +1017,7 @@ class CodeDescription BASE_EMBEDDED {
}
#endif
- base::SmartArrayPointer<char> GetFilename() {
+ std::unique_ptr<char[]> GetFilename() {
return String::cast(script()->name())->ToCString();
}
@@ -2012,17 +2014,19 @@ static uint32_t HashCodeAddress(Address addr) {
return static_cast<uint32_t>((offset >> kCodeAlignmentBits) * kGoldenRatio);
}
-
-static HashMap* GetLineMap() {
- static HashMap* line_map = NULL;
- if (line_map == NULL) line_map = new HashMap(&HashMap::PointersMatch);
+static base::HashMap* GetLineMap() {
+ static base::HashMap* line_map = NULL;
+ if (line_map == NULL) {
+ line_map = new base::HashMap(&base::HashMap::PointersMatch);
+ }
return line_map;
}
static void PutLineInfo(Address addr, LineInfo* info) {
- HashMap* line_map = GetLineMap();
- HashMap::Entry* e = line_map->LookupOrInsert(addr, HashCodeAddress(addr));
+ base::HashMap* line_map = GetLineMap();
+ base::HashMap::Entry* e =
+ line_map->LookupOrInsert(addr, HashCodeAddress(addr));
if (e->value != NULL) delete static_cast<LineInfo*>(e->value);
e->value = info;
}
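
Note: GetFilename() now hands ownership to the caller through std::unique_ptr<char[]> instead of base::SmartArrayPointer. A minimal illustration of the ownership transfer this enables (names here are illustrative, not V8's):

#include <cstdio>
#include <cstring>
#include <memory>

std::unique_ptr<char[]> ToCString(const char* s) {
  auto buf = std::make_unique<char[]>(std::strlen(s) + 1);
  std::strcpy(buf.get(), s);
  return buf;  // ownership moves to the caller
}

int main() {
  std::unique_ptr<char[]> name = ToCString("script.js");
  std::puts(name.get());
  return 0;
}  // the buffer is delete[]'d automatically here
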
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index ed9caa92a9..ea46344bd3 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -82,7 +82,6 @@ class GlobalHandles::Node {
index_ = static_cast<uint8_t>(index);
DCHECK(static_cast<int>(index_) == index);
set_state(FREE);
- set_weakness_type(NORMAL_WEAK);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = *first_free;
*first_free = this;
@@ -195,16 +194,26 @@ class GlobalHandles::Node {
bool IsInUse() const { return state() != FREE; }
+ bool IsPendingPhantomCallback() const {
+ return state() == PENDING &&
+ (weakness_type() == PHANTOM_WEAK ||
+ weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
+ }
+
+ bool IsPendingPhantomResetHandle() const {
+ return state() == PENDING && weakness_type() == PHANTOM_WEAK_RESET_HANDLE;
+ }
+
bool IsRetainer() const {
return state() != FREE &&
- !(state() == NEAR_DEATH && weakness_type() != NORMAL_WEAK);
+ !(state() == NEAR_DEATH && weakness_type() != FINALIZER_WEAK);
}
bool IsStrongRetainer() const { return state() == NORMAL; }
bool IsWeakRetainer() const {
return state() == WEAK || state() == PENDING ||
- (state() == NEAR_DEATH && weakness_type() == NORMAL_WEAK);
+ (state() == NEAR_DEATH && weakness_type() == FINALIZER_WEAK);
}
void MarkPending() {
@@ -250,16 +259,6 @@ class GlobalHandles::Node {
parameter_or_next_free_.next_free = value;
}
- void MakeWeak(void* parameter, WeakCallback weak_callback) {
- DCHECK(weak_callback != nullptr);
- DCHECK(IsInUse());
- CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
- set_state(WEAK);
- set_weakness_type(NORMAL_WEAK);
- set_parameter(parameter);
- weak_callback_ = weak_callback;
- }
-
void MakeWeak(void* parameter,
WeakCallbackInfo<void>::Callback phantom_callback,
v8::WeakCallbackType type) {
@@ -272,11 +271,23 @@ class GlobalHandles::Node {
set_weakness_type(PHANTOM_WEAK);
break;
case v8::WeakCallbackType::kInternalFields:
- set_weakness_type(PHANTOM_WEAK_2_INTERNAL_FIELDS);
- break;
+ set_weakness_type(PHANTOM_WEAK_2_INTERNAL_FIELDS);
+ break;
+ case v8::WeakCallbackType::kFinalizer:
+ set_weakness_type(FINALIZER_WEAK);
+ break;
}
set_parameter(parameter);
- weak_callback_ = reinterpret_cast<WeakCallback>(phantom_callback);
+ weak_callback_ = phantom_callback;
+ }
+
+ void MakeWeak(Object*** location_addr) {
+ DCHECK(IsInUse());
+ CHECK_NE(object_, reinterpret_cast<Object*>(kGlobalHandleZapValue));
+ set_state(WEAK);
+ set_weakness_type(PHANTOM_WEAK_RESET_HANDLE);
+ set_parameter(location_addr);
+ weak_callback_ = nullptr;
}
void* ClearWeakness() {
@@ -293,6 +304,7 @@ class GlobalHandles::Node {
DCHECK(weakness_type() == PHANTOM_WEAK ||
weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
DCHECK(state() == PENDING);
+ DCHECK(weak_callback_ != nullptr);
void* internal_fields[v8::kInternalFieldsInWeakCallback] = {nullptr,
nullptr};
@@ -317,6 +329,15 @@ class GlobalHandles::Node {
set_state(NEAR_DEATH);
}
+ void ResetPhantomHandle() {
+ DCHECK(weakness_type() == PHANTOM_WEAK_RESET_HANDLE);
+ DCHECK(state() == PENDING);
+ DCHECK(weak_callback_ == nullptr);
+ Object*** handle = reinterpret_cast<Object***>(parameter());
+ *handle = nullptr;
+ Release();
+ }
+
bool PostGarbageCollectionProcessing(Isolate* isolate) {
// Handles only weak handles (not phantom) that are dying.
if (state() != Node::PENDING) return false;
@@ -332,17 +353,17 @@ class GlobalHandles::Node {
ExternalOneByteString::cast(object_)->resource() != NULL);
DCHECK(!object_->IsExternalTwoByteString() ||
ExternalTwoByteString::cast(object_)->resource() != NULL);
- if (weakness_type() != NORMAL_WEAK) return false;
+ if (weakness_type() != FINALIZER_WEAK) {
+ return false;
+ }
// Leaving V8.
VMState<EXTERNAL> vmstate(isolate);
HandleScope handle_scope(isolate);
- Object** object = location();
- Handle<Object> handle(*object, isolate);
- v8::WeakCallbackData<v8::Value, void> data(
- reinterpret_cast<v8::Isolate*>(isolate), parameter(),
- v8::Utils::ToLocal(handle));
- set_parameter(NULL);
+ void* internal_fields[v8::kInternalFieldsInWeakCallback] = {nullptr,
+ nullptr};
+ v8::WeakCallbackInfo<void> data(reinterpret_cast<v8::Isolate*>(isolate),
+ parameter(), internal_fields, nullptr);
weak_callback_(data);
// Absence of explicit cleanup or revival of weak handle
@@ -384,7 +405,7 @@ class GlobalHandles::Node {
uint8_t flags_;
// Handle-specific callback - might be a weak reference in disguise.
- WeakCallback weak_callback_;
+ WeakCallbackInfo<void>::Callback weak_callback_;
// Provided data for callback. In FREE state, this is used for
// the free list link.
@@ -534,6 +555,7 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
}
void RunInternal() override {
+ TRACE_EVENT0("v8", "V8.GCPhantomHandleProcessingCallback");
isolate()->heap()->CallGCPrologueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
@@ -547,7 +569,6 @@ class GlobalHandles::PendingPhantomCallbacksSecondPassTask
DISALLOW_COPY_AND_ASSIGN(PendingPhantomCallbacksSecondPassTask);
};
-
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
number_of_global_handles_(0),
@@ -555,9 +576,9 @@ GlobalHandles::GlobalHandles(Isolate* isolate)
first_used_block_(NULL),
first_free_(NULL),
post_gc_processing_count_(0),
+ number_of_phantom_handle_resets_(0),
object_group_connections_(kObjectGroupConnectionsCapacity) {}
-
GlobalHandles::~GlobalHandles() {
NodeBlock* block = first_block_;
while (block != NULL) {
@@ -599,12 +620,6 @@ void GlobalHandles::Destroy(Object** location) {
}
-void GlobalHandles::MakeWeak(Object** location, void* parameter,
- WeakCallback weak_callback) {
- Node::FromLocation(location)->MakeWeak(parameter, weak_callback);
-}
-
-
typedef v8::WeakCallbackInfo<void>::Callback GenericCallback;
@@ -614,6 +629,9 @@ void GlobalHandles::MakeWeak(Object** location, void* parameter,
Node::FromLocation(location)->MakeWeak(parameter, phantom_callback, type);
}
+void GlobalHandles::MakeWeak(Object*** location_addr) {
+ Node::FromLocation(*location_addr)->MakeWeak(location_addr);
+}
void* GlobalHandles::ClearWeakness(Object** location) {
return Node::FromLocation(location)->ClearWeakness();
@@ -644,15 +662,18 @@ bool GlobalHandles::IsWeak(Object** location) {
return Node::FromLocation(location)->IsWeak();
}
+DISABLE_CFI_PERF
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
if (node->IsWeakRetainer()) {
// Pending weak phantom handles die immediately. Everything else survives.
- if (node->state() == Node::PENDING &&
- node->weakness_type() != NORMAL_WEAK) {
- node->CollectPhantomCallbackData(isolate(),
- &pending_phantom_callbacks_);
+ if (node->IsPendingPhantomResetHandle()) {
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
+ } else if (node->IsPendingPhantomCallback()) {
+ node->CollectPhantomCallbackData(isolate(),
+ &pending_phantom_callbacks_);
} else {
v->VisitPointer(node->location());
}
@@ -710,8 +731,10 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
if ((node->is_independent() || node->is_partially_dependent()) &&
node->IsWeakRetainer()) {
// Pending weak phantom handles die immediately. Everything else survives.
- if (node->state() == Node::PENDING &&
- node->weakness_type() != NORMAL_WEAK) {
+ if (node->IsPendingPhantomResetHandle()) {
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
+ } else if (node->IsPendingPhantomCallback()) {
node->CollectPhantomCallbackData(isolate(),
&pending_phantom_callbacks_);
} else {
@@ -753,8 +776,10 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
if ((node->is_independent() || !node->is_active()) &&
node->IsWeakRetainer()) {
// Pending weak phantom handles die immediately. Everything else survives.
- if (node->state() == Node::PENDING &&
- node->weakness_type() != NORMAL_WEAK) {
+ if (node->IsPendingPhantomResetHandle()) {
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
+ } else if (node->IsPendingPhantomCallback()) {
node->CollectPhantomCallbackData(isolate(),
&pending_phantom_callbacks_);
} else {
@@ -765,6 +790,7 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
}
+DISABLE_CFI_PERF
bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
WeakSlotCallbackWithHeap can_skip) {
ComputeObjectGroupsAndImplicitReferences();
@@ -855,7 +881,7 @@ void ObjectGroupsTracer::PrintObject(Object* object) {
PrintInternalFields(js_object);
PrintF(" ] }\n");
} else {
- PrintF("object of unexpected type: %p\n", object);
+ PrintF("object of unexpected type: %p\n", static_cast<void*>(object));
}
}
@@ -867,7 +893,7 @@ void ObjectGroupsTracer::PrintConstructor(JSObject* js_object) {
if (name->length() == 0) name = constructor->shared()->inferred_name();
PrintF("%s", name->ToCString().get());
- } else if (maybe_constructor->IsNull()) {
+ } else if (maybe_constructor->IsNull(isolate_)) {
if (js_object->IsOddball()) {
PrintF("<oddball>");
} else {
@@ -883,12 +909,12 @@ void ObjectGroupsTracer::PrintInternalFields(JSObject* js_object) {
if (i != 0) {
PrintF(", ");
}
- PrintF("%p", js_object->GetInternalField(i));
+ PrintF("%p", static_cast<void*>(js_object->GetInternalField(i)));
}
}
void ObjectGroupsTracer::PrintObjectGroup(ObjectGroup* group) {
- PrintIsolate(isolate_, "ObjectGroup (size: %lu)\n", group->length);
+ PrintIsolate(isolate_, "ObjectGroup (size: %" PRIuS ")\n", group->length);
Object*** objects = group->objects;
for (size_t i = 0; i < group->length; ++i) {
@@ -898,7 +924,7 @@ void ObjectGroupsTracer::PrintObjectGroup(ObjectGroup* group) {
}
void ObjectGroupsTracer::PrintImplicitRefGroup(ImplicitRefGroup* group) {
- PrintIsolate(isolate_, "ImplicitRefGroup (children count: %lu)\n",
+ PrintIsolate(isolate_, "ImplicitRefGroup (children count: %" PRIuS ")\n",
group->length);
PrintIsolate(isolate_, " - Parent: ");
PrintObject(*(group->parent));
@@ -1122,6 +1148,7 @@ void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
}
+DISABLE_CFI_PERF
void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsRetainer()) {
@@ -1131,6 +1158,7 @@ void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
}
+DISABLE_CFI_PERF
void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
@@ -1141,6 +1169,7 @@ void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
}
+DISABLE_CFI_PERF
void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
@@ -1152,6 +1181,7 @@ void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
}
+DISABLE_CFI_PERF
void GlobalHandles::IterateWeakRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
@@ -1223,8 +1253,7 @@ void GlobalHandles::PrintStats() {
}
PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %" V8_SIZET_PREFIX V8_PTR_PREFIX "dB\n",
- total * sizeof(Node));
+ PrintF(" allocated memory = %" PRIuS "B\n", total * sizeof(Node));
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index ac8487b19f..24a2273e36 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -96,19 +96,21 @@ struct ObjectGroupRetainerInfo {
RetainedObjectInfo* info;
};
-
enum WeaknessType {
- NORMAL_WEAK, // Embedder gets a handle to the dying object.
+ // Embedder gets a handle to the dying object.
+ FINALIZER_WEAK,
// In the following cases, the embedder gets the parameter they passed in
// earlier, and the first 0 or 2 internal fields. Note that the internal
// fields must contain aligned non-V8 pointers. Getting pointers to V8
// objects through this interface would be GC unsafe so in that case the
// embedder gets a null pointer instead.
PHANTOM_WEAK,
- PHANTOM_WEAK_2_INTERNAL_FIELDS
+ PHANTOM_WEAK_2_INTERNAL_FIELDS,
+ // The handle is automatically reset by the garbage collector when
+ // the object is no longer reachable.
+ PHANTOM_WEAK_RESET_HANDLE
};
-
class GlobalHandles {
public:
~GlobalHandles();
@@ -122,14 +124,6 @@ class GlobalHandles {
// Destroy a global handle.
static void Destroy(Object** location);
- typedef WeakCallbackData<v8::Value, void>::Callback WeakCallback;
-
- // For a phantom weak reference, the callback does not have access to the
- // dying object. Phantom weak references are preferred because they allow
- // memory to be reclaimed in one GC cycle rather than two. However, for
- // historical reasons the default is non-phantom.
- enum PhantomState { Nonphantom, Phantom };
-
// Make the global handle weak and set the callback parameter for the
// handle. When the garbage collector recognizes that only weak global
// handles point to an object the callback function is invoked (for each
@@ -140,14 +134,11 @@ class GlobalHandles {
// before the callback is invoked, but the handle can still be identified
// in the callback by using the location() of the handle.
static void MakeWeak(Object** location, void* parameter,
- WeakCallback weak_callback);
-
- // It would be nice to template this one, but it's really hard to get
- // the template instantiator to work right if you do.
- static void MakeWeak(Object** location, void* parameter,
WeakCallbackInfo<void>::Callback weak_callback,
v8::WeakCallbackType type);
+ static void MakeWeak(Object*** location_addr);
+
void RecordStats(HeapStats* stats);
// Returns the current number of weak handles.
@@ -162,6 +153,14 @@ class GlobalHandles {
return number_of_global_handles_;
}
+ size_t NumberOfPhantomHandleResets() {
+ return number_of_phantom_handle_resets_;
+ }
+
+ void ResetNumberOfPhantomHandleResets() {
+ number_of_phantom_handle_resets_ = 0;
+ }
+
// Clear the weakness of a global handle.
static void* ClearWeakness(Object** location);
@@ -344,6 +343,8 @@ class GlobalHandles {
int post_gc_processing_count_;
+ size_t number_of_phantom_handle_resets_;
+
// Object groups and implicit references, public and more efficient
// representation.
List<ObjectGroup*> object_groups_;
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index e7ac2b9f7f..0d02f77fd6 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -124,11 +124,6 @@ const int kFloatSize = sizeof(float); // NOLINT
const int kDoubleSize = sizeof(double); // NOLINT
const int kIntptrSize = sizeof(intptr_t); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
-#if V8_TARGET_ARCH_ARM64
-const int kFrameAlignmentInBytes = 2 * kPointerSize;
-#else
-const int kFrameAlignmentInBytes = kPointerSize;
-#endif
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
const int kRegisterSize = kPointerSize + kPointerSize;
#else
@@ -155,12 +150,21 @@ const bool kRequiresCodeRange = true;
// encoded immediate, the addresses have to be in range of 256MB aligned
// region. Used only for large object space.
const size_t kMaximalCodeRangeSize = 256 * MB;
+const size_t kCodeRangeAreaAlignment = 256 * MB;
+#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+const size_t kMaximalCodeRangeSize = 512 * MB;
+const size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
#else
const size_t kMaximalCodeRangeSize = 512 * MB;
+const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#endif
#if V8_OS_WIN
const size_t kMinimumCodeRangeSize = 4 * MB;
const size_t kReservedCodeRangePages = 1;
+// On PPC Linux the OS page size is 4MB
+#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+const size_t kMinimumCodeRangeSize = 12 * MB;
+const size_t kReservedCodeRangePages = 0;
#else
const size_t kMinimumCodeRangeSize = 3 * MB;
const size_t kReservedCodeRangePages = 0;
@@ -174,15 +178,25 @@ const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
const bool kRequiresCodeRange = true;
const size_t kMaximalCodeRangeSize = 256 * MB;
const size_t kMinimumCodeRangeSize = 3 * MB;
-const size_t kReservedCodeRangePages = 0;
+const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
+#elif V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
+const bool kRequiresCodeRange = false;
+const size_t kMaximalCodeRangeSize = 0 * MB;
+const size_t kMinimumCodeRangeSize = 0 * MB;
+const size_t kCodeRangeAreaAlignment = 64 * KB; // OS page on PPC Linux
#else
const bool kRequiresCodeRange = false;
const size_t kMaximalCodeRangeSize = 0 * MB;
const size_t kMinimumCodeRangeSize = 0 * MB;
-const size_t kReservedCodeRangePages = 0;
+const size_t kCodeRangeAreaAlignment = 4 * KB; // OS page.
#endif
+const size_t kReservedCodeRangePages = 0;
#endif
+// The external allocation limit should be below 256 MB on all architectures
+// so that resource-constrained embedders do not run low on memory.
+const int kExternalAllocationLimit = 192 * 1024 * 1024;
+
STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
const int kBitsPerByte = 8;
@@ -258,8 +272,7 @@ template <typename T, class P = FreeStoreAllocationPolicy> class List;
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-enum LanguageMode { SLOPPY, STRICT, LANGUAGE_END = 3 };
-
+enum LanguageMode : uint32_t { SLOPPY, STRICT, LANGUAGE_END };
inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
switch (mode) {
@@ -290,6 +303,11 @@ inline LanguageMode construct_language_mode(bool strict_bit) {
return static_cast<LanguageMode>(strict_bit);
}
+// This constant is used as an undefined value when passing source positions.
+const int kNoSourcePosition = -1;
+
+// This constant is used to indicate missing deoptimization information.
+const int kNoDeoptimizationId = -1;
// Mask for the sign bit in a smi.
const intptr_t kSmiSignMask = kIntptrSignBit;
@@ -410,6 +428,8 @@ class OldSpace;
class ParameterCount;
class Foreign;
class Scope;
+class DeclarationScope;
+class ModuleScope;
class ScopeInfo;
class Script;
class Smi;
@@ -457,6 +477,53 @@ enum AllocationAlignment {
kSimd128Unaligned
};
+// Possible outcomes for decisions.
+enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
+
+inline size_t hash_value(Decision decision) {
+ return static_cast<uint8_t>(decision);
+}
+
+inline std::ostream& operator<<(std::ostream& os, Decision decision) {
+ switch (decision) {
+ case Decision::kUnknown:
+ return os << "Unknown";
+ case Decision::kTrue:
+ return os << "True";
+ case Decision::kFalse:
+ return os << "False";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+// Supported write barrier modes.
+enum WriteBarrierKind : uint8_t {
+ kNoWriteBarrier,
+ kMapWriteBarrier,
+ kPointerWriteBarrier,
+ kFullWriteBarrier
+};
+
+inline size_t hash_value(WriteBarrierKind kind) {
+ return static_cast<uint8_t>(kind);
+}
+
+inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
+ switch (kind) {
+ case kNoWriteBarrier:
+ return os << "NoWriteBarrier";
+ case kMapWriteBarrier:
+ return os << "MapWriteBarrier";
+ case kPointerWriteBarrier:
+ return os << "PointerWriteBarrier";
+ case kFullWriteBarrier:
+ return os << "FullWriteBarrier";
+ }
+ UNREACHABLE();
+ return os;
+}
+
// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
@@ -527,6 +594,8 @@ struct CodeDesc {
int instr_size;
int reloc_size;
int constant_pool_size;
+ byte* unwinding_info;
+ int unwinding_info_size;
Assembler* origin;
};
@@ -551,18 +620,15 @@ enum InlineCacheState {
// Has been executed and only one receiver type has been seen.
MONOMORPHIC,
// Check failed due to prototype (or map deprecation).
- PROTOTYPE_FAILURE,
+ RECOMPUTE_HANDLER,
// Multiple receiver types have been seen.
POLYMORPHIC,
// Many receiver types have been seen.
MEGAMORPHIC,
// A generic handler is installed and no extra typefeedback is recorded.
GENERIC,
- // Special state for debug break or step in prepare stubs.
- DEBUG_STUB
};
-
enum CacheHolderFlag {
kCacheOnPrototype,
kCacheOnPrototypeReceiverIsDictionary,
@@ -570,6 +636,7 @@ enum CacheHolderFlag {
kCacheOnReceiver
};
+enum WhereToStart { kStartAtReceiver, kStartAtPrototype };
// The Store Buffer (GC).
typedef enum {
@@ -583,18 +650,6 @@ typedef void (*StoreBufferCallback)(Heap* heap,
MemoryChunk* page,
StoreBufferEvent event);
-
-// Union used for fast testing of specific double values.
-union DoubleRepresentation {
- double value;
- int64_t bits;
- DoubleRepresentation(double x) { value = x; }
- bool operator==(const DoubleRepresentation& other) const {
- return bits == other.bits;
- }
-};
-
-
// Union used for customized checking of the IEEE double types
// inlined within v8 runtime, rather than going to the underlying
// platform headers and libraries
@@ -619,6 +674,15 @@ union IeeeDoubleBigEndianArchType {
} bits;
};
+#if V8_TARGET_LITTLE_ENDIAN
+typedef IeeeDoubleLittleEndianArchType IeeeDoubleArchType;
+const int kIeeeDoubleMantissaWordOffset = 0;
+const int kIeeeDoubleExponentWordOffset = 4;
+#else
+typedef IeeeDoubleBigEndianArchType IeeeDoubleArchType;
+const int kIeeeDoubleMantissaWordOffset = 4;
+const int kIeeeDoubleExponentWordOffset = 0;
+#endif
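+// For example (a sketch, assuming a local `double d`), the exponent word of a
+// double can be read uniformly on either endianness:
+//   uint32_t hi = *reinterpret_cast<const uint32_t*>(
+//       reinterpret_cast<const uint8_t*>(&d) + kIeeeDoubleExponentWordOffset);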
// AccessorCallback
struct AccessorDescriptor {
@@ -672,8 +736,6 @@ enum CpuFeature {
ARMv7,
ARMv8,
SUDIV,
- MLS,
- UNALIGNED_ACCESSES,
MOVW_MOVT_IMMEDIATE_LOADS,
VFP32DREGS,
NEON,
@@ -685,7 +747,6 @@ enum CpuFeature {
MIPSr6,
// ARM64
ALWAYS_ALIGN_CSP,
- COHERENT_CACHE,
// PPC
FPR_GPR_MOV,
LWSYNC,
@@ -694,6 +755,9 @@ enum CpuFeature {
DISTINCT_OPS,
GENERAL_INSTR_EXT,
FLOATING_POINT_EXT,
+ // PPC/S390
+ UNALIGNED_ACCESSES,
+
NUMBER_OF_CPU_FEATURES
};
@@ -737,6 +801,14 @@ inline std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
return os;
}
+// Valid hints for the abstract operation OrdinaryToPrimitive,
+// implemented according to ES6, section 7.1.1.
+enum class OrdinaryToPrimitiveHint { kNumber, kString };
+
+// Valid hints for the abstract operation ToPrimitive,
+// implemented according to ES6, section 7.1.1.
+enum class ToPrimitiveHint { kDefault, kNumber, kString };
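+// For illustration: `String(obj)` requests ToPrimitiveHint::kString, unary
+// `+obj` requests kNumber, and operations without a preferred type, such as
+// `obj == 1` or `obj + ""`, pass kDefault.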
+
// Defines specifics about arguments object or rest parameter creation.
enum class CreateArgumentsType : uint8_t {
kMappedArguments,
@@ -780,8 +852,16 @@ enum ScopeType {
};
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
-#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6)) || \
- (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6))
+// The x87 FPU converts an sNaN to a qNaN automatically when loading it from
+// memory. As a workaround, the x87 port uses the mips sNaN, which is an
+// otherwise unused qNaN, as its sNaN encoding for some test cases.
+#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \
+ (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
+ (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \
+ (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \
+ (V8_TARGET_ARCH_X87)
const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
#else
@@ -800,54 +880,50 @@ const double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode {
// User declared variables:
- VAR, // declared via 'var', and 'function' declarations
-
- CONST_LEGACY, // declared via legacy 'const' declarations
+ VAR, // declared via 'var', and 'function' declarations
- LET, // declared via 'let' declarations (first lexical)
+ CONST_LEGACY, // declared via legacy 'const' declarations
- CONST, // declared via 'const' declarations
+ LET, // declared via 'let' declarations (first lexical)
- IMPORT, // declared via 'import' declarations (last lexical)
+ CONST, // declared via 'const' declarations (last lexical)
// Variables introduced by the compiler:
- TEMPORARY, // temporary variables (not user-visible), stack-allocated
- // unless the scope as a whole has forced context allocation
+ TEMPORARY, // temporary variables (not user-visible), stack-allocated
+ // unless the scope as a whole has forced context allocation
- DYNAMIC, // always require dynamic lookup (we don't know
- // the declaration)
+ DYNAMIC, // always require dynamic lookup (we don't know
+ // the declaration)
DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the
// variable is global unless it has been shadowed
// by an eval-introduced variable
- DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
- // variable is local and where it is unless it
- // has been shadowed by an eval-introduced
- // variable
+ DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
+ // variable is local and where it is unless it
+ // has been shadowed by an eval-introduced
+ // variable
};
-
inline bool IsDynamicVariableMode(VariableMode mode) {
return mode >= DYNAMIC && mode <= DYNAMIC_LOCAL;
}
inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= IMPORT;
+ return mode >= VAR && mode <= CONST;
}
inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= IMPORT;
+ return mode >= LET && mode <= CONST;
}
inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || mode == CONST_LEGACY || mode == IMPORT;
+ return mode == CONST || mode == CONST_LEGACY;
}
-
enum class VariableLocation {
// Before and during variable allocation, a variable whose location is
// not yet determined. After allocation, a variable looked up as a
@@ -878,9 +954,11 @@ enum class VariableLocation {
// A named slot in a heap context. name() is the variable name in the
// context object on the heap, with lookup starting at the current
// context. index() is invalid.
- LOOKUP
-};
+ LOOKUP,
+ // A named slot in a module's export table.
+ MODULE
+};
// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
// and immutable bindings that can be in two states: initialized and
@@ -934,7 +1012,7 @@ enum MinusZeroMode {
enum Signedness { kSigned, kUnsigned };
-enum FunctionKind {
+enum FunctionKind : uint16_t {
kNormalFunction = 0,
kArrowFunction = 1 << 0,
kGeneratorFunction = 1 << 1,
@@ -945,11 +1023,14 @@ enum FunctionKind {
kBaseConstructor = 1 << 5,
kGetterFunction = 1 << 6,
kSetterFunction = 1 << 7,
+ kAsyncFunction = 1 << 8,
kAccessorFunction = kGetterFunction | kSetterFunction,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
kClassConstructor =
kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
+ kAsyncArrowFunction = kArrowFunction | kAsyncFunction,
+ kAsyncConciseMethod = kAsyncFunction | kConciseMethod
};
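// For illustration: a composite kind such as kAsyncArrowFunction sets both
// the arrow bit (1 << 0) and the async bit (1 << 8), so bit-test predicates
// like IsAsyncFunction below report true for it.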
inline bool IsValidFunctionKind(FunctionKind kind) {
@@ -964,7 +1045,10 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
kind == FunctionKind::kDefaultBaseConstructor ||
kind == FunctionKind::kDefaultSubclassConstructor ||
kind == FunctionKind::kBaseConstructor ||
- kind == FunctionKind::kSubclassConstructor;
+ kind == FunctionKind::kSubclassConstructor ||
+ kind == FunctionKind::kAsyncFunction ||
+ kind == FunctionKind::kAsyncArrowFunction ||
+ kind == FunctionKind::kAsyncConciseMethod;
}
@@ -979,6 +1063,14 @@ inline bool IsGeneratorFunction(FunctionKind kind) {
return kind & FunctionKind::kGeneratorFunction;
}
+inline bool IsAsyncFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kAsyncFunction;
+}
+
+inline bool IsResumableFunction(FunctionKind kind) {
+ return IsGeneratorFunction(kind) || IsAsyncFunction(kind);
+}
inline bool IsConciseMethod(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
@@ -1030,9 +1122,24 @@ inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
if (IsConciseMethod(kind)) return false;
if (IsArrowFunction(kind)) return false;
if (IsGeneratorFunction(kind)) return false;
+ if (IsAsyncFunction(kind)) return false;
return true;
}
+enum class CallableType : unsigned { kJSFunction, kAny };
+
+inline size_t hash_value(CallableType type) { return bit_cast<unsigned>(type); }
+
+inline std::ostream& operator<<(std::ostream& os, CallableType function_type) {
+ switch (function_type) {
+ case CallableType::kJSFunction:
+ return os << "JSFunction";
+ case CallableType::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
inline uint32_t ObjectHash(Address address) {
// All objects are at least pointer aligned, so we can remove the trailing
@@ -1041,6 +1148,15 @@ inline uint32_t ObjectHash(Address address) {
kPointerSizeLog2);
}
+// Type feedback is encoded in such a way that we can combine the feedback
+// at different points by performing an 'OR' operation. Type feedback moves
+// to a more generic type when we combine feedback.
+// kSignedSmall -> kNumber -> kAny
+class BinaryOperationFeedback {
+ public:
+ enum { kNone = 0x00, kSignedSmall = 0x01, kNumber = 0x3, kAny = 0x7 };
+};
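+//
+// Worked example of the OR-combining described above, using the enum values:
+//   kSignedSmall | kNumber == 0x1 | 0x3 == 0x3 == kNumber
+//   kNumber | kAny == 0x3 | 0x7 == 0x7 == kAny
+// so combined feedback only ever becomes more generic, never more specific.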
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 1f97d6ff7e..a7cd0e2497 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -43,6 +43,10 @@ class HandleBase {
V8_INLINE bool is_null() const { return location_ == nullptr; }
+ // Returns the raw address where this handle is stored. This should only be
+ // used for hashing handles; do not ever try to dereference it.
+ V8_INLINE Address address() const { return bit_cast<Address>(location_); }
+
protected:
// Provides the C++ dereference operator.
V8_INLINE Object* operator*() const {
@@ -132,14 +136,14 @@ class Handle final : public HandleBase {
// Provide function object for location equality comparison.
struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
- return lhs.location() == rhs.location();
+ return lhs.address() == rhs.address();
}
};
// Provide function object for location hashing.
struct hash : public std::unary_function<Handle<T>, size_t> {
V8_INLINE size_t operator()(Handle<T> const& handle) const {
- return base::hash<void*>()(handle.location());
+ return base::hash<void*>()(handle.address());
}
};
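  // For illustration (a sketch; Handle<Object> is just one instantiation),
  // the two function objects above can back standard containers keyed by
  // handle location:
  //   std::unordered_map<Handle<Object>, int, Handle<Object>::hash,
  //                      Handle<Object>::equal_to> handle_counts;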
@@ -222,6 +226,10 @@ class MaybeHandle final {
}
}
+ // Returns the raw address where this handle is stored. This should only be
+ // used for hashing handles; do not ever try to dereference it.
+ V8_INLINE Address address() const { return bit_cast<Address>(location_); }
+
bool is_null() const { return location_ == nullptr; }
protected:
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index f019acecb2..d83f63fdbe 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -12,6 +12,16 @@
V(arguments_string, "arguments") \
V(Arguments_string, "Arguments") \
V(Array_string, "Array") \
+ V(arguments_to_string, "[object Arguments]") \
+ V(array_to_string, "[object Array]") \
+ V(boolean_to_string, "[object Boolean]") \
+ V(date_to_string, "[object Date]") \
+ V(error_to_string, "[object Error]") \
+ V(function_to_string, "[object Function]") \
+ V(number_to_string, "[object Number]") \
+ V(object_to_string, "[object Object]") \
+ V(regexp_to_string, "[object RegExp]") \
+ V(string_to_string, "[object String]") \
V(bind_string, "bind") \
V(bool16x8_string, "bool16x8") \
V(Bool16x8_string, "Bool16x8") \
@@ -22,6 +32,7 @@
V(boolean_string, "boolean") \
V(Boolean_string, "Boolean") \
V(bound__string, "bound ") \
+ V(buffer_string, "buffer") \
V(byte_length_string, "byteLength") \
V(byte_offset_string, "byteOffset") \
V(call_string, "call") \
@@ -30,6 +41,7 @@
V(cell_value_string, "%cell_value") \
V(char_at_string, "CharAt") \
V(closure_string, "(closure)") \
+ V(column_string, "column") \
V(compare_ic_string, "==") \
V(configurable_string, "configurable") \
V(constructor_string, "constructor") \
@@ -47,6 +59,7 @@
V(enumerable_string, "enumerable") \
V(Error_string, "Error") \
V(eval_string, "eval") \
+ V(EvalError_string, "EvalError") \
V(false_string, "false") \
V(float32x4_string, "float32x4") \
V(Float32x4_string, "Float32x4") \
@@ -78,19 +91,24 @@
V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
V(last_index_string, "lastIndex") \
V(length_string, "length") \
+ V(line_string, "line") \
V(Map_string, "Map") \
+ V(message_string, "message") \
V(minus_infinity_string, "-Infinity") \
V(minus_zero_string, "-0") \
V(name_string, "name") \
V(nan_string, "NaN") \
V(next_string, "next") \
+ V(not_equal, "not-equal") \
V(null_string, "null") \
V(null_to_string, "[object Null]") \
V(number_string, "number") \
V(Number_string, "Number") \
V(object_string, "object") \
V(Object_string, "Object") \
+ V(ok, "ok") \
V(ownKeys_string, "ownKeys") \
+ V(position_string, "position") \
V(preventExtensions_string, "preventExtensions") \
V(private_api_string, "private_api") \
V(Promise_string, "Promise") \
@@ -98,12 +116,16 @@
V(prototype_string, "prototype") \
V(Proxy_string, "Proxy") \
V(query_colon_string, "(?:)") \
+ V(RangeError_string, "RangeError") \
+ V(ReferenceError_string, "ReferenceError") \
V(RegExp_string, "RegExp") \
+ V(script_string, "script") \
V(setPrototypeOf_string, "setPrototypeOf") \
V(set_string, "set") \
V(Set_string, "Set") \
V(source_mapping_url_string, "source_mapping_url") \
V(source_string, "source") \
+ V(sourceText_string, "sourceText") \
V(source_url_string, "source_url") \
V(stack_string, "stack") \
V(strict_compare_ic_string, "===") \
@@ -111,11 +133,14 @@
V(String_string, "String") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
+ V(SyntaxError_string, "SyntaxError") \
V(this_string, "this") \
V(throw_string, "throw") \
+ V(timed_out, "timed-out") \
V(toJSON_string, "toJSON") \
V(toString_string, "toString") \
V(true_string, "true") \
+ V(TypeError_string, "TypeError") \
V(uint16x8_string, "uint16x8") \
V(Uint16x8_string, "Uint16x8") \
V(uint32x4_string, "uint32x4") \
@@ -124,6 +149,7 @@
V(Uint8x16_string, "Uint8x16") \
V(undefined_string, "undefined") \
V(undefined_to_string, "[object Undefined]") \
+ V(URIError_string, "URIError") \
V(valueOf_string, "valueOf") \
V(values_string, "values") \
V(value_string, "value") \
@@ -135,10 +161,13 @@
V(array_iteration_kind_symbol) \
V(array_iterator_next_symbol) \
V(array_iterator_object_symbol) \
+ V(call_site_constructor_symbol) \
V(call_site_function_symbol) \
V(call_site_position_symbol) \
V(call_site_receiver_symbol) \
V(call_site_strict_symbol) \
+ V(call_site_wasm_obj_symbol) \
+ V(call_site_wasm_func_index_symbol) \
V(class_end_position_symbol) \
V(class_start_position_symbol) \
V(detailed_stack_trace_symbol) \
@@ -146,12 +175,9 @@
V(error_end_pos_symbol) \
V(error_script_symbol) \
V(error_start_pos_symbol) \
- V(formatted_stack_trace_symbol) \
V(frozen_symbol) \
V(hash_code_symbol) \
- V(hidden_properties_symbol) \
V(home_object_symbol) \
- V(internal_error_symbol) \
V(intl_impl_object_symbol) \
V(intl_initialized_marker_symbol) \
V(intl_pattern_symbol) \
@@ -162,16 +188,16 @@
V(nonextensible_symbol) \
V(normal_ic_symbol) \
V(not_mapped_symbol) \
- V(observed_symbol) \
V(premonomorphic_symbol) \
V(promise_combined_deferred_symbol) \
V(promise_debug_marker_symbol) \
+ V(promise_deferred_reactions_symbol) \
+ V(promise_fulfill_reactions_symbol) \
V(promise_has_handler_symbol) \
- V(promise_on_resolve_symbol) \
- V(promise_on_reject_symbol) \
V(promise_raw_symbol) \
- V(promise_status_symbol) \
- V(promise_value_symbol) \
+ V(promise_reject_reactions_symbol) \
+ V(promise_result_symbol) \
+ V(promise_state_symbol) \
V(sealed_symbol) \
V(stack_trace_symbol) \
V(strict_function_transition_symbol) \
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
new file mode 100644
index 0000000000..f5bdead89a
--- /dev/null
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -0,0 +1,68 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/conversions-inl.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/heap.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
+ void* data = buffer->backing_store();
+ if (!data) return;
+
+ size_t length = NumberToSize(buffer->byte_length());
+ Page* page = Page::FromAddress(buffer->address());
+ {
+ base::LockGuard<base::Mutex> guard(page->mutex());
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ if (tracker == nullptr) {
+ page->AllocateLocalTracker();
+ tracker = page->local_tracker();
+ }
+ DCHECK_NOT_NULL(tracker);
+ tracker->Add(buffer, length);
+ }
+ // We may go over the limit of externally allocated memory here. We call the
+ // API function to trigger a GC in this case.
+ reinterpret_cast<v8::Isolate*>(heap->isolate())
+ ->AdjustAmountOfExternalAllocatedMemory(length);
+}
+
+void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
+ void* data = buffer->backing_store();
+ if (!data) return;
+
+ Page* page = Page::FromAddress(buffer->address());
+ size_t length = 0;
+ {
+ base::LockGuard<base::Mutex> guard(page->mutex());
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ DCHECK_NOT_NULL(tracker);
+ length = tracker->Remove(buffer);
+ }
+ heap->update_external_memory(-static_cast<intptr_t>(length));
+}
+
+void LocalArrayBufferTracker::Add(Key key, const Value& value) {
+ auto ret = array_buffers_.insert(std::make_pair(key, value));
+ USE(ret);
+ // Check that we indeed inserted a new value and did not overwrite an existing
+ // one (which would be a bug).
+ DCHECK(ret.second);
+}
+
+LocalArrayBufferTracker::Value LocalArrayBufferTracker::Remove(Key key) {
+ TrackingData::iterator it = array_buffers_.find(key);
+ // Check that we indeed find a key to remove.
+ DCHECK(it != array_buffers_.end());
+ Value value = it->second;
+ array_buffers_.erase(it);
+ return value;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 6e389c1cbf..62b848ef70 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -3,138 +3,136 @@
// found in the LICENSE file.
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
-ArrayBufferTracker::~ArrayBufferTracker() {
- Isolate* isolate = heap()->isolate();
+LocalArrayBufferTracker::~LocalArrayBufferTracker() {
+ CHECK(array_buffers_.empty());
+}
+
+template <LocalArrayBufferTracker::FreeMode free_mode>
+void LocalArrayBufferTracker::Free() {
size_t freed_memory = 0;
- for (auto& buffer : live_array_buffers_) {
- isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- }
- for (auto& buffer : live_array_buffers_for_scavenge_) {
- isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
+ for (TrackingData::iterator it = array_buffers_.begin();
+ it != array_buffers_.end();) {
+ JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
+ if ((free_mode == kFreeAll) ||
+ Marking::IsWhite(ObjectMarking::MarkBitFrom(buffer))) {
+ const size_t len = it->second;
+ heap_->isolate()->array_buffer_allocator()->Free(buffer->backing_store(),
+ len);
+ freed_memory += len;
+ it = array_buffers_.erase(it);
+ } else {
+ ++it;
+ }
}
- live_array_buffers_.clear();
- live_array_buffers_for_scavenge_.clear();
- not_yet_discovered_array_buffers_.clear();
- not_yet_discovered_array_buffers_for_scavenge_.clear();
-
if (freed_memory > 0) {
- heap()->update_amount_of_external_allocated_memory(
- -static_cast<int64_t>(freed_memory));
+ heap_->update_external_memory_concurrently_freed(
+ static_cast<intptr_t>(freed_memory));
}
}
-
-void ArrayBufferTracker::RegisterNew(JSArrayBuffer* buffer) {
- void* data = buffer->backing_store();
- if (!data) return;
-
- bool in_new_space = heap()->InNewSpace(buffer);
- size_t length = NumberToSize(heap()->isolate(), buffer->byte_length());
- if (in_new_space) {
- live_array_buffers_for_scavenge_[data] = length;
- } else {
- live_array_buffers_[data] = length;
+template <typename Callback>
+void LocalArrayBufferTracker::Process(Callback callback) {
+ JSArrayBuffer* new_buffer = nullptr;
+ size_t freed_memory = 0;
+ for (TrackingData::iterator it = array_buffers_.begin();
+ it != array_buffers_.end();) {
+ const CallbackResult result = callback(it->first, &new_buffer);
+ if (result == kKeepEntry) {
+ ++it;
+ } else if (result == kUpdateEntry) {
+ DCHECK_NOT_NULL(new_buffer);
+ Page* target_page = Page::FromAddress(new_buffer->address());
+ // We need to lock the target page because we cannot guarantee
+ // exclusive access to new space pages.
+ if (target_page->InNewSpace()) target_page->mutex()->Lock();
+ LocalArrayBufferTracker* tracker = target_page->local_tracker();
+ if (tracker == nullptr) {
+ target_page->AllocateLocalTracker();
+ tracker = target_page->local_tracker();
+ }
+ DCHECK_NOT_NULL(tracker);
+ tracker->Add(new_buffer, it->second);
+ if (target_page->InNewSpace()) target_page->mutex()->Unlock();
+ it = array_buffers_.erase(it);
+ } else if (result == kRemoveEntry) {
+ const size_t len = it->second;
+ heap_->isolate()->array_buffer_allocator()->Free(
+ it->first->backing_store(), len);
+ freed_memory += len;
+ it = array_buffers_.erase(it);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ if (freed_memory > 0) {
+ heap_->update_external_memory_concurrently_freed(
+ static_cast<intptr_t>(freed_memory));
}
-
- // We may go over the limit of externally allocated memory here. We call the
- // api function to trigger a GC in this case.
- reinterpret_cast<v8::Isolate*>(heap()->isolate())
- ->AdjustAmountOfExternalAllocatedMemory(length);
-}
-
-
-void ArrayBufferTracker::Unregister(JSArrayBuffer* buffer) {
- void* data = buffer->backing_store();
- if (!data) return;
-
- bool in_new_space = heap()->InNewSpace(buffer);
- std::map<void*, size_t>* live_buffers =
- in_new_space ? &live_array_buffers_for_scavenge_ : &live_array_buffers_;
- std::map<void*, size_t>* not_yet_discovered_buffers =
- in_new_space ? &not_yet_discovered_array_buffers_for_scavenge_
- : &not_yet_discovered_array_buffers_;
-
- DCHECK(live_buffers->count(data) > 0);
-
- size_t length = (*live_buffers)[data];
- live_buffers->erase(data);
- not_yet_discovered_buffers->erase(data);
-
- heap()->update_amount_of_external_allocated_memory(
- -static_cast<int64_t>(length));
}
-
-void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
- base::LockGuard<base::Mutex> guard(&mutex_);
- void* data = buffer->backing_store();
-
- // ArrayBuffer might be in the middle of being constructed.
- if (data == heap()->undefined_value()) return;
- if (heap()->InNewSpace(buffer)) {
- not_yet_discovered_array_buffers_for_scavenge_.erase(data);
- } else {
- not_yet_discovered_array_buffers_.erase(data);
+void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
+ DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
+ for (Page* page : NewSpacePageRange(heap->new_space()->FromSpaceStart(),
+ heap->new_space()->FromSpaceEnd())) {
+ bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
+ CHECK(empty);
}
+ heap->account_external_memory_concurrently_freed();
}
-
-void ArrayBufferTracker::FreeDead(bool from_scavenge) {
- size_t freed_memory = 0;
- Isolate* isolate = heap()->isolate();
- for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
- isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- live_array_buffers_for_scavenge_.erase(buffer.first);
+void ArrayBufferTracker::FreeDead(Page* page) {
+ // Callers need to ensure that they hold the page lock.
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ if (tracker == nullptr) return;
+ DCHECK(!page->SweepingDone());
+ tracker->Free<LocalArrayBufferTracker::kFreeDead>();
+ if (tracker->IsEmpty()) {
+ page->ReleaseLocalTracker();
}
+}
- if (!from_scavenge) {
- for (auto& buffer : not_yet_discovered_array_buffers_) {
- isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- live_array_buffers_.erase(buffer.first);
- }
+void ArrayBufferTracker::FreeAll(Page* page) {
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ if (tracker == nullptr) return;
+ tracker->Free<LocalArrayBufferTracker::kFreeAll>();
+ if (tracker->IsEmpty()) {
+ page->ReleaseLocalTracker();
}
-
- not_yet_discovered_array_buffers_for_scavenge_ =
- live_array_buffers_for_scavenge_;
- if (!from_scavenge) not_yet_discovered_array_buffers_ = live_array_buffers_;
-
- // Do not call through the api as this code is triggered while doing a GC.
- heap()->update_amount_of_external_allocated_memory(
- -static_cast<int64_t>(freed_memory));
}
-
-void ArrayBufferTracker::PrepareDiscoveryInNewSpace() {
- not_yet_discovered_array_buffers_for_scavenge_ =
- live_array_buffers_for_scavenge_;
+bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ if (tracker == nullptr) return true;
+
+ DCHECK(page->SweepingDone());
+ tracker->Process(
+ [mode](JSArrayBuffer* old_buffer, JSArrayBuffer** new_buffer) {
+ MapWord map_word = old_buffer->map_word();
+ if (map_word.IsForwardingAddress()) {
+ *new_buffer = JSArrayBuffer::cast(map_word.ToForwardingAddress());
+ return LocalArrayBufferTracker::kUpdateEntry;
+ }
+ return mode == kUpdateForwardedKeepOthers
+ ? LocalArrayBufferTracker::kKeepEntry
+ : LocalArrayBufferTracker::kRemoveEntry;
+ });
+ return tracker->IsEmpty();
}
-
-void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
- base::LockGuard<base::Mutex> guard(&mutex_);
-
- if (buffer->is_external()) return;
- void* data = buffer->backing_store();
- if (!data) return;
- // ArrayBuffer might be in the middle of being constructed.
- if (data == heap()->undefined_value()) return;
- DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
- live_array_buffers_[data] = live_array_buffers_for_scavenge_[data];
- live_array_buffers_for_scavenge_.erase(data);
- not_yet_discovered_array_buffers_for_scavenge_.erase(data);
+bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
+ Page* page = Page::FromAddress(buffer->address());
+ {
+ base::LockGuard<base::Mutex> guard(page->mutex());
+ LocalArrayBufferTracker* tracker = page->local_tracker();
+ if (tracker == nullptr) return false;
+ return tracker->IsTracked(buffer);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 6130003d15..3a57ab70cd 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -5,71 +5,97 @@
#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
#define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
-#include <map>
+#include <unordered_map>
+#include "src/allocation.h"
#include "src/base/platform/mutex.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
-// Forward declarations.
class Heap;
class JSArrayBuffer;
+class Page;
-class ArrayBufferTracker {
+class ArrayBufferTracker : public AllStatic {
public:
- explicit ArrayBufferTracker(Heap* heap) : heap_(heap) {}
- ~ArrayBufferTracker();
-
- inline Heap* heap() { return heap_; }
+ enum ProcessingMode {
+ kUpdateForwardedRemoveOthers,
+ kUpdateForwardedKeepOthers,
+ };
// The following methods are used to track raw C++ pointers to externally
// allocated memory used as backing store in live array buffers.
- // A new ArrayBuffer was created with |data| as backing store.
- void RegisterNew(JSArrayBuffer* buffer);
+ // Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all
+ // access to the tracker by taking the page lock for the corresponding page.
+ inline static void RegisterNew(Heap* heap, JSArrayBuffer* buffer);
+ inline static void Unregister(Heap* heap, JSArrayBuffer* buffer);
- // The backing store |data| is no longer owned by V8.
- void Unregister(JSArrayBuffer* buffer);
+ // Frees all backing store pointers for dead JSArrayBuffers in new space.
+ // Does not take any locks and can only be called during Scavenge.
+ static void FreeDeadInNewSpace(Heap* heap);
- // A live ArrayBuffer was discovered during marking/scavenge.
- void MarkLive(JSArrayBuffer* buffer);
+ // Frees all backing store pointers for dead JSArrayBuffers on a given page.
+ // Requires marking information to be present. Requires the page lock to be
+ // taken by the caller.
+ static void FreeDead(Page* page);
- // Frees all backing store pointers that weren't discovered in the previous
- // marking or scavenge phase.
- void FreeDead(bool from_scavenge);
+ // Frees all remaining, live or dead, array buffers on a page. Only useful
+ // during tear down.
+ static void FreeAll(Page* page);
- // Prepare for a new scavenge phase. A new marking phase is implicitly
- // prepared by finishing the previous one.
- void PrepareDiscoveryInNewSpace();
+ // Processes all array buffers on a given page. |mode| specifies the action
+ // to perform on the buffers. Returns whether the tracker is empty or not.
+ static bool ProcessBuffers(Page* page, ProcessingMode mode);
- // An ArrayBuffer moved from new space to old space.
- void Promote(JSArrayBuffer* buffer);
+ // Returns whether a buffer is currently tracked.
+ static bool IsTracked(JSArrayBuffer* buffer);
+};
- private:
- base::Mutex mutex_;
- Heap* heap_;
+// LocalArrayBufferTracker tracks internalized array buffers.
+//
+// Never use directly but instead always call through |ArrayBufferTracker|.
+class LocalArrayBufferTracker {
+ public:
+ typedef JSArrayBuffer* Key;
+ typedef size_t Value;
- // |live_array_buffers_| maps externally allocated memory used as backing
- // store for ArrayBuffers to the length of the respective memory blocks.
- //
- // At the beginning of mark/compact, |not_yet_discovered_array_buffers_| is
- // a copy of |live_array_buffers_| and we remove pointers as we discover live
- // ArrayBuffer objects during marking. At the end of mark/compact, the
- // remaining memory blocks can be freed.
- std::map<void*, size_t> live_array_buffers_;
- std::map<void*, size_t> not_yet_discovered_array_buffers_;
-
- // To be able to free memory held by ArrayBuffers during scavenge as well, we
- // have a separate list of allocated memory held by ArrayBuffers in new space.
+ enum CallbackResult { kKeepEntry, kUpdateEntry, kRemoveEntry };
+ enum FreeMode { kFreeDead, kFreeAll };
+
+ explicit LocalArrayBufferTracker(Heap* heap) : heap_(heap) {}
+ ~LocalArrayBufferTracker();
+
+ inline void Add(Key key, const Value& value);
+ inline Value Remove(Key key);
+
+ // Frees up array buffers determined by |free_mode|.
+ template <FreeMode free_mode>
+ void Free();
+
+ // Processes buffers one by one. The CallbackResult of the callback decides
+ // what action to take on the buffer.
//
- // Since mark/compact also evacuates the new space, all pointers in the
- // |live_array_buffers_for_scavenge_| list are also in the
- // |live_array_buffers_| list.
- std::map<void*, size_t> live_array_buffers_for_scavenge_;
- std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
+ // Callback should be of type:
+ // CallbackResult fn(JSArrayBuffer* buffer, JSArrayBuffer** new_buffer);
+ template <typename Callback>
+ void Process(Callback callback);
+
+ bool IsEmpty() { return array_buffers_.empty(); }
+
+ bool IsTracked(Key key) {
+ return array_buffers_.find(key) != array_buffers_.end();
+ }
+
+ private:
+ typedef std::unordered_map<Key, Value> TrackingData;
+
+ Heap* heap_;
+ TrackingData array_buffers_;
};
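+// For illustration (a sketch; mirrors the callback used by ProcessBuffers in
+// the .cc file):
+//   tracker->Process(
+//       [](JSArrayBuffer* old_buffer, JSArrayBuffer** new_buffer) {
+//         return LocalArrayBufferTracker::kKeepEntry;  // keep every entry
+//       });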
+
} // namespace internal
} // namespace v8
#endif // V8_HEAP_ARRAY_BUFFER_TRACKER_H_
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
new file mode 100644
index 0000000000..d4ff5fbba7
--- /dev/null
+++ b/deps/v8/src/heap/code-stats.cc
@@ -0,0 +1,220 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/code-stats.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Record code statistics.
+void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject* object,
+ Isolate* isolate) {
+ if (!object->IsAbstractCode()) {
+ return;
+ }
+
+ // Record code+metadata statistics.
+ AbstractCode* abstract_code = AbstractCode::cast(object);
+ int size = abstract_code->SizeIncludingMetadata();
+ if (abstract_code->IsCode()) {
+ size += isolate->code_and_metadata_size();
+ isolate->set_code_and_metadata_size(size);
+ } else {
+ size += isolate->bytecode_and_metadata_size();
+ isolate->set_bytecode_and_metadata_size(size);
+ }
+
+#ifdef DEBUG
+ // Record code kind and code comment statistics.
+ isolate->code_kind_statistics()[abstract_code->kind()] +=
+ abstract_code->Size();
+ CodeStatistics::CollectCodeCommentStatistics(object, isolate);
+#endif
+}
+
+void CodeStatistics::ResetCodeAndMetadataStatistics(Isolate* isolate) {
+ isolate->set_code_and_metadata_size(0);
+ isolate->set_bytecode_and_metadata_size(0);
+#ifdef DEBUG
+ ResetCodeStatistics(isolate);
+#endif
+}
+
+// Collects code size statistics:
+// - code and metadata size
+// - by code kind (only in debug mode)
+// - by code comment (only in debug mode)
+void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
+ Isolate* isolate) {
+ HeapObjectIterator obj_it(space);
+ for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+ RecordCodeAndMetadataStatistics(obj, isolate);
+ }
+}
+
+// Collects code size statistics in LargeObjectSpace:
+// - code and metadata size
+// - by code kind (only in debug mode)
+// - by code comment (only in debug mode)
+void CodeStatistics::CollectCodeStatistics(LargeObjectSpace* space,
+ Isolate* isolate) {
+ LargeObjectIterator obj_it(space);
+ for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+ RecordCodeAndMetadataStatistics(obj, isolate);
+ }
+}
+
+#ifdef DEBUG
+void CodeStatistics::ReportCodeStatistics(Isolate* isolate) {
+ // Report code kind statistics
+ int* code_kind_statistics = isolate->code_kind_statistics();
+ PrintF("\n Code kind histograms: \n");
+ for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
+ if (code_kind_statistics[i] > 0) {
+ PrintF(" %-20s: %10d bytes\n",
+ AbstractCode::Kind2String(static_cast<AbstractCode::Kind>(i)),
+ code_kind_statistics[i]);
+ }
+ }
+ PrintF("\n");
+
+ // Report code and metadata statistics
+ if (isolate->code_and_metadata_size() > 0) {
+ PrintF("Code size including metadata : %10d bytes\n",
+ isolate->code_and_metadata_size());
+ }
+ if (isolate->bytecode_and_metadata_size() > 0) {
+ PrintF("Bytecode size including metadata: %10d bytes\n",
+ isolate->bytecode_and_metadata_size());
+ }
+
+ // Report code comment statistics
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
+ PrintF(
+ "Code comment statistics (\" [ comment-txt : size/ "
+ "count (average)\"):\n");
+ for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
+ const CommentStatistic& cs = comments_statistics[i];
+ if (cs.size > 0) {
+ PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
+ cs.size / cs.count);
+ }
+ }
+ PrintF("\n");
+}
+
+void CodeStatistics::ResetCodeStatistics(Isolate* isolate) {
+ // Clear code kind statistics
+ int* code_kind_statistics = isolate->code_kind_statistics();
+ for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
+ code_kind_statistics[i] = 0;
+ }
+
+ // Clear code comment statistics
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
+ for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+ comments_statistics[i].Clear();
+ }
+ comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
+ comments_statistics[CommentStatistic::kMaxComments].size = 0;
+ comments_statistics[CommentStatistic::kMaxComments].count = 0;
+}
+
+// Adds a comment to the 'comment_statistics' table. Performance is OK as
+// long as 'kMaxComments' is small.
+void CodeStatistics::EnterComment(Isolate* isolate, const char* comment,
+ int delta) {
+ CommentStatistic* comments_statistics =
+ isolate->paged_space_comments_statistics();
+ // Do not count empty comments
+ if (delta <= 0) return;
+ CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
+ // Search for a free or matching entry in 'comments_statistics'; 'cs'
+ // points to the result.
+ for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+ if (comments_statistics[i].comment == NULL) {
+ cs = &comments_statistics[i];
+ cs->comment = comment;
+ break;
+ } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
+ cs = &comments_statistics[i];
+ break;
+ }
+ }
+ // Update entry for 'comment'
+ cs->size += delta;
+ cs->count += 1;
+}
+
+// Called for each nested comment start (start marked with '[ xxx', end marked
+// with ']'). RelocIterator 'it' must point to a comment reloc info.
+void CodeStatistics::CollectCommentStatistics(Isolate* isolate,
+ RelocIterator* it) {
+ DCHECK(!it->done());
+ DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
+ const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
+ if (tmp[0] != '[') {
+ // Not a nested comment; skip
+ return;
+ }
+
+ // Search for end of nested comment or a new nested comment
+ const char* const comment_txt =
+ reinterpret_cast<const char*>(it->rinfo()->data());
+ const byte* prev_pc = it->rinfo()->pc();
+ int flat_delta = 0;
+ it->next();
+ while (true) {
+ // All nested comments must be terminated properly, and therefore we
+ // exit from the loop.
+ DCHECK(!it->done());
+ if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
+ const char* const txt =
+ reinterpret_cast<const char*>(it->rinfo()->data());
+ flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
+ if (txt[0] == ']') break; // End of nested comment
+ // A new comment
+ CollectCommentStatistics(isolate, it);
+ // Skip code that was covered by the previous comment
+ prev_pc = it->rinfo()->pc();
+ }
+ it->next();
+ }
+ EnterComment(isolate, comment_txt, flat_delta);
+}
+
+// Collects code comment statistics
+void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
+ Isolate* isolate) {
+ // Bytecode objects do not contain RelocInfo. Only process code objects
+ // for code comment statistics.
+ if (!obj->IsCode()) {
+ return;
+ }
+
+ Code* code = Code::cast(obj);
+ RelocIterator it(code);
+ int delta = 0;
+ const byte* prev_pc = code->instruction_start();
+ while (!it.done()) {
+ if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
+ delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
+ CollectCommentStatistics(isolate, &it);
+ prev_pc = it.rinfo()->pc();
+ }
+ it.next();
+ }
+
+ DCHECK(code->instruction_start() <= prev_pc &&
+ prev_pc <= code->instruction_end());
+ delta += static_cast<int>(code->instruction_end() - prev_pc);
+ EnterComment(isolate, "NoComment", delta);
+}
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/code-stats.h b/deps/v8/src/heap/code-stats.h
new file mode 100644
index 0000000000..499c9fa5ac
--- /dev/null
+++ b/deps/v8/src/heap/code-stats.h
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/assembler.h"
+#include "src/heap/spaces.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeStatistics {
+ public:
+ // Collect statistics related to code size.
+ static void CollectCodeStatistics(PagedSpace* space, Isolate* isolate);
+
+ // Collect statistics related to code size from large object space.
+ static void CollectCodeStatistics(LargeObjectSpace* space, Isolate* isolate);
+
+ // Reset code size related statistics
+ static void ResetCodeAndMetadataStatistics(Isolate* isolate);
+
+#ifdef DEBUG
+ // Report statistics about code kind, code+metadata and code comments.
+ static void ReportCodeStatistics(Isolate* isolate);
+#endif
+
+ private:
+ static void RecordCodeAndMetadataStatistics(HeapObject* object,
+ Isolate* isolate);
+
+#ifdef DEBUG
+ static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it);
+ static void CollectCodeCommentStatistics(HeapObject* obj, Isolate* isolate);
+ static void EnterComment(Isolate* isolate, const char* comment, int delta);
+ static void ResetCodeStatistics(Isolate* isolate);
+#endif
+};
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 972dfa6e5c..0c411f7b4c 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -41,8 +41,7 @@ void GCIdleTimeAction::Print() {
void GCIdleTimeHeapState::Print() {
PrintF("contexts_disposed=%d ", contexts_disposed);
PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
- PrintF("size_of_objects=%" V8_SIZET_PREFIX V8_PTR_PREFIX "d ",
- size_of_objects);
+ PrintF("size_of_objects=%" PRIuS " ", size_of_objects);
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
}
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 3c46f5292d..695a259884 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -26,21 +26,17 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (FLAG_runtime_call_stats) {
- RuntimeCallStats* stats =
- tracer_->heap_->isolate()->counters()->runtime_call_stats();
- timer_.Initialize(&stats->GC, stats->current_timer());
- stats->Enter(&timer_);
+ RuntimeCallStats::Enter(tracer_->heap_->isolate(), &timer_,
+ &RuntimeCallStats::GC);
}
}
-
GCTracer::Scope::~Scope() {
- DCHECK(scope_ < NUMBER_OF_SCOPES); // scope_ is unsigned.
- tracer_->current_.scopes[scope_] +=
- tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
+ tracer_->AddScopeSample(
+ scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (FLAG_runtime_call_stats) {
- tracer_->heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+ RuntimeCallStats::Leave(tracer_->heap_->isolate(), &timer_);
}
}
@@ -71,15 +67,12 @@ GCTracer::Event::Event(Type type, const char* gc_reason,
end_memory_size(0),
start_holes_size(0),
end_holes_size(0),
- cumulative_incremental_marking_steps(0),
- incremental_marking_steps(0),
+ new_space_object_size(0),
+ survived_new_space_object_size(0),
cumulative_incremental_marking_bytes(0),
incremental_marking_bytes(0),
- cumulative_incremental_marking_duration(0.0),
- incremental_marking_duration(0.0),
cumulative_pure_incremental_marking_duration(0.0),
- pure_incremental_marking_duration(0.0),
- longest_incremental_marking_step(0.0) {
+ pure_incremental_marking_duration(0.0) {
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
scopes[i] = 0;
}
@@ -111,17 +104,14 @@ const char* GCTracer::Event::TypeName(bool short_name) const {
return "Unknown Event Type";
}
-
GCTracer::GCTracer(Heap* heap)
: heap_(heap),
- cumulative_incremental_marking_steps_(0),
+ current_(Event::START, nullptr, nullptr),
+ previous_(current_),
+ previous_incremental_mark_compactor_event_(current_),
cumulative_incremental_marking_bytes_(0),
cumulative_incremental_marking_duration_(0.0),
cumulative_pure_incremental_marking_duration_(0.0),
- longest_incremental_marking_step_(0.0),
- cumulative_incremental_marking_finalization_steps_(0),
- cumulative_incremental_marking_finalization_duration_(0.0),
- longest_incremental_marking_finalization_step_(0.0),
cumulative_marking_duration_(0.0),
cumulative_sweeping_duration_(0.0),
allocation_time_ms_(0.0),
@@ -132,12 +122,42 @@ GCTracer::GCTracer(Heap* heap)
old_generation_allocation_in_bytes_since_gc_(0),
combined_mark_compact_speed_cache_(0.0),
start_counter_(0) {
+ current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
+}
+
+void GCTracer::ResetForTesting() {
current_ = Event(Event::START, NULL, NULL);
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
previous_ = previous_incremental_mark_compactor_event_ = current_;
+ cumulative_incremental_marking_bytes_ = 0.0;
+ cumulative_incremental_marking_duration_ = 0.0;
+ cumulative_pure_incremental_marking_duration_ = 0.0;
+ cumulative_marking_duration_ = 0.0;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ incremental_marking_scopes_[i].cumulative_duration = 0.0;
+ incremental_marking_scopes_[i].steps = 0;
+ incremental_marking_scopes_[i].longest_step = 0.0;
+ }
+ cumulative_sweeping_duration_ = 0.0;
+ allocation_time_ms_ = 0.0;
+ new_space_allocation_counter_bytes_ = 0.0;
+ old_generation_allocation_counter_bytes_ = 0.0;
+ allocation_duration_since_gc_ = 0.0;
+ new_space_allocation_in_bytes_since_gc_ = 0.0;
+ old_generation_allocation_in_bytes_since_gc_ = 0.0;
+ combined_mark_compact_speed_cache_ = 0.0;
+ recorded_scavenges_total_.Reset();
+ recorded_scavenges_survived_.Reset();
+ recorded_compactions_.Reset();
+ recorded_mark_compacts_.Reset();
+ recorded_incremental_mark_compacts_.Reset();
+ recorded_new_generation_allocations_.Reset();
+ recorded_old_generation_allocations_.Reset();
+ recorded_context_disposal_times_.Reset();
+ recorded_survival_ratios_.Reset();
+ start_counter_ = 0;
}
-
void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
const char* collector_reason) {
start_counter_++;
@@ -164,24 +184,20 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
current_.reduce_memory = heap_->ShouldReduceMemory();
current_.start_time = start_time;
current_.start_object_size = heap_->SizeOfObjects();
- current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
+ current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
current_.new_space_object_size =
heap_->new_space()->top() - heap_->new_space()->bottom();
- current_.cumulative_incremental_marking_steps =
- cumulative_incremental_marking_steps_;
current_.cumulative_incremental_marking_bytes =
cumulative_incremental_marking_bytes_;
- current_.cumulative_incremental_marking_duration =
- cumulative_incremental_marking_duration_;
current_.cumulative_pure_incremental_marking_duration =
cumulative_pure_incremental_marking_duration_;
- current_.longest_incremental_marking_step = longest_incremental_marking_step_;
for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
current_.scopes[i] = 0;
}
+
int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
int used_memory = static_cast<int>(current_.start_object_size / KB);
heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
@@ -190,19 +206,31 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
start_time, used_memory);
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (FLAG_runtime_call_stats) {
- RuntimeCallStats* stats =
- heap_->isolate()->counters()->runtime_call_stats();
- timer_.Initialize(&stats->GC, stats->current_timer());
- stats->Enter(&timer_);
+ RuntimeCallStats::Enter(heap_->isolate(), &timer_, &RuntimeCallStats::GC);
+ }
+}
+
+void GCTracer::MergeBaseline(const Event& baseline) {
+ current_.incremental_marking_bytes =
+ current_.cumulative_incremental_marking_bytes -
+ baseline.cumulative_incremental_marking_bytes;
+ current_.pure_incremental_marking_duration =
+ current_.cumulative_pure_incremental_marking_duration -
+ baseline.cumulative_pure_incremental_marking_duration;
+ for (int i = Scope::FIRST_INCREMENTAL_SCOPE;
+ i <= Scope::LAST_INCREMENTAL_SCOPE; i++) {
+ current_.scopes[i] =
+ current_.incremental_marking_scopes[i].cumulative_duration -
+ baseline.incremental_marking_scopes[i].cumulative_duration;
}
}
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
- Output("[Finished reentrant %s during %s.]\n",
- collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
- current_.TypeName(false));
+ PrintIsolate(heap_->isolate(), "[Finished reentrant %s during %s.]\n",
+ collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
+ current_.TypeName(false));
return;
}
@@ -212,9 +240,14 @@ void GCTracer::Stop(GarbageCollector collector) {
(current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
+ for (int i = Scope::FIRST_INCREMENTAL_SCOPE;
+ i <= Scope::LAST_INCREMENTAL_SCOPE; i++) {
+ current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
+ }
+
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
current_.end_object_size = heap_->SizeOfObjects();
- current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
+ current_.end_memory_size = heap_->memory_allocator()->Size();
current_.end_holes_size = CountTotalHolesSize(heap_);
current_.survived_new_space_object_size = heap_->SurvivedNewSpaceObjectSize();
@@ -228,82 +261,55 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.end_time, used_memory);
double duration = current_.end_time - current_.start_time;
+
if (current_.type == Event::SCAVENGER) {
- current_.incremental_marking_steps =
- current_.cumulative_incremental_marking_steps -
- previous_.cumulative_incremental_marking_steps;
- current_.incremental_marking_bytes =
- current_.cumulative_incremental_marking_bytes -
- previous_.cumulative_incremental_marking_bytes;
- current_.incremental_marking_duration =
- current_.cumulative_incremental_marking_duration -
- previous_.cumulative_incremental_marking_duration;
- current_.pure_incremental_marking_duration =
- current_.cumulative_pure_incremental_marking_duration -
- previous_.cumulative_pure_incremental_marking_duration;
+ MergeBaseline(previous_);
recorded_scavenges_total_.Push(
MakeBytesAndDuration(current_.new_space_object_size, duration));
recorded_scavenges_survived_.Push(MakeBytesAndDuration(
current_.survived_new_space_object_size, duration));
} else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
- current_.incremental_marking_steps =
- current_.cumulative_incremental_marking_steps -
- previous_incremental_mark_compactor_event_
- .cumulative_incremental_marking_steps;
- current_.incremental_marking_bytes =
- current_.cumulative_incremental_marking_bytes -
- previous_incremental_mark_compactor_event_
- .cumulative_incremental_marking_bytes;
- current_.incremental_marking_duration =
- current_.cumulative_incremental_marking_duration -
- previous_incremental_mark_compactor_event_
- .cumulative_incremental_marking_duration;
- current_.pure_incremental_marking_duration =
- current_.cumulative_pure_incremental_marking_duration -
- previous_incremental_mark_compactor_event_
- .cumulative_pure_incremental_marking_duration;
- longest_incremental_marking_step_ = 0.0;
+ MergeBaseline(previous_incremental_mark_compactor_event_);
recorded_incremental_marking_steps_.Push(
MakeBytesAndDuration(current_.incremental_marking_bytes,
current_.pure_incremental_marking_duration));
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
combined_mark_compact_speed_cache_ = 0.0;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ incremental_marking_scopes_[i].ResetCurrentCycle();
+ }
} else {
DCHECK(current_.incremental_marking_bytes == 0);
- DCHECK(current_.incremental_marking_duration == 0);
DCHECK(current_.pure_incremental_marking_duration == 0);
- longest_incremental_marking_step_ = 0.0;
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
combined_mark_compact_speed_cache_ = 0.0;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ incremental_marking_scopes_[i].ResetCurrentCycle();
+ }
}
- // TODO(ernstm): move the code below out of GCTracer.
-
double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
-
heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
current_.scopes[Scope::MC_MARK]);
if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
return;
- if (FLAG_trace_gc_nvp)
+ if (FLAG_trace_gc_nvp) {
PrintNVP();
- else
+ } else {
Print();
+ }
if (FLAG_trace_gc) {
heap_->PrintShortHeapStatistics();
}
- longest_incremental_marking_finalization_step_ = 0.0;
- cumulative_incremental_marking_finalization_steps_ = 0;
- cumulative_incremental_marking_finalization_duration_ = 0.0;
// TODO(cbruni): remove once we fully moved to a trace-based system.
if (FLAG_runtime_call_stats) {
- heap_->isolate()->counters()->runtime_call_stats()->Leave(&timer_);
+ RuntimeCallStats::Leave(heap_->isolate(), &timer_);
}
}
@@ -319,7 +325,7 @@ void GCTracer::SampleAllocation(double current_ms,
return;
}
// This assumes that counters are unsigned integers so that the subtraction
- // below works even if the new counter is less then the old counter.
+ // below works even if the new counter is less than the old counter.
size_t new_space_allocated_bytes =
new_space_counter_bytes - new_space_allocation_counter_bytes_;
size_t old_generation_allocated_bytes =
@@ -369,26 +375,14 @@ void GCTracer::AddSurvivalRatio(double promotion_ratio) {
void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
- cumulative_incremental_marking_steps_++;
cumulative_incremental_marking_bytes_ += bytes;
cumulative_incremental_marking_duration_ += duration;
- longest_incremental_marking_step_ =
- Max(longest_incremental_marking_step_, duration);
cumulative_marking_duration_ += duration;
if (bytes > 0) {
cumulative_pure_incremental_marking_duration_ += duration;
}
}
-
-void GCTracer::AddIncrementalMarkingFinalizationStep(double duration) {
- cumulative_incremental_marking_finalization_steps_++;
- cumulative_incremental_marking_finalization_duration_ += duration;
- longest_incremental_marking_finalization_step_ =
- Max(longest_incremental_marking_finalization_step_, duration);
-}
-
-
void GCTracer::Output(const char* format, ...) const {
if (FLAG_trace_gc) {
va_list arguments;
@@ -410,46 +404,46 @@ void GCTracer::Output(const char* format, ...) const {
void GCTracer::Print() const {
- if (FLAG_trace_gc) {
- PrintIsolate(heap_->isolate(), "");
- }
- Output("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
-
- Output("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false),
- static_cast<double>(current_.start_object_size) / MB,
- static_cast<double>(current_.start_memory_size) / MB,
- static_cast<double>(current_.end_object_size) / MB,
- static_cast<double>(current_.end_memory_size) / MB);
-
double duration = current_.end_time - current_.start_time;
- Output("%.1f / %.1f ms", duration, TotalExternalTime());
-
- if (current_.type == Event::SCAVENGER) {
- if (current_.incremental_marking_steps > 0) {
- Output(" (+ %.1f ms in %d steps since last GC)",
- current_.incremental_marking_duration,
- current_.incremental_marking_steps);
- }
- } else {
- if (current_.incremental_marking_steps > 0) {
- Output(
+ const size_t kIncrementalStatsSize = 128;
+ char incremental_buffer[kIncrementalStatsSize] = {0};
+
+ if (current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps > 0) {
+ if (current_.type == Event::SCAVENGER) {
+ base::OS::SNPrintF(
+ incremental_buffer, kIncrementalStatsSize,
+ " (+ %.1f ms in %d steps since last GC)",
+ current_.scopes[Scope::MC_INCREMENTAL],
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps);
+ } else {
+ base::OS::SNPrintF(
+ incremental_buffer, kIncrementalStatsSize,
" (+ %.1f ms in %d steps since start of marking, "
"biggest step %.1f ms)",
- current_.incremental_marking_duration,
- current_.incremental_marking_steps,
- current_.longest_incremental_marking_step);
+ current_.scopes[Scope::MC_INCREMENTAL],
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL]
+ .longest_step);
}
}
- if (current_.gc_reason != NULL) {
- Output(" [%s]", current_.gc_reason);
- }
-
- if (current_.collector_reason != NULL) {
- Output(" [%s]", current_.collector_reason);
- }
-
- Output(".\n");
+ // Use Output rather than PrintF, as Output also appends the string to the
+ // tracing ring buffer that gets printed on OOM failures.
+ Output(
+ "[%d:%p] "
+ "%8.0f ms: "
+ "%s %.1f (%.1f) -> %.1f (%.1f) MB, "
+ "%.1f / %.1f ms %s %s %s\n",
+ base::OS::GetCurrentProcessId(),
+ reinterpret_cast<void*>(heap_->isolate()),
+ heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
+ static_cast<double>(current_.start_object_size) / MB,
+ static_cast<double>(current_.start_memory_size) / MB,
+ static_cast<double>(current_.end_object_size) / MB,
+ static_cast<double>(current_.end_memory_size) / MB, duration,
+ TotalExternalTime(), incremental_buffer,
+ current_.gc_reason != nullptr ? current_.gc_reason : "",
+ current_.collector_reason != nullptr ? current_.collector_reason : "");
}
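
Print() now assembles the optional incremental-marking suffix into a stack buffer and emits the whole trace line with a single Output() call, so the line lands intact in the crash-dump ring buffer. A minimal sketch of the pattern, with plain snprintf/printf standing in for base::OS::SNPrintF and Output:

    #include <cstdio>

    // Sketch: build the optional suffix first, then emit one atomic line.
    void PrintTraceLine(double duration_ms, int steps, double steps_ms) {
      const size_t kBufSize = 128;
      char incremental_buffer[kBufSize] = {0};
      if (steps > 0) {
        // Filled in only when incremental marking actually ran.
        snprintf(incremental_buffer, kBufSize,
                 " (+ %.1f ms in %d steps since last GC)", steps_ms, steps);
      }
      // A single call per line: the parts cannot interleave or get split up.
      printf("%.1f ms%s\n", duration_ms, incremental_buffer);
    }

    int main() {
      PrintTraceLine(3.5, 0, 0.0);   // no incremental suffix
      PrintTraceLine(12.7, 4, 8.2);  // with incremental suffix
    }
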
@@ -461,74 +455,75 @@ void GCTracer::PrintNVP() const {
switch (current_.type) {
case Event::SCAVENGER:
- PrintIsolate(heap_->isolate(),
- "%8.0f ms: "
- "pause=%.1f "
- "mutator=%.1f "
- "gc=%s "
- "reduce_memory=%d "
- "scavenge=%.2f "
- "old_new=%.2f "
- "weak=%.2f "
- "roots=%.2f "
- "code=%.2f "
- "semispace=%.2f "
- "object_groups=%.2f "
- "external_prologue=%.2f "
- "external_epilogue=%.2f "
- "external_weak_global_handles=%.2f "
- "steps_count=%d "
- "steps_took=%.1f "
- "scavenge_throughput=%.f "
- "total_size_before=%" V8_PTR_PREFIX
- "d "
- "total_size_after=%" V8_PTR_PREFIX
- "d "
- "holes_size_before=%" V8_PTR_PREFIX
- "d "
- "holes_size_after=%" V8_PTR_PREFIX
- "d "
- "allocated=%" V8_PTR_PREFIX
- "d "
- "promoted=%" V8_PTR_PREFIX
- "d "
- "semi_space_copied=%" V8_PTR_PREFIX
- "d "
- "nodes_died_in_new=%d "
- "nodes_copied_in_new=%d "
- "nodes_promoted=%d "
- "promotion_ratio=%.1f%% "
- "average_survival_ratio=%.1f%% "
- "promotion_rate=%.1f%% "
- "semi_space_copy_rate=%.1f%% "
- "new_space_allocation_throughput=%.1f "
- "context_disposal_rate=%.1f\n",
- heap_->isolate()->time_millis_since_init(), duration,
- spent_in_mutator, current_.TypeName(true),
- current_.reduce_memory,
- current_.scopes[Scope::SCAVENGER_SCAVENGE],
- current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
- current_.scopes[Scope::SCAVENGER_WEAK],
- current_.scopes[Scope::SCAVENGER_ROOTS],
- current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
- current_.scopes[Scope::SCAVENGER_SEMISPACE],
- current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
- current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE],
- current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
- current_.incremental_marking_steps,
- current_.incremental_marking_duration,
- ScavengeSpeedInBytesPerMillisecond(),
- current_.start_object_size, current_.end_object_size,
- current_.start_holes_size, current_.end_holes_size,
- allocated_since_last_gc, heap_->promoted_objects_size(),
- heap_->semi_space_copied_object_size(),
- heap_->nodes_died_in_new_space_,
- heap_->nodes_copied_in_new_space_, heap_->nodes_promoted_,
- heap_->promotion_ratio_, AverageSurvivalRatio(),
- heap_->promotion_rate_, heap_->semi_space_copied_rate_,
- NewSpaceAllocationThroughputInBytesPerMillisecond(),
- ContextDisposalRateInMilliseconds());
+ PrintIsolate(
+ heap_->isolate(),
+ "%8.0f ms: "
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d "
+ "scavenge=%.2f "
+ "old_new=%.2f "
+ "weak=%.2f "
+ "roots=%.2f "
+ "code=%.2f "
+ "semispace=%.2f "
+ "object_groups=%.2f "
+ "external_prologue=%.2f "
+ "external_epilogue=%.2f "
+ "external_weak_global_handles=%.2f "
+ "steps_count=%d "
+ "steps_took=%.1f "
+ "scavenge_throughput=%.f "
+ "total_size_before=%" V8PRIdPTR
+ " "
+ "total_size_after=%" V8PRIdPTR
+ " "
+ "holes_size_before=%" V8PRIdPTR
+ " "
+ "holes_size_after=%" V8PRIdPTR
+ " "
+ "allocated=%" V8PRIdPTR
+ " "
+ "promoted=%" V8PRIdPTR
+ " "
+ "semi_space_copied=%" V8PRIdPTR
+ " "
+ "nodes_died_in_new=%d "
+ "nodes_copied_in_new=%d "
+ "nodes_promoted=%d "
+ "promotion_ratio=%.1f%% "
+ "average_survival_ratio=%.1f%% "
+ "promotion_rate=%.1f%% "
+ "semi_space_copy_rate=%.1f%% "
+ "new_space_allocation_throughput=%.1f "
+ "context_disposal_rate=%.1f\n",
+ heap_->isolate()->time_millis_since_init(), duration,
+ spent_in_mutator, current_.TypeName(true), current_.reduce_memory,
+ current_.scopes[Scope::SCAVENGER_SCAVENGE],
+ current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
+ current_.scopes[Scope::SCAVENGER_WEAK],
+ current_.scopes[Scope::SCAVENGER_ROOTS],
+ current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
+ current_.scopes[Scope::SCAVENGER_SEMISPACE],
+ current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
+ current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
+ current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
+ .steps,
+ current_.scopes[Scope::MC_INCREMENTAL],
+ ScavengeSpeedInBytesPerMillisecond(), current_.start_object_size,
+ current_.end_object_size, current_.start_holes_size,
+ current_.end_holes_size, allocated_since_last_gc,
+ heap_->promoted_objects_size(),
+ heap_->semi_space_copied_object_size(),
+ heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
+ heap_->nodes_promoted_, heap_->promotion_ratio_,
+ AverageSurvivalRatio(), heap_->promotion_rate_,
+ heap_->semi_space_copied_rate_,
+ NewSpaceAllocationThroughputInBytesPerMillisecond(),
+ ContextDisposalRateInMilliseconds());
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
@@ -555,18 +550,16 @@ void GCTracer::PrintNVP() const {
"evacuate.clean_up=%.1f "
"evacuate.copy=%.1f "
"evacuate.update_pointers=%.1f "
- "evacuate.update_pointers.between_evacuated=%.1f "
"evacuate.update_pointers.to_evacuated=%.1f "
"evacuate.update_pointers.to_new=%.1f "
"evacuate.update_pointers.weak=%.1f "
"external.mc_prologue=%.1f "
"external.mc_epilogue=%.1f "
- "external.mc_incremental_prologue=%.1f "
- "external.mc_incremental_epilogue=%.1f "
"external.weak_global_handles=%.1f "
"finish=%.1f "
"mark=%.1f "
"mark.finish_incremental=%.1f "
+ "mark.object_grouping=%.1f "
"mark.prepare_code_flush=%.1f "
"mark.roots=%.1f "
"mark.weak_closure=%.1f "
@@ -574,32 +567,41 @@ void GCTracer::PrintNVP() const {
"mark.weak_closure.weak_handles=%.1f "
"mark.weak_closure.weak_roots=%.1f "
"mark.weak_closure.harmony=%.1f "
+ "mark.wrapper_prologue=%.1f "
+ "mark.wrapper_epilogue=%.1f "
+ "mark.wrapper_tracing=%.1f "
"sweep=%.1f "
"sweep.code=%.1f "
"sweep.map=%.1f "
"sweep.old=%.1f "
- "incremental_finalize=%.1f "
- "steps_count=%d "
- "steps_took=%.1f "
- "longest_step=%.1f "
- "finalization_steps_count=%d "
- "finalization_steps_took=%.1f "
- "finalization_longest_step=%.1f "
+ "incremental=%.1f "
+ "incremental.finalize=%.1f "
+ "incremental.finalize.body=%.1f "
+ "incremental.finalize.external.prologue=%.1f "
+ "incremental.finalize.external.epilogue=%.1f "
+ "incremental.finalize.object_grouping=%.1f "
+ "incremental.wrapper_prologue=%.1f "
+ "incremental.wrapper_tracing=%.1f "
+ "incremental_wrapper_tracing_longest_step=%.1f "
+ "incremental_finalize_longest_step=%.1f "
+ "incremental_finalize_steps_count=%d "
+ "incremental_longest_step=%.1f "
+ "incremental_steps_count=%d "
"incremental_marking_throughput=%.f "
- "total_size_before=%" V8_PTR_PREFIX
- "d "
- "total_size_after=%" V8_PTR_PREFIX
- "d "
- "holes_size_before=%" V8_PTR_PREFIX
- "d "
- "holes_size_after=%" V8_PTR_PREFIX
- "d "
- "allocated=%" V8_PTR_PREFIX
- "d "
- "promoted=%" V8_PTR_PREFIX
- "d "
- "semi_space_copied=%" V8_PTR_PREFIX
- "d "
+ "total_size_before=%" V8PRIdPTR
+ " "
+ "total_size_after=%" V8PRIdPTR
+ " "
+ "holes_size_before=%" V8PRIdPTR
+ " "
+ "holes_size_after=%" V8PRIdPTR
+ " "
+ "allocated=%" V8PRIdPTR
+ " "
+ "promoted=%" V8PRIdPTR
+ " "
+ "semi_space_copied=%" V8PRIdPTR
+ " "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
"nodes_promoted=%d "
@@ -628,17 +630,15 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
current_.scopes[Scope::MC_EVACUATE_COPY],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
- current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
current_.scopes[Scope::MC_EXTERNAL_PROLOGUE],
current_.scopes[Scope::MC_EXTERNAL_EPILOGUE],
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
+ current_.scopes[Scope::MC_MARK_OBJECT_GROUPING],
current_.scopes[Scope::MC_MARK_PREPARE_CODE_FLUSH],
current_.scopes[Scope::MC_MARK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE],
@@ -646,17 +646,33 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_HARMONY],
+ current_.scopes[Scope::MC_MARK_WRAPPER_PROLOGUE],
+ current_.scopes[Scope::MC_MARK_WRAPPER_EPILOGUE],
+ current_.scopes[Scope::MC_MARK_WRAPPER_TRACING],
current_.scopes[Scope::MC_SWEEP],
current_.scopes[Scope::MC_SWEEP_CODE],
current_.scopes[Scope::MC_SWEEP_MAP],
current_.scopes[Scope::MC_SWEEP_OLD],
+ current_.scopes[Scope::MC_INCREMENTAL],
current_.scopes[Scope::MC_INCREMENTAL_FINALIZE],
- current_.incremental_marking_steps,
- current_.incremental_marking_duration,
- current_.longest_incremental_marking_step,
- cumulative_incremental_marking_finalization_steps_,
- cumulative_incremental_marking_finalization_duration_,
- longest_incremental_marking_finalization_step_,
+ current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY],
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING],
+ current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING],
+ current_
+ .incremental_marking_scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING]
+ .longest_step,
+ current_
+ .incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY]
+ .longest_step,
+ current_
+ .incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY]
+ .steps,
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL]
+ .longest_step,
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL].steps,
IncrementalMarkingSpeedInBytesPerMillisecond(),
current_.start_object_size, current_.end_object_size,
current_.start_holes_size, current_.end_holes_size,
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 9ea3cce8fa..a11823e984 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -5,9 +5,11 @@
#ifndef V8_HEAP_GC_TRACER_H_
#define V8_HEAP_GC_TRACER_H_
+#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/counters.h"
#include "src/globals.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
namespace internal {
@@ -58,75 +60,113 @@ inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
-#define TRACER_SCOPES(F) \
- F(EXTERNAL_WEAK_GLOBAL_HANDLES) \
- F(MC_CLEAR) \
- F(MC_CLEAR_CODE_FLUSH) \
- F(MC_CLEAR_DEPENDENT_CODE) \
- F(MC_CLEAR_GLOBAL_HANDLES) \
- F(MC_CLEAR_MAPS) \
- F(MC_CLEAR_SLOTS_BUFFER) \
- F(MC_CLEAR_STORE_BUFFER) \
- F(MC_CLEAR_STRING_TABLE) \
- F(MC_CLEAR_WEAK_CELLS) \
- F(MC_CLEAR_WEAK_COLLECTIONS) \
- F(MC_CLEAR_WEAK_LISTS) \
- F(MC_EVACUATE) \
- F(MC_EVACUATE_CANDIDATES) \
- F(MC_EVACUATE_CLEAN_UP) \
- F(MC_EVACUATE_COPY) \
- F(MC_EVACUATE_UPDATE_POINTERS) \
- F(MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
- F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MC_EXTERNAL_EPILOGUE) \
- F(MC_EXTERNAL_PROLOGUE) \
- F(MC_FINISH) \
- F(MC_INCREMENTAL_FINALIZE) \
- F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
- F(MC_INCREMENTAL_EXTERNAL_PROLOGUE) \
- F(MC_MARK) \
- F(MC_MARK_FINISH_INCREMENTAL) \
- F(MC_MARK_PREPARE_CODE_FLUSH) \
- F(MC_MARK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE) \
- F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE_HARMONY) \
- F(MC_SWEEP) \
- F(MC_SWEEP_CODE) \
- F(MC_SWEEP_MAP) \
- F(MC_SWEEP_OLD) \
- F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
- F(SCAVENGER_EXTERNAL_EPILOGUE) \
- F(SCAVENGER_EXTERNAL_PROLOGUE) \
- F(SCAVENGER_OBJECT_GROUPS) \
- F(SCAVENGER_OLD_TO_NEW_POINTERS) \
- F(SCAVENGER_ROOTS) \
- F(SCAVENGER_SCAVENGE) \
- F(SCAVENGER_SEMISPACE) \
+#define INCREMENTAL_SCOPES(F) \
+ /* MC_INCREMENTAL is the top-level incremental marking scope. */ \
+ F(MC_INCREMENTAL) \
+ F(MC_INCREMENTAL_WRAPPER_PROLOGUE) \
+ F(MC_INCREMENTAL_WRAPPER_TRACING) \
+ F(MC_INCREMENTAL_FINALIZE) \
+ F(MC_INCREMENTAL_FINALIZE_BODY) \
+ F(MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING) \
+ F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
+ F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
+
+#define TRACER_SCOPES(F) \
+ INCREMENTAL_SCOPES(F) \
+ F(EXTERNAL_WEAK_GLOBAL_HANDLES) \
+ F(MC_CLEAR) \
+ F(MC_CLEAR_CODE_FLUSH) \
+ F(MC_CLEAR_DEPENDENT_CODE) \
+ F(MC_CLEAR_GLOBAL_HANDLES) \
+ F(MC_CLEAR_MAPS) \
+ F(MC_CLEAR_SLOTS_BUFFER) \
+ F(MC_CLEAR_STORE_BUFFER) \
+ F(MC_CLEAR_STRING_TABLE) \
+ F(MC_CLEAR_WEAK_CELLS) \
+ F(MC_CLEAR_WEAK_COLLECTIONS) \
+ F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_EVACUATE) \
+ F(MC_EVACUATE_CANDIDATES) \
+ F(MC_EVACUATE_CLEAN_UP) \
+ F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
+ F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MC_EXTERNAL_EPILOGUE) \
+ F(MC_EXTERNAL_PROLOGUE) \
+ F(MC_FINISH) \
+ F(MC_MARK) \
+ F(MC_MARK_FINISH_INCREMENTAL) \
+ F(MC_MARK_PREPARE_CODE_FLUSH) \
+ F(MC_MARK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE_HARMONY) \
+ F(MC_MARK_WRAPPER_EPILOGUE) \
+ F(MC_MARK_WRAPPER_PROLOGUE) \
+ F(MC_MARK_WRAPPER_TRACING) \
+ F(MC_MARK_OBJECT_GROUPING) \
+ F(MC_SWEEP) \
+ F(MC_SWEEP_CODE) \
+ F(MC_SWEEP_MAP) \
+ F(MC_SWEEP_OLD) \
+ F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
+ F(SCAVENGER_EXTERNAL_EPILOGUE) \
+ F(SCAVENGER_EXTERNAL_PROLOGUE) \
+ F(SCAVENGER_OBJECT_GROUPS) \
+ F(SCAVENGER_OLD_TO_NEW_POINTERS) \
+ F(SCAVENGER_ROOTS) \
+ F(SCAVENGER_SCAVENGE) \
+ F(SCAVENGER_SEMISPACE) \
F(SCAVENGER_WEAK)
#define TRACE_GC(tracer, scope_id) \
GCTracer::Scope::ScopeId gc_tracer_scope_id(scope_id); \
GCTracer::Scope gc_tracer_scope(tracer, gc_tracer_scope_id); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
GCTracer::Scope::Name(gc_tracer_scope_id))
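
The scope lists above are an X-macro: one list expands into the enum here and into name strings elsewhere, and keeping INCREMENTAL_SCOPES first makes FIRST_/LAST_INCREMENTAL_SCOPE a contiguous range. A reduced sketch of the idiom, with made-up scope names:

    #include <cstdio>

    // Hypothetical scope list; the real one is far longer.
    #define DEMO_SCOPES(F) \
      F(MC_INCREMENTAL)    \
      F(MC_MARK)           \
      F(MC_SWEEP)

    enum ScopeId {
    #define DEFINE_SCOPE(scope) scope,
      DEMO_SCOPES(DEFINE_SCOPE)
    #undef DEFINE_SCOPE
      NUMBER_OF_SCOPES
    };

    // The same list expands into a parallel name table.
    static const char* kScopeNames[] = {
    #define DEFINE_NAME(scope) #scope,
        DEMO_SCOPES(DEFINE_NAME)
    #undef DEFINE_NAME
    };

    int main() {
      for (int i = 0; i < NUMBER_OF_SCOPES; i++)
        printf("%d: %s\n", i, kScopeNames[i]);
    }
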
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
-// TODO(ernstm): Unit tests.
class GCTracer {
public:
+ struct IncrementalMarkingInfos {
+ IncrementalMarkingInfos()
+ : cumulative_duration(0), longest_step(0), steps(0) {}
+
+ void Update(double duration) {
+ steps++;
+ cumulative_duration += duration;
+ if (duration > longest_step) {
+ longest_step = duration;
+ }
+ }
+
+ void ResetCurrentCycle() {
+ longest_step = 0;
+ steps = 0;
+ }
+
+ double cumulative_duration;
+ double longest_step;
+ int steps;
+ };
+
class Scope {
public:
enum ScopeId {
#define DEFINE_SCOPE(scope) scope,
TRACER_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
- NUMBER_OF_SCOPES
+ NUMBER_OF_SCOPES,
+
+ FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
+ LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_EXTERNAL_PROLOGUE,
+ NUMBER_OF_INCREMENTAL_SCOPES =
+ LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1
};
Scope(GCTracer* tracer, ScopeId scope);
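
IncrementalMarkingInfos splits per-cycle data (steps, longest_step) from data that outlives a cycle (cumulative_duration); ResetCurrentCycle() clears only the former. A small self-contained sketch of that lifecycle, with arbitrary step durations:

    #include <cstdio>

    struct IncrementalMarkingInfos {
      double cumulative_duration = 0;  // survives ResetCurrentCycle()
      double longest_step = 0;         // per GC cycle
      int steps = 0;                   // per GC cycle
      void Update(double duration) {
        steps++;
        cumulative_duration += duration;
        if (duration > longest_step) longest_step = duration;
      }
      void ResetCurrentCycle() { longest_step = 0; steps = 0; }
    };

    int main() {
      IncrementalMarkingInfos infos;
      infos.Update(2.0);
      infos.Update(5.0);
      infos.ResetCurrentCycle();  // end of one GC cycle
      infos.Update(1.0);          // next cycle counts steps afresh
      // Prints: steps=1 longest=1.0 cumulative=8.0
      printf("steps=%d longest=%.1f cumulative=%.1f\n", infos.steps,
             infos.longest_step, infos.cumulative_duration);
    }
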
@@ -152,9 +192,6 @@ class GCTracer {
START = 3
};
- // Default constructor leaves the event uninitialized.
- Event() {}
-
Event(Type type, const char* gc_reason, const char* collector_reason);
// Returns a string describing the event type.
@@ -197,18 +234,9 @@ class GCTracer {
// Size of new space objects in constructor.
intptr_t new_space_object_size;
- // Size of survived new space objects in desctructor.
- intptr_t survived_new_space_object_size;
-
- // Number of incremental marking steps since creation of tracer.
- // (value at start of event)
- int cumulative_incremental_marking_steps;
- // Incremental marking steps since
- // - last event for SCAVENGER events
- // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
- // events
- int incremental_marking_steps;
+ // Size of survived new space objects in destructor.
+ intptr_t survived_new_space_object_size;
// Bytes marked since creation of tracer (value at start of event).
intptr_t cumulative_incremental_marking_bytes;
@@ -219,16 +247,6 @@ class GCTracer {
// events
intptr_t incremental_marking_bytes;
- // Cumulative duration of incremental marking steps since creation of
- // tracer. (value at start of event)
- double cumulative_incremental_marking_duration;
-
- // Duration of incremental marking steps since
- // - last event for SCAVENGER events
- // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
- // events
- double incremental_marking_duration;
-
// Cumulative pure duration of incremental marking steps since creation of
// tracer. (value at start of event)
double cumulative_pure_incremental_marking_duration;
@@ -239,12 +257,12 @@ class GCTracer {
// events
double pure_incremental_marking_duration;
- // Longest incremental marking step since start of marking.
- // (value at start of event)
- double longest_incremental_marking_step;
-
// Amounts of time spent in different scopes during GC.
double scopes[Scope::NUMBER_OF_SCOPES];
+
+ // Holds details for incremental marking scopes.
+ IncrementalMarkingInfos
+ incremental_marking_scopes[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
};
static const int kThroughputTimeFrameMs = 5000;
@@ -274,8 +292,6 @@ class GCTracer {
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
- void AddIncrementalMarkingFinalizationStep(double duration);
-
// Log time spent in marking.
void AddMarkingTime(double duration) {
cumulative_marking_duration_ += duration;
@@ -365,6 +381,26 @@ class GCTracer {
// Discard all recorded survival events.
void ResetSurvivalEvents();
+ V8_INLINE void AddScopeSample(Scope::ScopeId scope, double duration) {
+ DCHECK(scope < Scope::NUMBER_OF_SCOPES);
+ if (scope >= Scope::FIRST_INCREMENTAL_SCOPE &&
+ scope <= Scope::LAST_INCREMENTAL_SCOPE) {
+ incremental_marking_scopes_[scope].Update(duration);
+ } else {
+ current_.scopes[scope] += duration;
+ }
+ }
+
+ private:
+ FRIEND_TEST(GCTracer, AverageSpeed);
+ FRIEND_TEST(GCTracerTest, AllocationThroughput);
+ FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughput);
+ FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughputWithProvidedTime);
+ FRIEND_TEST(GCTracerTest, OldGenerationAllocationThroughputWithProvidedTime);
+ FRIEND_TEST(GCTracerTest, RegularScope);
+ FRIEND_TEST(GCTracerTest, IncrementalMarkingDetails);
+ FRIEND_TEST(GCTracerTest, IncrementalScope);
+
// Returns the average speed of the events in the buffer.
// If the buffer is empty, the result is 0.
// Otherwise, the result is between 1 byte/ms and 1 GB/ms.
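
AddScopeSample depends on the incremental scopes forming a contiguous prefix of the enum: one range check routes a sample either into the per-step infos or into plain accumulation in current_.scopes. A condensed sketch of that dispatch (scope names and array layout are illustrative):

    #include <cassert>

    enum ScopeId {
      MC_INCREMENTAL,            // incremental scopes come first...
      MC_INCREMENTAL_FINALIZE,
      MC_MARK,                   // ...followed by the regular ones
      MC_SWEEP,
      NUMBER_OF_SCOPES,
      FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
      LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_FINALIZE
    };

    struct Tracer {
      double scopes[NUMBER_OF_SCOPES] = {};
      double longest_step[NUMBER_OF_SCOPES] = {};  // used for incremental ids
      int steps[NUMBER_OF_SCOPES] = {};

      void AddScopeSample(ScopeId scope, double duration) {
        assert(scope < NUMBER_OF_SCOPES);
        if (scope >= FIRST_INCREMENTAL_SCOPE &&
            scope <= LAST_INCREMENTAL_SCOPE) {
          // Incremental scopes additionally track step count and longest step.
          steps[scope]++;
          if (duration > longest_step[scope]) longest_step[scope] = duration;
        } else {
          scopes[scope] += duration;  // plain accumulation for regular scopes
        }
      }
    };

    int main() {
      Tracer t;
      t.AddScopeSample(MC_INCREMENTAL, 2.5);  // counted as a step
      t.AddScopeSample(MC_MARK, 7.0);         // summed into scopes[MC_MARK]
    }
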
@@ -372,7 +408,10 @@ class GCTracer {
static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
const BytesAndDuration& initial, double time_ms);
- private:
+ void MergeBaseline(const Event& baseline);
+
+ void ResetForTesting();
+
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
void PrintNVP() const;
@@ -383,20 +422,7 @@ class GCTracer {
// Prints a line and also adds it to the heap's ring buffer so that
// it can be included in later crash dumps.
- void Output(const char* format, ...) const;
-
- void ClearMarkCompactStatistics() {
- cumulative_incremental_marking_steps_ = 0;
- cumulative_incremental_marking_bytes_ = 0;
- cumulative_incremental_marking_duration_ = 0;
- cumulative_pure_incremental_marking_duration_ = 0;
- longest_incremental_marking_step_ = 0;
- cumulative_incremental_marking_finalization_steps_ = 0;
- cumulative_incremental_marking_finalization_duration_ = 0;
- longest_incremental_marking_finalization_step_ = 0;
- cumulative_marking_duration_ = 0;
- cumulative_sweeping_duration_ = 0;
- }
+ void PRINTF_FORMAT(2, 3) Output(const char* format, ...) const;
double TotalExternalTime() const {
return current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES] +
@@ -421,9 +447,6 @@ class GCTracer {
// Previous INCREMENTAL_MARK_COMPACTOR event.
Event previous_incremental_mark_compactor_event_;
- // Cumulative number of incremental marking steps since creation of tracer.
- int cumulative_incremental_marking_steps_;
-
// Cumulative size of incremental marking steps (in bytes) since creation of
// tracer.
intptr_t cumulative_incremental_marking_bytes_;
@@ -435,24 +458,15 @@ class GCTracer {
// tracer.
double cumulative_pure_incremental_marking_duration_;
- // Longest incremental marking step since start of marking.
- double longest_incremental_marking_step_;
-
- // Cumulative number of incremental marking finalization steps since creation
- // of tracer.
- int cumulative_incremental_marking_finalization_steps_;
-
- // Cumulative duration of incremental marking finalization steps since
- // creation of tracer.
- double cumulative_incremental_marking_finalization_duration_;
-
- // Longest incremental marking finalization step since start of marking.
- double longest_incremental_marking_finalization_step_;
-
// Total marking time.
// This timer is precise when run with --print-cumulative-gc-stat
double cumulative_marking_duration_;
+ // Incremental scopes carry more information than just the duration; the
+ // data here is merged back into the current event upon starting/stopping
+ // the GC tracer.
+ IncrementalMarkingInfos
+ incremental_marking_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
+
// Total sweeping time on the main thread.
// This timer is precise when run with --print-cumulative-gc-stat
// TODO(hpayer): Account for sweeping time on sweeper threads. Add a
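
As the comment above notes, the tracer-level incremental infos are merged back into the current event when the tracer starts or stops. A rough sketch of what such a fold-back could look like; the exact MergeBaseline semantics are not shown in this hunk, so this shape is an assumption:

    struct Infos {
      double cumulative_duration;  // survives across GC cycles
      double longest_step;         // per-cycle
      int steps;                   // per-cycle
    };

    // Hypothetical fold-back at the end of a GC cycle: infos collected while
    // marking ran are published into the finished event, then the per-cycle
    // parts are cleared for the next cycle.
    void PublishIncrementalInfos(Infos* tracer_infos, Infos* event_infos,
                                 int n) {
      for (int i = 0; i < n; i++) {
        event_infos[i] = tracer_infos[i];  // snapshot into the event
        tracer_infos[i].longest_step = 0;  // per-cycle data starts over...
        tracer_infos[i].steps = 0;
        // ...while cumulative_duration keeps growing across cycles.
      }
    }

    int main() {
      Infos tracer[1] = {{12.5, 4.0, 3}};  // three steps recorded this cycle
      Infos event[1] = {};
      PublishIncrementalInfos(tracer, event, 1);
    }
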
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index e31d3d6859..21f465fe78 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -252,11 +252,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
old_gen_exhausted_ = true;
}
- if (!old_gen_exhausted_ && incremental_marking()->black_allocation() &&
- space != OLD_SPACE) {
- Marking::MarkBlack(Marking::MarkBitFrom(object));
- MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
- }
return allocation;
}
@@ -389,27 +384,65 @@ bool Heap::InOldSpaceSlow(Address address) {
}
bool Heap::OldGenerationAllocationLimitReached() {
- if (!incremental_marking()->IsStopped()) return false;
+ if (!incremental_marking()->IsStopped() && !ShouldOptimizeForMemoryUsage()) {
+ return false;
+ }
return OldGenerationSpaceAvailable() < 0;
}
-
+template <PromotionMode promotion_mode>
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
- NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+ Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_.age_mark();
+
+ if (promotion_mode == PROMOTE_MARKED) {
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
+ if (!Marking::IsWhite(mark_bit)) {
+ return true;
+ }
+ }
+
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
+PromotionMode Heap::CurrentPromotionMode() {
+ if (incremental_marking()->IsMarking()) {
+ return PROMOTE_MARKED;
+ } else {
+ return DEFAULT_PROMOTION;
+ }
+}
+
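
With PROMOTE_MARKED, an object the incremental marker has already marked (anything non-white) is promoted regardless of the age mark; otherwise the usual age-mark rule decides. A behavioral sketch with booleans standing in for the mark-bit and page checks:

    #include <cstdio>

    enum PromotionMode { DEFAULT_PROMOTION, PROMOTE_MARKED };

    // 'marked' stands in for !Marking::IsWhite(mark_bit); 'below_age_mark'
    // for the page flag / age-mark comparison in the real code.
    bool ShouldBePromoted(PromotionMode mode, bool marked,
                          bool below_age_mark) {
      if (mode == PROMOTE_MARKED && marked) return true;  // marked => promote
      return below_age_mark;  // otherwise the usual age-mark rule applies
    }

    int main() {
      // During incremental marking, a marked object is promoted even if young.
      printf("%d\n", ShouldBePromoted(PROMOTE_MARKED, true, false));    // 1
      printf("%d\n", ShouldBePromoted(DEFAULT_PROMOTION, true, false)); // 0
    }
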
void Heap::RecordWrite(Object* object, int offset, Object* o) {
if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
return;
}
- Page* page = Page::FromAddress(reinterpret_cast<Address>(object));
- Address slot = HeapObject::cast(object)->address() + offset;
- RememberedSet<OLD_TO_NEW>::Insert(page, slot);
+ RememberedSet<OLD_TO_NEW>::Insert(
+ Page::FromAddress(reinterpret_cast<Address>(object)),
+ HeapObject::cast(object)->address() + offset);
+}
+
+void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
+ if (InNewSpace(value)) {
+ RecordWriteIntoCodeSlow(host, rinfo, value);
+ }
+}
+
+void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
+ if (InNewSpace(array)) return;
+ Page* page = Page::FromAddress(reinterpret_cast<Address>(array));
+ for (int i = 0; i < length; i++) {
+ if (!InNewSpace(array->get(offset + i))) continue;
+ RememberedSet<OLD_TO_NEW>::Insert(
+ page,
+ reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
+ }
}
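
RecordFixedArrayElements records an old-to-new slot for each element of an old-space array that points into new space; arrays in new space need no entries since they are scanned anyway. A sketch of the same filtering loop over plain addresses (the new-space predicate and slot set are simulated):

    #include <cstdint>
    #include <set>

    // Simulated new-space predicate: addresses below the boundary are "new".
    const uintptr_t kNewSpaceEnd = 0x10000;
    bool InNewSpace(uintptr_t addr) { return addr < kNewSpaceEnd; }

    // Record a slot for every element of an old-space array pointing into
    // new space; the slot address is base + element offset, not the target.
    void RecordElements(uintptr_t array_base, const uintptr_t* elements,
                        int offset, int length,
                        std::set<uintptr_t>* old_to_new) {
      if (InNewSpace(array_base)) return;  // young arrays are scanned anyway
      for (int i = 0; i < length; i++) {
        if (!InNewSpace(elements[offset + i])) continue;  // old target: skip
        old_to_new->insert(array_base + (offset + i) * sizeof(uintptr_t));
      }
    }

    int main() {
      uintptr_t elems[] = {0x20000, 0x8000, 0x30000};
      std::set<uintptr_t> old_to_new;
      RecordElements(0x40000, elems, 0, 3, &old_to_new);  // records one slot
    }
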
+Address* Heap::store_buffer_top_address() {
+ return store_buffer()->top_address();
+}
bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
// Object migration is governed by the following rules:
@@ -451,13 +484,11 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
- // Check if there is potentially a memento behind the object. If
- // the last word of the memento is on another page we return
- // immediately.
Address object_address = object->address();
Address memento_address = object_address + object->Size();
Address last_memento_word_address = memento_address + kPointerSize;
- if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
+ // If the memento would be on another page, bail out immediately.
+ if (!Page::OnSamePage(object_address, last_memento_word_address)) {
return nullptr;
}
HeapObject* candidate = HeapObject::FromAddress(memento_address);
@@ -469,6 +500,22 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
if (candidate_map != allocation_memento_map()) {
return nullptr;
}
+
+ // Bail out if the memento is below the age mark, which can happen when
+ // mementos survived because a page got moved within new space.
+ Page* object_page = Page::FromAddress(object_address);
+ if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
+ Address age_mark =
+ reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
+ if (!object_page->Contains(age_mark)) {
+ return nullptr;
+ }
+ // Do an exact check in the case where the age mark is on the same page.
+ if (object_address < age_mark) {
+ return nullptr;
+ }
+ }
+
AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
// Depending on what the memento is used for, we might need to perform
@@ -485,7 +532,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
top = NewSpaceTop();
DCHECK(memento_address == top ||
memento_address + HeapObject::kHeaderSize <= top ||
- !NewSpacePage::OnSamePage(memento_address, top - 1));
+ !Page::OnSamePage(memento_address, top - 1));
if ((memento_address != top) && memento_candidate->IsValid()) {
return memento_candidate;
}
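
The new bail-out in FindAllocationMemento rejects stale mementos on pages flagged NEW_SPACE_BELOW_AGE_MARK: such a memento only counts if the age mark is on the page and the object sits at or above it. A condensed sketch of the decision:

    #include <cstdio>

    // Condensed decision rule; addresses are plain integers here.
    bool MementoUsable(bool page_below_age_mark, bool page_contains_age_mark,
                       unsigned object_address, unsigned age_mark) {
      if (!page_below_age_mark) return true;      // flag unset: nothing to do
      if (!page_contains_age_mark) return false;  // whole page below the mark
      // Exact check when the age mark lies on this very page.
      return object_address >= age_mark;
    }

    int main() {
      printf("%d\n", MementoUsable(true, true, 0x500, 0x400));  // 1: above
      printf("%d\n", MementoUsable(true, true, 0x300, 0x400));  // 0: below
    }
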
@@ -499,7 +546,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(HeapObject* object,
- HashMap* pretenuring_feedback) {
+ base::HashMap* pretenuring_feedback) {
DCHECK(InFromSpace(object));
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(object->map()->instance_type()))
@@ -527,7 +574,7 @@ void Heap::UpdateAllocationSite(HeapObject* object,
// to dereference the allocation site and rather have to postpone all checks
// till actually merging the data.
Address key = memento_candidate->GetAllocationSiteUnchecked();
- HashMap::Entry* e =
+ base::HashMap::Entry* e =
pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
DCHECK(e != nullptr);
(*bit_cast<intptr_t*>(&e->value))++;
@@ -585,12 +632,12 @@ void Heap::ExternalStringTable::Verify() {
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
DCHECK(heap_->InNewSpace(obj));
- DCHECK(obj != heap_->the_hole_value());
+ DCHECK(!obj->IsTheHole(heap_->isolate()));
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
DCHECK(!heap_->InNewSpace(obj));
- DCHECK(obj != heap_->the_hole_value());
+ DCHECK(!obj->IsTheHole(heap_->isolate()));
}
#endif
}
@@ -674,30 +721,41 @@ int Heap::NextScriptId() {
return last_id;
}
-
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}
-
void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
-
void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
-
void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
+void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
+ DCHECK(interpreter_entry_return_pc_offset() == Smi::FromInt(0));
+ set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
+}
+
+int Heap::GetNextTemplateSerialNumber() {
+ int next_serial_number = next_template_serial_number()->value() + 1;
+ set_next_template_serial_number(Smi::FromInt(next_serial_number));
+ return next_serial_number;
+}
+
+void Heap::SetSerializedTemplates(FixedArray* templates) {
+ DCHECK_EQ(empty_fixed_array(), serialized_templates());
+ set_serialized_templates(templates);
+}
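
GetNextTemplateSerialNumber hands out monotonically increasing ids, persisting the counter on the heap as a Smi. A sketch with an ordinary integer standing in for the Smi-backed field:

    #include <cassert>

    // Hypothetical stand-in for the Smi-backed heap slot.
    static int next_template_serial_number_field = 0;

    int GetNextTemplateSerialNumber() {
      // Pre-increment: the stored value is always the last id handed out.
      int next = next_template_serial_number_field + 1;
      next_template_serial_number_field = next;
      return next;
    }

    int main() {
      assert(GetNextTemplateSerialNumber() == 1);
      assert(GetNextTemplateSerialNumber() == 2);
    }
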
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 2b5ff9cd1a..7eb5af3b6a 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -6,18 +6,20 @@
#include "src/accessors.h"
#include "src/api.h"
-#include "src/ast/scopeinfo.h"
+#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
-#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/array-buffer-tracker-inl.h"
+#include "src/heap/code-stats.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
@@ -32,7 +34,6 @@
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
@@ -69,9 +70,10 @@ class IdleScavengeObserver : public AllocationObserver {
};
Heap::Heap()
- : amount_of_external_allocated_memory_(0),
- amount_of_external_allocated_memory_at_last_global_gc_(0),
- isolate_(NULL),
+ : external_memory_(0),
+ external_memory_limit_(kExternalAllocationLimit),
+ external_memory_at_last_mark_compact_(0),
+ isolate_(nullptr),
code_range_size_(0),
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
@@ -111,11 +113,9 @@ Heap::Heap()
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
old_gen_exhausted_(false),
- optimize_for_memory_usage_(false),
inline_allocation_disabled_(false),
total_regexp_code_generated_(0),
tracer_(nullptr),
- embedder_heap_tracer_(nullptr),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
promotion_ratio_(0),
@@ -136,11 +136,13 @@ Heap::Heap()
last_gc_time_(0.0),
scavenge_collector_(nullptr),
mark_compact_collector_(nullptr),
- store_buffer_(this),
+ memory_allocator_(nullptr),
+ store_buffer_(nullptr),
incremental_marking_(nullptr),
gc_idle_time_handler_(nullptr),
memory_reducer_(nullptr),
- object_stats_(nullptr),
+ live_object_stats_(nullptr),
+ dead_object_stats_(nullptr),
scavenge_job_(nullptr),
idle_scavenge_observer_(nullptr),
full_codegen_bytes_generated_(0),
@@ -157,13 +159,9 @@ Heap::Heap()
current_gc_flags_(Heap::kNoGCFlags),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
external_string_table_(this),
- chunks_queued_for_free_(NULL),
- concurrent_unmapping_tasks_active_(0),
- pending_unmapping_tasks_semaphore_(0),
gc_callbacks_depth_(0),
deserialization_complete_(false),
strong_roots_list_(NULL),
- array_buffer_tracker_(NULL),
heap_iterator_depth_(0),
force_oom_(false) {
// Allow build-time customization of the max semispace size. Building
@@ -191,8 +189,14 @@ Heap::Heap()
intptr_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
- return new_space_.Capacity() + old_space_->Capacity() +
- code_space_->Capacity() + map_space_->Capacity();
+ return new_space_.Capacity() + OldGenerationCapacity();
+}
+
+intptr_t Heap::OldGenerationCapacity() {
+ if (!HasBeenSetUp()) return 0;
+
+ return old_space_->Capacity() + code_space_->Capacity() +
+ map_space_->Capacity() + lo_space_->SizeOfObjects();
}
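
Capacity() is now new-space capacity plus the new OldGenerationCapacity() helper, which also counts large-object space, a component the old sum left out. Sketched as plain arithmetic with hypothetical sizes:

    #include <cstdio>

    // Hypothetical space sizes in bytes.
    const long kOldSpace = 64 << 20, kCodeSpace = 8 << 20, kMapSpace = 4 << 20;
    const long kLoSpace = 16 << 20, kNewSpace = 16 << 20;

    long OldGenerationCapacity() {
      // Large-object space is now part of the old-generation total.
      return kOldSpace + kCodeSpace + kMapSpace + kLoSpace;
    }

    int main() {
      // Prints: capacity = 108 MB
      printf("capacity = %ld MB\n",
             (kNewSpace + OldGenerationCapacity()) >> 20);
    }
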
@@ -225,7 +229,7 @@ size_t Heap::CommittedPhysicalMemory() {
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
- return isolate()->memory_allocator()->SizeExecutable();
+ return memory_allocator()->SizeExecutable();
}
@@ -296,7 +300,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
- if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+ if (memory_allocator()->MaxAvailable() <= new_space_.Size()) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
@@ -336,61 +340,58 @@ void Heap::ReportStatisticsBeforeGC() {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintIsolate(isolate_, "Memory allocator, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX "d KB\n",
- isolate_->memory_allocator()->Size() / KB,
- isolate_->memory_allocator()->Available() / KB);
- PrintIsolate(isolate_, "New space, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX
- "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintIsolate(isolate_, "Memory allocator, used: %6" V8PRIdPTR
+ " KB, available: %6" V8PRIdPTR " KB\n",
+ memory_allocator()->Size() / KB,
+ memory_allocator()->Available() / KB);
+ PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
+ " KB"
+ ", available: %6" V8PRIdPTR
+ " KB"
+ ", committed: %6" V8PRIdPTR " KB\n",
new_space_.Size() / KB, new_space_.Available() / KB,
new_space_.CommittedMemory() / KB);
- PrintIsolate(isolate_, "Old space, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX
- "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
+ " KB"
+ ", available: %6" V8PRIdPTR
+ " KB"
+ ", committed: %6" V8PRIdPTR " KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Code space, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX
- "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
+ " KB"
+ ", available: %6" V8PRIdPTR
+ " KB"
+ ", committed: %6" V8PRIdPTR " KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Map space, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX
- "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
+ " KB"
+ ", available: %6" V8PRIdPTR
+ " KB"
+ ", committed: %6" V8PRIdPTR " KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Large object space, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX
- "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
+ " KB"
+ ", available: %6" V8PRIdPTR
+ " KB"
+ ", committed: %6" V8PRIdPTR " KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "All spaces, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX
- "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
+ PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
+ " KB"
+ ", available: %6" V8PRIdPTR
+ " KB"
+ ", committed: %6" V8PRIdPTR " KB\n",
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
- PrintIsolate(
- isolate_, "External memory reported: %6" V8_PTR_PREFIX "d KB\n",
- static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
+ PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
+ static_cast<intptr_t>(external_memory_ / KB));
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
total_gc_time_ms_);
}
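
The V8_PTR_PREFIX "d" pairs in these format strings give way to a single V8PRIdPTR macro that already includes the conversion character, mirroring the standard PRIdPTR from <cinttypes>. A standalone sketch using the standard macro:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      intptr_t used_kb = 12345;
      // PRIdPTR expands to the complete conversion spec for intptr_t, so the
      // format string is built by adjacent-literal concatenation; no "d"
      // suffix needs to follow the macro.
      printf("Memory allocator, used: %6" PRIdPTR " KB\n", used_kb);
    }
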
-
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
@@ -501,11 +502,10 @@ void Heap::RepairFreeListsAfterDeserialization() {
}
}
-
void Heap::MergeAllocationSitePretenuringFeedback(
- const HashMap& local_pretenuring_feedback) {
+ const base::HashMap& local_pretenuring_feedback) {
AllocationSite* site = nullptr;
- for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
+ for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
local_entry != nullptr;
local_entry = local_pretenuring_feedback.Next(local_entry)) {
site = reinterpret_cast<AllocationSite*>(local_entry->key);
@@ -534,8 +534,8 @@ void Heap::MergeAllocationSitePretenuringFeedback(
class Heap::PretenuringScope {
public:
explicit PretenuringScope(Heap* heap) : heap_(heap) {
- heap_->global_pretenuring_feedback_ =
- new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
+ heap_->global_pretenuring_feedback_ = new base::HashMap(
+ base::HashMap::PointersMatch, kInitialFeedbackCapacity);
}
~PretenuringScope() {
@@ -561,7 +561,7 @@ void Heap::ProcessPretenuringFeedback() {
// Step 1: Digest feedback for recorded allocation sites.
bool maximum_size_scavenge = MaximumSizeScavenge();
- for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
+ for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
allocation_sites++;
site = reinterpret_cast<AllocationSite*>(e->key);
@@ -677,13 +677,6 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->number_of_symbols()->Set(
string_table()->NumberOfElements());
- if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
- isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
- static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
- (crankshaft_codegen_bytes_generated_ +
- full_codegen_bytes_generated_)));
- }
-
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
@@ -762,10 +755,10 @@ void Heap::PreprocessStackTraces() {
// If GC happens while adding a stack trace to the weak fixed array,
// which has been copied into a larger backing store, we may run into
// a stack trace that has already been preprocessed. Guard against this.
- if (!maybe_code->IsCode()) break;
- Code* code = Code::cast(maybe_code);
+ if (!maybe_code->IsAbstractCode()) break;
+ AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
int offset = Smi::cast(elements->get(j + 3))->value();
- int pos = code->SourcePosition(offset);
+ int pos = abstract_code->SourcePosition(offset);
elements->set(j + 2, Smi::FromInt(pos));
}
}
@@ -818,10 +811,10 @@ void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
PrintF("[IncrementalMarking] (%s).\n", gc_reason);
}
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
HistogramTimerScope incremental_marking_scope(
isolate()->counters()->gc_incremental_marking_finalize());
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
{
GCCallbacksScope scope(this);
@@ -914,7 +907,10 @@ void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
if (incremental_marking()->CanBeActivated()) {
StartIncrementalMarking(
i::Heap::kNoGCFlags,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
+ static_cast<GCCallbackFlags>(
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing |
+ kGCCallbackFlagCollectAllExternalMemory),
+ gc_reason);
} else {
CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
kGCCallbackFlagSynchronousPhantomCallbackProcessing);
@@ -941,7 +937,7 @@ void Heap::EnsureFillerObjectAtTop() {
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
Address to_top = new_space_.top();
- NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
+ Page* page = Page::FromAddress(to_top - kPointerSize);
if (page->Contains(to_top)) {
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
@@ -976,10 +972,6 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
!ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() && FLAG_incremental_marking &&
OldGenerationAllocationLimitReached()) {
- // Make progress in incremental marking.
- const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
- incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
- IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (!incremental_marking()->IsComplete() &&
!mark_compact_collector()->marking_deque_.IsEmpty() &&
!FLAG_gc_global) {
@@ -1102,13 +1094,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
DCHECK(array->map() != fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
- if (!InNewSpace(array)) {
- for (int i = 0; i < len; i++) {
- RecordWrite(array, array->OffsetOfElementAt(dst_index + i),
- dst_objects[i]);
- }
- }
- incremental_marking()->IterateBlackObject(array);
+ FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}
@@ -1120,9 +1106,11 @@ class StringTableVerifier : public ObjectVisitor {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*p);
+ Isolate* isolate = object->GetIsolate();
// Check that the string is actually internalized.
- CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
- (*p)->IsInternalizedString());
+ CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
+ object->IsInternalizedString());
}
}
}
@@ -1135,8 +1123,7 @@ static void VerifyStringTable(Heap* heap) {
}
#endif // VERIFY_HEAP
-
-bool Heap::ReserveSpace(Reservation* reservations) {
+bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
@@ -1148,7 +1135,30 @@ bool Heap::ReserveSpace(Reservation* reservations) {
DCHECK_LE(1, reservation->length());
if (reservation->at(0).size == 0) continue;
bool perform_gc = false;
- if (space == LO_SPACE) {
+ if (space == MAP_SPACE) {
+ // We allocate each map individually to avoid fragmentation.
+ maps->Clear();
+ DCHECK_EQ(1, reservation->length());
+ int num_maps = reservation->at(0).size / Map::kSize;
+ for (int i = 0; i < num_maps; i++) {
+ // The deserializer will update the skip list.
+ AllocationResult allocation = map_space()->AllocateRawUnaligned(
+ Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
+ HeapObject* free_space = nullptr;
+ if (allocation.To(&free_space)) {
+ // Mark with a free list node, in case we have a GC before
+ // deserializing.
+ Address free_space_address = free_space->address();
+ CreateFillerObjectAt(free_space_address, Map::kSize,
+ ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+ maps->Add(free_space_address);
+ } else {
+ perform_gc = true;
+ break;
+ }
+ }
+ } else if (space == LO_SPACE) {
+ // Just check that we can allocate during deserialization.
DCHECK_EQ(1, reservation->length());
perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
} else {
@@ -1170,7 +1180,7 @@ bool Heap::ReserveSpace(Reservation* reservations) {
// deserializing.
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, size,
- ClearRecordedSlots::kNo);
+ ClearRecordedSlots::kNo, ClearBlackArea::kNo);
DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
@@ -1221,15 +1231,15 @@ void Heap::ClearNormalizedMapCaches() {
}
Object* context = native_contexts_list();
- while (!context->IsUndefined()) {
+ while (!context->IsUndefined(isolate())) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
Object* cache =
Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
- if (!cache->IsUndefined()) {
+ if (!cache->IsUndefined(isolate())) {
NormalizedMapCache::cast(cache)->Clear();
}
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ context = Context::cast(context)->next_context_link();
}
}
@@ -1349,8 +1359,8 @@ bool Heap::PerformGarbageCollection(
intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
- amount_of_external_allocated_memory_at_last_global_gc_ =
- amount_of_external_allocated_memory_;
+ external_memory_at_last_mark_compact_ = external_memory_;
+ external_memory_limit_ = external_memory_ + kExternalAllocationLimit;
SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
@@ -1475,46 +1485,10 @@ void Heap::MarkCompactPrologue() {
CompletelyClearInstanceofCache();
FlushNumberStringCache();
- if (FLAG_cleanup_code_caches_at_gc) {
- polymorphic_code_cache()->set_cache(undefined_value());
- }
-
ClearNormalizedMapCaches();
}
-#ifdef VERIFY_HEAP
-// Visitor class to verify pointers in code or data space do not point into
-// new space.
-class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
- public:
- explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
-
- void VisitPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
- }
- }
- }
-
- private:
- Heap* heap_;
-};
-
-
-static void VerifyNonPointerSpacePointers(Heap* heap) {
- // Verify that there are no pointers to new space in spaces where we
- // do not expect them.
- VerifyNonPointerSpacePointersVisitor v(heap);
- HeapObjectIterator code_it(heap->code_space());
- for (HeapObject* object = code_it.Next(); object != NULL;
- object = code_it.Next())
- object->Iterate(&v);
-}
-#endif // VERIFY_HEAP
-
-
void Heap::CheckNewSpaceExpansionCriteria() {
if (FLAG_experimental_new_space_growth_heuristic) {
if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
@@ -1545,15 +1519,12 @@ static bool IsUnmodifiedHeapObject(Object** p) {
if (object->IsSmi()) return false;
HeapObject* heap_object = HeapObject::cast(object);
if (!object->IsJSObject()) return false;
- Object* obj_constructor = (JSObject::cast(object))->map()->GetConstructor();
- if (!obj_constructor->IsJSFunction()) return false;
- JSFunction* constructor = JSFunction::cast(obj_constructor);
- if (!constructor->shared()->IsApiFunction()) return false;
- if (constructor != nullptr &&
- constructor->initial_map() == heap_object->map()) {
- return true;
- }
- return false;
+ JSObject* js_object = JSObject::cast(object);
+ if (!js_object->WasConstructedFromApiFunction()) return false;
+ JSFunction* constructor =
+ JSFunction::cast(js_object->map()->GetConstructor());
+
+ return constructor->initial_map() == heap_object->map();
}
@@ -1565,7 +1536,8 @@ void PromotionQueue::Initialize() {
front_ = rear_ =
reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
limit_ = reinterpret_cast<struct Entry*>(
- Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
+ Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
+ ->area_start());
emergency_stack_ = NULL;
}
@@ -1573,7 +1545,7 @@ void PromotionQueue::Initialize() {
void PromotionQueue::RelocateQueueHead() {
DCHECK(emergency_stack_ == NULL);
- Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
struct Entry* head_start = rear_;
struct Entry* head_end =
Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
@@ -1627,9 +1599,7 @@ void Heap::Scavenge() {
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
-#endif
+ mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
gc_state_ = SCAVENGE;
@@ -1641,7 +1611,12 @@ void Heap::Scavenge() {
scavenge_collector_->SelectScavengingVisitorsTable();
- array_buffer_tracker()->PrepareDiscoveryInNewSpace();
+ if (UsingEmbedderHeapTracer()) {
+ // Register found wrappers with the embedder so it can add them to its
+ // marking deque and correctly handle the V8 scavenger collecting them,
+ // either by keeping the wrappables alive or by cleaning its marking deque.
+ mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
+ }
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
@@ -1668,6 +1643,7 @@ void Heap::Scavenge() {
Address new_space_front = new_space_.ToSpaceStart();
promotion_queue_.Initialize();
+ PromotionMode promotion_mode = CurrentPromotionMode();
ScavengeVisitor scavenge_visitor(this);
if (FLAG_scavenge_reclaim_unmodified_objects) {
@@ -1684,8 +1660,21 @@ void Heap::Scavenge() {
{
// Copy objects reachable from the old generation.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
- RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
- Scavenger::ScavengeObject);
+ RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
+ return Scavenger::CheckAndScavengeObject(this, addr);
+ });
+
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ this, [this](SlotType type, Address host_addr, Address addr) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate(), type, addr, [this](Object** addr) {
+ // We expect objects referenced by code to be long-lived. If we did not
+ // force their promotion, we would have to clear old_to_new slots in
+ // dead code objects after mark-compact.
+ return Scavenger::CheckAndScavengeObject(
+ this, reinterpret_cast<Address>(addr));
+ });
+ });
}
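
The scavenger now walks the OLD_TO_NEW remembered set through lambdas: untyped slots via Iterate and code-embedded slots via IterateTyped, with each callback deciding per slot whether it stays recorded. A sketch of callback-driven slot iteration; the KEEP_SLOT/REMOVE_SLOT names are assumptions here:

    #include <cstdint>
    #include <functional>
    #include <vector>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    // Iterate recorded slots, dropping the ones the callback retires.
    void IterateSlots(std::vector<uintptr_t>* slots,
                      const std::function<SlotCallbackResult(uintptr_t)>& cb) {
      std::vector<uintptr_t> kept;
      for (uintptr_t slot : *slots) {
        if (cb(slot) == KEEP_SLOT) kept.push_back(slot);
      }
      slots->swap(kept);
    }

    int main() {
      std::vector<uintptr_t> slots = {0x1000, 0x2000, 0x3000};
      // Keep only slots whose (simulated) target is still in new space.
      IterateSlots(&slots, [](uintptr_t slot) {
        return slot < 0x2800 ? KEEP_SLOT : REMOVE_SLOT;
      });
    }
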
{
@@ -1707,7 +1696,8 @@ void Heap::Scavenge() {
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ new_space_front =
+ DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
}
if (FLAG_scavenge_reclaim_unmodified_objects) {
@@ -1716,12 +1706,14 @@ void Heap::Scavenge() {
isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
&scavenge_visitor);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ new_space_front =
+ DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
} else {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
while (isolate()->global_handles()->IterateObjectGroups(
&scavenge_visitor, &IsUnscavengedHeapObject)) {
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ new_space_front =
+ DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
}
isolate()->global_handles()->RemoveObjectGroups();
isolate()->global_handles()->RemoveImplicitRefGroups();
@@ -1731,7 +1723,8 @@ void Heap::Scavenge() {
isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
&scavenge_visitor);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ new_space_front =
+ DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
}
UpdateNewSpaceReferencesInExternalStringTable(
@@ -1749,7 +1742,7 @@ void Heap::Scavenge() {
// Set age mark.
new_space_.set_age_mark(new_space_.top());
- array_buffer_tracker()->FreeDead(true);
+ ArrayBufferTracker::FreeDeadInNewSpace(this);
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
@@ -1778,12 +1771,6 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- external_string_table_.Verify();
- }
-#endif
-
if (external_string_table_.new_space_strings_.is_empty()) return;
Object** start = &external_string_table_.new_space_strings_[0];
@@ -1791,7 +1778,6 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
Object** last = start;
for (Object** p = start; p < end; ++p) {
- DCHECK(InFromSpace(*p));
String* target = updater_func(this, p);
if (target == NULL) continue;
@@ -1920,22 +1906,29 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
external_string_table_.Iterate(&external_string_table_visitor);
}
-
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front) {
+ Address new_space_front,
+ PromotionMode promotion_mode) {
do {
SemiSpace::AssertValidRange(new_space_front, new_space_.top());
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
while (new_space_front != new_space_.top()) {
- if (!NewSpacePage::IsAtEnd(new_space_front)) {
+ if (!Page::IsAlignedToPageSize(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front +=
- StaticScavengeVisitor::IterateBody(object->map(), object);
+ if (promotion_mode == PROMOTE_MARKED) {
+ new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
+ object->map(), object);
+ } else {
+ new_space_front +=
+ StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
+ object->map(), object);
+ }
} else {
- new_space_front =
- NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+ new_space_front = Page::FromAllocationAreaAddress(new_space_front)
+ ->next_page()
+ ->area_start();
}
}
@@ -2035,19 +2028,19 @@ HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
- return array_buffer_tracker()->RegisterNew(buffer);
+ ArrayBufferTracker::RegisterNew(this, buffer);
}
void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
- return array_buffer_tracker()->Unregister(buffer);
+ ArrayBufferTracker::Unregister(this, buffer);
}
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
- Max(kMinimumOldGenerationAllocationLimit,
+ Max(MinimumAllocationLimitGrowingStep(),
static_cast<intptr_t>(
static_cast<double>(old_generation_allocation_limit_) *
(tracer()->AverageSurvivalRatio() / 100)));
@@ -2142,7 +2135,8 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
DCHECK(chunk->owner()->identity() == space);
#endif
- CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo,
+ ClearBlackArea::kNo);
return obj;
}
@@ -2174,6 +2168,21 @@ const Heap::StructTable Heap::struct_table[] = {
#undef STRUCT_TABLE_ELEMENT
};
+namespace {
+
+void FinalizePartialMap(Heap* heap, Map* map) {
+ map->set_code_cache(heap->empty_fixed_array());
+ map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
+ map->set_raw_transitions(Smi::FromInt(0));
+ map->set_instance_descriptors(heap->empty_descriptor_array());
+ if (FLAG_unbox_double_fields) {
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ map->set_prototype(heap->null_value());
+ map->set_constructor_or_backpointer(heap->null_value());
+}
+
+} // namespace
bool Heap::CreateInitialMaps() {
HeapObject* obj = nullptr;
@@ -2195,8 +2204,10 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+ fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
+ ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
#undef ALLOCATE_PARTIAL_MAP
}
@@ -2222,6 +2233,12 @@ bool Heap::CreateInitialMaps() {
set_undefined_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kUndefined);
DCHECK(!InNewSpace(undefined_value()));
+ {
+ AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
+ if (!allocation.To(&obj)) return false;
+ }
+ set_the_hole_value(Oddball::cast(obj));
+ Oddball::cast(obj)->set_kind(Oddball::kTheHole);
// Set preliminary exception sentinel value before actually initializing it.
set_exception(null_value());
@@ -2234,55 +2251,13 @@ bool Heap::CreateInitialMaps() {
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
- meta_map()->set_code_cache(empty_fixed_array());
- meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- meta_map()->set_raw_transitions(Smi::FromInt(0));
- meta_map()->set_instance_descriptors(empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
-
- fixed_array_map()->set_code_cache(empty_fixed_array());
- fixed_array_map()->set_dependent_code(
- DependentCode::cast(empty_fixed_array()));
- fixed_array_map()->set_raw_transitions(Smi::FromInt(0));
- fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- fixed_array_map()->set_layout_descriptor(
- LayoutDescriptor::FastPointerLayout());
- }
-
- undefined_map()->set_code_cache(empty_fixed_array());
- undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- undefined_map()->set_raw_transitions(Smi::FromInt(0));
- undefined_map()->set_instance_descriptors(empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- undefined_map()->set_layout_descriptor(
- LayoutDescriptor::FastPointerLayout());
- }
-
- null_map()->set_code_cache(empty_fixed_array());
- null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- null_map()->set_raw_transitions(Smi::FromInt(0));
- null_map()->set_instance_descriptors(empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- null_map()->set_is_undetectable();
-
- // Fix prototype object for existing maps.
- meta_map()->set_prototype(null_value());
- meta_map()->set_constructor_or_backpointer(null_value());
-
- fixed_array_map()->set_prototype(null_value());
- fixed_array_map()->set_constructor_or_backpointer(null_value());
-
- undefined_map()->set_prototype(null_value());
- undefined_map()->set_constructor_or_backpointer(null_value());
+ FinalizePartialMap(this, meta_map());
+ FinalizePartialMap(this, fixed_array_map());
+ FinalizePartialMap(this, undefined_map());
undefined_map()->set_is_undetectable();
-
- null_map()->set_prototype(null_value());
- null_map()->set_constructor_or_backpointer(null_value());
+ FinalizePartialMap(this, null_map());
+ null_map()->set_is_undetectable();
+ FinalizePartialMap(this, the_hole_map());
{ // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name) \
@@ -2304,7 +2279,8 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
- DCHECK(fixed_array_map() != fixed_cow_array_map());
+ fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
+ DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
@@ -2320,7 +2296,6 @@ bool Heap::CreateInitialMaps() {
#undef ALLOCATE_SIMD128_MAP
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
- ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
Context::BOOLEAN_FUNCTION_INDEX);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
@@ -2329,6 +2304,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
+ ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
@@ -2345,8 +2321,9 @@ bool Heap::CreateInitialMaps() {
}
{ // Create a separate external one byte string map for native sources.
- AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
- ExternalOneByteString::kSize);
+ AllocationResult allocation =
+ AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
+ ExternalOneByteString::kShortSize);
if (!allocation.To(&obj)) return false;
Map* map = Map::cast(obj);
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
@@ -2354,6 +2331,7 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+ fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
@@ -2385,6 +2363,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, unseeded_number_dictionary)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
@@ -2579,12 +2558,7 @@ void Heap::CreateApiObjects() {
// appears to be no benefit to optimizing this case.
new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
set_neander_map(*new_neander_map);
-
- Handle<JSObject> listeners = factory->NewNeanderObject();
- Handle<FixedArray> elements = factory->NewFixedArray(2);
- elements->set(0, Smi::FromInt(0));
- listeners->set_elements(*elements);
- set_message_listeners(*listeners);
+ set_message_listeners(*TemplateList::New(isolate(), 2));
}
@@ -2640,14 +2614,12 @@ void Heap::CreateInitialObjects() {
set_nan_value(*factory->NewHeapNumber(
std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
+ set_hole_nan_value(*factory->NewHeapNumber(bit_cast<double>(kHoleNanInt64),
+ IMMUTABLE, TENURED));
set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
set_minus_infinity_value(
*factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
- // The hole has not been created yet, but we want to put something
- // predictable in the gaps in the string table, so lets make that Smi zero.
- set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
-
// Allocate initial string table.
set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
@@ -2655,56 +2627,60 @@ void Heap::CreateInitialObjects() {
// Finish initializing oddballs after creating the string table.
Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
- factory->nan_value(), false, "undefined",
- Oddball::kUndefined);
+ factory->nan_value(), "undefined", Oddball::kUndefined);
// Initialize the null_value.
Oddball::Initialize(isolate(), factory->null_value(), "null",
- handle(Smi::FromInt(0), isolate()), false, "object",
+ handle(Smi::FromInt(0), isolate()), "object",
Oddball::kNull);
+ // Initialize the_hole_value.
+ Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
+ factory->hole_nan_value(), "undefined",
+ Oddball::kTheHole);
+
// Initialize the true_value.
Oddball::Initialize(isolate(), factory->true_value(), "true",
- handle(Smi::FromInt(1), isolate()), true, "boolean",
+ handle(Smi::FromInt(1), isolate()), "boolean",
Oddball::kTrue);
// Initialize the false_value.
Oddball::Initialize(isolate(), factory->false_value(), "false",
- handle(Smi::FromInt(0), isolate()), false, "boolean",
+ handle(Smi::FromInt(0), isolate()), "boolean",
Oddball::kFalse);
- set_the_hole_value(*factory->NewOddball(
- factory->the_hole_map(), "hole", handle(Smi::FromInt(-1), isolate()),
- false, "undefined", Oddball::kTheHole));
-
set_uninitialized_value(
*factory->NewOddball(factory->uninitialized_map(), "uninitialized",
- handle(Smi::FromInt(-1), isolate()), false,
- "undefined", Oddball::kUninitialized));
+ handle(Smi::FromInt(-1), isolate()), "undefined",
+ Oddball::kUninitialized));
set_arguments_marker(
*factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
- handle(Smi::FromInt(-4), isolate()), false,
- "undefined", Oddball::kArgumentsMarker));
+ handle(Smi::FromInt(-4), isolate()), "undefined",
+ Oddball::kArgumentsMarker));
set_no_interceptor_result_sentinel(*factory->NewOddball(
factory->no_interceptor_result_sentinel_map(),
"no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
- false, "undefined", Oddball::kOther));
+ "undefined", Oddball::kOther));
set_termination_exception(*factory->NewOddball(
factory->termination_exception_map(), "termination_exception",
- handle(Smi::FromInt(-3), isolate()), false, "undefined",
- Oddball::kOther));
+ handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
set_exception(*factory->NewOddball(factory->exception_map(), "exception",
- handle(Smi::FromInt(-5), isolate()), false,
+ handle(Smi::FromInt(-5), isolate()),
"undefined", Oddball::kException));
- set_optimized_out(
- *factory->NewOddball(factory->optimized_out_map(), "optimized_out",
- handle(Smi::FromInt(-6), isolate()), false,
- "undefined", Oddball::kOptimizedOut));
+ set_optimized_out(*factory->NewOddball(factory->optimized_out_map(),
+ "optimized_out",
+ handle(Smi::FromInt(-6), isolate()),
+ "undefined", Oddball::kOptimizedOut));
+
+ set_stale_register(
+ *factory->NewOddball(factory->stale_register_map(), "stale_register",
+ handle(Smi::FromInt(-7), isolate()), "undefined",
+ Oddball::kStaleRegister));
for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
Handle<String> str =
@@ -2716,13 +2692,6 @@ void Heap::CreateInitialObjects() {
// expanding the dictionary during bootstrapping.
set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
- // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
- // is set to avoid expanding the dictionary during bootstrapping.
- set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
-
- set_polymorphic_code_cache(PolymorphicCodeCache::cast(
- *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
-
set_instanceof_cache_function(Smi::FromInt(0));
set_instanceof_cache_map(Smi::FromInt(0));
set_instanceof_cache_answer(Smi::FromInt(0));
@@ -2740,14 +2709,6 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
}
- // The {hidden_properties_symbol} is special because it is the only name with
- // hash code zero. This ensures that it will always be the first entry as
- // sorted by hash code in descriptor arrays. It is used to identify the hidden
- // properties in JSObjects.
- // kIsNotArrayIndexMask is a computed hash with value zero.
- Symbol::cast(roots_[khidden_properties_symbolRootIndex])
- ->set_hash_field(Name::kIsNotArrayIndexMask);
-
{
HandleScope scope(isolate());
#define SYMBOL_INIT(name, description) \
@@ -2768,8 +2729,6 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
}
- CreateFixedStubs();
-
// Allocate the dictionary of intrinsic function names.
Handle<NameDictionary> intrinsic_names =
NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
@@ -2812,10 +2771,6 @@ void Heap::CreateInitialObjects() {
// The symbol registry is initialized lazily.
set_symbol_registry(Smi::FromInt(0));
- // Allocate object to hold object observation state.
- set_observation_state(*factory->NewJSObjectFromMap(
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
-
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
set_microtask_queue(empty_fixed_array());
@@ -2851,6 +2806,20 @@ void Heap::CreateInitialObjects() {
}
{
+ Handle<FixedArray> empty_literals_array =
+ factory->NewFixedArray(1, TENURED);
+ empty_literals_array->set(0, *factory->empty_fixed_array());
+ set_empty_literals_array(*empty_literals_array);
+ }
+
+ {
+ Handle<FixedArray> empty_sloppy_arguments_elements =
+ factory->NewFixedArray(2, TENURED);
+ empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
+ set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
+ }
+
+ {
Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
set_empty_weak_cell(*cell);
cell->clear();
@@ -2871,6 +2840,10 @@ void Heap::CreateInitialObjects() {
*WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
TENURED));
+ set_weak_new_space_object_to_code_list(
+ ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
+ weak_new_space_object_to_code_list()->SetLength(0);
+
set_script_list(Smi::FromInt(0));
Handle<SeededNumberDictionary> slow_element_dictionary =
@@ -2882,6 +2855,7 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+ set_next_template_serial_number(Smi::FromInt(0));
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
@@ -2896,10 +2870,20 @@ void Heap::CreateInitialObjects() {
cell->set_value(the_hole_value());
set_empty_property_cell(*cell);
- Handle<PropertyCell> species_cell = factory->NewPropertyCell();
- species_cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ cell = factory->NewPropertyCell();
+ cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ set_has_instance_protector(*cell);
+
+ Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+ set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
+
+ Handle<Cell> species_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
set_species_protector(*species_cell);
+ set_serialized_templates(empty_fixed_array());
+
set_weak_stack_trace_list(Smi::FromInt(0));
set_noscript_shared_function_infos(Smi::FromInt(0));
@@ -2917,7 +2901,6 @@ void Heap::CreateInitialObjects() {
isolate_->compilation_cache()->Clear();
}
-
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
case kNumberStringCacheRootIndex:
@@ -2925,8 +2908,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kInstanceofCacheMapRootIndex:
case kInstanceofCacheAnswerRootIndex:
case kCodeStubsRootIndex:
- case kNonMonomorphicCacheRootIndex:
- case kPolymorphicCodeCacheRootIndex:
case kEmptyScriptRootIndex:
case kSymbolRegistryRootIndex:
case kScriptListRootIndex:
@@ -2934,9 +2915,11 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kMicrotaskQueueRootIndex:
case kDetachedContextsRootIndex:
case kWeakObjectToCodeTableRootIndex:
+ case kWeakNewSpaceObjectToCodeListRootIndex:
case kRetainedMapsRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
+ case kSerializedTemplatesRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -3078,6 +3061,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_frame_size(frame_size);
instance->set_parameter_count(parameter_count);
instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
+ instance->set_osr_loop_nesting_level(0);
instance->set_constant_pool(constant_pool);
instance->set_handler_table(empty_fixed_array());
instance->set_source_position_table(empty_byte_array());
@@ -3086,8 +3070,8 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
return result;
}
-void Heap::CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots mode) {
+void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
+ ClearBlackArea black_area_mode) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
@@ -3105,6 +3089,17 @@ void Heap::CreateFillerObjectAt(Address addr, int size,
if (mode == ClearRecordedSlots::kYes) {
ClearRecordedSlotRange(addr, addr + size);
}
+
+ // If the location where the filler is created is within a black area we have
+ // to clear the mark bits of the filler space.
+ if (black_area_mode == ClearBlackArea::kYes &&
+ incremental_marking()->black_allocation() &&
+ Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
+ Page* page = Page::FromAddress(addr);
+ page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
+ page->AddressToMarkbitIndex(addr + size));
+ }
+
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are NULL.
DCHECK((filler->map() == NULL && !deserialization_complete_) ||
@@ -3122,14 +3117,8 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
if (lo_space()->Contains(object)) return false;
- Page* page = Page::FromAddress(address);
- // We can move the object start if:
- // (1) the object is not in old space,
- // (2) the page of the object was already swept,
- // (3) the page was already concurrently swept. This case is an optimization
- // for concurrent sweeping. The WasSwept predicate for concurrently swept
- // pages is set after sweeping all pages.
- return !InOldSpace(object) || page->SweepingDone();
+ // We can move the object start if the page was already swept.
+ return Page::FromAddress(address)->SweepingDone();
}
@@ -3142,7 +3131,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
- Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
+ Marking::IsBlack(ObjectMarking::MarkBitFrom(object->address()))) {
if (mode == SEQUENTIAL_TO_SWEEPER) {
MemoryChunk::IncrementLiveBytesFromGC(object, by);
} else {
@@ -3154,6 +3143,7 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
+ CHECK_NOT_NULL(object);
DCHECK(!object->IsFixedTypedArrayBase());
DCHECK(!object->IsByteArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
@@ -3166,10 +3156,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
DCHECK(!lo_space()->Contains(object));
DCHECK(object->map() != fixed_cow_array_map());
- // Ensure that the no handle-scope has more than one pointer to the same
- // backing-store.
- SLOW_DCHECK(CountHandlesForObject(object) <= 1);
-
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
@@ -3178,14 +3164,20 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
DCHECK(elements_to_trim <= len);
// Calculate location of new array start.
- Address new_start = object->address() + bytes_to_trim;
+ Address old_start = object->address();
+ Address new_start = old_start + bytes_to_trim;
+
+ // Transfer the mark bits to their new location if the object is not within
+ // a black area.
+ if (!incremental_marking()->black_allocation() ||
+ !Marking::IsBlack(ObjectMarking::MarkBitFrom(new_start))) {
+ IncrementalMarking::TransferMark(this, old_start, new_start);
+ }
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- CreateFillerObjectAt(object->address(), bytes_to_trim,
- ClearRecordedSlots::kYes);
-
+ CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
@@ -3194,13 +3186,18 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int new_start_index = elements_to_trim * (element_size / kPointerSize);
former_start[new_start_index] = map;
former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
+
FixedArrayBase* new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Maintain consistency of live bytes during incremental marking
- Marking::TransferMark(this, object->address(), new_start);
AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
+ // Remove recorded slots for the new map and length offset.
+ ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
+ ClearRecordedSlot(new_object, HeapObject::RawField(
+ new_object, FixedArrayBase::kLengthOffset));
+
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object->Size());
return new_object;
@@ -3372,8 +3369,8 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
result->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
- DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
- isolate_->code_range()->contains(code->address()) ||
+ DCHECK(!memory_allocator()->code_range()->valid() ||
+ memory_allocator()->code_range()->contains(code->address()) ||
object_size <= code_space()->AreaSize());
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
@@ -3398,13 +3395,15 @@ AllocationResult Heap::CopyCode(Code* code) {
// Relocate the copy.
DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
- DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
- isolate_->code_range()->contains(code->address()) ||
+ DCHECK(!memory_allocator()->code_range()->valid() ||
+ memory_allocator()->code_range()->contains(code->address()) ||
obj_size <= code_space()->AreaSize());
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
incremental_marking()->IterateBlackObject(new_code);
+ // Record all references to embedded objects in the new code object.
+ RecordWritesIntoCode(new_code);
return new_code;
}
@@ -3425,63 +3424,11 @@ AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
copy->set_handler_table(bytecode_array->handler_table());
copy->set_source_position_table(bytecode_array->source_position_table());
copy->set_interrupt_budget(bytecode_array->interrupt_budget());
+ copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
bytecode_array->CopyBytecodesTo(copy);
return copy;
}
-AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
- ByteArray* reloc_info_array = nullptr;
- {
- AllocationResult allocation =
- AllocateByteArray(reloc_info.length(), TENURED);
- if (!allocation.To(&reloc_info_array)) return allocation;
- }
-
- int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
-
- int new_obj_size = Code::SizeFor(new_body_size);
-
- Address old_addr = code->address();
-
- size_t relocation_offset =
- static_cast<size_t>(code->instruction_end() - old_addr);
-
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- // Copy code object.
- Address new_addr = result->address();
-
- // Copy header and instructions.
- CopyBytes(new_addr, old_addr, relocation_offset);
-
- Code* new_code = Code::cast(result);
- new_code->set_relocation_info(reloc_info_array);
-
- // Copy patched rinfo.
- CopyBytes(new_code->relocation_start(), reloc_info.start(),
- static_cast<size_t>(reloc_info.length()));
-
- // Relocate the copy.
- DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
- DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
- isolate_->code_range()->contains(code->address()) ||
- new_obj_size <= code_space()->AreaSize());
-
- new_code->Relocate(new_addr - old_addr);
- // We have to iterate over over the object and process its pointers when
- // black allocation is on.
- incremental_marking()->IterateBlackObject(new_code);
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) code->ObjectVerify();
-#endif
- return new_code;
-}
-
-
void Heap::InitializeAllocationMemento(AllocationMemento* memento,
AllocationSite* allocation_site) {
memento->set_map_no_write_barrier(allocation_memento_map());
@@ -3579,7 +3526,8 @@ AllocationResult Heap::AllocateJSObjectFromMap(
// Initialize the JSObject.
InitializeJSObjectFromMap(js_obj, properties, map);
DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
- js_obj->HasFastStringWrapperElements());
+ js_obj->HasFastStringWrapperElements() ||
+ js_obj->HasFastArgumentsElements());
return js_obj;
}
@@ -3605,11 +3553,13 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Make the clone.
Map* map = source->map();
- // We can only clone regexps, normal objects, api objects or arrays. Copying
- // anything else will break invariants.
+ // We can only clone regexps, normal objects, api objects, errors or arrays.
+ // Copying anything else will break invariants.
CHECK(map->instance_type() == JS_REGEXP_TYPE ||
map->instance_type() == JS_OBJECT_TYPE ||
+ map->instance_type() == JS_ERROR_TYPE ||
map->instance_type() == JS_ARRAY_TYPE ||
+ map->instance_type() == JS_API_OBJECT_TYPE ||
map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
int object_size = map->instance_size();
@@ -3915,17 +3865,20 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
if (!allocation.To(&obj)) return allocation;
}
obj->set_map_no_write_barrier(map);
- if (InNewSpace(obj)) {
+
+ FixedArray* result = FixedArray::cast(obj);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ // Eliminate the write barrier if possible.
+ if (mode == SKIP_WRITE_BARRIER) {
CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
- FixedArray* result = FixedArray::cast(obj);
- result->set_length(len);
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ // Slow case: Just copy the content one-by-one.
+ result->set_length(len);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
}
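The CopyFixedArrayWithMap rewrite above decides between a raw block copy and a barrier-aware per-slot copy from the destination's write-barrier mode instead of a plain InNewSpace check. A rough sketch of the same split, with simplified types (this is not the V8 API):

    #include <cstring>

    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    void CopySlots(void** dst, void** src, int len, WriteBarrierMode mode) {
      if (mode == SKIP_WRITE_BARRIER) {
        // Fast path: no barrier needed, so the payload is block-copied.
        std::memcpy(dst, src, len * sizeof(void*));
        return;
      }
      // Slow path: one slot at a time; in the hunk above each store goes
      // through result->set(i, src->get(i), mode), which emits the barrier.
      for (int i = 0; i < len; i++) dst[i] = src[i];
    }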
@@ -4185,19 +4138,24 @@ bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
return committed - used > used + kSlack;
}
-void Heap::SetOptimizeForMemoryUsage() {
+bool Heap::ShouldOptimizeForMemoryUsage() {
+ return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
+ HighMemoryPressure() || IsLowMemoryDevice();
+}
+
+void Heap::ActivateMemoryReducerIfNeeded() {
// Activate memory reducer when switching to background if
// - there was no mark compact since the start.
// - the committed memory can be potentially reduced.
// 2 pages for the old, code, and map space + 1 page for new space.
const int kMinCommittedMemory = 7 * Page::kPageSize;
- if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory) {
+ if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
+ isolate()->IsIsolateInBackground()) {
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
memory_reducer_->NotifyPossibleGarbage(event);
}
- optimize_for_memory_usage_ = true;
}
void Heap::ReduceNewSpaceSize() {
@@ -4264,14 +4222,13 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
// Hence we have to color all objects of the reservation first black to avoid
// unnecessary marking deque load.
if (incremental_marking()->black_allocation()) {
- for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
while (addr < chunk.end) {
HeapObject* obj = HeapObject::FromAddress(addr);
- Marking::MarkBlack(Marking::MarkBitFrom(obj));
- MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+ Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
addr += obj->Size();
}
}
@@ -4451,6 +4408,13 @@ class MemoryPressureInterruptTask : public CancelableTask {
};
void Heap::CheckMemoryPressure() {
+ if (HighMemoryPressure()) {
+ if (isolate()->concurrent_recompilation_enabled()) {
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ DisallowHeapAllocation no_recursive_gc;
+ isolate()->optimizing_compile_dispatcher()->Flush();
+ }
+ }
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
CollectGarbageOnMemoryPressure("memory pressure");
} else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
@@ -4465,8 +4429,36 @@ void Heap::CheckMemoryPressure() {
}
void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+ const int kGarbageThresholdInBytes = 8 * MB;
+ const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
+ // This constant is the maximum response time in the RAIL performance model.
+ const double kMaxMemoryPressurePauseMs = 100;
+
+ double start = MonotonicallyIncreasingTimeInMs();
CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
- source);
+ source, kGCCallbackFlagCollectAllAvailableGarbage);
+ double end = MonotonicallyIncreasingTimeInMs();
+
+ // Estimate how much memory we can free.
+ int64_t potential_garbage =
+ (CommittedMemory() - SizeOfObjects()) + external_memory_;
+ // If we can potentially free a large amount of memory, then start GC right
+ // away instead of waiting for the memory reducer.
+ if (potential_garbage >= kGarbageThresholdInBytes &&
+ potential_garbage >=
+ CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
+ // If we spent less than half of the time budget, then perform full GC
+ // Otherwise, start incremental marking.
+ if (end - start < kMaxMemoryPressurePauseMs / 2) {
+ CollectAllGarbage(
+ kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
+ kGCCallbackFlagCollectAllAvailableGarbage);
+ } else {
+ if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
+ StartIdleIncrementalMarking();
+ }
+ }
+ }
}
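The new CollectGarbageOnMemoryPressure above budgets follow-up work against the 100 ms RAIL response limit: if enough garbage is likely, it runs a second full GC only when the first finished within half the budget, and otherwise falls back to incremental marking. A self-contained restatement of that rule (constants taken from the hunk; the function name and int64_t types are assumptions):

    #include <cstdint>

    enum class NextStep { kNothing, kFullGC, kIncrementalMarking };

    NextStep AfterMemoryPressureGC(int64_t committed, int64_t live,
                                   int64_t external, double first_gc_ms) {
      const int64_t kGarbageThresholdInBytes = 8 * 1024 * 1024;
      const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
      const double kMaxMemoryPressurePauseMs = 100;

      // Estimate how much memory a further collection could free.
      int64_t potential_garbage = (committed - live) + external;
      if (potential_garbage < kGarbageThresholdInBytes ||
          potential_garbage <
              committed * kGarbageThresholdAsFractionOfTotalMemory) {
        return NextStep::kNothing;  // Too little to gain; defer to the reducer.
      }
      return (first_gc_ms < kMaxMemoryPressurePauseMs / 2)
                 ? NextStep::kFullGC
                 : NextStep::kIncrementalMarking;
    }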
void Heap::MemoryPressureNotification(MemoryPressureLevel level,
@@ -4489,6 +4481,15 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
}
+void Heap::CollectCodeStatistics() {
+ CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
+ // We do not look for code in new space or map space. If code
+ // somehow ends up in those spaces, we would miss it here.
+ CodeStatistics::CollectCodeStatistics(code_space_, isolate());
+ CodeStatistics::CollectCodeStatistics(old_space_, isolate());
+ CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
+}
+
#ifdef DEBUG
void Heap::Print() {
@@ -4503,12 +4504,8 @@ void Heap::Print() {
void Heap::ReportCodeStatistics(const char* title) {
PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
- PagedSpace::ResetCodeStatistics(isolate());
- // We do not look for code in new space, map space, or old space. If code
- // somehow ends up in those spaces, we would miss it here.
- code_space_->CollectCodeStatistics();
- lo_space_->CollectCodeStatistics();
- PagedSpace::ReportCodeStatistics(isolate());
+ CollectCodeStatistics();
+ CodeStatistics::ReportCodeStatistics(isolate());
}
@@ -4519,7 +4516,7 @@ void Heap::ReportHeapStatistics(const char* title) {
USE(title);
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
gc_count_);
- PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+ PrintF("old_generation_allocation_limit_ %" V8PRIdPTR "\n",
old_generation_allocation_limit_);
PrintF("\n");
@@ -4528,7 +4525,7 @@ void Heap::ReportHeapStatistics(const char* title) {
PrintF("\n");
PrintF("Heap statistics : ");
- isolate_->memory_allocator()->ReportStatistics();
+ memory_allocator()->ReportStatistics();
PrintF("To space : ");
new_space_.ReportStatistics();
PrintF("Old space : ");
@@ -4545,7 +4542,7 @@ void Heap::ReportHeapStatistics(const char* title) {
#endif // DEBUG
bool Heap::Contains(HeapObject* value) {
- if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+ if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
return false;
}
return HasBeenSetUp() &&
@@ -4555,7 +4552,7 @@ bool Heap::Contains(HeapObject* value) {
}
bool Heap::ContainsSlow(Address addr) {
- if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+ if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
}
return HasBeenSetUp() &&
@@ -4565,7 +4562,7 @@ bool Heap::ContainsSlow(Address addr) {
}
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
- if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+ if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
return false;
}
if (!HasBeenSetUp()) return false;
@@ -4587,7 +4584,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
}
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
- if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+ if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
}
if (!HasBeenSetUp()) return false;
@@ -4677,10 +4674,8 @@ void Heap::Verify() {
void Heap::ZapFromSpace() {
if (!new_space_.IsFromSpaceCommitted()) return;
- NewSpacePageIterator it(new_space_.FromSpaceStart(),
- new_space_.FromSpaceEnd());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
+ for (Page* page : NewSpacePageRange(new_space_.FromSpaceStart(),
+ new_space_.FromSpaceEnd())) {
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit; cursor += kPointerSize) {
Memory::Address_at(cursor) = kFromSpaceZapValue;
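ZapFromSpace above, like NotifyDeserializationComplete further down, swaps a has_next()/next() iterator loop for a range-based for over a page range. For reference, a minimal range only needs this much to support that syntax (PageRange here is a toy stand-in, not V8's NewSpacePageRange):

    // begin()/end() returning something with *, prefix ++ and != is all
    // range-based for requires.
    struct PageRange {
      struct Iterator {
        int page;  // toy stand-in for a Page*
        int operator*() const { return page; }
        Iterator& operator++() { ++page; return *this; }
        bool operator!=(const Iterator& other) const { return page != other.page; }
      };
      int first, last;
      Iterator begin() const { return Iterator{first}; }
      Iterator end() const { return Iterator{last}; }
    };
    // Usage: for (int p : PageRange{0, 4}) { /* visit page p */ }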
@@ -4737,7 +4732,7 @@ class IteratePromotedObjectsVisitor final : public ObjectVisitor {
// promoted objects.
if (heap_->incremental_marking()->black_allocation()) {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- IncrementalMarking::MarkObject(heap_, code);
+ IncrementalMarking::MarkGrey(heap_, code);
}
}
@@ -4759,7 +4754,7 @@ void Heap::IteratePromotedObject(HeapObject* target, int size,
// it would be a violation of the invariant to record it's slots.
bool record_slots = false;
if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(target);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
record_slots = Marking::IsBlack(mark_bit);
}
@@ -4773,7 +4768,7 @@ void Heap::IteratePromotedObject(HeapObject* target, int size,
// regular visiting and IteratePromotedObjectPointers.
if (!was_marked_black) {
if (incremental_marking()->black_allocation()) {
- IncrementalMarking::MarkObject(this, target->map());
+ IncrementalMarking::MarkGrey(this, target->map());
incremental_marking()->IterateBlackObject(target);
}
}
@@ -4804,6 +4799,49 @@ void Heap::IterateSmiRoots(ObjectVisitor* v) {
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
+// We cannot avoid stale handles to left-trimmed objects, but can only make
+// sure all handles still needed are updated. Filter out a stale pointer
+// and clear the slot to allow post processing of handles (needed because
+// the sweeper might actually free the underlying page).
+class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
+ public:
+ explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
+ USE(heap_);
+ }
+
+ void VisitPointer(Object** p) override { FixHandle(p); }
+
+ void VisitPointers(Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) FixHandle(p);
+ }
+
+ private:
+ inline void FixHandle(Object** p) {
+ HeapObject* current = reinterpret_cast<HeapObject*>(*p);
+ if (!current->IsHeapObject()) return;
+ const MapWord map_word = current->map_word();
+ if (!map_word.IsForwardingAddress() && current->IsFiller()) {
+#ifdef DEBUG
+ // We need to find a FixedArrayBase map after walking the fillers.
+ while (current->IsFiller()) {
+ Address next = reinterpret_cast<Address>(current);
+ if (current->map() == heap_->one_pointer_filler_map()) {
+ next += kPointerSize;
+ } else if (current->map() == heap_->two_pointer_filler_map()) {
+ next += 2 * kPointerSize;
+ } else {
+ next += current->Size();
+ }
+ current = reinterpret_cast<HeapObject*>(next);
+ }
+ DCHECK(current->IsFixedArrayBase());
+#endif // DEBUG
+ *p = nullptr;
+ }
+ }
+
+ Heap* heap_;
+};
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
@@ -4819,11 +4857,15 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kTop);
Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
+ isolate_->debug()->Iterate(v);
+ v->Synchronize(VisitorSynchronization::kDebug);
isolate_->compilation_cache()->Iterate(v);
v->Synchronize(VisitorSynchronization::kCompilationCache);
// Iterate over local handles in handle scopes.
+ FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
+ isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
isolate_->handle_scope_implementer()->Iterate(v);
isolate_->IterateDeferredHandles(v);
v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -5019,7 +5061,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
*stats->end_marker = HeapStats::kEndMarker;
*stats->new_space_size = new_space_.SizeAsInt();
- *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
+ *stats->new_space_capacity = new_space_.Capacity();
*stats->old_space_size = old_space_->SizeOfObjects();
*stats->old_space_capacity = old_space_->Capacity();
*stats->code_space_size = code_space_->SizeOfObjects();
@@ -5028,12 +5070,12 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->map_space_capacity = map_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
- *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
+ *stats->memory_allocator_size = memory_allocator()->Size();
*stats->memory_allocator_capacity =
- isolate()->memory_allocator()->Size() +
- isolate()->memory_allocator()->Available();
+ memory_allocator()->Size() + memory_allocator()->Available();
*stats->os_error = base::OS::GetLastError();
- isolate()->memory_allocator()->Available();
+ *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
+ *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
HeapIterator iterator(this);
for (HeapObject* obj = iterator.next(); obj != NULL;
@@ -5065,11 +5107,8 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
int64_t Heap::PromotedExternalMemorySize() {
- if (amount_of_external_allocated_memory_ <=
- amount_of_external_allocated_memory_at_last_global_gc_)
- return 0;
- return amount_of_external_allocated_memory_ -
- amount_of_external_allocated_memory_at_last_global_gc_;
+ if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
+ return external_memory_ - external_memory_at_last_mark_compact_;
}
@@ -5077,6 +5116,7 @@ const double Heap::kMinHeapGrowingFactor = 1.1;
const double Heap::kMaxHeapGrowingFactor = 4.0;
const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
+const double Heap::kConservativeHeapGrowingFactor = 1.3;
const double Heap::kTargetMutatorUtilization = 0.97;
@@ -5142,7 +5182,7 @@ intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
CHECK(factor > 1.0);
CHECK(old_gen_size > 0);
intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
- limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit);
+ limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
limit += new_space_.Capacity();
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
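The limit formula touched above (now using MinimumAllocationLimitGrowingStep) reduces to a short closed-form rule. A standalone restatement, with intptr_t replaced by int64_t and Min/Max by the standard library:

    #include <algorithm>
    #include <cstdint>

    int64_t OldGenerationLimit(int64_t old_gen_size, double factor,
                               int64_t min_step, int64_t new_space_capacity,
                               int64_t max_old_generation_size) {
      // Grow by `factor`, but by at least the minimum growing step.
      int64_t limit = static_cast<int64_t>(old_gen_size * factor);
      limit = std::max(limit, old_gen_size + min_step);
      // Leave room for a full new space, and never overshoot the point
      // halfway between the current size and the hard maximum.
      limit += new_space_capacity;
      int64_t halfway_to_the_max = (old_gen_size + max_old_generation_size) / 2;
      return std::min(limit, halfway_to_the_max);
    }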
@@ -5152,8 +5192,6 @@ intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
double gc_speed,
double mutator_speed) {
- const double kConservativeHeapGrowingFactor = 1.3;
-
double factor = HeapGrowingFactor(gc_speed, mutator_speed);
if (FLAG_trace_gc_verbose) {
@@ -5164,14 +5202,12 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
gc_speed, mutator_speed);
}
- // We set the old generation growing factor to 2 to grow the heap slower on
- // memory-constrained devices.
- if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
- FLAG_optimize_for_size) {
+ if (IsMemoryConstrainedDevice()) {
factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
}
- if (memory_reducer_->ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
+ if (memory_reducer_->ShouldGrowHeapSlowly() ||
+ ShouldOptimizeForMemoryUsage()) {
factor = Min(factor, kConservativeHeapGrowingFactor);
}
@@ -5187,8 +5223,8 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
- "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+ PrintIsolate(isolate_, "Grow: old size: %" V8PRIdPTR
+ " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
old_gen_size / KB, old_generation_allocation_limit_ / KB,
factor);
}
@@ -5202,10 +5238,10 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (limit < old_generation_allocation_limit_) {
if (FLAG_trace_gc_verbose) {
- PrintIsolate(isolate_, "Dampen: old size: %" V8_PTR_PREFIX
- "d KB, old limit: %" V8_PTR_PREFIX
- "d KB, "
- "new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
+ PrintIsolate(isolate_,
+ "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+ " KB, "
+ "new limit: %" V8PRIdPTR " KB (%.1f)\n",
old_gen_size / KB, old_generation_allocation_limit_ / KB,
limit / KB, factor);
}
@@ -5243,7 +5279,8 @@ V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
Scavenger::Initialize();
- StaticScavengeVisitor::Initialize();
+ StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
+ StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
MarkCompactCollector::Initialize();
}
@@ -5268,9 +5305,14 @@ bool Heap::SetUp() {
base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
// Set up memory allocator.
- if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
+ memory_allocator_ = new MemoryAllocator(isolate_);
+ if (!memory_allocator_->SetUp(MaxReserved(), MaxExecutableSize(),
+ code_range_size_))
return false;
+ // Initialize store buffer.
+ store_buffer_ = new StoreBuffer(this);
+
// Initialize incremental marking.
incremental_marking_ = new IncrementalMarking(this);
@@ -5285,8 +5327,6 @@ bool Heap::SetUp() {
if (old_space_ == NULL) return false;
if (!old_space_->SetUp()) return false;
- if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
-
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
@@ -5331,13 +5371,13 @@ bool Heap::SetUp() {
memory_reducer_ = new MemoryReducer(this);
- object_stats_ = new ObjectStats(this);
- object_stats_->ClearObjectStats(true);
+ if (FLAG_track_gc_object_stats) {
+ live_object_stats_ = new ObjectStats(this);
+ dead_object_stats_ = new ObjectStats(this);
+ }
scavenge_job_ = new ScavengeJob();
- array_buffer_tracker_ = new ArrayBufferTracker(this);
-
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5400,18 +5440,35 @@ void Heap::NotifyDeserializationComplete() {
// All pages right after bootstrapping must be marked as never-evacuate.
PagedSpaces spaces(this);
for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
- PageIterator it(s);
- while (it.has_next()) CHECK(it.next()->NeverEvacuate());
+ for (Page* p : *s) {
+ CHECK(p->NeverEvacuate());
+ }
}
#endif // DEBUG
}
+void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+ mark_compact_collector()->SetEmbedderHeapTracer(tracer);
+}
+
+bool Heap::UsingEmbedderHeapTracer() {
+ return mark_compact_collector()->UsingEmbedderHeapTracer();
+}
+
+void Heap::TracePossibleWrapper(JSObject* js_object) {
+ mark_compact_collector()->TracePossibleWrapper(js_object);
+}
+
void Heap::RegisterExternallyReferencedObject(Object** object) {
- DCHECK(mark_compact_collector()->in_use());
HeapObject* heap_object = HeapObject::cast(*object);
DCHECK(Contains(heap_object));
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- mark_compact_collector()->MarkObject(heap_object, mark_bit);
+ if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
+ IncrementalMarking::MarkGrey(this, heap_object);
+ } else {
+ DCHECK(mark_compact_collector()->in_use());
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
+ mark_compact_collector()->MarkObject(heap_object, mark_bit);
+ }
}
void Heap::TearDown() {
@@ -5430,7 +5487,7 @@ void Heap::TearDown() {
PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
- PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
+ PrintF("max_alive_after_gc=%" V8PRIdPTR " ", get_max_alive_after_gc());
PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
PrintF("total_sweeping_time=%.1f ",
tracer()->cumulative_sweeping_duration());
@@ -5439,17 +5496,17 @@ void Heap::TearDown() {
if (FLAG_print_max_heap_committed) {
PrintF("\n");
- PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
+ PrintF("maximum_committed_by_heap=%" V8PRIdPTR " ",
MaximumCommittedMemory());
- PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
+ PrintF("maximum_committed_by_new_space=%" V8PRIdPTR " ",
new_space_.MaximumCommittedMemory());
- PrintF("maximum_committed_by_old_space=%" V8_PTR_PREFIX "d ",
+ PrintF("maximum_committed_by_old_space=%" V8PRIdPTR " ",
old_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
+ PrintF("maximum_committed_by_code_space=%" V8PRIdPTR " ",
code_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
+ PrintF("maximum_committed_by_map_space=%" V8PRIdPTR " ",
map_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
+ PrintF("maximum_committed_by_lo_space=%" V8PRIdPTR " ",
lo_space_->MaximumCommittedMemory());
PrintF("\n\n");
}
@@ -5483,17 +5540,19 @@ void Heap::TearDown() {
memory_reducer_ = nullptr;
}
- delete object_stats_;
- object_stats_ = nullptr;
+ if (live_object_stats_ != nullptr) {
+ delete live_object_stats_;
+ live_object_stats_ = nullptr;
+ }
+
+ if (dead_object_stats_ != nullptr) {
+ delete dead_object_stats_;
+ dead_object_stats_ = nullptr;
+ }
delete scavenge_job_;
scavenge_job_ = nullptr;
- WaitUntilUnmappingOfFreeChunksCompleted();
-
- delete array_buffer_tracker_;
- array_buffer_tracker_ = nullptr;
-
isolate_->global_handles()->TearDown();
external_string_table_.TearDown();
@@ -5526,7 +5585,7 @@ void Heap::TearDown() {
store_buffer()->TearDown();
- isolate_->memory_allocator()->TearDown();
+ memory_allocator()->TearDown();
StrongRootsList* next = NULL;
for (StrongRootsList* list = strong_roots_list_; list; list = next) {
@@ -5534,6 +5593,12 @@ void Heap::TearDown() {
delete list;
}
strong_roots_list_ = NULL;
+
+ delete store_buffer_;
+ store_buffer_ = nullptr;
+
+ delete memory_allocator_;
+ memory_allocator_ = nullptr;
}
@@ -5578,10 +5643,16 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
UNREACHABLE();
}
-void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
- DCHECK_NOT_NULL(tracer);
- CHECK_NULL(embedder_heap_tracer_);
- embedder_heap_tracer_ = tracer;
+// TODO(ishell): Find a better place for this.
+void Heap::AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
+ Handle<WeakCell> code) {
+ DCHECK(InNewSpace(*obj));
+ DCHECK(!InNewSpace(*code));
+ Handle<ArrayList> list(weak_new_space_object_to_code_list(), isolate());
+ list = ArrayList::Add(list, isolate()->factory()->NewWeakCell(obj), code);
+ if (*list != weak_new_space_object_to_code_list()) {
+ set_weak_new_space_object_to_code_list(*list);
+ }
}
// TODO(ishell): Find a better place for this.
@@ -5603,6 +5674,33 @@ DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
return DependentCode::cast(empty_fixed_array());
}
+namespace {
+void CompactWeakFixedArray(Object* object) {
+ if (object->IsWeakFixedArray()) {
+ WeakFixedArray* array = WeakFixedArray::cast(object);
+ array->Compact<WeakFixedArray::NullCallback>();
+ }
+}
+} // anonymous namespace
+
+void Heap::CompactWeakFixedArrays() {
+ // Find known WeakFixedArrays and compact them.
+ HeapIterator iterator(this);
+ for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+ if (o->IsPrototypeInfo()) {
+ Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
+ if (prototype_users->IsWeakFixedArray()) {
+ WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
+ array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
+ }
+ } else if (o->IsScript()) {
+ CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
+ }
+ }
+ CompactWeakFixedArray(noscript_shared_function_infos());
+ CompactWeakFixedArray(script_list());
+ CompactWeakFixedArray(weak_stack_trace_list());
+}
void Heap::AddRetainedMap(Handle<Map> map) {
Handle<WeakCell> cell = Map::WeakCellForMap(map);
@@ -5671,32 +5769,6 @@ void Heap::PrintHandles() {
#endif
-#ifdef ENABLE_SLOW_DCHECKS
-
-class CountHandleVisitor : public ObjectVisitor {
- public:
- explicit CountHandleVisitor(Object* object) : object_(object) {}
-
- void VisitPointers(Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
- if (object_ == reinterpret_cast<Object*>(*p)) count_++;
- }
- }
-
- int count() { return count_; }
-
- private:
- Object* object_;
- int count_ = 0;
-};
-
-int Heap::CountHandlesForObject(Object* object) {
- CountHandleVisitor v(object);
- isolate_->handle_scope_implementer()->Iterate(&v);
- return v.count();
-}
-#endif
-
class CheckHandleCountVisitor : public ObjectVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
@@ -5738,6 +5810,33 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
}
}
+void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
+ Object* value) {
+ DCHECK(InNewSpace(value));
+ Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+ RelocInfo::Mode rmode = rinfo->rmode();
+ Address addr = rinfo->pc();
+ SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ slot_type = CODE_ENTRY_SLOT;
+ } else {
+ DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ slot_type = OBJECT_SLOT;
+ }
+ }
+ RememberedSet<OLD_TO_NEW>::InsertTyped(
+ source_page, reinterpret_cast<Address>(host), slot_type, addr);
+}
+
+void Heap::RecordWritesIntoCode(Code* code) {
+ for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
+ !it.done(); it.next()) {
+ RecordWriteIntoCode(code, it.rinfo(), it.rinfo()->target_object());
+ }
+}
+
Space* AllSpaces::next() {
switch (counter_++) {
case NEW_SPACE:
@@ -5755,7 +5854,6 @@ Space* AllSpaces::next() {
}
}
-
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
case OLD_SPACE:
@@ -5861,7 +5959,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject* object) {
if (object->IsFiller()) return true;
- MarkBit mark_bit = Marking::MarkBitFrom(object);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
return Marking::IsWhite(mark_bit);
}
@@ -5874,7 +5972,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
if (Marking::IsWhite(mark_bit)) {
Marking::WhiteToBlack(mark_bit);
marking_stack_.Add(obj);
@@ -5956,14 +6054,14 @@ HeapObject* HeapIterator::NextObject() {
// No iterator means we are done.
if (object_iterator_ == nullptr) return nullptr;
- if (HeapObject* obj = object_iterator_->next_object()) {
+ if (HeapObject* obj = object_iterator_->Next()) {
// If the current iterator has more objects we are fine.
return obj;
} else {
// Go though the spaces looking for one that has objects.
while (space_iterator_->has_next()) {
object_iterator_ = space_iterator_->next();
- if (HeapObject* obj = object_iterator_->next_object()) {
+ if (HeapObject* obj = object_iterator_->Next()) {
return obj;
}
}
@@ -6253,11 +6351,11 @@ void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}
-
void Heap::ExternalStringTable::CleanUp() {
int last = 0;
+ Isolate* isolate = heap_->isolate();
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->the_hole_value()) {
+ if (new_space_strings_[i]->IsTheHole(isolate)) {
continue;
}
DCHECK(new_space_strings_[i]->IsExternalString());
@@ -6272,7 +6370,7 @@ void Heap::ExternalStringTable::CleanUp() {
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->the_hole_value()) {
+ if (old_space_strings_[i]->IsTheHole(isolate)) {
continue;
}
DCHECK(old_space_strings_[i]->IsExternalString());
@@ -6288,7 +6386,6 @@ void Heap::ExternalStringTable::CleanUp() {
#endif
}
-
void Heap::ExternalStringTable::TearDown() {
for (int i = 0; i < new_space_strings_.length(); ++i) {
heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
@@ -6301,75 +6398,6 @@ void Heap::ExternalStringTable::TearDown() {
}
-class Heap::UnmapFreeMemoryTask : public v8::Task {
- public:
- UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
- : heap_(heap), head_(head) {}
- virtual ~UnmapFreeMemoryTask() {}
-
- private:
- // v8::Task overrides.
- void Run() override {
- heap_->FreeQueuedChunks(head_);
- heap_->pending_unmapping_tasks_semaphore_.Signal();
- }
-
- Heap* heap_;
- MemoryChunk* head_;
-
- DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
-};
-
-
-void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
- while (concurrent_unmapping_tasks_active_ > 0) {
- pending_unmapping_tasks_semaphore_.Wait();
- concurrent_unmapping_tasks_active_--;
- }
-}
-
-
-void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
- // PreFree logically frees the memory chunk. However, the actual freeing
- // will happen on a separate thread sometime later.
- isolate_->memory_allocator()->PreFreeMemory(chunk);
-
- // The chunks added to this queue will be freed by a concurrent thread.
- chunk->set_next_chunk(chunks_queued_for_free_);
- chunks_queued_for_free_ = chunk;
-}
-
-
-void Heap::FreeQueuedChunks() {
- if (chunks_queued_for_free_ != NULL) {
- if (FLAG_concurrent_sweeping) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
- v8::Platform::kShortRunningTask);
- } else {
- FreeQueuedChunks(chunks_queued_for_free_);
- pending_unmapping_tasks_semaphore_.Signal();
- }
- chunks_queued_for_free_ = NULL;
- } else {
- // If we do not have anything to unmap, we just signal the semaphore
- // that we are done.
- pending_unmapping_tasks_semaphore_.Signal();
- }
- concurrent_unmapping_tasks_active_++;
-}
-
-
-void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
- MemoryChunk* next;
- MemoryChunk* chunk;
- for (chunk = list_head; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- isolate_->memory_allocator()->PerformFreeMemory(chunk);
- }
-}
-
-
void Heap::RememberUnmappedPage(Address page, bool compacted) {
uintptr_t p = reinterpret_cast<uintptr_t>(page);
// Tag the page pointer to make it findable in the dump file.
@@ -6420,14 +6448,16 @@ size_t Heap::NumberOfTrackedHeapObjectTypes() {
size_t Heap::ObjectCountAtLastGC(size_t index) {
- if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
- return object_stats_->object_count_last_gc(index);
+ if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
+ return 0;
+ return live_object_stats_->object_count_last_gc(index);
}
size_t Heap::ObjectSizeAtLastGC(size_t index) {
- if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
- return object_stats_->object_size_last_gc(index);
+ if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
+ return 0;
+ return live_object_stats_->object_size_last_gc(index);
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index bbe1f05e4f..b9b058c1cd 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -13,12 +13,11 @@
#include "include/v8.h"
#include "src/allocation.h"
#include "src/assert-scope.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
-// TODO(mstarzinger): Two more includes to kill!
+// TODO(mstarzinger): One more include to kill!
#include "src/heap/spaces.h"
-#include "src/heap/store-buffer.h"
#include "src/list.h"
namespace v8 {
@@ -28,67 +27,73 @@ using v8::MemoryPressureLevel;
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
- V(Map, byte_array_map, ByteArrayMap) \
+ /* Cluster the most popular ones in a few cache lines here at the top. */ \
+ /* The first 32 entries are most often used in the startup snapshot and */ \
+ /* can use a shorter representation in the serialization format. */ \
V(Map, free_space_map, FreeSpaceMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
- /* Cluster the most popular ones in a few cache lines here at the top. */ \
V(Oddball, uninitialized_value, UninitializedValue) \
V(Oddball, undefined_value, UndefinedValue) \
- V(Map, cell_map, CellMap) \
+ V(Oddball, the_hole_value, TheHoleValue) \
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(String, empty_string, empty_string) \
- V(Oddball, the_hole_value, TheHoleValue) \
- V(Map, global_property_cell_map, GlobalPropertyCellMap) \
- V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
- V(Map, heap_number_map, HeapNumberMap) \
- V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
- V(Map, float32x4_map, Float32x4Map) \
- V(Map, int32x4_map, Int32x4Map) \
- V(Map, uint32x4_map, Uint32x4Map) \
- V(Map, bool32x4_map, Bool32x4Map) \
- V(Map, int16x8_map, Int16x8Map) \
- V(Map, uint16x8_map, Uint16x8Map) \
- V(Map, bool16x8_map, Bool16x8Map) \
- V(Map, int8x16_map, Int8x16Map) \
- V(Map, uint8x16_map, Uint8x16Map) \
- V(Map, bool8x16_map, Bool8x16Map) \
- V(Map, native_context_map, NativeContextMap) \
+ V(Map, byte_array_map, ByteArrayMap) \
V(Map, fixed_array_map, FixedArrayMap) \
- V(Map, code_map, CodeMap) \
- V(Map, scope_info_map, ScopeInfoMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
- V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
- V(Map, weak_cell_map, WeakCellMap) \
- V(Map, transition_array_map, TransitionArrayMap) \
+ V(Map, hash_table_map, HashTableMap) \
+ V(Map, symbol_map, SymbolMap) \
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
+ V(Map, scope_info_map, ScopeInfoMap) \
+ V(Map, shared_function_info_map, SharedFunctionInfoMap) \
+ V(Map, code_map, CodeMap) \
V(Map, function_context_map, FunctionContextMap) \
+ V(Map, cell_map, CellMap) \
+ V(Map, weak_cell_map, WeakCellMap) \
+ V(Map, global_property_cell_map, GlobalPropertyCellMap) \
+ V(Map, foreign_map, ForeignMap) \
+ V(Map, heap_number_map, HeapNumberMap) \
+ V(Map, transition_array_map, TransitionArrayMap) \
+ V(FixedArray, empty_literals_array, EmptyLiteralsArray) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(ByteArray, empty_byte_array, EmptyByteArray) \
+ V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
+ /* Entries beyond the first 32 */ \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
/* being compacted. */ \
+ /* Oddballs */ \
V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, exception, Exception) \
V(Oddball, termination_exception, TerminationException) \
V(Oddball, optimized_out, OptimizedOut) \
- V(FixedArray, number_string_cache, NumberStringCache) \
- V(Object, instanceof_cache_function, InstanceofCacheFunction) \
- V(Object, instanceof_cache_map, InstanceofCacheMap) \
- V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
- V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
- V(FixedArray, string_split_cache, StringSplitCache) \
- V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- V(Smi, hash_seed, HashSeed) \
- V(Map, hash_table_map, HashTableMap) \
+ V(Oddball, stale_register, StaleRegister) \
+ /* Context maps */ \
+ V(Map, native_context_map, NativeContextMap) \
+ V(Map, module_context_map, ModuleContextMap) \
+ V(Map, script_context_map, ScriptContextMap) \
+ V(Map, block_context_map, BlockContextMap) \
+ V(Map, catch_context_map, CatchContextMap) \
+ V(Map, with_context_map, WithContextMap) \
+ V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
+ V(Map, script_context_table_map, ScriptContextTableMap) \
+ /* Maps */ \
+ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, ordered_hash_table_map, OrderedHashTableMap) \
- V(Map, symbol_map, SymbolMap) \
+ V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap) \
+ V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
+ V(Map, message_object_map, JSMessageObjectMap) \
+ V(Map, neander_map, NeanderMap) \
+ V(Map, external_map, ExternalMap) \
+ V(Map, bytecode_array_map, BytecodeArrayMap) \
+ /* String maps */ \
+ V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
V(Map, cons_string_map, ConsStringMap) \
@@ -98,7 +103,6 @@ using v8::MemoryPressureLevel;
V(Map, external_string_with_one_byte_data_map, \
ExternalStringWithOneByteDataMap) \
V(Map, external_one_byte_string_map, ExternalOneByteStringMap) \
- V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, short_external_string_map, ShortExternalStringMap) \
V(Map, short_external_string_with_one_byte_data_map, \
ShortExternalStringWithOneByteDataMap) \
@@ -115,6 +119,7 @@ using v8::MemoryPressureLevel;
V(Map, short_external_one_byte_internalized_string_map, \
ShortExternalOneByteInternalizedStringMap) \
V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap) \
+ /* Array element maps */ \
V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
@@ -124,6 +129,18 @@ using v8::MemoryPressureLevel;
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
+ V(Map, float32x4_map, Float32x4Map) \
+ V(Map, int32x4_map, Int32x4Map) \
+ V(Map, uint32x4_map, Uint32x4Map) \
+ V(Map, bool32x4_map, Bool32x4Map) \
+ V(Map, int16x8_map, Int16x8Map) \
+ V(Map, uint16x8_map, Uint16x8Map) \
+ V(Map, bool16x8_map, Bool16x8Map) \
+ V(Map, int8x16_map, Int8x16Map) \
+ V(Map, uint8x16_map, Uint8x16Map) \
+ V(Map, bool8x16_map, Bool8x16Map) \
+ /* Canonical empty values */ \
+ V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
@@ -134,78 +151,89 @@ using v8::MemoryPressureLevel;
V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
EmptyFixedUint8ClampedArray) \
- V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
- V(Map, catch_context_map, CatchContextMap) \
- V(Map, with_context_map, WithContextMap) \
- V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
- V(Map, block_context_map, BlockContextMap) \
- V(Map, module_context_map, ModuleContextMap) \
- V(Map, script_context_map, ScriptContextMap) \
- V(Map, script_context_table_map, ScriptContextTableMap) \
- V(Map, undefined_map, UndefinedMap) \
- V(Map, the_hole_map, TheHoleMap) \
- V(Map, null_map, NullMap) \
- V(Map, boolean_map, BooleanMap) \
- V(Map, uninitialized_map, UninitializedMap) \
- V(Map, arguments_marker_map, ArgumentsMarkerMap) \
- V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
- V(Map, exception_map, ExceptionMap) \
- V(Map, termination_exception_map, TerminationExceptionMap) \
- V(Map, optimized_out_map, OptimizedOutMap) \
- V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, foreign_map, ForeignMap) \
- V(Map, neander_map, NeanderMap) \
- V(Map, external_map, ExternalMap) \
+ V(Script, empty_script, EmptyScript) \
+ V(Cell, undefined_cell, UndefinedCell) \
+ V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
+ V(SeededNumberDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(TypeFeedbackVector, dummy_vector, DummyVector) \
+ V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
+ V(WeakCell, empty_weak_cell, EmptyWeakCell) \
+ /* Protectors */ \
+ V(PropertyCell, array_protector, ArrayProtector) \
+ V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
+ V(PropertyCell, has_instance_protector, HasInstanceProtector) \
+ V(Cell, species_protector, SpeciesProtector) \
+ /* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
+ V(HeapNumber, hole_nan_value, HoleNanValue) \
V(HeapNumber, infinity_value, InfinityValue) \
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
- V(JSObject, message_listeners, MessageListeners) \
- V(UnseededNumberDictionary, code_stubs, CodeStubs) \
- V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
- V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
- V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode) \
+ /* Caches */ \
+ V(FixedArray, number_string_cache, NumberStringCache) \
+ V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
+ V(FixedArray, string_split_cache, StringSplitCache) \
+ V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
+ V(Object, instanceof_cache_function, InstanceofCacheFunction) \
+ V(Object, instanceof_cache_map, InstanceofCacheMap) \
+ V(Object, instanceof_cache_answer, InstanceofCacheAnswer) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(FixedArray, experimental_natives_source_cache, \
ExperimentalNativesSourceCache) \
V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache) \
V(FixedArray, experimental_extra_natives_source_cache, \
ExperimentalExtraNativesSourceCache) \
- V(Script, empty_script, EmptyScript) \
+ /* Lists and dictionaries */ \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
- V(Cell, undefined_cell, UndefinedCell) \
- V(JSObject, observation_state, ObservationState) \
V(Object, symbol_registry, SymbolRegistry) \
V(Object, script_list, ScriptList) \
- V(SeededNumberDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
+ V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
V(FixedArray, microtask_queue, MicrotaskQueue) \
- V(TypeFeedbackVector, dummy_vector, DummyVector) \
- V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(FixedArray, detached_contexts, DetachedContexts) \
V(ArrayList, retained_maps, RetainedMaps) \
V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
- V(PropertyCell, array_protector, ArrayProtector) \
- V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
+ /* weak_new_space_object_to_code_list is an array of weak cells, where */ \
+ /* slots with even indices refer to the weak object, and the subsequent */ \
+ /* slots refer to the code with the reference to the weak object. */ \
+ V(ArrayList, weak_new_space_object_to_code_list, \
+ WeakNewSpaceObjectToCodeList) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
- V(Map, bytecode_array_map, BytecodeArrayMap) \
- V(WeakCell, empty_weak_cell, EmptyWeakCell) \
- V(PropertyCell, species_protector, SpeciesProtector)
+ V(FixedArray, serialized_templates, SerializedTemplates) \
+ /* Configured values */ \
+ V(TemplateList, message_listeners, MessageListeners) \
+ V(Code, js_entry_code, JsEntryCode) \
+ V(Code, js_construct_entry_code, JsConstructEntryCode) \
+ /* Oddball maps */ \
+ V(Map, undefined_map, UndefinedMap) \
+ V(Map, the_hole_map, TheHoleMap) \
+ V(Map, null_map, NullMap) \
+ V(Map, boolean_map, BooleanMap) \
+ V(Map, uninitialized_map, UninitializedMap) \
+ V(Map, arguments_marker_map, ArgumentsMarkerMap) \
+ V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap) \
+ V(Map, exception_map, ExceptionMap) \
+ V(Map, termination_exception_map, TerminationExceptionMap) \
+ V(Map, optimized_out_map, OptimizedOutMap) \
+ V(Map, stale_register_map, StaleRegisterMap)
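+
+// A minimal sketch of how these X-macro lists are consumed (following the
+// ROOT_ACCESSOR / root-index pattern used elsewhere in this file): a client
+// supplies its own V to stamp out code per root, e.g.
+//   #define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+//   STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+//   #undef ROOT_INDEX_DECLARATION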
// Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V) \
- V(Smi, stack_limit, StackLimit) \
- V(Smi, real_stack_limit, RealStackLimit) \
- V(Smi, last_script_id, LastScriptId) \
- V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
- V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
- V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
-
+#define SMI_ROOT_LIST(V) \
+ V(Smi, stack_limit, StackLimit) \
+ V(Smi, real_stack_limit, RealStackLimit) \
+ V(Smi, last_script_id, LastScriptId) \
+ V(Smi, hash_seed, HashSeed) \
+ /* To distinguish the function templates, so that we can find them in the */ \
+ /* function cache of the native context. */ \
+ V(Smi, next_template_serial_number, NextTemplateSerialNumber) \
+ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
+ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
+ V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -298,8 +326,11 @@ class MemoryReducer;
class ObjectStats;
class Scavenger;
class ScavengeJob;
+class StoreBuffer;
class WeakObjectRetainer;
+enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
+
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
// A queue of objects promoted during scavenge. Each object is accompanied
@@ -325,7 +356,7 @@ class PromotionQueue {
}
Page* GetHeadPage() {
- return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
}
void SetNewLimit(Address limit) {
@@ -333,7 +364,7 @@ class PromotionQueue {
if (emergency_stack_) return;
// If the limit is not on the same page, we can ignore it.
- if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
+ if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
limit_ = reinterpret_cast<struct Entry*>(limit);
@@ -419,6 +450,8 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
+enum class ClearBlackArea { kYes, kNo };
+
class Heap {
public:
// Declare all the root indices. This defines the root list order.
@@ -489,9 +522,6 @@ class Heap {
};
typedef List<Chunk> Reservation;
- static const intptr_t kMinimumOldGenerationAllocationLimit =
- 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-
static const int kInitalOldGenerationLimitFactor = 2;
#if V8_OS_ANDROID
@@ -533,6 +563,7 @@ class Heap {
static const double kMaxHeapGrowingFactor;
static const double kMaxHeapGrowingFactorMemoryConstrained;
static const double kMaxHeapGrowingFactorIdle;
+ static const double kConservativeHeapGrowingFactor;
static const double kTargetMutatorUtilization;
static const int kNoGCFlags = 0;
@@ -626,11 +657,9 @@ class Heap {
return old_space_->allocation_limit_address();
}
- // TODO(hpayer): There is still a missmatch between capacity and actual
- // committed memory size.
- bool CanExpandOldGeneration(int size = 0) {
+ bool CanExpandOldGeneration(int size) {
if (force_oom_) return false;
- return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
+ return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
}
// Clear the Instanceof cache (used when a prototype changes).
@@ -646,8 +675,12 @@ class Heap {
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
- // pass ClearRecordedSlots::kNo.
- void CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode);
+ // pass ClearRecordedSlots::kNo. If the filler was created in a black area
+ // we may want to clear the corresponding mark bits with ClearBlackArea::kYes,
+ // which is the default. ClearBlackArea::kNo does not clear the mark bits.
+ void CreateFillerObjectAt(
+ Address addr, int size, ClearRecordedSlots mode,
+ ClearBlackArea black_area_mode = ClearBlackArea::kYes);
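+ // Illustrative call (hypothetical): trimming the tail of a live array could
+ // use CreateFillerObjectAt(new_end, trimmed_bytes, ClearRecordedSlots::kYes)
+ // and keep the default ClearBlackArea::kYes so stale mark bits are cleared.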
bool CanMoveObjectStart(HeapObject* object);
@@ -728,7 +761,15 @@ class Heap {
inline AllocationMemento* FindAllocationMemento(HeapObject* object);
// Returns false if not able to reserve.
- bool ReserveSpace(Reservation* reservations);
+ bool ReserveSpace(Reservation* reservations, List<Address>* maps);
+
+ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+ bool UsingEmbedderHeapTracer();
+
+ void TracePossibleWrapper(JSObject* js_object);
+
+ void RegisterExternallyReferencedObject(Object** object);
//
// Support for the API.
@@ -767,19 +808,17 @@ class Heap {
// An object should be promoted if the object has survived a
// scavenge operation.
+ template <PromotionMode promotion_mode>
inline bool ShouldBePromoted(Address old_address, int object_size);
+ inline PromotionMode CurrentPromotionMode();
+
void ClearNormalizedMapCaches();
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
inline bool OldGenerationAllocationLimitReached();
- void QueueMemoryChunkForFree(MemoryChunk* chunk);
- void FreeQueuedChunks(MemoryChunk* list_head);
- void FreeQueuedChunks();
- void WaitUntilUnmappingOfFreeChunksCompleted();
-
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
@@ -792,6 +831,10 @@ class Heap {
inline void SetConstructStubDeoptPCOffset(int pc_offset);
inline void SetGetterStubDeoptPCOffset(int pc_offset);
inline void SetSetterStubDeoptPCOffset(int pc_offset);
+ inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
+ inline int GetNextTemplateSerialNumber();
+
+ inline void SetSerializedTemplates(FixedArray* templates);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -804,12 +847,16 @@ class Heap {
global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
}
- int64_t amount_of_external_allocated_memory() {
- return amount_of_external_allocated_memory_;
+ int64_t external_memory() { return external_memory_; }
+ void update_external_memory(int64_t delta) { external_memory_ += delta; }
+
+ void update_external_memory_concurrently_freed(intptr_t freed) {
+ external_memory_concurrently_freed_.Increment(freed);
}
- void update_amount_of_external_allocated_memory(int64_t delta) {
- amount_of_external_allocated_memory_ += delta;
+ void account_external_memory_concurrently_freed() {
+ external_memory_ -= external_memory_concurrently_freed_.Value();
+ external_memory_concurrently_freed_.SetValue(0);
}
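+
+ // Usage sketch (assumed from the accessors above): a concurrent sweeper
+ // reports freed external bytes via
+ //   update_external_memory_concurrently_freed(bytes);
+ // and the main thread later folds them into external_memory_ by calling
+ //   account_external_memory_concurrently_freed();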
void DeoptMarkedAllocationSites();
@@ -818,11 +865,16 @@ class Heap {
return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
}
+ void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
+ Handle<WeakCell> code);
+
void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
Handle<DependentCode> dep);
DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
+ void CompactWeakFixedArrays();
+
void AddRetainedMap(Handle<Map> map);
// This event is triggered after successful allocation of a new object made
@@ -841,11 +893,18 @@ class Heap {
bool HasHighFragmentation();
bool HasHighFragmentation(intptr_t used, intptr_t committed);
- void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
- void SetOptimizeForMemoryUsage();
- bool ShouldOptimizeForMemoryUsage() {
- return optimize_for_memory_usage_ || HighMemoryPressure();
+ void ActivateMemoryReducerIfNeeded();
+
+ bool ShouldOptimizeForMemoryUsage();
+
+ bool IsLowMemoryDevice() {
+ return max_old_generation_size_ <= kMaxOldSpaceSizeLowMemoryDevice;
}
+
+ bool IsMemoryConstrainedDevice() {
+ return max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice;
+ }
+
bool HighMemoryPressure() {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
}
@@ -916,20 +975,12 @@ class Heap {
const char* GetSpaceName(int idx);
// ===========================================================================
- // API. ======================================================================
- // ===========================================================================
-
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- void RegisterExternallyReferencedObject(Object** object);
-
- // ===========================================================================
// Getters to other components. ==============================================
// ===========================================================================
GCTracer* tracer() { return tracer_; }
- EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+ MemoryAllocator* memory_allocator() { return memory_allocator_; }
PromotionQueue* promotion_queue() { return &promotion_queue_; }
@@ -979,11 +1030,6 @@ class Heap {
roots_[kCodeStubsRootIndex] = value;
}
- // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- void SetRootNonMonomorphicCache(UnseededNumberDictionary* value) {
- roots_[kNonMonomorphicCacheRootIndex] = value;
- }
-
void SetRootMaterializedObjects(FixedArray* objects) {
roots_[kMaterializedObjectsRootIndex] = objects;
}
@@ -1000,6 +1046,10 @@ class Heap {
roots_[kNoScriptSharedFunctionInfosRootIndex] = value;
}
+ void SetMessageListeners(TemplateList* value) {
+ roots_[kMessageListenersRootIndex] = value;
+ }
+
// Set the stack limit in the roots_ array. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
@@ -1089,8 +1139,13 @@ class Heap {
// Write barrier support for object[offset] = o;
inline void RecordWrite(Object* object, int offset, Object* o);
+ inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
+ void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
+ void RecordWritesIntoCode(Code* code);
+ inline void RecordFixedArrayElements(FixedArray* array, int offset,
+ int length);
- Address* store_buffer_top_address() { return store_buffer()->top_address(); }
+ inline Address* store_buffer_top_address();
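+
+ // Illustration (hypothetical caller): after array->set(i, value) the barrier
+ // is heap->RecordWrite(array, FixedArray::OffsetOfElementAt(i), value);
+ // which records an old-to-new pointer in the store buffer when needed.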
void ClearRecordedSlot(HeapObject* object, Object** slot);
void ClearRecordedSlotRange(Address start, Address end);
@@ -1176,6 +1231,13 @@ class Heap {
const char** object_sub_type);
// ===========================================================================
+ // Code statistics. ==========================================================
+ // ===========================================================================
+
+ // Collect code (Code and BytecodeArray objects) statistics.
+ void CollectCodeStatistics();
+
+ // ===========================================================================
// GC statistics. ============================================================
// ===========================================================================
@@ -1192,6 +1254,9 @@ class Heap {
// more spaces are needed until it reaches the limit.
intptr_t Capacity();
+ // Returns the capacity of the old generation.
+ intptr_t OldGenerationCapacity();
+
// Returns the amount of memory currently committed for the heap.
intptr_t CommittedMemory();
@@ -1347,10 +1412,6 @@ class Heap {
void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
void UnregisterArrayBuffer(JSArrayBuffer* buffer);
- inline ArrayBufferTracker* array_buffer_tracker() {
- return array_buffer_tracker_;
- }
-
// ===========================================================================
// Allocation site tracking. =================================================
// ===========================================================================
@@ -1362,7 +1423,7 @@ class Heap {
// value) is cached on the local pretenuring feedback.
template <UpdateAllocationSiteMode mode>
inline void UpdateAllocationSite(HeapObject* object,
- HashMap* pretenuring_feedback);
+ base::HashMap* pretenuring_feedback);
// Removes an entry from the global pretenuring storage.
inline void RemoveAllocationSitePretenuringFeedback(AllocationSite* site);
@@ -1371,7 +1432,7 @@ class Heap {
// method needs to be called after evacuation, as allocation sites may be
// evacuated and this method resolves forward pointers accordingly.
void MergeAllocationSitePretenuringFeedback(
- const HashMap& local_pretenuring_feedback);
+ const base::HashMap& local_pretenuring_feedback);
// =============================================================================
@@ -1394,13 +1455,9 @@ class Heap {
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
#endif
-#ifdef ENABLE_SLOW_DCHECKS
- int CountHandlesForObject(Object* object);
-#endif
private:
class PretenuringScope;
- class UnmapFreeMemoryTask;
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
@@ -1511,7 +1568,7 @@ class Heap {
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- StoreBuffer* store_buffer() { return &store_buffer_; }
+ StoreBuffer* store_buffer() { return store_buffer_; }
void set_current_gc_flags(int flags) {
current_gc_flags_ = flags;
@@ -1692,7 +1749,8 @@ class Heap {
// Performs a minor collection in new generation.
void Scavenge();
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+ Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front,
+ PromotionMode promotion_mode);
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -1749,6 +1807,15 @@ class Heap {
void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
double mutator_speed);
+ intptr_t MinimumAllocationLimitGrowingStep() {
+ const double kRegularAllocationLimitGrowingStep = 8;
+ const double kLowMemoryAllocationLimitGrowingStep = 2;
+ intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+ return limit * (ShouldOptimizeForMemoryUsage()
+ ? kLowMemoryAllocationLimitGrowingStep
+ : kRegularAllocationLimitGrowingStep);
+ }
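+ // Concretely, given the constants above: with pages of at most 1 MB the
+ // growing step is 8 MB in the regular case and 2 MB when optimizing for
+ // memory usage.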
+
// ===========================================================================
// Idle notification. ========================================================
// ===========================================================================
@@ -1817,11 +1884,6 @@ class Heap {
AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
int parameter_count, FixedArray* constant_pool);
- // Copy the code and scope info part of the code object, but insert
- // the provided data as the relocation information.
- MUST_USE_RESULT AllocationResult CopyCode(Code* code,
- Vector<byte> reloc_info);
-
MUST_USE_RESULT AllocationResult CopyCode(Code* code);
MUST_USE_RESULT AllocationResult
@@ -1981,12 +2043,17 @@ class Heap {
void set_force_oom(bool value) { force_oom_ = value; }
- // The amount of external memory registered through the API kept alive
- // by global handles
- int64_t amount_of_external_allocated_memory_;
+ // The amount of external memory registered through the API.
+ int64_t external_memory_;
+
+ // The limit at which to trigger memory pressure from the API.
+ int64_t external_memory_limit_;
+
+ // Caches the amount of external memory registered at the last MC.
+ int64_t external_memory_at_last_mark_compact_;
- // Caches the amount of external memory registered at the last global gc.
- int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+ // The amount of memory that has been freed concurrently.
+ base::AtomicNumber<intptr_t> external_memory_concurrently_freed_;
// This can be calculated directly from a pointer to the heap; however, it is
// more expedient to get at the isolate directly from within Heap methods.
@@ -2012,11 +2079,11 @@ class Heap {
// This is not the depth of nested AlwaysAllocateScopes but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
- AtomicNumber<size_t> always_allocate_scope_count_;
+ base::AtomicNumber<size_t> always_allocate_scope_count_;
// Stores the memory pressure level that is set by MemoryPressureNotification
// and reset by a mark-compact garbage collection.
- AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+ base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
// For keeping track of context disposals.
int contexts_disposed_;
@@ -2073,10 +2140,6 @@ class Heap {
// last GC.
bool old_gen_exhausted_;
- // Indicates that memory usage is more important than latency.
- // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
- bool optimize_for_memory_usage_;
-
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
bool inline_allocation_disabled_;
@@ -2104,7 +2167,6 @@ class Heap {
int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
GCTracer* tracer_;
- EmbedderHeapTracer* embedder_heap_tracer_;
int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
@@ -2151,7 +2213,9 @@ class Heap {
MarkCompactCollector* mark_compact_collector_;
- StoreBuffer store_buffer_;
+ MemoryAllocator* memory_allocator_;
+
+ StoreBuffer* store_buffer_;
IncrementalMarking* incremental_marking_;
@@ -2159,7 +2223,8 @@ class Heap {
MemoryReducer* memory_reducer_;
- ObjectStats* object_stats_;
+ ObjectStats* live_object_stats_;
+ ObjectStats* dead_object_stats_;
ScavengeJob* scavenge_job_;
@@ -2192,7 +2257,7 @@ class Heap {
// storage is only alive temporarily during a GC. The invariant is that all
// pointers in this map are already fixed, i.e., they do not point to
// forwarding pointers.
- HashMap* global_pretenuring_feedback_;
+ base::HashMap* global_pretenuring_feedback_;
char trace_ring_buffer_[kTraceRingBufferSize];
// If it's not full then the data is from 0 to ring_buffer_end_. If it's
@@ -2217,12 +2282,6 @@ class Heap {
ExternalStringTable external_string_table_;
- MemoryChunk* chunks_queued_for_free_;
-
- size_t concurrent_unmapping_tasks_active_;
-
- base::Semaphore pending_unmapping_tasks_semaphore_;
-
base::Mutex relocation_mutex_;
int gc_callbacks_depth_;
@@ -2231,8 +2290,6 @@ class Heap {
StrongRootsList* strong_roots_list_;
- ArrayBufferTracker* array_buffer_tracker_;
-
// The depth of HeapIterator nestings.
int heap_iterator_depth_;
@@ -2250,10 +2307,11 @@ class Heap {
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
- friend class ObjectStatsVisitor;
+ friend class ObjectStatsCollector;
friend class Page;
friend class Scavenger;
friend class StoreBuffer;
+ friend class TestMemoryAllocatorScope;
// The allocator interface.
friend class Factory;
@@ -2273,29 +2331,31 @@ class HeapStats {
static const int kStartMarker = 0xDECADE00;
static const int kEndMarker = 0xDECADE01;
- int* start_marker; // 0
- int* new_space_size; // 1
- int* new_space_capacity; // 2
- intptr_t* old_space_size; // 3
- intptr_t* old_space_capacity; // 4
- intptr_t* code_space_size; // 5
- intptr_t* code_space_capacity; // 6
- intptr_t* map_space_size; // 7
- intptr_t* map_space_capacity; // 8
- intptr_t* lo_space_size; // 9
- int* global_handle_count; // 10
- int* weak_global_handle_count; // 11
- int* pending_global_handle_count; // 12
- int* near_death_global_handle_count; // 13
- int* free_global_handle_count; // 14
- intptr_t* memory_allocator_size; // 15
- intptr_t* memory_allocator_capacity; // 16
- int* objects_per_type; // 17
- int* size_per_type; // 18
- int* os_error; // 19
- char* last_few_messages; // 20
- char* js_stacktrace; // 21
- int* end_marker; // 22
+ intptr_t* start_marker; // 0
+ size_t* new_space_size; // 1
+ size_t* new_space_capacity; // 2
+ size_t* old_space_size; // 3
+ size_t* old_space_capacity; // 4
+ size_t* code_space_size; // 5
+ size_t* code_space_capacity; // 6
+ size_t* map_space_size; // 7
+ size_t* map_space_capacity; // 8
+ size_t* lo_space_size; // 9
+ size_t* global_handle_count; // 10
+ size_t* weak_global_handle_count; // 11
+ size_t* pending_global_handle_count; // 12
+ size_t* near_death_global_handle_count; // 13
+ size_t* free_global_handle_count; // 14
+ size_t* memory_allocator_size; // 15
+ size_t* memory_allocator_capacity; // 16
+ size_t* malloced_memory; // 17
+ size_t* malloced_peak_memory; // 18
+ size_t* objects_per_type; // 19
+ size_t* size_per_type; // 20
+ int* os_error; // 21
+ char* last_few_messages; // 22
+ char* js_stacktrace; // 23
+ intptr_t* end_marker; // 24
};
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 3ccbec23d6..fe14dd04c6 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -71,14 +71,12 @@ void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
Heap* heap, double deadline_in_ms) {
IncrementalMarking* incremental_marking = heap->incremental_marking();
- MarkCompactCollector* mark_compact_collector = heap->mark_compact_collector();
if (incremental_marking->IsStopped()) {
return kDone;
}
- if (mark_compact_collector->sweeping_in_progress()) {
- if (mark_compact_collector->IsSweepingCompleted()) {
- mark_compact_collector->EnsureSweepingCompleted();
- }
+ if (incremental_marking->IsSweeping()) {
+ incremental_marking->FinalizeSweeping();
+ // TODO(hpayer): We can continue here if enough idle time is left.
return kMoreWork;
}
const double remaining_idle_time_in_ms =
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 46f95cc181..b9e7c61ba0 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -10,8 +10,9 @@
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
-#include "src/heap/objects-visiting.h"
+#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -48,10 +49,10 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
- MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
+ MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj);
DCHECK(!Marking::IsImpossible(value_bit));
- MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj);
DCHECK(!Marking::IsImpossible(obj_bit));
bool is_black = Marking::IsBlack(obj_bit);
@@ -149,7 +150,7 @@ void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(obj));
if (Marking::IsBlack(mark_bit)) {
MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
}
@@ -157,15 +158,42 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
}
}
+void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
+ Address new_start) {
+ // This is only used when resizing an object.
+ DCHECK(MemoryChunk::FromAddress(old_start) ==
+ MemoryChunk::FromAddress(new_start));
-static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
- MarkBit mark_bit, int size) {
- DCHECK(!Marking::IsImpossible(mark_bit));
- if (Marking::IsBlack(mark_bit)) return;
- Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
-}
+ if (!heap->incremental_marking()->IsMarking()) return;
+ // If the mark doesn't move, we don't check the color of the object.
+ // It doesn't matter whether the object is black, since it hasn't changed
+ // size, so the adjustment to the live data count will be zero anyway.
+ if (old_start == new_start) return;
+
+ MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(new_start);
+ MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(old_start);
+
+#ifdef DEBUG
+ Marking::ObjectColor old_color = Marking::Color(old_mark_bit);
+#endif
+
+ if (Marking::IsBlack(old_mark_bit)) {
+ Marking::BlackToWhite(old_mark_bit);
+ Marking::MarkBlack(new_mark_bit);
+ return;
+ } else if (Marking::IsGrey(old_mark_bit)) {
+ Marking::GreyToWhite(old_mark_bit);
+ heap->incremental_marking()->WhiteToGreyAndPush(
+ HeapObject::FromAddress(new_start), new_mark_bit);
+ heap->incremental_marking()->RestartIfNotMarking();
+ }
+
+#ifdef DEBUG
+ Marking::ObjectColor new_color = Marking::Color(new_mark_bit);
+ DCHECK(new_color == old_color);
+#endif
+}
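+
+// Illustration (assumed from the resizing use case noted above): left-trimming
+// a FixedArray moves its start address; TransferMark(heap, old_start,
+// new_start) then clears the bit at old_start and re-marks new_start in the
+// same color, keeping live-byte accounting balanced.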
class IncrementalMarkingMarkingVisitor
: public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
@@ -210,10 +238,10 @@ class IncrementalMarkingMarkingVisitor
} while (scan_until_end && start_offset < object_size);
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
- if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
+ if (Marking::IsGrey(ObjectMarking::MarkBitFrom(object))) {
heap->mark_compact_collector()->marking_deque()->Unshift(object);
} else {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
heap->mark_compact_collector()->UnshiftBlack(object);
}
heap->incremental_marking()->NotifyIncompleteScanOfObject(
@@ -231,7 +259,7 @@ class IncrementalMarkingMarkingVisitor
// Note that GC can happen when the context is not fully initialized,
// so the cache can be undefined.
Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
- if (!cache->IsUndefined()) {
+ if (!cache->IsUndefined(map->GetIsolate())) {
MarkObjectGreyDoNotEnqueue(cache);
}
VisitNativeContext(map, context);
@@ -258,14 +286,14 @@ class IncrementalMarkingMarkingVisitor
// Marks the object grey and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, Object* obj)) {
- IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
+ IncrementalMarking::MarkGrey(heap, HeapObject::cast(obj));
}
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
if (Marking::IsWhite(mark_bit)) {
Marking::MarkBlack(mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
@@ -276,13 +304,15 @@ class IncrementalMarkingMarkingVisitor
};
void IncrementalMarking::IterateBlackObject(HeapObject* object) {
- if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
+ if (IsMarking() && Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
Page* page = Page::FromAddress(object->address());
if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
- // IterateBlackObject requires us to visit the hole object.
+ // IterateBlackObject requires us to visit the whole object.
page->ResetProgressBar();
}
- IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
+ Map* map = object->map();
+ MarkGrey(heap_, map);
+ IncrementalMarkingMarkingVisitor::IterateBody(map, object);
}
}
@@ -303,7 +333,7 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
Object* obj = *p;
if (!obj->IsHeapObject()) return;
- IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
+ IncrementalMarking::MarkGrey(heap_, HeapObject::cast(obj));
}
Heap* heap_;
@@ -341,9 +371,7 @@ void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
SetOldSpacePageFlags(p, false, false);
}
}
@@ -351,9 +379,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
NewSpace* space) {
- NewSpacePageIterator it(space);
- while (it.has_next()) {
- NewSpacePage* p = it.next();
+ for (Page* p : *space) {
SetNewSpacePageFlags(p, false);
}
}
@@ -365,27 +391,21 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
- LargePage* lop = heap_->lo_space()->first_page();
- while (LargePage::IsValid(lop)) {
+ for (LargePage* lop : *heap_->lo_space()) {
SetOldSpacePageFlags(lop, false, false);
- lop = lop->next_page();
}
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
SetOldSpacePageFlags(p, true, is_compacting_);
}
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
- NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
- while (it.has_next()) {
- NewSpacePage* p = it.next();
+ for (Page* p : *space) {
SetNewSpacePageFlags(p, true);
}
}
@@ -397,10 +417,8 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
- LargePage* lop = heap_->lo_space()->first_page();
- while (LargePage::IsValid(lop)) {
+ for (LargePage* lop : *heap_->lo_space()) {
SetOldSpacePageFlags(lop, true, is_compacting_);
- lop = lop->next_page();
}
}
@@ -469,9 +487,10 @@ static void PatchIncrementalMarkingRecordWriteStubs(
UnseededNumberDictionary* stubs = heap->code_stubs();
int capacity = stubs->Capacity();
+ Isolate* isolate = heap->isolate();
for (int i = 0; i < capacity; i++) {
Object* k = stubs->KeyAt(i);
- if (stubs->IsKey(k)) {
+ if (stubs->IsKey(isolate, k)) {
uint32_t key = NumberToUint32(k);
if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
@@ -537,6 +556,12 @@ void IncrementalMarking::StartMarking() {
state_ = MARKING;
+ if (heap_->UsingEmbedderHeapTracer()) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
+ heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+ }
+
RecordWriteStub::Mode mode = is_compacting_
? RecordWriteStub::INCREMENTAL_COMPACTION
: RecordWriteStub::INCREMENTAL;
@@ -558,12 +583,6 @@ void IncrementalMarking::StartMarking() {
heap_->CompletelyClearInstanceofCache();
heap_->isolate()->compilation_cache()->MarkCompactPrologue();
- if (FLAG_cleanup_code_caches_at_gc) {
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
- }
-
// Mark strong roots grey.
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
@@ -578,9 +597,9 @@ void IncrementalMarking::StartBlackAllocation() {
DCHECK(FLAG_black_allocation);
DCHECK(IsMarking());
black_allocation_ = true;
- OldSpace* old_space = heap()->old_space();
- old_space->EmptyAllocationInfo();
- old_space->free_list()->Reset();
+ heap()->old_space()->MarkAllocationInfoBlack();
+ heap()->map_space()->MarkAllocationInfoBlack();
+ heap()->code_space()->MarkAllocationInfoBlack();
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Black allocation started\n");
}
@@ -605,11 +624,15 @@ void IncrementalMarking::MarkRoots() {
void IncrementalMarking::MarkObjectGroups() {
+ TRACE_GC(heap_->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING);
+
+ DCHECK(!heap_->UsingEmbedderHeapTracer());
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
IncrementalMarkingRootMarkingVisitor visitor(this);
- heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
+ heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkGrey);
heap_->isolate()->global_handles()->IterateObjectGroups(
&visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
@@ -662,7 +685,8 @@ bool ShouldRetainMap(Map* map, int age) {
}
Object* constructor = map->GetConstructor();
if (!constructor->IsHeapObject() ||
- Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
+ Marking::IsWhite(
+ ObjectMarking::MarkBitFrom(HeapObject::cast(constructor)))) {
// The constructor is dead, no new objects with this map can
// be created. Do not retain this map.
return false;
@@ -691,15 +715,16 @@ void IncrementalMarking::RetainMaps() {
int age = Smi::cast(retained_maps->Get(i + 1))->value();
int new_age;
Map* map = Map::cast(cell->value());
- MarkBit map_mark = Marking::MarkBitFrom(map);
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
Marking::IsWhite(map_mark)) {
if (ShouldRetainMap(map, age)) {
- MarkObject(heap(), map);
+ MarkGrey(heap(), map);
}
Object* prototype = map->prototype();
if (age > 0 && prototype->IsHeapObject() &&
- Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
+ Marking::IsWhite(
+ ObjectMarking::MarkBitFrom(HeapObject::cast(prototype)))) {
// The prototype is not marked, age the map.
new_age = age - 1;
} else {
@@ -719,6 +744,7 @@ void IncrementalMarking::RetainMaps() {
void IncrementalMarking::FinalizeIncrementally() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -735,7 +761,9 @@ void IncrementalMarking::FinalizeIncrementally() {
// 4) Remove weak cell with live values from the list of weak cells, they
// do not need processing during GC.
MarkRoots();
- MarkObjectGroups();
+ if (!heap_->UsingEmbedderHeapTracer()) {
+ MarkObjectGroups();
+ }
if (incremental_marking_finalization_rounds_ == 0) {
// Map retaining is needed for performance, not correctness,
// so we can do it only once at the beginning of the finalization.
@@ -750,7 +778,6 @@ void IncrementalMarking::FinalizeIncrementally() {
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
heap_->tracer()->AddMarkingTime(delta);
- heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
if (FLAG_trace_incremental_marking) {
PrintF(
"[IncrementalMarking] Finalize incrementally round %d, "
@@ -803,13 +830,13 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
// them.
if (map_word.IsForwardingAddress()) {
HeapObject* dest = map_word.ToForwardingAddress();
- if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest->address())))
continue;
array[new_top] = dest;
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
DCHECK(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
@@ -821,7 +848,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
DCHECK(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
@@ -835,48 +862,53 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
- MarkObject(heap_, map);
+ MarkGrey(heap_, map);
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
(obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
(chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
Marking::IsBlack(mark_bit)));
#endif
- MarkBlackOrKeepBlack(obj, mark_bit, size);
+ MarkBlack(obj, size);
}
-
-void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
+void IncrementalMarking::MarkGrey(Heap* heap, HeapObject* object) {
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
if (Marking::IsWhite(mark_bit)) {
- heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
+ heap->incremental_marking()->WhiteToGreyAndPush(object, mark_bit);
}
}
+void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
+ if (Marking::IsBlack(mark_bit)) return;
+ Marking::GreyToBlack(mark_bit);
+ MemoryChunk::IncrementLiveBytesFromGC(obj, size);
+}
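+
+// Marking protocol in brief: MarkGrey() publishes a white object on the
+// marking deque; MarkBlack() runs once the object's fields have been visited
+// (see VisitObject above), which is also where live bytes are accounted.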
-intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
+intptr_t IncrementalMarking::ProcessMarkingDeque(
+ intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0;
- Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
- Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
MarkingDeque* marking_deque =
heap_->mark_compact_collector()->marking_deque();
- while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
+ while (!marking_deque->IsEmpty() && (bytes_processed < bytes_to_process ||
+ completion == FORCE_COMPLETION)) {
HeapObject* obj = marking_deque->Pop();
- // Explicitly skip one and two word fillers. Incremental markbit patterns
- // are correct only for objects that occupy at least two words.
- // Moreover, slots filtering for left-trimmed arrays works only when
- // the distance between the old array start and the new array start
- // is greater than two if both starts are marked.
- Map* map = obj->map();
- if (map == one_pointer_filler_map || map == two_pointer_filler_map)
+ // Left trimming may result in white filler objects on the marking deque.
+ // Ignore these objects.
+ if (obj->IsFiller()) {
+ DCHECK(Marking::IsImpossible(ObjectMarking::MarkBitFrom(obj)) ||
+ Marking::IsWhite(ObjectMarking::MarkBitFrom(obj)));
continue;
+ }
+ Map* map = obj->map();
int size = obj->SizeFromMap(map);
unscanned_bytes_of_large_object_ = 0;
VisitObject(map, obj, size);
@@ -886,23 +918,6 @@ intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
}
-void IncrementalMarking::ProcessMarkingDeque() {
- Map* filler_map = heap_->one_pointer_filler_map();
- MarkingDeque* marking_deque =
- heap_->mark_compact_collector()->marking_deque();
- while (!marking_deque->IsEmpty()) {
- HeapObject* obj = marking_deque->Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- VisitObject(map, obj, obj->SizeFromMap(map));
- }
-}
-
-
void IncrementalMarking::Hurry() {
// A scavenge may have pushed new objects on the marking deque (due to black
// allocation) even in COMPLETE state. This may happen if scavenges are
@@ -919,7 +934,7 @@ void IncrementalMarking::Hurry() {
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
- ProcessMarkingDeque();
+ ProcessMarkingDeque(0, FORCE_COMPLETION);
state_ = COMPLETE;
if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
double end = heap_->MonotonicallyIncreasingTimeInMs();
@@ -932,27 +947,20 @@ void IncrementalMarking::Hurry() {
}
}
- if (FLAG_cleanup_code_caches_at_gc) {
- PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
- Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
- MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
- PolymorphicCodeCache::kSize);
- }
-
Object* context = heap_->native_contexts_list();
- while (!context->IsUndefined()) {
+ while (!context->IsUndefined(heap_->isolate())) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
HeapObject* cache = HeapObject::cast(
Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
- if (!cache->IsUndefined()) {
- MarkBit mark_bit = Marking::MarkBitFrom(cache);
+ if (!cache->IsUndefined(heap_->isolate())) {
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(cache);
if (Marking::IsGrey(mark_bit)) {
Marking::GreyToBlack(mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
}
}
- context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ context = Context::cast(context)->next_context_link();
}
}
@@ -1130,6 +1138,18 @@ void IncrementalMarking::SpeedUp() {
}
}
+void IncrementalMarking::FinalizeSweeping() {
+ DCHECK(state_ == SWEEPING);
+ if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+ (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
+ !FLAG_concurrent_sweeping)) {
+ heap_->mark_compact_collector()->EnsureSweepingCompleted();
+ }
+ if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+ bytes_scanned_ = 0;
+ StartMarking();
+ }
+}
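+
+// Shared by Step() below and the incremental-marking idle task: once sweeping
+// has finished, bytes_scanned_ is reset and StartMarking() begins marking.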
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action,
@@ -1161,6 +1181,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
double start = heap_->MonotonicallyIncreasingTimeInMs();
// The marking speed is driven either by the allocation rate or by the rate
@@ -1179,19 +1200,28 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
bytes_scanned_ += bytes_to_process;
+ // TODO(hpayer): Do not account for sweeping finalization while marking.
if (state_ == SWEEPING) {
- if (heap_->mark_compact_collector()->sweeping_in_progress() &&
- (heap_->mark_compact_collector()->IsSweepingCompleted() ||
- !FLAG_concurrent_sweeping)) {
- heap_->mark_compact_collector()->EnsureSweepingCompleted();
- }
- if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
- bytes_scanned_ = 0;
- StartMarking();
- }
+ FinalizeSweeping();
}
+
if (state_ == MARKING) {
bytes_processed = ProcessMarkingDeque(bytes_to_process);
+ if (FLAG_incremental_marking_wrappers &&
+ heap_->UsingEmbedderHeapTracer()) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+ // This currently marks through all registered wrappers and does not
+ // respect bytes_to_process.
+ // TODO(hpayer): Integrate incremental marking of wrappers into
+ // bytes_to_process logic.
+ heap_->mark_compact_collector()
+ ->RegisterWrappersWithEmbedderHeapTracer();
+ heap_->mark_compact_collector()->embedder_heap_tracer()->AdvanceTracing(
+ 0,
+ EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ }
if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
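
Taken together, the Step() hunks above amount to the following control flow: finish sweeping first, then drain the marking deque within the byte budget, and finally push embedder wrapper tracing to completion (it does not yet respect the budget, per the TODO). A simplified, compilable model with stand-in names; none of these are the real V8 signatures:

    #include <iostream>

    // Hypothetical, simplified model of IncrementalMarking::Step's control
    // flow after this patch; every name here is a stand-in.
    enum class State { SWEEPING, MARKING, COMPLETE };
    State state = State::SWEEPING;
    bool sweeping_done = true, deque_empty = true, use_embedder_tracer = true;

    void FinalizeSweeping() { if (sweeping_done) state = State::MARKING; }
    void ProcessMarkingDeque(long) { std::cout << "drain deque in budget\n"; }
    void TraceWrappersToCompletion() { std::cout << "trace wrappers\n"; }

    void Step(long bytes_to_process) {
      if (state == State::SWEEPING) FinalizeSweeping();
      if (state == State::MARKING) {
        ProcessMarkingDeque(bytes_to_process);
        // Wrapper tracing currently ignores bytes_to_process (see TODO).
        if (use_embedder_tracer) TraceWrappersToCompletion();
        if (deque_empty) state = State::COMPLETE;
      }
    }

    int main() { Step(65536); }
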
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index f10150da34..877f05e0e0 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -9,6 +9,7 @@
#include "src/execution.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
+#include "src/heap/mark-compact.h"
#include "src/heap/spaces.h"
#include "src/objects.h"
@@ -68,6 +69,8 @@ class IncrementalMarking {
inline bool IsStopped() { return state() == STOPPED; }
+ inline bool IsSweeping() { return state() == SWEEPING; }
+
INLINE(bool IsMarking()) { return state() >= MARKING; }
inline bool IsMarkingIncomplete() { return state() == MARKING; }
@@ -135,6 +138,8 @@ class IncrementalMarking {
// incremental marking to be postponed.
static const size_t kMaxIdleMarkingDelayCounter = 3;
+ void FinalizeSweeping();
+
void OldSpaceStep(intptr_t allocated);
intptr_t Step(intptr_t allocated, CompletionAction action,
@@ -181,7 +186,7 @@ class IncrementalMarking {
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
}
- inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
+ inline void SetNewSpacePageFlags(Page* chunk) {
SetNewSpacePageFlags(chunk, IsMarking());
}
@@ -199,7 +204,33 @@ class IncrementalMarking {
bool IsIdleMarkingDelayCounterLimitReached();
- static void MarkObject(Heap* heap, HeapObject* object);
+ static void MarkGrey(Heap* heap, HeapObject* object);
+
+ static void MarkBlack(HeapObject* object, int size);
+
+ static void TransferMark(Heap* heap, Address old_start, Address new_start);
+
+ // Returns true if the color transfer requires live bytes updating.
+ INLINE(static bool TransferColor(HeapObject* from, HeapObject* to,
+ int size)) {
+ MarkBit from_mark_bit = ObjectMarking::MarkBitFrom(from);
+ MarkBit to_mark_bit = ObjectMarking::MarkBitFrom(to);
+
+ if (Marking::IsBlack(to_mark_bit)) {
+ DCHECK(to->GetHeap()->incremental_marking()->black_allocation());
+ return false;
+ }
+
+ DCHECK(Marking::IsWhite(to_mark_bit));
+ if (from_mark_bit.Get()) {
+ to_mark_bit.Set();
+ if (from_mark_bit.Next().Get()) {
+ to_mark_bit.Next().Set();
+ return true;
+ }
+ }
+ return false;
+ }
void IterateBlackObject(HeapObject* object);
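
TransferColor above relies on the two-bit mark encoding declared in mark-compact.cc ("11" black, "10" grey, "00" white): copying the first bit preserves at-least-grey, and copying the second bit (set only for black objects) is what makes live-byte accounting necessary. A self-contained sketch with a hypothetical MarkBits struct standing in for MarkBit:

    #include <cassert>

    // "11" = black, "10" = grey, "00" = white; "01" is impossible.
    struct MarkBits { bool first; bool second; };

    // Returns true iff the transfer turned the destination black, i.e.
    // live bytes need updating, mirroring TransferColor above.
    bool TransferColor(MarkBits& from, MarkBits& to) {
      assert(!to.first && !to.second);  // destination starts white
      if (from.first) {
        to.first = true;                // at least grey
        if (from.second) {
          to.second = true;             // source was black -> dest black
          return true;
        }
      }
      return false;
    }

    int main() {
      MarkBits black{true, true}, grey{true, false}, w1{}, w2{};
      assert(TransferColor(black, w1));   // black moves, live bytes change
      assert(!TransferColor(grey, w2));   // grey moves, no live-byte update
    }
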
@@ -211,6 +242,8 @@ class IncrementalMarking {
bool black_allocation() { return black_allocation_; }
+ void StartBlackAllocationForTesting() { StartBlackAllocation(); }
+
private:
class Observer : public AllocationObserver {
public:
@@ -260,7 +293,9 @@ class IncrementalMarking {
INLINE(void ProcessMarkingDeque());
- INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));
+ INLINE(intptr_t ProcessMarkingDeque(
+ intptr_t bytes_to_process,
+ ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));
INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 281ece4cc8..7ead42150b 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -12,38 +12,29 @@
namespace v8 {
namespace internal {
-inline std::vector<Page*>& MarkCompactCollector::sweeping_list(Space* space) {
- if (space == heap()->old_space()) {
- return sweeping_list_old_space_;
- } else if (space == heap()->code_space()) {
- return sweeping_list_code_space_;
- }
- DCHECK_EQ(space, heap()->map_space());
- return sweeping_list_map_space_;
-}
-
-
void MarkCompactCollector::PushBlack(HeapObject* obj) {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
+ DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
if (marking_deque_.Push(obj)) {
MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
} else {
- Marking::BlackToGrey(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
+ Marking::BlackToGrey(mark_bit);
}
}
void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
+ DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
if (!marking_deque_.Unshift(obj)) {
MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
- Marking::BlackToGrey(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
+ Marking::BlackToGrey(mark_bit);
}
}
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
- DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+ DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
if (Marking::IsWhite(mark_bit)) {
Marking::WhiteToBlack(mark_bit);
DCHECK(obj->GetIsolate()->heap()->Contains(obj));
@@ -54,7 +45,7 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
DCHECK(Marking::IsWhite(mark_bit));
- DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+ DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
Marking::WhiteToBlack(mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
}
@@ -63,7 +54,7 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
bool MarkCompactCollector::IsMarked(Object* obj) {
DCHECK(obj->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(obj);
- return Marking::IsBlackOrGrey(Marking::MarkBitFrom(heap_object));
+ return Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(heap_object));
}
@@ -73,7 +64,7 @@ void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
- DCHECK(Marking::IsBlackOrGrey(Marking::MarkBitFrom(object)));
+ DCHECK(Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object)));
RememberedSet<OLD_TO_OLD>::Insert(source_page,
reinterpret_cast<Address>(slot));
}
@@ -90,7 +81,7 @@ void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
void CodeFlusher::AddCandidate(JSFunction* function) {
DCHECK(function->code() == function->shared()->code());
- if (function->next_function_link()->IsUndefined()) {
+ if (function->next_function_link()->IsUndefined(isolate_)) {
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
}
@@ -116,7 +107,7 @@ void CodeFlusher::SetNextCandidate(JSFunction* candidate,
void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
- DCHECK(undefined->IsUndefined());
+ DCHECK(undefined->IsUndefined(candidate->GetIsolate()));
candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
}
@@ -157,25 +148,66 @@ HeapObject* LiveObjectIterator<T>::Next() {
second_bit_index = 0x1;
// The overlapping case; there has to exist a cell after the current
// cell.
- DCHECK(!it_.Done());
+ // However, if there is a black area at the end of the page, and the
+        // last word is a one-word filler, we are not allowed to advance. In
+ // that case we can return immediately.
+ if (it_.Done()) {
+ DCHECK(HeapObject::FromAddress(addr)->map() ==
+ HeapObject::FromAddress(addr)
+ ->GetHeap()
+ ->one_pointer_filler_map());
+ return nullptr;
+ }
it_.Advance();
cell_base_ = it_.CurrentCellBase();
current_cell_ = *it_.CurrentCell();
}
- if (T == kBlackObjects && (current_cell_ & second_bit_index)) {
- object = HeapObject::FromAddress(addr);
- } else if (T == kGreyObjects && !(current_cell_ & second_bit_index)) {
- object = HeapObject::FromAddress(addr);
- } else if (T == kAllLiveObjects) {
+
+ if (current_cell_ & second_bit_index) {
+ // We found a black object. If the black object is within a black area,
+ // make sure that we skip all set bits in the black area until the
+ // object ends.
+ HeapObject* black_object = HeapObject::FromAddress(addr);
+ Address end = addr + black_object->Size() - kPointerSize;
+      // One-word filler objects do not borrow the second mark bit, so we
+      // have to skip the cell advancing and bit clearing below.
+      // Note that we are at a one-word filler exactly when
+      // object_start + object_size - kPointerSize == object_start.
+ if (addr != end) {
+ DCHECK_EQ(chunk_, MemoryChunk::FromAddress(end));
+ uint32_t end_mark_bit_index = chunk_->AddressToMarkbitIndex(end);
+ unsigned int end_cell_index =
+ end_mark_bit_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask =
+ 1u << Bitmap::IndexInCell(end_mark_bit_index);
+ if (it_.Advance(end_cell_index)) {
+ cell_base_ = it_.CurrentCellBase();
+ current_cell_ = *it_.CurrentCell();
+ }
+
+ // Clear all bits in current_cell, including the end index.
+ current_cell_ &= ~(end_index_mask + end_index_mask - 1);
+ }
+
+ if (T == kBlackObjects || T == kAllLiveObjects) {
+ object = black_object;
+ }
+ } else if ((T == kGreyObjects || T == kAllLiveObjects)) {
object = HeapObject::FromAddress(addr);
}
- // Clear the second bit of the found object.
- current_cell_ &= ~second_bit_index;
-
// We found a live object.
- if (object != nullptr) break;
+ if (object != nullptr) {
+ if (object->IsFiller()) {
+ // Black areas together with slack tracking may result in black filler
+ // objects. We filter these objects out in the iterator.
+ object = nullptr;
+ } else {
+ break;
+ }
+ }
}
+
if (current_cell_ == 0) {
if (!it_.Done()) {
it_.Advance();
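
The trickiest line in the LiveObjectIterator hunk above is current_cell_ &= ~(end_index_mask + end_index_mask - 1). For a single set bit m = 1 << k, m + m - 1 sets bits 0 through k, so the complement clears every bit up to and including the end index while keeping the higher ones. A worked example (the cell values are illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      // end_index_mask marks the bit for the last word of the black
      // object, following the names used in LiveObjectIterator::Next.
      uint32_t current_cell = 0b11110110;
      uint32_t end_index_mask = 1u << 2;  // end bit at index 2
      // mask + mask - 1 sets every bit up to and including the end index:
      // (1 << 2) + (1 << 2) - 1 == 0b00000111.
      current_cell &= ~(end_index_mask + end_index_mask - 1);
      assert(current_cell == 0b11110000);  // bits 0..2 cleared
    }
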
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 78ee33f530..ae7b4676ae 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -25,7 +25,6 @@
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/utils-inl.h"
#include "src/v8.h"
@@ -38,9 +37,8 @@ const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";
-
-// The following has to hold in order for {Marking::MarkBitFrom} to not produce
-// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
+// The following has to hold in order for {ObjectMarking::MarkBitFrom} to not
+// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
@@ -49,22 +47,22 @@ STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: // NOLINT
+ heap_(heap),
+ page_parallel_job_semaphore_(0),
#ifdef DEBUG
state_(IDLE),
#endif
marking_parity_(ODD_MARKING_PARITY),
was_marked_incrementally_(false),
evacuation_(false),
- heap_(heap),
+ compacting_(false),
+ black_allocation_(false),
+ have_code_to_deoptimize_(false),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
code_flusher_(nullptr),
- have_code_to_deoptimize_(false),
- compacting_(false),
- sweeping_in_progress_(false),
- pending_sweeper_tasks_semaphore_(0),
- pending_compaction_tasks_semaphore_(0),
- page_parallel_job_semaphore_(0) {
+ embedder_heap_tracer_(nullptr),
+ sweeper_(heap) {
}
#ifdef VERIFY_HEAP
@@ -106,40 +104,40 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
VerifyMarkingVisitor visitor(heap);
HeapObject* object;
Address next_object_must_be_here_or_later = bottom;
-
- for (Address current = bottom; current < top; current += kPointerSize) {
+ for (Address current = bottom; current < top;) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
- // The next word for sure belongs to the current object, jump over it.
+      // The object is either part of a black area created by black
+      // allocation or a regular black object.
+ Page* page = Page::FromAddress(current);
+ CHECK(
+ page->markbits()->AllBitsSetInRange(
+ page->AddressToMarkbitIndex(current),
+ page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
+ page->markbits()->AllBitsClearInRange(
+ page->AddressToMarkbitIndex(current + kPointerSize * 2),
+ page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
+ current = next_object_must_be_here_or_later;
+ } else {
current += kPointerSize;
}
}
}
-static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
- CHECK(page->IsFlagSet(Page::BLACK_PAGE));
- VerifyMarkingVisitor visitor(heap);
- HeapObjectIterator it(page);
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
- object->Iterate(&visitor);
- }
-}
-
static void VerifyMarking(NewSpace* space) {
Address end = space->top();
- NewSpacePageIterator it(space->bottom(), end);
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
- CHECK_EQ(space->bottom(),
- NewSpacePage::FromAddress(space->bottom())->area_start());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- Address limit = it.has_next() ? page->area_end() : end;
+ CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
+
+ NewSpacePageRange range(space->bottom(), end);
+ for (auto it = range.begin(); it != range.end();) {
+ Page* page = *(it++);
+ Address limit = it != range.end() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
VerifyMarking(space->heap(), page->area_start(), limit);
}
@@ -147,15 +145,8 @@ static void VerifyMarking(NewSpace* space) {
static void VerifyMarking(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- if (p->IsFlagSet(Page::BLACK_PAGE)) {
- VerifyMarkingBlackPage(space->heap(), p);
- } else {
- VerifyMarking(space->heap(), p->area_start(), p->area_end());
- }
+ for (Page* p : *space) {
+ VerifyMarking(space->heap(), p->area_start(), p->area_end());
}
}
@@ -206,13 +197,12 @@ static void VerifyEvacuation(Page* page) {
static void VerifyEvacuation(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
VerifyEvacuationVisitor visitor;
-
- while (it.has_next()) {
- NewSpacePage* page = it.next();
+ NewSpacePageRange range(space->bottom(), space->top());
+ for (auto it = range.begin(); it != range.end();) {
+ Page* page = *(it++);
Address current = page->area_start();
- Address limit = it.has_next() ? page->area_end() : space->top();
+ Address limit = it != range.end() ? page->area_end() : space->top();
CHECK(limit == space->top() || !page->Contains(space->top()));
while (current < limit) {
HeapObject* object = HeapObject::FromAddress(current);
@@ -227,10 +217,7 @@ static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
if (FLAG_use_allocation_folding && (space == heap->old_space())) {
return;
}
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
if (p->IsEvacuationCandidate()) continue;
VerifyEvacuation(p);
}
@@ -334,7 +321,6 @@ void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
#endif
}
-
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -346,6 +332,8 @@ void MarkCompactCollector::CollectGarbage() {
ClearNonLiveReferences();
+ RecordObjectStats();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyMarking(heap_);
@@ -362,10 +350,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
@@ -373,10 +358,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
-
- while (it.has_next()) {
- NewSpacePage* p = it.next();
+ for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
@@ -391,7 +373,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
CHECK(Marking::IsWhite(mark_bit));
CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
}
@@ -421,23 +403,15 @@ void MarkCompactCollector::VerifyOmittedMapChecks() {
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
- Bitmap::Clear(p);
- if (p->IsFlagSet(Page::BLACK_PAGE)) {
- p->ClearFlag(Page::BLACK_PAGE);
- }
+ for (Page* p : *space) {
+ p->ClearLiveness();
}
}
static void ClearMarkbitsInNewSpace(NewSpace* space) {
- NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
- while (it.has_next()) {
- Bitmap::Clear(it.next());
+ for (Page* page : *space) {
+ page->ClearLiveness();
}
}
@@ -450,62 +424,72 @@ void MarkCompactCollector::ClearMarkbits() {
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Marking::MarkWhite(Marking::MarkBitFrom(obj));
+ Marking::MarkWhite(ObjectMarking::MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
chunk->ResetProgressBar();
chunk->ResetLiveBytes();
- if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
- chunk->ClearFlag(Page::BLACK_PAGE);
- }
}
}
-
-class MarkCompactCollector::SweeperTask : public v8::Task {
+class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
public:
- SweeperTask(Heap* heap, AllocationSpace space_to_start)
- : heap_(heap), space_to_start_(space_to_start) {}
+ SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
+ AllocationSpace space_to_start)
+ : sweeper_(sweeper),
+ pending_sweeper_tasks_(pending_sweeper_tasks),
+ space_to_start_(space_to_start) {}
virtual ~SweeperTask() {}
private:
// v8::Task overrides.
void Run() override {
- DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
+ DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
- const int offset = space_to_start_ - FIRST_PAGED_SPACE;
- const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+ const int offset = space_to_start_ - FIRST_SPACE;
+ const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
- const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
- DCHECK_GE(space_id, FIRST_PAGED_SPACE);
+ const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+ DCHECK_GE(space_id, FIRST_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
- heap_->mark_compact_collector()->SweepInParallel(
- heap_->paged_space(space_id), 0);
+ sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
}
- heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
+ pending_sweeper_tasks_->Signal();
}
- Heap* heap_;
+ Sweeper* sweeper_;
+ base::Semaphore* pending_sweeper_tasks_;
AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
+void MarkCompactCollector::Sweeper::StartSweeping() {
+ sweeping_in_progress_ = true;
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
+ [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
+ });
+ if (FLAG_concurrent_sweeping) {
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (space == NEW_SPACE) return;
+ StartSweepingHelper(space);
+ });
+ }
+}
-void MarkCompactCollector::StartSweeperThreads() {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
+void MarkCompactCollector::Sweeper::StartSweepingHelper(
+ AllocationSpace space_to_start) {
+ num_sweeping_tasks_.Increment(1);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
+ new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
+ v8::Platform::kShortRunningTask);
}
-
-void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
+ Page* page) {
if (!page->SweepingDone()) {
- SweepInParallel(page, owner);
+ ParallelSweepPage(page, page->owner()->identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
@@ -515,34 +499,62 @@ void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
}
}
-
void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
- if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
- SweepInParallel(heap()->paged_space(space->identity()), 0);
+ if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
+ sweeper().ParallelSweepSpace(space->identity(), 0);
space->RefillFreeList();
}
}
+Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ SweptList& list = swept_list_[space->identity()];
+ if (list.length() > 0) {
+ return list.RemoveLast();
+ }
+ return nullptr;
+}
-void MarkCompactCollector::EnsureSweepingCompleted() {
- DCHECK(sweeping_in_progress_ == true);
+void MarkCompactCollector::Sweeper::EnsureCompleted() {
+ if (!sweeping_in_progress_) return;
// If sweeping is not completed or not running at all, we try to complete it
// here.
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
- SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
- SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
- SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
+ ForAllSweepingSpaces(
+ [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
}
if (FLAG_concurrent_sweeping) {
- pending_sweeper_tasks_semaphore_.Wait();
- pending_sweeper_tasks_semaphore_.Wait();
- pending_sweeper_tasks_semaphore_.Wait();
+ while (num_sweeping_tasks_.Value() > 0) {
+ pending_sweeper_tasks_semaphore_.Wait();
+ num_sweeping_tasks_.Increment(-1);
+ }
}
- ParallelSweepSpacesComplete();
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (space == NEW_SPACE) {
+ swept_list_[NEW_SPACE].Clear();
+ }
+ DCHECK(sweeping_list_[space].empty());
+ });
+ late_pages_ = false;
sweeping_in_progress_ = false;
+}
+
+void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
+ if (!sweeping_in_progress_) return;
+ if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+ for (Page* p : *heap_->new_space()) {
+ SweepOrWaitUntilSweepingCompleted(p);
+ }
+ }
+}
+
+void MarkCompactCollector::EnsureSweepingCompleted() {
+ if (!sweeper().sweeping_in_progress()) return;
+
+ sweeper().EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
@@ -554,56 +566,14 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
#endif
}
-
-bool MarkCompactCollector::IsSweepingCompleted() {
- if (!pending_sweeper_tasks_semaphore_.WaitFor(
- base::TimeDelta::FromSeconds(0))) {
- return false;
- }
- pending_sweeper_tasks_semaphore_.Signal();
- return true;
-}
-
-
-void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
- // This is only used when resizing an object.
- DCHECK(MemoryChunk::FromAddress(old_start) ==
- MemoryChunk::FromAddress(new_start));
-
- if (!heap->incremental_marking()->IsMarking() ||
- Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
- return;
-
- // If the mark doesn't move, we don't check the color of the object.
- // It doesn't matter whether the object is black, since it hasn't changed
- // size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return;
-
- MarkBit new_mark_bit = MarkBitFrom(new_start);
- MarkBit old_mark_bit = MarkBitFrom(old_start);
-
-#ifdef DEBUG
- ObjectColor old_color = Color(old_mark_bit);
-#endif
-
- if (Marking::IsBlack(old_mark_bit)) {
- Marking::BlackToWhite(old_mark_bit);
- Marking::MarkBlack(new_mark_bit);
- return;
- } else if (Marking::IsGrey(old_mark_bit)) {
- Marking::GreyToWhite(old_mark_bit);
- heap->incremental_marking()->WhiteToGreyAndPush(
- HeapObject::FromAddress(new_start), new_mark_bit);
- heap->incremental_marking()->RestartIfNotMarking();
+bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
+ while (pending_sweeper_tasks_semaphore_.WaitFor(
+ base::TimeDelta::FromSeconds(0))) {
+ num_sweeping_tasks_.Increment(-1);
}
-
-#ifdef DEBUG
- ObjectColor new_color = Color(new_mark_bit);
- DCHECK(new_color == old_color);
-#endif
+ return num_sweeping_tasks_.Value() == 0;
}
-
const char* AllocationSpaceName(AllocationSpace space) {
switch (space) {
case NEW_SPACE:
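
The rewritten IsSweepingCompleted above drains the semaphore instead of the old single probe: every finished task posts one signal, so consuming all currently available signals and decrementing the task counter for each tells us whether any task is still running. A sketch of the same pattern, using C++20 std::counting_semaphore and std::atomic as assumed stand-ins for base::Semaphore and base::AtomicNumber:

    #include <atomic>
    #include <cassert>
    #include <semaphore>

    std::counting_semaphore<> pending_tasks{0};
    std::atomic<int> num_tasks{0};

    bool IsSweepingCompleted() {
      while (pending_tasks.try_acquire()) {  // like WaitFor(0 seconds)
        num_tasks.fetch_sub(1);
      }
      return num_tasks.load() == 0;
    }

    int main() {
      num_tasks.fetch_add(2);   // two sweeper tasks started
      pending_tasks.release();  // one task finished and signalled
      assert(!IsSweepingCompleted());
      pending_tasks.release();  // second task finished
      assert(IsSweepingCompleted());
    }
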
@@ -627,9 +597,12 @@ const char* AllocationSpaceName(AllocationSpace space) {
void MarkCompactCollector::ComputeEvacuationHeuristics(
int area_size, int* target_fragmentation_percent,
int* max_evacuated_bytes) {
- // For memory reducing mode we directly define both constants.
+  // For the memory-reducing and optimize-for-memory modes we directly
+  // define both constants.
const int kTargetFragmentationPercentForReduceMemory = 20;
const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
+ const int kTargetFragmentationPercentForOptimizeMemory = 20;
+ const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
// For regular mode (which is latency critical) we define less aggressive
// defaults to start and switch to a trace-based (using compaction speed)
@@ -643,6 +616,10 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
if (heap()->ShouldReduceMemory()) {
*target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
*max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+ } else if (heap()->ShouldOptimizeForMemoryUsage()) {
+ *target_fragmentation_percent =
+ kTargetFragmentationPercentForOptimizeMemory;
+ *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
} else {
const double estimated_compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
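
To make the new optimize-for-memory branch concrete: with an illustrative 500 KB usable page area and the 20% target, a page needs roughly 100 KB of free space to become an evacuation candidate, and candidates are accepted only until the 6 MB cap is reached. The numbers below are hypothetical except for the two constants added in this hunk:

    #include <cstdio>

    // Illustration of the heuristic: a page qualifies for evacuation once
    // its free bytes exceed target_fragmentation_percent of the usable
    // area, and total evacuated bytes must stay under the cap.
    int main() {
      const int area_size = 500 * 1024;             // hypothetical page area
      const int target_fragmentation_percent = 20;  // optimize-for-memory
      const int max_evacuated_bytes = 6 * 1024 * 1024;
      const int free_bytes_threshold =
          (area_size * target_fragmentation_percent) / 100;
      std::printf("evacuate pages with > %d KB free, cap %d MB\n",
                  free_bytes_threshold / 1024,
                  max_evacuated_bytes / (1024 * 1024));
    }
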
@@ -677,11 +654,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
if (p->NeverEvacuate()) continue;
- if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
// Invariant: Evacuation candidates are just created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
@@ -760,8 +734,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if (FLAG_trace_fragmentation_verbose) {
PrintIsolate(isolate(),
"compaction-selection-page: space=%s free_bytes_page=%d "
- "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
- "sum_compaction_kb=%d "
+ "fragmentation_limit_kb=%" V8PRIdPTR
+ " fragmentation_limit_percent=%d sum_compaction_kb=%d "
"compaction_limit_kb=%d\n",
AllocationSpaceName(space->identity()), free_bytes / KB,
free_bytes_threshold / KB, target_fragmentation_percent,
@@ -822,7 +796,7 @@ void MarkCompactCollector::Prepare() {
// If concurrent unmapping tasks are still running, we should wait for
// them here.
- heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+ heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -832,9 +806,23 @@ void MarkCompactCollector::Prepare() {
AbortWeakCells();
AbortTransitionArrays();
AbortCompaction();
+ if (heap_->UsingEmbedderHeapTracer()) {
+ heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
+ }
was_marked_incrementally_ = false;
}
+ if (!was_marked_incrementally_) {
+ if (heap_->UsingEmbedderHeapTracer()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+ heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+ }
+ }
+
+ if (UsingEmbedderHeapTracer()) {
+ embedder_heap_tracer()->EnterFinalPause();
+ }
+
// Don't start compaction if we are in the middle of incremental
// marking cycle. We did not collect any slots.
if (!FLAG_never_compact && !was_marked_incrementally_) {
@@ -846,6 +834,7 @@ void MarkCompactCollector::Prepare() {
space = spaces.next()) {
space->PrepareForMarkCompact();
}
+ heap()->account_external_memory_concurrently_freed();
#ifdef VERIFY_HEAP
if (!was_marked_incrementally_ && FLAG_verify_heap) {
@@ -858,6 +847,12 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+ if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
+ // If we added some more pages during MC, we need to start at least one
+ // more task as all other tasks might already be finished.
+ sweeper().StartSweepingHelper(OLD_SPACE);
+ }
+
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash(
heap()->isolate()->factory()->undefined_value());
@@ -871,11 +866,12 @@ void MarkCompactCollector::Finish() {
#endif
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
- // The stub cache is not traversed during GC; clear the cache to
- // force lazy re-initialization of it. This must be done after the
+ // The stub caches are not traversed during GC; clear them to force
+ // their lazy re-initialization. This must be done after the
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
- isolate()->stub_cache()->Clear();
+ isolate()->load_stub_cache()->Clear();
+ isolate()->store_stub_cache()->Clear();
if (have_code_to_deoptimize_) {
// Some code objects were marked for deoptimization during the GC.
@@ -935,7 +931,7 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
SharedFunctionInfo* shared = candidate->shared();
Code* code = shared->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
+ MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
if (Marking::IsWhite(code_mark)) {
if (FLAG_trace_code_flushing && shared->is_compiled()) {
PrintF("[code-flushing clears: ");
@@ -982,7 +978,7 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
ClearNextCandidate(candidate);
Code* code = candidate->code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
+ MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
if (Marking::IsWhite(code_mark)) {
if (FLAG_trace_code_flushing && candidate->is_compiled()) {
PrintF("[code-flushing clears: ");
@@ -1042,7 +1038,7 @@ void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
void CodeFlusher::EvictCandidate(JSFunction* function) {
- DCHECK(!function->next_function_link()->IsUndefined());
+ DCHECK(!function->next_function_link()->IsUndefined(isolate_));
Object* undefined = isolate_->heap()->undefined_value();
// Make sure previous flushing decisions are revisited.
@@ -1119,14 +1115,14 @@ class MarkCompactMarkingVisitor
// Marks the object black and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
- MarkBit mark = Marking::MarkBitFrom(object);
+ MarkBit mark = ObjectMarking::MarkBitFrom(object);
heap->mark_compact_collector()->MarkObject(object, mark);
}
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
- MarkBit mark_bit = Marking::MarkBitFrom(object);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
if (Marking::IsWhite(mark_bit)) {
heap->mark_compact_collector()->SetMark(object, mark_bit);
return true;
@@ -1140,7 +1136,7 @@ class MarkCompactMarkingVisitor
if (!(*p)->IsHeapObject()) return;
HeapObject* target_object = HeapObject::cast(*p);
collector->RecordSlot(object, p, target_object);
- MarkBit mark = Marking::MarkBitFrom(target_object);
+ MarkBit mark = ObjectMarking::MarkBitFrom(target_object);
collector->MarkObject(target_object, mark);
}
@@ -1154,10 +1150,10 @@ class MarkCompactMarkingVisitor
#endif
Map* map = obj->map();
Heap* heap = obj->GetHeap();
- MarkBit mark = Marking::MarkBitFrom(obj);
+ MarkBit mark = ObjectMarking::MarkBitFrom(obj);
heap->mark_compact_collector()->SetMark(obj, mark);
// Mark the map pointer and the body.
- MarkBit map_mark = Marking::MarkBitFrom(map);
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
heap->mark_compact_collector()->MarkObject(map, map_mark);
IterateBody(map, obj);
}
@@ -1177,7 +1173,7 @@ class MarkCompactMarkingVisitor
if (!o->IsHeapObject()) continue;
collector->RecordSlot(object, p, o);
HeapObject* obj = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(obj);
+ MarkBit mark = ObjectMarking::MarkBitFrom(obj);
if (Marking::IsBlackOrGrey(mark)) continue;
VisitUnmarkedObject(collector, obj);
}
@@ -1211,7 +1207,7 @@ class MarkCompactMarkingVisitor
// was marked through the compilation cache before marker reached JSRegExp
// object.
FixedArray* data = FixedArray::cast(re->data());
- if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
+ if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(data))) {
Object** slot =
data->data_start() + JSRegExp::saved_code_index(is_one_byte);
heap->mark_compact_collector()->RecordSlot(data, slot, code);
@@ -1265,10 +1261,6 @@ void MarkCompactMarkingVisitor::Initialize() {
StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
-
- if (FLAG_track_gc_object_stats) {
- ObjectStatsVisitor::Initialize(&table_);
- }
}
@@ -1299,8 +1291,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
- MarkBit shared_mark = Marking::MarkBitFrom(shared);
- MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+ MarkBit shared_mark = ObjectMarking::MarkBitFrom(shared);
+ MarkBit code_mark = ObjectMarking::MarkBitFrom(shared->code());
collector_->MarkObject(shared->code(), code_mark);
collector_->MarkObject(shared, shared_mark);
}
@@ -1320,11 +1312,11 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
// actual optimized code object.
StackFrame* frame = it.frame();
Code* code = frame->unchecked_code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
+ MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
MarkObject(code, code_mark);
if (frame->is_optimized()) {
Code* optimized_code = frame->LookupCode();
- MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
+ MarkBit optimized_code_mark = ObjectMarking::MarkBitFrom(optimized_code);
MarkObject(optimized_code, optimized_code_mark);
}
}
@@ -1374,9 +1366,9 @@ class RootMarkingVisitor : public ObjectVisitor {
void MarkObjectByPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
- // Replace flat cons strings in place.
HeapObject* object = HeapObject::cast(*p);
- MarkBit mark_bit = Marking::MarkBitFrom(object);
+
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
if (Marking::IsBlackOrGrey(mark_bit)) return;
Map* map = object->map();
@@ -1384,7 +1376,7 @@ class RootMarkingVisitor : public ObjectVisitor {
collector_->SetMark(object, mark_bit);
// Mark the map pointer and body, and push them on the marking stack.
- MarkBit map_mark = Marking::MarkBitFrom(map);
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
collector_->MarkObject(map, map_mark);
MarkCompactMarkingVisitor::IterateBody(map, object);
@@ -1412,7 +1404,7 @@ class StringTableCleaner : public ObjectVisitor {
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (o->IsHeapObject()) {
- if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
+ if (Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(o)))) {
if (finalize_external_strings) {
DCHECK(o->IsExternalString());
heap_->FinalizeExternalString(String::cast(*p));
@@ -1449,7 +1441,7 @@ typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(object));
DCHECK(!Marking::IsGrey(mark_bit));
if (Marking::IsBlack(mark_bit)) {
return object;
@@ -1479,7 +1471,7 @@ void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Map* filler_map = heap()->one_pointer_filler_map();
for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
- MarkBit markbit = Marking::MarkBitFrom(object);
+ MarkBit markbit = ObjectMarking::MarkBitFrom(object);
if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
Marking::GreyToBlack(markbit);
PushBlack(object);
@@ -1493,7 +1485,7 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
LiveObjectIterator<kGreyObjects> it(p);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- MarkBit markbit = Marking::MarkBitFrom(object);
+ MarkBit markbit = ObjectMarking::MarkBitFrom(object);
DCHECK(Marking::IsGrey(markbit));
Marking::GreyToBlack(markbit);
PushBlack(object);
@@ -1503,6 +1495,9 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
class RecordMigratedSlotVisitor final : public ObjectVisitor {
public:
+ explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
inline void VisitPointer(Object** p) final {
RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
}
@@ -1518,10 +1513,69 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
- CODE_ENTRY_SLOT, code_entry_slot);
+ nullptr, CODE_ENTRY_SLOT,
+ code_entry_slot);
}
}
+ inline void VisitCodeTarget(RelocInfo* rinfo) final {
+ DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code* host = rinfo->host();
+    // The target is always in old space; we don't have to record the slot
+    // in the old-to-new remembered set.
+ DCHECK(!collector_->heap()->InNewSpace(target));
+ collector_->RecordRelocSlot(host, rinfo, target);
+ }
+
+ inline void VisitDebugTarget(RelocInfo* rinfo) final {
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+ Code* host = rinfo->host();
+    // The target is always in old space; we don't have to record the slot
+    // in the old-to-new remembered set.
+ DCHECK(!collector_->heap()->InNewSpace(target));
+ collector_->RecordRelocSlot(host, rinfo, target);
+ }
+
+ inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+ DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ Code* host = rinfo->host();
+ collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
+ collector_->RecordRelocSlot(host, rinfo, object);
+ }
+
+ inline void VisitCell(RelocInfo* rinfo) final {
+ DCHECK(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = rinfo->target_cell();
+ Code* host = rinfo->host();
+    // The cell is always in old space; we don't have to record the slot
+    // in the old-to-new remembered set.
+ DCHECK(!collector_->heap()->InNewSpace(cell));
+ collector_->RecordRelocSlot(host, rinfo, cell);
+ }
+
+ // Entries that will never move.
+ inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
+ DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Code* stub = rinfo->code_age_stub();
+ USE(stub);
+ DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
+ }
+
+ // Entries that are skipped for recording.
+ inline void VisitExternalReference(RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Address* p) final {}
+ inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
+ inline void VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource) final {}
+ inline void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) final {}
+ inline void VisitInternalReference(RelocInfo* rinfo) final {}
+ inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
+
private:
inline void RecordMigratedSlot(Object* value, Address slot) {
if (value->IsHeapObject()) {
@@ -1533,6 +1587,8 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
}
}
}
+
+ MarkCompactCollector* collector_;
};
class MarkCompactCollector::HeapObjectVisitor {
@@ -1550,12 +1606,15 @@ class MarkCompactCollector::EvacuateVisitorBase
: heap_(heap),
compaction_spaces_(compaction_spaces),
profiling_(
- heap->isolate()->cpu_profiler()->is_profiling() ||
+ heap->isolate()->is_profiling() ||
heap->isolate()->logger()->is_logging_code_events() ||
heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
HeapObject** target_object) {
+#ifdef VERIFY_HEAP
+ if (AbortCompactionForTesting(object)) return false;
+#endif // VERIFY_HEAP
int size = object->Size();
AllocationAlignment alignment = object->RequiredAlignment();
AllocationResult allocation = target_space->AllocateRaw(size, alignment);
@@ -1590,7 +1649,7 @@ class MarkCompactCollector::EvacuateVisitorBase
PROFILE(heap_->isolate(),
CodeMoveEvent(AbstractCode::cast(src), dst_addr));
}
- RecordMigratedSlotVisitor visitor;
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
@@ -1599,9 +1658,9 @@ class MarkCompactCollector::EvacuateVisitorBase
CodeMoveEvent(AbstractCode::cast(src), dst_addr));
}
heap_->CopyBlock(dst_addr, src_addr, size);
- RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
- RELOCATED_CODE_OBJECT, dst_addr);
Code::cast(dst)->Relocate(dst_addr - src_addr);
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1613,6 +1672,26 @@ class MarkCompactCollector::EvacuateVisitorBase
Memory::Address_at(src_addr) = dst_addr;
}
+#ifdef VERIFY_HEAP
+ bool AbortCompactionForTesting(HeapObject* object) {
+ if (FLAG_stress_compaction) {
+ const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
+ Page::kPageAlignmentMask & ~kPointerAlignmentMask;
+ if ((reinterpret_cast<uintptr_t>(object->address()) &
+ Page::kPageAlignmentMask) == mask) {
+ Page* page = Page::FromAddress(object->address());
+ if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
+ page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+ } else {
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+#endif // VERIFY_HEAP
+
Heap* heap_;
CompactionSpaceCollection* compaction_spaces_;
bool profiling_;
@@ -1626,7 +1705,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
explicit EvacuateNewSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
- HashMap* local_pretenuring_feedback)
+ base::HashMap* local_pretenuring_feedback)
: EvacuateVisitorBase(heap, compaction_spaces),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
space_to_allocate_(NEW_SPACE),
@@ -1634,28 +1713,20 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
- bool Visit(HeapObject* object) override {
+ inline bool Visit(HeapObject* object) override {
heap_->UpdateAllocationSite<Heap::kCached>(object,
local_pretenuring_feedback_);
int size = object->Size();
HeapObject* target_object = nullptr;
- if (heap_->ShouldBePromoted(object->address(), size) &&
+ if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
&target_object)) {
- // If we end up needing more special cases, we should factor this out.
- if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
- heap_->array_buffer_tracker()->Promote(
- JSArrayBuffer::cast(target_object));
- }
promoted_size_ += size;
return true;
}
HeapObject* target = nullptr;
AllocationSpace space = AllocateTargetObject(object, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
- if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
- heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
- }
semispace_copied_size_ += size;
return true;
}
@@ -1674,6 +1745,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
const int size = old_object->Size();
AllocationAlignment alignment = old_object->RequiredAlignment();
AllocationResult allocation;
+ AllocationSpace space_allocated_in = space_to_allocate_;
if (space_to_allocate_ == NEW_SPACE) {
if (size > kMaxLabObjectSize) {
allocation =
@@ -1684,11 +1756,12 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
}
if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
allocation = AllocateInOldSpace(size, alignment);
+ space_allocated_in = OLD_SPACE;
}
bool ok = allocation.To(target_object);
DCHECK(ok);
USE(ok);
- return space_to_allocate_;
+ return space_allocated_in;
}
inline bool NewLocalAllocationBuffer() {
@@ -1763,9 +1836,45 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
AllocationSpace space_to_allocate_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
- HashMap* local_pretenuring_feedback_;
+ base::HashMap* local_pretenuring_feedback_;
};
+class MarkCompactCollector::EvacuateNewSpacePageVisitor final
+ : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+ explicit EvacuateNewSpacePageVisitor(Heap* heap)
+ : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
+
+ static void MoveToOldSpace(Page* page, PagedSpace* owner) {
+ page->Unlink();
+ Page* new_page = Page::ConvertNewToOld(page, owner);
+ new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ }
+
+ static void MoveToToSpace(Page* page) {
+ page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+ page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ }
+
+ inline bool Visit(HeapObject* object) {
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ object->IterateBodyFast(&visitor);
+ promoted_size_ += object->Size();
+ return true;
+ }
+
+ intptr_t promoted_size() { return promoted_size_; }
+ intptr_t semispace_copied_size() { return semispace_copied_size_; }
+
+ void account_semispace_copied(intptr_t copied) {
+ semispace_copied_size_ += copied;
+ }
+
+ private:
+ Heap* heap_;
+ intptr_t promoted_size_;
+ intptr_t semispace_copied_size_;
+};
class MarkCompactCollector::EvacuateOldSpaceVisitor final
: public MarkCompactCollector::EvacuateVisitorBase {
@@ -1774,7 +1883,7 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
CompactionSpaceCollection* compaction_spaces)
: EvacuateVisitorBase(heap, compaction_spaces) {}
- bool Visit(HeapObject* object) override {
+ inline bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
Page::FromAddress(object->address())->owner()->identity());
HeapObject* target_object = nullptr;
@@ -1786,14 +1895,24 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
}
};
+class MarkCompactCollector::EvacuateRecordOnlyVisitor final
+ : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+ explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
+
+ inline bool Visit(HeapObject* object) {
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ object->IterateBody(&visitor);
+ return true;
+ }
+
+ private:
+ Heap* heap_;
+};
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- if (!p->IsFlagSet(Page::BLACK_PAGE)) {
- DiscoverGreyObjectsOnPage(p);
- }
+ for (Page* p : *space) {
+ DiscoverGreyObjectsOnPage(p);
if (marking_deque()->IsFull()) return;
}
}
@@ -1801,9 +1920,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
NewSpace* space = heap()->new_space();
- NewSpacePageIterator it(space->bottom(), space->top());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
+ for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
DiscoverGreyObjectsOnPage(page);
if (marking_deque()->IsFull()) return;
}
@@ -1814,7 +1931,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
+ MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
return Marking::IsWhite(mark);
}
@@ -1824,7 +1941,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
Object* o = *p;
DCHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
+ MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
return Marking::IsWhite(mark);
}
@@ -1832,7 +1949,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
- MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
+ MarkBit string_table_mark = ObjectMarking::MarkBitFrom(string_table);
if (Marking::IsWhite(string_table_mark)) {
// String table could have already been marked by visiting the handles list.
SetMark(string_table, string_table_mark);
@@ -1844,7 +1961,7 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
- MarkBit mark_bit = Marking::MarkBitFrom(site);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(site);
SetMark(site, mark_bit);
}
@@ -1901,19 +2018,16 @@ void MarkCompactCollector::MarkImplicitRefGroups(
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingDeque() {
- Map* filler_map = heap_->one_pointer_filler_map();
while (!marking_deque_.IsEmpty()) {
HeapObject* object = marking_deque_.Pop();
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = object->map();
- if (map == filler_map) continue;
+ DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
- DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
+ DCHECK(!Marking::IsWhite(ObjectMarking::MarkBitFrom(object)));
- MarkBit map_mark = Marking::MarkBitFrom(map);
+ Map* map = object->map();
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
MarkObject(map, map_mark);
MarkCompactMarkingVisitor::IterateBody(map, object);
@@ -1962,15 +2076,22 @@ void MarkCompactCollector::ProcessMarkingDeque() {
}
}
-
// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
- bool work_to_do = true;
DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
+ bool work_to_do = true;
while (work_to_do) {
+ if (UsingEmbedderHeapTracer()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+ RegisterWrappersWithEmbedderHeapTracer();
+ embedder_heap_tracer()->AdvanceTracing(
+ 0, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ }
if (!only_process_harmony_weak_collections) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
isolate()->global_handles()->IterateObjectGroups(
visitor, &IsUnmarkedHeapObjectWithHeap);
MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
@@ -1981,7 +2102,6 @@ void MarkCompactCollector::ProcessEphemeralMarking(
}
}
-
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
!it.done(); it.Advance()) {
@@ -2080,6 +2200,85 @@ void MarkingDeque::Uninitialize(bool aborting) {
in_use_ = false;
}
+void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
+ DCHECK_NOT_NULL(tracer);
+ CHECK_NULL(embedder_heap_tracer_);
+ embedder_heap_tracer_ = tracer;
+}
+
+void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
+ DCHECK(UsingEmbedderHeapTracer());
+ if (wrappers_to_trace_.empty()) {
+ return;
+ }
+ embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+ wrappers_to_trace_.clear();
+}
+
+void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
+ DCHECK(js_object->WasConstructedFromApiFunction());
+ if (js_object->GetInternalFieldCount() >= 2 &&
+ js_object->GetInternalField(0) &&
+ js_object->GetInternalField(0) != heap_->undefined_value() &&
+ js_object->GetInternalField(1) != heap_->undefined_value()) {
+ DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
+ wrappers_to_trace_.push_back(std::pair<void*, void*>(
+ reinterpret_cast<void*>(js_object->GetInternalField(0)),
+ reinterpret_cast<void*>(js_object->GetInternalField(1))));
+ }
+}
+
+class MarkCompactCollector::ObjectStatsVisitor
+ : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+ ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
+ ObjectStats* dead_stats)
+ : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) {
+ DCHECK_NOT_NULL(live_stats);
+ DCHECK_NOT_NULL(dead_stats);
+ // Global objects are roots and thus recorded as live.
+ live_collector_.CollectGlobalStatistics();
+ }
+
+ bool Visit(HeapObject* obj) override {
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(obj))) {
+ live_collector_.CollectStatistics(obj);
+ } else {
+ DCHECK(!Marking::IsGrey(ObjectMarking::MarkBitFrom(obj)));
+ dead_collector_.CollectStatistics(obj);
+ }
+ return true;
+ }
+
+ private:
+ ObjectStatsCollector live_collector_;
+ ObjectStatsCollector dead_collector_;
+};
+
+void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
+ SpaceIterator space_it(heap());
+ HeapObject* obj = nullptr;
+ while (space_it.has_next()) {
+ ObjectIterator* it = space_it.next();
+ while ((obj = it->Next()) != nullptr) {
+ visitor->Visit(obj);
+ }
+ }
+}
+
+void MarkCompactCollector::RecordObjectStats() {
+ if (FLAG_track_gc_object_stats) {
+ ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
+ heap()->dead_object_stats_);
+ VisitAllObjects(&visitor);
+ if (FLAG_trace_gc_object_stats) {
+ heap()->live_object_stats_->PrintJSON("live");
+ heap()->dead_object_stats_->PrintJSON("dead");
+ }
+ heap()->live_object_stats_->CheckpointObjectStats();
+ heap()->dead_object_stats_->ClearObjectStats();
+ }
+}
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
@@ -2137,7 +2336,6 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
ProcessEphemeralMarking(&root_visitor, false);
- ProcessMarkingDeque();
}
// The objects reachable from the roots, weak maps or object groups
@@ -2171,7 +2369,10 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeralMarking(&root_visitor, true);
- ProcessMarkingDeque();
+ if (UsingEmbedderHeapTracer()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
+ embedder_heap_tracer()->TraceEpilogue();
+ }
}
}
@@ -2179,12 +2380,6 @@ void MarkCompactCollector::MarkLiveObjects() {
heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
start_time);
}
- if (FLAG_track_gc_object_stats) {
- if (FLAG_trace_gc_object_stats) {
- heap()->object_stats_->TraceObjectStats();
- }
- heap()->object_stats_->CheckpointObjectStats();
- }
}
@@ -2258,12 +2453,41 @@ void MarkCompactCollector::MarkDependentCodeForDeoptimization(
current = current->next_link();
}
+ {
+ ArrayList* list = heap_->weak_new_space_object_to_code_list();
+ int counter = 0;
+ for (int i = 0; i < list->Length(); i += 2) {
+ WeakCell* obj = WeakCell::cast(list->Get(i));
+ WeakCell* dep = WeakCell::cast(list->Get(i + 1));
+ if (obj->cleared() || dep->cleared()) {
+ if (!dep->cleared()) {
+ Code* code = Code::cast(dep->value());
+ if (!code->marked_for_deoptimization()) {
+ DependentCode::SetMarkedForDeoptimization(
+ code, DependentCode::DependencyGroup::kWeakCodeGroup);
+ code->InvalidateEmbeddedObjects();
+ have_code_to_deoptimize_ = true;
+ }
+ }
+ } else {
+ // We record the slot manually because marking is finished at this
+ // point and the write barrier would bail out.
+ list->Set(counter, obj, SKIP_WRITE_BARRIER);
+ RecordSlot(list, list->Slot(counter), obj);
+ counter++;
+ list->Set(counter, dep, SKIP_WRITE_BARRIER);
+ RecordSlot(list, list->Slot(counter), dep);
+ counter++;
+ }
+ }
+ }
+
WeakHashTable* table = heap_->weak_object_to_code_table();
uint32_t capacity = table->Capacity();
for (uint32_t i = 0; i < capacity; i++) {
uint32_t key_index = table->EntryToIndex(i);
Object* key = table->get(key_index);
- if (!table->IsKey(key)) continue;
+ if (!table->IsKey(isolate, key)) continue;
uint32_t value_index = table->EntryToValueIndex(i);
Object* value = table->get(value_index);
DCHECK(key->IsWeakCell());
@@ -2286,11 +2510,11 @@ void MarkCompactCollector::ClearSimpleMapTransitions(
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
Map* map = Map::cast(weak_cell->value());
- DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
+ DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
Object* potential_parent = map->constructor_or_backpointer();
if (potential_parent->IsMap()) {
Map* parent = Map::cast(potential_parent);
- if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
+ if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent)) &&
parent->raw_transitions() == weak_cell) {
ClearSimpleMapTransition(parent, map);
}
@@ -2330,7 +2554,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
Map* map = array->GetTarget(0);
Map* parent = Map::cast(map->constructor_or_backpointer());
bool parent_is_alive =
- Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
+ Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent));
DescriptorArray* descriptors =
parent_is_alive ? parent->instance_descriptors() : nullptr;
bool descriptors_owner_died =
@@ -2355,7 +2579,7 @@ bool MarkCompactCollector::CompactTransitionArray(
for (int i = 0; i < num_transitions; ++i) {
Map* target = transitions->GetTarget(i);
DCHECK_EQ(target->constructor_or_backpointer(), map);
- if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
+ if (Marking::IsWhite(ObjectMarking::MarkBitFrom(target))) {
if (descriptors != nullptr &&
target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
@@ -2532,7 +2756,7 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
if (cell_value->IsHeapObject() &&
MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
// Resurrect the cell.
- MarkBit mark = Marking::MarkBitFrom(value);
+ MarkBit mark = ObjectMarking::MarkBitFrom(value);
SetMark(value, mark);
Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
RecordSlot(value, slot, *slot);
@@ -2602,30 +2826,16 @@ void MarkCompactCollector::AbortTransitionArrays() {
heap()->set_encountered_transition_arrays(Smi::FromInt(0));
}
-static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTarget(rmode)) {
- return CODE_TARGET_SLOT;
- } else if (RelocInfo::IsCell(rmode)) {
- return CELL_TARGET_SLOT;
- } else if (RelocInfo::IsEmbeddedObject(rmode)) {
- return EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
- return DEBUG_TARGET_SLOT;
- }
- UNREACHABLE();
- return NUMBER_OF_SLOT_TYPES;
-}
-
void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
- RelocInfo::Mode rmode = rinfo->rmode();
if (target_page->IsEvacuationCandidate() &&
(rinfo->host() == NULL ||
!ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+ RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
- SlotType slot_type = SlotTypeForRMode(rmode);
+ SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
if (rinfo->IsInConstantPool()) {
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTarget(rmode)) {
@@ -2635,145 +2845,64 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
- RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
+ RememberedSet<OLD_TO_OLD>::InsertTyped(
+ source_page, reinterpret_cast<Address>(host), slot_type, addr);
}
}
-static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
- SlotType slot_type, Address addr) {
- switch (slot_type) {
- case CODE_TARGET_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case CELL_TARGET_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case CODE_ENTRY_SLOT: {
- v->VisitCodeEntry(addr);
- break;
- }
- case RELOCATED_CODE_OBJECT: {
- HeapObject* obj = HeapObject::FromAddress(addr);
- Code::BodyDescriptor::IterateBody(obj, v);
- break;
- }
- case DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
- NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
- break;
- }
- case EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case OBJECT_SLOT: {
- v->VisitPointer(reinterpret_cast<Object**>(addr));
- break;
+static inline SlotCallbackResult UpdateSlot(Object** slot) {
+ Object* obj = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+
+ if (obj->IsHeapObject()) {
+ HeapObject* heap_obj = HeapObject::cast(obj);
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+ Page::FromAddress(heap_obj->address())
+ ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ HeapObject* target = map_word.ToForwardingAddress();
+ base::NoBarrier_CompareAndSwap(
+ reinterpret_cast<base::AtomicWord*>(slot),
+ reinterpret_cast<base::AtomicWord>(obj),
+ reinterpret_cast<base::AtomicWord>(target));
+ DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
+ !MarkCompactCollector::IsOnEvacuationCandidate(target));
}
- default:
- UNREACHABLE();
- break;
}
+ return REMOVE_SLOT;
}
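
UpdateSlot() deliberately uses a compare-and-swap rather than a plain store:
pointer updating runs in parallel tasks, and two tasks may reach the same
slot. A minimal sketch of the idiom with std::atomic (the real code uses
base::NoBarrier_CompareAndSwap):

    #include <atomic>
    #include <cstdint>

    using SlotWord = std::atomic<std::uintptr_t>;

    // Idempotent slot update: if a racing task already installed |forwarded|,
    // the CAS simply fails against the identical new value, so no retry loop
    // is needed.
    void UpdateSlotSketch(SlotWord* slot, std::uintptr_t expected_old,
                          std::uintptr_t forwarded) {
      slot->compare_exchange_strong(expected_old, forwarded);
    }
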
-
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor {
public:
- explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
-
- void VisitPointer(Object** p) override { UpdatePointer(p); }
+ void VisitPointer(Object** p) override { UpdateSlot(p); }
void VisitPointers(Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdatePointer(p);
+ for (Object** p = start; p < end; p++) UpdateSlot(p);
}
void VisitCell(RelocInfo* rinfo) override {
- DCHECK(rinfo->rmode() == RelocInfo::CELL);
- Object* cell = rinfo->target_cell();
- Object* old_cell = cell;
- VisitPointer(&cell);
- if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
- }
+ UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
}
void VisitEmbeddedPointer(RelocInfo* rinfo) override {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* target = rinfo->target_object();
- Object* old_target = target;
- VisitPointer(&target);
- // Avoid unnecessary changes that might unnecessary flush the instruction
- // cache.
- if (target != old_target) {
- rinfo->set_target_object(target);
- }
+ UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
}
void VisitCodeTarget(RelocInfo* rinfo) override {
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* old_target = target;
- VisitPointer(&target);
- if (target != old_target) {
- rinfo->set_target_address(Code::cast(target)->instruction_start());
- }
+ UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
}
- void VisitCodeAgeSequence(RelocInfo* rinfo) override {
- DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Object* stub = rinfo->code_age_stub();
- DCHECK(stub != NULL);
- VisitPointer(&stub);
- if (stub != rinfo->code_age_stub()) {
- rinfo->set_code_age_stub(Code::cast(stub));
- }
+ void VisitCodeEntry(Address entry_address) override {
+ UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
}
void VisitDebugTarget(RelocInfo* rinfo) override {
- DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence());
- Object* target =
- Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
- VisitPointer(&target);
- rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
- }
-
- static inline void UpdateSlot(Heap* heap, Object** slot) {
- Object* obj = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
- if (!obj->IsHeapObject()) return;
-
- HeapObject* heap_obj = HeapObject::cast(obj);
-
- MapWord map_word = heap_obj->map_word();
- if (map_word.IsForwardingAddress()) {
- DCHECK(heap->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
- Page::FromAddress(heap_obj->address())
- ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
- HeapObject* target = map_word.ToForwardingAddress();
- base::NoBarrier_CompareAndSwap(
- reinterpret_cast<base::AtomicWord*>(slot),
- reinterpret_cast<base::AtomicWord>(obj),
- reinterpret_cast<base::AtomicWord>(target));
- DCHECK(!heap->InFromSpace(target) &&
- !MarkCompactCollector::IsOnEvacuationCandidate(target));
- }
+ UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
}
-
- private:
- inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
-
- Heap* heap_;
};
static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -2792,9 +2921,8 @@ bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
DCHECK(owner != heap_->lo_space() && owner != nullptr);
USE(owner);
- // If we are on a black page, we cannot find the actual object start
- // easiliy. We just return true but do not set the out_object.
- if (p->IsFlagSet(Page::BLACK_PAGE)) {
+ // We may be part of a black area.
+ if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(slot))) {
return true;
}
@@ -2861,8 +2989,13 @@ bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
base_address += (cell_index - base_address_cell_index) *
Bitmap::kBitsPerCell * kPointerSize;
Address address = base_address + offset * kPointerSize;
+
+ // If the found mark bit is part of a black area, the slot cannot be part
+ // of a live object since it is not marked.
+ if (p->IsBlackAreaEndMarker(address + kPointerSize)) return false;
+
HeapObject* object = HeapObject::FromAddress(address);
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
CHECK(object->address() < reinterpret_cast<Address>(slot));
if ((object->address() + kPointerSize) <= slot &&
(object->address() + object->Size()) > slot) {
@@ -2891,61 +3024,71 @@ HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
return nullptr;
}
- if (p->IsFlagSet(Page::BLACK_PAGE)) {
- HeapObjectIterator it(p);
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- int size = object->Size();
- if (object->address() > slot) return nullptr;
- if (object->address() <= slot && slot < (object->address() + size)) {
- return object;
- }
- }
- } else {
- LiveObjectIterator<kBlackObjects> it(p);
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- int size = object->Size();
- if (object->address() > slot) return nullptr;
- if (object->address() <= slot && slot < (object->address() + size)) {
- return object;
- }
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ int size = object->Size();
+ if (object->address() > slot) return nullptr;
+ if (object->address() <= slot && slot < (object->address() + size)) {
+ return object;
}
}
+
return nullptr;
}
void MarkCompactCollector::EvacuateNewSpacePrologue() {
NewSpace* new_space = heap()->new_space();
- NewSpacePageIterator it(new_space->bottom(), new_space->top());
// Append the list of new space pages to be processed.
- while (it.has_next()) {
- newspace_evacuation_candidates_.Add(it.next());
+ for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
+ newspace_evacuation_candidates_.Add(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
}
-void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
- newspace_evacuation_candidates_.Rewind(0);
-}
-
-
class MarkCompactCollector::Evacuator : public Malloced {
public:
+ enum EvacuationMode {
+ kObjectsNewToOld,
+ kPageNewToOld,
+ kObjectsOldToOld,
+ kPageNewToNew,
+ };
+
+ static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+ // Note: The order of checks is important in this function.
+ if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
+ return kPageNewToOld;
+ if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
+ return kPageNewToNew;
+ if (chunk->InNewSpace()) return kObjectsNewToOld;
+ DCHECK(chunk->IsEvacuationCandidate());
+ return kObjectsOldToOld;
+ }
+
+ // NewSpacePages with more live bytes than this threshold qualify for fast
+ // evacuation.
+ static int PageEvacuationThreshold() {
+ if (FLAG_page_promotion)
+ return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+ return Page::kAllocatableMemory + kPointerSize;
+ }
+
explicit Evacuator(MarkCompactCollector* collector)
: collector_(collector),
compaction_spaces_(collector->heap()),
- local_pretenuring_feedback_(HashMap::PointersMatch,
+ local_pretenuring_feedback_(base::HashMap::PointersMatch,
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_pretenuring_feedback_),
+ new_space_page_visitor(collector->heap()),
old_space_visitor_(collector->heap(), &compaction_spaces_),
duration_(0.0),
bytes_compacted_(0) {}
- inline bool EvacuatePage(MemoryChunk* chunk);
+ inline bool EvacuatePage(Page* chunk);
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
@@ -2956,23 +3099,22 @@ class MarkCompactCollector::Evacuator : public Malloced {
private:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
- Heap* heap() { return collector_->heap(); }
+ inline Heap* heap() { return collector_->heap(); }
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
duration_ += duration;
bytes_compacted_ += bytes_compacted;
}
- inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
-
MarkCompactCollector* collector_;
// Locally cached collector data.
CompactionSpaceCollection compaction_spaces_;
- HashMap local_pretenuring_feedback_;
+ base::HashMap local_pretenuring_feedback_;
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
+ EvacuateNewSpacePageVisitor new_space_page_visitor;
EvacuateOldSpaceVisitor old_space_visitor_;
// Bookkeeping info.
@@ -2980,43 +3122,74 @@ class MarkCompactCollector::Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
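
The page-promotion fast path above moves an entire new-space page instead of
copying its objects one by one; PageEvacuationThreshold() gates it on live
bytes. A worked example with illustrative numbers (the percentage flag value
and page size here are assumptions, not quoted V8 constants):

    #include <cstdio>

    int main() {
      // Assumed values for illustration only.
      const int kAllocatableMemory = 500 * 1024;   // usable bytes per page
      const int page_promotion_threshold = 70;     // flag value, in percent
      const int threshold =
          page_promotion_threshold * kAllocatableMemory / 100;
      // A new-space page with more than |threshold| live bytes, that is
      // evacuable and does not contain the age mark, is promoted wholesale.
      std::printf("promote page above %d live bytes\n", threshold);
      return 0;
    }
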
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
- MemoryChunk* p, HeapObjectVisitor* visitor) {
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
bool success = false;
- DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
- int saved_live_bytes = p->LiveBytes();
- double evacuation_time;
+ DCHECK(page->SweepingDone());
+ int saved_live_bytes = page->LiveBytes();
+ double evacuation_time = 0.0;
+ Heap* heap = page->heap();
{
- AlwaysAllocateScope always_allocate(heap()->isolate());
+ AlwaysAllocateScope always_allocate(heap->isolate());
TimedScope timed_scope(&evacuation_time);
- success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+ switch (ComputeEvacuationMode(page)) {
+ case kObjectsNewToOld:
+ success = collector_->VisitLiveObjects(page, &new_space_visitor_,
+ kClearMarkbits);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ DCHECK(success);
+ break;
+ case kPageNewToOld:
+ success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
+ kKeepMarking);
+ // ArrayBufferTracker will be updated during sweeping.
+ DCHECK(success);
+ break;
+ case kPageNewToNew:
+ new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+ // ArrayBufferTracker will be updated during sweeping.
+ success = true;
+ break;
+ case kObjectsOldToOld:
+ success = collector_->VisitLiveObjects(page, &old_space_visitor_,
+ kClearMarkbits);
+ if (!success) {
+ // Aborted compaction page. We have to record slots here, since we
+ // might not have recorded them in first place.
+ // Note: We mark the page as aborted here to be able to record slots
+ // for code objects in |RecordMigratedSlotVisitor|.
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
+ success =
+ collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+ DCHECK(success);
+ // We need to return failure here to indicate that we want this page
+ // added to the sweeper.
+ success = false;
+ } else {
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
- PrintIsolate(heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d executable=%d "
+ PrintIsolate(heap->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
"live_bytes=%d time=%f\n",
- this, p, p->InNewSpace(),
- p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
- evacuation_time);
- }
- if (success) {
- ReportCompactionProgress(evacuation_time, saved_live_bytes);
- }
- return success;
-}
-
-bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
- bool success = false;
- if (chunk->InNewSpace()) {
- DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
- NewSpacePage::kSweepingDone);
- success = EvacuateSinglePage(chunk, &new_space_visitor_);
- DCHECK(success);
- USE(success);
- } else {
- DCHECK(chunk->IsEvacuationCandidate());
- DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
- success = EvacuateSinglePage(chunk, &old_space_visitor_);
+ static_cast<void*>(this), static_cast<void*>(page),
+ page->InNewSpace(),
+ page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ page->Contains(heap->new_space()->age_mark()),
+ saved_live_bytes, evacuation_time);
}
return success;
}
@@ -3026,12 +3199,16 @@ void MarkCompactCollector::Evacuator::Finalize() {
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_.Get(CODE_SPACE));
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
- heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+ heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
+ new_space_page_visitor.promoted_size());
heap()->IncrementSemiSpaceCopiedObjectSize(
- new_space_visitor_.semispace_copied_size());
+ new_space_visitor_.semispace_copied_size() +
+ new_space_page_visitor.semispace_copied_size());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
- new_space_visitor_.semispace_copied_size());
+ new_space_visitor_.semispace_copied_size() +
+ new_space_page_visitor.promoted_size() +
+ new_space_page_visitor.semispace_copied_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
@@ -3074,35 +3251,38 @@ class EvacuationJobTraits {
static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
MemoryChunk* chunk, PerPageData) {
- return evacuator->EvacuatePage(chunk);
+ return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
}
- static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
- PerPageData data) {
- if (chunk->InNewSpace()) {
- DCHECK(success);
- } else {
- Page* p = static_cast<Page*>(chunk);
- if (success) {
- DCHECK(p->IsEvacuationCandidate());
- DCHECK(p->SweepingDone());
- p->Unlink();
- } else {
- // We have partially compacted the page, i.e., some objects may have
- // moved, others are still in place.
- // We need to:
- // - Leave the evacuation candidate flag for later processing of slots
- // buffer entries.
- // - Leave the slots buffer there for processing of entries added by
- // the write barrier.
- // - Rescan the page as slot recording in the migration buffer only
- // happens upon moving (which we potentially didn't do).
- // - Leave the page in the list of pages of a space since we could not
- // fully evacuate it.
- DCHECK(p->IsEvacuationCandidate());
- p->SetFlag(Page::COMPACTION_WAS_ABORTED);
- *data += 1;
- }
+ static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
+ bool success, PerPageData data) {
+ using Evacuator = MarkCompactCollector::Evacuator;
+ Page* p = static_cast<Page*>(chunk);
+ switch (Evacuator::ComputeEvacuationMode(p)) {
+ case Evacuator::kPageNewToOld:
+ break;
+ case Evacuator::kPageNewToNew:
+ DCHECK(success);
+ break;
+ case Evacuator::kObjectsNewToOld:
+ DCHECK(success);
+ break;
+ case Evacuator::kObjectsOldToOld:
+ if (success) {
+ DCHECK(p->IsEvacuationCandidate());
+ DCHECK(p->SweepingDone());
+ p->Unlink();
+ } else {
+ // We have partially compacted the page, i.e., some objects may have
+ // moved, others are still in place.
+ p->ClearEvacuationCandidate();
+ // Slots have already been recorded so we just need to add it to the
+ // sweeper, which will happen after updating pointers.
+ *data += 1;
+ }
+ break;
+ default:
+ UNREACHABLE();
}
}
};
@@ -3118,8 +3298,20 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
live_bytes += page->LiveBytes();
job.AddPage(page, &abandoned_pages);
}
- for (NewSpacePage* page : newspace_evacuation_candidates_) {
+
+ const Address age_mark = heap()->new_space()->age_mark();
+ for (Page* page : newspace_evacuation_candidates_) {
live_bytes += page->LiveBytes();
+ if (!page->NeverEvacuate() &&
+ (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
+ !page->Contains(age_mark)) {
+ if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+ EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+ } else {
+ EvacuateNewSpacePageVisitor::MoveToToSpace(page);
+ }
+ }
+
job.AddPage(page, &abandoned_pages);
}
DCHECK_GE(job.NumberOfPages(), 1);
@@ -3144,16 +3336,15 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
delete[] evacuators;
if (FLAG_trace_evacuation) {
- PrintIsolate(
- isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d "
- "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
- "d compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
- abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
- live_bytes, compaction_speed);
+ PrintIsolate(isolate(),
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+ "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
+ " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
+ isolate()->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
+ abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
+ live_bytes, compaction_speed);
}
}
@@ -3171,28 +3362,21 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
-enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
-
-enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
+int MarkCompactCollector::Sweeper::RawSweep(
+ Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode) {
+ Space* space = p->owner();
+ DCHECK_NOT_NULL(space);
+ DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
+ space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
+ DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
-enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+ // Before we sweep objects on the page, we free dead array buffers which
+ // requires valid mark bits.
+ ArrayBufferTracker::FreeDead(p);
-// Sweeps a page. After sweeping the page can be iterated.
-// Slots in live objects pointing into evacuation candidates are updated
-// if requested.
-// Returns the size of the biggest continuous freed memory chunk in bytes.
-template <SweepingMode sweeping_mode,
- MarkCompactCollector::SweepingParallelism parallelism,
- SkipListRebuildingMode skip_list_mode,
- FreeSpaceTreatmentMode free_space_mode>
-static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
- DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
- DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
- DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
- space->identity() == CODE_SPACE);
- DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
- DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
- sweeping_mode == SWEEP_ONLY);
+ // We also release the black area markers here.
+ p->ReleaseBlackAreaEndMarkerMap();
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3200,8 +3384,10 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
// deoptimizer.
+ const bool rebuild_skip_list =
+ space->identity() == CODE_SPACE && p->skip_list() != nullptr;
SkipList* skip_list = p->skip_list();
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
+ if (rebuild_skip_list) {
skip_list->Clear();
}
@@ -3212,22 +3398,25 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
LiveObjectIterator<kBlackObjects> it(p);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
Address free_end = object->address();
if (free_end != free_start) {
int size = static_cast<int>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
- freed_bytes = space->UnaccountedFree(free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ if (free_list_mode == REBUILD_FREE_LIST) {
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
+ free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ } else {
+ p->heap()->CreateFillerObjectAt(free_start, size,
+ ClearRecordedSlots::kNo);
+ }
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
- if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
- object->IterateBody(map->instance_type(), size, v);
- }
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+ if (rebuild_skip_list) {
int new_region_start = SkipList::RegionNumber(free_end);
int new_region_end =
SkipList::RegionNumber(free_end + size - kPointerSize);
@@ -3240,36 +3429,45 @@ static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
}
// Clear the mark bits of that page and reset live bytes count.
- Bitmap::Clear(p);
+ p->ClearLiveness();
if (free_start != p->area_end()) {
int size = static_cast<int>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
- freed_bytes = space->UnaccountedFree(free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ if (free_list_mode == REBUILD_FREE_LIST) {
+ freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
+ free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ } else {
+ p->heap()->CreateFillerObjectAt(free_start, size,
+ ClearRecordedSlots::kNo);
+ }
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+ if (free_list_mode == IGNORE_FREE_LIST) return 0;
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
}
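
The sweep loop above is, at its core, a linear scan that measures the holes
between consecutive live (black) objects; the largest hole is what the
function reports back to the free list. A condensed sketch of that
accounting:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct LiveObject {
      std::uintptr_t start;
      std::uintptr_t size;
    };

    // Walk live objects in address order (as LiveObjectIterator yields them)
    // and return the largest free gap, including the tail after the last one.
    std::uintptr_t MaxFreedBytes(std::uintptr_t area_start,
                                 std::uintptr_t area_end,
                                 const std::vector<LiveObject>& live) {
      std::uintptr_t free_start = area_start;
      std::uintptr_t max_freed = 0;
      for (const LiveObject& object : live) {
        max_freed = std::max(max_freed, object.start - free_start);
        free_start = object.start + object.size;
      }
      return std::max(max_freed, area_end - free_start);
    }
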
-
void MarkCompactCollector::InvalidateCode(Code* code) {
+ Page* page = Page::FromAddress(code->address());
+ Address start = code->instruction_start();
+ Address end = code->address() + code->Size();
+
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
+
if (heap_->incremental_marking()->IsCompacting() &&
!ShouldSkipEvacuationSlotRecording(code)) {
DCHECK(compacting_);
// If the object is white then no slots were recorded on it yet.
- MarkBit mark_bit = Marking::MarkBitFrom(code);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(code);
if (Marking::IsWhite(mark_bit)) return;
// Ignore all slots that might have been recorded in the body of the
// deoptimized code object. Assumption: no slots will be recorded for
// this object after invalidating it.
- Page* page = Page::FromAddress(code->address());
- Address start = code->instruction_start();
- Address end = code->address() + code->Size();
RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
}
}
@@ -3286,14 +3484,13 @@ static void VerifyAllBlackObjects(MemoryChunk* page) {
LiveObjectIterator<kAllLiveObjects> it(page);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
}
}
#endif // VERIFY_HEAP
-
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
- HeapObjectVisitor* visitor,
+template <class Visitor>
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
IterationMode mode) {
#ifdef VERIFY_HEAP
VerifyAllBlackObjects(page);
@@ -3302,7 +3499,7 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
LiveObjectIterator<kBlackObjects> it(page);
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
if (!visitor->Visit(object)) {
if (mode == kClearMarkbits) {
page->markbits()->ClearRange(
@@ -3312,13 +3509,17 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
page->old_to_new_slots()->RemoveRange(
0, static_cast<int>(object->address() - page->address()));
}
+ if (page->typed_old_to_new_slots() != nullptr) {
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
+ object->address());
+ }
RecomputeLiveBytes(page);
}
return false;
}
}
if (mode == kClearMarkbits) {
- Bitmap::Clear(page);
+ page->ClearLiveness();
}
return true;
}
@@ -3334,58 +3535,12 @@ void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
page->SetLiveBytes(new_live_size);
}
-
-void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
- ObjectVisitor* visitor) {
-#ifdef VERIFY_HEAP
- VerifyAllBlackObjects(page);
-#endif // VERIFY_HEAP
-
- LiveObjectIterator<kBlackObjects> it(page);
- HeapObject* object = NULL;
- while ((object = it.Next()) != NULL) {
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
- Map* map = object->synchronized_map();
- int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, visitor);
- }
-}
-
-
-void MarkCompactCollector::SweepAbortedPages() {
- // Second pass on aborted pages.
- for (Page* p : evacuation_candidates_) {
- if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
- p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- switch (space->identity()) {
- case OLD_SPACE:
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, p, nullptr);
- break;
- case CODE_SPACE:
- if (FLAG_zap_code_space) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, p, nullptr);
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, p, nullptr);
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
- {
- base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
- swept_pages(space->identity())->Add(p);
- }
- }
- }
+void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
+ Page* page) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ swept_list_[space->identity()].Add(page);
}
-
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
@@ -3396,35 +3551,55 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpacePrologue();
EvacuatePagesInParallel();
- EvacuateNewSpaceEpilogue();
heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
UpdatePointersAfterEvacuation();
+ if (!heap()->new_space()->Rebalance()) {
+ FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+
// Give pages that are queued to be freed back to the OS. Note that filtering
// slots only handles old space (for unboxed doubles), and thus map space can
// still contain stale pointers. We only free the chunks after pointer updates
// to still have access to page headers.
- heap()->FreeQueuedChunks();
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
- // After updating all pointers, we can finally sweep the aborted pages,
- // effectively overriding any forward pointers.
- SweepAbortedPages();
- // EvacuateNewSpaceAndCandidates iterates over new space objects and for
- // ArrayBuffers either re-registers them as live or promotes them. This is
- // needed to properly free them.
- heap()->array_buffer_tracker()->FreeDead(false);
+ for (Page* p : newspace_evacuation_candidates_) {
+ if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ sweeper().AddLatePage(p->owner()->identity(), p);
+ } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ p->ForAllFreeListCategories(
+ [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+ sweeper().AddLatePage(p->owner()->identity(), p);
+ }
+ }
+ newspace_evacuation_candidates_.Rewind(0);
+
+ for (Page* p : evacuation_candidates_) {
+ // Important: skip list should be cleared only after roots were updated
+ // because root iteration traverses the stack and might have to find
+ // code objects from non-updated pc pointing into evacuation candidate.
+ SkipList* list = p->skip_list();
+ if (list != NULL) list->Clear();
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ sweeper().AddLatePage(p->owner()->identity(), p);
+ p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
+ }
+ }
// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
}
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap && !sweeping_in_progress_) {
+ if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
VerifyEvacuation(heap());
}
#endif
@@ -3434,12 +3609,12 @@ template <PointerDirection direction>
class PointerUpdateJobTraits {
public:
typedef int PerPageData; // Per page data is not used in this job.
- typedef PointersUpdatingVisitor* PerTaskData;
+ typedef int PerTaskData; // Per task data is not used in this job.
- static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk, PerPageData) {
+ static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
+ PerPageData) {
UpdateUntypedPointers(heap, chunk);
- UpdateTypedPointers(heap, chunk, visitor);
+ UpdateTypedPointers(heap, chunk);
return true;
}
static const bool NeedSequentialFinalization = false;
@@ -3449,41 +3624,70 @@ class PointerUpdateJobTraits {
private:
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
if (direction == OLD_TO_NEW) {
- RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
- UpdateOldToNewSlot);
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
+ return CheckAndUpdateOldToNewSlot(heap, slot);
+ });
} else {
- RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
- PointersUpdatingVisitor::UpdateSlot(heap,
- reinterpret_cast<Object**>(slot));
- return REMOVE_SLOT;
+ RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
+ return UpdateSlot(reinterpret_cast<Object**>(slot));
});
}
}
- static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
- PointersUpdatingVisitor* visitor) {
+ static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk, [isolate, visitor](SlotType type, Address slot) {
- UpdateTypedSlot(isolate, visitor, type, slot);
- return REMOVE_SLOT;
+ chunk, [isolate](SlotType type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
+ UpdateSlot);
+ });
+ } else {
+ Isolate* isolate = heap->isolate();
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk,
+ [isolate, heap](SlotType type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, type, slot, [heap](Object** slot) {
+ return CheckAndUpdateOldToNewSlot(
+ heap, reinterpret_cast<Address>(slot));
+ });
});
}
}
- static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
- MapWord map_word = object->map_word();
- // Since we only filter invalid slots in old space, the store buffer can
- // still contain stale pointers in large object and in map spaces. Ignore
- // these pointers here.
- DCHECK(map_word.IsForwardingAddress() ||
- !object->GetHeap()->old_space()->Contains(
- reinterpret_cast<Address>(address)));
- if (map_word.IsForwardingAddress()) {
- // Update the corresponding slot.
- *address = map_word.ToForwardingAddress();
+ static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
+ Address slot_address) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (heap->InFromSpace(*slot)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+ DCHECK(heap_object->IsHeapObject());
+ MapWord map_word = heap_object->map_word();
+ // There could still be stale pointers in large object space, map space,
+ // and old space for pages that have been promoted.
+ if (map_word.IsForwardingAddress()) {
+ // Update the corresponding slot.
+ *slot = map_word.ToForwardingAddress();
+ }
+ // If the object was in from space before and is after executing the
+ // callback in to space, the object is still live.
+ // Unfortunately, we do not know about the slot. It could be in a
+ // just-freed free-space object.
+ if (heap->InToSpace(*slot)) {
+ return KEEP_SLOT;
+ }
+ } else if (heap->InToSpace(*slot)) {
+ // Slots can point to "to" space if the page has been moved, or if the
+ // slot has been recorded multiple times in the remembered set. Since
+ // there is no forwarding information present we need to check the
+ // markbits to determine liveness.
+ if (Marking::IsBlack(
+ ObjectMarking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
+ return KEEP_SLOT;
+ } else {
+ DCHECK(!heap->InNewSpace(*slot));
}
+ return REMOVE_SLOT;
}
};
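
CheckAndUpdateOldToNewSlot() also defines when an OLD_TO_NEW entry survives:
the remembered set keeps a slot only while it still points at a live
new-space object. A condensed restatement of the branch structure (heap
queries mocked):

    enum SlotResultMock { kKeepSlot, kRemoveSlot };

    // Mirrors the decision logic above. |forwarded_into_to_space| stands for
    // "the from-space object had a forwarding pointer and now sits in
    // to-space"; |black_in_to_space| stands for the mark-bit check.
    SlotResultMock ClassifyOldToNewSlot(bool in_from_space,
                                        bool forwarded_into_to_space,
                                        bool in_to_space,
                                        bool black_in_to_space) {
      if (in_from_space) {
        // Survivors keep their slot for the next young-generation GC.
        return forwarded_into_to_space ? kKeepSlot : kRemoveSlot;
      }
      if (in_to_space) {
        // Moved pages / duplicate entries: no forwarding pointer exists, so
        // liveness is read from the mark bits instead.
        return black_in_to_space ? kKeepSlot : kRemoveSlot;
      }
      return kRemoveSlot;  // Old-space target: stale entry, drop it.
    }
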
@@ -3496,24 +3700,13 @@ int NumberOfPointerUpdateTasks(int pages) {
template <PointerDirection direction>
void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
- // Work-around bug in clang-3.4
- // https://github.com/nodejs/node/issues/8323
- struct MemoryChunkVisitor {
- PageParallelJob<PointerUpdateJobTraits<direction> >& job_;
- MemoryChunkVisitor(PageParallelJob<PointerUpdateJobTraits<direction> >& job)
- : job_(job) {}
- void operator()(MemoryChunk* chunk) {
- job_.AddPage(chunk, 0);
- }
- };
-
PageParallelJob<PointerUpdateJobTraits<direction> > job(
heap, heap->isolate()->cancelable_task_manager(), semaphore);
- RememberedSet<direction>::IterateMemoryChunks(heap, MemoryChunkVisitor(job));
- PointersUpdatingVisitor visitor(heap);
+ RememberedSet<direction>::IterateMemoryChunks(
+ heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
int num_pages = job.NumberOfPages();
int num_tasks = NumberOfPointerUpdateTasks(num_pages);
- job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+ job.Run(num_tasks, [](int i) { return 0; });
}
class ToSpacePointerUpdateJobTraits {
@@ -3523,6 +3716,24 @@ class ToSpacePointerUpdateJobTraits {
static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk, PerPageData limits) {
+ if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+ // New->new promoted pages contain garbage so they require iteration
+ // using markbits.
+ ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
+ } else {
+ ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
+ }
+ return true;
+ }
+
+ static const bool NeedSequentialFinalization = false;
+ static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+ }
+
+ private:
+ static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
+ MemoryChunk* chunk,
+ PerPageData limits) {
for (Address cur = limits.first; cur < limits.second;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
@@ -3530,10 +3741,18 @@ class ToSpacePointerUpdateJobTraits {
object->IterateBody(map->instance_type(), size, visitor);
cur += size;
}
- return true;
}
- static const bool NeedSequentialFinalization = false;
- static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+
+ static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
+ MemoryChunk* chunk,
+ PerPageData limits) {
+ LiveObjectIterator<kBlackObjects> it(chunk);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ Map* map = object->map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, visitor);
+ }
}
};
@@ -3542,15 +3761,13 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
heap, heap->isolate()->cancelable_task_manager(), semaphore);
Address space_start = heap->new_space()->bottom();
Address space_end = heap->new_space()->top();
- NewSpacePageIterator it(space_start, space_end);
- while (it.has_next()) {
- NewSpacePage* page = it.next();
+ for (Page* page : NewSpacePageRange(space_start, space_end)) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
job.AddPage(page, std::make_pair(start, end));
}
- PointersUpdatingVisitor visitor(heap);
+ PointersUpdatingVisitor visitor;
int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}
@@ -3558,7 +3775,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor;
{
TRACE_GC(heap()->tracer(),
@@ -3578,25 +3795,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
- for (Page* p : evacuation_candidates_) {
- DCHECK(p->IsEvacuationCandidate());
- // Important: skip list should be cleared only after roots were updated
- // because root iteration traverses the stack and might have to find
- // code objects from non-updated pc pointing into evacuation candidate.
- SkipList* list = p->skip_list();
- if (list != NULL) list->Clear();
-
- // First pass on aborted pages, fixing up all live objects.
- if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- p->ClearEvacuationCandidate();
- VisitLiveObjectsBody(p, &updating_visitor);
- }
- }
- }
-
- {
- TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
@@ -3618,33 +3816,29 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
- heap()->FreeQueuedChunks();
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
-
-int MarkCompactCollector::SweepInParallel(PagedSpace* space,
- int required_freed_bytes,
- int max_pages) {
+int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
+ int required_freed_bytes,
+ int max_pages) {
int max_freed = 0;
- int max_freed_overall = 0;
- int page_count = 0;
- for (Page* p : sweeping_list(space)) {
- max_freed = SweepInParallel(p, space);
- DCHECK(max_freed >= 0);
- if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+ int pages_freed = 0;
+ Page* page = nullptr;
+ while ((page = GetSweepingPageSafe(identity)) != nullptr) {
+ int freed = ParallelSweepPage(page, identity);
+ pages_freed += 1;
+ DCHECK_GE(freed, 0);
+ max_freed = Max(max_freed, freed);
+ if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
return max_freed;
- }
- max_freed_overall = Max(max_freed, max_freed_overall);
- page_count++;
- if (max_pages > 0 && page_count >= max_pages) {
- break;
- }
+ if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
}
- return max_freed_overall;
+ return max_freed;
}
-
-int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
+int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
+ AllocationSpace identity) {
int max_freed = 0;
if (page->mutex()->TryLock()) {
// If this page was already swept in the meantime, we can return here.
@@ -3653,19 +3847,20 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
return 0;
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- if (space->identity() == OLD_SPACE) {
- max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, page, NULL);
- } else if (space->identity() == CODE_SPACE) {
- max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, page, NULL);
+ const Sweeper::FreeSpaceTreatmentMode free_space_mode =
+ Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
+ if (identity == NEW_SPACE) {
+ RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
+ } else if (identity == OLD_SPACE) {
+ max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
+ } else if (identity == CODE_SPACE) {
+ max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
} else {
- max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, page, NULL);
+ max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
{
- base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
- swept_pages(space->identity())->Add(page);
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ swept_list_[identity].Add(page);
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
@@ -3673,32 +3868,60 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
return max_freed;
}
+void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
+ DCHECK(!sweeping_in_progress_);
+ PrepareToBeSweptPage(space, page);
+ sweeping_list_[space].push_back(page);
+}
+
+void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
+ Page* page) {
+ DCHECK(sweeping_in_progress_);
+ PrepareToBeSweptPage(space, page);
+ late_pages_ = true;
+ AddSweepingPageSafe(space, page);
+}
+
+void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
+ Page* page) {
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+ int to_sweep = page->area_size() - page->LiveBytes();
+ if (space != NEW_SPACE)
+ heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
+}
+
+Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
+ AllocationSpace space) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ Page* page = nullptr;
+ if (!sweeping_list_[space].empty()) {
+ page = sweeping_list_[space].front();
+ sweeping_list_[space].pop_front();
+ }
+ return page;
+}
+
+void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
+ Page* page) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ sweeping_list_[space].push_back(page);
+}
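
The Sweeper methods above implement a small mutex-guarded, per-space work
queue: the main thread enqueues pages, concurrent sweeper tasks pop them, and
finished pages land in swept_list_. A toy model of the queue discipline:

    #include <deque>
    #include <mutex>

    struct Page;  // opaque in this sketch

    struct SweepingQueueMock {
      std::mutex mutex;
      std::deque<Page*> pages;

      void Push(Page* page) {  // cf. AddSweepingPageSafe()
        std::lock_guard<std::mutex> guard(mutex);
        pages.push_back(page);
      }

      Page* Pop() {  // cf. GetSweepingPageSafe(); nullptr means "drained"
        std::lock_guard<std::mutex> guard(mutex);
        if (pages.empty()) return nullptr;
        Page* page = pages.front();
        pages.pop_front();
        return page;
      }
    };
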
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
- PageIterator it(space);
-
int will_be_swept = 0;
bool unused_page_present = false;
- while (it.has_next()) {
- Page* p = it.next();
+ // Loop needs to support deletion if live bytes == 0 for a page.
+ for (auto it = space->begin(); it != space->end();) {
+ Page* p = *(it++);
DCHECK(p->SweepingDone());
if (p->IsEvacuationCandidate()) {
// Will be processed in EvacuateNewSpaceAndCandidates.
DCHECK(evacuation_candidates_.length() > 0);
- continue;
- }
-
- // We can not sweep black pages, since all mark bits are set for these
- // pages.
- if (p->IsFlagSet(Page::BLACK_PAGE)) {
- Bitmap::Clear(p);
- p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
- p->ClearFlag(Page::BLACK_PAGE);
- // TODO(hpayer): Free unused memory of last black page.
+ DCHECK(!p->HasBlackAreas());
continue;
}
@@ -3708,8 +3931,9 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, p, nullptr);
+ Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
+ Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE
+ : Sweeper::IGNORE_FREE_SPACE);
continue;
}
@@ -3717,18 +3941,17 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (p->LiveBytes() == 0) {
if (unused_page_present) {
if (FLAG_gc_verbose) {
- PrintIsolate(isolate(), "sweeping: released page: %p", p);
+ PrintIsolate(isolate(), "sweeping: released page: %p",
+ static_cast<void*>(p));
}
+ ArrayBufferTracker::FreeAll(p);
space->ReleasePage(p);
continue;
}
unused_page_present = true;
}
- p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
- sweeping_list(space).push_back(p);
- int to_sweep = p->area_size() - p->LiveBytes();
- space->accounting_stats_.ShrinkSpace(to_sweep);
+ sweeper().AddPage(space->identity(), p);
will_be_swept++;
}
@@ -3736,8 +3959,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
AllocationSpaceName(space->identity()), will_be_swept);
}
- std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
- [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
}
@@ -3753,7 +3974,6 @@ void MarkCompactCollector::SweepSpaces() {
#endif
{
- sweeping_in_progress_ = true;
{
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_OLD);
@@ -3769,9 +3989,7 @@ void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope::MC_SWEEP_MAP);
StartSweepSpace(heap()->map_space());
}
- if (FLAG_concurrent_sweeping) {
- StartSweeperThreads();
- }
+ sweeper().StartSweeping();
}
// Deallocate unmarked large objects.
@@ -3783,13 +4001,6 @@ void MarkCompactCollector::SweepSpaces() {
}
}
-
-void MarkCompactCollector::ParallelSweepSpacesComplete() {
- sweeping_list(heap()->old_space()).clear();
- sweeping_list(heap()->code_space()).clear();
- sweeping_list(heap()->map_space()).clear();
-}
-
Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
@@ -3804,7 +4015,10 @@ void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(host)) {
- RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
+ // TODO(ulan): remove this check after investigating crbug.com/414964.
+ CHECK(target->IsCode());
+ RememberedSet<OLD_TO_OLD>::InsertTyped(
+ source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
}
}
@@ -3815,9 +4029,12 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
Code* host =
isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
pc);
- MarkBit mark_bit = Marking::MarkBitFrom(host);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
+ // The target is always in old space, we don't have to record the slot in
+ // the old-to-new remembered set.
+ DCHECK(!heap()->InNewSpace(target));
RecordRelocSlot(host, &rinfo, target);
}
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 9fee8269d5..b2c637bc63 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -5,7 +5,10 @@
#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_
+#include <deque>
+
#include "src/base/bits.h"
+#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
@@ -26,7 +29,7 @@ class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
-class Marking : public AllStatic {
+class ObjectMarking : public AllStatic {
public:
INLINE(static MarkBit MarkBitFrom(Address addr)) {
MemoryChunk* p = MemoryChunk::FromAddress(addr);
@@ -37,143 +40,12 @@ class Marking : public AllStatic {
return MarkBitFrom(reinterpret_cast<Address>(obj));
}
- // Impossible markbits: 01
- static const char* kImpossibleBitPattern;
- INLINE(static bool IsImpossible(MarkBit mark_bit)) {
- return !mark_bit.Get() && mark_bit.Next().Get();
- }
-
- // Black markbits: 11
- static const char* kBlackBitPattern;
- INLINE(static bool IsBlack(MarkBit mark_bit)) {
- return mark_bit.Get() && mark_bit.Next().Get();
- }
-
- // White markbits: 00 - this is required by the mark bit clearer.
- static const char* kWhiteBitPattern;
- INLINE(static bool IsWhite(MarkBit mark_bit)) {
- DCHECK(!IsImpossible(mark_bit));
- return !mark_bit.Get();
- }
-
- // Grey markbits: 10
- static const char* kGreyBitPattern;
- INLINE(static bool IsGrey(MarkBit mark_bit)) {
- return mark_bit.Get() && !mark_bit.Next().Get();
- }
-
- // IsBlackOrGrey assumes that the first bit is set for black or grey
- // objects.
- INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }
-
- INLINE(static void MarkBlack(MarkBit mark_bit)) {
- mark_bit.Set();
- mark_bit.Next().Set();
- }
-
- INLINE(static void MarkWhite(MarkBit mark_bit)) {
- mark_bit.Clear();
- mark_bit.Next().Clear();
- }
-
- INLINE(static void BlackToWhite(MarkBit markbit)) {
- DCHECK(IsBlack(markbit));
- markbit.Clear();
- markbit.Next().Clear();
- }
-
- INLINE(static void GreyToWhite(MarkBit markbit)) {
- DCHECK(IsGrey(markbit));
- markbit.Clear();
- markbit.Next().Clear();
- }
-
- INLINE(static void BlackToGrey(MarkBit markbit)) {
- DCHECK(IsBlack(markbit));
- markbit.Next().Clear();
- }
-
- INLINE(static void WhiteToGrey(MarkBit markbit)) {
- DCHECK(IsWhite(markbit));
- markbit.Set();
- }
-
- INLINE(static void WhiteToBlack(MarkBit markbit)) {
- DCHECK(IsWhite(markbit));
- markbit.Set();
- markbit.Next().Set();
- }
-
- INLINE(static void GreyToBlack(MarkBit markbit)) {
- DCHECK(IsGrey(markbit));
- markbit.Next().Set();
- }
-
- INLINE(static void BlackToGrey(HeapObject* obj)) {
- BlackToGrey(MarkBitFrom(obj));
- }
-
- INLINE(static void AnyToGrey(MarkBit markbit)) {
- markbit.Set();
- markbit.Next().Clear();
- }
-
- static void TransferMark(Heap* heap, Address old_start, Address new_start);
-
-#ifdef DEBUG
- enum ObjectColor {
- BLACK_OBJECT,
- WHITE_OBJECT,
- GREY_OBJECT,
- IMPOSSIBLE_COLOR
- };
-
- static const char* ColorName(ObjectColor color) {
- switch (color) {
- case BLACK_OBJECT:
- return "black";
- case WHITE_OBJECT:
- return "white";
- case GREY_OBJECT:
- return "grey";
- case IMPOSSIBLE_COLOR:
- return "impossible";
- }
- return "error";
- }
-
- static ObjectColor Color(HeapObject* obj) {
- return Color(Marking::MarkBitFrom(obj));
- }
-
- static ObjectColor Color(MarkBit mark_bit) {
- if (IsBlack(mark_bit)) return BLACK_OBJECT;
- if (IsWhite(mark_bit)) return WHITE_OBJECT;
- if (IsGrey(mark_bit)) return GREY_OBJECT;
- UNREACHABLE();
- return IMPOSSIBLE_COLOR;
- }
-#endif
-
- // Returns true if the transferred color is black.
- INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
- if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
- return true;
- MarkBit from_mark_bit = MarkBitFrom(from);
- MarkBit to_mark_bit = MarkBitFrom(to);
- DCHECK(Marking::IsWhite(to_mark_bit));
- if (from_mark_bit.Get()) {
- to_mark_bit.Set();
- if (from_mark_bit.Next().Get()) {
- to_mark_bit.Next().Set();
- return true;
- }
- }
- return false;
+ static Marking::ObjectColor Color(HeapObject* obj) {
+ return Marking::Color(ObjectMarking::MarkBitFrom(obj));
}
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
// ----------------------------------------------------------------------------
@@ -346,7 +218,19 @@ class MarkBitCellIterator BASE_EMBEDDED {
inline void Advance() {
cell_index_++;
- cell_base_ += 32 * kPointerSize;
+ cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
+ }
+
+ inline bool Advance(unsigned int new_cell_index) {
+ if (new_cell_index != cell_index_) {
+ DCHECK_GT(new_cell_index, cell_index_);
+ DCHECK_LE(new_cell_index, last_cell_index_);
+ unsigned int diff = new_cell_index - cell_index_;
+ cell_index_ = new_cell_index;
+ cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
+ return true;
+ }
+ return false;
}
// Returns the next mark bit cell. If there is no next cell, it returns 0.
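For a quick sanity check of the stride arithmetic in Advance() above, assume a 64-bit build (kPointerSize == 8): each mark-bit cell covers Bitmap::kBitsPerCell * kPointerSize = 32 * 8 = 256 bytes of page payload, so advancing from cell_index_ == 2 to new_cell_index == 5 moves cell_base_ forward by (5 - 2) * 256 = 768 bytes.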
@@ -381,8 +265,6 @@ class LiveObjectIterator BASE_EMBEDDED {
it_(chunk_),
cell_base_(it_.CurrentCellBase()),
current_cell_(*it_.CurrentCell()) {
- // Black pages can not be iterated.
- DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
}
HeapObject* Next();
@@ -400,6 +282,71 @@ class MarkCompactCollector {
public:
class Evacuator;
+ class Sweeper {
+ public:
+ class SweeperTask;
+
+ enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
+ enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+
+ typedef std::deque<Page*> SweepingList;
+ typedef List<Page*> SweptList;
+
+ static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode);
+
+ explicit Sweeper(Heap* heap)
+ : heap_(heap),
+ pending_sweeper_tasks_semaphore_(0),
+ sweeping_in_progress_(false),
+ late_pages_(false),
+ num_sweeping_tasks_(0) {}
+
+ bool sweeping_in_progress() { return sweeping_in_progress_; }
+ bool contains_late_pages() { return late_pages_; }
+
+ void AddPage(AllocationSpace space, Page* page);
+ void AddLatePage(AllocationSpace space, Page* page);
+
+ int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
+ int max_pages = 0);
+ int ParallelSweepPage(Page* page, AllocationSpace identity);
+
+ void StartSweeping();
+ void StartSweepingHelper(AllocationSpace space_to_start);
+ void EnsureCompleted();
+ void EnsureNewSpaceCompleted();
+ bool IsSweepingCompleted();
+ void SweepOrWaitUntilSweepingCompleted(Page* page);
+
+ void AddSweptPageSafe(PagedSpace* space, Page* page);
+ Page* GetSweptPageSafe(PagedSpace* space);
+
+ private:
+ static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
+
+ template <typename Callback>
+ void ForAllSweepingSpaces(Callback callback) {
+ for (int i = 0; i < kAllocationSpaces; i++) {
+ callback(static_cast<AllocationSpace>(i));
+ }
+ }
+
+ Page* GetSweepingPageSafe(AllocationSpace space);
+ void AddSweepingPageSafe(AllocationSpace space, Page* page);
+
+ void PrepareToBeSweptPage(AllocationSpace space, Page* page);
+
+ Heap* heap_;
+ base::Semaphore pending_sweeper_tasks_semaphore_;
+ base::Mutex mutex_;
+ SweptList swept_list_[kAllocationSpaces];
+ SweepingList sweeping_list_[kAllocationSpaces];
+ bool sweeping_in_progress_;
+ bool late_pages_;
+ base::AtomicNumber<intptr_t> num_sweeping_tasks_;
+ };
+
enum IterationMode {
kKeepMarking,
kClearMarkbits,
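To make the moving parts of the new Sweeper easier to follow, here is a standalone C++ sketch (stand-in types, not V8 code) of the same coordination pattern: mutex-protected per-space page queues drained by one background task per space, plus a task counter so EnsureCompleted() can block until every helper is done. A condition variable stands in for the pending-tasks semaphore.

    #include <condition_variable>
    #include <deque>
    #include <mutex>
    #include <thread>
    #include <vector>

    class SweeperSketch {
     public:
      static const int kSpaces = 3;  // Stand-in for kAllocationSpaces.

      void AddPage(int space, int page) {
        std::lock_guard<std::mutex> guard(mutex_);
        sweeping_list_[space].push_back(page);
      }

      void StartSweeping() {
        for (int space = 0; space < kSpaces; space++) {
          {
            std::lock_guard<std::mutex> guard(mutex_);
            num_tasks_++;
          }
          tasks_.emplace_back([this, space] {
            while (SweepNextPage(space)) {
            }
            std::lock_guard<std::mutex> guard(mutex_);
            num_tasks_--;
            done_.notify_all();
          });
        }
      }

      // Blocks until all helpers drained their queues, like EnsureCompleted().
      void EnsureCompleted() {
        std::unique_lock<std::mutex> lock(mutex_);
        done_.wait(lock, [this] { return num_tasks_ == 0; });
        lock.unlock();
        for (auto& task : tasks_) task.join();
        tasks_.clear();
      }

     private:
      bool SweepNextPage(int space) {
        std::lock_guard<std::mutex> guard(mutex_);
        if (sweeping_list_[space].empty()) return false;
        sweeping_list_[space].pop_front();  // Stand-in for RawSweep(page).
        return true;
      }

      std::mutex mutex_;
      std::condition_variable done_;
      int num_tasks_ = 0;
      std::deque<int> sweeping_list_[kSpaces];
      std::vector<std::thread> tasks_;
    };

    int main() {
      SweeperSketch sweeper;
      for (int p = 0; p < 9; p++) sweeper.AddPage(p % 3, p);
      sweeper.StartSweeping();
      sweeper.EnsureCompleted();
      return 0;
    }

Unlike this sketch, the real Sweeper pops a page under the mutex (GetSweepingPageSafe()) but runs the sweep itself outside it, so pages in one space can be processed while the lock is free.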
@@ -451,8 +398,6 @@ class MarkCompactCollector {
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
- enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
-
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
@@ -490,38 +435,19 @@ class MarkCompactCollector {
MarkingParity marking_parity() { return marking_parity_; }
- // Concurrent and parallel sweeping support. If required_freed_bytes was set
- // to a value larger than 0, then sweeping returns after a block of at least
- // required_freed_bytes was freed. If required_freed_bytes was set to zero
- // then the whole given space is swept. It returns the size of the maximum
- // continuous freed memory chunk.
- int SweepInParallel(PagedSpace* space, int required_freed_bytes,
- int max_pages = 0);
-
- // Sweeps a given page concurrently to the sweeper threads. It returns the
- // size of the maximum continuous freed memory chunk.
- int SweepInParallel(Page* page, PagedSpace* space);
-
// Ensures that sweeping is finished.
//
// Note: Can only be called safely from main thread.
void EnsureSweepingCompleted();
- void SweepOrWaitUntilSweepingCompleted(Page* page);
-
// Help out in sweeping the corresponding space and refill memory that has
// been regained.
//
// Note: Thread-safe.
void SweepAndRefill(CompactionSpace* space);
- // If sweeper threads are not active this method will return true. If
- // this is a latency issue we should be smarter here. Otherwise, it will
- // return true if the sweeper threads are done processing the pages.
- bool IsSweepingCompleted();
-
// Checks if sweeping is in progress right now on any space.
- bool sweeping_in_progress() { return sweeping_in_progress_; }
+ bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }
void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
@@ -562,61 +488,39 @@ class MarkCompactCollector {
// address range.
void RemoveObjectSlots(Address start_slot, Address end_slot);
- base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
- List<Page*>* swept_pages(AllocationSpace id) {
- switch (id) {
- case OLD_SPACE:
- return &swept_old_space_pages_;
- case CODE_SPACE:
- return &swept_code_space_pages_;
- case MAP_SPACE:
- return &swept_map_space_pages_;
- default:
- UNREACHABLE();
- }
- return nullptr;
- }
+ Sweeper& sweeper() { return sweeper_; }
+
+ void RegisterWrappersWithEmbedderHeapTracer();
+
+ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+ EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+
+ bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }
+
+ void TracePossibleWrapper(JSObject* js_object);
private:
+ class EvacuateNewSpacePageVisitor;
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
+ class EvacuateRecordOnlyVisitor;
class EvacuateVisitorBase;
class HeapObjectVisitor;
- class SweeperTask;
-
- typedef std::vector<Page*> SweepingList;
+ class ObjectStatsVisitor;
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
void ClearInvalidRememberedSetSlots();
- void StartSweeperThreads();
-
void ComputeEvacuationHeuristics(int area_size,
int* target_fragmentation_percent,
int* max_evacuated_bytes);
-#ifdef DEBUG
- enum CollectorState {
- IDLE,
- PREPARE_GC,
- MARK_LIVE_OBJECTS,
- SWEEP_SPACES,
- ENCODE_FORWARDING_ADDRESSES,
- UPDATE_POINTERS,
- RELOCATE_OBJECTS
- };
-
- // The current stage of the collector.
- CollectorState state_;
-#endif
-
- MarkingParity marking_parity_;
-
- bool was_marked_incrementally_;
+ void VisitAllObjects(HeapObjectVisitor* visitor);
- bool evacuation_;
+ void RecordObjectStats();
// Finishes GC, performs heap verification if enabled.
void Finish();
@@ -678,8 +582,8 @@ class MarkCompactCollector {
// or overflowed in the heap. This respects references only considered in
// the final atomic marking pause including the following:
// - Processing of objects reachable through Harmony WeakMaps.
- // - Objects reachable due to host application logic like object groups
- // or implicit references' groups.
+ // - Objects reachable due to host application logic like object groups,
+ // implicit references' groups, or embedder heap tracing.
void ProcessEphemeralMarking(ObjectVisitor* visitor,
bool only_process_harmony_weak_collections);
@@ -761,15 +665,12 @@ class MarkCompactCollector {
// evacuation.
//
- inline SweepingList& sweeping_list(Space* space);
-
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
void SweepSpaces();
void EvacuateNewSpacePrologue();
- void EvacuateNewSpaceEpilogue();
void EvacuatePagesInParallel();
@@ -782,25 +683,18 @@ class MarkCompactCollector {
// Iterates through all live objects on a page using marking information.
// Returns whether all objects have successfully been visited.
- bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
+ template <class Visitor>
+ bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
IterationMode mode);
- void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);
-
void RecomputeLiveBytes(MemoryChunk* page);
- void SweepAbortedPages();
-
void ReleaseEvacuationCandidates();
// Starts sweeping of a space by contributing on the main thread and setting
// up other pages for sweeping.
void StartSweepSpace(PagedSpace* space);
- // Finalizes the parallel sweeping phase. Marks all the pages that were
- // swept in parallel.
- void ParallelSweepSpacesComplete();
-
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
@@ -810,40 +704,51 @@ class MarkCompactCollector {
#endif
Heap* heap_;
- base::VirtualMemory* marking_deque_memory_;
- size_t marking_deque_memory_committed_;
- MarkingDeque marking_deque_;
- CodeFlusher* code_flusher_;
- bool have_code_to_deoptimize_;
- List<Page*> evacuation_candidates_;
- List<NewSpacePage*> newspace_evacuation_candidates_;
+ base::Semaphore page_parallel_job_semaphore_;
- base::Mutex swept_pages_mutex_;
- List<Page*> swept_old_space_pages_;
- List<Page*> swept_code_space_pages_;
- List<Page*> swept_map_space_pages_;
+#ifdef DEBUG
+ enum CollectorState {
+ IDLE,
+ PREPARE_GC,
+ MARK_LIVE_OBJECTS,
+ SWEEP_SPACES,
+ ENCODE_FORWARDING_ADDRESSES,
+ UPDATE_POINTERS,
+ RELOCATE_OBJECTS
+ };
- SweepingList sweeping_list_old_space_;
- SweepingList sweeping_list_code_space_;
- SweepingList sweeping_list_map_space_;
+ // The current stage of the collector.
+ CollectorState state_;
+#endif
+
+ MarkingParity marking_parity_;
+
+ bool was_marked_incrementally_;
+
+ bool evacuation_;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
- // True if concurrent or parallel sweeping is currently in progress.
- bool sweeping_in_progress_;
+ bool black_allocation_;
- // Semaphore used to synchronize sweeper tasks.
- base::Semaphore pending_sweeper_tasks_semaphore_;
+ bool have_code_to_deoptimize_;
- // Semaphore used to synchronize compaction tasks.
- base::Semaphore pending_compaction_tasks_semaphore_;
+ base::VirtualMemory* marking_deque_memory_;
+ size_t marking_deque_memory_committed_;
+ MarkingDeque marking_deque_;
+ std::vector<std::pair<void*, void*>> wrappers_to_trace_;
- base::Semaphore page_parallel_job_semaphore_;
+ CodeFlusher* code_flusher_;
- bool black_allocation_;
+ EmbedderHeapTracer* embedder_heap_tracer_;
+
+ List<Page*> evacuation_candidates_;
+ List<Page*> newspace_evacuation_candidates_;
+
+ Sweeper sweeper_;
friend class Heap;
friend class StoreBuffer;
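The embedder-tracing hooks above (wrappers_to_trace_, TracePossibleWrapper(), RegisterWrappersWithEmbedderHeapTracer()) suggest a batching scheme: candidate wrapper fields are buffered as raw pointer pairs and handed to the embedder in bulk. A minimal standalone sketch of that idea, with stand-in types since the EmbedderHeapTracer interface itself is not part of this diff:

    #include <cstdio>
    #include <utility>
    #include <vector>

    struct TracerSketch {
      // Stand-in for the embedder's bulk-registration callback.
      void RegisterV8References(
          const std::vector<std::pair<void*, void*>>& refs) {
        std::printf("embedder got %zu wrapper candidates\n", refs.size());
      }
    };

    int main() {
      std::vector<std::pair<void*, void*>> wrappers_to_trace;
      int type_tag = 0, instance = 0;  // Stand-ins for the two wrapper fields.
      wrappers_to_trace.emplace_back(&type_tag, &instance);
      TracerSketch tracer;
      tracer.RegisterV8References(wrappers_to_trace);  // Flush the batch.
      wrappers_to_trace.clear();
      return 0;
    }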
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
new file mode 100644
index 0000000000..1248e59fe2
--- /dev/null
+++ b/deps/v8/src/heap/marking.h
@@ -0,0 +1,385 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MARKING_H_
+#define V8_MARKING_H_
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class MarkBit {
+ public:
+ typedef uint32_t CellType;
+
+ inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
+
+#ifdef DEBUG
+ bool operator==(const MarkBit& other) {
+ return cell_ == other.cell_ && mask_ == other.mask_;
+ }
+#endif
+
+ private:
+ inline CellType* cell() { return cell_; }
+ inline CellType mask() { return mask_; }
+
+ inline MarkBit Next() {
+ CellType new_mask = mask_ << 1;
+ if (new_mask == 0) {
+ return MarkBit(cell_ + 1, 1);
+ } else {
+ return MarkBit(cell_, new_mask);
+ }
+ }
+
+ inline void Set() { *cell_ |= mask_; }
+ inline bool Get() { return (*cell_ & mask_) != 0; }
+ inline void Clear() { *cell_ &= ~mask_; }
+
+ CellType* cell_;
+ CellType mask_;
+
+ friend class IncrementalMarking;
+ friend class Marking;
+};
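The only subtle case in MarkBit::Next() above is the carry into the following cell: shifting the single set bit of a 32-bit mask left by one yields zero exactly when that bit was the cell's last. A standalone check of the arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t cells[2] = {0, 0};
      uint32_t mask = 1u << 31;        // Last bit of cells[0].
      uint32_t next_mask = mask << 1;  // Wraps to 0 ...
      assert(next_mask == 0);          // ... so Next() is MarkBit(cells + 1, 1).
      cells[1] |= 1u;                  // Next().Set() lands in the second cell.
      assert((cells[1] & 1u) != 0);
      return 0;
    }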
+
+// Bitmap is a sequence of cells each containing fixed number of bits.
+class Bitmap {
+ public:
+ static const uint32_t kBitsPerCell = 32;
+ static const uint32_t kBitsPerCellLog2 = 5;
+ static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+ static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+ static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
+
+ static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+ static const size_t kSize = (1 << kPageSizeBits) >>
+ (kPointerSizeLog2 + kBitsPerByteLog2);
+
+ static int CellsForLength(int length) {
+ return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+ }
+
+ int CellsCount() { return CellsForLength(kLength); }
+
+ static int SizeFor(int cells_count) {
+ return sizeof(MarkBit::CellType) * cells_count;
+ }
+
+ INLINE(static uint32_t IndexToCell(uint32_t index)) {
+ return index >> kBitsPerCellLog2;
+ }
+
+ V8_INLINE static uint32_t IndexInCell(uint32_t index) {
+ return index & kBitIndexMask;
+ }
+
+ INLINE(static uint32_t CellToIndex(uint32_t index)) {
+ return index << kBitsPerCellLog2;
+ }
+
+ INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+ return (index + kBitIndexMask) & ~kBitIndexMask;
+ }
+
+ INLINE(MarkBit::CellType* cells()) {
+ return reinterpret_cast<MarkBit::CellType*>(this);
+ }
+
+ INLINE(Address address()) { return reinterpret_cast<Address>(this); }
+
+ INLINE(static Bitmap* FromAddress(Address addr)) {
+ return reinterpret_cast<Bitmap*>(addr);
+ }
+
+ inline MarkBit MarkBitFromIndex(uint32_t index) {
+ MarkBit::CellType mask = 1u << IndexInCell(index);
+ MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
+ return MarkBit(cell, mask);
+ }
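Worked example for the index helpers above: for mark-bit index 40, IndexToCell(40) = 40 >> 5 = 1 and IndexInCell(40) = 40 & 31 = 8, so MarkBitFromIndex(40) addresses cells()[1] with mask 1u << 8. Likewise SizeFor(2) = sizeof(MarkBit::CellType) * 2 = 8 bytes, and CellAlignIndex(40) rounds up to the next cell boundary: (40 + 31) & ~31 = 64.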
+
+ void Clear() {
+ for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
+ }
+
+ // Sets all bits in the range [start_index, end_index).
+ void SetRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 1s.
+ cells()[start_cell_index] |= ~(start_index_mask - 1);
+ // Then fill all in between cells with 1s.
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ cells()[i] = ~0u;
+ }
+ // Finally, fill all bits until the end address in the last cell with 1s.
+ cells()[end_cell_index] |= (end_index_mask - 1);
+ } else {
+ cells()[start_cell_index] |= end_index_mask - start_index_mask;
+ }
+ }
+
+ // Clears all bits in the range [start_index, end_index).
+ void ClearRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ if (start_cell_index != end_cell_index) {
+ // Firstly, fill all bits from the start address to the end of the first
+ // cell with 0s.
+ cells()[start_cell_index] &= (start_index_mask - 1);
+ // Then fill all in between cells with 0s.
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ cells()[i] = 0;
+ }
+ // Finally, clear all bits until the end address in the last cell.
+ cells()[end_cell_index] &= ~(end_index_mask - 1);
+ } else {
+ cells()[start_cell_index] &= ~(end_index_mask - start_index_mask);
+ }
+ }
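The mask construction in SetRange()/ClearRange() above is easiest to verify on a concrete range. A standalone replay of SetRange(3, 38) across two 32-bit cells, with plain uint32_t standing in for MarkBit::CellType:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t cells[2] = {0, 0};
      uint32_t start_index_mask = 1u << (3 & 31);  // Cell 0, bit 3.
      uint32_t end_index_mask = 1u << (38 & 31);   // Cell 1, bit 6.
      cells[0] |= ~(start_index_mask - 1);         // Bits 3..31 of cell 0.
      cells[1] |= end_index_mask - 1;              // Bits 0..5 of cell 1.
      assert(cells[0] == 0xFFFFFFF8u);
      assert(cells[1] == 0x0000003Fu);
      // 29 + 6 == 35 == 38 - 3 bits set, i.e. exactly the range [3, 38).
      return 0;
    }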
+
+ // Returns true if all bits in the range [start_index, end_index) are set.
+ bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ MarkBit::CellType matching_mask;
+ if (start_cell_index != end_cell_index) {
+ matching_mask = ~(start_index_mask - 1);
+ if ((cells()[start_cell_index] & matching_mask) != matching_mask) {
+ return false;
+ }
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ if (cells()[i] != ~0u) return false;
+ }
+ matching_mask = (end_index_mask - 1);
+ return ((cells()[end_cell_index] & matching_mask) == matching_mask);
+ } else {
+ matching_mask = end_index_mask - start_index_mask;
+ return (cells()[end_cell_index] & matching_mask) == matching_mask;
+ }
+ }
+
+ // Returns true if all bits in the range [start_index, end_index) are cleared.
+ bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
+ unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
+
+ unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
+
+ MarkBit::CellType matching_mask;
+ if (start_cell_index != end_cell_index) {
+ matching_mask = ~(start_index_mask - 1);
+ if ((cells()[start_cell_index] & matching_mask)) return false;
+ for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
+ if (cells()[i]) return false;
+ }
+ matching_mask = (end_index_mask - 1);
+ return !(cells()[end_cell_index] & matching_mask);
+ } else {
+ matching_mask = end_index_mask - start_index_mask;
+ return !(cells()[end_cell_index] & matching_mask);
+ }
+ }
+
+ static void PrintWord(uint32_t word, uint32_t himask = 0) {
+ for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+ if ((mask & himask) != 0) PrintF("[");
+ PrintF((mask & word) ? "1" : "0");
+ if ((mask & himask) != 0) PrintF("]");
+ }
+ }
+
+ class CellPrinter {
+ public:
+ CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
+
+ void Print(uint32_t pos, uint32_t cell) {
+ if (cell == seq_type) {
+ seq_length++;
+ return;
+ }
+
+ Flush();
+
+ if (IsSeq(cell)) {
+ seq_start = pos;
+ seq_length = 0;
+ seq_type = cell;
+ return;
+ }
+
+ PrintF("%d: ", pos);
+ PrintWord(cell);
+ PrintF("\n");
+ }
+
+ void Flush() {
+ if (seq_length > 0) {
+ PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
+ seq_length * kBitsPerCell);
+ seq_length = 0;
+ }
+ }
+
+ static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+ private:
+ uint32_t seq_start;
+ uint32_t seq_type;
+ uint32_t seq_length;
+ };
+
+ void Print() {
+ CellPrinter printer;
+ for (int i = 0; i < CellsCount(); i++) {
+ printer.Print(i, cells()[i]);
+ }
+ printer.Flush();
+ PrintF("\n");
+ }
+
+ bool IsClean() {
+ for (int i = 0; i < CellsCount(); i++) {
+ if (cells()[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+};
+
+class Marking : public AllStatic {
+ public:
+ // Impossible markbits: 01
+ static const char* kImpossibleBitPattern;
+ INLINE(static bool IsImpossible(MarkBit mark_bit)) {
+ return !mark_bit.Get() && mark_bit.Next().Get();
+ }
+
+ // Black markbits: 11
+ static const char* kBlackBitPattern;
+ INLINE(static bool IsBlack(MarkBit mark_bit)) {
+ return mark_bit.Get() && mark_bit.Next().Get();
+ }
+
+ // White markbits: 00 - this is required by the mark bit clearer.
+ static const char* kWhiteBitPattern;
+ INLINE(static bool IsWhite(MarkBit mark_bit)) {
+ DCHECK(!IsImpossible(mark_bit));
+ return !mark_bit.Get();
+ }
+
+ // Grey markbits: 10
+ static const char* kGreyBitPattern;
+ INLINE(static bool IsGrey(MarkBit mark_bit)) {
+ return mark_bit.Get() && !mark_bit.Next().Get();
+ }
+
+ // IsBlackOrGrey assumes that the first bit is set for black or grey
+ // objects.
+ INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }
+
+ INLINE(static void MarkBlack(MarkBit mark_bit)) {
+ mark_bit.Set();
+ mark_bit.Next().Set();
+ }
+
+ INLINE(static void MarkWhite(MarkBit mark_bit)) {
+ mark_bit.Clear();
+ mark_bit.Next().Clear();
+ }
+
+ INLINE(static void BlackToWhite(MarkBit markbit)) {
+ DCHECK(IsBlack(markbit));
+ markbit.Clear();
+ markbit.Next().Clear();
+ }
+
+ INLINE(static void GreyToWhite(MarkBit markbit)) {
+ DCHECK(IsGrey(markbit));
+ markbit.Clear();
+ markbit.Next().Clear();
+ }
+
+ INLINE(static void BlackToGrey(MarkBit markbit)) {
+ DCHECK(IsBlack(markbit));
+ markbit.Next().Clear();
+ }
+
+ INLINE(static void WhiteToGrey(MarkBit markbit)) {
+ DCHECK(IsWhite(markbit));
+ markbit.Set();
+ }
+
+ INLINE(static void WhiteToBlack(MarkBit markbit)) {
+ DCHECK(IsWhite(markbit));
+ markbit.Set();
+ markbit.Next().Set();
+ }
+
+ INLINE(static void GreyToBlack(MarkBit markbit)) {
+ DCHECK(IsGrey(markbit));
+ markbit.Next().Set();
+ }
+
+ INLINE(static void AnyToGrey(MarkBit markbit)) {
+ markbit.Set();
+ markbit.Next().Clear();
+ }
+
+ enum ObjectColor {
+ BLACK_OBJECT,
+ WHITE_OBJECT,
+ GREY_OBJECT,
+ IMPOSSIBLE_COLOR
+ };
+
+ static const char* ColorName(ObjectColor color) {
+ switch (color) {
+ case BLACK_OBJECT:
+ return "black";
+ case WHITE_OBJECT:
+ return "white";
+ case GREY_OBJECT:
+ return "grey";
+ case IMPOSSIBLE_COLOR:
+ return "impossible";
+ }
+ return "error";
+ }
+
+ static ObjectColor Color(MarkBit mark_bit) {
+ if (IsBlack(mark_bit)) return BLACK_OBJECT;
+ if (IsWhite(mark_bit)) return WHITE_OBJECT;
+ if (IsGrey(mark_bit)) return GREY_OBJECT;
+ UNREACHABLE();
+ return IMPOSSIBLE_COLOR;
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MARKING_H_
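For reference, the bit patterns documented in Marking above encode each object color in a pair of adjacent bits, where the second bit of the pair is the one MarkBit::Next() reaches: 00 white, 10 grey, 11 black, 01 impossible. A standalone walk through the usual white -> grey -> black lifecycle:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t cell = 0;
      const uint32_t bit = 1u << 4;   // The object's mark bit ...
      const uint32_t next = 1u << 5;  // ... and its Next() companion.
      assert((cell & (bit | next)) == 0);      // IsWhite: 00.
      cell |= bit;                             // WhiteToGrey.
      assert((cell & bit) && !(cell & next));  // IsGrey: 10.
      cell |= next;                            // GreyToBlack.
      assert((cell & bit) && (cell & next));   // IsBlack: 11.
      return 0;
    }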
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index c1566abfc5..3f43212151 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -4,9 +4,11 @@
#include "src/heap/object-stats.h"
+#include "src/compilation-cache.h"
#include "src/counters.h"
#include "src/heap/heap-inl.h"
#include "src/isolate.h"
+#include "src/macro-assembler.h"
#include "src/utils.h"
namespace v8 {
@@ -18,61 +20,90 @@ static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
memset(object_counts_, 0, sizeof(object_counts_));
memset(object_sizes_, 0, sizeof(object_sizes_));
+ memset(over_allocated_, 0, sizeof(over_allocated_));
+ memset(size_histogram_, 0, sizeof(size_histogram_));
+ memset(over_allocated_histogram_, 0, sizeof(over_allocated_histogram_));
if (clear_last_time_stats) {
memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
}
+ visited_fixed_array_sub_types_.clear();
}
-
-void ObjectStats::TraceObjectStat(const char* name, int count, int size,
- double time) {
- int ms_count = heap()->ms_count();
- PrintIsolate(isolate(),
- "heap:%p, time:%f, gc:%d, type:%s, count:%d, size:%d\n",
- static_cast<void*>(heap()), time, ms_count, name, count, size);
+// Tell the compiler to never inline this: occasionally, the optimizer will
+// decide to inline this and unroll the loop, making the compiled code more than
+// 100KB larger.
+V8_NOINLINE static void PrintJSONArray(size_t* array, const int len) {
+ PrintF("[ ");
+ for (int i = 0; i < len; i++) {
+ PrintF("%zu", array[i]);
+ if (i != (len - 1)) PrintF(", ");
+ }
+ PrintF(" ]");
}
-
-void ObjectStats::TraceObjectStats() {
- base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
- int index;
- int count;
- int size;
- int total_size = 0;
+void ObjectStats::PrintJSON(const char* key) {
double time = isolate()->time_millis_since_init();
-#define TRACE_OBJECT_COUNT(name) \
- count = static_cast<int>(object_counts_[name]); \
- size = static_cast<int>(object_sizes_[name]) / KB; \
- total_size += size; \
- TraceObjectStat(#name, count, size, time);
- INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
-#define TRACE_OBJECT_COUNT(name) \
- index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
- count = static_cast<int>(object_counts_[index]); \
- size = static_cast<int>(object_sizes_[index]) / KB; \
- TraceObjectStat("*CODE_" #name, count, size, time);
- CODE_KIND_LIST(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
-#define TRACE_OBJECT_COUNT(name) \
- index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
- count = static_cast<int>(object_counts_[index]); \
- size = static_cast<int>(object_sizes_[index]) / KB; \
- TraceObjectStat("*FIXED_ARRAY_" #name, count, size, time);
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
-#define TRACE_OBJECT_COUNT(name) \
- index = \
- FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
- count = static_cast<int>(object_counts_[index]); \
- size = static_cast<int>(object_sizes_[index]) / KB; \
- TraceObjectStat("*CODE_AGE_" #name, count, size, time);
- CODE_AGE_LIST_COMPLETE(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
+ int gc_count = heap()->gc_count();
+
+#define PRINT_KEY_AND_ID() \
+ PrintF("\"isolate\": \"%p\", \"id\": %d, \"key\": \"%s\", ", \
+ reinterpret_cast<void*>(isolate()), gc_count, key);
+
+ // gc_descriptor
+ PrintF("{ ");
+ PRINT_KEY_AND_ID();
+ PrintF("\"type\": \"gc_descriptor\", \"time\": %f }\n", time);
+ // bucket_sizes
+ PrintF("{ ");
+ PRINT_KEY_AND_ID();
+ PrintF("\"type\": \"bucket_sizes\", \"sizes\": [ ");
+ for (int i = 0; i < kNumberOfBuckets; i++) {
+ PrintF("%d", 1 << (kFirstBucketShift + i));
+ if (i != (kNumberOfBuckets - 1)) PrintF(", ");
+ }
+ PrintF(" ] }\n");
+// instance_type_data
+#define PRINT_INSTANCE_TYPE_DATA(name, index) \
+ PrintF("{ "); \
+ PRINT_KEY_AND_ID(); \
+ PrintF("\"type\": \"instance_type_data\", "); \
+ PrintF("\"instance_type\": %d, ", index); \
+ PrintF("\"instance_type_name\": \"%s\", ", name); \
+ PrintF("\"overall\": %zu, ", object_sizes_[index]); \
+ PrintF("\"count\": %zu, ", object_counts_[index]); \
+ PrintF("\"over_allocated\": %zu, ", over_allocated_[index]); \
+ PrintF("\"histogram\": "); \
+ PrintJSONArray(size_histogram_[index], kNumberOfBuckets); \
+ PrintF(","); \
+ PrintF("\"over_allocated_histogram\": "); \
+ PrintJSONArray(over_allocated_histogram_[index], kNumberOfBuckets); \
+ PrintF(" }\n");
+
+#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
+#define CODE_KIND_WRAPPER(name) \
+ PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
+ FIRST_CODE_KIND_SUB_TYPE + Code::name)
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
+ PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name, \
+ FIRST_FIXED_ARRAY_SUB_TYPE + name)
+#define CODE_AGE_WRAPPER(name) \
+ PRINT_INSTANCE_TYPE_DATA( \
+ "*CODE_AGE_" #name, \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+
+ INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
+ CODE_KIND_LIST(CODE_KIND_WRAPPER)
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER)
+ CODE_AGE_LIST_COMPLETE(CODE_AGE_WRAPPER)
+
+#undef INSTANCE_TYPE_WRAPPER
+#undef CODE_KIND_WRAPPER
+#undef FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER
+#undef CODE_AGE_WRAPPER
+#undef PRINT_INSTANCE_TYPE_DATA
}
-
void ObjectStats::CheckpointObjectStats() {
base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
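Going by the PrintF format strings in PrintJSON() above, each record is a single-line JSON object. Illustrative output for the first two record types (the isolate pointer, id, and key are made up; the bucket sizes follow from kFirstBucketShift = 5 and kNumberOfBuckets = 15 in object-stats.h below):

    { "isolate": "0x103000000", "id": 42, "key": "live", "type": "gc_descriptor", "time": 1234.500000 }
    { "isolate": "0x103000000", "id": 42, "key": "live", "type": "bucket_sizes", "sizes": [ 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288 ] }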
@@ -134,118 +165,377 @@ void ObjectStats::CheckpointObjectStats() {
Isolate* ObjectStats::isolate() { return heap()->isolate(); }
+void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
+ Map* map = obj->map();
-void ObjectStatsVisitor::CountFixedArray(
- FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type) {
- Heap* heap = fixed_array->map()->GetHeap();
- if (fixed_array->map() != heap->fixed_cow_array_map() &&
- fixed_array->map() != heap->fixed_double_array_map() &&
- fixed_array != heap->empty_fixed_array()) {
- if (fixed_array->IsDictionary()) {
- heap->object_stats_->RecordFixedArraySubTypeStats(dictionary_type,
- fixed_array->Size());
- } else {
- heap->object_stats_->RecordFixedArraySubTypeStats(fast_type,
- fixed_array->Size());
+ // Record for the InstanceType.
+ int object_size = obj->Size();
+ stats_->RecordObjectStats(map->instance_type(), object_size);
+
+ // Record specific sub types where possible.
+ if (obj->IsMap()) RecordMapDetails(Map::cast(obj));
+ if (obj->IsObjectTemplateInfo() || obj->IsFunctionTemplateInfo()) {
+ RecordTemplateInfoDetails(TemplateInfo::cast(obj));
+ }
+ if (obj->IsBytecodeArray()) {
+ RecordBytecodeArrayDetails(BytecodeArray::cast(obj));
+ }
+ if (obj->IsCode()) RecordCodeDetails(Code::cast(obj));
+ if (obj->IsSharedFunctionInfo()) {
+ RecordSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
+ }
+ if (obj->IsFixedArray()) RecordFixedArrayDetails(FixedArray::cast(obj));
+ if (obj->IsJSObject()) RecordJSObjectDetails(JSObject::cast(obj));
+ if (obj->IsJSWeakCollection()) {
+ RecordJSWeakCollectionDetails(JSWeakCollection::cast(obj));
+ }
+ if (obj->IsJSCollection()) {
+ RecordJSCollectionDetails(JSObject::cast(obj));
+ }
+ if (obj->IsJSFunction()) RecordJSFunctionDetails(JSFunction::cast(obj));
+ if (obj->IsScript()) RecordScriptDetails(Script::cast(obj));
+}
+
+class ObjectStatsCollector::CompilationCacheTableVisitor
+ : public ObjectVisitor {
+ public:
+ explicit CompilationCacheTableVisitor(ObjectStatsCollector* parent)
+ : parent_(parent) {}
+
+ void VisitPointers(Object** start, Object** end) override {
+ for (Object** current = start; current < end; current++) {
+ HeapObject* obj = HeapObject::cast(*current);
+ if (obj->IsUndefined(parent_->heap_->isolate())) continue;
+ CHECK(obj->IsCompilationCacheTable());
+ parent_->RecordHashTableHelper(nullptr, CompilationCacheTable::cast(obj),
+ COMPILATION_CACHE_TABLE_SUB_TYPE);
}
}
+
+ private:
+ ObjectStatsCollector* parent_;
+};
+
+void ObjectStatsCollector::CollectGlobalStatistics() {
+ // Global FixedArrays.
+ RecordFixedArrayHelper(nullptr, heap_->weak_new_space_object_to_code_list(),
+ WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->serialized_templates(),
+ SERIALIZED_TEMPLATES_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->number_string_cache(),
+ NUMBER_STRING_CACHE_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->single_character_string_cache(),
+ SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->string_split_cache(),
+ STRING_SPLIT_CACHE_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->regexp_multiple_cache(),
+ REGEXP_MULTIPLE_CACHE_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, heap_->retained_maps(),
+ RETAINED_MAPS_SUB_TYPE, 0);
+
+ // Global weak FixedArrays.
+ RecordFixedArrayHelper(
+ nullptr, WeakFixedArray::cast(heap_->noscript_shared_function_infos()),
+ NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
+ RecordFixedArrayHelper(nullptr, WeakFixedArray::cast(heap_->script_list()),
+ SCRIPT_LIST_SUB_TYPE, 0);
+
+ // Global hash tables.
+ RecordHashTableHelper(nullptr, heap_->string_table(), STRING_TABLE_SUB_TYPE);
+ RecordHashTableHelper(nullptr, heap_->weak_object_to_code_table(),
+ OBJECT_TO_CODE_SUB_TYPE);
+ RecordHashTableHelper(nullptr, heap_->code_stubs(),
+ CODE_STUBS_TABLE_SUB_TYPE);
+ RecordHashTableHelper(nullptr, heap_->intrinsic_function_names(),
+ INTRINSIC_FUNCTION_NAMES_SUB_TYPE);
+ RecordHashTableHelper(nullptr, heap_->empty_properties_dictionary(),
+ EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE);
+ CompilationCache* compilation_cache = heap_->isolate()->compilation_cache();
+ CompilationCacheTableVisitor v(this);
+ compilation_cache->Iterate(&v);
}
+static bool CanRecordFixedArray(Heap* heap, FixedArrayBase* array) {
+ return array->map()->instance_type() == FIXED_ARRAY_TYPE &&
+ array->map() != heap->fixed_double_array_map() &&
+ array != heap->empty_fixed_array() &&
+ array != heap->empty_byte_array() &&
+ array != heap->empty_literals_array() &&
+ array != heap->empty_sloppy_arguments_elements() &&
+ array != heap->empty_slow_element_dictionary() &&
+ array != heap->empty_descriptor_array() &&
+ array != heap->empty_properties_dictionary();
+}
-void ObjectStatsVisitor::VisitBase(VisitorId id, Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- heap->object_stats_->RecordObjectStats(map->instance_type(), object_size);
- table_.GetVisitorById(id)(map, obj);
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
- CountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
- FAST_ELEMENTS_SUB_TYPE);
- CountFixedArray(object->properties(), DICTIONARY_PROPERTIES_SUB_TYPE,
- FAST_PROPERTIES_SUB_TYPE);
+static bool IsCowArray(Heap* heap, FixedArrayBase* array) {
+ return array->map() == heap->fixed_cow_array_map();
+}
+
+static bool SameLiveness(HeapObject* obj1, HeapObject* obj2) {
+ return obj1 == nullptr || obj2 == nullptr ||
+ ObjectMarking::Color(obj1) == ObjectMarking::Color(obj2);
+}
+
+bool ObjectStatsCollector::RecordFixedArrayHelper(HeapObject* parent,
+ FixedArray* array,
+ int subtype,
+ size_t overhead) {
+ if (SameLiveness(parent, array) && CanRecordFixedArray(heap_, array) &&
+ !IsCowArray(heap_, array)) {
+ return stats_->RecordFixedArraySubTypeStats(array, subtype, array->Size(),
+ overhead);
}
+ return false;
}
+void ObjectStatsCollector::RecursivelyRecordFixedArrayHelper(HeapObject* parent,
+ FixedArray* array,
+ int subtype) {
+ if (RecordFixedArrayHelper(parent, array, subtype, 0)) {
+ for (int i = 0; i < array->length(); i++) {
+ if (array->get(i)->IsFixedArray()) {
+ RecursivelyRecordFixedArrayHelper(
+ parent, FixedArray::cast(array->get(i)), subtype);
+ }
+ }
+ }
+}
-template <ObjectStatsVisitor::VisitorId id>
-void ObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
- VisitBase(id, map, obj);
+template <class HashTable>
+void ObjectStatsCollector::RecordHashTableHelper(HeapObject* parent,
+ HashTable* array,
+ int subtype) {
+ int used = array->NumberOfElements() * HashTable::kEntrySize * kPointerSize;
+ CHECK_GE(array->Size(), used);
+ size_t overhead = array->Size() - used -
+ HashTable::kElementsStartIndex * kPointerSize -
+ FixedArray::kHeaderSize;
+ RecordFixedArrayHelper(parent, array, subtype, overhead);
}
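To put illustrative numbers on the overhead computation above (the real kEntrySize and kElementsStartIndex depend on the concrete HashTable instantiation; a 64-bit build with FixedArray::kHeaderSize == 16 is assumed): a table whose Size() is 4096 bytes holding 40 elements at kEntrySize == 3 uses 40 * 3 * 8 = 960 bytes for entries, so with kElementsStartIndex == 3 the recorded overhead is 4096 - 960 - 3 * 8 - 16 = 3096 bytes.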
+void ObjectStatsCollector::RecordJSObjectDetails(JSObject* object) {
+ size_t overhead = 0;
+ FixedArrayBase* elements = object->elements();
+ if (CanRecordFixedArray(heap_, elements) && !IsCowArray(heap_, elements)) {
+ if (elements->IsDictionary() && SameLiveness(object, elements)) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements);
+ RecordHashTableHelper(object, dict, DICTIONARY_ELEMENTS_SUB_TYPE);
+ } else {
+ if (IsFastHoleyElementsKind(object->GetElementsKind())) {
+ int used = object->GetFastElementsUsage() * kPointerSize;
+ if (object->GetElementsKind() == FAST_HOLEY_DOUBLE_ELEMENTS) used *= 2;
+ CHECK_GE(elements->Size(), used);
+ overhead = elements->Size() - used - FixedArray::kHeaderSize;
+ }
+ stats_->RecordFixedArraySubTypeStats(elements, FAST_ELEMENTS_SUB_TYPE,
+ elements->Size(), overhead);
+ }
+ }
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitMap>(Map* map,
- HeapObject* obj) {
- Heap* heap = map->GetHeap();
- Map* map_obj = Map::cast(obj);
- DCHECK(map->instance_type() == MAP_TYPE);
+ overhead = 0;
+ FixedArrayBase* properties = object->properties();
+ if (CanRecordFixedArray(heap_, properties) &&
+ SameLiveness(object, properties) && !IsCowArray(heap_, properties)) {
+ if (properties->IsDictionary()) {
+ NameDictionary* dict = NameDictionary::cast(properties);
+ RecordHashTableHelper(object, dict, DICTIONARY_PROPERTIES_SUB_TYPE);
+ } else {
+ stats_->RecordFixedArraySubTypeStats(properties, FAST_PROPERTIES_SUB_TYPE,
+ properties->Size(), overhead);
+ }
+ }
+}
+
+void ObjectStatsCollector::RecordJSWeakCollectionDetails(
+ JSWeakCollection* obj) {
+ if (obj->table()->IsHashTable()) {
+ ObjectHashTable* table = ObjectHashTable::cast(obj->table());
+ int used = table->NumberOfElements() * ObjectHashTable::kEntrySize;
+ size_t overhead = table->Size() - used;
+ RecordFixedArrayHelper(obj, table, JS_WEAK_COLLECTION_SUB_TYPE, overhead);
+ }
+}
+
+void ObjectStatsCollector::RecordJSCollectionDetails(JSObject* obj) {
+ // The JS versions use a different HashTable implementation that cannot use
+ // the regular helper. Since overall impact is usually small, just record
+ // without overhead.
+ if (obj->IsJSMap()) {
+ RecordFixedArrayHelper(nullptr, FixedArray::cast(JSMap::cast(obj)->table()),
+ JS_COLLECTION_SUB_TYPE, 0);
+ }
+ if (obj->IsJSSet()) {
+ RecordFixedArrayHelper(nullptr, FixedArray::cast(JSSet::cast(obj)->table()),
+ JS_COLLECTION_SUB_TYPE, 0);
+ }
+}
+
+void ObjectStatsCollector::RecordScriptDetails(Script* obj) {
+ Object* infos = WeakFixedArray::cast(obj->shared_function_infos());
+ if (infos->IsWeakFixedArray())
+ RecordFixedArrayHelper(obj, WeakFixedArray::cast(infos),
+ SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
+}
+
+void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
DescriptorArray* array = map_obj->instance_descriptors();
- if (map_obj->owns_descriptors() && array != heap->empty_descriptor_array()) {
- int fixed_array_size = array->Size();
- heap->object_stats_->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
- fixed_array_size);
+ if (map_obj->owns_descriptors() && array != heap_->empty_descriptor_array() &&
+ SameLiveness(map_obj, array)) {
+ RecordFixedArrayHelper(map_obj, array, DESCRIPTOR_ARRAY_SUB_TYPE, 0);
+ if (array->HasEnumCache()) {
+ RecordFixedArrayHelper(array, array->GetEnumCache(), ENUM_CACHE_SUB_TYPE,
+ 0);
+ }
+ if (array->HasEnumIndicesCache()) {
+ RecordFixedArrayHelper(array, array->GetEnumIndicesCache(),
+ ENUM_INDICES_CACHE_SUB_TYPE, 0);
+ }
}
+
if (map_obj->has_code_cache()) {
- CodeCache* cache = CodeCache::cast(map_obj->code_cache());
- heap->object_stats_->RecordFixedArraySubTypeStats(
- MAP_CODE_CACHE_SUB_TYPE, cache->default_cache()->Size());
- if (!cache->normal_type_cache()->IsUndefined()) {
- heap->object_stats_->RecordFixedArraySubTypeStats(
- MAP_CODE_CACHE_SUB_TYPE,
- FixedArray::cast(cache->normal_type_cache())->Size());
+ FixedArray* code_cache = map_obj->code_cache();
+ if (code_cache->IsCodeCacheHashTable()) {
+ RecordHashTableHelper(map_obj, CodeCacheHashTable::cast(code_cache),
+ MAP_CODE_CACHE_SUB_TYPE);
+ } else {
+ RecordFixedArrayHelper(map_obj, code_cache, MAP_CODE_CACHE_SUB_TYPE, 0);
}
}
- VisitBase(kVisitMap, map, obj);
-}
+ for (DependentCode* cur_dependent_code = map_obj->dependent_code();
+ cur_dependent_code != heap_->empty_fixed_array();
+ cur_dependent_code = DependentCode::cast(
+ cur_dependent_code->get(DependentCode::kNextLinkIndex))) {
+ RecordFixedArrayHelper(map_obj, cur_dependent_code, DEPENDENT_CODE_SUB_TYPE,
+ 0);
+ }
+
+ if (map_obj->is_prototype_map()) {
+ if (map_obj->prototype_info()->IsPrototypeInfo()) {
+ PrototypeInfo* info = PrototypeInfo::cast(map_obj->prototype_info());
+ Object* users = info->prototype_users();
+ if (users->IsWeakFixedArray()) {
+ RecordFixedArrayHelper(map_obj, WeakFixedArray::cast(users),
+ PROTOTYPE_USERS_SUB_TYPE, 0);
+ }
+ }
+ }
+}
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitCode>(
- Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- DCHECK(map->instance_type() == CODE_TYPE);
- Code* code_obj = Code::cast(obj);
- heap->object_stats_->RecordCodeSubTypeStats(code_obj->kind(),
- code_obj->GetAge(), object_size);
- VisitBase(kVisitCode, map, obj);
+void ObjectStatsCollector::RecordTemplateInfoDetails(TemplateInfo* obj) {
+ if (obj->property_accessors()->IsFixedArray()) {
+ RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_accessors()),
+ TEMPLATE_INFO_SUB_TYPE, 0);
+ }
+ if (obj->property_list()->IsFixedArray()) {
+ RecordFixedArrayHelper(obj, FixedArray::cast(obj->property_list()),
+ TEMPLATE_INFO_SUB_TYPE, 0);
+ }
}
+void ObjectStatsCollector::RecordBytecodeArrayDetails(BytecodeArray* obj) {
+ RecordFixedArrayHelper(obj, obj->constant_pool(),
+ BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE, 0);
+ RecordFixedArrayHelper(obj, obj->handler_table(),
+ BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE, 0);
+}
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitSharedFunctionInfo>(
- Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
- if (sfi->scope_info() != heap->empty_fixed_array()) {
- heap->object_stats_->RecordFixedArraySubTypeStats(
- SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
+void ObjectStatsCollector::RecordCodeDetails(Code* code) {
+ stats_->RecordCodeSubTypeStats(code->kind(), code->GetAge(), code->Size());
+ RecordFixedArrayHelper(code, code->deoptimization_data(),
+ DEOPTIMIZATION_DATA_SUB_TYPE, 0);
+ if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
+ DeoptimizationInputData* input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ RecordFixedArrayHelper(code->deoptimization_data(),
+ input_data->LiteralArray(),
+ OPTIMIZED_CODE_LITERALS_SUB_TYPE, 0);
+ }
+ RecordFixedArrayHelper(code, code->handler_table(), HANDLER_TABLE_SUB_TYPE,
+ 0);
+ int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ Object* target = it.rinfo()->target_object();
+ if (target->IsFixedArray()) {
+ RecursivelyRecordFixedArrayHelper(code, FixedArray::cast(target),
+ EMBEDDED_OBJECT_SUB_TYPE);
+ }
+ }
}
- VisitBase(kVisitSharedFunctionInfo, map, obj);
}
+void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
+ SharedFunctionInfo* sfi) {
+ FixedArray* scope_info = sfi->scope_info();
+ RecordFixedArrayHelper(sfi, scope_info, SCOPE_INFO_SUB_TYPE, 0);
+ TypeFeedbackMetadata* feedback_metadata = sfi->feedback_metadata();
+ if (!feedback_metadata->is_empty()) {
+ RecordFixedArrayHelper(sfi, feedback_metadata,
+ TYPE_FEEDBACK_METADATA_SUB_TYPE, 0);
+ Object* names =
+ feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex);
+ if (!names->IsSmi()) {
+ UnseededNumberDictionary* names = UnseededNumberDictionary::cast(
+ feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex));
+ RecordHashTableHelper(sfi, names, TYPE_FEEDBACK_METADATA_SUB_TYPE);
+ }
+ }
-template <>
-void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitFixedArray>(
- Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- FixedArray* fixed_array = FixedArray::cast(obj);
- if (fixed_array == heap->string_table()) {
- heap->object_stats_->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
- fixed_array->Size());
+ if (!sfi->OptimizedCodeMapIsCleared()) {
+ FixedArray* optimized_code_map = sfi->optimized_code_map();
+ RecordFixedArrayHelper(sfi, optimized_code_map, OPTIMIZED_CODE_MAP_SUB_TYPE,
+ 0);
+ // Optimized code map should be small, so skip accounting.
+ int len = optimized_code_map->length();
+ for (int i = SharedFunctionInfo::kEntriesStart; i < len;
+ i += SharedFunctionInfo::kEntryLength) {
+ Object* slot =
+ optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset);
+ LiteralsArray* literals = nullptr;
+ if (slot->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(slot);
+ if (!cell->cleared()) {
+ literals = LiteralsArray::cast(cell->value());
+ }
+ } else {
+ literals = LiteralsArray::cast(slot);
+ }
+ if (literals != nullptr) {
+ RecordFixedArrayHelper(sfi, literals, LITERALS_ARRAY_SUB_TYPE, 0);
+ RecordFixedArrayHelper(sfi, literals->feedback_vector(),
+ TYPE_FEEDBACK_VECTOR_SUB_TYPE, 0);
+ }
+ }
}
- VisitBase(kVisitFixedArray, map, obj);
}
+void ObjectStatsCollector::RecordJSFunctionDetails(JSFunction* function) {
+ LiteralsArray* literals = function->literals();
+ RecordFixedArrayHelper(function, literals, LITERALS_ARRAY_SUB_TYPE, 0);
+ RecordFixedArrayHelper(function, literals->feedback_vector(),
+ TYPE_FEEDBACK_VECTOR_SUB_TYPE, 0);
+}
-void ObjectStatsVisitor::Initialize(VisitorDispatchTable<Callback>* original) {
- // Copy the original visitor table to make call-through possible. After we
- // preserved a copy locally, we patch the original table to call us.
- table_.CopyFrom(original);
-#define COUNT_FUNCTION(id) original->Register(kVisit##id, Visit<kVisit##id>);
- VISITOR_ID_LIST(COUNT_FUNCTION)
-#undef COUNT_FUNCTION
+void ObjectStatsCollector::RecordFixedArrayDetails(FixedArray* array) {
+ if (array->IsContext()) {
+ RecordFixedArrayHelper(nullptr, array, CONTEXT_SUB_TYPE, 0);
+ }
+ if (IsCowArray(heap_, array) && CanRecordFixedArray(heap_, array)) {
+ stats_->RecordFixedArraySubTypeStats(array, COPY_ON_WRITE_SUB_TYPE,
+ array->Size(), 0);
+ }
+ if (array->IsNativeContext()) {
+ Context* native_ctx = Context::cast(array);
+ RecordHashTableHelper(array,
+ native_ctx->slow_template_instantiations_cache(),
+ SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE);
+ FixedArray* fast_cache = native_ctx->fast_template_instantiations_cache();
+ stats_->RecordFixedArraySubTypeStats(
+ fast_cache, FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE,
+ fast_cache->Size(), 0);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index e2dcfaa4b7..4780696952 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -5,6 +5,9 @@
#ifndef V8_HEAP_OBJECT_STATS_H_
#define V8_HEAP_OBJECT_STATS_H_
+#include <set>
+
+#include "src/base/ieee754.h"
#include "src/heap/heap.h"
#include "src/heap/objects-visiting.h"
#include "src/objects.h"
@@ -14,7 +17,7 @@ namespace internal {
class ObjectStats {
public:
- explicit ObjectStats(Heap* heap) : heap_(heap) {}
+ explicit ObjectStats(Heap* heap) : heap_(heap) { ClearObjectStats(); }
// ObjectStats are kept in two arrays, counts and sizes. Related stats are
// stored in a contiguous linear buffer. Stats groups are stored one after
@@ -30,14 +33,14 @@ class ObjectStats {
void ClearObjectStats(bool clear_last_time_stats = false);
- void TraceObjectStats();
- void TraceObjectStat(const char* name, int count, int size, double time);
void CheckpointObjectStats();
+ void PrintJSON(const char* key);
void RecordObjectStats(InstanceType type, size_t size) {
DCHECK(type <= LAST_TYPE);
object_counts_[type]++;
object_sizes_[type] += size;
+ size_histogram_[type][HistogramIndexFromSize(size)]++;
}
void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
@@ -52,12 +55,27 @@ class ObjectStats {
object_sizes_[code_sub_type_index] += size;
object_counts_[code_age_index]++;
object_sizes_[code_age_index] += size;
+ const int idx = HistogramIndexFromSize(size);
+ size_histogram_[code_sub_type_index][idx]++;
+ size_histogram_[code_age_index][idx]++;
}
- void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+ bool RecordFixedArraySubTypeStats(FixedArrayBase* array, int array_sub_type,
+ size_t size, size_t over_allocated) {
+ auto it = visited_fixed_array_sub_types_.insert(array);
+ if (!it.second) return false;
DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
+ size_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
+ [HistogramIndexFromSize(size)]++;
+ if (over_allocated > 0) {
+ over_allocated_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] +=
+ over_allocated;
+ over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
+ [HistogramIndexFromSize(over_allocated)]++;
+ }
+ return true;
}
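The early return above leans on std::set::insert returning a pair<iterator, bool> whose .second member is false when the key was already present, so every FixedArray sub-type instance is accounted at most once per GC cycle. The idiom in isolation:

    #include <cassert>
    #include <set>

    int main() {
      std::set<const void*> visited;
      int array = 0;  // Stand-in for a FixedArrayBase*.
      assert(visited.insert(&array).second);   // First visit: record stats.
      assert(!visited.insert(&array).second);  // Repeat visit: bail out.
      return 0;
    }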
size_t object_count_last_gc(size_t index) {
@@ -72,28 +90,67 @@ class ObjectStats {
Heap* heap() { return heap_; }
private:
- Heap* heap_;
+ static const int kFirstBucketShift = 5; // <=32
+ static const int kLastBucketShift = 19; // >512k
+ static const int kFirstBucket = 1 << kFirstBucketShift;
+ static const int kLastBucket = 1 << kLastBucketShift;
+ static const int kNumberOfBuckets = kLastBucketShift - kFirstBucketShift + 1;
+
+ int HistogramIndexFromSize(size_t size) {
+ if (size == 0) return 0;
+ int idx = static_cast<int>(base::ieee754::log2(static_cast<double>(size))) -
+ kFirstBucketShift;
+ return idx < 0 ? 0 : idx;
+ }
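Worked examples for HistogramIndexFromSize() above: a 24-byte object gives log2(24) ~ 4.58, truncated to 4, so 4 - 5 = -1 clamps to bucket 0 (the "<= 32" bucket); a 100000-byte object gives log2 ~ 16.61, truncated to 16, landing in bucket 16 - 5 = 11.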
- // Object counts and used memory by InstanceType
+ Heap* heap_;
+ // Object counts and used memory by InstanceType.
size_t object_counts_[OBJECT_STATS_COUNT];
size_t object_counts_last_time_[OBJECT_STATS_COUNT];
size_t object_sizes_[OBJECT_STATS_COUNT];
size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
-};
+ // Approximation of overallocated memory by InstanceType.
+ size_t over_allocated_[OBJECT_STATS_COUNT];
+ // Detailed histograms by InstanceType.
+ size_t size_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
+ size_t over_allocated_histogram_[OBJECT_STATS_COUNT][kNumberOfBuckets];
+ std::set<FixedArrayBase*> visited_fixed_array_sub_types_;
+};
-class ObjectStatsVisitor : public StaticMarkingVisitor<ObjectStatsVisitor> {
+class ObjectStatsCollector {
public:
- static void Initialize(VisitorDispatchTable<Callback>* original);
+ ObjectStatsCollector(Heap* heap, ObjectStats* stats)
+ : heap_(heap), stats_(stats) {}
- static void VisitBase(VisitorId id, Map* map, HeapObject* obj);
+ void CollectGlobalStatistics();
+ void CollectStatistics(HeapObject* obj);
- static void CountFixedArray(FixedArrayBase* fixed_array,
- FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type);
+ private:
+ class CompilationCacheTableVisitor;
+
+ void RecordBytecodeArrayDetails(BytecodeArray* obj);
+ void RecordCodeDetails(Code* code);
+ void RecordFixedArrayDetails(FixedArray* array);
+ void RecordJSCollectionDetails(JSObject* obj);
+ void RecordJSFunctionDetails(JSFunction* function);
+ void RecordJSObjectDetails(JSObject* object);
+ void RecordJSWeakCollectionDetails(JSWeakCollection* obj);
+ void RecordMapDetails(Map* map);
+ void RecordScriptDetails(Script* obj);
+ void RecordTemplateInfoDetails(TemplateInfo* obj);
+ void RecordSharedFunctionInfoDetails(SharedFunctionInfo* sfi);
+
+ bool RecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
+ int subtype, size_t overhead);
+ void RecursivelyRecordFixedArrayHelper(HeapObject* parent, FixedArray* array,
+ int subtype);
+ template <class HashTable>
+ void RecordHashTableHelper(HeapObject* parent, HashTable* array, int subtype);
+ Heap* heap_;
+ ObjectStats* stats_;
- template <VisitorId id>
- static inline void Visit(Map* map, HeapObject* obj);
+ friend class ObjectStatsCollector::CompilationCacheTableVisitor;
};
} // namespace internal
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index c415713ee3..148975f630 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -77,7 +77,10 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
&FlexibleBodyVisitor<StaticVisitor, JSFunction::BodyDescriptorWeakCode,
int>::Visit);
- table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+ table_.Register(
+ kVisitJSArrayBuffer,
+ &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+ int>::Visit);
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
@@ -90,25 +93,15 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
kVisitJSObjectGeneric>();
- table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
- kVisitStructGeneric>();
-}
-
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
- Map* map, HeapObject* object) {
- typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor, int>
- JSArrayBufferBodyVisitor;
+ // Not using specialized Api object visitor for newspace.
+ table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSApiObject,
+ kVisitJSApiObjectGeneric>();
- if (!JSArrayBuffer::cast(object)->is_external()) {
- Heap* heap = map->GetHeap();
- heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
- }
- return JSArrayBufferBodyVisitor::Visit(map, object);
+ table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
+ kVisitStructGeneric>();
}
-
template <typename StaticVisitor>
int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
Map* map, HeapObject* object) {
@@ -180,7 +173,10 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitJSFunction, &VisitJSFunction);
- table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+ table_.Register(
+ kVisitJSArrayBuffer,
+ &FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
+ void>::Visit);
// Registration for kVisitJSRegExp is done by StaticVisitor.
@@ -200,6 +196,9 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
kVisitJSObjectGeneric>();
+ table_.template RegisterSpecializations<JSApiObjectVisitor, kVisitJSApiObject,
+ kVisitJSApiObjectGeneric>();
+
table_.template RegisterSpecializations<StructObjectVisitor, kVisitStruct,
kVisitStructGeneric>();
}
@@ -265,8 +264,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
- !target->is_call_stub() && (heap->isolate()->serializer_enabled() ||
- target->ic_age() != heap->global_ic_age())) {
+ (heap->isolate()->serializer_enabled() ||
+ target->ic_age() != heap->global_ic_age())) {
ICUtility::Clear(heap->isolate(), rinfo->pc(),
rinfo->host()->constant_pool());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -377,7 +376,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitTransitionArray(
}
// Enqueue the array in linked list of encountered transition arrays if it is
// not already in the list.
- if (array->next_link()->IsUndefined()) {
+ if (array->next_link()->IsUndefined(heap->isolate())) {
Heap* heap = map->GetHeap();
array->set_next_link(heap->encountered_transition_arrays(),
UPDATE_WEAK_WRITE_BARRIER);
@@ -451,9 +450,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
- if (FLAG_cleanup_code_caches_at_gc) {
- shared->ClearTypeFeedbackInfoAtGCTime();
- }
if (FLAG_flush_optimized_code_cache) {
if (!shared->OptimizedCodeMapIsCleared()) {
// Always flush the optimized code map if requested by flag.
@@ -484,6 +480,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
HeapObject* object) {
Heap* heap = map->GetHeap();
JSFunction* function = JSFunction::cast(object);
+ if (FLAG_cleanup_code_caches_at_gc) {
+ function->ClearTypeFeedbackInfoAtGCTime();
+ }
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, function)) {
@@ -512,24 +511,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
JSObjectVisitor::Visit(map, object);
}
-
-template <typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
- Map* map, HeapObject* object) {
- Heap* heap = map->GetHeap();
-
- typedef FlexibleBodyVisitor<StaticVisitor, JSArrayBuffer::BodyDescriptor,
- void> JSArrayBufferBodyVisitor;
-
- JSArrayBufferBodyVisitor::Visit(map, object);
-
- if (!JSArrayBuffer::cast(object)->is_external() &&
- !heap->InNewSpace(object)) {
- heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
- }
-}
-
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
Map* map, HeapObject* object) {
@@ -589,7 +570,7 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- MarkBit code_mark = Marking::MarkBitFrom(function->code());
+ MarkBit code_mark = ObjectMarking::MarkBitFrom(function->code());
if (Marking::IsBlackOrGrey(code_mark)) {
return false;
}
@@ -613,7 +594,7 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
Heap* heap, SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
+ MarkBit code_mark = ObjectMarking::MarkBitFrom(shared_info->code());
if (Marking::IsBlackOrGrey(code_mark)) {
return false;
}
@@ -625,8 +606,7 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
}
// We never flush code for API functions.
- Object* function_data = shared_info->function_data();
- if (function_data->IsFunctionTemplateInfo()) {
+ if (shared_info->IsApiFunction()) {
return false;
}
@@ -640,9 +620,10 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- // We do not (yet?) flush code for generator functions, because we don't know
- // if there are still live activations (generator objects) on the heap.
- if (shared_info->is_generator()) {
+ // We do not (yet?) flush code for generator functions, or async functions,
+ // because we don't know if there are still live activations
+ // (generator objects) on the heap.
+ if (shared_info->is_resumable()) {
return false;
}
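
The two hunks above retire the hand-written VisitJSArrayBuffer callbacks: array buffers are now visited through a plain FlexibleBodyVisitor registered directly in the static dispatch table. As a rough illustration of that table-driven pattern, here is a self-contained sketch; every type and name below is an invented stand-in, not V8's actual API.

    #include <cassert>

    struct Map;
    struct HeapObject;

    enum VisitorId { kVisitJSArrayBuffer, kVisitFreeSpace, kVisitorIdCount };

    // A table of callbacks indexed by visitor id, filled in by Initialize().
    template <typename Callback, int kSize>
    class VisitorDispatchTable {
     public:
      void Register(VisitorId id, Callback callback) { callbacks_[id] = callback; }
      Callback GetVisitor(VisitorId id) const { return callbacks_[id]; }

     private:
      Callback callbacks_[kSize] = {};
    };

    // A body visitor parameterized over a descriptor that knows the slot
    // layout; the real version walks the object's pointer fields and returns
    // the object size.
    template <typename BodyDescriptor, typename ReturnType>
    struct FlexibleBodyVisitor {
      static ReturnType Visit(Map* /*map*/, HeapObject* /*object*/) {
        return ReturnType();
      }
    };

    struct JSArrayBufferBodyDescriptor {};  // placeholder layout descriptor

    int main() {
      VisitorDispatchTable<int (*)(Map*, HeapObject*), kVisitorIdCount> table;
      table.Register(kVisitJSArrayBuffer,
                     &FlexibleBodyVisitor<JSArrayBufferBodyDescriptor, int>::Visit);
      assert(table.GetVisitor(kVisitJSArrayBuffer) != nullptr);
      return 0;
    }
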
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 0b857dc423..83e2e1c820 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -103,6 +103,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
return kVisitJSArrayBuffer;
case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
@@ -111,7 +113,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARRAY_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
@@ -123,6 +124,10 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
instance_size, has_unboxed_fields);
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ return GetVisitorIdForSize(kVisitJSApiObject, kVisitJSApiObjectGeneric,
+ instance_size, has_unboxed_fields);
case JS_FUNCTION_TYPE:
return kVisitJSFunction;
@@ -209,7 +214,7 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
}
}
// Retained object is new tail.
- DCHECK(!retained->IsUndefined());
+ DCHECK(!retained->IsUndefined(heap->isolate()));
candidate = reinterpret_cast<T*>(retained);
tail = candidate;
@@ -282,7 +287,7 @@ struct WeakListVisitor<Context> {
}
static Object* WeakNext(Context* context) {
- return context->get(Context::NEXT_CONTEXT_LINK);
+ return context->next_context_link();
}
static int WeakNextOffset() {
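
GetVisitorId above now routes JS_API_OBJECT_TYPE and JS_SPECIAL_API_OBJECT_TYPE through GetVisitorIdForSize, which selects one of the size-specialized ids (JSApiObject2 through JSApiObject9, counted in pointer words) or the generic fallback. The bucketing logic, reconstructed loosely with invented constants:

    #include <cassert>

    constexpr int kPointerSize = 8;  // stand-in; 4 on 32-bit targets

    enum VisitorId {
      kVisitJSApiObject2,
      kVisitJSApiObject3,
      kVisitJSApiObject4,
      kVisitJSApiObject5,
      kVisitJSApiObject6,
      kVisitJSApiObject7,
      kVisitJSApiObject8,
      kVisitJSApiObject9,
      kVisitJSApiObjectGeneric,
    };

    // Pick the specialization whose word count matches object_size, or fall
    // back to the generic visitor (also for unboxed-field layouts, which the
    // specialized fast paths cannot handle).
    VisitorId GetVisitorIdForSize(VisitorId base, VisitorId generic,
                                  int object_size, bool has_unboxed_fields) {
      if (has_unboxed_fields) return generic;
      int words = object_size / kPointerSize;
      if (words < 2 || words > 9) return generic;
      return static_cast<VisitorId>(base + (words - 2));
    }

    int main() {
      assert(GetVisitorIdForSize(kVisitJSApiObject2, kVisitJSApiObjectGeneric,
                                 5 * kPointerSize, false) == kVisitJSApiObject5);
      assert(GetVisitorIdForSize(kVisitJSApiObject2, kVisitJSApiObjectGeneric,
                                 32 * kPointerSize, false) ==
             kVisitJSApiObjectGeneric);
      return 0;
    }
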
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 1fe8a1749a..303db0eb07 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -58,6 +58,15 @@ class StaticVisitorBase : public AllStatic {
V(JSObject8) \
V(JSObject9) \
V(JSObjectGeneric) \
+ V(JSApiObject2) \
+ V(JSApiObject3) \
+ V(JSApiObject4) \
+ V(JSApiObject5) \
+ V(JSApiObject6) \
+ V(JSApiObject7) \
+ V(JSApiObject8) \
+ V(JSApiObject9) \
+ V(JSApiObjectGeneric) \
V(Struct2) \
V(Struct3) \
V(Struct4) \
@@ -96,9 +105,10 @@ class StaticVisitorBase : public AllStatic {
#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
#undef VISITOR_ID_ENUM_DECL
- kVisitorIdCount,
+ kVisitorIdCount,
kVisitDataObject = kVisitDataObject2,
kVisitJSObject = kVisitJSObject2,
+ kVisitJSApiObject = kVisitJSApiObject2,
kVisitStruct = kVisitStruct2,
};
@@ -119,11 +129,12 @@ class StaticVisitorBase : public AllStatic {
int object_size,
bool has_unboxed_fields) {
DCHECK((base == kVisitDataObject) || (base == kVisitStruct) ||
- (base == kVisitJSObject));
+ (base == kVisitJSObject) || (base == kVisitJSApiObject));
DCHECK(IsAligned(object_size, kPointerSize));
DCHECK(Heap::kMinObjectSizeInWords * kPointerSize <= object_size);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
- DCHECK(!has_unboxed_fields || (base == kVisitJSObject));
+ DCHECK(!has_unboxed_fields || (base == kVisitJSObject) ||
+ (base == kVisitJSApiObject));
if (has_unboxed_fields) return generic;
@@ -289,7 +300,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FreeSpace::cast(object)->size();
}
- INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
class DataObjectVisitor {
@@ -368,7 +378,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
- INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
@@ -400,6 +409,28 @@ class StaticMarkingVisitor : public StaticVisitorBase {
typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void>
JSObjectVisitor;
+ class JSApiObjectVisitor : AllStatic {
+ public:
+ template <int size>
+ static inline void VisitSpecialized(Map* map, HeapObject* object) {
+ TracePossibleWrapper(object);
+ JSObjectVisitor::template VisitSpecialized<size>(map, object);
+ }
+
+ INLINE(static void Visit(Map* map, HeapObject* object)) {
+ TracePossibleWrapper(object);
+ JSObjectVisitor::Visit(map, object);
+ }
+
+ private:
+ INLINE(static void TracePossibleWrapper(HeapObject* object)) {
+ if (object->GetHeap()->UsingEmbedderHeapTracer()) {
+ DCHECK(object->IsJSObject());
+ object->GetHeap()->TracePossibleWrapper(JSObject::cast(object));
+ }
+ }
+ };
+
typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, void>
StructObjectVisitor;
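
The new JSApiObjectVisitor above is a thin decorator: it reports the object to the embedder heap tracer as a possible wrapper, then defers to the ordinary JSObjectVisitor for the body walk. A compilable sketch of that shape, with the tracer hook and all types reduced to stand-ins:

    #include <cstdio>

    struct Map {};
    struct HeapObject {
      bool embedder_tracer_active = false;
    };

    struct JSObjectVisitor {
      static void Visit(Map* /*map*/, HeapObject* /*object*/) {
        std::printf("visiting object body\n");
      }
    };

    struct JSApiObjectVisitor {
      static void Visit(Map* map, HeapObject* object) {
        TracePossibleWrapper(object);         // extra step for API objects
        JSObjectVisitor::Visit(map, object);  // then the normal body visit
      }

     private:
      static void TracePossibleWrapper(HeapObject* object) {
        if (object->embedder_tracer_active) {
          std::printf("reporting possible wrapper to the embedder tracer\n");
        }
      }
    };

    int main() {
      Map map;
      HeapObject object;
      object.embedder_tracer_active = true;
      JSApiObjectVisitor::Visit(&map, &object);
      return 0;
    }
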
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
index 02583c7818..440c440b7e 100644
--- a/deps/v8/src/heap/page-parallel-job.h
+++ b/deps/v8/src/heap/page-parallel-job.h
@@ -37,7 +37,7 @@ class PageParallelJob {
// glibc. See http://crbug.com/609249 and
// https://sourceware.org/bugzilla/show_bug.cgi?id=12674.
// The caller must provide a semaphore with value 0 and ensure that
- // the lifetime of the semaphore is the same as the lifetime of the Isolate
+ // the lifetime of the semaphore is the same as the lifetime of the Isolate.
// It is guaranteed that the semaphore value will be 0 after Run() call.
PageParallelJob(Heap* heap, CancelableTaskManager* cancelable_task_manager,
base::Semaphore* semaphore)
@@ -127,7 +127,7 @@ class PageParallelJob {
Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
: chunk(chunk), state(kAvailable), data(data), next(next) {}
MemoryChunk* chunk;
- AtomicValue<ProcessingState> state;
+ base::AtomicValue<ProcessingState> state;
typename JobTraits::PerPageData data;
Item* next;
};
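
Item::state above becomes a base::AtomicValue so competing worker tasks can claim pages without locking; the semaphore mentioned in the comment only coordinates shutdown. A stand-alone model of the claim protocol, using std::atomic in place of base::AtomicValue (everything else is invented):

    #include <atomic>
    #include <cstdio>

    enum ProcessingState { kAvailable, kProcessing, kFinished };

    struct Item {
      std::atomic<ProcessingState> state{kAvailable};
    };

    // Returns true if this caller won the race and may process the item.
    bool TryClaim(Item* item) {
      ProcessingState expected = kAvailable;
      return item->state.compare_exchange_strong(expected, kProcessing);
    }

    int main() {
      Item item;
      if (TryClaim(&item)) {
        item.state.store(kFinished);  // mark the page done for other workers
        std::printf("claimed and finished\n");
      }
      return 0;
    }
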
diff --git a/deps/v8/src/heap/remembered-set.cc b/deps/v8/src/heap/remembered-set.cc
index 403c99b057..6575d55d52 100644
--- a/deps/v8/src/heap/remembered-set.cc
+++ b/deps/v8/src/heap/remembered-set.cc
@@ -9,6 +9,7 @@
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
+#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -16,10 +17,7 @@ namespace internal {
template <PointerDirection direction>
void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_NEW);
- PageIterator it(heap->old_space());
- MemoryChunk* chunk;
- while (it.has_next()) {
- chunk = it.next();
+ for (MemoryChunk* chunk : *heap->old_space()) {
SlotSet* slots = GetSlotSet(chunk);
if (slots != nullptr) {
slots->Iterate([heap, chunk](Address addr) {
@@ -28,6 +26,32 @@ void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
});
}
}
+ for (MemoryChunk* chunk : *heap->code_space()) {
+ TypedSlotSet* slots = GetTypedSlotSet(chunk);
+ if (slots != nullptr) {
+ slots->Iterate(
+ [heap, chunk](SlotType type, Address host_addr, Address addr) {
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
+ return KEEP_SLOT;
+ } else {
+ return REMOVE_SLOT;
+ }
+ });
+ }
+ }
+ for (MemoryChunk* chunk : *heap->map_space()) {
+ SlotSet* slots = GetSlotSet(chunk);
+ if (slots != nullptr) {
+ slots->Iterate([heap, chunk](Address addr) {
+ Object** slot = reinterpret_cast<Object**>(addr);
+ // TODO(mlippautz): In map space all allocations would ideally be map
+ // aligned. After establishing this invariant IsValidSlot could just
+ // refer to the containing object using alignment and check the mark
+ // bits.
+ return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
+ });
+ }
+ }
}
template <PointerDirection direction>
@@ -64,7 +88,7 @@ bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
HeapObject* heap_object = HeapObject::cast(object);
// If the target object is not black, the source slot must be part
// of a non-black (dead) object.
- return Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
+ return Marking::IsBlack(ObjectMarking::MarkBitFrom(heap_object)) &&
heap->mark_compact_collector()->IsSlotInBlackObject(
chunk, reinterpret_cast<Address>(slot));
}
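
ClearInvalidSlots above now also scans the code and map spaces, handing each recorded address to a lambda that answers KEEP_SLOT or REMOVE_SLOT. The filter-while-iterating contract, modeled over a plain vector (SlotCallbackResult mirrors the names above; the container and predicate are invented):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
    using Address = uintptr_t;

    template <typename Callback>
    void Iterate(std::vector<Address>* slots, Callback callback) {
      size_t kept = 0;
      for (Address addr : *slots) {
        if (callback(addr) == KEEP_SLOT) (*slots)[kept++] = addr;
      }
      slots->resize(kept);  // drop everything the callback rejected
    }

    int main() {
      std::vector<Address> slots = {0x10, 0x20, 0x30};
      // Keep only slots below 0x28, mimicking an IsValidSlot-style predicate.
      Iterate(&slots,
              [](Address a) { return a < 0x28 ? KEEP_SLOT : REMOVE_SLOT; });
      std::printf("%zu slots kept\n", slots.size());
      return 0;
    }
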
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 45408bf1e9..8022d52775 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,6 +5,7 @@
#ifndef V8_REMEMBERED_SET_H
#define V8_REMEMBERED_SET_H
+#include "src/assembler.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
@@ -14,44 +15,71 @@ namespace internal {
enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
+// TODO(ulan): Investigate performance of de-templatizing this class.
template <PointerDirection direction>
class RememberedSet {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
- static void Insert(Page* page, Address slot_addr) {
- DCHECK(page->Contains(slot_addr));
- SlotSet* slot_set = GetSlotSet(page);
+ static void Insert(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = GetSlotSet(chunk);
if (slot_set == nullptr) {
- slot_set = AllocateSlotSet(page);
+ slot_set = AllocateSlotSet(chunk);
}
- uintptr_t offset = slot_addr - page->address();
+ uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
}
// Given a page and a slot in that page, this function removes the slot from
// the remembered set.
// If the slot was never added, then the function does nothing.
- static void Remove(Page* page, Address slot_addr) {
- DCHECK(page->Contains(slot_addr));
- SlotSet* slot_set = GetSlotSet(page);
+ static void Remove(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = GetSlotSet(chunk);
if (slot_set != nullptr) {
- uintptr_t offset = slot_addr - page->address();
+ uintptr_t offset = slot_addr - chunk->address();
slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
}
}
// Given a page and a range of slots in that page, this function removes the
// slots from the remembered set.
- static void RemoveRange(Page* page, Address start, Address end) {
- SlotSet* slot_set = GetSlotSet(page);
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end) {
+ SlotSet* slot_set = GetSlotSet(chunk);
if (slot_set != nullptr) {
- uintptr_t start_offset = start - page->address();
- uintptr_t end_offset = end - page->address();
+ uintptr_t start_offset = start - chunk->address();
+ uintptr_t end_offset = end - chunk->address();
DCHECK_LT(start_offset, end_offset);
- DCHECK_LE(end_offset, static_cast<uintptr_t>(Page::kPageSize));
- slot_set->RemoveRange(static_cast<uint32_t>(start_offset),
- static_cast<uint32_t>(end_offset));
+ if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
+ slot_set->RemoveRange(static_cast<int>(start_offset),
+ static_cast<int>(end_offset));
+ } else {
+ // The large page has multiple slot sets.
+      // Compute slot set indices for the range [start_offset, end_offset).
+ int start_chunk = static_cast<int>(start_offset / Page::kPageSize);
+ int end_chunk = static_cast<int>((end_offset - 1) / Page::kPageSize);
+ int offset_in_start_chunk =
+ static_cast<int>(start_offset % Page::kPageSize);
+ // Note that using end_offset % Page::kPageSize would be incorrect
+ // because end_offset is one beyond the last slot to clear.
+ int offset_in_end_chunk = static_cast<int>(
+ end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
+ if (start_chunk == end_chunk) {
+ slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
+ offset_in_end_chunk);
+ } else {
+        // Clear all slots from start_offset to the end of the first chunk.
+ slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
+ Page::kPageSize);
+ // Clear all slots in intermediate chunks.
+ for (int i = start_chunk + 1; i < end_chunk; i++) {
+ slot_set[i].RemoveRange(0, Page::kPageSize);
+ }
+        // Clear slots from the beginning of the last chunk to end_offset.
+ slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk);
+ }
+ }
}
}
@@ -67,9 +95,7 @@ class RememberedSet {
// The callback should take (MemoryChunk* chunk) and return void.
template <typename Callback>
static void IterateMemoryChunks(Heap* heap, Callback callback) {
- MemoryChunkIterator it(heap, direction == OLD_TO_OLD
- ? MemoryChunkIterator::ALL
- : MemoryChunkIterator::ALL_BUT_CODE_SPACE);
+ MemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = GetSlotSet(chunk);
@@ -98,62 +124,58 @@ class RememberedSet {
}
}
- // Iterates and filters the remembered set with the given callback.
- // The callback should take (HeapObject** slot, HeapObject* target) and
- // update the slot.
- // A special wrapper takes care of filtering the slots based on their values.
- // For OLD_TO_NEW case: slots that do not point to the ToSpace after
- // callback invocation will be removed from the set.
- template <typename Callback>
- static void IterateWithWrapper(Heap* heap, Callback callback) {
- Iterate(heap, [heap, callback](Address addr) {
- return Wrapper(heap, addr, callback);
- });
- }
-
- template <typename Callback>
- static void IterateWithWrapper(Heap* heap, MemoryChunk* chunk,
- Callback callback) {
- Iterate(chunk, [heap, callback](Address addr) {
- return Wrapper(heap, addr, callback);
- });
- }
-
// Given a page and a typed slot in that page, this function adds the slot
// to the remembered set.
- static void InsertTyped(Page* page, SlotType slot_type, Address slot_addr) {
- STATIC_ASSERT(direction == OLD_TO_OLD);
- TypedSlotSet* slot_set = page->typed_old_to_old_slots();
+ static void InsertTyped(Page* page, Address host_addr, SlotType slot_type,
+ Address slot_addr) {
+ TypedSlotSet* slot_set = GetTypedSlotSet(page);
if (slot_set == nullptr) {
- page->AllocateTypedOldToOldSlots();
- slot_set = page->typed_old_to_old_slots();
+ AllocateTypedSlotSet(page);
+ slot_set = GetTypedSlotSet(page);
+ }
+ if (host_addr == nullptr) {
+ host_addr = page->address();
}
uintptr_t offset = slot_addr - page->address();
+ uintptr_t host_offset = host_addr - page->address();
DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
- slot_set->Insert(slot_type, static_cast<uint32_t>(offset));
+ DCHECK_LT(host_offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
+ slot_set->Insert(slot_type, static_cast<uint32_t>(host_offset),
+ static_cast<uint32_t>(offset));
}
// Given a page and a range of typed slots in that page, this function removes
// the slots from the remembered set.
- static void RemoveRangeTyped(Page* page, Address start, Address end) {
- TypedSlotSet* slots = page->typed_old_to_old_slots();
+ static void RemoveRangeTyped(MemoryChunk* page, Address start, Address end) {
+ TypedSlotSet* slots = GetTypedSlotSet(page);
if (slots != nullptr) {
- slots->Iterate([start, end](SlotType slot_type, Address slot_addr) {
+ slots->Iterate([start, end](SlotType slot_type, Address host_addr,
+ Address slot_addr) {
return start <= slot_addr && slot_addr < end ? REMOVE_SLOT : KEEP_SLOT;
});
}
}
+  // Iterates and filters the remembered set with the given callback.
+  // The callback should take (SlotType slot_type, Address host_addr,
+  // Address slot_addr) and return SlotCallbackResult.
+ template <typename Callback>
+ static void IterateTyped(Heap* heap, Callback callback) {
+ IterateMemoryChunks(heap, [callback](MemoryChunk* chunk) {
+ IterateTyped(chunk, callback);
+ });
+ }
+
// Iterates and filters typed old to old pointers in the given memory chunk
// with the given callback. The callback should take (SlotType slot_type,
   // Address host_addr, Address slot_addr) and return SlotCallbackResult.
template <typename Callback>
static void IterateTyped(MemoryChunk* chunk, Callback callback) {
- TypedSlotSet* slots = chunk->typed_old_to_old_slots();
+ TypedSlotSet* slots = GetTypedSlotSet(chunk);
if (slots != nullptr) {
int new_count = slots->Iterate(callback);
if (new_count == 0) {
- chunk->ReleaseTypedOldToOldSlots();
+ ReleaseTypedSlotSet(chunk);
}
}
}
@@ -161,7 +183,7 @@ class RememberedSet {
// Clear all old to old slots from the remembered set.
static void ClearAll(Heap* heap) {
STATIC_ASSERT(direction == OLD_TO_OLD);
- MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
+ MemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseOldToOldSlots();
@@ -190,7 +212,7 @@ class RememberedSet {
if (direction == OLD_TO_OLD) {
return chunk->typed_old_to_old_slots();
} else {
- return nullptr;
+ return chunk->typed_old_to_new_slots();
}
}
@@ -202,6 +224,14 @@ class RememberedSet {
}
}
+ static void ReleaseTypedSlotSet(MemoryChunk* chunk) {
+ if (direction == OLD_TO_OLD) {
+ chunk->ReleaseTypedOldToOldSlots();
+ } else {
+ chunk->ReleaseTypedOldToNewSlots();
+ }
+ }
+
static SlotSet* AllocateSlotSet(MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
chunk->AllocateOldToOldSlots();
@@ -212,33 +242,149 @@ class RememberedSet {
}
}
- template <typename Callback>
- static SlotCallbackResult Wrapper(Heap* heap, Address slot_address,
- Callback slot_callback) {
- STATIC_ASSERT(direction == OLD_TO_NEW);
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
- if (heap->InFromSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- DCHECK(heap_object->IsHeapObject());
- slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
- object = *slot;
- // If the object was in from space before and is after executing the
- // callback in to space, the object is still live.
- // Unfortunately, we do not know about the slot. It could be in a
- // just freed free space object.
- if (heap->InToSpace(object)) {
- return KEEP_SLOT;
- }
+ static TypedSlotSet* AllocateTypedSlotSet(MemoryChunk* chunk) {
+ if (direction == OLD_TO_OLD) {
+ chunk->AllocateTypedOldToOldSlots();
+ return chunk->typed_old_to_old_slots();
} else {
- DCHECK(!heap->InNewSpace(object));
+ chunk->AllocateTypedOldToNewSlots();
+ return chunk->typed_old_to_new_slots();
}
- return REMOVE_SLOT;
}
static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, Object** slot);
};
+class UpdateTypedSlotHelper {
+ public:
+ // Updates a cell slot using an untyped slot callback.
+  // The callback accepts (Object**) and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateCell(RelocInfo* rinfo, Callback callback) {
+ DCHECK(rinfo->rmode() == RelocInfo::CELL);
+ Object* cell = rinfo->target_cell();
+ Object* old_cell = cell;
+ SlotCallbackResult result = callback(&cell);
+ if (cell != old_cell) {
+ rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
+ }
+ return result;
+ }
+
+ // Updates a code entry slot using an untyped slot callback.
+  // The callback accepts (Object**) and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateCodeEntry(Address entry_address,
+ Callback callback) {
+ Object* code = Code::GetObjectFromEntryAddress(entry_address);
+ Object* old_code = code;
+ SlotCallbackResult result = callback(&code);
+ if (code != old_code) {
+ Memory::Address_at(entry_address) =
+ reinterpret_cast<Code*>(code)->entry();
+ }
+ return result;
+ }
+
+ // Updates a code target slot using an untyped slot callback.
+  // The callback accepts (Object**) and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
+ Callback callback) {
+ DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Object* old_target = target;
+ SlotCallbackResult result = callback(&target);
+ if (target != old_target) {
+ rinfo->set_target_address(Code::cast(target)->instruction_start());
+ }
+ return result;
+ }
+
+ // Updates an embedded pointer slot using an untyped slot callback.
+  // The callback accepts (Object**) and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateEmbeddedPointer(RelocInfo* rinfo,
+ Callback callback) {
+ DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ Object* target = rinfo->target_object();
+ Object* old_target = target;
+ SlotCallbackResult result = callback(&target);
+ if (target != old_target) {
+ rinfo->set_target_object(target);
+ }
+ return result;
+ }
+
+ // Updates a debug target slot using an untyped slot callback.
+  // The callback accepts (Object**) and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateDebugTarget(RelocInfo* rinfo,
+ Callback callback) {
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Object* target =
+ Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+ SlotCallbackResult result = callback(&target);
+ rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
+ return result;
+ }
+
+ // Updates a typed slot using an untyped slot callback.
+  // The callback accepts (Object**) and returns SlotCallbackResult.
+ template <typename Callback>
+ static SlotCallbackResult UpdateTypedSlot(Isolate* isolate,
+ SlotType slot_type, Address addr,
+ Callback callback) {
+ switch (slot_type) {
+ case CODE_TARGET_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
+ return UpdateCodeTarget(&rinfo, callback);
+ }
+ case CELL_TARGET_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
+ return UpdateCell(&rinfo, callback);
+ }
+ case CODE_ENTRY_SLOT: {
+ return UpdateCodeEntry(addr, callback);
+ }
+ case DEBUG_TARGET_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION,
+ 0, NULL);
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) {
+ return UpdateDebugTarget(&rinfo, callback);
+ }
+ return REMOVE_SLOT;
+ }
+ case EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+ return UpdateEmbeddedPointer(&rinfo, callback);
+ }
+ case OBJECT_SLOT: {
+ return callback(reinterpret_cast<Object**>(addr));
+ }
+ case NUMBER_OF_SLOT_TYPES:
+ break;
+ }
+ UNREACHABLE();
+ return REMOVE_SLOT;
+ }
+};
+
+inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ return CODE_TARGET_SLOT;
+ } else if (RelocInfo::IsCell(rmode)) {
+ return CELL_TARGET_SLOT;
+ } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+ return EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+ return DEBUG_TARGET_SLOT;
+ }
+ UNREACHABLE();
+ return NUMBER_OF_SLOT_TYPES;
+}
+
} // namespace internal
} // namespace v8
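
The multi-chunk RemoveRange above has to map a half-open offset range [start_offset, end_offset) onto per-page slot sets, and the in-code note about end_offset is the crux: because end_offset is exclusive, the last affected set is (end_offset - 1) / kPageSize, not end_offset / kPageSize. A worked check of that arithmetic, using a stand-in page size rather than V8's real Page::kPageSize:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 20;  // stand-in value

    int main() {
      // A range that ends exactly on a page boundary...
      uintptr_t start_offset = kPageSize - 8;
      uintptr_t end_offset = kPageSize;  // one past the last slot to clear
      int start_chunk = static_cast<int>(start_offset / kPageSize);     // 0
      int end_chunk = static_cast<int>((end_offset - 1) / kPageSize);   // 0
      assert(start_chunk == 0 && end_chunk == 0);
      // ...must not spill into chunk 1, which the naive end_offset / kPageSize
      // computation would wrongly claim it does.
      assert(static_cast<int>(end_offset / kPageSize) == 1);

      // The exclusive end offset within the last chunk covers the full chunk.
      int offset_in_end_chunk = static_cast<int>(
          end_offset - static_cast<uintptr_t>(end_chunk) * kPageSize);
      assert(offset_in_end_chunk == static_cast<int>(kPageSize));
      return 0;
    }
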
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index b8fd1c8292..9671f3615f 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -37,10 +37,35 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
return ScavengeObjectSlow(p, object);
}
+SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
+ Address slot_address) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ Object* object = *slot;
+ if (heap->InFromSpace(object)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ DCHECK(heap_object->IsHeapObject());
+
+ ScavengeObject(reinterpret_cast<HeapObject**>(slot), heap_object);
+
+ object = *slot;
+ // If the object was in from space before and is after executing the
+ // callback in to space, the object is still live.
+ // Unfortunately, we do not know about the slot. It could be in a
+ // just freed free space object.
+ if (heap->InToSpace(object)) {
+ return KEEP_SLOT;
+ }
+ }
+ // Slots can point to "to" space if the slot has been recorded multiple
+ // times in the remembered set. We remove the redundant slot now.
+ return REMOVE_SLOT;
+}
// static
-void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
- Object** p) {
+template <PromotionMode promotion_mode>
+void StaticScavengeVisitor<promotion_mode>::VisitPointer(Heap* heap,
+ HeapObject* obj,
+ Object** p) {
Object* object = *p;
if (!heap->InNewSpace(object)) return;
Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
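
CheckAndScavengeObject above absorbs the removed RememberedSet::Wrapper: after evacuating a from-space object, it keeps the remembered-set entry only when the slot still points into to-space. A stripped-down model of that decision, with the semispaces reduced to two booleans:

    #include <cstdio>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    struct Object {
      bool in_from_space;
      bool in_to_space;
    };

    // Pretend evacuation: moves a from-space object into to-space.
    void ScavengeObject(Object** slot) {
      (*slot)->in_from_space = false;
      (*slot)->in_to_space = true;
    }

    SlotCallbackResult CheckAndScavengeObject(Object** slot) {
      if ((*slot)->in_from_space) {
        ScavengeObject(slot);
        // Still live and in to-space: the remembered-set entry stays useful.
        if ((*slot)->in_to_space) return KEEP_SLOT;
      }
      // Not in from-space (or promoted out of new space): drop the entry.
      return REMOVE_SLOT;
    }

    int main() {
      Object o{true, false};
      Object* slot = &o;
      std::printf("%s\n", CheckAndScavengeObject(&slot) == KEEP_SLOT
                              ? "keep"
                              : "remove");
      return 0;
    }
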
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 3f532ead62..59d04300e6 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -10,7 +10,6 @@
#include "src/heap/scavenger-inl.h"
#include "src/isolate.h"
#include "src/log.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -23,8 +22,7 @@ enum LoggingAndProfiling {
enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-
-template <MarksHandling marks_handling,
+template <MarksHandling marks_handling, PromotionMode promotion_mode,
LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
@@ -37,7 +35,8 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
- table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);
+ table_.Register(kVisitJSArrayBuffer,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(
kVisitNativeContext,
@@ -78,6 +77,10 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
kVisitJSObject, kVisitJSObjectGeneric>();
+ table_
+ .RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+ kVisitJSApiObject, kVisitJSApiObjectGeneric>();
+
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
kVisitStruct, kVisitStructGeneric>();
}
@@ -135,7 +138,7 @@ class ScavengingVisitor : public StaticVisitorBase {
}
if (marks_handling == TRANSFER_MARKS) {
- if (Marking::TransferColor(source, target)) {
+ if (IncrementalMarking::TransferColor(source, target, size)) {
MemoryChunk::IncrementLiveBytesFromGC(target, size);
}
}
@@ -188,7 +191,7 @@ class ScavengingVisitor : public StaticVisitorBase {
if (object_contents == POINTER_OBJECT) {
heap->promotion_queue()->insert(
target, object_size,
- Marking::IsBlack(Marking::MarkBitFrom(object)));
+ Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
@@ -196,7 +199,6 @@ class ScavengingVisitor : public StaticVisitorBase {
return false;
}
-
template <ObjectContents object_contents, AllocationAlignment alignment>
static inline void EvacuateObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
@@ -204,7 +206,8 @@ class ScavengingVisitor : public StaticVisitorBase {
SLOW_DCHECK(object->Size() == object_size);
Heap* heap = map->GetHeap();
- if (!heap->ShouldBePromoted(object->address(), object_size)) {
+ if (!heap->ShouldBePromoted<promotion_mode>(object->address(),
+ object_size)) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
@@ -216,14 +219,15 @@ class ScavengingVisitor : public StaticVisitorBase {
object_size)) {
return;
}
-
+ if (promotion_mode == PROMOTE_MARKED) {
+ FatalProcessOutOfMemory("Scavenger: promoting marked\n");
+ }
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
}
-
static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
HeapObject* object) {
ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
@@ -234,7 +238,7 @@ class ScavengingVisitor : public StaticVisitorBase {
DCHECK(map_word.IsForwardingAddress());
HeapObject* target = map_word.ToForwardingAddress();
- MarkBit mark_bit = Marking::MarkBitFrom(target);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
if (Marking::IsBlack(mark_bit)) {
// This object is black and it might not be rescanned by marker.
// We should explicitly record code entry slot for compaction because
@@ -248,7 +252,6 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
-
static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int length = reinterpret_cast<FixedArray*>(object)->synchronized_length();
@@ -257,7 +260,6 @@ class ScavengingVisitor : public StaticVisitorBase {
object_size);
}
-
static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
@@ -265,7 +267,6 @@ class ScavengingVisitor : public StaticVisitorBase {
EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
}
-
static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
@@ -273,7 +274,6 @@ class ScavengingVisitor : public StaticVisitorBase {
object_size);
}
-
static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
@@ -281,28 +281,12 @@ class ScavengingVisitor : public StaticVisitorBase {
object_size);
}
-
- static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
-
- Heap* heap = map->GetHeap();
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- HeapObject* target = map_word.ToForwardingAddress();
- if (!heap->InNewSpace(target)) {
- heap->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
- }
- }
-
-
static inline void EvacuateByteArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
}
-
static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = SeqOneByteString::cast(object)
@@ -310,7 +294,6 @@ class ScavengingVisitor : public StaticVisitorBase {
EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
}
-
static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)
@@ -318,7 +301,6 @@ class ScavengingVisitor : public StaticVisitorBase {
EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
}
-
static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
HeapObject* object) {
DCHECK(IsShortcutCandidate(map->instance_type()));
@@ -376,21 +358,21 @@ class ScavengingVisitor : public StaticVisitorBase {
static VisitorDispatchTable<ScavengingCallback> table_;
};
-
-template <MarksHandling marks_handling,
+template <MarksHandling marks_handling, PromotionMode promotion_mode,
LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
+VisitorDispatchTable<ScavengingCallback> ScavengingVisitor<
+ marks_handling, promotion_mode, logging_and_profiling_mode>::table_;
// static
void Scavenger::Initialize() {
- ScavengingVisitor<TRANSFER_MARKS,
+ ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS,
+ ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+ LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}
@@ -408,28 +390,28 @@ void Scavenger::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
void Scavenger::SelectScavengingVisitorsTable() {
bool logging_and_profiling =
FLAG_verify_predictable || isolate()->logger()->is_logging() ||
- isolate()->cpu_profiler()->is_profiling() ||
+ isolate()->is_profiling() ||
(isolate()->heap_profiler() != NULL &&
isolate()->heap_profiler()->is_tracking_object_moves());
if (!heap()->incremental_marking()->IsMarking()) {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
+ ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
+ ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
} else {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
+ ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
+ ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
@@ -462,6 +444,7 @@ void ScavengeVisitor::VisitPointers(Object** start, Object** end) {
void ScavengeVisitor::ScavengePointer(Object** p) {
Object* object = *p;
if (!heap_->InNewSpace(object)) return;
+
Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
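
Scavenger::Initialize and SelectScavengingVisitorsTable above build every template variant up front and pick one at run time; the promotion-mode axis added by this patch is tied to the marks handling (TRANSFER_MARKS pairs with PROMOTE_MARKED). A tiny model of that compile-all-variants, select-at-runtime pattern, omitting the promotion axis for brevity:

    #include <cstdio>

    enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
    enum LoggingMode { LOGGING_DISABLED, LOGGING_ENABLED };

    template <MarksHandling marks, LoggingMode logging>
    struct ScavengingVisitor {
      static void Run() { std::printf("marks=%d logging=%d\n", marks, logging); }
    };

    using Callback = void (*)();

    // Map two runtime conditions onto four statically built instantiations.
    Callback Select(bool marking, bool logging) {
      if (marking) {
        return logging
                   ? &ScavengingVisitor<TRANSFER_MARKS, LOGGING_ENABLED>::Run
                   : &ScavengingVisitor<TRANSFER_MARKS, LOGGING_DISABLED>::Run;
      }
      return logging ? &ScavengingVisitor<IGNORE_MARKS, LOGGING_ENABLED>::Run
                     : &ScavengingVisitor<IGNORE_MARKS, LOGGING_DISABLED>::Run;
    }

    int main() {
      Select(/*marking=*/true, /*logging=*/false)();  // TRANSFER_MARKS variant
      return 0;
    }
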
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 5d0abf49d3..f2213b8a36 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SCAVENGER_H_
#include "src/heap/objects-visiting.h"
+#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
@@ -25,6 +26,8 @@ class Scavenger {
// ensure the precondition that the object is (a) a heap object and (b) in
// the heap's from space.
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ static inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
+ Address slot_address);
// Slow part of {ScavengeObject} above.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@@ -60,8 +63,9 @@ class ScavengeVisitor : public ObjectVisitor {
// Helper class for turning the scavenger into an object visitor that is also
// filtering out non-HeapObjects and objects which do not reside in new space.
+template <PromotionMode promotion_mode>
class StaticScavengeVisitor
- : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
+ : public StaticNewSpaceVisitor<StaticScavengeVisitor<promotion_mode>> {
public:
static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
};
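
StaticScavengeVisitor above becomes a template over PromotionMode, so it must pass its own instantiation into the CRTP base, hence the StaticNewSpaceVisitor<StaticScavengeVisitor<promotion_mode>> spelling. The pattern in isolation, with generic names rather than V8's:

    #include <cstdio>

    template <typename Derived>
    struct StaticVisitorBase {
      static void Dispatch() { Derived::VisitPointer(); }  // static dispatch
    };

    enum PromotionMode { DEFAULT_PROMOTION, PROMOTE_MARKED };

    template <PromotionMode mode>
    struct ScavengeVisitor : StaticVisitorBase<ScavengeVisitor<mode>> {
      static void VisitPointer() { std::printf("mode=%d\n", mode); }
    };

    int main() {
      ScavengeVisitor<PROMOTE_MARKED>::Dispatch();  // prints mode=1
      return 0;
    }
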
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index e55ffe98e6..651af88bf8 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -64,6 +64,7 @@ class SlotSet : public Malloced {
// The slot offsets specify a range of slots at addresses:
// [page_start_ + start_offset ... page_start_ + end_offset).
void RemoveRange(int start_offset, int end_offset) {
+ CHECK_LE(end_offset, 1 << kPageSizeBits);
DCHECK_LE(start_offset, end_offset);
int start_bucket, start_cell, start_bit;
SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
@@ -193,9 +194,15 @@ class SlotSet : public Malloced {
}
void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
- uint32_t* cells = bucket[bucket_index];
- if (cells != nullptr && cells[cell_index] != 0) {
- cells[cell_index] &= mask;
+ if (bucket_index < kBuckets) {
+ uint32_t* cells = bucket[bucket_index];
+ if (cells != nullptr && cells[cell_index] != 0) {
+ cells[cell_index] &= mask;
+ }
+ } else {
+ // GCC bug 59124: Emits wrong warnings
+ // "array subscript is above array bounds"
+ UNREACHABLE();
}
}
@@ -217,7 +224,6 @@ class SlotSet : public Malloced {
enum SlotType {
EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
- RELOCATED_CODE_OBJECT,
CELL_TARGET_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
@@ -234,7 +240,30 @@ enum SlotType {
// typed slots contain V8 internal pointers that are not directly exposed to JS.
class TypedSlotSet {
public:
- typedef uint32_t TypedSlot;
+ struct TypedSlot {
+ TypedSlot() : type_and_offset_(0), host_offset_(0) {}
+
+ TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
+ : type_and_offset_(TypeField::encode(type) |
+ OffsetField::encode(offset)),
+ host_offset_(host_offset) {}
+
+ bool operator==(const TypedSlot other) {
+ return type_and_offset_ == other.type_and_offset_ &&
+ host_offset_ == other.host_offset_;
+ }
+
+ bool operator!=(const TypedSlot other) { return !(*this == other); }
+
+ SlotType type() { return TypeField::decode(type_and_offset_); }
+
+ uint32_t offset() { return OffsetField::decode(type_and_offset_); }
+
+ uint32_t host_offset() { return host_offset_; }
+
+ uint32_t type_and_offset_;
+ uint32_t host_offset_;
+ };
static const int kMaxOffset = 1 << 29;
explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
@@ -251,8 +280,8 @@ class TypedSlotSet {
}
// The slot offset specifies a slot at address page_start_ + offset.
- void Insert(SlotType type, int offset) {
- TypedSlot slot = ToTypedSlot(type, offset);
+ void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
+ TypedSlot slot(type, host_offset, offset);
if (!chunk_->AddSlot(slot)) {
chunk_ = new Chunk(chunk_, NextCapacity(chunk_->capacity));
bool added = chunk_->AddSlot(slot);
@@ -273,7 +302,7 @@ class TypedSlotSet {
template <typename Callback>
int Iterate(Callback callback) {
STATIC_ASSERT(NUMBER_OF_SLOT_TYPES < 8);
- const TypedSlot kRemovedSlot = TypeField::encode(NUMBER_OF_SLOT_TYPES);
+ const TypedSlot kRemovedSlot(NUMBER_OF_SLOT_TYPES, 0, 0);
Chunk* chunk = chunk_;
int new_count = 0;
while (chunk != nullptr) {
@@ -282,9 +311,10 @@ class TypedSlotSet {
for (int i = 0; i < count; i++) {
TypedSlot slot = buffer[i];
if (slot != kRemovedSlot) {
- SlotType type = TypeField::decode(slot);
- Address addr = page_start_ + OffsetField::decode(slot);
- if (callback(type, addr) == KEEP_SLOT) {
+ SlotType type = slot.type();
+ Address addr = page_start_ + slot.offset();
+ Address host_addr = page_start_ + slot.host_offset();
+ if (callback(type, host_addr, addr) == KEEP_SLOT) {
new_count++;
} else {
buffer[i] = kRemovedSlot;
@@ -304,10 +334,6 @@ class TypedSlotSet {
return Min(kMaxBufferSize, capacity * 2);
}
- static TypedSlot ToTypedSlot(SlotType type, int offset) {
- return TypeField::encode(type) | OffsetField::encode(offset);
- }
-
class OffsetField : public BitField<int, 0, 29> {};
class TypeField : public BitField<SlotType, 29, 3> {};
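
TypedSlot above grows from one packed uint32_t to a two-word struct, but the type-and-offset word keeps the OffsetField/TypeField layout: 29 bits of offset, then 3 bits of SlotType, which is also why NUMBER_OF_SLOT_TYPES must stay below 8 so the "removed slot" sentinel remains representable. A hand-rolled check of that packing; V8 uses its BitField helper, the explicit masks here are only for illustration:

    #include <cassert>
    #include <cstdint>

    enum SlotType {
      EMBEDDED_OBJECT_SLOT,
      OBJECT_SLOT,
      CELL_TARGET_SLOT,
      CODE_TARGET_SLOT,
      CODE_ENTRY_SLOT,
      DEBUG_TARGET_SLOT,
      NUMBER_OF_SLOT_TYPES
    };

    constexpr uint32_t kOffsetBits = 29;
    constexpr uint32_t kOffsetMask = (1u << kOffsetBits) - 1;

    uint32_t Encode(SlotType type, uint32_t offset) {
      assert(offset <= kOffsetMask);  // offset must fit in 29 bits
      return (static_cast<uint32_t>(type) << kOffsetBits) | offset;
    }

    SlotType DecodeType(uint32_t packed) {
      return static_cast<SlotType>(packed >> kOffsetBits);
    }

    uint32_t DecodeOffset(uint32_t packed) { return packed & kOffsetMask; }

    int main() {
      uint32_t packed = Encode(CODE_TARGET_SLOT, 0x1234);
      assert(DecodeType(packed) == CODE_TARGET_SLOT);
      assert(DecodeOffset(packed) == 0x1234);
      static_assert(NUMBER_OF_SLOT_TYPES < 8, "type must fit in 3 bits");
      return 0;
    }
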
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index c16c9f00a4..0fd69dacfe 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -15,49 +15,32 @@
namespace v8 {
namespace internal {
-
-// -----------------------------------------------------------------------------
-// Bitmap
-
-void Bitmap::Clear(MemoryChunk* chunk) {
- Bitmap* bitmap = chunk->markbits();
- for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
- chunk->ResetLiveBytes();
+template <class PAGE_TYPE>
+PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
+ p_ = p_->next_page();
+ return *this;
}
-void Bitmap::SetAllBits(MemoryChunk* chunk) {
- Bitmap* bitmap = chunk->markbits();
- for (int i = 0; i < bitmap->CellsCount(); i++)
- bitmap->cells()[i] = 0xffffffff;
+template <class PAGE_TYPE>
+PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
+ PageIteratorImpl<PAGE_TYPE> tmp(*this);
+ operator++();
+ return tmp;
}
-// -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space)
- : space_(space),
- prev_page_(&space->anchor_),
- next_page_(prev_page_->next_page()) {}
-
-
-bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
-
-
-Page* PageIterator::next() {
- DCHECK(has_next());
- prev_page_ = next_page_;
- next_page_ = next_page_->next_page();
- return prev_page_;
+NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
+ : range_(Page::FromAddress(start),
+ Page::FromAllocationAreaAddress(limit)->next_page()) {
+ SemiSpace::AssertValidRange(start, limit);
}
-
// -----------------------------------------------------------------------------
// SemiSpaceIterator
HeapObject* SemiSpaceIterator::Next() {
while (current_ != limit_) {
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
+ if (Page::IsAlignedToPageSize(current_)) {
+ Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
DCHECK(!page->is_anchor());
current_ = page->area_start();
@@ -72,57 +55,17 @@ HeapObject* SemiSpaceIterator::Next() {
return nullptr;
}
-
-HeapObject* SemiSpaceIterator::next_object() { return Next(); }
-
-
-// -----------------------------------------------------------------------------
-// NewSpacePageIterator
-
-NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
- : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
- next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
- last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
-
-NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
- : prev_page_(space->anchor()),
- next_page_(prev_page_->next_page()),
- last_page_(prev_page_->prev_page()) {}
-
-NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
- : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
- next_page_(NewSpacePage::FromAddress(start)),
- last_page_(NewSpacePage::FromLimit(limit)) {
- SemiSpace::AssertValidRange(start, limit);
-}
-
-
-bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
-
-
-NewSpacePage* NewSpacePageIterator::next() {
- DCHECK(has_next());
- prev_page_ = next_page_;
- next_page_ = next_page_->next_page();
- return prev_page_;
-}
-
-
// -----------------------------------------------------------------------------
// HeapObjectIterator
HeapObject* HeapObjectIterator::Next() {
do {
HeapObject* next_obj = FromCurrentPage();
- if (next_obj != NULL) return next_obj;
+ if (next_obj != nullptr) return next_obj;
} while (AdvanceToNextPage());
- return NULL;
+ return nullptr;
}
-
-HeapObject* HeapObjectIterator::next_object() { return Next(); }
-
-
HeapObject* HeapObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
@@ -130,15 +73,9 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
continue;
}
HeapObject* obj = HeapObject::FromAddress(cur_addr_);
- int obj_size = obj->Size();
+ const int obj_size = obj->Size();
cur_addr_ += obj_size;
- DCHECK(cur_addr_ <= cur_end_);
- // TODO(hpayer): Remove the debugging code.
- if (cur_addr_ > cur_end_) {
- space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
- obj_size);
- }
-
+ DCHECK_LE(cur_addr_, cur_end_);
if (!obj->IsFiller()) {
if (obj->IsCode()) {
DCHECK_EQ(space_, space_->heap()->code_space());
@@ -149,21 +86,7 @@ HeapObject* HeapObjectIterator::FromCurrentPage() {
return obj;
}
}
- return NULL;
-}
-
-// -----------------------------------------------------------------------------
-// LargePageIterator
-
-LargePageIterator::LargePageIterator(LargeObjectSpace* space)
- : next_page_(space->first_page()) {}
-
-LargePage* LargePageIterator::next() {
- LargePage* result = next_page_;
- if (next_page_ != nullptr) {
- next_page_ = next_page_->next_page();
- }
- return result;
+ return nullptr;
}
// -----------------------------------------------------------------------------
@@ -210,9 +133,8 @@ bool SemiSpace::Contains(Object* o) {
}
bool SemiSpace::ContainsSlow(Address a) {
- NewSpacePageIterator it(this);
- while (it.has_next()) {
- if (it.next() == MemoryChunk::FromAddress(a)) return true;
+ for (Page* p : *this) {
+ if (p == MemoryChunk::FromAddress(a)) return true;
}
return false;
}
@@ -251,27 +173,27 @@ AllocationSpace AllocationResult::RetrySpace() {
return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}
-NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable,
- SemiSpace* owner) {
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+ SemiSpace* owner) {
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
bool in_to_space = (owner->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
- NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+ Page* page = static_cast<Page*>(chunk);
heap->incremental_marking()->SetNewSpacePageFlags(page);
+ page->AllocateLocalTracker();
return page;
}
// --------------------------------------------------------------------------
// PagedSpace
+template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- page->mutex_ = new base::Mutex();
DCHECK(page->area_size() <= kAllocatableMemory);
DCHECK(chunk->owner() == owner);
@@ -280,11 +202,26 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
- owner->Free(page->area_start(), page->area_size());
+  // If we do not free the memory, we effectively account for the whole page
+  // as allocated memory that cannot be used for further allocations.
+ if (mode == kFreeMemory) {
+ owner->Free(page->area_start(), page->area_size());
+ }
return page;
}
+Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+ DCHECK(old_page->InNewSpace());
+ old_page->set_owner(new_owner);
+ old_page->SetFlags(0, ~0);
+ new_owner->AccountCommitted(old_page->size());
+ Page* new_page = Page::Initialize<kDoNotFreeMemory>(
+ old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
+ new_page->InsertAfter(new_owner->anchor()->prev_page());
+ return new_page;
+}
+
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
@@ -297,18 +234,17 @@ void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
void MemoryChunk::ResetLiveBytes() {
if (FLAG_trace_live_bytes) {
- PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
- live_byte_count_);
+ PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n",
+ static_cast<void*>(this), live_byte_count_);
}
live_byte_count_ = 0;
}
void MemoryChunk::IncrementLiveBytes(int by) {
- if (IsFlagSet(BLACK_PAGE)) return;
if (FLAG_trace_live_bytes) {
- PrintIsolate(heap()->isolate(),
- "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
- live_byte_count_, live_byte_count_ + by);
+ PrintIsolate(
+ heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
+ static_cast<void*>(this), by, live_byte_count_, live_byte_count_ + by);
}
live_byte_count_ += by;
DCHECK_GE(live_byte_count_, 0);
@@ -368,6 +304,7 @@ Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
}
void Page::MarkNeverAllocateForTesting() {
+ DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
@@ -382,48 +319,42 @@ void Page::MarkEvacuationCandidate() {
}
void Page::ClearEvacuationCandidate() {
- DCHECK_NULL(old_to_old_slots_);
- DCHECK_NULL(typed_old_to_old_slots_);
+ if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
+ DCHECK_NULL(old_to_old_slots_);
+ DCHECK_NULL(typed_old_to_old_slots_);
+ }
ClearFlag(EVACUATION_CANDIDATE);
InitializeFreeListCategories();
}
-MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
- : state_(kOldSpaceState),
- mode_(mode),
- old_iterator_(heap->old_space()),
- code_iterator_(heap->code_space()),
- map_iterator_(heap->map_space()),
- lo_iterator_(heap->lo_space()) {}
+MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
+ : heap_(heap),
+ state_(kOldSpaceState),
+ old_iterator_(heap->old_space()->begin()),
+ code_iterator_(heap->code_space()->begin()),
+ map_iterator_(heap->map_space()->begin()),
+ lo_iterator_(heap->lo_space()->begin()) {}
MemoryChunk* MemoryChunkIterator::next() {
switch (state_) {
case kOldSpaceState: {
- if (old_iterator_.has_next()) {
- return old_iterator_.next();
- }
+ if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
state_ = kMapState;
// Fall through.
}
case kMapState: {
- if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
- return map_iterator_.next();
- }
+ if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
state_ = kCodeState;
// Fall through.
}
case kCodeState: {
- if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
- return code_iterator_.next();
- }
+ if (code_iterator_ != heap_->code_space()->end())
+ return *(code_iterator_++);
state_ = kLargeObjectState;
// Fall through.
}
case kLargeObjectState: {
- MemoryChunk* answer = lo_iterator_.next();
- if (answer != nullptr) {
- return answer;
- }
+ if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
state_ = kFinishedState;
      // Fall through.
}
@@ -436,16 +367,6 @@ MemoryChunk* MemoryChunkIterator::next() {
return nullptr;
}
-void Page::set_next_page(Page* page) {
- DCHECK(page->owner() == owner());
- set_next_chunk(page);
-}
-
-void Page::set_prev_page(Page* page) {
- DCHECK(page->owner() == owner());
- set_prev_chunk(page);
-}
-
Page* FreeListCategory::page() {
return Page::FromAddress(reinterpret_cast<Address>(this));
}
@@ -521,6 +442,12 @@ AllocationResult PagedSpace::AllocateRawUnaligned(
if (object == NULL) {
object = SlowAllocateRaw(size_in_bytes);
}
+ if (object != NULL) {
+ if (heap()->incremental_marking()->black_allocation()) {
+ Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
+ MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
+ }
+ }
}
if (object != NULL) {
@@ -668,15 +595,19 @@ MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
return AllocateRaw(size_in_bytes, alignment);
}
-
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, Space* owner) {
+ if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
+ STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
+ FATAL("Code page is too large.");
+ }
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
return static_cast<LargePage*>(chunk);
}
intptr_t LargeObjectSpace::Available() {
- return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
+ return ObjectSizeFor(heap()->memory_allocator()->Available());
}
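
The spaces-inl.h changes above replace the has_next()/next() iterators with PageIteratorImpl, whose operator++, operator*, and operator!= are exactly what range-based for needs; that is what enables the new loops like for (Page* p : *this). A minimal linked-list page range in the same shape (all types invented):

    #include <cstdio>

    struct Page {
      Page* next = nullptr;
      int id = 0;
      Page* next_page() const { return next; }
    };

    class PageIterator {
     public:
      explicit PageIterator(Page* p) : p_(p) {}
      PageIterator& operator++() {
        p_ = p_->next_page();  // advance along the intrusive list
        return *this;
      }
      bool operator!=(const PageIterator& other) const { return p_ != other.p_; }
      Page* operator*() const { return p_; }

     private:
      Page* p_;
    };

    struct PageRange {
      Page* first;
      PageIterator begin() const { return PageIterator(first); }
      PageIterator end() const { return PageIterator(nullptr); }
    };

    int main() {
      Page c{nullptr, 3}, b{&c, 2}, a{&b, 1};
      for (Page* p : PageRange{&a}) std::printf("page %d\n", p->id);
      return 0;
    }
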
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 8a7fd1a14f..95d5687a8f 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -4,13 +4,18 @@
#include "src/heap/spaces.h"
+#include <utility>
+
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/heap/array-buffer-tracker.h"
#include "src/heap/slot-set.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -19,51 +24,37 @@ namespace internal {
// ----------------------------------------------------------------------------
// HeapObjectIterator
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize as if we have
- // reached the end of the anchor page, then the first iteration will move on
- // to the first page.
- Initialize(space, NULL, NULL, kAllPagesInSpace);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(Page* page) {
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
+ : cur_addr_(nullptr),
+ cur_end_(nullptr),
+ space_(space),
+ page_range_(space->anchor()->next_page(), space->anchor()),
+ current_page_(page_range_.begin()) {}
+
+HeapObjectIterator::HeapObjectIterator(Page* page)
+ : cur_addr_(nullptr),
+ cur_end_(nullptr),
+ space_(reinterpret_cast<PagedSpace*>(page->owner())),
+ page_range_(page),
+ current_page_(page_range_.begin()) {
+#ifdef DEBUG
Space* owner = page->owner();
DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
owner == page->heap()->code_space());
- Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
- page->area_end(), kOnePageOnly);
- DCHECK(page->SweepingDone());
-}
-
-
-void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
- HeapObjectIterator::PageMode mode) {
- space_ = space;
- cur_addr_ = cur;
- cur_end_ = end;
- page_mode_ = mode;
+#endif // DEBUG
}
-
// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
- DCHECK(cur_addr_ == cur_end_);
- if (page_mode_ == kOnePageOnly) return false;
- Page* cur_page;
- if (cur_addr_ == NULL) {
- cur_page = space_->anchor();
- } else {
- cur_page = Page::FromAddress(cur_addr_ - 1);
- DCHECK(cur_addr_ == cur_page->area_end());
- }
- cur_page = cur_page->next_page();
- if (cur_page == space_->anchor()) return false;
- cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
- cur_page);
+ DCHECK_EQ(cur_addr_, cur_end_);
+ if (current_page_ == page_range_.end()) return false;
+ Page* cur_page = *(current_page_++);
+ space_->heap()
+ ->mark_compact_collector()
+ ->sweeper()
+ .SweepOrWaitUntilSweepingCompleted(cur_page);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
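The rewritten HeapObjectIterator above drops the explicit page-mode state machine in favour of a half-open range over the intrusive circular page list, with the anchor acting as end(). A minimal stand-alone sketch of that range idiom (names are ours, not the V8 API):

#include <cassert>

struct Page {
  Page* next = nullptr;  // Intrusive circular link; the anchor closes the loop.
};

class PageIterator {
 public:
  explicit PageIterator(Page* p) : p_(p) {}
  Page* operator*() const { return p_; }
  PageIterator& operator++() { p_ = p_->next; return *this; }
  bool operator!=(const PageIterator& other) const { return p_ != other.p_; }
 private:
  Page* p_;
};

class PageRange {
 public:
  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
  PageIterator begin() const { return PageIterator(begin_); }
  PageIterator end() const { return PageIterator(end_); }
 private:
  Page* begin_;
  Page* end_;
};

int main() {
  Page anchor, a, b;
  anchor.next = &a; a.next = &b; b.next = &anchor;  // anchor -> a -> b ring.
  int count = 0;
  for (Page* p : PageRange(anchor.next, &anchor)) { (void)p; ++count; }
  assert(count == 2);  // Iteration stops when the anchor (end) is reached.
}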
@@ -115,15 +106,16 @@ bool CodeRange::SetUp(size_t requested) {
requested = kMinimumCodeRangeSize;
}
+ const size_t reserved_area =
+ kReservedCodeRangePages * base::OS::CommitPageSize();
+ if (requested < (kMaximalCodeRangeSize - reserved_area))
+ requested += reserved_area;
+
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-#ifdef V8_TARGET_ARCH_MIPS64
- // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
- // encoded immediate, the addresses have to be in range of 256Mb aligned
- // region.
- code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
-#else
- code_range_ = new base::VirtualMemory(requested);
-#endif
+
+ code_range_ = new base::VirtualMemory(
+ requested, Max(kCodeRangeAreaAlignment,
+ static_cast<size_t>(base::OS::AllocateAlignment())));
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
@@ -137,18 +129,16 @@ bool CodeRange::SetUp(size_t requested) {
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space.
- if (kReservedCodeRangePages) {
- if (!code_range_->Commit(
- base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
+ if (reserved_area > 0) {
+ if (!code_range_->Commit(base, reserved_area, true)) {
delete code_range_;
code_range_ = NULL;
return false;
}
- base += kReservedCodeRangePages * base::OS::CommitPageSize();
+ base += reserved_area;
}
Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
- size_t size = code_range_->size() - (aligned_base - base) -
- kReservedCodeRangePages * base::OS::CommitPageSize();
+ size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
allocation_list_.Add(FreeBlock(aligned_base, size));
current_allocation_block_index_ = 0;
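The reserved-area bookkeeping above, in isolation: commit and skip the header pages, align the base, and treat whatever remains of the reservation as the allocatable code range. A sketch with illustrative constants (this RoundUp assumes a power-of-two alignment):

#include <cassert>
#include <cstddef>

constexpr size_t RoundUp(size_t x, size_t align) {
  return (x + align - 1) & ~(align - 1);
}

int main() {
  const size_t total = 1 << 20;           // Size of the whole reservation.
  const size_t reserved_area = 2 * 4096;  // Header pages (e.g. Win64).
  size_t base = 0x10000 + reserved_area;  // Base after skipping the header.
  size_t aligned_base = RoundUp(base, 1 << 18);  // MemoryChunk-style alignment.
  size_t usable = total - (aligned_base - base) - reserved_area;
  assert(aligned_base % (1 << 18) == 0);
  assert(usable <= total - reserved_area);
}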
@@ -222,7 +212,7 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
*allocated = current.size;
DCHECK(*allocated <= current.size);
DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!isolate_->memory_allocator()->CommitExecutableMemory(
+ if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
code_range_, current.start, commit_size, *allocated)) {
*allocated = 0;
ReleaseBlock(&current);
@@ -233,7 +223,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
bool CodeRange::CommitRawMemory(Address start, size_t length) {
- return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+ return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
+ EXECUTABLE);
}
@@ -294,15 +285,17 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) {
MemoryAllocator::MemoryAllocator(Isolate* isolate)
: isolate_(isolate),
+ code_range_(nullptr),
capacity_(0),
capacity_executable_(0),
size_(0),
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
- highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
-
+ highest_ever_allocated_(reinterpret_cast<void*>(0)),
+ unmapper_(this) {}
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
+ intptr_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
DCHECK_GE(capacity_, capacity_executable_);
@@ -310,21 +303,103 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
size_ = 0;
size_executable_ = 0;
+ code_range_ = new CodeRange(isolate_);
+ if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
+
return true;
}
void MemoryAllocator::TearDown() {
- for (MemoryChunk* chunk : chunk_pool_) {
+ unmapper()->WaitUntilCompleted();
+
+ MemoryChunk* chunk = nullptr;
+ while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
NOT_EXECUTABLE);
}
+
// Check that spaces were torn down before MemoryAllocator.
DCHECK_EQ(size_.Value(), 0);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK(size_executable_ == 0);
capacity_ = 0;
capacity_executable_ = 0;
+
+ if (last_chunk_.IsReserved()) {
+ last_chunk_.Release();
+ }
+
+ delete code_range_;
+ code_range_ = nullptr;
+}
+
+class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
+ public:
+ explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
+
+ private:
+ // v8::Task overrides.
+ void Run() override {
+ unmapper_->PerformFreeMemoryOnQueuedChunks();
+ unmapper_->pending_unmapping_tasks_semaphore_.Signal();
+ }
+
+ Unmapper* unmapper_;
+ DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+void MemoryAllocator::Unmapper::FreeQueuedChunks() {
+ ReconsiderDelayedChunks();
+ if (FLAG_concurrent_sweeping) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
+ concurrent_unmapping_tasks_active_++;
+ } else {
+ PerformFreeMemoryOnQueuedChunks();
+ }
+}
+
+bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
+ bool waited = false;
+ while (concurrent_unmapping_tasks_active_ > 0) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ concurrent_unmapping_tasks_active_--;
+ waited = true;
+ }
+ return waited;
+}
+
+void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
+ MemoryChunk* chunk = nullptr;
+ // Regular chunks.
+ while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+ bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
+ allocator_->PerformFreeMemory(chunk);
+ if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+ }
+ // Non-regular chunks.
+ while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+ allocator_->PerformFreeMemory(chunk);
+ }
+}
+
+void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
+ std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
+ // Move constructed, so the permanent list should be empty.
+ DCHECK(delayed_regular_chunks_.empty());
+ for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
+ AddMemoryChunkSafe<kRegular>(*it);
+ }
+}
+
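The Unmapper above posts one background task per FreeQueuedChunks() call and counts completions on a semaphore; WaitUntilCompleted() drains that count. A self-contained sketch of the same protocol, assuming std::thread and a condition variable stand in for v8::Platform tasks and base::Semaphore:

#include <condition_variable>
#include <mutex>
#include <thread>

class Unmapper {
 public:
  // Mirrors FreeQueuedChunks(): post one background task and count it.
  void FreeQueuedChunksAsync() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      ++active_tasks_;
    }
    std::thread([this] {
      PerformFreeMemoryOnQueuedChunks();
      std::lock_guard<std::mutex> lock(mutex_);
      --active_tasks_;  // Stands in for the semaphore Signal().
      cv_.notify_one();
    }).detach();
  }

  // Mirrors WaitUntilCompleted(): block until every posted task has signaled.
  void WaitUntilCompleted() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return active_tasks_ == 0; });
  }

 private:
  void PerformFreeMemoryOnQueuedChunks() { /* drain queues, free the chunks */ }

  std::mutex mutex_;
  std::condition_variable cv_;
  int active_tasks_ = 0;
};

int main() {
  Unmapper unmapper;
  unmapper.FreeQueuedChunksAsync();
  unmapper.WaitUntilCompleted();  // Returns once the background free has run.
}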
+bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
+ MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
+ // We cannot free memory chunks in new space while the sweeper is running
+ // since a sweeper thread might be stuck right before trying to lock the
+ // corresponding page.
+ return !chunk->InNewSpace() || (mc == nullptr) ||
+ mc->sweeper().IsSweepingCompleted();
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
@@ -342,11 +417,9 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
- DCHECK(isolate_->code_range() == NULL ||
- !isolate_->code_range()->contains(
- static_cast<Address>(reservation->address())));
- DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid() ||
+ DCHECK(code_range() == NULL ||
+ !code_range()->contains(static_cast<Address>(reservation->address())));
+ DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
reservation->size() <= Page::kPageSize);
reservation->Release();
@@ -356,20 +429,18 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
void MemoryAllocator::FreeMemory(Address base, size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
- if (isolate_->code_range() != NULL &&
- isolate_->code_range()->contains(static_cast<Address>(base))) {
+ if (code_range() != NULL &&
+ code_range()->contains(static_cast<Address>(base))) {
DCHECK(executable == EXECUTABLE);
- isolate_->code_range()->FreeRawMemory(base, size);
+ code_range()->FreeRawMemory(base, size);
} else {
- DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid());
+ DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
bool result = base::VirtualMemory::ReleaseRegion(base, size);
USE(result);
DCHECK(result);
}
}
-
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
base::VirtualMemory* controller) {
base::VirtualMemory reservation(size, alignment);
@@ -382,7 +453,6 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
return base;
}
-
Address MemoryAllocator::AllocateAlignedMemory(
size_t reserve_size, size_t commit_size, size_t alignment,
Executability executable, base::VirtualMemory* controller) {
@@ -415,20 +485,12 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
-
-void Page::InitializeAsAnchor(PagedSpace* owner) {
- set_owner(owner);
- set_prev_page(this);
- set_next_page(this);
-}
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
- set_owner(semi_space);
+void Page::InitializeAsAnchor(Space* space) {
+ set_owner(space);
set_next_chunk(this);
set_prev_chunk(this);
- // Flags marks this invalid page as not being in new-space.
- // All real new-space pages will be in new-space.
SetFlags(0, ~0);
+ SetFlag(ANCHOR);
}
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
@@ -448,19 +510,22 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->InitializeReservedMemory();
chunk->old_to_new_slots_ = nullptr;
chunk->old_to_old_slots_ = nullptr;
+ chunk->typed_old_to_new_slots_ = nullptr;
chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
- chunk->mutex_ = nullptr;
+ chunk->mutex_ = new base::Mutex();
chunk->available_in_free_list_ = 0;
chunk->wasted_memory_ = 0;
chunk->ResetLiveBytes();
- Bitmap::Clear(chunk);
+ chunk->ClearLiveness();
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
+ chunk->local_tracker_ = nullptr;
+ chunk->black_area_end_marker_map_ = nullptr;
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -496,19 +561,18 @@ bool MemoryChunk::CommitArea(size_t requested) {
if (reservation_.IsReserved()) {
Executability executable =
IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
- executable)) {
+ if (!heap()->memory_allocator()->CommitMemory(start, length,
+ executable)) {
return false;
}
} else {
- CodeRange* code_range = heap_->isolate()->code_range();
- DCHECK(code_range != NULL && code_range->valid() &&
- IsFlagSet(IS_EXECUTABLE));
+ CodeRange* code_range = heap_->memory_allocator()->code_range();
+ DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
if (!code_range->CommitRawMemory(start, length)) return false;
}
if (Heap::ShouldZapGarbage()) {
- heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+ heap_->memory_allocator()->ZapBlock(start, length);
}
} else if (commit_size < committed_size) {
DCHECK(commit_size > 0);
@@ -518,9 +582,8 @@ bool MemoryChunk::CommitArea(size_t requested) {
if (reservation_.IsReserved()) {
if (!reservation_.Uncommit(start, length)) return false;
} else {
- CodeRange* code_range = heap_->isolate()->code_range();
- DCHECK(code_range != NULL && code_range->valid() &&
- IsFlagSet(IS_EXECUTABLE));
+ CodeRange* code_range = heap_->memory_allocator()->code_range();
+ DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
if (!code_range->UncommitRawMemory(start, length)) return false;
}
}
@@ -529,6 +592,11 @@ bool MemoryChunk::CommitArea(size_t requested) {
return true;
}
+size_t MemoryChunk::CommittedPhysicalMemory() {
+ if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
+ return size();
+ return high_water_mark_.Value();
+}
void MemoryChunk::InsertAfter(MemoryChunk* other) {
MemoryChunk* other_next = other->next_chunk();
@@ -614,13 +682,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
#ifdef V8_TARGET_ARCH_MIPS64
// Use code range only for large object space on mips64 to keep address
// range within 256-MB memory region.
- if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
- reserve_area_size > CodePageAreaSize()) {
+ if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
#else
- if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+ if (code_range()->valid()) {
#endif
- base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
- &chunk_size);
+ base =
+ code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
DCHECK(
IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
if (base == NULL) return NULL;
@@ -669,9 +736,22 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
static_cast<int>(chunk_size));
LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
- if (owner != NULL) {
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+
+ // We cannot use the last chunk in the address space because we would
+ // overflow when comparing top and limit if this chunk is used for a
+ // linear allocation area.
+ if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
+ CHECK(!last_chunk_.IsReserved());
+ last_chunk_.TakeControl(&reservation);
+ UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
+ last_chunk_.size());
+ size_.Increment(-static_cast<intptr_t>(chunk_size));
+ if (executable == EXECUTABLE) {
+ size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
+ }
+ CHECK(last_chunk_.IsReserved());
+ return AllocateChunk(reserve_area_size, commit_area_size, executable,
+ owner);
}
return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
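The special case above exists because unsigned address arithmetic wraps: for a chunk ending exactly at the top of the address space, top + size overflows to zero and every top/limit comparison inverts. A minimal demonstration (constants illustrative):

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  const uintptr_t base = std::numeric_limits<uintptr_t>::max() - 0xFFF;
  const uintptr_t chunk_size = 0x1000;  // Chunk ends at the very top.
  uintptr_t top = base;
  uintptr_t limit = base + chunk_size;  // Wraps around to 0.
  assert(limit == 0);
  assert(limit < top);  // A "top <= limit" bounds check would now misfire.
}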
@@ -684,28 +764,30 @@ void Page::ResetFreeListStatistics() {
available_in_free_list_ = 0;
}
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
- Space* owner,
- Executability executable) {
- MemoryChunk* chunk =
- AllocateChunk(object_size, object_size, executable, owner);
- if (chunk == NULL) return NULL;
- if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
- STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
- FATAL("Code page is too large.");
- }
- return LargePage::Initialize(isolate_->heap(), chunk);
-}
+void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
+ Address start_free) {
+ // We do not allow partial shrink for code.
+ DCHECK(chunk->executable() == NOT_EXECUTABLE);
+
+ intptr_t size;
+ base::VirtualMemory* reservation = chunk->reserved_memory();
+ DCHECK(reservation->IsReserved());
+ size = static_cast<intptr_t>(reservation->size());
+ size_t to_free_size = size - (start_free - chunk->address());
+
+ DCHECK(size_.Value() >= static_cast<intptr_t>(to_free_size));
+ size_.Increment(-static_cast<intptr_t>(to_free_size));
+ isolate_->counters()->memory_allocated()->Decrement(
+ static_cast<int>(to_free_size));
+ chunk->set_size(size - to_free_size);
+
+ reservation->ReleasePartial(start_free);
+}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
- if (chunk->owner() != NULL) {
- ObjectSpace space =
- static_cast<ObjectSpace>(1 << chunk->owner()->identity());
- PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
- }
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
@@ -735,36 +817,52 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
chunk->ReleaseAllocatedMemory();
base::VirtualMemory* reservation = chunk->reserved_memory();
- if (reservation->IsReserved()) {
- FreeMemory(reservation, chunk->executable());
+ if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
+ UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
} else {
- FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+ if (reservation->IsReserved()) {
+ FreeMemory(reservation, chunk->executable());
+ } else {
+ FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+ }
}
}
-template <MemoryAllocator::AllocationMode mode>
+template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
- if (mode == kRegular) {
- PreFreeMemory(chunk);
- PerformFreeMemory(chunk);
- } else {
- DCHECK_EQ(mode, kPooled);
- FreePooled(chunk);
+ switch (mode) {
+ case kFull:
+ PreFreeMemory(chunk);
+ PerformFreeMemory(chunk);
+ break;
+ case kPooledAndQueue:
+ DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
+ DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
+ chunk->SetFlag(MemoryChunk::POOLED);
+ // Fall through to kPreFreeAndQueue.
+ case kPreFreeAndQueue:
+ PreFreeMemory(chunk);
+ // The chunks added to this queue will be freed by a concurrent thread.
+ unmapper()->AddMemoryChunkSafe(chunk);
+ break;
+ default:
+ UNREACHABLE();
}
}
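Free() is now a compile-time dispatch on FreeMode, with kPooledAndQueue deliberately falling through into the queueing path. The same pattern in stand-alone form (the enum values and handlers here are illustrative, not the V8 implementation):

#include <cstdio>

enum FreeMode { kFull, kPooledAndQueue, kPreFreeAndQueue };

template <FreeMode mode>
void Free(int chunk_id) {
  switch (mode) {
    case kFull:
      std::printf("chunk %d: pre-free and release immediately\n", chunk_id);
      break;
    case kPooledAndQueue:
      std::printf("chunk %d: flag as POOLED\n", chunk_id);
      // Fall through: pooled chunks still go through the unmapper queue.
    case kPreFreeAndQueue:
      std::printf("chunk %d: pre-free and queue for a concurrent thread\n",
                  chunk_id);
      break;
  }
}

int main() {
  Free<kFull>(1);
  Free<kPooledAndQueue>(2);  // Prints both the POOLED line and the queue line.
}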
-template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
+template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+
+template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
+template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
MemoryChunk* chunk);
-template <typename PageType, MemoryAllocator::AllocationMode mode,
- typename SpaceType>
-PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
- Executability executable) {
+template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
+Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+ Executability executable) {
MemoryChunk* chunk = nullptr;
- if (mode == kPooled) {
+ if (alloc_mode == kPooled) {
DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
DCHECK_EQ(executable, NOT_EXECUTABLE);
chunk = AllocatePagePooled(owner);
@@ -773,22 +871,32 @@ PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
chunk = AllocateChunk(size, size, executable, owner);
}
if (chunk == nullptr) return nullptr;
- return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
+ return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}
-template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
- PagedSpace>(intptr_t, PagedSpace*,
- Executability);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ intptr_t size, PagedSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ intptr_t size, SemiSpace* owner, Executability executable);
+template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ intptr_t size, SemiSpace* owner, Executability executable);
-template NewSpacePage* MemoryAllocator::AllocatePage<
- NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
- Executability);
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+ LargeObjectSpace* owner,
+ Executability executable) {
+ MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+ if (chunk == nullptr) return nullptr;
+ return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
+}
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
- if (chunk_pool_.is_empty()) return nullptr;
+ MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
+ if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;
- MemoryChunk* chunk = chunk_pool_.RemoveLast();
const Address start = reinterpret_cast<Address>(chunk);
const Address area_start = start + MemoryChunk::kObjectStartOffset;
const Address area_end = start + size;
@@ -802,18 +910,6 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
return chunk;
}
-void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
- DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
- DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
- chunk_pool_.Add(chunk);
- intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
- if (chunk->executable() == EXECUTABLE) {
- size_executable_.Increment(-chunk_size);
- }
- size_.Increment(-chunk_size);
- UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
-}
-
bool MemoryAllocator::CommitBlock(Address start, size_t size,
Executability executable) {
if (!CommitMemory(start, size, executable)) return false;
@@ -840,60 +936,11 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) {
}
}
-
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
- AllocationAction action,
- size_t size) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- MemoryAllocationCallbackRegistration registration =
- memory_allocation_callbacks_[i];
- if ((registration.space & space) == space &&
- (registration.action & action) == action)
- registration.callback(space, action, static_cast<int>(size));
- }
-}
-
-
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
- MemoryAllocationCallback callback) {
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) return true;
- }
- return false;
-}
-
-
-void MemoryAllocator::AddMemoryAllocationCallback(
- MemoryAllocationCallback callback, ObjectSpace space,
- AllocationAction action) {
- DCHECK(callback != NULL);
- MemoryAllocationCallbackRegistration registration(callback, space, action);
- DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
- return memory_allocation_callbacks_.Add(registration);
-}
-
-
-void MemoryAllocator::RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback) {
- DCHECK(callback != NULL);
- for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
- if (memory_allocation_callbacks_[i].callback == callback) {
- memory_allocation_callbacks_.Remove(i);
- return;
- }
- }
- UNREACHABLE();
-}
-
-
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
intptr_t size = Size();
float pct = static_cast<float>(capacity_ - size) / capacity_;
- PrintF(" capacity: %" V8_PTR_PREFIX
- "d"
- ", used: %" V8_PTR_PREFIX
- "d"
+ PrintF(" capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
", available: %%%d\n\n",
capacity_, size, static_cast<int>(pct * 100));
}
@@ -959,12 +1006,19 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// MemoryChunk implementation
void MemoryChunk::ReleaseAllocatedMemory() {
- delete skip_list_;
- skip_list_ = nullptr;
- delete mutex_;
- mutex_ = nullptr;
- ReleaseOldToNewSlots();
- ReleaseOldToOldSlots();
+ if (skip_list_ != nullptr) {
+ delete skip_list_;
+ skip_list_ = nullptr;
+ }
+ if (mutex_ != nullptr) {
+ delete mutex_;
+ mutex_ = nullptr;
+ }
+ if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
+ if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
+ if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
+ if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
+ if (local_tracker_ != nullptr) ReleaseLocalTracker();
}
static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
@@ -997,6 +1051,16 @@ void MemoryChunk::ReleaseOldToOldSlots() {
old_to_old_slots_ = nullptr;
}
+void MemoryChunk::AllocateTypedOldToNewSlots() {
+ DCHECK(nullptr == typed_old_to_new_slots_);
+ typed_old_to_new_slots_ = new TypedSlotSet(address());
+}
+
+void MemoryChunk::ReleaseTypedOldToNewSlots() {
+ delete typed_old_to_new_slots_;
+ typed_old_to_new_slots_ = nullptr;
+}
+
void MemoryChunk::AllocateTypedOldToOldSlots() {
DCHECK(nullptr == typed_old_to_old_slots_);
typed_old_to_old_slots_ = new TypedSlotSet(address());
@@ -1006,6 +1070,23 @@ void MemoryChunk::ReleaseTypedOldToOldSlots() {
delete typed_old_to_old_slots_;
typed_old_to_old_slots_ = nullptr;
}
+
+void MemoryChunk::AllocateLocalTracker() {
+ DCHECK_NULL(local_tracker_);
+ local_tracker_ = new LocalArrayBufferTracker(heap());
+}
+
+void MemoryChunk::ReleaseLocalTracker() {
+ DCHECK_NOT_NULL(local_tracker_);
+ delete local_tracker_;
+ local_tracker_ = nullptr;
+}
+
+void MemoryChunk::ClearLiveness() {
+ markbits()->Clear();
+ ResetLiveBytes();
+}
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -1029,13 +1110,11 @@ void Space::AllocationStep(Address soon_object, int size) {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : Space(heap, space, executable), free_list_(this) {
+ : Space(heap, space, executable), anchor_(this), free_list_(this) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
allocation_info_.Reset(nullptr, nullptr);
-
- anchor_.InitializeAsAnchor(this);
}
@@ -1046,9 +1125,10 @@ bool PagedSpace::HasBeenSetUp() { return true; }
void PagedSpace::TearDown() {
- PageIterator iterator(this);
- while (iterator.has_next()) {
- heap()->isolate()->memory_allocator()->Free(iterator.next());
+ for (auto it = begin(); it != end();) {
+ Page* page = *(it++); // Will be erased.
+ ArrayBufferTracker::FreeAll(page);
+ heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
anchor_.set_next_page(&anchor_);
anchor_.set_prev_page(&anchor_);
@@ -1063,17 +1143,14 @@ void PagedSpace::RefillFreeList() {
return;
}
MarkCompactCollector* collector = heap()->mark_compact_collector();
- List<Page*>* swept_pages = collector->swept_pages(identity());
intptr_t added = 0;
{
- base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
- for (int i = swept_pages->length() - 1; i >= 0; --i) {
- Page* p = (*swept_pages)[i];
+ Page* p = nullptr;
+ while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
// Pages can actually change ownership only during compaction. This is
// safe because no other action competes for the page links during
// compaction.
if (is_local() && (p->owner() != this)) {
- if (added > kCompactionMemoryWanted) break;
base::LockGuard<base::Mutex> guard(
reinterpret_cast<PagedSpace*>(p->owner())->mutex());
p->Unlink();
@@ -1082,7 +1159,7 @@ void PagedSpace::RefillFreeList() {
}
added += RelinkFreeListCategories(p);
added += p->wasted_memory();
- swept_pages->Remove(i);
+ if (is_local() && (added > kCompactionMemoryWanted)) break;
}
}
accounting_stats_.IncreaseCapacity(added);
@@ -1107,10 +1184,8 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
AccountCommitted(other->CommittedMemory());
// Move over pages.
- PageIterator it(other);
- Page* p = nullptr;
- while (it.has_next()) {
- p = it.next();
+ for (auto it = other->begin(); it != other->end();) {
+ Page* p = *(it++);
// Relinking requires the category to be unlinked.
other->UnlinkFreeListCategories(p);
@@ -1127,18 +1202,16 @@ size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
- PageIterator it(this);
- while (it.has_next()) {
- size += it.next()->CommittedPhysicalMemory();
+ for (Page* page : *this) {
+ size += page->CommittedPhysicalMemory();
}
return size;
}
bool PagedSpace::ContainsSlow(Address addr) {
Page* p = Page::FromAddress(addr);
- PageIterator iterator(this);
- while (iterator.has_next()) {
- if (iterator.next() == p) return true;
+ for (Page* page : *this) {
+ if (page == p) return true;
}
return false;
}
@@ -1162,48 +1235,22 @@ Object* PagedSpace::FindObject(Address addr) {
return Smi::FromInt(0);
}
-
-bool PagedSpace::CanExpand(size_t size) {
- DCHECK(heap()->mark_compact_collector()->is_compacting() ||
- Capacity() <= heap()->MaxOldGenerationSize());
-
- // Are we going to exceed capacity for this space? At this point we can be
- // way over the maximum size because of AlwaysAllocate scopes and large
- // objects.
- if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;
-
- return true;
-}
-
-
bool PagedSpace::Expand() {
- intptr_t size = AreaSize();
+ int size = AreaSize();
if (snapshotable() && !HasPages()) {
size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
}
- if (!CanExpand(size)) return false;
+ if (!heap()->CanExpandOldGeneration(size)) return false;
- Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
- size, this, executable());
- if (p == NULL) return false;
+ Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
+ if (p == nullptr) return false;
AccountCommitted(static_cast<intptr_t>(p->size()));
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
- // When incremental marking was activated, old space pages are allocated
- // black.
- if (heap()->incremental_marking()->black_allocation() &&
- identity() == OLD_SPACE) {
- Bitmap::SetAllBits(p);
- p->SetFlag(Page::BLACK_PAGE);
- if (FLAG_trace_incremental_marking) {
- PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
- }
- }
-
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
p->InsertAfter(anchor_.prev_page());
@@ -1213,27 +1260,78 @@ bool PagedSpace::Expand() {
int PagedSpace::CountTotalPages() {
- PageIterator it(this);
int count = 0;
- while (it.has_next()) {
- it.next();
+ for (Page* page : *this) {
count++;
+ USE(page);
}
return count;
}
void PagedSpace::ResetFreeListStatistics() {
- PageIterator page_iterator(this);
- while (page_iterator.has_next()) {
- Page* page = page_iterator.next();
+ for (Page* page : *this) {
page->ResetFreeListStatistics();
}
}
+void PagedSpace::SetAllocationInfo(Address top, Address limit) {
+ SetTopAndLimit(top, limit);
+ if (top != nullptr && top != limit &&
+ heap()->incremental_marking()->black_allocation()) {
+ Page* page = Page::FromAllocationAreaAddress(top);
+ page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
+ page->AddressToMarkbitIndex(limit));
+ page->IncrementLiveBytes(static_cast<int>(limit - top));
+ }
+}
+
+void PagedSpace::MarkAllocationInfoBlack() {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != nullptr && current_top != current_limit) {
+ Page* page = Page::FromAllocationAreaAddress(current_top);
+ page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top),
+ page->AddressToMarkbitIndex(current_limit));
+ page->IncrementLiveBytes(static_cast<int>(current_limit - current_top));
+ }
+}
+
+// Empty the allocation info, returning the unused area to the free list.
+void PagedSpace::EmptyAllocationInfo() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top == nullptr) {
+ DCHECK(current_limit == nullptr);
+ return;
+ }
+
+ if (heap()->incremental_marking()->black_allocation()) {
+ Page* page = Page::FromAllocationAreaAddress(current_top);
+ // We have to remember the end of the current black allocation area if
+ // something was allocated in the current bump pointer range.
+ if (allocation_info_.original_top() != current_top) {
+ Address end_black_area = current_top - kPointerSize;
+ page->AddBlackAreaEndMarker(end_black_area);
+ }
+
+ // Clear the bits in the unused black area.
+ if (current_top != current_limit) {
+ page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
+ page->AddressToMarkbitIndex(current_limit));
+ page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top));
+ }
+ }
-void PagedSpace::IncreaseCapacity(int size) {
- accounting_stats_.ExpandSpace(size);
+ SetTopAndLimit(NULL, NULL);
+ Free(current_top, static_cast<int>(current_limit - current_top));
+}
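SetAllocationInfo, MarkAllocationInfoBlack and EmptyAllocationInfo above all reduce to range operations on the page's mark bitmap. A simplified stand-in for that interface (one mark bit per word-aligned slot is assumed; this is not the real Bitmap class):

#include <bitset>
#include <cassert>
#include <cstddef>

constexpr size_t kPointerSize = 8;
constexpr size_t kBits = 512;

struct MarkBitmap {
  std::bitset<kBits> bits;
  static size_t Index(size_t offset) { return offset / kPointerSize; }
  void SetRange(size_t start, size_t end) {  // Offsets in [start, end).
    for (size_t i = Index(start); i < Index(end); i++) bits.set(i);
  }
  void ClearRange(size_t start, size_t end) {
    for (size_t i = Index(start); i < Index(end); i++) bits.reset(i);
  }
};

int main() {
  MarkBitmap m;
  // SetAllocationInfo: mark the fresh linear area [top, limit) black.
  m.SetRange(64, 256);
  assert(m.bits.count() == (256 - 64) / kPointerSize);
  // EmptyAllocationInfo: clear the bits of the unused tail [top', limit).
  m.ClearRange(128, 256);
  assert(m.bits.count() == (128 - 64) / kPointerSize);
}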
+
+void PagedSpace::IncreaseCapacity(size_t bytes) {
+ accounting_stats_.ExpandSpace(bytes);
}
void PagedSpace::ReleasePage(Page* page) {
@@ -1244,7 +1342,9 @@ void PagedSpace::ReleasePage(Page* page) {
free_list_.EvictFreeListItems(page);
DCHECK(!free_list_.ContainsPageFreeListItems(page));
- if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+ page->ReleaseBlackAreaEndMarkerMap();
+
+ if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
allocation_info_.Reset(nullptr, nullptr);
}
@@ -1255,7 +1355,7 @@ void PagedSpace::ReleasePage(Page* page) {
}
AccountUncommitted(static_cast<intptr_t>(page->size()));
- heap()->QueueMemoryChunkForFree(page);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
DCHECK(Capacity() > 0);
accounting_stats_.ShrinkSpace(AreaSize());
@@ -1269,11 +1369,9 @@ void PagedSpace::Print() {}
void PagedSpace::Verify(ObjectVisitor* visitor) {
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
- PageIterator page_iterator(this);
- while (page_iterator.has_next()) {
- Page* page = page_iterator.next();
+ for (Page* page : *this) {
CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top())) {
+ if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
@@ -1299,8 +1397,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
- if (!page->IsFlagSet(Page::BLACK_PAGE) &&
- Marking::IsBlack(Marking::MarkBitFrom(object))) {
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
black_size += size;
}
@@ -1358,7 +1455,6 @@ void NewSpace::TearDown() {
from_space_.TearDown();
}
-
void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
@@ -1404,6 +1500,48 @@ void NewSpace::Shrink() {
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
+bool NewSpace::Rebalance() {
+ CHECK(heap()->promotion_queue()->is_empty());
+ // Order here is important to make use of the page pool.
+ return to_space_.EnsureCurrentCapacity() &&
+ from_space_.EnsureCurrentCapacity();
+}
+
+bool SemiSpace::EnsureCurrentCapacity() {
+ if (is_committed()) {
+ const int expected_pages = current_capacity_ / Page::kPageSize;
+ int actual_pages = 0;
+ Page* current_page = anchor()->next_page();
+ while (current_page != anchor()) {
+ actual_pages++;
+ current_page = current_page->next_page();
+ if (actual_pages > expected_pages) {
+ Page* to_remove = current_page->prev_page();
+ // Make sure we don't overtake the actual top pointer.
+ CHECK_NE(to_remove, current_page_);
+ to_remove->Unlink();
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
+ to_remove);
+ }
+ }
+ while (actual_pages < expected_pages) {
+ actual_pages++;
+ current_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ Page::kAllocatableMemory, this, executable());
+ if (current_page == nullptr) return false;
+ DCHECK_NOT_NULL(current_page);
+ current_page->InsertAfter(anchor());
+ current_page->ClearLiveness();
+ current_page->SetFlags(anchor()->prev_page()->GetFlags(),
+ Page::kCopyAllFlags);
+ heap()->CreateFillerObjectAt(current_page->area_start(),
+ current_page->area_size(),
+ ClearRecordedSlots::kNo);
+ }
+ }
+ return true;
+}
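EnsureCurrentCapacity above trims surplus pages and then grows back to the expected count, reusing the page pool where it can. The same loop over a std::list standing in for the intrusive page ring (page allocation is simulated and assumed to succeed):

#include <cassert>
#include <list>

bool EnsureCapacity(std::list<int>* pages, int expected_pages) {
  while (static_cast<int>(pages->size()) > expected_pages)
    pages->pop_back();    // Free<kPooledAndQueue>(to_remove) in the real code.
  while (static_cast<int>(pages->size()) < expected_pages)
    pages->push_back(0);  // AllocatePage<kPooled>(...) in the real code.
  return true;
}

int main() {
  std::list<int> pages = {1, 2, 3, 4, 5};
  EnsureCapacity(&pages, 3);
  assert(pages.size() == 3);
  EnsureCapacity(&pages, 6);
  assert(pages.size() == 6);
}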
void LocalAllocationBuffer::Close() {
if (IsValid()) {
@@ -1460,11 +1598,9 @@ void NewSpace::ResetAllocationInfo() {
Address old_top = allocation_info_.top();
to_space_.Reset();
UpdateAllocationInfo();
- pages_used_ = 0;
// Clear all mark-bits in the to-space.
- NewSpacePageIterator it(&to_space_);
- while (it.has_next()) {
- Bitmap::Clear(it.next());
+ for (Page* p : to_space_) {
+ p->ClearLiveness();
}
InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}
@@ -1492,21 +1628,20 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
- DCHECK(!NewSpacePage::IsAtStart(top));
+ DCHECK(!Page::IsAtObjectStart(top));
if (!to_space_.AdvancePage()) {
// No more pages left to advance.
return false;
}
// Clear remainder of current page.
- Address limit = NewSpacePage::FromLimit(top)->area_end();
+ Address limit = Page::FromAllocationAreaAddress(top)->area_end();
if (heap()->gc_state() == Heap::SCAVENGE) {
heap()->promotion_queue()->SetNewLimit(limit);
}
int remaining_in_page = static_cast<int>(limit - top);
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
- pages_used_++;
UpdateAllocationInfo();
return true;
@@ -1526,7 +1661,7 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
int filler_size = Heap::GetFillToAlign(old_top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
- if (old_top + aligned_size_in_bytes >= high) {
+ if (old_top + aligned_size_in_bytes > high) {
// Not enough room in the page, try to allocate a new one.
if (!AddFreshPage()) {
return false;
@@ -1537,10 +1672,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
- aligned_size_in_bytes = size_in_bytes + filler_size;
}
- DCHECK(old_top + aligned_size_in_bytes < high);
+ DCHECK(old_top + aligned_size_in_bytes <= high);
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
@@ -1626,9 +1760,9 @@ void NewSpace::Verify() {
CHECK_EQ(current, to_space_.space_start());
while (current != top()) {
- if (!NewSpacePage::IsAtEnd(current)) {
+ if (!Page::IsAlignedToPageSize(current)) {
// The allocation pointer should not be in the middle of an object.
- CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+ CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
current < top());
HeapObject* object = HeapObject::FromAddress(current);
@@ -1654,7 +1788,7 @@ void NewSpace::Verify() {
current += size;
} else {
// At end of page, switch to next page.
- NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+ Page* page = Page::FromAllocationAreaAddress(current)->next_page();
// Next page should be valid.
CHECK(!page->is_anchor());
current = page->area_start();
@@ -1683,22 +1817,24 @@ void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
void SemiSpace::TearDown() {
// Properly uncommit memory to keep the allocator counters in sync.
- if (is_committed()) Uncommit();
+ if (is_committed()) {
+ for (Page* p : *this) {
+ ArrayBufferTracker::FreeAll(p);
+ }
+ Uncommit();
+ }
current_capacity_ = maximum_capacity_ = 0;
}
bool SemiSpace::Commit() {
DCHECK(!is_committed());
- NewSpacePage* current = anchor();
+ Page* current = anchor();
const int num_pages = current_capacity_ / Page::kPageSize;
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
- NewSpacePage* new_page =
- heap()
- ->isolate()
- ->memory_allocator()
- ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
- NewSpacePage::kAllocatableMemory, this, executable());
+ Page* new_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ Page::kAllocatableMemory, this, executable());
if (new_page == nullptr) {
RewindPages(current, pages_added);
return false;
@@ -1718,15 +1854,15 @@ bool SemiSpace::Commit() {
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
- NewSpacePageIterator it(this);
- while (it.has_next()) {
- heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
- it.next());
+ for (auto it = begin(); it != end();) {
+ Page* p = *(it++);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
}
anchor()->set_next_page(anchor());
anchor()->set_prev_page(anchor());
AccountUncommitted(current_capacity_);
committed_ = false;
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
return true;
}
@@ -1734,9 +1870,8 @@ bool SemiSpace::Uncommit() {
size_t SemiSpace::CommittedPhysicalMemory() {
if (!is_committed()) return 0;
size_t size = 0;
- NewSpacePageIterator it(this);
- while (it.has_next()) {
- size += it.next()->CommittedPhysicalMemory();
+ for (Page* p : *this) {
+ size += p->CommittedPhysicalMemory();
}
return size;
}
@@ -1746,30 +1881,26 @@ bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
- DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
const int delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
- int delta_pages = delta / NewSpacePage::kPageSize;
- NewSpacePage* last_page = anchor()->prev_page();
+ const int delta_pages = delta / Page::kPageSize;
+ Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
- NewSpacePage* new_page =
- heap()
- ->isolate()
- ->memory_allocator()
- ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
- NewSpacePage::kAllocatableMemory, this, executable());
+ Page* new_page =
+ heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+ Page::kAllocatableMemory, this, executable());
if (new_page == nullptr) {
RewindPages(last_page, pages_added);
return false;
}
new_page->InsertAfter(last_page);
- Bitmap::Clear(new_page);
+ new_page->ClearLiveness();
// Duplicate the flags that were set on the old page.
- new_page->SetFlags(last_page->GetFlags(),
- NewSpacePage::kCopyOnFlipFlagsMask);
+ new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
last_page = new_page;
}
AccountCommitted(static_cast<intptr_t>(delta));
@@ -1777,9 +1908,9 @@ bool SemiSpace::GrowTo(int new_capacity) {
return true;
}
-void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
- NewSpacePage* new_last_page = nullptr;
- NewSpacePage* last_page = start;
+void SemiSpace::RewindPages(Page* start, int num_pages) {
+ Page* new_last_page = nullptr;
+ Page* last_page = start;
while (num_pages > 0) {
DCHECK_NE(last_page, anchor());
new_last_page = last_page->prev_page();
@@ -1791,25 +1922,26 @@ void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
}
bool SemiSpace::ShrinkTo(int new_capacity) {
- DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
const int delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
- int delta_pages = delta / NewSpacePage::kPageSize;
- NewSpacePage* new_last_page;
- NewSpacePage* last_page;
+ int delta_pages = delta / Page::kPageSize;
+ Page* new_last_page;
+ Page* last_page;
while (delta_pages > 0) {
last_page = anchor()->prev_page();
new_last_page = last_page->prev_page();
new_last_page->set_next_page(anchor());
anchor()->set_prev_page(new_last_page);
- heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
+ heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
last_page);
delta_pages--;
}
AccountUncommitted(static_cast<intptr_t>(delta));
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}
current_capacity_ = new_capacity;
return true;
@@ -1817,13 +1949,10 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
anchor_.set_owner(this);
- // Fixup back-pointers to anchor. Address of anchor changes when we swap.
anchor_.prev_page()->set_next_page(&anchor_);
anchor_.next_page()->set_prev_page(&anchor_);
- NewSpacePageIterator it(this);
- while (it.has_next()) {
- NewSpacePage* page = it.next();
+ for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
if (id_ == kToSpace) {
@@ -1844,8 +1973,22 @@ void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
void SemiSpace::Reset() {
DCHECK_NE(anchor_.next_page(), &anchor_);
current_page_ = anchor_.next_page();
+ pages_used_ = 0;
+}
+
+void SemiSpace::RemovePage(Page* page) {
+ if (current_page_ == page) {
+ current_page_ = page->prev_page();
+ }
+ page->Unlink();
}
+void SemiSpace::PrependPage(Page* page) {
+ page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
+ page->set_owner(this);
+ page->InsertAfter(anchor());
+ pages_used_++;
+}
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.
@@ -1863,18 +2006,17 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
std::swap(from->anchor_, to->anchor_);
std::swap(from->current_page_, to->current_page_);
- to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+ to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
from->FixPagesFlags(0, 0);
}
void SemiSpace::set_age_mark(Address mark) {
- DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
+ DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
// Mark all pages up to the one containing mark.
- NewSpacePageIterator it(space_start(), mark);
- while (it.has_next()) {
- it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+ for (Page* p : NewSpacePageRange(space_start(), mark)) {
+ p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
}
}
@@ -1886,10 +2028,10 @@ void SemiSpace::Print() {}
#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
bool is_from_space = (id_ == kFromSpace);
- NewSpacePage* page = anchor_.next_page();
- CHECK(anchor_.semi_space() == this);
+ Page* page = anchor_.next_page();
+ CHECK(anchor_.owner() == this);
while (page != &anchor_) {
- CHECK_EQ(page->semi_space(), this);
+ CHECK_EQ(page->owner(), this);
CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
@@ -1917,10 +2059,10 @@ void SemiSpace::Verify() {
#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
// Addresses belong to same semi-space
- NewSpacePage* page = NewSpacePage::FromLimit(start);
- NewSpacePage* end_page = NewSpacePage::FromLimit(end);
- SemiSpace* space = page->semi_space();
- CHECK_EQ(space, end_page->semi_space());
+ Page* page = Page::FromAllocationAreaAddress(start);
+ Page* end_page = Page::FromAllocationAreaAddress(end);
+ SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
+ CHECK_EQ(space, end_page->owner());
// Start address is before end address, either on same page,
// or end address is on a later page in the linked list of
// semi-space pages.
@@ -1950,7 +2092,6 @@ void SemiSpaceIterator::Initialize(Address start, Address end) {
limit_ = end;
}
-
#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
@@ -1966,27 +2107,6 @@ static void ClearHistograms(Isolate* isolate) {
isolate->js_spill_information()->Clear();
}
-
-static void ClearCodeKindStatistics(int* code_kind_statistics) {
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- code_kind_statistics[i] = 0;
- }
-}
-
-
-static void ReportCodeKindStatistics(int* code_kind_statistics) {
- PrintF("\n Code kind histograms: \n");
- for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
- if (code_kind_statistics[i] > 0) {
- PrintF(" %-20s: %10d bytes\n",
- Code::Kind2String(static_cast<Code::Kind>(i)),
- code_kind_statistics[i]);
- }
- }
- PrintF("\n");
-}
-
-
static int CollectHistogramInfo(HeapObject* obj) {
Isolate* isolate = obj->GetIsolate();
InstanceType type = obj->map()->instance_type();
@@ -2087,9 +2207,7 @@ void NewSpace::ReportStatistics() {
#ifdef DEBUG
if (FLAG_heap_stats) {
float pct = static_cast<float>(Available()) / TotalCapacity();
- PrintF(" capacity: %" V8_PTR_PREFIX
- "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+ PrintF(" capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
TotalCapacity(), Available(), static_cast<int>(pct * 100));
PrintF("\n Object Histogram:\n");
for (int i = 0; i <= LAST_TYPE; i++) {
@@ -2321,6 +2439,9 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
+ if (current->is_empty()) {
+ RemoveCategory(current);
+ }
}
return node;
}
@@ -2373,8 +2494,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
- owner_->Free(owner_->top(), old_linear_size);
- owner_->SetTopAndLimit(nullptr, nullptr);
+ owner_->EmptyAllocationInfo();
owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
old_linear_size);
@@ -2382,7 +2502,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == nullptr) return nullptr;
- owner_->AllocationStep(new_node->address(), size_in_bytes);
int bytes_left = new_node_size - size_in_bytes;
DCHECK(bytes_left >= 0);
@@ -2409,7 +2528,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Keep the linear allocation area empty if requested to do so; just
// return the area to the free list instead.
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
- DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
+ owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
+ new_node->address() + size_in_bytes);
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
FLAG_incremental_marking) {
@@ -2419,13 +2539,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// we want to do another increment until the linear area is used up.
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
- owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
- } else if (bytes_left > 0) {
+ owner_->SetAllocationInfo(
+ new_node->address() + size_in_bytes,
+ new_node->address() + size_in_bytes + linear_size);
+ } else {
+ DCHECK(bytes_left >= 0);
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
- owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
}
return new_node;
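The tail of FreeList::Allocate above hands the remainder of the carved node to the space as its new linear allocation area. That handoff in isolation, with illustrative names:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct LinearArea {
  uintptr_t top = 0;
  uintptr_t limit = 0;
};

// Give `size` bytes to the caller and keep the rest of the node as the
// new linear allocation area, as the code above does.
uintptr_t AllocateFromNode(LinearArea* area, uintptr_t node, size_t node_size,
                           size_t size) {
  assert(size <= node_size);
  area->top = node + size;
  area->limit = node + node_size;
  return node;
}

int main() {
  LinearArea area;
  alignas(8) static uint8_t backing[256];
  uintptr_t node = reinterpret_cast<uintptr_t>(backing);
  uintptr_t obj = AllocateFromNode(&area, node, sizeof(backing), 64);
  assert(obj == node);
  assert(area.limit - area.top == sizeof(backing) - 64);
}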
@@ -2495,10 +2617,11 @@ void FreeList::RemoveCategory(FreeListCategory* category) {
void FreeList::PrintCategories(FreeListCategoryType type) {
FreeListCategoryIterator it(this, type);
- PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
+ PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
+ static_cast<void*>(categories_[type]), type);
while (it.HasNext()) {
FreeListCategory* current = it.Next();
- PrintF("%p -> ", current);
+ PrintF("%p -> ", static_cast<void*>(current));
}
PrintF("null\n");
}
@@ -2582,9 +2705,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
free_list_.RepairLists(heap());
// Each page may have a small free space that is not tracked by a free list.
// Update the maps for those free space objects.
- PageIterator iterator(this);
- while (iterator.has_next()) {
- Page* page = iterator.next();
+ for (Page* page : *this) {
int size = static_cast<int>(page->wasted_memory());
if (size == 0) continue;
Address address = page->OffsetToAddress(Page::kPageSize - size);
@@ -2596,7 +2717,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
+ if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
static_cast<int>(allocation_info_.limit() - allocation_info_.top());
@@ -2648,8 +2769,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
- int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
- size_in_bytes, kMaxPagesToSweep);
+ int max_freed = collector->sweeper().ParallelSweepSpace(
+ identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
object = free_list_.Allocate(size_in_bytes);
@@ -2681,145 +2802,11 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
return SweepAndRetryAllocation(size_in_bytes);
}
-
#ifdef DEBUG
-void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- ReportCodeKindStatistics(isolate->code_kind_statistics());
- PrintF(
- "Code comment statistics (\" [ comment-txt : size/ "
- "count (average)\"):\n");
- for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
- const CommentStatistic& cs = comments_statistics[i];
- if (cs.size > 0) {
- PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
- cs.size / cs.count);
- }
- }
- PrintF("\n");
-}
-
-
-void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- ClearCodeKindStatistics(isolate->code_kind_statistics());
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
- comments_statistics[i].Clear();
- }
- comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
- comments_statistics[CommentStatistic::kMaxComments].size = 0;
- comments_statistics[CommentStatistic::kMaxComments].count = 0;
-}
-
-
-// Adds comment to 'comment_statistics' table. Performance OK as long as
-// 'kMaxComments' is small
-static void EnterComment(Isolate* isolate, const char* comment, int delta) {
- CommentStatistic* comments_statistics =
- isolate->paged_space_comments_statistics();
- // Do not count empty comments
- if (delta <= 0) return;
- CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
- // Search for a free or matching entry in 'comments_statistics': 'cs'
- // points to result.
- for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
- if (comments_statistics[i].comment == NULL) {
- cs = &comments_statistics[i];
- cs->comment = comment;
- break;
- } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
- cs = &comments_statistics[i];
- break;
- }
- }
- // Update entry for 'comment'
- cs->size += delta;
- cs->count += 1;
-}
-
-
-// Call for each nested comment start (start marked with '[ xxx', end marked
-// with ']'. RelocIterator 'it' must point to a comment reloc info.
-static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
- DCHECK(!it->done());
- DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
- const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
- if (tmp[0] != '[') {
- // Not a nested comment; skip
- return;
- }
-
- // Search for end of nested comment or a new nested comment
- const char* const comment_txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- const byte* prev_pc = it->rinfo()->pc();
- int flat_delta = 0;
- it->next();
- while (true) {
- // All nested comments must be terminated properly, and therefore exit
- // from loop.
- DCHECK(!it->done());
- if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
- const char* const txt =
- reinterpret_cast<const char*>(it->rinfo()->data());
- flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
- if (txt[0] == ']') break; // End of nested comment
- // A new comment
- CollectCommentStatistics(isolate, it);
- // Skip code that was covered with previous comment
- prev_pc = it->rinfo()->pc();
- }
- it->next();
- }
- EnterComment(isolate, comment_txt, flat_delta);
-}
-
-
-// Collects code size statistics:
-// - by code kind
-// - by code comment
-void PagedSpace::CollectCodeStatistics() {
- Isolate* isolate = heap()->isolate();
- HeapObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
- if (obj->IsAbstractCode()) {
- AbstractCode* code = AbstractCode::cast(obj);
- isolate->code_kind_statistics()[code->kind()] += code->Size();
- }
- if (obj->IsCode()) {
- // TODO(mythria): Also enable this for BytecodeArray when it supports
- // RelocInformation.
- Code* code = Code::cast(obj);
- RelocIterator it(code);
- int delta = 0;
- const byte* prev_pc = code->instruction_start();
- while (!it.done()) {
- if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
- delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
- CollectCommentStatistics(isolate, &it);
- prev_pc = it.rinfo()->pc();
- }
- it.next();
- }
-
- DCHECK(code->instruction_start() <= prev_pc &&
- prev_pc <= code->instruction_end());
- delta += static_cast<int>(code->instruction_end() - prev_pc);
- EnterComment(isolate, "NoComment", delta);
- }
- }
-}
-
-
void PagedSpace::ReportStatistics() {
int pct = static_cast<int>(Available() * 100 / Capacity());
- PrintF(" capacity: %" V8_PTR_PREFIX
- "d"
- ", waste: %" V8_PTR_PREFIX
- "d"
- ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+ PrintF(" capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
+ ", available: %" V8PRIdPTR ", %%%d\n",
Capacity(), Waste(), Available(), pct);
if (heap()->mark_compact_collector()->sweeping_in_progress()) {
@@ -2841,6 +2828,25 @@ void PagedSpace::ReportStatistics() {
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif
+Address LargePage::GetAddressToShrink() {
+ HeapObject* object = GetObject();
+ if (executable() == EXECUTABLE) {
+ return 0;
+ }
+ size_t used_size = RoundUp((object->address() - address()) + object->Size(),
+ base::OS::CommitPageSize());
+ if (used_size < CommittedPhysicalMemory()) {
+ return address() + used_size;
+ }
+ return 0;
+}
+
+void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end());
+ RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end());
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
+ RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
+}
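
GetAddressToShrink above rounds the large object's footprint up to the OS commit page size and reports the first committed byte past it; ClearOutOfLiveRangeSlots then drops remembered-set entries for the range being returned. A standalone sketch of the shrink arithmetic follows (the 4 KB commit page size, the addresses, and the RoundUp helper are assumptions for illustration, not V8's values):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Stand-in for base::OS::CommitPageSize(); 4 KB is an assumption.
constexpr uintptr_t kCommitPageSize = 4096;

// Stand-in for V8's RoundUp helper; |m| must be a power of two.
constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t m) {
  return (x + m - 1) & ~(m - 1);
}

int main() {
  // Hypothetical large page: the object starts 1 KB into the chunk, is
  // 10000 bytes long, and five commit pages are currently committed.
  uintptr_t chunk_start = 0x100000;
  uintptr_t object_start = chunk_start + 1024;
  uintptr_t object_size = 10000;
  uintptr_t committed = 5 * kCommitPageSize;

  // Mirrors GetAddressToShrink: used bytes, rounded up to a commit page.
  uintptr_t used = RoundUp((object_start - chunk_start) + object_size,
                           kCommitPageSize);
  if (used < committed) {
    printf("shrink from 0x%" PRIxPTR ", %" PRIuPTR " bytes unused\n",
           chunk_start + used, committed - used);
  } else {
    printf("nothing to shrink\n");
  }
  return 0;
}

With these numbers the rounded footprint is 12288 bytes against 20480 committed, so 8192 bytes can be handed back to the OS.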
// -----------------------------------------------------------------------------
// LargeObjectIterator
@@ -2862,15 +2868,13 @@ HeapObject* LargeObjectIterator::Next() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
first_page_(NULL),
size_(0),
page_count_(0),
objects_size_(0),
- chunk_map_(HashMap::PointersMatch, 1024) {}
-
+ chunk_map_(base::HashMap::PointersMatch, 1024) {}
LargeObjectSpace::~LargeObjectSpace() {}
@@ -2890,11 +2894,7 @@ void LargeObjectSpace::TearDown() {
LargePage* page = first_page_;
first_page_ = first_page_->next_page();
LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
- ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
- heap()->isolate()->memory_allocator()->PerformAllocationCallback(
- space, kAllocationActionFree, page->size());
- heap()->isolate()->memory_allocator()->Free(page);
+ heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
SetUp();
}
@@ -2908,7 +2908,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
return AllocationResult::Retry(identity());
}
- LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+ LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == NULL) return AllocationResult::Retry(identity());
DCHECK(page->area_size() >= object_size);
@@ -2920,16 +2920,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
page->set_next_page(first_page_);
first_page_ = page;
- // Register all MemoryChunk::kAlignment-aligned chunks covered by
- // this large page in the chunk map.
- uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
- uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
- for (uintptr_t key = base; key <= limit; key++) {
- HashMap::Entry* entry = chunk_map_.LookupOrInsert(
- reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
- DCHECK(entry != NULL);
- entry->value = page;
- }
+ InsertChunkMapEntries(page);
HeapObject* object = page->GetObject();
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
@@ -2944,19 +2935,20 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->OldSpaceStep(object_size);
AllocationStep(object->address(), object_size);
+
+ if (heap()->incremental_marking()->black_allocation()) {
+ Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
+ MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
+ }
return object;
}
size_t LargeObjectSpace::CommittedPhysicalMemory() {
- if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
- size_t size = 0;
- LargePage* current = first_page_;
- while (current != NULL) {
- size += current->CommittedPhysicalMemory();
- current = current->next_page();
- }
- return size;
+  // On platforms that provide lazy committing of memory, this over-accounts
+  // the actually committed memory. There is currently no easy way to support
+ // precise accounting of committed memory in large object space.
+ return CommittedMemory();
}
@@ -2972,8 +2964,8 @@ Object* LargeObjectSpace::FindObject(Address a) {
LargePage* LargeObjectSpace::FindPage(Address a) {
uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
- HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key));
+ base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+ static_cast<uint32_t>(key));
if (e != NULL) {
DCHECK(e->value != NULL);
LargePage* page = reinterpret_cast<LargePage*>(e->value);
@@ -2990,7 +2982,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- MarkBit mark_bit = Marking::MarkBitFrom(object);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
DCHECK(Marking::IsBlack(mark_bit));
Marking::BlackToWhite(mark_bit);
Page::FromAddress(object->address())->ResetProgressBar();
@@ -2999,15 +2991,51 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
}
}
+void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
+ // Register all MemoryChunk::kAlignment-aligned chunks covered by
+ // this large page in the chunk map.
+ uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+ uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
+ MemoryChunk::kAlignment;
+ for (uintptr_t key = start; key <= limit; key++) {
+ base::HashMap::Entry* entry = chunk_map_.InsertNew(
+ reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
+ DCHECK(entry != NULL);
+ entry->value = page;
+ }
+}
+
+void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
+ RemoveChunkMapEntries(page, page->address());
+}
+
+void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
+ Address free_start) {
+ uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
+ MemoryChunk::kAlignment) /
+ MemoryChunk::kAlignment;
+ uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
+ MemoryChunk::kAlignment;
+ for (uintptr_t key = start; key <= limit; key++) {
+ chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
+ }
+}
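
InsertChunkMapEntries and RemoveChunkMapEntries map every MemoryChunk::kAlignment-aligned unit covered by a large page to the same table entry, so FindPage can resolve any interior address with a single division. A self-contained sketch of the scheme using std::unordered_map (the alignment value, page geometry, and page id are invented for the example):

#include <cstdint>
#include <cstdio>
#include <unordered_map>

// Stand-in for MemoryChunk::kAlignment; the value is an assumption.
constexpr uintptr_t kAlignment = 512 * 1024;

int main() {
  std::unordered_map<uintptr_t, int> chunk_map;  // key -> page id

  // Hypothetical large page spanning 2.5 alignment units.
  uintptr_t page_start = 3 * kAlignment;
  uintptr_t page_size = 5 * kAlignment / 2;

  // Insert one entry per aligned unit the page covers (keys 3, 4, 5).
  uintptr_t start = page_start / kAlignment;
  uintptr_t limit = (page_start + page_size - 1) / kAlignment;
  for (uintptr_t key = start; key <= limit; key++) chunk_map[key] = 42;

  // Lookup mirrors FindPage: divide the address by the alignment.
  uintptr_t probe = page_start + page_size - 1;
  printf("page id: %d\n", chunk_map.at(probe / kAlignment));  // page id: 42

  // A partial shrink removes the keys from free_start (rounded up) onwards,
  // as RemoveChunkMapEntries(page, free_start) does.
  uintptr_t free_start = page_start + kAlignment;
  for (uintptr_t key = (free_start + kAlignment - 1) / kAlignment;
       key <= limit; key++) {
    chunk_map.erase(key);
  }
  return 0;
}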
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* previous = NULL;
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- MarkBit mark_bit = Marking::MarkBitFrom(object);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
DCHECK(!Marking::IsGrey(mark_bit));
if (Marking::IsBlack(mark_bit)) {
+ Address free_start;
+ if ((free_start = current->GetAddressToShrink()) != 0) {
+ // TODO(hpayer): Perform partial free concurrently.
+ current->ClearOutOfLiveRangeSlots(free_start);
+ RemoveChunkMapEntries(current, free_start);
+ heap()->memory_allocator()->PartialFreeMemory(current, free_start);
+ }
previous = current;
current = current->next_page();
} else {
@@ -3026,18 +3054,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
objects_size_ -= object->Size();
page_count_--;
- // Remove entries belonging to this page.
- // Use variable alignment to help pass length check (<= 80 characters)
- // of single line in tools/presubmit.py.
- const intptr_t alignment = MemoryChunk::kAlignment;
- uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
- uintptr_t limit = base + (page->size() - 1) / alignment;
- for (uintptr_t key = base; key <= limit; key++) {
- chunk_map_.Remove(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key));
- }
-
- heap()->QueueMemoryChunkForFree(page);
+ RemoveChunkMapEntries(page);
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
}
}
@@ -3103,7 +3121,6 @@ void LargeObjectSpace::Verify() {
}
#endif
-
#ifdef DEBUG
void LargeObjectSpace::Print() {
OFStream os(stdout);
@@ -3115,7 +3132,7 @@ void LargeObjectSpace::Print() {
void LargeObjectSpace::ReportStatistics() {
- PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
+ PrintF(" size: %" V8PRIdPTR "\n", size_);
int num_objects = 0;
ClearHistograms(heap()->isolate());
LargeObjectIterator it(this);
@@ -3126,34 +3143,22 @@ void LargeObjectSpace::ReportStatistics() {
PrintF(
" number of objects %d, "
- "size of objects %" V8_PTR_PREFIX "d\n",
+ "size of objects %" V8PRIdPTR "\n",
num_objects, objects_size_);
if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}
-void LargeObjectSpace::CollectCodeStatistics() {
- Isolate* isolate = heap()->isolate();
- LargeObjectIterator obj_it(this);
- for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
- if (obj->IsAbstractCode()) {
- AbstractCode* code = AbstractCode::cast(obj);
- isolate->code_kind_statistics()[code->kind()] += code->Size();
- }
- }
-}
-
-
void Page::Print() {
// Make a best-effort to print the objects in the page.
- PrintF("Page@%p in %s\n", this->address(),
+ PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
AllocationSpaceName(this->owner()->identity()));
printf(" --------------------------------------\n");
HeapObjectIterator objects(this);
unsigned mark_size = 0;
for (HeapObject* object = objects.Next(); object != NULL;
object = objects.Next()) {
- bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
+ bool is_marked = Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object));
PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
if (is_marked) {
mark_size += object->Size();
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 49a43dc83d..de5ea1b16a 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -5,13 +5,18 @@
#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_
+#include <list>
+#include <memory>
+#include <unordered_set>
+
#include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
+#include "src/base/hashmap.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
-#include "src/hashmap.h"
+#include "src/heap/marking.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"
@@ -25,9 +30,9 @@ class CompactionSpace;
class CompactionSpaceCollection;
class FreeList;
class Isolate;
+class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
-class NewSpacePage;
class Page;
class PagedSpace;
class SemiSpace;
@@ -108,187 +113,6 @@ class Space;
#define DCHECK_PAGE_OFFSET(offset) \
DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
-class MarkBit {
- public:
- typedef uint32_t CellType;
-
- inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
-
-#ifdef DEBUG
- bool operator==(const MarkBit& other) {
- return cell_ == other.cell_ && mask_ == other.mask_;
- }
-#endif
-
- private:
- inline CellType* cell() { return cell_; }
- inline CellType mask() { return mask_; }
-
- inline MarkBit Next() {
- CellType new_mask = mask_ << 1;
- if (new_mask == 0) {
- return MarkBit(cell_ + 1, 1);
- } else {
- return MarkBit(cell_, new_mask);
- }
- }
-
- inline void Set() { *cell_ |= mask_; }
- inline bool Get() { return (*cell_ & mask_) != 0; }
- inline void Clear() { *cell_ &= ~mask_; }
-
- CellType* cell_;
- CellType mask_;
-
- friend class Marking;
-};
-
-
-// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
- public:
- static const uint32_t kBitsPerCell = 32;
- static const uint32_t kBitsPerCellLog2 = 5;
- static const uint32_t kBitIndexMask = kBitsPerCell - 1;
- static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
- static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
-
- static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
-
- static const size_t kSize =
- (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
-
-
- static int CellsForLength(int length) {
- return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
- }
-
- int CellsCount() { return CellsForLength(kLength); }
-
- static int SizeFor(int cells_count) {
- return sizeof(MarkBit::CellType) * cells_count;
- }
-
- INLINE(static uint32_t IndexToCell(uint32_t index)) {
- return index >> kBitsPerCellLog2;
- }
-
- V8_INLINE static uint32_t IndexInCell(uint32_t index) {
- return index & kBitIndexMask;
- }
-
- INLINE(static uint32_t CellToIndex(uint32_t index)) {
- return index << kBitsPerCellLog2;
- }
-
- INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
- return (index + kBitIndexMask) & ~kBitIndexMask;
- }
-
- INLINE(MarkBit::CellType* cells()) {
- return reinterpret_cast<MarkBit::CellType*>(this);
- }
-
- INLINE(Address address()) { return reinterpret_cast<Address>(this); }
-
- INLINE(static Bitmap* FromAddress(Address addr)) {
- return reinterpret_cast<Bitmap*>(addr);
- }
-
- inline MarkBit MarkBitFromIndex(uint32_t index) {
- MarkBit::CellType mask = 1u << IndexInCell(index);
- MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
- return MarkBit(cell, mask);
- }
-
- static inline void Clear(MemoryChunk* chunk);
-
- static inline void SetAllBits(MemoryChunk* chunk);
-
- static void PrintWord(uint32_t word, uint32_t himask = 0) {
- for (uint32_t mask = 1; mask != 0; mask <<= 1) {
- if ((mask & himask) != 0) PrintF("[");
- PrintF((mask & word) ? "1" : "0");
- if ((mask & himask) != 0) PrintF("]");
- }
- }
-
- class CellPrinter {
- public:
- CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
-
- void Print(uint32_t pos, uint32_t cell) {
- if (cell == seq_type) {
- seq_length++;
- return;
- }
-
- Flush();
-
- if (IsSeq(cell)) {
- seq_start = pos;
- seq_length = 0;
- seq_type = cell;
- return;
- }
-
- PrintF("%d: ", pos);
- PrintWord(cell);
- PrintF("\n");
- }
-
- void Flush() {
- if (seq_length > 0) {
- PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
- seq_length * kBitsPerCell);
- seq_length = 0;
- }
- }
-
- static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
-
- private:
- uint32_t seq_start;
- uint32_t seq_type;
- uint32_t seq_length;
- };
-
- void Print() {
- CellPrinter printer;
- for (int i = 0; i < CellsCount(); i++) {
- printer.Print(i, cells()[i]);
- }
- printer.Flush();
- PrintF("\n");
- }
-
- bool IsClean() {
- for (int i = 0; i < CellsCount(); i++) {
- if (cells()[i] != 0) {
- return false;
- }
- }
- return true;
- }
-
- // Clears all bits starting from {cell_base_index} up to and excluding
- // {index}. Note that {cell_base_index} is required to be cell aligned.
- void ClearRange(uint32_t cell_base_index, uint32_t index) {
- DCHECK_EQ(IndexInCell(cell_base_index), 0u);
- DCHECK_GE(index, cell_base_index);
- uint32_t start_cell_index = IndexToCell(cell_base_index);
- uint32_t end_cell_index = IndexToCell(index);
- DCHECK_GE(end_cell_index, start_cell_index);
- // Clear all cells till the cell containing the last index.
- for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
- cells()[i] = 0;
- }
- // Clear all bits in the last cell till the last bit before index.
- uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
- cells()[end_cell_index] &= clear_mask;
- }
-};
-
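
The MarkBit/Bitmap block deleted above moves into src/heap/marking.h (see the include change further down). Its cell arithmetic, 32 bits per cell, with ClearRange zeroing whole cells and masking the last one, works as in this standalone sketch (cell count and test pattern are made up):

#include <cstdint>
#include <cstdio>

constexpr uint32_t kBitsPerCell = 32;
constexpr uint32_t kBitsPerCellLog2 = 5;
constexpr uint32_t kBitIndexMask = kBitsPerCell - 1;

uint32_t IndexToCell(uint32_t index) { return index >> kBitsPerCellLog2; }
uint32_t IndexInCell(uint32_t index) { return index & kBitIndexMask; }

// Clears bits [cell_base_index, index); cell_base_index must be
// cell-aligned, mirroring the precondition DCHECKed in Bitmap::ClearRange.
void ClearRange(uint32_t* cells, uint32_t cell_base_index, uint32_t index) {
  uint32_t start_cell = IndexToCell(cell_base_index);
  uint32_t end_cell = IndexToCell(index);
  // Zero every cell up to the one containing the last index...
  for (uint32_t i = start_cell; i < end_cell; i++) cells[i] = 0;
  // ...then clear the low bits of that cell up to (excluding) |index|.
  cells[end_cell] &= ~((1u << IndexInCell(index)) - 1);
}

int main() {
  uint32_t cells[4] = {0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu};
  ClearRange(cells, 32, 70);  // Clears bits 32..69.
  printf("%08x %08x %08x %08x\n", cells[0], cells[1], cells[2], cells[3]);
  // Prints: ffffffff 00000000 ffffffc0 ffffffff
  return 0;
}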
enum FreeListCategoryType {
kTiniest,
kTiny,
@@ -419,10 +243,13 @@ class MemoryChunk {
// to grey transition is performed in the value.
HAS_PROGRESS_BAR,
- // A black page has all mark bits set to 1 (black). A black page currently
- // cannot be iterated because it is not swept. Moreover live bytes are also
- // not updated.
- BLACK_PAGE,
+ // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
+ // from new to old space during evacuation.
+ PAGE_NEW_OLD_PROMOTION,
+
+ // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+ // within the new space during evacuation.
+ PAGE_NEW_NEW_PROMOTION,
// This flag is intended to be used for testing. Works only when both
// FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
@@ -437,10 +264,22 @@ class MemoryChunk {
// still has to be performed.
PRE_FREED,
+ // |POOLED|: When actually freeing this chunk, only uncommit and do not
+ // give up the reservation as we still reuse the chunk at some point.
+ POOLED,
+
// |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
// has been aborted and needs special handling by the sweeper.
COMPACTION_WAS_ABORTED,
+  // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing, evacuation
+ // on pages is sometimes aborted. The flag is used to avoid repeatedly
+ // triggering on the same page.
+ COMPACTION_WAS_ABORTED_FOR_TESTING,
+
+ // |ANCHOR|: Flag is set if page is an anchor.
+ ANCHOR,
+
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
@@ -497,6 +336,7 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kOldToNewSlotsOffset + kPointerSize // SlotSet* old_to_new_slots_;
+ kPointerSize // SlotSet* old_to_old_slots_;
+ + kPointerSize // TypedSlotSet* typed_old_to_new_slots_;
+ kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
@@ -510,7 +350,10 @@ class MemoryChunk {
+ kPointerSize // AtomicValue next_chunk_
+ kPointerSize // AtomicValue prev_chunk_
// FreeListCategory categories_[kNumberOfCategories]
- + FreeListCategory::kSize * kNumberOfCategories;
+ + FreeListCategory::kSize * kNumberOfCategories +
+ kPointerSize // LocalArrayBufferTracker* local_tracker_;
+ // std::unordered_set<Address>* black_area_end_marker_map_
+ + kPointerSize;
// We add some more space to the computed header size to account for missing
// alignment requirements in our computation.
@@ -548,7 +391,7 @@ class MemoryChunk {
if (mark == nullptr) return;
// Need to subtract one from the mark because when a chunk is full the
// top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationTop.
+ // to another chunk. See the comment to Page::FromTopOrLimit.
MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
intptr_t old_mark = 0;
@@ -574,7 +417,7 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
- AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+ base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
return concurrent_sweeping_;
}
@@ -584,12 +427,10 @@ class MemoryChunk {
int LiveBytes() {
DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
- DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
return live_byte_count_;
}
void SetLiveBytes(int live_bytes) {
- if (IsFlagSet(BLACK_PAGE)) return;
DCHECK_GE(live_bytes, 0);
DCHECK_LE(static_cast<size_t>(live_bytes), size_);
live_byte_count_ = live_bytes;
@@ -604,6 +445,7 @@ class MemoryChunk {
}
size_t size() const { return size_; }
+ void set_size(size_t size) { size_ = size; }
inline Heap* heap() const { return heap_; }
@@ -613,16 +455,24 @@ class MemoryChunk {
inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+ inline TypedSlotSet* typed_old_to_new_slots() {
+ return typed_old_to_new_slots_;
+ }
inline TypedSlotSet* typed_old_to_old_slots() {
return typed_old_to_old_slots_;
}
+ inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
+ void AllocateTypedOldToNewSlots();
+ void ReleaseTypedOldToNewSlots();
void AllocateTypedOldToOldSlots();
void ReleaseTypedOldToOldSlots();
+ void AllocateLocalTracker();
+ void ReleaseLocalTracker();
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
@@ -631,7 +481,9 @@ class MemoryChunk {
bool CommitArea(size_t requested);
// Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+ size_t CommittedPhysicalMemory();
+
+ Address HighWaterMark() { return address() + high_water_mark_.Value(); }
int progress_bar() {
DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
@@ -662,6 +514,8 @@ class MemoryChunk {
return this->address() + (index << kPointerSizeLog2);
}
+ void ClearLiveness();
+
void PrintMarkbits() { markbits()->Print(); }
void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
@@ -695,7 +549,8 @@ class MemoryChunk {
}
bool ShouldSkipEvacuationSlotRecording() {
- return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+ return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
+ !IsFlagSet(COMPACTION_WAS_ABORTED);
}
Executability executable() {
@@ -740,6 +595,33 @@ class MemoryChunk {
void InsertAfter(MemoryChunk* other);
void Unlink();
+ void ReleaseBlackAreaEndMarkerMap() {
+ if (black_area_end_marker_map_) {
+ delete black_area_end_marker_map_;
+ black_area_end_marker_map_ = nullptr;
+ }
+ }
+
+ bool IsBlackAreaEndMarker(Address address) {
+ if (black_area_end_marker_map_) {
+ return black_area_end_marker_map_->find(address) !=
+ black_area_end_marker_map_->end();
+ }
+ return false;
+ }
+
+ void AddBlackAreaEndMarker(Address address) {
+ if (!black_area_end_marker_map_) {
+ black_area_end_marker_map_ = new std::unordered_set<Address>();
+ }
+ auto ret = black_area_end_marker_map_->insert(address);
+ USE(ret);
+ // Check that we inserted a new black area end marker.
+ DCHECK(ret.second);
+ }
+
+ bool HasBlackAreas() { return black_area_end_marker_map_ != nullptr; }
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -780,6 +662,7 @@ class MemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* old_to_new_slots_;
SlotSet* old_to_old_slots_;
+ TypedSlotSet* typed_old_to_new_slots_;
TypedSlotSet* typed_old_to_old_slots_;
SkipList* skip_list_;
@@ -788,23 +671,28 @@ class MemoryChunk {
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
- AtomicValue<intptr_t> high_water_mark_;
+ base::AtomicValue<intptr_t> high_water_mark_;
base::Mutex* mutex_;
- AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
+ base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
// PagedSpace free-list statistics.
- AtomicNumber<intptr_t> available_in_free_list_;
- AtomicNumber<intptr_t> wasted_memory_;
+ base::AtomicNumber<intptr_t> available_in_free_list_;
+ base::AtomicNumber<intptr_t> wasted_memory_;
// next_chunk_ holds a pointer of type MemoryChunk
- AtomicValue<MemoryChunk*> next_chunk_;
+ base::AtomicValue<MemoryChunk*> next_chunk_;
// prev_chunk_ holds a pointer of type MemoryChunk
- AtomicValue<MemoryChunk*> prev_chunk_;
+ base::AtomicValue<MemoryChunk*> prev_chunk_;
FreeListCategory categories_[kNumberOfCategories];
+ LocalArrayBufferTracker* local_tracker_;
+
+ // Stores the end addresses of black areas.
+ std::unordered_set<Address>* black_area_end_marker_map_;
+
private:
void InitializeReservedMemory() { reservation_.Reset(); }
@@ -817,48 +705,81 @@ class MemoryChunk {
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
-// Page* p = Page::FromAllocationTop(top);
+// Page* p = Page::FromTopOrLimit(top);
class Page : public MemoryChunk {
public:
+ static const intptr_t kCopyAllFlags = ~0;
+
+ // Page flags copied from from-space to to-space when flipping semispaces.
+ static const intptr_t kCopyOnFlipFlagsMask =
+ (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+ // Maximum object size that gets allocated into regular pages. Objects larger
+ // than that size are allocated in large object space and are never moved in
+ // memory. This also applies to new space allocation, since objects are never
+ // migrated from new space to large object space. Takes double alignment into
+ // account.
+ // TODO(hpayer): This limit should be way smaller but we currently have
+ // short living objects >256K.
+ static const int kMaxRegularHeapObjectSize = 600 * KB;
+
+ static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
+
// Returns the page containing a given address. The address ranges
- // from [page_addr .. page_addr + kPageSize[
- // This only works if the object is in fact in a page. See also MemoryChunk::
- // FromAddress() and FromAnyAddress().
- INLINE(static Page* FromAddress(Address a)) {
- return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
+ // from [page_addr .. page_addr + kPageSize[. This only works if the object
+ // is in fact in a page.
+ static Page* FromAddress(Address addr) {
+ return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
}
- // Only works for addresses in pointer spaces, not code space.
- inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
+ // Returns the page containing the address provided. The address can
+ // potentially point righter after the page. To be also safe for tagged values
+ // we subtract a hole word. The valid address ranges from
+ // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
+ static Page* FromAllocationAreaAddress(Address address) {
+ return Page::FromAddress(address - kPointerSize);
+ }
- // Returns the page containing an allocation top. Because an allocation
- // top address can be the upper bound of the page, we need to subtract
- // it with kPointerSize first. The address ranges from
- // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
- INLINE(static Page* FromAllocationTop(Address top)) {
- Page* p = FromAddress(top - kPointerSize);
- return p;
+ // Checks if address1 and address2 are on the same new space page.
+ static bool OnSamePage(Address address1, Address address2) {
+ return Page::FromAddress(address1) == Page::FromAddress(address2);
}
- // Returns the next page in the chain of pages owned by a space.
- inline Page* next_page() {
- DCHECK(next_chunk()->owner() == owner());
- return static_cast<Page*>(next_chunk());
+ // Checks whether an address is page aligned.
+ static bool IsAlignedToPageSize(Address addr) {
+ return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
}
- inline Page* prev_page() {
- DCHECK(prev_chunk()->owner() == owner());
- return static_cast<Page*>(prev_chunk());
+
+ static bool IsAtObjectStart(Address addr) {
+ return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
+ kObjectStartOffset;
}
- inline void set_next_page(Page* page);
- inline void set_prev_page(Page* page);
- // Checks whether an address is page aligned.
- static bool IsAlignedToPageSize(Address a) {
- return 0 == (OffsetFrom(a) & kPageAlignmentMask);
+ inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
+
+ // Create a Page object that is only used as anchor for the doubly-linked
+ // list of real pages.
+ explicit Page(Space* owner) { InitializeAsAnchor(owner); }
+
+ inline void MarkNeverAllocateForTesting();
+ inline void MarkEvacuationCandidate();
+ inline void ClearEvacuationCandidate();
+
+ Page* next_page() { return static_cast<Page*>(next_chunk()); }
+ Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
+ void set_next_page(Page* page) { set_next_chunk(page); }
+ void set_prev_page(Page* page) { set_prev_chunk(page); }
+
+ template <typename Callback>
+ inline void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ callback(&categories_[i]);
+ }
}
// Returns the offset of a given address to this page.
- INLINE(int Offset(Address a)) {
+ inline int Offset(Address a) {
int offset = static_cast<int>(a - address());
return offset;
}
@@ -869,24 +790,6 @@ class Page : public MemoryChunk {
return address() + offset;
}
- // ---------------------------------------------------------------------
-
- // Maximum object size that gets allocated into regular pages. Objects larger
- // than that size are allocated in large object space and are never moved in
- // memory. This also applies to new space allocation, since objects are never
- // migrated from new space to large object space. Takes double alignment into
- // account.
- // TODO(hpayer): This limit should be way smaller but we currently have
- // short living objects >256K.
- static const int kMaxRegularHeapObjectSize = 600 * KB;
-
- inline void ClearGCFields();
-
- static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable, PagedSpace* owner);
-
- void InitializeAsAnchor(PagedSpace* owner);
-
// WaitUntilSweepingCompleted only works when concurrent sweeping is in
// progress. In particular, when we know that right before this call a
// sweeper thread was sweeping this page.
@@ -907,42 +810,39 @@ class Page : public MemoryChunk {
available_in_free_list());
}
- template <typename Callback>
- inline void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- callback(&categories_[i]);
- }
- }
-
FreeListCategory* free_list_category(FreeListCategoryType type) {
return &categories_[type];
}
-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
- type name() { return name##_.Value(); } \
- void set_##name(type name) { name##_.SetValue(name); } \
- void add_##name(type name) { name##_.Increment(name); }
-
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
+ bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
-#undef FRAGMENTATION_STATS_ACCESSORS
+ intptr_t wasted_memory() { return wasted_memory_.Value(); }
+ void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
+ intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
+ void add_available_in_free_list(intptr_t available) {
+ available_in_free_list_.Increment(available);
+ }
#ifdef DEBUG
void Print();
#endif // DEBUG
- inline void MarkNeverAllocateForTesting();
- inline void MarkEvacuationCandidate();
- inline void ClearEvacuationCandidate();
-
private:
+ enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
+
+ template <InitializationMode mode = kFreeMemory>
+ static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, PagedSpace* owner);
+ static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, SemiSpace* owner);
+
inline void InitializeFreeListCategories();
+ void InitializeAsAnchor(Space* owner);
+
friend class MemoryAllocator;
};
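
FromAllocationAreaAddress above exists because an allocation top or limit may point exactly one past the end of a full page, where naive masking would land on the next page. A quick standalone check of the subtract-one-word trick (the 4 KB page size is an assumption for the sketch; V8 pages are larger):

#include <cstdint>
#include <cstdio>

constexpr uintptr_t kPageSize = 4096;  // Assumed; not V8's actual value.
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr uintptr_t kPointerSize = sizeof(void*);

uintptr_t PageFromAddress(uintptr_t addr) {
  return addr & ~kPageAlignmentMask;
}

// Mirrors Page::FromAllocationAreaAddress: step back one word, then mask.
uintptr_t PageFromAllocationAreaAddress(uintptr_t addr) {
  return PageFromAddress(addr - kPointerSize);
}

int main() {
  uintptr_t page = 7 * kPageSize;
  uintptr_t full_top = page + kPageSize;  // Top of a completely filled page.
  printf("naive: %#zx  adjusted: %#zx\n",
         static_cast<size_t>(PageFromAddress(full_top)),
         static_cast<size_t>(PageFromAllocationAreaAddress(full_top)));
  // Naive masking yields the *next* page; the adjusted form returns |page|.
  return 0;
}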
-
class LargePage : public MemoryChunk {
public:
HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
@@ -953,6 +853,12 @@ class LargePage : public MemoryChunk {
inline void set_next_page(LargePage* page) { set_next_chunk(page); }
+ // Uncommit memory that is no longer in use by the object. If the object
+ // cannot be shrunk, 0 is returned.
+ Address GetAddressToShrink();
+
+ void ClearOutOfLiveRangeSlots(Address free_start);
+
// A limit to guarantee that we do not overflow typed slot offset in
// the old to old remembered set.
// Note that this limit is higher than what assembler already imposes on
@@ -960,7 +866,8 @@ class LargePage : public MemoryChunk {
static const int kMaxCodePageSize = 512 * MB;
private:
- static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+ static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
+ Executability executable, Space* owner);
friend class MemoryAllocator;
};
@@ -1036,11 +943,6 @@ class Space : public Malloced {
}
}
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- protected:
void AccountCommitted(intptr_t bytes) {
DCHECK_GE(bytes, 0);
committed_ += bytes;
@@ -1055,7 +957,12 @@ class Space : public Malloced {
DCHECK_GE(committed_, 0);
}
- v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+
+ protected:
+ std::unique_ptr<List<AllocationObserver*>> allocation_observers_;
bool allocation_observers_paused_;
private:
@@ -1066,6 +973,8 @@ class Space : public Malloced {
// Keeps track of committed memory in a space.
intptr_t committed_;
intptr_t max_committed_;
+
+ DISALLOW_COPY_AND_ASSIGN(Space);
};
@@ -1129,15 +1038,6 @@ class CodeRange {
void FreeRawMemory(Address buf, size_t length);
private:
- // Frees the range of virtual memory, and frees the data structures used to
- // manage it.
- void TearDown();
-
- Isolate* isolate_;
-
- // The reserved range of virtual memory that all code objects are put in.
- base::VirtualMemory* code_range_;
- // Plain old data class, just a struct plus a constructor.
class FreeBlock {
public:
FreeBlock() : start(0), size(0) {}
@@ -1156,6 +1056,26 @@ class CodeRange {
size_t size;
};
+ // Frees the range of virtual memory, and frees the data structures used to
+ // manage it.
+ void TearDown();
+
+ // Finds a block on the allocation list that contains at least the
+ // requested amount of memory. If none is found, sorts and merges
+ // the existing free memory blocks, and searches again.
+ // If none can be found, returns false.
+ bool GetNextAllocationBlock(size_t requested);
+ // Compares the start addresses of two free blocks.
+ static int CompareFreeBlockAddress(const FreeBlock* left,
+ const FreeBlock* right);
+ bool ReserveBlock(const size_t requested_size, FreeBlock* block);
+ void ReleaseBlock(const FreeBlock* block);
+
+ Isolate* isolate_;
+
+ // The reserved range of virtual memory that all code objects are put in.
+ base::VirtualMemory* code_range_;
+
// The global mutex guards free_list_ and allocation_list_ as GC threads may
// access both lists concurrently to the main thread.
base::Mutex code_range_mutex_;
@@ -1170,17 +1090,6 @@ class CodeRange {
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
- // Finds a block on the allocation list that contains at least the
- // requested amount of memory. If none is found, sorts and merges
- // the existing free memory blocks, and searches again.
- // If none can be found, returns false.
- bool GetNextAllocationBlock(size_t requested);
- // Compares the start addresses of two free blocks.
- static int CompareFreeBlockAddress(const FreeBlock* left,
- const FreeBlock* right);
- bool ReserveBlock(const size_t requested_size, FreeBlock* block);
- void ReleaseBlock(const FreeBlock* block);
-
DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
@@ -1242,47 +1151,127 @@ class SkipList {
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
-//
-// Each space has to manage it's own pages.
-//
class MemoryAllocator {
public:
+ // Unmapper takes care of concurrently unmapping and uncommitting memory
+ // chunks.
+ class Unmapper {
+ public:
+ class UnmapFreeMemoryTask;
+
+ explicit Unmapper(MemoryAllocator* allocator)
+ : allocator_(allocator),
+ pending_unmapping_tasks_semaphore_(0),
+ concurrent_unmapping_tasks_active_(0) {}
+
+ void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ if ((chunk->size() == Page::kPageSize) &&
+ (chunk->executable() != EXECUTABLE)) {
+ AddMemoryChunkSafe<kRegular>(chunk);
+ } else {
+ AddMemoryChunkSafe<kNonRegular>(chunk);
+ }
+ }
+
+ MemoryChunk* TryGetPooledMemoryChunkSafe() {
+ // Procedure:
+ // (1) Try to get a chunk that was declared as pooled and already has
+ // been uncommitted.
+ // (2) Try to steal any memory chunk of kPageSize that would've been
+ // unmapped.
+ MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+ if (chunk == nullptr) {
+ chunk = GetMemoryChunkSafe<kRegular>();
+ if (chunk != nullptr) {
+ // For stolen chunks we need to manually free any allocated memory.
+ chunk->ReleaseAllocatedMemory();
+ }
+ }
+ return chunk;
+ }
+
+ void FreeQueuedChunks();
+ bool WaitUntilCompleted();
+
+ private:
+ enum ChunkQueueType {
+ kRegular, // Pages of kPageSize that do not live in a CodeRange and
+ // can thus be used for stealing.
+ kNonRegular, // Large chunks and executable chunks.
+ kPooled, // Pooled chunks, already uncommitted and ready for reuse.
+ kNumberOfChunkQueues,
+ };
+
+ template <ChunkQueueType type>
+ void AddMemoryChunkSafe(MemoryChunk* chunk) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
+ chunks_[type].push_back(chunk);
+ } else {
+ DCHECK_EQ(type, kRegular);
+ delayed_regular_chunks_.push_back(chunk);
+ }
+ }
+
+ template <ChunkQueueType type>
+ MemoryChunk* GetMemoryChunkSafe() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (chunks_[type].empty()) return nullptr;
+ MemoryChunk* chunk = chunks_[type].front();
+ chunks_[type].pop_front();
+ return chunk;
+ }
+
+ void ReconsiderDelayedChunks();
+ void PerformFreeMemoryOnQueuedChunks();
+
+ base::Mutex mutex_;
+ MemoryAllocator* allocator_;
+ std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+ // Delayed chunks cannot be processed in the current unmapping cycle because
+ // of dependencies such as an active sweeper.
+ // See MemoryAllocator::CanFreeMemoryChunk.
+ std::list<MemoryChunk*> delayed_regular_chunks_;
+ base::Semaphore pending_unmapping_tasks_semaphore_;
+ intptr_t concurrent_unmapping_tasks_active_;
+
+ friend class MemoryAllocator;
+ };
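
The Unmapper sorts chunks into per-type queues under a mutex so pooled and stealable pages can be handed back out cheaply. The same queueing discipline in a minimal sketch (the Chunk struct, page size, and class name are placeholders, and the sketch omits the ReleaseAllocatedMemory step that stolen chunks need in the real code):

#include <cstddef>
#include <initializer_list>
#include <list>
#include <mutex>

struct Chunk { std::size_t size; bool executable; };
constexpr std::size_t kPageSize = 4096;  // Placeholder for Page::kPageSize.

class UnmapperSketch {
 public:
  enum QueueType { kRegular, kNonRegular, kPooled, kNumQueues };

  // Mirrors Unmapper::AddMemoryChunkSafe: only non-executable chunks of
  // exactly one page are candidates for pooling/stealing.
  void Add(Chunk* chunk) {
    QueueType type = (chunk->size == kPageSize && !chunk->executable)
                         ? kRegular : kNonRegular;
    std::lock_guard<std::mutex> guard(mutex_);
    queues_[type].push_back(chunk);
  }

  // Mirrors TryGetPooledMemoryChunkSafe: prefer an already-pooled chunk,
  // otherwise steal a regular one that was queued for unmapping.
  Chunk* TryGetPooled() {
    std::lock_guard<std::mutex> guard(mutex_);
    for (QueueType t : {kPooled, kRegular}) {
      if (!queues_[t].empty()) {
        Chunk* chunk = queues_[t].front();
        queues_[t].pop_front();
        return chunk;
      }
    }
    return nullptr;
  }

 private:
  std::mutex mutex_;
  std::list<Chunk*> queues_[kNumQueues];
};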
+
enum AllocationMode {
kRegular,
kPooled,
};
+ enum FreeMode {
+ kFull,
+ kPreFreeAndQueue,
+ kPooledAndQueue,
+ };
explicit MemoryAllocator(Isolate* isolate);
// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
- bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
+ bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
+ intptr_t code_range_size);
void TearDown();
- // Allocates either Page or NewSpacePage from the allocator. AllocationMode
- // is used to indicate whether pooled allocation, which only works for
- // MemoryChunk::kPageSize, should be tried first.
- template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
+ // Allocates a Page from the allocator. AllocationMode is used to indicate
+ // whether pooled allocation, which only works for MemoryChunk::kPageSize,
+ // should be tried first.
+ template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
typename SpaceType>
- PageType* AllocatePage(intptr_t size, SpaceType* owner,
- Executability executable);
+ Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
- LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
+ LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
Executability executable);
- // PreFree logically frees the object, i.e., it takes care of the size
- // bookkeeping and calls the allocation callback.
- void PreFreeMemory(MemoryChunk* chunk);
-
- // FreeMemory can be called concurrently when PreFree was executed before.
- void PerformFreeMemory(MemoryChunk* chunk);
-
- // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
- // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
- template <MemoryAllocator::AllocationMode mode = kRegular>
+ template <MemoryAllocator::FreeMode mode = kFull>
void Free(MemoryChunk* chunk);
+ bool CanFreeMemoryChunk(MemoryChunk* chunk);
+
// Returns allocated spaces in bytes.
intptr_t Size() { return size_.Value(); }
@@ -1335,6 +1324,7 @@ class MemoryAllocator {
bool CommitMemory(Address addr, size_t size, Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
+ void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
void FreeMemory(Address addr, size_t size, Executability executable);
// Commit a contiguous block of memory from the initial chunk. Assumes that
@@ -1353,16 +1343,6 @@ class MemoryAllocator {
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
- void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
- size_t size);
-
- void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
- ObjectSpace space, AllocationAction action);
-
- void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-
- bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
-
static int CodePageGuardStartOffset();
static int CodePageGuardSize();
@@ -1385,47 +1365,43 @@ class MemoryAllocator {
Address start, size_t commit_size,
size_t reserved_size);
+ CodeRange* code_range() { return code_range_; }
+ Unmapper* unmapper() { return &unmapper_; }
+
private:
+ // PreFree logically frees the object, i.e., it takes care of the size
+ // bookkeeping and calls the allocation callback.
+ void PreFreeMemory(MemoryChunk* chunk);
+
+ // FreeMemory can be called concurrently when PreFree was executed before.
+ void PerformFreeMemory(MemoryChunk* chunk);
+
// See AllocatePage for public interface. Note that currently we only support
// pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
template <typename SpaceType>
MemoryChunk* AllocatePagePooled(SpaceType* owner);
- // Free that chunk into the pool.
- void FreePooled(MemoryChunk* chunk);
-
Isolate* isolate_;
+ CodeRange* code_range_;
+
// Maximum space size in bytes.
intptr_t capacity_;
// Maximum subset of capacity_ that can be executable
intptr_t capacity_executable_;
// Allocated space size in bytes.
- AtomicNumber<intptr_t> size_;
+ base::AtomicNumber<intptr_t> size_;
// Allocated executable space size in bytes.
- AtomicNumber<intptr_t> size_executable_;
+ base::AtomicNumber<intptr_t> size_executable_;
// We keep the lowest and highest addresses allocated as a quick way
// of determining that pointers are outside the heap. The estimate is
// conservative, i.e. not all addresses in 'allocated' space are allocated
// to our heap. The range is [lowest, highest[, inclusive on the low end
// and exclusive on the high end.
- AtomicValue<void*> lowest_ever_allocated_;
- AtomicValue<void*> highest_ever_allocated_;
-
- struct MemoryAllocationCallbackRegistration {
- MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
- ObjectSpace space,
- AllocationAction action)
- : callback(callback), space(space), action(action) {}
- MemoryAllocationCallback callback;
- ObjectSpace space;
- AllocationAction action;
- };
-
- // A List of callback that are triggered when memory is allocated or free'd
- List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
+ base::AtomicValue<void*> lowest_ever_allocated_;
+ base::AtomicValue<void*> highest_ever_allocated_;
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
@@ -1447,7 +1423,10 @@ class MemoryAllocator {
} while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
- List<MemoryChunk*> chunk_pool_;
+ base::VirtualMemory last_chunk_;
+ Unmapper unmapper_;
+
+ friend class TestCodeRangeScope;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
@@ -1464,10 +1443,44 @@ class MemoryAllocator {
class ObjectIterator : public Malloced {
public:
virtual ~ObjectIterator() {}
+ virtual HeapObject* Next() = 0;
+};
- virtual HeapObject* next_object() = 0;
+template <class PAGE_TYPE>
+class PageIteratorImpl
+ : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
+ public:
+ explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
+ PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
+ PAGE_TYPE* operator*() { return p_; }
+ bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
+ return rhs.p_ == p_;
+ }
+ bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
+ return rhs.p_ != p_;
+ }
+ inline PageIteratorImpl<PAGE_TYPE>& operator++();
+ inline PageIteratorImpl<PAGE_TYPE> operator++(int);
+
+ private:
+ PAGE_TYPE* p_;
};
+typedef PageIteratorImpl<Page> PageIterator;
+typedef PageIteratorImpl<LargePage> LargePageIterator;
+
+class PageRange {
+ public:
+ typedef PageIterator iterator;
+ PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
+ explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
+ iterator begin() { return iterator(begin_); }
+ iterator end() { return iterator(end_); }
+
+ private:
+ Page* begin_;
+ Page* end_;
+};
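
PageIteratorImpl and PageRange give the spaces STL-style, range-for-compatible iteration over their intrusive, anchor-terminated page lists. The same pattern in a self-contained sketch (the Node type and names are illustrative, not V8's):

#include <cstdio>
#include <iterator>

struct Node { int id; Node* next; };

class NodeIterator : public std::iterator<std::forward_iterator_tag, Node> {
 public:
  explicit NodeIterator(Node* n) : n_(n) {}
  Node* operator*() const { return n_; }
  bool operator!=(const NodeIterator& rhs) const { return n_ != rhs.n_; }
  NodeIterator& operator++() { n_ = n_->next; return *this; }
 private:
  Node* n_;
};

// Half-open range [begin, end), like PageRange(anchor->next_page(), anchor).
class NodeRange {
 public:
  NodeRange(Node* begin, Node* end) : begin_(begin), end_(end) {}
  NodeIterator begin() const { return NodeIterator(begin_); }
  NodeIterator end() const { return NodeIterator(end_); }
 private:
  Node* begin_;
  Node* end_;
};

int main() {
  // anchor -> a -> b -> anchor: circular, as with the Page anchors.
  Node anchor{0, nullptr}, a{1, nullptr}, b{2, nullptr};
  anchor.next = &a; a.next = &b; b.next = &anchor;
  for (Node* n : NodeRange(anchor.next, &anchor)) printf("node %d\n", n->id);
  return 0;
}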
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
@@ -1486,18 +1499,10 @@ class HeapObjectIterator : public ObjectIterator {
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
- // Returns NULL when the iteration has ended.
- inline HeapObject* Next();
- inline HeapObject* next_object() override;
+ // Returns nullptr when the iteration has ended.
+ inline HeapObject* Next() override;
private:
- enum PageMode { kOnePageOnly, kAllPagesInSpace };
-
- Address cur_addr_; // Current iteration point.
- Address cur_end_; // End iteration point.
- PagedSpace* space_;
- PageMode page_mode_;
-
// Fast (inlined) path of next().
inline HeapObject* FromCurrentPage();
@@ -1505,28 +1510,11 @@ class HeapObjectIterator : public ObjectIterator {
// iteration has ended.
bool AdvanceToNextPage();
- // Initializes fields.
- inline void Initialize(PagedSpace* owner, Address start, Address end,
- PageMode mode);
-};
-
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
-
-class PageIterator BASE_EMBEDDED {
- public:
- explicit inline PageIterator(PagedSpace* space);
-
- inline bool has_next();
- inline Page* next();
-
- private:
+ Address cur_addr_; // Current iteration point.
+ Address cur_end_; // End iteration point.
PagedSpace* space_;
- Page* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- Page* next_page_;
+ PageRange page_range_;
+ PageRange::iterator current_page_;
};
@@ -1538,14 +1526,22 @@ class PageIterator BASE_EMBEDDED {
// space.
class AllocationInfo {
public:
- AllocationInfo() : top_(nullptr), limit_(nullptr) {}
- AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
+ AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
+ AllocationInfo(Address top, Address limit)
+ : original_top_(top), top_(top), limit_(limit) {}
void Reset(Address top, Address limit) {
+ original_top_ = top;
set_top(top);
set_limit(limit);
}
+ Address original_top() {
+ SLOW_DCHECK(top_ == NULL ||
+ (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
+ return original_top_;
+ }
+
INLINE(void set_top(Address top)) {
SLOW_DCHECK(top == NULL ||
(reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
@@ -1572,12 +1568,15 @@ class AllocationInfo {
#ifdef DEBUG
bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+ return (Page::FromAllocationAreaAddress(top_) ==
+ Page::FromAllocationAreaAddress(limit_)) &&
(top_ <= limit_);
}
#endif
private:
+ // The original top address when the allocation info was initialized.
+ Address original_top_;
// Current allocation top.
Address top_;
// Current allocation limit.
@@ -1605,74 +1604,75 @@ class AllocationStats BASE_EMBEDDED {
void ClearSize() { size_ = capacity_; }
// Accessors for the allocation statistics.
- intptr_t Capacity() { return capacity_; }
- intptr_t MaxCapacity() { return max_capacity_; }
- intptr_t Size() {
- CHECK_GE(size_, 0);
- return size_;
- }
+ size_t Capacity() { return capacity_; }
+ size_t MaxCapacity() { return max_capacity_; }
+ size_t Size() { return size_; }
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
// putting them on the free list and removing them from size_.
- void ExpandSpace(int size_in_bytes) {
- capacity_ += size_in_bytes;
- size_ += size_in_bytes;
+ void ExpandSpace(size_t bytes) {
+ DCHECK_GE(size_ + bytes, size_);
+ DCHECK_GE(capacity_ + bytes, capacity_);
+ capacity_ += bytes;
+ size_ += bytes;
if (capacity_ > max_capacity_) {
max_capacity_ = capacity_;
}
- CHECK(size_ >= 0);
}
// Shrink the space by removing available bytes. Since shrinking is done
// during sweeping, bytes have been marked as being in use (part of the size)
// and are hereby freed.
- void ShrinkSpace(int size_in_bytes) {
- capacity_ -= size_in_bytes;
- size_ -= size_in_bytes;
- CHECK_GE(size_, 0);
+ void ShrinkSpace(size_t bytes) {
+ DCHECK_GE(capacity_, bytes);
+ DCHECK_GE(size_, bytes);
+ capacity_ -= bytes;
+ size_ -= bytes;
+ }
+
+ void AllocateBytes(size_t bytes) {
+ DCHECK_GE(size_ + bytes, size_);
+ size_ += bytes;
}
- // Allocate from available bytes (available -> size).
- void AllocateBytes(intptr_t size_in_bytes) {
- size_ += size_in_bytes;
- CHECK_GE(size_, 0);
+ void DeallocateBytes(size_t bytes) {
+ DCHECK_GE(size_, bytes);
+ size_ -= bytes;
}
- // Free allocated bytes, making them available (size -> available).
- void DeallocateBytes(intptr_t size_in_bytes) {
- size_ -= size_in_bytes;
- CHECK_GE(size_, 0);
+ void DecreaseCapacity(size_t bytes) {
+ DCHECK_GE(capacity_, bytes);
+ DCHECK_GE(capacity_ - bytes, size_);
+ capacity_ -= bytes;
}
- // Merge {other} into {this}.
+ void IncreaseCapacity(size_t bytes) {
+ DCHECK_GE(capacity_ + bytes, capacity_);
+ capacity_ += bytes;
+ }
+
+ // Merge |other| into |this|.
void Merge(const AllocationStats& other) {
+ DCHECK_GE(capacity_ + other.capacity_, capacity_);
+ DCHECK_GE(size_ + other.size_, size_);
capacity_ += other.capacity_;
size_ += other.size_;
if (other.max_capacity_ > max_capacity_) {
max_capacity_ = other.max_capacity_;
}
- CHECK_GE(size_, 0);
}
- void DecreaseCapacity(intptr_t size_in_bytes) {
- capacity_ -= size_in_bytes;
- CHECK_GE(capacity_, 0);
- CHECK_GE(capacity_, size_);
- }
-
- void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
-
private:
// |capacity_|: The number of object-area bytes (i.e., not including page
// bookkeeping structures) currently in the space.
- intptr_t capacity_;
+ size_t capacity_;
// |max_capacity_|: The maximum capacity ever observed.
- intptr_t max_capacity_;
+ size_t max_capacity_;
// |size_|: The number of allocated bytes.
- intptr_t size_;
+ size_t size_;
};
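
With the counters now unsigned, AllocationStats trades the old CHECK_GE(x, 0) style for guards that run before any subtraction and that catch wraparound on addition. That discipline in a standalone sketch, with assert standing in for DCHECK:

#include <cassert>
#include <cstddef>
#include <cstdio>

class StatsSketch {
 public:
  // Grow capacity and size together; with size_t, overflow wraps, so the
  // DCHECK_GE-style guards compare the sum against the old value.
  void ExpandSpace(std::size_t bytes) {
    assert(size_ + bytes >= size_);
    assert(capacity_ + bytes >= capacity_);
    capacity_ += bytes;
    size_ += bytes;
    if (capacity_ > max_capacity_) max_capacity_ = capacity_;
  }

  // Unsigned subtraction must be guarded *before* it happens.
  void ShrinkSpace(std::size_t bytes) {
    assert(capacity_ >= bytes);
    assert(size_ >= bytes);
    capacity_ -= bytes;
    size_ -= bytes;
  }

  std::size_t size() const { return size_; }
  std::size_t capacity() const { return capacity_; }

 private:
  std::size_t capacity_ = 0, max_capacity_ = 0, size_ = 0;
};

int main() {
  StatsSketch stats;
  stats.ExpandSpace(4096);
  stats.ShrinkSpace(1024);
  printf("size=%zu capacity=%zu\n", stats.size(), stats.capacity());
  // Prints: size=3072 capacity=3072
  return 0;
}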
// A free list maintaining free blocks of memory. The free list is organized in
@@ -1872,7 +1872,7 @@ class FreeList {
FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
PagedSpace* owner_;
- AtomicNumber<intptr_t> wasted_bytes_;
+ base::AtomicNumber<intptr_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
@@ -1977,8 +1977,21 @@ class LocalAllocationBuffer {
AllocationInfo allocation_info_;
};
+class NewSpacePageRange {
+ public:
+ typedef PageRange::iterator iterator;
+ inline NewSpacePageRange(Address start, Address limit);
+ iterator begin() { return range_.begin(); }
+ iterator end() { return range_.end(); }
+
+ private:
+ PageRange range_;
+};
+
class PagedSpace : public Space {
public:
+ typedef PageIterator iterator;
+
static const intptr_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
@@ -2111,18 +2124,16 @@ class PagedSpace : public Space {
allocation_info_.Reset(top, limit);
}
+ void SetAllocationInfo(Address top, Address limit);
+
// Empty space allocation info, returning unused area to free list.
- void EmptyAllocationInfo() {
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- int old_linear_size = static_cast<int>(limit() - top());
- Free(top(), old_linear_size);
- SetTopAndLimit(NULL, NULL);
- }
+ void EmptyAllocationInfo();
+
+ void MarkAllocationInfoBlack();
void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
- void IncreaseCapacity(int size);
+ void IncreaseCapacity(size_t bytes);
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page);
@@ -2130,6 +2141,7 @@ class PagedSpace : public Space {
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
+
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
@@ -2147,17 +2159,10 @@ class PagedSpace : public Space {
void ReportStatistics();
// Report code object related statistics
- void CollectCodeStatistics();
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
#endif
- // This function tries to steal size_in_bytes memory from the sweeper threads
- // free-lists. If it does not succeed stealing enough memory, it will wait
- // for the sweeper threads to finish sweeping.
- // It returns true when sweeping is completed and false otherwise.
- bool EnsureSweeperProgress(intptr_t size_in_bytes);
-
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
@@ -2188,6 +2193,9 @@ class PagedSpace : public Space {
inline void UnlinkFreeListCategories(Page* page);
inline intptr_t RelinkFreeListCategories(Page* page);
+ iterator begin() { return iterator(anchor_.next_page()); }
+ iterator end() { return iterator(&anchor_); }
+
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2242,7 +2250,6 @@ class PagedSpace : public Space {
friend class IncrementalMarking;
friend class MarkCompactCollector;
- friend class PageIterator;
// Used in cctest.
friend class HeapTester;
@@ -2283,83 +2290,8 @@ class HistogramInfo : public NumberAndSizeInfo {
const char* name_;
};
-
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
-
-class NewSpacePage : public MemoryChunk {
- public:
- static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
- Executability executable,
- SemiSpace* owner);
-
- static bool IsAtStart(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
- kObjectStartOffset;
- }
-
- static bool IsAtEnd(Address addr) {
- return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
- }
-
- // Finds the NewSpacePage containing the given address.
- static inline NewSpacePage* FromAddress(Address address_in_page) {
- Address page_start =
- reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
- ~Page::kPageAlignmentMask);
- NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
- return page;
- }
-
- // Find the page for a limit address. A limit address is either an address
- // inside a page, or the address right after the last byte of a page.
- static inline NewSpacePage* FromLimit(Address address_limit) {
- return NewSpacePage::FromAddress(address_limit - 1);
- }
-
- // Checks if address1 and address2 are on the same new space page.
- static inline bool OnSamePage(Address address1, Address address2) {
- return NewSpacePage::FromAddress(address1) ==
- NewSpacePage::FromAddress(address2);
- }
-
- inline NewSpacePage* next_page() {
- return static_cast<NewSpacePage*>(next_chunk());
- }
-
- inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
- inline NewSpacePage* prev_page() {
- return static_cast<NewSpacePage*>(prev_chunk());
- }
-
- inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
- SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
- bool is_anchor() { return !this->InNewSpace(); }
-
- private:
- // GC related flags copied from from-space to to-space when
- // flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
- // Create a NewSpacePage object that is only used as an anchor
- // for the doubly-linked list of real pages.
- explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
-
- // Initialize a fake NewSpacePage used as a sentinel at the ends
- // of a doubly-linked list of real NewSpacePages.
- // Only uses the prev/next links, and sets flags to not be in new-space.
- void InitializeAsAnchor(SemiSpace* owner);
-
- friend class SemiSpace;
- friend class SemiSpaceIterator;
-};
-
-
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
@@ -2368,6 +2300,8 @@ class NewSpacePage : public MemoryChunk {
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
public:
+ typedef PageIterator iterator;
+
static void Swap(SemiSpace* from, SemiSpace* to);
SemiSpace(Heap* heap, SemiSpaceId semispace)
@@ -2379,7 +2313,8 @@ class SemiSpace : public Space {
committed_(false),
id_(semispace),
anchor_(this),
- current_page_(nullptr) {}
+ current_page_(nullptr),
+ pages_used_(0) {}
inline bool Contains(HeapObject* o);
inline bool Contains(Object* o);
@@ -2402,14 +2337,17 @@ class SemiSpace : public Space {
// than the current capacity.
bool ShrinkTo(int new_capacity);
+ bool EnsureCurrentCapacity();
+
// Returns the start address of the first page of the space.
Address space_start() {
DCHECK_NE(anchor_.next_page(), anchor());
return anchor_.next_page()->area_start();
}
- NewSpacePage* first_page() { return anchor_.next_page(); }
- NewSpacePage* current_page() { return current_page_; }
+ Page* first_page() { return anchor_.next_page(); }
+ Page* current_page() { return current_page_; }
+ int pages_used() { return pages_used_; }
// Returns one past the end address of the space.
Address space_end() { return anchor_.prev_page()->area_end(); }
@@ -2421,15 +2359,25 @@ class SemiSpace : public Space {
Address page_high() { return current_page_->area_end(); }
bool AdvancePage() {
- NewSpacePage* next_page = current_page_->next_page();
- if (next_page == anchor()) return false;
+ Page* next_page = current_page_->next_page();
+ // We cannot expand if we have already reached the maximum number of pages.
+ // Note that this check has to count the next page as used upfront, since
+ // it could be filled completely after we advance to it.
+ const bool reached_max_pages = (pages_used_ + 1) == max_pages();
+ if (next_page == anchor() || reached_max_pages) {
+ return false;
+ }
current_page_ = next_page;
+ pages_used_++;
return true;
}
// Resets the space to using the first page.
void Reset();
+ void RemovePage(Page* page);
+ void PrependPage(Page* page);
+
// Age mark accessors.
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
@@ -2478,10 +2426,14 @@ class SemiSpace : public Space {
virtual void Verify();
#endif
+ iterator begin() { return iterator(anchor_.next_page()); }
+ iterator end() { return iterator(anchor()); }
+
private:
- void RewindPages(NewSpacePage* start, int num_pages);
+ void RewindPages(Page* start, int num_pages);
- inline NewSpacePage* anchor() { return &anchor_; }
+ inline Page* anchor() { return &anchor_; }
+ inline int max_pages() { return current_capacity_ / Page::kPageSize; }
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2489,7 +2441,8 @@ class SemiSpace : public Space {
// The currently committed space capacity.
int current_capacity_;
- // The maximum capacity that can be used by this space.
+ // The maximum capacity that can be used by this space. A space cannot grow
+ // beyond that size.
int maximum_capacity_;
// The minimum capacity for the space. A space cannot shrink below this size.
@@ -2501,11 +2454,12 @@ class SemiSpace : public Space {
bool committed_;
SemiSpaceId id_;
- NewSpacePage anchor_;
- NewSpacePage* current_page_;
+ Page anchor_;
+ Page* current_page_;
+ int pages_used_;
+ friend class NewSpace;
friend class SemiSpaceIterator;
- friend class NewSpacePageIterator;
};
@@ -2519,10 +2473,7 @@ class SemiSpaceIterator : public ObjectIterator {
// Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceIterator(NewSpace* space);
- inline HeapObject* Next();
-
- // Implementation of the ObjectIterator functions.
- inline HeapObject* next_object() override;
+ inline HeapObject* Next() override;
private:
void Initialize(Address start, Address end);
@@ -2533,35 +2484,6 @@ class SemiSpaceIterator : public ObjectIterator {
Address limit_;
};
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
- // Make an iterator that runs over all pages in to-space.
- explicit inline NewSpacePageIterator(NewSpace* space);
-
- // Make an iterator that runs over all pages in the given semispace,
- // even those not used in allocation.
- explicit inline NewSpacePageIterator(SemiSpace* space);
-
- // Make iterator that iterates from the page containing start
- // to the page that contains limit in the same semispace.
- inline NewSpacePageIterator(Address start, Address limit);
-
- inline bool has_next();
- inline NewSpacePage* next();
-
- private:
- NewSpacePage* prev_page_; // Previous page returned.
- // Next page that will be returned. Cached here so that we can use this
- // iterator for operations that deallocate pages.
- NewSpacePage* next_page_;
- // Last page returned.
- NewSpacePage* last_page_;
-};
-
-
// -----------------------------------------------------------------------------
// The young generation space.
//
@@ -2570,12 +2492,13 @@ class NewSpacePageIterator BASE_EMBEDDED {
class NewSpace : public Space {
public:
+ typedef PageIterator iterator;
+
explicit NewSpace(Heap* heap)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- pages_used_(0),
top_on_previous_step_(0),
allocated_histogram_(nullptr),
promoted_histogram_(nullptr) {}
@@ -2607,7 +2530,7 @@ class NewSpace : public Space {
// Return the allocated bytes in the active semispace.
intptr_t Size() override {
- return pages_used_ * NewSpacePage::kAllocatableMemory +
+ return to_space_.pages_used() * Page::kAllocatableMemory +
static_cast<int>(top() - to_space_.page_low());
}
@@ -2620,7 +2543,7 @@ class NewSpace : public Space {
intptr_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
- NewSpacePage::kAllocatableMemory;
+ Page::kAllocatableMemory;
}
// Return the current size of a semispace, allocatable and non-allocatable
@@ -2650,9 +2573,9 @@ class NewSpace : public Space {
size_t AllocatedSinceLastGC() {
bool seen_age_mark = false;
Address age_mark = to_space_.age_mark();
- NewSpacePage* current_page = to_space_.first_page();
- NewSpacePage* age_mark_page = NewSpacePage::FromAddress(age_mark);
- NewSpacePage* last_page = NewSpacePage::FromAddress(top() - kPointerSize);
+ Page* current_page = to_space_.first_page();
+ Page* age_mark_page = Page::FromAddress(age_mark);
+ Page* last_page = Page::FromAddress(top() - kPointerSize);
if (age_mark_page == last_page) {
if (top() - age_mark >= 0) {
return top() - age_mark;
@@ -2675,7 +2598,7 @@ class NewSpace : public Space {
DCHECK_EQ(current_page, age_mark_page);
current_page = age_mark_page->next_page();
while (current_page != last_page) {
- allocated += NewSpacePage::kAllocatableMemory;
+ allocated += Page::kAllocatableMemory;
current_page = current_page->next_page();
}
allocated += top() - current_page->area_start();
@@ -2684,6 +2607,14 @@ class NewSpace : public Space {
return static_cast<size_t>(allocated);
}
+ void MovePageFromSpaceToSpace(Page* page) {
+ DCHECK(page->InFromSpace());
+ from_space_.RemovePage(page);
+ to_space_.PrependPage(page);
+ }
+
+ bool Rebalance();
+
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
@@ -2826,6 +2757,9 @@ class NewSpace : public Space {
void PauseAllocationObservers() override;
void ResumeAllocationObservers() override;
+ iterator begin() { return to_space_.begin(); }
+ iterator end() { return to_space_.end(); }
+
private:
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
@@ -2836,7 +2770,6 @@ class NewSpace : public Space {
SemiSpace to_space_;
SemiSpace from_space_;
base::VirtualMemory reservation_;
- int pages_used_;
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
@@ -2970,6 +2903,8 @@ class MapSpace : public PagedSpace {
class LargeObjectSpace : public Space {
public:
+ typedef LargePageIterator iterator;
+
LargeObjectSpace(Heap* heap, AllocationSpace id);
virtual ~LargeObjectSpace();
@@ -3015,6 +2950,10 @@ class LargeObjectSpace : public Space {
// Frees unmarked objects.
void FreeUnmarkedObjects();
+ void InsertChunkMapEntries(LargePage* page);
+ void RemoveChunkMapEntries(LargePage* page);
+ void RemoveChunkMapEntries(LargePage* page, Address free_start);
+
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
// Checks whether an address is in the object area in this space. Iterates
@@ -3028,6 +2967,12 @@ class LargeObjectSpace : public Space {
LargePage* first_page() { return first_page_; }
+ // Collect code statistics.
+ void CollectCodeStatistics();
+
+ iterator begin() { return iterator(first_page_); }
+ iterator end() { return iterator(nullptr); }
+
#ifdef VERIFY_HEAP
virtual void Verify();
#endif
@@ -3035,7 +2980,6 @@ class LargeObjectSpace : public Space {
#ifdef DEBUG
void Print() override;
void ReportStatistics();
- void CollectCodeStatistics();
#endif
private:
@@ -3045,7 +2989,7 @@ class LargeObjectSpace : public Space {
int page_count_; // number of chunks
intptr_t objects_size_; // size of objects
// Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
- HashMap chunk_map_;
+ base::HashMap chunk_map_;
friend class LargeObjectIterator;
};
@@ -3055,31 +2999,17 @@ class LargeObjectIterator : public ObjectIterator {
public:
explicit LargeObjectIterator(LargeObjectSpace* space);
- HeapObject* Next();
-
- // implementation of ObjectIterator.
- virtual HeapObject* next_object() { return Next(); }
+ HeapObject* Next() override;
private:
LargePage* current_;
};
-class LargePageIterator BASE_EMBEDDED {
- public:
- explicit inline LargePageIterator(LargeObjectSpace* space);
-
- inline LargePage* next();
-
- private:
- LargePage* next_page_;
-};
-
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
public:
- enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
- inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
+ inline explicit MemoryChunkIterator(Heap* heap);
// Return NULL when the iterator is done.
inline MemoryChunk* next();
@@ -3092,8 +3022,8 @@ class MemoryChunkIterator BASE_EMBEDDED {
kLargeObjectState,
kFinishedState
};
+ Heap* heap_;
State state_;
- const Mode mode_;
PageIterator old_iterator_;
PageIterator code_iterator_;
PageIterator map_iterator_;
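
The spaces.h changes above share one theme: the separate NewSpacePage type and the ad-hoc PageIterator/NewSpacePageIterator/LargePageIterator helpers are folded into a single Page type plus standard begin()/end() accessors, so heap spaces become iterable with range-based for. Below is a minimal self-contained sketch of that pattern (toy types, not V8's actual classes): the space owns a sentinel "anchor" node of a circular doubly-linked page list, begin() starts at the first real page, and end() is the anchor itself.

#include <cstdio>

struct Page {
  Page* next = this;  // circular links; a lone node points at itself
  Page* prev = this;
  int id = -1;        // -1 marks the anchor sentinel
};

class PageIterator {
 public:
  explicit PageIterator(Page* p) : p_(p) {}
  Page* operator*() const { return p_; }
  bool operator!=(const PageIterator& other) const { return p_ != other.p_; }
  PageIterator& operator++() {
    p_ = p_->next;
    return *this;
  }

 private:
  Page* p_;
};

class Space {
 public:
  typedef PageIterator iterator;
  // An empty space has the anchor linked to itself, so begin() == end().
  iterator begin() { return iterator(anchor_.next); }
  iterator end() { return iterator(&anchor_); }

  void Append(Page* p) {  // insert immediately before the anchor (the tail)
    p->prev = anchor_.prev;
    p->next = &anchor_;
    anchor_.prev->next = p;
    anchor_.prev = p;
  }

 private:
  Page anchor_;  // sentinel node; never a real page
};

int main() {
  Space space;
  Page a, b;
  a.id = 0;
  b.id = 1;
  space.Append(&a);
  space.Append(&b);
  for (Page* p : space) std::printf("page %d\n", p->id);  // page 0, page 1
  return 0;
}
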
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 623de50157..3418ae79f1 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -157,6 +157,9 @@ void SetResolvedDateSettings(Isolate* isolate,
// Set time zone and calendar.
const icu::Calendar* calendar = date_format->getCalendar();
+ // getType() returns the legacy calendar type name instead of the LDML/BCP47
+ // calendar key value. i18n.js maps it to a BCP47 value for the "ca" key.
+ // TODO(jshin): Consider doing the mapping here instead.
const char* calendar_name = calendar->getType();
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("calendar"),
factory->NewStringFromAsciiChecked(calendar_name),
@@ -768,28 +771,12 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
return NULL;
}
-
-template<class T>
-void DeleteNativeObjectAt(const v8::WeakCallbackData<v8::Value, void>& data,
- int index) {
- v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast(data.GetValue());
- delete reinterpret_cast<T*>(obj->GetAlignedPointerFromInternalField(index));
-}
-
-
-static void DestroyGlobalHandle(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
+ delete reinterpret_cast<icu::SimpleDateFormat*>(data.GetInternalField(0));
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
-void DateFormat::DeleteDateFormat(
- const v8::WeakCallbackData<v8::Value, void>& data) {
- DeleteNativeObjectAt<icu::SimpleDateFormat>(data, 0);
- DestroyGlobalHandle(data);
-}
-
-
icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
Isolate* isolate,
Handle<String> locale,
@@ -847,11 +834,9 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
return NULL;
}
-
-void NumberFormat::DeleteNumberFormat(
- const v8::WeakCallbackData<v8::Value, void>& data) {
- DeleteNativeObjectAt<icu::DecimalFormat>(data, 0);
- DestroyGlobalHandle(data);
+void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
+ delete reinterpret_cast<icu::DecimalFormat*>(data.GetInternalField(0));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
@@ -908,11 +893,9 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate,
return NULL;
}
-
-void Collator::DeleteCollator(
- const v8::WeakCallbackData<v8::Value, void>& data) {
- DeleteNativeObjectAt<icu::Collator>(data, 0);
- DestroyGlobalHandle(data);
+void Collator::DeleteCollator(const v8::WeakCallbackInfo<void>& data) {
+ delete reinterpret_cast<icu::Collator*>(data.GetInternalField(0));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
@@ -973,12 +956,11 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
return NULL;
}
-
void BreakIterator::DeleteBreakIterator(
- const v8::WeakCallbackData<v8::Value, void>& data) {
- DeleteNativeObjectAt<icu::BreakIterator>(data, 0);
- DeleteNativeObjectAt<icu::UnicodeString>(data, 1);
- DestroyGlobalHandle(data);
+ const v8::WeakCallbackInfo<void>& data) {
+ delete reinterpret_cast<icu::BreakIterator*>(data.GetInternalField(0));
+ delete reinterpret_cast<icu::UnicodeString*>(data.GetInternalField(1));
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
} // namespace internal
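
The i18n.cc rewrite above is driven by the weak-handle API change from v8::WeakCallbackData to v8::WeakCallbackInfo: the new phantom-style callback runs once the JS wrapper is already unreachable, so it may not call data.GetValue() and must instead read the native pointer from a snapshot of the wrapper's internal fields. A hedged sketch of the same pattern using the public embedder API (the patch itself goes through internal GlobalHandles; MyBackend and the helper below are illustrative, and the wrapper is assumed to come from an ObjectTemplate with at least one internal field):

#include <v8.h>

struct MyBackend { /* owns some native resource, e.g. an ICU object */ };

static void DeleteBackend(
    const v8::WeakCallbackInfo<v8::Persistent<v8::Object>>& data) {
  // Internal field 0 was snapshotted by V8 when the wrapper died; the JS
  // object itself can no longer be touched here.
  delete static_cast<MyBackend*>(data.GetInternalField(0));
  data.GetParameter()->Reset();  // release the global handle as well
}

void MakeWeakWrapper(v8::Isolate* isolate, v8::Local<v8::Object> wrapper,
                     MyBackend* backend, v8::Persistent<v8::Object>* handle) {
  wrapper->SetAlignedPointerInInternalField(0, backend);
  handle->Reset(isolate, wrapper);
  // kInternalFields asks V8 to capture the first two internal-field pointers
  // for the callback, which is what GetInternalField(0) reads above.
  handle->SetWeak(handle, DeleteBackend,
                  v8::WeakCallbackType::kInternalFields);
}
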
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
index a8db4d18a6..2a4c208601 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/i18n.h
@@ -51,8 +51,7 @@ class DateFormat {
// Release memory we allocated for the DateFormat once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteDateFormat(
- const v8::WeakCallbackData<v8::Value, void>& data);
+ static void DeleteDateFormat(const v8::WeakCallbackInfo<void>& data);
private:
DateFormat();
@@ -75,8 +74,7 @@ class NumberFormat {
// Release memory we allocated for the NumberFormat once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteNumberFormat(
- const v8::WeakCallbackData<v8::Value, void>& data);
+ static void DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data);
private:
NumberFormat();
@@ -98,8 +96,7 @@ class Collator {
// Release memory we allocated for the Collator once the JS object that holds
// the pointer gets garbage collected.
- static void DeleteCollator(
- const v8::WeakCallbackData<v8::Value, void>& data);
+ static void DeleteCollator(const v8::WeakCallbackInfo<void>& data);
private:
Collator();
@@ -121,8 +118,7 @@ class BreakIterator {
// Release memory we allocated for the BreakIterator once the JS object that
// holds the pointer gets garbage collected.
- static void DeleteBreakIterator(
- const v8::WeakCallbackData<v8::Value, void>& data);
+ static void DeleteBreakIterator(const v8::WeakCallbackInfo<void>& data);
private:
BreakIterator();
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index cafa6763fa..281c3ef932 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -47,6 +47,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSimd128() { return false; }
static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;
@@ -81,11 +82,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Memory::Address_at(pc_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -105,35 +101,6 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
- IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Memory::Address_at(pc_) = updated_reference;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
- }
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(pc_);
@@ -157,6 +124,7 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
}
@@ -283,7 +251,7 @@ void RelocInfo::WipeOut() {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -361,7 +329,6 @@ Immediate::Immediate(Handle<Object> handle) {
// Verify that all Objects referred to by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -400,7 +367,6 @@ void Assembler::emit(Handle<Object> handle) {
AllowDeferredHandleDereference heap_object_check;
// Verify that all Objects referred to by code are NOT in new space.
Object* obj = *handle;
- DCHECK(!isolate()->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
emit(reinterpret_cast<intptr_t>(handle.location()),
RelocInfo::EMBEDDED_OBJECT);
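
One change above is easy to miss: RelocInfo::Visit gains a template parameter, so the visitor is now dispatched statically instead of through a virtual ObjectVisitor interface, which lets the per-mode handlers inline. A compilable miniature of that refactoring (stand-in types, not V8's):

#include <cstdio>

struct EmbeddedObjectSlot {
  const char* name;
};

// Any type with a matching VisitEmbeddedObject member works; no common base
// class or virtual call is involved.
template <typename ObjectVisitor>
void Visit(EmbeddedObjectSlot* slot, ObjectVisitor* visitor) {
  visitor->VisitEmbeddedObject(slot);  // resolved at compile time
}

struct PrintingVisitor {
  void VisitEmbeddedObject(EmbeddedObjectSlot* slot) {
    std::printf("visiting %s\n", slot->name);
  }
};

int main() {
  EmbeddedObjectSlot slot{"some-heap-object"};
  PrintingVisitor v;
  Visit(&slot, &v);  // instantiates Visit<PrintingVisitor>
  return 0;
}
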
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 680c40c6a1..4d3195957e 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -186,6 +186,30 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Memory::Address_at(pc_) = address;
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Memory::uint32_at(pc_) = size;
+}
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -271,17 +295,11 @@ Register Operand::reg() const {
#define EMIT(x) \
*pc_++ = (x)
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- positions_recorder_(this) {
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it; see CodePatcher::CodePatcher(...).
+ : AssemblerBase(isolate, buffer, buffer_size) {
+// Clear the buffer in debug mode unless it was provided by the
+// caller, in which case we can't be sure it's okay to overwrite
+// existing code in it; see CodePatcher::CodePatcher(...).
#ifdef DEBUG
if (own_buffer_) {
memset(buffer_, 0xCC, buffer_size_); // int3
@@ -289,17 +307,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
#endif
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
}
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
- reloc_info_writer.Finish();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -308,6 +321,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
desc->constant_pool_size = 0;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
@@ -681,6 +696,45 @@ void Assembler::xchg(Register dst, const Operand& src) {
emit_operand(dst, src);
}
+void Assembler::xchg_b(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x86);
+ emit_operand(reg, op);
+}
+
+void Assembler::xchg_w(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x87);
+ emit_operand(reg, op);
+}
+
+void Assembler::lock() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF0);
+}
+
+void Assembler::cmpxchg(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB1);
+ emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_b(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB0);
+ emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_w(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xB1);
+ emit_operand(src, dst);
+}
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
@@ -1468,7 +1522,6 @@ void Assembler::bind(Label* L) {
void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
if (L->is_bound()) {
const int long_size = 5;
@@ -1486,7 +1539,6 @@ void Assembler::call(Label* L) {
void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
DCHECK(!RelocInfo::IsCodeTarget(rmode));
EMIT(0xE8);
@@ -1505,7 +1557,6 @@ int Assembler::CallSize(const Operand& adr) {
void Assembler::call(const Operand& adr) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(edx, adr);
@@ -1520,7 +1571,6 @@ int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode)
|| rmode == RelocInfo::CODE_AGE_SEQUENCE);
@@ -2342,6 +2392,26 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::movups(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x11);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movups(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x10);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movups(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x11);
+ emit_sse_operand(src, dst);
+}
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
DCHECK(is_uint8(imm8));
@@ -2991,31 +3061,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitCoverageLog() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void LogGeneratedCodeCoverage(const char* file_line) {
- const char* return_address = (&file_line)[-1];
- char* push_insn = const_cast<char*>(return_address - 12);
- push_insn[0] = 0xeb; // Relative branch insn.
- push_insn[1] = 13; // Skip over coverage insns.
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", file_line);
- fflush(coverage_log);
- }
-}
-
-#endif
-
} // namespace internal
} // namespace v8
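
Among the assembler additions above, lock() and the cmpxchg family are the building blocks for atomics: 0xF0 is the LOCK prefix, 0x0F 0xB1 encodes CMPXCHG r/m32, r32, and prefixing that opcode with 0x66 gives the 16-bit form. Below is a toy emitter reproducing those byte sequences (assumed simplifications: only a single ModRM byte for a [base] memory operand with mod = 00, so base must not be esp or ebp; V8's emit_operand handles the full addressing forms):

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> code;

void Emit(uint8_t b) { code.push_back(b); }

uint8_t ModRM(int mod, int reg, int rm) {
  return static_cast<uint8_t>((mod << 6) | (reg << 3) | rm);
}

void lock() { Emit(0xF0); }  // only legal before a memory-operand instruction

void cmpxchg(int base, int reg) {  // CMPXCHG [base], reg
  Emit(0x0F);
  Emit(0xB1);
  Emit(ModRM(0, reg, base));  // mod = 00: [base], no displacement
}

void cmpxchg_w(int base, int reg) {  // 16-bit form behind the 0x66 prefix
  Emit(0x66);
  cmpxchg(base, reg);
}

int main() {
  const int ecx = 1, ebx = 3;
  // lock cmpxchg [ecx], ebx -- atomically: if EAX == [ecx] then [ecx] = EBX,
  // otherwise EAX = [ecx]; EAX ends up holding the old memory value.
  lock();
  cmpxchg(ecx, ebx);
  for (uint8_t b : code) std::printf("%02x ", b);  // f0 0f b1 19
  std::printf("\n");
  return 0;
}
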
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 5105ff5a4e..a1dc4b62be 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -74,6 +74,9 @@ namespace internal {
V(xmm6) \
V(xmm7)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(xmm1) \
V(xmm2) \
@@ -121,8 +124,6 @@ struct Register {
Register r = {code};
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -146,8 +147,9 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
+static const bool kSimpleFPAliasing = true;
-struct DoubleRegister {
+struct XMMRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -158,12 +160,11 @@ struct DoubleRegister {
static const int kMaxNumRegisters = Code::kAfterLast;
- static DoubleRegister from_code(int code) {
- DoubleRegister result = {code};
+ static XMMRegister from_code(int code) {
+ XMMRegister result = {code};
return result;
}
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
int code() const {
@@ -171,23 +172,23 @@ struct DoubleRegister {
return reg_code;
}
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
-
- const char* ToString();
+ bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
int reg_code;
};
+typedef XMMRegister FloatRegister;
+
+typedef XMMRegister DoubleRegister;
+
+typedef XMMRegister Simd128Register;
+
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-typedef DoubleRegister Simd128Register;
-
-typedef DoubleRegister XMMRegister;
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -655,6 +656,16 @@ class Assembler : public AssemblerBase {
// Exchange
void xchg(Register dst, Register src);
void xchg(Register dst, const Operand& src);
+ void xchg_b(Register reg, const Operand& op);
+ void xchg_w(Register reg, const Operand& op);
+
+ // Lock prefix
+ void lock();
+
+ // CompareExchange
+ void cmpxchg(const Operand& dst, Register src);
+ void cmpxchg_b(const Operand& dst, Register src);
+ void cmpxchg_w(const Operand& dst, Register src);
// Arithmetic
void adc(Register dst, int32_t imm32);
@@ -948,6 +959,9 @@ class Assembler : public AssemblerBase {
void ucomiss(XMMRegister dst, XMMRegister src) { ucomiss(dst, Operand(src)); }
void ucomiss(XMMRegister dst, const Operand& src);
void movaps(XMMRegister dst, XMMRegister src);
+ void movups(XMMRegister dst, XMMRegister src);
+ void movups(XMMRegister dst, const Operand& src);
+ void movups(const Operand& dst, XMMRegister src);
void shufps(XMMRegister dst, XMMRegister src, byte imm8);
void maxss(XMMRegister dst, XMMRegister src) { maxss(dst, Operand(src)); }
@@ -1432,7 +1446,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
@@ -1454,10 +1468,6 @@ class Assembler : public AssemblerBase {
static bool IsNop(Address addr);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
int relocation_writer_size() {
return (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
@@ -1563,9 +1573,6 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
-
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
};
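
The register declarations in assembler-ia32.h rely on the "X macro" pattern: a single list macro such as DOUBLE_REGISTERS(V) is expanded with different per-entry macros to generate the enum codes and the named constants in lockstep, and the patch then layers plain typedefs (FloatRegister, DoubleRegister, Simd128Register) over XMMRegister because ia32 uses one register bank for all three. A compilable miniature of the same idea (MY_REGISTERS is an illustrative stand-in):

#include <cstdio>

#define MY_REGISTERS(V) \
  V(xmm0)               \
  V(xmm1)               \
  V(xmm2)

struct XMMRegister {
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    MY_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
    kAfterLast  // doubles as the register count
  };
  int reg_code;
};

// The same list expanded again, defining one named constant per register so
// the codes and the constants can never drift apart.
#define DECLARE_REGISTER(R) const XMMRegister R = {XMMRegister::kCode_##R};
MY_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER

typedef XMMRegister DoubleRegister;  // alias, as in the patch

int main() {
  std::printf("xmm2 has code %d of %d\n", xmm2.reg_code,
              static_cast<int>(XMMRegister::kAfterLast));
  return 0;
}
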
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 53b35a3a84..6f2fb97908 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -22,67 +22,16 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- // register state
- // eax -- number of arguments
- // edi -- constructor function
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ pop(ecx);
+ __ mov(MemOperand(esp, eax, times_4, 0), edi);
+ __ push(edi);
+ __ push(ebx);
+ __ push(ecx);
+ __ add(eax, Immediate(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -90,21 +39,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -386,10 +326,8 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- Factory* factory = isolate()->factory();
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(eax));
- const Register base = edx;
const Register scratch = ecx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
@@ -402,38 +340,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(scratch, Immediate(1));
__ Cvtsi2sd(double_result, scratch);
- if (exponent_type() == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack.
- __ mov(base, Operand(esp, 2 * kPointerSize));
- __ mov(exponent, Operand(esp, 1 * kPointerSize));
-
- __ JumpIfSmi(base, &base_is_smi, Label::kNear);
- __ cmp(FieldOperand(base, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
-
- __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ SmiUntag(base);
- __ Cvtsi2sd(double_base, base);
-
- __ bind(&unpack_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movsd(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiUntag(exponent);
__ jmp(&int_exponent);
@@ -457,79 +364,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(exponent, Immediate(0x1));
__ j(overflow, &call_runtime);
- if (exponent_type() == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label continue_sqrt, continue_rsqrt, not_plus_half;
- // Test for 0.5.
- // Load double_scratch with 0.5.
- __ mov(scratch, Immediate(0x3F000000u));
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_sqrt, Label::kNear);
- __ j(carry, &continue_sqrt, Label::kNear);
-
- // Set result to Infinity in the special case.
- __ xorps(double_result, double_result);
- __ subsd(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&continue_sqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_scratch, double_scratch);
- __ addsd(double_scratch, double_base); // Convert -0 to +0.
- __ sqrtsd(double_result, double_scratch);
- __ jmp(&done);
-
- // Test for -0.5.
- __ bind(&not_plus_half);
- // Load double_exponent with -0.5 by subtracting 1.
- __ subsd(double_scratch, double_result);
- // Already ruled out NaNs for exponent.
- __ ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &fast_power, Label::kNear);
-
- // Calculates reciprocal of square root of base. Check for the special
- // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- // According to IEEE-754, single-precision -Infinity has the highest
- // 9 bits set and the lowest 23 bits cleared.
- __ mov(scratch, 0xFF800000u);
- __ movd(double_scratch, scratch);
- __ cvtss2sd(double_scratch, double_scratch);
- __ ucomisd(double_base, double_scratch);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_rsqrt, Label::kNear);
- __ j(carry, &continue_rsqrt, Label::kNear);
-
- // Set result to 0 in the special case.
- __ xorps(double_result, double_result);
- __ jmp(&done);
-
- __ bind(&continue_rsqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(double_exponent, double_exponent);
- __ addsd(double_exponent, double_base); // Convert -0 to +0.
- __ sqrtsd(double_exponent, double_exponent);
- __ divsd(double_result, double_exponent);
- __ jmp(&done);
- }
-
// Using FPU instructions to calculate power.
Label fast_power_failed;
__ bind(&fast_power);
@@ -619,39 +453,25 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
- __ ret(2 * kPointerSize);
- } else {
- __ bind(&call_runtime);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(4, scratch);
- __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
- __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 4);
- }
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movsd(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
-
- __ bind(&done);
- __ ret(0);
+ __ bind(&call_runtime);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(4, scratch);
+ __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 4);
}
-}
+ // Return value is in st(0) on ia32.
+ // Store it into the (fixed) result register.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ __ bind(&done);
+ __ ret(0);
+}
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
@@ -691,7 +511,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ ret(0);
@@ -991,13 +810,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(edx, Immediate(2)); // edx was a smi.
// edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- // Check that the fourth object is a JSArray object.
+ // Load last_match_info which is still known to be a fast-elements JSObject.
+ // Check that the fourth object is a JSObject.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
__ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
+ // Check that the object has fast elements.
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
__ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(eax, factory->fixed_array_map());
@@ -1458,9 +1277,11 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
__ push(edi);
__ push(edx);
__ push(ebx);
+ __ push(esi);
__ CallStub(stub);
+ __ pop(esi);
__ pop(ebx);
__ pop(edx);
__ pop(edi);
@@ -1480,6 +1301,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_increment_count, done_initialize_count;
// Load the cache state into ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1492,7 +1314,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// type-feedback-vector.h).
Label check_allocation_site;
__ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
- __ j(equal, &done, Label::kFar);
+ __ j(equal, &done_increment_count, Label::kFar);
__ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1515,7 +1337,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &megamorphic);
- __ jmp(&done, Label::kFar);
+ __ jmp(&done_increment_count, Label::kFar);
__ bind(&miss);
@@ -1544,11 +1366,25 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
- __ jmp(&done);
+ __ jmp(&done_initialize_count);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
+ __ bind(&done_initialize_count);
+
+ // Initialize the call counter.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&done);
+
+ __ bind(&done_increment_count);
+ // Increment the call count for monomorphic function calls.
+ __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(1)));
+
__ bind(&done);
}
@@ -1612,7 +1448,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// Increment the call count for monomorphic function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ Immediate(Smi::FromInt(1)));
__ mov(ebx, ecx);
__ mov(edx, edi);
@@ -1660,7 +1496,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ Immediate(Smi::FromInt(1)));
__ bind(&call_function);
__ Set(eax, argc);
@@ -1731,7 +1567,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Initialize the call counter.
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ Immediate(Smi::FromInt(1)));
// Store the function. Use a stub since we need a frame for allocation.
// ebx - vector
@@ -1741,7 +1577,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(isolate);
__ push(edi);
+ __ push(esi);
__ CallStub(&create_stub);
+ __ pop(esi);
__ pop(edi);
}
@@ -1785,13 +1623,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1830,13 +1667,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
if (argv_in_register()) {
DCHECK(!save_doubles());
+ DCHECK(!is_builtin_exit());
__ EnterApiExitFrame(arg_stack_space);
// Move argc and argv into the correct registers.
__ mov(esi, ecx);
__ mov(edi, eax);
} else {
- __ EnterExitFrame(arg_stack_space, save_doubles());
+ __ EnterExitFrame(
+ arg_stack_space, save_doubles(),
+ is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
// ebx: pointer to C function (C callee-saved)
@@ -2007,10 +1847,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&invoke);
__ PushStackHandler();
- // Clear any pending exceptions.
- __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
// Fake a receiver (NULL).
__ push(Immediate(0)); // receiver
@@ -2057,129 +1893,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = edx; // Object (lhs).
- Register const function = eax; // Function (rhs).
- Register const object_map = ecx; // Map of {object}.
- Register const function_map = ebx; // Map of {function}.
- Register const function_prototype = function_map; // Prototype of {function}.
- Register const scratch = edi;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
- __ LoadRoot(eax, Heap::kFalseValueRootIndex);
- __ ret(0);
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ mov(function_prototype,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- Register const function_prototype_map = scratch;
- __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
- __ j(not_equal, &function_prototype_valid, Label::kNear);
- __ mov(function_prototype,
- FieldOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Label done, loop, fast_runtime_fallback;
- __ mov(eax, isolate()->factory()->true_value());
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &fast_runtime_fallback, Label::kNear);
- // Check if the current object is a Proxy.
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- __ j(equal, &fast_runtime_fallback, Label::kNear);
-
- __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object, function_prototype);
- __ j(equal, &done, Label::kNear);
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ cmp(object, isolate()->factory()->null_value());
- __ j(not_equal, &loop);
- __ mov(eax, isolate()->factory()->false_value());
-
- __ bind(&done);
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // Found Proxy or access check needed: Call the runtime.
- __ bind(&fast_runtime_fallback);
- __ PopReturnAddressTo(scratch);
- __ Push(object);
- __ Push(function_prototype);
- __ PushReturnAddressFrom(scratch);
- // Invalidate the instanceof cache.
- __ Move(eax, Immediate(Smi::FromInt(0)));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ PopReturnAddressTo(scratch);
- __ Push(object);
- __ Push(function);
- __ PushReturnAddressFrom(scratch);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -2236,13 +1949,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
}
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2575,77 +2282,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// ecx: sub string length (smi)
// edx: from index (smi)
StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
- &runtime, STRING_INDEX_IS_NUMBER,
- RECEIVER_IS_STRING);
+ &runtime, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ ret(3 * kPointerSize);
generator.SkipSlow(masm, &runtime);
}
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label not_smi;
- __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
- __ Ret();
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ Ret();
- __ bind(&not_heap_number);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in eax.
- __ AssertNotNumber(eax);
-
- Label not_string;
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
- // eax: object
- // edi: object map
- __ j(above_equal, &not_string, Label::kNear);
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
- __ bind(&not_string);
-
- Label not_oddball;
- __ CmpInstanceType(edi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in eax.
- __ AssertString(eax);
-
- // Check if string has a cached array index.
- Label runtime;
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- __ j(not_zero, &runtime, Label::kNear);
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
- __ Ret();
-
- __ bind(&runtime);
- __ PopReturnAddressTo(ecx); // Pop return address.
- __ Push(eax); // Push argument.
- __ PushReturnAddressFrom(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in eax.
Label is_number;
@@ -2852,7 +2494,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load ecx with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ mov(ecx, handle(isolate()->heap()->undefined_value()));
+ __ mov(ecx, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3683,14 +3325,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -3842,10 +3484,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ j(not_equal, &miss);
__ push(slot);
__ push(vector);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, vector, scratch);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
+ vector, scratch);
__ pop(vector);
__ pop(slot);
@@ -3911,27 +3551,21 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
KeyedLoadIC::GenerateMiss(masm);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
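The trampoline/stub split renamed here follows a fixed shape: the trampoline only materializes the feedback vector into its register and then runs the shared body, which is also reachable directly. Schematically, with hypothetical helpers standing in for the macro-assembler calls:

    #include <cstdio>

    static void EmitLoadTypeFeedbackVector() { std::puts("load feedback vector"); }
    static void GenerateImpl(bool in_frame) {
      std::printf("shared IC body, in_frame=%d\n", in_frame);
    }

    void GenerateTrampoline() {   // cf. StoreICTrampolineStub::Generate
      EmitLoadTypeFeedbackVector();
      GenerateImpl(/*in_frame=*/true);
    }
    void GenerateDirect() {       // cf. StoreICStub::Generate
      GenerateImpl(/*in_frame=*/false);
    }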
@@ -3969,7 +3603,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
// found, now call handler.
Register handler = feedback;
- DCHECK(handler.is(VectorStoreICDescriptor::ValueRegister()));
+ DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
__ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ pop(vector);
__ pop(receiver);
@@ -4029,7 +3663,7 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
Register slot, Register weak_cell,
Label* miss) {
// The store ic value is on the stack.
- DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
+ DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
ExternalReference virtual_register =
ExternalReference::virtual_handler_register(masm->isolate());
@@ -4067,13 +3701,12 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(Operand::StaticVariable(virtual_register));
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
- Register key = VectorStoreICDescriptor::NameRegister(); // ecx
- Register value = VectorStoreICDescriptor::ValueRegister(); // eax
- Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
- Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
+ Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
+ Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
__ push(value);
@@ -4103,10 +3736,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ pop(value);
__ push(slot);
__ push(vector);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
- receiver, key, slot, no_reg);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
+ no_reg);
__ pop(vector);
__ pop(slot);
Label no_pop_miss;
@@ -4118,13 +3749,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
StoreIC::GenerateMiss(masm);
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4237,13 +3866,12 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ jmp(&compare_map);
}
-
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
- Register key = VectorStoreICDescriptor::NameRegister(); // ecx
- Register value = VectorStoreICDescriptor::ValueRegister(); // eax
- Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
- Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
+ Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
+ Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
__ push(value);
@@ -4466,19 +4094,14 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
@@ -4486,8 +4109,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
@@ -4507,13 +4128,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4625,7 +4248,7 @@ void InternalArrayConstructorStub::GenerateCase(
__ TailCallStub(&stub1);
__ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN);
}
@@ -4714,16 +4337,16 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ mov(Operand(eax, JSObject::kMapOffset), ecx);
- __ mov(Operand(eax, JSObject::kPropertiesOffset),
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
masm->isolate()->factory()->empty_fixed_array());
- __ mov(Operand(eax, JSObject::kElementsOffset),
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
masm->isolate()->factory()->empty_fixed_array());
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+ __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
// ----------- S t a t e -------------
- // -- eax : result (untagged)
+ // -- eax : result (tagged)
// -- ebx : result fields (untagged)
// -- edi : result end (untagged)
// -- ecx : initial map
@@ -4741,10 +4364,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
// Initialize all in-object fields with undefined.
__ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
__ InitializeFieldsWithFiller(ebx, edi, edx);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ inc(eax);
__ Ret();
}
__ bind(&slack_tracking);
@@ -4767,10 +4386,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(ebx, edx, edi);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ inc(eax);
-
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4801,10 +4416,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(ecx);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ dec(eax);
__ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
__ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ dec(edi);
__ jmp(&done_allocate);
// Fall back to %NewObject.
@@ -4826,19 +4441,19 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(edi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make edx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(edx, ebp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make edx point to the JavaScript frame.
+ __ mov(edx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
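This hunk replaces an unbounded walk over caller frames with a single conditional hop: skip_stub_frame() is now known statically, so only a debug-mode assertion remains. A sketch of both behaviours over a linked list of frames (illustrative types, not V8's frame classes):

    #include <cassert>

    struct Frame { Frame* caller; const void* function; };

    // Old behaviour: walk caller frames until the function slot matches.
    Frame* FindJsFrameOld(Frame* fp, const void* fn) {
      while (fp->function != fn) fp = fp->caller;
      return fp;
    }

    // New behaviour: at most one statically-known hop, then a debug-only check.
    Frame* FindJsFrameNew(Frame* fp, const void* fn, bool skip_stub_frame) {
      if (skip_stub_frame) fp = fp->caller;
      assert(fp->function == fn);  // mirrors the FLAG_debug_code block
      return fp;
    }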
// Check if we have rest parameters (only possible if we have an
@@ -4868,7 +4483,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
  // Setup the rest parameter array in eax.
@@ -4910,7 +4525,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label allocate, done_allocate;
__ lea(ecx, Operand(eax, times_half_pointer_size,
JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in edx.
@@ -4946,8 +4561,11 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ mov(eax, edi);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(ecx);
@@ -4960,6 +4578,22 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ Pop(eax);
}
__ jmp(&done_allocate);
+
+ // Fall back to %NewRestParameter.
+ __ bind(&too_big_for_new_space);
+ __ PopReturnAddressTo(ecx);
+ // We reload the function from the caller frame due to register pressure
+ // within this stub. This is the slow path, hence reloading is preferable.
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+ } else {
+ __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ }
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kNewRestParameter);
}
}
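The new too_big_for_new_space guard reflects that new space only holds regular-sized objects; larger allocations must go to the full runtime, which can use large-object space. A sketch of the resulting dispatch (the size limit shown is illustrative, not the real Page::kMaxRegularHeapObjectSize):

    #include <cstddef>

    constexpr size_t kMaxRegularHeapObjectSize = 512 * 1024;  // illustrative

    enum class AllocPath { kInlineNewSpace, kRuntimeNewSpace, kRuntimeFull };

    // Mirrors the stub's fallback chain: bump-pointer fast path, then
    // %AllocateInNewSpace, then the full runtime for oversized requests.
    AllocPath ChooseAllocPath(size_t size, bool inline_alloc_failed) {
      if (!inline_alloc_failed) return AllocPath::kInlineNewSpace;
      if (size > kMaxRegularHeapObjectSize) return AllocPath::kRuntimeFull;
      return AllocPath::kRuntimeNewSpace;
    }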
@@ -4973,35 +4607,50 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(edi);
+ // Make ecx point to the JavaScript frame.
+ __ mov(ecx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- // ecx : number of parameters (tagged)
+ // ebx : number of parameters (tagged)
// edx : parameters pointer
// edi : function
+ // ecx : JavaScript frame pointer.
// esp[0] : return address
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
- __ mov(ebx, ecx);
- __ push(ecx);
+ __ mov(ecx, ebx);
+ __ push(ebx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ mov(ebx, ecx);
- __ push(ecx);
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ push(ebx);
+ __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
@@ -5035,7 +4684,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+ __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
// eax = address of new object(s) (tagged)
// ecx = argument count (smi-tagged)
@@ -5214,19 +4863,19 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(edi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make edx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(edx, ebp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make edx point to the JavaScript frame.
+ __ mov(edx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -5265,7 +4914,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ lea(ecx,
Operand(eax, times_half_pointer_size,
JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in edx.
@@ -5301,8 +4950,11 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ mov(eax, edi);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(ecx);
@@ -5315,37 +4967,22 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Pop(eax);
}
__ jmp(&done_allocate);
-}
-
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = esi;
- Register slot_reg = ebx;
- Register result_reg = eax;
- Label slow_case;
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = result_reg;
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ PopReturnAddressTo(ecx);
+ // We reload the function from the caller frame due to register pressure
+ // within this stub. This is the slow path, hence reloading is preferable.
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+ } else {
+ __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
}
-
- // Load the PropertyCell value at the specified slot.
- __ mov(result_reg, ContextOperand(context_reg, slot_reg));
- __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
-
- // Check that value is not the_hole.
- __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow_case, Label::kNear);
- __ Ret();
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Pop(result_reg); // Pop return address.
- __ Push(slot_reg);
- __ Push(result_reg); // Push return address.
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
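The deleted LoadGlobalViaContextStub walked depth() links up the context chain to the script context and loaded a PropertyCell's value, deferring to the runtime when it found the hole. An illustrative C++ equivalent that collapses the PropertyCell indirection into one slot read:

    // Simplified types; V8 loads a PropertyCell and then its value field.
    struct ScriptContext { ScriptContext* previous; void** slots; };

    void* LoadGlobalViaContext(ScriptContext* ctx, int depth, int slot,
                               const void* the_hole) {
      for (int i = 0; i < depth; ++i) ctx = ctx->previous;  // to script context
      void* value = ctx->slots[slot];
      if (value == the_hole) return nullptr;  // hole: defer to the runtime
      return value;
    }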
@@ -5686,9 +5323,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
__ pop(return_address);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+
// context save.
__ push(context);
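This hunk grows the implicit argument block for API calls from 7 to 8 slots by adding a new-target slot, which this stub always fills with undefined. The indices asserted above pin holder, isolate, return-value-default, and new target; the middle names in this sketch are assumptions for illustration:

    // Indices 0-2 and 7-8 match the STATIC_ASSERTs above; the rest are assumed.
    enum FunctionCallbackArgIndex {
      kHolder = 0,
      kIsolate = 1,
      kReturnValueDefaultValue = 2,
      kReturnValue = 3,   // assumed
      kData = 4,          // assumed
      kCallee = 5,        // assumed
      kContextSave = 6,   // assumed
      kNewTarget = 7,     // new slot, pushed as undefined by this stub
      kArgsLength = 8,
    };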
@@ -5733,7 +5375,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
@@ -5744,8 +5386,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ mov(ApiParameterOperand(3), scratch);
// FunctionCallbackInfo::length_.
__ Move(ApiParameterOperand(4), Immediate(argc()));
- // FunctionCallbackInfo::is_construct_call_.
- __ Move(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
@@ -5765,8 +5405,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
Operand return_value_operand(ebp, return_value_offset * kPointerSize);
int stack_space = 0;
- Operand is_construct_call_operand = ApiParameterOperand(5);
- Operand* stack_space_operand = &is_construct_call_operand;
+ Operand length_operand = ApiParameterOperand(4);
+ Operand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -5777,14 +5417,34 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- edx : api_function_address
- // -----------------------------------
- DCHECK(edx.is(ApiGetterDescriptor::function_address()));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = ebx;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ __ pop(scratch); // Pop return address to extend the frame.
+ __ push(receiver);
+ __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
+ __ PushRoot(Heap::kUndefinedValueRootIndex); // ReturnValue
+ // ReturnValue default value
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ push(Immediate(ExternalReference::isolate_address(isolate())));
+ __ push(holder);
+ __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
+ __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch); // Restore return address.
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5794,9 +5454,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// active) in non-GCed stack space.
const int kApiArgc = 3 + 1;
- Register api_function_address = edx;
- Register scratch = ebx;
-
// Load address of v8::PropertyAccessorInfo::args_ array.
__ lea(scratch, Operand(esp, 2 * kPointerSize));
@@ -5806,25 +5463,30 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Operand info_object = ApiParameterOperand(3);
__ mov(info_object, scratch);
+ // Name as handle.
__ sub(scratch, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(0), scratch); // name.
+ __ mov(ApiParameterOperand(0), scratch);
+ // Arguments pointer.
__ lea(scratch, info_object);
- __ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+ __ mov(ApiParameterOperand(1), scratch);
// Reserve space for optional callback address parameter.
Operand thunk_last_arg = ApiParameterOperand(2);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+ Register function_address = edx;
+ __ mov(function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- thunk_last_arg, kStackUnwindSpace, nullptr,
- return_value_operand, NULL);
+ CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
+ kStackUnwindSpace, nullptr, return_value_operand,
+ NULL);
}
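Instead of receiving a raw C function address in edx, the getter stub now receives the AccessorInfo object in a register and resolves the getter itself: the js_getter field holds a Foreign whose payload is the function address. A sketch with deliberately simplified object shapes (the real fields are tagged heap values):

    struct Foreign { void* foreign_address; };
    struct AccessorInfo { const void* name; const void* data; Foreign* js_getter; };

    using GetterAddress = void (*)();

    GetterAddress ResolveGetter(const AccessorInfo* info) {
      // kJsGetterOffset -> Foreign -> kForeignAddressOffset in the stub above.
      return reinterpret_cast<GetterAddress>(info->js_getter->foreign_address);
    }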
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index fc813f50c1..c1878f0207 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -301,8 +301,8 @@ class RecordWriteStub: public PlatformCodeStub {
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumRegisters; i++) {
- Register candidate = Register::from_code(i);
- if (candidate.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
+ Register candidate = Register::from_code(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 2190531b43..18e53641e6 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -34,43 +34,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- {
- XMMRegister input = xmm1;
- XMMRegister result = xmm2;
- __ movsd(input, Operand(esp, 1 * kPointerSize));
- __ push(eax);
- __ push(ebx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
-
- __ pop(ebx);
- __ pop(eax);
- __ movsd(Operand(esp, 1 * kPointerSize), result);
- __ fld_d(Operand(esp, 1 * kPointerSize));
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-}
-
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
@@ -580,14 +543,14 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ push(eax);
__ push(ebx);
+ __ push(esi);
__ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
- AllocationFlags flags =
- static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+ AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
__ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
@@ -621,8 +584,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Call into runtime if GC is required.
__ bind(&gc_required);
+
// Restore registers before jumping into runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
__ pop(ebx);
__ pop(eax);
__ jmp(fail);
@@ -657,12 +621,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ sub(edi, Immediate(Smi::FromInt(1)));
__ j(not_sign, &loop);
+ // Restore registers.
+ __ pop(esi);
__ pop(ebx);
__ pop(eax);
- // Restore esi.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
__ bind(&only_change_map);
// eax: value
// ebx: target map
@@ -714,7 +677,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
// ebx: length of source FixedDoubleArray (smi-tagged)
__ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+ __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
// eax: destination FixedArray
// ebx: number of elements
@@ -928,64 +891,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-static Operand ExpConstant(int index) {
- return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- DCHECK(!input.is(double_scratch));
- DCHECK(!input.is(result));
- DCHECK(!result.is(double_scratch));
- DCHECK(!temp1.is(temp2));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label done;
-
- __ movsd(double_scratch, ExpConstant(0));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ ucomisd(input, ExpConstant(1));
- __ movsd(result, ExpConstant(2));
- __ j(above_equal, &done);
- __ movsd(double_scratch, ExpConstant(3));
- __ movsd(result, ExpConstant(4));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movd(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movsd(result, ExpConstant(6));
- __ mulsd(double_scratch, ExpConstant(5));
- __ subsd(double_scratch, input);
- __ subsd(result, double_scratch);
- __ movsd(input, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ mov(temp1, temp2);
- __ mulsd(result, ExpConstant(7));
- __ subsd(result, double_scratch);
- __ add(temp1, Immediate(0x1ff800));
- __ addsd(result, ExpConstant(8));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, 11);
- __ shl(temp1, 20);
- __ movd(input, temp1);
- __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movsd(double_scratch, Operand::StaticArray(
- temp2, times_8, ExternalReference::math_exp_log_table()));
- __ orps(input, double_scratch);
- __ mulsd(result, input);
- __ bind(&done);
-}
-
#undef __
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 133b1adbdf..685157ddb1 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -5,7 +5,6 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -29,19 +28,6 @@ class StringCharLoadGenerator : public AllStatic {
};
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 656d3e97c3..390f3a76a8 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -35,6 +35,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
int pc_offset = deopt_data->Pc(i)->value();
if (pc_offset == -1) continue;
+ pc_offset = pc_offset + 1; // We will encode the pc offset after the call.
DCHECK_GE(pc_offset, prev_pc_offset);
int pc_delta = pc_offset - prev_pc_offset;
// We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
@@ -196,8 +197,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index b669d82642..a0a4e1ceeb 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -8,6 +8,7 @@
#if V8_TARGET_ARCH_IA32
+#include "src/base/compiler-specific.h"
#include "src/disasm.h"
namespace disasm {
@@ -29,18 +30,19 @@ struct ByteMnemonic {
};
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
- {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER}, {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER}, {-1, "", UNSET_OP_ORDER}};
+ {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER}, {0x86, "xchg_b", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER}, {0x8A, "mov_b", REG_OPER_OP_ORDER},
+ {0x8B, "mov", REG_OPER_OP_ORDER}, {0x8D, "lea", REG_OPER_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic zero_operands_instr[] = {
{0xC3, "ret", UNSET_OP_ORDER},
@@ -281,7 +283,7 @@ class DisassemblerIA32 {
bool vex_128() {
DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
- return (checked & 4) != 1;
+ return (checked & 4) == 0;
}
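The old test compared (checked & 4) against 1, but a bitwise AND with 4 can only yield 0 or 4, so the comparison was vacuously true. The VEX.L bit selects 256-bit operands when set, so 128-bit means the bit is clear:

    // (byte & 4) is either 0 or 4, never 1, so "!= 1" always held.
    inline bool Vex128(unsigned char vex_byte) {
      return (vex_byte & 0x04) == 0;  // VEX.L clear => 128-bit operand size
    }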
bool vex_none() {
@@ -389,8 +391,7 @@ class DisassemblerIA32 {
int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
int AVXInstruction(byte* data);
- void AppendToBuffer(const char* format, ...);
-
+ PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
if (abort_on_unimplemented_) {
@@ -1231,6 +1232,10 @@ static const char* F0Mnem(byte f0byte) {
return "shrd"; // 3-operand version.
case 0xAB:
return "bts";
+ case 0xB0:
+ return "cmpxchg_b";
+ case 0xB1:
+ return "cmpxchg";
case 0xBC:
return "bsf";
case 0xBD:
@@ -1263,6 +1268,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
vex_byte0_ = *data;
vex_byte1_ = *(data + 1);
data += 2;
+ } else if (*data == 0xF0 /*lock*/) {
+ AppendToBuffer("lock ");
+ data++;
}
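x86 encodes lock as a one-byte 0xF0 prefix, so the disassembler can simply emit "lock " and keep decoding the instruction that follows; together with the new 0xB0/0xB1 table entries this yields mnemonics such as lock cmpxchg. The prefix handling in isolation:

    // Consume an optional LOCK prefix, then decode the instruction as usual.
    inline const char* ConsumeLockPrefix(const unsigned char*& data) {
      if (*data == 0xF0) {
        ++data;
        return "lock ";
      }
      return "";
    }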
bool processed = true; // Will be set to false if the current instruction
@@ -1274,7 +1282,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
const InstructionDesc& idesc = instruction_table_->Get(*data);
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
+ AppendToBuffer("%s", idesc.mnem);
data++;
break;
@@ -1414,6 +1422,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x10 || f0byte == 0x11) {
+ data += 2;
+ // movups xmm, xmm/m128
+ // movups xmm/m128, xmm
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movups ");
+ if (f0byte == 0x11) {
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ }
} else if (f0byte == 0x2e) {
data += 2;
int mod, regop, rm;
@@ -1495,6 +1517,18 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (f0byte == 0xB0) {
+ // cmpxchg_b
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else if (f0byte == 0xB1) {
+ // cmpxchg
+ data += 2;
+ data += PrintOperands(f0mnem, OPER_REG_OP_ORDER, data);
} else if (f0byte == 0xBC) {
data += 2;
int mod, regop, rm;
@@ -1615,6 +1649,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int imm = *reinterpret_cast<int16_t*>(data);
AppendToBuffer(",0x%x", imm);
data += 2;
+ } else if (*data == 0x87) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("xchg_w %s,", NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
} else if (*data == 0x89) {
data++;
int mod, regop, rm;
@@ -1889,6 +1929,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0xB1) {
+ data++;
+ data += PrintOperands("cmpxchg_w", OPER_REG_OP_ORDER, data);
} else {
UnimplementedInstruction();
}
@@ -2219,7 +2262,7 @@ static const char* const xmm_regs[8] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -2282,7 +2325,7 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
+ fprintf(f, "%p", static_cast<void*>(prev_pc));
fprintf(f, " ");
for (byte* bp = prev_pc; bp < pc; bp++) {
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 2748f907ac..f1972b9561 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -11,6 +11,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return edi;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return eax; }
const Register LoadDescriptor::ReceiverRegister() { return edx; }
const Register LoadDescriptor::NameRegister() { return ecx; }
@@ -22,13 +35,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
+const Register StoreDescriptor::SlotRegister() { return edi; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
const Register VectorStoreTransitionDescriptor::SlotRegister() {
return no_reg;
@@ -44,23 +53,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-const Register InstanceOfDescriptor::LeftRegister() { return edx; }
-const Register InstanceOfDescriptor::RightRegister() { return eax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
-
-const Register ApiGetterDescriptor::function_address() { return edx; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
const Register MathPowTaggedDescriptor::exponent() { return eax; }
@@ -80,13 +81,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, edx};
@@ -252,50 +246,37 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx};
+ Register registers[] = {edi, ebx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
- Register registers[] = {edi};
+ // ebx -- allocation site with elements kind
+ Register registers[] = {edi, ebx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {edi, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- allocation site with elements kind
+ Register registers[] = {edi, ebx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {eax};
@@ -322,6 +303,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // edx -- lhs
+ // eax -- rhs
+ // edi -- slot id
+ // ebx -- vector
+ Register registers[] = {edx, eax, edi, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -382,8 +379,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister };
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -418,6 +415,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // the value to pass to the generator
+ ebx, // the JSGeneratorObject to resume
+ edx // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index f9fd8d6a40..83c7ce8917 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1025,6 +1025,16 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Push(object);
+ CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAGeneratorObject);
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -1085,8 +1095,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+ mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -1120,8 +1130,27 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(ebp);
+ Move(ebp, esp);
+ Push(context);
+ Push(target);
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(target);
+ Pop(context);
+ leave();
+}
+
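EnterBuiltinFrame and LeaveBuiltinFrame bracket a minimal frame holding exactly the values a builtin needs restored. The layout implied by the pushes above, shown for orientation:

    // Stack layout after EnterBuiltinFrame (ia32, stack grows downward):
    //   [ebp + 4]  return address
    //   [ebp + 0]  saved ebp           <- ebp
    //   [ebp - 4]  context
    //   [ebp - 8]  target (JSFunction)
    //   [ebp - 12] argc                <- esp
    // LeaveBuiltinFrame pops in reverse order and tears down with leave.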
+void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
-void MacroAssembler::EnterExitFramePrologue() {
// Set up the frame structure on the stack.
DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -1130,7 +1159,7 @@ void MacroAssembler::EnterExitFramePrologue() {
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
- push(Immediate(Smi::FromInt(StackFrame::EXIT)));
+ push(Immediate(Smi::FromInt(frame_type)));
DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
@@ -1172,9 +1201,9 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}
-
-void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
- EnterExitFramePrologue();
+void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
+ StackFrame::Type frame_type) {
+ EnterExitFramePrologue(frame_type);
// Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
@@ -1187,7 +1216,7 @@ void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
void MacroAssembler::EnterApiExitFrame(int argc) {
- EnterExitFramePrologue();
+ EnterExitFramePrologue(StackFrame::EXIT);
EnterExitFrameEpilogue(argc, false);
}
@@ -1517,6 +1546,7 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1558,26 +1588,23 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if space is exhausted.
Register top_reg = result_end.is_valid() ? result_end : result;
+
if (!top_reg.is(result)) {
mov(top_reg, result);
}
add(top_reg, Immediate(object_size));
- j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+ }
- // Tag result if requested.
- bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if (tag_result) {
- sub(result, Immediate(object_size - kHeapObjectTag));
- } else {
- sub(result, Immediate(object_size));
- }
- } else if (tag_result) {
+ sub(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ // Tag the result.
DCHECK(kHeapObjectTag == 1);
inc(result);
}
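The skipped top-pointer update is the heart of allocation folding: a dominator allocation checks the limit for the combined size of a group of objects but leaves top untouched, and the folded allocations that follow (the new FastAllocate below) bump top without re-checking. A sketch of the invariant, not V8's heap API:

    #include <cstddef>
    #include <cstdint>

    struct NewSpaceSketch { uintptr_t top; uintptr_t limit; };

    // Dominator: check the limit for the group's combined size, but leave top
    // untouched so the folded allocations are carved out of the reservation.
    bool ReserveFolded(NewSpaceSketch& s, size_t combined_size) {
      return s.top + combined_size <= s.limit;  // false => call the GC
    }

    // Folded allocation: bump top with no limit check; the dominator already
    // guaranteed the space. Returns a tagged pointer (kHeapObjectTag == 1).
    uintptr_t FoldedAllocate(NewSpaceSketch& s, size_t size) {
      uintptr_t result = s.top;
      s.top += size;
      return result + 1;
    }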
@@ -1594,6 +1621,8 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1645,18 +1674,16 @@ void MacroAssembler::Allocate(int header_size,
} else {
DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
}
+
lea(result_end, Operand(element_count, element_size, header_size));
add(result_end, result);
- j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- if ((flags & TAG_OBJECT) != 0) {
- DCHECK(kHeapObjectTag == 1);
- inc(result);
- }
+ // Tag result.
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
- // Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
}
@@ -1668,6 +1695,7 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1711,18 +1739,65 @@ void MacroAssembler::Allocate(Register object_size,
mov(result_end, object_size);
}
add(result_end, result);
- j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- // Tag result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- DCHECK(kHeapObjectTag == 1);
- inc(result);
+ // Tag result.
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
+
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
+}
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch, flags);
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ lea(result_end, Operand(result, object_size));
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ lea(result_end, Operand(result, object_size, times_1, 0));
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
}
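Both FastAllocate variants share the DOUBLE_ALIGNMENT fix-up: ia32 only guarantees pointer (4-byte) alignment, so a misaligned top gets a one-word filler object before the real allocation starts on an 8-byte boundary. The fix-up in isolation:

    #include <cstdint>

    // Plant a one-word filler when the allocation top is only 4-byte aligned.
    uintptr_t AlignForDouble(uintptr_t top, uintptr_t one_pointer_filler_map) {
      if ((top & 7) != 0) {
        *reinterpret_cast<uintptr_t*>(top) = one_pointer_filler_map;
        top += 4;  // kDoubleSize / 2 on ia32
      }
      return top;
    }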
@@ -1733,7 +1808,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
MutableMode mode) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
Handle<Map> map = mode == MUTABLE
? isolate()->factory()->mutable_heap_number_map()
@@ -1759,15 +1834,9 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
+ REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1793,15 +1862,9 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
+ REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1821,7 +1884,7 @@ void MacroAssembler::AllocateOneByteString(Register result, int length,
// Allocate one-byte string in new space.
Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
- gc_required, TAG_OBJECT);
+ gc_required, NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1839,7 +1902,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1851,12 +1914,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1870,7 +1929,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1884,7 +1943,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1900,7 +1959,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch);
@@ -2160,11 +2220,12 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
+ bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
- CEntryStub ces(isolate(), 1);
+ CEntryStub ces(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2321,10 +2382,11 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
- j(equal, &skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+ j(less, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
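
The rewritten guard compares the debugger's last step action instead of reading a boolean flag. Under the enum ordering pinned by the STATIC_ASSERT (StepFrame > StepIn), the branch reduces to this scalar sketch:

    // Flood the function for StepIn and StepFrame; skip for weaker actions.
    if (last_step_action < StepIn) goto skip_flooding;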
@@ -2553,37 +2615,15 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- AllowDeferredHandleDereference embedding_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- mov(result, Operand::ForCell(cell));
- } else {
- mov(result, object);
- }
+ mov(result, object);
}
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- cmp(reg, Operand::ForCell(cell));
- } else {
- cmp(reg, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- push(Operand::ForCell(cell));
- } else {
- Push(object);
- }
+ cmp(reg, object);
}
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { Push(object); }
void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
Register scratch) {
@@ -2637,7 +2677,7 @@ void MacroAssembler::Move(Register dst, Register src) {
void MacroAssembler::Move(Register dst, const Immediate& x) {
- if (x.is_zero()) {
+ if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else {
mov(dst, x);
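
The added RelocInfo::IsNone() guard is a correctness fix: an immediate carrying relocation info may be patched to a non-zero value later, so only a plain, reloc-free zero may take the short form. For reference, the two ia32 encodings (illustrative bytes):

    31 c0             xor eax, eax   ; 2 bytes, not patchable, clobbers flags
    b8 00 00 00 00    mov eax, 0x0   ; 5 bytes, immediate can be relocated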
@@ -2912,15 +2952,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(edx, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
int3();
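
Sketched as pseudo-code, the new abort path passes the reason in a register and calls a builtin instead of entering the runtime (a hedged reading of the hunk, not literal V8 API):

    edx = Smi::FromInt(reason);       // bailout reason as a smi argument
    Call(builtins->Abort());          // builtin call works with or without a frame
    int3();                           // unreachable; traps if ever reached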
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index be11f66202..08cc7ceb64 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -19,18 +19,16 @@ const Register kReturnRegister1 = {Register::kCode_edx};
const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
+const Register kAllocateSizeRegister = {Register::kCode_edx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};
-// Spill slots used by interpreter dispatch calling convention.
-const int kInterpreterDispatchTableSpillSlot = -1;
-
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -243,7 +241,7 @@ class MacroAssembler: public Assembler {
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
// esi.
- void EnterExitFrame(int argc, bool save_doubles);
+ void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
void EnterApiExitFrame(int argc);
@@ -511,6 +509,23 @@ class MacroAssembler: public Assembler {
j(not_zero, not_smi_label, distance);
}
+ // Jump if the value cannot be represented by a smi.
+ inline void JumpIfNotValidSmiValue(Register value, Register scratch,
+ Label* on_invalid,
+ Label::Distance distance = Label::kFar) {
+ mov(scratch, value);
+ add(scratch, Immediate(0x40000000U));
+ j(sign, on_invalid, distance);
+ }
+
+ // Jump if the unsigned integer value cannot be represented by a smi.
+ inline void JumpIfUIntNotValidSmiValue(
+ Register value, Label* on_invalid,
+ Label::Distance distance = Label::kFar) {
+ cmp(value, Immediate(0x40000000U));
+ j(above_equal, on_invalid, distance);
+ }
+
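+
Both helpers rely on the same 0x40000000 trick: a 32-bit smi holds 31 signed bits, and adding 2^30 overflows into the sign bit exactly when the value lies outside [-2^30, 2^30). A self-contained sketch that can be compiled to verify the range logic (not V8 code):

    #include <cassert>
    #include <cstdint>

    // Mirrors JumpIfNotValidSmiValue: after adding 0x40000000, the sign bit
    // is set iff the value cannot be represented as a 31-bit smi.
    static bool IsValidSmiValue(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) +
                                  0x40000000U) >= 0;
    }

    int main() {
      assert(IsValidSmiValue(0));
      assert(IsValidSmiValue((1 << 30) - 1));    // largest smi
      assert(!IsValidSmiValue(1 << 30));         // one past the top
      assert(IsValidSmiValue(-(1 << 30)));       // smallest smi
      assert(!IsValidSmiValue(-(1 << 30) - 1));  // one below the bottom
    }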
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
@@ -566,6 +581,10 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -621,6 +640,14 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is currently only used for folded allocations. It just
+ // increments the top pointer without checking against the limit. This can
+ // only be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register result_end,
+ AllocationFlags flags);
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ AllocationFlags flags);
+
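+
In scalar form, the contract the comment describes is a bump-pointer allocation with no limit check; a sketch with hypothetical names:

    char* FastAllocateSketch(char** top, size_t object_size) {
      char* result = *top;
      *top += object_size;  // advance top only; caller proved the space exists
      return result + 1;    // apply kHeapObjectTag, as in Allocate() above
    }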
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
@@ -759,7 +786,8 @@ class MacroAssembler: public Assembler {
void CallCFunction(Register function, int num_arguments);
// Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext);
+ void JumpToExternalReference(const ExternalReference& ext,
+ bool builtin_exit_frame = false);
// ---------------------------------------------------------------------------
// Utilities
@@ -904,6 +932,9 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in eax and returns map with validated enum cache
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -945,7 +976,7 @@ class MacroAssembler: public Assembler {
Label::Distance done_distance,
const CallWrapper& call_wrapper);
- void EnterExitFramePrologue();
+ void EnterExitFramePrologue(StackFrame::Type frame_type);
void EnterExitFrameEpilogue(int argc, bool save_doubles);
void LeaveExitFrameEpilogue(bool restore_context);
@@ -1029,26 +1060,7 @@ inline Operand NativeContextOperand() {
return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* ia32_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(eax); \
- masm->popad(); \
- masm->popfd(); \
- } \
- masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index c99219201a..bb6b5e50d9 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -57,7 +57,7 @@ Register PropertyAccessCompiler::slot() const {
return LoadDescriptor::SlotRegister();
}
DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return VectorStoreICDescriptor::SlotRegister();
+ return StoreWithVectorDescriptor::SlotRegister();
}
@@ -66,7 +66,7 @@ Register PropertyAccessCompiler::vector() const {
return LoadWithVectorDescriptor::VectorRegister();
}
DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return VectorStoreICDescriptor::VectorRegister();
+ return StoreWithVectorDescriptor::VectorRegister();
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index 50c2cc7303..ecc5c08a59 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -58,7 +58,6 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Register vector() const;
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
- Register scratch3() const { return registers_[4]; }
static Register* GetCallingConvention(Code::Kind);
static Register* load_calling_convention();
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index d360f5a62b..9ce485ed46 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -19,19 +19,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r3, r0, r4, r5};
+ static Register registers[] = {receiver, name, r3, r0, r4};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r3, r4, r5};
+ static Register registers[] = {receiver, name, r3, r4};
return registers;
}
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index a3f23d3f22..4ed765e73f 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -203,9 +203,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -290,7 +292,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
@@ -332,17 +334,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
-}
-
-
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -437,28 +430,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- __ mov(scratch1, Operand(validity_cell));
- __ ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- __ b(ne, miss);
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ __ mov(scratch1, Operand(validity_cell));
+ __ ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+ __ b(ne, miss);
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ b(ne, miss);
}
// Keep track of the current object in register reg.
@@ -494,8 +484,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -505,33 +497,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldr(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ b(ne, miss);
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ ldr(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -545,17 +516,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
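
The structure that is now unconditional: one validity cell guards the whole prototype chain, so the per-hop weak-cell map checks in the deleted branches collapse into a single load-and-compare up front. Sketched:

    if (validity_cell->value() != Smi::FromInt(Map::kPrototypeChainValid))
      goto miss;  // some map on the chain changed; take the IC miss path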
@@ -597,61 +559,10 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
__ Ret();
}
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- __ push(receiver());
- // Push data from AccessorInfo.
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ Move(scratch2(), data);
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- }
- __ push(scratch2());
- __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
- __ Push(scratch2(), scratch2());
- __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch2(), reg);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
- __ push(name());
-
- // Abi for CallApiGetter
- Register getter_address_reg = ApiGetterDescriptor::function_address();
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ mov(getter_address_reg, Operand(ref));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -711,7 +622,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
@@ -729,7 +640,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ mov(ip, Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -744,7 +655,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -784,7 +695,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 14ed8b41a5..fee6ebf259 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -415,10 +415,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- receiver, key, r4, r5, r6, r9);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r4, r5,
+ r6, r9);
// Cache miss.
GenerateMiss(masm);
@@ -445,8 +443,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -626,10 +624,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
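
With Object.observe gone, the receiver check tests a single bit. In scalar form (sketch):

    // Previously also tested (1 << Map::kIsObserved).
    if (map->bit_field() & (1 << Map::kIsAccessCheckNeeded)) goto slow;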
@@ -671,8 +669,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Register temporary2 = r8;
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
Handle<TypeFeedbackVector> dummy_vector =
@@ -682,10 +680,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ mov(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, flags, receiver, key, r5, temporary2, r6, r9);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r5,
+ temporary2, r6, r9);
// Cache miss.
__ b(&miss);
@@ -734,26 +730,6 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateMiss(masm);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- DCHECK(receiver.is(r1));
- DCHECK(name.is(r2));
- DCHECK(StoreDescriptor::ValueRegister().is(r0));
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
-
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, name, r3, r4, r5, r6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -771,8 +747,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
DCHECK(value.is(r0));
- DCHECK(VectorStoreICDescriptor::VectorRegister().is(r3));
- DCHECK(VectorStoreICDescriptor::SlotRegister().is(r4));
+ DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r3));
+ DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r4));
__ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -845,8 +821,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
- cmp_instruction_address, delta);
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(cmp_instruction_address), delta);
}
Address patch_address =
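
The static_cast<void*> additions are a portability fix, not cosmetics: printf-style %p requires a void* argument, and passing any other pointer type is undefined behavior in C++. E.g.:

    PrintF("%p", address);                      // UB if address is not void*
    PrintF("%p", static_cast<void*>(address));  // well-defined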
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
index 86710eb29a..b0f93e32dc 100644
--- a/deps/v8/src/ic/arm/stub-cache-arm.cc
+++ b/deps/v8/src/ic/arm/stub-cache-arm.cc
@@ -14,16 +14,15 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register receiver, Register name,
- // Number of the cache entry, not scaled.
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits.
Register offset, Register scratch, Register scratch2,
Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
@@ -45,8 +44,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ add(offset_scratch, offset, Operand(offset, LSL, 1));
// Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
+ __ add(base_addr, offset_scratch, Operand(key_offset));
// Check that the key in the entry matches the name.
__ ldr(ip, MemOperand(base_addr, 0));
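
The single add suffices because `offset` now arrives pre-scaled by kCacheIndexShift (4 bytes, one ARM pointer). In scalar form (sketch):

    // An Entry is three pointers (12 bytes); offset == entry_index * 4.
    offset_scratch = offset + (offset << 1);       // offset * 3 == index * 12
    base_addr      = key_offset + offset_scratch;  // no further shift needed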
@@ -64,18 +62,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
scratch2 = no_reg;
__ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- // It's a nice optimization if this constant is encodable in the bic insn.
-
- uint32_t mask = Code::kFlagsNotUsedInLookup;
- DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
- __ bic(flags_reg, flags_reg, Operand(mask));
- __ cmp(flags_reg, Operand(flags));
- __ b(ne, &miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -91,21 +77,15 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ bind(&miss);
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
- Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
DCHECK(sizeof(Entry) == 12);
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Make sure that there are no register conflicts.
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
@@ -119,12 +99,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind)) {
+ if (IC::ICUseVector(ic_kind_)) {
Register vector, slot;
- if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
- vector = VectorStoreICDescriptor::VectorRegister();
- slot = VectorStoreICDescriptor::SlotRegister();
+ if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
+ vector = StoreWithVectorDescriptor::VectorRegister();
+ slot = StoreWithVectorDescriptor::SlotRegister();
} else {
+ DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
@@ -143,29 +124,23 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
- // Mask down the eor argument to the minimum to keep the immediate
- // ARM-encodable.
- __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- // Prefer and_ to ubfx here because ubfx takes 2 cycles.
- __ and_(scratch, scratch, Operand(mask));
+ __ eor(scratch, scratch, Operand(kPrimaryMagic));
+ __ mov(ip, Operand(kPrimaryTableSize - 1));
+ __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
+ extra3);
// Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ and_(scratch, scratch, Operand(mask2));
+ __ sub(scratch, scratch, Operand(name));
+ __ add(scratch, scratch, Operand(kSecondaryMagic));
+ __ mov(ip, Operand(kSecondaryTableSize - 1));
+ __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
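
For reference, the new magic-constant hashing can be written out as two plain functions. The table sizes and magic values below are illustrative stand-ins, not V8's actual constants:

    #include <cstdint>

    constexpr uint32_t kCacheIndexShift = 2;       // entries 4-byte scaled
    constexpr uint32_t kPrimaryTableSize = 2048;   // illustrative
    constexpr uint32_t kSecondaryTableSize = 512;  // illustrative
    constexpr uint32_t kPrimaryMagic = 0x3d532433;    // stand-in value
    constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // stand-in value

    // Primary probe: mix the name's hash with the receiver's map bits, then
    // mask down to a scaled table index (the eor/and sequence above).
    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits) {
      return ((name_hash + map_bits) ^ kPrimaryMagic) &
             ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    // Secondary probe, derived from the primary offset after a miss.
    uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_bits) {
      return (primary_offset - name_bits + kSecondaryMagic) &
             ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }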
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index 892ce85dfb..6273633822 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -26,19 +26,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
// we use the same assignments as ARM to remain on the safe side.
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, x3, x0, x4, x5};
+ static Register registers[] = {receiver, name, x3, x0, x4};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, value, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, x3, x4, x5};
+ static Register registers[] = {receiver, name, x3, x4};
return registers;
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index a704492550..277b4e7117 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -109,9 +109,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
@@ -197,7 +199,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
@@ -325,17 +327,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
-}
-
-
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -382,7 +375,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
@@ -467,28 +460,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!AreAliased(object_reg, scratch1, scratch2));
DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- __ Mov(scratch1, Operand(validity_cell));
- __ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ Cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- __ B(ne, miss);
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ __ Mov(scratch1, Operand(validity_cell));
+ __ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ Cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+ __ B(ne, miss);
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ Ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ B(ne, miss);
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ Ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ B(ne, miss);
}
// Keep track of the current object in register reg.
@@ -525,8 +515,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -535,34 +527,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
name) == NameDictionary::kNotFound));
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
-
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ Ldr(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ B(ne, miss);
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ Ldr(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -576,17 +546,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ B(ne, miss);
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -632,62 +593,12 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
__ Ret();
}
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- __ Push(receiver());
-
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ Mov(scratch3(), Operand(data));
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch3(), cell);
- }
- __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
- __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
- __ Push(name());
-
- // Abi for CallApiGetter.
- Register getter_address_reg = x2;
-
- // Set up the call.
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ Mov(getter_address_reg, ref);
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
scratch3()));
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -746,7 +657,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
@@ -768,7 +679,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ Mov(scratch1(), Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -782,7 +693,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 726a68e45f..9d66eb2495 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -394,11 +394,8 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- receiver, key, scratch1,
- scratch2, scratch3, scratch4);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, key, scratch1, scratch2, scratch3, scratch4);
// Cache miss.
KeyedLoadIC::GenerateMiss(masm);
@@ -450,8 +447,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -622,11 +619,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
__ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(
- x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+ __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);
// Check if the object is a JS array or not.
Register instance_type = x10;
@@ -663,8 +659,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
@@ -673,10 +669,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ Mov(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, x5, x6, x7, x8);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, x5,
+ x6, x7, x8);
// Cache miss.
__ B(&miss);
@@ -725,24 +719,6 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateMiss(masm);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x3, x4,
- x5, x6));
-
- // Probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, name, x3, x4, x5, x6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -758,8 +734,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register dictionary = x5;
DCHECK(!AreAliased(value, receiver, name,
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister(), x5, x6, x7));
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister(), x5, x6, x7));
__ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -822,8 +798,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address,
- info_address, reinterpret_cast<void*>(info.SmiCheck()));
+ PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
+ static_cast<void*>(address), static_cast<void*>(info_address),
+ static_cast<void*>(info.SmiCheck()));
}
// Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
index eb82f2af86..81c820725a 100644
--- a/deps/v8/src/ic/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
@@ -22,18 +22,19 @@ namespace internal {
// If there is a miss, the code falls through.
//
// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register receiver, Register name,
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits.
Register offset, Register scratch, Register scratch2,
Register scratch3) {
// Some code below relies on the fact that the Entry struct contains
// 3 pointers (name, code, map).
STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
uintptr_t value_off_addr =
@@ -49,7 +50,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Calculate the base address of the entry.
__ Mov(scratch, key_offset);
- __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
+ __ Add(
+ scratch, scratch,
+ Operand(scratch3, LSL, kPointerSizeLog2 - StubCache::kCacheIndexShift));
// Check that the key in the entry matches the name.
__ Ldr(scratch2, MemOperand(scratch));
@@ -65,12 +68,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Get the code entry from the cache.
__ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
- // Check that the flags match what we're looking for.
- __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
- __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
- __ Cmp(scratch2.W(), flags);
- __ B(ne, &miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ B(&miss);
@@ -87,17 +84,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ Bind(&miss);
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
- Isolate* isolate = masm->isolate();
Label miss;
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Make sure that there are no register conflicts.
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
@@ -110,12 +101,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind)) {
+ if (IC::ICUseVector(ic_kind_)) {
Register vector, slot;
- if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
- vector = VectorStoreICDescriptor::VectorRegister();
- slot = VectorStoreICDescriptor::SlotRegister();
+ if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
+ vector = StoreWithVectorDescriptor::VectorRegister();
+ slot = StoreWithVectorDescriptor::SlotRegister();
} else {
+ DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
@@ -131,26 +123,26 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ JumpIfSmi(receiver, &miss);
// Compute the hash for primary table.
- __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ Ldr(scratch.W(), FieldMemOperand(name, Name::kHashFieldOffset));
__ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Add(scratch, scratch, extra);
- __ Eor(scratch, scratch, flags);
- // We shift out the last two bits because they are not part of the hash.
- __ Ubfx(scratch, scratch, kCacheIndexShift,
- CountTrailingZeros(kPrimaryTableSize, 64));
+ __ Eor(scratch, scratch, kPrimaryMagic);
+ __ And(scratch, scratch,
+ Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
+ extra3);
// Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
- __ Add(scratch, scratch, flags >> kCacheIndexShift);
- __ And(scratch, scratch, kSecondaryTableSize - 1);
+ __ Sub(scratch, scratch, Operand(name));
+ __ Add(scratch, scratch, Operand(kSecondaryMagic));
+ __ And(scratch, scratch,
+ Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
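The probe sequence above implements a two-level hash. A minimal C++ sketch of the arithmetic, assuming 32-bit operands and using this patch's kPrimaryMagic/kSecondaryMagic constants in place of the old per-stub Code::Flags:

    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_word) {
      // (name hash + receiver map) ^ kPrimaryMagic, masked to the table size.
      uint32_t key = (name_hash + map_word) ^ kPrimaryMagic;
      return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }
    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word) {
      // On a primary miss: drop the name again, mix in kSecondaryMagic instead.
      uint32_t key = (primary - name_word) + kSecondaryMagic;
      return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }

Because the magic constants are fixed per cache rather than derived from Code::Flags, the generated probe no longer needs the flags re-check that the hunks above delete.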
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 571b614dde..f7a1f6982f 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -89,11 +89,12 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
void CallOptimization::Initialize(
Handle<FunctionTemplateInfo> function_template_info) {
- if (function_template_info->call_code()->IsUndefined()) return;
+ Isolate* isolate = function_template_info->GetIsolate();
+ if (function_template_info->call_code()->IsUndefined(isolate)) return;
api_call_info_ =
handle(CallHandlerInfo::cast(function_template_info->call_code()));
- if (!function_template_info->signature()->IsUndefined()) {
+ if (!function_template_info->signature()->IsUndefined(isolate)) {
expected_receiver_type_ =
handle(FunctionTemplateInfo::cast(function_template_info->signature()));
}
@@ -110,15 +111,17 @@ void CallOptimization::Initialize(Handle<JSFunction> function) {
void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
if (!function->shared()->IsApiFunction()) return;
- Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
+ Isolate* isolate = function->GetIsolate();
+ Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data(),
+ isolate);
// Require a C++ callback.
- if (info->call_code()->IsUndefined()) return;
- api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()));
+ if (info->call_code()->IsUndefined(isolate)) return;
+ api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()), isolate);
- if (!info->signature()->IsUndefined()) {
+ if (!info->signature()->IsUndefined(isolate)) {
expected_receiver_type_ =
- handle(FunctionTemplateInfo::cast(info->signature()));
+ handle(FunctionTemplateInfo::cast(info->signature()), isolate);
}
is_simple_api_call_ = true;
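A pattern repeated throughout this patch: oddball predicates now take the Isolate explicitly, and callers fetch it once and thread it through each check. The presumed shape of the new predicate (illustrative sketch, not verbatim V8):

    bool Object::IsUndefined(Isolate* isolate) const {
      // Compare against the isolate-held root instead of rediscovering
      // the heap from the object itself on every call.
      return this == isolate->heap()->undefined_value();
    }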
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 714888c8b3..b6b81def54 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -6,31 +6,29 @@
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
+#include "src/ic/handler-configuration.h"
#include "src/ic/ic-inl.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
-#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
-
Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
Handle<Map> stub_holder,
Code::Kind kind,
- CacheHolderFlag cache_holder,
- Code::StubType type) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
- Object* probe = stub_holder->FindInCodeCache(*name, flags);
- if (probe->IsCode()) return handle(Code::cast(probe));
- return Handle<Code>::null();
+ CacheHolderFlag cache_holder) {
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder);
+ Code* code = stub_holder->LookupInCodeCache(*name, flags);
+ if (code == nullptr) return Handle<Code>();
+ return handle(code);
}
Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
Handle<Name> name, Handle<Map> receiver_map) {
Isolate* isolate = name->GetIsolate();
- if (receiver_map->prototype()->IsNull()) {
+ if (receiver_map->prototype()->IsNull(isolate)) {
// TODO(jkummerow/verwaest): If there is no prototype and the property
// is nonexistent, introduce a builtin to handle this (fast properties
// -> return undefined, dictionary properties -> do negative lookup).
@@ -53,7 +51,7 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
while (true) {
if (current_map->is_dictionary_map()) cache_name = name;
- if (current_map->prototype()->IsNull()) break;
+ if (current_map->prototype()->IsNull(isolate)) break;
if (name->IsPrivate()) {
// TODO(verwaest): Use nonexistent_private_symbol.
cache_name = name;
@@ -66,9 +64,10 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
Handle<Code> handler = PropertyHandlerCompiler::Find(
- cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
+ cache_name, stub_holder_map, Code::LOAD_IC, flag);
if (!handler.is_null()) return handler;
+ TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
handler = compiler.CompileLoadNonexistent(cache_name);
Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
@@ -77,11 +76,10 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
- Code::StubType type,
Handle<Name> name) {
- Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
+ Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG,
+ PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
AbstractCode::cast(*code), *name));
#ifdef DEBUG
code->VerifyEmbeddedObjects();
@@ -194,7 +192,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
__ Move(receiver(), reg);
LoadFieldStub stub(isolate(), field);
GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -204,7 +202,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
__ Move(receiver(), reg);
LoadConstantStub stub(isolate(), constant_index);
GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -221,26 +219,30 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
}
GenerateLoadConstant(isolate()->factory()->undefined_value());
FrontendFooter(name, &miss);
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, Handle<AccessorInfo> callback) {
+ Handle<Name> name, Handle<AccessorInfo> callback, Handle<Code> slow_stub) {
+ if (FLAG_runtime_call_stats) {
+ GenerateTailCall(masm(), slow_stub);
+ }
Register reg = Frontend(name);
GenerateLoadCallback(reg, callback);
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, const CallOptimization& call_optimization,
- int accessor_index) {
+ int accessor_index, Handle<Code> slow_stub) {
DCHECK(call_optimization.is_simple_api_call());
+ if (FLAG_runtime_call_stats) {
+ GenerateTailCall(masm(), slow_stub);
+ }
Register holder = Frontend(name);
GenerateApiAccessorCall(masm(), call_optimization, map(), receiver(),
scratch2(), false, no_reg, holder, accessor_index);
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -358,9 +360,21 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
} else {
GenerateLoadInterceptor(reg);
}
- return GetCode(kind(), Code::FAST, it->name());
+ return GetCode(kind(), it->name());
}
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(receiver().is(ApiGetterDescriptor::ReceiverRegister()));
+ __ Move(ApiGetterDescriptor::HolderRegister(), reg);
+ // The callback is alive if this instruction is executed,
+ // so the weak cell is not cleared and points to data.
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ GetWeakValue(ApiGetterDescriptor::CallbackRegister(), cell);
+
+ CallApiGetterStub stub(isolate());
+ __ TailCallStub(&stub);
+}
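The new platform-independent GenerateLoadCallback leans on two different weak-cell reads. A hedged sketch of the distinction, in macro-assembler pseudocode:

    // LoadWeakValue: the referent may have been collected; bail to miss.
    __ LoadWeakValue(scratch, weak_cell, miss);
    // GetWeakValue (used above): no miss path is needed, because a live
    // handler keeps its embedded AccessorInfo alive, as the comment notes.
    __ GetWeakValue(ApiGetterDescriptor::CallbackRegister(), cell);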
void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
LookupIterator* it, Register interceptor_reg) {
@@ -410,13 +424,12 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
}
}
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
Handle<Name> name, int accessor_index, int expected_arguments) {
Register holder = Frontend(name);
GenerateLoadViaGetter(masm(), map(), receiver(), holder, accessor_index,
expected_arguments, scratch2());
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -435,8 +448,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
PrototypeIterator::WhereToEnd end =
name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
: PrototypeIterator::END_AT_NULL;
- PrototypeIterator iter(isolate(), holder(),
- PrototypeIterator::START_AT_PROTOTYPE, end);
+ PrototypeIterator iter(isolate(), holder(), kStartAtPrototype, end);
while (!iter.IsAtEnd()) {
last = PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
@@ -464,7 +476,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
if (details.type() == DATA_CONSTANT) {
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
Register tmp =
- virtual_args ? VectorStoreICDescriptor::VectorRegister() : map_reg;
+ virtual_args ? StoreWithVectorDescriptor::VectorRegister() : map_reg;
GenerateRestoreMap(transition, tmp, scratch2(), &miss);
GenerateConstantCheck(tmp, descriptor, value(), scratch2(), &miss);
if (virtual_args) {
@@ -488,7 +500,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
: StoreTransitionStub::StoreMapAndValue;
Register tmp =
- virtual_args ? VectorStoreICDescriptor::VectorRegister() : map_reg;
+ virtual_args ? StoreWithVectorDescriptor::VectorRegister() : map_reg;
GenerateRestoreMap(transition, tmp, scratch2(), &miss);
if (virtual_args) {
RearrangeVectorAndSlot(tmp, map_reg);
@@ -506,7 +518,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
@@ -534,7 +546,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
__ bind(&miss);
if (need_save_restore) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
- return GetCode(kind(), Code::FAST, it->name());
+ return GetCode(kind(), it->name());
}
@@ -545,61 +557,78 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
GenerateStoreViaSetter(masm(), map(), receiver(), holder, accessor_index,
expected_arguments, scratch2());
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
- const CallOptimization& call_optimization, int accessor_index) {
+ const CallOptimization& call_optimization, int accessor_index,
+ Handle<Code> slow_stub) {
+ if (FLAG_runtime_call_stats) {
+ GenerateTailCall(masm(), slow_stub);
+ }
Register holder = Frontend(name);
GenerateApiAccessorCall(masm(), call_optimization, handle(object->map()),
receiver(), scratch2(), true, value(), holder,
accessor_index);
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
#undef __
-void ElementHandlerCompiler::CompileElementHandlers(
- MapHandleList* receiver_maps, CodeHandleList* handlers) {
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map = receiver_maps->at(i);
- Handle<Code> cached_stub;
+// static
+Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
+ Handle<Map> receiver_map, Isolate* isolate) {
+ if (receiver_map->has_indexed_interceptor() &&
+ !receiver_map->GetIndexedInterceptor()->getter()->IsUndefined(isolate) &&
+ !receiver_map->GetIndexedInterceptor()->non_masking()) {
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadIndexedInterceptorStub);
+ return LoadIndexedInterceptorStub(isolate).GetCode();
+ }
+ if (receiver_map->IsStringMap()) {
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadIndexedStringStub);
+ return LoadIndexedStringStub(isolate).GetCode();
+ }
+ InstanceType instance_type = receiver_map->instance_type();
+ if (instance_type < FIRST_JS_RECEIVER_TYPE) {
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_SlowStub);
+ return isolate->builtins()->KeyedLoadIC_Slow();
+ }
- if (receiver_map->IsStringMap()) {
- cached_stub = LoadIndexedStringStub(isolate()).GetCode();
- } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
- } else {
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
-
- // No need to check for an elements-free prototype chain here, the
- // generated stub code needs to check that dynamically anyway.
- bool convert_hole_to_undefined =
- (is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
- *receiver_map == isolate()->get_initial_js_array_map(elements_kind));
-
- if (receiver_map->has_indexed_interceptor() &&
- !receiver_map->GetIndexedInterceptor()->getter()->IsUndefined() &&
- !receiver_map->GetIndexedInterceptor()->non_masking()) {
- cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
- } else if (IsSloppyArgumentsElements(elements_kind)) {
- cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
- } else if (IsFastElementsKind(elements_kind) ||
- IsFixedTypedArrayElementsKind(elements_kind)) {
- cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind,
- convert_hole_to_undefined).GetCode();
- } else {
- DCHECK(elements_kind == DICTIONARY_ELEMENTS);
- LoadICState state = LoadICState(kNoExtraICState);
- cached_stub = LoadDictionaryElementStub(isolate(), state).GetCode();
- }
- }
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (IsSloppyArgumentsElements(elements_kind)) {
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_KeyedLoadSloppyArgumentsStub);
+ return KeyedLoadSloppyArgumentsStub(isolate).GetCode();
+ }
+ if (elements_kind == DICTIONARY_ELEMENTS) {
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
+ return LoadDictionaryElementStub(isolate).GetCode();
+ }
+ DCHECK(IsFastElementsKind(elements_kind) ||
+ IsFixedTypedArrayElementsKind(elements_kind));
+ bool is_js_array = instance_type == JS_ARRAY_TYPE;
+ bool convert_hole_to_undefined =
+ is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
+ *receiver_map == isolate->get_initial_js_array_map(elements_kind);
+ if (FLAG_tf_load_ic_stub) {
+ int config = KeyedLoadElementsKind::encode(elements_kind) |
+ KeyedLoadConvertHole::encode(convert_hole_to_undefined) |
+ KeyedLoadIsJsArray::encode(is_js_array) |
+ LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
+ return handle(Smi::FromInt(config), isolate);
+ } else {
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
+ return LoadFastElementStub(isolate, is_js_array, elements_kind,
+ convert_hole_to_undefined)
+ .GetCode();
+ }
+}
- handlers->Add(cached_stub);
+void ElementHandlerCompiler::CompileElementHandlers(
+ MapHandleList* receiver_maps, List<Handle<Object>>* handlers) {
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ handlers->Add(GetKeyedLoadHandler(receiver_maps->at(i), isolate()));
}
}
} // namespace internal
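CompileElementHandlers now yields a heterogeneous List<Handle<Object>>: a Smi entry carries an inline elements-load configuration, anything else is a Code handler. A hedged sketch of how a consumer can tell them apart (names from this patch; the control flow is illustrative):

    for (int i = 0; i < handlers->length(); ++i) {
      Handle<Object> handler = handlers->at(i);
      if (handler->IsSmi()) {
        int config = Smi::cast(*handler)->value();
        ElementsKind kind = KeyedLoadElementsKind::decode(config);
        bool is_js_array = KeyedLoadIsJsArray::decode(config);
        bool convert_hole = KeyedLoadConvertHole::decode(config);
        // ... emit an inline elements load for |kind| ...
      } else {
        // ... install Handle<Code>::cast(handler) as before ...
      }
    }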
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 76036a260f..525889b80b 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -19,7 +19,7 @@ enum ReturnHolder { RETURN_HOLDER, DONT_RETURN_ANYTHING };
class PropertyHandlerCompiler : public PropertyAccessCompiler {
public:
static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind,
- CacheHolderFlag cache_holder, Code::StubType type);
+ CacheHolderFlag cache_holder);
protected:
PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind, Handle<Map> map,
@@ -98,7 +98,7 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
Handle<Name> name, Label* miss,
PrototypeCheckType check, ReturnHolder return_what);
- Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
+ Handle<Code> GetCode(Code::Kind kind, Handle<Name> name);
void set_holder(Handle<JSObject> holder) { holder_ = holder; }
Handle<Map> map() const { return map_; }
void set_map(Handle<Map> map) { map_ = map; }
@@ -123,11 +123,12 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
Handle<Code> CompileLoadCallback(Handle<Name> name,
- Handle<AccessorInfo> callback);
+ Handle<AccessorInfo> callback,
+ Handle<Code> slow_stub);
Handle<Code> CompileLoadCallback(Handle<Name> name,
const CallOptimization& call_optimization,
- int accessor_index);
+ int accessor_index, Handle<Code> slow_stub);
Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
@@ -205,8 +206,7 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Register prototype,
Label* miss);
-
- Register scratch4() { return registers_[5]; }
+ Register scratch3() { return registers_[4]; }
};
@@ -227,7 +227,7 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
LanguageMode language_mode);
Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
const CallOptimization& call_optimization,
- int accessor_index);
+ int accessor_index, Handle<Code> slow_stub);
Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
int accessor_index,
int expected_arguments);
@@ -242,8 +242,6 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
no_reg);
}
- static void GenerateSlow(MacroAssembler* masm);
-
protected:
virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
Label* miss, ReturnHolder return_what);
@@ -268,18 +266,6 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
void GenerateFieldTypeChecks(FieldType* field_type, Register value_reg,
Label* miss_label);
- static Builtins::Name SlowBuiltin(Code::Kind kind) {
- switch (kind) {
- case Code::STORE_IC:
- return Builtins::kStoreIC_Slow;
- case Code::KEYED_STORE_IC:
- return Builtins::kKeyedStoreIC_Slow;
- default:
- UNREACHABLE();
- }
- return Builtins::kStoreIC_Slow;
- }
-
static Register value();
};
@@ -293,8 +279,10 @@ class ElementHandlerCompiler : public PropertyHandlerCompiler {
virtual ~ElementHandlerCompiler() {}
+ static Handle<Object> GetKeyedLoadHandler(Handle<Map> receiver_map,
+ Isolate* isolate);
void CompileElementHandlers(MapHandleList* receiver_maps,
- CodeHandleList* handlers);
+ List<Handle<Object>>* handlers);
static void GenerateStoreSlow(MacroAssembler* masm);
};
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
new file mode 100644
index 0000000000..bf7c4770b9
--- /dev/null
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_HANDLER_CONFIGURATION_H_
+#define V8_IC_HANDLER_CONFIGURATION_H_
+
+#include "src/elements-kind.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+enum LoadHandlerType {
+ kLoadICHandlerForElements = 0,
+ kLoadICHandlerForProperties = 1
+};
+
+class LoadHandlerTypeBit : public BitField<bool, 0, 1> {};
+
+// Encoding for configuration Smis for property loads:
+class FieldOffsetIsInobject
+ : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {};
+class FieldOffsetIsDouble
+ : public BitField<bool, FieldOffsetIsInobject::kNext, 1> {};
+class FieldOffsetOffset : public BitField<int, FieldOffsetIsDouble::kNext, 27> {
+};
+// Make sure we don't overflow into the sign bit.
+STATIC_ASSERT(FieldOffsetOffset::kNext <= kSmiValueSize - 1);
+
+// Encoding for configuration Smis for elements loads:
+class KeyedLoadIsJsArray : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {
+};
+class KeyedLoadConvertHole
+ : public BitField<bool, KeyedLoadIsJsArray::kNext, 1> {};
+class KeyedLoadElementsKind
+ : public BitField<ElementsKind, KeyedLoadConvertHole::kNext, 8> {};
+// Make sure we don't overflow into the sign bit.
+STATIC_ASSERT(KeyedLoadElementsKind::kNext <= kSmiValueSize - 1);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IC_HANDLER_CONFIGURATION_H_
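A minimal sketch of how these BitFields pack and unpack a load-handler Smi, mirroring the encoding used in GetKeyedLoadHandler (values illustrative):

    int config = KeyedLoadElementsKind::encode(FAST_HOLEY_ELEMENTS) |
                 KeyedLoadConvertHole::encode(true) |
                 KeyedLoadIsJsArray::encode(true) |
                 LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
    DCHECK(!LoadHandlerTypeBit::decode(config));  // elements, not properties
    DCHECK_EQ(FAST_HOLEY_ELEMENTS, KeyedLoadElementsKind::decode(config));
    Handle<Object> handler(Smi::FromInt(config), isolate);

The STATIC_ASSERTs above guarantee the packed value still fits a Smi on 31-bit-Smi configurations.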
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index 1825202366..3219f3d1cb 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -18,19 +18,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+ static Register registers[] = {receiver, name, ebx, eax, edi};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, edi, no_reg};
+ static Register registers[] = {receiver, name, ebx, edi};
return registers;
}
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 132090dc8e..b332f117b8 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -199,7 +199,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
@@ -236,13 +236,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Factory* factory = masm->isolate()->factory();
- Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(factory->the_hole_value()));
+ Immediate(isolate->factory()->the_hole_value()));
__ j(not_equal, miss);
}
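The invariant this guard encodes, in hedged C++ form: a global's absence is witnessed by an empty PropertyCell holding the hole, so any later store to that name replaces the cell value and fails the check emitted above:

    Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
        global, name, PropertyCellType::kInvalidated);
    DCHECK(cell->value()->IsTheHole(isolate));
    // Compiled handler: load the cell through a weak cell, then
    //   if (cell->value() != the_hole) goto miss;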
@@ -320,8 +321,8 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
__ xchg(receiver, Operand(esp, 0));
__ push(name);
@@ -332,15 +333,6 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
}
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
-}
-
-
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
@@ -439,28 +431,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- // Operand::ForCell(...) points to the cell's payload!
- __ cmp(Operand::ForCell(validity_cell),
- Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
- __ j(not_equal, miss);
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ // Operand::ForCell(...) points to the cell's payload!
+ __ cmp(Operand::ForCell(validity_cell),
+ Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
+ __ j(not_equal, miss);
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ j(not_equal, miss);
}
// Keep track of the current object in register reg.
@@ -496,8 +485,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -507,34 +498,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
-
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ mov(holder_reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ j(not_equal, miss);
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ mov(holder_reg, FieldOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -548,17 +517,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -594,58 +554,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), reg));
-
- // Insert additional parameters into the stack frame above return address.
- __ pop(scratch3()); // Get return address to place it below.
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- __ push(receiver()); // receiver
- // Push data from AccessorInfo.
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ push(Immediate(data));
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- __ push(scratch2());
- }
- __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
- // ReturnValue default value
- __ push(Immediate(isolate()->factory()->undefined_value()));
- __ push(Immediate(reinterpret_cast<int>(isolate())));
- __ push(reg); // holder
- __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
-
- __ push(name()); // name
- __ push(scratch3()); // Restore return address.
-
- // Abi for CallApiGetter
- Register getter_address = ApiGetterDescriptor::function_address();
- Address function_address = v8::ToCData<Address>(callback->getter());
- __ mov(getter_address, Immediate(function_address));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(eax, value);
@@ -656,7 +564,7 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -723,7 +631,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Call the runtime system to load the interceptor.
__ pop(scratch2()); // save old return address
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
@@ -744,7 +652,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(holder_reg);
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ Push(callback);
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -759,7 +667,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -801,7 +709,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
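With FLAG_eliminate_prototype_chain_checks retired, CheckPrototypes always takes the validity-cell fast path. A hedged C++ rendering of the guard the generated code performs:

    bool PrototypeChainStillValid(Handle<Cell> validity_cell) {
      // One cell compare replaces a weak-cell map check per prototype;
      // mutating any map on the chain invalidates the cell.
      return validity_cell->value() == Smi::FromInt(Map::kPrototypeChainValid);
    }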
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index e66716f6cb..0550d92e91 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -336,10 +336,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- receiver, key, ebx, edi);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
+ edi);
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
@@ -519,10 +517,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
@@ -563,13 +561,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, edi, no_reg);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
+ no_reg);
- __ pop(VectorStoreICDescriptor::VectorRegister());
- __ pop(VectorStoreICDescriptor::SlotRegister());
+ __ pop(StoreWithVectorDescriptor::VectorRegister());
+ __ pop(StoreWithVectorDescriptor::SlotRegister());
// Cache miss.
__ jmp(&miss);
@@ -708,21 +704,12 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // This shouldn't be called.
- // TODO(mvstanton): remove this method.
- __ int3();
- return;
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
__ xchg(receiver, Operand(esp, 0));
__ push(name);
@@ -747,8 +734,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
@@ -836,8 +823,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
- test_instruction_address, delta);
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(test_instruction_address), delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index fcfae4bc0c..939e7fc0fd 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -14,19 +14,19 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register name, Register receiver,
- // Number of the cache entry pointer-size scaled.
+                       // The offset is scaled by 4, based on
+                       // kCacheIndexShift, which is two bits wide.
Register offset, Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
ExternalReference virtual_register =
ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
+ Code::Kind ic_kind = stub_cache->ic_kind();
bool is_vector_store =
IC::ICUseVector(ic_kind) &&
(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
@@ -47,12 +47,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -65,8 +59,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// probe, and need to be dropped before calling the handler.
if (is_vector_store) {
// The overlap here is rather embarrassing. One does what one must.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- DCHECK(extra.is(VectorStoreICDescriptor::SlotRegister()));
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ pop(vector);
__ mov(Operand::StaticVariable(virtual_register), extra);
@@ -102,12 +96,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -124,8 +112,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
if (is_vector_store) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- DCHECK(offset.is(VectorStoreICDescriptor::SlotRegister()));
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ mov(Operand::StaticVariable(virtual_register), offset);
__ pop(vector);
@@ -142,9 +130,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Label miss;
@@ -153,9 +139,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// being 12.
DCHECK(sizeof(Entry) == 12);
- // Assert the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Assert that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
@@ -180,7 +163,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// Get the map of the receiver and compute the hash.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
+ __ xor_(offset, kPrimaryMagic);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
@@ -189,21 +172,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, ic_kind, flags, kPrimary, name, receiver, offset,
- extra);
+ ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
+ __ xor_(offset, kPrimaryMagic);
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
__ sub(offset, name);
- __ add(offset, Immediate(flags));
+ __ add(offset, Immediate(kSecondaryMagic));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, ic_kind, flags, kSecondary, name, receiver,
- offset, extra);
+ ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
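The deleted flags re-checks are safe to drop because each IC kind now owns its own cache. A hedged sketch of the selection this patch's call sites perform:

    StubCache* cache =
        (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC)
            ? masm->isolate()->store_stub_cache()
            : masm->isolate()->load_stub_cache();
    // Entries in |cache| are homogeneous, so the probe can jump to the
    // cached handler without inspecting Code::kFlagsOffset.
    cache->GenerateProbe(masm, receiver, name, scratch, extra);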
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index d1e9416d41..2f0633e0d8 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -6,211 +6,39 @@
#include "src/ic/handler-compiler.h"
#include "src/ic/ic-inl.h"
-#include "src/profiler/cpu-profiler.h"
-
namespace v8 {
namespace internal {
-
-Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
- Handle<Map> stub_holder, Code::Kind kind,
- ExtraICState extra_state,
- CacheHolderFlag cache_holder) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, extra_state, cache_holder);
- Object* probe = stub_holder->FindInCodeCache(*name, flags);
- if (probe->IsCode()) return handle(Code::cast(probe));
- return Handle<Code>::null();
-}
-
-
-bool PropertyICCompiler::IncludesNumberMap(MapHandleList* maps) {
- for (int i = 0; i < maps->length(); ++i) {
- if (maps->at(i)->instance_type() == HEAP_NUMBER_TYPE) return true;
- }
- return false;
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
- Handle<Map> receiver_map, ExtraICState extra_ic_state) {
- Isolate* isolate = receiver_map->GetIsolate();
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
-
- // No need to check for an elements-free prototype chain here, the generated
- // stub code needs to check that dynamically anyway.
- bool convert_hole_to_undefined =
- is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
- *receiver_map == isolate->get_initial_js_array_map(elements_kind);
- Handle<Code> stub;
- if (receiver_map->has_indexed_interceptor()) {
- stub = LoadIndexedInterceptorStub(isolate).GetCode();
- } else if (receiver_map->IsStringMap()) {
- stub = LoadIndexedStringStub(isolate).GetCode();
- } else if (receiver_map->has_sloppy_arguments_elements()) {
- stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
- } else if (receiver_map->has_fast_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- stub = LoadFastElementStub(isolate, is_js_array, elements_kind,
- convert_hole_to_undefined).GetCode();
- } else {
- DCHECK(receiver_map->has_dictionary_elements());
- stub = LoadDictionaryElementStub(isolate, LoadICState(extra_ic_state))
- .GetCode();
- }
- return stub;
-}
-
-
Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- Handle<Map> receiver_map, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode) {
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
Isolate* isolate = receiver_map->GetIsolate();
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
DCHECK(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ PropertyICCompiler compiler(isolate);
Handle<Code> code =
compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
return code;
}
-
-Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
- ExtraICState state) {
- Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
- UnseededNumberDictionary* dictionary =
- isolate->heap()->non_monomorphic_cache();
- int entry = dictionary->FindEntry(isolate, flags);
- DCHECK(entry != -1);
- Object* code = dictionary->ValueAt(entry);
- // This might be called during the marking phase of the collector
- // hence the unchecked cast.
- return reinterpret_cast<Code*>(code);
-}
-
-
-static void FillCache(Isolate* isolate, Handle<Code> code) {
- Handle<UnseededNumberDictionary> dictionary = UnseededNumberDictionary::Set(
- isolate->factory()->non_monomorphic_cache(), code->flags(), code);
- isolate->heap()->SetRootNonMonomorphicCache(*dictionary);
-}
-
-
-Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
- InlineCacheState ic_state,
- ExtraICState extra_state) {
- Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
- Handle<UnseededNumberDictionary> cache =
- isolate->factory()->non_monomorphic_cache();
- int entry = cache->FindEntry(isolate, flags);
- if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
- PropertyICCompiler compiler(isolate, Code::STORE_IC);
- Handle<Code> code;
- if (ic_state == UNINITIALIZED) {
- code = compiler.CompileStoreInitialize(flags);
- } else if (ic_state == PREMONOMORPHIC) {
- code = compiler.CompileStorePreMonomorphic(flags);
- } else if (ic_state == GENERIC) {
- code = compiler.CompileStoreGeneric(flags);
- } else if (ic_state == MEGAMORPHIC) {
- code = compiler.CompileStoreMegamorphic(flags);
- } else {
- UNREACHABLE();
- }
-
- FillCache(isolate, code);
- return code;
-}
-
-
void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
- LanguageMode language_mode) {
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
Isolate* isolate = receiver_maps->at(0)->GetIsolate();
DCHECK(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- ExtraICState extra_state =
- KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
- PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ PropertyICCompiler compiler(isolate);
compiler.CompileKeyedStorePolymorphicHandlers(
receiver_maps, transitioned_maps, handlers, store_mode);
}
-Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
- LoadIC::GenerateInitialize(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
- PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG,
- AbstractCode::cast(*code), 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
- StoreIC::GenerateInitialize(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG,
- AbstractCode::cast(*code), 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
- StoreIC::GeneratePreMonomorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG,
- AbstractCode::cast(*code), 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
- ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
- LanguageMode language_mode = StoreICState::GetLanguageMode(extra_state);
- GenerateRuntimeSetProperty(masm(), language_mode);
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG,
- AbstractCode::cast(*code), 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
- StoreIC::GenerateMegamorphic(masm());
- Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
- PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG,
- AbstractCode::cast(*code), 0));
- return code;
-}
-
-
-Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
- Handle<Name> name,
- InlineCacheState state) {
- Code::Flags flags =
- Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(),
- CodeCreateEvent(log_kind(code), AbstractCode::cast(*code), *name));
-#ifdef DEBUG
- code->VerifyEmbeddedObjects();
-#endif
- return code;
-}
-
-
void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
@@ -268,34 +96,21 @@ Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
if (receiver_map->has_sloppy_arguments_elements()) {
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
} else if (receiver_map->has_fast_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
store_mode).GetCode();
} else {
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
}
return stub;
}
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
- Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
- Handle<Code> stub =
- CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
-
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
-
- __ DispatchWeakMap(receiver(), scratch1(), scratch2(), cell, stub,
- DO_SMI_CHECK);
-
- TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
-
- return GetCode(kind(), Code::NORMAL, factory()->empty_string());
-}
-
-
#undef __
} // namespace internal
} // namespace v8
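A hedged usage sketch for the slimmed-down compiler: with LanguageMode gone from the signature, a monomorphic keyed-store handler is keyed purely by receiver map and store mode:

    Handle<Code> handler =
        PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
            receiver_map, STORE_AND_GROW_NO_TRANSITION);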
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index 3a5aecccbb..fa3ba15af2 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -13,25 +13,12 @@ namespace internal {
class PropertyICCompiler : public PropertyAccessCompiler {
public:
- // Finds the Code object stored in the Heap::non_monomorphic_cache().
- static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state);
-
- // Named
- static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
- ExtraICState extra_state);
-
// Keyed
- static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
- Handle<Map> receiver_map, ExtraICState extra_ic_state);
-
static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
- Handle<Map> receiver_map, LanguageMode language_mode,
- KeyedAccessStoreMode store_mode);
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
static void ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
- LanguageMode language_mode);
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode);
// Helpers
// TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
@@ -41,57 +28,16 @@ class PropertyICCompiler : public PropertyAccessCompiler {
private:
- PropertyICCompiler(Isolate* isolate, Code::Kind kind,
- ExtraICState extra_ic_state = kNoExtraICState,
- CacheHolderFlag cache_holder = kCacheOnReceiver)
- : PropertyAccessCompiler(isolate, kind, cache_holder),
- extra_ic_state_(extra_ic_state) {}
-
- static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
- Code::Kind kind,
- ExtraICState extra_ic_state = kNoExtraICState,
- CacheHolderFlag cache_holder = kCacheOnReceiver);
-
- Handle<Code> CompileLoadInitialize(Code::Flags flags);
- Handle<Code> CompileStoreInitialize(Code::Flags flags);
- Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
- Handle<Code> CompileStoreGeneric(Code::Flags flags);
- Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
+ explicit PropertyICCompiler(Isolate* isolate)
+ : PropertyAccessCompiler(isolate, Code::KEYED_STORE_IC,
+ kCacheOnReceiver) {}
Handle<Code> CompileKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
- Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
- KeyedAccessStoreMode store_mode);
void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
MapHandleList* transitioned_maps,
CodeHandleList* handlers,
KeyedAccessStoreMode store_mode);
-
- bool IncludesNumberMap(MapHandleList* maps);
-
- Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
- InlineCacheState state = MONOMORPHIC);
-
- Logger::LogEventsAndTags log_kind(Handle<Code> code) {
- if (kind() == Code::LOAD_IC) {
- return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG
- : Logger::LOAD_POLYMORPHIC_IC_TAG;
- } else if (kind() == Code::KEYED_LOAD_IC) {
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_LOAD_IC_TAG
- : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
- } else if (kind() == Code::STORE_IC) {
- return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG
- : Logger::STORE_POLYMORPHIC_IC_TAG;
- } else {
- DCHECK_EQ(Code::KEYED_STORE_IC, kind());
- return code->ic_state() == MONOMORPHIC
- ? Logger::KEYED_STORE_IC_TAG
- : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
- }
- }
-
- const ExtraICState extra_ic_state_;
};
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 998bd8cf12..f77c40a396 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -87,42 +87,12 @@ void IC::SetTargetAtAddress(Address address, Code* target,
void IC::set_target(Code* code) {
SetTargetAtAddress(address(), code, constant_pool());
- target_set_ = true;
}
-
-void LoadIC::set_target(Code* code) {
- // The contextual mode must be preserved across IC patching.
- DCHECK(LoadICState::GetTypeofMode(code->extra_ic_state()) ==
- LoadICState::GetTypeofMode(target()->extra_ic_state()));
-
- IC::set_target(code);
-}
-
-
-void StoreIC::set_target(Code* code) {
- // Language mode must be preserved across IC patching.
- DCHECK(StoreICState::GetLanguageMode(code->extra_ic_state()) ==
- StoreICState::GetLanguageMode(target()->extra_ic_state()));
- IC::set_target(code);
-}
-
-
-void KeyedStoreIC::set_target(Code* code) {
- // Language mode must be preserved across IC patching.
- DCHECK(StoreICState::GetLanguageMode(code->extra_ic_state()) ==
- language_mode());
- IC::set_target(code);
-}
-
-
-Code* IC::raw_target() const {
+Code* IC::target() const {
return GetTargetAtAddress(address(), constant_pool());
}
-void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
-
-
Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder, Isolate* isolate,
CacheHolderFlag* flag) {
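
In ic-inl.h, the per-kind set_target() overrides (whose DCHECKs guarded typeof mode and language mode across patching) are deleted, raw_target() becomes the single target() accessor, and UpdateTarget()/target_set_ disappear. State transitions are now recorded in the feedback vector rather than by patching the call site, so there is no longer a cached target handle to keep in sync.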
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index bf1e45fb50..d157c926dd 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -257,10 +257,10 @@ void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
if (old_extra_ic_state == GetExtraICState()) {
// Tagged operations can lead to non-truncating HChanges
- if (left->IsUndefined() || left->IsBoolean()) {
+ if (left->IsUndefined(isolate_) || left->IsBoolean()) {
left_kind_ = GENERIC;
} else {
- DCHECK(right->IsUndefined() || right->IsBoolean());
+ DCHECK(right->IsUndefined(isolate_) || right->IsBoolean());
right_kind_ = GENERIC;
}
}
@@ -274,7 +274,7 @@ BinaryOpICState::Kind BinaryOpICState::UpdateKind(Handle<Object> object,
if (object->IsBoolean() && is_truncating) {
// Booleans will be automatically truncated by HChange.
new_kind = INT32;
- } else if (object->IsUndefined()) {
+ } else if (object->IsUndefined(isolate_)) {
// Undefined will be automatically truncated by HChange.
new_kind = is_truncating ? INT32 : NUMBER;
} else if (object->IsSmi()) {
@@ -446,8 +446,9 @@ CompareICState::State CompareICState::NewInputState(State old_state,
// static
CompareICState::State CompareICState::TargetState(
- State old_state, State old_left, State old_right, Token::Value op,
- bool has_inlined_smi_code, Handle<Object> x, Handle<Object> y) {
+ Isolate* isolate, State old_state, State old_left, State old_right,
+ Token::Value op, bool has_inlined_smi_code, Handle<Object> x,
+ Handle<Object> y) {
switch (old_state) {
case UNINITIALIZED:
if (x->IsBoolean() && y->IsBoolean()) return BOOLEAN;
@@ -456,8 +457,8 @@ CompareICState::State CompareICState::TargetState(
if (Token::IsOrderedRelationalCompareOp(op)) {
// Ordered comparisons treat undefined as NaN, so the
// NUMBER stub will do the right thing.
- if ((x->IsNumber() && y->IsUndefined()) ||
- (y->IsNumber() && x->IsUndefined())) {
+ if ((x->IsNumber() && y->IsUndefined(isolate)) ||
+ (y->IsNumber() && x->IsUndefined(isolate))) {
return NUMBER;
}
}
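
The ic-state.cc changes are mechanical: IsUndefined() now takes the Isolate, and CompareICState::TargetState() grows a leading Isolate* so callers can thread one through. This matches the wider pattern in this V8 update of making oddball checks (undefined, null, the hole) isolate-aware, apparently so the comparison can go against the isolate's roots rather than deriving them from the object.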
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index e1d33f8678..6888a7ab5c 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -128,11 +128,15 @@ class BinaryOpICState final BASE_EMBEDDED {
Isolate* isolate() const { return isolate_; }
+ enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+ Kind kind() const {
+ return KindGeneralize(KindGeneralize(left_kind_, right_kind_),
+ result_kind_);
+ }
+
private:
friend std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s);
- enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
-
Kind UpdateKind(Handle<Object> object, Kind kind) const;
static const char* KindToString(Kind kind);
@@ -140,6 +144,18 @@ class BinaryOpICState final BASE_EMBEDDED {
static bool KindMaybeSmi(Kind kind) {
return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
}
+ static bool KindLessGeneralThan(Kind kind1, Kind kind2) {
+ if (kind1 == NONE) return true;
+ if (kind1 == kind2) return true;
+ if (kind2 == GENERIC) return true;
+ if (kind2 == STRING) return false;
+ return kind1 <= kind2;
+ }
+ static Kind KindGeneralize(Kind kind1, Kind kind2) {
+ if (KindLessGeneralThan(kind1, kind2)) return kind2;
+ if (KindLessGeneralThan(kind2, kind1)) return kind1;
+ return GENERIC;
+ }
// We truncate the last bit of the token.
STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
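
The hunk above makes BinaryOpICState::Kind public and adds a small join operation over it: NONE is bottom, GENERIC is top, STRING is only comparable to those two, and SMI <= INT32 <= NUMBER order by enum value, so the new kind() reports the join of both operand kinds and the result kind. A standalone restatement of that logic, with an illustrative main() to show the lattice behavior (the driver is not part of V8):

    #include <cassert>

    // Operand kinds as in BinaryOpICState (same order as the hunk above).
    enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };

    // NONE is below everything, GENERIC above everything, STRING is only
    // comparable to those two, and SMI <= INT32 <= NUMBER order by value.
    static bool KindLessGeneralThan(Kind kind1, Kind kind2) {
      if (kind1 == NONE) return true;
      if (kind1 == kind2) return true;
      if (kind2 == GENERIC) return true;
      if (kind2 == STRING) return false;
      return kind1 <= kind2;
    }

    // Least upper bound; incomparable kinds generalize to GENERIC.
    static Kind KindGeneralize(Kind kind1, Kind kind2) {
      if (KindLessGeneralThan(kind1, kind2)) return kind2;
      if (KindLessGeneralThan(kind2, kind1)) return kind1;
      return GENERIC;
    }

    int main() {
      assert(KindGeneralize(NONE, INT32) == INT32);    // bottom is the identity
      assert(KindGeneralize(SMI, NUMBER) == NUMBER);   // numeric chain
      assert(KindGeneralize(SMI, STRING) == GENERIC);  // incomparable -> top
      // kind() in the hunk is KindGeneralize(KindGeneralize(left, right), result).
      assert(KindGeneralize(KindGeneralize(SMI, SMI), NUMBER) == NUMBER);
      return 0;
    }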
@@ -193,13 +209,13 @@ class CompareICState {
static const char* GetStateName(CompareICState::State state);
- static State TargetState(State old_state, State old_left, State old_right,
- Token::Value op, bool has_inlined_smi_code,
- Handle<Object> x, Handle<Object> y);
+ static State TargetState(Isolate* isolate, State old_state, State old_left,
+ State old_right, Token::Value op,
+ bool has_inlined_smi_code, Handle<Object> x,
+ Handle<Object> y);
};
-
-class LoadICState final BASE_EMBEDDED {
+class LoadGlobalICState final BASE_EMBEDDED {
private:
class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
@@ -208,9 +224,10 @@ class LoadICState final BASE_EMBEDDED {
public:
static const uint32_t kNextBitFieldOffset = TypeofModeBits::kNext;
- explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
+ explicit LoadGlobalICState(ExtraICState extra_ic_state)
+ : state_(extra_ic_state) {}
- explicit LoadICState(TypeofMode typeof_mode)
+ explicit LoadGlobalICState(TypeofMode typeof_mode)
: state_(TypeofModeBits::encode(typeof_mode)) {}
ExtraICState GetExtraICState() const { return state_; }
@@ -218,7 +235,7 @@ class LoadICState final BASE_EMBEDDED {
TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
static TypeofMode GetTypeofMode(ExtraICState state) {
- return LoadICState(state).typeof_mode();
+ return LoadGlobalICState(state).typeof_mode();
}
};
@@ -240,8 +257,8 @@ class StoreICState final BASE_EMBEDDED {
return StoreICState(state).language_mode();
}
- class LanguageModeState : public BitField<LanguageMode, 1, 2> {};
- STATIC_ASSERT(i::LANGUAGE_END == 3);
+ class LanguageModeState : public BitField<LanguageMode, 1, 1> {};
+ STATIC_ASSERT(i::LANGUAGE_END == 2);
// For convenience, a statically declared encoding of strict mode extra
// IC state.
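
ic-state.h renames LoadICState to LoadGlobalICState, since after this patch the typeof-mode bit only matters for global loads, and shrinks StoreICState's LanguageModeState from two bits to one, updating the STATIC_ASSERT from LANGUAGE_END == 3 to == 2. That is consistent with the upstream removal of strong mode, which leaves only sloppy and strict. A minimal sketch of the one-bit encoding, using a simplified stand-in for V8's BitField template (names and layout here are illustrative):

    #include <cassert>
    #include <cstdint>

    enum LanguageMode : uint32_t { SLOPPY = 0, STRICT = 1, LANGUAGE_END = 2 };
    static_assert(LANGUAGE_END == 2, "one bit is enough");

    // Simplified stand-in for v8::internal::BitField<T, shift, size>.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t state) {
        return static_cast<T>((state & kMask) >> shift);
      }
    };

    // Bit 0 is left for other state, as in the hunk; language mode now
    // fits in the single bit at position 1.
    using LanguageModeState = BitField<LanguageMode, 1, 1>;

    int main() {
      uint32_t state = LanguageModeState::encode(STRICT);
      assert(LanguageModeState::decode(state) == STRICT);
      assert(LanguageModeState::decode(0u) == SLOPPY);
      return 0;
    }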
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 49bbc6ed8d..b72791aa9e 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -5,11 +5,10 @@
#include "src/ic/ic.h"
#include "src/accessors.h"
+#include "src/api-arguments-inl.h"
#include "src/api.h"
-#include "src/api-arguments.h"
#include "src/arguments.h"
#include "src/base/bits.h"
-#include "src/code-factory.h"
#include "src/codegen.h"
#include "src/conversions.h"
#include "src/execution.h"
@@ -17,14 +16,15 @@
#include "src/frames-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
-#include "src/ic/ic-inl.h"
#include "src/ic/ic-compiler.h"
+#include "src/ic/ic-inl.h"
#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/prototype.h"
-#include "src/runtime/runtime.h"
+#include "src/runtime-profiler.h"
#include "src/runtime/runtime-utils.h"
+#include "src/runtime/runtime.h"
#include "src/tracing/trace-event.h"
namespace v8 {
@@ -38,7 +38,7 @@ char IC::TransitionMarkFromState(IC::State state) {
return '.';
case MONOMORPHIC:
return '1';
- case PROTOTYPE_FAILURE:
+ case RECOMPUTE_HANDLER:
return '^';
case POLYMORPHIC:
return 'P';
@@ -46,12 +46,6 @@ char IC::TransitionMarkFromState(IC::State state) {
return 'N';
case GENERIC:
return 'G';
-
- // We never see the debugger states here, because the state is
- // computed from the original code - not the patched code. Let
- // these cases fall through to the unreachable code below.
- case DEBUG_STUB:
- break;
}
UNREACHABLE();
return 0;
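
In ic.cc, the IC state machinery is renamed and trimmed: PROTOTYPE_FAILURE becomes RECOMPUTE_HANDLER (keeping the '^' trace marker), and DEBUG_STUB drops out of the InlineCacheState enum entirely, so TransitionMarkFromState no longer needs a fall-through case for debugger states.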
@@ -95,8 +89,8 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type, Handle<Object> name) {
if (FLAG_trace_ic) {
if (AddressIsDeoptimizedCode()) return;
- State new_state =
- UseVector() ? nexus()->StateFromFeedback() : raw_target()->ic_state();
+ DCHECK(UseVector());
+ State new_state = nexus()->StateFromFeedback();
TraceIC(type, name, state(), new_state);
}
}
@@ -105,8 +99,7 @@ void IC::TraceIC(const char* type, Handle<Object> name) {
void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state) {
if (FLAG_trace_ic) {
- Code* new_target = raw_target();
- PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
+ PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
// TODO(jkummerow): Add support for "apply". The logic is roughly:
// marker = [fp_ + kMarkerOffset];
@@ -123,19 +116,18 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
}
const char* modifier = "";
- if (new_target->kind() == Code::KEYED_STORE_IC) {
+ if (kind() == Code::KEYED_STORE_IC) {
KeyedAccessStoreMode mode =
casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
modifier = GetTransitionMarkModifier(mode);
}
- PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state), modifier);
-#ifdef OBJECT_PRINT
- OFStream os(stdout);
- name->Print(os);
-#else
+ void* map = nullptr;
+ if (!receiver_map().is_null()) {
+ map = reinterpret_cast<void*>(*receiver_map());
+ }
+ PrintF(" (%c->%c%s) map=%p ", TransitionMarkFromState(old_state),
+ TransitionMarkFromState(new_state), modifier, map);
name->ShortPrint(stdout);
-#endif
PrintF("]\n");
}
}
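
TraceIC now assumes vector-based feedback (DCHECK(UseVector())) instead of falling back to raw_target()->ic_state(), derives the "Keyed" prefix and the keyed-store modifier from kind() rather than from the target Code object, and always prints the receiver map pointer, replacing the OBJECT_PRINT-only full Print() of the name with an unconditional ShortPrint().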
@@ -146,7 +138,6 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
: isolate_(isolate),
- target_set_(false),
vector_set_(false),
target_maps_set_(false),
nexus_(nexus) {
@@ -185,13 +176,34 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
constant_pool_address_ = constant_pool;
}
pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
- target_ = handle(raw_target(), isolate);
- kind_ = target_->kind();
- state_ = UseVector() ? nexus->StateFromFeedback() : target_->ic_state();
+ Code* target = this->target();
+ kind_ = target->kind();
+ state_ = UseVector() ? nexus->StateFromFeedback() : StateFromCode(target);
old_state_ = state_;
- extra_ic_state_ = target_->extra_ic_state();
+ extra_ic_state_ = target->extra_ic_state();
}
+InlineCacheState IC::StateFromCode(Code* code) {
+ Isolate* isolate = code->GetIsolate();
+ switch (code->kind()) {
+ case Code::BINARY_OP_IC: {
+ BinaryOpICState state(isolate, code->extra_ic_state());
+ return state.GetICState();
+ }
+ case Code::COMPARE_IC: {
+ CompareICStub stub(isolate, code->extra_ic_state());
+ return stub.GetICState();
+ }
+ case Code::TO_BOOLEAN_IC: {
+ ToBooleanICStub stub(isolate, code->extra_ic_state());
+ return stub.GetICState();
+ }
+ default:
+ if (code->is_debug_stub()) return UNINITIALIZED;
+ UNREACHABLE();
+ return UNINITIALIZED;
+ }
+}
SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// Compute the JavaScript frame for the frame pointer of this IC
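
The constructor no longer caches a target_ handle; kind and extra state are read off the target once, and the initial state comes from the feedback vector when the IC uses one. For the remaining code-based ICs, the new IC::StateFromCode() decodes the InlineCacheState out of the code object's extra IC state (via BinaryOpICState, CompareICStub, or ToBooleanICStub) and treats debug stubs as UNINITIALIZED, replacing the old Code::ic_state() flag bits.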
@@ -226,7 +238,6 @@ bool IC::AddressIsOptimizedCode() const {
return host->kind() == Code::OPTIMIZED_FUNCTION;
}
-
static void LookupForRead(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -238,7 +249,8 @@ static void LookupForRead(LookupIterator* it) {
case LookupIterator::INTERCEPTOR: {
// If there is a getter, return; otherwise loop to perform the lookup.
Handle<JSObject> holder = it->GetHolder<JSObject>();
- if (!holder->GetNamedInterceptor()->getter()->IsUndefined()) {
+ if (!holder->GetNamedInterceptor()->getter()->IsUndefined(
+ it->isolate())) {
return;
}
break;
@@ -258,15 +270,15 @@ static void LookupForRead(LookupIterator* it) {
}
}
+bool IC::ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name) {
+ if (!RecomputeHandlerForName(name)) return false;
-bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
- Handle<String> name) {
- if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
- if (UseVector()) {
- maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
- } else {
- maybe_handler_ = target()->FindHandlerForMap(*receiver_map());
- }
+ DCHECK(UseVector());
+ maybe_handler_ = nexus()->FindHandlerForMap(receiver_map());
+
+ // This is a contextual access, always just update the handler and stay
+ // monomorphic.
+ if (kind() == Code::LOAD_GLOBAL_IC) return true;
// The current map wasn't handled yet. There's no reason to stay monomorphic,
// *unless* we're moving from a deprecated map to its replacement, or
@@ -283,38 +295,15 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
receiver_map()->elements_kind());
}
- CacheHolderFlag flag;
- Handle<Map> ic_holder_map(GetICCacheHolder(receiver_map(), isolate(), &flag));
-
- DCHECK(flag != kCacheOnReceiver || receiver->IsJSObject());
- DCHECK(flag != kCacheOnPrototype || !receiver->IsJSReceiver());
- DCHECK(flag != kCacheOnPrototypeReceiverIsDictionary);
-
- if (state() == MONOMORPHIC) {
- int index = ic_holder_map->IndexInCodeCache(*name, *target());
- if (index >= 0) {
- ic_holder_map->RemoveFromCodeCache(*name, *target(), index);
- }
- }
-
- if (receiver->IsJSGlobalObject()) {
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
- LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() == LookupIterator::ACCESS_CHECK) return false;
- if (!it.IsFound()) return false;
- return it.property_details().cell_type() == PropertyCellType::kConstant;
- }
-
return true;
}
-
-bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
- if (target()->is_keyed_stub()) {
+bool IC::RecomputeHandlerForName(Handle<Object> name) {
+ if (is_keyed()) {
// Determine whether the failure is due to a name failure.
if (!name->IsName()) return false;
- Name* stub_name =
- UseVector() ? nexus()->FindFirstName() : target()->FindFirstName();
+ DCHECK(UseVector());
+ Name* stub_name = nexus()->FindFirstName();
if (*name != stub_name) return false;
}
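
TryRemoveInvalidPrototypeDependentStub becomes ShouldRecomputeHandler, and its job shrinks accordingly: the handler now always comes from the feedback vector, the manual eviction from the map's code cache is gone, and the special global-object/constant-cell analysis is replaced by an early "stay monomorphic, just swap the handler" return for LOAD_GLOBAL_IC. RecomputeHandlerForName likewise reads the first name from the nexus instead of the target stub.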
@@ -326,15 +315,13 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
update_receiver_map(receiver);
if (!name->IsString()) return;
if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
- if (receiver->IsUndefined() || receiver->IsNull()) return;
+ if (receiver->IsUndefined(isolate()) || receiver->IsNull(isolate())) return;
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
- if (TryRemoveInvalidPrototypeDependentStub(receiver,
- Handle<String>::cast(name))) {
- MarkPrototypeFailure(name);
- return;
+ if (ShouldRecomputeHandler(receiver, Handle<String>::cast(name))) {
+ MarkRecomputeHandler(name);
}
}
@@ -382,43 +369,11 @@ static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
*polymorphic_delta = 1;
}
break;
- case PROTOTYPE_FAILURE:
- case DEBUG_STUB:
+ case RECOMPUTE_HANDLER:
UNREACHABLE();
}
}
-
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
- State old_state, State new_state,
- bool target_remains_ic_stub) {
- Code* host =
- isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
- if (host->kind() != Code::FUNCTION) return;
-
- if (FLAG_type_info_threshold > 0 && target_remains_ic_stub &&
- // Not all Code objects have TypeFeedbackInfo.
- host->type_feedback_info()->IsTypeFeedbackInfo()) {
- int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
- int generic_delta = 0; // "Generic" here includes megamorphic.
- ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
- &generic_delta);
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_ic_with_type_info_count(polymorphic_delta);
- info->change_ic_generic_count(generic_delta);
- }
- if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
- info->change_own_type_change_checksum();
- }
- host->set_profiler_ticks(0);
- isolate->runtime_profiler()->NotifyICChanged();
- // TODO(2029): When an optimized function is patched, it would
- // be nice to propagate the corresponding type information to its
- // unoptimized version for the benefit of later inlining.
-}
-
-
// static
void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
if (host->kind() != Code::FUNCTION) return;
@@ -432,26 +387,42 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
// unoptimized version for the benefit of later inlining.
}
-
void IC::PostPatching(Address address, Code* target, Code* old_target) {
// Type vector based ICs update these statistics at a different time because
// they don't always patch on state change.
if (ICUseVector(target->kind())) return;
- Isolate* isolate = target->GetHeap()->isolate();
- State old_state = UNINITIALIZED;
- State new_state = UNINITIALIZED;
- bool target_remains_ic_stub = false;
- if (old_target->is_inline_cache_stub() && target->is_inline_cache_stub()) {
- old_state = old_target->ic_state();
- new_state = target->ic_state();
- target_remains_ic_stub = true;
- }
+ DCHECK(old_target->is_inline_cache_stub());
+ DCHECK(target->is_inline_cache_stub());
+ State old_state = StateFromCode(old_target);
+ State new_state = StateFromCode(target);
- OnTypeFeedbackChanged(isolate, address, old_state, new_state,
- target_remains_ic_stub);
-}
+ Isolate* isolate = target->GetIsolate();
+ Code* host =
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+ if (host->kind() != Code::FUNCTION) return;
+ // Not all Code objects have TypeFeedbackInfo.
+ if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ if (FLAG_type_info_threshold > 0) {
+ int polymorphic_delta = 0; // "Polymorphic" here includes monomorphic.
+ int generic_delta = 0; // "Generic" here includes megamorphic.
+ ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
+ &generic_delta);
+ TypeFeedbackInfo* info =
+ TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_ic_with_type_info_count(polymorphic_delta);
+ info->change_ic_generic_count(generic_delta);
+ }
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->change_own_type_change_checksum();
+ }
+ host->set_profiler_ticks(0);
+ isolate->runtime_profiler()->NotifyICChanged();
+ // TODO(2029): When an optimized function is patched, it would
+ // be nice to propagate the corresponding type information to its
+ // unoptimized version for the benefit of later inlining.
+}
void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
Code* target = GetTargetAtAddress(address, constant_pool);
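
PostPatching absorbs the address-based OnTypeFeedbackChanged overload. Since vector-based ICs bail out at the top, both the old and new targets can be asserted to be IC stubs, their states are recovered with StateFromCode, and the type-feedback counters and profiler ticks on the host FUNCTION code are updated inline, exactly as the deleted helper did. IC::Clear also collapses to a single check, as COMPARE_IC is the only kind left that clears by patching.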
@@ -459,22 +430,8 @@ void IC::Clear(Isolate* isolate, Address address, Address constant_pool) {
// Don't clear debug break inline cache as it will remove the break point.
if (target->is_debug_stub()) return;
- switch (target->kind()) {
- case Code::LOAD_IC:
- case Code::KEYED_LOAD_IC:
- case Code::STORE_IC:
- case Code::KEYED_STORE_IC:
- return;
- case Code::COMPARE_IC:
- return CompareIC::Clear(isolate, address, target, constant_pool);
- case Code::CALL_IC: // CallICs are vector-based and cleared differently.
- case Code::BINARY_OP_IC:
- case Code::TO_BOOLEAN_IC:
- // Clearing these is tricky and does not
- // make any performance difference.
- return;
- default:
- UNREACHABLE();
+ if (target->kind() == Code::COMPARE_IC) {
+ CompareIC::Clear(isolate, address, target, constant_pool);
}
}
@@ -508,16 +465,13 @@ void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
OnTypeFeedbackChanged(isolate, host);
}
-
-void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
- Address constant_pool) {
- if (IsCleared(target)) return;
- Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC,
- target->extra_ic_state());
- SetTargetAtAddress(address, code, constant_pool);
+void LoadGlobalIC::Clear(Isolate* isolate, Code* host,
+ LoadGlobalICNexus* nexus) {
+ if (IsCleared(nexus)) return;
+ nexus->ConfigureUninitialized();
+ OnTypeFeedbackChanged(isolate, host);
}
-
void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
if (IsCleared(nexus)) return;
nexus->ConfigurePremonomorphic();
@@ -525,15 +479,6 @@ void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
}
-void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
- Address constant_pool) {
- if (IsCleared(target)) return;
- Handle<Code> code = pre_monomorphic_stub(
- isolate, StoreICState::GetLanguageMode(target->extra_ic_state()));
- SetTargetAtAddress(address, *code, constant_pool);
-}
-
-
void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
KeyedStoreICNexus* nexus) {
if (IsCleared(nexus)) return;
@@ -557,8 +502,9 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
// static
Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state) {
+ // TODO(ishell): remove extra_ic_state
if (FLAG_compiled_keyed_generic_loads) {
- return KeyedLoadGenericStub(isolate, LoadICState(extra_state)).GetCode();
+ return KeyedLoadGenericStub(isolate).GetCode();
} else {
return isolate->builtins()->KeyedLoadIC_Megamorphic();
}
@@ -596,32 +542,33 @@ void IC::ConfigureVectorState(IC::State new_state, Handle<Object> key) {
OnTypeFeedbackChanged(isolate(), get_host());
}
-
void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
- Handle<Code> handler) {
+ Handle<Object> handler) {
DCHECK(UseVector());
if (kind() == Code::LOAD_IC) {
LoadICNexus* nexus = casted_nexus<LoadICNexus>();
nexus->ConfigureMonomorphic(map, handler);
+ } else if (kind() == Code::LOAD_GLOBAL_IC) {
+ LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+ nexus->ConfigureHandlerMode(Handle<Code>::cast(handler));
} else if (kind() == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
nexus->ConfigureMonomorphic(name, map, handler);
} else if (kind() == Code::STORE_IC) {
StoreICNexus* nexus = casted_nexus<StoreICNexus>();
- nexus->ConfigureMonomorphic(map, handler);
+ nexus->ConfigureMonomorphic(map, Handle<Code>::cast(handler));
} else {
DCHECK(kind() == Code::KEYED_STORE_IC);
KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
- nexus->ConfigureMonomorphic(name, map, handler);
+ nexus->ConfigureMonomorphic(name, map, Handle<Code>::cast(handler));
}
vector_set_ = true;
OnTypeFeedbackChanged(isolate(), get_host());
}
-
void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
DCHECK(UseVector());
if (kind() == Code::LOAD_IC) {
LoadICNexus* nexus = casted_nexus<LoadICNexus>();
@@ -659,34 +606,45 @@ void IC::ConfigureVectorState(MapHandleList* maps,
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
+ if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
}
- // Check if the name is trivially convertible to an index and get
- // the element or char if so.
- uint32_t index;
- if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
- // Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) {
- DCHECK(UseVector());
- ConfigureVectorState(MEGAMORPHIC, name);
- TRACE_IC("LoadIC", name);
- TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
- }
+ bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
+
+ if (state() != UNINITIALIZED) {
+ JSObject::MakePrototypesFast(object, kStartAtReceiver, isolate());
+ update_receiver_map(object);
+ }
+ // Named lookup in the object.
+ LookupIterator it(object, name);
+ LookupForRead(&it);
+
+ if (it.IsFound() || !ShouldThrowReferenceError()) {
+ // Update inline cache and stub cache.
+ if (use_ic) UpdateCaches(&it);
+
+ // Get the property.
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- Object::GetElement(isolate(), object, index),
+
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
Object);
- return result;
+ if (it.IsFound()) {
+ return result;
+ } else if (!ShouldThrowReferenceError()) {
+ LOG(isolate(), SuspectReadEvent(*name, *object));
+ return result;
+ }
}
+ return ReferenceError(name);
+}
- bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
+MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
+ Handle<JSGlobalObject> global = isolate()->global_object();
- if (object->IsJSGlobalObject() && name->IsString()) {
+ if (name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(object);
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
@@ -696,44 +654,24 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
FixedArray::get(*ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index),
lookup_result.slot_index, isolate());
- if (*result == *isolate()->factory()->the_hole_value()) {
+ if (result->IsTheHole(isolate())) {
// Do not install stubs and stay pre-monomorphic for
// uninitialized accesses.
return ReferenceError(name);
}
- if (use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
+ if (FLAG_use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadScriptContextFieldStub);
LoadScriptContextFieldStub stub(isolate(), &lookup_result);
PatchCache(name, stub.GetCode());
+ TRACE_IC("LoadGlobalIC", name);
}
return result;
}
}
-
- // Named lookup in the object.
- LookupIterator it(object, name);
- LookupForRead(&it);
-
- if (it.IsFound() || !ShouldThrowReferenceError(object)) {
- // Update inline cache and stub cache.
- if (use_ic) UpdateCaches(&it);
-
- // Get the property.
- Handle<Object> result;
-
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
- Object);
- if (it.IsFound()) {
- return result;
- } else if (!ShouldThrowReferenceError(object)) {
- LOG(isolate(), SuspectReadEvent(*name, *object));
- return result;
- }
- }
- return ReferenceError(name);
+ return LoadIC::Load(global, name);
}
-
static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
Handle<Map> new_receiver_map) {
DCHECK(!new_receiver_map.is_null());
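
LoadIC::Load drops its "name is trivially an array index" rewrite (KeyedLoadIC::Load now owns that case) and goes straight to the named lookup, adding a JSObject::MakePrototypesFast hint once the IC has seen a map; ShouldThrowReferenceError also loses its receiver argument. Global loads move into the new LoadGlobalIC::Load, which consults the script context table first, using the isolate-aware IsTheHole check for uninitialized bindings, and otherwise defers to LoadIC::Load on the global object.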
@@ -747,13 +685,15 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
return true;
}
-
-bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
- if (!code->is_handler()) return false;
- if (target()->is_keyed_stub() && state() != PROTOTYPE_FAILURE) return false;
+bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
+ DCHECK(code->IsSmi() || code->IsCode());
+ if (!code->IsSmi() && !Code::cast(*code)->is_handler()) {
+ return false;
+ }
+ if (is_keyed() && state() != RECOMPUTE_HANDLER) return false;
Handle<Map> map = receiver_map();
MapHandleList maps;
- CodeHandleList handlers;
+ List<Handle<Object>> handlers;
TargetMaps(&maps);
int number_of_maps = maps.length();
@@ -783,14 +723,11 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
if (number_of_maps == 0 && state() != MONOMORPHIC && state() != POLYMORPHIC) {
return false;
}
- if (UseVector()) {
- if (!nexus()->FindHandlers(&handlers, maps.length())) return false;
- } else {
- if (!target()->FindHandlers(&handlers, maps.length())) return false;
- }
+ DCHECK(UseVector());
+ if (!nexus()->FindHandlers(&handlers, maps.length())) return false;
number_of_valid_maps++;
- if (number_of_valid_maps > 1 && target()->is_keyed_stub()) return false;
+ if (number_of_valid_maps > 1 && is_keyed()) return false;
Handle<Code> ic;
if (number_of_valid_maps == 1) {
ConfigureVectorState(name, receiver_map(), code);
@@ -808,22 +745,21 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
ConfigureVectorState(name, &maps, &handlers);
}
- if (!UseVector()) set_target(*ic);
return true;
}
-
-void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
- DCHECK(handler->is_handler());
+void IC::UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name) {
+ DCHECK(handler->IsSmi() ||
+ (handler->IsCode() && Handle<Code>::cast(handler)->is_handler()));
ConfigureVectorState(name, receiver_map(), handler);
}
void IC::CopyICToMegamorphicCache(Handle<Name> name) {
MapHandleList maps;
- CodeHandleList handlers;
+ List<Handle<Object>> handlers;
TargetMaps(&maps);
- if (!target()->FindHandlers(&handlers, maps.length())) return;
+ if (!nexus()->FindHandlers(&handlers, maps.length())) return;
for (int i = 0; i < maps.length(); i++) {
UpdateMegamorphicCache(*maps.at(i), *name, *handlers.at(i));
}
@@ -845,38 +781,36 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
return transitioned_map == target_map;
}
-
-void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
+void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
+ DCHECK(code->IsCode() || (code->IsSmi() && (kind() == Code::LOAD_IC ||
+ kind() == Code::KEYED_LOAD_IC)));
switch (state()) {
case UNINITIALIZED:
case PREMONOMORPHIC:
UpdateMonomorphicIC(code, name);
break;
- case PROTOTYPE_FAILURE:
+ case RECOMPUTE_HANDLER:
case MONOMORPHIC:
+ if (kind() == Code::LOAD_GLOBAL_IC) {
+ UpdateMonomorphicIC(code, name);
+ break;
+ }
+ // Fall through.
case POLYMORPHIC:
- if (!target()->is_keyed_stub() || state() == PROTOTYPE_FAILURE) {
+ if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
if (UpdatePolymorphicIC(name, code)) break;
// For keyed stubs, we can't know whether old handlers were for the
// same key.
CopyICToMegamorphicCache(name);
}
- if (UseVector()) {
- ConfigureVectorState(MEGAMORPHIC, name);
- } else {
- set_target(*megamorphic_stub());
- }
+ DCHECK(UseVector());
+ ConfigureVectorState(MEGAMORPHIC, name);
// Fall through.
case MEGAMORPHIC:
UpdateMegamorphicCache(*receiver_map(), *name, *code);
// Indicate that we've handled this case.
- if (UseVector()) {
- vector_set_ = true;
- } else {
- target_set_ = true;
- }
- break;
- case DEBUG_STUB:
+ DCHECK(UseVector());
+ vector_set_ = true;
break;
case GENERIC:
UNREACHABLE();
@@ -884,96 +818,19 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
}
}
-
-Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
- ExtraICState extra_state) {
- return LoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
-}
-
-
-Handle<Code> LoadIC::initialize_stub_in_optimized_code(
- Isolate* isolate, ExtraICState extra_state, State initialization_state) {
- return LoadICStub(isolate, LoadICState(extra_state)).GetCode();
-}
-
-
-Handle<Code> KeyedLoadIC::initialize_stub(Isolate* isolate,
- ExtraICState extra_state) {
- return KeyedLoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
-}
-
-
-Handle<Code> KeyedLoadIC::initialize_stub_in_optimized_code(
- Isolate* isolate, State initialization_state, ExtraICState extra_state) {
- if (initialization_state != MEGAMORPHIC) {
- return KeyedLoadICStub(isolate, LoadICState(extra_state)).GetCode();
- }
- return isolate->builtins()->KeyedLoadIC_Megamorphic();
-}
-
-
-static Handle<Code> KeyedStoreICInitializeStubHelper(
- Isolate* isolate, LanguageMode language_mode,
- InlineCacheState initialization_state) {
- switch (initialization_state) {
- case UNINITIALIZED:
- return is_strict(language_mode)
- ? isolate->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate->builtins()->KeyedStoreIC_Initialize();
- case PREMONOMORPHIC:
- return is_strict(language_mode)
- ? isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict()
- : isolate->builtins()->KeyedStoreIC_PreMonomorphic();
- case MEGAMORPHIC:
- return is_strict(language_mode)
- ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
- : isolate->builtins()->KeyedStoreIC_Megamorphic();
- default:
- UNREACHABLE();
- }
- return Handle<Code>();
-}
-
-
-Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
- LanguageMode language_mode,
- State initialization_state) {
- if (initialization_state != MEGAMORPHIC) {
- VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
- return stub.GetCode();
- }
-
- return KeyedStoreICInitializeStubHelper(isolate, language_mode,
- initialization_state);
-}
-
-
-Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
- Isolate* isolate, LanguageMode language_mode, State initialization_state) {
- if (initialization_state != MEGAMORPHIC) {
- VectorKeyedStoreICStub stub(isolate, StoreICState(language_mode));
- return stub.GetCode();
- }
-
- return KeyedStoreICInitializeStubHelper(isolate, language_mode,
- initialization_state);
-}
-
-
Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state) {
LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
- return KeyedStoreICInitializeStubHelper(isolate, mode, MEGAMORPHIC);
-}
-
-
-Handle<Code> LoadIC::megamorphic_stub() {
- DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return KeyedLoadIC::ChooseMegamorphicStub(isolate(), extra_ic_state());
+ return is_strict(mode)
+ ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
+ : isolate->builtins()->KeyedStoreIC_Megamorphic();
}
-
-Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
+Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
+ if (FLAG_tf_load_ic_stub) {
+ return handle(Smi::FromInt(index.GetLoadByFieldOffset()), isolate());
+ }
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
LoadFieldStub stub(isolate(), index);
return stub.GetCode();
}
@@ -992,8 +849,9 @@ bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
} else if (accessors->IsAccessorPair()) {
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
isolate);
- if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo())
+ if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) {
return false;
+ }
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
Handle<Object> receiver = lookup->GetReceiver();
if (holder->HasFastProperties()) {
@@ -1018,7 +876,7 @@ bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
void LoadIC::UpdateCaches(LookupIterator* lookup) {
- if (state() == UNINITIALIZED) {
+ if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
@@ -1026,12 +884,12 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
return;
}
- Handle<Code> code;
+ Handle<Object> code;
if (lookup->state() == LookupIterator::JSPROXY ||
lookup->state() == LookupIterator::ACCESS_CHECK) {
code = slow_stub();
} else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC) {
+ if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC) {
code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
receiver_map());
// TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -1040,21 +898,41 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
code = slow_stub();
}
} else {
- if (lookup->state() == LookupIterator::ACCESSOR) {
+ if (kind() == Code::LOAD_GLOBAL_IC &&
+ lookup->state() == LookupIterator::DATA &&
+ lookup->GetHolder<Object>()->IsJSGlobalObject()) {
+#if DEBUG
+ Handle<Object> holder = lookup->GetHolder<Object>();
+ Handle<Object> receiver = lookup->GetReceiver();
+ DCHECK_EQ(*receiver, *holder);
+#endif
+ // Now update the cell in the feedback vector.
+ LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+ nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
+ TRACE_IC("LoadGlobalIC", lookup->name());
+ return;
+ } else if (lookup->state() == LookupIterator::ACCESSOR) {
if (!IsCompatibleReceiver(lookup, receiver_map())) {
TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
code = slow_stub();
}
} else if (lookup->state() == LookupIterator::INTERCEPTOR) {
- // Perform a lookup behind the interceptor. Copy the LookupIterator since
- // the original iterator will be used to fetch the value.
- LookupIterator it = *lookup;
- it.Next();
- LookupForRead(&it);
- if (it.state() == LookupIterator::ACCESSOR &&
- !IsCompatibleReceiver(&it, receiver_map())) {
- TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+ if (kind() == Code::LOAD_GLOBAL_IC) {
+ // The interceptor handler requires name but it is not passed explicitly
+ // to LoadGlobalIC and the LoadGlobalIC dispatcher also does not load
+ // it so we will just use slow stub.
code = slow_stub();
+ } else {
+ // Perform a lookup behind the interceptor. Copy the LookupIterator
+ // since the original iterator will be used to fetch the value.
+ LookupIterator it = *lookup;
+ it.Next();
+ LookupForRead(&it);
+ if (it.state() == LookupIterator::ACCESSOR &&
+ !IsCompatibleReceiver(&it, receiver_map())) {
+ TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+ code = slow_stub();
+ }
}
}
if (code.is_null()) code = ComputeHandler(lookup);
@@ -1064,39 +942,90 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
TRACE_IC("LoadIC", lookup->name());
}
+StubCache* IC::stub_cache() {
+ switch (kind()) {
+ case Code::LOAD_IC:
+ case Code::KEYED_LOAD_IC:
+ return isolate()->load_stub_cache();
+
+ case Code::STORE_IC:
+ case Code::KEYED_STORE_IC:
+ return isolate()->store_stub_cache();
-void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
- isolate()->stub_cache()->Set(name, map, code);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* code) {
+ if (code->IsSmi()) {
+ // TODO(jkummerow): Support Smis in the code cache.
+ Handle<Map> map_handle(map, isolate());
+ Handle<Name> name_handle(name, isolate());
+ FieldIndex index =
+ FieldIndex::ForLoadByFieldOffset(map, Smi::cast(code)->value());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
+ LoadFieldStub stub(isolate(), index);
+ Code* handler = *stub.GetCode();
+ stub_cache()->Set(*name_handle, *map_handle, handler);
+ return;
+ }
+ DCHECK(code->IsCode());
+ stub_cache()->Set(name, map, Code::cast(code));
}
+Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
+ Handle<Object> value) {
+ // Try to find a globally shared handler stub.
+ Handle<Object> handler_or_index = GetMapIndependentHandler(lookup);
+ if (!handler_or_index.is_null()) {
+ DCHECK(handler_or_index->IsCode() || handler_or_index->IsSmi());
+ return handler_or_index;
+ }
-Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
+ // Otherwise check the map's handler cache for a map-specific handler, and
+ // compile one if the cache comes up empty.
bool receiver_is_holder =
lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
CacheHolderFlag flag;
- Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
- receiver_map(), receiver_is_holder, isolate(), &flag);
+ Handle<Map> stub_holder_map;
+ if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
+ kind() == Code::KEYED_LOAD_IC) {
+ stub_holder_map = IC::GetHandlerCacheHolder(
+ receiver_map(), receiver_is_holder, isolate(), &flag);
+ } else {
+ DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
+ // Store handlers cannot be cached on prototypes.
+ flag = kCacheOnReceiver;
+ stub_holder_map = receiver_map();
+ }
Handle<Code> code = PropertyHandlerCompiler::Find(
- lookup->name(), stub_holder_map, kind(), flag,
- lookup->is_dictionary_holder() ? Code::NORMAL : Code::FAST);
+ lookup->name(), stub_holder_map, kind(), flag);
// Use the cached value if it exists, and if it is different from the
// handler that just missed.
if (!code.is_null()) {
- if (!maybe_handler_.is_null() &&
- !maybe_handler_.ToHandleChecked().is_identical_to(code)) {
- return code;
- }
- if (maybe_handler_.is_null()) {
+ Handle<Object> handler;
+ if (maybe_handler_.ToHandle(&handler)) {
+ if (!handler.is_identical_to(code)) {
+ TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
+ return code;
+ }
+ } else {
// maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
// In MEGAMORPHIC case, check if the handler in the megamorphic stub
// cache (which just missed) is different from the cached handler.
if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
- Code* megamorphic_cached_code =
- isolate()->stub_cache()->Get(*lookup->name(), map, code->flags());
- if (megamorphic_cached_code != *code) return code;
+ Code* megamorphic_cached_code = stub_cache()->Get(*lookup->name(), map);
+ if (megamorphic_cached_code != *code) {
+ TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
+ return code;
+ }
} else {
+ TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
return code;
}
}
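
Two things change in the handler-cache path: the single isolate stub cache is split into load and store stub caches, selected by the new IC::stub_cache() accessor, and UpdateMegamorphicCache learns to accept Smi handlers by expanding them back into a LoadFieldStub before insertion, since the stub cache itself still stores only Code (the TODO notes Smi support as future work). ComputeHandler, meanwhile, first asks GetMapIndependentHandler for a shared handler and only consults the map-specific cache, now with TRACE_HANDLER_STATS instrumentation, when that comes back null.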
@@ -1104,24 +1033,13 @@ Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
code = CompileHandler(lookup, value, flag);
DCHECK(code->is_handler());
-
- // TODO(mvstanton): we'd only like to cache code on the map when it's custom
- // code compiled for this map, otherwise it's already cached in the global
- // code cache. We are also guarding against installing code with flags that
- // don't match the desired CacheHolderFlag computed above, which would lead to
- // invalid lookups later.
- if (code->type() != Code::NORMAL &&
- Code::ExtractCacheHolderFromFlags(code->flags()) == flag) {
- Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
- }
+ DCHECK(Code::ExtractCacheHolderFromFlags(code->flags()) == flag);
+ Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
return code;
}
-
-Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> unused,
- CacheHolderFlag cache_holder) {
+Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
Handle<Object> receiver = lookup->GetReceiver();
if (receiver->IsString() &&
Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
@@ -1131,6 +1049,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
if (receiver->IsStringWrapper() &&
Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_StringLengthStub);
StringLengthStub string_length_stub(isolate());
return string_length_stub.GetCode();
}
@@ -1143,6 +1062,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
->map()
->has_non_instance_prototype()) {
Handle<Code> stub;
+ TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
FunctionPrototypeStub function_prototype_stub(isolate());
return function_prototype_stub.GetCode();
}
@@ -1151,16 +1071,8 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
bool receiver_is_holder = receiver.is_identical_to(holder);
switch (lookup->state()) {
- case LookupIterator::INTERCEPTOR: {
- DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
- NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
- // Perform a lookup behind the interceptor. Copy the LookupIterator since
- // the original iterator will be used to fetch the value.
- LookupIterator it = *lookup;
- it.Next();
- LookupForRead(&it);
- return compiler.CompileLoadInterceptor(&it);
- }
+ case LookupIterator::INTERCEPTOR:
+ break; // Custom-compiled handler.
case LookupIterator::ACCESSOR: {
// Use simple field loads for some well-known callback properties.
@@ -1172,72 +1084,64 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
return SimpleFieldLoad(index);
}
- if (Accessors::IsJSArrayBufferViewFieldAccessor(map, lookup->name(),
- &object_offset)) {
- FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
- ArrayBufferViewLoadFieldStub stub(isolate(), index);
- return stub.GetCode();
- }
if (IsCompatibleReceiver(lookup, map)) {
Handle<Object> accessors = lookup->GetAccessors();
if (accessors->IsAccessorPair()) {
- if (!holder->HasFastProperties()) break;
+ if (!holder->HasFastProperties()) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
+ }
// When debugging we need to go the slow path to flood the accessor.
- if (GetSharedFunctionInfo()->HasDebugInfo()) break;
- Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
- isolate());
- CallOptimization call_optimization(getter);
- NamedLoadHandlerCompiler compiler(isolate(), map, holder,
- cache_holder);
- if (call_optimization.is_simple_api_call()) {
- return compiler.CompileLoadCallback(
- lookup->name(), call_optimization, lookup->GetAccessorIndex());
+ if (GetSharedFunctionInfo()->HasDebugInfo()) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
}
- int expected_arguments = Handle<JSFunction>::cast(getter)
- ->shared()
- ->internal_formal_parameter_count();
- return compiler.CompileLoadViaGetter(
- lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
+ break; // Custom-compiled handler.
} else if (accessors->IsAccessorInfo()) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
- if (v8::ToCData<Address>(info->getter()) == 0) break;
- if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map)) {
- // This case should be already handled in LoadIC::UpdateCaches.
- UNREACHABLE();
- break;
+ if (v8::ToCData<Address>(info->getter()) == nullptr) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
+ }
+ // Ruled out by IsCompatibleReceiver() above.
+ DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map));
+ if (!holder->HasFastProperties()) return slow_stub();
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
+ int index = lookup->GetAccessorIndex();
+ LoadApiGetterStub stub(isolate(), true, index);
+ return stub.GetCode();
}
- if (!holder->HasFastProperties()) break;
- if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
- NamedLoadHandlerCompiler compiler(isolate(), map, holder,
- cache_holder);
- return compiler.CompileLoadCallback(lookup->name(), info);
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
+ }
+ break; // Custom-compiled handler.
}
}
- break;
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
}
case LookupIterator::DATA: {
if (lookup->is_dictionary_holder()) {
- if (kind() != Code::LOAD_IC) break;
+ if (kind() != Code::LOAD_IC && kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
+ }
if (holder->IsJSGlobalObject()) {
- NamedLoadHandlerCompiler compiler(isolate(), map, holder,
- cache_holder);
- Handle<PropertyCell> cell = lookup->GetPropertyCell();
- Handle<Code> code = compiler.CompileLoadGlobal(
- cell, lookup->name(), lookup->IsConfigurable());
- // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map =
- GetHandlerCacheHolder(map, receiver_is_holder, isolate(), &flag);
- Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
- return code;
+ break; // Custom-compiled handler.
}
// There is only one shared stub for loading normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the object for the stub to be
// applicable.
- if (!receiver_is_holder) break;
+ if (!receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
+ }
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormal);
return isolate()->builtins()->LoadIC_Normal();
}
@@ -1247,30 +1151,153 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
if (receiver_is_holder) {
return SimpleFieldLoad(field);
}
- NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
- return compiler.CompileLoadField(lookup->name(), field);
+ break; // Custom-compiled handler.
}
// -------------- Constant properties --------------
DCHECK(lookup->property_details().type() == DATA_CONSTANT);
if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
return stub.GetCode();
}
+ break; // Custom-compiled handler.
+ }
+
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
+ return slow_stub();
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ }
+
+ return Handle<Code>::null();
+}
+
+Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> unused,
+ CacheHolderFlag cache_holder) {
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+#ifdef DEBUG
+ // Only used by DCHECKs below.
+ Handle<Object> receiver = lookup->GetReceiver();
+ bool receiver_is_holder = receiver.is_identical_to(holder);
+#endif
+ // Non-map-specific handler stubs have already been selected.
+ DCHECK(!receiver->IsString() ||
+ !Name::Equals(isolate()->factory()->length_string(), lookup->name()));
+ DCHECK(!receiver->IsStringWrapper() ||
+ !Name::Equals(isolate()->factory()->length_string(), lookup->name()));
+
+ DCHECK(!(
+ receiver->IsJSFunction() &&
+ Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
+ receiver->IsConstructor() &&
+ !Handle<JSFunction>::cast(receiver)
+ ->map()
+ ->has_non_instance_prototype()));
+
+ Handle<Map> map = receiver_map();
+ switch (lookup->state()) {
+ case LookupIterator::INTERCEPTOR: {
+ DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadInterceptor);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+ // Perform a lookup behind the interceptor. Copy the LookupIterator since
+ // the original iterator will be used to fetch the value.
+ LookupIterator it = *lookup;
+ it.Next();
+ LookupForRead(&it);
+ return compiler.CompileLoadInterceptor(&it);
+ }
+
+ case LookupIterator::ACCESSOR: {
+#ifdef DEBUG
+ int object_offset;
+ DCHECK(!Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
+ &object_offset));
+#endif
+
+ DCHECK(IsCompatibleReceiver(lookup, map));
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsAccessorPair()) {
+ DCHECK(holder->HasFastProperties());
+ DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
+ Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+ isolate());
+ CallOptimization call_optimization(getter);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+ if (call_optimization.is_simple_api_call()) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadCallback);
+ int index = lookup->GetAccessorIndex();
+ Handle<Code> code = compiler.CompileLoadCallback(
+ lookup->name(), call_optimization, index, slow_stub());
+ return code;
+ }
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadViaGetter);
+ int expected_arguments = Handle<JSFunction>::cast(getter)
+ ->shared()
+ ->internal_formal_parameter_count();
+ return compiler.CompileLoadViaGetter(
+ lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
+ } else {
+ DCHECK(accessors->IsAccessorInfo());
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+ DCHECK(v8::ToCData<Address>(info->getter()) != nullptr);
+ DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map));
+ DCHECK(holder->HasFastProperties());
+ DCHECK(!receiver_is_holder);
+ DCHECK(!info->is_sloppy() || receiver->IsJSReceiver());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadCallback);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+ Handle<Code> code =
+ compiler.CompileLoadCallback(lookup->name(), info, slow_stub());
+ return code;
+ }
+ UNREACHABLE();
+ }
+
+ case LookupIterator::DATA: {
+ if (lookup->is_dictionary_holder()) {
+ DCHECK(kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC);
+ DCHECK(holder->IsJSGlobalObject());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+ Handle<PropertyCell> cell = lookup->GetPropertyCell();
+ Handle<Code> code = compiler.CompileLoadGlobal(
+ cell, lookup->name(), lookup->IsConfigurable());
+ return code;
+ }
+
+ // -------------- Fields --------------
+ if (lookup->property_details().type() == DATA) {
+ FieldIndex field = lookup->GetFieldIndex();
+ DCHECK(!receiver_is_holder);
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadField);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
+ return compiler.CompileLoadField(lookup->name(), field);
+ }
+
+ // -------------- Constant properties --------------
+ DCHECK(lookup->property_details().type() == DATA_CONSTANT);
+ DCHECK(!receiver_is_holder);
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstant);
NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
return compiler.CompileLoadConstant(lookup->name(),
lookup->GetConstantIndex());
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return slow_stub();
case LookupIterator::ACCESS_CHECK:
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
case LookupIterator::TRANSITION:
UNREACHABLE();
}
-
+ UNREACHABLE();
return slow_stub();
}
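
The old monolithic LoadIC::CompileHandler is split in two. GetMapIndependentHandler returns the handlers that can be shared across maps (simple field loads, LoadIC_Normal, LoadApiGetterStub, the various slow-stub bailouts) and returns a null handle for the cases marked "Custom-compiled handler". The new CompileHandler then covers exactly those cases (interceptors, getter pairs, AccessorInfo callbacks, global property cells, and fields or constants on a holder other than the receiver), with DCHECKs re-asserting every condition the first phase already filtered out, and CompileLoadCallback now takes the slow stub as an explicit fallback.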
@@ -1288,33 +1315,35 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
key = handle(Smi::FromInt(int_value), isolate);
}
}
- } else if (key->IsUndefined()) {
+ } else if (key->IsUndefined(isolate)) {
key = isolate->factory()->undefined_string();
}
return key;
}
-
-Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
- Handle<Code> null_handle;
+void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
Handle<Map> receiver_map(receiver->map(), isolate());
- DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE); // Checked by caller.
+ DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE &&
+ receiver_map->instance_type() != JS_PROXY_TYPE); // Checked by caller.
MapHandleList target_receiver_maps;
TargetMaps(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
- receiver_map, extra_ic_state());
- ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
- return null_handle;
+ Handle<Object> handler =
+ ElementHandlerCompiler::GetKeyedLoadHandler(receiver_map, isolate());
+ return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
}
for (int i = 0; i < target_receiver_maps.length(); i++) {
- if (!target_receiver_maps.at(i).is_null() &&
- target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
+ Handle<Map> map = target_receiver_maps.at(i);
+ if (map.is_null()) continue;
+ if (map->instance_type() == JS_VALUE_TYPE) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSValue");
- return megamorphic_stub();
+ return;
+ }
+ if (map->instance_type() == JS_PROXY_TYPE) {
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSProxy");
+ return;
}
}
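
KeyedLoadIC::LoadElementStub becomes UpdateLoadElement: it returns void and writes handlers directly into the feedback vector via ConfigureVectorState, sources element handlers from ElementHandlerCompiler::GetKeyedLoadHandler instead of PropertyICCompiler, and rejects JS_PROXY_TYPE receivers alongside JS_VALUE_TYPE. The generic bailouts simply return without touching the vector, leaving the caller to flip the IC to MEGAMORPHIC.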
@@ -1329,11 +1358,9 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
IsMoreGeneralElementsKindTransition(
target_receiver_maps.at(0)->elements_kind(),
Handle<JSObject>::cast(receiver)->GetElementsKind())) {
- Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
- receiver_map, extra_ic_state());
- ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
- return null_handle;
+ Handle<Object> handler =
+ ElementHandlerCompiler::GetKeyedLoadHandler(receiver_map, isolate());
+ return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
}
DCHECK(state() != GENERIC);
@@ -1344,21 +1371,20 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
- return megamorphic_stub();
+ return;
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
- return megamorphic_stub();
+ return;
}
- CodeHandleList handlers(target_receiver_maps.length());
+ List<Handle<Object>> handlers(target_receiver_maps.length());
ElementHandlerCompiler compiler(isolate());
compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
- ConfigureVectorState(Handle<Name>::null(), &target_receiver_maps, &handlers);
- return null_handle;
+ ConfigureVectorState(Handle<Name>(), &target_receiver_maps, &handlers);
}
@@ -1373,13 +1399,15 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
}
Handle<Object> load_handle;
- Handle<Code> stub = megamorphic_stub();
// Check for non-string values that can be converted into an
// internalized string directly or are representable as a smi.
key = TryConvertKey(key, isolate());
- if (key->IsInternalizedString() || key->IsSymbol()) {
+ uint32_t index;
+ if ((key->IsInternalizedString() &&
+ !String::cast(*key)->AsArrayIndex(&index)) ||
+ key->IsSymbol()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
LoadIC::Load(object, Handle<Name>::cast(key)),
Object);
@@ -1387,20 +1415,15 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
!object->IsJSValue()) {
if (object->IsJSObject() || (object->IsString() && key->IsNumber())) {
Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
- if (object->IsString() || key->IsSmi()) stub = LoadElementStub(receiver);
+ if (object->IsString() || key->IsSmi()) UpdateLoadElement(receiver);
}
}
- DCHECK(UseVector());
- if (!is_vector_set() || stub.is_null()) {
- Code* generic = *megamorphic_stub();
- if (!stub.is_null() && *stub == generic) {
- ConfigureVectorState(MEGAMORPHIC, key);
- TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
- }
-
- TRACE_IC("LoadIC", key);
+ if (!is_vector_set()) {
+ ConfigureVectorState(MEGAMORPHIC, key);
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
}
+ TRACE_IC("LoadIC", key);
if (!load_handle.is_null()) return load_handle;
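
The new AsArrayIndex guard keeps canonical numeric strings such as "0"
or "42" off the named-load path: even when internalized, they must take
the element path handled by UpdateLoadElement above. An illustrative
helper (invented, not V8 API) for which string keys stay eligible for
the named LoadIC:

#include <cctype>
#include <cstdint>
#include <string>

bool EligibleForNamedLoad(const std::string& key) {
  if (key.empty() || key.size() > 10) return true;  // Can't be a uint32.
  uint64_t index = 0;
  for (char c : key) {
    if (!std::isdigit(static_cast<unsigned char>(c))) return true;
    index = index * 10 + static_cast<uint64_t>(c - '0');
  }
  if (key.size() > 1 && key[0] == '0') return true;  // "01" isn't canonical.
  return index > 0xFFFFFFFEull;  // Array indices are below 2^32 - 1.
}
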
@@ -1432,9 +1455,9 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
InterceptorInfo* info = holder->GetNamedInterceptor();
if (it->HolderIsReceiverOrHiddenPrototype()) {
return !info->non_masking() && receiver.is_identical_to(holder) &&
- !info->setter()->IsUndefined();
- } else if (!info->getter()->IsUndefined() ||
- !info->query()->IsUndefined()) {
+ !info->setter()->IsUndefined(it->isolate());
+ } else if (!info->getter()->IsUndefined(it->isolate()) ||
+ !info->query()->IsUndefined(it->isolate())) {
return false;
}
break;
@@ -1483,27 +1506,6 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
- // Check if the name is trivially convertible to an index and set the element.
- uint32_t index;
- if (kind() == Code::KEYED_STORE_IC && name->AsArrayIndex(&index)) {
- // Rewrite to the generic keyed store stub.
- if (FLAG_use_ic) {
- if (UseVector()) {
- ConfigureVectorState(MEGAMORPHIC, name);
- } else if (!AddressIsDeoptimizedCode()) {
- set_target(*megamorphic_stub());
- }
- TRACE_IC("StoreIC", name);
- TRACE_GENERIC_IC(isolate(), "StoreIC", "name as array index");
- }
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetElement(isolate(), object, index, value, language_mode()),
- Object);
- return result;
- }
-
if (object->IsJSGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
@@ -1522,7 +1524,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> previous_value =
FixedArray::get(*script_context, lookup_result.slot_index, isolate());
- if (*previous_value == *isolate()->factory()->the_hole_value()) {
+ if (previous_value->IsTheHole(isolate())) {
// Do not install stubs and stay pre-monomorphic for
// uninitialized accesses.
return ReferenceError(name);
@@ -1530,6 +1532,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
if (FLAG_use_ic &&
StoreScriptContextFieldStub::Accepted(&lookup_result)) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreScriptContextFieldStub);
StoreScriptContextFieldStub stub(isolate(), &lookup_result);
PatchCache(name, stub.GetCode());
}
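
The IsTheHole check above is the temporal-dead-zone test for top-level
lexical bindings: a store into a script-context slot that still holds
the hole must fail instead of installing a stub. A simplified standalone
model (invented types; std::optional stands in for the hole):

#include <map>
#include <optional>
#include <stdexcept>
#include <string>

using Slot = std::optional<double>;  // nullopt plays the role of the hole.

void StoreScriptContextSlot(std::map<std::string, Slot>& context,
                            const std::string& name, double value) {
  auto it = context.find(name);
  if (it == context.end()) throw std::runtime_error("not a lexical binding");
  if (!it->second.has_value()) {
    // Binding not yet initialized: its let/const declaration hasn't run.
    throw std::runtime_error("ReferenceError: " + name);
  }
  it->second = value;
}
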
@@ -1551,21 +1554,13 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// If the object is undefined or null, it's illegal to try to set any
// properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
+ if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
}
- // Observed objects are always modified through the runtime.
- if (object->IsHeapObject() &&
- Handle<HeapObject>::cast(object)->map()->is_observed()) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::SetProperty(object, name, value, language_mode(), store_mode),
- Object);
- return result;
+ if (state() != UNINITIALIZED) {
+ JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
}
-
LookupIterator it(object, name);
if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
@@ -1574,90 +1569,6 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
return value;
}
-Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
- Handle<Code> code = stub.GetCode();
- return code;
-}
-
-Handle<Code> CallIC::initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, ConvertReceiverMode mode,
- TailCallMode tail_call_mode) {
- CallICStub stub(isolate, CallICState(argc, mode, tail_call_mode));
- Handle<Code> code = stub.GetCode();
- return code;
-}
-
-
-static Handle<Code> StoreICInitializeStubHelper(
- Isolate* isolate, ExtraICState extra_state,
- InlineCacheState initialization_state) {
- Handle<Code> ic = PropertyICCompiler::ComputeStore(
- isolate, initialization_state, extra_state);
- return ic;
-}
-
-
-Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
- LanguageMode language_mode,
- State initialization_state) {
- DCHECK(initialization_state == UNINITIALIZED ||
- initialization_state == PREMONOMORPHIC ||
- initialization_state == MEGAMORPHIC);
- VectorStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
- return stub.GetCode();
-}
-
-
-Handle<Code> StoreIC::initialize_stub_in_optimized_code(
- Isolate* isolate, LanguageMode language_mode, State initialization_state) {
- DCHECK(initialization_state == UNINITIALIZED ||
- initialization_state == PREMONOMORPHIC ||
- initialization_state == MEGAMORPHIC);
- if (initialization_state != MEGAMORPHIC) {
- VectorStoreICStub stub(isolate, StoreICState(language_mode));
- return stub.GetCode();
- }
-
- return StoreICInitializeStubHelper(
- isolate, ComputeExtraICState(language_mode), initialization_state);
-}
-
-
-Handle<Code> StoreIC::megamorphic_stub() {
- if (kind() == Code::STORE_IC) {
- return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
- extra_ic_state());
- } else {
- DCHECK(kind() == Code::KEYED_STORE_IC);
- if (is_strict(language_mode())) {
- return isolate()->builtins()->KeyedStoreIC_Megamorphic_Strict();
- } else {
- return isolate()->builtins()->KeyedStoreIC_Megamorphic();
- }
- }
-}
-
-
-Handle<Code> StoreIC::slow_stub() const {
- if (kind() == Code::STORE_IC) {
- return isolate()->builtins()->StoreIC_Slow();
- } else {
- DCHECK(kind() == Code::KEYED_STORE_IC);
- return isolate()->builtins()->KeyedStoreIC_Slow();
- }
-}
-
-
-Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
- LanguageMode language_mode) {
- ExtraICState state = ComputeExtraICState(language_mode);
- return PropertyICCompiler::ComputeStore(isolate, PREMONOMORPHIC, state);
-}
-
-
void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
if (state() == UNINITIALIZED) {
@@ -1672,7 +1583,8 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (!use_ic) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
}
- Handle<Code> code = use_ic ? ComputeHandler(lookup, value) : slow_stub();
+ Handle<Code> code =
+ use_ic ? Handle<Code>::cast(ComputeHandler(lookup, value)) : slow_stub();
PatchCache(lookup->name(), code);
TRACE_IC("StoreIC", lookup->name());
@@ -1694,10 +1606,7 @@ static Handle<Code> PropertyCellStoreHandler(
return code;
}
-
-Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
+Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
// This is currently guaranteed by checks in StoreIC::Store.
@@ -1709,97 +1618,87 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::TRANSITION: {
auto store_target = lookup->GetStoreTarget();
if (store_target->IsJSGlobalObject()) {
- // TODO(dcarney): this currently just deopts. Use the transition cell.
- auto cell = isolate()->factory()->NewPropertyCell();
- cell->set_value(*value);
- auto code = PropertyCellStoreHandler(
- isolate(), store_target, Handle<JSGlobalObject>::cast(store_target),
- lookup->name(), cell, PropertyCellType::kConstant);
- cell->set_value(isolate()->heap()->the_hole_value());
- return code;
+ break; // Custom-compiled handler.
}
- Handle<Map> transition = lookup->transition_map();
// Currently not handled by CompileStoreTransition.
if (!holder->HasFastProperties()) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
- break;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
DCHECK(lookup->IsCacheableTransition());
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- return compiler.CompileStoreTransition(transition, lookup->name());
+ break; // Custom-compiled handler.
}
case LookupIterator::INTERCEPTOR: {
- DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
- return CodeFactory::StoreInterceptor(isolate()).code();
+ DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined(isolate()));
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub);
+ StoreInterceptorStub stub(isolate());
+ return stub.GetCode();
}
case LookupIterator::ACCESSOR: {
if (!holder->HasFastProperties()) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
- break;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
Handle<Object> accessors = lookup->GetAccessors();
if (accessors->IsAccessorInfo()) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
- if (v8::ToCData<Address>(info->setter()) == 0) {
- TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
- break;
+ if (v8::ToCData<Address>(info->setter()) == nullptr) {
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == nullptr");
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
!lookup->HolderIsReceiverOrHiddenPrototype()) {
TRACE_GENERIC_IC(isolate(), "StoreIC",
"special data property in prototype chain");
- break;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
receiver_map())) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
- break;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
- if (info->is_sloppy() && !receiver->IsJSReceiver()) break;
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- return compiler.CompileStoreCallback(receiver, lookup->name(), info,
- language_mode());
+ if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
+ }
+ break; // Custom-compiled handler.
} else if (accessors->IsAccessorPair()) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
isolate());
- if (!setter->IsJSFunction()) {
+ if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
- break;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
- Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
- CallOptimization call_optimization(function);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- if (call_optimization.is_simple_api_call() &&
- call_optimization.IsCompatibleReceiver(receiver, holder)) {
- return compiler.CompileStoreCallback(receiver, lookup->name(),
- call_optimization,
- lookup->GetAccessorIndex());
+ CallOptimization call_optimization(setter);
+ if (call_optimization.is_simple_api_call()) {
+ if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
+ break; // Custom-compiled handler.
+ }
+ TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver");
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
- int expected_arguments =
- function->shared()->internal_formal_parameter_count();
- return compiler.CompileStoreViaSetter(receiver, lookup->name(),
- lookup->GetAccessorIndex(),
- expected_arguments);
+ break; // Custom-compiled handler.
}
- break;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
case LookupIterator::DATA: {
if (lookup->is_dictionary_holder()) {
if (holder->IsJSGlobalObject()) {
- DCHECK(holder.is_identical_to(receiver) ||
- receiver->map()->prototype() == *holder);
- auto cell = lookup->GetPropertyCell();
- auto updated_type = PropertyCell::UpdatedType(
- cell, value, lookup->property_details());
- auto code = PropertyCellStoreHandler(
- isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
- lookup->name(), cell, updated_type);
- return code;
+ break; // Custom-compiled handler.
}
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNormal);
DCHECK(holder.is_identical_to(receiver));
return isolate()->builtins()->StoreIC_Normal();
}
@@ -1813,18 +1712,19 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
use_stub = !field_type->IsClass();
}
if (use_stub) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
lookup->representation());
return stub.GetCode();
}
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- return compiler.CompileStoreField(lookup);
+ break; // Custom-compiled handler.
}
// -------------- Constant properties --------------
DCHECK(lookup->property_details().type() == DATA_CONSTANT);
TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
- break;
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1833,22 +1733,134 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::NOT_FOUND:
UNREACHABLE();
}
- return slow_stub();
+ return Handle<Code>::null();
}
+Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
+ DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
+
+ // This is currently guaranteed by checks in StoreIC::Store.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+ DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
+
+ switch (lookup->state()) {
+ case LookupIterator::TRANSITION: {
+ auto store_target = lookup->GetStoreTarget();
+ if (store_target->IsJSGlobalObject()) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalTransition);
+ Handle<PropertyCell> cell = lookup->transition_cell();
+ cell->set_value(*value);
+ Handle<Code> code = PropertyCellStoreHandler(
+ isolate(), store_target, Handle<JSGlobalObject>::cast(store_target),
+ lookup->name(), cell, PropertyCellType::kConstant);
+ cell->set_value(isolate()->heap()->the_hole_value());
+ return code;
+ }
+ Handle<Map> transition = lookup->transition_map();
+ // Currently not handled by CompileStoreTransition.
+ DCHECK(holder->HasFastProperties());
+
+ DCHECK(lookup->IsCacheableTransition());
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransition);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
+ return compiler.CompileStoreTransition(transition, lookup->name());
+ }
+
+ case LookupIterator::INTERCEPTOR:
+ UNREACHABLE();
+
+ case LookupIterator::ACCESSOR: {
+ DCHECK(holder->HasFastProperties());
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+ DCHECK(v8::ToCData<Address>(info->setter()) != 0);
+ DCHECK(!AccessorInfo::cast(*accessors)->is_special_data_property() ||
+ lookup->HolderIsReceiverOrHiddenPrototype());
+ DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+ receiver_map()));
+ DCHECK(!info->is_sloppy() || receiver->IsJSReceiver());
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
+ Handle<Code> code = compiler.CompileStoreCallback(
+ receiver, lookup->name(), info, language_mode());
+ return code;
+ } else {
+ DCHECK(accessors->IsAccessorPair());
+ Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
+ isolate());
+ DCHECK(setter->IsJSFunction() || setter->IsFunctionTemplateInfo());
+ CallOptimization call_optimization(setter);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
+ if (call_optimization.is_simple_api_call()) {
+ DCHECK(call_optimization.IsCompatibleReceiver(receiver, holder));
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
+ Handle<Code> code = compiler.CompileStoreCallback(
+ receiver, lookup->name(), call_optimization,
+ lookup->GetAccessorIndex(), slow_stub());
+ return code;
+ }
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreViaSetter);
+ int expected_arguments = JSFunction::cast(*setter)
+ ->shared()
+ ->internal_formal_parameter_count();
+ return compiler.CompileStoreViaSetter(receiver, lookup->name(),
+ lookup->GetAccessorIndex(),
+ expected_arguments);
+ }
+ }
+
+ case LookupIterator::DATA: {
+ if (lookup->is_dictionary_holder()) {
+ DCHECK(holder->IsJSGlobalObject());
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
+ DCHECK(holder.is_identical_to(receiver) ||
+ receiver->map()->prototype() == *holder);
+ auto cell = lookup->GetPropertyCell();
+ auto updated_type =
+ PropertyCell::UpdatedType(cell, value, lookup->property_details());
+ auto code = PropertyCellStoreHandler(
+ isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
+ lookup->name(), cell, updated_type);
+ return code;
+ }
-Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
- KeyedAccessStoreMode store_mode) {
- Handle<Code> null_handle;
- // Don't handle megamorphic property accesses for INTERCEPTORS or
- // ACCESSOR_CONSTANT
- // via megamorphic stubs, since they don't have a map in their relocation info
- // and so the stubs can't be harvested for the object needed for a map check.
- if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-NORMAL target type");
- return megamorphic_stub();
+ // -------------- Fields --------------
+ if (lookup->property_details().type() == DATA) {
+#ifdef DEBUG
+ bool use_stub = true;
+ if (lookup->representation().IsHeapObject()) {
+ // Only use a generic stub if no types need to be tracked.
+ Handle<FieldType> field_type = lookup->GetFieldType();
+ use_stub = !field_type->IsClass();
+ }
+ DCHECK(!use_stub);
+#endif
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreField);
+ NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
+ return compiler.CompileStoreField(lookup);
+ }
+
+ // -------------- Constant properties --------------
+ DCHECK(lookup->property_details().type() == DATA_CONSTANT);
+ UNREACHABLE();
+ }
+
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ case LookupIterator::ACCESS_CHECK:
+ case LookupIterator::JSPROXY:
+ case LookupIterator::NOT_FOUND:
+ UNREACHABLE();
}
+ UNREACHABLE();
+ return slow_stub();
+}
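
The commit splits handler selection into two phases: the virtual
GetMapIndependentHandler either returns a reusable stub outright or
falls through at the "Custom-compiled handler" breaks, and
CompileHandler then builds map-specific code for exactly those
fall-through cases, DCHECKing the conditions the first phase already
screened. The dispatch shape, sketched with invented types:

#include <functional>
#include <memory>
#include <string>

struct Handler { std::string description; };
using HandlerPtr = std::shared_ptr<Handler>;

// Phase one may return null, meaning "needs a custom-compiled handler".
HandlerPtr ComputeHandler(const std::function<HandlerPtr()>& map_independent,
                          const std::function<HandlerPtr()>& compile_for_map) {
  if (HandlerPtr shared = map_independent()) return shared;
  return compile_for_map();  // Reached only for the break cases.
}
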
+void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
+ KeyedAccessStoreMode store_mode) {
MapHandleList target_receiver_maps;
TargetMaps(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
@@ -1856,10 +1868,17 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- monomorphic_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
- return null_handle;
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(monomorphic_map,
+ store_mode);
+ return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
+ }
+
+ for (int i = 0; i < target_receiver_maps.length(); i++) {
+ if (!target_receiver_maps.at(i).is_null() &&
+ target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "JSValue");
+ return;
+ }
}
// There are several special cases where an IC that is MONOMORPHIC can still
@@ -1884,23 +1903,22 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
store_mode = GetNonTransitioningStoreMode(store_mode);
Handle<Code> handler =
PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- transitioned_receiver_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
- handler);
- return null_handle;
- } else if (receiver_map.is_identical_to(previous_receiver_map) &&
- old_store_mode == STANDARD_STORE &&
- (store_mode == STORE_AND_GROW_NO_TRANSITION ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+ transitioned_receiver_map, store_mode);
+ ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
+ return;
+ }
+ if (receiver_map.is_identical_to(previous_receiver_map) &&
+ old_store_mode == STANDARD_STORE &&
+ (store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
Handle<Code> handler =
- PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
- receiver_map, language_mode(), store_mode);
- ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
- return null_handle;
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(receiver_map,
+ store_mode);
+ return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
}
}
@@ -1920,14 +1938,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help; use the megamorphic stub, which can handle everything.
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
- return megamorphic_stub();
+ return;
}
// If the maximum number of receiver maps has been exceeded, use the
// megamorphic version of the IC.
- if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- return megamorphic_stub();
- }
+ if (target_receiver_maps.length() > kMaxKeyedPolymorphism) return;
// Make sure all polymorphic handlers have the same store mode; otherwise the
// megamorphic stub must be used.
@@ -1937,7 +1953,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
store_mode = old_store_mode;
} else if (store_mode != old_store_mode) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
- return megamorphic_stub();
+ return;
}
}
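
The merge rule made explicit above: an incoming STANDARD_STORE defers to
the previously recorded mode, while two differing non-standard modes
cannot be combined and force the IC megamorphic. As a standalone sketch
(toy enum; nullopt means "go megamorphic"):

#include <optional>

enum class StoreMode { kStandard, kGrow, kIgnoreOOB, kHandleCOW };

std::optional<StoreMode> MergeStoreModes(StoreMode old_mode,
                                         StoreMode new_mode) {
  if (new_mode == StoreMode::kStandard) return old_mode;
  if (new_mode == old_mode) return new_mode;
  return std::nullopt;  // "store mode mismatch".
}
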
@@ -1955,17 +1971,16 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
external_arrays != target_receiver_maps.length()) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
"unsupported combination of external and normal arrays");
- return megamorphic_stub();
+ return;
}
}
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_Polymorphic);
MapHandleList transitioned_maps(target_receiver_maps.length());
CodeHandleList handlers(target_receiver_maps.length());
PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
- &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
- language_mode());
+ &target_receiver_maps, &transitioned_maps, &handlers, store_mode);
ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
- return null_handle;
}
@@ -2078,7 +2093,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
key = TryConvertKey(key, isolate());
Handle<Object> store_handle;
- Handle<Code> stub = megamorphic_stub();
uint32_t index;
if ((key->IsInternalizedString() &&
@@ -2098,10 +2112,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
return store_handle;
}
- bool use_ic =
- FLAG_use_ic && !object->IsStringWrapper() &&
- !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy() &&
- !(object->IsJSObject() && JSObject::cast(*object)->map()->is_observed());
+ bool use_ic = FLAG_use_ic && !object->IsStringWrapper() &&
+ !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy();
if (use_ic && !object->IsSmi()) {
// Don't use ICs for maps of the objects in Array's prototype chain. We
// expect to be able to trap element sets to objects with those maps in
@@ -2149,7 +2161,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// other non-dictionary receivers in the polymorphic case benefit
// from fast path keyed stores.
if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
- stub = StoreElementStub(old_receiver_map, store_mode);
+ UpdateStoreElement(old_receiver_map, store_mode);
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
"dictionary or proxy prototype");
@@ -2162,13 +2174,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
- if (!is_vector_set() || stub.is_null()) {
- Code* megamorphic = *megamorphic_stub();
- if (!stub.is_null() && (*stub == megamorphic || *stub == *slow_stub())) {
- ConfigureVectorState(MEGAMORPHIC, key);
- TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
- *stub == megamorphic ? "set generic" : "slow stub");
- }
+ if (!is_vector_set()) {
+ ConfigureVectorState(MEGAMORPHIC, key);
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
}
TRACE_IC("StoreIC", key);
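
With the stub bookkeeping gone, the fallback contract is simpler: any
miss path that leaves the feedback vector unconfigured transitions the
slot to MEGAMORPHIC. Schematically (invented types):

struct FeedbackSlot {
  bool configured = false;
  bool megamorphic = false;
};

void FinishKeyedStoreMiss(FeedbackSlot* slot) {
  if (!slot->configured) {
    slot->megamorphic = true;  // ConfigureVectorState(MEGAMORPHIC, key);
    slot->configured = true;
  }
}
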
@@ -2228,7 +2236,6 @@ void CallIC::HandleMiss(Handle<Object> function) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK(args.length() == 3);
Handle<Object> function = args.at<Object>(0);
@@ -2245,44 +2252,120 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
- Handle<Name> key = args.at<Name>(1);
- Handle<Object> result;
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
+ FeedbackVectorSlotKind kind = vector->GetKind(vector_slot);
+ if (kind == FeedbackVectorSlotKind::LOAD_IC) {
+ Handle<Name> key = args.at<Name>(1);
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
+
+ } else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
+ Handle<Name> key(vector->GetName(vector_slot), isolate);
+ DCHECK_NE(*key, *isolate->factory()->empty_string());
+ DCHECK_EQ(*isolate->global_object(), *receiver);
+ LoadGlobalICNexus nexus(vector, vector_slot);
+ LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
+
} else {
- DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
- vector->GetKind(vector_slot));
+ Handle<Name> key = args.at<Name>(1);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, kind);
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
+}
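
Runtime_LoadIC_Miss now serves three slot kinds from a single entry
point, and only the LOAD_GLOBAL_IC case reads its name out of the
feedback vector instead of the argument list. The dispatch reduced to a
schematic (invented names):

#include <stdexcept>
#include <string>

enum class SlotKind { kLoadIC, kLoadGlobalIC, kKeyedLoadIC };

std::string HandleLoadMiss(SlotKind kind, const std::string& arg_key,
                           const std::string& vector_key) {
  switch (kind) {
    case SlotKind::kLoadIC: return "LoadIC(" + arg_key + ")";
    case SlotKind::kLoadGlobalIC: return "LoadGlobalIC(" + vector_key + ")";
    case SlotKind::kKeyedLoadIC: return "KeyedLoadIC(" + arg_key + ")";
  }
  throw std::logic_error("unreachable");
}
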
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<JSGlobalObject> global = isolate->global_object();
+ Handle<Smi> slot = args.at<Smi>(0);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
+ vector->GetKind(vector_slot));
+ Handle<String> name(vector->GetName(vector_slot), isolate);
+ DCHECK_NE(*name, *isolate->factory()->empty_string());
+
+ LoadGlobalICNexus nexus(vector, vector_slot);
+ LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ ic.UpdateState(global, name);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(name));
return *result;
}
+RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(slot, 0);
+ CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, vector, 1);
+
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot);
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
+ vector->GetKind(vector_slot));
+ Handle<String> name(vector->GetName(vector_slot), isolate);
+ DCHECK_NE(*name, *isolate->factory()->empty_string());
+
+ Handle<JSGlobalObject> global = isolate->global_object();
+
+ Handle<ScriptContextTable> script_contexts(
+ global->native_context()->script_context_table());
+
+ ScriptContextTable::LookupResult lookup_result;
+ if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
+ Handle<Context> script_context = ScriptContextTable::GetContext(
+ script_contexts, lookup_result.context_index);
+ Handle<Object> result =
+ FixedArray::get(*script_context, lookup_result.slot_index, isolate);
+ if (*result == *isolate->factory()->the_hole_value()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+ }
+ return *result;
+ }
+
+ Handle<Object> result;
+ bool is_found = false;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Runtime::GetObjectProperty(isolate, global, name, &is_found));
+ if (!is_found) {
+ LoadICNexus nexus(isolate);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ // It is actually a LoadGlobalIC here, but the predicate handles this
+ // case properly.
+ if (ic.ShouldThrowReferenceError()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+ }
+ }
+ return *result;
+}
// Used from ic-<arch>.cc
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
- Handle<Object> result;
DCHECK(args.length() == 4);
Handle<Smi> slot = args.at<Smi>(2);
@@ -2291,41 +2374,35 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
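
Most hunks in these runtime functions apply the same mechanical rewrite:
the ASSIGN_RETURN_FAILURE_ON_EXCEPTION / return *result pair collapses
into RETURN_RESULT_OR_FAILURE. A simplified model of what such a macro
does, with std::optional standing in for MaybeHandle and -1 for the
failure sentinel (the real macro returns the isolate's exception):

#include <optional>

#define RETURN_RESULT_OR_FAILURE_MODEL(call) \
  do {                                       \
    auto maybe = (call);                     \
    if (!maybe.has_value()) return -1;       \
    return *maybe;                           \
  } while (false)

std::optional<int> MightFail(bool ok) {
  return ok ? std::optional<int>(42) : std::nullopt;
}

int Runner(bool ok) { RETURN_RESULT_OR_FAILURE_MODEL(MightFail(ok)); }
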
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> result;
-
- DCHECK(args.length() == 4);
- Handle<Smi> slot = args.at<Smi>(2);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ DCHECK_EQ(4, args.length());
+ typedef LoadWithVectorDescriptor Descriptor;
+ Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
+ Handle<Object> key = args.at<Object>(Descriptor::kName);
+ Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
+ Handle<TypeFeedbackVector> vector =
+ args.at<TypeFeedbackVector>(Descriptor::kVector);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
-
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
Handle<Object> value = args.at<Object>(2);
- Handle<Object> result;
DCHECK(args.length() == 5 || args.length() == 6);
Handle<Smi> slot = args.at<Smi>(3);
@@ -2335,32 +2412,58 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
vector->GetKind(vector_slot));
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
- return *result;
}
RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
+ HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
+ typedef StoreWithVectorDescriptor Descriptor;
+ Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
+ Handle<Name> key = args.at<Name>(Descriptor::kName);
+ Handle<Object> value = args.at<Object>(Descriptor::kValue);
+ Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
+ Handle<TypeFeedbackVector> vector =
+ args.at<TypeFeedbackVector>(Descriptor::kVector);
+
+ FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+ StoreICNexus nexus(vector, vector_slot);
+ StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+ } else {
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
+ KeyedStoreICNexus nexus(vector, vector_slot);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ ic.UpdateState(receiver, key);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+ }
+}
+
+RUNTIME_FUNCTION(Runtime_TransitionStoreIC_MissFromStubFailure) {
+ TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
Handle<Object> value = args.at<Object>(2);
- Handle<Object> result;
int length = args.length();
DCHECK(length == 5 || length == 6);
+ // TODO(ishell): use VectorStoreTransitionDescriptor indices here and update
+ // this comment:
+ //
// We might have slot and vector, for a normal miss (slot(3), vector(4)).
// Or, map and vector for a transitioning store miss (map(3), vector(4)).
// In this case, we need to recover the slot from a virtual register.
@@ -2368,15 +2471,10 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
Handle<Smi> slot;
Handle<TypeFeedbackVector> vector;
if (length == 5) {
- if (args.at<Object>(3)->IsMap()) {
- vector = args.at<TypeFeedbackVector>(4);
- slot = handle(
- *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
- isolate);
- } else {
- vector = args.at<TypeFeedbackVector>(4);
- slot = args.at<Smi>(3);
- }
+ vector = args.at<TypeFeedbackVector>(4);
+ slot = handle(
+ *reinterpret_cast<Smi**>(isolate->virtual_slot_register_address()),
+ isolate);
} else {
vector = args.at<TypeFeedbackVector>(5);
slot = args.at<Smi>(4);
@@ -2387,87 +2485,57 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
} else {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
vector->GetKind(vector_slot));
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
- return *result;
}
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
+ DCHECK_EQ(5, args.length());
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- Handle<Object> result;
-
- DCHECK(args.length() == 5);
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- Handle<Object> result;
-
- DCHECK(args.length() == 5);
- Handle<Smi> slot = args.at<Smi>(3);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
+ DCHECK_EQ(5, args.length());
+ typedef StoreWithVectorDescriptor Descriptor;
+ Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
+ Handle<Object> key = args.at<Object>(Descriptor::kName);
+ Handle<Object> value = args.at<Object>(Descriptor::kValue);
+ Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
+ Handle<TypeFeedbackVector> vector =
+ args.at<TypeFeedbackVector>(Descriptor::kVector);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- ic.Store(receiver, key, value));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- LanguageMode language_mode;
- StoreICNexus nexus(isolate);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- language_mode = ic.language_mode();
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
}
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 5);
+ DCHECK_EQ(5, args.length());
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
@@ -2475,17 +2543,14 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
KeyedStoreICNexus nexus(isolate);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
language_mode = ic.language_mode();
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
- return *result;
}
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
// Length == 5 or 6, depending on whether the vector slot
// is passed in a virtual register or not.
@@ -2502,18 +2567,16 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
- return *result;
}
MaybeHandle<Object> BinaryOpIC::Transition(
Handle<AllocationSite> allocation_site, Handle<Object> left,
Handle<Object> right) {
- BinaryOpICState state(isolate(), target()->extra_ic_state());
+ BinaryOpICState state(isolate(), extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
Handle<Object> result;
@@ -2577,16 +2640,12 @@ MaybeHandle<Object> BinaryOpIC::Transition(
return result;
}
- // Execution::Call can execute arbitrary JavaScript, hence potentially
- // update the state of this very IC, so we must update the stored state.
- UpdateTarget();
-
// Compute the new state.
BinaryOpICState old_state(isolate(), target()->extra_ic_state());
state.Update(left, right, result);
// Check if we have a string operation here.
- Handle<Code> target;
+ Handle<Code> new_target;
if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
// Setup the allocation site on-demand.
if (allocation_site.is_null()) {
@@ -2595,24 +2654,24 @@ MaybeHandle<Object> BinaryOpIC::Transition(
// Install the stub with an allocation site.
BinaryOpICWithAllocationSiteStub stub(isolate(), state);
- target = stub.GetCodeCopyFromTemplate(allocation_site);
+ new_target = stub.GetCodeCopyFromTemplate(allocation_site);
// Sanity check the trampoline stub.
- DCHECK_EQ(*allocation_site, target->FindFirstAllocationSite());
+ DCHECK_EQ(*allocation_site, new_target->FindFirstAllocationSite());
} else {
// Install the generic stub.
BinaryOpICStub stub(isolate(), state);
- target = stub.GetCode();
+ new_target = stub.GetCode();
// Sanity check the generic stub.
- DCHECK_NULL(target->FindFirstAllocationSite());
+ DCHECK_NULL(new_target->FindFirstAllocationSite());
}
- set_target(*target);
+ set_target(*new_target);
if (FLAG_trace_ic) {
OFStream os(stdout);
os << "[BinaryOpIC" << old_state << " => " << state << " @ "
- << static_cast<void*>(*target) << " <- ";
+ << static_cast<void*>(*new_target) << " <- ";
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
if (!allocation_site.is_null()) {
os << " using allocation site " << static_cast<void*>(*allocation_site);
@@ -2633,35 +2692,29 @@ MaybeHandle<Object> BinaryOpIC::Transition(
RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
- Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
+ typedef BinaryOpDescriptor Descriptor;
+ Handle<Object> left = args.at<Object>(Descriptor::kLeft);
+ Handle<Object> right = args.at<Object>(Descriptor::kRight);
BinaryOpIC ic(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- ic.Transition(Handle<AllocationSite>::null(), left, right));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, ic.Transition(Handle<AllocationSite>::null(), left, right));
}
RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
+ typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
Handle<AllocationSite> allocation_site =
- args.at<AllocationSite>(BinaryOpWithAllocationSiteStub::kAllocationSite);
- Handle<Object> left = args.at<Object>(BinaryOpWithAllocationSiteStub::kLeft);
- Handle<Object> right =
- args.at<Object>(BinaryOpWithAllocationSiteStub::kRight);
+ args.at<AllocationSite>(Descriptor::kAllocationSite);
+ Handle<Object> left = args.at<Object>(Descriptor::kLeft);
+ Handle<Object> right = args.at<Object>(Descriptor::kRight);
BinaryOpIC ic(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, ic.Transition(allocation_site, left, right));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ ic.Transition(allocation_site, left, right));
}
Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
@@ -2673,14 +2726,6 @@ Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
return code;
}
-Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
- CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
- CompareICState::UNINITIALIZED,
- CompareICState::UNINITIALIZED);
- return stub.GetCode();
-}
-
-
Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope(isolate());
CompareICStub old_stub(target()->stub_key(), isolate());
@@ -2689,7 +2734,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
CompareICState::State new_right =
CompareICState::NewInputState(old_stub.right(), y);
CompareICState::State state = CompareICState::TargetState(
- old_stub.state(), old_stub.left(), old_stub.right(), op_,
+ isolate(), old_stub.state(), old_stub.left(), old_stub.right(), op_,
HasInlinedSmiCode(address()), x, y);
CompareICStub stub(isolate(), op_, new_left, new_right, state);
if (state == CompareICState::KNOWN_RECEIVER) {
@@ -2724,7 +2769,6 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
@@ -2740,7 +2784,7 @@ RUNTIME_FUNCTION(Runtime_Unreachable) {
Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
- ToBooleanICStub stub(isolate(), target()->extra_ic_state());
+ ToBooleanICStub stub(isolate(), extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
Handle<Code> code = stub.GetCode();
set_target(*code);
@@ -2750,7 +2794,6 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
DCHECK(args.length() == 1);
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
@@ -2768,6 +2811,12 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
HandleScope scope(isolate);
+ if (FLAG_runtime_call_stats) {
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
+ language_mode));
+ }
+
Handle<AccessorInfo> callback(
callback_or_cell->IsWeakCell()
? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
@@ -2873,15 +2922,15 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
if (it.IsFound()) return *result;
+#ifdef DEBUG
LoadICNexus nexus(isolate);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
- if (!ic.ShouldThrowReferenceError(it.GetReceiver())) {
- return isolate->heap()->undefined_value();
- }
+ // It could actually be any kind of LoadIC here, but the predicate handles
+ // all the cases properly.
+ DCHECK(!ic.ShouldThrowReferenceError());
+#endif
- // Throw a reference error.
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewReferenceError(MessageTemplate::kNotDefined, it.name()));
+ return isolate->heap()->undefined_value();
}
@@ -2955,15 +3004,14 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Name> key = args.at<Name>(1);
- Handle<Object> result;
-
- DCHECK(args.length() == 4);
- Handle<Smi> slot = args.at<Smi>(2);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+ DCHECK_EQ(4, args.length());
+ typedef LoadWithVectorDescriptor Descriptor;
+ Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
+ Handle<Name> key = args.at<Name>(Descriptor::kName);
+ Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
+ Handle<TypeFeedbackVector> vector =
+ args.at<TypeFeedbackVector>(Descriptor::kVector);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
@@ -2972,17 +3020,15 @@ RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
} else {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
vector->GetKind(vector_slot));
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
-
- return *result;
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 8bd2f447b8..35f3844464 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -35,11 +35,11 @@ class IC {
// Compute the current IC state based on the target stub, receiver and name.
void UpdateState(Handle<Object> receiver, Handle<Object> name);
- bool IsNameCompatibleWithPrototypeFailure(Handle<Object> name);
- void MarkPrototypeFailure(Handle<Object> name) {
- DCHECK(IsNameCompatibleWithPrototypeFailure(name));
+ bool RecomputeHandlerForName(Handle<Object> name);
+ void MarkRecomputeHandler(Handle<Object> name) {
+ DCHECK(RecomputeHandlerForName(name));
old_state_ = state_;
- state_ = PROTOTYPE_FAILURE;
+ state_ = RECOMPUTE_HANDLER;
}
// Clear the inline cache to initial state.
@@ -47,14 +47,13 @@ class IC {
#ifdef DEBUG
bool IsLoadStub() const {
- return target()->is_load_stub() || target()->is_keyed_load_stub();
+ return kind_ == Code::LOAD_IC || kind_ == Code::LOAD_GLOBAL_IC ||
+ kind_ == Code::KEYED_LOAD_IC;
}
-
bool IsStoreStub() const {
- return target()->is_store_stub() || target()->is_keyed_store_stub();
+ return kind_ == Code::STORE_IC || kind_ == Code::KEYED_STORE_IC;
}
-
- bool IsCallStub() const { return target()->is_call_stub(); }
+ bool IsCallStub() const { return kind_ == Code::CALL_IC; }
#endif
static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
@@ -65,26 +64,20 @@ class IC {
Isolate* isolate,
CacheHolderFlag* flag);
- static bool IsCleared(Code* code) {
- InlineCacheState state = code->ic_state();
- return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
- }
-
static bool IsCleared(FeedbackNexus* nexus) {
InlineCacheState state = nexus->StateFromFeedback();
return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
}
static bool ICUseVector(Code::Kind kind) {
- return kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
- kind == Code::CALL_IC || kind == Code::STORE_IC ||
- kind == Code::KEYED_STORE_IC;
+ return kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
+ kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC ||
+ kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC;
}
- protected:
- // Get the call-site target; used for determining the state.
- Handle<Code> target() const { return target_; }
+ static InlineCacheState StateFromCode(Code* code);
+ protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
Isolate* isolate() const { return isolate_; }
@@ -101,13 +94,12 @@ class IC {
// Set the call-site target.
inline void set_target(Code* code);
- bool is_target_set() { return target_set_; }
bool is_vector_set() { return vector_set_; }
bool UseVector() const {
bool use = ICUseVector(kind());
// If we are supposed to use the nexus, verify the nexus is non-null.
- DCHECK(!use || nexus_ != NULL);
+ DCHECK(!use || nexus_ != nullptr);
return use;
}
@@ -115,10 +107,10 @@ class IC {
void ConfigureVectorState(IC::State new_state, Handle<Object> key);
// Configure the vector for MONOMORPHIC.
void ConfigureVectorState(Handle<Name> name, Handle<Map> map,
- Handle<Code> handler);
+ Handle<Object> handler);
// Configure the vector for POLYMORPHIC.
void ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
- CodeHandleList* handlers);
+ List<Handle<Object>>* handlers);
// Configure the vector for POLYMORPHIC with transitions (only for element
// keyed stores).
void ConfigureVectorState(MapHandleList* maps,
@@ -139,16 +131,17 @@ class IC {
Address constant_pool);
static inline void SetTargetAtAddress(Address address, Code* target,
Address constant_pool);
- static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
- State old_state, State new_state,
- bool target_remains_ic_stub);
// As a vector-based IC, type feedback must be updated differently.
static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
static void PostPatching(Address address, Code* target, Code* old_target);
// Compute the handler either by compiling or by retrieving a cached version.
- Handle<Code> ComputeHandler(LookupIterator* lookup,
- Handle<Object> value = Handle<Code>::null());
+ Handle<Object> ComputeHandler(LookupIterator* lookup,
+ Handle<Object> value = Handle<Code>::null());
+ virtual Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
virtual Handle<Code> CompileHandler(LookupIterator* lookup,
Handle<Object> value,
CacheHolderFlag cache_holder) {
@@ -156,30 +149,28 @@ class IC {
return Handle<Code>::null();
}
- void UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name);
- bool UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code);
- void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
+ void UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name);
+ bool UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code);
+ void UpdateMegamorphicCache(Map* map, Name* name, Object* code);
+
+ StubCache* stub_cache();
void CopyICToMegamorphicCache(Handle<Name> name);
bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
- void PatchCache(Handle<Name> name, Handle<Code> code);
+ void PatchCache(Handle<Name> name, Handle<Object> code);
Code::Kind kind() const { return kind_; }
+ bool is_keyed() const {
+ return kind_ == Code::KEYED_LOAD_IC || kind_ == Code::KEYED_STORE_IC;
+ }
Code::Kind handler_kind() const {
if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
kind_ == Code::KEYED_STORE_IC);
return kind_;
}
- virtual Handle<Code> megamorphic_stub() {
- UNREACHABLE();
- return Handle<Code>::null();
- }
-
- bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
- Handle<String> name);
+ bool ShouldRecomputeHandler(Handle<Object> receiver, Handle<String> name);
ExtraICState extra_ic_state() const { return extra_ic_state_; }
- void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; }
Handle<Map> receiver_map() { return receiver_map_; }
void update_receiver_map(Handle<Object> receiver) {
@@ -202,12 +193,10 @@ class IC {
return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
}
- inline void UpdateTarget();
-
Handle<TypeFeedbackVector> vector() const { return nexus()->vector_handle(); }
FeedbackVectorSlot slot() const { return nexus()->slot(); }
State saved_state() const {
- return state() == PROTOTYPE_FAILURE ? old_state_ : state();
+ return state() == RECOMPUTE_HANDLER ? old_state_ : state();
}
template <class NexusClass>
@@ -217,25 +206,17 @@ class IC {
FeedbackNexus* nexus() const { return nexus_; }
inline Code* get_host();
+ inline Code* target() const;
private:
- inline Code* raw_target() const;
inline Address constant_pool() const;
inline Address raw_constant_pool() const;
void FindTargetMaps() {
if (target_maps_set_) return;
target_maps_set_ = true;
- if (UseVector()) {
- nexus()->ExtractMaps(&target_maps_);
- } else {
- if (state_ == MONOMORPHIC) {
- Map* map = target_->FindFirstMap();
- if (map != NULL) target_maps_.Add(handle(map));
- } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
- target_->FindAllMaps(&target_maps_);
- }
- }
+ DCHECK(UseVector());
+ nexus()->ExtractMaps(&target_maps_);
}
// Frame pointer for the frame that uses (calls) the IC.
@@ -253,15 +234,12 @@ class IC {
Isolate* isolate_;
- // The original code target that missed.
- Handle<Code> target_;
- bool target_set_;
bool vector_set_;
State old_state_; // For saving if we marked as prototype failure.
State state_;
Code::Kind kind_;
Handle<Map> receiver_map_;
- MaybeHandle<Code> maybe_handler_;
+ MaybeHandle<Object> maybe_handler_;
ExtraICState extra_ic_state_;
MapHandleList target_maps_;
@@ -282,81 +260,69 @@ class CallIC : public IC {
void HandleMiss(Handle<Object> function);
- // Code generator routines.
- static Handle<Code> initialize_stub(Isolate* isolate, int argc,
- ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
- static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, ConvertReceiverMode mode,
- TailCallMode tail_call_mode);
-
static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
};
class LoadIC : public IC {
public:
- TypeofMode typeof_mode() const {
- return LoadICState::GetTypeofMode(extra_ic_state());
- }
-
LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
: IC(depth, isolate, nexus) {
DCHECK(nexus != NULL);
DCHECK(IsLoadStub());
}
- bool ShouldThrowReferenceError(Handle<Object> receiver) {
- return receiver->IsJSGlobalObject() && typeof_mode() == NOT_INSIDE_TYPEOF;
+ bool ShouldThrowReferenceError() const {
+ return kind() == Code::LOAD_GLOBAL_IC &&
+ LoadGlobalICState::GetTypeofMode(extra_ic_state()) ==
+ NOT_INSIDE_TYPEOF;
}
// Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
- static Handle<Code> initialize_stub(Isolate* isolate,
- ExtraICState extra_state);
- static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, ExtraICState extra_state, State initialization_state);
-
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Name> name);
static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
protected:
- inline void set_target(Code* code);
-
- Handle<Code> slow_stub() const {
- if (kind() == Code::LOAD_IC) {
- return isolate()->builtins()->LoadIC_Slow();
- } else {
- DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return isolate()->builtins()->KeyedLoadIC_Slow();
- }
+ virtual Handle<Code> slow_stub() const {
+ return isolate()->builtins()->LoadIC_Slow();
}
- Handle<Code> megamorphic_stub() override;
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupIterator* lookup);
+ Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
+
Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
CacheHolderFlag cache_holder) override;
private:
- Handle<Code> SimpleFieldLoad(FieldIndex index);
-
- static void Clear(Isolate* isolate, Address address, Code* target,
- Address constant_pool);
+ Handle<Object> SimpleFieldLoad(FieldIndex index);
friend class IC;
};
+class LoadGlobalIC : public LoadIC {
+ public:
+ LoadGlobalIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
+ : LoadIC(depth, isolate, nexus) {}
+
+ MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
+
+ static void Clear(Isolate* isolate, Code* host, LoadGlobalICNexus* nexus);
+
+ protected:
+ Handle<Code> slow_stub() const override {
+ return isolate()->builtins()->LoadGlobalIC_Slow();
+ }
+};
class KeyedLoadIC : public LoadIC {
public:
@@ -364,7 +330,6 @@ class KeyedLoadIC : public LoadIC {
KeyedLoadICNexus* nexus = NULL)
: LoadIC(depth, isolate, nexus) {
DCHECK(nexus != NULL);
- DCHECK(target()->is_keyed_load_stub());
}
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
@@ -373,20 +338,8 @@ class KeyedLoadIC : public LoadIC {
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMegamorphic(MacroAssembler* masm);
- // Bit mask to be tested against bit field for the cases when
- // generic stub should go into slow case.
- // Access check is necessary explicitly since generic stub does not perform
- // map checks.
- static const int kSlowCaseBitFieldMask =
- (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-
- static Handle<Code> initialize_stub(Isolate* isolate,
- ExtraICState extra_state);
- static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, State initialization_state, ExtraICState extra_state);
static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state);
@@ -394,22 +347,15 @@ class KeyedLoadIC : public LoadIC {
protected:
// receiver is HeapObject because it could be a String or a JSObject
- Handle<Code> LoadElementStub(Handle<HeapObject> receiver);
+ void UpdateLoadElement(Handle<HeapObject> receiver);
private:
- static void Clear(Isolate* isolate, Address address, Code* target,
- Address constant_pool);
-
friend class IC;
};
class StoreIC : public IC {
public:
- static ExtraICState ComputeExtraICState(LanguageMode flag) {
- return StoreICState(flag).GetExtraICState();
- }
-
StoreIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
: IC(depth, isolate, nexus) {
DCHECK(IsStoreStub());
@@ -421,21 +367,8 @@ class StoreIC : public IC {
// Code generators for stub routines. Only called once at startup.
static void GenerateSlow(MacroAssembler* masm);
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
- static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- LanguageMode language_mode);
-
- static Handle<Code> initialize_stub(Isolate* isolate,
- LanguageMode language_mode,
- State initialization_state);
- static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, LanguageMode language_mode, State initialization_state);
MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -449,29 +382,27 @@ class StoreIC : public IC {
protected:
// Stub accessors.
- Handle<Code> megamorphic_stub() override;
- Handle<Code> slow_stub() const;
-
- virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate(), language_mode());
+ Handle<Code> slow_stub() const {
+ switch (language_mode()) {
+ case SLOPPY:
+ return isolate()->builtins()->StoreIC_SlowSloppy();
+ case STRICT:
+ return isolate()->builtins()->StoreIC_SlowStrict();
+ default:
+ UNREACHABLE();
+ return Handle<Code>();
+ }
}
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- LanguageMode language_mode);
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode);
+ Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> value,
CacheHolderFlag cache_holder) override;
private:
- inline void set_target(Code* code);
-
- static void Clear(Isolate* isolate, Address address, Code* target,
- Address constant_pool);
-
friend class IC;
};
@@ -484,79 +415,34 @@ enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
class KeyedStoreIC : public StoreIC {
public:
- // ExtraICState bits (building on IC)
- // ExtraICState bits
- // When more language modes are added, these BitFields need to move too.
- STATIC_ASSERT(i::LANGUAGE_END == 3);
- class ExtraICStateKeyedAccessStoreMode
- : public BitField<KeyedAccessStoreMode, 3, 3> {}; // NOLINT
-
- class IcCheckTypeField : public BitField<IcCheckType, 6, 1> {};
-
- static ExtraICState ComputeExtraICState(LanguageMode flag,
- KeyedAccessStoreMode mode) {
- return StoreICState(flag).GetExtraICState() |
- ExtraICStateKeyedAccessStoreMode::encode(mode) |
- IcCheckTypeField::encode(ELEMENT);
- }
-
KeyedAccessStoreMode GetKeyedAccessStoreMode() {
return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
}
KeyedStoreIC(FrameDepth depth, Isolate* isolate,
KeyedStoreICNexus* nexus = NULL)
- : StoreIC(depth, isolate, nexus) {
- DCHECK(target()->is_keyed_store_stub());
- }
+ : StoreIC(depth, isolate, nexus) {}
MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
Handle<Object> name,
Handle<Object> value);
// Code generators for stub routines. Only called once at startup.
- static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm);
- }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
LanguageMode language_mode);
- static Handle<Code> initialize_stub(Isolate* isolate,
- LanguageMode language_mode,
- State initialization_state);
-
- static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, LanguageMode language_mode, State initialization_state);
static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state);
static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
protected:
- virtual Handle<Code> pre_monomorphic_stub() const {
- return pre_monomorphic_stub(isolate(), language_mode());
- }
- static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
- LanguageMode language_mode) {
- if (is_strict(language_mode)) {
- return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
- } else {
- return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
- }
- }
-
- Handle<Code> StoreElementStub(Handle<Map> receiver_map,
- KeyedAccessStoreMode store_mode);
+ void UpdateStoreElement(Handle<Map> receiver_map,
+ KeyedAccessStoreMode store_mode);
private:
- inline void set_target(Code* code);
-
- static void Clear(Isolate* isolate, Address address, Code* target,
- Address constant_pool);
-
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
@@ -586,9 +472,6 @@ class CompareIC : public IC {
// Helper function for computing the condition for a compare operation.
static Condition ComputeCondition(Token::Value op);
- // Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
-
private:
static bool HasInlinedSmiCode(Address address);
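Annotation: the ic.h hunks above make one central change — handlers flow through the IC as Handle&lt;Object&gt; rather than Handle&lt;Code&gt; (see ComputeHandler, UpdateMonomorphicIC, PatchCache, and UpdateMegamorphicCache, plus SimpleFieldLoad's new return type), so a feedback slot can hold either compiled code or a lightweight data handler such as a smi-encoded field load. A minimal, self-contained model of that dispatch follows; it is a sketch of the idea only, and every name in it is illustrative rather than V8's API.

    // Hedged model (not V8 code): a handler is either an int-encoded field
    // offset (the "smi" data-handler case) or a compiled stub (a function
    // pointer here). The IC walks its (map, handler) feedback and dispatches.
    #include <cstdint>
    #include <vector>

    struct Map {};                                   // opaque receiver shape
    using CompiledHandler = intptr_t (*)(char* receiver);

    struct Handler {
      bool is_data;          // data handler (the smi case)?
      int field_offset;      // valid when is_data
      CompiledHandler code;  // valid when !is_data
    };

    struct FeedbackEntry { const Map* map; Handler handler; };

    intptr_t Dispatch(const std::vector<FeedbackEntry>& feedback,
                      const Map* receiver_map, char* receiver) {
      for (const FeedbackEntry& e : feedback) {
        if (e.map != receiver_map) continue;
        if (e.handler.is_data) {
          // Data-driven case: no per-map code object, just a raw field load.
          return *reinterpret_cast<intptr_t*>(receiver + e.handler.field_offset);
        }
        return e.handler.code(receiver);             // compiled-handler case
      }
      return -1;  // miss: fall back to the runtime
    }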
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index b122946577..2aa0283485 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -19,19 +19,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a0, t0, t1};
+ static Register registers[] = {receiver, name, a3, a0, t0};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, t0, t1};
+ static Register registers[] = {receiver, name, a3, t0};
return registers;
}
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index b924bdad78..f4e0f0baba 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -195,9 +195,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -279,7 +281,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
@@ -319,17 +321,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
-}
-
-
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -423,28 +416,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- __ li(scratch1, Operand(validity_cell));
- __ lw(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ Branch(miss, ne, scratch1,
- Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ __ li(scratch1, Operand(validity_cell));
+ __ lw(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ Branch(miss, ne, scratch1,
+ Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ lw(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch1, Operand(scratch2));
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ lw(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch1, Operand(scratch2));
}
// Keep track of the current object in register reg.
@@ -480,8 +470,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -491,33 +483,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ lw(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch2, Operand(map_reg));
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ lw(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -531,17 +502,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch2, Operand(scratch1));
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -584,70 +546,10 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
}
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- // Here and below +1 is for name() pushed after the args_ array.
- typedef PropertyCallbackArguments PCA;
- __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ sw(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ li(scratch2(), data);
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- }
- __ sw(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
- __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
- __ sw(scratch2(),
- MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ sw(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
- kPointerSize));
- __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- __ sw(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ sw(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
- // should_throw_on_error -> false
- DCHECK(Smi::FromInt(0) == nullptr);
- __ sw(zero_reg,
- MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
-
- __ sw(name(), MemOperand(sp, 0 * kPointerSize));
-
- // Abi for CallApiGetter.
- Register getter_address_reg = ApiGetterDescriptor::function_address();
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ li(getter_address_reg, Operand(ref));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -706,7 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
@@ -722,7 +624,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(receiver(), holder_reg); // Receiver.
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ li(at, Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -737,7 +639,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -778,7 +680,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
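Annotation: with FLAG_eliminate_prototype_chain_checks gone, the cell-based scheme above is the only path through CheckPrototypes — each receiver map gets a validity cell shared by its prototype chain, handlers embed that cell, and one load-and-compare replaces per-map weak-cell checks (global objects additionally get an empty-property-cell check via GenerateCheckPropertyCell). A hedged, self-contained model of the invariant, with invented names:

    // Hedged model (not V8 code) of the validity-cell scheme: mutating any
    // prototype flips one shared cell, invalidating every dependent handler
    // in O(1) instead of re-checking each map on every property access.
    constexpr int kPrototypeChainValid = 0;
    constexpr int kPrototypeChainInvalid = 1;

    struct ValidityCell { int value = kPrototypeChainValid; };

    // Single load and compare replaces a walk over the whole chain.
    bool FastPathStillValid(const ValidityCell& cell) {
      return cell.value == kPrototypeChainValid;
    }

    // Any prototype mutation flips the cell; handlers that embedded it start
    // missing, and the misses recompile against the new chain.
    void OnPrototypeMutated(ValidityCell& cell) {
      cell.value = kPrototypeChainInvalid;
    }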
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index ae3615e3bb..3a28b13bd8 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -419,10 +419,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
- receiver, key, t0, t1, t2, t5);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, t0, t1,
+ t2, t5);
// Cache miss.
GenerateMiss(masm);
@@ -616,11 +614,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(t0, t0,
- Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
__ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -653,8 +650,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
@@ -663,10 +660,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, t1, t2, t4, t5);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, t1,
+ t2, t4, t5);
// Cache miss.
__ Branch(&miss);
@@ -717,8 +712,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -728,25 +723,6 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- DCHECK(receiver.is(a1));
- DCHECK(name.is(a2));
- DCHECK(StoreDescriptor::ValueRegister().is(a0));
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, name, a3, t0, t1, t2);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -764,8 +740,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
DCHECK(value.is(a0));
- DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3));
- DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0));
+ DCHECK(StoreWithVectorDescriptor::VectorRegister().is(a3));
+ DCHECK(StoreWithVectorDescriptor::SlotRegister().is(t0));
__ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -840,8 +816,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, andi=%p, delta=%d\n", address,
- andi_instruction_address, delta);
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(andi_instruction_address), delta);
}
Address patch_address =
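Annotation: this file shows the stub-cache split from the consumer side — probes now go through isolate-&gt;load_stub_cache() or isolate-&gt;store_stub_cache() with no Code::Flags argument, and StoreIC::GenerateMegamorphic disappears because megamorphic stores probe the store cache directly. A rough model of the new ownership, assuming nothing beyond what the hunks show (the types and keying below are placeholders):

    // Hedged model (not V8 code): one cache per access kind, owned by the
    // isolate, so a probe needs no flag bits to disambiguate load vs. store.
    #include <string>
    #include <unordered_map>

    struct StubCacheModel {
      std::unordered_map<std::string, void*> table;  // (map, name) -> handler
      void* Probe(const std::string& key) const {
        auto it = table.find(key);
        return it == table.end() ? nullptr : it->second;
      }
    };

    struct IsolateModel {
      StubCacheModel load_stub_cache;   // LOAD_IC and KEYED_LOAD_IC probes
      StubCacheModel store_stub_cache;  // STORE_IC and KEYED_STORE_IC probes
    };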
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
index 039763c4cf..d476c1e63e 100644
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -14,16 +14,15 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register receiver, Register name,
- // Number of the cache entry, not scaled.
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits.
Register offset, Register scratch, Register scratch2,
Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
@@ -46,7 +45,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Calculate the base address of the entry.
__ li(base_addr, Operand(key_offset));
- __ Lsa(base_addr, base_addr, offset_scratch, kPointerSizeLog2);
+ __ Addu(base_addr, base_addr, offset_scratch);
// Check that the key in the entry matches the name.
__ lw(at, MemOperand(base_addr, 0));
@@ -62,13 +61,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
scratch2 = no_reg;
__ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
- __ Branch(&miss, ne, flags_reg, Operand(flags));
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -85,21 +77,15 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ bind(&miss);
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
- Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
DCHECK(sizeof(Entry) == 12);
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Make sure that there are no register conflicts.
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
@@ -113,12 +99,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind)) {
+ if (IC::ICUseVector(ic_kind_)) {
Register vector, slot;
- if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
- vector = VectorStoreICDescriptor::VectorRegister();
- slot = VectorStoreICDescriptor::SlotRegister();
+ if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
+ vector = StoreWithVectorDescriptor::VectorRegister();
+ slot = StoreWithVectorDescriptor::SlotRegister();
} else {
+ DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
@@ -137,27 +124,23 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, at);
- uint32_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ srl(scratch, scratch, kCacheIndexShift);
- __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- __ And(scratch, scratch, Operand(mask));
+ __ Xor(scratch, scratch, Operand(kPrimaryMagic));
+ __ And(scratch, scratch,
+ Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
+ extra3);
// Primary miss: Compute hash for secondary probe.
- __ srl(at, name, kCacheIndexShift);
- __ Subu(scratch, scratch, at);
- uint32_t mask2 = kSecondaryTableSize - 1;
- __ Addu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ And(scratch, scratch, Operand(mask2));
+ __ Subu(scratch, scratch, name);
+ __ Addu(scratch, scratch, Operand(kSecondaryMagic));
+ __ And(scratch, scratch,
+ Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
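Annotation: the hashing itself changed with the split — Code::Flags no longer feeds the hash; fixed kPrimaryMagic and kSecondaryMagic constants do, and the masked result stays pre-scaled by kCacheIndexShift, which is why the 32-bit entry address is now formed with a plain Addu. Below is a C++ transcription of what the assembly above computes; the constants' values are not visible in the diff, so they are taken as parameters here.

    // Transcription (a sketch) of the MIPS probe hash in the hunks above.
    #include <cstdint>

    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_word,
                           uint32_t primary_magic, uint32_t primary_table_size,
                           uint32_t cache_index_shift) {
      uint32_t h = name_hash + map_word;  // Addu(scratch, scratch, at)
      h ^= primary_magic;                 // Xor(scratch, kPrimaryMagic)
      return h & ((primary_table_size - 1) << cache_index_shift);
    }

    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word,
                             uint32_t secondary_magic,
                             uint32_t secondary_table_size,
                             uint32_t cache_index_shift) {
      uint32_t h = primary - name_word;   // Subu(scratch, scratch, name)
      h += secondary_magic;               // Addu(scratch, kSecondaryMagic)
      return h & ((secondary_table_size - 1) << cache_index_shift);
    }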
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index 96e921c7c6..bf6c73e86f 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -19,19 +19,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a0, a4, a5};
+ static Register registers[] = {receiver, name, a3, a0, a4};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a4, a5};
+ static Register registers[] = {receiver, name, a3, a4};
return registers;
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 52260ee754..53b097f8ce 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -195,9 +195,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -279,7 +281,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
@@ -319,17 +321,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
-}
-
-
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -423,28 +416,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- __ li(scratch1, Operand(validity_cell));
- __ ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ Branch(miss, ne, scratch1,
- Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ __ li(scratch1, Operand(validity_cell));
+ __ ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ Branch(miss, ne, scratch1,
+ Operand(Smi::FromInt(Map::kPrototypeChainValid)));
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ ld(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch1, Operand(scratch2));
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ ld(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch1, Operand(scratch2));
}
// Keep track of the current object in register reg.
@@ -480,8 +470,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -491,33 +483,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ld(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch2, Operand(map_reg));
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ ld(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -531,17 +502,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch2, Operand(scratch1));
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -584,70 +546,10 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
}
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- // Here and below +1 is for name() pushed after the args_ array.
- typedef PropertyCallbackArguments PCA;
- __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ sd(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ li(scratch2(), data);
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- }
- __ sd(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
- __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
- __ sd(scratch2(),
- MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ sd(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
- kPointerSize));
- __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- __ sd(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ sd(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
- // should_throw_on_error -> false
- DCHECK(Smi::FromInt(0) == nullptr);
- __ sd(zero_reg,
- MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
-
- __ sd(name(), MemOperand(sp, 0 * kPointerSize));
-
- // Abi for CallApiGetter.
- Register getter_address_reg = ApiGetterDescriptor::function_address();
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ li(getter_address_reg, Operand(ref));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -706,7 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
@@ -722,7 +624,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(receiver(), holder_reg); // Receiver.
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ li(at, Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -737,7 +639,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -778,7 +680,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index f46c9dcb26..b551bc70f6 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -418,10 +418,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
- receiver, key, a4, a5, a6, t1);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, a4, a5,
+ a6, t1);
// Cache miss.
GenerateMiss(masm);
@@ -622,11 +620,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(a4, a4,
- Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
__ Branch(&slow, ne, a4, Operand(zero_reg));
// Check if the object is a JS array or not.
__ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -656,8 +653,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
Handle<TypeFeedbackVector> dummy_vector =
@@ -667,10 +664,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ li(slot, Operand(Smi::FromInt(slot_index)));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, a5, a6, a7, t0);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, a5,
+ a6, a7, t0);
// Cache miss.
__ Branch(&miss);
@@ -721,8 +716,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -732,25 +727,6 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- DCHECK(receiver.is(a1));
- DCHECK(name.is(a2));
- DCHECK(StoreDescriptor::ValueRegister().is(a0));
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, name, a3, a4, a5, a6);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -766,8 +742,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register value = StoreDescriptor::ValueRegister();
Register dictionary = a5;
DCHECK(!AreAliased(
- value, receiver, name, VectorStoreICDescriptor::VectorRegister(),
- VectorStoreICDescriptor::SlotRegister(), dictionary, a6, a7));
+ value, receiver, name, StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::SlotRegister(), dictionary, a6, a7));
__ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -842,8 +818,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, andi=%p, delta=%d\n", address,
- andi_instruction_address, delta);
+ PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(andi_instruction_address), delta);
}
Address patch_address =
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
index 0bd7dd0f2d..6a87b7ba88 100644
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -14,16 +14,15 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register receiver, Register name,
- // Number of the cache entry, not scaled.
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits.
Register offset, Register scratch, Register scratch2,
Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
@@ -46,7 +45,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Calculate the base address of the entry.
__ li(base_addr, Operand(key_offset));
- __ Dlsa(base_addr, base_addr, offset_scratch, kPointerSizeLog2);
+ __ Dlsa(base_addr, base_addr, offset_scratch,
+ kPointerSizeLog2 - StubCache::kCacheIndexShift);
// Check that the key in the entry matches the name.
__ ld(at, MemOperand(base_addr, 0));
@@ -64,13 +64,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ ld(code, MemOperand(base_addr,
static_cast<int32_t>(value_off_addr - key_off_addr)));
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
- __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
- __ Branch(&miss, ne, flags_reg, Operand(flags));
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -87,12 +80,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ bind(&miss);
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
- Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The multiplying code relies on the
@@ -100,9 +90,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// DCHECK(sizeof(Entry) == 12);
// DCHECK(sizeof(Entry) == 3 * kPointerSize);
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Make sure that there are no register conflicts.
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
@@ -116,12 +103,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind)) {
+ if (IC::ICUseVector(ic_kind_)) {
Register vector, slot;
- if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
- vector = VectorStoreICDescriptor::VectorRegister();
- slot = VectorStoreICDescriptor::SlotRegister();
+ if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
+ vector = StoreWithVectorDescriptor::VectorRegister();
+ slot = StoreWithVectorDescriptor::SlotRegister();
} else {
+ DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
@@ -137,30 +125,26 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
- __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+ __ lwu(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Daddu(scratch, scratch, at);
- uint64_t mask = kPrimaryTableSize - 1;
- // We shift out the last two bits because they are not part of the hash and
- // they are always 01 for maps.
- __ dsrl(scratch, scratch, kCacheIndexShift);
- __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
- __ And(scratch, scratch, Operand(mask));
+ __ Addu(scratch, scratch, at);
+ __ Xor(scratch, scratch, Operand(kPrimaryMagic));
+ __ And(scratch, scratch,
+ Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
+ extra3);
// Primary miss: Compute hash for secondary probe.
- __ dsrl(at, name, kCacheIndexShift);
- __ Dsubu(scratch, scratch, at);
- uint64_t mask2 = kSecondaryTableSize - 1;
- __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
- __ And(scratch, scratch, Operand(mask2));
+ __ Subu(scratch, scratch, name);
+ __ Addu(scratch, scratch, kSecondaryMagic);
+ __ And(scratch, scratch,
+ Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
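
The probe hash above no longer folds in Code::Flags; both tables mix in fixed magic constants instead. A minimal C-level sketch of what the generated code computes, mirroring StubCache::PrimaryOffset and StubCache::SecondaryOffset from stub-cache.h later in this diff (constants and masks come from that header; kCacheIndexShift is assumed to be 2, i.e. Name::kHashShift):

// Sketch only; mirrors the constants and masks declared in stub-cache.h below.
uint32_t PrimaryOffsetSketch(uint32_t hash_field, uint32_t map_low32) {
  uint32_t key = (map_low32 + hash_field) ^ 0x3d532433;       // kPrimaryMagic
  return key & (((1u << 11) - 1) << 2);                       // (kPrimaryTableSize - 1), scaled
}
uint32_t SecondaryOffsetSketch(uint32_t primary_offset, uint32_t name_low32) {
  uint32_t key = (primary_offset - name_low32) + 0xb16b00b5;  // kSecondaryMagic
  return key & (((1u << 9) - 1) << 2);                        // (kSecondaryTableSize - 1), scaled
}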
diff --git a/deps/v8/src/ic/ppc/OWNERS b/deps/v8/src/ic/ppc/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/ic/ppc/OWNERS
+++ b/deps/v8/src/ic/ppc/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index b1e06e16e1..6143b4ce47 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -19,19 +19,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r6, r3, r7, r8};
+ static Register registers[] = {receiver, name, r6, r3, r7};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r6, r7, r8};
+ static Register registers[] = {receiver, name, r6, r7};
return registers;
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 832c25ae48..22c0608c97 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -198,9 +198,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -285,7 +287,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
@@ -327,17 +329,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
-}
-
-
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -432,28 +425,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- __ mov(scratch1, Operand(validity_cell));
- __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
- __ bne(miss);
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ __ mov(scratch1, Operand(validity_cell));
+ __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
+ __ bne(miss);
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ b(ne, miss);
}
// Keep track of the current object in register reg.
@@ -488,8 +478,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -499,33 +491,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ LoadP(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ bne(miss);
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ LoadP(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -539,17 +510,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ bne(miss);
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -592,60 +554,10 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
}
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- __ push(receiver());
- // Push data from AccessorInfo.
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ Move(scratch2(), data);
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- }
- __ push(scratch2());
- __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
- __ Push(scratch2(), scratch2());
- __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- // should_throw_on_error -> false
- __ mov(scratch3(), Operand(Smi::FromInt(0)));
- __ Push(scratch2(), reg, scratch3(), name());
-
- // Abi for CallApiGetter
- Register getter_address_reg = ApiGetterDescriptor::function_address();
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ mov(getter_address_reg, Operand(ref));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -705,7 +617,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
@@ -722,7 +634,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ mov(ip, Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -737,7 +649,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -777,7 +689,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
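
A recurring change in these handler compilers is that Object predicates such as IsUndefined and IsTheHole now take the Isolate explicitly instead of fetching it through the heap. The real definitions live outside this diff; a hedged sketch of the shape implied by the call sites:

// Illustrative only; signatures inferred from call sites like
// cell->value()->IsTheHole(isolate) above, not quoted from objects-inl.h.
bool Object::IsTheHole(Isolate* isolate) const {
  return this == isolate->heap()->the_hole_value();
}
bool Object::IsUndefined(Isolate* isolate) const {
  return this == isolate->heap()->undefined_value();
}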
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 567296c4c5..fd2962d0fa 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -425,10 +425,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- receiver, key, r7, r8, r9, r10);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r7, r8,
+ r9, r10);
// Cache miss.
GenerateMiss(masm);
@@ -455,8 +453,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
@@ -639,11 +637,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ andi(r0, ip,
- Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ andi(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ bne(&slow, cr0);
// Check if the object is a JS array or not.
__ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -676,8 +673,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
@@ -686,10 +683,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, r8, r9, r10, r11);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r8,
+ r9, r10, r11);
// Cache miss.
__ b(&miss);
@@ -738,26 +733,6 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateMiss(masm);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- DCHECK(receiver.is(r4));
- DCHECK(name.is(r5));
- DCHECK(StoreDescriptor::ValueRegister().is(r3));
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
-
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, name, r6, r7, r8, r9);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -775,8 +750,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
DCHECK(receiver.is(r4));
DCHECK(name.is(r5));
DCHECK(value.is(r3));
- DCHECK(VectorStoreICDescriptor::VectorRegister().is(r6));
- DCHECK(VectorStoreICDescriptor::SlotRegister().is(r7));
+ DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r6));
+ DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r7));
__ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -852,8 +827,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
- cmp_instruction_address, delta);
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(cmp_instruction_address), delta);
}
Address patch_address =
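
The PrintF change just above is a portability fix rather than a behavioral one: in C varargs, %p is only defined for void* arguments, and v8::internal::Address is a byte pointer, so it must be cast explicitly. A standalone illustration:

#include <cstdio>
// %p expects void*; passing any other pointer type through varargs is
// formally undefined, hence the static_cast at the call sites above.
void PrintPatchSite(unsigned char* address) {
  std::printf("ic at %p\n", static_cast<void*>(address));
}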
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
index 6030b2cbc8..3dad306f11 100644
--- a/deps/v8/src/ic/ppc/stub-cache-ppc.cc
+++ b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
@@ -14,16 +14,15 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register receiver, Register name,
- // Number of the cache entry, not scaled.
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits.
Register offset, Register scratch, Register scratch2,
Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
uintptr_t value_off_addr =
@@ -73,18 +72,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
scratch2 = no_reg;
__ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ lwz(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
-
- DCHECK(!r0.is(flags_reg));
- __ li(r0, Operand(Code::kFlagsNotUsedInLookup));
- __ andc(flags_reg, flags_reg, r0);
- __ mov(r0, Operand(flags));
- __ cmpl(flags_reg, r0);
- __ bne(&miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ b(&miss);
@@ -102,12 +89,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ bind(&miss);
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
- Isolate* isolate = masm->isolate();
Label miss;
#if V8_TARGET_ARCH_PPC64
@@ -120,9 +104,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
DCHECK(sizeof(Entry) == 12);
#endif
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Make sure that there are no register conflicts.
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
@@ -136,12 +117,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind)) {
+ if (IC::ICUseVector(ic_kind_)) {
Register vector, slot;
- if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
- vector = VectorStoreICDescriptor::VectorRegister();
- slot = VectorStoreICDescriptor::SlotRegister();
+ if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
+ vector = StoreWithVectorDescriptor::VectorRegister();
+ slot = StoreWithVectorDescriptor::SlotRegister();
} else {
+ DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
@@ -160,24 +142,24 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, ip);
- __ xori(scratch, scratch, Operand(flags));
+ __ Xor(scratch, scratch, Operand(kPrimaryMagic));
// The mask omits the last two bits because they are not part of the hash.
__ andi(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
+ extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, name);
- __ addi(scratch, scratch, Operand(flags));
+ __ Add(scratch, scratch, kSecondaryMagic, r0);
__ andi(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/s390/OWNERS b/deps/v8/src/ic/s390/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/ic/s390/OWNERS
+++ b/deps/v8/src/ic/s390/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/ic/s390/access-compiler-s390.cc b/deps/v8/src/ic/s390/access-compiler-s390.cc
index 316be715c2..0a3285d5aa 100644
--- a/deps/v8/src/ic/s390/access-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/access-compiler-s390.cc
@@ -19,18 +19,18 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
}
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r5, r2, r6, r7};
+ static Register registers[] = {receiver, name, r5, r2, r6};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r5, r6, r7};
+ static Register registers[] = {receiver, name, r5, r6};
return registers;
}
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index 1b39782c28..b399c5a601 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -187,9 +187,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
@@ -270,7 +272,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
@@ -311,16 +313,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
-}
-
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // The slow case calls into the runtime to complete the store without causing
- // an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
@@ -406,28 +400,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- __ mov(scratch1, Operand(validity_cell));
- __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
- __ bne(miss);
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ __ mov(scratch1, Operand(validity_cell));
+ __ LoadP(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ CmpSmiLiteral(scratch1, Smi::FromInt(Map::kPrototypeChainValid), r0);
+ __ bne(miss);
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ b(ne, miss);
}
// Keep track of the current object in register reg.
@@ -462,8 +453,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -473,33 +466,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ LoadP(holder_reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ bne(miss);
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ LoadP(holder_reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -513,17 +485,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ bne(miss);
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -562,59 +525,10 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
__ Ret();
}
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- __ Push(receiver());
- // Push data from AccessorInfo.
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ Move(scratch2(), data);
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- }
- __ push(scratch2());
- __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
- __ Push(scratch2(), scratch2());
- __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- // should_throw_on_error -> false
- __ mov(scratch3(), Operand(Smi::FromInt(0)));
- __ Push(scratch2(), reg, scratch3(), name());
-
- // Abi for CallApiGetter
- Register getter_address_reg = ApiGetterDescriptor::function_address();
-
- Address getter_address = v8::ToCData<Address>(callback->getter());
- ApiFunction fun(getter_address);
- ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
- ExternalReference ref = ExternalReference(&fun, type, isolate());
- __ mov(getter_address_reg, Operand(ref));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -672,7 +586,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
@@ -688,7 +602,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ mov(ip, Operand(callback));
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -703,7 +617,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
Register NamedStoreHandlerCompiler::value() {
@@ -740,7 +654,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
#undef __
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index d4f28868e7..6bb484a2fd 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -412,10 +412,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- receiver, key, r6, r7, r8, r9);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r6, r7,
+ r8, r9);
// Cache miss.
GenerateMiss(masm);
@@ -441,8 +439,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister(),
- VectorStoreICDescriptor::SlotRegister(),
- VectorStoreICDescriptor::VectorRegister());
+ StoreWithVectorDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
@@ -625,11 +623,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ AndP(r0, ip,
- Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ bne(&slow, Label::kNear);
// Check if the object is a JS array or not.
__ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
@@ -661,8 +658,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
@@ -671,10 +668,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ LoadRoot(vector, Heap::kDummyVectorRootIndex);
__ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, r7, r8, r9, ip);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r7,
+ r8, r9, ip);
// Cache miss.
__ b(&miss);
@@ -720,24 +715,6 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateMiss(masm);
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- DCHECK(receiver.is(r3));
- DCHECK(name.is(r4));
- DCHECK(StoreDescriptor::ValueRegister().is(r2));
-
- // Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
-
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, name, r5, r6, r7, r8);
-
- // Cache miss: Jump to runtime.
- GenerateMiss(masm);
-}
-
void StoreIC::GenerateMiss(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
@@ -754,8 +731,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
DCHECK(receiver.is(r3));
DCHECK(name.is(r4));
DCHECK(value.is(r2));
- DCHECK(VectorStoreICDescriptor::VectorRegister().is(r5));
- DCHECK(VectorStoreICDescriptor::SlotRegister().is(r6));
+ DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r5));
+ DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r6));
__ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -829,8 +806,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
}
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
- cmp_instruction_address, delta);
+ PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(cmp_instruction_address), delta);
}
// Expected sequence to enable by changing the following
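
With Code::Flags gone from the lookup key, a single shared table can no longer keep load and store handlers apart, so each IC family now owns its own cache and each StubCache records its kind in ic_kind_. A sketch of the call-site selection (the load_stub_cache()/store_stub_cache() accessor names are taken from the hunks above; the Isolate plumbing itself is outside this diff):

StubCache* cache =
    (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC)
        ? isolate->store_stub_cache()
        : isolate->load_stub_cache();
cache->GenerateProbe(masm, receiver, name, scratch, extra, extra2, extra3);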
diff --git a/deps/v8/src/ic/s390/stub-cache-s390.cc b/deps/v8/src/ic/s390/stub-cache-s390.cc
index 054b946df8..a0564a3be3 100644
--- a/deps/v8/src/ic/s390/stub-cache-s390.cc
+++ b/deps/v8/src/ic/s390/stub-cache-s390.cc
@@ -14,15 +14,15 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register receiver, Register name,
- // Number of the cache entry, not scaled.
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits.
Register offset, Register scratch, Register scratch2,
Register offset_scratch) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
uintptr_t value_off_addr =
@@ -70,16 +70,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
scratch2 = no_reg;
__ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
- // Check that the flags match what we're looking for.
- Register flags_reg = base_addr;
- base_addr = no_reg;
- __ LoadlW(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
-
- DCHECK(!r0.is(flags_reg));
- __ AndP(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
- __ CmpLogicalP(flags_reg, Operand(flags));
- __ bne(&miss, Label::kNear);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ b(&miss, Label::kNear);
@@ -97,11 +87,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ bind(&miss);
}
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
- Isolate* isolate = masm->isolate();
Label miss;
#if V8_TARGET_ARCH_S390X
@@ -114,9 +102,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
DCHECK(sizeof(Entry) == 12);
#endif
- // Make sure the flags does not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Make sure that there are no register conflicts.
DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
@@ -130,12 +115,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// If vector-based ics are in use, ensure that scratch, extra, extra2 and
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind)) {
+ if (IC::ICUseVector(ic_kind_)) {
Register vector, slot;
- if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
- vector = VectorStoreICDescriptor::VectorRegister();
- slot = VectorStoreICDescriptor::SlotRegister();
+ if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
+ vector = StoreWithVectorDescriptor::VectorRegister();
+ slot = StoreWithVectorDescriptor::SlotRegister();
} else {
+ DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
@@ -154,24 +140,24 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
__ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ AddP(scratch, scratch, ip);
- __ XorP(scratch, scratch, Operand(flags));
+ __ XorP(scratch, scratch, Operand(kPrimaryMagic));
// The mask omits the last two bits because they are not part of the hash.
__ AndP(scratch, scratch,
Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
+ extra3);
// Primary miss: Compute hash for secondary probe.
__ SubP(scratch, scratch, name);
- __ AddP(scratch, scratch, Operand(flags));
+ __ AddP(scratch, scratch, Operand(kSecondaryMagic));
__ AndP(scratch, scratch,
Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name, scratch,
- extra, extra2, extra3);
+ ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
+ extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 4a5f9bd7ad..31d7e2e0a8 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -10,9 +10,8 @@
namespace v8 {
namespace internal {
-
-StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {}
-
+StubCache::StubCache(Isolate* isolate, Code::Kind ic_kind)
+ : isolate_(isolate), ic_kind_(ic_kind) {}
void StubCache::Initialize() {
DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
@@ -20,35 +19,34 @@ void StubCache::Initialize() {
Clear();
}
+#ifdef DEBUG
+namespace {
-static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
- Code::Flags flags) {
- flags = Code::RemoveTypeAndHolderFromFlags(flags);
-
+bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
+ Code* code) {
// Validate that the name does not move on scavenge, and that we
// can use identity checks instead of structural equality checks.
DCHECK(!name->GetHeap()->InNewSpace(name));
DCHECK(name->IsUniqueName());
-
- // The state bits are not important to the hash function because the stub
- // cache only contains handlers. Make sure that the bits are the least
- // significant so they will be the ones masked out.
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
- STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
-
- // Make sure that the code type and cache holder are not included in the hash.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
- DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
-
- return flags;
+ DCHECK(name->HasHashCode());
+ if (code) {
+ Code::Flags expected_flags = Code::RemoveHolderFromFlags(
+ Code::ComputeHandlerFlags(stub_cache->ic_kind()));
+ Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
+ DCHECK_EQ(expected_flags, flags);
+ DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+ }
+ return true;
}
+} // namespace
+#endif
Code* StubCache::Set(Name* name, Map* map, Code* code) {
- Code::Flags flags = CommonStubCacheChecks(name, map, code->flags());
+ DCHECK(CommonStubCacheChecks(this, name, map, code));
// Compute the primary entry.
- int primary_offset = PrimaryOffset(name, flags, map);
+ int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
Code* old_code = primary->value;
@@ -56,10 +54,8 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
// secondary cache before overwriting it.
if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
Map* old_map = primary->map;
- Code::Flags old_flags =
- Code::RemoveTypeAndHolderFromFlags(old_code->flags());
- int seed = PrimaryOffset(primary->key, old_flags, old_map);
- int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
+ int seed = PrimaryOffset(primary->key, old_map);
+ int secondary_offset = SecondaryOffset(primary->key, seed);
Entry* secondary = entry(secondary_, secondary_offset);
*secondary = *primary;
}
@@ -72,15 +68,14 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
return code;
}
-
-Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
- flags = CommonStubCacheChecks(name, map, flags);
- int primary_offset = PrimaryOffset(name, flags, map);
+Code* StubCache::Get(Name* name, Map* map) {
+ DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
+ int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
if (primary->key == name && primary->map == map) {
return primary->value;
}
- int secondary_offset = SecondaryOffset(name, flags, primary_offset);
+ int secondary_offset = SecondaryOffset(name, primary_offset);
Entry* secondary = entry(secondary_, secondary_offset);
if (secondary->key == name && secondary->map == map) {
return secondary->value;
@@ -105,7 +100,6 @@ void StubCache::Clear() {
void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
- Code::Flags flags,
Handle<Context> native_context,
Zone* zone) {
for (int i = 0; i < kPrimaryTableSize; i++) {
@@ -115,7 +109,7 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
// with a primitive receiver.
if (map == NULL) continue;
- int offset = PrimaryOffset(*name, flags, map);
+ int offset = PrimaryOffset(*name, map);
if (entry(primary_, offset) == &primary_[i] &&
TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
types->AddMapIfMissing(Handle<Map>(map), zone);
@@ -131,10 +125,10 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
if (map == NULL) continue;
// Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(*name, flags, map);
+ int primary_offset = PrimaryOffset(*name, map);
// Lookup in secondary table and add matches.
- int offset = SecondaryOffset(*name, flags, primary_offset);
+ int offset = SecondaryOffset(*name, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
types->AddMapIfMissing(Handle<Map>(map), zone);
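
StubCache::Set above implements a simple two-level eviction: before a primary slot is overwritten, the old entry is demoted into the secondary table, using its own primary offset as the seed. A condensed sketch of that flow (not the literal code; the kIllegal builtin is the empty-slot sentinel):

Entry* primary = entry(primary_, PrimaryOffset(name, map));
if (primary->value != isolate_->builtins()->builtin(Builtins::kIllegal)) {
  int seed = PrimaryOffset(primary->key, primary->map);  // old entry's own slot
  *entry(secondary_, SecondaryOffset(primary->key, seed)) = *primary;
}
primary->key = name;
primary->value = code;
primary->map = map;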
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 4b27e6e396..a053555d9f 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -41,19 +41,17 @@ class StubCache {
void Initialize();
// Access cache for entry hash(name, map).
Code* Set(Name* name, Map* map, Code* code);
- Code* Get(Name* name, Map* map, Code::Flags flags);
+ Code* Get(Name* name, Map* map);
// Clear the lookup table (@ mark compact collection).
void Clear();
- // Collect all maps that match the name and flags.
+ // Collect all maps that match the name.
void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
- Code::Flags flags, Handle<Context> native_context,
- Zone* zone);
+ Handle<Context> native_context, Zone* zone);
// Generate code for probing the stub cache table.
// Arguments extra, extra2 and extra3 may be used to pass additional scratch
// registers. Set to no_reg if not needed.
// If leave_frame is true, then exit a frame before the tail call.
- void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver, Register name,
+ void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
Register scratch, Register extra, Register extra2 = no_reg,
Register extra3 = no_reg);
@@ -86,15 +84,34 @@ class StubCache {
}
Isolate* isolate() { return isolate_; }
+ Code::Kind ic_kind() const { return ic_kind_; }
// Setting the entry size such that the index is shifted by Name::kHashShift
// is convenient; shifting down the length field (to extract the hash code)
// automatically discards the hash bit field.
static const int kCacheIndexShift = Name::kHashShift;
- private:
- explicit StubCache(Isolate* isolate);
+ static const int kPrimaryTableBits = 11;
+ static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
+ static const int kSecondaryTableBits = 9;
+ static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
+ // Some magic numbers used in the primary and secondary hash computations.
+ static const int kPrimaryMagic = 0x3d532433;
+ static const int kSecondaryMagic = 0xb16b00b5;
+
+ static int PrimaryOffsetForTesting(Name* name, Map* map) {
+ return PrimaryOffset(name, map);
+ }
+
+ static int SecondaryOffsetForTesting(Name* name, int seed) {
+ return SecondaryOffset(name, seed);
+ }
+
+ // The constructor is made public only for the purposes of testing.
+ StubCache(Isolate* isolate, Code::Kind ic_kind);
+
+ private:
// The stub cache has a primary and secondary level. The two levels have
// different hashing algorithms in order to avoid simultaneous collisions
// in both caches. Unlike a probing strategy (quadratic or otherwise) the
@@ -105,7 +122,7 @@ class StubCache {
// Hash algorithm for the primary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
- static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
+ static int PrimaryOffset(Name* name, Map* map) {
STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
DCHECK(name->HasHashCode());
@@ -115,27 +132,19 @@ class StubCache {
// 4Gb (and not at all if it isn't).
uint32_t map_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- // Base the offset on a simple combination of name, flags, and map.
- uint32_t key = (map_low32bits + field) ^ iflags;
+ // Base the offset on a simple combination of name and map.
+ uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler for every architecture. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
- static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
+ static int SecondaryOffset(Name* name, int seed) {
// Use the seed from the primary cache in the secondary cache.
uint32_t name_low32bits =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
- // We always set the in_loop bit to zero when generating the lookup code
- // so do it here too so the hash codes match.
- uint32_t iflags =
- (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
- uint32_t key = (seed - name_low32bits) + iflags;
+ uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
@@ -150,15 +159,11 @@ class StubCache {
offset * multiplier);
}
- static const int kPrimaryTableBits = 11;
- static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
- static const int kSecondaryTableBits = 9;
- static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
-
private:
Entry primary_[kPrimaryTableSize];
Entry secondary_[kSecondaryTableSize];
Isolate* isolate_;
+ Code::Kind ic_kind_;
friend class Isolate;
friend class SCTableReference;
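
The constructor and the *ForTesting wrappers are made public so unit tests can drive the hash functions directly. A hypothetical test-style usage (names come from this header; the surrounding harness and CHECK macro are assumed):

StubCache store_cache(isolate, Code::STORE_IC);
int primary = StubCache::PrimaryOffsetForTesting(*name, *map);
int secondary = StubCache::SecondaryOffsetForTesting(*name, primary);
// Offsets are pre-scaled by 1 << kCacheIndexShift, so the low bits are zero.
CHECK_EQ(0, primary & ((1 << StubCache::kCacheIndexShift) - 1));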
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index b8d50b3d2c..2b292528c8 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -19,19 +19,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, rax, rbx, rdi, r8};
+ static Register registers[] = {receiver, name, rax, rbx, rdi};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, rbx, rdi, r8};
+ static Register registers[] = {receiver, name, rbx, rdi};
return registers;
}
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index dde61691d5..ba4daed32c 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -180,7 +180,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
@@ -219,13 +219,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Factory* factory = masm->isolate()->factory();
- Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ Cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- factory->the_hole_value());
+ isolate->factory()->the_hole_value());
__ j(not_equal, miss);
}
@@ -326,8 +327,8 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
__ PopReturnAddressTo(r11);
__ Push(receiver);
@@ -339,15 +340,6 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
}
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
-}
-
-
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
@@ -440,29 +432,26 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- __ Move(scratch1, validity_cell, RelocInfo::CELL);
- // Move(..., CELL) loads the payload's address!
- __ SmiCompare(Operand(scratch1, 0),
- Smi::FromInt(Map::kPrototypeChainValid));
- __ j(not_equal, miss);
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ __ Move(scratch1, validity_cell, RelocInfo::CELL);
+ // Move(..., CELL) loads the payload's address!
+ __ SmiCompare(Operand(scratch1, 0),
+ Smi::FromInt(Map::kPrototypeChainValid));
+ __ j(not_equal, miss);
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ movp(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ movp(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ j(not_equal, miss);
}
// Keep track of the current object in register reg. On the first
@@ -500,8 +489,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -511,34 +502,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
-
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ movp(holder_reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ movp(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ j(not_equal, miss);
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ movp(holder_reg, FieldOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -552,17 +521,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -597,58 +557,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), receiver()));
- DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), reg));
-
- // Insert additional parameters into the stack frame above return address.
- __ PopReturnAddressTo(scratch3());
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- __ Push(receiver()); // receiver
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ Push(data);
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- __ Push(scratch2());
- }
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ Push(kScratchRegister); // return value
- __ Push(kScratchRegister); // return value default
- __ PushAddress(ExternalReference::isolate_address(isolate()));
- __ Push(reg); // holder
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
-
- __ Push(name()); // name
- __ PushReturnAddressFrom(scratch3());
-
- // Abi for CallApiGetter
- Register api_function_address = ApiGetterDescriptor::function_address();
- Address getter_address = v8::ToCData<Address>(callback->getter());
- __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ Move(rax, value);
@@ -659,7 +567,7 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -721,7 +629,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
// Call the runtime system to load the interceptor.
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
__ PopReturnAddressTo(scratch2());
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
holder());
@@ -741,7 +649,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ Push(holder_reg);
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ Push(callback);
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -756,7 +664,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -798,7 +706,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 247116d7fe..21a114830f 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -340,11 +340,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- receiver, key,
- megamorphic_scratch, no_reg);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, key, megamorphic_scratch, no_reg);
// Cache miss.
GenerateMiss(masm);
@@ -451,7 +448,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, rbx, key, xmm0,
+ __ StoreNumberToDoubleElements(value, rbx, key, kScratchDoubleReg,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
@@ -519,10 +516,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow_with_tagged_index);
// Get the map from the receiver.
__ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ testb(FieldOperand(r9, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
@@ -556,8 +553,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Handle<TypeFeedbackVector> dummy_vector =
@@ -567,10 +564,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ Move(vector, dummy_vector);
__ Move(slot, Smi::FromInt(slot_index));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, r9, no_reg);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r9,
+ no_reg);
// Cache miss.
__ jmp(&miss);
@@ -710,13 +705,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // This shouldn't be called.
- __ int3();
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
@@ -728,8 +716,8 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(receiver);
__ Push(name);
__ Push(value);
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
DCHECK(!temp.is(slot) && !temp.is(vector));
__ Push(slot);
__ Push(vector);
@@ -751,8 +739,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Register dictionary = r11;
- DCHECK(!AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
- VectorStoreICDescriptor::SlotRegister()));
+ DCHECK(!AreAliased(dictionary, StoreWithVectorDescriptor::VectorRegister(),
+ StoreWithVectorDescriptor::SlotRegister()));
Label miss;
@@ -829,8 +817,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
- test_instruction_address, delta);
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(test_instruction_address), delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
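
The PrintF change in this hunk is a conformance fix rather than a behavior change: the %p conversion is only defined for void*, so passing V8's byte-pointer Address values directly is undefined behavior and trips format warnings. A minimal standalone illustration of the pattern (the local byte pointer stands in for the IC addresses):

#include <cstdio>

int main() {
  unsigned char buffer[16];
  unsigned char* address = buffer;  // stand-in for a byte-typed code address
  // Passing a non-void pointer to %p is undefined behavior and warns under
  // -Wformat; the diff above adds the explicit cast instead:
  std::printf("[ patching ic at %p\n", static_cast<void*>(address));
  return 0;
}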
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
index 9a9dfe9f4b..946aee51fc 100644
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -14,9 +14,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register receiver, Register name,
// The offset is scaled by 4, based on
// kCacheIndexShift, which is two bits.
@@ -31,8 +29,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
// and shifting optimizations).
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
@@ -46,8 +44,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Get the map entry from the cache.
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
- DCHECK(isolate->stub_cache()->map_reference(table).address() -
- isolate->stub_cache()->key_reference(table).address() ==
+ DCHECK(stub_cache->map_reference(table).address() -
+ stub_cache->key_reference(table).address() ==
kPointerSize * 2);
__ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
@@ -58,12 +56,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ LoadAddress(kScratchRegister, value_offset);
__ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -79,12 +71,9 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ bind(&miss);
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
- Isolate* isolate = masm->isolate();
Label miss;
USE(extra); // The register extra is not used on the X64 platform.
USE(extra2); // The register extra2 is not used on the X64 platform.
@@ -93,9 +82,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// entry size being 3 * kPointerSize.
DCHECK(sizeof(Entry) == 3 * kPointerSize);
- // Make sure the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Make sure that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
@@ -109,15 +95,15 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// If vector-based ics are in use, ensure that scratch doesn't conflict with
// the vector and slot registers, which need to be preserved for a handler
// call or miss.
- if (IC::ICUseVector(ic_kind)) {
- if (ic_kind == Code::LOAD_IC || ic_kind == Code::KEYED_LOAD_IC) {
+ if (IC::ICUseVector(ic_kind_)) {
+ if (ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC) {
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch));
} else {
- DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC);
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, scratch));
}
}
@@ -133,26 +119,25 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(kPrimaryMagic));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps; the same masking is applied in the two
// 'and' instructions below.
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
// Probe the primary table.
- ProbeTable(isolate, masm, ic_kind, flags, kPrimary, receiver, name, scratch);
+ ProbeTable(this, masm, kPrimary, receiver, name, scratch);
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(kPrimaryMagic));
__ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
__ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
+ __ addl(scratch, Immediate(kSecondaryMagic));
__ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
// Probe the secondary table.
- ProbeTable(isolate, masm, ic_kind, flags, kSecondary, receiver, name,
- scratch);
+ ProbeTable(this, masm, kSecondary, receiver, name, scratch);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index 2c1b942756..e528de65ba 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -18,19 +18,19 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ // receiver, name, scratch1, scratch2, scratch3.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+ static Register registers[] = {receiver, name, ebx, eax, edi};
return registers;
}
Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+ // receiver, name, scratch1, scratch2.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, edi, no_reg};
+ static Register registers[] = {receiver, name, ebx, edi};
return registers;
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 281faba3c7..4bf0af2569 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -199,7 +199,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
bool call_data_undefined = false;
// Put call data in place.
- if (api_call_info->data()->IsUndefined()) {
+ if (api_call_info->data()->IsUndefined(isolate)) {
call_data_undefined = true;
__ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
@@ -236,13 +236,14 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
- DCHECK(cell->value()->IsTheHole());
- Factory* factory = masm->isolate()->factory();
- Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ Isolate* isolate = masm->isolate();
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
__ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
- Immediate(factory->the_hole_value()));
+ Immediate(isolate->factory()->the_hole_value()));
__ j(not_equal, miss);
}
@@ -320,8 +321,8 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
__ xchg(receiver, Operand(esp, 0));
__ push(name);
@@ -332,15 +333,6 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
}
-void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow);
-}
-
-
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
@@ -439,28 +431,25 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
!scratch2.is(scratch1));
- if (FLAG_eliminate_prototype_chain_checks) {
- Handle<Cell> validity_cell =
- Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (!validity_cell.is_null()) {
- DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid),
- validity_cell->value());
- // Operand::ForCell(...) points to the cell's payload!
- __ cmp(Operand::ForCell(validity_cell),
- Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
- __ j(not_equal, miss);
- }
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (!validity_cell.is_null()) {
+ DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
+ // Operand::ForCell(...) points to the cell's payload!
+ __ cmp(Operand::ForCell(validity_cell),
+ Immediate(Smi::FromInt(Map::kPrototypeChainValid)));
+ __ j(not_equal, miss);
+ }
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
+ // The prototype chain of primitives (and their JSValue wrappers) depends
+ // on the native context, which can't be guarded by validity cells.
+ // |object_reg| holds the native context specific prototype in this case;
+ // we need to check its map.
+ if (check == CHECK_ALL_MAPS) {
+ __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+ __ CmpWeakValue(scratch1, cell, scratch2);
+ __ j(not_equal, miss);
}
// Keep track of the current object in register reg.
@@ -496,8 +485,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
!current_map->is_access_check_needed());
prototype = handle(JSObject::cast(current_map->prototype()));
- if (current_map->is_dictionary_map() &&
- !current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
if (!name->IsUniqueName()) {
DCHECK(name->IsString());
@@ -507,34 +498,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
- if (FLAG_eliminate_prototype_chain_checks && depth > 1) {
+ if (depth > 1) {
// TODO(jkummerow): Cache and re-use weak cell.
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
-
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ mov(holder_reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- }
- } else {
- Register map_reg = scratch1;
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- }
- if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- } else if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 1 || check == CHECK_ALL_MAPS)) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ j(not_equal, miss);
- }
- if (!FLAG_eliminate_prototype_chain_checks) {
- __ mov(holder_reg, FieldOperand(map_reg, Map::kPrototypeOffset));
- }
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -548,17 +517,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- if (!FLAG_eliminate_prototype_chain_checks &&
- (depth != 0 || check == CHECK_ALL_MAPS)) {
- // Check the holder map.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
bool return_holder = return_what == RETURN_HOLDER;
- if (FLAG_eliminate_prototype_chain_checks && return_holder && depth != 0) {
+ if (return_holder && depth != 0) {
__ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
}
@@ -594,58 +554,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
-void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<AccessorInfo> callback) {
- DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
- DCHECK(!AreAliased(scratch2(), scratch3(), reg));
-
- // Insert additional parameters into the stack frame above return address.
- __ pop(scratch3()); // Get return address to place it below.
-
- // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
- // name below the exit frame to make GC aware of them.
- STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
-
- __ push(receiver()); // receiver
- // Push data from AccessorInfo.
- Handle<Object> data(callback->data(), isolate());
- if (data->IsUndefined() || data->IsSmi()) {
- __ push(Immediate(data));
- } else {
- Handle<WeakCell> cell =
- isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
- // The callback is alive if this instruction is executed,
- // so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch2(), cell);
- __ push(scratch2());
- }
- __ push(Immediate(isolate()->factory()->undefined_value())); // ReturnValue
- // ReturnValue default value
- __ push(Immediate(isolate()->factory()->undefined_value()));
- __ push(Immediate(reinterpret_cast<int>(isolate())));
- __ push(reg); // holder
- __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
-
- __ push(name()); // name
- __ push(scratch3()); // Restore return address.
-
- // Abi for CallApiGetter
- Register getter_address = ApiGetterDescriptor::function_address();
- Address function_address = v8::ToCData<Address>(callback->getter());
- __ mov(getter_address, Immediate(function_address));
-
- CallApiGetterStub stub(isolate());
- __ TailCallStub(&stub);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
// Return the constant value.
__ LoadObject(eax, value);
@@ -656,7 +564,7 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
@@ -723,7 +631,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
- DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+ DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
// Call the runtime system to load the interceptor.
__ pop(scratch2()); // save old return address
PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
@@ -744,7 +652,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(holder_reg);
// If the callback cannot leak, then push the callback directly,
// otherwise wrap it in a weak cell.
- if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ if (callback->data()->IsUndefined(isolate()) || callback->data()->IsSmi()) {
__ Push(callback);
} else {
Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
@@ -759,7 +667,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
// Return the generated code.
- return GetCode(kind(), Code::FAST, name);
+ return GetCode(kind(), name);
}
@@ -801,7 +709,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
FrontendFooter(name, &miss);
// Return the generated code.
- return GetCode(kind(), Code::NORMAL, name);
+ return GetCode(kind(), name);
}
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index b51045bee8..76933f01bb 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -336,10 +336,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
- receiver, key, ebx, edi);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
+ edi);
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
@@ -519,10 +517,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ JumpIfSmi(receiver, &slow);
// Get the map from the receiver.
__ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks and is not observed.
- // The generic stub does not perform map checks or handle observed objects.
+ // Check that the receiver does not require access checks.
+ // The generic stub does not perform map checks.
__ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+ Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Check that the key is a smi.
__ JumpIfNotSmi(key, &maybe_name_key);
@@ -563,13 +561,11 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ push(Immediate(Smi::FromInt(slot)));
__ push(Immediate(dummy_vector));
- Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, edi, no_reg);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
+ no_reg);
- __ pop(VectorStoreICDescriptor::VectorRegister());
- __ pop(VectorStoreICDescriptor::SlotRegister());
+ __ pop(StoreWithVectorDescriptor::VectorRegister());
+ __ pop(StoreWithVectorDescriptor::SlotRegister());
// Cache miss.
__ jmp(&miss);
@@ -708,21 +704,12 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // This shouldn't be called.
- // TODO(mvstanton): remove this method.
- __ int3();
- return;
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
__ xchg(receiver, Operand(esp, 0));
__ push(name);
@@ -747,8 +734,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register vector = VectorStoreICDescriptor::VectorRegister();
- Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ Register slot = StoreWithVectorDescriptor::SlotRegister();
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
@@ -836,8 +823,9 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
// condition code uses at the patched jump.
uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
if (FLAG_trace_ic) {
- PrintF("[ patching ic at %p, test=%p, delta=%d\n", address,
- test_instruction_address, delta);
+ PrintF("[ patching ic at %p, test=%p, delta=%d\n",
+ static_cast<void*>(address),
+ static_cast<void*>(test_instruction_address), delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index dfc0ef6c66..e0656f7cff 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -14,19 +14,19 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
- Code::Kind ic_kind, Code::Flags flags,
+static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
StubCache::Table table, Register name, Register receiver,
- // Number of the cache entry pointer-size scaled.
+ // The offset is scaled by 4, based on
+ // kCacheIndexShift, which is two bits.
Register offset, Register extra) {
- ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
- ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
- ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference key_offset(stub_cache->key_reference(table));
+ ExternalReference value_offset(stub_cache->value_reference(table));
+ ExternalReference map_offset(stub_cache->map_reference(table));
ExternalReference virtual_register =
ExternalReference::virtual_handler_register(masm->isolate());
Label miss;
+ Code::Kind ic_kind = stub_cache->ic_kind();
bool is_vector_store =
IC::ICUseVector(ic_kind) &&
(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
@@ -47,12 +47,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -65,8 +59,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// probe, and need to be dropped before calling the handler.
if (is_vector_store) {
// The overlap here is rather embarrassing. One does what one must.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- DCHECK(extra.is(VectorStoreICDescriptor::SlotRegister()));
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ pop(vector);
__ mov(Operand::StaticVariable(virtual_register), extra);
@@ -102,12 +96,6 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- // Check that the flags match what we're looking for.
- __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
- __ and_(offset, ~Code::kFlagsNotUsedInLookup);
- __ cmp(offset, flags);
- __ j(not_equal, &miss);
-
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
@@ -124,8 +112,8 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
if (is_vector_store) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = VectorStoreICDescriptor::VectorRegister();
- DCHECK(offset.is(VectorStoreICDescriptor::SlotRegister()));
+ Register vector = StoreWithVectorDescriptor::VectorRegister();
+ DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ mov(Operand::StaticVariable(virtual_register), offset);
__ pop(vector);
@@ -142,9 +130,7 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
- Code::Flags flags, Register receiver,
+void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
Register name, Register scratch, Register extra,
Register extra2, Register extra3) {
Label miss;
@@ -153,9 +139,6 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// being 12.
DCHECK(sizeof(Entry) == 12);
- // Assert the flags do not name a specific type.
- DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
// Assert that there are no register conflicts.
DCHECK(!scratch.is(receiver));
DCHECK(!scratch.is(name));
@@ -180,7 +163,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// Get the map of the receiver and compute the hash.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
+ __ xor_(offset, kPrimaryMagic);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps; the same masking is applied in the two
// 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
@@ -189,21 +172,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
DCHECK(kCacheIndexShift == kPointerSizeLog2);
// Probe the primary table.
- ProbeTable(isolate(), masm, ic_kind, flags, kPrimary, name, receiver, offset,
- extra);
+ ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, flags);
+ __ xor_(offset, kPrimaryMagic);
__ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
__ sub(offset, name);
- __ add(offset, Immediate(flags));
+ __ add(offset, Immediate(kSecondaryMagic));
__ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
// Probe the secondary table.
- ProbeTable(isolate(), masm, ic_kind, flags, kSecondary, name, receiver,
- offset, extra);
+ ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index 02251306c8..bf59fd0aec 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -15,6 +15,9 @@
#include "unicode/putil.h"
#include "unicode/udata.h"
+#include "src/base/build_config.h"
+#include "src/base/file-utils.h"
+
#define ICU_UTIL_DATA_FILE 0
#define ICU_UTIL_DATA_SHARED 1
#define ICU_UTIL_DATA_STATIC 2
@@ -38,6 +41,32 @@ void free_icu_data_ptr() {
} // namespace
#endif
+bool InitializeICUDefaultLocation(const char* exec_path,
+ const char* icu_data_file) {
+#if !defined(V8_I18N_SUPPORT)
+ return true;
+#else
+#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
+ if (icu_data_file) {
+ return InitializeICU(icu_data_file);
+ }
+ char* icu_data_file_default;
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ RelativePath(&icu_data_file_default, exec_path, "icudtl.dat");
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ RelativePath(&icu_data_file_default, exec_path, "icudtb.dat");
+#else
+#error Unknown byte ordering
+#endif
+ bool result = InitializeICU(icu_data_file_default);
+ free(icu_data_file_default);
+ return result;
+#else
+ return InitializeICU(NULL);
+#endif
+#endif
+}
+
bool InitializeICU(const char* icu_data_file) {
#if !defined(V8_I18N_SUPPORT)
return true;
diff --git a/deps/v8/src/icu_util.h b/deps/v8/src/icu_util.h
index c308decfe5..af7f994231 100644
--- a/deps/v8/src/icu_util.h
+++ b/deps/v8/src/icu_util.h
@@ -14,6 +14,11 @@ namespace internal {
// function should be called before ICU is used.
bool InitializeICU(const char* icu_data_file);
+// Like above, but using the default icudt[lb].dat location if icu_data_file is
+// not specified.
+bool InitializeICUDefaultLocation(const char* exec_path,
+ const char* icu_data_file);
+
} // namespace internal
} // namespace v8
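
The new declaration gives callers a single entry point that either honors an explicit data file or derives the endianness-appropriate icudt[lb].dat next to the executable. A hedged usage sketch calling the helper as declared above (the include path and surrounding setup are assumptions; embedders typically reach this through a public V8 wrapper):

#include "src/icu_util.h"  // assumed in-tree include path

int main(int argc, char* argv[]) {
  (void)argc;
  // NULL means "no explicit ICU data file": the helper then resolves
  // icudtl.dat (little-endian) or icudtb.dat (big-endian) relative to argv[0].
  if (!v8::internal::InitializeICUDefaultLocation(argv[0], NULL)) {
    return 1;  // ICU data not found; i18n-dependent features would fail.
  }
  // ... initialize the V8 platform, create isolates, run scripts ...
  return 0;
}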
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
new file mode 100644
index 0000000000..56b96e1cd6
--- /dev/null
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -0,0 +1,101 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+protocol_path = "//third_party/WebKit/Source/platform/inspector_protocol"
+protocol_sources = [
+ "$target_gen_dir/Console.cpp",
+ "$target_gen_dir/Console.h",
+ "$target_gen_dir/Debugger.cpp",
+ "$target_gen_dir/Debugger.h",
+ "$target_gen_dir/HeapProfiler.cpp",
+ "$target_gen_dir/HeapProfiler.h",
+ "$target_gen_dir/Profiler.cpp",
+ "$target_gen_dir/Profiler.h",
+ "$target_gen_dir/public/Debugger.h",
+ "$target_gen_dir/public/Runtime.h",
+ "$target_gen_dir/Runtime.cpp",
+ "$target_gen_dir/Runtime.h",
+]
+
+action("inspector_protocol_sources") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+ script = "$protocol_path/CodeGenerator.py"
+ sources = [
+ "$protocol_path/CodeGenerator.py",
+ "$protocol_path/Exported_h.template",
+ "$protocol_path/Imported_h.template",
+ "$protocol_path/TypeBuilder_cpp.template",
+ "$protocol_path/TypeBuilder_h.template",
+ ]
+ inputs = [
+ "js_protocol.json",
+ ]
+ outputs = protocol_sources
+ args = [
+ "--protocol",
+ rebase_path("js_protocol.json", root_build_dir),
+ "--string_type",
+ "String16",
+ "--export_macro",
+ "PLATFORM_EXPORT",
+ "--output_dir",
+ rebase_path(target_gen_dir, root_build_dir),
+ "--output_package",
+ "inspector",
+ "--exported_dir",
+ rebase_path("$target_gen_dir/public", root_build_dir),
+ "--exported_package",
+ "inspector/public",
+ ]
+}
+
+config("inspector_protocol_config") {
+ include_dirs = [ "$protocol_path/../.." ]
+ defines = [ "V8_INSPECTOR_USE_STL" ]
+ cflags = []
+ if (is_win) {
+ cflags += [
+ "/wd4267", # Truncation from size_t to int.
+ "/wd4305", # Truncation from 'type1' to 'type2'.
+ "/wd4324", # Struct padded due to declspec(align).
+ "/wd4714", # Function marked forceinline not inlined.
+ "/wd4800", # Value forced to bool.
+ "/wd4996", # Deprecated function call.
+ ]
+ }
+}
+
+source_set("inspector_protocol") {
+ deps = [
+ ":inspector_protocol_sources",
+ ]
+ configs += [ ":inspector_protocol_config" ]
+ include_dirs = [ "$target_gen_dir/.." ]
+ sources = protocol_sources + [
+ "$protocol_path/Allocator.h",
+ "$protocol_path/Array.h",
+ "$protocol_path/BackendCallback.h",
+ "$protocol_path/CodeGenerator.py",
+ "$protocol_path/Collections.h",
+ "$protocol_path/DispatcherBase.cpp",
+ "$protocol_path/DispatcherBase.h",
+ "$protocol_path/ErrorSupport.cpp",
+ "$protocol_path/ErrorSupport.h",
+ "$protocol_path/FrontendChannel.h",
+ "$protocol_path/Maybe.h",
+ "$protocol_path/Object.cpp",
+ "$protocol_path/Object.h",
+ "$protocol_path/Parser.cpp",
+ "$protocol_path/Parser.h",
+ "$protocol_path/Platform.h",
+ "$protocol_path/PlatformSTL.h",
+ "$protocol_path/String16.cpp",
+ "$protocol_path/String16.h",
+ "$protocol_path/String16STL.cpp",
+ "$protocol_path/String16STL.h",
+ "$protocol_path/ValueConversions.h",
+ "$protocol_path/Values.cpp",
+ "$protocol_path/Values.h",
+ ]
+}
diff --git a/deps/v8/src/inspector/inspector.gyp b/deps/v8/src/inspector/inspector.gyp
new file mode 100644
index 0000000000..5fc49b15ea
--- /dev/null
+++ b/deps/v8/src/inspector/inspector.gyp
@@ -0,0 +1,113 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{ 'variables': {
+ 'protocol_path': '../../third_party/WebKit/Source/platform/inspector_protocol',
+ 'protocol_sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Console.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Console.h',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Debugger.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Debugger.h',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/HeapProfiler.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/HeapProfiler.h',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Profiler.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Profiler.h',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/public/Debugger.h',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/public/Runtime.h',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Runtime.cpp',
+ '<(SHARED_INTERMEDIATE_DIR)/inspector/Runtime.h',
+ ]
+ },
+ 'targets': [
+ { 'target_name': 'inspector_protocol_sources',
+ 'type': 'none',
+ 'variables': {
+ 'jinja_module_files': [
+ # jinja2/__init__.py contains version string, so sufficient for package
+ '../third_party/jinja2/__init__.py',
+ '../third_party/markupsafe/__init__.py', # jinja2 dep
+ ]
+ },
+ 'actions': [
+ {
+ 'action_name': 'generate_inspector_protocol_sources',
+ 'inputs': [
+ # Source generator script.
+ '<(protocol_path)/CodeGenerator.py',
+ # Source code templates.
+ '<(protocol_path)/Exported_h.template',
+ '<(protocol_path)/Imported_h.template',
+ '<(protocol_path)/TypeBuilder_h.template',
+ '<(protocol_path)/TypeBuilder_cpp.template',
+ # Protocol definition.
+ 'js_protocol.json',
+ ],
+ 'outputs': [
+ '<@(protocol_sources)',
+ ],
+ 'action': [
+ 'python',
+ '<(protocol_path)/CodeGenerator.py',
+ '--protocol', 'js_protocol.json',
+ '--string_type', 'String16',
+ '--export_macro', 'PLATFORM_EXPORT',
+ '--output_dir', '<(SHARED_INTERMEDIATE_DIR)/inspector',
+ '--output_package', 'inspector',
+ '--exported_dir', '<(SHARED_INTERMEDIATE_DIR)/inspector/public',
+ '--exported_package', 'inspector/public',
+ ],
+ 'message': 'Generating Inspector protocol backend sources from json definitions',
+ },
+ ]
+ },
+ { 'target_name': 'inspector_protocol',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'inspector_protocol_sources',
+ ],
+ 'include_dirs+': [
+ '<(protocol_path)/../..',
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'defines': [
+ 'V8_INSPECTOR_USE_STL',
+ ],
+ 'msvs_disabled_warnings': [
+ 4267, # Truncation from size_t to int.
+ 4305, # Truncation from 'type1' to 'type2'.
+ 4324, # Struct padded due to declspec(align).
+ 4714, # Function marked forceinline not inlined.
+ 4800, # Value forced to bool.
+ 4996, # Deprecated function call.
+ ],
+ 'sources': [
+ '<@(protocol_sources)',
+ '<(protocol_path)/Allocator.h',
+ '<(protocol_path)/Array.h',
+ '<(protocol_path)/BackendCallback.h',
+ '<(protocol_path)/CodeGenerator.py',
+ '<(protocol_path)/Collections.h',
+ '<(protocol_path)/DispatcherBase.cpp',
+ '<(protocol_path)/DispatcherBase.h',
+ '<(protocol_path)/ErrorSupport.cpp',
+ '<(protocol_path)/ErrorSupport.h',
+ '<(protocol_path)/FrontendChannel.h',
+ '<(protocol_path)/Maybe.h',
+ '<(protocol_path)/Object.cpp',
+ '<(protocol_path)/Object.h',
+ '<(protocol_path)/Parser.cpp',
+ '<(protocol_path)/Parser.h',
+ '<(protocol_path)/Platform.h',
+ '<(protocol_path)/PlatformSTL.h',
+ '<(protocol_path)/String16.cpp',
+ '<(protocol_path)/String16.h',
+ '<(protocol_path)/String16STL.cpp',
+ '<(protocol_path)/String16STL.h',
+ '<(protocol_path)/ValueConversions.h',
+ '<(protocol_path)/Values.cpp',
+ '<(protocol_path)/Values.h',
+ ]
+ },
+ ],
+}
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
new file mode 100644
index 0000000000..314cb5f13c
--- /dev/null
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -0,0 +1,1011 @@
+{
+ "version": { "major": "1", "minor": "1" },
+ "domains": [{
+ "domain": "Runtime",
+ "description": "Runtime domain exposes JavaScript runtime by means of remote evaluation and mirror objects. Evaluation results are returned as mirror object that expose object type, string representation and unique identifier that can be used for further object reference. Original objects are maintained in memory unless they are either explicitly released or are released along with the other objects in their object group.",
+ "types": [
+ {
+ "id": "ScriptId",
+ "type": "string",
+ "description": "Unique script identifier."
+ },
+ {
+ "id": "RemoteObjectId",
+ "type": "string",
+ "description": "Unique object identifier."
+ },
+ {
+ "id": "RemoteObject",
+ "type": "object",
+ "description": "Mirror object referencing original JavaScript object.",
+ "exported": true,
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+ { "name": "className", "type": "string", "optional": true, "description": "Object class (constructor) name. Specified for <code>object</code> type values only." },
+ { "name": "value", "type": "any", "optional": true, "description": "Remote object value in case of primitive values or JSON values (if it was requested), or description string if the value can not be JSON-stringified (like NaN, Infinity, -Infinity, -0)." },
+ { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Unique object identifier (for non-primitive values)." },
+ { "name": "preview", "$ref": "ObjectPreview", "optional": true, "description": "Preview containing abbreviated property values. Specified for <code>object</code> type values only.", "hidden": true },
+ { "name": "customPreview", "$ref": "CustomPreview", "optional": true, "hidden": true}
+ ]
+ },
+ {
+ "id": "CustomPreview",
+ "type": "object",
+ "hidden": true,
+ "properties": [
+ { "name": "header", "type": "string"},
+ { "name": "hasBody", "type": "boolean"},
+ { "name": "formatterObjectId", "$ref": "RemoteObjectId"},
+ { "name": "bindRemoteObjectFunctionId", "$ref": "RemoteObjectId" },
+ { "name": "configObjectId", "$ref": "RemoteObjectId", "optional": true }
+ ]
+ },
+ {
+ "id": "ObjectPreview",
+ "type": "object",
+ "hidden": true,
+ "description": "Object containing abbreviated remote object value.",
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
+ { "name": "description", "type": "string", "optional": true, "description": "String representation of the object." },
+ { "name": "overflow", "type": "boolean", "description": "True iff some of the properties or entries of the original object did not fit." },
+ { "name": "properties", "type": "array", "items": { "$ref": "PropertyPreview" }, "description": "List of the properties." },
+ { "name": "entries", "type": "array", "items": { "$ref": "EntryPreview" }, "optional": true, "description": "List of the entries. Specified for <code>map</code> and <code>set</code> subtype values only." }
+ ]
+ },
+ {
+ "id": "PropertyPreview",
+ "type": "object",
+ "hidden": true,
+ "properties": [
+ { "name": "name", "type": "string", "description": "Property name." },
+ { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol", "accessor"], "description": "Object type. Accessor means that the property itself is an accessor property." },
+ { "name": "value", "type": "string", "optional": true, "description": "User-friendly property value string." },
+ { "name": "valuePreview", "$ref": "ObjectPreview", "optional": true, "description": "Nested value preview." },
+ { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error"], "description": "Object subtype hint. Specified for <code>object</code> type values only." }
+ ]
+ },
+ {
+ "id": "EntryPreview",
+ "type": "object",
+ "hidden": true,
+ "properties": [
+ { "name": "key", "$ref": "ObjectPreview", "optional": true, "description": "Preview of the key. Specified for map-like collection entries." },
+ { "name": "value", "$ref": "ObjectPreview", "description": "Preview of the value." }
+ ]
+ },
+ {
+ "id": "PropertyDescriptor",
+ "type": "object",
+ "description": "Object property descriptor.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Property name or symbol description." },
+ { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." },
+ { "name": "writable", "type": "boolean", "optional": true, "description": "True if the value associated with the property may be changed (data descriptors only)." },
+ { "name": "get", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a getter for the property, or <code>undefined</code> if there is no getter (accessor descriptors only)." },
+ { "name": "set", "$ref": "RemoteObject", "optional": true, "description": "A function which serves as a setter for the property, or <code>undefined</code> if there is no setter (accessor descriptors only)." },
+ { "name": "configurable", "type": "boolean", "description": "True if the type of this property descriptor may be changed and if the property may be deleted from the corresponding object." },
+ { "name": "enumerable", "type": "boolean", "description": "True if this property shows up during enumeration of the properties on the corresponding object." },
+ { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
+ { "name": "isOwn", "optional": true, "type": "boolean", "description": "True if the property is owned for the object.", "hidden": true },
+ { "name": "symbol", "$ref": "RemoteObject", "optional": true, "description": "Property symbol object, if the property is of the <code>symbol</code> type.", "hidden": true }
+ ]
+ },
+ {
+ "id": "InternalPropertyDescriptor",
+ "type": "object",
+ "description": "Object internal property descriptor. This property isn't normally visible in JavaScript code.",
+ "properties": [
+ { "name": "name", "type": "string", "description": "Conventional property name." },
+ { "name": "value", "$ref": "RemoteObject", "optional": true, "description": "The value associated with the property." }
+ ],
+ "hidden": true
+ },
+ {
+ "id": "CallArgument",
+ "type": "object",
+ "description": "Represents function call argument. Either remote object id <code>objectId</code> or primitive <code>value</code> or neither of (for undefined) them should be specified.",
+ "properties": [
+ { "name": "value", "type": "any", "optional": true, "description": "Primitive value, or description string if the value can not be JSON-stringified (like NaN, Infinity, -Infinity, -0)." },
+ { "name": "objectId", "$ref": "RemoteObjectId", "optional": true, "description": "Remote object handle." },
+ { "name": "type", "optional": true, "hidden": true, "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." }
+ ]
+ },
+ {
+ "id": "ExecutionContextId",
+ "type": "integer",
+ "description": "Id of an execution context."
+ },
+ {
+ "id": "ExecutionContextDescription",
+ "type": "object",
+ "description": "Description of an isolated world.",
+ "properties": [
+ { "name": "id", "$ref": "ExecutionContextId", "description": "Unique id of the execution context. It can be used to specify in which execution context script evaluation should be performed." },
+ { "name": "isDefault", "type": "boolean", "description": "Whether context is the default page context (as opposite to e.g. context of content script).", "hidden": true },
+ { "name": "origin", "type": "string", "description": "Execution context origin.", "hidden": true},
+ { "name": "name", "type": "string", "description": "Human readable name describing given context.", "hidden": true},
+ { "name": "frameId", "type": "string", "description": "Id of the owning frame. May be an empty string if the context is not associated with a frame." }
+ ]
+ },
+ {
+ "id": "ExceptionDetails",
+ "type": "object",
+ "hidden": true,
+ "description": "Detailed information about exception (or error) that was thrown during script compilation or execution.",
+ "properties": [
+ { "name": "text", "type": "string", "description": "Exception text." },
+ { "name": "scriptId", "$ref": "ScriptId", "description": "Script ID of the exception location." },
+ { "name": "lineNumber", "type": "integer", "description": "Line number of the exception location (0-based)." },
+ { "name": "columnNumber", "type": "integer", "description": "Column number of the exception location (0-based)." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the exception location, to be used when the script was not reported." },
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "JavaScript stack trace if available." }
+ ]
+ },
+ {
+ "id": "Timestamp",
+ "type": "number",
+ "description": "Number of milliseconds since epoch.",
+ "hidden": true
+ },
+ {
+ "id": "CallFrame",
+ "type": "object",
+ "description": "Stack entry for runtime errors and assertions.",
+ "properties": [
+ { "name": "functionName", "type": "string", "description": "JavaScript function name." },
+ { "name": "scriptId", "$ref": "ScriptId", "description": "JavaScript script id." },
+ { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+ { "name": "lineNumber", "type": "integer", "description": "JavaScript script line number (0-based)." },
+ { "name": "columnNumber", "type": "integer", "description": "JavaScript script column number (0-based)." }
+ ]
+ },
+ {
+ "id": "StackTrace",
+ "type": "object",
+ "description": "Call frames for assertions or error messages.",
+ "exported": true,
+ "properties": [
+ { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
+ { "name": "parent", "$ref": "StackTrace", "optional": true, "hidden": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "evaluate",
+ "async": true,
+ "parameters": [
+ { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation.", "hidden": true },
+ { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether evaluation should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
+ { "name": "contextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which isolated context to perform evaluation. Each content script lives in an isolated context and this parameter may be used to specify one of those contexts. If the parameter is omitted or 0 the evaluation will be performed in the context of the inspected page." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "hidden": true, "description": "Whether execution should be treated as initiated by user in the UI." },
+ { "name": "awaitPromise", "type": "boolean", "optional":true, "hidden": true, "description": "Whether execution should wait for promise to be resolved. If the result of evaluation is not a Promise, it's considered to be an error." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Evaluation result." },
+ { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+ ],
+ "description": "Evaluates expression on global object."
+ },
+ {
+ "name": "awaitPromise",
+ "hidden": true,
+ "async": true,
+ "parameters": [
+ { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "description": "Whether preview should be generated for the result." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Promise result. Will contain rejected value if promise was rejected." },
+ { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the promise was rejected." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details if stack strace is available."}
+ ],
+ "description": "Add handler to promise with given promise object id."
+ },
+ {
+ "name": "callFunctionOn",
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
+ { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
+ { "name": "arguments", "type": "array", "items": { "$ref": "CallArgument", "description": "Call argument." }, "optional": true, "description": "Call arguments. All call arguments must belong to the same JavaScript world as the target object." },
+ { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether function call should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object which should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." },
+ { "name": "userGesture", "type": "boolean", "optional": true, "hidden": true, "description": "Whether execution should be treated as initiated by user in the UI." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Call result." },
+ { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." }
+ ],
+ "description": "Calls function with given declaration on the given object. Object group of the result is inherited from the target object."
+ },
+ {
+ "name": "getProperties",
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to return properties for." },
+ { "name": "ownProperties", "optional": true, "type": "boolean", "description": "If true, returns properties belonging only to the element itself, not to its prototype chain." },
+ { "name": "accessorPropertiesOnly", "optional": true, "type": "boolean", "description": "If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.", "hidden": true },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the results." }
+ ],
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "PropertyDescriptor" }, "description": "Object properties." },
+ { "name": "internalProperties", "optional": true, "type": "array", "items": { "$ref": "InternalPropertyDescriptor" }, "description": "Internal object properties (only of the element itself).", "hidden": true },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+ ],
+ "description": "Returns properties of a given object. Object group of the result is inherited from the target object."
+ },
+ {
+ "name": "releaseObject",
+ "parameters": [
+ { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to release." }
+ ],
+ "description": "Releases remote object with given id."
+ },
+ {
+ "name": "releaseObjectGroup",
+ "parameters": [
+ { "name": "objectGroup", "type": "string", "description": "Symbolic object group name." }
+ ],
+ "description": "Releases all remote objects that belong to a given group."
+ },
+ {
+ "name": "run",
+ "hidden": true,
+ "description": "Tells inspected instance(worker or page) that it can run in case it was started paused."
+ },
+ {
+ "name": "enable",
+ "description": "Enables reporting of execution contexts creation by means of <code>executionContextCreated</code> event. When the reporting gets enabled the event will be sent immediately for each existing execution context."
+ },
+ {
+ "name": "disable",
+ "hidden": true,
+ "description": "Disables reporting of execution contexts creation."
+ },
+ {
+ "name": "discardConsoleEntries",
+ "hidden": true,
+ "description": "Discards collected exceptions and console API calls."
+ },
+ {
+ "name": "setCustomObjectFormatterEnabled",
+ "parameters": [
+ {
+ "name": "enabled",
+ "type": "boolean"
+ }
+ ],
+ "hidden": true
+ },
+ {
+ "name": "compileScript",
+ "hidden": true,
+ "parameters": [
+ { "name": "expression", "type": "string", "description": "Expression to compile." },
+ { "name": "sourceURL", "type": "string", "description": "Source url to be set for the script." },
+ { "name": "persistScript", "type": "boolean", "description": "Specifies whether the compiled script should be persisted." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Specifies in which isolated context to perform script run. Each content script lives in an isolated context and this parameter is used to specify one of those contexts." }
+ ],
+ "returns": [
+ { "name": "scriptId", "$ref": "ScriptId", "optional": true, "description": "Id of the script." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Compiles expression."
+ },
+ {
+ "name": "runScript",
+ "hidden": true,
+ "parameters": [
+ { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Specifies in which isolated context to perform script run. Each content script lives in an isolated context and this parameter is used to specify one of those contexts." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
+ { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether script run should stop on exceptions and mute console. Overrides setPauseOnException state." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Determines whether Command Line API should be available during the evaluation." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "RemoteObject", "description": "Run result." },
+ { "name": "exceptionDetails", "$ref": "ExceptionDetails", "optional": true, "description": "Exception details."}
+ ],
+ "description": "Runs script with given id in a given context."
+ }
+ ],
+ "events": [
+ {
+ "name": "executionContextCreated",
+ "parameters": [
+ { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution contex." }
+ ],
+ "description": "Issued when new execution context is created."
+ },
+ {
+ "name": "executionContextDestroyed",
+ "parameters": [
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Id of the destroyed context" }
+ ],
+ "description": "Issued when execution context is destroyed."
+ },
+ {
+ "name": "executionContextsCleared",
+ "description": "Issued when all executionContexts were cleared in browser"
+ },
+ {
+ "name": "exceptionThrown",
+ "description": "Issued when exception was thrown and unhandled.",
+ "parameters": [
+ { "name": "exceptionId", "type": "integer", "description": "Exception id." },
+ { "name": "timestamp", "$ref": "Timestamp", "description": "Timestamp of the exception." },
+ { "name": "details", "$ref": "ExceptionDetails" },
+ { "name": "exception", "$ref": "RemoteObject", "optional": true, "description": "Exception object." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Identifier of the context where exception happened." }
+ ],
+ "hidden": true
+ },
+ {
+ "name": "exceptionRevoked",
+ "description": "Issued when unhandled exception was revoked.",
+ "parameters": [
+ { "name": "message", "type": "string", "description": "Message describing why exception was revoked." },
+ { "name": "exceptionId", "type": "integer", "description": "The id of revoked exception, as reported in <code>exceptionUnhandled</code>." }
+ ],
+ "hidden": true
+ },
+ {
+ "name": "consoleAPICalled",
+ "description": "Issued when console API was called.",
+ "parameters": [
+ { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd"], "description": "Type of the call." },
+ { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
+ { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
+ { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
+ { "name": "stackTrace", "$ref": "StackTrace", "optional": true, "description": "Stack trace captured when the call was made." }
+ ],
+ "hidden": true
+ },
+ {
+ "name": "inspectRequested",
+ "parameters": [
+ { "name": "object", "$ref": "RemoteObject" },
+ { "name": "hints", "type": "object" }
+ ],
+ "hidden": true
+ }
+ ]
+ },
+ {
+ "domain": "Debugger",
+ "description": "Debugger domain exposes JavaScript debugging capabilities. It allows setting and removing breakpoints, stepping through execution, exploring stack traces, etc.",
+ "dependencies": ["Runtime"],
+ "types": [
+ {
+ "id": "BreakpointId",
+ "type": "string",
+ "description": "Breakpoint identifier."
+ },
+ {
+ "id": "CallFrameId",
+ "type": "string",
+ "description": "Call frame identifier."
+ },
+ {
+ "id": "Location",
+ "type": "object",
+ "properties": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Script identifier as reported in the <code>Debugger.scriptParsed</code>." },
+ { "name": "lineNumber", "type": "integer", "description": "Line number in the script (0-based)." },
+ { "name": "columnNumber", "type": "integer", "optional": true, "description": "Column number in the script (0-based)." }
+ ],
+ "description": "Location in the source code."
+ },
+ {
+ "id": "ScriptPosition",
+ "hidden": true,
+ "type": "object",
+ "properties": [
+ { "name": "lineNumber", "type": "integer" },
+ { "name": "columnNumber", "type": "integer" }
+ ],
+ "description": "Location in the source code."
+ },
+ {
+ "id": "CallFrame",
+ "type": "object",
+ "properties": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier. This identifier is only valid while the virtual machine is paused." },
+ { "name": "functionName", "type": "string", "description": "Name of the JavaScript function called on this call frame." },
+ { "name": "functionLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code." },
+ { "name": "location", "$ref": "Location", "description": "Location in the source code." },
+ { "name": "scopeChain", "type": "array", "items": { "$ref": "Scope" }, "description": "Scope chain for this call frame." },
+ { "name": "this", "$ref": "Runtime.RemoteObject", "description": "<code>this</code> object for this call frame." },
+ { "name": "returnValue", "$ref": "Runtime.RemoteObject", "optional": true, "hidden": true, "description": "The value being returned, if the function is at return point." }
+ ],
+ "description": "JavaScript call frame. Array of call frames form the call stack."
+ },
+ {
+ "id": "Scope",
+ "type": "object",
+ "properties": [
+ { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
+ { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
+ { "name": "name", "type": "string", "optional": true, "hidden": true },
+ { "name": "startLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code where scope starts" },
+ { "name": "endLocation", "$ref": "Location", "optional": true, "hidden": true, "description": "Location in the source code where scope ends" }
+ ],
+ "description": "Scope description."
+ },
+ {
+ "id": "SearchMatch",
+ "type": "object",
+ "description": "Search match for resource.",
+ "exported": true,
+ "properties": [
+ { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
+ { "name": "lineContent", "type": "string", "description": "Line with match content." }
+ ],
+ "hidden": true
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable",
+ "description": "Enables debugger for the given page. Clients should not assume that the debugging has been enabled until the result for this command is received."
+ },
+ {
+ "name": "disable",
+ "description": "Disables debugger for given page."
+ },
+ {
+ "name": "setBreakpointsActive",
+ "parameters": [
+ { "name": "active", "type": "boolean", "description": "New value for breakpoints active state." }
+ ],
+ "description": "Activates / deactivates all breakpoints on the page."
+ },
+ {
+ "name": "setSkipAllPauses",
+ "hidden": true,
+ "parameters": [
+ { "name": "skipped", "type": "boolean", "description": "New value for skip pauses state." }
+ ],
+ "description": "Makes page not interrupt on any pauses (breakpoint, exception, dom exception etc)."
+ },
+ {
+ "name": "setBreakpointByUrl",
+ "parameters": [
+ { "name": "lineNumber", "type": "integer", "description": "Line number to set breakpoint at." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the resources to set breakpoint on." },
+ { "name": "urlRegex", "type": "string", "optional": true, "description": "Regex pattern for the URLs of the resources to set breakpoints on. Either <code>url</code> or <code>urlRegex</code> must be specified." },
+ { "name": "columnNumber", "type": "integer", "optional": true, "description": "Offset in the line to set breakpoint at." },
+ { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+ ],
+ "returns": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+ { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the locations this breakpoint resolved into upon addition." }
+ ],
+ "description": "Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this command is issued, all existing parsed scripts will have breakpoints resolved and returned in <code>locations</code> property. Further matching script parsing will result in subsequent <code>breakpointResolved</code> events issued. This logical breakpoint will survive page reloads."
+ },
+ {
+ "name": "setBreakpoint",
+ "parameters": [
+ { "name": "location", "$ref": "Location", "description": "Location to set breakpoint in." },
+ { "name": "condition", "type": "string", "optional": true, "description": "Expression to use as a breakpoint condition. When specified, debugger will only stop on the breakpoint if this expression evaluates to true." }
+ ],
+ "returns": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Id of the created breakpoint for further reference." },
+ { "name": "actualLocation", "$ref": "Location", "description": "Location this breakpoint resolved into." }
+ ],
+ "description": "Sets JavaScript breakpoint at a given location."
+ },
+ {
+ "name": "removeBreakpoint",
+ "parameters": [
+ { "name": "breakpointId", "$ref": "BreakpointId" }
+ ],
+ "description": "Removes JavaScript breakpoint."
+ },
+ {
+ "name": "continueToLocation",
+ "parameters": [
+ { "name": "location", "$ref": "Location", "description": "Location to continue to." },
+ { "name": "interstatementLocation", "type": "boolean", "optional": true, "hidden": true, "description": "Allows breakpoints at the intemediate positions inside statements." }
+ ],
+ "description": "Continues execution until specific location is reached."
+ },
+ {
+ "name": "stepOver",
+ "description": "Steps over the statement."
+ },
+ {
+ "name": "stepInto",
+ "description": "Steps into the function call."
+ },
+ {
+ "name": "stepOut",
+ "description": "Steps out of the function call."
+ },
+ {
+ "name": "pause",
+ "description": "Stops on the next JavaScript statement."
+ },
+ {
+ "name": "resume",
+ "description": "Resumes JavaScript execution."
+ },
+ {
+ "name": "searchInContent",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to search in." },
+ { "name": "query", "type": "string", "description": "String to search for." },
+ { "name": "caseSensitive", "type": "boolean", "optional": true, "description": "If true, search is case sensitive." },
+ { "name": "isRegex", "type": "boolean", "optional": true, "description": "If true, treats string parameter as regex." }
+ ],
+ "returns": [
+ { "name": "result", "type": "array", "items": { "$ref": "SearchMatch" }, "description": "List of search matches." }
+ ],
+ "description": "Searches for given string in script content."
+ },
+ {
+ "name": "canSetScriptSource",
+ "returns": [
+ { "name": "result", "type": "boolean", "description": "True if <code>setScriptSource</code> is supported." }
+ ],
+ "description": "Always returns true."
+ },
+ {
+ "name": "setScriptSource",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to edit." },
+ { "name": "scriptSource", "type": "string", "description": "New content of the script." },
+ { "name": "preview", "type": "boolean", "optional": true, "description": " If true the change will not actually be applied. Preview mode may be used to get result description without actually modifying the code.", "hidden": true }
+ ],
+ "returns": [
+ { "name": "callFrames", "type": "array", "optional": true, "items": { "$ref": "CallFrame" }, "description": "New stack trace in case editing has happened while VM was stopped." },
+ { "name": "stackChanged", "type": "boolean", "optional": true, "description": "Whether current call stack was modified after applying the changes.", "hidden": true },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any.", "hidden": true },
+ { "name": "compileError", "optional": true, "$ref": "Runtime.ExceptionDetails", "description": "Error data if any." }
+ ],
+ "description": "Edits JavaScript source live."
+ },
+ {
+ "name": "restartFrame",
+ "parameters": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." }
+ ],
+ "returns": [
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "New stack trace." },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+ ],
+ "hidden": true,
+ "description": "Restarts particular call frame from the beginning."
+ },
+ {
+ "name": "getScriptSource",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script to get source for." }
+ ],
+ "returns": [
+ { "name": "scriptSource", "type": "string", "description": "Script source." }
+ ],
+ "description": "Returns source for the script with given id."
+ },
+ {
+ "name": "setPauseOnExceptions",
+ "parameters": [
+ { "name": "state", "type": "string", "enum": ["none", "uncaught", "all"], "description": "Pause on exceptions mode." }
+ ],
+ "description": "Defines pause on exceptions state. Can be set to stop on all exceptions, uncaught exceptions or no exceptions. Initial pause on exceptions state is <code>none</code>."
+ },
+ {
+ "name": "evaluateOnCallFrame",
+ "parameters": [
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Call frame identifier to evaluate on." },
+ { "name": "expression", "type": "string", "description": "Expression to evaluate." },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "String object group name to put result into (allows rapid releasing resulting object handles using <code>releaseObjectGroup</code>)." },
+ { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false.", "hidden": true },
+ { "name": "doNotPauseOnExceptionsAndMuteConsole", "type": "boolean", "optional": true, "description": "Specifies whether evaluation should stop on exceptions and mute console. Overrides setPauseOnException state.", "hidden": true },
+ { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
+ { "name": "generatePreview", "type": "boolean", "optional": true, "hidden": true, "description": "Whether preview should be generated for the result." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
+ { "name": "wasThrown", "type": "boolean", "optional": true, "description": "True if the result was thrown during the evaluation." },
+ { "name": "exceptionDetails", "$ref": "Runtime.ExceptionDetails", "optional": true, "hidden": true, "description": "Exception details."}
+ ],
+ "description": "Evaluates expression on a given call frame."
+ },
+ {
+ "name": "setVariableValue",
+ "parameters": [
+ { "name": "scopeNumber", "type": "integer", "description": "0-based number of scope as was listed in scope chain. Only 'local', 'closure' and 'catch' scope types are allowed. Other scopes could be manipulated manually." },
+ { "name": "variableName", "type": "string", "description": "Variable name." },
+ { "name": "newValue", "$ref": "Runtime.CallArgument", "description": "New variable value." },
+ { "name": "callFrameId", "$ref": "CallFrameId", "description": "Id of callframe that holds variable." }
+ ],
+ "hidden": true,
+ "description": "Changes value of variable in a callframe. Object-based scopes are not supported and must be mutated manually."
+ },
+ {
+ "name": "getBacktrace",
+ "returns": [
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
+ ],
+ "hidden": true,
+ "description": "Returns call stack including variables changed since VM was paused. VM must be paused."
+ },
+ {
+ "name": "setAsyncCallStackDepth",
+ "parameters": [
+ { "name": "maxDepth", "type": "integer", "description": "Maximum depth of async call stacks. Setting to <code>0</code> will effectively disable collecting async call stacks (default)." }
+ ],
+ "hidden": true,
+ "description": "Enables or disables async call stacks tracking."
+ },
+ {
+ "name": "setBlackboxPatterns",
+ "parameters": [
+ { "name": "patterns", "type": "array", "items": { "type": "string" }, "description": "Array of regexps that will be used to check script url for blackbox state." }
+ ],
+ "hidden": true,
+ "description": "Replace previous blackbox patterns with passed ones. Forces backend to skip stepping/pausing in scripts with url matching one of the patterns. VM will try to leave blackboxed script by performing 'step in' several times, finally resorting to 'step out' if unsuccessful."
+ },
+ {
+ "name": "setBlackboxedRanges",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Id of the script." },
+ { "name": "positions", "type": "array", "items": { "$ref": "ScriptPosition" } }
+ ],
+ "hidden": true,
+ "description": "Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted."
+ }
+ ],
+ "events": [
+ {
+ "name": "scriptParsed",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+ { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+ { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+ { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+ { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+ { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context.", "hidden": true },
+ { "name": "hash", "type": "string", "hidden": true, "description": "Content hash of the script."},
+ { "name": "isContentScript", "type": "boolean", "optional": true, "description": "Determines whether this script is a user extension script." },
+ { "name": "isInternalScript", "type": "boolean", "optional": true, "description": "Determines whether this script is an internal script.", "hidden": true },
+ { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "hidden": true },
+ { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "hidden": true },
+ { "name": "deprecatedCommentWasUsed", "type": "boolean", "optional": true, "hidden": true, "description": "True, if '//@ sourceURL' or '//@ sourceMappingURL' was used."}
+ ],
+ "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
+ },
+ {
+ "name": "scriptFailedToParse",
+ "parameters": [
+ { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "Identifier of the script parsed." },
+ { "name": "url", "type": "string", "description": "URL or name of the script parsed (if any)." },
+ { "name": "startLine", "type": "integer", "description": "Line offset of the script within the resource with given URL (for script tags)." },
+ { "name": "startColumn", "type": "integer", "description": "Column offset of the script within the resource with given URL." },
+ { "name": "endLine", "type": "integer", "description": "Last line of the script." },
+ { "name": "endColumn", "type": "integer", "description": "Length of the last line of the script." },
+ { "name": "executionContextId", "$ref": "Runtime.ExecutionContextId", "description": "Specifies script creation context.", "hidden": true },
+ { "name": "hash", "type": "string", "hidden": true, "description": "Content hash of the script."},
+ { "name": "isContentScript", "type": "boolean", "optional": true, "description": "Determines whether this script is a user extension script." },
+ { "name": "isInternalScript", "type": "boolean", "optional": true, "description": "Determines whether this script is an internal script.", "hidden": true },
+ { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
+ { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "hidden": true },
+ { "name": "deprecatedCommentWasUsed", "type": "boolean", "optional": true, "hidden": true, "description": "True, if '//@ sourceURL' or '//@ sourceMappingURL' was used."}
+ ],
+ "description": "Fired when virtual machine fails to parse the script."
+ },
+ {
+ "name": "breakpointResolved",
+ "parameters": [
+ { "name": "breakpointId", "$ref": "BreakpointId", "description": "Breakpoint unique identifier." },
+ { "name": "location", "$ref": "Location", "description": "Actual breakpoint location." }
+ ],
+ "description": "Fired when breakpoint is resolved to an actual script and location."
+ },
+ {
+ "name": "paused",
+ "parameters": [
+ { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
+ { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
+ { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
+ { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs", "hidden": true },
+ { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any.", "hidden": true }
+ ],
+ "description": "Fired when the virtual machine stopped on breakpoint or exception or any other stop criteria."
+ },
+ {
+ "name": "resumed",
+ "description": "Fired when the virtual machine resumed execution."
+ }
+ ]
+ },
+ {
+ "domain": "Console",
+ "description": "This domain is deprecated - use Runtime or Log instead.",
+ "dependencies": ["Runtime"],
+ "deprecated": true,
+ "types": [
+ {
+ "id": "ConsoleMessage",
+ "type": "object",
+ "description": "Console message.",
+ "properties": [
+ { "name": "source", "type": "string", "enum": ["xml", "javascript", "network", "console-api", "storage", "appcache", "rendering", "security", "other", "deprecation", "worker"], "description": "Message source." },
+ { "name": "level", "type": "string", "enum": ["log", "warning", "error", "debug", "info"], "description": "Message severity." },
+ { "name": "text", "type": "string", "description": "Message text." },
+ { "name": "url", "type": "string", "optional": true, "description": "URL of the message origin." },
+ { "name": "line", "type": "integer", "optional": true, "description": "Line number in the resource that generated this message (1-based)." },
+ { "name": "column", "type": "integer", "optional": true, "description": "Column number in the resource that generated this message (1-based)." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable",
+ "description": "Enables console domain, sends the messages collected so far to the client by means of the <code>messageAdded</code> notification."
+ },
+ {
+ "name": "disable",
+ "description": "Disables console domain, prevents further console messages from being reported to the client."
+ },
+ {
+ "name": "clearMessages",
+ "description": "Does nothing."
+ }
+ ],
+ "events": [
+ {
+ "name": "messageAdded",
+ "parameters": [
+ { "name": "message", "$ref": "ConsoleMessage", "description": "Console message that has been added." }
+ ],
+ "description": "Issued when new console message is added."
+ },
+ {
+ "name": "messageRepeatCountUpdated",
+ "parameters": [
+ { "name": "count", "type": "integer", "description": "New repeat count value." },
+ { "name": "timestamp", "$ref": "Runtime.Timestamp", "description": "Timestamp of most recent message in batch.", "hidden": true }
+ ],
+ "description": "Not issued.",
+ "deprecated": true
+ },
+ {
+ "name": "messagesCleared",
+ "description": "Not issued.",
+ "deprecated": true
+ }
+ ]
+ },
+ {
+ "domain": "Profiler",
+ "dependencies": ["Runtime", "Debugger"],
+ "hidden": true,
+ "types": [
+ {
+ "id": "CPUProfileNode",
+ "type": "object",
+ "description": "CPU Profile node. Holds callsite information, execution statistics and child nodes.",
+ "properties": [
+ { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+ { "name": "hitCount", "type": "integer", "description": "Number of samples where this node was on top of the call stack." },
+ { "name": "children", "type": "array", "items": { "$ref": "CPUProfileNode" }, "description": "Child nodes." },
+ { "name": "deoptReason", "type": "string", "description": "The reason of being not optimized. The function may be deoptimized or marked as don't optimize."},
+ { "name": "id", "type": "integer", "description": "Unique id of the node." },
+ { "name": "positionTicks", "type": "array", "items": { "$ref": "PositionTickInfo" }, "description": "An array of source position ticks." }
+ ]
+ },
+ {
+ "id": "CPUProfile",
+ "type": "object",
+ "description": "Profile.",
+ "properties": [
+ { "name": "head", "$ref": "CPUProfileNode" },
+ { "name": "startTime", "type": "number", "description": "Profiling start time in seconds." },
+ { "name": "endTime", "type": "number", "description": "Profiling end time in seconds." },
+ { "name": "samples", "optional": true, "type": "array", "items": { "type": "integer" }, "description": "Ids of samples top nodes." },
+ { "name": "timestamps", "optional": true, "type": "array", "items": { "type": "number" }, "description": "Timestamps of the samples in microseconds." }
+ ]
+ },
+ {
+ "id": "PositionTickInfo",
+ "type": "object",
+ "description": "Specifies a number of samples attributed to a certain source position.",
+ "properties": [
+ { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
+ { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "setSamplingInterval",
+ "parameters": [
+ { "name": "interval", "type": "integer", "description": "New sampling interval in microseconds." }
+ ],
+ "description": "Changes CPU profiler sampling interval. Must be called before CPU profiles recording started."
+ },
+ {
+ "name": "start"
+ },
+ {
+ "name": "stop",
+ "returns": [
+ { "name": "profile", "$ref": "CPUProfile", "description": "Recorded profile." }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "consoleProfileStarted",
+ "parameters": [
+ { "name": "id", "type": "string" },
+ { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as argument to console.profile()." }
+ ],
+ "description": "Sent when new profile recodring is started using console.profile() call."
+ },
+ {
+ "name": "consoleProfileFinished",
+ "parameters": [
+ { "name": "id", "type": "string" },
+ { "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profileEnd()." },
+ { "name": "profile", "$ref": "CPUProfile" },
+ { "name": "title", "type": "string", "optional": true, "description": "Profile title passed as argunet to console.profile()." }
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "HeapProfiler",
+ "dependencies": ["Runtime"],
+ "hidden": true,
+ "types": [
+ {
+ "id": "HeapSnapshotObjectId",
+ "type": "string",
+ "description": "Heap snapshot object id."
+ },
+ {
+ "id": "SamplingHeapProfileNode",
+ "type": "object",
+ "description": "Sampling Heap Profile node. Holds callsite information, allocation statistics and child nodes.",
+ "properties": [
+ { "name": "callFrame", "$ref": "Runtime.CallFrame", "description": "Function location." },
+ { "name": "selfSize", "type": "number", "description": "Allocations size in bytes for the node excluding children." },
+ { "name": "children", "type": "array", "items": { "$ref": "SamplingHeapProfileNode" }, "description": "Child nodes." }
+ ]
+ },
+ {
+ "id": "SamplingHeapProfile",
+ "type": "object",
+ "description": "Profile.",
+ "properties": [
+ { "name": "head", "$ref": "SamplingHeapProfileNode" }
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "enable"
+ },
+ {
+ "name": "disable"
+ },
+ {
+ "name": "startTrackingHeapObjects",
+ "parameters": [
+ { "name": "trackAllocations", "type": "boolean", "optional": true }
+ ]
+ },
+ {
+ "name": "stopTrackingHeapObjects",
+ "parameters": [
+ { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken when the tracking is stopped." }
+ ]
+ },
+ {
+ "name": "takeHeapSnapshot",
+ "parameters": [
+ { "name": "reportProgress", "type": "boolean", "optional": true, "description": "If true 'reportHeapSnapshotProgress' events will be generated while snapshot is being taken." }
+ ]
+ },
+ {
+ "name": "collectGarbage"
+ },
+ {
+ "name": "getObjectByHeapObjectId",
+ "parameters": [
+ { "name": "objectId", "$ref": "HeapSnapshotObjectId" },
+ { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." }
+ ],
+ "returns": [
+ { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Evaluation result." }
+ ]
+ },
+ {
+ "name": "addInspectedHeapObject",
+ "parameters": [
+ { "name": "heapObjectId", "$ref": "HeapSnapshotObjectId", "description": "Heap snapshot object id to be accessible by means of $x command line API." }
+ ],
+ "description": "Enables console to refer to the node with given id via $x (see Command Line API for more details $x functions)."
+ },
+ {
+ "name": "getHeapObjectId",
+ "parameters": [
+ { "name": "objectId", "$ref": "Runtime.RemoteObjectId", "description": "Identifier of the object to get heap object id for." }
+ ],
+ "returns": [
+ { "name": "heapSnapshotObjectId", "$ref": "HeapSnapshotObjectId", "description": "Id of the heap snapshot object corresponding to the passed remote object id." }
+ ]
+ },
+ {
+ "name": "startSampling",
+ "parameters": [
+ { "name": "samplingInterval", "type": "number", "optional": true, "description": "Average sample interval in bytes. Poisson distribution is used for the intervals. The default value is 32768 bytes." }
+ ]
+ },
+ {
+ "name": "stopSampling",
+ "returns": [
+ { "name": "profile", "$ref": "SamplingHeapProfile", "description": "Recorded sampling heap profile." }
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "addHeapSnapshotChunk",
+ "parameters": [
+ { "name": "chunk", "type": "string" }
+ ]
+ },
+ {
+ "name": "resetProfiles"
+ },
+ {
+ "name": "reportHeapSnapshotProgress",
+ "parameters": [
+ { "name": "done", "type": "integer" },
+ { "name": "total", "type": "integer" },
+ { "name": "finished", "type": "boolean", "optional": true }
+ ]
+ },
+ {
+ "name": "lastSeenObjectId",
+ "description": "If heap objects tracking has been started then backend regulary sends a current value for last seen object id and corresponding timestamp. If the were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
+ "parameters": [
+ { "name": "lastSeenObjectId", "type": "integer" },
+ { "name": "timestamp", "type": "number" }
+ ]
+ },
+ {
+ "name": "heapStatsUpdate",
+ "description": "If heap objects tracking has been started then backend may send update for one or more fragments",
+ "parameters": [
+ { "name": "statsUpdate", "type": "array", "items": { "type": "integer" }, "description": "An array of triplets. Each triplet describes a fragment. The first integer is the fragment index, the second integer is a total count of objects for the fragment, the third integer is a total size of the objects for the fragment."}
+ ]
+ }
+ ]
+ }]
+}
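For orientation: on the wire, each command defined in the schema above travels as a JSON message in the conventional DevTools framing ("id"/"method"/"params", where "method" is "<domain>.<command>"). Below is a minimal, self-contained C++ sketch of a Runtime.evaluate request shaped by this schema; the framing is the standard protocol convention, and the hard-coded message is purely illustrative, not how V8 itself builds messages.

#include <cstdio>
#include <string>

int main() {
  // "expression" is the only required parameter of Runtime.evaluate;
  // "returnByValue" is one of its optional parameters (see the schema above).
  std::string request =
      "{\"id\":1,"
      "\"method\":\"Runtime.evaluate\","
      "\"params\":{\"expression\":\"1 + 2\",\"returnByValue\":true}}";
  std::printf("%s\n", request.c_str());
  // A conforming response carries the command's declared "returns", i.e. a
  // RemoteObject under "result", for example:
  // {"id":1,"result":{"result":{"type":"number","value":3,"description":"3"}}}
  return 0;
}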
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 9ee4269d3e..a16cae7d61 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -43,15 +43,14 @@ FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
return function;
}
-
void CallInterfaceDescriptorData::InitializePlatformSpecific(
- int register_parameter_count, Register* registers,
+ int register_parameter_count, const Register* registers,
PlatformInterfaceDescriptor* platform_descriptor) {
platform_specific_descriptor_ = platform_descriptor;
register_param_count_ = register_parameter_count;
// InterfaceDescriptor owns a copy of the registers array.
- register_params_.Reset(NewArray<Register>(register_parameter_count));
+ register_params_.reset(NewArray<Register>(register_parameter_count));
for (int i = 0; i < register_parameter_count; i++) {
register_params_[i] = registers[i];
}
@@ -75,25 +74,37 @@ const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
}
-void AllocateMutableHeapNumberDescriptor::InitializePlatformSpecific(
+void VoidDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr, nullptr);
+ data->InitializePlatformSpecific(0, nullptr);
}
+FunctionType*
+FastNewFunctionContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, UntaggedIntegral32(zone));
+ return function;
+}
-void VoidDescriptor::InitializePlatformSpecific(
+void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
+ Register registers[] = {FunctionRegister(), SlotsRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, AnyTagged(zone));
- function->InitParameter(2, SmiType(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kReceiver, AnyTagged(zone));
+ function->InitParameter(kName, AnyTagged(zone));
+ function->InitParameter(kSlot, SmiType(zone));
return function;
}
@@ -104,10 +115,58 @@ void LoadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+FunctionType* LoadGlobalDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kSlot, SmiType(zone));
+ return function;
+}
+
+void LoadGlobalDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {LoadWithVectorDescriptor::SlotRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+FunctionType*
+LoadGlobalWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kSlot, SmiType(zone));
+ function->InitParameter(kVector, AnyTagged(zone));
+ return function;
+}
+
+void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {LoadWithVectorDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+FunctionType* StoreDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kReceiver, AnyTagged(zone));
+ function->InitParameter(kName, AnyTagged(zone));
+ function->InitParameter(kValue, AnyTagged(zone));
+ function->InitParameter(kSlot, SmiType(zone));
+ return function;
+}
void StoreDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -137,42 +196,27 @@ void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
FunctionType*
StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone)); // Receiver
- function->InitParameter(1, AnyTagged(zone)); // Name
- function->InitParameter(2, AnyTagged(zone)); // Value
- function->InitParameter(3, AnyTagged(zone)); // Map
- return function;
-}
-
-FunctionType*
-LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
- function->InitParameter(0, UntaggedIntegral32(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kReceiver, AnyTagged(zone));
+ function->InitParameter(kName, AnyTagged(zone));
+ function->InitParameter(kValue, AnyTagged(zone));
+ function->InitParameter(kMap, AnyTagged(zone));
return function;
}
-
-void LoadGlobalViaContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
FunctionType*
StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
- function->InitParameter(0, UntaggedIntegral32(zone));
- function->InitParameter(1, AnyTagged(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kSlot, UntaggedIntegral32(zone));
+ function->InitParameter(kValue, AnyTagged(zone));
return function;
}
@@ -184,13 +228,6 @@ void StoreGlobalViaContextDescriptor::InitializePlatformSpecific(
}
-void InstanceOfDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {LeftRegister(), RightRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void StringCompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {LeftRegister(), RightRegister()};
@@ -203,14 +240,12 @@ void TypeConversionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void MathPowTaggedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {exponent()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void MathPowIntegerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {exponent()};
@@ -219,14 +254,15 @@ void MathPowIntegerDescriptor::InitializePlatformSpecific(
FunctionType*
LoadWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, AnyTagged(zone));
- function->InitParameter(2, SmiType(zone));
- function->InitParameter(3, AnyTagged(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kReceiver, AnyTagged(zone));
+ function->InitParameter(kName, AnyTagged(zone));
+ function->InitParameter(kSlot, SmiType(zone));
+ function->InitParameter(kVector, AnyTagged(zone));
return function;
}
@@ -240,7 +276,7 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
FunctionType*
VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
bool has_slot = !VectorStoreTransitionDescriptor::SlotRegister().is(no_reg);
int arg_count = has_slot ? 6 : 5;
@@ -248,6 +284,7 @@ VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::Function(AnyTagged(zone), Type::Undefined(), arg_count, zone)
->AsFunction();
int index = 0;
+ // TODO(ishell): use ParameterIndices here
function->InitParameter(index++, AnyTagged(zone)); // receiver
function->InitParameter(index++, AnyTagged(zone)); // name
function->InitParameter(index++, AnyTagged(zone)); // value
@@ -259,21 +296,22 @@ VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-FunctionType* VectorStoreICDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+FunctionType*
+StoreWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, AnyTagged(zone));
- function->InitParameter(2, AnyTagged(zone));
- function->InitParameter(3, SmiType(zone));
- function->InitParameter(4, AnyTagged(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kReceiver, AnyTagged(zone));
+ function->InitParameter(kName, AnyTagged(zone));
+ function->InitParameter(kValue, AnyTagged(zone));
+ function->InitParameter(kSlot, SmiType(zone));
+ function->InitParameter(kVector, AnyTagged(zone));
return function;
}
-
-void VectorStoreICDescriptor::InitializePlatformSpecific(
+void StoreWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
SlotRegister(), VectorRegister()};
@@ -281,203 +319,264 @@ void VectorStoreICDescriptor::InitializePlatformSpecific(
}
FunctionType*
-VectorStoreICTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+BinaryOpWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ DCHECK_EQ(parameter_count, kParameterCount);
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, AnyTagged(zone));
- function->InitParameter(2, AnyTagged(zone));
- function->InitParameter(3, SmiType(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kLeft, AnyTagged(zone));
+ function->InitParameter(kRight, AnyTagged(zone));
+ function->InitParameter(kSlot, UntaggedIntegral32(zone));
+ function->InitParameter(kVector, AnyTagged(zone));
return function;
}
+const Register ApiGetterDescriptor::ReceiverRegister() {
+ return LoadDescriptor::ReceiverRegister();
+}
-void VectorStoreICTrampolineDescriptor::InitializePlatformSpecific(
+void ApiGetterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister()};
+ Register registers[] = {ReceiverRegister(), HolderRegister(),
+ CallbackRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType* ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
- function->InitParameter(0, ExternalPointer(zone));
- return function;
+void ContextOnlyDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ data->InitializePlatformSpecific(0, nullptr);
}
-
-void ApiGetterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {function_address()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+CallInterfaceDescriptor OnStackArgsDescriptorBase::ForArgs(
+ Isolate* isolate, int parameter_count) {
+ switch (parameter_count) {
+ case 1:
+ return OnStackWith1ArgsDescriptor(isolate);
+ case 2:
+ return OnStackWith2ArgsDescriptor(isolate);
+ case 3:
+ return OnStackWith3ArgsDescriptor(isolate);
+ case 4:
+ return OnStackWith4ArgsDescriptor(isolate);
+ case 5:
+ return OnStackWith5ArgsDescriptor(isolate);
+ case 6:
+ return OnStackWith6ArgsDescriptor(isolate);
+ case 7:
+ return OnStackWith7ArgsDescriptor(isolate);
+ default:
+ UNREACHABLE();
+ return VoidDescriptor(isolate);
+ }
}
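For orientation, a minimal usage sketch of this factory (illustrative only, not part of the commit; assumes a live Isolate* named isolate):

  // Selects the descriptor variant for three on-stack arguments; equivalent
  // to constructing OnStackWith3ArgsDescriptor(isolate) directly.
  CallInterfaceDescriptor descriptor =
      OnStackArgsDescriptorBase::ForArgs(isolate, 3);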
+FunctionType*
+OnStackArgsDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
+ Isolate* isolate, int register_parameter_count, int parameter_count) {
+ DCHECK_EQ(0, register_parameter_count);
+ DCHECK_GT(parameter_count, 0);
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), AnyTagged(zone), parameter_count, zone)
+ ->AsFunction();
+ for (int i = 0; i < parameter_count; i++) {
+ function->InitParameter(i, AnyTagged(zone));
+ }
+ return function;
+}
-void ContextOnlyDescriptor::InitializePlatformSpecific(
+void OnStackArgsDescriptorBase::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr);
}
-
void GrowArrayElementsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ObjectRegister(), KeyRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-FunctionType* FastArrayPushDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+FunctionType*
+VarArgFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), AnyTagged(zone), 1, zone)->AsFunction();
- function->InitParameter(0, UntaggedIntegral32(zone)); // actual #arguments
+ Type::Function(AnyTagged(zone), AnyTagged(zone), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
return function;
}
FunctionType*
FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone)); // closure
- function->InitParameter(1, SmiType(zone)); // literal_index
- function->InitParameter(2, AnyTagged(zone)); // pattern
- function->InitParameter(3, AnyTagged(zone)); // flags
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kClosure, AnyTagged(zone));
+ function->InitParameter(kLiteralIndex, SmiType(zone));
+ function->InitParameter(kPattern, AnyTagged(zone));
+ function->InitParameter(kFlags, AnyTagged(zone));
return function;
}
FunctionType*
FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, AnyTagged(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kClosure, AnyTagged(zone));
+ function->InitParameter(kLiteralIndex, SmiType(zone));
+ function->InitParameter(kConstantElements, AnyTagged(zone));
return function;
}
FunctionType*
CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, SmiType(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kVector, AnyTagged(zone));
+ function->InitParameter(kSlot, SmiType(zone));
return function;
}
FunctionType*
CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, AnyTagged(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kVector, AnyTagged(zone));
+ function->InitParameter(kSlot, SmiType(zone));
+ function->InitParameter(kValue, AnyTagged(zone));
return function;
}
FunctionType*
CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone)); // target
- function->InitParameter(1, UntaggedIntegral32(zone)); // actual #arguments
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, AnyTagged(zone));
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
return function;
}
FunctionType* ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone)); // target
- function->InitParameter(1, AnyTagged(zone)); // new.target
- function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
- function->InitParameter(3, AnyTagged(zone)); // opt. allocation site
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, AnyTagged(zone));
+ function->InitParameter(kNewTarget, AnyTagged(zone));
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
+ function->InitParameter(kAllocationSite, AnyTagged(zone));
return function;
}
FunctionType*
ConstructTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
- function->InitParameter(0, AnyTagged(zone)); // target
- function->InitParameter(1, AnyTagged(zone)); // new.target
- function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, AnyTagged(zone));
+ function->InitParameter(kNewTarget, AnyTagged(zone));
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
return function;
}
FunctionType*
CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
- function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, SmiType(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, Type::Receiver());
+ function->InitParameter(kSlot, SmiType(zone));
return function;
}
FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
- int paramater_count) {
+ int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
- function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, AnyTagged(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, Type::Receiver());
+ function->InitParameter(kSlot, SmiType(zone));
+ function->InitParameter(kVector, AnyTagged(zone));
return function;
}
FunctionType*
-ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ArrayNoArgumentConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, Type::Receiver());
+ function->InitParameter(kAllocationSite, AnyTagged(zone));
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
+ function->InitParameter(kFunctionParameter, AnyTagged(zone));
+ return function;
+}
+
+FunctionType* ArraySingleArgumentConstructorDescriptor::
+ BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
+ int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
- function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, AnyTagged(zone));
- function->InitParameter(2, UntaggedIntegral32(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, Type::Receiver());
+ function->InitParameter(kAllocationSite, AnyTagged(zone));
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
+ function->InitParameter(kFunctionParameter, AnyTagged(zone));
+ function->InitParameter(kArraySizeSmiParameter, AnyTagged(zone));
return function;
}
FunctionType*
-InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ArrayNArgumentsConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
- function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, UntaggedIntegral32(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, Type::Receiver());
+ function->InitParameter(kAllocationSite, AnyTagged(zone));
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
return function;
}
FunctionType*
ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
+ Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
- function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, AnyTagged(zone)); // the new target
- function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
- function->InitParameter(3, UntaggedIntegral32(zone)); // expected #arguments
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, Type::Receiver());
+ function->InitParameter(kNewTarget, AnyTagged(zone));
+ function->InitParameter(kActualArgumentsCount, UntaggedIntegral32(zone));
+ function->InitParameter(kExpectedArgumentsCount, UntaggedIntegral32(zone));
return function;
}
@@ -510,13 +609,13 @@ FunctionType*
ApiCallbackDescriptorBase::BuildCallInterfaceDescriptorFunctionTypeWithArg(
Isolate* isolate, int parameter_count, int argc) {
Zone* zone = isolate->interface_descriptor_zone();
- FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 4 + argc, zone)
- ->AsFunction();
- function->InitParameter(0, AnyTagged(zone)); // callee
- function->InitParameter(1, AnyTagged(zone)); // call_data
- function->InitParameter(2, AnyTagged(zone)); // holder
- function->InitParameter(3, ExternalPointer(zone)); // api_function_address
+ FunctionType* function = Type::Function(AnyTagged(zone), Type::Undefined(),
+ kParameterCount + argc, zone)
+ ->AsFunction();
+ function->InitParameter(kFunction, AnyTagged(zone));
+ function->InitParameter(kCallData, AnyTagged(zone));
+ function->InitParameter(kHolder, AnyTagged(zone));
+ function->InitParameter(kApiFunctionAddress, ExternalPointer(zone));
for (int i = 0; i < argc; i++) {
function->InitParameter(i, AnyTagged(zone));
}
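Illustrative parameter arithmetic for this descriptor family (derived from the DEFINE_PARAMETERS list in the header diff below; the totals are not stated in the commit):

  // ApiCallbackWith2ArgsDescriptor: kParameterCount (4) fixed parameters
  // (kFunction, kCallData, kHolder, kApiFunctionAddress) plus argc (2)
  // tagged call arguments, i.e. 6 parameters in total.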
@@ -528,12 +627,12 @@ InterpreterDispatchDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
FunctionType* function =
- Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
- function->InitParameter(kAccumulatorParameter, AnyTagged(zone));
- function->InitParameter(kRegisterFileParameter, ExternalPointer(zone));
- function->InitParameter(kBytecodeOffsetParameter, UntaggedIntegral32(zone));
- function->InitParameter(kBytecodeArrayParameter, AnyTagged(zone));
- function->InitParameter(kDispatchTableParameter, AnyTagged(zone));
+ Type::Function(AnyTagged(zone), Type::Undefined(), kParameterCount, zone)
+ ->AsFunction();
+ function->InitParameter(kAccumulator, AnyTagged(zone));
+ function->InitParameter(kBytecodeOffset, UntaggedIntegral32(zone));
+ function->InitParameter(kBytecodeArray, AnyTagged(zone));
+ function->InitParameter(kDispatchTable, AnyTagged(zone));
return function;
}
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index dcce0afe5c..af59bdb121 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -5,6 +5,8 @@
#ifndef V8_CALL_INTERFACE_DESCRIPTOR_H_
#define V8_CALL_INTERFACE_DESCRIPTOR_H_
+#include <memory>
+
#include "src/assembler.h"
#include "src/macro-assembler.h"
@@ -13,84 +15,94 @@ namespace internal {
class PlatformInterfaceDescriptor;
-#define INTERFACE_DESCRIPTOR_LIST(V) \
- V(Void) \
- V(Load) \
- V(Store) \
- V(StoreTransition) \
- V(VectorStoreTransition) \
- V(VectorStoreICTrampoline) \
- V(VectorStoreIC) \
- V(InstanceOf) \
- V(LoadWithVector) \
- V(FastArrayPush) \
- V(FastNewClosure) \
- V(FastNewContext) \
- V(FastNewObject) \
- V(FastNewRestParameter) \
- V(FastNewSloppyArguments) \
- V(FastNewStrictArguments) \
- V(TypeConversion) \
- V(Typeof) \
- V(FastCloneRegExp) \
- V(FastCloneShallowArray) \
- V(FastCloneShallowObject) \
- V(CreateAllocationSite) \
- V(CreateWeakCell) \
- V(CallFunction) \
- V(CallFunctionWithFeedback) \
- V(CallFunctionWithFeedbackAndVector) \
- V(CallConstruct) \
- V(CallTrampoline) \
- V(ConstructStub) \
- V(ConstructTrampoline) \
- V(RegExpConstructResult) \
- V(TransitionElementsKind) \
- V(AllocateHeapNumber) \
- V(AllocateMutableHeapNumber) \
- V(AllocateFloat32x4) \
- V(AllocateInt32x4) \
- V(AllocateUint32x4) \
- V(AllocateBool32x4) \
- V(AllocateInt16x8) \
- V(AllocateUint16x8) \
- V(AllocateBool16x8) \
- V(AllocateInt8x16) \
- V(AllocateUint8x16) \
- V(AllocateBool8x16) \
- V(AllocateInNewSpace) \
- V(ArrayConstructorConstantArgCount) \
- V(ArrayConstructor) \
- V(InternalArrayConstructorConstantArgCount) \
- V(InternalArrayConstructor) \
- V(Compare) \
- V(BinaryOp) \
- V(BinaryOpWithAllocationSite) \
- V(StringAdd) \
- V(StringCompare) \
- V(Keyed) \
- V(Named) \
- V(CallHandler) \
- V(ArgumentAdaptor) \
- V(ApiCallbackWith0Args) \
- V(ApiCallbackWith1Args) \
- V(ApiCallbackWith2Args) \
- V(ApiCallbackWith3Args) \
- V(ApiCallbackWith4Args) \
- V(ApiCallbackWith5Args) \
- V(ApiCallbackWith6Args) \
- V(ApiCallbackWith7Args) \
- V(ApiGetter) \
- V(LoadGlobalViaContext) \
- V(StoreGlobalViaContext) \
- V(MathPowTagged) \
- V(MathPowInteger) \
- V(ContextOnly) \
- V(GrowArrayElements) \
- V(InterpreterDispatch) \
- V(InterpreterPushArgsAndCall) \
- V(InterpreterPushArgsAndConstruct) \
- V(InterpreterCEntry)
+#define INTERFACE_DESCRIPTOR_LIST(V) \
+ V(Void) \
+ V(ContextOnly) \
+ V(OnStackWith1Args) \
+ V(OnStackWith2Args) \
+ V(OnStackWith3Args) \
+ V(OnStackWith4Args) \
+ V(OnStackWith5Args) \
+ V(OnStackWith6Args) \
+ V(OnStackWith7Args) \
+ V(Load) \
+ V(LoadWithVector) \
+ V(LoadGlobal) \
+ V(LoadGlobalWithVector) \
+ V(Store) \
+ V(StoreWithVector) \
+ V(StoreTransition) \
+ V(VectorStoreTransition) \
+ V(VarArgFunction) \
+ V(FastNewClosure) \
+ V(FastNewFunctionContext) \
+ V(FastNewObject) \
+ V(FastNewRestParameter) \
+ V(FastNewSloppyArguments) \
+ V(FastNewStrictArguments) \
+ V(TypeConversion) \
+ V(Typeof) \
+ V(FastCloneRegExp) \
+ V(FastCloneShallowArray) \
+ V(FastCloneShallowObject) \
+ V(CreateAllocationSite) \
+ V(CreateWeakCell) \
+ V(CallFunction) \
+ V(CallFunctionWithFeedback) \
+ V(CallFunctionWithFeedbackAndVector) \
+ V(CallConstruct) \
+ V(CallTrampoline) \
+ V(ConstructStub) \
+ V(ConstructTrampoline) \
+ V(RegExpConstructResult) \
+ V(CopyFastSmiOrObjectElements) \
+ V(TransitionElementsKind) \
+ V(AllocateHeapNumber) \
+ V(AllocateFloat32x4) \
+ V(AllocateInt32x4) \
+ V(AllocateUint32x4) \
+ V(AllocateBool32x4) \
+ V(AllocateInt16x8) \
+ V(AllocateUint16x8) \
+ V(AllocateBool16x8) \
+ V(AllocateInt8x16) \
+ V(AllocateUint8x16) \
+ V(AllocateBool8x16) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
+ V(Compare) \
+ V(BinaryOp) \
+ V(BinaryOpWithAllocationSite) \
+ V(BinaryOpWithVector) \
+ V(CountOp) \
+ V(StringAdd) \
+ V(StringCompare) \
+ V(Keyed) \
+ V(Named) \
+ V(HasProperty) \
+ V(ForInFilter) \
+ V(GetProperty) \
+ V(CallHandler) \
+ V(ArgumentAdaptor) \
+ V(ApiCallbackWith0Args) \
+ V(ApiCallbackWith1Args) \
+ V(ApiCallbackWith2Args) \
+ V(ApiCallbackWith3Args) \
+ V(ApiCallbackWith4Args) \
+ V(ApiCallbackWith5Args) \
+ V(ApiCallbackWith6Args) \
+ V(ApiCallbackWith7Args) \
+ V(ApiGetter) \
+ V(StoreGlobalViaContext) \
+ V(MathPowTagged) \
+ V(MathPowInteger) \
+ V(GrowArrayElements) \
+ V(InterpreterDispatch) \
+ V(InterpreterPushArgsAndCall) \
+ V(InterpreterPushArgsAndConstruct) \
+ V(InterpreterCEntry) \
+ V(ResumeGenerator)
class CallInterfaceDescriptorData {
public:
@@ -109,7 +121,7 @@ class CallInterfaceDescriptorData {
// and register side by side (e.g., RegRep(r1, Representation::Tagged())).
// The same should go for the CodeStubDescriptor class.
void InitializePlatformSpecific(
- int register_parameter_count, Register* registers,
+ int register_parameter_count, const Register* registers,
PlatformInterfaceDescriptor* platform_descriptor = NULL);
bool IsInitialized() const { return register_param_count_ >= 0; }
@@ -123,8 +135,6 @@ class CallInterfaceDescriptorData {
return platform_specific_descriptor_;
}
- FunctionType* function_type() const { return function_type_; }
-
private:
int register_param_count_;
@@ -132,7 +142,7 @@ class CallInterfaceDescriptorData {
// InterfaceDescriptor, and freed on destruction. This is because static
// arrays of Registers cause creation of runtime static initializers
// which we don't want.
- base::SmartArrayPointer<Register> register_params_;
+ std::unique_ptr<Register[]> register_params_;
// Specifies types for parameters and return
FunctionType* function_type_;
@@ -169,7 +179,7 @@ class CallInterfaceDescriptor {
}
int GetStackParameterCount() const {
- return data()->function_type()->Arity() - data()->register_param_count();
+ return data()->param_count() - data()->register_param_count();
}
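A worked example of the new arithmetic (illustrative numbers, not from the commit):

  // A descriptor with param_count() == 5, of which register_param_count() == 3
  // are passed in registers, reports 5 - 3 == 2 stack parameters.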
Register GetRegisterParameter(int index) const {
@@ -186,14 +196,12 @@ class CallInterfaceDescriptor {
return data()->platform_specific_descriptor();
}
- FunctionType* GetFunctionType() const { return data()->function_type(); }
-
static const Register ContextRegister();
const char* DebugName(Isolate* isolate) const;
static FunctionType* BuildDefaultFunctionType(Isolate* isolate,
- int paramater_count);
+ int parameter_count);
protected:
const CallInterfaceDescriptorData* data() const { return data_; }
@@ -209,6 +217,8 @@ class CallInterfaceDescriptor {
void Initialize(Isolate* isolate, CallDescriptors::Key key) {
if (!data()->IsInitialized()) {
+ // We should only initialize descriptors on the isolate's main thread.
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
DCHECK(d == data()); // d should be a modifiable pointer to data().
InitializePlatformSpecific(d);
@@ -218,6 +228,12 @@ class CallInterfaceDescriptor {
}
}
+ // Initializes |data| using the platform dependent default set of registers.
+  // It is intended to be used for TurboFan stubs when the particular set of
+ // registers does not matter.
+ static void DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count);
+
private:
const CallInterfaceDescriptorData* data_;
};
@@ -229,6 +245,17 @@ class CallInterfaceDescriptor {
} \
static inline CallDescriptors::Key key();
+#define DECLARE_DEFAULT_DESCRIPTOR(name, base, parameter_count) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ protected: \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
+ override { \
+ DefaultInitializePlatformSpecific(data, parameter_count); \
+ } \
+ name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
+ \
+ public:
+
#define DECLARE_DESCRIPTOR(name, base) \
DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
protected: \
@@ -256,55 +283,133 @@ class CallInterfaceDescriptor {
\
public:
+#define DEFINE_PARAMETERS(...) \
+ enum ParameterIndices { \
+ __VA_ARGS__, \
+ \
+ kParameterCount, \
+ kContext = kParameterCount /* implicit parameter */ \
+ };
+
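As an illustration (standard variadic-macro expansion; this snippet is not part of the commit), DEFINE_PARAMETERS(kReceiver, kName, kSlot) produces:

  enum ParameterIndices {
    kReceiver,
    kName,
    kSlot,
    kParameterCount,            // == 3, the number of explicit parameters
    kContext = kParameterCount  // the context is an implicit extra parameter
  };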
class VoidDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
};
+class ContextOnlyDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
+};
+
+// The OnStackWith*ArgsDescriptors have a lot of boilerplate. The superclass
+// OnStackArgsDescriptorBase contains all the logic; it is not meant to be
+// instantiated directly and has no public constructors to ensure this is so.
+//
+// Use OnStackArgsDescriptorBase::ForArgs(isolate, parameter_count) to
+// instantiate a descriptor with the number of args.
+class OnStackArgsDescriptorBase : public CallInterfaceDescriptor {
+ public:
+ static CallInterfaceDescriptor ForArgs(Isolate* isolate, int parameter_count);
+
+ protected:
+ OnStackArgsDescriptorBase(Isolate* isolate, CallDescriptors::Key key)
+ : CallInterfaceDescriptor(isolate, key) {}
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override;
+ FunctionType* BuildCallInterfaceDescriptorFunctionTypeWithArg(
+ Isolate* isolate, int register_parameter_count, int parameter_count);
+};
+
+class OnStackWith1ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith1ArgsDescriptor,
+ OnStackArgsDescriptorBase,
+ 1)
+};
+
+class OnStackWith2ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith2ArgsDescriptor,
+ OnStackArgsDescriptorBase,
+ 2)
+};
+
+class OnStackWith3ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith3ArgsDescriptor,
+ OnStackArgsDescriptorBase,
+ 3)
+};
+
+class OnStackWith4ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith4ArgsDescriptor,
+ OnStackArgsDescriptorBase,
+ 4)
+};
+
+class OnStackWith5ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith5ArgsDescriptor,
+ OnStackArgsDescriptorBase,
+ 5)
+};
+
+class OnStackWith6ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith6ArgsDescriptor,
+ OnStackArgsDescriptorBase,
+ 6)
+};
+
+class OnStackWith7ArgsDescriptor : public OnStackArgsDescriptorBase {
+ public:
+ DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG(OnStackWith7ArgsDescriptor,
+ OnStackArgsDescriptorBase,
+ 7)
+};
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
class LoadDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kReceiver, kName, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadDescriptor,
CallInterfaceDescriptor)
- enum ParameterIndices { kReceiverIndex, kNameIndex, kSlotIndex };
static const Register ReceiverRegister();
static const Register NameRegister();
static const Register SlotRegister();
};
+class LoadGlobalDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kSlot)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalDescriptor,
+ CallInterfaceDescriptor)
+
+ static const Register SlotRegister() {
+ return LoadDescriptor::SlotRegister();
+ }
+};
class StoreDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(StoreDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreDescriptor,
+ CallInterfaceDescriptor)
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kValueIndex,
- kParameterCount
- };
static const Register ReceiverRegister();
static const Register NameRegister();
static const Register ValueRegister();
+ static const Register SlotRegister();
};
class StoreTransitionDescriptor : public StoreDescriptor {
public:
+ DEFINE_PARAMETERS(kReceiver, kName, kValue, kMap)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreTransitionDescriptor,
StoreDescriptor)
- // Extends StoreDescriptor with Map parameter.
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kValueIndex,
- kMapIndex,
- kParameterCount
- };
-
static const Register MapRegister();
};
@@ -314,18 +419,19 @@ class VectorStoreTransitionDescriptor : public StoreDescriptor {
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(VectorStoreTransitionDescriptor,
StoreDescriptor)
+ // TODO(ishell): use DEFINE_PARAMETERS macro here
// Extends StoreDescriptor with Map parameter.
enum ParameterIndices {
- kReceiverIndex = 0,
- kNameIndex = 1,
- kValueIndex = 2,
+ kReceiver = 0,
+ kName = 1,
+ kValue = 2,
- kMapIndex = 3,
+ kMap = 3,
- kSlotIndex = 4, // not present on ia32.
- kVirtualSlotVectorIndex = 4,
+ kSlot = 4, // not present on ia32.
+ kVirtualSlotVector = 4,
- kVectorIndex = 5
+ kVector = 5
};
static const Register MapRegister();
@@ -333,70 +439,48 @@ class VectorStoreTransitionDescriptor : public StoreDescriptor {
static const Register VectorRegister();
};
-
-class InstanceOfDescriptor final : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(InstanceOfDescriptor, CallInterfaceDescriptor)
-
- enum ParameterIndices { kLeftIndex, kRightIndex, kParameterCount };
- static const Register LeftRegister();
- static const Register RightRegister();
-};
-
-
-class VectorStoreICTrampolineDescriptor : public StoreDescriptor {
+class StoreWithVectorDescriptor : public StoreDescriptor {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- VectorStoreICTrampolineDescriptor, StoreDescriptor)
-
- enum ParameterIndices { kReceiverIndex, kNameIndex, kValueIndex, kSlotIndex };
-
- static const Register SlotRegister();
-};
-
-
-class VectorStoreICDescriptor : public VectorStoreICTrampolineDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- VectorStoreICDescriptor, VectorStoreICTrampolineDescriptor)
-
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kValueIndex,
- kSlotIndex,
- kVectorIndex
- };
+ DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot, kVector)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreWithVectorDescriptor,
+ StoreDescriptor)
static const Register VectorRegister();
};
-
class LoadWithVectorDescriptor : public LoadDescriptor {
public:
+ DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadWithVectorDescriptor,
LoadDescriptor)
- enum ParameterIndices {
- kReceiverIndex,
- kNameIndex,
- kSlotIndex,
- kVectorIndex
- };
-
static const Register VectorRegister();
};
+class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
+ public:
+ DEFINE_PARAMETERS(kSlot, kVector)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalWithVectorDescriptor,
+ LoadGlobalDescriptor)
+
+ static const Register VectorRegister() {
+ return LoadWithVectorDescriptor::VectorRegister();
+ }
+};
class FastNewClosureDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
};
-
-class FastNewContextDescriptor : public CallInterfaceDescriptor {
+class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(FastNewContextDescriptor, CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kFunction, kSlots)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastNewFunctionContextDescriptor,
+ CallInterfaceDescriptor)
+
+ static const Register FunctionRegister();
+ static const Register SlotsRegister();
};
class FastNewObjectDescriptor : public CallInterfaceDescriptor {
@@ -423,22 +507,43 @@ class FastNewStrictArgumentsDescriptor : public CallInterfaceDescriptor {
class TypeConversionDescriptor final : public CallInterfaceDescriptor {
public:
- enum ParameterIndices { kArgumentIndex };
-
+ DEFINE_PARAMETERS(kArgument)
DECLARE_DESCRIPTOR(TypeConversionDescriptor, CallInterfaceDescriptor)
static const Register ArgumentRegister();
};
+class HasPropertyDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kKey, kObject)
+ DECLARE_DEFAULT_DESCRIPTOR(HasPropertyDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
+
+class ForInFilterDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kKey, kObject)
+ DECLARE_DEFAULT_DESCRIPTOR(ForInFilterDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
+
+class GetPropertyDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kObject, kKey)
+ DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor, CallInterfaceDescriptor,
+ kParameterCount)
+};
class TypeofDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kObject)
DECLARE_DESCRIPTOR(TypeofDescriptor, CallInterfaceDescriptor)
};
class FastCloneRegExpDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kClosure, kLiteralIndex, kPattern, kFlags)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneRegExpDescriptor,
CallInterfaceDescriptor)
};
@@ -446,6 +551,7 @@ class FastCloneRegExpDescriptor : public CallInterfaceDescriptor {
class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kClosure, kLiteralIndex, kConstantElements)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastCloneShallowArrayDescriptor,
CallInterfaceDescriptor)
};
@@ -459,6 +565,7 @@ class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kVector, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateAllocationSiteDescriptor,
CallInterfaceDescriptor)
};
@@ -466,13 +573,7 @@ class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
public:
- enum ParameterIndices {
- kVectorIndex,
- kSlotIndex,
- kValueIndex,
- kParameterCount
- };
-
+ DEFINE_PARAMETERS(kVector, kSlot, kValue)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CreateWeakCellDescriptor,
CallInterfaceDescriptor)
};
@@ -480,6 +581,7 @@ class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
class CallTrampolineDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kActualArgumentsCount)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallTrampolineDescriptor,
CallInterfaceDescriptor)
};
@@ -487,6 +589,8 @@ class CallTrampolineDescriptor : public CallInterfaceDescriptor {
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
+ kAllocationSite)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructStubDescriptor,
CallInterfaceDescriptor)
};
@@ -494,6 +598,7 @@ class ConstructStubDescriptor : public CallInterfaceDescriptor {
class ConstructTrampolineDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ConstructTrampolineDescriptor,
CallInterfaceDescriptor)
};
@@ -507,6 +612,7 @@ class CallFunctionDescriptor : public CallInterfaceDescriptor {
class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
CallFunctionWithFeedbackDescriptor, CallInterfaceDescriptor)
};
@@ -515,6 +621,7 @@ class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
class CallFunctionWithFeedbackAndVectorDescriptor
: public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kSlot, kVector)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
CallFunctionWithFeedbackAndVectorDescriptor, CallInterfaceDescriptor)
};
@@ -528,21 +635,14 @@ class CallConstructDescriptor : public CallInterfaceDescriptor {
class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kLength, kIndex, kInput)
DECLARE_DESCRIPTOR(RegExpConstructResultDescriptor, CallInterfaceDescriptor)
};
-class LoadGlobalViaContextDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalViaContextDescriptor,
- CallInterfaceDescriptor)
-
- static const Register SlotRegister();
-};
-
-
class StoreGlobalViaContextDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kSlot, kValue)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalViaContextDescriptor,
CallInterfaceDescriptor)
@@ -550,9 +650,16 @@ class StoreGlobalViaContextDescriptor : public CallInterfaceDescriptor {
static const Register ValueRegister();
};
+class CopyFastSmiOrObjectElementsDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kObject)
+ DECLARE_DEFAULT_DESCRIPTOR(CopyFastSmiOrObjectElementsDescriptor,
+ CallInterfaceDescriptor, kParameterCount)
+};
class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kObject, kMap)
DECLARE_DESCRIPTOR(TransitionElementsKindDescriptor, CallInterfaceDescriptor)
};
@@ -570,96 +677,93 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(AllocateMutableHeapNumberDescriptor,
- CallInterfaceDescriptor)
-};
-
-
-class AllocateInNewSpaceDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(AllocateInNewSpaceDescriptor, CallInterfaceDescriptor)
-};
-
-
-class ArrayConstructorConstantArgCountDescriptor
- : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(ArrayConstructorConstantArgCountDescriptor,
- CallInterfaceDescriptor)
-};
-
-
-class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArrayConstructorDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
+ kFunctionParameter)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ ArrayNoArgumentConstructorDescriptor, CallInterfaceDescriptor)
};
-
-class InternalArrayConstructorConstantArgCountDescriptor
+class ArraySingleArgumentConstructorDescriptor
: public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR(InternalArrayConstructorConstantArgCountDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
+ kFunctionParameter, kArraySizeSmiParameter)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ ArraySingleArgumentConstructorDescriptor, CallInterfaceDescriptor)
};
-
-class InternalArrayConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
- InternalArrayConstructorDescriptor, CallInterfaceDescriptor)
+ ArrayNArgumentsConstructorDescriptor, CallInterfaceDescriptor)
};
class CompareDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kLeft, kRight)
DECLARE_DESCRIPTOR(CompareDescriptor, CallInterfaceDescriptor)
};
class BinaryOpDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kLeft, kRight)
DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
};
class BinaryOpWithAllocationSiteDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kAllocationSite, kLeft, kRight)
DECLARE_DESCRIPTOR(BinaryOpWithAllocationSiteDescriptor,
CallInterfaceDescriptor)
};
+class BinaryOpWithVectorDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kLeft, kRight, kSlot, kVector)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(BinaryOpWithVectorDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class CountOpDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CountOpDescriptor, CallInterfaceDescriptor)
+};
class StringAddDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kLeft, kRight)
DECLARE_DESCRIPTOR(StringAddDescriptor, CallInterfaceDescriptor)
};
class StringCompareDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kLeft, kRight)
DECLARE_DESCRIPTOR(StringCompareDescriptor, CallInterfaceDescriptor)
- enum ParameterIndices { kLeftIndex, kRightIndex, kParameterCount };
static const Register LeftRegister();
static const Register RightRegister();
};
-
+// TODO(ishell): not used, remove.
class KeyedDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(KeyedDescriptor, CallInterfaceDescriptor)
};
-
+// TODO(ishell): not used, remove
class NamedDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(NamedDescriptor, CallInterfaceDescriptor)
};
-
+// TODO(ishell): not used, remove.
class CallHandlerDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallHandlerDescriptor, CallInterfaceDescriptor)
@@ -668,6 +772,8 @@ class CallHandlerDescriptor : public CallInterfaceDescriptor {
class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
+ kExpectedArgumentsCount)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArgumentAdaptorDescriptor,
CallInterfaceDescriptor)
};
@@ -685,6 +791,7 @@ class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
//
class ApiCallbackDescriptorBase : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kFunction, kCallData, kHolder, kApiFunctionAddress)
static CallInterfaceDescriptor ForArgs(Isolate* isolate, int argc);
protected:
@@ -746,60 +853,53 @@ class ApiCallbackWith7ArgsDescriptor : public ApiCallbackDescriptorBase {
class ApiGetterDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ApiGetterDescriptor,
- CallInterfaceDescriptor)
+ DEFINE_PARAMETERS(kReceiver, kHolder, kCallback)
+ DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
- static const Register function_address();
+ static const Register ReceiverRegister();
+ static const Register HolderRegister();
+ static const Register CallbackRegister();
};
-
class MathPowTaggedDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kExponent)
DECLARE_DESCRIPTOR(MathPowTaggedDescriptor, CallInterfaceDescriptor)
static const Register exponent();
};
-
class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kExponent)
DECLARE_DESCRIPTOR(MathPowIntegerDescriptor, CallInterfaceDescriptor)
static const Register exponent();
};
-
-class ContextOnlyDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
-};
-
-class FastArrayPushDescriptor : public CallInterfaceDescriptor {
+class VarArgFunctionDescriptor : public CallInterfaceDescriptor {
public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FastArrayPushDescriptor,
+ DEFINE_PARAMETERS(kActualArgumentsCount)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(VarArgFunctionDescriptor,
CallInterfaceDescriptor)
};
+// TODO(turbofan): We should probably rename this to GrowFastElementsDescriptor.
class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kObject, kKey)
DECLARE_DESCRIPTOR(GrowArrayElementsDescriptor, CallInterfaceDescriptor)
- enum RegisterInfo { kObjectIndex, kKeyIndex };
static const Register ObjectRegister();
static const Register KeyRegister();
};
class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
+ kDispatchTable)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
CallInterfaceDescriptor)
-
- static const int kAccumulatorParameter = 0;
- static const int kRegisterFileParameter = 1;
- static const int kBytecodeOffsetParameter = 2;
- static const int kBytecodeArrayParameter = 3;
- static const int kDispatchTableParameter = 4;
- static const int kContextParameter = 5;
};
class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
@@ -822,10 +922,16 @@ class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
};
+class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
+};
+
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
#undef DECLARE_DESCRIPTOR_WITH_BASE_AND_FUNCTION_TYPE_ARG
+#undef DEFINE_PARAMETERS
// We define the association between CallDescriptors::Key and the specialized
// descriptor here to reduce boilerplate and mistakes.
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index 5ad730c8a4..d12fcf90d9 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -2,5 +2,6 @@ set noparent
bmeurer@chromium.org
mstarzinger@chromium.org
+mythria@chromium.org
oth@chromium.org
rmcilroy@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 109b01eab3..9bef5a5a4c 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -3,276 +3,169 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-builder.h"
+
#include "src/compiler.h"
+#include "src/globals.h"
+#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-peephole-optimizer.h"
+#include "src/interpreter/bytecode-register-optimizer.h"
#include "src/interpreter/interpreter-intrinsics.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
- public:
- explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
- : array_builder_(array_builder),
- previous_bytecode_start_(array_builder_.last_bytecode_start_) {
- // This helper is expected to be instantiated only when the last bytecode is
- // in the same basic block.
- DCHECK(array_builder_.LastBytecodeInSameBlock());
- bytecode_ = Bytecodes::FromByte(
- array_builder_.bytecodes()->at(previous_bytecode_start_));
- operand_scale_ = OperandScale::kSingle;
- if (Bytecodes::IsPrefixScalingBytecode(bytecode_)) {
- operand_scale_ = Bytecodes::PrefixBytecodeToOperandScale(bytecode_);
- bytecode_ = Bytecodes::FromByte(
- array_builder_.bytecodes()->at(previous_bytecode_start_ + 1));
- }
- }
-
- // Returns the previous bytecode in the same basic block.
- MUST_USE_RESULT Bytecode GetBytecode() const {
- DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- return bytecode_;
- }
-
- MUST_USE_RESULT Register GetRegisterOperand(int operand_index) const {
- return Register::FromOperand(GetSignedOperand(operand_index));
- }
-
- MUST_USE_RESULT uint32_t GetIndexOperand(int operand_index) const {
- return GetUnsignedOperand(operand_index);
- }
-
- Handle<Object> GetConstantForIndexOperand(int operand_index) const {
- return array_builder_.constant_array_builder()->At(
- GetIndexOperand(operand_index));
- }
-
- private:
- // Returns the signed operand at operand_index for the previous
- // bytecode in the same basic block.
- MUST_USE_RESULT int32_t GetSignedOperand(int operand_index) const {
- DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- OperandType operand_type =
- Bytecodes::GetOperandType(bytecode_, operand_index);
- DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start = GetOperandStart(operand_index);
- return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
- operand_scale_);
- }
-
- // Returns the unsigned operand at operand_index for the previous
- // bytecode in the same basic block.
- MUST_USE_RESULT uint32_t GetUnsignedOperand(int operand_index) const {
- DCHECK_EQ(array_builder_.last_bytecode_start_, previous_bytecode_start_);
- OperandType operand_type =
- Bytecodes::GetOperandType(bytecode_, operand_index);
- DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start = GetOperandStart(operand_index);
- return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
- operand_scale_);
- }
-
- const uint8_t* GetOperandStart(int operand_index) const {
- size_t operand_offset =
- previous_bytecode_start_ + prefix_offset() +
- Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale_);
- return &(*array_builder_.bytecodes())[0] + operand_offset;
- }
-
- int prefix_offset() const {
- return Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale_) ? 1
- : 0;
- }
-
- const BytecodeArrayBuilder& array_builder_;
- OperandScale operand_scale_;
- Bytecode bytecode_;
- size_t previous_bytecode_start_;
-
- DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
-};
-
-BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
- int parameter_count,
- int context_count, int locals_count,
- FunctionLiteral* literal)
- : isolate_(isolate),
- zone_(zone),
- bytecodes_(zone),
+BytecodeArrayBuilder::BytecodeArrayBuilder(
+ Isolate* isolate, Zone* zone, int parameter_count, int context_count,
+ int locals_count, FunctionLiteral* literal,
+ SourcePositionTableBuilder::RecordingMode source_position_mode)
+ : zone_(zone),
bytecode_generated_(false),
- constant_array_builder_(isolate, zone),
- handler_table_builder_(isolate, zone),
- source_position_table_builder_(isolate, zone),
- last_block_end_(0),
- last_bytecode_start_(~0),
- exit_seen_in_block_(false),
- unbound_jumps_(0),
+ constant_array_builder_(zone, isolate->factory()->the_hole_value()),
+ handler_table_builder_(zone),
+ return_seen_in_block_(false),
parameter_count_(parameter_count),
local_register_count_(locals_count),
context_register_count_(context_count),
- temporary_allocator_(zone, fixed_register_count()) {
+ temporary_allocator_(zone, fixed_register_count()),
+ bytecode_array_writer_(zone, &constant_array_builder_,
+ source_position_mode),
+ pipeline_(&bytecode_array_writer_) {
DCHECK_GE(parameter_count_, 0);
DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
+
+ if (FLAG_ignition_deadcode) {
+ pipeline_ = new (zone) BytecodeDeadCodeOptimizer(pipeline_);
+ }
+
+ if (FLAG_ignition_peephole) {
+ pipeline_ = new (zone) BytecodePeepholeOptimizer(pipeline_);
+ }
+
+ if (FLAG_ignition_reo) {
+ pipeline_ = new (zone) BytecodeRegisterOptimizer(
+ zone, &temporary_allocator_, parameter_count, pipeline_);
+ }
+
return_position_ =
literal ? std::max(literal->start_position(), literal->end_position() - 1)
- : RelocInfo::kNoPosition;
- LOG_CODE_EVENT(isolate_, CodeStartLinePosInfoRecordEvent(
- source_position_table_builder()));
+ : kNoSourcePosition;
}
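
The constructor assembles the emission pipeline innermost-first: the BytecodeArrayWriter is the terminal stage, and each enabled optimizer wraps the current head, so a written node flows register optimizer -> peephole -> dead-code eliminator -> writer. A minimal sketch of that decorator-style chaining, using hypothetical stand-in types rather than the real pipeline classes:

// Hypothetical stand-ins (not the real V8 classes); a sketch of the
// decorator-style chaining assembled in the constructor above.
#include <cassert>
#include <vector>

struct Node { int bytecode; };

struct Stage {
  virtual ~Stage() = default;
  virtual void Write(Node* node) = 0;
};

struct Writer : Stage {
  std::vector<int> out;
  void Write(Node* node) override { out.push_back(node->bytecode); }
};

struct Optimizer : Stage {
  explicit Optimizer(Stage* next) : next(next) {}
  // A real stage would inspect or rewrite the node before forwarding.
  void Write(Node* node) override { next->Write(node); }
  Stage* next;
};

int main() {
  Writer writer;
  Stage* pipeline = &writer;      // terminal stage, like bytecode_array_writer_
  Optimizer deadcode(pipeline);   // wraps the head, like FLAG_ignition_deadcode
  pipeline = &deadcode;
  Optimizer peephole(pipeline);   // wraps again, like FLAG_ignition_peephole
  pipeline = &peephole;
  Node n{1};
  pipeline->Write(&n);            // flows peephole -> deadcode -> writer
  assert(writer.out.size() == 1);
}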
-BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
-
Register BytecodeArrayBuilder::first_context_register() const {
DCHECK_GT(context_register_count_, 0);
return Register(local_register_count_);
}
-
Register BytecodeArrayBuilder::last_context_register() const {
DCHECK_GT(context_register_count_, 0);
return Register(local_register_count_ + context_register_count_ - 1);
}
-
Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
return Register::FromParameterIndex(parameter_index, parameter_count());
}
-
bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
return reg.is_parameter() || reg.index() < locals_count();
}
-
-Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
- DCHECK_EQ(bytecode_generated_, false);
- DCHECK(exit_seen_in_block_);
-
- int bytecode_size = static_cast<int>(bytecodes_.size());
- int register_count = fixed_and_temporary_register_count();
- int frame_size = register_count * kPointerSize;
- Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
- Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
- Handle<ByteArray> source_position_table =
- source_position_table_builder()->ToSourcePositionTable();
- Handle<BytecodeArray> bytecode_array = isolate_->factory()->NewBytecodeArray(
- bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
- constant_pool);
- bytecode_array->set_handler_table(*handler_table);
- bytecode_array->set_source_position_table(*source_position_table);
-
- void* line_info = source_position_table_builder()->DetachJITHandlerData();
- LOG_CODE_EVENT(isolate_, CodeEndLinePosInfoRecordEvent(
- AbstractCode::cast(*bytecode_array), line_info));
-
+Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
+ DCHECK(return_seen_in_block_);
+ DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
- return bytecode_array;
-}
-template <size_t N>
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t (&operands)[N],
- OperandScale operand_scale) {
- // Don't output dead code.
- if (exit_seen_in_block_) return;
+ Handle<FixedArray> handler_table =
+ handler_table_builder()->ToHandlerTable(isolate);
+ return pipeline_->ToBytecodeArray(isolate, fixed_register_count(),
+ parameter_count(), handler_table);
+}
- int operand_count = static_cast<int>(N);
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
+namespace {
- last_bytecode_start_ = bytecodes()->size();
- // Emit prefix bytecode for scale if required.
- if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
- bytecodes()->push_back(Bytecodes::ToByte(
- Bytecodes::OperandScaleToPrefixBytecode(operand_scale)));
- }
+static bool ExpressionPositionIsNeeded(Bytecode bytecode) {
+  // An expression position is always needed if filtering is turned
+  // off. Otherwise an expression position is only needed if the
+  // bytecode has external side effects.
+ return !FLAG_ignition_filter_expression_positions ||
+ !Bytecodes::IsWithoutExternalSideEffects(bytecode);
+}
- // Emit bytecode.
- bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+} // namespace
- // Emit operands.
- for (int i = 0; i < operand_count; i++) {
- DCHECK(OperandIsValid(bytecode, operand_scale, i, operands[i]));
- switch (Bytecodes::GetOperandSize(bytecode, i, operand_scale)) {
- case OperandSize::kNone:
- UNREACHABLE();
- break;
- case OperandSize::kByte:
- bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
- break;
- case OperandSize::kShort: {
- uint8_t operand_bytes[2];
- WriteUnalignedUInt16(operand_bytes, operands[i]);
- bytecodes()->insert(bytecodes()->end(), operand_bytes,
- operand_bytes + 2);
- break;
- }
- case OperandSize::kQuad: {
- uint8_t operand_bytes[4];
- WriteUnalignedUInt32(operand_bytes, operands[i]);
- bytecodes()->insert(bytecodes()->end(), operand_bytes,
- operand_bytes + 4);
- break;
- }
+void BytecodeArrayBuilder::AttachSourceInfo(BytecodeNode* node) {
+ if (latest_source_info_.is_valid()) {
+ // Statement positions need to be emitted immediately. Expression
+ // positions can be pushed back until a bytecode is found that can
+ // throw. Hence we only invalidate the existing source position
+ // information if it is used.
+ if (latest_source_info_.is_statement() ||
+ ExpressionPositionIsNeeded(node->bytecode())) {
+ node->source_info().Clone(latest_source_info_);
+ latest_source_info_.set_invalid();
}
}
}
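
AttachSourceInfo implements a one-slot latch: a pending statement position is flushed onto the very next node, while a pending expression position is held until a bytecode that can actually throw consumes it. A compilable sketch of that rule, with simplified stand-in types (the real BytecodeSourceInfo carries more state):

// Simplified stand-in for BytecodeSourceInfo; sketch of the latch rule above.
struct PendingSourceInfo {
  bool valid = false;
  bool is_statement = false;
  int position = -1;
};

// Returns true if the pending position should be attached to (and thereby
// consumed by) the next emitted bytecode.
bool ShouldAttach(const PendingSourceInfo& pending, bool filtering_on,
                  bool bytecode_has_external_side_effects) {
  if (!pending.valid) return false;
  // Statement positions flush immediately; expression positions wait for a
  // bytecode that can throw (unless filtering is disabled).
  return pending.is_statement || !filtering_on ||
         bytecode_has_external_side_effects;
}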
-void BytecodeArrayBuilder::Output(Bytecode bytecode) {
- // Don't output dead code.
- if (exit_seen_in_block_) return;
-
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- last_bytecode_start_ = bytecodes()->size();
- bytecodes()->push_back(Bytecodes::ToByte(bytecode));
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3) {
+ DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
- OperandScale operand_scale,
- uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3) {
- uint32_t operands[] = {operand0, operand1, operand2, operand3};
- Output(bytecode, operands, operand_scale);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2) {
+ DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
+ BytecodeNode node(bytecode, operand0, operand1, operand2);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
- OperandScale operand_scale,
- uint32_t operand0, uint32_t operand1,
- uint32_t operand2) {
- uint32_t operands[] = {operand0, operand1, operand2};
- Output(bytecode, operands, operand_scale);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1) {
+ DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
+ BytecodeNode node(bytecode, operand0, operand1);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
- OperandScale operand_scale,
- uint32_t operand0, uint32_t operand1) {
- uint32_t operands[] = {operand0, operand1};
- Output(bytecode, operands, operand_scale);
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
+ DCHECK(OperandsAreValid(bytecode, 1, operand0));
+ BytecodeNode node(bytecode, operand0);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
-void BytecodeArrayBuilder::OutputScaled(Bytecode bytecode,
- OperandScale operand_scale,
- uint32_t operand0) {
- uint32_t operands[] = {operand0};
- Output(bytecode, operands, operand_scale);
+void BytecodeArrayBuilder::Output(Bytecode bytecode) {
+ DCHECK(OperandsAreValid(bytecode, 0));
+ BytecodeNode node(bytecode);
+ AttachSourceInfo(&node);
+ pipeline()->Write(&node);
}
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
- Register reg) {
- OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
- OutputScaled(BytecodeForBinaryOperation(op), operand_scale,
- RegisterOperand(reg));
+ Register reg,
+ int feedback_slot) {
+ Output(BytecodeForBinaryOperation(op), RegisterOperand(reg),
+ UnsignedOperand(feedback_slot));
return *this;
}
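
Every emitter returns *this, so call sites compose bytecode sequences as fluent chains. A tiny sketch of the pattern (hypothetical mini-builder, not the real API):

// Hypothetical mini-builder mirroring the fluent `return *this` pattern
// used throughout this file.
struct ChainBuilder {
  ChainBuilder& LoadLiteral(int /*smi*/) { return *this; }
  ChainBuilder& BinaryOperation(char /*op*/, int /*reg*/, int /*slot*/) {
    return *this;
  }
  ChainBuilder& Return() { return *this; }
};

void EmitOnePlusTwo() {
  ChainBuilder builder;
  // Reads like the generator call sites: load, combine, return.
  builder.LoadLiteral(2).BinaryOperation('+', 0, 0).Return();
}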
-BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op) {
- Output(BytecodeForCountOperation(op));
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
+ int feedback_slot) {
+ Output(BytecodeForCountOperation(op), UnsignedOperand(feedback_slot));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
- Output(Bytecode::kLogicalNot);
+ Output(Bytecode::kToBooleanLogicalNot);
return *this;
}
@@ -284,12 +177,15 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
Register reg) {
- OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(reg));
- OutputScaled(BytecodeForCompareOperation(op), operand_scale,
- RegisterOperand(reg));
+ Output(BytecodeForCompareOperation(op), RegisterOperand(reg));
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
+ size_t entry) {
+ Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+ return *this;
+}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
v8::internal::Smi* smi) {
@@ -297,48 +193,37 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
if (raw_smi == 0) {
Output(Bytecode::kLdaZero);
} else {
- OperandSize operand_size = SizeForSignedOperand(raw_smi);
- OperandScale operand_scale = OperandSizesToScale(operand_size);
- OutputScaled(Bytecode::kLdaSmi, operand_scale,
- SignedOperand(raw_smi, operand_size));
+ Output(Bytecode::kLdaSmi, SignedOperand(raw_smi));
}
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
size_t entry = GetConstantPoolEntry(object);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(entry));
- OutputScaled(Bytecode::kLdaConstant, operand_scale, UnsignedOperand(entry));
+ Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
Output(Bytecode::kLdaUndefined);
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
Output(Bytecode::kLdaNull);
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
Output(Bytecode::kLdaTheHole);
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
Output(Bytecode::kLdaTrue);
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
Output(Bytecode::kLdaFalse);
return *this;
@@ -346,47 +231,29 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
- if (!IsRegisterInAccumulator(reg)) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(reg));
- OutputScaled(Bytecode::kLdar, operand_scale, RegisterOperand(reg));
- }
+ Output(Bytecode::kLdar, RegisterOperand(reg));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
- if (!IsRegisterInAccumulator(reg)) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(reg));
- OutputScaled(Bytecode::kStar, operand_scale, RegisterOperand(reg));
- }
+ Output(Bytecode::kStar, RegisterOperand(reg));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
Register to) {
DCHECK(from != to);
- OperandScale operand_scale = OperandSizesToScale(SizeForRegisterOperand(from),
- SizeForRegisterOperand(to));
- OutputScaled(Bytecode::kMov, operand_scale, RegisterOperand(from),
- RegisterOperand(to));
+ Output(Bytecode::kMov, RegisterOperand(from), RegisterOperand(to));
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
- const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
+ TypeofMode typeof_mode) {
// TODO(rmcilroy): Potentially store typeof information in an
// operand rather than having extra bytecodes.
Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
- size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
- OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
- UnsignedOperand(feedback_slot));
+ Output(bytecode, UnsignedOperand(feedback_slot));
return *this;
}
@@ -394,31 +261,21 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreGlobal(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
- OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index),
- UnsignedOperand(feedback_slot));
+ Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
int slot_index) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
- OutputScaled(Bytecode::kLdaContextSlot, operand_scale,
- RegisterOperand(context), UnsignedOperand(slot_index));
+ Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
+ UnsignedOperand(slot_index));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
int slot_index) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(context), SizeForUnsignedOperand(slot_index));
- OutputScaled(Bytecode::kStaContextSlot, operand_scale,
- RegisterOperand(context), UnsignedOperand(slot_index));
+ Output(Bytecode::kStaContextSlot, RegisterOperand(context),
+ UnsignedOperand(slot_index));
return *this;
}
@@ -428,9 +285,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
? Bytecode::kLdaLookupSlotInsideTypeof
: Bytecode::kLdaLookupSlot;
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index));
- OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
+ Output(bytecode, UnsignedOperand(name_index));
return *this;
}
@@ -438,69 +293,75 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
const Handle<String> name, LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreLookupSlot(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(name_index));
- OutputScaled(bytecode, operand_scale, UnsignedOperand(name_index));
+ Output(bytecode, UnsignedOperand(name_index));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
Register object, const Handle<Name> name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
- OutputScaled(Bytecode::kLoadIC, operand_scale, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ Output(Bytecode::kLdaNamedProperty, RegisterOperand(object),
+ UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForUnsignedOperand(feedback_slot));
- OutputScaled(Bytecode::kKeyedLoadIC, operand_scale, RegisterOperand(object),
- UnsignedOperand(feedback_slot));
+ Output(Bytecode::kLdaKeyedProperty, RegisterOperand(object),
+ UnsignedOperand(feedback_slot));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
Register object, const Handle<Name> name, int feedback_slot,
LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForStoreIC(language_mode);
+ Bytecode bytecode = BytecodeForStoreNamedProperty(language_mode);
size_t name_index = GetConstantPoolEntry(name);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForUnsignedOperand(name_index),
- SizeForUnsignedOperand(feedback_slot));
- OutputScaled(bytecode, operand_scale, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ Output(bytecode, RegisterOperand(object), UnsignedOperand(name_index),
+ UnsignedOperand(feedback_slot));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(object), SizeForRegisterOperand(key),
- SizeForUnsignedOperand(feedback_slot));
- OutputScaled(bytecode, operand_scale, RegisterOperand(object),
- RegisterOperand(key), UnsignedOperand(feedback_slot));
+ Bytecode bytecode = BytecodeForStoreKeyedProperty(language_mode);
+ Output(bytecode, RegisterOperand(object), RegisterOperand(key),
+ UnsignedOperand(feedback_slot));
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
+ int flags) {
+ Output(Bytecode::kCreateClosure, UnsignedOperand(entry),
+ UnsignedOperand(flags));
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
+ Handle<ScopeInfo> scope_info) {
+ size_t entry = GetConstantPoolEntry(scope_info);
+ Output(Bytecode::kCreateBlockContext, UnsignedOperand(entry));
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
+ Register exception, Handle<String> name) {
+ size_t name_index = GetConstantPoolEntry(name);
+ Output(Bytecode::kCreateCatchContext, RegisterOperand(exception),
+ UnsignedOperand(name_index));
+ return *this;
+}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
- Handle<SharedFunctionInfo> shared_info, PretenureFlag tenured) {
- size_t entry = GetConstantPoolEntry(shared_info);
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(entry));
- OutputScaled(Bytecode::kCreateClosure, operand_scale, UnsignedOperand(entry),
- UnsignedOperand(static_cast<size_t>(tenured)));
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
+ Output(Bytecode::kCreateFunctionContext, UnsignedOperand(slots));
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(Register object) {
+ Output(Bytecode::kCreateWithContext, RegisterOperand(object));
+ return *this;
+}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
CreateArgumentsType type) {
@@ -512,387 +373,151 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
Handle<String> pattern, int literal_index, int flags) {
size_t pattern_entry = GetConstantPoolEntry(pattern);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForUnsignedOperand(pattern_entry),
- SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
- OutputScaled(Bytecode::kCreateRegExpLiteral, operand_scale,
- UnsignedOperand(pattern_entry), UnsignedOperand(literal_index),
- UnsignedOperand(flags));
+ Output(Bytecode::kCreateRegExpLiteral, UnsignedOperand(pattern_entry),
+ UnsignedOperand(literal_index), UnsignedOperand(flags));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
Handle<FixedArray> constant_elements, int literal_index, int flags) {
size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForUnsignedOperand(constant_elements_entry),
- SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
- OutputScaled(Bytecode::kCreateArrayLiteral, operand_scale,
- UnsignedOperand(constant_elements_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags));
+ Output(Bytecode::kCreateArrayLiteral,
+ UnsignedOperand(constant_elements_entry),
+ UnsignedOperand(literal_index), UnsignedOperand(flags));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags) {
+ Handle<FixedArray> constant_properties, int literal_index, int flags,
+ Register output) {
size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForUnsignedOperand(constant_properties_entry),
- SizeForUnsignedOperand(literal_index), SizeForUnsignedOperand(flags));
- OutputScaled(Bytecode::kCreateObjectLiteral, operand_scale,
- UnsignedOperand(constant_properties_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags));
+ Output(Bytecode::kCreateObjectLiteral,
+ UnsignedOperand(constant_properties_entry),
+ UnsignedOperand(literal_index), UnsignedOperand(flags),
+ RegisterOperand(output));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(context));
- OutputScaled(Bytecode::kPushContext, operand_scale, RegisterOperand(context));
+ Output(Bytecode::kPushContext, RegisterOperand(context));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(context));
- OutputScaled(Bytecode::kPopContext, operand_scale, RegisterOperand(context));
+ Output(Bytecode::kPopContext, RegisterOperand(context));
return *this;
}
-
-bool BytecodeArrayBuilder::NeedToBooleanCast() {
- if (!LastBytecodeInSameBlock()) {
- return true;
- }
- PreviousBytecodeHelper previous_bytecode(*this);
- switch (previous_bytecode.GetBytecode()) {
- // If the previous bytecode puts a boolean in the accumulator return true.
- case Bytecode::kLdaTrue:
- case Bytecode::kLdaFalse:
- case Bytecode::kLogicalNot:
- case Bytecode::kTestEqual:
- case Bytecode::kTestNotEqual:
- case Bytecode::kTestEqualStrict:
- case Bytecode::kTestLessThan:
- case Bytecode::kTestLessThanOrEqual:
- case Bytecode::kTestGreaterThan:
- case Bytecode::kTestGreaterThanOrEqual:
- case Bytecode::kTestInstanceOf:
- case Bytecode::kTestIn:
- case Bytecode::kForInDone:
- return false;
- default:
- return true;
- }
-}
-
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject() {
- Output(Bytecode::kToObject);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToJSObject(
+ Register out) {
+ Output(Bytecode::kToObject, RegisterOperand(out));
return *this;
}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName() {
- if (LastBytecodeInSameBlock()) {
- PreviousBytecodeHelper previous_bytecode(*this);
- switch (previous_bytecode.GetBytecode()) {
- case Bytecode::kToName:
- case Bytecode::kTypeOf:
- return *this;
- case Bytecode::kLdaConstant: {
- Handle<Object> object = previous_bytecode.GetConstantForIndexOperand(0);
- if (object->IsName()) return *this;
- break;
- }
- default:
- break;
- }
- }
- Output(Bytecode::kToName);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToName(
+ Register out) {
+ Output(Bytecode::kToName, RegisterOperand(out));
return *this;
}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber() {
-  // TODO(rmcilroy): consider omitting if the preceding bytecode always returns
- // a number.
- Output(Bytecode::kToNumber);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToNumber(
+ Register out) {
+ Output(Bytecode::kToNumber, RegisterOperand(out));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
- if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update its location.
- PatchJump(bytecodes()->end(), bytecodes()->begin() + label->offset());
- // Now treat as if the label will only be back referred to.
- }
- label->bind_to(bytecodes()->size());
+ pipeline_->BindLabel(label);
LeaveBasicBlock();
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
BytecodeLabel* label) {
- DCHECK(!label->is_bound());
- DCHECK(target.is_bound());
- if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update its location.
- PatchJump(bytecodes()->begin() + target.offset(),
- bytecodes()->begin() + label->offset());
- // Now treat as if the label will only be back referred to.
- }
- label->bind_to(target.offset());
+ pipeline_->BindLabel(target, label);
LeaveBasicBlock();
return *this;
}
-
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
- Bytecode jump_bytecode) {
- switch (jump_bytecode) {
- case Bytecode::kJump:
- return Bytecode::kJumpConstant;
- case Bytecode::kJumpIfTrue:
- return Bytecode::kJumpIfTrueConstant;
- case Bytecode::kJumpIfFalse:
- return Bytecode::kJumpIfFalseConstant;
- case Bytecode::kJumpIfToBooleanTrue:
- return Bytecode::kJumpIfToBooleanTrueConstant;
- case Bytecode::kJumpIfToBooleanFalse:
- return Bytecode::kJumpIfToBooleanFalseConstant;
- case Bytecode::kJumpIfNotHole:
- return Bytecode::kJumpIfNotHoleConstant;
- case Bytecode::kJumpIfNull:
- return Bytecode::kJumpIfNullConstant;
- case Bytecode::kJumpIfUndefined:
- return Bytecode::kJumpIfUndefinedConstant;
- default:
- UNREACHABLE();
- return Bytecode::kIllegal;
- }
-}
-
-// static
-Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
- switch (jump_bytecode) {
- case Bytecode::kJump:
- case Bytecode::kJumpIfNull:
- case Bytecode::kJumpIfUndefined:
- case Bytecode::kJumpIfNotHole:
- return jump_bytecode;
- case Bytecode::kJumpIfTrue:
- return Bytecode::kJumpIfToBooleanTrue;
- case Bytecode::kJumpIfFalse:
- return Bytecode::kJumpIfToBooleanFalse;
- default:
- UNREACHABLE();
- }
- return Bytecode::kIllegal;
-}
-
-
-void BytecodeArrayBuilder::PatchIndirectJumpWith8BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
- DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
- ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
- DCHECK_EQ(*operand_location, 0);
- if (SizeForSignedOperand(delta) == OperandSize::kByte) {
- // The jump fits within the range of an Imm operand, so cancel
- // the reservation and jump directly.
- constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
- *operand_location = static_cast<uint8_t>(delta);
- } else {
- // The jump does not fit within the range of an Imm operand, so
- // commit reservation putting the offset into the constant pool,
- // and update the jump instruction and operand.
- size_t entry = constant_array_builder()->CommitReservedEntry(
- OperandSize::kByte, handle(Smi::FromInt(delta), isolate()));
- DCHECK(SizeForUnsignedOperand(entry) == OperandSize::kByte);
- jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
- *jump_location = Bytecodes::ToByte(jump_bytecode);
- *operand_location = static_cast<uint8_t>(entry);
- }
-}
-
-void BytecodeArrayBuilder::PatchIndirectJumpWith16BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
- DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
- ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
- uint8_t operand_bytes[2];
- if (SizeForSignedOperand(delta) <= OperandSize::kShort) {
- constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
- WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
- } else {
- jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
- *jump_location = Bytecodes::ToByte(jump_bytecode);
- size_t entry = constant_array_builder()->CommitReservedEntry(
- OperandSize::kShort, handle(Smi::FromInt(delta), isolate()));
- WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
- }
- DCHECK(*operand_location == 0 && *(operand_location + 1) == 0);
- *operand_location++ = operand_bytes[0];
- *operand_location = operand_bytes[1];
-}
-
-void BytecodeArrayBuilder::PatchIndirectJumpWith32BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta) {
- DCHECK(Bytecodes::IsJumpImmediate(Bytecodes::FromByte(*jump_location)));
- constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
- ZoneVector<uint8_t>::iterator operand_location = jump_location + 1;
- uint8_t operand_bytes[4];
- WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
- DCHECK(*operand_location == 0 && *(operand_location + 1) == 0 &&
- *(operand_location + 2) == 0 && *(operand_location + 3) == 0);
- *operand_location++ = operand_bytes[0];
- *operand_location++ = operand_bytes[1];
- *operand_location++ = operand_bytes[2];
- *operand_location = operand_bytes[3];
-}
-
-void BytecodeArrayBuilder::PatchJump(
- const ZoneVector<uint8_t>::iterator& jump_target,
- const ZoneVector<uint8_t>::iterator& jump_location) {
- int delta = static_cast<int>(jump_target - jump_location);
- Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
- int prefix_offset = 0;
- OperandScale operand_scale = OperandScale::kSingle;
- if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
-    // If a prefix scaling bytecode is emitted, the target offset is one
-    // less than in the case without a prefix scaling bytecode.
- delta -= 1;
- prefix_offset = 1;
- operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
- jump_bytecode = Bytecodes::FromByte(*(jump_location + prefix_offset));
- }
-
- DCHECK(Bytecodes::IsJump(jump_bytecode));
- switch (operand_scale) {
- case OperandScale::kSingle:
- PatchIndirectJumpWith8BitOperand(jump_location, delta);
- break;
- case OperandScale::kDouble:
- PatchIndirectJumpWith16BitOperand(jump_location + prefix_offset, delta);
- break;
- case OperandScale::kQuadruple:
- PatchIndirectJumpWith32BitOperand(jump_location + prefix_offset, delta);
- break;
- default:
- UNREACHABLE();
- }
- unbound_jumps_--;
-}
-
-
BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label) {
- // Don't emit dead code.
- if (exit_seen_in_block_) return *this;
-
- // Check if the value in accumulator is boolean, if not choose an
- // appropriate JumpIfToBoolean bytecode.
- if (NeedToBooleanCast()) {
- jump_bytecode = GetJumpWithToBoolean(jump_bytecode);
- }
-
- if (label->is_bound()) {
- // Label has been bound already so this is a backwards jump.
- CHECK_GE(bytecodes()->size(), label->offset());
- CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
- size_t abs_delta = bytecodes()->size() - label->offset();
- int delta = -static_cast<int>(abs_delta);
- OperandSize operand_size = SizeForSignedOperand(delta);
- if (operand_size > OperandSize::kByte) {
- // Adjust for scaling byte prefix for wide jump offset.
- DCHECK_LE(delta, 0);
- delta -= 1;
- }
- OutputScaled(jump_bytecode, OperandSizesToScale(operand_size),
- SignedOperand(delta, operand_size));
- } else {
-    // The label has not yet been bound, so this is a forward reference
-    // that will be patched when the label is bound. We create a
-    // reservation in the constant pool so the jump can be patched
-    // later. The reservation fixes the maximum size of the operand
-    // for the constant, so the jump can be emitted into the bytecode
-    // stream with space for the operand.
- label->set_referrer(bytecodes()->size());
- unbound_jumps_++;
- OperandSize reserved_operand_size =
- constant_array_builder()->CreateReservedEntry();
- OutputScaled(jump_bytecode, OperandSizesToScale(reserved_operand_size), 0);
- }
+ BytecodeNode node(jump_bytecode, 0);
+ AttachSourceInfo(&node);
+ pipeline_->WriteJump(&node, label);
LeaveBasicBlock();
return *this;
}
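
Jump emission and patching now live behind the pipeline's WriteJump, but the mechanism is the same reserve-then-patch handshake that the deleted PatchJump code implemented: a forward jump leaves a placeholder operand that Bind later back-patches. A toy, self-contained version (hypothetical one-byte opcodes and operands only):

// Toy forward-jump patching; one-byte opcode plus one-byte relative operand.
#include <cassert>
#include <cstdint>
#include <vector>

struct ToyLabel {
  bool bound = false;
  size_t operand_offset = 0;  // where the placeholder operand lives
};

void EmitJump(std::vector<uint8_t>* code, ToyLabel* label) {
  code->push_back(0xA0);                  // pretend "Jump" opcode
  label->operand_offset = code->size();   // remember the slot to patch
  code->push_back(0);                     // placeholder until Bind()
}

void Bind(std::vector<uint8_t>* code, ToyLabel* label) {
  assert(!label->bound);
  label->bound = true;
  // The delta is measured from the jump opcode to the bind site.
  size_t jump_start = label->operand_offset - 1;
  (*code)[label->operand_offset] =
      static_cast<uint8_t>(code->size() - jump_start);
}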
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
return OutputJump(Bytecode::kJump, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfTrue, label);
+ // The peephole optimizer attempts to simplify JumpIfToBooleanTrue
+ // to JumpIfTrue.
+ return OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfFalse, label);
+ // The peephole optimizer attempts to simplify JumpIfToBooleanFalse
+ // to JumpIfFalse.
+ return OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
return OutputJump(Bytecode::kJumpIfNull, label);
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
BytecodeLabel* label) {
return OutputJump(Bytecode::kJumpIfUndefined, label);
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck() {
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
+ BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfNotHole, label);
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
+ if (position != kNoSourcePosition) {
+    // We need to attach a non-breakable source position to a stack
+    // check, so we simply add it as an expression position. There can
+    // be a prior statement position from constructs like:
+    //
+    //   do var x; while (false);
+    //
+    // A Nop could be inserted for empty statements, but since no code
+    // is associated with these positions, we instead force the stack
+    // check's expression position, which eliminates the empty
+    // statement's position.
+ latest_source_info_.ForceExpressionPosition(position);
+ }
Output(Bytecode::kStackCheck);
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
- BytecodeLabel* label) {
- return OutputJump(Bytecode::kJumpIfNotHole, label);
+BytecodeArrayBuilder& BytecodeArrayBuilder::OsrPoll(int loop_depth) {
+ Output(Bytecode::kOsrPoll, UnsignedOperand(loop_depth));
+ return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
Output(Bytecode::kThrow);
- exit_seen_in_block_ = true;
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
Output(Bytecode::kReThrow);
- exit_seen_in_block_ = true;
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
SetReturnPosition();
Output(Bytecode::kReturn);
- exit_seen_in_block_ = true;
+ return_seen_in_block_ = true;
return *this;
}
@@ -902,78 +527,76 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
- Register cache_info_triple) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(cache_info_triple));
- OutputScaled(Bytecode::kForInPrepare, operand_scale,
- RegisterOperand(cache_info_triple));
+ Register receiver, Register cache_info_triple) {
+ Output(Bytecode::kForInPrepare, RegisterOperand(receiver),
+ RegisterOperand(cache_info_triple));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
Register cache_length) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(index), SizeForRegisterOperand(cache_length));
- OutputScaled(Bytecode::kForInDone, operand_scale, RegisterOperand(index),
- RegisterOperand(cache_length));
+ Output(Bytecode::kForInDone, RegisterOperand(index),
+ RegisterOperand(cache_length));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
Register receiver, Register index, Register cache_type_array_pair,
int feedback_slot) {
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(receiver), SizeForRegisterOperand(index),
- SizeForRegisterOperand(cache_type_array_pair),
- SizeForUnsignedOperand(feedback_slot));
- OutputScaled(Bytecode::kForInNext, operand_scale, RegisterOperand(receiver),
- RegisterOperand(index), RegisterOperand(cache_type_array_pair),
- UnsignedOperand(feedback_slot));
+ Output(Bytecode::kForInNext, RegisterOperand(receiver),
+ RegisterOperand(index), RegisterOperand(cache_type_array_pair),
+ UnsignedOperand(feedback_slot));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(index));
- OutputScaled(Bytecode::kForInStep, operand_scale, RegisterOperand(index));
+ Output(Bytecode::kForInStep, RegisterOperand(index));
return *this;
}
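
ForInPrepare, ForInDone, ForInNext, and ForInStep together implement the for-in iteration protocol: prepare fills a cache triple for the receiver, then the loop tests for completion, loads the next key, and advances the index. A host-side sketch of the control shape those bytecodes produce (hypothetical state struct, not V8 code):

// Hypothetical host-side equivalent of the emitted for-in loop shape.
#include <cstddef>
#include <functional>

struct ForInCache { std::size_t index = 0, cache_length = 0; };

void RunForIn(ForInCache cache, const std::function<void(std::size_t)>& body) {
  // ForInPrepare has already populated |cache| from the receiver.
  while (cache.index < cache.cache_length) {  // !ForInDone(index, length)
    body(cache.index);                        // ForInNext loads the key
    ++cache.index;                            // ForInStep
  }
}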
+BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
+ Register generator) {
+ Output(Bytecode::kSuspendGenerator, RegisterOperand(generator));
+ return *this;
+}
-BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
- bool will_catch) {
- handler_table_builder()->SetHandlerTarget(handler_id, bytecodes()->size());
- handler_table_builder()->SetPrediction(handler_id, will_catch);
+BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
+ Register generator) {
+ Output(Bytecode::kResumeGenerator, RegisterOperand(generator));
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(
+ int handler_id, HandlerTable::CatchPrediction catch_prediction) {
+ BytecodeLabel handler;
+ Bind(&handler);
+ handler_table_builder()->SetHandlerTarget(handler_id, handler.offset());
+ handler_table_builder()->SetPrediction(handler_id, catch_prediction);
+ return *this;
+}
BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
Register context) {
- handler_table_builder()->SetTryRegionStart(handler_id, bytecodes()->size());
+ BytecodeLabel try_begin;
+ Bind(&try_begin);
+ handler_table_builder()->SetTryRegionStart(handler_id, try_begin.offset());
handler_table_builder()->SetContextRegister(handler_id, context);
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
- handler_table_builder()->SetTryRegionEnd(handler_id, bytecodes()->size());
+ BytecodeLabel try_end;
+ Bind(&try_end);
+ handler_table_builder()->SetTryRegionEnd(handler_id, try_end.offset());
return *this;
}
-
-void BytecodeArrayBuilder::LeaveBasicBlock() {
- last_block_end_ = bytecodes()->size();
- exit_seen_in_block_ = false;
-}
-
void BytecodeArrayBuilder::EnsureReturn() {
- if (!exit_seen_in_block_) {
+ if (!return_seen_in_block_) {
LoadUndefined();
Return();
}
- DCHECK(exit_seen_in_block_);
+ DCHECK(return_seen_in_block_);
}
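
EnsureReturn is what gives a body that falls off the end the JavaScript semantics of returning undefined: if no return was recorded in the final block, a LoadUndefined/Return pair is synthesized. A compilable mini-builder showing the invariant (assumed, simplified API):

// Mini-builder illustrating the EnsureReturn invariant (not the real API).
#include <cassert>

struct MiniBuilder {
  bool return_seen = false;
  int bytecodes_emitted = 0;
  void LoadUndefined() { ++bytecodes_emitted; }
  void Return() { ++bytecodes_emitted; return_seen = true; }
  void EnsureReturn() {
    if (!return_seen) {  // body fell off the end, e.g. `function f() { g(); }`
      LoadUndefined();
      Return();
    }
    assert(return_seen);
  }
};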
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
@@ -982,14 +605,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
int feedback_slot,
TailCallMode tail_call_mode) {
Bytecode bytecode = BytecodeForCall(tail_call_mode);
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(callable), SizeForRegisterOperand(receiver_args),
- SizeForUnsignedOperand(receiver_args_count),
- SizeForUnsignedOperand(feedback_slot));
- OutputScaled(bytecode, operand_scale, RegisterOperand(callable),
- RegisterOperand(receiver_args),
- UnsignedOperand(receiver_args_count),
- UnsignedOperand(feedback_slot));
+ Output(bytecode, RegisterOperand(callable), RegisterOperand(receiver_args),
+ UnsignedOperand(receiver_args_count), UnsignedOperand(feedback_slot));
return *this;
}
@@ -1000,71 +617,57 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(constructor), SizeForRegisterOperand(first_arg),
- SizeForUnsignedOperand(arg_count));
- OutputScaled(Bytecode::kNew, operand_scale, RegisterOperand(constructor),
- RegisterOperand(first_arg), UnsignedOperand(arg_count));
+ Output(Bytecode::kNew, RegisterOperand(constructor),
+ RegisterOperand(first_arg), UnsignedOperand(arg_count));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+ DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- Bytecode bytecode = IntrinsicsHelper::IsSupported(function_id)
- ? Bytecode::kInvokeIntrinsic
- : Bytecode::kCallRuntime;
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count));
- OutputScaled(bytecode, operand_scale, static_cast<uint16_t>(function_id),
- RegisterOperand(first_arg), UnsignedOperand(arg_count));
+ Bytecode bytecode;
+ uint32_t id;
+ if (IntrinsicsHelper::IsSupported(function_id)) {
+ bytecode = Bytecode::kInvokeIntrinsic;
+ id = static_cast<uint32_t>(IntrinsicsHelper::FromRuntimeId(function_id));
+ } else {
+ bytecode = Bytecode::kCallRuntime;
+ id = static_cast<uint32_t>(function_id);
+ }
+ Output(bytecode, id, RegisterOperand(first_arg), UnsignedOperand(arg_count));
return *this;
}
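
CallRuntime now dispatches per call site: runtime functions with an interpreter intrinsic are emitted as kInvokeIntrinsic with the remapped intrinsic id, everything else goes through kCallRuntime with the raw runtime id. The selection logic as a standalone sketch (hypothetical id spaces and types):

// Hypothetical id spaces; sketch of the intrinsic-vs-runtime selection.
#include <cstdint>

enum class ToyBytecode { kInvokeIntrinsic, kCallRuntime };

struct Dispatch { ToyBytecode bytecode; uint32_t id; };

Dispatch SelectRuntimeCall(uint32_t function_id, bool has_intrinsic,
                           uint32_t intrinsic_id) {
  if (has_intrinsic) {
    return {ToyBytecode::kInvokeIntrinsic, intrinsic_id};  // fast path
  }
  return {ToyBytecode::kCallRuntime, function_id};  // generic runtime call
}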
-
BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count,
Register first_return) {
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
- DCHECK(SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
+ DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- OperandScale operand_scale = OperandSizesToScale(
- SizeForRegisterOperand(first_arg), SizeForUnsignedOperand(arg_count),
- SizeForRegisterOperand(first_return));
- OutputScaled(Bytecode::kCallRuntimeForPair, operand_scale,
- static_cast<uint16_t>(function_id), RegisterOperand(first_arg),
- UnsignedOperand(arg_count), RegisterOperand(first_return));
+ Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+ RegisterOperand(first_arg), UnsignedOperand(arg_count),
+ RegisterOperand(first_return));
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
int context_index, Register receiver_args, size_t receiver_args_count) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForUnsignedOperand(context_index),
- SizeForRegisterOperand(receiver_args),
- SizeForUnsignedOperand(receiver_args_count));
- OutputScaled(Bytecode::kCallJSRuntime, operand_scale,
- UnsignedOperand(context_index), RegisterOperand(receiver_args),
- UnsignedOperand(receiver_args_count));
+ Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
+ RegisterOperand(receiver_args), UnsignedOperand(receiver_args_count));
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
- OperandScale operand_scale =
- OperandSizesToScale(SizeForRegisterOperand(object));
- OutputScaled(BytecodeForDelete(language_mode), operand_scale,
- RegisterOperand(object));
+ Output(BytecodeForDelete(language_mode), RegisterOperand(object));
return *this;
}
@@ -1072,107 +675,48 @@ size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
return constant_array_builder()->Insert(object);
}
+size_t BytecodeArrayBuilder::AllocateConstantPoolEntry() {
+ return constant_array_builder()->AllocateEntry();
+}
+
+void BytecodeArrayBuilder::InsertConstantPoolEntryAt(size_t entry,
+ Handle<Object> object) {
+ constant_array_builder()->InsertAllocatedEntry(entry, object);
+}
+
void BytecodeArrayBuilder::SetReturnPosition() {
- if (return_position_ == RelocInfo::kNoPosition) return;
- if (exit_seen_in_block_) return;
- source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
- return_position_);
+ if (return_position_ == kNoSourcePosition) return;
+ latest_source_info_.MakeStatementPosition(return_position_);
}
void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
- if (stmt->position() == RelocInfo::kNoPosition) return;
- if (exit_seen_in_block_) return;
- source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
- stmt->position());
+ if (stmt->position() == kNoSourcePosition) return;
+ latest_source_info_.MakeStatementPosition(stmt->position());
}
void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
- if (expr->position() == RelocInfo::kNoPosition) return;
- if (exit_seen_in_block_) return;
- source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
- expr->position());
+ if (expr->position() == kNoSourcePosition) return;
+ if (!latest_source_info_.is_statement()) {
+ // Ensure the current expression position is overwritten with the
+ // latest value.
+ latest_source_info_.MakeExpressionPosition(expr->position());
+ }
}
void BytecodeArrayBuilder::SetExpressionAsStatementPosition(Expression* expr) {
- if (expr->position() == RelocInfo::kNoPosition) return;
- if (exit_seen_in_block_) return;
- source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
- expr->position());
+ if (expr->position() == kNoSourcePosition) return;
+ latest_source_info_.MakeStatementPosition(expr->position());
}
bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
return temporary_register_allocator()->RegisterIsLive(reg);
}
-bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode,
- OperandScale operand_scale,
- int operand_index,
- uint32_t operand_value) const {
- OperandSize operand_size =
- Bytecodes::GetOperandSize(bytecode, operand_index, operand_scale);
- OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
- switch (operand_type) {
- case OperandType::kNone:
- return false;
- case OperandType::kRegCount: {
- if (operand_index > 0) {
- OperandType previous_operand_type =
- Bytecodes::GetOperandType(bytecode, operand_index - 1);
- if (previous_operand_type != OperandType::kMaybeReg &&
- previous_operand_type != OperandType::kReg) {
- return false;
- }
- }
- } // Fall-through
- case OperandType::kFlag8:
- case OperandType::kIdx:
- case OperandType::kRuntimeId:
- case OperandType::kImm: {
- size_t unsigned_value = static_cast<size_t>(operand_value);
- return SizeForUnsignedOperand(unsigned_value) <= operand_size;
- }
- case OperandType::kMaybeReg:
- if (operand_value == 0) {
- return true;
- }
- // Fall-through to kReg case.
- case OperandType::kReg:
- case OperandType::kRegOut: {
- Register reg = RegisterFromOperand(operand_value);
- return RegisterIsValid(reg, operand_size);
- }
- case OperandType::kRegOutPair:
- case OperandType::kRegPair: {
- Register reg0 = RegisterFromOperand(operand_value);
- Register reg1 = Register(reg0.index() + 1);
- // The size of reg1 is immaterial.
- return RegisterIsValid(reg0, operand_size) &&
- RegisterIsValid(reg1, OperandSize::kQuad);
- }
- case OperandType::kRegOutTriple: {
- Register reg0 = RegisterFromOperand(operand_value);
- Register reg1 = Register(reg0.index() + 1);
- Register reg2 = Register(reg0.index() + 2);
- // The size of reg1 and reg2 is immaterial.
- return RegisterIsValid(reg0, operand_size) &&
- RegisterIsValid(reg1, OperandSize::kQuad) &&
- RegisterIsValid(reg2, OperandSize::kQuad);
- }
- }
- UNREACHABLE();
- return false;
-}
-
-bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
- OperandSize reg_size) const {
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
if (!reg.is_valid()) {
return false;
}
- if (SizeForRegisterOperand(reg) > reg_size) {
- return false;
- }
-
if (reg.is_current_context() || reg.is_function_closure() ||
reg.is_new_target()) {
return true;
@@ -1186,24 +730,89 @@ bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
}
}
+bool BytecodeArrayBuilder::OperandsAreValid(
+ Bytecode bytecode, int operand_count, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3) const {
+ if (Bytecodes::NumberOfOperands(bytecode) != operand_count) {
+ return false;
+ }
-bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
- return last_bytecode_start_ < bytecodes()->size() &&
- last_bytecode_start_ >= last_block_end_;
-}
-
-
-bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
- if (LastBytecodeInSameBlock()) {
- PreviousBytecodeHelper previous_bytecode(*this);
- Bytecode bytecode = previous_bytecode.GetBytecode();
- if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
- return previous_bytecode.GetRegisterOperand(0) == reg;
+ uint32_t operands[] = {operand0, operand1, operand2, operand3};
+ const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+ for (int i = 0; i < operand_count; ++i) {
+ switch (operand_types[i]) {
+ case OperandType::kNone:
+ return false;
+ case OperandType::kRegCount: {
+ CHECK_NE(i, 0);
+ CHECK(operand_types[i - 1] == OperandType::kMaybeReg ||
+ operand_types[i - 1] == OperandType::kReg);
+ if (i > 0 && operands[i] > 0) {
+ Register start = Register::FromOperand(operands[i - 1]);
+ Register end(start.index() + static_cast<int>(operands[i]) - 1);
+ if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
+ return false;
+ }
+ }
+ break;
+ }
+ case OperandType::kFlag8:
+ case OperandType::kIntrinsicId:
+ if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
+ OperandSize::kByte) {
+ return false;
+ }
+ break;
+ case OperandType::kRuntimeId:
+ if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
+ OperandSize::kShort) {
+ return false;
+ }
+ break;
+ case OperandType::kIdx:
+ // TODO(oth): Consider splitting OperandType::kIdx into two
+ // operand types. One which is a constant pool index that can
+ // be checked, and the other is an unsigned value.
+ break;
+ case OperandType::kImm:
+ break;
+ case OperandType::kMaybeReg:
+ if (Register::FromOperand(operands[i]) == Register(0)) {
+ break;
+ }
+ // Fall-through to kReg case.
+ case OperandType::kReg:
+ case OperandType::kRegOut: {
+ Register reg = Register::FromOperand(operands[i]);
+ if (!RegisterIsValid(reg)) {
+ return false;
+ }
+ break;
+ }
+ case OperandType::kRegOutPair:
+ case OperandType::kRegPair: {
+ Register reg0 = Register::FromOperand(operands[i]);
+ Register reg1 = Register(reg0.index() + 1);
+ if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1)) {
+ return false;
+ }
+ break;
+ }
+ case OperandType::kRegOutTriple: {
+ Register reg0 = Register::FromOperand(operands[i]);
+ Register reg1 = Register(reg0.index() + 1);
+ Register reg2 = Register(reg0.index() + 2);
+ if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1) ||
+ !RegisterIsValid(reg2)) {
+ return false;
+ }
+ break;
+ }
}
}
- return false;
-}
+ return true;
+}
// static
Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
@@ -1236,7 +845,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
}
}
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
switch (op) {
@@ -1250,7 +858,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForCountOperation(Token::Value op) {
}
}
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
switch (op) {
@@ -1278,43 +885,40 @@ Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
}
}
-
// static
-Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
+Bytecode BytecodeArrayBuilder::BytecodeForStoreNamedProperty(
+ LanguageMode language_mode) {
switch (language_mode) {
case SLOPPY:
- return Bytecode::kStoreICSloppy;
+ return Bytecode::kStaNamedPropertySloppy;
case STRICT:
- return Bytecode::kStoreICStrict;
+ return Bytecode::kStaNamedPropertyStrict;
default:
UNREACHABLE();
}
return Bytecode::kIllegal;
}
-
// static
-Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
+Bytecode BytecodeArrayBuilder::BytecodeForStoreKeyedProperty(
LanguageMode language_mode) {
switch (language_mode) {
case SLOPPY:
- return Bytecode::kKeyedStoreICSloppy;
+ return Bytecode::kStaKeyedPropertySloppy;
case STRICT:
- return Bytecode::kKeyedStoreICStrict;
+ return Bytecode::kStaKeyedPropertyStrict;
default:
UNREACHABLE();
}
return Bytecode::kIllegal;
}
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
return typeof_mode == INSIDE_TYPEOF ? Bytecode::kLdaGlobalInsideTypeof
: Bytecode::kLdaGlobal;
}
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
LanguageMode language_mode) {
@@ -1329,7 +933,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForStoreGlobal(
return Bytecode::kIllegal;
}
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
LanguageMode language_mode) {
@@ -1359,7 +962,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
return Bytecode::kIllegal;
}
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
switch (language_mode) {
@@ -1386,108 +988,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForCall(TailCallMode tail_call_mode) {
return Bytecode::kIllegal;
}
-// static
-OperandSize BytecodeArrayBuilder::SizeForRegisterOperand(Register value) {
- if (value.is_byte_operand()) {
- return OperandSize::kByte;
- } else if (value.is_short_operand()) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-// static
-OperandSize BytecodeArrayBuilder::SizeForSignedOperand(int value) {
- if (kMinInt8 <= value && value <= kMaxInt8) {
- return OperandSize::kByte;
- } else if (kMinInt16 <= value && value <= kMaxInt16) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-// static
-OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(int value) {
- DCHECK_GE(value, 0);
- if (value <= kMaxUInt8) {
- return OperandSize::kByte;
- } else if (value <= kMaxUInt16) {
- return OperandSize::kShort;
- } else {
- return OperandSize::kQuad;
- }
-}
-
-OperandSize BytecodeArrayBuilder::SizeForUnsignedOperand(size_t value) {
- if (value <= static_cast<size_t>(kMaxUInt8)) {
- return OperandSize::kByte;
- } else if (value <= static_cast<size_t>(kMaxUInt16)) {
- return OperandSize::kShort;
- } else if (value <= kMaxUInt32) {
- return OperandSize::kQuad;
- } else {
- UNREACHABLE();
- return OperandSize::kQuad;
- }
-}
-
-OperandScale BytecodeArrayBuilder::OperandSizesToScale(OperandSize size0,
- OperandSize size1,
- OperandSize size2,
- OperandSize size3) {
- OperandSize upper = std::max(size0, size1);
- OperandSize lower = std::max(size2, size3);
- OperandSize result = std::max(upper, lower);
- // Operand sizes have been scaled before calling this function.
- // Currently all scalable operands are byte sized at
- // OperandScale::kSingle.
- STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
- static_cast<int>(OperandScale::kSingle) &&
- static_cast<int>(OperandSize::kShort) ==
- static_cast<int>(OperandScale::kDouble) &&
- static_cast<int>(OperandSize::kQuad) ==
- static_cast<int>(OperandScale::kQuadruple));
- OperandScale operand_scale = static_cast<OperandScale>(result);
- DCHECK(operand_scale == OperandScale::kSingle ||
- operand_scale == OperandScale::kDouble ||
- operand_scale == OperandScale::kQuadruple);
- return operand_scale;
-}
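
The deleted sizing helpers encoded a one-to-one mapping between the widest operand size and the scale prefix (byte -> single, short -> double, quad -> quadruple); scaling decisions now happen later, in the bytecode array writer. The retired rule, reduced to a compilable sketch:

// Sketch of the retired size->scale rule; the enumerator values mirror the
// STATIC_ASSERT above (kByte==kSingle, kShort==kDouble, kQuad==kQuadruple).
enum class OperandSize { kByte = 1, kShort = 2, kQuad = 4 };
enum class OperandScale { kSingle = 1, kDouble = 2, kQuadruple = 4 };

OperandScale OperandSizesToScale(OperandSize size0, OperandSize size1) {
  OperandSize widest = size0 > size1 ? size0 : size1;
  return static_cast<OperandScale>(widest);
}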
-
-uint32_t BytecodeArrayBuilder::RegisterOperand(Register reg) {
- return static_cast<uint32_t>(reg.ToOperand());
-}
-
-Register BytecodeArrayBuilder::RegisterFromOperand(uint32_t operand) {
- return Register::FromOperand(static_cast<int32_t>(operand));
-}
-
-uint32_t BytecodeArrayBuilder::SignedOperand(int value, OperandSize size) {
- switch (size) {
- case OperandSize::kByte:
- return static_cast<uint8_t>(value & 0xff);
- case OperandSize::kShort:
- return static_cast<uint16_t>(value & 0xffff);
- case OperandSize::kQuad:
- return static_cast<uint32_t>(value);
- case OperandSize::kNone:
- UNREACHABLE();
- }
- return 0;
-}
-
-uint32_t BytecodeArrayBuilder::UnsignedOperand(int value) {
- DCHECK_GE(value, 0);
- return static_cast<uint32_t>(value);
-}
-
-uint32_t BytecodeArrayBuilder::UnsignedOperand(size_t value) {
- DCHECK_LE(value, kMaxUInt32);
- return static_cast<uint32_t>(value);
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
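// A minimal standalone sketch of the size-selection rule implemented by the
// removed SizeFor*Operand helpers above (the same rule now lives on the
// Bytecodes side). The enum and function here are local stand-ins for this
// sketch, not the V8 declarations:
#include <cassert>
#include <cstdint>

enum class OperandSize { kByte = 1, kShort = 2, kQuad = 4 };

static OperandSize SizeForSignedOperand(int32_t value) {
  if (value >= INT8_MIN && value <= INT8_MAX) return OperandSize::kByte;
  if (value >= INT16_MIN && value <= INT16_MAX) return OperandSize::kShort;
  return OperandSize::kQuad;
}

int main() {
  assert(SizeForSignedOperand(100) == OperandSize::kByte);    // fits int8
  assert(SizeForSignedOperand(300) == OperandSize::kShort);   // needs int16
  assert(SizeForSignedOperand(70000) == OperandSize::kQuad);  // needs int32
}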
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 4446a63596..51b61861c3 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -6,11 +6,12 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#include "src/ast/ast.h"
+#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
-#include "src/interpreter/source-position-table.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -21,16 +22,19 @@ class Isolate;
namespace interpreter {
class BytecodeLabel;
+class BytecodeNode;
+class BytecodePipelineStage;
class Register;
class BytecodeArrayBuilder final : public ZoneObject {
public:
- BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
- int context_count, int locals_count,
- FunctionLiteral* literal = nullptr);
- ~BytecodeArrayBuilder();
+ BytecodeArrayBuilder(
+ Isolate* isolate, Zone* zone, int parameter_count, int context_count,
+ int locals_count, FunctionLiteral* literal = nullptr,
+ SourcePositionTableBuilder::RecordingMode source_position_mode =
+ SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
- Handle<BytecodeArray> ToBytecodeArray();
+ Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate);
// Get the number of parameters expected by function.
int parameter_count() const {
@@ -75,6 +79,7 @@ class BytecodeArrayBuilder final : public ZoneObject {
bool TemporaryRegisterIsLive(Register reg) const;
// Constant loads to accumulator.
+ BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
BytecodeArrayBuilder& LoadUndefined();
@@ -84,8 +89,7 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& LoadFalse();
// Global loads to the accumulator and stores from the accumulator.
- BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
- TypeofMode typeof_mode);
+ BytecodeArrayBuilder& LoadGlobal(int feedback_slot, TypeofMode typeof_mode);
BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
@@ -127,9 +131,25 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
LanguageMode language_mode);
- // Create a new closure for the SharedFunctionInfo.
- BytecodeArrayBuilder& CreateClosure(Handle<SharedFunctionInfo> shared_info,
- PretenureFlag tenured);
+ // Create a new closure for a SharedFunctionInfo which will be inserted at
+ // constant pool index |entry|.
+ BytecodeArrayBuilder& CreateClosure(size_t entry, int flags);
+
+ // Create a new local context for a |scope_info| and a closure which should be
+ // in the accumulator.
+ BytecodeArrayBuilder& CreateBlockContext(Handle<ScopeInfo> scope_info);
+
+ // Create a new context for a catch block with |exception| and |name| and the
+ // closure in the accumulator.
+ BytecodeArrayBuilder& CreateCatchContext(Register exception,
+ Handle<String> name);
+
+ // Create a new context with size |slots|.
+ BytecodeArrayBuilder& CreateFunctionContext(int slots);
+
+ // Creates a new context for a with-statement with the |object| in a register
+ // and the closure in the accumulator.
+ BytecodeArrayBuilder& CreateWithContext(Register object);
// Create a new arguments object in the accumulator.
BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
@@ -140,7 +160,8 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
int literal_index, int flags);
BytecodeArrayBuilder& CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags);
+ Handle<FixedArray> constant_properties, int literal_index, int flags,
+ Register output);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -152,7 +173,8 @@ class BytecodeArrayBuilder final : public ZoneObject {
// Call a JS function. The JSFunction or Callable to be called should be in
// |callable|, the receiver should be in |receiver_args| and all subsequent
// arguments should be in registers <receiver_args + 1> to
- // <receiver_args + receiver_arg_count - 1>.
+ // <receiver_args + receiver_arg_count - 1>. Type feedback is recorded in
+ // the |feedback_slot| in the type feedback vector.
BytecodeArrayBuilder& Call(
Register callable, Register receiver_args, size_t receiver_arg_count,
int feedback_slot, TailCallMode tail_call_mode = TailCallMode::kDisallow);
@@ -191,10 +213,13 @@ class BytecodeArrayBuilder final : public ZoneObject {
size_t receiver_args_count);
// Operators (register holds the lhs value, accumulator holds the rhs value).
- BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
+ // Type feedback will be recorded in the |feedback_slot|.
+ BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
+ int feedback_slot);
// Count Operators (value stored in accumulator).
- BytecodeArrayBuilder& CountOperation(Token::Value op);
+ // Type feedback will be recorded in the |feedback_slot|.
+ BytecodeArrayBuilder& CountOperation(Token::Value op, int feedback_slot);
// Unary Operators.
BytecodeArrayBuilder& LogicalNot();
@@ -207,11 +232,13 @@ class BytecodeArrayBuilder final : public ZoneObject {
// Tests.
BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
- // Casts.
+ // Casts accumulator and stores result in accumulator.
BytecodeArrayBuilder& CastAccumulatorToBoolean();
- BytecodeArrayBuilder& CastAccumulatorToJSObject();
- BytecodeArrayBuilder& CastAccumulatorToName();
- BytecodeArrayBuilder& CastAccumulatorToNumber();
+
+ // Casts accumulator and stores result in register |out|.
+ BytecodeArrayBuilder& CastAccumulatorToJSObject(Register out);
+ BytecodeArrayBuilder& CastAccumulatorToName(Register out);
+ BytecodeArrayBuilder& CastAccumulatorToNumber(Register out);
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
@@ -224,7 +251,9 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
- BytecodeArrayBuilder& StackCheck();
+ BytecodeArrayBuilder& StackCheck(int position);
+
+ BytecodeArrayBuilder& OsrPoll(int loop_depth);
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
@@ -234,15 +263,21 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& Debugger();
// Complex flow control.
- BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
+ BytecodeArrayBuilder& ForInPrepare(Register receiver,
+ Register cache_info_triple);
BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
Register cache_type_array_pair,
int feedback_slot);
BytecodeArrayBuilder& ForInStep(Register index);
+ // Generators.
+ BytecodeArrayBuilder& SuspendGenerator(Register generator);
+ BytecodeArrayBuilder& ResumeGenerator(Register generator);
+
// Exception handling.
- BytecodeArrayBuilder& MarkHandler(int handler_id, bool will_catch);
+ BytecodeArrayBuilder& MarkHandler(int handler_id,
+ HandlerTable::CatchPrediction will_catch);
BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
BytecodeArrayBuilder& MarkTryEnd(int handler_id);
@@ -250,6 +285,11 @@ class BytecodeArrayBuilder final : public ZoneObject {
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+ // Allocates a slot in the constant pool which can later be inserted.
+ size_t AllocateConstantPoolEntry();
+ // Inserts an entry into an allocated constant pool entry.
+ void InsertConstantPoolEntryAt(size_t entry, Handle<Object> object);
+
void InitializeReturnPosition(FunctionLiteral* literal);
void SetStatementPosition(Statement* stmt);
@@ -257,41 +297,42 @@ class BytecodeArrayBuilder final : public ZoneObject {
void SetExpressionAsStatementPosition(Expression* expr);
// Accessors
- Zone* zone() const { return zone_; }
TemporaryRegisterAllocator* temporary_register_allocator() {
return &temporary_allocator_;
}
const TemporaryRegisterAllocator* temporary_register_allocator() const {
return &temporary_allocator_;
}
+ Zone* zone() const { return zone_; }
void EnsureReturn();
- static OperandScale OperandSizesToScale(
- OperandSize size0, OperandSize size1 = OperandSize::kByte,
- OperandSize size2 = OperandSize::kByte,
- OperandSize size3 = OperandSize::kByte);
+ static uint32_t RegisterOperand(Register reg) {
+ return static_cast<uint32_t>(reg.ToOperand());
+ }
- static OperandSize SizeForRegisterOperand(Register reg);
- static OperandSize SizeForSignedOperand(int value);
- static OperandSize SizeForUnsignedOperand(int value);
- static OperandSize SizeForUnsignedOperand(size_t value);
+ static uint32_t SignedOperand(int value) {
+ return static_cast<uint32_t>(value);
+ }
- static uint32_t RegisterOperand(Register reg);
- static Register RegisterFromOperand(uint32_t operand);
- static uint32_t SignedOperand(int value, OperandSize size);
- static uint32_t UnsignedOperand(int value);
- static uint32_t UnsignedOperand(size_t value);
+ static uint32_t UnsignedOperand(int value) {
+ DCHECK_GE(value, 0);
+ return static_cast<uint32_t>(value);
+ }
+
+ static uint32_t UnsignedOperand(size_t value) {
+ DCHECK_LE(value, kMaxUInt32);
+ return static_cast<uint32_t>(value);
+ }
private:
- class PreviousBytecodeHelper;
friend class BytecodeRegisterAllocator;
static Bytecode BytecodeForBinaryOperation(Token::Value op);
static Bytecode BytecodeForCountOperation(Token::Value op);
static Bytecode BytecodeForCompareOperation(Token::Value op);
- static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
- static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
+ static Bytecode BytecodeForStoreNamedProperty(LanguageMode language_mode);
+ static Bytecode BytecodeForStoreKeyedProperty(LanguageMode language_mode);
static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
@@ -299,43 +340,24 @@ class BytecodeArrayBuilder final : public ZoneObject {
static Bytecode BytecodeForDelete(LanguageMode language_mode);
static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
- static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
- static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
-
- template <size_t N>
- INLINE(void Output(Bytecode bytecode, uint32_t (&operands)[N],
- OperandScale operand_scale = OperandScale::kSingle));
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3);
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2);
+ void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
+ void Output(Bytecode bytecode, uint32_t operand0);
void Output(Bytecode bytecode);
- void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
- uint32_t operand0, uint32_t operand1, uint32_t operand2,
- uint32_t operand3);
- void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
- uint32_t operand0, uint32_t operand1, uint32_t operand2);
- void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
- uint32_t operand0, uint32_t operand1);
- void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
- uint32_t operand0);
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
- void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
- const ZoneVector<uint8_t>::iterator& jump_location);
- void PatchIndirectJumpWith8BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta);
- void PatchIndirectJumpWith16BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta);
- void PatchIndirectJumpWith32BitOperand(
- const ZoneVector<uint8_t>::iterator& jump_location, int delta);
- void LeaveBasicBlock();
+ bool RegisterIsValid(Register reg) const;
+ bool OperandsAreValid(Bytecode bytecode, int operand_count,
+ uint32_t operand0 = 0, uint32_t operand1 = 0,
+ uint32_t operand2 = 0, uint32_t operand3 = 0) const;
- bool OperandIsValid(Bytecode bytecode, OperandScale operand_scale,
- int operand_index, uint32_t operand_value) const;
- bool RegisterIsValid(Register reg, OperandSize reg_size) const;
-
- bool LastBytecodeInSameBlock() const;
- bool NeedToBooleanCast();
- bool IsRegisterInAccumulator(Register reg);
+ // Attach latest source position to |node|.
+ void AttachSourceInfo(BytecodeNode* node);
// Set position for return.
void SetReturnPosition();
@@ -343,9 +365,17 @@ class BytecodeArrayBuilder final : public ZoneObject {
// Gets a constant pool entry for the |object|.
size_t GetConstantPoolEntry(Handle<Object> object);
- ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
- const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
- Isolate* isolate() const { return isolate_; }
+ // Not implemented as the illegal bytecode is used internally
+ // to indicate a bytecode field is not valid or an error has occurred
+ // during bytecode generation.
+ BytecodeArrayBuilder& Illegal();
+
+ void LeaveBasicBlock() { return_seen_in_block_ = false; }
+
+ BytecodeArrayWriter* bytecode_array_writer() {
+ return &bytecode_array_writer_;
+ }
+ BytecodePipelineStage* pipeline() { return pipeline_; }
ConstantArrayBuilder* constant_array_builder() {
return &constant_array_builder_;
}
@@ -355,71 +385,24 @@ class BytecodeArrayBuilder final : public ZoneObject {
HandlerTableBuilder* handler_table_builder() {
return &handler_table_builder_;
}
- SourcePositionTableBuilder* source_position_table_builder() {
- return &source_position_table_builder_;
- }
- Isolate* isolate_;
Zone* zone_;
- ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
HandlerTableBuilder handler_table_builder_;
- SourcePositionTableBuilder source_position_table_builder_;
- size_t last_block_end_;
- size_t last_bytecode_start_;
- bool exit_seen_in_block_;
- int unbound_jumps_;
+ bool return_seen_in_block_;
int parameter_count_;
int local_register_count_;
int context_register_count_;
int return_position_;
TemporaryRegisterAllocator temporary_allocator_;
+ BytecodeArrayWriter bytecode_array_writer_;
+ BytecodePipelineStage* pipeline_;
+ BytecodeSourceInfo latest_source_info_;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
-
-// A label representing a branch target in a bytecode array. When a
-// label is bound, it represents a known position in the bytecode
-// array. For labels that are forward references there can be at most
-// one reference whilst it is unbound.
-class BytecodeLabel final {
- public:
- BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
-
- bool is_bound() const { return bound_; }
- size_t offset() const { return offset_; }
-
- private:
- static const size_t kInvalidOffset = static_cast<size_t>(-1);
-
- void bind_to(size_t offset) {
- DCHECK(!bound_ && offset != kInvalidOffset);
- offset_ = offset;
- bound_ = true;
- }
-
- void set_referrer(size_t offset) {
- DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
- offset_ = offset;
- }
-
- bool is_forward_target() const {
- return offset() != kInvalidOffset && !is_bound();
- }
-
- // There are three states for a label:
- // bound_ offset_
- // UNSET false kInvalidOffset
- // FORWARD_TARGET false Offset of referring jump
- // BACKWARD_TARGET true Offset of label in bytecode array when bound
- bool bound_;
- size_t offset_;
-
- friend class BytecodeArrayBuilder;
-};
-
} // namespace interpreter
} // namespace internal
} // namespace v8
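// The new inline SignedOperand()/UnsignedOperand() helpers above are bare
// casts because operands now travel as raw uint32_t and the writer picks
// the byte width later. A quick check that a negative value survives the
// unsigned carrier on a two's-complement target (plain C++, no V8 types):
#include <cassert>
#include <cstdint>

int main() {
  int value = -3;
  uint32_t carrier = static_cast<uint32_t>(value);  // as SignedOperand() does
  int32_t decoded = static_cast<int32_t>(carrier);  // as the decoder does
  assert(decoded == -3);
}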
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index a17efcb6ca..84c0028342 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -4,6 +4,8 @@
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -69,8 +71,8 @@ uint32_t BytecodeArrayIterator::GetUnsignedOperand(
current_prefix_offset() +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
- return Bytecodes::DecodeUnsignedOperand(operand_start, operand_type,
- current_operand_scale());
+ return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
+ current_operand_scale());
}
int32_t BytecodeArrayIterator::GetSignedOperand(
@@ -85,8 +87,8 @@ int32_t BytecodeArrayIterator::GetSignedOperand(
current_prefix_offset() +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
- return Bytecodes::DecodeSignedOperand(operand_start, operand_type,
- current_operand_scale());
+ return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
+ current_operand_scale());
}
uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
@@ -123,39 +125,40 @@ Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
current_prefix_offset() +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
current_operand_scale());
- return Bytecodes::DecodeRegisterOperand(operand_start, operand_type,
- current_operand_scale());
+ return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
+ current_operand_scale());
}
int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
- interpreter::OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- switch (operand_type) {
- case OperandType::kRegPair:
- case OperandType::kRegOutPair:
- return 2;
- case OperandType::kRegOutTriple:
- return 3;
- default: {
- if (operand_index + 1 !=
- Bytecodes::NumberOfOperands(current_bytecode())) {
- OperandType next_operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
- if (OperandType::kRegCount == next_operand_type) {
- return GetRegisterCountOperand(operand_index + 1);
- }
- }
- return 1;
- }
+ DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ const OperandType* operand_types =
+ Bytecodes::GetOperandTypes(current_bytecode());
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_types[operand_index]));
+ if (operand_types[operand_index + 1] == OperandType::kRegCount) {
+ return GetRegisterCountOperand(operand_index + 1);
+ } else {
+ OperandType operand_type = operand_types[operand_index];
+ return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
}
}
-uint32_t BytecodeArrayIterator::GetRuntimeIdOperand(int operand_index) const {
+Runtime::FunctionId BytecodeArrayIterator::GetRuntimeIdOperand(
+ int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
DCHECK(operand_type == OperandType::kRuntimeId);
- return GetUnsignedOperand(operand_index, operand_type);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return static_cast<Runtime::FunctionId>(raw_id);
+}
+
+Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kIntrinsicId);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return IntrinsicsHelper::ToRuntimeId(
+ static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index b372894fd8..0f7c6c7df9 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -6,8 +6,10 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -34,7 +36,8 @@ class BytecodeArrayIterator {
uint32_t GetRegisterCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
int GetRegisterOperandRange(int operand_index) const;
- uint32_t GetRuntimeIdOperand(int operand_index) const;
+ Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
+ Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
// Returns the absolute offset of the branch target at the current
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
new file mode 100644
index 0000000000..6694a3697c
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -0,0 +1,392 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-writer.h"
+
+#include "src/api.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/log.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ BytecodeArrayWriter::kMaxSizeOfPackedBytecode;
+
+BytecodeArrayWriter::BytecodeArrayWriter(
+ Zone* zone, ConstantArrayBuilder* constant_array_builder,
+ SourcePositionTableBuilder::RecordingMode source_position_mode)
+ : bytecodes_(zone),
+ max_register_count_(0),
+ unbound_jumps_(0),
+ source_position_table_builder_(zone, source_position_mode),
+ constant_array_builder_(constant_array_builder) {}
+
+// override
+BytecodeArrayWriter::~BytecodeArrayWriter() {}
+
+// override
+Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) {
+ DCHECK_EQ(0, unbound_jumps_);
+
+ int bytecode_size = static_cast<int>(bytecodes()->size());
+
+ // All locals need a frame slot for the debugger, but may not be
+ // present in generated code.
+ int frame_size_for_locals = fixed_register_count * kPointerSize;
+ int frame_size_used = max_register_count() * kPointerSize;
+ int frame_size = std::max(frame_size_for_locals, frame_size_used);
+ Handle<FixedArray> constant_pool =
+ constant_array_builder()->ToFixedArray(isolate);
+ Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
+ bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
+ constant_pool);
+ bytecode_array->set_handler_table(*handler_table);
+ Handle<ByteArray> source_position_table =
+ source_position_table_builder()->ToSourcePositionTable(
+ isolate, Handle<AbstractCode>::cast(bytecode_array));
+ bytecode_array->set_source_position_table(*source_position_table);
+ return bytecode_array;
+}
+
+// override
+void BytecodeArrayWriter::Write(BytecodeNode* node) {
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+ UpdateSourcePositionTable(node);
+ EmitBytecode(node);
+}
+
+// override
+void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+ UpdateSourcePositionTable(node);
+ EmitJump(node, label);
+}
+
+// override
+void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
+ size_t current_offset = bytecodes()->size();
+ if (label->is_forward_target()) {
+ // An earlier jump instruction refers to this label. Update its location.
+ PatchJump(current_offset, label->offset());
+ // Now treat as if the label will only be back referred to.
+ }
+ label->bind_to(current_offset);
+}
+
+// override
+void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
+ BytecodeLabel* label) {
+ DCHECK(!label->is_bound());
+ DCHECK(target.is_bound());
+ if (label->is_forward_target()) {
+ // An earlier jump instruction refers to this label. Update its location.
+ PatchJump(target.offset(), label->offset());
+ // Now treat as if the label will only be back referred to.
+ }
+ label->bind_to(target.offset());
+}
+
+void BytecodeArrayWriter::UpdateSourcePositionTable(
+ const BytecodeNode* const node) {
+ int bytecode_offset = static_cast<int>(bytecodes()->size());
+ const BytecodeSourceInfo& source_info = node->source_info();
+ if (source_info.is_valid()) {
+ source_position_table_builder()->AddPosition(bytecode_offset,
+ source_info.source_position(),
+ source_info.is_statement());
+ }
+}
+
+namespace {
+
+OperandScale ScaleForScalableByteOperand(OperandSize operand_size) {
+ STATIC_ASSERT(static_cast<int>(OperandSize::kByte) ==
+ static_cast<int>(OperandScale::kSingle));
+ STATIC_ASSERT(static_cast<int>(OperandSize::kShort) ==
+ static_cast<int>(OperandScale::kDouble));
+ STATIC_ASSERT(static_cast<int>(OperandSize::kQuad) ==
+ static_cast<int>(OperandScale::kQuadruple));
+ return static_cast<OperandScale>(operand_size);
+}
+
+OperandScale OperandScaleForScalableSignedByte(uint32_t operand_value) {
+ int32_t signed_operand = static_cast<int32_t>(operand_value);
+ OperandSize bytes_required = Bytecodes::SizeForSignedOperand(signed_operand);
+ return ScaleForScalableByteOperand(bytes_required);
+}
+
+OperandScale OperandScaleForScalableUnsignedByte(uint32_t operand_value) {
+ OperandSize bytes_required = Bytecodes::SizeForUnsignedOperand(operand_value);
+ return ScaleForScalableByteOperand(bytes_required);
+}
+
+OperandScale GetOperandScale(const BytecodeNode* const node) {
+ const OperandTypeInfo* operand_type_infos =
+ Bytecodes::GetOperandTypeInfos(node->bytecode());
+ OperandScale operand_scale = OperandScale::kSingle;
+ int operand_count = node->operand_count();
+ for (int i = 0; i < operand_count; ++i) {
+ switch (operand_type_infos[i]) {
+ case OperandTypeInfo::kScalableSignedByte: {
+ uint32_t operand = node->operand(i);
+ operand_scale =
+ std::max(operand_scale, OperandScaleForScalableSignedByte(operand));
+ break;
+ }
+ case OperandTypeInfo::kScalableUnsignedByte: {
+ uint32_t operand = node->operand(i);
+ operand_scale = std::max(operand_scale,
+ OperandScaleForScalableUnsignedByte(operand));
+ break;
+ }
+ case OperandTypeInfo::kFixedUnsignedByte:
+ case OperandTypeInfo::kFixedUnsignedShort:
+ break;
+ case OperandTypeInfo::kNone:
+ UNREACHABLE();
+ break;
+ }
+ }
+ return operand_scale;
+}
+
+} // namespace
+
+void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
+ DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
+
+ uint8_t buffer[kMaxSizeOfPackedBytecode];
+ uint8_t* buffer_limit = buffer;
+
+ OperandScale operand_scale = GetOperandScale(node);
+ if (operand_scale != OperandScale::kSingle) {
+ Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
+ *buffer_limit++ = Bytecodes::ToByte(prefix);
+ }
+
+ Bytecode bytecode = node->bytecode();
+ *buffer_limit++ = Bytecodes::ToByte(bytecode);
+
+ const uint32_t* const operands = node->operands();
+ const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+ const int operand_count = Bytecodes::NumberOfOperands(bytecode);
+ for (int i = 0; i < operand_count; ++i) {
+ OperandSize operand_size =
+ Bytecodes::SizeOfOperand(operand_types[i], operand_scale);
+ switch (operand_size) {
+ case OperandSize::kNone:
+ UNREACHABLE();
+ break;
+ case OperandSize::kByte:
+ *buffer_limit++ = static_cast<uint8_t>(operands[i]);
+ break;
+ case OperandSize::kShort: {
+ WriteUnalignedUInt16(buffer_limit, operands[i]);
+ buffer_limit += 2;
+ break;
+ }
+ case OperandSize::kQuad: {
+ WriteUnalignedUInt32(buffer_limit, operands[i]);
+ buffer_limit += 4;
+ break;
+ }
+ }
+
+ int count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
+ if (count == 0) {
+ continue;
+ }
+ // NB operand_types is terminated by OperandType::kNone so
+ // operand_types[i + 1] is valid whilst i < operand_count.
+ if (operand_types[i + 1] == OperandType::kRegCount) {
+ count = static_cast<int>(operands[i]);
+ }
+ Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
+ max_register_count_ = std::max(max_register_count_, reg.index() + count);
+ }
+
+ DCHECK_LE(buffer_limit, buffer + sizeof(buffer));
+ bytecodes()->insert(bytecodes()->end(), buffer, buffer_limit);
+}
+
+// static
+Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
+ switch (jump_bytecode) {
+ case Bytecode::kJump:
+ return Bytecode::kJumpConstant;
+ case Bytecode::kJumpIfTrue:
+ return Bytecode::kJumpIfTrueConstant;
+ case Bytecode::kJumpIfFalse:
+ return Bytecode::kJumpIfFalseConstant;
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfToBooleanTrueConstant;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfToBooleanFalseConstant;
+ case Bytecode::kJumpIfNotHole:
+ return Bytecode::kJumpIfNotHoleConstant;
+ case Bytecode::kJumpIfNull:
+ return Bytecode::kJumpIfNullConstant;
+ case Bytecode::kJumpIfUndefined:
+ return Bytecode::kJumpIfUndefinedConstant;
+ default:
+ UNREACHABLE();
+ return Bytecode::kIllegal;
+ }
+}
+
+void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
+ int delta) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+ DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+ size_t operand_location = jump_location + 1;
+ DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
+ if (Bytecodes::SizeForSignedOperand(delta) == OperandSize::kByte) {
+ // The jump fits within the range of an Imm operand, so cancel
+ // the reservation and jump directly.
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
+ bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
+ } else {
+ // The jump does not fit within the range of an Imm operand, so
+ // commit reservation putting the offset into the constant pool,
+ // and update the jump instruction and operand.
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kByte, Smi::FromInt(delta));
+ DCHECK_LE(entry, kMaxUInt32);
+ DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
+ OperandSize::kByte);
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+ bytecodes()->at(operand_location) = static_cast<uint8_t>(entry);
+ }
+}
+
+void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
+ int delta) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+ DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+ size_t operand_location = jump_location + 1;
+ uint8_t operand_bytes[2];
+ if (Bytecodes::SizeForSignedOperand(delta) <= OperandSize::kShort) {
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
+ WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
+ } else {
+ jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
+ bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
+ size_t entry = constant_array_builder()->CommitReservedEntry(
+ OperandSize::kShort, Smi::FromInt(delta));
+ WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
+ }
+ DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
+ bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
+ bytecodes()->at(operand_location++) = operand_bytes[0];
+ bytecodes()->at(operand_location) = operand_bytes[1];
+}
+
+void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
+ int delta) {
+ DCHECK(Bytecodes::IsJumpImmediate(
+ Bytecodes::FromByte(bytecodes()->at(jump_location))));
+ constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
+ uint8_t operand_bytes[4];
+ WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
+ size_t operand_location = jump_location + 1;
+ DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
+ bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
+ bytecodes()->at(operand_location + 2) == k8BitJumpPlaceholder &&
+ bytecodes()->at(operand_location + 3) == k8BitJumpPlaceholder);
+ bytecodes()->at(operand_location++) = operand_bytes[0];
+ bytecodes()->at(operand_location++) = operand_bytes[1];
+ bytecodes()->at(operand_location++) = operand_bytes[2];
+ bytecodes()->at(operand_location) = operand_bytes[3];
+}
+
+void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+ int delta = static_cast<int>(jump_target - jump_location);
+ int prefix_offset = 0;
+ OperandScale operand_scale = OperandScale::kSingle;
+ if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
+ // If a prefix scaling bytecode is emitted the target offset is one
+ // less than the case of no prefix scaling bytecode.
+ delta -= 1;
+ prefix_offset = 1;
+ operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
+ jump_bytecode =
+ Bytecodes::FromByte(bytecodes()->at(jump_location + prefix_offset));
+ }
+
+ DCHECK(Bytecodes::IsJump(jump_bytecode));
+ switch (operand_scale) {
+ case OperandScale::kSingle:
+ PatchJumpWith8BitOperand(jump_location, delta);
+ break;
+ case OperandScale::kDouble:
+ PatchJumpWith16BitOperand(jump_location + prefix_offset, delta);
+ break;
+ case OperandScale::kQuadruple:
+ PatchJumpWith32BitOperand(jump_location + prefix_offset, delta);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ unbound_jumps_--;
+}
+
+void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+ DCHECK_EQ(0, node->operand(0));
+
+ size_t current_offset = bytecodes()->size();
+
+ if (label->is_bound()) {
+ CHECK_GE(current_offset, label->offset());
+ CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
+ // Label has been bound already so this is a backwards jump.
+ size_t abs_delta = current_offset - label->offset();
+ int delta = -static_cast<int>(abs_delta);
+ OperandSize operand_size = Bytecodes::SizeForSignedOperand(delta);
+ if (operand_size > OperandSize::kByte) {
+ // Adjust for scaling byte prefix for wide jump offset.
+ DCHECK_LE(delta, 0);
+ delta -= 1;
+ }
+ node->set_bytecode(node->bytecode(), delta);
+ } else {
+ // The label has not yet been bound so this is a forward reference
+ // that will be patched when the label is bound. We create a
+ // reservation in the constant pool so the jump can be patched
+ // when the label is bound. The reservation means the maximum size
+ // of the operand for the constant is known and the jump can
+ // be emitted into the bytecode stream with space for the operand.
+ unbound_jumps_++;
+ label->set_referrer(current_offset);
+ OperandSize reserved_operand_size =
+ constant_array_builder()->CreateReservedEntry();
+ switch (reserved_operand_size) {
+ case OperandSize::kNone:
+ UNREACHABLE();
+ break;
+ case OperandSize::kByte:
+ node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder);
+ break;
+ case OperandSize::kShort:
+ node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder);
+ break;
+ case OperandSize::kQuad:
+ node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder);
+ break;
+ }
+ }
+ EmitBytecode(node);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
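// A reduced model of the GetOperandScale() loop above: the operand scale
// chosen for a bytecode is the maximum width any of its scalable operands
// requires, and a scaling prefix is only emitted when that maximum exceeds
// one byte. Scale and ScaleFor are local stand-ins for this sketch:
#include <algorithm>
#include <cassert>
#include <cstdint>

enum class Scale { kSingle = 1, kDouble = 2, kQuadruple = 4 };

static Scale ScaleFor(uint32_t operand) {
  if (operand <= UINT8_MAX) return Scale::kSingle;
  if (operand <= UINT16_MAX) return Scale::kDouble;
  return Scale::kQuadruple;
}

int main() {
  uint32_t operands[] = {7, 4000, 12};  // the middle operand needs 16 bits
  Scale scale = Scale::kSingle;
  for (uint32_t op : operands) scale = std::max(scale, ScaleFor(op));
  assert(scale == Scale::kDouble);  // all operands are emitted at this scale
}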
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
new file mode 100644
index 0000000000..17fe3d4732
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -0,0 +1,88 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+#include "src/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+
+class SourcePositionTableBuilder;
+
+namespace interpreter {
+
+class BytecodeLabel;
+class ConstantArrayBuilder;
+
+// Class for emitting bytecode as the final stage of the bytecode
+// generation pipeline.
+class BytecodeArrayWriter final : public BytecodePipelineStage {
+ public:
+ BytecodeArrayWriter(
+ Zone* zone, ConstantArrayBuilder* constant_array_builder,
+ SourcePositionTableBuilder::RecordingMode source_position_mode);
+ virtual ~BytecodeArrayWriter();
+
+ // BytecodePipelineStage interface.
+ void Write(BytecodeNode* node) override;
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+ void BindLabel(BytecodeLabel* label) override;
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) override;
+
+ private:
+ // A maximally sized packed bytecode comprises a prefix bytecode,
+ // plus the actual bytecode, plus the maximum number of operands times
+ // the maximum operand size.
+ static const size_t kMaxSizeOfPackedBytecode =
+ 2 * sizeof(Bytecode) +
+ Bytecodes::kMaxOperands * static_cast<size_t>(OperandSize::kLast);
+
+ // Constants that act as placeholders for jump operands to be
+ // patched. These have operand sizes that match the sizes of
+ // reserved constant pool entries.
+ const uint32_t k8BitJumpPlaceholder = 0x7f;
+ const uint32_t k16BitJumpPlaceholder =
+ k8BitJumpPlaceholder | (k8BitJumpPlaceholder << 8);
+ const uint32_t k32BitJumpPlaceholder =
+ k16BitJumpPlaceholder | (k16BitJumpPlaceholder << 16);
+
+ void PatchJump(size_t jump_target, size_t jump_location);
+ void PatchJumpWith8BitOperand(size_t jump_location, int delta);
+ void PatchJumpWith16BitOperand(size_t jump_location, int delta);
+ void PatchJumpWith32BitOperand(size_t jump_location, int delta);
+
+ void EmitBytecode(const BytecodeNode* const node);
+ void EmitJump(BytecodeNode* node, BytecodeLabel* label);
+ void UpdateSourcePositionTable(const BytecodeNode* const node);
+
+ ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+ SourcePositionTableBuilder* source_position_table_builder() {
+ return &source_position_table_builder_;
+ }
+ ConstantArrayBuilder* constant_array_builder() {
+ return constant_array_builder_;
+ }
+ int max_register_count() { return max_register_count_; }
+
+ ZoneVector<uint8_t> bytecodes_;
+ int max_register_count_;
+ int unbound_jumps_;
+ SourcePositionTableBuilder source_position_table_builder_;
+ ConstantArrayBuilder* constant_array_builder_;
+
+ friend class BytecodeArrayWriterUnittest;
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
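// The jump placeholders declared above compose bytewise: each wider
// placeholder repeats 0x7f per byte, so every byte of an unpatched jump
// operand is recognizable regardless of the reserved width. A quick check:
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t k8 = 0x7f;
  const uint32_t k16 = k8 | (k8 << 8);
  const uint32_t k32 = k16 | (k16 << 16);
  assert(k16 == 0x7f7fu && k32 == 0x7f7f7f7fu);
}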
diff --git a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc
new file mode 100644
index 0000000000..5d301c76ce
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.cc
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeDeadCodeOptimizer::BytecodeDeadCodeOptimizer(
+ BytecodePipelineStage* next_stage)
+ : next_stage_(next_stage), exit_seen_in_block_(false) {}
+
+// override
+Handle<BytecodeArray> BytecodeDeadCodeOptimizer::ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) {
+ return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+ parameter_count, handler_table);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::Write(BytecodeNode* node) {
+ // Don't emit dead code.
+ if (exit_seen_in_block_) return;
+
+ switch (node->bytecode()) {
+ case Bytecode::kReturn:
+ case Bytecode::kThrow:
+ case Bytecode::kReThrow:
+ exit_seen_in_block_ = true;
+ break;
+ default:
+ break;
+ }
+
+ next_stage_->Write(node);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::WriteJump(BytecodeNode* node,
+ BytecodeLabel* label) {
+ // Don't emit dead code.
+ // TODO(rmcilroy): For forward jumps we could mark the label as dead, thereby
+ // avoiding emitting dead code when we bind the label.
+ if (exit_seen_in_block_) return;
+
+ switch (node->bytecode()) {
+ case Bytecode::kJump:
+ case Bytecode::kJumpConstant:
+ exit_seen_in_block_ = true;
+ break;
+ default:
+ break;
+ }
+
+ next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodeDeadCodeOptimizer::BindLabel(BytecodeLabel* label) {
+ next_stage_->BindLabel(label);
+ exit_seen_in_block_ = false;
+}
+
+// override
+void BytecodeDeadCodeOptimizer::BindLabel(const BytecodeLabel& target,
+ BytecodeLabel* label) {
+ next_stage_->BindLabel(target, label);
+ // exit_seen_in_block_ was reset when target was bound, so shouldn't be
+ // changed here.
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
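// A reduced model of the optimizer's state machine above: after a bytecode
// that unconditionally leaves the block, writes are dropped until the next
// label is bound and control can reach the code again. Op and
// DeadCodeFilter are stand-ins for this sketch, not the V8 pipeline types:
#include <cassert>
#include <vector>

enum class Op { kLdaZero, kReturn };

struct DeadCodeFilter {
  bool exit_seen_in_block = false;
  std::vector<Op> emitted;

  void Write(Op op) {
    if (exit_seen_in_block) return;  // drop dead code
    if (op == Op::kReturn) exit_seen_in_block = true;
    emitted.push_back(op);
  }
  void BindLabel() { exit_seen_in_block = false; }  // reachable again
};

int main() {
  DeadCodeFilter filter;
  filter.Write(Op::kLdaZero);
  filter.Write(Op::kReturn);
  filter.Write(Op::kLdaZero);  // dead: swallowed
  filter.BindLabel();          // e.g. a jump target binds here
  filter.Write(Op::kLdaZero);  // live again
  assert(filter.emitted.size() == 3);
}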
diff --git a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
new file mode 100644
index 0000000000..8a9732cb3f
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An optimization stage for eliminating obviously dead code in bytecode
+// generation.
+class BytecodeDeadCodeOptimizer final : public BytecodePipelineStage,
+ public ZoneObject {
+ public:
+ explicit BytecodeDeadCodeOptimizer(BytecodePipelineStage* next_stage);
+
+ // BytecodePipelineStage interface.
+ void Write(BytecodeNode* node) override;
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+ void BindLabel(BytecodeLabel* label) override;
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) override;
+
+ private:
+ BytecodePipelineStage* next_stage_;
+ bool exit_seen_in_block_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeDeadCodeOptimizer);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
new file mode 100644
index 0000000000..74c5806ef5
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -0,0 +1,157 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-decoder.h"
+
+#include <iomanip>
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// static
+Register BytecodeDecoder::DecodeRegisterOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ int32_t operand =
+ DecodeSignedOperand(operand_start, operand_type, operand_scale);
+ return Register::FromOperand(operand);
+}
+
+// static
+int32_t BytecodeDecoder::DecodeSignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+ case OperandSize::kByte:
+ return static_cast<int8_t>(*operand_start);
+ case OperandSize::kShort:
+ return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
+ case OperandSize::kQuad:
+ return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// static
+uint32_t BytecodeDecoder::DecodeUnsignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale) {
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
+ case OperandSize::kByte:
+ return *operand_start;
+ case OperandSize::kShort:
+ return ReadUnalignedUInt16(operand_start);
+ case OperandSize::kQuad:
+ return ReadUnalignedUInt32(operand_start);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// static
+std::ostream& BytecodeDecoder::Decode(std::ostream& os,
+ const uint8_t* bytecode_start,
+ int parameter_count) {
+ Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
+ int prefix_offset = 0;
+ OperandScale operand_scale = OperandScale::kSingle;
+ if (Bytecodes::IsPrefixScalingBytecode(bytecode)) {
+ prefix_offset = 1;
+ operand_scale = Bytecodes::PrefixBytecodeToOperandScale(bytecode);
+ bytecode = Bytecodes::FromByte(bytecode_start[1]);
+ }
+
+ // Prepare to print bytecode and operands as hex digits.
+ std::ios saved_format(nullptr);
+ saved_format.copyfmt(os);
+ os.fill('0');
+ os.flags(std::ios::hex);
+
+ int bytecode_size = Bytecodes::Size(bytecode, operand_scale);
+ for (int i = 0; i < prefix_offset + bytecode_size; i++) {
+ os << std::setw(2) << static_cast<uint32_t>(bytecode_start[i]) << ' ';
+ }
+ os.copyfmt(saved_format);
+
+ const int kBytecodeColumnSize = 6;
+ for (int i = prefix_offset + bytecode_size; i < kBytecodeColumnSize; i++) {
+ os << " ";
+ }
+
+ os << Bytecodes::ToString(bytecode, operand_scale) << " ";
+
+ // Operands for the debug break are from the original instruction.
+ if (Bytecodes::IsDebugBreak(bytecode)) return os;
+
+ int number_of_operands = Bytecodes::NumberOfOperands(bytecode);
+ int range = 0;
+ for (int i = 0; i < number_of_operands; i++) {
+ OperandType op_type = Bytecodes::GetOperandType(bytecode, i);
+ int operand_offset =
+ Bytecodes::GetOperandOffset(bytecode, i, operand_scale);
+ const uint8_t* operand_start =
+ &bytecode_start[prefix_offset + operand_offset];
+ switch (op_type) {
+ case interpreter::OperandType::kRegCount:
+ os << "#"
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
+ break;
+ case interpreter::OperandType::kIdx:
+ case interpreter::OperandType::kRuntimeId:
+ case interpreter::OperandType::kIntrinsicId:
+ os << "["
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
+ << "]";
+ break;
+ case interpreter::OperandType::kImm:
+ os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
+ << "]";
+ break;
+ case interpreter::OperandType::kFlag8:
+ os << "#"
+ << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
+ break;
+ case interpreter::OperandType::kMaybeReg:
+ case interpreter::OperandType::kReg:
+ case interpreter::OperandType::kRegOut: {
+ Register reg =
+ DecodeRegisterOperand(operand_start, op_type, operand_scale);
+ os << reg.ToString(parameter_count);
+ break;
+ }
+ case interpreter::OperandType::kRegOutTriple:
+ range += 1;
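+ // Fall through to the register-pair handling below.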
+ case interpreter::OperandType::kRegOutPair:
+ case interpreter::OperandType::kRegPair: {
+ range += 1;
+ Register first_reg =
+ DecodeRegisterOperand(operand_start, op_type, operand_scale);
+ Register last_reg = Register(first_reg.index() + range);
+ os << first_reg.ToString(parameter_count) << "-"
+ << last_reg.ToString(parameter_count);
+ break;
+ }
+ case interpreter::OperandType::kNone:
+ UNREACHABLE();
+ break;
+ }
+ if (i != number_of_operands - 1) {
+ os << ", ";
+ }
+ }
+ return os;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
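// The decoder above gets sign extension from plain integral casts: a
// one-byte operand is reinterpreted as int8_t and then widened. A
// standalone check of that rule (no V8 types assumed):
#include <cassert>
#include <cstdint>

static int32_t DecodeSignedByte(const uint8_t* operand_start) {
  return static_cast<int8_t>(*operand_start);  // sign-extends when widening
}

int main() {
  const uint8_t encoded[] = {0xfe};  // -2 as a single signed byte
  assert(DecodeSignedByte(encoded) == -2);
}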
diff --git a/deps/v8/src/interpreter/bytecode-decoder.h b/deps/v8/src/interpreter/bytecode-decoder.h
new file mode 100644
index 0000000000..6613179d0c
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-decoder.h
@@ -0,0 +1,43 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_DECODER_H_
+#define V8_INTERPRETER_BYTECODE_DECODER_H_
+
+#include <iosfwd>
+
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeDecoder final {
+ public:
+ // Decodes a register operand in a byte array.
+ static Register DecodeRegisterOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decodes a signed operand in a byte array.
+ static int32_t DecodeSignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decodes an unsigned operand in a byte array.
+ static uint32_t DecodeUnsignedOperand(const uint8_t* operand_start,
+ OperandType operand_type,
+ OperandScale operand_scale);
+
+ // Decode a single bytecode and operands to |os|.
+ static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
+ int number_of_parameters);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_DECODER_H_
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
new file mode 100644
index 0000000000..9b25dbd230
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-flags.h"
+
+#include "src/code-stubs.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// static
+uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
+ int properties_count,
+ int runtime_flags) {
+ uint8_t result = FlagsBits::encode(runtime_flags);
+ if (fast_clone_supported) {
+ STATIC_ASSERT(
+ FastCloneShallowObjectStub::kMaximumClonedProperties <=
+ 1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
+ DCHECK_LE(properties_count,
+ FastCloneShallowObjectStub::kMaximumClonedProperties);
+ result |= CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
+ properties_count);
+ }
+ return result;
+}
+
+// static
+uint8_t CreateClosureFlags::Encode(bool pretenure, bool is_function_scope) {
+ uint8_t result = PretenuredBit::encode(pretenure);
+ if (!FLAG_always_opt && !FLAG_prepare_always_opt &&
+ pretenure == NOT_TENURED && is_function_scope) {
+ result |= FastNewClosureBit::encode(true);
+ }
+ return result;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
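// A sketch of the one-byte flag packing that CreateClosureFlags::Encode()
// performs above, with hand-rolled masks standing in for the BitField8
// templates (bit positions follow the header below: pretenure at bit 0,
// the fast-path hint at bit 1):
#include <cassert>
#include <cstdint>

constexpr uint8_t kPretenuredBit = 1 << 0;
constexpr uint8_t kFastNewClosureBit = 1 << 1;

static uint8_t EncodeClosureFlags(bool pretenure, bool fast_new_closure) {
  uint8_t result = pretenure ? kPretenuredBit : 0;
  if (fast_new_closure) result |= kFastNewClosureBit;
  return result;
}

int main() {
  assert(EncodeClosureFlags(true, false) == 0x01);
  assert(EncodeClosureFlags(false, true) == 0x02);  // both fit one operand byte
}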
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
new file mode 100644
index 0000000000..1068d8a9d9
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_FLAGS_H_
+#define V8_INTERPRETER_BYTECODE_FLAGS_H_
+
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class CreateObjectLiteralFlags {
+ public:
+ class FlagsBits : public BitField8<int, 0, 3> {};
+ class FastClonePropertiesCountBits
+ : public BitField8<int, FlagsBits::kNext, 3> {};
+
+ static uint8_t Encode(bool fast_clone_supported, int properties_count,
+ int runtime_flags);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CreateObjectLiteralFlags);
+};
+
+class CreateClosureFlags {
+ public:
+ class PretenuredBit : public BitField8<bool, 0, 1> {};
+ class FastNewClosureBit : public BitField8<bool, PretenuredBit::kNext, 1> {};
+
+ static uint8_t Encode(bool pretenure, bool is_function_scope);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CreateClosureFlags);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_FLAGS_H_
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index b0fa245e18..a57399cf6f 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -5,7 +5,10 @@
#include "src/interpreter/bytecode-generator.h"
#include "src/ast/scopes.h"
+#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/interpreter/bytecode-flags.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
@@ -16,7 +19,6 @@ namespace v8 {
namespace internal {
namespace interpreter {
-
// Scoped class tracking context objects created by the visitor. Represents
// mutations of the context chain within the function body, allowing pushing and
// popping of the current {context_register} during visitation.
@@ -87,7 +89,6 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
bool should_pop_context_;
};
-
// Scoped class for tracking control statements entered by the
// visitor. The pattern derives AstGraphBuilder::ControlScope.
class BytecodeGenerator::ControlScope BASE_EMBEDDED {
@@ -123,7 +124,6 @@ class BytecodeGenerator::ControlScope BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(ControlScope);
};
-
// Helper class for a try-finally control scope. It can record intercepted
// control-flow commands that cause entry into a finally-block, and re-apply
// them after again leaving that block. Special tokens are used to identify
@@ -202,7 +202,6 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
Register result_register_;
};
-
// Scoped class for dealing with control flow reaching the function level.
class BytecodeGenerator::ControlScopeForTopLevel final
: public BytecodeGenerator::ControlScope {
@@ -227,7 +226,6 @@ class BytecodeGenerator::ControlScopeForTopLevel final
}
};
-
// Scoped class for enabling break inside blocks and switch blocks.
class BytecodeGenerator::ControlScopeForBreakable final
: public BytecodeGenerator::ControlScope {
@@ -259,7 +257,6 @@ class BytecodeGenerator::ControlScopeForBreakable final
BreakableControlFlowBuilder* control_builder_;
};
-
// Scoped class for enabling 'break' and 'continue' in iteration
// constructs, e.g. do...while, while..., for...
class BytecodeGenerator::ControlScopeForIteration final
@@ -270,7 +267,10 @@ class BytecodeGenerator::ControlScopeForIteration final
LoopBuilder* loop_builder)
: ControlScope(generator),
statement_(statement),
- loop_builder_(loop_builder) {}
+ loop_builder_(loop_builder) {
+ generator->loop_depth_++;
+ }
+ ~ControlScopeForIteration() { generator()->loop_depth_--; }
protected:
bool Execute(Command command, Statement* statement) override {
@@ -294,19 +294,13 @@ class BytecodeGenerator::ControlScopeForIteration final
LoopBuilder* loop_builder_;
};
-
// Scoped class for enabling 'throw' in try-catch constructs.
class BytecodeGenerator::ControlScopeForTryCatch final
: public BytecodeGenerator::ControlScope {
public:
ControlScopeForTryCatch(BytecodeGenerator* generator,
TryCatchBuilder* try_catch_builder)
- : ControlScope(generator) {
- generator->try_catch_nesting_level_++;
- }
- virtual ~ControlScopeForTryCatch() {
- generator()->try_catch_nesting_level_--;
- }
+ : ControlScope(generator) {}
protected:
bool Execute(Command command, Statement* statement) override {
@@ -323,7 +317,6 @@ class BytecodeGenerator::ControlScopeForTryCatch final
}
};
-
// Scoped class for enabling control flow through try-finally constructs.
class BytecodeGenerator::ControlScopeForTryFinally final
: public BytecodeGenerator::ControlScope {
@@ -333,12 +326,7 @@ class BytecodeGenerator::ControlScopeForTryFinally final
DeferredCommands* commands)
: ControlScope(generator),
try_finally_builder_(try_finally_builder),
- commands_(commands) {
- generator->try_finally_nesting_level_++;
- }
- virtual ~ControlScopeForTryFinally() {
- generator()->try_finally_nesting_level_--;
- }
+ commands_(commands) {}
protected:
bool Execute(Command command, Statement* statement) override {
@@ -359,7 +347,6 @@ class BytecodeGenerator::ControlScopeForTryFinally final
DeferredCommands* commands_;
};
-
void BytecodeGenerator::ControlScope::PerformCommand(Command command,
Statement* statement) {
ControlScope* current = this;
@@ -382,7 +369,6 @@ void BytecodeGenerator::ControlScope::PerformCommand(Command command,
UNREACHABLE();
}
-
class BytecodeGenerator::RegisterAllocationScope {
public:
explicit RegisterAllocationScope(BytecodeGenerator* generator)
@@ -440,7 +426,6 @@ class BytecodeGenerator::RegisterAllocationScope {
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationScope);
};
-
// Scoped base class for determining where the result of an expression
// is stored.
class BytecodeGenerator::ExpressionResultScope {
@@ -461,6 +446,12 @@ class BytecodeGenerator::ExpressionResultScope {
bool IsEffect() const { return kind_ == Expression::kEffect; }
bool IsValue() const { return kind_ == Expression::kValue; }
+ bool IsTest() const { return kind_ == Expression::kTest; }
+
+ TestResultScope* AsTest() {
+ DCHECK(IsTest());
+ return reinterpret_cast<TestResultScope*>(this);
+ }
virtual void SetResultInAccumulator() = 0;
virtual void SetResultInRegister(Register reg) = 0;
@@ -488,7 +479,6 @@ class BytecodeGenerator::ExpressionResultScope {
DISALLOW_COPY_AND_ASSIGN(ExpressionResultScope);
};
-
// Scoped class used when the result of the current expression is not
// expected to produce a result.
class BytecodeGenerator::EffectResultScope final
@@ -503,7 +493,6 @@ class BytecodeGenerator::EffectResultScope final
virtual void SetResultInRegister(Register reg) {}
};
-
// Scoped class used when the result of the current expression to be
// evaluated should go into the interpreter's accumulator register.
class BytecodeGenerator::AccumulatorResultScope final
@@ -520,7 +509,6 @@ class BytecodeGenerator::AccumulatorResultScope final
}
};
-
// Scoped class used when the result of the current expression to be
// evaluated should go into an interpreter register.
class BytecodeGenerator::RegisterResultScope final
@@ -554,31 +542,194 @@ class BytecodeGenerator::RegisterResultScope final
Register result_register_;
};
-BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone),
- builder_(nullptr),
- info_(nullptr),
- scope_(nullptr),
- globals_(0, zone),
+// Scoped class used when the result of the current expression to be
+// evaluated is only tested with jumps to two branches.
+class BytecodeGenerator::TestResultScope final : public ExpressionResultScope {
+ public:
+ TestResultScope(BytecodeGenerator* generator, BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels, TestFallthrough fallthrough)
+ : ExpressionResultScope(generator, Expression::kTest),
+ then_labels_(then_labels),
+ else_labels_(else_labels),
+ fallthrough_(fallthrough),
+ result_consumed_by_test_(false) {}
+
+ virtual void SetResultInAccumulator() { set_result_identified(); }
+
+ virtual void SetResultInRegister(Register reg) {
+ builder()->LoadAccumulatorWithRegister(reg);
+ set_result_identified();
+ }
+
+  // Used when code special-cases TestResultScope and consumes any
+ // possible value by testing and jumping to a then/else label.
+ void SetResultConsumedByTest() {
+ result_consumed_by_test_ = true;
+ set_result_identified();
+ }
+
+ bool ResultConsumedByTest() { return result_consumed_by_test_; }
+
+ BytecodeLabel* NewThenLabel() { return then_labels_->New(); }
+ BytecodeLabel* NewElseLabel() { return else_labels_->New(); }
+
+ BytecodeLabels* then_labels() const { return then_labels_; }
+ BytecodeLabels* else_labels() const { return else_labels_; }
+
+ TestFallthrough fallthrough() const { return fallthrough_; }
+ TestFallthrough inverted_fallthrough() const {
+ switch (fallthrough_) {
+ case TestFallthrough::kThen:
+ return TestFallthrough::kElse;
+ case TestFallthrough::kElse:
+ return TestFallthrough::kThen;
+ default:
+ return TestFallthrough::kNone;
+ }
+ }
+
+ private:
+ BytecodeLabels* then_labels_;
+ BytecodeLabels* else_labels_;
+ TestFallthrough fallthrough_;
+ bool result_consumed_by_test_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestResultScope);
+};
+
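// Illustrative sketch, not taken from this diff: the TestFallthrough
// asymmetry in miniature. With kThen fallthrough the generator needs only a
// JumpIfFalse to the else label and lets the then block follow inline;
// kElse is the mirror image. All names below are hypothetical stand-ins.
#include <cstdio>

enum class TestFallthrough { kThen, kElse };

void EmitTest(TestFallthrough fallthrough) {
  if (fallthrough == TestFallthrough::kThen) {
    std::puts("JumpIfFalse -> else_label  (then block falls through)");
  } else {
    std::puts("JumpIfTrue  -> then_label  (else block falls through)");
  }
}

int main() {
  EmitTest(TestFallthrough::kThen);
  EmitTest(TestFallthrough::kElse);
}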
+// Used to build a list of global declaration initial value pairs.
+class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
+ public:
+ explicit GlobalDeclarationsBuilder(Zone* zone)
+ : declarations_(0, zone),
+ constant_pool_entry_(0),
+ has_constant_pool_entry_(false) {}
+
+ void AddFunctionDeclaration(FeedbackVectorSlot slot, FunctionLiteral* func) {
+ DCHECK(!slot.IsInvalid());
+ declarations_.push_back(std::make_pair(slot, func));
+ }
+
+ void AddUndefinedDeclaration(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
+ declarations_.push_back(std::make_pair(slot, nullptr));
+ }
+
+ Handle<FixedArray> AllocateDeclarationPairs(CompilationInfo* info) {
+ DCHECK(has_constant_pool_entry_);
+ int array_index = 0;
+ Handle<FixedArray> pairs = info->isolate()->factory()->NewFixedArray(
+ static_cast<int>(declarations_.size() * 2), TENURED);
+ for (std::pair<FeedbackVectorSlot, FunctionLiteral*> declaration :
+ declarations_) {
+ FunctionLiteral* func = declaration.second;
+ Handle<Object> initial_value;
+ if (func == nullptr) {
+ initial_value = info->isolate()->factory()->undefined_value();
+ } else {
+ initial_value =
+ Compiler::GetSharedFunctionInfo(func, info->script(), info);
+ }
+
+      // Return a null handle if any initial values can't be created. The
+      // caller will then set the stack-overflow flag.
+ if (initial_value.is_null()) return Handle<FixedArray>();
+
+ pairs->set(array_index++, Smi::FromInt(declaration.first.ToInt()));
+ pairs->set(array_index++, *initial_value);
+ }
+ return pairs;
+ }
+
+ size_t constant_pool_entry() {
+ DCHECK(has_constant_pool_entry_);
+ return constant_pool_entry_;
+ }
+
+ void set_constant_pool_entry(size_t constant_pool_entry) {
+ DCHECK(!empty());
+ DCHECK(!has_constant_pool_entry_);
+ constant_pool_entry_ = constant_pool_entry;
+ has_constant_pool_entry_ = true;
+ }
+
+ bool empty() { return declarations_.empty(); }
+
+ private:
+ ZoneVector<std::pair<FeedbackVectorSlot, FunctionLiteral*>> declarations_;
+ size_t constant_pool_entry_;
+ bool has_constant_pool_entry_;
+};
+
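// Illustrative sketch, assuming only what the class above shows: the
// collected (feedback slot, initial value) declarations are flattened into
// an array of 2 * N entries, [slot0, value0, slot1, value1, ...], which is
// the layout AllocateDeclarationPairs produces with real heap handles.
// Values below are hypothetical stand-ins.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<int, std::string>> declarations = {
      {3, "undefined"},                  // AddUndefinedDeclaration(slot 3)
      {5, "<shared function info f>"}};  // AddFunctionDeclaration(slot 5, f)
  std::vector<std::string> pairs;        // stand-in for the FixedArray
  for (const auto& decl : declarations) {
    pairs.push_back(std::to_string(decl.first));  // slot stored as a Smi
    pairs.push_back(decl.second);                 // initial value
  }
  for (const auto& entry : pairs) std::printf("%s\n", entry.c_str());
}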
+BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
+ : zone_(info->zone()),
+ builder_(new (zone()) BytecodeArrayBuilder(
+ info->isolate(), info->zone(), info->num_parameters_including_this(),
+ info->scope()->MaxNestedContextChainLength(),
+ info->scope()->num_stack_slots(), info->literal(),
+ info->SourcePositionRecordingMode())),
+ info_(info),
+ scope_(info->scope()),
+ globals_builder_(new (zone()) GlobalDeclarationsBuilder(info->zone())),
+ global_declarations_(0, info->zone()),
+ function_literals_(0, info->zone()),
+ native_function_literals_(0, info->zone()),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
register_allocator_(nullptr),
- try_catch_nesting_level_(0),
- try_finally_nesting_level_(0) {
- InitializeAstVisitor(isolate);
-}
-
-Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
- set_info(info);
- set_scope(info->scope());
-
- // Initialize bytecode array builder.
- set_builder(new (zone()) BytecodeArrayBuilder(
- isolate(), zone(), info->num_parameters_including_this(),
- scope()->MaxNestedContextChainLength(), scope()->num_stack_slots(),
- info->literal()));
+ generator_resume_points_(info->literal()->yield_count(), info->zone()),
+ generator_state_(),
+ loop_depth_(0),
+ home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
+ prototype_string_(info->isolate()->factory()->prototype_string()) {
+ InitializeAstVisitor(info->isolate()->stack_guard()->real_climit());
+}
+
+Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
+ // Create an inner HandleScope to avoid unnecessarily canonicalizing handles
+ // created as part of bytecode finalization.
+ HandleScope scope(isolate);
+ AllocateDeferredConstants();
+ if (HasStackOverflow()) return Handle<BytecodeArray>();
+ return scope.CloseAndEscape(builder()->ToBytecodeArray(isolate));
+}
+
+void BytecodeGenerator::AllocateDeferredConstants() {
+ // Build global declaration pair arrays.
+ for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
+ Handle<FixedArray> declarations =
+ globals_builder->AllocateDeclarationPairs(info());
+ if (declarations.is_null()) return SetStackOverflow();
+ builder()->InsertConstantPoolEntryAt(globals_builder->constant_pool_entry(),
+ declarations);
+ }
+
+ // Find or build shared function infos.
+ for (std::pair<FunctionLiteral*, size_t> literal : function_literals_) {
+ FunctionLiteral* expr = literal.first;
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
+ if (shared_info.is_null()) return SetStackOverflow();
+ builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+ }
+
+ // Find or build shared function infos for the native function templates.
+ for (std::pair<NativeFunctionLiteral*, size_t> literal :
+ native_function_literals_) {
+ NativeFunctionLiteral* expr = literal.first;
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfoForNative(expr->extension(),
+ expr->name());
+ if (shared_info.is_null()) return SetStackOverflow();
+ builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+ }
+}
+
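// Illustrative sketch with stand-in types: the two-phase protocol used by
// AllocateDeferredConstants. While GenerateBytecode runs under
// DisallowHeapAllocation, only an index is reserved; FinalizeBytecode later
// patches the real object into the reserved constant pool slot.
#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

class ConstantPool {
 public:
  size_t Reserve() {            // analogue of AllocateConstantPoolEntry()
    entries_.emplace_back();    // placeholder only, no heap object yet
    return entries_.size() - 1;
  }
  void InsertAt(size_t index, std::string value) {
    assert(entries_.at(index).empty());  // each slot is patched exactly once
    entries_[index] = std::move(value);
  }
  void Dump() const {
    for (const auto& entry : entries_) std::printf("%s\n", entry.c_str());
  }

 private:
  std::vector<std::string> entries_;
};

int main() {
  ConstantPool pool;
  size_t entry = pool.Reserve();              // during bytecode generation
  pool.InsertAt(entry, "declaration pairs");  // during finalization
  pool.Dump();
}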
+void BytecodeGenerator::GenerateBytecode() {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
// Initialize the incoming context.
ContextScope incoming_context(this, scope(), false);
@@ -586,25 +737,36 @@ Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
// Initialize control scope.
ControlScopeForTopLevel control(this);
+ RegisterAllocationScope register_scope(this);
+
+ if (IsResumableFunction(info()->literal()->kind())) {
+ generator_state_ = register_allocator()->NewRegister();
+ VisitGeneratorPrologue();
+ }
+
// Build function context only if there are context allocated variables.
if (scope()->NeedsContext()) {
// Push a new inner context scope for the function.
VisitNewLocalFunctionContext();
ContextScope local_function_context(this, scope(), false);
VisitBuildLocalActivationContext();
- MakeBytecodeBody();
+ GenerateBytecodeBody();
} else {
- MakeBytecodeBody();
+ GenerateBytecodeBody();
+ }
+
+ // In generator functions, we may not have visited every yield in the AST
+ // since we skip some obviously dead code. Hence the generated bytecode may
+ // contain jumps to unbound labels (resume points that will never be used).
+ // We bind these now.
+ for (auto& label : generator_resume_points_) {
+ if (!label.is_bound()) builder()->Bind(&label);
}
builder()->EnsureReturn();
- set_scope(nullptr);
- set_info(nullptr);
- return builder()->ToBytecodeArray();
}
-
-void BytecodeGenerator::MakeBytecodeBody() {
+void BytecodeGenerator::GenerateBytecodeBody() {
// Build the arguments object if it is used.
VisitArgumentsObject(scope()->arguments());
@@ -628,12 +790,94 @@ void BytecodeGenerator::MakeBytecodeBody() {
VisitDeclarations(scope()->declarations());
// Perform a stack-check before the body.
- builder()->StackCheck();
+ builder()->StackCheck(info()->literal()->start_position());
// Visit statements in the function body.
VisitStatements(info()->literal()->body());
}
+void BytecodeGenerator::BuildIndexedJump(Register index, size_t start_index,
+ size_t size,
+ ZoneVector<BytecodeLabel>& targets) {
+ // TODO(neis): Optimize this by using a proper jump table.
+ DCHECK_LE(start_index + size, targets.size());
+ for (size_t i = start_index; i < start_index + size; i++) {
+ builder()
+ ->LoadLiteral(Smi::FromInt(static_cast<int>(i)))
+ .CompareOperation(Token::Value::EQ_STRICT, index)
+ .JumpIfTrue(&(targets[i]));
+ }
+ BuildAbort(BailoutReason::kInvalidJumpTableIndex);
+}
+
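// Illustrative sketch with hypothetical names: the linear dispatch emitted
// by BuildIndexedJump. Each candidate index is compared in turn, so resume
// dispatch is O(size); the TODO above notes that a real jump table would
// make it O(1).
#include <cstdio>
#include <cstdlib>

void IndexedJump(int index, int start_index, int size) {
  for (int i = start_index; i < start_index + size; ++i) {
    if (index == i) {  // LoadLiteral + CompareOperation + JumpIfTrue
      std::printf("jump to resume point %d\n", i);
      return;
    }
  }
  std::abort();  // BuildAbort(kInvalidJumpTableIndex)
}

int main() { IndexedJump(2, 0, 4); }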
+void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
+ LoopBuilder* loop_builder) {
+ // Recall that stmt->yield_count() is always zero inside ordinary
+ // (i.e. non-generator) functions.
+
+ // Collect all labels for generator resume points within the loop (if any) so
+ // that they can be bound to the loop header below. Also create fresh labels
+ // for these resume points, to be used inside the loop.
+ ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
+ size_t first_yield = stmt->first_yield_id();
+ DCHECK_LE(first_yield + stmt->yield_count(), generator_resume_points_.size());
+ for (size_t id = first_yield; id < first_yield + stmt->yield_count(); id++) {
+ auto& label = generator_resume_points_[id];
+ resume_points_in_loop.push_back(label);
+ generator_resume_points_[id] = BytecodeLabel();
+ }
+
+ loop_builder->LoopHeader(&resume_points_in_loop);
+
+ // Insert an explicit {OsrPoll} right after the loop header, to trigger
+ // on-stack replacement when armed for the given loop nesting depth.
+ if (FLAG_ignition_osr) {
+ // TODO(4764): Merge this with another bytecode (e.g. {Jump} back edge).
+ int level = Min(loop_depth_, AbstractCode::kMaxLoopNestingMarker - 1);
+ builder()->OsrPoll(level);
+ }
+
+ if (stmt->yield_count() > 0) {
+ // If we are not resuming, fall through to loop body.
+ // If we are resuming, perform state dispatch.
+ BytecodeLabel not_resuming;
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+ .CompareOperation(Token::Value::EQ, generator_state_)
+ .JumpIfTrue(&not_resuming);
+ BuildIndexedJump(generator_state_, first_yield,
+ stmt->yield_count(), generator_resume_points_);
+ builder()->Bind(&not_resuming);
+ }
+}
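// Illustrative sketch with stand-in values (the real sentinel lives on
// JSGeneratorObject): the loop-header dispatch above. A state equal to the
// "executing" sentinel means we are not resuming and fall through into the
// body; otherwise the state names a yield whose resume point we jump to.
#include <cstdio>

constexpr int kGeneratorExecuting = -1;  // hypothetical sentinel value

void LoopHeader(int generator_state, int first_yield, int yield_count) {
  if (generator_state == kGeneratorExecuting) {
    std::puts("not resuming: fall through to loop body");
  } else if (generator_state >= first_yield &&
             generator_state < first_yield + yield_count) {
    std::printf("resuming: indexed jump to resume point %d\n",
                generator_state);
  }
}

int main() {
  LoopHeader(kGeneratorExecuting, 0, 2);  // ordinary iteration
  LoopHeader(1, 0, 2);                    // resuming at yield #1
}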
+
+void BytecodeGenerator::VisitGeneratorPrologue() {
+ // The generator resume trampoline abuses the new.target register both to
+ // indicate that this is a resume call and to pass in the generator object.
+ // In ordinary calls, new.target is always undefined because generator
+ // functions are non-constructable.
+ Register generator_object = Register::new_target();
+ BytecodeLabel regular_call;
+ builder()
+ ->LoadAccumulatorWithRegister(generator_object)
+ .JumpIfUndefined(&regular_call);
+
+ // This is a resume call. Restore registers and perform state dispatch.
+ // (The current context has already been restored by the trampoline.)
+ builder()
+ ->ResumeGenerator(generator_object)
+ .StoreAccumulatorInRegister(generator_state_);
+ BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
+ generator_resume_points_);
+
+ builder()
+ ->Bind(&regular_call)
+ .LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+ .StoreAccumulatorInRegister(generator_state_);
+ // This is a regular call. Fall through to the ordinary function prologue,
+ // after which we will run into the generator object creation and other extra
+ // code inserted by the parser.
+}
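// Illustrative sketch with stand-in types: the new.target dispatch in the
// prologue above. An undefined new.target (nullptr here) means an ordinary
// call; otherwise it carries the generator object handed over by the
// resume trampoline.
#include <cstdio>

struct GeneratorObject { int suspended_state; };  // hypothetical stand-in

void Prologue(GeneratorObject* new_target) {
  if (new_target == nullptr) {  // JumpIfUndefined(&regular_call)
    std::puts("regular call: run the ordinary function prologue");
    return;
  }
  // Resume call: restore the saved state, then dispatch to a resume point.
  std::printf("resume call: dispatch on state %d\n",
              new_target->suspended_state);
}

int main() {
  Prologue(nullptr);
  GeneratorObject generator{0};
  Prologue(&generator);
}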
void BytecodeGenerator::VisitBlock(Block* stmt) {
// Visit declarations and statements.
@@ -646,7 +890,6 @@ void BytecodeGenerator::VisitBlock(Block* stmt) {
}
}
-
void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
BlockBuilder block_builder(builder());
ControlScopeForBreakable execution_control(this, stmt, &block_builder);
@@ -657,31 +900,24 @@ void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
if (stmt->labels() != nullptr) block_builder.EndBlock();
}
-
void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* variable = decl->proxy()->var();
- VariableMode mode = decl->mode();
- // Const and let variables are initialized with the hole so that we can
- // check that they are only assigned once.
- bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- Handle<Oddball> value = variable->binding_needs_init()
- ? isolate()->factory()->the_hole_value()
- : isolate()->factory()->undefined_value();
- globals()->push_back(variable->name());
- globals()->push_back(value);
+ DCHECK(!variable->binding_needs_init());
+ FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ globals_builder()->AddUndefinedDeclaration(slot);
break;
}
case VariableLocation::LOCAL:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
Register destination(variable->index());
builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
}
break;
case VariableLocation::PARAMETER:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register destination(builder()->Parameter(variable->index() + 1));
@@ -689,51 +925,35 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
}
break;
case VariableLocation::CONTEXT:
- if (hole_init) {
+ if (variable->binding_needs_init()) {
builder()->LoadTheHole().StoreContextSlot(execution_context()->reg(),
variable->index());
}
break;
case VariableLocation::LOOKUP: {
- DCHECK(IsDeclaredVariableMode(mode));
+ DCHECK_EQ(VAR, variable->mode());
+ DCHECK(!variable->binding_needs_init());
- register_allocator()->PrepareForConsecutiveAllocations(3);
- Register name = register_allocator()->NextConsecutiveRegister();
- Register init_value = register_allocator()->NextConsecutiveRegister();
- Register attributes = register_allocator()->NextConsecutiveRegister();
+ Register name = register_allocator()->NewRegister();
- builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
- if (hole_init) {
- builder()->LoadTheHole().StoreAccumulatorInRegister(init_value);
- } else {
- // For variables, we must not use an initial value (such as 'undefined')
- // because we may have a (legal) redeclaration and we must not destroy
- // the current value.
- builder()
- ->LoadLiteral(Smi::FromInt(0))
- .StoreAccumulatorInRegister(init_value);
- }
builder()
- ->LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
- .StoreAccumulatorInRegister(attributes)
- .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+ ->LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(name)
+ .CallRuntime(Runtime::kDeclareEvalVar, name, 1);
break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
-
void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
Variable* variable = decl->proxy()->var();
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
- decl->fun(), info()->script(), info());
- // Check for stack-overflow exception.
- if (function.is_null()) return SetStackOverflow();
- globals()->push_back(variable->name());
- globals()->push_back(function);
+ FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+ globals_builder()->AddFunctionDeclaration(slot, decl->fun());
break;
}
case VariableLocation::PARAMETER:
@@ -753,63 +973,54 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
break;
}
case VariableLocation::LOOKUP: {
- register_allocator()->PrepareForConsecutiveAllocations(3);
+ register_allocator()->PrepareForConsecutiveAllocations(2);
Register name = register_allocator()->NextConsecutiveRegister();
Register literal = register_allocator()->NextConsecutiveRegister();
- Register attributes = register_allocator()->NextConsecutiveRegister();
builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
VisitForAccumulatorValue(decl->fun());
- builder()
- ->StoreAccumulatorInRegister(literal)
- .LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
- .StoreAccumulatorInRegister(attributes)
- .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+ builder()->StoreAccumulatorInRegister(literal).CallRuntime(
+ Runtime::kDeclareEvalFunction, name, 2);
+ break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
-
-void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
- UNIMPLEMENTED();
-}
-
-
-void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
- UNIMPLEMENTED();
-}
-
-
void BytecodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
RegisterAllocationScope register_scope(this);
- DCHECK(globals()->empty());
+ DCHECK(globals_builder()->empty());
for (int i = 0; i < declarations->length(); i++) {
RegisterAllocationScope register_scope(this);
Visit(declarations->at(i));
}
- if (globals()->empty()) return;
- int array_index = 0;
- Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
- static_cast<int>(globals()->size()), TENURED);
- for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
- int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
- DeclareGlobalsNativeFlag::encode(info()->is_native()) |
- DeclareGlobalsLanguageMode::encode(language_mode());
+ if (globals_builder()->empty()) return;
+
+ globals_builder()->set_constant_pool_entry(
+ builder()->AllocateConstantPoolEntry());
+ int encoded_flags = info()->GetDeclareGlobalsFlags();
- Register pairs = register_allocator()->NewRegister();
- builder()->LoadLiteral(data);
- builder()->StoreAccumulatorInRegister(pairs);
+ register_allocator()->PrepareForConsecutiveAllocations(3);
- Register flags = register_allocator()->NewRegister();
- builder()->LoadLiteral(Smi::FromInt(encoded_flags));
- builder()->StoreAccumulatorInRegister(flags);
- DCHECK(flags.index() == pairs.index() + 1);
+ Register pairs = register_allocator()->NextConsecutiveRegister();
+ Register flags = register_allocator()->NextConsecutiveRegister();
+ Register function = register_allocator()->NextConsecutiveRegister();
- builder()->CallRuntime(Runtime::kDeclareGlobals, pairs, 2);
- globals()->clear();
-}
+ // Emit code to declare globals.
+ builder()
+ ->LoadConstantPoolEntry(globals_builder()->constant_pool_entry())
+ .StoreAccumulatorInRegister(pairs)
+ .LoadLiteral(Smi::FromInt(encoded_flags))
+ .StoreAccumulatorInRegister(flags)
+ .MoveRegister(Register::function_closure(), function)
+ .CallRuntime(Runtime::kDeclareGlobalsForInterpreter, pairs, 3);
+ // Push and reset globals builder.
+ global_declarations_.push_back(globals_builder());
+ globals_builder_ = new (zone()) GlobalDeclarationsBuilder(zone());
+}
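// Illustrative sketch with hypothetical stand-ins: the call site above
// passes three consecutive registers -- the declaration pairs, the encoded
// flags and the closure -- which the variadic runtime entry reads as one
// contiguous block starting at the first register.
#include <cstdio>

void DeclareGlobalsForInterpreter(const char* const args[], int count) {
  for (int i = 0; i < count; ++i) std::printf("arg %d: %s\n", i, args[i]);
}

int main() {
  // Mirrors PrepareForConsecutiveAllocations(3): pairs, flags, function.
  const char* args[] = {"declaration pairs", "encoded flags", "closure"};
  DeclareGlobalsForInterpreter(args, 3);
}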
void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
@@ -821,20 +1032,16 @@ void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
}
}
-
void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
builder()->SetStatementPosition(stmt);
VisitForEffect(stmt->expression());
}
-
void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
}
-
void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
builder()->SetStatementPosition(stmt);
- BytecodeLabel else_label, end_label;
if (stmt->condition()->ToBooleanIsTrue()) {
// Generate then block unconditionally as always true.
Visit(stmt->then_statement());
@@ -847,55 +1054,53 @@ void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
// TODO(oth): If the then statement is a BreakStatement or a
// ContinueStatement, we can reduce the number of generated
// jump/jump_ifs here. See the BasicLoops test.
- VisitForAccumulatorValue(stmt->condition());
- builder()->JumpIfFalse(&else_label);
+ BytecodeLabel end_label;
+ BytecodeLabels then_labels(zone()), else_labels(zone());
+ VisitForTest(stmt->condition(), &then_labels, &else_labels,
+ TestFallthrough::kThen);
+
+ then_labels.Bind(builder());
Visit(stmt->then_statement());
+
if (stmt->HasElseStatement()) {
builder()->Jump(&end_label);
- builder()->Bind(&else_label);
+ else_labels.Bind(builder());
Visit(stmt->else_statement());
} else {
- builder()->Bind(&else_label);
+ else_labels.Bind(builder());
}
builder()->Bind(&end_label);
}
}
-
void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* stmt) {
Visit(stmt->statement());
}
-
void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
builder()->SetStatementPosition(stmt);
execution_control()->Continue(stmt->target());
}
-
void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
builder()->SetStatementPosition(stmt);
execution_control()->Break(stmt->target());
}
-
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
builder()->SetStatementPosition(stmt);
VisitForAccumulatorValue(stmt->expression());
execution_control()->ReturnAccumulator();
}
-
void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
builder()->SetStatementPosition(stmt);
VisitForAccumulatorValue(stmt->expression());
- builder()->CastAccumulatorToJSObject();
VisitNewLocalWithContext();
VisitInScope(stmt->statement(), stmt->scope());
}
-
void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// We need this scope because we visit for register values. We have to
// maintain an execution result scope where registers can be allocated.
@@ -910,7 +1115,6 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Register tag = VisitForRegisterValue(stmt->tag());
// Iterate over all cases and create nodes for label comparison.
- BytecodeLabel done_label;
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
@@ -931,8 +1135,8 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
switch_builder.DefaultAt(default_index);
} else {
// Otherwise if we have reached here none of the cases matched, so jump to
- // done.
- builder()->Jump(&done_label);
+ // the end.
+ switch_builder.Break();
}
// Iterate over all cases and create the case bodies.
@@ -941,12 +1145,9 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
switch_builder.SetCaseTarget(i);
VisitStatements(clause->statements());
}
- builder()->Bind(&done_label);
-
- switch_builder.SetBreakTarget(done_label);
+ switch_builder.BindBreakTarget();
}
-
void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
// Handled entirely in VisitSwitchStatement.
UNREACHABLE();
@@ -955,26 +1156,25 @@ void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop_builder) {
ControlScopeForIteration execution_control(this, stmt, loop_builder);
- builder()->StackCheck();
+ builder()->StackCheck(stmt->position());
Visit(stmt->body());
+ loop_builder->BindContinueTarget();
}
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder());
- loop_builder.LoopHeader();
if (stmt->cond()->ToBooleanIsFalse()) {
VisitIterationBody(stmt, &loop_builder);
- loop_builder.Condition();
} else if (stmt->cond()->ToBooleanIsTrue()) {
- loop_builder.Condition();
+ VisitIterationHeader(stmt, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader();
} else {
+ VisitIterationHeader(stmt, &loop_builder);
VisitIterationBody(stmt, &loop_builder);
- loop_builder.Condition();
builder()->SetExpressionAsStatementPosition(stmt->cond());
- VisitForAccumulatorValue(stmt->cond());
- loop_builder.JumpToHeaderIfTrue();
+ VisitForTest(stmt->cond(), loop_builder.header_labels(),
+ loop_builder.break_labels(), TestFallthrough::kElse);
}
loop_builder.EndLoop();
}
@@ -986,19 +1186,19 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
}
LoopBuilder loop_builder(builder());
- loop_builder.LoopHeader();
- loop_builder.Condition();
+ VisitIterationHeader(stmt, &loop_builder);
if (!stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
- VisitForAccumulatorValue(stmt->cond());
- loop_builder.BreakIfFalse();
+ BytecodeLabels loop_body(zone());
+ VisitForTest(stmt->cond(), &loop_body, loop_builder.break_labels(),
+ TestFallthrough::kThen);
+ loop_body.Bind(builder());
}
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader();
loop_builder.EndLoop();
}
-
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
if (stmt->init() != nullptr) {
Visit(stmt->init());
@@ -1010,16 +1210,16 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
}
LoopBuilder loop_builder(builder());
- loop_builder.LoopHeader();
- loop_builder.Condition();
+ VisitIterationHeader(stmt, &loop_builder);
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
builder()->SetExpressionAsStatementPosition(stmt->cond());
- VisitForAccumulatorValue(stmt->cond());
- loop_builder.BreakIfFalse();
+ BytecodeLabels loop_body(zone());
+ VisitForTest(stmt->cond(), &loop_body, loop_builder.break_labels(),
+ TestFallthrough::kThen);
+ loop_body.Bind(builder());
}
VisitIterationBody(stmt, &loop_builder);
if (stmt->next() != nullptr) {
- loop_builder.Next();
builder()->SetStatementPosition(stmt->next());
Visit(stmt->next());
}
@@ -1027,7 +1227,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
loop_builder.EndLoop();
}
-
void BytecodeGenerator::VisitForInAssignment(Expression* expr,
FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpression());
@@ -1101,7 +1300,6 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
}
}
-
void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
if (stmt->subject()->IsNullLiteral() ||
stmt->subject()->IsUndefinedLiteral()) {
@@ -1118,8 +1316,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->JumpIfUndefined(&subject_undefined_label);
builder()->JumpIfNull(&subject_null_label);
Register receiver = register_allocator()->NewRegister();
- builder()->CastAccumulatorToJSObject();
- builder()->StoreAccumulatorInRegister(receiver);
+ builder()->CastAccumulatorToJSObject(receiver);
register_allocator()->PrepareForConsecutiveAllocations(3);
Register cache_type = register_allocator()->NextConsecutiveRegister();
@@ -1127,7 +1324,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Register cache_length = register_allocator()->NextConsecutiveRegister();
// Used as kRegTriple and kRegPair in ForInPrepare and ForInNext.
USE(cache_array);
- builder()->ForInPrepare(cache_type);
+ builder()->ForInPrepare(receiver, cache_type);
// Set up loop counter
Register index = register_allocator()->NewRegister();
@@ -1135,9 +1332,8 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->StoreAccumulatorInRegister(index);
// The loop
- loop_builder.LoopHeader();
+ VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->each());
- loop_builder.Condition();
builder()->ForInDone(index, cache_length);
loop_builder.BreakIfTrue();
DCHECK(Register::AreContiguous(cache_type, cache_array));
@@ -1146,7 +1342,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
loop_builder.ContinueIfUndefined();
VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
VisitIterationBody(stmt, &loop_builder);
- loop_builder.Next();
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
loop_builder.JumpToHeader();
@@ -1155,15 +1350,13 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->Bind(&subject_undefined_label);
}
-
void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
LoopBuilder loop_builder(builder());
- ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+ builder()->SetExpressionAsStatementPosition(stmt->assign_iterator());
VisitForEffect(stmt->assign_iterator());
- loop_builder.LoopHeader();
- loop_builder.Next();
+ VisitIterationHeader(stmt, &loop_builder);
builder()->SetExpressionAsStatementPosition(stmt->next_result());
VisitForEffect(stmt->next_result());
VisitForAccumulatorValue(stmt->result_done());
@@ -1175,9 +1368,8 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
loop_builder.EndLoop();
}
-
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- TryCatchBuilder try_control_builder(builder());
+ TryCatchBuilder try_control_builder(builder(), stmt->catch_prediction());
Register no_reg;
// Preserve the context in a dedicated register, so that it can be restored
@@ -1212,9 +1404,8 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
try_control_builder.EndCatch();
}
-
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- TryFinallyBuilder try_control_builder(builder(), IsInsideTryCatch());
+ TryFinallyBuilder try_control_builder(builder(), stmt->catch_prediction());
Register no_reg;
// We keep a record of all paths that enter the finally-block to be able to
@@ -1277,54 +1468,35 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
commands.ApplyDeferredCommands();
}
-
void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
builder()->SetStatementPosition(stmt);
builder()->Debugger();
}
-
void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Find or build a shared function info.
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
- if (shared_info.is_null()) {
- return SetStackOverflow();
- }
- builder()->CreateClosure(shared_info,
- expr->pretenure() ? TENURED : NOT_TENURED);
+ uint8_t flags = CreateClosureFlags::Encode(expr->pretenure(),
+ scope()->is_function_scope());
+ size_t entry = builder()->AllocateConstantPoolEntry();
+ builder()->CreateClosure(entry, flags);
+ function_literals_.push_back(std::make_pair(expr, entry));
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
- if (expr->scope()->ContextLocalCount() > 0) {
- VisitNewLocalBlockContext(expr->scope());
- ContextScope scope(this, expr->scope());
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- } else {
- VisitDeclarations(expr->scope()->declarations());
- VisitClassLiteralContents(expr);
- }
-}
-
-void BytecodeGenerator::VisitClassLiteralContents(ClassLiteral* expr) {
VisitClassLiteralForRuntimeDefinition(expr);
// Load the "prototype" from the constructor.
register_allocator()->PrepareForConsecutiveAllocations(2);
Register literal = register_allocator()->NextConsecutiveRegister();
Register prototype = register_allocator()->NextConsecutiveRegister();
- Handle<String> name = isolate()->factory()->prototype_string();
FeedbackVectorSlot slot = expr->PrototypeSlot();
builder()
->StoreAccumulatorInRegister(literal)
- .LoadNamedProperty(literal, name, feedback_index(slot))
+ .LoadNamedProperty(literal, prototype_string(), feedback_index(slot))
.StoreAccumulatorInRegister(prototype);
VisitClassLiteralProperties(expr, literal, prototype);
- builder()->CallRuntime(Runtime::kFinalizeClassDefinition, literal, 2);
+ builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
Variable* var = expr->class_variable_proxy()->var();
@@ -1385,7 +1557,7 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
}
VisitForAccumulatorValue(property->key());
- builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+ builder()->CastAccumulatorToName(key);
// The static prototype property is read only. We handle the non computed
// property name case in the parser. Since this is the only case where we
// need to check for an own read only property we special case this so we do
@@ -1438,7 +1610,7 @@ void BytecodeGenerator::VisitClassLiteralStaticPrototypeWithComputedName(
Register key) {
BytecodeLabel done;
builder()
- ->LoadLiteral(isolate()->factory()->prototype_string())
+ ->LoadLiteral(prototype_string())
.CompareOperation(Token::Value::EQ_STRICT, key)
.JumpIfFalse(&done)
.CallRuntime(Runtime::kThrowStaticPrototypeError, Register(0), 0)
@@ -1447,64 +1619,65 @@ void BytecodeGenerator::VisitClassLiteralStaticPrototypeWithComputedName(
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
- // Find or build a shared function info for the native function template.
- Handle<SharedFunctionInfo> shared_info =
- Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
- builder()->CreateClosure(shared_info, NOT_TENURED);
+ size_t entry = builder()->AllocateConstantPoolEntry();
+ builder()->CreateClosure(entry, NOT_TENURED);
+ native_function_literals_.push_back(std::make_pair(expr, entry));
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
VisitBlock(expr->block());
VisitVariableProxy(expr->result());
}
-
void BytecodeGenerator::VisitConditional(Conditional* expr) {
- // TODO(rmcilroy): Spot easy cases where there code would not need to
- // emit the then block or the else block, e.g. condition is
- // obviously true/1/false/0.
-
- BytecodeLabel else_label, end_label;
+ if (expr->condition()->ToBooleanIsTrue()) {
+ // Generate then block unconditionally as always true.
+ VisitForAccumulatorValue(expr->then_expression());
+ } else if (expr->condition()->ToBooleanIsFalse()) {
+ // Generate else block unconditionally if it exists.
+ VisitForAccumulatorValue(expr->else_expression());
+ } else {
+ BytecodeLabel end_label;
+ BytecodeLabels then_labels(zone()), else_labels(zone());
- VisitForAccumulatorValue(expr->condition());
- builder()->JumpIfFalse(&else_label);
+ VisitForTest(expr->condition(), &then_labels, &else_labels,
+ TestFallthrough::kThen);
- VisitForAccumulatorValue(expr->then_expression());
- builder()->Jump(&end_label);
+ then_labels.Bind(builder());
+ VisitForAccumulatorValue(expr->then_expression());
+ builder()->Jump(&end_label);
- builder()->Bind(&else_label);
- VisitForAccumulatorValue(expr->else_expression());
- builder()->Bind(&end_label);
+ else_labels.Bind(builder());
+ VisitForAccumulatorValue(expr->else_expression());
+ builder()->Bind(&end_label);
+ }
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitLiteral(Literal* expr) {
if (!execution_result()->IsEffect()) {
- Handle<Object> value = expr->value();
- if (value->IsSmi()) {
- builder()->LoadLiteral(Smi::cast(*value));
- } else if (value->IsUndefined()) {
+ const AstValue* raw_value = expr->raw_value();
+ if (raw_value->IsSmi()) {
+ builder()->LoadLiteral(raw_value->AsSmi());
+ } else if (raw_value->IsUndefined()) {
builder()->LoadUndefined();
- } else if (value->IsTrue()) {
+ } else if (raw_value->IsTrue()) {
builder()->LoadTrue();
- } else if (value->IsFalse()) {
+ } else if (raw_value->IsFalse()) {
builder()->LoadFalse();
- } else if (value->IsNull()) {
+ } else if (raw_value->IsNull()) {
builder()->LoadNull();
- } else if (value->IsTheHole()) {
+ } else if (raw_value->IsTheHole()) {
builder()->LoadTheHole();
} else {
- builder()->LoadLiteral(value);
+ builder()->LoadLiteral(raw_value->value());
}
execution_result()->SetResultInAccumulator();
}
}
-
void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Materialize a regular expression literal.
builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
@@ -1512,17 +1685,17 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- // Deep-copy the literal boilerplate.
- builder()->CreateObjectLiteral(expr->constant_properties(),
- expr->literal_index(),
- expr->ComputeFlags(true));
-
+ // Copy the literal boilerplate.
+ uint8_t flags = CreateObjectLiteralFlags::Encode(
+ FastCloneShallowObjectStub::IsSupported(expr),
+ FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
+ expr->ComputeFlags());
// Allocate in the outer scope since this register is used to return the
// expression's results to the caller.
Register literal = register_allocator()->outer()->NewRegister();
- builder()->StoreAccumulatorInRegister(literal);
+ builder()->CreateObjectLiteral(expr->constant_properties(),
+ expr->literal_index(), flags, literal);
// Store computed values into the literal.
int property_index = 0;
@@ -1533,7 +1706,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->IsCompileTimeValue()) continue;
RegisterAllocationScope inner_register_scope(this);
- Literal* literal_key = property->key()->AsLiteral();
+ Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
@@ -1543,7 +1716,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED: {
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (literal_key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
+ DCHECK(key->IsPropertyName());
if (property->emit_store()) {
VisitForAccumulatorValue(property->value());
if (FunctionLiteral::NeedsHomeObject(property->value())) {
@@ -1551,12 +1725,12 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Register value = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(value);
builder()->StoreNamedProperty(
- literal, literal_key->AsPropertyName(),
+ literal, key->AsPropertyName(),
feedback_index(property->GetSlot(0)), language_mode());
VisitSetHomeObject(value, literal, property, 1);
} else {
builder()->StoreNamedProperty(
- literal, literal_key->AsPropertyName(),
+ literal, key->AsPropertyName(),
feedback_index(property->GetSlot(0)), language_mode());
}
} else {
@@ -1600,12 +1774,12 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(literal_key)->second->getter = property;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(literal_key)->second->setter = property;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1673,7 +1847,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->MoveRegister(literal, literal_argument);
VisitForAccumulatorValue(property->key());
- builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+ builder()->CastAccumulatorToName(key);
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value);
VisitSetHomeObject(value, literal, property);
@@ -1705,7 +1879,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
execution_result()->SetResultInRegister(literal);
}
-
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Deep-copy the literal boilerplate.
builder()->CreateArrayLiteral(expr->constant_elements(),
@@ -1745,48 +1918,44 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
builder()->SetExpressionPosition(proxy);
VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
}
-void BytecodeGenerator::BuildHoleCheckForVariableLoad(VariableMode mode,
- Handle<String> name) {
- if (mode == CONST_LEGACY) {
- BytecodeLabel end_label;
- builder()->JumpIfNotHole(&end_label).LoadUndefined().Bind(&end_label);
- } else if (mode == LET || mode == CONST) {
- BuildThrowIfHole(name);
+void BytecodeGenerator::BuildHoleCheckForVariableLoad(Variable* variable) {
+ if (variable->binding_needs_init()) {
+ BuildThrowIfHole(variable->name());
}
}
void BytecodeGenerator::VisitVariableLoad(Variable* variable,
FeedbackVectorSlot slot,
TypeofMode typeof_mode) {
- VariableMode mode = variable->mode();
switch (variable->location()) {
case VariableLocation::LOCAL: {
Register source(Register(variable->index()));
+ // We need to load the variable into the accumulator, even when in a
+ // VisitForRegisterScope, in order to avoid register aliasing if
+ // subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(mode, variable->name());
- execution_result()->SetResultInAccumulator();
+ BuildHoleCheckForVariableLoad(variable);
break;
}
case VariableLocation::PARAMETER: {
// The parameter indices are shifted by 1 (receiver is variable
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register source = builder()->Parameter(variable->index() + 1);
+ // We need to load the variable into the accumulator, even when in a
+ // VisitForRegisterScope, in order to avoid register aliasing if
+ // subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(mode, variable->name());
- execution_result()->SetResultInAccumulator();
+ BuildHoleCheckForVariableLoad(variable);
break;
}
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
- builder()->LoadGlobal(variable->name(), feedback_index(slot),
- typeof_mode);
- execution_result()->SetResultInAccumulator();
+ builder()->LoadGlobal(feedback_index(slot), typeof_mode);
break;
}
case VariableLocation::CONTEXT: {
@@ -1813,16 +1982,17 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
}
builder()->LoadContextSlot(context_reg, variable->index());
- BuildHoleCheckForVariableLoad(mode, variable->name());
- execution_result()->SetResultInAccumulator();
+ BuildHoleCheckForVariableLoad(variable);
break;
}
case VariableLocation::LOOKUP: {
builder()->LoadLookupSlot(variable->name(), typeof_mode);
- execution_result()->SetResultInAccumulator();
break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
@@ -1874,6 +2044,15 @@ void BytecodeGenerator::BuildKeyedSuperPropertyStore(Register receiver,
builder()->CallRuntime(function_id, receiver, 4);
}
+void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
+ RegisterAllocationScope register_scope(this);
+ Register reason = register_allocator()->NewRegister();
+ builder()
+ ->LoadLiteral(Smi::FromInt(static_cast<int>(bailout_reason)))
+ .StoreAccumulatorInRegister(reason)
+ .CallRuntime(Runtime::kAbort, reason, 1);
+}
+
void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
RegisterAllocationScope register_scope(this);
Register name_reg = register_allocator()->NewRegister();
@@ -1902,30 +2081,16 @@ void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
builder()->Bind(&no_reference_error);
}
-void BytecodeGenerator::BuildThrowReassignConstant(Handle<String> name) {
- // TODO(mythria): This will be replaced by a new bytecode that throws an
- // appropriate error depending on the whether the value is a hole or not.
- BytecodeLabel const_assign_error;
- builder()->JumpIfNotHole(&const_assign_error);
- BuildThrowReferenceError(name);
- builder()
- ->Bind(&const_assign_error)
- .CallRuntime(Runtime::kThrowConstAssignError, Register(), 0);
-}
-
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
- VariableMode mode = variable->mode();
- DCHECK(mode != CONST_LEGACY);
- if (mode == CONST && op != Token::INIT) {
- // Non-intializing assignments to constant is not allowed.
- BuildThrowReassignConstant(variable->name());
- } else if (mode == LET && op != Token::INIT) {
- // Perform an initialization check for let declared variables.
+ DCHECK(variable->mode() != CONST_LEGACY);
+ if (op != Token::INIT) {
+ // Perform an initialization check for let/const declared variables.
// E.g. let x = (x = 20); is not allowed.
BuildThrowIfHole(variable->name());
} else {
- DCHECK(variable->is_this() && mode == CONST && op == Token::INIT);
+ DCHECK(variable->is_this() && variable->mode() == CONST &&
+ op == Token::INIT);
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
@@ -1940,9 +2105,8 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
RegisterAllocationScope assignment_register_scope(this);
BytecodeLabel end_label;
bool hole_check_required =
- (mode == CONST_LEGACY) || (mode == LET && op != Token::INIT) ||
- (mode == CONST && op != Token::INIT) ||
- (mode == CONST && op == Token::INIT && variable->is_this());
+ variable->binding_needs_init() &&
+ (op != Token::INIT || (mode == CONST && variable->is_this()));
switch (variable->location()) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
@@ -1960,26 +2124,18 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
->StoreAccumulatorInRegister(value_temp)
.LoadAccumulatorWithRegister(destination);
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an intialization check for legacy constants.
- builder()
- ->JumpIfNotHole(&end_label)
- .MoveRegister(value_temp, destination)
- .Bind(&end_label)
- .LoadAccumulatorWithRegister(value_temp);
- // Break here because the value should not be stored unconditionally.
- break;
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
- DCHECK(!is_strict(language_mode()));
- // Ensure accumulator is in the correct state.
- builder()->LoadAccumulatorWithRegister(value_temp);
- // Break here, non-initializing assignments to legacy constants are
- // ignored.
- break;
- } else {
- BuildHoleCheckForVariableAssignment(variable, op);
- builder()->LoadAccumulatorWithRegister(value_temp);
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+
+ if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
+ if (mode == CONST || is_strict(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+ 0);
}
+ // Non-initializing assignments to legacy constants are ignored
+      // in sloppy mode. Break here to avoid storing into the variable.
+ break;
}
builder()->StoreAccumulatorInRegister(destination);
@@ -2025,61 +2181,33 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
->StoreAccumulatorInRegister(value_temp)
.LoadContextSlot(context_reg, variable->index());
- if (mode == CONST_LEGACY && op == Token::INIT) {
- // Perform an intialization check for legacy constants.
- builder()
- ->JumpIfNotHole(&end_label)
- .LoadAccumulatorWithRegister(value_temp)
- .StoreContextSlot(context_reg, variable->index())
- .Bind(&end_label);
- builder()->LoadAccumulatorWithRegister(value_temp);
- // Break here because the value should not be stored unconditionally.
- // The above code performs the store conditionally.
- break;
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
- DCHECK(!is_strict(language_mode()));
- // Ensure accumulator is in the correct state.
- builder()->LoadAccumulatorWithRegister(value_temp);
- // Break here, non-initializing assignments to legacy constants are
- // ignored.
- break;
- } else {
- BuildHoleCheckForVariableAssignment(variable, op);
- builder()->LoadAccumulatorWithRegister(value_temp);
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+
+ if ((mode == CONST || mode == CONST_LEGACY) && op != Token::INIT) {
+ if (mode == CONST || is_strict(language_mode())) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError, Register(),
+ 0);
}
+ // Non-initializing assignments to legacy constants are ignored
+      // in sloppy mode. Break here to avoid storing into the variable.
+ break;
}
builder()->StoreContextSlot(context_reg, variable->index());
break;
}
case VariableLocation::LOOKUP: {
- if (mode == CONST_LEGACY && op == Token::INIT) {
- register_allocator()->PrepareForConsecutiveAllocations(3);
- Register value = register_allocator()->NextConsecutiveRegister();
- Register context = register_allocator()->NextConsecutiveRegister();
- Register name = register_allocator()->NextConsecutiveRegister();
-
- // InitializeLegacyConstLookupSlot runtime call returns the 'value'
- // passed to it. So, accumulator will have its original contents when
- // runtime call returns.
- builder()
- ->StoreAccumulatorInRegister(value)
- .MoveRegister(execution_context()->reg(), context)
- .LoadLiteral(variable->name())
- .StoreAccumulatorInRegister(name)
- .CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, value, 3);
- } else if (mode == CONST_LEGACY && op != Token::INIT) {
- // Non-intializing assignments to legacy constants are ignored.
- DCHECK(!is_strict(language_mode()));
- } else {
- builder()->StoreLookupSlot(variable->name(), language_mode());
- }
+ DCHECK_NE(CONST_LEGACY, variable->mode());
+ builder()->StoreLookupSlot(variable->name(), language_mode());
break;
}
+ case VariableLocation::MODULE:
+ UNREACHABLE();
}
}
-
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
Register object, key, home_object, value;
@@ -2186,7 +2314,10 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
}
VisitForAccumulatorValue(expr->value());
- builder()->BinaryOperation(expr->binary_op(), old_value);
+ FeedbackVectorSlot slot =
+ expr->binary_operation()->BinaryOperationFeedbackSlot();
+ builder()->BinaryOperation(expr->binary_op(), old_value,
+ feedback_index(slot));
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -2224,22 +2355,95 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
execution_result()->SetResultInAccumulator();
}
+void BytecodeGenerator::VisitYield(Yield* expr) {
+ builder()->SetExpressionPosition(expr);
+ Register value = VisitForRegisterValue(expr->expression());
-void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
+ Register generator = VisitForRegisterValue(expr->generator_object());
+ // Save context, registers, and state. Then return.
+ builder()
+ ->LoadLiteral(Smi::FromInt(expr->yield_id()))
+ .SuspendGenerator(generator)
+ .LoadAccumulatorWithRegister(value)
+ .Return(); // Hard return (ignore any finally blocks).
+
+ builder()->Bind(&(generator_resume_points_[expr->yield_id()]));
+ // Upon resume, we continue here.
+
+ {
+ RegisterAllocationScope register_scope(this);
+
+ // Update state to indicate that we have finished resuming. Loop headers
+ // rely on this.
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+ .StoreAccumulatorInRegister(generator_state_);
+
+ Register input = register_allocator()->NewRegister();
+ builder()
+ ->CallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos, generator, 1)
+ .StoreAccumulatorInRegister(input);
+
+ Register resume_mode = register_allocator()->NewRegister();
+ builder()
+ ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator, 1)
+ .StoreAccumulatorInRegister(resume_mode);
+
+ // Now dispatch on resume mode.
+
+ BytecodeLabel resume_with_next;
+ BytecodeLabel resume_with_return;
+ BytecodeLabel resume_with_throw;
+
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(&resume_with_next)
+ .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(&resume_with_throw)
+ .Jump(&resume_with_return);
+
+ builder()->Bind(&resume_with_return);
+ {
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register done = register_allocator()->NextConsecutiveRegister();
+ builder()
+ ->MoveRegister(input, value)
+ .LoadTrue()
+ .StoreAccumulatorInRegister(done)
+ .CallRuntime(Runtime::kInlineCreateIterResultObject, value, 2);
+ execution_control()->ReturnAccumulator();
+ }
+
+ builder()->Bind(&resume_with_throw);
+ builder()->SetExpressionPosition(expr);
+ builder()->LoadAccumulatorWithRegister(input);
+ if (expr->rethrow_on_exception()) {
+ builder()->ReThrow();
+ } else {
+ builder()->Throw();
+ }
+
+ builder()->Bind(&resume_with_next);
+ builder()->LoadAccumulatorWithRegister(input);
+ }
+ execution_result()->SetResultInAccumulator();
+}
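// Illustrative sketch with a stand-in enum: the three-way dispatch after a
// resume in VisitYield. The trampoline's resume mode decides whether the
// sent value becomes the yield's result (next), is thrown at the suspension
// point (throw), or completes the generator (return).
#include <cstdio>

enum class ResumeMode { kNext, kThrow, kReturn };  // hypothetical stand-in

void Resume(ResumeMode mode, const char* input) {
  switch (mode) {
    case ResumeMode::kNext:
      std::printf("yield expression evaluates to %s\n", input);
      break;
    case ResumeMode::kThrow:
      std::printf("throw %s at the suspension point\n", input);
      break;
    case ResumeMode::kReturn:
      std::printf("return {value: %s, done: true}\n", input);
      break;
  }
}

int main() {
  Resume(ResumeMode::kNext, "42");
  Resume(ResumeMode::kReturn, "7");
}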
void BytecodeGenerator::VisitThrow(Throw* expr) {
VisitForAccumulatorValue(expr->exception());
builder()->SetExpressionPosition(expr);
builder()->Throw();
- // Throw statments are modeled as expression instead of statments. These are
- // converted from assignment statements in Rewriter::ReWrite pass. An
+ // Throw statements are modeled as expressions instead of statements. These
+ // are converted from assignment statements in Rewriter::ReWrite pass. An
// assignment statement expects a value in the accumulator. This is a hack to
  // avoid DCHECK failures asserting that the accumulator has been set.
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
LhsKind property_kind = Property::GetAssignType(expr);
FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
@@ -2363,7 +2567,7 @@ Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
void BytecodeGenerator::VisitCall(Call* expr) {
Expression* callee_expr = expr->expression();
- Call::CallType call_type = expr->GetCallType(isolate());
+ Call::CallType call_type = expr->GetCallType();
if (call_type == Call::SUPER_CALL) {
return VisitCallSuper(expr);
@@ -2452,12 +2656,14 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// callee value.
if (call_type == Call::POSSIBLY_EVAL_CALL && args->length() > 0) {
RegisterAllocationScope inner_register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(5);
+ register_allocator()->PrepareForConsecutiveAllocations(6);
Register callee_for_eval = register_allocator()->NextConsecutiveRegister();
Register source = register_allocator()->NextConsecutiveRegister();
Register function = register_allocator()->NextConsecutiveRegister();
Register language = register_allocator()->NextConsecutiveRegister();
- Register position = register_allocator()->NextConsecutiveRegister();
+ Register eval_scope_position =
+ register_allocator()->NextConsecutiveRegister();
+ Register eval_position = register_allocator()->NextConsecutiveRegister();
// Set up arguments for ResolvePossiblyDirectEval by copying callee, source
// strings and function closure, and loading language and
@@ -2470,17 +2676,30 @@ void BytecodeGenerator::VisitCall(Call* expr) {
.StoreAccumulatorInRegister(language)
.LoadLiteral(
Smi::FromInt(execution_context()->scope()->start_position()))
- .StoreAccumulatorInRegister(position);
+ .StoreAccumulatorInRegister(eval_scope_position)
+ .LoadLiteral(Smi::FromInt(expr->position()))
+ .StoreAccumulatorInRegister(eval_position);
// Call ResolvePossiblyDirectEval and modify the callee.
builder()
- ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 5)
+ ->CallRuntime(Runtime::kResolvePossiblyDirectEval, callee_for_eval, 6)
.StoreAccumulatorInRegister(callee);
}
builder()->SetExpressionPosition(expr);
- builder()->Call(callee, receiver, 1 + args->length(),
- feedback_index(expr->CallFeedbackICSlot()),
+
+ int feedback_slot_index;
+ if (expr->CallFeedbackICSlot().IsInvalid()) {
+ DCHECK(call_type == Call::POSSIBLY_EVAL_CALL);
+ // Valid type feedback slots can only be greater than kReservedIndexCount.
+  // We use 0 to indicate an invalid slot id. Statically assert that 0 cannot
+ // be a valid slot id.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+ feedback_slot_index = 0;
+ } else {
+ feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ }
+ builder()->Call(callee, receiver, 1 + args->length(), feedback_slot_index,
expr->tail_call_mode());
execution_result()->SetResultInAccumulator();
}
@@ -2529,7 +2748,6 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
if (expr->is_jsruntime()) {
@@ -2550,14 +2768,12 @@ void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitVoid(UnaryOperation* expr) {
VisitForEffect(expr->expression());
builder()->LoadUndefined();
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
if (expr->expression()->IsVariableProxy()) {
// Typeof does not throw a reference error on global variables, hence we
@@ -2572,14 +2788,24 @@ void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitNot(UnaryOperation* expr) {
- VisitForAccumulatorValue(expr->expression());
- builder()->LogicalNot();
- execution_result()->SetResultInAccumulator();
+ if (execution_result()->IsEffect()) {
+ VisitForEffect(expr->expression());
+ } else if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+ // No actual logical negation happening, we just swap the control flow by
+ // swapping the target labels and the fallthrough branch.
+ VisitForTest(expr->expression(), test_result->else_labels(),
+ test_result->then_labels(),
+ test_result->inverted_fallthrough());
+ test_result->SetResultConsumedByTest();
+ } else {
+ VisitForAccumulatorValue(expr->expression());
+ builder()->LogicalNot();
+ execution_result()->SetResultInAccumulator();
+ }
}
-
void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::Value::NOT:
@@ -2605,7 +2831,6 @@ void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
}
-
void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
if (expr->expression()->IsProperty()) {
// Delete of an object property is allowed both in sloppy
@@ -2619,7 +2844,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
// not allowed in strict mode. Deleting 'this' is allowed in both modes.
VariableProxy* proxy = expr->expression()->AsVariableProxy();
Variable* variable = proxy->var();
- DCHECK(is_sloppy(language_mode()) || variable->HasThisName(isolate()));
+ DCHECK(is_sloppy(language_mode()) || variable->is_this());
switch (variable->location()) {
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
@@ -2641,7 +2866,7 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
case VariableLocation::CONTEXT: {
// Deleting local var/let/const, context variables, and arguments
// does not have any effect.
- if (variable->HasThisName(isolate())) {
+ if (variable->is_this()) {
builder()->LoadTrue();
} else {
builder()->LoadFalse();
@@ -2667,7 +2892,6 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
@@ -2675,8 +2899,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
Property* property = expr->expression()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
- // TODO(rmcilroy): Set is_postfix to false if visiting for effect.
- bool is_postfix = expr->is_postfix();
+ bool is_postfix = expr->is_postfix() && !execution_result()->IsEffect();
// Evaluate LHS expression and get old value.
Register object, home_object, key, old_value, value;
@@ -2739,17 +2962,17 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
- // Convert old value into a number.
- builder()->CastAccumulatorToNumber();
-
// Save result for postfix expressions.
if (is_postfix) {
old_value = register_allocator()->outer()->NewRegister();
- builder()->StoreAccumulatorInRegister(old_value);
+
+ // Convert old value into a number before saving it.
+ builder()->CastAccumulatorToNumber(old_value);
}
// Perform +1/-1 operation.
- builder()->CountOperation(expr->binary_op());
+ FeedbackVectorSlot slot = expr->CountBinaryOpFeedbackSlot();
+ builder()->CountOperation(expr->binary_op(), feedback_index(slot));
// Store the value.
builder()->SetExpressionPosition(expr);
@@ -2790,7 +3013,6 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
}
-
void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
switch (binop->op()) {
case Token::COMMA:
@@ -2808,7 +3030,6 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
}
}
-
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
@@ -2817,90 +3038,118 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
+ // TODO(rmcilroy): Special case "x * 1.0" and "x * -1" which are generated for
+ // +x and -x by the parser.
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
- builder()->BinaryOperation(expr->op(), lhs);
+ FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
+ builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
-
void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
-
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
execution_result()->SetResultInRegister(Register::function_closure());
}
-
void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
// Handled by VisitCall().
UNREACHABLE();
}
-
void BytecodeGenerator::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitCommaExpression(BinaryOperation* binop) {
VisitForEffect(binop->left());
Visit(binop->right());
}
-
void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
- // Short-circuit evaluation- If it is known that left is always true,
- // no need to visit right
- if (left->ToBooleanIsTrue()) {
- VisitForAccumulatorValue(left);
+ if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+
+ if (left->ToBooleanIsTrue() || right->ToBooleanIsTrue()) {
+ builder()->Jump(test_result->NewThenLabel());
+ } else if (left->ToBooleanIsFalse() && right->ToBooleanIsFalse()) {
+ builder()->Jump(test_result->NewElseLabel());
+ } else {
+ BytecodeLabels test_right(zone());
+ VisitForTest(left, test_result->then_labels(), &test_right,
+ TestFallthrough::kElse);
+ test_right.Bind(builder());
+ VisitForTest(right, test_result->then_labels(),
+ test_result->else_labels(), test_result->fallthrough());
+ }
+ test_result->SetResultConsumedByTest();
} else {
- BytecodeLabel end_label;
- VisitForAccumulatorValue(left);
- builder()->JumpIfTrue(&end_label);
- VisitForAccumulatorValue(right);
- builder()->Bind(&end_label);
+ if (left->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(left);
+ } else if (left->ToBooleanIsFalse()) {
+ VisitForAccumulatorValue(right);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfTrue(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
}
- execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
Expression* left = binop->left();
Expression* right = binop->right();
- // Short-circuit evaluation- If it is known that left is always false,
- // no need to visit right
- if (left->ToBooleanIsFalse()) {
- VisitForAccumulatorValue(left);
+ if (execution_result()->IsTest()) {
+ TestResultScope* test_result = execution_result()->AsTest();
+
+ if (left->ToBooleanIsFalse() || right->ToBooleanIsFalse()) {
+ builder()->Jump(test_result->NewElseLabel());
+ } else if (left->ToBooleanIsTrue() && right->ToBooleanIsTrue()) {
+ builder()->Jump(test_result->NewThenLabel());
+ } else {
+ BytecodeLabels test_right(zone());
+ VisitForTest(left, &test_right, test_result->else_labels(),
+ TestFallthrough::kThen);
+ test_right.Bind(builder());
+ VisitForTest(right, test_result->then_labels(),
+ test_result->else_labels(), test_result->fallthrough());
+ }
+ test_result->SetResultConsumedByTest();
} else {
- BytecodeLabel end_label;
- VisitForAccumulatorValue(left);
- builder()->JumpIfFalse(&end_label);
- VisitForAccumulatorValue(right);
- builder()->Bind(&end_label);
+ if (left->ToBooleanIsFalse()) {
+ VisitForAccumulatorValue(left);
+ } else if (left->ToBooleanIsTrue()) {
+ VisitForAccumulatorValue(right);
+ } else {
+ BytecodeLabel end_label;
+ VisitForAccumulatorValue(left);
+ builder()->JumpIfFalse(&end_label);
+ VisitForAccumulatorValue(right);
+ builder()->Bind(&end_label);
+ }
+ execution_result()->SetResultInAccumulator();
}
- execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
-
void BytecodeGenerator::VisitNewLocalFunctionContext() {
AccumulatorResultScope accumulator_execution_result(this);
Scope* scope = this->scope();
@@ -2914,19 +3163,18 @@ void BytecodeGenerator::VisitNewLocalFunctionContext() {
builder()
->LoadAccumulatorWithRegister(Register::function_closure())
.StoreAccumulatorInRegister(closure)
- .LoadLiteral(scope->GetScopeInfo(isolate()))
+ .LoadLiteral(scope->scope_info())
.StoreAccumulatorInRegister(scope_info)
.CallRuntime(Runtime::kNewScriptContext, closure, 2);
} else {
- builder()->CallRuntime(Runtime::kNewFunctionContext,
- Register::function_closure(), 1);
+ int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ builder()->CreateFunctionContext(slot_count);
}
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitBuildLocalActivationContext() {
- Scope* scope = this->scope();
+ DeclarationScope* scope = this->scope();
if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
Variable* variable = scope->receiver();
@@ -2953,37 +3201,23 @@ void BytecodeGenerator::VisitBuildLocalActivationContext() {
}
}
-
void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
AccumulatorResultScope accumulator_execution_result(this);
DCHECK(scope->is_block_scope());
- // Allocate a new local block context.
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register scope_info = register_allocator()->NextConsecutiveRegister();
- Register closure = register_allocator()->NextConsecutiveRegister();
-
- builder()
- ->LoadLiteral(scope->GetScopeInfo(isolate()))
- .StoreAccumulatorInRegister(scope_info);
VisitFunctionClosureForContext();
- builder()
- ->StoreAccumulatorInRegister(closure)
- .CallRuntime(Runtime::kPushBlockContext, scope_info, 2);
+ builder()->CreateBlockContext(scope->scope_info());
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitNewLocalWithContext() {
AccumulatorResultScope accumulator_execution_result(this);
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register extension_object = register_allocator()->NextConsecutiveRegister();
- Register closure = register_allocator()->NextConsecutiveRegister();
+ Register extension_object = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(extension_object);
+ builder()->CastAccumulatorToJSObject(extension_object);
VisitFunctionClosureForContext();
- builder()->StoreAccumulatorInRegister(closure).CallRuntime(
- Runtime::kPushWithContext, extension_object, 2);
+ builder()->CreateWithContext(extension_object);
execution_result()->SetResultInAccumulator();
}
@@ -2991,23 +3225,13 @@ void BytecodeGenerator::VisitNewLocalCatchContext(Variable* variable) {
AccumulatorResultScope accumulator_execution_result(this);
DCHECK(variable->IsContextSlot());
- // Allocate a new local block context.
- register_allocator()->PrepareForConsecutiveAllocations(3);
- Register name = register_allocator()->NextConsecutiveRegister();
- Register exception = register_allocator()->NextConsecutiveRegister();
- Register closure = register_allocator()->NextConsecutiveRegister();
-
- builder()
- ->StoreAccumulatorInRegister(exception)
- .LoadLiteral(variable->name())
- .StoreAccumulatorInRegister(name);
+ Register exception = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(exception);
VisitFunctionClosureForContext();
- builder()->StoreAccumulatorInRegister(closure).CallRuntime(
- Runtime::kPushCatchContext, name, 3);
+ builder()->CreateCatchContext(exception, variable->name());
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitObjectLiteralAccessor(
Register home_object, ObjectLiteralProperty* property, Register value_out) {
// TODO(rmcilroy): Replace value_out with VisitForRegister();
@@ -3025,15 +3249,14 @@ void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
int slot_number) {
Expression* expr = property->value();
if (FunctionLiteral::NeedsHomeObject(expr)) {
- Handle<Name> name = isolate()->factory()->home_object_symbol();
FeedbackVectorSlot slot = property->GetSlot(slot_number);
builder()
->LoadAccumulatorWithRegister(home_object)
- .StoreNamedProperty(value, name, feedback_index(slot), language_mode());
+ .StoreNamedProperty(value, home_object_symbol(), feedback_index(slot),
+ language_mode());
}
}
-
void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
if (variable == nullptr) return;
@@ -3068,7 +3291,6 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
}
-
void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
if (variable == nullptr) return;
@@ -3077,10 +3299,10 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
}
-
void BytecodeGenerator::VisitFunctionClosureForContext() {
AccumulatorResultScope accumulator_execution_result(this);
- Scope* closure_scope = execution_context()->scope()->ClosureScope();
+ DeclarationScope* closure_scope =
+ execution_context()->scope()->GetClosureScope();
if (closure_scope->is_script_scope() ||
closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function as
@@ -3104,7 +3326,6 @@ void BytecodeGenerator::VisitFunctionClosureForContext() {
execution_result()->SetResultInAccumulator();
}
-
// Visits the expression |expr| and places the result in the accumulator.
void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
AccumulatorResultScope accumulator_scope(this);
@@ -3125,7 +3346,6 @@ void BytecodeGenerator::VisitForEffect(Expression* expr) {
Visit(expr);
}
-
// Visits the expression |expr| and returns the register containing
// the expression result.
Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
@@ -3143,20 +3363,48 @@ void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
builder()->StoreAccumulatorInRegister(destination);
}
+// Visits the expression |expr| for testing its boolean value and jumping to
+// a |then_labels| or |else_labels| label depending on its value and on
+// short-circuit semantics.
+void BytecodeGenerator::VisitForTest(Expression* expr,
+ BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels,
+ TestFallthrough fallthrough) {
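+  // For example (illustrative): with TestFallthrough::kThen only a
+  // JumpIfFalse to an else label is emitted and execution falls through to
+  // the then target; TestFallthrough::kNone must emit explicit jumps to
+  // both targets.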
+ bool result_consumed;
+ {
+ // To make sure that all temporary registers are returned before generating
+ // jumps below, we ensure that the result scope is deleted before doing so.
+ // Dead registers might be materialized otherwise.
+ TestResultScope test_result(this, then_labels, else_labels, fallthrough);
+ Visit(expr);
+ result_consumed = test_result.ResultConsumedByTest();
+ }
+ if (!result_consumed) {
+ switch (fallthrough) {
+ case TestFallthrough::kThen:
+ builder()->JumpIfFalse(else_labels->New());
+ break;
+ case TestFallthrough::kElse:
+ builder()->JumpIfTrue(then_labels->New());
+ break;
+ case TestFallthrough::kNone:
+ builder()->JumpIfTrue(then_labels->New());
+ builder()->Jump(else_labels->New());
+ }
+ }
+}
+
void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
ContextScope context_scope(this, scope);
DCHECK(scope->declarations()->is_empty());
Visit(stmt);
}
-
LanguageMode BytecodeGenerator::language_mode() const {
return execution_context()->scope()->language_mode();
}
-
int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
- return info()->shared_info()->feedback_vector()->GetIndex(slot);
+ return TypeFeedbackVector::GetIndex(slot);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 4ef173890c..ee72135f43 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -7,29 +7,36 @@
#include "src/ast/ast.h"
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
namespace v8 {
namespace internal {
+
+class CompilationInfo;
+
namespace interpreter {
class LoopBuilder;
-class BytecodeGenerator final : public AstVisitor {
+class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
- BytecodeGenerator(Isolate* isolate, Zone* zone);
+ explicit BytecodeGenerator(CompilationInfo* info);
- Handle<BytecodeArray> MakeBytecode(CompilationInfo* info);
+ void GenerateBytecode();
+ Handle<BytecodeArray> FinalizeBytecode(Isolate* isolate);
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
  // Visiting functions for the declarations list and statements are
  // overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
- void VisitStatements(ZoneList<Statement*>* statments) override;
+ void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitStatements(ZoneList<Statement*>* statements);
private:
+ class AccumulatorResultScope;
class ContextScope;
class ControlScope;
class ControlScopeForBreakable;
@@ -39,11 +46,15 @@ class BytecodeGenerator final : public AstVisitor {
class ControlScopeForTryFinally;
class ExpressionResultScope;
class EffectResultScope;
- class AccumulatorResultScope;
+ class GlobalDeclarationsBuilder;
class RegisterResultScope;
class RegisterAllocationScope;
+ class TestResultScope;
- void MakeBytecodeBody();
+ enum class TestFallthrough { kThen, kElse, kNone };
+
+ void GenerateBytecodeBody();
+ void AllocateDeferredConstants();
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -102,17 +113,23 @@ class BytecodeGenerator final : public AstVisitor {
void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
Register key);
+ void BuildAbort(BailoutReason bailout_reason);
void BuildThrowIfHole(Handle<String> name);
void BuildThrowIfNotHole(Handle<String> name);
- void BuildThrowReassignConstant(Handle<String> name);
void BuildThrowReferenceError(Handle<String> name);
- void BuildHoleCheckForVariableLoad(VariableMode mode, Handle<String> name);
+ void BuildHoleCheckForVariableLoad(Variable* variable);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
+ // Build jump to targets[value], where
+ // start_index <= value < start_index + size.
+ void BuildIndexedJump(Register value, size_t start_index, size_t size,
+ ZoneVector<BytecodeLabel>& targets);
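+  // (Illustrative use: dispatching on the generator state, e.g.
+  //  BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
+  //                   generator_resume_points_).)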
+
+ void VisitGeneratorPrologue();
+
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
- void VisitClassLiteralContents(ClassLiteral* expr);
void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
Register prototype);
@@ -133,7 +150,9 @@ class BytecodeGenerator final : public AstVisitor {
Register value_out);
void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
- // Visit the body of a loop iteration.
+ // Visit the header/body of a loop iteration.
+ void VisitIterationHeader(IterationStatement* stmt,
+ LoopBuilder* loop_builder);
void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
// Visit a statement and switch scopes, the context is in the accumulator.
@@ -146,29 +165,21 @@ class BytecodeGenerator final : public AstVisitor {
MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
void VisitForRegisterValue(Expression* expr, Register destination);
void VisitForEffect(Expression* expr);
+ void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
+ BytecodeLabels* else_labels, TestFallthrough fallthrough);
// Methods for tracking and remapping register.
void RecordStoreToRegister(Register reg);
Register LoadFromAliasedRegister(Register reg);
- // Methods for tracking try-block nesting.
- bool IsInsideTryCatch() const { return try_catch_nesting_level_ > 0; }
- bool IsInsideTryFinally() const { return try_finally_nesting_level_ > 0; }
-
// Initialize an array of temporary registers with consecutive registers.
template <size_t N>
void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
- inline void set_builder(BytecodeArrayBuilder* builder) { builder_ = builder; }
inline BytecodeArrayBuilder* builder() const { return builder_; }
-
- inline Isolate* isolate() const { return isolate_; }
inline Zone* zone() const { return zone_; }
-
- inline Scope* scope() const { return scope_; }
- inline void set_scope(Scope* scope) { scope_ = scope; }
+ inline DeclarationScope* scope() const { return scope_; }
inline CompilationInfo* info() const { return info_; }
- inline void set_info(CompilationInfo* info) { info_ = info; }
inline ControlScope* execution_control() const { return execution_control_; }
inline void set_execution_control(ControlScope* scope) {
@@ -190,22 +201,35 @@ class BytecodeGenerator final : public AstVisitor {
return register_allocator_;
}
- ZoneVector<Handle<Object>>* globals() { return &globals_; }
+ GlobalDeclarationsBuilder* globals_builder() { return globals_builder_; }
inline LanguageMode language_mode() const;
int feedback_index(FeedbackVectorSlot slot) const;
- Isolate* isolate_;
+ Handle<Name> home_object_symbol() const { return home_object_symbol_; }
+ Handle<Name> prototype_string() const { return prototype_string_; }
+
Zone* zone_;
BytecodeArrayBuilder* builder_;
CompilationInfo* info_;
- Scope* scope_;
- ZoneVector<Handle<Object>> globals_;
+ DeclarationScope* scope_;
+
+ GlobalDeclarationsBuilder* globals_builder_;
+ ZoneVector<GlobalDeclarationsBuilder*> global_declarations_;
+ ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
+ ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
+ native_function_literals_;
+
ControlScope* execution_control_;
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
RegisterAllocationScope* register_allocator_;
- int try_catch_nesting_level_;
- int try_finally_nesting_level_;
+
+ ZoneVector<BytecodeLabel> generator_resume_points_;
+ Register generator_state_;
+ int loop_depth_;
+
+ Handle<Name> home_object_symbol_;
+ Handle<Name> prototype_string_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-label.cc b/deps/v8/src/interpreter/bytecode-label.cc
new file mode 100644
index 0000000000..a12e8ab4cc
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-label.cc
@@ -0,0 +1,34 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-label.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeLabel* BytecodeLabels::New() {
+ DCHECK(!is_bound());
+ labels_.push_back(BytecodeLabel());
+ return &labels_.back();
+}
+
+void BytecodeLabels::Bind(BytecodeArrayBuilder* builder) {
+ for (auto& label : labels_) {
+ builder->Bind(&label);
+ }
+}
+
+void BytecodeLabels::BindToLabel(BytecodeArrayBuilder* builder,
+ const BytecodeLabel& target) {
+ for (auto& label : labels_) {
+ builder->Bind(target, &label);
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
new file mode 100644
index 0000000000..d96cf66d13
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -0,0 +1,87 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
+#define V8_INTERPRETER_BYTECODE_LABEL_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayBuilder;
+
+// A label representing a branch target in a bytecode array. When a
+// label is bound, it represents a known position in the bytecode
+// array. For labels that are forward references there can be at most
+// one reference whilst it is unbound.
+class BytecodeLabel final {
+ public:
+ BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
+
+ bool is_bound() const { return bound_; }
+ size_t offset() const { return offset_; }
+
+ private:
+ static const size_t kInvalidOffset = static_cast<size_t>(-1);
+
+ void bind_to(size_t offset) {
+ DCHECK(!bound_ && offset != kInvalidOffset);
+ offset_ = offset;
+ bound_ = true;
+ }
+
+ void set_referrer(size_t offset) {
+ DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
+ offset_ = offset;
+ }
+
+ bool is_forward_target() const {
+ return offset() != kInvalidOffset && !is_bound();
+ }
+
+  // There are three states for a label:
+  //
+  //                    bound_   offset_
+  //  UNSET             false    kInvalidOffset
+  //  FORWARD_TARGET    false    Offset of referring jump
+  //  BACKWARD_TARGET   true     Offset of label in bytecode array when bound
+ bool bound_;
+ size_t offset_;
+
+ friend class BytecodeArrayWriter;
+};
+
+// Class representing a branch target of multiple jumps.
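+//
+// Usage sketch (illustrative): each jump site creates a fresh label via
+// New() and all of them are later bound to a single target, e.g.:
+//
+//   BytecodeLabels done(zone);
+//   builder->JumpIfTrue(done.New());
+//   // ... more bytecode, possibly further jumps via done.New() ...
+//   builder->Jump(done.New());
+//   done.Bind(builder);  // Binds every pending label to this location.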
+class BytecodeLabels {
+ public:
+ explicit BytecodeLabels(Zone* zone) : labels_(zone) {}
+
+ BytecodeLabel* New();
+
+ void Bind(BytecodeArrayBuilder* builder);
+
+ void BindToLabel(BytecodeArrayBuilder* builder, const BytecodeLabel& target);
+
+ bool is_bound() const {
+ bool is_bound = !labels_.empty() && labels_.at(0).is_bound();
+ DCHECK(!is_bound ||
+ std::all_of(labels_.begin(), labels_.end(),
+ [](const BytecodeLabel& l) { return l.is_bound(); }));
+ return is_bound;
+ }
+
+ bool empty() const { return labels_.empty(); }
+
+ private:
+ ZoneVector<BytecodeLabel> labels_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeLabels);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_LABEL_H_
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
new file mode 100644
index 0000000000..11aebb6ddb
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
@@ -0,0 +1,345 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-peephole-optimizer.h"
+
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
+ BytecodePipelineStage* next_stage)
+ : next_stage_(next_stage) {
+ InvalidateLast();
+}
+
+// override
+Handle<BytecodeArray> BytecodePeepholeOptimizer::ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) {
+ Flush();
+ return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+ parameter_count, handler_table);
+}
+
+// override
+void BytecodePeepholeOptimizer::BindLabel(BytecodeLabel* label) {
+ Flush();
+ next_stage_->BindLabel(label);
+}
+
+// override
+void BytecodePeepholeOptimizer::BindLabel(const BytecodeLabel& target,
+ BytecodeLabel* label) {
+ // There is no need to flush here, it will have been flushed when
+ // |target| was bound.
+ next_stage_->BindLabel(target, label);
+}
+
+// override
+void BytecodePeepholeOptimizer::WriteJump(BytecodeNode* node,
+ BytecodeLabel* label) {
+ // Handlers for jump bytecodes do not emit |node| as WriteJump()
+ // requires the |label| and having a label argument in all action
+ // handlers results in dead work in the non-jump case.
+ ApplyPeepholeAction(node);
+ next_stage()->WriteJump(node, label);
+}
+
+// override
+void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
+ // Handlers for non-jump bytecodes run to completion emitting
+ // bytecode to next stage as appropriate.
+ ApplyPeepholeAction(node);
+}
+
+void BytecodePeepholeOptimizer::Flush() {
+ if (LastIsValid()) {
+ next_stage_->Write(&last_);
+ InvalidateLast();
+ }
+}
+
+void BytecodePeepholeOptimizer::InvalidateLast() {
+ last_.set_bytecode(Bytecode::kIllegal);
+}
+
+bool BytecodePeepholeOptimizer::LastIsValid() const {
+ return last_.bytecode() != Bytecode::kIllegal;
+}
+
+void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
+  // An action shouldn't leave a NOP as the last bytecode unless it has
+ // source position information. NOP without source information can
+ // always be elided.
+ DCHECK(node->bytecode() != Bytecode::kNop || node->source_info().is_valid());
+
+ last_.Clone(node);
+}
+
+bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
+ const BytecodeNode* const current) const {
+ //
+ // The rules for allowing the elision of the last bytecode based
+ // on source position are:
+ //
+  //                   C U R R E N T
+  //            +--------+--------+--------+
+  //            |  None  |  Expr  |  Stmt  |
+  //  L +-------+--------+--------+--------+
+  //    |  None |  YES   |  YES   |  YES   |
+  //  A +-------+--------+--------+--------+
+  //    |  Expr |  YES   |  MAYBE |  MAYBE |
+  //  S +-------+--------+--------+--------+
+  //    |  Stmt |  YES   |  NO    |  NO    |
+  //  T +-------+--------+--------+--------+
+ //
+  // The goal is to not lose any statement positions and to not lose useful
+  // expression positions. Whenever the last bytecode is elided, its
+  // source position information is applied to the current node,
+  // updating it if necessary.
+ //
+ // The last bytecode could be elided for the MAYBE cases if the last
+ // bytecode is known not to throw. If it throws, the system would
+ // not have correct stack trace information. The appropriate check
+ // for this would be Bytecodes::IsWithoutExternalSideEffects(). By
+ // default, the upstream bytecode generator filters out unneeded
+  // expression position information so there is negligible benefit to
+ // handling MAYBE specially. Hence MAYBE is treated the same as NO.
+ //
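+  // For example (illustrative): an expression-position last bytecode followed
+  // by a statement-position current one is the Expr/Stmt MAYBE case and is
+  // treated as NO, so last is kept; had current carried no position, last
+  // could be elided and its position propagated by the caller.
+  //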
+ return (!last_.source_info().is_valid() ||
+ !current->source_info().is_valid());
+}
+
+namespace {
+
+void TransformLdaStarToLdrLdar(Bytecode new_bytecode, BytecodeNode* const last,
+ BytecodeNode* const current) {
+ DCHECK_EQ(current->bytecode(), Bytecode::kStar);
+
+ //
+ // An example transformation here would be:
+ //
+  //   LdaGlobal i0, i1  ____\  LdrGlobal i0, i1, R
+  //   Star R            ====/  Ldar R
+ //
+ // which loads a global value into both a register and the
+ // accumulator. However, in the second form the Ldar can often be
+ // peephole optimized away unlike the Star in the first form.
+ //
+ last->Transform(new_bytecode, current->operand(0));
+ current->set_bytecode(Bytecode::kLdar, current->operand(0));
+}
+
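+// For example (illustrative operand values; assumes a kAddSmi-style
+// replacement bytecode):
+//
+//   LdaSmi [1] ; Add r0, [slot]   ==>   AddSmi [1], r0, [slot]
+//
+// with LdaSmi's source position carried over if it was valid.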
+void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
+ BytecodeNode* const last,
+ BytecodeNode* const current) {
+ DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi);
+ current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
+ current->operand(1));
+ if (last->source_info().is_valid()) {
+ current->source_info().Clone(last->source_info());
+ }
+}
+
+void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
+ BytecodeNode* const last,
+ BytecodeNode* const current) {
+ DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero);
+ current->set_bytecode(new_bytecode, 0, current->operand(0),
+ current->operand(1));
+ if (last->source_info().is_valid()) {
+ current->source_info().Clone(last->source_info());
+ }
+}
+
+} // namespace
+
+void BytecodePeepholeOptimizer::DefaultAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ next_stage()->Write(last());
+ SetLast(node);
+}
+
+void BytecodePeepholeOptimizer::UpdateLastAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(!LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ SetLast(node);
+}
+
+void BytecodePeepholeOptimizer::UpdateLastIfSourceInfoPresentAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(!LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (node->source_info().is_valid()) {
+ SetLast(node);
+ }
+}
+
+void BytecodePeepholeOptimizer::ElideCurrentAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (node->source_info().is_valid()) {
+ // Preserve the source information by replacing the node bytecode
+    // with a no-op bytecode.
+ node->set_bytecode(Bytecode::kNop);
+ DefaultAction(node);
+ } else {
+ // Nothing to do, keep last and wait for next bytecode to pair with it.
+ }
+}
+
+void BytecodePeepholeOptimizer::ElideCurrentIfOperand0MatchesAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (last()->operand(0) == node->operand(0)) {
+ ElideCurrentAction(node);
+ } else {
+ DefaultAction(node);
+ }
+}
+
+void BytecodePeepholeOptimizer::ElideLastAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (CanElideLastBasedOnSourcePosition(node)) {
+ if (last()->source_info().is_valid()) {
+      // |node| cannot have a valid source position if the source
+ // position of last() is valid (per rules in
+ // CanElideLastBasedOnSourcePosition()).
+ node->source_info().Clone(last()->source_info());
+ }
+ SetLast(node);
+ } else {
+ DefaultAction(node);
+ }
+}
+
+void BytecodePeepholeOptimizer::ChangeBytecodeAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ node->replace_bytecode(action_data->bytecode);
+ DefaultAction(node);
+}
+
+void BytecodePeepholeOptimizer::TransformLdaStarToLdrLdarAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (!node->source_info().is_statement()) {
+ TransformLdaStarToLdrLdar(action_data->bytecode, last(), node);
+ }
+ DefaultAction(node);
+}
+
+void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+
+ if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
+    // Fuse last and current into current.
+ TransformLdaSmiBinaryOpToBinaryOpWithSmi(action_data->bytecode, last(),
+ node);
+ SetLast(node);
+ } else {
+ DefaultAction(node);
+ }
+}
+
+void BytecodePeepholeOptimizer::
+ TransformLdaZeroBinaryOpToBinaryOpWithZeroAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+ if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
+    // Fuse last and current into current.
+ TransformLdaZeroBinaryOpToBinaryOpWithZero(action_data->bytecode, last(),
+ node);
+ SetLast(node);
+ } else {
+ DefaultAction(node);
+ }
+}
+
+void BytecodePeepholeOptimizer::DefaultJumpAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+
+ next_stage()->Write(last());
+ InvalidateLast();
+}
+
+void BytecodePeepholeOptimizer::UpdateLastJumpAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(!LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+}
+
+void BytecodePeepholeOptimizer::ChangeJumpBytecodeAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+
+ next_stage()->Write(last());
+ InvalidateLast();
+ node->set_bytecode(action_data->bytecode, node->operand(0));
+}
+
+void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(Bytecodes::IsJump(node->bytecode()));
+
+ if (!CanElideLastBasedOnSourcePosition(node)) {
+ next_stage()->Write(last());
+ } else if (!node->source_info().is_valid()) {
+ node->source_info().Clone(last()->source_info());
+ }
+ InvalidateLast();
+}
+
+void BytecodePeepholeOptimizer::ApplyPeepholeAction(BytecodeNode* const node) {
+ // A single table is used for looking up peephole optimization
+ // matches as it is observed to have better performance. This is
+  // in spite of the fact that jump bytecodes and non-jump bytecodes
+ // have different processing logic, in particular a jump bytecode
+ // always needs to emit the jump via WriteJump().
+ const PeepholeActionAndData* const action_data =
+ PeepholeActionTable::Lookup(last()->bytecode(), node->bytecode());
+ switch (action_data->action) {
+#define CASE(Action) \
+ case PeepholeAction::k##Action: \
+ Action(node, action_data); \
+ break;
+ PEEPHOLE_ACTION_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.h b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
new file mode 100644
index 0000000000..2f4a35fd1b
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-peephole-table.h"
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodePeepholeActionAndData;
+
+// An optimization stage for performing peephole optimizations on
+// generated bytecode. The optimizer may buffer one bytecode
+// internally.
+class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
+ public ZoneObject {
+ public:
+ explicit BytecodePeepholeOptimizer(BytecodePipelineStage* next_stage);
+
+ // BytecodePipelineStage interface.
+ void Write(BytecodeNode* node) override;
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+ void BindLabel(BytecodeLabel* label) override;
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) override;
+
+ private:
+#define DECLARE_ACTION(Action) \
+ void Action(BytecodeNode* const node, \
+ const PeepholeActionAndData* const action_data = nullptr);
+ PEEPHOLE_ACTION_LIST(DECLARE_ACTION)
+#undef DECLARE_ACTION
+
+ void ApplyPeepholeAction(BytecodeNode* const node);
+ void Flush();
+ bool CanElideLastBasedOnSourcePosition(
+ const BytecodeNode* const current) const;
+ void InvalidateLast();
+ bool LastIsValid() const;
+ void SetLast(const BytecodeNode* const node);
+
+ BytecodePipelineStage* next_stage() const { return next_stage_; }
+ BytecodeNode* last() { return &last_; }
+
+ BytecodePipelineStage* next_stage_;
+ BytecodeNode last_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodePeepholeOptimizer);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
diff --git a/deps/v8/src/interpreter/bytecode-peephole-table.h b/deps/v8/src/interpreter/bytecode-peephole-table.h
new file mode 100644
index 0000000000..e716aef496
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-peephole-table.h
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_TABLE_H_
+#define V8_INTERPRETER_BYTECODE_PEEPHOLE_TABLE_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+#define PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
+ V(DefaultAction) \
+ V(UpdateLastAction) \
+ V(UpdateLastIfSourceInfoPresentAction) \
+ V(ElideCurrentAction) \
+ V(ElideCurrentIfOperand0MatchesAction) \
+ V(ElideLastAction) \
+ V(ChangeBytecodeAction) \
+ V(TransformLdaStarToLdrLdarAction) \
+ V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
+ V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
+
+#define PEEPHOLE_JUMP_ACTION_LIST(V) \
+ V(DefaultJumpAction) \
+ V(UpdateLastJumpAction) \
+ V(ChangeJumpBytecodeAction) \
+ V(ElideLastBeforeJumpAction)
+
+#define PEEPHOLE_ACTION_LIST(V) \
+ PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
+ PEEPHOLE_JUMP_ACTION_LIST(V)
+
+// Actions to take when a pair of bytecodes is encountered. A handler
+// exists for each action.
+enum class PeepholeAction : uint8_t {
+#define DECLARE_PEEPHOLE_ACTION(Action) k##Action,
+ PEEPHOLE_ACTION_LIST(DECLARE_PEEPHOLE_ACTION)
+#undef DECLARE_PEEPHOLE_ACTION
+};
+
+// Tuple of the action to take when a pair of bytecodes is encountered and
+// optional data to invoke the handler with.
+struct PeepholeActionAndData final {
+  // Action to take when this pair of bytecodes is encountered.
+ PeepholeAction action;
+
+ // Replacement bytecode (if valid).
+ Bytecode bytecode;
+};
+
+// Lookup table for matching pairs of bytecodes to peephole optimization
+// actions. The contents of the table are generated by mkpeephole.cc.
+struct PeepholeActionTable final {
+ public:
+ static const PeepholeActionAndData* Lookup(Bytecode last, Bytecode current);
+
+ private:
+ static const size_t kNumberOfBytecodes =
+ static_cast<size_t>(Bytecode::kLast) + 1;
+
+ static const PeepholeActionAndData row_data_[][kNumberOfBytecodes];
+ static const PeepholeActionAndData* const row_[kNumberOfBytecodes];
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_PEEPHOLE_TABLE_H_
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.cc b/deps/v8/src/interpreter/bytecode-pipeline.cc
new file mode 100644
index 0000000000..66b8bdf533
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-pipeline.cc
@@ -0,0 +1,134 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+#include <iomanip>
+#include "src/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeNode::BytecodeNode(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ bytecode_ = bytecode;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+}
+
+BytecodeNode::BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 4);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+ operands_[3] = operand3;
+}
+
+BytecodeNode::BytecodeNode(const BytecodeNode& other) {
+ memcpy(this, &other, sizeof(other));
+}
+
+BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
+ memcpy(this, &other, sizeof(other));
+ return *this;
+}
+
+void BytecodeNode::Clone(const BytecodeNode* const other) {
+ memcpy(this, other, sizeof(*other));
+}
+
+void BytecodeNode::Print(std::ostream& os) const {
+#ifdef DEBUG
+ std::ios saved_state(nullptr);
+ saved_state.copyfmt(os);
+ os << Bytecodes::ToString(bytecode_);
+
+ for (int i = 0; i < operand_count(); ++i) {
+ os << ' ' << std::setw(8) << std::setfill('0') << std::hex << operands_[i];
+ }
+ os.copyfmt(saved_state);
+
+ if (source_info_.is_valid()) {
+ os << ' ' << source_info_;
+ }
+ os << '\n';
+#else
+ os << static_cast<const void*>(this);
+#endif // DEBUG
+}
+
+void BytecodeNode::Transform(Bytecode new_bytecode, uint32_t extra_operand) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
+ Bytecodes::NumberOfOperands(bytecode()) + 1);
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
+ Bytecodes::GetOperandType(new_bytecode, 0) ==
+ Bytecodes::GetOperandType(bytecode(), 0));
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
+ Bytecodes::GetOperandType(new_bytecode, 1) ==
+ Bytecodes::GetOperandType(bytecode(), 1));
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
+ Bytecodes::GetOperandType(new_bytecode, 2) ==
+ Bytecodes::GetOperandType(bytecode(), 2));
+ DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
+ operands_[operand_count()] = extra_operand;
+ bytecode_ = new_bytecode;
+}
+
+bool BytecodeNode::operator==(const BytecodeNode& other) const {
+ if (this == &other) {
+ return true;
+ } else if (this->bytecode() != other.bytecode() ||
+ this->source_info() != other.source_info()) {
+ return false;
+ } else {
+ for (int i = 0; i < this->operand_count(); ++i) {
+ if (this->operand(i) != other.operand(i)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
+ if (info.is_valid()) {
+ char description = info.is_statement() ? 'S' : 'E';
+ os << info.source_position() << ' ' << description << '>';
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
+ node.Print(os);
+ return os;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-pipeline.h
new file mode 100644
index 0000000000..1668bab9c1
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-pipeline.h
@@ -0,0 +1,228 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
+
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeLabel;
+class BytecodeNode;
+class BytecodeSourceInfo;
+
+// Interface for bytecode pipeline stages.
+class BytecodePipelineStage {
+ public:
+ virtual ~BytecodePipelineStage() {}
+
+  // Write bytecode node |node| into the pipeline. The node is only valid
+  // for the duration of the call. Callees should clone it if
+  // deferring Write() to the next stage.
+ virtual void Write(BytecodeNode* node) = 0;
+
+ // Write jump bytecode node |node| which jumps to |label| into pipeline.
+ // The node and label are only valid for the duration of the call. This call
+ // implicitly ends the current basic block so should always write to the next
+ // stage.
+ virtual void WriteJump(BytecodeNode* node, BytecodeLabel* label) = 0;
+
+ // Binds |label| to the current bytecode location. This call implicitly
+ // ends the current basic block and so any deferred bytecodes should be
+ // written to the next stage.
+ virtual void BindLabel(BytecodeLabel* label) = 0;
+
+ // Binds |label| to the location of |target|. This call implicitly
+ // ends the current basic block and so any deferred bytecodes should be
+ // written to the next stage.
+ virtual void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) = 0;
+
+ // Flush the pipeline and generate a bytecode array.
+ virtual Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) = 0;
+};
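+//
+// Stages are chained back-to-front (illustrative sketch; constructor
+// arguments elided):
+//
+//   BytecodeArrayWriter writer(/* ... */);        // final consumer
+//   BytecodePeepholeOptimizer peephole(&writer);  // may buffer one bytecode
+//
+// Nodes written into |peephole| flow on to |writer|.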
+
+// Source code position information.
+class BytecodeSourceInfo final {
+ public:
+ static const int kUninitializedPosition = -1;
+
+ BytecodeSourceInfo()
+ : position_type_(PositionType::kNone),
+ source_position_(kUninitializedPosition) {}
+
+ BytecodeSourceInfo(int source_position, bool is_statement)
+ : position_type_(is_statement ? PositionType::kStatement
+ : PositionType::kExpression),
+ source_position_(source_position) {
+ DCHECK_GE(source_position, 0);
+ }
+
+ // Makes instance into a statement position.
+ void MakeStatementPosition(int source_position) {
+ // Statement positions can be replaced by other statement
+    // positions. For example, "for (x = 0; x < 3; ++x) 7;" has a
+    // statement position associated with 7 but no bytecode associated
+    // with it. Then Next is emitted after the body and has a
+    // statement position that overrides the existing one.
+ position_type_ = PositionType::kStatement;
+ source_position_ = source_position;
+ }
+
+ // Makes instance into an expression position. Instance should not
+ // be a statement position otherwise it could be lost and impair the
+ // debugging experience.
+ void MakeExpressionPosition(int source_position) {
+ DCHECK(!is_statement());
+ position_type_ = PositionType::kExpression;
+ source_position_ = source_position;
+ }
+
+ // Forces an instance into an expression position.
+ void ForceExpressionPosition(int source_position) {
+ position_type_ = PositionType::kExpression;
+ source_position_ = source_position;
+ }
+
+ // Clones a source position. The current instance is expected to be
+ // invalid.
+ void Clone(const BytecodeSourceInfo& other) {
+ DCHECK(!is_valid());
+ position_type_ = other.position_type_;
+ source_position_ = other.source_position_;
+ }
+
+ int source_position() const {
+ DCHECK(is_valid());
+ return source_position_;
+ }
+
+ bool is_statement() const {
+ return position_type_ == PositionType::kStatement;
+ }
+ bool is_expression() const {
+ return position_type_ == PositionType::kExpression;
+ }
+
+ bool is_valid() const { return position_type_ != PositionType::kNone; }
+ void set_invalid() {
+ position_type_ = PositionType::kNone;
+ source_position_ = kUninitializedPosition;
+ }
+
+ bool operator==(const BytecodeSourceInfo& other) const {
+ return position_type_ == other.position_type_ &&
+ source_position_ == other.source_position_;
+ }
+
+ bool operator!=(const BytecodeSourceInfo& other) const {
+ return position_type_ != other.position_type_ ||
+ source_position_ != other.source_position_;
+ }
+
+ private:
+ enum class PositionType : uint8_t { kNone, kExpression, kStatement };
+
+ PositionType position_type_;
+ int source_position_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeSourceInfo);
+};
+
+// A container for a generated bytecode, its operands, and source information.
+// These must be allocated by a BytecodeNodeAllocator instance.
+class BytecodeNode final : ZoneObject {
+ public:
+ explicit BytecodeNode(Bytecode bytecode = Bytecode::kIllegal);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2);
+ BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3);
+
+ BytecodeNode(const BytecodeNode& other);
+ BytecodeNode& operator=(const BytecodeNode& other);
+
+ // Replace the bytecode of this node with |bytecode| and keep the operands.
+ void replace_bytecode(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
+ Bytecodes::NumberOfOperands(bytecode));
+ bytecode_ = bytecode;
+ }
+ void set_bytecode(Bytecode bytecode) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
+ bytecode_ = bytecode;
+ }
+ void set_bytecode(Bytecode bytecode, uint32_t operand0) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ }
+ void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ }
+ void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
+ bytecode_ = bytecode;
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+ }
+
+ // Clone |other|.
+ void Clone(const BytecodeNode* const other);
+
+ // Print to stream |os|.
+ void Print(std::ostream& os) const;
+
+ // Transforms this node to represent |new_bytecode|, which takes one
+ // more operand than the current bytecode; |extra_operand| is appended.
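+ // Illustration (sketch, not part of this change): a peephole pass could
+ // fuse "LdaUndefined; Star r3" into "LdrUndefined r3" via
+ //   node->Transform(Bytecode::kLdrUndefined,
+ //                   static_cast<uint32_t>(r3.ToOperand()));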
+ void Transform(Bytecode new_bytecode, uint32_t extra_operand);
+
+ Bytecode bytecode() const { return bytecode_; }
+
+ uint32_t operand(int i) const {
+ DCHECK_LT(i, operand_count());
+ return operands_[i];
+ }
+ uint32_t* operands() { return operands_; }
+ const uint32_t* operands() const { return operands_; }
+
+ int operand_count() const { return Bytecodes::NumberOfOperands(bytecode_); }
+
+ const BytecodeSourceInfo& source_info() const { return source_info_; }
+ BytecodeSourceInfo& source_info() { return source_info_; }
+
+ bool operator==(const BytecodeNode& other) const;
+ bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
+
+ private:
+ static const int kInvalidPosition = kMinInt;
+
+ Bytecode bytecode_;
+ uint32_t operands_[Bytecodes::kMaxOperands];
+ BytecodeSourceInfo source_info_;
+};
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info);
+std::ostream& operator<<(std::ostream& os, const BytecodeNode& node);
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_PIPELINE_H_
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.cc b/deps/v8/src/interpreter/bytecode-register-allocator.cc
index 9bdde9a470..10afcdc76d 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.cc
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.cc
@@ -14,7 +14,8 @@ TemporaryRegisterAllocator::TemporaryRegisterAllocator(Zone* zone,
int allocation_base)
: free_temporaries_(zone),
allocation_base_(allocation_base),
- allocation_count_(0) {}
+ allocation_count_(0),
+ observer_(nullptr) {}
Register TemporaryRegisterAllocator::first_temporary_register() const {
DCHECK(allocation_count() > 0);
@@ -26,6 +27,12 @@ Register TemporaryRegisterAllocator::last_temporary_register() const {
return Register(allocation_base() + allocation_count() - 1);
}
+void TemporaryRegisterAllocator::set_observer(
+ TemporaryRegisterObserver* observer) {
+ DCHECK(observer_ == nullptr);
+ observer_ = observer;
+}
+
int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
allocation_count_ += 1;
return allocation_base() + allocation_count() - 1;
@@ -140,6 +147,9 @@ void TemporaryRegisterAllocator::BorrowConsecutiveTemporaryRegister(
void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
free_temporaries_.insert(reg_index);
+ if (observer_) {
+ observer_->TemporaryRegisterFreeEvent(Register(reg_index));
+ }
}
BytecodeRegisterAllocator::BytecodeRegisterAllocator(
@@ -156,7 +166,6 @@ BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
allocated_.clear();
}
-
Register BytecodeRegisterAllocator::NewRegister() {
int allocated = -1;
if (next_consecutive_count_ <= 0) {
@@ -170,7 +179,6 @@ Register BytecodeRegisterAllocator::NewRegister() {
return Register(allocated);
}
-
bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
Register reg) const {
for (auto i = allocated_.begin(); i != allocated_.end(); i++) {
@@ -179,7 +187,6 @@ bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
return false;
}
-
void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
if (static_cast<int>(count) > next_consecutive_count_) {
next_consecutive_register_ =
@@ -188,7 +195,6 @@ void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
}
}
-
Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
DCHECK_GE(next_consecutive_register_, 0);
DCHECK_GT(next_consecutive_count_, 0);
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index 696a3b174a..b8f737be79 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -14,6 +14,7 @@ namespace interpreter {
class BytecodeArrayBuilder;
class Register;
+class TemporaryRegisterObserver;
class TemporaryRegisterAllocator final {
public:
@@ -54,6 +55,9 @@ class TemporaryRegisterAllocator final {
// Returns the number of temporary register allocations made.
int allocation_count() const { return allocation_count_; }
+ // Sets an observer for temporary register events.
+ void set_observer(TemporaryRegisterObserver* observer);
+
private:
// Allocate a temporary register.
int AllocateTemporaryRegister();
@@ -61,11 +65,18 @@ class TemporaryRegisterAllocator final {
ZoneSet<int> free_temporaries_;
int allocation_base_;
int allocation_count_;
+ TemporaryRegisterObserver* observer_;
DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
};
-// A class than allows the instantiator to allocate temporary registers that are
+class TemporaryRegisterObserver {
+ public:
+ virtual ~TemporaryRegisterObserver() {}
+ virtual void TemporaryRegisterFreeEvent(Register reg) = 0;
+};
+
+// A class that allows the instantiator to allocate temporary registers that are
// cleaned up when scope is closed.
class BytecodeRegisterAllocator final {
public:
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
new file mode 100644
index 0000000000..d28f215de8
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -0,0 +1,627 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;
+
+// A class for tracking the state of a register. This class tracks
+// which equivalence set a register is a member of and also whether a
+// register is materialized in the bytecode stream.
+class BytecodeRegisterOptimizer::RegisterInfo final : public ZoneObject {
+ public:
+ RegisterInfo(Register reg, uint32_t equivalence_id, bool materialized)
+ : register_(reg),
+ equivalence_id_(equivalence_id),
+ materialized_(materialized),
+ next_(this),
+ prev_(this) {}
+
+ void AddToEquivalenceSetOf(RegisterInfo* info);
+ void MoveToNewEquivalenceSet(uint32_t equivalence_id, bool materialized);
+ bool IsOnlyMemberOfEquivalenceSet() const;
+ bool IsOnlyMaterializedMemberOfEquivalenceSet() const;
+ bool IsInSameEquivalenceSet(RegisterInfo* info) const;
+
+ // Get a member of this register's equivalence set that is
+ // materialized. The materialized equivalent will be this register
+ // if it is materialized. Returns nullptr if no materialized
+ // equivalent exists.
+ RegisterInfo* GetMaterializedEquivalent();
+
+ // Get a member of this register's equivalence set that is
+ // materialized and not register |reg|. The materialized equivalent
+ // will be this register if it is materialized. Returns nullptr if
+ // no materialized equivalent exists.
+ RegisterInfo* GetMaterializedEquivalentOtherThan(Register reg);
+
+ // Get a member of this register's equivalence set that is intended
+ // to be materialized in place of this register (which is currently
+ // materialized). The best candidate is deemed to be the register
+ // with the lowest index as this permits temporary registers to be
+ // removed from the bytecode stream. Returns nullptr if no candidate
+ // exists or if another member of the set is already materialized.
+ RegisterInfo* GetEquivalentToMaterialize();
+
+ // Get an equivalent register. Returns this if none exists.
+ RegisterInfo* GetEquivalent();
+
+ Register register_value() const { return register_; }
+ bool materialized() const { return materialized_; }
+ void set_materialized(bool materialized) { materialized_ = materialized; }
+ void set_equivalence_id(uint32_t equivalence_id) {
+ equivalence_id_ = equivalence_id;
+ }
+ uint32_t equivalence_id() const { return equivalence_id_; }
+
+ private:
+ Register register_;
+ uint32_t equivalence_id_;
+ bool materialized_;
+
+ // Equivalence set pointers.
+ RegisterInfo* next_;
+ RegisterInfo* prev_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterInfo);
+};
+
+void BytecodeRegisterOptimizer::RegisterInfo::AddToEquivalenceSetOf(
+ RegisterInfo* info) {
+ DCHECK_NE(kInvalidEquivalenceId, info->equivalence_id());
+ // Fix old list
+ next_->prev_ = prev_;
+ prev_->next_ = next_;
+ // Add to new list.
+ next_ = info->next_;
+ prev_ = info;
+ prev_->next_ = this;
+ next_->prev_ = this;
+ set_equivalence_id(info->equivalence_id());
+ set_materialized(false);
+}
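+
+// Illustration (sketch): with circular sets {a} and {b, c}, calling
+// a->AddToEquivalenceSetOf(b) unlinks |a| from its own set and splices it
+// in directly after |b|, giving {b, a, c}; |a| adopts b's equivalence id
+// and becomes unmaterialized.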
+
+void BytecodeRegisterOptimizer::RegisterInfo::MoveToNewEquivalenceSet(
+ uint32_t equivalence_id, bool materialized) {
+ next_->prev_ = prev_;
+ prev_->next_ = next_;
+ next_ = prev_ = this;
+ equivalence_id_ = equivalence_id;
+ materialized_ = materialized;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::IsOnlyMemberOfEquivalenceSet()
+ const {
+ return this->next_ == this;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::
+ IsOnlyMaterializedMemberOfEquivalenceSet() const {
+ DCHECK(materialized());
+
+ const RegisterInfo* visitor = this->next_;
+ while (visitor != this) {
+ if (visitor->materialized()) {
+ return false;
+ }
+ visitor = visitor->next_;
+ }
+ return true;
+}
+
+bool BytecodeRegisterOptimizer::RegisterInfo::IsInSameEquivalenceSet(
+ RegisterInfo* info) const {
+ return equivalence_id() == info->equivalence_id();
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalent() {
+ RegisterInfo* visitor = this;
+ do {
+ if (visitor->materialized()) {
+ return visitor;
+ }
+ visitor = visitor->next_;
+ } while (visitor != this);
+
+ return nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetMaterializedEquivalentOtherThan(
+ Register reg) {
+ RegisterInfo* visitor = this;
+ do {
+ if (visitor->materialized() && visitor->register_value() != reg) {
+ return visitor;
+ }
+ visitor = visitor->next_;
+ } while (visitor != this);
+
+ return nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetEquivalentToMaterialize() {
+ DCHECK(this->materialized());
+ RegisterInfo* visitor = this->next_;
+ RegisterInfo* best_info = nullptr;
+ while (visitor != this) {
+ if (visitor->materialized()) {
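+ // Another member of the set already holds the value, so there is
+ // no candidate to materialize.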
+ return nullptr;
+ }
+ if (best_info == nullptr ||
+ visitor->register_value() < best_info->register_value()) {
+ best_info = visitor;
+ }
+ visitor = visitor->next_;
+ }
+ return best_info;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::RegisterInfo::GetEquivalent() {
+ return next_;
+}
+
+BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
+ Zone* zone, TemporaryRegisterAllocator* register_allocator,
+ int parameter_count, BytecodePipelineStage* next_stage)
+ : accumulator_(Register::virtual_accumulator()),
+ temporary_base_(register_allocator->allocation_base()),
+ register_info_table_(zone),
+ equivalence_id_(0),
+ next_stage_(next_stage),
+ flush_required_(false),
+ zone_(zone) {
+ register_allocator->set_observer(this);
+
+ // Calculate offset so register index values can be mapped into
+ // a vector of register metadata.
+ if (parameter_count != 0) {
+ register_info_table_offset_ =
+ -Register::FromParameterIndex(0, parameter_count).index();
+ } else {
+ // TODO(oth): This path shouldn't be necessary in bytecode generated
+ // from JavaScript, but a set of tests does not include the JS receiver.
+ register_info_table_offset_ = -accumulator_.index();
+ }
+
+ // Initialize register map for parameters, locals, and the
+ // accumulator.
+ register_info_table_.resize(register_info_table_offset_ +
+ static_cast<size_t>(temporary_base_.index()));
+ for (size_t i = 0; i < register_info_table_.size(); ++i) {
+ register_info_table_[i] = new (zone) RegisterInfo(
+ RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), true);
+ DCHECK_EQ(register_info_table_[i]->register_value().index(),
+ RegisterFromRegisterInfoTableIndex(i).index());
+ }
+ accumulator_info_ = GetRegisterInfo(accumulator_);
+ DCHECK(accumulator_info_->register_value() == accumulator_);
+}
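+
+// Illustration (the indices are only an assumption; real values come from
+// the frame layout): if parameter 0 of a three-parameter function maps to
+// register index -7, register_info_table_offset_ is 7 and
+// GetRegisterInfoTableIndex maps registers -7..-5 to table slots 0..2,
+// with locals and the temporaries following.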
+
+// override
+Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) {
+ FlushState();
+ return next_stage_->ToBytecodeArray(isolate, fixed_register_count,
+ parameter_count, handler_table);
+}
+
+// override
+void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
+ //
+ // Transfers with observable registers as the destination will be
+ // immediately materialized so the source position information will
+ // be ordered correctly.
+ //
+ // Transfers without observable destination registers will initially
+ // be emitted as Nops carrying the source position. They may or may
+ // not be materialized by the optimizer. However, the source position
+ // is not lost, and being attached to a Nop is fine as the destination
+ // register is not observable in the debugger.
+ //
+ switch (node->bytecode()) {
+ case Bytecode::kLdar: {
+ DoLdar(node);
+ return;
+ }
+ case Bytecode::kStar: {
+ DoStar(node);
+ return;
+ }
+ case Bytecode::kMov: {
+ DoMov(node);
+ return;
+ }
+ default:
+ break;
+ }
+
+ if (Bytecodes::IsJump(node->bytecode()) ||
+ node->bytecode() == Bytecode::kDebugger ||
+ node->bytecode() == Bytecode::kSuspendGenerator) {
+ // All state must be flushed before emitting
+ // - a jump (due to how bytecode offsets for jumps are evaluated),
+ // - a call to the debugger (as it can manipulate locals and parameters),
+ // - a generator suspend (as this involves saving all registers).
+ FlushState();
+ }
+
+ PrepareOperands(node);
+ WriteToNextStage(node);
+}
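+
+// Illustration (sketch): a Star targeting a non-observable temporary emits
+// nothing here; it only updates the equivalence set. A later bytecode that
+// reads the temporary has its operand rewritten by
+// PrepareRegisterInputOperand to a materialized equivalent, so the transfer
+// can be elided altogether.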
+
+// override
+void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
+ BytecodeLabel* label) {
+ FlushState();
+ next_stage_->WriteJump(node, label);
+}
+
+// override
+void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
+ FlushState();
+ next_stage_->BindLabel(label);
+}
+
+// override
+void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
+ BytecodeLabel* label) {
+ // There is no need to flush here, it will have been flushed when |target|
+ // was bound.
+ next_stage_->BindLabel(target, label);
+}
+
+void BytecodeRegisterOptimizer::FlushState() {
+ if (!flush_required_) {
+ return;
+ }
+
+ // Materialize all live registers and break equivalences.
+ size_t count = register_info_table_.size();
+ for (size_t i = 0; i < count; ++i) {
+ RegisterInfo* reg_info = register_info_table_[i];
+ if (reg_info->materialized()) {
+ // Walk equivalents of materialized registers, materializing
+ // each equivalent register as necessary and placing in their
+ // own equivalence set.
+ RegisterInfo* equivalent;
+ while ((equivalent = reg_info->GetEquivalent()) != reg_info) {
+ if (!equivalent->materialized()) {
+ OutputRegisterTransfer(reg_info, equivalent);
+ }
+ equivalent->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+ }
+ }
+ }
+
+ flush_required_ = false;
+}
+
+void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
+ next_stage_->Write(node);
+}
+
+void BytecodeRegisterOptimizer::WriteToNextStage(
+ BytecodeNode* node, const BytecodeSourceInfo& source_info) const {
+ if (source_info.is_valid()) {
+ node->source_info().Clone(source_info);
+ }
+ next_stage_->Write(node);
+}
+
+void BytecodeRegisterOptimizer::OutputRegisterTransfer(
+ RegisterInfo* input_info, RegisterInfo* output_info,
+ const BytecodeSourceInfo& source_info) {
+ Register input = input_info->register_value();
+ Register output = output_info->register_value();
+ DCHECK_NE(input.index(), output.index());
+
+ if (input == accumulator_) {
+ uint32_t operand = static_cast<uint32_t>(output.ToOperand());
+ BytecodeNode node(Bytecode::kStar, operand);
+ WriteToNextStage(&node, source_info);
+ } else if (output == accumulator_) {
+ uint32_t operand = static_cast<uint32_t>(input.ToOperand());
+ BytecodeNode node(Bytecode::kLdar, operand);
+ WriteToNextStage(&node, source_info);
+ } else {
+ uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
+ uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
+ BytecodeNode node(Bytecode::kMov, operand0, operand1);
+ WriteToNextStage(&node, source_info);
+ }
+ output_info->set_materialized(true);
+}
+
+void BytecodeRegisterOptimizer::CreateMaterializedEquivalent(
+ RegisterInfo* info) {
+ DCHECK(info->materialized());
+ RegisterInfo* unmaterialized = info->GetEquivalentToMaterialize();
+ if (unmaterialized) {
+ OutputRegisterTransfer(info, unmaterialized);
+ }
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetMaterializedEquivalent(RegisterInfo* info) {
+ return info->materialized() ? info : info->GetMaterializedEquivalent();
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetMaterializedEquivalentNotAccumulator(
+ RegisterInfo* info) {
+ if (info->materialized()) {
+ return info;
+ }
+
+ RegisterInfo* result = info->GetMaterializedEquivalentOtherThan(accumulator_);
+ if (result == nullptr) {
+ Materialize(info);
+ result = info;
+ }
+ DCHECK(result->register_value() != accumulator_);
+ return result;
+}
+
+void BytecodeRegisterOptimizer::Materialize(RegisterInfo* info) {
+ if (!info->materialized()) {
+ RegisterInfo* materialized = info->GetMaterializedEquivalent();
+ OutputRegisterTransfer(materialized, info);
+ }
+}
+
+void BytecodeRegisterOptimizer::AddToEquivalenceSet(
+ RegisterInfo* set_member, RegisterInfo* non_set_member) {
+ non_set_member->AddToEquivalenceSetOf(set_member);
+ // Flushing is only required when two or more registers are placed
+ // in the same equivalence set.
+ flush_required_ = true;
+}
+
+void BytecodeRegisterOptimizer::RegisterTransfer(
+ RegisterInfo* input_info, RegisterInfo* output_info,
+ const BytecodeSourceInfo& source_info) {
+ // Materialize an alternate in the equivalence set that
+ // |output_info| is leaving.
+ if (output_info->materialized()) {
+ CreateMaterializedEquivalent(output_info);
+ }
+
+ // Add |output_info| to new equivalence set.
+ if (!output_info->IsInSameEquivalenceSet(input_info)) {
+ AddToEquivalenceSet(input_info, output_info);
+ }
+
+ bool output_is_observable =
+ RegisterIsObservable(output_info->register_value());
+ if (output_is_observable) {
+ // Force store to be emitted when register is observable.
+ output_info->set_materialized(false);
+ RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
+ OutputRegisterTransfer(materialized_info, output_info, source_info);
+ } else if (source_info.is_valid()) {
+ // Emit a placeholder nop to maintain source position info.
+ EmitNopForSourceInfo(source_info);
+ }
+}
+
+void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
+ const BytecodeSourceInfo& source_info) const {
+ DCHECK(source_info.is_valid());
+ BytecodeNode nop(Bytecode::kNop);
+ nop.source_info().Clone(source_info);
+ WriteToNextStage(&nop);
+}
+
+void BytecodeRegisterOptimizer::DoLdar(const BytecodeNode* const node) {
+ Register input = GetRegisterInputOperand(
+ 0, node->bytecode(), node->operands(), node->operand_count());
+ RegisterInfo* input_info = GetRegisterInfo(input);
+ RegisterTransfer(input_info, accumulator_info_, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::DoMov(const BytecodeNode* const node) {
+ Register input = GetRegisterInputOperand(
+ 0, node->bytecode(), node->operands(), node->operand_count());
+ RegisterInfo* input_info = GetRegisterInfo(input);
+ Register output = GetRegisterOutputOperand(
+ 1, node->bytecode(), node->operands(), node->operand_count());
+ RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
+ RegisterTransfer(input_info, output_info, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::DoStar(const BytecodeNode* const node) {
+ Register output = GetRegisterOutputOperand(
+ 0, node->bytecode(), node->operands(), node->operand_count());
+ RegisterInfo* output_info = GetOrCreateRegisterInfo(output);
+ RegisterTransfer(accumulator_info_, output_info, node->source_info());
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
+ RegisterInfo* reg_info) {
+ if (reg_info->materialized()) {
+ CreateMaterializedEquivalent(reg_info);
+ }
+ reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
+ Register start, int count) {
+ for (int i = 0; i < count; ++i) {
+ Register reg(start.index() + i);
+ RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+ PrepareRegisterOutputOperand(reg_info);
+ }
+}
+
+Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
+ Register reg) {
+ // For a temporary register, RegisterInfo state may need to be
+ // created. For locals and parameters, the RegisterInfo state is
+ // created in the BytecodeRegisterOptimizer constructor.
+ RegisterInfo* reg_info = GetOrCreateRegisterInfo(reg);
+ if (reg_info->materialized()) {
+ return reg;
+ } else {
+ RegisterInfo* equivalent_info =
+ GetMaterializedEquivalentNotAccumulator(reg_info);
+ return equivalent_info->register_value();
+ }
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
+ BytecodeNode* const node, Register reg, int operand_index) {
+ Register equivalent = GetEquivalentRegisterForInputOperand(reg);
+ node->operands()[operand_index] =
+ static_cast<uint32_t>(equivalent.ToOperand());
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
+ int count) {
+ for (int i = 0; i < count; ++i) {
+ Register current(start.index() + i);
+ RegisterInfo* input_info = GetRegisterInfo(current);
+ Materialize(input_info);
+ }
+}
+
+void BytecodeRegisterOptimizer::PrepareRegisterOperands(
+ BytecodeNode* const node) {
+ //
+ // For each input operand, get a materialized equivalent if it is
+ // just a single register, otherwise materialize the register range.
+ // Update operand_scale if necessary.
+ //
+ // For each output register about to be clobbered, materialize an
+ // equivalent if one exists. Put each register in its own equivalence set.
+ //
+ const uint32_t* operands = node->operands();
+ int operand_count = node->operand_count();
+ const OperandType* operand_types =
+ Bytecodes::GetOperandTypes(node->bytecode());
+ for (int i = 0; i < operand_count; ++i) {
+ int count;
+ // operand_types is terminated by OperandType::kNone so this does not
+ // go out of bounds.
+ if (operand_types[i + 1] == OperandType::kRegCount) {
+ count = static_cast<int>(operands[i + 1]);
+ } else {
+ count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
+ }
+
+ if (count == 0) {
+ continue;
+ }
+
+ Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
+ if (Bytecodes::IsRegisterInputOperandType(operand_types[i])) {
+ if (count == 1) {
+ PrepareRegisterInputOperand(node, reg, i);
+ } else if (count > 1) {
+ PrepareRegisterRangeInputOperand(reg, count);
+ }
+ } else if (Bytecodes::IsRegisterOutputOperandType(operand_types[i])) {
+ PrepareRegisterRangeOutputOperand(reg, count);
+ }
+ }
+}
+
+void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
+ // Materialize the accumulator if it is read by the bytecode. The
+ // accumulator is special and no other register can be materialized
+ // in its place.
+ if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
+ !accumulator_info_->materialized()) {
+ Materialize(accumulator_info_);
+ }
+
+ // Materialize an equivalent to the accumulator if it will be
+ // clobbered when the bytecode is dispatched.
+ if (Bytecodes::WritesAccumulator(node->bytecode())) {
+ PrepareRegisterOutputOperand(accumulator_info_);
+ }
+}
+
+void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
+ PrepareAccumulator(node);
+ PrepareRegisterOperands(node);
+}
+
+// static
+Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
+ int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
+ DCHECK_LT(index, operand_count);
+ DCHECK(Bytecodes::IsRegisterInputOperandType(
+ Bytecodes::GetOperandType(bytecode, index)));
+ return OperandToRegister(operands[index]);
+}
+
+// static
+Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
+ int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
+ DCHECK_LT(index, operand_count);
+ DCHECK(Bytecodes::IsRegisterOutputOperandType(
+ Bytecodes::GetOperandType(bytecode, index)));
+ return OperandToRegister(operands[index]);
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ return (index < register_info_table_.size()) ? register_info_table_[index]
+ : nullptr;
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ return index < register_info_table_.size() ? register_info_table_[index]
+ : NewRegisterInfo(reg);
+}
+
+BytecodeRegisterOptimizer::RegisterInfo*
+BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ DCHECK_GE(index, register_info_table_.size());
+ GrowRegisterMap(reg);
+ return register_info_table_[index];
+}
+
+void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
+ DCHECK(RegisterIsTemporary(reg));
+ size_t index = GetRegisterInfoTableIndex(reg);
+ DCHECK_GE(index, register_info_table_.size());
+ size_t new_size = index + 1;
+ size_t old_size = register_info_table_.size();
+ register_info_table_.resize(new_size);
+ for (size_t i = old_size; i < new_size; ++i) {
+ register_info_table_[i] = new (zone()) RegisterInfo(
+ RegisterFromRegisterInfoTableIndex(i), NextEquivalenceId(), false);
+ }
+}
+
+void BytecodeRegisterOptimizer::TemporaryRegisterFreeEvent(Register reg) {
+ RegisterInfo* info = GetRegisterInfo(reg);
+ if (info != nullptr) {
+ // If register is materialized and part of equivalence set, make
+ // sure another member of the set holds the value before the
+ // temporary register is removed.
+ if (info->materialized()) {
+ CreateMaterializedEquivalent(info);
+ }
+ info->MoveToNewEquivalenceSet(kInvalidEquivalenceId, false);
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
new file mode 100644
index 0000000000..fb087b527a
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -0,0 +1,155 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+
+#include "src/interpreter/bytecode-pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An optimization stage for eliminating unnecessary transfers between
+// registers. The bytecode generator uses temporary registers liberally
+// for correctness and convenience; this stage removes the transfers
+// that are not required while preserving correctness.
+class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
+ public TemporaryRegisterObserver,
+ public ZoneObject {
+ public:
+ BytecodeRegisterOptimizer(Zone* zone,
+ TemporaryRegisterAllocator* register_allocator,
+ int parameter_count,
+ BytecodePipelineStage* next_stage);
+ virtual ~BytecodeRegisterOptimizer() {}
+
+ // BytecodePipelineStage interface.
+ void Write(BytecodeNode* node) override;
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
+ void BindLabel(BytecodeLabel* label) override;
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handler_table) override;
+
+ private:
+ static const uint32_t kInvalidEquivalenceId = kMaxUInt32;
+
+ class RegisterInfo;
+
+ // TemporaryRegisterObserver interface.
+ void TemporaryRegisterFreeEvent(Register reg) override;
+
+ // Helpers for BytecodePipelineStage interface.
+ void FlushState();
+ void WriteToNextStage(BytecodeNode* node) const;
+ void WriteToNextStage(BytecodeNode* node,
+ const BytecodeSourceInfo& output_info) const;
+
+ // Update internal state for register transfer from |input| to
+ // |output| using |source_info| as source position information if
+ // any bytecodes are emitted due to transfer.
+ void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
+ const BytecodeSourceInfo& source_info);
+
+ // Emit a register transfer bytecode from |input| to |output|.
+ void OutputRegisterTransfer(
+ RegisterInfo* input, RegisterInfo* output,
+ const BytecodeSourceInfo& source_info = BytecodeSourceInfo());
+
+ // Emits a Nop to preserve source position information in the
+ // bytecode pipeline.
+ void EmitNopForSourceInfo(const BytecodeSourceInfo& source_info) const;
+
+ // Handlers for bytecode nodes for register to register transfers.
+ void DoLdar(const BytecodeNode* const node);
+ void DoMov(const BytecodeNode* const node);
+ void DoStar(const BytecodeNode* const node);
+
+ // Operand processing methods for bytecodes other than those
+ // performing register to register transfers.
+ void PrepareOperands(BytecodeNode* const node);
+ void PrepareAccumulator(BytecodeNode* const node);
+ void PrepareRegisterOperands(BytecodeNode* const node);
+
+ void PrepareRegisterOutputOperand(RegisterInfo* reg_info);
+ void PrepareRegisterRangeOutputOperand(Register start, int count);
+ void PrepareRegisterInputOperand(BytecodeNode* const node, Register reg,
+ int operand_index);
+ void PrepareRegisterRangeInputOperand(Register start, int count);
+
+ Register GetEquivalentRegisterForInputOperand(Register reg);
+
+ static Register GetRegisterInputOperand(int index, Bytecode bytecode,
+ const uint32_t* operands,
+ int operand_count);
+ static Register GetRegisterOutputOperand(int index, Bytecode bytecode,
+ const uint32_t* operands,
+ int operand_count);
+
+ void CreateMaterializedEquivalent(RegisterInfo* info);
+ RegisterInfo* GetMaterializedEquivalent(RegisterInfo* info);
+ RegisterInfo* GetMaterializedEquivalentNotAccumulator(RegisterInfo* info);
+ void Materialize(RegisterInfo* info);
+ void AddToEquivalenceSet(RegisterInfo* set_member,
+ RegisterInfo* non_set_member);
+
+ // Methods for finding and creating metadata for each register.
+ RegisterInfo* GetOrCreateRegisterInfo(Register reg);
+ RegisterInfo* GetRegisterInfo(Register reg);
+ RegisterInfo* NewRegisterInfo(Register reg);
+ void GrowRegisterMap(Register reg);
+
+ bool RegisterIsTemporary(Register reg) const {
+ return reg >= temporary_base_;
+ }
+
+ bool RegisterIsObservable(Register reg) const {
+ return reg != accumulator_ && !RegisterIsTemporary(reg);
+ }
+
+ static Register OperandToRegister(uint32_t operand) {
+ return Register::FromOperand(static_cast<int32_t>(operand));
+ }
+
+ size_t GetRegisterInfoTableIndex(Register reg) const {
+ return static_cast<size_t>(reg.index() + register_info_table_offset_);
+ }
+
+ Register RegisterFromRegisterInfoTableIndex(size_t index) const {
+ return Register(static_cast<int>(index) - register_info_table_offset_);
+ }
+
+ uint32_t NextEquivalenceId() {
+ equivalence_id_++;
+ CHECK_NE(equivalence_id_, kInvalidEquivalenceId);
+ return equivalence_id_;
+ }
+
+ Zone* zone() { return zone_; }
+
+ const Register accumulator_;
+ RegisterInfo* accumulator_info_;
+ const Register temporary_base_;
+
+ // Direct mapping to register info.
+ ZoneVector<RegisterInfo*> register_info_table_;
+ int register_info_table_offset_;
+
+ // Counter for equivalence set identifiers.
+ int equivalence_id_;
+
+ BytecodePipelineStage* next_stage_;
+ bool flush_required_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeRegisterOptimizer);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
diff --git a/deps/v8/src/interpreter/bytecode-register.cc b/deps/v8/src/interpreter/bytecode-register.cc
new file mode 100644
index 0000000000..31e3b90852
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register.cc
@@ -0,0 +1,149 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-register.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+static const int kLastParamRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kLastParamFromFp) /
+ kPointerSize;
+static const int kFunctionClosureRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ StandardFrameConstants::kFunctionOffset) /
+ kPointerSize;
+static const int kCurrentContextRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ StandardFrameConstants::kContextOffset) /
+ kPointerSize;
+static const int kNewTargetRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kNewTargetFromFp) /
+ kPointerSize;
+static const int kBytecodeArrayRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kBytecodeArrayFromFp) /
+ kPointerSize;
+static const int kBytecodeOffsetRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kBytecodeOffsetFromFp) /
+ kPointerSize;
+static const int kCallerPCOffsetRegisterIndex =
+ (InterpreterFrameConstants::kRegisterFileFromFp -
+ InterpreterFrameConstants::kCallerPCOffsetFromFp) /
+ kPointerSize;
+
+Register Register::FromParameterIndex(int index, int parameter_count) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, parameter_count);
+ int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
+ DCHECK_LT(register_index, 0);
+ return Register(register_index);
+}
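+
+// Illustration (a kLastParamRegisterIndex of -4 is assumed for the example;
+// the real value is derived from the frame constants): with
+// parameter_count == 3, parameters 0..2 map to register indices -6, -5
+// and -4. ToParameterIndex() inverts this mapping.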
+
+int Register::ToParameterIndex(int parameter_count) const {
+ DCHECK(is_parameter());
+ return index() - kLastParamRegisterIndex + parameter_count - 1;
+}
+
+Register Register::function_closure() {
+ return Register(kFunctionClosureRegisterIndex);
+}
+
+bool Register::is_function_closure() const {
+ return index() == kFunctionClosureRegisterIndex;
+}
+
+Register Register::current_context() {
+ return Register(kCurrentContextRegisterIndex);
+}
+
+bool Register::is_current_context() const {
+ return index() == kCurrentContextRegisterIndex;
+}
+
+Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
+
+bool Register::is_new_target() const {
+ return index() == kNewTargetRegisterIndex;
+}
+
+Register Register::bytecode_array() {
+ return Register(kBytecodeArrayRegisterIndex);
+}
+
+bool Register::is_bytecode_array() const {
+ return index() == kBytecodeArrayRegisterIndex;
+}
+
+Register Register::bytecode_offset() {
+ return Register(kBytecodeOffsetRegisterIndex);
+}
+
+bool Register::is_bytecode_offset() const {
+ return index() == kBytecodeOffsetRegisterIndex;
+}
+
+// static
+Register Register::virtual_accumulator() {
+ return Register(kCallerPCOffsetRegisterIndex);
+}
+
+OperandSize Register::SizeOfOperand() const {
+ int32_t operand = ToOperand();
+ if (operand >= kMinInt8 && operand <= kMaxInt8) {
+ return OperandSize::kByte;
+ } else if (operand >= kMinInt16 && operand <= kMaxInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
+ }
+}
+
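+// Returns true when the supplied registers occupy consecutive indices.
+// |reg1| and |reg2| are required; trailing invalid registers (the
+// defaults) are ignored, e.g. AreContiguous(Register(3), Register(4))
+// is true.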
+bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5) {
+ if (reg1.index() + 1 != reg2.index()) {
+ return false;
+ }
+ if (reg3.is_valid() && reg2.index() + 1 != reg3.index()) {
+ return false;
+ }
+ if (reg4.is_valid() && reg3.index() + 1 != reg4.index()) {
+ return false;
+ }
+ if (reg5.is_valid() && reg4.index() + 1 != reg5.index()) {
+ return false;
+ }
+ return true;
+}
+
+std::string Register::ToString(int parameter_count) {
+ if (is_current_context()) {
+ return std::string("<context>");
+ } else if (is_function_closure()) {
+ return std::string("<closure>");
+ } else if (is_new_target()) {
+ return std::string("<new.target>");
+ } else if (is_parameter()) {
+ int parameter_index = ToParameterIndex(parameter_count);
+ if (parameter_index == 0) {
+ return std::string("<this>");
+ } else {
+ std::ostringstream s;
+ s << "a" << parameter_index - 1;
+ return s.str();
+ }
+ } else {
+ std::ostringstream s;
+ s << "r" << index();
+ return s.str();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
new file mode 100644
index 0000000000..b698da6a74
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -0,0 +1,105 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_REGISTER_H_
+#define V8_INTERPRETER_BYTECODE_REGISTER_H_
+
+#include "src/interpreter/bytecodes.h"
+
+#include "src/frames.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// An interpreter register which is located in the function's register file
+// in its stack frame. Registers hold parameters, |this|, and expression
+// values.
+class Register final {
+ public:
+ explicit Register(int index = kInvalidIndex) : index_(index) {}
+
+ int index() const { return index_; }
+ bool is_parameter() const { return index() < 0; }
+ bool is_valid() const { return index_ != kInvalidIndex; }
+
+ static Register FromParameterIndex(int index, int parameter_count);
+ int ToParameterIndex(int parameter_count) const;
+
+ // Returns an invalid register.
+ static Register invalid_value() { return Register(); }
+
+ // Returns the register for the function's closure object.
+ static Register function_closure();
+ bool is_function_closure() const;
+
+ // Returns the register which holds the current context object.
+ static Register current_context();
+ bool is_current_context() const;
+
+ // Returns the register for the incoming new target value.
+ static Register new_target();
+ bool is_new_target() const;
+
+ // Returns the register for the bytecode array.
+ static Register bytecode_array();
+ bool is_bytecode_array() const;
+
+ // Returns the register for the saved bytecode offset.
+ static Register bytecode_offset();
+ bool is_bytecode_offset() const;
+
+ // Returns a register that can be used to represent the accumulator
+ // within code in the interpreter, but should never be emitted in
+ // bytecode.
+ static Register virtual_accumulator();
+
+ OperandSize SizeOfOperand() const;
+
+ int32_t ToOperand() const { return kRegisterFileStartOffset - index_; }
+ static Register FromOperand(int32_t operand) {
+ return Register(kRegisterFileStartOffset - operand);
+ }
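+
+ // Illustration: ToOperand() and FromOperand() are inverses. With an
+ // (assumed, for the example) kRegisterFileStartOffset of 2, Register(0)
+ // encodes as operand 2 and FromOperand(2) yields Register(0).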
+
+ static bool AreContiguous(Register reg1, Register reg2,
+ Register reg3 = Register(),
+ Register reg4 = Register(),
+ Register reg5 = Register());
+
+ std::string ToString(int parameter_count);
+
+ bool operator==(const Register& other) const {
+ return index() == other.index();
+ }
+ bool operator!=(const Register& other) const {
+ return index() != other.index();
+ }
+ bool operator<(const Register& other) const {
+ return index() < other.index();
+ }
+ bool operator<=(const Register& other) const {
+ return index() <= other.index();
+ }
+ bool operator>(const Register& other) const {
+ return index() > other.index();
+ }
+ bool operator>=(const Register& other) const {
+ return index() >= other.index();
+ }
+
+ private:
+ static const int kInvalidIndex = kMaxInt;
+ static const int kRegisterFileStartOffset =
+ InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize;
+
+ void* operator new(size_t size) = delete;
+ void operator delete(void* p) = delete;
+
+ int index_;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_REGISTER_H_
diff --git a/deps/v8/src/interpreter/bytecode-traits.h b/deps/v8/src/interpreter/bytecode-traits.h
index c724827356..672a687faf 100644
--- a/deps/v8/src/interpreter/bytecode-traits.h
+++ b/deps/v8/src/interpreter/bytecode-traits.h
@@ -30,17 +30,41 @@ OPERAND_TYPE_INFO_LIST(DECLARE_OPERAND_TYPE_INFO)
template <OperandType>
struct OperandTraits {
- typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfo;
+ typedef OperandTypeInfoTraits<OperandTypeInfo::kNone> TypeInfoTraits;
+ static const OperandTypeInfo kOperandTypeInfo = OperandTypeInfo::kNone;
};
-#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType) \
- template <> \
- struct OperandTraits<OperandType::k##Name> { \
- typedef OperandTypeInfoTraits<InfoType> TypeInfo; \
+#define DECLARE_OPERAND_TYPE_TRAITS(Name, InfoType) \
+ template <> \
+ struct OperandTraits<OperandType::k##Name> { \
+ typedef OperandTypeInfoTraits<InfoType> TypeInfoTraits; \
+ static const OperandTypeInfo kOperandTypeInfo = InfoType; \
};
OPERAND_TYPE_LIST(DECLARE_OPERAND_TYPE_TRAITS)
#undef DECLARE_OPERAND_TYPE_TRAITS
+template <OperandType operand_type, OperandScale operand_scale>
+struct OperandScaler {
+ template <bool, OperandSize, OperandScale>
+ struct Helper {
+ static const int kSize = 0;
+ };
+ template <OperandSize size, OperandScale scale>
+ struct Helper<false, size, scale> {
+ static const int kSize = static_cast<int>(size);
+ };
+ template <OperandSize size, OperandScale scale>
+ struct Helper<true, size, scale> {
+ static const int kSize = static_cast<int>(size) * static_cast<int>(scale);
+ };
+
+ static const int kSize =
+ Helper<OperandTraits<operand_type>::TypeInfoTraits::kIsScalable,
+ OperandTraits<operand_type>::TypeInfoTraits::kUnscaledSize,
+ operand_scale>::kSize;
+ static const OperandSize kOperandSize = static_cast<OperandSize>(kSize);
+};
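+
+// Illustration (sketch): for a scalable byte-wide operand type,
+// OperandScaler<type, OperandScale::kDouble>::kSize is 2, i.e.
+// OperandSize::kShort; for non-scalable operand types the scale is
+// ignored and kSize stays at the unscaled size.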
+
template <OperandType>
struct RegisterOperandTraits {
static const int kIsRegisterOperand = 0;
@@ -61,11 +85,19 @@ template <AccumulatorUse accumulator_use, OperandType operand_0,
OperandType operand_1, OperandType operand_2, OperandType operand_3>
struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
operand_3> {
- static OperandType GetOperandType(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const OperandType kOperands[] = {operand_0, operand_1, operand_2,
- operand_3};
- return kOperands[i];
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
+ operand_3, OperandType::kNone};
+ return operand_types;
+ }
+
+ static const OperandTypeInfo* GetOperandTypeInfos() {
+ static const OperandTypeInfo operand_type_infos[] = {
+ OperandTraits<operand_0>::kOperandTypeInfo,
+ OperandTraits<operand_1>::kOperandTypeInfo,
+ OperandTraits<operand_2>::kOperandTypeInfo,
+ OperandTraits<operand_3>::kOperandTypeInfo, OperandTypeInfo::kNone};
+ return operand_type_infos;
}
template <OperandType ot>
@@ -75,10 +107,10 @@ struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
}
static inline bool IsScalable() {
- return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
- OperandTraits<operand_1>::TypeInfo::kIsScalable |
- OperandTraits<operand_2>::TypeInfo::kIsScalable |
- OperandTraits<operand_3>::TypeInfo::kIsScalable);
+ return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+ OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
+ OperandTraits<operand_2>::TypeInfoTraits::kIsScalable |
+ OperandTraits<operand_3>::TypeInfoTraits::kIsScalable);
}
static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -88,20 +120,23 @@ struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2,
RegisterOperandTraits<operand_1>::kIsRegisterOperand +
RegisterOperandTraits<operand_2>::kIsRegisterOperand +
RegisterOperandTraits<operand_3>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
- (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
- (RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
};
template <AccumulatorUse accumulator_use, OperandType operand_0,
OperandType operand_1, OperandType operand_2>
struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
- static inline OperandType GetOperandType(int i) {
- DCHECK(0 <= i && i <= 2);
- const OperandType kOperands[] = {operand_0, operand_1, operand_2};
- return kOperands[i];
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, operand_1, operand_2,
+ OperandType::kNone};
+ return operand_types;
+ }
+
+ static const OperandTypeInfo* GetOperandTypeInfos() {
+ static const OperandTypeInfo operand_type_infos[] = {
+ OperandTraits<operand_0>::kOperandTypeInfo,
+ OperandTraits<operand_1>::kOperandTypeInfo,
+ OperandTraits<operand_2>::kOperandTypeInfo, OperandTypeInfo::kNone};
+ return operand_type_infos;
}
template <OperandType ot>
@@ -110,9 +145,9 @@ struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
}
static inline bool IsScalable() {
- return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
- OperandTraits<operand_1>::TypeInfo::kIsScalable |
- OperandTraits<operand_2>::TypeInfo::kIsScalable);
+ return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+ OperandTraits<operand_1>::TypeInfoTraits::kIsScalable |
+ OperandTraits<operand_2>::TypeInfoTraits::kIsScalable);
}
static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -121,19 +156,22 @@ struct BytecodeTraits<accumulator_use, operand_0, operand_1, operand_2> {
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
RegisterOperandTraits<operand_1>::kIsRegisterOperand +
RegisterOperandTraits<operand_2>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
- (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
};
template <AccumulatorUse accumulator_use, OperandType operand_0,
OperandType operand_1>
struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
- static inline OperandType GetOperandType(int i) {
- DCHECK(0 <= i && i < kOperandCount);
- const OperandType kOperands[] = {operand_0, operand_1};
- return kOperands[i];
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, operand_1,
+ OperandType::kNone};
+ return operand_types;
+ }
+
+ static const OperandTypeInfo* GetOperandTypeInfos() {
+ static const OperandTypeInfo operand_type_infos[] = {
+ OperandTraits<operand_0>::kOperandTypeInfo,
+ OperandTraits<operand_1>::kOperandTypeInfo, OperandTypeInfo::kNone};
+ return operand_type_infos;
}
template <OperandType ot>
@@ -142,8 +180,8 @@ struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
}
static inline bool IsScalable() {
- return (OperandTraits<operand_0>::TypeInfo::kIsScalable |
- OperandTraits<operand_1>::TypeInfo::kIsScalable);
+ return (OperandTraits<operand_0>::TypeInfoTraits::kIsScalable |
+ OperandTraits<operand_1>::TypeInfoTraits::kIsScalable);
}
static const AccumulatorUse kAccumulatorUse = accumulator_use;
@@ -151,16 +189,19 @@ struct BytecodeTraits<accumulator_use, operand_0, operand_1> {
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand +
RegisterOperandTraits<operand_1>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand +
- (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
};
template <AccumulatorUse accumulator_use, OperandType operand_0>
struct BytecodeTraits<accumulator_use, operand_0> {
- static inline OperandType GetOperandType(int i) {
- DCHECK(i == 0);
- return operand_0;
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {operand_0, OperandType::kNone};
+ return operand_types;
+ }
+
+ static const OperandTypeInfo* GetOperandTypeInfos() {
+ static const OperandTypeInfo operand_type_infos[] = {
+ OperandTraits<operand_0>::kOperandTypeInfo, OperandTypeInfo::kNone};
+ return operand_type_infos;
}
template <OperandType ot>
@@ -169,22 +210,26 @@ struct BytecodeTraits<accumulator_use, operand_0> {
}
static inline bool IsScalable() {
- return OperandTraits<operand_0>::TypeInfo::kIsScalable;
+ return OperandTraits<operand_0>::TypeInfoTraits::kIsScalable;
}
static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 1;
static const int kRegisterOperandCount =
RegisterOperandTraits<operand_0>::kIsRegisterOperand;
- static const int kRegisterOperandBitmap =
- RegisterOperandTraits<operand_0>::kIsRegisterOperand;
};
template <AccumulatorUse accumulator_use>
struct BytecodeTraits<accumulator_use> {
- static inline OperandType GetOperandType(int i) {
- UNREACHABLE();
- return OperandType::kNone;
+ static const OperandType* GetOperandTypes() {
+ static const OperandType operand_types[] = {OperandType::kNone};
+ return operand_types;
+ }
+
+ static const OperandTypeInfo* GetOperandTypeInfos() {
+ static const OperandTypeInfo operand_type_infos[] = {
+ OperandTypeInfo::kNone};
+ return operand_type_infos;
}
template <OperandType ot>
@@ -197,40 +242,24 @@ struct BytecodeTraits<accumulator_use> {
static const AccumulatorUse kAccumulatorUse = accumulator_use;
static const int kOperandCount = 0;
static const int kRegisterOperandCount = 0;
- static const int kRegisterOperandBitmap = 0;
-};
-
-template <bool>
-struct OperandScaler {
- static int Multiply(int size, int operand_scale) { return 0; }
-};
-
-template <>
-struct OperandScaler<false> {
- static int Multiply(int size, int operand_scale) { return size; }
-};
-
-template <>
-struct OperandScaler<true> {
- static int Multiply(int size, int operand_scale) {
- return size * operand_scale;
- }
};
static OperandSize ScaledOperandSize(OperandType operand_type,
OperandScale operand_scale) {
+ STATIC_ASSERT(static_cast<int>(OperandScale::kQuadruple) == 4 &&
+ OperandScale::kLast == OperandScale::kQuadruple);
+ int index = static_cast<int>(operand_scale) >> 1;
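+ // OperandScale values are 1, 2 and 4, so the shift maps
+ // kSingle -> 0, kDouble -> 1, kQuadruple -> 2.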
switch (operand_type) {
-#define CASE(Name, TypeInfo) \
- case OperandType::k##Name: { \
- OperandSize base_size = OperandTypeInfoTraits<TypeInfo>::kUnscaledSize; \
- int size = \
- OperandScaler<OperandTypeInfoTraits<TypeInfo>::kIsScalable>::Multiply( \
- static_cast<int>(base_size), static_cast<int>(operand_scale)); \
- OperandSize operand_size = static_cast<OperandSize>(size); \
- DCHECK(operand_size == OperandSize::kByte || \
- operand_size == OperandSize::kShort || \
- operand_size == OperandSize::kQuad); \
- return operand_size; \
+#define CASE(Name, TypeInfo) \
+ case OperandType::k##Name: { \
+ static const OperandSize kOperandSizes[] = { \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kSingle>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kDouble>::kOperandSize, \
+ OperandScaler<OperandType::k##Name, \
+ OperandScale::kQuadruple>::kOperandSize}; \
+ return kOperandSizes[index]; \
}
OPERAND_TYPE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index fd27f391aa..09bcd22b92 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -6,14 +6,15 @@
#include <iomanip>
-#include "src/frames.h"
+#include "src/base/bits.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-traits.h"
-#include "src/interpreter/interpreter.h"
namespace v8 {
namespace internal {
namespace interpreter {
+STATIC_CONST_MEMBER_DEFINITION const int Bytecodes::kMaxOperands;
// static
const char* Bytecodes::ToString(Bytecode bytecode) {
@@ -74,15 +75,13 @@ const char* Bytecodes::OperandTypeToString(OperandType operand_type) {
// static
const char* Bytecodes::OperandScaleToString(OperandScale operand_scale) {
switch (operand_scale) {
- case OperandScale::kSingle:
- return "Single";
- case OperandScale::kDouble:
- return "Double";
- case OperandScale::kQuadruple:
- return "Quadruple";
- case OperandScale::kInvalid:
- UNREACHABLE();
+#define CASE(Name, _) \
+ case OperandScale::k##Name: \
+ return #Name;
+ OPERAND_SCALE_LIST(CASE)
+#undef CASE
}
+ UNREACHABLE();
return "";
}
@@ -102,14 +101,12 @@ const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
return "";
}
-
// static
uint8_t Bytecodes::ToByte(Bytecode bytecode) {
- DCHECK(bytecode <= Bytecode::kLast);
+ DCHECK_LE(bytecode, Bytecode::kLast);
return static_cast<uint8_t>(bytecode);
}
-
// static
Bytecode Bytecodes::FromByte(uint8_t value) {
Bytecode bytecode = static_cast<Bytecode>(value);
@@ -117,7 +114,6 @@ Bytecode Bytecodes::FromByte(uint8_t value) {
return bytecode;
}
-
// static
Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
DCHECK(!IsDebugBreak(bytecode));
@@ -150,6 +146,10 @@ int Bytecodes::Size(Bytecode bytecode, OperandScale operand_scale) {
return size;
}
+// static
+size_t Bytecodes::ReturnCount(Bytecode bytecode) {
+ return bytecode == Bytecode::kReturn ? 1 : 0;
+}
// static
int Bytecodes::NumberOfOperands(Bytecode bytecode) {
@@ -165,7 +165,6 @@ int Bytecodes::NumberOfOperands(Bytecode bytecode) {
return 0;
}
-
// static
int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
@@ -241,44 +240,122 @@ bool Bytecodes::WritesAccumulator(Bytecode bytecode) {
}
// static
+bool Bytecodes::WritesBooleanToAccumulator(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kToBooleanLogicalNot:
+ case Bytecode::kLogicalNot:
+ case Bytecode::kTestEqual:
+ case Bytecode::kTestNotEqual:
+ case Bytecode::kTestEqualStrict:
+ case Bytecode::kTestLessThan:
+ case Bytecode::kTestLessThanOrEqual:
+ case Bytecode::kTestGreaterThan:
+ case Bytecode::kTestGreaterThanOrEqual:
+ case Bytecode::kTestInstanceOf:
+ case Bytecode::kTestIn:
+ case Bytecode::kForInDone:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+bool Bytecodes::IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kLdaZero:
+ case Bytecode::kLdaSmi:
+ case Bytecode::kLdaUndefined:
+ case Bytecode::kLdaNull:
+ case Bytecode::kLdaTheHole:
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kLdaConstant:
+ case Bytecode::kLdar:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+bool Bytecodes::IsJumpWithoutEffects(Bytecode bytecode) {
+ return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
+}
+
+// static
+bool Bytecodes::IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kMov:
+ case Bytecode::kPopContext:
+ case Bytecode::kPushContext:
+ case Bytecode::kStar:
+ case Bytecode::kLdrUndefined:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+bool Bytecodes::IsWithoutExternalSideEffects(Bytecode bytecode) {
+ // These bytecodes only manipulate interpreter frame state and will
+ // never throw.
+ return (IsAccumulatorLoadWithoutEffects(bytecode) ||
+ IsRegisterLoadWithoutEffects(bytecode) ||
+ bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
+}
+
+// static
OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
+ DCHECK_LE(bytecode, Bytecode::kLast);
+ DCHECK_LT(i, NumberOfOperands(bytecode));
+ DCHECK_GE(i, 0);
+ return GetOperandTypes(bytecode)[i];
+}
+
+// static
+const OperandType* Bytecodes::GetOperandTypes(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
#define CASE(Name, ...) \
case Bytecode::k##Name: \
- return BytecodeTraits<__VA_ARGS__>::GetOperandType(i);
+ return BytecodeTraits<__VA_ARGS__>::GetOperandTypes();
BYTECODE_LIST(CASE)
#undef CASE
}
UNREACHABLE();
- return OperandType::kNone;
-}
-
-// static
-OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
- OperandScale operand_scale) {
- OperandType op_type = GetOperandType(bytecode, i);
- return ScaledOperandSize(op_type, operand_scale);
+ return nullptr;
}
// static
-int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
+const OperandTypeInfo* Bytecodes::GetOperandTypeInfos(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
-#define CASE(Name, ...) \
- case Bytecode::k##Name: \
- typedef BytecodeTraits<__VA_ARGS__> Name##Trait; \
- return Name##Trait::kRegisterOperandBitmap;
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ return BytecodeTraits<__VA_ARGS__>::GetOperandTypeInfos();
BYTECODE_LIST(CASE)
#undef CASE
}
UNREACHABLE();
- return false;
+ return nullptr;
+}
+
+// static
+OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i,
+ OperandScale operand_scale) {
+ DCHECK_LT(i, NumberOfOperands(bytecode));
+ OperandType operand_type = GetOperandType(bytecode, i);
+ return SizeOfOperand(operand_type, operand_scale);
}
// static
int Bytecodes::GetOperandOffset(Bytecode bytecode, int i,
OperandScale operand_scale) {
+ DCHECK_LT(i, Bytecodes::NumberOfOperands(bytecode));
// TODO(oth): restore this to a statically determined constant.
int offset = 1;
for (int operand_index = 0; operand_index < i; ++operand_index) {
@@ -307,7 +384,6 @@ bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
bytecode == Bytecode::kJumpIfUndefined;
}
-
// static
bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
return bytecode == Bytecode::kJumpIfTrueConstant ||
@@ -343,6 +419,31 @@ bool Bytecodes::IsJump(Bytecode bytecode) {
return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
}
+// static
+bool Bytecodes::IsJumpIfToBoolean(Bytecode bytecode) {
+ return bytecode == Bytecode::kJumpIfToBooleanTrue ||
+ bytecode == Bytecode::kJumpIfToBooleanFalse ||
+ bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
+ bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+}
+
+// static
+Bytecode Bytecodes::GetJumpWithoutToBoolean(Bytecode bytecode) {
+ switch (bytecode) {
+ case Bytecode::kJumpIfToBooleanTrue:
+ return Bytecode::kJumpIfTrue;
+ case Bytecode::kJumpIfToBooleanFalse:
+ return Bytecode::kJumpIfFalse;
+ case Bytecode::kJumpIfToBooleanTrueConstant:
+ return Bytecode::kJumpIfTrueConstant;
+ case Bytecode::kJumpIfToBooleanFalseConstant:
+ return Bytecode::kJumpIfFalseConstant;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return Bytecode::kIllegal;
+}
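Together with WritesBooleanToAccumulator and IsJumpIfToBoolean, this enables a peephole rewrite: when the preceding bytecode is known to leave a boolean in the accumulator, the coercing jump can be replaced by its plain counterpart. A sketch of the intended use, not the optimizer's actual code:

// Rewrites a ToBoolean jump to its non-coercing form when the last
// bytecode already produced a boolean value.
Bytecode SimplifyJump(Bytecode last, Bytecode jump) {
  if (Bytecodes::IsJumpIfToBoolean(jump) &&
      Bytecodes::WritesBooleanToAccumulator(last)) {
    return Bytecodes::GetJumpWithoutToBoolean(jump);
  }
  return jump;
}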
// static
bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
@@ -371,6 +472,11 @@ bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
}
// static
+bool Bytecodes::IsLdarOrStar(Bytecode bytecode) {
+ return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
+}
+
+// static
bool Bytecodes::IsBytecodeWithScalableOperands(Bytecode bytecode) {
switch (bytecode) {
#define CASE(Name, ...) \
@@ -398,6 +504,11 @@ bool Bytecodes::IsPrefixScalingBytecode(Bytecode bytecode) {
}
// static
+bool Bytecodes::PutsNameInAccumulator(Bytecode bytecode) {
+ return bytecode == Bytecode::kTypeOf;
+}
+
+// static
bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || IsJump(bytecode);
}
@@ -461,160 +572,83 @@ bool Bytecodes::IsRegisterOutputOperandType(OperandType operand_type) {
}
// static
-bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
- switch (operand_type) {
-#define CASE(Name, _) \
- case OperandType::k##Name: \
- return OperandTraits<OperandType::k##Name>::TypeInfo::kIsUnsigned;
- OPERAND_TYPE_LIST(CASE)
-#undef CASE
+bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
+ if (operand_scale == OperandScale::kSingle) {
+ switch (bytecode) {
+ case Bytecode::kLdaZero:
+ case Bytecode::kLdaSmi:
+ case Bytecode::kLdaNull:
+ case Bytecode::kLdaTheHole:
+ case Bytecode::kLdaConstant:
+ case Bytecode::kAdd:
+ case Bytecode::kSub:
+ case Bytecode::kMul:
+ case Bytecode::kAddSmi:
+ case Bytecode::kSubSmi:
+ case Bytecode::kInc:
+ case Bytecode::kDec:
+ case Bytecode::kTypeOf:
+ case Bytecode::kCall:
+ case Bytecode::kNew:
+ return true;
+ default:
+ return false;
+ }
}
- UNREACHABLE();
return false;
}
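The idea behind Star lookahead is that a handler (for example Add) can peek at the next bytecode and, if it is Star at single operand scale, perform the register store itself and skip one dispatch. A simplified check, not the interpreter's actual dispatch code:

// A handler may fold the trailing accumulator-to-register store into
// itself only when the lookahead predicate approves the current bytecode.
bool ShouldInlineStarStore(Bytecode current, Bytecode next) {
  return next == Bytecode::kStar &&
         Bytecodes::IsStarLookahead(current, OperandScale::kSingle);
}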
// static
-OperandScale Bytecodes::NextOperandScale(OperandScale operand_scale) {
- DCHECK(operand_scale >= OperandScale::kSingle &&
- operand_scale <= OperandScale::kMaxValid);
- return static_cast<OperandScale>(2 * static_cast<int>(operand_scale));
-}
-
-// static
-Register Bytecodes::DecodeRegisterOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale) {
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- int32_t operand =
- DecodeSignedOperand(operand_start, operand_type, operand_scale);
- return Register::FromOperand(operand);
+int Bytecodes::GetNumberOfRegistersRepresentedBy(OperandType operand_type) {
+ switch (operand_type) {
+ case OperandType::kMaybeReg:
+ case OperandType::kReg:
+ case OperandType::kRegOut:
+ return 1;
+ case OperandType::kRegPair:
+ case OperandType::kRegOutPair:
+ return 2;
+ case OperandType::kRegOutTriple:
+ return 3;
+ default:
+ return 0;
+ }
+ return 0;
}
// static
-int32_t Bytecodes::DecodeSignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale) {
- DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
- switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
- case OperandSize::kByte:
- return static_cast<int8_t>(*operand_start);
- case OperandSize::kShort:
- return static_cast<int16_t>(ReadUnalignedUInt16(operand_start));
- case OperandSize::kQuad:
- return static_cast<int32_t>(ReadUnalignedUInt32(operand_start));
- case OperandSize::kNone:
- UNREACHABLE();
+bool Bytecodes::IsUnsignedOperandType(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ return OperandTraits<OperandType::k##Name>::TypeInfoTraits::kIsUnsigned;
+ OPERAND_TYPE_LIST(CASE)
+#undef CASE
}
- return 0;
+ UNREACHABLE();
+ return false;
}
// static
-uint32_t Bytecodes::DecodeUnsignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale) {
- DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
- switch (Bytecodes::SizeOfOperand(operand_type, operand_scale)) {
- case OperandSize::kByte:
- return *operand_start;
- case OperandSize::kShort:
- return ReadUnalignedUInt16(operand_start);
- case OperandSize::kQuad:
- return ReadUnalignedUInt32(operand_start);
- case OperandSize::kNone:
- UNREACHABLE();
+OperandSize Bytecodes::SizeForSignedOperand(int value) {
+ if (value >= kMinInt8 && value <= kMaxInt8) {
+ return OperandSize::kByte;
+ } else if (value >= kMinInt16 && value <= kMaxInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
}
- return 0;
}
// static
-std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
- int parameter_count) {
- Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
- int prefix_offset = 0;
- OperandScale operand_scale = OperandScale::kSingle;
- if (IsPrefixScalingBytecode(bytecode)) {
- prefix_offset = 1;
- operand_scale = Bytecodes::PrefixBytecodeToOperandScale(bytecode);
- bytecode = Bytecodes::FromByte(bytecode_start[1]);
- }
-
- // Prepare to print bytecode and operands as hex digits.
- std::ios saved_format(nullptr);
- saved_format.copyfmt(saved_format);
- os.fill('0');
- os.flags(std::ios::hex);
-
- int bytecode_size = Bytecodes::Size(bytecode, operand_scale);
- for (int i = 0; i < prefix_offset + bytecode_size; i++) {
- os << std::setw(2) << static_cast<uint32_t>(bytecode_start[i]) << ' ';
- }
- os.copyfmt(saved_format);
-
- const int kBytecodeColumnSize = 6;
- for (int i = prefix_offset + bytecode_size; i < kBytecodeColumnSize; i++) {
- os << " ";
- }
-
- os << Bytecodes::ToString(bytecode, operand_scale) << " ";
-
- // Operands for the debug break are from the original instruction.
- if (IsDebugBreak(bytecode)) return os;
-
- int number_of_operands = NumberOfOperands(bytecode);
- int range = 0;
- for (int i = 0; i < number_of_operands; i++) {
- OperandType op_type = GetOperandType(bytecode, i);
- const uint8_t* operand_start =
- &bytecode_start[prefix_offset +
- GetOperandOffset(bytecode, i, operand_scale)];
- switch (op_type) {
- case interpreter::OperandType::kRegCount:
- os << "#"
- << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
- break;
- case interpreter::OperandType::kIdx:
- case interpreter::OperandType::kRuntimeId:
- os << "["
- << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
- << "]";
- break;
- case interpreter::OperandType::kImm:
- os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
- << "]";
- break;
- case interpreter::OperandType::kFlag8:
- os << "#"
- << DecodeUnsignedOperand(operand_start, op_type, operand_scale);
- break;
- case interpreter::OperandType::kMaybeReg:
- case interpreter::OperandType::kReg:
- case interpreter::OperandType::kRegOut: {
- Register reg =
- DecodeRegisterOperand(operand_start, op_type, operand_scale);
- os << reg.ToString(parameter_count);
- break;
- }
- case interpreter::OperandType::kRegOutTriple:
- range += 1;
- case interpreter::OperandType::kRegOutPair:
- case interpreter::OperandType::kRegPair: {
- range += 1;
- Register first_reg =
- DecodeRegisterOperand(operand_start, op_type, operand_scale);
- Register last_reg = Register(first_reg.index() + range);
- os << first_reg.ToString(parameter_count) << "-"
- << last_reg.ToString(parameter_count);
- break;
- }
- case interpreter::OperandType::kNone:
- UNREACHABLE();
- break;
- }
- if (i != number_of_operands - 1) {
- os << ", ";
- }
+OperandSize Bytecodes::SizeForUnsignedOperand(uint32_t value) {
+ if (value <= kMaxUInt8) {
+ return OperandSize::kByte;
+ } else if (value <= kMaxUInt16) {
+ return OperandSize::kShort;
+ } else {
+ return OperandSize::kQuad;
}
- return os;
}
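The boundaries follow directly from the kMinInt8/kMaxInt8, kMinInt16/kMaxInt16, kMaxUInt8 and kMaxUInt16 limits; a few illustrative spot checks (assumed expectations, not tests from this patch):

void CheckOperandSizeBoundaries() {
  DCHECK(Bytecodes::SizeForSignedOperand(127) == OperandSize::kByte);
  DCHECK(Bytecodes::SizeForSignedOperand(-200) == OperandSize::kShort);
  DCHECK(Bytecodes::SizeForSignedOperand(70000) == OperandSize::kQuad);
  DCHECK(Bytecodes::SizeForUnsignedOperand(255) == OperandSize::kByte);
  DCHECK(Bytecodes::SizeForUnsignedOperand(65536) == OperandSize::kQuad);
}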
// static
@@ -644,105 +678,6 @@ std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
return os << Bytecodes::OperandTypeToString(operand_type);
}
-static const int kLastParamRegisterIndex =
- -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
-static const int kFunctionClosureRegisterIndex =
- -InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
-static const int kCurrentContextRegisterIndex =
- -InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
-static const int kNewTargetRegisterIndex =
- -InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
-
-bool Register::is_byte_operand() const {
- return index_ >= -kMaxInt8 && index_ <= -kMinInt8;
-}
-
-bool Register::is_short_operand() const {
- return index_ >= -kMaxInt16 && index_ <= -kMinInt16;
-}
-
-Register Register::FromParameterIndex(int index, int parameter_count) {
- DCHECK_GE(index, 0);
- DCHECK_LT(index, parameter_count);
- int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
- DCHECK_LT(register_index, 0);
- return Register(register_index);
-}
-
-
-int Register::ToParameterIndex(int parameter_count) const {
- DCHECK(is_parameter());
- return index() - kLastParamRegisterIndex + parameter_count - 1;
-}
-
-
-Register Register::function_closure() {
- return Register(kFunctionClosureRegisterIndex);
-}
-
-
-bool Register::is_function_closure() const {
- return index() == kFunctionClosureRegisterIndex;
-}
-
-
-Register Register::current_context() {
- return Register(kCurrentContextRegisterIndex);
-}
-
-
-bool Register::is_current_context() const {
- return index() == kCurrentContextRegisterIndex;
-}
-
-
-Register Register::new_target() { return Register(kNewTargetRegisterIndex); }
-
-
-bool Register::is_new_target() const {
- return index() == kNewTargetRegisterIndex;
-}
-
-bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
- Register reg4, Register reg5) {
- if (reg1.index() + 1 != reg2.index()) {
- return false;
- }
- if (reg3.is_valid() && reg2.index() + 1 != reg3.index()) {
- return false;
- }
- if (reg4.is_valid() && reg3.index() + 1 != reg4.index()) {
- return false;
- }
- if (reg5.is_valid() && reg4.index() + 1 != reg5.index()) {
- return false;
- }
- return true;
-}
-
-std::string Register::ToString(int parameter_count) {
- if (is_current_context()) {
- return std::string("<context>");
- } else if (is_function_closure()) {
- return std::string("<closure>");
- } else if (is_new_target()) {
- return std::string("<new.target>");
- } else if (is_parameter()) {
- int parameter_index = ToParameterIndex(parameter_count);
- if (parameter_index == 0) {
- return std::string("<this>");
- } else {
- std::ostringstream s;
- s << "a" << parameter_index - 1;
- return s.str();
- }
- } else {
- std::ostringstream s;
- s << "r" << index();
- return s.str();
- }
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 23612713aa..036ae72872 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -5,11 +5,13 @@
#ifndef V8_INTERPRETER_BYTECODES_H_
#define V8_INTERPRETER_BYTECODES_H_
+#include <cstdint>
#include <iosfwd>
+#include <string>
-// Clients of this interface shouldn't depend on lots of interpreter internals.
-// Do not include anything from src/interpreter here!
-#include "src/utils.h"
+// This interface and its implementation are independent of the
+// libv8_base library as they are used by the interpreter and the
+// standalone mkpeephole table generator program.
namespace v8 {
namespace internal {
@@ -29,6 +31,7 @@ namespace interpreter {
#define SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
+ V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(Imm, OperandTypeInfo::kScalableSignedByte) \
V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
@@ -72,180 +75,230 @@ namespace interpreter {
DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
// The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V) \
- /* Extended width operands */ \
- V(Wide, AccumulatorUse::kNone) \
- V(ExtraWide, AccumulatorUse::kNone) \
- \
- /* Loading the accumulator */ \
- V(LdaZero, AccumulatorUse::kWrite) \
- V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm) \
- V(LdaUndefined, AccumulatorUse::kWrite) \
- V(LdaNull, AccumulatorUse::kWrite) \
- V(LdaTheHole, AccumulatorUse::kWrite) \
- V(LdaTrue, AccumulatorUse::kWrite) \
- V(LdaFalse, AccumulatorUse::kWrite) \
- V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
- \
- /* Globals */ \
- V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx) \
- V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx) \
- V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
- OperandType::kIdx) \
- V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx, \
- OperandType::kIdx) \
- \
- /* Context operations */ \
- V(PushContext, AccumulatorUse::kRead, OperandType::kReg) \
- V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
- V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
- OperandType::kIdx) \
- V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kIdx) \
- \
- /* Load-Store lookup slots */ \
- V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx) \
- \
- /* Register-accumulator transfers */ \
- V(Ldar, AccumulatorUse::kWrite, OperandType::kReg) \
- V(Star, AccumulatorUse::kRead, OperandType::kRegOut) \
- \
- /* Register-register transfers */ \
- V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut) \
- \
- /* LoadIC operations */ \
- V(LoadIC, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kIdx, \
- OperandType::kIdx) \
- V(KeyedLoadIC, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kIdx) \
- \
- /* StoreIC operations */ \
- V(StoreICSloppy, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx) \
- V(StoreICStrict, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx) \
- V(KeyedStoreICSloppy, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kReg, OperandType::kIdx) \
- V(KeyedStoreICStrict, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kReg, OperandType::kIdx) \
- \
- /* Binary Operators */ \
- V(Add, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Div, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg) \
- \
- /* Unary Operators */ \
- V(Inc, AccumulatorUse::kReadWrite) \
- V(Dec, AccumulatorUse::kReadWrite) \
- V(LogicalNot, AccumulatorUse::kReadWrite) \
- V(TypeOf, AccumulatorUse::kReadWrite) \
- V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg) \
- \
- /* Call operations */ \
- V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
- OperandType::kRegCount, OperandType::kIdx) \
- V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
- OperandType::kRegCount, OperandType::kIdx) \
- V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
- OperandType::kMaybeReg, OperandType::kRegCount) \
- V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId, \
- OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
- V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kReg, OperandType::kRegCount) \
- \
- /* Intrinsics */ \
- V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
- OperandType::kMaybeReg, OperandType::kRegCount) \
- \
- /* New operator */ \
- V(New, AccumulatorUse::kReadWrite, OperandType::kReg, \
- OperandType::kMaybeReg, OperandType::kRegCount) \
- \
- /* Test Operators */ \
- V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg) \
- V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
- \
- /* Cast operators */ \
- V(ToName, AccumulatorUse::kReadWrite) \
- V(ToNumber, AccumulatorUse::kReadWrite) \
- V(ToObject, AccumulatorUse::kReadWrite) \
- \
- /* Literals */ \
- V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8) \
- V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8) \
- V(CreateObjectLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kIdx, OperandType::kFlag8) \
- \
- /* Closure allocation */ \
- V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kFlag8) \
- \
- /* Arguments allocation */ \
- V(CreateMappedArguments, AccumulatorUse::kWrite) \
- V(CreateUnmappedArguments, AccumulatorUse::kWrite) \
- V(CreateRestParameter, AccumulatorUse::kWrite) \
- \
- /* Control Flow */ \
- V(Jump, AccumulatorUse::kNone, OperandType::kImm) \
- V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx) \
- V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- \
- /* Complex flow control For..in */ \
- V(ForInPrepare, AccumulatorUse::kRead, OperandType::kRegOutTriple) \
- V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg) \
- V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
- OperandType::kRegPair, OperandType::kIdx) \
- V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg) \
- \
- /* Perform a stack guard check */ \
- V(StackCheck, AccumulatorUse::kNone) \
- \
- /* Non-local flow control */ \
- V(Throw, AccumulatorUse::kRead) \
- V(ReThrow, AccumulatorUse::kRead) \
- V(Return, AccumulatorUse::kNone) \
- \
- /* Debugger */ \
- V(Debugger, AccumulatorUse::kNone) \
- DEBUG_BREAK_BYTECODE_LIST(V) \
- \
- /* Illegal bytecode (terminates execution) */ \
- V(Illegal, AccumulatorUse::kNone)
+#define BYTECODE_LIST(V) \
+ /* Extended width operands */ \
+ V(Wide, AccumulatorUse::kNone) \
+ V(ExtraWide, AccumulatorUse::kNone) \
+ \
+ /* Loading the accumulator */ \
+ V(LdaZero, AccumulatorUse::kWrite) \
+ V(LdaSmi, AccumulatorUse::kWrite, OperandType::kImm) \
+ V(LdaUndefined, AccumulatorUse::kWrite) \
+ V(LdaNull, AccumulatorUse::kWrite) \
+ V(LdaTheHole, AccumulatorUse::kWrite) \
+ V(LdaTrue, AccumulatorUse::kWrite) \
+ V(LdaFalse, AccumulatorUse::kWrite) \
+ V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
+ \
+ /* Loading registers */ \
+ V(LdrUndefined, AccumulatorUse::kNone, OperandType::kRegOut) \
+ \
+ /* Globals */ \
+ V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdrGlobal, AccumulatorUse::kNone, OperandType::kIdx, OperandType::kRegOut) \
+ V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
+ OperandType::kIdx) \
+ V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx, \
+ OperandType::kIdx) \
+ \
+ /* Context operations */ \
+ V(PushContext, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
+ V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kRegOut) \
+ V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx) \
+ \
+ /* Load-Store lookup slots */ \
+ V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaLookupSlotInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(StaLookupSlotSloppy, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(StaLookupSlotStrict, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ \
+ /* Register-accumulator transfers */ \
+ V(Ldar, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(Star, AccumulatorUse::kRead, OperandType::kRegOut) \
+ \
+ /* Register-register transfers */ \
+ V(Mov, AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut) \
+ \
+ /* Property loads (LoadIC) operations */ \
+ V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
+ V(LdrNamedProperty, AccumulatorUse::kNone, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx, OperandType::kRegOut) \
+ V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(LdrKeyedProperty, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kRegOut) \
+ \
+ /* Property stores (StoreIC) operations */ \
+ V(StaNamedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
+ V(StaNamedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kIdx) \
+ V(StaKeyedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kReg, OperandType::kIdx) \
+ \
+ /* Binary Operators */ \
+ V(Add, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Sub, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Mul, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Div, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(Mod, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
+ V(BitwiseOr, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseXor, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseAnd, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftLeft, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftRight, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(ShiftRightLogical, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ \
+ /* Binary operators with immediate operands */ \
+ V(AddSmi, AccumulatorUse::kWrite, OperandType::kImm, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(SubSmi, AccumulatorUse::kWrite, OperandType::kImm, OperandType::kReg, \
+ OperandType::kIdx) \
+ V(BitwiseOrSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(BitwiseAndSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(ShiftLeftSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
+ V(ShiftRightSmi, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kReg, OperandType::kIdx) \
+ \
+ /* Unary Operators */ \
+ V(Inc, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(Dec, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(ToBooleanLogicalNot, AccumulatorUse::kReadWrite) \
+ V(LogicalNot, AccumulatorUse::kReadWrite) \
+ V(TypeOf, AccumulatorUse::kReadWrite) \
+ V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ \
+ /* Call operations */ \
+ V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
+ OperandType::kRegCount, OperandType::kIdx) \
+ V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
+ OperandType::kRegCount, OperandType::kIdx) \
+ V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
+ OperandType::kMaybeReg, OperandType::kRegCount) \
+ V(CallRuntimeForPair, AccumulatorUse::kNone, OperandType::kRuntimeId, \
+ OperandType::kMaybeReg, OperandType::kRegCount, OperandType::kRegOutPair) \
+ V(CallJSRuntime, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kReg, OperandType::kRegCount) \
+ \
+ /* Intrinsics */ \
+ V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId, \
+ OperandType::kMaybeReg, OperandType::kRegCount) \
+ \
+ /* New operator */ \
+ V(New, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kMaybeReg, OperandType::kRegCount) \
+ \
+ /* Test Operators */ \
+ V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestNotEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestEqualStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestLessThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestGreaterThan, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestLessThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestGreaterThanOrEqual, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ \
+ /* Cast operators */ \
+ V(ToName, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(ToNumber, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(ToObject, AccumulatorUse::kRead, OperandType::kRegOut) \
+ \
+ /* Literals */ \
+ V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateArrayLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8) \
+ V(CreateObjectLiteral, AccumulatorUse::kNone, OperandType::kIdx, \
+ OperandType::kIdx, OperandType::kFlag8, OperandType::kRegOut) \
+ \
+ /* Closure allocation */ \
+ V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kFlag8) \
+ \
+ /* Context allocation */ \
+ V(CreateBlockContext, AccumulatorUse::kReadWrite, OperandType::kIdx) \
+ V(CreateCatchContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kIdx) \
+ /* TODO(klaasb) rename Idx or add unsigned Imm OperandType? */ \
+ V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg) \
+ \
+ /* Arguments allocation */ \
+ V(CreateMappedArguments, AccumulatorUse::kWrite) \
+ V(CreateUnmappedArguments, AccumulatorUse::kWrite) \
+ V(CreateRestParameter, AccumulatorUse::kWrite) \
+ \
+ /* Control Flow */ \
+ V(Jump, AccumulatorUse::kNone, OperandType::kImm) \
+ V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx) \
+ V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ \
+ /* Complex flow control For..in */ \
+ V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
+ OperandType::kRegOutTriple) \
+ V(ForInDone, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg) \
+ V(ForInNext, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kReg, \
+ OperandType::kRegPair, OperandType::kIdx) \
+ V(ForInStep, AccumulatorUse::kWrite, OperandType::kReg) \
+ \
+ /* Perform a stack guard check */ \
+ V(StackCheck, AccumulatorUse::kNone) \
+ \
+ /* Perform a check to trigger on-stack replacement */ \
+ V(OsrPoll, AccumulatorUse::kNone, OperandType::kImm) \
+ \
+ /* Non-local flow control */ \
+ V(Throw, AccumulatorUse::kRead) \
+ V(ReThrow, AccumulatorUse::kRead) \
+ V(Return, AccumulatorUse::kRead) \
+ \
+ /* Generators */ \
+ V(SuspendGenerator, AccumulatorUse::kRead, OperandType::kReg) \
+ V(ResumeGenerator, AccumulatorUse::kWrite, OperandType::kReg) \
+ \
+ /* Debugger */ \
+ V(Debugger, AccumulatorUse::kNone) \
+ DEBUG_BREAK_BYTECODE_LIST(V) \
+ \
+ /* Illegal bytecode (terminates execution) */ \
+ V(Illegal, AccumulatorUse::kNone) \
+ \
+ /* No operation (used to maintain source positions for peephole */ \
+ /* eliminated bytecodes). */ \
+ V(Nop, AccumulatorUse::kNone)
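BYTECODE_LIST is an X-macro: each consumer supplies its own V to stamp out per-bytecode code, exactly as the enum and switch statements elsewhere in this file do. A minimal sketch of the pattern (COUNT_ONE is an illustrative name):

#define COUNT_ONE(Name, ...) +1
static const int kIllustrativeBytecodeCount = 0 BYTECODE_LIST(COUNT_ONE);
#undef COUNT_ONE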
enum class AccumulatorUse : uint8_t {
kNone = 0,
@@ -254,24 +307,28 @@ enum class AccumulatorUse : uint8_t {
kReadWrite = kRead | kWrite
};
-V8_INLINE AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
+inline AccumulatorUse operator&(AccumulatorUse lhs, AccumulatorUse rhs) {
int result = static_cast<int>(lhs) & static_cast<int>(rhs);
return static_cast<AccumulatorUse>(result);
}
-V8_INLINE AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
+inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
int result = static_cast<int>(lhs) | static_cast<int>(rhs);
return static_cast<AccumulatorUse>(result);
}
// Enumeration of scaling factors applicable to scalable operands. Code
// relies on being able to cast values to integer scaling values.
+#define OPERAND_SCALE_LIST(V) \
+ V(Single, 1) \
+ V(Double, 2) \
+ V(Quadruple, 4)
+
enum class OperandScale : uint8_t {
- kSingle = 1,
- kDouble = 2,
- kQuadruple = 4,
- kMaxValid = kQuadruple,
- kInvalid = 8,
+#define DECLARE_OPERAND_SCALE(Name, Scale) k##Name = Scale,
+ OPERAND_SCALE_LIST(DECLARE_OPERAND_SCALE)
+#undef DECLARE_OPERAND_SCALE
+ kLast = kQuadruple
};
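Since each enumerator encodes its multiplier, casting an OperandScale to an integer yields the scale factor directly; for instance:

static_assert(static_cast<int>(OperandScale::kDouble) == 2,
              "OperandScale enumerators encode their scale factor");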
// Enumeration of the size classes of operand types used by
@@ -312,7 +369,6 @@ enum class OperandType : uint8_t {
#undef COUNT_OPERAND_TYPES
};
-
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -325,78 +381,11 @@ enum class Bytecode : uint8_t {
#undef COUNT_BYTECODE
};
-
-// An interpreter Register which is located in the function's Register file
-// in its stack-frame. Register hold parameters, this, and expression values.
-class Register {
+class Bytecodes final {
public:
- explicit Register(int index = kInvalidIndex) : index_(index) {}
-
- int index() const { return index_; }
- bool is_parameter() const { return index() < 0; }
- bool is_valid() const { return index_ != kInvalidIndex; }
- bool is_byte_operand() const;
- bool is_short_operand() const;
-
- static Register FromParameterIndex(int index, int parameter_count);
- int ToParameterIndex(int parameter_count) const;
-
- // Returns an invalid register.
- static Register invalid_value() { return Register(); }
-
- // Returns the register for the function's closure object.
- static Register function_closure();
- bool is_function_closure() const;
-
- // Returns the register which holds the current context object.
- static Register current_context();
- bool is_current_context() const;
-
- // Returns the register for the incoming new target value.
- static Register new_target();
- bool is_new_target() const;
-
- int32_t ToOperand() const { return -index_; }
- static Register FromOperand(int32_t operand) { return Register(-operand); }
-
- static bool AreContiguous(Register reg1, Register reg2,
- Register reg3 = Register(),
- Register reg4 = Register(),
- Register reg5 = Register());
-
- std::string ToString(int parameter_count);
-
- bool operator==(const Register& other) const {
- return index() == other.index();
- }
- bool operator!=(const Register& other) const {
- return index() != other.index();
- }
- bool operator<(const Register& other) const {
- return index() < other.index();
- }
- bool operator<=(const Register& other) const {
- return index() <= other.index();
- }
- bool operator>(const Register& other) const {
- return index() > other.index();
- }
- bool operator>=(const Register& other) const {
- return index() >= other.index();
- }
-
- private:
- static const int kInvalidIndex = kMaxInt;
-
- void* operator new(size_t size);
- void operator delete(void* p);
-
- int index_;
-};
+ // The maximum number of operands a bytecode may have.
+ static const int kMaxOperands = 4;
-
-class Bytecodes {
- public:
// Returns string representation of |bytecode|.
static const char* ToString(Bytecode bytecode);
@@ -447,9 +436,36 @@ class Bytecodes {
// Returns true if |bytecode| writes the accumulator.
static bool WritesAccumulator(Bytecode bytecode);
+ // Returns true if |bytecode| writes the accumulator with a boolean value.
+ static bool WritesBooleanToAccumulator(Bytecode bytecode);
+
+ // Returns true if |bytecode| is an accumulator load without effects,
+ // e.g. LdaConstant, LdaTrue, Ldar.
+ static bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode);
+
+ // Returns true if |bytecode| is a jump without effects,
+ // e.g. any jump excluding those that include type coercion like
+ // JumpIfToBooleanTrue.
+ static bool IsJumpWithoutEffects(Bytecode bytecode);
+
+ // Returns true if |bytecode| is a register load without effects,
+ // e.g. Mov, Star, LdrUndefined.
+ static bool IsRegisterLoadWithoutEffects(Bytecode bytecode);
+
+ // Returns true if |bytecode| has no external side effects.
+ static bool IsWithoutExternalSideEffects(Bytecode bytecode);
+
// Returns the i-th operand of |bytecode|.
static OperandType GetOperandType(Bytecode bytecode, int i);
+ // Returns a pointer to an array of operand types terminated by
+ // OperandType::kNone.
+ static const OperandType* GetOperandTypes(Bytecode bytecode);
+
+ // Returns a pointer to an array of operand type infos terminated by
+ // OperandTypeInfo::kNone.
+ static const OperandTypeInfo* GetOperandTypeInfos(Bytecode bytecode);
+
// Returns the size of the i-th operand of |bytecode|.
static OperandSize GetOperandSize(Bytecode bytecode, int i,
OperandScale operand_scale);
@@ -459,10 +475,6 @@ class Bytecodes {
static int GetOperandOffset(Bytecode bytecode, int i,
OperandScale operand_scale);
- // Returns a zero-based bitmap of the register operand positions of
- // |bytecode|.
- static int GetRegisterOperandBitmap(Bytecode bytecode);
-
// Returns a debug break bytecode to replace |bytecode|.
static Bytecode GetDebugBreak(Bytecode bytecode);
@@ -473,6 +485,9 @@ class Bytecodes {
// Returns the size of |operand|.
static OperandSize SizeOfOperand(OperandType operand, OperandScale scale);
+ // Returns the number of values which |bytecode| returns.
+ static size_t ReturnCount(Bytecode bytecode);
+
// Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm).
static bool IsConditionalJumpImmediate(Bytecode bytecode);
@@ -497,6 +512,13 @@ class Bytecodes {
// any kind of operand.
static bool IsJump(Bytecode bytecode);
+ // Returns true if the bytecode is a jump that internally coerces the
+ // accumulator to a boolean.
+ static bool IsJumpIfToBoolean(Bytecode bytecode);
+
+ // Returns the equivalent jump bytecode without the accumulator coercion.
+ static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
+
// Returns true if the bytecode is a conditional jump, a jump, or a return.
static bool IsJumpOrReturn(Bytecode bytecode);
@@ -509,12 +531,18 @@ class Bytecodes {
// Returns true if the bytecode is a debug break.
static bool IsDebugBreak(Bytecode bytecode);
+ // Returns true if the bytecode is Ldar or Star.
+ static bool IsLdarOrStar(Bytecode bytecode);
+
// Returns true if the bytecode has wider operand forms.
static bool IsBytecodeWithScalableOperands(Bytecode bytecode);
// Returns true if the bytecode is a scaling prefix bytecode.
static bool IsPrefixScalingBytecode(Bytecode bytecode);
+ // Returns true if |bytecode| puts a name in the accumulator.
+ static bool PutsNameInAccumulator(Bytecode bytecode);
+
// Returns true if |operand_type| is any type of register operand.
static bool IsRegisterOperandType(OperandType operand_type);
@@ -524,6 +552,14 @@ class Bytecodes {
// Returns true if |operand_type| represents a register used as an output.
static bool IsRegisterOutputOperandType(OperandType operand_type);
+ // Returns true if the handler for |bytecode| should look ahead and inline a
+ // dispatch to a Star bytecode.
+ static bool IsStarLookahead(Bytecode bytecode, OperandScale operand_scale);
+
+ // Returns the number of registers represented by a register operand. For
+ // instance, a RegPair represents two registers.
+ static int GetNumberOfRegistersRepresentedBy(OperandType operand_type);
+
// Returns true if |operand_type| is a maybe register operand
// (kMaybeReg).
static bool IsMaybeRegisterOperandType(OperandType operand_type);
@@ -534,36 +570,17 @@ class Bytecodes {
// Returns true if |operand_type| is unsigned, false if signed.
static bool IsUnsignedOperandType(OperandType operand_type);
- // Decodes a register operand in a byte array.
- static Register DecodeRegisterOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale);
-
- // Decodes a signed operand in a byte array.
- static int32_t DecodeSignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale);
-
- // Decodes an unsigned operand in a byte array.
- static uint32_t DecodeUnsignedOperand(const uint8_t* operand_start,
- OperandType operand_type,
- OperandScale operand_scale);
-
- // Decode a single bytecode and operands to |os|.
- static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
- int number_of_parameters);
-
// Returns true if a handler is generated for a bytecode at a given
// operand scale. All bytecodes have handlers at OperandScale::kSingle,
// but only bytecodes with scalable operands have handlers with larger
// OperandScale values.
static bool BytecodeHasHandler(Bytecode bytecode, OperandScale operand_scale);
- // Return the next larger operand scale.
- static OperandScale NextOperandScale(OperandScale operand_scale);
+ // Returns the operand size required to hold a signed operand.
+ static OperandSize SizeForSignedOperand(int value);
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
+ // Returns the operand size required to hold an unsigned operand.
+ static OperandSize SizeForUnsignedOperand(uint32_t value);
};
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 7ce50b580e..ff3823fde2 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -4,6 +4,8 @@
#include "src/interpreter/constant-array-builder.h"
+#include <set>
+
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -46,14 +48,34 @@ Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
return constants_[index - start_index()];
}
+void ConstantArrayBuilder::ConstantArraySlice::InsertAt(size_t index,
+ Handle<Object> object) {
+ DCHECK_GE(index, start_index());
+ DCHECK_LT(index, start_index() + size());
+ constants_[index - start_index()] = object;
+}
+
+bool ConstantArrayBuilder::ConstantArraySlice::AllElementsAreUnique() const {
+ std::set<Object*> elements;
+ for (auto constant : constants_) {
+ if (elements.find(*constant) != elements.end()) return false;
+ elements.insert(*constant);
+ }
+ return true;
+}
+
STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilder::k16BitCapacity;
STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilder::k32BitCapacity;
-ConstantArrayBuilder::ConstantArrayBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate), constants_map_(isolate->heap(), zone) {
+ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone,
+ Handle<Object> the_hole_value)
+ : constants_map_(zone),
+ smi_map_(zone),
+ smi_pairs_(zone),
+ the_hole_value_(the_hole_value) {
idx_slice_[0] =
new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
idx_slice_[1] = new (zone) ConstantArraySlice(
@@ -73,9 +95,9 @@ size_t ConstantArrayBuilder::size() const {
return idx_slice_[0]->size();
}
-const ConstantArrayBuilder::ConstantArraySlice*
-ConstantArrayBuilder::IndexToSlice(size_t index) const {
- for (const ConstantArraySlice* slice : idx_slice_) {
+ConstantArrayBuilder::ConstantArraySlice* ConstantArrayBuilder::IndexToSlice(
+ size_t index) const {
+ for (ConstantArraySlice* slice : idx_slice_) {
if (index <= slice->max_index()) {
return slice;
}
@@ -90,12 +112,18 @@ Handle<Object> ConstantArrayBuilder::At(size_t index) const {
return slice->At(index);
} else {
DCHECK_LT(index, slice->capacity());
- return isolate_->factory()->the_hole_value();
+ return the_hole_value();
}
}
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
- Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
+ // First insert reserved SMI values.
+ for (auto reserved_smi : smi_pairs_) {
+ InsertAllocatedEntry(reserved_smi.second,
+ handle(reserved_smi.first, isolate));
+ }
+
+ Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
static_cast<int>(size()), PretenureFlag::TENURED);
int array_index = 0;
for (const ConstantArraySlice* slice : idx_slice_) {
@@ -104,6 +132,10 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
}
DCHECK(array_index == 0 ||
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+ // Different slices might contain the same element due to reservations, but
+ // all elements within a slice should be unique. If this DCHECK fails, then
+ // the AST nodes are not being internalized within a CanonicalHandleScope.
+ DCHECK(slice->AllElementsAreUnique());
// Copy objects from slice into array.
for (size_t i = 0; i < slice->size(); ++i) {
fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
@@ -113,44 +145,35 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
std::min(static_cast<size_t>(fixed_array->length() - array_index),
slice->capacity() - slice->size());
for (size_t i = 0; i < padding; i++) {
- fixed_array->set(array_index++, *isolate_->factory()->the_hole_value());
+ fixed_array->set(array_index++, *the_hole_value());
}
}
DCHECK_EQ(array_index, fixed_array->length());
- constants_map()->Clear();
return fixed_array;
}
size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
- index_t* entry = constants_map()->Find(object);
- return (entry == nullptr) ? AllocateEntry(object) : *entry;
+ auto entry = constants_map_.find(object.address());
+ return (entry == constants_map_.end()) ? AllocateEntry(object)
+ : entry->second;
}
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
Handle<Object> object) {
- DCHECK(!object->IsOddball());
- index_t* entry = constants_map()->Get(object);
- for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
- if (idx_slice_[i]->available() > 0) {
- size_t index = idx_slice_[i]->Allocate(object);
- *entry = static_cast<index_t>(index);
- return *entry;
- break;
- }
- }
- UNREACHABLE();
- return kMaxUInt32;
+ index_t index = AllocateIndex(object);
+ constants_map_[object.address()] = index;
+ return index;
}
-OperandSize ConstantArrayBuilder::CreateReservedEntry() {
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
+ Handle<Object> object) {
for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
if (idx_slice_[i]->available() > 0) {
- idx_slice_[i]->Reserve();
- return idx_slice_[i]->operand_size();
+ return static_cast<index_t>(idx_slice_[i]->Allocate(object));
}
}
UNREACHABLE();
- return OperandSize::kNone;
+ return kMaxUInt32;
}
ConstantArrayBuilder::ConstantArraySlice*
@@ -174,22 +197,53 @@ ConstantArrayBuilder::OperandSizeToSlice(OperandSize operand_size) const {
return slice;
}
+size_t ConstantArrayBuilder::AllocateEntry() {
+ return AllocateIndex(the_hole_value());
+}
+
+void ConstantArrayBuilder::InsertAllocatedEntry(size_t index,
+ Handle<Object> object) {
+ DCHECK_EQ(the_hole_value().address(), At(index).address());
+ ConstantArraySlice* slice = IndexToSlice(index);
+ slice->InsertAt(index, object);
+}
+
+OperandSize ConstantArrayBuilder::CreateReservedEntry() {
+ for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
+ if (idx_slice_[i]->available() > 0) {
+ idx_slice_[i]->Reserve();
+ return idx_slice_[i]->operand_size();
+ }
+ }
+ UNREACHABLE();
+ return OperandSize::kNone;
+}
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateReservedEntry(
+ Smi* value) {
+ index_t index = static_cast<index_t>(AllocateEntry());
+ smi_map_[value] = index;
+ smi_pairs_.push_back(std::make_pair(value, index));
+ return index;
+}
+
size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
- Handle<Object> object) {
+ Smi* value) {
DiscardReservedEntry(operand_size);
size_t index;
- index_t* entry = constants_map()->Find(object);
- if (nullptr == entry) {
- index = AllocateEntry(object);
+ auto entry = smi_map_.find(value);
+ if (entry == smi_map_.end()) {
+ index = AllocateReservedEntry(value);
} else {
ConstantArraySlice* slice = OperandSizeToSlice(operand_size);
- if (*entry > slice->max_index()) {
+ index = entry->second;
+ if (index > slice->max_index()) {
// The object is already in the constant array, but may have an
// index too big for the reserved operand_size. So, duplicate
// entry with the smaller operand size.
- *entry = static_cast<index_t>(slice->Allocate(object));
+ index = AllocateReservedEntry(value);
}
- index = *entry;
+ DCHECK_LE(index, slice->max_index());
}
return index;
}
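A sketch of the caller-side protocol for reserved Smi entries (EmitSmiConstant is a hypothetical helper, not part of this patch): reserve first to learn the operand size, then either commit the value or discard the reservation.

void EmitSmiConstant(ConstantArrayBuilder* builder, Smi* value) {
  OperandSize reserved_size = builder->CreateReservedEntry();
  size_t index = builder->CommitReservedEntry(reserved_size, value);
  USE(index);  // in practice the index becomes the bytecode's operand
}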
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 1a68646251..2018f25693 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -32,10 +32,10 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
static const size_t k32BitCapacity =
kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
- ConstantArrayBuilder(Isolate* isolate, Zone* zone);
+ ConstantArrayBuilder(Zone* zone, Handle<Object> the_hole_value);
// Generate a fixed array of constants based on inserted objects.
- Handle<FixedArray> ToFixedArray();
+ Handle<FixedArray> ToFixedArray(Isolate* isolate);
// Returns the object in the constant pool array that is at index
// |index|.
@@ -48,14 +48,21 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
// present. Returns the array index associated with the object.
size_t Insert(Handle<Object> object);
+ // Allocates an empty entry and returns the array index associated with the
+ // reservation. Entry can be inserted by calling InsertAllocatedEntry().
+ size_t AllocateEntry();
+
+ // Inserts the given object into an allocated entry.
+ void InsertAllocatedEntry(size_t index, Handle<Object> object);
+
// Creates a reserved entry in the constant pool and returns
// the size of the operand that'll be required to hold the entry
// when committed.
OperandSize CreateReservedEntry();
// Commits a reserved entry and returns the constant pool index for the
- // object.
- size_t CommitReservedEntry(OperandSize operand_size, Handle<Object> object);
+ // SMI value.
+ size_t CommitReservedEntry(OperandSize operand_size, Smi* value);
// Discards constant pool reservation.
void DiscardReservedEntry(OperandSize operand_size);
@@ -64,6 +71,8 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
typedef uint32_t index_t;
index_t AllocateEntry(Handle<Object> object);
+ index_t AllocateIndex(Handle<Object> object);
+ index_t AllocateReservedEntry(Smi* value);
struct ConstantArraySlice final : public ZoneObject {
ConstantArraySlice(Zone* zone, size_t start_index, size_t capacity,
@@ -72,6 +81,8 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
void Unreserve();
size_t Allocate(Handle<Object> object);
Handle<Object> At(size_t index) const;
+ void InsertAt(size_t index, Handle<Object> object);
+ bool AllElementsAreUnique() const;
inline size_t available() const { return capacity() - reserved() - size(); }
inline size_t reserved() const { return reserved_; }
@@ -91,14 +102,16 @@ class ConstantArrayBuilder final BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
};
- const ConstantArraySlice* IndexToSlice(size_t index) const;
+ ConstantArraySlice* IndexToSlice(size_t index) const;
ConstantArraySlice* OperandSizeToSlice(OperandSize operand_size) const;
- IdentityMap<index_t>* constants_map() { return &constants_map_; }
+ Handle<Object> the_hole_value() const { return the_hole_value_; }
- Isolate* isolate_;
ConstantArraySlice* idx_slice_[3];
- IdentityMap<index_t> constants_map_;
+ ZoneMap<Address, index_t> constants_map_;
+ ZoneMap<Smi*, index_t> smi_map_;
+ ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+ Handle<Object> the_hole_value_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 6510aa443a..56cd481f9c 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -10,118 +10,76 @@ namespace interpreter {
BreakableControlFlowBuilder::~BreakableControlFlowBuilder() {
- DCHECK(break_sites_.empty());
+ DCHECK(break_labels_.empty() || break_labels_.is_bound());
}
-
-void BreakableControlFlowBuilder::SetBreakTarget(const BytecodeLabel& target) {
- BindLabels(target, &break_sites_);
-}
-
-
-void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->Jump(&sites->back());
-}
-
-
-void BreakableControlFlowBuilder::EmitJumpIfTrue(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfTrue(&sites->back());
-}
-
-
-void BreakableControlFlowBuilder::EmitJumpIfFalse(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfFalse(&sites->back());
+void BreakableControlFlowBuilder::BindBreakTarget() {
+ break_labels_.Bind(builder());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfUndefined(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfUndefined(&sites->back());
+void BreakableControlFlowBuilder::EmitJump(BytecodeLabels* sites) {
+ builder()->Jump(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfNull(
- ZoneVector<BytecodeLabel>* sites) {
- sites->push_back(BytecodeLabel());
- builder()->JumpIfNull(&sites->back());
+void BreakableControlFlowBuilder::EmitJumpIfTrue(BytecodeLabels* sites) {
+ builder()->JumpIfTrue(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJump(ZoneVector<BytecodeLabel>* sites,
- int index) {
- builder()->Jump(&sites->at(index));
+void BreakableControlFlowBuilder::EmitJumpIfFalse(BytecodeLabels* sites) {
+ builder()->JumpIfFalse(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfTrue(
- ZoneVector<BytecodeLabel>* sites, int index) {
- builder()->JumpIfTrue(&sites->at(index));
+void BreakableControlFlowBuilder::EmitJumpIfUndefined(BytecodeLabels* sites) {
+ builder()->JumpIfUndefined(sites->New());
}
-
-void BreakableControlFlowBuilder::EmitJumpIfFalse(
- ZoneVector<BytecodeLabel>* sites, int index) {
- builder()->JumpIfFalse(&sites->at(index));
-}
-
-
-void BreakableControlFlowBuilder::BindLabels(const BytecodeLabel& target,
- ZoneVector<BytecodeLabel>* sites) {
- for (size_t i = 0; i < sites->size(); i++) {
- BytecodeLabel& site = sites->at(i);
- builder()->Bind(target, &site);
- }
- sites->clear();
+void BreakableControlFlowBuilder::EmitJumpIfNull(BytecodeLabels* sites) {
+ builder()->JumpIfNull(sites->New());
}
void BlockBuilder::EndBlock() {
builder()->Bind(&block_end_);
- SetBreakTarget(block_end_);
+ BindBreakTarget();
}
+LoopBuilder::~LoopBuilder() {
+ DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
+ DCHECK(header_labels_.empty() || header_labels_.is_bound());
+}
-LoopBuilder::~LoopBuilder() { DCHECK(continue_sites_.empty()); }
-
-
-void LoopBuilder::LoopHeader() {
+void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
// Jumps from before the loop header into the loop violate ordering
// requirements of bytecode basic blocks. The only entry into a loop
// must be the loop header. Breaks might seem okay, but not if they are
// nested and misplaced between the headers.
- DCHECK(break_sites_.empty() && continue_sites_.empty());
+ DCHECK(break_labels_.empty() && continue_labels_.empty());
builder()->Bind(&loop_header_);
+ for (auto& label : *additional_labels) {
+ builder()->Bind(&label);
+ }
}
-
-void LoopBuilder::EndLoop() {
+void LoopBuilder::JumpToHeader() {
// Loop must have closed form, i.e. all loop elements are within the loop,
// the loop header precedes the body and next elements in the loop.
DCHECK(loop_header_.is_bound());
- builder()->Bind(&loop_end_);
- SetBreakTarget(loop_end_);
- if (next_.is_bound()) {
- DCHECK(!condition_.is_bound() || next_.offset() >= condition_.offset());
- SetContinueTarget(next_);
- } else {
- DCHECK(condition_.is_bound());
- DCHECK_GE(condition_.offset(), loop_header_.offset());
- DCHECK_LE(condition_.offset(), loop_end_.offset());
- SetContinueTarget(condition_);
- }
+ builder()->Jump(&loop_header_);
}
+void LoopBuilder::JumpToHeaderIfTrue() {
+ // The loop must have closed form, i.e. all loop elements are within the
+ // loop, and the loop header precedes the body and next elements of the loop.
+ DCHECK(loop_header_.is_bound());
+ builder()->JumpIfTrue(&loop_header_);
+}
-void LoopBuilder::SetContinueTarget(const BytecodeLabel& target) {
- BindLabels(target, &continue_sites_);
+void LoopBuilder::EndLoop() {
+ BindBreakTarget();
+ header_labels_.BindToLabel(builder(), loop_header_);
}
+void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); }
SwitchBuilder::~SwitchBuilder() {
#ifdef DEBUG
@@ -147,7 +105,7 @@ void TryCatchBuilder::EndTry() {
builder()->MarkTryEnd(handler_id_);
builder()->Jump(&exit_);
builder()->Bind(&handler_);
- builder()->MarkHandler(handler_id_, true);
+ builder()->MarkHandler(handler_id_, catch_prediction_);
}
@@ -160,8 +118,7 @@ void TryFinallyBuilder::BeginTry(Register context) {
void TryFinallyBuilder::LeaveTry() {
- finalization_sites_.push_back(BytecodeLabel());
- builder()->Jump(&finalization_sites_.back());
+ builder()->Jump(finalization_sites_.New());
}
@@ -172,17 +129,10 @@ void TryFinallyBuilder::EndTry() {
void TryFinallyBuilder::BeginHandler() {
builder()->Bind(&handler_);
- builder()->MarkHandler(handler_id_, will_catch_);
-}
-
-
-void TryFinallyBuilder::BeginFinally() {
- for (size_t i = 0; i < finalization_sites_.size(); i++) {
- BytecodeLabel& site = finalization_sites_.at(i);
- builder()->Bind(&site);
- }
+ builder()->MarkHandler(handler_id_, catch_prediction_);
}
+void TryFinallyBuilder::BeginFinally() { finalization_sites_.Bind(builder()); }
void TryFinallyBuilder::EndFinally() {
// Nothing to be done here.
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index e4d376b9b2..5cd9b5bc99 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -31,37 +32,33 @@ class ControlFlowBuilder BASE_EMBEDDED {
class BreakableControlFlowBuilder : public ControlFlowBuilder {
public:
explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
- : ControlFlowBuilder(builder),
- break_sites_(builder->zone()) {}
+ : ControlFlowBuilder(builder), break_labels_(builder->zone()) {}
virtual ~BreakableControlFlowBuilder();
// This method should be called by the control flow owner before
// destruction to update sites that emit jumps for break.
- void SetBreakTarget(const BytecodeLabel& break_target);
+ void BindBreakTarget();
// This method is called when visiting break statements in the AST.
- // Inserts a jump to a unbound label that is patched when the corresponding
- // SetBreakTarget is called.
- void Break() { EmitJump(&break_sites_); }
- void BreakIfTrue() { EmitJumpIfTrue(&break_sites_); }
- void BreakIfFalse() { EmitJumpIfFalse(&break_sites_); }
- void BreakIfUndefined() { EmitJumpIfUndefined(&break_sites_); }
- void BreakIfNull() { EmitJumpIfNull(&break_sites_); }
+ // Inserts a jump to an unbound label that is patched when the corresponding
+ // BindBreakTarget is called.
+ void Break() { EmitJump(&break_labels_); }
+ void BreakIfTrue() { EmitJumpIfTrue(&break_labels_); }
+ void BreakIfFalse() { EmitJumpIfFalse(&break_labels_); }
+ void BreakIfUndefined() { EmitJumpIfUndefined(&break_labels_); }
+ void BreakIfNull() { EmitJumpIfNull(&break_labels_); }
- protected:
- void EmitJump(ZoneVector<BytecodeLabel>* labels);
- void EmitJump(ZoneVector<BytecodeLabel>* labels, int index);
- void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels);
- void EmitJumpIfTrue(ZoneVector<BytecodeLabel>* labels, int index);
- void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels);
- void EmitJumpIfFalse(ZoneVector<BytecodeLabel>* labels, int index);
- void EmitJumpIfUndefined(ZoneVector<BytecodeLabel>* labels);
- void EmitJumpIfNull(ZoneVector<BytecodeLabel>* labels);
+ BytecodeLabels* break_labels() { return &break_labels_; }
- void BindLabels(const BytecodeLabel& target, ZoneVector<BytecodeLabel>* site);
+ protected:
+ void EmitJump(BytecodeLabels* labels);
+ void EmitJumpIfTrue(BytecodeLabels* labels);
+ void EmitJumpIfFalse(BytecodeLabels* labels);
+ void EmitJumpIfUndefined(BytecodeLabels* labels);
+ void EmitJumpIfNull(BytecodeLabels* labels);
// Unbound labels that identify jumps for break statements in the code.
- ZoneVector<BytecodeLabel> break_sites_;
+ BytecodeLabels break_labels_;
};
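The refactoring above replaces hand-rolled ZoneVector<BytecodeLabel> bookkeeping with the BytecodeLabels collection from the newly included bytecode-label.h. As a rough sketch of the shape its call sites in this patch imply (New(), Bind(), BindToLabel(), empty(), is_bound()), with std::list standing in for V8's zone allocation; an illustration, not the real header:

#include <cassert>
#include <list>

// Hypothetical stand-ins for the real V8 types, for illustration only.
struct BytecodeLabel {};  // a jump target patched once its offset is known
struct BytecodeArrayBuilder {
  void Bind(BytecodeLabel* label) { /* bind to the current offset */ }
  void Bind(const BytecodeLabel& target, BytecodeLabel* label) {
    /* bind to |target|'s already-known offset */
  }
};

// Sketch of the BytecodeLabels collection implied by the call sites in this
// patch; the real class lives in src/interpreter/bytecode-label.h and
// allocates from a Zone rather than a std::list.
class BytecodeLabels {
 public:
  // Creates a fresh unbound label; callers immediately emit a jump to it.
  BytecodeLabel* New() {
    assert(!is_bound_);  // no new jump sites once the target is bound
    labels_.emplace_back();
    return &labels_.back();  // std::list keeps element addresses stable
  }
  // Patches every accumulated jump site to the current bytecode offset.
  void Bind(BytecodeArrayBuilder* builder) {
    is_bound_ = true;
    for (auto& label : labels_) builder->Bind(&label);
  }
  // Patches every accumulated jump site to an already-bound label.
  void BindToLabel(BytecodeArrayBuilder* builder, const BytecodeLabel& target) {
    is_bound_ = true;
    for (auto& label : labels_) builder->Bind(target, &label);
  }
  bool is_bound() const { return is_bound_; }
  bool empty() const { return labels_.empty(); }

 private:
  std::list<BytecodeLabel> labels_;
  bool is_bound_ = false;
};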
@@ -84,34 +81,34 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
public:
explicit LoopBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder),
- continue_sites_(builder->zone()) {}
+ continue_labels_(builder->zone()),
+ header_labels_(builder->zone()) {}
~LoopBuilder();
- void LoopHeader();
- void Condition() { builder()->Bind(&condition_); }
- void Next() { builder()->Bind(&next_); }
- void JumpToHeader() { builder()->Jump(&loop_header_); }
- void JumpToHeaderIfTrue() { builder()->JumpIfTrue(&loop_header_); }
+ void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
+ void JumpToHeader();
+ void JumpToHeaderIfTrue();
+ void BindContinueTarget();
void EndLoop();
// This method is called when visiting continue statements in the AST.
- // Inserts a jump to a unbound label that is patched when the corresponding
- // SetContinueTarget is called.
- void Continue() { EmitJump(&continue_sites_); }
- void ContinueIfTrue() { EmitJumpIfTrue(&continue_sites_); }
- void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_sites_); }
- void ContinueIfNull() { EmitJumpIfNull(&continue_sites_); }
+ // Inserts a jump to an unbound label that is patched when BindContinueTarget
+ // is called.
+ void Continue() { EmitJump(&continue_labels_); }
+ void ContinueIfTrue() { EmitJumpIfTrue(&continue_labels_); }
+ void ContinueIfUndefined() { EmitJumpIfUndefined(&continue_labels_); }
+ void ContinueIfNull() { EmitJumpIfNull(&continue_labels_); }
- private:
- void SetContinueTarget(const BytecodeLabel& continue_target);
+ BytecodeLabels* header_labels() { return &header_labels_; }
+ BytecodeLabels* continue_labels() { return &continue_labels_; }
+ private:
BytecodeLabel loop_header_;
- BytecodeLabel condition_;
- BytecodeLabel next_;
- BytecodeLabel loop_end_;
- // Unbound labels that identify jumps for continue statements in the code.
- ZoneVector<BytecodeLabel> continue_sites_;
+ // Unbound labels that identify jumps for continue statements in the code and
+ // jumps from checking the loop condition to the header for do-while loops.
+ BytecodeLabels continue_labels_;
+ BytecodeLabels header_labels_;
};
@@ -132,12 +129,12 @@ class SwitchBuilder final : public BreakableControlFlowBuilder {
// This method is called when visiting the case comparison operation for
// |index|. Inserts a JumpIfTrue to an unbound label that is patched when
// the corresponding SetCaseTarget is called.
- void Case(int index) { EmitJumpIfTrue(&case_sites_, index); }
+ void Case(int index) { builder()->JumpIfTrue(&case_sites_.at(index)); }
// This method is called when all case comparisons have been emitted if there
// is a default case statement. Inserts a Jump to an unbound label that is
// patched when the corresponding SetCaseTarget is called.
- void DefaultAt(int index) { EmitJump(&case_sites_, index); }
+ void DefaultAt(int index) { builder()->Jump(&case_sites_.at(index)); }
private:
// Unbound labels that identify jumps for case statements in the code.
@@ -148,8 +145,11 @@ class SwitchBuilder final : public BreakableControlFlowBuilder {
// A class to help with co-ordinating control flow in try-catch statements.
class TryCatchBuilder final : public ControlFlowBuilder {
public:
- explicit TryCatchBuilder(BytecodeArrayBuilder* builder)
- : ControlFlowBuilder(builder), handler_id_(builder->NewHandlerEntry()) {}
+ explicit TryCatchBuilder(BytecodeArrayBuilder* builder,
+ HandlerTable::CatchPrediction catch_prediction)
+ : ControlFlowBuilder(builder),
+ handler_id_(builder->NewHandlerEntry()),
+ catch_prediction_(catch_prediction) {}
void BeginTry(Register context);
void EndTry();
@@ -157,6 +157,7 @@ class TryCatchBuilder final : public ControlFlowBuilder {
private:
int handler_id_;
+ HandlerTable::CatchPrediction catch_prediction_;
BytecodeLabel handler_;
BytecodeLabel exit_;
};
@@ -165,11 +166,12 @@ class TryCatchBuilder final : public ControlFlowBuilder {
// A class to help with co-ordinating control flow in try-finally statements.
class TryFinallyBuilder final : public ControlFlowBuilder {
public:
- explicit TryFinallyBuilder(BytecodeArrayBuilder* builder, bool will_catch)
+ explicit TryFinallyBuilder(BytecodeArrayBuilder* builder,
+ HandlerTable::CatchPrediction catch_prediction)
: ControlFlowBuilder(builder),
handler_id_(builder->NewHandlerEntry()),
- finalization_sites_(builder->zone()),
- will_catch_(will_catch) {}
+ catch_prediction_(catch_prediction),
+ finalization_sites_(builder->zone()) {}
void BeginTry(Register context);
void LeaveTry();
@@ -180,15 +182,11 @@ class TryFinallyBuilder final : public ControlFlowBuilder {
private:
int handler_id_;
+ HandlerTable::CatchPrediction catch_prediction_;
BytecodeLabel handler_;
// Unbound labels that identify jumps to the finally block in the code.
- ZoneVector<BytecodeLabel> finalization_sites_;
-
- // Conservative prediction of whether exceptions thrown into the handler for
- // this finally block will be caught. Note that such a prediction depends on
- // whether this try-finally is nested inside a surrounding try-catch.
- bool will_catch_;
+ BytecodeLabels finalization_sites_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 374089bdc3..2ff7f2130a 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/handler-table-builder.h"
#include "src/factory.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -12,18 +13,16 @@ namespace v8 {
namespace internal {
namespace interpreter {
-HandlerTableBuilder::HandlerTableBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate), entries_(zone) {}
+HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {}
-Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable() {
+Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable(Isolate* isolate) {
int handler_table_size = static_cast<int>(entries_.size());
Handle<HandlerTable> table =
- Handle<HandlerTable>::cast(isolate_->factory()->NewFixedArray(
+ Handle<HandlerTable>::cast(isolate->factory()->NewFixedArray(
HandlerTable::LengthForRange(handler_table_size), TENURED));
for (int i = 0; i < handler_table_size; ++i) {
Entry& entry = entries_[i];
- HandlerTable::CatchPrediction pred =
- entry.will_catch ? HandlerTable::CAUGHT : HandlerTable::UNCAUGHT;
+ HandlerTable::CatchPrediction pred = entry.catch_prediction_;
table->SetRangeStart(i, static_cast<int>(entry.offset_start));
table->SetRangeEnd(i, static_cast<int>(entry.offset_end));
table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
@@ -35,7 +34,7 @@ Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable() {
int HandlerTableBuilder::NewHandlerEntry() {
int handler_id = static_cast<int>(entries_.size());
- Entry entry = {0, 0, 0, Register(), false};
+ Entry entry = {0, 0, 0, Register(), HandlerTable::UNCAUGHT};
entries_.push_back(entry);
return handler_id;
}
@@ -58,9 +57,9 @@ void HandlerTableBuilder::SetHandlerTarget(int handler_id, size_t offset) {
entries_[handler_id].offset_target = offset;
}
-
-void HandlerTableBuilder::SetPrediction(int handler_id, bool will_catch) {
- entries_[handler_id].will_catch = will_catch;
+void HandlerTableBuilder::SetPrediction(
+ int handler_id, HandlerTable::CatchPrediction prediction) {
+ entries_[handler_id].catch_prediction_ = prediction;
}
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 7356e37767..26c45f4056 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -6,6 +6,7 @@
#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/zone-containers.h"
@@ -20,11 +21,11 @@ namespace interpreter {
// A helper class for constructing exception handler tables for the interpreter.
class HandlerTableBuilder final BASE_EMBEDDED {
public:
- HandlerTableBuilder(Isolate* isolate, Zone* zone);
+ explicit HandlerTableBuilder(Zone* zone);
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
- Handle<HandlerTable> ToHandlerTable();
+ Handle<HandlerTable> ToHandlerTable(Isolate* isolate);
// Creates a new handler table entry and returns a {handler_id} identifying
// the entry, so that it can be referenced by the setter functions below.
@@ -36,7 +37,7 @@ class HandlerTableBuilder final BASE_EMBEDDED {
void SetTryRegionStart(int handler_id, size_t offset);
void SetTryRegionEnd(int handler_id, size_t offset);
void SetHandlerTarget(int handler_id, size_t offset);
- void SetPrediction(int handler_id, bool will_catch);
+ void SetPrediction(int handler_id, HandlerTable::CatchPrediction prediction);
void SetContextRegister(int handler_id, Register reg);
private:
@@ -45,10 +46,10 @@ class HandlerTableBuilder final BASE_EMBEDDED {
size_t offset_end; // Bytecode offset ending try-region.
size_t offset_target; // Bytecode offset of handler target.
Register context; // Register holding context for handler.
- bool will_catch; // Optimistic prediction for handler.
+ // Optimistic prediction for handler.
+ HandlerTable::CatchPrediction catch_prediction_;
};
- Isolate* isolate_;
ZoneVector<Entry> entries_;
DISALLOW_COPY_AND_ASSIGN(HandlerTableBuilder);
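To make the new CatchPrediction plumbing concrete, here is a hypothetical call sequence for a single try-catch region, using only the setters declared above; the offsets are invented for illustration:

#include "src/interpreter/handler-table-builder.h"

namespace v8 {
namespace internal {
namespace interpreter {

// Hypothetical usage of HandlerTableBuilder for one try region.
void BuildOneEntry(HandlerTableBuilder* builder, Register context_reg) {
  int id = builder->NewHandlerEntry();
  builder->SetTryRegionStart(id, 10);  // offset of the first try bytecode
  builder->SetTryRegionEnd(id, 42);    // offset ending the try region
  builder->SetHandlerTarget(id, 43);   // offset of the handler entry point
  builder->SetContextRegister(id, context_reg);
  builder->SetPrediction(id, HandlerTable::CAUGHT);
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8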
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 2663e4a876..227fd395ce 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/interpreter-assembler.h"
+#include <limits>
#include <ostream>
#include "src/code-factory.h"
@@ -24,23 +25,22 @@ using compiler::Node;
InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
Bytecode bytecode,
OperandScale operand_scale)
- : compiler::CodeStubAssembler(isolate, zone,
- InterpreterDispatchDescriptor(isolate),
- Code::ComputeFlags(Code::BYTECODE_HANDLER),
- Bytecodes::ToString(bytecode), 0),
+ : CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
+ Code::ComputeFlags(Code::BYTECODE_HANDLER),
+ Bytecodes::ToString(bytecode),
+ Bytecodes::ReturnCount(bytecode)),
bytecode_(bytecode),
operand_scale_(operand_scale),
+ bytecode_offset_(this, MachineType::PointerRepresentation()),
+ interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
accumulator_(this, MachineRepresentation::kTagged),
accumulator_use_(AccumulatorUse::kNone),
- context_(this, MachineRepresentation::kTagged),
- bytecode_array_(this, MachineRepresentation::kTagged),
+ made_call_(false),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
- accumulator_.Bind(
- Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
- context_.Bind(Parameter(InterpreterDispatchDescriptor::kContextParameter));
- bytecode_array_.Bind(
- Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter));
+ accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
+ bytecode_offset_.Bind(
+ Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
@@ -53,6 +53,13 @@ InterpreterAssembler::~InterpreterAssembler() {
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
}
+Node* InterpreterAssembler::GetInterpretedFramePointer() {
+ if (!interpreted_frame_pointer_.IsBound()) {
+ interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+ }
+ return interpreted_frame_pointer_.value();
+}
+
Node* InterpreterAssembler::GetAccumulatorUnchecked() {
return accumulator_.value();
}
@@ -69,64 +76,60 @@ void InterpreterAssembler::SetAccumulator(Node* value) {
accumulator_.Bind(value);
}
-Node* InterpreterAssembler::GetContext() { return context_.value(); }
+Node* InterpreterAssembler::GetContext() {
+ return LoadRegister(Register::current_context());
+}
void InterpreterAssembler::SetContext(Node* value) {
StoreRegister(value, Register::current_context());
- context_.Bind(value);
}
Node* InterpreterAssembler::BytecodeOffset() {
- return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
-}
-
-Node* InterpreterAssembler::RegisterFileRawPointer() {
- return Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
+ return bytecode_offset_.value();
}
Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
- return bytecode_array_.value();
+ if (made_call_) {
+ // If we have made a call, restore the bytecode array from the stack frame
+ // in case the debugger has swapped us to the patched debugger bytecode
+ // array.
+ return LoadRegister(Register::bytecode_array());
+ } else {
+ return Parameter(InterpreterDispatchDescriptor::kBytecodeArray);
+ }
}
Node* InterpreterAssembler::DispatchTableRawPointer() {
- return Parameter(InterpreterDispatchDescriptor::kDispatchTableParameter);
+ return Parameter(InterpreterDispatchDescriptor::kDispatchTable);
}
Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
- return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
+ return IntPtrAdd(GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index));
}
-Node* InterpreterAssembler::LoadRegister(int offset) {
- return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
- IntPtrConstant(offset));
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+ return WordShl(index, kPointerSizeLog2);
}
Node* InterpreterAssembler::LoadRegister(Register reg) {
- return LoadRegister(IntPtrConstant(-reg.index()));
-}
-
-Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return WordShl(index, kPointerSizeLog2);
+ return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
+ IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+ return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
RegisterFrameOffset(reg_index));
}
-Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
- return StoreNoWriteBarrier(MachineRepresentation::kTagged,
- RegisterFileRawPointer(), IntPtrConstant(offset),
- value);
-}
-
Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
- return StoreRegister(value, IntPtrConstant(-reg.index()));
+ return StoreNoWriteBarrier(
+ MachineRepresentation::kTagged, GetInterpretedFramePointer(),
+ IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged,
- RegisterFileRawPointer(),
+ GetInterpretedFramePointer(),
RegisterFrameOffset(reg_index), value);
}
@@ -371,6 +374,15 @@ Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
return BytecodeUnsignedOperand(operand_index, operand_size);
}
+Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
+ DCHECK(OperandType::kIntrinsicId ==
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ OperandSize operand_size =
+ Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
+ DCHECK_EQ(operand_size, OperandSize::kByte);
+ return BytecodeUnsignedOperand(operand_index, operand_size);
+}
+
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
@@ -380,9 +392,24 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
}
-Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
- return Load(MachineType::AnyTagged(), object,
- IntPtrConstant(offset - kHeapObjectTag));
+Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
+ Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kConstantPoolOffset);
+ int offset = FixedArray::kHeaderSize - kHeapObjectTag;
+#if V8_TARGET_LITTLE_ENDIAN
+ if (Is64()) {
+ offset += kPointerSize / 2;
+ }
+#endif
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
+ if (Is64()) {
+ return ChangeInt32ToInt64(
+ Load(MachineType::Int32(), constant_pool, entry_offset));
+ } else {
+ return SmiUntag(
+ Load(MachineType::AnyTagged(), constant_pool, entry_offset));
+ }
}
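LoadAndUntagConstantPoolEntry leans on the 64-bit Smi representation: the payload sits in the upper 32 bits of the word, so on a little-endian target a plain 32-bit load at byte offset +4 yields the untagged value with no shift. A standalone demonstration of that layout, assuming the value << 32 encoding:

#include <cstdint>
#include <cstring>
#include <iostream>

int main() {
  // 64-bit Smi encoding: payload in the upper 32 bits, low bits zero.
  int32_t payload = -12345;
  uint64_t smi = static_cast<uint64_t>(static_cast<uint32_t>(payload)) << 32;

  // On a little-endian host, byte offset +4 addresses the upper half, so a
  // plain 32-bit load yields the untagged value without shifting.
  int32_t untagged;
  std::memcpy(&untagged, reinterpret_cast<char*>(&smi) + 4, sizeof(untagged));
  std::cout << untagged << "\n";  // prints -12345 on little-endian
  return 0;
}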
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
@@ -406,24 +433,21 @@ Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
- Node* function = Load(
- MachineType::AnyTagged(), RegisterFileRawPointer(),
- IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
- Node* shared_info =
- LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+ Node* function = LoadRegister(Register::function_closure());
+ Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
Node* vector =
- LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+ LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
return vector;
}
void InterpreterAssembler::CallPrologue() {
- StoreRegister(SmiTag(BytecodeOffset()),
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+ StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());
if (FLAG_debug_code && !disable_stack_check_across_call_) {
DCHECK(stack_pointer_before_call_ == nullptr);
stack_pointer_before_call_ = LoadStackPointer();
}
+ made_call_ = true;
}
void InterpreterAssembler::CallEpilogue() {
@@ -434,18 +458,164 @@ void InterpreterAssembler::CallEpilogue() {
AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
kUnexpectedStackPointer);
}
+}
+
+Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
+ Node* first_arg, Node* arg_count,
+ Node* slot_id,
+ Node* type_feedback_vector,
+ TailCallMode tail_call_mode) {
+ // Static checks to assert it is safe to examine the type feedback element.
+ // We don't know that we have a weak cell. We might have a private symbol
+ // or an AllocationSite, but the memory is safe to examine.
+ // AllocationSite::kTransitionInfoOffset - contains a Smi or a pointer to
+ // a FixedArray.
+ // WeakCell::kValueOffset - contains a JSFunction or Smi(0).
+ // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+ // computed, meaning that it can't appear to be a pointer. If the low bit
+ // is 0, then the hash is computed, but the 0 bit prevents the field from
+ // appearing to be a pointer.
+ STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+ STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+ WeakCell::kValueOffset &&
+ WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+ Variable return_value(this, MachineRepresentation::kTagged);
+ Label handle_monomorphic(this), extra_checks(this), end(this), call(this);
+
+ // A slot id of 0 is used to indicate that no type feedback is available.
+ // Call using the call builtin.
+ STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+ Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
+ GotoIf(is_feedback_unavailable, &call);
+
+ // The checks. First, does the function match the recorded monomorphic
+ // target?
+ Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
+ Node* feedback_value = LoadWeakCellValue(feedback_element);
+ Node* is_monomorphic = WordEqual(function, feedback_value);
+ BranchIf(is_monomorphic, &handle_monomorphic, &extra_checks);
+
+ Bind(&handle_monomorphic);
+ {
+ // The compare above could have been a Smi/Smi comparison, which could
+ // falsely convince us that we have a monomorphic JSFunction.
+ Node* is_smi = WordIsSmi(function);
+ GotoIf(is_smi, &extra_checks);
+
+ // Increment the call count.
+ Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
+ Node* call_count =
+ LoadFixedArrayElement(type_feedback_vector, call_count_slot);
+ Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
+ // Count is Smi, so we don't need a write barrier.
+ StoreFixedArrayElement(type_feedback_vector, call_count_slot, new_count,
+ SKIP_WRITE_BARRIER);
+
+ // Call using the call function builtin.
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kJSFunction);
+ Node* code_target = HeapConstant(callable.code());
+ Node* ret_value = CallStub(callable.descriptor(), code_target, context,
+ arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
+
+ Bind(&extra_checks);
+ {
+ Label check_initialized(this, Label::kDeferred), mark_megamorphic(this);
+ // Check if it is a megamorphic target.
+ Node* is_megamorphic = WordEqual(
+ feedback_element,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ BranchIf(is_megamorphic, &call, &check_initialized);
+
+ Bind(&check_initialized);
+ {
+ Label possibly_monomorphic(this);
+ // Check if it is uninitialized.
+ Node* is_uninitialized = WordEqual(
+ feedback_element,
+ HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
+ GotoUnless(is_uninitialized, &mark_megamorphic);
+
+ Node* is_smi = WordIsSmi(function);
+ GotoIf(is_smi, &mark_megamorphic);
+
+ // Check if the function is an object of JSFunction type.
+ Node* instance_type = LoadInstanceType(function);
+ Node* is_js_function =
+ WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ GotoUnless(is_js_function, &mark_megamorphic);
+
+ // Check that it is not the Array() function.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, function);
+ GotoIf(is_array_function, &mark_megamorphic);
+
+ // Check if the function belongs to the same native context.
+ Node* native_context = LoadNativeContext(
+ LoadObjectField(function, JSFunction::kContextOffset));
+ Node* is_same_native_context =
+ WordEqual(native_context, LoadNativeContext(context));
+ GotoUnless(is_same_native_context, &mark_megamorphic);
+
+ // Initialize it to a monomorphic target.
+ Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
+ // Count is Smi, so we don't need a write barrier.
+ StoreFixedArrayElement(type_feedback_vector, call_count_slot,
+ SmiTag(Int32Constant(1)), SKIP_WRITE_BARRIER);
+
+ CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+ function);
+
+ // Call using the call function builtin.
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kJSFunction);
+ Node* code_target = HeapConstant(callable.code());
+ Node* ret_value = CallStub(callable.descriptor(), code_target, context,
+ arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
+
+ Bind(&mark_megamorphic);
+ {
+ // Mark it as megamorphic.
+ // The MegamorphicSentinel is created as part of Heap::InitialObjects
+ // and will not move during a GC, so it is safe to skip the write barrier.
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ StoreFixedArrayElement(
+ type_feedback_vector, slot_id,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ Goto(&call);
+ }
+ }
+
+ Bind(&call);
+ {
+ // Call using the call builtin.
+ Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kAny);
+ Node* code_target_call = HeapConstant(callable_call.code());
+ Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
+ context, arg_count, first_arg, function);
+ return_value.Bind(ret_value);
+ Goto(&end);
+ }
- // Restore bytecode array from stack frame in case the debugger has swapped us
- // to the patched debugger bytecode array.
- bytecode_array_.Bind(LoadRegister(
- InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+ Bind(&end);
+ return return_value.value();
}
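The branchy CodeStubAssembler logic above is easier to follow as the per-call-site state machine it implements. A simplified model with hypothetical types; it omits the special cases handled above (Smi callees, the Array() function, and cross-native-context callees all degrade the site to megamorphic):

#include <cstdint>

// Hypothetical model of the per-call-site feedback states.
enum class CallFeedback : uint8_t {
  kUninitialized,  // no call recorded yet
  kMonomorphic,    // weak cell holding a single JSFunction target
  kMegamorphic,    // sentinel: too many targets, stop tracking
};

struct CallSite {
  CallFeedback state = CallFeedback::kUninitialized;
  const void* target = nullptr;  // stands in for the WeakCell's value
  uint64_t call_count = 0;       // the slot_id + 1 counter in the diff
};

// Mirrors the transitions in CallJSWithFeedback: the first call records a
// monomorphic target; a differing target degrades the site to megamorphic.
void RecordCall(CallSite* site, const void* function) {
  switch (site->state) {
    case CallFeedback::kUninitialized:
      site->state = CallFeedback::kMonomorphic;
      site->target = function;
      site->call_count = 1;
      break;
    case CallFeedback::kMonomorphic:
      if (site->target == function) {
        ++site->call_count;
      } else {
        site->state = CallFeedback::kMegamorphic;
        site->target = nullptr;
      }
      break;
    case CallFeedback::kMegamorphic:
      break;  // nothing more to learn
  }
}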
Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Node* first_arg, Node* arg_count,
TailCallMode tail_call_mode) {
- Callable callable =
- CodeFactory::InterpreterPushArgsAndCall(isolate(), tail_call_mode);
+ Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+ isolate(), tail_call_mode, CallableType::kAny);
Node* code_target = HeapConstant(callable.code());
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
@@ -481,53 +651,64 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
- CodeStubAssembler::Label ok(this);
- CodeStubAssembler::Label interrupt_check(this);
- CodeStubAssembler::Label end(this);
+ Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
Node* budget_offset =
IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
// Update budget by |weight| and check if it reaches zero.
+ Variable new_budget(this, MachineRepresentation::kWord32);
Node* old_budget =
Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
- Node* new_budget = Int32Add(old_budget, weight);
- Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+ new_budget.Bind(Int32Add(old_budget, weight));
+ Node* condition =
+ Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
Branch(condition, &ok, &interrupt_check);
// Perform interrupt and reset budget.
Bind(&interrupt_check);
- CallRuntime(Runtime::kInterrupt, GetContext());
- StoreNoWriteBarrier(MachineRepresentation::kWord32,
- BytecodeArrayTaggedPointer(), budget_offset,
- Int32Constant(Interpreter::InterruptBudget()));
- Goto(&end);
+ {
+ CallRuntime(Runtime::kInterrupt, GetContext());
+ new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
+ Goto(&ok);
+ }
// Update budget.
Bind(&ok);
StoreNoWriteBarrier(MachineRepresentation::kWord32,
- BytecodeArrayTaggedPointer(), budget_offset, new_budget);
- Goto(&end);
- Bind(&end);
+ BytecodeArrayTaggedPointer(), budget_offset,
+ new_budget.value());
+}
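Stripped of the assembler plumbing, the interrupt-budget protocol above is small: add a (typically negative) weight to a signed budget stored on the BytecodeArray, and on underflow run the interrupt and reset. A runnable sketch with an illustrative reset value (the real one comes from Interpreter::InterruptBudget()):

#include <cstdint>
#include <iostream>

constexpr int32_t kInterruptBudget = 0x1000;  // illustrative reset value
void RuntimeInterrupt() { std::cout << "interrupt handled\n"; }

// Mirrors UpdateInterruptBudget: apply |weight| (negative for backward jumps
// and returns); on underflow, service the interrupt and reset the budget.
void UpdateInterruptBudget(int32_t* budget, int32_t weight) {
  int32_t new_budget = *budget + weight;
  if (new_budget < 0) {
    RuntimeInterrupt();
    new_budget = kInterruptBudget;
  }
  *budget = new_budget;
}

int main() {
  int32_t budget = 100;
  UpdateInterruptBudget(&budget, -64);  // budget: 36
  UpdateInterruptBudget(&budget, -64);  // underflow: interrupt + reset
  std::cout << budget << "\n";          // prints 4096
  return 0;
}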
+
+Node* InterpreterAssembler::Advance() {
+ return Advance(Bytecodes::Size(bytecode_, operand_scale_));
}
Node* InterpreterAssembler::Advance(int delta) {
- return IntPtrAdd(BytecodeOffset(), IntPtrConstant(delta));
+ return Advance(IntPtrConstant(delta));
}
Node* InterpreterAssembler::Advance(Node* delta) {
- return IntPtrAdd(BytecodeOffset(), delta);
+ if (FLAG_trace_ignition) {
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+ }
+ Node* next_offset = IntPtrAdd(BytecodeOffset(), delta);
+ bytecode_offset_.Bind(next_offset);
+ return next_offset;
}
-void InterpreterAssembler::Jump(Node* delta) {
+Node* InterpreterAssembler::Jump(Node* delta) {
+ DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
+
UpdateInterruptBudget(delta);
- DispatchTo(Advance(delta));
+ Node* new_bytecode_offset = Advance(delta);
+ Node* target_bytecode = LoadBytecode(new_bytecode_offset);
+ return DispatchToBytecode(target_bytecode, new_bytecode_offset);
}
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
- CodeStubAssembler::Label match(this);
- CodeStubAssembler::Label no_match(this);
+ Label match(this), no_match(this);
- Branch(condition, &match, &no_match);
+ BranchIf(condition, &match, &no_match);
Bind(&match);
Jump(delta);
Bind(&no_match);
@@ -543,37 +724,90 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
JumpConditional(WordNotEqual(lhs, rhs), delta);
}
-void InterpreterAssembler::Dispatch() {
- DispatchTo(Advance(Bytecodes::Size(bytecode_, operand_scale_)));
+Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
+ Node* bytecode =
+ Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
+ if (kPointerSize == 8) {
+ bytecode = ChangeUint32ToUint64(bytecode);
+ }
+ return bytecode;
}
-void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
- Node* target_bytecode = Load(
- MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
- if (kPointerSize == 8) {
- target_bytecode = ChangeUint32ToUint64(target_bytecode);
+Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
+ Label do_inline_star(this), done(this);
+
+ Variable var_bytecode(this, MachineRepresentation::kWord8);
+ var_bytecode.Bind(target_bytecode);
+
+ Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
+ Node* is_star = WordEqual(target_bytecode, star_bytecode);
+ BranchIf(is_star, &do_inline_star, &done);
+
+ Bind(&do_inline_star);
+ {
+ InlineStar();
+ var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
+ Goto(&done);
}
+ Bind(&done);
+ return var_bytecode.value();
+}
+
+void InterpreterAssembler::InlineStar() {
+ Bytecode previous_bytecode = bytecode_;
+ AccumulatorUse previous_acc_use = accumulator_use_;
- // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
- // from code object on every dispatch.
- Node* target_code_object =
+ bytecode_ = Bytecode::kStar;
+ accumulator_use_ = AccumulatorUse::kNone;
+
+ if (FLAG_trace_ignition) {
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+ }
+ StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
+
+ DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+
+ Advance();
+ bytecode_ = previous_bytecode;
+ accumulator_use_ = previous_acc_use;
+}
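The lookahead pays off because Star (store accumulator to register) frequently follows value-producing bytecodes; folding it into the preceding handler saves one full dispatch. A toy interpreter fragment with an invented two-byte encoding, showing the shape of the optimization:

#include <cstdint>
#include <vector>

// Simplified bytecodes for illustration; not V8's actual encoding.
enum Op : uint8_t { kAdd, kStar, kReturn };

struct State {
  std::vector<uint8_t> code;
  size_t offset = 0;
  int accumulator = 0;
  int registers[16] = {0};
};

// Sketch of StarDispatchLookahead/InlineStar: after finishing a bytecode,
// peek at the next one; if it is Star, fold the register store into this
// handler instead of paying for a separate dispatch.
void DispatchWithStarLookahead(State* s) {
  uint8_t next = s->code[s->offset];
  if (next == kStar) {
    uint8_t reg = s->code[s->offset + 1];  // Star's register operand
    s->registers[reg] = s->accumulator;    // inlined Star body
    s->offset += 2;                        // skip Star and its operand
  }
  // ...then load the following bytecode and jump to its handler as usual.
}

int main() {
  State s;
  s.code = {kStar, 3, kReturn};
  s.accumulator = 42;
  DispatchWithStarLookahead(&s);
  return s.registers[3] == 42 ? 0 : 1;  // Star was folded into the handler
}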
+
+Node* InterpreterAssembler::Dispatch() {
+ Node* target_offset = Advance();
+ Node* target_bytecode = LoadBytecode(target_offset);
+
+ if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
+ target_bytecode = StarDispatchLookahead(target_bytecode);
+ }
+ return DispatchToBytecode(target_bytecode, BytecodeOffset());
+}
+
+Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
+ Node* new_bytecode_offset) {
+ if (FLAG_trace_ignition_dispatches) {
+ TraceBytecodeDispatch(target_bytecode);
+ }
+
+ Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
- DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
+ return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}
-void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
- Node* bytecode_offset) {
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
- }
+Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
+ Node* bytecode_offset) {
+ Node* handler_entry =
+ IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
+}
+Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
+ Node* handler_entry, Node* bytecode_offset) {
InterpreterDispatchDescriptor descriptor(isolate());
- Node* args[] = {GetAccumulatorUnchecked(), RegisterFileRawPointer(),
- bytecode_offset, BytecodeArrayTaggedPointer(),
- DispatchTableRawPointer(), GetContext()};
- TailCall(descriptor, handler, args, 0);
+ Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
+ BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
+ return TailCallBytecodeDispatch(descriptor, handler_entry, args);
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -585,11 +819,12 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
// Indices 256-511 correspond to bytecodes with operand_scale == 1
// Indices 512-767 correspond to bytecodes with operand_scale == 2
Node* next_bytecode_offset = Advance(1);
- Node* next_bytecode = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- next_bytecode_offset);
- if (kPointerSize == 8) {
- next_bytecode = ChangeUint32ToUint64(next_bytecode);
+ Node* next_bytecode = LoadBytecode(next_bytecode_offset);
+
+ if (FLAG_trace_ignition_dispatches) {
+ TraceBytecodeDispatch(next_bytecode);
}
+
Node* base_index;
switch (operand_scale) {
case OperandScale::kDouble:
@@ -603,14 +838,75 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
base_index = nullptr;
}
Node* target_index = IntPtrAdd(base_index, next_bytecode);
- Node* target_code_object =
+ Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
WordShl(target_index, kPointerSizeLog2));
- DispatchToBytecodeHandler(target_code_object, next_bytecode_offset);
+ DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
+}
+
+Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
+ Node* context, Node* value, Variable* var_type_feedback) {
+ // We might need to loop once due to ToNumber conversion.
+ Variable var_value(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kWord32);
+ Variable* loop_vars[] = {&var_value, var_type_feedback};
+ Label loop(this, 2, loop_vars), done_loop(this, &var_result);
+ var_value.Bind(value);
+ var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kNone));
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {value}.
+ value = var_value.value();
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToWord32(value));
+ var_type_feedback->Bind(
+ Word32Or(var_type_feedback->value(),
+ Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this),
+ if_valueisnotheapnumber(this, Label::kDeferred);
+ Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ &if_valueisheapnumber, &if_valueisnotheapnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Truncate the floating point value.
+ var_result.Bind(TruncateHeapNumberValueToWord32(value));
+ var_type_feedback->Bind(
+ Word32Or(var_type_feedback->value(),
+ Int32Constant(BinaryOperationFeedback::kNumber)));
+ Goto(&done_loop);
+ }
+
+ Bind(&if_valueisnotheapnumber);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+ Goto(&loop);
+ }
+ }
+ }
+ Bind(&done_loop);
+ return var_result.value();
}
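The Word32Or accumulation works because BinaryOperationFeedback forms a lattice in which more general kinds include the bits of more specific ones, so OR-ing observations never loses generality. A sketch with hypothetical bit values (the real constants live elsewhere in V8 and may differ):

#include <cstdint>
#include <iostream>

// Hypothetical bit encoding of BinaryOperationFeedback, for illustration.
enum Feedback : uint32_t {
  kNone = 0,
  kSignedSmall = 1 << 0,
  kNumber = (1 << 1) | kSignedSmall,  // Number subsumes SignedSmall
  kAny = ~0u,                         // top of the lattice
};

int main() {
  uint32_t feedback = kNone;
  feedback |= kSignedSmall;  // saw a Smi
  feedback |= kNumber;       // later saw a HeapNumber
  // The OR-join keeps the most general kind observed so far.
  std::cout << ((feedback & kNumber) == kNumber) << "\n";  // prints 1
  return 0;
}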
-void InterpreterAssembler::InterpreterReturn() {
+void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// TODO(rmcilroy): Investigate whether it is worth supporting
// self-optimization of primitive functions, as FullCodegen does.
@@ -620,29 +916,20 @@ void InterpreterAssembler::InterpreterReturn() {
Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
BytecodeOffset());
UpdateInterruptBudget(profiling_weight);
-
- Node* exit_trampoline_code_object =
- HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
- DispatchToBytecodeHandler(exit_trampoline_code_object);
}
-void InterpreterAssembler::StackCheck() {
- CodeStubAssembler::Label end(this);
- CodeStubAssembler::Label ok(this);
- CodeStubAssembler::Label stack_guard(this);
-
+Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
Node* sp = LoadStackPointer();
Node* stack_limit = Load(
MachineType::Pointer(),
ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
- Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
- Branch(condition, &ok, &stack_guard);
- Bind(&stack_guard);
- CallRuntime(Runtime::kStackGuard, GetContext());
- Goto(&end);
- Bind(&ok);
- Goto(&end);
- Bind(&end);
+ return UintPtrLessThan(sp, stack_limit);
+}
+
+Node* InterpreterAssembler::LoadOSRNestingLevel() {
+ Node* offset =
+ IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
+ return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset);
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -654,18 +941,14 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
- CodeStubAssembler::Label match(this);
- CodeStubAssembler::Label no_match(this);
- CodeStubAssembler::Label end(this);
+ Label ok(this), abort(this, Label::kDeferred);
+ BranchIfWordEqual(lhs, rhs, &ok, &abort);
- Node* condition = WordEqual(lhs, rhs);
- Branch(condition, &match, &no_match);
- Bind(&no_match);
+ Bind(&abort);
Abort(bailout_reason);
- Goto(&end);
- Bind(&match);
- Goto(&end);
- Bind(&end);
+ Goto(&ok);
+
+ Bind(&ok);
}
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
@@ -673,20 +956,126 @@ void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}
+void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
+ Node* counters_table = ExternalConstant(
+ ExternalReference::interpreter_dispatch_counters(isolate()));
+ Node* source_bytecode_table_index = IntPtrConstant(
+ static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
+
+ Node* counter_offset =
+ WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
+ IntPtrConstant(kPointerSizeLog2));
+ Node* old_counter =
+ Load(MachineType::IntPtr(), counters_table, counter_offset);
+
+ Label counter_ok(this), counter_saturated(this, Label::kDeferred);
+
+ Node* counter_reached_max = WordEqual(
+ old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
+ BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+
+ Bind(&counter_ok);
+ {
+ Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
+ counter_offset, new_counter);
+ Goto(&counter_saturated);
+ }
+
+ Bind(&counter_saturated);
+}
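The counters enabled by FLAG_trace_ignition_dispatches form a flattened N×N matrix indexed by (current bytecode, target bytecode), saturating at the maximum uintptr_t value exactly as the WordEqual check above does. A plain C++ model:

#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>

// Sketch of the dispatch-counter table addressed above: a flattened
// N x N matrix of saturating counters, N = number of bytecodes.
class DispatchCounters {
 public:
  explicit DispatchCounters(size_t num_bytecodes)
      : n_(num_bytecodes), counters_(num_bytecodes * num_bytecodes, 0) {}

  // Mirrors the offset computation: row = source * N, column = target.
  void Record(size_t source, size_t target) {
    uintptr_t& c = counters_[source * n_ + target];
    if (c != std::numeric_limits<uintptr_t>::max()) ++c;  // saturate at max
  }

  uintptr_t Get(size_t source, size_t target) const {
    return counters_[source * n_ + target];
  }

 private:
  size_t n_;
  std::vector<uintptr_t> counters_;
};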
+
// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
return false;
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87 || \
- V8_TARGET_ARCH_S390
+ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_PPC
return true;
#else
#error "Unknown Architecture"
#endif
}
+Node* InterpreterAssembler::RegisterCount() {
+ Node* bytecode_array = LoadRegister(Register::bytecode_array());
+ Node* frame_size = LoadObjectField(
+ bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32());
+ return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2));
+}
+
+Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
+ if (FLAG_debug_code) {
+ Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
+ AbortIfWordNotEqual(
+ array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ }
+
+ Variable var_index(this, MachineRepresentation::kWord32);
+ var_index.Bind(Int32Constant(0));
+
+ // Iterate over the register file and write its values into the array.
+ // The mapping of register to array index must match that used in
+ // BytecodeGraphBuilder::VisitResumeGenerator.
+ Label loop(this, &var_index), done_loop(this);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* index = var_index.value();
+ Node* condition = Int32LessThan(index, RegisterCount());
+ GotoUnless(condition, &done_loop);
+
+ Node* reg_index =
+ Int32Sub(Int32Constant(Register(0).ToOperand()), index);
+ Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
+
+ StoreFixedArrayElement(array, index, value);
+
+ var_index.Bind(Int32Add(index, Int32Constant(1)));
+ Goto(&loop);
+ }
+ Bind(&done_loop);
+
+ return array;
+}
+
+Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
+ if (FLAG_debug_code) {
+ Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
+ AbortIfWordNotEqual(
+ array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ }
+
+ Variable var_index(this, MachineRepresentation::kWord32);
+ var_index.Bind(Int32Constant(0));
+
+ // Iterate over the array and write its values into the register file. Also
+ // erase the array contents so they are not kept alive artificially.
+ Label loop(this, &var_index), done_loop(this);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* index = var_index.value();
+ Node* condition = Int32LessThan(index, RegisterCount());
+ GotoUnless(condition, &done_loop);
+
+ Node* value = LoadFixedArrayElement(array, index);
+
+ Node* reg_index =
+ Int32Sub(Int32Constant(Register(0).ToOperand()), index);
+ StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
+
+ StoreFixedArrayElement(array, index, StaleRegisterConstant());
+
+ var_index.Bind(Int32Add(index, Int32Constant(1)));
+ Goto(&loop);
+ }
+ Bind(&done_loop);
+
+ return array;
+}
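ExportRegisterFile and ImportRegisterFile are the suspend/resume halves of generator support: registers are snapshotted into a per-generator FixedArray and later restored, with the array slots poisoned by StaleRegisterConstant() on import. A minimal model using plain vectors, assuming the register file and array have matching lengths:

#include <cstdint>
#include <vector>

using Value = intptr_t;
constexpr Value kStaleRegister = -1;  // stand-in for StaleRegisterConstant()

// Sketch of ExportRegisterFile: snapshot registers into a generator-owned
// array on suspend. Array index i maps to register i, as in the diff.
void ExportRegisters(const std::vector<Value>& regs, std::vector<Value>* out) {
  out->assign(regs.begin(), regs.end());
}

// Sketch of ImportRegisterFile: restore registers on resume, then poison
// the array so the stored values are not kept alive artificially.
void ImportRegisters(std::vector<Value>* array, std::vector<Value>* regs) {
  for (size_t i = 0; i < array->size(); ++i) {
    (*regs)[i] = (*array)[i];
    (*array)[i] = kStaleRegister;
  }
}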
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 86ecea54dd..b3fa42fbf6 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -6,10 +6,10 @@
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
-#include "src/compiler/code-stub-assembler.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
#include "src/frames.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class InterpreterAssembler : public compiler::CodeStubAssembler {
+class InterpreterAssembler : public CodeStubAssembler {
public:
InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
OperandScale operand_scale);
@@ -41,6 +41,9 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Returns the runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
+ // Returns the intrinsic id immediate for bytecode operand
+ // |operand_index| in the current bytecode.
+ compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
// Accumulator.
compiler::Node* GetAccumulator();
@@ -50,11 +53,16 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
compiler::Node* GetContext();
void SetContext(compiler::Node* value);
+ // Number of registers.
+ compiler::Node* RegisterCount();
+
+ // Backup/restore register file to/from a fixed array of the correct length.
+ compiler::Node* ExportRegisterFile(compiler::Node* array);
+ compiler::Node* ImportRegisterFile(compiler::Node* array);
+
// Loads from and stores to the interpreter register file.
- compiler::Node* LoadRegister(int offset);
compiler::Node* LoadRegister(Register reg);
compiler::Node* LoadRegister(compiler::Node* reg_index);
- compiler::Node* StoreRegister(compiler::Node* value, int offset);
compiler::Node* StoreRegister(compiler::Node* value, Register reg);
compiler::Node* StoreRegister(compiler::Node* value,
compiler::Node* reg_index);
@@ -69,8 +77,8 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
- // Load a field from an object on the heap.
- compiler::Node* LoadObjectField(compiler::Node* object, int offset);
+ // Load and untag constant at |index| in the constant pool.
+ compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
// Load |slot_index| from |context|.
compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
@@ -86,6 +94,18 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// Call JSFunction or Callable |function| with |arg_count|
// arguments (not including receiver) and the first argument
+ // located at |first_arg|. Type feedback is collected in the
+ // slot at index |slot_id|.
+ compiler::Node* CallJSWithFeedback(compiler::Node* function,
+ compiler::Node* context,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count,
+ compiler::Node* slot_id,
+ compiler::Node* type_feedback_vector,
+ TailCallMode tail_call_mode);
+
+ // Call JSFunction or Callable |function| with |arg_count|
+ // arguments (not including receiver) and the first argument
// located at |first_arg|.
compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
compiler::Node* first_arg, compiler::Node* arg_count,
@@ -110,12 +130,7 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
compiler::Node* arg_count, int return_size = 1);
// Jump relative to the current bytecode by |jump_offset|.
- void Jump(compiler::Node* jump_offset);
-
- // Jump relative to the current bytecode by |jump_offset| if the
- // |condition| is true. Helper function for JumpIfWordEqual and
- // JumpIfWordNotEqual.
- void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+ compiler::Node* Jump(compiler::Node* jump_offset);
// Jump relative to the current bytecode by |jump_offset| if the
// word values |lhs| and |rhs| are equal.
@@ -127,39 +142,48 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* jump_offset);
- // Perform a stack guard check.
- void StackCheck();
+ // Returns true if the stack guard check triggers an interrupt.
+ compiler::Node* StackCheckTriggeredInterrupt();
+
+ // Updates the profiler interrupt budget for a return.
+ void UpdateInterruptBudgetOnReturn();
- // Returns from the function.
- void InterpreterReturn();
+ // Returns the OSR nesting level from the bytecode header.
+ compiler::Node* LoadOSRNestingLevel();
// Dispatch to the bytecode.
- void Dispatch();
+ compiler::Node* Dispatch();
// Dispatch to bytecode handler.
- void DispatchToBytecodeHandler(compiler::Node* handler,
- compiler::Node* bytecode_offset);
- void DispatchToBytecodeHandler(compiler::Node* handler) {
- DispatchToBytecodeHandler(handler, BytecodeOffset());
+ compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler) {
+ return DispatchToBytecodeHandler(handler, BytecodeOffset());
}
// Dispatch bytecode as wide operand variant.
void DispatchWide(OperandScale operand_scale);
+ // Truncate tagged |value| to word32 and store the type feedback in
+ // |var_type_feedback|.
+ compiler::Node* TruncateTaggedToWord32WithFeedback(
+ compiler::Node* context, compiler::Node* value,
+ Variable* var_type_feedback);
+
// Abort with the given bailout reason.
void Abort(BailoutReason bailout_reason);
+ void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+ BailoutReason bailout_reason);
+
+ // Returns the offset of the current bytecode from the BytecodeArrayPointer.
+ compiler::Node* BytecodeOffset();
protected:
Bytecode bytecode() const { return bytecode_; }
static bool TargetSupportsUnalignedAccess();
private:
- // Returns a raw pointer to start of the register file on the stack.
- compiler::Node* RegisterFileRawPointer();
// Returns a tagged pointer to the current function's BytecodeArray object.
compiler::Node* BytecodeArrayTaggedPointer();
- // Returns the offset from the BytecodeArrayPointer of the current bytecode.
- compiler::Node* BytecodeOffset();
+
// Returns a raw pointer to first entry in the interpreter dispatch table.
compiler::Node* DispatchTableRawPointer();
@@ -168,11 +192,18 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
// tracing as these need to bypass accumulator use validity checks.
compiler::Node* GetAccumulatorUnchecked();
+ // Returns the frame pointer for the interpreted frame of the function being
+ // interpreted.
+ compiler::Node* GetInterpretedFramePointer();
+
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue() override;
void CallEpilogue() override;
+ // Increment the dispatch counter for the (current, next) bytecode pair.
+ void TraceBytecodeDispatch(compiler::Node* target_index);
+
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
@@ -206,26 +237,53 @@ class InterpreterAssembler : public compiler::CodeStubAssembler {
compiler::Node* BytecodeUnsignedOperand(int operand_index,
OperandSize operand_size);
- // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
- // update BytecodeOffset() itself.
+ // Jump relative to the current bytecode by |jump_offset| if the
+ // |condition| is true. Helper function for JumpIfWordEqual and
+ // JumpIfWordNotEqual.
+ void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
+ // Updates and returns BytecodeOffset() advanced by the current bytecode's
+ // size. Traces the exit of the current bytecode.
+ compiler::Node* Advance();
+
+ // Updates and returns BytecodeOffset() advanced by delta bytecodes.
+ // Traces the exit of the current bytecode.
compiler::Node* Advance(int delta);
compiler::Node* Advance(compiler::Node* delta);
- // Starts next instruction dispatch at |new_bytecode_offset|.
- void DispatchTo(compiler::Node* new_bytecode_offset);
+ // Load the bytecode at |bytecode_offset|.
+ compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);
- // Abort operations for debug code.
- void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
- BailoutReason bailout_reason);
+ // Look ahead for Star and inline it in a branch. Returns a new target
+ // bytecode node for dispatch.
+ compiler::Node* StarDispatchLookahead(compiler::Node* target_bytecode);
+
+ // Build code for Star at the current BytecodeOffset() and Advance() to the
+ // next dispatch offset.
+ void InlineStar();
+
+ // Dispatch to |target_bytecode| at |new_bytecode_offset|.
+ // |target_bytecode| should be equivalent to loading from the offset.
+ compiler::Node* DispatchToBytecode(compiler::Node* target_bytecode,
+ compiler::Node* new_bytecode_offset);
+
+ // Dispatch to the bytecode handler with code offset |handler|.
+ compiler::Node* DispatchToBytecodeHandler(compiler::Node* handler,
+ compiler::Node* bytecode_offset);
+
+ // Dispatch to the bytecode handler with code entry point |handler_entry|.
+ compiler::Node* DispatchToBytecodeHandlerEntry(
+ compiler::Node* handler_entry, compiler::Node* bytecode_offset);
OperandScale operand_scale() const { return operand_scale_; }
Bytecode bytecode_;
OperandScale operand_scale_;
+ CodeStubAssembler::Variable bytecode_offset_;
+ CodeStubAssembler::Variable interpreted_frame_pointer_;
CodeStubAssembler::Variable accumulator_;
AccumulatorUse accumulator_use_;
- CodeStubAssembler::Variable context_;
- CodeStubAssembler::Variable bytecode_array_;
+ bool made_call_;
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
index 6d9917de4f..600b9c086f 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -4,6 +4,8 @@
#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/code-factory.h"
+
namespace v8 {
namespace internal {
namespace interpreter {
@@ -13,8 +15,11 @@ using compiler::Node;
#define __ assembler_->
IntrinsicsHelper::IntrinsicsHelper(InterpreterAssembler* assembler)
- : assembler_(assembler) {}
+ : isolate_(assembler->isolate()),
+ zone_(assembler->zone()),
+ assembler_(assembler) {}
+// static
bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) {
switch (function_id) {
#define SUPPORTED(name, lower_case, count) case Runtime::kInline##name:
@@ -26,6 +31,36 @@ bool IntrinsicsHelper::IsSupported(Runtime::FunctionId function_id) {
}
}
+// static
+IntrinsicsHelper::IntrinsicId IntrinsicsHelper::FromRuntimeId(
+ Runtime::FunctionId function_id) {
+ switch (function_id) {
+#define TO_RUNTIME_ID(name, lower_case, count) \
+ case Runtime::kInline##name: \
+ return IntrinsicId::k##name;
+ INTRINSICS_LIST(TO_RUNTIME_ID)
+#undef TO_RUNTIME_ID
+ default:
+ UNREACHABLE();
+ return static_cast<IntrinsicsHelper::IntrinsicId>(-1);
+ }
+}
+
+// static
+Runtime::FunctionId IntrinsicsHelper::ToRuntimeId(
+ IntrinsicsHelper::IntrinsicId intrinsic_id) {
+ switch (intrinsic_id) {
+#define TO_INTRINSIC_ID(name, lower_case, count) \
+ case IntrinsicId::k##name: \
+ return Runtime::kInline##name;
+ INTRINSICS_LIST(TO_INTRINSIC_ID)
+#undef TO_INTRINSIC_ID
+ default:
+ UNREACHABLE();
+ return static_cast<Runtime::FunctionId>(-1);
+ }
+}
+
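+// For illustration, a sketch of how the two mappings above line up: with the
+// entry V(IsArray, is_array, 1) in INTRINSICS_LIST, FromRuntimeId() maps
+// Runtime::kInlineIsArray to IntrinsicId::kIsArray and ToRuntimeId() maps it
+// back, so the two enums stay in sync by construction.
+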
Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
Node* first_arg_reg, Node* arg_count) {
InterpreterAssembler::Label abort(assembler_), end(assembler_);
@@ -42,116 +77,332 @@ Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
#undef LABEL_POINTER
#define CASE(name, lower_case, count) \
- static_cast<int32_t>(Runtime::kInline##name),
+ static_cast<int32_t>(IntrinsicId::k##name),
int32_t cases[] = {INTRINSICS_LIST(CASE)};
#undef CASE
__ Switch(function_id, &abort, cases, labels, arraysize(cases));
#define HANDLE_CASE(name, lower_case, expected_arg_count) \
__ Bind(&lower_case); \
- if (FLAG_debug_code) { \
+ if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, arg_count); \
} \
- result.Bind(name(first_arg_reg)); \
+ result.Bind(name(first_arg_reg, arg_count, context)); \
__ Goto(&end);
INTRINSICS_LIST(HANDLE_CASE)
#undef HANDLE_CASE
__ Bind(&abort);
- __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
- result.Bind(__ UndefinedConstant());
- __ Goto(&end);
+ {
+ __ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
+ result.Bind(__ UndefinedConstant());
+ __ Goto(&end);
+ }
__ Bind(&end);
return result.value();
}
-Node* IntrinsicsHelper::CompareInstanceType(Node* map, int type,
+Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
InstanceTypeCompareMode mode) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
- Node* instance_type = __ LoadInstanceType(map);
+ Node* instance_type = __ LoadInstanceType(object);
InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
end(assembler_);
- Node* condition;
if (mode == kInstanceTypeEqual) {
- condition = __ Word32Equal(instance_type, __ Int32Constant(type));
+ return __ Word32Equal(instance_type, __ Int32Constant(type));
} else {
DCHECK(mode == kInstanceTypeGreaterThanOrEqual);
- condition =
- __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
+ return __ Int32GreaterThanOrEqual(instance_type, __ Int32Constant(type));
}
- __ Branch(condition, &if_true, &if_false);
+}
- __ Bind(&if_true);
- return_value.Bind(__ BooleanConstant(true));
- __ Goto(&end);
+Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+ InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
+ return_false(assembler_), end(assembler_);
+ Node* arg = __ LoadRegister(input);
+ __ GotoIf(__ WordIsSmi(arg), &return_false);
- __ Bind(&if_false);
- return_value.Bind(__ BooleanConstant(false));
- __ Goto(&end);
+ Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
+ __ Branch(condition, &return_true, &return_false);
+
+ __ Bind(&return_true);
+ {
+ return_value.Bind(__ BooleanConstant(true));
+ __ Goto(&end);
+ }
+
+ __ Bind(&return_false);
+ {
+ return_value.Bind(__ BooleanConstant(false));
+ __ Goto(&end);
+ }
__ Bind(&end);
return return_value.value();
}
-Node* IntrinsicsHelper::IsJSReceiver(Node* input) {
+Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
+ Node* context) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
-
- InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
+ InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
end(assembler_);
- Node* arg = __ LoadRegister(input);
- __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
- __ Bind(&if_smi);
- return_value.Bind(__ BooleanConstant(false));
- __ Goto(&end);
+ Node* arg = __ LoadRegister(input);
+ __ GotoIf(__ WordIsSmi(arg), &return_false);
- __ Bind(&if_not_smi);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- return_value.Bind(CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
- kInstanceTypeGreaterThanOrEqual));
- __ Goto(&end);
+ Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
+ kInstanceTypeGreaterThanOrEqual);
+ __ Branch(condition, &return_true, &return_false);
+
+ __ Bind(&return_true);
+ {
+ return_value.Bind(__ BooleanConstant(true));
+ __ Goto(&end);
+ }
+
+ __ Bind(&return_false);
+ {
+ return_value.Bind(__ BooleanConstant(false));
+ __ Goto(&end);
+ }
__ Bind(&end);
return return_value.value();
}
-Node* IntrinsicsHelper::IsArray(Node* input) {
+Node* IntrinsicsHelper::IsArray(Node* input, Node* arg_count, Node* context) {
+ return IsInstanceType(input, JS_ARRAY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsJSProxy(Node* input, Node* arg_count, Node* context) {
+ return IsInstanceType(input, JS_PROXY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsRegExp(Node* input, Node* arg_count, Node* context) {
+ return IsInstanceType(input, JS_REGEXP_TYPE);
+}
+
+Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
+}
+
+Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
-
InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
end(assembler_);
+
Node* arg = __ LoadRegister(input);
__ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
__ Bind(&if_smi);
- return_value.Bind(__ BooleanConstant(false));
- __ Goto(&end);
+ {
+ return_value.Bind(__ BooleanConstant(true));
+ __ Goto(&end);
+ }
__ Bind(&if_not_smi);
- return_value.Bind(
- CompareInstanceType(arg, JS_ARRAY_TYPE, kInstanceTypeEqual));
- __ Goto(&end);
+ {
+ return_value.Bind(__ BooleanConstant(false));
+ __ Goto(&end);
+ }
__ Bind(&end);
return return_value.value();
}
+Node* IntrinsicsHelper::IntrinsicAsStubCall(Node* args_reg, Node* context,
+ Callable const& callable) {
+ int param_count = callable.descriptor().GetParameterCount();
+ Node** args = zone()->NewArray<Node*>(param_count + 1); // 1 for context
+ for (int i = 0; i < param_count; i++) {
+ args[i] = __ LoadRegister(args_reg);
+ args_reg = __ NextRegister(args_reg);
+ }
+ args[param_count] = context;
+
+ return __ CallStubN(callable, args);
+}
+
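+// For example (illustrative): for CodeFactory::SubString, whose descriptor
+// has three parameters, the args array built above holds the values of three
+// consecutive interpreter registers starting at args_reg, with the context
+// appended as the final argument.
+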
+Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsStubCall(input, context,
+ CodeFactory::HasProperty(isolate()));
+}
+
+Node* IntrinsicsHelper::NewObject(Node* input, Node* arg_count, Node* context) {
+ return IntrinsicAsStubCall(input, context,
+ CodeFactory::FastNewObject(isolate()));
+}
+
+Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsStubCall(input, context,
+ CodeFactory::NumberToString(isolate()));
+}
+
+Node* IntrinsicsHelper::RegExpConstructResult(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsStubCall(input, context,
+ CodeFactory::RegExpConstructResult(isolate()));
+}
+
+Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsStubCall(input, context,
+ CodeFactory::RegExpExec(isolate()));
+}
+
+Node* IntrinsicsHelper::SubString(Node* input, Node* arg_count, Node* context) {
+ return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate()));
+}
+
+Node* IntrinsicsHelper::ToString(Node* input, Node* arg_count, Node* context) {
+ return IntrinsicAsStubCall(input, context, CodeFactory::ToString(isolate()));
+}
+
+Node* IntrinsicsHelper::ToLength(Node* input, Node* arg_count, Node* context) {
+ return IntrinsicAsStubCall(input, context, CodeFactory::ToLength(isolate()));
+}
+
+Node* IntrinsicsHelper::ToInteger(Node* input, Node* arg_count, Node* context) {
+ return IntrinsicAsStubCall(input, context, CodeFactory::ToInteger(isolate()));
+}
+
+Node* IntrinsicsHelper::ToNumber(Node* input, Node* arg_count, Node* context) {
+ return IntrinsicAsStubCall(input, context, CodeFactory::ToNumber(isolate()));
+}
+
+Node* IntrinsicsHelper::ToObject(Node* input, Node* arg_count, Node* context) {
+ return IntrinsicAsStubCall(input, context, CodeFactory::ToObject(isolate()));
+}
+
+Node* IntrinsicsHelper::Call(Node* args_reg, Node* arg_count, Node* context) {
+ // First argument register contains the function target.
+ Node* function = __ LoadRegister(args_reg);
+
+ // Receiver is the second runtime call argument.
+ Node* receiver_reg = __ NextRegister(args_reg);
+ Node* receiver_arg = __ RegisterLocation(receiver_reg);
+
+ // Subtract function and receiver from arg count.
+ Node* function_and_receiver_count = __ Int32Constant(2);
+ Node* target_args_count = __ Int32Sub(arg_count, function_and_receiver_count);
+
+ if (FLAG_debug_code) {
+ InterpreterAssembler::Label arg_count_positive(assembler_);
+ Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
+ __ GotoUnless(comparison, &arg_count_positive);
+ __ Abort(kWrongArgumentCountForInvokeIntrinsic);
+ __ Goto(&arg_count_positive);
+ __ Bind(&arg_count_positive);
+ }
+
+ Node* result = __ CallJS(function, context, receiver_arg, target_args_count,
+ TailCallMode::kDisallow);
+ return result;
+}
+
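+// Register layout assumed above for %_Call(target, receiver, ...args):
+// args_reg holds the call target, the following register holds the receiver,
+// and the remaining arg_count - 2 registers hold the arguments proper.
+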
+Node* IntrinsicsHelper::ValueOf(Node* args_reg, Node* arg_count,
+ Node* context) {
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+ InterpreterAssembler::Label done(assembler_);
+
+ Node* object = __ LoadRegister(args_reg);
+ return_value.Bind(object);
+
+ // If the object is a Smi, return it unchanged.
+ __ GotoIf(__ WordIsSmi(object), &done);
+
+ // If the object is not a value type, return the object.
+ Node* condition =
+ CompareInstanceType(object, JS_VALUE_TYPE, kInstanceTypeEqual);
+ __ GotoUnless(condition, &done);
+
+ // If the object is a value type, return the value field.
+ return_value.Bind(__ LoadObjectField(object, JSValue::kValueOffset));
+ __ Goto(&done);
+
+ __ Bind(&done);
+ return return_value.value();
+}
+
+Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count,
+ Node* context) {
+ InterpreterAssembler::Variable return_value(assembler_,
+ MachineRepresentation::kTagged);
+ InterpreterAssembler::Label done(assembler_), null(assembler_),
+ function(assembler_), non_function_constructor(assembler_);
+
+ Node* object = __ LoadRegister(args_reg);
+
+ // If the object is not a JSReceiver, we return null.
+ __ GotoIf(__ WordIsSmi(object), &null);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Node* is_js_receiver = CompareInstanceType(object, FIRST_JS_RECEIVER_TYPE,
+ kInstanceTypeGreaterThanOrEqual);
+ __ GotoUnless(is_js_receiver, &null);
+
+ // Return 'Function' for JSFunction and JSBoundFunction objects.
+ Node* is_function = CompareInstanceType(object, FIRST_FUNCTION_TYPE,
+ kInstanceTypeGreaterThanOrEqual);
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ __ GotoIf(is_function, &function);
+
+ // Check if the constructor in the map is a JS function.
+ Node* constructor = __ LoadMapConstructor(__ LoadMap(object));
+ Node* constructor_is_js_function =
+ CompareInstanceType(constructor, JS_FUNCTION_TYPE, kInstanceTypeEqual);
+ __ GotoUnless(constructor_is_js_function, &non_function_constructor);
+
+ // Grab the instance class name from the constructor function.
+ Node* shared =
+ __ LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
+ return_value.Bind(
+ __ LoadObjectField(shared, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ Goto(&done);
+
+ // Non-JS objects have class null.
+ __ Bind(&null);
+ {
+ return_value.Bind(__ LoadRoot(Heap::kNullValueRootIndex));
+ __ Goto(&done);
+ }
+
+ // Functions have class 'Function'.
+ __ Bind(&function);
+ {
+ return_value.Bind(__ LoadRoot(Heap::kFunction_stringRootIndex));
+ __ Goto(&done);
+ }
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ Bind(&non_function_constructor);
+ {
+ return_value.Bind(__ LoadRoot(Heap::kObject_stringRootIndex));
+ __ Goto(&done);
+ }
+
+ __ Bind(&done);
+ return return_value.value();
+}
+
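+// In summary: Smis and other non-receivers yield null, functions and bound
+// functions yield "Function", receivers whose map constructor is a JSFunction
+// yield that function's instance class name, and everything else yields
+// "Object".
+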
void IntrinsicsHelper::AbortIfArgCountMismatch(int expected, Node* actual) {
- InterpreterAssembler::Label match(assembler_), mismatch(assembler_),
- end(assembler_);
+ InterpreterAssembler::Label match(assembler_);
Node* comparison = __ Word32Equal(actual, __ Int32Constant(expected));
- __ Branch(comparison, &match, &mismatch);
- __ Bind(&mismatch);
+ __ GotoIf(comparison, &match);
__ Abort(kWrongArgumentCountForInvokeIntrinsic);
- __ Goto(&end);
+ __ Goto(&match);
__ Bind(&match);
- __ Goto(&end);
- __ Bind(&end);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index e27c678e25..11fe4a0a8e 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -6,8 +6,7 @@
#define V8_INTERPRETER_INTERPRETER_INTRINSICS_H_
#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
@@ -20,14 +19,42 @@ namespace compiler {
class Node;
} // namespace compiler
-#define INTRINSICS_LIST(V) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsArray, is_array, 1)
-
namespace interpreter {
+// List of supported intrinsics, with upper case name, lower case name and
+// expected number of arguments (-1 denoting a variable argument count). One
+// entry is unpacked in the example below the list.
+#define INTRINSICS_LIST(V) \
+ V(Call, call, -1) \
+ V(ClassOf, class_of, 1) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSProxy, is_js_proxy, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsRegExp, is_regexp, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(NewObject, new_object, 2) \
+ V(NumberToString, number_to_string, 1) \
+ V(RegExpConstructResult, reg_exp_construct_result, 3) \
+ V(RegExpExec, reg_exp_exec, 4) \
+ V(SubString, sub_string, 3) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
+ V(ToInteger, to_integer, 1) \
+ V(ToNumber, to_number, 1) \
+ V(ToObject, to_object, 1) \
+ V(ValueOf, value_of, 1)
+
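+// For example, the entry V(SubString, sub_string, 3) above declares the
+// %_SubString intrinsic: the upper case name is used for C++ identifiers,
+// the lower case name for assembler labels, and the count of 3 is checked
+// against the actual argument count in debug code.
+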
class IntrinsicsHelper {
public:
+ enum class IntrinsicId {
+#define DECLARE_INTRINSIC_ID(name, lower_case, count) k##name,
+ INTRINSICS_LIST(DECLARE_INTRINSIC_ID)
+#undef DECLARE_INTRINSIC_ID
+ kIdCount
+ };
+ STATIC_ASSERT(static_cast<uint32_t>(IntrinsicId::kIdCount) <= kMaxUInt8);
+
explicit IntrinsicsHelper(InterpreterAssembler* assembler);
compiler::Node* InvokeIntrinsic(compiler::Node* function_id,
@@ -36,22 +63,36 @@ class IntrinsicsHelper {
compiler::Node* arg_count);
static bool IsSupported(Runtime::FunctionId function_id);
+ static IntrinsicId FromRuntimeId(Runtime::FunctionId function_id);
+ static Runtime::FunctionId ToRuntimeId(IntrinsicId intrinsic_id);
private:
enum InstanceTypeCompareMode {
kInstanceTypeEqual,
kInstanceTypeGreaterThanOrEqual
};
+
+ compiler::Node* IsInstanceType(compiler::Node* input, int type);
compiler::Node* CompareInstanceType(compiler::Node* map, int type,
InstanceTypeCompareMode mode);
+ compiler::Node* IntrinsicAsStubCall(compiler::Node* input,
+ compiler::Node* context,
+ Callable const& callable);
void AbortIfArgCountMismatch(int expected, compiler::Node* actual);
- InterpreterAssembler* assembler_;
-#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
- compiler::Node* name(compiler::Node* input);
+#define DECLARE_INTRINSIC_HELPER(name, lower_case, count) \
+ compiler::Node* name(compiler::Node* input, compiler::Node* arg_count, \
+ compiler::Node* context);
INTRINSICS_LIST(DECLARE_INTRINSIC_HELPER)
#undef DECLARE_INTRINSIC_HELPER
+ Isolate* isolate() { return isolate_; }
+ Zone* zone() { return zone_; }
+
+ Isolate* isolate_;
+ Zone* zone_;
+ InterpreterAssembler* assembler_;
+
DISALLOW_COPY_AND_ASSIGN(IntrinsicsHelper);
};
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 5084300dfe..68f0342180 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -4,10 +4,14 @@
#include "src/interpreter/interpreter.h"
+#include <fstream>
+#include <memory>
+
#include "src/ast/prettyprinter.h"
#include "src/code-factory.h"
#include "src/compiler.h"
#include "src/factory.h"
+#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
@@ -20,23 +24,54 @@ namespace internal {
namespace interpreter {
using compiler::Node;
+typedef CodeStubAssembler::Label Label;
+typedef CodeStubAssembler::Variable Variable;
+typedef InterpreterAssembler::Arg Arg;
#define __ assembler->
+class InterpreterCompilationJob final : public CompilationJob {
+ public:
+ explicit InterpreterCompilationJob(CompilationInfo* info);
+
+ protected:
+ Status PrepareJobImpl() final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl() final;
+
+ private:
+ BytecodeGenerator* generator() { return &generator_; }
+
+ BytecodeGenerator generator_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
+};
+
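+// A sketch of the intended flow, assuming CompilationJob's usual contract:
+// PrepareJob and FinalizeJob run on the main thread, while ExecuteJob only
+// drives the BytecodeGenerator, so heap allocation is confined to the
+// finalize phase.
+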
Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
memset(dispatch_table_, 0, sizeof(dispatch_table_));
}
void Interpreter::Initialize() {
- DCHECK(FLAG_ignition);
if (IsDispatchTableInitialized()) return;
Zone zone(isolate_->allocator());
HandleScope scope(isolate_);
+ if (FLAG_trace_ignition_dispatches) {
+ static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
+ bytecode_dispatch_counters_table_.reset(
+ new uintptr_t[kBytecodeCount * kBytecodeCount]);
+ memset(bytecode_dispatch_counters_table_.get(), 0,
+ sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
+ }
+
// Generate bytecode handlers for all bytecodes and scales.
- for (OperandScale operand_scale = OperandScale::kSingle;
- operand_scale <= OperandScale::kMaxValid;
- operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+ const OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
+ for (OperandScale operand_scale : kOperandScales) {
#define GENERATE_CODE(Name, ...) \
{ \
if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) { \
@@ -45,12 +80,13 @@ void Interpreter::Initialize() {
Do##Name(&assembler); \
Handle<Code> code = assembler.GenerateCode(); \
size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale); \
- dispatch_table_[index] = *code; \
+ dispatch_table_[index] = code->entry(); \
TraceCodegen(code); \
- LOG_CODE_EVENT( \
+ PROFILE( \
isolate_, \
CodeCreateEvent( \
- Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(*code), \
+ CodeEventListener::BYTECODE_HANDLER_TAG, \
+ AbstractCode::cast(*code), \
Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
} \
}
@@ -73,7 +109,8 @@ Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
DCHECK(IsDispatchTableInitialized());
DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
- return dispatch_table_[index];
+ Address code_entry = dispatch_table_[index];
+ return Code::GetCodeFromTargetAddress(code_entry);
}
// static
@@ -81,48 +118,86 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
OperandScale operand_scale) {
static const size_t kEntriesPerOperandScale = 1u << kBitsPerByte;
size_t index = static_cast<size_t>(bytecode);
- OperandScale current_scale = OperandScale::kSingle;
- while (current_scale != operand_scale) {
- index += kEntriesPerOperandScale;
- current_scale = Bytecodes::NextOperandScale(current_scale);
+ switch (operand_scale) {
+ case OperandScale::kSingle:
+ return index;
+ case OperandScale::kDouble:
+ return index + kEntriesPerOperandScale;
+ case OperandScale::kQuadruple:
+ return index + 2 * kEntriesPerOperandScale;
}
- return index;
+ UNREACHABLE();
+ return 0;
}
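+
+// For example, GetDispatchTableIndex(Bytecode::kAdd, OperandScale::kDouble)
+// yields static_cast<size_t>(Bytecode::kAdd) + 256, since each operand scale
+// owns one kEntriesPerOperandScale-sized (256-entry) block of the table.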
void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
- v->VisitPointers(
- reinterpret_cast<Object**>(&dispatch_table_[0]),
- reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
+ for (int i = 0; i < kDispatchTableSize; i++) {
+ Address code_entry = dispatch_table_[i];
+ Object* code = code_entry == nullptr
+ ? nullptr
+ : Code::GetCodeFromTargetAddress(code_entry);
+ Object* old_code = code;
+ v->VisitPointer(&code);
+ if (code != old_code) {
+ dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
+ }
+ }
}
// static
int Interpreter::InterruptBudget() {
- // TODO(ignition): Tune code size multiplier.
- const int kCodeSizeMultiplier = 32;
return FLAG_interrupt_budget * kCodeSizeMultiplier;
}
+InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
+ : CompilationJob(info, "Ignition"), generator_(info) {}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
+ return SUCCEEDED;
+}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
+ generator()->GenerateBytecode();
+
+ if (generator()->HasStackOverflow()) {
+ return FAILED;
+ }
+ return SUCCEEDED;
+}
+
+InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
+ Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
+ if (generator()->HasStackOverflow()) {
+ return FAILED;
+ }
+
+ if (FLAG_print_bytecode) {
+ OFStream os(stdout);
+ bytecodes->Print(os);
+ os << std::flush;
+ }
+
+ info()->SetBytecodeArray(bytecodes);
+ info()->SetCode(info()->isolate()->builtins()->InterpreterEntryTrampoline());
+ return SUCCEEDED;
+}
+
bool Interpreter::MakeBytecode(CompilationInfo* info) {
+ RuntimeCallTimerScope runtimeTimer(info->isolate(),
+ &RuntimeCallStats::CompileIgnition);
TimerEventScope<TimerEventCompileIgnition> timer(info->isolate());
- TRACE_EVENT0("v8", "V8.CompileIgnition");
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ info->isolate(), &tracing::TraceEventStatsTable::CompileIgnition);
- if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
+ if (FLAG_print_bytecode || FLAG_print_ast) {
OFStream os(stdout);
- base::SmartArrayPointer<char> name = info->GetDebugName();
+ std::unique_ptr<char[]> name = info->GetDebugName();
os << "[generating bytecode for function: " << info->GetDebugName().get()
<< "]" << std::endl
<< std::flush;
}
#ifdef DEBUG
- if (info->parse_info() && FLAG_print_source) {
- OFStream os(stdout);
- os << "--- Source from AST ---" << std::endl
- << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
- << std::endl
- << std::flush;
- }
-
if (info->parse_info() && FLAG_print_ast) {
OFStream os(stdout);
os << "--- AST ---" << std::endl
@@ -131,26 +206,17 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
}
#endif // DEBUG
- BytecodeGenerator generator(info->isolate(), info->zone());
- Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
-
- if (generator.HasStackOverflow()) return false;
-
- if (FLAG_print_bytecode) {
- OFStream os(stdout);
- bytecodes->Print(os);
- os << std::flush;
- }
-
- info->SetBytecodeArray(bytecodes);
- info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
- return true;
+ InterpreterCompilationJob job(info);
+ if (job.PrepareJob() != CompilationJob::SUCCEEDED) return false;
+ if (job.ExecuteJob() != CompilationJob::SUCCEEDED) return false;
+ return job.FinalizeJob() == CompilationJob::SUCCEEDED;
}
bool Interpreter::IsDispatchTableInitialized() {
- if (FLAG_trace_ignition || FLAG_trace_ignition_codegen) {
- // Regenerate table to add bytecode tracing operations
- // or to print the assembly code generated by TurboFan.
+ if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
+ FLAG_trace_ignition_dispatches) {
+ // Regenerate table to add bytecode tracing operations, print the assembly
+ // code generated by TurboFan or instrument handlers with dispatch counters.
return false;
}
return dispatch_table_[0] != nullptr;
@@ -168,9 +234,10 @@ void Interpreter::TraceCodegen(Handle<Code> code) {
const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
#ifdef ENABLE_DISASSEMBLER
-#define RETURN_NAME(Name, ...) \
- if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code) { \
- return #Name; \
+#define RETURN_NAME(Name, ...) \
+ if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
+ code->entry()) { \
+ return #Name; \
}
BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
@@ -178,6 +245,65 @@ const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
return nullptr;
}
+uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
+ int from_index = Bytecodes::ToByte(from);
+ int to_index = Bytecodes::ToByte(to);
+ return bytecode_dispatch_counters_table_[from_index * kNumberOfBytecodes +
+ to_index];
+}
+
+Local<v8::Object> Interpreter::GetDispatchCountersObject() {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ Local<v8::Context> context = isolate->GetCurrentContext();
+
+ Local<v8::Object> counters_map = v8::Object::New(isolate);
+
+ // Output is a JSON-encoded object of objects.
+ //
+ // The keys on the top level object are source bytecodes, and the
+ // corresponding values are objects. The keys of these inner objects are
+ // dispatch destinations, and each associated value is a counter for the
+ // corresponding source-destination dispatch pair.
+ //
+ // Only non-zero counters are emitted; an entry in the top-level object is
+ // present for every source bytecode, even if it is empty because all
+ // counters for that source are zero.
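+ //
+ // For example (illustrative values only):
+ //
+ //   {
+ //     "LdaZero": {"Star": 3, "Return": 1},
+ //     "Star": {"LdaZero": 2, "Add": 1},
+ //     ...
+ //   }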
+
+ for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
+ Bytecode from_bytecode = Bytecodes::FromByte(from_index);
+ Local<v8::Object> counters_row = v8::Object::New(isolate);
+
+ for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
+ Bytecode to_bytecode = Bytecodes::FromByte(to_index);
+ uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);
+
+ if (counter > 0) {
+ std::string to_name = Bytecodes::ToString(to_bytecode);
+ Local<v8::String> to_name_object =
+ v8::String::NewFromUtf8(isolate, to_name.c_str(),
+ NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
+ CHECK(counters_row
+ ->DefineOwnProperty(context, to_name_object, counter_object)
+ .IsJust());
+ }
+ }
+
+ std::string from_name = Bytecodes::ToString(from_bytecode);
+ Local<v8::String> from_name_object =
+ v8::String::NewFromUtf8(isolate, from_name.c_str(),
+ NewStringType::kNormal)
+ .ToLocalChecked();
+
+ CHECK(
+ counters_map->DefineOwnProperty(context, from_name_object, counters_row)
+ .IsJust());
+ }
+
+ return counters_map;
+}
+
// LdaZero
//
// Load literal '0' into the accumulator.
@@ -197,19 +323,14 @@ void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
__ Dispatch();
}
-void Interpreter::DoLoadConstant(InterpreterAssembler* assembler) {
- Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- __ SetAccumulator(constant);
- __ Dispatch();
-}
-
-
// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
- DoLoadConstant(assembler);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ __ SetAccumulator(constant);
+ __ Dispatch();
}
// LdaUndefined
@@ -222,6 +343,16 @@ void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
__ Dispatch();
}
+// LdrUndefined <reg>
+//
+// Loads undefined into register |reg|.
+void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
+ Node* destination = __ BytecodeOperandReg(0);
+ __ StoreRegister(undefined_value, destination);
+ __ Dispatch();
+}
// LdaNull
//
@@ -232,7 +363,6 @@ void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// LdaTheHole
//
// Load TheHole into the accumulator.
@@ -242,7 +372,6 @@ void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// LdaTrue
//
// Load True into the accumulator.
@@ -252,7 +381,6 @@ void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// LdaFalse
//
// Load False into the accumulator.
@@ -262,7 +390,6 @@ void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// Ldar <src>
//
// Load accumulator with value from register <src>.
@@ -273,7 +400,6 @@ void Interpreter::DoLdar(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// Star <dst>
//
// Store accumulator to register <dst>.
@@ -284,7 +410,6 @@ void Interpreter::DoStar(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
@@ -296,48 +421,61 @@ void Interpreter::DoMov(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
-void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadGlobal(Callable ic,
+ InterpreterAssembler* assembler) {
+ typedef LoadGlobalWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
- Node* native_context =
- __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
- Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
- // Load the global via the LoadIC.
+ // Load the global via the LoadGlobalIC.
Node* code_target = __ HeapConstant(ic.code());
- Node* constant_index = __ BytecodeOperandIdx(0);
- Node* name = __ LoadConstantPoolEntry(constant_index);
- Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* raw_slot = __ BytecodeOperandIdx(0);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Node* result = __ CallStub(ic.descriptor(), code_target, context, global,
- name, smi_slot, type_feedback_vector);
- __ SetAccumulator(result);
- __ Dispatch();
+ return __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
}
-// LdaGlobal <name_index> <slot>
+// LdaGlobal <slot>
//
// Load the global via the LoadGlobalIC at FeedBackVector slot <slot> into the
// accumulator outside of a typeof.
void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
+ Callable ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
+ Node* result = BuildLoadGlobal(ic, assembler);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+// LdrGlobal <slot> <reg>
+//
+// Load the global via the LoadGlobalIC at FeedBackVector slot <slot> into
+// register <reg> outside of a typeof.
+void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
+ Node* result = BuildLoadGlobal(ic, assembler);
+ Node* destination = __ BytecodeOperandReg(1);
+ __ StoreRegister(result, destination);
+ __ Dispatch();
}
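+
+// Note: each Ldr* bytecode mirrors its Lda* counterpart but writes the result
+// to an explicit output register instead of the accumulator, effectively
+// fusing an Lda/Star pair into a single dispatch.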
-// LdaGlobalInsideTypeof <name_index> <slot>
+// LdaGlobalInsideTypeof <slot>
//
// Load the global via the LoadGlobalIC at FeedBackVector slot <slot> into the
// accumulator inside of a typeof.
void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
- UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
+ Callable ic =
+ CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
+ Node* result = BuildLoadGlobal(ic, assembler);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
-void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
Node* native_context =
@@ -352,45 +490,58 @@ void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
Node* raw_slot = __ BytecodeOperandIdx(1);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- __ CallStub(ic.descriptor(), code_target, context, global, name, value,
- smi_slot, type_feedback_vector);
+ __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kReceiver, global), Arg(Descriptor::kName, name),
+ Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
__ Dispatch();
}
-
// StaGlobalSloppy <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedBackVector slot <slot> in sloppy mode.
void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoStoreGlobal(ic, assembler);
+ Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
+ DoStaGlobal(ic, assembler);
}
-
// StaGlobalStrict <name_index> <slot>
//
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedBackVector slot <slot> in strict mode.
void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
- DoStoreGlobal(ic, assembler);
+ Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
+ DoStaGlobal(ic, assembler);
+}
+
+compiler::Node* Interpreter::BuildLoadContextSlot(
+ InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ return __ LoadContextSlot(context, slot_index);
}
// LdaContextSlot <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
- Node* reg_index = __ BytecodeOperandReg(0);
- Node* context = __ LoadRegister(reg_index);
- Node* slot_index = __ BytecodeOperandIdx(1);
- Node* result = __ LoadContextSlot(context, slot_index);
+ Node* result = BuildLoadContextSlot(assembler);
__ SetAccumulator(result);
__ Dispatch();
}
+// LdrContextSlot <context> <slot_index> <reg>
+//
+// Load the object in <slot_index> of <context> into register <reg>.
+void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
+ Node* result = BuildLoadContextSlot(assembler);
+ Node* destination = __ BytecodeOperandReg(2);
+ __ StoreRegister(result, destination);
+ __ Dispatch();
+}
+
// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
@@ -403,8 +554,8 @@ void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
__ Dispatch();
}
-void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
+ InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* name = __ LoadConstantPoolEntry(index);
Node* context = __ GetContext();
@@ -418,7 +569,7 @@ void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
- DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
+ DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
}
// LdaLookupSlotInsideTypeof <name_index>
@@ -426,11 +577,11 @@ void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
- DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
+ DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}
-void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
- InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
+ InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* name = __ LoadConstantPoolEntry(index);
@@ -448,19 +599,20 @@ void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
- DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
+ DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
}
-
// StaLookupSlotStrict <name_index>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
- DoStoreLookupSlot(LanguageMode::STRICT, assembler);
+ DoStaLookupSlot(LanguageMode::STRICT, assembler);
}
-void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadNamedProperty(Callable ic,
+ InterpreterAssembler* assembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(register_index);
@@ -470,23 +622,38 @@ void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
- name, smi_slot, type_feedback_vector);
+ return __ CallStub(
+ ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
+ Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
+}
+
+// LdaNamedProperty <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
+ Node* result = BuildLoadNamedProperty(ic, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LoadIC <object> <name_index> <slot>
+// LdrNamedProperty <object> <name_index> <slot> <reg>
//
// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- UNINITIALIZED);
- DoLoadIC(ic, assembler);
+// constant pool entry <name_index> and puts the result into register <reg>.
+void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
+ Node* result = BuildLoadNamedProperty(ic, assembler);
+ Node* destination = __ BytecodeOperandReg(3);
+ __ StoreRegister(result, destination);
+ __ Dispatch();
}
-void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
+Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
+ InterpreterAssembler* assembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -495,23 +662,37 @@ void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
- name, smi_slot, type_feedback_vector);
- __ SetAccumulator(result);
- __ Dispatch();
+ return __ CallStub(
+ ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
+ Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
}
// KeyedLoadIC <object> <slot>
//
// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
// in the accumulator.
-void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
- DoKeyedLoadIC(ic, assembler);
+void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
+ Node* result = BuildLoadKeyedProperty(ic, assembler);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+// LdrKeyedProperty <object> <slot> <reg>
+//
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator and puts the result in register <reg>.
+void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
+ Node* result = BuildLoadKeyedProperty(ic, assembler);
+ Node* destination = __ BytecodeOperandReg(2);
+ __ StoreRegister(result, destination);
+ __ Dispatch();
}
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -522,36 +703,35 @@ void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context, object, name, value,
- smi_slot, type_feedback_vector);
+ __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
+ Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
__ Dispatch();
}
-
-// StoreICSloppy <object> <name_index> <slot>
+// StaNamedPropertySloppy <object> <name_index> <slot>
//
// Calls the sloppy mode StoreIC at FeedBackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-void Interpreter::DoStoreICSloppy(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
DoStoreIC(ic, assembler);
}
-
-// StoreICStrict <object> <name_index> <slot>
+// StaNamedPropertyStrict <object> <name_index> <slot>
//
// Calls the strict mode StoreIC at FeedBackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
DoStoreIC(ic, assembler);
}
void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -562,30 +742,28 @@ void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context, object, name, value,
- smi_slot, type_feedback_vector);
+ __ CallStub(ic.descriptor(), code_target, context,
+ Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
+ Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
+ Arg(Descriptor::kVector, type_feedback_vector));
__ Dispatch();
}
-
-// KeyedStoreICSloppy <object> <key> <slot>
+// StaKeyedPropertySloppy <object> <key> <slot>
//
// Calls the sloppy mode KeyStoreIC at FeedBackVector slot <slot> for <object>
// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppy(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
DoKeyedStoreIC(ic, assembler);
}
-
-// KeyedStoreICStore <object> <key> <slot>
+// StaKeyedPropertyStrict <object> <key> <slot>
//
// Calls the strict mode KeyStoreIC at FeedBackVector slot <slot> for <object>
// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
DoKeyedStoreIC(ic, assembler);
}
@@ -602,7 +780,6 @@ void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
@@ -613,98 +790,160 @@ void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
__ Dispatch();
}
-void Interpreter::DoBinaryOp(Callable callable,
- InterpreterAssembler* assembler) {
- // TODO(bmeurer): Collect definition side type feedback for various
- // binary operations.
- Node* target = __ HeapConstant(callable.code());
+// TODO(mythria): Remove this function once all BinaryOps record type feedback.
+template <class Generator>
+void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
Node* context = __ GetContext();
- Node* result = __ CallStub(callable.descriptor(), target, context, lhs, rhs);
+ Node* result = Generator::Generate(assembler, lhs, rhs, context);
__ SetAccumulator(result);
__ Dispatch();
}
-void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler) {
- // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
- // operations, instead of calling builtins directly.
+template <class Generator>
+void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
Node* context = __ GetContext();
- Node* result = __ CallRuntime(function_id, context, lhs, rhs);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
+ type_feedback_vector, context);
__ SetAccumulator(result);
__ Dispatch();
}
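+
+// Each Generator used above is one of the *WithFeedbackStub classes (e.g.
+// AddWithFeedbackStub): its Generate() emits the operation inline and records
+// BinaryOperationFeedback into the given vector slot for the optimizing
+// compiler to consume later.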
-
// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::Add(isolate_), assembler);
+ DoBinaryOpWithFeedback<AddWithFeedbackStub>(assembler);
}
-
// Sub <src>
//
// Subtract register <src> from accumulator.
void Interpreter::DoSub(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::Subtract(isolate_), assembler);
+ DoBinaryOpWithFeedback<SubtractWithFeedbackStub>(assembler);
}
-
// Mul <src>
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kMultiply, assembler);
+ DoBinaryOpWithFeedback<MultiplyWithFeedbackStub>(assembler);
}
-
// Div <src>
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kDivide, assembler);
+ DoBinaryOpWithFeedback<DivideWithFeedbackStub>(assembler);
}
-
// Mod <src>
//
// Modulo register <src> by accumulator.
void Interpreter::DoMod(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kModulus, assembler);
+ DoBinaryOpWithFeedback<ModulusWithFeedbackStub>(assembler);
}
+void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
+ InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* lhs = __ LoadRegister(reg_index);
+ Node* rhs = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_rhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, lhs, &var_lhs_type_feedback);
+ Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, rhs, &var_rhs_type_feedback);
+ Node* result = nullptr;
+
+ switch (bitwise_op) {
+ case Token::BIT_OR: {
+ Node* value = __ Word32Or(lhs_value, rhs_value);
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::BIT_AND: {
+ Node* value = __ Word32And(lhs_value, rhs_value);
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::BIT_XOR: {
+ Node* value = __ Word32Xor(lhs_value, rhs_value);
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::SHL: {
+ Node* value = __ Word32Shl(
+ lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ case Token::SHR: {
+ Node* value = __ Word32Shr(
+ lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
+ result = __ ChangeUint32ToTagged(value);
+ } break;
+ case Token::SAR: {
+ Node* value = __ Word32Sar(
+ lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
+ result = __ ChangeInt32ToTagged(value);
+ } break;
+ default:
+ UNREACHABLE();
+ }
+
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+
+ if (FLAG_debug_code) {
+ Label ok(assembler);
+ __ GotoIf(__ WordIsSmi(result), &ok);
+ Node* result_map = __ LoadMap(result);
+ __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
+ kExpectedHeapNumber);
+ __ Goto(&ok);
+ __ Bind(&ok);
+ }
+
+ Node* input_feedback =
+ __ Word32Or(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
+ __ UpdateFeedback(__ Word32Or(result_type, input_feedback),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
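+
+// Example (illustrative): for 1 | 2 the truncated inputs and the result are
+// all Smis, so the combined feedback stays kSignedSmall; for 1.5 | 0 the lhs
+// truncation contributes kNumber and the Word32Or above widens the recorded
+// feedback to kNumber.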
// BitwiseOr <src>
//
// BitwiseOr register <src> to accumulator.
void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::BitwiseOr(isolate_), assembler);
+ DoBitwiseBinaryOp(Token::BIT_OR, assembler);
}
-
// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::BitwiseXor(isolate_), assembler);
+ DoBitwiseBinaryOp(Token::BIT_XOR, assembler);
}
-
// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::BitwiseAnd(isolate_), assembler);
+ DoBitwiseBinaryOp(Token::BIT_AND, assembler);
}
-
// ShiftLeft <src>
//
// Left shifts register <src> by the count specified in the accumulator.
@@ -712,10 +951,9 @@ void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
// before the operation. 5 lsb bits from the accumulator are used as count
// i.e. <src> << (accumulator & 0x1F).
void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kShiftLeft, assembler);
+ DoBitwiseBinaryOp(Token::SHL, assembler);
}
-
// ShiftRight <src>
//
// Right shifts register <src> by the count specified in the accumulator.
@@ -723,10 +961,9 @@ void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
// accumulator to uint32 before the operation. 5 lsb bits from the accumulator
// are used as count i.e. <src> >> (accumulator & 0x1F).
void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kShiftRight, assembler);
+ DoBitwiseBinaryOp(Token::SAR, assembler);
}
-
// ShiftRightLogical <src>
//
// Right shifts register <src> by the count specified in the accumulator.
@@ -734,62 +971,356 @@ void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
// uint32 before the operation. 5 lsb bits from the accumulator are used as
// the count, i.e. <src> >>> (accumulator & 0x1F).
void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kShiftRightLogical, assembler);
+ DoBitwiseBinaryOp(Token::SHR, assembler);
+}
+
+// AddSmi <imm> <reg>
+//
+// Adds an immediate value <imm> to register <reg>. For this
+// operation <reg> is the lhs operand and <imm> is the rhs operand.
+void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
+ end(assembler);
+
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // {right} is known to be a Smi.
+ // If {left} is also a Smi, take the fast path.
+ __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Bind(&fastpath);
+ {
+ // Try fast Smi addition first.
+ Node* pair = __ SmiAddWithOverflow(left, right);
+ Node* overflow = __ Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_notoverflow(assembler);
+ __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Bind(&if_notoverflow);
+ {
+ __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ type_feedback_vector, slot_index);
+ var_result.Bind(__ Projection(0, pair));
+ __ Goto(&end);
+ }
+ }
+ __ Bind(&slowpath);
+ {
+ Node* context = __ GetContext();
+ AddWithFeedbackStub stub(__ isolate());
+ Callable callable =
+ Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
+ Node* args[] = {left, right, slot_index, type_feedback_vector, context};
+ var_result.Bind(__ CallStubN(callable, args, 1));
+ __ Goto(&end);
+ }
+ __ Bind(&end);
+ {
+ __ SetAccumulator(var_result.value());
+ __ Dispatch();
+ }
+}
+
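+// AddSmi is typically emitted for expressions like `x + 1`: the inline Smi
+// addition above handles the common case, and only overflow or a non-Smi
+// left operand takes the AddWithFeedbackStub slow path.
+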
+// SubSmi <imm> <reg>
+//
+// Subtracts an immediate value <imm> from register <reg>. For this
+// operation <reg> is the lhs operand and <imm> is the rhs operand.
+void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
+ Variable var_result(assembler, MachineRepresentation::kTagged);
+ Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
+ end(assembler);
+
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // {right} is known to be a Smi.
+ // If {left} is also a Smi, take the fast path.
+ __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Bind(&fastpath);
+ {
+ // Try fast Smi subtraction first.
+ Node* pair = __ SmiSubWithOverflow(left, right);
+ Node* overflow = __ Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_notoverflow(assembler);
+ __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Bind(&if_notoverflow);
+ {
+ __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ type_feedback_vector, slot_index);
+ var_result.Bind(__ Projection(0, pair));
+ __ Goto(&end);
+ }
+ }
+ __ Bind(&slowpath);
+ {
+ Node* context = __ GetContext();
+ SubtractWithFeedbackStub stub(__ isolate());
+ Callable callable = Callable(
+ stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
+ Node* args[] = {left, right, slot_index, type_feedback_vector, context};
+ var_result.Bind(__ CallStubN(callable, args, 1));
+ __ Goto(&end);
+ }
+ __ Bind(&end);
+ {
+ __ SetAccumulator(var_result.value());
+ __ Dispatch();
+ }
+}
+
+// BitwiseOrSmi <imm> <reg>
+//
+// BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs
+// operand and <imm> is the rhs operand.
+void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* value = __ Word32Or(lhs_value, rhs_value);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
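
The Select at the end implements a simple classification: if the tagged result is still a Smi the handler records kSignedSmall feedback, otherwise kNumber, and that gets OR-ed with the feedback gathered for the left operand, since the feedback lattice is encoded as bit flags. A hedged sketch of the classification (a toy 31-bit Smi payload range is assumed):

#include <cstdint>
#include <cstdio>

// Feedback encoded as bit flags so results for several operands can be
// combined with bitwise OR, mirroring the UpdateFeedback call above.
enum Feedback : uint32_t {
  kNone = 0,
  kSignedSmall = 1 << 0,  // result fits in a Smi
  kNumber = 1 << 1,       // result needed a heap number
};

// Toy 31-bit Smi payload range, an assumption for illustration.
static bool FitsInSmi(int64_t value) {
  return value >= -(int64_t{1} << 30) && value < (int64_t{1} << 30);
}

int main() {
  int64_t result = int64_t{1} << 40;  // too big for a Smi
  uint32_t lhs_feedback = kSignedSmall;  // learned from the left operand
  uint32_t result_feedback = FitsInSmi(result) ? kSignedSmall : kNumber;
  printf("combined feedback: %u\n", lhs_feedback | result_feedback);  // 3
}
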
+
+// BitwiseAnd <imm> <reg>
+//
+// Bitwise-ANDs <reg> with <imm>. For this operation <reg> is the lhs
+// operand and <imm> is the rhs operand.
+void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* value = __ Word32And(lhs_value, rhs_value);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+// ShiftLeftSmi <imm> <reg>
+//
+// Left shifts register <reg> by the count specified in <imm>.
+// Register <reg> is converted to an int32 before the operation. The five
+// least-significant bits of <imm> are used as the count, i.e.
+// <reg> << (<imm> & 0x1F).
+void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
+ Node* value = __ Word32Shl(lhs_value, shift_count);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
-void Interpreter::DoCountOp(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler) {
+// ShiftRightSmi <imm> <reg>
+//
+// Right shifts register <reg> by the count specified in <imm>.
+// Register <reg> is converted to an int32 before the operation. The five
+// least-significant bits of <imm> are used as the count, i.e.
+// <reg> >> (<imm> & 0x1F).
+void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(1);
+ Node* left = __ LoadRegister(reg_index);
+ Node* raw_int = __ BytecodeOperandImm(0);
+ Node* right = __ SmiTag(raw_int);
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(2);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
+ context, left, &var_lhs_type_feedback);
+ Node* rhs_value = __ SmiToWord32(right);
+ Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
+ Node* value = __ Word32Sar(lhs_value, shift_count);
+ Node* result = __ ChangeInt32ToTagged(value);
+ Node* result_type =
+ __ Select(__ WordIsSmi(result),
+ __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ Int32Constant(BinaryOperationFeedback::kNumber));
+ __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
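
The Word32And with 0x1f in both shift handlers mirrors JavaScript's shift semantics: only the low five bits of the count are used, so x << 33 behaves like x << 1. A standalone sketch:

#include <cstdint>
#include <cstdio>

// JS shifts use only the low 5 bits of the count (ES2015 12.8.3/12.8.4),
// which is exactly what the Word32And with 0x1f above implements.
static int32_t JsShl(int32_t lhs, int32_t rhs) {
  uint32_t count = static_cast<uint32_t>(rhs) & 0x1f;
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << count);
}

static int32_t JsSar(int32_t lhs, int32_t rhs) {
  uint32_t count = static_cast<uint32_t>(rhs) & 0x1f;
  return lhs >> count;  // arithmetic shift on all mainstream targets
}

int main() {
  printf("%d\n", JsShl(1, 33));  // 2: count 33 is masked down to 1
  printf("%d\n", JsSar(-8, 2));  // -2: sign-extending right shift
}
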
+
+Node* Interpreter::BuildUnaryOp(Callable callable,
+ InterpreterAssembler* assembler) {
+ Node* target = __ HeapConstant(callable.code());
+ Node* accumulator = __ GetAccumulator();
+ Node* context = __ GetContext();
+ return __ CallStub(callable.descriptor(), target, context, accumulator);
+}
+
+template <class Generator>
+void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* one = __ NumberConstant(1);
Node* context = __ GetContext();
- Node* result = __ CallRuntime(function_id, context, value, one);
+ Node* result = Generator::Generate(assembler, value, context);
__ SetAccumulator(result);
__ Dispatch();
}
+template <class Generator>
+void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = Generator::Generate(assembler, value, context,
+ type_feedback_vector, slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
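
DoUnaryOp<Generator> is compile-time dispatch: each stub class exposes a static Generate, and the interpreter stamps out one handler body per stub with no virtual calls or function pointers. A minimal sketch of the pattern (NegateOp and IncrementOp are hypothetical stand-ins for stubs such as IncStub or TypeofStub):

#include <cstdio>

// Each "stub" exposes a static Generate; the shared driver is a template.
struct NegateOp {
  static int Generate(int value) { return -value; }
};
struct IncrementOp {
  static int Generate(int value) { return value + 1; }
};

template <class Generator>
int DoUnaryOp(int accumulator) {
  // One copy of this body is emitted per Generator, resolved statically.
  return Generator::Generate(accumulator);
}

int main() {
  printf("%d\n", DoUnaryOp<NegateOp>(5));     // -5
  printf("%d\n", DoUnaryOp<IncrementOp>(5));  // 6
}
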
+
+// ToName
+//
+// Cast the object referenced by the accumulator to a name.
+void Interpreter::DoToName(InterpreterAssembler* assembler) {
+ Node* result = BuildUnaryOp(CodeFactory::ToName(isolate_), assembler);
+ __ StoreRegister(result, __ BytecodeOperandReg(0));
+ __ Dispatch();
+}
+
+// ToNumber
+//
+// Cast the object referenced by the accumulator to a number.
+void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
+ Node* result = BuildUnaryOp(CodeFactory::ToNumber(isolate_), assembler);
+ __ StoreRegister(result, __ BytecodeOperandReg(0));
+ __ Dispatch();
+}
+
+// ToObject
+//
+// Cast the object referenced by the accumulator to a JSObject.
+void Interpreter::DoToObject(InterpreterAssembler* assembler) {
+ Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler);
+ __ StoreRegister(result, __ BytecodeOperandReg(0));
+ __ Dispatch();
+}
// Inc
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
- DoCountOp(Runtime::kAdd, assembler);
+ DoUnaryOpWithFeedback<IncStub>(assembler);
}
-
// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
- DoCountOp(Runtime::kSubtract, assembler);
+ DoUnaryOpWithFeedback<DecStub>(assembler);
}
-
// LogicalNot
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
+// ToBooleanLogicalNot
+void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Variable result(assembler, MachineRepresentation::kTagged);
+ Label if_true(assembler), if_false(assembler), end(assembler);
+ Node* true_value = __ BooleanConstant(true);
+ Node* false_value = __ BooleanConstant(false);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ {
+ result.Bind(false_value);
+ __ Goto(&end);
+ }
+ __ Bind(&if_false);
+ {
+ result.Bind(true_value);
+ __ Goto(&end);
+ }
+ __ Bind(&end);
+ __ SetAccumulator(result.value());
+ __ Dispatch();
+}
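
BranchIfToBooleanIsTrue bakes in JavaScript's ToBoolean rules, which the two arms above then invert. As a reference for what that branch has to decide, a toy-value sketch of ES2015 7.1.2 (the falsy set is undefined, null, false, +/-0, NaN and the empty string); DoToBooleanLogicalNot then amounts to !ToBoolean(value):

#include <cmath>
#include <cstdio>
#include <string>

// Toy tagged value, standing in for a V8 heap value.
struct Value {
  enum Kind { kUndefined, kNull, kBoolean, kNumber, kString, kObject } kind;
  bool boolean = false;
  double number = 0;
  std::string string;
};

// ES2015 7.1.2 ToBoolean: everything is truthy except undefined, null,
// false, +/-0, NaN and the empty string.
static bool ToBoolean(const Value& v) {
  switch (v.kind) {
    case Value::kUndefined:
    case Value::kNull: return false;
    case Value::kBoolean: return v.boolean;
    case Value::kNumber: return v.number != 0 && !std::isnan(v.number);
    case Value::kString: return !v.string.empty();
    case Value::kObject: return true;
  }
  return false;
}

int main() {
  printf("%d\n", ToBoolean({Value::kNumber, false, 0.0, ""}));  // 0
  printf("%d\n", ToBoolean({Value::kString, false, 0, "hi"}));  // 1
}
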
+
+// LogicalNot
+//
+// Perform logical-not on the accumulator, which must already be a boolean
+// value.
void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::ToBoolean(isolate_);
- Node* target = __ HeapConstant(callable.code());
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* to_boolean_value =
- __ CallStub(callable.descriptor(), target, context, accumulator);
- InterpreterAssembler::Label if_true(assembler), if_false(assembler);
+ Node* value = __ GetAccumulator();
+ Variable result(assembler, MachineRepresentation::kTagged);
+ Label if_true(assembler), if_false(assembler), end(assembler);
Node* true_value = __ BooleanConstant(true);
Node* false_value = __ BooleanConstant(false);
- Node* condition = __ WordEqual(to_boolean_value, true_value);
- __ Branch(condition, &if_true, &if_false);
+ __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
__ Bind(&if_true);
{
- __ SetAccumulator(false_value);
- __ Dispatch();
+ result.Bind(false_value);
+ __ Goto(&end);
}
__ Bind(&if_false);
{
- __ SetAccumulator(true_value);
- __ Dispatch();
+ if (FLAG_debug_code) {
+ __ AbortIfWordNotEqual(value, false_value,
+ BailoutReason::kExpectedBooleanValue);
+ }
+ result.Bind(true_value);
+ __ Goto(&end);
}
+ __ Bind(&end);
+ __ SetAccumulator(result.value());
+ __ Dispatch();
}
// TypeOf
@@ -797,14 +1328,7 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
// Load the accumulator with the string representing the type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::Typeof(isolate_);
- Node* target = __ HeapConstant(callable.code());
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result =
- __ CallStub(callable.descriptor(), target, context, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
+ DoUnaryOp<TypeofStub>(assembler);
}
void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -818,7 +1342,6 @@ void Interpreter::DoDelete(Runtime::FunctionId function_id,
__ Dispatch();
}
-
// DeletePropertyStrict
//
// Delete the property specified in the accumulator from the object
@@ -827,7 +1350,6 @@ void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
DoDelete(Runtime::kDeleteProperty_Strict, assembler);
}
-
// DeletePropertySloppy
//
// Delete the property specified in the accumulator from the object
@@ -845,27 +1367,30 @@ void Interpreter::DoJSCall(InterpreterAssembler* assembler,
Node* receiver_args_count = __ BytecodeOperandCount(2);
Node* receiver_count = __ Int32Constant(1);
Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
+ Node* slot_id = __ BytecodeOperandIdx(3);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
Node* result =
- __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
+ __ CallJSWithFeedback(function, context, receiver_arg, args_count,
+ slot_id, type_feedback_vector, tail_call_mode);
__ SetAccumulator(result);
__ Dispatch();
}
-
-// Call <callable> <receiver> <arg_count>
+// Call <callable> <receiver> <arg_count> <feedback_slot_id>
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
+// |arg_count| arguments in subsequent registers. Collect type feedback
+// into |feedback_slot_id|.
void Interpreter::DoCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kDisallow);
}
-// TailCall <callable> <receiver> <arg_count>
+// TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
//
// Tail call a JSFunction or Callable in |callable| with the |receiver| and
-// |arg_count| arguments in subsequent registers.
+// |arg_count| arguments in subsequent registers. Collect type feedback
+// into |feedback_slot_id|.
void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kAllow);
}
@@ -881,7 +1406,6 @@ void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// CallRuntime <function_id> <first_arg> <arg_count>
//
// Call the runtime function |function_id| with the first argument in
@@ -897,7 +1421,7 @@ void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
// |function_id| with the first argument in |first_arg| and |arg_count|
// arguments in subsequent registers.
void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
- Node* function_id = __ BytecodeOperandRuntimeId(0);
+ Node* function_id = __ BytecodeOperandIntrinsicId(0);
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* arg_count = __ BytecodeOperandCount(2);
Node* context = __ GetContext();
@@ -928,7 +1452,6 @@ void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
//
// Call the runtime function |function_id| which returns a pair, with the
@@ -960,7 +1483,6 @@ void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// CallJSRuntime <context_index> <receiver> <arg_count>
//
// Call the JS runtime function that has the |context_index| with the receiver
@@ -984,7 +1506,6 @@ void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
__ Dispatch();
}
-
// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
@@ -999,109 +1520,67 @@ void Interpreter::DoNew(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::Equal(isolate_), assembler);
+ DoBinaryOp<EqualStub>(assembler);
}
-
// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::NotEqual(isolate_), assembler);
+ DoBinaryOp<NotEqualStub>(assembler);
}
-
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::StrictEqual(isolate_), assembler);
+ DoBinaryOp<StrictEqualStub>(assembler);
}
-
// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::LessThan(isolate_), assembler);
+ DoBinaryOp<LessThanStub>(assembler);
}
-
// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::GreaterThan(isolate_), assembler);
+ DoBinaryOp<GreaterThanStub>(assembler);
}
-
// TestLessThanOrEqual <src>
//
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::LessThanOrEqual(isolate_), assembler);
+ DoBinaryOp<LessThanOrEqualStub>(assembler);
}
-
// TestGreaterThanOrEqual <src>
//
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
- DoBinaryOp(CodeFactory::GreaterThanOrEqual(isolate_), assembler);
+ DoBinaryOp<GreaterThanOrEqualStub>(assembler);
}
-
// TestIn <src>
//
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kHasProperty, assembler);
+ DoBinaryOp<HasPropertyStub>(assembler);
}
-
// TestInstanceOf <src>
//
// Test if the object referenced by the <src> register is an instance of the type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
- DoBinaryOp(Runtime::kInstanceOf, assembler);
-}
-
-void Interpreter::DoTypeConversionOp(Callable callable,
- InterpreterAssembler* assembler) {
- Node* target = __ HeapConstant(callable.code());
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result =
- __ CallStub(callable.descriptor(), target, context, accumulator);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-// ToName
-//
-// Cast the object referenced by the accumulator to a name.
-void Interpreter::DoToName(InterpreterAssembler* assembler) {
- DoTypeConversionOp(CodeFactory::ToName(isolate_), assembler);
-}
-
-
-// ToNumber
-//
-// Cast the object referenced by the accumulator to a number.
-void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
- DoTypeConversionOp(CodeFactory::ToNumber(isolate_), assembler);
-}
-
-
-// ToObject
-//
-// Cast the object referenced by the accumulator to a JSObject.
-void Interpreter::DoToObject(InterpreterAssembler* assembler) {
- DoTypeConversionOp(CodeFactory::ToObject(isolate_), assembler);
+ DoBinaryOp<InstanceOfStub>(assembler);
}
// Jump <imm>
@@ -1117,8 +1596,7 @@ void Interpreter::DoJump(InterpreterAssembler* assembler) {
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ Jump(relative_jump);
}
@@ -1140,8 +1618,7 @@ void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
@@ -1164,8 +1641,7 @@ void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
@@ -1175,15 +1651,14 @@ void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::ToBoolean(isolate_);
- Node* target = __ HeapConstant(callable.code());
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* to_boolean_value =
- __ CallStub(callable.descriptor(), target, context, accumulator);
+ Node* value = __ GetAccumulator();
Node* relative_jump = __ BytecodeOperandImm(0);
- Node* true_value = __ BooleanConstant(true);
- __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Jump(relative_jump);
+ __ Bind(&if_false);
+ __ Dispatch();
}
// JumpIfToBooleanTrueConstant <idx>
@@ -1193,17 +1668,15 @@ void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::ToBoolean(isolate_);
- Node* target = __ HeapConstant(callable.code());
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* to_boolean_value =
- __ CallStub(callable.descriptor(), target, context, accumulator);
+ Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
- Node* true_value = __ BooleanConstant(true);
- __ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Jump(relative_jump);
+ __ Bind(&if_false);
+ __ Dispatch();
}
// JumpIfToBooleanFalse <imm>
@@ -1211,15 +1684,14 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::ToBoolean(isolate_);
- Node* target = __ HeapConstant(callable.code());
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* to_boolean_value =
- __ CallStub(callable.descriptor(), target, context, accumulator);
+ Node* value = __ GetAccumulator();
Node* relative_jump = __ BytecodeOperandImm(0);
- Node* false_value = __ BooleanConstant(false);
- __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Dispatch();
+ __ Bind(&if_false);
+ __ Jump(relative_jump);
}
// JumpIfToBooleanFalseConstant <idx>
@@ -1229,17 +1701,15 @@ void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::ToBoolean(isolate_);
- Node* target = __ HeapConstant(callable.code());
- Node* accumulator = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* to_boolean_value =
- __ CallStub(callable.descriptor(), target, context, accumulator);
+ Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
- Node* false_value = __ BooleanConstant(false);
- __ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
+ Label if_true(assembler), if_false(assembler);
+ __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+ __ Bind(&if_true);
+ __ Dispatch();
+ __ Bind(&if_false);
+ __ Jump(relative_jump);
}
// JumpIfNull <imm>
@@ -1261,8 +1731,7 @@ void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
@@ -1287,8 +1756,7 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
@@ -1311,35 +1779,15 @@ void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
Node* index = __ BytecodeOperandIdx(0);
- Node* constant = __ LoadConstantPoolEntry(index);
- Node* relative_jump = __ SmiUntag(constant);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
-void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler) {
- Node* index = __ BytecodeOperandIdx(0);
- Node* constant_elements = __ LoadConstantPoolEntry(index);
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandFlag(2);
- Node* flags = __ SmiTag(flags_raw);
- Node* closure = __ LoadRegister(Register::function_closure());
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(function_id, context, closure, literal_index,
- constant_elements, flags);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::FastCloneRegExp(isolate_);
- Node* target = __ HeapConstant(callable.code());
Node* index = __ BytecodeOperandIdx(0);
Node* pattern = __ LoadConstantPoolEntry(index);
Node* literal_index_raw = __ BytecodeOperandIdx(1);
@@ -1348,8 +1796,8 @@ void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
Node* flags = __ SmiTag(flags_raw);
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result = __ CallStub(callable.descriptor(), target, context, closure,
- literal_index, pattern, flags);
+ Node* result = FastCloneRegExpStub::Generate(
+ assembler, closure, literal_index, pattern, flags, context);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1359,15 +1807,68 @@ void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* flags_raw = __ BytecodeOperandFlag(2);
+ Node* flags = __ SmiTag(flags_raw);
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+ literal_index, constant_elements, flags);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
// CreateObjectLiteral <element_idx> <literal_idx> <flags>
//
-// Creates an object literal for literal index <literal_idx> with flags <flags>
-// and constant elements in <element_idx>.
+// Creates an object literal for literal index <literal_idx> with
+// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
- DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
+ Node* literal_index_raw = __ BytecodeOperandIdx(1);
+ Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* bytecode_flags = __ BytecodeOperandFlag(2);
+ Node* closure = __ LoadRegister(Register::function_closure());
+
+ // Check if we can do a fast clone or have to call the runtime.
+ Label if_fast_clone(assembler),
+ if_not_fast_clone(assembler, Label::kDeferred);
+ Node* fast_clone_properties_count =
+ __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
+ bytecode_flags);
+ __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+
+ __ Bind(&if_fast_clone);
+ {
+ // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
+ Node* result = FastCloneShallowObjectStub::GenerateFastPath(
+ assembler, &if_not_fast_clone, closure, literal_index,
+ fast_clone_properties_count);
+ __ StoreRegister(result, __ BytecodeOperandReg(3));
+ __ Dispatch();
+ }
+
+ __ Bind(&if_not_fast_clone);
+ {
+ // If we can't do a fast clone, call into the runtime.
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant_elements = __ LoadConstantPoolEntry(index);
+ Node* context = __ GetContext();
+
+ STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
+ Node* flags_raw = __ Word32And(
+ bytecode_flags,
+ __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
+ Node* flags = __ SmiTag(flags_raw);
+
+ Node* result =
+ __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+ literal_index, constant_elements, flags);
+ __ StoreRegister(result, __ BytecodeOperandReg(3));
+ // TODO(klaasb) build a single dispatch once the call is inlined
+ __ Dispatch();
+ }
}
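
Both paths decode the same packed flag byte: one bit field carries the fast-clone properties count, the low bits carry the runtime flags, so a BitFieldDecode plus a masked Word32And splits them without extra operands. A sketch of that encoding with invented field widths:

#include <cstdint>
#include <cstdio>

// Hypothetical layout (widths invented for illustration):
// bits 0-2: literal flags, bits 3-7: fast-clone properties count.
struct FlagsBits {
  static constexpr uint32_t kShift = 0, kSize = 3;
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
};
struct FastClonePropertiesCountBits {
  static constexpr uint32_t kShift = 3, kSize = 5;
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
};

template <class BitField>
uint32_t BitFieldDecode(uint32_t packed) {
  return (packed & BitField::kMask) >> BitField::kShift;
}

int main() {
  uint32_t bytecode_flags = (6u << FastClonePropertiesCountBits::kShift) | 5u;
  // A non-zero count means the fast clone path can be taken.
  uint32_t count = BitFieldDecode<FastClonePropertiesCountBits>(bytecode_flags);
  uint32_t flags = bytecode_flags & FlagsBits::kMask;  // kShift == 0
  printf("count=%u flags=%u\n", count, flags);         // count=6 flags=5
}
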
// CreateClosure <index> <tenured>
@@ -1375,16 +1876,84 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
- // TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
- // calling into the runtime.
Node* index = __ BytecodeOperandIdx(0);
Node* shared = __ LoadConstantPoolEntry(index);
- Node* tenured_raw = __ BytecodeOperandFlag(1);
- Node* tenured = __ SmiTag(tenured_raw);
+ Node* flags = __ BytecodeOperandFlag(1);
Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
- __ SetAccumulator(result);
+
+ Label call_runtime(assembler, Label::kDeferred);
+ Node* fast_new_closure = __ Word32And(
+ flags, __ Int32Constant(CreateClosureFlags::FastNewClosureBit::kMask));
+ __ GotoUnless(fast_new_closure, &call_runtime);
+ __ SetAccumulator(FastNewClosureStub::Generate(assembler, shared, context));
+ __ Dispatch();
+
+ __ Bind(&call_runtime);
+ {
+ STATIC_ASSERT(CreateClosureFlags::PretenuredBit::kShift == 0);
+ Node* tenured_raw = __ Word32And(
+ flags, __ Int32Constant(CreateClosureFlags::PretenuredBit::kMask));
+ Node* tenured = __ SmiTag(tenured_raw);
+ Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
+ shared, tenured);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+}
+
+// CreateBlockContext <index>
+//
+// Creates a new block context with the scope info constant at |index| and the
+// closure in the accumulator.
+void Interpreter::DoCreateBlockContext(InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* scope_info = __ LoadConstantPoolEntry(index);
+ Node* closure = __ GetAccumulator();
+ Node* context = __ GetContext();
+ __ SetAccumulator(
+ __ CallRuntime(Runtime::kPushBlockContext, context, scope_info, closure));
+ __ Dispatch();
+}
+
+// CreateCatchContext <exception> <index>
+//
+// Creates a new context for a catch block with the |exception| in a register,
+// the variable name at |index| and the closure in the accumulator.
+void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
+ Node* exception_reg = __ BytecodeOperandReg(0);
+ Node* exception = __ LoadRegister(exception_reg);
+ Node* index = __ BytecodeOperandIdx(1);
+ Node* name = __ LoadConstantPoolEntry(index);
+ Node* closure = __ GetAccumulator();
+ Node* context = __ GetContext();
+ __ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name,
+ exception, closure));
+ __ Dispatch();
+}
+
+// CreateFunctionContext <slots>
+//
+// Creates a new context with number of |slots| for the function closure.
+void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* slots = __ BytecodeOperandIdx(0);
+ Node* context = __ GetContext();
+ __ SetAccumulator(
+ FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
+ __ Dispatch();
+}
+
+// CreateWithContext <register>
+//
+// Creates a new context for a with-statement with the object in |register| and
+// the closure in the accumulator.
+void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* closure = __ GetAccumulator();
+ Node* context = __ GetContext();
+ __ SetAccumulator(
+ __ CallRuntime(Runtime::kPushWithContext, context, object, closure));
__ Dispatch();
}
@@ -1394,18 +1963,48 @@ void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
- __ SetAccumulator(result);
- __ Dispatch();
-}
+ Label if_duplicate_parameters(assembler, Label::kDeferred);
+ Label if_not_duplicate_parameters(assembler);
+
+ // Check if function has duplicate parameters.
+ // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
+ // duplicate parameters.
+ Node* shared_info =
+ __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* compiler_hints = __ LoadObjectField(
+ shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
+ MachineType::Uint8());
+ Node* duplicate_parameters_bit = __ Int32Constant(
+ 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
+ Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
+ __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+
+ __ Bind(&if_not_duplicate_parameters);
+ {
+ // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
+ Node* target = __ HeapConstant(callable.code());
+ Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+
+ __ Bind(&if_duplicate_parameters);
+ {
+ Node* result =
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+ }
+}
// CreateUnmappedArguments
//
// Creates a new unmapped arguments object.
void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate_);
+ // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
Node* target = __ HeapConstant(callable.code());
Node* context = __ GetContext();
Node* closure = __ LoadRegister(Register::function_closure());
@@ -1418,7 +2017,8 @@ void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
//
// Creates a new rest parameter array.
void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
- Callable callable = CodeFactory::FastNewRestParameter(isolate_);
+ // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
+ Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
Node* target = __ HeapConstant(callable.code());
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
@@ -1431,8 +2031,46 @@ void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
//
// Performs a stack guard check.
void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
- __ StackCheck();
+ Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
+
+ Node* interrupt = __ StackCheckTriggeredInterrupt();
+ __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+
+ __ Bind(&ok);
__ Dispatch();
+
+ __ Bind(&stack_check_interrupt);
+ {
+ Node* context = __ GetContext();
+ __ CallRuntime(Runtime::kStackGuard, context);
+ __ Dispatch();
+ }
+}
+
+// OsrPoll <loop_depth>
+//
+// Performs a loop nesting check and potentially triggers OSR.
+void Interpreter::DoOsrPoll(InterpreterAssembler* assembler) {
+ Node* loop_depth = __ BytecodeOperandImm(0);
+ Node* osr_level = __ LoadOSRNestingLevel();
+
+ // Check if OSR points at the given {loop_depth} are armed by comparing it to
+ // the current {osr_level} loaded from the header of the BytecodeArray.
+ Label ok(assembler), osr_armed(assembler, Label::kDeferred);
+ Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
+ __ Branch(condition, &ok, &osr_armed);
+
+ __ Bind(&ok);
+ __ Dispatch();
+
+ __ Bind(&osr_armed);
+ {
+ Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
+ Node* target = __ HeapConstant(callable.code());
+ Node* context = __ GetContext();
+ __ CallStub(callable.descriptor(), target, context);
+ __ Dispatch();
+ }
}
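
The poll stays cheap because both inputs are immediate: the bytecode carries the loop's static nesting depth and the BytecodeArray header carries an armed OSR level, so the common case is one comparison followed by a dispatch. An illustrative model of the check (field names invented):

#include <cstdio>

// Illustrative model: the runtime arms OSR by raising the level stored in
// the function's bytecode metadata; each loop back-edge polls with its
// static depth and triggers only when depth < armed level (the inverse of
// the Int32GreaterThanOrEqual check above).
struct BytecodeArrayHeader {
  int osr_nesting_level = 0;  // 0: nothing armed, every poll falls through
};

static bool ShouldTriggerOsr(const BytecodeArrayHeader& h, int loop_depth) {
  return !(loop_depth >= h.osr_nesting_level);
}

int main() {
  BytecodeArrayHeader header;
  printf("%d\n", ShouldTriggerOsr(header, 0));  // 0: not armed
  header.osr_nesting_level = 1;                 // runtime arms shallow loops
  printf("%d\n", ShouldTriggerOsr(header, 0));  // 1: OSR stub is called
  printf("%d\n", ShouldTriggerOsr(header, 3));  // 0: deeper loops keep going
}
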
// Throw
@@ -1446,7 +2084,6 @@ void Interpreter::DoThrow(InterpreterAssembler* assembler) {
__ Abort(kUnexpectedReturnFromThrow);
}
-
// ReThrow
//
// Re-throws the exception in the accumulator.
@@ -1458,12 +2095,13 @@ void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
__ Abort(kUnexpectedReturnFromThrow);
}
-
// Return
//
// Return the value in the accumulator.
void Interpreter::DoReturn(InterpreterAssembler* assembler) {
- __ InterpreterReturn();
+ __ UpdateInterruptBudgetOnReturn();
+ Node* accumulator = __ GetAccumulator();
+ __ Return(accumulator);
}
// Debugger
@@ -1489,26 +2127,91 @@ void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
-// ForInPrepare <cache_info_triple>
+void Interpreter::BuildForInPrepareResult(Node* output_register,
+ Node* cache_type, Node* cache_array,
+ Node* cache_length,
+ InterpreterAssembler* assembler) {
+ __ StoreRegister(cache_type, output_register);
+ output_register = __ NextRegister(output_register);
+ __ StoreRegister(cache_array, output_register);
+ output_register = __ NextRegister(output_register);
+ __ StoreRegister(cache_length, output_register);
+}
+
+// ForInPrepare <receiver> <cache_info_triple>
//
-// Returns state for for..in loop execution based on the object in the
-// accumulator. The result is output in registers |cache_info_triple| to
+// Returns state for for..in loop execution based on the object in the register
+// |receiver|. The object must not be null or undefined and must have been
+// converted to a receiver already.
+// The result is output in registers |cache_info_triple| to
// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
// and cache_length respectively.
void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
- Node* object = __ GetAccumulator();
+ Node* object_reg = __ BytecodeOperandReg(0);
+ Node* receiver = __ LoadRegister(object_reg);
Node* context = __ GetContext();
- Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);
-
- // Set output registers:
- // 0 == cache_type, 1 == cache_array, 2 == cache_length
- Node* output_register = __ BytecodeOperandReg(0);
- for (int i = 0; i < 3; i++) {
- Node* cache_info = __ Projection(i, result_triple);
- __ StoreRegister(cache_info, output_register);
- output_register = __ NextRegister(output_register);
+ Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));
+
+ Label nothing_to_iterate(assembler, Label::kDeferred),
+ use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
+
+ if (FLAG_debug_code) {
+ Label already_receiver(assembler), abort(assembler);
+ Node* instance_type = __ LoadInstanceType(receiver);
+ Node* first_receiver_type = __ Int32Constant(FIRST_JS_RECEIVER_TYPE);
+ __ BranchIfInt32GreaterThanOrEqual(instance_type, first_receiver_type,
+ &already_receiver, &abort);
+ __ Bind(&abort);
+ {
+ __ Abort(kExpectedJSReceiver);
+ // TODO(klaasb) remove this unreachable Goto once Abort ends the block
+ __ Goto(&already_receiver);
+ }
+ __ Bind(&already_receiver);
+ }
+
+ __ CheckEnumCache(receiver, &use_enum_cache, &use_runtime);
+
+ __ Bind(&use_enum_cache);
+ {
+ // The enum cache is valid. Load the map of the object being
+ // iterated over and use the cache for the iteration.
+ Node* cache_type = __ LoadMap(receiver);
+ Node* cache_length = __ EnumLength(cache_type);
+ __ GotoIf(assembler->WordEqual(cache_length, zero_smi),
+ &nothing_to_iterate);
+ Node* descriptors = __ LoadMapDescriptors(cache_type);
+ Node* cache_offset =
+ __ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
+ Node* cache_array = __ LoadObjectField(
+ cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
+ Node* output_register = __ BytecodeOperandReg(1);
+ BuildForInPrepareResult(output_register, cache_type, cache_array,
+ cache_length, assembler);
+ __ Dispatch();
+ }
+
+ __ Bind(&use_runtime);
+ {
+ Node* result_triple =
+ __ CallRuntime(Runtime::kForInPrepare, context, receiver);
+ Node* cache_type = __ Projection(0, result_triple);
+ Node* cache_array = __ Projection(1, result_triple);
+ Node* cache_length = __ Projection(2, result_triple);
+ Node* output_register = __ BytecodeOperandReg(1);
+ BuildForInPrepareResult(output_register, cache_type, cache_array,
+ cache_length, assembler);
+ __ Dispatch();
+ }
+
+ __ Bind(&nothing_to_iterate);
+ {
+ // Receiver is null or undefined, or the descriptor array is zero length.
+ Node* output_register = __ BytecodeOperandReg(1);
+ BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi,
+ assembler);
+ __ Dispatch();
}
- __ Dispatch();
}
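
All three paths converge on the same (cache_type, cache_array, cache_length) triple, and the map stored as cache_type is what lets ForInNext cheaply revalidate the snapshot on every iteration. A toy model of why a snapshot-based for-in needs a filtering slow path once the object mutates mid-loop:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Toy model of the for-in protocol: Prepare snapshots the keys once
// (the "cache_array"); Next must re-check each key because properties
// can be deleted while the loop runs (the ForInFilter slow path).
using Object = std::map<std::string, int>;

static std::vector<std::string> ForInPrepare(const Object& o) {
  std::vector<std::string> cache_array;
  for (const auto& entry : o) cache_array.push_back(entry.first);
  return cache_array;
}

int main() {
  Object o{{"a", 1}, {"b", 2}, {"c", 3}};
  std::vector<std::string> keys = ForInPrepare(o);  // cache_length == 3
  o.erase("b");  // mutation invalidates the simple fast path
  for (const std::string& key : keys) {
    if (o.count(key) == 0) continue;  // ForInFilter: skip deleted keys
    printf("%s -> %d\n", key.c_str(), o[key]);
  }
}
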
// ForInNext <receiver> <index> <cache_info_pair>
@@ -1525,13 +2228,13 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* cache_array = __ LoadRegister(cache_array_reg);
// Load the next key from the enumeration array.
- Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
+ Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
+ CodeStubAssembler::SMI_PARAMETERS);
// Check if we can use the for-in fast path potentially using the enum cache.
- InterpreterAssembler::Label if_fast(assembler), if_slow(assembler);
+ Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
- Node* condition = __ WordEqual(receiver_map, cache_type);
- __ Branch(condition, &if_fast, &if_slow);
+ __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -1545,13 +2248,13 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* megamorphic_sentinel =
__ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
- __ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index,
- megamorphic_sentinel);
+ __ StoreFixedArrayElement(type_feedback_vector, vector_index,
+ megamorphic_sentinel, SKIP_WRITE_BARRIER);
// Need to filter the {key} for the {receiver}.
Node* context = __ GetContext();
- Node* result =
- __ CallRuntime(Runtime::kForInFilter, context, receiver, key);
+ Callable callable = CodeFactory::ForInFilter(assembler->isolate());
+ Node* result = __ CallStub(callable, context, key, receiver);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1567,21 +2270,20 @@ void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
Node* cache_length = __ LoadRegister(cache_length_reg);
// Check if {index} is at {cache_length} already.
- InterpreterAssembler::Label if_true(assembler), if_false(assembler);
- Node* condition = __ WordEqual(index, cache_length);
- __ Branch(condition, &if_true, &if_false);
+ Label if_true(assembler), if_false(assembler), end(assembler);
+ __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
__ Bind(&if_true);
{
- Node* result = __ BooleanConstant(true);
- __ SetAccumulator(result);
- __ Dispatch();
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
}
__ Bind(&if_false);
{
- Node* result = __ BooleanConstant(false);
- __ SetAccumulator(result);
- __ Dispatch();
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
}
+ __ Bind(&end);
+ __ Dispatch();
}
// ForInStep <index>
@@ -1618,6 +2320,76 @@ void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
__ Abort(kInvalidBytecode);
}
+// Nop
+//
+// No operation.
+void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }
+
+// SuspendGenerator <generator>
+//
+// Exports the register file and stores it into the generator. Also stores the
+// current context, the state given in the accumulator, and the current bytecode
+// offset (for debugging purposes) into the generator.
+void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
+ Node* generator_reg = __ BytecodeOperandReg(0);
+ Node* generator = __ LoadRegister(generator_reg);
+
+ Label if_stepping(assembler, Label::kDeferred), ok(assembler);
+ Node* step_action_address = __ ExternalConstant(
+ ExternalReference::debug_last_step_action_address(isolate_));
+ Node* step_action = __ Load(MachineType::Int8(), step_action_address);
+ STATIC_ASSERT(StepIn > StepNext);
+ STATIC_ASSERT(StepFrame > StepNext);
+ STATIC_ASSERT(LastStepAction == StepFrame);
+ Node* step_next = __ Int32Constant(StepNext);
+ __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
+ __ Bind(&ok);
+
+ Node* array =
+ __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
+ Node* context = __ GetContext();
+ Node* state = __ GetAccumulator();
+
+ __ ExportRegisterFile(array);
+ __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
+ __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);
+
+ Node* offset = __ SmiTag(__ BytecodeOffset());
+ __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
+ offset);
+
+ __ Dispatch();
+
+ __ Bind(&if_stepping);
+ {
+ Node* context = __ GetContext();
+ __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
+ __ Goto(&ok);
+ }
+}
+
+// ResumeGenerator <generator>
+//
+// Imports the register file stored in the generator. Also loads the
+// generator's state and stores it in the accumulator, before overwriting it
+// with kGeneratorExecuting.
+void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
+ Node* generator_reg = __ BytecodeOperandReg(0);
+ Node* generator = __ LoadRegister(generator_reg);
+
+ __ ImportRegisterFile(
+ __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));
+
+ Node* old_state =
+ __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
+ Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting);
+ __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
+ __ SmiTag(new_state));
+ __ SetAccumulator(old_state);
+
+ __ Dispatch();
+}
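
Suspend and resume are symmetric: suspend copies the live registers, the context and a state value into the generator object; resume copies the registers back, hands the saved state to the accumulator, and marks the generator as executing so re-entry can be detected. A compact sketch of that state machine with invented field names:

#include <cstdio>
#include <vector>

// Invented stand-in for JSGeneratorObject's relevant fields.
struct Generator {
  static const int kExecuting = -2;
  std::vector<int> operand_stack;  // exported register file
  int continuation = 0;            // suspend point, or kExecuting
};

struct Frame {
  std::vector<int> registers;
};

static void Suspend(Generator* gen, const Frame& frame, int state) {
  gen->operand_stack = frame.registers;  // ExportRegisterFile
  gen->continuation = state;             // where to resume
}

static int Resume(Generator* gen, Frame* frame) {
  frame->registers = gen->operand_stack;      // ImportRegisterFile
  int old_state = gen->continuation;
  gen->continuation = Generator::kExecuting;  // guards against re-entry
  return old_state;                           // lands in the "accumulator"
}

int main() {
  Generator gen;
  Frame frame{{10, 20, 30}};
  Suspend(&gen, frame, /*state=*/1);

  Frame resumed;
  int state = Resume(&gen, &resumed);
  printf("resumed at state %d, r0=%d\n", state, resumed.registers[0]);
}
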
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index ea50faa02d..bbd0102999 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -5,11 +5,13 @@
#ifndef V8_INTERPRETER_INTERPRETER_H_
#define V8_INTERPRETER_INTERPRETER_H_
+#include <memory>
+
// Clients of this interface shouldn't depend on lots of interpreter internals.
// Do not include anything from src/interpreter other than
// src/interpreter/bytecodes.h here!
#include "src/base/macros.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
#include "src/interpreter/bytecodes.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
@@ -21,6 +23,10 @@ class Isolate;
class Callable;
class CompilationInfo;
+namespace compiler {
+class Node;
+} // namespace compiler
+
namespace interpreter {
class InterpreterAssembler;
@@ -49,10 +55,19 @@ class Interpreter {
void TraceCodegen(Handle<Code> code);
const char* LookupNameOfBytecodeHandler(Code* code);
+ Local<v8::Object> GetDispatchCountersObject();
+
Address dispatch_table_address() {
return reinterpret_cast<Address>(&dispatch_table_[0]);
}
+ Address bytecode_dispatch_counters_table() {
+ return reinterpret_cast<Address>(bytecode_dispatch_counters_table_.get());
+ }
+
+ // TODO(ignition): Tune code size multiplier.
+ static const int kCodeSizeMultiplier = 32;
+
private:
// Bytecode handler generator functions.
#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
@@ -60,43 +75,47 @@ class Interpreter {
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
- // Generates code to perform the binary operations via |callable|.
- void DoBinaryOp(Callable callable, InterpreterAssembler* assembler);
+ // Generates code to perform the binary operation via |Generator|.
+ template <class Generator>
+ void DoBinaryOp(InterpreterAssembler* assembler);
+
+ // Generates code to perform the binary operation via |Generator|.
+ template <class Generator>
+ void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
- // Generates code to perform the binary operations via |function_id|.
- void DoBinaryOp(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler);
+ // Generates code to perform the bitwise binary operation corresponding to
+ // |bitwise_op| while gathering type feedback.
+ void DoBitwiseBinaryOp(Token::Value bitwise_op,
+ InterpreterAssembler* assembler);
+
+ // Generates code to perform the binary operation via |Generator| using
+ // an immediate value rather the accumulator as the rhs operand.
+ template <class Generator>
+ void DoBinaryOpWithImmediate(InterpreterAssembler* assembler);
- // Generates code to perform the count operations via |function_id|.
- void DoCountOp(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler);
+ // Generates code to perform the unary operation via |Generator|.
+ template <class Generator>
+ void DoUnaryOp(InterpreterAssembler* assembler);
+
+ // Generates code to perform the unary operation via |Generator| while
+ // gathering type feedback.
+ template <class Generator>
+ void DoUnaryOpWithFeedback(InterpreterAssembler* assembler);
// Generates code to perform the comparison operation associated with
// |compare_op|.
void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
- // Generates code to load a constant from the constant pool.
- void DoLoadConstant(InterpreterAssembler* assembler);
-
- // Generates code to perform a global load via |ic|.
- void DoLoadGlobal(Callable ic, InterpreterAssembler* assembler);
-
// Generates code to perform a global store via |ic|.
- void DoStoreGlobal(Callable ic, InterpreterAssembler* assembler);
-
- // Generates code to perform a named property load via |ic|.
- void DoLoadIC(Callable ic, InterpreterAssembler* assembler);
-
- // Generates code to perform a keyed property load via |ic|.
- void DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler);
+ void DoStaGlobal(Callable ic, InterpreterAssembler* assembler);
- // Generates code to perform a namedproperty store via |ic|.
+ // Generates code to perform a named property store via |ic|.
void DoStoreIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a keyed property store via |ic|.
void DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler);
- // Generates code to perform a JS call.
+ // Generates code to perform a JS call that collects type feedback.
void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode);
// Generates code to perform a runtime call.
@@ -111,24 +130,49 @@ class Interpreter {
// Generates code to perform a constructor call.
void DoCallConstruct(InterpreterAssembler* assembler);
- // Generates code to perform a type conversion.
- void DoTypeConversionOp(Callable callable, InterpreterAssembler* assembler);
-
- // Generates code ro create a literal via |function_id|.
- void DoCreateLiteral(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler);
-
// Generates code to perform delete via function_id.
void DoDelete(Runtime::FunctionId function_id,
InterpreterAssembler* assembler);
// Generates code to perform a lookup slot load via |function_id|.
- void DoLoadLookupSlot(Runtime::FunctionId function_id,
- InterpreterAssembler* assembler);
+ void DoLdaLookupSlot(Runtime::FunctionId function_id,
+ InterpreterAssembler* assembler);
// Generates code to perform a lookup slot store depending on |language_mode|.
- void DoStoreLookupSlot(LanguageMode language_mode,
- InterpreterAssembler* assembler);
+ void DoStaLookupSlot(LanguageMode language_mode,
+ InterpreterAssembler* assembler);
+
+ // Generates a node with the undefined constant.
+ compiler::Node* BuildLoadUndefined(InterpreterAssembler* assembler);
+
+ // Generates code to load a context slot.
+ compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
+
+ // Generates code to load a global.
+ compiler::Node* BuildLoadGlobal(Callable ic, InterpreterAssembler* assembler);
+
+ // Generates code to load a named property.
+ compiler::Node* BuildLoadNamedProperty(Callable ic,
+ InterpreterAssembler* assembler);
+
+ // Generates code to load a keyed property.
+ compiler::Node* BuildLoadKeyedProperty(Callable ic,
+ InterpreterAssembler* assembler);
+
+ // Generates code to prepare the result for ForInPrepare. Cache data
+ // are placed into the consecutive series of registers starting at
+ // |output_register|.
+ void BuildForInPrepareResult(compiler::Node* output_register,
+ compiler::Node* cache_type,
+ compiler::Node* cache_array,
+ compiler::Node* cache_length,
+ InterpreterAssembler* assembler);
+
+ // Generates code to perform the unary operation via |callable|.
+ compiler::Node* BuildUnaryOp(Callable callable,
+ InterpreterAssembler* assembler);
+
+ uintptr_t GetDispatchCounter(Bytecode from, Bytecode to) const;
// Get dispatch table index of bytecode.
static size_t GetDispatchTableIndex(Bytecode bytecode,
@@ -138,9 +182,11 @@ class Interpreter {
static const int kNumberOfWideVariants = 3;
static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
+ static const int kNumberOfBytecodes = static_cast<int>(Bytecode::kLast) + 1;
Isolate* isolate_;
- Code* dispatch_table_[kDispatchTableSize];
+ Address dispatch_table_[kDispatchTableSize];
+ std::unique_ptr<uintptr_t[]> bytecode_dispatch_counters_table_;
DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
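
The new counters table is a flat kNumberOfBytecodes x kNumberOfBytecodes array of uintptr_t, where entry (from, to) counts how often bytecode `to` was dispatched immediately after `from`; GetDispatchCountersObject later exposes those counts. A sketch of the indexing with a toy bytecode enum:

#include <cstdint>
#include <cstdio>
#include <memory>

// Toy bytecode set; the real table is indexed by V8's Bytecode enum the
// same way, with kNumberOfBytecodes derived from Bytecode::kLast.
enum class Bytecode : uint8_t { kLdar, kStar, kAdd, kReturn, kLast = kReturn };
static const int kNumberOfBytecodes = static_cast<int>(Bytecode::kLast) + 1;

int main() {
  // One flat row-major table, zero-initialized.
  std::unique_ptr<uintptr_t[]> counters(
      new uintptr_t[kNumberOfBytecodes * kNumberOfBytecodes]());

  auto count_dispatch = [&](Bytecode from, Bytecode to) {
    counters[static_cast<int>(from) * kNumberOfBytecodes +
             static_cast<int>(to)]++;
  };

  count_dispatch(Bytecode::kLdar, Bytecode::kAdd);
  count_dispatch(Bytecode::kLdar, Bytecode::kAdd);
  count_dispatch(Bytecode::kAdd, Bytecode::kReturn);

  uintptr_t ldar_to_add = counters[0 * kNumberOfBytecodes + 2];
  printf("Ldar -> Add dispatched %zu times\n", (size_t)ldar_to_add);  // 2
}
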
diff --git a/deps/v8/src/interpreter/mkpeephole.cc b/deps/v8/src/interpreter/mkpeephole.cc
new file mode 100644
index 0000000000..8e9d5fea47
--- /dev/null
+++ b/deps/v8/src/interpreter/mkpeephole.cc
@@ -0,0 +1,383 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <array>
+#include <fstream>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "src/globals.h"
+#include "src/interpreter/bytecode-peephole-table.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+
+namespace interpreter {
+
+const char* ActionName(PeepholeAction action) {
+ switch (action) {
+#define CASE(Name) \
+ case PeepholeAction::k##Name: \
+ return "PeepholeAction::k" #Name;
+ PEEPHOLE_ACTION_LIST(CASE)
+#undef CASE
+ default:
+ UNREACHABLE();
+ return "";
+ }
+}
+
+std::string BytecodeName(Bytecode bytecode) {
+ return "Bytecode::k" + std::string(Bytecodes::ToString(bytecode));
+}
+
+class PeepholeActionTableWriter final {
+ public:
+ static const size_t kNumberOfBytecodes =
+ static_cast<size_t>(Bytecode::kLast) + 1;
+ typedef std::array<PeepholeActionAndData, kNumberOfBytecodes> Row;
+
+ void BuildTable();
+ void Write(std::ostream& os);
+
+ private:
+ static const char* kIndent;
+ static const char* kNamespaceElements[];
+
+ void WriteHeader(std::ostream& os);
+ void WriteIncludeFiles(std::ostream& os);
+ void WriteClassMethods(std::ostream& os);
+ void WriteUniqueRows(std::ostream& os);
+ void WriteRowMap(std::ostream& os);
+ void WriteRow(std::ostream& os, size_t row_index);
+ void WriteOpenNamespace(std::ostream& os);
+ void WriteCloseNamespace(std::ostream& os);
+
+ PeepholeActionAndData LookupActionAndData(Bytecode last, Bytecode current);
+ void BuildRow(Bytecode last, Row* row);
+ size_t HashRow(const Row* row);
+ void InsertRow(size_t row_index, const Row* const row, size_t row_hash,
+ std::map<size_t, size_t>* hash_to_row_map);
+ bool RowsEqual(const Row* const first, const Row* const second);
+
+ std::vector<Row>* table() { return &table_; }
+
+ // Table of unique rows.
+ std::vector<Row> table_;
+
+ // Mapping of row index to unique row index.
+ std::array<size_t, kNumberOfBytecodes> row_map_;
+};
+
+const char* PeepholeActionTableWriter::kIndent = " ";
+const char* PeepholeActionTableWriter::kNamespaceElements[] = {"v8", "internal",
+ "interpreter"};
+
+// static
+PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
+ Bytecode last, Bytecode current) {
+ // Optimize various accumulator loads followed by a store of the
+ // accumulator into an equivalent register load plus a load of the
+ // accumulator from that register. The trailing accumulator load is
+ // side-effect free and is often followed by another accumulator load,
+ // so it can frequently be elided.
+ if (current == Bytecode::kStar) {
+ switch (last) {
+ case Bytecode::kLdaNamedProperty:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrNamedProperty};
+ case Bytecode::kLdaKeyedProperty:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrKeyedProperty};
+ case Bytecode::kLdaGlobal:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrGlobal};
+ case Bytecode::kLdaContextSlot:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrContextSlot};
+ case Bytecode::kLdaUndefined:
+ return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
+ Bytecode::kLdrUndefined};
+ default:
+ break;
+ }
+ }
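+
+ // Illustrative shapes only (operand names assumed, not taken from this
+ // patch): the transform above rewrites, e.g.,
+ //   LdaGlobal [slot]; Star r1   =>   LdrGlobal [slot], r1; Ldar r1
+ // and the trailing Ldar is then a candidate for elision by later rules.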
+
+ // ToName bytecodes can be replaced by Star with the same output register if
+ // the value in the accumulator is already a name.
+ if (current == Bytecode::kToName && Bytecodes::PutsNameInAccumulator(last)) {
+ return {PeepholeAction::kChangeBytecodeAction, Bytecode::kStar};
+ }
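+
+ // Illustrative sketch (TypeOf assumed to be one such name-producing
+ // bytecode): TypeOf; ToName r2   =>   TypeOf; Star r2.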
+
+ // Nops are placeholders that hold source position information and can be
+ // elided if there is no source information.
+ if (last == Bytecode::kNop) {
+ if (Bytecodes::IsJump(current)) {
+ return {PeepholeAction::kElideLastBeforeJumpAction, Bytecode::kIllegal};
+ } else {
+ return {PeepholeAction::kElideLastAction, Bytecode::kIllegal};
+ }
+ }
+
+ // The accumulator is invisible to the debugger. If there is a sequence
+ // of consecutive accumulator loads (that don't have side effects) then
+ // only the final load is potentially visible.
+ if (Bytecodes::IsAccumulatorLoadWithoutEffects(last) &&
+ Bytecodes::IsAccumulatorLoadWithoutEffects(current)) {
+ return {PeepholeAction::kElideLastAction, Bytecode::kIllegal};
+ }
+
+ // The current instruction clobbers the accumulator without reading
+ // it. The load in the last instruction can be elided as it has no
+ // effect.
+ if (Bytecodes::IsAccumulatorLoadWithoutEffects(last) &&
+ Bytecodes::GetAccumulatorUse(current) == AccumulatorUse::kWrite) {
+ return {PeepholeAction::kElideLastAction, Bytecode::kIllegal};
+ }
+
+ // Ldar and Star make the accumulator and register hold equivalent
+ // values. Only the first bytecode is needed if there's a sequence
+ // of back-to-back Ldar and Star bytecodes with the same operand.
+ if (Bytecodes::IsLdarOrStar(last) && Bytecodes::IsLdarOrStar(current)) {
+ return {PeepholeAction::kElideCurrentIfOperand0MatchesAction,
+ Bytecode::kIllegal};
+ }
+
+ // Remove ToBoolean coercion from conditional jumps where possible.
+ if (Bytecodes::WritesBooleanToAccumulator(last)) {
+ if (Bytecodes::IsJumpIfToBoolean(current)) {
+ return {PeepholeAction::kChangeJumpBytecodeAction,
+ Bytecodes::GetJumpWithoutToBoolean(current)};
+ } else if (current == Bytecode::kToBooleanLogicalNot) {
+ return {PeepholeAction::kChangeBytecodeAction, Bytecode::kLogicalNot};
+ }
+ }
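+
+ // Illustrative sketch (bytecode names assumed): TestEqual already leaves a
+ // boolean in the accumulator, so
+ //   TestEqual r0; JumpIfToBooleanTrue   =>   TestEqual r0; JumpIfTrue.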
+
+ // Fuse LdaSmi followed by a binary op into a binary op with an
+ // immediate integer argument. This saves on dispatches and size.
+ if (last == Bytecode::kLdaSmi) {
+ switch (current) {
+ case Bytecode::kAdd:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kAddSmi};
+ case Bytecode::kSub:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kSubSmi};
+ case Bytecode::kBitwiseAnd:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kBitwiseAndSmi};
+ case Bytecode::kBitwiseOr:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kBitwiseOrSmi};
+ case Bytecode::kShiftLeft:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kShiftLeftSmi};
+ case Bytecode::kShiftRight:
+ return {PeepholeAction::kTransformLdaSmiBinaryOpToBinaryOpWithSmiAction,
+ Bytecode::kShiftRightSmi};
+ default:
+ break;
+ }
+ }
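+
+ // Illustrative sketch (operand shapes assumed):
+ //   LdaSmi [1]; Add r0   =>   AddSmi [1], r0
+ // i.e. one dispatch and one bytecode instead of two of each.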
+
+ // Fuse LdaZero followed by a binary op into a binary op with a zero
+ // immediate argument. This saves dispatches, but not size.
+ if (last == Bytecode::kLdaZero) {
+ switch (current) {
+ case Bytecode::kAdd:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kAddSmi};
+ case Bytecode::kSub:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kSubSmi};
+ case Bytecode::kBitwiseAnd:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kBitwiseAndSmi};
+ case Bytecode::kBitwiseOr:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kBitwiseOrSmi};
+ case Bytecode::kShiftLeft:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kShiftLeftSmi};
+ case Bytecode::kShiftRight:
+ return {
+ PeepholeAction::kTransformLdaZeroBinaryOpToBinaryOpWithZeroAction,
+ Bytecode::kShiftRightSmi};
+ default:
+ break;
+ }
+ }
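+
+ // Illustrative sketch (operand shapes assumed):
+ //   LdaZero; Sub r0   =>   SubSmi [0], r0
+ // which saves a dispatch but still encodes the zero, hence no size win.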
+
+ // If there is no last bytecode to optimize against, store the incoming
+ // bytecode, or, for jumps, emit the incoming bytecode immediately.
+ if (last == Bytecode::kIllegal) {
+ if (Bytecodes::IsJump(current)) {
+ return {PeepholeAction::kUpdateLastJumpAction, Bytecode::kIllegal};
+ } else if (current == Bytecode::kNop) {
+ return {PeepholeAction::kUpdateLastIfSourceInfoPresentAction,
+ Bytecode::kIllegal};
+ } else {
+ return {PeepholeAction::kUpdateLastAction, Bytecode::kIllegal};
+ }
+ }
+
+ // No matches, take the default action.
+ if (Bytecodes::IsJump(current)) {
+ return {PeepholeAction::kDefaultJumpAction, Bytecode::kIllegal};
+ } else {
+ return {PeepholeAction::kDefaultAction, Bytecode::kIllegal};
+ }
+}
+
+void PeepholeActionTableWriter::Write(std::ostream& os) {
+ WriteHeader(os);
+ WriteIncludeFiles(os);
+ WriteOpenNamespace(os);
+ WriteUniqueRows(os);
+ WriteRowMap(os);
+ WriteClassMethods(os);
+ WriteCloseNamespace(os);
+}
+
+void PeepholeActionTableWriter::WriteHeader(std::ostream& os) {
+ os << "// Copyright 2016 the V8 project authors. All rights reserved.\n"
+ << "// Use of this source code is governed by a BSD-style license that\n"
+ << "// can be found in the LICENSE file.\n\n"
+ << "// Autogenerated by " __FILE__ ". Do not edit.\n\n";
+}
+
+void PeepholeActionTableWriter::WriteIncludeFiles(std::ostream& os) {
+ os << "#include \"src/interpreter/bytecode-peephole-table.h\"\n\n";
+}
+
+void PeepholeActionTableWriter::WriteUniqueRows(std::ostream& os) {
+ os << "const PeepholeActionAndData PeepholeActionTable::row_data_["
+ << table_.size() << "][" << kNumberOfBytecodes << "] = {\n";
+ for (size_t i = 0; i < table_.size(); ++i) {
+ os << "{\n";
+ WriteRow(os, i);
+ os << "},\n";
+ }
+ os << "};\n\n";
+}
+
+void PeepholeActionTableWriter::WriteRowMap(std::ostream& os) {
+ os << "const PeepholeActionAndData* const PeepholeActionTable::row_["
+ << kNumberOfBytecodes << "] = {\n";
+ for (size_t i = 0; i < kNumberOfBytecodes; ++i) {
+ os << kIndent << " PeepholeActionTable::row_data_[" << row_map_[i]
+ << "], \n";
+ }
+ os << "};\n\n";
+}
+
+void PeepholeActionTableWriter::WriteRow(std::ostream& os, size_t row_index) {
+ const Row row = table_.at(row_index);
+ for (PeepholeActionAndData action_data : row) {
+ os << kIndent << "{" << ActionName(action_data.action) << ","
+ << BytecodeName(action_data.bytecode) << "},\n";
+ }
+}
+
+void PeepholeActionTableWriter::WriteOpenNamespace(std::ostream& os) {
+ for (auto element : kNamespaceElements) {
+ os << "namespace " << element << " {\n";
+ }
+ os << "\n";
+}
+
+void PeepholeActionTableWriter::WriteCloseNamespace(std::ostream& os) {
+ for (auto element : kNamespaceElements) {
+ os << "} // namespace " << element << "\n";
+ }
+}
+
+void PeepholeActionTableWriter::WriteClassMethods(std::ostream& os) {
+ os << "// static\n"
+ << "const PeepholeActionAndData*\n"
+ << "PeepholeActionTable::Lookup(Bytecode last, Bytecode current) {\n"
+ << kIndent
+ << "return &row_[Bytecodes::ToByte(last)][Bytecodes::ToByte(current)];\n"
+ << "}\n\n";
+}
+
+void PeepholeActionTableWriter::BuildTable() {
+ std::map<size_t, size_t> hash_to_row_map;
+ Row row;
+ for (size_t i = 0; i < kNumberOfBytecodes; ++i) {
+ uint8_t byte_value = static_cast<uint8_t>(i);
+ Bytecode last = Bytecodes::FromByte(byte_value);
+ BuildRow(last, &row);
+ size_t row_hash = HashRow(&row);
+ InsertRow(i, &row, row_hash, &hash_to_row_map);
+ }
+}
+
+void PeepholeActionTableWriter::BuildRow(Bytecode last, Row* row) {
+ for (size_t i = 0; i < kNumberOfBytecodes; ++i) {
+ uint8_t byte_value = static_cast<uint8_t>(i);
+ Bytecode current = Bytecodes::FromByte(byte_value);
+ PeepholeActionAndData action_data = LookupActionAndData(last, current);
+ row->at(i) = action_data;
+ }
+}
+
+// static
+bool PeepholeActionTableWriter::RowsEqual(const Row* const first,
+ const Row* const second) {
+ return memcmp(first, second, sizeof(*first)) == 0;
+}
+
+// static
+void PeepholeActionTableWriter::InsertRow(
+ size_t row_index, const Row* const row, size_t row_hash,
+ std::map<size_t, size_t>* hash_to_row_map) {
+ // Insert row if no existing row matches, otherwise use existing row.
+ auto iter = hash_to_row_map->find(row_hash);
+ if (iter == hash_to_row_map->end()) {
+ row_map_[row_index] = table()->size();
+ table()->push_back(*row);
+ } else {
+ row_map_[row_index] = iter->second;
+
+ // If the following DCHECK fails, HashRow() is not adequate.
+ DCHECK(RowsEqual(&table()->at(iter->second), row));
+ }
+}
+
+// static
+size_t PeepholeActionTableWriter::HashRow(const Row* row) {
+ static const size_t kHashShift = 3;
+ std::size_t result = (1u << 31) - 1u;
+ const uint8_t* raw_data = reinterpret_cast<const uint8_t*>(row);
+ for (size_t i = 0; i < sizeof(*row); ++i) {
+ size_t top_bits = result >> (kBitsPerByte * sizeof(size_t) - kHashShift);
+ result = (result << kHashShift) ^ top_bits ^ raw_data[i];
+ }
+ return result;
+}
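+
+// A note on the scheme above: HashRow() is a rolling shift-xor hash over the
+// row's raw bytes. Unequal rows with a colliding hash would silently share a
+// row_map_ entry when DCHECKs are disabled; the DCHECK in InsertRow() is the
+// debug-build guard against that.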
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
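+// Usage sketch (assumed invocation; the actual wiring lives in the build
+// files): mkpeephole <output-path>, e.g.
+//   mkpeephole gen/bytecode-peephole-table.cc
+// main() below writes the generated table to the file named by argv[1].
+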
+int main(int argc, const char* argv[]) {
+ CHECK_EQ(argc, 2);
+
+ std::ofstream ofs(argv[1], std::ofstream::trunc);
+ v8::internal::interpreter::PeepholeActionTableWriter writer;
+ writer.BuildTable();
+ writer.Write(ofs);
+ ofs.flush();
+ ofs.close();
+
+ return 0;
+}
diff --git a/deps/v8/src/interpreter/source-position-table.h b/deps/v8/src/interpreter/source-position-table.h
deleted file mode 100644
index 3ac58d6217..0000000000
--- a/deps/v8/src/interpreter/source-position-table.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
-#define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
-
-#include "src/assert-scope.h"
-#include "src/checks.h"
-#include "src/handles.h"
-#include "src/log.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-class ByteArray;
-class Isolate;
-class Zone;
-
-namespace interpreter {
-
-struct PositionTableEntry {
- PositionTableEntry()
- : bytecode_offset(0), source_position(0), is_statement(false) {}
- PositionTableEntry(int bytecode, int source, bool statement)
- : bytecode_offset(bytecode),
- source_position(source),
- is_statement(statement) {}
-
- int bytecode_offset;
- int source_position;
- bool is_statement;
-};
-
-class SourcePositionTableBuilder : public PositionsRecorder {
- public:
- SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- bytes_(zone),
-#ifdef ENABLE_SLOW_DCHECKS
- raw_entries_(zone),
-#endif
- candidate_(kUninitializedCandidateOffset, 0, false) {
- }
-
- void AddStatementPosition(size_t bytecode_offset, int source_position);
- void AddExpressionPosition(size_t bytecode_offset, int source_position);
- Handle<ByteArray> ToSourcePositionTable();
-
- private:
- static const int kUninitializedCandidateOffset = -1;
-
- void AddEntry(const PositionTableEntry& entry);
- void CommitEntry();
-
- Isolate* isolate_;
- ZoneVector<byte> bytes_;
-#ifdef ENABLE_SLOW_DCHECKS
- ZoneVector<PositionTableEntry> raw_entries_;
-#endif
- PositionTableEntry candidate_; // Next entry to be written, if initialized.
- PositionTableEntry previous_; // Previously written entry, to compute delta.
-};
-
-class SourcePositionTableIterator {
- public:
- explicit SourcePositionTableIterator(ByteArray* byte_array);
-
- void Advance();
-
- int bytecode_offset() const {
- DCHECK(!done());
- return current_.bytecode_offset;
- }
- int source_position() const {
- DCHECK(!done());
- return current_.source_position;
- }
- bool is_statement() const {
- DCHECK(!done());
- return current_.is_statement;
- }
- bool done() const { return index_ == kDone; }
-
- private:
- static const int kDone = -1;
-
- ByteArray* table_;
- int index_;
- PositionTableEntry current_;
- DisallowHeapAllocation no_gc;
-};
-
-} // namespace interpreter
-} // namespace internal
-} // namespace v8
-
-#endif // V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index da36f769a0..5c71d9188e 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -17,29 +17,34 @@ void Isolate::set_context(Context* context) {
thread_local_top_.context_ = context;
}
+Handle<Context> Isolate::native_context() {
+ return handle(context()->native_context(), this);
+}
+
+Context* Isolate::raw_native_context() { return context()->native_context(); }
Object* Isolate::pending_exception() {
DCHECK(has_pending_exception());
- DCHECK(!thread_local_top_.pending_exception_->IsException());
+ DCHECK(!thread_local_top_.pending_exception_->IsException(this));
return thread_local_top_.pending_exception_;
}
void Isolate::set_pending_exception(Object* exception_obj) {
- DCHECK(!exception_obj->IsException());
+ DCHECK(!exception_obj->IsException(this));
thread_local_top_.pending_exception_ = exception_obj;
}
void Isolate::clear_pending_exception() {
- DCHECK(!thread_local_top_.pending_exception_->IsException());
+ DCHECK(!thread_local_top_.pending_exception_->IsException(this));
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
bool Isolate::has_pending_exception() {
- DCHECK(!thread_local_top_.pending_exception_->IsException());
- return !thread_local_top_.pending_exception_->IsTheHole();
+ DCHECK(!thread_local_top_.pending_exception_->IsException(this));
+ return !thread_local_top_.pending_exception_->IsTheHole(this);
}
@@ -50,19 +55,19 @@ void Isolate::clear_pending_message() {
Object* Isolate::scheduled_exception() {
DCHECK(has_scheduled_exception());
- DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
return thread_local_top_.scheduled_exception_;
}
bool Isolate::has_scheduled_exception() {
- DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
}
void Isolate::clear_scheduled_exception() {
- DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException(this));
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
}
@@ -71,9 +76,18 @@ bool Isolate::is_catchable_by_javascript(Object* exception) {
return exception != heap()->termination_exception();
}
+void Isolate::FireBeforeCallEnteredCallback() {
+ for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
+ before_call_entered_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
+ }
+}
Handle<JSGlobalObject> Isolate::global_object() {
- return Handle<JSGlobalObject>(context()->global_object(), this);
+ return handle(context()->global_object(), this);
+}
+
+Handle<JSObject> Isolate::global_proxy() {
+ return handle(context()->global_proxy(), this);
}
@@ -86,19 +100,31 @@ Isolate::ExceptionScope::~ExceptionScope() {
isolate_->set_pending_exception(*pending_exception_);
}
+SaveContext::SaveContext(Isolate* isolate)
+ : isolate_(isolate), prev_(isolate->save_context()) {
+ if (isolate->context() != NULL) {
+ context_ = Handle<Context>(isolate->context());
+ }
+ isolate->set_save_context(this);
+ c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
+}
+
+SaveContext::~SaveContext() {
+ isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_save_context(prev_);
+}
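+
+// Usage sketch (illustrative) for the RAII pair above:
+//   {
+//     SaveContext save(isolate);
+//     isolate->set_context(*other_context);  // |other_context| is assumed
+//     // ... run code against the other context ...
+//   }  // ~SaveContext restores the previous context (or NULL).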
-#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> Isolate::name() { \
- return Handle<type>(native_context()->name(), this); \
- } \
- bool Isolate::is_##name(type* value) { \
- return native_context()->is_##name(value); \
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> Isolate::name() { \
+ return Handle<type>(raw_native_context()->name(), this); \
+ } \
+ bool Isolate::is_##name(type* value) { \
+ return raw_native_context()->is_##name(value); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
bool Isolate::IsArraySpeciesLookupChainIntact() {
- if (!FLAG_harmony_species) return true;
// Note: It would be nice to have debug checks to make sure that the
// species protector is accurate, but this would be hard to do for most of
// what the protector stands for:
@@ -111,11 +137,16 @@ bool Isolate::IsArraySpeciesLookupChainIntact() {
// done here. In place, there are mjsunit tests harmony/array-species* which
// ensure that behavior is correct in various invalid protector cases.
- PropertyCell* species_cell = heap()->species_protector();
+ Cell* species_cell = heap()->species_protector();
return species_cell->value()->IsSmi() &&
Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
}
+bool Isolate::IsHasInstanceLookupChainIntact() {
+ PropertyCell* has_instance_cell = heap()->has_instance_protector();
+ return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index c9f01118c5..e14db60385 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -9,16 +9,19 @@
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
-#include "src/ast/ast.h"
-#include "src/ast/scopeinfo.h"
+#include "src/ast/context-slot-cache.h"
+#include "src/base/accounting-allocator.h"
+#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
#include "src/basic-block-profiler.h"
#include "src/bootstrapper.h"
+#include "src/cancelable-task.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -27,10 +30,10 @@
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
+#include "src/libsampler/sampler.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/profiler/sampler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
@@ -39,7 +42,7 @@
#include "src/v8.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
-
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -312,33 +315,6 @@ void Isolate::PushStackTraceAndDie(unsigned int magic, void* ptr1, void* ptr2,
base::OS::Abort();
}
-
-// Determines whether the given stack frame should be displayed in
-// a stack trace. The caller is the error constructor that asked
-// for the stack trace to be collected. The first time a construct
-// call to this function is encountered it is skipped. The seen_caller
-// in/out parameter is used to remember if the caller has been seen
-// yet.
-static bool IsVisibleInStackTrace(JSFunction* fun,
- Object* caller,
- Object* receiver,
- bool* seen_caller) {
- if ((fun == caller) && !(*seen_caller)) {
- *seen_caller = true;
- return false;
- }
- // Skip all frames until we've seen the caller.
- if (!(*seen_caller)) return false;
- // Functions defined in native scripts are not visible unless directly
- // exposed, in which case the native flag is set.
- // The --builtins-in-stack-traces command line flag allows including
- // internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces && fun->shared()->IsBuiltin()) {
- return fun->shared()->native();
- }
- return true;
-}
-
static Handle<FixedArray> MaybeGrow(Isolate* isolate,
Handle<FixedArray> elements,
int cur_position, int new_size) {
@@ -355,8 +331,117 @@ static Handle<FixedArray> MaybeGrow(Isolate* isolate,
return elements;
}
+class StackTraceHelper {
+ public:
+ StackTraceHelper(Isolate* isolate, FrameSkipMode mode, Handle<Object> caller)
+ : isolate_(isolate),
+ mode_(mode),
+ caller_(caller),
+ skip_next_frame_(true) {
+ switch (mode_) {
+ case SKIP_FIRST:
+ skip_next_frame_ = true;
+ break;
+ case SKIP_UNTIL_SEEN:
+ DCHECK(caller_->IsJSFunction());
+ skip_next_frame_ = true;
+ break;
+ case SKIP_NONE:
+ skip_next_frame_ = false;
+ break;
+ }
+ encountered_strict_function_ = false;
+ sloppy_frames_ = 0;
+ }
+
+ // The stack trace API should not expose receivers and function
+ // objects on frames deeper than the top-most one with a strict mode
+ // function. The number of sloppy frames is stored as the first element in
+ // the result array.
+ void CountSloppyFrames(JSFunction* fun) {
+ if (!encountered_strict_function_) {
+ if (is_strict(fun->shared()->language_mode())) {
+ encountered_strict_function_ = true;
+ } else {
+ sloppy_frames_++;
+ }
+ }
+ }
+
+ // Determines whether the given stack frame should be displayed in a stack
+ // trace.
+ bool IsVisibleInStackTrace(JSFunction* fun) {
+ return ShouldIncludeFrame(fun) && IsNotInNativeScript(fun) &&
+ IsInSameSecurityContext(fun);
+ }
+
+ int sloppy_frames() const { return sloppy_frames_; }
+
+ private:
+ // This mechanism excludes a number of uninteresting frames from the stack
+ // trace. This can be the first frame (which will be a builtin-exit frame
+ // for the error constructor builtin) or every frame until encountering a
+ // user-specified function.
+ bool ShouldIncludeFrame(JSFunction* fun) {
+ switch (mode_) {
+ case SKIP_NONE:
+ return true;
+ case SKIP_FIRST:
+ if (!skip_next_frame_) return true;
+ skip_next_frame_ = false;
+ return false;
+ case SKIP_UNTIL_SEEN:
+ if (skip_next_frame_ && (fun == *caller_)) {
+ skip_next_frame_ = false;
+ return false;
+ }
+ return !skip_next_frame_;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ bool IsNotInNativeScript(JSFunction* fun) {
+ // Functions defined in native scripts are not visible unless directly
+ // exposed, in which case the native flag is set.
+ // The --builtins-in-stack-traces command line flag allows including
+ // internal call sites in the stack trace for debugging purposes.
+ if (!FLAG_builtins_in_stack_traces && fun->shared()->IsBuiltin()) {
+ return fun->shared()->native();
+ }
+ return true;
+ }
+
+ bool IsInSameSecurityContext(JSFunction* fun) {
+ return isolate_->context()->HasSameSecurityTokenAs(fun->context());
+ }
+
+ Isolate* isolate_;
+
+ const FrameSkipMode mode_;
+ const Handle<Object> caller_;
+ bool skip_next_frame_;
+
+ int sloppy_frames_;
+ bool encountered_strict_function_;
+};
+
+namespace {
+
+// TODO(jgruber): Fix all cases in which frames give us a hole value (e.g. the
+// receiver in RegExp constructor frames).
+Handle<Object> TheHoleToUndefined(Isolate* isolate, Handle<Object> in) {
+ return (in->IsTheHole(isolate))
+ ? Handle<Object>::cast(isolate->factory()->undefined_value())
+ : in;
+}
+}
+
Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
+ FrameSkipMode mode,
Handle<Object> caller) {
+ DisallowJavascriptExecution no_js(this);
+
// Get stack trace limit.
Handle<JSObject> error = error_function();
Handle<String> stackTraceLimit =
@@ -372,14 +457,11 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
Handle<FixedArray> elements =
factory()->NewFixedArrayWithHoles(initial_size * 4 + 1);
- // If the caller parameter is a function we skip frames until we're
- // under it before starting to collect.
- bool seen_caller = !caller->IsJSFunction();
+ StackTraceHelper helper(this, mode, caller);
+
// First element is reserved to store the number of sloppy frames.
int cursor = 1;
int frames_seen = 0;
- int sloppy_frames = 0;
- bool encountered_strict_function = false;
for (StackFrameIterator iter(this); !iter.done() && frames_seen < limit;
iter.Advance()) {
StackFrame* frame = iter.frame();
@@ -387,7 +469,8 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
switch (frame->type()) {
case StackFrame::JAVA_SCRIPT:
case StackFrame::OPTIMIZED:
- case StackFrame::INTERPRETED: {
+ case StackFrame::INTERPRETED:
+ case StackFrame::BUILTIN: {
JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
// Set initial size to the maximum inlining level + 1 for the outermost
// function.
@@ -395,32 +478,25 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
js_frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
Handle<JSFunction> fun = frames[i].function();
- Handle<Object> recv = frames[i].receiver();
+
// Filter out internal frames that we do not want to show.
- if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) {
- continue;
- }
- // Filter out frames from other security contexts.
- if (!this->context()->HasSameSecurityTokenAs(fun->context())) {
- continue;
- }
- elements = MaybeGrow(this, elements, cursor, cursor + 4);
+ if (!helper.IsVisibleInStackTrace(*fun)) continue;
+ helper.CountSloppyFrames(*fun);
+ Handle<Object> recv = frames[i].receiver();
Handle<AbstractCode> abstract_code = frames[i].abstract_code();
-
- Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
- // The stack trace API should not expose receivers and function
- // objects on frames deeper than the top-most one with a strict mode
- // function. The number of sloppy frames is stored as first element in
- // the result array.
- if (!encountered_strict_function) {
- if (is_strict(fun->shared()->language_mode())) {
- encountered_strict_function = true;
- } else {
- sloppy_frames++;
+ if (frame->type() == StackFrame::BUILTIN) {
+ // Help CallSite::IsConstructor correctly detect hand-written
+ // construct stubs.
+ Code* code = Code::cast(*abstract_code);
+ if (code->is_construct_stub()) {
+ recv = handle(heap()->call_site_constructor_symbol(), this);
}
}
- elements->set(cursor++, *recv);
+ Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
+
+ elements = MaybeGrow(this, elements, cursor, cursor + 4);
+ elements->set(cursor++, *TheHoleToUndefined(this, recv));
elements->set(cursor++, *fun);
elements->set(cursor++, *abstract_code);
elements->set(cursor++, *offset);
@@ -428,19 +504,47 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
}
} break;
+ case StackFrame::BUILTIN_EXIT: {
+ BuiltinExitFrame* exit_frame = BuiltinExitFrame::cast(frame);
+ Handle<JSFunction> fun = handle(exit_frame->function(), this);
+
+ // Filter out internal frames that we do not want to show.
+ if (!helper.IsVisibleInStackTrace(*fun)) continue;
+ helper.CountSloppyFrames(*fun);
+
+ Handle<Code> code = handle(exit_frame->LookupCode(), this);
+ int offset =
+ static_cast<int>(exit_frame->pc() - code->instruction_start());
+
+ // In order to help CallSite::IsConstructor detect builtin constructors,
+ // we reuse the receiver field to pass along a special symbol.
+ Handle<Object> recv;
+ if (exit_frame->IsConstructor()) {
+ recv = factory()->call_site_constructor_symbol();
+ } else {
+ recv = handle(exit_frame->receiver(), this);
+ }
+
+ elements = MaybeGrow(this, elements, cursor, cursor + 4);
+ elements->set(cursor++, *recv);
+ elements->set(cursor++, *fun);
+ elements->set(cursor++, *code);
+ elements->set(cursor++, Smi::FromInt(offset));
+ frames_seen++;
+ } break;
+
case StackFrame::WASM: {
WasmFrame* wasm_frame = WasmFrame::cast(frame);
Code* code = wasm_frame->unchecked_code();
Handle<AbstractCode> abstract_code =
- Handle<AbstractCode>(AbstractCode::cast(code));
- Handle<JSFunction> fun = factory()->NewFunction(
- factory()->NewStringFromAsciiChecked("<WASM>"));
+ Handle<AbstractCode>(AbstractCode::cast(code), this);
+ int offset =
+ static_cast<int>(wasm_frame->pc() - code->instruction_start());
elements = MaybeGrow(this, elements, cursor, cursor + 4);
- // TODO(jfb) Pass module object.
- elements->set(cursor++, *factory()->undefined_value());
- elements->set(cursor++, *fun);
+ elements->set(cursor++, wasm_frame->wasm_obj());
+ elements->set(cursor++, Smi::FromInt(wasm_frame->function_index()));
elements->set(cursor++, *abstract_code);
- elements->set(cursor++, Internals::IntToSmi(0));
+ elements->set(cursor++, Smi::FromInt(offset));
frames_seen++;
} break;
@@ -448,7 +552,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
break;
}
}
- elements->set(0, Smi::FromInt(sloppy_frames));
+ elements->set(0, Smi::FromInt(helper.sloppy_frames()));
elements->Shrink(cursor);
Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
result->set_length(Smi::FromInt(cursor));
@@ -472,10 +576,12 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
}
MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
- Handle<JSReceiver> error_object, Handle<Object> caller) {
+ Handle<JSReceiver> error_object, FrameSkipMode mode,
+ Handle<Object> caller) {
// Capture stack trace for simple stack trace string formatting.
Handle<Name> key = factory()->stack_trace_symbol();
- Handle<Object> stack_trace = CaptureSimpleStackTrace(error_object, caller);
+ Handle<Object> stack_trace =
+ CaptureSimpleStackTrace(error_object, mode, caller);
RETURN_ON_EXCEPTION(
this, JSReceiver::SetProperty(error_object, key, stack_trace, STRICT),
JSReceiver);
@@ -488,18 +594,7 @@ Handle<JSArray> Isolate::GetDetailedStackTrace(Handle<JSObject> error_object) {
Handle<Object> stack_trace =
JSReceiver::GetDataProperty(error_object, key_detailed);
if (stack_trace->IsJSArray()) return Handle<JSArray>::cast(stack_trace);
-
- if (!capture_stack_trace_for_uncaught_exceptions_) return Handle<JSArray>();
-
- // Try to get details from simple stack trace.
- Handle<JSArray> detailed_stack_trace =
- GetDetailedFromSimpleStackTrace(error_object);
- if (!detailed_stack_trace.is_null()) {
- // Save the detailed stack since the simple one might be withdrawn later.
- JSObject::SetProperty(error_object, key_detailed, detailed_stack_trace,
- STRICT).Assert();
- }
- return detailed_stack_trace;
+ return Handle<JSArray>();
}
@@ -542,34 +637,30 @@ class CaptureStackTraceHelper {
}
}
+ Handle<JSObject> NewStackFrameObject(FrameSummary& summ) {
+ int position = summ.abstract_code()->SourcePosition(summ.code_offset());
+ return NewStackFrameObject(summ.function(), position,
+ summ.is_constructor());
+ }
+
Handle<JSObject> NewStackFrameObject(Handle<JSFunction> fun, int position,
bool is_constructor) {
Handle<JSObject> stack_frame =
factory()->NewJSObject(isolate_->object_function());
-
- Handle<Script> script(Script::cast(fun->shared()->script()));
+ Handle<Script> script(Script::cast(fun->shared()->script()), isolate_);
if (!line_key_.is_null()) {
- int script_line_offset = script->line_offset();
- int line_number = Script::GetLineNumber(script, position);
- // line_number is already shifted by the script_line_offset.
- int relative_line_number = line_number - script_line_offset;
- if (!column_key_.is_null() && relative_line_number >= 0) {
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- int start = (relative_line_number == 0) ? 0 :
- Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
- int column_offset = position - start;
- if (relative_line_number == 0) {
- // For the case where the code is on the same line as the script
- // tag.
- column_offset += script->column_offset();
- }
+ Script::PositionInfo info;
+ bool valid_pos =
+ script->GetPositionInfo(position, &info, Script::WITH_OFFSET);
+
+ if (!column_key_.is_null() && valid_pos) {
JSObject::AddProperty(stack_frame, column_key_,
- handle(Smi::FromInt(column_offset + 1), isolate_),
+ handle(Smi::FromInt(info.column + 1), isolate_),
NONE);
}
JSObject::AddProperty(stack_frame, line_key_,
- handle(Smi::FromInt(line_number + 1), isolate_),
+ handle(Smi::FromInt(info.line + 1), isolate_),
NONE);
}
@@ -589,22 +680,72 @@ class CaptureStackTraceHelper {
NONE);
}
- if (!function_key_.is_null()) {
- Handle<Object> fun_name = JSFunction::GetDebugName(fun);
- JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
- }
-
if (!eval_key_.is_null()) {
Handle<Object> is_eval = factory()->ToBoolean(
script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
JSObject::AddProperty(stack_frame, eval_key_, is_eval, NONE);
}
+ if (!function_key_.is_null()) {
+ Handle<Object> fun_name = JSFunction::GetDebugName(fun);
+ JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
+ }
+
if (!constructor_key_.is_null()) {
Handle<Object> is_constructor_obj = factory()->ToBoolean(is_constructor);
JSObject::AddProperty(stack_frame, constructor_key_, is_constructor_obj,
NONE);
}
+ return stack_frame;
+ }
+
+ Handle<JSObject> NewStackFrameObject(BuiltinExitFrame* frame) {
+ Handle<JSObject> stack_frame =
+ factory()->NewJSObject(isolate_->object_function());
+ Handle<JSFunction> fun = handle(frame->function(), isolate_);
+ if (!function_key_.is_null()) {
+ Handle<Object> fun_name = JSFunction::GetDebugName(fun);
+ JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
+ }
+
+ // We don't have a script and hence cannot set line and column positions.
+ DCHECK(!fun->shared()->script()->IsScript());
+
+ return stack_frame;
+ }
+
+ Handle<JSObject> NewStackFrameObject(WasmFrame* frame) {
+ Handle<JSObject> stack_frame =
+ factory()->NewJSObject(isolate_->object_function());
+
+ if (!function_key_.is_null()) {
+ Handle<String> name = wasm::GetWasmFunctionName(
+ isolate_, handle(frame->wasm_obj(), isolate_),
+ frame->function_index());
+ JSObject::AddProperty(stack_frame, function_key_, name, NONE);
+ }
+ // Encode the function index as line number.
+ if (!line_key_.is_null()) {
+ JSObject::AddProperty(
+ stack_frame, line_key_,
+ isolate_->factory()->NewNumberFromInt(frame->function_index()), NONE);
+ }
+ // Encode the byte offset as column.
+ if (!column_key_.is_null()) {
+ Code* code = frame->LookupCode();
+ int offset = static_cast<int>(frame->pc() - code->instruction_start());
+ int position = AbstractCode::cast(code)->SourcePosition(offset);
+ // Make position 1-based.
+ if (position >= 0) ++position;
+ JSObject::AddProperty(stack_frame, column_key_,
+ isolate_->factory()->NewNumberFromInt(position),
+ NONE);
+ }
+ if (!script_id_key_.is_null()) {
+ int script_id = frame->script()->id();
+ JSObject::AddProperty(stack_frame, script_id_key_,
+ handle(Smi::FromInt(script_id), isolate_), NONE);
+ }
return stack_frame;
}
@@ -636,76 +777,43 @@ int PositionFromStackTrace(Handle<FixedArray> elements, int index) {
}
}
-
-Handle<JSArray> Isolate::GetDetailedFromSimpleStackTrace(
- Handle<JSObject> error_object) {
- Handle<Name> key = factory()->stack_trace_symbol();
- Handle<Object> property = JSReceiver::GetDataProperty(error_object, key);
- if (!property->IsJSArray()) return Handle<JSArray>();
- Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
-
- CaptureStackTraceHelper helper(this,
- stack_trace_for_uncaught_exceptions_options_);
-
- int frames_seen = 0;
- Handle<FixedArray> elements(FixedArray::cast(simple_stack_trace->elements()));
- int elements_limit = Smi::cast(simple_stack_trace->length())->value();
-
- int frame_limit = stack_trace_for_uncaught_exceptions_frame_limit_;
- if (frame_limit < 0) frame_limit = (elements_limit - 1) / 4;
-
- Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
- for (int i = 1; i < elements_limit && frames_seen < frame_limit; i += 4) {
- Handle<Object> recv = handle(elements->get(i), this);
- Handle<JSFunction> fun =
- handle(JSFunction::cast(elements->get(i + 1)), this);
- bool is_constructor =
- recv->IsJSObject() &&
- Handle<JSObject>::cast(recv)->map()->GetConstructor() == *fun;
- int position = PositionFromStackTrace(elements, i);
-
- Handle<JSObject> stack_frame =
- helper.NewStackFrameObject(fun, position, is_constructor);
-
- FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
- frames_seen++;
- }
-
- stack_trace->set_length(Smi::FromInt(frames_seen));
- return stack_trace;
-}
-
-
Handle<JSArray> Isolate::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
+ DisallowJavascriptExecution no_js(this);
CaptureStackTraceHelper helper(this, options);
// Ensure no negative values.
int limit = Max(frame_limit, 0);
Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
+ Handle<FixedArray> stack_trace_elems(
+ FixedArray::cast(stack_trace->elements()), this);
- StackTraceFrameIterator it(this);
int frames_seen = 0;
- while (!it.done() && (frames_seen < limit)) {
- JavaScriptFrame* frame = it.frame();
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
- Handle<JSFunction> fun = frames[i].function();
- // Filter frames from other security contexts.
- if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
- !this->context()->HasSameSecurityTokenAs(fun->context())) continue;
- int position =
- frames[i].abstract_code()->SourcePosition(frames[i].code_offset());
- Handle<JSObject> stack_frame =
- helper.NewStackFrameObject(fun, position, frames[i].is_constructor());
-
- FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
+ for (StackTraceFrameIterator it(this); !it.done() && (frames_seen < limit);
+ it.Advance()) {
+ StandardFrame* frame = it.frame();
+ if (frame->is_java_script()) {
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ JavaScriptFrame::cast(frame)->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+ Handle<JSFunction> fun = frames[i].function();
+ // Filter frames from other security contexts.
+ if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
+ !this->context()->HasSameSecurityTokenAs(fun->context()))
+ continue;
+ Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(frames[i]);
+ stack_trace_elems->set(frames_seen, *new_frame_obj);
+ frames_seen++;
+ }
+ } else {
+ DCHECK(frame->is_wasm());
+ WasmFrame* wasm_frame = WasmFrame::cast(frame);
+ Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(wasm_frame);
+ stack_trace_elems->set(frames_seen, *new_frame_obj);
frames_seen++;
}
- it.Advance();
}
stack_trace->set_length(Smi::FromInt(frames_seen));
@@ -750,6 +858,7 @@ static void PrintFrames(Isolate* isolate,
void Isolate::PrintStack(StringStream* accumulator, PrintStackMode mode) {
// The MentionedObjectCache is not GC-proof at the moment.
DisallowHeapAllocation no_gc;
+ HandleScope scope(this);
DCHECK(accumulator->IsMentionedObjectCacheClear(this));
// Avoid printing anything if there are no frames.
@@ -774,21 +883,6 @@ void Isolate::SetFailedAccessCheckCallback(
}
-static inline AccessCheckInfo* GetAccessCheckInfo(Isolate* isolate,
- Handle<JSObject> receiver) {
- Object* maybe_constructor = receiver->map()->GetConstructor();
- if (!maybe_constructor->IsJSFunction()) return NULL;
- JSFunction* constructor = JSFunction::cast(maybe_constructor);
- if (!constructor->shared()->IsApiFunction()) return NULL;
-
- Object* data_obj =
- constructor->shared()->get_api_func_data()->access_check_info();
- if (data_obj == isolate->heap()->undefined_value()) return NULL;
-
- return AccessCheckInfo::cast(data_obj);
-}
-
-
void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
if (!thread_local_top()->failed_access_check_callback_) {
return ScheduleThrow(*factory()->NewTypeError(MessageTemplate::kNoAccess));
@@ -801,7 +895,7 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
HandleScope scope(this);
Handle<Object> data;
{ DisallowHeapAllocation no_gc;
- AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
+ AccessCheckInfo* access_check_info = AccessCheckInfo::Get(this, receiver);
if (!access_check_info) {
AllowHeapAllocation doesnt_matter_anymore;
return ScheduleThrow(
@@ -849,18 +943,12 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
HandleScope scope(this);
Handle<Object> data;
v8::AccessCheckCallback callback = nullptr;
- v8::NamedSecurityCallback named_callback = nullptr;
{ DisallowHeapAllocation no_gc;
- AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
+ AccessCheckInfo* access_check_info = AccessCheckInfo::Get(this, receiver);
if (!access_check_info) return false;
Object* fun_obj = access_check_info->callback();
callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
data = handle(access_check_info->data(), this);
- if (!callback) {
- fun_obj = access_check_info->named_callback();
- named_callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
- if (!named_callback) return false;
- }
}
LOG(this, ApiSecurityCheck());
@@ -868,37 +956,25 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
{
// Leaving JavaScript.
VMState<EXTERNAL> state(this);
- if (callback) {
- return callback(v8::Utils::ToLocal(accessing_context),
- v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(data));
- }
- Handle<Object> key = factory()->undefined_value();
- return named_callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
- v8::ACCESS_HAS, v8::Utils::ToLocal(data));
+ return callback(v8::Utils::ToLocal(accessing_context),
+ v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(data));
}
}
-const char* const Isolate::kStackOverflowMessage =
- "Uncaught RangeError: Maximum call stack size exceeded";
-
-
Object* Isolate::StackOverflow() {
+ DisallowJavascriptExecution no_js(this);
HandleScope scope(this);
- // At this point we cannot create an Error object using its javascript
- // constructor. Instead, we copy the pre-constructed boilerplate and
- // attach the stack trace as a hidden property.
+
+ Handle<JSFunction> fun = range_error_function();
+ Handle<Object> msg = factory()->NewStringFromAsciiChecked(
+ MessageTemplate::TemplateString(MessageTemplate::kStackOverflow));
+ Handle<Object> no_caller;
Handle<Object> exception;
- if (bootstrapper()->IsActive()) {
- // There is no boilerplate to use during bootstrapping.
- exception = factory()->NewStringFromAsciiChecked(
- MessageTemplate::TemplateString(MessageTemplate::kStackOverflow));
- } else {
- Handle<JSObject> boilerplate = stack_overflow_boilerplate();
- Handle<JSObject> copy = factory()->CopyJSObject(boilerplate);
- CaptureAndSetSimpleStackTrace(copy, factory()->undefined_value());
- exception = copy;
- }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ this, exception,
+ ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller, true));
+
Throw(*exception, nullptr);
#ifdef VERIFY_HEAP
@@ -1061,19 +1137,21 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
// embedder didn't specify a custom uncaught exception callback,
// or if the custom callback determined that V8 should abort, then
// abort.
- if (FLAG_abort_on_uncaught_exception &&
- PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT &&
- (!abort_on_uncaught_exception_callback_ ||
- abort_on_uncaught_exception_callback_(
- reinterpret_cast<v8::Isolate*>(this)))) {
- // Prevent endless recursion.
- FLAG_abort_on_uncaught_exception = false;
- // This flag is intended for use by JavaScript developers, so
- // print a user-friendly stack trace (not an internal one).
- PrintF(stderr, "%s\n\nFROM\n",
- MessageHandler::GetLocalizedMessage(this, message_obj).get());
- PrintCurrentStackTrace(stderr);
- base::OS::Abort();
+ if (FLAG_abort_on_uncaught_exception) {
+ CatchType prediction = PredictExceptionCatcher();
+ if ((prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) &&
+ (!abort_on_uncaught_exception_callback_ ||
+ abort_on_uncaught_exception_callback_(
+ reinterpret_cast<v8::Isolate*>(this)))) {
+ // Prevent endless recursion.
+ FLAG_abort_on_uncaught_exception = false;
+ // This flag is intended for use by JavaScript developers, so
+ // print a user-friendly stack trace (not an internal one).
+ PrintF(stderr, "%s\n\nFROM\n",
+ MessageHandler::GetLocalizedMessage(this, message_obj).get());
+ PrintCurrentStackTrace(stderr);
+ base::OS::Abort();
+ }
}
}
}
@@ -1139,7 +1217,11 @@ Object* Isolate::UnwindAndFindHandler() {
// Gather information from the frame.
code = frame->LookupCode();
- if (code->marked_for_deoptimization()) {
+
+ // TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED, but
+ // do not have a code kind of OPTIMIZED_FUNCTION.
+ if (code->kind() == Code::OPTIMIZED_FUNCTION &&
+ code->marked_for_deoptimization()) {
// If the target code is lazy deoptimized, we jump to the original
// return address, but we make a note that we are throwing, so that
// the deoptimizer can do the right thing.
@@ -1162,7 +1244,7 @@ Object* Isolate::UnwindAndFindHandler() {
// position of the exception handler. The special builtin below will
// take care of continuing to dispatch at that position. Also restore
// the correct context for the handler from the interpreter register.
- context = Context::cast(js_frame->GetInterpreterRegister(context_reg));
+ context = Context::cast(js_frame->ReadInterpreterRegister(context_reg));
js_frame->PatchBytecodeOffset(static_cast<int>(offset));
offset = 0;
@@ -1215,6 +1297,38 @@ Object* Isolate::UnwindAndFindHandler() {
return exception;
}
+namespace {
+HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
+ HandlerTable::CatchPrediction prediction;
+ if (frame->is_optimized()) {
+ if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) {
+ // This optimized frame will catch. Its handler table does not include
+ // exception prediction, and we need to use the corresponding handler
+ // tables on the unoptimized code objects.
+ List<FrameSummary> summaries;
+ frame->Summarize(&summaries);
+ for (const FrameSummary& summary : summaries) {
+ Handle<AbstractCode> code = summary.abstract_code();
+ if (code->kind() == AbstractCode::OPTIMIZED_FUNCTION) {
+ DCHECK(summary.function()->shared()->asm_function());
+ DCHECK(!FLAG_turbo_asm_deoptimization);
+ // asm code cannot contain try-catch.
+ continue;
+ }
+ int code_offset = summary.code_offset();
+ int index =
+ code->LookupRangeInHandlerTable(code_offset, nullptr, &prediction);
+ if (index <= 0) continue;
+ if (prediction == HandlerTable::UNCAUGHT) continue;
+ return prediction;
+ }
+ }
+ } else if (frame->LookupExceptionHandlerInTable(nullptr, &prediction) > 0) {
+ return prediction;
+ }
+ return HandlerTable::UNCAUGHT;
+}
+} // anonymous namespace
Isolate::CatchType Isolate::PredictExceptionCatcher() {
Address external_handler = thread_local_top()->try_catch_handler_address();
@@ -1233,12 +1347,9 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
// For JavaScript frames we perform a lookup in the handler table.
if (frame->is_java_script()) {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
- HandlerTable::CatchPrediction prediction;
- if (js_frame->LookupExceptionHandlerInTable(nullptr, &prediction) > 0) {
- // We are conservative with our prediction: try-finally is considered
- // to always rethrow, to meet the expectation of the debugger.
- if (prediction == HandlerTable::CAUGHT) return CAUGHT_BY_JAVASCRIPT;
- }
+ HandlerTable::CatchPrediction prediction = PredictException(js_frame);
+ if (prediction == HandlerTable::DESUGARING) return CAUGHT_BY_DESUGARING;
+ if (prediction != HandlerTable::UNCAUGHT) return CAUGHT_BY_JAVASCRIPT;
}
// The exception has been externally caught if and only if there is an
@@ -1291,7 +1402,7 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
DCHECK(handler->rethrow_);
DCHECK(handler->capture_message_);
Object* message = reinterpret_cast<Object*>(handler->message_obj_);
- DCHECK(message->IsJSMessageObject() || message->IsTheHole());
+ DCHECK(message->IsJSMessageObject() || message->IsTheHole(this));
thread_local_top()->pending_message_obj_ = message;
}
@@ -1318,14 +1429,25 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
while (!it.done()) {
HandleScope scope(this);
// Find code position if recorded in relocation info.
- JavaScriptFrame* frame = it.frame();
- Code* code = frame->LookupCode();
- int offset = static_cast<int>(frame->pc() - code->instruction_start());
- int pos = frame->LookupCode()->SourcePosition(offset);
+ StandardFrame* frame = it.frame();
+ AbstractCode* abstract_code;
+ int code_offset;
+ if (frame->is_interpreted()) {
+ InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+ abstract_code = AbstractCode::cast(iframe->GetBytecodeArray());
+ code_offset = iframe->GetBytecodeOffset();
+ } else {
+ DCHECK(frame->is_java_script() || frame->is_wasm());
+ Code* code = frame->LookupCode();
+ abstract_code = AbstractCode::cast(code);
+ code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+ }
+ int pos = abstract_code->SourcePosition(code_offset);
+ JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
Handle<Object> pos_obj(Smi::FromInt(pos), this);
// Fetch function and receiver.
- Handle<JSFunction> fun(frame->function());
- Handle<Object> recv(frame->receiver(), this);
+ Handle<JSFunction> fun(js_frame->function(), this);
+ Handle<Object> recv(js_frame->receiver(), this);
// Advance to the next JavaScript frame and determine if the
// current frame is the top-level frame.
it.Advance();
@@ -1340,31 +1462,30 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
}
}
-
bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
- if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* fun = frame->function();
- Object* script = fun->shared()->script();
- if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined())) {
- Handle<Script> casted_script(Script::cast(script));
- // Compute the location from the function and the relocation info of the
- // baseline code. For optimized code this will use the deoptimization
- // information to get canonical location information.
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- it.frame()->Summarize(&frames);
- FrameSummary& summary = frames.last();
- int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
- *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
- return true;
- }
+ if (it.done()) return false;
+ StandardFrame* frame = it.frame();
+ // TODO(clemensh): handle wasm frames
+ if (!frame->is_java_script()) return false;
+ JSFunction* fun = JavaScriptFrame::cast(frame)->function();
+ Object* script = fun->shared()->script();
+ if (!script->IsScript() ||
+ (Script::cast(script)->source()->IsUndefined(this))) {
+ return false;
}
- return false;
+ Handle<Script> casted_script(Script::cast(script), this);
+ // Compute the location from the function and the relocation info of the
+ // baseline code. For optimized code this will use the deoptimization
+ // information to get canonical location information.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ JavaScriptFrame::cast(frame)->Summarize(&frames);
+ FrameSummary& summary = frames.last();
+ int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
+ *target = MessageLocation(casted_script, pos, pos + 1, handle(fun, this));
+ return true;
}
-
bool Isolate::ComputeLocationFromException(MessageLocation* target,
Handle<Object> exception) {
if (!exception->IsJSObject()) return false;
@@ -1405,13 +1526,17 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
int elements_limit = Smi::cast(simple_stack_trace->length())->value();
for (int i = 1; i < elements_limit; i += 4) {
- Handle<JSFunction> fun =
- handle(JSFunction::cast(elements->get(i + 1)), this);
+ Handle<Object> fun_obj = handle(elements->get(i + 1), this);
+ if (fun_obj->IsSmi()) {
+ // TODO(clemensh): handle wasm frames
+ return false;
+ }
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
if (!fun->shared()->IsSubjectToDebugging()) continue;
Object* script = fun->shared()->script();
if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined())) {
+ !(Script::cast(script)->source()->IsUndefined(this))) {
int pos = PositionFromStackTrace(elements, i);
Handle<Script> casted_script(Script::cast(script));
*target = MessageLocation(casted_script, pos, pos + 1);
@@ -1426,7 +1551,7 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
Handle<JSArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
- if (Object::IsErrorObject(this, exception)) {
+ if (exception->IsJSError()) {
// We fetch the stack trace that corresponds to this error object.
// If the lookup fails, the exception is probably not a valid Error
// object. In that case, we fall through and capture the stack trace
@@ -1535,11 +1660,11 @@ void Isolate::ReportPendingMessages() {
}
// Actually report the pending message to all message handlers.
- if (!message_obj->IsTheHole() && should_report_exception) {
+ if (!message_obj->IsTheHole(this) && should_report_exception) {
HandleScope scope(this);
- Handle<JSMessageObject> message(JSMessageObject::cast(message_obj));
- Handle<JSValue> script_wrapper(JSValue::cast(message->script()));
- Handle<Script> script(Script::cast(script_wrapper->value()));
+ Handle<JSMessageObject> message(JSMessageObject::cast(message_obj), this);
+ Handle<JSValue> script_wrapper(JSValue::cast(message->script()), this);
+ Handle<Script> script(Script::cast(script_wrapper->value()), this);
int start_pos = message->start_position();
int end_pos = message->end_position();
MessageLocation location(script, start_pos, end_pos);
@@ -1552,11 +1677,11 @@ MessageLocation Isolate::GetMessageLocation() {
DCHECK(has_pending_exception());
if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
- !thread_local_top_.pending_message_obj_->IsTheHole()) {
+ !thread_local_top_.pending_message_obj_->IsTheHole(this)) {
Handle<JSMessageObject> message_obj(
- JSMessageObject::cast(thread_local_top_.pending_message_obj_));
- Handle<JSValue> script_wrapper(JSValue::cast(message_obj->script()));
- Handle<Script> script(Script::cast(script_wrapper->value()));
+ JSMessageObject::cast(thread_local_top_.pending_message_obj_), this);
+ Handle<JSValue> script_wrapper(JSValue::cast(message_obj->script()), this);
+ Handle<Script> script(Script::cast(script_wrapper->value()), this);
int start_pos = message_obj->start_position();
int end_pos = message_obj->end_position();
return MessageLocation(script, start_pos, end_pos);
@@ -1608,17 +1733,12 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
return true;
}
-
-void Isolate::PushPromise(Handle<JSObject> promise,
- Handle<JSFunction> function) {
+void Isolate::PushPromise(Handle<JSObject> promise) {
ThreadLocalTop* tltop = thread_local_top();
PromiseOnStack* prev = tltop->promise_on_stack_;
Handle<JSObject> global_promise =
Handle<JSObject>::cast(global_handles()->Create(*promise));
- Handle<JSFunction> global_function =
- Handle<JSFunction>::cast(global_handles()->Create(*function));
- tltop->promise_on_stack_ =
- new PromiseOnStack(global_function, global_promise, prev);
+ tltop->promise_on_stack_ = new PromiseOnStack(global_promise, prev);
}
@@ -1626,11 +1746,9 @@ void Isolate::PopPromise() {
ThreadLocalTop* tltop = thread_local_top();
if (tltop->promise_on_stack_ == NULL) return;
PromiseOnStack* prev = tltop->promise_on_stack_->prev();
- Handle<Object> global_function = tltop->promise_on_stack_->function();
Handle<Object> global_promise = tltop->promise_on_stack_->promise();
delete tltop->promise_on_stack_;
tltop->promise_on_stack_ = prev;
- global_handles()->Destroy(global_function.location());
global_handles()->Destroy(global_promise.location());
}
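
The hunk above drops the function slot from the promise stack: PushPromise now pins only the promise in a global handle, and PopPromise must destroy exactly what was pinned before unlinking the node. A minimal stand-alone model of that bookkeeping, with a plain heap allocation standing in for the global-handle pinning (PinnedPromise, PromiseNode, and the names here are illustrative stand-ins, not V8 API):

    #include <cassert>
    #include <string>

    // Stand-in for a GC-pinned global handle; in V8 this would be a
    // Handle<JSObject> created through global_handles()->Create().
    struct PinnedPromise {
      std::string name;
    };

    // Mirrors PromiseOnStack after this patch: one pinned payload plus a
    // link to the previous node, no function slot anymore.
    class PromiseNode {
     public:
      PromiseNode(PinnedPromise* promise, PromiseNode* prev)
          : promise_(promise), prev_(prev) {}
      PinnedPromise* promise() const { return promise_; }
      PromiseNode* prev() const { return prev_; }
     private:
      PinnedPromise* promise_;
      PromiseNode* prev_;
    };

    PromiseNode* top = nullptr;  // per-thread in the real code (ThreadLocalTop)

    void PushPromise(const std::string& name) {
      top = new PromiseNode(new PinnedPromise{name}, top);
    }

    void PopPromise() {
      if (top == nullptr) return;
      PromiseNode* prev = top->prev();
      delete top->promise();  // the real code destroys the global handle here
      delete top;
      top = prev;
    }

    int main() {
      PushPromise("outer");
      PushPromise("inner");
      assert(top->promise()->name == "inner");
      PopPromise();
      assert(top->promise()->name == "outer");
      PopPromise();
      assert(top == nullptr);
    }
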
@@ -1639,18 +1757,20 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
Handle<Object> undefined = factory()->undefined_value();
ThreadLocalTop* tltop = thread_local_top();
if (tltop->promise_on_stack_ == NULL) return undefined;
- Handle<JSFunction> promise_function = tltop->promise_on_stack_->function();
// Find the top-most try-catch or try-finally handler.
- if (PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) return undefined;
+ CatchType prediction = PredictExceptionCatcher();
+ if (prediction == NOT_CAUGHT || prediction == CAUGHT_BY_EXTERNAL) {
+ return undefined;
+ }
for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) {
- // Throwing inside a Promise only leads to a reject if not caught by an
- // inner try-catch or try-finally.
- if (frame->function() == *promise_function) {
+ switch (PredictException(it.frame())) {
+ case HandlerTable::UNCAUGHT:
+ break;
+ case HandlerTable::CAUGHT:
+ case HandlerTable::DESUGARING:
+ return undefined;
+ case HandlerTable::PROMISE:
return tltop->promise_on_stack_->promise();
- }
- return undefined;
}
}
return undefined;
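
The rewritten loop above stops comparing frame functions against a remembered promise function; instead each frame's handler table yields a catch prediction, and the first decisive answer wins. A stand-alone sketch of that cascade under simplified types (Prediction and WillRejectPromise are illustrative stand-ins for HandlerTable::CatchPrediction and the real frame walk):

    #include <cstdio>
    #include <vector>

    // Stand-in for HandlerTable::CatchPrediction; the enumerators mirror
    // the ones the new code switches over.
    enum class Prediction { UNCAUGHT, CAUGHT, DESUGARING, PROMISE };

    // Walk the stack from the throw site outward and report whether the
    // exception is expected to reject a promise. A frame predicting CAUGHT
    // or DESUGARING swallows the exception first, so no rejection happens.
    bool WillRejectPromise(const std::vector<Prediction>& frames) {
      for (Prediction p : frames) {        // innermost frame first
        switch (p) {
          case Prediction::UNCAUGHT:
            break;                         // keep unwinding
          case Prediction::CAUGHT:
          case Prediction::DESUGARING:
            return false;                  // a regular handler wins
          case Prediction::PROMISE:
            return true;                   // reached the promise's try scope
        }
      }
      return false;                        // nothing catches it at all
    }

    int main() {
      // Throw inside a promise body, with no inner try-catch:
      printf("%d\n", WillRejectPromise({Prediction::UNCAUGHT,
                                        Prediction::PROMISE}));  // 1
      // An inner try-catch shadows the promise handler:
      printf("%d\n", WillRejectPromise({Prediction::CAUGHT,
                                        Prediction::PROMISE}));  // 0
    }
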
@@ -1673,11 +1793,6 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
}
-Handle<Context> Isolate::native_context() {
- return handle(context()->native_context());
-}
-
-
Handle<Context> Isolate::GetCallingNativeContext() {
JavaScriptFrameIterator it(this);
if (debug_->in_debug_scope()) {
@@ -1694,7 +1809,7 @@ Handle<Context> Isolate::GetCallingNativeContext() {
if (it.done()) return Handle<Context>::null();
JavaScriptFrame* frame = it.frame();
Context* context = Context::cast(frame->context());
- return Handle<Context>(context->native_context());
+ return Handle<Context>(context->native_context(), this);
}
@@ -1789,6 +1904,52 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
#define TRACE_ISOLATE(tag)
#endif
+class VerboseAccountingAllocator : public base::AccountingAllocator {
+ public:
+ VerboseAccountingAllocator(Heap* heap, size_t sample_bytes)
+ : heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {}
+
+ void* Allocate(size_t size) override {
+ void* memory = base::AccountingAllocator::Allocate(size);
+ if (memory) {
+ size_t current = GetCurrentMemoryUsage();
+ if (last_memory_usage_.Value() + sample_bytes_ < current) {
+ PrintJSON(current);
+ last_memory_usage_.SetValue(current);
+ }
+ }
+ return memory;
+ }
+
+ void Free(void* memory, size_t bytes) override {
+ base::AccountingAllocator::Free(memory, bytes);
+ size_t current = GetCurrentMemoryUsage();
+ if (current + sample_bytes_ < last_memory_usage_.Value()) {
+ PrintJSON(current);
+ last_memory_usage_.SetValue(current);
+ }
+ }
+
+ private:
+ void PrintJSON(size_t sample) {
+ // Note: Neither the isolate nor the heap is locked, so be careful with accesses
+ // as the allocator is potentially used on a concurrent thread.
+ double time = heap_->isolate()->time_millis_since_init();
+ PrintF(
+ "{"
+ "\"type\": \"malloced\", "
+ "\"isolate\": \"%p\", "
+ "\"time\": %f, "
+ "\"value\": %zu"
+ "}\n",
+ reinterpret_cast<void*>(heap_->isolate()), time, sample);
+ }
+
+ Heap* heap_;
+ base::AtomicNumber<size_t> last_memory_usage_;
+ size_t sample_bytes_;
+};
+
Isolate::Isolate(bool enable_serializer)
: embedder_data_(),
entry_stack_(NULL),
@@ -1798,10 +1959,10 @@ Isolate::Isolate(bool enable_serializer)
runtime_profiler_(NULL),
compilation_cache_(NULL),
counters_(NULL),
- code_range_(NULL),
logger_(NULL),
stats_table_(NULL),
- stub_cache_(NULL),
+ load_stub_cache_(NULL),
+ store_stub_cache_(NULL),
code_aging_helper_(NULL),
deoptimizer_data_(NULL),
deoptimizer_lazy_throw_(false),
@@ -1809,14 +1970,16 @@ Isolate::Isolate(bool enable_serializer)
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- memory_allocator_(NULL),
keyed_lookup_cache_(NULL),
context_slot_cache_(NULL),
descriptor_lookup_cache_(NULL),
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
- runtime_zone_(&allocator_),
- interface_descriptor_zone_(&allocator_),
+ allocator_(FLAG_trace_gc_object_stats
+ ? new VerboseAccountingAllocator(&heap_, 256 * KB)
+ : new base::AccountingAllocator()),
+ runtime_zone_(new Zone(allocator_)),
+ interface_descriptor_zone_(new Zone(allocator_)),
inner_pointer_to_code_cache_(NULL),
global_handles_(NULL),
eternal_handles_(NULL),
@@ -1828,12 +1991,15 @@ Isolate::Isolate(bool enable_serializer)
// TODO(bmeurer) Initialized lazily because it depends on flags; can
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
+ rail_mode_(PERFORMANCE_ANIMATION),
serializer_enabled_(enable_serializer),
has_fatal_error_(false),
initialized_from_snapshot_(false),
is_tail_call_elimination_enabled_(true),
+ is_isolate_in_background_(false),
cpu_profiler_(NULL),
heap_profiler_(NULL),
+ code_event_dispatcher_(new CodeEventDispatcher()),
function_entry_hook_(NULL),
deferred_handles_head_(NULL),
optimizing_compile_dispatcher_(NULL),
@@ -1845,6 +2011,7 @@ Isolate::Isolate(bool enable_serializer)
#if TRACE_MAPS
next_unique_sfi_id_(0),
#endif
+ is_running_microtasks_(false),
use_counter_callback_(NULL),
basic_block_profiler_(NULL),
cancelable_task_manager_(new CancelableTaskManager()),
@@ -1960,7 +2127,7 @@ void Isolate::Deinit() {
}
// We must stop the logger before we tear down other components.
- Sampler* sampler = logger_->sampler();
+ sampler::Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
delete deoptimizer_data_;
@@ -1990,6 +2157,8 @@ void Isolate::Deinit() {
delete cpu_profiler_;
cpu_profiler_ = NULL;
+ code_event_dispatcher_.reset();
+
delete root_index_map_;
root_index_map_ = NULL;
@@ -2008,7 +2177,7 @@ Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
// Has to be called while counters_ are still alive
- runtime_zone_.DeleteKeptSegment();
+ runtime_zone_->DeleteKeptSegment();
// The entry stack must be empty when we get here.
DCHECK(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
@@ -2035,8 +2204,10 @@ Isolate::~Isolate() {
delete keyed_lookup_cache_;
keyed_lookup_cache_ = NULL;
- delete stub_cache_;
- stub_cache_ = NULL;
+ delete load_stub_cache_;
+ load_stub_cache_ = NULL;
+ delete store_stub_cache_;
+ store_stub_cache_ = NULL;
delete code_aging_helper_;
code_aging_helper_ = NULL;
delete stats_table_;
@@ -2067,10 +2238,6 @@ Isolate::~Isolate() {
delete thread_manager_;
thread_manager_ = NULL;
- delete memory_allocator_;
- memory_allocator_ = NULL;
- delete code_range_;
- code_range_ = NULL;
delete global_handles_;
global_handles_ = NULL;
delete eternal_handles_;
@@ -2088,6 +2255,15 @@ Isolate::~Isolate() {
delete cancelable_task_manager_;
cancelable_task_manager_ = nullptr;
+ delete runtime_zone_;
+ runtime_zone_ = nullptr;
+
+ delete interface_descriptor_zone_;
+ interface_descriptor_zone_ = nullptr;
+
+ delete allocator_;
+ allocator_ = nullptr;
+
#if USE_SIMULATOR
Simulator::TearDown(simulator_i_cache_, simulator_redirection_);
simulator_i_cache_ = nullptr;
@@ -2123,12 +2299,12 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
} else {
v8::TryCatch* handler = try_catch_handler();
DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
- thread_local_top_.pending_message_obj_->IsTheHole());
+ thread_local_top_.pending_message_obj_->IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
handler->exception_ = pending_exception();
// Propagate to the external try-catch only if we got an actual message.
- if (thread_local_top_.pending_message_obj_->IsTheHole()) return true;
+ if (thread_local_top_.pending_message_obj_->IsTheHole(this)) return true;
handler->message_obj_ = thread_local_top_.pending_message_obj_;
}
@@ -2164,9 +2340,6 @@ bool Isolate::Init(Deserializer* des) {
// The initialization process does not handle memory exhaustion.
AlwaysAllocateScope always_allocate(this);
- memory_allocator_ = new MemoryAllocator(this);
- code_range_ = new CodeRange(this);
-
// Safe after setting Heap::isolate_, and initializing StackGuard
heap_.SetStackLimits();
@@ -2186,7 +2359,8 @@ bool Isolate::Init(Deserializer* des) {
eternal_handles_ = new EternalHandles();
bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
- stub_cache_ = new StubCache(this);
+ load_stub_cache_ = new StubCache(this, Code::LOAD_IC);
+ store_stub_cache_ = new StubCache(this, Code::STORE_IC);
materialized_object_store_ = new MaterializedObjectStore(this);
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
@@ -2225,7 +2399,7 @@ bool Isolate::Init(Deserializer* des) {
return false;
}
- deoptimizer_data_ = new DeoptimizerData(memory_allocator_);
+ deoptimizer_data_ = new DeoptimizerData(heap()->memory_allocator());
const bool create_heap_objects = (des == NULL);
if (create_heap_objects && !heap_.CreateHeapObjects()) {
@@ -2242,12 +2416,16 @@ bool Isolate::Init(Deserializer* des) {
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
+ if (create_heap_objects) {
+ heap_.CreateFixedStubs();
+ }
if (FLAG_log_internal_timer_events) {
set_event_logger(Logger::DefaultEventLoggerSentinel);
}
- if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
+ if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs || FLAG_trace_turbo ||
+ FLAG_trace_turbo_graph) {
PrintF("Concurrent recompilation has been disabled for tracing.\n");
} else if (OptimizingCompileDispatcher::Enabled()) {
optimizing_compile_dispatcher_ = new OptimizingCompileDispatcher(this);
@@ -2261,9 +2439,9 @@ bool Isolate::Init(Deserializer* des) {
if (!create_heap_objects) {
des->Deserialize(this);
}
- stub_cache_->Initialize();
-
- if (FLAG_ignition) {
+ load_stub_cache_->Initialize();
+ store_stub_cache_->Initialize();
+ if (FLAG_ignition || serializer_enabled()) {
interpreter_->Initialize();
}
@@ -2288,13 +2466,10 @@ bool Isolate::Init(Deserializer* des) {
Internals::kIsolateEmbedderDataOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
Internals::kIsolateRootsOffset);
- CHECK_EQ(static_cast<int>(
- OFFSET_OF(Isolate, heap_.amount_of_external_allocated_memory_)),
- Internals::kAmountOfExternalAllocatedMemoryOffset);
- CHECK_EQ(static_cast<int>(OFFSET_OF(
- Isolate,
- heap_.amount_of_external_allocated_memory_at_last_global_gc_)),
- Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_)),
+ Internals::kExternalMemoryOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.external_memory_limit_)),
+ Internals::kExternalMemoryLimitOffset);
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
@@ -2428,8 +2603,17 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
void Isolate::DumpAndResetCompilationStats() {
if (turbo_statistics() != nullptr) {
+ DCHECK(FLAG_turbo_stats || FLAG_turbo_stats_nvp);
+
OFStream os(stdout);
- os << *turbo_statistics() << std::endl;
+ if (FLAG_turbo_stats) {
+ AsPrintableStatistics ps = {*turbo_statistics(), false};
+ os << ps << std::endl;
+ }
+ if (FLAG_turbo_stats_nvp) {
+ AsPrintableStatistics ps = {*turbo_statistics(), true};
+ os << ps << std::endl;
+ }
}
if (hstatistics() != nullptr) hstatistics()->Print();
delete turbo_statistics_;
@@ -2473,7 +2657,7 @@ Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
DisallowHeapAllocation no_gc;
Object* const initial_js_array_map =
context()->native_context()->get(Context::ArrayMapIndex(kind));
- if (!initial_js_array_map->IsUndefined()) {
+ if (!initial_js_array_map->IsUndefined(this)) {
return Map::cast(initial_js_array_map);
}
}
@@ -2487,6 +2671,31 @@ bool Isolate::use_crankshaft() const {
CpuFeatures::SupportsCrankshaft();
}
+bool Isolate::IsArrayOrObjectPrototype(Object* object) {
+ Object* context = heap()->native_contexts_list();
+ while (!context->IsUndefined(this)) {
+ Context* current_context = Context::cast(context);
+ if (current_context->initial_object_prototype() == object ||
+ current_context->initial_array_prototype() == object) {
+ return true;
+ }
+ context = current_context->next_context_link();
+ }
+ return false;
+}
+
+bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
+ DisallowHeapAllocation no_gc;
+ Object* context = heap()->native_contexts_list();
+ while (!context->IsUndefined(this)) {
+ Context* current_context = Context::cast(context);
+ if (current_context->get(index) == object) {
+ return true;
+ }
+ context = current_context->next_context_link();
+ }
+ return false;
+}
bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
PropertyCell* no_elements_cell = heap()->array_protector();
@@ -2547,52 +2756,76 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
return cell_reports_intact;
}
-void Isolate::InvalidateArraySpeciesProtector() {
- if (!FLAG_harmony_species) return;
- DCHECK(factory()->species_protector()->value()->IsSmi());
- DCHECK(IsArraySpeciesLookupChainIntact());
- PropertyCell::SetValueWithInvalidation(
- factory()->species_protector(),
- handle(Smi::FromInt(kArrayProtectorInvalid), this));
- DCHECK(!IsArraySpeciesLookupChainIntact());
+bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
+ Cell* is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
+ bool is_is_concat_spreadable_set =
+ Smi::cast(is_concat_spreadable_cell->value())->value() ==
+ kArrayProtectorInvalid;
+#ifdef DEBUG
+ Map* root_array_map = get_initial_js_array_map(GetInitialFastElementsKind());
+ if (root_array_map == NULL) {
+ // Ignore the value of is_concat_spreadable during bootstrap.
+ return !is_is_concat_spreadable_set;
+ }
+ Handle<Object> array_prototype(array_function()->prototype(), this);
+ Handle<Symbol> key = factory()->is_concat_spreadable_symbol();
+ Handle<Object> value;
+ LookupIterator it(array_prototype, key);
+ if (it.IsFound() && !JSReceiver::GetDataProperty(&it)->IsUndefined(this)) {
+ // TODO(cbruni): Currently we do not revert if we unset the
+ // @@isConcatSpreadable property on Array.prototype or Object.prototype,
+ // hence the reverse implication doesn't hold.
+ DCHECK(is_is_concat_spreadable_set);
+ return false;
+ }
+#endif // DEBUG
+
+ return !is_is_concat_spreadable_set;
+}
+
+bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver) {
+ if (!IsIsConcatSpreadableLookupChainIntact()) return false;
+ return !receiver->HasProxyInPrototype(this);
}
void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
- if (IsFastArrayConstructorPrototypeChainIntact() &&
- object->map()->is_prototype_map()) {
- Object* context = heap()->native_contexts_list();
- while (!context->IsUndefined()) {
- Context* current_context = Context::cast(context);
- if (current_context->get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ==
- *object ||
- current_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ==
- *object) {
- CountUsage(v8::Isolate::UseCounterFeature::kArrayProtectorDirtied);
- PropertyCell::SetValueWithInvalidation(
- factory()->array_protector(),
- handle(Smi::FromInt(kArrayProtectorInvalid), this));
- break;
- }
- context = current_context->get(Context::NEXT_CONTEXT_LINK);
- }
- }
+ if (!object->map()->is_prototype_map()) return;
+ if (!IsFastArrayConstructorPrototypeChainIntact()) return;
+ if (!IsArrayOrObjectPrototype(*object)) return;
+ PropertyCell::SetValueWithInvalidation(
+ factory()->array_protector(),
+ handle(Smi::FromInt(kArrayProtectorInvalid), this));
+}
+
+void Isolate::InvalidateHasInstanceProtector() {
+ DCHECK(factory()->has_instance_protector()->value()->IsSmi());
+ DCHECK(IsHasInstanceLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->has_instance_protector(),
+ handle(Smi::FromInt(kArrayProtectorInvalid), this));
+ DCHECK(!IsHasInstanceLookupChainIntact());
}
+void Isolate::InvalidateIsConcatSpreadableProtector() {
+ DCHECK(factory()->is_concat_spreadable_protector()->value()->IsSmi());
+ DCHECK(IsIsConcatSpreadableLookupChainIntact());
+ factory()->is_concat_spreadable_protector()->set_value(
+ Smi::FromInt(kArrayProtectorInvalid));
+ DCHECK(!IsIsConcatSpreadableLookupChainIntact());
+}
+
+void Isolate::InvalidateArraySpeciesProtector() {
+ DCHECK(factory()->species_protector()->value()->IsSmi());
+ DCHECK(IsArraySpeciesLookupChainIntact());
+ factory()->species_protector()->set_value(
+ Smi::FromInt(kArrayProtectorInvalid));
+ DCHECK(!IsArraySpeciesLookupChainIntact());
+}
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
- if (array->map()->is_prototype_map()) {
- Object* context = heap()->native_contexts_list();
- while (!context->IsUndefined()) {
- Context* current_context = Context::cast(context);
- if (current_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ==
- *array) {
- return true;
- }
- context = current_context->get(Context::NEXT_CONTEXT_LINK);
- }
- }
- return false;
+ DisallowHeapAllocation no_gc;
+ return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
}
@@ -2673,13 +2906,6 @@ void Isolate::RemoveBeforeCallEnteredCallback(
}
-void Isolate::FireBeforeCallEnteredCallback() {
- for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
- before_call_entered_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
- }
-}
-
-
void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
for (int i = 0; i < call_completed_callbacks_.length(); i++) {
if (callback == call_completed_callbacks_.at(i)) return;
@@ -2698,16 +2924,19 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void Isolate::FireCallCompletedCallback() {
- bool has_call_completed_callbacks = !call_completed_callbacks_.is_empty();
+ if (!handle_scope_implementer()->CallDepthIsZero()) return;
+
bool run_microtasks =
pending_microtask_count() &&
!handle_scope_implementer()->HasMicrotasksSuppressions() &&
handle_scope_implementer()->microtasks_policy() ==
v8::MicrotasksPolicy::kAuto;
- if (!has_call_completed_callbacks && !run_microtasks) return;
- if (!handle_scope_implementer()->CallDepthIsZero()) return;
if (run_microtasks) RunMicrotasks();
+ // Prevent stepping from spilling into the next call made by the embedder.
+ if (debug()->is_active()) debug()->ClearStepping();
+
+ if (call_completed_callbacks_.is_empty()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
@@ -2748,7 +2977,7 @@ void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
queue = factory()->CopyFixedArrayAndGrow(queue, num_tasks);
heap()->set_microtask_queue(*queue);
}
- DCHECK(queue->get(num_tasks)->IsUndefined());
+ DCHECK(queue->get(num_tasks)->IsUndefined(this));
queue->set(num_tasks, *microtask);
set_pending_microtask_count(num_tasks + 1);
}
@@ -2758,7 +2987,9 @@ void Isolate::RunMicrotasks() {
// Increase call depth to prevent recursive callbacks.
v8::Isolate::SuppressMicrotaskExecutionScope suppress(
reinterpret_cast<v8::Isolate*>(this));
+ is_running_microtasks_ = true;
RunMicrotasksInternal();
+ is_running_microtasks_ = false;
FireMicrotasksCompletedCallback();
}
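
The is_running_microtasks_ bracket added here lets IsRunningMicrotasks() report whether the queue is currently being drained. A minimal sketch of the pattern with std::function tasks; the queue, flag, and assertions are illustrative, not the V8 implementation:

    #include <cassert>
    #include <functional>
    #include <queue>

    std::queue<std::function<void()>> microtasks;
    bool is_running_microtasks = false;  // mirrors is_running_microtasks_

    // Drains the queue once; the flag lets callers (and the tasks
    // themselves) observe that they are already inside the drain.
    void RunMicrotasks() {
      assert(!is_running_microtasks);  // the embedder must not re-enter
      is_running_microtasks = true;
      while (!microtasks.empty()) {
        auto task = std::move(microtasks.front());
        microtasks.pop();
        task();  // may enqueue further microtasks; they run in this drain
      }
      is_running_microtasks = false;
    }

    int main() {
      microtasks.push([] {
        assert(is_running_microtasks);   // visible from inside a task
        microtasks.push([] {});          // chained task still drains now
      });
      RunMicrotasks();
      assert(microtasks.empty() && !is_running_microtasks);
    }
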
@@ -2780,12 +3011,14 @@ void Isolate::RunMicrotasksInternal() {
Handle<JSFunction>::cast(microtask);
SaveContext save(this);
set_context(microtask_function->context()->native_context());
+ handle_scope_implementer_->EnterMicrotaskContext(
+ handle(microtask_function->context(), this));
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> result = Execution::TryCall(
this, microtask_function, factory()->undefined_value(), 0, NULL,
&maybe_exception);
+ handle_scope_implementer_->LeaveMicrotaskContext();
// If execution is terminating, just bail out.
- Handle<Object> exception;
if (result.is_null() && maybe_exception.is_null()) {
// Clear out any remaining callbacks in the queue.
heap()->set_microtask_queue(heap()->empty_fixed_array());
@@ -2883,7 +3116,7 @@ void Isolate::SetTailCallEliminationEnabled(bool enabled) {
void Isolate::AddDetachedContext(Handle<Context> context) {
HandleScope scope(this);
Handle<WeakCell> cell = factory()->NewWeakCell(context);
- Handle<FixedArray> detached_contexts(heap()->detached_contexts());
+ Handle<FixedArray> detached_contexts = factory()->detached_contexts();
int length = detached_contexts->length();
detached_contexts = factory()->CopyFixedArrayAndGrow(detached_contexts, 2);
detached_contexts->set(length, Smi::FromInt(0));
@@ -2894,7 +3127,7 @@ void Isolate::AddDetachedContext(Handle<Context> context) {
void Isolate::CheckDetachedContextsAfterGC() {
HandleScope scope(this);
- Handle<FixedArray> detached_contexts(heap()->detached_contexts());
+ Handle<FixedArray> detached_contexts = factory()->detached_contexts();
int length = detached_contexts->length();
if (length == 0) return;
int new_length = 0;
@@ -2917,7 +3150,7 @@ void Isolate::CheckDetachedContextsAfterGC() {
DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
if (mark_sweeps > 3) {
- PrintF("detached context 0x%p\n survived %d GCs (leak?)\n",
+ PrintF("detached context %p\n survived %d GCs (leak?)\n",
static_cast<void*>(cell->value()), mark_sweeps);
}
}
@@ -2930,6 +3163,21 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
}
+void Isolate::SetRAILMode(RAILMode rail_mode) {
+ rail_mode_.SetValue(rail_mode);
+ if (FLAG_trace_rail) {
+ PrintIsolate(this, "RAIL mode: %s\n", RAILModeName(rail_mode));
+ }
+}
+
+void Isolate::IsolateInBackgroundNotification() {
+ is_isolate_in_background_ = true;
+ heap()->ActivateMemoryReducerIfNeeded();
+}
+
+void Isolate::IsolateInForegroundNotification() {
+ is_isolate_in_background_ = false;
+}
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
@@ -2942,24 +3190,6 @@ bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
return GetCurrentStackPosition() - gap < stack_guard->real_climit();
}
-
-SaveContext::SaveContext(Isolate* isolate)
- : isolate_(isolate), prev_(isolate->save_context()) {
- if (isolate->context() != NULL) {
- context_ = Handle<Context>(isolate->context());
- }
- isolate->set_save_context(this);
-
- c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
-}
-
-
-SaveContext::~SaveContext() {
- isolate_->set_context(context_.is_null() ? NULL : *context_);
- isolate_->set_save_context(prev_);
-}
-
-
#ifdef DEBUG
AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
: isolate_(isolate), context_(isolate->context(), isolate) {}
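
Earlier in this file, the new VerboseAccountingAllocator samples zone memory by printing a JSON line only when usage drifts more than sample_bytes away from the last reported value, in either direction. A single-threaded sketch of that strategy (SamplingAllocator is illustrative; the real class inherits base::AccountingAllocator and keeps the last value in an atomic because allocation may happen on concurrent threads):

    #include <cstdio>
    #include <cstdlib>

    class SamplingAllocator {
     public:
      explicit SamplingAllocator(size_t sample_bytes)
          : sample_bytes_(sample_bytes) {}

      void* Allocate(size_t size) {
        void* memory = malloc(size);
        if (memory) {
          current_ += size;
          // Report only when usage grew past the sampling threshold.
          if (last_reported_ + sample_bytes_ < current_) Report();
        }
        return memory;
      }

      void Free(void* memory, size_t size) {
        free(memory);
        current_ -= size;
        // Symmetric check for a large drop in usage.
        if (current_ + sample_bytes_ < last_reported_) Report();
      }

     private:
      void Report() {
        printf("{\"type\": \"malloced\", \"value\": %zu}\n", current_);
        last_reported_ = current_;
      }

      size_t current_ = 0;
      size_t last_reported_ = 0;
      size_t sample_bytes_;
    };

    int main() {
      SamplingAllocator alloc(256 * 1024);   // 256 KB granularity, as above
      void* p = alloc.Allocate(300 * 1024);  // crosses threshold -> sample
      void* q = alloc.Allocate(1024);        // small delta -> silent
      alloc.Free(p, 300 * 1024);             // big drop -> sample again
      alloc.Free(q, 1024);
    }
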
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 8847164012..eb1841d4d8 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -5,16 +5,13 @@
#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_
+#include <memory>
#include <queue>
-#include <set>
#include "include/v8-debug.h"
#include "src/allocation.h"
-#include "src/assert-scope.h"
-#include "src/base/accounting-allocator.h"
#include "src/base/atomicops.h"
-#include "src/builtins.h"
-#include "src/cancelable-task.h"
+#include "src/builtins/builtins.h"
#include "src/contexts.h"
#include "src/date.h"
#include "src/execution.h"
@@ -22,18 +19,17 @@
#include "src/futex-emulation.h"
#include "src/global-handles.h"
#include "src/handles.h"
-#include "src/hashmap.h"
#include "src/heap/heap.h"
#include "src/messages.h"
-#include "src/optimizing-compile-dispatcher.h"
#include "src/regexp/regexp-stack.h"
-#include "src/runtime-profiler.h"
#include "src/runtime/runtime.h"
+#include "src/tracing/trace-event.h"
#include "src/zone.h"
namespace v8 {
namespace base {
+class AccountingAllocator;
class RandomNumberGenerator;
}
@@ -41,7 +37,10 @@ namespace internal {
class BasicBlockProfiler;
class Bootstrapper;
+class CancelableTaskManager;
class CallInterfaceDescriptorData;
+class CodeAgingHelper;
+class CodeEventDispatcher;
class CodeGenerator;
class CodeRange;
class CodeStubDescriptor;
@@ -58,7 +57,6 @@ class EmptyStatement;
class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
-class FunctionInfoListener;
class HandleScopeImplementer;
class HeapProfiler;
class HStatistics;
@@ -67,8 +65,9 @@ class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
class Logger;
class MaterializedObjectStore;
-class CodeAgingHelper;
+class OptimizingCompileDispatcher;
class RegExpStack;
+class RuntimeProfiler;
class SaveContext;
class StatsTable;
class StringTracker;
@@ -125,6 +124,27 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
+#define RETURN_RESULT(isolate, call, T) \
+ do { \
+ Handle<T> __result__; \
+ if (!(call).ToHandle(&__result__)) { \
+ DCHECK((isolate)->has_pending_exception()); \
+ return MaybeHandle<T>(); \
+ } \
+ return __result__; \
+ } while (false)
+
+#define RETURN_RESULT_OR_FAILURE(isolate, call) \
+ do { \
+ Handle<Object> __result__; \
+ Isolate* __isolate__ = (isolate); \
+ if (!(call).ToHandle(&__result__)) { \
+ DCHECK(__isolate__->has_pending_exception()); \
+ return __isolate__->heap()->exception(); \
+ } \
+ return *__result__; \
+ } while (false)
+
#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
do { \
if (!(call).ToHandle(&dst)) { \
@@ -133,21 +153,26 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
} \
} while (false)
-#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
- ASSIGN_RETURN_ON_EXCEPTION_VALUE( \
- isolate, dst, call, isolate->heap()->exception())
+#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(__isolate__, dst, call, \
+ __isolate__->heap()->exception()); \
+ } while (false)
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
-#define THROW_NEW_ERROR(isolate, call, T) \
- do { \
- return isolate->Throw<T>(isolate->factory()->call); \
+#define THROW_NEW_ERROR(isolate, call, T) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ return __isolate__->Throw<T>(__isolate__->factory()->call); \
} while (false)
-#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
- do { \
- return isolate->Throw(*isolate->factory()->call); \
+#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ return __isolate__->Throw(*__isolate__->factory()->call); \
} while (false)
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
@@ -158,8 +183,12 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
} \
} while (false)
-#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
- RETURN_ON_EXCEPTION_VALUE(isolate, call, isolate->heap()->exception())
+#define RETURN_FAILURE_ON_EXCEPTION(isolate, call) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ RETURN_ON_EXCEPTION_VALUE(__isolate__, call, \
+ __isolate__->heap()->exception()); \
} while (false)
#define RETURN_ON_EXCEPTION(isolate, call, T) \
RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
@@ -340,9 +369,9 @@ class ThreadLocalTop BASE_EMBEDDED {
#if USE_SIMULATOR
-#define ISOLATE_INIT_SIMULATOR_LIST(V) \
- V(bool, simulator_initialized, false) \
- V(HashMap*, simulator_i_cache, NULL) \
+#define ISOLATE_INIT_SIMULATOR_LIST(V) \
+ V(bool, simulator_initialized, false) \
+ V(base::HashMap*, simulator_i_cache, NULL) \
V(Redirection*, simulator_redirection, NULL)
#else
@@ -353,10 +382,10 @@ class ThreadLocalTop BASE_EMBEDDED {
#ifdef DEBUG
-#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
- V(CommentStatistic, paged_space_comments_statistics, \
- CommentStatistic::kMaxComments + 1) \
- V(int, code_kind_statistics, Code::NUMBER_OF_KINDS)
+#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
+ V(CommentStatistic, paged_space_comments_statistics, \
+ CommentStatistic::kMaxComments + 1) \
+ V(int, code_kind_statistics, AbstractCode::NUMBER_OF_KINDS)
#else
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
@@ -374,33 +403,37 @@ class ThreadLocalTop BASE_EMBEDDED {
typedef List<HeapObject*> DebugObjectCache;
-#define ISOLATE_INIT_LIST(V) \
- /* Assembler state. */ \
- V(FatalErrorCallback, exception_behavior, NULL) \
- V(LogEventCallback, event_logger, NULL) \
- V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
- /* To distinguish the function templates, so that we can find them in the */ \
- /* function cache of the native context. */ \
- V(int, next_serial_number, 0) \
- V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
- /* Part of the state of liveedit. */ \
- V(FunctionInfoListener*, active_function_info_listener, NULL) \
- /* State for Relocatable. */ \
- V(Relocatable*, relocatable_top, NULL) \
- V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
- V(Object*, string_stream_current_security_token, NULL) \
- V(ExternalReferenceTable*, external_reference_table, NULL) \
- V(HashMap*, external_reference_map, NULL) \
- V(HashMap*, root_index_map, NULL) \
- V(int, pending_microtask_count, 0) \
- V(HStatistics*, hstatistics, NULL) \
- V(CompilationStatistics*, turbo_statistics, NULL) \
- V(HTracer*, htracer, NULL) \
- V(CodeTracer*, code_tracer, NULL) \
- V(bool, fp_stubs_generated, false) \
- V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
- V(PromiseRejectCallback, promise_reject_callback, NULL) \
- V(const v8::StartupData*, snapshot_blob, NULL) \
+#define ISOLATE_INIT_LIST(V) \
+ /* Assembler state. */ \
+ V(FatalErrorCallback, exception_behavior, nullptr) \
+ V(OOMErrorCallback, oom_behavior, nullptr) \
+ V(LogEventCallback, event_logger, nullptr) \
+ V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(ExternalReferenceRedirectorPointer*, external_reference_redirector, \
+ nullptr) \
+ /* State for Relocatable. */ \
+ V(Relocatable*, relocatable_top, nullptr) \
+ V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
+ V(Object*, string_stream_current_security_token, nullptr) \
+ V(ExternalReferenceTable*, external_reference_table, nullptr) \
+ V(intptr_t*, api_external_references, nullptr) \
+ V(base::HashMap*, external_reference_map, nullptr) \
+ V(base::HashMap*, root_index_map, nullptr) \
+ V(int, pending_microtask_count, 0) \
+ V(HStatistics*, hstatistics, nullptr) \
+ V(CompilationStatistics*, turbo_statistics, nullptr) \
+ V(HTracer*, htracer, nullptr) \
+ V(CodeTracer*, code_tracer, nullptr) \
+ V(bool, fp_stubs_generated, false) \
+ V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
+ V(PromiseRejectCallback, promise_reject_callback, nullptr) \
+ V(const v8::StartupData*, snapshot_blob, nullptr) \
+ V(int, code_and_metadata_size, 0) \
+ V(int, bytecode_and_metadata_size, 0) \
+ /* true if being profiled. Causes collection of extra compile info. */ \
+ V(bool, is_profiling, false) \
+ /* true if a trace is being formatted through Error.prepareStackTrace. */ \
+ V(bool, formatting_stack_trace, false) \
ISOLATE_INIT_SIMULATOR_LIST(V)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
@@ -495,14 +528,6 @@ class Isolate {
return isolate;
}
- // Like Current, but skips the check that |isolate_key_| was initialized.
- // Callers have to ensure that themselves.
- // DO NOT USE. The only remaining callsite will be deleted soon.
- INLINE(static Isolate* UnsafeCurrent()) {
- return reinterpret_cast<Isolate*>(
- base::Thread::GetThreadLocal(isolate_key_));
- }
-
// Usually called by Init(), but can be called early e.g. to allow
// testing components that require logging but not the whole
// isolate.
@@ -633,9 +658,7 @@ class Isolate {
inline Handle<JSGlobalObject> global_object();
// Returns the global proxy object of the current context.
- JSObject* global_proxy() {
- return context()->global_proxy();
- }
+ inline Handle<JSObject> global_proxy();
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
void FreeThreadResources() { thread_local_top_.Free(); }
@@ -647,7 +670,7 @@ class Isolate {
bool OptionalRescheduleException(bool is_bottom_call);
// Push and pop a promise and the current try-catch handler.
- void PushPromise(Handle<JSObject> promise, Handle<JSFunction> function);
+ void PushPromise(Handle<JSObject> promise);
void PopPromise();
Handle<Object> GetPromiseOnStackOnThrow();
@@ -683,14 +706,14 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
+ FrameSkipMode mode,
Handle<Object> caller);
MaybeHandle<JSReceiver> CaptureAndSetDetailedStackTrace(
Handle<JSReceiver> error_object);
MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
- Handle<JSReceiver> error_object, Handle<Object> caller);
+ Handle<JSReceiver> error_object, FrameSkipMode mode,
+ Handle<Object> caller);
Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object);
- Handle<JSArray> GetDetailedFromSimpleStackTrace(
- Handle<JSObject> error_object);
// Returns if the given context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
@@ -701,7 +724,7 @@ class Isolate {
void ReportFailedAccessCheck(Handle<JSObject> receiver);
// Exception throwing support. The caller should use the result
- // of Throw() as its return value.
+ // of Throw() as its return value.
Object* Throw(Object* exception, MessageLocation* location = NULL);
Object* ThrowIllegalOperation();
@@ -722,9 +745,13 @@ class Isolate {
// Tries to predict whether an exception will be caught. Note that this can
// only produce an estimate, because it is undecidable whether a finally
- // clause will consume or re-throw an exception. We conservatively assume any
- // finally clause will behave as if the exception were consumed.
- enum CatchType { NOT_CAUGHT, CAUGHT_BY_JAVASCRIPT, CAUGHT_BY_EXTERNAL };
+ // clause will consume or re-throw an exception.
+ enum CatchType {
+ NOT_CAUGHT,
+ CAUGHT_BY_JAVASCRIPT,
+ CAUGHT_BY_EXTERNAL,
+ CAUGHT_BY_DESUGARING
+ };
CatchType PredictExceptionCatcher();
void ScheduleThrow(Object* exception);
@@ -766,7 +793,8 @@ class Isolate {
void IterateThread(ThreadVisitor* v, char* t);
// Returns the current native context.
- Handle<Context> native_context();
+ inline Handle<Context> native_context();
+ inline Context* raw_native_context();
// Returns the native context of the calling JavaScript code. That
// is, the native context of the top-most JavaScript frame.
@@ -778,8 +806,6 @@ class Isolate {
char* ArchiveThread(char* to);
char* RestoreThread(char* from);
- static const char* const kStackOverflowMessage;
-
static const int kUC16AlphabetSize = 256; // See StringSearchBase.
static const int kBMMaxShift = 250; // See StringSearchBase.
@@ -817,7 +843,9 @@ class Isolate {
DCHECK(counters_ != NULL);
return counters_;
}
- CodeRange* code_range() { return code_range_; }
+ tracing::TraceEventStatsTable* trace_event_stats_table() {
+ return &trace_event_stats_table_;
+ }
RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
CompilationCache* compilation_cache() { return compilation_cache_; }
Logger* logger() {
@@ -829,7 +857,8 @@ class Isolate {
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
StatsTable* stats_table();
- StubCache* stub_cache() { return stub_cache_; }
+ StubCache* load_stub_cache() { return load_stub_cache_; }
+ StubCache* store_stub_cache() { return store_stub_cache_; }
CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
@@ -841,10 +870,6 @@ class Isolate {
return materialized_object_store_;
}
- MemoryAllocator* memory_allocator() {
- return memory_allocator_;
- }
-
KeyedLookupCache* keyed_lookup_cache() {
return keyed_lookup_cache_;
}
@@ -863,8 +888,8 @@ class Isolate {
DCHECK(handle_scope_implementer_);
return handle_scope_implementer_;
}
- Zone* runtime_zone() { return &runtime_zone_; }
- Zone* interface_descriptor_zone() { return &interface_descriptor_zone_; }
+ Zone* runtime_zone() { return runtime_zone_; }
+ Zone* interface_descriptor_zone() { return interface_descriptor_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
@@ -912,7 +937,10 @@ class Isolate {
Debug* debug() { return debug_; }
- CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
+ bool* is_profiling_address() { return &is_profiling_; }
+ CodeEventDispatcher* code_event_dispatcher() const {
+ return code_event_dispatcher_.get();
+ }
HeapProfiler* heap_profiler() const { return heap_profiler_; }
#ifdef DEBUG
@@ -974,6 +1002,9 @@ class Isolate {
bool IsFastArrayConstructorPrototypeChainIntact();
inline bool IsArraySpeciesLookupChainIntact();
+ inline bool IsHasInstanceLookupChainIntact();
+ bool IsIsConcatSpreadableLookupChainIntact();
+ bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
@@ -990,6 +1021,8 @@ class Isolate {
UpdateArrayProtectorOnSetElement(object);
}
void InvalidateArraySpeciesProtector();
+ void InvalidateHasInstanceProtector();
+ void InvalidateIsConcatSpreadableProtector();
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
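
The protector accessors and Invalidate*Protector methods above all follow one idiom: a one-shot cell that fast paths read cheaply and that flips permanently the first time the guarded invariant is broken. A stand-alone sketch of that idiom (the names and the plain int cell are illustrative; in V8 some protectors are PropertyCells written through SetValueWithInvalidation so that optimized code deopts, while others are plain Cells):

    #include <cassert>

    constexpr int kProtectorValid = 1;    // V8 stores these as Smis,
    constexpr int kProtectorInvalid = 0;  // e.g. kArrayProtectorInvalid

    struct ProtectorCell {
      int value = kProtectorValid;
    };

    ProtectorCell species_protector;

    // Cheap check compiled into fast paths.
    bool IsArraySpeciesLookupChainIntact() {
      return species_protector.value == kProtectorValid;
    }

    // Called when someone redefines e.g. Array.prototype[Symbol.species];
    // the transition is one-way, so the fast path is gone for good.
    void InvalidateArraySpeciesProtector() {
      assert(IsArraySpeciesLookupChainIntact());
      species_protector.value = kProtectorInvalid;
      assert(!IsArraySpeciesLookupChainIntact());
    }

    int main() {
      assert(IsArraySpeciesLookupChainIntact());   // fast path allowed
      InvalidateArraySpeciesProtector();           // prototype was mutated
      assert(!IsArraySpeciesLookupChainIntact());  // slow path from now on
    }
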
@@ -1065,7 +1098,7 @@ class Isolate {
void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
- void FireBeforeCallEnteredCallback();
+ inline void FireBeforeCallEnteredCallback();
void AddMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
void RemoveMicrotasksCompletedCallback(MicrotasksCompletedCallback callback);
@@ -1077,6 +1110,7 @@ class Isolate {
void EnqueueMicrotask(Handle<Object> microtask);
void RunMicrotasks();
+ bool IsRunningMicrotasks() const { return is_running_microtasks_; }
void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
void CountUsage(v8::Isolate::UseCounterFeature feature);
@@ -1119,10 +1153,21 @@ class Isolate {
interpreter::Interpreter* interpreter() const { return interpreter_; }
- base::AccountingAllocator* allocator() { return &allocator_; }
+ base::AccountingAllocator* allocator() { return allocator_; }
+
+ bool IsInAnyContext(Object* object, uint32_t index);
+
+ void SetRAILMode(RAILMode rail_mode);
+
+ void IsolateInForegroundNotification();
+
+ void IsolateInBackgroundNotification();
+
+ bool IsIsolateInBackground() { return is_isolate_in_background_; }
protected:
explicit Isolate(bool enable_serializer);
+ bool IsArrayOrObjectPrototype(Object* object);
private:
friend struct GlobalState;
@@ -1156,7 +1201,7 @@ class Isolate {
// the Isolate. The top of the stack points to a thread which is currently
// running the Isolate. When the stack is empty, the Isolate is considered
// not entered by any thread and can be Disposed.
- // If the same thread enters the Isolate more then once, the entry_count_
+ // If the same thread enters the Isolate more than once, the entry_count_
// is incremented rather than a new item being pushed to the stack.
class EntryStackItem {
public:
@@ -1231,6 +1276,24 @@ class Isolate {
void RunMicrotasksInternal();
+ const char* RAILModeName(RAILMode rail_mode) const {
+ switch (rail_mode) {
+ case PERFORMANCE_RESPONSE:
+ return "RESPONSE";
+ case PERFORMANCE_ANIMATION:
+ return "ANIMATION";
+ case PERFORMANCE_IDLE:
+ return "IDLE";
+ case PERFORMANCE_LOAD:
+ return "LOAD";
+ }
+ return "";
+ }
+
+ // TODO(alph): Remove along with the deprecated GetCpuProfiler().
+ friend v8::CpuProfiler* v8::Isolate::GetCpuProfiler();
+ CpuProfiler* cpu_profiler() const { return cpu_profiler_; }
+
base::Atomic32 id_;
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
@@ -1240,12 +1303,13 @@ class Isolate {
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
Counters* counters_;
- CodeRange* code_range_;
+ tracing::TraceEventStatsTable trace_event_stats_table_;
base::RecursiveMutex break_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
- StubCache* stub_cache_;
+ StubCache* load_stub_cache_;
+ StubCache* store_stub_cache_;
CodeAgingHelper* code_aging_helper_;
DeoptimizerData* deoptimizer_data_;
bool deoptimizer_lazy_throw_;
@@ -1254,16 +1318,15 @@ class Isolate {
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- MemoryAllocator* memory_allocator_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
- base::AccountingAllocator allocator_;
- Zone runtime_zone_;
- Zone interface_descriptor_zone_;
+ base::AccountingAllocator* allocator_;
+ Zone* runtime_zone_;
+ Zone* interface_descriptor_zone_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
GlobalHandles* global_handles_;
EternalHandles* eternal_handles_;
@@ -1279,6 +1342,7 @@ class Isolate {
DateCache* date_cache_;
CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
+ base::AtomicValue<RAILMode> rail_mode_;
// Whether the isolate has been created for snapshotting.
bool serializer_enabled_;
@@ -1292,6 +1356,10 @@ class Isolate {
// True if ES2015 tail call elimination feature is enabled.
bool is_tail_call_elimination_enabled_;
+ // True if the isolate is in the background. This flag is used
+ // to prioritize between memory usage and latency.
+ bool is_isolate_in_background_;
+
// Time stamp at initialization.
double time_millis_at_init_;
@@ -1304,6 +1372,7 @@ class Isolate {
Debug* debug_;
CpuProfiler* cpu_profiler_;
HeapProfiler* heap_profiler_;
+ std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
FunctionEntryHook function_entry_hook_;
interpreter::Interpreter* interpreter_;
@@ -1358,6 +1427,7 @@ class Isolate {
// List of callbacks after microtasks were run.
List<MicrotasksCompletedCallback> microtasks_completed_callbacks_;
+ bool is_running_microtasks_;
v8::Isolate::UseCounterCallback use_counter_callback_;
BasicBlockProfiler* basic_block_profiler_;
@@ -1381,11 +1451,10 @@ class Isolate {
friend class Simulator;
friend class StackGuard;
friend class ThreadId;
- friend class TestMemoryAllocatorScope;
- friend class TestCodeRangeScope;
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
+ friend class v8::SnapshotCreator;
friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
friend v8::StartupData v8::V8::WarmUpSnapshotDataBlob(v8::StartupData,
const char*);
@@ -1400,15 +1469,12 @@ class Isolate {
class PromiseOnStack {
public:
- PromiseOnStack(Handle<JSFunction> function, Handle<JSObject> promise,
- PromiseOnStack* prev)
- : function_(function), promise_(promise), prev_(prev) {}
- Handle<JSFunction> function() { return function_; }
+ PromiseOnStack(Handle<JSObject> promise, PromiseOnStack* prev)
+ : promise_(promise), prev_(prev) {}
Handle<JSObject> promise() { return promise_; }
PromiseOnStack* prev() { return prev_; }
private:
- Handle<JSFunction> function_;
Handle<JSObject> promise_;
PromiseOnStack* prev_;
};
@@ -1419,21 +1485,23 @@ class PromiseOnStack {
// versions of GCC. See V8 issue 122 for details.
class SaveContext BASE_EMBEDDED {
public:
- explicit SaveContext(Isolate* isolate);
- ~SaveContext();
+ explicit inline SaveContext(Isolate* isolate);
+ inline ~SaveContext();
Handle<Context> context() { return context_; }
SaveContext* prev() { return prev_; }
// Returns true if this save context is below a given JavaScript frame.
- bool IsBelowFrame(JavaScriptFrame* frame) {
+ bool IsBelowFrame(StandardFrame* frame) {
return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
+ Isolate* isolate() { return isolate_; }
+
private:
- Isolate* isolate_;
+ Isolate* const isolate_;
Handle<Context> context_;
- SaveContext* prev_;
+ SaveContext* const prev_;
Address c_entry_fp_;
};
@@ -1499,6 +1567,15 @@ class StackLimitCheck BASE_EMBEDDED {
Isolate* isolate_;
};
+#define STACK_CHECK(isolate, result_value) \
+ do { \
+ StackLimitCheck stack_check(isolate); \
+ if (stack_check.HasOverflowed()) { \
+ isolate->Throw(*isolate->factory()->NewRangeError( \
+ MessageTemplate::kStackOverflow)); \
+ return result_value; \
+ } \
+ } while (false)
// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
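
Several macros rewritten above (THROW_NEW_ERROR, RETURN_FAILURE_ON_EXCEPTION, and friends) now cache their argument in a local __isolate__ before using it. A self-contained demo of why: a macro that expands its argument twice also evaluates it twice, which matters when the argument is a call rather than a variable. Isolate, CurrentIsolate, and the counter here are hypothetical; only the macro shapes mirror the patch:

    #include <cstdio>

    static int lookups = 0;
    struct Isolate {
      int* exception() { return nullptr; }
    };
    Isolate* CurrentIsolate() {  // imagine this is expensive or side-effecting
      ++lookups;
      static Isolate isolate;
      return &isolate;
    }

    // The old macro shape expanded its argument twice:
    #define USE_TWICE(isolate)   \
      do {                       \
        (isolate)->exception();  \
        (isolate)->exception();  \
      } while (false)

    // The new shape caches it in a local, so any expression argument is
    // evaluated exactly once (mirroring the __isolate__ pattern above):
    #define USE_ONCE(isolate)             \
      do {                                \
        Isolate* __isolate__ = (isolate); \
        __isolate__->exception();         \
        __isolate__->exception();         \
      } while (false)

    int main() {
      USE_TWICE(CurrentIsolate());
      printf("twice: %d lookups\n", lookups);  // 2
      lookups = 0;
      USE_ONCE(CurrentIsolate());
      printf("once:  %d lookups\n", lookups);  // 1
    }
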
diff --git a/deps/v8/src/js/array-iterator.js b/deps/v8/src/js/array-iterator.js
index b3e25e9adb..227f733a05 100644
--- a/deps/v8/src/js/array-iterator.js
+++ b/deps/v8/src/js/array-iterator.js
@@ -20,13 +20,8 @@ var arrayIteratorObjectSymbol =
var GlobalArray = global.Array;
var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var GlobalTypedArray = global.Uint8Array.__proto__;
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-})
+var GlobalTypedArray = %object_get_prototype_of(global.Uint8Array);
// -----------------------------------------------------------------------
@@ -63,7 +58,7 @@ function ArrayIteratorNext() {
if (!IS_RECEIVER(iterator) ||
!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Array Iterator.prototype.next', this);
}
@@ -111,19 +106,19 @@ function ArrayKeys() {
// TODO(littledan): Check for detached TypedArray in these three methods
function TypedArrayEntries() {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
return %_Call(ArrayEntries, this);
}
function TypedArrayValues() {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
return %_Call(ArrayValues, this);
}
function TypedArrayKeys() {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
return %_Call(ArrayKeys, this);
}
@@ -144,9 +139,11 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
'keys', ArrayKeys
]);
-// TODO(adam): Remove this call once 'values' is in the above
-// InstallFunctions block, as it'll be redundant.
+// TODO(adam): Remove these calls once 'values' is in the above
+// InstallFunctions block, as they'll be redundant.
utils.SetFunctionName(ArrayValues, 'values');
+%FunctionRemovePrototype(ArrayValues);
+%SetNativeFlag(ArrayValues);
%AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
DONT_ENUM);
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 1406df336d..d10e7f18b5 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -11,77 +11,46 @@
// -------------------------------------------------------------------
// Imports
-var AddIndexedProperty;
-var FLAG_harmony_species;
var GetIterator;
var GetMethod;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-var MakeTypeError;
var MaxSimple;
var MinSimple;
-var ObjectDefineProperty;
var ObjectHasOwnProperty;
var ObjectToString = utils.ImportNow("object_to_string");
-var ObserveBeginPerformSplice;
-var ObserveEndPerformSplice;
-var ObserveEnqueueSpliceRecord;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
- AddIndexedProperty = from.AddIndexedProperty;
GetIterator = from.GetIterator;
GetMethod = from.GetMethod;
- MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
- ObjectDefineProperty = from.ObjectDefineProperty;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
- ObserveEndPerformSplice = from.ObserveEndPerformSplice;
- ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
-});
-
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_species = from.FLAG_harmony_species;
});
// -------------------------------------------------------------------
function ArraySpeciesCreate(array, length) {
- var constructor;
- if (FLAG_harmony_species) {
- constructor = %ArraySpeciesConstructor(array);
- } else {
- constructor = GlobalArray;
- }
+ length = INVERT_NEG_ZERO(length);
+ var constructor = %ArraySpeciesConstructor(array);
return new constructor(length);
}
-function DefineIndexedProperty(array, i, value) {
- if (FLAG_harmony_species) {
- var result = ObjectDefineProperty(array, i, {
- value: value, writable: true, configurable: true, enumerable: true
- });
- if (!result) throw MakeTypeError(kStrictCannotAssign, i);
- } else {
- AddIndexedProperty(array, i, value);
- }
-}
-
function KeySortCompare(a, b) {
return a - b;
}
function GetSortedArrayKeys(array, indices) {
if (IS_NUMBER(indices)) {
- var keys = new InternalArray();
// It's an interval
var limit = indices;
+ var keys = new InternalArray();
for (var i = 0; i < limit; ++i) {
var e = array[i];
if (!IS_UNDEFINED(e) || i in array) {
@@ -94,26 +63,24 @@ function GetSortedArrayKeys(array, indices) {
}
-function SparseJoinWithSeparatorJS(array, keys, length, convert, separator) {
+function SparseJoinWithSeparatorJS(array, keys, length, use_locale, separator) {
var keys_length = keys.length;
var elements = new InternalArray(keys_length * 2);
for (var i = 0; i < keys_length; i++) {
var key = keys[i];
- var e = array[key];
elements[i * 2] = key;
- elements[i * 2 + 1] = IS_STRING(e) ? e : convert(e);
+ elements[i * 2 + 1] = ConvertToString(use_locale, array[key]);
}
return %SparseJoinWithSeparator(elements, length, separator);
}
// Optimized for sparse arrays if separator is ''.
-function SparseJoin(array, keys, convert) {
+function SparseJoin(array, keys, use_locale) {
var keys_length = keys.length;
var elements = new InternalArray(keys_length);
for (var i = 0; i < keys_length; i++) {
- var e = array[keys[i]];
- elements[i] = IS_STRING(e) ? e : convert(e);
+ elements[i] = ConvertToString(use_locale, array[keys[i]]);
}
return %StringBuilderConcat(elements, keys_length, '');
}
@@ -123,8 +90,7 @@ function UseSparseVariant(array, length, is_array, touched) {
// Only use the sparse variant on arrays that are likely to be sparse and where
// the number of elements touched in the operation is relatively small compared
// to the overall size of the array.
- if (!is_array || length < 1000 || %IsObserved(array) ||
- %HasComplexElements(array)) {
+ if (!is_array || length < 1000 || %HasComplexElements(array)) {
return false;
}
if (!%_IsSmi(length)) {
@@ -167,60 +133,38 @@ function StackHas(stack, v) {
// join invocations.
var visited_arrays = new Stack();
-function DoJoin(array, length, is_array, separator, convert) {
+function DoJoin(array, length, is_array, separator, use_locale) {
if (UseSparseVariant(array, length, is_array, length)) {
%NormalizeElements(array);
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, length));
if (separator === '') {
if (keys.length === 0) return '';
- return SparseJoin(array, keys, convert);
+ return SparseJoin(array, keys, use_locale);
} else {
- return SparseJoinWithSeparatorJS(array, keys, length, convert, separator);
+ return SparseJoinWithSeparatorJS(
+ array, keys, length, use_locale, separator);
}
}
// Fast case for one-element arrays.
if (length === 1) {
- var e = array[0];
- return IS_STRING(e) ? e : convert(e);
+ return ConvertToString(use_locale, array[0]);
}
// Construct an array for the elements.
var elements = new InternalArray(length);
+ for (var i = 0; i < length; i++) {
+ elements[i] = ConvertToString(use_locale, array[i]);
+ }
- // We pull the empty separator check outside the loop for speed!
if (separator === '') {
- for (var i = 0; i < length; i++) {
- var e = array[i];
- elements[i] = IS_STRING(e) ? e : convert(e);
- }
return %StringBuilderConcat(elements, length, '');
- }
- // Non-empty separator case.
- // If the first element is a number then use the heuristic that the
- // remaining elements are also likely to be numbers.
- var e = array[0];
- if (IS_NUMBER(e)) {
- elements[0] = %_NumberToString(e);
- for (var i = 1; i < length; i++) {
- e = array[i];
- if (IS_NUMBER(e)) {
- elements[i] = %_NumberToString(e);
- } else {
- elements[i] = IS_STRING(e) ? e : convert(e);
- }
- }
} else {
- elements[0] = IS_STRING(e) ? e : convert(e);
- for (var i = 1; i < length; i++) {
- e = array[i];
- elements[i] = IS_STRING(e) ? e : convert(e);
- }
+ return %StringBuilderJoin(elements, length, separator);
}
- return %StringBuilderJoin(elements, length, separator);
}
-function Join(array, length, separator, convert) {
+function Join(array, length, separator, use_locale) {
if (length === 0) return '';
var is_array = IS_ARRAY(array);
@@ -234,7 +178,7 @@ function Join(array, length, separator, convert) {
// Attempt to convert the elements.
try {
- return DoJoin(array, length, is_array, separator, convert);
+ return DoJoin(array, length, is_array, separator, use_locale);
} finally {
// Make sure to remove the last element of the visited array no
// matter what happens.
@@ -243,15 +187,9 @@ function Join(array, length, separator, convert) {
}
-function ConvertToString(x) {
+function ConvertToString(use_locale, x) {
if (IS_NULL_OR_UNDEFINED(x)) return '';
- return TO_STRING(x);
-}
-
-
-function ConvertToLocaleString(e) {
- if (IS_NULL_OR_UNDEFINED(e)) return '';
- return TO_STRING(e.toLocaleString());
+ return TO_STRING(use_locale ? x.toLocaleString() : x);
}
@@ -265,7 +203,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = start_i; i < limit; ++i) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
- DefineIndexedProperty(deleted_elements, i - start_i, current);
+ %CreateDataProperty(deleted_elements, i - start_i, current);
}
}
} else {
@@ -275,7 +213,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
if (key >= start_i) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
- DefineIndexedProperty(deleted_elements, key - start_i, current);
+ %CreateDataProperty(deleted_elements, key - start_i, current);
}
}
}
@@ -347,19 +285,17 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// because the receiver is not an array (so we have no choice) or because we
// know we are not deleting or moving a lot of elements.
function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
- var is_array = IS_ARRAY(array);
for (var i = 0; i < del_count; i++) {
var index = start_i + i;
- if (HAS_INDEX(array, index, is_array)) {
+ if (index in array) {
var current = array[index];
- DefineIndexedProperty(deleted_elements, i, current);
+ %CreateDataProperty(deleted_elements, i, current);
}
}
}
function SimpleMove(array, start_i, del_count, len, num_additional_args) {
- var is_array = IS_ARRAY(array);
if (num_additional_args !== del_count) {
// Move the existing elements after the elements to be deleted
// to the right position in the resulting array.
@@ -367,7 +303,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
for (var i = len - del_count; i > start_i; i--) {
var from_index = i + del_count - 1;
var to_index = i + num_additional_args - 1;
- if (HAS_INDEX(array, from_index, is_array)) {
+ if (from_index in array) {
array[to_index] = array[from_index];
} else {
delete array[to_index];
@@ -377,7 +313,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
for (var i = start_i; i < len - del_count; i++) {
var from_index = i + del_count;
var to_index = i + num_additional_args;
- if (HAS_INDEX(array, from_index, is_array)) {
+ if (from_index in array) {
array[to_index] = array[from_index];
} else {
delete array[to_index];
@@ -400,7 +336,7 @@ function ArrayToString() {
if (IS_ARRAY(this)) {
func = this.join;
if (func === ArrayJoin) {
- return Join(this, this.length, ',', ConvertToString);
+ return Join(this, this.length, ',', false);
}
array = this;
} else {
@@ -415,9 +351,7 @@ function ArrayToString() {
function InnerArrayToLocaleString(array, length) {
- var len = TO_LENGTH(length);
- if (len === 0) return "";
- return Join(array, len, ',', ConvertToLocaleString);
+ return Join(array, TO_LENGTH(length), ',', true);
}
@@ -442,7 +376,7 @@ function InnerArrayJoin(separator, array, length) {
return TO_STRING(e);
}
- return Join(array, length, separator, ConvertToString);
+ return Join(array, length, separator, false);
}
@@ -456,23 +390,6 @@ function ArrayJoin(separator) {
}
-function ObservedArrayPop(n) {
- n--;
- var value = this[n];
-
- try {
- ObserveBeginPerformSplice(this);
- delete this[n];
- this.length = n;
- } finally {
- ObserveEndPerformSplice(this);
- ObserveEnqueueSpliceRecord(this, n, [value], 0);
- }
-
- return value;
-}
-
-
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
@@ -485,9 +402,6 @@ function ArrayPop() {
return;
}
- if (%IsObserved(array))
- return ObservedArrayPop.call(array, n);
-
n--;
var value = array[n];
%DeleteProperty_Strict(array, n);
@@ -496,46 +410,19 @@ function ArrayPop() {
}
-function ObservedArrayPush() {
- var n = TO_LENGTH(this.length);
- var m = arguments.length;
-
- try {
- ObserveBeginPerformSplice(this);
- for (var i = 0; i < m; i++) {
- this[i+n] = arguments[i];
- }
- var new_length = n + m;
- this.length = new_length;
- } finally {
- ObserveEndPerformSplice(this);
- ObserveEnqueueSpliceRecord(this, n, [], m);
- }
-
- return new_length;
-}
-
-
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.push");
- if (%IsObserved(this))
- return ObservedArrayPush.apply(this, arguments);
-
var array = TO_OBJECT(this);
var n = TO_LENGTH(array.length);
var m = arguments.length;
- // It appears that there is no enforced, absolute limit on the number of
- // arguments, but it would surely blow the stack to use 2**30 or more.
- // To avoid integer overflow, do the comparison to the max safe integer
- // after subtracting 2**30 from both sides. (2**31 would seem like a
- // natural value, but it is negative in JS, and 2**32 is 1.)
- if (m > (1 << 30) || (n - (1 << 30)) + m > kMaxSafeInteger - (1 << 30)) {
- throw MakeTypeError(kPushPastSafeLength, m, n);
- }
+ // Subtract n from kMaxSafeInteger rather than testing m + n >
+  // kMaxSafeInteger. n may already be kMaxSafeInteger, in which case adding
+  // even 1 would not be safe.
+ if (m > kMaxSafeInteger - n) throw %make_type_error(kPushPastSafeLength, m, n);
for (var i = 0; i < m; i++) {
array[i+n] = arguments[i];
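
The rearranged guard rests on two floating-point facts: the difference of two safe integers is always exact, while a sum may land beyond 2**53, where integers start to round. A quick shell check, with kMax playing the role of kMaxSafeInteger:

    var kMax = Math.pow(2, 53) - 1;            // Number.MAX_SAFE_INTEGER
    Math.pow(2, 53) + 1 === Math.pow(2, 53);   // true: sums past 2**53 round
    kMax - 7 === 9007199254740984;             // true: the subtraction is exact
    // Hence `m > kMax - n` is reliable even when n is already kMax.
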
@@ -646,22 +533,6 @@ function ArrayReverse() {
}
-function ObservedArrayShift(len) {
- var first = this[0];
-
- try {
- ObserveBeginPerformSplice(this);
- SimpleMove(this, 0, 1, len, 0);
- this.length = len - 1;
- } finally {
- ObserveEndPerformSplice(this);
- ObserveEnqueueSpliceRecord(this, 0, [first], 0);
- }
-
- return first;
-}
-
-
function ArrayShift() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
@@ -673,10 +544,7 @@ function ArrayShift() {
return;
}
- if (%object_is_sealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
-
- if (%IsObserved(array))
- return ObservedArrayShift.call(array, len);
+ if (%object_is_sealed(array)) throw %make_type_error(kArrayFunctionsOnSealed);
var first = array[0];
@@ -692,33 +560,9 @@ function ArrayShift() {
}
-function ObservedArrayUnshift() {
- var len = TO_LENGTH(this.length);
- var num_arguments = arguments.length;
-
- try {
- ObserveBeginPerformSplice(this);
- SimpleMove(this, 0, 0, len, num_arguments);
- for (var i = 0; i < num_arguments; i++) {
- this[i] = arguments[i];
- }
- var new_length = len + num_arguments;
- this.length = new_length;
- } finally {
- ObserveEndPerformSplice(this);
- ObserveEnqueueSpliceRecord(this, 0, [], num_arguments);
- }
-
- return new_length;
-}
-
-
function ArrayUnshift(arg1) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.unshift");
- if (%IsObserved(this))
- return ObservedArrayUnshift.apply(this, arguments);
-
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
var num_arguments = arguments.length;
@@ -770,7 +614,7 @@ function ArraySlice(start, end) {
if (UseSparseVariant(array, len, IS_ARRAY(array), end_i - start_i)) {
%NormalizeElements(array);
- %NormalizeElements(result);
+ if (IS_ARRAY(result)) %NormalizeElements(result);
SparseSlice(array, start_i, end_i - start_i, len, result);
} else {
SimpleSlice(array, start_i, end_i - start_i, len, result);
@@ -813,53 +657,9 @@ function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
}
-function ObservedArraySplice(start, delete_count) {
- var num_arguments = arguments.length;
- var len = TO_LENGTH(this.length);
- var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
- var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
- start_i);
- var deleted_elements = [];
- deleted_elements.length = del_count;
- var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
-
- try {
- ObserveBeginPerformSplice(this);
-
- SimpleSlice(this, start_i, del_count, len, deleted_elements);
- SimpleMove(this, start_i, del_count, len, num_elements_to_add);
-
- // Insert the arguments into the resulting array in
- // place of the deleted elements.
- var i = start_i;
- var arguments_index = 2;
- var arguments_length = arguments.length;
- while (arguments_index < arguments_length) {
- this[i++] = arguments[arguments_index++];
- }
- this.length = len - del_count + num_elements_to_add;
-
- } finally {
- ObserveEndPerformSplice(this);
- if (deleted_elements.length || num_elements_to_add) {
- ObserveEnqueueSpliceRecord(this,
- start_i,
- deleted_elements.slice(),
- num_elements_to_add);
- }
- }
-
- // Return the deleted elements.
- return deleted_elements;
-}
-
-
function ArraySplice(start, delete_count) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.splice");
- if (%IsObserved(this))
- return ObservedArraySplice.apply(this, arguments);
-
var num_arguments = arguments.length;
var array = TO_OBJECT(this);
var len = TO_LENGTH(array.length);
@@ -871,9 +671,9 @@ function ArraySplice(start, delete_count) {
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
if (del_count != num_elements_to_add && %object_is_sealed(array)) {
- throw MakeTypeError(kArrayFunctionsOnSealed);
+ throw %make_type_error(kArrayFunctionsOnSealed);
} else if (del_count > 0 && %object_is_frozen(array)) {
- throw MakeTypeError(kArrayFunctionsOnFrozen);
+ throw %make_type_error(kArrayFunctionsOnFrozen);
}
var changed_elements = del_count;
@@ -884,7 +684,7 @@ function ArraySplice(start, delete_count) {
}
if (UseSparseVariant(array, len, IS_ARRAY(array), changed_elements)) {
%NormalizeElements(array);
- %NormalizeElements(deleted_elements);
+ if (IS_ARRAY(deleted_elements)) %NormalizeElements(deleted_elements);
SparseSlice(array, start_i, del_count, len, deleted_elements);
SparseMove(array, start_i, del_count, len, num_elements_to_add);
} else {
@@ -1048,7 +848,8 @@ function InnerArraySort(array, length, comparefn) {
// of a prototype property.
var CopyFromPrototype = function CopyFromPrototype(obj, length) {
var max = 0;
- for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
+ for (var proto = %object_get_prototype_of(obj); proto;
+ proto = %object_get_prototype_of(proto)) {
var indices = IS_PROXY(proto) ? length : %GetArrayKeys(proto, length);
if (IS_NUMBER(indices)) {
// It's an interval.
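
CopyFromPrototype and its counterpart ShadowPrototypeElements exist because sorting a holey array can observe inherited elements. A sketch of the effect — V8-specific, since the spec leaves holes plus inherited elements largely implementation-defined:

    Array.prototype[0] = 'z';
    var a = [, 'b'];   // hole at index 0
    a.sort();          // the hole reads 'z' through the prototype chain
    a;                 // typically ['b', 'z']: the inherited value was copied
                       // in, sorted, and now shadows Array.prototype[0]
    delete Array.prototype[0];
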
@@ -1076,7 +877,8 @@ function InnerArraySort(array, length, comparefn) {
// where a prototype of obj has an element. I.e., shadow all prototype
// elements in that range.
var ShadowPrototypeElements = function(obj, from, to) {
- for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
+ for (var proto = %object_get_prototype_of(obj); proto;
+ proto = %object_get_prototype_of(proto)) {
var indices = IS_PROXY(proto) ? to : %GetArrayKeys(proto, to);
if (IS_NUMBER(indices)) {
// It's an interval.
@@ -1143,7 +945,7 @@ function InnerArraySort(array, length, comparefn) {
}
for (i = length - num_holes; i < length; i++) {
// For compatibility with WebKit, do not expose elements in the prototype.
- if (i in %_GetPrototype(obj)) {
+ if (i in %object_get_prototype_of(obj)) {
obj[i] = UNDEFINED;
} else {
delete obj[i];
@@ -1174,9 +976,9 @@ function InnerArraySort(array, length, comparefn) {
var num_non_undefined = %RemoveArrayHoles(array, length);
if (num_non_undefined == -1) {
- // The array is observed, or there were indexed accessors in the array.
+ // There were indexed accessors in the array.
// Move array holes and undefineds to the end using a JavaScript function
- // that is safe in the presence of accessors and is observable.
+ // that is safe in the presence of accessors.
num_non_undefined = SafeRemoveArrayHoles(array);
}
@@ -1206,12 +1008,11 @@ function ArraySort(comparefn) {
// or delete elements from the array.
function InnerArrayFilter(f, receiver, array, length, result) {
var result_length = 0;
- var is_array = IS_ARRAY(array);
for (var i = 0; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
if (%_Call(f, receiver, element, i, array)) {
- DefineIndexedProperty(result, result_length, element);
+ %CreateDataProperty(result, result_length, element);
result_length++;
}
}
@@ -1228,26 +1029,25 @@ function ArrayFilter(f, receiver) {
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
var length = TO_LENGTH(array.length);
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
var result = ArraySpeciesCreate(array, 0);
return InnerArrayFilter(f, receiver, array, length, result);
}
function InnerArrayForEach(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var is_array = IS_ARRAY(array);
if (IS_UNDEFINED(receiver)) {
for (var i = 0; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
f(element, i, array);
}
}
} else {
for (var i = 0; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
%_Call(f, receiver, element, i, array);
}
@@ -1268,11 +1068,10 @@ function ArrayForEach(f, receiver) {
function InnerArraySome(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var is_array = IS_ARRAY(array);
for (var i = 0; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
if (%_Call(f, receiver, element, i, array)) return true;
}
@@ -1295,11 +1094,10 @@ function ArraySome(f, receiver) {
function InnerArrayEvery(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var is_array = IS_ARRAY(array);
for (var i = 0; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
if (!%_Call(f, receiver, element, i, array)) return false;
}
@@ -1325,91 +1123,24 @@ function ArrayMap(f, receiver) {
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
var length = TO_LENGTH(array.length);
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
var result = ArraySpeciesCreate(array, length);
- var is_array = IS_ARRAY(array);
for (var i = 0; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
- DefineIndexedProperty(result, i, %_Call(f, receiver, element, i, array));
+ %CreateDataProperty(result, i, %_Call(f, receiver, element, i, array));
}
}
return result;
}
-// For .indexOf, we don't need to pass in the number of arguments
-// at the callsite since ToInteger(undefined) == 0; however, for
-// .lastIndexOf, we need to pass it, since the behavior for passing
-// undefined is 0 but for not including the argument is length-1.
-function InnerArrayIndexOf(array, element, index, length) {
- if (length == 0) return -1;
- if (IS_UNDEFINED(index)) {
- index = 0;
- } else {
- index = TO_INTEGER(index) + 0; // Add 0 to convert -0 to 0
- // If index is negative, index from the end of the array.
- if (index < 0) {
- index = length + index;
- // If index is still negative, search the entire array.
- if (index < 0) index = 0;
- }
- }
- var min = index;
- var max = length;
- if (UseSparseVariant(array, length, IS_ARRAY(array), max - min)) {
- %NormalizeElements(array);
- var indices = %GetArrayKeys(array, length);
- if (IS_NUMBER(indices)) {
- // It's an interval.
- max = indices; // Capped by length already.
- // Fall through to loop below.
- } else {
- if (indices.length == 0) return -1;
- // Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(array, indices);
- var n = sortedKeys.length;
- var i = 0;
- while (i < n && sortedKeys[i] < index) i++;
- while (i < n) {
- var key = sortedKeys[i];
- if (array[key] === element) return key;
- i++;
- }
- return -1;
- }
- }
- // Lookup through the array.
- if (!IS_UNDEFINED(element)) {
- for (var i = min; i < max; i++) {
- if (array[i] === element) return i;
- }
- return -1;
- }
- // Lookup through the array.
- for (var i = min; i < max; i++) {
- if (IS_UNDEFINED(array[i]) && i in array) {
- return i;
- }
- }
- return -1;
-}
-
-
-function ArrayIndexOf(element, index) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
-
- var length = TO_LENGTH(this.length);
- return InnerArrayIndexOf(this, element, index, length);
-}
-
-
function InnerArrayLastIndexOf(array, element, index, length, argumentsLength) {
if (length == 0) return -1;
if (argumentsLength < 2) {
index = length - 1;
} else {
- index = TO_INTEGER(index) + 0; // Add 0 to convert -0 to 0
+ index = INVERT_NEG_ZERO(TO_INTEGER(index));
// If index is negative, index from end of the array.
if (index < 0) index += length;
// If index is still negative, do not search the array.
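
The deleted InnerArrayIndexOf comment explains the asymmetry that survives here: indexOf may treat an absent fromIndex like undefined (both become 0), but lastIndexOf must distinguish them, so argumentsLength is still threaded through. INVERT_NEG_ZERO replaces the old "+ 0" idiom; both map -0 to +0 before it can be used as a start index:

    var a = ['x', 'y', 'x'];
    a.indexOf('x', undefined);      // 0: same as omitting the argument
    a.lastIndexOf('x');             // 2: absent index means "start at length - 1"
    a.lastIndexOf('x', undefined);  // 0: undefined coerces to 0
    Object.is(-0 + 0, 0);           // true: the arithmetic the macro replaces
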
@@ -1465,23 +1196,22 @@ function ArrayLastIndexOf(element, index) {
function InnerArrayReduce(callback, current, array, length, argumentsLength) {
if (!IS_CALLABLE(callback)) {
- throw MakeTypeError(kCalledNonCallable, callback);
+ throw %make_type_error(kCalledNonCallable, callback);
}
- var is_array = IS_ARRAY(array);
var i = 0;
find_initial: if (argumentsLength < 2) {
for (; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
current = array[i++];
break find_initial;
}
}
- throw MakeTypeError(kReduceNoInitial);
+ throw %make_type_error(kReduceNoInitial);
}
for (; i < length; i++) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
current = callback(current, element, i, array);
}
@@ -1505,23 +1235,22 @@ function ArrayReduce(callback, current) {
function InnerArrayReduceRight(callback, current, array, length,
argumentsLength) {
if (!IS_CALLABLE(callback)) {
- throw MakeTypeError(kCalledNonCallable, callback);
+ throw %make_type_error(kCalledNonCallable, callback);
}
- var is_array = IS_ARRAY(array);
var i = length - 1;
find_initial: if (argumentsLength < 2) {
for (; i >= 0; i--) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
current = array[i--];
break find_initial;
}
}
- throw MakeTypeError(kReduceNoInitial);
+ throw %make_type_error(kReduceNoInitial);
}
for (; i >= 0; i--) {
- if (HAS_INDEX(array, i, is_array)) {
+ if (i in array) {
var element = array[i];
current = callback(current, element, i, array);
}
@@ -1603,7 +1332,7 @@ function ArrayCopyWithin(target, start, end) {
function InnerArrayFind(predicate, thisArg, array, length) {
if (!IS_CALLABLE(predicate)) {
- throw MakeTypeError(kCalledNonCallable, predicate);
+ throw %make_type_error(kCalledNonCallable, predicate);
}
for (var i = 0; i < length; i++) {
@@ -1630,7 +1359,7 @@ function ArrayFind(predicate, thisArg) {
function InnerArrayFindIndex(predicate, thisArg, array, length) {
if (!IS_CALLABLE(predicate)) {
- throw MakeTypeError(kCalledNonCallable, predicate);
+ throw %make_type_error(kCalledNonCallable, predicate);
}
for (var i = 0; i < length; i++) {
@@ -1675,7 +1404,7 @@ function InnerArrayFill(value, start, end, array, length) {
}
if ((end - i) > 0 && %object_is_frozen(array)) {
- throw MakeTypeError(kArrayFunctionsOnFrozen);
+ throw %make_type_error(kArrayFunctionsOnFrozen);
}
for (; i < end; i++)
@@ -1695,58 +1424,6 @@ function ArrayFill(value, start, end) {
}
-function InnerArrayIncludes(searchElement, fromIndex, array, length) {
- if (length === 0) {
- return false;
- }
-
- var n = TO_INTEGER(fromIndex);
-
- var k;
- if (n >= 0) {
- k = n;
- } else {
- k = length + n;
- if (k < 0) {
- k = 0;
- }
- }
-
- while (k < length) {
- var elementK = array[k];
- if (%SameValueZero(searchElement, elementK)) {
- return true;
- }
-
- ++k;
- }
-
- return false;
-}
-
-
-// ES2016 draft, section 22.1.3.11
-function ArrayIncludes(searchElement, fromIndex) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");
-
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
-
- return InnerArrayIncludes(searchElement, fromIndex, array, length);
-}
-
-
-function AddArrayElement(constructor, array, i, value) {
- if (constructor === GlobalArray) {
- AddIndexedProperty(array, i, value);
- } else {
- ObjectDefineProperty(array, i, {
- value: value, writable: true, configurable: true, enumerable: true
- });
- }
-}
-
-
// ES6, draft 10-14-14, section 22.1.2.1
function ArrayFrom(arrayLike, mapfn, receiver) {
var items = TO_OBJECT(arrayLike);
@@ -1754,7 +1431,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
if (mapping) {
if (!IS_CALLABLE(mapfn)) {
- throw MakeTypeError(kCalledNonCallable, mapfn);
+ throw %make_type_error(kCalledNonCallable, mapfn);
}
}
@@ -1775,7 +1452,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
} else {
mappedValue = nextValue;
}
- AddArrayElement(this, result, k, mappedValue);
+ %CreateDataProperty(result, k, mappedValue);
k++;
}
result.length = k;
@@ -1791,7 +1468,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
} else {
mappedValue = nextValue;
}
- AddArrayElement(this, result, k, mappedValue);
+ %CreateDataProperty(result, k, mappedValue);
}
result.length = k;
@@ -1807,12 +1484,18 @@ function ArrayOf(...args) {
// TODO: Implement IsConstructor (ES6 section 7.2.5)
var array = %IsConstructor(constructor) ? new constructor(length) : [];
for (var i = 0; i < length; i++) {
- AddArrayElement(constructor, array, i, args[i]);
+ %CreateDataProperty(array, i, args[i]);
}
array.length = length;
return array;
}
+
+function ArraySpecies() {
+ return this;
+}
+
+
// -------------------------------------------------------------------
// Set up non-enumerable constructor property on the Array.prototype
@@ -1828,6 +1511,7 @@ var unscopables = {
fill: true,
find: true,
findIndex: true,
+ includes: true,
keys: true,
};
@@ -1876,7 +1560,7 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"some", getFunction("some", ArraySome, 1),
"every", getFunction("every", ArrayEvery, 1),
"map", getFunction("map", ArrayMap, 1),
- "indexOf", getFunction("indexOf", ArrayIndexOf, 1),
+ "indexOf", getFunction("indexOf", null, 1),
"lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
"reduce", getFunction("reduce", ArrayReduce, 1),
"reduceRight", getFunction("reduceRight", ArrayReduceRight, 1),
@@ -1884,16 +1568,18 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"find", getFunction("find", ArrayFind, 1),
"findIndex", getFunction("findIndex", ArrayFindIndex, 1),
"fill", getFunction("fill", ArrayFill, 1),
- "includes", getFunction("includes", ArrayIncludes, 1),
+ "includes", getFunction("includes", null, 1)
]);
+utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies);
+
%FinishArrayPrototypeSetup(GlobalArray.prototype);
// The internal Array prototype doesn't need to be fancy, since it's never
// exposed to user code.
// Adding only the functions that are actually used.
utils.SetUpLockedPrototype(InternalArray, GlobalArray(), [
- "indexOf", getFunction("indexOf", ArrayIndexOf),
+ "indexOf", getFunction("indexOf", null),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
@@ -1925,7 +1611,6 @@ utils.SetUpLockedPrototype(extrasUtils.InternalPackedArray, GlobalArray(), [
utils.Export(function(to) {
to.ArrayFrom = ArrayFrom;
- to.ArrayIndexOf = ArrayIndexOf;
to.ArrayJoin = ArrayJoin;
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
@@ -1936,8 +1621,6 @@ utils.Export(function(to) {
to.InnerArrayFind = InnerArrayFind;
to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayForEach = InnerArrayForEach;
- to.InnerArrayIncludes = InnerArrayIncludes;
- to.InnerArrayIndexOf = InnerArrayIndexOf;
to.InnerArrayJoin = InnerArrayJoin;
to.InnerArrayLastIndexOf = InnerArrayLastIndexOf;
to.InnerArrayReduce = InnerArrayReduce;
@@ -1946,10 +1629,6 @@ utils.Export(function(to) {
to.InnerArraySort = InnerArraySort;
to.InnerArrayToLocaleString = InnerArrayToLocaleString;
to.PackedArrayReverse = PackedArrayReverse;
- to.Stack = Stack;
- to.StackHas = StackHas;
- to.StackPush = StackPush;
- to.StackPop = StackPop;
});
%InstallToContext([
diff --git a/deps/v8/src/js/arraybuffer.js b/deps/v8/src/js/arraybuffer.js
index f0273c71ed..a1ff03daee 100644
--- a/deps/v8/src/js/arraybuffer.js
+++ b/deps/v8/src/js/arraybuffer.js
@@ -12,13 +12,12 @@
// Imports
var GlobalArrayBuffer = global.ArrayBuffer;
-var MakeTypeError;
var MaxSimple;
var MinSimple;
var SpeciesConstructor;
+var speciesSymbol = utils.ImportNow("species_symbol");
utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
SpeciesConstructor = from.SpeciesConstructor;
@@ -26,18 +25,10 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
-function ArrayBufferGetByteLen() {
- if (!IS_ARRAYBUFFER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'ArrayBuffer.prototype.byteLength', this);
- }
- return %_ArrayBufferGetByteLength(this);
-}
-
// ES6 Draft 15.13.5.5.3
function ArrayBufferSlice(start, end) {
if (!IS_ARRAYBUFFER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'ArrayBuffer.prototype.slice', this);
}
@@ -67,23 +58,29 @@ function ArrayBufferSlice(start, end) {
var constructor = SpeciesConstructor(this, GlobalArrayBuffer, true);
var result = new constructor(newLen);
if (!IS_ARRAYBUFFER(result)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'ArrayBuffer.prototype.slice', result);
}
- // TODO(littledan): Check for a detached ArrayBuffer
+  // Checks for detached source/target ArrayBuffers are done inside
+ // %ArrayBufferSliceImpl; the reordering of checks does not violate
+ // the spec because all exceptions thrown are TypeErrors.
if (result === this) {
- throw MakeTypeError(kArrayBufferSpeciesThis);
+ throw %make_type_error(kArrayBufferSpeciesThis);
}
if (%_ArrayBufferGetByteLength(result) < newLen) {
- throw MakeTypeError(kArrayBufferTooShort);
+ throw %make_type_error(kArrayBufferTooShort);
}
%ArrayBufferSliceImpl(this, result, first, newLen);
return result;
}
-utils.InstallGetter(GlobalArrayBuffer.prototype, "byteLength",
- ArrayBufferGetByteLen);
+
+function ArrayBufferSpecies() {
+ return this;
+}
+
+utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies);
utils.InstallFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
"slice", ArrayBufferSlice
diff --git a/deps/v8/src/js/collection-iterator.js b/deps/v8/src/js/collection-iterator.js
index 621d7266fc..173f273f9b 100644
--- a/deps/v8/src/js/collection-iterator.js
+++ b/deps/v8/src/js/collection-iterator.js
@@ -14,15 +14,10 @@
var GlobalMap = global.Map;
var GlobalSet = global.Set;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MakeTypeError;
var MapIterator = utils.ImportNow("MapIterator");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
var SetIterator = utils.ImportNow("SetIterator");
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
// -------------------------------------------------------------------
function SetIteratorConstructor(set, kind) {
@@ -32,7 +27,7 @@ function SetIteratorConstructor(set, kind) {
function SetIteratorNextJS() {
if (!IS_SET_ITERATOR(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Set Iterator.prototype.next', this);
}
@@ -57,7 +52,7 @@ function SetIteratorNextJS() {
function SetEntries() {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Set.prototype.entries', this);
}
return new SetIterator(this, ITERATOR_KIND_ENTRIES);
@@ -66,7 +61,7 @@ function SetEntries() {
function SetValues() {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Set.prototype.values', this);
}
return new SetIterator(this, ITERATOR_KIND_VALUES);
@@ -100,7 +95,7 @@ function MapIteratorConstructor(map, kind) {
function MapIteratorNextJS() {
if (!IS_MAP_ITERATOR(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map Iterator.prototype.next', this);
}
@@ -126,7 +121,7 @@ function MapIteratorNextJS() {
function MapEntries() {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.entries', this);
}
return new MapIterator(this, ITERATOR_KIND_ENTRIES);
@@ -135,7 +130,7 @@ function MapEntries() {
function MapKeys() {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.keys', this);
}
return new MapIterator(this, ITERATOR_KIND_KEYS);
@@ -144,7 +139,7 @@ function MapKeys() {
function MapValues() {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.values', this);
}
return new MapIterator(this, ITERATOR_KIND_VALUES);
diff --git a/deps/v8/src/js/collection.js b/deps/v8/src/js/collection.js
index 0d7195d53e..83763af860 100644
--- a/deps/v8/src/js/collection.js
+++ b/deps/v8/src/js/collection.js
@@ -14,16 +14,15 @@ var GlobalMap = global.Map;
var GlobalObject = global.Object;
var GlobalSet = global.Set;
var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
-var IntRandom;
-var MakeTypeError;
+var MathRandom;
var MapIterator;
var NumberIsNaN;
var SetIterator;
+var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- IntRandom = from.IntRandom;
- MakeTypeError = from.MakeTypeError;
+ MathRandom = from.MathRandom;
MapIterator = from.MapIterator;
NumberIsNaN = from.NumberIsNaN;
SetIterator = from.SetIterator;
@@ -112,7 +111,7 @@ function GetExistingHash(key) {
function GetHash(key) {
var hash = GetExistingHash(key);
if (IS_UNDEFINED(hash)) {
- hash = IntRandom() | 0;
+ hash = (MathRandom() * 0x40000000) | 0;
if (hash === 0) hash = 1;
SET_PRIVATE(key, hashCodeSymbol, hash);
}
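
With IntRandom gone, the hash draw is inlined against MathRandom. Assuming MathRandom behaves like Math.random, the expression yields a uniform 30-bit integer, with 0 remapped to 1 because 0 means "no hash assigned":

    function drawHash() {
      var hash = (Math.random() * 0x40000000) | 0;  // uniform in [0, 2**30)
      if (hash === 0) hash = 1;                     // 0 is the "no hash" sentinel
      return hash;
    }
    var h = drawHash();
    h >= 1 && h < 0x40000000;  // true
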
@@ -126,7 +125,7 @@ function GetHash(key) {
function SetConstructor(iterable) {
if (IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kConstructorNotFunction, "Set");
+ throw %make_type_error(kConstructorNotFunction, "Set");
}
%_SetInitialize(this);
@@ -134,7 +133,7 @@ function SetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, adder, 'add', this);
+ throw %make_type_error(kPropertyNotFunction, adder, 'add', this);
}
for (var value of iterable) {
@@ -146,7 +145,7 @@ function SetConstructor(iterable) {
function SetAdd(key) {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver, 'Set.prototype.add', this);
+ throw %make_type_error(kIncompatibleMethodReceiver, 'Set.prototype.add', this);
}
// Normalize -0 to +0 as required by the spec.
// Even though we use SameValueZero as the comparison for the keys we don't
@@ -186,7 +185,7 @@ function SetAdd(key) {
function SetHas(key) {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver, 'Set.prototype.has', this);
+ throw %make_type_error(kIncompatibleMethodReceiver, 'Set.prototype.has', this);
}
var table = %_JSCollectionGetTable(this);
var numBuckets = ORDERED_HASH_TABLE_BUCKET_COUNT(table);
@@ -198,7 +197,7 @@ function SetHas(key) {
function SetDelete(key) {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Set.prototype.delete', this);
}
var table = %_JSCollectionGetTable(this);
@@ -221,7 +220,7 @@ function SetDelete(key) {
function SetGetSize() {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Set.prototype.size', this);
}
var table = %_JSCollectionGetTable(this);
@@ -231,7 +230,7 @@ function SetGetSize() {
function SetClearJS() {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Set.prototype.clear', this);
}
%_SetClear(this);
@@ -240,11 +239,11 @@ function SetClearJS() {
function SetForEach(f, receiver) {
if (!IS_SET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Set.prototype.forEach', this);
}
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
var key;
@@ -255,6 +254,12 @@ function SetForEach(f, receiver) {
}
}
+
+function SetSpecies() {
+ return this;
+}
+
+
// -------------------------------------------------------------------
%SetCode(GlobalSet, SetConstructor);
@@ -266,6 +271,8 @@ function SetForEach(f, receiver) {
%FunctionSetLength(SetForEach, 1);
+utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies);
+
// Set up the non-enumerable functions on the Set prototype object.
utils.InstallGetter(GlobalSet.prototype, "size", SetGetSize);
utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
@@ -282,7 +289,7 @@ utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
function MapConstructor(iterable) {
if (IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kConstructorNotFunction, "Map");
+ throw %make_type_error(kConstructorNotFunction, "Map");
}
%_MapInitialize(this);
@@ -290,12 +297,12 @@ function MapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, adder, 'set', this);
+ throw %make_type_error(kPropertyNotFunction, adder, 'set', this);
}
for (var nextItem of iterable) {
if (!IS_RECEIVER(nextItem)) {
- throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
+ throw %make_type_error(kIteratorValueNotAnObject, nextItem);
}
%_Call(adder, this, nextItem[0], nextItem[1]);
}
@@ -305,7 +312,7 @@ function MapConstructor(iterable) {
function MapGet(key) {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.get', this);
}
var table = %_JSCollectionGetTable(this);
@@ -320,7 +327,7 @@ function MapGet(key) {
function MapSet(key, value) {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.set', this);
}
// Normalize -0 to +0 as required by the spec.
@@ -368,7 +375,7 @@ function MapSet(key, value) {
function MapHas(key) {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.has', this);
}
var table = %_JSCollectionGetTable(this);
@@ -380,7 +387,7 @@ function MapHas(key) {
function MapDelete(key) {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.delete', this);
}
var table = %_JSCollectionGetTable(this);
@@ -403,7 +410,7 @@ function MapDelete(key) {
function MapGetSize() {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.size', this);
}
var table = %_JSCollectionGetTable(this);
@@ -413,7 +420,7 @@ function MapGetSize() {
function MapClearJS() {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.clear', this);
}
%_MapClear(this);
@@ -422,11 +429,11 @@ function MapClearJS() {
function MapForEach(f, receiver) {
if (!IS_MAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'Map.prototype.forEach', this);
}
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
var value_array = [UNDEFINED, UNDEFINED];
@@ -435,6 +442,11 @@ function MapForEach(f, receiver) {
}
}
+
+function MapSpecies() {
+ return this;
+}
+
// -------------------------------------------------------------------
%SetCode(GlobalMap, MapConstructor);
@@ -446,6 +458,8 @@ function MapForEach(f, receiver) {
%FunctionSetLength(MapForEach, 1);
+utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies);
+
// Set up the non-enumerable functions on the Map prototype object.
utils.InstallGetter(GlobalMap.prototype, "size", MapGetSize);
utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
diff --git a/deps/v8/src/js/generator.js b/deps/v8/src/js/generator.js
deleted file mode 100644
index 3dcdcc0ffa..0000000000
--- a/deps/v8/src/js/generator.js
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GeneratorFunctionPrototype = utils.ImportNow("GeneratorFunctionPrototype");
-var GeneratorFunction = utils.ImportNow("GeneratorFunction");
-var GlobalFunction = global.Function;
-var MakeTypeError;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
-// ----------------------------------------------------------------------------
-
-// Generator functions and objects are specified by ES6, sections 15.19.3 and
-// 15.19.4.
-
-function GeneratorObjectNext(value) {
- if (!IS_GENERATOR(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- '[Generator].prototype.next', this);
- }
-
- var continuation = %GeneratorGetContinuation(this);
- if (continuation > 0) {
- // Generator is suspended.
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
- return %_GeneratorNext(this, value);
- } else if (continuation == 0) {
- // Generator is already closed.
- return %_CreateIterResultObject(UNDEFINED, true);
- } else {
- // Generator is running.
- throw MakeTypeError(kGeneratorRunning);
- }
-}
-
-
-function GeneratorObjectReturn(value) {
- if (!IS_GENERATOR(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- '[Generator].prototype.return', this);
- }
-
- var continuation = %GeneratorGetContinuation(this);
- if (continuation > 0) {
- // Generator is suspended.
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
- return %_GeneratorReturn(this, value);
- } else if (continuation == 0) {
- // Generator is already closed.
- return %_CreateIterResultObject(value, true);
- } else {
- // Generator is running.
- throw MakeTypeError(kGeneratorRunning);
- }
-}
-
-
-function GeneratorObjectThrow(exn) {
- if (!IS_GENERATOR(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- '[Generator].prototype.throw', this);
- }
-
- var continuation = %GeneratorGetContinuation(this);
- if (continuation > 0) {
- // Generator is suspended.
- DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
- return %_GeneratorThrow(this, exn);
- } else if (continuation == 0) {
- // Generator is already closed.
- throw exn;
- } else {
- // Generator is running.
- throw MakeTypeError(kGeneratorRunning);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-// None of the three resume operations (Runtime_GeneratorNext,
-// Runtime_GeneratorReturn, Runtime_GeneratorThrow) is supported by
-// Crankshaft or TurboFan. Disable optimization of wrappers here.
-%NeverOptimizeFunction(GeneratorObjectNext);
-%NeverOptimizeFunction(GeneratorObjectReturn);
-%NeverOptimizeFunction(GeneratorObjectThrow);
-
-// Set up non-enumerable functions on the generator prototype object.
-var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
-utils.InstallFunctions(GeneratorObjectPrototype,
- DONT_ENUM,
- ["next", GeneratorObjectNext,
- "return", GeneratorObjectReturn,
- "throw", GeneratorObjectThrow]);
-
-%AddNamedProperty(GeneratorObjectPrototype, "constructor",
- GeneratorFunctionPrototype, DONT_ENUM | READ_ONLY);
-%AddNamedProperty(GeneratorObjectPrototype,
- toStringTagSymbol, "Generator", DONT_ENUM | READ_ONLY);
-%InternalSetPrototype(GeneratorFunctionPrototype, GlobalFunction.prototype);
-%AddNamedProperty(GeneratorFunctionPrototype,
- toStringTagSymbol, "GeneratorFunction", DONT_ENUM | READ_ONLY);
-%AddNamedProperty(GeneratorFunctionPrototype, "constructor",
- GeneratorFunction, DONT_ENUM | READ_ONLY);
-%InternalSetPrototype(GeneratorFunction, GlobalFunction);
-
-})
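
The deleted wrappers dispatched on the generator's continuation: positive means suspended, zero means closed, negative means running. Their closed-state behavior is still observable through any spec-conforming generator:

    function* g() { yield 1; }
    var it = g();
    it.next();      // { value: 1, done: false }  — was suspended
    it.next();      // { value: undefined, done: true } — now closed
    it.next();      // closed: keeps returning { value: undefined, done: true }
    it.return(42);  // closed: { value: 42, done: true }
    try { it.throw(new Error('boom')); } catch (e) {
      e.message;    // closed: throw(exn) simply rethrows — 'boom'
    }
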
diff --git a/deps/v8/src/js/harmony-async-await.js b/deps/v8/src/js/harmony-async-await.js
new file mode 100644
index 0000000000..3a48d0c100
--- /dev/null
+++ b/deps/v8/src/js/harmony-async-await.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils, extrasUtils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+// -------------------------------------------------------------------
+// Imports
+
+var AsyncFunctionNext;
+var AsyncFunctionThrow;
+var GlobalPromise;
+var NewPromiseCapability;
+var PerformPromiseThen;
+var PromiseCastResolved;
+
+utils.Import(function(from) {
+ AsyncFunctionNext = from.AsyncFunctionNext;
+ AsyncFunctionThrow = from.AsyncFunctionThrow;
+ GlobalPromise = from.GlobalPromise;
+ NewPromiseCapability = from.NewPromiseCapability;
+ PromiseCastResolved = from.PromiseCastResolved;
+ PerformPromiseThen = from.PerformPromiseThen;
+});
+
+// -------------------------------------------------------------------
+
+function AsyncFunctionAwait(generator, value) {
+ // Promise.resolve(value).then(
+ // value => AsyncFunctionNext(value),
+ // error => AsyncFunctionThrow(error)
+ // );
+ var promise = PromiseCastResolved(value);
+
+ var onFulfilled =
+ (sentValue) => %_Call(AsyncFunctionNext, generator, sentValue);
+ var onRejected =
+ (sentError) => %_Call(AsyncFunctionThrow, generator, sentError);
+
+ var throwawayCapability = NewPromiseCapability(GlobalPromise);
+ return PerformPromiseThen(promise, onFulfilled, onRejected,
+ throwawayCapability);
+}
+
+%InstallToContext([ "async_function_await", AsyncFunctionAwait ]);
+
+})
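
The commented desugaring above is the whole trick: each await resolves its operand, then resumes the suspended generator with the settled value or rethrows at the await point. A user-land reading, with generator.next/throw standing in for the %_Call'd AsyncFunctionNext/AsyncFunctionThrow and plain then for PerformPromiseThen:

    function awaitStep(generator, value) {
      return Promise.resolve(value).then(
        sentValue => generator.next(sentValue),   // resume after the await
        sentError => generator.throw(sentError)   // or rethrow at the await
      );
    }
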
diff --git a/deps/v8/src/js/harmony-atomics.js b/deps/v8/src/js/harmony-atomics.js
index 9f80227426..bfbf0c505e 100644
--- a/deps/v8/src/js/harmony-atomics.js
+++ b/deps/v8/src/js/harmony-atomics.js
@@ -12,14 +12,10 @@
// Imports
var GlobalObject = global.Object;
-var MakeRangeError;
-var MakeTypeError;
var MaxSimple;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
- MakeRangeError = from.MakeRangeError;
MaxSimple = from.MaxSimple;
});
@@ -28,14 +24,14 @@ utils.Import(function(from) {
function CheckSharedIntegerTypedArray(ia) {
if (!%IsSharedIntegerTypedArray(ia)) {
- throw MakeTypeError(kNotIntegerSharedTypedArray, ia);
+ throw %make_type_error(kNotIntegerSharedTypedArray, ia);
}
}
function CheckSharedInteger32TypedArray(ia) {
CheckSharedIntegerTypedArray(ia);
if (!%IsSharedInteger32TypedArray(ia)) {
- throw MakeTypeError(kNotInt32SharedTypedArray, ia);
+ throw %make_type_error(kNotInt32SharedTypedArray, ia);
}
}
@@ -44,10 +40,10 @@ function ValidateIndex(index, length) {
var numberIndex = TO_NUMBER(index);
var accessIndex = TO_INTEGER(numberIndex);
if (numberIndex !== accessIndex) {
- throw MakeRangeError(kInvalidAtomicAccessIndex);
+ throw %make_range_error(kInvalidAtomicAccessIndex);
}
if (accessIndex < 0 || accessIndex >= length) {
- throw MakeRangeError(kInvalidAtomicAccessIndex);
+ throw %make_range_error(kInvalidAtomicAccessIndex);
}
return accessIndex;
}
@@ -62,19 +58,6 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
return %_AtomicsCompareExchange(sta, index, oldValue, newValue);
}
-function AtomicsLoadJS(sta, index) {
- CheckSharedIntegerTypedArray(sta);
- index = ValidateIndex(index, %_TypedArrayGetLength(sta));
- return %_AtomicsLoad(sta, index);
-}
-
-function AtomicsStoreJS(sta, index, value) {
- CheckSharedIntegerTypedArray(sta);
- index = ValidateIndex(index, %_TypedArrayGetLength(sta));
- value = TO_NUMBER(value);
- return %_AtomicsStore(sta, index, value);
-}
-
function AtomicsAddJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
index = ValidateIndex(index, %_TypedArrayGetLength(ia));
@@ -121,9 +104,7 @@ function AtomicsIsLockFreeJS(size) {
return %_AtomicsIsLockFree(size);
}
-// Futexes
-
-function AtomicsFutexWaitJS(ia, index, value, timeout) {
+function AtomicsWaitJS(ia, index, value, timeout) {
CheckSharedInteger32TypedArray(ia);
index = ValidateIndex(index, %_TypedArrayGetLength(ia));
if (IS_UNDEFINED(timeout)) {
@@ -136,52 +117,28 @@ function AtomicsFutexWaitJS(ia, index, value, timeout) {
timeout = MaxSimple(0, timeout);
}
}
- return %AtomicsFutexWait(ia, index, value, timeout);
+ return %AtomicsWait(ia, index, value, timeout);
}
-function AtomicsFutexWakeJS(ia, index, count) {
+function AtomicsWakeJS(ia, index, count) {
CheckSharedInteger32TypedArray(ia);
index = ValidateIndex(index, %_TypedArrayGetLength(ia));
count = MaxSimple(0, TO_INTEGER(count));
- return %AtomicsFutexWake(ia, index, count);
-}
-
-function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {
- CheckSharedInteger32TypedArray(ia);
- index1 = ValidateIndex(index1, %_TypedArrayGetLength(ia));
- count = MaxSimple(0, TO_INTEGER(count));
- value = TO_INT32(value);
- index2 = ValidateIndex(index2, %_TypedArrayGetLength(ia));
- if (index1 < 0 || index1 >= %_TypedArrayGetLength(ia) ||
- index2 < 0 || index2 >= %_TypedArrayGetLength(ia)) {
- return UNDEFINED;
- }
- return %AtomicsFutexWakeOrRequeue(ia, index1, count, value, index2);
+ return %AtomicsWake(ia, index, count);
}
// -------------------------------------------------------------------
-function AtomicsConstructor() {}
-
-var Atomics = new AtomicsConstructor();
+var Atomics = global.Atomics;
-%InternalSetPrototype(Atomics, GlobalObject.prototype);
-%AddNamedProperty(global, "Atomics", Atomics, DONT_ENUM);
-%FunctionSetInstanceClassName(AtomicsConstructor, 'Atomics');
+// The Atomics global is defined by the bootstrapper.
%AddNamedProperty(Atomics, toStringTagSymbol, "Atomics", READ_ONLY | DONT_ENUM);
-// These must match the values in src/futex-emulation.h
-utils.InstallConstants(Atomics, [
- "OK", 0,
- "NOTEQUAL", -1,
- "TIMEDOUT", -2,
-]);
-
utils.InstallFunctions(Atomics, DONT_ENUM, [
+ // TODO(binji): remove the rest of the (non futex) Atomics functions as they
+ // become builtins.
"compareExchange", AtomicsCompareExchangeJS,
- "load", AtomicsLoadJS,
- "store", AtomicsStoreJS,
"add", AtomicsAddJS,
"sub", AtomicsSubJS,
"and", AtomicsAndJS,
@@ -189,9 +146,8 @@ utils.InstallFunctions(Atomics, DONT_ENUM, [
"xor", AtomicsXorJS,
"exchange", AtomicsExchangeJS,
"isLockFree", AtomicsIsLockFreeJS,
- "futexWait", AtomicsFutexWaitJS,
- "futexWake", AtomicsFutexWakeJS,
- "futexWakeOrRequeue", AtomicsFutexWakeOrRequeueJS,
+ "wait", AtomicsWaitJS,
+ "wake", AtomicsWakeJS,
]);
})
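
Besides moving load and store to builtins, this renames the futex API to its eventual spec names (wake was later renamed notify in ES2019). A hedged usage sketch, assuming two agents sharing an Int32Array; the blocking call is commented out because wait is only legal off the main thread in most embeddings:

    var ia = new Int32Array(new SharedArrayBuffer(4));
    // Worker: block until ia[0] is no longer 0, or 1000 ms pass.
    // Atomics.wait(ia, 0, 0, 1000);
    // Main thread: publish a value, then wake at most one waiter on ia[0].
    Atomics.store(ia, 0, 1);
    Atomics.wake(ia, 0, 1);
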
diff --git a/deps/v8/src/js/harmony-object-observe.js b/deps/v8/src/js/harmony-object-observe.js
deleted file mode 100644
index 95dd298f0d..0000000000
--- a/deps/v8/src/js/harmony-object-observe.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var ObserveArrayMethods = utils.ImportNow("ObserveArrayMethods");
-var ObserveObjectMethods = utils.ImportNow("ObserveObjectMethods");;
-
-utils.InstallFunctions(global.Object, DONT_ENUM, ObserveObjectMethods);
-utils.InstallFunctions(global.Array, DONT_ENUM, ObserveArrayMethods);
-
-})
diff --git a/deps/v8/src/js/harmony-regexp-exec.js b/deps/v8/src/js/harmony-regexp-exec.js
deleted file mode 100644
index e2eece98aa..0000000000
--- a/deps/v8/src/js/harmony-regexp-exec.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-var RegExpSubclassExecJS = utils.ImportNow("RegExpSubclassExecJS");
-var RegExpSubclassMatch = utils.ImportNow("RegExpSubclassMatch");
-var RegExpSubclassReplace = utils.ImportNow("RegExpSubclassReplace");
-var RegExpSubclassSearch = utils.ImportNow("RegExpSubclassSearch");
-var RegExpSubclassSplit = utils.ImportNow("RegExpSubclassSplit");
-var RegExpSubclassTest = utils.ImportNow("RegExpSubclassTest");
-var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
-var searchSymbol = utils.ImportNow("search_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-
-utils.OverrideFunction(GlobalRegExp.prototype, "exec",
- RegExpSubclassExecJS, true);
-utils.OverrideFunction(GlobalRegExp.prototype, matchSymbol,
- RegExpSubclassMatch, true);
-utils.OverrideFunction(GlobalRegExp.prototype, replaceSymbol,
- RegExpSubclassReplace, true);
-utils.OverrideFunction(GlobalRegExp.prototype, searchSymbol,
- RegExpSubclassSearch, true);
-utils.OverrideFunction(GlobalRegExp.prototype, splitSymbol,
- RegExpSubclassSplit, true);
-utils.OverrideFunction(GlobalRegExp.prototype, "test",
- RegExpSubclassTest, true);
-
-})
diff --git a/deps/v8/src/js/harmony-sharedarraybuffer.js b/deps/v8/src/js/harmony-sharedarraybuffer.js
deleted file mode 100644
index 10ceb70d27..0000000000
--- a/deps/v8/src/js/harmony-sharedarraybuffer.js
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalSharedArrayBuffer = global.SharedArrayBuffer;
-var MakeTypeError;
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-})
-
-// -------------------------------------------------------------------
-
-function SharedArrayBufferGetByteLen() {
- if (!IS_SHAREDARRAYBUFFER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'SharedArrayBuffer.prototype.byteLength', this);
- }
- return %_ArrayBufferGetByteLength(this);
-}
-
-utils.InstallGetter(GlobalSharedArrayBuffer.prototype, "byteLength",
- SharedArrayBufferGetByteLen);
-
-})
diff --git a/deps/v8/src/js/harmony-simd.js b/deps/v8/src/js/harmony-simd.js
index 4df2f437ec..0880b5bdf1 100644
--- a/deps/v8/src/js/harmony-simd.js
+++ b/deps/v8/src/js/harmony-simd.js
@@ -12,13 +12,8 @@
// Imports
var GlobalSIMD = global.SIMD;
-var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
// -------------------------------------------------------------------
macro SIMD_FLOAT_TYPES(FUNCTION)
@@ -62,9 +57,9 @@ function NAMECheckJS(a) {
}
function NAMEToString() {
- var value = %_ValueOf(this);
+ var value = %ValueOf(this);
if (typeof(value) !== 'TYPE') {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"NAME.prototype.toString", this);
}
var str = "SIMD.NAME(";
@@ -76,9 +71,9 @@ function NAMEToString() {
}
function NAMEToLocaleString() {
- var value = %_ValueOf(this);
+ var value = %ValueOf(this);
if (typeof(value) !== 'TYPE') {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"NAME.prototype.toLocaleString", this);
}
var str = "SIMD.NAME(";
@@ -90,9 +85,9 @@ function NAMEToLocaleString() {
}
function NAMEValueOf() {
- var value = %_ValueOf(this);
+ var value = %ValueOf(this);
if (typeof(value) !== 'TYPE') {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"NAME.prototype.valueOf", this);
}
return value;
@@ -434,7 +429,7 @@ SIMD_X16_TYPES(DECLARE_X16_FUNCTIONS)
function Float32x4Constructor(c0, c1, c2, c3) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Float32x4");
+ throw %make_type_error(kNotConstructor, "Float32x4");
}
return %CreateFloat32x4(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3));
@@ -443,7 +438,7 @@ function Float32x4Constructor(c0, c1, c2, c3) {
function Int32x4Constructor(c0, c1, c2, c3) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Int32x4");
+ throw %make_type_error(kNotConstructor, "Int32x4");
}
return %CreateInt32x4(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3));
@@ -452,7 +447,7 @@ function Int32x4Constructor(c0, c1, c2, c3) {
function Uint32x4Constructor(c0, c1, c2, c3) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Uint32x4");
+ throw %make_type_error(kNotConstructor, "Uint32x4");
}
return %CreateUint32x4(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3));
@@ -461,7 +456,7 @@ function Uint32x4Constructor(c0, c1, c2, c3) {
function Bool32x4Constructor(c0, c1, c2, c3) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Bool32x4");
+ throw %make_type_error(kNotConstructor, "Bool32x4");
}
return %CreateBool32x4(c0, c1, c2, c3);
}
@@ -469,7 +464,7 @@ function Bool32x4Constructor(c0, c1, c2, c3) {
function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Int16x8");
+ throw %make_type_error(kNotConstructor, "Int16x8");
}
return %CreateInt16x8(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
@@ -480,7 +475,7 @@ function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Uint16x8");
+ throw %make_type_error(kNotConstructor, "Uint16x8");
}
return %CreateUint16x8(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
@@ -491,7 +486,7 @@ function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Bool16x8");
+ throw %make_type_error(kNotConstructor, "Bool16x8");
}
return %CreateBool16x8(c0, c1, c2, c3, c4, c5, c6, c7);
}
@@ -500,7 +495,7 @@ function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Int8x16");
+ throw %make_type_error(kNotConstructor, "Int8x16");
}
return %CreateInt8x16(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
@@ -516,7 +511,7 @@ function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Uint8x16");
+ throw %make_type_error(kNotConstructor, "Uint8x16");
}
return %CreateUint8x16(TO_NUMBER(c0), TO_NUMBER(c1),
TO_NUMBER(c2), TO_NUMBER(c3),
@@ -532,7 +527,7 @@ function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kNotConstructor, "Bool8x16");
+ throw %make_type_error(kNotConstructor, "Bool8x16");
}
return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
c13, c14, c15);
@@ -925,17 +920,4 @@ utils.InstallFunctions(GlobalBool8x16, DONT_ENUM, [
'shuffle', Bool8x16ShuffleJS,
]);
-utils.Export(function(to) {
- to.Float32x4ToString = Float32x4ToString;
- to.Int32x4ToString = Int32x4ToString;
- to.Uint32x4ToString = Uint32x4ToString;
- to.Bool32x4ToString = Bool32x4ToString;
- to.Int16x8ToString = Int16x8ToString;
- to.Uint16x8ToString = Uint16x8ToString;
- to.Bool16x8ToString = Bool16x8ToString;
- to.Int8x16ToString = Int8x16ToString;
- to.Uint8x16ToString = Uint8x16ToString;
- to.Bool8x16ToString = Bool8x16ToString;
-});
-
})
diff --git a/deps/v8/src/js/harmony-species.js b/deps/v8/src/js/harmony-species.js
deleted file mode 100644
index 426ac466e7..0000000000
--- a/deps/v8/src/js/harmony-species.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalArray = global.Array;
-// It is important that this file is run after src/js/typedarray.js,
-// otherwise GlobalTypedArray would be Object, and we would break
-// old versions of Zepto.
-var GlobalTypedArray = global.Uint8Array.__proto__;
-var GlobalMap = global.Map;
-var GlobalSet = global.Set;
-var GlobalArrayBuffer = global.ArrayBuffer;
-var GlobalPromise = global.Promise;
-var GlobalRegExp = global.RegExp;
-var speciesSymbol = utils.ImportNow("species_symbol");
-
-function ArraySpecies() {
- return this;
-}
-
-function TypedArraySpecies() {
- return this;
-}
-
-function MapSpecies() {
- return this;
-}
-
-function SetSpecies() {
- return this;
-}
-
-function ArrayBufferSpecies() {
- return this;
-}
-
-function PromiseSpecies() {
- return this;
-}
-
-function RegExpSpecies() {
- return this;
-}
-
-utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies, DONT_ENUM);
-utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies, DONT_ENUM);
-utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies, DONT_ENUM);
-utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies, DONT_ENUM);
-utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies,
- DONT_ENUM);
-utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies, DONT_ENUM);
-utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies, DONT_ENUM);
-
-});
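
With harmony-species.js gone, each builtin installs its own Symbol.species getter — the ArraySpecies, MapSpecies, SetSpecies, and ArrayBufferSpecies functions added earlier in this diff — all returning `this`. That default is what lets subclasses opt in or out of species-aware allocation:

    class Tuple extends Array {
      static get [Symbol.species]() { return Array; }  // opt back out
    }
    var t = Tuple.of(1, 2, 3);
    t.map(x => x) instanceof Tuple;  // false: species says plain Array
    t.map(x => x) instanceof Array;  // true
    // Without the override, the default getter returns `this` (Tuple),
    // so map would have produced another Tuple.
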
diff --git a/deps/v8/src/js/harmony-string-padding.js b/deps/v8/src/js/harmony-string-padding.js
index a6c6c474de..1af2359def 100644
--- a/deps/v8/src/js/harmony-string-padding.js
+++ b/deps/v8/src/js/harmony-string-padding.js
@@ -10,11 +10,6 @@
// Imports
var GlobalString = global.String;
-var MakeTypeError;
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
// -------------------------------------------------------------------
// http://tc39.github.io/proposal-string-pad-start-end/
@@ -30,7 +25,8 @@ function StringPad(thisString, maxLength, fillString) {
} else {
fillString = TO_STRING(fillString);
if (fillString === "") {
- fillString = " ";
+ // If filler is the empty String, return S.
+ return "";
}
}
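
This aligns StringPad with the proposal's step "if filler is the empty String, return S": an empty fill string now yields no padding at all instead of silently falling back to a space. Observable at the call sites:

    'abc'.padStart(6, '');    // "abc": empty filler pads nothing
    'abc'.padStart(6);        // "   abc": omitted filler defaults to " "
    'abc'.padStart(6, '12');  // "121abc"
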
diff --git a/deps/v8/src/js/harmony-unicode-regexps.js b/deps/v8/src/js/harmony-unicode-regexps.js
deleted file mode 100644
index 16d06ba7e3..0000000000
--- a/deps/v8/src/js/harmony-unicode-regexps.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype = GlobalRegExp.prototype;
-var MakeTypeError;
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-// ES6 21.2.5.15.
-function RegExpGetUnicode() {
- if (!IS_REGEXP(this)) {
- // TODO(littledan): Remove this RegExp compat workaround
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
- return UNDEFINED;
- }
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.unicode");
- }
- return TO_BOOLEAN(REGEXP_UNICODE(this));
-}
-%SetForceInlineFlag(RegExpGetUnicode);
-
-utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
-
-})
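With the unicode flag shipped, the getter that harmony-unicode-regexps.js installed now comes from the native bootstrapper, and the file is deleted. The accessor semantics it implemented remain observable:

```js
/./u.unicode;  // true
/./.unicode;   // false

// 'unicode' is an accessor on RegExp.prototype, not an own data property:
const desc = Object.getOwnPropertyDescriptor(RegExp.prototype, 'unicode');
typeof desc.get;                  // 'function'
desc.get.call(RegExp.prototype);  // undefined (the compat carve-out in the deleted code)
```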
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index 845289a91f..6046a6f2f9 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -17,26 +17,19 @@
// -------------------------------------------------------------------
// Imports
-var ArrayIndexOf;
var ArrayJoin;
var ArrayPush;
-var GlobalBoolean = global.Boolean;
+var FLAG_intl_extra;
var GlobalDate = global.Date;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var InstallFunctions = utils.InstallFunctions;
var InstallGetter = utils.InstallGetter;
-var InternalPackedArray = utils.InternalPackedArray;
+var InternalArray = utils.InternalArray;
var InternalRegExpMatch;
var InternalRegExpReplace
-var IsFinite;
var IsNaN;
-var MakeError;
-var MakeRangeError;
-var MakeTypeError;
-var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
-var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
var OverrideFunction = utils.OverrideFunction;
var patternSymbol = utils.ImportNow("intl_pattern_symbol");
@@ -44,28 +37,25 @@ var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var SetFunctionName = utils.SetFunctionName;
var StringIndexOf;
var StringLastIndexOf;
-var StringSplit;
var StringSubstr;
var StringSubstring;
utils.Import(function(from) {
- ArrayIndexOf = from.ArrayIndexOf;
ArrayJoin = from.ArrayJoin;
ArrayPush = from.ArrayPush;
- IsFinite = from.IsFinite;
IsNaN = from.IsNaN;
- MakeError = from.MakeError;
- MakeRangeError = from.MakeRangeError;
- MakeTypeError = from.MakeTypeError;
InternalRegExpMatch = from.InternalRegExpMatch;
InternalRegExpReplace = from.InternalRegExpReplace;
StringIndexOf = from.StringIndexOf;
StringLastIndexOf = from.StringLastIndexOf;
- StringSplit = from.StringSplit;
StringSubstr = from.StringSubstr;
StringSubstring = from.StringSubstring;
});
+utils.ImportFromExperimental(function(from) {
+ FLAG_intl_extra = from.FLAG_intl_extra;
+});
+
// Utilities for definitions
function InstallFunction(object, name, func) {
@@ -84,43 +74,45 @@ function InstallConstructor(object, name, func) {
/**
* Adds bound method to the prototype of the given object.
*/
-function AddBoundMethod(obj, methodName, implementation, length) {
+function AddBoundMethod(obj, methodName, implementation, length, type) {
%CheckIsBootstrapping();
var internalName = %CreatePrivateSymbol(methodName);
- var getter = function() {
- if (!%IsInitializedIntlObject(this)) {
- throw MakeTypeError(kMethodCalledOnWrongObject, methodName);
+ // Making getter an anonymous function will cause
+ // %DefineGetterPropertyUnchecked to properly set the "name"
+ // property on each JSFunction instance created here, rather
+ // than (as utils.InstallGetter would) on the SharedFunctionInfo
+ // associated with all functions returned from AddBoundMethod.
+ var getter = ANONYMOUS_FUNCTION(function() {
+ if (!%IsInitializedIntlObjectOfType(this, type)) {
+ throw %make_type_error(kMethodCalledOnWrongObject, methodName);
}
if (IS_UNDEFINED(this[internalName])) {
var boundMethod;
if (IS_UNDEFINED(length) || length === 2) {
- boundMethod = (x, y) => implementation(this, x, y);
+ boundMethod = ANONYMOUS_FUNCTION((x, y) => implementation(this, x, y));
} else if (length === 1) {
- boundMethod = x => implementation(this, x);
+ boundMethod = ANONYMOUS_FUNCTION(x => implementation(this, x));
} else {
- boundMethod = (...args) => {
- // DateTimeFormat.format needs to be 0 arg method, but can stil
- // receive optional dateValue param. If one was provided, pass it
+ boundMethod = ANONYMOUS_FUNCTION((...args) => {
+ // DateTimeFormat.format needs to be 0 arg method, but can still
+              // DateTimeFormat.format needs to be a 0-arg method, but can still
+ // receive an optional dateValue param. If one was provided, pass it
// along.
if (args.length > 0) {
return implementation(this, args[0]);
} else {
return implementation(this);
}
- }
+ });
}
- // TODO(littledan): Once function name reform is shipped, remove the
- // following line and wrap the boundMethod definition in an anonymous
- // function macro.
- %FunctionSetName(boundMethod, '__bound' + methodName + '__');
- %FunctionRemovePrototype(boundMethod);
%SetNativeFlag(boundMethod);
this[internalName] = boundMethod;
}
return this[internalName];
- };
+ });
- InstallGetter(obj.prototype, methodName, getter, DONT_ENUM);
+ %FunctionRemovePrototype(getter);
+ %DefineGetterPropertyUnchecked(obj.prototype, methodName, getter, DONT_ENUM);
+ %SetNativeFlag(getter);
}
// -------------------------------------------------------------------
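The rewritten AddBoundMethod lazily builds a per-instance bound function, caches it under a private symbol, and relies on anonymous function syntax (the ANONYMOUS_FUNCTION macro) instead of %FunctionSetName to get correct 'name' properties. A plain-JS sketch of the same pattern, with hypothetical names:

```js
// Minimal sketch of the lazy bound-method pattern; addBoundMethod is hypothetical.
function addBoundMethod(ctor, methodName, impl) {
  const cache = Symbol(methodName);  // stands in for %CreatePrivateSymbol
  Object.defineProperty(ctor.prototype, methodName, {
    get() {
      if (this[cache] === undefined) {
        // Bound once per instance, then reused on every later access.
        this[cache] = (...args) => impl(this, ...args);
      }
      return this[cache];
    },
    enumerable: false,
    configurable: true,
  });
}
```

The caching matters because the accessor must return an identical function on every read (`nf.format === nf.format`), which a getter returning a fresh closure would violate.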
@@ -144,6 +136,13 @@ var AVAILABLE_LOCALES = {
*/
var DEFAULT_ICU_LOCALE = UNDEFINED;
+function GetDefaultICULocaleJS() {
+ if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
+ DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
+ }
+ return DEFAULT_ICU_LOCALE;
+}
+
/**
* Unicode extension regular expression.
*/
@@ -263,7 +262,7 @@ function GetTimezoneNameLocationPartRE() {
*/
function supportedLocalesOf(service, locales, options) {
if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
- throw MakeError(kWrongServiceType, service);
+ throw %make_error(kWrongServiceType, service);
}
// Provide defaults if matcher was not specified.
@@ -275,9 +274,9 @@ function supportedLocalesOf(service, locales, options) {
var matcher = options.localeMatcher;
if (!IS_UNDEFINED(matcher)) {
- matcher = GlobalString(matcher);
+ matcher = TO_STRING(matcher);
if (matcher !== 'lookup' && matcher !== 'best fit') {
- throw MakeRangeError(kLocaleMatcher, matcher);
+ throw %make_range_error(kLocaleMatcher, matcher);
}
} else {
matcher = 'best fit';
@@ -307,7 +306,7 @@ function supportedLocalesOf(service, locales, options) {
* Locales appear in the same order in the returned list as in the input list.
*/
function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
- var matchedLocales = [];
+ var matchedLocales = new InternalArray();
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove -u- extension.
var locale = InternalRegExpReplace(
@@ -349,27 +348,27 @@ function bestFitSupportedLocalesOf(requestedLocales, availableLocales) {
* is out of range for that property it throws RangeError.
*/
function getGetOption(options, caller) {
- if (IS_UNDEFINED(options)) throw MakeError(kDefaultOptionsMissing, caller);
+ if (IS_UNDEFINED(options)) throw %make_error(kDefaultOptionsMissing, caller);
var getOption = function getOption(property, type, values, defaultValue) {
if (!IS_UNDEFINED(options[property])) {
var value = options[property];
switch (type) {
case 'boolean':
- value = GlobalBoolean(value);
+ value = TO_BOOLEAN(value);
break;
case 'string':
- value = GlobalString(value);
+ value = TO_STRING(value);
break;
case 'number':
- value = GlobalNumber(value);
+ value = TO_NUMBER(value);
break;
default:
- throw MakeError(kWrongValueType);
+ throw %make_error(kWrongValueType);
}
- if (!IS_UNDEFINED(values) && %_Call(ArrayIndexOf, values, value) === -1) {
- throw MakeRangeError(kValueOutOfRange, value, caller, property);
+ if (!IS_UNDEFINED(values) && %ArrayIndexOf(values, value, 0) === -1) {
+ throw %make_range_error(kValueOutOfRange, value, caller, property);
}
return value;
@@ -383,6 +382,9 @@ function getGetOption(options, caller) {
/**
+ * ECMA 402 9.2.5

+ * TODO(jshin): relevantExtensionKeys and localeData need to be taken into
+ * account per spec.
* Compares a BCP 47 language priority list requestedLocales against the locales
* in availableLocales and determines the best available language to meet the
* request. Two algorithms are available to match the locales: the Lookup
@@ -418,7 +420,7 @@ function resolveLocale(service, requestedLocales, options) {
*/
function lookupMatcher(service, requestedLocales) {
if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
- throw MakeError(kWrongServiceType, service);
+ throw %make_error(kWrongServiceType, service);
}
// Cache these, they don't ever change per service.
@@ -448,11 +450,7 @@ function lookupMatcher(service, requestedLocales) {
}
// Didn't find a match, return default.
- if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
- DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
- }
-
- return {'locale': DEFAULT_ICU_LOCALE, 'extension': '', 'position': -1};
+ return {'locale': GetDefaultICULocaleJS(), 'extension': '', 'position': -1};
}
@@ -470,9 +468,14 @@ function bestFitMatcher(service, requestedLocales) {
* Parses Unicode extension into key - value map.
* Returns empty object if the extension string is invalid.
* We are not concerned with the validity of the values at this point.
+ * 'attribute' in RFC 6067 is not supported. Keys without explicit
+ * values are assigned UNDEFINED.
+ * TODO(jshin): Fix the handling of 'attribute' (defined in RFC 6067, but
+ * none has been registered so far, so it goes unused) and boolean keys
+ * without an explicit value.
*/
function parseExtension(extension) {
- var extensionSplit = %_Call(StringSplit, extension, '-');
+ var extensionSplit = %StringSplit(extension, '-', kMaxUint32);
// Assume ['', 'u', ...] input, but don't throw.
if (extensionSplit.length <= 2 ||
@@ -483,21 +486,33 @@ function parseExtension(extension) {
// Key is {2}alphanum, value is {3,8}alphanum.
// Some keys may not have explicit values (booleans).
var extensionMap = {};
- var previousKey = UNDEFINED;
+ var key = UNDEFINED;
+ var value = UNDEFINED;
for (var i = 2; i < extensionSplit.length; ++i) {
var length = extensionSplit[i].length;
var element = extensionSplit[i];
if (length === 2) {
- extensionMap[element] = UNDEFINED;
- previousKey = element;
- } else if (length >= 3 && length <=8 && !IS_UNDEFINED(previousKey)) {
- extensionMap[previousKey] = element;
- previousKey = UNDEFINED;
+ if (!IS_UNDEFINED(key)) {
+ if (!(key in extensionMap)) {
+ extensionMap[key] = value;
+ }
+ value = UNDEFINED;
+ }
+ key = element;
+ } else if (length >= 3 && length <= 8 && !IS_UNDEFINED(key)) {
+ if (IS_UNDEFINED(value)) {
+ value = element;
+ } else {
+ value = value + "-" + element;
+ }
} else {
// There is a value that's too long, or that doesn't have a key.
return {};
}
}
+ if (!IS_UNDEFINED(key) && !(key in extensionMap)) {
+ extensionMap[key] = value;
+ }
return extensionMap;
}
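The rewritten loop accepts multi-subtag values (e.g. the 'islamic-civil' calendar) by joining consecutive value subtags with '-', and keeps the first occurrence of a duplicated key. A standalone sketch of those rules:

```js
// Hypothetical standalone version of the parsing rules above.
function parseUnicodeExtension(extension) {
  const parts = extension.split('-');  // e.g. ['', 'u', 'ca', 'islamic', 'civil', 'nu', 'arab']
  if (parts.length <= 2 || parts[0] !== '' || parts[1] !== 'u') return {};  // assumed guard
  const map = {};
  let key, value;
  for (let i = 2; i < parts.length; i++) {
    const el = parts[i];
    if (el.length === 2) {                                       // a new key
      if (key !== undefined && !(key in map)) map[key] = value;  // first occurrence wins
      key = el;
      value = undefined;
    } else if (el.length >= 3 && el.length <= 8 && key !== undefined) {
      value = value === undefined ? el : value + '-' + el;       // multi-subtag value
    } else {
      return {};                                                 // malformed: bail out
    }
  }
  if (key !== undefined && !(key in map)) map[key] = value;
  return map;
}

parseUnicodeExtension('-u-ca-islamic-civil-nu-arab');
// => { ca: 'islamic-civil', nu: 'arab' }
```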
@@ -517,7 +532,7 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
var extension = '';
var updateExtension = function updateExtension(key, value) {
- return '-' + key + '-' + GlobalString(value);
+ return '-' + key + '-' + TO_STRING(value);
}
var updateProperty = function updateProperty(property, type, value) {
@@ -567,24 +582,33 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
/**
- * Converts all OwnProperties into
+ * Given an array-like, outputs an Array with the numbered
+ * properties copied over and defined
* configurable: false, writable: false, enumerable: true.
+ * The input is not modified; a fresh array is returned.
*/
-function freezeArray(array) {
- var l = array.length;
+function freezeArray(input) {
+ var array = [];
+ var l = input.length;
for (var i = 0; i < l; i++) {
- if (i in array) {
- ObjectDefineProperty(array, i, {value: array[i],
- configurable: false,
- writable: false,
- enumerable: true});
+ if (i in input) {
+ %object_define_property(array, i, {value: input[i],
+ configurable: false,
+ writable: false,
+ enumerable: true});
}
}
- ObjectDefineProperty(array, 'length', {value: l, writable: false});
+ %object_define_property(array, 'length', {value: l, writable: false});
return array;
}
+/* Make a JS array out of an InternalArray. */
+function makeArray(input) {
+ var array = [];
+ %MoveArrayContents(input, array);
+ return array;
+}
/**
 * It's sometimes desirable to leave the user-requested locale instead of ICU
@@ -643,8 +667,8 @@ function getAvailableLocalesOf(service) {
* Configurable is false by default.
*/
function defineWEProperty(object, property, value) {
- ObjectDefineProperty(object, property,
- {value: value, writable: true, enumerable: true});
+ %object_define_property(object, property,
+ {value: value, writable: true, enumerable: true});
}
@@ -663,10 +687,10 @@ function addWEPropertyIfDefined(object, property, value) {
* Defines a property and sets writable, enumerable and configurable to true.
*/
function defineWECProperty(object, property, value) {
- ObjectDefineProperty(object, property, {value: value,
- writable: true,
- enumerable: true,
- configurable: true});
+ %object_define_property(object, property, {value: value,
+ writable: true,
+ enumerable: true,
+ configurable: true});
}
@@ -697,14 +721,14 @@ function toTitleCaseWord(word) {
*/
function toTitleCaseTimezoneLocation(location) {
var match = InternalRegExpMatch(GetTimezoneNameLocationPartRE(), location)
- if (IS_NULL(match)) throw MakeRangeError(kExpectedLocation, location);
+ if (IS_NULL(match)) throw %make_range_error(kExpectedLocation, location);
var result = toTitleCaseWord(match[1]);
if (!IS_UNDEFINED(match[2]) && 2 < match.length) {
// The first character is a separator, '_' or '-'.
// None of IANA zone names has both '_' and '-'.
var separator = %_Call(StringSubstring, match[2], 0, 1);
- var parts = %_Call(StringSplit, match[2], separator);
+ var parts = %StringSplit(match[2], separator, kMaxUint32);
for (var i = 1; i < parts.length; i++) {
var part = parts[i]
var lowercasedPart = %StringToLowerCase(part);
@@ -719,27 +743,34 @@ function toTitleCaseTimezoneLocation(location) {
/**
* Canonicalizes the language tag, or throws in case the tag is invalid.
+ * ECMA 402 9.2.1 steps 7.c ii ~ v.
*/
function canonicalizeLanguageTag(localeID) {
// null is typeof 'object' so we have to do extra check.
- if (typeof localeID !== 'string' && typeof localeID !== 'object' ||
+ if ((!IS_STRING(localeID) && !IS_RECEIVER(localeID)) ||
IS_NULL(localeID)) {
- throw MakeTypeError(kLanguageID);
+ throw %make_type_error(kLanguageID);
+ }
+
+  // Optimize for the most common case: a language code alone in
+ // the canonical form/lowercase (e.g. "en", "fil").
+ if (IS_STRING(localeID) &&
+ !IS_NULL(InternalRegExpMatch(/^[a-z]{2,3}$/, localeID))) {
+ return localeID;
}
- var localeString = GlobalString(localeID);
+ var localeString = TO_STRING(localeID);
- if (isValidLanguageTag(localeString) === false) {
- throw MakeRangeError(kInvalidLanguageTag, localeString);
+ if (isStructuallyValidLanguageTag(localeString) === false) {
+ throw %make_range_error(kInvalidLanguageTag, localeString);
}
- // This call will strip -kn but not -kn-true extensions.
- // ICU bug filled - http://bugs.icu-project.org/trac/ticket/9265.
- // TODO(cira): check if -u-kn-true-kc-true-kh-true still throws after
- // upgrade to ICU 4.9.
+ // ECMA 402 6.2.3
var tag = %CanonicalizeLanguageTag(localeString);
+  // TODO(jshin): This should not happen because the structural validity
+ // is already checked. If that's the case, remove this.
if (tag === 'invalid-tag') {
- throw MakeRangeError(kInvalidLanguageTag, localeString);
+ throw %make_range_error(kInvalidLanguageTag, localeString);
}
return tag;
@@ -747,23 +778,22 @@ function canonicalizeLanguageTag(localeID) {
/**
- * Returns an array where all locales are canonicalized and duplicates removed.
+ * Returns an InternalArray where all locales are canonicalized and duplicates
+ * removed.
* Throws on locales that are not well formed BCP47 tags.
+ * ECMA 402 8.2.1 steps 1 (ECMA 402 9.2.1) and 2.
*/
-function initializeLocaleList(locales) {
- var seen = [];
- if (IS_UNDEFINED(locales)) {
- // Constructor is called without arguments.
- seen = [];
- } else {
+function canonicalizeLocaleList(locales) {
+ var seen = new InternalArray();
+ if (!IS_UNDEFINED(locales)) {
// We allow single string localeID.
if (typeof locales === 'string') {
%_Call(ArrayPush, seen, canonicalizeLanguageTag(locales));
- return freezeArray(seen);
+ return seen;
}
var o = TO_OBJECT(locales);
- var len = TO_UINT32(o.length);
+ var len = TO_LENGTH(o.length);
for (var k = 0; k < len; k++) {
if (k in o) {
@@ -771,27 +801,37 @@ function initializeLocaleList(locales) {
var tag = canonicalizeLanguageTag(value);
- if (%_Call(ArrayIndexOf, seen, tag) === -1) {
+ if (%ArrayIndexOf(seen, tag, 0) === -1) {
%_Call(ArrayPush, seen, tag);
}
}
}
}
- return freezeArray(seen);
+ return seen;
}
+function initializeLocaleList(locales) {
+ return freezeArray(canonicalizeLocaleList(locales));
+}
/**
- * Validates the language tag. Section 2.2.9 of the bcp47 spec
- * defines a valid tag.
+ * Check the structural validity of the language tag per ECMA 402 6.2.2:
+ * - Well-formed per RFC 5646 2.1
+ * - There are no duplicate variant subtags
+ * - There are no duplicate singleton (extension) subtags
+ *
+ * One extra check is done (from RFC 5646 2.2.9): the tag is compared
+ * against the list of grandfathered tags. However, subtags for
+ * primary/extended language, script, region, variant are not checked
+ * against the IANA language subtag registry.
*
 * ICU is too permissive and lets invalid tags, like
* hant-cmn-cn, through.
*
* Returns false if the language tag is invalid.
*/
-function isValidLanguageTag(locale) {
+function isStructuallyValidLanguageTag(locale) {
   // Check if it's well-formed, including grandfathered tags.
if (IS_NULL(InternalRegExpMatch(GetLanguageTagRE(), locale))) {
return false;
@@ -805,19 +845,19 @@ function isValidLanguageTag(locale) {
// Check if there are any duplicate variants or singletons (extensions).
// Remove private use section.
- locale = %_Call(StringSplit, locale, '-x-')[0];
+ locale = %StringSplit(locale, '-x-', kMaxUint32)[0];
// Skip language since it can match variant regex, so we start from 1.
// We are matching i-klingon here, but that's ok, since i-klingon-klingon
// is not valid and would fail LANGUAGE_TAG_RE test.
- var variants = [];
- var extensions = [];
- var parts = %_Call(StringSplit, locale, '-');
+ var variants = new InternalArray();
+ var extensions = new InternalArray();
+ var parts = %StringSplit(locale, '-', kMaxUint32);
for (var i = 1; i < parts.length; i++) {
var value = parts[i];
if (!IS_NULL(InternalRegExpMatch(GetLanguageVariantRE(), value)) &&
extensions.length === 0) {
- if (%_Call(ArrayIndexOf, variants, value) === -1) {
+ if (%ArrayIndexOf(variants, value, 0) === -1) {
%_Call(ArrayPush, variants, value);
} else {
return false;
@@ -825,7 +865,7 @@ function isValidLanguageTag(locale) {
}
if (!IS_NULL(InternalRegExpMatch(GetLanguageSingletonRE(), value))) {
- if (%_Call(ArrayIndexOf, extensions, value) === -1) {
+ if (%ArrayIndexOf(extensions, value, 0) === -1) {
%_Call(ArrayPush, extensions, value);
} else {
return false;
@@ -886,13 +926,23 @@ var resolvedAccessor = {
}
};
+// ECMA 402 section 8.2.1
+InstallFunction(Intl, 'getCanonicalLocales', function(locales) {
+ if (!IS_UNDEFINED(new.target)) {
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
+ }
+
+ return makeArray(canonicalizeLocaleList(locales));
+ }
+);
+
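Intl.getCanonicalLocales is the new ECMA-402 API surfaced by this hunk; it canonicalizes and de-duplicates a locale list without constructing a service object:

```js
Intl.getCanonicalLocales('EN-us');             // ['en-US']
Intl.getCanonicalLocales(['fr-FR', 'FR-fr']);  // ['fr-FR'] (duplicates collapse after canonicalization)
Intl.getCanonicalLocales();                    // []
Intl.getCanonicalLocales('not a tag');         // throws RangeError
```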
/**
* Initializes the given object so it's a valid Collator instance.
* Useful for subclassing.
*/
function initializeCollator(collator, locales, options) {
if (%IsInitializedIntlObject(collator)) {
- throw MakeTypeError(kReinitializeIntl, "Collator");
+ throw %make_type_error(kReinitializeIntl, "Collator");
}
if (IS_UNDEFINED(options)) {
@@ -918,6 +968,9 @@ function initializeCollator(collator, locales, options) {
var locale = resolveLocale('collator', locales, options);
+ // TODO(jshin): ICU now can take kb, kc, etc. Switch over to using ICU
+ // directly. See Collator::InitializeCollator and
+ // Collator::CreateICUCollator in src/i18n.cc
// ICU can't take kb, kc... parameters through localeID, so we need to pass
// them as options.
// One exception is -co- which has to be part of the extension, but only for
@@ -950,7 +1003,7 @@ function initializeCollator(collator, locales, options) {
'pinyin', 'reformed', 'searchjl', 'stroke', 'trad', 'unihan', 'zhuyin'
];
- if (%_Call(ArrayIndexOf, ALLOWED_CO_VALUES, extensionMap.co) !== -1) {
+ if (%ArrayIndexOf(ALLOWED_CO_VALUES, extensionMap.co, 0) !== -1) {
extension = '-u-co-' + extensionMap.co;
// ICU can't tell us what the collation is, so save user's input.
collation = extensionMap.co;
@@ -965,8 +1018,8 @@ function initializeCollator(collator, locales, options) {
// We define all properties C++ code may produce, to prevent security
// problems. If malicious user decides to redefine Object.prototype.locale
// we can't just use plain x.locale = 'us' or in C++ Set("locale", "us").
- // ObjectDefineProperties will either succeed defining or throw an error.
- var resolved = ObjectDefineProperties({}, {
+ // %object_define_properties will either succeed defining or throw an error.
+ var resolved = %object_define_properties({}, {
caseFirst: {writable: true},
collation: {value: internalOptions.collation, writable: true},
ignorePunctuation: {writable: true},
@@ -985,7 +1038,9 @@ function initializeCollator(collator, locales, options) {
// Writable, configurable and enumerable are set to false by default.
%MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
collator[resolvedSymbol] = resolved;
- ObjectDefineProperty(collator, 'resolved', resolvedAccessor);
+ if (FLAG_intl_extra) {
+ %object_define_property(collator, 'resolved', resolvedAccessor);
+ }
return collator;
}
@@ -1016,11 +1071,11 @@ InstallConstructor(Intl, 'Collator', function() {
*/
InstallFunction(Intl.Collator.prototype, 'resolvedOptions', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
if (!%IsInitializedIntlObjectOfType(this, 'collator')) {
- throw MakeTypeError(kResolvedOptionsCalledOnNonObject, "Collator");
+ throw %make_type_error(kResolvedOptionsCalledOnNonObject, "Collator");
}
var coll = this;
@@ -1048,7 +1103,7 @@ InstallFunction(Intl.Collator.prototype, 'resolvedOptions', function() {
*/
InstallFunction(Intl.Collator, 'supportedLocalesOf', function(locales) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('collator', locales, arguments[1]);
@@ -1068,11 +1123,11 @@ InstallFunction(Intl.Collator, 'supportedLocalesOf', function(locales) {
*/
function compare(collator, x, y) {
return %InternalCompare(%GetImplFromInitializedIntlObject(collator),
- GlobalString(x), GlobalString(y));
+ TO_STRING(x), TO_STRING(y));
};
-AddBoundMethod(Intl.Collator, 'compare', compare, 2);
+AddBoundMethod(Intl.Collator, 'compare', compare, 2, 'collator');
/**
* Verifies that the input is a well-formed ISO 4217 currency code.
@@ -1092,9 +1147,9 @@ function isWellFormedCurrencyCode(currency) {
function getNumberOption(options, property, min, max, fallback) {
var value = options[property];
if (!IS_UNDEFINED(value)) {
- value = GlobalNumber(value);
- if (IsNaN(value) || value < min || value > max) {
- throw MakeRangeError(kPropertyValueOutOfRange, property);
+ value = TO_NUMBER(value);
+ if (NUMBER_IS_NAN(value) || value < min || value > max) {
+ throw %make_range_error(kPropertyValueOutOfRange, property);
}
return %math_floor(value);
}
@@ -1118,7 +1173,7 @@ var patternAccessor = {
*/
function initializeNumberFormat(numberFormat, locales, options) {
if (%IsInitializedIntlObject(numberFormat)) {
- throw MakeTypeError(kReinitializeIntl, "NumberFormat");
+ throw %make_type_error(kReinitializeIntl, "NumberFormat");
}
if (IS_UNDEFINED(options)) {
@@ -1135,11 +1190,11 @@ function initializeNumberFormat(numberFormat, locales, options) {
var currency = getOption('currency', 'string');
if (!IS_UNDEFINED(currency) && !isWellFormedCurrencyCode(currency)) {
- throw MakeRangeError(kInvalidCurrencyCode, currency);
+ throw %make_range_error(kInvalidCurrencyCode, currency);
}
if (internalOptions.style === 'currency' && IS_UNDEFINED(currency)) {
- throw MakeTypeError(kCurrencyCode);
+ throw %make_type_error(kCurrencyCode);
}
var currencyDisplay = getOption(
@@ -1198,7 +1253,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = ObjectDefineProperties({}, {
+ var resolved = %object_define_properties({}, {
currency: {writable: true},
currencyDisplay: {writable: true},
locale: {writable: true},
@@ -1206,7 +1261,6 @@ function initializeNumberFormat(numberFormat, locales, options) {
minimumFractionDigits: {writable: true},
minimumIntegerDigits: {writable: true},
numberingSystem: {writable: true},
- pattern: patternAccessor,
requestedLocale: {value: requestedLocale, writable: true},
style: {value: internalOptions.style, writable: true},
useGrouping: {writable: true}
@@ -1222,13 +1276,16 @@ function initializeNumberFormat(numberFormat, locales, options) {
resolved);
if (internalOptions.style === 'currency') {
- ObjectDefineProperty(resolved, 'currencyDisplay', {value: currencyDisplay,
- writable: true});
+ %object_define_property(resolved, 'currencyDisplay',
+ {value: currencyDisplay, writable: true});
}
%MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
numberFormat[resolvedSymbol] = resolved;
- ObjectDefineProperty(numberFormat, 'resolved', resolvedAccessor);
+ if (FLAG_intl_extra) {
+ %object_define_property(resolved, 'pattern', patternAccessor);
+ %object_define_property(numberFormat, 'resolved', resolvedAccessor);
+ }
return numberFormat;
}
@@ -1259,11 +1316,11 @@ InstallConstructor(Intl, 'NumberFormat', function() {
*/
InstallFunction(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
if (!%IsInitializedIntlObjectOfType(this, 'numberformat')) {
- throw MakeTypeError(kResolvedOptionsCalledOnNonObject, "NumberFormat");
+ throw %make_type_error(kResolvedOptionsCalledOnNonObject, "NumberFormat");
}
var format = this;
@@ -1309,7 +1366,7 @@ InstallFunction(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
*/
InstallFunction(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('numberformat', locales, arguments[1]);
@@ -1334,14 +1391,12 @@ function formatNumber(formatter, value) {
/**
 * Returns a Number that represents the string value that was passed in.
*/
-function parseNumber(formatter, value) {
+function IntlParseNumber(formatter, value) {
return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter),
- GlobalString(value));
+ TO_STRING(value));
}
-
-AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1);
-AddBoundMethod(Intl.NumberFormat, 'v8Parse', parseNumber, 1);
+AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1, 'numberformat');
/**
* Returns a string that matches LDML representation of the options object.
@@ -1508,35 +1563,35 @@ function toDateTimeOptions(options, required, defaults) {
}
if (needsDefault && (defaults === 'date' || defaults === 'all')) {
- ObjectDefineProperty(options, 'year', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- ObjectDefineProperty(options, 'month', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- ObjectDefineProperty(options, 'day', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- }
-
- if (needsDefault && (defaults === 'time' || defaults === 'all')) {
- ObjectDefineProperty(options, 'hour', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- ObjectDefineProperty(options, 'minute', {value: 'numeric',
- writable: true,
- enumerable: true,
- configurable: true});
- ObjectDefineProperty(options, 'second', {value: 'numeric',
+ %object_define_property(options, 'year', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ %object_define_property(options, 'month', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ %object_define_property(options, 'day', {value: 'numeric',
writable: true,
enumerable: true,
configurable: true});
}
+ if (needsDefault && (defaults === 'time' || defaults === 'all')) {
+ %object_define_property(options, 'hour', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ %object_define_property(options, 'minute', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ %object_define_property(options, 'second', {value: 'numeric',
+ writable: true,
+ enumerable: true,
+ configurable: true});
+ }
+
return options;
}
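This hunk only swaps ObjectDefineProperty for the %object_define_property intrinsic; the defaulting logic is unchanged: date-flavored entry points get year/month/day and time-flavored ones get hour/minute/second when the caller requests no components. Roughly (exact strings depend on locale data):

```js
const d = new Date(Date.UTC(2016, 8, 6, 20, 49, 51));
d.toLocaleDateString('en-US', { timeZone: 'UTC' });  // '9/6/2016'    (year/month/day defaults)
d.toLocaleTimeString('en-US', { timeZone: 'UTC' });  // '8:49:51 PM'  (hour/minute/second defaults)
```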
@@ -1548,7 +1603,7 @@ function toDateTimeOptions(options, required, defaults) {
function initializeDateTimeFormat(dateFormat, locales, options) {
if (%IsInitializedIntlObject(dateFormat)) {
- throw MakeTypeError(kReinitializeIntl, "DateTimeFormat");
+ throw %make_type_error(kReinitializeIntl, "DateTimeFormat");
}
if (IS_UNDEFINED(options)) {
@@ -1592,7 +1647,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
getOption, internalOptions);
var requestedLocale = locale.locale + extension;
- var resolved = ObjectDefineProperties({}, {
+ var resolved = %object_define_properties({}, {
calendar: {writable: true},
day: {writable: true},
era: {writable: true},
@@ -1603,7 +1658,6 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
month: {writable: true},
numberingSystem: {writable: true},
[patternSymbol]: {writable: true},
- pattern: patternAccessor,
requestedLocale: {value: requestedLocale, writable: true},
second: {writable: true},
timeZone: {writable: true},
@@ -1617,12 +1671,15 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
if (resolved.timeZone === "Etc/Unknown") {
- throw MakeRangeError(kUnsupportedTimeZone, tz);
+ throw %make_range_error(kUnsupportedTimeZone, tz);
}
%MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
dateFormat[resolvedSymbol] = resolved;
- ObjectDefineProperty(dateFormat, 'resolved', resolvedAccessor);
+ if (FLAG_intl_extra) {
+ %object_define_property(resolved, 'pattern', patternAccessor);
+ %object_define_property(dateFormat, 'resolved', resolvedAccessor);
+ }
return dateFormat;
}
@@ -1653,29 +1710,21 @@ InstallConstructor(Intl, 'DateTimeFormat', function() {
*/
InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
- throw MakeTypeError(kResolvedOptionsCalledOnNonObject, "DateTimeFormat");
+ throw %make_type_error(kResolvedOptionsCalledOnNonObject, "DateTimeFormat");
}
/**
- * Maps ICU calendar names into LDML type.
+ * Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
+ * See typeMap section in third_party/icu/source/data/misc/keyTypeData.txt
+ * and
+ * http://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
*/
var ICU_CALENDAR_MAP = {
'gregorian': 'gregory',
- 'japanese': 'japanese',
- 'buddhist': 'buddhist',
- 'roc': 'roc',
- 'persian': 'persian',
- 'islamic-civil': 'islamicc',
- 'islamic': 'islamic',
- 'hebrew': 'hebrew',
- 'chinese': 'chinese',
- 'indian': 'indian',
- 'coptic': 'coptic',
- 'ethiopic': 'ethiopic',
'ethiopic-amete-alem': 'ethioaa'
};
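The shrunken map now lists only the calendars whose ICU names differ from their BCP 47 types; everything else passes through unchanged, per the rewritten comment later in this diff. The observable effect, assuming current CLDR data:

```js
// resolvedOptions() reports the BCP 47 type, not ICU's legacy name:
new Intl.DateTimeFormat('en-u-ca-gregory').resolvedOptions().calendar;   // 'gregory' (ICU: 'gregorian')
new Intl.DateTimeFormat('en-u-ca-buddhist').resolvedOptions().calendar;  // 'buddhist' (names coincide)
```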
@@ -1683,8 +1732,7 @@ InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
var fromPattern = fromLDMLString(format[resolvedSymbol][patternSymbol]);
var userCalendar = ICU_CALENDAR_MAP[format[resolvedSymbol].calendar];
if (IS_UNDEFINED(userCalendar)) {
- // Use ICU name if we don't have a match. It shouldn't happen, but
- // it would be too strict to throw for this.
+    // No match means that ICU's legacy name is identical to the LDML/BCP47 type.
userCalendar = format[resolvedSymbol].calendar;
}
@@ -1722,7 +1770,7 @@ InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
*/
InstallFunction(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('dateformat', locales, arguments[1]);
@@ -1743,7 +1791,7 @@ function formatDate(formatter, dateValue) {
dateMs = TO_NUMBER(dateValue);
}
- if (!IsFinite(dateMs)) throw MakeRangeError(kDateRange);
+ if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter),
new GlobalDate(dateMs));
@@ -1756,15 +1804,14 @@ function formatDate(formatter, dateValue) {
* DateTimeFormat.
* Returns undefined if date string cannot be parsed.
*/
-function parseDate(formatter, value) {
+function IntlParseDate(formatter, value) {
return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter),
- GlobalString(value));
+ TO_STRING(value));
}
// 0 because date is optional argument.
-AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0);
-AddBoundMethod(Intl.DateTimeFormat, 'v8Parse', parseDate, 1);
+AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0, 'dateformat');
/**
@@ -1777,6 +1824,9 @@ function canonicalizeTimeZoneID(tzID) {
return tzID;
}
+ // Convert zone name to string.
+ tzID = TO_STRING(tzID);
+
// Special case handling (UTC, GMT).
var upperID = %StringToUpperCase(tzID);
if (upperID === 'UTC' || upperID === 'GMT' ||
@@ -1789,13 +1839,13 @@ function canonicalizeTimeZoneID(tzID) {
// We expect only _, '-' and / beside ASCII letters.
// All inputs should conform to Area/Location(/Location)* from now on.
var match = InternalRegExpMatch(GetTimezoneNameCheckRE(), tzID);
- if (IS_NULL(match)) throw MakeRangeError(kExpectedTimezoneID, tzID);
+ if (IS_NULL(match)) throw %make_range_error(kExpectedTimezoneID, tzID);
var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
toTitleCaseTimezoneLocation(match[2]);
if (!IS_UNDEFINED(match[3]) && 3 < match.length) {
- var locations = %_Call(StringSplit, match[3], '/');
+ var locations = %StringSplit(match[3], '/', kMaxUint32);
// The 1st element is empty. Starts with i=1.
for (var i = 1; i < locations.length; i++) {
result = result + '/' + toTitleCaseTimezoneLocation(locations[i]);
@@ -1811,7 +1861,7 @@ function canonicalizeTimeZoneID(tzID) {
*/
function initializeBreakIterator(iterator, locales, options) {
if (%IsInitializedIntlObject(iterator)) {
- throw MakeTypeError(kReinitializeIntl, "v8BreakIterator");
+ throw %make_type_error(kReinitializeIntl, "v8BreakIterator");
}
if (IS_UNDEFINED(options)) {
@@ -1826,7 +1876,7 @@ function initializeBreakIterator(iterator, locales, options) {
'type', 'string', ['character', 'word', 'sentence', 'line'], 'word'));
var locale = resolveLocale('breakiterator', locales, options);
- var resolved = ObjectDefineProperties({}, {
+ var resolved = %object_define_properties({}, {
requestedLocale: {value: locale.locale, writable: true},
type: {value: internalOptions.type, writable: true},
locale: {writable: true}
@@ -1839,7 +1889,9 @@ function initializeBreakIterator(iterator, locales, options) {
%MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
internalIterator);
iterator[resolvedSymbol] = resolved;
- ObjectDefineProperty(iterator, 'resolved', resolvedAccessor);
+ if (FLAG_intl_extra) {
+ %object_define_property(iterator, 'resolved', resolvedAccessor);
+ }
return iterator;
}
@@ -1871,11 +1923,11 @@ InstallConstructor(Intl, 'v8BreakIterator', function() {
InstallFunction(Intl.v8BreakIterator.prototype, 'resolvedOptions',
function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
if (!%IsInitializedIntlObjectOfType(this, 'breakiterator')) {
- throw MakeTypeError(kResolvedOptionsCalledOnNonObject, "v8BreakIterator");
+ throw %make_type_error(kResolvedOptionsCalledOnNonObject, "v8BreakIterator");
}
var segmenter = this;
@@ -1900,7 +1952,7 @@ InstallFunction(Intl.v8BreakIterator.prototype, 'resolvedOptions',
InstallFunction(Intl.v8BreakIterator, 'supportedLocalesOf',
function(locales) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
return supportedLocalesOf('breakiterator', locales, arguments[1]);
@@ -1914,7 +1966,7 @@ InstallFunction(Intl.v8BreakIterator, 'supportedLocalesOf',
*/
function adoptText(iterator, text) {
%BreakIteratorAdoptText(%GetImplFromInitializedIntlObject(iterator),
- GlobalString(text));
+ TO_STRING(text));
}
@@ -1950,11 +2002,13 @@ function breakType(iterator) {
}
-AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1);
-AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0);
-AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0);
-AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0);
-AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0);
+AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1,
+ 'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0, 'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0, 'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0, 'breakiterator');
+AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0,
+ 'breakiterator');
// Save references to Intl objects and methods we use, for added security.
var savedObjects = {
@@ -1976,6 +2030,23 @@ var defaultObjects = {
'dateformattime': UNDEFINED,
};
+function clearDefaultObjects() {
+ defaultObjects['dateformatall'] = UNDEFINED;
+ defaultObjects['dateformatdate'] = UNDEFINED;
+ defaultObjects['dateformattime'] = UNDEFINED;
+}
+
+var date_cache_version = 0;
+
+function checkDateCacheCurrent() {
+ var new_date_cache_version = %DateCacheVersion();
+ if (new_date_cache_version == date_cache_version) {
+ return;
+ }
+ date_cache_version = new_date_cache_version;
+
+ clearDefaultObjects();
+}
/**
* Returns cached or newly created instance of a given service.
@@ -1984,6 +2055,7 @@ var defaultObjects = {
function cachedOrNewService(service, locales, options, defaults) {
var useOptions = (IS_UNDEFINED(defaults)) ? options : defaults;
if (IS_UNDEFINED(locales) && IS_UNDEFINED(options)) {
+ checkDateCacheCurrent();
if (IS_UNDEFINED(defaultObjects[service])) {
defaultObjects[service] = new savedObjects[service](locales, useOptions);
}
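The checkDateCacheCurrent call added above fixes stale cached defaults: when V8's date cache is reset (for example, a system time-zone change bumps %DateCacheVersion), the cached default DateTimeFormat instances are dropped and rebuilt. The same version-checked cache pattern in plain JS, with a hypothetical host hook:

```js
// Sketch only; tzVersion() is a hypothetical stand-in for %DateCacheVersion.
let cachedFormatter;
let cachedVersion = 0;

function defaultDateFormatter() {
  const v = tzVersion();      // bumps when the host time zone changes
  if (v !== cachedVersion) {  // cache is stale: discard it
    cachedFormatter = undefined;
    cachedVersion = v;
  }
  if (cachedFormatter === undefined) {
    cachedFormatter = new Intl.DateTimeFormat();  // rebuilt against the current zone
  }
  return cachedFormatter;
}
```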
@@ -1992,17 +2064,48 @@ function cachedOrNewService(service, locales, options, defaults) {
return new savedObjects[service](locales, useOptions);
}
+function LocaleConvertCase(s, locales, isToUpper) {
+ // ECMA 402 section 13.1.2 steps 1 through 12.
+ var language;
+ // Optimize for the most common two cases. initializeLocaleList() can handle
+  // them as well, but it's rather slow, accounting for over 60% of the
+  // time in toLocale{U,L}Case() and about 40% in toLocale{U,L}Case("<locale>").
+ if (IS_UNDEFINED(locales)) {
+ language = GetDefaultICULocaleJS();
+ } else if (IS_STRING(locales)) {
+ language = canonicalizeLanguageTag(locales);
+ } else {
+ var locales = initializeLocaleList(locales);
+ language = locales.length > 0 ? locales[0] : GetDefaultICULocaleJS();
+ }
+
+ // StringSplit is slower than this.
+ var pos = %_Call(StringIndexOf, language, '-');
+ if (pos != -1) {
+ language = %_Call(StringSubstring, language, 0, pos);
+ }
+
+ var CUSTOM_CASE_LANGUAGES = ['az', 'el', 'lt', 'tr'];
+ var langIndex = %ArrayIndexOf(CUSTOM_CASE_LANGUAGES, language, 0);
+ if (langIndex == -1) {
+ // language-independent case conversion.
+ return isToUpper ? %StringToUpperCaseI18N(s) : %StringToLowerCaseI18N(s);
+ }
+ return %StringLocaleConvertCase(s, isToUpper,
+ CUSTOM_CASE_LANGUAGES[langIndex]);
+}
+
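LocaleConvertCase fast-paths the four languages whose case mappings differ from the root locale (az, el, lt, tr) and otherwise falls back to the locale-independent ICU conversion. The Turkish dotted/dotless i is the classic observable difference:

```js
'I'.toLocaleLowerCase('tr');  // 'ı' (dotless lowercase i)
'I'.toLocaleLowerCase('en');  // 'i'
'i'.toLocaleUpperCase('tr');  // 'İ' (dotted uppercase I)
'i'.toUpperCase();            // 'I' (locale-independent path)
```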
/**
* Compares this and that, and returns less than 0, 0 or greater than 0 value.
* Overrides the built-in method.
*/
OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
if (IS_NULL_OR_UNDEFINED(this)) {
- throw MakeTypeError(kMethodInvokedOnNullOrUndefined);
+ throw %make_type_error(kMethodInvokedOnNullOrUndefined);
}
var locales = arguments[1];
@@ -2023,7 +2126,7 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
OverrideFunction(GlobalString.prototype, 'normalize', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
@@ -2034,9 +2137,9 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
- var normalizationForm = %_Call(ArrayIndexOf, NORMALIZATION_FORMS, form);
+ var normalizationForm = %ArrayIndexOf(NORMALIZATION_FORMS, form, 0);
if (normalizationForm === -1) {
- throw MakeRangeError(kNormalizationForm,
+ throw %make_range_error(kNormalizationForm,
%_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
}
@@ -2044,6 +2147,56 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
}
);
+function ToLowerCaseI18N() {
+ if (!IS_UNDEFINED(new.target)) {
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
+ }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
+ var s = TO_STRING(this);
+ return %StringToLowerCaseI18N(s);
+}
+
+function ToUpperCaseI18N() {
+ if (!IS_UNDEFINED(new.target)) {
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
+ }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
+ var s = TO_STRING(this);
+ return %StringToUpperCaseI18N(s);
+}
+
+function ToLocaleLowerCaseI18N(locales) {
+ if (!IS_UNDEFINED(new.target)) {
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
+ }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
+ return LocaleConvertCase(TO_STRING(this), locales, false);
+}
+
+%FunctionSetLength(ToLocaleLowerCaseI18N, 0);
+
+function ToLocaleUpperCaseI18N(locales) {
+ if (!IS_UNDEFINED(new.target)) {
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
+ }
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
+ return LocaleConvertCase(TO_STRING(this), locales, true);
+}
+
+%FunctionSetLength(ToLocaleUpperCaseI18N, 0);
+
+%FunctionRemovePrototype(ToLowerCaseI18N);
+%FunctionRemovePrototype(ToUpperCaseI18N);
+%FunctionRemovePrototype(ToLocaleLowerCaseI18N);
+%FunctionRemovePrototype(ToLocaleUpperCaseI18N);
+
+utils.Export(function(to) {
+ to.ToLowerCaseI18N = ToLowerCaseI18N;
+ to.ToUpperCaseI18N = ToUpperCaseI18N;
+ to.ToLocaleLowerCaseI18N = ToLocaleLowerCaseI18N;
+ to.ToLocaleUpperCaseI18N = ToLocaleUpperCaseI18N;
+});
+
/**
* Formats a Number object (this) using locale and options values.
@@ -2051,11 +2204,11 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
*/
OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
if (!(this instanceof GlobalNumber) && typeof(this) !== 'number') {
- throw MakeTypeError(kMethodInvokedOnWrongType, "Number");
+ throw %make_type_error(kMethodInvokedOnWrongType, "Number");
}
var locales = arguments[0];
@@ -2071,7 +2224,7 @@ OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
*/
function toLocaleDateTime(date, locales, options, required, defaults, service) {
if (!(date instanceof GlobalDate)) {
- throw MakeTypeError(kMethodInvokedOnWrongType, "Date");
+ throw %make_type_error(kMethodInvokedOnWrongType, "Date");
}
if (IsNaN(date)) return 'Invalid Date';
@@ -2092,7 +2245,7 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
var locales = arguments[0];
@@ -2110,7 +2263,7 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
var locales = arguments[0];
@@ -2128,7 +2281,7 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
if (!IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
+ throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
var locales = arguments[0];
@@ -2138,4 +2291,10 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
}
);
+utils.Export(function(to) {
+ to.AddBoundMethod = AddBoundMethod;
+ to.IntlParseDate = IntlParseDate;
+ to.IntlParseNumber = IntlParseNumber;
+});
+
})
diff --git a/deps/v8/src/js/icu-case-mapping.js b/deps/v8/src/js/icu-case-mapping.js
new file mode 100644
index 0000000000..9806249d71
--- /dev/null
+++ b/deps/v8/src/js/icu-case-mapping.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalString = global.String;
+var OverrideFunction = utils.OverrideFunction;
+var ToLowerCaseI18N = utils.ImportNow("ToLowerCaseI18N");
+var ToUpperCaseI18N = utils.ImportNow("ToUpperCaseI18N");
+var ToLocaleLowerCaseI18N = utils.ImportNow("ToLocaleLowerCaseI18N");
+var ToLocaleUpperCaseI18N = utils.ImportNow("ToLocaleUpperCaseI18N");
+
+OverrideFunction(GlobalString.prototype, 'toLowerCase', ToLowerCaseI18N, true);
+OverrideFunction(GlobalString.prototype, 'toUpperCase', ToUpperCaseI18N, true);
+OverrideFunction(GlobalString.prototype, 'toLocaleLowerCase',
+ ToLocaleLowerCaseI18N, true);
+OverrideFunction(GlobalString.prototype, 'toLocaleUpperCase',
+ ToLocaleUpperCaseI18N, true);
+
+})
diff --git a/deps/v8/src/js/intl-extra.js b/deps/v8/src/js/intl-extra.js
new file mode 100644
index 0000000000..a4d22568b9
--- /dev/null
+++ b/deps/v8/src/js/intl-extra.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalIntl = global.Intl;
+
+var AddBoundMethod = utils.ImportNow("AddBoundMethod");
+var IntlParseDate = utils.ImportNow("IntlParseDate");
+var IntlParseNumber = utils.ImportNow("IntlParseNumber");
+
+AddBoundMethod(GlobalIntl.DateTimeFormat, 'v8Parse', IntlParseDate, 1,
+ 'dateformat');
+AddBoundMethod(GlobalIntl.NumberFormat, 'v8Parse', IntlParseNumber, 1,
+ 'numberformat');
+
+})
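The new intl-extra.js re-installs the nonstandard v8Parse methods only when FLAG_intl_extra is set, matching the gating added to i18n.js above. When the flag is on they act as the inverse of format; a sketch (v8Parse is a V8 extension, not part of ECMA-402):

```js
// Only with V8's intl_extra flag enabled; nonstandard API.
const nf = new Intl.NumberFormat('de-DE');
nf.format(1234.5);      // '1.234,5'
nf.v8Parse('1.234,5');  // 1234.5

const df = new Intl.DateTimeFormat('en-US');
typeof df.v8Parse;      // 'function'; returns undefined for unparseable strings
```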
diff --git a/deps/v8/src/js/json.js b/deps/v8/src/js/json.js
deleted file mode 100644
index c6dbed9cbb..0000000000
--- a/deps/v8/src/js/json.js
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalDate = global.Date;
-var GlobalJSON = global.JSON;
-var GlobalSet = global.Set;
-var InternalArray = utils.InternalArray;
-var MakeTypeError;
-var MaxSimple;
-var MinSimple;
-var ObjectHasOwnProperty;
-var Stack;
-var StackHas;
-var StackPop;
-var StackPush;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
- MaxSimple = from.MaxSimple;
- MinSimple = from.MinSimple;
- ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- Stack = from.Stack;
- StackHas = from.StackHas;
- StackPop = from.StackPop;
- StackPush = from.StackPush;
-});
-
-// -------------------------------------------------------------------
-
-function CreateDataProperty(o, p, v) {
- var desc = {value: v, enumerable: true, writable: true, configurable: true};
- return %reflect_define_property(o, p, desc);
-}
-
-
-function InternalizeJSONProperty(holder, name, reviver) {
- var val = holder[name];
- if (IS_RECEIVER(val)) {
- if (%is_arraylike(val)) {
- var length = TO_LENGTH(val.length);
- for (var i = 0; i < length; i++) {
- var newElement =
- InternalizeJSONProperty(val, %_NumberToString(i), reviver);
- if (IS_UNDEFINED(newElement)) {
- %reflect_delete_property(val, i);
- } else {
- CreateDataProperty(val, i, newElement);
- }
- }
- } else {
- var keys = %object_keys(val);
- for (var i = 0; i < keys.length; i++) {
- var p = keys[i];
- var newElement = InternalizeJSONProperty(val, p, reviver);
- if (IS_UNDEFINED(newElement)) {
- %reflect_delete_property(val, p);
- } else {
- CreateDataProperty(val, p, newElement);
- }
- }
- }
- }
- return %_Call(reviver, holder, name, val);
-}
-
-
-function JSONParse(text, reviver) {
- var unfiltered = %ParseJson(text);
- if (IS_CALLABLE(reviver)) {
- return InternalizeJSONProperty({'': unfiltered}, '', reviver);
- } else {
- return unfiltered;
- }
-}
-
-
-function SerializeArray(value, replacer, stack, indent, gap) {
- if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
- StackPush(stack, value);
- var stepback = indent;
- indent += gap;
- var partial = new InternalArray();
- var len = TO_LENGTH(value.length);
- for (var i = 0; i < len; i++) {
- var strP = JSONSerialize(%_NumberToString(i), value, replacer, stack,
- indent, gap);
- if (IS_UNDEFINED(strP)) {
- strP = "null";
- }
- partial.push(strP);
- }
- var final;
- if (gap == "") {
- final = "[" + partial.join(",") + "]";
- } else if (partial.length > 0) {
- var separator = ",\n" + indent;
- final = "[\n" + indent + partial.join(separator) + "\n" +
- stepback + "]";
- } else {
- final = "[]";
- }
- StackPop(stack);
- return final;
-}
-
-
-function SerializeObject(value, replacer, stack, indent, gap) {
- if (StackHas(stack, value)) throw MakeTypeError(kCircularStructure);
- StackPush(stack, value);
- var stepback = indent;
- indent += gap;
- var partial = new InternalArray();
- if (IS_ARRAY(replacer)) {
- var length = replacer.length;
- for (var i = 0; i < length; i++) {
- var p = replacer[i];
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
- }
- } else {
- var keys = %object_keys(value);
- for (var i = 0; i < keys.length; i++) {
- var p = keys[i];
- var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
- if (!IS_UNDEFINED(strP)) {
- var member = %QuoteJSONString(p) + ":";
- if (gap != "") member += " ";
- member += strP;
- partial.push(member);
- }
- }
- }
- var final;
- if (gap == "") {
- final = "{" + partial.join(",") + "}";
- } else if (partial.length > 0) {
- var separator = ",\n" + indent;
- final = "{\n" + indent + partial.join(separator) + "\n" +
- stepback + "}";
- } else {
- final = "{}";
- }
- StackPop(stack);
- return final;
-}
-
-
-function JSONSerialize(key, holder, replacer, stack, indent, gap) {
- var value = holder[key];
- if (IS_RECEIVER(value)) {
- var toJSON = value.toJSON;
- if (IS_CALLABLE(toJSON)) {
- value = %_Call(toJSON, value, key);
- }
- }
- if (IS_CALLABLE(replacer)) {
- value = %_Call(replacer, holder, key, value);
- }
- if (IS_STRING(value)) {
- return %QuoteJSONString(value);
- } else if (IS_NUMBER(value)) {
- return JSON_NUMBER_TO_STRING(value);
- } else if (IS_BOOLEAN(value)) {
- return value ? "true" : "false";
- } else if (IS_NULL(value)) {
- return "null";
- } else if (IS_RECEIVER(value) && !IS_CALLABLE(value)) {
- // Non-callable object. If it's a primitive wrapper, it must be unwrapped.
- if (%is_arraylike(value)) {
- return SerializeArray(value, replacer, stack, indent, gap);
- } else if (IS_NUMBER_WRAPPER(value)) {
- value = TO_NUMBER(value);
- return JSON_NUMBER_TO_STRING(value);
- } else if (IS_STRING_WRAPPER(value)) {
- return %QuoteJSONString(TO_STRING(value));
- } else if (IS_BOOLEAN_WRAPPER(value)) {
- return %_ValueOf(value) ? "true" : "false";
- } else {
- return SerializeObject(value, replacer, stack, indent, gap);
- }
- }
- // Undefined or a callable object.
- return UNDEFINED;
-}
-
-
-function JSONStringify(value, replacer, space) {
- if (arguments.length === 1 && !IS_PROXY(value)) {
- return %BasicJSONStringify(value);
- }
- if (!IS_CALLABLE(replacer) && %is_arraylike(replacer)) {
- var property_list = new InternalArray();
- var seen_properties = new GlobalSet();
- var length = TO_LENGTH(replacer.length);
- for (var i = 0; i < length; i++) {
- var v = replacer[i];
- var item;
- if (IS_STRING(v)) {
- item = v;
- } else if (IS_NUMBER(v)) {
- item = %_NumberToString(v);
- } else if (IS_STRING_WRAPPER(v) || IS_NUMBER_WRAPPER(v)) {
- item = TO_STRING(v);
- } else {
- continue;
- }
- if (!seen_properties.has(item)) {
- property_list.push(item);
- seen_properties.add(item);
- }
- }
- replacer = property_list;
- }
- if (IS_OBJECT(space)) {
- // Unwrap 'space' if it is wrapped
- if (IS_NUMBER_WRAPPER(space)) {
- space = TO_NUMBER(space);
- } else if (IS_STRING_WRAPPER(space)) {
- space = TO_STRING(space);
- }
- }
- var gap;
- if (IS_NUMBER(space)) {
- space = MaxSimple(0, MinSimple(TO_INTEGER(space), 10));
- gap = %_SubString(" ", 0, space);
- } else if (IS_STRING(space)) {
- if (space.length > 10) {
- gap = %_SubString(space, 0, 10);
- } else {
- gap = space;
- }
- } else {
- gap = "";
- }
- if (!IS_CALLABLE(replacer) && !property_list && !gap && !IS_PROXY(value)) {
- return %BasicJSONStringify(value);
- }
- return JSONSerialize('', {'': value}, replacer, new Stack(), "", gap);
-}
-
-// -------------------------------------------------------------------
-
-%AddNamedProperty(GlobalJSON, toStringTagSymbol, "JSON", READ_ONLY | DONT_ENUM);
-
-// Set up non-enumerable properties of the JSON object.
-utils.InstallFunctions(GlobalJSON, DONT_ENUM, [
- "parse", JSONParse,
- "stringify", JSONStringify
-]);
-
-// -------------------------------------------------------------------
-// Date.toJSON
-
-// 20.3.4.37 Date.prototype.toJSON ( key )
-function DateToJSON(key) {
- var o = TO_OBJECT(this);
- var tv = TO_PRIMITIVE_NUMBER(o);
- if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
- return null;
- }
- return o.toISOString();
-}
-
-// Set up non-enumerable functions of the Date prototype object.
-utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
- "toJSON", DateToJSON
-]);
-
-// -------------------------------------------------------------------
-// JSON Builtins
-
-function JsonSerializeAdapter(key, object) {
- var holder = {};
- holder[key] = object;
- // No need to pass the actual holder since there is no replacer function.
- return JSONSerialize(key, holder, UNDEFINED, new Stack(), "", "");
-}
-
-%InstallToContext(["json_serialize_adapter", JsonSerializeAdapter]);
-
-})
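The whole self-hosted JSON implementation is deleted here; JSON.parse and JSON.stringify now live on the native side. The removed code still documents the spec corners the replacement must keep, such as replacer-array de-duplication, wrapper unwrapping, and the bottom-up reviver walk:

```js
JSON.stringify({ a: 1, b: 2 }, ['a', 'a']);     // '{"a":1}' (duplicate replacer keys collapse)
JSON.stringify({ a: 1 }, null, new Number(2));  // number wrapper unwrapped as the indent width
JSON.parse('{"a":1,"b":2}', (k, v) =>
    typeof v === 'number' ? v * 10 : v);        // { a: 10, b: 20 } (reviver runs leaves-first)
```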
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index a4c7f53293..cdc3d0ae0c 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -32,17 +32,6 @@ define NONE = 0;
define READ_ONLY = 1;
define DONT_ENUM = 2;
define DONT_DELETE = 4;
-define NEW_ONE_BYTE_STRING = true;
-define NEW_TWO_BYTE_STRING = false;
-
-# Constants used for getter and setter operations.
-define GETTER = 0;
-define SETTER = 1;
-
-# Safe maximum number of arguments to push to stack, when multiplied by
-# pointer size. Used by Function.prototype.apply(), Reflect.apply() and
-# Reflect.construct().
-define kSafeArgumentsLength = 0x800000;
# 2^53 - 1
define kMaxSafeInteger = 9007199254740991;
@@ -50,10 +39,6 @@ define kMaxSafeInteger = 9007199254740991;
# 2^32 - 1
define kMaxUint32 = 4294967295;
-# Strict mode flags for passing to %SetProperty
-define kSloppyMode = 0;
-define kStrictMode = 1;
-
# Native cache ids.
define STRING_TO_REGEXP_CACHE_ID = 0;
@@ -65,7 +50,6 @@ define STRING_TO_REGEXP_CACHE_ID = 0;
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_DATE(arg) = (%IsDate(arg));
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
@@ -77,7 +61,6 @@ macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
-macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_PROXY(arg) = (%_IsJSProxy(arg));
macro IS_REGEXP(arg) = (%_IsRegExp(arg));
@@ -87,9 +70,7 @@ macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_SIMD_VALUE(arg) = (%IsSimdValue(arg));
macro IS_STRING(arg) = (typeof(arg) === 'string');
-macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
-macro IS_SYMBOL_WRAPPER(arg) = (%_ClassOf(arg) === 'Symbol');
macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg));
macro IS_UNDEFINED(arg) = (arg === (void 0));
macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
@@ -103,27 +84,21 @@ macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
-macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw MakeTypeError(kCalledOnNullOrUndefined, functionName);
+macro CHECK_OBJECT_COERCIBLE(arg, functionName) = if (IS_NULL(%IS_VAR(arg)) || IS_UNDEFINED(arg)) throw %make_type_error(kCalledOnNullOrUndefined, functionName);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
macro TO_BOOLEAN(arg) = (!!(arg));
macro TO_INTEGER(arg) = (%_ToInteger(arg));
-macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(arg));
macro TO_INT32(arg) = ((arg) | 0);
macro TO_UINT32(arg) = ((arg) >>> 0);
+macro INVERT_NEG_ZERO(arg) = ((arg) + 0);
macro TO_LENGTH(arg) = (%_ToLength(arg));
macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
-macro TO_PRIMITIVE(arg) = (%_ToPrimitive(arg));
-macro TO_PRIMITIVE_NUMBER(arg) = (%_ToPrimitive_Number(arg));
-macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg));
-macro TO_NAME(arg) = (%_ToName(arg));
-macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
macro HAS_OWN_PROPERTY(obj, key) = (%_Call(ObjectHasOwnProperty, obj, key));
-macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array)) && (index < array.length)) || (index in array));
# Private names.
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
@@ -132,6 +107,9 @@ macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
+# To avoid ES2015 Function name inference.
+macro ANONYMOUS_FUNCTION(fn) = (0, (fn));
+
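The new ANONYMOUS_FUNCTION macro relies on a comma expression not being a named-evaluation position in ES2015; a plain-JS illustration:

    // ES2015 infers a .name for anonymous functions assigned directly...
    const named = function () {};
    console.log(named.name);                 // -> "named"
    // ...but not when the function expression sits inside a comma
    // expression, which is exactly what (0, (fn)) produces.
    const anonymous = (0, function () {});
    console.log(anonymous.name);             // -> ""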
# Constants. The compiler constant folds them.
define INFINITY = (1/0);
define UNDEFINED = (void 0);
@@ -139,9 +117,18 @@ define UNDEFINED = (void 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
-# Constants used on an array to implement the properties of the RegExp object.
+# Layout of internal RegExpLastMatchInfo object.
define REGEXP_NUMBER_OF_CAPTURES = 0;
+define REGEXP_LAST_SUBJECT = 1;
+define REGEXP_LAST_INPUT = 2;
define REGEXP_FIRST_CAPTURE = 3;
+define CAPTURE0 = 3; # Aliases REGEXP_FIRST_CAPTURE.
+define CAPTURE1 = 4;
+
+macro NUMBER_OF_CAPTURES(array) = ((array)[REGEXP_NUMBER_OF_CAPTURES]);
+macro LAST_SUBJECT(array) = ((array)[REGEXP_LAST_SUBJECT]);
+macro LAST_INPUT(array) = ((array)[REGEXP_LAST_INPUT]);
+macro CAPTURE(index) = (REGEXP_FIRST_CAPTURE + (index));
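A sketch of the layout these macros index into; the RegExpLastMatchInfo object itself is V8-internal and unreachable from script, so the shape below is inferred from the macro definitions:

    // Slot 0: number of capture slots (two per capture, capture 0 being
    //         the whole match); slots 1-2: last subject/input strings;
    // slots 3+: start/end offset pairs for each capture.
    const REGEXP_FIRST_CAPTURE = 3;
    const CAPTURE = (index) => REGEXP_FIRST_CAPTURE + index;
    console.log(CAPTURE(0), CAPTURE(1));     // -> 3 4, i.e. CAPTURE0/CAPTURE1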
# Macros for internal slot access.
macro REGEXP_GLOBAL(regexp) = (%_RegExpFlags(regexp) & 1);
@@ -151,20 +138,6 @@ macro REGEXP_STICKY(regexp) = (%_RegExpFlags(regexp) & 8);
macro REGEXP_UNICODE(regexp) = (%_RegExpFlags(regexp) & 16);
macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp));
-# We can't put macros in macros so we use constants here.
-# REGEXP_NUMBER_OF_CAPTURES
-macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
-
-# Last input and last subject of regexp matches.
-define LAST_SUBJECT_INDEX = 1;
-macro LAST_SUBJECT(array) = ((array)[1]);
-macro LAST_INPUT(array) = ((array)[2]);
-
-# REGEXP_FIRST_CAPTURE
-macro CAPTURE(index) = (3 + (index));
-define CAPTURE0 = 3;
-define CAPTURE1 = 4;
-
# For the regexp capture override array. This has the same
# format as the arguments to a function called from
# String.prototype.replace.
@@ -174,16 +147,6 @@ macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]);
# 1-based so index of 1 returns the first capture
macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
-# PropertyDescriptor return value indices - must match
-# PropertyDescriptorIndices in runtime-object.cc.
-define IS_ACCESSOR_INDEX = 0;
-define VALUE_INDEX = 1;
-define GETTER_INDEX = 2;
-define SETTER_INDEX = 3;
-define WRITABLE_INDEX = 4;
-define ENUMERABLE_INDEX = 5;
-define CONFIGURABLE_INDEX = 6;
-
# For messages.js
# Matches Script::Type from objects.h
define TYPE_NATIVE = 0;
@@ -195,9 +158,6 @@ define COMPILATION_TYPE_HOST = 0;
define COMPILATION_TYPE_EVAL = 1;
define COMPILATION_TYPE_JSON = 2;
-# Matches Messages::kNoLineNumberInfo from v8.h
-define kNoLineNumberInfo = 0;
-
# Must match PropertyFilter in property-details.h
define PROPERTY_FILTER_NONE = 0;
define PROPERTY_FILTER_ONLY_ENUMERABLE = 2;
@@ -238,11 +198,6 @@ define NOT_FOUND = -1;
# Check whether debug is active.
define DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
-macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (%_DebugIsActive() != 0) %DebugPrepareStepInIfStepping(function);
-
-# SharedFlag equivalents
-define kNotShared = false;
-define kShared = true;
# UseCounters from include/v8.h
define kUseAsm = 0;
@@ -251,7 +206,6 @@ define kLegacyConst = 2;
define kMarkDequeOverflow = 3;
define kStoreBufferOverflow = 4;
define kSlotsBufferOverflow = 5;
-define kObjectObserve = 6;
define kForcedGC = 7;
define kSloppyMode = 8;
define kStrictMode = 9;
diff --git a/deps/v8/src/js/math.js b/deps/v8/src/js/math.js
index f8ad6b1fe6..346da24596 100644
--- a/deps/v8/src/js/math.js
+++ b/deps/v8/src/js/math.js
@@ -13,47 +13,13 @@
// The first two slots are reserved to persist PRNG state.
define kRandomNumberStart = 2;
-var GlobalFloat64Array = global.Float64Array;
var GlobalMath = global.Math;
-var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
var NaN = %GetRootNaN();
var nextRandomIndex = 0;
var randomNumbers = UNDEFINED;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
//-------------------------------------------------------------------
-
-// ECMA 262 - 15.8.2.1
-function MathAbs(x) {
- x = +x;
- return (x > 0) ? x : 0 - x;
-}
-
-// ECMA 262 - 15.8.2.5
-// The naming of y and x matches the spec, as does the order in which
-// ToNumber (valueOf) is called.
-function MathAtan2JS(y, x) {
- y = +y;
- x = +x;
- return %MathAtan2(y, x);
-}
-
-// ECMA 262 - 15.8.2.8
-function MathExp(x) {
- return %MathExpRT(TO_NUMBER(x));
-}
-
-// ECMA 262 - 15.8.2.10
-function MathLog(x) {
- return %_MathLogRT(TO_NUMBER(x));
-}
-
-// ECMA 262 - 15.8.2.13
-function MathPowJS(x, y) {
- return %_MathPow(TO_NUMBER(x), TO_NUMBER(y));
-}
-
// ECMA 262 - 15.8.2.14
function MathRandom() {
// While creating a startup snapshot, %GenerateRandomNumbers returns a
@@ -63,162 +29,32 @@ function MathRandom() {
// first two elements are reserved for the PRNG state.
if (nextRandomIndex <= kRandomNumberStart) {
randomNumbers = %GenerateRandomNumbers(randomNumbers);
- nextRandomIndex = randomNumbers.length;
+ if (%_IsTypedArray(randomNumbers)) {
+ nextRandomIndex = %_TypedArrayGetLength(randomNumbers);
+ } else {
+ nextRandomIndex = randomNumbers.length;
+ }
}
return randomNumbers[--nextRandomIndex];
}
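The refill-and-drain pattern above, sketched in plain JavaScript; Math.random stands in for the internal %GenerateRandomNumbers, and the first two slots are left reserved as in the real buffer:

    const kRandomNumberStart = 2;  // slots 0-1 persist PRNG state
    let buffer = null;
    let next = 0;

    function cachedRandom() {
      if (next <= kRandomNumberStart) {      // buffer drained: refill
        buffer = new Float64Array(64);
        for (let i = kRandomNumberStart; i < buffer.length; i++) {
          buffer[i] = Math.random();         // stand-in generator
        }
        next = buffer.length;
      }
      return buffer[--next];                 // consume from the end
    }
    console.log(cachedRandom());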
-function MathRandomRaw() {
- if (nextRandomIndex <= kRandomNumberStart) {
- randomNumbers = %GenerateRandomNumbers(randomNumbers);
- nextRandomIndex = randomNumbers.length;
- }
- return %_DoubleLo(randomNumbers[--nextRandomIndex]) & 0x3FFFFFFF;
-}
-
-// ES6 draft 09-27-13, section 20.2.2.28.
-function MathSign(x) {
- x = +x;
- if (x > 0) return 1;
- if (x < 0) return -1;
- // -0, 0 or NaN.
- return x;
-}
-
-// ES6 draft 09-27-13, section 20.2.2.5.
-function MathAsinh(x) {
- x = TO_NUMBER(x);
- // Idempotent for NaN, +/-0 and +/-Infinity.
- if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
- if (x > 0) return MathLog(x + %math_sqrt(x * x + 1));
- // This is to prevent numerical errors caused by large negative x.
- return -MathLog(-x + %math_sqrt(x * x + 1));
-}
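The sign flip in the deleted MathAsinh is not cosmetic: for large negative x, x + sqrt(x*x + 1) cancels to zero in floating point. Observable against the built-in:

    // sqrt(1e16 + 1) rounds to exactly 1e8, so the naive formula
    // computes log(-1e8 + 1e8) = log(0) = -Infinity.
    const naiveAsinh = (x) => Math.log(x + Math.sqrt(x * x + 1));
    console.log(naiveAsinh(-1e8));           // -> -Infinity (cancellation)
    console.log(Math.asinh(-1e8));           // ≈ -19.1138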
-
-// ES6 draft 09-27-13, section 20.2.2.3.
-function MathAcosh(x) {
- x = TO_NUMBER(x);
- if (x < 1) return NaN;
- // Idempotent for NaN and +Infinity.
- if (!NUMBER_IS_FINITE(x)) return x;
- return MathLog(x + %math_sqrt(x + 1) * %math_sqrt(x - 1));
-}
-
-// ES6 draft 09-27-13, section 20.2.2.7.
-function MathAtanh(x) {
- x = TO_NUMBER(x);
- // Idempotent for +/-0.
- if (x === 0) return x;
- // Returns NaN for NaN and +/- Infinity.
- if (!NUMBER_IS_FINITE(x)) return NaN;
- return 0.5 * MathLog((1 + x) / (1 - x));
-}
-
-// ES6 draft 09-27-13, section 20.2.2.17.
-function MathHypot(x, y) { // Function length is 2.
- // We may want to introduce fast paths for two arguments, and for cases
- // where normalization to avoid overflow is not necessary. For now, we
- // simply assume the general case.
- var length = arguments.length;
- var max = 0;
- for (var i = 0; i < length; i++) {
- var n = MathAbs(arguments[i]);
- if (n > max) max = n;
- arguments[i] = n;
- }
- if (max === INFINITY) return INFINITY;
-
- // Kahan summation to avoid rounding errors.
- // Normalize the numbers to the largest one to avoid overflow.
- if (max === 0) max = 1;
- var sum = 0;
- var compensation = 0;
- for (var i = 0; i < length; i++) {
- var n = arguments[i] / max;
- var summand = n * n - compensation;
- var preliminary = sum + summand;
- compensation = (preliminary - sum) - summand;
- sum = preliminary;
- }
- return %math_sqrt(sum) * max;
-}
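The normalization step in the deleted MathHypot is what keeps the intermediate squares from overflowing; the difference is easy to observe against a naive implementation:

    // 3e200 squared overflows double precision, so the textbook formula
    // returns Infinity; the normalized version (and the builtin) do not.
    const naiveHypot = (x, y) => Math.sqrt(x * x + y * y);
    console.log(naiveHypot(3e200, 4e200));   // -> Infinity
    console.log(Math.hypot(3e200, 4e200));   // -> 5e+200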
-
-// ES6 draft 09-27-13, section 20.2.2.9.
-// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm
-// Using initial approximation adapted from Kahan's cbrt and 4 iterations
-// of Newton's method.
-function MathCbrt(x) {
- x = TO_NUMBER(x);
- if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
- return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
-}
-
-macro NEWTON_ITERATION_CBRT(x, approx)
- (1.0 / 3.0) * (x / (approx * approx) + 2 * approx);
-endmacro
-
-function CubeRoot(x) {
- var approx_hi = %math_floor(%_DoubleHi(x) / 3) + 0x2A9F7893;
- var approx = %_ConstructDouble(approx_hi | 0, 0);
- approx = NEWTON_ITERATION_CBRT(x, approx);
- approx = NEWTON_ITERATION_CBRT(x, approx);
- approx = NEWTON_ITERATION_CBRT(x, approx);
- return NEWTON_ITERATION_CBRT(x, approx);
-}
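A self-contained version of the deleted cube-root routine, with a DataView standing in for the %_DoubleHi / %_ConstructDouble intrinsics (illustrative sketch; assumes positive finite input):

    const bits = new DataView(new ArrayBuffer(8));
    const doubleHi = (x) => { bits.setFloat64(0, x); return bits.getInt32(0); };
    const constructDouble = (hi, lo) => {
      bits.setInt32(0, hi); bits.setUint32(4, lo);
      return bits.getFloat64(0);
    };

    function cubeRoot(x) {  // x > 0 and finite
      // Dividing the exponent bits by 3 yields a rough cbrt estimate;
      // the magic constant re-biases it (Kahan's trick).
      const approxHi = Math.floor(doubleHi(x) / 3) + 0x2A9F7893;
      let approx = constructDouble(approxHi | 0, 0);
      for (let i = 0; i < 4; i++) {          // Newton iterations for y^3 = x
        approx = (1 / 3) * (x / (approx * approx) + 2 * approx);
      }
      return approx;
    }
    console.log(cubeRoot(27));               // ≈ 3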
-
// -------------------------------------------------------------------
-%InstallToContext([
- "math_pow", MathPowJS,
-]);
-
%AddNamedProperty(GlobalMath, toStringTagSymbol, "Math", READ_ONLY | DONT_ENUM);
-// Set up math constants.
-utils.InstallConstants(GlobalMath, [
- // ECMA-262, section 15.8.1.1.
- "E", 2.7182818284590452354,
- // ECMA-262, section 15.8.1.2.
- "LN10", 2.302585092994046,
- // ECMA-262, section 15.8.1.3.
- "LN2", 0.6931471805599453,
- // ECMA-262, section 15.8.1.4.
- "LOG2E", 1.4426950408889634,
- "LOG10E", 0.4342944819032518,
- "PI", 3.1415926535897932,
- "SQRT1_2", 0.7071067811865476,
- "SQRT2", 1.4142135623730951
-]);
-
// Set up non-enumerable functions of the Math object and
// set their names.
utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"random", MathRandom,
- "abs", MathAbs,
- "exp", MathExp,
- "log", MathLog,
- "atan2", MathAtan2JS,
- "pow", MathPowJS,
- "sign", MathSign,
- "asinh", MathAsinh,
- "acosh", MathAcosh,
- "atanh", MathAtanh,
- "hypot", MathHypot,
- "cbrt", MathCbrt
]);
-%SetForceInlineFlag(MathAbs);
-%SetForceInlineFlag(MathAtan2JS);
%SetForceInlineFlag(MathRandom);
-%SetForceInlineFlag(MathSign);
// -------------------------------------------------------------------
// Exports
utils.Export(function(to) {
- to.MathAbs = MathAbs;
- to.MathExp = MathExp;
- to.IntRandom = MathRandomRaw;
+ to.MathRandom = MathRandom;
});
})
diff --git a/deps/v8/src/js/messages.js b/deps/v8/src/js/messages.js
index 4529981c30..3ea2bef5ad 100644
--- a/deps/v8/src/js/messages.js
+++ b/deps/v8/src/js/messages.js
@@ -11,156 +11,10 @@
// -------------------------------------------------------------------
// Imports
-var ArrayJoin;
-var Bool16x8ToString;
-var Bool32x4ToString;
-var Bool8x16ToString;
-var callSiteReceiverSymbol =
- utils.ImportNow("call_site_receiver_symbol");
-var callSiteFunctionSymbol =
- utils.ImportNow("call_site_function_symbol");
-var callSitePositionSymbol =
- utils.ImportNow("call_site_position_symbol");
-var callSiteStrictSymbol =
- utils.ImportNow("call_site_strict_symbol");
-var Float32x4ToString;
-var formattedStackTraceSymbol =
- utils.ImportNow("formatted_stack_trace_symbol");
-var GlobalObject = global.Object;
-var Int16x8ToString;
-var Int32x4ToString;
-var Int8x16ToString;
-var InternalArray = utils.InternalArray;
-var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
-var ObjectDefineProperty;
-var ObjectHasOwnProperty;
-var ObjectToString = utils.ImportNow("object_to_string");
var Script = utils.ImportNow("Script");
-var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
-var StringCharAt;
-var StringIndexOf;
-var StringSubstring;
-var SymbolToString;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var Uint16x8ToString;
-var Uint32x4ToString;
-var Uint8x16ToString;
-
-utils.Import(function(from) {
- ArrayJoin = from.ArrayJoin;
- Bool16x8ToString = from.Bool16x8ToString;
- Bool32x4ToString = from.Bool32x4ToString;
- Bool8x16ToString = from.Bool8x16ToString;
- Float32x4ToString = from.Float32x4ToString;
- Int16x8ToString = from.Int16x8ToString;
- Int32x4ToString = from.Int32x4ToString;
- Int8x16ToString = from.Int8x16ToString;
- ObjectDefineProperty = from.ObjectDefineProperty;
- ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- StringCharAt = from.StringCharAt;
- StringIndexOf = from.StringIndexOf;
- StringSubstring = from.StringSubstring;
- SymbolToString = from.SymbolToString;
- Uint16x8ToString = from.Uint16x8ToString;
- Uint32x4ToString = from.Uint32x4ToString;
- Uint8x16ToString = from.Uint8x16ToString;
-});
// -------------------------------------------------------------------
-
-var GlobalError;
-var GlobalTypeError;
-var GlobalRangeError;
-var GlobalURIError;
-var GlobalSyntaxError;
-var GlobalReferenceError;
-var GlobalEvalError;
-
-
-function NoSideEffectsObjectToString() {
- if (IS_UNDEFINED(this)) return "[object Undefined]";
- if (IS_NULL(this)) return "[object Null]";
- var O = TO_OBJECT(this);
- var builtinTag = %_ClassOf(O);
- var tag = %GetDataProperty(O, toStringTagSymbol);
- if (!IS_STRING(tag)) {
- tag = builtinTag;
- }
- return `[object ${tag}]`;
-}
-
-function IsErrorObject(obj) {
- return HAS_PRIVATE(obj, stackTraceSymbol);
-}
-
-function NoSideEffectsErrorToString() {
- var name = %GetDataProperty(this, "name");
- var message = %GetDataProperty(this, "message");
- name = IS_UNDEFINED(name) ? "Error" : NoSideEffectsToString(name);
- message = IS_UNDEFINED(message) ? "" : NoSideEffectsToString(message);
- if (name == "") return message;
- if (message == "") return name;
- return `${name}: ${message}`;
-}
-
-function NoSideEffectsToString(obj) {
- if (IS_STRING(obj)) return obj;
- if (IS_NUMBER(obj)) return %_NumberToString(obj);
- if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false';
- if (IS_UNDEFINED(obj)) return 'undefined';
- if (IS_NULL(obj)) return 'null';
- if (IS_FUNCTION(obj)) {
- var str = %FunctionToString(obj);
- if (str.length > 128) {
- str = %_SubString(str, 0, 111) + "...<omitted>..." +
- %_SubString(str, str.length - 2, str.length);
- }
- return str;
- }
- if (IS_SYMBOL(obj)) return %_Call(SymbolToString, obj);
- if (IS_SIMD_VALUE(obj)) {
- switch (typeof(obj)) {
- case 'float32x4': return %_Call(Float32x4ToString, obj);
- case 'int32x4': return %_Call(Int32x4ToString, obj);
- case 'int16x8': return %_Call(Int16x8ToString, obj);
- case 'int8x16': return %_Call(Int8x16ToString, obj);
- case 'uint32x4': return %_Call(Uint32x4ToString, obj);
- case 'uint16x8': return %_Call(Uint16x8ToString, obj);
- case 'uint8x16': return %_Call(Uint8x16ToString, obj);
- case 'bool32x4': return %_Call(Bool32x4ToString, obj);
- case 'bool16x8': return %_Call(Bool16x8ToString, obj);
- case 'bool8x16': return %_Call(Bool8x16ToString, obj);
- }
- }
-
- if (IS_RECEIVER(obj)) {
- // When internally formatting error objects, use a side-effects-free version
- // of Error.prototype.toString independent of the actually installed
- // toString method.
- if (IsErrorObject(obj) ||
- %GetDataProperty(obj, "toString") === ErrorToString) {
- return %_Call(NoSideEffectsErrorToString, obj);
- }
-
- if (%GetDataProperty(obj, "toString") === ObjectToString) {
- var constructor = %GetDataProperty(obj, "constructor");
- if (IS_FUNCTION(constructor)) {
- var constructor_name = %FunctionGetName(constructor);
- if (constructor_name != "") return `#<${constructor_name}>`;
- }
- }
- }
-
- return %_Call(NoSideEffectsObjectToString, obj);
-}
-
-
-function MakeGenericError(constructor, type, arg0, arg1, arg2) {
- var error = new constructor(FormatMessage(type, arg0, arg1, arg2));
- error[internalErrorSymbol] = true;
- return error;
-}
-
+// Script
/**
* Set up the Script function and constructor.
@@ -168,255 +22,24 @@ function MakeGenericError(constructor, type, arg0, arg1, arg2) {
%FunctionSetInstanceClassName(Script, 'Script');
%AddNamedProperty(Script.prototype, 'constructor', Script,
DONT_ENUM | DONT_DELETE | READ_ONLY);
-%SetCode(Script, function(x) {
- // Script objects can only be created by the VM.
- throw MakeError(kUnsupported);
-});
-
-
-// Helper functions; called from the runtime system.
-function FormatMessage(type, arg0, arg1, arg2) {
- var arg0 = NoSideEffectsToString(arg0);
- var arg1 = NoSideEffectsToString(arg1);
- var arg2 = NoSideEffectsToString(arg2);
- try {
- return %FormatMessageString(type, arg0, arg1, arg2);
- } catch (e) {
- return "<error>";
- }
-}
-
-
-function GetLineNumber(message) {
- var start_position = %MessageGetStartPosition(message);
- if (start_position == -1) return kNoLineNumberInfo;
- var script = %MessageGetScript(message);
- var location = script.locationFromPosition(start_position, true);
- if (location == null) return kNoLineNumberInfo;
- return location.line + 1;
-}
-
-
-// Returns the offset of the given position within the containing line.
-function GetColumnNumber(message) {
- var script = %MessageGetScript(message);
- var start_position = %MessageGetStartPosition(message);
- var location = script.locationFromPosition(start_position, true);
- if (location == null) return -1;
- return location.column;
-}
-
-
-// Returns the source code line containing the given source
-// position, or the empty string if the position is invalid.
-function GetSourceLine(message) {
- var script = %MessageGetScript(message);
- var start_position = %MessageGetStartPosition(message);
- var location = script.locationFromPosition(start_position, true);
- if (location == null) return "";
- return location.sourceText();
-}
/**
- * Find a line number given a specific source position.
- * @param {number} position The source position.
- * @return {number} 0 if input too small, -1 if input too large,
- *     else the line number.
- */
-function ScriptLineFromPosition(position) {
- var lower = 0;
- var upper = this.lineCount() - 1;
- var line_ends = this.line_ends;
-
- // We'll never find invalid positions so bail right away.
- if (position > line_ends[upper]) {
- return -1;
- }
-
- // This means we don't have to safe-guard indexing line_ends[i - 1].
- if (position <= line_ends[0]) {
- return 0;
- }
-
- // Binary search to find line # from position range.
- while (upper >= 1) {
- var i = (lower + upper) >> 1;
-
- if (position > line_ends[i]) {
- lower = i + 1;
- } else if (position <= line_ends[i - 1]) {
- upper = i - 1;
- } else {
- return i;
- }
- }
-
- return -1;
-}
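The deleted lineFromPosition is a classic binary search over line-end offsets (its replacement delegates to %ScriptPositionInfo in C++); a standalone version behaves like this:

    // lineEnds[i] holds the offset of the character terminating line i,
    // so a position belongs to the first line whose end is >= position.
    function lineFromPosition(lineEnds, position) {
      let lower = 0;
      let upper = lineEnds.length - 1;
      if (position > lineEnds[upper]) return -1;   // past the source
      if (position <= lineEnds[0]) return 0;
      while (upper >= 1) {
        const i = (lower + upper) >> 1;
        if (position > lineEnds[i]) lower = i + 1;
        else if (position <= lineEnds[i - 1]) upper = i - 1;
        else return i;
      }
      return -1;
    }
    // "ab\ncd\ne": newlines at offsets 2 and 5, last line ends at 6.
    console.log(lineFromPosition([2, 5, 6], 4));   // -> 1 (the "cd" line)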
-
-/**
* Get information on a specific source position.
+ * Returns an object with the following properties:
+ * script : script object for the source
+ * line : source line number
+ * column : source column within the line
+ * position : position within the source
+ * sourceText : a string containing the current line
* @param {number} position The source position
* @param {boolean} include_resource_offset Set to true to have the resource
* offset added to the location
- * @return {SourceLocation}
- * If line is negative or not in the source null is returned.
+ * @return If line is negative or not in the source, null is returned.
*/
function ScriptLocationFromPosition(position,
include_resource_offset) {
- var line = this.lineFromPosition(position);
- if (line == -1) return null;
-
- // Determine start, end and column.
- var line_ends = this.line_ends;
- var start = line == 0 ? 0 : line_ends[line - 1] + 1;
- var end = line_ends[line];
- if (end > 0 && %_Call(StringCharAt, this.source, end - 1) == '\r') {
- end--;
- }
- var column = position - start;
-
- // Adjust according to the offset within the resource.
- if (include_resource_offset) {
- line += this.line_offset;
- if (line == this.line_offset) {
- column += this.column_offset;
- }
- }
-
- return new SourceLocation(this, position, line, column, start, end);
-}
-
-
-/**
- * Get information on a specific source line and column possibly offset by a
- * fixed source position. This function is used to find a source position from
- * a line and column position. The fixed source position offset is typically
- * used to find a source position in a function based on a line and column in
- * the source for the function alone. The offset passed will then be the
- * start position of the source for the function within the full script source.
- * @param {number} opt_line The line within the source. Default value is 0
- * @param {number} opt_column The column within the line. Default value is 0
- * @param {number} opt_offset_position The offset from the beginning of the
- * source from where the line and column calculation starts.
- * Default value is 0
- * @return {SourceLocation}
- * If line is negative or not in the source null is returned.
- */
-function ScriptLocationFromLine(opt_line, opt_column, opt_offset_position) {
- // Default is the first line in the script. Lines in the script are relative
- // to the offset within the resource.
- var line = 0;
- if (!IS_UNDEFINED(opt_line)) {
- line = opt_line - this.line_offset;
- }
-
- // Default is first column. If on the first line add the offset within the
- // resource.
- var column = opt_column || 0;
- if (line == 0) {
- column -= this.column_offset;
- }
-
- var offset_position = opt_offset_position || 0;
- if (line < 0 || column < 0 || offset_position < 0) return null;
- if (line == 0) {
- return this.locationFromPosition(offset_position + column, false);
- } else {
- // Find the line where the offset position is located.
- var offset_line = this.lineFromPosition(offset_position);
-
- if (offset_line == -1 || offset_line + line >= this.lineCount()) {
- return null;
- }
-
- return this.locationFromPosition(
- this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here.
- }
-}
-
-
-/**
- * Get a slice of source code from the script. The boundaries for the slice is
- * specified in lines.
- * @param {number} opt_from_line The first line (zero bound) in the slice.
- * Default is 0
- * @param {number} opt_to_line The last line (zero bound) in the slice (non
- * inclusive). Default is the number of lines in the script
- * @return {SourceSlice} The source slice, or null if the parameters were
- * invalid
- */
-function ScriptSourceSlice(opt_from_line, opt_to_line) {
- var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset
- : opt_from_line;
- var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount()
- : opt_to_line;
-
- // Adjust according to the offset within the resource.
- from_line -= this.line_offset;
- to_line -= this.line_offset;
- if (from_line < 0) from_line = 0;
- if (to_line > this.lineCount()) to_line = this.lineCount();
-
- // Check parameters.
- if (from_line >= this.lineCount() ||
- to_line < 0 ||
- from_line > to_line) {
- return null;
- }
-
- var line_ends = this.line_ends;
- var from_position = from_line == 0 ? 0 : line_ends[from_line - 1] + 1;
- var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
-
- // Return a source slice with line numbers re-adjusted to the resource.
- return new SourceSlice(this,
- from_line + this.line_offset,
- to_line + this.line_offset,
- from_position, to_position);
-}
-
-
-function ScriptSourceLine(opt_line) {
- // Default is the first line in the script. Lines in the script are relative
- // to the offset within the resource.
- var line = 0;
- if (!IS_UNDEFINED(opt_line)) {
- line = opt_line - this.line_offset;
- }
-
- // Check parameter.
- if (line < 0 || this.lineCount() <= line) {
- return null;
- }
-
- // Return the source line.
- var line_ends = this.line_ends;
- var start = line == 0 ? 0 : line_ends[line - 1] + 1;
- var end = line_ends[line];
- return %_Call(StringSubstring, this.source, start, end);
-}
-
-
-/**
- * Returns the number of source lines.
- * @return {number}
- * Number of source lines.
- */
-function ScriptLineCount() {
- // Return number of source lines.
- return this.line_ends.length;
-}
-
-
-/**
- * Returns the position of the nth line end.
- * @return {number}
- * Zero-based position of the nth line end in the script.
- */
-function ScriptLineEnd(n) {
- return this.line_ends[n];
+ return %ScriptPositionInfo(this, position, !!include_resource_offset);
}
@@ -431,6 +54,7 @@ function ScriptLineEnd(n) {
* deprecated //@ sourceURL comment otherwise.
*/
function ScriptNameOrSourceURL() {
+ // Keep in sync with Script::GetNameOrSourceURL.
if (this.source_url) return this.source_url;
return this.name;
}
@@ -441,589 +65,12 @@ utils.SetUpLockedPrototype(Script, [
"name",
"source_url",
"source_mapping_url",
- "line_ends",
"line_offset",
"column_offset"
], [
- "lineFromPosition", ScriptLineFromPosition,
"locationFromPosition", ScriptLocationFromPosition,
- "locationFromLine", ScriptLocationFromLine,
- "sourceSlice", ScriptSourceSlice,
- "sourceLine", ScriptSourceLine,
- "lineCount", ScriptLineCount,
"nameOrSourceURL", ScriptNameOrSourceURL,
- "lineEnd", ScriptLineEnd
]
);
-
-/**
- * Class for source location. A source location is a position within some
- * source with the following properties:
- * script : script object for the source
- * line : source line number
- * column : source column within the line
- * position : position within the source
- * start : position of start of source context (inclusive)
- * end : position of end of source context (not inclusive)
- * Source text for the source context is the character interval
- * [start, end[. In most cases end will point to a newline character.
- * It might point just past the final position of the source if the last
- * source line does not end with a newline character.
- * @param {Script} script The Script object for which this is a location
- * @param {number} position Source position for the location
- * @param {number} line The line number for the location
- * @param {number} column The column within the line for the location
- * @param {number} start Source position for start of source context
- * @param {number} end Source position for end of source context
- * @constructor
- */
-function SourceLocation(script, position, line, column, start, end) {
- this.script = script;
- this.position = position;
- this.line = line;
- this.column = column;
- this.start = start;
- this.end = end;
-}
-
-
-/**
- * Get the source text for a SourceLocation
- * @return {String}
- * Source text for this location.
- */
-function SourceLocationSourceText() {
- return %_Call(StringSubstring, this.script.source, this.start, this.end);
-}
-
-
-utils.SetUpLockedPrototype(SourceLocation,
- ["script", "position", "line", "column", "start", "end"],
- ["sourceText", SourceLocationSourceText]
-);
-
-
-/**
- * Class for a source slice. A source slice is a part of a script source with
- * the following properties:
- * script : script object for the source
- * from_line : line number for the first line in the slice
- * to_line : source line number for the last line in the slice
- * from_position : position of the first character in the slice
- * to_position : position of the last character in the slice
- * The to_line and to_position are not included in the slice; that is, the lines
- * in the slice are [from_line, to_line[. Likewise the characters in the slice
- * are [from_position, to_position[.
- * @param {Script} script The Script object for the source slice
- * @param {number} from_line
- * @param {number} to_line
- * @param {number} from_position
- * @param {number} to_position
- * @constructor
- */
-function SourceSlice(script, from_line, to_line, from_position, to_position) {
- this.script = script;
- this.from_line = from_line;
- this.to_line = to_line;
- this.from_position = from_position;
- this.to_position = to_position;
-}
-
-/**
- * Get the source text for a SourceSlice
- * @return {String} Source text for this slice. The last line will include
- * the line terminating characters (if any)
- */
-function SourceSliceSourceText() {
- return %_Call(StringSubstring,
- this.script.source,
- this.from_position,
- this.to_position);
-}
-
-utils.SetUpLockedPrototype(SourceSlice,
- ["script", "from_line", "to_line", "from_position", "to_position"],
- ["sourceText", SourceSliceSourceText]
-);
-
-
-function GetStackTraceLine(recv, fun, pos, isGlobal) {
- return new CallSite(recv, fun, pos, false).toString();
-}
-
-// ----------------------------------------------------------------------------
-// Error implementation
-
-function CallSite(receiver, fun, pos, strict_mode) {
- if (!IS_FUNCTION(fun)) {
- throw MakeTypeError(kCallSiteExpectsFunction, typeof fun);
- }
-
- if (IS_UNDEFINED(new.target)) {
- return new CallSite(receiver, fun, pos, strict_mode);
- }
-
- SET_PRIVATE(this, callSiteReceiverSymbol, receiver);
- SET_PRIVATE(this, callSiteFunctionSymbol, fun);
- SET_PRIVATE(this, callSitePositionSymbol, TO_INT32(pos));
- SET_PRIVATE(this, callSiteStrictSymbol, TO_BOOLEAN(strict_mode));
-}
-
-function CheckCallSite(obj, name) {
- if (!IS_RECEIVER(obj) || !HAS_PRIVATE(obj, callSiteFunctionSymbol)) {
- throw MakeTypeError(kCallSiteMethod, name);
- }
-}
-
-function CallSiteGetThis() {
- CheckCallSite(this, "getThis");
- return GET_PRIVATE(this, callSiteStrictSymbol)
- ? UNDEFINED : GET_PRIVATE(this, callSiteReceiverSymbol);
-}
-
-function CallSiteGetFunction() {
- CheckCallSite(this, "getFunction");
- return GET_PRIVATE(this, callSiteStrictSymbol)
- ? UNDEFINED : GET_PRIVATE(this, callSiteFunctionSymbol);
-}
-
-function CallSiteGetPosition() {
- CheckCallSite(this, "getPosition");
- return GET_PRIVATE(this, callSitePositionSymbol);
-}
-
-function CallSiteGetTypeName() {
- CheckCallSite(this, "getTypeName");
- return GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), false);
-}
-
-function CallSiteIsToplevel() {
- CheckCallSite(this, "isTopLevel");
- return %CallSiteIsToplevelRT(this);
-}
-
-function CallSiteIsEval() {
- CheckCallSite(this, "isEval");
- return %CallSiteIsEvalRT(this);
-}
-
-function CallSiteGetEvalOrigin() {
- CheckCallSite(this, "getEvalOrigin");
- var script = %FunctionGetScript(GET_PRIVATE(this, callSiteFunctionSymbol));
- return FormatEvalOrigin(script);
-}
-
-function CallSiteGetScriptNameOrSourceURL() {
- CheckCallSite(this, "getScriptNameOrSourceURL");
- return %CallSiteGetScriptNameOrSourceUrlRT(this);
-}
-
-function CallSiteGetFunctionName() {
- // See if the function knows its own name
- CheckCallSite(this, "getFunctionName");
- return %CallSiteGetFunctionNameRT(this);
-}
-
-function CallSiteGetMethodName() {
- // See if we can find a unique property on the receiver that holds
- // this function.
- CheckCallSite(this, "getMethodName");
- return %CallSiteGetMethodNameRT(this);
-}
-
-function CallSiteGetFileName() {
- CheckCallSite(this, "getFileName");
- return %CallSiteGetFileNameRT(this);
-}
-
-function CallSiteGetLineNumber() {
- CheckCallSite(this, "getLineNumber");
- return %CallSiteGetLineNumberRT(this);
-}
-
-function CallSiteGetColumnNumber() {
- CheckCallSite(this, "getColumnNumber");
- return %CallSiteGetColumnNumberRT(this);
-}
-
-function CallSiteIsNative() {
- CheckCallSite(this, "isNative");
- return %CallSiteIsNativeRT(this);
-}
-
-function CallSiteIsConstructor() {
- CheckCallSite(this, "isConstructor");
- return %CallSiteIsConstructorRT(this);
-}
-
-function CallSiteToString() {
- var fileName;
- var fileLocation = "";
- if (this.isNative()) {
- fileLocation = "native";
- } else {
- fileName = this.getScriptNameOrSourceURL();
- if (!fileName && this.isEval()) {
- fileLocation = this.getEvalOrigin();
- fileLocation += ", "; // Expecting source position to follow.
- }
-
- if (fileName) {
- fileLocation += fileName;
- } else {
- // Source code does not originate from a file and is not native, but we
- // can still get the source position inside the source string, e.g. in
- // an eval string.
- fileLocation += "<anonymous>";
- }
- var lineNumber = this.getLineNumber();
- if (lineNumber != null) {
- fileLocation += ":" + lineNumber;
- var columnNumber = this.getColumnNumber();
- if (columnNumber) {
- fileLocation += ":" + columnNumber;
- }
- }
- }
-
- var line = "";
- var functionName = this.getFunctionName();
- var addSuffix = true;
- var isConstructor = this.isConstructor();
- var isMethodCall = !(this.isToplevel() || isConstructor);
- if (isMethodCall) {
- var typeName = GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), true);
- var methodName = this.getMethodName();
- if (functionName) {
- if (typeName && %_Call(StringIndexOf, functionName, typeName) != 0) {
- line += typeName + ".";
- }
- line += functionName;
- if (methodName &&
- (%_Call(StringIndexOf, functionName, "." + methodName) !=
- functionName.length - methodName.length - 1)) {
- line += " [as " + methodName + "]";
- }
- } else {
- line += typeName + "." + (methodName || "<anonymous>");
- }
- } else if (isConstructor) {
- line += "new " + (functionName || "<anonymous>");
- } else if (functionName) {
- line += functionName;
- } else {
- line += fileLocation;
- addSuffix = false;
- }
- if (addSuffix) {
- line += " (" + fileLocation + ")";
- }
- return line;
-}
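The branches above produce the familiar frame formats; in engines exposing the V8 stack-trace API they can still be observed through Error.prepareStackTrace:

    // Frame shapes, per branch of CallSiteToString:
    //   method call:  "Type.method [as alias] (file.js:10:5)"
    //   constructor:  "new Ctor (file.js:3:1)"
    //   plain call:   "fn (file.js:7:2)"
    //   top level:    "file.js:1:1"
    Error.prepareStackTrace = (err, frames) => frames.map(String);
    const frames = new Error().stack;     // now an array of frame strings
    Error.prepareStackTrace = undefined;
    console.log(frames[0]);  // e.g. "Object.<anonymous> (ex.js:7:16)"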
-
-utils.SetUpLockedPrototype(CallSite, ["receiver", "fun", "pos"], [
- "getThis", CallSiteGetThis,
- "getTypeName", CallSiteGetTypeName,
- "isToplevel", CallSiteIsToplevel,
- "isEval", CallSiteIsEval,
- "getEvalOrigin", CallSiteGetEvalOrigin,
- "getScriptNameOrSourceURL", CallSiteGetScriptNameOrSourceURL,
- "getFunction", CallSiteGetFunction,
- "getFunctionName", CallSiteGetFunctionName,
- "getMethodName", CallSiteGetMethodName,
- "getFileName", CallSiteGetFileName,
- "getLineNumber", CallSiteGetLineNumber,
- "getColumnNumber", CallSiteGetColumnNumber,
- "isNative", CallSiteIsNative,
- "getPosition", CallSiteGetPosition,
- "isConstructor", CallSiteIsConstructor,
- "toString", CallSiteToString
-]);
-
-
-function FormatEvalOrigin(script) {
- var sourceURL = script.nameOrSourceURL();
- if (sourceURL) {
- return sourceURL;
- }
-
- var eval_origin = "eval at ";
- if (script.eval_from_function_name) {
- eval_origin += script.eval_from_function_name;
- } else {
- eval_origin += "<anonymous>";
- }
-
- var eval_from_script = script.eval_from_script;
- if (eval_from_script) {
- if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
- // eval script originated from another eval.
- eval_origin += " (" + FormatEvalOrigin(eval_from_script) + ")";
- } else {
- // eval script originated from "real" source.
- if (eval_from_script.name) {
- eval_origin += " (" + eval_from_script.name;
- var location = eval_from_script.locationFromPosition(
- script.eval_from_script_position, true);
- if (location) {
- eval_origin += ":" + (location.line + 1);
- eval_origin += ":" + (location.column + 1);
- }
- eval_origin += ")";
- } else {
- eval_origin += " (unknown source)";
- }
- }
- }
-
- return eval_origin;
-}
-
-
-function FormatErrorString(error) {
- try {
- return %_Call(ErrorToString, error);
- } catch (e) {
- try {
- return "<error: " + e + ">";
- } catch (ee) {
- return "<error>";
- }
- }
-}
-
-
-function GetStackFrames(raw_stack) {
- var internal_raw_stack = new InternalArray();
- %MoveArrayContents(raw_stack, internal_raw_stack);
- var frames = new InternalArray();
- var sloppy_frames = internal_raw_stack[0];
- for (var i = 1; i < internal_raw_stack.length; i += 4) {
- var recv = internal_raw_stack[i];
- var fun = internal_raw_stack[i + 1];
- var code = internal_raw_stack[i + 2];
- var pc = internal_raw_stack[i + 3];
- var pos = %_IsSmi(code) ? code : %FunctionGetPositionForOffset(code, pc);
- sloppy_frames--;
- frames.push(new CallSite(recv, fun, pos, (sloppy_frames < 0)));
- }
- return frames;
-}
-
-
-// Flag to prevent recursive call of Error.prepareStackTrace.
-var formatting_custom_stack_trace = false;
-
-
-function FormatStackTrace(obj, raw_stack) {
- var frames = GetStackFrames(raw_stack);
- if (IS_FUNCTION(GlobalError.prepareStackTrace) &&
- !formatting_custom_stack_trace) {
- var array = [];
- %MoveArrayContents(frames, array);
- formatting_custom_stack_trace = true;
- var stack_trace = UNDEFINED;
- try {
- stack_trace = GlobalError.prepareStackTrace(obj, array);
- } catch (e) {
- throw e; // The custom formatting function threw. Rethrow.
- } finally {
- formatting_custom_stack_trace = false;
- }
- return stack_trace;
- }
-
- var lines = new InternalArray();
- lines.push(FormatErrorString(obj));
- for (var i = 0; i < frames.length; i++) {
- var frame = frames[i];
- var line;
- try {
- line = frame.toString();
- } catch (e) {
- try {
- line = "<error: " + e + ">";
- } catch (ee) {
- // Any code that reaches this point is seriously nasty!
- line = "<error>";
- }
- }
- lines.push(" at " + line);
- }
- return %_Call(ArrayJoin, lines, "\n");
-}
-
-
-function GetTypeName(receiver, requireConstructor) {
- if (IS_NULL_OR_UNDEFINED(receiver)) return null;
- if (IS_PROXY(receiver)) return "Proxy";
-
- var constructor = %GetDataProperty(TO_OBJECT(receiver), "constructor");
- if (!IS_FUNCTION(constructor)) {
- return requireConstructor ? null : %_Call(NoSideEffectsToString, receiver);
- }
- return %FunctionGetName(constructor);
-}
-
-
-// Format the stack trace if not yet done, and return it.
-// Cache the formatted stack trace on the holder.
-var StackTraceGetter = function() {
- var formatted_stack_trace = UNDEFINED;
- var holder = this;
- while (holder) {
- var formatted_stack_trace =
- GET_PRIVATE(holder, formattedStackTraceSymbol);
- if (IS_UNDEFINED(formatted_stack_trace)) {
- // No formatted stack trace available.
- var stack_trace = GET_PRIVATE(holder, stackTraceSymbol);
- if (IS_UNDEFINED(stack_trace)) {
- // Neither formatted nor structured stack trace available.
- // Look further up the prototype chain.
- holder = %_GetPrototype(holder);
- continue;
- }
- formatted_stack_trace = FormatStackTrace(holder, stack_trace);
- SET_PRIVATE(holder, stackTraceSymbol, UNDEFINED);
- SET_PRIVATE(holder, formattedStackTraceSymbol, formatted_stack_trace);
- }
- return formatted_stack_trace;
- }
- return UNDEFINED;
-};
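One observable consequence of the getter above: the formatted trace is computed once and cached on the holder, so later changes to Error.prepareStackTrace cannot retroactively reformat an already-read stack:

    const err = new Error("boom");
    const first = err.stack;                    // formats and caches
    Error.prepareStackTrace = () => "ignored";
    console.log(err.stack === first);           // -> true (cached copy)
    Error.prepareStackTrace = undefined;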
-
-
-// If the receiver equals the holder, set the formatted stack trace that the
-// getter returns.
-var StackTraceSetter = function(v) {
- if (IsErrorObject(this)) {
- SET_PRIVATE(this, stackTraceSymbol, UNDEFINED);
- SET_PRIVATE(this, formattedStackTraceSymbol, v);
- }
-};
-
-
-// Use a dummy function since we do not actually want to capture a stack trace
-// when constructing the initial Error prototypes.
-var captureStackTrace = function() {};
-
-
-// Set up special error type constructors.
-function SetUpError(error_function) {
- %FunctionSetInstanceClassName(error_function, 'Error');
- var name = error_function.name;
- var prototype = new GlobalObject();
- if (name !== 'Error') {
- %InternalSetPrototype(error_function, GlobalError);
- %InternalSetPrototype(prototype, GlobalError.prototype);
- }
- %FunctionSetPrototype(error_function, prototype);
-
- %AddNamedProperty(error_function.prototype, 'name', name, DONT_ENUM);
- %AddNamedProperty(error_function.prototype, 'message', '', DONT_ENUM);
- %AddNamedProperty(
- error_function.prototype, 'constructor', error_function, DONT_ENUM);
-
- %SetCode(error_function, function(m) {
- if (IS_UNDEFINED(new.target)) return new error_function(m);
-
- try { captureStackTrace(this, error_function); } catch (e) { }
- // Define all the expected properties directly on the error
- // object. This avoids going through getters and setters defined
- // on prototype objects.
- if (!IS_UNDEFINED(m)) {
- %AddNamedProperty(this, 'message', TO_STRING(m), DONT_ENUM);
- }
- });
-
- %SetNativeFlag(error_function);
- return error_function;
-};
-
-GlobalError = SetUpError(global.Error);
-GlobalEvalError = SetUpError(global.EvalError);
-GlobalRangeError = SetUpError(global.RangeError);
-GlobalReferenceError = SetUpError(global.ReferenceError);
-GlobalSyntaxError = SetUpError(global.SyntaxError);
-GlobalTypeError = SetUpError(global.TypeError);
-GlobalURIError = SetUpError(global.URIError);
-
-utils.InstallFunctions(GlobalError.prototype, DONT_ENUM,
- ['toString', ErrorToString]);
-
-function ErrorToString() {
- if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kCalledOnNonObject, "Error.prototype.toString");
- }
-
- var name = this.name;
- name = IS_UNDEFINED(name) ? "Error" : TO_STRING(name);
-
- var message = this.message;
- message = IS_UNDEFINED(message) ? "" : TO_STRING(message);
-
- if (name == "") return message;
- if (message == "") return name;
-  return `${name}: ${message}`;
-}
-
-function MakeError(type, arg0, arg1, arg2) {
- return MakeGenericError(GlobalError, type, arg0, arg1, arg2);
-}
-
-function MakeRangeError(type, arg0, arg1, arg2) {
- return MakeGenericError(GlobalRangeError, type, arg0, arg1, arg2);
-}
-
-function MakeSyntaxError(type, arg0, arg1, arg2) {
- return MakeGenericError(GlobalSyntaxError, type, arg0, arg1, arg2);
-}
-
-function MakeTypeError(type, arg0, arg1, arg2) {
- return MakeGenericError(GlobalTypeError, type, arg0, arg1, arg2);
-}
-
-function MakeURIError() {
- return MakeGenericError(GlobalURIError, kURIMalformed);
-}
-
-// Boilerplate for exceptions for stack overflows. Used from
-// Isolate::StackOverflow().
-var StackOverflowBoilerplate = MakeRangeError(kStackOverflow);
-utils.InstallGetterSetter(StackOverflowBoilerplate, 'stack',
- StackTraceGetter, StackTraceSetter)
-
-// Define actual captureStackTrace function after everything has been set up.
-captureStackTrace = function captureStackTrace(obj, cons_opt) {
- // Define accessors first, as this may fail and throw.
- ObjectDefineProperty(obj, 'stack', { get: StackTraceGetter,
- set: StackTraceSetter,
- configurable: true });
- %CollectStackTrace(obj, cons_opt ? cons_opt : captureStackTrace);
-};
-
-GlobalError.captureStackTrace = captureStackTrace;
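Error.captureStackTrace remains public V8 API (the JS definition deleted here was moved to a native implementation); its optional second argument trims the given constructor, and every frame above it, out of the trace:

    function AppError(message) {
      this.message = message;
      // Frames from AppError upward are omitted, so .stack starts
      // at the caller.
      Error.captureStackTrace(this, AppError);
    }
    AppError.prototype = Object.create(Error.prototype);
    AppError.prototype.name = "AppError";

    console.log(new AppError("nope").stack.split("\n")[1]);  // the caller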
-
-%InstallToContext([
- "get_stack_trace_line_fun", GetStackTraceLine,
- "make_error_function", MakeGenericError,
- "make_range_error", MakeRangeError,
- "make_type_error", MakeTypeError,
- "message_get_column_number", GetColumnNumber,
- "message_get_line_number", GetLineNumber,
- "message_get_source_line", GetSourceLine,
- "no_side_effects_to_string_fun", NoSideEffectsToString,
- "stack_overflow_boilerplate", StackOverflowBoilerplate,
-]);
-
-utils.Export(function(to) {
- to.ErrorToString = ErrorToString;
- to.MakeError = MakeError;
- to.MakeRangeError = MakeRangeError;
- to.MakeSyntaxError = MakeSyntaxError;
- to.MakeTypeError = MakeTypeError;
- to.MakeURIError = MakeURIError;
-});
-
});
diff --git a/deps/v8/src/js/object-observe.js b/deps/v8/src/js/object-observe.js
deleted file mode 100644
index 5e256bf0bb..0000000000
--- a/deps/v8/src/js/object-observe.js
+++ /dev/null
@@ -1,717 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GetHash;
-var GlobalArray = global.Array;
-var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
-var MakeTypeError;
-
-utils.Import(function(from) {
- GetHash = from.GetHash;
- MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-// Overview:
-//
-// This file contains all of the routing and accounting for Object.observe.
-// User code will interact with these mechanisms via the Object.observe APIs
-// and, as a side effect, by mutating objects which are observed. The V8 runtime
-// (both C++ and JS) will interact with these mechanisms primarily by enqueuing
-// proper change records for objects which were mutated. The Object.observe
-// routing and accounting consists primarily of three participants:
-//
-// 1) ObjectInfo. This represents the observed state of a given object. It
-// records what callbacks are observing the object, with what options, and
-// what "change types" are in progress on the object (i.e. via
-// notifier.performChange).
-//
-// 2) CallbackInfo. This represents a callback used for observation. It holds
-// the records which must be delivered to the callback, as well as the global
-// priority of the callback (which determines delivery order between
-// callbacks).
-//
-// 3) observationState.pendingObservers. This is the set of observers which
-// have change records which must be delivered. During "normal" delivery
-// (i.e. not Object.deliverChangeRecords), this is the mechanism by which
-// callbacks are invoked in the proper order until there are no more
-// change records pending to a callback.
-//
-// Note that in order to reduce allocation and processing costs, the
-// implementation of (1) and (2) have "optimized" states which represent
-// common cases which can be handled more efficiently.
-
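Object.observe was withdrawn from TC39, and this deletion removes its implementation. For a rough modern stand-in for the 'add'/'update' record types described above, a Proxy plus a microtask gives the same flavor (illustrative sketch, not the removed API):

    function observe(target, callback) {
      return new Proxy(target, {
        set(obj, name, value, receiver) {
          const type = name in obj ? "update" : "add";
          const ok = Reflect.set(obj, name, value, receiver);
          // Deliver asynchronously, echoing the microtask-based
          // ObserveMicrotaskRunner used by the deleted code.
          Promise.resolve().then(() =>
              callback([{ object: obj, type, name }]));
          return ok;
        }
      });
    }
    const obj = observe({}, (changes) => console.log(changes[0].type));
    obj.x = 1;  // logs "add"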
-var observationState;
-
-var notifierPrototype = {};
-
-// We have to wait until after bootstrapping to grab a reference to the
-// observationState object, since it's not possible to serialize that
-// reference into the snapshot.
-function GetObservationStateJS() {
- if (IS_UNDEFINED(observationState)) {
- observationState = %GetObservationState();
- }
-
- // TODO(adamk): Consider moving this code into heap.cc
- if (IS_UNDEFINED(observationState.callbackInfoMap)) {
- observationState.callbackInfoMap = %ObservationWeakMapCreate();
- observationState.objectInfoMap = %ObservationWeakMapCreate();
- observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
- observationState.pendingObservers = null;
- observationState.nextCallbackPriority = 0;
- observationState.lastMicrotaskId = 0;
- }
-
- return observationState;
-}
-
-
-function GetPendingObservers() {
- return GetObservationStateJS().pendingObservers;
-}
-
-
-function SetPendingObservers(pendingObservers) {
- GetObservationStateJS().pendingObservers = pendingObservers;
-}
-
-
-function GetNextCallbackPriority() {
- return GetObservationStateJS().nextCallbackPriority++;
-}
-
-
-function nullProtoObject() {
- return { __proto__: null };
-}
-
-
-function TypeMapCreate() {
- return nullProtoObject();
-}
-
-
-function TypeMapAddType(typeMap, type, ignoreDuplicate) {
- typeMap[type] = ignoreDuplicate ? 1 : (typeMap[type] || 0) + 1;
-}
-
-
-function TypeMapRemoveType(typeMap, type) {
- typeMap[type]--;
-}
-
-
-function TypeMapCreateFromList(typeList, length) {
- var typeMap = TypeMapCreate();
- for (var i = 0; i < length; i++) {
- TypeMapAddType(typeMap, typeList[i], true);
- }
- return typeMap;
-}
-
-
-function TypeMapHasType(typeMap, type) {
- return !!typeMap[type];
-}
-
-
-function TypeMapIsDisjointFrom(typeMap1, typeMap2) {
- if (!typeMap1 || !typeMap2)
- return true;
-
- for (var type in typeMap1) {
- if (TypeMapHasType(typeMap1, type) && TypeMapHasType(typeMap2, type))
- return false;
- }
-
- return true;
-}
-
-
-var defaultAcceptTypes = (function() {
- var defaultTypes = [
- 'add',
- 'update',
- 'delete',
- 'setPrototype',
- 'reconfigure',
- 'preventExtensions'
- ];
- return TypeMapCreateFromList(defaultTypes, defaultTypes.length);
-})();
-
-
-// An Observer is a registration to observe an object by a callback with
-// a given set of accept types. If the set of accept types is the default
-// set for Object.observe, the observer is represented as a direct reference
-// to the callback. An observer never changes its accept types and thus never
-// needs to "normalize".
-function ObserverCreate(callback, acceptList) {
- if (IS_UNDEFINED(acceptList))
- return callback;
- var observer = nullProtoObject();
- observer.callback = callback;
- observer.accept = acceptList;
- return observer;
-}
-
-
-function ObserverGetCallback(observer) {
- return IS_CALLABLE(observer) ? observer : observer.callback;
-}
-
-
-function ObserverGetAcceptTypes(observer) {
- return IS_CALLABLE(observer) ? defaultAcceptTypes : observer.accept;
-}
-
-
-function ObserverIsActive(observer, objectInfo) {
- return TypeMapIsDisjointFrom(ObjectInfoGetPerformingTypes(objectInfo),
- ObserverGetAcceptTypes(observer));
-}
-
-
-function ObjectInfoGetOrCreate(object) {
- var objectInfo = ObjectInfoGet(object);
- if (IS_UNDEFINED(objectInfo)) {
- if (!IS_PROXY(object)) {
- %SetIsObserved(object);
- }
- objectInfo = {
- object: object,
- changeObservers: null,
- notifier: null,
- performing: null,
- performingCount: 0,
- };
- %WeakCollectionSet(GetObservationStateJS().objectInfoMap,
- object, objectInfo, GetHash(object));
- }
- return objectInfo;
-}
-
-
-function ObjectInfoGet(object) {
- return %WeakCollectionGet(GetObservationStateJS().objectInfoMap, object,
- GetHash(object));
-}
-
-
-function ObjectInfoGetFromNotifier(notifier) {
- return %WeakCollectionGet(GetObservationStateJS().notifierObjectInfoMap,
- notifier, GetHash(notifier));
-}
-
-
-function ObjectInfoGetNotifier(objectInfo) {
- if (IS_NULL(objectInfo.notifier)) {
- var notifier = { __proto__: notifierPrototype };
- objectInfo.notifier = notifier;
- %WeakCollectionSet(GetObservationStateJS().notifierObjectInfoMap,
- notifier, objectInfo, GetHash(notifier));
- }
-
- return objectInfo.notifier;
-}
-
-
-function ChangeObserversIsOptimized(changeObservers) {
- return IS_CALLABLE(changeObservers) ||
- IS_CALLABLE(changeObservers.callback);
-}
-
-
-// The set of observers on an object is called 'changeObservers'. The first
-// observer is referenced directly via objectInfo.changeObservers. When a second
-// is added, changeObservers "normalizes" to become a mapping of callback
-// priority -> observer and is then stored on objectInfo.changeObservers.
-function ObjectInfoNormalizeChangeObservers(objectInfo) {
- if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
- var observer = objectInfo.changeObservers;
- var callback = ObserverGetCallback(observer);
- var callbackInfo = CallbackInfoGet(callback);
- var priority = CallbackInfoGetPriority(callbackInfo);
- objectInfo.changeObservers = nullProtoObject();
- objectInfo.changeObservers[priority] = observer;
- }
-}
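The single-observer fast path above, isolated into a runnable sketch; the priority bookkeeping is simplified (the sketch-only firstPriority field stands in for what CallbackInfo provides in the real code):

    function addObserver(holder, callback, priority) {
      if (holder.changeObservers === null) {        // first observer:
        holder.changeObservers = callback;          // store directly
        holder.firstPriority = priority;            // sketch-only field
        return;
      }
      if (typeof holder.changeObservers === "function") {
        const first = holder.changeObservers;       // second observer:
        holder.changeObservers = Object.create(null);  // normalize to map
        holder.changeObservers[holder.firstPriority] = first;
      }
      holder.changeObservers[priority] = callback;
    }
    const h = { changeObservers: null };
    addObserver(h, () => {}, 0);   // direct reference
    addObserver(h, () => {}, 1);   // now a priority -> observer map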
-
-
-function ObjectInfoAddObserver(objectInfo, callback, acceptList) {
- var callbackInfo = CallbackInfoGetOrCreate(callback);
- var observer = ObserverCreate(callback, acceptList);
-
- if (!objectInfo.changeObservers) {
- objectInfo.changeObservers = observer;
- return;
- }
-
- ObjectInfoNormalizeChangeObservers(objectInfo);
- var priority = CallbackInfoGetPriority(callbackInfo);
- objectInfo.changeObservers[priority] = observer;
-}
-
-function ObjectInfoRemoveObserver(objectInfo, callback) {
- if (!objectInfo.changeObservers)
- return;
-
- if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
- if (callback === ObserverGetCallback(objectInfo.changeObservers))
- objectInfo.changeObservers = null;
- return;
- }
-
- var callbackInfo = CallbackInfoGet(callback);
- var priority = CallbackInfoGetPriority(callbackInfo);
- objectInfo.changeObservers[priority] = null;
-}
-
-function ObjectInfoHasActiveObservers(objectInfo) {
- if (IS_UNDEFINED(objectInfo) || !objectInfo.changeObservers)
- return false;
-
- if (ChangeObserversIsOptimized(objectInfo.changeObservers))
- return ObserverIsActive(objectInfo.changeObservers, objectInfo);
-
- for (var priority in objectInfo.changeObservers) {
- var observer = objectInfo.changeObservers[priority];
- if (!IS_NULL(observer) && ObserverIsActive(observer, objectInfo))
- return true;
- }
-
- return false;
-}
-
-
-function ObjectInfoAddPerformingType(objectInfo, type) {
- objectInfo.performing = objectInfo.performing || TypeMapCreate();
- TypeMapAddType(objectInfo.performing, type);
- objectInfo.performingCount++;
-}
-
-
-function ObjectInfoRemovePerformingType(objectInfo, type) {
- objectInfo.performingCount--;
- TypeMapRemoveType(objectInfo.performing, type);
-}
-
-
-function ObjectInfoGetPerformingTypes(objectInfo) {
- return objectInfo.performingCount > 0 ? objectInfo.performing : null;
-}
-
-
-function ConvertAcceptListToTypeMap(arg) {
- // We use undefined as a sentinel for the default accept list.
- if (IS_UNDEFINED(arg))
- return arg;
-
- if (!IS_RECEIVER(arg)) throw MakeTypeError(kObserveInvalidAccept);
-
- var len = TO_INTEGER(arg.length);
- if (len < 0) len = 0;
-
- return TypeMapCreateFromList(arg, len);
-}
-
-
-// CallbackInfo's optimized state is just a number which represents its global
-// priority. When a change record must be enqueued for the callback, it
-// normalizes. When delivery clears any pending change records, it re-optimizes.
-function CallbackInfoGet(callback) {
- return %WeakCollectionGet(GetObservationStateJS().callbackInfoMap, callback,
- GetHash(callback));
-}
-
-
-function CallbackInfoSet(callback, callbackInfo) {
- %WeakCollectionSet(GetObservationStateJS().callbackInfoMap,
- callback, callbackInfo, GetHash(callback));
-}
-
-
-function CallbackInfoGetOrCreate(callback) {
- var callbackInfo = CallbackInfoGet(callback);
- if (!IS_UNDEFINED(callbackInfo))
- return callbackInfo;
-
- var priority = GetNextCallbackPriority();
- CallbackInfoSet(callback, priority);
- return priority;
-}
-
-
-function CallbackInfoGetPriority(callbackInfo) {
- if (IS_NUMBER(callbackInfo))
- return callbackInfo;
- else
- return callbackInfo.priority;
-}
-
-
-function CallbackInfoNormalize(callback) {
- var callbackInfo = CallbackInfoGet(callback);
- if (IS_NUMBER(callbackInfo)) {
- var priority = callbackInfo;
- callbackInfo = new InternalArray;
- callbackInfo.priority = priority;
- CallbackInfoSet(callback, callbackInfo);
- }
- return callbackInfo;
-}
-
-
-function ObjectObserve(object, callback, acceptList) {
- if (!IS_RECEIVER(object))
- throw MakeTypeError(kObserveNonObject, "observe", "observe");
- if (%IsJSGlobalProxy(object))
- throw MakeTypeError(kObserveGlobalProxy, "observe");
- if (%IsAccessCheckNeeded(object))
- throw MakeTypeError(kObserveAccessChecked, "observe");
- if (!IS_CALLABLE(callback))
- throw MakeTypeError(kObserveNonFunction, "observe");
- if (%object_is_frozen(callback))
- throw MakeTypeError(kObserveCallbackFrozen);
-
- var objectObserveFn = %GetObjectContextObjectObserve(object);
- return objectObserveFn(object, callback, acceptList);
-}
-
-
-function NativeObjectObserve(object, callback, acceptList) {
- var objectInfo = ObjectInfoGetOrCreate(object);
- var typeList = ConvertAcceptListToTypeMap(acceptList);
- ObjectInfoAddObserver(objectInfo, callback, typeList);
- return object;
-}
-
-
-function ObjectUnobserve(object, callback) {
- if (!IS_RECEIVER(object))
- throw MakeTypeError(kObserveNonObject, "unobserve", "unobserve");
- if (%IsJSGlobalProxy(object))
- throw MakeTypeError(kObserveGlobalProxy, "unobserve");
- if (!IS_CALLABLE(callback))
- throw MakeTypeError(kObserveNonFunction, "unobserve");
-
- var objectInfo = ObjectInfoGet(object);
- if (IS_UNDEFINED(objectInfo))
- return object;
-
- ObjectInfoRemoveObserver(objectInfo, callback);
- return object;
-}
-
-
-function ArrayObserve(object, callback) {
- return ObjectObserve(object, callback, ['add',
- 'update',
- 'delete',
- 'splice']);
-}
-
-
-function ArrayUnobserve(object, callback) {
- return ObjectUnobserve(object, callback);
-}
-
-
-function ObserverEnqueueIfActive(observer, objectInfo, changeRecord) {
- if (!ObserverIsActive(observer, objectInfo) ||
- !TypeMapHasType(ObserverGetAcceptTypes(observer), changeRecord.type)) {
- return;
- }
-
- var callback = ObserverGetCallback(observer);
- if (!%ObserverObjectAndRecordHaveSameOrigin(callback, changeRecord.object,
- changeRecord)) {
- return;
- }
-
- var callbackInfo = CallbackInfoNormalize(callback);
- if (IS_NULL(GetPendingObservers())) {
- SetPendingObservers(nullProtoObject());
- if (DEBUG_IS_ACTIVE) {
- var id = ++GetObservationStateJS().lastMicrotaskId;
- var name = "Object.observe";
- %EnqueueMicrotask(function() {
- %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
- ObserveMicrotaskRunner();
- %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
- });
- %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
- } else {
- %EnqueueMicrotask(ObserveMicrotaskRunner);
- }
- }
- GetPendingObservers()[callbackInfo.priority] = callback;
- callbackInfo.push(changeRecord);
-}
-
-
-function ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, type) {
- if (!ObjectInfoHasActiveObservers(objectInfo))
- return;
-
- var hasType = !IS_UNDEFINED(type);
- var newRecord = hasType ?
- { object: objectInfo.object, type: type } :
- { object: objectInfo.object };
-
- for (var prop in changeRecord) {
- if (prop === 'object' || (hasType && prop === 'type')) continue;
- %DefineDataPropertyUnchecked(
- newRecord, prop, changeRecord[prop], READ_ONLY + DONT_DELETE);
- }
- %object_freeze(newRecord);
-
- ObjectInfoEnqueueInternalChangeRecord(objectInfo, newRecord);
-}
-
-
-function ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(changeRecord.name)) return;
-
- if (ChangeObserversIsOptimized(objectInfo.changeObservers)) {
- var observer = objectInfo.changeObservers;
- ObserverEnqueueIfActive(observer, objectInfo, changeRecord);
- return;
- }
-
- for (var priority in objectInfo.changeObservers) {
- var observer = objectInfo.changeObservers[priority];
- if (IS_NULL(observer))
- continue;
- ObserverEnqueueIfActive(observer, objectInfo, changeRecord);
- }
-}
-
-
-function BeginPerformSplice(array) {
- var objectInfo = ObjectInfoGet(array);
- if (!IS_UNDEFINED(objectInfo))
- ObjectInfoAddPerformingType(objectInfo, 'splice');
-}
-
-
-function EndPerformSplice(array) {
- var objectInfo = ObjectInfoGet(array);
- if (!IS_UNDEFINED(objectInfo))
- ObjectInfoRemovePerformingType(objectInfo, 'splice');
-}
-
-
-function EnqueueSpliceRecord(array, index, removed, addedCount) {
- var objectInfo = ObjectInfoGet(array);
- if (!ObjectInfoHasActiveObservers(objectInfo))
- return;
-
- var changeRecord = {
- type: 'splice',
- object: array,
- index: index,
- removed: removed,
- addedCount: addedCount
- };
-
- %object_freeze(changeRecord);
- %object_freeze(changeRecord.removed);
- ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
-}
-
-
-function NotifyChange(type, object, name, oldValue) {
- var objectInfo = ObjectInfoGet(object);
- if (!ObjectInfoHasActiveObservers(objectInfo))
- return;
-
- var changeRecord;
- if (arguments.length == 2) {
- changeRecord = { type: type, object: object };
- } else if (arguments.length == 3) {
- changeRecord = { type: type, object: object, name: name };
- } else {
- changeRecord = {
- type: type,
- object: object,
- name: name,
- oldValue: oldValue
- };
- }
-
- %object_freeze(changeRecord);
- ObjectInfoEnqueueInternalChangeRecord(objectInfo, changeRecord);
-}
-
-
-function ObjectNotifierNotify(changeRecord) {
- if (!IS_RECEIVER(this))
- throw MakeTypeError(kCalledOnNonObject, "notify");
-
- var objectInfo = ObjectInfoGetFromNotifier(this);
- if (IS_UNDEFINED(objectInfo))
- throw MakeTypeError(kObserveNotifyNonNotifier);
- if (!IS_STRING(changeRecord.type))
- throw MakeTypeError(kObserveTypeNonString);
-
- ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord);
-}
-
-
-function ObjectNotifierPerformChange(changeType, changeFn) {
- if (!IS_RECEIVER(this))
- throw MakeTypeError(kCalledOnNonObject, "performChange");
-
- var objectInfo = ObjectInfoGetFromNotifier(this);
- if (IS_UNDEFINED(objectInfo))
- throw MakeTypeError(kObserveNotifyNonNotifier);
- if (!IS_STRING(changeType))
- throw MakeTypeError(kObservePerformNonString);
- if (!IS_CALLABLE(changeFn))
- throw MakeTypeError(kObservePerformNonFunction);
-
- var performChangeFn = %GetObjectContextNotifierPerformChange(objectInfo);
- performChangeFn(objectInfo, changeType, changeFn);
-}
-
-
-function NativeObjectNotifierPerformChange(objectInfo, changeType, changeFn) {
- ObjectInfoAddPerformingType(objectInfo, changeType);
-
- var changeRecord;
- try {
- changeRecord = changeFn();
- } finally {
- ObjectInfoRemovePerformingType(objectInfo, changeType);
- }
-
- if (IS_RECEIVER(changeRecord))
- ObjectInfoEnqueueExternalChangeRecord(objectInfo, changeRecord, changeType);
-}
-
-
-function ObjectGetNotifier(object) {
- if (!IS_RECEIVER(object))
- throw MakeTypeError(kObserveNonObject, "getNotifier", "getNotifier");
- if (%IsJSGlobalProxy(object))
- throw MakeTypeError(kObserveGlobalProxy, "getNotifier");
- if (%IsAccessCheckNeeded(object))
- throw MakeTypeError(kObserveAccessChecked, "getNotifier");
-
- if (%object_is_frozen(object)) return null;
-
- if (!%ObjectWasCreatedInCurrentOrigin(object)) return null;
-
- var getNotifierFn = %GetObjectContextObjectGetNotifier(object);
- return getNotifierFn(object);
-}
-
-
-function NativeObjectGetNotifier(object) {
- var objectInfo = ObjectInfoGetOrCreate(object);
- return ObjectInfoGetNotifier(objectInfo);
-}
-
-
-function CallbackDeliverPending(callback) {
- var callbackInfo = CallbackInfoGet(callback);
- if (IS_UNDEFINED(callbackInfo) || IS_NUMBER(callbackInfo))
- return false;
-
- // Clear the pending change records from callback and return it to its
- // "optimized" state.
- var priority = callbackInfo.priority;
- CallbackInfoSet(callback, priority);
-
- var pendingObservers = GetPendingObservers();
- if (!IS_NULL(pendingObservers))
- delete pendingObservers[priority];
-
- // TODO: combine the following runtime calls for perf optimization.
- var delivered = [];
- %MoveArrayContents(callbackInfo, delivered);
- %DeliverObservationChangeRecords(callback, delivered);
-
- return true;
-}
-
-
-function ObjectDeliverChangeRecords(callback) {
- if (!IS_CALLABLE(callback))
- throw MakeTypeError(kObserveNonFunction, "deliverChangeRecords");
-
- while (CallbackDeliverPending(callback)) {}
-}
-
-
-function ObserveMicrotaskRunner() {
- var pendingObservers = GetPendingObservers();
- if (!IS_NULL(pendingObservers)) {
- SetPendingObservers(null);
- for (var i in pendingObservers) {
- CallbackDeliverPending(pendingObservers[i]);
- }
- }
-}
-
-// -------------------------------------------------------------------
-
-utils.InstallFunctions(notifierPrototype, DONT_ENUM, [
- "notify", ObjectNotifierNotify,
- "performChange", ObjectNotifierPerformChange
-]);
-
-var ObserveObjectMethods = [
- "deliverChangeRecords", ObjectDeliverChangeRecords,
- "getNotifier", ObjectGetNotifier,
- "observe", ObjectObserve,
- "unobserve", ObjectUnobserve
-];
-
-var ObserveArrayMethods = [
- "observe", ArrayObserve,
- "unobserve", ArrayUnobserve
-];
-
-// TODO(adamk): Figure out why this prototype removal has to
-// happen as part of initial snapshotting.
-var removePrototypeFn = function(f, i) {
- if (i % 2 === 1) %FunctionRemovePrototype(f);
-};
-ObserveObjectMethods.forEach(removePrototypeFn);
-ObserveArrayMethods.forEach(removePrototypeFn);
-
-%InstallToContext([
- "native_object_get_notifier", NativeObjectGetNotifier,
- "native_object_notifier_perform_change", NativeObjectNotifierPerformChange,
- "native_object_observe", NativeObjectObserve,
- "observers_begin_perform_splice", BeginPerformSplice,
- "observers_end_perform_splice", EndPerformSplice,
- "observers_enqueue_splice", EnqueueSpliceRecord,
- "observers_notify_change", NotifyChange,
-]);
-
-utils.Export(function(to) {
- to.ObserveArrayMethods = ObserveArrayMethods;
- to.ObserveBeginPerformSplice = BeginPerformSplice;
- to.ObserveEndPerformSplice = EndPerformSplice;
- to.ObserveEnqueueSpliceRecord = EnqueueSpliceRecord;
- to.ObserveObjectMethods = ObserveObjectMethods;
-});
-
-})
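// A minimal sketch of the withdrawn Object.observe API that the deleted
// file above implemented -- illustrative only, assuming a pre-5.4 build
// where the API still exists; current engines no longer expose it.

var point = { x: 0, y: 0 };

// Observers receive an array of frozen change records, delivered in a
// microtask batch (see ObserveMicrotaskRunner above).
function logger(records) {
  for (var i = 0; i < records.length; i++) {
    console.log(records[i].type, records[i].name, records[i].oldValue);
  }
}

// The optional accept list feeds ConvertAcceptListToTypeMap; Array.observe
// is sugar for the list ['add', 'update', 'delete', 'splice'].
Object.observe(point, logger, ['update']);
point.x = 1;  // enqueues an 'update' record with oldValue 0

// Synthetic records go through the notifier (ObjectNotifierNotify), which
// requires a string 'type' on the record:
Object.getNotifier(point).notify({ type: 'update', name: 'x', oldValue: 1 });

// Pending records can be flushed synchronously (CallbackDeliverPending):
Object.deliverChangeRecords(logger);
Object.unobserve(point, logger);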
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index f9589a51c2..bb818791a0 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -128,10 +128,10 @@ function InstallGetterSetter(object, name, getter, setter, attributes) {
function OverrideFunction(object, name, f, afterInitialBootstrap) {
%CheckIsBootstrapping();
- %ObjectDefineProperty(object, name, { value: f,
- writeable: true,
- configurable: true,
- enumerable: false });
+ %object_define_property(object, name, { value: f,
+ writeable: true,
+ configurable: true,
+ enumerable: false });
SetFunctionName(f, name);
if (!afterInitialBootstrap) %FunctionRemovePrototype(f);
%SetNativeFlag(f);
@@ -181,26 +181,26 @@ function PostNatives(utils) {
// Whitelist of exports from normal natives to experimental natives and debug.
var expose_list = [
+ "AddBoundMethod",
"ArrayToString",
- "ErrorToString",
+ "AsyncFunctionNext",
+ "AsyncFunctionThrow",
"GetIterator",
"GetMethod",
+ "GlobalPromise",
+ "IntlParseDate",
+ "IntlParseNumber",
"IsNaN",
- "MakeError",
- "MakeRangeError",
- "MakeTypeError",
"MapEntries",
"MapIterator",
"MapIteratorNext",
"MaxSimple",
"MinSimple",
+ "NewPromiseCapability",
"NumberIsInteger",
- "ObjectDefineProperty",
- "ObserveArrayMethods",
- "ObserveObjectMethods",
- "PromiseChain",
- "PromiseDeferred",
- "PromiseResolved",
+ "PerformPromiseThen",
+ "PromiseCastResolved",
+ "PromiseThen",
"RegExpSubclassExecJS",
"RegExpSubclassMatch",
"RegExpSubclassReplace",
@@ -210,13 +210,16 @@ function PostNatives(utils) {
"SetIterator",
"SetIteratorNext",
"SetValues",
- "SymbolToString",
+ "ToLocaleLowerCaseI18N",
+ "ToLocaleUpperCaseI18N",
+ "ToLowerCaseI18N",
"ToPositiveInteger",
+ "ToUpperCaseI18N",
// From runtime:
"is_concat_spreadable_symbol",
"iterator_symbol",
- "promise_status_symbol",
- "promise_value_symbol",
+ "promise_result_symbol",
+ "promise_state_symbol",
"object_freeze",
"object_is_frozen",
"object_is_sealed",
@@ -257,9 +260,6 @@ function PostExperimentals(utils) {
imports_from_experimental(exports_container);
}
- utils.CreateDoubleResultArray();
- utils.CreateDoubleResultArray = UNDEFINED;
-
utils.Export = UNDEFINED;
utils.PostDebug = UNDEFINED;
utils.PostExperimentals = UNDEFINED;
@@ -272,9 +272,6 @@ function PostDebug(utils) {
imports(exports_container);
}
- utils.CreateDoubleResultArray();
- utils.CreateDoubleResultArray = UNDEFINED;
-
exports_container = UNDEFINED;
utils.Export = UNDEFINED;
diff --git a/deps/v8/src/js/promise-extra.js b/deps/v8/src/js/promise-extra.js
deleted file mode 100644
index f6f79592bc..0000000000
--- a/deps/v8/src/js/promise-extra.js
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalPromise = global.Promise;
-
-var PromiseChain = utils.ImportNow("PromiseChain");
-var PromiseDeferred = utils.ImportNow("PromiseDeferred");
-var PromiseResolved = utils.ImportNow("PromiseResolved");
-
-utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
- "chain", PromiseChain,
-]);
-
-utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
- "defer", PromiseDeferred,
- "accept", PromiseResolved,
-]);
-
-})
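// A sketch of the non-standard extras this deleted file installed; they
// shipped behind use counters (kPromiseDefer, kPromiseAccept) and are
// removed here together with their promise.js implementations below.

// Promise.defer() returned a { promise, resolve, reject } capability:
var d = Promise.defer();
d.resolve(42);

// Promise.accept(x) behaved like Promise.resolve(x):
Promise.accept(42).then(function (v) { console.log(v); });  // 42

// promise.chain was kept as a deprecated alias for then (see the removed
// PromiseChain in promise.js below):
d.promise.chain(function (v) { console.log(v + 1); });      // 43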
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
index 201a06ca08..b50fc80b30 100644
--- a/deps/v8/src/js/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -12,43 +12,55 @@
// Imports
var InternalArray = utils.InternalArray;
-var MakeTypeError;
var promiseCombinedDeferredSymbol =
utils.ImportNow("promise_combined_deferred_symbol");
var promiseHasHandlerSymbol =
utils.ImportNow("promise_has_handler_symbol");
-var promiseOnRejectSymbol = utils.ImportNow("promise_on_reject_symbol");
-var promiseOnResolveSymbol =
- utils.ImportNow("promise_on_resolve_symbol");
+var promiseRejectReactionsSymbol =
+ utils.ImportNow("promise_reject_reactions_symbol");
+var promiseFulfillReactionsSymbol =
+ utils.ImportNow("promise_fulfill_reactions_symbol");
+var promiseDeferredReactionsSymbol =
+ utils.ImportNow("promise_deferred_reactions_symbol");
var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
-var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
-var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
+var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
var SpeciesConstructor;
+var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
SpeciesConstructor = from.SpeciesConstructor;
});
// -------------------------------------------------------------------
-// Status values: 0 = pending, +1 = resolved, -1 = rejected
+// [[PromiseState]] values:
+const kPending = 0;
+const kFulfilled = +1;
+const kRejected = -1;
+
var lastMicrotaskId = 0;
+// ES#sec-createresolvingfunctions
+// CreateResolvingFunctions ( promise )
function CreateResolvingFunctions(promise) {
var alreadyResolved = false;
+ // ES#sec-promise-resolve-functions
+ // Promise Resolve Functions
var resolve = value => {
if (alreadyResolved === true) return;
alreadyResolved = true;
- PromiseResolve(promise, value);
+ ResolvePromise(promise, value);
};
+ // ES#sec-promise-reject-functions
+ // Promise Reject Functions
var reject = reason => {
if (alreadyResolved === true) return;
alreadyResolved = true;
- PromiseReject(promise, reason);
+ RejectPromise(promise, reason);
};
return {
@@ -59,24 +71,27 @@ function CreateResolvingFunctions(promise) {
}
-var GlobalPromise = function Promise(resolver) {
- if (resolver === promiseRawSymbol) {
+// ES#sec-promise-executor
+// Promise ( executor )
+var GlobalPromise = function Promise(executor) {
+ if (executor === promiseRawSymbol) {
return %_NewObject(GlobalPromise, new.target);
}
- if (IS_UNDEFINED(new.target)) throw MakeTypeError(kNotAPromise, this);
- if (!IS_CALLABLE(resolver))
- throw MakeTypeError(kResolverNotAFunction, resolver);
+ if (IS_UNDEFINED(new.target)) throw %make_type_error(kNotAPromise, this);
+ if (!IS_CALLABLE(executor)) {
+ throw %make_type_error(kResolverNotAFunction, executor);
+ }
var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
var callbacks = CreateResolvingFunctions(promise);
-
+ var debug_is_active = DEBUG_IS_ACTIVE;
try {
- %DebugPushPromise(promise, Promise);
- resolver(callbacks.resolve, callbacks.reject);
- } catch (e) {
+ if (debug_is_active) %DebugPushPromise(promise);
+ executor(callbacks.resolve, callbacks.reject);
+ } %catch (e) { // Natives syntax to mark this catch block.
%_Call(callbacks.reject, UNDEFINED, e);
} finally {
- %DebugPopPromise();
+ if (debug_is_active) %DebugPopPromise();
}
return promise;
@@ -84,57 +99,83 @@ var GlobalPromise = function Promise(resolver) {
// Core functionality.
-function PromiseSet(promise, status, value, onResolve, onReject) {
- SET_PRIVATE(promise, promiseStatusSymbol, status);
- SET_PRIVATE(promise, promiseValueSymbol, value);
- SET_PRIVATE(promise, promiseOnResolveSymbol, onResolve);
- SET_PRIVATE(promise, promiseOnRejectSymbol, onReject);
- if (DEBUG_IS_ACTIVE) {
- %DebugPromiseEvent({ promise: promise, status: status, value: value });
- }
+function PromiseSet(promise, status, value) {
+ SET_PRIVATE(promise, promiseStateSymbol, status);
+ SET_PRIVATE(promise, promiseResultSymbol, value);
+
+ // There are 3 possible states for the resolve, reject symbols when we add
+ // a new callback --
+ // 1) UNDEFINED -- This is the zero state where there is no callback
+ // registered. When we see this state, we directly attach the callbacks to
+ // the symbol.
+ // 2) !IS_ARRAY -- There is a single callback directly attached to the
+ // symbols. We need to create a new array to store additional callbacks.
+ // 3) IS_ARRAY -- There are multiple callbacks already registered,
+ // therefore we can just push the new callback to the existing array.
+ SET_PRIVATE(promise, promiseFulfillReactionsSymbol, UNDEFINED);
+ SET_PRIVATE(promise, promiseRejectReactionsSymbol, UNDEFINED);
+
+ // There are 2 possible states for this symbol --
+ // 1) UNDEFINED -- This is the zero state, no deferred object is
+ // attached to this symbol. When we want to add a new deferred we
+ // directly attach it to this symbol.
+ // 2) symbol with attached deferred object -- New deferred objects
+ // are not attached to this symbol, but instead they are directly
+ // attached to the resolve, reject callback arrays. At this point,
+ // the deferred symbol's state is stale, and the deferreds should be
+ // read from the reject, resolve callbacks.
+ SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
+
return promise;
}
function PromiseCreateAndSet(status, value) {
var promise = new GlobalPromise(promiseRawSymbol);
// If debug is active, notify about the newly created promise first.
- if (DEBUG_IS_ACTIVE) PromiseSet(promise, 0, UNDEFINED);
+ if (DEBUG_IS_ACTIVE) PromiseSet(promise, kPending, UNDEFINED);
return PromiseSet(promise, status, value);
}
function PromiseInit(promise) {
- return PromiseSet(
- promise, 0, UNDEFINED, new InternalArray, new InternalArray)
+ return PromiseSet(promise, kPending, UNDEFINED);
}
-function PromiseDone(promise, status, value, promiseQueue) {
- if (GET_PRIVATE(promise, promiseStatusSymbol) === 0) {
+function FulfillPromise(promise, status, value, promiseQueue) {
+ if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
var tasks = GET_PRIVATE(promise, promiseQueue);
- if (tasks.length) PromiseEnqueue(value, tasks, status);
+ if (!IS_UNDEFINED(tasks)) {
+ var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+ PromiseEnqueue(value, tasks, deferreds, status);
+ }
PromiseSet(promise, status, value);
}
}
function PromiseHandle(value, handler, deferred) {
+ var debug_is_active = DEBUG_IS_ACTIVE;
try {
- %DebugPushPromise(deferred.promise, PromiseHandle);
+ if (debug_is_active) %DebugPushPromise(deferred.promise);
var result = handler(value);
deferred.resolve(result);
- } catch (exception) {
+ } %catch (exception) { // Natives syntax to mark this catch block.
try { deferred.reject(exception); } catch (e) { }
} finally {
- %DebugPopPromise();
+ if (debug_is_active) %DebugPopPromise();
}
}
-function PromiseEnqueue(value, tasks, status) {
+function PromiseEnqueue(value, tasks, deferreds, status) {
var id, name, instrumenting = DEBUG_IS_ACTIVE;
%EnqueueMicrotask(function() {
if (instrumenting) {
%DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
}
- for (var i = 0; i < tasks.length; i += 2) {
- PromiseHandle(value, tasks[i], tasks[i + 1])
+ if (IS_ARRAY(tasks)) {
+ for (var i = 0; i < tasks.length; i += 2) {
+ PromiseHandle(value, tasks[i], tasks[i + 1]);
+ }
+ } else {
+ PromiseHandle(value, tasks, deferreds);
}
if (instrumenting) {
%DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
@@ -142,11 +183,38 @@ function PromiseEnqueue(value, tasks, status) {
});
if (instrumenting) {
id = ++lastMicrotaskId;
- name = status > 0 ? "Promise.resolve" : "Promise.reject";
+ name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
%DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
}
}
+function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
+ var maybeResolveCallbacks =
+ GET_PRIVATE(promise, promiseFulfillReactionsSymbol);
+ if (IS_UNDEFINED(maybeResolveCallbacks)) {
+ SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
+ SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
+ SET_PRIVATE(promise, promiseDeferredReactionsSymbol, deferred);
+ } else if (!IS_ARRAY(maybeResolveCallbacks)) {
+ var resolveCallbacks = new InternalArray();
+ var rejectCallbacks = new InternalArray();
+ var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+
+ resolveCallbacks.push(
+ maybeResolveCallbacks, existingDeferred, onResolve, deferred);
+ rejectCallbacks.push(GET_PRIVATE(promise, promiseRejectReactionsSymbol),
+ existingDeferred,
+ onReject,
+ deferred);
+
+ SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
+ SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
+ } else {
+ maybeResolveCallbacks.push(onResolve, deferred);
+ GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
+ }
+}
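// A standalone model of the three reaction-storage states documented in
// the PromiseSet comment above, using plain properties in place of V8's
// private symbols and InternalArray; helper names are hypothetical, and
// only the fulfill side is shown (the reject side is symmetric).
function attachCallback(promise, onResolve, deferred) {
  var existing = promise.fulfillReactions;
  if (existing === undefined) {
    // State 1 -> 2: the first callback is stored directly; no array yet.
    promise.fulfillReactions = onResolve;
    promise.deferredReaction = deferred;
  } else if (!Array.isArray(existing)) {
    // State 2 -> 3: a second callback forces an array of (handler, deferred)
    // pairs; the directly-stored deferred migrates into the array, leaving
    // the deferred slot stale, as with promiseDeferredReactionsSymbol above.
    promise.fulfillReactions =
        [existing, promise.deferredReaction, onResolve, deferred];
  } else {
    // State 3: already an array; push one more (handler, deferred) pair.
    existing.push(onResolve, deferred);
  }
}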
+
function PromiseIdResolveHandler(x) { return x }
function PromiseIdRejectHandler(r) { throw r }
@@ -157,35 +225,67 @@ function PromiseNopResolver() {}
// For bootstrapper.
+// ES#sec-ispromise IsPromise ( x )
function IsPromise(x) {
- return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStatusSymbol);
+ return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStateSymbol);
}
function PromiseCreate() {
return new GlobalPromise(PromiseNopResolver)
}
-function PromiseResolve(promise, x) {
- if (x === promise) {
- return PromiseReject(promise, MakeTypeError(kPromiseCyclic, x));
+// ES#sec-promise-resolve-functions
+// Promise Resolve Functions, steps 6-13
+function ResolvePromise(promise, resolution) {
+ if (resolution === promise) {
+ return RejectPromise(promise, %make_type_error(kPromiseCyclic, resolution));
}
- if (IS_RECEIVER(x)) {
+ if (IS_RECEIVER(resolution)) {
// 25.4.1.3.2 steps 8-12
try {
- var then = x.then;
+ var then = resolution.then;
} catch (e) {
- return PromiseReject(promise, e);
+ return RejectPromise(promise, e);
+ }
+
+    // If the resolution is a native promise that is already resolved or
+    // rejected, short-circuit the resolution procedure by directly
+    // reusing the value from that promise.
+ if (IsPromise(resolution) && then === PromiseThen) {
+ var thenableState = GET_PRIVATE(resolution, promiseStateSymbol);
+ if (thenableState === kFulfilled) {
+ // This goes inside the if-else to save one symbol lookup in
+ // the slow path.
+ var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
+ FulfillPromise(promise, kFulfilled, thenableValue,
+ promiseFulfillReactionsSymbol);
+ SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
+ return;
+ } else if (thenableState === kRejected) {
+ var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
+ if (!HAS_DEFINED_PRIVATE(resolution, promiseHasHandlerSymbol)) {
+ // Promise has already been rejected, but had no handler.
+ // Revoke previously triggered reject event.
+ %PromiseRevokeReject(resolution);
+ }
+ RejectPromise(promise, thenableValue);
+ SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
+ return;
+ }
}
+
if (IS_CALLABLE(then)) {
// PromiseResolveThenableJob
- var id, name, instrumenting = DEBUG_IS_ACTIVE;
+ var id;
+ var name = "PromiseResolveThenableJob";
+ var instrumenting = DEBUG_IS_ACTIVE;
%EnqueueMicrotask(function() {
if (instrumenting) {
%DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
}
var callbacks = CreateResolvingFunctions(promise);
try {
- %_Call(then, x, callbacks.resolve, callbacks.reject);
+ %_Call(then, resolution, callbacks.resolve, callbacks.reject);
} catch (e) {
%_Call(callbacks.reject, UNDEFINED, e);
}
@@ -195,28 +295,31 @@ function PromiseResolve(promise, x) {
});
if (instrumenting) {
id = ++lastMicrotaskId;
- name = "PromseResolveThenableJob";
%DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
}
return;
}
}
- PromiseDone(promise, +1, x, promiseOnResolveSymbol);
+ FulfillPromise(promise, kFulfilled, resolution, promiseFulfillReactionsSymbol);
}
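// An observable consequence of the fast path above, assuming an unmodified
// native Promise in this build: adopting an already-settled native promise
// copies its state directly, while a generic thenable still pays for a
// PromiseResolveThenableJob microtask that calls then() later.
var settled = Promise.resolve(1);
var thenable = { then: function (resolve) { resolve(1); } };

new Promise(function (resolve) { resolve(settled); });   // state reused now
new Promise(function (resolve) { resolve(thenable); });  // job enqueued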
-function PromiseReject(promise, r) {
+// ES#sec-rejectpromise
+// RejectPromise ( promise, reason )
+function RejectPromise(promise, reason) {
// Check promise status to confirm that this reject has an effect.
// Call runtime for callbacks to the debugger or for unhandled reject.
- if (GET_PRIVATE(promise, promiseStatusSymbol) == 0) {
+ if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
var debug_is_active = DEBUG_IS_ACTIVE;
if (debug_is_active ||
!HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
- %PromiseRejectEvent(promise, r, debug_is_active);
+ %PromiseRejectEvent(promise, reason, debug_is_active);
}
}
- PromiseDone(promise, -1, r, promiseOnRejectSymbol)
+ FulfillPromise(promise, kRejected, reason, promiseRejectReactionsSymbol)
}
+// ES#sec-newpromisecapability
+// NewPromiseCapability ( C )
function NewPromiseCapability(C) {
if (C === GlobalPromise) {
// Optimized case, avoid extra closure.
@@ -232,34 +335,26 @@ function NewPromiseCapability(C) {
var result = {promise: UNDEFINED, resolve: UNDEFINED, reject: UNDEFINED };
result.promise = new C((resolve, reject) => {
if (!IS_UNDEFINED(result.resolve) || !IS_UNDEFINED(result.reject))
- throw MakeTypeError(kPromiseExecutorAlreadyInvoked);
+ throw %make_type_error(kPromiseExecutorAlreadyInvoked);
result.resolve = resolve;
result.reject = reject;
});
if (!IS_CALLABLE(result.resolve) || !IS_CALLABLE(result.reject))
- throw MakeTypeError(kPromiseNonCallable);
+ throw %make_type_error(kPromiseNonCallable);
return result;
}
-function PromiseDeferred() {
- %IncrementUseCounter(kPromiseDefer);
- return NewPromiseCapability(this);
-}
-
-function PromiseResolved(x) {
- %IncrementUseCounter(kPromiseAccept);
- return %_Call(PromiseCast, this, x);
-}
-
-function PromiseRejected(r) {
+// ES#sec-promise.reject
+// Promise.reject ( x )
+function PromiseReject(r) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kCalledOnNonObject, PromiseRejected);
+ throw %make_type_error(kCalledOnNonObject, PromiseResolve);
}
if (this === GlobalPromise) {
// Optimized case, avoid extra closure.
- var promise = PromiseCreateAndSet(-1, r);
+ var promise = PromiseCreateAndSet(kRejected, r);
// The debug event for this would always be an uncaught promise reject,
// which is usually simply noise. Do not trigger that debug event.
%PromiseRejectEvent(promise, r, false);
@@ -271,73 +366,104 @@ function PromiseRejected(r) {
}
}
-// Multi-unwrapped chaining with thenable coercion.
+// Shortcut Promise.reject and Promise.resolve() implementations, used by
+// the Async Functions implementation.
+function PromiseCreateRejected(r) {
+ return %_Call(PromiseReject, GlobalPromise, r);
+}
-function PromiseThen(onResolve, onReject) {
- var status = GET_PRIVATE(this, promiseStatusSymbol);
- if (IS_UNDEFINED(status)) {
- throw MakeTypeError(kNotAPromise, this);
+function PromiseCreateResolved(value) {
+ var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
+ var resolveResult = ResolvePromise(promise, value);
+ return promise;
+}
+
+function PromiseCastResolved(value) {
+ if (IsPromise(value)) {
+ return value;
+ } else {
+ var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
+ var resolveResult = ResolvePromise(promise, value);
+ return promise;
}
+}
- var constructor = SpeciesConstructor(this, GlobalPromise);
- onResolve = IS_CALLABLE(onResolve) ? onResolve : PromiseIdResolveHandler;
- onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
- var deferred = NewPromiseCapability(constructor);
+function PerformPromiseThen(promise, onResolve, onReject, resultCapability) {
+ if (!IS_CALLABLE(onResolve)) onResolve = PromiseIdResolveHandler;
+ if (!IS_CALLABLE(onReject)) onReject = PromiseIdRejectHandler;
+
+ var status = GET_PRIVATE(promise, promiseStateSymbol);
switch (status) {
- case 0: // Pending
- GET_PRIVATE(this, promiseOnResolveSymbol).push(onResolve, deferred);
- GET_PRIVATE(this, promiseOnRejectSymbol).push(onReject, deferred);
+ case kPending:
+ PromiseAttachCallbacks(promise, resultCapability, onResolve, onReject);
break;
- case +1: // Resolved
- PromiseEnqueue(GET_PRIVATE(this, promiseValueSymbol),
- [onResolve, deferred],
- +1);
+ case kFulfilled:
+ PromiseEnqueue(GET_PRIVATE(promise, promiseResultSymbol),
+ onResolve, resultCapability, kFulfilled);
break;
- case -1: // Rejected
- if (!HAS_DEFINED_PRIVATE(this, promiseHasHandlerSymbol)) {
+ case kRejected:
+ if (!HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
// Promise has already been rejected, but had no handler.
// Revoke previously triggered reject event.
- %PromiseRevokeReject(this);
+ %PromiseRevokeReject(promise);
}
- PromiseEnqueue(GET_PRIVATE(this, promiseValueSymbol),
- [onReject, deferred],
- -1);
+ PromiseEnqueue(GET_PRIVATE(promise, promiseResultSymbol),
+ onReject, resultCapability, kRejected);
break;
}
+
// Mark this promise as having handler.
- SET_PRIVATE(this, promiseHasHandlerSymbol, true);
- if (DEBUG_IS_ACTIVE) {
- %DebugPromiseEvent({ promise: deferred.promise, parentPromise: this });
- }
- return deferred.promise;
+ SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
+ return resultCapability.promise;
}
-// Chain is left around for now as an alias for then
-function PromiseChain(onResolve, onReject) {
- %IncrementUseCounter(kPromiseChain);
- return %_Call(PromiseThen, this, onResolve, onReject);
+// ES#sec-promise.prototype.then
+// Promise.prototype.then ( onFulfilled, onRejected )
+// Multi-unwrapped chaining with thenable coercion.
+function PromiseThen(onResolve, onReject) {
+ var status = GET_PRIVATE(this, promiseStateSymbol);
+ if (IS_UNDEFINED(status)) {
+ throw %make_type_error(kNotAPromise, this);
+ }
+
+ var constructor = SpeciesConstructor(this, GlobalPromise);
+ var resultCapability = NewPromiseCapability(constructor);
+ return PerformPromiseThen(this, onResolve, onReject, resultCapability);
}
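// A sketch of the standard ES2015 behavior enabled by the split above into
// SpeciesConstructor/NewPromiseCapability/PerformPromiseThen: subclass
// instances flow through then() and come back correctly typed.
class Tracked extends Promise {}
var t = Tracked.resolve(1).then(function (v) { return v + 1; });
console.log(t instanceof Tracked);  // true, via Tracked[Symbol.species]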
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
function PromiseCatch(onReject) {
return this.then(UNDEFINED, onReject);
}
// Combinators.
-function PromiseCast(x) {
+// ES#sec-promise.resolve
+// Promise.resolve ( x )
+function PromiseResolve(x) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kCalledOnNonObject, PromiseCast);
+ throw %make_type_error(kCalledOnNonObject, PromiseResolve);
}
if (IsPromise(x) && x.constructor === this) return x;
+ // Avoid creating resolving functions.
+ if (this === GlobalPromise) {
+ var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
+ var resolveResult = ResolvePromise(promise, x);
+ return promise;
+ }
+
var promiseCapability = NewPromiseCapability(this);
var resolveResult = %_Call(promiseCapability.resolve, UNDEFINED, x);
return promiseCapability.promise;
}
+// ES#sec-promise.all
+// Promise.all ( iterable )
function PromiseAll(iterable) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kCalledOnNonObject, "Promise.all");
+ throw %make_type_error(kCalledOnNonObject, "Promise.all");
}
var deferred = NewPromiseCapability(this);
@@ -384,9 +510,11 @@ function PromiseAll(iterable) {
return deferred.promise;
}
+// ES#sec-promise.race
+// Promise.race ( iterable )
function PromiseRace(iterable) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kCalledOnNonObject, PromiseRace);
+ throw %make_type_error(kCalledOnNonObject, PromiseRace);
}
var deferred = NewPromiseCapability(this);
@@ -404,20 +532,30 @@ function PromiseRace(iterable) {
// Utility for debugger
+function PromiseHasUserDefinedRejectHandlerCheck(handler, deferred) {
+ if (handler !== PromiseIdRejectHandler) {
+ var combinedDeferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
+ if (IS_UNDEFINED(combinedDeferred)) return true;
+ if (PromiseHasUserDefinedRejectHandlerRecursive(combinedDeferred.promise)) {
+ return true;
+ }
+ } else if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
+ return true;
+ }
+ return false;
+}
+
function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
- var queue = GET_PRIVATE(promise, promiseOnRejectSymbol);
+ var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
+ var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
if (IS_UNDEFINED(queue)) return false;
- for (var i = 0; i < queue.length; i += 2) {
- var handler = queue[i];
- if (handler !== PromiseIdRejectHandler) {
- var deferred = GET_PRIVATE(handler, promiseCombinedDeferredSymbol);
- if (IS_UNDEFINED(deferred)) return true;
- if (PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise)) {
+ if (!IS_ARRAY(queue)) {
+ return PromiseHasUserDefinedRejectHandlerCheck(queue, deferreds);
+ } else {
+ for (var i = 0; i < queue.length; i += 2) {
+ if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
return true;
}
- } else if (PromiseHasUserDefinedRejectHandlerRecursive(
- queue[i + 1].promise)) {
- return true;
}
}
return false;
@@ -430,6 +568,11 @@ function PromiseHasUserDefinedRejectHandler() {
return PromiseHasUserDefinedRejectHandlerRecursive(this);
};
+
+function PromiseSpecies() {
+ return this;
+}
+
// -------------------------------------------------------------------
// Install exported functions.
@@ -438,12 +581,14 @@ function PromiseHasUserDefinedRejectHandler() {
DONT_ENUM | READ_ONLY);
utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
- "reject", PromiseRejected,
+ "reject", PromiseReject,
"all", PromiseAll,
"race", PromiseRace,
- "resolve", PromiseCast
+ "resolve", PromiseResolve
]);
+utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies);
+
utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
"then", PromiseThen,
"catch", PromiseCatch
@@ -451,12 +596,13 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
%InstallToContext([
"promise_catch", PromiseCatch,
- "promise_chain", PromiseChain,
"promise_create", PromiseCreate,
"promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
- "promise_reject", PromiseReject,
- "promise_resolve", PromiseResolve,
+ "promise_reject", RejectPromise,
+ "promise_resolve", ResolvePromise,
"promise_then", PromiseThen,
+ "promise_create_rejected", PromiseCreateRejected,
+ "promise_create_resolved", PromiseCreateResolved
]);
// This allows extras to create promises quickly without building extra
@@ -464,18 +610,17 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
// promise without having to hold on to those closures forever.
utils.InstallFunctions(extrasUtils, 0, [
"createPromise", PromiseCreate,
- "resolvePromise", PromiseResolve,
- "rejectPromise", PromiseReject
+ "resolvePromise", ResolvePromise,
+ "rejectPromise", RejectPromise
]);
-// TODO(v8:4567): Allow experimental natives to remove function prototype
-[PromiseChain, PromiseDeferred, PromiseResolved].forEach(
- fn => %FunctionRemovePrototype(fn));
-
utils.Export(function(to) {
- to.PromiseChain = PromiseChain;
- to.PromiseDeferred = PromiseDeferred;
- to.PromiseResolved = PromiseResolved;
+ to.PromiseCastResolved = PromiseCastResolved;
+ to.PromiseThen = PromiseThen;
+
+ to.GlobalPromise = GlobalPromise;
+ to.NewPromiseCapability = NewPromiseCapability;
+ to.PerformPromiseThen = PerformPromiseThen;
});
})
diff --git a/deps/v8/src/js/regexp.js b/deps/v8/src/js/regexp.js
index cc8cb41de1..dbe4837c64 100644
--- a/deps/v8/src/js/regexp.js
+++ b/deps/v8/src/js/regexp.js
@@ -4,14 +4,11 @@
(function(global, utils) {
-'use strict';
-
%CheckIsBootstrapping();
// -------------------------------------------------------------------
// Imports
-var AddIndexedProperty;
var ExpandReplacement;
var GlobalArray = global.Array;
var GlobalObject = global.Object;
@@ -19,19 +16,17 @@ var GlobalRegExp = global.RegExp;
var GlobalRegExpPrototype;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-var MakeTypeError;
var MaxSimple;
var MinSimple;
var matchSymbol = utils.ImportNow("match_symbol");
var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
var SpeciesConstructor;
utils.Import(function(from) {
- AddIndexedProperty = from.AddIndexedProperty;
ExpandReplacement = from.ExpandReplacement;
- MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
SpeciesConstructor = from.SpeciesConstructor;
@@ -43,15 +38,17 @@ utils.Import(function(from) {
// regexp match. The property RegExpLastMatchInfo includes the matchIndices
// array of the last successful regexp match (an array of start/end index
// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indeces. The array also contains
+// that there are at least two capture indices. The array also contains
// the subject string for the last successful match.
-var RegExpLastMatchInfo = new InternalPackedArray(
- 2, // REGEXP_NUMBER_OF_CAPTURES
- "", // Last subject.
- UNDEFINED, // Last input - settable with RegExpSetInput.
- 0, // REGEXP_FIRST_CAPTURE + 0
- 0 // REGEXP_FIRST_CAPTURE + 1
-);
+// We use a JSObject rather than a JSArray so we don't have to manually update
+// its length.
+var RegExpLastMatchInfo = {
+ REGEXP_NUMBER_OF_CAPTURES: 2,
+ REGEXP_LAST_SUBJECT: "",
+ REGEXP_LAST_INPUT: UNDEFINED, // Settable with RegExpSetInput.
+ CAPTURE0: 0,
+ CAPTURE1: 0
+};
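// A plain-JS model of how these slots are consumed, assuming the layout
// documented above: start/end index pairs with the full match first, and
// capture group k occupying a further CAPTURE(2k)/CAPTURE(2k+1) pair.
function lastMatchedSubstring(info) {
  // CAPTURE0/CAPTURE1 delimit the whole match within the last subject.
  return info.REGEXP_LAST_SUBJECT.slice(info.CAPTURE0, info.CAPTURE1);
}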
// -------------------------------------------------------------------
@@ -117,12 +114,12 @@ function RegExpConstructor(pattern, flags) {
// ES#sec-regexp.prototype.compile RegExp.prototype.compile (pattern, flags)
function RegExpCompileJS(pattern, flags) {
if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.compile", this);
}
if (IS_REGEXP(pattern)) {
- if (!IS_UNDEFINED(flags)) throw MakeTypeError(kRegExpFlags);
+ if (!IS_UNDEFINED(flags)) throw %make_type_error(kRegExpFlags);
flags = PatternFlags(pattern);
pattern = REGEXP_SOURCE(pattern);
@@ -183,7 +180,7 @@ function RegExpExecNoTests(regexp, string, start) {
// RegExp.prototype.exec ( string )
function RegExpSubclassExecJS(string) {
if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'RegExp.prototype.exec', this);
}
@@ -229,7 +226,7 @@ function RegExpSubclassExecJS(string) {
// Legacy implementation of RegExp.prototype.exec
function RegExpExecJS(string) {
if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'RegExp.prototype.exec', this);
}
@@ -276,7 +273,7 @@ function RegExpSubclassExec(regexp, string, exec) {
if (IS_CALLABLE(exec)) {
var result = %_Call(exec, regexp, string);
if (!IS_RECEIVER(result) && !IS_NULL(result)) {
- throw MakeTypeError(kInvalidRegExpExecResult);
+ throw %make_type_error(kInvalidRegExpExecResult);
}
return result;
}
@@ -296,7 +293,7 @@ var regexp_val;
// else implements.
function RegExpTest(string) {
if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'RegExp.prototype.test', this);
}
string = TO_STRING(string);
@@ -327,10 +324,10 @@ function RegExpTest(string) {
// not a '?'. But see https://code.google.com/p/v8/issues/detail?id=3560
var regexp = this;
var source = REGEXP_SOURCE(regexp);
- if (regexp.length >= 3 &&
- %_StringCharCodeAt(regexp, 0) == 46 && // '.'
- %_StringCharCodeAt(regexp, 1) == 42 && // '*'
- %_StringCharCodeAt(regexp, 2) != 63) { // '?'
+ if (source.length >= 3 &&
+ %_StringCharCodeAt(source, 0) == 46 && // '.'
+ %_StringCharCodeAt(source, 1) == 42 && // '*'
+ %_StringCharCodeAt(source, 2) != 63) { // '?'
regexp = TrimRegExp(regexp);
}
// matchIndices is either null or the RegExpLastMatchInfo array.
@@ -347,7 +344,7 @@ function RegExpTest(string) {
// ES#sec-regexp.prototype.test RegExp.prototype.test ( S )
function RegExpSubclassTest(string) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'RegExp.prototype.test', this);
}
string = TO_STRING(string);
@@ -371,7 +368,7 @@ function TrimRegExp(regexp) {
function RegExpToString() {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(
+ throw %make_type_error(
kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
}
if (this === GlobalRegExpPrototype) {
@@ -395,7 +392,7 @@ function AtSurrogatePair(subject, index) {
function RegExpSplit(string, limit) {
// TODO(yangguo): allow non-regexp receivers.
if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.@@split", this);
}
var separator = this;
@@ -469,7 +466,7 @@ function RegExpSplit(string, limit) {
// RegExp.prototype [ @@split ] ( string, limit )
function RegExpSubclassSplit(string, limit) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.@@split", this);
}
string = TO_STRING(string);
@@ -502,7 +499,7 @@ function RegExpSubclassSplit(string, limit) {
var result;
if (size === 0) {
result = RegExpSubclassExec(splitter, string);
- if (IS_NULL(result)) AddIndexedProperty(array, 0, string);
+ if (IS_NULL(result)) %AddElement(array, 0, string);
return array;
}
var stringIndex = prevStringIndex;
@@ -515,10 +512,10 @@ function RegExpSubclassSplit(string, limit) {
stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
} else {
var end = MinSimple(TO_LENGTH(splitter.lastIndex), size);
- if (end === stringIndex) {
+ if (end === prevStringIndex) {
stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
} else {
- AddIndexedProperty(
+ %AddElement(
array, arrayIndex,
%_SubString(string, prevStringIndex, stringIndex));
arrayIndex++;
@@ -526,7 +523,7 @@ function RegExpSubclassSplit(string, limit) {
prevStringIndex = end;
var numberOfCaptures = MaxSimple(TO_LENGTH(result.length), 0);
for (var i = 1; i < numberOfCaptures; i++) {
- AddIndexedProperty(array, arrayIndex, result[i]);
+ %AddElement(array, arrayIndex, result[i]);
arrayIndex++;
if (arrayIndex === lim) return array;
}
@@ -534,34 +531,18 @@ function RegExpSubclassSplit(string, limit) {
}
}
}
- AddIndexedProperty(array, arrayIndex,
+ %AddElement(array, arrayIndex,
%_SubString(string, prevStringIndex, size));
return array;
}
%FunctionRemovePrototype(RegExpSubclassSplit);
-// Legacy implementation of RegExp.prototype[Symbol.match] which
-// doesn't properly call the underlying exec method
-function RegExpMatch(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@match", this);
- }
- var subject = TO_STRING(string);
-
- if (!REGEXP_GLOBAL(this)) return RegExpExecNoTests(this, subject, 0);
- this.lastIndex = 0;
- var result = %StringMatch(subject, this, RegExpLastMatchInfo);
- return result;
-}
-
-
// ES#sec-regexp.prototype-@@match
// RegExp.prototype [ @@match ] ( string )
function RegExpSubclassMatch(string) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.@@match", this);
}
string = TO_STRING(string);
@@ -719,7 +700,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
function RegExpReplace(string, replace) {
if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.@@replace", this);
}
var subject = TO_STRING(string);
@@ -875,7 +856,7 @@ function SetAdvancedStringIndex(regexp, string, unicode) {
// RegExp.prototype [ @@replace ] ( string, replaceValue )
function RegExpSubclassReplace(string, replace) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.@@replace", this);
}
string = TO_STRING(string);
@@ -956,24 +937,11 @@ function RegExpSubclassReplace(string, replace) {
%FunctionRemovePrototype(RegExpSubclassReplace);
-// Legacy implementation of RegExp.prototype[Symbol.search] which
-// doesn't properly use the overridden exec method
-function RegExpSearch(string) {
- if (!IS_REGEXP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@search", this);
- }
- var match = DoRegExpExec(this, TO_STRING(string), 0);
- if (match) return match[CAPTURE0];
- return -1;
-}
-
-
// ES#sec-regexp.prototype-@@search
// RegExp.prototype [ @@search ] ( string )
function RegExpSubclassSearch(string) {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"RegExp.prototype.@@search", this);
}
string = TO_STRING(string);
@@ -1051,7 +1019,7 @@ function RegExpMakeCaptureGetter(n) {
// ES6 21.2.5.3.
function RegExpGetFlags() {
if (!IS_RECEIVER(this)) {
- throw MakeTypeError(
+ throw %make_type_error(
kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
}
var result = '';
@@ -1072,7 +1040,7 @@ function RegExpGetGlobal() {
%IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
}
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.global");
+ throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.global");
}
return TO_BOOLEAN(REGEXP_GLOBAL(this));
}
@@ -1087,7 +1055,7 @@ function RegExpGetIgnoreCase() {
%IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
}
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
+ throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
}
return TO_BOOLEAN(REGEXP_IGNORE_CASE(this));
}
@@ -1101,7 +1069,7 @@ function RegExpGetMultiline() {
%IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
return UNDEFINED;
}
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.multiline");
+ throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.multiline");
}
return TO_BOOLEAN(REGEXP_MULTILINE(this));
}
@@ -1115,7 +1083,7 @@ function RegExpGetSource() {
%IncrementUseCounter(kRegExpPrototypeSourceGetter);
return "(?:)";
}
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.source");
+ throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.source");
}
return REGEXP_SOURCE(this);
}
@@ -1130,12 +1098,33 @@ function RegExpGetSticky() {
%IncrementUseCounter(kRegExpPrototypeStickyGetter);
return UNDEFINED;
}
- throw MakeTypeError(kRegExpNonRegExp, "RegExp.prototype.sticky");
+ throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.sticky");
}
return TO_BOOLEAN(REGEXP_STICKY(this));
}
%SetForceInlineFlag(RegExpGetSticky);
+
+// ES6 21.2.5.15.
+function RegExpGetUnicode() {
+ if (!IS_REGEXP(this)) {
+ // TODO(littledan): Remove this RegExp compat workaround
+ if (this === GlobalRegExpPrototype) {
+ %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
+ return UNDEFINED;
+ }
+ throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.unicode");
+ }
+ return TO_BOOLEAN(REGEXP_UNICODE(this));
+}
+%SetForceInlineFlag(RegExpGetUnicode);
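// Behavior sketch for the getter above (standard ES6 semantics):
console.log(/a/u.unicode);              // true
console.log(/a/i.unicode);              // false
console.log(RegExp.prototype.unicode);  // undefined, via the compat workaround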
+
+
+function RegExpSpecies() {
+ return this;
+}
+
+
// -------------------------------------------------------------------
%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
@@ -1145,15 +1134,17 @@ GlobalRegExpPrototype = new GlobalObject();
GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
%SetCode(GlobalRegExp, RegExpConstructor);
+utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies);
+
utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
- "exec", RegExpExecJS,
- "test", RegExpTest,
+ "exec", RegExpSubclassExecJS,
+ "test", RegExpSubclassTest,
"toString", RegExpToString,
"compile", RegExpCompileJS,
- matchSymbol, RegExpMatch,
- replaceSymbol, RegExpReplace,
- searchSymbol, RegExpSearch,
- splitSymbol, RegExpSplit,
+ matchSymbol, RegExpSubclassMatch,
+ replaceSymbol, RegExpSubclassReplace,
+ searchSymbol, RegExpSubclassSearch,
+ splitSymbol, RegExpSubclassSplit,
]);
utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
@@ -1162,6 +1153,7 @@ utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
+utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
// The properties `input` and `$_` are aliases for each other. When this
// value is set the value it is set to is coerced to a string.
@@ -1211,7 +1203,13 @@ for (var i = 1; i < 10; ++i) {
// -------------------------------------------------------------------
// Internal
-var InternalRegExpMatchInfo = new InternalPackedArray(2, "", UNDEFINED, 0, 0);
+var InternalRegExpMatchInfo = {
+ REGEXP_NUMBER_OF_CAPTURES: 2,
+ REGEXP_LAST_SUBJECT: "",
+ REGEXP_LAST_INPUT: UNDEFINED,
+ CAPTURE0: 0,
+ CAPTURE1: 0
+};
function InternalRegExpMatch(regexp, subject) {
var matchInfo = %_RegExpExec(regexp, subject, 0, InternalRegExpMatchInfo);
@@ -1236,12 +1234,6 @@ utils.Export(function(to) {
to.RegExpExec = DoRegExpExec;
to.RegExpInitialize = RegExpInitialize;
to.RegExpLastMatchInfo = RegExpLastMatchInfo;
- to.RegExpSubclassExecJS = RegExpSubclassExecJS;
- to.RegExpSubclassMatch = RegExpSubclassMatch;
- to.RegExpSubclassReplace = RegExpSubclassReplace;
- to.RegExpSubclassSearch = RegExpSubclassSearch;
- to.RegExpSubclassSplit = RegExpSubclassSplit;
- to.RegExpSubclassTest = RegExpSubclassTest;
to.RegExpTest = RegExpTest;
});
diff --git a/deps/v8/src/js/runtime.js b/deps/v8/src/js/runtime.js
index 8e4f283256..550b3e4afb 100644
--- a/deps/v8/src/js/runtime.js
+++ b/deps/v8/src/js/runtime.js
@@ -16,24 +16,15 @@
%CheckIsBootstrapping();
-var FLAG_harmony_species;
var GlobalArray = global.Array;
var GlobalBoolean = global.Boolean;
var GlobalString = global.String;
-var MakeRangeError;
-var MakeTypeError;
var speciesSymbol;
utils.Import(function(from) {
- MakeRangeError = from.MakeRangeError;
- MakeTypeError = from.MakeTypeError;
speciesSymbol = from.species_symbol;
});
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_species = from.FLAG_harmony_species;
-});
-
// ----------------------------------------------------------------------------
@@ -43,22 +34,16 @@ utils.ImportFromExperimental(function(from) {
*/
-// This function should be called rather than %AddElement in contexts where the
-// argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
-// this is a concern at basically all callsites.
-function AddIndexedProperty(obj, index, value) {
- if (index === TO_UINT32(index) && index !== kMaxUint32) {
- %AddElement(obj, index, value);
- } else {
- %AddNamedProperty(obj, TO_STRING(index), value, NONE);
- }
+function ToPositiveInteger(x, rangeErrorIndex) {
+ var i = TO_INTEGER(x) + 0;
+ if (i < 0) throw %make_range_error(rangeErrorIndex);
+ return i;
}
-%SetForceInlineFlag(AddIndexedProperty);
-function ToPositiveInteger(x, rangeErrorIndex) {
- var i = TO_INTEGER_MAP_MINUS_ZERO(x);
- if (i < 0) throw MakeRangeError(rangeErrorIndex);
+function ToIndex(x, rangeErrorIndex) {
+ var i = TO_INTEGER(x) + 0;
+ if (i < 0 || i > kMaxSafeInteger) throw %make_range_error(rangeErrorIndex);
return i;
}
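// A plain-JS model of ToIndex above. The "+ 0" is what replaces the old
// TO_INTEGER_MAP_MINUS_ZERO macro here and in ToPositiveInteger: adding
// zero folds -0 into +0 before the range check, and kMaxSafeInteger is
// 2**53 - 1.
function toIndex(x) {
  var n = Number(x);
  var i = (n !== n ? 0 : Math.trunc(n)) + 0;  // TO_INTEGER, -0 folded to +0
  if (i < 0 || i > Number.MAX_SAFE_INTEGER) throw new RangeError('bad index');
  return i;
}
console.log(toIndex(-0));   // 0
console.log(toIndex(4.9));  // 4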
@@ -78,35 +63,22 @@ function MinSimple(a, b) {
// ES2015 7.3.20
-// For the fallback with --harmony-species off, there are two possible choices:
-// - "conservative": return defaultConstructor
-// - "not conservative": return object.constructor
-// This fallback path is only needed in the transition to ES2015, and the
-// choice is made simply to preserve the previous behavior so that we don't
-// have a three-step upgrade: old behavior, unspecified intermediate behavior,
-// and ES2015.
-// In some cases, we were "conservative" (e.g., ArrayBuffer, RegExp), and in
-// other cases we were "not conservative (e.g., TypedArray, Promise).
-function SpeciesConstructor(object, defaultConstructor, conservative) {
- if (FLAG_harmony_species) {
- var constructor = object.constructor;
- if (IS_UNDEFINED(constructor)) {
- return defaultConstructor;
- }
- if (!IS_RECEIVER(constructor)) {
- throw MakeTypeError(kConstructorNotReceiver);
- }
- var species = constructor[speciesSymbol];
- if (IS_NULL_OR_UNDEFINED(species)) {
- return defaultConstructor;
- }
- if (%IsConstructor(species)) {
- return species;
- }
- throw MakeTypeError(kSpeciesNotConstructor);
- } else {
- return conservative ? defaultConstructor : object.constructor;
+function SpeciesConstructor(object, defaultConstructor) {
+ var constructor = object.constructor;
+ if (IS_UNDEFINED(constructor)) {
+ return defaultConstructor;
+ }
+ if (!IS_RECEIVER(constructor)) {
+ throw %make_type_error(kConstructorNotReceiver);
+ }
+ var species = constructor[speciesSymbol];
+ if (IS_NULL_OR_UNDEFINED(species)) {
+ return defaultConstructor;
+ }
+ if (%IsConstructor(species)) {
+ return species;
}
+ throw %make_type_error(kSpeciesNotConstructor);
}
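// A sketch of the lookup implemented above (ES2015 7.3.20), which is now
// unconditional since the --harmony-species flag shipped:
class Sorted extends Array {
  static get [Symbol.species]() { return Array; }
}
var s = new Sorted(1, 2, 3).map(function (x) { return x; });
console.log(s instanceof Sorted);  // false: species redirected map() to Array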
//----------------------------------------------------------------------------
@@ -122,10 +94,10 @@ function SpeciesConstructor(object, defaultConstructor, conservative) {
// Exports
utils.Export(function(to) {
- to.AddIndexedProperty = AddIndexedProperty;
to.MaxSimple = MaxSimple;
to.MinSimple = MinSimple;
to.ToPositiveInteger = ToPositiveInteger;
+ to.ToIndex = ToIndex;
to.SpeciesConstructor = SpeciesConstructor;
});
diff --git a/deps/v8/src/js/spread.js b/deps/v8/src/js/spread.js
index 82ea839598..39b12e7a8e 100644
--- a/deps/v8/src/js/spread.js
+++ b/deps/v8/src/js/spread.js
@@ -9,11 +9,6 @@
// -------------------------------------------------------------------
// Imports
var InternalArray = utils.InternalArray;
-var MakeTypeError;
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
// -------------------------------------------------------------------
@@ -35,7 +30,7 @@ function SpreadArguments() {
function SpreadIterable(collection) {
if (IS_NULL_OR_UNDEFINED(collection)) {
- throw MakeTypeError(kNotIterable, collection);
+ throw %make_type_error(kNotIterable, collection);
}
var args = new InternalArray();
diff --git a/deps/v8/src/js/string-iterator.js b/deps/v8/src/js/string-iterator.js
index af9af31efd..2319e5a679 100644
--- a/deps/v8/src/js/string-iterator.js
+++ b/deps/v8/src/js/string-iterator.js
@@ -14,17 +14,12 @@
var GlobalString = global.String;
var IteratorPrototype = utils.ImportNow("IteratorPrototype");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MakeTypeError;
var stringIteratorIteratedStringSymbol =
utils.ImportNow("string_iterator_iterated_string_symbol");
var stringIteratorNextIndexSymbol =
utils.ImportNow("string_iterator_next_index_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
// -------------------------------------------------------------------
function StringIterator() {}
@@ -49,7 +44,7 @@ function StringIteratorNext() {
if (!IS_RECEIVER(iterator) ||
!HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'String Iterator.prototype.next');
}
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index 0eb394e173..38caab7b12 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -9,15 +9,10 @@
// -------------------------------------------------------------------
// Imports
-var ArrayIndexOf;
var ArrayJoin;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
-var InternalArray = utils.InternalArray;
-var InternalPackedArray = utils.InternalPackedArray;
var IsRegExp;
-var MakeRangeError;
-var MakeTypeError;
var MaxSimple;
var MinSimple;
var RegExpInitialize;
@@ -27,11 +22,8 @@ var searchSymbol = utils.ImportNow("search_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
utils.Import(function(from) {
- ArrayIndexOf = from.ArrayIndexOf;
ArrayJoin = from.ArrayJoin;
IsRegExp = from.IsRegExp;
- MakeRangeError = from.MakeRangeError;
- MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
RegExpInitialize = from.RegExpInitialize;
@@ -39,48 +31,6 @@ utils.Import(function(from) {
//-------------------------------------------------------------------
-// ECMA-262 section 15.5.4.2
-function StringToString() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
- throw MakeTypeError(kNotGeneric, 'String.prototype.toString');
- }
- return %_ValueOf(this);
-}
-
-
-// ECMA-262 section 15.5.4.3
-function StringValueOf() {
- if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
- throw MakeTypeError(kNotGeneric, 'String.prototype.valueOf');
- }
- return %_ValueOf(this);
-}
-
-
-// ECMA-262, section 15.5.4.4
-function StringCharAtJS(pos) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.charAt");
-
- var result = %_StringCharAt(this, pos);
- if (%_IsSmi(result)) {
- result = %_StringCharAt(TO_STRING(this), TO_INTEGER(pos));
- }
- return result;
-}
-
-
-// ECMA-262 section 15.5.4.5
-function StringCharCodeAtJS(pos) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.charCodeAt");
-
- var result = %_StringCharCodeAt(this, pos);
- if (!%_IsSmi(result)) {
- result = %_StringCharCodeAt(TO_STRING(this), TO_INTEGER(pos));
- }
- return result;
-}
-
-
// ECMA-262, section 15.5.4.6
function StringConcat(other /* and more */) { // length == 1
"use strict";
@@ -180,9 +130,9 @@ function StringNormalize(formArg) { // length == 0
var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
- var normalizationForm = %_Call(ArrayIndexOf, NORMALIZATION_FORMS, form);
+ var normalizationForm = %ArrayIndexOf(NORMALIZATION_FORMS, form, 0);
if (normalizationForm === -1) {
- throw MakeRangeError(kNormalizationForm,
+ throw %make_range_error(kNormalizationForm,
%_Call(ArrayJoin, NORMALIZATION_FORMS, ', '));
}
@@ -473,43 +423,19 @@ function StringSubstring(start, end) {
}
-// ES6 draft, revision 26 (2014-07-18), section B.2.3.1
-function StringSubstr(start, n) {
+// ecma262/#sec-string.prototype.substr
+function StringSubstr(start, length) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr");
-
var s = TO_STRING(this);
- var len;
+ var size = s.length;
+ start = TO_INTEGER(start);
+ length = IS_UNDEFINED(length) ? size : TO_INTEGER(length);
- // Correct n: If not given, set to string length; if explicitly
- // set to undefined, zero, or negative, returns empty string.
- if (IS_UNDEFINED(n)) {
- len = s.length;
- } else {
- len = TO_INTEGER(n);
- if (len <= 0) return '';
- }
+ if (start < 0) start = MaxSimple(size + start, 0);
+ length = MinSimple(MaxSimple(length, 0), size - start);
- // Correct start: If not given (or undefined), set to zero; otherwise
- // convert to integer and handle negative case.
- if (IS_UNDEFINED(start)) {
- start = 0;
- } else {
- start = TO_INTEGER(start);
- // If positive, and greater than or equal to the string length,
- // return empty string.
- if (start >= s.length) return '';
- // If negative and absolute value is larger than the string length,
- // use zero.
- if (start < 0) {
- start += s.length;
- if (start < 0) start = 0;
- }
- }
-
- var end = start + len;
- if (end > s.length) end = s.length;
-
- return %_SubString(s, start, end);
+ if (length <= 0) return '';
+ return %_SubString(s, start, start + length);
}
@@ -544,25 +470,6 @@ function StringToLocaleUpperCase() {
return %StringToUpperCase(TO_STRING(this));
}
-// ES5, 15.5.4.20
-function StringTrimJS() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.trim");
-
- return %StringTrim(TO_STRING(this), true, true);
-}
-
-function StringTrimLeft() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimLeft");
-
- return %StringTrim(TO_STRING(this), true, false);
-}
-
-function StringTrimRight() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimRight");
-
- return %StringTrim(TO_STRING(this), false, true);
-}
-
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
@@ -670,14 +577,14 @@ function StringRepeat(count) {
var s = TO_STRING(this);
var n = TO_INTEGER(count);
- if (n < 0 || n === INFINITY) throw MakeRangeError(kInvalidCountValue);
+ if (n < 0 || n === INFINITY) throw %make_range_error(kInvalidCountValue);
// Early return to allow an arbitrarily-large repeat of the empty string.
if (s.length === 0) return "";
// The maximum string length is stored in a smi, so a longer repeat
// must result in a range error.
- if (n > %_MaxSmi()) throw MakeRangeError(kInvalidCountValue);
+ if (n > %_MaxSmi()) throw %make_range_error(kInvalidCountValue);
var r = "";
while (true) {
@@ -696,7 +603,7 @@ function StringStartsWith(searchString, position) { // length == 1
var s = TO_STRING(this);
if (IsRegExp(searchString)) {
- throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.startsWith");
+ throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.startsWith");
}
var ss = TO_STRING(searchString);
@@ -722,7 +629,7 @@ function StringEndsWith(searchString, position) { // length == 1
var s = TO_STRING(this);
if (IsRegExp(searchString)) {
- throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.endsWith");
+ throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.endsWith");
}
var ss = TO_STRING(searchString);
@@ -749,7 +656,7 @@ function StringIncludes(searchString, position) { // length == 1
var string = TO_STRING(this);
if (IsRegExp(searchString)) {
- throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.includes");
+ throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.includes");
}
searchString = TO_STRING(searchString);
@@ -792,33 +699,6 @@ function StringCodePointAt(pos) {
}
-// ES6 Draft 05-22-2014, section 21.1.2.2
-function StringFromCodePoint(_) { // length = 1
- "use strict";
- var code;
- var length = arguments.length;
- var index;
- var result = "";
- for (index = 0; index < length; index++) {
- code = arguments[index];
- if (!%_IsSmi(code)) {
- code = TO_NUMBER(code);
- }
- if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
- throw MakeRangeError(kInvalidCodePoint, code);
- }
- if (code <= 0xFFFF) {
- result += %_StringCharFromCode(code);
- } else {
- code -= 0x10000;
- result += %_StringCharFromCode((code >>> 10) & 0x3FF | 0xD800);
- result += %_StringCharFromCode(code & 0x3FF | 0xDC00);
- }
- }
- return result;
-}
-
-
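
The deleted StringFromCodePoint documents the surrogate-pair encoding its replacement (now implemented outside this file) must preserve: for a code point above 0xFFFF, subtract 0x10000, then emit `(code >>> 10) | 0xD800` followed by `(code & 0x3FF) | 0xDC00`. A worked example (illustrative):

  // U+1F600 GRINNING FACE
  var code = 0x1F600 - 0x10000;              // 0xF600
  var hi = ((code >>> 10) & 0x3FF) | 0xD800; // 0xD83D
  var lo = (code & 0x3FF) | 0xDC00;          // 0xDE00
  console.log(String.fromCodePoint(0x1F600) ===
              String.fromCharCode(hi, lo));  // true
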
// -------------------------------------------------------------------
// String methods related to templates
@@ -845,25 +725,13 @@ function StringRaw(callSite) {
// -------------------------------------------------------------------
-// Set the String function and constructor.
-%FunctionSetPrototype(GlobalString, new GlobalString());
-
-// Set up the constructor property on the String prototype object.
-%AddNamedProperty(
- GlobalString.prototype, "constructor", GlobalString, DONT_ENUM);
-
// Set up the non-enumerable functions on the String object.
utils.InstallFunctions(GlobalString, DONT_ENUM, [
- "fromCodePoint", StringFromCodePoint,
"raw", StringRaw
]);
// Set up the non-enumerable functions on the String prototype object.
utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
- "valueOf", StringValueOf,
- "toString", StringToString,
- "charAt", StringCharAtJS,
- "charCodeAt", StringCharCodeAtJS,
"codePointAt", StringCodePointAt,
"concat", StringConcat,
"endsWith", StringEndsWith,
@@ -885,9 +753,6 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"toLocaleLowerCase", StringToLocaleLowerCase,
"toUpperCase", StringToUpperCaseJS,
"toLocaleUpperCase", StringToLocaleUpperCase,
- "trim", StringTrimJS,
- "trimLeft", StringTrimLeft,
- "trimRight", StringTrimRight,
"link", StringLink,
"anchor", StringAnchor,
@@ -909,7 +774,6 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
utils.Export(function(to) {
to.ExpandReplacement = ExpandReplacement;
- to.StringCharAt = StringCharAtJS;
to.StringIndexOf = StringIndexOf;
to.StringLastIndexOf = StringLastIndexOf;
to.StringMatch = StringMatchJS;
diff --git a/deps/v8/src/js/symbol.js b/deps/v8/src/js/symbol.js
index 7365655e24..4ec31ae9bb 100644
--- a/deps/v8/src/js/symbol.js
+++ b/deps/v8/src/js/symbol.js
@@ -16,45 +16,17 @@ var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
var isConcatSpreadableSymbol =
utils.ImportNow("is_concat_spreadable_symbol");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MakeTypeError;
+var matchSymbol = utils.ImportNow("match_symbol");
+var replaceSymbol = utils.ImportNow("replace_symbol");
+var searchSymbol = utils.ImportNow("search_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
+var splitSymbol = utils.ImportNow("split_symbol");
var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
// -------------------------------------------------------------------
-// 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint )
-function SymbolToPrimitive(hint) {
- if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "Symbol.prototype [ @@toPrimitive ]", this);
- }
- return %_ValueOf(this);
-}
-
-
-function SymbolToString() {
- if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "Symbol.prototype.toString", this);
- }
- return %SymbolDescriptiveString(%_ValueOf(this));
-}
-
-
-function SymbolValueOf() {
- if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "Symbol.prototype.valueOf", this);
- }
- return %_ValueOf(this);
-}
-
-
function SymbolFor(key) {
key = TO_STRING(key);
var registry = %SymbolRegistry();
@@ -68,7 +40,7 @@ function SymbolFor(key) {
function SymbolKeyFor(symbol) {
- if (!IS_SYMBOL(symbol)) throw MakeTypeError(kSymbolKeyFor, symbol);
+ if (!IS_SYMBOL(symbol)) throw %make_type_error(kSymbolKeyFor, symbol);
return %SymbolRegistry().keyFor[symbol];
}
@@ -78,11 +50,11 @@ utils.InstallConstants(GlobalSymbol, [
"hasInstance", hasInstanceSymbol,
"isConcatSpreadable", isConcatSpreadableSymbol,
"iterator", iteratorSymbol,
- // TODO(yangguo): expose when implemented.
- // "match", matchSymbol,
- // "replace", replaceSymbol,
- // "search", searchSymbol,
- // "split, splitSymbol,
+ "match", matchSymbol,
+ "replace", replaceSymbol,
+ "search", searchSymbol,
+ "species", speciesSymbol,
+ "split", splitSymbol,
"toPrimitive", toPrimitiveSymbol,
"toStringTag", toStringTagSymbol,
"unscopables", unscopablesSymbol,
@@ -93,23 +65,4 @@ utils.InstallFunctions(GlobalSymbol, DONT_ENUM, [
"keyFor", SymbolKeyFor
]);
-%AddNamedProperty(
- GlobalSymbol.prototype, toStringTagSymbol, "Symbol", DONT_ENUM | READ_ONLY);
-
-utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM | READ_ONLY, [
- toPrimitiveSymbol, SymbolToPrimitive
-]);
-
-utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM, [
- "toString", SymbolToString,
- "valueOf", SymbolValueOf
-]);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.SymbolToString = SymbolToString;
-})
-
})
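
With this change Symbol.match, Symbol.replace, Symbol.search, Symbol.species and Symbol.split are installed as constants on the Symbol constructor instead of being commented out, so the string/RegExp subclassing hooks become reachable from script. A small illustration of the @@split hook (not part of the patch):

  class CsvSplitter {
    [Symbol.split](str) { return str.split(","); }
  }
  // String.prototype.split dispatches through @@split when present.
  console.log("a,b,c".split(new CsvSplitter()));  // [ 'a', 'b', 'c' ]
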
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 4fb174bc57..b97a9c86ce 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -11,7 +11,6 @@
// -------------------------------------------------------------------
// Imports
-var AddIndexedProperty;
// array.js has to come before typedarray.js for this to work
var ArrayToString = utils.ImportNow("ArrayToString");
var ArrayValues;
@@ -22,7 +21,6 @@ var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
var GlobalDataView = global.DataView;
var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
var InnerArrayCopyWithin;
var InnerArrayEvery;
var InnerArrayFill;
@@ -30,10 +28,7 @@ var InnerArrayFilter;
var InnerArrayFind;
var InnerArrayFindIndex;
var InnerArrayForEach;
-var InnerArrayIncludes;
-var InnerArrayIndexOf;
var InnerArrayJoin;
-var InnerArrayLastIndexOf;
var InnerArrayReduce;
var InnerArrayReduceRight;
var InnerArraySome;
@@ -41,14 +36,14 @@ var InnerArraySort;
var InnerArrayToLocaleString;
var InternalArray = utils.InternalArray;
var IsNaN;
-var MakeRangeError;
-var MakeTypeError;
var MaxSimple;
var MinSimple;
var PackedArrayReverse;
var SpeciesConstructor;
var ToPositiveInteger;
+var ToIndex;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
@@ -70,8 +65,9 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
+var GlobalTypedArray = %object_get_prototype_of(GlobalUint8Array);
+
utils.Import(function(from) {
- AddIndexedProperty = from.AddIndexedProperty;
ArrayValues = from.ArrayValues;
GetIterator = from.GetIterator;
GetMethod = from.GetMethod;
@@ -82,23 +78,19 @@ utils.Import(function(from) {
InnerArrayFind = from.InnerArrayFind;
InnerArrayFindIndex = from.InnerArrayFindIndex;
InnerArrayForEach = from.InnerArrayForEach;
- InnerArrayIncludes = from.InnerArrayIncludes;
- InnerArrayIndexOf = from.InnerArrayIndexOf;
InnerArrayJoin = from.InnerArrayJoin;
- InnerArrayLastIndexOf = from.InnerArrayLastIndexOf;
InnerArrayReduce = from.InnerArrayReduce;
InnerArrayReduceRight = from.InnerArrayReduceRight;
InnerArraySome = from.InnerArraySome;
InnerArraySort = from.InnerArraySort;
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
IsNaN = from.IsNaN;
- MakeRangeError = from.MakeRangeError;
- MakeTypeError = from.MakeTypeError;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
PackedArrayReverse = from.PackedArrayReverse;
SpeciesConstructor = from.SpeciesConstructor;
ToPositiveInteger = from.ToPositiveInteger;
+ ToIndex = from.ToIndex;
});
// --------------- Typed Arrays ---------------------
@@ -113,7 +105,7 @@ TYPED_ARRAYS(TYPED_ARRAY_CONSTRUCTOR_CASE)
}
// The TypeError should not be generated since all callers should
// have already called ValidateTypedArray.
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"TypedArrayDefaultConstructor", this);
}
@@ -123,12 +115,12 @@ function TypedArrayCreate(constructor, arg0, arg1, arg2) {
} else {
var newTypedArray = new constructor(arg0, arg1, arg2);
}
- if (!IS_TYPEDARRAY(newTypedArray)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(newTypedArray)) throw %make_type_error(kNotTypedArray);
// TODO(littledan): Check for being detached, here and elsewhere
// All callers where the first argument is a Number have no additional
// arguments.
if (IS_NUMBER(arg0) && %_TypedArrayGetLength(newTypedArray) < arg0) {
- throw MakeTypeError(kTypedArrayTooShort);
+ throw %make_type_error(kTypedArrayTooShort);
}
return newTypedArray;
}
@@ -143,10 +135,15 @@ function TypedArraySpeciesCreate(exemplar, arg0, arg1, arg2, conservative) {
macro TYPED_ARRAY_CONSTRUCTOR(ARRAY_ID, NAME, ELEMENT_SIZE)
function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
if (!IS_UNDEFINED(byteOffset)) {
- byteOffset = ToPositiveInteger(byteOffset, kInvalidTypedArrayLength);
+ byteOffset = ToIndex(byteOffset, kInvalidTypedArrayLength);
}
if (!IS_UNDEFINED(length)) {
- length = ToPositiveInteger(length, kInvalidTypedArrayLength);
+ length = ToIndex(length, kInvalidTypedArrayLength);
+ }
+ if (length > %_MaxSmi()) {
+ // Note: this is not per spec, but rather a constraint of our current
+ // representation (which uses smi's).
+ throw %make_range_error(kInvalidTypedArrayLength);
}
var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
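
ToIndex, which replaces ToPositiveInteger for `byteOffset` and `length` here, follows the ES2017 ToIndex abstract operation: undefined maps to 0, the value is converted via ToInteger, and anything negative or above 2^53 - 1 raises a RangeError; the added `%_MaxSmi()` check then narrows the result further to V8's internal Smi range. A plain-JS sketch of the operation (the name is illustrative, not V8's):

  function ToIndexSketch(value) {
    if (value === undefined) return 0;
    const integer = Math.trunc(Number(value)) || 0;  // ToInteger
    if (integer < 0 || integer > Number.MAX_SAFE_INTEGER) {
      throw new RangeError("invalid index");
    }
    return integer;
  }
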
@@ -157,39 +154,38 @@ function NAMEConstructByArrayBuffer(obj, buffer, byteOffset, length) {
offset = byteOffset;
if (offset % ELEMENT_SIZE !== 0) {
- throw MakeRangeError(kInvalidTypedArrayAlignment,
+ throw %make_range_error(kInvalidTypedArrayAlignment,
"start offset", "NAME", ELEMENT_SIZE);
}
- if (offset > bufferByteLength) {
- throw MakeRangeError(kInvalidTypedArrayOffset);
- }
}
var newByteLength;
- var newLength;
if (IS_UNDEFINED(length)) {
if (bufferByteLength % ELEMENT_SIZE !== 0) {
- throw MakeRangeError(kInvalidTypedArrayAlignment,
+ throw %make_range_error(kInvalidTypedArrayAlignment,
"byte length", "NAME", ELEMENT_SIZE);
}
newByteLength = bufferByteLength - offset;
- newLength = newByteLength / ELEMENT_SIZE;
+ if (newByteLength < 0) {
+ throw %make_range_error(kInvalidTypedArrayAlignment,
+ "byte length", "NAME", ELEMENT_SIZE);
+ }
} else {
- var newLength = length;
- newByteLength = newLength * ELEMENT_SIZE;
- }
- if ((offset + newByteLength > bufferByteLength)
- || (newLength > %_MaxSmi())) {
- throw MakeRangeError(kInvalidTypedArrayLength);
+ newByteLength = length * ELEMENT_SIZE;
+ if (offset + newByteLength > bufferByteLength) {
+ throw %make_range_error(kInvalidTypedArrayLength);
+ }
}
%_TypedArrayInitialize(obj, ARRAY_ID, buffer, offset, newByteLength, true);
}
function NAMEConstructByLength(obj, length) {
var l = IS_UNDEFINED(length) ?
- 0 : ToPositiveInteger(length, kInvalidTypedArrayLength);
- if (l > %_MaxSmi()) {
- throw MakeRangeError(kInvalidTypedArrayLength);
+ 0 : ToIndex(length, kInvalidTypedArrayLength);
+ if (length > %_MaxSmi()) {
+ // Note: this is not per spec, but rather a constraint of our current
+ // representation (which uses smi's).
+ throw %make_range_error(kInvalidTypedArrayLength);
}
var byteLength = l * ELEMENT_SIZE;
if (byteLength > %_TypedArrayMaxSizeInHeap()) {
@@ -204,7 +200,7 @@ function NAMEConstructByArrayLike(obj, arrayLike, length) {
var l = ToPositiveInteger(length, kInvalidTypedArrayLength);
if (l > %_MaxSmi()) {
- throw MakeRangeError(kInvalidTypedArrayLength);
+ throw %make_range_error(kInvalidTypedArrayLength);
}
var initialized = false;
var byteLength = l * ELEMENT_SIZE;
@@ -263,21 +259,20 @@ function NAMEConstructor(arg1, arg2, arg3) {
if (!IS_UNDEFINED(new.target)) {
if (IS_ARRAYBUFFER(arg1) || IS_SHAREDARRAYBUFFER(arg1)) {
NAMEConstructByArrayBuffer(this, arg1, arg2, arg3);
- } else if (IS_NUMBER(arg1) || IS_STRING(arg1) ||
- IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
- NAMEConstructByLength(this, arg1);
} else if (IS_TYPEDARRAY(arg1)) {
NAMEConstructByTypedArray(this, arg1);
- } else {
+ } else if (IS_RECEIVER(arg1)) {
var iteratorFn = arg1[iteratorSymbol];
if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
NAMEConstructByArrayLike(this, arg1, arg1.length);
} else {
NAMEConstructByIterable(this, arg1, iteratorFn);
}
+ } else {
+ NAMEConstructByLength(this, arg1);
}
} else {
- throw MakeTypeError(kConstructorNotFunction, "NAME")
+ throw %make_type_error(kConstructorNotFunction, "NAME")
}
}
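
The constructor dispatch is also reordered: ArrayBuffer/SharedArrayBuffer first, then typed arrays, then any other receiver (iterated, or treated as array-like when it has no usable @@iterator), and only non-receivers fall through to the by-length path, where ToIndex does the validation. Illustrative:

  // A plain object without @@iterator is treated as array-like:
  const arr = new Uint8Array({ length: 2, 0: 7, 1: 9 });
  console.log(Array.from(arr));           // [ 7, 9 ]
  // A primitive still selects the by-length path:
  console.log(new Uint8Array(3).length);  // 3
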
@@ -327,47 +322,11 @@ macro TYPED_ARRAY_SUBARRAY_CASE(ARRAY_ID, NAME, ELEMENT_SIZE)
endmacro
TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
}
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
"get TypedArray.prototype.subarray", this);
}
%SetForceInlineFlag(TypedArraySubArray);
-function TypedArrayGetBuffer() {
- if (!IS_TYPEDARRAY(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "get TypedArray.prototype.buffer", this);
- }
- return %TypedArrayGetBuffer(this);
-}
-%SetForceInlineFlag(TypedArrayGetBuffer);
-
-function TypedArrayGetByteLength() {
- if (!IS_TYPEDARRAY(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "get TypedArray.prototype.byteLength", this);
- }
- return %_ArrayBufferViewGetByteLength(this);
-}
-%SetForceInlineFlag(TypedArrayGetByteLength);
-
-function TypedArrayGetByteOffset() {
- if (!IS_TYPEDARRAY(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "get TypedArray.prototype.byteOffset", this);
- }
- return %_ArrayBufferViewGetByteOffset(this);
-}
-%SetForceInlineFlag(TypedArrayGetByteOffset);
-
-function TypedArrayGetLength() {
- if (!IS_TYPEDARRAY(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "get TypedArray.prototype.length", this);
- }
- return %_TypedArrayGetLength(this);
-}
-%SetForceInlineFlag(TypedArrayGetLength);
-
function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
@@ -386,7 +345,7 @@ function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
var sourceElementSize = source.BYTES_PER_ELEMENT;
var targetElementSize = target.BYTES_PER_ELEMENT;
- var sourceLength = source.length;
+ var sourceLength = %_TypedArrayGetLength(source);
// Copy left part.
function CopyLeftPart() {
@@ -406,7 +365,7 @@ function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
}
var leftIndex = CopyLeftPart();
- // Copy rigth part;
+ // Copy right part;
function CopyRightPart() {
// First unmutated byte before the next write
var targetPtr =
@@ -437,10 +396,10 @@ function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
function TypedArraySet(obj, offset) {
var intOffset = IS_UNDEFINED(offset) ? 0 : TO_INTEGER(offset);
- if (intOffset < 0) throw MakeTypeError(kTypedArraySetNegativeOffset);
+ if (intOffset < 0) throw %make_type_error(kTypedArraySetNegativeOffset);
if (intOffset > %_MaxSmi()) {
- throw MakeRangeError(kTypedArraySetSourceTooLarge);
+ throw %make_range_error(kTypedArraySetSourceTooLarge);
}
switch (%TypedArraySetFastCases(this, obj, intOffset)) {
// These numbers should be synchronized with runtime.cc.
@@ -450,7 +409,8 @@ function TypedArraySet(obj, offset) {
TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
return;
case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
- TypedArraySetFromArrayLike(this, obj, obj.length, intOffset);
+ TypedArraySetFromArrayLike(this,
+ obj, %_TypedArrayGetLength(obj), intOffset);
return;
case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
var l = obj.length;
@@ -460,13 +420,13 @@ function TypedArraySet(obj, offset) {
// instead of silently ignoring the call, so that
// the user knows (s)he did something wrong.
// (Consistent with Firefox and Blink/WebKit)
- throw MakeTypeError(kInvalidArgument);
+ throw %make_type_error(kInvalidArgument);
}
return;
}
l = TO_LENGTH(l);
- if (intOffset + l > this.length) {
- throw MakeRangeError(kTypedArraySetSourceTooLarge);
+ if (intOffset + l > %_TypedArrayGetLength(this)) {
+ throw %make_range_error(kTypedArraySetSourceTooLarge);
}
TypedArraySetFromArrayLike(this, obj, l, intOffset);
return;
@@ -483,7 +443,7 @@ function TypedArrayGetToStringTag() {
function TypedArrayCopyWithin(target, start, end) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -495,7 +455,7 @@ function TypedArrayCopyWithin(target, start, end) {
// ES6 draft 05-05-15, section 22.2.3.7
function TypedArrayEvery(f, receiver) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -506,7 +466,7 @@ function TypedArrayEvery(f, receiver) {
// ES6 draft 08-24-14, section 22.2.3.12
function TypedArrayForEach(f, receiver) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -517,7 +477,7 @@ function TypedArrayForEach(f, receiver) {
// ES6 draft 04-05-14 section 22.2.3.8
function TypedArrayFill(value, start, end) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -528,10 +488,10 @@ function TypedArrayFill(value, start, end) {
// ES6 draft 07-15-13, section 22.2.3.9
function TypedArrayFilter(f, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
var result = new InternalArray();
InnerArrayFilter(f, thisArg, this, length, result);
var captured = result.length;
@@ -546,7 +506,7 @@ function TypedArrayFilter(f, thisArg) {
// ES6 draft 07-15-13, section 22.2.3.10
function TypedArrayFind(predicate, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -557,7 +517,7 @@ function TypedArrayFind(predicate, thisArg) {
// ES6 draft 07-15-13, section 22.2.3.11
function TypedArrayFindIndex(predicate, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -568,7 +528,7 @@ function TypedArrayFindIndex(predicate, thisArg) {
// ES6 draft 05-18-15, section 22.2.3.21
function TypedArrayReverse() {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -596,7 +556,7 @@ function TypedArrayComparefn(x, y) {
// ES6 draft 05-18-15, section 22.2.3.25
function TypedArraySort(comparefn) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -610,33 +570,85 @@ function TypedArraySort(comparefn) {
// ES6 section 22.2.3.13
function TypedArrayIndexOf(element, index) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
- return InnerArrayIndexOf(this, element, index, length);
+
+ if (length === 0) return -1;
+ if (!IS_NUMBER(element)) return -1;
+ var n = TO_INTEGER(index);
+
+ var k;
+ if (n === 0) {
+ k = 0;
+ } else if (n > 0) {
+ k = n;
+ } else {
+ k = length + n;
+ if (k < 0) {
+ k = 0;
+ }
+ }
+
+ while (k < length) {
+ var elementK = this[k];
+ if (element === elementK) {
+ return k;
+ }
+ ++k;
+ }
+ return -1;
}
%FunctionSetLength(TypedArrayIndexOf, 1);
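
TypedArrayIndexOf now inlines the ES6 %TypedArray%.prototype.indexOf steps instead of delegating to InnerArrayIndexOf: a negative fromIndex is taken relative to the end and clamped at 0, non-numbers can never match (typed arrays hold only numbers), and comparison is strict equality, so NaN is never found. Illustrative:

  const ta = new Float64Array([1, NaN, 3]);
  console.log(ta.indexOf(3, -1));  // 2  (fromIndex = length - 1)
  console.log(ta.indexOf(NaN));    // -1 (strict equality misses NaN)
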
// ES6 section 22.2.3.16
function TypedArrayLastIndexOf(element, index) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
- return InnerArrayLastIndexOf(this, element, index, length,
- arguments.length);
+ if (length === 0) return -1;
+ if (!IS_NUMBER(element)) return -1;
+ var n;
+ if (arguments.length < 2) {
+ n = length - 1;
+ } else {
+ n = TO_INTEGER(index);
+ }
+
+ var k;
+ if (n >= 0) {
+ if (length <= n) {
+ k = length - 1;
+ } else if (n === 0) {
+ k = 0;
+ } else {
+ k = n;
+ }
+ } else {
+ k = length + n;
+ }
+
+ while (k >= 0) {
+ var elementK = this[k];
+ if (element === elementK) {
+ return k;
+ }
+ --k;
+ }
+ return -1;
}
%FunctionSetLength(TypedArrayLastIndexOf, 1);
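
TypedArrayLastIndexOf gets the same treatment; `arguments.length` distinguishes an omitted fromIndex (default `length - 1`) from an explicit one, and an in-range non-negative index simply starts the backwards scan there. Illustrative:

  const ta = new Int32Array([5, 7, 5]);
  console.log(ta.lastIndexOf(5));     // 2 (default fromIndex: length - 1)
  console.log(ta.lastIndexOf(5, 1));  // 0 (scan starts at index 1)
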
// ES6 draft 07-15-13, section 22.2.3.18
function TypedArrayMap(f, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
var result = TypedArraySpeciesCreate(this, length);
- if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
+ if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
for (var i = 0; i < length; i++) {
var element = this[i];
result[i] = %_Call(f, thisArg, element, i, this);
@@ -648,7 +660,7 @@ function TypedArrayMap(f, thisArg) {
// ES6 draft 05-05-15, section 22.2.3.24
function TypedArraySome(f, receiver) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -659,7 +671,7 @@ function TypedArraySome(f, receiver) {
// ES6 section 22.2.3.27
function TypedArrayToLocaleString() {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -669,7 +681,7 @@ function TypedArrayToLocaleString() {
// ES6 section 22.2.3.14
function TypedArrayJoin(separator) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
@@ -679,7 +691,7 @@ function TypedArrayJoin(separator) {
// ES6 draft 07-15-13, section 22.2.3.19
function TypedArrayReduce(callback, current) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
return InnerArrayReduce(callback, current, this, length,
@@ -690,7 +702,7 @@ function TypedArrayReduce(callback, current) {
// ES6 draft 07-15-13, section 22.2.3.19
function TypedArrayReduceRight(callback, current) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
return InnerArrayReduceRight(callback, current, this, length,
@@ -700,7 +712,7 @@ function TypedArrayReduceRight(callback, current) {
function TypedArraySlice(start, end) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var len = %_TypedArrayGetLength(this);
var relativeStart = TO_INTEGER(start);
@@ -744,11 +756,33 @@ function TypedArraySlice(start, end) {
// ES2016 draft, section 22.2.3.14
function TypedArrayIncludes(searchElement, fromIndex) {
- if (!IS_TYPEDARRAY(this)) throw MakeTypeError(kNotTypedArray);
+ if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
- return InnerArrayIncludes(searchElement, fromIndex, this, length);
+ if (length === 0) return false;
+ var n = TO_INTEGER(fromIndex);
+
+ var k;
+ if (n >= 0) {
+ k = n;
+ } else {
+ k = length + n;
+ if (k < 0) {
+ k = 0;
+ }
+ }
+
+ while (k < length) {
+ var elementK = this[k];
+ if (%SameValueZero(searchElement, elementK)) {
+ return true;
+ }
+
+ ++k;
+ }
+
+ return false;
}
%FunctionSetLength(TypedArrayIncludes, 1);
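
Unlike indexOf, TypedArrayIncludes compares with SameValueZero (via %SameValueZero), so NaN is found; this is the one observable difference between the two otherwise identical loops. Illustrative:

  const ta = new Float32Array([1, NaN]);
  console.log(ta.includes(NaN));  // true
  console.log(ta.indexOf(NaN));   // -1
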
@@ -786,10 +820,10 @@ function IterableToArrayLike(items) {
// ES#sec-%typedarray%.from
// %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] )
function TypedArrayFrom(source, mapfn, thisArg) {
- if (!%IsConstructor(this)) throw MakeTypeError(kNotConstructor, this);
+ if (!%IsConstructor(this)) throw %make_type_error(kNotConstructor, this);
var mapping;
if (!IS_UNDEFINED(mapfn)) {
- if (!IS_CALLABLE(mapfn)) throw MakeTypeError(kCalledNonCallable, this);
+ if (!IS_CALLABLE(mapfn)) throw %make_type_error(kCalledNonCallable, this);
mapping = true;
} else {
mapping = false;
@@ -811,34 +845,31 @@ function TypedArrayFrom(source, mapfn, thisArg) {
}
%FunctionSetLength(TypedArrayFrom, 1);
-function TypedArray() {
+// TODO(bmeurer): Migrate this to a proper builtin.
+function TypedArrayConstructor() {
if (IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kConstructorNonCallable, "TypedArray");
+ throw %make_type_error(kConstructorNonCallable, "TypedArray");
}
- if (new.target === TypedArray) {
- throw MakeTypeError(kConstructAbstractClass, "TypedArray");
+ if (new.target === GlobalTypedArray) {
+ throw %make_type_error(kConstructAbstractClass, "TypedArray");
}
}
+function TypedArraySpecies() {
+ return this;
+}
+
// -------------------------------------------------------------------
-%FunctionSetPrototype(TypedArray, new GlobalObject());
-%AddNamedProperty(TypedArray.prototype,
- "constructor", TypedArray, DONT_ENUM);
-utils.InstallFunctions(TypedArray, DONT_ENUM, [
+%SetCode(GlobalTypedArray, TypedArrayConstructor);
+utils.InstallFunctions(GlobalTypedArray, DONT_ENUM, [
"from", TypedArrayFrom,
"of", TypedArrayOf
]);
-utils.InstallGetter(TypedArray.prototype, "buffer", TypedArrayGetBuffer);
-utils.InstallGetter(TypedArray.prototype, "byteOffset", TypedArrayGetByteOffset,
- DONT_ENUM | DONT_DELETE);
-utils.InstallGetter(TypedArray.prototype, "byteLength",
- TypedArrayGetByteLength, DONT_ENUM | DONT_DELETE);
-utils.InstallGetter(TypedArray.prototype, "length", TypedArrayGetLength,
- DONT_ENUM | DONT_DELETE);
-utils.InstallGetter(TypedArray.prototype, toStringTagSymbol,
+utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies);
+utils.InstallGetter(GlobalTypedArray.prototype, toStringTagSymbol,
TypedArrayGetToStringTag);
-utils.InstallFunctions(TypedArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
"subarray", TypedArraySubArray,
"set", TypedArraySet,
"copyWithin", TypedArrayCopyWithin,
@@ -862,15 +893,15 @@ utils.InstallFunctions(TypedArray.prototype, DONT_ENUM, [
"toLocaleString", TypedArrayToLocaleString
]);
-%AddNamedProperty(TypedArray.prototype, "toString", ArrayToString,
+%AddNamedProperty(GlobalTypedArray.prototype, "toString", ArrayToString,
DONT_ENUM);
macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
%SetCode(GlobalNAME, NAMEConstructor);
%FunctionSetPrototype(GlobalNAME, new GlobalObject());
- %InternalSetPrototype(GlobalNAME, TypedArray);
- %InternalSetPrototype(GlobalNAME.prototype, TypedArray.prototype);
+ %InternalSetPrototype(GlobalNAME, GlobalTypedArray);
+ %InternalSetPrototype(GlobalNAME.prototype, GlobalTypedArray.prototype);
%AddNamedProperty(GlobalNAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
READ_ONLY | DONT_ENUM | DONT_DELETE);
@@ -886,29 +917,6 @@ TYPED_ARRAYS(SETUP_TYPED_ARRAY)
// --------------------------- DataView -----------------------------
-function DataViewGetBufferJS() {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver, 'DataView.buffer', this);
- }
- return %DataViewGetBuffer(this);
-}
-
-function DataViewGetByteOffset() {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'DataView.byteOffset', this);
- }
- return %_ArrayBufferViewGetByteOffset(this);
-}
-
-function DataViewGetByteLength() {
- if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'DataView.byteLength', this);
- }
- return %_ArrayBufferViewGetByteLength(this);
-}
-
macro DATA_VIEW_TYPES(FUNCTION)
FUNCTION(Int8)
FUNCTION(Uint8)
@@ -924,22 +932,20 @@ endmacro
macro DATA_VIEW_GETTER_SETTER(TYPENAME)
function DataViewGetTYPENAMEJS(offset, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'DataView.getTYPENAME', this);
}
- if (arguments.length < 1) throw MakeTypeError(kInvalidArgument);
- offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
+ offset = IS_UNDEFINED(offset) ? 0 : ToIndex(offset, kInvalidDataViewAccessorOffset);
return %DataViewGetTYPENAME(this, offset, !!little_endian);
}
%FunctionSetLength(DataViewGetTYPENAMEJS, 1);
function DataViewSetTYPENAMEJS(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'DataView.setTYPENAME', this);
}
- if (arguments.length < 2) throw MakeTypeError(kInvalidArgument);
- offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
+ offset = IS_UNDEFINED(offset) ? 0 : ToIndex(offset, kInvalidDataViewAccessorOffset);
%DataViewSetTYPENAME(this, offset, TO_NUMBER(value), !!little_endian);
}
%FunctionSetLength(DataViewSetTYPENAMEJS, 2);
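
The DataView accessors drop their explicit arguments.length checks: an undefined offset now defaults to 0 via the `IS_UNDEFINED` test, and every other offset goes through ToIndex, so negative or unsafe-integer offsets raise a RangeError rather than the old kInvalidArgument TypeError. Illustrative:

  const dv = new DataView(new ArrayBuffer(8));
  dv.setUint8(0, 42);
  console.log(dv.getUint8());  // 42 (offset defaults to 0)
  try { dv.getUint8(-1); } catch (e) {
    console.log(e instanceof RangeError);  // true
  }
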
@@ -947,21 +953,6 @@ endmacro
DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
-// Setup the DataView constructor.
-%FunctionSetPrototype(GlobalDataView, new GlobalObject);
-
-// Set up constructor property on the DataView prototype.
-%AddNamedProperty(GlobalDataView.prototype, "constructor", GlobalDataView,
- DONT_ENUM);
-%AddNamedProperty(GlobalDataView.prototype, toStringTagSymbol, "DataView",
- READ_ONLY|DONT_ENUM);
-
-utils.InstallGetter(GlobalDataView.prototype, "buffer", DataViewGetBufferJS);
-utils.InstallGetter(GlobalDataView.prototype, "byteOffset",
- DataViewGetByteOffset);
-utils.InstallGetter(GlobalDataView.prototype, "byteLength",
- DataViewGetByteLength);
-
utils.InstallFunctions(GlobalDataView.prototype, DONT_ENUM, [
"getInt8", DataViewGetInt8JS,
"setInt8", DataViewSetInt8JS,
diff --git a/deps/v8/src/js/uri.js b/deps/v8/src/js/uri.js
deleted file mode 100644
index dca83c9b23..0000000000
--- a/deps/v8/src/js/uri.js
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file contains support for URI manipulations written in
-// JavaScript.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
-var MakeURIError;
-
-utils.Import(function(from) {
- MakeURIError = from.MakeURIError;
-});
-
-
-// -------------------------------------------------------------------
-// Define internal helper functions.
-
-function HexValueOf(code) {
- // 0-9
- if (code >= 48 && code <= 57) return code - 48;
- // A-F
- if (code >= 65 && code <= 70) return code - 55;
- // a-f
- if (code >= 97 && code <= 102) return code - 87;
-
- return -1;
-}
-
-// Does the char code correspond to an alpha-numeric char.
-function isAlphaNumeric(cc) {
- // a - z
- if (97 <= cc && cc <= 122) return true;
- // A - Z
- if (65 <= cc && cc <= 90) return true;
- // 0 - 9
- if (48 <= cc && cc <= 57) return true;
-
- return false;
-}
-
-// Lazily initialized.
-var hexCharCodeArray = 0;
-
-function URIAddEncodedOctetToBuffer(octet, result, index) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = hexCharCodeArray[octet >> 4];
- result[index++] = hexCharCodeArray[octet & 0x0F];
- return index;
-}
-
-function URIEncodeOctets(octets, result, index) {
- if (hexCharCodeArray === 0) {
- hexCharCodeArray = [48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- 65, 66, 67, 68, 69, 70];
- }
- index = URIAddEncodedOctetToBuffer(octets[0], result, index);
- if (octets[1]) index = URIAddEncodedOctetToBuffer(octets[1], result, index);
- if (octets[2]) index = URIAddEncodedOctetToBuffer(octets[2], result, index);
- if (octets[3]) index = URIAddEncodedOctetToBuffer(octets[3], result, index);
- return index;
-}
-
-function URIEncodeSingle(cc, result, index) {
- var x = (cc >> 12) & 0xF;
- var y = (cc >> 6) & 63;
- var z = cc & 63;
- var octets = new InternalArray(3);
- if (cc <= 0x007F) {
- octets[0] = cc;
- } else if (cc <= 0x07FF) {
- octets[0] = y + 192;
- octets[1] = z + 128;
- } else {
- octets[0] = x + 224;
- octets[1] = y + 128;
- octets[2] = z + 128;
- }
- return URIEncodeOctets(octets, result, index);
-}
-
-function URIEncodePair(cc1 , cc2, result, index) {
- var u = ((cc1 >> 6) & 0xF) + 1;
- var w = (cc1 >> 2) & 0xF;
- var x = cc1 & 3;
- var y = (cc2 >> 6) & 0xF;
- var z = cc2 & 63;
- var octets = new InternalArray(4);
- octets[0] = (u >> 2) + 240;
- octets[1] = (((u & 3) << 4) | w) + 128;
- octets[2] = ((x << 4) | y) + 128;
- octets[3] = z + 128;
- return URIEncodeOctets(octets, result, index);
-}
-
-function URIHexCharsToCharCode(highChar, lowChar) {
- var highCode = HexValueOf(highChar);
- var lowCode = HexValueOf(lowChar);
- if (highCode == -1 || lowCode == -1) throw MakeURIError();
- return (highCode << 4) | lowCode;
-}
-
-// Callers must ensure that |result| is a sufficiently long sequential
-// two-byte string!
-function URIDecodeOctets(octets, result, index) {
- var value;
- var o0 = octets[0];
- if (o0 < 0x80) {
- value = o0;
- } else if (o0 < 0xc2) {
- throw MakeURIError();
- } else {
- var o1 = octets[1];
- if (o0 < 0xe0) {
- var a = o0 & 0x1f;
- if ((o1 < 0x80) || (o1 > 0xbf)) throw MakeURIError();
- var b = o1 & 0x3f;
- value = (a << 6) + b;
- if (value < 0x80 || value > 0x7ff) throw MakeURIError();
- } else {
- var o2 = octets[2];
- if (o0 < 0xf0) {
- var a = o0 & 0x0f;
- if ((o1 < 0x80) || (o1 > 0xbf)) throw MakeURIError();
- var b = o1 & 0x3f;
- if ((o2 < 0x80) || (o2 > 0xbf)) throw MakeURIError();
- var c = o2 & 0x3f;
- value = (a << 12) + (b << 6) + c;
- if ((value < 0x800) || (value > 0xffff)) throw MakeURIError();
- } else {
- var o3 = octets[3];
- if (o0 < 0xf8) {
- var a = (o0 & 0x07);
- if ((o1 < 0x80) || (o1 > 0xbf)) throw MakeURIError();
- var b = (o1 & 0x3f);
- if ((o2 < 0x80) || (o2 > 0xbf)) {
- throw MakeURIError();
- }
- var c = (o2 & 0x3f);
- if ((o3 < 0x80) || (o3 > 0xbf)) throw MakeURIError();
- var d = (o3 & 0x3f);
- value = (a << 18) + (b << 12) + (c << 6) + d;
- if ((value < 0x10000) || (value > 0x10ffff)) throw MakeURIError();
- } else {
- throw MakeURIError();
- }
- }
- }
- }
- if (0xD800 <= value && value <= 0xDFFF) throw MakeURIError();
- if (value < 0x10000) {
- %_TwoByteSeqStringSetChar(index++, value, result);
- } else {
- %_TwoByteSeqStringSetChar(index++, (value >> 10) + 0xd7c0, result);
- %_TwoByteSeqStringSetChar(index++, (value & 0x3ff) + 0xdc00, result);
- }
- return index;
-}
-
-// ECMA-262, section 15.1.3
-function Encode(uri, unescape) {
- uri = TO_STRING(uri);
- var uriLength = uri.length;
- var array = new InternalArray(uriLength);
- var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var cc1 = %_StringCharCodeAt(uri, k);
- if (unescape(cc1)) {
- array[index++] = cc1;
- } else {
- if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw MakeURIError();
- if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, array, index);
- } else {
- k++;
- if (k == uriLength) throw MakeURIError();
- var cc2 = %_StringCharCodeAt(uri, k);
- if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw MakeURIError();
- index = URIEncodePair(cc1, cc2, array, index);
- }
- }
- }
-
- var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
- for (var i = 0; i < array.length; i++) {
- %_OneByteSeqStringSetChar(i, array[i], result);
- }
- return result;
-}
-
-// ECMA-262, section 15.1.3
-function Decode(uri, reserved) {
- uri = TO_STRING(uri);
- var uriLength = uri.length;
- var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
- var index = 0;
- var k = 0;
-
- // Optimistically assume one-byte string.
- for ( ; k < uriLength; k++) {
- var code = %_StringCharCodeAt(uri, k);
- if (code == 37) { // '%'
- if (k + 2 >= uriLength) throw MakeURIError();
- var cc = URIHexCharsToCharCode(%_StringCharCodeAt(uri, k+1),
- %_StringCharCodeAt(uri, k+2));
- if (cc >> 7) break; // Assumption wrong, two-byte string.
- if (reserved(cc)) {
- %_OneByteSeqStringSetChar(index++, 37, one_byte); // '%'.
- %_OneByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k+1),
- one_byte);
- %_OneByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k+2),
- one_byte);
- } else {
- %_OneByteSeqStringSetChar(index++, cc, one_byte);
- }
- k += 2;
- } else {
- if (code > 0x7f) break; // Assumption wrong, two-byte string.
- %_OneByteSeqStringSetChar(index++, code, one_byte);
- }
- }
-
- one_byte = %TruncateString(one_byte, index);
- if (k == uriLength) return one_byte;
-
- // Write into two byte string.
- var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
- index = 0;
-
- for ( ; k < uriLength; k++) {
- var code = %_StringCharCodeAt(uri, k);
- if (code == 37) { // '%'
- if (k + 2 >= uriLength) throw MakeURIError();
- var cc = URIHexCharsToCharCode(%_StringCharCodeAt(uri, ++k),
- %_StringCharCodeAt(uri, ++k));
- if (cc >> 7) {
- var n = 0;
- while (((cc << ++n) & 0x80) != 0) { }
- if (n == 1 || n > 4) throw MakeURIError();
- var octets = new InternalArray(n);
- octets[0] = cc;
- if (k + 3 * (n - 1) >= uriLength) throw MakeURIError();
- for (var i = 1; i < n; i++) {
- if (uri[++k] != '%') throw MakeURIError();
- octets[i] = URIHexCharsToCharCode(%_StringCharCodeAt(uri, ++k),
- %_StringCharCodeAt(uri, ++k));
- }
- index = URIDecodeOctets(octets, two_byte, index);
- } else if (reserved(cc)) {
- %_TwoByteSeqStringSetChar(index++, 37, two_byte); // '%'.
- %_TwoByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k - 1),
- two_byte);
- %_TwoByteSeqStringSetChar(index++, %_StringCharCodeAt(uri, k),
- two_byte);
- } else {
- %_TwoByteSeqStringSetChar(index++, cc, two_byte);
- }
- } else {
- %_TwoByteSeqStringSetChar(index++, code, two_byte);
- }
- }
-
- two_byte = %TruncateString(two_byte, index);
- return one_byte + two_byte;
-}
-
-// -------------------------------------------------------------------
-// Define exported functions.
-
-// ECMA-262 - B.2.1.
-function URIEscapeJS(s) {
- return %URIEscape(s);
-}
-
-// ECMA-262 - B.2.2.
-function URIUnescapeJS(s) {
- return %URIUnescape(s);
-}
-
-// ECMA-262 - 15.1.3.1.
-function URIDecode(uri) {
- var reservedPredicate = function(cc) {
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &
- if (cc == 38) return true;
- // +,
- if (43 <= cc && cc <= 44) return true;
- // /
- if (cc == 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
-
- return false;
- };
- return Decode(uri, reservedPredicate);
-}
-
-// ECMA-262 - 15.1.3.2.
-function URIDecodeComponent(component) {
- var reservedPredicate = function(cc) { return false; };
- return Decode(component, reservedPredicate);
-}
-
-// ECMA-262 - 15.1.3.3.
-function URIEncode(uri) {
- var unescapePredicate = function(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // #$
- if (35 <= cc && cc <= 36) return true;
- // &'()*+,-./
- if (38 <= cc && cc <= 47) return true;
- // :;
- if (58 <= cc && cc <= 59) return true;
- // =
- if (cc == 61) return true;
- // ?@
- if (63 <= cc && cc <= 64) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
-
- return false;
- };
- return Encode(uri, unescapePredicate);
-}
-
-// ECMA-262 - 15.1.3.4
-function URIEncodeComponent(component) {
- var unescapePredicate = function(cc) {
- if (isAlphaNumeric(cc)) return true;
- // !
- if (cc == 33) return true;
- // '()*
- if (39 <= cc && cc <= 42) return true;
- // -.
- if (45 <= cc && cc <= 46) return true;
- // _
- if (cc == 95) return true;
- // ~
- if (cc == 126) return true;
-
- return false;
- };
- return Encode(component, unescapePredicate);
-}
-
-// -------------------------------------------------------------------
-// Install exported functions.
-
-// Set up non-enumerable URI functions on the global object and set
-// their names.
-utils.InstallFunctions(global, DONT_ENUM, [
- "escape", URIEscapeJS,
- "unescape", URIUnescapeJS,
- "decodeURI", URIDecode,
- "decodeURIComponent", URIDecodeComponent,
- "encodeURI", URIEncode,
- "encodeURIComponent", URIEncodeComponent
-]);
-
-})
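
The entire JavaScript URI implementation is deleted (escape/unescape and the encodeURI/decodeURI family now live in C++). The removed Encode path documents the UTF-8 layout the runtime must reproduce: one to three bytes per BMP code unit via URIEncodeSingle, four bytes per surrogate pair via URIEncodePair. A worked check of the two-byte case (illustrative):

  // U+00E9 'é' → 0xC3 0xA9 in UTF-8 (two-byte form: 192 + y, 128 + z)
  var cc = 0xE9;
  var y = (cc >> 6) & 63;  // 3
  var z = cc & 63;         // 41 (0x29)
  console.log([(y + 192).toString(16), (z + 128).toString(16)]); // ['c3','a9']
  console.log(encodeURIComponent("\u00E9"));  // "%C3%A9"
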
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 5185c620b3..0c0a7925b9 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -9,31 +9,11 @@
// ----------------------------------------------------------------------------
// Imports
-var GlobalArray = global.Array;
var GlobalNumber = global.Number;
var GlobalObject = global.Object;
-var InternalArray = utils.InternalArray;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var MakeRangeError;
-var MakeSyntaxError;
-var MakeTypeError;
-var MathAbs;
var NaN = %GetRootNaN();
var ObjectToString = utils.ImportNow("object_to_string");
-var ObserveBeginPerformSplice;
-var ObserveEndPerformSplice;
-var ObserveEnqueueSpliceRecord;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-utils.Import(function(from) {
- MakeRangeError = from.MakeRangeError;
- MakeSyntaxError = from.MakeSyntaxError;
- MakeTypeError = from.MakeTypeError;
- MathAbs = from.MathAbs;
- ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
- ObserveEndPerformSplice = from.ObserveEndPerformSplice;
- ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
-});
// ----------------------------------------------------------------------------
@@ -142,606 +122,12 @@ function ObjectIsPrototypeOf(V) {
}
-// ES6 19.1.3.4
-function ObjectPropertyIsEnumerable(V) {
- var P = TO_NAME(V);
- return %PropertyIsEnumerable(TO_OBJECT(this), P);
-}
-
-
-// Extensions for providing property getters and setters.
-function ObjectDefineGetter(name, fun) {
- var receiver = this;
- if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
- receiver = %GlobalProxy(ObjectDefineGetter);
- }
- if (!IS_CALLABLE(fun)) {
- throw MakeTypeError(kObjectGetterExpectingFunction);
- }
- var desc = new PropertyDescriptor();
- desc.setGet(fun);
- desc.setEnumerable(true);
- desc.setConfigurable(true);
- DefineOwnProperty(TO_OBJECT(receiver), TO_NAME(name), desc, false);
-}
-
-
-function ObjectLookupGetter(name) {
- var receiver = this;
- if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
- receiver = %GlobalProxy(ObjectLookupGetter);
- }
- return %LookupAccessor(TO_OBJECT(receiver), TO_NAME(name), GETTER);
-}
-
-
-function ObjectDefineSetter(name, fun) {
- var receiver = this;
- if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
- receiver = %GlobalProxy(ObjectDefineSetter);
- }
- if (!IS_CALLABLE(fun)) {
- throw MakeTypeError(kObjectSetterExpectingFunction);
- }
- var desc = new PropertyDescriptor();
- desc.setSet(fun);
- desc.setEnumerable(true);
- desc.setConfigurable(true);
- DefineOwnProperty(TO_OBJECT(receiver), TO_NAME(name), desc, false);
-}
-
-
-function ObjectLookupSetter(name) {
- var receiver = this;
- if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
- receiver = %GlobalProxy(ObjectLookupSetter);
- }
- return %LookupAccessor(TO_OBJECT(receiver), TO_NAME(name), SETTER);
-}
-
-
-// ES6 6.2.4.1
-function IsAccessorDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasGetter() || desc.hasSetter();
-}
-
-
-// ES6 6.2.4.2
-function IsDataDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return desc.hasValue() || desc.hasWritable();
-}
-
-
-// ES6 6.2.4.3
-function IsGenericDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return false;
- return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
-}
-
-
-function IsInconsistentDescriptor(desc) {
- return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
-}
-
-
-// Harmony Proxies
-function FromGenericPropertyDescriptor(desc) {
- if (IS_UNDEFINED(desc)) return desc;
- var obj = new GlobalObject();
-
- if (desc.hasValue()) {
- %AddNamedProperty(obj, "value", desc.getValue(), NONE);
- }
- if (desc.hasWritable()) {
- %AddNamedProperty(obj, "writable", desc.isWritable(), NONE);
- }
- if (desc.hasGetter()) {
- %AddNamedProperty(obj, "get", desc.getGet(), NONE);
- }
- if (desc.hasSetter()) {
- %AddNamedProperty(obj, "set", desc.getSet(), NONE);
- }
- if (desc.hasEnumerable()) {
- %AddNamedProperty(obj, "enumerable", desc.isEnumerable(), NONE);
- }
- if (desc.hasConfigurable()) {
- %AddNamedProperty(obj, "configurable", desc.isConfigurable(), NONE);
- }
- return obj;
-}
-
-
-// ES6 6.2.4.5
-function ToPropertyDescriptor(obj) {
- if (!IS_RECEIVER(obj)) throw MakeTypeError(kPropertyDescObject, obj);
-
- var desc = new PropertyDescriptor();
-
- if ("enumerable" in obj) {
- desc.setEnumerable(TO_BOOLEAN(obj.enumerable));
- }
-
- if ("configurable" in obj) {
- desc.setConfigurable(TO_BOOLEAN(obj.configurable));
- }
-
- if ("value" in obj) {
- desc.setValue(obj.value);
- }
-
- if ("writable" in obj) {
- desc.setWritable(TO_BOOLEAN(obj.writable));
- }
-
- if ("get" in obj) {
- var get = obj.get;
- if (!IS_UNDEFINED(get) && !IS_CALLABLE(get)) {
- throw MakeTypeError(kObjectGetterCallable, get);
- }
- desc.setGet(get);
- }
-
- if ("set" in obj) {
- var set = obj.set;
- if (!IS_UNDEFINED(set) && !IS_CALLABLE(set)) {
- throw MakeTypeError(kObjectSetterCallable, set);
- }
- desc.setSet(set);
- }
-
- if (IsInconsistentDescriptor(desc)) {
- throw MakeTypeError(kValueAndAccessor, obj);
- }
- return desc;
-}
-
-// TODO(cbruni): remove once callers have been removed
-function ToCompletePropertyDescriptor(obj) {
- var desc = ToPropertyDescriptor(obj);
- if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
- if (!desc.hasValue()) desc.setValue(UNDEFINED);
- if (!desc.hasWritable()) desc.setWritable(false);
- } else {
- // Is accessor descriptor.
- if (!desc.hasGetter()) desc.setGet(UNDEFINED);
- if (!desc.hasSetter()) desc.setSet(UNDEFINED);
- }
- if (!desc.hasEnumerable()) desc.setEnumerable(false);
- if (!desc.hasConfigurable()) desc.setConfigurable(false);
- return desc;
-}
-
-
-function PropertyDescriptor() {
- // Initialize here so they are all in-object and have the same map.
- // Default values from ES5 8.6.1.
- this.value_ = UNDEFINED;
- this.hasValue_ = false;
- this.writable_ = false;
- this.hasWritable_ = false;
- this.enumerable_ = false;
- this.hasEnumerable_ = false;
- this.configurable_ = false;
- this.hasConfigurable_ = false;
- this.get_ = UNDEFINED;
- this.hasGetter_ = false;
- this.set_ = UNDEFINED;
- this.hasSetter_ = false;
-}
-
-utils.SetUpLockedPrototype(PropertyDescriptor, [
- "value_",
- "hasValue_",
- "writable_",
- "hasWritable_",
- "enumerable_",
- "hasEnumerable_",
- "configurable_",
- "hasConfigurable_",
- "get_",
- "hasGetter_",
- "set_",
- "hasSetter_"
-], [
- "toString", function PropertyDescriptor_ToString() {
- return "[object PropertyDescriptor]";
- },
- "setValue", function PropertyDescriptor_SetValue(value) {
- this.value_ = value;
- this.hasValue_ = true;
- },
- "getValue", function PropertyDescriptor_GetValue() {
- return this.value_;
- },
- "hasValue", function PropertyDescriptor_HasValue() {
- return this.hasValue_;
- },
- "setEnumerable", function PropertyDescriptor_SetEnumerable(enumerable) {
- this.enumerable_ = enumerable;
- this.hasEnumerable_ = true;
- },
- "isEnumerable", function PropertyDescriptor_IsEnumerable() {
- return this.enumerable_;
- },
- "hasEnumerable", function PropertyDescriptor_HasEnumerable() {
- return this.hasEnumerable_;
- },
- "setWritable", function PropertyDescriptor_SetWritable(writable) {
- this.writable_ = writable;
- this.hasWritable_ = true;
- },
- "isWritable", function PropertyDescriptor_IsWritable() {
- return this.writable_;
- },
- "hasWritable", function PropertyDescriptor_HasWritable() {
- return this.hasWritable_;
- },
- "setConfigurable",
- function PropertyDescriptor_SetConfigurable(configurable) {
- this.configurable_ = configurable;
- this.hasConfigurable_ = true;
- },
- "hasConfigurable", function PropertyDescriptor_HasConfigurable() {
- return this.hasConfigurable_;
- },
- "isConfigurable", function PropertyDescriptor_IsConfigurable() {
- return this.configurable_;
- },
- "setGet", function PropertyDescriptor_SetGetter(get) {
- this.get_ = get;
- this.hasGetter_ = true;
- },
- "getGet", function PropertyDescriptor_GetGetter() {
- return this.get_;
- },
- "hasGetter", function PropertyDescriptor_HasGetter() {
- return this.hasGetter_;
- },
- "setSet", function PropertyDescriptor_SetSetter(set) {
- this.set_ = set;
- this.hasSetter_ = true;
- },
- "getSet", function PropertyDescriptor_GetSetter() {
- return this.set_;
- },
- "hasSetter", function PropertyDescriptor_HasSetter() {
- return this.hasSetter_;
- }
-]);
-
-
-// Converts an array returned from Runtime_GetOwnProperty to an actual
-// property descriptor. For a description of the array layout please
-// see the runtime.cc file.
-function ConvertDescriptorArrayToDescriptor(desc_array) {
- if (IS_UNDEFINED(desc_array)) {
- return UNDEFINED;
- }
-
- var desc = new PropertyDescriptor();
- // This is an accessor.
- if (desc_array[IS_ACCESSOR_INDEX]) {
- desc.setGet(desc_array[GETTER_INDEX]);
- desc.setSet(desc_array[SETTER_INDEX]);
- } else {
- desc.setValue(desc_array[VALUE_INDEX]);
- desc.setWritable(desc_array[WRITABLE_INDEX]);
- }
- desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
- desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
-
- return desc;
-}
-
-
-// For Harmony proxies.
-function GetTrap(handler, name, defaultTrap) {
- var trap = handler[name];
- if (IS_UNDEFINED(trap)) {
- if (IS_UNDEFINED(defaultTrap)) {
- throw MakeTypeError(kIllegalInvocation);
- }
- trap = defaultTrap;
- } else if (!IS_CALLABLE(trap)) {
- throw MakeTypeError(kIllegalInvocation);
- }
- return trap;
-}
-
-
-function CallTrap1(handler, name, defaultTrap, x) {
- return %_Call(GetTrap(handler, name, defaultTrap), handler, x);
-}
-
-
-function CallTrap2(handler, name, defaultTrap, x, y) {
- return %_Call(GetTrap(handler, name, defaultTrap), handler, x, y);
-}
-
-
-// ES5 section 8.12.1.
-// TODO(jkummerow): Deprecated. Migrate all callers to
-// ObjectGetOwnPropertyDescriptor and delete this.
-function GetOwnPropertyJS(obj, v) {
- var p = TO_NAME(v);
- if (IS_PROXY(obj)) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(v)) return UNDEFINED;
-
- var handler = %JSProxyGetHandler(obj);
- var descriptor = CallTrap1(
- handler, "getOwnPropertyDescriptor", UNDEFINED, p);
- if (IS_UNDEFINED(descriptor)) return descriptor;
- var desc = ToCompletePropertyDescriptor(descriptor);
- if (!desc.isConfigurable()) {
- throw MakeTypeError(kIllegalInvocation);
- }
- return desc;
- }
-
- // GetOwnProperty returns an array indexed by the constants
- // defined in macros.py.
- // If p is not a property on obj, undefined is returned.
- var props = %GetOwnProperty_Legacy(TO_OBJECT(obj), p);
-
- return ConvertDescriptorArrayToDescriptor(props);
-}
-
-
// ES6 7.3.9
function GetMethod(obj, p) {
var func = obj[p];
if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
if (IS_CALLABLE(func)) return func;
- throw MakeTypeError(kCalledNonCallable, typeof func);
-}
-
-
-// Harmony proxies.
-function DefineProxyProperty(obj, p, attributes, should_throw) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(p)) return false;
-
- var handler = %JSProxyGetHandler(obj);
- var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes);
- if (!result) {
- if (should_throw) {
- throw MakeTypeError(kIllegalInvocation);
- } else {
- return false;
- }
- }
- return true;
-}
-
-
-// ES6 9.1.6 [[DefineOwnProperty]](P, Desc)
-function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_array = %GetOwnProperty_Legacy(obj, TO_NAME(p));
- var current = ConvertDescriptorArrayToDescriptor(current_array);
- var extensible = %object_is_extensible(obj);
-
- if (IS_UNDEFINED(current) && !extensible) {
- if (should_throw) {
- throw MakeTypeError(kDefineDisallowed, p);
- } else {
- return false;
- }
- }
-
- if (!IS_UNDEFINED(current)) {
- if ((IsGenericDescriptor(desc) ||
- IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
- (!desc.hasEnumerable() ||
- %SameValue(desc.isEnumerable(), current.isEnumerable())) &&
- (!desc.hasConfigurable() ||
- %SameValue(desc.isConfigurable(), current.isConfigurable())) &&
- (!desc.hasWritable() ||
- %SameValue(desc.isWritable(), current.isWritable())) &&
- (!desc.hasValue() ||
- %SameValue(desc.getValue(), current.getValue())) &&
- (!desc.hasGetter() ||
- %SameValue(desc.getGet(), current.getGet())) &&
- (!desc.hasSetter() ||
- %SameValue(desc.getSet(), current.getSet()))) {
- return true;
- }
- if (!current.isConfigurable()) {
- // Step 7
- if (desc.isConfigurable() ||
- (desc.hasEnumerable() &&
- desc.isEnumerable() != current.isEnumerable())) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- // Step 8
- if (!IsGenericDescriptor(desc)) {
- // Step 9a
- if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- // Step 10a
- if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
- var currentIsWritable = current.isWritable();
- if (currentIsWritable != desc.isWritable()) {
- if (!currentIsWritable) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- }
- if (!currentIsWritable && desc.hasValue() &&
- !%SameValue(desc.getValue(), current.getValue())) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- }
- // Step 11
- if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
- if (desc.hasSetter() &&
- !%SameValue(desc.getSet(), current.getSet())) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- if (desc.hasGetter() && !%SameValue(desc.getGet(),current.getGet())) {
- if (should_throw) {
- throw MakeTypeError(kRedefineDisallowed, p);
- } else {
- return false;
- }
- }
- }
- }
- }
- }
-
- // Send flags - enumerable and configurable are common - writable is
- // only sent to the data descriptor.
- // Take special care if enumerable and configurable are not defined on
- // desc (we need to preserve the existing values from current).
- var flag = NONE;
- if (desc.hasEnumerable()) {
- flag |= desc.isEnumerable() ? 0 : DONT_ENUM;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isEnumerable() ? 0 : DONT_ENUM;
- } else {
- flag |= DONT_ENUM;
- }
-
- if (desc.hasConfigurable()) {
- flag |= desc.isConfigurable() ? 0 : DONT_DELETE;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isConfigurable() ? 0 : DONT_DELETE;
- } else
- flag |= DONT_DELETE;
-
- if (IsDataDescriptor(desc) ||
- (IsGenericDescriptor(desc) &&
- (IS_UNDEFINED(current) || IsDataDescriptor(current)))) {
- // There are 3 cases that lead here:
- // Step 4a - defining a new data property.
- // Steps 9b & 12 - replacing an existing accessor property with a data
- // property.
- // Step 12 - updating an existing data property with a data or generic
- // descriptor.
-
- if (desc.hasWritable()) {
- flag |= desc.isWritable() ? 0 : READ_ONLY;
- } else if (!IS_UNDEFINED(current)) {
- flag |= current.isWritable() ? 0 : READ_ONLY;
- } else {
- flag |= READ_ONLY;
- }
-
- var value = UNDEFINED; // Default value is undefined.
- if (desc.hasValue()) {
- value = desc.getValue();
- } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) {
- value = current.getValue();
- }
-
- %DefineDataPropertyUnchecked(obj, p, value, flag);
- } else {
- // There are 3 cases that lead here:
- // Step 4b - defining a new accessor property.
- // Steps 9c & 12 - replacing an existing data property with an accessor
- // property.
- // Step 12 - updating an existing accessor property with an accessor
- // descriptor.
- var getter = null;
- if (desc.hasGetter()) {
- getter = desc.getGet();
- } else if (IsAccessorDescriptor(current) && current.hasGetter()) {
- getter = current.getGet();
- }
- var setter = null;
- if (desc.hasSetter()) {
- setter = desc.getSet();
- } else if (IsAccessorDescriptor(current) && current.hasSetter()) {
- setter = current.getSet();
- }
- %DefineAccessorPropertyUnchecked(obj, p, getter, setter, flag);
- }
- return true;
-}
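
The step comments above follow ES5 section 8.12.9 ([[DefineOwnProperty]]). The rules are observable from script through Object.defineProperty; a small plain-JavaScript illustration of the non-configurable branch:

    var o = {};
    Object.defineProperty(o, "p", { value: 1, writable: false,
                                    configurable: false });

    // Step 10a above: a non-writable, non-configurable data property
    // cannot change its value (kRedefineDisallowed).
    try {
      Object.defineProperty(o, "p", { value: 2 });
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }

    // Redefining with the same value passes the %SameValue early-exit
    // checks at the top of the function and succeeds silently.
    Object.defineProperty(o, "p", { value: 1 });
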
-
-
-// ES5 section 15.4.5.1.
-function DefineArrayProperty(obj, p, desc, should_throw) {
- // Step 3 - Special handling for array index.
- if (!IS_SYMBOL(p)) {
- var index = TO_UINT32(p);
- var emit_splice = false;
- if (TO_STRING(index) == p && index != 4294967295) {
- var length = obj.length;
- if (index >= length && %IsObserved(obj)) {
- emit_splice = true;
- ObserveBeginPerformSplice(obj);
- }
-
- var length_desc = GetOwnPropertyJS(obj, "length");
- if ((index >= length && !length_desc.isWritable()) ||
- !DefineObjectProperty(obj, p, desc, true)) {
- if (emit_splice)
- ObserveEndPerformSplice(obj);
- if (should_throw) {
- throw MakeTypeError(kDefineDisallowed, p);
- } else {
- return false;
- }
- }
- if (index >= length) {
- obj.length = index + 1;
- }
- if (emit_splice) {
- ObserveEndPerformSplice(obj);
- ObserveEnqueueSpliceRecord(obj, length, [], index + 1 - length);
- }
- return true;
- }
- }
-
- // Step 5 - Fallback to default implementation.
- return DefineObjectProperty(obj, p, desc, should_throw);
-}
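
Two details worth noting in the array path: the `index != 4294967295` test excludes 2^32 - 1, which is a valid property name but never an array index, and a successful definition at or beyond the current length bumps `length`. Both are observable in plain JavaScript:

    var a = [1, 2];
    Object.defineProperty(a, "2", { value: 3, writable: true,
                                    enumerable: true, configurable: true });
    console.log(a.length);  // 3 -- the obj.length = index + 1 step above

    // With a non-writable length, growing the array must fail.
    var b = [1, 2];
    Object.defineProperty(b, "length", { writable: false });
    try {
      Object.defineProperty(b, "2", { value: 3 });
    } catch (e) {
      console.log(e instanceof TypeError);  // true (kDefineDisallowed)
    }
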
-
-
-// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
-function DefineOwnProperty(obj, p, desc, should_throw) {
- if (IS_PROXY(obj)) {
- // TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (IS_SYMBOL(p)) return false;
-
- var attributes = FromGenericPropertyDescriptor(desc);
- return DefineProxyProperty(obj, p, attributes, should_throw);
- } else if (IS_ARRAY(obj)) {
- return DefineArrayProperty(obj, p, desc, should_throw);
- } else {
- return DefineObjectProperty(obj, p, desc, should_throw);
- }
-}
-
-
-// ES6 section 19.1.2.9
-function ObjectGetPrototypeOf(obj) {
- return %_GetPrototype(TO_OBJECT(obj));
+ throw %make_type_error(kCalledNonCallable, typeof func);
}
// ES6 section 19.1.2.18.
@@ -749,7 +135,7 @@ function ObjectSetPrototypeOf(obj, proto) {
CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
if (proto !== null && !IS_RECEIVER(proto)) {
- throw MakeTypeError(kProtoObjectOrNull, proto);
+ throw %make_type_error(kProtoObjectOrNull, proto);
}
if (IS_RECEIVER(obj)) {
@@ -759,50 +145,9 @@ function ObjectSetPrototypeOf(obj, proto) {
return obj;
}
-
-// ES5 section 15.2.3.6.
-function ObjectDefineProperty(obj, p, attributes) {
- // The new pure-C++ implementation doesn't support O.o.
- // TODO(jkummerow): Implement missing features and remove fallback path.
- if (%IsObserved(obj)) {
- if (!IS_RECEIVER(obj)) {
- throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
- }
- var name = TO_NAME(p);
- var desc = ToPropertyDescriptor(attributes);
- DefineOwnProperty(obj, name, desc, true);
- return obj;
- }
- return %ObjectDefineProperty(obj, p, attributes);
-}
-
-
-// ES5 section 15.2.3.7.
-function ObjectDefineProperties(obj, properties) {
- // The new pure-C++ implementation doesn't support O.o.
- // TODO(jkummerow): Implement missing features and remove fallback path.
- if (%IsObserved(obj)) {
- if (!IS_RECEIVER(obj)) {
- throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
- }
- var props = TO_OBJECT(properties);
- var names = %GetOwnPropertyKeys(props, PROPERTY_FILTER_ONLY_ENUMERABLE);
- var descriptors = new InternalArray();
- for (var i = 0; i < names.length; i++) {
- descriptors.push(ToPropertyDescriptor(props[names[i]]));
- }
- for (var i = 0; i < names.length; i++) {
- DefineOwnProperty(obj, names[i], descriptors[i], true);
- }
- return obj;
- }
- return %ObjectDefineProperties(obj, properties);
-}
-
-
// ES6 B.2.2.1.1
function ObjectGetProto() {
- return %_GetPrototype(TO_OBJECT(this));
+ return %object_get_prototype_of(this);
}
@@ -841,27 +186,20 @@ utils.InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
"toLocaleString", ObjectToLocaleString,
"valueOf", ObjectValueOf,
"isPrototypeOf", ObjectIsPrototypeOf,
- "propertyIsEnumerable", ObjectPropertyIsEnumerable,
- "__defineGetter__", ObjectDefineGetter,
- "__lookupGetter__", ObjectLookupGetter,
- "__defineSetter__", ObjectDefineSetter,
- "__lookupSetter__", ObjectLookupSetter
+ // propertyIsEnumerable is added in bootstrapper.cc.
+ // __defineGetter__ is added in bootstrapper.cc.
+ // __lookupGetter__ is added in bootstrapper.cc.
+ // __defineSetter__ is added in bootstrapper.cc.
+ // __lookupSetter__ is added in bootstrapper.cc.
]);
-utils.InstallGetterSetter(GlobalObject.prototype, "__proto__", ObjectGetProto,
- ObjectSetProto);
+utils.InstallGetterSetter(
+ GlobalObject.prototype, "__proto__", ObjectGetProto, ObjectSetProto);
// Set up non-enumerable functions in the Object object.
utils.InstallFunctions(GlobalObject, DONT_ENUM, [
- // assign is added in bootstrapper.cc.
- // keys is added in bootstrapper.cc.
- "defineProperty", ObjectDefineProperty,
- "defineProperties", ObjectDefineProperties,
- "getPrototypeOf", ObjectGetPrototypeOf,
"setPrototypeOf", ObjectSetPrototypeOf,
// getOwnPropertySymbols is added in symbol.js.
- // is is added in bootstrapper.cc.
- // deliverChangeRecords, getNotifier, observe and unobserve are added
- // in object-observe.js.
+ // Others are added in bootstrapper.cc.
]);
@@ -869,124 +207,6 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
// ----------------------------------------------------------------------------
// Number
-// ES6 Number.prototype.toString([ radix ])
-function NumberToStringJS(radix) {
- // NOTE: Both Number objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- var number = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError(kNotGeneric, 'Number.prototype.toString');
- }
- // Get the value of this number in case it's an object.
- number = %_ValueOf(this);
- }
- // Fast case: Convert number in radix 10.
- if (IS_UNDEFINED(radix) || radix === 10) {
- return %_NumberToString(number);
- }
-
- // Convert the radix to an integer and check the range.
- radix = TO_INTEGER(radix);
- if (radix < 2 || radix > 36) throw MakeRangeError(kToRadixFormatRange);
- // Convert the number to a string in the given radix.
- return %NumberToRadixString(number, radix);
-}
-
-
-// ES6 20.1.3.4 Number.prototype.toLocaleString([reserved1 [, reserved2]])
-function NumberToLocaleString() {
- return %_Call(NumberToStringJS, this);
-}
-
-
-// ES6 20.1.3.7 Number.prototype.valueOf()
-function NumberValueOf() {
- // NOTE: Both Number objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError(kNotGeneric, 'Number.prototype.valueOf');
- }
- return %_ValueOf(this);
-}
-
-
-// ES6 20.1.3.3 Number.prototype.toFixed(fractionDigits)
-function NumberToFixedJS(fractionDigits) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "Number.prototype.toFixed", this);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
- }
- var f = TO_INTEGER(fractionDigits);
-
- if (f < 0 || f > 20) {
- throw MakeRangeError(kNumberFormatRange, "toFixed() digits");
- }
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == INFINITY) return "Infinity";
- if (x == -INFINITY) return "-Infinity";
-
- return %NumberToFixed(x, f);
-}
-
-
-// ES6 20.1.3.2 Number.prototype.toExponential(fractionDigits)
-function NumberToExponentialJS(fractionDigits) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "Number.prototype.toExponential", this);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
- }
- var f = IS_UNDEFINED(fractionDigits) ? UNDEFINED : TO_INTEGER(fractionDigits);
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == INFINITY) return "Infinity";
- if (x == -INFINITY) return "-Infinity";
-
- if (IS_UNDEFINED(f)) {
- f = -1; // Signal for runtime function that f is not defined.
- } else if (f < 0 || f > 20) {
- throw MakeRangeError(kNumberFormatRange, "toExponential()");
- }
- return %NumberToExponential(x, f);
-}
-
-
-// ES6 20.1.3.5 Number.prototype.toPrecision(precision)
-function NumberToPrecisionJS(precision) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
- "Number.prototype.toPrecision", this);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
- }
- if (IS_UNDEFINED(precision)) return TO_STRING(x);
- var p = TO_INTEGER(precision);
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == INFINITY) return "Infinity";
- if (x == -INFINITY) return "-Infinity";
-
- if (p < 1 || p > 21) {
- throw MakeRangeError(kToPrecisionFormatRange);
- }
- return %NumberToPrecision(x, p);
-}
-
-
// Harmony isFinite.
function NumberIsFinite(number) {
return IS_NUMBER(number) && NUMBER_IS_FINITE(number);
@@ -1010,7 +230,7 @@ function NumberIsSafeInteger(number) {
if (NumberIsFinite(number)) {
var integral = TO_INTEGER(number);
if (integral == number) {
- return MathAbs(integral) <= kMaxSafeInteger;
+ return -kMaxSafeInteger <= integral && integral <= kMaxSafeInteger;
}
}
return false;
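
The new form drops the MathAbs dependency by comparing against both bounds directly; behaviour is unchanged, since a safe integer n satisfies -(2^53 - 1) <= n <= 2^53 - 1. For instance:

    var kMaxSafeInteger = Math.pow(2, 53) - 1;  // 9007199254740991
    console.log(Number.isSafeInteger(kMaxSafeInteger));      // true
    console.log(Number.isSafeInteger(-kMaxSafeInteger));     // true
    console.log(Number.isSafeInteger(kMaxSafeInteger + 1));  // false
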
@@ -1019,13 +239,6 @@ function NumberIsSafeInteger(number) {
// ----------------------------------------------------------------------------
-%FunctionSetPrototype(GlobalNumber, new GlobalNumber(0));
-
-%OptimizeObjectForAddingMultipleProperties(GlobalNumber.prototype, 8);
-// Set up the constructor property on the Number prototype object.
-%AddNamedProperty(GlobalNumber.prototype, "constructor", GlobalNumber,
- DONT_ENUM);
-
utils.InstallConstants(GlobalNumber, [
// ECMA-262 section 15.7.3.1.
"MAX_VALUE", 1.7976931348623157e+308,
@@ -1040,19 +253,9 @@ utils.InstallConstants(GlobalNumber, [
// --- Harmony constants (no spec refs until settled.)
- "MAX_SAFE_INTEGER", %_MathPow(2, 53) - 1,
- "MIN_SAFE_INTEGER", -%_MathPow(2, 53) + 1,
- "EPSILON", %_MathPow(2, -52)
-]);
-
-// Set up non-enumerable functions on the Number prototype object.
-utils.InstallFunctions(GlobalNumber.prototype, DONT_ENUM, [
- "toString", NumberToStringJS,
- "toLocaleString", NumberToLocaleString,
- "valueOf", NumberValueOf,
- "toFixed", NumberToFixedJS,
- "toExponential", NumberToExponentialJS,
- "toPrecision", NumberToPrecisionJS
+ "MAX_SAFE_INTEGER", 9007199254740991,
+ "MIN_SAFE_INTEGER", -9007199254740991,
+ "EPSILON", 2.220446049250313e-16,
]);
// Harmony Number constructor additions
@@ -1077,11 +280,11 @@ function GetIterator(obj, method) {
method = obj[iteratorSymbol];
}
if (!IS_CALLABLE(method)) {
- throw MakeTypeError(kNotIterable, obj);
+ throw %make_type_error(kNotIterable, obj);
}
var iterator = %_Call(method, obj);
if (!IS_RECEIVER(iterator)) {
- throw MakeTypeError(kNotAnIterator, iterator);
+ throw %make_type_error(kNotAnIterator, iterator);
}
return iterator;
}
@@ -1092,12 +295,9 @@ function GetIterator(obj, method) {
utils.Export(function(to) {
to.GetIterator = GetIterator;
to.GetMethod = GetMethod;
- to.IsFinite = GlobalIsFinite;
to.IsNaN = GlobalIsNaN;
to.NumberIsNaN = NumberIsNaN;
to.NumberIsInteger = NumberIsInteger;
- to.ObjectDefineProperties = ObjectDefineProperties;
- to.ObjectDefineProperty = ObjectDefineProperty;
to.ObjectHasOwnProperty = GlobalObject.prototype.hasOwnProperty;
});
diff --git a/deps/v8/src/js/weak-collection.js b/deps/v8/src/js/weak-collection.js
index 308b9edef7..f5092d29f5 100644
--- a/deps/v8/src/js/weak-collection.js
+++ b/deps/v8/src/js/weak-collection.js
@@ -16,13 +16,11 @@ var GetHash;
var GlobalObject = global.Object;
var GlobalWeakMap = global.WeakMap;
var GlobalWeakSet = global.WeakSet;
-var MakeTypeError;
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
GetExistingHash = from.GetExistingHash;
GetHash = from.GetHash;
- MakeTypeError = from.MakeTypeError;
});
// -------------------------------------------------------------------
@@ -30,7 +28,7 @@ utils.Import(function(from) {
function WeakMapConstructor(iterable) {
if (IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kConstructorNotFunction, "WeakMap");
+ throw %make_type_error(kConstructorNotFunction, "WeakMap");
}
%WeakCollectionInitialize(this);
@@ -38,11 +36,11 @@ function WeakMapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, adder, 'set', this);
+ throw %make_type_error(kPropertyNotFunction, adder, 'set', this);
}
for (var nextItem of iterable) {
if (!IS_RECEIVER(nextItem)) {
- throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
+ throw %make_type_error(kIteratorValueNotAnObject, nextItem);
}
%_Call(adder, this, nextItem[0], nextItem[1]);
}
@@ -52,7 +50,7 @@ function WeakMapConstructor(iterable) {
function WeakMapGet(key) {
if (!IS_WEAKMAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'WeakMap.prototype.get', this);
}
if (!IS_RECEIVER(key)) return UNDEFINED;
@@ -64,17 +62,17 @@ function WeakMapGet(key) {
function WeakMapSet(key, value) {
if (!IS_WEAKMAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'WeakMap.prototype.set', this);
}
- if (!IS_RECEIVER(key)) throw MakeTypeError(kInvalidWeakMapKey);
+ if (!IS_RECEIVER(key)) throw %make_type_error(kInvalidWeakMapKey);
return %WeakCollectionSet(this, key, value, GetHash(key));
}
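
The IS_RECEIVER guards are observable from script: only objects may be WeakMap keys, and get/has with a primitive key simply miss rather than throw. In plain JavaScript:

    var wm = new WeakMap();
    try {
      wm.set(1, "x");                       // primitive key: kInvalidWeakMapKey
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }
    var key = {};
    wm.set(key, "x");
    console.log(wm.get(key));  // "x"
    console.log(wm.get(42));   // undefined -- non-receiver keys never match
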
function WeakMapHas(key) {
if (!IS_WEAKMAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'WeakMap.prototype.has', this);
}
if (!IS_RECEIVER(key)) return false;
@@ -86,7 +84,7 @@ function WeakMapHas(key) {
function WeakMapDelete(key) {
if (!IS_WEAKMAP(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'WeakMap.prototype.delete', this);
}
if (!IS_RECEIVER(key)) return false;
@@ -119,7 +117,7 @@ utils.InstallFunctions(GlobalWeakMap.prototype, DONT_ENUM, [
function WeakSetConstructor(iterable) {
if (IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kConstructorNotFunction, "WeakSet");
+ throw %make_type_error(kConstructorNotFunction, "WeakSet");
}
%WeakCollectionInitialize(this);
@@ -127,7 +125,7 @@ function WeakSetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
if (!IS_CALLABLE(adder)) {
- throw MakeTypeError(kPropertyNotFunction, adder, 'add', this);
+ throw %make_type_error(kPropertyNotFunction, adder, 'add', this);
}
for (var value of iterable) {
%_Call(adder, this, value);
@@ -138,17 +136,17 @@ function WeakSetConstructor(iterable) {
function WeakSetAdd(value) {
if (!IS_WEAKSET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'WeakSet.prototype.add', this);
}
- if (!IS_RECEIVER(value)) throw MakeTypeError(kInvalidWeakSetValue);
+ if (!IS_RECEIVER(value)) throw %make_type_error(kInvalidWeakSetValue);
return %WeakCollectionSet(this, value, true, GetHash(value));
}
function WeakSetHas(value) {
if (!IS_WEAKSET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'WeakSet.prototype.has', this);
}
if (!IS_RECEIVER(value)) return false;
@@ -160,7 +158,7 @@ function WeakSetHas(value) {
function WeakSetDelete(value) {
if (!IS_WEAKSET(this)) {
- throw MakeTypeError(kIncompatibleMethodReceiver,
+ throw %make_type_error(kIncompatibleMethodReceiver,
'WeakSet.prototype.delete', this);
}
if (!IS_RECEIVER(value)) return false;
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
new file mode 100644
index 0000000000..bf2fd0d673
--- /dev/null
+++ b/deps/v8/src/json-parser.cc
@@ -0,0 +1,812 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/json-parser.h"
+
+#include "src/char-predicates-inl.h"
+#include "src/conversions.h"
+#include "src/debug/debug.h"
+#include "src/factory.h"
+#include "src/field-type.h"
+#include "src/messages.h"
+#include "src/objects-inl.h"
+#include "src/parsing/scanner.h"
+#include "src/parsing/token.h"
+#include "src/property-descriptor.h"
+#include "src/transitions.h"
+
+namespace v8 {
+namespace internal {
+
+MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> reviver) {
+ DCHECK(reviver->IsCallable());
+ JsonParseInternalizer internalizer(isolate,
+ Handle<JSReceiver>::cast(reviver));
+ Handle<JSObject> holder =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<String> name = isolate->factory()->empty_string();
+ JSObject::AddProperty(holder, name, object, NONE);
+ return internalizer.InternalizeJsonProperty(holder, name);
+}
+
+MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
+ Handle<JSReceiver> holder, Handle<String> name) {
+ HandleScope outer_scope(isolate_);
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, value, Object::GetPropertyOrElement(holder, name), Object);
+ if (value->IsJSReceiver()) {
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
+ Maybe<bool> is_array = Object::IsArray(object);
+ if (is_array.IsNothing()) return MaybeHandle<Object>();
+ if (is_array.FromJust()) {
+ Handle<Object> length_object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, length_object,
+ Object::GetLengthFromArrayLike(isolate_, object), Object);
+ double length = length_object->Number();
+ for (double i = 0; i < length; i++) {
+ HandleScope inner_scope(isolate_);
+ Handle<Object> index = isolate_->factory()->NewNumber(i);
+ Handle<String> name = isolate_->factory()->NumberToString(index);
+ if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
+ }
+ } else {
+ Handle<FixedArray> contents;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, contents,
+ KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString),
+ Object);
+ for (int i = 0; i < contents->length(); i++) {
+ HandleScope inner_scope(isolate_);
+ Handle<String> name(String::cast(contents->get(i)), isolate_);
+ if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
+ }
+ }
+ }
+ Handle<Object> argv[] = {name, value};
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, result, Execution::Call(isolate_, reviver_, holder, 2, argv),
+ Object);
+ return outer_scope.CloseAndEscape(result);
+}
+
+bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
+ Handle<String> name) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, result, InternalizeJsonProperty(holder, name), false);
+ Maybe<bool> change_result = Nothing<bool>();
+ if (result->IsUndefined(isolate_)) {
+ change_result = JSReceiver::DeletePropertyOrElement(holder, name, SLOPPY);
+ } else {
+ PropertyDescriptor desc;
+ desc.set_value(result);
+ desc.set_configurable(true);
+ desc.set_enumerable(true);
+ desc.set_writable(true);
+ change_result = JSReceiver::DefineOwnProperty(isolate_, holder, name, &desc,
+ Object::DONT_THROW);
+ }
+ MAYBE_RETURN(change_result, false);
+ return true;
+}
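
JsonParseInternalizer implements the reviver walk of ES6 24.3.1.1 (InternalizeJSONProperty): properties are revived bottom-up, a reviver returning undefined takes the DeletePropertyOrElement branch above, and any other result is written back through DefineOwnProperty with a writable/enumerable/configurable descriptor. Observable behaviour:

    var result = JSON.parse('{"a": 1, "b": 2}', function (key, value) {
      if (key === "b") return undefined;   // property is deleted
      if (typeof value === "number") return 10 * value;
      return value;                        // the root arrives last, key === ""
    });
    console.log(result);  // { a: 10 }
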
+
+template <bool seq_one_byte>
+JsonParser<seq_one_byte>::JsonParser(Isolate* isolate, Handle<String> source)
+ : source_(source),
+ source_length_(source->length()),
+ isolate_(isolate),
+ factory_(isolate_->factory()),
+ zone_(isolate_->allocator()),
+ object_constructor_(isolate_->native_context()->object_function(),
+ isolate_),
+ position_(-1) {
+ source_ = String::Flatten(source_);
+ pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
+
+ // Optimized fast case where we only have Latin1 characters.
+ if (seq_one_byte) {
+ seq_source_ = Handle<SeqOneByteString>::cast(source_);
+ }
+}
+
+template <bool seq_one_byte>
+MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
+ // Advance to the first character (possibly EOS)
+ AdvanceSkipWhitespace();
+ Handle<Object> result = ParseJsonValue();
+ if (result.is_null() || c0_ != kEndOfString) {
+ // Some exception (for example stack overflow) is already pending.
+ if (isolate_->has_pending_exception()) return Handle<Object>::null();
+
+ // Parse failed. Current character is the unexpected token.
+ Factory* factory = this->factory();
+ MessageTemplate::Template message;
+ Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
+ Handle<Object> arg2;
+
+ switch (c0_) {
+ case kEndOfString:
+ message = MessageTemplate::kJsonParseUnexpectedEOS;
+ break;
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
+ break;
+ case '"':
+ message = MessageTemplate::kJsonParseUnexpectedTokenString;
+ break;
+ default:
+ message = MessageTemplate::kJsonParseUnexpectedToken;
+ arg2 = arg1;
+ arg1 = factory->LookupSingleCharacterStringFromCode(c0_);
+ break;
+ }
+
+ Handle<Script> script(factory->NewScript(source_));
+ // We should send a compile error event because we compile the JSON
+ // object in a separate source file.
+ isolate()->debug()->OnCompileError(script);
+ MessageLocation location(script, position_, position_ + 1);
+ Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
+ return isolate()->template Throw<Object>(error, &location);
+ }
+ return result;
+}
+
+MaybeHandle<Object> InternalizeJsonProperty(Handle<JSObject> holder,
+ Handle<String> key);
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::Advance() {
+ position_++;
+ if (position_ >= source_length_) {
+ c0_ = kEndOfString;
+ } else if (seq_one_byte) {
+ c0_ = seq_source_->SeqOneByteStringGet(position_);
+ } else {
+ c0_ = source_->Get(position_);
+ }
+}
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::AdvanceSkipWhitespace() {
+ do {
+ Advance();
+ } while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r');
+}
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::SkipWhitespace() {
+ while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r') {
+ Advance();
+ }
+}
+
+template <bool seq_one_byte>
+uc32 JsonParser<seq_one_byte>::AdvanceGetChar() {
+ Advance();
+ return c0_;
+}
+
+template <bool seq_one_byte>
+bool JsonParser<seq_one_byte>::MatchSkipWhiteSpace(uc32 c) {
+ if (c0_ == c) {
+ AdvanceSkipWhitespace();
+ return true;
+ }
+ return false;
+}
+
+template <bool seq_one_byte>
+bool JsonParser<seq_one_byte>::ParseJsonString(Handle<String> expected) {
+ int length = expected->length();
+ if (source_->length() - position_ - 1 > length) {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent content = expected->GetFlatContent();
+ if (content.IsOneByte()) {
+ DCHECK_EQ('"', c0_);
+ const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
+ const uint8_t* expected_chars = content.ToOneByteVector().start();
+ for (int i = 0; i < length; i++) {
+ uint8_t c0 = input_chars[i];
+ if (c0 != expected_chars[i] || c0 == '"' || c0 < 0x20 || c0 == '\\') {
+ return false;
+ }
+ }
+ if (input_chars[length] == '"') {
+ position_ = position_ + length + 1;
+ AdvanceSkipWhitespace();
+ return true;
+ }
+ }
+ }
+ return false;
+}
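
This is the key-matching fast path used while following map transitions: the upcoming source characters are compared directly against an already-internalized expected key, and the match is abandoned on any mismatch, quote, control character, or escape. A hedged plain-JavaScript stand-in over an ordinary string (matchExpectedKey is illustrative, not a V8 API):

    // Match `expected` immediately after an opening '"' at src[pos].
    // Returns the position just past the closing quote, or -1 to signal
    // that the caller should take the general string-scanning path.
    function matchExpectedKey(src, pos, expected) {
      if (src.length - pos - 1 <= expected.length) return -1;
      for (var i = 0; i < expected.length; i++) {
        var c = src.charCodeAt(pos + 1 + i);
        if (c !== expected.charCodeAt(i) ||
            c === 0x22 /* " */ || c === 0x5c /* \ */ || c < 0x20) {
          return -1;
        }
      }
      return src.charCodeAt(pos + 1 + expected.length) === 0x22
          ? pos + expected.length + 2
          : -1;
    }

    console.log(matchExpectedKey('{"name":1}', 1, "name"));  // 7 (at the ':')
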
+
+// Parse any JSON value.
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
+ StackLimitCheck stack_check(isolate_);
+ if (stack_check.HasOverflowed()) {
+ isolate_->StackOverflow();
+ return Handle<Object>::null();
+ }
+
+ if (stack_check.InterruptRequested() &&
+ isolate_->stack_guard()->HandleInterrupts()->IsException(isolate_)) {
+ return Handle<Object>::null();
+ }
+
+ if (c0_ == '"') return ParseJsonString();
+ if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
+ if (c0_ == '{') return ParseJsonObject();
+ if (c0_ == '[') return ParseJsonArray();
+ if (c0_ == 'f') {
+ if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return factory()->false_value();
+ }
+ return ReportUnexpectedCharacter();
+ }
+ if (c0_ == 't') {
+ if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
+ AdvanceGetChar() == 'e') {
+ AdvanceSkipWhitespace();
+ return factory()->true_value();
+ }
+ return ReportUnexpectedCharacter();
+ }
+ if (c0_ == 'n') {
+ if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
+ AdvanceGetChar() == 'l') {
+ AdvanceSkipWhitespace();
+ return factory()->null_value();
+ }
+ return ReportUnexpectedCharacter();
+ }
+ return ReportUnexpectedCharacter();
+}
+
+template <bool seq_one_byte>
+ParseElementResult JsonParser<seq_one_byte>::ParseElement(
+ Handle<JSObject> json_object) {
+ uint32_t index = 0;
+ // Maybe an array index, try to parse it.
+ if (c0_ == '0') {
+ // With a leading zero, the string has to be "0" only to be an index.
+ Advance();
+ } else {
+ do {
+ int d = c0_ - '0';
+ if (index > 429496729U - ((d + 3) >> 3)) break;
+ index = (index * 10) + d;
+ Advance();
+ } while (IsDecimalDigit(c0_));
+ }
+
+ if (c0_ == '"') {
+ // Successfully parsed index, parse and store element.
+ AdvanceSkipWhitespace();
+
+ if (c0_ == ':') {
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (!value.is_null()) {
+ JSObject::SetOwnElementIgnoreAttributes(json_object, index, value, NONE)
+ .Assert();
+ return kElementFound;
+ } else {
+ return kNullHandle;
+ }
+ }
+ }
+ return kElementNotFound;
+}
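
The guard `index > 429496729U - ((d + 3) >> 3)` deserves a note: 429496729 is floor((2^32 - 1) / 10), and `(d + 3) >> 3` is 1 exactly when d >= 5, so the test rejects any step where index * 10 + d would exceed 4294967294, the largest valid array index (2^32 - 1 itself is excluded). A plain-JavaScript sketch of just this accumulation (parseArrayIndex is illustrative; the leading-zero rule is handled separately above):

    function parseArrayIndex(digits) {
      var index = 0;
      for (var i = 0; i < digits.length; i++) {
        var d = digits.charCodeAt(i) - 48;  // '0'
        // (d + 3) >> 3 is 1 for d >= 5, so this rejects exactly the
        // steps where index * 10 + d > 4294967294.
        if (index > 429496729 - ((d + 3) >> 3)) return -1;
        index = index * 10 + d;
      }
      return index;
    }

    console.log(parseArrayIndex("4294967294"));  // largest array index
    console.log(parseArrayIndex("4294967295"));  // -1: 2^32 - 1 is not an index
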
+
+// Parse a JSON object. Position must be right at '{'.
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
+ HandleScope scope(isolate());
+ Handle<JSObject> json_object =
+ factory()->NewJSObject(object_constructor(), pretenure_);
+ Handle<Map> map(json_object->map());
+ int descriptor = 0;
+ ZoneList<Handle<Object> > properties(8, zone());
+ DCHECK_EQ(c0_, '{');
+
+ bool transitioning = true;
+
+ AdvanceSkipWhitespace();
+ if (c0_ != '}') {
+ do {
+ if (c0_ != '"') return ReportUnexpectedCharacter();
+
+ int start_position = position_;
+ Advance();
+
+ if (IsDecimalDigit(c0_)) {
+ ParseElementResult element_result = ParseElement(json_object);
+ if (element_result == kNullHandle) return Handle<Object>::null();
+ if (element_result == kElementFound) continue;
+ }
+ // Not an index; fall back to the slow path.
+
+ position_ = start_position;
+#ifdef DEBUG
+ c0_ = '"';
+#endif
+
+ Handle<String> key;
+ Handle<Object> value;
+
+ // Try to follow existing transitions as long as possible. Once we stop
+ // transitioning, no transition can be found anymore.
+ DCHECK(transitioning);
+ // First check whether there is a single expected transition. If so, try
+ // to parse it first.
+ bool follow_expected = false;
+ Handle<Map> target;
+ if (seq_one_byte) {
+ key = TransitionArray::ExpectedTransitionKey(map);
+ follow_expected = !key.is_null() && ParseJsonString(key);
+ }
+ // If the expected transition hits, follow it.
+ if (follow_expected) {
+ target = TransitionArray::ExpectedTransitionTarget(map);
+ } else {
+ // If the expected transition failed, parse an internalized string and
+ // try to find a matching transition.
+ key = ParseJsonInternalizedString();
+ if (key.is_null()) return ReportUnexpectedCharacter();
+
+ target = TransitionArray::FindTransitionToField(map, key);
+ // If a transition was found, follow it and continue.
+ transitioning = !target.is_null();
+ }
+ if (c0_ != ':') return ReportUnexpectedCharacter();
+
+ AdvanceSkipWhitespace();
+ value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
+ if (transitioning) {
+ PropertyDetails details =
+ target->instance_descriptors()->GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+
+ if (value->FitsRepresentation(expected_representation)) {
+ if (expected_representation.IsHeapObject() &&
+ !target->instance_descriptors()
+ ->GetFieldType(descriptor)
+ ->NowContains(value)) {
+ Handle<FieldType> value_type(
+ value->OptimalType(isolate(), expected_representation));
+ Map::GeneralizeFieldType(target, descriptor,
+ expected_representation, value_type);
+ }
+ DCHECK(target->instance_descriptors()
+ ->GetFieldType(descriptor)
+ ->NowContains(value));
+ properties.Add(value, zone());
+ map = target;
+ descriptor++;
+ continue;
+ } else {
+ transitioning = false;
+ }
+ }
+
+ DCHECK(!transitioning);
+
+ // Commit the intermediate state to the object and stop transitioning.
+ CommitStateToJsonObject(json_object, map, &properties);
+
+ JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
+ .Check();
+ } while (transitioning && MatchSkipWhiteSpace(','));
+
+ // If we transitioned until the very end, transition the map now.
+ if (transitioning) {
+ CommitStateToJsonObject(json_object, map, &properties);
+ } else {
+ while (MatchSkipWhiteSpace(',')) {
+ HandleScope local_scope(isolate());
+ if (c0_ != '"') return ReportUnexpectedCharacter();
+
+ int start_position = position_;
+ Advance();
+
+ if (IsDecimalDigit(c0_)) {
+ ParseElementResult element_result = ParseElement(json_object);
+ if (element_result == kNullHandle) return Handle<Object>::null();
+ if (element_result == kElementFound) continue;
+ }
+ // Not an index; fall back to the slow path.
+
+ position_ = start_position;
+#ifdef DEBUG
+ c0_ = '"';
+#endif
+
+ Handle<String> key;
+ Handle<Object> value;
+
+ key = ParseJsonInternalizedString();
+ if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+
+ AdvanceSkipWhitespace();
+ value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
+
+ JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key,
+ value)
+ .Check();
+ }
+ }
+
+ if (c0_ != '}') {
+ return ReportUnexpectedCharacter();
+ }
+ }
+ AdvanceSkipWhitespace();
+ return scope.CloseAndEscape(json_object);
+}
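
The transitioning loop means a run of objects with identical key order is parsed onto a shared map: for each key the parser first tries the single expected transition (ExpectedTransitionKey, matched byte-for-byte via ParseJsonString(key)), then a named transition lookup, and only falls back to generic property definition once no transition matches. The effect, illustrated from script (the map sharing itself is internal and not directly observable):

    // Both elements are parsed with the same key order, so the second
    // object replays the transitions the first one created.
    var arr = JSON.parse('[{"x": 1, "y": 2}, {"x": 3, "y": 4}]');
    console.log(Object.keys(arr[0]));  // ["x", "y"]
    console.log(Object.keys(arr[1]));  // ["x", "y"] -- same shape, same order
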
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::CommitStateToJsonObject(
+ Handle<JSObject> json_object, Handle<Map> map,
+ ZoneList<Handle<Object> >* properties) {
+ JSObject::AllocateStorageForMap(json_object, map);
+ DCHECK(!json_object->map()->is_dictionary_map());
+
+ DisallowHeapAllocation no_gc;
+
+ int length = properties->length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value = (*properties)[i];
+ json_object->WriteToField(i, *value);
+ }
+}
+
+// Parse a JSON array. Position must be right at '['.
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
+ HandleScope scope(isolate());
+ ZoneList<Handle<Object> > elements(4, zone());
+ DCHECK_EQ(c0_, '[');
+
+ AdvanceSkipWhitespace();
+ if (c0_ != ']') {
+ do {
+ Handle<Object> element = ParseJsonValue();
+ if (element.is_null()) return ReportUnexpectedCharacter();
+ elements.Add(element, zone());
+ } while (MatchSkipWhiteSpace(','));
+ if (c0_ != ']') {
+ return ReportUnexpectedCharacter();
+ }
+ }
+ AdvanceSkipWhitespace();
+ // Allocate a fixed array with all the elements.
+ Handle<FixedArray> fast_elements =
+ factory()->NewFixedArray(elements.length(), pretenure_);
+ for (int i = 0, n = elements.length(); i < n; i++) {
+ fast_elements->set(i, *elements[i]);
+ }
+ Handle<Object> json_array = factory()->NewJSArrayWithElements(
+ fast_elements, FAST_ELEMENTS, pretenure_);
+ return scope.CloseAndEscape(json_array);
+}
+
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
+ bool negative = false;
+ int beg_pos = position_;
+ if (c0_ == '-') {
+ Advance();
+ negative = true;
+ }
+ if (c0_ == '0') {
+ Advance();
+ // Prefix zero is only allowed if it's the only digit before
+ // a decimal point or exponent.
+ if (IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
+ } else {
+ int i = 0;
+ int digits = 0;
+ if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
+ do {
+ i = i * 10 + c0_ - '0';
+ digits++;
+ Advance();
+ } while (IsDecimalDigit(c0_));
+ if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
+ SkipWhitespace();
+ return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
+ }
+ }
+ if (c0_ == '.') {
+ Advance();
+ if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
+ do {
+ Advance();
+ } while (IsDecimalDigit(c0_));
+ }
+ if (AsciiAlphaToLower(c0_) == 'e') {
+ Advance();
+ if (c0_ == '-' || c0_ == '+') Advance();
+ if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
+ do {
+ Advance();
+ } while (IsDecimalDigit(c0_));
+ }
+ int length = position_ - beg_pos;
+ double number;
+ if (seq_one_byte) {
+ Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
+ number = StringToDouble(isolate()->unicode_cache(), chars,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ std::numeric_limits<double>::quiet_NaN());
+ } else {
+ Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
+ String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
+ Vector<const uint8_t> result =
+ Vector<const uint8_t>(buffer.start(), length);
+ number = StringToDouble(isolate()->unicode_cache(), result,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ 0.0);
+ buffer.Dispose();
+ }
+ SkipWhitespace();
+ return factory()->NewNumber(number, pretenure_);
+}
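
ParseJsonNumber validates JSON's number grammar while scanning, with a Smi shortcut: an integer of fewer than ten digits and no fraction or exponent is materialized via Smi::FromInt, and everything else re-reads the consumed span through StringToDouble. The grammar checks are observable:

    console.log(JSON.parse("123456789"));  // nine digits: the Smi fast path
    console.log(JSON.parse("1.5e3"));      // 1500: the StringToDouble path
    try { JSON.parse("01"); }              // prefix zero before another digit
    catch (e) { console.log(e instanceof SyntaxError); }  // true
    try { JSON.parse(".5"); }              // a digit must precede the '.'
    catch (e) { console.log(e instanceof SyntaxError); }  // true
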
+
+template <typename StringType>
+inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
+
+template <>
+inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
+ seq_str->SeqTwoByteStringSet(i, c);
+}
+
+template <>
+inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
+ seq_str->SeqOneByteStringSet(i, c);
+}
+
+template <typename StringType>
+inline Handle<StringType> NewRawString(Factory* factory, int length,
+ PretenureFlag pretenure);
+
+template <>
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length,
+ PretenureFlag pretenure) {
+ return factory->NewRawTwoByteString(length, pretenure).ToHandleChecked();
+}
+
+template <>
+inline Handle<SeqOneByteString> NewRawString(Factory* factory, int length,
+ PretenureFlag pretenure) {
+ return factory->NewRawOneByteString(length, pretenure).ToHandleChecked();
+}
+
+// Scans the rest of a JSON string starting from position_ and writes
+// prefix[start..end] along with the scanned characters into a
+// sequential string of type StringType.
+template <bool seq_one_byte>
+template <typename StringType, typename SinkChar>
+Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
+ Handle<String> prefix, int start, int end) {
+ int count = end - start;
+ int max_length = count + source_length_ - position_;
+ int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
+ Handle<StringType> seq_string =
+ NewRawString<StringType>(factory(), length, pretenure_);
+ // Copy the prefix into seq_string.
+ SinkChar* dest = seq_string->GetChars();
+ String::WriteToFlat(*prefix, dest, start, end);
+
+ while (c0_ != '"') {
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Handle<String>::null();
+ if (count >= length) {
+ // We need to create a longer sequential string for the result.
+ return SlowScanJsonString<StringType, SinkChar>(seq_string, 0, count);
+ }
+ if (c0_ != '\\') {
+ // If the sink can contain UC16 characters, or source_ contains only
+ // Latin1 characters, there's no need to test whether we can store the
+ // character. Otherwise check whether the UC16 source character can fit
+ // in the Latin1 sink.
+ if (sizeof(SinkChar) == kUC16Size || seq_one_byte ||
+ c0_ <= String::kMaxOneByteCharCode) {
+ SeqStringSet(seq_string, count++, c0_);
+ Advance();
+ } else {
+ // StringType is SeqOneByteString and we just read a non-Latin1 char.
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
+ }
+ } else {
+ Advance(); // Advance past the \.
+ switch (c0_) {
+ case '"':
+ case '\\':
+ case '/':
+ SeqStringSet(seq_string, count++, c0_);
+ break;
+ case 'b':
+ SeqStringSet(seq_string, count++, '\x08');
+ break;
+ case 'f':
+ SeqStringSet(seq_string, count++, '\x0c');
+ break;
+ case 'n':
+ SeqStringSet(seq_string, count++, '\x0a');
+ break;
+ case 'r':
+ SeqStringSet(seq_string, count++, '\x0d');
+ break;
+ case 't':
+ SeqStringSet(seq_string, count++, '\x09');
+ break;
+ case 'u': {
+ uc32 value = 0;
+ for (int i = 0; i < 4; i++) {
+ Advance();
+ int digit = HexValue(c0_);
+ if (digit < 0) {
+ return Handle<String>::null();
+ }
+ value = value * 16 + digit;
+ }
+ if (sizeof(SinkChar) == kUC16Size ||
+ value <= String::kMaxOneByteCharCode) {
+ SeqStringSet(seq_string, count++, value);
+ break;
+ } else {
+ // StringType is SeqOneByteString and we just read a non-Latin1
+ // char.
+ position_ -= 6; // Rewind position_ to \ in \uxxxx.
+ Advance();
+ return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0,
+ count);
+ }
+ }
+ default:
+ return Handle<String>::null();
+ }
+ Advance();
+ }
+ }
+
+ DCHECK_EQ('"', c0_);
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+
+ // Shrink seq_string length to count and return.
+ return SeqString::Truncate(seq_string, count);
+}
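
SlowScanJsonString carries the general cases: it decodes the seven simple escapes plus \uxxxx, restarts with a doubled buffer when the sink fills, and promotes a one-byte sink to two-byte on the first character above the Latin1 range (rewinding six positions so the \uxxxx escape is re-read). A small plain-JavaScript sketch of just the escape decoding (decodeJsonEscape is illustrative only):

    // Decode one JSON escape, starting at the character after the '\'.
    // Returns [charCode, charactersConsumed], or null on a bad escape.
    function decodeJsonEscape(src, pos) {
      var simple = { '"': 0x22, "\\": 0x5c, "/": 0x2f,
                     b: 0x08, f: 0x0c, n: 0x0a, r: 0x0d, t: 0x09 };
      var c = src[pos];
      if (Object.prototype.hasOwnProperty.call(simple, c)) return [simple[c], 1];
      if (c === "u") {
        var value = 0;
        for (var i = 1; i <= 4; i++) {
          var digit = parseInt(src[pos + i], 16);
          if (isNaN(digit)) return null;  // HexValue(c0_) < 0 above
          value = value * 16 + digit;
        }
        return [value, 5];
      }
      return null;  // any other escape is a syntax error
    }

    console.log(decodeJsonEscape("n", 0));      // [10, 1]  -> "\n"
    console.log(decodeJsonEscape("u00e9", 0));  // [233, 5] -> "é"
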
+
+template <bool seq_one_byte>
+template <bool is_internalized>
+Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
+ DCHECK_EQ('"', c0_);
+ Advance();
+ if (c0_ == '"') {
+ AdvanceSkipWhitespace();
+ return factory()->empty_string();
+ }
+
+ if (seq_one_byte && is_internalized) {
+ // Fast path for existing internalized strings. If the string being
+ // parsed is not a known internalized string, contains backslashes, or
+ // unexpectedly reaches the end of the string, return an empty handle.
+ uint32_t running_hash = isolate()->heap()->HashSeed();
+ int position = position_;
+ uc32 c0 = c0_;
+ do {
+ if (c0 == '\\') {
+ c0_ = c0;
+ int beg_pos = position_;
+ position_ = position;
+ return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, beg_pos,
+ position_);
+ }
+ if (c0 < 0x20) return Handle<String>::null();
+ running_hash = StringHasher::AddCharacterCore(running_hash,
+ static_cast<uint16_t>(c0));
+ position++;
+ if (position >= source_length_) return Handle<String>::null();
+ c0 = seq_source_->SeqOneByteStringGet(position);
+ } while (c0 != '"');
+ int length = position - position_;
+ uint32_t hash = (length <= String::kMaxHashCalcLength)
+ ? StringHasher::GetHashCore(running_hash)
+ : static_cast<uint32_t>(length);
+ Vector<const uint8_t> string_vector(seq_source_->GetChars() + position_,
+ length);
+ StringTable* string_table = isolate()->heap()->string_table();
+ uint32_t capacity = string_table->Capacity();
+ uint32_t entry = StringTable::FirstProbe(hash, capacity);
+ uint32_t count = 1;
+ Handle<String> result;
+ while (true) {
+ Object* element = string_table->KeyAt(entry);
+ if (element->IsUndefined(isolate())) {
+ // Lookup failure.
+ result =
+ factory()->InternalizeOneByteString(seq_source_, position_, length);
+ break;
+ }
+ if (!element->IsTheHole(isolate()) &&
+ String::cast(element)->IsOneByteEqualTo(string_vector)) {
+ result = Handle<String>(String::cast(element), isolate());
+#ifdef DEBUG
+ uint32_t hash_field =
+ (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
+ DCHECK_EQ(static_cast<int>(result->Hash()),
+ static_cast<int>(hash_field >> String::kHashShift));
+#endif
+ break;
+ }
+ entry = StringTable::NextProbe(entry, count++, capacity);
+ }
+ position_ = position;
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return result;
+ }
+
+ int beg_pos = position_;
+ // Fast case for Latin1 only without escape characters.
+ do {
+ // Check for control character (0x00-0x1f) or unterminated string (<0).
+ if (c0_ < 0x20) return Handle<String>::null();
+ if (c0_ != '\\') {
+ if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
+ Advance();
+ } else {
+ return SlowScanJsonString<SeqTwoByteString, uc16>(source_, beg_pos,
+ position_);
+ }
+ } else {
+ return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, beg_pos,
+ position_);
+ }
+ } while (c0_ != '"');
+ int length = position_ - beg_pos;
+ Handle<String> result =
+ factory()->NewRawOneByteString(length, pretenure_).ToHandleChecked();
+ uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
+ String::WriteToFlat(*source_, dest, beg_pos, position_);
+
+ DCHECK_EQ('"', c0_);
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return result;
+}
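
The internalized fast path hashes the key while scanning it and then probes the heap's string table in place, so repeated keys are deduplicated without materializing a temporary string first. The probing follows V8's usual open-addressing scheme; a sketch of the probe sequence under that assumption (probeSequence is a hypothetical helper):

    // Probe an open-addressed table of power-of-two `capacity`, as in
    // StringTable::FirstProbe / NextProbe (offsets grow by 1, 2, 3, ...,
    // i.e. quadratic probing). Illustrative sketch only.
    function probeSequence(hash, capacity, steps) {
      var seq = [];
      var entry = hash & (capacity - 1);           // FirstProbe
      for (var count = 1; count <= steps; count++) {
        seq.push(entry);
        entry = (entry + count) & (capacity - 1);  // NextProbe
      }
      return seq;
    }

    console.log(probeSequence(0x1234, 16, 4));  // [4, 5, 7, 10]
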
+
+// Explicit instantiation.
+template class JsonParser<true>;
+template class JsonParser<false>;
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 1b9829fa40..2d08fefda9 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -5,95 +5,70 @@
#ifndef V8_JSON_PARSER_H_
#define V8_JSON_PARSER_H_
-#include "src/char-predicates.h"
-#include "src/conversions.h"
-#include "src/debug/debug.h"
#include "src/factory.h"
-#include "src/field-type.h"
-#include "src/messages.h"
-#include "src/parsing/scanner.h"
-#include "src/parsing/token.h"
-#include "src/transitions.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
enum ParseElementResult { kElementFound, kElementNotFound, kNullHandle };
+class JsonParseInternalizer BASE_EMBEDDED {
+ public:
+ static MaybeHandle<Object> Internalize(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> reviver);
+
+ private:
+ JsonParseInternalizer(Isolate* isolate, Handle<JSReceiver> reviver)
+ : isolate_(isolate), reviver_(reviver) {}
+
+ MaybeHandle<Object> InternalizeJsonProperty(Handle<JSReceiver> holder,
+ Handle<String> key);
+
+ bool RecurseAndApply(Handle<JSReceiver> holder, Handle<String> name);
+
+ Isolate* isolate_;
+ Handle<JSReceiver> reviver_;
+};
// A simple json parser.
template <bool seq_one_byte>
class JsonParser BASE_EMBEDDED {
public:
- MUST_USE_RESULT static MaybeHandle<Object> Parse(Handle<String> source) {
- return JsonParser(source).ParseJson();
+ MUST_USE_RESULT static MaybeHandle<Object> Parse(Isolate* isolate,
+ Handle<String> source,
+ Handle<Object> reviver) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ JsonParser(isolate, source).ParseJson(), Object);
+ if (reviver->IsCallable()) {
+ return JsonParseInternalizer::Internalize(isolate, result, reviver);
+ }
+ return result;
}
static const int kEndOfString = -1;
private:
- explicit JsonParser(Handle<String> source)
- : source_(source),
- source_length_(source->length()),
- isolate_(source->map()->GetHeap()->isolate()),
- factory_(isolate_->factory()),
- zone_(isolate_->allocator()),
- object_constructor_(isolate_->native_context()->object_function(),
- isolate_),
- position_(-1) {
- source_ = String::Flatten(source_);
- pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
-
- // Optimized fast case where we only have Latin1 characters.
- if (seq_one_byte) {
- seq_source_ = Handle<SeqOneByteString>::cast(source_);
- }
- }
+ JsonParser(Isolate* isolate, Handle<String> source);
// Parse a string containing a single JSON value.
MaybeHandle<Object> ParseJson();
- inline void Advance() {
- position_++;
- if (position_ >= source_length_) {
- c0_ = kEndOfString;
- } else if (seq_one_byte) {
- c0_ = seq_source_->SeqOneByteStringGet(position_);
- } else {
- c0_ = source_->Get(position_);
- }
- }
+ INLINE(void Advance());
// The JSON lexical grammar is specified in the ECMAScript 5 standard,
// section 15.12.1.1. The only allowed whitespace characters between tokens
// are tab, carriage-return, newline and space.
- inline void AdvanceSkipWhitespace() {
- do {
- Advance();
- } while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r');
- }
-
- inline void SkipWhitespace() {
- while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r') {
- Advance();
- }
- }
-
- inline uc32 AdvanceGetChar() {
- Advance();
- return c0_;
- }
+ INLINE(void AdvanceSkipWhitespace());
+ INLINE(void SkipWhitespace());
+ INLINE(uc32 AdvanceGetChar());
// Checks that the current character is c.
// If so, then consume c and skip whitespace.
- inline bool MatchSkipWhiteSpace(uc32 c) {
- if (c0_ == c) {
- AdvanceSkipWhitespace();
- return true;
- }
- return false;
- }
+ INLINE(bool MatchSkipWhiteSpace(uc32 c));
// A JSON string (production JSONString) is a subset of valid JavaScript string
// literals. The string must only be double-quoted (not single-quoted), and
@@ -103,30 +78,7 @@ class JsonParser BASE_EMBEDDED {
return ScanJsonString<false>();
}
- bool ParseJsonString(Handle<String> expected) {
- int length = expected->length();
- if (source_->length() - position_ - 1 > length) {
- DisallowHeapAllocation no_gc;
- String::FlatContent content = expected->GetFlatContent();
- if (content.IsOneByte()) {
- DCHECK_EQ('"', c0_);
- const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
- const uint8_t* expected_chars = content.ToOneByteVector().start();
- for (int i = 0; i < length; i++) {
- uint8_t c0 = input_chars[i];
- if (c0 != expected_chars[i] || c0 == '"' || c0 < 0x20 || c0 == '\\') {
- return false;
- }
- }
- if (input_chars[length] == '"') {
- position_ = position_ + length + 1;
- AdvanceSkipWhitespace();
- return true;
- }
- }
- }
- return false;
- }
+ bool ParseJsonString(Handle<String> expected);
Handle<String> ParseJsonInternalizedString() {
Handle<String> result = ScanJsonString<true>();
@@ -188,7 +140,6 @@ class JsonParser BASE_EMBEDDED {
static const int kInitialSpecialStringLength = 32;
static const int kPretenureTreshold = 100 * 1024;
-
private:
Zone* zone() { return &zone_; }
@@ -208,639 +159,6 @@ class JsonParser BASE_EMBEDDED {
int position_;
};
-template <bool seq_one_byte>
-MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
- // Advance to the first character (possibly EOS)
- AdvanceSkipWhitespace();
- Handle<Object> result = ParseJsonValue();
- if (result.is_null() || c0_ != kEndOfString) {
- // Some exception (for example stack overflow) is already pending.
- if (isolate_->has_pending_exception()) return Handle<Object>::null();
-
- // Parse failed. Current character is the unexpected token.
- Factory* factory = this->factory();
- MessageTemplate::Template message;
- Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
- Handle<Object> arg2;
-
- switch (c0_) {
- case kEndOfString:
- message = MessageTemplate::kJsonParseUnexpectedEOS;
- break;
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
- break;
- case '"':
- message = MessageTemplate::kJsonParseUnexpectedTokenString;
- break;
- default:
- message = MessageTemplate::kJsonParseUnexpectedToken;
- arg2 = arg1;
- arg1 = factory->LookupSingleCharacterStringFromCode(c0_);
- break;
- }
-
- Handle<Script> script(factory->NewScript(source_));
- // We should send a compile error event because we compile the JSON
- // object in a separate source file.
- isolate()->debug()->OnCompileError(script);
- MessageLocation location(script, position_, position_ + 1);
- Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
- return isolate()->template Throw<Object>(error, &location);
- }
- return result;
-}
-
-
-// Parse any JSON value.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
- StackLimitCheck stack_check(isolate_);
- if (stack_check.HasOverflowed()) {
- isolate_->StackOverflow();
- return Handle<Object>::null();
- }
-
- if (stack_check.InterruptRequested()) {
- ExecutionAccess access(isolate_);
- // Avoid blocking GC in long running parser (v8:3974).
- isolate_->stack_guard()->HandleGCInterrupt();
- }
-
- if (c0_ == '"') return ParseJsonString();
- if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
- if (c0_ == '{') return ParseJsonObject();
- if (c0_ == '[') return ParseJsonArray();
- if (c0_ == 'f') {
- if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return factory()->false_value();
- }
- return ReportUnexpectedCharacter();
- }
- if (c0_ == 't') {
- if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
- AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return factory()->true_value();
- }
- return ReportUnexpectedCharacter();
- }
- if (c0_ == 'n') {
- if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 'l') {
- AdvanceSkipWhitespace();
- return factory()->null_value();
- }
- return ReportUnexpectedCharacter();
- }
- return ReportUnexpectedCharacter();
-}
-
-
-template <bool seq_one_byte>
-ParseElementResult JsonParser<seq_one_byte>::ParseElement(
- Handle<JSObject> json_object) {
- uint32_t index = 0;
- // Maybe an array index, try to parse it.
- if (c0_ == '0') {
- // With a leading zero, the string has to be "0" only to be an index.
- Advance();
- } else {
- do {
- int d = c0_ - '0';
- if (index > 429496729U - ((d + 3) >> 3)) break;
- index = (index * 10) + d;
- Advance();
- } while (IsDecimalDigit(c0_));
- }
-
- if (c0_ == '"') {
- // Successfully parsed index, parse and store element.
- AdvanceSkipWhitespace();
-
- if (c0_ == ':') {
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (!value.is_null()) {
- JSObject::SetOwnElementIgnoreAttributes(json_object, index, value, NONE)
- .Assert();
- return kElementFound;
- } else {
- return kNullHandle;
- }
- }
- }
- return kElementNotFound;
-}
-
-// Parse a JSON object. Position must be right at '{'.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
- HandleScope scope(isolate());
- Handle<JSObject> json_object =
- factory()->NewJSObject(object_constructor(), pretenure_);
- Handle<Map> map(json_object->map());
- int descriptor = 0;
- ZoneList<Handle<Object> > properties(8, zone());
- DCHECK_EQ(c0_, '{');
-
- bool transitioning = true;
-
- AdvanceSkipWhitespace();
- if (c0_ != '}') {
- do {
- if (c0_ != '"') return ReportUnexpectedCharacter();
-
- int start_position = position_;
- Advance();
-
- if (IsDecimalDigit(c0_)) {
- ParseElementResult element_result = ParseElement(json_object);
- if (element_result == kNullHandle) return Handle<Object>::null();
- if (element_result == kElementFound) continue;
- }
- // Not an index; fall back to the slow path.
-
- position_ = start_position;
-#ifdef DEBUG
- c0_ = '"';
-#endif
-
- Handle<String> key;
- Handle<Object> value;
-
- // Try to follow existing transitions as long as possible. Once we stop
- // transitioning, no transition can be found anymore.
- DCHECK(transitioning);
- // First check whether there is a single expected transition. If so, try
- // to parse it first.
- bool follow_expected = false;
- Handle<Map> target;
- if (seq_one_byte) {
- key = TransitionArray::ExpectedTransitionKey(map);
- follow_expected = !key.is_null() && ParseJsonString(key);
- }
- // If the expected transition hits, follow it.
- if (follow_expected) {
- target = TransitionArray::ExpectedTransitionTarget(map);
- } else {
- // If the expected transition failed, parse an internalized string and
- // try to find a matching transition.
- key = ParseJsonInternalizedString();
- if (key.is_null()) return ReportUnexpectedCharacter();
-
- target = TransitionArray::FindTransitionToField(map, key);
- // If a transition was found, follow it and continue.
- transitioning = !target.is_null();
- }
- if (c0_ != ':') return ReportUnexpectedCharacter();
-
- AdvanceSkipWhitespace();
- value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
-
- if (transitioning) {
- PropertyDetails details =
- target->instance_descriptors()->GetDetails(descriptor);
- Representation expected_representation = details.representation();
-
- if (value->FitsRepresentation(expected_representation)) {
- if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors()
- ->GetFieldType(descriptor)
- ->NowContains(value)) {
- Handle<FieldType> value_type(
- value->OptimalType(isolate(), expected_representation));
- Map::GeneralizeFieldType(target, descriptor,
- expected_representation, value_type);
- }
- DCHECK(target->instance_descriptors()
- ->GetFieldType(descriptor)
- ->NowContains(value));
- properties.Add(value, zone());
- map = target;
- descriptor++;
- continue;
- } else {
- transitioning = false;
- }
- }
-
- DCHECK(!transitioning);
-
- // Commit the intermediate state to the object and stop transitioning.
- CommitStateToJsonObject(json_object, map, &properties);
-
- JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
- .Check();
- } while (transitioning && MatchSkipWhiteSpace(','));
-
- // If we transitioned until the very end, transition the map now.
- if (transitioning) {
- CommitStateToJsonObject(json_object, map, &properties);
- } else {
- while (MatchSkipWhiteSpace(',')) {
- HandleScope local_scope(isolate());
- if (c0_ != '"') return ReportUnexpectedCharacter();
-
- int start_position = position_;
- Advance();
-
- if (IsDecimalDigit(c0_)) {
- ParseElementResult element_result = ParseElement(json_object);
- if (element_result == kNullHandle) return Handle<Object>::null();
- if (element_result == kElementFound) continue;
- }
-        // Not an index; fall back to the slow path.
-
- position_ = start_position;
-#ifdef DEBUG
- c0_ = '"';
-#endif
-
- Handle<String> key;
- Handle<Object> value;
-
- key = ParseJsonInternalizedString();
- if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
-
- AdvanceSkipWhitespace();
- value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
-
- JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key,
- value).Check();
- }
- }
-
- if (c0_ != '}') {
- return ReportUnexpectedCharacter();
- }
- }
- AdvanceSkipWhitespace();
- return scope.CloseAndEscape(json_object);
-}
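
ParseJsonObject's transition-following loop is what lets repeated JSON records share a hidden class: as long as keys arrive in an order the current map has already seen, the parser walks existing transitions instead of creating new maps. A simplified standalone model of that sharing (Shapes and Transition are illustrative stand-ins, not V8 types; the real code additionally caches one "expected" next key per map and tries ParseJsonString(key) against it before doing a full transition lookup):

```cpp
#include <cassert>
#include <map>
#include <string>
#include <utility>

// Simplified model of map transitions: objects whose keys arrive in the
// same order walk the same chain and end up sharing one final shape.
struct Shapes {
  std::map<std::pair<int, std::string>, int> transitions;
  int next_id = 1;
  int Transition(int shape, const std::string& key) {
    auto it = transitions.find(std::make_pair(shape, key));
    if (it != transitions.end()) return it->second;  // follow existing
    transitions[std::make_pair(shape, key)] = next_id;
    return next_id++;  // or create a new shape
  }
};

int main() {
  Shapes shapes;
  int row1 = shapes.Transition(shapes.Transition(0, "x"), "y");
  int row2 = shapes.Transition(shapes.Transition(0, "x"), "y");
  assert(row1 == row2);  // same key order => same hidden class
  int swapped = shapes.Transition(shapes.Transition(0, "y"), "x");
  assert(swapped != row1);  // different order => different hidden class
}
```
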
-
-
-template <bool seq_one_byte>
-void JsonParser<seq_one_byte>::CommitStateToJsonObject(
- Handle<JSObject> json_object, Handle<Map> map,
- ZoneList<Handle<Object> >* properties) {
- JSObject::AllocateStorageForMap(json_object, map);
- DCHECK(!json_object->map()->is_dictionary_map());
-
- DisallowHeapAllocation no_gc;
-
- int length = properties->length();
- for (int i = 0; i < length; i++) {
- Handle<Object> value = (*properties)[i];
- json_object->WriteToField(i, *value);
- }
-}
-
-
-// Parse a JSON array. Position must be right at '['.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
- HandleScope scope(isolate());
- ZoneList<Handle<Object> > elements(4, zone());
- DCHECK_EQ(c0_, '[');
-
- AdvanceSkipWhitespace();
- if (c0_ != ']') {
- do {
- Handle<Object> element = ParseJsonValue();
- if (element.is_null()) return ReportUnexpectedCharacter();
- elements.Add(element, zone());
- } while (MatchSkipWhiteSpace(','));
- if (c0_ != ']') {
- return ReportUnexpectedCharacter();
- }
- }
- AdvanceSkipWhitespace();
- // Allocate a fixed array with all the elements.
- Handle<FixedArray> fast_elements =
- factory()->NewFixedArray(elements.length(), pretenure_);
- for (int i = 0, n = elements.length(); i < n; i++) {
- fast_elements->set(i, *elements[i]);
- }
- Handle<Object> json_array = factory()->NewJSArrayWithElements(
- fast_elements, FAST_ELEMENTS, pretenure_);
- return scope.CloseAndEscape(json_array);
-}
-
-
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
- bool negative = false;
- int beg_pos = position_;
- if (c0_ == '-') {
- Advance();
- negative = true;
- }
- if (c0_ == '0') {
- Advance();
- // Prefix zero is only allowed if it's the only digit before
- // a decimal point or exponent.
- if (IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
- } else {
- int i = 0;
- int digits = 0;
- if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
- do {
- i = i * 10 + c0_ - '0';
- digits++;
- Advance();
- } while (IsDecimalDigit(c0_));
- if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
- SkipWhitespace();
- return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
- }
- }
- if (c0_ == '.') {
- Advance();
- if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
- do {
- Advance();
- } while (IsDecimalDigit(c0_));
- }
- if (AsciiAlphaToLower(c0_) == 'e') {
- Advance();
- if (c0_ == '-' || c0_ == '+') Advance();
- if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
- do {
- Advance();
- } while (IsDecimalDigit(c0_));
- }
- int length = position_ - beg_pos;
- double number;
- if (seq_one_byte) {
- Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
- number = StringToDouble(isolate()->unicode_cache(), chars,
- NO_FLAGS, // Hex, octal or trailing junk.
- std::numeric_limits<double>::quiet_NaN());
- } else {
- Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
- String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
- Vector<const uint8_t> result =
- Vector<const uint8_t>(buffer.start(), length);
- number = StringToDouble(isolate()->unicode_cache(),
- result,
- NO_FLAGS, // Hex, octal or trailing junk.
- 0.0);
- buffer.Dispose();
- }
- SkipWhitespace();
- return factory()->NewNumber(number, pretenure_);
-}
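
ParseJsonNumber's integer fast path rests on a small capacity argument: a literal with at most nine digits is below 10^9, which fits even a 31-bit Smi (max 2^30 - 1 = 1073741823), so it can be accumulated in a plain int and returned without calling StringToDouble. A standalone sketch of that path under the same digits < 10 cutoff (TryParseSmallInt is an illustrative name):

```cpp
#include <cassert>

bool IsDigitAscii(char c) { return c >= '0' && c <= '9'; }

// Sketch of the Smi fast path in ParseJsonNumber: at most nine digits,
// no fraction, no exponent. Like the real code, a leading zero (and
// hence "-0") always takes the slow path. Returns false whenever the
// caller should fall back to full StringToDouble conversion.
bool TryParseSmallInt(const char* p, int* out) {
  bool negative = (*p == '-');
  if (negative) p++;
  if (*p < '1' || *p > '9') return false;  // "0", "-0", junk: slow path
  int value = 0, digits = 0;
  while (IsDigitAscii(*p)) {
    if (digits == 9) return false;  // a tenth digit may not fit: slow path
    value = value * 10 + (*p++ - '0');
    digits++;
  }
  if (*p == '.' || *p == 'e' || *p == 'E') return false;  // not an integer
  *out = negative ? -value : value;
  return true;
}

int main() {
  int v;
  assert(TryParseSmallInt("123456789", &v) && v == 123456789);
  assert(!TryParseSmallInt("1234567890", &v));  // 10 digits
  assert(!TryParseSmallInt("12.5", &v));        // fraction
}
```
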
-
-
-template <typename StringType>
-inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
-
-template <>
-inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
- seq_str->SeqTwoByteStringSet(i, c);
-}
-
-template <>
-inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
- seq_str->SeqOneByteStringSet(i, c);
-}
-
-template <typename StringType>
-inline Handle<StringType> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure);
-
-template <>
-inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure) {
- return factory->NewRawTwoByteString(length, pretenure).ToHandleChecked();
-}
-
-template <>
-inline Handle<SeqOneByteString> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure) {
- return factory->NewRawOneByteString(length, pretenure).ToHandleChecked();
-}
-
-
-// Scans the rest of a JSON string starting from position_ and writes
-// prefix[start..end] along with the scanned characters into a
-// sequential string of type StringType.
-template <bool seq_one_byte>
-template <typename StringType, typename SinkChar>
-Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
- Handle<String> prefix, int start, int end) {
- int count = end - start;
- int max_length = count + source_length_ - position_;
- int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
- Handle<StringType> seq_string =
- NewRawString<StringType>(factory(), length, pretenure_);
- // Copy prefix into seq_str.
- SinkChar* dest = seq_string->GetChars();
- String::WriteToFlat(*prefix, dest, start, end);
-
- while (c0_ != '"') {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Handle<String>::null();
- if (count >= length) {
- // We need to create a longer sequential string for the result.
- return SlowScanJsonString<StringType, SinkChar>(seq_string, 0, count);
- }
- if (c0_ != '\\') {
- // If the sink can contain UC16 characters, or source_ contains only
- // Latin1 characters, there's no need to test whether we can store the
- // character. Otherwise check whether the UC16 source character can fit
- // in the Latin1 sink.
- if (sizeof(SinkChar) == kUC16Size || seq_one_byte ||
- c0_ <= String::kMaxOneByteCharCode) {
- SeqStringSet(seq_string, count++, c0_);
- Advance();
- } else {
- // StringType is SeqOneByteString and we just read a non-Latin1 char.
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
- }
- } else {
- Advance(); // Advance past the \.
- switch (c0_) {
- case '"':
- case '\\':
- case '/':
- SeqStringSet(seq_string, count++, c0_);
- break;
- case 'b':
- SeqStringSet(seq_string, count++, '\x08');
- break;
- case 'f':
- SeqStringSet(seq_string, count++, '\x0c');
- break;
- case 'n':
- SeqStringSet(seq_string, count++, '\x0a');
- break;
- case 'r':
- SeqStringSet(seq_string, count++, '\x0d');
- break;
- case 't':
- SeqStringSet(seq_string, count++, '\x09');
- break;
- case 'u': {
- uc32 value = 0;
- for (int i = 0; i < 4; i++) {
- Advance();
- int digit = HexValue(c0_);
- if (digit < 0) {
- return Handle<String>::null();
- }
- value = value * 16 + digit;
- }
- if (sizeof(SinkChar) == kUC16Size ||
- value <= String::kMaxOneByteCharCode) {
- SeqStringSet(seq_string, count++, value);
- break;
- } else {
- // StringType is SeqOneByteString and we just read a non-Latin1
- // char.
- position_ -= 6; // Rewind position_ to \ in \uxxxx.
- Advance();
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string,
- 0,
- count);
- }
- }
- default:
- return Handle<String>::null();
- }
- Advance();
- }
- }
-
- DCHECK_EQ('"', c0_);
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
-
- // Shrink seq_string length to count and return.
- return SeqString::Truncate(seq_string, count);
-}
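
SlowScanJsonString sizes its scratch string at roughly twice the already-copied prefix and, when that fills up, recurses with the current buffer as the new prefix, so capacity doubles on each overflow and total copying stays linear. A standalone sketch of the same strategy using std::string (kInitial stands in for kInitialSpecialStringLength and is deliberately tiny so the doubling actually triggers; the real code also never allocates more than the source characters remaining):

```cpp
#include <algorithm>
#include <cassert>
#include <string>

// Sketch of the doubling strategy in SlowScanJsonString: start at about
// twice the copied prefix and recurse with the partly-filled buffer as
// the new prefix whenever it runs out of room.
std::string ScanWithDoubling(const std::string& prefix, const char* rest) {
  static const std::string::size_type kInitial = 4;
  std::string::size_type capacity = std::max(kInitial, 2 * prefix.size());
  std::string buffer = prefix;
  for (const char* p = rest; *p != '\0' && *p != '"'; p++) {
    if (buffer.size() >= capacity) {
      return ScanWithDoubling(buffer, p);  // grow: capacity doubles
    }
    buffer.push_back(*p);
  }
  return buffer;
}

int main() {
  assert(ScanWithDoubling("", "hello world\"") == "hello world");
}
```
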
-
-
-template <bool seq_one_byte>
-template <bool is_internalized>
-Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
- DCHECK_EQ('"', c0_);
- Advance();
- if (c0_ == '"') {
- AdvanceSkipWhitespace();
- return factory()->empty_string();
- }
-
- if (seq_one_byte && is_internalized) {
-    // Fast path for existing internalized strings. If the string being
-    // parsed is not a known internalized string, contains backslashes, or
-    // unexpectedly reaches the end of the string, return an empty handle.
- uint32_t running_hash = isolate()->heap()->HashSeed();
- int position = position_;
- uc32 c0 = c0_;
- do {
- if (c0 == '\\') {
- c0_ = c0;
- int beg_pos = position_;
- position_ = position;
- return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
- beg_pos,
- position_);
- }
- if (c0 < 0x20) return Handle<String>::null();
- running_hash = StringHasher::AddCharacterCore(running_hash,
- static_cast<uint16_t>(c0));
- position++;
- if (position >= source_length_) return Handle<String>::null();
- c0 = seq_source_->SeqOneByteStringGet(position);
- } while (c0 != '"');
- int length = position - position_;
- uint32_t hash = (length <= String::kMaxHashCalcLength)
- ? StringHasher::GetHashCore(running_hash)
- : static_cast<uint32_t>(length);
- Vector<const uint8_t> string_vector(
- seq_source_->GetChars() + position_, length);
- StringTable* string_table = isolate()->heap()->string_table();
- uint32_t capacity = string_table->Capacity();
- uint32_t entry = StringTable::FirstProbe(hash, capacity);
- uint32_t count = 1;
- Handle<String> result;
- while (true) {
- Object* element = string_table->KeyAt(entry);
- if (element == isolate()->heap()->undefined_value()) {
- // Lookup failure.
- result = factory()->InternalizeOneByteString(
- seq_source_, position_, length);
- break;
- }
- if (element != isolate()->heap()->the_hole_value() &&
- String::cast(element)->IsOneByteEqualTo(string_vector)) {
- result = Handle<String>(String::cast(element), isolate());
-#ifdef DEBUG
- uint32_t hash_field =
- (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
- DCHECK_EQ(static_cast<int>(result->Hash()),
- static_cast<int>(hash_field >> String::kHashShift));
-#endif
- break;
- }
- entry = StringTable::NextProbe(entry, count++, capacity);
- }
- position_ = position;
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return result;
- }
-
- int beg_pos = position_;
- // Fast case for Latin1 only without escape characters.
- do {
- // Check for control character (0x00-0x1f) or unterminated string (<0).
- if (c0_ < 0x20) return Handle<String>::null();
- if (c0_ != '\\') {
- if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
- Advance();
- } else {
- return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
- beg_pos,
- position_);
- }
- } else {
- return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
- beg_pos,
- position_);
- }
- } while (c0_ != '"');
- int length = position_ - beg_pos;
- Handle<String> result =
- factory()->NewRawOneByteString(length, pretenure_).ToHandleChecked();
- uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
- String::WriteToFlat(*source_, dest, beg_pos, position_);
-
- DCHECK_EQ('"', c0_);
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return result;
-}
-
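
The internalized-string fast path above hashes the key while scanning it and then probes the string table directly, an open-addressed hash table whose capacity is a power of two, so FirstProbe masks the hash and NextProbe steps by 1, 2, 3, ... (triangular offsets, i.e. quadratic probing). A standalone sketch of the probe sequence; the hash is supplied by the caller here rather than computed with StringHasher's actual algorithm:

```cpp
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Sketch of the string-table probe in ScanJsonString. Capacity is a
// power of two, so masking replaces a modulo on every step.
uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
  return hash & (capacity - 1);
}
uint32_t NextProbe(uint32_t entry, uint32_t count, uint32_t capacity) {
  return (entry + count) & (capacity - 1);
}

// An empty string marks a free slot; the real table also has a distinct
// "hole" marker for deleted keys that probing must step over, not stop at.
int Lookup(const std::vector<std::string>& table, const std::string& key,
           uint32_t hash) {
  uint32_t capacity = static_cast<uint32_t>(table.size());
  uint32_t entry = FirstProbe(hash, capacity);
  for (uint32_t count = 1;; count++) {
    if (table[entry].empty()) return -1;  // free slot: not internalized yet
    if (table[entry] == key) return static_cast<int>(entry);
    entry = NextProbe(entry, count, capacity);
  }
}

int main() {
  std::vector<std::string> table(8);
  table[3] = "name";
  assert(Lookup(table, "name", 3) == 3);
  assert(Lookup(table, "age", 3) == -1);  // probes slot 3, then empty slot 4
}
```
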
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
new file mode 100644
index 0000000000..29685c20e2
--- /dev/null
+++ b/deps/v8/src/json-stringifier.cc
@@ -0,0 +1,722 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/json-stringifier.h"
+
+#include "src/conversions.h"
+#include "src/lookup.h"
+#include "src/messages.h"
+#include "src/objects-inl.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Translation table to escape Latin1 characters.
+// Table entries start at a multiple of 8 and are null-terminated.
+const char* const JsonStringifier::JsonEscapeTable =
+ "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
+ "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
+ "\\b\0 \\t\0 \\n\0 \\u000b\0 "
+ "\\f\0 \\r\0 \\u000e\0 \\u000f\0 "
+ "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
+ "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
+ "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
+ "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
+ " \0 !\0 \\\"\0 #\0 "
+ "$\0 %\0 &\0 '\0 "
+ "(\0 )\0 *\0 +\0 "
+ ",\0 -\0 .\0 /\0 "
+ "0\0 1\0 2\0 3\0 "
+ "4\0 5\0 6\0 7\0 "
+ "8\0 9\0 :\0 ;\0 "
+ "<\0 =\0 >\0 ?\0 "
+ "@\0 A\0 B\0 C\0 "
+ "D\0 E\0 F\0 G\0 "
+ "H\0 I\0 J\0 K\0 "
+ "L\0 M\0 N\0 O\0 "
+ "P\0 Q\0 R\0 S\0 "
+ "T\0 U\0 V\0 W\0 "
+ "X\0 Y\0 Z\0 [\0 "
+ "\\\\\0 ]\0 ^\0 _\0 "
+ "`\0 a\0 b\0 c\0 "
+ "d\0 e\0 f\0 g\0 "
+ "h\0 i\0 j\0 k\0 "
+ "l\0 m\0 n\0 o\0 "
+ "p\0 q\0 r\0 s\0 "
+ "t\0 u\0 v\0 w\0 "
+ "x\0 y\0 z\0 {\0 "
+ "|\0 }\0 ~\0 \177\0 "
+ "\200\0 \201\0 \202\0 \203\0 "
+ "\204\0 \205\0 \206\0 \207\0 "
+ "\210\0 \211\0 \212\0 \213\0 "
+ "\214\0 \215\0 \216\0 \217\0 "
+ "\220\0 \221\0 \222\0 \223\0 "
+ "\224\0 \225\0 \226\0 \227\0 "
+ "\230\0 \231\0 \232\0 \233\0 "
+ "\234\0 \235\0 \236\0 \237\0 "
+ "\240\0 \241\0 \242\0 \243\0 "
+ "\244\0 \245\0 \246\0 \247\0 "
+ "\250\0 \251\0 \252\0 \253\0 "
+ "\254\0 \255\0 \256\0 \257\0 "
+ "\260\0 \261\0 \262\0 \263\0 "
+ "\264\0 \265\0 \266\0 \267\0 "
+ "\270\0 \271\0 \272\0 \273\0 "
+ "\274\0 \275\0 \276\0 \277\0 "
+ "\300\0 \301\0 \302\0 \303\0 "
+ "\304\0 \305\0 \306\0 \307\0 "
+ "\310\0 \311\0 \312\0 \313\0 "
+ "\314\0 \315\0 \316\0 \317\0 "
+ "\320\0 \321\0 \322\0 \323\0 "
+ "\324\0 \325\0 \326\0 \327\0 "
+ "\330\0 \331\0 \332\0 \333\0 "
+ "\334\0 \335\0 \336\0 \337\0 "
+ "\340\0 \341\0 \342\0 \343\0 "
+ "\344\0 \345\0 \346\0 \347\0 "
+ "\350\0 \351\0 \352\0 \353\0 "
+ "\354\0 \355\0 \356\0 \357\0 "
+ "\360\0 \361\0 \362\0 \363\0 "
+ "\364\0 \365\0 \366\0 \367\0 "
+ "\370\0 \371\0 \372\0 \373\0 "
+ "\374\0 \375\0 \376\0 \377\0 ";
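
The table layout makes escaping branch-free on the lookup side: every Latin1 code point owns one 8-byte, NUL-terminated slot (kJsonEscapeTableEntrySize = 8), so the escape sequence for character c is simply &JsonEscapeTable[c * 8], and characters that need no escaping map to a one-character entry containing themselves. A two-slot miniature of the same layout (kTable here is illustrative, not the real 256-entry table):

```cpp
#include <cassert>
#include <cstring>

// Two-slot miniature of the escape table layout: each slot is 8 bytes
// and NUL-terminated, so the escape for slot c starts at kTable + c * 8.
static const int kEntrySize = 8;
static const char kTable[] =
    "\\b\0     "   // slot 0: '\b' escapes to backslash + 'b'
    "a\0      ";   // slot 1: 'a' needs no escaping and maps to itself

const char* Escape(int slot) { return &kTable[slot * kEntrySize]; }

int main() {
  assert(std::strcmp(Escape(0), "\\b") == 0);
  assert(std::strcmp(Escape(1), "a") == 0);
}
```
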
+
+JsonStringifier::JsonStringifier(Isolate* isolate)
+ : isolate_(isolate), builder_(isolate), gap_(nullptr), indent_(0) {
+ tojson_string_ = factory()->toJSON_string();
+ stack_ = factory()->NewJSArray(8);
+}
+
+MaybeHandle<Object> JsonStringifier::Stringify(Handle<Object> object,
+ Handle<Object> replacer,
+ Handle<Object> gap) {
+ if (!InitializeReplacer(replacer)) return MaybeHandle<Object>();
+ if (!gap->IsUndefined(isolate_) && !InitializeGap(gap)) {
+ return MaybeHandle<Object>();
+ }
+ Result result = SerializeObject(object);
+ if (result == UNCHANGED) return factory()->undefined_value();
+ if (result == SUCCESS) return builder_.Finish();
+ DCHECK(result == EXCEPTION);
+ return MaybeHandle<Object>();
+}
+
+bool IsInList(Handle<String> key, List<Handle<String> >* list) {
+ // TODO(yangguo): This is O(n^2) for n properties in the list. Deal with this
+ // if this becomes an issue.
+ for (const Handle<String>& existing : *list) {
+ if (String::Equals(existing, key)) return true;
+ }
+ return false;
+}
+
+bool JsonStringifier::InitializeReplacer(Handle<Object> replacer) {
+ DCHECK(property_list_.is_null());
+ DCHECK(replacer_function_.is_null());
+ Maybe<bool> is_array = Object::IsArray(replacer);
+ if (is_array.IsNothing()) return false;
+ if (is_array.FromJust()) {
+ HandleScope handle_scope(isolate_);
+ List<Handle<String> > list;
+ Handle<Object> length_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, length_obj,
+ Object::GetLengthFromArrayLike(isolate_, replacer), false);
+ uint32_t length;
+ if (!length_obj->ToUint32(&length)) length = kMaxUInt32;
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<Object> element;
+ Handle<String> key;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, element, Object::GetElement(isolate_, replacer, i), false);
+ if (element->IsNumber() || element->IsString()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, key, Object::ToString(isolate_, element), false);
+ } else if (element->IsJSValue()) {
+ Handle<Object> value(Handle<JSValue>::cast(element)->value(), isolate_);
+ if (value->IsNumber() || value->IsString()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, key, Object::ToString(isolate_, element), false);
+ }
+ }
+ if (key.is_null()) continue;
+ if (!IsInList(key, &list)) list.Add(key);
+ }
+ property_list_ = factory()->NewUninitializedFixedArray(list.length());
+ for (int i = 0; i < list.length(); i++) {
+ property_list_->set(i, *list[i]);
+ }
+ property_list_ = handle_scope.CloseAndEscape(property_list_);
+ } else if (replacer->IsCallable()) {
+ replacer_function_ = Handle<JSReceiver>::cast(replacer);
+ }
+ return true;
+}
+
+bool JsonStringifier::InitializeGap(Handle<Object> gap) {
+ DCHECK_NULL(gap_);
+ HandleScope scope(isolate_);
+ if (gap->IsJSValue()) {
+ Handle<Object> value(Handle<JSValue>::cast(gap)->value(), isolate_);
+ if (value->IsString()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap,
+ Object::ToString(isolate_, gap), false);
+ } else if (value->IsNumber()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, gap, Object::ToNumber(gap),
+ false);
+ }
+ }
+
+ if (gap->IsString()) {
+ Handle<String> gap_string = Handle<String>::cast(gap);
+ if (gap_string->length() > 0) {
+ int gap_length = std::min(gap_string->length(), 10);
+ gap_ = NewArray<uc16>(gap_length + 1);
+ String::WriteToFlat(*gap_string, gap_, 0, gap_length);
+ for (int i = 0; i < gap_length; i++) {
+ if (gap_[i] > String::kMaxOneByteCharCode) {
+ builder_.ChangeEncoding();
+ break;
+ }
+ }
+ gap_[gap_length] = '\0';
+ }
+ } else if (gap->IsNumber()) {
+ int num_value = DoubleToInt32(gap->Number());
+ if (num_value > 0) {
+ int gap_length = std::min(num_value, 10);
+ gap_ = NewArray<uc16>(gap_length + 1);
+ for (int i = 0; i < gap_length; i++) gap_[i] = ' ';
+ gap_[gap_length] = '\0';
+ }
+ }
+ return true;
+}
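
InitializeGap implements the clamping that the JSON.stringify spec (ES2015 24.3.2) prescribes for the space argument: a number yields min(10, n) spaces, a string is truncated to its first ten characters, and anything else leaves gap_ null so output stays unindented. A standalone sketch of just the clamping rules (NormalizeGap is an illustrative name):

```cpp
#include <algorithm>
#include <cassert>
#include <string>

// Sketch of the gap normalization: numbers become up to ten spaces,
// strings are truncated to ten characters, per JSON.stringify's spec.
std::string NormalizeGap(int num_value) {
  return std::string(std::max(0, std::min(num_value, 10)), ' ');
}
std::string NormalizeGap(const std::string& str_value) {
  return str_value.substr(0, 10);
}

int main() {
  assert(NormalizeGap(14).size() == 10);  // clamped to ten spaces
  assert(NormalizeGap(std::string("--->")) == "--->");
  assert(NormalizeGap(std::string("0123456789AB")) == "0123456789");
}
```
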
+
+MaybeHandle<Object> JsonStringifier::ApplyToJsonFunction(Handle<Object> object,
+ Handle<Object> key) {
+ HandleScope scope(isolate_);
+ LookupIterator it(object, tojson_string_,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ Handle<Object> fun;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
+ if (!fun->IsCallable()) return object;
+
+ // Call toJSON function.
+ if (key->IsSmi()) key = factory()->NumberToString(key);
+ Handle<Object> argv[] = {key};
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, object,
+ Execution::Call(isolate_, fun, object, 1, argv),
+ Object);
+ return scope.CloseAndEscape(object);
+}
+
+MaybeHandle<Object> JsonStringifier::ApplyReplacerFunction(
+ Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder) {
+ HandleScope scope(isolate_);
+ if (key->IsSmi()) key = factory()->NumberToString(key);
+ Handle<Object> argv[] = {key, value};
+ Handle<JSReceiver> holder = CurrentHolder(value, initial_holder);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, value,
+ Execution::Call(isolate_, replacer_function_, holder, 2, argv), Object);
+ return scope.CloseAndEscape(value);
+}
+
+Handle<JSReceiver> JsonStringifier::CurrentHolder(
+ Handle<Object> value, Handle<Object> initial_holder) {
+ int length = Smi::cast(stack_->length())->value();
+ if (length == 0) {
+ Handle<JSObject> holder =
+ factory()->NewJSObject(isolate_->object_function());
+ JSObject::AddProperty(holder, factory()->empty_string(), initial_holder,
+ NONE);
+ return holder;
+ } else {
+ FixedArray* elements = FixedArray::cast(stack_->elements());
+ return Handle<JSReceiver>(JSReceiver::cast(elements->get(length - 1)),
+ isolate_);
+ }
+}
+
+JsonStringifier::Result JsonStringifier::StackPush(Handle<Object> object) {
+ StackLimitCheck check(isolate_);
+ if (check.HasOverflowed()) {
+ isolate_->StackOverflow();
+ return EXCEPTION;
+ }
+
+ int length = Smi::cast(stack_->length())->value();
+ {
+ DisallowHeapAllocation no_allocation;
+ FixedArray* elements = FixedArray::cast(stack_->elements());
+ for (int i = 0; i < length; i++) {
+ if (elements->get(i) == *object) {
+ AllowHeapAllocation allow_to_return_error;
+ Handle<Object> error =
+ factory()->NewTypeError(MessageTemplate::kCircularStructure);
+ isolate_->Throw(*error);
+ return EXCEPTION;
+ }
+ }
+ }
+ JSArray::SetLength(stack_, length + 1);
+ FixedArray::cast(stack_->elements())->set(length, *object);
+ return SUCCESS;
+}
+
+void JsonStringifier::StackPop() {
+ int length = Smi::cast(stack_->length())->value();
+ stack_->set_length(Smi::FromInt(length - 1));
+}
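
StackPush doubles as the cycle detector: before a receiver is serialized it is compared against every object currently on the serialization stack, and a pointer match throws kCircularStructure instead of recursing forever. The scan is linear per push, which is acceptable because nesting depth is already bounded by the StackLimitCheck above. A standalone sketch of the detection (names are illustrative):

```cpp
#include <cassert>
#include <vector>

// Sketch of the cycle check in StackPush: a pointer already on the
// serialization stack means the value is circular, so stringification
// must throw rather than recurse forever.
bool PushOrDetectCycle(std::vector<const void*>* stack, const void* object) {
  for (const void* entry : *stack) {
    if (entry == object) return false;  // circular structure
  }
  stack->push_back(object);
  return true;
}

int main() {
  int a = 0, b = 0;
  std::vector<const void*> stack;
  assert(PushOrDetectCycle(&stack, &a));
  assert(PushOrDetectCycle(&stack, &b));
  assert(!PushOrDetectCycle(&stack, &a));  // &a is still being serialized
}
```
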
+
+template <bool deferred_string_key>
+JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
+ bool comma,
+ Handle<Object> key) {
+ StackLimitCheck interrupt_check(isolate_);
+ Handle<Object> initial_value = object;
+ if (interrupt_check.InterruptRequested() &&
+ isolate_->stack_guard()->HandleInterrupts()->IsException(isolate_)) {
+ return EXCEPTION;
+ }
+ if (object->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, object, ApplyToJsonFunction(object, key), EXCEPTION);
+ }
+ if (!replacer_function_.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, object, ApplyReplacerFunction(object, key, initial_value),
+ EXCEPTION);
+ }
+
+ if (object->IsSmi()) {
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeSmi(Smi::cast(*object));
+ }
+
+ switch (HeapObject::cast(*object)->map()->instance_type()) {
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
+ case ODDBALL_TYPE:
+ switch (Oddball::cast(*object)->kind()) {
+ case Oddball::kFalse:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ builder_.AppendCString("false");
+ return SUCCESS;
+ case Oddball::kTrue:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ builder_.AppendCString("true");
+ return SUCCESS;
+ case Oddball::kNull:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ builder_.AppendCString("null");
+ return SUCCESS;
+ default:
+ return UNCHANGED;
+ }
+ case JS_ARRAY_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSArray(Handle<JSArray>::cast(object));
+ case JS_VALUE_TYPE:
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ return SerializeJSValue(Handle<JSValue>::cast(object));
+ case SIMD128_VALUE_TYPE:
+ case SYMBOL_TYPE:
+ return UNCHANGED;
+ default:
+ if (object->IsString()) {
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ SerializeString(Handle<String>::cast(object));
+ return SUCCESS;
+ } else {
+ DCHECK(object->IsJSReceiver());
+ if (object->IsCallable()) return UNCHANGED;
+ // Go to slow path for global proxy and objects requiring access checks.
+ if (deferred_string_key) SerializeDeferredKey(comma, key);
+ if (object->IsJSProxy()) {
+ return SerializeJSProxy(Handle<JSProxy>::cast(object));
+ }
+ return SerializeJSObject(Handle<JSObject>::cast(object));
+ }
+ }
+
+ UNREACHABLE();
+ return UNCHANGED;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSValue(
+ Handle<JSValue> object) {
+ String* class_name = object->class_name();
+ if (class_name == isolate_->heap()->String_string()) {
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, value, Object::ToString(isolate_, object), EXCEPTION);
+ SerializeString(Handle<String>::cast(value));
+ } else if (class_name == isolate_->heap()->Number_string()) {
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, value, Object::ToNumber(object),
+ EXCEPTION);
+ if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
+ SerializeHeapNumber(Handle<HeapNumber>::cast(value));
+ } else if (class_name == isolate_->heap()->Boolean_string()) {
+ Object* value = JSValue::cast(*object)->value();
+ DCHECK(value->IsBoolean());
+ builder_.AppendCString(value->IsTrue(isolate_) ? "true" : "false");
+ } else {
+ // ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
+ return SerializeJSObject(object);
+ }
+ return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeSmi(Smi* object) {
+ static const int kBufferSize = 100;
+ char chars[kBufferSize];
+ Vector<char> buffer(chars, kBufferSize);
+ builder_.AppendCString(IntToCString(object->value(), buffer));
+ return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeDouble(double number) {
+ if (std::isinf(number) || std::isnan(number)) {
+ builder_.AppendCString("null");
+ return SUCCESS;
+ }
+ static const int kBufferSize = 100;
+ char chars[kBufferSize];
+ Vector<char> buffer(chars, kBufferSize);
+ builder_.AppendCString(DoubleToCString(number, buffer));
+ return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSArray(
+ Handle<JSArray> object) {
+ HandleScope handle_scope(isolate_);
+ Result stack_push = StackPush(object);
+ if (stack_push != SUCCESS) return stack_push;
+ uint32_t length = 0;
+ CHECK(object->length()->ToArrayLength(&length));
+ DCHECK(!object->IsAccessCheckNeeded());
+ builder_.AppendCharacter('[');
+ Indent();
+ uint32_t i = 0;
+ if (replacer_function_.is_null()) {
+ switch (object->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(object->elements()),
+ isolate_);
+ StackLimitCheck interrupt_check(isolate_);
+ while (i < length) {
+ if (interrupt_check.InterruptRequested() &&
+ isolate_->stack_guard()->HandleInterrupts()->IsException(
+ isolate_)) {
+ return EXCEPTION;
+ }
+ Separator(i == 0);
+ SerializeSmi(Smi::cast(elements->get(i)));
+ i++;
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty array is FixedArray but not FixedDoubleArray.
+ if (length == 0) break;
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(object->elements()), isolate_);
+ StackLimitCheck interrupt_check(isolate_);
+ while (i < length) {
+ if (interrupt_check.InterruptRequested() &&
+ isolate_->stack_guard()->HandleInterrupts()->IsException(
+ isolate_)) {
+ return EXCEPTION;
+ }
+ Separator(i == 0);
+ SerializeDouble(elements->get_scalar(i));
+ i++;
+ }
+ break;
+ }
+ case FAST_ELEMENTS: {
+ Handle<Object> old_length(object->length(), isolate_);
+ while (i < length) {
+ if (object->length() != *old_length ||
+ object->GetElementsKind() != FAST_ELEMENTS) {
+ // Fall back to slow path.
+ break;
+ }
+ Separator(i == 0);
+ Result result = SerializeElement(
+ isolate_,
+ Handle<Object>(FixedArray::cast(object->elements())->get(i),
+ isolate_),
+ i);
+ if (result == UNCHANGED) {
+ builder_.AppendCString("null");
+ } else if (result != SUCCESS) {
+ return result;
+ }
+ i++;
+ }
+ break;
+ }
+ // The FAST_HOLEY_* cases could be handled in a faster way. They resemble
+ // the non-holey cases except that a lookup is necessary for holes.
+ default:
+ break;
+ }
+ }
+ if (i < length) {
+    // Slow path for non-fast elements and as a fallback in edge cases.
+ Result result = SerializeArrayLikeSlow(object, i, length);
+ if (result != SUCCESS) return result;
+ }
+ Unindent();
+ if (length > 0) NewLine();
+ builder_.AppendCharacter(']');
+ StackPop();
+ return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeArrayLikeSlow(
+ Handle<JSReceiver> object, uint32_t start, uint32_t length) {
+ // We need to write out at least two characters per array element.
+ static const int kMaxSerializableArrayLength = String::kMaxLength / 2;
+ if (length > kMaxSerializableArrayLength) {
+ isolate_->Throw(*isolate_->factory()->NewInvalidStringLengthError());
+ return EXCEPTION;
+ }
+ for (uint32_t i = start; i < length; i++) {
+ Separator(i == 0);
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, element, JSReceiver::GetElement(isolate_, object, i),
+ EXCEPTION);
+ Result result = SerializeElement(isolate_, element, i);
+ if (result == SUCCESS) continue;
+ if (result == UNCHANGED) {
+ // Detect overflow sooner for large sparse arrays.
+ if (builder_.HasOverflowed()) return EXCEPTION;
+ builder_.AppendCString("null");
+ } else {
+ return result;
+ }
+ }
+ return SUCCESS;
+}
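
The kMaxSerializableArrayLength bound is a worked capacity argument: every serialized element costs at least two output characters (a one-character value plus its separating comma), so any array longer than String::kMaxLength / 2 is guaranteed to overflow the result string and can be rejected up front. A sketch of the arithmetic, assuming the (1 << 28) - 16 string length limit V8 used around this release:

```cpp
#include <cassert>

// Worked version of the early-out bound: with at least two output
// characters per element, arrays longer than kMaxLength / 2 cannot
// possibly fit in a result string.
static const unsigned kMaxLength = (1u << 28) - 16;  // 268435440
static const unsigned kMaxSerializableArrayLength = kMaxLength / 2;

int main() {
  assert(kMaxSerializableArrayLength == 134217720u);
  // Even the shortest rendering of that many elements fills the string:
  assert(2u * kMaxSerializableArrayLength == kMaxLength);
}
```
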
+
+JsonStringifier::Result JsonStringifier::SerializeJSObject(
+ Handle<JSObject> object) {
+ HandleScope handle_scope(isolate_);
+ Result stack_push = StackPush(object);
+ if (stack_push != SUCCESS) return stack_push;
+
+ if (property_list_.is_null() &&
+ object->map()->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
+ object->HasFastProperties() &&
+ Handle<JSObject>::cast(object)->elements()->length() == 0) {
+ DCHECK(object->IsJSObject());
+ DCHECK(!object->IsJSGlobalProxy());
+ Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
+ DCHECK(!js_obj->HasIndexedInterceptor());
+ DCHECK(!js_obj->HasNamedInterceptor());
+ Handle<Map> map(js_obj->map());
+ builder_.AppendCharacter('{');
+ Indent();
+ bool comma = false;
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ Handle<Name> name(map->instance_descriptors()->GetKey(i), isolate_);
+ // TODO(rossberg): Should this throw?
+ if (!name->IsString()) continue;
+ Handle<String> key = Handle<String>::cast(name);
+ PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+ if (details.IsDontEnum()) continue;
+ Handle<Object> property;
+ if (details.type() == DATA && *map == js_obj->map()) {
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ property = JSObject::FastPropertyAt(js_obj, details.representation(),
+ field_index);
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, property, Object::GetPropertyOrElement(js_obj, key),
+ EXCEPTION);
+ }
+ Result result = SerializeProperty(property, comma, key);
+ if (!comma && result == SUCCESS) comma = true;
+ if (result == EXCEPTION) return result;
+ }
+ Unindent();
+ if (comma) NewLine();
+ builder_.AppendCharacter('}');
+ } else {
+ Result result = SerializeJSReceiverSlow(object);
+ if (result != SUCCESS) return result;
+ }
+ StackPop();
+ return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSReceiverSlow(
+ Handle<JSReceiver> object) {
+ Handle<FixedArray> contents = property_list_;
+ if (contents.is_null()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, contents,
+ KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString),
+ EXCEPTION);
+ }
+ builder_.AppendCharacter('{');
+ Indent();
+ bool comma = false;
+ for (int i = 0; i < contents->length(); i++) {
+ Handle<String> key(String::cast(contents->get(i)), isolate_);
+ Handle<Object> property;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, property,
+ Object::GetPropertyOrElement(object, key),
+ EXCEPTION);
+ Result result = SerializeProperty(property, comma, key);
+ if (!comma && result == SUCCESS) comma = true;
+ if (result == EXCEPTION) return result;
+ }
+ Unindent();
+ if (comma) NewLine();
+ builder_.AppendCharacter('}');
+ return SUCCESS;
+}
+
+JsonStringifier::Result JsonStringifier::SerializeJSProxy(
+ Handle<JSProxy> object) {
+ HandleScope scope(isolate_);
+ Result stack_push = StackPush(object);
+ if (stack_push != SUCCESS) return stack_push;
+ Maybe<bool> is_array = Object::IsArray(object);
+ if (is_array.IsNothing()) return EXCEPTION;
+ if (is_array.FromJust()) {
+ Handle<Object> length_object;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, length_object,
+ Object::GetLengthFromArrayLike(isolate_, object), EXCEPTION);
+ uint32_t length;
+ if (!length_object->ToUint32(&length)) {
+ // Technically, we need to be able to handle lengths outside the
+ // uint32_t range. However, we would run into string size overflow
+ // if we tried to stringify such an array.
+ isolate_->Throw(*isolate_->factory()->NewInvalidStringLengthError());
+ return EXCEPTION;
+ }
+ builder_.AppendCharacter('[');
+ Indent();
+ Result result = SerializeArrayLikeSlow(object, 0, length);
+ if (result != SUCCESS) return result;
+ Unindent();
+ if (length > 0) NewLine();
+ builder_.AppendCharacter(']');
+ } else {
+ Result result = SerializeJSReceiverSlow(object);
+ if (result != SUCCESS) return result;
+ }
+ StackPop();
+ return SUCCESS;
+}
+
+template <typename SrcChar, typename DestChar>
+void JsonStringifier::SerializeStringUnchecked_(
+ Vector<const SrcChar> src,
+ IncrementalStringBuilder::NoExtend<DestChar>* dest) {
+ // Assert that uc16 character is not truncated down to 8 bit.
+ // The <uc16, char> version of this method must not be called.
+ DCHECK(sizeof(DestChar) >= sizeof(SrcChar));
+
+ for (int i = 0; i < src.length(); i++) {
+ SrcChar c = src[i];
+ if (DoNotEscape(c)) {
+ dest->Append(c);
+ } else {
+ dest->AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
+ }
+ }
+}
+
+template <typename SrcChar, typename DestChar>
+void JsonStringifier::SerializeString_(Handle<String> string) {
+ int length = string->length();
+ builder_.Append<uint8_t, DestChar>('"');
+ // We make a rough estimate to find out if the current string can be
+ // serialized without allocating a new string part. The worst case length of
+  // an escaped character is 6. Shifting the remaining string length left by 3
+ // is a more pessimistic estimate, but faster to calculate.
+ int worst_case_length = length << 3;
+ if (builder_.CurrentPartCanFit(worst_case_length)) {
+ DisallowHeapAllocation no_gc;
+ Vector<const SrcChar> vector = string->GetCharVector<SrcChar>();
+ IncrementalStringBuilder::NoExtendBuilder<DestChar> no_extend(
+ &builder_, worst_case_length);
+ SerializeStringUnchecked_(vector, &no_extend);
+ } else {
+ FlatStringReader reader(isolate_, string);
+ for (int i = 0; i < reader.length(); i++) {
+ SrcChar c = reader.Get<SrcChar>(i);
+ if (DoNotEscape(c)) {
+ builder_.Append<SrcChar, DestChar>(c);
+ } else {
+ builder_.AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
+ }
+ }
+ }
+
+ builder_.Append<uint8_t, DestChar>('"');
+}
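
The estimate in SerializeString_ trades tightness for speed: the longest escape sequence is six characters (a \u00xx escape), so length * 6 would bound the output exactly, but length << 3 (times eight) needs only a shift and is still a safe upper bound; assuming the string length limit noted earlier, it also stays below INT_MAX. A one-line sketch of the bound:

```cpp
#include <cassert>

// Sketch of the worst-case bound: six characters per escaped source
// character would be exact; eight (one shift) is looser but cheaper.
int WorstCaseEscapedLength(int length) { return length << 3; }

int main() {
  assert(WorstCaseEscapedLength(4) == 32);
  assert(WorstCaseEscapedLength(100) >= 100 * 6);  // always an upper bound
}
```
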
+
+template <>
+bool JsonStringifier::DoNotEscape(uint8_t c) {
+ return c >= '#' && c <= '~' && c != '\\';
+}
+
+template <>
+bool JsonStringifier::DoNotEscape(uint16_t c) {
+ return c >= '#' && c != '\\' && c != 0x7f;
+}
+
+void JsonStringifier::NewLine() {
+ if (gap_ == nullptr) return;
+ builder_.AppendCharacter('\n');
+ for (int i = 0; i < indent_; i++) builder_.AppendCString(gap_);
+}
+
+void JsonStringifier::Separator(bool first) {
+ if (!first) builder_.AppendCharacter(',');
+ NewLine();
+}
+
+void JsonStringifier::SerializeDeferredKey(bool deferred_comma,
+ Handle<Object> deferred_key) {
+ Separator(!deferred_comma);
+ SerializeString(Handle<String>::cast(deferred_key));
+ builder_.AppendCharacter(':');
+ if (gap_ != nullptr) builder_.AppendCharacter(' ');
+}
+
+void JsonStringifier::SerializeString(Handle<String> object) {
+ object = String::Flatten(object);
+ if (builder_.CurrentEncoding() == String::ONE_BYTE_ENCODING) {
+ if (object->IsOneByteRepresentationUnderneath()) {
+ SerializeString_<uint8_t, uint8_t>(object);
+ } else {
+ builder_.ChangeEncoding();
+ SerializeString(object);
+ }
+ } else {
+ if (object->IsOneByteRepresentationUnderneath()) {
+ SerializeString_<uint8_t, uc16>(object);
+ } else {
+ SerializeString_<uc16, uc16>(object);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index b40a78249f..e72bd9d3d2 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -5,36 +5,33 @@
#ifndef V8_JSON_STRINGIFIER_H_
#define V8_JSON_STRINGIFIER_H_
-#include "src/conversions.h"
-#include "src/lookup.h"
-#include "src/messages.h"
+#include "src/objects.h"
#include "src/string-builder.h"
-#include "src/utils.h"
namespace v8 {
namespace internal {
-class BasicJsonStringifier BASE_EMBEDDED {
+class JsonStringifier BASE_EMBEDDED {
public:
- explicit BasicJsonStringifier(Isolate* isolate);
+ explicit JsonStringifier(Isolate* isolate);
- MUST_USE_RESULT MaybeHandle<Object> Stringify(Handle<Object> object);
+ ~JsonStringifier() { DeleteArray(gap_); }
- MUST_USE_RESULT INLINE(static MaybeHandle<Object> StringifyString(
- Isolate* isolate,
- Handle<String> object));
+ MUST_USE_RESULT MaybeHandle<Object> Stringify(Handle<Object> object,
+ Handle<Object> replacer,
+ Handle<Object> gap);
private:
enum Result { UNCHANGED, SUCCESS, EXCEPTION };
+ bool InitializeReplacer(Handle<Object> replacer);
+ bool InitializeGap(Handle<Object> gap);
+
MUST_USE_RESULT MaybeHandle<Object> ApplyToJsonFunction(
Handle<Object> object,
Handle<Object> key);
-
- Result SerializeGeneric(Handle<Object> object,
- Handle<Object> key,
- bool deferred_comma,
- bool deferred_key);
+ MUST_USE_RESULT MaybeHandle<Object> ApplyReplacerFunction(
+ Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder);
// Entry point to serialize the object.
INLINE(Result SerializeObject(Handle<Object> obj)) {
@@ -64,11 +61,8 @@ class BasicJsonStringifier BASE_EMBEDDED {
template <bool deferred_string_key>
Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
- void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) {
- if (deferred_comma) builder_.AppendCharacter(',');
- SerializeString(Handle<String>::cast(deferred_key));
- builder_.AppendCharacter(':');
- }
+ INLINE(void SerializeDeferredKey(bool deferred_comma,
+ Handle<Object> deferred_key));
Result SerializeSmi(Smi* object);
@@ -82,8 +76,10 @@ class BasicJsonStringifier BASE_EMBEDDED {
INLINE(Result SerializeJSArray(Handle<JSArray> object));
INLINE(Result SerializeJSObject(Handle<JSObject> object));
- Result SerializeJSArraySlow(Handle<JSArray> object, uint32_t start,
- uint32_t length);
+ Result SerializeJSProxy(Handle<JSProxy> object);
+ Result SerializeJSReceiverSlow(Handle<JSReceiver> object);
+ Result SerializeArrayLikeSlow(Handle<JSReceiver> object, uint32_t start,
+ uint32_t length);
void SerializeString(Handle<String> object);
@@ -98,6 +94,14 @@ class BasicJsonStringifier BASE_EMBEDDED {
template <typename Char>
INLINE(static bool DoNotEscape(Char c));
+ INLINE(void NewLine());
+ INLINE(void Indent() { indent_++; });
+ INLINE(void Unindent() { indent_--; });
+ INLINE(void Separator(bool first));
+
+ Handle<JSReceiver> CurrentHolder(Handle<Object> value,
+                                   Handle<Object> initial_holder);
+
Result StackPush(Handle<Object> object);
void StackPop();
@@ -107,579 +111,15 @@ class BasicJsonStringifier BASE_EMBEDDED {
IncrementalStringBuilder builder_;
Handle<String> tojson_string_;
Handle<JSArray> stack_;
+ Handle<FixedArray> property_list_;
+ Handle<JSReceiver> replacer_function_;
+ uc16* gap_;
+ int indent_;
static const int kJsonEscapeTableEntrySize = 8;
static const char* const JsonEscapeTable;
};
-
-// Translation table to escape Latin1 characters.
-// Table entries start at a multiple of 8 and are null-terminated.
-const char* const BasicJsonStringifier::JsonEscapeTable =
- "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
- "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
- "\\b\0 \\t\0 \\n\0 \\u000b\0 "
- "\\f\0 \\r\0 \\u000e\0 \\u000f\0 "
- "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
- "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
- "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
- "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
- " \0 !\0 \\\"\0 #\0 "
- "$\0 %\0 &\0 '\0 "
- "(\0 )\0 *\0 +\0 "
- ",\0 -\0 .\0 /\0 "
- "0\0 1\0 2\0 3\0 "
- "4\0 5\0 6\0 7\0 "
- "8\0 9\0 :\0 ;\0 "
- "<\0 =\0 >\0 ?\0 "
- "@\0 A\0 B\0 C\0 "
- "D\0 E\0 F\0 G\0 "
- "H\0 I\0 J\0 K\0 "
- "L\0 M\0 N\0 O\0 "
- "P\0 Q\0 R\0 S\0 "
- "T\0 U\0 V\0 W\0 "
- "X\0 Y\0 Z\0 [\0 "
- "\\\\\0 ]\0 ^\0 _\0 "
- "`\0 a\0 b\0 c\0 "
- "d\0 e\0 f\0 g\0 "
- "h\0 i\0 j\0 k\0 "
- "l\0 m\0 n\0 o\0 "
- "p\0 q\0 r\0 s\0 "
- "t\0 u\0 v\0 w\0 "
- "x\0 y\0 z\0 {\0 "
- "|\0 }\0 ~\0 \177\0 "
- "\200\0 \201\0 \202\0 \203\0 "
- "\204\0 \205\0 \206\0 \207\0 "
- "\210\0 \211\0 \212\0 \213\0 "
- "\214\0 \215\0 \216\0 \217\0 "
- "\220\0 \221\0 \222\0 \223\0 "
- "\224\0 \225\0 \226\0 \227\0 "
- "\230\0 \231\0 \232\0 \233\0 "
- "\234\0 \235\0 \236\0 \237\0 "
- "\240\0 \241\0 \242\0 \243\0 "
- "\244\0 \245\0 \246\0 \247\0 "
- "\250\0 \251\0 \252\0 \253\0 "
- "\254\0 \255\0 \256\0 \257\0 "
- "\260\0 \261\0 \262\0 \263\0 "
- "\264\0 \265\0 \266\0 \267\0 "
- "\270\0 \271\0 \272\0 \273\0 "
- "\274\0 \275\0 \276\0 \277\0 "
- "\300\0 \301\0 \302\0 \303\0 "
- "\304\0 \305\0 \306\0 \307\0 "
- "\310\0 \311\0 \312\0 \313\0 "
- "\314\0 \315\0 \316\0 \317\0 "
- "\320\0 \321\0 \322\0 \323\0 "
- "\324\0 \325\0 \326\0 \327\0 "
- "\330\0 \331\0 \332\0 \333\0 "
- "\334\0 \335\0 \336\0 \337\0 "
- "\340\0 \341\0 \342\0 \343\0 "
- "\344\0 \345\0 \346\0 \347\0 "
- "\350\0 \351\0 \352\0 \353\0 "
- "\354\0 \355\0 \356\0 \357\0 "
- "\360\0 \361\0 \362\0 \363\0 "
- "\364\0 \365\0 \366\0 \367\0 "
- "\370\0 \371\0 \372\0 \373\0 "
- "\374\0 \375\0 \376\0 \377\0 ";
-
-
-BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
- : isolate_(isolate), builder_(isolate) {
- tojson_string_ = factory()->toJSON_string();
- stack_ = factory()->NewJSArray(8);
-}
-
-
-MaybeHandle<Object> BasicJsonStringifier::Stringify(Handle<Object> object) {
- Result result = SerializeObject(object);
- if (result == UNCHANGED) return factory()->undefined_value();
- if (result == SUCCESS) return builder_.Finish();
- DCHECK(result == EXCEPTION);
- return MaybeHandle<Object>();
-}
-
-
-MaybeHandle<Object> BasicJsonStringifier::StringifyString(
- Isolate* isolate, Handle<String> object) {
- static const int kJsonQuoteWorstCaseBlowup = 6;
- static const int kSpaceForQuotes = 2;
- int worst_case_length =
- object->length() * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
-
- if (worst_case_length > 32 * KB) { // Slow path if too large.
- BasicJsonStringifier stringifier(isolate);
- return stringifier.Stringify(object);
- }
-
- object = String::Flatten(object);
- DCHECK(object->IsFlat());
- Handle<SeqString> result;
- if (object->IsOneByteRepresentationUnderneath()) {
- result = isolate->factory()
- ->NewRawOneByteString(worst_case_length)
- .ToHandleChecked();
- IncrementalStringBuilder::NoExtendString<uint8_t> no_extend(
- result, worst_case_length);
- no_extend.Append('\"');
- SerializeStringUnchecked_(object->GetFlatContent().ToOneByteVector(),
- &no_extend);
- no_extend.Append('\"');
- return no_extend.Finalize();
- } else {
- result = isolate->factory()
- ->NewRawTwoByteString(worst_case_length)
- .ToHandleChecked();
- IncrementalStringBuilder::NoExtendString<uc16> no_extend(result,
- worst_case_length);
- no_extend.Append('\"');
- SerializeStringUnchecked_(object->GetFlatContent().ToUC16Vector(),
- &no_extend);
- no_extend.Append('\"');
- return no_extend.Finalize();
- }
-}
-
-
-MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction(
- Handle<Object> object, Handle<Object> key) {
- LookupIterator it(object, tojson_string_,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- Handle<Object> fun;
- ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
- if (!fun->IsCallable()) return object;
-
- // Call toJSON function.
- if (key->IsSmi()) key = factory()->NumberToString(key);
- Handle<Object> argv[] = { key };
- HandleScope scope(isolate_);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, object,
- Execution::Call(isolate_, fun, object, 1, argv),
- Object);
- return scope.CloseAndEscape(object);
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
- Handle<Object> object) {
- StackLimitCheck check(isolate_);
- if (check.HasOverflowed()) {
- isolate_->StackOverflow();
- return EXCEPTION;
- }
-
- int length = Smi::cast(stack_->length())->value();
- {
- DisallowHeapAllocation no_allocation;
- FixedArray* elements = FixedArray::cast(stack_->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *object) {
- AllowHeapAllocation allow_to_return_error;
- Handle<Object> error =
- factory()->NewTypeError(MessageTemplate::kCircularStructure);
- isolate_->Throw(*error);
- return EXCEPTION;
- }
- }
- }
- JSArray::SetLength(stack_, length + 1);
- FixedArray::cast(stack_->elements())->set(length, *object);
- return SUCCESS;
-}
-
-
-void BasicJsonStringifier::StackPop() {
- int length = Smi::cast(stack_->length())->value();
- stack_->set_length(Smi::FromInt(length - 1));
-}
-
-
-template <bool deferred_string_key>
-BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
- Handle<Object> object, bool comma, Handle<Object> key) {
- if (object->IsJSObject()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, object,
- ApplyToJsonFunction(object, key),
- EXCEPTION);
- }
-
- if (object->IsSmi()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeSmi(Smi::cast(*object));
- }
-
- switch (HeapObject::cast(*object)->map()->instance_type()) {
- case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
- case ODDBALL_TYPE:
- switch (Oddball::cast(*object)->kind()) {
- case Oddball::kFalse:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- builder_.AppendCString("false");
- return SUCCESS;
- case Oddball::kTrue:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- builder_.AppendCString("true");
- return SUCCESS;
- case Oddball::kNull:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- builder_.AppendCString("null");
- return SUCCESS;
- default:
- return UNCHANGED;
- }
- case JS_ARRAY_TYPE:
- if (object->IsAccessCheckNeeded()) break;
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSArray(Handle<JSArray>::cast(object));
- case JS_VALUE_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSValue(Handle<JSValue>::cast(object));
- default:
- if (object->IsString()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- SerializeString(Handle<String>::cast(object));
- return SUCCESS;
- } else if (object->IsJSObject()) {
- if (object->IsCallable()) return UNCHANGED;
- // Go to slow path for global proxy and objects requiring access checks.
- if (object->IsAccessCheckNeeded() || object->IsJSGlobalProxy()) break;
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSObject(Handle<JSObject>::cast(object));
- }
- }
-
- return SerializeGeneric(object, key, comma, deferred_string_key);
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
- Handle<Object> object,
- Handle<Object> key,
- bool deferred_comma,
- bool deferred_key) {
- Handle<JSFunction> fun = isolate_->json_serialize_adapter();
- Handle<Object> argv[] = { key, object };
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, result, Execution::Call(isolate_, fun, object, 2, argv),
- EXCEPTION);
- if (result->IsUndefined()) return UNCHANGED;
- if (deferred_key) {
- if (key->IsSmi()) key = factory()->NumberToString(key);
- SerializeDeferredKey(deferred_comma, key);
- }
-
- builder_.AppendString(Handle<String>::cast(result));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
- Handle<JSValue> object) {
- String* class_name = object->class_name();
- if (class_name == isolate_->heap()->String_string()) {
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, value, Object::ToString(isolate_, object), EXCEPTION);
- SerializeString(Handle<String>::cast(value));
- } else if (class_name == isolate_->heap()->Number_string()) {
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, value, Object::ToNumber(object),
- EXCEPTION);
- if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
- SerializeHeapNumber(Handle<HeapNumber>::cast(value));
- } else if (class_name == isolate_->heap()->Boolean_string()) {
- Object* value = JSValue::cast(*object)->value();
- DCHECK(value->IsBoolean());
- builder_.AppendCString(value->IsTrue() ? "true" : "false");
- } else {
- // ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
- CHECK(!object->IsAccessCheckNeeded());
- CHECK(!object->IsJSGlobalProxy());
- return SerializeJSObject(object);
- }
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
- static const int kBufferSize = 100;
- char chars[kBufferSize];
- Vector<char> buffer(chars, kBufferSize);
- builder_.AppendCString(IntToCString(object->value(), buffer));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
- double number) {
- if (std::isinf(number) || std::isnan(number)) {
- builder_.AppendCString("null");
- return SUCCESS;
- }
- static const int kBufferSize = 100;
- char chars[kBufferSize];
- Vector<char> buffer(chars, kBufferSize);
- builder_.AppendCString(DoubleToCString(number, buffer));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
- Handle<JSArray> object) {
- HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
- if (stack_push != SUCCESS) return stack_push;
- uint32_t length = 0;
- CHECK(object->length()->ToArrayLength(&length));
- builder_.AppendCharacter('[');
- switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(object->elements()),
- isolate_);
- for (uint32_t i = 0; i < length; i++) {
- if (i > 0) builder_.AppendCharacter(',');
- SerializeSmi(Smi::cast(elements->get(i)));
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS: {
- // Empty array is FixedArray but not FixedDoubleArray.
- if (length == 0) break;
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(object->elements()), isolate_);
- for (uint32_t i = 0; i < length; i++) {
- if (i > 0) builder_.AppendCharacter(',');
- SerializeDouble(elements->get_scalar(i));
- }
- break;
- }
- case FAST_ELEMENTS: {
- Handle<Object> old_length(object->length(), isolate_);
- for (uint32_t i = 0; i < length; i++) {
- if (object->length() != *old_length ||
- object->GetElementsKind() != FAST_ELEMENTS) {
- Result result = SerializeJSArraySlow(object, i, length);
- if (result != SUCCESS) return result;
- break;
- }
- if (i > 0) builder_.AppendCharacter(',');
- Result result = SerializeElement(
- isolate_,
- Handle<Object>(FixedArray::cast(object->elements())->get(i),
- isolate_),
- i);
- if (result == SUCCESS) continue;
- if (result == UNCHANGED) {
- builder_.AppendCString("null");
- } else {
- return result;
- }
- }
- break;
- }
- // The FAST_HOLEY_* cases could be handled in a faster way. They resemble
- // the non-holey cases except that a lookup is necessary for holes.
- default: {
- Result result = SerializeJSArraySlow(object, 0, length);
- if (result != SUCCESS) return result;
- break;
- }
- }
- builder_.AppendCharacter(']');
- StackPop();
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
- Handle<JSArray> object, uint32_t start, uint32_t length) {
- for (uint32_t i = start; i < length; i++) {
- if (i > 0) builder_.AppendCharacter(',');
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, element, JSReceiver::GetElement(isolate_, object, i),
- EXCEPTION);
- if (element->IsUndefined()) {
- builder_.AppendCString("null");
- } else {
- Result result = SerializeElement(isolate_, element, i);
- if (result == SUCCESS) continue;
- if (result == UNCHANGED) {
- builder_.AppendCString("null");
- } else {
- return result;
- }
- }
- }
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
- Handle<JSObject> object) {
- HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
- if (stack_push != SUCCESS) return stack_push;
- DCHECK(!object->IsJSGlobalProxy() && !object->IsJSGlobalObject());
-
- builder_.AppendCharacter('{');
- bool comma = false;
-
- if (object->HasFastProperties() &&
- !object->HasIndexedInterceptor() &&
- !object->HasNamedInterceptor() &&
- object->elements()->length() == 0) {
- Handle<Map> map(object->map());
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- Handle<Name> name(map->instance_descriptors()->GetKey(i), isolate_);
- // TODO(rossberg): Should this throw?
- if (!name->IsString()) continue;
- Handle<String> key = Handle<String>::cast(name);
- PropertyDetails details = map->instance_descriptors()->GetDetails(i);
- if (details.IsDontEnum()) continue;
- Handle<Object> property;
- if (details.type() == DATA && *map == object->map()) {
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- Isolate* isolate = object->GetIsolate();
- if (object->IsUnboxedDoubleField(field_index)) {
- double value = object->RawFastDoublePropertyAt(field_index);
- property = isolate->factory()->NewHeapNumber(value);
-
- } else {
- property = handle(object->RawFastPropertyAt(field_index), isolate);
- }
- } else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, property,
- Object::GetPropertyOrElement(object, key),
- EXCEPTION);
- }
- Result result = SerializeProperty(property, comma, key);
- if (!comma && result == SUCCESS) comma = true;
- if (result == EXCEPTION) return result;
- }
- } else {
- Handle<FixedArray> contents;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, contents,
- JSReceiver::GetKeys(object, OWN_ONLY, ENUMERABLE_STRINGS), EXCEPTION);
-
- for (int i = 0; i < contents->length(); i++) {
- Object* key = contents->get(i);
- Handle<String> key_handle;
- MaybeHandle<Object> maybe_property;
- if (key->IsString()) {
- key_handle = Handle<String>(String::cast(key), isolate_);
- maybe_property = Object::GetPropertyOrElement(object, key_handle);
- } else {
- DCHECK(key->IsNumber());
- key_handle = factory()->NumberToString(Handle<Object>(key, isolate_));
- if (key->IsSmi()) {
- maybe_property =
- JSReceiver::GetElement(isolate_, object, Smi::cast(key)->value());
- } else {
- maybe_property = Object::GetPropertyOrElement(object, key_handle);
- }
- }
- Handle<Object> property;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, property, maybe_property, EXCEPTION);
- Result result = SerializeProperty(property, comma, key_handle);
- if (!comma && result == SUCCESS) comma = true;
- if (result == EXCEPTION) return result;
- }
- }
-
- builder_.AppendCharacter('}');
- StackPop();
- return SUCCESS;
-}
-
-
-template <typename SrcChar, typename DestChar>
-void BasicJsonStringifier::SerializeStringUnchecked_(
- Vector<const SrcChar> src,
- IncrementalStringBuilder::NoExtend<DestChar>* dest) {
- // Assert that uc16 character is not truncated down to 8 bit.
- // The <uc16, char> version of this method must not be called.
- DCHECK(sizeof(DestChar) >= sizeof(SrcChar));
-
- for (int i = 0; i < src.length(); i++) {
- SrcChar c = src[i];
- if (DoNotEscape(c)) {
- dest->Append(c);
- } else {
- dest->AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
- }
- }
-}
-
-
-template <typename SrcChar, typename DestChar>
-void BasicJsonStringifier::SerializeString_(Handle<String> string) {
- int length = string->length();
- builder_.Append<uint8_t, DestChar>('"');
- // We make a rough estimate to find out if the current string can be
- // serialized without allocating a new string part. The worst case length of
-  // an escaped character is 6. Shifting the remaining string length left by 3
- // is a more pessimistic estimate, but faster to calculate.
- int worst_case_length = length << 3;
- if (builder_.CurrentPartCanFit(worst_case_length)) {
- DisallowHeapAllocation no_gc;
- Vector<const SrcChar> vector = string->GetCharVector<SrcChar>();
- IncrementalStringBuilder::NoExtendBuilder<DestChar> no_extend(
- &builder_, worst_case_length);
- SerializeStringUnchecked_(vector, &no_extend);
- } else {
- FlatStringReader reader(isolate_, string);
- for (int i = 0; i < reader.length(); i++) {
- SrcChar c = reader.Get<SrcChar>(i);
- if (DoNotEscape(c)) {
- builder_.Append<SrcChar, DestChar>(c);
- } else {
- builder_.AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
- }
- }
- }
-
- builder_.Append<uint8_t, DestChar>('"');
-}
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(uint8_t c) {
- return c >= '#' && c <= '~' && c != '\\';
-}
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(uint16_t c) {
- return c >= '#' && c != '\\' && c != 0x7f;
-}
-
-
-void BasicJsonStringifier::SerializeString(Handle<String> object) {
- object = String::Flatten(object);
- if (builder_.CurrentEncoding() == String::ONE_BYTE_ENCODING) {
- if (object->IsOneByteRepresentationUnderneath()) {
- SerializeString_<uint8_t, uint8_t>(object);
- } else {
- builder_.ChangeEncoding();
- SerializeString(object);
- }
- } else {
- if (object->IsOneByteRepresentationUnderneath()) {
- SerializeString_<uint8_t, uc16>(object);
- } else {
- SerializeString_<uc16, uc16>(object);
- }
- }
-}
-
} // namespace internal
} // namespace v8
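
The serializer removed above relied on two small tricks worth noting: DoNotEscape passes through everything from '#' (0x23) to '~' except the backslash, so '"' (0x22) and all control characters take the escape-table path, and the worst-case output estimate uses length << 3 (8 output characters per input character) because a shift is cheaper than multiplying by the true worst case of 6. A minimal standalone sketch of both, assuming nothing beyond the C++ standard library:

    #include <cassert>
    #include <cstdint>

    // Mirrors the one-byte DoNotEscape specialization removed above.
    static bool DoNotEscapeLatin1(uint8_t c) {
      return c >= '#' && c <= '~' && c != '\\';
    }

    int main() {
      assert(DoNotEscapeLatin1('a'));
      assert(!DoNotEscapeLatin1('"'));   // 0x22 < 0x23: must be escaped
      assert(!DoNotEscapeLatin1('\\'));
      assert(!DoNotEscapeLatin1('\n'));  // control char -> "\n" or "\u000a"
      // Worst-case escape is 6 characters (e.g. "\u001f"); length << 3
      // reserves 8 per character, more pessimistic but avoids the multiply.
      int length = 100;
      assert((length << 3) >= length * 6);
      return 0;
    }
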
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 8645681692..c6e31e3f23 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -4,8 +4,10 @@
#include "src/keys.h"
+#include "src/api-arguments.h"
#include "src/elements.h"
#include "src/factory.h"
+#include "src/identity-map.h"
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
#include "src/property-descriptor.h"
@@ -15,187 +17,72 @@ namespace v8 {
namespace internal {
KeyAccumulator::~KeyAccumulator() {
- for (size_t i = 0; i < elements_.size(); i++) {
- delete elements_[i];
- }
-}
-
-Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
- if (length_ == 0) {
- return isolate_->factory()->empty_fixed_array();
- }
- // Make sure we have all the lengths collected.
- NextPrototype();
-
- if (type_ == OWN_ONLY && !ownProxyKeys_.is_null()) {
- return ownProxyKeys_;
- }
- // Assemble the result array by first adding the element keys and then the
- // property keys. We use the total number of String + Symbol keys per level in
- // |level_lengths_| and the available element keys in the corresponding bucket
- // in |elements_| to deduce the number of keys to take from the
- // |string_properties_| and |symbol_properties_| set.
- Handle<FixedArray> result = isolate_->factory()->NewFixedArray(length_);
- int insertion_index = 0;
- int string_properties_index = 0;
- int symbol_properties_index = 0;
- // String and Symbol lengths always come in pairs:
- size_t max_level = level_lengths_.size() / 2;
- for (size_t level = 0; level < max_level; level++) {
- int num_string_properties = level_lengths_[level * 2];
- int num_symbol_properties = level_lengths_[level * 2 + 1];
- int num_elements = 0;
- if (num_string_properties < 0) {
- // If the |num_string_properties| is negative, the current level contains
- // properties from a proxy, hence we skip the integer keys in |elements_|
- // since proxies define the complete ordering.
- num_string_properties = -num_string_properties;
- } else if (level < elements_.size()) {
- // Add the element indices for this prototype level.
- std::vector<uint32_t>* elements = elements_[level];
- num_elements = static_cast<int>(elements->size());
- for (int i = 0; i < num_elements; i++) {
- Handle<Object> key;
- if (convert == KEEP_NUMBERS) {
- key = isolate_->factory()->NewNumberFromUint(elements->at(i));
- } else {
- key = isolate_->factory()->Uint32ToString(elements->at(i));
- }
- result->set(insertion_index, *key);
- insertion_index++;
- }
- }
- // Add the string property keys for this prototype level.
- for (int i = 0; i < num_string_properties; i++) {
- Object* key = string_properties_->KeyAt(string_properties_index);
- result->set(insertion_index, key);
- insertion_index++;
- string_properties_index++;
- }
- // Add the symbol property keys for this prototype level.
- for (int i = 0; i < num_symbol_properties; i++) {
- Object* key = symbol_properties_->KeyAt(symbol_properties_index);
- result->set(insertion_index, key);
- insertion_index++;
- symbol_properties_index++;
- }
- if (FLAG_trace_for_in_enumerate) {
- PrintF("| strings=%d symbols=%d elements=%i ", num_string_properties,
- num_symbol_properties, num_elements);
- }
- }
- if (FLAG_trace_for_in_enumerate) {
- PrintF("|| prototypes=%zu ||\n", max_level);
- }
-
- DCHECK_EQ(insertion_index, length_);
- return result;
}
namespace {
-bool AccumulatorHasKey(std::vector<uint32_t>* sub_elements, uint32_t key) {
- return std::binary_search(sub_elements->begin(), sub_elements->end(), key);
+static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
+ int len = array->length();
+ for (int i = 0; i < len; i++) {
+ Object* e = array->get(i);
+ if (!(e->IsName() || e->IsNumber())) return false;
+ }
+ return true;
}
} // namespace
-bool KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
- return AddKey(handle(key, isolate_), convert);
+// static
+MaybeHandle<FixedArray> KeyAccumulator::GetKeys(
+ Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
+ GetKeysConversion keys_conversion, bool is_for_in) {
+ Isolate* isolate = object->GetIsolate();
+ FastKeyAccumulator accumulator(isolate, object, mode, filter);
+ accumulator.set_is_for_in(is_for_in);
+ return accumulator.GetKeys(keys_conversion);
}
-bool KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
- if (key->IsSymbol()) {
- if (filter_ & SKIP_SYMBOLS) return false;
- if (Handle<Symbol>::cast(key)->is_private()) return false;
- return AddSymbolKey(key);
- }
- if (filter_ & SKIP_STRINGS) return false;
- // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
- DCHECK_LE(0, level_string_length_);
- // In some cases (e.g. proxies) we might receive String-converted ints that
- // should be added to the elements list instead of the properties. For
- // proxies we have to convert as well but also respect the original order.
- // Therefore we add a converted key to both sides.
- if (convert == CONVERT_TO_ARRAY_INDEX || convert == PROXY_MAGIC) {
- uint32_t index = 0;
- int prev_length = length_;
- int prev_proto = level_string_length_;
- if ((key->IsString() && Handle<String>::cast(key)->AsArrayIndex(&index)) ||
- key->ToArrayIndex(&index)) {
- bool key_was_added = AddIntegerKey(index);
- if (convert == CONVERT_TO_ARRAY_INDEX) return key_was_added;
- if (convert == PROXY_MAGIC) {
- // If we had an array index (number) and it wasn't added, the key
- // already existed before, hence we cannot add it to the properties
- // keys as it would lead to duplicate entries.
- if (!key_was_added) {
- return false;
- }
- length_ = prev_length;
- level_string_length_ = prev_proto;
- }
- }
+Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
+ if (keys_.is_null()) {
+ return isolate_->factory()->empty_fixed_array();
}
- return AddStringKey(key, convert);
-}
-
-bool KeyAccumulator::AddKey(uint32_t key) { return AddIntegerKey(key); }
-
-bool KeyAccumulator::AddIntegerKey(uint32_t key) {
- // Make sure we do not add keys to a proxy-level (see AddKeysFromProxy).
- // We mark proxy-levels with a negative length
- DCHECK_LE(0, level_string_length_);
- // Binary search over all but the last level. The last one might not be
- // sorted yet.
- for (size_t i = 1; i < elements_.size(); i++) {
- if (AccumulatorHasKey(elements_[i - 1], key)) return false;
+ if (mode_ == KeyCollectionMode::kOwnOnly &&
+ keys_->map() == isolate_->heap()->fixed_array_map()) {
+ return Handle<FixedArray>::cast(keys_);
}
- elements_.back()->push_back(key);
- length_++;
- return true;
+ USE(ContainsOnlyValidKeys);
+ Handle<FixedArray> result =
+ OrderedHashSet::ConvertToKeysArray(keys(), convert);
+ DCHECK(ContainsOnlyValidKeys(result));
+ return result;
}
-bool KeyAccumulator::AddStringKey(Handle<Object> key,
- AddKeyConversion convert) {
- if (string_properties_.is_null()) {
- string_properties_ = OrderedHashSet::Allocate(isolate_, 16);
- }
- // TODO(cbruni): remove this conversion once we throw the correct TypeError
- // for non-string/symbol elements returned by proxies
- if (convert == PROXY_MAGIC && key->IsNumber()) {
- key = isolate_->factory()->NumberToString(key);
- }
- int prev_size = string_properties_->NumberOfElements();
- string_properties_ = OrderedHashSet::Add(string_properties_, key);
- if (prev_size < string_properties_->NumberOfElements()) {
- length_++;
- level_string_length_++;
- return true;
- } else {
- return false;
- }
+void KeyAccumulator::AddKey(Object* key, AddKeyConversion convert) {
+ AddKey(handle(key, isolate_), convert);
}
-bool KeyAccumulator::AddSymbolKey(Handle<Object> key) {
- if (symbol_properties_.is_null()) {
- symbol_properties_ = OrderedHashSet::Allocate(isolate_, 16);
+void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
+ if (key->IsSymbol()) {
+ if (filter_ & SKIP_SYMBOLS) return;
+ if (Handle<Symbol>::cast(key)->is_private()) return;
+ } else if (filter_ & SKIP_STRINGS) {
+ return;
}
- int prev_size = symbol_properties_->NumberOfElements();
- symbol_properties_ = OrderedHashSet::Add(symbol_properties_, key);
- if (prev_size < symbol_properties_->NumberOfElements()) {
- length_++;
- level_symbol_length_++;
- return true;
- } else {
- return false;
+ if (IsShadowed(key)) return;
+ if (keys_.is_null()) {
+ keys_ = OrderedHashSet::Allocate(isolate_, 16);
}
+ uint32_t index;
+ if (convert == CONVERT_TO_ARRAY_INDEX && key->IsString() &&
+ Handle<String>::cast(key)->AsArrayIndex(&index)) {
+ key = isolate_->factory()->NewNumberFromUint(index);
+ }
+ keys_ = OrderedHashSet::Add(keys(), key);
}
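
The rewritten AddKey folds the old element/string/symbol bookkeeping into one OrderedHashSet, so duplicates are dropped while insertion order is kept, and with CONVERT_TO_ARRAY_INDEX a string key that parses as an array index is normalized to a number first, so "10" and 10 collapse into one entry. A rough standalone sketch of the index test (simplified relative to String::AsArrayIndex, which accepts indices up to 2^32-2):

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Hypothetical helper: returns true and fills *index when |key| is a
    // canonical array index. Simplified: rejects 10-digit candidates.
    static bool AsArrayIndex(const std::string& key, uint32_t* index) {
      if (key.empty() || key.size() > 9) return false;
      if (key.size() > 1 && key[0] == '0') return false;  // no leading zeros
      uint32_t value = 0;
      for (char c : key) {
        if (c < '0' || c > '9') return false;
        value = value * 10 + static_cast<uint32_t>(c - '0');
      }
      *index = value;
      return true;
    }

    int main() {
      uint32_t index = 0;
      assert(AsArrayIndex("10", &index) && index == 10);
      assert(!AsArrayIndex("01", &index));  // not canonical
      assert(!AsArrayIndex("a", &index));   // stays a string key
      return 0;
    }
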
void KeyAccumulator::AddKeys(Handle<FixedArray> array,
AddKeyConversion convert) {
int add_length = array->length();
- if (add_length == 0) return;
for (int i = 0; i < add_length; i++) {
Handle<Object> current(array->get(i), isolate_);
AddKey(current, convert);
@@ -209,24 +96,15 @@ void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
accessor->AddElementsToKeyAccumulator(array_like, this, convert);
}
-void KeyAccumulator::AddKeysFromProxy(Handle<JSObject> array_like) {
- // Proxies define a complete list of keys with no distinction of
- // elements and properties, which breaks the normal assumption for the
- // KeyAccumulator.
- AddKeys(array_like, PROXY_MAGIC);
- // Invert the current length to indicate a present proxy, so we can ignore
- // element keys for this level. Otherwise we would not fully respect the order
- // given by the proxy.
- level_string_length_ = -level_string_length_;
-}
-
-MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
+MaybeHandle<FixedArray> FilterProxyKeys(KeyAccumulator* accumulator,
+ Handle<JSProxy> owner,
Handle<FixedArray> keys,
PropertyFilter filter) {
if (filter == ALL_PROPERTIES) {
// Nothing to do.
return keys;
}
+ Isolate* isolate = accumulator->isolate();
int store_position = 0;
for (int i = 0; i < keys->length(); ++i) {
Handle<Name> key(Name::cast(keys->get(i)), isolate);
@@ -236,7 +114,11 @@ MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
Maybe<bool> found =
JSProxy::GetOwnPropertyDescriptor(isolate, owner, key, &desc);
MAYBE_RETURN(found, MaybeHandle<FixedArray>());
- if (!found.FromJust() || !desc.enumerable()) continue; // Skip this key.
+ if (!found.FromJust()) continue;
+ if (!desc.enumerable()) {
+ accumulator->AddShadowingKey(key);
+ continue;
+ }
}
// Keep this key.
if (store_position != i) {
@@ -250,66 +132,83 @@ MaybeHandle<FixedArray> FilterProxyKeys(Isolate* isolate, Handle<JSProxy> owner,
}
// Returns "nothing" in case of exception, "true" on success.
-Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
- Handle<FixedArray> keys) {
- if (filter_proxy_keys_) {
+Maybe<bool> KeyAccumulator::AddKeysFromJSProxy(Handle<JSProxy> proxy,
+ Handle<FixedArray> keys) {
+ // Postpone the enumerable check for for-in to the ForInFilter step.
+ if (!is_for_in_) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, keys, FilterProxyKeys(isolate_, proxy, keys, filter_),
+ isolate_, keys, FilterProxyKeys(this, proxy, keys, filter_),
Nothing<bool>());
+ if (mode_ == KeyCollectionMode::kOwnOnly) {
+ // If we collect only the keys from a JSProxy do not sort or deduplicate.
+ keys_ = keys;
+ return Just(true);
+ }
}
- // Proxies define a complete list of keys with no distinction of
- // elements and properties, which breaks the normal assumption for the
- // KeyAccumulator.
- if (type_ == OWN_ONLY) {
- ownProxyKeys_ = keys;
- level_string_length_ = keys->length();
- length_ = level_string_length_;
- } else {
- AddKeys(keys, PROXY_MAGIC);
- }
- // Invert the current length to indicate a present proxy, so we can ignore
- // element keys for this level. Otherwise we would not fully respect the order
- // given by the proxy.
- level_string_length_ = -level_string_length_;
+ AddKeys(keys, is_for_in_ ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
return Just(true);
}
-void KeyAccumulator::AddElementKeysFromInterceptor(
- Handle<JSObject> array_like) {
- AddKeys(array_like, CONVERT_TO_ARRAY_INDEX);
- // The interceptor might introduce duplicates for the current level, since
- // these keys get added after the object's normal element keys.
- SortCurrentElementsListRemoveDuplicates();
-}
+Maybe<bool> KeyAccumulator::CollectKeys(Handle<JSReceiver> receiver,
+ Handle<JSReceiver> object) {
+ // Proxies have no hidden prototype and we should not trigger the
+ // [[GetPrototypeOf]] trap on the last iteration when using
+ // AdvanceFollowingProxies.
+ if (mode_ == KeyCollectionMode::kOwnOnly && object->IsJSProxy()) {
+ MAYBE_RETURN(CollectOwnJSProxyKeys(receiver, Handle<JSProxy>::cast(object)),
+ Nothing<bool>());
+ return Just(true);
+ }
-void KeyAccumulator::SortCurrentElementsListRemoveDuplicates() {
- // Sort and remove duplicates from the current elements level and adjust
- // the lengths accordingly.
- auto last_level = elements_.back();
- size_t nof_removed_keys = last_level->size();
- std::sort(last_level->begin(), last_level->end());
- last_level->erase(std::unique(last_level->begin(), last_level->end()),
- last_level->end());
- // Adjust total length by the number of removed duplicates.
- nof_removed_keys -= last_level->size();
- length_ -= static_cast<int>(nof_removed_keys);
+ PrototypeIterator::WhereToEnd end = mode_ == KeyCollectionMode::kOwnOnly
+ ? PrototypeIterator::END_AT_NON_HIDDEN
+ : PrototypeIterator::END_AT_NULL;
+ for (PrototypeIterator iter(isolate_, object, kStartAtReceiver, end);
+ !iter.IsAtEnd();) {
+ // Start the shadow checks only after the first prototype has added
+ // shadowing keys.
+ if (HasShadowingKeys()) skip_shadow_check_ = false;
+ Handle<JSReceiver> current =
+ PrototypeIterator::GetCurrent<JSReceiver>(iter);
+ Maybe<bool> result = Just(false); // Dummy initialization.
+ if (current->IsJSProxy()) {
+ result = CollectOwnJSProxyKeys(receiver, Handle<JSProxy>::cast(current));
+ } else {
+ DCHECK(current->IsJSObject());
+ result = CollectOwnKeys(receiver, Handle<JSObject>::cast(current));
+ }
+ MAYBE_RETURN(result, Nothing<bool>());
+ if (!result.FromJust()) break; // |false| means "stop iterating".
+ // Iterate through proxies but ignore access checks for the ALL_CAN_READ
+ // case on API objects for OWN_ONLY keys handled in CollectOwnKeys.
+ if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
+ return Nothing<bool>();
+ }
+ if (!last_non_empty_prototype_.is_null() &&
+ *last_non_empty_prototype_ == *current) {
+ break;
+ }
+ }
+ return Just(true);
}
-void KeyAccumulator::SortCurrentElementsList() {
- if (elements_.empty()) return;
- auto element_keys = elements_.back();
- std::sort(element_keys->begin(), element_keys->end());
+bool KeyAccumulator::HasShadowingKeys() { return !shadowing_keys_.is_null(); }
+
+bool KeyAccumulator::IsShadowed(Handle<Object> key) {
+ if (!HasShadowingKeys() || skip_shadow_check_) return false;
+ return shadowing_keys_->Has(isolate_, key);
}
-void KeyAccumulator::NextPrototype() {
- // Store the protoLength on the first call of this method.
- if (!elements_.empty()) {
- level_lengths_.push_back(level_string_length_);
- level_lengths_.push_back(level_symbol_length_);
+void KeyAccumulator::AddShadowingKey(Object* key) {
+ if (mode_ == KeyCollectionMode::kOwnOnly) return;
+ AddShadowingKey(handle(key, isolate_));
+}
+void KeyAccumulator::AddShadowingKey(Handle<Object> key) {
+ if (mode_ == KeyCollectionMode::kOwnOnly) return;
+ if (shadowing_keys_.is_null()) {
+ shadowing_keys_ = ObjectHashSet::New(isolate_, 16);
}
- elements_.push_back(new std::vector<uint32_t>());
- level_string_length_ = 0;
- level_symbol_length_ = 0;
+ shadowing_keys_ = ObjectHashSet::Add(shadowing_keys_, key);
}
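
Shadowing keys replace the old per-level length accounting: once a non-enumerable own property (or a filtered proxy key) has been recorded, the same name found further down the prototype chain must be suppressed, which is what IsShadowed enforces inside AddKey. A compact standalone illustration of that filter, with hypothetical data:

    #include <cassert>
    #include <string>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    int main() {
      // (key, enumerable) pairs for two prototype levels: "x" is
      // non-enumerable on the receiver, so the enumerable "x" on the
      // prototype is shadowed and must not be reported.
      using Level = std::vector<std::pair<std::string, bool>>;
      Level receiver = {{"x", false}, {"a", true}};
      Level prototype = {{"x", true}, {"b", true}};
      std::unordered_set<std::string> shadowing;
      std::vector<std::string> result;
      for (const Level* level : {&receiver, &prototype}) {
        for (const auto& entry : *level) {
          if (!entry.second) {  // non-enumerable: record a shadowing key
            shadowing.insert(entry.first);
            continue;
          }
          if (shadowing.count(entry.first)) continue;  // IsShadowed
          result.push_back(entry.first);
        }
      }
      assert((result == std::vector<std::string>{"a", "b"}));
      return 0;
    }
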
namespace {
@@ -341,47 +240,131 @@ bool CheckAndInitalizeSimpleEnumCache(JSReceiver* object) {
void FastKeyAccumulator::Prepare() {
DisallowHeapAllocation no_gc;
// Directly go for the fast path for OWN_ONLY keys.
- if (type_ == OWN_ONLY) return;
+ if (mode_ == KeyCollectionMode::kOwnOnly) return;
// Fully walk the prototype chain and find the last prototype with keys.
is_receiver_simple_enum_ = false;
has_empty_prototype_ = true;
- JSReceiver* first_non_empty_prototype;
+ JSReceiver* last_prototype = nullptr;
for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
iter.Advance()) {
JSReceiver* current = iter.GetCurrent<JSReceiver>();
- if (CheckAndInitalizeSimpleEnumCache(current)) continue;
+ bool has_no_properties = CheckAndInitalizeSimpleEnumCache(current);
+ if (has_no_properties) continue;
+ last_prototype = current;
has_empty_prototype_ = false;
- first_non_empty_prototype = current;
- // TODO(cbruni): use the first non-empty prototype.
- USE(first_non_empty_prototype);
- return;
}
- DCHECK(has_empty_prototype_);
- is_receiver_simple_enum_ =
- receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
- !JSObject::cast(*receiver_)->HasEnumerableElements();
+ if (has_empty_prototype_) {
+ is_receiver_simple_enum_ =
+ receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
+ !JSObject::cast(*receiver_)->HasEnumerableElements();
+ } else if (last_prototype != nullptr) {
+ last_non_empty_prototype_ = handle(last_prototype, isolate_);
+ }
}
namespace {
+static Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
+ Handle<FixedArray> array,
+ int length) {
+ DCHECK_LE(length, array->length());
+ if (array->length() == length) return array;
+ return isolate->factory()->CopyFixedArrayUpTo(array, length);
+}
+
+Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object) {
+ Handle<Map> map(object->map());
+ bool cache_enum_length = map->OnlyHasSimpleProperties();
+
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate);
+ int own_property_count = map->EnumLength();
+ // If the enum length of the given map is set to kInvalidEnumCacheSentinel,
+ // this means that the map itself has never used the present enum cache. The
+ // first step to using the cache is to set the enum length of the map by
+ // counting the number of own descriptors that are ENUMERABLE_STRINGS.
+ if (own_property_count == kInvalidEnumCacheSentinel) {
+ own_property_count =
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
+ } else {
+ DCHECK(
+ own_property_count ==
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS));
+ }
+
+ if (descs->HasEnumCache()) {
+ Handle<FixedArray> keys(descs->GetEnumCache(), isolate);
+ // If the properties required in the enum are actually
+ // present, we can reuse the enum cache. Otherwise, this means that the
+ // enum cache was generated for a previous (smaller) version of the
+ // Descriptor Array. In that case we regenerate the enum cache.
+ if (own_property_count <= keys->length()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ if (cache_enum_length) map->SetEnumLength(own_property_count);
+ return ReduceFixedArrayTo(isolate, keys, own_property_count);
+ }
+ }
+
+ if (descs->IsEmpty()) {
+ isolate->counters()->enum_cache_hits()->Increment();
+ if (cache_enum_length) map->SetEnumLength(0);
+ return isolate->factory()->empty_fixed_array();
+ }
+
+ isolate->counters()->enum_cache_misses()->Increment();
+
+ Handle<FixedArray> storage =
+ isolate->factory()->NewFixedArray(own_property_count);
+ Handle<FixedArray> indices =
+ isolate->factory()->NewFixedArray(own_property_count);
+
+ int size = map->NumberOfOwnDescriptors();
+ int index = 0;
+
+ for (int i = 0; i < size; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (details.IsDontEnum()) continue;
+ Object* key = descs->GetKey(i);
+ if (key->IsSymbol()) continue;
+ storage->set(index, key);
+ if (!indices.is_null()) {
+ if (details.type() != DATA) {
+ indices = Handle<FixedArray>();
+ } else {
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+ int load_by_field_index = field_index.GetLoadByFieldIndex();
+ indices->set(index, Smi::FromInt(load_by_field_index));
+ }
+ }
+ index++;
+ }
+ DCHECK(index == storage->length());
+
+ DescriptorArray::SetEnumCache(descs, isolate, storage, indices);
+ if (cache_enum_length) {
+ map->SetEnumLength(own_property_count);
+ }
+ return storage;
+}
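
The cache-reuse decision in GetFastEnumPropertyKeys comes down to a length comparison: if the descriptor array's enum cache already holds at least own_property_count entries it is trimmed with ReduceFixedArrayTo, otherwise the cache is rebuilt together with the load-by-field-index table. A sketch of that decision over plain vectors (hypothetical names):

    #include <cassert>
    #include <string>
    #include <vector>

    // Stand-in for ReduceFixedArrayTo: reuse the array when the length
    // already matches, otherwise copy the needed prefix.
    static std::vector<std::string> ReduceTo(
        const std::vector<std::string>& cache, size_t n) {
      assert(n <= cache.size());
      if (cache.size() == n) return cache;
      return std::vector<std::string>(cache.begin(), cache.begin() + n);
    }

    int main() {
      // Enum cache generated for a larger map that shares the descriptors.
      std::vector<std::string> cache = {"a", "b", "c", "d"};
      size_t own_property_count = 3;  // this map needs only a prefix
      if (own_property_count <= cache.size()) {
        // Cache hit: no rebuild required.
        std::vector<std::string> keys = ReduceTo(cache, own_property_count);
        assert(keys.size() == 3 && keys.back() == "c");
      }
      return 0;
    }
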
template <bool fast_properties>
-Handle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
- Handle<JSObject> object,
- GetKeysConversion convert) {
+MaybeHandle<FixedArray> GetOwnKeysWithElements(Isolate* isolate,
+ Handle<JSObject> object,
+ GetKeysConversion convert) {
Handle<FixedArray> keys;
ElementsAccessor* accessor = object->GetElementsAccessor();
if (fast_properties) {
- keys = JSObject::GetFastEnumPropertyKeys(isolate, object);
+ keys = GetFastEnumPropertyKeys(isolate, object);
} else {
// TODO(cbruni): preallocate big enough array to also hold elements.
- keys = JSObject::GetEnumPropertyKeys(object);
+ keys = KeyAccumulator::GetOwnEnumPropertyKeys(isolate, object);
}
- Handle<FixedArray> result =
+ MaybeHandle<FixedArray> result =
accessor->PrependElementIndices(object, keys, convert, ONLY_ENUMERABLE);
if (FLAG_trace_for_in_enumerate) {
PrintF("| strings=%d symbols=0 elements=%u || prototypes>=1 ||\n",
- keys->length(), result->length() - keys->length());
+ keys->length(), result.ToHandleChecked()->length() - keys->length());
}
return result;
}
@@ -402,27 +385,31 @@ MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache(
}
// We have no elements but possibly enumerable property keys, hence we can
// directly initialize the enum cache.
- return JSObject::GetFastEnumPropertyKeys(isolate, object);
+ return GetFastEnumPropertyKeys(isolate, object);
}
bool OnlyHasSimpleProperties(Map* map) {
- return map->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER &&
- map->instance_type() != JS_GLOBAL_PROXY_TYPE;
+ return map->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER;
}
} // namespace
-MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(GetKeysConversion convert) {
- Handle<FixedArray> keys;
- if (GetKeysFast(convert).ToHandle(&keys)) {
- return keys;
+MaybeHandle<FixedArray> FastKeyAccumulator::GetKeys(
+ GetKeysConversion keys_conversion) {
+ if (filter_ == ENUMERABLE_STRINGS) {
+ Handle<FixedArray> keys;
+ if (GetKeysFast(keys_conversion).ToHandle(&keys)) {
+ return keys;
+ }
+ if (isolate_->has_pending_exception()) return MaybeHandle<FixedArray>();
}
- return GetKeysSlow(convert);
+
+ return GetKeysSlow(keys_conversion);
}
MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
- GetKeysConversion convert) {
- bool own_only = has_empty_prototype_ || type_ == OWN_ONLY;
+ GetKeysConversion keys_conversion) {
+ bool own_only = has_empty_prototype_ || mode_ == KeyCollectionMode::kOwnOnly;
Map* map = receiver_->map();
if (!own_only || !OnlyHasSimpleProperties(map)) {
return MaybeHandle<FixedArray>();
@@ -434,7 +421,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
// Do not try to use the enum-cache for dict-mode objects.
if (map->is_dictionary_map()) {
- return GetOwnKeysWithElements<false>(isolate_, object, convert);
+ return GetOwnKeysWithElements<false>(isolate_, object, keys_conversion);
}
int enum_length = receiver_->map()->EnumLength();
if (enum_length == kInvalidEnumCacheSentinel) {
@@ -453,13 +440,431 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
}
// The properties-only case failed because there were probably elements on the
// receiver.
- return GetOwnKeysWithElements<true>(isolate_, object, convert);
+ return GetOwnKeysWithElements<true>(isolate_, object, keys_conversion);
}
MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
- GetKeysConversion convert) {
- return JSReceiver::GetKeys(receiver_, type_, ENUMERABLE_STRINGS, KEEP_NUMBERS,
- filter_proxy_keys_);
+ GetKeysConversion keys_conversion) {
+ KeyAccumulator accumulator(isolate_, mode_, filter_);
+ accumulator.set_is_for_in(is_for_in_);
+ accumulator.set_last_non_empty_prototype(last_non_empty_prototype_);
+
+ MAYBE_RETURN(accumulator.CollectKeys(receiver_, receiver_),
+ MaybeHandle<FixedArray>());
+ return accumulator.GetKeys(keys_conversion);
+}
+
+namespace {
+
+enum IndexedOrNamed { kIndexed, kNamed };
+
+// Returns |true| on success, |nothing| on exception.
+template <class Callback, IndexedOrNamed type>
+Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ Handle<InterceptorInfo> interceptor,
+ KeyAccumulator* accumulator) {
+ Isolate* isolate = accumulator->isolate();
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *object, Object::DONT_THROW);
+ Handle<JSObject> result;
+ if (!interceptor->enumerator()->IsUndefined(isolate)) {
+ Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
+ const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
+ : "interceptor-named-enum";
+ LOG(isolate, ApiObjectAccess(log_tag, *object));
+ result = args.Call(enum_fun);
+ }
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.is_null()) return Just(true);
+ accumulator->AddKeys(
+ result, type == kIndexed ? CONVERT_TO_ARRAY_INDEX : DO_NOT_CONVERT);
+ return Just(true);
+}
+
+template <class Callback, IndexedOrNamed type>
+Maybe<bool> CollectInterceptorKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object,
+ KeyAccumulator* accumulator) {
+ Isolate* isolate = accumulator->isolate();
+ if (type == kIndexed) {
+ if (!object->HasIndexedInterceptor()) return Just(true);
+ } else {
+ if (!object->HasNamedInterceptor()) return Just(true);
+ }
+ Handle<InterceptorInfo> interceptor(type == kIndexed
+ ? object->GetIndexedInterceptor()
+ : object->GetNamedInterceptor(),
+ isolate);
+ if ((accumulator->filter() & ONLY_ALL_CAN_READ) &&
+ !interceptor->all_can_read()) {
+ return Just(true);
+ }
+ return CollectInterceptorKeysInternal<Callback, type>(
+ receiver, object, interceptor, accumulator);
+}
+
+} // namespace
+
+Maybe<bool> KeyAccumulator::CollectOwnElementIndices(
+ Handle<JSReceiver> receiver, Handle<JSObject> object) {
+ if (filter_ & SKIP_STRINGS || skip_indices_) return Just(true);
+
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ accessor->CollectElementIndices(object, this);
+
+ return CollectInterceptorKeys<v8::IndexedPropertyEnumeratorCallback,
+ kIndexed>(receiver, object, this);
+}
+
+namespace {
+
+template <bool skip_symbols>
+int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
+ KeyAccumulator* keys,
+ Handle<DescriptorArray> descs,
+ int start_index, int limit) {
+ int first_skipped = -1;
+ PropertyFilter filter = keys->filter();
+ KeyCollectionMode mode = keys->mode();
+ for (int i = start_index; i < limit; i++) {
+ bool is_shadowing_key = false;
+ PropertyDetails details = descs->GetDetails(i);
+
+ if ((details.attributes() & filter) != 0) {
+ if (mode == KeyCollectionMode::kIncludePrototypes) {
+ is_shadowing_key = true;
+ } else {
+ continue;
+ }
+ }
+
+ if (filter & ONLY_ALL_CAN_READ) {
+ if (details.kind() != kAccessor) continue;
+ Object* accessors = descs->GetValue(i);
+ if (!accessors->IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ }
+
+ Name* key = descs->GetKey(i);
+ if (skip_symbols == key->IsSymbol()) {
+ if (first_skipped == -1) first_skipped = i;
+ continue;
+ }
+ if (key->FilterKey(keys->filter())) continue;
+
+ if (is_shadowing_key) {
+ keys->AddShadowingKey(key);
+ } else {
+ keys->AddKey(key, DO_NOT_CONVERT);
+ }
+ }
+ return first_skipped;
+}
+
+template <class T>
+Handle<FixedArray> GetOwnEnumPropertyDictionaryKeys(Isolate* isolate,
+ KeyCollectionMode mode,
+ KeyAccumulator* accumulator,
+ Handle<JSObject> object,
+ T* raw_dictionary) {
+ Handle<T> dictionary(raw_dictionary, isolate);
+ int length = dictionary->NumberOfEnumElements();
+ if (length == 0) {
+ return isolate->factory()->empty_fixed_array();
+ }
+ Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
+ T::CopyEnumKeysTo(dictionary, storage, mode, accumulator);
+ return storage;
+}
+} // namespace
+
+Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ if (filter_ == ENUMERABLE_STRINGS) {
+ Handle<FixedArray> enum_keys;
+ if (object->HasFastProperties()) {
+ enum_keys = KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, object);
+ // If the number of properties equals the number of enumerable properties
+ // we do not have to filter out non-enumerable ones.
+ Map* map = object->map();
+ int nof_descriptors = map->NumberOfOwnDescriptors();
+ if (enum_keys->length() != nof_descriptors) {
+ Handle<DescriptorArray> descs =
+ Handle<DescriptorArray>(map->instance_descriptors(), isolate_);
+ for (int i = 0; i < nof_descriptors; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (!details.IsDontEnum()) continue;
+ Object* key = descs->GetKey(i);
+ this->AddShadowingKey(key);
+ }
+ }
+ } else if (object->IsJSGlobalObject()) {
+ enum_keys = GetOwnEnumPropertyDictionaryKeys(
+ isolate_, mode_, this, object, object->global_dictionary());
+ } else {
+ enum_keys = GetOwnEnumPropertyDictionaryKeys(
+ isolate_, mode_, this, object, object->property_dictionary());
+ }
+ AddKeys(enum_keys, DO_NOT_CONVERT);
+ } else {
+ if (object->HasFastProperties()) {
+ int limit = object->map()->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descs(object->map()->instance_descriptors(),
+ isolate_);
+ // First collect the strings,
+ int first_symbol =
+ CollectOwnPropertyNamesInternal<true>(object, this, descs, 0, limit);
+ // then the symbols.
+ if (first_symbol != -1) {
+ CollectOwnPropertyNamesInternal<false>(object, this, descs,
+ first_symbol, limit);
+ }
+ } else if (object->IsJSGlobalObject()) {
+ GlobalDictionary::CollectKeysTo(
+ handle(object->global_dictionary(), isolate_), this);
+ } else {
+ NameDictionary::CollectKeysTo(
+ handle(object->property_dictionary(), isolate_), this);
+ }
+ }
+ // Add the property keys from the interceptor.
+ return CollectInterceptorKeys<v8::GenericNamedPropertyEnumeratorCallback,
+ kNamed>(receiver, object, this);
+}
+
+Maybe<bool> KeyAccumulator::CollectAccessCheckInterceptorKeys(
+ Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ MAYBE_RETURN(
+ (CollectInterceptorKeysInternal<v8::IndexedPropertyEnumeratorCallback,
+ kIndexed>(
+ receiver, object,
+ handle(
+ InterceptorInfo::cast(access_check_info->indexed_interceptor()),
+ isolate_),
+ this)),
+ Nothing<bool>());
+ MAYBE_RETURN(
+ (CollectInterceptorKeysInternal<
+ v8::GenericNamedPropertyEnumeratorCallback, kNamed>(
+ receiver, object,
+ handle(InterceptorInfo::cast(access_check_info->named_interceptor()),
+ isolate_),
+ this)),
+ Nothing<bool>());
+ return Just(true);
+}
+
+// Returns |true| on success, |false| if prototype walking should be stopped,
+// |nothing| if an exception was thrown.
+Maybe<bool> KeyAccumulator::CollectOwnKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object) {
+ // Check access rights if required.
+ if (object->IsAccessCheckNeeded() &&
+ !isolate_->MayAccess(handle(isolate_->context()), object)) {
+ // The cross-origin spec says that [[Enumerate]] shall return an empty
+ // iterator when it doesn't have access...
+ if (mode_ == KeyCollectionMode::kIncludePrototypes) {
+ return Just(false);
+ }
+ // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
+ DCHECK(KeyCollectionMode::kOwnOnly == mode_);
+ Handle<AccessCheckInfo> access_check_info;
+ {
+ DisallowHeapAllocation no_gc;
+ AccessCheckInfo* maybe_info = AccessCheckInfo::Get(isolate_, object);
+ if (maybe_info) access_check_info = handle(maybe_info, isolate_);
+ }
+ // We always have both kinds of interceptors or none.
+ if (!access_check_info.is_null() &&
+ access_check_info->named_interceptor()) {
+ MAYBE_RETURN(CollectAccessCheckInterceptorKeys(access_check_info,
+ receiver, object),
+ Nothing<bool>());
+ return Just(false);
+ }
+ filter_ = static_cast<PropertyFilter>(filter_ | ONLY_ALL_CAN_READ);
+ }
+ MAYBE_RETURN(CollectOwnElementIndices(receiver, object), Nothing<bool>());
+ MAYBE_RETURN(CollectOwnPropertyNames(receiver, object), Nothing<bool>());
+ return Just(true);
+}
+
+// static
+Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
+ Isolate* isolate, Handle<JSObject> object) {
+ if (object->HasFastProperties()) {
+ return GetFastEnumPropertyKeys(isolate, object);
+ } else if (object->IsJSGlobalObject()) {
+ return GetOwnEnumPropertyDictionaryKeys(
+ isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
+ object->global_dictionary());
+ } else {
+ return GetOwnEnumPropertyDictionaryKeys(
+ isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
+ object->property_dictionary());
+ }
+}
+
+// ES6 9.5.12
+// Returns |true| on success, |nothing| in case of exception.
+Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy) {
+ STACK_CHECK(isolate_, Nothing<bool>());
+ // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
+ Handle<Object> handler(proxy->handler(), isolate_);
+ // 2. If handler is null, throw a TypeError exception.
+ // 3. Assert: Type(handler) is Object.
+ if (proxy->IsRevoked()) {
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyRevoked, isolate_->factory()->ownKeys_string()));
+ return Nothing<bool>();
+ }
+ // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
+ Handle<JSReceiver> target(proxy->target(), isolate_);
+ // 5. Let trap be ? GetMethod(handler, "ownKeys").
+ Handle<Object> trap;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate_->factory()->ownKeys_string()),
+ Nothing<bool>());
+ // 6. If trap is undefined, then
+ if (trap->IsUndefined(isolate_)) {
+ // 6a. Return target.[[OwnPropertyKeys]]().
+ return CollectOwnJSProxyTargetKeys(proxy, target);
+ }
+ // 7. Let trapResultArray be Call(trap, handler, «target»).
+ Handle<Object> trap_result_array;
+ Handle<Object> args[] = {target};
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, trap_result_array,
+ Execution::Call(isolate_, trap, handler, arraysize(args), args),
+ Nothing<bool>());
+ // 8. Let trapResult be ? CreateListFromArrayLike(trapResultArray,
+ // «String, Symbol»).
+ Handle<FixedArray> trap_result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, trap_result,
+ Object::CreateListFromArrayLike(isolate_, trap_result_array,
+ ElementTypes::kStringAndSymbol),
+ Nothing<bool>());
+ // 9. Let extensibleTarget be ? IsExtensible(target).
+ Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(maybe_extensible, Nothing<bool>());
+ bool extensible_target = maybe_extensible.FromJust();
+ // 10. Let targetKeys be ? target.[[OwnPropertyKeys]]().
+ Handle<FixedArray> target_keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, target_keys,
+ JSReceiver::OwnPropertyKeys(target),
+ Nothing<bool>());
+ // 11. (Assert)
+ // 12. Let targetConfigurableKeys be an empty List.
+ // To save memory, we're re-using target_keys and will modify it in-place.
+ Handle<FixedArray> target_configurable_keys = target_keys;
+ // 13. Let targetNonconfigurableKeys be an empty List.
+ Handle<FixedArray> target_nonconfigurable_keys =
+ isolate_->factory()->NewFixedArray(target_keys->length());
+ int nonconfigurable_keys_length = 0;
+ // 14. Repeat, for each element key of targetKeys:
+ for (int i = 0; i < target_keys->length(); ++i) {
+ // 14a. Let desc be ? target.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate_, target, handle(target_keys->get(i), isolate_), &desc);
+ MAYBE_RETURN(found, Nothing<bool>());
+ // 14b. If desc is not undefined and desc.[[Configurable]] is false, then
+ if (found.FromJust() && !desc.configurable()) {
+ // 14b i. Append key as an element of targetNonconfigurableKeys.
+ target_nonconfigurable_keys->set(nonconfigurable_keys_length,
+ target_keys->get(i));
+ nonconfigurable_keys_length++;
+ // The key was moved, null it out in the original list.
+ target_keys->set(i, Smi::FromInt(0));
+ } else {
+ // 14c. Else,
+ // 14c i. Append key as an element of targetConfigurableKeys.
+ // (No-op, just keep it in |target_keys|.)
+ }
+ }
+ // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
+ // then:
+ if (extensible_target && nonconfigurable_keys_length == 0) {
+ // 15a. Return trapResult.
+ return AddKeysFromJSProxy(proxy, trap_result);
+ }
+ // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
+ Zone set_zone(isolate_->allocator());
+ const int kPresent = 1;
+ const int kGone = 0;
+ IdentityMap<int> unchecked_result_keys(isolate_->heap(), &set_zone);
+ int unchecked_result_keys_size = 0;
+ for (int i = 0; i < trap_result->length(); ++i) {
+ DCHECK(trap_result->get(i)->IsUniqueName());
+ Object* key = trap_result->get(i);
+ int* entry = unchecked_result_keys.Get(key);
+ if (*entry != kPresent) {
+ *entry = kPresent;
+ unchecked_result_keys_size++;
+ }
+ }
+ // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
+ for (int i = 0; i < nonconfigurable_keys_length; ++i) {
+ Object* key = target_nonconfigurable_keys->get(i);
+ // 17a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ int* found = unchecked_result_keys.Find(key);
+ if (found == nullptr || *found == kGone) {
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate_)));
+ return Nothing<bool>();
+ }
+ // 17b. Remove key from uncheckedResultKeys.
+ *found = kGone;
+ unchecked_result_keys_size--;
+ }
+ // 18. If extensibleTarget is true, return trapResult.
+ if (extensible_target) {
+ return AddKeysFromJSProxy(proxy, trap_result);
+ }
+ // 19. Repeat, for each key that is an element of targetConfigurableKeys:
+ for (int i = 0; i < target_configurable_keys->length(); ++i) {
+ Object* key = target_configurable_keys->get(i);
+ if (key->IsSmi()) continue; // Zapped entry, was nonconfigurable.
+ // 19a. If key is not an element of uncheckedResultKeys, throw a
+ // TypeError exception.
+ int* found = unchecked_result_keys.Find(key);
+ if (found == nullptr || *found == kGone) {
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate_)));
+ return Nothing<bool>();
+ }
+ // 19b. Remove key from uncheckedResultKeys.
+ *found = kGone;
+ unchecked_result_keys_size--;
+ }
+ // 20. If uncheckedResultKeys is not empty, throw a TypeError exception.
+ if (unchecked_result_keys_size != 0) {
+ DCHECK_GT(unchecked_result_keys_size, 0);
+ isolate_->Throw(*isolate_->factory()->NewTypeError(
+ MessageTemplate::kProxyOwnKeysNonExtensible));
+ return Nothing<bool>();
+ }
+ // 21. Return trapResult.
+ return AddKeysFromJSProxy(proxy, trap_result);
+}
+
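Steps 16-21 above implement the ownKeys invariant checks with an IdentityMap holding kPresent/kGone markers: every non-configurable target key must appear in the trap result, and for a non-extensible target the trap result must contain exactly the target's keys, nothing more. The same bookkeeping restated over std::string keys (a sketch, not the V8 types):

    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Returns true when |trap_result| satisfies the ES6 9.5.12 invariants
    // for the given target keys; mirrors the kPresent/kGone marking above.
    static bool CheckOwnKeysInvariants(
        const std::vector<std::string>& trap_result,
        const std::vector<std::string>& nonconfigurable,
        const std::vector<std::string>& configurable, bool extensible) {
      std::unordered_map<std::string, bool> unchecked;  // key -> present
      for (const auto& k : trap_result) unchecked[k] = true;
      // Step 17: every non-configurable key must be in the trap result.
      for (const auto& k : nonconfigurable) {
        auto it = unchecked.find(k);
        if (it == unchecked.end() || !it->second) return false;
        it->second = false;  // remove it (kGone)
      }
      if (extensible) return true;  // step 18
      // Step 19: every configurable key must also be in the trap result.
      for (const auto& k : configurable) {
        auto it = unchecked.find(k);
        if (it == unchecked.end() || !it->second) return false;
        it->second = false;
      }
      // Step 20: nothing extra may remain.
      for (const auto& e : unchecked)
        if (e.second) return false;
      return true;
    }

    int main() {
      assert(CheckOwnKeysInvariants({"a", "b"}, {"a"}, {"b"}, false));
      assert(!CheckOwnKeysInvariants({"b"}, {"a"}, {"b"}, true));  // no "a"
      assert(!CheckOwnKeysInvariants({"a", "x"}, {"a"}, {}, false));  // "x"
      return 0;
    }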
+Maybe<bool> KeyAccumulator::CollectOwnJSProxyTargetKeys(
+ Handle<JSProxy> proxy, Handle<JSReceiver> target) {
+ // TODO(cbruni): avoid creating another KeyAccumulator
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, keys,
+ KeyAccumulator::GetKeys(target, KeyCollectionMode::kOwnOnly, filter_,
+ GetKeysConversion::kConvertToString, is_for_in_),
+ Nothing<bool>());
+ Maybe<bool> result = AddKeysFromJSProxy(proxy, keys);
+ return result;
}
} // namespace internal
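
For embedders, this machinery sits behind the public own-key enumeration API; a call such as v8::Object::GetOwnPropertyNames is ultimately served by a kOwnOnly key collection like the one above. A usage sketch against the public header (assumes an entered context; an empty handle signals a pending exception, e.g. from a throwing proxy trap):

    #include <v8.h>

    v8::Local<v8::Array> OwnKeys(v8::Local<v8::Context> context,
                                 v8::Local<v8::Object> object) {
      // May run proxy traps and therefore throw; returns empty on exception.
      v8::Local<v8::Array> names;
      if (!object->GetOwnPropertyNames(context).ToLocal(&names)) {
        return v8::Local<v8::Array>();
      }
      return names;
    }
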
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index 1fd3fc02b0..63b8b26ce2 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_KEY_ACCUMULATOR_H_
-#define V8_KEY_ACCUMULATOR_H_
+#ifndef V8_KEYS_H_
+#define V8_KEYS_H_
#include "src/isolate.h"
#include "src/objects.h"
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX, PROXY_MAGIC };
+enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
// This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
// GetKeys needs to sort keys per prototype level, first showing the integer
@@ -31,81 +31,109 @@ enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX, PROXY_MAGIC };
// are more compact and allow for reasonably fast includes check.
class KeyAccumulator final BASE_EMBEDDED {
public:
- KeyAccumulator(Isolate* isolate, KeyCollectionType type,
+ KeyAccumulator(Isolate* isolate, KeyCollectionMode mode,
PropertyFilter filter)
- : isolate_(isolate), type_(type), filter_(filter) {}
+ : isolate_(isolate), mode_(mode), filter_(filter) {}
~KeyAccumulator();
- bool AddKey(uint32_t key);
- bool AddKey(Object* key, AddKeyConversion convert);
- bool AddKey(Handle<Object> key, AddKeyConversion convert);
+ static MaybeHandle<FixedArray> GetKeys(
+ Handle<JSReceiver> object, KeyCollectionMode mode, PropertyFilter filter,
+ GetKeysConversion keys_conversion = GetKeysConversion::kKeepNumbers,
+ bool is_for_in = false);
+
+ Handle<FixedArray> GetKeys(
+ GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
+ Maybe<bool> CollectKeys(Handle<JSReceiver> receiver,
+ Handle<JSReceiver> object);
+ Maybe<bool> CollectOwnElementIndices(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectOwnPropertyNames(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectAccessCheckInterceptorKeys(
+ Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+
+ static Handle<FixedArray> GetOwnEnumPropertyKeys(Isolate* isolate,
+ Handle<JSObject> object);
+
+ void AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
+ void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
- void AddKeys(Handle<JSObject> array, AddKeyConversion convert);
- void AddKeysFromProxy(Handle<JSObject> array);
- Maybe<bool> AddKeysFromProxy(Handle<JSProxy> proxy, Handle<FixedArray> keys);
- void AddElementKeysFromInterceptor(Handle<JSObject> array);
+ void AddKeys(Handle<JSObject> array_like, AddKeyConversion convert);
+
// Jump to the next level, pushing the current |levelLength_| to
// |levelLengths_| and adding a new list to |elements_|.
- void NextPrototype();
- // Sort the integer indices in the last list in |elements_|
- void SortCurrentElementsList();
- Handle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
- int length() { return length_; }
Isolate* isolate() { return isolate_; }
- void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
+ // Filter keys based on their property descriptors.
+ PropertyFilter filter() { return filter_; }
+ // The collection mode defines whether we collect the keys from the prototype
+ // chain or only look at the receiver.
+ KeyCollectionMode mode() { return mode_; }
+ // In case of for-in loops we have to treat JSProxy keys differently and
+ // deduplicate them. Additionally we convert JSProxy keys back to array
+ // indices.
+ void set_is_for_in(bool value) { is_for_in_ = value; }
+ void set_skip_indices(bool value) { skip_indices_ = value; }
+ // The last_non_empty_prototype is used to limit the prototypes for which
+ // we have to keep track of non-enumerable keys that can shadow keys
+ // repeated on the prototype chain.
+ void set_last_non_empty_prototype(Handle<JSReceiver> object) {
+ last_non_empty_prototype_ = object;
+ }
+ // Shadowing keys are used to filter keys. This happens when non-enumerable
+ // keys appear again on the prototype chain.
+ void AddShadowingKey(Object* key);
+ void AddShadowingKey(Handle<Object> key);
private:
- bool AddIntegerKey(uint32_t key);
- bool AddStringKey(Handle<Object> key, AddKeyConversion convert);
- bool AddSymbolKey(Handle<Object> array);
- void SortCurrentElementsListRemoveDuplicates();
+ Maybe<bool> CollectOwnKeys(Handle<JSReceiver> receiver,
+ Handle<JSObject> object);
+ Maybe<bool> CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
+ Handle<JSProxy> proxy);
+ Maybe<bool> CollectOwnJSProxyTargetKeys(Handle<JSProxy> proxy,
+ Handle<JSReceiver> target);
+ Maybe<bool> AddKeysFromJSProxy(Handle<JSProxy> proxy,
+ Handle<FixedArray> keys);
+ bool IsShadowed(Handle<Object> key);
+ bool HasShadowingKeys();
+ Handle<OrderedHashSet> keys() { return Handle<OrderedHashSet>::cast(keys_); }
Isolate* isolate_;
- KeyCollectionType type_;
+ // keys_ is either a Handle<OrderedHashSet> or in the case of own JSProxy
+ // keys a Handle<FixedArray>. The OrderedHashSet is in-place converted to the
+ // result list, a FixedArray containing all collected keys.
+ Handle<FixedArray> keys_;
+ Handle<JSReceiver> last_non_empty_prototype_;
+ Handle<ObjectHashSet> shadowing_keys_;
+ KeyCollectionMode mode_;
PropertyFilter filter_;
- bool filter_proxy_keys_ = true;
- // |elements_| contains the sorted element keys (indices) per level.
- std::vector<std::vector<uint32_t>*> elements_;
- // |protoLengths_| contains the total number of keys (elements + properties)
- // per level. Negative values mark counts for a level with keys from a proxy.
- std::vector<int> level_lengths_;
- // |string_properties_| contains the unique String property keys for all
- // levels in insertion order per level.
- Handle<OrderedHashSet> string_properties_;
- // |symbol_properties_| contains the unique Symbol property keys for all
- // levels in insertion order per level.
- Handle<OrderedHashSet> symbol_properties_;
- Handle<FixedArray> ownProxyKeys_;
- // |length_| keeps track of the total number of all element and property keys.
- int length_ = 0;
- // |levelLength_| keeps track of the number of String keys in the current
- // level.
- int level_string_length_ = 0;
- // |levelSymbolLength_| keeps track of the number of Symbol keys in the
- // current level.
- int level_symbol_length_ = 0;
+ bool is_for_in_ = false;
+ bool skip_indices_ = false;
+ // Keys added for the first receiver cannot be shadowed, so the shadow
+ // check is skipped until the first shadowing key has been recorded.
+ bool skip_shadow_check_ = true;
DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
};
// The FastKeyAccumulator handles the cases where there are no elements on the
// prototype chain and forwards the complex/slow cases to the normal
-// KeyAccumulator.
+// KeyAccumulator. This significantly speeds up the OWN_ONLY case, where we
+// do not have to walk the prototype chain.
class FastKeyAccumulator {
public:
FastKeyAccumulator(Isolate* isolate, Handle<JSReceiver> receiver,
- KeyCollectionType type, PropertyFilter filter)
- : isolate_(isolate), receiver_(receiver), type_(type), filter_(filter) {
+ KeyCollectionMode mode, PropertyFilter filter)
+ : isolate_(isolate), receiver_(receiver), mode_(mode), filter_(filter) {
Prepare();
- // TODO(cbruni): pass filter_ directly to the KeyAccumulator.
- USE(filter_);
}
bool is_receiver_simple_enum() { return is_receiver_simple_enum_; }
bool has_empty_prototype() { return has_empty_prototype_; }
- void set_filter_proxy_keys(bool filter) { filter_proxy_keys_ = filter; }
+ void set_is_for_in(bool value) { is_for_in_ = value; }
- MaybeHandle<FixedArray> GetKeys(GetKeysConversion convert = KEEP_NUMBERS);
+ MaybeHandle<FixedArray> GetKeys(
+ GetKeysConversion convert = GetKeysConversion::kKeepNumbers);
private:
void Prepare();
@@ -114,11 +142,12 @@ class FastKeyAccumulator {
Isolate* isolate_;
Handle<JSReceiver> receiver_;
- KeyCollectionType type_;
+ Handle<JSReceiver> last_non_empty_prototype_;
+ KeyCollectionMode mode_;
PropertyFilter filter_;
+ bool is_for_in_ = false;
bool is_receiver_simple_enum_ = false;
bool has_empty_prototype_ = false;
- bool filter_proxy_keys_ = true;
DISALLOW_COPY_AND_ASSIGN(FastKeyAccumulator);
};
@@ -126,4 +155,4 @@ class FastKeyAccumulator {
} // namespace internal
} // namespace v8
-#endif // V8_KEY_ACCUMULATOR_H_
+#endif // V8_KEYS_H_
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 6035c9d91e..2f81248ec1 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -29,11 +29,17 @@ bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate) {
return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate);
}
+void SetTracingController(
+ v8::Platform* platform,
+ v8::platform::tracing::TracingController* tracing_controller) {
+ return reinterpret_cast<DefaultPlatform*>(platform)->SetTracingController(
+ tracing_controller);
+}
+
const int DefaultPlatform::kMaxThreadPoolSize = 8;
DefaultPlatform::DefaultPlatform()
- : initialized_(false), thread_pool_size_(0) {}
-
+ : initialized_(false), thread_pool_size_(0), tracing_controller_(NULL) {}
DefaultPlatform::~DefaultPlatform() {
base::LockGuard<base::Mutex> guard(&lock_);
@@ -57,6 +63,11 @@ DefaultPlatform::~DefaultPlatform() {
i->second.pop();
}
}
+
+ if (tracing_controller_) {
+ tracing_controller_->StopTracing();
+ delete tracing_controller_;
+ }
}
@@ -170,18 +181,30 @@ double DefaultPlatform::MonotonicallyIncreasingTime() {
uint64_t DefaultPlatform::AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
- uint64_t id, uint64_t bind_id, int num_args,
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values, unsigned int flags) {
+ if (tracing_controller_) {
+ return tracing_controller_->AddTraceEvent(
+ phase, category_enabled_flag, name, scope, id, bind_id, num_args,
+ arg_names, arg_types, arg_values, flags);
+ }
+
return 0;
}
-
void DefaultPlatform::UpdateTraceEventDuration(
- const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {}
-
+ const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {
+ if (tracing_controller_) {
+ tracing_controller_->UpdateTraceEventDuration(category_enabled_flag, name,
+ handle);
+ }
+}
const uint8_t* DefaultPlatform::GetCategoryGroupEnabled(const char* name) {
+ if (tracing_controller_) {
+ return tracing_controller_->GetCategoryGroupEnabled(name);
+ }
static uint8_t no = 0;
return &no;
}
@@ -193,6 +216,10 @@ const char* DefaultPlatform::GetCategoryGroupName(
return dummy;
}
+void DefaultPlatform::SetTracingController(
+ tracing::TracingController* tracing_controller) {
+ tracing_controller_ = tracing_controller;
+}
size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
return static_cast<size_t>(thread_pool_size_);
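
The new SetTracingController hook lets an embedder route the platform's trace events into the tracing framework added under src/libplatform/tracing. A wiring sketch; TracingController::Initialize, StartTracing, TraceConfig::CreateDefaultTraceConfig, and the JSON TraceWriter factory are assumed to match the declarations in the include/libplatform/v8-tracing.h shipped with this update (the ring-buffer factory itself appears in trace-buffer.cc below). Note that the platform deletes the controller in its destructor, so ownership transfers with the call.

    #include <ostream>
    #include "include/libplatform/libplatform.h"
    #include "include/libplatform/v8-tracing.h"

    // Hypothetical embedder helper: start JSON tracing on |platform|.
    void EnableTracing(v8::Platform* platform, std::ostream& out) {
      using namespace v8::platform::tracing;
      TraceBuffer* buffer = TraceBuffer::CreateTraceBufferRingBuffer(
          64,  // max_chunks; capacity is 64 * TraceBufferChunk::kChunkSize
          TraceWriter::CreateJSONTraceWriter(out));
      TracingController* controller = new TracingController();
      controller->Initialize(buffer);  // controller owns the buffer
      controller->StartTracing(TraceConfig::CreateDefaultTraceConfig());
      v8::platform::SetTracingController(platform, controller);
    }
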
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index fe214c6963..0fd7e5ad89 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -10,6 +10,7 @@
#include <queue>
#include <vector>
+#include "include/libplatform/v8-tracing.h"
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
@@ -22,6 +23,10 @@ class TaskQueue;
class Thread;
class WorkerThread;
+namespace tracing {
+class TracingController;
+}
+
class DefaultPlatform : public Platform {
public:
DefaultPlatform();
@@ -47,14 +52,14 @@ class DefaultPlatform : public Platform {
const char* GetCategoryGroupName(
const uint8_t* category_enabled_flag) override;
uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
- const char* name, uint64_t id,
+ const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
unsigned int flags) override;
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) override;
-
+ void SetTracingController(tracing::TracingController* tracing_controller);
private:
static const int kMaxThreadPoolSize;
@@ -74,6 +79,7 @@ class DefaultPlatform : public Platform {
std::priority_queue<DelayedEntry, std::vector<DelayedEntry>,
std::greater<DelayedEntry> > >
main_thread_delayed_queue_;
+ tracing::TracingController* tracing_controller_;
DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
};
diff --git a/deps/v8/src/libplatform/task-queue.cc b/deps/v8/src/libplatform/task-queue.cc
index 0a630ed3c3..ada13d9fe9 100644
--- a/deps/v8/src/libplatform/task-queue.cc
+++ b/deps/v8/src/libplatform/task-queue.cc
@@ -5,6 +5,8 @@
#include "src/libplatform/task-queue.h"
#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
namespace v8 {
namespace platform {
@@ -53,5 +55,15 @@ void TaskQueue::Terminate() {
process_queue_semaphore_.Signal();
}
+void TaskQueue::BlockUntilQueueEmptyForTesting() {
+ for (;;) {
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (task_queue_.empty()) return;
+ }
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(5));
+ }
+}
+
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/task-queue.h b/deps/v8/src/libplatform/task-queue.h
index efe9e07e06..5239cdac40 100644
--- a/deps/v8/src/libplatform/task-queue.h
+++ b/deps/v8/src/libplatform/task-queue.h
@@ -10,6 +10,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
@@ -33,8 +34,12 @@ class TaskQueue {
void Terminate();
private:
- base::Mutex lock_;
+ FRIEND_TEST(WorkerThreadTest, PostSingleTask);
+
+ void BlockUntilQueueEmptyForTesting();
+
base::Semaphore process_queue_semaphore_;
+ base::Mutex lock_;
std::queue<Task*> task_queue_;
bool terminated_;
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.cc b/deps/v8/src/libplatform/tracing/trace-buffer.cc
new file mode 100644
index 0000000000..354f0459f6
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.cc
@@ -0,0 +1,109 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/trace-buffer.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+TraceBufferRingBuffer::TraceBufferRingBuffer(size_t max_chunks,
+ TraceWriter* trace_writer)
+ : max_chunks_(max_chunks) {
+ trace_writer_.reset(trace_writer);
+ chunks_.resize(max_chunks);
+}
+
+TraceBufferRingBuffer::~TraceBufferRingBuffer() {}
+
+TraceObject* TraceBufferRingBuffer::AddTraceEvent(uint64_t* handle) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (is_empty_ || chunks_[chunk_index_]->IsFull()) {
+ chunk_index_ = is_empty_ ? 0 : NextChunkIndex(chunk_index_);
+ is_empty_ = false;
+ auto& chunk = chunks_[chunk_index_];
+ if (chunk) {
+ chunk->Reset(current_chunk_seq_++);
+ } else {
+ chunk.reset(new TraceBufferChunk(current_chunk_seq_++));
+ }
+ }
+ auto& chunk = chunks_[chunk_index_];
+ size_t event_index;
+ TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
+ *handle = MakeHandle(chunk_index_, chunk->seq(), event_index);
+ return trace_object;
+}
+
+TraceObject* TraceBufferRingBuffer::GetEventByHandle(uint64_t handle) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ size_t chunk_index, event_index;
+ uint32_t chunk_seq;
+ ExtractHandle(handle, &chunk_index, &chunk_seq, &event_index);
+ if (chunk_index >= chunks_.size()) return NULL;
+ auto& chunk = chunks_[chunk_index];
+ if (!chunk || chunk->seq() != chunk_seq) return NULL;
+ return chunk->GetEventAt(event_index);
+}
+
+bool TraceBufferRingBuffer::Flush() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ // This flushes all the traces stored in the buffer.
+ if (!is_empty_) {
+ for (size_t i = NextChunkIndex(chunk_index_);; i = NextChunkIndex(i)) {
+ if (auto& chunk = chunks_[i]) {
+ for (size_t j = 0; j < chunk->size(); ++j) {
+ trace_writer_->AppendTraceEvent(chunk->GetEventAt(j));
+ }
+ }
+ if (i == chunk_index_) break;
+ }
+ }
+ trace_writer_->Flush();
+ // This resets the trace buffer.
+ is_empty_ = true;
+ return true;
+}
+
+uint64_t TraceBufferRingBuffer::MakeHandle(size_t chunk_index,
+ uint32_t chunk_seq,
+ size_t event_index) const {
+ return static_cast<uint64_t>(chunk_seq) * Capacity() +
+ chunk_index * TraceBufferChunk::kChunkSize + event_index;
+}
+
+void TraceBufferRingBuffer::ExtractHandle(uint64_t handle, size_t* chunk_index,
+ uint32_t* chunk_seq,
+ size_t* event_index) const {
+ *chunk_seq = static_cast<uint32_t>(handle / Capacity());
+ size_t indices = handle % Capacity();
+ *chunk_index = indices / TraceBufferChunk::kChunkSize;
+ *event_index = indices % TraceBufferChunk::kChunkSize;
+}
+
+size_t TraceBufferRingBuffer::NextChunkIndex(size_t index) const {
+ if (++index >= max_chunks_) index = 0;
+ return index;
+}
+
+TraceBufferChunk::TraceBufferChunk(uint32_t seq) : seq_(seq) {}
+
+void TraceBufferChunk::Reset(uint32_t new_seq) {
+ next_free_ = 0;
+ seq_ = new_seq;
+}
+
+TraceObject* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
+ *event_index = next_free_++;
+ return &chunk_[*event_index];
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(
+ size_t max_chunks, TraceWriter* trace_writer) {
+ return new TraceBufferRingBuffer(max_chunks, trace_writer);
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
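
The handle math above is a mixed-radix encoding of (chunk_seq, chunk_index, event_index) into a single uint64_t. A standalone worked example, assuming illustrative sizes of four chunks of 64 events each (the real TraceBufferChunk::kChunkSize is declared in include/libplatform/v8-tracing.h and may differ):

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t kChunkSize = 64;                      // assumed, see above
  const size_t kMaxChunks = 4;
  const size_t kCapacity = kMaxChunks * kChunkSize;  // 256
  // MakeHandle(chunk_index = 2, chunk_seq = 7, event_index = 10):
  uint64_t handle = uint64_t{7} * kCapacity + 2 * kChunkSize + 10;  // 1930
  // ExtractHandle() inverts the encoding:
  assert(handle / kCapacity == 7);                           // chunk_seq
  size_t indices = static_cast<size_t>(handle % kCapacity);  // 138
  assert(indices / kChunkSize == 2);                         // chunk_index
  assert(indices % kChunkSize == 10);                        // event_index
  return 0;
}
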
diff --git a/deps/v8/src/libplatform/tracing/trace-buffer.h b/deps/v8/src/libplatform/tracing/trace-buffer.h
new file mode 100644
index 0000000000..16f3b2a12e
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-buffer.h
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+#define SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
+
+#include <memory>
+#include <vector>
+
+#include "include/libplatform/v8-tracing.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+class TraceBufferRingBuffer : public TraceBuffer {
+ public:
+ TraceBufferRingBuffer(size_t max_chunks, TraceWriter* trace_writer);
+ ~TraceBufferRingBuffer();
+
+ TraceObject* AddTraceEvent(uint64_t* handle) override;
+ TraceObject* GetEventByHandle(uint64_t handle) override;
+ bool Flush() override;
+
+ private:
+ uint64_t MakeHandle(size_t chunk_index, uint32_t chunk_seq,
+ size_t event_index) const;
+ void ExtractHandle(uint64_t handle, size_t* chunk_index, uint32_t* chunk_seq,
+ size_t* event_index) const;
+ size_t Capacity() const { return max_chunks_ * TraceBufferChunk::kChunkSize; }
+ size_t NextChunkIndex(size_t index) const;
+
+ mutable base::Mutex mutex_;
+ size_t max_chunks_;
+ std::unique_ptr<TraceWriter> trace_writer_;
+ std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
+ size_t chunk_index_;
+ bool is_empty_ = true;
+ uint32_t current_chunk_seq_ = 1;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // SRC_LIBPLATFORM_TRACING_TRACE_BUFFER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-config.cc b/deps/v8/src/libplatform/tracing/trace-config.cc
new file mode 100644
index 0000000000..7a824f614e
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-config.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+
+#include "include/libplatform/v8-tracing.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+
+class Isolate;
+
+namespace platform {
+namespace tracing {
+
+TraceConfig* TraceConfig::CreateDefaultTraceConfig() {
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->included_categories_.push_back("v8");
+ return trace_config;
+}
+
+bool TraceConfig::IsCategoryGroupEnabled(const char* category_group) const {
+ for (auto included_category : included_categories_) {
+ if (strcmp(included_category.data(), category_group) == 0) return true;
+ }
+ return false;
+}
+
+void TraceConfig::AddIncludedCategory(const char* included_category) {
+ DCHECK(included_category != NULL && strlen(included_category) > 0);
+ included_categories_.push_back(included_category);
+}
+
+void TraceConfig::AddExcludedCategory(const char* excluded_category) {
+ DCHECK(excluded_category != NULL && strlen(excluded_category) > 0);
+ excluded_categories_.push_back(excluded_category);
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
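
Note that IsCategoryGroupEnabled() above matches with an exact strcmp against the included list only; excluded categories are stored but not consulted by this check. A hedged usage sketch ("v8.gc" and "blink" are illustrative category names, not ones defined by this commit):

#include "include/libplatform/v8-tracing.h"

void ConfigureCategories() {
  using v8::platform::tracing::TraceConfig;
  TraceConfig* config = TraceConfig::CreateDefaultTraceConfig();
  config->AddIncludedCategory("v8.gc");
  bool v8_on = config->IsCategoryGroupEnabled("v8");     // true: default config
  bool gc_on = config->IsCategoryGroupEnabled("v8.gc");  // true: added above
  bool other = config->IsCategoryGroupEnabled("blink");  // false: exact match
  (void)v8_on; (void)gc_on; (void)other;
}
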
diff --git a/deps/v8/src/libplatform/tracing/trace-object.cc b/deps/v8/src/libplatform/tracing/trace-object.cc
new file mode 100644
index 0000000000..55be8921cb
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-object.cc
@@ -0,0 +1,130 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/libplatform/v8-tracing.h"
+
+#include "base/trace_event/common/trace_event_common.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+// We perform checks for NULL strings since it is possible that a string arg
+// value is NULL.
+V8_INLINE static size_t GetAllocLength(const char* str) {
+ return str ? strlen(str) + 1 : 0;
+}
+
+// Copies |*member| into |*buffer|, sets |*member| to point to this new
+// location, and then advances |*buffer| by the amount written.
+V8_INLINE static void CopyTraceObjectParameter(char** buffer,
+ const char** member) {
+ if (*member) {
+ size_t length = strlen(*member) + 1;
+ strncpy(*buffer, *member, length);
+ *member = *buffer;
+ *buffer += length;
+ }
+}
+
+void TraceObject::Initialize(char phase, const uint8_t* category_enabled_flag,
+ const char* name, const char* scope, uint64_t id,
+ uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags) {
+ pid_ = base::OS::GetCurrentProcessId();
+ tid_ = base::OS::GetCurrentThreadId();
+ phase_ = phase;
+ category_enabled_flag_ = category_enabled_flag;
+ name_ = name;
+ scope_ = scope;
+ id_ = id;
+ bind_id_ = bind_id;
+ flags_ = flags;
+ ts_ = base::TimeTicks::HighResolutionNow().ToInternalValue();
+ tts_ = base::ThreadTicks::Now().ToInternalValue();
+ duration_ = 0;
+ cpu_duration_ = 0;
+
+ // Clamp num_args since it may have been set by a third-party library.
+ num_args_ = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
+ for (int i = 0; i < num_args_; ++i) {
+ arg_names_[i] = arg_names[i];
+ arg_values_[i].as_uint = arg_values[i];
+ arg_types_[i] = arg_types[i];
+ }
+
+ bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
+ // Allocate a single buffer large enough to hold all of the string copies.
+ size_t alloc_size = 0;
+ if (copy) {
+ alloc_size += GetAllocLength(name) + GetAllocLength(scope);
+ for (int i = 0; i < num_args_; ++i) {
+ alloc_size += GetAllocLength(arg_names_[i]);
+ if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
+ arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
+ }
+ }
+
+ bool arg_is_copy[kTraceMaxNumArgs];
+ for (int i = 0; i < num_args_; ++i) {
+ // We only take a copy of arg values if they are of type COPY_STRING.
+ arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
+ if (arg_is_copy[i]) alloc_size += GetAllocLength(arg_values_[i].as_string);
+ }
+
+ if (alloc_size) {
+ // Since TraceObject can be initialized multiple times, we might need
+ // to free old memory.
+ delete[] parameter_copy_storage_;
+ char* ptr = parameter_copy_storage_ = new char[alloc_size];
+ if (copy) {
+ CopyTraceObjectParameter(&ptr, &name_);
+ CopyTraceObjectParameter(&ptr, &scope_);
+ for (int i = 0; i < num_args_; ++i) {
+ CopyTraceObjectParameter(&ptr, &arg_names_[i]);
+ }
+ }
+ for (int i = 0; i < num_args_; ++i) {
+ if (arg_is_copy[i]) {
+ CopyTraceObjectParameter(&ptr, &arg_values_[i].as_string);
+ }
+ }
+ }
+}
+
+TraceObject::~TraceObject() { delete[] parameter_copy_storage_; }
+
+void TraceObject::UpdateDuration() {
+ duration_ = base::TimeTicks::HighResolutionNow().ToInternalValue() - ts_;
+ cpu_duration_ = base::ThreadTicks::Now().ToInternalValue() - tts_;
+}
+
+void TraceObject::InitializeForTesting(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags, int pid, int tid,
+ int64_t ts, int64_t tts, uint64_t duration, uint64_t cpu_duration) {
+ pid_ = pid;
+ tid_ = tid;
+ phase_ = phase;
+ category_enabled_flag_ = category_enabled_flag;
+ name_ = name;
+ scope_ = scope;
+ id_ = id;
+ bind_id_ = bind_id;
+ num_args_ = num_args;
+ flags_ = flags;
+ ts_ = ts;
+ tts_ = tts;
+ duration_ = duration;
+ cpu_duration_ = cpu_duration;
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
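
A hedged sketch of the copy semantics above: with TRACE_EVENT_FLAG_COPY set (from base/trace_event/common/trace_event_common.h), Initialize() duplicates the name, scope, argument names and string values into the object's single parameter_copy_storage_ allocation, so callers may pass stack-local buffers. The helper below is hypothetical; category_flag is assumed to point at a valid category-enabled flag, and name() is the accessor used by the JSON writer later in this commit.

#include <cstdio>

void InitializeWithCopiedName(v8::platform::tracing::TraceObject* object,
                              const uint8_t* category_flag) {
  char local_name[32];
  snprintf(local_name, sizeof(local_name), "task-%d", 7);
  object->Initialize('B', category_flag, local_name, /*scope=*/nullptr,
                     /*id=*/0, /*bind_id=*/0, /*num_args=*/0, nullptr,
                     nullptr, nullptr, TRACE_EVENT_FLAG_COPY);
  // local_name may now go out of scope; object->name() points into storage
  // owned by the TraceObject itself.
}
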
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.cc b/deps/v8/src/libplatform/tracing/trace-writer.cc
new file mode 100644
index 0000000000..ec95527d5f
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-writer.cc
@@ -0,0 +1,163 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/trace-writer.h"
+
+#include <cmath>
+
+#include "base/trace_event/common/trace_event_common.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+// Writes the given string to a stream, taking care to escape characters
+// when necessary.
+V8_INLINE static void WriteJSONStringToStream(const char* str,
+ std::ostream& stream) {
+ size_t len = strlen(str);
+ stream << "\"";
+ for (size_t i = 0; i < len; ++i) {
+ // All of the permitted escape sequences in JSON strings, as per
+ // https://mathiasbynens.be/notes/javascript-escapes
+ switch (str[i]) {
+ case '\b':
+ stream << "\\b";
+ break;
+ case '\f':
+ stream << "\\f";
+ break;
+ case '\n':
+ stream << "\\n";
+ break;
+ case '\r':
+ stream << "\\r";
+ break;
+ case '\t':
+ stream << "\\t";
+ break;
+ case '\"':
+ stream << "\\\"";
+ break;
+ case '\\':
+ stream << "\\\\";
+ break;
+ // Note that because we use double quotes for JSON strings,
+ // we don't need to escape single quotes.
+ default:
+ stream << str[i];
+ break;
+ }
+ }
+ stream << "\"";
+}
+
+void JSONTraceWriter::AppendArgValue(uint8_t type,
+ TraceObject::ArgValue value) {
+ switch (type) {
+ case TRACE_VALUE_TYPE_BOOL:
+ stream_ << (value.as_bool ? "true" : "false");
+ break;
+ case TRACE_VALUE_TYPE_UINT:
+ stream_ << value.as_uint;
+ break;
+ case TRACE_VALUE_TYPE_INT:
+ stream_ << value.as_int;
+ break;
+ case TRACE_VALUE_TYPE_DOUBLE: {
+ std::string real;
+ double val = value.as_double;
+ if (std::isfinite(val)) {
+ std::ostringstream convert_stream;
+ convert_stream << val;
+ real = convert_stream.str();
+ // Ensure that the number has a .0 if there's no decimal or 'e'. This
+ // makes sure that when we read the JSON back, it's interpreted as a
+ // real rather than an int.
+ if (real.find('.') == std::string::npos &&
+ real.find('e') == std::string::npos &&
+ real.find('E') == std::string::npos) {
+ real += ".0";
+ }
+ } else if (std::isnan(val)) {
+ // The JSON spec doesn't allow NaN and Infinity (they have no valid JSON
+ // number representation). Use strings instead.
+ real = "\"NaN\"";
+ } else if (val < 0) {
+ real = "\"-Infinity\"";
+ } else {
+ real = "\"Infinity\"";
+ }
+ stream_ << real;
+ break;
+ }
+ case TRACE_VALUE_TYPE_POINTER:
+ // JSON only supports double and int numbers.
+ // So as not to lose bits from a 64-bit pointer, output as a hex string.
+ stream_ << "\"" << value.as_pointer << "\"";
+ break;
+ case TRACE_VALUE_TYPE_STRING:
+ case TRACE_VALUE_TYPE_COPY_STRING:
+ if (value.as_string == nullptr) {
+ stream_ << "\"NULL\"";
+ } else {
+ WriteJSONStringToStream(value.as_string, stream_);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+JSONTraceWriter::JSONTraceWriter(std::ostream& stream) : stream_(stream) {
+ stream_ << "{\"traceEvents\":[";
+}
+
+JSONTraceWriter::~JSONTraceWriter() { stream_ << "]}"; }
+
+void JSONTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
+ if (append_comma_) stream_ << ",";
+ append_comma_ = true;
+ stream_ << "{\"pid\":" << trace_event->pid()
+ << ",\"tid\":" << trace_event->tid()
+ << ",\"ts\":" << trace_event->ts()
+ << ",\"tts\":" << trace_event->tts() << ",\"ph\":\""
+ << trace_event->phase() << "\",\"cat\":\""
+ << TracingController::GetCategoryGroupName(
+ trace_event->category_enabled_flag())
+ << "\",\"name\":\"" << trace_event->name()
+ << "\",\"dur\":" << trace_event->duration()
+ << ",\"tdur\":" << trace_event->cpu_duration();
+ if (trace_event->flags() & TRACE_EVENT_FLAG_HAS_ID) {
+ if (trace_event->scope() != nullptr) {
+ stream_ << ",\"scope\":\"" << trace_event->scope() << "\"";
+ }
+ // So as not to lose bits from a 64-bit integer, output as a hex string.
+ stream_ << ",\"id\":\"0x" << std::hex << trace_event->id() << "\""
+ << std::dec;
+ }
+ stream_ << ",\"args\":{";
+ const char** arg_names = trace_event->arg_names();
+ const uint8_t* arg_types = trace_event->arg_types();
+ TraceObject::ArgValue* arg_values = trace_event->arg_values();
+ for (int i = 0; i < trace_event->num_args(); ++i) {
+ if (i > 0) stream_ << ",";
+ stream_ << "\"" << arg_names[i] << "\":";
+ AppendArgValue(arg_types[i], arg_values[i]);
+ }
+ stream_ << "}}";
+ // TODO(fmeawad): Add support for Flow Events.
+}
+
+void JSONTraceWriter::Flush() {}
+
+TraceWriter* TraceWriter::CreateJSONTraceWriter(std::ostream& stream) {
+ return new JSONTraceWriter(stream);
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
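
A minimal usage sketch (hypothetical driver code, not part of this commit). The constructor emits '{"traceEvents":[' and the destructor emits ']}', so the writer must be destroyed before the stream contents are consumed:

#include <memory>
#include <sstream>
#include <string>

std::string WriteEmptyTrace() {
  std::ostringstream out;
  {
    std::unique_ptr<v8::platform::tracing::TraceWriter> writer(
        v8::platform::tracing::TraceWriter::CreateJSONTraceWriter(out));
    // writer->AppendTraceEvent(trace_object);  // one JSON object per event
  }
  return out.str();  // a complete JSON document: {"traceEvents":[]}
}
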
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
new file mode 100644
index 0000000000..963fc6a64d
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+#define SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
+
+#include "include/libplatform/v8-tracing.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+class JSONTraceWriter : public TraceWriter {
+ public:
+ explicit JSONTraceWriter(std::ostream& stream);
+ ~JSONTraceWriter();
+ void AppendTraceEvent(TraceObject* trace_event) override;
+ void Flush() override;
+
+ private:
+ void AppendArgValue(uint8_t type, TraceObject::ArgValue value);
+
+ std::ostream& stream_;
+ bool append_comma_ = false;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // SRC_LIBPLATFORM_TRACING_TRACE_WRITER_H_
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
new file mode 100644
index 0000000000..e9a21725e2
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -0,0 +1,177 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+#include <string.h>
+
+#include "include/libplatform/v8-tracing.h"
+
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+#define MAX_CATEGORY_GROUPS 200
+
+// Parallel arrays g_category_groups and g_category_group_enabled are separate
+// so that a pointer to a member of g_category_group_enabled can be easily
+// converted to an index into g_category_groups. This allows macros to deal
+// only with pointers to the char enabled flags in g_category_group_enabled;
+// internally, the category name can then be recovered from such an
+// enabled-flag pointer.
+const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
+ "toplevel", "tracing already shutdown",
+ "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
+ "__metadata"};
+
+// The enabled flag is char instead of bool so that the API can be used from C.
+unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
+// Indexes here have to match the g_category_groups array indexes above.
+const int g_category_already_shutdown = 1;
+const int g_category_categories_exhausted = 2;
+// Metadata category not used in V8.
+// const int g_category_metadata = 3;
+const int g_num_builtin_categories = 4;
+
+// Skip default categories.
+v8::base::AtomicWord g_category_index = g_num_builtin_categories;
+
+void TracingController::Initialize(TraceBuffer* trace_buffer) {
+ trace_buffer_.reset(trace_buffer);
+}
+
+uint64_t TracingController::AddTraceEvent(
+ char phase, const uint8_t* category_enabled_flag, const char* name,
+ const char* scope, uint64_t id, uint64_t bind_id, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values, unsigned int flags) {
+ uint64_t handle;
+ TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
+ if (trace_object) {
+ trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
+ bind_id, num_args, arg_names, arg_types,
+ arg_values, flags);
+ }
+ return handle;
+}
+
+void TracingController::UpdateTraceEventDuration(
+ const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {
+ TraceObject* trace_object = trace_buffer_->GetEventByHandle(handle);
+ if (!trace_object) return;
+ trace_object->UpdateDuration();
+}
+
+const uint8_t* TracingController::GetCategoryGroupEnabled(
+ const char* category_group) {
+ if (!trace_buffer_) {
+ DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
+ return &g_category_group_enabled[g_category_already_shutdown];
+ }
+ return GetCategoryGroupEnabledInternal(category_group);
+}
+
+const char* TracingController::GetCategoryGroupName(
+ const uint8_t* category_group_enabled) {
+ // Calculate the index of the category group by finding
+ // category_group_enabled in g_category_group_enabled array.
+ uintptr_t category_begin =
+ reinterpret_cast<uintptr_t>(g_category_group_enabled);
+ uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
+ // Check for out of bounds category pointers.
+ DCHECK(category_ptr >= category_begin &&
+ category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
+ MAX_CATEGORY_GROUPS));
+ uintptr_t category_index =
+ (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
+ return g_category_groups[category_index];
+}
+
+void TracingController::StartTracing(TraceConfig* trace_config) {
+ trace_config_.reset(trace_config);
+ mode_ = RECORDING_MODE;
+ UpdateCategoryGroupEnabledFlags();
+}
+
+void TracingController::StopTracing() {
+ mode_ = DISABLED;
+ UpdateCategoryGroupEnabledFlags();
+ trace_buffer_->Flush();
+}
+
+void TracingController::UpdateCategoryGroupEnabledFlag(size_t category_index) {
+ unsigned char enabled_flag = 0;
+ const char* category_group = g_category_groups[category_index];
+ if (mode_ == RECORDING_MODE &&
+ trace_config_->IsCategoryGroupEnabled(category_group)) {
+ enabled_flag |= ENABLED_FOR_RECORDING;
+ }
+
+ // TODO(fmeawad): EventCallback and ETW modes are not yet supported in V8.
+ // TODO(primiano): this is a temporary workaround for catapult:#2341,
+ // to guarantee that metadata events are always added even if the category
+ // filter is "-*". See crbug.com/618054 for more details and long-term fix.
+ if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata")) {
+ enabled_flag |= ENABLED_FOR_RECORDING;
+ }
+
+ g_category_group_enabled[category_index] = enabled_flag;
+}
+
+void TracingController::UpdateCategoryGroupEnabledFlags() {
+ size_t category_index = base::NoBarrier_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; i++) UpdateCategoryGroupEnabledFlag(i);
+}
+
+const uint8_t* TracingController::GetCategoryGroupEnabledInternal(
+ const char* category_group) {
+ // Check that the category group does not contain a double quote.
+ DCHECK(!strchr(category_group, '"'));
+
+ // g_category_groups is append-only; avoid taking a lock on the fast path.
+ size_t current_category_index = v8::base::Acquire_Load(&g_category_index);
+
+ // Search for pre-existing category group.
+ for (size_t i = 0; i < current_category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ unsigned char* category_group_enabled = NULL;
+ size_t category_index = base::Acquire_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ // Create a new category group.
+ // Check that there is a slot for the new category_group.
+ DCHECK(category_index < MAX_CATEGORY_GROUPS);
+ if (category_index < MAX_CATEGORY_GROUPS) {
+ // Don't hold on to the category_group pointer, so that we can create
+ // category groups with strings not known at compile time (this is
+ // required by SetWatchEvent).
+ const char* new_group = strdup(category_group);
+ g_category_groups[category_index] = new_group;
+ DCHECK(!g_category_group_enabled[category_index]);
+ // Note that if both included and excluded patterns in the
+ // TraceConfig are empty, we exclude nothing,
+ // thereby enabling this category group.
+ UpdateCategoryGroupEnabledFlag(category_index);
+ category_group_enabled = &g_category_group_enabled[category_index];
+ // Update the max index now.
+ base::Release_Store(&g_category_index, category_index + 1);
+ } else {
+ category_group_enabled =
+ &g_category_group_enabled[g_category_categories_exhausted];
+ }
+ return category_group_enabled;
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
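
How the pieces introduced in this commit fit together, as a hedged end-to-end sketch; TracingController itself is declared in include/libplatform/v8-tracing.h (not shown in this diff), and the chunk count and file name below are illustrative:

#include <fstream>

void TraceToFile() {
  using namespace v8::platform::tracing;
  std::ofstream trace_file("v8_trace.json");
  TraceBuffer* ring_buffer = TraceBuffer::CreateTraceBufferRingBuffer(
      /*max_chunks=*/64, TraceWriter::CreateJSONTraceWriter(trace_file));
  TracingController controller;
  controller.Initialize(ring_buffer);  // the controller takes ownership
  controller.StartTracing(TraceConfig::CreateDefaultTraceConfig());
  // ... run instrumented work; AddTraceEvent() fills the ring buffer ...
  controller.StopTracing();  // flushes buffered events through the writer
}
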
diff --git a/deps/v8/src/libsampler/DEPS b/deps/v8/src/libsampler/DEPS
new file mode 100644
index 0000000000..bdf1a82670
--- /dev/null
+++ b/deps/v8/src/libsampler/DEPS
@@ -0,0 +1,6 @@
+include_rules = [
+ "+include",
+ "-src",
+ "+src/base",
+ "+src/libsampler",
+]
\ No newline at end of file
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
new file mode 100644
index 0000000000..71c667f4d6
--- /dev/null
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -0,0 +1,663 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libsampler/sampler.h"
+
+#if V8_OS_POSIX && !V8_OS_CYGWIN
+
+#define USE_SIGNALS
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/time.h>
+
+#if !V8_OS_QNX && !V8_OS_AIX
+#include <sys/syscall.h> // NOLINT
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/mach.h>
+// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
+// and is a typedef for struct sigcontext. There is no uc_mcontext.
+#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && !V8_OS_OPENBSD
+#include <ucontext.h>
+#endif
+
+#include <unistd.h>
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+ (defined(__arm__) || defined(__aarch64__)) && \
+ !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h> // NOLINT
+#endif
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+#include "src/base/win32-headers.h"
+
+#endif
+
+#include <algorithm>
+#include <vector>
+#include <map>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/hashmap.h"
+#include "src/base/platform/platform.h"
+
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+// Not all versions of Android's C library provide ucontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See http://code.google.com/p/android/issues/detail?id=34784
+
+#if defined(__arm__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__aarch64__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__mips__)
+// MIPS version of sigcontext, for Android bionic.
+typedef struct {
+ uint32_t regmask;
+ uint32_t status;
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t acx;
+ uint32_t fpc_csr;
+ uint32_t fpc_eir;
+ uint32_t used_math;
+ uint32_t dsp;
+ uint64_t mdhi;
+ uint64_t mdlo;
+ uint32_t hi1;
+ uint32_t lo1;
+ uint32_t hi2;
+ uint32_t lo2;
+ uint32_t hi3;
+ uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__i386__)
+// x86 version for Android.
+typedef struct {
+ uint32_t gregs[19];
+ void* fpregs;
+ uint32_t oldmask;
+ uint32_t cr2;
+} mcontext_t;
+
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
+
+#elif defined(__x86_64__)
+// x64 version for Android.
+typedef struct {
+ uint64_t gregs[23];
+ void* fpregs;
+ uint64_t __reserved1[8];
+} mcontext_t;
+
+typedef struct ucontext {
+ uint64_t uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
+#endif
+
+#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+
+namespace v8 {
+namespace sampler {
+
+namespace {
+
+#if defined(USE_SIGNALS)
+typedef std::vector<Sampler*> SamplerList;
+typedef SamplerList::iterator SamplerListIterator;
+typedef base::AtomicValue<bool> AtomicMutex;
+
+class AtomicGuard {
+ public:
+ explicit AtomicGuard(AtomicMutex* atomic, bool is_blocking = true)
+ : atomic_(atomic), is_success_(false) {
+ do {
+ // Use Acquire_Load to gain mutual exclusion.
+ USE(atomic_->Value());
+ is_success_ = atomic_->TrySetValue(false, true);
+ } while (is_blocking && !is_success_);
+ }
+
+ bool is_success() const { return is_success_; }
+
+ ~AtomicGuard() {
+ if (!is_success_) return;
+ atomic_->SetValue(false);
+ }
+
+ private:
+ AtomicMutex* const atomic_;
+ bool is_success_;
+};
+
+// Returns key for hash map.
+void* ThreadKey(pthread_t thread_id) {
+ return reinterpret_cast<void*>(thread_id);
+}
+
+// Returns hash value for hash map.
+uint32_t ThreadHash(pthread_t thread_id) {
+#if V8_OS_BSD
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(thread_id));
+#else
+ return static_cast<uint32_t>(thread_id);
+#endif
+}
+
+#endif // USE_SIGNALS
+
+} // namespace
+
+#if defined(USE_SIGNALS)
+
+class Sampler::PlatformData {
+ public:
+ PlatformData() : vm_tid_(pthread_self()) {}
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+class SamplerManager {
+ public:
+ SamplerManager() : sampler_map_(base::HashMap::PointersMatch) {}
+
+ void AddSampler(Sampler* sampler) {
+ AtomicGuard atomic_guard(&samplers_access_counter_);
+ DCHECK(sampler->IsActive() || !sampler->IsRegistered());
+ // Add sampler into map if needed.
+ pthread_t thread_id = sampler->platform_data()->vm_tid();
+ base::HashMap::Entry* entry =
+ sampler_map_.LookupOrInsert(ThreadKey(thread_id),
+ ThreadHash(thread_id));
+ DCHECK(entry != nullptr);
+ if (entry->value == nullptr) {
+ SamplerList* samplers = new SamplerList();
+ samplers->push_back(sampler);
+ entry->value = samplers;
+ } else {
+ SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+ bool exists = false;
+ for (SamplerListIterator iter = samplers->begin();
+ iter != samplers->end(); ++iter) {
+ if (*iter == sampler) {
+ exists = true;
+ break;
+ }
+ }
+ if (!exists) {
+ samplers->push_back(sampler);
+ }
+ }
+ }
+
+ void RemoveSampler(Sampler* sampler) {
+ AtomicGuard atomic_guard(&samplers_access_counter_);
+ DCHECK(sampler->IsActive() || sampler->IsRegistered());
+ // Remove sampler from map.
+ pthread_t thread_id = sampler->platform_data()->vm_tid();
+ void* thread_key = ThreadKey(thread_id);
+ uint32_t thread_hash = ThreadHash(thread_id);
+ base::HashMap::Entry* entry = sampler_map_.Lookup(thread_key, thread_hash);
+ DCHECK(entry != nullptr);
+ SamplerList* samplers = reinterpret_cast<SamplerList*>(entry->value);
+ for (SamplerListIterator iter = samplers->begin(); iter != samplers->end();
+ ++iter) {
+ if (*iter == sampler) {
+ samplers->erase(iter);
+ break;
+ }
+ }
+ if (samplers->empty()) {
+ sampler_map_.Remove(thread_key, thread_hash);
+ delete samplers;
+ }
+ }
+
+#if defined(USE_SIGNALS)
+ void DoSample(const v8::RegisterState& state) {
+ AtomicGuard atomic_guard(&SamplerManager::samplers_access_counter_, false);
+ if (!atomic_guard.is_success()) return;
+ pthread_t thread_id = pthread_self();
+ base::HashMap::Entry* entry =
+ sampler_map_.Lookup(ThreadKey(thread_id), ThreadHash(thread_id));
+ if (!entry) return;
+ SamplerList& samplers = *static_cast<SamplerList*>(entry->value);
+
+ for (size_t i = 0; i < samplers.size(); ++i) {
+ Sampler* sampler = samplers[i];
+ Isolate* isolate = sampler->isolate();
+ // We require a fully initialized and entered isolate.
+ if (isolate == nullptr || !isolate->IsInUse()) continue;
+ if (v8::Locker::IsActive() && !Locker::IsLocked(isolate)) continue;
+ sampler->SampleStack(state);
+ }
+ }
+#endif
+
+ static SamplerManager* instance() { return instance_.Pointer(); }
+
+ private:
+ base::HashMap sampler_map_;
+ static AtomicMutex samplers_access_counter_;
+ static base::LazyInstance<SamplerManager>::type instance_;
+};
+
+AtomicMutex SamplerManager::samplers_access_counter_;
+base::LazyInstance<SamplerManager>::type SamplerManager::instance_ =
+ LAZY_INSTANCE_INITIALIZER;
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+// ----------------------------------------------------------------------------
+// Win32 profiler support. On Cygwin we use the same sampler implementation as
+// on Win32.
+
+class Sampler::PlatformData {
+ public:
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. GetCurrentThread() won't work, as
+ // it returns a pseudo handle valid only on the calling thread. We're using
+ // OpenThread because DuplicateHandle for some reason fails in Chrome's sandbox.
+ PlatformData()
+ : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId())) {}
+
+ ~PlatformData() {
+ if (profiled_thread_ != nullptr) {
+ CloseHandle(profiled_thread_);
+ profiled_thread_ = nullptr;
+ }
+ }
+
+ HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
+ HANDLE profiled_thread_;
+};
+#endif // USE_SIGNALS
+
+
+#if defined(USE_SIGNALS)
+class SignalHandler {
+ public:
+ static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
+ static void TearDown() {
+ delete mutex_;
+ mutex_ = nullptr;
+ }
+
+ static void IncreaseSamplerCount() {
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
+ if (++client_count_ == 1) Install();
+ }
+
+ static void DecreaseSamplerCount() {
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
+ if (--client_count_ == 0) Restore();
+ }
+
+ static bool Installed() {
+ base::LockGuard<base::Mutex> lock_guard(mutex_);
+ return signal_handler_installed_;
+ }
+
+ private:
+ static void Install() {
+ struct sigaction sa;
+ sa.sa_sigaction = &HandleProfilerSignal;
+ sigemptyset(&sa.sa_mask);
+#if V8_OS_QNX
+ sa.sa_flags = SA_SIGINFO;
+#else
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+#endif
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static void Restore() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
+ static void FillRegisterState(void* context, RegisterState* regs);
+ static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+
+ // Protects the process wide state below.
+ static base::Mutex* mutex_;
+ static int client_count_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+};
+
+base::Mutex* SignalHandler::mutex_ = nullptr;
+int SignalHandler::client_count_ = 0;
+struct sigaction SignalHandler::old_signal_handler_;
+bool SignalHandler::signal_handler_installed_ = false;
+
+
+void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
+ void* context) {
+ USE(info);
+ if (signal != SIGPROF) return;
+ v8::RegisterState state;
+ FillRegisterState(context, &state);
+ SamplerManager::instance()->DoSample(state);
+}
+
+void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+#endif
+#if V8_OS_LINUX
+#if V8_HOST_ARCH_IA32
+ state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
+ state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
+ state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_RIP]);
+ state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_RSP]);
+ state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_RBP]);
+#elif V8_HOST_ARCH_ARM
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+ // Old GLibc ARM versions used a gregs[] array to access the register
+ // values from mcontext_t.
+ state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
+ state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
+ state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
+#else
+ state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
+ state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
+ state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
+#endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+#elif V8_HOST_ARCH_ARM64
+ state->pc = reinterpret_cast<void*>(mcontext.pc);
+ state->sp = reinterpret_cast<void*>(mcontext.sp);
+ // FP is an alias for x29.
+ state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
+#elif V8_HOST_ARCH_MIPS
+ state->pc = reinterpret_cast<void*>(mcontext.pc);
+ state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
+ state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_MIPS64
+ state->pc = reinterpret_cast<void*>(mcontext.pc);
+ state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
+ state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_PPC
+ state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
+ state->sp =
+ reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
+ state->fp =
+ reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
+#elif V8_HOST_ARCH_S390
+#if V8_TARGET_ARCH_32_BIT
+ // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
+ // mode. This bit needs to be masked out to resolve actual address.
+ state->pc =
+ reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
+#else
+ state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.psw.addr);
+#endif // V8_TARGET_ARCH_32_BIT
+ state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
+ state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_MACOSX
+#if V8_HOST_ARCH_X64
+#if __DARWIN_UNIX03
+ state->pc = reinterpret_cast<void*>(mcontext->__ss.__rip);
+ state->sp = reinterpret_cast<void*>(mcontext->__ss.__rsp);
+ state->fp = reinterpret_cast<void*>(mcontext->__ss.__rbp);
+#else // !__DARWIN_UNIX03
+ state->pc = reinterpret_cast<void*>(mcontext->ss.rip);
+ state->sp = reinterpret_cast<void*>(mcontext->ss.rsp);
+ state->fp = reinterpret_cast<void*>(mcontext->ss.rbp);
+#endif // __DARWIN_UNIX03
+#elif V8_HOST_ARCH_IA32
+#if __DARWIN_UNIX03
+ state->pc = reinterpret_cast<void*>(mcontext->__ss.__eip);
+ state->sp = reinterpret_cast<void*>(mcontext->__ss.__esp);
+ state->fp = reinterpret_cast<void*>(mcontext->__ss.__ebp);
+#else // !__DARWIN_UNIX03
+ state->pc = reinterpret_cast<void*>(mcontext->ss.eip);
+ state->sp = reinterpret_cast<void*>(mcontext->ss.esp);
+ state->fp = reinterpret_cast<void*>(mcontext->ss.ebp);
+#endif // __DARWIN_UNIX03
+#endif // V8_HOST_ARCH_IA32
+#elif V8_OS_FREEBSD
+#if V8_HOST_ARCH_IA32
+ state->pc = reinterpret_cast<void*>(mcontext.mc_eip);
+ state->sp = reinterpret_cast<void*>(mcontext.mc_esp);
+ state->fp = reinterpret_cast<void*>(mcontext.mc_ebp);
+#elif V8_HOST_ARCH_X64
+ state->pc = reinterpret_cast<void*>(mcontext.mc_rip);
+ state->sp = reinterpret_cast<void*>(mcontext.mc_rsp);
+ state->fp = reinterpret_cast<void*>(mcontext.mc_rbp);
+#elif V8_HOST_ARCH_ARM
+ state->pc = reinterpret_cast<void*>(mcontext.mc_r15);
+ state->sp = reinterpret_cast<void*>(mcontext.mc_r13);
+ state->fp = reinterpret_cast<void*>(mcontext.mc_r11);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_NETBSD
+#if V8_HOST_ARCH_IA32
+ state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_EIP]);
+ state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_ESP]);
+ state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ state->pc = reinterpret_cast<void*>(mcontext.__gregs[_REG_RIP]);
+ state->sp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RSP]);
+ state->fp = reinterpret_cast<void*>(mcontext.__gregs[_REG_RBP]);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_OPENBSD
+#if V8_HOST_ARCH_IA32
+ state->pc = reinterpret_cast<void*>(ucontext->sc_eip);
+ state->sp = reinterpret_cast<void*>(ucontext->sc_esp);
+ state->fp = reinterpret_cast<void*>(ucontext->sc_ebp);
+#elif V8_HOST_ARCH_X64
+ state->pc = reinterpret_cast<void*>(ucontext->sc_rip);
+ state->sp = reinterpret_cast<void*>(ucontext->sc_rsp);
+ state->fp = reinterpret_cast<void*>(ucontext->sc_rbp);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_SOLARIS
+ state->pc = reinterpret_cast<void*>(mcontext.gregs[REG_PC]);
+ state->sp = reinterpret_cast<void*>(mcontext.gregs[REG_SP]);
+ state->fp = reinterpret_cast<void*>(mcontext.gregs[REG_FP]);
+#elif V8_OS_QNX
+#if V8_HOST_ARCH_IA32
+ state->pc = reinterpret_cast<void*>(mcontext.cpu.eip);
+ state->sp = reinterpret_cast<void*>(mcontext.cpu.esp);
+ state->fp = reinterpret_cast<void*>(mcontext.cpu.ebp);
+#elif V8_HOST_ARCH_ARM
+ state->pc = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_PC]);
+ state->sp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_SP]);
+ state->fp = reinterpret_cast<void*>(mcontext.cpu.gpr[ARM_REG_FP]);
+#endif // V8_HOST_ARCH_*
+#elif V8_OS_AIX
+ state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
+ state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
+ state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
+#endif // V8_OS_AIX
+}
+
+#endif // USE_SIGNALS
+
+
+void Sampler::SetUp() {
+#if defined(USE_SIGNALS)
+ SignalHandler::SetUp();
+#endif
+}
+
+
+void Sampler::TearDown() {
+#if defined(USE_SIGNALS)
+ SignalHandler::TearDown();
+#endif
+}
+
+Sampler::Sampler(Isolate* isolate)
+ : is_counting_samples_(false),
+ js_sample_count_(0),
+ external_sample_count_(0),
+ isolate_(isolate),
+ profiling_(false),
+ has_processing_thread_(false),
+ active_(false),
+ registered_(false) {
+ data_ = new PlatformData;
+}
+
+Sampler::~Sampler() {
+ DCHECK(!IsActive());
+#if defined(USE_SIGNALS)
+ if (IsRegistered()) {
+ SamplerManager::instance()->RemoveSampler(this);
+ }
+#endif
+ delete data_;
+}
+
+void Sampler::Start() {
+ DCHECK(!IsActive());
+ SetActive(true);
+#if defined(USE_SIGNALS)
+ SamplerManager::instance()->AddSampler(this);
+#endif
+}
+
+
+void Sampler::Stop() {
+#if defined(USE_SIGNALS)
+ SamplerManager::instance()->RemoveSampler(this);
+#endif
+ DCHECK(IsActive());
+ SetActive(false);
+ SetRegistered(false);
+}
+
+
+void Sampler::IncreaseProfilingDepth() {
+ base::NoBarrier_AtomicIncrement(&profiling_, 1);
+#if defined(USE_SIGNALS)
+ SignalHandler::IncreaseSamplerCount();
+#endif
+}
+
+
+void Sampler::DecreaseProfilingDepth() {
+#if defined(USE_SIGNALS)
+ SignalHandler::DecreaseSamplerCount();
+#endif
+ base::NoBarrier_AtomicIncrement(&profiling_, -1);
+}
+
+
+#if defined(USE_SIGNALS)
+
+void Sampler::DoSample() {
+ if (!SignalHandler::Installed()) return;
+ if (!IsActive() && !IsRegistered()) {
+ SamplerManager::instance()->AddSampler(this);
+ SetRegistered(true);
+ }
+ pthread_kill(platform_data()->vm_tid(), SIGPROF);
+}
+
+#elif V8_OS_WIN || V8_OS_CYGWIN
+
+void Sampler::DoSample() {
+ HANDLE profiled_thread = platform_data()->profiled_thread();
+ if (profiled_thread == nullptr) return;
+
+ const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
+ v8::RegisterState state;
+#if V8_HOST_ARCH_X64
+ state.pc = reinterpret_cast<void*>(context.Rip);
+ state.sp = reinterpret_cast<void*>(context.Rsp);
+ state.fp = reinterpret_cast<void*>(context.Rbp);
+#else
+ state.pc = reinterpret_cast<void*>(context.Eip);
+ state.sp = reinterpret_cast<void*>(context.Esp);
+ state.fp = reinterpret_cast<void*>(context.Ebp);
+#endif
+ SampleStack(state);
+ }
+ ResumeThread(profiled_thread);
+}
+
+#endif // USE_SIGNALS
+
+} // namespace sampler
+} // namespace v8
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
new file mode 100644
index 0000000000..7ae3c8c476
--- /dev/null
+++ b/deps/v8/src/libsampler/sampler.h
@@ -0,0 +1,103 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBSAMPLER_SAMPLER_H_
+#define V8_LIBSAMPLER_SAMPLER_H_
+
+#include "include/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace sampler {
+
+// ----------------------------------------------------------------------------
+// Sampler
+//
+// A sampler periodically samples the state of the VM and optionally
+// (if used for profiling) the program counter and stack pointer for
+// the thread that created it.
+
+class Sampler {
+ public:
+ static const int kMaxFramesCountLog2 = 8;
+ static const unsigned kMaxFramesCount = (1u << kMaxFramesCountLog2) - 1;
+
+ // Initializes the Sampler support. Called once at VM startup.
+ static void SetUp();
+ static void TearDown();
+
+ // Initialize sampler.
+ explicit Sampler(Isolate* isolate);
+ virtual ~Sampler();
+
+ Isolate* isolate() const { return isolate_; }
+
+ // Performs stack sampling.
+ // Clients should override this method in order to do something on samples,
+ // for example buffer samples in a queue.
+ virtual void SampleStack(const v8::RegisterState& regs) = 0;
+
+ // Start and stop sampler.
+ void Start();
+ void Stop();
+
+ // Whether the sampling thread should use this Sampler for CPU profiling.
+ bool IsProfiling() const {
+ return base::NoBarrier_Load(&profiling_) > 0 &&
+ !base::NoBarrier_Load(&has_processing_thread_);
+ }
+ void IncreaseProfilingDepth();
+ void DecreaseProfilingDepth();
+
+ // Whether the sampler is running (that is, consumes resources).
+ bool IsActive() const { return base::NoBarrier_Load(&active_); }
+
+ // CpuProfiler collects samples by calling DoSample directly
+ // without calling Start. To keep it working, we register the sampler
+ // with the CpuProfiler.
+ bool IsRegistered() const { return base::NoBarrier_Load(&registered_); }
+
+ void DoSample();
+
+ void SetHasProcessingThread(bool value) {
+ base::NoBarrier_Store(&has_processing_thread_, value);
+ }
+
+ // Used in tests to make sure that stack sampling is performed.
+ unsigned js_sample_count() const { return js_sample_count_; }
+ unsigned external_sample_count() const { return external_sample_count_; }
+ void StartCountingSamples() {
+ js_sample_count_ = 0;
+ external_sample_count_ = 0;
+ is_counting_samples_ = true;
+ }
+
+ class PlatformData;
+ PlatformData* platform_data() const { return data_; }
+
+ protected:
+ // Counts stack samples taken in various VM states.
+ bool is_counting_samples_;
+ unsigned js_sample_count_;
+ unsigned external_sample_count_;
+
+ private:
+ void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
+ void SetRegistered(bool value) { base::NoBarrier_Store(&registered_, value); }
+
+ Isolate* isolate_;
+ base::Atomic32 profiling_;
+ base::Atomic32 has_processing_thread_;
+ base::Atomic32 active_;
+ base::Atomic32 registered_;
+ PlatformData* data_; // Platform specific data.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
+};
+
+} // namespace sampler
+} // namespace v8
+
+#endif // V8_LIBSAMPLER_SAMPLER_H_
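
A sketch of a client sampler against the virtual interface above. SampleStack() is invoked from a signal handler on POSIX, so real implementations must stay async-signal-safe; this hypothetical one merely counts invocations:

class CountingSampler : public v8::sampler::Sampler {
 public:
  explicit CountingSampler(v8::Isolate* isolate) : Sampler(isolate) {}

  void SampleStack(const v8::RegisterState&) override { ++samples_; }

  int samples() const { return samples_; }

 private:
  int samples_ = 0;  // a real sampler would use an async-signal-safe counter
};

// Hypothetical usage:
//   CountingSampler sampler(isolate);
//   sampler.Start();
//   sampler.DoSample();  // raises SIGPROF on the sampled thread (POSIX)
//   sampler.Stop();
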
diff --git a/deps/v8/src/locked-queue-inl.h b/deps/v8/src/locked-queue-inl.h
index 8b3e9d02bb..eb18f649ae 100644
--- a/deps/v8/src/locked-queue-inl.h
+++ b/deps/v8/src/locked-queue-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_LOCKED_QUEUE_INL_
#define V8_LOCKED_QUEUE_INL_
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "src/locked-queue.h"
namespace v8 {
@@ -15,7 +15,7 @@ template <typename Record>
struct LockedQueue<Record>::Node : Malloced {
Node() : next(nullptr) {}
Record value;
- AtomicValue<Node*> next;
+ base::AtomicValue<Node*> next;
};
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index 765398fdd7..0eb18e0525 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -13,25 +13,24 @@
namespace v8 {
namespace internal {
-Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
- Script* script) {
- if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG) &&
- script->type() == Script::TYPE_NATIVE) {
- switch (tag) {
- case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
- case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
- case SCRIPT_TAG: return NATIVE_SCRIPT_TAG;
- default: return tag;
- }
- } else {
- return tag;
+CodeEventListener::LogEventsAndTags Logger::ToNativeByScript(
+ CodeEventListener::LogEventsAndTags tag, Script* script) {
+ if (script->type() != Script::TYPE_NATIVE) return tag;
+ switch (tag) {
+ case CodeEventListener::FUNCTION_TAG:
+ return CodeEventListener::NATIVE_FUNCTION_TAG;
+ case CodeEventListener::LAZY_COMPILE_TAG:
+ return CodeEventListener::NATIVE_LAZY_COMPILE_TAG;
+ case CodeEventListener::SCRIPT_TAG:
+ return CodeEventListener::NATIVE_SCRIPT_TAG;
+ default:
+ return tag;
}
}
-
void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
bool expose_to_api) {
- if (isolate->event_logger() != NULL) {
+ if (isolate->event_logger()) {
if (isolate->event_logger() == DefaultEventLoggerSentinel) {
LOG(isolate, TimerEvent(se, name));
} else if (expose_to_api) {
@@ -39,6 +38,13 @@ void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
}
}
}
+
+template <class TimerEvent>
+void TimerEventScope<TimerEvent>::LogTimerEvent(Logger::StartEnd se) {
+ Logger::CallEventLogger(isolate_, TimerEvent::name(), se,
+ TimerEvent::expose_to_api());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index ff9af685d1..22972ec055 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -164,16 +164,14 @@ void Log::MessageBuilder::Append(String* str) {
}
}
-
void Log::MessageBuilder::AppendAddress(Address addr) {
- Append("0x%" V8PRIxPTR, addr);
+ Append("%p", static_cast<void*>(addr));
}
-
void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
DCHECK(symbol);
Append("symbol(");
- if (!symbol->name()->IsUndefined()) {
+ if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
Append("\"");
AppendDetailed(String::cast(symbol->name()), false);
Append("\" ");
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 3e70a96c8f..059e5a53c5 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -10,6 +10,7 @@
#include <cstdarg>
#include "src/allocation.h"
+#include "src/base/compiler-specific.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
@@ -62,10 +63,10 @@ class Log {
~MessageBuilder() { }
// Append string data to the log message.
- void Append(const char* format, ...);
+ void PRINTF_FORMAT(2, 3) Append(const char* format, ...);
// Append string data to the log message.
- void AppendVA(const char* format, va_list args);
+ void PRINTF_FORMAT(2, 0) AppendVA(const char* format, va_list args);
// Append a character to the log message.
void Append(const char c);
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index da38d3e7f3..fc7fcb9ced 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -5,49 +5,38 @@
#include "src/log.h"
#include <cstdarg>
+#include <memory>
#include <sstream>
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
+#include "src/counters.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
+#include "src/libsampler/sampler.h"
#include "src/log-inl.h"
#include "src/log-utils.h"
#include "src/macro-assembler.h"
#include "src/perf-jit.h"
-#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/profiler-listener.h"
+#include "src/profiler/tick-sample.h"
#include "src/runtime-profiler.h"
+#include "src/source-position-table.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
-
#define DECLARE_EVENT(ignore1, name) name,
-static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
-};
+static const char* kLogEventsNames[CodeEventListener::NUMBER_OF_LOG_EVENTS] = {
+ LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)};
#undef DECLARE_EVENT
-
-#define CALL_LISTENERS(Call) \
-for (int i = 0; i < listeners_.length(); ++i) { \
- listeners_[i]->Call; \
-}
-
-#define PROFILER_LOG(Call) \
- do { \
- CpuProfiler* cpu_profiler = isolate_->cpu_profiler(); \
- if (cpu_profiler->is_profiling()) { \
- cpu_profiler->Call; \
- } \
- } while (false);
-
static const char* ComputeMarker(SharedFunctionInfo* shared,
AbstractCode* code) {
switch (code->kind()) {
@@ -70,7 +59,7 @@ class CodeEventLogger::NameBuffer {
utf8_pos_ = 0;
}
- void Init(Logger::LogEventsAndTags tag) {
+ void Init(CodeEventListener::LogEventsAndTags tag) {
Reset();
AppendBytes(kLogEventsNames[tag]);
AppendByte(':');
@@ -82,7 +71,7 @@ class CodeEventLogger::NameBuffer {
} else {
Symbol* symbol = Symbol::cast(name);
AppendBytes("symbol(");
- if (!symbol->name()->IsUndefined()) {
+ if (!symbol->name()->IsUndefined(symbol->GetIsolate())) {
AppendBytes("\"");
AppendString(String::cast(symbol->name()));
AppendBytes("\" ");
@@ -164,34 +153,32 @@ CodeEventLogger::CodeEventLogger() : name_buffer_(new NameBuffer) { }
CodeEventLogger::~CodeEventLogger() { delete name_buffer_; }
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, const char* comment) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(comment);
LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
}
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, Name* name) {
name_buffer_->Init(tag);
name_buffer_->AppendName(name);
LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
}
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* name) {
+ SharedFunctionInfo* shared, Name* name) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(shared, code));
name_buffer_->AppendName(name);
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* source,
+ SharedFunctionInfo* shared, Name* source,
int line, int column) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(shared, code));
@@ -209,7 +196,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size());
}
-void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
+void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, int args_count) {
name_buffer_->Init(tag);
name_buffer_->AppendInt(args_count);
@@ -218,7 +205,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
String* source) {
- name_buffer_->Init(Logger::REG_EXP_TAG);
+ name_buffer_->Init(CodeEventListener::REG_EXP_TAG);
name_buffer_->AppendString(source);
LogRecordedBuffer(code, NULL, name_buffer_->get(), name_buffer_->size());
}
@@ -280,12 +267,17 @@ void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
return;
}
- base::OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
- reinterpret_cast<uint64_t>(code->instruction_start()),
+ // Linux perf expects hex literals without a leading 0x, while some
+ // implementations of printf might prepend one when using the %p format
+ // for pointers, leading to wrongly formatted JIT symbol maps.
+ //
+ // Instead, we use the V8PRIxPTR format string and cast the pointer to
+ // uintptr_t, so that we have control over the exact output format.
+ base::OS::FPrint(perf_output_handle_, "%" V8PRIxPTR " %x %.*s\n",
+ reinterpret_cast<uintptr_t>(code->instruction_start()),
code->instruction_size(), length, name);
}
-
// Low-level logging support.
#define LL_LOG(Call) if (ll_logger_) ll_logger_->Call;
@@ -424,10 +416,6 @@ void LowLevelLogger::CodeMovingGCEvent() {
LogWriteBytes(&tag, sizeof(tag));
}
-
-#define JIT_LOG(Call) if (jit_logger_) jit_logger_->Call;
-
-
class JitLogger : public CodeEventLogger {
public:
explicit JitLogger(JitCodeEventHandler code_event_handler);
@@ -528,6 +516,31 @@ void JitLogger::EndCodePosInfoEvent(AbstractCode* code,
}
+// TODO(lpy): Keeping the sampling thread inside V8 is a temporary workaround
+// that reduces code duplication during the migration to the sampler library;
+// the sampling thread, as well as the sampler, will move to D8 eventually.
+class SamplingThread : public base::Thread {
+ public:
+ static const int kSamplingThreadStackSize = 64 * KB;
+
+ SamplingThread(sampler::Sampler* sampler, int interval)
+ : base::Thread(base::Thread::Options("SamplingThread",
+ kSamplingThreadStackSize)),
+ sampler_(sampler),
+ interval_(interval) {}
+ void Run() override {
+ while (sampler_->IsProfiling()) {
+ sampler_->DoSample();
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
+ }
+ }
+
+ private:
+ sampler::Sampler* sampler_;
+ const int interval_;
+};
+
+
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
@@ -540,7 +553,7 @@ class Profiler: public base::Thread {
void Disengage();
// Inserts collected profiling data into buffer.
- void Insert(TickSample* sample) {
+ void Insert(v8::TickSample* sample) {
if (paused_)
return;
@@ -561,7 +574,7 @@ class Profiler: public base::Thread {
private:
// Waits for a signal and removes profiling data.
- bool Remove(TickSample* sample) {
+ bool Remove(v8::TickSample* sample) {
buffer_semaphore_.Wait(); // Wait for an element.
*sample = buffer_[base::NoBarrier_Load(&tail_)];
bool result = overflow_;
@@ -578,7 +591,7 @@ class Profiler: public base::Thread {
// Cyclic buffer for communicating profiling samples
// between the signal handler and the worker thread.
static const int kBufferSize = 128;
- TickSample buffer_[kBufferSize]; // Buffer storage.
+ v8::TickSample buffer_[kBufferSize]; // Buffer storage.
int head_; // Index to the buffer head.
base::Atomic32 tail_; // Index to the buffer tail.
bool overflow_; // Tell whether a buffer overflow has occurred.
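The buffer_/buffer_semaphore_ pair above is a single-producer, single-consumer ring: the sampler Inserts, the worker thread Removes, and a counting semaphore tracks filled slots. A freestanding sketch of that shape, assuming C++20 for std::counting_semaphore and omitting the overflow_ bookkeeping:

    // Illustrative SPSC ring in the shape of Profiler's buffer_ /
    // buffer_semaphore_ pair (C++20 for std::counting_semaphore).
    #include <array>
    #include <semaphore>

    template <typename Sample, int kBufferSize = 128>
    class SampleRing {
     public:
      void Insert(const Sample& s) {  // producer (sampling) side
        buffer_[head_] = s;
        head_ = (head_ + 1) % kBufferSize;
        filled_.release();            // signal: one more queued element
      }
      Sample Remove() {               // consumer (log writer) side
        filled_.acquire();            // wait for an element, as in Remove()
        Sample s = buffer_[tail_];
        tail_ = (tail_ + 1) % kBufferSize;
        return s;
      }

     private:
      std::array<Sample, kBufferSize> buffer_;
      int head_ = 0;  // advanced only by the producer
      int tail_ = 0;  // advanced only by the consumer
      std::counting_semaphore<kBufferSize> filled_{0};
    };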
@@ -600,33 +613,44 @@ class Profiler: public base::Thread {
// Ticker used to provide ticks to the profiler and the sliding state
// window.
//
-class Ticker: public Sampler {
+class Ticker: public sampler::Sampler {
public:
- Ticker(Isolate* isolate, int interval):
- Sampler(isolate, interval),
- profiler_(NULL) {}
-
- ~Ticker() { if (IsActive()) Stop(); }
+ Ticker(Isolate* isolate, int interval)
+ : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
+ profiler_(nullptr),
+ sampling_thread_(new SamplingThread(this, interval)) {}
- virtual void Tick(TickSample* sample) {
- if (profiler_) profiler_->Insert(sample);
+ ~Ticker() {
+ if (IsActive()) Stop();
+ delete sampling_thread_;
}
void SetProfiler(Profiler* profiler) {
- DCHECK(profiler_ == NULL);
+ DCHECK(profiler_ == nullptr);
profiler_ = profiler;
IncreaseProfilingDepth();
if (!IsActive()) Start();
+ sampling_thread_->StartSynchronously();
}
void ClearProfiler() {
- profiler_ = NULL;
+ profiler_ = nullptr;
if (IsActive()) Stop();
DecreaseProfilingDepth();
+ sampling_thread_->Join();
+ }
+
+ void SampleStack(const v8::RegisterState& state) override {
+ if (!profiler_) return;
+ Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
+ TickSample sample;
+ sample.Init(isolate, state, TickSample::kIncludeCEntryFrame, true);
+ profiler_->Insert(&sample);
}
private:
Profiler* profiler_;
+ SamplingThread* sampling_thread_;
};
@@ -653,8 +677,9 @@ void Profiler::Engage() {
std::vector<base::OS::SharedLibraryAddress> addresses =
base::OS::GetSharedLibraryAddresses();
for (size_t i = 0; i < addresses.size(); ++i) {
- LOG(isolate_, SharedLibraryEvent(
- addresses[i].library_path, addresses[i].start, addresses[i].end));
+ LOG(isolate_,
+ SharedLibraryEvent(addresses[i].library_path, addresses[i].start,
+ addresses[i].end, addresses[i].aslr_slide));
}
// Start thread processing the profiler buffer.
@@ -679,7 +704,7 @@ void Profiler::Disengage() {
// inserting a fake element in the queue and then wait for
// the thread to terminate.
base::NoBarrier_Store(&running_, 0);
- TickSample sample;
+ v8::TickSample sample;
// Reset 'paused_' flag, otherwise semaphore may not be signalled.
resume();
Insert(&sample);
@@ -690,7 +715,7 @@ void Profiler::Disengage() {
void Profiler::Run() {
- TickSample sample;
+ v8::TickSample sample;
bool overflow = Remove(&sample);
while (base::NoBarrier_Load(&running_)) {
LOG(isolate_, TickEvent(&sample, overflow));
@@ -721,24 +746,16 @@ Logger::~Logger() {
delete log_;
}
-
void Logger::addCodeEventListener(CodeEventListener* listener) {
- DCHECK(!hasCodeEventListener(listener));
- listeners_.Add(listener);
+ bool result = isolate_->code_event_dispatcher()->AddListener(listener);
+ USE(result);
+ DCHECK(result);
}
-
void Logger::removeCodeEventListener(CodeEventListener* listener) {
- DCHECK(hasCodeEventListener(listener));
- listeners_.RemoveElement(listener);
+ isolate_->code_event_dispatcher()->RemoveListener(listener);
}
-
-bool Logger::hasCodeEventListener(CodeEventListener* listener) {
- return listeners_.Contains(listener);
-}
-
-
void Logger::ProfilerBeginEvent() {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -781,7 +798,7 @@ void Logger::UncheckedIntEvent(const char* name, int value) {
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,%" V8_PTR_PREFIX "d", name, value);
+ msg.Append("%s,%" V8PRIdPTR, name, value);
msg.WriteToLogFile();
}
@@ -789,7 +806,7 @@ void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
void Logger::HandleEvent(const char* name, Object** location) {
if (!log_->IsEnabled() || !FLAG_log_handles) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,0x%" V8PRIxPTR, name, location);
+ msg.Append("%s,%p", name, static_cast<void*>(location));
msg.WriteToLogFile();
}
@@ -813,24 +830,23 @@ void Logger::ApiSecurityCheck() {
ApiEvent("api,check-security");
}
-
void Logger::SharedLibraryEvent(const std::string& library_path,
- uintptr_t start,
- uintptr_t end) {
+ uintptr_t start, uintptr_t end,
+ intptr_t aslr_slide) {
if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
Log::MessageBuilder msg(log_);
- msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR,
- library_path.c_str(), start, end);
+ msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR
+ ",%" V8PRIdPTR,
+ library_path.c_str(), start, end, aslr_slide);
msg.WriteToLogFile();
}
void Logger::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
- PROFILER_LOG(CodeDeoptEvent(code, pc, fp_to_sp_delta));
if (!log_->IsEnabled() || !FLAG_log_internal_timer_events) return;
Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
- msg.Append("code-deopt,%ld,%d", since_epoch, code->CodeSize());
+ msg.Append("code-deopt,%d,%d", since_epoch, code->CodeSize());
msg.WriteToLogFile();
}
@@ -840,7 +856,7 @@ void Logger::CurrentTimeEvent() {
DCHECK(FLAG_log_timer_events || FLAG_prof_cpp);
Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
- msg.Append("current-time,%ld", since_epoch);
+ msg.Append("current-time,%d", since_epoch);
msg.WriteToLogFile();
}
@@ -870,14 +886,6 @@ void Logger::LeaveExternal(Isolate* isolate) {
isolate->set_current_vm_state(JS);
}
-
-template <class TimerEvent>
-void TimerEventScope<TimerEvent>::LogTimerEvent(Logger::StartEnd se) {
- Logger::CallEventLogger(isolate_, TimerEvent::name(), se,
- TimerEvent::expose_to_api());
-}
-
-
// Instantiate template methods.
#define V(TimerName, expose) \
template void TimerEventScope<TimerEvent##TimerName>::LogTimerEvent( \
@@ -914,19 +922,19 @@ void LogRegExpSource(Handle<JSRegExp> regexp, Isolate* isolate,
// global flag
Handle<Object> global =
JSReceiver::GetProperty(isolate, regexp, "global").ToHandleChecked();
- if (global->IsTrue()) {
+ if (global->IsTrue(isolate)) {
msg->Append('g');
}
// ignorecase flag
Handle<Object> ignorecase =
JSReceiver::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
- if (ignorecase->IsTrue()) {
+ if (ignorecase->IsTrue(isolate)) {
msg->Append('i');
}
// multiline flag
Handle<Object> multiline =
JSReceiver::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
- if (multiline->IsTrue()) {
+ if (multiline->IsTrue(isolate)) {
msg->Append('m');
}
}
@@ -949,20 +957,20 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
DCHECK(name->IsName());
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
- base::SmartArrayPointer<char> class_name =
+ std::unique_ptr<char[]> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
if (name->IsString()) {
- base::SmartArrayPointer<char> property_name =
+ std::unique_ptr<char[]> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",\"%s\"", tag, class_name.get(),
property_name.get());
} else {
Symbol* symbol = Symbol::cast(name);
uint32_t hash = symbol->Hash();
- if (symbol->name()->IsUndefined()) {
+ if (symbol->name()->IsUndefined(symbol->GetIsolate())) {
ApiEvent("api,%s,\"%s\",symbol(hash %x)", tag, class_name.get(), hash);
} else {
- base::SmartArrayPointer<char> str =
+ std::unique_ptr<char[]> str =
String::cast(symbol->name())
->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)", tag, class_name.get(),
@@ -976,7 +984,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
uint32_t index) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
- base::SmartArrayPointer<char> class_name =
+ std::unique_ptr<char[]> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",%u", tag, class_name.get(), index);
}
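Throughout this patch, base::SmartArrayPointer<char> is replaced by std::unique_ptr<char[]>, whose array specialization runs delete[] automatically. A small sketch of the idiom; MakeCString here is a hypothetical stand-in for String::ToCString():

    // Sketch: returning an owned C string as std::unique_ptr<char[]>.
    #include <cstring>
    #include <memory>

    std::unique_ptr<char[]> MakeCString(const char* src) {
      size_t len = std::strlen(src);
      std::unique_ptr<char[]> buf(new char[len + 1]);
      std::memcpy(buf.get(), src, len + 1);  // copy including terminator
      return buf;  // caller owns; delete[] runs on destruction
    }

    // Usage mirrors the logger code: buf.get() for printf-style formatting.
    //   std::unique_ptr<char[]> name = MakeCString("DebugName");
    //   printf("\"%s\"", name.get());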
@@ -985,7 +993,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag,
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
- base::SmartArrayPointer<char> class_name =
+ std::unique_ptr<char[]> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\"", tag, class_name.get());
}
@@ -1000,8 +1008,7 @@ void Logger::ApiEntryCall(const char* name) {
void Logger::NewEvent(const char* name, void* object, size_t size) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
- msg.Append("new,%s,0x%" V8PRIxPTR ",%u", name, object,
- static_cast<unsigned int>(size));
+ msg.Append("new,%s,%p,%u", name, object, static_cast<unsigned int>(size));
msg.WriteToLogFile();
}
@@ -1009,7 +1016,7 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
void Logger::DeleteEvent(const char* name, void* object) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
- msg.Append("delete,%s,0x%" V8PRIxPTR, name, object);
+ msg.Append("delete,%s,%p", name, object);
msg.WriteToLogFile();
}
@@ -1019,22 +1026,22 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg.Append("%s,%s,-2,",
- kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[CALLBACK_TAG]);
+ kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT],
+ kLogEventsNames[CodeEventListener::CALLBACK_TAG]);
msg.AppendAddress(entry_point);
if (name->IsString()) {
- base::SmartArrayPointer<char> str =
+ std::unique_ptr<char[]> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append(",1,\"%s%s\"", prefix, str.get());
} else {
Symbol* symbol = Symbol::cast(name);
- if (symbol->name()->IsUndefined()) {
- msg.Append(",1,symbol(hash %x)", prefix, symbol->Hash());
+ if (symbol->name()->IsUndefined(symbol->GetIsolate())) {
+ msg.Append(",1,symbol(hash %x)", symbol->Hash());
} else {
- base::SmartArrayPointer<char> str =
+ std::unique_ptr<char[]> str =
String::cast(symbol->name())
->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append(",1,symbol(\"%s\" hash %x)", prefix, str.get(),
+ msg.Append(",1,symbol(\"%s%s\" hash %x)", prefix, str.get(),
symbol->Hash());
}
}
@@ -1043,41 +1050,33 @@ void Logger::CallbackEventInternal(const char* prefix, Name* name,
void Logger::CallbackEvent(Name* name, Address entry_point) {
- PROFILER_LOG(CallbackEvent(name, entry_point));
CallbackEventInternal("", name, entry_point);
}
void Logger::GetterCallbackEvent(Name* name, Address entry_point) {
- PROFILER_LOG(GetterCallbackEvent(name, entry_point));
CallbackEventInternal("get ", name, entry_point);
}
void Logger::SetterCallbackEvent(Name* name, Address entry_point) {
- PROFILER_LOG(SetterCallbackEvent(name, entry_point));
CallbackEventInternal("set ", name, entry_point);
}
static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
- Logger::LogEventsAndTags tag,
+ CodeEventListener::LogEventsAndTags tag,
AbstractCode* code) {
DCHECK(msg);
msg->Append("%s,%s,%d,",
- kLogEventsNames[Logger::CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
+ kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT],
+ kLogEventsNames[tag], code->kind());
msg->AppendAddress(code->address());
msg->Append(",%d,", code->ExecutableSize());
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- const char* comment) {
- PROFILER_LOG(CodeCreateEvent(tag, code, comment));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, const char* comment) {
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeCreateEvent(tag, code, comment));
-
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
@@ -1085,13 +1084,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
msg.WriteToLogFile();
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- Name* name) {
- PROFILER_LOG(CodeCreateEvent(tag, code, name));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, Name* name) {
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeCreateEvent(tag, code, name));
-
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
@@ -1105,14 +1100,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
msg.WriteToLogFile();
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
Name* name) {
- PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, name));
-
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
-
if (!FLAG_log_code || !log_->IsEnabled()) return;
if (code == AbstractCode::cast(
isolate_->builtins()->builtin(Builtins::kCompileLazy))) {
@@ -1122,7 +1113,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
if (name->IsString()) {
- base::SmartArrayPointer<char> str =
+ std::unique_ptr<char[]> str =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s\"", str.get());
} else {
@@ -1138,23 +1129,18 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
// Although, it is possible to extract source and line from
// the SharedFunctionInfo object, we left it to caller
// to leave logging functions free from heap allocations.
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
Name* source, int line, int column) {
- PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column));
-
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line,
- column));
-
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
- base::SmartArrayPointer<char> name =
+ std::unique_ptr<char[]> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s ", name.get());
if (source->IsString()) {
- base::SmartArrayPointer<char> sourcestr = String::cast(source)->ToCString(
+ std::unique_ptr<char[]> sourcestr = String::cast(source)->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s", sourcestr.get());
} else {
@@ -1166,13 +1152,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
msg.WriteToLogFile();
}
-void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- int args_count) {
- PROFILER_LOG(CodeCreateEvent(tag, code, args_count));
-
+void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, int args_count) {
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeCreateEvent(tag, code, args_count));
-
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(&msg, tag, code);
@@ -1182,15 +1164,11 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
void Logger::CodeDisableOptEvent(AbstractCode* code,
SharedFunctionInfo* shared) {
- PROFILER_LOG(CodeDisableOptEvent(code, shared));
-
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeDisableOptEvent(code, shared));
-
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- msg.Append("%s,", kLogEventsNames[CODE_DISABLE_OPT_EVENT]);
- base::SmartArrayPointer<char> name =
+ msg.Append("%s,", kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT]);
+ std::unique_ptr<char[]> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("\"%s\",", name.get());
msg.Append("\"%s\"", GetBailoutReason(shared->disable_optimization_reason()));
@@ -1199,23 +1177,16 @@ void Logger::CodeDisableOptEvent(AbstractCode* code,
void Logger::CodeMovingGCEvent() {
- PROFILER_LOG(CodeMovingGCEvent());
-
if (!is_logging_code_events()) return;
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
- CALL_LISTENERS(CodeMovingGCEvent());
base::OS::SignalCodeMovingGC();
}
void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
- PROFILER_LOG(RegExpCodeCreateEvent(code, source));
-
if (!is_logging_code_events()) return;
- CALL_LISTENERS(RegExpCodeCreateEvent(code, source));
-
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(&msg, REG_EXP_TAG, code);
+ AppendCodeCreateHeader(&msg, CodeEventListener::REG_EXP_TAG, code);
msg.Append('"');
msg.AppendDetailed(source, false);
msg.Append('"');
@@ -1223,48 +1194,34 @@ void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
}
void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
- PROFILER_LOG(CodeMoveEvent(from, to));
-
if (!is_logging_code_events()) return;
- CALL_LISTENERS(CodeMoveEvent(from, to));
- MoveEventInternal(CODE_MOVE_EVENT, from->address(), to);
-}
-
-void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
- int pc_offset, int position) {
- JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
- pc_offset,
- position,
- JitCodeEvent::POSITION));
+ MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(), to);
}
-
-void Logger::CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position) {
- JIT_LOG(AddCodeLinePosInfoEvent(jit_handler_data,
- pc_offset,
- position,
- JitCodeEvent::STATEMENT_POSITION));
-}
-
-
-void Logger::CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder) {
- if (jit_logger_ != NULL) {
- pos_recorder->AttachJITHandlerData(jit_logger_->StartCodePosInfoEvent());
+void Logger::CodeLinePosInfoRecordEvent(AbstractCode* code,
+ ByteArray* source_position_table) {
+ if (jit_logger_) {
+ void* jit_handler_data = jit_logger_->StartCodePosInfoEvent();
+ for (SourcePositionTableIterator iter(source_position_table); !iter.done();
+ iter.Advance()) {
+ if (iter.is_statement()) {
+ jit_logger_->AddCodeLinePosInfoEvent(
+ jit_handler_data, iter.code_offset(), iter.source_position(),
+ JitCodeEvent::STATEMENT_POSITION);
+ }
+ jit_logger_->AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
+ iter.source_position(),
+ JitCodeEvent::POSITION);
+ }
+ jit_logger_->EndCodePosInfoEvent(code, jit_handler_data);
}
}
-void Logger::CodeEndLinePosInfoRecordEvent(AbstractCode* code,
- void* jit_handler_data) {
- JIT_LOG(EndCodePosInfoEvent(code, jit_handler_data));
-}
-
-
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
if (code_name == NULL) return; // Not a code object.
Log::MessageBuilder msg(log_);
- msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
+ msg.Append("%s,%d,",
+ kLogEventsNames[CodeEventListener::SNAPSHOT_CODE_NAME_EVENT], pos);
msg.AppendDoubleQuotedString(code_name);
msg.WriteToLogFile();
}
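On the embedder side, the events produced by CodeLinePosInfoRecordEvent arrive through V8's public JitCodeEvent callback. A hedged sketch of a consumer; the event types and field names follow include/v8.h as of this version and should be checked against the actual header:

    #include <cstdio>
    #include <v8.h>  // include path depends on the embedder's build setup

    static void JitHandler(const v8::JitCodeEvent* event) {
      switch (event->type) {
        case v8::JitCodeEvent::CODE_ADDED:
          std::printf("code added: %.*s at %p\n",
                      static_cast<int>(event->name.len), event->name.str,
                      event->code_start);
          break;
        case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO:
          // Emitted by CodeLinePosInfoRecordEvent above, once per
          // source position table entry.
          std::printf("  offset %zu -> source position %zu\n",
                      event->line_info.offset, event->line_info.pos);
          break;
        default:
          break;
      }
    }

    // isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, JitHandler);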
@@ -1272,13 +1229,11 @@ void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
if (!is_logging_code_events()) return;
- MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
+ MoveEventInternal(CodeEventListener::SHARED_FUNC_MOVE_EVENT, from, to);
}
-
-void Logger::MoveEventInternal(LogEventsAndTags event,
- Address from,
- Address to) {
+void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event,
+ Address from, Address to) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[event]);
@@ -1372,19 +1327,34 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
msg.WriteToLogFile();
}
+void Logger::RuntimeCallTimerEvent() {
+ RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
+ RuntimeCallTimer* timer = stats->current_timer();
+ if (timer == nullptr) return;
+ RuntimeCallCounter* counter = timer->counter();
+ if (counter == nullptr) return;
+ Log::MessageBuilder msg(log_);
+ msg.Append("active-runtime-timer,");
+ msg.AppendDoubleQuotedString(counter->name);
+ msg.WriteToLogFile();
+}
-void Logger::TickEvent(TickSample* sample, bool overflow) {
+void Logger::TickEvent(v8::TickSample* sample, bool overflow) {
if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
+ if (FLAG_runtime_call_stats) {
+ RuntimeCallTimerEvent();
+ }
Log::MessageBuilder msg(log_);
- msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
- msg.AppendAddress(sample->pc);
- msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
+ msg.Append("%s,", kLogEventsNames[CodeEventListener::TICK_EVENT]);
+ msg.AppendAddress(reinterpret_cast<Address>(sample->pc));
+ msg.Append(",%d", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
- msg.AppendAddress(sample->external_callback_entry);
+ msg.AppendAddress(
+ reinterpret_cast<Address>(sample->external_callback_entry));
} else {
msg.Append(",0,");
- msg.AppendAddress(sample->tos);
+ msg.AppendAddress(reinterpret_cast<Address>(sample->tos));
}
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
@@ -1392,7 +1362,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
}
for (unsigned i = 0; i < sample->frames_count; ++i) {
msg.Append(',');
- msg.AppendAddress(sample->stack[i]);
+ msg.AppendAddress(reinterpret_cast<Address>(sample->stack[i]));
}
msg.WriteToLogFile();
}
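Putting the two records above together: with --runtime-call-stats enabled, each tick line in the log is preceded by an active-runtime-timer line naming the counter that was running when the sample was taken. A hypothetical pair of records, with invented addresses, matching the format strings in RuntimeCallTimerEvent and TickEvent:

    active-runtime-timer,"Object_DeleteProperty"
    tick,0x7f3a2c001a40,12345,0,0x7f3a2c000123,2,0x7f3a2c001a40,0x7f3a2c0019f0

In the tick line the fields are the sampled pc, elapsed microseconds, the has-external-callback flag, the callback entry (or top-of-stack) address, the VM state, and then the sampled stack addresses; ",overflow" would appear before the stack if the ring buffer overflowed.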
@@ -1403,6 +1373,7 @@ void Logger::StopProfiler() {
if (profiler_ != NULL) {
profiler_->pause();
is_logging_ = false;
+ removeCodeEventListener(this);
}
}
@@ -1484,7 +1455,7 @@ static int EnumerateCompiledFunctions(Heap* heap,
void Logger::LogCodeObject(Object* object) {
AbstractCode* code_object = AbstractCode::cast(object);
- LogEventsAndTags tag = Logger::STUB_TAG;
+ CodeEventListener::LogEventsAndTags tag = CodeEventListener::STUB_TAG;
const char* description = "Unknown code from the snapshot";
switch (code_object->kind()) {
case AbstractCode::FUNCTION:
@@ -1502,53 +1473,59 @@ void Logger::LogCodeObject(Object* object) {
CodeStub::MajorName(CodeStub::GetMajorKey(code_object->GetCode()));
if (description == NULL)
description = "A stub from the snapshot";
- tag = Logger::STUB_TAG;
+ tag = CodeEventListener::STUB_TAG;
break;
case AbstractCode::REGEXP:
description = "Regular expression code";
- tag = Logger::REG_EXP_TAG;
+ tag = CodeEventListener::REG_EXP_TAG;
break;
case AbstractCode::BUILTIN:
description =
isolate_->builtins()->name(code_object->GetCode()->builtin_index());
- tag = Logger::BUILTIN_TAG;
+ tag = CodeEventListener::BUILTIN_TAG;
break;
case AbstractCode::HANDLER:
description = "An IC handler from the snapshot";
- tag = Logger::HANDLER_TAG;
+ tag = CodeEventListener::HANDLER_TAG;
break;
case AbstractCode::KEYED_LOAD_IC:
description = "A keyed load IC from the snapshot";
- tag = Logger::KEYED_LOAD_IC_TAG;
+ tag = CodeEventListener::KEYED_LOAD_IC_TAG;
break;
case AbstractCode::LOAD_IC:
description = "A load IC from the snapshot";
- tag = Logger::LOAD_IC_TAG;
+ tag = CodeEventListener::LOAD_IC_TAG;
+ break;
+ case AbstractCode::LOAD_GLOBAL_IC:
+ description = "A load global IC from the snapshot";
+ tag = CodeEventListener::LOAD_GLOBAL_IC_TAG;
break;
case AbstractCode::CALL_IC:
description = "A call IC from the snapshot";
- tag = Logger::CALL_IC_TAG;
+ tag = CodeEventListener::CALL_IC_TAG;
break;
case AbstractCode::STORE_IC:
description = "A store IC from the snapshot";
- tag = Logger::STORE_IC_TAG;
+ tag = CodeEventListener::STORE_IC_TAG;
break;
case AbstractCode::KEYED_STORE_IC:
description = "A keyed store IC from the snapshot";
- tag = Logger::KEYED_STORE_IC_TAG;
+ tag = CodeEventListener::KEYED_STORE_IC_TAG;
break;
case AbstractCode::WASM_FUNCTION:
description = "A Wasm function";
- tag = Logger::STUB_TAG;
+ tag = CodeEventListener::STUB_TAG;
break;
case AbstractCode::JS_TO_WASM_FUNCTION:
description = "A JavaScript to Wasm adapter";
- tag = Logger::STUB_TAG;
+ tag = CodeEventListener::STUB_TAG;
break;
case AbstractCode::WASM_TO_JS_FUNCTION:
description = "A Wasm to JavaScript adapter";
- tag = Logger::STUB_TAG;
+ tag = CodeEventListener::STUB_TAG;
break;
+ case AbstractCode::NUMBER_OF_KINDS:
+ UNIMPLEMENTED();
}
PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
}
@@ -1556,8 +1533,6 @@ void Logger::LogCodeObject(Object* object) {
void Logger::LogCodeObjects() {
Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Logger::LogCodeObjects");
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
@@ -1569,20 +1544,24 @@ void Logger::LogCodeObjects() {
void Logger::LogBytecodeHandlers() {
if (!FLAG_ignition) return;
- interpreter::Interpreter* interpreter = isolate_->interpreter();
+ const interpreter::OperandScale kOperandScales[] = {
+#define VALUE(Name, _) interpreter::OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
const int last_index = static_cast<int>(interpreter::Bytecode::kLast);
- for (auto operand_scale = interpreter::OperandScale::kSingle;
- operand_scale <= interpreter::OperandScale::kMaxValid;
- operand_scale =
- interpreter::Bytecodes::NextOperandScale(operand_scale)) {
+ interpreter::Interpreter* interpreter = isolate_->interpreter();
+ for (auto operand_scale : kOperandScales) {
for (int index = 0; index <= last_index; ++index) {
interpreter::Bytecode bytecode = interpreter::Bytecodes::FromByte(index);
if (interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) {
Code* code = interpreter->GetBytecodeHandler(bytecode, operand_scale);
std::string bytecode_name =
interpreter::Bytecodes::ToString(bytecode, operand_scale);
- CodeCreateEvent(Logger::BYTECODE_HANDLER_TAG, AbstractCode::cast(code),
- bytecode_name.c_str());
+ PROFILE(isolate_, CodeCreateEvent(
+ CodeEventListener::BYTECODE_HANDLER_TAG,
+ AbstractCode::cast(code), bytecode_name.c_str()));
}
}
}
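The kOperandScales rewrite above uses the X-macro idiom: a single list macro is expanded with a different VALUE definition at each use site, so the enum and any array of its values can never drift apart. A self-contained illustration with an invented COLOR_LIST:

    // X-macro sketch: one list macro generates both an enum and an
    // iterable array of its values, like OPERAND_SCALE_LIST above.
    #define COLOR_LIST(V) \
      V(Red, 1)           \
      V(Green, 2)         \
      V(Blue, 4)

    enum class Color {
    #define VALUE(Name, _) k##Name,
      COLOR_LIST(VALUE)
    #undef VALUE
    };

    constexpr Color kAllColors[] = {
    #define VALUE(Name, _) Color::k##Name,
        COLOR_LIST(VALUE)
    #undef VALUE
    };

    // for (Color c : kAllColors) { ... }  // mirrors the kOperandScales loop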
@@ -1601,28 +1580,28 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
if (line_num > 0) {
PROFILE(isolate_,
CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared, NULL,
- *script_name, line_num, column_num));
+ Logger::ToNativeByScript(
+ CodeEventListener::LAZY_COMPILE_TAG, *script),
+ *code, *shared, *script_name, line_num, column_num));
} else {
// Can't distinguish eval and script here, so always use Script.
PROFILE(isolate_,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
- *code, *shared, NULL, *script_name));
+ CodeCreateEvent(Logger::ToNativeByScript(
+ CodeEventListener::SCRIPT_TAG, *script),
+ *code, *shared, *script_name));
}
} else {
PROFILE(isolate_,
- CodeCreateEvent(
- Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
- *code, *shared, NULL,
- isolate_->heap()->empty_string(), line_num, column_num));
+ CodeCreateEvent(Logger::ToNativeByScript(
+ CodeEventListener::LAZY_COMPILE_TAG, *script),
+ *code, *shared, isolate_->heap()->empty_string(),
+ line_num, column_num));
}
} else if (shared->IsApiFunction()) {
// API function.
FunctionTemplateInfo* fun_data = shared->get_api_func_data();
Object* raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined()) {
+ if (!raw_call_data->IsUndefined(isolate_)) {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
@@ -1632,17 +1611,14 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
}
} else {
- PROFILE(isolate_,
- CodeCreateEvent(
- Logger::LAZY_COMPILE_TAG, *code, *shared, NULL, *func_name));
+ PROFILE(isolate_, CodeCreateEvent(CodeEventListener::LAZY_COMPILE_TAG,
+ *code, *shared, *func_name));
}
}
void Logger::LogCompiledFunctions() {
Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Logger::LogCompiledFunctions");
HandleScope scope(isolate_);
const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1661,8 +1637,6 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
Heap* heap = isolate_->heap();
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "Logger::LogAccessorCallbacks");
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
@@ -1747,7 +1721,6 @@ bool Logger::SetUp(Isolate* isolate) {
PrepareLogFileName(log_file_name, isolate, FLAG_logfile);
log_->Initialize(log_file_name.str().c_str());
-
if (FLAG_perf_basic_prof) {
perf_basic_logger_ = new PerfBasicLogger();
addCodeEventListener(perf_basic_logger_);
@@ -1777,6 +1750,12 @@ bool Logger::SetUp(Isolate* isolate) {
profiler_->Engage();
}
+ profiler_listener_.reset();
+
+ if (is_logging_) {
+ addCodeEventListener(this);
+ }
+
return true;
}
@@ -1800,8 +1779,20 @@ void Logger::SetCodeEventHandler(uint32_t options,
}
}
+void Logger::SetUpProfilerListener() {
+ if (!is_initialized_) return;
+ if (profiler_listener_.get() == nullptr) {
+ profiler_listener_.reset(new ProfilerListener(isolate_));
+ }
+ addCodeEventListener(profiler_listener_.get());
+}
+
+void Logger::TearDownProfilerListener() {
+ if (profiler_listener_->HasObservers()) return;
+ removeCodeEventListener(profiler_listener_.get());
+}
-Sampler* Logger::sampler() {
+sampler::Sampler* Logger::sampler() {
return ticker_;
}
@@ -1844,6 +1835,10 @@ FILE* Logger::TearDown() {
jit_logger_ = NULL;
}
+ if (profiler_listener_.get() != nullptr) {
+ removeCodeEventListener(profiler_listener_.get());
+ }
+
return log_->Close();
}
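Logger no longer keeps its own listener list; add/removeCodeEventListener delegate to the isolate's code event dispatcher, whose AddListener reports success so the caller can DCHECK it. A reduced, illustrative sketch of that observer shape (not V8's actual CodeEventDispatcher):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    class Listener {
     public:
      virtual ~Listener() = default;
    };

    class Dispatcher {
     public:
      bool AddListener(Listener* l) {
        if (std::find(listeners_.begin(), listeners_.end(), l) !=
            listeners_.end()) {
          return false;  // already registered
        }
        listeners_.push_back(l);
        return true;
      }
      void RemoveListener(Listener* l) {
        listeners_.erase(
            std::remove(listeners_.begin(), listeners_.end(), l),
            listeners_.end());
      }

     private:
      std::vector<Listener*> listeners_;
    };

    // bool ok = dispatcher.AddListener(listener);
    // assert(ok);  // plays the role of DCHECK(result) above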
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index fdc50471b4..a05b187282 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -8,14 +8,19 @@
#include <string>
#include "src/allocation.h"
+#include "src/base/compiler-specific.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
+#include "src/code-events.h"
+#include "src/isolate.h"
#include "src/objects.h"
namespace v8 {
-namespace base {
-class Semaphore;
+struct TickSample;
+
+namespace sampler {
+class Sampler;
}
namespace internal {
@@ -56,108 +61,35 @@ namespace internal {
// Forward declarations.
class CodeEventListener;
-class CompilationInfo;
class CpuProfiler;
class Isolate;
+class JitLogger;
class Log;
-class PositionsRecorder;
+class LowLevelLogger;
+class PerfBasicLogger;
+class PerfJitLogger;
class Profiler;
+class ProfilerListener;
+class RuntimeCallTimer;
class Ticker;
-struct TickSample;
#undef LOG
-#define LOG(isolate, Call) \
- do { \
- v8::internal::Logger* logger = \
- (isolate)->logger(); \
- if (logger->is_logging()) \
- logger->Call; \
+#define LOG(isolate, Call) \
+ do { \
+ v8::internal::Logger* logger = (isolate)->logger(); \
+ if (logger->is_logging()) logger->Call; \
} while (false)
-#define LOG_CODE_EVENT(isolate, Call) \
- do { \
- v8::internal::Logger* logger = \
- (isolate)->logger(); \
- if (logger->is_logging_code_events()) \
- logger->Call; \
+#define LOG_CODE_EVENT(isolate, Call) \
+ do { \
+ v8::internal::Logger* logger = (isolate)->logger(); \
+ if (logger->is_logging_code_events()) logger->Call; \
} while (false)
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
- V(CODE_CREATION_EVENT, "code-creation") \
- V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
- V(CODE_MOVE_EVENT, "code-move") \
- V(CODE_DELETE_EVENT, "code-delete") \
- V(CODE_MOVING_GC, "code-moving-gc") \
- V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
- V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
- V(TICK_EVENT, "tick") \
- V(REPEAT_META_EVENT, "repeat") \
- V(BUILTIN_TAG, "Builtin") \
- V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
- V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
- V(CALL_INITIALIZE_TAG, "CallInitialize") \
- V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
- V(CALL_MISS_TAG, "CallMiss") \
- V(CALL_NORMAL_TAG, "CallNormal") \
- V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
- V(LOAD_INITIALIZE_TAG, "LoadInitialize") \
- V(LOAD_PREMONOMORPHIC_TAG, "LoadPreMonomorphic") \
- V(LOAD_MEGAMORPHIC_TAG, "LoadMegamorphic") \
- V(STORE_INITIALIZE_TAG, "StoreInitialize") \
- V(STORE_PREMONOMORPHIC_TAG, "StorePreMonomorphic") \
- V(STORE_GENERIC_TAG, "StoreGeneric") \
- V(STORE_MEGAMORPHIC_TAG, "StoreMegamorphic") \
- V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
- V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, "KeyedCallDebugPrepareStepIn") \
- V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
- V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
- V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
- V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
- V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
- V(CALLBACK_TAG, "Callback") \
- V(EVAL_TAG, "Eval") \
- V(FUNCTION_TAG, "Function") \
- V(HANDLER_TAG, "Handler") \
- V(BYTECODE_HANDLER_TAG, "BytecodeHandler") \
- V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
- V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
- V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
- V(LAZY_COMPILE_TAG, "LazyCompile") \
- V(CALL_IC_TAG, "CallIC") \
- V(LOAD_IC_TAG, "LoadIC") \
- V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
- V(REG_EXP_TAG, "RegExp") \
- V(SCRIPT_TAG, "Script") \
- V(STORE_IC_TAG, "StoreIC") \
- V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC") \
- V(STUB_TAG, "Stub") \
- V(NATIVE_FUNCTION_TAG, "Function") \
- V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
- V(NATIVE_SCRIPT_TAG, "Script")
-// Note that 'NATIVE_' cases for functions and scripts are mapped onto
-// original tags when writing to the log.
-
-
-class JitLogger;
-class PerfBasicLogger;
-class LowLevelLogger;
-class PerfJitLogger;
-class Sampler;
-
-class Logger {
+class Logger : public CodeEventListener {
public:
enum StartEnd { START = 0, END = 1 };
-#define DECLARE_ENUM(enum_item, ignore) enum_item,
- enum LogEventsAndTags {
- LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
- NUMBER_OF_LOG_EVENTS
- };
-#undef DECLARE_ENUM
-
// Acquires resources for logging if the right flags are set.
bool SetUp(Isolate* isolate);
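The LOG and LOG_CODE_EVENT macros above keep the do { ... } while (false) wrapper, which makes a multi-statement macro expand to exactly one statement. A tiny illustration of why that matters under an unbraced if/else:

    // Why do { ... } while (false): the macro expands to a single
    // statement, so it composes with unbraced control flow plus a
    // trailing semicolon.
    #include <cstdio>

    bool trace_enabled = true;

    #define TRACE(msg)                     \
      do {                                 \
        if (trace_enabled) {               \
          std::printf("trace: %s\n", msg); \
        }                                  \
      } while (false)

    void Example(bool cond) {
      if (cond)
        TRACE("taken");    // fine: one statement
      else
        TRACE("skipped");  // with a bare { ... } instead of do/while,
                           // the ";" after "}" would orphan this else
    }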
@@ -165,7 +97,15 @@ class Logger {
void SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler);
- Sampler* sampler();
+ // Sets up ProfilerListener.
+ void SetUpProfilerListener();
+
+ // Tear down ProfilerListener if it has no observers.
+ void TearDownProfilerListener();
+
+ sampler::Sampler* sampler();
+
+ ProfilerListener* profiler_listener() { return profiler_listener_.get(); }
// Frees resources acquired in SetUp.
// When a temporary file is used for the log, returns its stream descriptor,
@@ -211,29 +151,27 @@ class Logger {
void ApiObjectAccess(const char* tag, JSObject* obj);
void ApiEntryCall(const char* name);
-
// ==== Events logged by --log-code. ====
void addCodeEventListener(CodeEventListener* listener);
void removeCodeEventListener(CodeEventListener* listener);
- bool hasCodeEventListener(CodeEventListener* listener);
-
// Emits a code event for a callback function.
void CallbackEvent(Name* name, Address entry_point);
void GetterCallbackEvent(Name* name, Address entry_point);
void SetterCallbackEvent(Name* name, Address entry_point);
// Emits a code create event.
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- const char* source);
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code, Name* name);
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, const char* source);
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, Name* name);
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
Name* name);
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
Name* source, int line, int column);
- void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- int args_count);
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, int args_count);
// Emits a code deoptimization event.
void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
void CodeMovingGCEvent();
@@ -241,25 +179,16 @@ class Logger {
void RegExpCodeCreateEvent(AbstractCode* code, String* source);
// Emits a code move event.
void CodeMoveEvent(AbstractCode* from, Address to);
- // Emits a code line info add event with Postion type.
- void CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position);
- // Emits a code line info add event with StatementPostion type.
- void CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
- int pc_offset,
- int position);
- // Emits a code line info start to record event
- void CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder);
- // Emits a code line info finish record event.
- // It's the callee's responsibility to dispose the parameter jit_handler_data.
- void CodeEndLinePosInfoRecordEvent(AbstractCode* code,
- void* jit_handler_data);
+ // Emits a code line info record event.
+ void CodeLinePosInfoRecordEvent(AbstractCode* code,
+ ByteArray* source_position_table);
void SharedFunctionInfoMoveEvent(Address from, Address to);
void CodeNameEvent(Address addr, int pos, const char* code_name);
+ void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
+
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
void HeapSampleBeginEvent(const char* space, const char* kind);
@@ -274,11 +203,9 @@ class Logger {
void HeapSampleStats(const char* space, const char* kind,
intptr_t capacity, intptr_t used);
- void SharedLibraryEvent(const std::string& library_path,
- uintptr_t start,
- uintptr_t end);
+ void SharedLibraryEvent(const std::string& library_path, uintptr_t start,
+ uintptr_t end, intptr_t aslr_slide);
- void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
void CurrentTimeEvent();
void TimerEvent(StartEnd se, const char* name);
@@ -320,7 +247,8 @@ class Logger {
void LogBytecodeHandlers();
// Converts tag to a corresponding NATIVE_... if the script is native.
- INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
+ INLINE(static CodeEventListener::LogEventsAndTags ToNativeByScript(
+ CodeEventListener::LogEventsAndTags, Script*));
// Profiler's sampling interval (in milliseconds).
#if defined(ANDROID)
@@ -347,18 +275,20 @@ class Logger {
Address entry_point);
// Internal configurable move event.
- void MoveEventInternal(LogEventsAndTags event, Address from, Address to);
+ void MoveEventInternal(CodeEventListener::LogEventsAndTags event,
+ Address from, Address to);
// Used for logging stubs found in the snapshot.
void LogCodeObject(Object* code_object);
// Helper method. It resets name_buffer_ and add tag name into it.
- void InitNameBuffer(LogEventsAndTags tag);
+ void InitNameBuffer(CodeEventListener::LogEventsAndTags tag);
// Emits a profiler tick event. Used by the profiler thread.
void TickEvent(TickSample* sample, bool overflow);
+ void RuntimeCallTimerEvent();
- void ApiEvent(const char* name, ...);
+ PRINTF_FORMAT(2, 3) void ApiEvent(const char* format, ...);
// Logs a StringEvent regardless of whether FLAG_log is true.
void UncheckedStringEvent(const char* name, const char* value);
@@ -395,6 +325,7 @@ class Logger {
PerfJitLogger* perf_jit_logger_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
+ std::unique_ptr<ProfilerListener> profiler_listener_;
List<CodeEventListener*> listeners_;
// Guards against multiple calls to TearDown() that can happen in some tests.
@@ -437,79 +368,27 @@ class TimerEventScope {
~TimerEventScope() { LogTimerEvent(Logger::END); }
- void LogTimerEvent(Logger::StartEnd se);
-
private:
+ void LogTimerEvent(Logger::StartEnd se);
Isolate* isolate_;
};
-class PositionsRecorder BASE_EMBEDDED {
- public:
- PositionsRecorder() { jit_handler_data_ = NULL; }
-
- void AttachJITHandlerData(void* user_data) { jit_handler_data_ = user_data; }
-
- void* DetachJITHandlerData() {
- void* old_data = jit_handler_data_;
- jit_handler_data_ = NULL;
- return old_data;
- }
-
- protected:
- // Currently jit_handler_data_ is used to store JITHandler-specific data
- // over the lifetime of a PositionsRecorder
- void* jit_handler_data_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
-};
-
-class CodeEventListener {
- public:
- virtual ~CodeEventListener() {}
-
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- const char* comment) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- Name* name) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* name) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* source, int line,
- int column) = 0;
- virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- int args_count) = 0;
- virtual void CallbackEvent(Name* name, Address entry_point) = 0;
- virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
- virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
- virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
- virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
- virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
- virtual void CodeMovingGCEvent() = 0;
- virtual void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) = 0;
-};
-
-
class CodeEventLogger : public CodeEventListener {
public:
CodeEventLogger();
~CodeEventLogger() override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
const char* comment) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
Name* name) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
int args_count) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* name) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* source, int line, int column) override;
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, Name* name) override;
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
+ SharedFunctionInfo* shared, Name* source, int line,
+ int column) override;
void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
void CallbackEvent(Name* name, Address entry_point) override {}
@@ -517,6 +396,7 @@ class CodeEventLogger : public CodeEventListener {
void SetterCallbackEvent(Name* name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override {}
+ void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) override {}
private:
class NameBuffer;
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 713ea06948..54015d44c6 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -74,8 +74,7 @@ void LookupIterator::Next() {
JSReceiver* holder = *holder_;
Map* map = holder->map();
- if (map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE ||
- map->instance_type() == JS_GLOBAL_PROXY_TYPE) {
+ if (map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
: LookupInSpecialHolder<false>(map, holder);
if (IsFound()) return;
@@ -132,7 +131,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
return result;
}
auto root = handle(receiver->GetRootMap(isolate)->prototype(), isolate);
- if (root->IsNull()) {
+ if (root->IsNull(isolate)) {
unsigned int magic = 0xbbbbbbbb;
isolate->PushStackTraceAndDie(magic, *receiver, NULL, magic);
}
@@ -159,47 +158,42 @@ void LookupIterator::ReloadPropertyInformation() {
DCHECK(IsFound() || !holder_->HasFastProperties());
}
-bool LookupIterator::HolderIsInContextIndex(uint32_t index) const {
- DisallowHeapAllocation no_gc;
-
- Object* context = heap()->native_contexts_list();
- while (!context->IsUndefined()) {
- Context* current_context = Context::cast(context);
- if (current_context->get(index) == *holder_) {
- return true;
- }
- context = current_context->get(Context::NEXT_CONTEXT_LINK);
- }
- return false;
-}
-
void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
- if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
if (*name_ == heap()->constructor_string()) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
// Setting the constructor property could change an instance's @@species
if (holder_->IsJSArray()) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
} else if (holder_->map()->is_prototype_map()) {
+ DisallowHeapAllocation no_gc;
// Setting the constructor of Array.prototype of any realm also needs
// to invalidate the species protector
- if (HolderIsInContextIndex(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ if (isolate_->IsInAnyContext(*holder_,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
isolate_->CountUsage(v8::Isolate::UseCounterFeature::
kArrayPrototypeConstructorModified);
isolate_->InvalidateArraySpeciesProtector();
}
}
} else if (*name_ == heap()->species_symbol()) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
// Setting the Symbol.species property of any Array constructor invalidates
// the species protector
- if (HolderIsInContextIndex(Context::ARRAY_FUNCTION_INDEX)) {
+ if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
isolate_->InvalidateArraySpeciesProtector();
}
+ } else if (*name_ == heap()->is_concat_spreadable_symbol()) {
+ if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
+ isolate_->InvalidateIsConcatSpreadableProtector();
+ } else if (*name_ == heap()->has_instance_symbol()) {
+ if (!isolate_->IsHasInstanceLookupChainIntact()) return;
+ isolate_->InvalidateHasInstanceProtector();
}
}
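The protectors handled above all follow one pattern: fast paths check an "intact" flag, and the first write that could break the guarded invariant invalidates it permanently. A reduced sketch of the pattern, with invented names:

    // Illustrative "protector": cheap to check on fast paths, flipped once
    // (and never restored) when an invariant-breaking write is observed.
    class SpeciesProtector {
     public:
      bool IsIntact() const { return intact_; }
      void Invalidate() { intact_ = false; }  // one-way transition

     private:
      bool intact_ = true;
    };

    // On the write path (cf. InternalUpdateProtector above):
    //   if (!protector.IsIntact()) return;  // already invalidated
    //   if (WriteTouchesConstructorOrSpecies()) protector.Invalidate();
    // On the fast path:
    //   if (protector.IsIntact()) FastPath(); else GenericPath();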
@@ -226,6 +220,16 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
return;
}
+ if (holder->IsJSGlobalObject()) {
+ Handle<GlobalDictionary> dictionary(holder->global_dictionary());
+ Handle<PropertyCell> cell(
+ PropertyCell::cast(dictionary->ValueAt(dictionary_entry())));
+ DCHECK(!cell->IsTheHole(isolate_));
+ property_details_ = cell->property_details();
+ PropertyCell::PrepareForValue(dictionary, dictionary_entry(), value,
+ property_details_);
+ return;
+ }
if (!holder->HasFastProperties()) return;
Handle<Map> old_map(holder->map(), isolate_);
@@ -258,20 +262,34 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
attributes);
ReloadPropertyInformation<true>();
+ } else if (holder->HasFastProperties()) {
+ Handle<Map> old_map(holder->map(), isolate_);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ old_map, descriptor_number(), i::kData, attributes);
+ new_map = Map::PrepareForDataProperty(new_map, descriptor_number(), value);
+ JSObject::MigrateToMap(holder, new_map);
+ ReloadPropertyInformation<false>();
} else {
- if (!holder->HasFastProperties()) {
- PropertyDetails details(attributes, v8::internal::DATA, 0,
- PropertyCellType::kMutable);
- JSObject::SetNormalizedProperty(holder, name(), value, details);
+ PropertyDetails details(attributes, v8::internal::DATA, 0,
+ PropertyCellType::kMutable);
+ if (holder->IsJSGlobalObject()) {
+ Handle<GlobalDictionary> dictionary(holder->global_dictionary());
+
+ Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
+ dictionary, dictionary_entry(), value, details);
+ cell->set_value(*value);
+ property_details_ = cell->property_details();
} else {
- Handle<Map> old_map(holder->map(), isolate_);
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
- old_map, descriptor_number(), i::kData, attributes);
- new_map =
- Map::PrepareForDataProperty(new_map, descriptor_number(), value);
- JSObject::MigrateToMap(holder, new_map);
+ Handle<NameDictionary> dictionary(holder->property_dictionary());
+ PropertyDetails original_details =
+ dictionary->DetailsAt(dictionary_entry());
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK(enumeration_index > 0);
+ details = details.set_index(enumeration_index);
+ dictionary->SetEntry(dictionary_entry(), name(), value, details);
+ property_details_ = details;
}
- ReloadPropertyInformation<false>();
+ state_ = DATA;
}
WriteDataValue(value);
@@ -303,11 +321,31 @@ void LookupIterator::PrepareTransitionToDataProperty(
state_ = TRANSITION;
if (map->IsJSGlobalObjectMap()) {
// Install a property cell.
- auto cell = JSGlobalObject::EnsurePropertyCell(
- Handle<JSGlobalObject>::cast(receiver), name());
- DCHECK(cell->value()->IsTheHole());
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(receiver);
+ int entry;
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name(), PropertyCellType::kUninitialized, &entry);
+ Handle<GlobalDictionary> dictionary(global->global_dictionary(),
+ isolate_);
+ DCHECK(cell->value()->IsTheHole(isolate_));
+ DCHECK(!value->IsTheHole(isolate_));
transition_ = cell;
+ // Assign an enumeration index to the property and update
+ // SetNextEnumerationIndex.
+ int index = dictionary->NextEnumerationIndex();
+ dictionary->SetNextEnumerationIndex(index + 1);
+ property_details_ = PropertyDetails(attributes, i::DATA, index,
+ PropertyCellType::kUninitialized);
+ PropertyCellType new_type =
+ PropertyCell::UpdatedType(cell, value, property_details_);
+ property_details_ = property_details_.set_cell_type(new_type);
+ cell->set_property_details(property_details_);
+ number_ = entry;
+ has_property_ = true;
} else {
+ // Don't set enumeration index (it will be set during value store).
+ property_details_ =
+ PropertyDetails(attributes, i::DATA, 0, PropertyCellType::kNoCell);
transition_ = map;
}
return;
@@ -318,7 +356,11 @@ void LookupIterator::PrepareTransitionToDataProperty(
state_ = TRANSITION;
transition_ = transition;
- if (!transition->is_dictionary_map()) {
+ if (transition->is_dictionary_map()) {
+ // Don't set enumeration index (it will be set during value store).
+ property_details_ =
+ PropertyDetails(attributes, i::DATA, 0, PropertyCellType::kNoCell);
+ } else {
property_details_ = transition->GetLastDescriptorDetails();
has_property_ = true;
}
@@ -328,9 +370,11 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
DCHECK_EQ(TRANSITION, state_);
DCHECK(receiver.is_identical_to(GetStoreTarget()));
-
- if (receiver->IsJSGlobalObject()) return;
holder_ = receiver;
+ if (receiver->IsJSGlobalObject()) {
+ state_ = DATA;
+ return;
+ }
Handle<Map> transition = transition_map();
bool simple_transition = transition->GetBackPointer() == receiver->map();
JSObject::MigrateToMap(receiver, transition);
@@ -340,6 +384,20 @@ void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
number_ = static_cast<uint32_t>(number);
property_details_ = transition->GetLastDescriptorDetails();
state_ = DATA;
+ } else if (receiver->map()->is_dictionary_map()) {
+ Handle<NameDictionary> dictionary(receiver->property_dictionary(),
+ isolate_);
+ int entry;
+ dictionary = NameDictionary::Add(dictionary, name(),
+ isolate_->factory()->uninitialized_value(),
+ property_details_, &entry);
+ receiver->set_properties(*dictionary);
+ // Reload details containing proper enumeration index value.
+ property_details_ = dictionary->DetailsAt(entry);
+ number_ = entry;
+ has_property_ = true;
+ state_ = DATA;
+
} else {
ReloadPropertyInformation<false>();
}
@@ -353,9 +411,19 @@ void LookupIterator::Delete() {
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->Delete(object, number_);
} else {
- PropertyNormalizationMode mode = holder->map()->is_prototype_map()
- ? KEEP_INOBJECT_PROPERTIES
- : CLEAR_INOBJECT_PROPERTIES;
+ bool is_prototype_map = holder->map()->is_prototype_map();
+ RuntimeCallTimerScope stats_scope(
+ isolate_, is_prototype_map
+ ? &RuntimeCallStats::PrototypeObject_DeleteProperty
+ : &RuntimeCallStats::Object_DeleteProperty);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate_,
+ (is_prototype_map
+ ? &tracing::TraceEventStatsTable::PrototypeObject_DeleteProperty
+ : &tracing::TraceEventStatsTable::Object_DeleteProperty));
+
+ PropertyNormalizationMode mode =
+ is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
if (holder->HasFastProperties()) {
JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
@@ -371,11 +439,10 @@ void LookupIterator::Delete() {
state_ = NOT_FOUND;
}
-
void LookupIterator::TransitionToAccessorProperty(
- AccessorComponent component, Handle<Object> accessor,
+ Handle<Object> getter, Handle<Object> setter,
PropertyAttributes attributes) {
- DCHECK(!accessor->IsNull());
+ DCHECK(!getter->IsNull(isolate_) || !setter->IsNull(isolate_));
// Can only be called when the receiver is a JSObject. JSProxy has to be
// handled via a trap. Adding properties to primitive values is not
// observable.
@@ -394,7 +461,7 @@ void LookupIterator::TransitionToAccessorProperty(
IsFound() ? static_cast<int>(number_) : DescriptorArray::kNotFound;
Handle<Map> new_map = Map::TransitionToAccessorProperty(
- old_map, name_, descriptor, component, accessor, attributes);
+ isolate_, old_map, name_, descriptor, getter, setter, attributes);
bool simple_transition = new_map->GetBackPointer() == receiver->map();
JSObject::MigrateToMap(receiver, new_map);
@@ -414,15 +481,18 @@ void LookupIterator::TransitionToAccessorProperty(
if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
pair = Handle<AccessorPair>::cast(GetAccessors());
// If the component and attributes are identical, nothing has to be done.
- if (pair->get(component) == *accessor) {
- if (property_details().attributes() == attributes) return;
+ if (pair->Equals(*getter, *setter)) {
+ if (property_details().attributes() == attributes) {
+ if (!IsElement()) JSObject::ReoptimizeIfPrototype(receiver);
+ return;
+ }
} else {
pair = AccessorPair::Copy(pair);
- pair->set(component, *accessor);
+ pair->SetComponents(*getter, *setter);
}
} else {
pair = factory()->NewAccessorPair();
- pair->set(component, *accessor);
+ pair->SetComponents(*getter, *setter);
}
TransitionToAccessorPair(pair, attributes);
@@ -495,8 +565,7 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
if (!current->map()->has_hidden_prototype()) return false;
// JSProxy do not occur as hidden prototypes.
if (object->IsJSProxy()) return false;
- PrototypeIterator iter(isolate(), current,
- PrototypeIterator::START_AT_PROTOTYPE,
+ PrototypeIterator iter(isolate(), current, kStartAtPrototype,
PrototypeIterator::END_AT_NON_HIDDEN);
while (!iter.IsAtEnd()) {
if (iter.GetCurrent<JSReceiver>() == object) return true;
@@ -572,11 +641,10 @@ Handle<FieldType> LookupIterator::GetFieldType() const {
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement());
- Handle<JSObject> holder = GetHolder<JSObject>();
- Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(holder);
- Object* value = global->global_dictionary()->ValueAt(dictionary_entry());
+ Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
+ Object* value = holder->global_dictionary()->ValueAt(dictionary_entry());
DCHECK(value->IsPropertyCell());
- return handle(PropertyCell::cast(value));
+ return handle(PropertyCell::cast(value), isolate_);
}
@@ -608,13 +676,13 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
}
} else if (holder->IsJSGlobalObject()) {
- Handle<GlobalDictionary> property_dictionary =
- handle(JSObject::cast(*holder)->global_dictionary());
- PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
- property_details_);
+ GlobalDictionary* dictionary = JSObject::cast(*holder)->global_dictionary();
+ Object* cell = dictionary->ValueAt(dictionary_entry());
+ DCHECK(cell->IsPropertyCell());
+ PropertyCell::cast(cell)->set_value(*value);
} else {
- NameDictionary* property_dictionary = holder->property_dictionary();
- property_dictionary->ValueAtPut(dictionary_entry(), *value);
+ NameDictionary* dictionary = holder->property_dictionary();
+ dictionary->ValueAtPut(dictionary_entry(), *value);
}
}
@@ -639,17 +707,7 @@ bool LookupIterator::SkipInterceptor(JSObject* holder) {
JSReceiver* LookupIterator::NextHolder(Map* map) {
DisallowHeapAllocation no_gc;
if (map->prototype() == heap()->null_value()) return NULL;
-
- DCHECK(!map->IsJSGlobalProxyMap() || map->has_hidden_prototype());
-
- if (!check_prototype_chain() &&
- !(check_hidden() && map->has_hidden_prototype()) &&
- // Always lookup behind the JSGlobalProxy into the JSGlobalObject, even
- // when not checking other hidden prototypes.
- !map->IsJSGlobalProxyMap()) {
- return NULL;
- }
-
+ if (!check_prototype_chain() && !map->has_hidden_prototype()) return NULL;
return JSReceiver::cast(map->prototype());
}
@@ -702,7 +760,7 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
number_ = static_cast<uint32_t>(number);
DCHECK(dict->ValueAt(number_)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
- if (cell->value()->IsTheHole()) return NOT_FOUND;
+ if (cell->value()->IsTheHole(isolate_)) return NOT_FOUND;
property_details_ = cell->property_details();
has_property_ = true;
switch (property_details_.kind()) {
@@ -767,5 +825,21 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
return state_;
}
+Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
+ const {
+ DCHECK_EQ(ACCESS_CHECK, state_);
+ DisallowHeapAllocation no_gc;
+ AccessCheckInfo* access_check_info =
+ AccessCheckInfo::Get(isolate_, Handle<JSObject>::cast(holder_));
+ if (access_check_info) {
+ Object* interceptor = IsElement() ? access_check_info->indexed_interceptor()
+ : access_check_info->named_interceptor();
+ if (interceptor) {
+ return handle(InterceptorInfo::cast(interceptor), isolate_);
+ }
+ }
+ return Handle<InterceptorInfo>();
+}
+
} // namespace internal
} // namespace v8
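The lookup.cc hunks above replace the per-component accessor API with one that installs both halves of an accessor pair in a single map transition. A minimal sketch of how a caller migrates, assuming `it` is a LookupIterator already positioned at the property (the function name is illustrative, not part of this commit):

// Hedged sketch: previously a getter and a setter required two
// TransitionToAccessorProperty calls, each tagged with an AccessorComponent;
// after this commit both components travel together in one transition.
void InstallAccessorSketch(LookupIterator* it, Handle<Object> getter,
                           Handle<Object> setter,
                           PropertyAttributes attributes) {
  it->TransitionToAccessorProperty(getter, setter, attributes);
}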
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index abd073284d..ffc7904b2a 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -16,17 +16,14 @@ class LookupIterator final BASE_EMBEDDED {
public:
enum Configuration {
// Configuration bits.
- kHidden = 1 << 0,
- kInterceptor = 1 << 1,
- kPrototypeChain = 1 << 2,
+ kInterceptor = 1 << 0,
+ kPrototypeChain = 1 << 1,
// Convenience combinations of bits.
OWN_SKIP_INTERCEPTOR = 0,
OWN = kInterceptor,
- HIDDEN_SKIP_INTERCEPTOR = kHidden,
- HIDDEN = kHidden | kInterceptor,
- PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kHidden | kPrototypeChain,
- PROTOTYPE_CHAIN = kHidden | kPrototypeChain | kInterceptor,
+ PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kPrototypeChain,
+ PROTOTYPE_CHAIN = kPrototypeChain | kInterceptor,
DEFAULT = PROTOTYPE_CHAIN
};
@@ -179,6 +176,7 @@ class LookupIterator final BASE_EMBEDDED {
Handle<Object> GetReceiver() const { return receiver_; }
Handle<JSObject> GetStoreTarget() const {
+ DCHECK(receiver_->IsJSObject());
if (receiver_->IsJSGlobalProxy()) {
Map* map = JSGlobalProxy::cast(*receiver_)->map();
if (map->has_hidden_prototype()) {
@@ -193,6 +191,10 @@ class LookupIterator final BASE_EMBEDDED {
DCHECK_EQ(TRANSITION, state_);
return Handle<Map>::cast(transition_);
}
+ Handle<PropertyCell> transition_cell() const {
+ DCHECK_EQ(TRANSITION, state_);
+ return Handle<PropertyCell>::cast(transition_);
+ }
template <class T>
Handle<T> GetHolder() const {
DCHECK(IsFound());
@@ -229,8 +231,8 @@ class LookupIterator final BASE_EMBEDDED {
void ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes);
void Delete();
- void TransitionToAccessorProperty(AccessorComponent component,
- Handle<Object> accessor,
+ void TransitionToAccessorProperty(Handle<Object> getter,
+ Handle<Object> setter,
PropertyAttributes attributes);
void TransitionToAccessorPair(Handle<Object> pair,
PropertyAttributes attributes);
@@ -260,12 +262,15 @@ class LookupIterator final BASE_EMBEDDED {
: GetInterceptor<false>(JSObject::cast(*holder_));
return handle(result, isolate_);
}
+ Handle<InterceptorInfo> GetInterceptorForFailedAccessCheck() const;
Handle<Object> GetDataValue() const;
void WriteDataValue(Handle<Object> value);
inline void UpdateProtector() {
- if (FLAG_harmony_species && !IsElement() &&
- (*name_ == heap()->constructor_string() ||
- *name_ == heap()->species_symbol())) {
+ if (IsElement()) return;
+ if (*name_ == heap()->is_concat_spreadable_symbol() ||
+ *name_ == heap()->constructor_string() ||
+ *name_ == heap()->species_symbol() ||
+ *name_ == heap()->has_instance_symbol()) {
InternalUpdateProtector();
}
}
@@ -289,8 +294,7 @@ class LookupIterator final BASE_EMBEDDED {
void NextInternal(Map* map, JSReceiver* holder);
template <bool is_element>
inline State LookupInHolder(Map* map, JSReceiver* holder) {
- return (map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE ||
- map->instance_type() == JS_GLOBAL_PROXY_TYPE)
+ return map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE
? LookupInSpecialHolder<is_element>(map, holder)
: LookupInRegularHolder<is_element>(map, holder);
}
@@ -316,7 +320,6 @@ class LookupIterator final BASE_EMBEDDED {
: holder->GetNamedInterceptor();
}
- bool check_hidden() const { return (configuration_ & kHidden) != 0; }
bool check_interceptor() const {
return (configuration_ & kInterceptor) != 0;
}
@@ -335,12 +338,7 @@ class LookupIterator final BASE_EMBEDDED {
static Configuration ComputeConfiguration(
Configuration configuration, Handle<Name> name) {
- if (name->IsPrivate()) {
- return static_cast<Configuration>(configuration &
- HIDDEN_SKIP_INTERCEPTOR);
- } else {
- return configuration;
- }
+ return name->IsPrivate() ? OWN_SKIP_INTERCEPTOR : configuration;
}
static Handle<JSReceiver> GetRootForNonJSReceiver(
@@ -354,8 +352,6 @@ class LookupIterator final BASE_EMBEDDED {
State NotFound(JSReceiver* const holder) const;
- bool HolderIsInContextIndex(uint32_t index) const;
-
// If configuration_ becomes mutable, update
// HolderIsReceiverOrHiddenPrototype.
const Configuration configuration_;
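With the kHidden configuration bit removed, a private-name lookup collapses to a plain own-property probe that also skips interceptors, as the simplified ComputeConfiguration above shows. A hedged sketch of the resulting dispatch, using only enum values from this hunk (the helper name is illustrative):

// Sketch: mirrors ComputeConfiguration after this commit.
LookupIterator::Configuration ConfigurationFor(
    LookupIterator::Configuration requested, Handle<Name> name) {
  // Private names never walk the prototype chain and never hit interceptors.
  return name->IsPrivate() ? LookupIterator::OWN_SKIP_INTERCEPTOR : requested;
}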
diff --git a/deps/v8/src/machine-type.cc b/deps/v8/src/machine-type.cc
index fcc3e97973..9289673bd7 100644
--- a/deps/v8/src/machine-type.cc
+++ b/deps/v8/src/machine-type.cc
@@ -9,33 +9,40 @@ namespace v8 {
namespace internal {
std::ostream& operator<<(std::ostream& os, MachineRepresentation rep) {
+ return os << MachineReprToString(rep);
+}
+
+const char* MachineReprToString(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kNone:
- return os << "kMachNone";
+ return "kMachNone";
case MachineRepresentation::kBit:
- return os << "kRepBit";
+ return "kRepBit";
case MachineRepresentation::kWord8:
- return os << "kRepWord8";
+ return "kRepWord8";
case MachineRepresentation::kWord16:
- return os << "kRepWord16";
+ return "kRepWord16";
case MachineRepresentation::kWord32:
- return os << "kRepWord32";
+ return "kRepWord32";
case MachineRepresentation::kWord64:
- return os << "kRepWord64";
+ return "kRepWord64";
case MachineRepresentation::kFloat32:
- return os << "kRepFloat32";
+ return "kRepFloat32";
case MachineRepresentation::kFloat64:
- return os << "kRepFloat64";
+ return "kRepFloat64";
case MachineRepresentation::kSimd128:
- return os << "kRepSimd128";
+ return "kRepSimd128";
+ case MachineRepresentation::kTaggedSigned:
+ return "kRepTaggedSigned";
+ case MachineRepresentation::kTaggedPointer:
+ return "kRepTaggedPointer";
case MachineRepresentation::kTagged:
- return os << "kRepTagged";
+ return "kRepTagged";
}
UNREACHABLE();
- return os;
+ return nullptr;
}
-
std::ostream& operator<<(std::ostream& os, MachineSemantic type) {
switch (type) {
case MachineSemantic::kNone:
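Factoring the switch into MachineReprToString lets callers obtain the representation name without going through an ostream, e.g. for printf-style tracing. A minimal usage sketch, not part of the diff:

#include <cstdio>

// Sketch: print a representation name directly. Assumes the v8::internal
// namespace is in scope.
void PrintRep(MachineRepresentation rep) {
  std::printf("rep = %s\n", MachineReprToString(rep));  // e.g. "kRepTagged"
}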
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 1085657894..bcc85b3e7c 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -23,11 +23,15 @@ enum class MachineRepresentation : uint8_t {
kWord32,
kWord64,
kFloat32,
- kFloat64,
- kSimd128,
+ kFloat64, // must follow kFloat32
+ kSimd128, // must follow kFloat64
+ kTaggedSigned,
+ kTaggedPointer,
kTagged
};
+const char* MachineReprToString(MachineRepresentation);
+
enum class MachineSemantic : uint8_t {
kNone,
kBool,
@@ -157,6 +161,35 @@ class MachineType {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
}
+ static MachineType TypeForRepresentation(MachineRepresentation& rep,
+ bool isSigned = true) {
+ switch (rep) {
+ case MachineRepresentation::kNone:
+ return MachineType::None();
+ case MachineRepresentation::kBit:
+ return MachineType::Bool();
+ case MachineRepresentation::kWord8:
+ return isSigned ? MachineType::Int8() : MachineType::Uint8();
+ case MachineRepresentation::kWord16:
+ return isSigned ? MachineType::Int16() : MachineType::Uint16();
+ case MachineRepresentation::kWord32:
+ return isSigned ? MachineType::Int32() : MachineType::Uint32();
+ case MachineRepresentation::kWord64:
+ return isSigned ? MachineType::Int64() : MachineType::Uint64();
+ case MachineRepresentation::kFloat32:
+ return MachineType::Float32();
+ case MachineRepresentation::kFloat64:
+ return MachineType::Float64();
+ case MachineRepresentation::kSimd128:
+ return MachineType::Simd128();
+ case MachineRepresentation::kTagged:
+ return MachineType::AnyTagged();
+ default:
+ UNREACHABLE();
+ return MachineType::None();
+ }
+ }
+
private:
MachineRepresentation representation_;
MachineSemantic semantic_;
@@ -177,7 +210,8 @@ std::ostream& operator<<(std::ostream& os, MachineType type);
inline bool IsFloatingPoint(MachineRepresentation rep) {
return rep == MachineRepresentation::kFloat32 ||
- rep == MachineRepresentation::kFloat64;
+ rep == MachineRepresentation::kFloat64 ||
+ rep == MachineRepresentation::kSimd128;
}
// Gets the log2 of the element size in bytes of the machine type.
@@ -196,6 +230,8 @@ inline int ElementSizeLog2Of(MachineRepresentation rep) {
return 3;
case MachineRepresentation::kSimd128:
return 4;
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
return kPointerSizeLog2;
default:
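TypeForRepresentation is the new reverse mapping from a bare MachineRepresentation to a full MachineType; the isSigned flag only matters for the integral cases, and the new kTaggedSigned/kTaggedPointer representations fall through to UNREACHABLE(). A hedged usage sketch (note the parameter is taken by non-const reference, so an lvalue is required):

// Sketch, assuming the v8::internal namespace is in scope.
void TypeForRepresentationSketch() {
  MachineRepresentation rep = MachineRepresentation::kWord32;
  MachineType s = MachineType::TypeForRepresentation(rep);         // Int32()
  MachineType u = MachineType::TypeForRepresentation(rep, false);  // Uint32()
  (void)s;
  (void)u;
}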
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 6338b2c1c1..b6830450e7 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -19,21 +19,22 @@ enum InvokeFlag {
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
- RESULT_CONTAINS_TOP = 1 << 1,
+ RESULT_CONTAINS_TOP = 1 << 0,
// Specify that the requested size of the space to allocate is specified in
// words instead of bytes.
- SIZE_IN_WORDS = 1 << 2,
+ SIZE_IN_WORDS = 1 << 1,
// Align the allocation to a multiple of kDoubleSize
- DOUBLE_ALIGNMENT = 1 << 3,
+ DOUBLE_ALIGNMENT = 1 << 2,
// Directly allocate in old space
- PRETENURE = 1 << 4,
+ PRETENURE = 1 << 3,
+ // Allocation folding dominator
+ ALLOCATION_FOLDING_DOMINATOR = 1 << 4,
+ // Folded allocation
+ ALLOCATION_FOLDED = 1 << 5
};
-
#if V8_TARGET_ARCH_IA32
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/assembler-ia32-inl.h"
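With TAG_OBJECT gone (allocation results are now always tagged), the remaining AllocationFlags shift down one bit and two allocation-folding flags join the set. They still combine bitwise, as in this short sketch:

// Sketch: a pretenured, double-aligned allocation that also acts as an
// allocation-folding dominator (values from the enum above).
AllocationFlags flags = static_cast<AllocationFlags>(
    PRETENURE | DOUBLE_ALIGNMENT | ALLOCATION_FOLDING_DOMINATOR);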
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 67ab36f6cf..5d03318963 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -4,27 +4,41 @@
#include "src/messages.h"
+#include <memory>
+
#include "src/api.h"
#include "src/execution.h"
#include "src/isolate-inl.h"
+#include "src/keys.h"
#include "src/string-builder.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
+MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
+ int end_pos)
+ : script_(script), start_pos_(start_pos), end_pos_(end_pos) {}
+MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
+ int end_pos, Handle<JSFunction> function)
+ : script_(script),
+ start_pos_(start_pos),
+ end_pos_(end_pos),
+ function_(function) {}
+MessageLocation::MessageLocation() : start_pos_(-1), end_pos_(-1) {}
// If no message listeners have been registered this one is called
// by default.
void MessageHandler::DefaultMessageReport(Isolate* isolate,
const MessageLocation* loc,
Handle<Object> message_obj) {
- base::SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
+ std::unique_ptr<char[]> str = GetLocalizedMessage(isolate, message_obj);
if (loc == NULL) {
PrintF("%s\n", str.get());
} else {
HandleScope scope(isolate);
Handle<Object> data(loc->script()->name(), isolate);
- base::SmartArrayPointer<char> data_str;
+ std::unique_ptr<char[]> data_str;
if (data->IsString())
data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
PrintF("%s:%i: %s\n", data_str.get() ? data_str.get() : "<unknown>",
@@ -86,11 +100,8 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
MaybeHandle<Object> maybe_stringified;
Handle<Object> stringified;
// Make sure we don't leak uncaught internally generated Error objects.
- if (Object::IsErrorObject(isolate, argument)) {
- Handle<Object> args[] = {argument};
- maybe_stringified = Execution::TryCall(
- isolate, isolate->no_side_effects_to_string_fun(),
- isolate->factory()->undefined_value(), arraysize(args), args);
+ if (argument->IsJSError()) {
+ maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
} else {
v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
catcher.SetVerbose(false);
@@ -108,8 +119,9 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
- v8::NeanderArray global_listeners(isolate->factory()->message_listeners());
- int global_length = global_listeners.length();
+ Handle<TemplateList> global_listeners =
+ isolate->factory()->message_listeners();
+ int global_length = global_listeners->length();
if (global_length == 0) {
DefaultMessageReport(isolate, loc, message);
if (isolate->has_scheduled_exception()) {
@@ -118,16 +130,16 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
} else {
for (int i = 0; i < global_length; i++) {
HandleScope scope(isolate);
- if (global_listeners.get(i)->IsUndefined()) continue;
- v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
- Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
+ if (global_listeners->get(i)->IsUndefined(isolate)) continue;
+ FixedArray* listener = FixedArray::cast(global_listeners->get(i));
+ Foreign* callback_obj = Foreign::cast(listener->get(0));
v8::MessageCallback callback =
FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
- Handle<Object> callback_data(listener.get(1), isolate);
+ Handle<Object> callback_data(listener->get(1), isolate);
{
// Do not allow exceptions to propagate.
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- callback(api_message_obj, callback_data->IsUndefined()
+ callback(api_message_obj, callback_data->IsUndefined(isolate)
? api_exception_obj
: v8::Utils::ToLocal(callback_data));
}
@@ -146,8 +158,7 @@ Handle<String> MessageHandler::GetMessage(Isolate* isolate,
return MessageTemplate::FormatMessage(isolate, message->type(), arg);
}
-
-base::SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
+std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
Isolate* isolate, Handle<Object> data) {
HandleScope scope(isolate);
return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
@@ -158,11 +169,25 @@ CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
: isolate_(isolate) {
Handle<Object> maybe_function = JSObject::GetDataProperty(
call_site_obj, isolate->factory()->call_site_function_symbol());
- if (!maybe_function->IsJSFunction()) return;
+ if (maybe_function->IsJSFunction()) {
+ // javascript
+ fun_ = Handle<JSFunction>::cast(maybe_function);
+ receiver_ = JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_receiver_symbol());
+ } else {
+ Handle<Object> maybe_wasm_func_index = JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_wasm_func_index_symbol());
+ if (!maybe_wasm_func_index->IsSmi()) {
+ // invalid: neither javascript nor wasm
+ return;
+ }
+ // wasm
+ wasm_obj_ = Handle<JSObject>::cast(JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_wasm_obj_symbol()));
+ wasm_func_index_ = Smi::cast(*maybe_wasm_func_index)->value();
+ DCHECK(static_cast<int>(wasm_func_index_) >= 0);
+ }
- fun_ = Handle<JSFunction>::cast(maybe_function);
- receiver_ = JSObject::GetDataProperty(
- call_site_obj, isolate->factory()->call_site_receiver_symbol());
CHECK(JSObject::GetDataProperty(
call_site_obj, isolate->factory()->call_site_position_symbol())
->ToInt32(&pos_));
@@ -170,15 +195,18 @@ CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
Handle<Object> CallSite::GetFileName() {
- Handle<Object> script(fun_->shared()->script(), isolate_);
- if (script->IsScript()) {
- return Handle<Object>(Handle<Script>::cast(script)->name(), isolate_);
- }
- return isolate_->factory()->null_value();
+ if (!IsJavaScript()) return isolate_->factory()->null_value();
+ Object* script = fun_->shared()->script();
+ if (!script->IsScript()) return isolate_->factory()->null_value();
+ return Handle<Object>(Script::cast(script)->name(), isolate_);
}
Handle<Object> CallSite::GetFunctionName() {
+ if (IsWasm()) {
+ return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_,
+ wasm_func_index_);
+ }
Handle<String> result = JSFunction::GetName(fun_);
if (result->length() != 0) return result;
@@ -191,19 +219,16 @@ Handle<Object> CallSite::GetFunctionName() {
return isolate_->factory()->null_value();
}
-
Handle<Object> CallSite::GetScriptNameOrSourceUrl() {
- Handle<Object> script_obj(fun_->shared()->script(), isolate_);
- if (script_obj->IsScript()) {
- Handle<Script> script = Handle<Script>::cast(script_obj);
- Object* source_url = script->source_url();
- if (source_url->IsString()) return Handle<Object>(source_url, isolate_);
- return Handle<Object>(script->name(), isolate_);
- }
- return isolate_->factory()->null_value();
+ if (!IsJavaScript()) return isolate_->factory()->null_value();
+ Object* script_obj = fun_->shared()->script();
+ if (!script_obj->IsScript()) return isolate_->factory()->null_value();
+ Handle<Script> script(Script::cast(script_obj), isolate_);
+ Object* source_url = script->source_url();
+ if (source_url->IsString()) return Handle<Object>(source_url, isolate_);
+ return Handle<Object>(script->name(), isolate_);
}
-
bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
Handle<JSFunction> fun,
LookupIterator::Configuration config) {
@@ -223,7 +248,8 @@ bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
Handle<Object> CallSite::GetMethodName() {
- if (receiver_->IsNull() || receiver_->IsUndefined()) {
+ if (!IsJavaScript() || receiver_->IsNull(isolate_) ||
+ receiver_->IsUndefined(isolate_)) {
return isolate_->factory()->null_value();
}
Handle<JSReceiver> receiver =
@@ -234,17 +260,13 @@ Handle<Object> CallSite::GetMethodName() {
Handle<JSObject> obj = Handle<JSObject>::cast(receiver);
Handle<Object> function_name(fun_->shared()->name(), isolate_);
- if (function_name->IsName()) {
- Handle<Name> name = Handle<Name>::cast(function_name);
+ if (function_name->IsString()) {
+ Handle<String> name = Handle<String>::cast(function_name);
// ES2015 gives getters and setters name prefixes which must
// be stripped to find the property name.
- if (name->IsString() && FLAG_harmony_function_name) {
- Handle<String> name_string = Handle<String>::cast(name);
- if (name_string->IsUtf8EqualTo(CStrVector("get "), true) ||
- name_string->IsUtf8EqualTo(CStrVector("set "), true)) {
- name = isolate_->factory()->NewProperSubString(name_string, 4,
- name_string->length());
- }
+ if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
+ name->IsUtf8EqualTo(CStrVector("set "), true)) {
+ name = isolate_->factory()->NewProperSubString(name, 4, name->length());
}
if (CheckMethodName(isolate_, obj, name, fun_,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
@@ -254,14 +276,14 @@ Handle<Object> CallSite::GetMethodName() {
HandleScope outer_scope(isolate_);
Handle<Object> result;
- for (PrototypeIterator iter(isolate_, obj,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (PrototypeIterator iter(isolate_, obj, kStartAtReceiver); !iter.IsAtEnd();
+ iter.Advance()) {
Handle<Object> current = PrototypeIterator::GetCurrent(iter);
if (!current->IsJSObject()) break;
Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
if (current_obj->IsAccessCheckNeeded()) break;
- Handle<FixedArray> keys = JSObject::GetEnumPropertyKeys(current_obj);
+ Handle<FixedArray> keys =
+ KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, current_obj);
for (int i = 0; i < keys->length(); i++) {
HandleScope inner_scope(isolate_);
if (!keys->get(i)->IsName()) continue;
@@ -279,9 +301,132 @@ Handle<Object> CallSite::GetMethodName() {
return isolate_->factory()->null_value();
}
+Handle<Object> CallSite::GetTypeName() {
+ // TODO(jgruber): Check for strict/constructor here as in
+ // CallSitePrototypeGetThis.
+
+ if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_))
+ return isolate_->factory()->null_value();
+
+ if (receiver_->IsJSProxy()) return isolate_->factory()->Proxy_string();
+
+ Handle<JSReceiver> receiver_object =
+ Object::ToObject(isolate_, receiver_).ToHandleChecked();
+ return JSReceiver::GetConstructorName(receiver_object);
+}
+
+namespace {
+
+Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
+ if (script->eval_from_shared()->IsUndefined(isolate))
+ return *isolate->factory()->undefined_value();
+
+ Handle<SharedFunctionInfo> shared(
+ SharedFunctionInfo::cast(script->eval_from_shared()));
+ // Find the name of the function calling eval.
+ if (shared->name()->BooleanValue()) {
+ return shared->name();
+ }
+
+ return shared->inferred_name();
+}
+
+Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
+ if (script->eval_from_shared()->IsUndefined(isolate))
+ return *isolate->factory()->undefined_value();
+
+ Handle<SharedFunctionInfo> eval_from_shared(
+ SharedFunctionInfo::cast(script->eval_from_shared()));
+ return eval_from_shared->script()->IsScript()
+ ? eval_from_shared->script()
+ : *isolate->factory()->undefined_value();
+}
+
+MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
+ Handle<Object> sourceURL = Script::GetNameOrSourceURL(script);
+ if (!sourceURL->IsUndefined(isolate)) {
+ DCHECK(sourceURL->IsString());
+ return Handle<String>::cast(sourceURL);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("eval at ");
+
+ Handle<Object> eval_from_function_name =
+ handle(EvalFromFunctionName(isolate, script), isolate);
+ if (eval_from_function_name->BooleanValue()) {
+ Handle<String> str;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, str, Object::ToString(isolate, eval_from_function_name),
+ String);
+ builder.AppendString(str);
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+
+ Handle<Object> eval_from_script_obj =
+ handle(EvalFromScript(isolate, script), isolate);
+ if (eval_from_script_obj->IsScript()) {
+ Handle<Script> eval_from_script =
+ Handle<Script>::cast(eval_from_script_obj);
+ builder.AppendCString(" (");
+ if (eval_from_script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
+ // Eval script originated from another eval.
+ Handle<String> str;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, str, FormatEvalOrigin(isolate, eval_from_script), String);
+ builder.AppendString(str);
+ } else {
+ DCHECK(eval_from_script->compilation_type() !=
+ Script::COMPILATION_TYPE_EVAL);
+ // Eval script originated from "real" source.
+ Handle<Object> name_obj = handle(eval_from_script->name(), isolate);
+ if (eval_from_script->name()->IsString()) {
+ builder.AppendString(Handle<String>::cast(name_obj));
+
+ Script::PositionInfo info;
+ if (eval_from_script->GetPositionInfo(script->GetEvalPosition(), &info,
+ Script::NO_OFFSET)) {
+ builder.AppendCString(":");
+
+ Handle<String> str = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(info.line + 1), isolate));
+ builder.AppendString(str);
+
+ builder.AppendCString(":");
+
+ str = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(info.column + 1), isolate));
+ builder.AppendString(str);
+ }
+ } else {
+ DCHECK(!eval_from_script->name()->IsString());
+ builder.AppendCString("unknown source");
+ }
+ }
+ builder.AppendCString(")");
+ }
+
+ Handle<String> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, builder.Finish(), String);
+ return result;
+}
+
+} // namespace
+
+Handle<Object> CallSite::GetEvalOrigin() {
+ if (IsWasm()) return isolate_->factory()->undefined_value();
+ DCHECK(IsJavaScript());
+
+ Handle<Object> script = handle(fun_->shared()->script(), isolate_);
+ if (!script->IsScript()) return isolate_->factory()->undefined_value();
+
+ return FormatEvalOrigin(isolate_, Handle<Script>::cast(script))
+ .ToHandleChecked();
+}
int CallSite::GetLineNumber() {
- if (pos_ >= 0) {
+ if (pos_ >= 0 && IsJavaScript()) {
Handle<Object> script_obj(fun_->shared()->script(), isolate_);
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
@@ -293,7 +438,7 @@ int CallSite::GetLineNumber() {
int CallSite::GetColumnNumber() {
- if (pos_ >= 0) {
+ if (pos_ >= 0 && IsJavaScript()) {
Handle<Object> script_obj(fun_->shared()->script(), isolate_);
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
@@ -305,6 +450,7 @@ int CallSite::GetColumnNumber() {
bool CallSite::IsNative() {
+ if (!IsJavaScript()) return false;
Handle<Object> script(fun_->shared()->script(), isolate_);
return script->IsScript() &&
Handle<Script>::cast(script)->type() == Script::TYPE_NATIVE;
@@ -312,12 +458,14 @@ bool CallSite::IsNative() {
bool CallSite::IsToplevel() {
- return receiver_->IsJSGlobalProxy() || receiver_->IsNull() ||
- receiver_->IsUndefined();
+ if (IsWasm()) return false;
+ return receiver_->IsJSGlobalProxy() || receiver_->IsNull(isolate_) ||
+ receiver_->IsUndefined(isolate_);
}
bool CallSite::IsEval() {
+ if (!IsJavaScript()) return false;
Handle<Object> script(fun_->shared()->script(), isolate_);
return script->IsScript() &&
Handle<Script>::cast(script)->compilation_type() ==
@@ -326,32 +474,202 @@ bool CallSite::IsEval() {
bool CallSite::IsConstructor() {
- if (!receiver_->IsJSObject()) return false;
+ // Builtin exit frames mark constructors by passing a special symbol as the
+ // receiver.
+ Object* ctor_symbol = isolate_->heap()->call_site_constructor_symbol();
+ if (*receiver_ == ctor_symbol) return true;
+ if (!IsJavaScript() || !receiver_->IsJSObject()) return false;
Handle<Object> constructor =
JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
isolate_->factory()->constructor_string());
return constructor.is_identical_to(fun_);
}
+namespace {
+
+// Convert the raw frames as written by Isolate::CaptureSimpleStackTrace into
+// a vector of JS CallSite objects.
+MaybeHandle<FixedArray> GetStackFrames(Isolate* isolate,
+ Handle<Object> raw_stack) {
+ DCHECK(raw_stack->IsJSArray());
+ Handle<JSArray> raw_stack_array = Handle<JSArray>::cast(raw_stack);
+
+ DCHECK(raw_stack_array->elements()->IsFixedArray());
+ Handle<FixedArray> raw_stack_elements =
+ handle(FixedArray::cast(raw_stack_array->elements()), isolate);
+
+ const int raw_stack_len = raw_stack_elements->length();
+ DCHECK(raw_stack_len % 4 == 1); // Multiples of 4 plus sloppy frames count.
+ const int frame_count = (raw_stack_len - 1) / 4;
+
+ Handle<Object> sloppy_frames_obj =
+ FixedArray::get(*raw_stack_elements, 0, isolate);
+ int sloppy_frames = Handle<Smi>::cast(sloppy_frames_obj)->value();
+
+ int dst_ix = 0;
+ Handle<FixedArray> frames = isolate->factory()->NewFixedArray(frame_count);
+ for (int i = 1; i < raw_stack_len; i += 4) {
+ Handle<Object> recv = FixedArray::get(*raw_stack_elements, i, isolate);
+ Handle<Object> fun = FixedArray::get(*raw_stack_elements, i + 1, isolate);
+ Handle<AbstractCode> code = Handle<AbstractCode>::cast(
+ FixedArray::get(*raw_stack_elements, i + 2, isolate));
+ Handle<Smi> pc =
+ Handle<Smi>::cast(FixedArray::get(*raw_stack_elements, i + 3, isolate));
+
+ Handle<Object> pos =
+ (fun->IsSmi() && pc->value() < 0)
+ ? handle(Smi::FromInt(-1 - pc->value()), isolate)
+ : handle(Smi::FromInt(code->SourcePosition(pc->value())), isolate);
+
+ sloppy_frames--;
+ Handle<Object> strict = isolate->factory()->ToBoolean(sloppy_frames < 0);
+
+ Handle<Object> callsite;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, callsite,
+ CallSiteUtils::Construct(isolate, recv, fun, pos, strict), FixedArray);
+
+ frames->set(dst_ix++, *callsite);
+ }
+
+ DCHECK_EQ(frame_count, dst_ix);
+ return frames;
+}
-Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
- int template_index,
- Handle<Object> arg) {
- Factory* factory = isolate->factory();
- Handle<String> result_string;
- if (arg->IsString()) {
- result_string = Handle<String>::cast(arg);
+MaybeHandle<Object> AppendErrorString(Isolate* isolate, Handle<Object> error,
+ IncrementalStringBuilder* builder) {
+ MaybeHandle<String> err_str =
+ ErrorUtils::ToString(isolate, Handle<Object>::cast(error));
+ if (err_str.is_null()) {
+ // Error.toString threw. Try to return a string representation of the thrown
+ // exception instead.
+
+ DCHECK(isolate->has_pending_exception());
+ Handle<Object> pending_exception =
+ handle(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+
+ err_str = ErrorUtils::ToString(isolate, pending_exception);
+ if (err_str.is_null()) {
+ // Formatting the thrown exception threw again, give up.
+ DCHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+
+ builder->AppendCString("<error>");
+ } else {
+ // Formatted thrown exception successfully, append it.
+ builder->AppendCString("<error: ");
+ builder->AppendString(err_str.ToHandleChecked());
+ builder->AppendCharacter('>');
+ }
} else {
- Handle<JSFunction> fun = isolate->no_side_effects_to_string_fun();
+ builder->AppendString(err_str.ToHandleChecked());
+ }
+
+ return error;
+}
+
+class PrepareStackTraceScope {
+ public:
+ explicit PrepareStackTraceScope(Isolate* isolate) : isolate_(isolate) {
+ DCHECK(!isolate_->formatting_stack_trace());
+ isolate_->set_formatting_stack_trace(true);
+ }
+
+ ~PrepareStackTraceScope() { isolate_->set_formatting_stack_trace(false); }
+
+ private:
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PrepareStackTraceScope);
+};
+
+} // namespace
+
+// static
+MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
+ Handle<JSObject> error,
+ Handle<Object> raw_stack) {
+ // Create JS CallSite objects from the raw stack frame array.
+
+ Handle<FixedArray> frames;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, frames,
+ GetStackFrames(isolate, raw_stack), Object);
+
+ // If there's a user-specified "prepareStackTrace" function, call it on the
+ // frames and use its result.
+
+ Handle<JSFunction> global_error = isolate->error_function();
+ Handle<Object> prepare_stack_trace;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prepare_stack_trace,
+ JSFunction::GetProperty(isolate, global_error, "prepareStackTrace"),
+ Object);
+
+ const bool in_recursion = isolate->formatting_stack_trace();
+ if (prepare_stack_trace->IsJSFunction() && !in_recursion) {
+ PrepareStackTraceScope scope(isolate);
+ Handle<JSArray> array = isolate->factory()->NewJSArrayWithElements(frames);
+
+ const int argc = 2;
+ ScopedVector<Handle<Object>> argv(argc);
+ argv[0] = error;
+ argv[1] = array;
- MaybeHandle<Object> maybe_result =
- Execution::TryCall(isolate, fun, factory->undefined_value(), 1, &arg);
Handle<Object> result;
- if (!maybe_result.ToHandle(&result) || !result->IsString()) {
- return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, prepare_stack_trace,
+ global_error, argc, argv.start()),
+ Object);
+
+ return result;
+ }
+
+ IncrementalStringBuilder builder(isolate);
+
+ RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
+ Object);
+
+ for (int i = 0; i < frames->length(); i++) {
+ builder.AppendCString("\n at ");
+
+ Handle<Object> frame = FixedArray::get(*frames, i, isolate);
+ MaybeHandle<String> maybe_frame_string =
+ CallSiteUtils::ToString(isolate, frame);
+ if (maybe_frame_string.is_null()) {
+ // CallSite.toString threw. Try to return a string representation of the
+ // thrown exception instead.
+
+ DCHECK(isolate->has_pending_exception());
+ Handle<Object> pending_exception =
+ handle(isolate->pending_exception(), isolate);
+ isolate->clear_pending_exception();
+
+ maybe_frame_string = ErrorUtils::ToString(isolate, pending_exception);
+ if (maybe_frame_string.is_null()) {
+ // Formatting the thrown exception threw again, give up.
+
+ builder.AppendCString("<error>");
+ } else {
+ // Formatted thrown exception successfully, append it.
+ builder.AppendCString("<error: ");
+ builder.AppendString(maybe_frame_string.ToHandleChecked());
+ builder.AppendCString("<error>");
+ }
+ } else {
+ // CallSite.toString completed without throwing.
+ builder.AppendString(maybe_frame_string.ToHandleChecked());
}
- result_string = Handle<String>::cast(result);
}
+
+ RETURN_RESULT(isolate, builder.Finish(), Object);
+}
+
+Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
+ int template_index,
+ Handle<Object> arg) {
+ Factory* factory = isolate->factory();
+ Handle<String> result_string = Object::NoSideEffectsToString(isolate, arg);
MaybeHandle<String> maybe_result_string = MessageTemplate::FormatMessage(
template_index, result_string, factory->empty_string(),
factory->empty_string());
@@ -415,6 +733,459 @@ MaybeHandle<String> MessageTemplate::FormatMessage(int template_index,
return builder.Finish();
}
+MaybeHandle<Object> ErrorUtils::Construct(
+ Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
+ Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
+ bool suppress_detailed_trace) {
+ // 1. If NewTarget is undefined, let newTarget be the active function object,
+ // else let newTarget be NewTarget.
+
+ Handle<JSReceiver> new_target_recv =
+ new_target->IsJSReceiver() ? Handle<JSReceiver>::cast(new_target)
+ : Handle<JSReceiver>::cast(target);
+
+ // 2. Let O be ? OrdinaryCreateFromConstructor(newTarget, "%ErrorPrototype%",
+ // « [[ErrorData]] »).
+ Handle<JSObject> err;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, err,
+ JSObject::New(target, new_target_recv), Object);
+
+ // 3. If message is not undefined, then
+ // a. Let msg be ? ToString(message).
+ // b. Let msgDesc be the PropertyDescriptor{[[Value]]: msg, [[Writable]]:
+ // true, [[Enumerable]]: false, [[Configurable]]: true}.
+ // c. Perform ! DefinePropertyOrThrow(O, "message", msgDesc).
+ // 4. Return O.
+
+ if (!message->IsUndefined(isolate)) {
+ Handle<String> msg_string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, msg_string,
+ Object::ToString(isolate, message), Object);
+ RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+ err, isolate->factory()->message_string(),
+ msg_string, DONT_ENUM),
+ Object);
+ }
+
+ // Optionally capture a more detailed stack trace for the message.
+ if (!suppress_detailed_trace) {
+ RETURN_ON_EXCEPTION(isolate, isolate->CaptureAndSetDetailedStackTrace(err),
+ Object);
+ }
+
+ // Capture a simple stack trace for the stack property.
+ RETURN_ON_EXCEPTION(isolate,
+ isolate->CaptureAndSetSimpleStackTrace(err, mode, caller),
+ Object);
+
+ return err;
+}
+
+namespace {
+
+MaybeHandle<String> GetStringPropertyOrDefault(Isolate* isolate,
+ Handle<JSReceiver> recv,
+ Handle<String> key,
+ Handle<String> default_str) {
+ Handle<Object> obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::GetProperty(recv, key),
+ String);
+
+ Handle<String> str;
+ if (obj->IsUndefined(isolate)) {
+ str = default_str;
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, str, Object::ToString(isolate, obj),
+ String);
+ }
+
+ return str;
+}
+
+} // namespace
+
+// ES6 section 19.5.3.4 Error.prototype.toString ( )
+MaybeHandle<String> ErrorUtils::ToString(Isolate* isolate,
+ Handle<Object> receiver) {
+ // 1. Let O be the this value.
+ // 2. If Type(O) is not Object, throw a TypeError exception.
+ if (!receiver->IsJSReceiver()) {
+ return isolate->Throw<String>(isolate->factory()->NewTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Error.prototype.toString"),
+ receiver));
+ }
+ Handle<JSReceiver> recv = Handle<JSReceiver>::cast(receiver);
+
+ // 3. Let name be ? Get(O, "name").
+ // 4. If name is undefined, let name be "Error"; otherwise let name be
+ // ? ToString(name).
+ Handle<String> name_key = isolate->factory()->name_string();
+ Handle<String> name_default = isolate->factory()->Error_string();
+ Handle<String> name;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, name,
+ GetStringPropertyOrDefault(isolate, recv, name_key, name_default),
+ String);
+
+ // 5. Let msg be ? Get(O, "message").
+ // 6. If msg is undefined, let msg be the empty String; otherwise let msg be
+ // ? ToString(msg).
+ Handle<String> msg_key = isolate->factory()->message_string();
+ Handle<String> msg_default = isolate->factory()->empty_string();
+ Handle<String> msg;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, msg,
+ GetStringPropertyOrDefault(isolate, recv, msg_key, msg_default), String);
+
+ // 7. If name is the empty String, return msg.
+ // 8. If msg is the empty String, return name.
+ if (name->length() == 0) return msg;
+ if (msg->length() == 0) return name;
+
+ // 9. Return the result of concatenating name, the code unit 0x003A (COLON),
+ // the code unit 0x0020 (SPACE), and msg.
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(name);
+ builder.AppendCString(": ");
+ builder.AppendString(msg);
+
+ Handle<String> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, builder.Finish(), String);
+ return result;
+}
+
+namespace {
+
+Handle<String> FormatMessage(Isolate* isolate, int template_index,
+ Handle<Object> arg0, Handle<Object> arg1,
+ Handle<Object> arg2) {
+ Handle<String> arg0_str = Object::NoSideEffectsToString(isolate, arg0);
+ Handle<String> arg1_str = Object::NoSideEffectsToString(isolate, arg1);
+ Handle<String> arg2_str = Object::NoSideEffectsToString(isolate, arg2);
+
+ isolate->native_context()->IncrementErrorsThrown();
+
+ Handle<String> msg;
+ if (!MessageTemplate::FormatMessage(template_index, arg0_str, arg1_str,
+ arg2_str)
+ .ToHandle(&msg)) {
+ DCHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ return isolate->factory()->NewStringFromAsciiChecked("<error>");
+ }
+
+ return msg;
+}
+
+} // namespace
+
+// static
+MaybeHandle<Object> ErrorUtils::MakeGenericError(
+ Isolate* isolate, Handle<JSFunction> constructor, int template_index,
+ Handle<Object> arg0, Handle<Object> arg1, Handle<Object> arg2,
+ FrameSkipMode mode) {
+ if (FLAG_clear_exceptions_on_js_entry) {
+ // This function used to be implemented in JavaScript, and JSEntryStub
+ // clears any pending exceptions - so whenever we'd call this from C++,
+ // pending exceptions would be cleared. Preserve this behavior.
+ isolate->clear_pending_exception();
+ }
+
+ DCHECK(mode != SKIP_UNTIL_SEEN);
+
+ Handle<Object> no_caller;
+ Handle<String> msg = FormatMessage(isolate, template_index, arg0, arg1, arg2);
+ return ErrorUtils::Construct(isolate, constructor, constructor, msg, mode,
+ no_caller, false);
+}
+
+#define SET_CALLSITE_PROPERTY(target, key, value) \
+ RETURN_ON_EXCEPTION( \
+ isolate, JSObject::SetOwnPropertyIgnoreAttributes( \
+ target, isolate->factory()->key(), value, DONT_ENUM), \
+ Object)
+
+MaybeHandle<Object> CallSiteUtils::Construct(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> fun,
+ Handle<Object> pos,
+ Handle<Object> strict_mode) {
+ // Create the JS object.
+
+ Handle<JSFunction> target =
+ handle(isolate->native_context()->callsite_function(), isolate);
+
+ Handle<JSObject> obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, JSObject::New(target, target),
+ Object);
+
+ // For wasm frames, receiver is the wasm object and fun is the function index
+ // instead of an actual function.
+ const bool is_wasm_object =
+ receiver->IsJSObject() && wasm::IsWasmObject(JSObject::cast(*receiver));
+ if (!fun->IsJSFunction() && !is_wasm_object) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCallSiteExpectsFunction,
+ Object::TypeOf(isolate, receiver),
+ Object::TypeOf(isolate, fun)),
+ Object);
+ }
+
+ if (is_wasm_object) {
+ DCHECK(fun->IsSmi());
+ DCHECK(wasm::GetNumberOfFunctions(JSObject::cast(*receiver)) >
+ Smi::cast(*fun)->value());
+
+ SET_CALLSITE_PROPERTY(obj, call_site_wasm_obj_symbol, receiver);
+ SET_CALLSITE_PROPERTY(obj, call_site_wasm_func_index_symbol, fun);
+ } else {
+ DCHECK(fun->IsJSFunction());
+ SET_CALLSITE_PROPERTY(obj, call_site_receiver_symbol, receiver);
+ SET_CALLSITE_PROPERTY(obj, call_site_function_symbol, fun);
+ }
+
+ DCHECK(pos->IsSmi());
+ SET_CALLSITE_PROPERTY(obj, call_site_position_symbol, pos);
+ SET_CALLSITE_PROPERTY(
+ obj, call_site_strict_symbol,
+ isolate->factory()->ToBoolean(strict_mode->BooleanValue()));
+
+ return obj;
+}
+
+#undef SET_CALLSITE_PROPERTY
+
+namespace {
+
+bool IsNonEmptyString(Handle<Object> object) {
+ return (object->IsString() && String::cast(*object)->length() > 0);
+}
+
+MaybeHandle<JSObject> AppendWasmToString(Isolate* isolate,
+ Handle<JSObject> recv,
+ CallSite* call_site,
+ IncrementalStringBuilder* builder) {
+ Handle<Object> name = call_site->GetFunctionName();
+ if (name->IsNull(isolate)) {
+ builder->AppendCString("<WASM UNNAMED>");
+ } else {
+ DCHECK(name->IsString());
+ builder->AppendString(Handle<String>::cast(name));
+ }
+
+ builder->AppendCString(" (<WASM>[");
+
+ Handle<String> ix = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(call_site->wasm_func_index()), isolate));
+ builder->AppendString(ix);
+
+ builder->AppendCString("]+");
+
+ Handle<Object> pos;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, pos, JSObject::GetProperty(
+ recv, isolate->factory()->call_site_position_symbol()),
+ JSObject);
+ DCHECK(pos->IsNumber());
+ builder->AppendString(isolate->factory()->NumberToString(pos));
+ builder->AppendCString(")");
+
+ return recv;
+}
+
+MaybeHandle<JSObject> AppendFileLocation(Isolate* isolate,
+ Handle<JSObject> recv,
+ CallSite* call_site,
+ IncrementalStringBuilder* builder) {
+ if (call_site->IsNative()) {
+ builder->AppendCString("native");
+ return recv;
+ }
+
+ Handle<Object> file_name = call_site->GetScriptNameOrSourceUrl();
+ if (!file_name->IsString() && call_site->IsEval()) {
+ Handle<Object> eval_origin = call_site->GetEvalOrigin();
+ DCHECK(eval_origin->IsString());
+ builder->AppendString(Handle<String>::cast(eval_origin));
+ builder->AppendCString(", "); // Expecting source position to follow.
+ }
+
+ if (IsNonEmptyString(file_name)) {
+ builder->AppendString(Handle<String>::cast(file_name));
+ } else {
+ // Source code does not originate from a file and is not native, but we
+ // can still get the source position inside the source string, e.g. in
+ // an eval string.
+ builder->AppendCString("<anonymous>");
+ }
+
+ int line_number = call_site->GetLineNumber();
+ if (line_number != -1) {
+ builder->AppendCharacter(':');
+ Handle<String> line_string = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(line_number), isolate), isolate);
+ builder->AppendString(line_string);
+
+ int column_number = call_site->GetColumnNumber();
+ if (column_number != -1) {
+ builder->AppendCharacter(':');
+ Handle<String> column_string = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(column_number), isolate), isolate);
+ builder->AppendString(column_string);
+ }
+ }
+
+ return recv;
+}
+
+int StringIndexOf(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (pattern->length() > subject->length()) return -1;
+ return String::IndexOf(isolate, subject, pattern, 0);
+}
+
+// Returns true iff
+// 1. the subject ends with '.' + pattern, or
+// 2. subject == pattern.
+bool StringEndsWithMethodName(Isolate* isolate, Handle<String> subject,
+ Handle<String> pattern) {
+ if (String::Equals(subject, pattern)) return true;
+
+ FlatStringReader subject_reader(isolate, String::Flatten(subject));
+ FlatStringReader pattern_reader(isolate, String::Flatten(pattern));
+
+ int pattern_index = pattern_reader.length() - 1;
+ int subject_index = subject_reader.length() - 1;
+ for (int i = 0; i <= pattern_reader.length(); i++) { // Iterate over len + 1.
+ if (subject_index < 0) {
+ return false;
+ }
+
+ const uc32 subject_char = subject_reader.Get(subject_index);
+ if (i == pattern_reader.length()) {
+ if (subject_char != '.') return false;
+ } else if (subject_char != pattern_reader.Get(pattern_index)) {
+ return false;
+ }
+
+ pattern_index--;
+ subject_index--;
+ }
+
+ return true;
+}
+
+MaybeHandle<JSObject> AppendMethodCall(Isolate* isolate, Handle<JSObject> recv,
+ CallSite* call_site,
+ IncrementalStringBuilder* builder) {
+ Handle<Object> type_name = call_site->GetTypeName();
+ Handle<Object> method_name = call_site->GetMethodName();
+ Handle<Object> function_name = call_site->GetFunctionName();
+
+ if (IsNonEmptyString(function_name)) {
+ Handle<String> function_string = Handle<String>::cast(function_name);
+ if (IsNonEmptyString(type_name)) {
+ Handle<String> type_string = Handle<String>::cast(type_name);
+ bool starts_with_type_name =
+ (StringIndexOf(isolate, function_string, type_string) == 0);
+ if (!starts_with_type_name) {
+ builder->AppendString(type_string);
+ builder->AppendCharacter('.');
+ }
+ }
+ builder->AppendString(function_string);
+
+ if (IsNonEmptyString(method_name)) {
+ Handle<String> method_string = Handle<String>::cast(method_name);
+ if (!StringEndsWithMethodName(isolate, function_string, method_string)) {
+ builder->AppendCString(" [as ");
+ builder->AppendString(method_string);
+ builder->AppendCharacter(']');
+ }
+ }
+ } else {
+ builder->AppendString(Handle<String>::cast(type_name));
+ builder->AppendCharacter('.');
+ if (IsNonEmptyString(method_name)) {
+ builder->AppendString(Handle<String>::cast(method_name));
+ } else {
+ builder->AppendCString("<anonymous>");
+ }
+ }
+
+ return recv;
+}
+
+} // namespace
+
+MaybeHandle<String> CallSiteUtils::ToString(Isolate* isolate,
+ Handle<Object> receiver) {
+ if (!receiver->IsJSObject()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked("toString"),
+ receiver),
+ String);
+ }
+ Handle<JSObject> recv = Handle<JSObject>::cast(receiver);
+
+ if (!JSReceiver::HasOwnProperty(
+ recv, isolate->factory()->call_site_position_symbol())
+ .FromMaybe(false)) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kCallSiteMethod,
+ isolate->factory()->NewStringFromAsciiChecked("toString")),
+ String);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+
+ CallSite call_site(isolate, recv);
+ if (call_site.IsWasm()) {
+ RETURN_ON_EXCEPTION(isolate,
+ AppendWasmToString(isolate, recv, &call_site, &builder),
+ String);
+ RETURN_RESULT(isolate, builder.Finish(), String);
+ }
+
+ DCHECK(!call_site.IsWasm());
+ Handle<Object> function_name = call_site.GetFunctionName();
+
+ const bool is_toplevel = call_site.IsToplevel();
+ const bool is_constructor = call_site.IsConstructor();
+ const bool is_method_call = !(is_toplevel || is_constructor);
+
+ if (is_method_call) {
+ RETURN_ON_EXCEPTION(
+ isolate, AppendMethodCall(isolate, recv, &call_site, &builder), String);
+ } else if (is_constructor) {
+ builder.AppendCString("new ");
+ if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+ } else if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ } else {
+ RETURN_ON_EXCEPTION(isolate,
+ AppendFileLocation(isolate, recv, &call_site, &builder),
+ String);
+ RETURN_RESULT(isolate, builder.Finish(), String);
+ }
+
+ builder.AppendCString(" (");
+ RETURN_ON_EXCEPTION(
+ isolate, AppendFileLocation(isolate, recv, &call_site, &builder), String);
+ builder.AppendCString(")");
+
+ RETURN_RESULT(isolate, builder.Finish(), String);
+}
} // namespace internal
} // namespace v8
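The new ErrorUtils::FormatStackTrace moves stack-string assembly from JavaScript into C++: it converts the raw frames captured by Isolate::CaptureSimpleStackTrace into CallSite objects, defers to a user-installed Error.prepareStackTrace when one exists (with PrepareStackTraceScope guarding against recursion), and otherwise concatenates the per-frame " at " lines itself. A hedged sketch of the call shape (the wrapper name is illustrative):

// Sketch: produce the final stack value for an error object. The result is
// a String unless Error.prepareStackTrace substituted some other value.
MaybeHandle<Object> BuildStackString(Isolate* isolate, Handle<JSObject> error,
                                     Handle<Object> raw_stack) {
  return ErrorUtils::FormatStackTrace(isolate, error, raw_stack);
}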
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 4aa0b73e71..cf49ac9c5c 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -10,7 +10,8 @@
#ifndef V8_MESSAGES_H_
#define V8_MESSAGES_H_
-#include "src/base/smart-pointers.h"
+#include <memory>
+
#include "src/handles.h"
#include "src/list.h"
@@ -24,13 +25,10 @@ class SourceInfo;
class MessageLocation {
public:
+ MessageLocation(Handle<Script> script, int start_pos, int end_pos);
MessageLocation(Handle<Script> script, int start_pos, int end_pos,
- Handle<JSFunction> function = Handle<JSFunction>())
- : script_(script),
- start_pos_(start_pos),
- end_pos_(end_pos),
- function_(function) {}
- MessageLocation() : start_pos_(-1), end_pos_(-1) { }
+ Handle<JSFunction> function);
+ MessageLocation();
Handle<Script> script() const { return script_; }
int start_pos() const { return start_pos_; }
@@ -53,6 +51,8 @@ class CallSite {
Handle<Object> GetFunctionName();
Handle<Object> GetScriptNameOrSourceUrl();
Handle<Object> GetMethodName();
+ Handle<Object> GetTypeName();
+ Handle<Object> GetEvalOrigin();
// Return 1-based line number, including line offset.
int GetLineNumber();
// Return 1-based column number, including column offset if first line.
@@ -62,13 +62,59 @@ class CallSite {
bool IsEval();
bool IsConstructor();
- bool IsValid() { return !fun_.is_null(); }
+ bool IsJavaScript() { return !fun_.is_null(); }
+ bool IsWasm() { return !wasm_obj_.is_null(); }
+
+ int wasm_func_index() const { return wasm_func_index_; }
private:
Isolate* isolate_;
Handle<Object> receiver_;
Handle<JSFunction> fun_;
- int32_t pos_;
+ int32_t pos_ = -1;
+ Handle<JSObject> wasm_obj_;
+ uint32_t wasm_func_index_ = static_cast<uint32_t>(-1);
+};
+
+// Determines how stack trace collection skips frames.
+enum FrameSkipMode {
+ // Unconditionally skips the first frame. Used e.g. when the Error constructor
+ // is called, in which case the first frame is always a BUILTIN_EXIT frame.
+ SKIP_FIRST,
+ // Skip all frames until a specified caller function is seen.
+ SKIP_UNTIL_SEEN,
+ SKIP_NONE,
+};
+
+class ErrorUtils : public AllStatic {
+ public:
+ static MaybeHandle<Object> Construct(
+ Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
+ Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
+ bool suppress_detailed_trace);
+
+ static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
+
+ static MaybeHandle<Object> MakeGenericError(
+ Isolate* isolate, Handle<JSFunction> constructor, int template_index,
+ Handle<Object> arg0, Handle<Object> arg1, Handle<Object> arg2,
+ FrameSkipMode mode);
+
+ // Formats a textual stack trace from the given structured stack trace.
+ // Note that this can call arbitrary JS code through Error.prepareStackTrace.
+ static MaybeHandle<Object> FormatStackTrace(Isolate* isolate,
+ Handle<JSObject> error,
+ Handle<Object> stack_trace);
+};
+
+class CallSiteUtils : public AllStatic {
+ public:
+ static MaybeHandle<Object> Construct(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Object> fun, Handle<Object> pos,
+ Handle<Object> strict_mode);
+
+ static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
};
#define MESSAGE_TEMPLATES(T) \
@@ -94,12 +140,11 @@ class CallSite {
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.") \
T(CalledNonCallable, "% is not a function") \
- T(CalledNonCallableInstanceOf, \
- "Right-hand side of 'instanceof' is not callable") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
T(CallSiteExpectsFunction, \
- "CallSite expects function as second argument, got %") \
+ "CallSite expects wasm object as first or function as second argument, " \
+ "got <%, %>") \
T(CallSiteMethod, "CallSite method % expects CallSite as receiver") \
T(CannotConvertToPrimitive, "Cannot convert object to primitive value") \
T(CannotPreventExt, "Cannot prevent extensions") \
@@ -120,19 +165,18 @@ class CallSite {
T(DebuggerType, "Debugger: Parameters have wrong types.") \
T(DeclarationMissingInitializer, "Missing initializer in % declaration") \
T(DefineDisallowed, "Cannot define property:%, object is not extensible.") \
+ T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \
T(DuplicateTemplateProperty, "Object template has duplicate property '%'") \
- T(ExtendsValueGenerator, \
- "Class extends value % may not be a generator function") \
- T(ExtendsValueNotFunction, \
- "Class extends value % is not a function or null") \
+ T(ExtendsValueNotConstructor, \
+ "Class extends value % is not a constructor or null") \
T(FirstArgumentNotRegExp, \
"First argument to % must not be a regular expression") \
T(FunctionBind, "Bind must be called on a function") \
T(GeneratorRunning, "Generator is already running") \
T(IllegalInvocation, "Illegal invocation") \
+ T(ImmutablePrototypeSet, \
+ "Immutable prototype object '%' cannot have their prototype set") \
T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %") \
- T(InstanceofFunctionExpected, \
- "Expecting a function in instanceof check, but got %") \
T(InstanceofNonobjectProto, \
"Function has non-object prototype '%' in instanceof check") \
T(InvalidArgument, "invalid_argument") \
@@ -149,6 +193,8 @@ class CallSite {
"Method invoked on undefined or null value.") \
T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.") \
T(NoAccess, "no access") \
+ T(NonCallableInInstanceOfCheck, \
+ "Right-hand side of 'instanceof' is not callable") \
T(NonCoercible, "Cannot match against 'undefined' or 'null'.") \
T(NonExtensibleProto, "% is not extensible") \
T(NonObjectInInstanceOfCheck, \
@@ -175,19 +221,6 @@ class CallSite {
T(ObjectSetterExpectingFunction, \
"Object.prototype.__defineSetter__: Expecting function") \
T(ObjectSetterCallable, "Setter must be a function: %") \
- T(ObserveCallbackFrozen, \
- "Object.observe cannot deliver to a frozen function object") \
- T(ObserveGlobalProxy, "% cannot be called on the global proxy object") \
- T(ObserveAccessChecked, "% cannot be called on access-checked objects") \
- T(ObserveInvalidAccept, \
- "Third argument to Object.observe must be an array of strings.") \
- T(ObserveNonFunction, "Object.% cannot deliver to non-function") \
- T(ObserveNonObject, "Object.% cannot % non-object") \
- T(ObserveNotifyNonNotifier, "notify called on non-notifier object") \
- T(ObservePerformNonFunction, "Cannot perform non-function") \
- T(ObservePerformNonString, "Invalid non-string changeType") \
- T(ObserveTypeNonString, \
- "Invalid changeRecord with non-string 'type' property") \
T(OrdinaryFunctionCalledAsConstructor, \
"Function object that's not a constructor was created with new") \
T(PromiseCyclic, "Chaining cycle detected for promise %") \
@@ -342,8 +375,9 @@ class CallSite {
T(InvalidCurrencyCode, "Invalid currency code: %") \
T(InvalidDataViewAccessorOffset, \
"Offset is outside the bounds of the DataView") \
- T(InvalidDataViewLength, "Invalid data view length") \
- T(InvalidDataViewOffset, "Start offset is outside the bounds of the buffer") \
+ T(InvalidDataViewLength, "Invalid DataView length %") \
+ T(InvalidDataViewOffset, \
+ "Start offset % is outside the bounds of the buffer") \
T(InvalidHint, "Invalid hint: %") \
T(InvalidLanguageTag, "Invalid language tag: %") \
T(InvalidWeakMapKey, "Invalid value used as weak map key") \
@@ -353,6 +387,8 @@ class CallSite {
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
T(InvalidTypedArrayLength, "Invalid typed array length") \
T(InvalidTypedArrayOffset, "Start offset is too large:") \
+ T(InvalidSimdIndex, "Index out of bounds for SIMD operation") \
+ T(InvalidSimdLaneValue, "Lane value out of bounds for SIMD operation") \
T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
T(LocaleMatcher, "Illegal value for localeMatcher:%") \
T(NormalizationForm, "The normalization form should be one of %.") \
@@ -370,6 +406,7 @@ class CallSite {
T(BadSetterArity, "Setter must have exactly one formal parameter.") \
T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
T(ConstructorIsGenerator, "Class constructor may not be a generator") \
+ T(ConstructorIsAsync, "Class constructor may not be an async method") \
T(DerivedConstructorReturn, \
"Derived constructors may only return object or undefined") \
T(DuplicateConstructor, "A class may only have one constructor") \
@@ -380,6 +417,8 @@ class CallSite {
"% loop variable declaration may not have an initializer.") \
T(ForInOfLoopMultiBindings, \
"Invalid left-hand side in % loop: Must have a single binding.") \
+ T(GeneratorInLegacyContext, \
+ "Generator declarations are not allowed in legacy contexts.") \
T(IllegalBreak, "Illegal break statement") \
T(IllegalContinue, "Illegal continue statement") \
T(IllegalLanguageModeDirective, \
@@ -430,9 +469,6 @@ class CallSite {
T(SloppyFunction, \
"In non-strict mode code, functions can only be declared at top level, " \
"inside a block, or as the body of an if statement.") \
- T(SloppyLexical, \
- "Block-scoped declarations (let, const, function, class) not yet " \
- "supported outside strict mode") \
T(SpeciesNotConstructor, \
"object.constructor[Symbol.species] is not a constructor") \
T(StrictDelete, "Delete of an unqualified identifier in strict mode.") \
@@ -445,20 +481,35 @@ class CallSite {
T(TemplateOctalLiteral, \
"Octal literals are not allowed in template strings.") \
T(ThisFormalParameter, "'this' is not a valid formal parameter name") \
+ T(AwaitBindingIdentifier, \
+ "'await' is not a valid identifier name in an async function") \
+ T(AwaitExpressionFormalParameter, \
+ "Illegal await-expression in formal parameters of async function") \
T(TooManyArguments, \
"Too many arguments in function call (only 65535 allowed)") \
T(TooManyParameters, \
"Too many parameters in function definition (only 65535 allowed)") \
+ T(TooManySpreads, \
+ "Literal containing too many nested spreads (up to 65534 allowed)") \
T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
T(TypedArrayTooShort, \
"Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
T(UnexpectedFunctionSent, \
"function.sent expression is not allowed outside a generator") \
+ T(UnexpectedInsideTailCall, "Unexpected expression inside tail call") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
T(UnexpectedSuper, "'super' keyword unexpected here") \
+ T(UnexpectedSloppyTailCall, \
+ "Tail call expressions are not allowed in non-strict mode") \
T(UnexpectedNewTarget, "new.target expression is not allowed here") \
+ T(UnexpectedTailCall, "Tail call expression is not allowed here") \
+ T(UnexpectedTailCallInCatchBlock, \
+ "Tail call expression in catch block when finally block is also present") \
+ T(UnexpectedTailCallInForInOf, "Tail call expression in for-in/of body") \
+ T(UnexpectedTailCallInTryBlock, "Tail call expression in try block") \
+ T(UnexpectedTailCallOfEval, "Tail call of a direct eval is not allowed") \
T(UnexpectedTemplateString, "Unexpected template string") \
T(UnexpectedToken, "Unexpected token %") \
T(UnexpectedTokenIdentifier, "Unexpected identifier") \
@@ -478,7 +529,18 @@ class CallSite {
/* EvalError */ \
T(CodeGenFromStrings, "%") \
/* URIError */ \
- T(URIMalformed, "URI malformed")
+ T(URIMalformed, "URI malformed") \
+ /* Wasm errors (currently Error) */ \
+ T(WasmTrapUnreachable, "unreachable") \
+ T(WasmTrapMemOutOfBounds, "memory access out of bounds") \
+ T(WasmTrapDivByZero, "divide by zero") \
+ T(WasmTrapDivUnrepresentable, "divide result unrepresentable") \
+ T(WasmTrapRemByZero, "remainder by zero") \
+ T(WasmTrapFloatUnrepresentable, "integer result unrepresentable") \
+ T(WasmTrapFuncInvalid, "invalid function") \
+ T(WasmTrapFuncSigMismatch, "function signature mismatch") \
+ T(WasmTrapInvalidIndex, "invalid index into function table") \
+ T(WasmTrapTypeError, "invalid type")
class MessageTemplate {
public:
@@ -518,8 +580,8 @@ class MessageHandler {
static void DefaultMessageReport(Isolate* isolate, const MessageLocation* loc,
Handle<Object> message_obj);
static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
- static base::SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
- Handle<Object> data);
+ static std::unique_ptr<char[]> GetLocalizedMessage(Isolate* isolate,
+ Handle<Object> data);
};
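
The `T(Name, "message")` entries above form an X-macro list: the same list is expanded once into an enum of template IDs and once into the format strings (each `%` placeholder is substituted with an argument when the error is thrown). A minimal, self-contained sketch of the pattern — the list name `MESSAGE_TEMPLATES` and the helper below are illustrative stand-ins, not V8's actual definitions:

#include <cstdio>

#define MESSAGE_TEMPLATES(T)                                         \
  T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer") \
  T(WasmTrapDivByZero, "divide by zero")

enum class MsgTemplate {
#define TEMPLATE(NAME, STRING) k##NAME,
  MESSAGE_TEMPLATES(TEMPLATE)
#undef TEMPLATE
};

const char* TemplateString(MsgTemplate t) {
  switch (t) {
#define TEMPLATE(NAME, STRING) \
  case MsgTemplate::k##NAME:   \
    return STRING;
    MESSAGE_TEMPLATES(TEMPLATE)
#undef TEMPLATE
  }
  return nullptr;
}

int main() {
  std::printf("%s\n", TemplateString(MsgTemplate::kDetachedOperation));
}
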
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 517d4adab0..963ed4acc9 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -49,6 +49,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+bool CpuFeatures::SupportsSimd128() { return false; }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
@@ -102,11 +103,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
@@ -142,33 +138,6 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
- host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
- icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
@@ -243,6 +212,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -380,7 +350,7 @@ void RelocInfo::WipeOut() {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -481,6 +451,8 @@ void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
CheckTrampolinePoolQuick();
}
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
template <typename T>
void Assembler::EmitHelper(T x) {
@@ -489,6 +461,14 @@ void Assembler::EmitHelper(T x) {
CheckTrampolinePoolQuick();
}
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+ *reinterpret_cast<uint8_t*>(pc_) = x;
+ pc_ += sizeof(x);
+ if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+ CheckTrampolinePoolQuick();
+ }
+}
void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
if (!is_buffer_growth_blocked()) {
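
The new `EmitHelper` specialization above lets single bytes be emitted without tripping the trampoline-pool bookkeeping mid-instruction: the pool is only checked once the write pointer is back on an instruction boundary. A hedged sketch of the same shape (the struct and field names are stand-ins, not V8's):

#include <cstdint>
#include <cstring>

struct MiniAssembler {
  static const int kInstrSize = 4;
  uint8_t storage[64];
  uint8_t* pc = storage;
  int pool_checks = 0;

  void CheckTrampolinePoolQuick() { ++pool_checks; }

  // Generic path: every emitted item ends on an instruction boundary,
  // so the trampoline pool can be checked unconditionally.
  template <typename T>
  void EmitHelper(T x) {
    std::memcpy(pc, &x, sizeof(x));
    pc += sizeof(x);
    CheckTrampolinePoolQuick();
  }
};

// Byte path: single bytes can leave pc mid-instruction, so only check
// the pool once pc is realigned to a full instruction boundary.
template <>
void MiniAssembler::EmitHelper(uint8_t x) {
  *pc++ = x;
  if ((pc - storage) % kInstrSize == 0) CheckTrampolinePoolQuick();
}

int main() {
  MiniAssembler masm;
  for (int i = 0; i < 4; i++) masm.EmitHelper<uint8_t>(0x90);
  return masm.pool_checks == 1 ? 0 : 1;  // exactly one check after 4 bytes
}
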
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index bfa232892a..20a8a11fbb 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -189,6 +189,31 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(size), flush_mode);
+}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
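
These accessors move the wasm relocation helpers into the .cc file and split the old checked `update_wasm_memory_reference` (removed from the `-inl.h` above) into `unchecked_*` setters. The bounds arithmetic the removed checked version performed looks like this sketch:

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uintptr_t Address;

// Rebase one embedded reference from the old memory buffer to the new one.
Address RebaseReference(Address ref, Address old_base, size_t old_size,
                        Address new_base, size_t new_size) {
  assert(old_base <= ref && ref < old_base + old_size);
  Address updated = new_base + (ref - old_base);
  assert(new_base <= updated && updated < new_base + new_size);
  return updated;
}

int main() {
  Address moved = RebaseReference(0x1008, 0x1000, 0x100, 0x8000, 0x200);
  return moved == 0x8008 ? 0 : 1;
}
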
@@ -200,7 +225,6 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -259,11 +283,9 @@ const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
+ recorded_ast_id_(TypeFeedbackId::None()) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
@@ -294,6 +316,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
desc->constant_pool_size = 0;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
@@ -1241,7 +1265,6 @@ void Assembler::b(int16_t offset) {
void Assembler::bal(int16_t offset) {
- positions_recorder()->WriteRecordedPositions();
bgezal(zero_reg, offset);
}
@@ -1254,7 +1277,6 @@ void Assembler::bc(int32_t offset) {
void Assembler::balc(int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1301,7 +1323,6 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
void Assembler::bgezal(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1372,7 +1393,6 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1408,7 +1428,6 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1417,7 +1436,6 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1426,7 +1444,6 @@ void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1435,7 +1452,6 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1443,7 +1459,6 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1452,7 +1467,6 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(ADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1461,7 +1475,6 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(DADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1520,9 +1533,6 @@ void Assembler::j(int32_t target) {
void Assembler::jr(Register rs) {
if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
- }
GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
BlockTrampolinePoolFor(1); // For associated delay slot.
} else {
@@ -1540,7 +1550,6 @@ void Assembler::jal(int32_t target) {
DCHECK(in_range && ((target & 3) == 0));
#endif
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrJump(JAL, (target >> 2) & kImm26Mask);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1549,7 +1558,6 @@ void Assembler::jal(int32_t target) {
void Assembler::jalr(Register rs, Register rd) {
DCHECK(rs.code() != rd.code());
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1563,7 +1571,6 @@ void Assembler::jic(Register rt, int16_t offset) {
void Assembler::jialc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(POP76, zero_reg, rt, offset);
}
@@ -1829,11 +1836,17 @@ void Assembler::lw(Register rd, const MemOperand& rs) {
void Assembler::lwl(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kMips32r2));
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
void Assembler::lwr(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kMips32r2));
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
@@ -1869,11 +1882,17 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
void Assembler::swl(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kMips32r2));
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
void Assembler::swr(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kMips32r2));
GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
@@ -1892,7 +1911,6 @@ void Assembler::aui(Register rt, Register rs, int32_t j) {
GenInstrImmediate(LUI, rs, rt, j);
}
-
// ---------PC-Relative instructions-----------
void Assembler::addiupc(Register rs, int32_t imm19) {
@@ -1956,7 +1974,11 @@ void Assembler::stop(const char* msg, uint32_t code) {
// The Simulator will handle the stop instruction and get the message address.
// On MIPS stop() is just a special kind of break_().
break_(code, true);
- emit(reinterpret_cast<Instr>(msg));
+ // Do not embed the message string address! We used to do this, but that
+ // made snapshots created from position-independent executable builds
+ // non-deterministic.
+ // TODO(yangguo): remove this field entirely.
+ nop();
#endif
}
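
The `nop()` replacement above exists because embedding `msg` baked a load-address-dependent pointer into generated code. A small demonstration of the underlying nondeterminism:

#include <cstdio>

int main() {
  static const char msg[] = "stop reason";
  // Under PIE/ASLR this pointer value differs from run to run; emitting
  // it into the instruction stream (as the old stop() did) made two
  // builds of the same source produce different snapshots.
  std::printf("%p\n", static_cast<const void*>(msg));
  return 0;
}
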
@@ -2009,6 +2031,10 @@ void Assembler::tne(Register rs, Register rt, uint16_t code) {
emit(instr);
}
+void Assembler::sync() {
+ Instr sync_instr = SPECIAL | SYNC;
+ emit(sync_instr);
+}
// Move from HI/LO register.
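
`sync()` composes the barrier directly from opcode fields rather than going through `GenInstrRegister`. A sketch of the composition, with the field values stated as assumptions:

#include <cstdint>
#include <cstdio>

// Assumed field values: SPECIAL is primary opcode 0 (bits 31..26) and
// SYNC is function code 0x0f (bits 5..0), giving the word 0x0000000f.
const uint32_t SPECIAL = 0u << 26;
const uint32_t SYNC = 0x0000000f;

int main() {
  uint32_t sync_instr = SPECIAL | SYNC;
  std::printf("sync encodes as 0x%08x\n", sync_instr);
}
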
@@ -2123,6 +2149,21 @@ void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}
+// Byte swap.
+void Assembler::wsbh(Register rd, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
+}
+
+void Assembler::seh(Register rd, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
+}
+
+void Assembler::seb(Register rd, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
+}
// --------Coprocessor-instructions----------------
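
The three new instructions implement byte reordering and sign extension; their word-level semantics, sketched as plain C++ reference functions:

#include <cstdint>

// wsbh swaps the two bytes within each 16-bit half of the word.
uint32_t wsbh(uint32_t rt) {
  return ((rt & 0x00ff00ffu) << 8) | ((rt & 0xff00ff00u) >> 8);
}

// seh/seb sign-extend the low halfword / low byte to 32 bits.
int32_t seh(uint32_t rt) { return static_cast<int16_t>(rt & 0xffffu); }
int32_t seb(uint32_t rt) { return static_cast<int8_t>(rt & 0xffu); }

int main() {
  return wsbh(0x11223344u) == 0x22114433u ? 0 : 1;
}
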
@@ -2451,11 +2492,13 @@ void Assembler::mov_s(FPURegister fd, FPURegister fs) {
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+ DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
@@ -2955,6 +2998,7 @@ void Assembler::dd(Label* label) {
data = reinterpret_cast<uint32_t>(buffer_ + label->pos());
} else {
data = jump_address(label);
+ unbound_labels_count_++;
internal_reference_positions_.insert(label->pos());
}
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -2968,9 +3012,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (rmode >= RelocInfo::COMMENT &&
rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
// Adjust code for new modes.
- DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode));
// These modes do not need an entry in the constant pool.
}
if (!RelocInfo::IsNone(rinfo.rmode())) {
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 886ac6c052..0e41671a67 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -63,6 +63,9 @@ namespace internal {
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
@@ -123,8 +126,6 @@ struct Register {
Register r = {code};
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -153,8 +154,10 @@ int ToNumber(Register reg);
Register ToRegister(int num);
+static const bool kSimpleFPAliasing = true;
+
// Coprocessor register.
-struct DoubleRegister {
+struct FPURegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -171,22 +174,20 @@ struct DoubleRegister {
// to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
// number of Double regs (64-bit regs, or FPU-reg-pairs).
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
- DoubleRegister low() const {
+ bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
+ FPURegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- DoubleRegister reg;
+ FPURegister reg;
reg.reg_code = reg_code;
DCHECK(reg.is_valid());
return reg;
}
- DoubleRegister high() const {
+ FPURegister high() const {
// Find high reg of a Double-reg pair, which is reg + 1.
DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- DoubleRegister reg;
+ FPURegister reg;
reg.reg_code = reg_code + 1;
DCHECK(reg.is_valid());
return reg;
@@ -201,8 +202,8 @@ struct DoubleRegister {
return 1 << reg_code;
}
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
+ static FPURegister from_code(int code) {
+ FPURegister r = {code};
return r;
}
void setcode(int f) {
@@ -227,8 +228,12 @@ struct DoubleRegister {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef DoubleRegister FPURegister;
-typedef DoubleRegister FloatRegister;
+typedef FPURegister FloatRegister;
+
+typedef FPURegister DoubleRegister;
+
+// TODO(mips) Define SIMD registers.
+typedef FPURegister Simd128Register;
const DoubleRegister no_freg = {-1};
@@ -304,9 +309,6 @@ struct FPUControlRegister {
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
-// TODO(mips) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
// -----------------------------------------------------------------------------
// Machine instruction Operands.
@@ -799,6 +801,9 @@ class Assembler : public AssemblerBase {
void teq(Register rs, Register rt, uint16_t code);
void tne(Register rs, Register rt, uint16_t code);
+ // Memory barrier instruction.
+ void sync();
+
// Move from HI/LO register.
void mfhi(Register rd);
void mflo(Register rd);
@@ -844,6 +849,10 @@ class Assembler : public AssemblerBase {
void bitswap(Register rd, Register rt);
void align(Register rd, Register rs, Register rt, uint8_t bp);
+ void wsbh(Register rd, Register rt);
+ void seh(Register rd, Register rt);
+ void seb(Register rd, Register rt);
+
// --------Coprocessor-instructions----------------
// Load, store, and move.
@@ -1039,8 +1048,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
-
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);
@@ -1053,10 +1061,6 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dd(data); }
void dd(Label* label);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
// Postpone the generation of the trampoline pool for the specified number of
// instructions.
void BlockTrampolinePoolFor(int instructions);
@@ -1158,10 +1162,15 @@ class Assembler : public AssemblerBase {
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+ inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
protected:
// Load Scaled Address instruction.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
+
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
@@ -1359,8 +1368,6 @@ class Assembler : public AssemblerBase {
void GenInstrJump(Opcode opcode,
uint32_t address);
- // Helpers.
- void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
@@ -1450,9 +1457,6 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
-
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index fd286fbb77..844958ec47 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -21,60 +21,16 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ sll(t9, a0, kPointerSizeLog2);
+ __ Addu(t9, sp, t9);
+ __ sw(a1, MemOperand(t9, 0));
+ __ Push(a1);
+ __ Push(a2);
+ __ Addu(a0, a0, Operand(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -82,20 +38,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -812,11 +760,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- const Register base = a1;
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(a2));
- const Register heapnumbermap = t1;
- const Register heapnumber = v0;
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
@@ -826,35 +771,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = t3;
Label call_runtime, done, int_exponent;
- if (exponent_type() == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
- __ lw(base, MemOperand(sp, 1 * kPointerSize));
- __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
-
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
- __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-
- __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent);
-
- __ bind(&base_is_smi);
- __ mtc1(scratch, single_scratch);
- __ cvt_d_w(double_base, single_scratch);
- __ bind(&unpack_exponent);
-
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
- __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
- __ ldc1(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -875,54 +792,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
- if (exponent_type() == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half;
- // Test for 0.5.
- __ Move(double_scratch, 0.5);
- __ BranchF(USE_DELAY_SLOT,
- &not_plus_half,
- NULL,
- ne,
- double_exponent,
- double_scratch);
- // double_scratch can be overwritten in the delay slot.
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
- __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
- __ neg_d(double_result, double_scratch);
-
- // Add +0 to convert -0 to +0.
- __ add_d(double_scratch, double_base, kDoubleRegZero);
- __ sqrt_d(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&not_plus_half);
- __ Move(double_scratch, -0.5);
- __ BranchF(USE_DELAY_SLOT,
- &call_runtime,
- NULL,
- ne,
- double_exponent,
- double_scratch);
- // double_scratch can be overwritten in the delay slot.
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
- __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
- __ Move(double_result, kDoubleRegZero);
-
- // Add +0 to convert -0 to +0.
- __ add_d(double_scratch, double_base, kDoubleRegZero);
- __ Move(double_result, 1.);
- __ sqrt_d(double_scratch, double_scratch);
- __ div_d(double_result, double_result, double_scratch);
- __ jmp(&done);
- }
-
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
@@ -954,10 +823,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Move(double_result, 1.0);
// Get absolute value of exponent.
- Label positive_exponent;
+ Label positive_exponent, bail_out;
__ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
__ Subu(scratch, zero_reg, scratch);
+ // Check when Subu overflows and we get negative result
+ // (happens only when input is MIN_INT).
+ __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
__ bind(&positive_exponent);
+ __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
Label while_true, no_carry, loop_end;
__ bind(&while_true);
@@ -986,42 +859,25 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
+ __ bind(&bail_out);
__ mtc1(exponent, single_scratch);
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(
- heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
- __ sdc1(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- DCHECK(heapnumber.is(v0));
- __ DropAndRet(2);
- } else {
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
-}
+ __ pop(ra);
+ __ MovFromFloatResult(double_result);
+ __ bind(&done);
+ __ Ret();
+}
bool CEntryStub::NeedsImmovableCode() {
return true;
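
The new `bail_out` path guards the exponent negation against `kMinInt`: `Subu(scratch, zero_reg, scratch)` cannot produce a positive result for the most negative 32-bit integer. The arithmetic, as a sketch:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t exponent = INT32_MIN;
  // Subu wraps modulo 2^32, so 0 - INT32_MIN is INT32_MIN again and the
  // "absolute value" stays negative. (Computed via unsigned here to keep
  // the wraparound well-defined in C++.)
  int32_t abs_exp =
      static_cast<int32_t>(0u - static_cast<uint32_t>(exponent));
  if (abs_exp < 0) std::printf("overflow: bail out to the C pow() path\n");
  return 0;
}
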
@@ -1032,7 +888,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1040,7 +896,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1095,7 +950,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles());
+ __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
@@ -1364,12 +1221,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
- // Clear any pending exceptions.
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sw(t1, MemOperand(t0));
-
// Invoke the function by calling through JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
@@ -1451,7 +1302,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1465,128 +1315,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = a1; // Object (lhs).
- Register const function = a0; // Function (rhs).
- Register const object_map = a2; // Map of {object}.
- Register const function_map = a3; // Map of {function}.
- Register const function_prototype = t0; // Prototype of {function}.
- Register const scratch = t1;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&fast_case, ne, function, Operand(at));
- __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&fast_case, ne, object_map, Operand(at));
- __ Ret(USE_DELAY_SLOT);
- __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ GetObjectType(function, function_map, scratch);
- __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ LoadRoot(v0, Heap::kFalseValueRootIndex); // In delay slot.
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ GetObjectType(function, function_map, scratch);
- __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Go to the runtime if the function is not a constructor.
- __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(1 << Map::kIsConstructor));
- __ Branch(&slow_case, eq, at, Operand(zero_reg));
-
- // Ensure that {function} has an instance prototype.
- __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
- // Get the "prototype" (or initial map) of the {function}.
- __ lw(function_prototype,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- __ GetObjectType(function_prototype, scratch, scratch);
- __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
- __ lw(function_prototype,
- FieldMemOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Register const object_instance_type = function_map;
- Register const map_bit_field = function_map;
- Register const null = scratch;
- Register const result = v0;
-
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ LoadRoot(null, Heap::kNullValueRootIndex);
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
- __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
- // Check if the current object is a Proxy.
- __ lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- __ Branch(&fast_runtime_fallback, eq, object_instance_type,
- Operand(JS_PROXY_TYPE));
-
- __ lw(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Branch(&done, eq, object, Operand(function_prototype));
- __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
- __ lw(object_map,
- FieldMemOperand(object, HeapObject::kMapOffset)); // In delay slot.
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ Ret(USE_DELAY_SLOT);
- __ StoreRoot(result,
- Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // Found Proxy or access check needed: Call the runtime
- __ bind(&fast_runtime_fallback);
- __ Push(object, function_prototype);
- // Invalidate the instanceof cache.
- DCHECK(Smi::FromInt(0) == 0);
- __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ Push(object, function);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
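
The deleted `InstanceOfStub` above is superseded by the `OrdinaryHasInstance`/`InstanceOf` runtime paths (matching the instanceof message renames in messages.h earlier in this patch). The core loop it hand-coded was a prototype-chain walk, equivalent to this sketch (the struct is an illustrative stand-in for V8's map/prototype linkage):

#include <cstdio>

struct Object {
  const Object* prototype;  // nullptr terminates the chain
};

bool HasInPrototypeChain(const Object* object, const Object* target) {
  for (const Object* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == target) return true;
  }
  return false;
}

int main() {
  Object proto{nullptr};
  Object obj{&proto};
  std::printf("%d\n", HasInPrototypeChain(&obj, &proto) ? 1 : 0);
}
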
@@ -1900,8 +1628,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(a0, &runtime);
__ GetObjectType(a0, a2, a2);
- __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
- // Check that the JSArray is in fast case.
+ __ Branch(&runtime, ne, a2, Operand(JS_OBJECT_TYPE));
+ // Check that the object has fast elements.
__ lw(last_match_info_elements,
FieldMemOperand(a0, JSArray::kElementsOffset));
__ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
@@ -2031,7 +1759,8 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
const RegList kSavedRegs = 1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
- 1 << 7; // a3
+ 1 << 7 | // a3
+ 1 << cp.code();
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -2053,6 +1782,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -2071,7 +1801,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register feedback_map = t1;
Register weak_value = t4;
__ lw(weak_value, FieldMemOperand(t2, WeakCell::kValueOffset));
- __ Branch(&done, eq, a1, Operand(weak_value));
+ __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, t2, Operand(at));
__ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
@@ -2093,7 +1823,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t2);
__ Branch(&megamorphic, ne, a1, Operand(t2));
- __ jmp(&done);
+ __ jmp(&done_increment_count);
__ bind(&miss);
@@ -2120,11 +1850,27 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ Branch(&done);
+ __ Branch(&done_initialize_count);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+ __ bind(&done_initialize_count);
+ // Initialize the call counter.
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ li(t0, Operand(Smi::FromInt(1)));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
+ __ bind(&done_increment_count);
+
+ // Increment the call count for monomorphic function calls.
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
+ __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+ __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+ __ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
+
__ bind(&done);
}
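
The rewritten bookkeeping initializes the feedback-vector call counter to `Smi::FromInt(1)` on the first call and bumps it with a raw smi add afterwards; because the tag bits are zero, adding two smis adds the untagged values. A sketch (tag size assumed to be 1 bit, as on 32-bit targets):

#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;

int32_t SmiFromInt(int32_t v) { return v << kSmiTagSize; }
int32_t SmiToInt(int32_t smi) { return smi >> kSmiTagSize; }

int main() {
  int32_t count = SmiFromInt(41);
  count += SmiFromInt(1);  // what the Addu above does to the vector slot
  std::printf("call count = %d\n", SmiToInt(count));  // prints 42
}
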
@@ -2185,7 +1931,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// Increment the call count for monomorphic function calls.
__ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Addu(a3, a3, Operand(Smi::FromInt(1)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ mov(a2, t0);
@@ -2231,7 +1977,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
__ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
- __ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Addu(a3, a3, Operand(Smi::FromInt(1)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ bind(&call_function);
@@ -2302,7 +2048,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Initialize the call counter.
__ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ li(t0, Operand(Smi::FromInt(1)));
__ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
// Store the function. Use a stub since we need a frame for allocation.
@@ -2312,9 +2058,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(a1);
+ __ Push(cp, a1);
__ CallStub(&create_stub);
- __ Pop(a1);
+ __ Pop(cp, a1);
}
__ Branch(&call_function);
@@ -2402,13 +2148,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
__ Push(object_, index_);
}
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2750,74 +2490,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// a3: from index (untagged)
__ SmiTag(a3, a3);
StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
- STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ DropAndRet(3);
generator.SkipSlow(masm, &runtime);
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in a0.
- Label not_smi;
- __ JumpIfNotSmi(a0, &not_smi);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_heap_number);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in a0.
- __ AssertNotNumber(a0);
-
- Label not_string;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
- __ bind(&not_string);
-
- Label not_oddball;
- __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset)); // In delay slot.
- __ bind(&not_oddball);
-
- __ Push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in a0.
- __ AssertString(a0);
-
- // Check if string has a cached array index.
- Label runtime;
- __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
- __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
- __ IndexFromHash(a2, v0);
- __ Ret();
-
- __ bind(&runtime);
- __ Push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in a0.
Label is_number;
@@ -2998,7 +2677,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load a2 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ li(a2, handle(isolate()->heap()->undefined_value()));
+ __ li(a2, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3893,14 +3572,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -4037,11 +3716,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&miss, ne, at, Operand(feedback));
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, feedback,
- receiver_map, scratch1, t5);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, name, feedback, receiver_map, scratch1, t5);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -4120,37 +3796,30 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ jmp(&compare_map);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
- Register key = VectorStoreICDescriptor::NameRegister(); // a2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // t0
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // a2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // t0
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
Register feedback = t1;
Register receiver_map = t2;
Register scratch1 = t5;
@@ -4179,11 +3848,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&miss, ne, feedback, Operand(at));
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
- scratch1, scratch2);
+ masm->isolate()->store_stub_cache()->GenerateProbe(
+ masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
__ bind(&miss);
StoreIC::GenerateMiss(masm);
@@ -4193,13 +3859,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4265,13 +3929,12 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ jmp(miss);
}
-
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
- Register key = VectorStoreICDescriptor::NameRegister(); // a2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // t0
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // a2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // t0
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
Register feedback = t1;
Register receiver_map = t2;
Register scratch1 = t5;
@@ -4517,19 +4180,13 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
@@ -4537,8 +4194,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
@@ -4557,13 +4212,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4647,7 +4304,7 @@ void InternalArrayConstructorStub::GenerateCase(
InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0, lo, a0, Operand(1));
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN, hi, a0, Operand(1));
if (IsFastPackedElementsKind(kind)) {
@@ -4748,15 +4405,15 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ sw(a2, MemOperand(v0, JSObject::kMapOffset));
+ __ sw(a2, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, MemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, MemOperand(v0, JSObject::kElementsOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ Addu(a1, v0, Operand(JSObject::kHeaderSize));
+ __ Addu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
- // -- v0 : result (untagged)
+ // -- v0 : result (tagged)
// -- a1 : result fields (untagged)
// -- t1 : result end (untagged)
// -- a2 : initial map
@@ -4774,11 +4431,7 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(a1, t1, a0);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Ret(USE_DELAY_SLOT);
- __ Addu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Ret();
}
__ bind(&slack_tracking);
{
@@ -4801,9 +4454,7 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Addu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Branch(&finalize, eq, a3, Operand(zero_reg));
__ Ret();
// Finalize the instance size.
@@ -4828,10 +4479,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(a2);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Subu(v0, v0, Operand(kHeapObjectTag));
__ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
__ Lsa(t1, v0, t1, kPointerSizeLog2);
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Subu(t1, t1, Operand(kHeapObjectTag));
__ jmp(&done_allocate);
// Fall back to %NewObject.
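
The FastNewObjectStub hunks above all follow from Allocate() now returning a tagged pointer (TAG_OBJECT is gone; see the NO_ALLOCATION_FLAGS changes below): stores switch from MemOperand to FieldMemOperand, and the explicit Addu/Subu of kHeapObjectTag disappears. A small sketch of the addressing identity involved, assuming kHeapObjectTag == 1 as the remaining STATIC_ASSERT states:

    #include <cstdint>

    const intptr_t kHeapObjectTagSketch = 1;  // mirrors kHeapObjectTag == 1

    // FieldMemOperand(base, offset): base register holds a tagged pointer.
    intptr_t FieldAddress(intptr_t tagged_base, int offset) {
      return tagged_base + offset - kHeapObjectTagSketch;
    }

    // MemOperand(base, offset): base register holds a raw address.
    intptr_t RawAddress(intptr_t untagged_base, int offset) {
      return untagged_base + offset;
    }
    // Both compute the same address when tagged_base == untagged_base + 1,
    // which is why tagging the allocation result turns each MemOperand
    // store into the FieldMemOperand form with no extra instructions.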
@@ -4850,19 +4501,19 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(a1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make a2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ Branch(USE_DELAY_SLOT, &loop_entry);
- __ mov(a2, fp); // In delay slot.
- __ bind(&loop);
+ // Make a2 point to the JavaScript frame.
+ __ mov(a2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&loop, ne, a1, Operand(a3));
+ __ Branch(&ok, eq, a1, Operand(a3));
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
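
The rewrite above replaces an unbounded walk of the frame chain with a single conditional skip: the code generator now knows (skip_stub_frame()) whether exactly one handler/stub frame separates the stub from the function's JavaScript frame, and the walk survives only as a debug-mode check. The two strategies, sketched over a hypothetical frame type:

    // Hypothetical frame layout, for illustration only.
    struct Frame {
      Frame* caller_fp;
      void* function;
    };

    // Old shape: walk until the frame owning `function` is found.
    Frame* FindJsFrameByWalking(Frame* fp, void* function) {
      Frame* f = fp;
      while (f->function != function) f = f->caller_fp;
      return f;
    }

    // New shape: skip at most one frame, decided at code-generation time.
    Frame* FindJsFrameDirect(Frame* fp, bool skip_stub_frame) {
      return skip_stub_frame ? fp->caller_fp : fp;
    }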
@@ -4877,10 +4528,10 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// specified by the function's internal formal parameter count.
Label rest_parameters;
__ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a1,
- FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Subu(a0, a0, Operand(a1));
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Subu(a0, a0, Operand(a3));
__ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
// Return an empty rest parameter array.
@@ -4893,7 +4544,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in v0.
@@ -4927,15 +4578,16 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- a0 : number of rest parameters (tagged)
+ // -- a1 : function
// -- a2 : pointer to first rest parameters
// -- ra : return address
// -----------------------------------
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
- __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
- __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+ __ li(t0, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
+ __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in v0.
@@ -4968,16 +4620,24 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a3); // In delay slot
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ Branch(&too_big_for_new_space, gt, t0,
+ Operand(Page::kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(a1);
- __ Push(a0, a2, a1);
+ __ SmiTag(t0);
+ __ Push(a0, a2, t0);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(a0, a2);
}
__ jmp(&done_allocate);
+
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ Push(a1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
}
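
The new too_big_for_new_space path above guards the %AllocateInNewSpace retry with a size check: requests larger than a regular heap object go straight to the generic %NewStrictArguments runtime call instead. The dispatch, reduced to a sketch (the real limit is Page::kMaxRegularHeapObjectSize):

    enum AllocationFallback { kRetryAllocateInNewSpace, kCallNewStrictArguments };

    AllocationFallback ChooseFallback(int size_in_bytes,
                                      int max_regular_object_size) {
      // Oversized rest-parameter arrays cannot be satisfied by the
      // new-space allocator, so skip straight to the slow runtime path.
      return size_in_bytes > max_regular_object_size
                 ? kCallNewStrictArguments
                 : kRetryAllocateInNewSpace;
    }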
@@ -4991,23 +4651,39 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(a1);
+ // Make t0 point to the JavaScript frame.
+ __ mov(t0, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ lw(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
+ __ Branch(&ok, eq, a1, Operand(a3));
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Lsa(a3, fp, a2, kPointerSizeLog2 - 1);
+ __ Lsa(a3, t0, a2, kPointerSizeLog2 - 1);
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
// a1 : function
// a2 : number of parameters (tagged)
// a3 : parameters pointer
+ // t0 : JavaScript frame pointer
// Registers used over whole function:
// t1 : arguments count (tagged)
// t2 : mapped parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
__ lw(a0, MemOperand(t0, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&adaptor_frame, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5053,7 +4729,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
+ __ Allocate(t5, v0, t5, t0, &runtime, NO_ALLOCATION_FLAGS);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -5205,19 +4881,19 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(a1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make a2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ Branch(USE_DELAY_SLOT, &loop_entry);
- __ mov(a2, fp); // In delay slot.
- __ bind(&loop);
+ // Make a2 point to the JavaScript frame.
+ __ mov(a2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&loop, ne, a1, Operand(a3));
+ __ Branch(&ok, eq, a1, Operand(a3));
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -5227,9 +4903,9 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Branch(&arguments_adaptor, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
{
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
- FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldMemOperand(t0, SharedFunctionInfo::kFormalParameterCountOffset));
__ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
__ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
1 * kPointerSize));
@@ -5247,15 +4923,16 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- a0 : number of rest parameters (tagged)
+ // -- a1 : function
// -- a2 : pointer to first rest parameters
// -- ra : return address
// -----------------------------------
// Allocate space for the strict arguments object plus the backing store.
Label allocate, done_allocate;
- __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
- __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+ __ li(t0, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
+ __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in v0.
@@ -5288,46 +4965,24 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a3); // In delay slot
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ Branch(&too_big_for_new_space, gt, t0,
+ Operand(Page::kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(a1);
- __ Push(a0, a2, a1);
+ __ SmiTag(t0);
+ __ Push(a0, a2, t0);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(a0, a2);
}
__ jmp(&done_allocate);
-}
-
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = cp;
- Register slot_reg = a2;
- Register result_reg = v0;
- Label slow_case;
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ lw(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = result_reg;
- }
-
- // Load the PropertyCell value at the specified slot.
- __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
- __ lw(result_reg, ContextMemOperand(at, 0));
- __ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
- // Check that value is not the_hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow_case, eq, result_reg, Operand(at));
- __ Ret();
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Push(slot_reg);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ Push(a1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
@@ -5608,7 +5263,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// Save context, callee and call data.
__ Push(context, callee, call_data);
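
The STATIC_ASSERT changes above record the FunctionCallbackArguments layout growing a new-target slot on top, which the stub fills with undefined. The full layout written out as constants (indices 0-2 and 7-8 are stated by the asserts; the middle indices are inferred from the pushes that follow and should be read as assumptions):

    enum FunctionCallbackArgumentsIndexSketch {
      kHolderIndex = 0,
      kIsolateIndex = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueIndex = 3,    // inferred
      kDataIndex = 4,           // inferred
      kCalleeIndex = 5,         // inferred
      kContextSaveIndex = 6,    // inferred
      kNewTargetIndex = 7,      // new in this revision; undefined is pushed
      kArgsLength = 8
    };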
@@ -5632,7 +5291,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
@@ -5649,8 +5308,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::length_ = argc
__ li(at, Operand(argc()));
__ sw(at, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5667,8 +5324,9 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
- int32_t stack_space_offset = 4 * kPointerSize;
+ int32_t stack_space_offset = 3 * kPointerSize;
stack_space = argc() + FCA::kArgsLength + 1;
+ // TODO(adamk): Why are we clobbering this immediately?
stack_space_offset = kInvalidStackOffset;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_offset, return_value_operand,
@@ -5677,15 +5335,44 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- a2 : api_function_address
- // -----------------------------------
-
- Register api_function_address = ApiGetterDescriptor::function_address();
- DCHECK(api_function_address.is(a2));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = t0;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ typedef PropertyCallbackArguments PCA;
+ __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK(Smi::FromInt(0) == nullptr);
+ __ sw(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
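
To make the stack build above easier to follow: the stub lays out the seven PropertyCallbackInfo::args_ slots plus the property name as eight words below the exit frame, with the name at sp[0] and every args_ index shifted up by one. A sketch of the offset arithmetic the sw sequence encodes:

    const int kPointerSizeSketch = 4;  // MIPS32 word size

    int PcaSlotOffsetFromSp(int pca_index) {
      // +1 because name() is stored below the args_ array, at sp[0].
      return (pca_index + 1) * kPointerSizeSketch;
    }
    // Example: the receiver (kThisIndex == 6) lands at sp + 28, the
    // should_throw_on_error Smi (index 0) at sp + 4, the name at sp + 0.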
@@ -5706,6 +5393,10 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ lw(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5714,7 +5405,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
return_value_operand, NULL);
}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 1c6c1692ae..8aaeaca367 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_MIPS
+#include <memory>
+
#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/mips/simulator-mips.h"
@@ -16,60 +18,6 @@ namespace internal {
#define __ masm.
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
- return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
-
- {
- DoubleRegister input = f12;
- DoubleRegister result = f0;
- DoubleRegister double_scratch1 = f4;
- DoubleRegister double_scratch2 = f6;
- Register temp1 = t0;
- Register temp2 = t1;
- Register temp3 = t2;
-
- __ MovFromFloatParameter(input);
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- __ MovToFloatResult(result);
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
- fast_exp_mips_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
@@ -95,8 +43,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// The size of each prefetch.
uint32_t pref_chunk = 32;
- // The maximum size of a prefetch, it must not be less then pref_chunk.
- // If the real size of a prefetch is greater then max_pref_size and
+ // The maximum size of a prefetch; it must not be less than pref_chunk.
+ // If the real size of a prefetch is greater than max_pref_size and
// the kPrefHintPrepareForStore hint is used, the code will not work
// correctly.
uint32_t max_pref_size = 128;
@@ -733,13 +681,13 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ sll(scratch, length, 2);
__ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
__ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
- // array: destination FixedDoubleArray, not tagged as heap object
+ // array: destination FixedDoubleArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
// Update receiver's map.
- __ sw(scratch2, MemOperand(array, HeapObject::kMapOffset));
+ __ sw(scratch2, FieldMemOperand(array, HeapObject::kMapOffset));
__ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
@@ -751,7 +699,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
- __ Addu(scratch1, array, Operand(kHeapObjectTag));
+ __ Addu(scratch1, array, Operand(kHeapObjectTag - kHeapObjectTag));
__ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
@@ -766,7 +714,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Prepare for conversion loop.
__ Addu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
+ __ Addu(scratch3, array,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ Lsa(array_end, scratch3, length, 2);
// Repurpose registers no longer in use.
@@ -886,8 +835,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// array: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ sw(scratch, MemOperand(array, HeapObject::kMapOffset));
+ __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
+ __ sw(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = elements;
@@ -897,7 +846,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ Addu(src_elements, src_elements, Operand(
FixedDoubleArray::kHeaderSize - kHeapObjectTag
+ Register::kExponentOffset));
- __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
+ __ Addu(dst_elements, array,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Lsa(dst_end, dst_elements, dst_end, 1);
// Allocating heap numbers in the loop below can fail and cause a jump to
@@ -912,8 +862,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&initialization_loop_entry);
__ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
- __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
- __ Addu(array, array, Operand(kHeapObjectTag));
+ __ Addu(dst_elements, array,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses.
// dst_elements: begin of destination FixedArray element fields, not tagged
@@ -1090,95 +1040,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- DCHECK(!input.is(result));
- DCHECK(!input.is(double_scratch1));
- DCHECK(!input.is(double_scratch2));
- DCHECK(!result.is(double_scratch1));
- DCHECK(!result.is(double_scratch2));
- DCHECK(!double_scratch1.is(double_scratch2));
- DCHECK(!temp1.is(temp2));
- DCHECK(!temp1.is(temp3));
- DCHECK(!temp2.is(temp3));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label zero, infinity, done;
-
- __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ ldc1(double_scratch1, ExpConstant(0, temp3));
- __ BranchF(&zero, NULL, ge, double_scratch1, input);
-
- __ ldc1(double_scratch2, ExpConstant(1, temp3));
- __ BranchF(&infinity, NULL, ge, input, double_scratch2);
-
- __ ldc1(double_scratch1, ExpConstant(3, temp3));
- __ ldc1(result, ExpConstant(4, temp3));
- __ mul_d(double_scratch1, double_scratch1, input);
- __ add_d(double_scratch1, double_scratch1, result);
- __ FmoveLow(temp2, double_scratch1);
- __ sub_d(double_scratch1, double_scratch1, result);
- __ ldc1(result, ExpConstant(6, temp3));
- __ ldc1(double_scratch2, ExpConstant(5, temp3));
- __ mul_d(double_scratch1, double_scratch1, double_scratch2);
- __ sub_d(double_scratch1, double_scratch1, input);
- __ sub_d(result, result, double_scratch1);
- __ mul_d(double_scratch2, double_scratch1, double_scratch1);
- __ mul_d(result, result, double_scratch2);
- __ ldc1(double_scratch2, ExpConstant(7, temp3));
- __ mul_d(result, result, double_scratch2);
- __ sub_d(result, result, double_scratch1);
- // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
- DCHECK(*reinterpret_cast<double*>
- (ExternalReference::math_exp_constants(8).address()) == 1);
- __ Move(double_scratch2, 1.);
- __ add_d(result, result, double_scratch2);
- __ srl(temp1, temp2, 11);
- __ Ext(temp2, temp2, 0, 11);
- __ Addu(temp1, temp1, Operand(0x3ff));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ Lsa(temp3, temp3, temp2, 3);
- __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
- __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
- // The first word is loaded is the lower number register.
- if (temp2.code() < temp3.code()) {
- __ sll(at, temp1, 20);
- __ Or(temp1, temp3, at);
- __ Move(double_scratch1, temp2, temp1);
- } else {
- __ sll(at, temp1, 20);
- __ Or(temp1, temp2, at);
- __ Move(double_scratch1, temp3, temp1);
- }
- __ mul_d(result, result, double_scratch1);
- __ BranchShort(&done);
-
- __ bind(&zero);
- __ Move(result, kDoubleRegZero);
- __ BranchShort(&done);
-
- __ bind(&infinity);
- __ ldc1(result, ExpConstant(2, temp3));
-
- __ bind(&done);
-}
-
#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
@@ -1192,7 +1053,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before MIPS simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(
+ std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index ad7abb30c5..48853de659 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -6,8 +6,6 @@
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
-
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -29,23 +27,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- // Register input isn't modified. All other registers are clobbered.
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 49142515c7..8301c5e5de 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -108,6 +108,19 @@ const uint32_t kHoleNanLower32Offset = 4;
(CpuFeatures::IsSupported(static_cast<CpuFeature>(check)))
#endif
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kMipsLwrOffset = 0;
+const uint32_t kMipsLwlOffset = 3;
+const uint32_t kMipsSwrOffset = 0;
+const uint32_t kMipsSwlOffset = 3;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kMipsLwrOffset = 3;
+const uint32_t kMipsLwlOffset = 0;
+const uint32_t kMipsSwrOffset = 3;
+const uint32_t kMipsSwlOffset = 0;
+#else
+#error Unknown endianness
+#endif
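
These offsets drive the unaligned-access helpers added to the macro assembler below (Ulw, Usw and friends): lwr/lwl each fetch the part of an unaligned word that lies on their side of the word boundary, and which byte offset counts as "right" versus "left" flips with endianness. A behavioral model of the little-endian load pair, under the assumption that the two instructions together always cover all four bytes:

    #include <cstdint>

    // What `lwr rd, 0(addr); lwl rd, 3(addr)` accomplishes on little-endian
    // MIPS: assemble the 32-bit word at an address with no alignment
    // guarantee.
    uint32_t EmulateUlw(const uint8_t* mem, uint32_t addr) {
      uint32_t rd = 0;
      for (int i = 0; i < 4; ++i) {
        rd |= uint32_t(mem[addr + i]) << (8 * i);
      }
      return rd;
    }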
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
@@ -409,6 +422,7 @@ enum SecondaryField : uint32_t {
MOVZ = ((1U << 3) + 2),
MOVN = ((1U << 3) + 3),
BREAK = ((1U << 3) + 5),
+ SYNC = ((1U << 3) + 7),
MFHI = ((2U << 3) + 0),
CLZ_R6 = ((2U << 3) + 0),
@@ -620,7 +634,6 @@ enum SecondaryField : uint32_t {
NULLSF = 0U
};
-
// ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
@@ -928,8 +941,7 @@ class Instruction {
FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) |
FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) |
FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
- FunctionFieldToBitNumber(SELNEZ_S);
-
+ FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
// Get the encoding type of the instruction.
inline Type InstructionType(TypeChecks checks = NORMAL) const;
@@ -1174,11 +1186,10 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
int sa = SaFieldRaw() >> kSaShift;
switch (sa) {
case BITSWAP:
- return kRegisterType;
case WSBH:
case SEB:
case SEH:
- return kUnsupported;
+ return kRegisterType;
}
sa >>= kBp2Bits;
switch (sa) {
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index e9caaadadb..478b9dfe30 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -117,8 +117,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 7e0a480e13..bd07874bd6 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1191,6 +1191,9 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
case TNE:
Format(instr, "tne 'rs, 'rt, code: 'code");
break;
+ case SYNC:
+ Format(instr, "sync");
+ break;
case MOVZ:
Format(instr, "movz 'rd, 'rs, 'rt");
break;
@@ -1261,11 +1264,30 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
break;
}
- case SEB:
- case SEH:
- case WSBH:
- UNREACHABLE();
+ case SEB: {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "seb 'rd, 'rt");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case SEH: {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "seh 'rd, 'rt");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case WSBH: {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ Format(instr, "wsbh 'rd, 'rt");
+ } else {
+ Unknown(instr);
+ }
break;
+ }
default: {
sa >>= kBp2Bits;
switch (sa) {
@@ -1696,7 +1718,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -1759,8 +1781,8 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- v8::internal::PrintF(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 06e3b77aea..bafe0b661b 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -11,6 +11,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return a1;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; }
const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
@@ -23,13 +36,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
+const Register StoreDescriptor::SlotRegister() { return t0; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return t0; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return t0; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
@@ -39,23 +48,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return t1; }
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return a2; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-const Register InstanceOfDescriptor::LeftRegister() { return a1; }
-const Register InstanceOfDescriptor::RightRegister() { return a0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
-
-const Register ApiGetterDescriptor::function_address() { return a2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return a0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register MathPowTaggedDescriptor::exponent() { return a2; }
@@ -75,13 +76,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a3};
@@ -247,50 +241,34 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// a0 -- number of arguments
// a1 -- function
// a2 -- allocation site with elements kind
- Register registers[] = {a1, a2};
+ Register registers[] = {a1, a2, a0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// a0 -- number of arguments
- // a1 -- constructor function
- Register registers[] = {a1};
+ // a1 -- function
+ // a2 -- allocation site with elements kind
+ Register registers[] = {a1, a2, a0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {a1, a0};
+ Register registers[] = {a1, a2, a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {a0};
@@ -317,6 +295,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // a1 -- lhs
+ // a0 -- rhs
+ // t0 -- slot id
+ // a3 -- vector
+ Register registers[] = {a1, a0, t0, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -377,9 +371,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -414,6 +407,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ v0, // the value to pass to the generator
+ a1, // the JSGeneratorObject to resume
+ a2 // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 7cbbd3ae2f..dba1fae975 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -30,7 +30,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
}
}
-
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
@@ -67,7 +66,6 @@ void MacroAssembler::Store(Register src,
}
}
-
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@@ -1188,37 +1186,306 @@ void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
+void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ bnvc(rs, rt, &skip);
+ BranchLong(L, PROTECT);
+ bind(&skip);
+ } else {
+ bovc(rs, rt, L);
+ }
+}
+
+void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ bovc(rs, rt, &skip);
+ BranchLong(L, PROTECT);
+ bind(&skip);
+ } else {
+ bnvc(rs, rt, L);
+ }
+}
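
Bovc/Bnvc wrap the r6 compact overflow branches in the usual long-branch idiom: once a trampoline pool has been emitted the target may be out of compact-branch range, so the code branches over an unconditional BranchLong using the inverted condition. What the underlying instructions test, modeled in C++ (ISA semantics assumed: bovc branches iff the 32-bit signed add overflows):

    #include <cstdint>

    bool SignedAddOverflows(int32_t rs, int32_t rt) {
      int64_t wide = int64_t(rs) + int64_t(rt);
      return wide != int32_t(wide);  // result does not fit in 32 bits
    }
    // Bovc takes the branch when this is true, Bnvc when it is false; in
    // the trampoline case each is rebuilt from its inverse plus a long
    // branch, preserving the condition while removing the range limit.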
// ------------Pseudo-instructions-------------
+// Byte-swap helpers; on r2+ these build on wsbh (Word Swap Bytes within
+// Halfwords).
+void MacroAssembler::ByteSwapSigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if (operand_size == 2) {
+ seh(src, src);
+ } else if (operand_size == 1) {
+ seb(src, src);
+ }
+ // No need to do any preparation if operand_size is 4
+
+ wsbh(dest, src);
+ rotr(dest, dest, 16);
+ } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
+ if (operand_size == 1) {
+ sll(src, src, 24);
+ sra(src, src, 24);
+ } else if (operand_size == 2) {
+ sll(src, src, 16);
+ sra(src, src, 16);
+ }
+ // No need to do any preparation if operand_size is 4
+
+ Register tmp = t0;
+ Register tmp2 = t1;
+
+ andi(tmp2, src, 0xFF);
+ sll(tmp2, tmp2, 24);
+ or_(tmp, zero_reg, tmp2);
+
+ andi(tmp2, src, 0xFF00);
+ sll(tmp2, tmp2, 8);
+ or_(tmp, tmp, tmp2);
+
+ srl(src, src, 8);
+ andi(tmp2, src, 0xFF00);
+ or_(tmp, tmp, tmp2);
+
+ srl(src, src, 16);
+ andi(tmp2, src, 0xFF);
+ or_(tmp, tmp, tmp2);
+
+ or_(dest, tmp, zero_reg);
+ }
+}
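
The r2/r6 fast path above builds the swap from wsbh (swap bytes within each halfword) followed by a 16-bit rotate, after first sign-extending sub-word operands with seb/seh; pre-r2 cores assemble the result byte by byte. A C++ model of the fast-path core for a full word:

    #include <cstdint>

    uint32_t Wsbh(uint32_t x) {  // swap bytes within each 16-bit half
      return ((x & 0x00FF00FFu) << 8) | ((x & 0xFF00FF00u) >> 8);
    }

    uint32_t Rotr16(uint32_t x) { return (x >> 16) | (x << 16); }

    // wsbh + rotr 16 == full 32-bit byte swap:
    uint32_t ByteSwap32(uint32_t x) { return Rotr16(Wsbh(x)); }
    // e.g. ByteSwap32(0x11223344) == 0x44332211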
+
+void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 1 || operand_size == 2);
+
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ if (operand_size == 1) {
+ andi(src, src, 0xFF);
+ } else {
+ andi(src, src, 0xFFFF);
+ }
+ // No need to do any preparation if operand_size is 4
+
+ wsbh(dest, src);
+ rotr(dest, dest, 16);
+ } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
+ if (operand_size == 1) {
+ sll(src, src, 24);
+ } else {
+ Register tmp = t0;
+
+ andi(tmp, src, 0xFF00);
+ sll(src, src, 24);
+ sll(tmp, tmp, 8);
+ or_(dest, tmp, src);
+ }
+ }
+}
+
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
- lwr(rd, rs);
- lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (IsMipsArchVariant(kMips32r6)) {
+ lw(rd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ if (is_int16(rs.offset() + kMipsLwrOffset) &&
+ is_int16(rs.offset() + kMipsLwlOffset)) {
+ if (!rd.is(rs.rm())) {
+ lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ } else {
+ lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ mov(rd, at);
+ }
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ lwr(rd, MemOperand(at, kMipsLwrOffset));
+ lwl(rd, MemOperand(at, kMipsLwlOffset));
+ }
+ }
}
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
- swr(rd, rs);
- swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (IsMipsArchVariant(kMips32r6)) {
+ sw(rd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ if (is_int16(rs.offset() + kMipsSwrOffset) &&
+ is_int16(rs.offset() + kMipsSwlOffset)) {
+ swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+ swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+ } else {
+ LoadRegPlusOffsetToAt(rs);
+ swr(rd, MemOperand(at, kMipsSwrOffset));
+ swl(rd, MemOperand(at, kMipsSwlOffset));
+ }
+ }
}
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (IsMipsArchVariant(kMips32r6)) {
+ lh(rd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(at, rs);
+ lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ lb(rd, rs);
+#endif
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lb(rd, MemOperand(at, 1));
+ lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lb(rd, MemOperand(at, 0));
+ lbu(at, MemOperand(at, 1));
+#endif
+ }
+ sll(rd, rd, 8);
+ or_(rd, rd, at);
+ }
+}
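
Ulh composes an unaligned halfword from two byte loads, letting lb supply the sign byte, then merges them with sll/or. The little-endian path, modeled in C++:

    #include <cstdint>

    int32_t EmulateUlh(const uint8_t* mem, uint32_t addr) {
      uint32_t low = mem[addr];        // lbu: zero-extended low byte
      uint32_t high = mem[addr + 1];   // lb in the real code: the sign byte
      return int16_t((high << 8) | low);  // merge, then sign-extend
    }
    // Ulhu below is identical except both loads are lbu, so the result is
    // zero-extended instead.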
-void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- li(dst, Operand(value), mode);
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (IsMipsArchVariant(kMips32r6)) {
+ lhu(rd, rs);
} else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- li(dst, Operand(cell));
- lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- li(dst, Operand(value));
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(at, rs);
+ lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ lbu(rd, rs);
+#endif
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(rd, MemOperand(at, 1));
+ lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(rd, MemOperand(at, 0));
+ lbu(at, MemOperand(at, 1));
+#endif
}
+ sll(rd, rd, 8);
+ or_(rd, rd, at);
+ }
+}
+
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ DCHECK(!rs.rm().is(scratch));
+ DCHECK(!scratch.is(at));
+ if (IsMipsArchVariant(kMips32r6)) {
+ sh(rd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ MemOperand source = rs;
+ // If offset > 16 bits, load address to at with offset 0.
+ if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+ LoadRegPlusOffsetToAt(rs);
+ source = MemOperand(at, 0);
+ }
+
+ if (!scratch.is(rd)) {
+ mov(scratch, rd);
+ }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ sb(scratch, source);
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, source);
+#endif
+ }
+}
+
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ lwc1(fd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ Ulw(scratch, rs);
+ mtc1(scratch, fd);
+ }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ swc1(fd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ mfc1(scratch, fd);
+ Usw(scratch, rs);
+ }
+}
+
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK(!scratch.is(at));
+ if (IsMipsArchVariant(kMips32r6)) {
+ ldc1(fd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+ mtc1(scratch, fd);
+ Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+ Mthc1(scratch, fd);
}
}
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK(!scratch.is(at));
+ if (IsMipsArchVariant(kMips32r6)) {
+ sdc1(fd, rs);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+ IsMipsArchVariant(kLoongson));
+ mfc1(scratch, fd);
+ Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+ Mfhc1(scratch, fd);
+ Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+ }
+}
+
+
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+ li(dst, Operand(value), mode);
+}
+
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
@@ -1354,6 +1621,252 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
addiu(sp, sp, stack_offset);
}
+void MacroAssembler::AddPair(Register dst_low, Register dst_high,
+ Register left_low, Register left_high,
+ Register right_low, Register right_high) {
+ Label no_overflow;
+ Register kScratchReg = s3;
+ Register kScratchReg2 = s4;
+ // Add lower word
+ Addu(dst_low, left_low, right_low);
+ Addu(dst_high, left_high, right_high);
+ // Check for lower word unsigned overflow
+ Sltu(kScratchReg, dst_low, left_low);
+ Sltu(kScratchReg2, dst_low, right_low);
+ Or(kScratchReg, kScratchReg2, kScratchReg);
+ Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
+ // Increment higher word if there was overflow
+ Addu(dst_high, dst_high, 0x1);
+ bind(&no_overflow);
+}
+
+void MacroAssembler::SubPair(Register dst_low, Register dst_high,
+ Register left_low, Register left_high,
+ Register right_low, Register right_high) {
+ Label no_overflow;
+ Register kScratchReg = s3;
+ // Subtract lower word
+ Subu(dst_low, left_low, right_low);
+ Subu(dst_high, left_high, right_high);
+ // Check for lower word unsigned underflow
+ Sltu(kScratchReg, left_low, right_low);
+ Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
+ // Decrement higher word if there was underflow
+ Subu(dst_high, dst_high, 0x1);
+ bind(&no_overflow);
+}
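
AddPair/SubPair implement 64-bit arithmetic on 32-bit register pairs. The carry test is the standard unsigned trick: the low-word sum wrapped around exactly when it compares below an addend. Both operations, modeled in C++ (matching the logic above, not the register allocation):

    #include <cstdint>

    struct Pair { uint32_t lo, hi; };

    Pair AddPair(Pair a, Pair b) {
      Pair r;
      r.lo = a.lo + b.lo;
      r.hi = a.hi + b.hi;
      if (r.lo < a.lo || r.lo < b.lo) r.hi += 1;  // low word wrapped: carry
      return r;
    }

    Pair SubPair(Pair a, Pair b) {
      Pair r;
      r.lo = a.lo - b.lo;
      r.hi = a.hi - b.hi;
      if (a.lo < b.lo) r.hi -= 1;  // low word underflowed: borrow
      return r;
    }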
+
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register shift) {
+ Label less_than_32;
+ Label zero_shift;
+ Label word_shift;
+ Label done;
+ Register kScratchReg = s3;
+ And(shift, shift, 0x3F);
+ li(kScratchReg, 0x20);
+ Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+ Branch(&word_shift, eq, shift, Operand(kScratchReg));
+ // Shift more than 32
+ Subu(kScratchReg, shift, kScratchReg);
+ mov(dst_low, zero_reg);
+ sllv(dst_high, src_low, kScratchReg);
+ Branch(&done);
+ // Word shift
+ bind(&word_shift);
+ mov(dst_low, zero_reg);
+ mov(dst_high, src_low);
+ Branch(&done);
+
+ bind(&less_than_32);
+ // Check if zero shift
+ Branch(&zero_shift, eq, shift, Operand(zero_reg));
+ // Shift less than 32
+ Subu(kScratchReg, kScratchReg, shift);
+ sllv(dst_high, src_high, shift);
+ sllv(dst_low, src_low, shift);
+ srlv(kScratchReg, src_low, kScratchReg);
+ Or(dst_high, dst_high, kScratchReg);
+ Branch(&done);
+ // Zero shift
+ bind(&zero_shift);
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ bind(&done);
+}
+
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ Register kScratchReg = s3;
+ shift = shift & 0x3F;
+ if (shift < 32) {
+ if (shift == 0) {
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ } else {
+ sll(dst_high, src_high, shift);
+ sll(dst_low, src_low, shift);
+ shift = 32 - shift;
+ srl(kScratchReg, src_low, shift);
+ Or(dst_high, dst_high, kScratchReg);
+ }
+ } else {
+ if (shift == 32) {
+ mov(dst_low, zero_reg);
+ mov(dst_high, src_low);
+ } else {
+ shift = shift - 32;
+ mov(dst_low, zero_reg);
+ sll(dst_high, src_low, shift);
+ }
+ }
+}
+
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register shift) {
+ Label less_than_32;
+ Label zero_shift;
+ Label word_shift;
+ Label done;
+ Register kScratchReg = s3;
+ And(shift, shift, 0x3F);
+ li(kScratchReg, 0x20);
+ Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+ Branch(&word_shift, eq, shift, Operand(kScratchReg));
+ // Shift more than 32
+ Subu(kScratchReg, shift, kScratchReg);
+ mov(dst_high, zero_reg);
+ srlv(dst_low, src_high, kScratchReg);
+ Branch(&done);
+ // Word shift
+ bind(&word_shift);
+ mov(dst_high, zero_reg);
+ mov(dst_low, src_high);
+ Branch(&done);
+
+ bind(&less_than_32);
+ // Check if zero shift
+ Branch(&zero_shift, eq, shift, Operand(zero_reg));
+ // Shift less than 32
+ Subu(kScratchReg, kScratchReg, shift);
+ srlv(dst_high, src_high, shift);
+ srlv(dst_low, src_low, shift);
+ sllv(kScratchReg, src_high, kScratchReg);
+ Or(dst_low, dst_low, kScratchReg);
+ Branch(&done);
+ // Zero shift
+ bind(&zero_shift);
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ bind(&done);
+}
+
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ Register kScratchReg = s3;
+ shift = shift & 0x3F;
+ if (shift < 32) {
+ if (shift == 0) {
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ } else {
+ srl(dst_high, src_high, shift);
+ srl(dst_low, src_low, shift);
+ shift = 32 - shift;
+ sll(kScratchReg, src_high, shift);
+ Or(dst_low, dst_low, kScratchReg);
+ }
+ } else {
+ if (shift == 32) {
+ mov(dst_high, zero_reg);
+ mov(dst_low, src_high);
+ } else {
+ shift = shift - 32;
+ mov(dst_high, zero_reg);
+ srl(dst_low, src_high, shift);
+ }
+ }
+}
+
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ Register shift) {
+ Label less_than_32;
+ Label zero_shift;
+ Label word_shift;
+ Label done;
+ Register kScratchReg = s3;
+ Register kScratchReg2 = s4;
+ And(shift, shift, 0x3F);
+ li(kScratchReg, 0x20);
+ Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+ Branch(&word_shift, eq, shift, Operand(kScratchReg));
+
+ // Shift more than 32
+ li(kScratchReg2, 0x1F);
+ Subu(kScratchReg, shift, kScratchReg);
+ srav(dst_high, src_high, kScratchReg2);
+ srav(dst_low, src_high, kScratchReg);
+ Branch(&done);
+ // Word shift
+ bind(&word_shift);
+ li(kScratchReg2, 0x1F);
+ srav(dst_high, src_high, kScratchReg2);
+ mov(dst_low, src_high);
+ Branch(&done);
+
+ bind(&less_than_32);
+ // Check if zero shift
+ Branch(&zero_shift, eq, shift, Operand(zero_reg));
+
+ // Shift less than 32
+ Subu(kScratchReg, kScratchReg, shift);
+ srav(dst_high, src_high, shift);
+ srlv(dst_low, src_low, shift);
+ sllv(kScratchReg, src_high, kScratchReg);
+ Or(dst_low, dst_low, kScratchReg);
+ Branch(&done);
+ // Zero shift
+ bind(&zero_shift);
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ bind(&done);
+}
+
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+ Register src_low, Register src_high,
+ uint32_t shift) {
+ Register kScratchReg = s3;
+ shift = shift & 0x3F;
+ if (shift < 32) {
+ if (shift == 0) {
+ mov(dst_low, src_low);
+ mov(dst_high, src_high);
+ } else {
+ sra(dst_high, src_high, shift);
+ srl(dst_low, src_low, shift);
+ shift = 32 - shift;
+ sll(kScratchReg, src_high, shift);
+ Or(dst_low, dst_low, kScratchReg);
+ }
+ } else {
+ if (shift == 32) {
+ sra(dst_high, src_high, 31);
+ mov(dst_low, src_high);
+ } else {
+ shift = shift - 32;
+ sra(dst_high, src_high, 31);
+ sra(dst_low, src_high, shift);
+ }
+ }
+}
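These pair macros emulate 64-bit shifts on 32-bit MIPS by operating on a low/high register pair. As a reference for the semantics implemented above, here is a host-side C++ sketch (a simplified model with a hypothetical Pair struct, not part of the patch):

  #include <cstdint>

  struct Pair { uint32_t lo, hi; };

  // Reference model for ShlPair: the shift amount is masked to 6 bits,
  // a shift below 32 spills low bits into the high word, and a shift
  // of 32 or more moves the low word into the high word.
  Pair ShlPairRef(Pair x, uint32_t shift) {
    shift &= 0x3F;
    Pair r;
    if (shift == 0) {
      r = x;
    } else if (shift < 32) {
      r.hi = (x.hi << shift) | (x.lo >> (32 - shift));
      r.lo = x.lo << shift;
    } else {
      r.hi = x.lo << (shift - 32);
      r.lo = 0;
    }
    return r;
  }

ShrPair and SarPair mirror this shape in the opposite direction, with SarPair filling the vacated high bits with the sign of the old high word.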
void MacroAssembler::Ext(Register rt,
Register rs,
@@ -1401,6 +1914,66 @@ void MacroAssembler::Ins(Register rt,
}
}
+void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+ if (IsMipsArchVariant(kMips32r2)) {
+ Label is_nan, done;
+ BranchF32(nullptr, &is_nan, eq, fs, fs);
+ Branch(USE_DELAY_SLOT, &done);
+ // For a NaN input, neg_s returns the same NaN value, so the sign
+ // bit has to be flipped separately.
+ neg_s(fd, fs); // In delay slot.
+
+ bind(&is_nan);
+ mfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~kBinary32SignMask));
+ And(scratch1, scratch1, Operand(kBinary32SignMask));
+ Xor(scratch1, scratch1, Operand(kBinary32SignMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+ bind(&done);
+ } else {
+ mfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~kBinary32SignMask));
+ And(scratch1, scratch1, Operand(kBinary32SignMask));
+ Xor(scratch1, scratch1, Operand(kBinary32SignMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+ }
+}
+
+void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+ if (IsMipsArchVariant(kMips32r2)) {
+ Label is_nan, done;
+ BranchF64(nullptr, &is_nan, eq, fs, fs);
+ Branch(USE_DELAY_SLOT, &done);
+ // For a NaN input, neg_d returns the same NaN value, so the sign
+ // bit has to be flipped separately.
+ neg_d(fd, fs); // In delay slot.
+
+ bind(&is_nan);
+ Mfhc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
+ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ Xor(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ Or(scratch2, scratch2, scratch1);
+ Mthc1(scratch2, fd);
+ bind(&done);
+ } else {
+ Move_d(fd, fs);
+ Mfhc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~HeapNumber::kSignMask));
+ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ Xor(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+ Or(scratch2, scratch2, scratch1);
+ Mthc1(scratch2, fd);
+ }
+}
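Neg_s and Neg_d negate by flipping only the IEEE sign bit: the r2 path uses the hardware neg instruction for non-NaN values only, since (as the comments above note) neg_s/neg_d leave a NaN's sign unchanged, and all other cases rewrite the sign bit manually. A minimal host-side sketch of that bit-level operation, assuming the usual IEEE-754 single-precision layout:

  #include <cstdint>
  #include <cstring>

  // Negate a float by XOR-ing the sign bit; unlike an arithmetic
  // negation, this also works for NaN inputs.
  float NegateBitwise(float f) {
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    bits ^= 0x80000000u;  // kBinary32SignMask
    std::memcpy(&f, &bits, sizeof(f));
    return f;
  }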
void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs,
FPURegister scratch) {
@@ -1801,14 +2374,12 @@ void MacroAssembler::Move(FPURegister dst, float imm) {
void MacroAssembler::Move(FPURegister dst, double imm) {
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value_rep(imm);
+ int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
- if (value_rep == zero && has_double_zero_reg_set_) {
+ if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
- neg_d(dst, kDoubleRegZero);
+ } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
+ Neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
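The bit_cast comparison is what makes the -0.0 case work: with a floating-point ==, -0.0 and 0.0 compare equal, so only a bit-pattern comparison can route -0.0 to the negated zero register. A small host-side illustration (a sketch, assuming memcpy-based bit_cast semantics):

  #include <cstdint>
  #include <cstring>

  static int64_t BitsOf(double d) {
    int64_t b;
    std::memcpy(&b, &d, sizeof(b));
    return b;
  }

  // (-0.0 == 0.0) is true, but the bit patterns differ in the sign bit,
  // so BitsOf distinguishes the two zeros the way bit_cast does above.
  bool IsMinusZero(double d) { return BitsOf(d) == BitsOf(-0.0); }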
@@ -2849,7 +3420,6 @@ bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
-
if (!L) {
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
DCHECK(is_int26(offset));
@@ -3401,9 +3971,6 @@ void MacroAssembler::Call(Address target,
Label start;
bind(&start);
int32_t target_int = reinterpret_cast<int32_t>(target);
- // Must record previous source positions before the
- // li() generates a new code target.
- positions_recorder()->WriteRecordedPositions();
li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, cond, rs, rt, bd);
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
@@ -3654,6 +4221,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3726,18 +4294,21 @@ void MacroAssembler::Allocate(int object_size,
// to calculate the new top.
Addu(result_end, result, Operand(object_size));
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
- sw(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Addu(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ sw(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ Addu(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3810,6 +4381,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
Addu(result_end, result, Operand(object_size));
}
+
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
@@ -3817,14 +4389,104 @@ void MacroAssembler::Allocate(Register object_size, Register result,
And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
}
- sw(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Addu(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ sw(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ Addu(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch1;
+ // This code stores a temporary value in t9.
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+ lw(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ And(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
}
+
+ Addu(result_end, result, Operand(object_size));
+
+ // The top pointer is not updated for allocation folding dominators.
+ sw(result_end, MemOperand(top_address));
+
+ Addu(result, result, Operand(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+ DCHECK(!AreAliased(result_end, result, scratch, t9, at));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address and allocation limit registers.
+ Register top_address = scratch;
+ // This code stores a temporary value in t9.
+ li(top_address, Operand(allocation_top));
+ lw(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ And(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ Branch(&aligned, eq, result_end, Operand(zero_reg));
+ li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ sw(result_end, MemOperand(result));
+ Addu(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ // Calculate the new top. Object size may be in words, so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Lsa(result_end, result, object_size, kPointerSizeLog2);
+ } else {
+ Addu(result_end, result, Operand(object_size));
+ }
+
+ // The top pointer is not updated for allocation folding dominators.
+ sw(result_end, MemOperand(top_address));
+
+ Addu(result, result, Operand(kHeapObjectTag));
+}
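Allocate and FastAllocate together implement allocation folding: a dominating allocation performs the limit check for a whole group, and the folded allocations that follow only bump the top pointer. A rough host-side model of the scheme (hypothetical names, not the V8 API):

  #include <cstddef>
  #include <cstdint>

  struct NewSpace { uintptr_t top, limit; };

  // Dominator: check the limit once for the combined size of the group.
  // If this succeeds, every folded allocation in the group is known to fit.
  bool GroupFits(const NewSpace* s, size_t group_size) {
    return s->top + group_size <= s->limit;
  }

  // Folded member (FastAllocate): the limit check was already done by the
  // dominator, so just bump the top pointer and tag the result.
  uintptr_t FastAllocateRef(NewSpace* s, size_t size) {
    uintptr_t result = s->top;
    s->top += size;
    return result + 1;  // kHeapObjectTag
  }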
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@@ -3841,12 +4503,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -3869,12 +4527,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -3888,7 +4542,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
@@ -3901,12 +4555,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -3919,7 +4569,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -3935,7 +4585,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -3961,12 +4611,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch2,
Register heap_number_map,
Label* need_gc,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -3974,11 +4623,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -4002,7 +4647,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -4145,9 +4791,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
int elements_offset) {
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
scratch3));
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
+ Label smi_value, done;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
@@ -4159,53 +4803,29 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
fail,
DONT_DO_SMI_CHECK);
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
- lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
-
- lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- Lsa(scratch1, elements_reg, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- sw(mantissa_reg,
- FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
- + kHoleNanLower32Offset));
- sw(exponent_reg,
- FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
- + kHoleNanUpper32Offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
- lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- LoadRoot(at, Heap::kNanValueRootIndex);
- lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
- lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
- jmp(&have_double_value);
+ // Double value: turn a potential sNaN into a qNaN.
+ DoubleRegister double_result = f0;
+ DoubleRegister double_scratch = f2;
+
+ ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
+ FPUCanonicalizeNaN(double_result, double_result);
bind(&smi_value);
+ Register untagged_value = scratch2;
+ SmiUntag(untagged_value, value_reg);
+ mtc1(untagged_value, double_scratch);
+ cvt_d_w(double_result, double_scratch);
+
+ bind(&done);
Addu(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
// scratch1 is now effective address of the double element
-
- Register untagged_value = scratch2;
- SmiUntag(untagged_value, value_reg);
- mtc1(untagged_value, f2);
- cvt_d_w(f0, f2);
- sdc1(f0, MemOperand(scratch1, 0));
- bind(&done);
+ sdc1(double_result, MemOperand(scratch1, 0));
}
-
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
Handle<Map> map,
@@ -4268,6 +4888,10 @@ void MacroAssembler::CheckMap(Register obj,
Branch(fail, ne, scratch, Operand(at));
}
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
+ sub_d(dst, src, kDoubleRegZero);
+}
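FPUCanonicalizeNaN exploits an IEEE-754 property: any arithmetic operation on a signaling NaN yields a quiet NaN, while x - 0.0 leaves every non-NaN value (including -0.0, under round-to-nearest) unchanged. A host-side analogue, assuming strict IEEE evaluation with no fast-math folding:

  // Subtracting 0.0 quiets a signaling NaN and is the identity otherwise.
  double CanonicalizeNaN(double x) { return x - 0.0; }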
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
li(value, Operand(cell));
@@ -4488,11 +5112,12 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- li(t0, Operand(step_in_enabled));
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ li(t0, Operand(last_step_action));
lb(t0, MemOperand(t0));
- Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ Branch(&skip_flooding, lt, t0, Operand(StepIn));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4851,9 +5476,9 @@ void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
Move(left_reg, left);
Move(right_reg, right);
addu(dst, left, right);
- bnvc(left_reg, right_reg, no_overflow_label);
+ Bnvc(left_reg, right_reg, no_overflow_label);
} else {
- bovc(left, right, overflow_label);
+ Bovc(left, right, overflow_label);
addu(dst, left, right);
if (no_overflow_label) bc(no_overflow_label);
}
@@ -4973,6 +5598,69 @@ void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
+static inline void BranchOvfHelperMult(MacroAssembler* masm,
+ Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+void MacroAssembler::MulBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (right.is_reg()) {
+ MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+
+ Mul(overflow_dst, dst, left, right.immediate());
+ sra(scratch, dst, 31);
+ xor_(overflow_dst, overflow_dst, scratch);
+
+ BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
+
+ if (IsMipsArchVariant(kMips32r6) && dst.is(right)) {
+ mov(scratch, right);
+ Mul(overflow_dst, dst, left, scratch);
+ sra(scratch, dst, 31);
+ xor_(overflow_dst, overflow_dst, scratch);
+ } else {
+ Mul(overflow_dst, dst, left, right);
+ sra(scratch, dst, 31);
+ xor_(overflow_dst, overflow_dst, scratch);
+ }
+
+ BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+}
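The overflow test works on the full 64-bit product: the Mul macro used here produces the low word in dst and the high word in overflow_dst, and a signed 32-bit multiply overflows exactly when the high word differs from the sign extension of the low word. Equivalent host logic, as a sketch:

  #include <cstdint>

  // Signed 32x32 multiply with overflow detection, mirroring the
  // high-word-vs-sign-extension XOR used above.
  bool MulOverflows(int32_t left, int32_t right, int32_t* dst) {
    int64_t product = static_cast<int64_t>(left) * right;
    *dst = static_cast<int32_t>(product);              // low word
    int32_t hi = static_cast<int32_t>(product >> 32);  // high word
    return hi != (*dst >> 31);                         // mismatch => overflow
  }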
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
@@ -5015,11 +5703,12 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd) {
+ BranchDelaySlot bd,
+ bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(),
RelocInfo::CODE_TARGET,
al,
@@ -5028,7 +5717,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -5118,16 +5806,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(Smi::FromInt(reason)));
- push(a0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -5236,9 +5927,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- lw(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- lw(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -5284,7 +5974,24 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
lw(fp, MemOperand(fp, 0 * kPointerSize));
}
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(ra, fp);
+ Move(fp, sp);
+ Push(context, target, argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(context, target, argc);
+ Pop(ra, fp);
+}
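After EnterBuiltinFrame, fp points at the saved caller fp and the pushed values sit at fixed offsets below it. Assuming Push stores the first listed register at the highest address (as the MIPS Push does), the layout is roughly:

  // fp + 4:  saved ra
  // fp + 0:  saved fp   <- fp (after Move(fp, sp))
  // fp - 4:  context
  // fp - 8:  target
  // fp - 12: argc       <- sp on return from EnterBuiltinFrame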
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
+
// Set up the frame structure on the stack.
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
@@ -5304,7 +6011,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
- li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
+ li(at, Operand(Smi::FromInt(frame_type)));
sw(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -5615,6 +6322,16 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotAGeneratorObject, t8,
+ Operand(JS_GENERATOR_OBJECT_TYPE));
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -6089,7 +6806,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -6099,7 +6816,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ li(at, Operand(new_space_allocation_top_adr));
+ lw(at, MemOperand(at));
+ Xor(scratch_reg, scratch_reg, Operand(at));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
// The object is on a different page than allocation top. Bail out if the
@@ -6115,7 +6834,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// we are below top.
bind(&top_check);
Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- li(at, Operand(new_space_allocation_top));
+ li(at, Operand(new_space_allocation_top_adr));
lw(at, MemOperand(at));
Branch(no_memento_found, gt, scratch_reg, Operand(at));
// Memento map check.
@@ -6140,8 +6859,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 2f028658f4..aa5b0f9524 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -18,8 +18,8 @@ const Register kReturnRegister1 = {Register::kCode_v1};
const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
+const Register kAllocateSizeRegister = {Register::kCode_a0};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_t3};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
const Register kInterpreterDispatchTableRegister = {Register::kCode_t6};
@@ -555,6 +555,15 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_new,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is currently only used for folded allocations. It just
+ // increments the top pointer without checking against the limit. This can
+ // only be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register scratch1,
+ Register scratch2, AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_new,
+ Register scratch, AllocationFlags flags);
+
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -589,7 +598,6 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
@@ -679,11 +687,25 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Pseudo-instructions.
+ // Change endianness
+ void ByteSwapSigned(Register dest, Register src, int operand_size);
+ void ByteSwapUnsigned(Register dest, Register src, int operand_size);
+
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+ void Ulh(Register rd, const MemOperand& rs);
+ void Ulhu(Register rd, const MemOperand& rs);
+ void Ush(Register rd, const MemOperand& rs, Register scratch);
+
void Ulw(Register rd, const MemOperand& rs);
void Usw(Register rd, const MemOperand& rs);
+ void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+ void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
@@ -808,6 +830,37 @@ class MacroAssembler: public Assembler {
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Neg_s(FPURegister fd, FPURegister fs);
+ void Neg_d(FPURegister fd, FPURegister fs);
+
+ // MIPS32 R6 instruction macros.
+ void Bovc(Register rt, Register rs, Label* L);
+ void Bnvc(Register rt, Register rs, Label* L);
+
+ // Int64Lowering instructions
+ void AddPair(Register dst_low, Register dst_high, Register left_low,
+ Register left_high, Register right_low, Register right_high);
+
+ void SubPair(Register dst_low, Register dst_high, Register left_low,
+ Register left_high, Register right_low, Register right_high);
+
+ void ShlPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register shift);
+
+ void ShlPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
+ void ShrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register shift);
+
+ void ShrPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
+
+ void SarPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, Register shift);
+
+ void SarPair(Register dst_low, Register dst_high, Register src_low,
+ Register src_high, uint32_t shift);
// ---------------------------------------------------------------------------
// FPU macros. These do not handle special cases like NaN or +- inf.
@@ -960,8 +1013,8 @@ class MacroAssembler: public Assembler {
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
// stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles,
- int stack_space = 0);
+ void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles, Register arg_count,
@@ -1188,6 +1241,9 @@ class MacroAssembler: public Assembler {
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
+ // If the value is a NaN, canonicalize it; otherwise, do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
// Get value of the weak cell.
void GetWeakValue(Register value, Handle<WeakCell> cell);
@@ -1280,6 +1336,24 @@ class MacroAssembler: public Assembler {
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
+ inline void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void MulBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1386,7 +1460,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd = PROTECT);
+ BranchDelaySlot bd = PROTECT,
+ bool builtin_exit_frame = false);
struct Unresolved {
int pc;
@@ -1546,6 +1621,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -1659,6 +1738,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -1834,14 +1916,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
}
}
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index e37b6e12f9..59dc300f68 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -51,7 +51,6 @@ uint32_t get_fcsr_condition_bit(uint32_t cc) {
class MipsDebugger {
public:
explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
- ~MipsDebugger();
void Stop(Instruction* instr);
void Debug();
@@ -85,71 +84,16 @@ class MipsDebugger {
};
-MipsDebugger::~MipsDebugger() {
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void MipsDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->Bits(25, 6);
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
- char* msg = *msg_address;
- DCHECK(msg != NULL);
-
- // Update this stop description.
- if (!watched_stops_[code].desc) {
- watched_stops_[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", str);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instr*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstructionSize);
-}
-
-
-#else // GENERATED_CODE_COVERAGE
-
#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
-static void InitializeCoverage() {}
-
void MipsDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->Bits(25, 6);
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
- Instruction::kInstrSize);
- // Update this stop description.
- if (!sim_->watched_stops_[code].desc) {
- sim_->watched_stops_[code].desc = msg;
- }
- PrintF("Simulator hit %s (%u)\n", msg, code);
+ PrintF("Simulator hit (%u)\n", code);
sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
-#endif // GENERATED_CODE_COVERAGE
int32_t MipsDebugger::GetRegisterValue(int regnum) {
@@ -586,7 +530,7 @@ void MipsDebugger::Debug() {
}
while (cur < end) {
- PrintF(" 0x%08x: 0x%08x %10d",
+ PrintF(" 0x%08" PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
@@ -649,8 +593,8 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.start());
cur += Instruction::kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
@@ -771,8 +715,8 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.start());
cur += Instruction::kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
@@ -864,9 +808,7 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
@@ -887,10 +829,8 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
}
}
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry =
- i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@@ -900,9 +840,7 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
- intptr_t start,
- int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -914,9 +852,7 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -949,7 +885,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ i_cache_ = new base::HashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -985,7 +921,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
- InitializeCoverage();
last_debugger_input_ = NULL;
}
@@ -1062,10 +997,10 @@ class Redirection {
// static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
- for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
@@ -1786,12 +1721,12 @@ void Simulator::TraceMemWr(int32_t addr, int32_t value, TraceType t) {
int Simulator::ReadW(int32_t addr, Instruction* instr) {
if (addr >=0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory read from bad address: 0x%08x, pc=0x%08x\n",
- addr, reinterpret_cast<intptr_t>(instr));
+ PrintF("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
dbg.Debug();
}
- if ((addr & kPointerAlignmentMask) == 0) {
+ if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
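The IsMipsArchVariant(kMips32r6) clauses added throughout these read/write helpers reflect that MIPS32r6 supports unaligned memory accesses in hardware, so the simulator only treats a misaligned address as an error on pre-r6 variants. The recurring guard, factored out as a sketch (a hypothetical helper, reusing the existing IsMipsArchVariant predicate):

  // True when the simulator may perform the access directly.
  bool SimulatedAccessOk(int32_t addr, int32_t alignment_mask) {
    return (addr & alignment_mask) == 0 || IsMipsArchVariant(kMips32r6);
  }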
@@ -1808,12 +1743,12 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory write to bad address: 0x%08x, pc=0x%08x\n",
- addr, reinterpret_cast<intptr_t>(instr));
+ PrintF("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
dbg.Debug();
}
- if ((addr & kPointerAlignmentMask) == 0) {
+ if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
TraceMemWr(addr, value, WORD);
*ptr = value;
@@ -1828,7 +1763,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
double Simulator::ReadD(int32_t addr, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
+ if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
@@ -1841,7 +1776,7 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
+ if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
@@ -1854,7 +1789,7 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
@@ -1868,7 +1803,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int32_t>(*ptr));
return *ptr;
@@ -1882,7 +1817,7 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
@@ -1896,7 +1831,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemWr(addr, value, HALF);
*ptr = value;
@@ -1953,7 +1888,7 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
+ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED_MIPS();
}
@@ -2088,15 +2023,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(generic_target), dval0, dval1);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(generic_target), dval0);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(generic_target), dval0, ival);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ ival);
break;
default:
UNREACHABLE();
@@ -2195,13 +2132,15 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host triple returning runtime function %p "
"args %08x, %08x, %08x, %08x, %08x\n",
- FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
+ arg5);
}
// arg0 is a hidden argument pointing to the return location, so don't
// pass it to the target function.
ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
+ static_cast<void*>(result.y), static_cast<void*>(result.z));
}
// Return is passed back in address pointed to by hidden first argument.
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@@ -2216,13 +2155,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x\n",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
+ arg4, arg5);
}
int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
set_register(v0, static_cast<int32_t>(result));
@@ -3731,6 +3665,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case TNE:
do_interrupt = rs() != rt();
break;
+ case SYNC:
+ // TODO(palfia): Ignore sync instruction for now.
+ break;
// Conditional moves.
case MOVN:
if (rt()) {
@@ -3839,12 +3776,51 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
alu_out = static_cast<int32_t>(output);
break;
}
- case SEB:
- case SEH:
- case WSBH:
- alu_out = 0x12345678;
- UNREACHABLE();
+ case SEB: {
+ uint8_t input = static_cast<uint8_t>(rt());
+ uint32_t output = input;
+ uint32_t mask = 0x00000080;
+
+ // Sign-extend the byte.
+ if (mask & input) {
+ output |= 0xFFFFFF00;
+ }
+
+ alu_out = static_cast<int32_t>(output);
+ break;
+ }
+ case SEH: {
+ uint16_t input = static_cast<uint16_t>(rt());
+ uint32_t output = input;
+ uint32_t mask = 0x00008000;
+
+ // Sign-extend the halfword.
+ if (mask & input) {
+ output |= 0xFFFF0000;
+ }
+
+ alu_out = static_cast<int32_t>(output);
break;
+ }
+ case WSBH: {
+ uint32_t input = static_cast<uint32_t>(rt());
+ uint32_t output = 0;
+
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int32_t>(output);
+ break;
+ }
default: {
const uint8_t bp = get_instr()->Bp2Value();
sa >>= kBp2Bits;
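The SEB/SEH/WSBH cases above give these SPECIAL3 byte-manipulation ops real semantics instead of the old UNREACHABLE stub. WSBH in particular swaps the two bytes inside each 16-bit half of the word; the loop over byte lanes is equivalent to this direct host-side form:

  #include <cstdint>

  // WSBH: [b3 b2 b1 b0] -> [b2 b3 b0 b1].
  uint32_t Wsbh(uint32_t x) {
    return ((x & 0xFF00FF00u) >> 8) | ((x & 0x00FF00FFu) << 8);
  }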
@@ -4413,8 +4389,9 @@ void Simulator::InstructionDecode(Instruction* instr) {
UNSUPPORTED();
}
if (::v8::internal::FLAG_trace_sim) {
- PrintF(" 0x%08x %-44s %s\n", reinterpret_cast<intptr_t>(instr),
- buffer.start(), trace_buf_.start());
+ PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
+ reinterpret_cast<intptr_t>(instr), buffer.start(),
+ trace_buf_.start());
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) +
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index e1c42fdcca..5c77756394 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -75,7 +75,7 @@ class SimulatorStack : public v8::internal::AllStatic {
// Running with a simulator.
#include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
namespace v8 {
namespace internal {
@@ -216,7 +216,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(HashMap* i_cache, Redirection* first);
+ static void TearDown(base::HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -236,8 +236,7 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
+ static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_ra, end_sim_pc).
@@ -401,10 +400,9 @@ class Simulator {
}
// ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+ static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+ static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
enum Exception {
none,
@@ -450,7 +448,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation.
- v8::internal::HashMap* i_cache_;
+ base::HashMap* i_cache_;
v8::internal::Isolate* isolate_;
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index dec58e895c..6078ab965a 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -49,6 +49,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+bool CpuFeatures::SupportsSimd128() { return false; }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
@@ -102,11 +103,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) ||
@@ -144,33 +140,6 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
- host() != NULL && IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
- icache_flush_mode);
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
@@ -231,6 +200,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -362,7 +332,7 @@ void RelocInfo::WipeOut() {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -463,6 +433,8 @@ void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
CheckTrampolinePoolQuick();
}
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
template <typename T>
void Assembler::EmitHelper(T x) {
@@ -471,6 +443,14 @@ void Assembler::EmitHelper(T x) {
CheckTrampolinePoolQuick();
}
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+ *reinterpret_cast<uint8_t*>(pc_) = x;
+ pc_ += sizeof(x);
+ if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+ CheckTrampolinePoolQuick();
+ }
+}
void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
if (!is_buffer_growth_blocked()) {
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 5a8dd2cd37..21a243453a 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -167,6 +167,32 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(size), flush_mode);
+}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
@@ -178,7 +204,6 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm64_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -238,11 +263,9 @@ const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
+ recorded_ast_id_(TypeFeedbackId::None()) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
@@ -274,6 +297,8 @@ void Assembler::GetCode(CodeDesc* desc) {
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
@@ -1226,7 +1251,6 @@ void Assembler::b(int16_t offset) {
void Assembler::bal(int16_t offset) {
- positions_recorder()->WriteRecordedPositions();
bgezal(zero_reg, offset);
}
@@ -1239,7 +1263,6 @@ void Assembler::bc(int32_t offset) {
void Assembler::balc(int32_t offset) {
DCHECK(kArchVariant == kMips64r6);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1286,7 +1309,6 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
void Assembler::bgezal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1357,7 +1379,6 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1393,7 +1414,6 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1402,7 +1422,6 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1411,7 +1430,6 @@ void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6);
DCHECK(!(rs.is(zero_reg)));
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1420,7 +1438,6 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1428,7 +1445,6 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1437,7 +1453,6 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(ADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1446,7 +1461,6 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(DADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1512,7 +1526,6 @@ void Assembler::jal(Label* target) {
uint64_t imm = jump_offset(target);
if (target->is_bound()) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrJump(static_cast<Opcode>(kJalRawMark),
static_cast<uint32_t>(imm >> 2) & kImm26Mask);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1525,9 +1538,6 @@ void Assembler::jal(Label* target) {
void Assembler::jr(Register rs) {
if (kArchVariant != kMips64r6) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
- }
GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
BlockTrampolinePoolFor(1); // For associated delay slot.
} else {
@@ -1538,7 +1548,6 @@ void Assembler::jr(Register rs) {
void Assembler::jal(int64_t target) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1547,7 +1556,6 @@ void Assembler::jal(int64_t target) {
void Assembler::jalr(Register rs, Register rd) {
DCHECK(rs.code() != rd.code());
BlockTrampolinePoolScope block_trampoline_pool(this);
- positions_recorder()->WriteRecordedPositions();
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1561,7 +1569,6 @@ void Assembler::jic(Register rt, int16_t offset) {
void Assembler::jialc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
- positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(POP76, zero_reg, rt, offset);
}
@@ -2001,11 +2008,15 @@ void Assembler::lwu(Register rd, const MemOperand& rs) {
void Assembler::lwl(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
void Assembler::lwr(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
@@ -2041,11 +2052,15 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
void Assembler::swl(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
void Assembler::swr(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
@@ -2084,21 +2099,29 @@ void Assembler::dati(Register rs, int32_t j) {
void Assembler::ldl(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
void Assembler::ldr(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}
void Assembler::sdl(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}
void Assembler::sdr(Register rd, const MemOperand& rs) {
+ DCHECK(is_int16(rs.offset_));
+ DCHECK(kArchVariant == kMips64r2);
GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
@@ -2202,7 +2225,11 @@ void Assembler::stop(const char* msg, uint32_t code) {
// The Simulator will handle the stop instruction and get the message address.
// On MIPS stop() is just a special kind of break_().
break_(code, true);
- emit(reinterpret_cast<uint64_t>(msg));
+ // Do not embed the message string address! We used to do this, but that
+ // made snapshots created from position-independent executable builds
+ // non-deterministic.
+ // TODO(yangguo): remove this field entirely.
+ nop();
#endif
}
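The switch from emitting the message pointer to a nop() follows directly from the comment: an absolute address baked into the instruction stream varies between runs of a position-independent build, so a snapshot taken over that code varies with it. A trivial demonstration of the underlying effect (illustrative only, not V8 code):

    // Run this twice in a PIE binary with ASLR: the printed value
    // typically differs between runs, which is exactly why embedding
    // msg's address made snapshots non-deterministic.
    #include <cstdio>

    static const char kMsg[] = "stop message";

    int main() {
      std::printf("%p\n", static_cast<const void*>(kMsg));
      return 0;
    }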
@@ -2255,6 +2282,10 @@ void Assembler::tne(Register rs, Register rt, uint16_t code) {
emit(instr);
}
+void Assembler::sync() {
+ Instr sync_instr = SPECIAL | SYNC;
+ emit(sync_instr);
+}
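The new sync() emits SYNC with a zero stype field, which on MIPS is the full (strongest) memory barrier. Its role corresponds roughly to a sequentially consistent fence; a hedged C++-level analogue of the publish/consume ordering such a barrier enables (this is not how V8 itself uses sync, just an illustration):

    // Rough analogue of the ordering sync() provides; the two fences
    // pair up per the C++ fence-synchronization rules.
    #include <atomic>

    int payload;
    std::atomic<int> ready{0};

    void publish() {
      payload = 42;
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ sync
      ready.store(1, std::memory_order_relaxed);
    }

    int consume() {
      while (ready.load(std::memory_order_relaxed) == 0) {
      }
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ sync
      return payload;  // observes 42
    }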
// Move from HI/LO register.
@@ -2488,6 +2519,30 @@ void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
GenInstrRegister(SPECIAL3, rs, rt, rd, sa, DBSHFL);
}
+void Assembler::wsbh(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL);
+}
+
+void Assembler::dsbh(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSBH, DBSHFL);
+}
+
+void Assembler::dshd(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, DSHD, DBSHFL);
+}
+
+void Assembler::seh(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL);
+}
+
+void Assembler::seb(Register rd, Register rt) {
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL);
+}
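The five instructions added above are plain bit-manipulation primitives. Their architectural semantics, sketched in portable C++ (these helper functions are illustrative, not V8 code):

    // Portable sketches of the MIPS r2/r6 instructions added above.
    #include <cassert>
    #include <cstdint>

    // wsbh: swap the two bytes inside each 16-bit halfword of a word.
    uint32_t wsbh(uint32_t x) {
      return ((x & 0x00FF00FFu) << 8) | ((x & 0xFF00FF00u) >> 8);
    }

    // dsbh: the same byte swap, applied to all four halfwords of a doubleword.
    uint64_t dsbh(uint64_t x) {
      return ((x & 0x00FF00FF00FF00FFull) << 8) |
             ((x & 0xFF00FF00FF00FF00ull) >> 8);
    }

    // dshd: reverse the four 16-bit halfwords of a doubleword.
    uint64_t dshd(uint64_t x) {
      return ((x & 0x000000000000FFFFull) << 48) |
             ((x & 0x00000000FFFF0000ull) << 16) |
             ((x & 0x0000FFFF00000000ull) >> 16) |
             ((x & 0xFFFF000000000000ull) >> 48);
    }

    // seb/seh: sign-extend the low byte / halfword into the full register
    // (two's-complement targets assumed).
    int32_t seb(uint32_t x) { return static_cast<int8_t>(x); }
    int32_t seh(uint32_t x) { return static_cast<int16_t>(x); }

    int main() {
      assert(wsbh(0x11223344u) == 0x22114433u);
      assert(seb(0x000000FFu) == -1);
      assert(seh(0x0000FFFFu) == -1);
      // dsbh then dshd is the usual MIPS64 full byte-reversal idiom:
      assert(dshd(dsbh(0x0102030405060708ull)) == 0x0807060504030201ull);
      return 0;
    }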
// --------Coprocessor-instructions----------------
@@ -2763,11 +2818,13 @@ void Assembler::mov_s(FPURegister fd, FPURegister fs) {
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
+ DCHECK(kArchVariant == kMips64r2);
GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+ DCHECK(kArchVariant == kMips64r2);
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
@@ -3208,6 +3265,7 @@ void Assembler::dd(Label* label) {
data = reinterpret_cast<uint64_t>(buffer_ + label->pos());
} else {
data = jump_address(label);
+ unbound_labels_count_++;
internal_reference_positions_.insert(label->pos());
}
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -3221,9 +3279,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (rmode >= RelocInfo::COMMENT &&
rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL) {
// Adjust code for new modes.
- DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode));
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode));
// These modes do not need an entry in the constant pool.
}
if (!RelocInfo::IsNone(rinfo.rmode())) {
@@ -3394,7 +3450,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index de09366b42..e269acfc28 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -63,6 +63,9 @@ namespace internal {
V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
@@ -123,8 +126,6 @@ struct Register {
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -153,8 +154,10 @@ int ToNumber(Register reg);
Register ToRegister(int num);
+static const bool kSimpleFPAliasing = true;
+
// Coprocessor register.
-struct DoubleRegister {
+struct FPURegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -171,24 +174,22 @@ struct DoubleRegister {
// to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
// number of Double regs (64-bit regs, or FPU-reg-pairs).
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
- DoubleRegister low() const {
+ bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
+ FPURegister low() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage is suspect for FR=1.
// Find low reg of a Double-reg pair, which is the reg itself.
DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- DoubleRegister reg;
+ FPURegister reg;
reg.reg_code = reg_code;
DCHECK(reg.is_valid());
return reg;
}
- DoubleRegister high() const {
+ FPURegister high() const {
// TODO(plind): Create DCHECK for FR=0 mode. This usage is illegal in FR=1.
// Find high reg of a Double-reg pair, which is reg + 1.
DCHECK(reg_code % 2 == 0); // Specified Double reg must be even.
- DoubleRegister reg;
+ FPURegister reg;
reg.reg_code = reg_code + 1;
DCHECK(reg.is_valid());
return reg;
@@ -203,8 +204,8 @@ struct DoubleRegister {
return 1 << reg_code;
}
- static DoubleRegister from_code(int code) {
- DoubleRegister r = {code};
+ static FPURegister from_code(int code) {
+ FPURegister r = {code};
return r;
}
void setcode(int f) {
@@ -229,8 +230,12 @@ struct DoubleRegister {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to the same set of 32 32-bit registers.
-typedef DoubleRegister FPURegister;
-typedef DoubleRegister FloatRegister;
+typedef FPURegister FloatRegister;
+
+typedef FPURegister DoubleRegister;
+
+// TODO(mips64) Define SIMD registers.
+typedef FPURegister Simd128Register;
const DoubleRegister no_freg = {-1};
@@ -306,9 +311,6 @@ struct FPUControlRegister {
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
-// TODO(mips64) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
// -----------------------------------------------------------------------------
// Machine instruction Operands.
const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -849,6 +851,9 @@ class Assembler : public AssemblerBase {
void teq(Register rs, Register rt, uint16_t code);
void tne(Register rs, Register rt, uint16_t code);
+ // Memory barrier instruction.
+ void sync();
+
// Move from HI/LO register.
void mfhi(Register rd);
void mflo(Register rd);
@@ -901,6 +906,12 @@ class Assembler : public AssemblerBase {
void align(Register rd, Register rs, Register rt, uint8_t bp);
void dalign(Register rd, Register rs, Register rt, uint8_t bp);
+ void wsbh(Register rd, Register rt);
+ void dsbh(Register rd, Register rt);
+ void dshd(Register rd, Register rt);
+ void seh(Register rd, Register rt);
+ void seb(Register rd, Register rt);
+
// --------Coprocessor-instructions----------------
// Load, store, and move.
@@ -1099,7 +1110,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);
@@ -1112,10 +1123,6 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dq(data); }
void dd(Label* label);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
// Postpone the generation of the trampoline pool for the specified number of
// instructions.
void BlockTrampolinePoolFor(int instructions);
@@ -1212,11 +1219,16 @@ class Assembler : public AssemblerBase {
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+ inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
protected:
// Load Scaled Address instructions.
void lsa(Register rd, Register rt, Register rs, uint8_t sa);
void dlsa(Register rd, Register rt, Register rs, uint8_t sa);
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
+
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
@@ -1413,9 +1425,6 @@ class Assembler : public AssemblerBase {
void GenInstrJump(Opcode opcode,
uint32_t address);
- // Helpers.
- void LoadRegPlusOffsetToAt(const MemOperand& src);
-
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
@@ -1500,9 +1509,6 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
-
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index fdb6c81d2e..4d9f1209b4 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -20,60 +20,16 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ dsll(t9, a0, kPointerSizeLog2);
+ __ Daddu(t9, sp, t9);
+ __ sd(a1, MemOperand(t9, 0));
+ __ Push(a1);
+ __ Push(a2);
+ __ Daddu(a0, a0, 3);
+ __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -81,20 +37,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -809,11 +757,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- const Register base = a1;
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(a2));
- const Register heapnumbermap = a5;
- const Register heapnumber = v0;
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
@@ -823,35 +768,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = a7;
Label call_runtime, done, int_exponent;
- if (exponent_type() == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
- __ ld(base, MemOperand(sp, 1 * kPointerSize));
- __ ld(exponent, MemOperand(sp, 0 * kPointerSize));
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
-
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
- __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-
- __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent);
-
- __ bind(&base_is_smi);
- __ mtc1(scratch, single_scratch);
- __ cvt_d_w(double_base, single_scratch);
- __ bind(&unpack_exponent);
-
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
- __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
- __ ldc1(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -872,55 +789,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
- if (exponent_type() == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half;
-
- // Test for 0.5.
- __ Move(double_scratch, 0.5);
- __ BranchF(USE_DELAY_SLOT,
- &not_plus_half,
- NULL,
- ne,
- double_exponent,
- double_scratch);
- // double_scratch can be overwritten in the delay slot.
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
- __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
- __ neg_d(double_result, double_scratch);
-
- // Add +0 to convert -0 to +0.
- __ add_d(double_scratch, double_base, kDoubleRegZero);
- __ sqrt_d(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&not_plus_half);
- __ Move(double_scratch, -0.5);
- __ BranchF(USE_DELAY_SLOT,
- &call_runtime,
- NULL,
- ne,
- double_exponent,
- double_scratch);
- // double_scratch can be overwritten in the delay slot.
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
- __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
- __ Move(double_result, kDoubleRegZero);
-
- // Add +0 to convert -0 to +0.
- __ add_d(double_scratch, double_base, kDoubleRegZero);
- __ Move(double_result, 1.);
- __ sqrt_d(double_scratch, double_scratch);
- __ div_d(double_result, double_result, double_scratch);
- __ jmp(&done);
- }
-
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
@@ -952,10 +820,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Move(double_result, 1.0);
// Get absolute value of exponent.
- Label positive_exponent;
+ Label positive_exponent, bail_out;
__ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
__ Dsubu(scratch, zero_reg, scratch);
+ // Check for the case where Dsubu overflows and yields a negative
+ // result (this happens only when the input is MIN_INT).
+ __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
__ bind(&positive_exponent);
+ __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
Label while_true, no_carry, loop_end;
__ bind(&while_true);
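The new bail_out branch guards the one case where negating the exponent does not produce a non-negative value: in two's complement, negating the most negative representable value wraps back to itself. A short demonstration of the hazard, computed through unsigned arithmetic so the sketch stays free of signed-overflow UB (illustrative, not V8 code):

    // Why the bail_out check exists: the "absolute value" of MIN_INT
    // is still negative after the wrap, so the squaring loop would
    // misbehave without a bailout to the C runtime.
    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t e = INT32_MIN;
      uint32_t neg = 0u - static_cast<uint32_t>(e);  // well-defined wrap
      int32_t abs_e = static_cast<int32_t>(neg);     // two's-complement target
      assert(abs_e == INT32_MIN);  // still negative: bail out
      return 0;
    }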
@@ -984,42 +856,25 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// double_exponent may not contain the exponent value if the input was a
// smi. We set it to the exponent value before bailing out.
+ __ bind(&bail_out);
__ mtc1(exponent, single_scratch);
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(
- heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
- __ sdc1(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- DCHECK(heapnumber.is(v0));
- __ DropAndRet(2);
- } else {
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
-}
+ __ pop(ra);
+ __ MovFromFloatResult(double_result);
+ __ bind(&done);
+ __ Ret();
+}
bool CEntryStub::NeedsImmovableCode() {
return true;
@@ -1030,7 +885,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1038,7 +893,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1093,7 +947,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles());
+ __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
@@ -1361,12 +1217,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
- // Clear any pending exceptions.
- __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
- __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate)));
- __ sd(a5, MemOperand(a4));
-
// Invoke the function by calling through JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
@@ -1447,7 +1297,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1461,128 +1310,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = a1; // Object (lhs).
- Register const function = a0; // Function (rhs).
- Register const object_map = a2; // Map of {object}.
- Register const function_map = a3; // Map of {function}.
- Register const function_prototype = a4; // Prototype of {function}.
- Register const scratch = a5;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&fast_case, ne, function, Operand(at));
- __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&fast_case, ne, object_map, Operand(at));
- __ Ret(USE_DELAY_SLOT);
- __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ GetObjectType(function, function_map, scratch);
- __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ LoadRoot(v0, Heap::kFalseValueRootIndex); // In delay slot.
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ GetObjectType(function, function_map, scratch);
- __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Go to the runtime if the function is not a constructor.
- __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
- __ And(at, scratch, Operand(1 << Map::kIsConstructor));
- __ Branch(&slow_case, eq, at, Operand(zero_reg));
-
- // Ensure that {function} has an instance prototype.
- __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
-
- // Get the "prototype" (or initial map) of the {function}.
- __ ld(function_prototype,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- __ GetObjectType(function_prototype, scratch, scratch);
- __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
- __ ld(function_prototype,
- FieldMemOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Register const object_instance_type = function_map;
- Register const map_bit_field = function_map;
- Register const null = scratch;
- Register const result = v0;
-
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ LoadRoot(null, Heap::kNullValueRootIndex);
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ lbu(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ And(map_bit_field, map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
- __ Branch(&fast_runtime_fallback, ne, map_bit_field, Operand(zero_reg));
- // Check if the current object is a Proxy.
- __ lbu(object_instance_type,
- FieldMemOperand(object_map, Map::kInstanceTypeOffset));
- __ Branch(&fast_runtime_fallback, eq, object_instance_type,
- Operand(JS_PROXY_TYPE));
-
- __ ld(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ Branch(&done, eq, object, Operand(function_prototype));
- __ Branch(USE_DELAY_SLOT, &loop, ne, object, Operand(null));
- __ ld(object_map,
- FieldMemOperand(object, HeapObject::kMapOffset)); // In delay slot.
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ Ret(USE_DELAY_SLOT);
- __ StoreRoot(result,
- Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
-
- // Found Proxy or access check needed: Call the runtime
- __ bind(&fast_runtime_fallback);
- __ Push(object, function_prototype);
- // Invalidate the instanceof cache.
- DCHECK(Smi::FromInt(0) == 0);
- __ StoreRoot(zero_reg, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ Push(object, function);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -1901,8 +1628,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(a0, &runtime);
__ GetObjectType(a0, a2, a2);
- __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
- // Check that the JSArray is in fast case.
+ __ Branch(&runtime, ne, a2, Operand(JS_OBJECT_TYPE));
+ // Check that the object has fast elements.
__ ld(last_match_info_elements,
FieldMemOperand(a0, JSArray::kElementsOffset));
__ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
@@ -2033,8 +1760,8 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
const RegList kSavedRegs = 1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
- 1 << 7; // a3
-
+ 1 << 7 | // a3
+ 1 << cp.code();
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(a0);
@@ -2056,6 +1783,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// a2 : feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
@@ -2075,7 +1803,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register feedback_map = a6;
Register weak_value = t0;
__ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
- __ Branch(&done, eq, a1, Operand(weak_value));
+ __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, a5, Operand(at));
__ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
@@ -2097,7 +1825,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Make sure the function is the Array() function
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
__ Branch(&megamorphic, ne, a1, Operand(a5));
- __ jmp(&done);
+ __ jmp(&done_increment_count);
__ bind(&miss);
@@ -2125,12 +1853,31 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ Branch(&done);
+ __ Branch(&done_initialize_count);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+ __ bind(&done_initialize_count);
+ // Initialize the call counter.
+
+ __ SmiScale(a4, a3, kPointerSizeLog2);
+ __ Daddu(a4, a2, Operand(a4));
+ __ li(a5, Operand(Smi::FromInt(1)));
+ __ Branch(USE_DELAY_SLOT, &done);
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + kPointerSize));
+
+ __ bind(&done_increment_count);
+
+ // Increment the call count for monomorphic function calls.
+ __ SmiScale(a4, a3, kPointerSizeLog2);
+ __ Daddu(a5, a2, Operand(a4));
+ __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+ __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+
__ bind(&done);
}
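The counter bumps above work directly on the tagged value: with the 64-bit smi layout (payload in the upper 32 bits, zero tag below), tagging is linear, so adding the tagged constant 1 to a tagged count yields the tagged count + 1 without untagging. A sketch under that layout assumption (helper names are illustrative):

    // Smi arithmetic sketch: tag(a) + tag(b) == tag(a + b) on 64-bit V8,
    // so call counts can be incremented while still tagged.
    #include <cassert>
    #include <cstdint>

    int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }
    int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

    int main() {
      int64_t count = SmiTag(41);
      count += SmiTag(1);  // what Daddu(a4, a4, Operand(Smi::FromInt(1))) does
      assert(SmiUntag(count) == 42);
      return 0;
    }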
@@ -2233,7 +1980,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ dsrl(t0, a3, 32 - kPointerSizeLog2);
__ Daddu(a3, a2, Operand(t0));
__ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
- __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
__ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
__ mov(a2, a4);
@@ -2281,7 +2028,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ dsrl(t0, a3, 32 - kPointerSizeLog2);
__ Daddu(a3, a2, Operand(t0));
__ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
- __ Daddu(t0, t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
__ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
__ bind(&call_function);
@@ -2354,7 +2101,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Initialize the call counter.
__ dsrl(at, a3, 32 - kPointerSizeLog2);
__ Daddu(at, a2, Operand(at));
- __ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ __ li(t0, Operand(Smi::FromInt(1)));
__ sd(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
// Store the function. Use a stub since we need a frame for allocation.
@@ -2364,9 +2111,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(a1);
+ __ Push(cp, a1);
__ CallStub(&create_stub);
- __ Pop(a1);
+ __ Pop(cp, a1);
}
__ Branch(&call_function);
@@ -2415,13 +2162,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
} else {
__ Push(object_, index_);
}
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2758,74 +2499,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// a3: from index (untagged)
__ SmiTag(a3);
StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
- STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ DropAndRet(3);
generator.SkipSlow(masm, &runtime);
}
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in a0.
- Label not_smi;
- __ JumpIfNotSmi(a0, &not_smi);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, a0);
- __ bind(&not_heap_number);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in a0.
- __ AssertNotNumber(a0);
-
- Label not_string;
- __ GetObjectType(a0, a1, a1);
- // a0: receiver
- // a1: receiver instance type
- __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
- __ bind(&not_string);
-
- Label not_oddball;
- __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset)); // In delay slot.
- __ bind(&not_oddball);
-
- __ Push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in a0.
- __ AssertString(a0);
-
- // Check if string has a cached array index.
- Label runtime;
- __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
- __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
- __ IndexFromHash(a2, v0);
- __ Ret();
-
- __ bind(&runtime);
- __ Push(a0); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in a0.
Label is_number;
@@ -3006,7 +2685,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load a2 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ li(a2, handle(isolate()->heap()->undefined_value()));
+ __ li(a2, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3901,14 +3580,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -4046,11 +3725,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&miss, ne, feedback, Operand(at));
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, feedback,
- receiver_map, scratch1, a7);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, name, feedback, receiver_map, scratch1, a7);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -4131,37 +3807,30 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Branch(&compare_map);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
- Register key = VectorStoreICDescriptor::NameRegister(); // a2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // a4
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // a2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // a4
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
Register feedback = a5;
Register receiver_map = a6;
Register scratch1 = a7;
@@ -4189,11 +3858,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
- scratch1, scratch2);
+ masm->isolate()->store_stub_cache()->GenerateProbe(
+ masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
__ bind(&miss);
StoreIC::GenerateMiss(masm);
@@ -4203,13 +3869,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4276,13 +3940,12 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ Branch(miss);
}
-
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
- Register key = VectorStoreICDescriptor::NameRegister(); // a2
- Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
- Register slot = VectorStoreICDescriptor::SlotRegister(); // a4
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
+ Register key = StoreWithVectorDescriptor::NameRegister(); // a2
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // a4
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
Register feedback = a5;
Register receiver_map = a6;
Register scratch1 = a7;
@@ -4527,19 +4190,13 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
@@ -4547,8 +4204,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
@@ -4567,13 +4222,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4657,7 +4314,7 @@ void InternalArrayConstructorStub::GenerateCase(
InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0, lo, a0, Operand(1));
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN, hi, a0, Operand(1));
if (IsFastPackedElementsKind(kind)) {
@@ -4758,15 +4415,15 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ sd(a2, MemOperand(v0, JSObject::kMapOffset));
+ __ sd(a2, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, MemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, MemOperand(v0, JSObject::kElementsOffset));
+ __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ Daddu(a1, v0, Operand(JSObject::kHeaderSize));
+ __ Daddu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
- // -- v0 : result (untagged)
+ // -- v0 : result (tagged)
// -- a1 : result fields (untagged)
// -- a5 : result end (untagged)
// -- a2 : initial map
@@ -4784,11 +4441,7 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(a1, a5, a0);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Ret(USE_DELAY_SLOT);
- __ Daddu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Ret();
}
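The switch from MemOperand to FieldMemOperand follows from v0 now holding a tagged pointer throughout: FieldMemOperand folds the heap-object tag into the displacement, so a field at byte offset N of a tagged object is addressed at base + N - kHeapObjectTag. A sketch of that addressing, assuming kHeapObjectTag == 1 as the STATIC_ASSERT above states (types here are illustrative):

    // How FieldMemOperand-style addressing absorbs the pointer tag:
    // untagged base + offset and tagged base + (offset - tag) reach the
    // same byte, so tagging v0 up front costs nothing per field access.
    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;

    uint8_t* FieldAddress(uint8_t* tagged_base, intptr_t byte_offset) {
      return tagged_base + byte_offset - kHeapObjectTag;
    }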
__ bind(&slack_tracking);
{
@@ -4811,9 +4464,7 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Daddu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Branch(&finalize, eq, a3, Operand(zero_reg));
__ Ret();
// Finalize the instance size.
@@ -4839,10 +4490,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(a2);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Dsubu(v0, v0, Operand(kHeapObjectTag));
__ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
__ Dlsa(a5, v0, a5, kPointerSizeLog2);
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Dsubu(a5, a5, Operand(kHeapObjectTag));
__ jmp(&done_allocate);
// Fall back to %NewObject.
@@ -4861,19 +4512,19 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(a1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make a2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ Branch(USE_DELAY_SLOT, &loop_entry);
- __ mov(a2, fp); // In delay slot.
- __ bind(&loop);
+ // Make a2 point to the JavaScript frame.
+ __ mov(a2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&loop, ne, a1, Operand(a3));
+ __ Branch(&ok, eq, a1, Operand(a3));
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
@@ -4889,10 +4540,10 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label rest_parameters;
__ SmiLoadUntag(
a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a1,
- FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Dsubu(a0, a0, Operand(a1));
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3,
+ FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Dsubu(a0, a0, Operand(a3));
__ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
// Return an empty rest parameter array.
@@ -4905,7 +4556,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in v0.
@@ -4939,15 +4590,16 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- a0 : number of rest parameters
+ // -- a1 : function
// -- a2 : pointer to first rest parameters
// -- ra : return address
// -----------------------------------
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
- __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ Dlsa(a1, a1, a0, kPointerSizeLog2);
- __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+ __ li(a5, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ Dlsa(a5, a5, a0, kPointerSizeLog2);
+ __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Compute arguments.length in a4.
@@ -4982,18 +4634,26 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a3); // In delay slot
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ Branch(&too_big_for_new_space, gt, a5,
+ Operand(Page::kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(a0);
- __ SmiTag(a1);
- __ Push(a0, a2, a1);
+ __ SmiTag(a5);
+ __ Push(a0, a2, a5);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(a0, a2);
__ SmiUntag(a0);
}
__ jmp(&done_allocate);
+
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ Push(a1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
}
@@ -5007,24 +4667,40 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(a1);
+ // Make t0 point to the JavaScript frame.
+ __ mov(t0, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ ld(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ ld(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
+ __ Branch(&ok, eq, a1, Operand(a3));
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Lsa(a3, fp, a2, kPointerSizeLog2);
+ __ Lsa(a3, t0, a2, kPointerSizeLog2);
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
__ SmiTag(a2);
// a1 : function
// a2 : number of parameters (tagged)
// a3 : parameters pointer
+ // t0 : JavaScript frame pointer
// Registers used over whole function:
// a5 : arguments count (tagged)
// a6 : mapped parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a4, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
__ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&adaptor_frame, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -5072,7 +4748,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
+ __ Allocate(t1, v0, t1, a4, &runtime, NO_ALLOCATION_FLAGS);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -5227,19 +4903,19 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(a1);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make a2 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ Branch(USE_DELAY_SLOT, &loop_entry);
- __ mov(a2, fp); // In delay slot.
- __ bind(&loop);
+ // Make a2 point to the JavaScript frame.
+ __ mov(a2, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
- __ Branch(&loop, ne, a1, Operand(a3));
+ __ Branch(&ok, eq, a1, Operand(a3));
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
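
Both arguments stubs now trade the old frame-walking loop for a single, statically known hop plus a debug-mode assert. A rough model of the two strategies, assuming a simplified frame record in place of StandardFrameConstants:

    #include <cassert>

    // Hypothetical frame record; the fields stand in for the slots at
    // kCallerFPOffset and kFunctionOffset.
    struct Frame {
      Frame* caller_fp;
      const void* function;
    };

    // Old shape: walk the chain until the frame owning |fn| is found.
    Frame* FindJsFrameByLoop(Frame* fp, const void* fn) {
      while (fp->function != fn) fp = fp->caller_fp;
      return fp;
    }

    // New shape: at most one hop, decided statically per stub, then a
    // debug-only check standing in for the FLAG_debug_code Abort path.
    Frame* FindJsFrameByHop(Frame* fp, const void* fn, bool skip_stub_frame) {
      if (skip_stub_frame) fp = fp->caller_fp;  // skip handler/stub frame
      assert(fp->function == fn);
      return fp;
    }
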
// Check if we have an arguments adaptor frame below the function frame.
@@ -5249,9 +4925,9 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Branch(&arguments_adaptor, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
{
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
- FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldMemOperand(a4, SharedFunctionInfo::kFormalParameterCountOffset));
__ Dlsa(a2, a2, a0, kPointerSizeLog2);
__ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
1 * kPointerSize));
@@ -5270,15 +4946,16 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- a0 : number of rest parameters
+ // -- a1 : function
// -- a2 : pointer to first rest parameters
// -- ra : return address
// -----------------------------------
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
- __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Dlsa(a1, a1, a0, kPointerSizeLog2);
- __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+ __ li(a5, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Dlsa(a5, a5, a0, kPointerSizeLog2);
+ __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Compute arguments.length in a4.
@@ -5313,48 +4990,26 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a3); // In delay slot
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ Branch(&too_big_for_new_space, gt, a5,
+ Operand(Page::kMaxRegularHeapObjectSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(a0);
- __ SmiTag(a1);
- __ Push(a0, a2, a1);
+ __ SmiTag(a5);
+ __ Push(a0, a2, a5);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(a0, a2);
__ SmiUntag(a0);
}
__ jmp(&done_allocate);
-}
-
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = cp;
- Register slot_reg = a2;
- Register result_reg = v0;
- Label slow_case;
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ ld(result_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = result_reg;
- }
-
- // Load the PropertyCell value at the specified slot.
- __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
- __ ld(result_reg, ContextMemOperand(at, 0));
- __ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
- // Check that value is not the_hole.
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&slow_case, eq, result_reg, Operand(at));
- __ Ret();
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Push(slot_reg);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ Push(a1);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
@@ -5636,7 +5291,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// Save context, callee and call data.
__ Push(context, callee, call_data);
@@ -5660,7 +5319,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
@@ -5680,8 +5339,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// justified by n64 ABI.
__ li(at, Operand(argc()));
__ sw(at, MemOperand(a0, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5698,8 +5355,9 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
- int32_t stack_space_offset = 4 * kPointerSize;
+ int32_t stack_space_offset = 3 * kPointerSize;
stack_space = argc() + FCA::kArgsLength + 1;
+ // TODO(adamk): Why are we clobbering this immediately?
stack_space_offset = kInvalidStackOffset;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_offset, return_value_operand,
@@ -5708,15 +5366,44 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- a2 : api_function_address
- // -----------------------------------
-
- Register api_function_address = ApiGetterDescriptor::function_address();
- DCHECK(api_function_address.is(a2));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = a4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ typedef PropertyCallbackArguments PCA;
+ __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK(Smi::FromInt(0) == nullptr);
+ __ sd(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ sd(scratch, MemOperand(sp, 0 * kPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5738,6 +5425,10 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ ld(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5746,7 +5437,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
return_value_operand, NULL);
}
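
The getter stub now materializes v8::PropertyCallbackInfo::args_ plus the name handle directly on the stack. A small sketch of the slot arithmetic, using the indices pinned down by the STATIC_ASSERTs above and an assumed 8-byte pointer size:

    #include <cstdint>

    // Slot 0 below the array holds the property name, hence the "+ 1"
    // in every store in the stub.
    enum PcaIndex : int {
      kShouldThrowOnErrorIndex = 0,
      kHolderIndex = 1,
      kIsolateIndex = 2,
      kReturnValueDefaultValueIndex = 3,
      kReturnValueOffset = 4,
      kDataIndex = 5,
      kThisIndex = 6,
      kArgsLength = 7
    };

    // Byte offset from sp for a slot, mirroring
    // MemOperand(sp, (index + 1) * kPointerSize) with 8-byte pointers.
    constexpr int64_t SlotOffset(PcaIndex index) {
      return (static_cast<int64_t>(index) + 1) * 8;
    }

    static_assert(SlotOffset(kThisIndex) == 56, "receiver is stored highest");
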
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 44d822b615..943c2a6e63 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_MIPS64
+#include <memory>
+
#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/mips64/simulator-mips64.h"
@@ -17,59 +19,6 @@ namespace internal {
#define __ masm.
-#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
- return Simulator::current(isolate)->CallFP(fast_exp_mips_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
-
- {
- DoubleRegister input = f12;
- DoubleRegister result = f0;
- DoubleRegister double_scratch1 = f4;
- DoubleRegister double_scratch2 = f6;
- Register temp1 = a4;
- Register temp2 = a5;
- Register temp3 = a6;
-
- __ MovFromFloatParameter(input);
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- __ MovToFloatResult(result);
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
- fast_exp_mips_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
@@ -95,8 +44,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// The size of each prefetch.
uint32_t pref_chunk = 32;
- // The maximum size of a prefetch, it must not be less then pref_chunk.
- // If the real size of a prefetch is greater then max_pref_size and
+ // The maximum size of a prefetch, it must not be less than pref_chunk.
+ // If the real size of a prefetch is greater than max_pref_size and
// the kPrefHintPrepareForStore hint is used, the code will not work
// correctly.
uint32_t max_pref_size = 128;
@@ -732,6 +681,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiScale(scratch, length, kDoubleSizeLog2);
__ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
__ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+ __ Dsubu(array, array, kHeapObjectTag);
// array: destination FixedDoubleArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
@@ -882,6 +832,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
+ __ Dsubu(array, array, kHeapObjectTag);
// array: destination FixedArray, not tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
@@ -1088,94 +1039,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- DCHECK(!input.is(result));
- DCHECK(!input.is(double_scratch1));
- DCHECK(!input.is(double_scratch2));
- DCHECK(!result.is(double_scratch1));
- DCHECK(!result.is(double_scratch2));
- DCHECK(!double_scratch1.is(double_scratch2));
- DCHECK(!temp1.is(temp2));
- DCHECK(!temp1.is(temp3));
- DCHECK(!temp2.is(temp3));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label zero, infinity, done;
- __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ ldc1(double_scratch1, ExpConstant(0, temp3));
- __ BranchF(&zero, NULL, ge, double_scratch1, input);
-
- __ ldc1(double_scratch2, ExpConstant(1, temp3));
- __ BranchF(&infinity, NULL, ge, input, double_scratch2);
-
- __ ldc1(double_scratch1, ExpConstant(3, temp3));
- __ ldc1(result, ExpConstant(4, temp3));
- __ mul_d(double_scratch1, double_scratch1, input);
- __ add_d(double_scratch1, double_scratch1, result);
- __ FmoveLow(temp2, double_scratch1);
- __ sub_d(double_scratch1, double_scratch1, result);
- __ ldc1(result, ExpConstant(6, temp3));
- __ ldc1(double_scratch2, ExpConstant(5, temp3));
- __ mul_d(double_scratch1, double_scratch1, double_scratch2);
- __ sub_d(double_scratch1, double_scratch1, input);
- __ sub_d(result, result, double_scratch1);
- __ mul_d(double_scratch2, double_scratch1, double_scratch1);
- __ mul_d(result, result, double_scratch2);
- __ ldc1(double_scratch2, ExpConstant(7, temp3));
- __ mul_d(result, result, double_scratch2);
- __ sub_d(result, result, double_scratch1);
- // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
- DCHECK(*reinterpret_cast<double*>
- (ExternalReference::math_exp_constants(8).address()) == 1);
- __ Move(double_scratch2, 1.);
- __ add_d(result, result, double_scratch2);
- __ dsrl(temp1, temp2, 11);
- __ Ext(temp2, temp2, 0, 11);
- __ Daddu(temp1, temp1, Operand(0x3ff));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ Dlsa(temp3, temp3, temp2, 3);
- __ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
- __ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
- // The first word loaded is in the lower number register.
- if (temp2.code() < temp3.code()) {
- __ dsll(at, temp1, 20);
- __ Or(temp1, temp3, at);
- __ Move(double_scratch1, temp2, temp1);
- } else {
- __ dsll(at, temp1, 20);
- __ Or(temp1, temp2, at);
- __ Move(double_scratch1, temp3, temp1);
- }
- __ mul_d(result, result, double_scratch1);
- __ BranchShort(&done);
-
- __ bind(&zero);
- __ Move(result, kDoubleRegZero);
- __ BranchShort(&done);
-
- __ bind(&infinity);
- __ ldc1(result, ExpConstant(2, temp3));
-
- __ bind(&done);
-}
-
#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
@@ -1189,7 +1052,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before MIPS simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(
+ std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
diff --git a/deps/v8/src/mips64/codegen-mips64.h b/deps/v8/src/mips64/codegen-mips64.h
index ad7abb30c5..48853de659 100644
--- a/deps/v8/src/mips64/codegen-mips64.h
+++ b/deps/v8/src/mips64/codegen-mips64.h
@@ -6,8 +6,6 @@
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
-
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -29,23 +27,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- // Register input isn't modified. All other registers are clobbered.
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 57e947b138..d2b1e92957 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -60,6 +60,27 @@ const bool IsMipsSoftFloatABI = true;
const bool IsMipsSoftFloatABI = true;
#endif
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kMipsLwrOffset = 0;
+const uint32_t kMipsLwlOffset = 3;
+const uint32_t kMipsSwrOffset = 0;
+const uint32_t kMipsSwlOffset = 3;
+const uint32_t kMipsLdrOffset = 0;
+const uint32_t kMipsLdlOffset = 7;
+const uint32_t kMipsSdrOffset = 0;
+const uint32_t kMipsSdlOffset = 7;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kMipsLwrOffset = 3;
+const uint32_t kMipsLwlOffset = 0;
+const uint32_t kMipsSwrOffset = 3;
+const uint32_t kMipsSwlOffset = 0;
+const uint32_t kMipsLdrOffset = 7;
+const uint32_t kMipsLdlOffset = 0;
+const uint32_t kMipsSdrOffset = 7;
+const uint32_t kMipsSdlOffset = 0;
+#else
+#error Unknown endianness
+#endif
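
These constants encode where the lwr/lwl (and ldr/ldl) anchor bytes live relative to the base address, which is the only part of an unaligned access that depends on byte order. A portable stand-in for what the instruction pair ultimately computes:

    #include <cstdint>
    #include <cstring>

    // lwr fills the low-order end of the register, lwl the high-order
    // end; the anchor bytes sit at base + kMipsLwrOffset and
    // base + kMipsLwlOffset, which is why the constants flip between
    // the two endiannesses.
    uint32_t UnalignedLoad32(const uint8_t* base) {
      uint32_t value;
      std::memcpy(&value, base, sizeof(value));  // byte-wise, alignment-safe
      return value;
    }
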
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
@@ -384,6 +405,7 @@ enum SecondaryField : uint32_t {
MOVZ = ((1U << 3) + 2),
MOVN = ((1U << 3) + 3),
BREAK = ((1U << 3) + 5),
+ SYNC = ((1U << 3) + 7),
MFHI = ((2U << 3) + 0),
CLZ_R6 = ((2U << 3) + 0),
@@ -645,7 +667,6 @@ enum SecondaryField : uint32_t {
NULLSF = 0U
};
-
// ----- Emulated conditions.
// On MIPS we use this enum to abstract from conditional branch instructions.
// The 'U' prefix is used to specify unsigned comparisons.
@@ -911,7 +932,6 @@ class Instruction {
enum TypeChecks { NORMAL, EXTRA };
-
static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
@@ -926,12 +946,14 @@ class Instruction {
OpcodeToBitNumber(POP76) | OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) |
OpcodeToBitNumber(LWL) | OpcodeToBitNumber(LW) | OpcodeToBitNumber(LWU) |
OpcodeToBitNumber(LD) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) |
- OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
+ OpcodeToBitNumber(LDL) | OpcodeToBitNumber(LDR) | OpcodeToBitNumber(LWR) |
+ OpcodeToBitNumber(SDL) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SD) |
- OpcodeToBitNumber(SWR) | OpcodeToBitNumber(LWC1) |
- OpcodeToBitNumber(LDC1) | OpcodeToBitNumber(SWC1) |
- OpcodeToBitNumber(SDC1) | OpcodeToBitNumber(PCREL) |
- OpcodeToBitNumber(DAUI) | OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
+ OpcodeToBitNumber(SWR) | OpcodeToBitNumber(SDR) |
+ OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) |
+ OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) |
+ OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(DAUI) |
+ OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
#define FunctionFieldToBitNumber(function) (1ULL << function)
@@ -964,8 +986,7 @@ class Instruction {
FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) |
FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) |
FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
- FunctionFieldToBitNumber(SELNEZ_S);
-
+ FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
// Get the encoding type of the instruction.
inline Type InstructionType(TypeChecks checks = NORMAL) const;
@@ -1216,11 +1237,10 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
int sa = SaFieldRaw() >> kSaShift;
switch (sa) {
case BITSWAP:
- return kRegisterType;
case WSBH:
case SEB:
case SEH:
- return kUnsupported;
+ return kRegisterType;
}
sa >>= kBp2Bits;
switch (sa) {
@@ -1234,10 +1254,9 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
int sa = SaFieldRaw() >> kSaShift;
switch (sa) {
case DBITSWAP:
- return kRegisterType;
case DSBH:
case DSHD:
- return kUnsupported;
+ return kRegisterType;
}
sa = SaFieldRaw() >> kSaShift;
sa >>= kBp3Bits;
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 90bd11e5e3..ea17124c63 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -117,8 +117,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all FPU registers before messing with them.
__ Dsubu(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 3d0e10c20a..5485f3ee95 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -375,7 +375,8 @@ void Decoder::PrintXImm26(Instruction* instr) {
uint64_t target = static_cast<uint64_t>(instr->Imm26Value())
<< kImmFieldShift;
target = (reinterpret_cast<uint64_t>(instr) & ~0xfffffff) | target;
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%lx", target);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%" PRIx64, target);
}
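
The format-string changes in this file all follow one pattern: "%lx" silently assumes long is 64 bits wide, while PRIx64 from <cinttypes> expands to the correct conversion for uint64_t on every target. A minimal example of the portable form:

    #include <cinttypes>
    #include <cstdio>

    // PRIx64 always matches uint64_t, whereas "%lx" is only correct
    // where long happens to be 64 bits.
    void PrintTarget(uint64_t target) {
      std::printf("0x%" PRIx64 "\n", target);
    }
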
@@ -801,16 +802,12 @@ int Decoder::DecodeBreakInstr(Instruction* instr) {
if (instr->Bits(25, 6) == static_cast<int>(kMaxStopCode)) {
// This is stop(msg).
Format(instr, "break, code: 'code");
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "\n%p %08lx stop msg: %s",
- static_cast<void*>
- (reinterpret_cast<int32_t*>(instr
- + Instruction::kInstrSize)),
- reinterpret_cast<uint64_t>
- (*reinterpret_cast<char**>(instr
- + Instruction::kInstrSize)),
- *reinterpret_cast<char**>(instr
- + Instruction::kInstrSize));
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
+ static_cast<void*>(
+ reinterpret_cast<int32_t*>(instr + Instruction::kInstrSize)),
+ reinterpret_cast<uint64_t>(
+ *reinterpret_cast<char**>(instr + Instruction::kInstrSize)));
// Size 3: the break_ instr, plus embedded 64-bit char pointer.
return 3 * Instruction::kInstrSize;
} else {
@@ -1162,26 +1159,22 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
if (instr->RsValue() == 0) {
Format(instr, "srl 'rd, 'rt, 'sa");
} else {
- if (kArchVariant == kMips64r2) {
- Format(instr, "rotr 'rd, 'rt, 'sa");
- } else {
- Unknown(instr);
- }
+ Format(instr, "rotr 'rd, 'rt, 'sa");
}
break;
case DSRL:
if (instr->RsValue() == 0) {
Format(instr, "dsrl 'rd, 'rt, 'sa");
} else {
- if (kArchVariant == kMips64r2) {
- Format(instr, "drotr 'rd, 'rt, 'sa");
- } else {
- Unknown(instr);
- }
+ Format(instr, "drotr 'rd, 'rt, 'sa");
}
break;
case DSRL32:
- Format(instr, "dsrl32 'rd, 'rt, 'sa");
+ if (instr->RsValue() == 0) {
+ Format(instr, "dsrl32 'rd, 'rt, 'sa");
+ } else {
+ Format(instr, "drotr32 'rd, 'rt, 'sa");
+ }
break;
case SRA:
Format(instr, "sra 'rd, 'rt, 'sa");
@@ -1202,22 +1195,14 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
if (instr->SaValue() == 0) {
Format(instr, "srlv 'rd, 'rt, 'rs");
} else {
- if (kArchVariant == kMips64r2) {
- Format(instr, "rotrv 'rd, 'rt, 'rs");
- } else {
- Unknown(instr);
- }
+ Format(instr, "rotrv 'rd, 'rt, 'rs");
}
break;
case DSRLV:
if (instr->SaValue() == 0) {
Format(instr, "dsrlv 'rd, 'rt, 'rs");
} else {
- if (kArchVariant == kMips64r2) {
- Format(instr, "drotrv 'rd, 'rt, 'rs");
- } else {
- Unknown(instr);
- }
+ Format(instr, "drotrv 'rd, 'rt, 'rs");
}
break;
case SRAV:
@@ -1400,6 +1385,9 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
case TNE:
Format(instr, "tne 'rs, 'rt, code: 'code");
break;
+ case SYNC:
+ Format(instr, "sync");
+ break;
case MOVZ:
Format(instr, "movz 'rd, 'rs, 'rt");
break;
@@ -1467,11 +1455,18 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
Format(instr, "bitswap 'rd, 'rt");
break;
}
- case SEB:
- case SEH:
- case WSBH:
- UNREACHABLE();
+ case SEB: {
+ Format(instr, "seb 'rd, 'rt");
+ break;
+ }
+ case SEH: {
+ Format(instr, "seh 'rd, 'rt");
+ break;
+ }
+ case WSBH: {
+ Format(instr, "wsbh 'rd, 'rt");
break;
+ }
default: {
sa >>= kBp2Bits;
switch (sa) {
@@ -1502,10 +1497,14 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
break;
}
- case DSBH:
- case DSHD:
- UNREACHABLE();
+ case DSBH: {
+ Format(instr, "dsbh 'rd, 'rt");
break;
+ }
+ case DSHD: {
+ Format(instr, "dshd 'rd, 'rt");
+ break;
+ }
default: {
sa >>= kBp3Bits;
switch (sa) {
@@ -1948,7 +1947,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -2011,8 +2010,8 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- v8::internal::PrintF(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 7695d0b694..77c71aae78 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -11,6 +11,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return a1;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return a0; }
const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
@@ -23,13 +36,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
const Register StoreDescriptor::ValueRegister() { return a0; }
+const Register StoreDescriptor::SlotRegister() { return a4; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return a4; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return a4; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
@@ -39,23 +48,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return a5; }
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return a2; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-const Register InstanceOfDescriptor::LeftRegister() { return a1; }
-const Register InstanceOfDescriptor::RightRegister() { return a0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
-
-const Register ApiGetterDescriptor::function_address() { return a2; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return a0; }
+const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register MathPowTaggedDescriptor::exponent() { return a2; }
@@ -75,13 +76,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a3};
@@ -247,50 +241,34 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// a0 -- number of arguments
// a1 -- function
// a2 -- allocation site with elements kind
- Register registers[] = {a1, a2};
+ Register registers[] = {a1, a2, a0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {a1, a2, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// a0 -- number of arguments
- // a1 -- constructor function
- Register registers[] = {a1};
+ // a1 -- function
+ // a2 -- allocation site with elements kind
+ Register registers[] = {a1, a2, a0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {a1, a0};
+ Register registers[] = {a1, a2, a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {a0};
@@ -316,6 +294,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // a1 -- lhs
+ // a0 -- rhs
+ // a4 -- slot id
+ // a3 -- vector
+ Register registers[] = {a1, a0, a4, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -376,9 +370,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -413,6 +406,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ v0, // the value to pass to the generator
+ a1, // the JSGeneratorObject to resume
+ a2 // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index fb83fe9b76..aa0de26b88 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -17,6 +17,18 @@
namespace v8 {
namespace internal {
+// Floating point constants.
+const uint64_t kDoubleSignMask = Double::kSignMask;
+const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
+const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
+const uint64_t kDoubleNaNMask = Double::kExponentMask | (1L << kDoubleNaNShift);
+
+const uint32_t kSingleSignMask = kBinary32SignMask;
+const uint32_t kSingleExponentMask = kBinary32ExponentMask;
+const uint32_t kSingleExponentShift = kBinary32ExponentShift;
+const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
+const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
+
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: Assembler(arg_isolate, buffer, size),
@@ -29,7 +41,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
}
}
-
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
@@ -1321,37 +1332,245 @@ void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
}
}
+void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ bnvc(rs, rt, &skip);
+ BranchLong(L, PROTECT);
+ bind(&skip);
+ } else {
+ bovc(rs, rt, L);
+ }
+}
+
+void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
+ if (is_trampoline_emitted()) {
+ Label skip;
+ bovc(rs, rt, &skip);
+ BranchLong(L, PROTECT);
+ bind(&skip);
+ } else {
+ bnvc(rs, rt, L);
+ }
+}
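
Bovc/Bnvc wrap the compact r6 branches with the usual range-extension trick: once the trampoline pool has been emitted the short branch may not reach its target, so the wrapper branches on the inverted condition over an unconditional long jump. The control-flow shape, modelled in plain C++ with a hand-rolled overflow check standing in for the hardware condition:

    #include <cstdint>
    #include <limits>

    // Stand-in for the bovc/bnvc condition.
    bool AddOverflows(int32_t a, int32_t b) {
      int64_t sum = static_cast<int64_t>(a) + b;
      return sum < std::numeric_limits<int32_t>::min() ||
             sum > std::numeric_limits<int32_t>::max();
    }

    // Direct form when the target is reachable, inverted form when a
    // long branch is needed: bnvc skips the BranchLong unless there is
    // an overflow.
    void Dispatch(int32_t a, int32_t b, void (*on_overflow)(), bool in_range) {
      if (in_range) {
        if (AddOverflows(a, b)) on_overflow();  // bovc, short branch
        return;
      }
      if (!AddOverflows(a, b)) return;  // bnvc over the long branch
      on_overflow();                    // BranchLong(L, PROTECT)
    }
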
// ------------Pseudo-instructions-------------
+// Change endianness
+void MacroAssembler::ByteSwapSigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
+ operand_size == 8);
+ DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
+ if (operand_size == 1) {
+ seb(src, src);
+ sll(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else if (operand_size == 2) {
+ seh(src, src);
+ sll(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else if (operand_size == 4) {
+ sll(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else {
+ dsbh(dest, src);
+ dshd(dest, dest);
+ }
+}
+
+void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
+ if (operand_size == 1) {
+ andi(src, src, 0xFF);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else if (operand_size == 2) {
+ andi(src, src, 0xFFFF);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ } else {
+ dsll32(src, src, 0);
+ dsrl32(src, src, 0);
+ dsbh(dest, src);
+ dshd(dest, dest);
+ }
+}
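
Both byte-swap helpers bottom out in the dsbh+dshd pair. A C++ model of what that pair computes on a 64-bit value:

    #include <cstdint>

    // dsbh swaps the bytes inside each 16-bit lane, dshd then reverses
    // the four lanes, so together they reverse all eight bytes.
    uint64_t ByteSwap64(uint64_t v) {
      // dsbh
      v = ((v & 0x00FF00FF00FF00FFULL) << 8) |
          ((v >> 8) & 0x00FF00FF00FF00FFULL);
      // dshd
      v = ((v & 0x000000000000FFFFULL) << 48) |
          ((v & 0x00000000FFFF0000ULL) << 16) |
          ((v >> 16) & 0x00000000FFFF0000ULL) | (v >> 48);
      return v;
    }
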
+
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
- lwr(rd, rs);
- lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ lw(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsLwrOffset) &&
+ is_int16(rs.offset() + kMipsLwlOffset)) {
+ if (!rd.is(rs.rm())) {
+ lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ } else {
+ lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+ lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+ mov(rd, at);
+ }
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ lwr(rd, MemOperand(at, kMipsLwrOffset));
+ lwl(rd, MemOperand(at, kMipsLwlOffset));
+ }
+ }
+}
+
+void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
+ if (kArchVariant == kMips64r6) {
+ lwu(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ Ulw(rd, rs);
+ Dext(rd, rd, 0, 32);
+ }
}
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
- swr(rd, rs);
- swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ sw(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsSwrOffset) &&
+ is_int16(rs.offset() + kMipsSwlOffset)) {
+ swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+ swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+ } else {
+ LoadRegPlusOffsetToAt(rs);
+ swr(rd, MemOperand(at, kMipsSwrOffset));
+ swl(rd, MemOperand(at, kMipsSwlOffset));
+ }
+ }
}
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ lh(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(at, rs);
+ lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ lb(rd, rs);
+#endif
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lb(rd, MemOperand(at, 1));
+ lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lb(rd, MemOperand(at, 0));
+ lbu(at, MemOperand(at, 1));
+#endif
+ }
+ dsll(rd, rd, 8);
+ or_(rd, rd, at);
+ }
+}
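
On pre-r6 cores Ulh assembles the halfword from two byte loads, then combines them with dsll(rd, rd, 8) / or_(rd, rd, at); the sign-extending lb is aimed at whichever address holds the high byte, which depends on target endianness. The same computation in portable C++:

    #include <cstdint>

    int32_t UnalignedLoadHalfSigned(const uint8_t* p, bool little_endian) {
      uint8_t lo = little_endian ? p[0] : p[1];                      // lbu
      int8_t hi = static_cast<int8_t>(little_endian ? p[1] : p[0]);  // lb
      return (static_cast<int32_t>(hi) << 8) | lo;
    }
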
-// Do 64-bit load from unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
- // Assert fail if the offset from start of object IS actually aligned.
- // ONLY use with known misalignment, since there is performance cost.
- DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- if (kArchEndian == kLittle) {
- lwu(rd, rs);
- lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
- dsll32(scratch, scratch, 0);
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ lhu(rd, rs);
} else {
- lw(rd, rs);
- lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
- dsll32(rd, rd, 0);
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(at, rs);
+ lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ lbu(rd, rs);
+#endif
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ lbu(rd, MemOperand(at, 1));
+ lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ lbu(rd, MemOperand(at, 0));
+ lbu(at, MemOperand(at, 1));
+#endif
+ }
+ dsll(rd, rd, 8);
+ or_(rd, rd, at);
+ }
+}
+
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ DCHECK(!rs.rm().is(scratch));
+ DCHECK(!scratch.is(at));
+ if (kArchVariant == kMips64r6) {
+ sh(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ MemOperand source = rs;
+ // If offset > 16 bits, load address to at with offset 0.
+ if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+ LoadRegPlusOffsetToAt(rs);
+ source = MemOperand(at, 0);
+ }
+
+ if (!scratch.is(rd)) {
+ mov(scratch, rd);
+ }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ sb(scratch, source);
+ srl(scratch, scratch, 8);
+ sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+ srl(scratch, scratch, 8);
+ sb(scratch, source);
+#endif
+ }
+}
+
+void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ ld(rd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsLdrOffset) &&
+ is_int16(rs.offset() + kMipsLdlOffset)) {
+ if (!rd.is(rs.rm())) {
+ ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+ ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+ } else {
+ ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
+ ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
+ mov(rd, at);
+ }
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ ldr(rd, MemOperand(at, kMipsLdrOffset));
+ ldl(rd, MemOperand(at, kMipsLdlOffset));
+ }
}
- Daddu(rd, rd, scratch);
}
@@ -1366,21 +1585,22 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Daddu(rd, rd, scratch);
}
-
-// Do 64-bit store to unaligned address. Note this only handles
-// the specific case of 32-bit aligned, but not 64-bit aligned.
-void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
- // Assert fail if the offset from start of object IS actually aligned.
- // ONLY use with known misalignment, since there is performance cost.
- DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- if (kArchEndian == kLittle) {
- sw(rd, rs);
- dsrl32(scratch, rd, 0);
- sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
+ DCHECK(!rd.is(at));
+ DCHECK(!rs.rm().is(at));
+ if (kArchVariant == kMips64r6) {
+ sd(rd, rs);
} else {
- sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
- dsrl32(scratch, rd, 0);
- sw(scratch, rs);
+ DCHECK(kArchVariant == kMips64r2);
+ if (is_int16(rs.offset() + kMipsSdrOffset) &&
+ is_int16(rs.offset() + kMipsSdlOffset)) {
+ sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
+ sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
+ } else {
+ LoadRegPlusOffsetToAt(rs);
+ sdr(rd, MemOperand(at, kMipsSdrOffset));
+ sdl(rd, MemOperand(at, kMipsSdlOffset));
+ }
}
}
@@ -1393,23 +1613,56 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ if (kArchVariant == kMips64r6) {
+ lwc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ Ulw(scratch, rs);
+ mtc1(scratch, fd);
+ }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ if (kArchVariant == kMips64r6) {
+ swc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ mfc1(scratch, fd);
+ Usw(scratch, rs);
+ }
+}
-void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- li(dst, Operand(value), mode);
- } else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- li(dst, Operand(cell));
- ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- li(dst, Operand(value));
- }
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK(!scratch.is(at));
+ if (kArchVariant == kMips64r6) {
+ ldc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ Uld(scratch, rs);
+ dmtc1(scratch, fd);
}
}
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK(!scratch.is(at));
+ if (kArchVariant == kMips64r6) {
+ sdc1(fd, rs);
+ } else {
+ DCHECK(kArchVariant == kMips64r2);
+ dmfc1(scratch, fd);
+ Usd(scratch, rs);
+ }
+}
+
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+ li(dst, Operand(value), mode);
+}
+
static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
if ((imm >> (bitnum - 1)) & 0x1) {
imm = (imm >> bitnum) + 1;
@@ -1706,6 +1959,61 @@ void MacroAssembler::Ins(Register rt,
ins_(rt, rs, pos, size);
}
+void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+ if (kArchVariant == kMips64r2) {
+ Label is_nan, done;
+ BranchF32(nullptr, &is_nan, eq, fs, fs);
+ Branch(USE_DELAY_SLOT, &done);
+ // For NaN input, neg_s will return the same NaN value,
+ // while the sign has to be changed separately.
+ neg_s(fd, fs); // In delay slot.
+ bind(&is_nan);
+ mfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~kBinary32SignMask));
+ And(scratch1, scratch1, Operand(kBinary32SignMask));
+ Xor(scratch1, scratch1, Operand(kBinary32SignMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+ bind(&done);
+ } else {
+ mfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~kBinary32SignMask));
+ And(scratch1, scratch1, Operand(kBinary32SignMask));
+ Xor(scratch1, scratch1, Operand(kBinary32SignMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+ }
+}
+
+void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+ if (kArchVariant == kMips64r2) {
+ Label is_nan, done;
+ BranchF64(nullptr, &is_nan, eq, fs, fs);
+ Branch(USE_DELAY_SLOT, &done);
+ // For NaN input, neg_d will return the same NaN value,
+ // while the sign has to be changed separately.
+ neg_d(fd, fs); // In delay slot.
+ bind(&is_nan);
+ dmfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~Double::kSignMask));
+ And(scratch1, scratch1, Operand(Double::kSignMask));
+ Xor(scratch1, scratch1, Operand(Double::kSignMask));
+ Or(scratch2, scratch2, scratch1);
+ dmtc1(scratch2, fd);
+ bind(&done);
+ } else {
+ dmfc1(scratch1, fs);
+ And(scratch2, scratch1, Operand(~Double::kSignMask));
+ And(scratch1, scratch1, Operand(Double::kSignMask));
+ Xor(scratch1, scratch1, Operand(Double::kSignMask));
+ Or(scratch2, scratch2, scratch1);
+ dmtc1(scratch2, fd);
+ }
+}
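
Neg_s/Neg_d exist because, as the comments above note, a plain neg on a NaN input returns the NaN unchanged; the helpers instead toggle the sign bit in the integer domain, leaving the NaN payload intact. A bit-level C++ equivalent for the double case:

    #include <cstdint>
    #include <cstring>

    // Toggle only the sign bit (Double::kSignMask) so a NaN keeps its
    // payload instead of round-tripping through the FPU.
    double NegatePreservingNaN(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits ^= 0x8000000000000000ULL;
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }
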
void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
@@ -2279,14 +2587,12 @@ void MacroAssembler::Move(FPURegister dst, float imm) {
void MacroAssembler::Move(FPURegister dst, double imm) {
- static const DoubleRepresentation minus_zero(-0.0);
- static const DoubleRepresentation zero(0.0);
- DoubleRepresentation value_rep(imm);
+ int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
- if (value_rep == zero && has_double_zero_reg_set_) {
+ if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
- } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
- neg_d(dst, kDoubleRegZero);
+ } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
+ Neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
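
The switch to bit_cast here matters because 0.0 == -0.0 under floating-point comparison; only the bit patterns tell the two apart, and -0.0 must be routed to the Neg_d(kDoubleRegZero) path. A self-contained check showing the distinction:

    #include <cstdint>
    #include <cstring>

    // As doubles the two zeros compare equal, so only a bitwise
    // comparison can detect the negative one.
    bool IsMinusZero(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return bits == 0x8000000000000000ULL;  // sign bit set, all else zero
    }
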
@@ -3843,9 +4149,6 @@ void MacroAssembler::Call(Address target,
Label start;
bind(&start);
int64_t target_int = reinterpret_cast<int64_t>(target);
- // Must record previous source positions before the
- // li() generates a new code target.
- positions_recorder()->WriteRecordedPositions();
li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
Call(t9, cond, rs, rt, bd);
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
@@ -4142,12 +4445,14 @@ void MacroAssembler::Allocate(int object_size,
// to calculate the new top.
Daddu(result_end, result, Operand(object_size));
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
- sd(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Daddu(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ sd(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ Daddu(result, result, Operand(kHeapObjectTag));
}
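
The ALLOCATION_FOLDING_DOMINATOR case above splits the work: the dominator performs the limit check for a whole group of folded allocations but leaves the top pointer alone, and the folded allocations then bump top without re-checking the limit. A rough model of that split, under the assumption that kHeapObjectTag is 1:

    #include <cstdint>

    struct NewSpace {
      uint64_t top;
      uint64_t limit;
    };

    // Dominator: limit check for the combined size, top left untouched.
    bool ReserveGroup(const NewSpace& space, uint64_t group_size) {
      return space.top + group_size <= space.limit;
    }

    // Folded allocation: bump top by its own size; no limit check is
    // needed because the dominator already reserved the whole span.
    uint64_t AllocateInGroup(NewSpace* space, uint64_t object_size) {
      uint64_t result = space->top;
      space->top += object_size;
      return result + 1;  // tag as a heap object
    }
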
@@ -4217,6 +4522,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
Daddu(result_end, result, Operand(object_size));
}
+
Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
// Update allocation top. result temporarily holds the new top.
@@ -4224,14 +4530,91 @@ void MacroAssembler::Allocate(Register object_size, Register result,
And(at, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
}
- sd(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- Daddu(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ sd(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ Daddu(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, at));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK(0 == (object_size & kObjectAlignmentMask));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ li(top_address, Operand(allocation_top));
+ ld(result, MemOperand(top_address));
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ if (emit_debug_code()) {
+ And(at, result, Operand(kDoubleAlignmentMask));
+ Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
}
+
+ // Calculate new top and write it back.
+ Daddu(result_end, result, Operand(object_size));
+ sd(result_end, MemOperand(top_address));
+
+ Daddu(result, result, Operand(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap; other registers must not.
+ DCHECK(!AreAliased(object_size, result, scratch, at));
+ DCHECK(!AreAliased(result_end, result, scratch, at));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address and object size registers.
+ Register top_address = scratch;
+ li(top_address, Operand(allocation_top));
+ ld(result, MemOperand(top_address));
+
+ // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
+ // the same alignment on MIPS64.
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+
+ if (emit_debug_code()) {
+ And(at, result, Operand(kDoubleAlignmentMask));
+ Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+ }
+
+ // Calculate new top and write it back.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ Dlsa(result_end, result, object_size, kPointerSizeLog2);
+ } else {
+ Daddu(result_end, result, Operand(object_size));
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ And(at, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
+ }
+
+ Daddu(result, result, Operand(kHeapObjectTag));
+}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@@ -4248,12 +4631,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@@ -4277,12 +4656,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -4296,7 +4671,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
@@ -4309,12 +4684,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -4327,7 +4698,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@@ -4343,7 +4714,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -4369,12 +4740,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch2,
Register heap_number_map,
Label* need_gc,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -4382,11 +4752,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
- sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -4410,7 +4776,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -4602,6 +4969,72 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
sdc1(double_result, MemOperand(scratch1, 0));
}
+void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
+ FPURegister fs,
+ FPURegister ft) {
+ FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+ Label check_nan, save_payload, done;
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+
+ sub_s(dest, fs, ft);
+ // Check if the result of subtraction is NaN.
+ BranchF32(nullptr, &check_nan, eq, fs, ft);
+ Branch(USE_DELAY_SLOT, &done);
+ dest.is(fd) ? nop() : mov_s(fd, dest);
+
+ bind(&check_nan);
+ // Check if first operand is a NaN.
+ mfc1(scratch1, fs);
+ BranchF32(nullptr, &save_payload, eq, fs, fs);
+ // Second operand must be a NaN.
+ mfc1(scratch1, ft);
+
+ bind(&save_payload);
+ // Preserve payload.
+ And(scratch1, scratch1,
+ Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
+ mfc1(scratch2, dest);
+ And(scratch2, scratch2, Operand(kSingleNaNMask));
+ Or(scratch2, scratch2, scratch1);
+ mtc1(scratch2, fd);
+
+ bind(&done);
+}
+
+void MacroAssembler::SubNanPreservePayloadAndSign_d(FPURegister fd,
+ FPURegister fs,
+ FPURegister ft) {
+ FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
+ Label check_nan, save_payload, done;
+ Register scratch1 = t8;
+ Register scratch2 = t9;
+
+ sub_d(dest, fs, ft);
+ // Check if the result of subtraction is NaN.
+ BranchF64(nullptr, &check_nan, eq, fs, ft);
+ Branch(USE_DELAY_SLOT, &done);
+ dest.is(fd) ? nop() : mov_d(fd, dest);
+
+ bind(&check_nan);
+ // Check if first operand is a NaN.
+ dmfc1(scratch1, fs);
+ BranchF64(nullptr, &save_payload, eq, fs, fs);
+ // Second operand must be a NaN.
+ dmfc1(scratch1, ft);
+
+ bind(&save_payload);
+ // Preserve payload.
+ li(at, Operand(kDoubleSignMask | (1L << kDoubleNaNShift)));
+ Dsubu(at, at, Operand(1));
+ And(scratch1, scratch1, at);
+ dmfc1(scratch2, dest);
+ And(scratch2, scratch2, Operand(kDoubleNaNMask));
+ Or(scratch2, scratch2, scratch1);
+ dmtc1(scratch2, fd);
+
+ bind(&done);
+}
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
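
The SubNanPreservePayloadAndSign helpers above make a subtraction that involves a NaN operand return a NaN carrying that operand's sign and payload bits rather than the FPU's default quiet NaN. A host-side sketch of the same bit surgery for doubles; the shift and mask constants spell out the IEEE-754 layout that kDoubleSignMask, kDoubleNaNShift and kDoubleNaNMask encode, and the function name is illustrative:

    #include <cstdint>
    #include <cstring>

    // Subtract two doubles; if an operand is a NaN, rebuild the NaN result
    // from that operand's sign and payload, keeping the exponent and quiet
    // bits of the computed NaN (mirrors the save_payload path above).
    double SubPreservingNanPayload(double a, double b) {
      double r = a - b;
      if (r == r) return r;            // Not NaN: nothing to preserve.
      if (a == a && b == b) return r;  // e.g. inf - inf: keep default NaN.
      uint64_t ba, bb, br;
      std::memcpy(&ba, &a, 8);
      std::memcpy(&bb, &b, 8);
      std::memcpy(&br, &r, 8);
      uint64_t nan_bits = (a != a) ? ba : bb;      // first NaN operand wins
      const uint64_t kSign = 1ull << 63;
      const uint64_t kPayload = (1ull << 51) - 1;  // bits below the quiet bit
      br = (br & ~(kSign | kPayload)) | (nan_bits & (kSign | kPayload));
      std::memcpy(&r, &br, 8);
      return r;
    }
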
@@ -4890,11 +5323,12 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- li(t0, Operand(step_in_enabled));
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ li(t0, Operand(last_step_action));
lb(t0, MemOperand(t0));
- Branch(&skip_flooding, eq, t0, Operand(zero_reg));
+ Branch(&skip_flooding, lt, t0, Operand(StepIn));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
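
The stepping check above no longer reads a boolean debug_step_in_enabled flag; it loads the last step action and skips the flooding when that action is below StepIn, so one `lt` comparison covers both StepIn and StepFrame. A sketch of the predicate; the enum values mirror the StepAction ordering the STATIC_ASSERT relies on and should be read as assumptions:

    // Branch(&skip_flooding, lt, t0, Operand(StepIn)) skips when the last
    // action is below StepIn; StepIn and StepFrame both flood the function
    // with one-shot breakpoints.
    enum StepAction { StepNone = -1, StepOut = 0, StepNext = 1, StepIn = 2, StepFrame = 3 };

    inline bool ShouldFloodWithOneShot(StepAction last_step_action) {
      return last_step_action >= StepIn;
    }
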
@@ -5250,9 +5684,9 @@ void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
Move(left_reg, left);
Move(right_reg, right);
addu(dst, left, right);
- bnvc(left_reg, right_reg, no_overflow_label);
+ Bnvc(left_reg, right_reg, no_overflow_label);
} else {
- bovc(left, right, overflow_label);
+ Bovc(left, right, overflow_label);
addu(dst, left, right);
if (no_overflow_label) bc(no_overflow_label);
}
@@ -5515,6 +5949,78 @@ void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
+static inline void BranchOvfHelperMult(MacroAssembler* masm,
+ Register overflow_dst,
+ Label* overflow_label,
+ Label* no_overflow_label) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (!overflow_label) {
+ DCHECK(no_overflow_label);
+ masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
+ } else {
+ masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
+ if (no_overflow_label) masm->Branch(no_overflow_label);
+ }
+}
+
+void MacroAssembler::MulBranchOvf(Register dst, Register left,
+ const Operand& right, Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ if (right.is_reg()) {
+ MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
+ scratch);
+ } else {
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!left.is(overflow_dst));
+ DCHECK(!left.is(scratch));
+
+ if (dst.is(left)) {
+ Mul(scratch, left, static_cast<int32_t>(right.immediate()));
+ Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
+ mov(dst, scratch);
+ } else {
+ Mul(dst, left, static_cast<int32_t>(right.immediate()));
+ Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
+ }
+
+ dsra32(scratch, dst, 0);
+ xor_(overflow_dst, overflow_dst, scratch);
+
+ BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+ }
+}
+
+void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label,
+ Label* no_overflow_label, Register scratch) {
+ DCHECK(overflow_label || no_overflow_label);
+ Register overflow_dst = t9;
+ DCHECK(!dst.is(scratch));
+ DCHECK(!dst.is(overflow_dst));
+ DCHECK(!scratch.is(overflow_dst));
+ DCHECK(!overflow_dst.is(left));
+ DCHECK(!overflow_dst.is(right));
+ DCHECK(!scratch.is(left));
+ DCHECK(!scratch.is(right));
+
+ if (dst.is(left) || dst.is(right)) {
+ Mul(scratch, left, right);
+ Mulh(overflow_dst, left, right);
+ mov(dst, scratch);
+ } else {
+ Mul(dst, left, right);
+ Mulh(overflow_dst, left, right);
+ }
+
+ dsra32(scratch, dst, 0);
+ xor_(overflow_dst, overflow_dst, scratch);
+
+ BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
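
The new MulBranchOvf detects signed 32-bit multiply overflow without inspecting flags: Mul leaves the sign-extended low word of the product in dst, Mulh the high word, and the dsra32/xor pair compares the high word against the sign extension of the low word; they differ exactly when the product does not fit in 32 bits. A C++ sketch of the test (the helper name is illustrative):

    #include <cstdint>

    // Returns true iff left * right overflows int32_t and stores the
    // truncated product. hi and lo play the roles of Mulh and Mul; lo >> 31
    // is what dsra32(scratch, dst, 0) reads out of the sign-extended dst.
    bool MulOverflows32(int32_t left, int32_t right, int32_t* result) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      *result = lo;
      return (hi ^ (lo >> 31)) != 0;  // xor_ + BranchOvfHelperMult
    }
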
@@ -5557,11 +6063,12 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd) {
+ BranchDelaySlot bd,
+ bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(),
RelocInfo::CODE_TARGET,
al,
@@ -5570,13 +6077,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch1, Operand(value));
li(scratch2, Operand(ExternalReference(counter)));
- sd(scratch1, MemOperand(scratch2));
+ sw(scratch1, MemOperand(scratch2));
}
}
@@ -5586,9 +6092,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- ld(scratch1, MemOperand(scratch2));
- Daddu(scratch1, scratch1, Operand(value));
- sd(scratch1, MemOperand(scratch2));
+ lw(scratch1, MemOperand(scratch2));
+ Addu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
}
}
@@ -5598,9 +6104,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- ld(scratch1, MemOperand(scratch2));
- Dsubu(scratch1, scratch1, Operand(value));
- sd(scratch1, MemOperand(scratch2));
+ lw(scratch1, MemOperand(scratch2));
+ Subu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
}
}
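
The sd/ld to sw/lw switch in the counter helpers above is a width fix rather than a cleanup: the cell behind ExternalReference(counter) is a 32-bit int, and a 64-bit access on MIPS64 would read or clobber the four bytes adjacent to it. A small sketch of the hazard; the struct layout is illustrative:

    #include <cstdint>

    struct Counters { int32_t hits; int32_t misses; };  // adjacent 32-bit cells

    void Bump(int32_t* cell) {
      *cell += 1;  // lw / Addu / sw touches exactly these four bytes
    }
    // The old ld / Daddu / sd sequence behaved like a 64-bit increment of the
    // cell and would also rewrite whichever 32-bit cell follows it in memory.
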
@@ -5660,16 +6166,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- li(a0, Operand(Smi::FromInt(reason)));
- push(a0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -5779,9 +6288,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- ld(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -5827,7 +6335,24 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
ld(fp, MemOperand(fp, 0 * kPointerSize));
}
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(ra, fp);
+ Move(fp, sp);
+ Push(context, target, argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(context, target, argc);
+ Pop(ra, fp);
+}
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
+
// Set up the frame structure on the stack.
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
@@ -5847,7 +6372,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
sd(ra, MemOperand(sp, 4 * kPointerSize));
sd(fp, MemOperand(sp, 3 * kPointerSize));
- li(at, Operand(Smi::FromInt(StackFrame::EXIT)));
+ li(at, Operand(Smi::FromInt(frame_type)));
sd(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -6225,6 +6750,16 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotAGeneratorObject, t8,
+ Operand(JS_GENERATOR_OBJECT_TYPE));
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -6696,7 +7231,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -6705,14 +7240,16 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ li(at, Operand(new_space_allocation_top_adr));
+ ld(at, MemOperand(at));
+ Xor(scratch_reg, scratch_reg, Operand(at));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
@@ -6721,13 +7258,13 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
- li(at, Operand(new_space_allocation_top));
- lw(at, MemOperand(at));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ li(at, Operand(new_space_allocation_top_adr));
+ ld(at, MemOperand(at));
Branch(no_memento_found, gt, scratch_reg, Operand(at));
// Memento map check.
bind(&map_check);
- lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
Branch(no_memento_found, ne, scratch_reg,
Operand(isolate()->factory()->allocation_memento_map()));
}
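
The memento check above decides whether the object and the allocation top share a page by XOR-ing the two addresses and masking off the page-offset bits; the Daddu/ld fixes make the comparison use full 64-bit pointers. A sketch of the page test, with an assumed page size for illustration:

    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 20;  // assumed 1 MB pages
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

    // Two addresses lie on the same page iff all bits above the page offset
    // agree: Xor, And with ~mask, compare to zero, as in the sequence above.
    inline bool OnSamePage(uintptr_t a, uintptr_t b) {
      return ((a ^ b) & ~kPageAlignmentMask) == 0;
    }
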
@@ -6747,8 +7284,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 401112d100..c96525c6ae 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -18,8 +18,8 @@ const Register kReturnRegister1 = {Register::kCode_v1};
const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
+const Register kAllocateSizeRegister = {Register::kCode_a0};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_a7};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
const Register kInterpreterDispatchTableRegister = {Register::kCode_t2};
@@ -587,6 +587,15 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is currently only used for folded allocations. It just
+ // increments the top pointer without checking against the limit. This can
+ // only be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register scratch1,
+ Register scratch2, AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_new,
+ Register scratch, AllocationFlags flags);
+
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@@ -621,7 +630,6 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
@@ -714,12 +722,28 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Pseudo-instructions.
+ // Change endianness
+ void ByteSwapSigned(Register dest, Register src, int operand_size);
+ void ByteSwapUnsigned(Register dest, Register src, int operand_size);
+
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+ void Ulh(Register rd, const MemOperand& rs);
+ void Ulhu(Register rd, const MemOperand& rs);
+ void Ush(Register rd, const MemOperand& rs, Register scratch);
+
void Ulw(Register rd, const MemOperand& rs);
+ void Ulwu(Register rd, const MemOperand& rs);
void Usw(Register rd, const MemOperand& rs);
- void Uld(Register rd, const MemOperand& rs, Register scratch = at);
- void Usd(Register rd, const MemOperand& rs, Register scratch = at);
+
+ void Uld(Register rd, const MemOperand& rs);
+ void Usd(Register rd, const MemOperand& rs);
+
+ void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);
+
+ void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
+ void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
@@ -856,6 +880,12 @@ class MacroAssembler: public Assembler {
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Neg_s(FPURegister fd, FPURegister fs);
+ void Neg_d(FPURegister fd, FPURegister fs);
+
+ // MIPS64 R6 instruction macros.
+ void Bovc(Register rt, Register rs, Label* L);
+ void Bnvc(Register rt, Register rs, Label* L);
// ---------------------------------------------------------------------------
// FPU macros. These do not handle special cases like NaN or +- inf.
@@ -909,6 +939,12 @@ class MacroAssembler: public Assembler {
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
+ // Preserve value of a NaN operand
+ void SubNanPreservePayloadAndSign_s(FPURegister fd, FPURegister fs,
+ FPURegister ft);
+ void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
+ FPURegister ft);
+
void Madd_d(FPURegister fd,
FPURegister fr,
FPURegister fs,
@@ -1034,8 +1070,8 @@ class MacroAssembler: public Assembler {
// argc - argument count to be dropped by LeaveExitFrame.
// save_doubles - saves FPU registers on stack, currently disabled.
// stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles,
- int stack_space = 0);
+ void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame.
void LeaveExitFrame(bool save_doubles, Register arg_count,
@@ -1357,6 +1393,24 @@ class MacroAssembler: public Assembler {
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
+ inline void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
+ }
+
+ inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
+ Label* no_overflow_label, Register scratch = at) {
+ MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
+ }
+
+ void MulBranchOvf(Register dst, Register left, const Operand& right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
+ void MulBranchOvf(Register dst, Register left, Register right,
+ Label* overflow_label, Label* no_overflow_label,
+ Register scratch = at);
+
inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
Label* overflow_label, Register scratch = at) {
DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
@@ -1518,7 +1572,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd = PROTECT);
+ BranchDelaySlot bd = PROTECT,
+ bool builtin_exit_frame = false);
struct Unresolved {
int pc;
@@ -1714,6 +1769,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -1818,6 +1877,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -2001,14 +2063,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
}
}
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 9519865c82..780c90c16b 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -66,7 +66,6 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
class MipsDebugger {
public:
explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
- ~MipsDebugger();
void Stop(Instruction* instr);
void Debug();
@@ -97,75 +96,16 @@ class MipsDebugger {
void RedoBreakpoints();
};
-
-MipsDebugger::~MipsDebugger() {
-}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void MipsDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->Bits(25, 6);
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
- char* msg = *msg_address;
- DCHECK(msg != NULL);
-
- // Update this stop description.
- if (!watched_stops_[code].desc) {
- watched_stops_[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", str);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instr*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- // TODO(yuyin): 2 -> 3?
- sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstructionSize);
-}
-
-
-#else // GENERATED_CODE_COVERAGE
-
#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
-static void InitializeCoverage() {}
-
-
void MipsDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->Bits(25, 6);
- // Retrieve the encoded address, which comes just after this stop.
- char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
- Instruction::kInstrSize);
- // Update this stop description.
- if (!sim_->watched_stops_[code].desc) {
- sim_->watched_stops_[code].desc = msg;
- }
- PrintF("Simulator hit %s (%u)\n", msg, code);
+ PrintF("Simulator hit (%u)\n", code);
// TODO(yuyin): 2 -> 3?
sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstrSize);
Debug();
}
-#endif // GENERATED_CODE_COVERAGE
-
int64_t MipsDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
@@ -268,33 +208,42 @@ void MipsDebugger::PrintAllRegs() {
PrintF("\n");
// at, v0, a0.
- PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64
+ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n",
REG_INFO(1), REG_INFO(2), REG_INFO(4));
// v1, a1.
- PrintF("%34s\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
"", REG_INFO(3), REG_INFO(5));
// a2.
- PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(6));
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(6));
// a3.
- PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(7));
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(7));
PrintF("\n");
// a4-t3, s0-s7
for (int i = 0; i < 8; i++) {
- PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
- REG_INFO(8+i), REG_INFO(16+i));
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(8 + i), REG_INFO(16 + i));
}
PrintF("\n");
// t8, k0, LO.
- PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
REG_INFO(24), REG_INFO(26), REG_INFO(32));
// t9, k1, HI.
- PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
REG_INFO(25), REG_INFO(27), REG_INFO(33));
// sp, fp, gp.
- PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
REG_INFO(29), REG_INFO(30), REG_INFO(28));
// pc.
- PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
REG_INFO(31), REG_INFO(34));
#undef REG_INFO
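
The "%lx"-style conversions replaced above are only correct where long is 64 bits wide; the PRIx64/PRId64 macros from <cinttypes> expand to the right length modifier for int64_t on every target. A self-contained example of the idiom:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t v = INT64_C(0x123456789abc);
      // Adjacent string literals concatenate, so the macros splice the
      // correct conversion specifiers into the format string portably.
      std::printf("%3s: 0x%016" PRIx64 " %14" PRId64 "\n", "v0", v, v);
      return 0;
    }
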
@@ -312,38 +261,38 @@ void MipsDebugger::PrintAllRegsIncludingFPU() {
PrintF("\n\n");
// f0, f1, f2, ... f31.
// TODO(plind): consider printing 2 columns for space efficiency.
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(0) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(1) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(2) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(3) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(4) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(5) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(6) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(7) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(8) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(9) );
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(10));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(11));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(12));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(13));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(14));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(15));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(16));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(17));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(18));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(19));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(20));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(21));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(22));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(23));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(24));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(25));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(26));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(27));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(28));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(29));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(30));
- PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(31));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31));
#undef REG_INFO
#undef FPU_REG_INFO
@@ -382,7 +331,7 @@ void MipsDebugger::Debug() {
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%016lx %s\n", sim_->get_pc(), buffer.start());
+ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.start());
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
@@ -433,11 +382,12 @@ void MipsDebugger::Debug() {
if (regnum != kInvalidRegister) {
value = GetRegisterValue(regnum);
- PrintF("%s: 0x%08lx %ld \n", arg1, value, value);
+ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value,
+ value);
} else if (fpuregnum != kInvalidFPURegister) {
value = GetFPURegisterValue(fpuregnum);
dvalue = GetFPURegisterValueDouble(fpuregnum);
- PrintF("%3s: 0x%016lx %16.4e\n",
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
FPURegisters::Name(fpuregnum), value, dvalue);
} else {
PrintF("%s unrecognized\n", arg1);
@@ -454,7 +404,7 @@ void MipsDebugger::Debug() {
value = GetFPURegisterValue(fpuregnum);
value &= 0xffffffffUL;
fvalue = GetFPURegisterValueFloat(fpuregnum);
- PrintF("%s: 0x%08lx %11.4e\n", arg1, value, fvalue);
+ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -513,7 +463,7 @@ void MipsDebugger::Debug() {
end = cur + words;
while (cur < end) {
- PrintF(" 0x%012lx: 0x%016lx %14ld",
+ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
@@ -576,8 +526,8 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08lx %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.start());
cur += Instruction::kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
@@ -698,8 +648,8 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08lx %s\n",
- reinterpret_cast<intptr_t>(cur), buffer.start());
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.start());
cur += Instruction::kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
@@ -791,9 +741,7 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
- void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
size_t size) {
int64_t start = reinterpret_cast<int64_t>(start_addr);
int64_t intra_line = (start & CachePage::kLineMask);
@@ -814,10 +762,8 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
}
}
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry =
- i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@@ -827,7 +773,7 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start,
size_t size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
@@ -840,9 +786,7 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
int64_t address = reinterpret_cast<int64_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -875,7 +819,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ i_cache_ = new base::HashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -912,7 +856,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
- InitializeCoverage();
last_debugger_input_ = NULL;
}
@@ -990,10 +933,10 @@ class Redirection {
// static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
- for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
@@ -1683,7 +1626,7 @@ void Simulator::DieOrDebug() {
void Simulator::TraceRegWr(int64_t value) {
if (::v8::internal::FLAG_trace_sim) {
- SNPrintF(trace_buf_, "%016lx", value);
+ SNPrintF(trace_buf_, "%016" PRIx64 " ", value);
}
}
@@ -1691,8 +1634,9 @@ void Simulator::TraceRegWr(int64_t value) {
// TODO(plind): consider making icount_ printing a flag option.
void Simulator::TraceMemRd(int64_t addr, int64_t value) {
if (::v8::internal::FLAG_trace_sim) {
- SNPrintF(trace_buf_, "%016lx <-- [%016lx] (%ld)",
- value, addr, icount_);
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 " ] (%" PRId64 " )", value,
+ addr, icount_);
}
}
@@ -1701,19 +1645,20 @@ void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
switch (t) {
case BYTE:
- SNPrintF(trace_buf_, " %02x --> [%016lx]",
+ SNPrintF(trace_buf_, " %02x --> [%016" PRIx64 " ]",
static_cast<int8_t>(value), addr);
break;
case HALF:
- SNPrintF(trace_buf_, " %04x --> [%016lx]",
+ SNPrintF(trace_buf_, " %04x --> [%016" PRIx64 " ]",
static_cast<int16_t>(value), addr);
break;
case WORD:
- SNPrintF(trace_buf_, " %08x --> [%016lx]",
+ SNPrintF(trace_buf_, " %08x --> [%016" PRIx64 " ]",
static_cast<int32_t>(value), addr);
break;
case DWORD:
- SNPrintF(trace_buf_, "%016lx --> [%016lx] (%ld)",
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " --> [%016" PRIx64 " ] (%" PRId64 " )",
value, addr, icount_);
break;
}
@@ -1726,17 +1671,17 @@ void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
if (addr >=0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
- if ((addr & 0x3) == 0) {
+ if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
}
- PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
return 0;
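
MIPS64 Release 6 requires the hardware to handle unaligned ordinary loads and stores, which is why the alignment traps in these Read/Write helpers are now bypassed when kArchVariant == kMips64r6. A sketch of the gate; in V8 the variant is a build-time constant, so the enum and value here are assumptions:

    #include <cstdint>

    enum ArchVariant { kMips64r2, kMips64r6 };
    constexpr ArchVariant kArchVariant = kMips64r6;  // assumed build setting

    // Pre-r6 variants drop into the debugger on a misaligned address; r6
    // proceeds with the access.
    inline bool AccessPermitted(uint64_t addr, uint64_t alignment_mask) {
      return (addr & alignment_mask) == 0 || kArchVariant == kMips64r6;
    }
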
@@ -1746,17 +1691,17 @@ int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
if (addr >=0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
- if ((addr & 0x3) == 0) {
+ if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
}
- PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
return 0;
@@ -1766,18 +1711,18 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
- if ((addr & 0x3) == 0) {
+ if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, WORD);
int* ptr = reinterpret_cast<int*>(addr);
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
@@ -1786,17 +1731,17 @@ void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
if (addr >=0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
- if ((addr & kPointerAlignmentMask) == 0) {
+ if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
TraceMemRd(addr, *ptr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
return 0;
@@ -1806,102 +1751,100 @@ int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
// This has to be a NULL-dereference, drop into debugger.
- PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
- if ((addr & kPointerAlignmentMask) == 0) {
+ if ((addr & kPointerAlignmentMask) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, DWORD);
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
double Simulator::ReadD(int64_t addr, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
+ if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
- PrintF("Unaligned (double) read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
return 0;
}
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
- if ((addr & kDoubleAlignmentMask) == 0) {
+ if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
double* ptr = reinterpret_cast<double*>(addr);
*ptr = value;
return;
}
- PrintF("Unaligned (double) write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
}
- PrintF("Unaligned unsigned halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64
+ " , pc=0x%08" V8PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
return 0;
}
int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
TraceMemRd(addr, static_cast<int64_t>(*ptr));
return *ptr;
}
- PrintF("Unaligned signed halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ PrintF("Unaligned signed halfword read at 0x%08" PRIx64
+ " , pc=0x%08" V8PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
return 0;
}
void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, HALF);
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
return;
}
- PrintF(
- "Unaligned unsigned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64
+ " , pc=0x%08" V8PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
- if ((addr & 1) == 0) {
+ if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
TraceMemWr(addr, value, HALF);
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
return;
}
- PrintF("Unaligned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ PrintF("Unaligned halfword write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
DieOrDebug();
}
@@ -1950,7 +1893,7 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
- PrintF("Simulator found unsupported instruction:\n 0x%08lx: %s\n",
+ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED_MIPS();
}
@@ -2067,15 +2010,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(generic_target), dval0, dval1);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(generic_target), dval0);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(generic_target), dval0, ival);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ ival);
break;
default:
UNREACHABLE();
@@ -2133,8 +2078,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08lx\n",
- reinterpret_cast<void*>(external), arg0);
+ PrintF("Call to host function at %p args %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0);
}
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
@@ -2142,8 +2087,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
} else if (
redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08lx %08lx\n",
- reinterpret_cast<void*>(external), arg0, arg1);
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
}
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
@@ -2151,8 +2097,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
} else if (
redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08lx %08lx\n",
- reinterpret_cast<void*>(external), arg0, arg1);
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
}
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
@@ -2160,8 +2107,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
} else if (
redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p args %08lx %08lx %08lx\n",
- reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
}
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
@@ -2175,13 +2123,15 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
"Call to host triple returning runtime function %p "
"args %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
", %016" PRIx64 "\n",
- FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
+ arg5);
}
// arg0 is a hidden argument pointing to the return location, so don't
// pass it to the target function.
ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
+ static_cast<void*>(result.y), static_cast<void*>(result.z));
}
// Return is passed back in address pointed to by hidden first argument.
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@@ -2195,14 +2145,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim) {
PrintF(
"Call to host function at %p "
- "args %08lx, %08lx, %08lx, %08lx, %08lx, %08lx\n",
- FUNCTION_ADDR(target),
- arg0,
- arg1,
- arg2,
- arg3,
- arg4,
- arg5);
+ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " \n",
+ static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
+ arg4, arg5);
}
// int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
// set_register(v0, static_cast<int32_t>(result));
@@ -2212,7 +2158,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
set_register(v1, (int64_t)(result.y));
}
if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08lx : %08lx\n", get_register(v1), get_register(v0));
+ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1),
+ get_register(v0));
}
set_register(ra, saved_ra);
set_pc(get_register(ra));
@@ -2241,7 +2188,8 @@ bool Simulator::IsWatchpoint(uint64_t code) {
void Simulator::PrintWatchpoint(uint64_t code) {
MipsDebugger dbg(this);
++break_count_;
- PrintF("\n---- break %ld marker: %3d (instr count: %8ld) ----------"
+ PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64
+ " ) ----------"
"----------------------------------",
code, break_count_, icount_);
dbg.PrintAllRegs(); // Print registers and continue running.
@@ -2291,8 +2239,10 @@ void Simulator::DisableStop(uint64_t code) {
void Simulator::IncreaseStopCounter(uint64_t code) {
DCHECK(code <= kMaxStopCode);
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
- PrintF("Stop counter for code %ld has overflowed.\n"
- "Enabling this code and reseting the counter to 0.\n", code);
+ PrintF("Stop counter for code %" PRId64
+ " has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
watched_stops_[code].count = 0;
EnableStop(code);
} else {
@@ -2315,11 +2265,11 @@ void Simulator::PrintStopInfo(uint64_t code) {
// Don't print the state of unused breakpoints.
if (count != 0) {
if (watched_stops_[code].desc) {
- PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i, \t%s\n",
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n",
code, code, state, count, watched_stops_[code].desc);
} else {
- PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i\n",
- code, code, state, count);
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code,
+ code, state, count);
}
}
}
@@ -3420,21 +3370,50 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
// bits instruction. RS field is always equal to 0.
// Sign-extend the 32-bit result.
alu_out = static_cast<int32_t>(static_cast<uint32_t>(rt_u()) >> sa());
- } else {
+ } else if (rs_reg() == 1) {
// Logical right-rotate of a word by a fixed number of bits. This
// is a special case of the SRL instruction, added in MIPS32 Release 2.
// RS field is equal to 00001.
alu_out = static_cast<int32_t>(
base::bits::RotateRight32(static_cast<const uint32_t>(rt_u()),
static_cast<const uint32_t>(sa())));
+ } else {
+ UNREACHABLE();
}
SetResult(rd_reg(), alu_out);
break;
case DSRL:
- SetResult(rd_reg(), rt_u() >> sa());
+ if (rs_reg() == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ // Sign-extend the 64-bit result.
+ alu_out = static_cast<int64_t>(rt_u() >> sa());
+ } else if (rs_reg() == 1) {
+ // Logical right-rotate of a doubleword by a fixed number of bits
+ // (DROTR). This is a special case of the DSRL instruction, added
+ // in MIPS64 Release 2. RS field is equal to 00001.
+ alu_out = static_cast<int64_t>(base::bits::RotateRight64(rt_u(), sa()));
+ } else {
+ UNREACHABLE();
+ }
+ SetResult(rd_reg(), alu_out);
break;
case DSRL32:
- SetResult(rd_reg(), rt_u() >> sa() >> 32);
+ if (rs_reg() == 0) {
+ // Regular logical right shift of a doubleword by sa + 32 bits.
+ // RS field is always equal to 0. The 64-bit result needs no
+ // sign extension.
+ alu_out = static_cast<int64_t>(rt_u() >> sa() >> 32);
+ } else if (rs_reg() == 1) {
+ // Logical right-rotate of a doubleword by sa + 32 bits (DROTR32).
+ // This is a special case of the DSRL32 instruction, added in
+ // MIPS64 Release 2. RS field is equal to 00001.
+ alu_out =
+ static_cast<int64_t>(base::bits::RotateRight64(rt_u(), sa() + 32));
+ } else {
+ UNREACHABLE();
+ }
+ SetResult(rd_reg(), alu_out);
break;
case SRA:
SetResult(rd_reg(), (int32_t)rt() >> sa());
@@ -3470,12 +3449,13 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
if (sa() == 0) {
// Regular logical right-shift of a word by a variable number of
// bits instruction. SA field is always equal to 0.
- alu_out = rt_u() >> rs();
+ alu_out = static_cast<int64_t>(rt_u() >> rs());
} else {
// Logical right-rotate of a word by a variable number of bits.
// This is a special case of the SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- alu_out = base::bits::RotateRight64(rt_u(), rs_u());
+ alu_out =
+ static_cast<int64_t>(base::bits::RotateRight64(rt_u(), rs_u()));
}
SetResult(rd_reg(), alu_out);
break;
@@ -3777,6 +3757,9 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
case TNE:
do_interrupt = rs() != rt();
break;
+ case SYNC:
+ // TODO(palfia): Ignore sync instruction for now.
+ break;
// Conditional moves.
case MOVN:
if (rt()) {
@@ -3933,12 +3916,57 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
break;
}
- case SEB:
- case SEH:
- case WSBH:
- alu_out = 0x12345678;
- UNREACHABLE();
+ case SEB: {
+ uint8_t input = static_cast<uint8_t>(rt());
+ uint32_t output = input;
+ uint32_t mask = 0x00000080;
+
+ // Sign-extend.
+ if (mask & input) {
+ output |= 0xFFFFFF00;
+ }
+
+ alu_out = static_cast<int32_t>(output);
break;
+ }
+ case SEH: {
+ uint16_t input = static_cast<uint16_t>(rt());
+ uint32_t output = input;
+ uint32_t mask = 0x00008000;
+
+ // Sign-extend.
+ if (mask & input) {
+ output |= 0xFFFF0000;
+ }
+
+ alu_out = static_cast<int32_t>(output);
+ break;
+ }
+ case WSBH: {
+ uint32_t input = static_cast<uint32_t>(rt());
+ uint64_t output = 0;
+
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+ mask = 0x80000000;
+
+ // Sign-extend.
+ if (mask & output) {
+ output |= 0xFFFFFFFF00000000;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ break;
+ }
default: {
const uint8_t bp2 = get_instr()->Bp2Value();
sa >>= kBp2Bits;
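
SEB, SEH and WSBH stop being unreachable stubs above and get real implementations; note that MIPS64 sign-extends 32-bit results into the full register. Compact host-side equivalents (function names are illustrative):

    #include <cstdint>

    int64_t Seb(uint64_t rt) {   // sign-extend the low byte
      return static_cast<int64_t>(static_cast<int8_t>(rt));
    }
    int64_t Seh(uint64_t rt) {   // sign-extend the low halfword
      return static_cast<int64_t>(static_cast<int16_t>(rt));
    }
    int64_t Wsbh(uint64_t rt) {  // swap bytes within each halfword, then
      uint32_t x = static_cast<uint32_t>(rt);  // sign-extend the word
      uint32_t swapped = ((x & 0xFF00FF00u) >> 8) | ((x & 0x00FF00FFu) << 8);
      return static_cast<int64_t>(static_cast<int32_t>(swapped));
    }
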
@@ -3997,11 +4025,47 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
}
break;
}
- case DSBH:
- case DSHD:
- alu_out = 0x12345678;
- UNREACHABLE();
+ case DSBH: {
+ uint64_t input = static_cast<uint64_t>(rt());
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0)
+ tmp = tmp >> 8;
+ else
+ tmp = tmp << 8;
+
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ break;
+ }
+ case DSHD: {
+ uint64_t input = static_cast<uint64_t>(rt());
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i == 0)
+ tmp = tmp >> 48;
+ else if (i == 1)
+ tmp = tmp >> 16;
+ else if (i == 2)
+ tmp = tmp << 16;
+ else
+ tmp = tmp << 48;
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
break;
+ }
default: {
const uint8_t bp3 = get_instr()->Bp3Value();
sa >>= kBp3Bits;
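
DSBH and DSHD are the doubleword counterparts implemented above: DSBH swaps the bytes inside each of the four halfwords, DSHD reverses the halfwords themselves, and composing the two yields a full 64-bit byte reversal. Equivalent mask-and-shift forms of the loops (names are illustrative):

    #include <cstdint>

    int64_t Dsbh(uint64_t x) {  // swap bytes within each halfword
      return static_cast<int64_t>(((x & 0xFF00FF00FF00FF00ull) >> 8) |
                                  ((x & 0x00FF00FF00FF00FFull) << 8));
    }
    int64_t Dshd(uint64_t x) {  // reverse the four halfwords
      x = ((x & 0xFFFF0000FFFF0000ull) >> 16) | ((x & 0x0000FFFF0000FFFFull) << 16);
      return static_cast<int64_t>((x >> 32) | (x << 32));
    }
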
@@ -4051,31 +4115,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
DecodeTypeRegisterSPECIAL2();
break;
case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case BSHFL: {
- int32_t saVal = sa();
- saVal >>= kBp2Bits;
- switch (saVal) {
- case ALIGN: {
- DecodeTypeRegisterSPECIAL3();
- break;
- }
- }
- }
- case DBSHFL: {
- int32_t saVal = sa();
- saVal >>= kBp2Bits;
- switch (saVal) {
- case DALIGN: {
- DecodeTypeRegisterSPECIAL3();
- break;
- }
- }
- }
- default:
- DecodeTypeRegisterSPECIAL3();
- break;
- }
+ DecodeTypeRegisterSPECIAL3();
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
@@ -4119,6 +4159,8 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
int64_t addr = 0x0;
// Alignment for 32-bit integers used in LWL, LWR, etc.
const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
+ // Alignment for 64-bit integers used in LDL, LDR, etc.
+ const int kInt64AlignmentMask = sizeof(uint64_t) - 1;
// Branch instructions common part.
auto BranchAndLinkHelper = [this, instr, &next_pc,
@@ -4366,6 +4408,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
// ------------- Arithmetic instructions.
case ADDIU: {
+ DCHECK(is_int32(rs));
int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
// Sign-extend result of 32bit operation into 64bit register.
SetResult(rt_reg, static_cast<int64_t>(alu32_out));
@@ -4420,10 +4463,10 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
uint32_t mask = (1 << byte_shift * 8) - 1;
addr = rs + se_imm16 - al_offset;
- alu_out = ReadW(addr, instr);
- alu_out <<= byte_shift * 8;
- alu_out |= rt & mask;
- set_register(rt_reg, alu_out);
+ int32_t val = ReadW(addr, instr);
+ val <<= byte_shift * 8;
+ val |= rt & mask;
+ set_register(rt_reg, static_cast<int64_t>(val));
break;
}
case LW:
@@ -4453,6 +4496,30 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
set_register(rt_reg, alu_out);
break;
}
+ case LDL: {
+      // al_offset is the offset of the effective address within an aligned
+      // doubleword.
+ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+ uint8_t byte_shift = kInt64AlignmentMask - al_offset;
+ uint64_t mask = (1UL << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = Read2W(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ set_register(rt_reg, alu_out);
+ break;
+ }
+ case LDR: {
+      // al_offset is the offset of the effective address within an aligned
+      // doubleword.
+ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+ uint8_t byte_shift = kInt64AlignmentMask - al_offset;
+ uint64_t mask = al_offset ? (~0UL << (byte_shift + 1) * 8) : 0UL;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = Read2W(addr, instr);
+ alu_out = alu_out >> al_offset * 8;
+ alu_out |= rt & mask;
+ set_register(rt_reg, alu_out);
+ break;
+ }
case SB:
WriteB(rs + se_imm16, static_cast<int8_t>(rt));
break;
@@ -4484,6 +4551,25 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
WriteW(addr, static_cast<int32_t>(mem_value), instr);
break;
}
+ case SDL: {
+ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+ uint8_t byte_shift = kInt64AlignmentMask - al_offset;
+ uint64_t mask = byte_shift ? (~0UL << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ uint64_t mem_value = Read2W(addr, instr) & mask;
+ mem_value |= rt >> byte_shift * 8;
+ Write2W(addr, mem_value, instr);
+ break;
+ }
+ case SDR: {
+ uint8_t al_offset = (rs + se_imm16) & kInt64AlignmentMask;
+ uint64_t mask = (1UL << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ uint64_t mem_value = Read2W(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ Write2W(addr, mem_value, instr);
+ break;
+ }
case LWC1:
set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
@@ -4642,8 +4728,9 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (::v8::internal::FLAG_trace_sim) {
- PrintF(" 0x%08lx %-44s %s\n", reinterpret_cast<intptr_t>(instr),
- buffer.start(), trace_buf_.start());
+ PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
+ reinterpret_cast<intptr_t>(instr), buffer.start(),
+ trace_buf_.start());
}
if (!pc_modified_) {
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 7f60a74639..cd606e2402 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -84,7 +84,7 @@ class SimulatorStack : public v8::internal::AllStatic {
// Running with a simulator.
#include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
namespace v8 {
namespace internal {
@@ -226,7 +226,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(HashMap* i_cache, Redirection* first);
+ static void TearDown(base::HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -246,8 +246,7 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
+ static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_ra, end_sim_pc).
@@ -415,10 +414,9 @@ class Simulator {
}
// ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- size_t size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+ static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(base::HashMap* i_cache, intptr_t start, size_t size);
+ static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
enum Exception {
none,
@@ -463,7 +461,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation.
- v8::internal::HashMap* i_cache_;
+ base::HashMap* i_cache_;
v8::internal::Isolate* isolate_;
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index bfc1895aef..ccee37b962 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -76,6 +76,7 @@ void BodyDescriptorBase::IterateBodyImpl(Heap* heap, HeapObject* obj,
template <typename ObjectVisitor>
+DISABLE_CFI_PERF
void BodyDescriptorBase::IteratePointers(HeapObject* obj, int start_offset,
int end_offset, ObjectVisitor* v) {
v->VisitPointers(HeapObject::RawField(obj, start_offset),
@@ -84,6 +85,7 @@ void BodyDescriptorBase::IteratePointers(HeapObject* obj, int start_offset,
template <typename StaticVisitor>
+DISABLE_CFI_PERF
void BodyDescriptorBase::IteratePointers(Heap* heap, HeapObject* obj,
int start_offset, int end_offset) {
StaticVisitor::VisitPointers(heap, obj,
@@ -355,6 +357,8 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
STATIC_ASSERT(kHandlerTableOffset + kPointerSize ==
kDeoptimizationDataOffset);
STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
+ kSourcePositionTableOffset);
+ STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
kTypeFeedbackInfoOffset);
STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize == kNextCodeLinkOffset);
@@ -456,6 +460,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case TRANSITION_ARRAY_TYPE:
return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
case JS_PROMISE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -472,6 +478,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
+ case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index b023b03aea..7d426a045e 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -99,6 +99,9 @@ void HeapObject::HeapObjectVerify() {
Oddball::cast(this)->OddballVerify();
break;
case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_PROMISE_TYPE:
@@ -107,9 +110,6 @@ void HeapObject::HeapObjectVerify() {
case JS_GENERATOR_OBJECT_TYPE:
JSGeneratorObject::cast(this)->JSGeneratorObjectVerify();
break;
- case JS_MODULE_TYPE:
- JSModule::cast(this)->JSModuleVerify();
- break;
case JS_VALUE_TYPE:
JSValue::cast(this)->JSValueVerify();
break;
@@ -209,8 +209,8 @@ void HeapObject::VerifyHeapPointer(Object* p) {
void Symbol::SymbolVerify() {
CHECK(IsSymbol());
CHECK(HasHashCode());
- CHECK(GetHeap()->hidden_properties_symbol() == this || Hash() > 0u);
- CHECK(name()->IsUndefined() || name()->IsString());
+ CHECK(Hash() > 0u);
+ CHECK(name()->IsUndefined(GetIsolate()) || name()->IsString());
}
@@ -288,6 +288,7 @@ void JSObject::JSObjectVerify() {
actual_unused_property_fields - JSObject::kFieldsAdded);
}
DescriptorArray* descriptors = map()->instance_descriptors();
+ Isolate* isolate = GetIsolate();
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
if (descriptors->GetDetails(i).type() == DATA) {
Representation r = descriptors->GetDetails(i).representation();
@@ -298,7 +299,7 @@ void JSObject::JSObjectVerify() {
}
Object* value = RawFastPropertyAt(index);
if (r.IsDouble()) DCHECK(value->IsMutableHeapNumber());
- if (value->IsUninitialized()) continue;
+ if (value->IsUninitialized(isolate)) continue;
if (r.IsSmi()) DCHECK(value->IsSmi());
if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
FieldType* field_type = descriptors->GetFieldType(i);
@@ -311,7 +312,7 @@ void JSObject::JSObjectVerify() {
// object literal creation and we will end up having an undefined
// value that does not match the field type.
CHECK(!field_type->NowStable() || field_type->NowContains(value) ||
- (!FLAG_use_allocation_folding && value->IsUndefined()));
+ (!FLAG_use_allocation_folding && value->IsUndefined(isolate)));
}
}
}
@@ -337,7 +338,7 @@ void Map::MapVerify() {
CHECK(instance_size() == kVariableSizeSentinel ||
(kPointerSize <= instance_size() &&
instance_size() < heap->Capacity()));
- CHECK(GetBackPointer()->IsUndefined() ||
+ CHECK(GetBackPointer()->IsUndefined(heap->isolate()) ||
!Map::cast(GetBackPointer())->is_stable());
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
@@ -369,21 +370,6 @@ void Map::VerifyOmittedMapChecks() {
}
-void CodeCache::CodeCacheVerify() {
- VerifyHeapPointer(default_cache());
- VerifyHeapPointer(normal_type_cache());
- CHECK(default_cache()->IsFixedArray());
- CHECK(normal_type_cache()->IsUndefined()
- || normal_type_cache()->IsCodeCacheHashTable());
-}
-
-
-void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
- VerifyHeapPointer(cache());
- CHECK(cache()->IsUndefined() || cache()->IsPolymorphicCodeCacheHashTable());
-}
-
-
void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
VerifyObjectField(kStorage1Offset);
VerifyObjectField(kStorage2Offset);
@@ -426,7 +412,7 @@ void TransitionArray::TransitionArrayVerify() {
VerifyPointer(e);
}
CHECK_LE(LengthFor(number_of_transitions()), length());
- CHECK(next_link()->IsUndefined() || next_link()->IsSmi() ||
+ CHECK(next_link()->IsUndefined(GetIsolate()) || next_link()->IsSmi() ||
next_link()->IsTransitionArray());
}
@@ -443,14 +429,6 @@ void JSGeneratorObject::JSGeneratorObjectVerify() {
}
-void JSModule::JSModuleVerify() {
- VerifyObjectField(kContextOffset);
- VerifyObjectField(kScopeInfoOffset);
- CHECK(context()->IsUndefined() ||
- Context::cast(context())->IsModuleContext());
-}
-
-
void JSValue::JSValueVerify() {
Object* v = value();
if (v->IsHeapObject()) {
@@ -463,16 +441,18 @@ void JSDate::JSDateVerify() {
if (value()->IsHeapObject()) {
VerifyHeapPointer(value());
}
- CHECK(value()->IsUndefined() || value()->IsSmi() || value()->IsHeapNumber());
- CHECK(year()->IsUndefined() || year()->IsSmi() || year()->IsNaN());
- CHECK(month()->IsUndefined() || month()->IsSmi() || month()->IsNaN());
- CHECK(day()->IsUndefined() || day()->IsSmi() || day()->IsNaN());
- CHECK(weekday()->IsUndefined() || weekday()->IsSmi() || weekday()->IsNaN());
- CHECK(hour()->IsUndefined() || hour()->IsSmi() || hour()->IsNaN());
- CHECK(min()->IsUndefined() || min()->IsSmi() || min()->IsNaN());
- CHECK(sec()->IsUndefined() || sec()->IsSmi() || sec()->IsNaN());
- CHECK(cache_stamp()->IsUndefined() ||
- cache_stamp()->IsSmi() ||
+ Isolate* isolate = GetIsolate();
+ CHECK(value()->IsUndefined(isolate) || value()->IsSmi() ||
+ value()->IsHeapNumber());
+ CHECK(year()->IsUndefined(isolate) || year()->IsSmi() || year()->IsNaN());
+ CHECK(month()->IsUndefined(isolate) || month()->IsSmi() || month()->IsNaN());
+ CHECK(day()->IsUndefined(isolate) || day()->IsSmi() || day()->IsNaN());
+ CHECK(weekday()->IsUndefined(isolate) || weekday()->IsSmi() ||
+ weekday()->IsNaN());
+ CHECK(hour()->IsUndefined(isolate) || hour()->IsSmi() || hour()->IsNaN());
+ CHECK(min()->IsUndefined(isolate) || min()->IsSmi() || min()->IsNaN());
+ CHECK(sec()->IsUndefined(isolate) || sec()->IsSmi() || sec()->IsNaN());
+ CHECK(cache_stamp()->IsUndefined(isolate) || cache_stamp()->IsSmi() ||
cache_stamp()->IsNaN());
if (month()->IsSmi()) {
@@ -501,7 +481,7 @@ void JSDate::JSDateVerify() {
}
if (cache_stamp()->IsSmi()) {
CHECK(Smi::cast(cache_stamp())->value() <=
- Smi::cast(GetIsolate()->date_cache()->stamp())->value());
+ Smi::cast(isolate->date_cache()->stamp())->value());
}
}
@@ -569,7 +549,7 @@ void JSFunction::JSFunctionVerify() {
VerifyObjectField(kNextFunctionLinkOffset);
CHECK(code()->IsCode());
CHECK(next_function_link() == NULL ||
- next_function_link()->IsUndefined() ||
+ next_function_link()->IsUndefined(GetIsolate()) ||
next_function_link()->IsJSFunction());
CHECK(map()->is_callable());
}
@@ -580,14 +560,17 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kNameOffset);
VerifyObjectField(kCodeOffset);
VerifyObjectField(kOptimizedCodeMapOffset);
- VerifyObjectField(kFeedbackVectorOffset);
+ VerifyObjectField(kFeedbackMetadataOffset);
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kInstanceClassNameOffset);
- CHECK(function_data()->IsUndefined() || IsApiFunction() ||
- HasBuiltinFunctionId() || HasBytecodeArray());
+ CHECK(function_data()->IsUndefined(GetIsolate()) || IsApiFunction() ||
+ HasBytecodeArray() || HasAsmWasmData());
VerifyObjectField(kFunctionDataOffset);
VerifyObjectField(kScriptOffset);
VerifyObjectField(kDebugInfoOffset);
+ CHECK(function_identifier()->IsUndefined(GetIsolate()) ||
+ HasBuiltinFunctionId() || HasInferredName());
+ VerifyObjectField(kFunctionIdentifierOffset);
}
@@ -618,12 +601,13 @@ void Oddball::OddballVerify() {
VerifyHeapPointer(to_string());
Object* number = to_number();
if (number->IsHeapObject()) {
- CHECK(number == heap->nan_value());
+ CHECK(number == heap->nan_value() ||
+ number == heap->hole_nan_value());
} else {
CHECK(number->IsSmi());
int value = Smi::cast(number)->value();
// Hidden oddballs have negative smis.
- const int kLeastHiddenOddballNumber = -6;
+ const int kLeastHiddenOddballNumber = -7;
CHECK_LE(value, 1);
CHECK(value >= kLeastHiddenOddballNumber);
}
@@ -648,6 +632,8 @@ void Oddball::OddballVerify() {
CHECK(this == heap->exception());
} else if (map() == heap->optimized_out_map()) {
CHECK(this == heap->optimized_out());
+ } else if (map() == heap->stale_register_map()) {
+ CHECK(this == heap->stale_register());
} else {
UNREACHABLE();
}
@@ -707,11 +693,25 @@ void Code::VerifyEmbeddedObjectsDependency() {
CHECK(map->dependent_code()->Contains(DependentCode::kWeakCodeGroup,
cell));
} else if (obj->IsJSObject()) {
- WeakHashTable* table =
- GetIsolate()->heap()->weak_object_to_code_table();
- Handle<HeapObject> key_obj(HeapObject::cast(obj), isolate);
- CHECK(DependentCode::cast(table->Lookup(key_obj))
- ->Contains(DependentCode::kWeakCodeGroup, cell));
+ if (isolate->heap()->InNewSpace(obj)) {
+ ArrayList* list =
+ GetIsolate()->heap()->weak_new_space_object_to_code_list();
+ bool found = false;
+ for (int i = 0; i < list->Length(); i += 2) {
+ WeakCell* obj_cell = WeakCell::cast(list->Get(i));
+ if (!obj_cell->cleared() && obj_cell->value() == obj &&
+ WeakCell::cast(list->Get(i + 1)) == cell) {
+ found = true;
+ break;
+ }
+ }
+ CHECK(found);
+ } else {
+ Handle<HeapObject> key_obj(HeapObject::cast(obj), isolate);
+ DependentCode* dep =
+ GetIsolate()->heap()->LookupWeakObjectToCodeDependency(key_obj);
+ dep->Contains(DependentCode::kWeakCodeGroup, cell);
+ }
}
}
}
@@ -720,12 +720,12 @@ void Code::VerifyEmbeddedObjectsDependency() {
void JSArray::JSArrayVerify() {
JSObjectVerify();
- CHECK(length()->IsNumber() || length()->IsUndefined());
+ Isolate* isolate = GetIsolate();
+ CHECK(length()->IsNumber() || length()->IsUndefined(isolate));
// If a GC was caused while constructing this array, the elements
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
- CHECK(elements()->IsUndefined() ||
- elements()->IsFixedArray() ||
+ CHECK(elements()->IsUndefined(isolate) || elements()->IsFixedArray() ||
elements()->IsFixedDoubleArray());
}
}
@@ -735,7 +735,7 @@ void JSSet::JSSetVerify() {
CHECK(IsJSSet());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(GetIsolate()));
// TODO(arv): Verify OrderedHashTable too.
}
@@ -744,7 +744,7 @@ void JSMap::JSMapVerify() {
CHECK(IsJSMap());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(GetIsolate()));
// TODO(arv): Verify OrderedHashTable too.
}
@@ -753,9 +753,10 @@ void JSSetIterator::JSSetIteratorVerify() {
CHECK(IsJSSetIterator());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
- CHECK(index()->IsSmi() || index()->IsUndefined());
- CHECK(kind()->IsSmi() || kind()->IsUndefined());
+ Isolate* isolate = GetIsolate();
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(isolate));
+ CHECK(index()->IsSmi() || index()->IsUndefined(isolate));
+ CHECK(kind()->IsSmi() || kind()->IsUndefined(isolate));
}
@@ -763,9 +764,10 @@ void JSMapIterator::JSMapIteratorVerify() {
CHECK(IsJSMapIterator());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsOrderedHashTable() || table()->IsUndefined());
- CHECK(index()->IsSmi() || index()->IsUndefined());
- CHECK(kind()->IsSmi() || kind()->IsUndefined());
+ Isolate* isolate = GetIsolate();
+ CHECK(table()->IsOrderedHashTable() || table()->IsUndefined(isolate));
+ CHECK(index()->IsSmi() || index()->IsUndefined(isolate));
+ CHECK(kind()->IsSmi() || kind()->IsUndefined(isolate));
}
@@ -773,7 +775,7 @@ void JSWeakMap::JSWeakMapVerify() {
CHECK(IsJSWeakMap());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
@@ -781,13 +783,14 @@ void JSWeakSet::JSWeakSetVerify() {
CHECK(IsJSWeakSet());
JSObjectVerify();
VerifyHeapPointer(table());
- CHECK(table()->IsHashTable() || table()->IsUndefined());
+ CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
void JSRegExp::JSRegExpVerify() {
JSObjectVerify();
- CHECK(data()->IsUndefined() || data()->IsFixedArray());
+ Isolate* isolate = GetIsolate();
+ CHECK(data()->IsUndefined(isolate) || data()->IsFixedArray());
switch (TypeTag()) {
case JSRegExp::ATOM: {
FixedArray* arr = FixedArray::cast(data());
@@ -823,7 +826,7 @@ void JSRegExp::JSRegExpVerify() {
}
default:
CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
- CHECK(data()->IsUndefined());
+ CHECK(data()->IsUndefined(isolate));
break;
}
}
@@ -833,10 +836,11 @@ void JSProxy::JSProxyVerify() {
CHECK(IsJSProxy());
VerifyPointer(target());
VerifyPointer(handler());
+ Isolate* isolate = GetIsolate();
CHECK_EQ(target()->IsCallable(), map()->is_callable());
CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
- CHECK(hash()->IsSmi() || hash()->IsUndefined());
- CHECK(map()->prototype()->IsNull());
+ CHECK(hash()->IsSmi() || hash()->IsUndefined(isolate));
+ CHECK(map()->prototype()->IsNull(isolate));
// There should be no properties on a Proxy.
CHECK_EQ(0, map()->NumberOfOwnDescriptors());
}
@@ -846,8 +850,8 @@ void JSArrayBuffer::JSArrayBufferVerify() {
CHECK(IsJSArrayBuffer());
JSObjectVerify();
VerifyPointer(byte_length());
- CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber()
- || byte_length()->IsUndefined());
+ CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber() ||
+ byte_length()->IsUndefined(GetIsolate()));
}
@@ -855,16 +859,17 @@ void JSArrayBufferView::JSArrayBufferViewVerify() {
CHECK(IsJSArrayBufferView());
JSObjectVerify();
VerifyPointer(buffer());
- CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined()
- || buffer() == Smi::FromInt(0));
+ Isolate* isolate = GetIsolate();
+ CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined(isolate) ||
+ buffer() == Smi::FromInt(0));
VerifyPointer(raw_byte_offset());
CHECK(raw_byte_offset()->IsSmi() || raw_byte_offset()->IsHeapNumber() ||
- raw_byte_offset()->IsUndefined());
+ raw_byte_offset()->IsUndefined(isolate));
VerifyPointer(raw_byte_length());
CHECK(raw_byte_length()->IsSmi() || raw_byte_length()->IsHeapNumber() ||
- raw_byte_length()->IsUndefined());
+ raw_byte_length()->IsUndefined(isolate));
}
@@ -873,7 +878,7 @@ void JSTypedArray::JSTypedArrayVerify() {
JSArrayBufferViewVerify();
VerifyPointer(raw_length());
CHECK(raw_length()->IsSmi() || raw_length()->IsHeapNumber() ||
- raw_length()->IsUndefined());
+ raw_length()->IsUndefined(GetIsolate()));
VerifyPointer(elements());
}
@@ -921,6 +926,7 @@ void AccessorInfo::AccessorInfoVerify() {
VerifyPointer(expected_receiver_type());
VerifyPointer(getter());
VerifyPointer(setter());
+ VerifyPointer(js_getter());
VerifyPointer(data());
}
@@ -934,9 +940,9 @@ void AccessorPair::AccessorPairVerify() {
void AccessCheckInfo::AccessCheckInfoVerify() {
CHECK(IsAccessCheckInfo());
- VerifyPointer(named_callback());
- VerifyPointer(indexed_callback());
VerifyPointer(callback());
+ VerifyPointer(named_interceptor());
+ VerifyPointer(indexed_interceptor());
VerifyPointer(data());
}
@@ -986,7 +992,7 @@ void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
CHECK(IsObjectTemplateInfo());
TemplateInfoVerify();
VerifyPointer(constructor());
- VerifyPointer(internal_field_count());
+ VerifyPointer(data());
}
@@ -1014,12 +1020,13 @@ void Script::ScriptVerify() {
void NormalizedMapCache::NormalizedMapCacheVerify() {
FixedArray::cast(this)->FixedArrayVerify();
if (FLAG_enable_slow_asserts) {
+ Isolate* isolate = GetIsolate();
for (int i = 0; i < length(); i++) {
Object* e = FixedArray::get(i);
if (e->IsMap()) {
Map::cast(e)->DictionaryMapVerify();
} else {
- CHECK(e->IsUndefined());
+ CHECK(e->IsUndefined(isolate));
}
}
}
@@ -1029,7 +1036,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
void DebugInfo::DebugInfoVerify() {
CHECK(IsDebugInfo());
VerifyPointer(shared());
- VerifyPointer(abstract_code());
+ VerifyPointer(debug_bytecode_array());
VerifyPointer(break_points());
}
@@ -1073,9 +1080,9 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
int len = e->length();
- Heap* heap = GetHeap();
+ Isolate* isolate = GetIsolate();
for (int i = 0; i < len; i++) {
- if (e->get(i) == heap->the_hole_value()) holes++;
+ if (e->get(i)->IsTheHole(isolate)) holes++;
}
info->number_of_fast_used_elements_ += len - holes;
info->number_of_fast_unused_elements_ += holes;
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index b75dd1c969..3d82bf8205 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -14,6 +14,7 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins.h"
#include "src/contexts-inl.h"
#include "src/conversions-inl.h"
#include "src/factory.h"
@@ -22,8 +23,9 @@
#include "src/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
#include "src/isolate-inl.h"
+#include "src/isolate.h"
+#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
@@ -76,25 +78,35 @@ int PropertyDetails::field_width_in_words() const {
int holder::name() const { return READ_INT_FIELD(this, offset); } \
void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
-
-#define ACCESSORS(holder, name, type, offset) \
- type* holder::name() const { return type::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
+ type* holder::name() const { \
+ DCHECK(condition); \
+ return type::cast(READ_FIELD(this, offset)); \
+ } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ DCHECK(condition); \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
}
+#define ACCESSORS(holder, name, type, offset) \
+ ACCESSORS_CHECKED(holder, name, type, offset, true)
// Getter that returns a Smi as an int and writes an int as a Smi.
-#define SMI_ACCESSORS(holder, name, offset) \
- int holder::name() const { \
- Object* value = READ_FIELD(this, offset); \
- return Smi::cast(value)->value(); \
- } \
- void holder::set_##name(int value) { \
- WRITE_FIELD(this, offset, Smi::FromInt(value)); \
+#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
+ int holder::name() const { \
+ DCHECK(condition); \
+ Object* value = READ_FIELD(this, offset); \
+ return Smi::cast(value)->value(); \
+ } \
+ void holder::set_##name(int value) { \
+ DCHECK(condition); \
+ WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
+#define SMI_ACCESSORS(holder, name, offset) \
+ SMI_ACCESSORS_CHECKED(holder, name, offset, true)
+
#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
int holder::synchronized_##name() const { \
Object* value = ACQUIRE_READ_FIELD(this, offset); \
@@ -159,6 +171,15 @@ SIMD128_TYPES(SIMD128_TYPE_CHECKER)
return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
+#undef IS_TYPE_FUNCTION_DEF
+
+#define IS_TYPE_FUNCTION_DEF(Type, Value) \
+ bool Object::Is##Type(Isolate* isolate) const { \
+ return this == isolate->heap()->Value(); \
+ } \
+ bool HeapObject::Is##Type(Isolate* isolate) const { \
+ return this == isolate->heap()->Value(); \
+ }
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
@@ -244,7 +265,6 @@ bool HeapObject::IsExternalTwoByteString() const {
String::cast(this)->IsTwoByteRepresentation();
}
-
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
@@ -283,12 +303,12 @@ bool Object::FilterKey(PropertyFilter filter) {
Handle<Object> Object::NewStorageFor(Isolate* isolate,
Handle<Object> object,
Representation representation) {
- if (representation.IsSmi() && object->IsUninitialized()) {
+ if (representation.IsSmi() && object->IsUninitialized(isolate)) {
return handle(Smi::FromInt(0), isolate);
}
if (!representation.IsDouble()) return object;
double value;
- if (object->IsUninitialized()) {
+ if (object->IsUninitialized(isolate)) {
value = 0;
} else if (object->IsMutableHeapNumber()) {
value = HeapNumber::cast(*object)->value();
@@ -302,7 +322,7 @@ Handle<Object> Object::NewStorageFor(Isolate* isolate,
Handle<Object> Object::WrapForRead(Isolate* isolate,
Handle<Object> object,
Representation representation) {
- DCHECK(!object->IsUninitialized());
+ DCHECK(!object->IsUninitialized(isolate));
if (!representation.IsDouble()) {
DCHECK(object->FitsRepresentation(representation));
return object;
@@ -685,6 +705,8 @@ bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
}
+bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
+
bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
bool HeapObject::IsArrayList() const { return IsFixedArray(); }
@@ -730,6 +752,14 @@ bool HeapObject::IsHandlerTable() const {
return true;
}
+bool HeapObject::IsTemplateList() const {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a template list.
+ if (FixedArray::cast(this)->length() < 1) return false;
+ return true;
+}
+
bool HeapObject::IsDependentCode() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
@@ -776,11 +806,12 @@ TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
-TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
-TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
+TYPE_CHECKER(JSError, JS_ERROR_TYPE)
+TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
+TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
+TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
bool HeapObject::IsAbstractCode() const {
return IsBytecodeArray() || IsCode();
@@ -839,15 +870,16 @@ bool Object::IsSeededNumberDictionary() const {
return IsDictionary();
}
-
-bool Object::IsUnseededNumberDictionary() const {
- return IsDictionary();
+bool HeapObject::IsUnseededNumberDictionary() const {
+ return map() == GetHeap()->unseeded_number_dictionary_map();
}
bool HeapObject::IsStringTable() const { return IsHashTable(); }
bool HeapObject::IsStringSet() const { return IsHashTable(); }
+bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
+
bool HeapObject::IsNormalizedMapCache() const {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
@@ -875,10 +907,6 @@ bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
-bool HeapObject::IsPolymorphicCodeCacheHashTable() const {
- return IsHashTable();
-}
-
bool HeapObject::IsMapCache() const { return IsHashTable(); }
bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
@@ -941,13 +969,6 @@ bool HeapObject::IsStruct() const {
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
-#define MAKE_ODDBALL_PREDICATE(Name) \
- bool HeapObject::Is##Name() const { \
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::k##Name; \
- }
-ODDBALL_LIST(MAKE_ODDBALL_PREDICATE)
-
-#undef MAKE_ODDBALL_PREDICATE
double Object::Number() const {
DCHECK(IsNumber());
return IsSmi()
@@ -973,7 +994,8 @@ Representation Object::OptimalRepresentation() {
return Representation::Smi();
} else if (FLAG_track_double_fields && IsHeapNumber()) {
return Representation::Double();
- } else if (FLAG_track_computed_fields && IsUninitialized()) {
+ } else if (FLAG_track_computed_fields &&
+ IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
return Representation::None();
} else if (FLAG_track_heap_object_fields) {
DCHECK(IsHeapObject());
@@ -1099,8 +1121,7 @@ MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
Handle<JSReceiver> receiver) {
// We don't expect access checks to be needed on JSProxy objects.
DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
- PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER,
+ PrototypeIterator iter(isolate, receiver, kStartAtReceiver,
PrototypeIterator::END_AT_NON_HIDDEN);
do {
if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
@@ -1115,6 +1136,27 @@ MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
return GetProperty(receiver, str);
}
+// static
+MUST_USE_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
+ Handle<JSReceiver> object) {
+ return KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES,
+ GetKeysConversion::kConvertToString);
+}
+
+bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
+ DisallowHeapAllocation no_gc;
+ HeapObject* prototype = HeapObject::cast(object->map()->prototype());
+ HeapObject* null = isolate->heap()->null_value();
+ HeapObject* empty = isolate->heap()->empty_fixed_array();
+ while (prototype != null) {
+ Map* map = prototype->map();
+ if (map->instance_type() <= LAST_CUSTOM_ELEMENTS_RECEIVER) return false;
+ if (JSObject::cast(prototype)->elements() != empty) return false;
+ prototype = HeapObject::cast(map->prototype());
+ }
+ return true;
+}
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
@@ -1151,6 +1193,12 @@ MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
object, HeapObject::RawField(object, offset), value); \
heap->RecordWrite(object, offset, value);
+#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
+ do { \
+ heap->RecordFixedArrayElements(array, start, length); \
+ heap->incremental_marking()->IterateBlackObject(array); \
+ } while (false)
+
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
@@ -1261,8 +1309,7 @@ Map* MapWord::ToMap() {
return reinterpret_cast<Map*>(value_);
}
-
-bool MapWord::IsForwardingAddress() {
+bool MapWord::IsForwardingAddress() const {
return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
}
@@ -1741,7 +1788,7 @@ inline bool AllocationSite::DigestPretenuringFeedback(
PrintIsolate(GetIsolate(),
"pretenuring: AllocationSite(%p): (created, found, ratio) "
"(%d, %d, %f) %s => %s\n",
- this, create_count, found_count, ratio,
+ static_cast<void*>(this), create_count, found_count, ratio,
PretenureDecisionName(current_decision),
PretenureDecisionName(pretenure_decision()));
}
@@ -1785,15 +1832,14 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
Object** objects,
uint32_t count,
EnsureElementsMode mode) {
- ElementsKind current_kind = object->map()->elements_kind();
+ ElementsKind current_kind = object->GetElementsKind();
ElementsKind target_kind = current_kind;
{
DisallowHeapAllocation no_allocation;
DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
bool is_holey = IsFastHoleyElementsKind(current_kind);
if (current_kind == FAST_HOLEY_ELEMENTS) return;
- Heap* heap = object->GetHeap();
- Object* the_hole = heap->the_hole_value();
+ Object* the_hole = object->GetHeap()->the_hole_value();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (current == the_hole) {
@@ -1909,10 +1955,16 @@ InterceptorInfo* Map::GetIndexedInterceptor() {
constructor->shared()->get_api_func_data()->indexed_property_handler());
}
+double Oddball::to_number_raw() const {
+ return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
+}
+
+void Oddball::set_to_number_raw(double value) {
+ WRITE_DOUBLE_FIELD(this, kToNumberRawOffset, value);
+}
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
-ACCESSORS(Oddball, to_boolean, Oddball, kToBooleanOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
@@ -1965,10 +2017,9 @@ void WeakCell::initialize(HeapObject* val) {
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
- WriteBarrierMode mode =
- Page::FromAddress(this->address())->IsFlagSet(Page::BLACK_PAGE)
- ? UPDATE_WRITE_BARRIER
- : UPDATE_WEAK_WRITE_BARRIER;
+ WriteBarrierMode mode = Marking::IsBlack(ObjectMarking::MarkBitFrom(this))
+ ? UPDATE_WRITE_BARRIER
+ : UPDATE_WEAK_WRITE_BARRIER;
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
}
@@ -1992,9 +2043,7 @@ void WeakCell::clear_next(Object* the_hole_value) {
set_next(the_hole_value, SKIP_WRITE_BARRIER);
}
-
-bool WeakCell::next_cleared() { return next()->IsTheHole(); }
-
+bool WeakCell::next_cleared() { return next()->IsTheHole(GetIsolate()); }
int JSObject::GetHeaderSize() { return GetHeaderSize(map()->instance_type()); }
@@ -2005,12 +2054,11 @@ int JSObject::GetHeaderSize(InstanceType type) {
// field operations considerably on average.
if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
switch (type) {
+ case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_GENERATOR_OBJECT_TYPE:
return JSGeneratorObject::kSize;
- case JS_MODULE_TYPE:
- return JSModule::kSize;
case JS_GLOBAL_PROXY_TYPE:
return JSGlobalProxy::kSize;
case JS_GLOBAL_OBJECT_TYPE:
@@ -2051,6 +2099,10 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSObject::kHeaderSize;
case JS_MESSAGE_OBJECT_TYPE:
return JSMessageObject::kSize;
+ case JS_ARGUMENTS_TYPE:
+ return JSArgumentsObject::kHeaderSize;
+ case JS_ERROR_TYPE:
+ return JSObject::kHeaderSize;
default:
UNREACHABLE();
return 0;
@@ -2170,7 +2222,9 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
// Nothing more to be done.
- if (value->IsUninitialized()) return;
+ if (value->IsUninitialized(this->GetIsolate())) {
+ return;
+ }
if (IsUnboxedDoubleField(index)) {
RawFastDoublePropertyAtPut(index, value->Number());
} else {
@@ -2263,9 +2317,12 @@ bool Object::ToArrayIndex(uint32_t* index) {
void Object::VerifyApiCallResultType() {
#if DEBUG
- if (!(IsSmi() || IsString() || IsSymbol() || IsJSReceiver() ||
- IsHeapNumber() || IsSimd128Value() || IsUndefined() || IsTrue() ||
- IsFalse() || IsNull())) {
+ if (IsSmi()) return;
+ DCHECK(IsHeapObject());
+ Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+ if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
+ IsSimd128Value() || IsUndefined(isolate) || IsTrue(isolate) ||
+ IsFalse(isolate) || IsNull(isolate))) {
FATAL("API call returned invalid object");
}
#endif // DEBUG
@@ -2281,12 +2338,24 @@ Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
return handle(array->get(index), isolate);
}
+template <class T>
+MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
+ Object* obj = get(index);
+ if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
+ return Handle<T>(T::cast(obj), isolate);
+}
+
+template <class T>
+Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
+ Object* obj = get(index);
+ CHECK(!obj->IsUndefined(isolate));
+ return Handle<T>(T::cast(obj), isolate);
+}
bool FixedArray::is_the_hole(int index) {
return get(index) == GetHeap()->the_hole_value();
}
-
void FixedArray::set(int index, Smi* value) {
DCHECK(map() != GetHeap()->fixed_cow_array_map());
DCHECK(index >= 0 && index < this->length());
@@ -2441,14 +2510,13 @@ Object** ArrayList::Slot(int index) {
return data_start() + kFirstIndex + index;
}
-
-void ArrayList::Set(int index, Object* obj) {
- FixedArray::cast(this)->set(kFirstIndex + index, obj);
+void ArrayList::Set(int index, Object* obj, WriteBarrierMode mode) {
+ FixedArray::cast(this)->set(kFirstIndex + index, obj, mode);
}
void ArrayList::Clear(int index, Object* undefined) {
- DCHECK(undefined->IsUndefined());
+ DCHECK(undefined->IsUndefined(GetIsolate()));
FixedArray::cast(this)
->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
}
@@ -2780,18 +2848,18 @@ void Map::SetEnumLength(int length) {
FixedArrayBase* Map::GetInitialElements() {
+ FixedArrayBase* result = nullptr;
if (has_fast_elements() || has_fast_string_wrapper_elements()) {
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
- return GetHeap()->empty_fixed_array();
+ result = GetHeap()->empty_fixed_array();
+ } else if (has_fast_sloppy_arguments_elements()) {
+ result = GetHeap()->empty_sloppy_arguments_elements();
} else if (has_fixed_typed_array_elements()) {
- FixedTypedArrayBase* empty_array =
- GetHeap()->EmptyFixedTypedArrayForMap(this);
- DCHECK(!GetHeap()->InNewSpace(empty_array));
- return empty_array;
+ result = GetHeap()->EmptyFixedTypedArrayForMap(this);
} else {
UNREACHABLE();
}
- return NULL;
+ DCHECK(!GetHeap()->InNewSpace(result));
+ return result;
}
// static
@@ -3018,12 +3086,14 @@ int HashTableBase::ComputeCapacity(int at_least_space_for) {
return Max(capacity, kMinCapacity);
}
-bool HashTableBase::IsKey(Heap* heap, Object* k) {
+bool HashTableBase::IsKey(Isolate* isolate, Object* k) {
+ Heap* heap = isolate->heap();
return k != heap->the_hole_value() && k != heap->undefined_value();
}
bool HashTableBase::IsKey(Object* k) {
- return !k->IsTheHole() && !k->IsUndefined();
+ Isolate* isolate = this->GetIsolate();
+ return !k->IsTheHole(isolate) && !k->IsUndefined(isolate);
}
@@ -3036,6 +3106,10 @@ void HashTableBase::SetNumberOfDeletedElements(int nod) {
set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
}
+template <typename Key>
+Map* BaseShape<Key>::GetMap(Isolate* isolate) {
+ return isolate->heap()->hash_table_map();
+}
template <typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Key key) {
@@ -3048,7 +3122,6 @@ int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key) {
return FindEntry(isolate, key, HashTable::Hash(key));
}
-
// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key,
@@ -3057,18 +3130,39 @@ int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key,
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
+ Object* undefined = isolate->heap()->undefined_value();
+ Object* the_hole = isolate->heap()->the_hole_value();
while (true) {
Object* element = KeyAt(entry);
// Empty entry. Uses raw unchecked accessors because it is called by the
// string table during bootstrapping.
- if (element == isolate->heap()->root(Heap::kUndefinedValueRootIndex)) break;
- if (element != isolate->heap()->root(Heap::kTheHoleValueRootIndex) &&
- Shape::IsMatch(key, element)) return entry;
+ if (element == undefined) break;
+ if (element != the_hole && Shape::IsMatch(key, element)) return entry;
entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
}
+template <typename Derived, typename Shape, typename Key>
+bool HashTable<Derived, Shape, Key>::Has(Key key) {
+ return FindEntry(key) != kNotFound;
+}
+
+template <typename Derived, typename Shape, typename Key>
+bool HashTable<Derived, Shape, Key>::Has(Isolate* isolate, Key key) {
+ return FindEntry(isolate, key) != kNotFound;
+}
+
+bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
+ return FindEntry(isolate, key, hash) != kNotFound;
+}
+
+bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
+ Object* hash = key->GetHash();
+ if (!hash->IsSmi()) return false;
+ return FindEntry(isolate, key, Smi::cast(hash)->value()) != kNotFound;
+}
+
bool StringSetShape::IsMatch(String* key, Object* value) {
return value->IsString() && key->Equals(String::cast(value));
}
@@ -3148,7 +3242,6 @@ CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSModule)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
@@ -3157,6 +3250,7 @@ CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
+CAST_ACCESSOR(JSWeakCollection)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
@@ -3166,11 +3260,12 @@ CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(Object)
CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(ObjectHashSet)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
CAST_ACCESSOR(PropertyCell)
+CAST_ACCESSOR(TemplateList)
CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(SeededNumberDictionary)
CAST_ACCESSOR(SeqOneByteString)
@@ -3185,6 +3280,7 @@ CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Symbol)
+CAST_ACCESSOR(TemplateInfo)
CAST_ACCESSOR(Uint16x8)
CAST_ACCESSOR(Uint32x4)
CAST_ACCESSOR(Uint8x16)
@@ -3318,11 +3414,19 @@ LiteralsArray* LiteralsArray::cast(Object* object) {
TypeFeedbackVector* LiteralsArray::feedback_vector() const {
+ if (length() == 0) {
+ return TypeFeedbackVector::cast(
+ const_cast<FixedArray*>(FixedArray::cast(this)));
+ }
return TypeFeedbackVector::cast(get(kVectorIndex));
}
void LiteralsArray::set_feedback_vector(TypeFeedbackVector* vector) {
+ if (length() <= kVectorIndex) {
+ DCHECK(vector->length() == 0);
+ return;
+ }
set(kVectorIndex, vector);
}
@@ -3336,6 +3440,9 @@ void LiteralsArray::set_literal(int literal_index, Object* literal) {
set(kFirstLiteralIndex + literal_index, literal);
}
+void LiteralsArray::set_literal_undefined(int literal_index) {
+ set_undefined(kFirstLiteralIndex + literal_index);
+}
int LiteralsArray::literals_count() const {
return length() - kFirstLiteralIndex;
@@ -3358,12 +3465,6 @@ int HandlerTable::GetRangeData(int index) const {
return Smi::cast(get(index * kRangeEntrySize + kRangeDataIndex))->value();
}
-HandlerTable::CatchPrediction HandlerTable::GetRangePrediction(
- int index) const {
- return HandlerPredictionField::decode(
- Smi::cast(get(index * kRangeEntrySize + kRangeHandlerIndex))->value());
-}
-
void HandlerTable::SetRangeStart(int index, int value) {
set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
}
@@ -3390,11 +3491,8 @@ void HandlerTable::SetReturnOffset(int index, int value) {
set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
}
-
-void HandlerTable::SetReturnHandler(int index, int offset,
- CatchPrediction prediction) {
- int value = HandlerOffsetField::encode(offset) |
- HandlerPredictionField::encode(prediction);
+void HandlerTable::SetReturnHandler(int index, int offset) {
+ int value = HandlerOffsetField::encode(offset);
set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
}
@@ -3916,24 +4014,43 @@ void StringCharacterStream::VisitTwoByteString(
int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
-
byte ByteArray::get(int index) {
DCHECK(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
+const byte* ByteArray::data() const {
+ return reinterpret_cast<const byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
+}
void ByteArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
}
+void ByteArray::copy_in(int index, const byte* buffer, int length) {
+ DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+ index + length <= this->length());
+ byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ memcpy(dst_addr, buffer, length);
+}
+
+void ByteArray::copy_out(int index, byte* buffer, int length) {
+ DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+ index + length <= this->length());
+ const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
+ memcpy(buffer, src_addr, length);
+}
int ByteArray::get_int(int index) {
- DCHECK(index >= 0 && (index * kIntSize) < this->length());
+ DCHECK(index >= 0 && index < this->length() / kIntSize);
return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
}
+void ByteArray::set_int(int index, int value) {
+ DCHECK(index >= 0 && index < this->length() / kIntSize);
+ WRITE_INT_FIELD(this, kHeaderSize + index * kIntSize, value);
+}
ByteArray* ByteArray::FromDataStartAddress(Address address) {
DCHECK_TAG_ALIGNED(address);
@@ -3995,6 +4112,16 @@ void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
}
+int BytecodeArray::osr_loop_nesting_level() const {
+ return READ_INT8_FIELD(this, kOSRNestingLevelOffset);
+}
+
+void BytecodeArray::set_osr_loop_nesting_level(int depth) {
+ DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
+ STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
+ WRITE_INT8_FIELD(this, kOSRNestingLevelOffset, depth);
+}
+
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
@@ -4014,6 +4141,13 @@ Address BytecodeArray::GetFirstBytecodeAddress() {
int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
+int BytecodeArray::SizeIncludingMetadata() {
+ int size = BytecodeArraySize();
+ size += constant_pool()->Size();
+ size += handler_table()->Size();
+ size += source_position_table()->Size();
+ return size;
+}
ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
@@ -4189,7 +4323,7 @@ void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
} else {
// Clamp undefined to the default value. All other types have been
// converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
+ DCHECK(value->IsUndefined(GetIsolate()));
}
set(index, cast_value);
}
@@ -4461,11 +4595,6 @@ bool Map::is_undetectable() {
}
-void Map::set_is_observed() { set_bit_field(bit_field() | (1 << kIsObserved)); }
-
-bool Map::is_observed() { return ((1 << kIsObserved) & bit_field()) != 0; }
-
-
void Map::set_has_named_interceptor() {
set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
}
@@ -4511,6 +4640,10 @@ bool Map::is_prototype_map() const {
return IsPrototypeMapBits::decode(bit_field2());
}
+bool Map::should_be_fast_prototype_map() const {
+ if (!prototype_info()->IsPrototypeInfo()) return false;
+ return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
+}
void Map::set_elements_kind(ElementsKind elements_kind) {
DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
@@ -4547,6 +4680,10 @@ bool Map::has_sloppy_arguments_elements() {
return IsSloppyArgumentsElements(elements_kind());
}
+bool Map::has_fast_sloppy_arguments_elements() {
+ return elements_kind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
bool Map::has_fast_string_wrapper_elements() {
return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
}
@@ -4614,6 +4751,13 @@ bool Map::is_migration_target() {
return IsMigrationTarget::decode(bit_field3());
}
+void Map::set_immutable_proto(bool value) {
+ set_bit_field3(ImmutablePrototype::update(bit_field3(), value));
+}
+
+bool Map::is_immutable_proto() {
+ return ImmutablePrototype::decode(bit_field3());
+}
void Map::set_new_target_is_base(bool value) {
set_bit_field3(NewTargetIsBase::update(bit_field3(), value));
@@ -4644,7 +4788,9 @@ bool Map::is_stable() {
bool Map::has_code_cache() {
- return code_cache() != GetIsolate()->heap()->empty_fixed_array();
+ // Code caches are always fixed arrays. The empty fixed array is used as a
+ // sentinel for an absent code cache.
+ return code_cache()->length() != 0;
}
@@ -4776,43 +4922,25 @@ Code::Kind Code::kind() {
return ExtractKindFromFlags(flags());
}
-
bool Code::IsCodeStubOrIC() {
- return kind() == STUB || kind() == HANDLER || kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC || kind() == CALL_IC || kind() == STORE_IC ||
- kind() == KEYED_STORE_IC || kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC || kind() == TO_BOOLEAN_IC;
-}
-
-
-bool Code::IsJavaScriptCode() {
- return kind() == FUNCTION || kind() == OPTIMIZED_FUNCTION ||
- is_interpreter_entry_trampoline();
-}
-
-
-InlineCacheState Code::ic_state() {
- InlineCacheState result = ExtractICStateFromFlags(flags());
- // Only allow uninitialized or debugger states for non-IC code
- // objects. This is used in the debugger to determine whether or not
- // a call to code object has been replaced with a debug break call.
- DCHECK(is_inline_cache_stub() ||
- result == UNINITIALIZED ||
- result == DEBUG_STUB);
- return result;
+ switch (kind()) {
+ case STUB:
+ case HANDLER:
+#define CASE_KIND(kind) case kind:
+ IC_KIND_LIST(CASE_KIND)
+#undef CASE_KIND
+ return true;
+ default:
+ return false;
+ }
}
-
ExtraICState Code::extra_ic_state() {
- DCHECK(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+ DCHECK(is_inline_cache_stub() || is_debug_stub());
return ExtractExtraICStateFromFlags(flags());
}
-Code::StubType Code::type() {
- return ExtractTypeFromFlags(flags());
-}
-
// For initialization.
void Code::set_raw_kind_specific_flags1(int value) {
WRITE_INT_FIELD(this, kKindSpecificFlags1Offset, value);
@@ -4834,18 +4962,21 @@ inline bool Code::is_hydrogen_stub() {
return is_crankshafted() && kind() != OPTIMIZED_FUNCTION;
}
+inline bool Code::is_interpreter_trampoline_builtin() {
+ Builtins* builtins = GetIsolate()->builtins();
+ return this == *builtins->InterpreterEntryTrampoline() ||
+ this == *builtins->InterpreterEnterBytecodeDispatch() ||
+ this == *builtins->InterpreterMarkBaselineOnReturn();
+}
-inline bool Code::is_interpreter_entry_trampoline() {
- Handle<Code> interpreter_entry =
- GetIsolate()->builtins()->InterpreterEntryTrampoline();
- return interpreter_entry.location() != nullptr && *interpreter_entry == this;
+inline bool Code::has_unwinding_info() const {
+ return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
-inline bool Code::is_interpreter_enter_bytecode_dispatch() {
- Handle<Code> interpreter_handler =
- GetIsolate()->builtins()->InterpreterEnterBytecodeDispatch();
- return interpreter_handler.location() != nullptr &&
- *interpreter_handler == this;
+inline void Code::set_has_unwinding_info(bool state) {
+ uint32_t previous = READ_UINT32_FIELD(this, kFlagsOffset);
+ uint32_t updated_value = HasUnwindingInfoField::update(previous, state);
+ WRITE_UINT32_FIELD(this, kFlagsOffset, updated_value);
}
inline void Code::set_is_crankshafted(bool value) {
@@ -4882,6 +5013,18 @@ inline void Code::set_can_have_weak_objects(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
+inline bool Code::is_construct_stub() {
+ DCHECK(kind() == BUILTIN);
+ return IsConstructStubField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+inline void Code::set_is_construct_stub(bool value) {
+ DCHECK(kind() == BUILTIN);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = IsConstructStubField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
bool Code::has_deoptimization_support() {
DCHECK_EQ(FUNCTION, kind());
@@ -4937,7 +5080,7 @@ int Code::allow_osr_at_loop_nesting_level() {
void Code::set_allow_osr_at_loop_nesting_level(int level) {
DCHECK_EQ(FUNCTION, kind());
- DCHECK(level >= 0 && level <= kMaxLoopNestingMarker);
+ DCHECK(level >= 0 && level <= AbstractCode::kMaxLoopNestingMarker);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
@@ -5051,18 +5194,19 @@ bool Code::is_inline_cache_stub() {
}
}
-
-bool Code::is_keyed_stub() {
- return is_keyed_load_stub() || is_keyed_store_stub();
+bool Code::is_debug_stub() {
+ if (kind() != BUILTIN) return false;
+ switch (builtin_index()) {
+#define CASE_DEBUG_BUILTIN(name) case Builtins::k##name:
+ BUILTIN_LIST_DBG(CASE_DEBUG_BUILTIN)
+#undef CASE_DEBUG_BUILTIN
+ return true;
+ default:
+ return false;
+ }
+ return false;
}
-
-
-bool Code::is_debug_stub() { return ic_state() == DEBUG_STUB; }
bool Code::is_handler() { return kind() == HANDLER; }
-bool Code::is_load_stub() { return kind() == LOAD_IC; }
-bool Code::is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
-bool Code::is_store_stub() { return kind() == STORE_IC; }
-bool Code::is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
bool Code::is_call_stub() { return kind() == CALL_IC; }
bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
@@ -5070,14 +5214,6 @@ bool Code::is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
bool Code::is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
bool Code::is_wasm_code() { return kind() == WASM_FUNCTION; }
-bool Code::embeds_maps_weakly() {
- Kind k = kind();
- return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
- k == KEYED_STORE_IC) &&
- ic_state() == MONOMORPHIC;
-}
-
-
Address Code::constant_pool() {
Address constant_pool = NULL;
if (FLAG_enable_embedded_constant_pool) {
@@ -5089,28 +5225,18 @@ Address Code::constant_pool() {
return constant_pool;
}
-Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state,
- ExtraICState extra_ic_state, StubType type,
+Code::Flags Code::ComputeFlags(Kind kind, ExtraICState extra_ic_state,
CacheHolderFlag holder) {
// Compute the bit mask.
- unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) |
- TypeField::encode(type) |
+ unsigned int bits = KindField::encode(kind) |
ExtraICStateField::encode(extra_ic_state) |
CacheHolderField::encode(holder);
return static_cast<Flags>(bits);
}
-Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
- ExtraICState extra_ic_state,
- CacheHolderFlag holder,
- StubType type) {
- return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
-}
-
-
-Code::Flags Code::ComputeHandlerFlags(Kind handler_kind, StubType type,
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
CacheHolderFlag holder) {
- return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
+ return ComputeFlags(Code::HANDLER, handler_kind, holder);
}
@@ -5119,33 +5245,17 @@ Code::Kind Code::ExtractKindFromFlags(Flags flags) {
}
-InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
- return ICStateField::decode(flags);
-}
-
-
ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
return ExtraICStateField::decode(flags);
}
-Code::StubType Code::ExtractTypeFromFlags(Flags flags) {
- return TypeField::decode(flags);
-}
-
CacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
-
-Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
- int bits = flags & ~TypeField::kMask;
- return static_cast<Flags>(bits);
-}
-
-
-Code::Flags Code::RemoveTypeAndHolderFromFlags(Flags flags) {
- int bits = flags & ~TypeField::kMask & ~CacheHolderField::kMask;
+Code::Flags Code::RemoveHolderFromFlags(Flags flags) {
+ int bits = flags & ~CacheHolderField::kMask;
return static_cast<Flags>(bits);
}
@@ -5223,6 +5333,39 @@ int AbstractCode::instruction_size() {
}
}
+ByteArray* AbstractCode::source_position_table() {
+ if (IsCode()) {
+ return GetCode()->source_position_table();
+ } else {
+ return GetBytecodeArray()->source_position_table();
+ }
+}
+
+void AbstractCode::set_source_position_table(ByteArray* source_position_table) {
+ if (IsCode()) {
+ GetCode()->set_source_position_table(source_position_table);
+ } else {
+ GetBytecodeArray()->set_source_position_table(source_position_table);
+ }
+}
+
+int AbstractCode::LookupRangeInHandlerTable(
+ int code_offset, int* data, HandlerTable::CatchPrediction* prediction) {
+ if (IsCode()) {
+ return GetCode()->LookupRangeInHandlerTable(code_offset, data, prediction);
+ } else {
+ return GetBytecodeArray()->LookupRangeInHandlerTable(code_offset, data,
+ prediction);
+ }
+}
+
+int AbstractCode::SizeIncludingMetadata() {
+ if (IsCode()) {
+ return GetCode()->SizeIncludingMetadata();
+ } else {
+ return GetBytecodeArray()->SizeIncludingMetadata();
+ }
+}
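
Each new AbstractCode helper has the same shape: check which representation backs the object, then forward to Code or BytecodeArray. A standalone analogue of that dispatch, using std::variant in place of V8's tagged heap pointers and stand-in payload types:

#include <cstdio>
#include <variant>

struct MachineCode { int instruction_size; };
struct Bytecode { int length; };

// A thin wrapper that forwards to whichever representation it holds,
// mirroring the IsCode()/GetCode()/GetBytecodeArray() dispatch above.
struct AbstractCodeLike {
  std::variant<MachineCode, Bytecode> impl;

  int InstructionSize() const {
    if (auto* code = std::get_if<MachineCode>(&impl)) {
      return code->instruction_size;
    }
    return std::get<Bytecode>(impl).length;
  }
};

int main() {
  AbstractCodeLike a{MachineCode{128}};
  AbstractCodeLike b{Bytecode{40}};
  std::printf("%d %d\n", a.InstructionSize(), b.InstructionSize());
  return 0;
}
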
int AbstractCode::ExecutableSize() {
if (IsCode()) {
return GetCode()->ExecutableSize();
@@ -5274,20 +5417,20 @@ Object* Map::prototype() const {
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- DCHECK(value->IsNull() || value->IsJSReceiver());
+ DCHECK(value->IsNull(GetIsolate()) || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
}
LayoutDescriptor* Map::layout_descriptor_gc_safe() {
- Object* layout_desc = READ_FIELD(this, kLayoutDecriptorOffset);
+ Object* layout_desc = READ_FIELD(this, kLayoutDescriptorOffset);
return LayoutDescriptor::cast_gc_safe(layout_desc);
}
bool Map::HasFastPointerLayout() const {
- Object* layout_desc = READ_FIELD(this, kLayoutDecriptorOffset);
+ Object* layout_desc = READ_FIELD(this, kLayoutDescriptorOffset);
return LayoutDescriptor::IsFastPointerLayout(layout_desc);
}
@@ -5335,8 +5478,7 @@ void Map::InitializeDescriptors(DescriptorArray* descriptors,
ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
-ACCESSORS(Map, layout_descriptor, LayoutDescriptor, kLayoutDecriptorOffset)
-
+ACCESSORS(Map, layout_descriptor, LayoutDescriptor, kLayoutDescriptorOffset)
void Map::set_bit_field3(uint32_t bits) {
if (kInt32Size != kPointerSize) {
@@ -5407,14 +5549,14 @@ void Map::set_prototype_info(Object* value, WriteBarrierMode mode) {
void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- DCHECK((value->IsMap() && GetBackPointer()->IsUndefined()));
+ DCHECK(value->IsMap());
+ DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
DCHECK(!value->IsMap() ||
Map::cast(value)->GetConstructor() == constructor_or_backpointer());
set_constructor_or_backpointer(value, mode);
}
-
-ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
ACCESSORS(Map, constructor_or_backpointer, Object,
@@ -5445,8 +5587,6 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map) {
}
-ACCESSORS(JSBoundFunction, length, Object, kLengthOffset)
-ACCESSORS(JSBoundFunction, name, Object, kNameOffset)
ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
kBoundTargetFunctionOffset)
ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
@@ -5469,13 +5609,58 @@ ACCESSORS(AccessorInfo, expected_receiver_type, Object,
ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(Box, value, Object, kValueOffset)
+Map* PrototypeInfo::ObjectCreateMap() {
+ return Map::cast(WeakCell::cast(object_create_map())->value());
+}
+
+// static
+void PrototypeInfo::SetObjectCreateMap(Handle<PrototypeInfo> info,
+ Handle<Map> map) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ info->set_object_create_map(*cell);
+}
+
+bool PrototypeInfo::HasObjectCreateMap() {
+ Object* cache = object_create_map();
+ return cache->IsWeakCell() && !WeakCell::cast(cache)->cleared();
+}
+
+bool FunctionTemplateInfo::instantiated() {
+ return shared_function_info()->IsSharedFunctionInfo();
+}
+
+FunctionTemplateInfo* FunctionTemplateInfo::GetParent(Isolate* isolate) {
+ Object* parent = parent_template();
+ return parent->IsUndefined(isolate) ? nullptr
+ : FunctionTemplateInfo::cast(parent);
+}
+
+ObjectTemplateInfo* ObjectTemplateInfo::GetParent(Isolate* isolate) {
+ Object* maybe_ctor = constructor();
+ if (maybe_ctor->IsUndefined(isolate)) return nullptr;
+ FunctionTemplateInfo* constructor = FunctionTemplateInfo::cast(maybe_ctor);
+ while (true) {
+ constructor = constructor->GetParent(isolate);
+ if (constructor == nullptr) return nullptr;
+ Object* maybe_obj = constructor->instance_template();
+ if (!maybe_obj->IsUndefined(isolate)) {
+ return ObjectTemplateInfo::cast(maybe_obj);
+ }
+ }
+ return nullptr;
+}
+
ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
+ACCESSORS(PrototypeInfo, object_create_map, Object, kObjectCreateMap)
SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
+SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
+BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
ACCESSORS(SloppyBlockWithEvalContextExtension, scope_info, ScopeInfo,
kScopeInfoOffset)
@@ -5485,9 +5670,10 @@ ACCESSORS(SloppyBlockWithEvalContextExtension, extension, JSObject,
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
-ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
-ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset)
+ACCESSORS(AccessCheckInfo, named_interceptor, Object, kNamedInterceptorOffset)
+ACCESSORS(AccessCheckInfo, indexed_interceptor, Object,
+ kIndexedInterceptorOffset)
ACCESSORS(AccessCheckInfo, data, Object, kDataOffset)
ACCESSORS(InterceptorInfo, getter, Object, kGetterOffset)
@@ -5528,11 +5714,47 @@ ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
kInstanceCallHandlerOffset)
ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
+ACCESSORS(FunctionTemplateInfo, shared_function_info, Object,
+ kSharedFunctionInfoOffset)
+
SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
-ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
- kInternalFieldCountOffset)
+ACCESSORS(ObjectTemplateInfo, data, Object, kDataOffset)
+
+int ObjectTemplateInfo::internal_field_count() const {
+ Object* value = data();
+ DCHECK(value->IsSmi());
+ return InternalFieldCount::decode(Smi::cast(value)->value());
+}
+
+void ObjectTemplateInfo::set_internal_field_count(int count) {
+ return set_data(Smi::FromInt(
+ InternalFieldCount::update(Smi::cast(data())->value(), count)));
+}
+
+bool ObjectTemplateInfo::immutable_proto() const {
+ Object* value = data();
+ DCHECK(value->IsSmi());
+ return IsImmutablePrototype::decode(Smi::cast(value)->value());
+}
+
+void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
+ return set_data(Smi::FromInt(
+ IsImmutablePrototype::update(Smi::cast(data())->value(), immutable)));
+}
+
+int TemplateList::length() const {
+ return Smi::cast(FixedArray::cast(this)->get(kLengthIndex))->value();
+}
+
+Object* TemplateList::get(int index) const {
+ return FixedArray::cast(this)->get(kFirstElementIndex + index);
+}
+
+void TemplateList::set(int index, Object* value) {
+ FixedArray::cast(this)->set(kFirstElementIndex + index, value);
+}
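
TemplateList keeps its logical length in slot kLengthIndex of a plain FixedArray and stores elements from kFirstElementIndex onward. A sketch of the same length-prefixed layout over a flat vector (the backing store and the Add helper are illustrative):

#include <cassert>
#include <vector>

// Slot 0 holds the logical length; elements start at slot 1. The
// backing array may be longer than the logical length to leave room
// for growth, which is why the length is stored explicitly.
struct TemplateListLike {
  static constexpr int kLengthIndex = 0;
  static constexpr int kFirstElementIndex = 1;
  std::vector<int> slots;  // stand-in for a FixedArray of tagged values

  explicit TemplateListLike(int capacity)
      : slots(kFirstElementIndex + capacity, 0) {}

  int length() const { return slots[kLengthIndex]; }
  int get(int index) const { return slots[kFirstElementIndex + index]; }
  void set(int index, int value) { slots[kFirstElementIndex + index] = value; }
  void Add(int value) {
    set(length(), value);
    slots[kLengthIndex] = length() + 1;
  }
};

int main() {
  TemplateListLike list(4);
  list.Add(7);
  list.Add(9);
  assert(list.length() == 2 && list.get(1) == 9);
  return 0;
}
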
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
@@ -5553,13 +5775,18 @@ ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
SMI_ACCESSORS(Script, type, kTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
-ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-SMI_ACCESSORS(Script, eval_from_instructions_offset,
- kEvalFrominstructionsOffsetOffset)
+ACCESSORS_CHECKED(Script, eval_from_shared, Object, kEvalFromSharedOffset,
+ this->type() != TYPE_WASM)
+SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
+ this->type() != TYPE_WASM)
ACCESSORS(Script, shared_function_infos, Object, kSharedFunctionInfosOffset)
SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
+ACCESSORS_CHECKED(Script, wasm_object, JSObject, kEvalFromSharedOffset,
+ this->type() == TYPE_WASM)
+SMI_ACCESSORS_CHECKED(Script, wasm_function_index, kEvalFromPositionOffset,
+ this->type() == TYPE_WASM)
Script::CompilationType Script::compilation_type() {
return BooleanBit::get(flags(), kCompilationTypeBit) ?
@@ -5593,24 +5820,44 @@ void Script::set_origin_options(ScriptOriginOptions origin_options) {
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
-ACCESSORS(DebugInfo, abstract_code, AbstractCode, kAbstractCodeIndex)
+ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayIndex)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
-BytecodeArray* DebugInfo::original_bytecode_array() {
+bool DebugInfo::HasDebugBytecodeArray() {
+ return debug_bytecode_array()->IsBytecodeArray();
+}
+
+bool DebugInfo::HasDebugCode() {
+ Code* code = shared()->code();
+ bool has = code->kind() == Code::FUNCTION;
+ DCHECK(!has || code->has_debug_break_slots());
+ return has;
+}
+
+BytecodeArray* DebugInfo::OriginalBytecodeArray() {
+ DCHECK(HasDebugBytecodeArray());
return shared()->bytecode_array();
}
-SMI_ACCESSORS(BreakPointInfo, code_offset, kCodeOffsetIndex)
+BytecodeArray* DebugInfo::DebugBytecodeArray() {
+ DCHECK(HasDebugBytecodeArray());
+ return BytecodeArray::cast(debug_bytecode_array());
+}
+
+Code* DebugInfo::DebugCode() {
+ DCHECK(HasDebugCode());
+ return shared()->code();
+}
+
SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionIndex)
-SMI_ACCESSORS(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
- kFeedbackVectorOffset)
+ACCESSORS(SharedFunctionInfo, feedback_metadata, TypeFeedbackMetadata,
+ kFeedbackMetadataOffset)
#if TRACE_MAPS
SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
#endif
@@ -5634,7 +5881,6 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
kRemovePrototypeBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
kDoNotCacheBit)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, instantiated, kInstantiatedBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
kAcceptAnyReceiver)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_named_expression,
@@ -5769,14 +6015,14 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
LanguageMode SharedFunctionInfo::language_mode() {
- STATIC_ASSERT(LANGUAGE_END == 3);
+ STATIC_ASSERT(LANGUAGE_END == 2);
return construct_language_mode(
BooleanBit::get(compiler_hints(), kStrictModeFunction));
}
void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 3);
+ STATIC_ASSERT(LANGUAGE_END == 2);
// We only allow language mode transitions that set the same language mode
// again or go up in the chain:
DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
@@ -5798,7 +6044,6 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
set_compiler_hints(hints);
}
-
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
kNeedsHomeObject)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
@@ -5814,6 +6059,7 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_async, kIsAsyncFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
kIsConciseMethod)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_getter_function,
@@ -5822,11 +6068,12 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_setter_function,
kIsSetterFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
kIsDefaultConstructor)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
+ kIsAsmWasmBroken)
-ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
-ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
-
-ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
+inline bool SharedFunctionInfo::is_resumable() const {
+ return is_generator() || is_async();
+}
bool Script::HasValidSource() {
Object* src = this->source();
@@ -5889,6 +6136,9 @@ void SharedFunctionInfo::ReplaceCode(Code* value) {
if (is_compiled()) set_never_compiled(false);
}
+bool SharedFunctionInfo::HasBaselineCode() const {
+ return code()->kind() == Code::FUNCTION;
+}
ScopeInfo* SharedFunctionInfo::scope_info() const {
return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
@@ -5905,11 +6155,11 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
mode);
}
-
-bool SharedFunctionInfo::is_compiled() {
+bool SharedFunctionInfo::is_compiled() const {
Builtins* builtins = GetIsolate()->builtins();
DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
DCHECK(code() != builtins->builtin(Builtins::kCompileOptimized));
+ DCHECK(code() != builtins->builtin(Builtins::kCompileBaseline));
return code() != builtins->builtin(Builtins::kCompileLazy);
}
@@ -5933,8 +6183,8 @@ DebugInfo* SharedFunctionInfo::GetDebugInfo() {
bool SharedFunctionInfo::HasDebugCode() {
- return HasBytecodeArray() ||
- (code()->kind() == Code::FUNCTION && code()->has_debug_break_slots());
+ if (HasBaselineCode()) return code()->has_debug_break_slots();
+ return HasBytecodeArray();
}
@@ -5949,7 +6199,7 @@ FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
}
void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
- DCHECK(function_data()->IsUndefined());
+ DCHECK(function_data()->IsUndefined(GetIsolate()));
set_function_data(data);
}
@@ -5957,19 +6207,37 @@ bool SharedFunctionInfo::HasBytecodeArray() {
return function_data()->IsBytecodeArray();
}
-
BytecodeArray* SharedFunctionInfo::bytecode_array() {
DCHECK(HasBytecodeArray());
return BytecodeArray::cast(function_data());
}
void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
- DCHECK(function_data()->IsUndefined());
+ DCHECK(function_data()->IsUndefined(GetIsolate()));
set_function_data(bytecode);
}
void SharedFunctionInfo::ClearBytecodeArray() {
- DCHECK(function_data()->IsUndefined() || HasBytecodeArray());
+ DCHECK(function_data()->IsUndefined(GetIsolate()) || HasBytecodeArray());
+ set_function_data(GetHeap()->undefined_value());
+}
+
+bool SharedFunctionInfo::HasAsmWasmData() {
+ return function_data()->IsFixedArray();
+}
+
+FixedArray* SharedFunctionInfo::asm_wasm_data() {
+ DCHECK(HasAsmWasmData());
+ return FixedArray::cast(function_data());
+}
+
+void SharedFunctionInfo::set_asm_wasm_data(FixedArray* data) {
+ DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
+ set_function_data(data);
+}
+
+void SharedFunctionInfo::ClearAsmWasmData() {
+ DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
set_function_data(GetHeap()->undefined_value());
}
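
These accessors show that SharedFunctionInfo overloads a single function_data slot: the dynamic type of the stored value (undefined, BytecodeArray, or FixedArray for asm/wasm data) determines what the slot currently holds. A standalone sketch of one-slot, type-discriminated storage, with std::variant standing in for the tagged pointer and monostate for undefined:

#include <cassert>
#include <variant>
#include <vector>

struct BytecodeLike { std::vector<unsigned char> bytes; };
struct AsmWasmLike { std::vector<int> data; };

struct FunctionDataSlot {
  // monostate ~ undefined_value(): the slot is currently unused.
  std::variant<std::monostate, BytecodeLike, AsmWasmLike> data;

  bool HasBytecode() const {
    return std::holds_alternative<BytecodeLike>(data);
  }
  bool HasAsmWasm() const {
    return std::holds_alternative<AsmWasmLike>(data);
  }
  void SetBytecode(BytecodeLike b) {
    // Mirrors the DCHECKs above: only set when empty or already set.
    assert(std::holds_alternative<std::monostate>(data) || HasBytecode());
    data = std::move(b);
  }
  void Clear() { data = std::monostate{}; }
};

int main() {
  FunctionDataSlot slot;
  slot.SetBytecode(BytecodeLike{{0x01, 0x02}});
  assert(slot.HasBytecode() && !slot.HasAsmWasm());
  slot.Clear();
  assert(!slot.HasBytecode());
  return 0;
}
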
@@ -5995,12 +6263,13 @@ String* SharedFunctionInfo::inferred_name() {
if (HasInferredName()) {
return String::cast(function_identifier());
}
- DCHECK(function_identifier()->IsUndefined() || HasBuiltinFunctionId());
- return GetIsolate()->heap()->empty_string();
+ Isolate* isolate = GetIsolate();
+ DCHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId());
+ return isolate->heap()->empty_string();
}
void SharedFunctionInfo::set_inferred_name(String* inferred_name) {
- DCHECK(function_identifier()->IsUndefined() || HasInferredName());
+ DCHECK(function_identifier()->IsUndefined(GetIsolate()) || HasInferredName());
set_function_identifier(inferred_name);
}
@@ -6086,7 +6355,7 @@ void SharedFunctionInfo::set_disable_optimization_reason(BailoutReason reason) {
bool SharedFunctionInfo::IsBuiltin() {
Object* script_obj = script();
- if (script_obj->IsUndefined()) return true;
+ if (script_obj->IsUndefined(GetIsolate())) return true;
Script* script = Script::cast(script_obj);
Script::Type type = static_cast<Script::Type>(script->type());
return type != Script::TYPE_NORMAL;
@@ -6105,6 +6374,10 @@ bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
+bool JSFunction::IsMarkedForBaseline() {
+ return code() ==
+ GetIsolate()->builtins()->builtin(Builtins::kCompileBaseline);
+}
bool JSFunction::IsMarkedForOptimization() {
return code() == GetIsolate()->builtins()->builtin(
@@ -6147,7 +6420,7 @@ void Map::InobjectSlackTrackingStep() {
AbstractCode* JSFunction::abstract_code() {
Code* code = this->code();
- if (code->is_interpreter_entry_trampoline()) {
+ if (code->is_interpreter_trampoline_builtin()) {
return AbstractCode::cast(shared()->bytecode_array());
} else {
return AbstractCode::cast(code);
@@ -6215,7 +6488,7 @@ Context* JSFunction::native_context() { return context()->native_context(); }
void JSFunction::set_context(Object* value) {
- DCHECK(value->IsUndefined() || value->IsContext());
+ DCHECK(value->IsUndefined(GetIsolate()) || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
}
@@ -6235,7 +6508,8 @@ bool JSFunction::has_initial_map() {
bool JSFunction::has_instance_prototype() {
- return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
+ return has_initial_map() ||
+ !prototype_or_initial_map()->IsTheHole(GetIsolate());
}
@@ -6270,16 +6544,16 @@ Object* JSFunction::prototype() {
bool JSFunction::is_compiled() {
Builtins* builtins = GetIsolate()->builtins();
return code() != builtins->builtin(Builtins::kCompileLazy) &&
+ code() != builtins->builtin(Builtins::kCompileBaseline) &&
code() != builtins->builtin(Builtins::kCompileOptimized) &&
code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
}
-
-int JSFunction::NumberOfLiterals() {
- return literals()->length();
+TypeFeedbackVector* JSFunction::feedback_vector() {
+ LiteralsArray* array = literals();
+ return array->feedback_vector();
}
-
ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
ACCESSORS(JSProxy, hash, Object, kHashOffset)
@@ -6325,28 +6599,25 @@ void Foreign::set_foreign_address(Address value) {
ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
-ACCESSORS(JSGeneratorObject, input, Object, kInputOffset)
+ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
+SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
-bool JSGeneratorObject::is_suspended() {
- DCHECK_LT(kGeneratorExecuting, kGeneratorClosed);
- DCHECK_EQ(kGeneratorClosed, 0);
- return continuation() > 0;
+bool JSGeneratorObject::is_suspended() const {
+ DCHECK_LT(kGeneratorExecuting, 0);
+ DCHECK_LT(kGeneratorClosed, 0);
+ return continuation() >= 0;
}
-bool JSGeneratorObject::is_closed() {
+bool JSGeneratorObject::is_closed() const {
return continuation() == kGeneratorClosed;
}
-bool JSGeneratorObject::is_executing() {
+bool JSGeneratorObject::is_executing() const {
return continuation() == kGeneratorExecuting;
}
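
The rewritten predicates encode the whole generator state in the continuation integer: non-negative values are suspend offsets, while executing and closed are distinct negative sentinels, which is exactly what the updated DCHECKs assert. A small sketch of the encoding (the concrete sentinel values here are illustrative):

#include <cassert>

// Non-negative: suspended at that offset. Negative sentinels: special
// states, as the DCHECK_LT checks above require.
constexpr int kGeneratorExecuting = -2;
constexpr int kGeneratorClosed = -1;

struct GeneratorState {
  int continuation = kGeneratorClosed;
  bool is_suspended() const { return continuation >= 0; }
  bool is_closed() const { return continuation == kGeneratorClosed; }
  bool is_executing() const { return continuation == kGeneratorExecuting; }
};

int main() {
  static_assert(kGeneratorExecuting < 0 && kGeneratorClosed < 0, "");
  GeneratorState g;
  g.continuation = 42;  // suspended at offset 42
  assert(g.is_suspended() && !g.is_closed());
  g.continuation = kGeneratorExecuting;
  assert(g.is_executing() && !g.is_suspended());
  return 0;
}
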
-ACCESSORS(JSModule, context, Object, kContextOffset)
-ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
-
-
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -6387,14 +6658,15 @@ INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+ACCESSORS(Code, source_position_table, ByteArray, kSourcePositionTableOffset)
ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
ACCESSORS(Code, next_code_link, Object, kNextCodeLinkOffset)
-
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, NULL);
WRITE_FIELD(this, kHandlerTableOffset, NULL);
WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
+ WRITE_FIELD(this, kSourcePositionTableOffset, NULL);
// Do not wipe out major/minor keys on a code stub or IC
if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
@@ -6444,11 +6716,48 @@ byte* Code::instruction_end() {
return instruction_start() + instruction_size();
}
+int Code::GetUnwindingInfoSizeOffset() const {
+ DCHECK(has_unwinding_info());
+ return RoundUp(kHeaderSize + instruction_size(), kInt64Size);
+}
+
+int Code::unwinding_info_size() const {
+ DCHECK(has_unwinding_info());
+ return static_cast<int>(
+ READ_UINT64_FIELD(this, GetUnwindingInfoSizeOffset()));
+}
+
+void Code::set_unwinding_info_size(int value) {
+ DCHECK(has_unwinding_info());
+ WRITE_UINT64_FIELD(this, GetUnwindingInfoSizeOffset(), value);
+}
+
+byte* Code::unwinding_info_start() {
+ DCHECK(has_unwinding_info());
+ return FIELD_ADDR(this, GetUnwindingInfoSizeOffset()) + kInt64Size;
+}
+
+byte* Code::unwinding_info_end() {
+ DCHECK(has_unwinding_info());
+ return unwinding_info_start() + unwinding_info_size();
+}
int Code::body_size() {
- return RoundUp(instruction_size(), kObjectAlignment);
+ int unpadded_body_size =
+ has_unwinding_info()
+ ? static_cast<int>(unwinding_info_end() - instruction_start())
+ : instruction_size();
+ return RoundUp(unpadded_body_size, kObjectAlignment);
}
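
body_size() now places the unwinding info after the instructions and pads the total to object alignment, and GetUnwindingInfoSizeOffset rounds the header-plus-instructions size up to an 8-byte boundary so the 64-bit size field stays aligned. Both rely on the standard power-of-two round-up; for example:

#include <cassert>

// Round x up to the next multiple of align (align must be a power of two).
constexpr int RoundUp(int x, int align) {
  return (x + align - 1) & ~(align - 1);
}

int main() {
  constexpr int kInt64Size = 8;
  constexpr int kObjectAlignment = 8;  // illustrative; target-dependent
  static_assert(RoundUp(100, kInt64Size) == 104, "");
  static_assert(RoundUp(104, kInt64Size) == 104, "");  // already aligned
  // E.g. a (made-up) 96-byte header plus 20 bytes of instructions:
  int unwinding_size_offset = RoundUp(96 + 20, kInt64Size);
  assert(unwinding_size_offset == 120);
  assert(RoundUp(unwinding_size_offset + 1, kObjectAlignment) == 128);
  return 0;
}
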
+int Code::SizeIncludingMetadata() {
+ int size = CodeSize();
+ size += relocation_info()->Size();
+ size += deoptimization_data()->Size();
+ size += handler_table()->Size();
+ if (kind() == FUNCTION) size += source_position_table()->Size();
+ return size;
+}
ByteArray* Code::unchecked_relocation_info() {
return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
@@ -6623,7 +6932,7 @@ ACCESSORS(JSRegExp, source, Object, kSourceOffset)
JSRegExp::Type JSRegExp::TypeTag() {
Object* data = this->data();
- if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
+ if (data->IsUndefined(GetIsolate())) return JSRegExp::NOT_COMPILED;
Smi* smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
return static_cast<JSRegExp::Type>(smi->value());
}
@@ -6681,16 +6990,18 @@ ElementsKind JSObject::GetElementsKind() {
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
Map* map = fixed_array->map();
- DCHECK((IsFastSmiOrObjectElementsKind(kind) &&
- (map == GetHeap()->fixed_array_map() ||
- map == GetHeap()->fixed_cow_array_map())) ||
- (IsFastDoubleElementsKind(kind) &&
- (fixed_array->IsFixedDoubleArray() ||
- fixed_array == GetHeap()->empty_fixed_array())) ||
- (kind == DICTIONARY_ELEMENTS &&
- fixed_array->IsFixedArray() &&
- fixed_array->IsDictionary()) ||
- (kind > DICTIONARY_ELEMENTS));
+ if (IsFastSmiOrObjectElementsKind(kind)) {
+ DCHECK(map == GetHeap()->fixed_array_map() ||
+ map == GetHeap()->fixed_cow_array_map());
+ } else if (IsFastDoubleElementsKind(kind)) {
+ DCHECK(fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array());
+ } else if (kind == DICTIONARY_ELEMENTS) {
+ DCHECK(fixed_array->IsFixedArray());
+ DCHECK(fixed_array->IsDictionary());
+ } else {
+ DCHECK(kind > DICTIONARY_ELEMENTS);
+ }
DCHECK(!IsSloppyArgumentsElements(kind) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
}
@@ -7140,7 +7451,7 @@ Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
Handle<Name> name) {
if (object->IsJSObject()) { // Shortcut
LookupIterator it = LookupIterator::PropertyOrElement(
- object->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
+ object->GetIsolate(), object, name, object, LookupIterator::OWN);
return HasProperty(&it);
}
@@ -7150,6 +7461,19 @@ Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
return Just(attributes.FromJust() != ABSENT);
}
+Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+ uint32_t index) {
+ if (object->IsJSObject()) { // Shortcut
+ LookupIterator it(object->GetIsolate(), object, index, object,
+ LookupIterator::OWN);
+ return HasProperty(&it);
+ }
+
+ Maybe<PropertyAttributes> attributes =
+ JSReceiver::GetOwnPropertyAttributes(object, index);
+ MAYBE_RETURN(attributes, Nothing<bool>());
+ return Just(attributes.FromJust() != ABSENT);
+}
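
The new HasOwnProperty overload for indices follows the Maybe<T> convention: Nothing signals a pending exception that the caller must propagate (which is what MAYBE_RETURN does), while Just carries the actual answer. A minimal sketch of the convention with a hypothetical Maybe type, not V8's actual class:

#include <cassert>

template <class T>
struct Maybe {
  bool has_value;
  T value;
  bool IsNothing() const { return !has_value; }
  T FromJust() const { assert(has_value); return value; }
};

template <class T> Maybe<T> Just(T v) { return {true, v}; }
template <class T> Maybe<T> Nothing() { return {false, T{}}; }

// Propagate failure, as MAYBE_RETURN(attributes, Nothing<bool>()) does.
#define MAYBE_RETURN(call, on_nothing)           \
  do {                                           \
    if ((call).IsNothing()) return (on_nothing); \
  } while (false)

Maybe<int> GetAttributes(bool throws) {
  if (throws) return Nothing<int>();  // an exception is pending
  return Just(0);
}

Maybe<bool> HasOwnPropertyLike(bool throws) {
  Maybe<int> attributes = GetAttributes(throws);
  MAYBE_RETURN(attributes, Nothing<bool>());
  return Just(attributes.FromJust() != -1);
}

int main() {
  assert(HasOwnPropertyLike(false).FromJust());
  assert(HasOwnPropertyLike(true).IsNothing());
  return 0;
}
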
Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name) {
@@ -7162,10 +7486,16 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
Handle<JSReceiver> object, Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
+ name->GetIsolate(), object, name, object, LookupIterator::OWN);
return GetPropertyAttributes(&it);
}
+Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
+ Handle<JSReceiver> object, uint32_t index) {
+ LookupIterator it(object->GetIsolate(), object, index, object,
+ LookupIterator::OWN);
+ return GetPropertyAttributes(&it);
+}
Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
LookupIterator it(object->GetIsolate(), object, index, object);
@@ -7184,7 +7514,7 @@ Maybe<PropertyAttributes> JSReceiver::GetElementAttributes(
Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
Handle<JSReceiver> object, uint32_t index) {
Isolate* isolate = object->GetIsolate();
- LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
+ LookupIterator it(isolate, object, index, object, LookupIterator::OWN);
return GetPropertyAttributes(&it);
}
@@ -7200,19 +7530,20 @@ bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject* global) const {
return iter.GetCurrent() != global;
}
-
-Handle<Smi> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
- return object->IsJSProxy()
- ? JSProxy::GetOrCreateIdentityHash(Handle<JSProxy>::cast(object))
- : JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
+Smi* JSReceiver::GetOrCreateIdentityHash(Isolate* isolate,
+ Handle<JSReceiver> object) {
+ return object->IsJSProxy() ? JSProxy::GetOrCreateIdentityHash(
+ isolate, Handle<JSProxy>::cast(object))
+ : JSObject::GetOrCreateIdentityHash(
+ isolate, Handle<JSObject>::cast(object));
}
-Handle<Object> JSReceiver::GetIdentityHash(Isolate* isolate,
- Handle<JSReceiver> receiver) {
- return receiver->IsJSProxy() ? JSProxy::GetIdentityHash(
- isolate, Handle<JSProxy>::cast(receiver))
- : JSObject::GetIdentityHash(
- isolate, Handle<JSObject>::cast(receiver));
+Object* JSReceiver::GetIdentityHash(Isolate* isolate,
+ Handle<JSReceiver> receiver) {
+ return receiver->IsJSProxy()
+ ? JSProxy::GetIdentityHash(Handle<JSProxy>::cast(receiver))
+ : JSObject::GetIdentityHash(isolate,
+ Handle<JSObject>::cast(receiver));
}
@@ -7260,6 +7591,9 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
set_flag(AttributesField::update(flag(), attributes));
}
+bool FunctionTemplateInfo::IsTemplateFor(JSObject* object) {
+ return IsTemplateFor(object->map());
+}
bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
if (!HasExpectedReceiverType()) return true;
@@ -7289,8 +7623,9 @@ void AccessorPair::set(AccessorComponent component, Object* value) {
void AccessorPair::SetComponents(Object* getter, Object* setter) {
- if (!getter->IsNull()) set_getter(getter);
- if (!setter->IsNull()) set_setter(setter);
+ Isolate* isolate = GetIsolate();
+ if (!getter->IsNull(isolate)) set_getter(getter);
+ if (!setter->IsNull(isolate)) set_setter(setter);
}
@@ -7310,7 +7645,7 @@ bool AccessorPair::ContainsAccessor() {
bool AccessorPair::IsJSAccessor(Object* obj) {
- return obj->IsCallable() || obj->IsUndefined();
+ return obj->IsCallable() || obj->IsUndefined(GetIsolate());
}
@@ -7337,14 +7672,16 @@ void BaseDictionaryShape<Key>::SetEntry(Dictionary* dict, int entry,
Handle<Object> key,
Handle<Object> value,
PropertyDetails details) {
- STATIC_ASSERT(Dictionary::kEntrySize == 3);
+ STATIC_ASSERT(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
DCHECK(!key->IsName() || details.dictionary_index() > 0);
int index = dict->EntryToIndex(entry);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
- dict->set(index, *key, mode);
- dict->set(index + 1, *value, mode);
- dict->set(index + 2, details.AsSmi());
+ dict->set(index + Dictionary::kEntryKeyIndex, *key, mode);
+ dict->set(index + Dictionary::kEntryValueIndex, *value, mode);
+ if (Dictionary::kEntrySize == 3) {
+ dict->set(index + Dictionary::kEntryDetailsIndex, details.AsSmi());
+ }
}
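
Relaxing the STATIC_ASSERT and switching to named slot indices lets dictionary entries be two or three slots wide: key and value always, a details slot only when kEntrySize is 3 (global dictionaries keep details in the PropertyCell instead, as the next hunk shows). A compile-time sketch of that layout:

#include <cassert>
#include <vector>

// Entries are laid out back to back in one flat array; index constants
// name the slots instead of bare +1/+2 offsets.
template <int kEntrySize>
struct DictLayout {
  static_assert(kEntrySize == 2 || kEntrySize == 3, "unsupported entry size");
  static constexpr int kEntryKeyIndex = 0;
  static constexpr int kEntryValueIndex = 1;
  static constexpr int kEntryDetailsIndex = 2;

  std::vector<int> slots;
  explicit DictLayout(int capacity) : slots(capacity * kEntrySize, 0) {}

  int EntryToIndex(int entry) const { return entry * kEntrySize; }

  void SetEntry(int entry, int key, int value, int details) {
    int index = EntryToIndex(entry);
    slots[index + kEntryKeyIndex] = key;
    slots[index + kEntryValueIndex] = value;
    if (kEntrySize == 3) slots[index + kEntryDetailsIndex] = details;
  }
};

int main() {
  DictLayout<3> with_details(4);
  with_details.SetEntry(1, /*key=*/7, /*value=*/8, /*details=*/9);
  assert(with_details.slots[3] == 7 && with_details.slots[5] == 9);
  DictLayout<2> without_details(4);  // details live elsewhere
  without_details.SetEntry(1, 7, 8, 0);
  assert(without_details.slots[2] == 7 && without_details.slots[3] == 8);
  return 0;
}
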
@@ -7358,8 +7695,8 @@ void GlobalDictionaryShape::SetEntry(Dictionary* dict, int entry,
int index = dict->EntryToIndex(entry);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = dict->GetWriteBarrierMode(no_gc);
- dict->set(index, *key, mode);
- dict->set(index + 1, *value, mode);
+ dict->set(index + Dictionary::kEntryKeyIndex, *key, mode);
+ dict->set(index + Dictionary::kEntryValueIndex, *value, mode);
PropertyCell::cast(*value)->set_property_details(details);
}
@@ -7381,6 +7718,9 @@ uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
}
+Map* UnseededNumberDictionaryShape::GetMap(Isolate* isolate) {
+ return *isolate->factory()->unseeded_number_dictionary_map();
+}
uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
return ComputeIntegerHash(key, seed);
@@ -7455,7 +7795,8 @@ void GlobalDictionaryShape::DetailsAtPut(Dictionary* dict, int entry,
template <typename Dictionary>
bool GlobalDictionaryShape::IsDeleted(Dictionary* dict, int entry) {
DCHECK(dict->ValueAt(entry)->IsPropertyCell());
- return PropertyCell::cast(dict->ValueAt(entry))->value()->IsTheHole();
+ Isolate* isolate = dict->GetIsolate();
+ return PropertyCell::cast(dict->ValueAt(entry))->value()->IsTheHole(isolate);
}
@@ -7555,7 +7896,6 @@ void Map::ClearCodeCache(Heap* heap) {
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
// - IncrementalMarking::Step
- DCHECK(!heap->InNewSpace(heap->empty_fixed_array()));
WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
}
@@ -7730,7 +8070,7 @@ Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType* table(TableType::cast(this->table()));
int index = Smi::cast(this->index())->value();
Object* key = table->KeyAt(index);
- DCHECK(!key->IsTheHole());
+ DCHECK(!key->IsTheHole(table->GetIsolate()));
return key;
}
@@ -7750,7 +8090,7 @@ Object* JSMapIterator::CurrentValue() {
OrderedHashMap* table(OrderedHashMap::cast(this->table()));
int index = Smi::cast(this->index())->value();
Object* value = table->ValueAt(index);
- DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsTheHole(table->GetIsolate()));
return value;
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 58092a49ba..6f1f746e5e 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -4,6 +4,9 @@
#include "src/objects.h"
+#include <iomanip>
+#include <memory>
+
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/interpreter/bytecodes.h"
@@ -33,7 +36,13 @@ void Object::Print(std::ostream& os) { // NOLINT
void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
- os << reinterpret_cast<void*>(this) << ": [" << id << "]";
+ os << reinterpret_cast<void*>(this) << ": [";
+ if (id != nullptr) {
+ os << id;
+ } else {
+ os << map()->instance_type();
+ }
+ os << "]";
}
@@ -95,22 +104,24 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
os << "filler";
break;
case JS_OBJECT_TYPE: // fall through
+ case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_ARRAY_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_PROMISE_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_ERROR_TYPE:
JSObject::cast(this)->JSObjectPrint(os);
break;
+ case JS_ARRAY_TYPE:
+ JSArray::cast(this)->JSArrayPrint(os);
+ break;
case JS_REGEXP_TYPE:
JSRegExp::cast(this)->JSRegExpPrint(os);
break;
case ODDBALL_TYPE:
Oddball::cast(this)->to_string()->Print(os);
break;
- case JS_MODULE_TYPE:
- JSModule::cast(this)->JSModulePrint(os);
- break;
case JS_BOUND_FUNCTION_TYPE:
JSBoundFunction::cast(this)->JSBoundFunctionPrint(os);
break;
@@ -307,12 +318,35 @@ void JSObject::PrintProperties(std::ostream& os) { // NOLINT
}
}
-
-template <class T>
+template <class T, bool print_the_hole>
static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
- T* p = T::cast(object);
- for (int i = 0; i < p->length(); i++) {
- os << "\n " << i << ": " << p->get_scalar(i);
+ T* array = T::cast(object);
+ if (array->length() == 0) return;
+ int previous_index = 0;
+ double previous_value = array->get_scalar(0);
+ double value = 0.0;
+ int i;
+ for (i = 1; i <= array->length(); i++) {
+ if (i < array->length()) value = array->get_scalar(i);
+ bool values_are_nan = std::isnan(previous_value) && std::isnan(value);
+ if ((previous_value == value || values_are_nan) && i != array->length()) {
+ continue;
+ }
+ os << "\n";
+ std::stringstream ss;
+ ss << previous_index;
+ if (previous_index != i - 1) {
+ ss << '-' << (i - 1);
+ }
+ os << std::setw(12) << ss.str() << ": ";
+ if (print_the_hole &&
+ FixedDoubleArray::cast(object)->is_the_hole(previous_index)) {
+ os << "<the_hole>";
+ } else {
+ os << previous_value;
+ }
+ previous_index = i;
+ previous_value = value;
}
}
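
The rewritten DoPrintElements collapses runs of identical values into a single "first-last: value" line, treating consecutive NaNs as equal since NaN never compares equal to itself. The core loop pattern, standalone; the extra iteration at i == length flushes the final run, as in the printer above:

#include <cmath>
#include <cstdio>
#include <vector>

// Print runs of equal values as "start-end: value".
void PrintCompressed(const std::vector<double>& values) {
  if (values.empty()) return;
  size_t run_start = 0;
  for (size_t i = 1; i <= values.size(); i++) {
    bool same = i < values.size() &&
                (values[i] == values[run_start] ||
                 (std::isnan(values[i]) && std::isnan(values[run_start])));
    if (same) continue;
    if (run_start == i - 1) {
      std::printf("%8zu: %g\n", run_start, values[run_start]);
    } else {
      std::printf("%5zu-%zu: %g\n", run_start, i - 1, values[run_start]);
    }
    run_start = i;
  }
}

int main() {
  PrintCompressed({1.5, 1.5, 1.5, 2.0, NAN, NAN});
  // Prints:   0-2: 1.5   then   3: 2   then   4-5: nan
  return 0;
}
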
@@ -320,6 +354,7 @@ static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
void JSObject::PrintElements(std::ostream& os) { // NOLINT
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
+ if (elements()->length() == 0) return;
switch (map()->elements_kind()) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -327,52 +362,55 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
case FAST_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS: {
// Print in array notation for non-sparse arrays.
- FixedArray* p = FixedArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- os << "\n " << i << ": " << Brief(p->get(i));
+ FixedArray* array = FixedArray::cast(elements());
+ Object* previous_value = array->get(0);
+ Object* value = nullptr;
+ int previous_index = 0;
+ int i;
+ for (i = 1; i <= array->length(); i++) {
+ if (i < array->length()) value = array->get(i);
+ if (previous_value == value && i != array->length()) {
+ continue;
+ }
+ os << "\n";
+ std::stringstream ss;
+ ss << previous_index;
+ if (previous_index != i - 1) {
+ ss << '-' << (i - 1);
+ }
+ os << std::setw(12) << ss.str() << ": " << Brief(previous_value);
+ previous_index = i;
+ previous_value = value;
}
break;
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- // Print in array notation for non-sparse arrays.
- if (elements()->length() > 0) {
- FixedDoubleArray* p = FixedDoubleArray::cast(elements());
- for (int i = 0; i < p->length(); i++) {
- os << "\n " << i << ": ";
- if (p->is_the_hole(i)) {
- os << "<the hole>";
- } else {
- os << p->get_scalar(i);
- }
- }
- }
+ DoPrintElements<FixedDoubleArray, true>(os, elements());
break;
}
-
-#define PRINT_ELEMENTS(Kind, Type) \
- case Kind: { \
- DoPrintElements<Type>(os, elements()); \
- break; \
+#define PRINT_ELEMENTS(Kind, Type) \
+ case Kind: { \
+ DoPrintElements<Type, false>(os, elements()); \
+ break; \
}
- PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
- PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
- PRINT_ELEMENTS(INT8_ELEMENTS, FixedInt8Array)
- PRINT_ELEMENTS(UINT16_ELEMENTS, FixedUint16Array)
- PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array)
- PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array)
- PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
- PRINT_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
- PRINT_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
+ PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
+ PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
+ PRINT_ELEMENTS(INT8_ELEMENTS, FixedInt8Array)
+ PRINT_ELEMENTS(UINT16_ELEMENTS, FixedUint16Array)
+ PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array)
+ PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array)
+ PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
+ PRINT_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
+ PRINT_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
#undef PRINT_ELEMENTS
case DICTIONARY_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
- os << "\n - elements: ";
- elements()->Print(os);
+ SeededNumberDictionary::cast(elements())->Print(os);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
@@ -396,46 +434,62 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
obj->PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
- os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " ["
+ os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " [";
+ if (obj->HasFastProperties()) {
+ os << "FastProperties";
+ } else {
+ os << "DictionaryProperties";
+ }
+ PrototypeIterator iter(obj->GetIsolate(), obj);
+ os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
+ os << "\n - elements = " << Brief(obj->elements()) << " ["
<< ElementsKindToString(obj->map()->elements_kind());
if (obj->elements()->map() == obj->GetHeap()->fixed_cow_array_map()) {
os << " (COW)";
}
- PrototypeIterator iter(obj->GetIsolate(), obj);
- os << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
- if (obj->elements()->length() > 0) {
- os << "\n - elements = " << Brief(obj->elements());
+ os << "]";
+ if (obj->GetInternalFieldCount() > 0) {
+ os << "\n - internal fields: " << obj->GetInternalFieldCount();
}
}
static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
bool print_elements = true) {
- os << "\n {";
+ os << "\n - properties = {";
obj->PrintProperties(os);
- obj->PrintTransitions(os);
- if (print_elements) obj->PrintElements(os);
os << "\n }\n";
+ if (print_elements && obj->elements()->length() > 0) {
+ os << " - elements = {";
+ obj->PrintElements(os);
+ os << "\n }\n";
+ }
+ int internal_fields = obj->GetInternalFieldCount();
+ if (internal_fields > 0) {
+ os << " - internal fields = {";
+ for (int i = 0; i < internal_fields; i++) {
+ os << "\n " << Brief(obj->GetInternalField(i));
+ }
+ os << "\n }\n";
+ }
}
void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSObject");
+ JSObjectPrintHeader(os, this, nullptr);
JSObjectPrintBody(os, this);
}
-
-void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSRegExp");
- os << "\n - data = " << Brief(data());
+void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSArray");
+ os << "\n - length = " << Brief(this->length());
JSObjectPrintBody(os, this);
}
-void JSModule::JSModulePrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSModule");
- os << "\n - context = " << Brief(context());
- os << " - scope_info = " << Brief(scope_info());
+void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSRegExp");
+ os << "\n - data = " << Brief(data());
JSObjectPrintBody(os, this);
}
@@ -444,7 +498,7 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Symbol");
os << "\n - hash: " << Hash();
os << "\n - name: " << Brief(name());
- if (name()->IsUndefined()) {
+ if (name()->IsUndefined(GetIsolate())) {
os << " (" << PrivateSymbolToName() << ")";
}
os << "\n - private: " << is_private();
@@ -471,14 +525,13 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (is_stable()) os << "\n - stable_map";
if (is_dictionary_map()) os << "\n - dictionary_map";
if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
- if (has_named_interceptor()) os << " - named_interceptor";
+ if (has_named_interceptor()) os << "\n - named_interceptor";
if (has_indexed_interceptor()) os << "\n - indexed_interceptor";
if (is_undetectable()) os << "\n - undetectable";
if (is_callable()) os << "\n - callable";
if (is_constructor()) os << "\n - constructor";
if (is_access_check_needed()) os << "\n - access_check_needed";
if (!is_extensible()) os << "\n - non-extensible";
- if (is_observed()) os << "\n - observed";
if (is_prototype_map()) {
os << "\n - prototype_map";
os << "\n - prototype info: " << Brief(prototype_info());
@@ -506,20 +559,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
}
-void CodeCache::CodeCachePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "CodeCache");
- os << "\n - default_cache: " << Brief(default_cache());
- os << "\n - normal_type_cache: " << Brief(normal_type_cache());
-}
-
-
-void PolymorphicCodeCache::PolymorphicCodeCachePrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PolymorphicCodeCache");
- os << "\n - cache: " << Brief(cache());
-}
-
-
void TypeFeedbackInfo::TypeFeedbackInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TypeFeedbackInfo");
os << "\n - ic_total_count: " << ic_total_count()
@@ -572,6 +611,40 @@ void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+template void FeedbackVectorSpecBase<StaticFeedbackVectorSpec>::Print();
+template void FeedbackVectorSpecBase<FeedbackVectorSpec>::Print();
+
+template <typename Derived>
+void FeedbackVectorSpecBase<Derived>::Print() {
+ OFStream os(stdout);
+ FeedbackVectorSpecPrint(os);
+ os << std::flush;
+}
+
+template <typename Derived>
+void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
+ std::ostream& os) { // NOLINT
+ int slot_count = This()->slots();
+ os << " - slot_count: " << slot_count;
+ if (slot_count == 0) {
+ os << " (empty)\n";
+ return;
+ }
+
+ for (int slot = 0, name_index = 0; slot < slot_count;) {
+ FeedbackVectorSlotKind kind = This()->GetKind(slot);
+ int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+ DCHECK_LT(0, entry_size);
+
+ os << "\n Slot #" << slot << " " << kind;
+ if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
+ os << ", " << Brief(*This()->GetName(name_index++));
+ }
+
+ slot += entry_size;
+ }
+ os << "\n";
+}
void TypeFeedbackMetadata::Print() {
OFStream os(stdout);
@@ -588,12 +661,16 @@ void TypeFeedbackMetadata::TypeFeedbackMetadataPrint(
os << " (empty)\n";
return;
}
+ os << "\n - slot_count: " << slot_count();
TypeFeedbackMetadataIterator iter(this);
while (iter.HasNext()) {
FeedbackVectorSlot slot = iter.Next();
FeedbackVectorSlotKind kind = iter.kind();
os << "\n Slot " << slot << " " << kind;
+ if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
+ os << ", " << Brief(iter.name());
+ }
}
os << "\n";
}
@@ -619,13 +696,22 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
FeedbackVectorSlot slot = iter.Next();
FeedbackVectorSlotKind kind = iter.kind();
- os << "\n Slot " << slot << " " << kind << " ";
+ os << "\n Slot " << slot << " " << kind;
+ if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
+ os << ", " << Brief(iter.name());
+ }
+ os << " ";
switch (kind) {
case FeedbackVectorSlotKind::LOAD_IC: {
LoadICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
+ case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+ LoadGlobalICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
KeyedLoadICNexus nexus(this, slot);
os << Code::ICState2String(nexus.StateFromFeedback());
@@ -713,8 +799,6 @@ void String::StringPrint(std::ostream& os) { // NOLINT
void Name::NamePrint(std::ostream& os) { // NOLINT
if (IsString()) {
String::cast(this)->StringPrint(os);
- } else if (IsSymbol()) {
- Symbol::cast(this)->name()->Print(os);
} else {
os << Brief(this);
}
@@ -872,6 +956,8 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
<< shared()->internal_formal_parameter_count();
if (shared()->is_generator()) {
os << "\n - generator";
+ } else if (shared()->is_async()) {
+ os << "\n - async";
}
os << "\n - context = " << Brief(context());
os << "\n - literals = " << Brief(literals());
@@ -894,7 +980,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
String* source = String::cast(Script::cast(script())->source());
int start = start_position();
int length = end_position() - start;
- base::SmartArrayPointer<char> source_string = source->ToCString(
+ std::unique_ptr<char[]> source_string = source->ToCString(
DISALLOW_NULLS, FAST_STRING_TRAVERSAL, start, length, NULL);
os << source_string.get();
}
@@ -913,9 +999,10 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - end position = " << end_position();
os << "\n - debug info = " << Brief(debug_info());
os << "\n - length = " << length();
+ os << "\n - num_literals = " << num_literals();
os << "\n - optimized_code_map = " << Brief(optimized_code_map());
- os << "\n - feedback_vector = ";
- feedback_vector()->TypeFeedbackVectorPrint(os);
+ os << "\n - feedback_metadata = ";
+ feedback_metadata()->TypeFeedbackMetadataPrint(os);
if (HasBytecodeArray()) {
os << "\n - bytecode_array = " << bytecode_array();
}
@@ -950,6 +1037,46 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyCell");
os << "\n - value: " << Brief(value());
os << "\n - details: " << property_details();
+ PropertyCellType cell_type = property_details().cell_type();
+ os << "\n - cell_type: ";
+ if (value()->IsTheHole(GetIsolate())) {
+ switch (cell_type) {
+ case PropertyCellType::kUninitialized:
+ os << "Uninitialized";
+ break;
+ case PropertyCellType::kInvalidated:
+ os << "Invalidated";
+ break;
+ default:
+ os << "??? " << static_cast<int>(cell_type);
+ break;
+ }
+ } else {
+ switch (cell_type) {
+ case PropertyCellType::kUndefined:
+ os << "Undefined";
+ break;
+ case PropertyCellType::kConstant:
+ os << "Constant";
+ break;
+ case PropertyCellType::kConstantType:
+ os << "ConstantType"
+ << " (";
+ switch (GetConstantType()) {
+ case PropertyCellConstantType::kSmi:
+ os << "Smi";
+ break;
+ case PropertyCellConstantType::kStableMap:
+ os << "StableMap";
+ break;
+ }
+ os << ")";
+ break;
+ case PropertyCellType::kMutable:
+ os << "Mutable";
+ break;
+ }
+ }
os << "\n";
}
@@ -988,6 +1115,7 @@ void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
os << "\n - flag: " << flag();
os << "\n - getter: " << Brief(getter());
os << "\n - setter: " << Brief(setter());
+ os << "\n - js_getter: " << Brief(js_getter());
os << "\n - data: " << Brief(data());
os << "\n";
}
@@ -1028,9 +1156,9 @@ void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AccessCheckInfo");
- os << "\n - named_callback: " << Brief(named_callback());
- os << "\n - indexed_callback: " << Brief(indexed_callback());
os << "\n - callback: " << Brief(callback());
+ os << "\n - named_interceptor: " << Brief(named_interceptor());
+ os << "\n - indexed_interceptor: " << Brief(indexed_interceptor());
os << "\n - data: " << Brief(data());
os << "\n";
}
@@ -1087,7 +1215,8 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
os << "\n - property_list: " << Brief(property_list());
os << "\n - property_accessors: " << Brief(property_accessors());
os << "\n - constructor: " << Brief(constructor());
- os << "\n - internal_field_count: " << Brief(internal_field_count());
+ os << "\n - internal_field_count: " << internal_field_count();
+ os << "\n - immutable_proto: " << (immutable_proto() ? "true" : "false");
os << "\n";
}
@@ -1140,8 +1269,7 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
os << "\n - compilation type: " << compilation_type();
os << "\n - line ends: " << Brief(line_ends());
os << "\n - eval from shared: " << Brief(eval_from_shared());
- os << "\n - eval from instructions offset: "
- << eval_from_instructions_offset();
+ os << "\n - eval from position: " << eval_from_position();
os << "\n - shared function infos: " << Brief(shared_function_infos());
os << "\n";
}
@@ -1150,7 +1278,7 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "DebugInfo");
os << "\n - shared: " << Brief(shared());
- os << "\n - code: " << Brief(abstract_code());
+ os << "\n - debug bytecode array: " << Brief(debug_bytecode_array());
os << "\n - break_points: ";
break_points()->Print(os);
}
@@ -1158,9 +1286,7 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
void BreakPointInfo::BreakPointInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "BreakPointInfo");
- os << "\n - code_offset: " << code_offset();
os << "\n - source_position: " << source_position();
- os << "\n - statement_position: " << statement_position();
os << "\n - break_point_objects: " << Brief(break_point_objects());
os << "\n";
}
@@ -1184,7 +1310,7 @@ void LayoutDescriptor::Print() {
void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
os << "Layout descriptor: ";
- if (IsUninitialized()) {
+ if (IsOddball() && IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
os << "<uninitialized>";
} else if (IsFastPointerLayout()) {
os << "<all tagged>";
@@ -1215,7 +1341,7 @@ void Name::NameShortPrint() {
} else {
DCHECK(this->IsSymbol());
Symbol* s = Symbol::cast(this);
- if (s->name()->IsUndefined()) {
+ if (s->name()->IsUndefined(GetIsolate())) {
PrintF("#<%s>", s->PrivateSymbolToName());
} else {
PrintF("<%s>", String::cast(s->name())->ToCString().get());
@@ -1230,7 +1356,7 @@ int Name::NameShortPrint(Vector<char> str) {
} else {
DCHECK(this->IsSymbol());
Symbol* s = Symbol::cast(this);
- if (s->name()->IsUndefined()) {
+ if (s->name()->IsUndefined(GetIsolate())) {
return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
} else {
return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
@@ -1293,7 +1419,7 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
for (int i = 0; i < num_transitions; i++) {
Name* key = GetKey(transitions, i);
Map* target = GetTarget(transitions, i);
- os << "\n ";
+ os << "\n ";
#ifdef OBJECT_PRINT
key->NamePrint(os);
#else
@@ -1312,8 +1438,6 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
<< ")";
} else if (key == heap->strict_function_transition_symbol()) {
os << " (transition to strict function)";
- } else if (key == heap->observed_symbol()) {
- os << " (transition to Object.observe)";
} else {
PropertyDetails details = GetTargetDetails(key, target);
os << "(transition to ";
@@ -1343,3 +1467,44 @@ void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
#endif // defined(DEBUG) || defined(OBJECT_PRINT)
} // namespace internal
} // namespace v8
+
+//
+// The following functions are used by our gdb macros.
+//
+extern void _v8_internal_Print_Object(void* object) {
+ reinterpret_cast<i::Object*>(object)->Print();
+}
+
+extern void _v8_internal_Print_Code(void* object) {
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->FindCodeObject(reinterpret_cast<i::Address>(object))->Print();
+}
+
+extern void _v8_internal_Print_TypeFeedbackVector(void* object) {
+ if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
+ printf("Not a type feedback vector\n");
+ } else {
+ reinterpret_cast<i::TypeFeedbackVector*>(object)->Print();
+ }
+}
+
+extern void _v8_internal_Print_DescriptorArray(void* object) {
+ if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
+ printf("Not a descriptor array\n");
+ } else {
+ reinterpret_cast<i::DescriptorArray*>(object)->Print();
+ }
+}
+
+extern void _v8_internal_Print_TransitionArray(void* object) {
+ if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
+ printf("Not a transition array\n");
+ } else {
+ reinterpret_cast<i::TransitionArray*>(object)->Print();
+ }
+}
+
+extern void _v8_internal_Print_StackTrace() {
+ i::Isolate* isolate = i::Isolate::Current();
+ isolate->PrintStack(stdout);
+}
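
These hooks exist only to be called from a debugger, which is why they take a raw void*: gdb can invoke them on any address without V8's type information loaded. A sketch of the same pattern; extern "C" is used here to keep the symbol unmangled, whereas the V8 hooks keep C++ linkage and rely on their distinctive _v8_internal_* names:

#include <cstdio>

namespace demo {
struct Object {
  int tag;
  void Print() const { std::printf("Object with tag %d\n", tag); }
};
}  // namespace demo

// Debugger-callable hook: plain signature, raw pointer in, so
// "call _demo_Print_Object(ptr)" works from a gdb prompt.
extern "C" void _demo_Print_Object(void* object) {
  reinterpret_cast<demo::Object*>(object)->Print();
}

int main() {
  demo::Object o{7};
  _demo_Print_Object(&o);  // in gdb: call _demo_Print_Object(&o)
  return 0;
}
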
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 51993f3f32..00721c2d1b 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -6,15 +6,16 @@
#include <cmath>
#include <iomanip>
+#include <memory>
#include <sstream>
#include "src/objects-inl.h"
#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
-#include "src/api.h"
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/api-natives.h"
+#include "src/api.h"
#include "src/base/bits.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
@@ -22,6 +23,8 @@
#include "src/codegen.h"
#include "src/compilation-dependencies.h"
#include "src/compiler.h"
+#include "src/counters-inl.h"
+#include "src/counters.h"
#include "src/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -30,12 +33,14 @@
#include "src/field-index-inl.h"
#include "src/field-index.h"
#include "src/field-type.h"
+#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/globals.h"
#include "src/ic/ic.h"
#include "src/identity-map.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter.h"
-#include "src/interpreter/source-position-table.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/list.h"
@@ -44,20 +49,23 @@
#include "src/macro-assembler.h"
#include "src/messages.h"
#include "src/objects-body-descriptors-inl.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
#include "src/regexp/jsregexp.h"
#include "src/safepoint-table.h"
+#include "src/snapshot/code-serializer.h"
+#include "src/source-position-table.h"
#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
#include "src/utils.h"
+#include "src/wasm/wasm-module.h"
#include "src/zone.h"
#ifdef ENABLE_DISASSEMBLER
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/eh-frame.h"
#endif
namespace v8 {
@@ -121,8 +129,8 @@ MaybeHandle<JSReceiver> Object::ConvertReceiver(Isolate* isolate,
Handle<Object> object) {
if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
if (*object == isolate->heap()->null_value() ||
- *object == isolate->heap()->undefined_value()) {
- return handle(isolate->global_proxy(), isolate);
+ object->IsUndefined(isolate)) {
+ return isolate->global_proxy();
}
return Object::ToObject(isolate, object);
}
@@ -213,6 +221,155 @@ MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
}
}
+namespace {
+
+bool IsErrorObject(Isolate* isolate, Handle<Object> object) {
+ if (!object->IsJSReceiver()) return false;
+ Handle<Symbol> symbol = isolate->factory()->stack_trace_symbol();
+ return JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), symbol)
+ .FromMaybe(false);
+}
+
+Handle<String> AsStringOrEmpty(Isolate* isolate, Handle<Object> object) {
+ return object->IsString() ? Handle<String>::cast(object)
+ : isolate->factory()->empty_string();
+}
+
+Handle<String> NoSideEffectsErrorToString(Isolate* isolate,
+ Handle<Object> input) {
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(input);
+
+ Handle<Name> name_key = isolate->factory()->name_string();
+ Handle<Object> name = JSReceiver::GetDataProperty(receiver, name_key);
+ Handle<String> name_str = AsStringOrEmpty(isolate, name);
+
+ Handle<Name> msg_key = isolate->factory()->message_string();
+ Handle<Object> msg = JSReceiver::GetDataProperty(receiver, msg_key);
+ Handle<String> msg_str = AsStringOrEmpty(isolate, msg);
+
+ if (name_str->length() == 0) return msg_str;
+ if (msg_str->length() == 0) return name_str;
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(name_str);
+ builder.AppendCString(": ");
+ builder.AppendString(msg_str);
+
+ return builder.Finish().ToHandleChecked();
+}
+
+} // namespace
+
+// static
+Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
+ Handle<Object> input) {
+ DisallowJavascriptExecution no_js(isolate);
+
+ if (input->IsString() || input->IsNumber() || input->IsOddball() ||
+ input->IsSimd128Value()) {
+ return Object::ToString(isolate, input).ToHandleChecked();
+ } else if (input->IsFunction()) {
+ // -- F u n c t i o n
+ Handle<String> fun_str;
+ if (input->IsJSBoundFunction()) {
+ fun_str = JSBoundFunction::ToString(Handle<JSBoundFunction>::cast(input));
+ } else {
+ DCHECK(input->IsJSFunction());
+ fun_str = JSFunction::ToString(Handle<JSFunction>::cast(input));
+ }
+
+ if (fun_str->length() > 128) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(isolate->factory()->NewSubString(fun_str, 0, 111));
+ builder.AppendCString("...<omitted>...");
+ builder.AppendString(isolate->factory()->NewSubString(
+ fun_str, fun_str->length() - 2, fun_str->length()));
+
+ return builder.Finish().ToHandleChecked();
+ }
+ return fun_str;
+ } else if (input->IsSymbol()) {
+ // -- S y m b o l
+ Handle<Symbol> symbol = Handle<Symbol>::cast(input);
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("Symbol(");
+ if (symbol->name()->IsString()) {
+ builder.AppendString(handle(String::cast(symbol->name()), isolate));
+ }
+ builder.AppendCharacter(')');
+
+ return builder.Finish().ToHandleChecked();
+ } else if (input->IsJSReceiver()) {
+ // -- J S R e c e i v e r
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(input);
+ Handle<Object> to_string = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->toString_string());
+
+ if (IsErrorObject(isolate, input) ||
+ *to_string == *isolate->error_to_string()) {
+ // When internally formatting error objects, use a side-effects-free
+ // version of Error.prototype.toString independent of the actually
+ // installed toString method.
+ return NoSideEffectsErrorToString(isolate, input);
+ } else if (*to_string == *isolate->object_to_string()) {
+ Handle<Object> ctor = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->constructor_string());
+ if (ctor->IsFunction()) {
+ Handle<String> ctor_name;
+ if (ctor->IsJSBoundFunction()) {
+ ctor_name = JSBoundFunction::GetName(
+ isolate, Handle<JSBoundFunction>::cast(ctor))
+ .ToHandleChecked();
+ } else if (ctor->IsJSFunction()) {
+ Handle<Object> ctor_name_obj =
+ JSFunction::GetName(isolate, Handle<JSFunction>::cast(ctor));
+ ctor_name = AsStringOrEmpty(isolate, ctor_name_obj);
+ }
+
+ if (ctor_name->length() != 0) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("#<");
+ builder.AppendString(ctor_name);
+ builder.AppendCString(">");
+
+ return builder.Finish().ToHandleChecked();
+ }
+ }
+ }
+ }
+
+ // At this point, input is either none of the above or a JSReceiver.
+
+ Handle<JSReceiver> receiver;
+ if (input->IsJSReceiver()) {
+ receiver = Handle<JSReceiver>::cast(input);
+ } else {
+ // This is the only case where Object::ToObject throws.
+ DCHECK(!input->IsSmi());
+ int constructor_function_index =
+ Handle<HeapObject>::cast(input)->map()->GetConstructorFunctionIndex();
+ if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
+ return isolate->factory()->NewStringFromAsciiChecked("[object Unknown]");
+ }
+
+ receiver = Object::ToObject(isolate, input, isolate->native_context())
+ .ToHandleChecked();
+ }
+
+ Handle<String> builtin_tag = handle(receiver->class_name(), isolate);
+ Handle<Object> tag_obj = JSReceiver::GetDataProperty(
+ receiver, isolate->factory()->to_string_tag_symbol());
+ Handle<String> tag =
+ tag_obj->IsString() ? Handle<String>::cast(tag_obj) : builtin_tag;
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("[object ");
+ builder.AppendString(tag);
+ builder.AppendCString("]");
+
+ return builder.Finish().ToHandleChecked();
+}
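+
+// Illustrative outcomes of Object::NoSideEffectsToString above (a sketch,
+// not an exhaustive list):
+//   42                    -> "42"              (primitive fast path)
+//   Symbol("id")          -> "Symbol(id)"
+//   new TypeError("boom") -> "TypeError: boom" (error fast path)
+//   new class Foo {}      -> "#<Foo>"          (default Object toString)
+// Function sources longer than 128 characters are elided in the middle
+// with "...<omitted>...".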
// static
MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
@@ -226,11 +383,25 @@ MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
return isolate->factory()->NewNumber(len);
}
+// static
+MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
+ MessageTemplate::Template error_index) {
+ if (input->IsUndefined(isolate)) return isolate->factory()->NewNumber(0.0);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ double len = DoubleToInteger(input->Number()) + 0.0;
+ auto js_len = isolate->factory()->NewNumber(len);
+ if (len < 0.0 || len > kMaxSafeInteger) {
+ THROW_NEW_ERROR(isolate, NewRangeError(error_index, js_len), Object);
+ }
+ return js_len;
+}
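+
+// Sketch of Object::ToIndex's clamping above (error_index selects the
+// RangeError message template):
+//   ToIndex(undefined) -> 0
+//   ToIndex(3.7)       -> 3          (DoubleToInteger truncates)
+//   ToIndex(-0)        -> 0          (the "+ 0.0" normalizes the sign)
+//   ToIndex(-1)        -> RangeError
+//   ToIndex(2**53)     -> RangeError (exceeds kMaxSafeInteger)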
bool Object::BooleanValue() {
- if (IsBoolean()) return IsTrue();
if (IsSmi()) return Smi::cast(this)->value() != 0;
- if (IsUndefined() || IsNull()) return false;
+ DCHECK(IsHeapObject());
+ Isolate* isolate = HeapObject::cast(this)->GetIsolate();
+ if (IsBoolean()) return IsTrue(isolate);
+ if (IsUndefined(isolate) || IsNull(isolate)) return false;
if (IsUndetectable()) return false; // Undetectable object is false.
if (IsString()) return String::cast(this)->length() != 0;
if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
@@ -568,6 +739,86 @@ MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
NumberToInt32(*rhs));
}
+// static
+MaybeHandle<Object> Object::OrdinaryHasInstance(Isolate* isolate,
+ Handle<Object> callable,
+ Handle<Object> object) {
+ // The {callable} must have a [[Call]] internal method.
+ if (!callable->IsCallable()) return isolate->factory()->false_value();
+
+ // Check if {callable} is a bound function, and if so retrieve its
+ // [[BoundTargetFunction]] and use that instead of {callable}.
+ if (callable->IsJSBoundFunction()) {
+ Handle<Object> bound_callable(
+ Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+ isolate);
+ return Object::InstanceOf(isolate, object, bound_callable);
+ }
+
+ // If {object} is not a receiver, return false.
+ if (!object->IsJSReceiver()) return isolate->factory()->false_value();
+
+ // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prototype,
+ Object::GetProperty(callable, isolate->factory()->prototype_string()),
+ Object);
+ if (!prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype),
+ Object);
+ }
+
+ // Return whether or not {prototype} is in the prototype chain of {object}.
+ Maybe<bool> result = JSReceiver::HasInPrototypeChain(
+ isolate, Handle<JSReceiver>::cast(object), prototype);
+ if (result.IsNothing()) return MaybeHandle<Object>();
+ return isolate->factory()->ToBoolean(result.FromJust());
+}
+
+// static
+MaybeHandle<Object> Object::InstanceOf(Isolate* isolate, Handle<Object> object,
+ Handle<Object> callable) {
+ // The {callable} must be a receiver.
+ if (!callable->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck),
+ Object);
+ }
+
+ // Lookup the @@hasInstance method on {callable}.
+ Handle<Object> inst_of_handler;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, inst_of_handler,
+ JSReceiver::GetMethod(Handle<JSReceiver>::cast(callable),
+ isolate->factory()->has_instance_symbol()),
+ Object);
+ if (!inst_of_handler->IsUndefined(isolate)) {
+ // Call the {inst_of_handler} on the {callable}.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, inst_of_handler, callable, 1, &object),
+ Object);
+ return isolate->factory()->ToBoolean(result->BooleanValue());
+ }
+
+ // The {callable} must have a [[Call]] internal method.
+ if (!callable->IsCallable()) {
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kNonCallableInInstanceOfCheck),
+ Object);
+ }
+
+ // Fall back to OrdinaryHasInstance with {callable} and {object}.
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ JSReceiver::OrdinaryHasInstance(isolate, callable, object), Object);
+ return result;
+}
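+
+// JS-level sketch of Object::InstanceOf's dispatch above: a @@hasInstance
+// method, when present, takes precedence over the prototype-chain walk of
+// OrdinaryHasInstance:
+//   class Even { static [Symbol.hasInstance](n) { return n % 2 === 0; } }
+//   4 instanceof Even    // true: the handler's result, via BooleanValue()
+//   ({}) instanceof Math // TypeError: no handler and Math has no [[Call]]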
Maybe<bool> Object::IsArray(Handle<Object> object) {
if (object->IsJSArray()) return Just(true);
@@ -586,20 +837,6 @@ Maybe<bool> Object::IsArray(Handle<Object> object) {
}
-bool Object::IsPromise(Handle<Object> object) {
- if (!object->IsJSObject()) return false;
- auto js_object = Handle<JSObject>::cast(object);
- // Promises can't have access checks.
- if (js_object->map()->is_access_check_needed()) return false;
- auto isolate = js_object->GetIsolate();
- // TODO(dcarney): this should just be read from the symbol registry so as not
- // to be context dependent.
- auto key = isolate->factory()->promise_status_symbol();
- // Shouldn't be possible to throw here.
- return JSObject::HasRealNamedProperty(js_object, key).FromJust();
-}
-
-
// static
MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
Handle<Name> name) {
@@ -607,7 +844,7 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
Isolate* isolate = receiver->GetIsolate();
ASSIGN_RETURN_ON_EXCEPTION(isolate, func,
JSReceiver::GetProperty(receiver, name), Object);
- if (func->IsNull() || func->IsUndefined()) {
+ if (func->IsNull(isolate) || func->IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
if (!func->IsCallable()) {
@@ -634,14 +871,9 @@ MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
}
// 4. Let len be ? ToLength(? Get(obj, "length")).
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
- Handle<Object> raw_length_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, raw_length_obj,
- JSReceiver::GetProperty(receiver, isolate->factory()->length_string()),
- FixedArray);
Handle<Object> raw_length_number;
ASSIGN_RETURN_ON_EXCEPTION(isolate, raw_length_number,
- Object::ToLength(isolate, raw_length_obj),
+ Object::GetLengthFromArrayLike(isolate, receiver),
FixedArray);
uint32_t len;
if (!raw_length_number->ToUint32(&len) ||
@@ -688,6 +920,16 @@ MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
// static
+MaybeHandle<Object> Object::GetLengthFromArrayLike(Isolate* isolate,
+ Handle<Object> object) {
+ Handle<Object> val;
+ Handle<Object> key = isolate->factory()->length_string();
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, val, Runtime::GetObjectProperty(isolate, object, key), Object);
+ return Object::ToLength(isolate, val);
+}
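+
+// JS-level sketch: Object::GetLengthFromArrayLike implements
+// ToLength(? Get(obj, "length")), so {length: "3"} yields 3 and a missing
+// or undefined length yields 0 (ToLength clamps to [0, 2^53 - 1]).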
+
+// static
Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -762,17 +1004,6 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
}
-#define STACK_CHECK(result_value) \
- do { \
- StackLimitCheck stack_check(isolate); \
- if (stack_check.HasOverflowed()) { \
- isolate->Throw(*isolate->factory()->NewRangeError( \
- MessageTemplate::kStackOverflow)); \
- return result_value; \
- } \
- } while (false)
-
-
// static
MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
Handle<JSProxy> proxy,
@@ -788,7 +1019,7 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
}
DCHECK(!name->IsPrivate());
- STACK_CHECK(MaybeHandle<Object>());
+ STACK_CHECK(isolate, MaybeHandle<Object>());
Handle<Name> trap_name = isolate->factory()->get_string();
// 1. Assert: IsPropertyKey(P) is true.
// 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
@@ -808,7 +1039,7 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
isolate, trap,
Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name), Object);
// 7. If trap is undefined, then
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
// 7.a Return target.[[Get]](P, Receiver).
LookupIterator it =
LookupIterator::PropertyOrElement(isolate, receiver, name, target);
@@ -848,8 +1079,8 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
// 10.b.i. If trapResult is not undefined, throw a TypeError exception.
inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
!target_desc.configurable() &&
- target_desc.get()->IsUndefined() &&
- !trap_result->IsUndefined();
+ target_desc.get()->IsUndefined(isolate) &&
+ !trap_result->IsUndefined(isolate);
if (inconsistent) {
THROW_NEW_ERROR(
isolate,
@@ -908,12 +1139,40 @@ bool Object::ToInt32(int32_t* value) {
return false;
}
+Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
+ Isolate* isolate, Handle<FunctionTemplateInfo> info) {
+ Object* current_info = info->shared_function_info();
+ if (current_info->IsSharedFunctionInfo()) {
+ return handle(SharedFunctionInfo::cast(current_info), isolate);
+ }
-bool FunctionTemplateInfo::IsTemplateFor(Object* object) {
- if (!object->IsHeapObject()) return false;
- return IsTemplateFor(HeapObject::cast(object)->map());
-}
+ Handle<Object> class_name(info->class_name(), isolate);
+ Handle<String> name = class_name->IsString()
+ ? Handle<String>::cast(class_name)
+ : isolate->factory()->empty_string();
+ Handle<Code> code;
+ if (info->call_code()->IsCallHandlerInfo() &&
+ CallHandlerInfo::cast(info->call_code())->fast_handler()->IsCode()) {
+ code = isolate->builtins()->HandleFastApiCall();
+ } else {
+ code = isolate->builtins()->HandleApiCall();
+ }
+ bool is_constructor = !info->remove_prototype();
+ Handle<SharedFunctionInfo> result =
+ isolate->factory()->NewSharedFunctionInfo(name, code, is_constructor);
+ if (is_constructor) {
+ result->SetConstructStub(*isolate->builtins()->JSConstructStubApi());
+ }
+
+ result->set_length(info->length());
+ if (class_name->IsString()) result->set_instance_class_name(*class_name);
+ result->set_api_func_data(*info);
+ result->DontAdaptArguments();
+ DCHECK(result->IsApiFunction());
+ info->set_shared_function_info(*result);
+ return result;
+}
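+
+// Sketch of the lazy caching above: the first call materializes a
+// SharedFunctionInfo for the template (picking the fast or regular API-call
+// builtin) and stores it back on the FunctionTemplateInfo, so later calls
+// take the early-return path and reuse the same SharedFunctionInfo.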
bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
// There is a constraint on the object; check.
@@ -934,25 +1193,25 @@ bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
}
-// TODO(dcarney): CallOptimization duplicates this logic, merge.
-Object* FunctionTemplateInfo::GetCompatibleReceiver(Isolate* isolate,
- Object* receiver) {
- // API calls are only supported with JSObject receivers.
- if (!receiver->IsJSObject()) return isolate->heap()->null_value();
- Object* recv_type = this->signature();
- // No signature, return holder.
- if (recv_type->IsUndefined()) return receiver;
- FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
- // Check the receiver.
- for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
- PrototypeIterator::START_AT_RECEIVER,
- PrototypeIterator::END_AT_NON_HIDDEN);
- !iter.IsAtEnd(); iter.Advance()) {
- if (signature->IsTemplateFor(iter.GetCurrent())) return iter.GetCurrent();
- }
- return isolate->heap()->null_value();
+// static
+Handle<TemplateList> TemplateList::New(Isolate* isolate, int size) {
+ Handle<FixedArray> list =
+ isolate->factory()->NewFixedArray(kLengthIndex + size);
+ list->set(kLengthIndex, Smi::FromInt(0));
+ return Handle<TemplateList>::cast(list);
}
+// static
+Handle<TemplateList> TemplateList::Add(Isolate* isolate,
+ Handle<TemplateList> list,
+ Handle<i::Object> value) {
+ STATIC_ASSERT(kFirstElementIndex == 1);
+ int index = list->length() + 1;
+ Handle<i::FixedArray> fixed_array = Handle<FixedArray>::cast(list);
+ fixed_array = FixedArray::SetAndGrow(fixed_array, index, value);
+ fixed_array->set(kLengthIndex, Smi::FromInt(index));
+ return Handle<TemplateList>::cast(fixed_array);
+}
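+
+// Resulting layout (sketch): a TemplateList is a plain FixedArray whose
+// slot kLengthIndex (0) holds the element count as a Smi and whose payload
+// starts at kFirstElementIndex (1):
+//   [ count | e1 | e2 | ... ]
+// Add() grows the backing store via FixedArray::SetAndGrow and bumps the
+// stored count, so callers must use the returned handle.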
// static
MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
@@ -1001,7 +1260,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> trap_name = isolate->factory()->getPrototypeOf_string();
- STACK_CHECK(MaybeHandle<Object>());
+ STACK_CHECK(isolate, MaybeHandle<Object>());
// 1. Let handler be the value of the [[ProxyHandler]] internal slot.
// 2. If handler is null, throw a TypeError exception.
@@ -1020,7 +1279,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, trap, GetMethod(handler, trap_name),
Object);
// 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
return JSReceiver::GetPrototype(isolate, target);
}
// 7. Let handlerProto be ? Call(trap, handler, «target»).
@@ -1030,7 +1289,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
isolate, handler_proto,
Execution::Call(isolate, trap, handler, arraysize(argv), argv), Object);
// 8. If Type(handlerProto) is neither Object nor Null, throw a TypeError.
- if (!(handler_proto->IsJSReceiver() || handler_proto->IsNull())) {
+ if (!(handler_proto->IsJSReceiver() || handler_proto->IsNull(isolate))) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kProxyGetPrototypeOfInvalid),
Object);
@@ -1098,16 +1357,9 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
// Regular accessor.
Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
if (getter->IsFunctionTemplateInfo()) {
- auto result = Builtins::InvokeApiFunction(
- Handle<FunctionTemplateInfo>::cast(getter), receiver, 0, nullptr);
- if (isolate->has_pending_exception()) {
- return MaybeHandle<Object>();
- }
- Handle<Object> return_value;
- if (result.ToHandle(&return_value)) {
- return_value->VerifyApiCallResultType();
- return handle(*return_value, isolate);
- }
+ return Builtins::InvokeApiFunction(
+ isolate, false, Handle<FunctionTemplateInfo>::cast(getter), receiver, 0,
+ nullptr, isolate->factory()->undefined_value());
} else if (getter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return Object::GetPropertyWithDefinedGetter(
@@ -1117,6 +1369,20 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
return ReadAbsentProperty(isolate, receiver, it->GetName());
}
+// static
+Address AccessorInfo::redirect(Isolate* isolate, Address address,
+ AccessorComponent component) {
+ ApiFunction fun(address);
+ DCHECK_EQ(ACCESSOR_GETTER, component);
+ ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+ return ExternalReference(&fun, type, isolate).address();
+}
+
+Address AccessorInfo::redirected_getter() const {
+ Address accessor = v8::ToCData<Address>(getter());
+ if (accessor == nullptr) return nullptr;
+ return redirect(GetIsolate(), accessor, ACCESSOR_GETTER);
+}
bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
Handle<AccessorInfo> info,
@@ -1173,12 +1439,12 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
if (setter->IsFunctionTemplateInfo()) {
Handle<Object> argv[] = {value};
- auto result =
- Builtins::InvokeApiFunction(Handle<FunctionTemplateInfo>::cast(setter),
- receiver, arraysize(argv), argv);
- if (isolate->has_pending_exception()) {
- return Nothing<bool>();
- }
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate, Builtins::InvokeApiFunction(
+ isolate, false, Handle<FunctionTemplateInfo>::cast(setter),
+ receiver, arraysize(argv), argv,
+ isolate->factory()->undefined_value()),
+ Nothing<bool>());
return Just(true);
} else if (setter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
@@ -1230,18 +1496,6 @@ Maybe<bool> Object::SetPropertyWithDefinedSetter(Handle<Object> receiver,
// static
-bool Object::IsErrorObject(Isolate* isolate, Handle<Object> object) {
- if (!object->IsJSObject()) return false;
- // Use stack_trace_symbol as proxy for [[ErrorData]].
- Handle<Name> symbol = isolate->factory()->stack_trace_symbol();
- Maybe<bool> has_stack_trace =
- JSReceiver::HasOwnProperty(Handle<JSReceiver>::cast(object), symbol);
- DCHECK(!has_stack_trace.IsNothing());
- return has_stack_trace.FromJust();
-}
-
-
-// static
bool JSObject::AllCanRead(LookupIterator* it) {
// Skip current iteration, it's in state ACCESS_CHECK or INTERCEPTOR, both of
// which have already been checked.
@@ -1263,19 +1517,191 @@ bool JSObject::AllCanRead(LookupIterator* it) {
return false;
}
+namespace {
+
+MaybeHandle<Object> GetPropertyWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor, bool* done) {
+ *done = false;
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ if (interceptor->getter()->IsUndefined(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ Handle<Object> result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Object::DONT_THROW);
+
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+ result = args.Call(getter, index);
+ } else {
+ Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
+
+ if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+ return isolate->factory()->undefined_value();
+ }
+
+ v8::GenericNamedPropertyGetterCallback getter =
+ v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+ interceptor->getter());
+ result = args.Call(getter, name);
+ }
+
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (result.is_null()) return isolate->factory()->undefined_value();
+ *done = true;
+ // Rebox the handle before returning.
+ return handle(*result, isolate);
+}
+
+Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc(isolate);
+ HandleScope scope(isolate);
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ if (!it->IsElement() && it->name()->IsSymbol() &&
+ !interceptor->can_intercept_symbols()) {
+ return Just(ABSENT);
+ }
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<PropertyAttributes>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, Object::DONT_THROW);
+ if (!interceptor->query()->IsUndefined(isolate)) {
+ Handle<Object> result;
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyQueryCallback query =
+ v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
+ result = args.Call(query, index);
+ } else {
+ Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
+ v8::GenericNamedPropertyQueryCallback query =
+ v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
+ interceptor->query());
+ result = args.Call(query, name);
+ }
+ if (!result.is_null()) {
+ int32_t value;
+ CHECK(result->ToInt32(&value));
+ return Just(static_cast<PropertyAttributes>(value));
+ }
+ } else if (!interceptor->getter()->IsUndefined(isolate)) {
+ // TODO(verwaest): Use GetPropertyWithInterceptor?
+ Handle<Object> result;
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertyGetterCallback getter =
+ v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
+ result = args.Call(getter, index);
+ } else {
+ Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
+ v8::GenericNamedPropertyGetterCallback getter =
+ v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+ interceptor->getter());
+ result = args.Call(getter, name);
+ }
+ if (!result.is_null()) return Just(DONT_ENUM);
+ }
+
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ return Just(ABSENT);
+}
+
+Maybe<bool> SetPropertyWithInterceptorInternal(
+ LookupIterator* it, Handle<InterceptorInfo> interceptor,
+ Object::ShouldThrow should_throw, Handle<Object> value) {
+ Isolate* isolate = it->isolate();
+ // Make sure that the top context does not change when doing callbacks or
+ // interceptor calls.
+ AssertNoContextChange ncc(isolate);
+
+ if (interceptor->setter()->IsUndefined(isolate)) return Just(false);
+
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ bool result;
+ Handle<Object> receiver = it->GetReceiver();
+ if (!receiver->IsJSReceiver()) {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
+ Object::ConvertReceiver(isolate, receiver),
+ Nothing<bool>());
+ }
+ PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+ *holder, should_throw);
+
+ if (it->IsElement()) {
+ uint32_t index = it->index();
+ v8::IndexedPropertySetterCallback setter =
+ v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
+ // TODO(neis): In the future, we may want to actually return the
+ // interceptor's result, which then should be a boolean.
+ result = !args.Call(setter, index, value).is_null();
+ } else {
+ Handle<Name> name = it->name();
+ DCHECK(!name->IsPrivate());
+
+ if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+ return Just(false);
+ }
+
+ v8::GenericNamedPropertySetterCallback setter =
+ v8::ToCData<v8::GenericNamedPropertySetterCallback>(
+ interceptor->setter());
+ result = !args.Call(setter, name, value).is_null();
+ }
+
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ return Just(result);
+}
+
+} // namespace
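+
+// Note on the three helpers above: each takes the InterceptorInfo as an
+// explicit argument instead of reading it off the LookupIterator, so the
+// failed-access-check paths below can reuse them with the interceptor
+// returned by GetInterceptorForFailedAccessCheck.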
MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
LookupIterator* it) {
+ Isolate* isolate = it->isolate();
Handle<JSObject> checked = it->GetHolder<JSObject>();
- while (AllCanRead(it)) {
- if (it->state() == LookupIterator::ACCESSOR) {
- return GetPropertyWithAccessor(it);
+ Handle<InterceptorInfo> interceptor =
+ it->GetInterceptorForFailedAccessCheck();
+ if (interceptor.is_null()) {
+ while (AllCanRead(it)) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ return GetPropertyWithAccessor(it);
+ }
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ bool done;
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ GetPropertyWithInterceptor(it, &done), Object);
+ if (done) return result;
}
- DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ } else {
+ MaybeHandle<Object> result;
bool done;
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), result,
- GetPropertyWithInterceptor(it, &done), Object);
+ result = GetPropertyWithInterceptorInternal(it, interceptor, &done);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (done) return result;
}
@@ -1286,27 +1712,36 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
return it->factory()->undefined_value();
}
- it->isolate()->ReportFailedAccessCheck(checked);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
+ isolate->ReportFailedAccessCheck(checked);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return it->factory()->undefined_value();
}
Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
LookupIterator* it) {
+ Isolate* isolate = it->isolate();
Handle<JSObject> checked = it->GetHolder<JSObject>();
- while (AllCanRead(it)) {
- if (it->state() == LookupIterator::ACCESSOR) {
- return Just(it->property_attributes());
+ Handle<InterceptorInfo> interceptor =
+ it->GetInterceptorForFailedAccessCheck();
+ if (interceptor.is_null()) {
+ while (AllCanRead(it)) {
+ if (it->state() == LookupIterator::ACCESSOR) {
+ return Just(it->property_attributes());
+ }
+ DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
+ auto result = GetPropertyAttributesWithInterceptor(it);
+ if (isolate->has_scheduled_exception()) break;
+ if (result.IsJust() && result.FromJust() != ABSENT) return result;
}
- DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- auto result = GetPropertyAttributesWithInterceptor(it);
- if (it->isolate()->has_scheduled_exception()) break;
- if (result.IsJust() && result.FromJust() != ABSENT) return result;
+ } else {
+ Maybe<PropertyAttributes> result =
+ GetPropertyAttributesWithInterceptorInternal(it, interceptor);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ if (result.FromMaybe(ABSENT) != ABSENT) return result;
}
- it->isolate()->ReportFailedAccessCheck(checked);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(),
- Nothing<PropertyAttributes>());
+ isolate->ReportFailedAccessCheck(checked);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
return Just(ABSENT);
}
@@ -1327,13 +1762,23 @@ bool JSObject::AllCanWrite(LookupIterator* it) {
Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw) {
+ Isolate* isolate = it->isolate();
Handle<JSObject> checked = it->GetHolder<JSObject>();
- if (AllCanWrite(it)) {
- return SetPropertyWithAccessor(it, value, should_throw);
+ Handle<InterceptorInfo> interceptor =
+ it->GetInterceptorForFailedAccessCheck();
+ if (interceptor.is_null()) {
+ if (AllCanWrite(it)) {
+ return SetPropertyWithAccessor(it, value, should_throw);
+ }
+ } else {
+ Maybe<bool> result = SetPropertyWithInterceptorInternal(
+ it, interceptor, should_throw, value);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (result.IsJust()) return result;
}
- it->isolate()->ReportFailedAccessCheck(checked);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
+ isolate->ReportFailedAccessCheck(checked);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
return Just(true);
}
@@ -1349,44 +1794,47 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
}
if (object->IsJSGlobalObject()) {
- Handle<GlobalDictionary> property_dictionary(object->global_dictionary());
+ Handle<GlobalDictionary> dictionary(object->global_dictionary());
- int entry = property_dictionary->FindEntry(name);
+ int entry = dictionary->FindEntry(name);
if (entry == GlobalDictionary::kNotFound) {
- auto cell = object->GetIsolate()->factory()->NewPropertyCell();
+ Isolate* isolate = object->GetIsolate();
+ auto cell = isolate->factory()->NewPropertyCell();
cell->set_value(*value);
- auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
- : PropertyCellType::kConstant;
+ auto cell_type = value->IsUndefined(isolate)
+ ? PropertyCellType::kUndefined
+ : PropertyCellType::kConstant;
details = details.set_cell_type(cell_type);
value = cell;
- property_dictionary =
- GlobalDictionary::Add(property_dictionary, name, value, details);
- object->set_properties(*property_dictionary);
+ dictionary = GlobalDictionary::Add(dictionary, name, value, details);
+ object->set_properties(*dictionary);
} else {
- PropertyCell::UpdateCell(property_dictionary, entry, value, details);
+ Handle<PropertyCell> cell =
+ PropertyCell::PrepareForValue(dictionary, entry, value, details);
+ cell->set_value(*value);
}
} else {
- Handle<NameDictionary> property_dictionary(object->property_dictionary());
+ Handle<NameDictionary> dictionary(object->property_dictionary());
- int entry = property_dictionary->FindEntry(name);
+ int entry = dictionary->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
- property_dictionary =
- NameDictionary::Add(property_dictionary, name, value, details);
- object->set_properties(*property_dictionary);
+ dictionary = NameDictionary::Add(dictionary, name, value, details);
+ object->set_properties(*dictionary);
} else {
- PropertyDetails original_details = property_dictionary->DetailsAt(entry);
+ PropertyDetails original_details = dictionary->DetailsAt(entry);
int enumeration_index = original_details.dictionary_index();
DCHECK(enumeration_index > 0);
details = details.set_index(enumeration_index);
- property_dictionary->SetEntry(entry, name, value, details);
+ dictionary->SetEntry(entry, name, value, details);
}
}
}
+// static
Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> proto) {
- PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
+ PrototypeIterator iter(isolate, object, kStartAtReceiver);
while (true) {
if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
if (iter.IsAtEnd()) return Just(false);
@@ -1396,7 +1844,6 @@ Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
}
}
-
Map* Object::GetRootMap(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
@@ -1421,28 +1868,21 @@ Map* Object::GetRootMap(Isolate* isolate) {
return isolate->heap()->null_value()->map();
}
+namespace {
-Object* Object::GetHash() {
- Object* hash = GetSimpleHash();
- if (hash->IsSmi()) return hash;
-
- DisallowHeapAllocation no_gc;
- DCHECK(IsJSReceiver());
- JSReceiver* receiver = JSReceiver::cast(this);
- Isolate* isolate = receiver->GetIsolate();
- return *JSReceiver::GetIdentityHash(isolate, handle(receiver, isolate));
-}
-
-
-Object* Object::GetSimpleHash() {
+// Returns the receiver itself (a non-Smi) for JSReceivers, but returns the
+// Smi hash code for simple objects. This avoids a double lookup in the cases
+// where we know we will add the hash to the JSObject if it does not already
+// exist.
+Object* GetSimpleHash(Object* object) {
// The object is either a Smi, a HeapNumber, a name, an odd-ball,
// a SIMD value type, a real JS object, or a Harmony proxy.
- if (IsSmi()) {
- uint32_t hash = ComputeIntegerHash(Smi::cast(this)->value(), kZeroHashSeed);
+ if (object->IsSmi()) {
+ uint32_t hash =
+ ComputeIntegerHash(Smi::cast(object)->value(), kZeroHashSeed);
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (IsHeapNumber()) {
- double num = HeapNumber::cast(this)->value();
+ if (object->IsHeapNumber()) {
+ double num = HeapNumber::cast(object)->value();
if (std::isnan(num)) return Smi::FromInt(Smi::kMaxValue);
if (i::IsMinusZero(num)) num = 0;
if (IsSmiDouble(num)) {
@@ -1451,30 +1891,43 @@ Object* Object::GetSimpleHash() {
uint32_t hash = ComputeLongHash(double_to_uint64(num));
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (IsName()) {
- uint32_t hash = Name::cast(this)->Hash();
+ if (object->IsName()) {
+ uint32_t hash = Name::cast(object)->Hash();
return Smi::FromInt(hash);
}
- if (IsOddball()) {
- uint32_t hash = Oddball::cast(this)->to_string()->Hash();
+ if (object->IsOddball()) {
+ uint32_t hash = Oddball::cast(object)->to_string()->Hash();
return Smi::FromInt(hash);
}
- if (IsSimd128Value()) {
- uint32_t hash = Simd128Value::cast(this)->Hash();
+ if (object->IsSimd128Value()) {
+ uint32_t hash = Simd128Value::cast(object)->Hash();
return Smi::FromInt(hash & Smi::kMaxValue);
}
+ DCHECK(object->IsJSReceiver());
+ // Simply return the receiver, as it is guaranteed not to be a Smi.
+ return object;
+}
+
+} // namespace
+
+Object* Object::GetHash() {
+ Object* hash = GetSimpleHash(this);
+ if (hash->IsSmi()) return hash;
+
+ DisallowHeapAllocation no_gc;
DCHECK(IsJSReceiver());
JSReceiver* receiver = JSReceiver::cast(this);
- return receiver->GetHeap()->undefined_value();
+ Isolate* isolate = receiver->GetIsolate();
+ return JSReceiver::GetIdentityHash(isolate, handle(receiver, isolate));
}
-
-Handle<Smi> Object::GetOrCreateHash(Isolate* isolate, Handle<Object> object) {
- Handle<Object> hash(object->GetSimpleHash(), isolate);
- if (hash->IsSmi()) return Handle<Smi>::cast(hash);
+Smi* Object::GetOrCreateHash(Isolate* isolate, Handle<Object> object) {
+ Object* hash = GetSimpleHash(*object);
+ if (hash->IsSmi()) return Smi::cast(hash);
DCHECK(object->IsJSReceiver());
- return JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver>::cast(object));
+ return JSReceiver::GetOrCreateIdentityHash(isolate,
+ Handle<JSReceiver>::cast(object));
}
@@ -1555,11 +2008,7 @@ bool Object::SameValueZero(Object* other) {
MaybeHandle<Object> Object::ArraySpeciesConstructor(
Isolate* isolate, Handle<Object> original_array) {
- Handle<Context> native_context = isolate->native_context();
Handle<Object> default_species = isolate->array_function();
- if (!FLAG_harmony_species) {
- return default_species;
- }
if (original_array->IsJSArray() &&
Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
isolate->IsArraySpeciesLookupChainIntact()) {
@@ -1580,7 +2029,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
isolate, constructor_context,
JSReceiver::GetFunctionRealm(Handle<JSReceiver>::cast(constructor)),
Object);
- if (*constructor_context != *native_context &&
+ if (*constructor_context != *isolate->native_context() &&
*constructor == constructor_context->array_function()) {
constructor = isolate->factory()->undefined_value();
}
@@ -1591,12 +2040,12 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
JSReceiver::GetProperty(Handle<JSReceiver>::cast(constructor),
isolate->factory()->species_symbol()),
Object);
- if (constructor->IsNull()) {
+ if (constructor->IsNull(isolate)) {
constructor = isolate->factory()->undefined_value();
}
}
}
- if (constructor->IsUndefined()) {
+ if (constructor->IsUndefined(isolate)) {
return default_species;
} else {
if (!constructor->IsConstructor()) {
@@ -1718,6 +2167,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
Heap* heap = GetHeap();
bool is_one_byte = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
+ bool has_pointers = this->IsConsString() || this->IsSlicedString();
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -1746,6 +2196,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
int new_size = this->SizeFromMap(new_map);
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
ClearRecordedSlots::kNo);
+ if (has_pointers) {
+ heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ }
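+ // (Sketch of why has_pointers matters: a cons/sliced string body holds
+ // tagged pointers, so any remembered-set slots recorded for it must be
+ // cleared before the same offsets are reused for the external string's
+ // raw resource fields.)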
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
@@ -1786,6 +2239,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
if (size < ExternalString::kShortSize) return false;
Heap* heap = GetHeap();
bool is_internalized = this->IsInternalizedString();
+ bool has_pointers = this->IsConsString() || this->IsSlicedString();
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -1808,6 +2262,9 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
int new_size = this->SizeFromMap(new_map);
heap->CreateFillerObjectAt(this->address() + new_size, size - new_size,
ClearRecordedSlots::kNo);
+ if (has_pointers) {
+ heap->ClearRecordedSlotRange(this->address(), this->address() + new_size);
+ }
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
@@ -1821,8 +2278,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
return true;
}
-
-void String::StringShortPrint(StringStream* accumulator) {
+void String::StringShortPrint(StringStream* accumulator, bool show_details) {
int len = length();
if (len > kMaxShortPrintLength) {
accumulator->Add("<Very long string[%u]>", len);
@@ -1851,15 +2307,15 @@ void String::StringShortPrint(StringStream* accumulator) {
}
stream.Reset(this);
if (one_byte) {
- accumulator->Add("<String[%u]: ", length());
+ if (show_details) accumulator->Add("<String[%u]: ", length());
for (int i = 0; i < len; i++) {
accumulator->Put(static_cast<char>(stream.GetNext()));
}
- accumulator->Put('>');
+ if (show_details) accumulator->Put('>');
} else {
// Backslash indicates that the string contains control
// characters and that backslashes are therefore escaped.
- accumulator->Add("<String[%u]\\: ", length());
+ if (show_details) accumulator->Add("<String[%u]\\: ", length());
for (int i = 0; i < len; i++) {
uint16_t c = stream.GetNext();
if (c == '\n') {
@@ -1879,7 +2335,7 @@ void String::StringShortPrint(StringStream* accumulator) {
accumulator->Put('.');
accumulator->Put('.');
}
- accumulator->Put('>');
+ if (show_details) accumulator->Put('>');
}
return;
}
@@ -1897,23 +2353,15 @@ void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
switch (map()->instance_type()) {
case JS_ARRAY_TYPE: {
- double length = JSArray::cast(this)->length()->IsUndefined()
- ? 0
- : JSArray::cast(this)->length()->Number();
+ double length = JSArray::cast(this)->length()->IsUndefined(GetIsolate())
+ ? 0
+ : JSArray::cast(this)->length()->Number();
accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
break;
}
case JS_BOUND_FUNCTION_TYPE: {
JSBoundFunction* bound_function = JSBoundFunction::cast(this);
- Object* name = bound_function->name();
accumulator->Add("<JS BoundFunction");
- if (name->IsString()) {
- String* str = String::cast(name);
- if (str->length() > 0) {
- accumulator->Add(" ");
- accumulator->Put(str);
- }
- }
accumulator->Add(
" (BoundTargetFunction %p)>",
reinterpret_cast<void*>(bound_function->bound_target_function()));
@@ -1946,6 +2394,18 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
if (!printed) {
accumulator->Add("<JS Function");
}
+ if (FLAG_trace_file_names) {
+ Object* source_name =
+ Script::cast(function->shared()->script())->name();
+ if (source_name->IsString()) {
+ String* str = String::cast(source_name);
+ if (str->length() > 0) {
+ accumulator->Add(" <");
+ accumulator->Put(str);
+ accumulator->Add(">");
+ }
+ }
+ }
accumulator->Add(" (SharedFunctionInfo %p)",
reinterpret_cast<void*>(function->shared()));
accumulator->Put('>');
@@ -2131,6 +2591,7 @@ void JSObject::PrintInstanceMigration(FILE* file,
void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
Heap* heap = GetHeap();
+ Isolate* isolate = heap->isolate();
if (!heap->Contains(this)) {
os << "!!!INVALID POINTER!!!";
return;
@@ -2192,8 +2653,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(this);
- base::SmartArrayPointer<char> debug_name =
- shared->DebugName()->ToCString();
+ std::unique_ptr<char[]> debug_name = shared->DebugName()->ToCString();
if (debug_name[0] != 0) {
os << "<SharedFunctionInfo " << debug_name.get() << ">";
} else {
@@ -2216,18 +2676,20 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
}
case ODDBALL_TYPE: {
- if (IsUndefined()) {
+ if (IsUndefined(isolate)) {
os << "<undefined>";
- } else if (IsTheHole()) {
+ } else if (IsTheHole(isolate)) {
os << "<the hole>";
- } else if (IsNull()) {
+ } else if (IsNull(isolate)) {
os << "<null>";
- } else if (IsTrue()) {
+ } else if (IsTrue(isolate)) {
os << "<true>";
- } else if (IsFalse()) {
+ } else if (IsFalse(isolate)) {
os << "<false>";
} else {
- os << "<Odd Oddball>";
+ os << "<Odd Oddball: ";
+ os << Oddball::cast(this)->to_string()->ToCString().get();
+ os << ">";
}
break;
}
@@ -2458,25 +2920,6 @@ String* JSReceiver::class_name() {
}
-MaybeHandle<String> JSReceiver::BuiltinStringTag(Handle<JSReceiver> object) {
- Maybe<bool> is_array = Object::IsArray(object);
- MAYBE_RETURN(is_array, MaybeHandle<String>());
- Isolate* const isolate = object->GetIsolate();
- if (is_array.FromJust()) {
- return isolate->factory()->Array_string();
- }
- // TODO(adamk): According to ES2015, we should return "Function" when
- // object has a [[Call]] internal method (corresponds to IsCallable).
- // But this is well cemented in layout tests and might cause webbreakage.
- // if (object->IsCallable()) {
- // return isolate->factory()->Function_string();
- // }
- // TODO(adamk): class_name() is expensive, replace with instance type
- // checks where possible.
- return handle(object->class_name(), isolate);
-}
-
-
// static
Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Isolate* isolate = receiver->GetIsolate();
@@ -2600,68 +3043,6 @@ MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
return Map::CopyAddDescriptor(map, &new_constant_desc, flag);
}
-
-void JSObject::AddSlowProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes) {
- DCHECK(!object->HasFastProperties());
- Isolate* isolate = object->GetIsolate();
- if (object->IsJSGlobalObject()) {
- Handle<GlobalDictionary> dict(object->global_dictionary());
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
- int entry = dict->FindEntry(name);
- // If there's a cell there, just invalidate and set the property.
- if (entry != GlobalDictionary::kNotFound) {
- PropertyCell::UpdateCell(dict, entry, value, details);
- // TODO(ishell): move this to UpdateCell.
- // Need to adjust the details.
- int index = dict->NextEnumerationIndex();
- dict->SetNextEnumerationIndex(index + 1);
- PropertyCell* cell = PropertyCell::cast(dict->ValueAt(entry));
- details = cell->property_details().set_index(index);
- cell->set_property_details(details);
-
- } else {
- auto cell = isolate->factory()->NewPropertyCell();
- cell->set_value(*value);
- auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
- : PropertyCellType::kConstant;
- details = details.set_cell_type(cell_type);
- value = cell;
-
- Handle<GlobalDictionary> result =
- GlobalDictionary::Add(dict, name, value, details);
- if (*dict != *result) object->set_properties(*result);
- }
- } else {
- Handle<NameDictionary> dict(object->property_dictionary());
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
- Handle<NameDictionary> result =
- NameDictionary::Add(dict, name, value, details);
- if (*dict != *result) object->set_properties(*result);
- }
-}
-
-
-MaybeHandle<Object> JSObject::EnqueueChangeRecord(Handle<JSObject> object,
- const char* type_str,
- Handle<Name> name,
- Handle<Object> old_value) {
- DCHECK(!object->IsJSGlobalProxy());
- DCHECK(!object->IsJSGlobalObject());
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str);
- Handle<Object> args[] = { type, object, name, old_value };
- int argc = name.is_null() ? 2 : old_value->IsTheHole() ? 3 : 4;
-
- return Execution::Call(isolate,
- Handle<JSFunction>(isolate->observers_notify_change()),
- isolate->factory()->undefined_value(), argc, args);
-}
-
-
const char* Representation::Mnemonic() const {
switch (kind_) {
case kNone: return "v";
@@ -2727,7 +3108,6 @@ bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
Handle<Map> new_map,
Isolate* isolate) {
- if (!FLAG_track_prototype_users) return;
if (!old_map->is_prototype_map()) return;
DCHECK(new_map->is_prototype_map());
bool was_registered = JSObject::UnregisterPrototypeUser(old_map, isolate);
@@ -3124,7 +3504,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// Ensure that no transition was inserted for prototype migrations.
DCHECK_EQ(
0, TransitionArray::NumberOfTransitions(old_map->raw_transitions()));
- DCHECK(new_map->GetBackPointer()->IsUndefined());
+ DCHECK(new_map->GetBackPointer()->IsUndefined(new_map->GetIsolate()));
}
} else {
MigrateFastToSlow(object, new_map, expected_additional_properties);
@@ -3138,6 +3518,15 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// When adding code here, add a DisallowHeapAllocation too.
}
+void JSObject::ForceSetPrototype(Handle<JSObject> object,
+ Handle<Object> proto) {
+ // object.__proto__ = proto;
+ Handle<Map> old_map = Handle<Map>(object->map());
+ Handle<Map> new_map = Map::Copy(old_map, "ForceSetPrototype");
+ Map::SetPrototype(new_map, proto, FAST_PROTOTYPE);
+ JSObject::MigrateToMap(object, new_map);
+}
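+
+// Unlike the checked JSReceiver::SetPrototype path, ForceSetPrototype just
+// copies the map and installs the new prototype directly, bypassing the
+// usual extensibility and cycle checks (the "object.__proto__ = proto"
+// note above is the effect, minus those guards).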
+
int Map::NumberOfFields() {
DescriptorArray* descriptors = instance_descriptors();
int result = 0;
@@ -3236,17 +3625,18 @@ static inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
// proper sharing of descriptor arrays.
void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
LayoutDescriptor* new_layout_descriptor) {
+ Isolate* isolate = GetIsolate();
// Don't overwrite the empty descriptor array or initial map's descriptors.
- if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined()) {
+ if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
return;
}
DescriptorArray* to_replace = instance_descriptors();
- GetHeap()->incremental_marking()->IterateBlackObject(to_replace);
+ isolate->heap()->incremental_marking()->IterateBlackObject(to_replace);
Map* current = this;
while (current->instance_descriptors() == to_replace) {
Object* next = current->GetBackPointer();
- if (next->IsUndefined()) break; // Stop overwriting at initial map.
+ if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
current->SetEnumLength(kInvalidEnumCacheSentinel);
current->UpdateDescriptors(new_descriptors, new_layout_descriptor);
current = Map::cast(next);
@@ -3257,9 +3647,10 @@ void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
Map* Map::FindRootMap() {
Map* result = this;
+ Isolate* isolate = GetIsolate();
while (true) {
Object* back = result->GetBackPointer();
- if (back->IsUndefined()) {
+ if (back->IsUndefined(isolate)) {
// Initial map always owns descriptors and doesn't have unused entries
// in the descriptor array.
DCHECK(result->owns_descriptors());
@@ -3317,9 +3708,10 @@ Map* Map::FindFieldOwner(int descriptor) {
DisallowHeapAllocation no_allocation;
DCHECK_EQ(DATA, instance_descriptors()->GetDetails(descriptor).type());
Map* result = this;
+ Isolate* isolate = GetIsolate();
while (true) {
Object* back = result->GetBackPointer();
- if (back->IsUndefined()) break;
+ if (back->IsUndefined(isolate)) break;
Map* parent = Map::cast(back);
if (parent->NumberOfOwnDescriptors() <= descriptor) break;
result = parent;
@@ -4125,56 +4517,14 @@ Handle<Map> Map::Update(Handle<Map> map) {
ALLOW_IN_DESCRIPTOR);
}
-
Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
ShouldThrow should_throw,
Handle<Object> value) {
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->setter()->IsUndefined()) return Just(false);
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- bool result;
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<bool>());
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, should_throw);
-
- if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertySetterCallback setter =
- v8::ToCData<v8::IndexedPropertySetterCallback>(interceptor->setter());
- // TODO(neis): In the future, we may want to actually return the
- // interceptor's result, which then should be a boolean.
- result = !args.Call(setter, index, value).is_null();
- } else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return Just(false);
- }
-
- v8::GenericNamedPropertySetterCallback setter =
- v8::ToCData<v8::GenericNamedPropertySetterCallback>(
- interceptor->setter());
- result = !args.Call(setter, name, value).is_null();
- }
-
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(), Nothing<bool>());
- return Just(result);
+ return SetPropertyWithInterceptorInternal(it, it->GetInterceptor(),
+ should_throw, value);
}
-
MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
@@ -4190,6 +4540,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
LanguageMode language_mode,
StoreFromKeyed store_mode,
bool* found) {
+ it->UpdateProtector();
DCHECK(it->IsFound());
ShouldThrow should_throw =
is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
@@ -4214,23 +4565,49 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
value, it->GetReceiver(), language_mode);
- case LookupIterator::INTERCEPTOR:
+ case LookupIterator::INTERCEPTOR: {
+ Handle<Map> store_target_map;
+ if (it->GetReceiver()->IsJSObject()) {
+ store_target_map = handle(it->GetStoreTarget()->map(), it->isolate());
+ }
if (it->HolderIsReceiverOrHiddenPrototype()) {
Maybe<bool> result =
JSObject::SetPropertyWithInterceptor(it, should_throw, value);
if (result.IsNothing() || result.FromJust()) return result;
+ // Interceptor modified the store target but failed to set the
+ // property.
+ // TODO(jochen): Remove after we've identified the faulty interceptor.
+ if (!store_target_map.is_null() &&
+ *store_target_map != it->GetStoreTarget()->map()) {
+ it->isolate()->PushStackTraceAndDie(
+ 0xabababaa, v8::ToCData<void*>(it->GetInterceptor()->setter()),
+ nullptr, 0xabababab);
+ }
+ Utils::ApiCheck(store_target_map.is_null() ||
+ *store_target_map == it->GetStoreTarget()->map(),
+ it->IsElement() ? "v8::IndexedPropertySetterCallback"
+ : "v8::NamedPropertySetterCallback",
+ "Interceptor silently changed store target.");
} else {
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(it);
if (!maybe_attributes.IsJust()) return Nothing<bool>();
- if (maybe_attributes.FromJust() == ABSENT) break;
if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
+ // Interceptor modified the store target but failed to set the
+ // property.
+ Utils::ApiCheck(store_target_map.is_null() ||
+ *store_target_map == it->GetStoreTarget()->map(),
+ it->IsElement() ? "v8::IndexedPropertySetterCallback"
+ : "v8::NamedPropertySetterCallback",
+ "Interceptor silently changed store target.");
+ if (maybe_attributes.FromJust() == ABSENT) break;
*found = false;
return Nothing<bool>();
}
break;
+ }
case LookupIterator::ACCESSOR: {
if (it->IsReadOnly()) {
@@ -4272,7 +4649,6 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
- it->UpdateProtector();
if (it->IsFound()) {
bool found = true;
Maybe<bool> result =
@@ -4300,7 +4676,6 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
StoreFromKeyed store_mode) {
Isolate* isolate = it->isolate();
- it->UpdateProtector();
if (it->IsFound()) {
bool found = true;
Maybe<bool> result =
@@ -4308,6 +4683,8 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
if (found) return result;
}
+ it->UpdateProtector();
+
// The property either doesn't exist on the holder or exists there as a data
// property.
@@ -4319,7 +4696,7 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
}
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(it->GetReceiver());
- LookupIterator::Configuration c = LookupIterator::HIDDEN;
+ LookupIterator::Configuration c = LookupIterator::OWN;
LookupIterator own_lookup =
it->IsElement() ? LookupIterator(isolate, receiver, it->index(), c)
: LookupIterator(receiver, it->name(), c);
@@ -4443,18 +4820,10 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
// Store on the holder which may be hidden behind the receiver.
DCHECK(it->HolderIsReceiverOrHiddenPrototype());
- // Old value for the observation change record.
- // Fetch before transforming the object since the encoding may become
- // incompatible with what's cached in |it|.
- bool is_observed = receiver->map()->is_observed() &&
- (it->IsElement() || !it->name()->IsPrivate());
- MaybeHandle<Object> maybe_old;
- if (is_observed) maybe_old = it->GetDataValue();
-
Handle<Object> to_assign = value;
// Convert the incoming value to a number for storing into typed arrays.
if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
- if (!value->IsNumber() && !value->IsUndefined()) {
+ if (!value->IsNumber() && !value->IsUndefined(it->isolate())) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
// We have to recheck the length. However, it can only change if the
@@ -4473,15 +4842,6 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
// Write the property value.
it->WriteDataValue(to_assign);
- // Send the change record if there are observers.
- if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) {
- RETURN_ON_EXCEPTION_VALUE(
- it->isolate(),
- JSObject::EnqueueChangeRecord(receiver, "update", it->GetName(),
- maybe_old.ToHandleChecked()),
- Nothing<bool>());
- }
-
#if VERIFY_HEAP
if (FLAG_verify_heap) {
receiver->JSObjectVerify();
@@ -4491,47 +4851,6 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
}
-MUST_USE_RESULT static MaybeHandle<Object> BeginPerformSplice(
- Handle<JSArray> object) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> args[] = {object};
-
- return Execution::Call(
- isolate, Handle<JSFunction>(isolate->observers_begin_perform_splice()),
- isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-
-MUST_USE_RESULT static MaybeHandle<Object> EndPerformSplice(
- Handle<JSArray> object) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> args[] = {object};
-
- return Execution::Call(
- isolate, Handle<JSFunction>(isolate->observers_end_perform_splice()),
- isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-
-MUST_USE_RESULT static MaybeHandle<Object> EnqueueSpliceRecord(
- Handle<JSArray> object, uint32_t index, Handle<JSArray> deleted,
- uint32_t add_count) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Object> index_object = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> add_count_object =
- isolate->factory()->NewNumberFromUint(add_count);
-
- Handle<Object> args[] = {object, index_object, deleted, add_count_object};
-
- return Execution::Call(
- isolate, Handle<JSFunction>(isolate->observers_enqueue_splice()),
- isolate->factory()->undefined_value(), arraysize(args), args);
-}
-
-
Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
PropertyAttributes attributes,
ShouldThrow should_throw,
@@ -4586,6 +4905,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
JSObject::ValidateElements(receiver);
return result;
} else {
+ it->UpdateProtector();
// Migrate to the most up-to-date map that will be able to store |value|
// under it->name() with |attributes|.
it->PrepareTransitionToDataProperty(receiver, value, attributes,
@@ -4593,22 +4913,9 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
DCHECK_EQ(LookupIterator::TRANSITION, it->state());
it->ApplyTransitionToDataProperty(receiver);
- // TODO(verwaest): Encapsulate dictionary handling better.
- if (receiver->map()->is_dictionary_map()) {
- // TODO(dcarney): just populate TransitionPropertyCell here?
- JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
- } else {
- // Write the property value.
- it->WriteDataValue(value);
- }
+ // Write the property value.
+ it->WriteDataValue(value);
- // Send the change record if there are observers.
- if (receiver->map()->is_observed() && !it->name()->IsPrivate()) {
- RETURN_ON_EXCEPTION_VALUE(isolate, JSObject::EnqueueChangeRecord(
- receiver, "add", it->name(),
- it->factory()->the_hole_value()),
- Nothing<bool>());
- }
#if VERIFY_HEAP
if (FLAG_verify_heap) {
receiver->JSObjectVerify();
@@ -4649,22 +4956,22 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
new_descriptors->CopyEnumCacheFrom(*descriptors);
}
+ Isolate* isolate = map->GetIsolate();
// Replace descriptors by new_descriptors in all maps that share it.
- map->GetHeap()->incremental_marking()->IterateBlackObject(*descriptors);
+ isolate->heap()->incremental_marking()->IterateBlackObject(*descriptors);
Map* current = *map;
while (current->instance_descriptors() == *descriptors) {
Object* next = current->GetBackPointer();
- if (next->IsUndefined()) break; // Stop overwriting at initial map.
+ if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
current->UpdateDescriptors(*new_descriptors, layout_descriptor);
current = Map::cast(next);
}
map->UpdateDescriptors(*new_descriptors, layout_descriptor);
}
-
-template<class T>
-static int AppendUniqueCallbacks(NeanderArray* callbacks,
+template <class T>
+static int AppendUniqueCallbacks(Handle<TemplateList> callbacks,
Handle<typename T::Array> array,
int valid_descriptors) {
int nof_callbacks = callbacks->length();
@@ -4743,9 +5050,9 @@ void Map::AppendCallbackDescriptors(Handle<Map> map,
Handle<Object> descriptors) {
int nof = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> array(map->instance_descriptors());
- NeanderArray callbacks(descriptors);
- DCHECK(array->NumberOfSlackDescriptors() >= callbacks.length());
- nof = AppendUniqueCallbacks<DescriptorArrayAppender>(&callbacks, array, nof);
+ Handle<TemplateList> callbacks = Handle<TemplateList>::cast(descriptors);
+ DCHECK_GE(array->NumberOfSlackDescriptors(), callbacks->length());
+ nof = AppendUniqueCallbacks<DescriptorArrayAppender>(callbacks, array, nof);
map->SetNumberOfOwnDescriptors(nof);
}
@@ -4753,10 +5060,9 @@ void Map::AppendCallbackDescriptors(Handle<Map> map,
int AccessorInfo::AppendUnique(Handle<Object> descriptors,
Handle<FixedArray> array,
int valid_descriptors) {
- NeanderArray callbacks(descriptors);
- DCHECK(array->length() >= callbacks.length() + valid_descriptors);
- return AppendUniqueCallbacks<FixedArrayAppender>(&callbacks,
- array,
+ Handle<TemplateList> callbacks = Handle<TemplateList>::cast(descriptors);
+ DCHECK_GE(array->length(), callbacks->length() + valid_descriptors);
+ return AppendUniqueCallbacks<FixedArrayAppender>(callbacks, array,
valid_descriptors);
}
@@ -4915,7 +5221,7 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
}
}
- DCHECK(!map->IsUndefined());
+ DCHECK(!map->IsUndefined(isolate));
// Check if we can go back in the elements kind transition chain.
if (IsHoleyElementsKind(from_kind) &&
to_kind == GetPackedElementsKind(from_kind) &&
@@ -4969,7 +5275,7 @@ void JSProxy::Revoke(Handle<JSProxy> proxy) {
Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Name> name) {
DCHECK(!name->IsPrivate());
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
// 1. (Assert)
// 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
Handle<Object> handler(proxy->handler(), isolate);
@@ -4989,7 +5295,7 @@ Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
isolate->factory()->has_string()),
Nothing<bool>());
// 7. If trap is undefined, then
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
// 7a. Return target.[[HasProperty]](P).
return JSReceiver::HasProperty(target, name);
}
@@ -5038,7 +5344,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
LanguageMode language_mode) {
DCHECK(!name->IsPrivate());
Isolate* isolate = proxy->GetIsolate();
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
Factory* factory = isolate->factory();
Handle<String> trap_name = factory->set_string();
ShouldThrow should_throw =
@@ -5055,7 +5361,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
LookupIterator it =
LookupIterator::PropertyOrElement(isolate, receiver, name, target);
return Object::SetSuperProperty(&it, value, language_mode,
@@ -5091,7 +5397,7 @@ Maybe<bool> JSProxy::SetProperty(Handle<JSProxy> proxy, Handle<Name> name,
}
inconsistent = PropertyDescriptor::IsAccessorDescriptor(&target_desc) &&
!target_desc.configurable() &&
- target_desc.set()->IsUndefined();
+ target_desc.set()->IsUndefined(isolate);
if (inconsistent) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxySetFrozenAccessor, name));
@@ -5109,7 +5415,7 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
ShouldThrow should_throw =
is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
Isolate* isolate = proxy->GetIsolate();
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
Factory* factory = isolate->factory();
Handle<String> trap_name = factory->deleteProperty_string();
@@ -5124,7 +5430,7 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
return JSReceiver::DeletePropertyOrElement(target, name, language_mode);
}
@@ -5200,6 +5506,44 @@ MaybeHandle<Context> JSBoundFunction::GetFunctionRealm(
handle(function->bound_target_function()));
}
+// static
+MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
+ Handle<JSBoundFunction> function) {
+ Handle<String> prefix = isolate->factory()->bound__string();
+ if (!function->bound_target_function()->IsJSFunction()) return prefix;
+ Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
+ isolate);
+ Handle<Object> target_name = JSFunction::GetName(isolate, target);
+ if (!target_name->IsString()) return prefix;
+ Factory* factory = isolate->factory();
+ return factory->NewConsString(prefix, Handle<String>::cast(target_name));
+}
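
// Illustrative sketch, not part of this patch, of the naming rule implemented
// by JSBoundFunction::GetName above: the result is the "bound " prefix plus
// the target's name, when that name is a string. Standalone C++.
#include <string>

std::string BoundFunctionNameSketch(const std::string* target_name) {
  const std::string prefix = "bound ";        // Matches bound__string().
  if (target_name == nullptr) return prefix;  // Non-string target name.
  return prefix + *target_name;               // e.g. "bound f" for f.bind().
}
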
+
+// static
+Handle<Object> JSFunction::GetName(Isolate* isolate,
+ Handle<JSFunction> function) {
+ if (function->shared()->name_should_print_as_anonymous()) {
+ return isolate->factory()->anonymous_string();
+ }
+ return handle(function->shared()->name(), isolate);
+}
+
+// static
+MaybeHandle<Smi> JSFunction::GetLength(Isolate* isolate,
+ Handle<JSFunction> function) {
+ int length = 0;
+ if (function->shared()->is_compiled()) {
+ length = function->shared()->length();
+ } else {
+    // If the function isn't compiled yet, its length is not yet known.
+    // Compile it now and return the correct length.
+ if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
+ length = function->shared()->length();
+ }
+ if (isolate->has_pending_exception()) return MaybeHandle<Smi>();
+ }
+ return handle(Smi::FromInt(length), isolate);
+}
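
// Illustrative sketch, not part of this patch, of the lazy behavior in
// JSFunction::GetLength above: the declared parameter count is only reliable
// once the function is compiled, so the getter compiles on demand.
// Hypothetical standalone types.
struct LazyFunctionSketch {
  bool compiled = false;
  int declared_parameter_count = 0;

  bool Compile() {
    compiled = true;  // A real compiler would also fill in the count.
    return true;
  }
};

int GetLengthSketch(LazyFunctionSketch* f) {
  if (!f->compiled && !f->Compile()) return 0;  // Compilation may fail.
  return f->declared_parameter_count;
}
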
// static
Handle<Context> JSFunction::GetFunctionRealm(Handle<JSFunction> function) {
@@ -5342,8 +5686,6 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
ShouldThrow should_throw, AccessorInfoHandling handling) {
it->UpdateProtector();
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
- bool is_observed = object->map()->is_observed() &&
- (it->IsElement() || !it->name()->IsPrivate());
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -5403,14 +5745,6 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
it->ReconfigureDataProperty(value, attributes);
}
- if (is_observed) {
- RETURN_ON_EXCEPTION_VALUE(
- it->isolate(),
- EnqueueChangeRecord(object, "reconfigure", it->GetName(),
- it->factory()->the_hole_value()),
- Nothing<bool>());
- }
-
return Just(true);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -5431,20 +5765,8 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
}
// Reconfigure the data property if the attributes mismatch.
- Handle<Object> old_value = it->factory()->the_hole_value();
- if (is_observed) old_value = it->GetDataValue();
-
it->ReconfigureDataProperty(value, attributes);
- if (is_observed) {
- if (old_value->SameValue(*value)) {
- old_value = it->factory()->the_hole_value();
- }
- RETURN_ON_EXCEPTION_VALUE(
- it->isolate(), EnqueueChangeRecord(object, "reconfigure",
- it->GetName(), old_value),
- Nothing<bool>());
- }
return Just(true);
}
}
@@ -5457,7 +5779,7 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsTheHole(object->GetIsolate()));
LookupIterator it(object, name, object, LookupIterator::OWN);
return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
@@ -5479,73 +5801,11 @@ MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
-
Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
LookupIterator* it) {
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc(isolate);
- HandleScope scope(isolate);
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (!it->IsElement() && it->name()->IsSymbol() &&
- !interceptor->can_intercept_symbols()) {
- return Just(ABSENT);
- }
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, receiver,
- Object::ConvertReceiver(isolate, receiver),
- Nothing<PropertyAttributes>());
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, Object::DONT_THROW);
- if (!interceptor->query()->IsUndefined()) {
- Handle<Object> result;
- if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyQueryCallback query =
- v8::ToCData<v8::IndexedPropertyQueryCallback>(interceptor->query());
- result = args.Call(query, index);
- } else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyQueryCallback query =
- v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
- interceptor->query());
- result = args.Call(query, name);
- }
- if (!result.is_null()) {
- int32_t value;
- CHECK(result->ToInt32(&value));
- return Just(static_cast<PropertyAttributes>(value));
- }
- } else if (!interceptor->getter()->IsUndefined()) {
- // TODO(verwaest): Use GetPropertyWithInterceptor?
- Handle<Object> result;
- if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- result = args.Call(getter, index);
- } else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- result = args.Call(getter, name);
- }
- if (!result.is_null()) return Just(DONT_ENUM);
- }
-
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
- return Just(ABSENT);
+ return GetPropertyAttributesWithInterceptorInternal(it, it->GetInterceptor());
}
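
// Illustrative sketch, not part of this patch, of the probing order in the
// removed body above (now shared via the Internal helper): prefer the query
// callback, which reports attributes directly; if only a getter exists, a
// successful get proves presence but not attributes, so the conservative
// answer is DONT_ENUM. Hypothetical standalone code.
#include <functional>

enum SketchAttributes { kSketchAbsent = -1, kSketchDontEnum = 2 };

int ProbeAttributesSketch(const std::function<bool(int*)>& query,
                          const std::function<bool()>& getter) {
  int attributes = 0;
  if (query) {
    if (query(&attributes)) return attributes;
  } else if (getter && getter()) {
    return kSketchDontEnum;  // Present; exact attributes unknown.
  }
  return kSketchAbsent;
}
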
-
Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
@@ -5660,7 +5920,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Compute the length of the instance descriptor.
for (int i = 0; i < instance_descriptor_length; i++) {
int index = Smi::cast(iteration_order->get(i))->value();
- DCHECK(dictionary->IsKey(dictionary->KeyAt(index)));
+ DCHECK(dictionary->IsKey(isolate, dictionary->KeyAt(index)));
Object* value = dictionary->ValueAt(index);
PropertyType type = dictionary->DetailsAt(index).type();
@@ -5876,216 +6136,56 @@ static Smi* GenerateIdentityHash(Isolate* isolate) {
return Smi::FromInt(hash_value);
}
+template <typename ProxyType>
+static Smi* GetOrCreateIdentityHashHelper(Isolate* isolate,
+ Handle<ProxyType> proxy) {
+ Object* maybe_hash = proxy->hash();
+ if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
-template<typename ProxyType>
-static Handle<Smi> GetOrCreateIdentityHashHelper(Handle<ProxyType> proxy) {
- Isolate* isolate = proxy->GetIsolate();
-
- Handle<Object> maybe_hash(proxy->hash(), isolate);
- if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
-
- Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
- proxy->set_hash(*hash);
+ Smi* hash = GenerateIdentityHash(isolate);
+ proxy->set_hash(hash);
return hash;
}
// static
-Handle<Object> JSObject::GetIdentityHash(Isolate* isolate,
- Handle<JSObject> object) {
+Object* JSObject::GetIdentityHash(Isolate* isolate, Handle<JSObject> object) {
if (object->IsJSGlobalProxy()) {
- return handle(JSGlobalProxy::cast(*object)->hash(), isolate);
+ return JSGlobalProxy::cast(*object)->hash();
}
Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
- return JSReceiver::GetDataProperty(object, hash_code_symbol);
+ return *JSReceiver::GetDataProperty(object, hash_code_symbol);
}
// static
-Handle<Smi> JSObject::GetOrCreateIdentityHash(Handle<JSObject> object) {
+Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate,
+ Handle<JSObject> object) {
if (object->IsJSGlobalProxy()) {
- return GetOrCreateIdentityHashHelper(Handle<JSGlobalProxy>::cast(object));
+ return GetOrCreateIdentityHashHelper(isolate,
+ Handle<JSGlobalProxy>::cast(object));
}
- Isolate* isolate = object->GetIsolate();
Handle<Name> hash_code_symbol = isolate->factory()->hash_code_symbol();
LookupIterator it(object, hash_code_symbol, object, LookupIterator::OWN);
if (it.IsFound()) {
DCHECK_EQ(LookupIterator::DATA, it.state());
- Handle<Object> maybe_hash = it.GetDataValue();
- if (maybe_hash->IsSmi()) return Handle<Smi>::cast(maybe_hash);
+ Object* maybe_hash = *it.GetDataValue();
+ if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
}
- Handle<Smi> hash(GenerateIdentityHash(isolate), isolate);
- CHECK(AddDataProperty(&it, hash, NONE, THROW_ON_ERROR,
+ Smi* hash = GenerateIdentityHash(isolate);
+ CHECK(AddDataProperty(&it, handle(hash, isolate), NONE, THROW_ON_ERROR,
CERTAINLY_NOT_STORE_FROM_KEYED)
.IsJust());
return hash;
}
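
// Illustrative sketch, not part of this patch, of the get-or-create pattern
// in GetOrCreateIdentityHashHelper above: reuse a previously assigned hash,
// otherwise generate one and cache it on the object. Hypothetical standalone
// types; V8 stores the hash as a Smi on the object itself.
#include <cstdlib>

struct HashedObjectSketch {
  int hash = 0;  // 0 means "no hash assigned yet".
};

int GetOrCreateIdentityHashSketch(HashedObjectSketch* object) {
  if (object->hash != 0) return object->hash;     // Fast path: cached.
  object->hash = (std::rand() & 0x3FFFFFFF) | 1;  // Nonzero, stable once set.
  return object->hash;
}
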
// static
-Handle<Object> JSProxy::GetIdentityHash(Isolate* isolate,
- Handle<JSProxy> proxy) {
- return handle(proxy->hash(), isolate);
+Object* JSProxy::GetIdentityHash(Handle<JSProxy> proxy) {
+ return proxy->hash();
}
-
-Handle<Smi> JSProxy::GetOrCreateIdentityHash(Handle<JSProxy> proxy) {
- return GetOrCreateIdentityHashHelper(proxy);
-}
-
-
-Object* JSObject::GetHiddenProperty(Handle<Name> key) {
- DisallowHeapAllocation no_gc;
- DCHECK(key->IsUniqueName());
- if (IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- PrototypeIterator iter(GetIsolate(), this);
-    // If the proxy is detached, return the hole (property not found).
- if (iter.IsAtEnd()) return GetHeap()->the_hole_value();
- DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return iter.GetCurrent<JSObject>()->GetHiddenProperty(key);
- }
- DCHECK(!IsJSGlobalProxy());
- Object* inline_value = GetHiddenPropertiesHashTable();
-
- if (inline_value->IsUndefined()) return GetHeap()->the_hole_value();
-
- ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
- Object* entry = hashtable->Lookup(key);
- return entry;
-}
-
-
-Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value) {
- Isolate* isolate = object->GetIsolate();
-
- DCHECK(key->IsUniqueName());
- if (object->IsJSGlobalProxy()) {
- // For a proxy, use the prototype as target object.
- PrototypeIterator iter(isolate, object);
- // If the proxy is detached, return undefined.
- if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return SetHiddenProperty(PrototypeIterator::GetCurrent<JSObject>(iter), key,
- value);
- }
- DCHECK(!object->IsJSGlobalProxy());
-
- Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
-
- Handle<ObjectHashTable> hashtable =
- GetOrCreateHiddenPropertiesHashtable(object);
-
- // If it was found, check if the key is already in the dictionary.
- Handle<ObjectHashTable> new_table = ObjectHashTable::Put(hashtable, key,
- value);
- if (*new_table != *hashtable) {
- // If adding the key expanded the dictionary (i.e., Add returned a new
- // dictionary), store it back to the object.
- SetHiddenPropertiesHashTable(object, new_table);
- }
-
- // Return this to mark success.
- return object;
-}
-
-
-void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
- Isolate* isolate = object->GetIsolate();
- DCHECK(key->IsUniqueName());
-
- if (object->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return;
- DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return DeleteHiddenProperty(PrototypeIterator::GetCurrent<JSObject>(iter),
- key);
- }
-
- Object* inline_value = object->GetHiddenPropertiesHashTable();
-
- if (inline_value->IsUndefined()) return;
-
- Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
- bool was_present = false;
- ObjectHashTable::Remove(hashtable, key, &was_present);
-}
-
-
-bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
- Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
- LookupIterator it(object, hidden, object);
- Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
- // Cannot get an exception since the hidden_properties_symbol isn't exposed to
- // JS.
- DCHECK(maybe.IsJust());
- return maybe.FromJust() != ABSENT;
-}
-
-
-Object* JSObject::GetHiddenPropertiesHashTable() {
- DCHECK(!IsJSGlobalProxy());
- if (HasFastProperties()) {
- // If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden string. Since the
- // hidden strings hash code is zero (and no other name has hash
- // code zero) it will always occupy the first entry if present.
- DescriptorArray* descriptors = this->map()->instance_descriptors();
- if (descriptors->number_of_descriptors() > 0) {
- int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) ==
- GetHeap()->hidden_properties_symbol() &&
- sorted_index < map()->NumberOfOwnDescriptors()) {
- DCHECK(descriptors->GetType(sorted_index) == DATA);
- DCHECK(descriptors->GetDetails(sorted_index).representation().
- IsCompatibleForLoad(Representation::Tagged()));
- FieldIndex index = FieldIndex::ForDescriptor(this->map(),
- sorted_index);
- return this->RawFastPropertyAt(index);
- } else {
- return GetHeap()->undefined_value();
- }
- } else {
- return GetHeap()->undefined_value();
- }
- } else {
- Isolate* isolate = GetIsolate();
- Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
- Handle<JSObject> receiver(this, isolate);
- LookupIterator it(receiver, hidden, receiver);
- // Access check is always skipped for the hidden string anyways.
- return *GetDataProperty(&it);
- }
-}
-
-Handle<ObjectHashTable> JSObject::GetOrCreateHiddenPropertiesHashtable(
- Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
-
- static const int kInitialCapacity = 4;
- Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
- if (inline_value->IsHashTable()) {
- return Handle<ObjectHashTable>::cast(inline_value);
- }
-
- Handle<ObjectHashTable> hashtable = ObjectHashTable::New(
- isolate, kInitialCapacity, USE_CUSTOM_MINIMUM_CAPACITY);
-
- DCHECK(inline_value->IsUndefined());
- SetHiddenPropertiesHashTable(object, hashtable);
- return hashtable;
-}
-
-
-Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
- Handle<Object> value) {
- DCHECK(!object->IsJSGlobalProxy());
- Isolate* isolate = object->GetIsolate();
- Handle<Symbol> name = isolate->factory()->hidden_properties_symbol();
- SetOwnPropertyIgnoreAttributes(object, name, value, DONT_ENUM).Assert();
- return object;
+Smi* JSProxy::GetOrCreateIdentityHash(Isolate* isolate, Handle<JSProxy> proxy) {
+ return GetOrCreateIdentityHashHelper(isolate, proxy);
}
@@ -6098,7 +6198,7 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->deleter()->IsUndefined()) return Nothing<bool>();
+ if (interceptor->deleter()->IsUndefined(isolate)) return Nothing<bool>();
Handle<JSObject> holder = it->GetHolder<JSObject>();
Handle<Object> receiver = it->GetReceiver();
@@ -6132,7 +6232,7 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
DCHECK(result->IsBoolean());
// Rebox CustomArguments::kReturnValueOffset before returning.
- return Just(result->IsTrue());
+ return Just(result->IsTrue(isolate));
}
@@ -6149,9 +6249,8 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
auto cell = PropertyCell::InvalidateEntry(dictionary, entry);
cell->set_value(isolate->heap()->the_hole_value());
- // TODO(ishell): InvalidateForDelete
cell->set_property_details(
- cell->property_details().set_cell_type(PropertyCellType::kInvalidated));
+ PropertyDetails::Empty(PropertyCellType::kUninitialized));
} else {
Handle<NameDictionary> dictionary(object->property_dictionary());
DCHECK_NE(NameDictionary::kNotFound, entry);
@@ -6185,11 +6284,6 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
}
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
- bool is_observed = receiver->map()->is_observed() &&
- (it->IsElement() || !it->name()->IsPrivate());
-
- Handle<Object> old_value = it->factory()->the_hole_value();
-
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::JSPROXY:
@@ -6217,10 +6311,6 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
case LookupIterator::INTEGER_INDEXED_EXOTIC:
return Just(true);
case LookupIterator::DATA:
- if (is_observed) {
- old_value = it->GetDataValue();
- }
- // Fall through.
case LookupIterator::ACCESSOR: {
if (!it->IsConfigurable()) {
// Fail if the property is not configurable.
@@ -6235,13 +6325,6 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
it->Delete();
- if (is_observed) {
- RETURN_ON_EXCEPTION_VALUE(
- isolate, JSObject::EnqueueChangeRecord(receiver, "delete",
- it->GetName(), old_value),
- Nothing<bool>());
- }
-
return Just(true);
}
}
@@ -6254,7 +6337,7 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode) {
LookupIterator it(object->GetIsolate(), object, index, object,
- LookupIterator::HIDDEN);
+ LookupIterator::OWN);
return DeleteProperty(&it, language_mode);
}
@@ -6262,7 +6345,7 @@ Maybe<bool> JSReceiver::DeleteElement(Handle<JSReceiver> object, uint32_t index,
Maybe<bool> JSReceiver::DeleteProperty(Handle<JSReceiver> object,
Handle<Name> name,
LanguageMode language_mode) {
- LookupIterator it(object, name, object, LookupIterator::HIDDEN);
+ LookupIterator it(object, name, object, LookupIterator::OWN);
return DeleteProperty(&it, language_mode);
}
@@ -6271,13 +6354,15 @@ Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
Handle<Name> name,
LanguageMode language_mode) {
LookupIterator it = LookupIterator::PropertyOrElement(
- name->GetIsolate(), object, name, object, LookupIterator::HIDDEN);
+ name->GetIsolate(), object, name, object, LookupIterator::OWN);
return DeleteProperty(&it, language_mode);
}
// ES6 7.1.14
-MaybeHandle<Object> ToPropertyKey(Isolate* isolate, Handle<Object> value) {
+// static
+MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
+ Handle<Object> value) {
// 1. Let key be ToPrimitive(argument, hint String).
MaybeHandle<Object> maybe_key =
Object::ToPrimitive(value, ToPrimitiveHint::kString);
@@ -6348,16 +6433,15 @@ MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
// 2. Let props be ToObject(Properties).
// 3. ReturnIfAbrupt(props).
Handle<JSReceiver> props;
- if (!Object::ToObject(isolate, properties).ToHandle(&props)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
- Object);
- }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, props,
+ Object::ToObject(isolate, properties), Object);
+
// 4. Let keys be props.[[OwnPropertyKeys]]().
// 5. ReturnIfAbrupt(keys).
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys, JSReceiver::GetKeys(props, OWN_ONLY, ALL_PROPERTIES),
+ isolate, keys, KeyAccumulator::GetKeys(props, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES),
Object);
// 6. Let descriptors be an empty List.
int capacity = keys->length();
@@ -6370,7 +6454,7 @@ MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
// 7b. ReturnIfAbrupt(propDesc).
bool success = false;
LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, props, next_key, &success, LookupIterator::HIDDEN);
+ isolate, props, next_key, &success, LookupIterator::OWN);
DCHECK(success);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return MaybeHandle<Object>();
@@ -6445,7 +6529,7 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
bool success = false;
DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, key, &success, LookupIterator::HIDDEN);
+ isolate, object, key, &success, LookupIterator::OWN);
DCHECK(success); // ...so creating a LookupIterator can't fail.
// Deal with access checks first.
@@ -6990,10 +7074,7 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
// (Not needed.)
}
   // Most of steps 16 through 19 are implemented by JSArray::SetLength.
- if (JSArray::ObservableSetLength(a, new_len).is_null()) {
- DCHECK(isolate->has_pending_exception());
- return Nothing<bool>();
- }
+ JSArray::SetLength(a, new_len);
// Steps 19d-ii, 20.
if (!new_writable) {
PropertyDescriptor readonly;
@@ -7025,7 +7106,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Object> key,
PropertyDescriptor* desc,
ShouldThrow should_throw) {
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
return SetPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
should_throw);
@@ -7051,7 +7132,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
Nothing<bool>());
// 7. If trap is undefined, then:
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
// 7a. Return target.[[DefineOwnProperty]](P, Desc).
return JSReceiver::DefineOwnProperty(isolate, target, key, desc,
should_throw);
@@ -7175,7 +7256,7 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(Isolate* isolate,
bool success = false;
DCHECK(key->IsName() || key->IsNumber()); // |key| is a PropertyKey...
LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, key, &success, LookupIterator::HIDDEN);
+ isolate, object, key, &success, LookupIterator::OWN);
DCHECK(success); // ...so creating a LookupIterator can't fail.
return GetOwnPropertyDescriptor(&it, desc);
}
@@ -7246,7 +7327,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
Handle<Name> name,
PropertyDescriptor* desc) {
DCHECK(!name->IsPrivate());
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
Handle<String> trap_name =
isolate->factory()->getOwnPropertyDescriptor_string();
@@ -7269,7 +7350,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
Nothing<bool>());
// 7. If trap is undefined, then
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
// 7a. Return target.[[GetOwnProperty]](P).
return JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, desc);
}
@@ -7282,7 +7363,8 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
Nothing<bool>());
// 9. If Type(trapResultObj) is neither Object nor Undefined, throw a
// TypeError exception.
- if (!trap_result_obj->IsJSReceiver() && !trap_result_obj->IsUndefined()) {
+ if (!trap_result_obj->IsJSReceiver() &&
+ !trap_result_obj->IsUndefined(isolate)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxyGetOwnPropertyDescriptorInvalid, name));
return Nothing<bool>();
@@ -7293,7 +7375,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
MAYBE_RETURN(found, Nothing<bool>());
// 11. If trapResultObj is undefined, then
- if (trap_result_obj->IsUndefined()) {
+ if (trap_result_obj->IsUndefined(isolate)) {
// 11a. If targetDesc is undefined, return undefined.
if (!found.FromJust()) return Just(false);
// 11b. If targetDesc.[[Configurable]] is false, throw a TypeError
@@ -7358,19 +7440,20 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
+ Isolate* isolate = elements->GetIsolate();
if (IsFastObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
: elements->length();
for (int i = 0; i < length; ++i) {
Object* element = elements->get(i);
- if (!element->IsTheHole() && element == object) return true;
+ if (!element->IsTheHole(isolate) && element == object) return true;
}
} else {
DCHECK(kind == DICTIONARY_ELEMENTS || kind == SLOW_STRING_WRAPPER_ELEMENTS);
Object* key =
SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
- if (!key->IsUndefined()) return true;
+ if (!key->IsUndefined(isolate)) return true;
}
return false;
}
@@ -7394,7 +7477,7 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check if the object is among the named properties.
Object* key = SlowReverseLookup(obj);
- if (!key->IsUndefined()) {
+ if (!key->IsUndefined(heap->isolate())) {
return true;
}
@@ -7432,7 +7515,7 @@ bool JSObject::ReferencesObject(Object* obj) {
int length = parameter_map->length();
for (int i = 2; i < length; ++i) {
Object* value = parameter_map->get(i);
- if (!value->IsTheHole() && value == obj) return true;
+ if (!value->IsTheHole(heap->isolate()) && value == obj) return true;
}
// Check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
@@ -7500,8 +7583,7 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- if (!object->HasSloppyArgumentsElements() &&
- !object->map()->is_observed()) { // Fast path.
+ if (!object->HasSloppyArgumentsElements()) { // Fast path.
if (level == SEALED) {
return JSObject::PreventExtensionsWithTransition<SEALED>(object,
should_throw);
@@ -7605,7 +7687,7 @@ Maybe<bool> JSReceiver::PreventExtensions(Handle<JSReceiver> object,
Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
ShouldThrow should_throw) {
Isolate* isolate = proxy->GetIsolate();
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
Factory* factory = isolate->factory();
Handle<String> trap_name = factory->preventExtensions_string();
@@ -7620,7 +7702,7 @@ Maybe<bool> JSProxy::PreventExtensions(Handle<JSProxy> proxy,
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
return JSReceiver::PreventExtensions(target, should_throw);
}
@@ -7652,7 +7734,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
ShouldThrow should_throw) {
Isolate* isolate = object->GetIsolate();
- if (!object->HasSloppyArgumentsElements() && !object->map()->is_observed()) {
+ if (!object->HasSloppyArgumentsElements()) {
return PreventExtensionsWithTransition<NONE>(object, should_throw);
}
@@ -7693,13 +7775,6 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
JSObject::MigrateToMap(object, new_map);
DCHECK(!object->map()->is_extensible());
- if (object->map()->is_observed()) {
- RETURN_ON_EXCEPTION_VALUE(
- isolate,
- EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
- isolate->factory()->the_hole_value()),
- Nothing<bool>());
- }
return Just(true);
}
@@ -7714,7 +7789,7 @@ Maybe<bool> JSReceiver::IsExtensible(Handle<JSReceiver> object) {
Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
Factory* factory = isolate->factory();
Handle<String> trap_name = factory->isExtensible_string();
@@ -7729,7 +7804,7 @@ Maybe<bool> JSProxy::IsExtensible(Handle<JSProxy> proxy) {
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, trap, Object::GetMethod(handler, trap_name), Nothing<bool>());
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
return JSReceiver::IsExtensible(target);
}
@@ -7773,9 +7848,10 @@ template <typename Dictionary>
static void ApplyAttributesToDictionary(Dictionary* dictionary,
const PropertyAttributes attributes) {
int capacity = dictionary->Capacity();
+ Isolate* isolate = dictionary->GetIsolate();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k) &&
+ if (dictionary->IsKey(isolate, k) &&
!(k->IsSymbol() && Symbol::cast(k)->is_private())) {
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
@@ -7800,7 +7876,6 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
// Sealing/freezing sloppy arguments should be handled elsewhere.
DCHECK(!object->HasSloppyArgumentsElements());
- DCHECK(!object->map()->is_observed());
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
@@ -7919,28 +7994,6 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
-void JSObject::SetObserved(Handle<JSObject> object) {
- DCHECK(!object->IsJSGlobalProxy());
- DCHECK(!object->IsJSGlobalObject());
- Isolate* isolate = object->GetIsolate();
- Handle<Map> new_map;
- Handle<Map> old_map(object->map(), isolate);
- DCHECK(!old_map->is_observed());
- Map* transition = TransitionArray::SearchSpecial(
- *old_map, isolate->heap()->observed_symbol());
- if (transition != NULL) {
- new_map = handle(transition, isolate);
- DCHECK(new_map->is_observed());
- } else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
- new_map = Map::CopyForObserved(old_map);
- } else {
- new_map = Map::Copy(old_map, "SlowObserved");
- new_map->set_is_observed();
- }
- JSObject::MigrateToMap(object, new_map);
-}
-
-
Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
Representation representation,
FieldIndex index) {
@@ -8069,9 +8122,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
// an array.
PropertyFilter filter = static_cast<PropertyFilter>(
ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
- KeyAccumulator accumulator(isolate, OWN_ONLY, filter);
- accumulator.NextPrototype();
- copy->CollectOwnPropertyNames(&accumulator, filter);
+ KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly, filter);
+ accumulator.CollectOwnPropertyNames(copy, copy);
Handle<FixedArray> names = accumulator.GetKeys();
for (int i = 0; i < names->length(); i++) {
DCHECK(names->get(i)->IsName());
@@ -8126,7 +8178,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
- if (element_dictionary->IsKey(k)) {
+ if (element_dictionary->IsKey(isolate, k)) {
Handle<Object> value(element_dictionary->ValueAt(i), isolate);
if (value->IsJSObject()) {
Handle<JSObject> result;
@@ -8205,19 +8257,9 @@ MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, exotic_to_prim,
GetMethod(receiver, isolate->factory()->to_primitive_symbol()), Object);
- if (!exotic_to_prim->IsUndefined()) {
- Handle<Object> hint_string;
- switch (hint) {
- case ToPrimitiveHint::kDefault:
- hint_string = isolate->factory()->default_string();
- break;
- case ToPrimitiveHint::kNumber:
- hint_string = isolate->factory()->number_string();
- break;
- case ToPrimitiveHint::kString:
- hint_string = isolate->factory()->string_string();
- break;
- }
+ if (!exotic_to_prim->IsUndefined(isolate)) {
+ Handle<Object> hint_string =
+ isolate->factory()->ToPrimitiveHintString(hint);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
@@ -8335,28 +8377,6 @@ bool JSObject::HasEnumerableElements() {
return true;
}
-// Tests for the fast common case for property enumeration:
-// - This object and all prototypes have an enum cache (which means that
-//   it is not a proxy, has no interceptors, and needs no access checks).
-// - This object has no elements.
-// - No prototype has enumerable properties/elements.
-bool JSReceiver::IsSimpleEnum() {
- for (PrototypeIterator iter(GetIsolate(), this,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- if (!iter.GetCurrent()->IsJSObject()) return false;
- JSObject* current = iter.GetCurrent<JSObject>();
- int enum_length = current->map()->EnumLength();
- if (enum_length == kInvalidEnumCacheSentinel) return false;
- if (current->IsAccessCheckNeeded()) return false;
- DCHECK(!current->HasNamedInterceptor());
- DCHECK(!current->HasIndexedInterceptor());
- if (current->HasEnumerableElements()) return false;
- if (current != this && enum_length != 0) return false;
- }
- return true;
-}
-
int Map::NumberOfDescribedProperties(DescriptorFlag which,
PropertyFilter filter) {
@@ -8390,447 +8410,14 @@ int Map::NextFreePropertyIndex() {
}
-static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
- int len = array->length();
- for (int i = 0; i < len; i++) {
- Object* e = array->get(i);
- if (!(e->IsName() || e->IsNumber())) return false;
- }
- return true;
-}
-
-
-static Handle<FixedArray> ReduceFixedArrayTo(
- Handle<FixedArray> array, int length) {
- DCHECK_LE(length, array->length());
- if (array->length() == length) return array;
- return array->GetIsolate()->factory()->CopyFixedArrayUpTo(array, length);
-}
-
bool Map::OnlyHasSimpleProperties() {
// Wrapped string elements aren't explicitly stored in the elements backing
// store, but are loaded indirectly from the underlying string.
return !IsStringWrapperElementsKind(elements_kind()) &&
- (instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
- instance_type() != JS_GLOBAL_PROXY_TYPE) &&
+ instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
!has_hidden_prototype() && !is_dictionary_map();
}
-// static
-Handle<FixedArray> JSObject::GetFastEnumPropertyKeys(Isolate* isolate,
- Handle<JSObject> object) {
- Handle<Map> map(object->map());
- bool cache_enum_length = map->OnlyHasSimpleProperties();
-
- Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate);
- int own_property_count = map->EnumLength();
-  // If the enum length of the given map is set to kInvalidEnumCacheSentinel,
-  // the map itself has never used the present enum cache. The
- // first step to using the cache is to set the enum length of the map by
- // counting the number of own descriptors that are ENUMERABLE_STRINGS.
- if (own_property_count == kInvalidEnumCacheSentinel) {
- own_property_count =
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
- } else {
- DCHECK(
- own_property_count ==
- map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS));
- }
-
- if (descs->HasEnumCache()) {
- Handle<FixedArray> keys(descs->GetEnumCache(), isolate);
-    // If the enum cache already holds at least the required number of
-    // properties, we can reuse it. Otherwise the cache was generated for a
-    // previous (smaller) version of the descriptor array, and we regenerate
-    // the enum cache here.
- if (own_property_count <= keys->length()) {
- isolate->counters()->enum_cache_hits()->Increment();
- if (cache_enum_length) map->SetEnumLength(own_property_count);
- return ReduceFixedArrayTo(keys, own_property_count);
- }
- }
-
- if (descs->IsEmpty()) {
- isolate->counters()->enum_cache_hits()->Increment();
- if (cache_enum_length) map->SetEnumLength(0);
- return isolate->factory()->empty_fixed_array();
- }
-
- isolate->counters()->enum_cache_misses()->Increment();
-
- Handle<FixedArray> storage =
- isolate->factory()->NewFixedArray(own_property_count);
- Handle<FixedArray> indices =
- isolate->factory()->NewFixedArray(own_property_count);
-
- int size = map->NumberOfOwnDescriptors();
- int index = 0;
-
- for (int i = 0; i < size; i++) {
- PropertyDetails details = descs->GetDetails(i);
- if (details.IsDontEnum()) continue;
- Object* key = descs->GetKey(i);
- if (key->IsSymbol()) continue;
- storage->set(index, key);
- if (!indices.is_null()) {
- if (details.type() != DATA) {
- indices = Handle<FixedArray>();
- } else {
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
- int load_by_field_index = field_index.GetLoadByFieldIndex();
- indices->set(index, Smi::FromInt(load_by_field_index));
- }
- }
- index++;
- }
- DCHECK(index == storage->length());
-
- DescriptorArray::SetEnumCache(descs, isolate, storage, indices);
- if (cache_enum_length) {
- map->SetEnumLength(own_property_count);
- }
- return storage;
-}
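
// Illustrative sketch, not part of this patch, of the reuse rule in the
// removed GetFastEnumPropertyKeys above: a cached key list may be reused
// whenever it holds at least the map's count of enumerable own properties;
// a shorter cache was built for a smaller shape and must be regenerated.
// Hypothetical standalone code.
#include <string>
#include <vector>

const std::vector<std::string>* ReuseEnumCacheSketch(
    const std::vector<std::string>& cached_keys, size_t own_property_count) {
  if (own_property_count <= cached_keys.size()) return &cached_keys;
  return nullptr;  // Cache too small: caller regenerates it.
}
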
-
-
-Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
- Isolate* isolate = object->GetIsolate();
- if (object->HasFastProperties()) {
- return GetFastEnumPropertyKeys(isolate, object);
- } else if (object->IsJSGlobalObject()) {
- Handle<GlobalDictionary> dictionary(object->global_dictionary());
- int length = dictionary->NumberOfEnumElements();
- if (length == 0) {
- return isolate->factory()->empty_fixed_array();
- }
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
- dictionary->CopyEnumKeysTo(*storage);
- return storage;
- } else {
- Handle<NameDictionary> dictionary(object->property_dictionary());
- int length = dictionary->NumberOfEnumElements();
- if (length == 0) {
- return isolate->factory()->empty_fixed_array();
- }
- Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
- dictionary->CopyEnumKeysTo(*storage);
- return storage;
- }
-}
-
-
-enum IndexedOrNamed { kIndexed, kNamed };
-
-
-// Returns |true| on success, |nothing| on exception.
-template <class Callback, IndexedOrNamed type>
-static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSObject> object,
- PropertyFilter filter,
- KeyAccumulator* accumulator) {
- if (type == kIndexed) {
- if (!object->HasIndexedInterceptor()) return Just(true);
- } else {
- if (!object->HasNamedInterceptor()) return Just(true);
- }
- Handle<InterceptorInfo> interceptor(type == kIndexed
- ? object->GetIndexedInterceptor()
- : object->GetNamedInterceptor(),
- isolate);
- if ((filter & ONLY_ALL_CAN_READ) && !interceptor->all_can_read()) {
- return Just(true);
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *object, Object::DONT_THROW);
- Handle<JSObject> result;
- if (!interceptor->enumerator()->IsUndefined()) {
- Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
- const char* log_tag = type == kIndexed ? "interceptor-indexed-enum"
- : "interceptor-named-enum";
- LOG(isolate, ApiObjectAccess(log_tag, *object));
- result = args.Call(enum_fun);
- }
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- if (result.is_null()) return Just(true);
- DCHECK(result->IsJSArray() || result->HasSloppyArgumentsElements());
- // The accumulator takes care of string/symbol filtering.
- if (type == kIndexed) {
- accumulator->AddElementKeysFromInterceptor(result);
- } else {
- accumulator->AddKeys(result, DO_NOT_CONVERT);
- }
- return Just(true);
-}
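
// Illustrative sketch, not part of this patch: the embedder-side enumerator
// callback that the removed GetKeysFromInterceptor above invokes so an
// interceptor can report the names it handles. Uses the 5.4-era public v8.h
// API (non-Maybe overloads); all names are hypothetical.
#include <v8.h>

static void HypotheticalEnumerator(
    const v8::PropertyCallbackInfo<v8::Array>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  v8::Local<v8::Array> names = v8::Array::New(isolate, 1);
  names->Set(0, v8::String::NewFromUtf8(isolate, "intercepted"));
  info.GetReturnValue().Set(names);  // The keys this interceptor handles.
}
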
-
-
-// Returns |true| on success, |false| if prototype walking should be stopped,
-// |nothing| if an exception was thrown.
-static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSObject> object,
- PropertyFilter* filter,
- KeyCollectionType type,
- KeyAccumulator* accumulator) {
- accumulator->NextPrototype();
- // Check access rights if required.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayAccess(handle(isolate->context()), object)) {
- // The cross-origin spec says that [[Enumerate]] shall return an empty
- // iterator when it doesn't have access...
- if (type == INCLUDE_PROTOS) {
- return Just(false);
- }
- // ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
- DCHECK_EQ(OWN_ONLY, type);
- *filter = static_cast<PropertyFilter>(*filter | ONLY_ALL_CAN_READ);
- }
-
- JSObject::CollectOwnElementKeys(object, accumulator, *filter);
-
- // Add the element keys from the interceptor.
- Maybe<bool> success =
- GetKeysFromInterceptor<v8::IndexedPropertyEnumeratorCallback, kIndexed>(
- isolate, receiver, object, *filter, accumulator);
- MAYBE_RETURN(success, Nothing<bool>());
-
- if (*filter == ENUMERABLE_STRINGS) {
- Handle<FixedArray> enum_keys = JSObject::GetEnumPropertyKeys(object);
- accumulator->AddKeys(enum_keys, DO_NOT_CONVERT);
- } else {
- object->CollectOwnPropertyNames(accumulator, *filter);
- }
-
- // Add the property keys from the interceptor.
- success = GetKeysFromInterceptor<v8::GenericNamedPropertyEnumeratorCallback,
- kNamed>(isolate, receiver, object, *filter,
- accumulator);
- MAYBE_RETURN(success, Nothing<bool>());
- return Just(true);
-}
-
-
-// Helper function for JSReceiver::GetKeys() below. Can be called recursively.
-// Returns |true| or |nothing|.
-static Maybe<bool> GetKeys_Internal(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSReceiver> object,
- KeyCollectionType type,
- PropertyFilter filter,
- KeyAccumulator* accumulator) {
- // Proxies have no hidden prototype and we should not trigger the
- // [[GetPrototypeOf]] trap on the last iteration when using
- // AdvanceFollowingProxies.
- if (type == OWN_ONLY && object->IsJSProxy()) {
- MAYBE_RETURN(JSProxy::OwnPropertyKeys(isolate, receiver,
- Handle<JSProxy>::cast(object), filter,
- accumulator),
- Nothing<bool>());
- return Just(true);
- }
-
- PrototypeIterator::WhereToEnd end = type == OWN_ONLY
- ? PrototypeIterator::END_AT_NON_HIDDEN
- : PrototypeIterator::END_AT_NULL;
- for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER, end);
- !iter.IsAtEnd();) {
- Handle<JSReceiver> current =
- PrototypeIterator::GetCurrent<JSReceiver>(iter);
- Maybe<bool> result = Just(false); // Dummy initialization.
- if (current->IsJSProxy()) {
- result = JSProxy::OwnPropertyKeys(isolate, receiver,
- Handle<JSProxy>::cast(current), filter,
- accumulator);
- } else {
- DCHECK(current->IsJSObject());
- result = GetKeysFromJSObject(isolate, receiver,
- Handle<JSObject>::cast(current), &filter,
- type, accumulator);
- }
- MAYBE_RETURN(result, Nothing<bool>());
- if (!result.FromJust()) break; // |false| means "stop iterating".
- // Iterate through proxies but ignore access checks for the ALL_CAN_READ
-    // case on API objects for OWN_ONLY keys handled in GetKeysFromJSObject.
- if (!iter.AdvanceFollowingProxiesIgnoringAccessChecks()) {
- return Nothing<bool>();
- }
- }
- return Just(true);
-}
-
-
-// ES6 9.5.12
-// Returns |true| on success, |nothing| in case of exception.
-// static
-Maybe<bool> JSProxy::OwnPropertyKeys(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSProxy> proxy,
- PropertyFilter filter,
- KeyAccumulator* accumulator) {
- STACK_CHECK(Nothing<bool>());
- // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
- Handle<Object> handler(proxy->handler(), isolate);
- // 2. If handler is null, throw a TypeError exception.
- // 3. Assert: Type(handler) is Object.
- if (proxy->IsRevoked()) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxyRevoked, isolate->factory()->ownKeys_string()));
- return Nothing<bool>();
- }
- // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
- // 5. Let trap be ? GetMethod(handler, "ownKeys").
- Handle<Object> trap;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
- isolate->factory()->ownKeys_string()),
- Nothing<bool>());
- // 6. If trap is undefined, then
- if (trap->IsUndefined()) {
- // 6a. Return target.[[OwnPropertyKeys]]().
- return GetKeys_Internal(isolate, receiver, target, OWN_ONLY, filter,
- accumulator);
- }
- // 7. Let trapResultArray be Call(trap, handler, «target»).
- Handle<Object> trap_result_array;
- Handle<Object> args[] = {target};
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, trap_result_array,
- Execution::Call(isolate, trap, handler, arraysize(args), args),
- Nothing<bool>());
- // 8. Let trapResult be ? CreateListFromArrayLike(trapResultArray,
- // «String, Symbol»).
- Handle<FixedArray> trap_result;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, trap_result,
- Object::CreateListFromArrayLike(isolate, trap_result_array,
- ElementTypes::kStringAndSymbol),
- Nothing<bool>());
- // 9. Let extensibleTarget be ? IsExtensible(target).
- Maybe<bool> maybe_extensible = JSReceiver::IsExtensible(target);
- MAYBE_RETURN(maybe_extensible, Nothing<bool>());
- bool extensible_target = maybe_extensible.FromJust();
- // 10. Let targetKeys be ? target.[[OwnPropertyKeys]]().
- Handle<FixedArray> target_keys;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_keys,
- JSReceiver::OwnPropertyKeys(target),
- Nothing<bool>());
- // 11. (Assert)
- // 12. Let targetConfigurableKeys be an empty List.
- // To save memory, we're re-using target_keys and will modify it in-place.
- Handle<FixedArray> target_configurable_keys = target_keys;
- // 13. Let targetNonconfigurableKeys be an empty List.
- Handle<FixedArray> target_nonconfigurable_keys =
- isolate->factory()->NewFixedArray(target_keys->length());
- int nonconfigurable_keys_length = 0;
- // 14. Repeat, for each element key of targetKeys:
- for (int i = 0; i < target_keys->length(); ++i) {
- // 14a. Let desc be ? target.[[GetOwnProperty]](key).
- PropertyDescriptor desc;
- Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
- isolate, target, handle(target_keys->get(i), isolate), &desc);
- MAYBE_RETURN(found, Nothing<bool>());
- // 14b. If desc is not undefined and desc.[[Configurable]] is false, then
- if (found.FromJust() && !desc.configurable()) {
- // 14b i. Append key as an element of targetNonconfigurableKeys.
- target_nonconfigurable_keys->set(nonconfigurable_keys_length,
- target_keys->get(i));
- nonconfigurable_keys_length++;
- // The key was moved, null it out in the original list.
- target_keys->set(i, Smi::FromInt(0));
- } else {
- // 14c. Else,
- // 14c i. Append key as an element of targetConfigurableKeys.
- // (No-op, just keep it in |target_keys|.)
- }
- }
- accumulator->NextPrototype(); // Prepare for accumulating keys.
- // 15. If extensibleTarget is true and targetNonconfigurableKeys is empty,
- // then:
- if (extensible_target && nonconfigurable_keys_length == 0) {
- // 15a. Return trapResult.
- return accumulator->AddKeysFromProxy(proxy, trap_result);
- }
- // 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
- Zone set_zone(isolate->allocator());
- const int kPresent = 1;
- const int kGone = 0;
- IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
- int unchecked_result_keys_size = 0;
- for (int i = 0; i < trap_result->length(); ++i) {
- DCHECK(trap_result->get(i)->IsUniqueName());
- Object* key = trap_result->get(i);
- int* entry = unchecked_result_keys.Get(key);
- if (*entry != kPresent) {
- *entry = kPresent;
- unchecked_result_keys_size++;
- }
- }
- // 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
- for (int i = 0; i < nonconfigurable_keys_length; ++i) {
- Object* key = target_nonconfigurable_keys->get(i);
- // 17a. If key is not an element of uncheckedResultKeys, throw a
- // TypeError exception.
- int* found = unchecked_result_keys.Find(key);
- if (found == nullptr || *found == kGone) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
- return Nothing<bool>();
- }
- // 17b. Remove key from uncheckedResultKeys.
- *found = kGone;
- unchecked_result_keys_size--;
- }
- // 18. If extensibleTarget is true, return trapResult.
- if (extensible_target) {
- return accumulator->AddKeysFromProxy(proxy, trap_result);
- }
- // 19. Repeat, for each key that is an element of targetConfigurableKeys:
- for (int i = 0; i < target_configurable_keys->length(); ++i) {
- Object* key = target_configurable_keys->get(i);
- if (key->IsSmi()) continue; // Zapped entry, was nonconfigurable.
- // 19a. If key is not an element of uncheckedResultKeys, throw a
- // TypeError exception.
- int* found = unchecked_result_keys.Find(key);
- if (found == nullptr || *found == kGone) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxyOwnKeysMissing, handle(key, isolate)));
- return Nothing<bool>();
- }
- // 19b. Remove key from uncheckedResultKeys.
- *found = kGone;
- unchecked_result_keys_size--;
- }
- // 20. If uncheckedResultKeys is not empty, throw a TypeError exception.
- if (unchecked_result_keys_size != 0) {
- DCHECK_GT(unchecked_result_keys_size, 0);
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxyOwnKeysNonExtensible));
- return Nothing<bool>();
- }
- // 21. Return trapResult.
- return accumulator->AddKeysFromProxy(proxy, trap_result);
-}
-
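The removed block above is the old JSProxy ownKeys path (it moves into the new KeyAccumulator, as the replacement call sites below show). Its heart is the spec's invariant check, steps 16 to 21: every nonconfigurable target key must be reported, and a non-extensible target may neither hide nor invent keys. A minimal standalone sketch of that check, assuming plain std::string keys and a thrown exception in place of V8's TypeError/Nothing<bool> plumbing:

    #include <stdexcept>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Validates an ownKeys trap result against the target's key lists,
    // mirroring steps 16-21 above. Returns the trap result on success.
    std::vector<std::string> CheckOwnKeysInvariants(
        const std::vector<std::string>& trap_result,
        const std::vector<std::string>& nonconfigurable_keys,
        const std::vector<std::string>& configurable_keys, bool extensible) {
      // Step 16: uncheckedResultKeys starts out as a copy of trapResult.
      std::unordered_map<std::string, bool> unchecked;  // true == still present
      for (const auto& key : trap_result) unchecked[key] = true;
      // Step 17: every nonconfigurable target key must be reported.
      for (const auto& key : nonconfigurable_keys) {
        auto it = unchecked.find(key);
        if (it == unchecked.end() || !it->second)
          throw std::runtime_error("ownKeys skipped nonconfigurable key " + key);
        it->second = false;  // step 17b: remove from uncheckedResultKeys
      }
      // Step 18: an extensible target imposes no further constraints.
      if (extensible) return trap_result;
      // Step 19: a non-extensible target must report all remaining keys too.
      for (const auto& key : configurable_keys) {
        auto it = unchecked.find(key);
        if (it == unchecked.end() || !it->second)
          throw std::runtime_error("ownKeys skipped key " + key);
        it->second = false;
      }
      // Step 20: and it may not report keys the target does not have.
      for (const auto& entry : unchecked) {
        if (entry.second)
          throw std::runtime_error("ownKeys invented key " + entry.first);
      }
      return trap_result;  // step 21
    }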
-MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
- KeyCollectionType type,
- PropertyFilter filter,
- GetKeysConversion keys_conversion,
- bool filter_proxy_keys) {
- USE(ContainsOnlyValidKeys);
- Isolate* isolate = object->GetIsolate();
- KeyAccumulator accumulator(isolate, type, filter);
- accumulator.set_filter_proxy_keys(filter_proxy_keys);
- MAYBE_RETURN(
- GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
- MaybeHandle<FixedArray>());
- Handle<FixedArray> keys = accumulator.GetKeys(keys_conversion);
- DCHECK(ContainsOnlyValidKeys(keys));
- return keys;
-}
-
MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
Handle<FixedArray>* result) {
@@ -8921,12 +8508,13 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
PropertyFilter key_filter =
static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
- KeyAccumulator accumulator(isolate, OWN_ONLY, key_filter);
- MAYBE_RETURN(GetKeys_Internal(isolate, object, object, OWN_ONLY, key_filter,
- &accumulator),
- MaybeHandle<FixedArray>());
- Handle<FixedArray> keys = accumulator.GetKeys(CONVERT_TO_STRING);
- DCHECK(ContainsOnlyValidKeys(keys));
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly, key_filter,
+ GetKeysConversion::kConvertToString),
+ MaybeHandle<FixedArray>());
values_or_entries = isolate->factory()->NewFixedArray(keys->length());
int length = 0;
@@ -9011,7 +8599,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
Isolate* isolate = object->GetIsolate();
LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
return DefineAccessor(&it, getter, setter, attributes);
}
@@ -9039,40 +8627,11 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
return it->factory()->undefined_value();
}
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = object->map()->is_observed() &&
- (it->IsElement() || !it->name()->IsPrivate());
- bool preexists = false;
- if (is_observed) {
- CHECK(GetPropertyAttributes(it).IsJust());
- preexists = it->IsFound();
- if (preexists && (it->state() == LookupIterator::DATA ||
- it->GetAccessors()->IsAccessorInfo())) {
- old_value = Object::GetProperty(it).ToHandleChecked();
- }
- }
-
- DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull() ||
- getter->IsFunctionTemplateInfo());
- DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull() ||
- getter->IsFunctionTemplateInfo());
- // At least one of the accessors needs to be a new value.
- DCHECK(!getter->IsNull() || !setter->IsNull());
- if (!getter->IsNull()) {
- it->TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
- }
- if (!setter->IsNull()) {
- it->TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
- }
-
- if (is_observed) {
- // Make sure the top context isn't changed.
- AssertNoContextChange ncc(isolate);
- const char* type = preexists ? "reconfigure" : "add";
- RETURN_ON_EXCEPTION(
- isolate, EnqueueChangeRecord(object, type, it->GetName(), old_value),
- Object);
- }
+ DCHECK(getter->IsCallable() || getter->IsUndefined(isolate) ||
+ getter->IsNull(isolate) || getter->IsFunctionTemplateInfo());
+ DCHECK(setter->IsCallable() || setter->IsUndefined(isolate) ||
+ setter->IsNull(isolate) || setter->IsFunctionTemplateInfo());
+ it->TransitionToAccessorProperty(getter, setter, attributes);
return isolate->factory()->undefined_value();
}
@@ -9084,7 +8643,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Handle<Name> name(Name::cast(info->name()), isolate);
LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
// Duplicate ACCESS_CHECK outside of GetPropertyAttributes for the case that
// the FailedAccessCheckCallbackFunction doesn't throw an exception.
@@ -9118,53 +8677,6 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
return object;
}
-
-MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
- Handle<Name> name,
- AccessorComponent component) {
- Isolate* isolate = object->GetIsolate();
-
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, name, LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
-
- for (; it.IsFound(); it.Next()) {
- switch (it.state()) {
- case LookupIterator::INTERCEPTOR:
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
-
- case LookupIterator::ACCESS_CHECK:
- if (it.HasAccess()) continue;
- isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
-
- case LookupIterator::JSPROXY:
- return isolate->factory()->undefined_value();
-
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return isolate->factory()->undefined_value();
- case LookupIterator::DATA:
- continue;
- case LookupIterator::ACCESSOR: {
- Handle<Object> maybe_pair = it.GetAccessors();
- if (maybe_pair->IsAccessorPair()) {
- return AccessorPair::GetComponent(
- Handle<AccessorPair>::cast(maybe_pair), component);
- }
- }
- }
- }
-
- return isolate->factory()->undefined_value();
-}
-
-
Object* JSObject::SlowReverseLookup(Object* value) {
if (HasFastProperties()) {
int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
@@ -9236,7 +8748,8 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
Isolate* isolate = fast_map->GetIsolate();
Handle<Object> maybe_cache(isolate->native_context()->normalized_map_cache(),
isolate);
- bool use_cache = !fast_map->is_prototype_map() && !maybe_cache->IsUndefined();
+ bool use_cache =
+ !fast_map->is_prototype_map() && !maybe_cache->IsUndefined(isolate);
Handle<NormalizedMapCache> cache;
if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache);
@@ -9319,6 +8832,17 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
return result;
}
+// Return an immutable prototype exotic object version of the input map.
+// Never even try to cache it in the transition tree, as it is intended
+// for the global object and its prototype chain, and excluding it saves
+// memory on the map transition tree.
+
+// static
+Handle<Map> Map::TransitionToImmutableProto(Handle<Map> map) {
+ Handle<Map> new_map = Map::Copy(map, "ImmutablePrototype");
+ new_map->set_immutable_proto(true);
+ return new_map;
+}
Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
int in_object_properties,
@@ -9447,7 +8971,7 @@ void Map::TraceAllTransitions(Map* map) {
void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag) {
- if (!parent->GetBackPointer()->IsUndefined()) {
+ if (!parent->GetBackPointer()->IsUndefined(parent->GetIsolate())) {
parent->set_owns_descriptors(false);
} else {
// |parent| is initial map and it must keep the ownership, there must be no
@@ -9646,22 +9170,15 @@ Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
// using |strict_function_transition_symbol| as a key.
if (language_mode == SLOPPY) return initial_map;
Isolate* isolate = initial_map->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<Symbol> transition_symbol;
int map_index = Context::FunctionMapIndex(language_mode, kind);
Handle<Map> function_map(
Map::cast(isolate->native_context()->get(map_index)));
- STATIC_ASSERT(LANGUAGE_END == 3);
- switch (language_mode) {
- case STRICT:
- transition_symbol = factory->strict_function_transition_symbol();
- break;
- default:
- UNREACHABLE();
- break;
- }
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ DCHECK_EQ(STRICT, language_mode);
+ Handle<Symbol> transition_symbol =
+ isolate->factory()->strict_function_transition_symbol();
Map* maybe_transition =
TransitionArray::SearchSpecial(*initial_map, *transition_symbol);
if (maybe_transition != NULL) {
@@ -9686,30 +9203,6 @@ Handle<Map> Map::AsLanguageMode(Handle<Map> initial_map,
}
-Handle<Map> Map::CopyForObserved(Handle<Map> map) {
- DCHECK(!map->is_observed());
-
- Isolate* isolate = map->GetIsolate();
-
- bool insert_transition =
- TransitionArray::CanHaveMoreTransitions(map) && !map->is_prototype_map();
-
- if (insert_transition) {
- Handle<Map> new_map = CopyForTransition(map, "CopyForObserved");
- new_map->set_is_observed();
-
- Handle<Name> name = isolate->factory()->observed_symbol();
- ConnectTransition(map, new_map, name, SPECIAL_TRANSITION);
- return new_map;
- }
-
- // Create a new free-floating map only if we are not allowed to store it.
- Handle<Map> new_map = Map::Copy(map, "CopyForObserved");
- new_map->set_is_observed();
- return new_map;
-}
-
-
Handle<Map> Map::CopyForTransition(Handle<Map> map, const char* reason) {
DCHECK(!map->is_prototype_map());
Handle<Map> new_map = CopyDropDescriptors(map);
@@ -9870,6 +9363,17 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
StoreFromKeyed store_mode) {
+ RuntimeCallTimerScope stats_scope(
+ *map, map->is_prototype_map()
+ ? &RuntimeCallStats::PrototypeMap_TransitionToDataProperty
+ : &RuntimeCallStats::Map_TransitionToDataProperty);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ map->GetIsolate(),
+ (map->is_prototype_map()
+ ? &tracing::TraceEventStatsTable::
+ PrototypeMap_TransitionToDataProperty
+           : &tracing::TraceEventStatsTable::Map_TransitionToDataProperty));
+
DCHECK(name->IsUniqueName());
DCHECK(!map->is_dictionary_map());
@@ -9945,13 +9449,26 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
return new_map;
}
-Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
+Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<Name> name, int descriptor,
- AccessorComponent component,
- Handle<Object> accessor,
+ Handle<Object> getter,
+ Handle<Object> setter,
PropertyAttributes attributes) {
+ RuntimeCallTimerScope stats_scope(
+ isolate,
+ map->is_prototype_map()
+ ? &RuntimeCallStats::PrototypeMap_TransitionToAccessorProperty
+ : &RuntimeCallStats::Map_TransitionToAccessorProperty);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate,
+ (map->is_prototype_map()
+ ? &tracing::TraceEventStatsTable::
+ PrototypeMap_TransitionToAccessorProperty
+ : &tracing::TraceEventStatsTable::Map_TransitionToAccessorProperty));
+
+ // At least one of the accessors needs to be a new value.
+ DCHECK(!getter->IsNull(isolate) || !setter->IsNull(isolate));
DCHECK(name->IsUniqueName());
- Isolate* isolate = name->GetIsolate();
// Dictionary maps can always have additional data properties.
if (map->is_dictionary_map()) return map;
@@ -9980,7 +9497,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
}
Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
- if (pair->get(component) != *accessor) {
+ if (!pair->Equals(*getter, *setter)) {
return Map::Normalize(map, mode, "TransitionToDifferentAccessor");
}
@@ -10007,10 +9524,21 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
return Map::Normalize(map, mode, "AccessorsOverwritingNonPair");
}
- Object* current = Handle<AccessorPair>::cast(maybe_pair)->get(component);
- if (current == *accessor) return map;
+ Handle<AccessorPair> current_pair = Handle<AccessorPair>::cast(maybe_pair);
+ if (current_pair->Equals(*getter, *setter)) return map;
- if (!current->IsTheHole()) {
+ bool overwriting_accessor = false;
+ if (!getter->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_GETTER)->IsNull(isolate) &&
+ current_pair->get(ACCESSOR_GETTER) != *getter) {
+ overwriting_accessor = true;
+ }
+ if (!setter->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_SETTER)->IsNull(isolate) &&
+ current_pair->get(ACCESSOR_SETTER) != *setter) {
+ overwriting_accessor = true;
+ }
+ if (overwriting_accessor) {
return Map::Normalize(map, mode, "AccessorsOverwritingAccessors");
}
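The rewritten check above lets a map transition be reused only when neither component would silently replace a different, already-installed accessor. A standalone sketch of the same predicate, with opaque pointers standing in for accessor objects and nullptr for JavaScript null (that is, "component not supplied"):

    #include <utility>

    using Accessor = const void*;  // opaque stand-in for a getter/setter object

    // True when installing (getter, setter) would overwrite a different,
    // already-present component of the current pair.
    bool OverwritesExistingAccessor(std::pair<Accessor, Accessor> current,
                                    Accessor getter, Accessor setter) {
      bool overwriting = false;
      if (getter != nullptr && current.first != nullptr &&
          current.first != getter) {
        overwriting = true;  // a different getter is already installed
      }
      if (setter != nullptr && current.second != nullptr &&
          current.second != setter) {
        overwriting = true;  // a different setter is already installed
      }
      return overwriting;
    }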
@@ -10022,7 +9550,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
pair = isolate->factory()->NewAccessorPair();
}
- pair->set(component, *accessor);
+ pair->SetComponents(*getter, *setter);
+
TransitionFlag flag = INSERT_TRANSITION;
AccessorConstantDescriptor new_desc(name, pair, attributes);
return Map::CopyInsertDescriptor(map, &new_desc, flag);
@@ -10036,7 +9565,7 @@ Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
// Share descriptors only if map owns descriptors and it is not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
- !map->GetBackPointer()->IsUndefined() &&
+ !map->GetBackPointer()->IsUndefined(map->GetIsolate()) &&
TransitionArray::CanHaveMoreTransitions(map)) {
return ShareDescriptor(map, descriptors, descriptor);
}
@@ -10166,222 +9695,186 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
simple_flag);
}
-
-void Map::UpdateCodeCache(Handle<Map> map,
- Handle<Name> name,
- Handle<Code> code) {
- Isolate* isolate = map->GetIsolate();
- HandleScope scope(isolate);
- // Allocate the code cache if not present.
- if (map->code_cache()->IsFixedArray()) {
- Handle<Object> result = isolate->factory()->NewCodeCache();
- map->set_code_cache(*result);
+// Helper class to manage a Map's code cache. The layout depends on the number
+// of entries; this is worthwhile because most code caches are very small,
+// but some are huge (thousands of entries).
+// For zero entries, the EmptyFixedArray is used.
+// For one entry, we use a 2-element FixedArray containing [name, code].
+// For 2..128 entries, we use a FixedArray with linear lookups, the layout is:
+// [0] - number of slots that are currently in use
+// [1] - first name
+// [2] - first code
+// [3] - second name
+// [4] - second code
+// etc.
+// For more than 128 entries, we use a CodeCacheHashTable.
+class CodeCache : public AllStatic {
+ public:
+ // Returns the new cache, to be stored on the map.
+ static Handle<FixedArray> Put(Isolate* isolate, Handle<FixedArray> cache,
+ Handle<Name> name, Handle<Code> code) {
+ int length = cache->length();
+ if (length == 0) return PutFirstElement(isolate, name, code);
+ if (length == kEntrySize) {
+ return PutSecondElement(isolate, cache, name, code);
+ }
+ if (length <= kLinearMaxSize) {
+ Handle<FixedArray> result = PutLinearElement(isolate, cache, name, code);
+ if (!result.is_null()) return result;
+ // Fall through if linear storage is getting too large.
+ }
+ return PutHashTableElement(isolate, cache, name, code);
+ }
+
+ static Code* Lookup(FixedArray* cache, Name* name, Code::Flags flags) {
+ int length = cache->length();
+ if (length == 0) return nullptr;
+ if (length == kEntrySize) return OneElementLookup(cache, name, flags);
+ if (!cache->IsCodeCacheHashTable()) {
+ return LinearLookup(cache, name, flags);
+ } else {
+ return CodeCacheHashTable::cast(cache)->Lookup(name, flags);
+ }
}
- // Update the code cache.
- Handle<CodeCache> code_cache(CodeCache::cast(map->code_cache()), isolate);
- CodeCache::Update(code_cache, name, code);
-}
-
+ private:
+ static const int kNameIndex = 0;
+ static const int kCodeIndex = 1;
+ static const int kEntrySize = 2;
-Object* Map::FindInCodeCache(Name* name, Code::Flags flags) {
- // Do a lookup if a code cache exists.
- if (!code_cache()->IsFixedArray()) {
- return CodeCache::cast(code_cache())->Lookup(name, flags);
- } else {
- return GetHeap()->undefined_value();
- }
-}
+ static const int kLinearUsageIndex = 0;
+ static const int kLinearReservedSlots = 1;
+ static const int kLinearInitialCapacity = 2;
+ static const int kLinearMaxSize = 257; // == LinearSizeFor(128);
+ static const int kHashTableInitialCapacity = 200; // Number of entries.
-int Map::IndexInCodeCache(Object* name, Code* code) {
- // Get the internal index if a code cache exists.
- if (!code_cache()->IsFixedArray()) {
- return CodeCache::cast(code_cache())->GetIndex(name, code);
+ static int LinearSizeFor(int entries) {
+ return kLinearReservedSlots + kEntrySize * entries;
}
- return -1;
-}
-
-
-void Map::RemoveFromCodeCache(Name* name, Code* code, int index) {
- // No GC is supposed to happen between a call to IndexInCodeCache and
- // RemoveFromCodeCache so the code cache must be there.
- DCHECK(!code_cache()->IsFixedArray());
- CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
-}
-
-void CodeCache::Update(
- Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
- // The number of monomorphic stubs for normal load/store/call IC's can grow to
- // a large number and therefore they need to go into a hash table. They are
- // used to load global properties from cells.
- if (code->type() == Code::NORMAL) {
- // Make sure that a hash table is allocated for the normal load code cache.
- if (code_cache->normal_type_cache()->IsUndefined()) {
- Handle<Object> result =
- CodeCacheHashTable::New(code_cache->GetIsolate(),
- CodeCacheHashTable::kInitialSize);
- code_cache->set_normal_type_cache(*result);
- }
- UpdateNormalTypeCache(code_cache, name, code);
- } else {
- DCHECK(code_cache->default_cache()->IsFixedArray());
- UpdateDefaultCache(code_cache, name, code);
+ static int LinearNewSize(int old_size) {
+ int old_entries = (old_size - kLinearReservedSlots) / kEntrySize;
+ return LinearSizeFor(old_entries * 2);
}
-}
-
-void CodeCache::UpdateDefaultCache(
- Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
- Isolate* isolate = code_cache->GetIsolate();
- // When updating the default code cache we disregard the type encoded in the
- // flags. This allows call constant stubs to overwrite call field
- // stubs, etc.
- Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
-
- // First check whether we can update existing code cache without
- // extending it.
- Handle<FixedArray> cache = handle(code_cache->default_cache());
- int length = cache->length();
- {
- DisallowHeapAllocation no_alloc;
- int deleted_index = -1;
- Object* null = isolate->heap()->null_value();
- Object* undefined = isolate->heap()->undefined_value();
- DCHECK(name->IsUniqueName());
- for (int i = 0; i < length; i += kCodeCacheEntrySize) {
- Object* key = cache->get(i);
- if (key == null) {
- if (deleted_index < 0) deleted_index = i;
- continue;
- }
- if (key == undefined) {
- if (deleted_index >= 0) i = deleted_index;
- cache->set(i + kCodeCacheEntryNameOffset, *name);
- cache->set(i + kCodeCacheEntryCodeOffset, *code);
- return;
- }
- DCHECK(key->IsUniqueName());
- if (*name == key) {
- Code::Flags found =
- Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags();
- if (Code::RemoveTypeFromFlags(found) == flags) {
- cache->set(i + kCodeCacheEntryCodeOffset, *code);
- return;
- }
- }
- }
+ static Code* OneElementLookup(FixedArray* cache, Name* name,
+ Code::Flags flags) {
+ DCHECK_EQ(cache->length(), kEntrySize);
+ if (cache->get(kNameIndex) != name) return nullptr;
+ Code* maybe_code = Code::cast(cache->get(kCodeIndex));
+ if (maybe_code->flags() != flags) return nullptr;
+ return maybe_code;
+ }
- // Reached the end of the code cache. If there were deleted
- // elements, reuse the space for the first of them.
- if (deleted_index >= 0) {
- cache->set(deleted_index + kCodeCacheEntryNameOffset, *name);
- cache->set(deleted_index + kCodeCacheEntryCodeOffset, *code);
- return;
+ static Code* LinearLookup(FixedArray* cache, Name* name, Code::Flags flags) {
+ DCHECK_GE(cache->length(), kEntrySize);
+ DCHECK(!cache->IsCodeCacheHashTable());
+ int usage = GetLinearUsage(cache);
+ for (int i = kLinearReservedSlots; i < usage; i += kEntrySize) {
+ if (cache->get(i + kNameIndex) != name) continue;
+ Code* code = Code::cast(cache->get(i + kCodeIndex));
+ if (code->flags() == flags) return code;
}
+ return nullptr;
}
- // Extend the code cache with some new entries (at least one). Must be a
- // multiple of the entry size.
- int new_length = length + (length >> 1) + kCodeCacheEntrySize;
- new_length = new_length - new_length % kCodeCacheEntrySize;
- DCHECK((new_length % kCodeCacheEntrySize) == 0);
- cache = isolate->factory()->CopyFixedArrayAndGrow(cache, new_length - length);
-
- // Add the (name, code) pair to the new cache.
- cache->set(length + kCodeCacheEntryNameOffset, *name);
- cache->set(length + kCodeCacheEntryCodeOffset, *code);
- code_cache->set_default_cache(*cache);
-}
-
-
-void CodeCache::UpdateNormalTypeCache(
- Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
- // Adding a new entry can cause a new cache to be allocated.
- Handle<CodeCacheHashTable> cache(
- CodeCacheHashTable::cast(code_cache->normal_type_cache()));
- Handle<Object> new_cache = CodeCacheHashTable::Put(cache, name, code);
- code_cache->set_normal_type_cache(*new_cache);
-}
-
-
-Object* CodeCache::Lookup(Name* name, Code::Flags flags) {
- Object* result = LookupDefaultCache(name, Code::RemoveTypeFromFlags(flags));
- if (result->IsCode()) {
- if (Code::cast(result)->flags() == flags) return result;
- return GetHeap()->undefined_value();
+ static Handle<FixedArray> PutFirstElement(Isolate* isolate, Handle<Name> name,
+ Handle<Code> code) {
+ Handle<FixedArray> cache = isolate->factory()->NewFixedArray(kEntrySize);
+ cache->set(kNameIndex, *name);
+ cache->set(kCodeIndex, *code);
+ return cache;
}
- return LookupNormalTypeCache(name, flags);
-}
-
-Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) {
- FixedArray* cache = default_cache();
- Heap* heap = GetHeap();
- Object* null = heap->null_value();
- Object* undefined = heap->undefined_value();
- int length = cache->length();
- DCHECK(name->IsUniqueName());
- for (int i = 0; i < length; i += kCodeCacheEntrySize) {
- Object* key = cache->get(i + kCodeCacheEntryNameOffset);
- // Skip deleted elements.
- if (key == null) continue;
- if (key == undefined) return key;
- DCHECK(key->IsUniqueName());
- if (name == key) {
- Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset));
- if (Code::RemoveTypeFromFlags(code->flags()) == flags) {
- return code;
+ static Handle<FixedArray> PutSecondElement(Isolate* isolate,
+ Handle<FixedArray> cache,
+ Handle<Name> name,
+ Handle<Code> code) {
+ DCHECK_EQ(cache->length(), kEntrySize);
+ Handle<FixedArray> new_cache = isolate->factory()->NewFixedArray(
+ LinearSizeFor(kLinearInitialCapacity));
+ new_cache->set(kLinearReservedSlots + kNameIndex, cache->get(kNameIndex));
+ new_cache->set(kLinearReservedSlots + kCodeIndex, cache->get(kCodeIndex));
+ new_cache->set(LinearSizeFor(1) + kNameIndex, *name);
+ new_cache->set(LinearSizeFor(1) + kCodeIndex, *code);
+ new_cache->set(kLinearUsageIndex, Smi::FromInt(LinearSizeFor(2)));
+ return new_cache;
+ }
+
+ static Handle<FixedArray> PutLinearElement(Isolate* isolate,
+ Handle<FixedArray> cache,
+ Handle<Name> name,
+ Handle<Code> code) {
+ int length = cache->length();
+ int usage = GetLinearUsage(*cache);
+ DCHECK_LE(usage, length);
+ // Check if we need to grow.
+ if (usage == length) {
+ int new_length = LinearNewSize(length);
+ if (new_length > kLinearMaxSize) return Handle<FixedArray>::null();
+ Handle<FixedArray> new_cache =
+ isolate->factory()->NewFixedArray(new_length);
+ for (int i = kLinearReservedSlots; i < length; i++) {
+ new_cache->set(i, cache->get(i));
}
+ cache = new_cache;
}
+ // Store new entry.
+ DCHECK_GE(cache->length(), usage + kEntrySize);
+ cache->set(usage + kNameIndex, *name);
+ cache->set(usage + kCodeIndex, *code);
+ cache->set(kLinearUsageIndex, Smi::FromInt(usage + kEntrySize));
+ return cache;
}
- return GetHeap()->undefined_value();
-}
-
-Object* CodeCache::LookupNormalTypeCache(Name* name, Code::Flags flags) {
- if (!normal_type_cache()->IsUndefined()) {
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- return cache->Lookup(name, flags);
- } else {
- return GetHeap()->undefined_value();
+ static Handle<FixedArray> PutHashTableElement(Isolate* isolate,
+ Handle<FixedArray> cache,
+ Handle<Name> name,
+ Handle<Code> code) {
+ // Check if we need to transition from linear to hash table storage.
+ if (!cache->IsCodeCacheHashTable()) {
+ // Check that the initial hash table capacity is large enough.
+ DCHECK_EQ(kLinearMaxSize, LinearSizeFor(128));
+ STATIC_ASSERT(kHashTableInitialCapacity > 128);
+
+ int length = cache->length();
+ // Only migrate from linear storage when it's full.
+ DCHECK_EQ(length, GetLinearUsage(*cache));
+ DCHECK_EQ(length, kLinearMaxSize);
+ Handle<CodeCacheHashTable> table =
+ CodeCacheHashTable::New(isolate, kHashTableInitialCapacity);
+ HandleScope scope(isolate);
+ for (int i = kLinearReservedSlots; i < length; i += kEntrySize) {
+ Handle<Name> old_name(Name::cast(cache->get(i + kNameIndex)), isolate);
+ Handle<Code> old_code(Code::cast(cache->get(i + kCodeIndex)), isolate);
+ CodeCacheHashTable::Put(table, old_name, old_code);
+ }
+ cache = table;
+ }
+ // Store new entry.
+ DCHECK(cache->IsCodeCacheHashTable());
+ return CodeCacheHashTable::Put(Handle<CodeCacheHashTable>::cast(cache),
+ name, code);
}
-}
-
-int CodeCache::GetIndex(Object* name, Code* code) {
- if (code->type() == Code::NORMAL) {
- if (normal_type_cache()->IsUndefined()) return -1;
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- return cache->GetIndex(Name::cast(name), code->flags());
+ static inline int GetLinearUsage(FixedArray* linear_cache) {
+ DCHECK_GT(linear_cache->length(), kEntrySize);
+ return Smi::cast(linear_cache->get(kLinearUsageIndex))->value();
}
+};
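The CodeCache helper above keeps tiny caches allocation-cheap and only pays for a hash table once the linear tier fills up. A standalone sketch of the same three-tier strategy, with std::vector and std::unordered_map standing in for FixedArray and CodeCacheHashTable (TieredCache and its members are hypothetical names, not V8 API):

    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    // Tiered name->value cache: empty, then a linearly scanned vector while
    // small, then a hash table once the linear tier would exceed kLinearMax.
    class TieredCache {
     public:
      void Put(const std::string& name, int code) {
        if (!use_table_ && linear_.size() >= kLinearMax) {
          // Migrate the full linear tier into the hash table, as
          // CodeCache::PutHashTableElement does for FixedArray storage.
          for (const auto& entry : linear_) table_[entry.first] = entry.second;
          linear_.clear();
          use_table_ = true;
        }
        if (use_table_) {
          table_[name] = code;
        } else {
          linear_.emplace_back(name, code);
        }
      }

      // Returns nullptr when absent, mirroring CodeCache::Lookup.
      const int* Lookup(const std::string& name) const {
        if (use_table_) {
          auto it = table_.find(name);
          return it == table_.end() ? nullptr : &it->second;
        }
        for (const auto& entry : linear_) {  // linear scan, cheap when small
          if (entry.first == name) return &entry.second;
        }
        return nullptr;
      }

     private:
      static constexpr size_t kLinearMax = 128;  // plays the kLinearMaxSize role
      bool use_table_ = false;
      std::vector<std::pair<std::string, int>> linear_;
      std::unordered_map<std::string, int> table_;
    };

The linear tier wins while the cache fits in a cache line or two; only caches that actually grow large pay the hash-table cost, which is the point of the layout comment above.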
- FixedArray* array = default_cache();
- int len = array->length();
- for (int i = 0; i < len; i += kCodeCacheEntrySize) {
- if (array->get(i + kCodeCacheEntryCodeOffset) == code) return i + 1;
- }
- return -1;
+void Map::UpdateCodeCache(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Code> code) {
+ Isolate* isolate = map->GetIsolate();
+ Handle<FixedArray> cache(map->code_cache(), isolate);
+ Handle<FixedArray> new_cache = CodeCache::Put(isolate, cache, name, code);
+ map->set_code_cache(*new_cache);
}
-
-void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
- if (code->type() == Code::NORMAL) {
- DCHECK(!normal_type_cache()->IsUndefined());
- CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
- DCHECK(cache->GetIndex(Name::cast(name), code->flags()) == index);
- cache->RemoveByIndex(index);
- } else {
- FixedArray* array = default_cache();
- DCHECK(array->length() >= index && array->get(index)->IsCode());
- // Use null instead of undefined for deleted elements to distinguish
- // deleted elements from unused elements. This distinction is used
- // when looking up in the cache and when updating the cache.
- DCHECK_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
- array->set_null(index - 1); // Name.
- array->set_null(index); // Code.
- }
+Code* Map::LookupInCodeCache(Name* name, Code::Flags flags) {
+ return CodeCache::Lookup(code_cache(), name, flags);
}
@@ -10392,20 +9885,23 @@ void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
class CodeCacheHashTableKey : public HashTableKey {
public:
CodeCacheHashTableKey(Handle<Name> name, Code::Flags flags)
- : name_(name), flags_(flags), code_() { }
+ : name_(name), flags_(flags), code_() {
+ DCHECK(name_->IsUniqueName());
+ }
CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
- : name_(name), flags_(code->flags()), code_(code) { }
+ : name_(name), flags_(code->flags()), code_(code) {
+ DCHECK(name_->IsUniqueName());
+ }
bool IsMatch(Object* other) override {
- if (!other->IsFixedArray()) return false;
+ DCHECK(other->IsFixedArray());
FixedArray* pair = FixedArray::cast(other);
Name* name = Name::cast(pair->get(0));
Code::Flags flags = Code::cast(pair->get(1))->flags();
- if (flags != flags_) {
- return false;
- }
- return name_->Equals(name);
+ if (flags != flags_) return false;
+ DCHECK(name->IsUniqueName());
+ return *name_ == name;
}
static uint32_t NameFlagsHashHelper(Name* name, Code::Flags flags) {
@@ -10437,15 +9933,6 @@ class CodeCacheHashTableKey : public HashTableKey {
};
-Object* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) {
- DisallowHeapAllocation no_alloc;
- CodeCacheHashTableKey key(handle(name), flags);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
Handle<CodeCacheHashTable> CodeCacheHashTable::Put(
Handle<CodeCacheHashTable> cache, Handle<Name> name, Handle<Code> code) {
CodeCacheHashTableKey key(name, code);
@@ -10456,179 +9943,36 @@ Handle<CodeCacheHashTable> CodeCacheHashTable::Put(
Handle<Object> k = key.AsHandle(cache->GetIsolate());
new_cache->set(EntryToIndex(entry), *k);
- new_cache->set(EntryToIndex(entry) + 1, *code);
new_cache->ElementAdded();
return new_cache;
}
-
-int CodeCacheHashTable::GetIndex(Name* name, Code::Flags flags) {
+Code* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) {
DisallowHeapAllocation no_alloc;
CodeCacheHashTableKey key(handle(name), flags);
int entry = FindEntry(&key);
- return (entry == kNotFound) ? -1 : entry;
+ if (entry == kNotFound) return nullptr;
+ return Code::cast(FixedArray::cast(get(EntryToIndex(entry)))->get(1));
}
-
-void CodeCacheHashTable::RemoveByIndex(int index) {
- DCHECK(index >= 0);
- Heap* heap = GetHeap();
- set(EntryToIndex(index), heap->the_hole_value());
- set(EntryToIndex(index) + 1, heap->the_hole_value());
- ElementRemoved();
-}
-
-
-void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> code_cache,
- MapHandleList* maps,
- Code::Flags flags,
- Handle<Code> code) {
- Isolate* isolate = code_cache->GetIsolate();
- if (code_cache->cache()->IsUndefined()) {
- Handle<PolymorphicCodeCacheHashTable> result =
- PolymorphicCodeCacheHashTable::New(
- isolate,
- PolymorphicCodeCacheHashTable::kInitialSize);
- code_cache->set_cache(*result);
- } else {
- // This entry shouldn't be contained in the cache yet.
- DCHECK(PolymorphicCodeCacheHashTable::cast(code_cache->cache())
- ->Lookup(maps, flags)->IsUndefined());
- }
- Handle<PolymorphicCodeCacheHashTable> hash_table =
- handle(PolymorphicCodeCacheHashTable::cast(code_cache->cache()));
- Handle<PolymorphicCodeCacheHashTable> new_cache =
- PolymorphicCodeCacheHashTable::Put(hash_table, maps, flags, code);
- code_cache->set_cache(*new_cache);
-}
-
-
-Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
- Code::Flags flags) {
- if (!cache()->IsUndefined()) {
- PolymorphicCodeCacheHashTable* hash_table =
- PolymorphicCodeCacheHashTable::cast(cache());
- return Handle<Object>(hash_table->Lookup(maps, flags), GetIsolate());
- } else {
- return GetIsolate()->factory()->undefined_value();
- }
-}
-
-
-// Despite their name, objects of this class are not stored in the actual
-// hash table; instead they're temporarily used for lookups. It is therefore
-// safe to have a weak (non-owning) pointer to a MapList as a member field.
-class PolymorphicCodeCacheHashTableKey : public HashTableKey {
- public:
- // Callers must ensure that |maps| outlives the newly constructed object.
- PolymorphicCodeCacheHashTableKey(MapHandleList* maps, int code_flags)
- : maps_(maps),
- code_flags_(code_flags) {}
-
- bool IsMatch(Object* other) override {
- MapHandleList other_maps(kDefaultListAllocationSize);
- int other_flags;
- FromObject(other, &other_flags, &other_maps);
- if (code_flags_ != other_flags) return false;
- if (maps_->length() != other_maps.length()) return false;
- // Compare just the hashes first because it's faster.
- int this_hash = MapsHashHelper(maps_, code_flags_);
- int other_hash = MapsHashHelper(&other_maps, other_flags);
- if (this_hash != other_hash) return false;
-
- // Full comparison: for each map in maps_, look for an equivalent map in
- // other_maps. This implementation is slow, but probably good enough for
- // now because the lists are short (<= 4 elements currently).
- for (int i = 0; i < maps_->length(); ++i) {
- bool match_found = false;
- for (int j = 0; j < other_maps.length(); ++j) {
- if (*(maps_->at(i)) == *(other_maps.at(j))) {
- match_found = true;
- break;
- }
- }
- if (!match_found) return false;
- }
- return true;
- }
-
- static uint32_t MapsHashHelper(MapHandleList* maps, int code_flags) {
- uint32_t hash = code_flags;
- for (int i = 0; i < maps->length(); ++i) {
- hash ^= maps->at(i)->Hash();
- }
- return hash;
- }
-
- uint32_t Hash() override { return MapsHashHelper(maps_, code_flags_); }
-
- uint32_t HashForObject(Object* obj) override {
- MapHandleList other_maps(kDefaultListAllocationSize);
- int other_flags;
- FromObject(obj, &other_flags, &other_maps);
- return MapsHashHelper(&other_maps, other_flags);
- }
-
- MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
- // The maps in |maps_| must be copied to a newly allocated FixedArray,
- // both because the referenced MapList is short-lived, and because C++
- // objects can't be stored in the heap anyway.
- Handle<FixedArray> list =
- isolate->factory()->NewUninitializedFixedArray(maps_->length() + 1);
- list->set(0, Smi::FromInt(code_flags_));
- for (int i = 0; i < maps_->length(); ++i) {
- list->set(i + 1, *maps_->at(i));
- }
- return list;
- }
-
- private:
- static MapHandleList* FromObject(Object* obj,
- int* code_flags,
- MapHandleList* maps) {
- FixedArray* list = FixedArray::cast(obj);
- maps->Rewind(0);
- *code_flags = Smi::cast(list->get(0))->value();
- for (int i = 1; i < list->length(); ++i) {
- maps->Add(Handle<Map>(Map::cast(list->get(i))));
- }
- return maps;
+Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
+ Handle<Object> value) {
+ if (index < array->length()) {
+ array->set(index, *value);
+ return array;
}
-
- MapHandleList* maps_; // weak.
- int code_flags_;
- static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
-};
-
-
-Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
- int code_kind) {
- DisallowHeapAllocation no_alloc;
- PolymorphicCodeCacheHashTableKey key(maps, code_kind);
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-Handle<PolymorphicCodeCacheHashTable> PolymorphicCodeCacheHashTable::Put(
- Handle<PolymorphicCodeCacheHashTable> hash_table,
- MapHandleList* maps,
- int code_kind,
- Handle<Code> code) {
- PolymorphicCodeCacheHashTableKey key(maps, code_kind);
- Handle<PolymorphicCodeCacheHashTable> cache =
- EnsureCapacity(hash_table, 1, &key);
- int entry = cache->FindInsertionEntry(key.Hash());
-
- Handle<Object> obj = key.AsHandle(hash_table->GetIsolate());
- cache->set(EntryToIndex(entry), *obj);
- cache->set(EntryToIndex(entry) + 1, *code);
- cache->ElementAdded();
- return cache;
+ int capacity = array->length();
+ do {
+ capacity = JSObject::NewElementsCapacity(capacity);
+ } while (capacity <= index);
+ Handle<FixedArray> new_array =
+ array->GetIsolate()->factory()->NewUninitializedFixedArray(capacity);
+ array->CopyTo(0, *new_array, 0, array->length());
+ new_array->FillWithHoles(array->length(), new_array->length());
+ new_array->set(index, *value);
+ return new_array;
}
-
void FixedArray::Shrink(int new_length) {
DCHECK(0 <= new_length && new_length <= length());
if (new_length < length()) {
@@ -10984,7 +10328,7 @@ Handle<Object> AccessorPair::GetComponent(Handle<AccessorPair> accessor_pair,
.ToHandleChecked();
}
Isolate* isolate = accessor_pair->GetIsolate();
- if (accessor->IsTheHole()) {
+ if (accessor->IsNull(isolate)) {
return isolate->factory()->undefined_value();
}
return handle(accessor, isolate);
@@ -11012,12 +10356,21 @@ Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
return Handle<DeoptimizationOutputData>::cast(result);
}
+const int LiteralsArray::kFeedbackVectorOffset =
+ LiteralsArray::OffsetOfElementAt(LiteralsArray::kVectorIndex);
+
+const int LiteralsArray::kOffsetToFirstLiteral =
+ LiteralsArray::OffsetOfElementAt(LiteralsArray::kFirstLiteralIndex);
// static
Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
Handle<TypeFeedbackVector> vector,
int number_of_literals,
PretenureFlag pretenure) {
+ if (vector->is_empty() && number_of_literals == 0) {
+ return Handle<LiteralsArray>::cast(
+ isolate->factory()->empty_literals_array());
+ }
Handle<FixedArray> literals = isolate->factory()->NewFixedArray(
number_of_literals + kFirstLiteralIndex, pretenure);
Handle<LiteralsArray> casted_literals = Handle<LiteralsArray>::cast(literals);
@@ -11058,14 +10411,11 @@ int HandlerTable::LookupRange(int pc_offset, int* data_out,
// TODO(turbofan): Make sure table is sorted and use binary search.
-int HandlerTable::LookupReturn(int pc_offset, CatchPrediction* prediction_out) {
+int HandlerTable::LookupReturn(int pc_offset) {
for (int i = 0; i < length(); i += kReturnEntrySize) {
int return_offset = Smi::cast(get(i + kReturnOffsetIndex))->value();
int handler_field = Smi::cast(get(i + kReturnHandlerIndex))->value();
if (pc_offset == return_offset) {
- if (prediction_out) {
- *prediction_out = HandlerPredictionField::decode(handler_field);
- }
return HandlerOffsetField::decode(handler_field);
}
}
@@ -11085,6 +10435,34 @@ bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
}
#endif
+// static
+Handle<String> String::Trim(Handle<String> string, TrimMode mode) {
+ Isolate* const isolate = string->GetIsolate();
+ string = String::Flatten(string);
+ int const length = string->length();
+
+ // Perform left trimming if requested.
+ int left = 0;
+ UnicodeCache* unicode_cache = isolate->unicode_cache();
+ if (mode == kTrim || mode == kTrimLeft) {
+ while (left < length &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
+ left++;
+ }
+ }
+
+ // Perform right trimming if requested.
+ int right = length;
+ if (mode == kTrim || mode == kTrimRight) {
+ while (
+ right > left &&
+ unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
+ right--;
+ }
+ }
+
+ return isolate->factory()->NewSubString(string, left, right);
+}
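String::Trim above flattens the string once, then walks an index inward from each requested end, so only the final NewSubString call allocates. The same two-index shape over a std::string, with ASCII isspace standing in for the UnicodeCache whitespace/line-terminator predicate:

    #include <cctype>
    #include <string>

    enum class TrimMode { kTrim, kTrimLeft, kTrimRight };

    // Two-index trim: advance 'left' and retreat 'right' past whitespace,
    // then take a single substring at the end.
    std::string Trim(const std::string& s, TrimMode mode) {
      size_t left = 0;
      size_t right = s.size();
      if (mode == TrimMode::kTrim || mode == TrimMode::kTrimLeft) {
        while (left < right && std::isspace(static_cast<unsigned char>(s[left])))
          ++left;
      }
      if (mode == TrimMode::kTrim || mode == TrimMode::kTrimRight) {
        while (right > left &&
               std::isspace(static_cast<unsigned char>(s[right - 1])))
          --right;
      }
      return s.substr(left, right - left);
    }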
bool String::LooksValid() {
if (!GetIsolate()->heap()->Contains(this)) return false;
@@ -11098,7 +10476,9 @@ MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
// ES6 section 9.2.11 SetFunctionName, step 4.
Isolate* const isolate = name->GetIsolate();
Handle<Object> description(Handle<Symbol>::cast(name)->name(), isolate);
- if (description->IsUndefined()) return isolate->factory()->empty_string();
+ if (description->IsUndefined(isolate)) {
+ return isolate->factory()->empty_string();
+ }
IncrementalStringBuilder builder(isolate);
builder.AppendCharacter('[');
builder.AppendString(Handle<String>::cast(description));
@@ -11106,6 +10486,19 @@ MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
return builder.Finish();
}
+// static
+MaybeHandle<String> Name::ToFunctionName(Handle<Name> name,
+ Handle<String> prefix) {
+ Handle<String> name_string;
+ Isolate* const isolate = name->GetIsolate();
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name_string, ToFunctionName(name),
+ String);
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(prefix);
+ builder.AppendCharacter(' ');
+ builder.AppendString(name_string);
+ return builder.Finish();
+}
namespace {
@@ -11238,13 +10631,12 @@ String::FlatContent String::GetFlatContent() {
}
}
-
-base::SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int offset, int length,
- int* length_return) {
+std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robust_flag,
+ int offset, int length,
+ int* length_return) {
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return base::SmartArrayPointer<char>(NULL);
+ return std::unique_ptr<char[]>();
}
// Negative length means to the end of the string.
if (length < 0) length = kMaxInt - offset;
@@ -11281,13 +10673,12 @@ base::SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
last = character;
}
result[utf8_byte_position] = 0;
- return base::SmartArrayPointer<char>(result);
+ return std::unique_ptr<char[]>(result);
}
-
-base::SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robust_flag,
- int* length_return) {
+std::unique_ptr<char[]> String::ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robust_flag,
+ int* length_return) {
return ToCString(allow_nulls, robust_flag, 0, -1, length_return);
}
@@ -11313,25 +10704,6 @@ const uc16* String::GetTwoByteData(unsigned start) {
}
-base::SmartArrayPointer<uc16> String::ToWideCString(
- RobustnessFlag robust_flag) {
- if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
- return base::SmartArrayPointer<uc16>();
- }
- StringCharacterStream stream(this);
-
- uc16* result = NewArray<uc16>(length() + 1);
-
- int i = 0;
- while (stream.HasMore()) {
- uint16_t character = stream.GetNext();
- result[i++] = character;
- }
- result[i] = 0;
- return base::SmartArrayPointer<uc16>(result);
-}
-
-
const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
return reinterpret_cast<uc16*>(
reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
@@ -12052,6 +11424,42 @@ ComparisonResult String::Compare(Handle<String> x, Handle<String> y) {
return result;
}
+int String::IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
+ int start_index) {
+ DCHECK(0 <= start_index);
+ DCHECK(start_index <= sub->length());
+
+ int pattern_length = pat->length();
+ if (pattern_length == 0) return start_index;
+
+ int subject_length = sub->length();
+ if (start_index + pattern_length > subject_length) return -1;
+
+ sub = String::Flatten(sub);
+ pat = String::Flatten(pat);
+
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+ // Extract flattened substrings of cons strings before getting encoding.
+ String::FlatContent seq_sub = sub->GetFlatContent();
+ String::FlatContent seq_pat = pat->GetFlatContent();
+
+ // dispatch on type of strings
+ if (seq_pat.IsOneByte()) {
+ Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
+ if (seq_sub.IsOneByte()) {
+ return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
+ start_index);
+ }
+ return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector,
+ start_index);
+ }
+ Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
+ if (seq_sub.IsOneByte()) {
+ return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
+ start_index);
+ }
+ return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector, start_index);
+}
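String::IndexOf above flattens both strings and then dispatches on the four one-byte/two-byte encoding combinations, so each SearchString instantiation runs a monomorphic inner loop. A standalone sketch of that dispatch shape, with a naive scan standing in for V8's SearchString:

    #include <cstdint>
    #include <vector>

    // Naive stand-in for SearchString: first occurrence of 'pat' in 'sub' at
    // or after 'start', -1 if absent. Each <SubChar, PatChar> pair
    // instantiates its own tight loop, as in the four branches above.
    template <typename SubChar, typename PatChar>
    int Search(const std::vector<SubChar>& sub, const std::vector<PatChar>& pat,
               int start) {
      if (pat.empty()) return start;
      for (size_t i = start; i + pat.size() <= sub.size(); ++i) {
        size_t j = 0;
        while (j < pat.size() && static_cast<uint32_t>(sub[i + j]) ==
                                     static_cast<uint32_t>(pat[j])) {
          ++j;
        }
        if (j == pat.size()) return static_cast<int>(i);
      }
      return -1;
    }

    // One of the four combinations: two-byte subject, one-byte pattern.
    int IndexOfLatin1InUC16(const std::vector<uint16_t>& sub,
                            const std::vector<uint8_t>& pat, int start) {
      return Search(sub, pat, start);
    }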
bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
int slen = length();
@@ -12200,8 +11608,8 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
value |= length << String::ArrayIndexLengthBits::kShift;
DCHECK((value & String::kIsNotArrayIndexMask) == 0);
- DCHECK((length > String::kMaxCachedArrayIndexLength) ||
- (value & String::kContainsCachedArrayIndexMask) == 0);
+ DCHECK_EQ(length <= String::kMaxCachedArrayIndexLength,
+ (value & String::kContainsCachedArrayIndexMask) == 0);
return value;
}
@@ -12377,15 +11785,18 @@ bool JSFunction::Inlines(SharedFunctionInfo* candidate) {
return false;
}
+void JSFunction::MarkForBaseline() {
+ Isolate* isolate = GetIsolate();
+ set_code_no_write_barrier(
+ isolate->builtins()->builtin(Builtins::kCompileBaseline));
+ // No write barrier required, since the builtin is part of the root set.
+}
void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
- // Do not optimize if function contains break points.
- if (shared()->HasDebugInfo()) return;
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() ||
!shared()->optimization_disabled());
- DCHECK(!shared()->HasDebugInfo());
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
@@ -12414,6 +11825,30 @@ void JSFunction::AttemptConcurrentOptimization() {
// No write barrier required, since the builtin is part of the root set.
}
+// static
+Handle<LiteralsArray> SharedFunctionInfo::FindOrCreateLiterals(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context) {
+ Isolate* isolate = shared->GetIsolate();
+ CodeAndLiterals result =
+ shared->SearchOptimizedCodeMap(*native_context, BailoutId::None());
+ if (result.literals != nullptr) {
+ DCHECK(shared->feedback_metadata()->is_empty() ||
+ !result.literals->feedback_vector()->is_empty());
+ return handle(result.literals, isolate);
+ }
+
+ Handle<TypeFeedbackVector> feedback_vector =
+ TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
+ Handle<LiteralsArray> literals =
+ LiteralsArray::New(isolate, feedback_vector, shared->num_literals());
+ Handle<Code> code;
+ if (result.code != nullptr) {
+ code = Handle<Code>(result.code, isolate);
+ }
+ AddToOptimizedCodeMap(shared, native_context, code, literals,
+ BailoutId::None());
+ return literals;
+}
void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Code> code) {
@@ -12610,6 +12045,17 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
}
}
+// static
+void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<Context> native_context(function->context()->native_context());
+ if (function->literals() ==
+ function->GetIsolate()->heap()->empty_literals_array()) {
+ Handle<LiteralsArray> literals =
+ SharedFunctionInfo::FindOrCreateLiterals(shared, native_context);
+ function->set_literals(*literals);
+ }
+}
static void GetMinInobjectSlack(Map* map, void* data) {
int slack = map->unused_property_fields();
@@ -12636,7 +12082,7 @@ static void StopSlackTracking(Map* map, void* data) {
void Map::CompleteInobjectSlackTracking() {
// Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined());
+ DCHECK(GetBackPointer()->IsUndefined(GetIsolate()));
int slack = unused_property_fields();
TransitionArray::TraverseTransitionTree(this, &GetMinInobjectSlack, &slack);
@@ -12667,6 +12113,26 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
return false;
}
+// static
+void JSObject::MakePrototypesFast(Handle<Object> receiver,
+ WhereToStart where_to_start,
+ Isolate* isolate) {
+ if (!receiver->IsJSReceiver()) return;
+ for (PrototypeIterator iter(isolate, Handle<JSReceiver>::cast(receiver),
+ where_to_start);
+ !iter.IsAtEnd(); iter.Advance()) {
+ Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+ if (!current->IsJSObject()) return;
+ Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
+ Map* current_map = current_obj->map();
+ if (current_map->is_prototype_map() &&
+ !current_map->should_be_fast_prototype_map()) {
+ Handle<Map> map(current_map);
+ Map::SetShouldBeFastPrototypeMap(map, true, isolate);
+ JSObject::OptimizeAsPrototype(current_obj, FAST_PROTOTYPE);
+ }
+ }
+}
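MakePrototypesFast above walks the receiver's prototype chain and opts each not-yet-marked prototype map into fast mode, bailing out at the first non-JSObject. The walk-and-mark shape, sketched over a plain linked chain (ProtoNode is a hypothetical stand-in, not a V8 type):

    // Each node models one prototype in the chain.
    struct ProtoNode {
      bool is_plain_object = true;  // stands in for the IsJSObject() check
      bool should_be_fast = false;  // stands in for should_be_fast_prototype_map
      ProtoNode* next = nullptr;    // the next prototype, or nullptr at the end
    };

    void MakeChainFast(ProtoNode* start) {
      for (ProtoNode* node = start; node != nullptr; node = node->next) {
        if (!node->is_plain_object) return;  // bail out, as the loop above does
        if (!node->should_be_fast) {
          node->should_be_fast = true;  // in V8: SetShouldBeFastPrototypeMap
                                        // plus OptimizeAsPrototype
        }
      }
    }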
// static
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
@@ -12678,10 +12144,12 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
"NormalizeAsPrototype");
}
Handle<Map> previous_map(object->map());
- if (!object->HasFastProperties()) {
- JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
- }
- if (!object->map()->is_prototype_map()) {
+ if (object->map()->is_prototype_map()) {
+ if (object->map()->should_be_fast_prototype_map() &&
+ !object->HasFastProperties()) {
+ JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
+ }
+ } else {
if (object->map() == *previous_map) {
Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
JSObject::MigrateToMap(object, new_map);
@@ -12709,13 +12177,13 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
// static
void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
if (!object->map()->is_prototype_map()) return;
+ if (!object->map()->should_be_fast_prototype_map()) return;
OptimizeAsPrototype(object, FAST_PROTOTYPE);
}
// static
void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
- DCHECK(FLAG_track_prototype_users);
// Contract: In line with InvalidatePrototypeChains()'s requirements,
// leaf maps don't need to register as users, only prototypes do.
DCHECK(user->is_prototype_map());
@@ -12821,7 +12289,6 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
// static
void JSObject::InvalidatePrototypeChains(Map* map) {
- if (!FLAG_eliminate_prototype_chain_checks) return;
DisallowHeapAllocation no_gc;
InvalidatePrototypeChainsInternal(map);
}
@@ -12852,6 +12319,15 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
return proto_info;
}
+// static
+void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
+ Isolate* isolate) {
+ if (value == false && !map->prototype_info()->IsPrototypeInfo()) {
+ // "False" is the implicit default value, so there's nothing to do.
+ return;
+ }
+ GetOrCreatePrototypeInfo(map, isolate)->set_should_be_fast_map(value);
+}
// static
Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
@@ -12884,6 +12360,10 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
// static
void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
PrototypeOptimizationMode proto_mode) {
+ RuntimeCallTimerScope stats_scope(*map, &RuntimeCallStats::Map_SetPrototype);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ map->GetIsolate(), &tracing::TraceEventStatsTable::Map_SetPrototype);
+
bool is_hidden = false;
if (prototype->IsJSObject()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
@@ -12900,8 +12380,9 @@ void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
}
map->set_has_hidden_prototype(is_hidden);
- WriteBarrierMode wb_mode =
- prototype->IsNull() ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ WriteBarrierMode wb_mode = prototype->IsNull(map->GetIsolate())
+ ? SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
map->set_prototype(*prototype, wb_mode);
}
@@ -13009,8 +12490,13 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
new_map->SetConstructor(*value);
new_map->set_non_instance_prototype(true);
Isolate* isolate = new_map->GetIsolate();
+
construct_prototype = handle(
- function->context()->native_context()->initial_object_prototype(),
+ IsGeneratorFunction(function->shared()->kind())
+ ? function->context()
+ ->native_context()
+ ->initial_generator_prototype()
+ : function->context()->native_context()->initial_object_prototype(),
isolate);
} else {
function->map()->set_non_instance_prototype(false);
@@ -13065,50 +12551,54 @@ namespace {
bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
switch (instance_type) {
- case JS_OBJECT_TYPE:
- case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_GENERATOR_OBJECT_TYPE:
- case JS_MODULE_TYPE:
- case JS_VALUE_TYPE:
- case JS_DATE_TYPE:
- case JS_ARRAY_TYPE:
- case JS_MESSAGE_OBJECT_TYPE:
+ case JS_API_OBJECT_TYPE:
case JS_ARRAY_BUFFER_TYPE:
- case JS_TYPED_ARRAY_TYPE:
+ case JS_ARRAY_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_DATA_VIEW_TYPE:
- case JS_SET_TYPE:
+ case JS_DATE_TYPE:
+ case JS_FUNCTION_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
case JS_MAP_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_MODULE_TYPE:
+ case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_REGEXP_TYPE:
case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
+ case JS_SET_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_VALUE_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
- case JS_PROMISE_TYPE:
- case JS_REGEXP_TYPE:
- case JS_FUNCTION_TYPE:
return true;
- case JS_BOUND_FUNCTION_TYPE:
- case JS_PROXY_TYPE:
- case JS_GLOBAL_PROXY_TYPE:
- case JS_GLOBAL_OBJECT_TYPE:
+ case BYTECODE_ARRAY_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case CELL_TYPE:
+ case CODE_TYPE:
+ case FILLER_TYPE:
case FIXED_ARRAY_TYPE:
case FIXED_DOUBLE_ARRAY_TYPE:
- case ODDBALL_TYPE:
case FOREIGN_TYPE:
- case MAP_TYPE:
- case CODE_TYPE:
- case CELL_TYPE:
- case PROPERTY_CELL_TYPE:
- case WEAK_CELL_TYPE:
- case SYMBOL_TYPE:
- case BYTECODE_ARRAY_TYPE:
+ case FREE_SPACE_TYPE:
case HEAP_NUMBER_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_PROXY_TYPE:
+ case MAP_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
- case SIMD128_VALUE_TYPE:
- case FILLER_TYPE:
- case BYTE_ARRAY_TYPE:
- case FREE_SPACE_TYPE:
+ case ODDBALL_TYPE:
+ case PROPERTY_CELL_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
+ case SIMD128_VALUE_TYPE:
+ case SYMBOL_TYPE:
+ case WEAK_CELL_TYPE:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE:
@@ -13130,7 +12620,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
- DCHECK(function->IsConstructor() || function->shared()->is_generator());
+ DCHECK(function->IsConstructor() || function->shared()->is_resumable());
if (function->has_initial_map()) return;
Isolate* isolate = function->GetIsolate();
@@ -13141,7 +12631,7 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
// First create a new map with the size and number of in-object properties
// suggested by the function.
InstanceType instance_type;
- if (function->shared()->is_generator()) {
+ if (function->shared()->is_resumable()) {
instance_type = JS_GENERATOR_OBJECT_TYPE;
} else {
instance_type = JS_OBJECT_TYPE;
@@ -13275,7 +12765,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
void JSFunction::PrintName(FILE* out) {
- base::SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared()->DebugName()->ToCString();
PrintF(out, "%s", name.get());
}
@@ -13341,18 +12831,6 @@ Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
return isolate->factory()->NewStringFromAsciiChecked(kNativeCodeSource);
}
-// static
-MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
- Handle<JSBoundFunction> function) {
- Handle<String> prefix = isolate->factory()->bound__string();
- if (!function->bound_target_function()->IsJSFunction()) return prefix;
- Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
- isolate);
- Handle<Object> target_name = JSFunction::GetName(target);
- if (!target_name->IsString()) return prefix;
- Factory* factory = isolate->factory();
- return factory->NewConsString(prefix, Handle<String>::cast(target_name));
-}
// static
Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
@@ -13386,10 +12864,16 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
IncrementalStringBuilder builder(isolate);
if (!shared_info->is_arrow()) {
if (shared_info->is_concise_method()) {
- if (shared_info->is_generator()) builder.AppendCharacter('*');
+ if (shared_info->is_generator()) {
+ builder.AppendCharacter('*');
+ } else if (shared_info->is_async()) {
+ builder.AppendCString("async ");
+ }
} else {
if (shared_info->is_generator()) {
builder.AppendCString("function* ");
+ } else if (shared_info->is_async()) {
+ builder.AppendCString("async function ");
} else {
builder.AppendCString("function ");
}
@@ -13406,26 +12890,64 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
void Oddball::Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
- bool to_boolean, const char* type_of, byte kind) {
+ const char* type_of, byte kind) {
Handle<String> internalized_to_string =
isolate->factory()->InternalizeUtf8String(to_string);
Handle<String> internalized_type_of =
isolate->factory()->InternalizeUtf8String(type_of);
- oddball->set_to_boolean(isolate->heap()->ToBoolean(to_boolean));
+ oddball->set_to_number_raw(to_number->Number());
oddball->set_to_number(*to_number);
oddball->set_to_string(*internalized_to_string);
oddball->set_type_of(*internalized_type_of);
oddball->set_kind(kind);
}
+void Script::SetEvalOrigin(Handle<Script> script,
+ Handle<SharedFunctionInfo> outer_info,
+ int eval_position) {
+ if (eval_position == kNoSourcePosition) {
+ // If the position is missing, attempt to get the code offset from the
+ // current activation. Do not translate the code offset into source
+ // position, but store it as a negative value for lazy translation.
+ StackTraceFrameIterator it(script->GetIsolate());
+ if (!it.done() && it.is_javascript()) {
+ FrameSummary summary = FrameSummary::GetFirst(it.javascript_frame());
+ script->set_eval_from_shared(summary.function()->shared());
+ script->set_eval_from_position(-summary.code_offset());
+ return;
+ }
+ eval_position = 0;
+ }
+ script->set_eval_from_shared(*outer_info);
+ script->set_eval_from_position(eval_position);
+}
-void Script::InitLineEnds(Handle<Script> script) {
- if (!script->line_ends()->IsUndefined()) return;
+int Script::GetEvalPosition() {
+ DisallowHeapAllocation no_gc;
+ DCHECK(compilation_type() == Script::COMPILATION_TYPE_EVAL);
+ int position = eval_from_position();
+ if (position < 0) {
+ // Due to laziness, the position may not have been translated from the
+ // code offset yet; an untranslated offset is encoded as a negative
+ // integer. In that case, translate and set the position.
+ if (eval_from_shared()->IsUndefined(GetIsolate())) {
+ position = 0;
+ } else {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(eval_from_shared());
+ position = shared->abstract_code()->SourcePosition(-position);
+ }
+ DCHECK(position >= 0);
+ set_eval_from_position(position);
+ }
+ return position;
+}
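
The two functions above form an encode/decode pair: SetEvalOrigin stores an untranslated code offset as a negative number, and GetEvalPosition resolves and memoizes it on first read. A minimal standalone sketch of that scheme, with hypothetical names and simplified types:

    struct LazyEvalPosition {
      int stored = 0;  // >= 0: source position; < 0: negated, untranslated code offset.

      void SetFromCodeOffset(int code_offset) { stored = -code_offset; }
      void SetFromSourcePosition(int position) { stored = position; }

      // translate maps a code offset to a source position (a stand-in here for
      // the SharedFunctionInfo/AbstractCode lookup in the real code).
      template <typename TranslateFn>
      int Get(TranslateFn translate) {
        if (stored < 0) stored = translate(-stored);  // Resolve lazily, then cache.
        return stored;
      }
    };
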
+void Script::InitLineEnds(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
+ if (!script->line_ends()->IsUndefined(isolate)) return;
if (!script->source()->IsString()) {
- DCHECK(script->source()->IsUndefined());
+ DCHECK(script->source()->IsUndefined(isolate));
Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
script->set_line_ends(*empty);
DCHECK(script->line_ends()->IsFixedArray());
@@ -13444,42 +12966,94 @@ void Script::InitLineEnds(Handle<Script> script) {
DCHECK(script->line_ends()->IsFixedArray());
}
-
-int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
- int line_number = GetLineNumber(script, code_pos);
- if (line_number == -1) return -1;
+#define SMI_VALUE(x) (Smi::cast(x)->value())
+bool Script::GetPositionInfo(int position, PositionInfo* info,
+ OffsetFlag offset_flag) {
+ Handle<Script> script(this);
+ InitLineEnds(script);
DisallowHeapAllocation no_allocation;
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- line_number = line_number - script->line_offset();
- if (line_number == 0) return code_pos + script->column_offset();
- int prev_line_end_pos =
- Smi::cast(line_ends_array->get(line_number - 1))->value();
- return code_pos - (prev_line_end_pos + 1);
-}
+ DCHECK(script->line_ends()->IsFixedArray());
+ FixedArray* ends = FixedArray::cast(script->line_ends());
-int Script::GetLineNumberWithArray(int code_pos) {
- DisallowHeapAllocation no_allocation;
- DCHECK(line_ends()->IsFixedArray());
- FixedArray* line_ends_array = FixedArray::cast(line_ends());
- int line_ends_len = line_ends_array->length();
- if (line_ends_len == 0) return -1;
+ const int ends_len = ends->length();
+ if (ends_len == 0) return false;
- if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
- return line_offset();
+ // Return early on invalid positions. Negative positions behave as if 0 was
+ // passed, and positions beyond the end of the script are reported as
+ // failure.
+ if (position < 0) {
+ position = 0;
+ } else if (position > SMI_VALUE(ends->get(ends_len - 1))) {
+ return false;
}
- int left = 0;
- int right = line_ends_len;
- while (int half = (right - left) / 2) {
- if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
- right -= half;
- } else {
- left += half;
+ // Determine line number by doing a binary search on the line ends array.
+ if (SMI_VALUE(ends->get(0)) >= position) {
+ info->line = 0;
+ info->line_start = 0;
+ info->column = position;
+ } else {
+ int left = 0;
+ int right = ends_len - 1;
+
+ while (right > 0) {
+ DCHECK_LE(left, right);
+ const int mid = (left + right) / 2;
+ if (position > SMI_VALUE(ends->get(mid))) {
+ left = mid + 1;
+ } else if (position <= SMI_VALUE(ends->get(mid - 1))) {
+ right = mid - 1;
+ } else {
+ info->line = mid;
+ break;
+ }
}
+ DCHECK(SMI_VALUE(ends->get(info->line)) >= position &&
+ SMI_VALUE(ends->get(info->line - 1)) < position);
+ info->line_start = SMI_VALUE(ends->get(info->line - 1)) + 1;
+ info->column = position - info->line_start;
}
- return right + line_offset();
+
+ // Line end is the position of the linebreak character.
+ info->line_end = SMI_VALUE(ends->get(info->line));
+ if (info->line_end > 0) {
+ DCHECK(script->source()->IsString());
+ Handle<String> src(String::cast(script->source()));
+ if (src->length() >= info->line_end &&
+ src->Get(info->line_end - 1) == '\r') {
+ info->line_end--;
+ }
+ }
+
+ // Add offsets if requested.
+ if (offset_flag == WITH_OFFSET) {
+ if (info->line == 0) {
+ info->column += script->column_offset();
+ }
+ info->line += script->line_offset();
+ }
+
+ return true;
+}
+#undef SMI_VALUE
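
For reference, the line/column search above reduces to a lower-bound binary search over the sorted line-ends array. The following standalone sketch (not V8 code; it omits the '\r' trimming and line/column offset handling) shows the core computation, assuming 0 <= position <= line_ends.back():

    #include <vector>

    struct LineColumn { int line; int column; };

    // line_ends[i] is the offset of the i-th linebreak; the answer is the
    // first line whose end offset is >= position.
    static LineColumn Locate(const std::vector<int>& line_ends, int position) {
      int left = 0;
      int right = static_cast<int>(line_ends.size()) - 1;
      while (left < right) {
        int mid = (left + right) / 2;
        if (position > line_ends[mid]) {
          left = mid + 1;
        } else {
          right = mid;
        }
      }
      int line_start = (left == 0) ? 0 : line_ends[left - 1] + 1;
      return {left, position - line_start};
    }
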
+
+int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
+ PositionInfo info;
+ if (!script->GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
+ return -1;
+ }
+
+ return info.column;
+}
+
+int Script::GetLineNumberWithArray(int code_pos) {
+ PositionInfo info;
+ if (!GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
+ return -1;
+ }
+
+ return info.line;
}
@@ -13491,7 +13065,9 @@ int Script::GetLineNumber(Handle<Script> script, int code_pos) {
int Script::GetLineNumber(int code_pos) {
DisallowHeapAllocation no_allocation;
- if (!line_ends()->IsUndefined()) return GetLineNumberWithArray(code_pos);
+ if (!line_ends()->IsUndefined(GetIsolate())) {
+ return GetLineNumberWithArray(code_pos);
+ }
// Slow mode: we do not have line_ends. We have to iterate through source.
if (!source()->IsString()) return -1;
@@ -13509,28 +13085,19 @@ int Script::GetLineNumber(int code_pos) {
Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
- Handle<String> name_or_source_url_key =
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("nameOrSourceURL"));
- Handle<JSObject> script_wrapper = Script::GetWrapper(script);
- Handle<Object> property =
- JSReceiver::GetProperty(script_wrapper, name_or_source_url_key)
- .ToHandleChecked();
- DCHECK(property->IsJSFunction());
- Handle<Object> result;
- // Do not check against pending exception, since this function may be called
- // when an exception has already been pending.
- if (!Execution::TryCall(isolate, property, script_wrapper, 0, NULL)
- .ToHandle(&result)) {
- return isolate->factory()->undefined_value();
+
+ // Keep in sync with ScriptNameOrSourceURL in messages.js.
+
+ if (!script->source_url()->IsUndefined(isolate)) {
+ return handle(script->source_url(), isolate);
}
- return result;
+ return handle(script->name(), isolate);
}
Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
- if (!script->wrapper()->IsUndefined()) {
+ if (!script->wrapper()->IsUndefined(isolate)) {
DCHECK(script->wrapper()->IsWeakCell());
Handle<WeakCell> cell(WeakCell::cast(script->wrapper()));
if (!cell->cleared()) {
@@ -13560,7 +13127,8 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
SharedFunctionInfo* shared;
while ((shared = iterator.Next<SharedFunctionInfo>())) {
if (fun->function_token_position() == shared->function_token_position() &&
- fun->start_position() == shared->start_position()) {
+ fun->start_position() == shared->start_position() &&
+ fun->end_position() == shared->end_position()) {
return Handle<SharedFunctionInfo>(shared);
}
}
@@ -13694,8 +13262,9 @@ bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
}
bool SharedFunctionInfo::HasSourceCode() const {
- return !script()->IsUndefined() &&
- !reinterpret_cast<Script*>(script())->source()->IsUndefined();
+ Isolate* isolate = GetIsolate();
+ return !script()->IsUndefined(isolate) &&
+ !reinterpret_cast<Script*>(script())->source()->IsUndefined(isolate);
}
@@ -13751,9 +13320,8 @@ void JSFunction::CalculateInstanceSizeForDerivedClass(
int* instance_size, int* in_object_properties) {
Isolate* isolate = GetIsolate();
int expected_nof_properties = 0;
- for (PrototypeIterator iter(isolate, this,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (PrototypeIterator iter(isolate, this, kStartAtReceiver); !iter.IsAtEnd();
+ iter.Advance()) {
JSReceiver* current = iter.GetCurrent<JSReceiver>();
if (!current->IsJSFunction()) break;
JSFunction* func = JSFunction::cast(current);
@@ -13858,9 +13426,37 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
}
}
+namespace {
+
+// Sets the expected number of properties based on estimate from parser.
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
+ FunctionLiteral* literal) {
+ int estimate = literal->expected_property_count();
+
+ // If no properties are added in the constructor, they are more likely
+ // to be added later.
+ if (estimate == 0) estimate = 2;
+
+ // TODO(yangguo): check whether those heuristics are still up-to-date.
+ // We do not shrink objects that go into a snapshot (yet), so we adjust
+ // the estimate conservatively.
+ if (shared->GetIsolate()->serializer_enabled()) {
+ estimate += 2;
+ } else {
+ // Inobject slack tracking will reclaim redundant inobject space later,
+ // so we can afford to adjust the estimate generously.
+ estimate += 8;
+ }
+
+ shared->set_expected_nof_properties(estimate);
+}
+
+} // namespace
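
Worked example for the heuristic above: a parser estimate of 0 is first bumped to 2; with the serializer off, the generous slack then yields 2 + 8 = 10 expected in-object properties, and inobject slack tracking later reclaims whatever the constructor never uses. Under the serializer the same literal gets the conservative 2 + 2 = 4.
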
void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
+ // When adding fields here, make sure Scope::AnalyzePartially is updated
+ // accordingly.
shared_info->set_length(lit->scope()->default_function_length());
shared_info->set_internal_formal_parameter_count(lit->parameter_count());
shared_info->set_function_token_position(lit->function_token_position());
@@ -13876,20 +13472,16 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_language_mode(lit->language_mode());
shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
- shared_info->set_ast_node_count(lit->ast_node_count());
shared_info->set_is_function(lit->is_function());
- if (lit->dont_optimize_reason() != kNoReason) {
- shared_info->DisableOptimization(lit->dont_optimize_reason());
- }
- shared_info->set_dont_crankshaft(lit->flags() &
- AstProperties::kDontCrankshaft);
+ shared_info->set_never_compiled(true);
shared_info->set_kind(lit->kind());
if (!IsConstructable(lit->kind(), lit->language_mode())) {
- shared_info->set_construct_stub(
+ shared_info->SetConstructStub(
*shared_info->GetIsolate()->builtins()->ConstructedNonConstructable());
}
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_asm_function(lit->scope()->asm_function());
+ SetExpectedNofPropertiesFromEstimate(shared_info, lit);
}
@@ -13903,6 +13495,10 @@ bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
return true; // Return true if there was no DCHECK.
}
+void SharedFunctionInfo::SetConstructStub(Code* code) {
+ if (code->kind() == Code::BUILTIN) code->set_is_construct_stub(true);
+ set_construct_stub(code);
+}
void Map::StartInobjectSlackTracking() {
DCHECK(!IsInobjectSlackTrackingInProgress());
@@ -13919,14 +13515,18 @@ void Map::StartInobjectSlackTracking() {
void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
code()->ClearInlineCaches();
- // If we clear ICs, we need to clear the type feedback vector too, since
- // CallICs are synced with a feedback vector slot.
- ClearTypeFeedbackInfo();
set_ic_age(new_ic_age);
if (code()->kind() == Code::FUNCTION) {
code()->set_profiler_ticks(0);
- if (optimization_disabled() &&
- opt_count() >= FLAG_max_opt_count) {
+ if (optimization_disabled() && opt_count() >= FLAG_max_opt_count) {
+ // Re-enable optimizations if they were disabled due to opt_count limit.
+ set_optimization_disabled(false);
+ }
+ set_opt_count(0);
+ set_deopt_count(0);
+ } else if (code()->is_interpreter_trampoline_builtin()) {
+ set_profiler_ticks(0);
+ if (optimization_disabled() && opt_count() >= FLAG_max_opt_count) {
// Re-enable optimizations if they were disabled due to opt_count limit.
set_optimization_disabled(false);
}
@@ -13960,6 +13560,19 @@ int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
return -1;
}
+void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
+ if (!OptimizedCodeMapIsCleared()) {
+ FixedArray* optimized_code_map = this->optimized_code_map();
+ int length = optimized_code_map->length();
+ WeakCell* empty_weak_cell = GetHeap()->empty_weak_cell();
+ for (int i = kEntriesStart; i < length; i += kEntryLength) {
+ optimized_code_map->set(i + kCachedCodeOffset, empty_weak_cell,
+ SKIP_WRITE_BARRIER);
+ }
+ optimized_code_map->set(kSharedCodeIndex, empty_weak_cell,
+ SKIP_WRITE_BARRIER);
+ }
+}
CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
Context* native_context, BailoutId osr_ast_id) {
@@ -13986,12 +13599,6 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
: LiteralsArray::cast(literals_cell->value())};
}
}
- if (FLAG_trace_opt && !OptimizedCodeMapIsCleared() &&
- result.code == nullptr) {
- PrintF("[didn't find optimized code in optimized code map for ");
- ShortPrint();
- PrintF("]\n");
- }
return result;
}
@@ -14014,63 +13621,66 @@ const char* const VisitorSynchronization::kTagNames[
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* old_target = target;
- VisitPointer(&target);
- CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
+ Object* old_pointer = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Object* new_pointer = old_pointer;
+ VisitPointer(&new_pointer);
+ DCHECK_EQ(old_pointer, new_pointer);
}
void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Object* stub = rinfo->code_age_stub();
- if (stub) {
- VisitPointer(&stub);
+ Object* old_pointer = rinfo->code_age_stub();
+ Object* new_pointer = old_pointer;
+ if (old_pointer != nullptr) {
+ VisitPointer(&new_pointer);
+ DCHECK_EQ(old_pointer, new_pointer);
}
}
void ObjectVisitor::VisitCodeEntry(Address entry_address) {
- Object* code = Code::GetObjectFromEntryAddress(entry_address);
- Object* old_code = code;
- VisitPointer(&code);
- if (code != old_code) {
- Memory::Address_at(entry_address) = reinterpret_cast<Code*>(code)->entry();
- }
+ Object* old_pointer = Code::GetObjectFromEntryAddress(entry_address);
+ Object* new_pointer = old_pointer;
+ VisitPointer(&new_pointer);
+ DCHECK_EQ(old_pointer, new_pointer);
}
void ObjectVisitor::VisitCell(RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
- Object* cell = rinfo->target_cell();
- Object* old_cell = cell;
- VisitPointer(&cell);
- if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
- }
+ Object* old_pointer = rinfo->target_cell();
+ Object* new_pointer = old_pointer;
+ VisitPointer(&new_pointer);
+ DCHECK_EQ(old_pointer, new_pointer);
}
void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
- Object* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
- Object* old_target = target;
- VisitPointer(&target);
- CHECK_EQ(target, old_target); // VisitPointer doesn't change Code* *target.
+ Object* old_pointer =
+ Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+ Object* new_pointer = old_pointer;
+ VisitPointer(&new_pointer);
+ DCHECK_EQ(old_pointer, new_pointer);
}
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* p = rinfo->target_object();
- VisitPointer(&p);
+ Object* old_pointer = rinfo->target_object();
+ Object* new_pointer = old_pointer;
+ VisitPointer(&new_pointer);
+ DCHECK_EQ(old_pointer, new_pointer);
}
void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
- Address p = rinfo->target_external_reference();
- VisitExternalReference(&p);
+ Address old_reference = rinfo->target_external_reference();
+ Address new_reference = old_reference;
+ VisitExternalReference(&new_reference);
+ DCHECK_EQ(old_reference, new_reference);
}
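
All of the ObjectVisitor methods rewritten above share one verify-only pattern: visit a copy of the slot, then assert the visitor left it in place. A generic, self-contained sketch of that pattern (a hypothetical helper, not part of V8):

    #include <cassert>

    // Hand the visitor a copy of the slot and verify it was not relocated.
    template <typename T, typename VisitFn>
    void VisitAndVerifyUnmoved(T slot, VisitFn visit) {
      T copy = slot;
      visit(&copy);
      assert(copy == slot && "visitor must not relocate this slot");
    }
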
@@ -14109,6 +13719,14 @@ void Code::CopyFrom(const CodeDesc& desc) {
CopyBytes(instruction_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
+ // copy unwinding info, if any
+ if (desc.unwinding_info) {
+ DCHECK_GT(desc.unwinding_info_size, 0);
+ set_unwinding_info_size(desc.unwinding_info_size);
+ CopyBytes(unwinding_info_start(), desc.unwinding_info,
+ static_cast<size_t>(desc.unwinding_info_size));
+ }
+
// copy reloc info
CopyBytes(relocation_start(),
desc.buffer + desc.buffer_size - desc.reloc_size,
@@ -14156,64 +13774,6 @@ void Code::CopyFrom(const CodeDesc& desc) {
Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
}
-// Locate the source position which is closest to the code offset. This is
-// using the source position information embedded in the relocation info.
-// The position returned is relative to the beginning of the script where the
-// source for this function is found.
-int Code::SourcePosition(int code_offset) {
- Address pc = instruction_start() + code_offset;
- int distance = kMaxInt;
- int position = RelocInfo::kNoPosition; // Initially no position found.
- // Run through all the relocation info to find the best matching source
- // position. All the code needs to be considered as the sequence of the
- // instructions in the code does not necessarily follow the same order as the
- // source.
- RelocIterator it(this, RelocInfo::kPositionMask);
- while (!it.done()) {
- // Only look at positions after the current pc.
- if (it.rinfo()->pc() < pc) {
- // Get position and distance.
-
- int dist = static_cast<int>(pc - it.rinfo()->pc());
- int pos = static_cast<int>(it.rinfo()->data());
- // If this position is closer than the current candidate or if it has the
- // same distance as the current candidate and the position is higher then
- // this position is the new candidate.
- if ((dist < distance) ||
- (dist == distance && pos > position)) {
- position = pos;
- distance = dist;
- }
- }
- it.next();
- }
- DCHECK(kind() == FUNCTION || (is_optimized_code() && is_turbofanned()) ||
- is_wasm_code() || position == RelocInfo::kNoPosition);
- return position;
-}
-
-
-// Same as Code::SourcePosition above except it only looks for statement
-// positions.
-int Code::SourceStatementPosition(int code_offset) {
- // First find the position as close as possible using all position
- // information.
- int position = SourcePosition(code_offset);
- // Now find the closest statement position before the position.
- int statement_position = 0;
- RelocIterator it(this, RelocInfo::kPositionMask);
- while (!it.done()) {
- if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
- int p = static_cast<int>(it.rinfo()->data());
- if (statement_position < p && p <= position) {
- statement_position = p;
- }
- }
- it.next();
- }
- return statement_position;
-}
-
SafepointEntry Code::GetSafepointEntry(Address pc) {
SafepointTable table(this);
@@ -14275,150 +13835,55 @@ void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
}
-void Code::FindAllMaps(MapHandleList* maps) {
- DCHECK(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
- if (object->IsMap()) maps->Add(handle(Map::cast(object)));
- }
-}
-
-
-Code* Code::FindFirstHandler() {
- DCHECK(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- bool skip_next_handler = false;
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
- Object* obj = info->target_object();
- skip_next_handler |= obj->IsWeakCell() && WeakCell::cast(obj)->cleared();
- } else {
- Code* code = Code::GetCodeFromTargetAddress(info->target_address());
- if (code->kind() == Code::HANDLER) {
- if (!skip_next_handler) return code;
- skip_next_handler = false;
- }
- }
- }
- return NULL;
-}
-
-
-bool Code::FindHandlers(CodeHandleList* code_list, int length) {
- DCHECK(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- bool skip_next_handler = false;
- int i = 0;
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- if (i == length) return true;
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
- Object* obj = info->target_object();
- skip_next_handler |= obj->IsWeakCell() && WeakCell::cast(obj)->cleared();
- } else {
- Code* code = Code::GetCodeFromTargetAddress(info->target_address());
- // IC stubs with handlers never contain non-handler code objects before
- // handler targets.
- if (code->kind() != Code::HANDLER) break;
- if (!skip_next_handler) {
- code_list->Add(Handle<Code>(code));
- i++;
- }
- skip_next_handler = false;
- }
- }
- return i == length;
-}
-
-
-MaybeHandle<Code> Code::FindHandlerForMap(Map* map) {
- DCHECK(is_inline_cache_stub());
- int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- bool return_next = false;
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
- Object* object = info->target_object();
- if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
- if (object == map) return_next = true;
- } else if (return_next) {
- Code* code = Code::GetCodeFromTargetAddress(info->target_address());
- DCHECK(code->kind() == Code::HANDLER);
- return handle(code);
- }
- }
- return MaybeHandle<Code>();
-}
-
-
-Name* Code::FindFirstName() {
- DCHECK(is_inline_cache_stub());
- DisallowHeapAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsName()) return Name::cast(object);
- }
- return NULL;
-}
-
-
void Code::ClearInlineCaches() {
- ClearInlineCaches(NULL);
-}
-
-
-void Code::ClearInlineCaches(Code::Kind kind) {
- ClearInlineCaches(&kind);
-}
-
-
-void Code::ClearInlineCaches(Code::Kind* kind) {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
for (RelocIterator it(this, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
if (target->is_inline_cache_stub()) {
- if (kind == NULL || *kind == target->kind()) {
- IC::Clear(this->GetIsolate(), info->pc(),
- info->host()->constant_pool());
- }
+ IC::Clear(this->GetIsolate(), info->pc(), info->host()->constant_pool());
}
}
}
int AbstractCode::SourcePosition(int offset) {
- return IsBytecodeArray() ? GetBytecodeArray()->SourcePosition(offset)
- : GetCode()->SourcePosition(offset);
+ int position = 0;
+ // Subtract one because the current PC is one instruction after the call site.
+ if (IsCode()) offset--;
+ for (SourcePositionTableIterator iterator(source_position_table());
+ !iterator.done() && iterator.code_offset() <= offset;
+ iterator.Advance()) {
+ position = iterator.source_position();
+ }
+ return position;
}
int AbstractCode::SourceStatementPosition(int offset) {
- return IsBytecodeArray() ? GetBytecodeArray()->SourceStatementPosition(offset)
- : GetCode()->SourceStatementPosition(offset);
+ // First find the closest position.
+ int position = SourcePosition(offset);
+ // Now find the closest statement position before the position.
+ int statement_position = 0;
+ for (SourcePositionTableIterator it(source_position_table()); !it.done();
+ it.Advance()) {
+ if (it.is_statement()) {
+ int p = it.source_position();
+ if (statement_position < p && p <= position) {
+ statement_position = p;
+ }
+ }
+ }
+ return statement_position;
}
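
Both lookups above scan the source position table: SourcePosition takes the last entry at or before the code offset, and SourceStatementPosition takes the largest statement entry not after that position. An illustrative sketch over a hypothetical flat table (V8's real table is delta-encoded):

    #include <vector>

    struct PositionEntry { int code_offset; int source_position; bool is_statement; };

    // Last entry at or before |offset|; the table is sorted by code offset.
    static int SourcePositionOf(const std::vector<PositionEntry>& table, int offset) {
      int position = 0;
      for (const PositionEntry& e : table) {
        if (e.code_offset > offset) break;
        position = e.source_position;
      }
      return position;
    }

    // Largest statement position not after the closest source position.
    static int StatementPositionOf(const std::vector<PositionEntry>& table, int offset) {
      int position = SourcePositionOf(table, offset);
      int statement_position = 0;
      for (const PositionEntry& e : table) {
        if (e.is_statement && statement_position < e.source_position &&
            e.source_position <= position) {
          statement_position = e.source_position;
        }
      }
      return statement_position;
    }
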
-void SharedFunctionInfo::ClearTypeFeedbackInfo() {
- feedback_vector()->ClearSlots(this);
+void JSFunction::ClearTypeFeedbackInfo() {
+ feedback_vector()->ClearSlots(shared());
}
-
-void SharedFunctionInfo::ClearTypeFeedbackInfoAtGCTime() {
- feedback_vector()->ClearSlotsAtGCTime(this);
+void JSFunction::ClearTypeFeedbackInfoAtGCTime() {
+ feedback_vector()->ClearSlotsAtGCTime(shared());
}
-
BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
DCHECK(kind() == FUNCTION);
@@ -14441,6 +13906,12 @@ uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) {
return 0;
}
+int Code::LookupRangeInHandlerTable(int code_offset, int* data,
+ HandlerTable::CatchPrediction* prediction) {
+ DCHECK(!is_optimized_code());
+ HandlerTable* table = HandlerTable::cast(handler_table());
+ return table->LookupRange(code_offset, data, prediction);
+}
void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
@@ -14614,14 +14085,14 @@ Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
void Code::PrintDeoptLocation(FILE* out, Address pc) {
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
class SourcePosition pos = info.position;
- if (info.deopt_reason != Deoptimizer::kNoReason || !pos.IsUnknown()) {
+ if (info.deopt_reason != DeoptimizeReason::kNoReason || !pos.IsUnknown()) {
if (FLAG_hydrogen_track_positions) {
PrintF(out, " ;;; deoptimize at %d_%d: %s\n",
pos.inlining_id(), pos.position(),
- Deoptimizer::GetDeoptReason(info.deopt_reason));
+ DeoptimizeReasonToString(info.deopt_reason));
} else {
PrintF(out, " ;;; deoptimize at %d: %s\n", pos.raw(),
- Deoptimizer::GetDeoptReason(info.deopt_reason));
+ DeoptimizeReasonToString(info.deopt_reason));
}
}
}
@@ -14654,6 +14125,14 @@ const char* Code::Kind2String(Kind kind) {
return NULL;
}
+// Identify kind of code.
+const char* AbstractCode::Kind2String(Kind kind) {
+ if (kind < AbstractCode::INTERPRETED_FUNCTION)
+ return Code::Kind2String((Code::Kind)kind);
+ if (kind == AbstractCode::INTERPRETED_FUNCTION) return "INTERPRETED_FUNCTION";
+ UNREACHABLE();
+ return NULL;
+}
Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
DCHECK(code->kind() == OPTIMIZED_FUNCTION);
@@ -14677,7 +14156,6 @@ WeakCell* Code::CachedWeakCell() {
return NULL;
}
-
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(
@@ -14811,9 +14289,20 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
+ case Translation::FLOAT_REGISTER: {
+ int reg_code = iterator.Next();
+ os << "{input="
+ << RegisterConfiguration::Crankshaft()->GetFloatRegisterName(
+ reg_code)
+ << "}";
+ break;
+ }
+
case Translation::DOUBLE_REGISTER: {
int reg_code = iterator.Next();
- os << "{input=" << DoubleRegister::from_code(reg_code).ToString()
+ os << "{input="
+ << RegisterConfiguration::Crankshaft()->GetDoubleRegisterName(
+ reg_code)
<< "}";
break;
}
@@ -14842,6 +14331,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
+ case Translation::FLOAT_STACK_SLOT:
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator.Next();
os << "{input=" << input_slot_index << "}";
@@ -14886,8 +14376,9 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(
int pc_and_state = this->PcAndState(i)->value();
os << std::setw(6) << this->AstId(i).ToInt() << " " << std::setw(8)
<< FullCodeGenerator::PcField::decode(pc_and_state) << " "
- << FullCodeGenerator::State2String(
- FullCodeGenerator::StateField::decode(pc_and_state)) << "\n";
+ << Deoptimizer::BailoutStateToString(
+ FullCodeGenerator::BailoutStateField::decode(pc_and_state))
+ << "\n";
}
}
@@ -14926,28 +14417,17 @@ const char* Code::ICState2String(InlineCacheState state) {
case UNINITIALIZED: return "UNINITIALIZED";
case PREMONOMORPHIC: return "PREMONOMORPHIC";
case MONOMORPHIC: return "MONOMORPHIC";
- case PROTOTYPE_FAILURE:
- return "PROTOTYPE_FAILURE";
+ case RECOMPUTE_HANDLER:
+ return "RECOMPUTE_HANDLER";
case POLYMORPHIC: return "POLYMORPHIC";
case MEGAMORPHIC: return "MEGAMORPHIC";
case GENERIC: return "GENERIC";
- case DEBUG_STUB: return "DEBUG_STUB";
}
UNREACHABLE();
return NULL;
}
-const char* Code::StubType2String(StubType type) {
- switch (type) {
- case NORMAL: return "NORMAL";
- case FAST: return "FAST";
- }
- UNREACHABLE(); // keep the compiler happy
- return NULL;
-}
-
-
void Code::PrintExtraICState(std::ostream& os, // NOLINT
Kind kind, ExtraICState extra) {
os << "extra_ic_state = ";
@@ -14967,11 +14447,11 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "major_key = " << (n == NULL ? "null" : n) << "\n";
}
if (is_inline_cache_stub()) {
- os << "ic_state = " << ICState2String(ic_state()) << "\n";
- PrintExtraICState(os, kind(), extra_ic_state());
- if (ic_state() == MONOMORPHIC) {
- os << "type = " << StubType2String(type()) << "\n";
+ if (!IC::ICUseVector(kind())) {
+ InlineCacheState ic_state = IC::StateFromCode(this);
+ os << "ic_state = " << ICState2String(ic_state) << "\n";
}
+ PrintExtraICState(os, kind(), extra_ic_state());
if (is_compare_ic_stub()) {
DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC);
CompareICStub stub(stub_key(), GetIsolate());
@@ -15038,6 +14518,17 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
os << "\n";
+ SourcePositionTableIterator it(source_position_table());
+ if (!it.done()) {
+ os << "Source positions:\n pc offset position\n";
+ for (; !it.done(); it.Advance()) {
+ os << std::setw(10) << it.code_offset() << std::setw(10)
+ << it.source_position() << (it.is_statement() ? " statement" : "")
+ << "\n";
+ }
+ os << "\n";
+ }
+
if (kind() == FUNCTION) {
DeoptimizationOutputData* data =
DeoptimizationOutputData::cast(this->deoptimization_data());
@@ -15090,7 +14581,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "\n";
}
#ifdef OBJECT_PRINT
- if (!type_feedback_info()->IsUndefined()) {
+ if (!type_feedback_info()->IsUndefined(GetIsolate())) {
TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(os);
os << "\n";
}
@@ -15112,52 +14603,29 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
it.rinfo()->Print(GetIsolate(), os);
}
os << "\n";
-}
-#endif // ENABLE_DISASSEMBLER
-int BytecodeArray::SourcePosition(int offset) {
- int last_position = 0;
- for (interpreter::SourcePositionTableIterator iterator(
- source_position_table());
- !iterator.done() && iterator.bytecode_offset() <= offset;
- iterator.Advance()) {
- last_position = iterator.source_position();
+ if (has_unwinding_info()) {
+ os << "UnwindingInfo (size = " << unwinding_info_size() << ")\n";
+ EhFrameDisassembler eh_frame_disassembler(unwinding_info_start(),
+ unwinding_info_end());
+ eh_frame_disassembler.DisassembleToStream(os);
+ os << "\n";
}
- return last_position;
}
+#endif // ENABLE_DISASSEMBLER
-int BytecodeArray::SourceStatementPosition(int offset) {
- // First find the position as close as possible using all position
- // information.
- int position = SourcePosition(offset);
- // Now find the closest statement position before the position.
- int statement_position = 0;
- interpreter::SourcePositionTableIterator iterator(source_position_table());
- while (!iterator.done()) {
- if (iterator.is_statement()) {
- int p = iterator.source_position();
- if (statement_position < p && p <= position) {
- statement_position = p;
- }
- }
- iterator.Advance();
- }
- return statement_position;
-}
void BytecodeArray::Disassemble(std::ostream& os) {
os << "Parameter count " << parameter_count() << "\n";
os << "Frame size " << frame_size() << "\n";
- Vector<char> buf = Vector<char>::New(50);
const uint8_t* base_address = GetFirstBytecodeAddress();
- interpreter::SourcePositionTableIterator source_positions(
- source_position_table());
+ SourcePositionTableIterator source_positions(source_position_table());
interpreter::BytecodeArrayIterator iterator(handle(this));
while (!iterator.done()) {
if (!source_positions.done() &&
- iterator.current_offset() == source_positions.bytecode_offset()) {
+ iterator.current_offset() == source_positions.code_offset()) {
os << std::setw(5) << source_positions.source_position();
os << (source_positions.is_statement() ? " S> " : " E> ");
source_positions.Advance();
@@ -15165,12 +14633,14 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << " ";
}
const uint8_t* current_address = base_address + iterator.current_offset();
- SNPrintF(buf, "%p", current_address);
- os << buf.start() << " : ";
- interpreter::Bytecodes::Decode(os, current_address, parameter_count());
+ os << reinterpret_cast<const void*>(current_address) << " @ "
+ << std::setw(4) << iterator.current_offset() << " : ";
+ interpreter::BytecodeDecoder::Decode(os, current_address,
+ parameter_count());
if (interpreter::Bytecodes::IsJump(iterator.current_bytecode())) {
- SNPrintF(buf, " (%p)", base_address + iterator.GetJumpTargetOffset());
- os << buf.start();
+ const void* jump_target = base_address + iterator.GetJumpTargetOffset();
+ os << " (" << jump_target << " @ " << iterator.GetJumpTargetOffset()
+ << ")";
}
os << std::endl;
iterator.Advance();
@@ -15196,6 +14666,13 @@ void BytecodeArray::CopyBytecodesTo(BytecodeArray* to) {
from->length());
}
+int BytecodeArray::LookupRangeInHandlerTable(
+ int code_offset, int* data, HandlerTable::CatchPrediction* prediction) {
+ HandlerTable* table = HandlerTable::cast(handler_table());
+ code_offset++; // Point after current bytecode.
+ return table->LookupRange(code_offset, data, prediction);
+}
+
// static
void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
DCHECK(capacity >= 0);
@@ -15203,29 +14680,6 @@ void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
-
-// Returns false if the passed-in index is marked non-configurable, which will
-// cause the truncation operation to halt, and thus no further old values need
-// be collected.
-static bool GetOldValue(Isolate* isolate,
- Handle<JSObject> object,
- uint32_t index,
- List<Handle<Object> >* old_values,
- List<uint32_t>* indices) {
- LookupIterator it(isolate, object, index, object, LookupIterator::HIDDEN);
- CHECK(JSReceiver::GetPropertyAttributes(&it).IsJust());
- DCHECK(it.IsFound());
- if (!it.IsConfigurable()) return false;
- Handle<Object> value =
- it.state() == LookupIterator::ACCESSOR
- ? Handle<Object>::cast(isolate->factory()->the_hole_value())
- : JSReceiver::GetDataProperty(&it);
- old_values->Add(value);
- indices->Add(index);
- return true;
-}
-
-
void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
// We should never end in here with a pixel or external array.
DCHECK(array->AllowsSetLength());
@@ -15236,91 +14690,6 @@ void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
}
-MaybeHandle<Object> JSArray::ObservableSetLength(Handle<JSArray> array,
- uint32_t new_length) {
- if (!array->map()->is_observed()) {
- SetLength(array, new_length);
- return array;
- }
-
- Isolate* isolate = array->GetIsolate();
- List<uint32_t> indices;
- List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(array->length(), isolate);
- uint32_t old_length = 0;
- CHECK(old_length_handle->ToArrayLength(&old_length));
-
- int num_elements = array->NumberOfOwnElements(ALL_PROPERTIES);
- if (num_elements > 0) {
- if (old_length == static_cast<uint32_t>(num_elements)) {
- // Simple case for arrays without holes.
- for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- if (!GetOldValue(isolate, array, i, &old_values, &indices)) break;
- }
- } else {
- // For sparse arrays, only iterate over existing elements.
- // TODO(rafaelw): For fast, sparse arrays, we can avoid iterating over
- // the to-be-removed indices twice.
- Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
- array->GetOwnElementKeys(*keys, ALL_PROPERTIES);
- while (num_elements-- > 0) {
- uint32_t index = NumberToUint32(keys->get(num_elements));
- if (index < new_length) break;
- if (!GetOldValue(isolate, array, index, &old_values, &indices)) break;
- }
- }
- }
-
- SetLength(array, new_length);
-
- CHECK(array->length()->ToArrayLength(&new_length));
- if (old_length == new_length) return array;
-
- RETURN_ON_EXCEPTION(isolate, BeginPerformSplice(array), Object);
-
- for (int i = 0; i < indices.length(); ++i) {
- // For deletions where the property was an accessor, old_values[i]
- // will be the hole, which instructs EnqueueChangeRecord to elide
- // the "oldValue" property.
- RETURN_ON_EXCEPTION(
- isolate,
- JSObject::EnqueueChangeRecord(
- array, "delete", isolate->factory()->Uint32ToString(indices[i]),
- old_values[i]),
- Object);
- }
-
- RETURN_ON_EXCEPTION(isolate,
- JSObject::EnqueueChangeRecord(
- array, "update", isolate->factory()->length_string(),
- old_length_handle),
- Object);
-
- RETURN_ON_EXCEPTION(isolate, EndPerformSplice(array), Object);
-
- uint32_t index = Min(old_length, new_length);
- uint32_t add_count = new_length > old_length ? new_length - old_length : 0;
- uint32_t delete_count = new_length < old_length ? old_length - new_length : 0;
- Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- if (delete_count > 0) {
- for (int i = indices.length() - 1; i >= 0; i--) {
- // Skip deletions where the property was an accessor, leaving holes
- // in the array of old values.
- if (old_values[i]->IsTheHole()) continue;
- JSObject::AddDataElement(deleted, indices[i] - index, old_values[i], NONE)
- .Assert();
- }
-
- JSArray::SetLength(deleted, delete_count);
- }
-
- RETURN_ON_EXCEPTION(
- isolate, EnqueueSpliceRecord(array, index, deleted, add_count), Object);
-
- return array;
-}
-
-
// static
void Map::AddDependentCode(Handle<Map> map,
DependentCode::DependencyGroup group,
@@ -15648,10 +15017,10 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
bool from_javascript,
ShouldThrow should_throw) {
Isolate* isolate = proxy->GetIsolate();
- STACK_CHECK(Nothing<bool>());
+ STACK_CHECK(isolate, Nothing<bool>());
Handle<Name> trap_name = isolate->factory()->setPrototypeOf_string();
// 1. Assert: Either Type(V) is Object or Type(V) is Null.
- DCHECK(value->IsJSReceiver() || value->IsNull());
+ DCHECK(value->IsJSReceiver() || value->IsNull(isolate));
// 2. Let handler be the value of the [[ProxyHandler]] internal slot of O.
Handle<Object> handler(proxy->handler(), isolate);
// 3. If handler is null, throw a TypeError exception.
@@ -15670,7 +15039,7 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name),
Nothing<bool>());
// 7. If trap is undefined, then return target.[[SetPrototypeOf]]().
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
return JSReceiver::SetPrototype(target, value, from_javascript,
should_throw);
}
@@ -15719,46 +15088,10 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
ShouldThrow should_throw) {
Isolate* isolate = object->GetIsolate();
- const bool observed = from_javascript && object->map()->is_observed();
- Handle<Object> old_value;
- if (observed) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, old_value,
- JSReceiver::GetPrototype(isolate, object),
- Nothing<bool>());
- }
-
- Maybe<bool> result =
- SetPrototypeUnobserved(object, value, from_javascript, should_throw);
- MAYBE_RETURN(result, Nothing<bool>());
-
- if (result.FromJust() && observed) {
- Handle<Object> new_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, new_value,
- JSReceiver::GetPrototype(isolate, object),
- Nothing<bool>());
- if (!new_value->SameValue(*old_value)) {
- RETURN_ON_EXCEPTION_VALUE(
- isolate, JSObject::EnqueueChangeRecord(
- object, "setPrototype",
- isolate->factory()->proto_string(), old_value),
- Nothing<bool>());
- }
- }
-
- return result;
-}
-
-
-Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
- Handle<Object> value,
- bool from_javascript,
- ShouldThrow should_throw) {
#ifdef DEBUG
int size = object->Size();
#endif
- Isolate* isolate = object->GetIsolate();
-
if (from_javascript) {
if (object->IsAccessCheckNeeded() &&
!isolate->MayAccess(handle(isolate->context()), object)) {
@@ -15774,7 +15107,7 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
- if (!value->IsJSReceiver() && !value->IsNull()) return Just(true);
+ if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
bool dictionary_elements_in_chain =
object->map()->DictionaryElementsInPrototypeChainOnly();
@@ -15784,8 +15117,7 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
if (from_javascript) {
// Find the first object in the chain whose prototype object is not
// hidden.
- PrototypeIterator iter(isolate, real_receiver,
- PrototypeIterator::START_AT_PROTOTYPE,
+ PrototypeIterator iter(isolate, real_receiver, kStartAtPrototype,
PrototypeIterator::END_AT_NON_HIDDEN);
while (!iter.IsAtEnd()) {
// Casting to JSObject is fine because hidden prototypes are never
@@ -15800,6 +15132,13 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
// Nothing to do if prototype is already set.
if (map->prototype() == *value) return Just(true);
+ bool immutable_proto = object->map()->is_immutable_proto();
+ if (immutable_proto) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kImmutablePrototypeSet, object));
+ }
+
// From 8.6.2 Object Internal Methods
// ...
// In addition, if [[Extensible]] is false the value of the [[Class]] and
@@ -15818,7 +15157,7 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
// new prototype chain.
if (value->IsJSReceiver()) {
for (PrototypeIterator iter(isolate, JSReceiver::cast(*value),
- PrototypeIterator::START_AT_RECEIVER);
+ kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent<JSReceiver>() == *object) {
// Cycle detected.
@@ -15851,6 +15190,17 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
return Just(true);
}
+// static
+void JSObject::SetImmutableProto(Handle<JSObject> object) {
+ DCHECK(!object->IsAccessCheckNeeded()); // Never called from JS
+ Handle<Map> map(object->map());
+
+ // Nothing to do if prototype is already set.
+ if (map->is_immutable_proto()) return;
+
+ Handle<Map> new_map = Map::TransitionToImmutableProto(map);
+ object->set_map(*new_map);
+}
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
Arguments* args,
@@ -15994,12 +15344,8 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
uint32_t old_length = 0;
uint32_t new_capacity = 0;
- Handle<Object> old_length_handle;
if (object->IsJSArray()) {
CHECK(JSArray::cast(*object)->length()->ToArrayLength(&old_length));
- if (object->map()->is_observed()) {
- old_length_handle = handle(JSArray::cast(*object)->length(), isolate);
- }
}
ElementsKind kind = object->GetElementsKind();
@@ -16043,38 +15389,6 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
JSArray::cast(*object)->set_length(*new_length_handle);
}
- if (!old_length_handle.is_null() && new_length != old_length) {
- // |old_length_handle| is kept null above unless the object is observed.
- DCHECK(object->map()->is_observed());
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- Handle<String> name = isolate->factory()->Uint32ToString(index);
-
- RETURN_ON_EXCEPTION_VALUE(isolate, BeginPerformSplice(array),
- Nothing<bool>());
- RETURN_ON_EXCEPTION_VALUE(
- isolate, EnqueueChangeRecord(array, "add", name,
- isolate->factory()->the_hole_value()),
- Nothing<bool>());
- RETURN_ON_EXCEPTION_VALUE(
- isolate, EnqueueChangeRecord(array, "update",
- isolate->factory()->length_string(),
- old_length_handle),
- Nothing<bool>());
- RETURN_ON_EXCEPTION_VALUE(isolate, EndPerformSplice(array),
- Nothing<bool>());
- Handle<JSArray> deleted = isolate->factory()->NewJSArray(0);
- RETURN_ON_EXCEPTION_VALUE(isolate,
- EnqueueSpliceRecord(array, old_length, deleted,
- new_length - old_length),
- Nothing<bool>());
- } else if (object->map()->is_observed()) {
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- RETURN_ON_EXCEPTION_VALUE(
- isolate, EnqueueChangeRecord(object, "add", name,
- isolate->factory()->the_hole_value()),
- Nothing<bool>());
- }
-
return Just(true);
}
@@ -16342,10 +15656,11 @@ int JSObject::GetFastElementsUsage() {
#ifdef OBJECT_PRINT
template <typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
+ Isolate* isolate = this->GetIsolate();
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k)) {
+ if (this->IsKey(isolate, k)) {
os << "\n ";
if (k->IsString()) {
String::cast(k)->StringPrint(os);
@@ -16356,18 +15671,24 @@ void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
}
}
}
+template <typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::Print() {
+ OFStream os(stdout);
+ Print(os);
+}
#endif
template<typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) {
+ Isolate* isolate = this->GetIsolate();
int pos = 0;
int capacity = this->Capacity();
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k)) {
+ if (this->IsKey(isolate, k)) {
elements->set(pos++, this->ValueAt(i), mode);
}
}
@@ -16377,55 +15698,10 @@ void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) {
MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
bool* done) {
- *done = false;
- Isolate* isolate = it->isolate();
- // Make sure that the top context does not change when doing callbacks or
- // interceptor calls.
- AssertNoContextChange ncc(isolate);
-
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
- Handle<InterceptorInfo> interceptor = it->GetInterceptor();
- if (interceptor->getter()->IsUndefined()) {
- return isolate->factory()->undefined_value();
- }
-
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- Handle<Object> result;
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSReceiver()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, receiver, Object::ConvertReceiver(isolate, receiver), Object);
- }
- PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *holder, Object::DONT_THROW);
-
- if (it->IsElement()) {
- uint32_t index = it->index();
- v8::IndexedPropertyGetterCallback getter =
- v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
- result = args.Call(getter, index);
- } else {
- Handle<Name> name = it->name();
- DCHECK(!name->IsPrivate());
-
- if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return isolate->factory()->undefined_value();
- }
-
- v8::GenericNamedPropertyGetterCallback getter =
- v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
- interceptor->getter());
- result = args.Call(getter, name);
- }
-
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- if (result.is_null()) return isolate->factory()->undefined_value();
- *done = true;
- // Rebox handle before return
- return handle(*result, isolate);
+ return GetPropertyWithInterceptorInternal(it, it->GetInterceptor(), done);
}
-
Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> name) {
LookupIterator it = LookupIterator::PropertyOrElement(
@@ -16452,6 +15728,10 @@ Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
: Nothing<bool>();
}
+int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
+ return ((Page::kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
+ ElementsKindToShiftSize(kind));
+}
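
Illustrative arithmetic for the helper above (the constants are hypothetical, not this V8 revision's actual values): with a 512 KB maximum regular heap object size, a 16-byte FixedArrayBase header, and an element shift of 3 for 8-byte tagged pointers, the cap would be (524288 - 16) >> 3 = 65534 elements.
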
void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
Object* temp = get(i);
@@ -16565,196 +15845,46 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
}
}
-void JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
- PropertyFilter filter) {
- if (HasFastProperties()) {
- int real_size = map()->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(map()->instance_descriptors());
- for (int i = 0; i < real_size; i++) {
- PropertyDetails details = descs->GetDetails(i);
- if ((details.attributes() & filter) != 0) continue;
- if (filter & ONLY_ALL_CAN_READ) {
- if (details.kind() != kAccessor) continue;
- Object* accessors = descs->GetValue(i);
- if (!accessors->IsAccessorInfo()) continue;
- if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
- }
- Name* key = descs->GetKey(i);
- if (key->FilterKey(filter)) continue;
- keys->AddKey(key, DO_NOT_CONVERT);
- }
- } else if (IsJSGlobalObject()) {
- GlobalDictionary::CollectKeysTo(handle(global_dictionary()), keys, filter);
- } else {
- NameDictionary::CollectKeysTo(handle(property_dictionary()), keys, filter);
- }
-}
-
-
-int JSObject::NumberOfOwnElements(PropertyFilter filter) {
- // Fast case for objects with no elements.
- if (!IsJSValue() && HasFastElements()) {
- uint32_t length =
- IsJSArray()
- ? static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value())
- : static_cast<uint32_t>(FixedArrayBase::cast(elements())->length());
- if (length == 0) return 0;
- }
- // Compute the number of enumerable elements.
- return GetOwnElementKeys(NULL, filter);
-}
-
-void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
- KeyAccumulator* keys,
- PropertyFilter filter) {
- if (filter & SKIP_STRINGS) return;
- ElementsAccessor* accessor = object->GetElementsAccessor();
- accessor->CollectElementIndices(object, keys, kMaxUInt32, filter, 0);
-}
-
-
-int JSObject::GetOwnElementKeys(FixedArray* storage, PropertyFilter filter) {
- int counter = 0;
-
- // If this is a String wrapper, add the string indices first,
- // as they're guaranteed to precede the elements in numerical order
- // and ascending order is required by ECMA-262, 6th, 9.1.12.
- if (IsJSValue()) {
- Object* val = JSValue::cast(this)->value();
- if (val->IsString()) {
- String* str = String::cast(val);
- if (storage) {
- for (int i = 0; i < str->length(); i++) {
- storage->set(counter + i, Smi::FromInt(i));
- }
- }
- counter += str->length();
- }
- }
-
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_STRING_WRAPPER_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedArray::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- if (!FixedArray::cast(elements())->get(i)->IsTheHole()) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(i));
- }
- counter++;
- }
- }
- DCHECK(!storage || storage->length() >= counter);
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- int length = IsJSArray() ?
- Smi::cast(JSArray::cast(this)->length())->value() :
- FixedArrayBase::cast(elements())->length();
- for (int i = 0; i < length; i++) {
- if (!FixedDoubleArray::cast(elements())->is_the_hole(i)) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(i));
- }
- counter++;
- }
- }
- DCHECK(!storage || storage->length() >= counter);
- break;
- }
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- {
- int length = FixedArrayBase::cast(elements())->length();
- while (counter < length) {
- if (storage != NULL) {
- storage->set(counter, Smi::FromInt(counter));
- }
- counter++;
- }
- DCHECK(!storage || storage->length() >= counter);
- break;
- }
-
- case DICTIONARY_ELEMENTS:
- case SLOW_STRING_WRAPPER_ELEMENTS: {
- if (storage != NULL) {
- element_dictionary()->CopyKeysTo(storage, counter, filter,
- SeededNumberDictionary::SORTED);
- }
- counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- FixedArray* parameter_map = FixedArray::cast(elements());
- int mapped_length = parameter_map->length() - 2;
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- // Copy the keys from arguments first, because Dictionary::CopyKeysTo
- // will insert in storage starting at index 0.
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(arguments);
- if (storage != NULL) {
- dictionary->CopyKeysTo(storage, counter, filter,
- SeededNumberDictionary::UNSORTED);
- }
- counter += dictionary->NumberOfElementsFilterAttributes(filter);
- for (int i = 0; i < mapped_length; ++i) {
- if (!parameter_map->get(i + 2)->IsTheHole()) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- }
- }
- if (storage != NULL) storage->SortPairs(storage, counter);
-
- } else {
- int backing_length = arguments->length();
- int i = 0;
- for (; i < mapped_length; ++i) {
- if (!parameter_map->get(i + 2)->IsTheHole()) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- } else if (i < backing_length && !arguments->get(i)->IsTheHole()) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- }
- }
- for (; i < backing_length; ++i) {
- if (storage != NULL) storage->set(counter, Smi::FromInt(i));
- ++counter;
- }
- }
- break;
+bool JSObject::WasConstructedFromApiFunction() {
+ auto instance_type = map()->instance_type();
+ bool is_api_object = instance_type == JS_API_OBJECT_TYPE ||
+ instance_type == JS_SPECIAL_API_OBJECT_TYPE;
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ Object* maybe_constructor = map()->GetConstructor();
+ if (!maybe_constructor->IsJSFunction()) return false;
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ if (constructor->shared()->IsApiFunction()) {
+ DCHECK(is_api_object);
+ } else {
+ DCHECK(!is_api_object);
}
- case NO_ELEMENTS:
- break;
}
-
- DCHECK(!storage || storage->length() == counter);
- return counter;
+#endif
+ return is_api_object;
}
-
MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
Handle<Object> object) {
- if (object->IsUndefined()) return isolate->factory()->undefined_to_string();
- if (object->IsNull()) return isolate->factory()->null_to_string();
+ if (*object == isolate->heap()->undefined_value()) {
+ return isolate->factory()->undefined_to_string();
+ }
+ if (*object == isolate->heap()->null_value()) {
+ return isolate->factory()->null_to_string();
+ }
Handle<JSReceiver> receiver =
Object::ToObject(isolate, object).ToHandleChecked();
+ // For proxies, we must check IsArray() before get(toStringTag) to comply
+ // with the specification.
+ Maybe<bool> is_array = Nothing<bool>();
+ InstanceType instance_type = receiver->map()->instance_type();
+ if (instance_type == JS_PROXY_TYPE) {
+ is_array = Object::IsArray(receiver);
+ MAYBE_RETURN(is_array, MaybeHandle<String>());
+ }
+
Handle<String> tag;
Handle<Object> to_string_tag;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -16764,11 +15894,55 @@ MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
String);
if (to_string_tag->IsString()) {
tag = Handle<String>::cast(to_string_tag);
- }
-
- if (tag.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, tag,
- JSReceiver::BuiltinStringTag(receiver), String);
+ } else {
+ switch (instance_type) {
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ tag = handle(receiver->class_name(), isolate);
+ break;
+ case JS_ARGUMENTS_TYPE:
+ return isolate->factory()->arguments_to_string();
+ case JS_ARRAY_TYPE:
+ return isolate->factory()->array_to_string();
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_FUNCTION_TYPE:
+ return isolate->factory()->function_to_string();
+ case JS_ERROR_TYPE:
+ return isolate->factory()->error_to_string();
+ case JS_DATE_TYPE:
+ return isolate->factory()->date_to_string();
+ case JS_REGEXP_TYPE:
+ return isolate->factory()->regexp_to_string();
+ case JS_PROXY_TYPE: {
+ if (is_array.FromJust()) {
+ return isolate->factory()->array_to_string();
+ }
+ if (receiver->IsCallable()) {
+ return isolate->factory()->function_to_string();
+ }
+ return isolate->factory()->object_to_string();
+ }
+ case JS_VALUE_TYPE: {
+ Object* value = JSValue::cast(*receiver)->value();
+ if (value->IsString()) {
+ return isolate->factory()->string_to_string();
+ }
+ if (value->IsNumber()) {
+ return isolate->factory()->number_to_string();
+ }
+ if (value->IsBoolean()) {
+ return isolate->factory()->boolean_to_string();
+ }
+ if (value->IsSymbol()) {
+ return isolate->factory()->object_to_string();
+ }
+ UNREACHABLE();
+ tag = handle(receiver->class_name(), isolate);
+ break;
+ }
+ default:
+ return isolate->factory()->object_to_string();
+ }
}
IncrementalStringBuilder builder(isolate);
@@ -16778,7 +15952,6 @@ MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
return builder.Finish();
}
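
For the proxy path added above, the observable ordering matters: IsArray() can throw (for a revoked proxy) and must do so before Symbol.toStringTag is ever read. A self-contained sketch of that ordering; the receiver model and its fields are assumptions for illustration only:

#include <optional>
#include <stdexcept>
#include <string>

struct ReceiverModel {
  bool is_proxy = false;
  bool proxy_revoked = false;     // revoked proxies throw from IsArray()
  bool proxy_is_array = false;
  std::optional<std::string> to_string_tag;
};

std::string ObjectProtoToStringSketch(const ReceiverModel& r) {
  std::optional<bool> is_array;
  if (r.is_proxy) {
    // Must run before get(Symbol.toStringTag) to match the spec ordering.
    if (r.proxy_revoked) throw std::runtime_error("TypeError: revoked proxy");
    is_array = r.proxy_is_array;
  }
  std::string tag = "Object";
  if (r.to_string_tag) {
    tag = *r.to_string_tag;       // an explicit tag wins
  } else if (r.is_proxy && *is_array) {
    tag = "Array";
  }
  return "[object " + tag + "]";
}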
-
const char* Symbol::PrivateSymbolToName() const {
Heap* heap = GetIsolate()->heap();
#define SYMBOL_CHECK_AND_PRINT(name) \
@@ -16790,12 +15963,12 @@ const char* Symbol::PrivateSymbolToName() const {
void Symbol::SymbolShortPrint(std::ostream& os) {
- os << "<Symbol: " << Hash();
- if (!name()->IsUndefined()) {
+ os << "<Symbol:";
+ if (!name()->IsUndefined(GetIsolate())) {
os << " ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- String::cast(name())->StringShortPrint(&accumulator);
+ String::cast(name())->StringShortPrint(&accumulator, false);
os << accumulator.ToCString().get();
} else {
os << " (" << PrivateSymbolToName() << ")";
@@ -16847,7 +16020,7 @@ class StringSharedKey : public HashTableKey {
// collection.
Script* script(Script::cast(shared->script()));
hash ^= String::cast(script->source())->Hash();
- STATIC_ASSERT(LANGUAGE_END == 3);
+ STATIC_ASSERT(LANGUAGE_END == 2);
if (is_strict(language_mode)) hash ^= 0x8000;
hash += scope_position;
}
@@ -16913,7 +16086,6 @@ JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
flag = JSRegExp::kMultiline;
break;
case 'u':
- if (!FLAG_harmony_unicode_regexps) return JSRegExp::Flags(0);
flag = JSRegExp::kUnicode;
break;
case 'y':
@@ -16945,22 +16117,6 @@ MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern, Flags flags) {
// static
-MaybeHandle<JSRegExp> JSRegExp::New(Handle<String> pattern,
- Handle<String> flags_string) {
- Isolate* isolate = pattern->GetIsolate();
- bool success = false;
- Flags flags = RegExpFlagsFromString(flags_string, &success);
- if (!success) {
- THROW_NEW_ERROR(
- isolate,
- NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
- JSRegExp);
- }
- return New(pattern, flags);
-}
-
-
-// static
Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
Isolate* const isolate = regexp->GetIsolate();
return Handle<JSRegExp>::cast(isolate->factory()->CopyJSObject(regexp));
@@ -16973,7 +16129,13 @@ inline int CountRequiredEscapes(Handle<String> source) {
int escapes = 0;
Vector<const Char> src = source->GetCharVector<Char>();
for (int i = 0; i < src.length(); i++) {
- if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
+ if (src[i] == '\\') {
+ // Escape sequence: skip the next character.
+ i++;
+ } else if (src[i] == '/') {
+ // An unescaped forward slash needs escaping.
+ escapes++;
+ }
}
return escapes;
}
@@ -16988,7 +16150,14 @@ inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
int s = 0;
int d = 0;
while (s < src.length()) {
- if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
+ if (src[s] == '\\') {
+ // Escape sequence: copy this and the next character.
+ dst[d++] = src[s++];
+ if (s == src.length()) break;
+ } else if (src[s] == '/') {
+ // An unescaped forward slash needs escaping.
+ dst[d++] = '\\';
+ }
dst[d++] = src[s++];
}
DCHECK_EQ(result->length(), d);
@@ -17051,6 +16220,9 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
ASSIGN_RETURN_ON_EXCEPTION(isolate, escaped_source,
EscapeRegExpSource(isolate, source), JSRegExp);
+ RETURN_ON_EXCEPTION(isolate, RegExpImpl::Compile(regexp, source, flags),
+ JSRegExp);
+
regexp->set_source(*escaped_source);
regexp->set_flags(Smi::FromInt(flags));
@@ -17063,17 +16235,13 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
Smi::FromInt(0), SKIP_WRITE_BARRIER);
} else {
// Map has changed, so use generic, but slower, method.
- PropertyAttributes writable =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- JSObject::SetOwnPropertyIgnoreAttributes(
- regexp, factory->last_index_string(),
- Handle<Smi>(Smi::FromInt(0), isolate), writable)
- .Check();
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSReceiver::SetProperty(regexp, factory->last_index_string(),
+ Handle<Smi>(Smi::FromInt(0), isolate), STRICT),
+ JSRegExp);
}
- RETURN_ON_EXCEPTION(isolate, RegExpImpl::Compile(regexp, source, flags),
- JSRegExp);
-
return regexp;
}
@@ -17214,7 +16382,7 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
Factory* factory = isolate->factory();
int length = EntryToIndex(capacity);
Handle<FixedArray> array = factory->NewFixedArray(length, pretenure);
- array->set_map_no_write_barrier(*factory->hash_table_map());
+ array->set_map_no_write_barrier(Shape::GetMap(isolate));
Handle<Derived> table = Handle<Derived>::cast(array);
table->SetNumberOfElements(0);
@@ -17244,21 +16412,12 @@ int NameDictionaryBase<Derived, Shape>::FindEntry(Handle<Name> key) {
uint32_t capacity = this->Capacity();
uint32_t entry = Derived::FirstProbe(key->Hash(), capacity);
uint32_t count = 1;
-
+ Isolate* isolate = this->GetIsolate();
while (true) {
- int index = Derived::EntryToIndex(entry);
- Object* element = this->get(index);
- if (element->IsUndefined()) break; // Empty entry.
+ Object* element = this->KeyAt(entry);
+ if (element->IsUndefined(isolate)) break; // Empty entry.
if (*key == element) return entry;
- if (!element->IsUniqueName() &&
- !element->IsTheHole() &&
- Name::cast(element)->Equals(*key)) {
- // Replace a key that is a non-internalized string by the equivalent
- // internalized string for faster further lookups.
- this->set(index, *key);
- return entry;
- }
- DCHECK(element->IsTheHole() || !Name::cast(element)->Equals(*key));
+ DCHECK(element->IsTheHole(isolate) || element->IsUniqueName());
entry = Derived::NextProbe(entry, count++, capacity);
}
return Derived::kNotFound;
@@ -17283,10 +16442,13 @@ void HashTable<Derived, Shape, Key>::Rehash(
// Rehash the elements.
int capacity = this->Capacity();
+ Heap* heap = new_table->GetHeap();
+ Object* the_hole = heap->the_hole_value();
+ Object* undefined = heap->undefined_value();
for (int i = 0; i < capacity; i++) {
uint32_t from_index = EntryToIndex(i);
Object* k = this->get(from_index);
- if (IsKey(k)) {
+ if (k != the_hole && k != undefined) {
uint32_t hash = this->HashForObject(key, k);
uint32_t insertion_index =
EntryToIndex(new_table->FindInsertionEntry(hash));
@@ -17340,6 +16502,7 @@ template<typename Derived, typename Shape, typename Key>
void HashTable<Derived, Shape, Key>::Rehash(Key key) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = GetWriteBarrierMode(no_gc);
+ Isolate* isolate = GetIsolate();
uint32_t capacity = Capacity();
bool done = false;
for (int probe = 1; !done; probe++) {
@@ -17347,11 +16510,11 @@ void HashTable<Derived, Shape, Key>::Rehash(Key key) {
// are placed correctly. Other elements might need to be moved.
done = true;
for (uint32_t current = 0; current < capacity; current++) {
- Object* current_key = get(EntryToIndex(current));
- if (IsKey(current_key)) {
+ Object* current_key = KeyAt(current);
+ if (IsKey(isolate, current_key)) {
uint32_t target = EntryForProbe(key, current_key, probe, current);
if (current == target) continue;
- Object* target_key = get(EntryToIndex(target));
+ Object* target_key = KeyAt(target);
if (!IsKey(target_key) ||
EntryForProbe(key, target_key, probe, target) != target) {
// Put the current element into the correct position.
@@ -17367,12 +16530,11 @@ void HashTable<Derived, Shape, Key>::Rehash(Key key) {
}
}
// Wipe deleted entries.
- Heap* heap = GetHeap();
- Object* the_hole = heap->the_hole_value();
- Object* undefined = heap->undefined_value();
+ Object* the_hole = isolate->heap()->the_hole_value();
+ Object* undefined = isolate->heap()->undefined_value();
for (uint32_t current = 0; current < capacity; current++) {
- if (get(EntryToIndex(current)) == the_hole) {
- set(EntryToIndex(current), undefined);
+ if (KeyAt(current) == the_hole) {
+ set(EntryToIndex(current) + Derived::kEntryKeyIndex, undefined);
}
}
SetNumberOfDeletedElements(0);
@@ -17460,9 +16622,10 @@ uint32_t HashTable<Derived, Shape, Key>::FindInsertionEntry(uint32_t hash) {
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
+ Isolate* isolate = GetIsolate();
while (true) {
Object* element = KeyAt(entry);
- if (element->IsUndefined() || element->IsTheHole()) break;
+ if (!IsKey(isolate, element)) break;
entry = NextProbe(entry, count++, capacity);
}
return entry;
@@ -17498,20 +16661,23 @@ template class Dictionary<UnseededNumberDictionary,
uint32_t>;
template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- New(Isolate*, int at_least_space_for, PretenureFlag pretenure);
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::New(
+ Isolate*, int at_least_space_for, PretenureFlag pretenure,
+ MinimumCapacity capacity_option);
template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
- New(Isolate*, int at_least_space_for, PretenureFlag pretenure);
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
+ uint32_t>::New(Isolate*, int at_least_space_for,
+ PretenureFlag pretenure,
+ MinimumCapacity capacity_option);
template Handle<NameDictionary>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
- New(Isolate*, int n, PretenureFlag pretenure);
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::New(
+ Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
template Handle<GlobalDictionary>
-Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name> >::New(
- Isolate*, int n, PretenureFlag pretenure);
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::New(
+ Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
template Handle<SeededNumberDictionary>
Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
@@ -17545,6 +16711,11 @@ template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
New(Isolate*, int, MinimumCapacity, PretenureFlag);
+template Handle<ObjectHashSet> HashTable<ObjectHashSet, ObjectHashSetShape,
+ Handle<Object>>::New(Isolate*, int n,
+ MinimumCapacity,
+ PretenureFlag);
+
template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
Shrink(Handle<NameDictionary>, Handle<Name>);
@@ -17558,13 +16729,14 @@ template Handle<UnseededNumberDictionary>
uint32_t>::Shrink(Handle<UnseededNumberDictionary>, uint32_t);
template Handle<NameDictionary>
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::Add(
- Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails);
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::Add(
+ Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails,
+ int*);
template Handle<GlobalDictionary>
- Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name> >::Add(
- Handle<GlobalDictionary>, Handle<Name>, Handle<Object>,
- PropertyDetails);
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::Add(
+ Handle<GlobalDictionary>, Handle<Name>, Handle<Object>, PropertyDetails,
+ int*);
template Handle<FixedArray> Dictionary<
NameDictionary, NameDictionaryShape,
@@ -17575,18 +16747,14 @@ template Handle<FixedArray> Dictionary<
Handle<Name> >::GenerateNewEnumerationIndices(Handle<NameDictionary>);
template Handle<SeededNumberDictionary>
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- Add(Handle<SeededNumberDictionary>,
- uint32_t,
- Handle<Object>,
- PropertyDetails);
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::Add(
+ Handle<SeededNumberDictionary>, uint32_t, Handle<Object>, PropertyDetails,
+ int*);
template Handle<UnseededNumberDictionary>
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
- Add(Handle<UnseededNumberDictionary>,
- uint32_t,
- Handle<Object>,
- PropertyDetails);
+Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
+ uint32_t>::Add(Handle<UnseededNumberDictionary>, uint32_t,
+ Handle<Object>, PropertyDetails, int*);
template Handle<SeededNumberDictionary>
Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
@@ -17609,6 +16777,39 @@ template int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape,
template int NameDictionaryBase<NameDictionary, NameDictionaryShape>::FindEntry(
Handle<Name>);
+template int Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
+ NumberOfElementsFilterAttributes(PropertyFilter filter);
+
+template int Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::
+ NumberOfElementsFilterAttributes(PropertyFilter filter);
+
+template void
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
+ CopyEnumKeysTo(Handle<Dictionary<GlobalDictionary, GlobalDictionaryShape,
+ Handle<Name>>>
+ dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator);
+
+template void
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CopyEnumKeysTo(
+ Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
+ dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator);
+
+template void
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
+ CollectKeysTo(Handle<Dictionary<GlobalDictionary, GlobalDictionaryShape,
+ Handle<Name>>>
+ dictionary,
+ KeyAccumulator* keys);
+
+template void
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CollectKeysTo(
+ Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
+ dictionary,
+ KeyAccumulator* keys);
Handle<Object> JSObject::PrepareSlowElementsForSort(
Handle<JSObject> object, uint32_t limit) {
@@ -17630,7 +16831,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
DisallowHeapAllocation no_gc;
for (int i = 0; i < capacity; i++) {
Object* k = dict->KeyAt(i);
- if (!dict->IsKey(k)) continue;
+ if (!dict->IsKey(isolate, k)) continue;
DCHECK(k->IsNumber());
DCHECK(!k->IsSmi() || Smi::cast(k)->value() >= 0);
@@ -17648,7 +16849,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
uint32_t key = NumberToUint32(k);
if (key < limit) {
- if (value->IsUndefined()) {
+ if (value->IsUndefined(isolate)) {
undefs++;
} else if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
@@ -17705,8 +16906,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
uint32_t limit) {
Isolate* isolate = object->GetIsolate();
- if (object->HasSloppyArgumentsElements() ||
- object->map()->is_observed()) {
+ if (object->HasSloppyArgumentsElements()) {
return handle(Smi::FromInt(-1), isolate);
}
@@ -17799,10 +16999,10 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
// number of stores of non-undefined, non-the-hole values.
for (unsigned int i = 0; i < undefs; i++) {
Object* current = elements->get(i);
- if (current->IsTheHole()) {
+ if (current->IsTheHole(isolate)) {
holes--;
undefs--;
- } else if (current->IsUndefined()) {
+ } else if (current->IsUndefined(isolate)) {
undefs--;
} else {
continue;
@@ -17810,10 +17010,10 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
// Position i needs to be filled.
while (undefs > i) {
current = elements->get(undefs);
- if (current->IsTheHole()) {
+ if (current->IsTheHole(isolate)) {
holes--;
undefs--;
- } else if (current->IsUndefined()) {
+ } else if (current->IsUndefined(isolate)) {
undefs--;
} else {
elements->set(i, current, write_barrier);
@@ -17877,29 +17077,35 @@ void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
PropertyCell::InvalidateEntry(dictionary, entry);
}
-
-// TODO(ishell): rename to EnsureEmptyPropertyCell or something.
-Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
- Handle<JSGlobalObject> global, Handle<Name> name) {
+Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
+ Handle<JSGlobalObject> global, Handle<Name> name,
+ PropertyCellType cell_type, int* entry_out) {
+ Isolate* isolate = global->GetIsolate();
DCHECK(!global->HasFastProperties());
- auto dictionary = handle(global->global_dictionary());
+ Handle<GlobalDictionary> dictionary(global->global_dictionary(), isolate);
int entry = dictionary->FindEntry(name);
Handle<PropertyCell> cell;
if (entry != GlobalDictionary::kNotFound) {
+ if (entry_out) *entry_out = entry;
// This call should be idempotent.
DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
cell = handle(PropertyCell::cast(dictionary->ValueAt(entry)));
- DCHECK(cell->property_details().cell_type() ==
- PropertyCellType::kUninitialized ||
- cell->property_details().cell_type() ==
- PropertyCellType::kInvalidated);
- DCHECK(cell->value()->IsTheHole());
+ PropertyCellType original_cell_type = cell->property_details().cell_type();
+ DCHECK(original_cell_type == PropertyCellType::kInvalidated ||
+ original_cell_type == PropertyCellType::kUninitialized);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ if (original_cell_type == PropertyCellType::kInvalidated) {
+ cell = PropertyCell::InvalidateEntry(dictionary, entry);
+ }
+ PropertyDetails details(NONE, DATA, 0, cell_type);
+ cell->set_property_details(details);
return cell;
}
- Isolate* isolate = global->GetIsolate();
cell = isolate->factory()->NewPropertyCell();
- PropertyDetails details(NONE, DATA, 0, PropertyCellType::kUninitialized);
- dictionary = GlobalDictionary::Add(dictionary, name, cell, details);
+ PropertyDetails details(NONE, DATA, 0, cell_type);
+ dictionary =
+ GlobalDictionary::Add(dictionary, name, cell, details, entry_out);
+ // {*entry_out} is initialized inside GlobalDictionary::Add().
global->set_properties(*dictionary);
return cell;
}
@@ -18036,6 +17242,16 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<ConsString> cons = Handle<ConsString>::cast(string);
cons->set_first(*result);
cons->set_second(isolate->heap()->empty_string());
+ } else if (string->IsSlicedString()) {
+ STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
+ DisallowHeapAllocation no_gc;
+ bool one_byte = result->IsOneByteRepresentation();
+ Handle<Map> map = one_byte ? isolate->factory()->cons_one_byte_string_map()
+ : isolate->factory()->cons_string_map();
+ string->set_map(*map);
+ Handle<ConsString> cons = Handle<ConsString>::cast(string);
+ cons->set_first(*result);
+ cons->set_second(isolate->heap()->empty_string());
}
return result;
}
@@ -18096,12 +17312,26 @@ bool StringSet::Has(Handle<String> name) {
return FindEntry(*name) != kNotFound;
}
+Handle<ObjectHashSet> ObjectHashSet::Add(Handle<ObjectHashSet> set,
+ Handle<Object> key) {
+ Isolate* isolate = set->GetIsolate();
+ int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
+
+ if (!set->Has(isolate, key, hash)) {
+ set = EnsureCapacity(set, 1, key);
+ int entry = set->FindInsertionEntry(hash);
+ set->set(EntryToIndex(entry), *key);
+ set->ElementAdded();
+ }
+ return set;
+}
+
Handle<Object> CompilationCacheTable::Lookup(Handle<String> src,
Handle<Context> context,
LanguageMode language_mode) {
Isolate* isolate = GetIsolate();
Handle<SharedFunctionInfo> shared(context->closure()->shared());
- StringSharedKey key(src, shared, language_mode, RelocInfo::kNoPosition);
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
int entry = FindEntry(&key);
if (entry == kNotFound) return isolate->factory()->undefined_value();
int index = EntryToIndex(entry);
@@ -18141,24 +17371,12 @@ Handle<CompilationCacheTable> CompilationCacheTable::Put(
Handle<Context> context, LanguageMode language_mode, Handle<Object> value) {
Isolate* isolate = cache->GetIsolate();
Handle<SharedFunctionInfo> shared(context->closure()->shared());
- StringSharedKey key(src, shared, language_mode, RelocInfo::kNoPosition);
- {
- Handle<Object> k = key.AsHandle(isolate);
- DisallowHeapAllocation no_allocation_scope;
- int entry = cache->FindEntry(&key);
- if (entry != kNotFound) {
- cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, *value);
- return cache;
- }
- }
-
+ StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+ Handle<Object> k = key.AsHandle(isolate);
cache = EnsureCapacity(cache, 1, &key);
int entry = cache->FindInsertionEntry(key.Hash());
- Handle<Object> k =
- isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
cache->set(EntryToIndex(entry), *k);
- cache->set(EntryToIndex(entry) + 1, Smi::FromInt(kHashGenerations));
+ cache->set(EntryToIndex(entry) + 1, *value);
cache->ElementAdded();
return cache;
}
@@ -18251,17 +17469,13 @@ void CompilationCacheTable::Remove(Object* value) {
return;
}
-
-template<typename Derived, typename Shape, typename Key>
+template <typename Derived, typename Shape, typename Key>
Handle<Derived> Dictionary<Derived, Shape, Key>::New(
- Isolate* isolate,
- int at_least_space_for,
- PretenureFlag pretenure) {
+ Isolate* isolate, int at_least_space_for, PretenureFlag pretenure,
+ MinimumCapacity capacity_option) {
DCHECK(0 <= at_least_space_for);
- Handle<Derived> dict = DerivedHashTable::New(isolate,
- at_least_space_for,
- USE_DEFAULT_MINIMUM_CAPACITY,
- pretenure);
+ Handle<Derived> dict = DerivedHashTable::New(isolate, at_least_space_for,
+ capacity_option, pretenure);
// Initialize the next enumeration index.
dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
@@ -18272,7 +17486,8 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::New(
template <typename Derived, typename Shape, typename Key>
Handle<FixedArray> Dictionary<Derived, Shape, Key>::BuildIterationIndicesArray(
Handle<Derived> dictionary) {
- Factory* factory = dictionary->GetIsolate()->factory();
+ Isolate* isolate = dictionary->GetIsolate();
+ Factory* factory = isolate->factory();
int length = dictionary->NumberOfElements();
Handle<FixedArray> iteration_order = factory->NewFixedArray(length);
@@ -18283,7 +17498,7 @@ Handle<FixedArray> Dictionary<Derived, Shape, Key>::BuildIterationIndicesArray(
int capacity = dictionary->Capacity();
int pos = 0;
for (int i = 0; i < capacity; i++) {
- if (dictionary->IsKey(dictionary->KeyAt(i))) {
+ if (dictionary->IsKey(isolate, dictionary->KeyAt(i))) {
int index = dictionary->DetailsAt(i).dictionary_index();
iteration_order->set(pos, Smi::FromInt(i));
enumeration_order->set(pos, Smi::FromInt(index));
@@ -18385,31 +17600,28 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::AtPut(
return dictionary;
}
-
-template<typename Derived, typename Shape, typename Key>
-Handle<Derived> Dictionary<Derived, Shape, Key>::Add(
- Handle<Derived> dictionary,
- Key key,
- Handle<Object> value,
- PropertyDetails details) {
+template <typename Derived, typename Shape, typename Key>
+Handle<Derived> Dictionary<Derived, Shape, Key>::Add(Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details,
+ int* entry_out) {
// Validate that the key is absent.
SLOW_DCHECK((dictionary->FindEntry(key) == Dictionary::kNotFound));
// Check whether the dictionary should be extended.
dictionary = EnsureCapacity(dictionary, 1, key);
- AddEntry(dictionary, key, value, details, dictionary->Hash(key));
+ int entry = AddEntry(dictionary, key, value, details, dictionary->Hash(key));
+ if (entry_out) *entry_out = entry;
return dictionary;
}
-
-// Add a key, value pair to the dictionary.
-template<typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::AddEntry(
- Handle<Derived> dictionary,
- Key key,
- Handle<Object> value,
- PropertyDetails details,
- uint32_t hash) {
+// Add a key/value pair to the dictionary. Returns the entry index.
+template <typename Derived, typename Shape, typename Key>
+int Dictionary<Derived, Shape, Key>::AddEntry(Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details,
+ uint32_t hash) {
// Compute the key object.
Handle<Object> k = Shape::AsHandle(dictionary->GetIsolate(), key);
@@ -18426,20 +17638,21 @@ void Dictionary<Derived, Shape, Key>::AddEntry(
DCHECK((dictionary->KeyAt(entry)->IsNumber() ||
dictionary->KeyAt(entry)->IsName()));
dictionary->ElementAdded();
+ return entry;
}
bool SeededNumberDictionary::HasComplexElements() {
if (!requires_slow_elements()) return false;
+ Isolate* isolate = this->GetIsolate();
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k)) {
- DCHECK(!IsDeleted(i));
- PropertyDetails details = this->DetailsAt(i);
- if (details.type() == ACCESSOR_CONSTANT) return true;
- PropertyAttributes attr = details.attributes();
- if (attr & ALL_ATTRIBUTES_MASK) return true;
- }
+ if (!this->IsKey(isolate, k)) continue;
+ DCHECK(!IsDeleted(i));
+ PropertyDetails details = this->DetailsAt(i);
+ if (details.type() == ACCESSOR_CONSTANT) return true;
+ PropertyAttributes attr = details.attributes();
+ if (attr & ALL_ATTRIBUTES_MASK) return true;
}
return false;
}
@@ -18486,6 +17699,17 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry(
return Add(dictionary, key, value, PropertyDetails::Empty());
}
+Handle<UnseededNumberDictionary> UnseededNumberDictionary::DeleteKey(
+ Handle<UnseededNumberDictionary> dictionary, uint32_t key) {
+ int entry = dictionary->FindEntry(key);
+ if (entry == kNotFound) return dictionary;
+
+ Factory* factory = dictionary->GetIsolate()->factory();
+ dictionary->SetEntry(entry, factory->the_hole_value(),
+ factory->the_hole_value());
+ dictionary->ElementRemoved();
+ return dictionary->Shrink(dictionary, key);
+}
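
DeleteKey above removes an entry without clearing the slot outright. A sketch of the tombstone idea under the usual open-addressing assumption (the Slot layout and hole sentinel are illustrative, not V8's representation):

#include <vector>

constexpr int kTheHole = -1;  // hypothetical sentinel value

struct Slot { int key = kTheHole; int value = kTheHole; };

void DeleteEntrySketch(std::vector<Slot>& table, int entry, int& nof_elements) {
  // Overwrite key and value with the hole instead of emptying the slot, so
  // probe sequences for other keys that passed through it remain intact.
  table[entry] = Slot{};
  --nof_elements;
  // A real table would now consider shrinking, as DeleteKey does above.
}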
Handle<SeededNumberDictionary> SeededNumberDictionary::AtNumberPut(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
@@ -18535,11 +17759,12 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
template <typename Derived, typename Shape, typename Key>
int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
PropertyFilter filter) {
+ Isolate* isolate = this->GetIsolate();
int capacity = this->Capacity();
int result = 0;
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !k->FilterKey(filter)) {
+ if (this->IsKey(isolate, k) && !k->FilterKey(filter)) {
if (this->IsDeleted(i)) continue;
PropertyDetails details = this->DetailsAt(i);
PropertyAttributes attr = details.attributes();
@@ -18561,74 +17786,73 @@ struct EnumIndexComparator {
Dictionary* dict;
};
-
template <typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(FixedArray* storage) {
+void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(
+ Handle<Dictionary<Derived, Shape, Key>> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator) {
+ Isolate* isolate = dictionary->GetIsolate();
int length = storage->length();
- int capacity = this->Capacity();
+ int capacity = dictionary->Capacity();
int properties = 0;
for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (this->IsKey(k) && !k->IsSymbol()) {
- PropertyDetails details = this->DetailsAt(i);
- if (details.IsDontEnum() || this->IsDeleted(i)) continue;
+ Object* key = dictionary->KeyAt(i);
+ bool is_shadowing_key = false;
+ if (!dictionary->IsKey(isolate, key)) continue;
+ if (key->IsSymbol()) continue;
+ PropertyDetails details = dictionary->DetailsAt(i);
+ if (details.IsDontEnum()) {
+ if (mode == KeyCollectionMode::kIncludePrototypes) {
+ is_shadowing_key = true;
+ } else {
+ continue;
+ }
+ }
+ if (dictionary->IsDeleted(i)) continue;
+ if (is_shadowing_key) {
+ accumulator->AddShadowingKey(key);
+ continue;
+ } else {
storage->set(properties, Smi::FromInt(i));
- properties++;
- if (properties == length) break;
}
+ properties++;
+ if (properties == length) break;
}
+
CHECK_EQ(length, properties);
- EnumIndexComparator<Derived> cmp(static_cast<Derived*>(this));
+ DisallowHeapAllocation no_gc;
+ Dictionary<Derived, Shape, Key>* raw_dictionary = *dictionary;
+ FixedArray* raw_storage = *storage;
+ EnumIndexComparator<Derived> cmp(static_cast<Derived*>(*dictionary));
Smi** start = reinterpret_cast<Smi**>(storage->GetFirstElementAddress());
std::sort(start, start + length, cmp);
for (int i = 0; i < length; i++) {
- int index = Smi::cast(storage->get(i))->value();
- storage->set(i, this->KeyAt(index));
- }
-}
-
-
-template <typename Derived, typename Shape, typename Key>
-int Dictionary<Derived, Shape, Key>::CopyKeysTo(
- FixedArray* storage, int index, PropertyFilter filter,
- typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
- DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
- int start_index = index;
- int capacity = this->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* k = this->KeyAt(i);
- if (!this->IsKey(k) || k->FilterKey(filter)) continue;
- if (this->IsDeleted(i)) continue;
- PropertyDetails details = this->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- if ((attr & filter) != 0) continue;
- storage->set(index++, k);
- }
- if (sort_mode == Dictionary::SORTED) {
- storage->SortPairs(storage, index);
+ int index = Smi::cast(raw_storage->get(i))->value();
+ raw_storage->set(i, raw_dictionary->KeyAt(index));
}
- DCHECK(storage->length() >= index);
- return index - start_index;
}
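
The is_shadowing_key branch above records non-enumerable names so that for-in does not later surface same-named enumerable properties further up the prototype chain. A compact model of that rule; the data types are illustrative only:

#include <set>
#include <string>
#include <vector>

struct Prop { std::string name; bool enumerable; };

// Collect for-in keys over a prototype chain, receiver's own object first.
std::vector<std::string> ForInKeysSketch(
    const std::vector<std::vector<Prop>>& chain) {
  std::vector<std::string> keys;
  std::set<std::string> shadowed;
  for (const auto& object : chain) {
    for (const auto& p : object) {
      if (shadowed.count(p.name)) continue;  // hidden by a nearer property
      shadowed.insert(p.name);               // enumerable or not, it shadows
      if (p.enumerable) keys.push_back(p.name);
    }
  }
  return keys;
}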
template <typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::CollectKeysTo(
- Handle<Dictionary<Derived, Shape, Key> > dictionary, KeyAccumulator* keys,
- PropertyFilter filter) {
+ Handle<Dictionary<Derived, Shape, Key>> dictionary, KeyAccumulator* keys) {
+ Isolate* isolate = keys->isolate();
int capacity = dictionary->Capacity();
Handle<FixedArray> array =
- keys->isolate()->factory()->NewFixedArray(dictionary->NumberOfElements());
+ isolate->factory()->NewFixedArray(dictionary->NumberOfElements());
int array_size = 0;
-
+ PropertyFilter filter = keys->filter();
{
DisallowHeapAllocation no_gc;
Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
for (int i = 0; i < capacity; i++) {
Object* k = raw_dict->KeyAt(i);
- if (!raw_dict->IsKey(k) || k->FilterKey(filter)) continue;
+ if (!raw_dict->IsKey(isolate, k) || k->FilterKey(filter)) continue;
if (raw_dict->IsDeleted(i)) continue;
PropertyDetails details = raw_dict->DetailsAt(i);
- if ((details.attributes() & filter) != 0) continue;
+ if ((details.attributes() & filter) != 0) {
+ keys->AddShadowingKey(k);
+ continue;
+ }
if (filter & ONLY_ALL_CAN_READ) {
if (details.kind() != kAccessor) continue;
Object* accessors = raw_dict->ValueAt(i);
@@ -18646,9 +17870,23 @@ void Dictionary<Derived, Shape, Key>::CollectKeysTo(
std::sort(start, start + array_size, cmp);
}
+ bool has_seen_symbol = false;
for (int i = 0; i < array_size; i++) {
int index = Smi::cast(array->get(i))->value();
- keys->AddKey(dictionary->KeyAt(index), DO_NOT_CONVERT);
+ Object* key = dictionary->KeyAt(index);
+ if (key->IsSymbol()) {
+ has_seen_symbol = true;
+ continue;
+ }
+ keys->AddKey(key, DO_NOT_CONVERT);
+ }
+ if (has_seen_symbol) {
+ for (int i = 0; i < array_size; i++) {
+ int index = Smi::cast(array->get(i))->value();
+ Object* key = dictionary->KeyAt(index);
+ if (!key->IsSymbol()) continue;
+ keys->AddKey(key, DO_NOT_CONVERT);
+ }
}
}
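
The two passes added above emit string-named keys before symbol-named keys, which is the assumed required order for own-key collection. A standalone sketch of the same two-pass scheme over keys already sorted by enumeration index:

#include <string>
#include <vector>

struct Key { std::string name; bool is_symbol; };

std::vector<Key> OrderKeysSketch(const std::vector<Key>& by_enum_index) {
  std::vector<Key> out;
  bool has_seen_symbol = false;
  for (const Key& k : by_enum_index) {   // first pass: skip symbols
    if (k.is_symbol) { has_seen_symbol = true; continue; }
    out.push_back(k);
  }
  if (has_seen_symbol) {
    for (const Key& k : by_enum_index)   // second pass: symbols only
      if (k.is_symbol) out.push_back(k);
  }
  return out;
}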
@@ -18656,27 +17894,26 @@ void Dictionary<Derived, Shape, Key>::CollectKeysTo(
// Backwards lookup (slow).
template<typename Derived, typename Shape, typename Key>
Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
+ Isolate* isolate = this->GetIsolate();
int capacity = this->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = this->KeyAt(i);
- if (this->IsKey(k)) {
- Object* e = this->ValueAt(i);
- // TODO(dcarney): this should be templatized.
- if (e->IsPropertyCell()) {
- e = PropertyCell::cast(e)->value();
- }
- if (e == value) return k;
+ if (!this->IsKey(isolate, k)) continue;
+ Object* e = this->ValueAt(i);
+ // TODO(dcarney): this should be templatized.
+ if (e->IsPropertyCell()) {
+ e = PropertyCell::cast(e)->value();
}
+ if (e == value) return k;
}
- Heap* heap = Dictionary::GetHeap();
- return heap->undefined_value();
+ return isolate->heap()->undefined_value();
}
Object* ObjectHashTable::Lookup(Isolate* isolate, Handle<Object> key,
int32_t hash) {
DisallowHeapAllocation no_gc;
- DCHECK(IsKey(*key));
+ DCHECK(IsKey(isolate, *key));
int entry = FindEntry(isolate, key, hash);
if (entry == kNotFound) return isolate->heap()->the_hole_value();
@@ -18686,13 +17923,13 @@ Object* ObjectHashTable::Lookup(Isolate* isolate, Handle<Object> key,
Object* ObjectHashTable::Lookup(Handle<Object> key) {
DisallowHeapAllocation no_gc;
- DCHECK(IsKey(*key));
Isolate* isolate = GetIsolate();
+ DCHECK(IsKey(isolate, *key));
// If the object does not have an identity hash, it was never used as a key.
Object* hash = key->GetHash();
- if (hash->IsUndefined()) {
+ if (hash->IsUndefined(isolate)) {
return isolate->heap()->the_hole_value();
}
return Lookup(isolate, key, Smi::cast(hash)->value());
@@ -18707,10 +17944,10 @@ Object* ObjectHashTable::Lookup(Handle<Object> key, int32_t hash) {
Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value) {
- DCHECK(table->IsKey(*key));
- DCHECK(!value->IsTheHole());
-
Isolate* isolate = table->GetIsolate();
+ DCHECK(table->IsKey(isolate, *key));
+ DCHECK(!value->IsTheHole(isolate));
+
// Make sure the key object has an identity hash code.
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
@@ -18722,10 +17959,9 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value,
int32_t hash) {
- DCHECK(table->IsKey(*key));
- DCHECK(!value->IsTheHole());
-
Isolate* isolate = table->GetIsolate();
+ DCHECK(table->IsKey(isolate, *key));
+ DCHECK(!value->IsTheHole(isolate));
int entry = table->FindEntry(isolate, key, hash);
@@ -18735,7 +17971,7 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
return table;
}
- // Rehash if more than 25% of the entries are deleted entries.
+ // Rehash if more than 33% of the entries are deleted.
// TODO(jochen): Consider to shrink the fixed array in place.
if ((table->NumberOfDeletedElements() << 1) > table->NumberOfElements()) {
table->Rehash(isolate->factory()->undefined_value());
@@ -18764,10 +18000,10 @@ Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
Handle<Object> key,
bool* was_present) {
- DCHECK(table->IsKey(*key));
+ DCHECK(table->IsKey(table->GetIsolate(), *key));
Object* hash = key->GetHash();
- if (hash->IsUndefined()) {
+ if (hash->IsUndefined(table->GetIsolate())) {
*was_present = false;
return table;
}
@@ -18780,9 +18016,10 @@ Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
Handle<Object> key,
bool* was_present,
int32_t hash) {
- DCHECK(table->IsKey(*key));
+ Isolate* isolate = table->GetIsolate();
+ DCHECK(table->IsKey(isolate, *key));
- int entry = table->FindEntry(table->GetIsolate(), key, hash);
+ int entry = table->FindEntry(isolate, key, hash);
if (entry == kNotFound) {
*was_present = false;
return table;
@@ -18810,9 +18047,10 @@ void ObjectHashTable::RemoveEntry(int entry) {
Object* WeakHashTable::Lookup(Handle<HeapObject> key) {
DisallowHeapAllocation no_gc;
- DCHECK(IsKey(*key));
+ Isolate* isolate = GetIsolate();
+ DCHECK(IsKey(isolate, *key));
int entry = FindEntry(key);
- if (entry == kNotFound) return GetHeap()->the_hole_value();
+ if (entry == kNotFound) return isolate->heap()->the_hole_value();
return get(EntryToValueIndex(entry));
}
@@ -18820,7 +18058,8 @@ Object* WeakHashTable::Lookup(Handle<HeapObject> key) {
Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
Handle<HeapObject> key,
Handle<HeapObject> value) {
- DCHECK(table->IsKey(*key));
+ Isolate* isolate = key->GetIsolate();
+ DCHECK(table->IsKey(isolate, *key));
int entry = table->FindEntry(key);
// Key is already in table, just overwrite value.
if (entry != kNotFound) {
@@ -18828,7 +18067,7 @@ Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
return table;
}
- Handle<WeakCell> key_cell = key->GetIsolate()->factory()->NewWeakCell(key);
+ Handle<WeakCell> key_cell = isolate->factory()->NewWeakCell(key);
// Check whether the hash table should be extended.
table = EnsureCapacity(table, 1, key, TENURED);
@@ -18922,11 +18161,14 @@ Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Clear(
template <class Derived, class Iterator, int entrysize>
bool OrderedHashTable<Derived, Iterator, entrysize>::HasKey(
Handle<Derived> table, Handle<Object> key) {
- int entry = table->KeyToFirstEntry(*key);
+ DisallowHeapAllocation no_gc;
+ Isolate* isolate = table->GetIsolate();
+ Object* raw_key = *key;
+ int entry = table->KeyToFirstEntry(isolate, raw_key);
// Walk the chain in the bucket to find the key.
while (entry != kNotFound) {
Object* candidate_key = table->KeyAt(entry);
- if (candidate_key->SameValueZero(*key)) return true;
+ if (candidate_key->SameValueZero(raw_key)) return true;
entry = table->NextChainEntry(entry);
}
return false;
@@ -18961,25 +18203,46 @@ Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
return table;
}
+Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
+ Handle<OrderedHashSet> table, GetKeysConversion convert) {
+ Isolate* isolate = table->GetIsolate();
+ int length = table->NumberOfElements();
+ int nof_buckets = table->NumberOfBuckets();
+ // Convert the dictionary to a linear list.
+ Handle<FixedArray> result = Handle<FixedArray>::cast(table);
+ // From this point on table is no longer a valid OrderedHashSet.
+ result->set_map(isolate->heap()->fixed_array_map());
+ for (int i = 0; i < length; i++) {
+ int index = kHashTableStartIndex + nof_buckets + (i * kEntrySize);
+ Object* key = table->get(index);
+ if (convert == GetKeysConversion::kConvertToString && key->IsNumber()) {
+ key = *isolate->factory()->NumberToString(handle(key, isolate));
+ }
+ result->set(i, key);
+ }
+ result->Shrink(length);
+ return result;
+}
template<class Derived, class Iterator, int entrysize>
Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Rehash(
Handle<Derived> table, int new_capacity) {
+ Isolate* isolate = table->GetIsolate();
DCHECK(!table->IsObsolete());
Handle<Derived> new_table =
- Allocate(table->GetIsolate(),
- new_capacity,
- table->GetHeap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
+ Allocate(isolate, new_capacity,
+ isolate->heap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
int nof = table->NumberOfElements();
int nod = table->NumberOfDeletedElements();
int new_buckets = new_table->NumberOfBuckets();
int new_entry = 0;
int removed_holes_index = 0;
+ DisallowHeapAllocation no_gc;
for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
Object* key = table->KeyAt(old_entry);
- if (key->IsTheHole()) {
+ if (key->IsTheHole(isolate)) {
table->SetRemovedIndexAt(removed_holes_index++, old_entry);
continue;
}
@@ -19083,7 +18346,8 @@ void OrderedHashTableIterator<Derived, TableType>::Transition() {
template<class Derived, class TableType>
bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
DisallowHeapAllocation no_allocation;
- if (this->table()->IsUndefined()) return false;
+ Isolate* isolate = this->GetIsolate();
+ if (this->table()->IsUndefined(isolate)) return false;
Transition();
@@ -19091,7 +18355,7 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
int index = Smi::cast(this->index())->value();
int used_capacity = table->UsedCapacity();
- while (index < used_capacity && table->KeyAt(index)->IsTheHole()) {
+ while (index < used_capacity && table->KeyAt(index)->IsTheHole(isolate)) {
index++;
}
@@ -19099,7 +18363,7 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
if (index < used_capacity) return true;
- set_table(GetHeap()->undefined_value());
+ set_table(isolate->heap()->undefined_value());
return false;
}
@@ -19218,45 +18482,58 @@ bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
return was_present;
}
-// Check if there is a break point at this code offset.
-bool DebugInfo::HasBreakPoint(int code_offset) {
+// Check if there is a break point at this source position.
+bool DebugInfo::HasBreakPoint(int source_position) {
// Get the break point info object for this source position.
- Object* break_point_info = GetBreakPointInfo(code_offset);
+ Object* break_point_info = GetBreakPointInfo(source_position);
// If there is no break point info object or no break points in the break
// point info object there is no break point at this source position.
- if (break_point_info->IsUndefined()) return false;
+ if (break_point_info->IsUndefined(GetIsolate())) return false;
return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
}
-// Get the break point info object for this code offset.
-Object* DebugInfo::GetBreakPointInfo(int code_offset) {
- // Find the index of the break point info object for this code offset.
- int index = GetBreakPointInfoIndex(code_offset);
-
- // Return the break point info object if any.
- if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
- return BreakPointInfo::cast(break_points()->get(index));
+// Get the break point info object for this source position.
+Object* DebugInfo::GetBreakPointInfo(int source_position) {
+ Isolate* isolate = GetIsolate();
+ if (!break_points()->IsUndefined(isolate)) {
+ for (int i = 0; i < break_points()->length(); i++) {
+ if (!break_points()->get(i)->IsUndefined(isolate)) {
+ BreakPointInfo* break_point_info =
+ BreakPointInfo::cast(break_points()->get(i));
+ if (break_point_info->source_position() == source_position) {
+ return break_point_info;
+ }
+ }
+ }
+ }
+ return isolate->heap()->undefined_value();
}
-// Clear a break point at the specified code offset.
-void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
+bool DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
Handle<Object> break_point_object) {
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
- debug_info->GetIsolate());
- if (break_point_info->IsUndefined()) return;
- BreakPointInfo::ClearBreakPoint(
- Handle<BreakPointInfo>::cast(break_point_info),
- break_point_object);
+ Isolate* isolate = debug_info->GetIsolate();
+ if (debug_info->break_points()->IsUndefined(isolate)) return false;
+
+ for (int i = 0; i < debug_info->break_points()->length(); i++) {
+ if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
+ Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
+ BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ if (BreakPointInfo::HasBreakPointObject(break_point_info,
+ break_point_object)) {
+ BreakPointInfo::ClearBreakPoint(break_point_info, break_point_object);
+ return true;
+ }
+ }
+ return false;
}
-void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
- int source_position, int statement_position,
+void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
Handle<Object> break_point_object) {
Isolate* isolate = debug_info->GetIsolate();
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
- isolate);
- if (!break_point_info->IsUndefined()) {
+ Handle<Object> break_point_info(
+ debug_info->GetBreakPointInfo(source_position), isolate);
+ if (!break_point_info->IsUndefined(isolate)) {
BreakPointInfo::SetBreakPoint(
Handle<BreakPointInfo>::cast(break_point_info),
break_point_object);
@@ -19265,17 +18542,18 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
// Adding a new break point for a source position which did not have any
// break points before. Try to find a free slot.
+ static const int kNoBreakPointInfo = -1;
int index = kNoBreakPointInfo;
for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined()) {
+ if (debug_info->break_points()->get(i)->IsUndefined(isolate)) {
index = i;
break;
}
}
if (index == kNoBreakPointInfo) {
// No free slot - extend break point info array.
- Handle<FixedArray> old_break_points =
- Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+ Handle<FixedArray> old_break_points = Handle<FixedArray>(
+ FixedArray::cast(debug_info->break_points()), isolate);
Handle<FixedArray> new_break_points =
isolate->factory()->NewFixedArray(
old_break_points->length() +
@@ -19292,33 +18570,32 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
// Allocate new BreakPointInfo object and set the break point.
Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
- new_break_point_info->set_code_offset(code_offset);
new_break_point_info->set_source_position(source_position);
- new_break_point_info->set_statement_position(statement_position);
new_break_point_info->set_break_point_objects(
isolate->heap()->undefined_value());
BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
debug_info->break_points()->set(index, *new_break_point_info);
}
-// Get the break point objects for a code offset.
-Handle<Object> DebugInfo::GetBreakPointObjects(int code_offset) {
- Object* break_point_info = GetBreakPointInfo(code_offset);
- if (break_point_info->IsUndefined()) {
- return GetIsolate()->factory()->undefined_value();
+// Get the break point objects for a source position.
+Handle<Object> DebugInfo::GetBreakPointObjects(int source_position) {
+ Object* break_point_info = GetBreakPointInfo(source_position);
+ Isolate* isolate = GetIsolate();
+ if (break_point_info->IsUndefined(isolate)) {
+ return isolate->factory()->undefined_value();
}
return Handle<Object>(
- BreakPointInfo::cast(break_point_info)->break_point_objects(),
- GetIsolate());
+ BreakPointInfo::cast(break_point_info)->break_point_objects(), isolate);
}
// Get the total number of break points.
int DebugInfo::GetBreakPointCount() {
- if (break_points()->IsUndefined()) return 0;
+ Isolate* isolate = GetIsolate();
+ if (break_points()->IsUndefined(isolate)) return 0;
int count = 0;
for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined()) {
+ if (!break_points()->get(i)->IsUndefined(isolate)) {
BreakPointInfo* break_point_info =
BreakPointInfo::cast(break_points()->get(i));
count += break_point_info->GetBreakPointCount();
@@ -19331,9 +18608,9 @@ int DebugInfo::GetBreakPointCount() {
Handle<Object> DebugInfo::FindBreakPointInfo(
Handle<DebugInfo> debug_info, Handle<Object> break_point_object) {
Isolate* isolate = debug_info->GetIsolate();
- if (!debug_info->break_points()->IsUndefined()) {
+ if (!debug_info->break_points()->IsUndefined(isolate)) {
for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined()) {
+ if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
if (BreakPointInfo::HasBreakPointObject(break_point_info,
@@ -19346,30 +18623,12 @@ Handle<Object> DebugInfo::FindBreakPointInfo(
return isolate->factory()->undefined_value();
}
-
-// Find the index of the break point info object for the specified code
-// position.
-int DebugInfo::GetBreakPointInfoIndex(int code_offset) {
- if (break_points()->IsUndefined()) return kNoBreakPointInfo;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined()) {
- BreakPointInfo* break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->code_offset() == code_offset) {
- return i;
- }
- }
- }
- return kNoBreakPointInfo;
-}
-
-
// Remove the specified break point object.
void BreakPointInfo::ClearBreakPoint(Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
Isolate* isolate = break_point_info->GetIsolate();
// If there are no break points just ignore.
- if (break_point_info->break_point_objects()->IsUndefined()) return;
+ if (break_point_info->break_point_objects()->IsUndefined(isolate)) return;
// If there is a single break point clear it if it is the same.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
if (break_point_info->break_point_objects() == *break_point_object) {
@@ -19405,7 +18664,7 @@ void BreakPointInfo::SetBreakPoint(Handle<BreakPointInfo> break_point_info,
Isolate* isolate = break_point_info->GetIsolate();
// If there was no break point objects before just set it.
- if (break_point_info->break_point_objects()->IsUndefined()) {
+ if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
break_point_info->set_break_point_objects(*break_point_object);
return;
}
@@ -19440,7 +18699,10 @@ bool BreakPointInfo::HasBreakPointObject(
Handle<BreakPointInfo> break_point_info,
Handle<Object> break_point_object) {
// No break point.
- if (break_point_info->break_point_objects()->IsUndefined()) return false;
+ Isolate* isolate = break_point_info->GetIsolate();
+ if (break_point_info->break_point_objects()->IsUndefined(isolate)) {
+ return false;
+ }
// Single break point.
if (!break_point_info->break_point_objects()->IsFixedArray()) {
return break_point_info->break_point_objects() == *break_point_object;
@@ -19459,7 +18721,7 @@ bool BreakPointInfo::HasBreakPointObject(
// Get the number of break points.
int BreakPointInfo::GetBreakPointCount() {
// No break point.
- if (break_point_objects()->IsUndefined()) return 0;
+ if (break_point_objects()->IsUndefined(GetIsolate())) return 0;
// Single break point.
if (!break_point_objects()->IsFixedArray()) return 1;
// Multiple break points.
@@ -19660,6 +18922,62 @@ void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
}
+namespace {
+
+Script* ScriptFromJSValue(Object* in) {
+ DCHECK(in->IsJSValue());
+ JSValue* jsvalue = JSValue::cast(in);
+ DCHECK(jsvalue->value()->IsScript());
+ return Script::cast(jsvalue->value());
+}
+
+} // namespace
+
+int JSMessageObject::GetLineNumber() const {
+ if (start_position() == -1) return Message::kNoLineNumberInfo;
+
+ Handle<Script> the_script = handle(ScriptFromJSValue(script()));
+
+ Script::PositionInfo info;
+ const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+ return Message::kNoLineNumberInfo;
+ }
+
+ return info.line + 1;
+}
+
+int JSMessageObject::GetColumnNumber() const {
+ if (start_position() == -1) return -1;
+
+ Handle<Script> the_script = handle(ScriptFromJSValue(script()));
+
+ Script::PositionInfo info;
+ const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+ return -1;
+ }
+
+ return info.column;  // Note: no '+1', in contrast to GetLineNumber.
+}
+
+Handle<String> JSMessageObject::GetSourceLine() const {
+ Handle<Script> the_script = handle(ScriptFromJSValue(script()));
+
+ Isolate* isolate = the_script->GetIsolate();
+ if (the_script->type() == Script::TYPE_WASM) {
+ return isolate->factory()->empty_string();
+ }
+
+ Script::PositionInfo info;
+ const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+ return isolate->factory()->empty_string();
+ }
+
+ Handle<String> src = handle(String::cast(the_script->source()), isolate);
+ return isolate->factory()->NewSubString(src, info.line_start, info.line_end);
+}
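
All three accessors above funnel through Script::GetPositionInfo. A plausible core of such a lookup, assuming a sorted table of line-start offsets (a sketch, not V8's implementation), is a binary search:

#include <algorithm>
#include <vector>

struct SourceLocation { int line; int column; };  // both zero-based

SourceLocation LocateSketch(const std::vector<int>& line_starts, int pos) {
  // The first line start strictly greater than pos, minus one, is pos's line.
  auto it = std::upper_bound(line_starts.begin(), line_starts.end(), pos);
  int line = static_cast<int>(it - line_starts.begin()) - 1;
  return {line, pos - line_starts[line]};
}

GetLineNumber then reports info.line + 1 (one-based), while GetColumnNumber returns the zero-based column unchanged, matching the comment above.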
void JSArrayBuffer::Neuter() {
CHECK(is_neuterable());
@@ -19776,20 +19094,19 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
return MaterializeArrayBuffer(self);
}
-
Handle<PropertyCell> PropertyCell::InvalidateEntry(
Handle<GlobalDictionary> dictionary, int entry) {
Isolate* isolate = dictionary->GetIsolate();
// Swap with a copy.
DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- auto new_cell = isolate->factory()->NewPropertyCell();
+ Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell();
new_cell->set_value(cell->value());
dictionary->ValueAtPut(entry, *new_cell);
- bool is_the_hole = cell->value()->IsTheHole();
+ bool is_the_hole = cell->value()->IsTheHole(isolate);
// Cell is officially mutable henceforth.
PropertyDetails details = cell->property_details();
- details = details.set_cell_type(is_the_hole ? PropertyCellType::kInvalidated
+ details = details.set_cell_type(is_the_hole ? PropertyCellType::kUninitialized
: PropertyCellType::kMutable);
new_cell->set_property_details(details);
// Old cell is ready for invalidation.
@@ -19830,12 +19147,13 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
Handle<Object> value,
PropertyDetails details) {
PropertyCellType type = details.cell_type();
- DCHECK(!value->IsTheHole());
- if (cell->value()->IsTheHole()) {
+ Isolate* isolate = cell->GetIsolate();
+ DCHECK(!value->IsTheHole(isolate));
+ if (cell->value()->IsTheHole(isolate)) {
switch (type) {
// Only allow a cell to transition once into constant state.
case PropertyCellType::kUninitialized:
- if (value->IsUndefined()) return PropertyCellType::kUndefined;
+ if (value->IsUndefined(isolate)) return PropertyCellType::kUndefined;
return PropertyCellType::kConstant;
case PropertyCellType::kInvalidated:
return PropertyCellType::kMutable;
@@ -19862,10 +19180,11 @@ PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
return PropertyCellType::kMutable;
}
-
-void PropertyCell::UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
- Handle<Object> value, PropertyDetails details) {
- DCHECK(!value->IsTheHole());
+Handle<PropertyCell> PropertyCell::PrepareForValue(
+ Handle<GlobalDictionary> dictionary, int entry, Handle<Object> value,
+ PropertyDetails details) {
+ Isolate* isolate = dictionary->GetIsolate();
+ DCHECK(!value->IsTheHole(isolate));
DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
const PropertyDetails original_details = cell->property_details();
@@ -19876,11 +19195,9 @@ void PropertyCell::UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
PropertyCellType old_type = original_details.cell_type();
// Preserve the enumeration index unless the property was deleted or never
// initialized.
- if (cell->value()->IsTheHole()) {
+ if (cell->value()->IsTheHole(isolate)) {
index = dictionary->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index + 1);
- // Negative lookup cells must be invalidated.
- invalidate = true;
}
DCHECK(index > 0);
details = details.set_index(index);
@@ -19888,18 +19205,17 @@ void PropertyCell::UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
PropertyCellType new_type = UpdatedType(cell, value, original_details);
if (invalidate) cell = PropertyCell::InvalidateEntry(dictionary, entry);
- // Install new property details and cell value.
+ // Install new property details.
details = details.set_cell_type(new_type);
cell->set_property_details(details);
- cell->set_value(*value);
// Deopt when transitioning from a constant type.
if (!invalidate && (old_type != new_type ||
original_details.IsReadOnly() != details.IsReadOnly())) {
- Isolate* isolate = dictionary->GetIsolate();
cell->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
}
+ return cell;
}
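// --- Editor's illustrative sketch (not part of the patch) ---
// The hole-to-value transitions visible in UpdatedType() above, condensed
// into a standalone function. Only the cases shown in this hunk are
// modeled; everything else is folded into a conservative kMutable
// fallback, so treat this as an approximation of the full lattice.
enum class CellType { kUninitialized, kUndefined, kConstant, kInvalidated, kMutable };

CellType TransitionFromHole(CellType type, bool value_is_undefined) {
  switch (type) {
    case CellType::kUninitialized:
      // A cell may transition into the constant state only once.
      return value_is_undefined ? CellType::kUndefined : CellType::kConstant;
    case CellType::kInvalidated:
      return CellType::kMutable;
    default:
      return CellType::kMutable;
  }
}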
@@ -19914,5 +19230,56 @@ void PropertyCell::SetValueWithInvalidation(Handle<PropertyCell> cell,
}
}
+int JSGeneratorObject::source_position() const {
+ CHECK(is_suspended());
+ AbstractCode* code;
+ int code_offset;
+ if (function()->shared()->HasBytecodeArray()) {
+ // New-style generators.
+ DCHECK(!function()->shared()->HasBaselineCode());
+ code_offset = Smi::cast(input_or_debug_pos())->value();
+ // The stored bytecode offset is relative to a different base than what
+ // is used in the source position table, hence the subtraction.
+ code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+ code = AbstractCode::cast(function()->shared()->bytecode_array());
+ } else {
+ // Old-style generators.
+ DCHECK(function()->shared()->HasBaselineCode());
+ code_offset = continuation();
+ CHECK(0 <= code_offset);
+ CHECK(code_offset < function()->code()->instruction_size());
+ code = AbstractCode::cast(function()->shared()->code());
+ }
+ return code->SourcePosition(code_offset);
+}
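// --- Editor's illustrative sketch (not part of the patch) ---
// The subtraction above rebases the stored offset from "relative to the
// tagged BytecodeArray pointer" to "relative to the first bytecode", which
// is what the source position table expects. With assumed constants
// (kHeaderSize is architecture-dependent; kHeapObjectTag is 1 in V8):
#include <cassert>

int main() {
  const int kHeaderSize = 56;    // assumption for illustration only
  const int kHeapObjectTag = 1;
  int stored_offset = (kHeaderSize - kHeapObjectTag) + 10;  // tagged-base form
  int table_offset = stored_offset - (kHeaderSize - kHeapObjectTag);
  assert(table_offset == 10);  // now relative to the first bytecode
  return 0;
}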
+
+// static
+AccessCheckInfo* AccessCheckInfo::Get(Isolate* isolate,
+ Handle<JSObject> receiver) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(receiver->map()->is_access_check_needed());
+ Object* maybe_constructor = receiver->map()->GetConstructor();
+ // Might happen for a detached context.
+ if (!maybe_constructor->IsJSFunction()) return nullptr;
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ // Might happen for the debug context.
+ if (!constructor->shared()->IsApiFunction()) return nullptr;
+
+ Object* data_obj =
+ constructor->shared()->get_api_func_data()->access_check_info();
+ if (data_obj->IsUndefined(isolate)) return nullptr;
+
+ return AccessCheckInfo::cast(data_obj);
+}
+
+bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
+ for (PrototypeIterator iter(isolate, this, kStartAtReceiver,
+ PrototypeIterator::END_AT_NULL);
+ !iter.IsAtEnd(); iter.AdvanceIgnoringProxies()) {
+ if (iter.GetCurrent<Object>()->IsJSProxy()) return true;
+ }
+ return false;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index abced2d4ba..b7c67030c5 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -6,21 +6,22 @@
#define V8_OBJECTS_H_
#include <iosfwd>
+#include <memory>
#include "src/assert-scope.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
+#include "src/builtins/builtins.h"
#include "src/checks.h"
#include "src/elements-kind.h"
#include "src/field-index.h"
#include "src/flags.h"
#include "src/list.h"
+#include "src/messages.h"
#include "src/property-details.h"
-#include "src/unicode.h"
#include "src/unicode-decoder.h"
+#include "src/unicode.h"
#include "src/zone.h"
#if V8_TARGET_ARCH_ARM
@@ -64,7 +65,6 @@
// - JSRegExp
// - JSFunction
// - JSGeneratorObject
-// - JSModule
// - JSGlobalObject
// - JSGlobalProxy
// - JSValue
@@ -90,8 +90,9 @@
// - Context
// - TypeFeedbackMetadata
// - TypeFeedbackVector
-// - ScopeInfo
+// - TemplateList
// - TransitionArray
+// - ScopeInfo
// - ScriptContextTable
// - WeakFixedArray
// - FixedDoubleArray
@@ -170,16 +171,6 @@ enum KeyedAccessStoreMode {
};
-// Valid hints for the abstract operation ToPrimitive,
-// implemented according to ES6, section 7.1.1.
-enum class ToPrimitiveHint { kDefault, kNumber, kString };
-
-
-// Valid hints for the abstract operation OrdinaryToPrimitive,
-// implemented according to ES6, section 7.1.1.
-enum class OrdinaryToPrimitiveHint { kNumber, kString };
-
-
enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@@ -309,7 +300,7 @@ const int kVariableSizeSentinel = 0;
// We may store the unsigned bit field as signed Smi value and do not
// use the sign bit.
-const int kStubMajorKeyBits = 7;
+const int kStubMajorKeyBits = 8;
const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// All Maps have a field instance_type containing a InstanceType.
@@ -398,8 +389,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(ALLOCATION_MEMENTO_TYPE) \
V(ALLOCATION_SITE_TYPE) \
V(SCRIPT_TYPE) \
- V(CODE_CACHE_TYPE) \
- V(POLYMORPHIC_CODE_CACHE_TYPE) \
V(TYPE_FEEDBACK_INFO_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(BOX_TYPE) \
@@ -417,11 +406,13 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_VALUE_TYPE) \
V(JS_DATE_TYPE) \
V(JS_OBJECT_TYPE) \
+ V(JS_ARGUMENTS_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
V(JS_GENERATOR_OBJECT_TYPE) \
V(JS_MODULE_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_API_OBJECT_TYPE) \
V(JS_SPECIAL_API_OBJECT_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
@@ -436,6 +427,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_WEAK_SET_TYPE) \
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
+ V(JS_ERROR_TYPE) \
\
V(JS_BOUND_FUNCTION_TYPE) \
V(JS_FUNCTION_TYPE) \
@@ -514,8 +506,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(SCRIPT, Script, script) \
V(ALLOCATION_SITE, AllocationSite, allocation_site) \
V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
- V(CODE_CACHE, CodeCache, code_cache) \
- V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
V(DEBUG_INFO, DebugInfo, debug_info) \
@@ -685,8 +675,6 @@ enum InstanceType {
ALLOCATION_SITE_TYPE,
ALLOCATION_MEMENTO_TYPE,
SCRIPT_TYPE,
- CODE_CACHE_TYPE,
- POLYMORPHIC_CODE_CACHE_TYPE,
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
BOX_TYPE,
@@ -707,13 +695,17 @@ enum InstanceType {
// compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
JS_GLOBAL_OBJECT_TYPE, // FIRST_JS_OBJECT_TYPE
- // Like JS_OBJECT_TYPE, but requires access checks and/or has interceptors.
+ JS_GLOBAL_PROXY_TYPE,
+ // Like JS_API_OBJECT_TYPE, but requires access checks and/or has
+ // interceptors.
JS_SPECIAL_API_OBJECT_TYPE, // LAST_SPECIAL_RECEIVER_TYPE
JS_VALUE_TYPE, // LAST_CUSTOM_ELEMENTS_RECEIVER
- JS_OBJECT_TYPE,
- JS_GLOBAL_PROXY_TYPE,
JS_MESSAGE_OBJECT_TYPE,
JS_DATE_TYPE,
+ // Like JS_OBJECT_TYPE, but created from API function.
+ JS_API_OBJECT_TYPE,
+ JS_OBJECT_TYPE,
+ JS_ARGUMENTS_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GENERATOR_OBJECT_TYPE,
JS_MODULE_TYPE,
@@ -729,6 +721,7 @@ enum InstanceType {
JS_WEAK_SET_TYPE,
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
+ JS_ERROR_TYPE,
JS_BOUND_FUNCTION_TYPE,
JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
@@ -768,6 +761,7 @@ enum InstanceType {
};
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
+STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
@@ -775,22 +769,57 @@ STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
std::ostream& operator<<(std::ostream& os, InstanceType instance_type);
-
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
- V(FAST_ELEMENTS_SUB_TYPE) \
- V(DICTIONARY_ELEMENTS_SUB_TYPE) \
- V(FAST_PROPERTIES_SUB_TYPE) \
- V(DICTIONARY_PROPERTIES_SUB_TYPE) \
- V(MAP_CODE_CACHE_SUB_TYPE) \
- V(SCOPE_INFO_SUB_TYPE) \
- V(STRING_TABLE_SUB_TYPE) \
- V(DESCRIPTOR_ARRAY_SUB_TYPE)
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
+ V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
+ V(BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE) \
+ V(CODE_STUBS_TABLE_SUB_TYPE) \
+ V(COMPILATION_CACHE_TABLE_SUB_TYPE) \
+ V(CONTEXT_SUB_TYPE) \
+ V(COPY_ON_WRITE_SUB_TYPE) \
+ V(DEOPTIMIZATION_DATA_SUB_TYPE) \
+ V(DESCRIPTOR_ARRAY_SUB_TYPE) \
+ V(EMBEDDED_OBJECT_SUB_TYPE) \
+ V(ENUM_CACHE_SUB_TYPE) \
+ V(ENUM_INDICES_CACHE_SUB_TYPE) \
+ V(DEPENDENT_CODE_SUB_TYPE) \
+ V(DICTIONARY_ELEMENTS_SUB_TYPE) \
+ V(DICTIONARY_PROPERTIES_SUB_TYPE) \
+ V(EMPTY_PROPERTIES_DICTIONARY_SUB_TYPE) \
+ V(FAST_ELEMENTS_SUB_TYPE) \
+ V(FAST_PROPERTIES_SUB_TYPE) \
+ V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
+ V(HANDLER_TABLE_SUB_TYPE) \
+ V(INTRINSIC_FUNCTION_NAMES_SUB_TYPE) \
+ V(JS_COLLECTION_SUB_TYPE) \
+ V(JS_WEAK_COLLECTION_SUB_TYPE) \
+ V(LITERALS_ARRAY_SUB_TYPE) \
+ V(MAP_CODE_CACHE_SUB_TYPE) \
+ V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE) \
+ V(NUMBER_STRING_CACHE_SUB_TYPE) \
+ V(OBJECT_TO_CODE_SUB_TYPE) \
+ V(OPTIMIZED_CODE_LITERALS_SUB_TYPE) \
+ V(OPTIMIZED_CODE_MAP_SUB_TYPE) \
+ V(PROTOTYPE_USERS_SUB_TYPE) \
+ V(REGEXP_MULTIPLE_CACHE_SUB_TYPE) \
+ V(RETAINED_MAPS_SUB_TYPE) \
+ V(SCOPE_INFO_SUB_TYPE) \
+ V(SCRIPT_LIST_SUB_TYPE) \
+ V(SERIALIZED_TEMPLATES_SUB_TYPE) \
+ V(SHARED_FUNCTION_INFOS_SUB_TYPE) \
+ V(SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE) \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE) \
+ V(STRING_SPLIT_CACHE_SUB_TYPE) \
+ V(STRING_TABLE_SUB_TYPE) \
+ V(TEMPLATE_INFO_SUB_TYPE) \
+ V(TYPE_FEEDBACK_VECTOR_SUB_TYPE) \
+ V(TYPE_FEEDBACK_METADATA_SUB_TYPE) \
+ V(WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE)
enum FixedArraySubInstanceType {
#define DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE(name) name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE)
#undef DEFINE_FIXED_ARRAY_SUB_INSTANCE_TYPE
- LAST_FIXED_ARRAY_SUB_TYPE = DESCRIPTOR_ARRAY_SUB_TYPE
+ LAST_FIXED_ARRAY_SUB_TYPE = WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE
};
@@ -857,10 +886,11 @@ class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
class TypeFeedbackInfo;
+class TypeFeedbackMetadata;
class TypeFeedbackVector;
class WeakCell;
class TransitionArray;
-
+class TemplateList;
// A template-ized version of the IsXXX functions.
template <class C> inline bool Is(Object* obj);
@@ -884,134 +914,139 @@ template <class C> inline bool Is(Object* obj);
V(Primitive) \
V(Number)
-#define HEAP_OBJECT_TYPE_LIST(V) \
- V(HeapNumber) \
- V(MutableHeapNumber) \
- V(Simd128Value) \
- V(Float32x4) \
- V(Int32x4) \
- V(Uint32x4) \
- V(Bool32x4) \
- V(Int16x8) \
- V(Uint16x8) \
- V(Bool16x8) \
- V(Int8x16) \
- V(Uint8x16) \
- V(Bool8x16) \
- V(Name) \
- V(UniqueName) \
- V(String) \
- V(SeqString) \
- V(ExternalString) \
- V(ConsString) \
- V(SlicedString) \
- V(ExternalTwoByteString) \
- V(ExternalOneByteString) \
- V(SeqTwoByteString) \
- V(SeqOneByteString) \
- V(InternalizedString) \
- V(Symbol) \
- \
- V(FixedTypedArrayBase) \
- V(FixedUint8Array) \
- V(FixedInt8Array) \
- V(FixedUint16Array) \
- V(FixedInt16Array) \
- V(FixedUint32Array) \
- V(FixedInt32Array) \
- V(FixedFloat32Array) \
- V(FixedFloat64Array) \
- V(FixedUint8ClampedArray) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(FreeSpace) \
- V(JSReceiver) \
- V(JSObject) \
- V(JSContextExtensionObject) \
- V(JSGeneratorObject) \
- V(JSModule) \
- V(Map) \
- V(DescriptorArray) \
- V(TransitionArray) \
- V(LiteralsArray) \
- V(TypeFeedbackMetadata) \
- V(TypeFeedbackVector) \
- V(DeoptimizationInputData) \
- V(DeoptimizationOutputData) \
- V(DependentCode) \
- V(HandlerTable) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(WeakFixedArray) \
- V(ArrayList) \
- V(Context) \
- V(ScriptContextTable) \
- V(NativeContext) \
- V(ScopeInfo) \
- V(JSBoundFunction) \
- V(JSFunction) \
- V(Code) \
- V(AbstractCode) \
- V(Oddball) \
- V(SharedFunctionInfo) \
- V(JSValue) \
- V(JSDate) \
- V(JSMessageObject) \
- V(StringWrapper) \
- V(Foreign) \
- V(Boolean) \
- V(JSArray) \
- V(JSArrayBuffer) \
- V(JSArrayBufferView) \
- V(JSTypedArray) \
- V(JSDataView) \
- V(JSProxy) \
- V(JSSet) \
- V(JSMap) \
- V(JSSetIterator) \
- V(JSMapIterator) \
- V(JSWeakCollection) \
- V(JSWeakMap) \
- V(JSWeakSet) \
- V(JSRegExp) \
- V(HashTable) \
- V(Dictionary) \
- V(StringTable) \
- V(StringSet) \
- V(NormalizedMapCache) \
- V(CompilationCacheTable) \
- V(CodeCacheHashTable) \
- V(PolymorphicCodeCacheHashTable) \
- V(MapCache) \
- V(JSGlobalObject) \
- V(JSGlobalProxy) \
- V(Undetectable) \
- V(AccessCheckNeeded) \
- V(Callable) \
- V(Function) \
- V(Constructor) \
- V(TemplateInfo) \
- V(Filler) \
- V(FixedArrayBase) \
- V(External) \
- V(Struct) \
- V(Cell) \
- V(PropertyCell) \
- V(WeakCell) \
- V(ObjectHashTable) \
- V(WeakHashTable) \
+#define HEAP_OBJECT_TYPE_LIST(V) \
+ V(HeapNumber) \
+ V(MutableHeapNumber) \
+ V(Simd128Value) \
+ V(Float32x4) \
+ V(Int32x4) \
+ V(Uint32x4) \
+ V(Bool32x4) \
+ V(Int16x8) \
+ V(Uint16x8) \
+ V(Bool16x8) \
+ V(Int8x16) \
+ V(Uint8x16) \
+ V(Bool8x16) \
+ V(Name) \
+ V(UniqueName) \
+ V(String) \
+ V(SeqString) \
+ V(ExternalString) \
+ V(ConsString) \
+ V(SlicedString) \
+ V(ExternalTwoByteString) \
+ V(ExternalOneByteString) \
+ V(SeqTwoByteString) \
+ V(SeqOneByteString) \
+ V(InternalizedString) \
+ V(Symbol) \
+ \
+ V(FixedTypedArrayBase) \
+ V(FixedUint8Array) \
+ V(FixedInt8Array) \
+ V(FixedUint16Array) \
+ V(FixedInt16Array) \
+ V(FixedUint32Array) \
+ V(FixedInt32Array) \
+ V(FixedFloat32Array) \
+ V(FixedFloat64Array) \
+ V(FixedUint8ClampedArray) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(FreeSpace) \
+ V(JSReceiver) \
+ V(JSObject) \
+ V(JSContextExtensionObject) \
+ V(JSGeneratorObject) \
+ V(Map) \
+ V(DescriptorArray) \
+ V(TransitionArray) \
+ V(LiteralsArray) \
+ V(TypeFeedbackMetadata) \
+ V(TypeFeedbackVector) \
+ V(DeoptimizationInputData) \
+ V(DeoptimizationOutputData) \
+ V(DependentCode) \
+ V(HandlerTable) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(WeakFixedArray) \
+ V(ArrayList) \
+ V(Context) \
+ V(ScriptContextTable) \
+ V(NativeContext) \
+ V(ScopeInfo) \
+ V(JSBoundFunction) \
+ V(JSFunction) \
+ V(Code) \
+ V(AbstractCode) \
+ V(Oddball) \
+ V(SharedFunctionInfo) \
+ V(JSValue) \
+ V(JSDate) \
+ V(JSMessageObject) \
+ V(StringWrapper) \
+ V(Foreign) \
+ V(Boolean) \
+ V(JSArray) \
+ V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
+ V(JSCollection) \
+ V(JSTypedArray) \
+ V(JSDataView) \
+ V(JSProxy) \
+ V(JSError) \
+ V(JSPromise) \
+ V(JSSet) \
+ V(JSMap) \
+ V(JSSetIterator) \
+ V(JSMapIterator) \
+ V(JSWeakCollection) \
+ V(JSWeakMap) \
+ V(JSWeakSet) \
+ V(JSRegExp) \
+ V(HashTable) \
+ V(Dictionary) \
+ V(UnseededNumberDictionary) \
+ V(StringTable) \
+ V(StringSet) \
+ V(NormalizedMapCache) \
+ V(CompilationCacheTable) \
+ V(CodeCacheHashTable) \
+ V(MapCache) \
+ V(JSGlobalObject) \
+ V(JSGlobalProxy) \
+ V(Undetectable) \
+ V(AccessCheckNeeded) \
+ V(Callable) \
+ V(Function) \
+ V(Constructor) \
+ V(TemplateInfo) \
+ V(Filler) \
+ V(FixedArrayBase) \
+ V(External) \
+ V(Struct) \
+ V(Cell) \
+ V(TemplateList) \
+ V(PropertyCell) \
+ V(WeakCell) \
+ V(ObjectHashTable) \
+ V(ObjectHashSet) \
+ V(WeakHashTable) \
V(OrderedHashTable)
-#define ODDBALL_LIST(V) \
- V(Undefined) \
- V(Null) \
- V(TheHole) \
- V(Exception) \
- V(Uninitialized) \
- V(True) \
- V(False) \
- V(ArgumentsMarker) \
- V(OptimizedOut)
+#define ODDBALL_LIST(V) \
+ V(Undefined, undefined_value) \
+ V(Null, null_value) \
+ V(TheHole, the_hole_value) \
+ V(Exception, exception) \
+ V(Uninitialized, uninitialized_value) \
+ V(True, true_value) \
+ V(False, false_value) \
+ V(ArgumentsMarker, arguments_marker) \
+ V(OptimizedOut, optimized_out) \
+ V(StaleRegister, stale_register)
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
@@ -1027,9 +1062,12 @@ class Object {
// Type testing.
bool IsObject() const { return true; }
-#define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const);
+#define IS_TYPE_FUNCTION_DECL(Type) INLINE(bool Is##Type() const);
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ INLINE(bool Is##Type(Isolate* isolate) const);
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
@@ -1070,10 +1108,8 @@ class Object {
INLINE(bool IsNameDictionary() const);
INLINE(bool IsGlobalDictionary() const);
INLINE(bool IsSeededNumberDictionary() const);
- INLINE(bool IsUnseededNumberDictionary() const);
INLINE(bool IsOrderedHashSet() const);
INLINE(bool IsOrderedHashMap() const);
- static bool IsPromise(Handle<Object> object);
// Extract the number.
inline double Number() const;
@@ -1162,10 +1198,22 @@ class Object {
MUST_USE_RESULT static MaybeHandle<String> ToString(Isolate* isolate,
Handle<Object> input);
+ static Handle<String> NoSideEffectsToString(Isolate* isolate,
+ Handle<Object> input);
+
+ // ES6 section 7.1.14 ToPropertyKey
+ MUST_USE_RESULT static MaybeHandle<Object> ToPropertyKey(
+ Isolate* isolate, Handle<Object> value);
+
// ES6 section 7.1.15 ToLength
MUST_USE_RESULT static MaybeHandle<Object> ToLength(Isolate* isolate,
Handle<Object> input);
+ // ES6 section 7.1.17 ToIndex
+ MUST_USE_RESULT static MaybeHandle<Object> ToIndex(
+ Isolate* isolate, Handle<Object> input,
+ MessageTemplate::Template error_index);
+
// ES6 section 7.3.9 GetMethod
MUST_USE_RESULT static MaybeHandle<Object> GetMethod(
Handle<JSReceiver> receiver, Handle<Name> name);
@@ -1174,8 +1222,9 @@ class Object {
MUST_USE_RESULT static MaybeHandle<FixedArray> CreateListFromArrayLike(
Isolate* isolate, Handle<Object> object, ElementTypes element_types);
- // Check whether |object| is an instance of Error or NativeError.
- static bool IsErrorObject(Isolate* isolate, Handle<Object> object);
+ // Get length property and apply ToLength.
+ MUST_USE_RESULT static MaybeHandle<Object> GetLengthFromArrayLike(
+ Isolate* isolate, Handle<Object> object);
// ES6 section 12.5.6 The typeof Operator
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
@@ -1230,6 +1279,14 @@ class Object {
Handle<Object> lhs,
Handle<Object> rhs);
+ // ES6 section 7.3.19 OrdinaryHasInstance (C, O).
+ MUST_USE_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
+ Isolate* isolate, Handle<Object> callable, Handle<Object> object);
+
+ // ES6 section 12.10.4 Runtime Semantics: InstanceofOperator(O, C)
+ MUST_USE_RESULT static MaybeHandle<Object> InstanceOf(
+ Isolate* isolate, Handle<Object> object, Handle<Object> callable);
+
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
// ES6 [[Set]] (when passed DONT_THROW)
@@ -1306,15 +1363,10 @@ class Object {
// undefined if not yet created.
Object* GetHash();
- // Returns undefined for JSObjects, but returns the hash code for simple
- // objects. This avoids a double lookup in the cases where we know we will
- // add the hash to the JSObject if it does not already exist.
- Object* GetSimpleHash();
-
// Returns the permanent hash code associated with this object depending on
// the actual object type. May create and store a hash code if needed and none
// exists.
- static Handle<Smi> GetOrCreateHash(Isolate* isolate, Handle<Object> object);
+ static Smi* GetOrCreateHash(Isolate* isolate, Handle<Object> object);
// Checks whether this object has the same value as the given one. This
// function is implemented according to ES5, section 9.12 and can be used
@@ -1476,7 +1528,7 @@ class MapWord BASE_EMBEDDED {
// True if this map word is a forwarding address for a scavenge
// collection. Only valid during a scavenge collection (specifically,
// when all map words are heap object pointers, i.e. not during a full GC).
- inline bool IsForwardingAddress();
+ inline bool IsForwardingAddress() const;
// Create a map word from a forwarding address.
static inline MapWord FromForwardingAddress(HeapObject* object);
@@ -1535,10 +1587,15 @@ class HeapObject: public Object {
// Convenience method to get current isolate.
inline Isolate* GetIsolate() const;
-#define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const);
+#define IS_TYPE_FUNCTION_DECL(Type) INLINE(bool Is##Type() const);
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+
+#define IS_TYPE_FUNCTION_DECL(Type, Value) \
+ INLINE(bool Is##Type(Isolate* isolate) const);
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
+
#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
INLINE(bool Is##Name() const);
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -1783,10 +1840,13 @@ enum AccessorComponent {
ACCESSOR_SETTER
};
+enum class GetKeysConversion { kKeepNumbers, kConvertToString };
-enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
-
-enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
+enum class KeyCollectionMode {
+ kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
+ kIncludePrototypes =
+ static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
+};
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
@@ -1811,6 +1871,8 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static MaybeHandle<Object> ToPrimitive(
Handle<JSReceiver> receiver,
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+
+ // ES6 section 7.1.1.1 OrdinaryToPrimitive
MUST_USE_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
@@ -1832,6 +1894,8 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
Handle<JSReceiver> object, Handle<Name> name);
+ MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
+ Handle<JSReceiver> object, uint32_t index);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
@@ -1912,16 +1976,9 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSReceiver> object);
- // Tests for the fast common case for property enumeration.
- bool IsSimpleEnum();
-
// Returns the class name ([[Class]] property in the specification).
String* class_name();
- // Returns the builtin string tag used in Object.prototype.toString.
- MUST_USE_RESULT static MaybeHandle<String> BuiltinStringTag(
- Handle<JSReceiver> object);
-
// Returns the constructor name (the name (possibly, inferred name) of the
// function that was used to instantiate the object).
static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
@@ -1932,6 +1989,8 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object, Handle<Name> name);
MUST_USE_RESULT static inline Maybe<PropertyAttributes>
GetOwnPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
+ MUST_USE_RESULT static inline Maybe<PropertyAttributes>
+ GetOwnPropertyAttributes(Handle<JSReceiver> object, uint32_t index);
MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetElementAttributes(
Handle<JSReceiver> object, uint32_t index);
@@ -1954,26 +2013,17 @@ class JSReceiver: public HeapObject {
// Retrieves a permanent object identity hash code. The undefined value might
// be returned in case no hash was created yet.
- static inline Handle<Object> GetIdentityHash(Isolate* isolate,
- Handle<JSReceiver> object);
+ static inline Object* GetIdentityHash(Isolate* isolate,
+ Handle<JSReceiver> object);
// Retrieves a permanent object identity hash code. May create and store a
// hash code if needed and none exists.
- inline static Handle<Smi> GetOrCreateIdentityHash(
- Handle<JSReceiver> object);
+ inline static Smi* GetOrCreateIdentityHash(Isolate* isolate,
+ Handle<JSReceiver> object);
// ES6 [[OwnPropertyKeys]] (modulo return type)
- MUST_USE_RESULT static MaybeHandle<FixedArray> OwnPropertyKeys(
- Handle<JSReceiver> object) {
- return GetKeys(object, OWN_ONLY, ALL_PROPERTIES, CONVERT_TO_STRING);
- }
-
- // Computes the enumerable keys for a JSObject. Used for implementing
- // "for (n in object) { }".
- MUST_USE_RESULT static MaybeHandle<FixedArray> GetKeys(
- Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
- GetKeysConversion keys_conversion = KEEP_NUMBERS,
- bool filter_proxy_keys_ = true);
+ MUST_USE_RESULT static inline MaybeHandle<FixedArray> OwnPropertyKeys(
+ Handle<JSReceiver> object);
MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
Handle<JSReceiver> object, PropertyFilter filter);
@@ -1985,6 +2035,8 @@ class JSReceiver: public HeapObject {
static const int kPropertiesOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
+ bool HasProxyInPrototype(Isolate* isolate);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
@@ -2161,6 +2213,8 @@ class JSObject: public JSReceiver {
static void OptimizeAsPrototype(Handle<JSObject> object,
PrototypeOptimizationMode mode);
static void ReoptimizeIfPrototype(Handle<JSObject> object);
+ static void MakePrototypesFast(Handle<Object> receiver,
+ WhereToStart where_to_start, Isolate* isolate);
static void LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate);
static void UpdatePrototypeUserRegistration(Handle<Map> old_map,
Handle<Map> new_map,
@@ -2168,6 +2222,9 @@ class JSObject: public JSReceiver {
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
static void InvalidatePrototypeChains(Map* map);
+ // Utility used by many Array builtins and runtime functions.
+ static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
+
// Alternative implementation of WeakFixedArray::NullCallback.
class PrototypeRegistryCompactionCallback {
public:
@@ -2184,13 +2241,6 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT static Maybe<PropertyAttributes>
GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
- // Retrieves an AccessorPair property from the given object. Might return
- // undefined if the property doesn't exist or is of a different kind.
- MUST_USE_RESULT static MaybeHandle<Object> GetAccessor(
- Handle<JSObject> object,
- Handle<Name> name,
- AccessorComponent component);
-
// Defines an AccessorPair property on the given object.
// TODO(mstarzinger): Rename to SetAccessor().
static MaybeHandle<Object> DefineAccessor(Handle<JSObject> object,
@@ -2214,30 +2264,6 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
LookupIterator* it, bool* done);
- // Accessors for hidden properties object.
- //
- // Hidden properties are not own properties of the object itself. Instead
- // they are stored in an auxiliary structure kept as an own property with a
- // special name Heap::hidden_properties_symbol(). But if the receiver is a
- // JSGlobalProxy then the auxiliary object is a property of its prototype, and
- // if it's a detached proxy, then you can't have hidden properties.
-
- // Sets a hidden property on this object. Returns this object if successful,
- // undefined if called on a detached proxy.
- static Handle<Object> SetHiddenProperty(Handle<JSObject> object,
- Handle<Name> key,
- Handle<Object> value);
- // Gets the value of a hidden property with the given key. Returns the hole
- // if the property doesn't exist (or if called on a detached proxy),
- // otherwise returns the value set for the key.
- Object* GetHiddenProperty(Handle<Name> key);
- // Deletes a hidden property. Deleting a non-existing property is
- // considered successful.
- static void DeleteHiddenProperty(Handle<JSObject> object,
- Handle<Name> key);
- // Returns true if the object has a property with the hidden string as name.
- static bool HasHiddenProperties(Handle<JSObject> object);
-
static void ValidateElements(Handle<JSObject> object);
// Makes sure that this object can contain HeapObject as elements.
@@ -2299,27 +2325,7 @@ class JSObject: public JSReceiver {
inline Object* GetInternalField(int index);
inline void SetInternalField(int index, Object* value);
inline void SetInternalField(int index, Smi* value);
-
- void CollectOwnPropertyNames(KeyAccumulator* keys,
- PropertyFilter filter = ALL_PROPERTIES);
-
- // Returns the number of properties on this object filtering out properties
- // with the specified attributes (ignoring interceptors).
- // TODO(jkummerow): Deprecated, only used by Object.observe.
- int NumberOfOwnElements(PropertyFilter filter);
- // Returns the number of elements on this object filtering out elements
- // with the specified attributes (ignoring interceptors).
- // TODO(jkummerow): Deprecated, only used by Object.observe.
- int GetOwnElementKeys(FixedArray* storage, PropertyFilter filter);
-
- static void CollectOwnElementKeys(Handle<JSObject> object,
- KeyAccumulator* keys,
- PropertyFilter filter);
-
- static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
-
- static Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
- Handle<JSObject> object);
+ bool WasConstructedFromApiFunction();
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
@@ -2334,6 +2340,10 @@ class JSObject: public JSReceiver {
static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
int expected_additional_properties = 0);
+ // Forces a prototype without any of the checks that the regular SetPrototype
+ // would do.
+ static void ForceSetPrototype(Handle<JSObject> object, Handle<Object> proto);
+
// Convert the object to use the canonical dictionary
// representation. If the object is expected to have additional properties
// added this number can be indicated to have the backing store allocated to
@@ -2384,6 +2394,10 @@ class JSObject: public JSReceiver {
bool from_javascript,
ShouldThrow should_throw);
+ // Makes the object prototype immutable.
+ // Never called from JavaScript.
+ static void SetImmutableProto(Handle<JSObject> object);
+
// Initializes the body starting at |start_offset|. It is responsibility of
// the caller to initialize object header. Fill the pre-allocated fields with
// pre_allocated_value and the rest with filler_value.
@@ -2400,9 +2414,6 @@ class JSObject: public JSReceiver {
static bool IsExtensible(Handle<JSObject> object);
- // Called the first time an object is observed with ES7 Object.observe.
- static void SetObserved(Handle<JSObject> object);
-
// Copy object.
enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
@@ -2503,11 +2514,6 @@ class JSObject: public JSReceiver {
typedef FlexibleBodyDescriptor<JSReceiver::kPropertiesOffset> BodyDescriptor;
- // Enqueue change record for Object.observe. May cause GC.
- MUST_USE_RESULT static MaybeHandle<Object> EnqueueChangeRecord(
- Handle<JSObject> object, const char* type, Handle<Name> name,
- Handle<Object> old_value);
-
// Gets the number of currently used elements.
int GetFastElementsUsage();
@@ -2525,12 +2531,6 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
- // Add a property to a slow-case object.
- static void AddSlowProperty(Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes);
-
MUST_USE_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
LookupIterator* it, ShouldThrow should_throw);
@@ -2538,25 +2538,10 @@ class JSObject: public JSReceiver {
ElementsKind kind,
Object* object);
- // Return the hash table backing store or the inline stored identity hash,
- // whatever is found.
- MUST_USE_RESULT Object* GetHiddenPropertiesHashTable();
-
- // Return the hash table backing store for hidden properties. If there is no
- // backing store, allocate one.
- static Handle<ObjectHashTable> GetOrCreateHiddenPropertiesHashtable(
- Handle<JSObject> object);
-
- // Set the hidden property backing store to either a hash table or
- // the inline-stored identity hash.
- static Handle<Object> SetHiddenPropertiesHashTable(
- Handle<JSObject> object,
- Handle<Object> value);
-
- static Handle<Object> GetIdentityHash(Isolate* isolate,
- Handle<JSObject> object);
+ static Object* GetIdentityHash(Isolate* isolate, Handle<JSObject> object);
- static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object);
+ static Smi* GetOrCreateIdentityHash(Isolate* isolate,
+ Handle<JSObject> object);
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
@@ -2564,10 +2549,6 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT static Maybe<bool> PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> SetPrototypeUnobserved(
- Handle<JSObject> object, Handle<Object> value, bool from_javascript,
- ShouldThrow should_throw);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};
@@ -2691,6 +2672,8 @@ class FixedArrayBase: public HeapObject {
DECLARE_CAST(FixedArrayBase)
+ static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
+
// Layout description.
// Length is smi tagged when it is stored.
static const int kLengthOffset = HeapObject::kHeaderSize;
@@ -2709,6 +2692,16 @@ class FixedArray: public FixedArrayBase {
inline Object* get(int index) const;
static inline Handle<Object> get(FixedArray* array, int index,
Isolate* isolate);
+ template <class T>
+ MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
+
+ template <class T>
+ Handle<T> GetValueChecked(Isolate* isolate, int index) const;
+
+ // Return a grown copy if the index is bigger than the array's length.
+ static Handle<FixedArray> SetAndGrow(Handle<FixedArray> array, int index,
+ Handle<Object> value);
+
// Setter that uses write barrier.
inline void set(int index, Object* value);
inline bool is_the_hole(int index);
@@ -2919,7 +2912,8 @@ class ArrayList : public FixedArray {
inline void SetLength(int length);
inline Object* Get(int index);
inline Object** Slot(int index);
- inline void Set(int index, Object* obj);
+ inline void Set(int index, Object* obj,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void Clear(int index, Object* undefined);
bool IsFull();
DECLARE_CAST(ArrayList)
@@ -3101,6 +3095,11 @@ class DescriptorArray: public FixedArray {
return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorKey;
}
+ static int ToValueIndex(int descriptor_number) {
+ return kFirstIndex + (descriptor_number * kDescriptorSize) +
+ kDescriptorValue;
+ }
+
private:
// An entry in a DescriptorArray, represented as an (array, index) pair.
class Entry {
@@ -3116,12 +3115,6 @@ class DescriptorArray: public FixedArray {
int index_;
};
- static int ToValueIndex(int descriptor_number) {
- return kFirstIndex +
- (descriptor_number * kDescriptorSize) +
- kDescriptorValue;
- }
-
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
void CopyFrom(int index, DescriptorArray* src);
@@ -3189,6 +3182,7 @@ class BaseShape {
DCHECK(UsesSeed);
return HashForObject(key, object);
}
+ static inline Map* GetMap(Isolate* isolate);
};
@@ -3219,7 +3213,7 @@ class HashTableBase : public FixedArray {
// Tells whether k is a real key. The hole and undefined are not allowed
// as keys and can be used to indicate missing or deleted elements.
inline bool IsKey(Object* k);
- inline bool IsKey(Heap* heap, Object* k);
+ inline bool IsKey(Isolate* isolate, Object* k);
// Compute the probe offset (quadratic probing).
INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
@@ -3261,6 +3255,8 @@ class HashTableBase : public FixedArray {
template <typename Derived, typename Shape, typename Key>
class HashTable : public HashTableBase {
public:
+ typedef Shape ShapeT;
+
// Wrapper methods
inline uint32_t Hash(Key key) {
if (Shape::UsesSeed) {
@@ -3294,15 +3290,19 @@ class HashTable : public HashTableBase {
inline int FindEntry(Key key);
inline int FindEntry(Isolate* isolate, Key key, int32_t hash);
int FindEntry(Isolate* isolate, Key key);
+ inline bool Has(Isolate* isolate, Key key);
+ inline bool Has(Key key);
// Rehashes the table in-place.
void Rehash(Key key);
// Returns the key at entry.
- Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
+ Object* KeyAt(int entry) { return get(EntryToIndex(entry) + kEntryKeyIndex); }
static const int kElementsStartIndex = kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
+ STATIC_ASSERT(kEntrySize > 0);
+ static const int kEntryKeyIndex = 0;
static const int kElementsStartOffset =
kHeaderSize + kElementsStartIndex * kPointerSize;
static const int kCapacityOffset =
@@ -3507,29 +3507,24 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
- // TODO(jkummerow): Deprecated, only used by Object.observe.
int NumberOfElementsFilterAttributes(PropertyFilter filter);
// Returns the number of enumerable elements in the dictionary.
- // TODO(jkummerow): Deprecated, only used by Object.observe.
int NumberOfEnumElements() {
return NumberOfElementsFilterAttributes(ENUMERABLE_STRINGS);
}
enum SortMode { UNSORTED, SORTED };
- // Fill in details for properties into storage.
- // Returns the number of properties added.
- // TODO(jkummerow): Deprecated, only used by Object.observe.
- int CopyKeysTo(FixedArray* storage, int index, PropertyFilter filter,
- SortMode sort_mode);
// Collect the keys into the given KeyAccumulator, in ascending chronological
// order of property creation.
- static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key> > dictionary,
- KeyAccumulator* keys, PropertyFilter filter);
+ static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key>> dictionary,
+ KeyAccumulator* keys);
// Copies enumerable keys to preallocated fixed array.
- void CopyEnumKeysTo(FixedArray* storage);
+ static void CopyEnumKeysTo(Handle<Dictionary<Derived, Shape, Key>> dictionary,
+ Handle<FixedArray> storage, KeyCollectionMode mode,
+ KeyAccumulator* accumulator);
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
@@ -3543,9 +3538,9 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
// Creates a new dictionary.
MUST_USE_RESULT static Handle<Derived> New(
- Isolate* isolate,
- int at_least_space_for,
- PretenureFlag pretenure = NOT_TENURED);
+ Isolate* isolate, int at_least_space_for,
+ PretenureFlag pretenure = NOT_TENURED,
+ MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
// Ensures that a new dictionary is created when the capacity is checked.
void SetRequiresCopyOnCapacityChange();
@@ -3554,6 +3549,9 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key);
#ifdef OBJECT_PRINT
+ // For our gdb macros, we should perhaps change these in the future.
+ void Print();
+
void Print(std::ostream& os); // NOLINT
#endif
// Returns the key (slow).
@@ -3568,11 +3566,10 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
Handle<Object> value,
PropertyDetails details);
- MUST_USE_RESULT static Handle<Derived> Add(
- Handle<Derived> dictionary,
- Key key,
- Handle<Object> value,
- PropertyDetails details);
+ MUST_USE_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
+ Key key, Handle<Object> value,
+ PropertyDetails details,
+ int* entry_out = nullptr);
// Returns iteration indices array for the |dictionary|.
// Values are direct indices in the |HashTable| array.
@@ -3586,13 +3583,9 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
Key key,
Handle<Object> value);
- // Add entry to dictionary.
- static void AddEntry(
- Handle<Derived> dictionary,
- Key key,
- Handle<Object> value,
- PropertyDetails details,
- uint32_t hash);
+ // Add entry to dictionary. Returns entry value.
+ static int AddEntry(Handle<Derived> dictionary, Key key, Handle<Object> value,
+ PropertyDetails details, uint32_t hash);
// Generate new enumeration indices to avoid enumeration index overflow.
// Returns iteration indices array for the |dictionary|.
@@ -3621,15 +3614,16 @@ class BaseDictionaryShape : public BaseShape<Key> {
static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
DCHECK(entry >= 0); // Not found is -1, which is not caught by get().
- return PropertyDetails(
- Smi::cast(dict->get(Dictionary::EntryToIndex(entry) + 2)));
+ return PropertyDetails(Smi::cast(dict->get(
+ Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex)));
}
template <typename Dictionary>
static inline void DetailsAtPut(Dictionary* dict, int entry,
PropertyDetails value) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
- dict->set(Dictionary::EntryToIndex(entry) + 2, value.AsSmi());
+ dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
+ value.AsSmi());
}
template <typename Dictionary>
@@ -3651,6 +3645,8 @@ class NameDictionaryShape : public BaseDictionaryShape<Handle<Name> > {
static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Name> key);
static const int kPrefixSize = 2;
static const int kEntrySize = 3;
+ static const int kEntryValueIndex = 1;
+ static const int kEntryDetailsIndex = 2;
static const bool kIsEnumerable = true;
};
@@ -3665,6 +3661,9 @@ class NameDictionary
inline static Handle<FixedArray> DoGenerateNewEnumerationIndices(
Handle<NameDictionary> dictionary);
+
+ static const int kEntryValueIndex = 1;
+ static const int kEntryDetailsIndex = 2;
};
@@ -3692,6 +3691,8 @@ class GlobalDictionary
: public NameDictionaryBase<GlobalDictionary, GlobalDictionaryShape> {
public:
DECLARE_CAST(GlobalDictionary)
+
+ static const int kEntryValueIndex = 1;
};
@@ -3699,7 +3700,6 @@ class NumberDictionaryShape : public BaseDictionaryShape<uint32_t> {
public:
static inline bool IsMatch(uint32_t key, Object* other);
static inline Handle<Object> AsHandle(Isolate* isolate, uint32_t key);
- static const int kEntrySize = 3;
static const bool kIsEnumerable = false;
};
@@ -3708,6 +3708,7 @@ class SeededNumberDictionaryShape : public NumberDictionaryShape {
public:
static const bool UsesSeed = true;
static const int kPrefixSize = 2;
+ static const int kEntrySize = 3;
static inline uint32_t SeededHash(uint32_t key, uint32_t seed);
static inline uint32_t SeededHashForObject(uint32_t key,
@@ -3719,9 +3720,24 @@ class SeededNumberDictionaryShape : public NumberDictionaryShape {
class UnseededNumberDictionaryShape : public NumberDictionaryShape {
public:
static const int kPrefixSize = 0;
+ static const int kEntrySize = 2;
static inline uint32_t Hash(uint32_t key);
static inline uint32_t HashForObject(uint32_t key, Object* object);
+
+ template <typename Dictionary>
+ static inline PropertyDetails DetailsAt(Dictionary* dict, int entry) {
+ UNREACHABLE();
+ return PropertyDetails::Empty();
+ }
+
+ template <typename Dictionary>
+ static inline void DetailsAtPut(Dictionary* dict, int entry,
+ PropertyDetails value) {
+ UNREACHABLE();
+ }
+
+ static inline Map* GetMap(Isolate* isolate);
};
@@ -3765,6 +3781,9 @@ class SeededNumberDictionary
// requires_slow_elements returns false.
inline uint32_t max_number_key();
+ static const int kEntryValueIndex = 1;
+ static const int kEntryDetailsIndex = 2;
+
// Bit masks.
static const int kRequiresSlowElementsMask = 1;
static const int kRequiresSlowElementsTagSize = 1;
@@ -3788,6 +3807,8 @@ class UnseededNumberDictionary
Handle<UnseededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value);
+ static Handle<UnseededNumberDictionary> DeleteKey(
+ Handle<UnseededNumberDictionary> dictionary, uint32_t key);
// Set an existing entry or add a new one if needed.
// Return the updated dictionary.
@@ -3795,6 +3816,9 @@ class UnseededNumberDictionary
Handle<UnseededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value);
+
+ static const int kEntryValueIndex = 1;
+ static const int kEntryDetailsIndex = 2;
};
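// --- Editor's illustrative sketch (not part of the patch) ---
// Entry layout implied by the constants above: a seeded entry is
// [key, value, details] (kEntrySize == 3), while an unseeded entry drops
// the details slot to [key, value] (kEntrySize == 2; its DetailsAt is
// unreachable). Index arithmetic, with a hypothetical elements_start:
constexpr int EntryToIndex(int entry, int entry_size, int elements_start) {
  return elements_start + entry * entry_size;
}
// Key of unseeded entry 4 with elements starting at index 3 lives at
// array index 11; its value (kEntryValueIndex == 1) at index 12.
static_assert(EntryToIndex(4, 2, 3) == 11, "unseeded entry stride is 2");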
@@ -3858,6 +3882,23 @@ class ObjectHashTable: public HashTable<ObjectHashTable,
}
};
+class ObjectHashSetShape : public ObjectHashTableShape {
+ public:
+ static const int kPrefixSize = 0;
+ static const int kEntrySize = 1;
+};
+
+class ObjectHashSet
+ : public HashTable<ObjectHashSet, ObjectHashSetShape, Handle<Object>> {
+ public:
+ static Handle<ObjectHashSet> Add(Handle<ObjectHashSet> set,
+ Handle<Object> key);
+
+ inline bool Has(Isolate* isolate, Handle<Object> key, int32_t hash);
+ inline bool Has(Isolate* isolate, Handle<Object> key);
+
+ DECLARE_CAST(ObjectHashSet)
+};
// OrderedHashTable is a HashTable with Object keys that preserves
// insertion order. There are Map and Set interfaces (OrderedHashMap
@@ -3911,7 +3952,7 @@ class OrderedHashTable: public FixedArray {
static Handle<Derived> Shrink(Handle<Derived> table);
// Returns a new empty OrderedHashTable and records the clearing so that
- // exisiting iterators can be updated.
+ // existing iterators can be updated.
static Handle<Derived> Clear(Handle<Derived> table);
// Returns true if the OrderedHashTable contains the key.
@@ -3925,6 +3966,8 @@ class OrderedHashTable: public FixedArray {
return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
}
+ // Returns the number of contiguous entries in the data table, starting at 0,
+ // that either are real entries or have been deleted.
int UsedCapacity() { return NumberOfElements() + NumberOfDeletedElements(); }
int NumberOfBuckets() {
@@ -3944,10 +3987,10 @@ class OrderedHashTable: public FixedArray {
return Smi::cast(entry)->value();
}
- int KeyToFirstEntry(Object* key) {
+ int KeyToFirstEntry(Isolate* isolate, Object* key) {
Object* hash = key->GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash->IsUndefined()) return kNotFound;
+ if (hash->IsUndefined(isolate)) return kNotFound;
return HashToEntry(Smi::cast(hash)->value());
}
@@ -3956,7 +3999,11 @@ class OrderedHashTable: public FixedArray {
return Smi::cast(next_entry)->value();
}
- Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
+ // Use KeyAt(i)->IsTheHole(isolate) to determine if this is a deleted entry.
+ Object* KeyAt(int entry) {
+ DCHECK_LT(entry, this->UsedCapacity());
+ return get(EntryToIndex(entry));
+ }
bool IsObsolete() {
return !get(kNextTableIndex)->IsSmi();
@@ -4017,6 +4064,7 @@ class OrderedHashTable: public FixedArray {
set(kNumberOfDeletedElementsIndex, Smi::FromInt(num));
}
+ // Returns the number of elements that can fit into the allocated buffer.
int Capacity() {
return NumberOfBuckets() * kLoadFactor;
}
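// --- Editor's illustrative sketch (not part of the patch) ---
// Capacity() above derives the usable entry count from the bucket count.
// kLoadFactor is assumed to be 2 here (its value in OrderedHashTable
// sources of this era), i.e. growth keeps at most two entries per bucket:
constexpr int kAssumedLoadFactor = 2;
constexpr int CapacityFor(int buckets) { return buckets * kAssumedLoadFactor; }
static_assert(CapacityFor(8) == 16, "8 buckets hold up to 16 entries");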
@@ -4047,6 +4095,8 @@ class OrderedHashSet: public OrderedHashTable<
static Handle<OrderedHashSet> Add(Handle<OrderedHashSet> table,
Handle<Object> value);
+ static Handle<FixedArray> ConvertToKeysArray(Handle<OrderedHashSet> table,
+ GetKeysConversion convert);
};
@@ -4212,7 +4262,7 @@ class ScopeInfo : public FixedArray {
// Return true if this local was introduced by the compiler, and should not be
// exposed to the user in a debugger.
- bool LocalIsSynthetic(int var);
+ static bool VariableIsSynthetic(String* name);
// Lookup support for serialized scope info. Returns the
// the stack slot index for a given slot name if the slot is
@@ -4354,9 +4404,9 @@ class ScopeInfo : public FixedArray {
// Properties of scopes.
class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
class CallsEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {};
- STATIC_ASSERT(LANGUAGE_END == 3);
+ STATIC_ASSERT(LANGUAGE_END == 2);
class LanguageModeField
- : public BitField<LanguageMode, CallsEvalField::kNext, 2> {};
+ : public BitField<LanguageMode, CallsEvalField::kNext, 1> {};
class DeclarationScopeField
: public BitField<bool, LanguageModeField::kNext, 1> {};
class ReceiverVariableField
@@ -4374,7 +4424,7 @@ class ScopeInfo : public FixedArray {
class HasSimpleParametersField
: public BitField<bool, AsmFunctionField::kNext, 1> {};
class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 8> {};
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 9> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
@@ -4415,6 +4465,83 @@ class NormalizedMapCache: public FixedArray {
void set(int index, Object* value);
};
+// HandlerTable is a fixed array containing entries for exception handlers in
+// the code object it is associated with. The table comes in two flavors:
+// 1) Based on ranges: Used for unoptimized code. Contains one entry per
+// exception handler and a range representing the try-block covered by that
+// handler. Layout looks as follows:
+// [ range-start , range-end , handler-offset , handler-data ]
+// 2) Based on return addresses: Used for turbofanned code. Contains one entry
+// per call-site that could throw an exception. Layout looks as follows:
+// [ return-address-offset , handler-offset ]
+class HandlerTable : public FixedArray {
+ public:
+ // Conservative prediction whether a given handler will locally catch an
+ // exception or cause a re-throw to outside the code boundary. Since this is
+ // undecidable, it is merely an approximation (e.g. useful for the debugger).
+ enum CatchPrediction {
+ UNCAUGHT, // The handler will (likely) rethrow the exception.
+ CAUGHT, // The exception will be caught by the handler.
+ PROMISE, // The exception will be caught and cause a promise rejection.
+ DESUGARING, // The exception will be caught, but both the exception and the
+ // catching are part of a desugaring and should therefore not
+ // be visible to the user (we won't notify the debugger of such
+ // exceptions).
+ };
+
+ // Getters for handler table based on ranges.
+ inline int GetRangeStart(int index) const;
+ inline int GetRangeEnd(int index) const;
+ inline int GetRangeHandler(int index) const;
+ inline int GetRangeData(int index) const;
+
+ // Setters for handler table based on ranges.
+ inline void SetRangeStart(int index, int value);
+ inline void SetRangeEnd(int index, int value);
+ inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
+ inline void SetRangeData(int index, int value);
+
+ // Setters for handler table based on return addresses.
+ inline void SetReturnOffset(int index, int value);
+ inline void SetReturnHandler(int index, int offset);
+
+ // Lookup handler in a table based on ranges.
+ int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
+
+ // Lookup handler in a table based on return addresses.
+ int LookupReturn(int pc_offset);
+
+ // Returns the number of entries in the table.
+ inline int NumberOfRangeEntries() const;
+
+ // Returns the required length of the underlying fixed array.
+ static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
+ static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
+
+ DECLARE_CAST(HandlerTable)
+
+#ifdef ENABLE_DISASSEMBLER
+ void HandlerTableRangePrint(std::ostream& os); // NOLINT
+ void HandlerTableReturnPrint(std::ostream& os); // NOLINT
+#endif
+
+ private:
+ // Layout description for handler table based on ranges.
+ static const int kRangeStartIndex = 0;
+ static const int kRangeEndIndex = 1;
+ static const int kRangeHandlerIndex = 2;
+ static const int kRangeDataIndex = 3;
+ static const int kRangeEntrySize = 4;
+
+ // Layout description for handler table based on return addresses.
+ static const int kReturnOffsetIndex = 0;
+ static const int kReturnHandlerIndex = 1;
+ static const int kReturnEntrySize = 2;
+
+ // Encoding of the {handler} field.
+ class HandlerPredictionField : public BitField<CatchPrediction, 0, 2> {};
+ class HandlerOffsetField : public BitField<int, 2, 30> {};
+};
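// --- Editor's illustrative sketch (not part of the patch) ---
// The two BitFields above pack a range-based entry's {handler} field into
// one word: the catch prediction in bits [0, 2) and the handler offset in
// bits [2, 32). The prediction field widened from 1 bit to 2 in this patch
// to fit the new PROMISE and DESUGARING values. A standalone
// re-implementation of that packing:
#include <cassert>
#include <cstdint>

enum Prediction { UNCAUGHT = 0, CAUGHT = 1, PROMISE = 2, DESUGARING = 3 };

uint32_t EncodeHandler(int offset, Prediction pred) {
  return (static_cast<uint32_t>(offset) << 2) | static_cast<uint32_t>(pred);
}
int DecodeOffset(uint32_t field) { return static_cast<int>(field >> 2); }
Prediction DecodePrediction(uint32_t field) {
  return static_cast<Prediction>(field & 0x3);
}

int main() {
  uint32_t f = EncodeHandler(0x1234, PROMISE);
  assert(DecodeOffset(f) == 0x1234);
  assert(DecodePrediction(f) == PROMISE);
  return 0;
}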
// ByteArray represents fixed sized byte arrays. Used for the relocation info
// that is attached to code objects.
@@ -4425,9 +4552,15 @@ class ByteArray: public FixedArrayBase {
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
+ inline const byte* data() const;
+
+ // Copy in / copy out whole byte slices.
+ inline void copy_out(int index, byte* buffer, int length);
+ inline void copy_in(int index, const byte* buffer, int length);
// Treat contents as an int array.
inline int get_int(int index);
+ inline void set_int(int index, int value);
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
@@ -4497,6 +4630,10 @@ class BytecodeArray : public FixedArrayBase {
inline int interrupt_budget() const;
inline void set_interrupt_budget(int interrupt_budget);
+ // Accessors for OSR loop nesting level.
+ inline int osr_loop_nesting_level() const;
+ inline void set_osr_loop_nesting_level(int depth);
+
// Accessors for the constant pool.
DECL_ACCESSORS(constant_pool, FixedArray)
@@ -4514,6 +4651,10 @@ class BytecodeArray : public FixedArrayBase {
inline int instruction_size();
+ // Returns the size of bytecode and its metadata. This includes the size of
+ // bytecode, constant pool, source position table, and handler table.
+ inline int SizeIncludingMetadata();
+
int SourcePosition(int offset);
int SourceStatementPosition(int offset);
@@ -4524,6 +4665,9 @@ class BytecodeArray : public FixedArrayBase {
void CopyBytecodesTo(BytecodeArray* to);
+ int LookupRangeInHandlerTable(int code_offset, int* data,
+ HandlerTable::CatchPrediction* prediction);
+
// Layout description.
static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
static const int kHandlerTableOffset = kConstantPoolOffset + kPointerSize;
@@ -4532,7 +4676,8 @@ class BytecodeArray : public FixedArrayBase {
static const int kFrameSizeOffset = kSourcePositionTableOffset + kPointerSize;
static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
static const int kInterruptBudgetOffset = kParameterSizeOffset + kIntSize;
- static const int kHeaderSize = kInterruptBudgetOffset + kIntSize;
+ static const int kOSRNestingLevelOffset = kInterruptBudgetOffset + kIntSize;
+ static const int kHeaderSize = kOSRNestingLevelOffset + kCharSize;
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
@@ -4801,23 +4946,24 @@ class LiteralsArray : public FixedArray {
public:
static const int kVectorIndex = 0;
static const int kFirstLiteralIndex = 1;
- static const int kOffsetToFirstLiteral =
- FixedArray::kHeaderSize + kPointerSize;
+ static const int kFeedbackVectorOffset;
+ static const int kOffsetToFirstLiteral;
static int OffsetOfLiteralAt(int index) {
- return SizeFor(index + kFirstLiteralIndex);
+ return OffsetOfElementAt(index + kFirstLiteralIndex);
}
inline TypeFeedbackVector* feedback_vector() const;
inline void set_feedback_vector(TypeFeedbackVector* vector);
inline Object* literal(int literal_index) const;
inline void set_literal(int literal_index, Object* literal);
+ inline void set_literal_undefined(int literal_index);
inline int literals_count() const;
static Handle<LiteralsArray> New(Isolate* isolate,
Handle<TypeFeedbackVector> vector,
int number_of_literals,
- PretenureFlag pretenure);
+ PretenureFlag pretenure = TENURED);
DECLARE_CAST(LiteralsArray)
@@ -4829,80 +4975,21 @@ class LiteralsArray : public FixedArray {
};
-// HandlerTable is a fixed array containing entries for exception handlers in
-// the code object it is associated with. The tables comes in two flavors:
-// 1) Based on ranges: Used for unoptimized code. Contains one entry per
-// exception handler and a range representing the try-block covered by that
-// handler. Layout looks as follows:
-// [ range-start , range-end , handler-offset , handler-data ]
-// 2) Based on return addresses: Used for turbofanned code. Contains one entry
-// per call-site that could throw an exception. Layout looks as follows:
-// [ return-address-offset , handler-offset ]
-class HandlerTable : public FixedArray {
+class TemplateList : public FixedArray {
public:
- // Conservative prediction whether a given handler will locally catch an
- // exception or cause a re-throw to outside the code boundary. Since this is
- // undecidable it is merely an approximation (e.g. useful for debugger).
- enum CatchPrediction { UNCAUGHT, CAUGHT };
-
- // Getters for handler table based on ranges.
- inline int GetRangeStart(int index) const;
- inline int GetRangeEnd(int index) const;
- inline int GetRangeHandler(int index) const;
- inline int GetRangeData(int index) const;
-
- // Setters for handler table based on ranges.
- inline void SetRangeStart(int index, int value);
- inline void SetRangeEnd(int index, int value);
- inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
- inline void SetRangeData(int index, int value);
-
- // Setters for handler table based on return addresses.
- inline void SetReturnOffset(int index, int value);
- inline void SetReturnHandler(int index, int offset, CatchPrediction pred);
-
- // Lookup handler in a table based on ranges.
- int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
-
- // Lookup handler in a table based on return addresses.
- int LookupReturn(int pc_offset, CatchPrediction* prediction);
-
- // Returns the conservative catch predication.
- inline CatchPrediction GetRangePrediction(int index) const;
-
- // Returns the number of entries in the table.
- inline int NumberOfRangeEntries() const;
-
- // Returns the required length of the underlying fixed array.
- static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
- static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
-
- DECLARE_CAST(HandlerTable)
-
-#ifdef ENABLE_DISASSEMBLER
- void HandlerTableRangePrint(std::ostream& os); // NOLINT
- void HandlerTableReturnPrint(std::ostream& os); // NOLINT
-#endif
-
+ static Handle<TemplateList> New(Isolate* isolate, int size);
+ inline int length() const;
+ inline Object* get(int index) const;
+ inline void set(int index, Object* value);
+ static Handle<TemplateList> Add(Isolate* isolate, Handle<TemplateList> list,
+ Handle<Object> value);
+ DECLARE_CAST(TemplateList)
private:
- // Layout description for handler table based on ranges.
- static const int kRangeStartIndex = 0;
- static const int kRangeEndIndex = 1;
- static const int kRangeHandlerIndex = 2;
- static const int kRangeDataIndex = 3;
- static const int kRangeEntrySize = 4;
-
- // Layout description for handler table based on return addresses.
- static const int kReturnOffsetIndex = 0;
- static const int kReturnHandlerIndex = 1;
- static const int kReturnEntrySize = 2;
-
- // Encoding of the {handler} field.
- class HandlerPredictionField : public BitField<CatchPrediction, 0, 1> {};
- class HandlerOffsetField : public BitField<int, 1, 30> {};
+ static const int kLengthIndex = 0;
+ static const int kFirstElementIndex = kLengthIndex + 1;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateList);
};
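
TemplateList is a thin growable-list veneer over a FixedArray. A minimal standalone sketch of the layout implied by the constants above — slot kLengthIndex = 0 holds the used length, elements start at kFirstElementIndex = 1; ints stand in for tagged Object pointers, and the doubling growth policy is an assumption, not necessarily what TemplateList::Add does:

#include <vector>

struct TemplateListModel {
  std::vector<int> slots;  // models the backing FixedArray
  explicit TemplateListModel(int capacity) : slots(1 + capacity, 0) {}
  int length() const { return slots[0]; }                   // kLengthIndex
  int get(int index) const { return slots[1 + index]; }     // kFirstElementIndex + index
  void set(int index, int value) { slots[1 + index] = value; }
  void add(int value) {  // grow on demand, then bump the stored length
    if (1 + slots[0] == static_cast<int>(slots.size())) {
      slots.resize(2 * slots.size(), 0);
    }
    slots[1 + slots[0]] = value;
    slots[0]++;
  }
};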
-
// Code describes objects with on-the-fly generated machine code.
class Code: public HeapObject {
public:
@@ -4924,6 +5011,7 @@ class Code: public HeapObject {
#define IC_KIND_LIST(V) \
V(LOAD_IC) \
+ V(LOAD_GLOBAL_IC) \
V(KEYED_LOAD_IC) \
V(CALL_IC) \
V(STORE_IC) \
@@ -4943,24 +5031,13 @@ class Code: public HeapObject {
NUMBER_OF_KINDS
};
- // No more than 32 kinds. The value is currently encoded in five bits in
- // Flags.
- STATIC_ASSERT(NUMBER_OF_KINDS <= 32);
-
static const char* Kind2String(Kind kind);
- // Types of stubs.
- enum StubType {
- NORMAL,
- FAST
- };
-
static const int kPrologueOffsetNotSet = -1;
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* ICState2String(InlineCacheState state);
- static const char* StubType2String(StubType type);
static void PrintExtraICState(std::ostream& os, // NOLINT
Kind kind, ExtraICState extra);
void Disassemble(const char* name, std::ostream& os); // NOLINT
@@ -4981,6 +5058,9 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
+ // [source_position_table]: ByteArray for the source positions table.
+ DECL_ACCESSORS(source_position_table, ByteArray)
+
// [raw_type_feedback_info]: This field stores various things, depending on
// the kind of the code object.
// FUNCTION => type feedback information.
@@ -5027,37 +5107,26 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
- inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
- inline StubType type(); // Only valid for monomorphic IC stubs.
-
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
inline bool is_debug_stub();
inline bool is_handler();
- inline bool is_load_stub();
- inline bool is_keyed_load_stub();
- inline bool is_store_stub();
- inline bool is_keyed_store_stub();
inline bool is_call_stub();
inline bool is_binary_op_stub();
inline bool is_compare_ic_stub();
inline bool is_to_boolean_ic_stub();
- inline bool is_keyed_stub();
inline bool is_optimized_code();
inline bool is_wasm_code();
- inline bool embeds_maps_weakly();
inline bool IsCodeStubOrIC();
- inline bool IsJavaScriptCode();
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
// Testers for interpreter builtins.
- inline bool is_interpreter_entry_trampoline();
- inline bool is_interpreter_enter_bytecode_dispatch();
+ inline bool is_interpreter_trampoline_builtin();
// [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
// object was generated by either the hydrogen or the TurboFan optimizing
@@ -5076,6 +5145,12 @@ class Code: public HeapObject {
inline bool can_have_weak_objects();
inline void set_can_have_weak_objects(bool value);
+ // [is_construct_stub]: For kind BUILTIN, tells whether the code object
+ // represents a hand-written construct stub
+ // (e.g., NumberConstructor_ConstructStub).
+ inline bool is_construct_stub();
+ inline void set_is_construct_stub(bool value);
+
// [has_deoptimization_support]: For FUNCTION kind, tells if it has
// deoptimization support.
inline bool has_deoptimization_support();
@@ -5104,11 +5179,9 @@ class Code: public HeapObject {
inline int profiler_ticks();
inline void set_profiler_ticks(int ticks);
- // [builtin_index]: For BUILTIN kind, tells which builtin index it has.
- // For builtins, tells which builtin index it has.
- // Note that builtins can have a code kind other than BUILTIN, which means
- // that for arbitrary code objects, this index value may be random garbage.
- // To verify in that case, compare the code object to the indexed builtin.
+ // [builtin_index]: For builtins, tells which builtin index the code object
+ // has. Note that builtins can have a code kind other than BUILTIN. The
+ // builtin index is a non-negative integer for builtins, and -1 otherwise.
inline int builtin_index();
inline void set_builtin_index(int id);
@@ -5151,20 +5224,6 @@ class Code: public HeapObject {
// Find the first map in an IC stub.
Map* FindFirstMap();
- void FindAllMaps(MapHandleList* maps);
-
- // Find the first handler in an IC stub.
- Code* FindFirstHandler();
-
- // Find |length| handlers and put them into |code_list|. Returns false if not
- // enough handlers can be found.
- bool FindHandlers(CodeHandleList* code_list, int length = -1);
-
- // Find the handler for |map|.
- MaybeHandle<Code> FindHandlerForMap(Map* map);
-
- // Find the first name in an IC stub.
- Name* FindFirstName();
class FindAndReplacePattern;
// For each (map-to-find, object-to-replace) pair in the pattern, this
@@ -5186,26 +5245,17 @@ class Code: public HeapObject {
// Flags operations.
static inline Flags ComputeFlags(
- Kind kind, InlineCacheState ic_state = UNINITIALIZED,
- ExtraICState extra_ic_state = kNoExtraICState, StubType type = NORMAL,
- CacheHolderFlag holder = kCacheOnReceiver);
-
- static inline Flags ComputeMonomorphicFlags(
Kind kind, ExtraICState extra_ic_state = kNoExtraICState,
- CacheHolderFlag holder = kCacheOnReceiver, StubType type = NORMAL);
+ CacheHolderFlag holder = kCacheOnReceiver);
static inline Flags ComputeHandlerFlags(
- Kind handler_kind, StubType type = NORMAL,
- CacheHolderFlag holder = kCacheOnReceiver);
+ Kind handler_kind, CacheHolderFlag holder = kCacheOnReceiver);
- static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
- static inline StubType ExtractTypeFromFlags(Flags flags);
static inline CacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
- static inline Flags RemoveTypeFromFlags(Flags flags);
- static inline Flags RemoveTypeAndHolderFromFlags(Flags flags);
+ static inline Flags RemoveHolderFromFlags(Flags flags);
// Convert a target address into a code object.
static inline Code* GetCodeFromTargetAddress(Address address);
@@ -5219,12 +5269,60 @@ class Code: public HeapObject {
// Returns the address right after the last instruction.
inline byte* instruction_end();
- // Returns the size of the instructions, padding, and relocation information.
+ // Returns the size of the instructions, padding, relocation and unwinding
+ // information.
inline int body_size();
+ // Returns the size of code and its metadata. This includes the size of code
+ // relocation information, deoptimization data and handler table.
+ inline int SizeIncludingMetadata();
+
// Returns the address of the first relocation info (read backwards!).
inline byte* relocation_start();
+ // [has_unwinding_info]: Whether this code object has unwinding information.
+ // If it doesn't, unwinding_info_start() will point to invalid data.
+ //
+ // The body of all code objects has the following layout.
+ //
+ // +--------------------------+ <-- instruction_start()
+ // | instructions |
+ // | ... |
+ // +--------------------------+
+ // | relocation info |
+ // | ... |
+ // +--------------------------+ <-- instruction_end()
+ //
+ // If has_unwinding_info() is false, instruction_end() points to the first
+ // memory location after the end of the code object. Otherwise, the body
+ // continues as follows:
+ //
+ // +--------------------------+
+ // | padding to the next |
+ // | 8-byte aligned address |
+ // +--------------------------+ <-- instruction_end()
+ // | [unwinding_info_size] |
+ // | as uint64_t |
+ // +--------------------------+ <-- unwinding_info_start()
+ // | unwinding info |
+ // | ... |
+ // +--------------------------+ <-- unwinding_info_end()
+ //
+ // and unwinding_info_end() points to the first memory location after the end
+ // of the code object.
+ //
+ DECL_BOOLEAN_ACCESSORS(has_unwinding_info)
+
+ // [unwinding_info_size]: Size of the unwinding information.
+ inline int unwinding_info_size() const;
+ inline void set_unwinding_info_size(int value);
+
+ // Returns the address of the unwinding information, if any.
+ inline byte* unwinding_info_start();
+
+ // Returns the address right after the end of the unwinding information.
+ inline byte* unwinding_info_end();
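
A hedged sketch of the address arithmetic the layout diagram above implies (free functions with hypothetical names, not the V8 accessors): the uint64_t size field sits at the first 8-byte aligned address at or past instruction_end(), and the unwinding info itself follows the size field.

#include <cstdint>

inline uint8_t* UnwindingInfoSizeAddress(uint8_t* instruction_end) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(instruction_end);
  uintptr_t aligned = (addr + 7) & ~static_cast<uintptr_t>(7);  // 8-byte pad
  return reinterpret_cast<uint8_t*>(aligned);
}
inline uint8_t* UnwindingInfoStart(uint8_t* instruction_end) {
  // The unwinding info begins immediately after the uint64_t size field.
  return UnwindingInfoSizeAddress(instruction_end) + sizeof(uint64_t);
}
inline uint8_t* UnwindingInfoEnd(uint8_t* instruction_end, uint64_t size) {
  return UnwindingInfoStart(instruction_end) + size;
}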
+
// Code entry point.
inline byte* entry();
@@ -5248,10 +5346,6 @@ class Code: public HeapObject {
// the layout of the code object into account.
inline int ExecutableSize();
- // Locating source position.
- int SourcePosition(int code_offset);
- int SourceStatementPosition(int code_offset);
-
DECLARE_CAST(Code)
// Dispatched behavior.
@@ -5261,11 +5355,13 @@ class Code: public HeapObject {
DECLARE_VERIFIER(Code)
void ClearInlineCaches();
- void ClearInlineCaches(Kind kind);
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
+ int LookupRangeInHandlerTable(int code_offset, int* data,
+ HandlerTable::CatchPrediction* prediction);
+
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
kToBeExecutedOnceCodeAge = -3,
@@ -5321,10 +5417,6 @@ class Code: public HeapObject {
static Handle<WeakCell> WeakCellFor(Handle<Code> code);
WeakCell* CachedWeakCell();
- // Max loop nesting marker used to postpose OSR. We don't take loop
- // nesting that is deeper than 5 levels into account.
- static const int kMaxLoopNestingMarker = 6;
-
static const int kConstantPoolSize =
FLAG_enable_embedded_constant_pool ? kIntSize : 0;
@@ -5333,9 +5425,11 @@ class Code: public HeapObject {
static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
static const int kDeoptimizationDataOffset =
kHandlerTableOffset + kPointerSize;
+ static const int kSourcePositionTableOffset =
+ kDeoptimizationDataOffset + kPointerSize;
// For FUNCTION kind, we store the type feedback info here.
static const int kTypeFeedbackInfoOffset =
- kDeoptimizationDataOffset + kPointerSize;
+ kSourcePositionTableOffset + kPointerSize;
static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
static const int kInstructionSizeOffset = kGCMetadataOffset + kPointerSize;
@@ -5356,6 +5450,8 @@ class Code: public HeapObject {
static const int kHeaderSize =
(kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
+ inline int GetUnwindingInfoSizeOffset() const;
+
class BodyDescriptor;
// Byte offsets within kKindSpecificFlags1Offset.
@@ -5369,13 +5465,15 @@ class Code: public HeapObject {
class ProfilerTicksField : public BitField<int, 4, 28> {};
// Flags layout. BitField<type, shift, size>.
- class ICStateField : public BitField<InlineCacheState, 0, 3> {};
- class TypeField : public BitField<StubType, 3, 1> {};
- class CacheHolderField : public BitField<CacheHolderFlag, 4, 2> {};
- class KindField : public BitField<Kind, 6, 5> {};
+ class HasUnwindingInfoField : public BitField<bool, 0, 1> {};
+ class CacheHolderField
+ : public BitField<CacheHolderFlag, HasUnwindingInfoField::kNext, 2> {};
+ class KindField : public BitField<Kind, CacheHolderField::kNext, 5> {};
+ STATIC_ASSERT(NUMBER_OF_KINDS <= KindField::kMax);
class ExtraICStateField
- : public BitField<ExtraICState, 11, PlatformSmiTagging::kSmiValueSize -
- 11 + 1> {}; // NOLINT
+ : public BitField<ExtraICState, KindField::kNext,
+ PlatformSmiTagging::kSmiValueSize - KindField::kNext> {
+ };
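
For readers unfamiliar with the pattern, a minimal sketch of the BitField template chained above (V8's real template, defined elsewhere, carries more machinery; a 32-bit flags word is assumed): kNext names the first bit past a field, which is what lets CacheHolderField, KindField and ExtraICStateField pack back-to-back, and kMax is the largest encodable value checked by the STATIC_ASSERT.

#include <cstdint>

template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static const int kNext = shift + size;           // first bit past the field
  static const uint32_t kMax = (1u << size) - 1u;  // largest encodable value
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};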
// KindSpecificFlags1 layout (STUB, BUILTIN and OPTIMIZED_FUNCTION)
static const int kStackSlotsFirstBit = 0;
@@ -5384,9 +5482,11 @@ class Code: public HeapObject {
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kIsTurbofannedBit = kMarkedForDeoptimizationBit + 1;
static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
+ // Could be moved to overlap previous bits when we need more space.
+ static const int kIsConstructStub = kCanHaveWeakObjects + 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
- STATIC_ASSERT(kCanHaveWeakObjects + 1 <= 32);
+ STATIC_ASSERT(kIsConstructStub + 1 <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
@@ -5396,6 +5496,8 @@ class Code: public HeapObject {
}; // NOLINT
class CanHaveWeakObjectsField
: public BitField<bool, kCanHaveWeakObjects, 1> {}; // NOLINT
+ class IsConstructStubField : public BitField<bool, kIsConstructStub, 1> {
+ }; // NOLINT
// KindSpecificFlags2 layout (ALL)
static const int kIsCrankshaftedBit = 0;
@@ -5419,21 +5521,17 @@ class Code: public HeapObject {
kIsCrankshaftedBit + 1, 27> {}; // NOLINT
class AllowOSRAtLoopNestingLevelField: public BitField<int,
kIsCrankshaftedBit + 1 + 27, 4> {}; // NOLINT
- STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker);
static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
// This constant should be encodable in an ARM instruction.
- static const int kFlagsNotUsedInLookup =
- TypeField::kMask | CacheHolderField::kMask;
+ static const int kFlagsNotUsedInLookup = CacheHolderField::kMask;
private:
friend class RelocIterator;
friend class Deoptimizer; // For FindCodeAgeSequence.
- void ClearInlineCaches(Kind* kind);
-
// Code aging
byte* FindCodeAgeSequence();
static void GetCodeAgeAndParity(Code* code, Age* age,
@@ -5458,8 +5556,11 @@ class AbstractCode : public HeapObject {
CODE_KIND_LIST(DEFINE_CODE_KIND_ENUM)
#undef DEFINE_CODE_KIND_ENUM
INTERPRETED_FUNCTION,
+ NUMBER_OF_KINDS
};
+ static const char* Kind2String(Kind kind);
+
int SourcePosition(int offset);
int SourceStatementPosition(int offset);
@@ -5469,9 +5570,22 @@ class AbstractCode : public HeapObject {
// Returns the address right after the last instruction.
inline Address instruction_end();
- // Returns the of the code instructions.
+ // Returns the size of the code instructions.
inline int instruction_size();
+ // Return the source position table.
+ inline ByteArray* source_position_table();
+
+ // Set the source position table.
+ inline void set_source_position_table(ByteArray* source_position_table);
+
+ // Looks up the handler for the given code offset in the handler table.
+ inline int LookupRangeInHandlerTable(
+ int code_offset, int* data, HandlerTable::CatchPrediction* prediction);
+
+ // Returns the size of instructions and the metadata.
+ inline int SizeIncludingMetadata();
+
// Returns true if pc is inside this object's instructions.
inline bool contains(byte* pc);
@@ -5485,6 +5599,12 @@ class AbstractCode : public HeapObject {
DECLARE_CAST(AbstractCode)
inline Code* GetCode();
inline BytecodeArray* GetBytecodeArray();
+
+ // Max loop nesting marker used to postpone OSR. We don't take loop
+ // nesting deeper than 5 levels into account.
+ static const int kMaxLoopNestingMarker = 6;
+ STATIC_ASSERT(Code::AllowOSRAtLoopNestingLevelField::kMax >=
+ kMaxLoopNestingMarker);
};
// Dependent code is a singly linked list of fixed arrays. Each array contains
@@ -5538,6 +5658,9 @@ class DependentCode: public FixedArray {
};
static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
+ static const int kNextLinkIndex = 0;
+ static const int kFlagsIndex = 1;
+ static const int kCodesStartIndex = 2;
bool Contains(DependencyGroup group, WeakCell* code_cell);
bool IsEmpty(DependencyGroup group);
@@ -5598,9 +5721,6 @@ class DependentCode: public FixedArray {
class GroupField : public BitField<int, 0, 3> {};
class CountField : public BitField<int, 3, 27> {};
STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
- static const int kNextLinkIndex = 0;
- static const int kFlagsIndex = 1;
- static const int kCodesStartIndex = 2;
};
@@ -5675,7 +5795,7 @@ class Map: public HeapObject {
class Deprecated : public BitField<bool, 23, 1> {};
class IsUnstable : public BitField<bool, 24, 1> {};
class IsMigrationTarget : public BitField<bool, 25, 1> {};
- // Bit 26 is free.
+ class ImmutablePrototype : public BitField<bool, 26, 1> {};
class NewTargetIsBase : public BitField<bool, 27, 1> {};
// Bit 28 is free.
@@ -5775,10 +5895,6 @@ class Map: public HeapObject {
inline void set_is_undetectable();
inline bool is_undetectable();
- // Tells whether the instance has a call-as-function handler.
- inline void set_is_observed();
- inline bool is_observed();
-
// Tells whether the instance has a [[Call]] internal method.
// This property is implemented according to ES6, section 7.2.3.
inline void set_is_callable();
@@ -5803,6 +5919,7 @@ class Map: public HeapObject {
inline bool has_fast_double_elements();
inline bool has_fast_elements();
inline bool has_sloppy_arguments_elements();
+ inline bool has_fast_sloppy_arguments_elements();
inline bool has_fast_string_wrapper_elements();
inline bool has_fixed_typed_array_elements();
inline bool has_dictionary_elements();
@@ -5831,6 +5948,9 @@ class Map: public HeapObject {
Handle<JSObject> prototype, Isolate* isolate);
static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
Handle<Map> prototype_map, Isolate* isolate);
+ inline bool should_be_fast_prototype_map() const;
+ static void SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
+ Isolate* isolate);
// [prototype chain validity cell]: Associated with a prototype object,
// stored in that object's map's PrototypeInfo, indicates that prototype
@@ -5933,7 +6053,7 @@ class Map: public HeapObject {
LayoutDescriptor* layout_descriptor);
// [stub cache]: contains stubs compiled for this map.
- DECL_ACCESSORS(code_cache, Object)
+ DECL_ACCESSORS(code_cache, FixedArray)
// [dependent code]: list of optimized codes that weakly embed this map.
DECL_ACCESSORS(dependent_code, DependentCode)
@@ -5963,6 +6083,8 @@ class Map: public HeapObject {
inline bool is_stable();
inline void set_migration_target(bool value);
inline bool is_migration_target();
+ inline void set_immutable_proto(bool value);
+ inline bool is_immutable_proto();
inline void set_construction_counter(int value);
inline int construction_counter();
inline void deprecate();
@@ -6017,8 +6139,6 @@ class Map: public HeapObject {
FunctionKind kind);
- static Handle<Map> CopyForObserved(Handle<Map> map);
-
static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
PropertyAttributes attrs_to_add,
Handle<Symbol> transition_marker,
@@ -6037,8 +6157,8 @@ class Map: public HeapObject {
PropertyAttributes attributes,
StoreFromKeyed store_mode);
static Handle<Map> TransitionToAccessorProperty(
- Handle<Map> map, Handle<Name> name, int descriptor,
- AccessorComponent component, Handle<Object> accessor,
+ Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
+ Handle<Object> getter, Handle<Object> setter,
PropertyAttributes attributes);
static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
int descriptor,
@@ -6086,15 +6206,7 @@ class Map: public HeapObject {
static void EnsureDescriptorSlack(Handle<Map> map, int slack);
- // Returns the found code or undefined if absent.
- Object* FindInCodeCache(Name* name, Code::Flags flags);
-
- // Returns the non-negative index of the code object if it is in the
- // cache and -1 otherwise.
- int IndexInCodeCache(Object* name, Code* code);
-
- // Removes a code object from the code cache at the given index.
- void RemoveFromCodeCache(Name* name, Code* code, int index);
+ Code* LookupInCodeCache(Name* name, Code::Flags code);
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
@@ -6145,6 +6257,8 @@ class Map: public HeapObject {
Handle<Object> prototype,
PrototypeOptimizationMode mode);
+ static Handle<Map> TransitionToImmutableProto(Handle<Map> map);
+
static const int kMaxPreAllocatedPropertyFields = 255;
// Layout description.
@@ -6163,10 +6277,10 @@ class Map: public HeapObject {
static const int kDescriptorsOffset =
kTransitionsOrPrototypeInfoOffset + kPointerSize;
#if V8_DOUBLE_FIELDS_UNBOXING
- static const int kLayoutDecriptorOffset = kDescriptorsOffset + kPointerSize;
- static const int kCodeCacheOffset = kLayoutDecriptorOffset + kPointerSize;
+ static const int kLayoutDescriptorOffset = kDescriptorsOffset + kPointerSize;
+ static const int kCodeCacheOffset = kLayoutDescriptorOffset + kPointerSize;
#else
- static const int kLayoutDecriptorOffset = 1; // Must not be ever accessed.
+ static const int kLayoutDescriptorOffset = 1; // Must never be accessed.
static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
#endif
static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
@@ -6215,9 +6329,9 @@ class Map: public HeapObject {
static const int kHasNamedInterceptor = 2;
static const int kHasIndexedInterceptor = 3;
static const int kIsUndetectable = 4;
- static const int kIsObserved = 5;
- static const int kIsAccessCheckNeeded = 6;
- static const int kIsConstructor = 7;
+ static const int kIsAccessCheckNeeded = 5;
+ static const int kIsConstructor = 6;
+ // Bit 7 is free.
// Bit positions for bit field 2
static const int kIsExtensible = 0;
@@ -6397,6 +6511,13 @@ class PrototypeInfo : public Struct {
// [prototype_users]: WeakFixedArray containing maps using this prototype,
// or Smi(0) if uninitialized.
DECL_ACCESSORS(prototype_users, Object)
+
+ // [object_create_map]: A field caching the map for Object.create(prototype).
+ static inline void SetObjectCreateMap(Handle<PrototypeInfo> info,
+ Handle<Map> map);
+ inline Map* ObjectCreateMap();
+ inline bool HasObjectCreateMap();
+
// [registry_slot]: Slot in prototype's user registry where this user
// is stored. Returns UNREGISTERED if this prototype has not been registered.
inline int registry_slot() const;
@@ -6408,6 +6529,11 @@ class PrototypeInfo : public Struct {
// given receiver embed the currently valid cell for that receiver's prototype
// during their compilation and check it on execution.
DECL_ACCESSORS(validity_cell, Object)
+ // [bit_field]
+ inline int bit_field() const;
+ inline void set_bit_field(int bit_field);
+
+ DECL_BOOLEAN_ACCESSORS(should_be_fast_map)
DECLARE_CAST(PrototypeInfo)
@@ -6418,10 +6544,16 @@ class PrototypeInfo : public Struct {
static const int kPrototypeUsersOffset = HeapObject::kHeaderSize;
static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
- static const int kConstructorNameOffset = kValidityCellOffset + kPointerSize;
- static const int kSize = kConstructorNameOffset + kPointerSize;
+ static const int kObjectCreateMap = kValidityCellOffset + kPointerSize;
+ static const int kBitFieldOffset = kObjectCreateMap + kPointerSize;
+ static const int kSize = kBitFieldOffset + kPointerSize;
+
+ // Bit field usage.
+ static const int kShouldBeFastBit = 0;
private:
+ DECL_ACCESSORS(object_create_map, Object)
+
DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
};
@@ -6459,7 +6591,8 @@ class Script: public Struct {
enum Type {
TYPE_NATIVE = 0,
TYPE_EXTENSION = 1,
- TYPE_NORMAL = 2
+ TYPE_NORMAL = 2,
+ TYPE_WASM = 3
};
// Script compilation types.
@@ -6506,9 +6639,10 @@ class Script: public Struct {
// function from which eval was called.
DECL_ACCESSORS(eval_from_shared, Object)
- // [eval_from_instructions_offset]: the instruction offset in the code for the
- // function from which eval was called where eval was called.
- DECL_INT_ACCESSORS(eval_from_instructions_offset)
+ // [eval_from_position]: the source position in the code for the function
+ // from which eval was called, as a positive integer; or the code offset in
+ // the code from which eval was called, as a negative integer.
+ DECL_INT_ACCESSORS(eval_from_position)
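
In other words, the sign of the stored value selects the interpretation; a sketch with hypothetical helper names (position 0 is treated as a source position here):

inline bool StoresSourcePosition(int eval_from_position) {
  return eval_from_position >= 0;  // non-negative: already a source position
}
inline int StoredCodeOffset(int eval_from_position) {
  return -eval_from_position;  // meaningful only when a negative was stored
}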
// [shared_function_infos]: weak fixed array containing all shared
// function infos created from this script.
@@ -6520,9 +6654,17 @@ class Script: public Struct {
// [source_url]: sourceURL from magic comment
DECL_ACCESSORS(source_url, Object)
- // [source_url]: sourceMappingURL magic comment
+ // [source_mapping_url]: sourceMappingURL magic comment
DECL_ACCESSORS(source_mapping_url, Object)
+ // [wasm_object]: the wasm object this script belongs to.
+ // This must only be called if the type of this script is TYPE_WASM.
+ DECL_ACCESSORS(wasm_object, JSObject)
+
+ // [wasm_function_index]: the wasm function index this script belongs to.
+ // This must only be called if the type of this script is TYPE_WASM.
+ DECL_INT_ACCESSORS(wasm_function_index)
+
// [compilation_type]: how the script was compiled. Encoded in the
// 'flags' field.
inline CompilationType compilation_type();
@@ -6550,6 +6692,18 @@ class Script: public Struct {
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
+ static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
+
+ // Set eval origin for stack trace formatting.
+ static void SetEvalOrigin(Handle<Script> script,
+ Handle<SharedFunctionInfo> outer,
+ int eval_position);
+ // Retrieve source position from where eval was called.
+ int GetEvalPosition();
+
+ // Init line_ends array with source code positions of line ends.
+ static void InitLineEnds(Handle<Script> script);
+
// Convert code offset into column number.
static int GetColumnNumber(Handle<Script> script, int code_offset);
@@ -6558,10 +6712,24 @@ class Script: public Struct {
static int GetLineNumber(Handle<Script> script, int code_offset);
int GetLineNumber(int code_pos);
- static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
+ // Carries information about a source position.
+ struct PositionInfo {
+ PositionInfo() : line(-1), column(-1), line_start(-1), line_end(-1) {}
- // Init line_ends array with source code positions of line ends.
- static void InitLineEnds(Handle<Script> script);
+ int line; // Zero-based line number.
+ int column; // Zero-based column number.
+ int line_start; // Position of first character in line.
+ int line_end; // Position of last (non-linebreak) character in line.
+ };
+
+ // Specifies whether to add offsets to position infos.
+ enum OffsetFlag { NO_OFFSET = 0, WITH_OFFSET = 1 };
+
+ // Retrieves information about the given position, optionally with an offset.
+ // Returns false on failure; on success, writes the result into the given
+ // info object.
+ bool GetPositionInfo(int position, PositionInfo* info,
+ OffsetFlag offset_flag);
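
A hedged usage sketch against the declarations above (assuming a v8::internal context where a Script* is at hand): convert an absolute source position into a one-based line/column pair for display.

#include <cstdio>

void PrintPosition(Script* script, int position) {
  Script::PositionInfo info;  // line/column fields are zero-based (see above)
  if (script->GetPositionInfo(position, &info, Script::WITH_OFFSET)) {
    std::printf("line %d, column %d\n", info.line + 1, info.column + 1);
  }
}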
// Get the JS object wrapping the given script; create it if none exists.
static Handle<JSObject> GetWrapper(Handle<Script> script);
@@ -6595,10 +6763,10 @@ class Script: public Struct {
static const int kLineEndsOffset = kTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
- static const int kEvalFrominstructionsOffsetOffset =
+ static const int kEvalFromPositionOffset =
kEvalFromSharedOffset + kPointerSize;
static const int kSharedFunctionInfosOffset =
- kEvalFrominstructionsOffsetOffset + kPointerSize;
+ kEvalFromPositionOffset + kPointerSize;
static const int kFlagsOffset = kSharedFunctionInfosOffset + kPointerSize;
static const int kSourceUrlOffset = kFlagsOffset + kPointerSize;
static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize;
@@ -6641,6 +6809,7 @@ class Script: public Struct {
V(String.prototype, charCodeAt, StringCharCodeAt) \
V(String.prototype, charAt, StringCharAt) \
V(String.prototype, concat, StringConcat) \
+ V(String.prototype, substr, StringSubstr) \
V(String.prototype, toLowerCase, StringToLowerCase) \
V(String.prototype, toUpperCase, StringToUpperCase) \
V(String, fromCharCode, StringFromCharCode) \
@@ -6650,22 +6819,36 @@ class Script: public Struct {
V(Math, ceil, MathCeil) \
V(Math, abs, MathAbs) \
V(Math, log, MathLog) \
+ V(Math, log1p, MathLog1p) \
+ V(Math, log2, MathLog2) \
+ V(Math, log10, MathLog10) \
+ V(Math, cbrt, MathCbrt) \
V(Math, exp, MathExp) \
+ V(Math, expm1, MathExpm1) \
V(Math, sqrt, MathSqrt) \
V(Math, pow, MathPow) \
V(Math, max, MathMax) \
V(Math, min, MathMin) \
V(Math, cos, MathCos) \
+ V(Math, cosh, MathCosh) \
+ V(Math, sign, MathSign) \
V(Math, sin, MathSin) \
+ V(Math, sinh, MathSinh) \
V(Math, tan, MathTan) \
+ V(Math, tanh, MathTanh) \
V(Math, acos, MathAcos) \
+ V(Math, acosh, MathAcosh) \
V(Math, asin, MathAsin) \
+ V(Math, asinh, MathAsinh) \
V(Math, atan, MathAtan) \
V(Math, atan2, MathAtan2) \
+ V(Math, atanh, MathAtanh) \
V(Math, imul, MathImul) \
V(Math, clz32, MathClz32) \
V(Math, fround, MathFround) \
- V(Math, trunc, MathTrunc)
+ V(Math, trunc, MathTrunc) \
+ V(Number, parseInt, NumberParseInt) \
+ V(Number.prototype, toString, NumberToString)
#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
V(Atomics, load, AtomicsLoad) \
@@ -6673,9 +6856,6 @@ class Script: public Struct {
enum BuiltinFunctionId {
kArrayCode,
- kGeneratorObjectNext,
- kGeneratorObjectReturn,
- kGeneratorObjectThrow,
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
@@ -6683,7 +6863,22 @@ enum BuiltinFunctionId {
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note, it continues the
// list of math functions.
- kMathPowHalf
+ kMathPowHalf,
+ // These are manually assigned to special getters during bootstrapping.
+ kArrayBufferByteLength,
+ kDataViewBuffer,
+ kDataViewByteLength,
+ kDataViewByteOffset,
+ kGlobalDecodeURI,
+ kGlobalDecodeURIComponent,
+ kGlobalEncodeURI,
+ kGlobalEncodeURIComponent,
+ kGlobalEscape,
+ kGlobalUnescape,
+ kTypedArrayByteLength,
+ kTypedArrayByteOffset,
+ kTypedArrayLength,
+ kSharedArrayBufferByteLength,
};
@@ -6710,6 +6905,7 @@ class SharedFunctionInfo: public HeapObject {
inline AbstractCode* abstract_code();
inline void ReplaceCode(Code* code);
+ inline bool HasBaselineCode() const;
// [optimized_code_map]: Map from native context to optimized code
// and a shared literals array.
@@ -6725,6 +6921,9 @@ class SharedFunctionInfo: public HeapObject {
// Clear optimized code map.
void ClearOptimizedCodeMap();
+ // Like ClearOptimizedCodeMap, but preserves literals.
+ void ClearCodeFromOptimizedCodeMap();
+
// We have a special root FixedArray with the right shape and values
// to represent the cleared optimized code map. This predicate checks
// if that root is installed.
@@ -6738,6 +6937,9 @@ class SharedFunctionInfo: public HeapObject {
// Trims the optimized code map after entries have been removed.
void TrimOptimizedCodeMap(int shrink_by);
+ static Handle<LiteralsArray> FindOrCreateLiterals(
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
+
// Add or update entry in the optimized code map for context-independent code.
static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
@@ -6767,14 +6969,30 @@ class SharedFunctionInfo: public HeapObject {
static const int kNotFound = -1;
+ // Helpers for assembly code that does a backwards walk of the optimized code
+ // map.
+ static const int kOffsetToPreviousContext =
+ FixedArray::kHeaderSize + kPointerSize * (kContextOffset - kEntryLength);
+ static const int kOffsetToPreviousCachedCode =
+ FixedArray::kHeaderSize +
+ kPointerSize * (kCachedCodeOffset - kEntryLength);
+ static const int kOffsetToPreviousLiterals =
+ FixedArray::kHeaderSize + kPointerSize * (kLiteralsOffset - kEntryLength);
+ static const int kOffsetToPreviousOsrAstId =
+ FixedArray::kHeaderSize + kPointerSize * (kOsrAstIdOffset - kEntryLength);
+
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
+ // Sets the given code as the construct stub, and marks builtin code objects
+ // as construct stubs.
+ void SetConstructStub(Code* code);
+
// Returns whether this function has been compiled to native code yet.
- inline bool is_compiled();
+ inline bool is_compiled() const;
// [length]: The function length - usually the number of declared parameters.
// Use up to 2^30 parameters.
@@ -6795,16 +7013,10 @@ class SharedFunctionInfo: public HeapObject {
inline int expected_nof_properties() const;
inline void set_expected_nof_properties(int value);
- // [feedback_vector] - accumulates ast node feedback from full-codegen and
+ // [feedback_metadata] - describes ast node feedback from full-codegen and
// (increasingly) from crankshafted code where sufficient feedback isn't
// available.
- DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
-
- // Unconditionally clear the type feedback vector (including vector ICs).
- void ClearTypeFeedbackInfo();
-
- // Clear the type feedback vector with a more subtle policy at GC time.
- void ClearTypeFeedbackInfoAtGCTime();
+ DECL_ACCESSORS(feedback_metadata, TypeFeedbackMetadata)
#if TRACE_MAPS
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
@@ -6820,6 +7032,7 @@ class SharedFunctionInfo: public HeapObject {
// Currently it has one of:
// - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
// - a BytecodeArray for the interpreter [HasBytecodeArray()].
+ // - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
DECL_ACCESSORS(function_data, Object)
inline bool IsApiFunction();
@@ -6829,6 +7042,10 @@ class SharedFunctionInfo: public HeapObject {
inline BytecodeArray* bytecode_array();
inline void set_bytecode_array(BytecodeArray* bytecode);
inline void ClearBytecodeArray();
+ inline bool HasAsmWasmData();
+ inline FixedArray* asm_wasm_data();
+ inline void set_asm_wasm_data(FixedArray* data);
+ inline void ClearAsmWasmData();
// [function identifier]: This field holds an additional identifier for the
// function.
@@ -6977,6 +7194,13 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that this function is a generator.
DECL_BOOLEAN_ACCESSORS(is_generator)
+ // Indicates that this function is an async function.
+ DECL_BOOLEAN_ACCESSORS(is_async)
+
+ // Indicates that this function can be suspended, either via YieldExpressions
+ // or AwaitExpressions.
+ inline bool is_resumable() const;
+
// Indicates that this function is an arrow function.
DECL_BOOLEAN_ACCESSORS(is_arrow)
@@ -7004,6 +7228,9 @@ class SharedFunctionInfo: public HeapObject {
// Whether this function was created from a FunctionDeclaration.
DECL_BOOLEAN_ACCESSORS(is_declaration)
+ // Indicates that asm->wasm conversion failed and should not be re-attempted.
+ DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
+
inline FunctionKind kind();
inline void set_kind(FunctionKind kind);
@@ -7116,15 +7343,15 @@ class SharedFunctionInfo: public HeapObject {
static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
static const int kFunctionIdentifierOffset = kDebugInfoOffset + kPointerSize;
- static const int kFeedbackVectorOffset =
+ static const int kFeedbackMetadataOffset =
kFunctionIdentifierOffset + kPointerSize;
#if TRACE_MAPS
- static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
+ static const int kUniqueIdOffset = kFeedbackMetadataOffset + kPointerSize;
static const int kLastPointerFieldOffset = kUniqueIdOffset;
#else
// Just to not break the postmortem support with conditional offsets
- static const int kUniqueIdOffset = kFeedbackVectorOffset;
- static const int kLastPointerFieldOffset = kFeedbackVectorOffset;
+ static const int kUniqueIdOffset = kFeedbackMetadataOffset;
+ static const int kLastPointerFieldOffset = kFeedbackMetadataOffset;
#endif
#if V8_HOST_ARCH_32_BIT
@@ -7273,12 +7500,12 @@ class SharedFunctionInfo: public HeapObject {
kIsGetterFunction,
kIsSetterFunction,
// byte 3
+ kIsAsyncFunction,
kDeserialized,
kIsDeclaration,
+ kIsAsmWasmBroken,
kCompilerHintsCount, // Pseudo entry
};
- // Add hints for other modes when they're added.
- STATIC_ASSERT(LANGUAGE_END == 3);
// kFunctionKind has to be byte-aligned
STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
// Make sure that FunctionKind and byte 2 are in sync:
@@ -7295,7 +7522,7 @@ class SharedFunctionInfo: public HeapObject {
ASSERT_FUNCTION_KIND_ORDER(kSetterFunction, kIsSetterFunction);
#undef ASSERT_FUNCTION_KIND_ORDER
- class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 8> {};
+ class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 9> {};
class DeoptCountBits : public BitField<int, 0, 4> {};
class OptReenableTriesBits : public BitField<int, 4, 18> {};
@@ -7324,16 +7551,32 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrictModeBit =
kStrictModeFunction + kCompilerHintsSmiTagSize;
static const int kNativeBit = kNative + kCompilerHintsSmiTagSize;
-
+ static const int kHasDuplicateParametersBit =
+ kHasDuplicateParameters + kCompilerHintsSmiTagSize;
+
+ static const int kIsArrowBit = kIsArrow + kCompilerHintsSmiTagSize;
+ static const int kIsGeneratorBit = kIsGenerator + kCompilerHintsSmiTagSize;
+ static const int kIsConciseMethodBit =
+ kIsConciseMethod + kCompilerHintsSmiTagSize;
+ static const int kIsAsyncFunctionBit =
+ kIsAsyncFunction + kCompilerHintsSmiTagSize;
+
+ static const int kAccessorFunctionBits =
+ FunctionKind::kAccessorFunction
+ << (kFunctionKind + kCompilerHintsSmiTagSize);
static const int kClassConstructorBits =
FunctionKind::kClassConstructor
<< (kFunctionKind + kCompilerHintsSmiTagSize);
+ static const int kFunctionKindMaskBits = FunctionKindBits::kMask
+ << kCompilerHintsSmiTagSize;
// Constants for optimizing codegen for strict mode function and
// native tests.
// Allows to use byte-width instructions.
static const int kStrictModeBitWithinByte = kStrictModeBit % kBitsPerByte;
static const int kNativeBitWithinByte = kNativeBit % kBitsPerByte;
+ static const int kHasDuplicateParametersBitWithinByte =
+ kHasDuplicateParametersBit % kBitsPerByte;
static const int kClassConstructorBitsWithinByte =
FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
@@ -7353,6 +7596,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kStrictModeByteOffset = BYTE_OFFSET(kStrictModeFunction);
static const int kNativeByteOffset = BYTE_OFFSET(kNative);
static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
+ static const int kHasDuplicateParametersByteOffset =
+ BYTE_OFFSET(kHasDuplicateParameters);
#undef BYTE_OFFSET
private:
@@ -7389,19 +7634,32 @@ class JSGeneratorObject: public JSObject {
// [receiver]: The receiver of the suspended computation.
DECL_ACCESSORS(receiver, Object)
- // [input]: The most recent input value.
- DECL_ACCESSORS(input, Object)
+ // [input_or_debug_pos]
+ // For executing generators: the most recent input value.
+ // For suspended new-style generators: debug information (bytecode offset).
+ // For suspended old-style generators: unused.
+ // There is currently no need to remember the most recent input value for a
+ // suspended generator.
+ DECL_ACCESSORS(input_or_debug_pos, Object)
+
+ // [resume_mode]: The most recent resume mode.
+ enum ResumeMode { kNext, kReturn, kThrow };
+ DECL_INT_ACCESSORS(resume_mode)
- // [continuation]: Offset into code of continuation.
+ // [continuation]
//
- // A positive offset indicates a suspended generator. The special
+ // A positive value indicates a suspended generator. The special
// kGeneratorExecuting and kGeneratorClosed values indicate that a generator
// cannot be resumed.
inline int continuation() const;
inline void set_continuation(int continuation);
- inline bool is_closed();
- inline bool is_executing();
- inline bool is_suspended();
+ inline bool is_closed() const;
+ inline bool is_executing() const;
+ inline bool is_suspended() const;
+
+ // For suspended generators: the source position at which the generator
+ // is suspended.
+ int source_position() const;
// [operand_stack]: Saved operand stack.
DECL_ACCESSORS(operand_stack, FixedArray)
@@ -7409,64 +7667,30 @@ class JSGeneratorObject: public JSObject {
DECLARE_CAST(JSGeneratorObject)
// Dispatched behavior.
- DECLARE_PRINTER(JSGeneratorObject)
DECLARE_VERIFIER(JSGeneratorObject)
// Magic sentinel values for the continuation.
- static const int kGeneratorExecuting = -1;
- static const int kGeneratorClosed = 0;
+ static const int kGeneratorExecuting = -2;
+ static const int kGeneratorClosed = -1;
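
Illustrative predicates implied by the sentinels above (a sketch, not the V8 implementation): the two negative sentinels mark non-resumable states, and a non-negative continuation is the stored suspension point.

inline bool IsExecuting(int continuation) { return continuation == -2; }  // kGeneratorExecuting
inline bool IsClosed(int continuation) { return continuation == -1; }     // kGeneratorClosed
inline bool IsSuspended(int continuation) { return continuation >= 0; }   // suspend offset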
// Layout description.
static const int kFunctionOffset = JSObject::kHeaderSize;
static const int kContextOffset = kFunctionOffset + kPointerSize;
static const int kReceiverOffset = kContextOffset + kPointerSize;
- static const int kInputOffset = kReceiverOffset + kPointerSize;
- static const int kContinuationOffset = kInputOffset + kPointerSize;
+ static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
+ static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
+ static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
static const int kSize = kOperandStackOffset + kPointerSize;
- // Resume mode, for use by runtime functions.
- enum ResumeMode { NEXT, RETURN, THROW };
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
};
-// Representation for module instance objects.
-class JSModule: public JSObject {
- public:
- // [context]: the context holding the module's locals, or undefined if none.
- DECL_ACCESSORS(context, Object)
-
- // [scope_info]: Scope info.
- DECL_ACCESSORS(scope_info, ScopeInfo)
-
- DECLARE_CAST(JSModule)
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSModule)
- DECLARE_VERIFIER(JSModule)
-
- // Layout description.
- static const int kContextOffset = JSObject::kHeaderSize;
- static const int kScopeInfoOffset = kContextOffset + kPointerSize;
- static const int kSize = kScopeInfoOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSModule);
-};
-
-
// JSBoundFunction describes a bound function exotic object.
class JSBoundFunction : public JSObject {
public:
- // [length]: The bound function "length" property.
- DECL_ACCESSORS(length, Object)
-
- // [name]: The bound function "name" property.
- DECL_ACCESSORS(name, Object)
-
// [bound_target_function]: The wrapped function object.
DECL_ACCESSORS(bound_target_function, JSReceiver)
@@ -7478,6 +7702,8 @@ class JSBoundFunction : public JSObject {
// arguments to any call to the wrapped function.
DECL_ACCESSORS(bound_arguments, FixedArray)
+ static MaybeHandle<String> GetName(Isolate* isolate,
+ Handle<JSBoundFunction> function);
static MaybeHandle<Context> GetFunctionRealm(
Handle<JSBoundFunction> function);
@@ -7491,20 +7717,11 @@ class JSBoundFunction : public JSObject {
// to ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSBoundFunction> function);
- static MaybeHandle<String> GetName(Isolate* isolate,
- Handle<JSBoundFunction> function);
-
// Layout description.
static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
- static const int kLengthOffset = kBoundArgumentsOffset + kPointerSize;
- static const int kNameOffset = kLengthOffset + kPointerSize;
- static const int kSize = kNameOffset + kPointerSize;
-
- // Indices of in-object properties.
- static const int kLengthIndex = 0;
- static const int kNameIndex = 1;
+ static const int kSize = kBoundArgumentsOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSBoundFunction);
@@ -7521,12 +7738,18 @@ class JSFunction: public JSObject {
// can be shared by instances.
DECL_ACCESSORS(shared, SharedFunctionInfo)
+ static const int kLengthDescriptorIndex = 0;
+ static const int kNameDescriptorIndex = 1;
+
// [context]: The context for this function.
inline Context* context();
inline void set_context(Object* context);
inline JSObject* global_proxy();
inline Context* native_context();
+ static Handle<Object> GetName(Isolate* isolate, Handle<JSFunction> function);
+ static MaybeHandle<Smi> GetLength(Isolate* isolate,
+ Handle<JSFunction> function);
static Handle<Context> GetFunctionRealm(Handle<JSFunction> function);
// [code]: The generated code object for this function. Executed
@@ -7548,13 +7771,14 @@ class JSFunction: public JSObject {
// Tells whether or not this function has been optimized.
inline bool IsOptimized();
- // Mark this function for lazy recompilation. The function will be
- // recompiled the next time it is executed.
+ // Mark this function for lazy recompilation. The function will be recompiled
+ // the next time it is executed.
+ void MarkForBaseline();
void MarkForOptimization();
void AttemptConcurrentOptimization();
- // Tells whether or not the function is already marked for lazy
- // recompilation.
+ // Tells whether or not the function is already marked for lazy recompilation.
+ inline bool IsMarkedForBaseline();
inline bool IsMarkedForOptimization();
inline bool IsMarkedForConcurrentOptimization();
@@ -7572,9 +7796,18 @@ class JSFunction: public JSObject {
// necessary so that we do not dynamically lookup the object, regexp
// or array functions. Performing a dynamic lookup, we might end up
// using the functions from a new context that we should not have
- // access to.
+ // access to. For API objects we store the boilerplate in the literal array.
DECL_ACCESSORS(literals, LiteralsArray)
+ static void EnsureLiterals(Handle<JSFunction> function);
+ inline TypeFeedbackVector* feedback_vector();
+
+ // Unconditionally clear the type feedback vector (including vector ICs).
+ void ClearTypeFeedbackInfo();
+
+ // Clear the type feedback vector with a more subtle policy at GC time.
+ void ClearTypeFeedbackInfoAtGCTime();
+
// The initial map for an object created by this constructor.
inline Map* initial_map();
static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
@@ -7658,9 +7891,6 @@ class JSFunction: public JSObject {
DECLARE_PRINTER(JSFunction)
DECLARE_VERIFIER(JSFunction)
- // Returns the number of allocated literals.
- inline int NumberOfLiterals();
-
// The function's name if it is configured, otherwise shared function info
// debug name.
static Handle<String> GetName(Handle<JSFunction> function);
@@ -7745,8 +7975,9 @@ class JSGlobalObject : public JSObject {
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
// Ensure that the global object has a cell for the given property name.
- static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
- Handle<Name> name);
+ static Handle<PropertyCell> EnsureEmptyPropertyCell(
+ Handle<JSGlobalObject> global, Handle<Name> name,
+ PropertyCellType cell_type, int* entry_out = nullptr);
DECLARE_CAST(JSGlobalObject)
@@ -7922,6 +8153,15 @@ class JSMessageObject: public JSObject {
inline int end_position() const;
inline void set_end_position(int value);
+ int GetLineNumber() const;
+
+ // Returns the offset of the given position within the containing line.
+ int GetColumnNumber() const;
+
+ // Returns the source code line containing the given source
+ // position, or the empty string if the position is invalid.
+ Handle<String> GetSourceLine() const;
+
DECLARE_CAST(JSMessageObject)
// Dispatched behavior.
@@ -7942,7 +8182,6 @@ class JSMessageObject: public JSObject {
kSize> BodyDescriptor;
};
-
// Regular expressions
// The regular expression holds a single reference to a FixedArray in
// the kDataOffset field.
@@ -7965,7 +8204,6 @@ class JSRegExp: public JSObject {
// NOT_COMPILED: Initial value. No data has been stored in the JSRegExp yet.
// ATOM: A simple string to match against using an indexOf operation.
// IRREGEXP: Compiled with Irregexp.
- // IRREGEXP_NATIVE: Compiled to native code with Irregexp.
enum Type { NOT_COMPILED, ATOM, IRREGEXP };
enum Flag {
kNone = 0,
@@ -7982,7 +8220,6 @@ class JSRegExp: public JSObject {
DECL_ACCESSORS(source, Object)
static MaybeHandle<JSRegExp> New(Handle<String> source, Flags flags);
- static MaybeHandle<JSRegExp> New(Handle<String> source, Handle<String> flags);
static Handle<JSRegExp> Copy(Handle<JSRegExp> regexp);
static MaybeHandle<JSRegExp> Initialize(Handle<JSRegExp> regexp,
@@ -8059,8 +8296,11 @@ class JSRegExp: public JSObject {
static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
// Number of captures in the compiled regexp.
static const int kIrregexpCaptureCountIndex = kDataIndex + 5;
+ // Maps names of named capture groups (at indices 2i) to their corresponding
+ // capture group indices (at indices 2i + 1).
+ static const int kIrregexpCaptureNameMapIndex = kDataIndex + 6;
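
A standalone sketch of a lookup over that flat layout (std::string and int stand in for the heap-allocated names and Smi indices; a hypothetical helper, not the Irregexp code):

#include <string>
#include <variant>
#include <vector>

using NameMap = std::vector<std::variant<std::string, int>>;  // [name0, idx0, name1, idx1, ...]

int LookupNamedCapture(const NameMap& map, const std::string& name) {
  for (std::size_t i = 0; i + 1 < map.size(); i += 2) {
    if (std::get<std::string>(map[i]) == name) {
      return std::get<int>(map[i + 1]);  // capture group index at 2i + 1
    }
  }
  return -1;  // no capture group with that name
}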
- static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
+ static const int kIrregexpDataSize = kIrregexpCaptureNameMapIndex + 1;
// Offsets directly into the data fixed array.
static const int kDataTagOffset =
@@ -8157,57 +8397,6 @@ class CompilationCacheTable: public HashTable<CompilationCacheTable,
};
-class CodeCache: public Struct {
- public:
- DECL_ACCESSORS(default_cache, FixedArray)
- DECL_ACCESSORS(normal_type_cache, Object)
-
- // Add the code object to the cache.
- static void Update(
- Handle<CodeCache> cache, Handle<Name> name, Handle<Code> code);
-
- // Lookup code object in the cache. Returns code object if found and undefined
- // if not.
- Object* Lookup(Name* name, Code::Flags flags);
-
- // Get the internal index of a code object in the cache. Returns -1 if the
- // code object is not in that cache. This index can be used to later call
- // RemoveByIndex. The cache cannot be modified between a call to GetIndex and
- // RemoveByIndex.
- int GetIndex(Object* name, Code* code);
-
- // Remove an object from the cache with the provided internal index.
- void RemoveByIndex(Object* name, Code* code, int index);
-
- DECLARE_CAST(CodeCache)
-
- // Dispatched behavior.
- DECLARE_PRINTER(CodeCache)
- DECLARE_VERIFIER(CodeCache)
-
- static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
- static const int kNormalTypeCacheOffset =
- kDefaultCacheOffset + kPointerSize;
- static const int kSize = kNormalTypeCacheOffset + kPointerSize;
-
- private:
- static void UpdateDefaultCache(
- Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code);
- static void UpdateNormalTypeCache(
- Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code);
- Object* LookupDefaultCache(Name* name, Code::Flags flags);
- Object* LookupNormalTypeCache(Name* name, Code::Flags flags);
-
- // Code cache layout of the default cache. Elements are alternating name and
- // code objects for non normal load/store/call IC's.
- static const int kCodeCacheEntrySize = 2;
- static const int kCodeCacheEntryNameOffset = 0;
- static const int kCodeCacheEntryCodeOffset = 1;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCache);
-};
-
-
class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
public:
static inline bool IsMatch(HashTableKey* key, Object* value) {
@@ -8225,7 +8414,11 @@ class CodeCacheHashTableShape : public BaseShape<HashTableKey*> {
static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
+ // Both the key (name + flags) and the value (code object) can be derived
+ // from the fixed array that stores both the name and the code.
+ // TODO(verwaest): Don't allocate a fixed array but inline name and code.
+ // Rewrite IsMatch to get table + index as input rather than just the raw key.
+ static const int kEntrySize = 1;
};
@@ -8233,73 +8426,20 @@ class CodeCacheHashTable: public HashTable<CodeCacheHashTable,
CodeCacheHashTableShape,
HashTableKey*> {
public:
- Object* Lookup(Name* name, Code::Flags flags);
static Handle<CodeCacheHashTable> Put(
Handle<CodeCacheHashTable> table,
Handle<Name> name,
Handle<Code> code);
- int GetIndex(Name* name, Code::Flags flags);
- void RemoveByIndex(int index);
+ Code* Lookup(Name* name, Code::Flags flags);
DECLARE_CAST(CodeCacheHashTable)
- // Initial size of the fixed array backing the hash table.
- static const int kInitialSize = 64;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CodeCacheHashTable);
};
-class PolymorphicCodeCache: public Struct {
- public:
- DECL_ACCESSORS(cache, Object)
-
- static void Update(Handle<PolymorphicCodeCache> cache,
- MapHandleList* maps,
- Code::Flags flags,
- Handle<Code> code);
-
-
- // Returns an undefined value if the entry is not found.
- Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
-
- DECLARE_CAST(PolymorphicCodeCache)
-
- // Dispatched behavior.
- DECLARE_PRINTER(PolymorphicCodeCache)
- DECLARE_VERIFIER(PolymorphicCodeCache)
-
- static const int kCacheOffset = HeapObject::kHeaderSize;
- static const int kSize = kCacheOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCache);
-};
-
-
-class PolymorphicCodeCacheHashTable
- : public HashTable<PolymorphicCodeCacheHashTable,
- CodeCacheHashTableShape,
- HashTableKey*> {
- public:
- Object* Lookup(MapHandleList* maps, int code_kind);
-
- static Handle<PolymorphicCodeCacheHashTable> Put(
- Handle<PolymorphicCodeCacheHashTable> hash_table,
- MapHandleList* maps,
- int code_kind,
- Handle<Code> code);
-
- DECLARE_CAST(PolymorphicCodeCacheHashTable)
-
- static const int kInitialSize = 64;
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PolymorphicCodeCacheHashTable);
-};
-
-
class TypeFeedbackInfo: public Struct {
public:
inline int ic_total_count();
@@ -8685,6 +8825,8 @@ class Name: public HeapObject {
// Return a string version of this name that is converted according to the
// rules described in ES6 section 9.2.11.
MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(Handle<Name> name);
+ MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(
+ Handle<Name> name, Handle<String> prefix);
DECLARE_CAST(Name)
@@ -8722,6 +8864,10 @@ class Name: public HeapObject {
// Array index strings this short can keep their index in the hash field.
static const int kMaxCachedArrayIndexLength = 7;
+ // Maximum number of characters to consider when trying to convert a string
+ // value into an array index.
+ static const int kMaxArrayIndexSize = 10;
+
// For strings which are array indexes the hash value has the string length
// mixed into the hash, mainly to avoid a hash value of zero which would be
// the case for the string '0'. 24 bits are used for the array index value.
@@ -8729,7 +8875,8 @@ class Name: public HeapObject {
static const int kArrayIndexLengthBits =
kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
- STATIC_ASSERT((kArrayIndexLengthBits > 0));
+ STATIC_ASSERT(kArrayIndexLengthBits > 0);
+ STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
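
A worked check of the bound above, assuming values defined elsewhere in this class (kBitsPerInt = 32, kArrayIndexValueBits = 24, kNofHashBitFields = 2): that leaves 6 length bits, and kMaxArrayIndexSize is 10 because the largest array index, 2^32 - 2 = 4294967294, has ten decimal digits.

// Assumed values: kBitsPerInt = 32, kArrayIndexValueBits = 24,
// kNofHashBitFields = 2  =>  kArrayIndexLengthBits = 32 - 24 - 2 = 6.
static_assert(32 - 24 - 2 == 6, "six length bits remain in the hash field");
static_assert(10 < (1 << 6), "a ten-digit length fits in six length bits");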
class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields,
kArrayIndexValueBits> {}; // NOLINT
@@ -8819,34 +8966,6 @@ class String: public Name {
public:
enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
- // Array index strings this short can keep their index in the hash field.
- static const int kMaxCachedArrayIndexLength = 7;
-
- // For strings which are array indexes the hash value has the string length
- // mixed into the hash, mainly to avoid a hash value of zero which would be
- // the case for the string '0'. 24 bits are used for the array index value.
- static const int kArrayIndexValueBits = 24;
- static const int kArrayIndexLengthBits =
- kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
-
- STATIC_ASSERT((kArrayIndexLengthBits > 0));
-
- class ArrayIndexValueBits : public BitField<unsigned int, kNofHashBitFields,
- kArrayIndexValueBits> {}; // NOLINT
- class ArrayIndexLengthBits : public BitField<unsigned int,
- kNofHashBitFields + kArrayIndexValueBits,
- kArrayIndexLengthBits> {}; // NOLINT
-
- // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
- // could use a mask to test if the length of string is less than or equal to
- // kMaxCachedArrayIndexLength.
- STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
-
- static const unsigned int kContainsCachedArrayIndexMask =
- (~static_cast<unsigned>(kMaxCachedArrayIndexLength)
- << ArrayIndexLengthBits::kShift) |
- kIsNotArrayIndexMask;
-
class SubStringRange {
public:
explicit inline SubStringRange(String* string, int first = 0,
@@ -8869,26 +8988,26 @@ class String: public Name {
class FlatContent {
public:
// Returns true if the string is flat and this structure contains content.
- bool IsFlat() { return state_ != NON_FLAT; }
+ bool IsFlat() const { return state_ != NON_FLAT; }
// Returns true if the structure contains one-byte content.
- bool IsOneByte() { return state_ == ONE_BYTE; }
+ bool IsOneByte() const { return state_ == ONE_BYTE; }
// Returns true if the structure contains two-byte content.
- bool IsTwoByte() { return state_ == TWO_BYTE; }
+ bool IsTwoByte() const { return state_ == TWO_BYTE; }
// Return the one byte content of the string. Only use if IsOneByte()
// returns true.
- Vector<const uint8_t> ToOneByteVector() {
+ Vector<const uint8_t> ToOneByteVector() const {
DCHECK_EQ(ONE_BYTE, state_);
return Vector<const uint8_t>(onebyte_start, length_);
}
// Return the two-byte content of the string. Only use if IsTwoByte()
// returns true.
- Vector<const uc16> ToUC16Vector() {
+ Vector<const uc16> ToUC16Vector() const {
DCHECK_EQ(TWO_BYTE, state_);
return Vector<const uc16>(twobyte_start, length_);
}
- uc16 Get(int i) {
+ uc16 Get(int i) const {
DCHECK(i < length_);
DCHECK(state_ != NON_FLAT);
if (state_ == ONE_BYTE) return onebyte_start[i];
@@ -8999,6 +9118,11 @@ class String: public Name {
MUST_USE_RESULT static ComparisonResult Compare(Handle<String> x,
Handle<String> y);
+ // Perform string match of pattern on subject, starting at start index.
+ // Caller must ensure that 0 <= start_index <= sub->length().
+ static int IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
+ int start_index);
+
// String equality operations.
inline bool Equals(String* other);
inline static bool Equals(Handle<String> one, Handle<String> two);
@@ -9014,24 +9138,14 @@ class String: public Name {
  // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust. This means it
// handles unexpected data without causing assert failures and it does not
// do any heap allocations. This is useful when printing stack traces.
- base::SmartArrayPointer<char> ToCString(AllowNullsFlag allow_nulls,
- RobustnessFlag robustness_flag,
- int offset, int length,
- int* length_output = 0);
- base::SmartArrayPointer<char> ToCString(
+ std::unique_ptr<char[]> ToCString(AllowNullsFlag allow_nulls,
+ RobustnessFlag robustness_flag, int offset,
+ int length, int* length_output = 0);
+ std::unique_ptr<char[]> ToCString(
AllowNullsFlag allow_nulls = DISALLOW_NULLS,
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
int* length_output = 0);
- // Return a 16 bit Unicode representation of the string.
- // The string should be nearly flat, otherwise the performance of
- // of this method may be very bad. Setting robustness_flag to
- // ROBUST_STRING_TRAVERSAL invokes behaviour that is robust This means it
- // handles unexpected data without causing assert failures and it does not
- // do any heap allocations. This is useful when printing stack traces.
- base::SmartArrayPointer<uc16> ToWideCString(
- RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
-
bool ComputeArrayIndex(uint32_t* index);
// Externalization.
@@ -9041,6 +9155,10 @@ class String: public Name {
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
+ // Trimming.
+ enum TrimMode { kTrim, kTrimLeft, kTrimRight };
+ static Handle<String> Trim(Handle<String> string, TrimMode mode);
+
DECLARE_CAST(String)
void PrintOn(FILE* out);
@@ -9049,7 +9167,7 @@ class String: public Name {
bool LooksValid();
// Dispatched behavior.
- void StringShortPrint(StringStream* accumulator);
+ void StringShortPrint(StringStream* accumulator, bool show_details = true);
void PrintUC16(std::ostream& os, int start = 0, int end = -1); // NOLINT
#if defined(DEBUG) || defined(OBJECT_PRINT)
char* ToAsciiArray();
@@ -9063,11 +9181,6 @@ class String: public Name {
static const int kLengthOffset = Name::kSize;
static const int kSize = kLengthOffset + kPointerSize;
- // Maximum number of characters to consider when trying to convert a string
- // value into an array index.
- static const int kMaxArrayIndexSize = 10;
- STATIC_ASSERT(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
-
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
@@ -9606,15 +9719,16 @@ class VectorIterator {
// The Oddball describes objects null, undefined, true, and false.
class Oddball: public HeapObject {
public:
+ // [to_number_raw]: Cached raw to_number computed at startup.
+ inline double to_number_raw() const;
+ inline void set_to_number_raw(double value);
+
// [to_string]: Cached to_string computed at startup.
DECL_ACCESSORS(to_string, String)
// [to_number]: Cached to_number computed at startup.
DECL_ACCESSORS(to_number, Object)
- // [to_number]: Cached to_boolean computed at startup.
- DECL_ACCESSORS(to_boolean, Oddball)
-
// [typeof]: Cached type_of computed at startup.
DECL_ACCESSORS(type_of, String)
@@ -9632,15 +9746,15 @@ class Oddball: public HeapObject {
// Initialize the fields.
static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
- bool to_boolean, const char* type_of, byte kind);
+ const char* type_of, byte kind);
// Layout description.
- static const int kToStringOffset = HeapObject::kHeaderSize;
+ static const int kToNumberRawOffset = HeapObject::kHeaderSize;
+ static const int kToStringOffset = kToNumberRawOffset + kDoubleSize;
static const int kToNumberOffset = kToStringOffset + kPointerSize;
- static const int kToBooleanOffset = kToNumberOffset + kPointerSize;
- static const int kKindOffset = kToBooleanOffset + kPointerSize;
- static const int kTypeOfOffset = kKindOffset + kPointerSize;
- static const int kSize = kTypeOfOffset + kPointerSize;
+ static const int kTypeOfOffset = kToNumberOffset + kPointerSize;
+ static const int kKindOffset = kTypeOfOffset + kPointerSize;
+ static const int kSize = kKindOffset + kPointerSize;
static const byte kFalse = 0;
static const byte kTrue = 1;
@@ -9653,10 +9767,12 @@ class Oddball: public HeapObject {
static const byte kOther = 7;
static const byte kException = 8;
static const byte kOptimizedOut = 9;
+ static const byte kStaleRegister = 10;
typedef FixedBodyDescriptor<kToStringOffset, kTypeOfOffset + kPointerSize,
kSize> BodyDescriptor;
+ STATIC_ASSERT(kToNumberRawOffset == HeapNumber::kValueOffset);
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
STATIC_ASSERT(kNull == Internals::kNullOddballKind);
STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind);
@@ -9719,8 +9835,12 @@ class PropertyCell : public HeapObject {
static PropertyCellType UpdatedType(Handle<PropertyCell> cell,
Handle<Object> value,
PropertyDetails details);
- static void UpdateCell(Handle<GlobalDictionary> dictionary, int entry,
- Handle<Object> value, PropertyDetails details);
+ // Prepares property cell at given entry for receiving given value.
+ // As a result the old cell could be invalidated and/or dependent code could
+ // be deoptimized. Returns the prepared property cell.
+ static Handle<PropertyCell> PrepareForValue(
+ Handle<GlobalDictionary> dictionary, int entry, Handle<Object> value,
+ PropertyDetails details);
static Handle<PropertyCell> InvalidateEntry(
Handle<GlobalDictionary> dictionary, int entry);
@@ -9875,10 +9995,9 @@ class JSProxy: public JSReceiver {
typedef FixedBodyDescriptor<JSReceiver::kPropertiesOffset, kSize, kSize>
BodyDescriptor;
- static Handle<Object> GetIdentityHash(Isolate* isolate,
- Handle<JSProxy> receiver);
+ static Object* GetIdentityHash(Handle<JSProxy> receiver);
- static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
+ static Smi* GetOrCreateIdentityHash(Isolate* isolate, Handle<JSProxy> proxy);
static Maybe<bool> SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
@@ -10046,6 +10165,8 @@ class JSMapIterator: public OrderedHashTableIterator<JSMapIterator,
// Base class for both JSWeakMap and JSWeakSet
class JSWeakCollection: public JSObject {
public:
+ DECLARE_CAST(JSWeakCollection)
+
// [table]: the backing hash table mapping keys to values.
DECL_ACCESSORS(table, Object)
@@ -10322,9 +10443,6 @@ class JSArray: public JSObject {
inline bool AllowsSetLength();
static void SetLength(Handle<JSArray> array, uint32_t length);
- // Same as above but will also queue splice records if |array| is observed.
- static MaybeHandle<Object> ObservableSetLength(Handle<JSArray> array,
- uint32_t length);
// Set the content of the array to the content of storage.
static inline void SetContent(Handle<JSArray> array,
@@ -10413,10 +10531,18 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(name, Object)
DECL_INT_ACCESSORS(flag)
DECL_ACCESSORS(expected_receiver_type, Object)
+ // This directly points at a foreign C function to be used from the runtime.
DECL_ACCESSORS(getter, Object)
DECL_ACCESSORS(setter, Object)
+  // This either points at the same entry as above, or at a trampoline in
+  // case we are running with the simulator. Use these entries from
+  // generated code.
+ DECL_ACCESSORS(js_getter, Object)
DECL_ACCESSORS(data, Object)
+ static Address redirect(Isolate* isolate, Address address,
+ AccessorComponent component);
+ Address redirected_getter() const;
+
// Dispatched behavior.
DECLARE_PRINTER(AccessorInfo)
@@ -10455,9 +10581,10 @@ class AccessorInfo: public Struct {
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
- static const int kGetterOffset = kExpectedReceiverTypeOffset + kPointerSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kDataOffset = kSetterOffset + kPointerSize;
+ static const int kSetterOffset = kExpectedReceiverTypeOffset + kPointerSize;
+ static const int kGetterOffset = kSetterOffset + kPointerSize;
+ static const int kJsGetterOffset = kGetterOffset + kPointerSize;
+ static const int kDataOffset = kJsGetterOffset + kPointerSize;
static const int kSize = kDataOffset + kPointerSize;
@@ -10527,9 +10654,9 @@ class AccessorPair: public Struct {
class AccessCheckInfo: public Struct {
public:
- DECL_ACCESSORS(named_callback, Object)
- DECL_ACCESSORS(indexed_callback, Object)
DECL_ACCESSORS(callback, Object)
+ DECL_ACCESSORS(named_interceptor, Object)
+ DECL_ACCESSORS(indexed_interceptor, Object)
DECL_ACCESSORS(data, Object)
DECLARE_CAST(AccessCheckInfo)
@@ -10538,10 +10665,13 @@ class AccessCheckInfo: public Struct {
DECLARE_PRINTER(AccessCheckInfo)
DECLARE_VERIFIER(AccessCheckInfo)
- static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
- static const int kIndexedCallbackOffset = kNamedCallbackOffset + kPointerSize;
- static const int kCallbackOffset = kIndexedCallbackOffset + kPointerSize;
- static const int kDataOffset = kCallbackOffset + kPointerSize;
+ static AccessCheckInfo* Get(Isolate* isolate, Handle<JSObject> receiver);
+
+ static const int kCallbackOffset = HeapObject::kHeaderSize;
+ static const int kNamedInterceptorOffset = kCallbackOffset + kPointerSize;
+ static const int kIndexedInterceptorOffset =
+ kNamedInterceptorOffset + kPointerSize;
+ static const int kDataOffset = kIndexedInterceptorOffset + kPointerSize;
static const int kSize = kDataOffset + kPointerSize;
private:
@@ -10620,6 +10750,8 @@ class TemplateInfo: public Struct {
DECLARE_VERIFIER(TemplateInfo)
+ DECLARE_CAST(TemplateInfo)
+
static const int kTagOffset = HeapObject::kHeaderSize;
static const int kSerialNumberOffset = kTagOffset + kPointerSize;
static const int kNumberOfProperties = kSerialNumberOffset + kPointerSize;
@@ -10630,6 +10762,8 @@ class TemplateInfo: public Struct {
kPropertyAccessorsOffset + kPointerSize;
static const int kHeaderSize = kPropertyIntrinsicsOffset + kPointerSize;
+ static const int kFastTemplateInstantiationsCacheSize = 1 * KB;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
};
@@ -10647,6 +10781,8 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_ACCESSORS(signature, Object)
DECL_ACCESSORS(instance_call_handler, Object)
DECL_ACCESSORS(access_check_info, Object)
+ DECL_ACCESSORS(shared_function_info, Object)
+ DECL_ACCESSORS(js_function, Object)
DECL_INT_ACCESSORS(flag)
inline int length() const;
@@ -10661,7 +10797,6 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_BOOLEAN_ACCESSORS(read_only_prototype)
DECL_BOOLEAN_ACCESSORS(remove_prototype)
DECL_BOOLEAN_ACCESSORS(do_not_cache)
- DECL_BOOLEAN_ACCESSORS(instantiated)
DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
DECLARE_CAST(FunctionTemplateInfo)
@@ -10686,17 +10821,20 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kInstanceCallHandlerOffset = kSignatureOffset + kPointerSize;
static const int kAccessCheckInfoOffset =
kInstanceCallHandlerOffset + kPointerSize;
- static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
+ static const int kSharedFunctionInfoOffset =
+ kAccessCheckInfoOffset + kPointerSize;
+ static const int kFlagOffset = kSharedFunctionInfoOffset + kPointerSize;
static const int kLengthOffset = kFlagOffset + kPointerSize;
static const int kSize = kLengthOffset + kPointerSize;
+ static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
+ Isolate* isolate, Handle<FunctionTemplateInfo> info);
+ // Returns parent function template or null.
+ inline FunctionTemplateInfo* GetParent(Isolate* isolate);
// Returns true if |object| is an instance of this function template.
- bool IsTemplateFor(Object* object);
+ inline bool IsTemplateFor(JSObject* object);
bool IsTemplateFor(Map* map);
-
- // Returns the holder JSObject if the function can legally be called with this
- // receiver. Returns Heap::null_value() if the call is illegal.
- Object* GetCompatibleReceiver(Isolate* isolate, Object* receiver);
+ inline bool instantiated();
private:
// Bit position in the flag, from least significant bit position.
@@ -10706,8 +10844,7 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kReadOnlyPrototypeBit = 3;
static const int kRemovePrototypeBit = 4;
static const int kDoNotCacheBit = 5;
- static const int kInstantiatedBit = 6;
- static const int kAcceptAnyReceiver = 7;
+ static const int kAcceptAnyReceiver = 6;
DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
};
@@ -10716,7 +10853,9 @@ class FunctionTemplateInfo: public TemplateInfo {
class ObjectTemplateInfo: public TemplateInfo {
public:
DECL_ACCESSORS(constructor, Object)
- DECL_ACCESSORS(internal_field_count, Object)
+ DECL_ACCESSORS(data, Object)
+ DECL_INT_ACCESSORS(internal_field_count)
+ DECL_BOOLEAN_ACCESSORS(immutable_proto)
DECLARE_CAST(ObjectTemplateInfo)
@@ -10725,9 +10864,18 @@ class ObjectTemplateInfo: public TemplateInfo {
DECLARE_VERIFIER(ObjectTemplateInfo)
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
- static const int kInternalFieldCountOffset =
- kConstructorOffset + kPointerSize;
- static const int kSize = kInternalFieldCountOffset + kPointerSize;
+ // LSB is for immutable_proto, higher bits for internal_field_count
+ static const int kDataOffset = kConstructorOffset + kPointerSize;
+ static const int kSize = kDataOffset + kPointerSize;
+
+  // Starting from the given object template's constructor, walk up the
+  // inheritance chain until a function template that has an instance
+  // template is found.
+ inline ObjectTemplateInfo* GetParent(Isolate* isolate);
+
+ private:
+ class IsImmutablePrototype : public BitField<bool, 0, 1> {};
+ class InternalFieldCount
+ : public BitField<int, IsImmutablePrototype::kNext, 29> {};
};
@@ -10737,25 +10885,21 @@ class DebugInfo: public Struct {
public:
// The shared function info for the source being debugged.
DECL_ACCESSORS(shared, SharedFunctionInfo)
- // Code object for the patched code. This code object is the code object
- // currently active for the function.
- DECL_ACCESSORS(abstract_code, AbstractCode)
+
+ DECL_ACCESSORS(debug_bytecode_array, Object)
// Fixed array holding status information for each active break point.
DECL_ACCESSORS(break_points, FixedArray)
- // Check if there is a break point at a code offset.
- bool HasBreakPoint(int code_offset);
- // Get the break point info object for a code offset.
- Object* GetBreakPointInfo(int code_offset);
- // Clear a break point.
- static void ClearBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
+ // Check if there is a break point at a source position.
+ bool HasBreakPoint(int source_position);
+ // Attempt to clear a break point. Return true if successful.
+ static bool ClearBreakPoint(Handle<DebugInfo> debug_info,
Handle<Object> break_point_object);
// Set a break point.
- static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
- int source_position, int statement_position,
+ static void SetBreakPoint(Handle<DebugInfo> debug_info, int source_position,
Handle<Object> break_point_object);
- // Get the break point objects for a code offset.
- Handle<Object> GetBreakPointObjects(int code_offset);
+ // Get the break point objects for a source position.
+ Handle<Object> GetBreakPointObjects(int source_position);
// Find the break point info holding this break point object.
static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
Handle<Object> break_point_object);
@@ -10764,7 +10908,12 @@ class DebugInfo: public Struct {
static Smi* uninitialized() { return Smi::FromInt(0); }
- inline BytecodeArray* original_bytecode_array();
+ inline bool HasDebugBytecodeArray();
+ inline bool HasDebugCode();
+
+ inline BytecodeArray* OriginalBytecodeArray();
+ inline BytecodeArray* DebugBytecodeArray();
+ inline Code* DebugCode();
DECLARE_CAST(DebugInfo)
@@ -10773,17 +10922,17 @@ class DebugInfo: public Struct {
DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
- static const int kAbstractCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
- static const int kBreakPointsStateIndex = kAbstractCodeIndex + kPointerSize;
+ static const int kDebugBytecodeArrayIndex =
+ kSharedFunctionInfoIndex + kPointerSize;
+ static const int kBreakPointsStateIndex =
+ kDebugBytecodeArrayIndex + kPointerSize;
static const int kSize = kBreakPointsStateIndex + kPointerSize;
- static const int kEstimatedNofBreakPointsInFunction = 16;
+ static const int kEstimatedNofBreakPointsInFunction = 4;
private:
- static const int kNoBreakPointInfo = -1;
-
- // Lookup the index in the break_points array for a code offset.
- int GetBreakPointInfoIndex(int code_offset);
+ // Get the break point info object for a source position.
+ Object* GetBreakPointInfo(int source_position);
DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
};
@@ -10794,13 +10943,8 @@ class DebugInfo: public Struct {
// position with one or more break points.
class BreakPointInfo: public Struct {
public:
- // The code offset for the break point.
- DECL_INT_ACCESSORS(code_offset)
// The position in the source for the break position.
DECL_INT_ACCESSORS(source_position)
- // The position in the source for the last statement before this break
- // position.
- DECL_INT_ACCESSORS(statement_position)
// List of related JavaScript break points.
DECL_ACCESSORS(break_point_objects, Object)
@@ -10816,18 +10960,17 @@ class BreakPointInfo: public Struct {
  // Get the number of break points for this source position.
int GetBreakPointCount();
+ int GetStatementPosition(Handle<DebugInfo> debug_info);
+
DECLARE_CAST(BreakPointInfo)
// Dispatched behavior.
DECLARE_PRINTER(BreakPointInfo)
DECLARE_VERIFIER(BreakPointInfo)
- static const int kCodeOffsetIndex = Struct::kHeaderSize;
- static const int kSourcePositionIndex = kCodeOffsetIndex + kPointerSize;
- static const int kStatementPositionIndex =
- kSourcePositionIndex + kPointerSize;
+ static const int kSourcePositionIndex = Struct::kHeaderSize;
static const int kBreakPointObjectsIndex =
- kStatementPositionIndex + kPointerSize;
+ kSourcePositionIndex + kPointerSize;
static const int kSize = kBreakPointObjectsIndex + kPointerSize;
private:
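
The ObjectTemplateInfo change above packs immutable_proto into the low bit of
a single data word and internal_field_count into the next 29 bits via V8's
BitField templates. A standalone sketch of the same encode/decode arithmetic,
assuming plain bit operations and illustrative helper names rather than V8's
BitField API:

#include <cassert>
#include <cstdint>

// Bit layout mirroring the diff: LSB for immutable_proto, 29 bits above it
// for internal_field_count.
constexpr uint32_t kImmutableProtoBits = 1;
constexpr uint32_t kFieldCountShift = kImmutableProtoBits;
constexpr uint32_t kFieldCountMask = (1u << 29) - 1;

constexpr uint32_t Encode(bool immutable_proto, uint32_t field_count) {
  return static_cast<uint32_t>(immutable_proto) |
         ((field_count & kFieldCountMask) << kFieldCountShift);
}
constexpr bool DecodeImmutableProto(uint32_t data) { return (data & 1u) != 0; }
constexpr uint32_t DecodeFieldCount(uint32_t data) {
  return (data >> kFieldCountShift) & kFieldCountMask;
}

int main() {
  uint32_t data = Encode(true, 2);
  assert(DecodeImmutableProto(data));
  assert(DecodeFieldCount(data) == 2);
}
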
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index 120db257cd..a0a548b607 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -61,6 +61,14 @@ std::ostream& PrintUC16(std::ostream& os, uint16_t c, bool (*pred)(uint16_t)) {
return os << buf;
}
+std::ostream& PrintUC16ForJSON(std::ostream& os, uint16_t c,
+ bool (*pred)(uint16_t)) {
+ // JSON does not allow \x99; must use \u0099.
+ char buf[10];
+ const char* format = pred(c) ? "%c" : "\\u%04x";
+ snprintf(buf, sizeof(buf), format, c);
+ return os << buf;
+}
std::ostream& PrintUC32(std::ostream& os, int32_t c, bool (*pred)(uint16_t)) {
if (c <= String::kMaxUtf16CodeUnit) {
@@ -84,7 +92,7 @@ std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c) {
if (c.value == '\r') return os << "\\r";
if (c.value == '\t') return os << "\\t";
if (c.value == '\"') return os << "\\\"";
- return PrintUC16(os, c.value, IsOK);
+ return PrintUC16ForJSON(os, c.value, IsOK);
}
@@ -97,5 +105,11 @@ std::ostream& operator<<(std::ostream& os, const AsUC32& c) {
return PrintUC32(os, c.value, IsPrint);
}
+std::ostream& operator<<(std::ostream& os, const AsHex& hex) {
+ char buf[20];
+ snprintf(buf, sizeof(buf), "%.*" PRIx64, hex.min_width, hex.value);
+ return os << buf;
+}
+
} // namespace internal
} // namespace v8
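
The new PrintUC16ForJSON differs from PrintUC16 only in the escape it emits,
since JSON has no \xNN form. A standalone sketch of that rule, with
EscapeForJSON as an illustrative stand-in for the V8 entry point:

#include <cstdint>
#include <cstdio>

// Characters the predicate rejects are emitted as \uXXXX (four hex digits),
// never as \xNN, which JSON does not allow.
void EscapeForJSON(uint16_t c, bool printable) {
  char buf[10];
  snprintf(buf, sizeof(buf), printable ? "%c" : "\\u%04x", c);
  fputs(buf, stdout);
}

int main() {
  EscapeForJSON('A', true);    // prints: A
  EscapeForJSON(0x99, false);  // prints: \u0099
  putchar('\n');
}
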
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index 1c2f38a153..977b5c6f4a 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -66,6 +66,12 @@ struct AsEscapedUC16ForJSON {
uint16_t value;
};
+struct AsHex {
+ explicit AsHex(uint64_t v, uint8_t min_width = 0)
+ : value(v), min_width(min_width) {}
+ uint64_t value;
+ uint8_t min_width;
+};
// Writes the given character to the output escaping everything outside of
// printable/space ASCII range. Additionally escapes '\' making escaping
@@ -83,6 +89,9 @@ std::ostream& operator<<(std::ostream& os, const AsUC16& c);
// of printable ASCII range.
std::ostream& operator<<(std::ostream& os, const AsUC32& c);
+// Writes the given number to the output in hexadecimal notation.
+std::ostream& operator<<(std::ostream& os, const AsHex& v);
+
} // namespace internal
} // namespace v8
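
The new AsHex manipulator routes min_width into snprintf as a precision,
which for %x means a zero-padded minimum digit count, so AsHex(0x2a, 4)
prints "002a". A standalone sketch of the same formatting without the
std::ostream plumbing (PrintAsHex is an illustrative name):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// min_width becomes the snprintf precision: a minimum number of hex digits,
// zero-padded on the left.
void PrintAsHex(uint64_t value, uint8_t min_width) {
  char buf[20];  // At most 16 hex digits for a uint64_t, plus NUL.
  snprintf(buf, sizeof(buf), "%.*" PRIx64, min_width, value);
  puts(buf);
}

int main() {
  PrintAsHex(0x2a, 0);  // prints: 2a
  PrintAsHex(0x2a, 4);  // prints: 002a
}
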
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index a5daeb3b72..44cc4ed5ed 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -4,4 +4,4 @@ adamk@chromium.org
littledan@chromium.org
marja@chromium.org
rossberg@chromium.org
-
+vogelheim@chromium.org
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 71fa3d3e89..9190e18c7d 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -12,142 +12,197 @@
namespace v8 {
namespace internal {
+#define ERROR_CODES(T) \
+ T(ExpressionProduction, 0) \
+ T(FormalParameterInitializerProduction, 1) \
+ T(BindingPatternProduction, 2) \
+ T(AssignmentPatternProduction, 3) \
+ T(DistinctFormalParametersProduction, 4) \
+ T(StrictModeFormalParametersProduction, 5) \
+ T(ArrowFormalParametersProduction, 6) \
+ T(LetPatternProduction, 7) \
+ T(ObjectLiteralProduction, 8) \
+ T(TailCallExpressionProduction, 9) \
+ T(AsyncArrowFormalParametersProduction, 10)
template <typename Traits>
class ExpressionClassifier {
public:
+ enum ErrorKind : unsigned {
+#define DEFINE_ERROR_KIND(NAME, CODE) k##NAME = CODE,
+ ERROR_CODES(DEFINE_ERROR_KIND)
+#undef DEFINE_ERROR_KIND
+ kUnusedError = 15 // Larger than error codes; should fit in 4 bits
+ };
+
struct Error {
- Error()
+ V8_INLINE Error()
: location(Scanner::Location::invalid()),
message(MessageTemplate::kNone),
+ kind(kUnusedError),
type(kSyntaxError),
arg(nullptr) {}
+ V8_INLINE explicit Error(Scanner::Location loc,
+ MessageTemplate::Template msg, ErrorKind k,
+ const char* a = nullptr,
+ ParseErrorType t = kSyntaxError)
+ : location(loc), message(msg), kind(k), type(t), arg(a) {}
Scanner::Location location;
- MessageTemplate::Template message : 30;
+ MessageTemplate::Template message : 26;
+ unsigned kind : 4;
ParseErrorType type : 2;
const char* arg;
};
- enum TargetProduction {
- ExpressionProduction = 1 << 0,
- FormalParameterInitializerProduction = 1 << 1,
- BindingPatternProduction = 1 << 2,
- AssignmentPatternProduction = 1 << 3,
- DistinctFormalParametersProduction = 1 << 4,
- StrictModeFormalParametersProduction = 1 << 5,
- ArrowFormalParametersProduction = 1 << 6,
- LetPatternProduction = 1 << 7,
- CoverInitializedNameProduction = 1 << 8,
-
- ExpressionProductions =
- (ExpressionProduction | FormalParameterInitializerProduction),
+ enum TargetProduction : unsigned {
+#define DEFINE_PRODUCTION(NAME, CODE) NAME = 1 << CODE,
+ ERROR_CODES(DEFINE_PRODUCTION)
+#undef DEFINE_PRODUCTION
+
+ ExpressionProductions =
+ (ExpressionProduction | FormalParameterInitializerProduction |
+ TailCallExpressionProduction),
PatternProductions = (BindingPatternProduction |
AssignmentPatternProduction | LetPatternProduction),
FormalParametersProductions = (DistinctFormalParametersProduction |
StrictModeFormalParametersProduction),
- StandardProductions = ExpressionProductions | PatternProductions,
AllProductions =
- (StandardProductions | FormalParametersProductions |
- ArrowFormalParametersProduction | CoverInitializedNameProduction)
+ (ExpressionProductions | PatternProductions |
+ FormalParametersProductions | ArrowFormalParametersProduction |
+ ObjectLiteralProduction | AsyncArrowFormalParametersProduction)
};
- enum FunctionProperties { NonSimpleParameter = 1 << 0 };
+ enum FunctionProperties : unsigned {
+ NonSimpleParameter = 1 << 0
+ };
explicit ExpressionClassifier(const Traits* t)
: zone_(t->zone()),
non_patterns_to_rewrite_(t->GetNonPatternList()),
+ reported_errors_(t->GetReportedErrorList()),
+ duplicate_finder_(nullptr),
invalid_productions_(0),
- function_properties_(0),
- duplicate_finder_(nullptr) {
+ function_properties_(0) {
+ reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
non_pattern_begin_ = non_patterns_to_rewrite_->length();
}
ExpressionClassifier(const Traits* t, DuplicateFinder* duplicate_finder)
: zone_(t->zone()),
non_patterns_to_rewrite_(t->GetNonPatternList()),
+ reported_errors_(t->GetReportedErrorList()),
+ duplicate_finder_(duplicate_finder),
invalid_productions_(0),
- function_properties_(0),
- duplicate_finder_(duplicate_finder) {
+ function_properties_(0) {
+ reported_errors_begin_ = reported_errors_end_ = reported_errors_->length();
non_pattern_begin_ = non_patterns_to_rewrite_->length();
}
~ExpressionClassifier() { Discard(); }
- bool is_valid(unsigned productions) const {
+ V8_INLINE bool is_valid(unsigned productions) const {
return (invalid_productions_ & productions) == 0;
}
- DuplicateFinder* duplicate_finder() const { return duplicate_finder_; }
+ V8_INLINE DuplicateFinder* duplicate_finder() const {
+ return duplicate_finder_;
+ }
- bool is_valid_expression() const { return is_valid(ExpressionProduction); }
+ V8_INLINE bool is_valid_expression() const {
+ return is_valid(ExpressionProduction);
+ }
- bool is_valid_formal_parameter_initializer() const {
+ V8_INLINE bool is_valid_formal_parameter_initializer() const {
return is_valid(FormalParameterInitializerProduction);
}
- bool is_valid_binding_pattern() const {
+ V8_INLINE bool is_valid_binding_pattern() const {
return is_valid(BindingPatternProduction);
}
- bool is_valid_assignment_pattern() const {
+ V8_INLINE bool is_valid_assignment_pattern() const {
return is_valid(AssignmentPatternProduction);
}
- bool is_valid_arrow_formal_parameters() const {
+ V8_INLINE bool is_valid_arrow_formal_parameters() const {
return is_valid(ArrowFormalParametersProduction);
}
- bool is_valid_formal_parameter_list_without_duplicates() const {
+ V8_INLINE bool is_valid_formal_parameter_list_without_duplicates() const {
return is_valid(DistinctFormalParametersProduction);
}
// Note: callers should also check
// is_valid_formal_parameter_list_without_duplicates().
- bool is_valid_strict_mode_formal_parameters() const {
+ V8_INLINE bool is_valid_strict_mode_formal_parameters() const {
return is_valid(StrictModeFormalParametersProduction);
}
- bool is_valid_let_pattern() const { return is_valid(LetPatternProduction); }
+ V8_INLINE bool is_valid_let_pattern() const {
+ return is_valid(LetPatternProduction);
+ }
- const Error& expression_error() const { return expression_error_; }
+ bool is_valid_async_arrow_formal_parameters() const {
+ return is_valid(AsyncArrowFormalParametersProduction);
+ }
- const Error& formal_parameter_initializer_error() const {
- return formal_parameter_initializer_error_;
+ V8_INLINE const Error& expression_error() const {
+ return reported_error(kExpressionProduction);
}
- const Error& binding_pattern_error() const { return binding_pattern_error_; }
+ V8_INLINE const Error& formal_parameter_initializer_error() const {
+ return reported_error(kFormalParameterInitializerProduction);
+ }
- const Error& assignment_pattern_error() const {
- return assignment_pattern_error_;
+ V8_INLINE const Error& binding_pattern_error() const {
+ return reported_error(kBindingPatternProduction);
}
- const Error& arrow_formal_parameters_error() const {
- return arrow_formal_parameters_error_;
+ V8_INLINE const Error& assignment_pattern_error() const {
+ return reported_error(kAssignmentPatternProduction);
}
- const Error& duplicate_formal_parameter_error() const {
- return duplicate_formal_parameter_error_;
+ V8_INLINE const Error& arrow_formal_parameters_error() const {
+ return reported_error(kArrowFormalParametersProduction);
}
- const Error& strict_mode_formal_parameter_error() const {
- return strict_mode_formal_parameter_error_;
+ V8_INLINE const Error& duplicate_formal_parameter_error() const {
+ return reported_error(kDistinctFormalParametersProduction);
}
- const Error& let_pattern_error() const { return let_pattern_error_; }
+ V8_INLINE const Error& strict_mode_formal_parameter_error() const {
+ return reported_error(kStrictModeFormalParametersProduction);
+ }
- bool has_cover_initialized_name() const {
- return !is_valid(CoverInitializedNameProduction);
+ V8_INLINE const Error& let_pattern_error() const {
+ return reported_error(kLetPatternProduction);
}
- const Error& cover_initialized_name_error() const {
- return cover_initialized_name_error_;
+
+ V8_INLINE bool has_object_literal_error() const {
+ return !is_valid(ObjectLiteralProduction);
+ }
+
+ V8_INLINE const Error& object_literal_error() const {
+ return reported_error(kObjectLiteralProduction);
}
- bool is_simple_parameter_list() const {
+ V8_INLINE bool has_tail_call_expression() const {
+ return !is_valid(TailCallExpressionProduction);
+ }
+ V8_INLINE const Error& tail_call_expression_error() const {
+ return reported_error(kTailCallExpressionProduction);
+ }
+
+ V8_INLINE const Error& async_arrow_formal_parameters_error() const {
+ return reported_error(kAsyncArrowFormalParametersProduction);
+ }
+
+ V8_INLINE bool is_simple_parameter_list() const {
return !(function_properties_ & NonSimpleParameter);
}
- void RecordNonSimpleParameter() {
+ V8_INLINE void RecordNonSimpleParameter() {
function_properties_ |= NonSimpleParameter;
}
@@ -156,9 +211,7 @@ class ExpressionClassifier {
const char* arg = nullptr) {
if (!is_valid_expression()) return;
invalid_productions_ |= ExpressionProduction;
- expression_error_.location = loc;
- expression_error_.message = message;
- expression_error_.arg = arg;
+ Add(Error(loc, message, kExpressionProduction, arg));
}
void RecordExpressionError(const Scanner::Location& loc,
@@ -166,10 +219,7 @@ class ExpressionClassifier {
ParseErrorType type, const char* arg = nullptr) {
if (!is_valid_expression()) return;
invalid_productions_ |= ExpressionProduction;
- expression_error_.location = loc;
- expression_error_.message = message;
- expression_error_.arg = arg;
- expression_error_.type = type;
+ Add(Error(loc, message, kExpressionProduction, arg, type));
}
void RecordFormalParameterInitializerError(const Scanner::Location& loc,
@@ -177,9 +227,7 @@ class ExpressionClassifier {
const char* arg = nullptr) {
if (!is_valid_formal_parameter_initializer()) return;
invalid_productions_ |= FormalParameterInitializerProduction;
- formal_parameter_initializer_error_.location = loc;
- formal_parameter_initializer_error_.message = message;
- formal_parameter_initializer_error_.arg = arg;
+ Add(Error(loc, message, kFormalParameterInitializerProduction, arg));
}
void RecordBindingPatternError(const Scanner::Location& loc,
@@ -187,9 +235,7 @@ class ExpressionClassifier {
const char* arg = nullptr) {
if (!is_valid_binding_pattern()) return;
invalid_productions_ |= BindingPatternProduction;
- binding_pattern_error_.location = loc;
- binding_pattern_error_.message = message;
- binding_pattern_error_.arg = arg;
+ Add(Error(loc, message, kBindingPatternProduction, arg));
}
void RecordAssignmentPatternError(const Scanner::Location& loc,
@@ -197,9 +243,7 @@ class ExpressionClassifier {
const char* arg = nullptr) {
if (!is_valid_assignment_pattern()) return;
invalid_productions_ |= AssignmentPatternProduction;
- assignment_pattern_error_.location = loc;
- assignment_pattern_error_.message = message;
- assignment_pattern_error_.arg = arg;
+ Add(Error(loc, message, kAssignmentPatternProduction, arg));
}
void RecordPatternError(const Scanner::Location& loc,
@@ -214,17 +258,22 @@ class ExpressionClassifier {
const char* arg = nullptr) {
if (!is_valid_arrow_formal_parameters()) return;
invalid_productions_ |= ArrowFormalParametersProduction;
- arrow_formal_parameters_error_.location = loc;
- arrow_formal_parameters_error_.message = message;
- arrow_formal_parameters_error_.arg = arg;
+ Add(Error(loc, message, kArrowFormalParametersProduction, arg));
+ }
+
+ void RecordAsyncArrowFormalParametersError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_async_arrow_formal_parameters()) return;
+ invalid_productions_ |= AsyncArrowFormalParametersProduction;
+ Add(Error(loc, message, kAsyncArrowFormalParametersProduction, arg));
}
void RecordDuplicateFormalParameterError(const Scanner::Location& loc) {
if (!is_valid_formal_parameter_list_without_duplicates()) return;
invalid_productions_ |= DistinctFormalParametersProduction;
- duplicate_formal_parameter_error_.location = loc;
- duplicate_formal_parameter_error_.message = MessageTemplate::kParamDupe;
- duplicate_formal_parameter_error_.arg = nullptr;
+ Add(Error(loc, MessageTemplate::kParamDupe,
+ kDistinctFormalParametersProduction));
}
// Record a binding that would be invalid in strict mode. Confusingly this
@@ -235,9 +284,7 @@ class ExpressionClassifier {
const char* arg = nullptr) {
if (!is_valid_strict_mode_formal_parameters()) return;
invalid_productions_ |= StrictModeFormalParametersProduction;
- strict_mode_formal_parameter_error_.location = loc;
- strict_mode_formal_parameter_error_.message = message;
- strict_mode_formal_parameter_error_.arg = arg;
+ Add(Error(loc, message, kStrictModeFormalParametersProduction, arg));
}
void RecordLetPatternError(const Scanner::Location& loc,
@@ -245,86 +292,103 @@ class ExpressionClassifier {
const char* arg = nullptr) {
if (!is_valid_let_pattern()) return;
invalid_productions_ |= LetPatternProduction;
- let_pattern_error_.location = loc;
- let_pattern_error_.message = message;
- let_pattern_error_.arg = arg;
- }
-
- void RecordCoverInitializedNameError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (has_cover_initialized_name()) return;
- invalid_productions_ |= CoverInitializedNameProduction;
- cover_initialized_name_error_.location = loc;
- cover_initialized_name_error_.message = message;
- cover_initialized_name_error_.arg = arg;
+ Add(Error(loc, message, kLetPatternProduction, arg));
}
- void ForgiveCoverInitializedNameError() {
- invalid_productions_ &= ~CoverInitializedNameProduction;
- cover_initialized_name_error_ = Error();
+ void RecordObjectLiteralError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (has_object_literal_error()) return;
+ invalid_productions_ |= ObjectLiteralProduction;
+ Add(Error(loc, message, kObjectLiteralProduction, arg));
}
- void ForgiveAssignmentPatternError() {
- invalid_productions_ &= ~AssignmentPatternProduction;
- assignment_pattern_error_ = Error();
+ void RecordTailCallExpressionError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (has_tail_call_expression()) return;
+ invalid_productions_ |= TailCallExpressionProduction;
+ Add(Error(loc, message, kTailCallExpressionProduction, arg));
}
- void Accumulate(ExpressionClassifier* inner,
- unsigned productions = StandardProductions,
+ void Accumulate(ExpressionClassifier* inner, unsigned productions,
bool merge_non_patterns = true) {
+ DCHECK_EQ(inner->reported_errors_, reported_errors_);
+ DCHECK_EQ(inner->reported_errors_begin_, reported_errors_end_);
+ DCHECK_EQ(inner->reported_errors_end_, reported_errors_->length());
if (merge_non_patterns) MergeNonPatterns(inner);
// Propagate errors from inner, but don't overwrite already recorded
// errors.
unsigned non_arrow_inner_invalid_productions =
inner->invalid_productions_ & ~ArrowFormalParametersProduction;
- if (non_arrow_inner_invalid_productions == 0) return;
- unsigned non_arrow_productions =
- productions & ~ArrowFormalParametersProduction;
- unsigned errors =
- non_arrow_productions & non_arrow_inner_invalid_productions;
- errors &= ~invalid_productions_;
- if (errors != 0) {
- invalid_productions_ |= errors;
- if (errors & ExpressionProduction)
- expression_error_ = inner->expression_error_;
- if (errors & FormalParameterInitializerProduction)
- formal_parameter_initializer_error_ =
- inner->formal_parameter_initializer_error_;
- if (errors & BindingPatternProduction)
- binding_pattern_error_ = inner->binding_pattern_error_;
- if (errors & AssignmentPatternProduction)
- assignment_pattern_error_ = inner->assignment_pattern_error_;
- if (errors & DistinctFormalParametersProduction)
- duplicate_formal_parameter_error_ =
- inner->duplicate_formal_parameter_error_;
- if (errors & StrictModeFormalParametersProduction)
- strict_mode_formal_parameter_error_ =
- inner->strict_mode_formal_parameter_error_;
- if (errors & LetPatternProduction)
- let_pattern_error_ = inner->let_pattern_error_;
- if (errors & CoverInitializedNameProduction)
- cover_initialized_name_error_ = inner->cover_initialized_name_error_;
- }
-
- // As an exception to the above, the result continues to be a valid arrow
- // formal parameters if the inner expression is a valid binding pattern.
- if (productions & ArrowFormalParametersProduction &&
- is_valid_arrow_formal_parameters()) {
- // Also copy function properties if expecting an arrow function
- // parameter.
- function_properties_ |= inner->function_properties_;
-
- if (!inner->is_valid_binding_pattern()) {
- invalid_productions_ |= ArrowFormalParametersProduction;
- arrow_formal_parameters_error_ = inner->binding_pattern_error_;
+ if (non_arrow_inner_invalid_productions) {
+ unsigned errors = non_arrow_inner_invalid_productions & productions &
+ ~invalid_productions_;
+ // The result will continue to be a valid arrow formal parameters if the
+ // inner expression is a valid binding pattern.
+ bool copy_BP_to_AFP = false;
+ if (productions & ArrowFormalParametersProduction &&
+ is_valid_arrow_formal_parameters()) {
+ // Also copy function properties if expecting an arrow function
+ // parameter.
+ function_properties_ |= inner->function_properties_;
+ if (!inner->is_valid_binding_pattern()) {
+ copy_BP_to_AFP = true;
+ invalid_productions_ |= ArrowFormalParametersProduction;
+ }
+ }
+ // Traverse the list of errors reported by the inner classifier
+ // to copy what's necessary.
+ if (errors != 0 || copy_BP_to_AFP) {
+ invalid_productions_ |= errors;
+ int binding_pattern_index = inner->reported_errors_end_;
+ for (int i = inner->reported_errors_begin_;
+ i < inner->reported_errors_end_; i++) {
+ int k = reported_errors_->at(i).kind;
+ if (errors & (1 << k)) Copy(i);
+ // Check if it's a BP error that has to be copied to an AFP error.
+ if (k == kBindingPatternProduction && copy_BP_to_AFP) {
+ if (reported_errors_end_ <= i) {
+ // If the BP error itself has not already been copied,
+ // copy it now and change it to an AFP error.
+ Copy(i);
+ reported_errors_->at(reported_errors_end_-1).kind =
+ kArrowFormalParametersProduction;
+ } else {
+ // Otherwise, if the BP error was already copied, keep its
+ // position and wait until the end of the traversal.
+ DCHECK_EQ(reported_errors_end_, i+1);
+ binding_pattern_index = i;
+ }
+ }
+ }
+ // Do we still have to copy the BP error to an AFP error?
+ if (binding_pattern_index < inner->reported_errors_end_) {
+ // If there's still unused space in the list of the inner
+ // classifier, copy it there, otherwise add it to the end
+ // of the list.
+ if (reported_errors_end_ < inner->reported_errors_end_)
+ Copy(binding_pattern_index);
+ else
+ Add(reported_errors_->at(binding_pattern_index));
+ reported_errors_->at(reported_errors_end_-1).kind =
+ kArrowFormalParametersProduction;
+ }
}
}
+ reported_errors_->Rewind(reported_errors_end_);
+ inner->reported_errors_begin_ = inner->reported_errors_end_ =
+ reported_errors_end_;
}
V8_INLINE int GetNonPatternBegin() const { return non_pattern_begin_; }
V8_INLINE void Discard() {
+ if (reported_errors_end_ == reported_errors_->length()) {
+ reported_errors_->Rewind(reported_errors_begin_);
+ reported_errors_end_ = reported_errors_begin_;
+ }
+ DCHECK_EQ(reported_errors_begin_, reported_errors_end_);
DCHECK_LE(non_pattern_begin_, non_patterns_to_rewrite_->length());
non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
}
@@ -335,24 +399,69 @@ class ExpressionClassifier {
}
private:
+ V8_INLINE const Error& reported_error(ErrorKind kind) const {
+ if (invalid_productions_ & (1 << kind)) {
+ for (int i = reported_errors_begin_; i < reported_errors_end_; i++) {
+ if (reported_errors_->at(i).kind == kind)
+ return reported_errors_->at(i);
+ }
+ UNREACHABLE();
+ }
+    // We should only be looking for an error when we know that one has
+    // been reported, but some call sites query errors unconditionally.
+    // Return a default Error so their behaviour stays unchanged.
+ static Error none;
+ return none;
+ }
+
+ // Adds e to the end of the list of reported errors for this classifier.
+ // It is expected that this classifier is the last one in the stack.
+ V8_INLINE void Add(const Error& e) {
+ DCHECK_EQ(reported_errors_end_, reported_errors_->length());
+ reported_errors_->Add(e, zone_);
+ reported_errors_end_++;
+ }
+
+ // Copies the error at position i of the list of reported errors, so that
+ // it becomes the last error reported for this classifier. Position i
+ // could be either after the existing errors of this classifier (i.e.,
+ // in an inner classifier) or it could be an existing error (in case a
+ // copy is needed).
+ V8_INLINE void Copy(int i) {
+ DCHECK_LT(i, reported_errors_->length());
+ if (reported_errors_end_ != i)
+ reported_errors_->at(reported_errors_end_) = reported_errors_->at(i);
+ reported_errors_end_++;
+ }
+
Zone* zone_;
ZoneList<typename Traits::Type::Expression>* non_patterns_to_rewrite_;
- int non_pattern_begin_;
- unsigned invalid_productions_;
- unsigned function_properties_;
- Error expression_error_;
- Error formal_parameter_initializer_error_;
- Error binding_pattern_error_;
- Error assignment_pattern_error_;
- Error arrow_formal_parameters_error_;
- Error duplicate_formal_parameter_error_;
- Error strict_mode_formal_parameter_error_;
- Error let_pattern_error_;
- Error cover_initialized_name_error_;
+ ZoneList<Error>* reported_errors_;
DuplicateFinder* duplicate_finder_;
+ // The uint16_t for non_pattern_begin_ will not be enough in the case,
+ // e.g., of an array literal containing more than 64K inner array
+ // literals with spreads, as in:
+ // var N=65536; eval("var x=[];" + "[" + "[...x],".repeat(N) + "].length");
+ // An implementation limit error in ParserBase::AddNonPatternForRewriting
+ // will be triggered in this case.
+ uint16_t non_pattern_begin_;
+ unsigned invalid_productions_ : 14;
+ unsigned function_properties_ : 2;
+ // The uint16_t for reported_errors_begin_ and reported_errors_end_ will
+ // not be enough in the case of a long series of expressions using nested
+ // classifiers, e.g., a long sequence of assignments, as in:
+ // var N=65536; eval("var x;" + "x=".repeat(N) + "42");
+ // This should not be a problem, as such things currently fail with a
+ // stack overflow while parsing.
+ uint16_t reported_errors_begin_;
+ uint16_t reported_errors_end_;
};
+#undef ERROR_CODES
+
+
} // namespace internal
} // namespace v8
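
The ERROR_CODES list above is an X-macro: it expands once into small
ErrorKind integers (stored in Error::kind, 4 bits) and once into 1 << code
production masks (stored in invalid_productions_), keeping the two in sync.
A trimmed standalone sketch of the pattern; DEMO_ERROR_CODES stands in for
the full list:

#include <cassert>

#define DEMO_ERROR_CODES(T)  \
  T(ExpressionProduction, 0) \
  T(BindingPatternProduction, 2)

// First expansion: small integers usable as an error's kind field.
enum ErrorKind : unsigned {
#define DEFINE_ERROR_KIND(NAME, CODE) k##NAME = CODE,
  DEMO_ERROR_CODES(DEFINE_ERROR_KIND)
#undef DEFINE_ERROR_KIND
};

// Second expansion: one bit per production, usable in bitmasks.
enum TargetProduction : unsigned {
#define DEFINE_PRODUCTION(NAME, CODE) NAME = 1 << CODE,
  DEMO_ERROR_CODES(DEFINE_PRODUCTION)
#undef DEFINE_PRODUCTION
};

int main() {
  // Accumulate() relies on this correspondence when it filters copied
  // errors with (errors & (1 << kind)).
  assert(BindingPatternProduction == 1u << kBindingPatternProduction);
}
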
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index 12013afd28..0821be0a68 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -44,6 +44,11 @@ void FuncNameInferrer::PushVariableName(const AstRawString* name) {
}
}
+void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
+ DCHECK(names_stack_.length() > 0);
+ DCHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
+ names_stack_.RemoveLast();
+}
const AstString* FuncNameInferrer::MakeNameFromStack() {
return MakeNameFromStackHelper(0, ast_value_factory_->empty_string());
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index ba38ffeb24..cffd8a8c18 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -16,6 +16,8 @@ class AstString;
class AstValueFactory;
class FunctionLiteral;
+enum class InferName { kYes, kNo };
+
// FuncNameInferrer is a stateful class that is used to perform name
// inference for anonymous functions during static analysis of source code.
// Inference is performed in cases when an anonymous function is assigned
@@ -71,6 +73,8 @@ class FuncNameInferrer : public ZoneObject {
}
}
+ void RemoveAsyncKeywordFromEnd();
+
// Infers a function name and leaves names collection state.
void Infer() {
DCHECK(IsOpen());
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.cc b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
index 3e3587b2bd..b12a80f9b8 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.cc
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
@@ -5,7 +5,7 @@
#include "src/parsing/parameter-initializer-rewriter.h"
#include "src/ast/ast.h"
-#include "src/ast/ast-expression-visitor.h"
+#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/scopes.h"
namespace v8 {
@@ -14,33 +14,34 @@ namespace internal {
namespace {
-class Rewriter final : public AstExpressionVisitor {
+class Rewriter final : public AstTraversalVisitor<Rewriter> {
public:
- Rewriter(uintptr_t stack_limit, Expression* initializer, Scope* old_scope,
- Scope* new_scope)
- : AstExpressionVisitor(stack_limit, initializer),
- old_scope_(old_scope),
- new_scope_(new_scope) {}
+ Rewriter(uintptr_t stack_limit, Expression* initializer, Scope* param_scope)
+ : AstTraversalVisitor(stack_limit, initializer),
+ param_scope_(param_scope) {}
private:
- void VisitExpression(Expression* expr) override {}
+  // This is required so that the overridden Visit* methods can be
+ // called by the base class (template).
+ friend class AstTraversalVisitor<Rewriter>;
- void VisitFunctionLiteral(FunctionLiteral* expr) override;
- void VisitClassLiteral(ClassLiteral* expr) override;
- void VisitVariableProxy(VariableProxy* expr) override;
+ void VisitFunctionLiteral(FunctionLiteral* expr);
+ void VisitClassLiteral(ClassLiteral* expr);
+ void VisitVariableProxy(VariableProxy* expr);
- Scope* old_scope_;
- Scope* new_scope_;
-};
+ void VisitBlock(Block* stmt);
+ void VisitTryCatchStatement(TryCatchStatement* stmt);
+ void VisitWithStatement(WithStatement* stmt);
+ Scope* param_scope_;
+};
void Rewriter::VisitFunctionLiteral(FunctionLiteral* function_literal) {
- function_literal->scope()->ReplaceOuterScope(new_scope_);
+ function_literal->scope()->ReplaceOuterScope(param_scope_);
}
void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
- class_literal->scope()->ReplaceOuterScope(new_scope_);
if (class_literal->extends() != nullptr) {
Visit(class_literal->extends());
}
@@ -60,26 +61,50 @@ void Rewriter::VisitClassLiteral(ClassLiteral* class_literal) {
void Rewriter::VisitVariableProxy(VariableProxy* proxy) {
- if (proxy->is_resolved()) {
- Variable* var = proxy->var();
- if (var->mode() != TEMPORARY) return;
- if (old_scope_->RemoveTemporary(var)) {
- var->set_scope(new_scope_);
- new_scope_->AddTemporary(var);
+ if (!proxy->is_resolved()) {
+ if (param_scope_->outer_scope()->RemoveUnresolved(proxy)) {
+ param_scope_->AddUnresolved(proxy);
}
- } else if (old_scope_->RemoveUnresolved(proxy)) {
- new_scope_->AddUnresolved(proxy);
+ } else {
+ // Ensure that temporaries we find are already in the correct scope.
+ DCHECK(proxy->var()->mode() != TEMPORARY ||
+ proxy->var()->scope() == param_scope_->GetClosureScope());
}
}
+void Rewriter::VisitBlock(Block* stmt) {
+ if (stmt->scope() != nullptr)
+ stmt->scope()->ReplaceOuterScope(param_scope_);
+ else
+ VisitStatements(stmt->statements());
+}
+
+
+void Rewriter::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ Visit(stmt->try_block());
+ stmt->scope()->ReplaceOuterScope(param_scope_);
+}
+
+
+void Rewriter::VisitWithStatement(WithStatement* stmt) {
+ Visit(stmt->expression());
+ stmt->scope()->ReplaceOuterScope(param_scope_);
+}
+
+
} // anonymous namespace
+void ReparentParameterExpressionScope(uintptr_t stack_limit, Expression* expr,
+ Scope* param_scope) {
+ // The only case that uses this code is block scopes for parameters containing
+ // sloppy eval.
+ DCHECK(param_scope->is_block_scope());
+ DCHECK(param_scope->is_declaration_scope());
+ DCHECK(param_scope->calls_sloppy_eval());
+ DCHECK(param_scope->outer_scope()->is_function_scope());
-void RewriteParameterInitializerScope(uintptr_t stack_limit,
- Expression* initializer, Scope* old_scope,
- Scope* new_scope) {
- Rewriter rewriter(stack_limit, initializer, old_scope, new_scope);
+ Rewriter rewriter(stack_limit, expr, param_scope);
rewriter.Run();
}
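
The friend declaration in the new Rewriter exists because AstTraversalVisitor
is a CRTP base: it static_casts this to the derived type and calls its Visit*
methods without virtual dispatch, which requires access to those private
methods. A minimal standalone sketch with toy names, not the V8 classes:

#include <cstdio>

template <typename Subclass>
class TraversalVisitor {
 public:
  // CRTP dispatch: no virtual call, just a downcast to the derived type.
  void Run() { static_cast<Subclass*>(this)->VisitNode(); }
};

class Rewriter final : public TraversalVisitor<Rewriter> {
 private:
  // Without this, the base template could not reach the private VisitNode.
  friend class TraversalVisitor<Rewriter>;
  void VisitNode() { std::puts("visited"); }
};

int main() {
  Rewriter rewriter;
  rewriter.Run();
}
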
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.h b/deps/v8/src/parsing/parameter-initializer-rewriter.h
index 255534c99e..a0ff7d2b38 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.h
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.h
@@ -5,16 +5,23 @@
#ifndef V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
#define V8_PARSING_PARAMETER_EXPRESSION_REWRITER_H_
-#include "src/ast/ast.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
-
-void RewriteParameterInitializerScope(uintptr_t stack_limit,
- Expression* initializer, Scope* old_scope,
- Scope* new_scope);
-
+class Expression;
+class Scope;
+
+// When an extra declaration scope needs to be inserted to account for
+// a sloppy eval in a default parameter or function body, the expressions
+// need to be in that new inner scope which was added after initial
+// parsing.
+//
+// param_scope is the new inner scope, and its outer_scope() is assumed
+// to be the function scope which was used during the initial parse.
+void ReparentParameterExpressionScope(uintptr_t stack_limit, Expression* expr,
+ Scope* param_scope);
} // namespace internal
} // namespace v8
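
A toy sketch of what the reparenting described above amounts to: scopes found
inside the expression get their outer pointer redirected from the function
scope to the inserted param_scope. Scope here is a stand-in struct, not V8's
class:

#include <cassert>

struct Scope {
  explicit Scope(Scope* outer) : outer_(outer) {}
  void ReplaceOuterScope(Scope* s) { outer_ = s; }
  Scope* outer_;
};

int main() {
  Scope function_scope(nullptr);
  // The declaration scope inserted after initial parsing, e.g. for a sloppy
  // eval in a default parameter; its outer scope is the function scope.
  Scope param_scope(&function_scope);
  // A block scope inside the parameter initializer was originally parented
  // to the function scope and must now hang off param_scope instead.
  Scope block_scope(&function_scope);
  block_scope.ReplaceOuterScope(&param_scope);
  assert(block_scope.outer_ == &param_scope);
}
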
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
new file mode 100644
index 0000000000..dfec0610e1
--- /dev/null
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -0,0 +1,113 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/parse-info.h"
+
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+
+namespace v8 {
+namespace internal {
+
+ParseInfo::ParseInfo(Zone* zone)
+ : zone_(zone),
+ flags_(0),
+ source_stream_(nullptr),
+ source_stream_encoding_(ScriptCompiler::StreamedSource::ONE_BYTE),
+ character_stream_(nullptr),
+ extension_(nullptr),
+ compile_options_(ScriptCompiler::kNoCompileOptions),
+ script_scope_(nullptr),
+ unicode_cache_(nullptr),
+ stack_limit_(0),
+ hash_seed_(0),
+ compiler_hints_(0),
+ start_position_(0),
+ end_position_(0),
+ isolate_(nullptr),
+ cached_data_(nullptr),
+ ast_value_factory_(nullptr),
+ function_name_(nullptr),
+ literal_(nullptr) {}
+
+ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
+ : ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
+ set_context(Handle<Context>(function->context()));
+}
+
+ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
+ : ParseInfo(zone) {
+ isolate_ = shared->GetIsolate();
+
+ set_lazy();
+ set_hash_seed(isolate_->heap()->HashSeed());
+ set_is_named_expression(shared->is_named_expression());
+ set_calls_eval(shared->scope_info()->CallsEval());
+ set_compiler_hints(shared->compiler_hints());
+ set_start_position(shared->start_position());
+ set_end_position(shared->end_position());
+ set_stack_limit(isolate_->stack_guard()->real_climit());
+ set_unicode_cache(isolate_->unicode_cache());
+ set_language_mode(shared->language_mode());
+ set_shared_info(shared);
+
+ Handle<Script> script(Script::cast(shared->script()));
+ set_script(script);
+ if (!script.is_null() && script->type() == Script::TYPE_NATIVE) {
+ set_native();
+ }
+}
+
+ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
+ isolate_ = script->GetIsolate();
+
+ set_hash_seed(isolate_->heap()->HashSeed());
+ set_stack_limit(isolate_->stack_guard()->real_climit());
+ set_unicode_cache(isolate_->unicode_cache());
+ set_script(script);
+
+ if (script->type() == Script::TYPE_NATIVE) {
+ set_native();
+ }
+}
+
+ParseInfo::~ParseInfo() {
+ if (ast_value_factory_owned()) {
+ delete ast_value_factory_;
+ set_ast_value_factory_owned(false);
+ }
+ ast_value_factory_ = nullptr;
+}
+
+DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
+
+bool ParseInfo::is_declaration() const {
+ return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDeclaration)) != 0;
+}
+
+bool ParseInfo::is_arrow() const {
+ return (compiler_hints_ & (1 << SharedFunctionInfo::kIsArrow)) != 0;
+}
+
+bool ParseInfo::is_async() const {
+ return (compiler_hints_ & (1 << SharedFunctionInfo::kIsAsyncFunction)) != 0;
+}
+
+bool ParseInfo::is_default_constructor() const {
+ return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDefaultConstructor)) !=
+ 0;
+}
+
+FunctionKind ParseInfo::function_kind() const {
+ return SharedFunctionInfo::FunctionKindBits::decode(compiler_hints_);
+}
+
+#ifdef DEBUG
+bool ParseInfo::script_is_native() const {
+ return script_->type() == Script::TYPE_NATIVE;
+}
+#endif // DEBUG
+
+} // namespace internal
+} // namespace v8
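
ParseInfo keeps its boolean options in a single flags_ word, and the header
below generates one getter and two setters per flag with the FLAG_ACCESSOR
macro. A standalone sketch of that plumbing, where DemoInfo, GetFlag, and
SetFlag are illustrative stand-ins:

#include <cassert>

class DemoInfo {
 public:
#define FLAG_ACCESSOR(flag, getter, setter)     \
  bool getter() const { return GetFlag(flag); } \
  void setter() { SetFlag(flag); }              \
  void setter(bool val) { SetFlag(flag, val); }

  FLAG_ACCESSOR(kLazy, is_lazy, set_lazy)
  FLAG_ACCESSOR(kNative, is_native, set_native)
#undef FLAG_ACCESSOR

 private:
  enum Flag : unsigned { kLazy = 1 << 0, kNative = 1 << 1 };
  bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
  void SetFlag(Flag f) { flags_ |= f; }
  void SetFlag(Flag f, bool v) { flags_ = v ? (flags_ | f) : (flags_ & ~f); }
  unsigned flags_ = 0;
};

int main() {
  DemoInfo info;
  info.set_lazy();
  assert(info.is_lazy() && !info.is_native());
  info.set_lazy(false);
  assert(!info.is_lazy());
}
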
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
new file mode 100644
index 0000000000..6176135c59
--- /dev/null
+++ b/deps/v8/src/parsing/parse-info.h
@@ -0,0 +1,245 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PARSE_INFO_H_
+#define V8_PARSING_PARSE_INFO_H_
+
+#include "include/v8.h"
+#include "src/globals.h"
+#include "src/handles.h"
+
+namespace v8 {
+
+class Extension;
+
+namespace internal {
+
+class AstRawString;
+class AstValueFactory;
+class DeclarationScope;
+class FunctionLiteral;
+class ScriptData;
+class SharedFunctionInfo;
+class UnicodeCache;
+class Utf16CharacterStream;
+class Zone;
+
+// A container for the inputs, configuration options, and outputs of parsing.
+class ParseInfo {
+ public:
+ explicit ParseInfo(Zone* zone);
+ ParseInfo(Zone* zone, Handle<JSFunction> function);
+ ParseInfo(Zone* zone, Handle<Script> script);
+ // TODO(all): Only used via Debug::FindSharedFunctionInfoInScript; remove?
+ ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared);
+
+ ~ParseInfo();
+
+ Zone* zone() const { return zone_; }
+
+// Convenience accessor methods for flags.
+#define FLAG_ACCESSOR(flag, getter, setter) \
+ bool getter() const { return GetFlag(flag); } \
+ void setter() { SetFlag(flag); } \
+ void setter(bool val) { SetFlag(flag, val); }
+
+ FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
+ FLAG_ACCESSOR(kLazy, is_lazy, set_lazy)
+ FLAG_ACCESSOR(kEval, is_eval, set_eval)
+ FLAG_ACCESSOR(kGlobal, is_global, set_global)
+ FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
+ FLAG_ACCESSOR(kNative, is_native, set_native)
+ FLAG_ACCESSOR(kModule, is_module, set_module)
+ FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
+ FLAG_ACCESSOR(kAstValueFactoryOwned, ast_value_factory_owned,
+ set_ast_value_factory_owned)
+ FLAG_ACCESSOR(kIsNamedExpression, is_named_expression,
+ set_is_named_expression)
+ FLAG_ACCESSOR(kCallsEval, calls_eval, set_calls_eval)
+
+#undef FLAG_ACCESSOR
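+
+ // For illustration, FLAG_ACCESSOR(kLazy, is_lazy, set_lazy) expands to:
+ //
+ //   bool is_lazy() const { return GetFlag(kLazy); }
+ //   void set_lazy() { SetFlag(kLazy); }
+ //   void set_lazy(bool val) { SetFlag(kLazy, val); }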
+
+ void set_parse_restriction(ParseRestriction restriction) {
+ SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
+ }
+
+ ParseRestriction parse_restriction() const {
+ return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
+ : NO_PARSE_RESTRICTION;
+ }
+
+ ScriptCompiler::ExternalSourceStream* source_stream() const {
+ return source_stream_;
+ }
+ void set_source_stream(ScriptCompiler::ExternalSourceStream* source_stream) {
+ source_stream_ = source_stream;
+ }
+
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding() const {
+ return source_stream_encoding_;
+ }
+ void set_source_stream_encoding(
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding) {
+ source_stream_encoding_ = source_stream_encoding;
+ }
+
+ Utf16CharacterStream* character_stream() const { return character_stream_; }
+ void set_character_stream(Utf16CharacterStream* character_stream) {
+ character_stream_ = character_stream;
+ }
+
+ v8::Extension* extension() const { return extension_; }
+ void set_extension(v8::Extension* extension) { extension_ = extension; }
+
+ ScriptData** cached_data() const { return cached_data_; }
+ void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
+
+ ScriptCompiler::CompileOptions compile_options() const {
+ return compile_options_;
+ }
+ void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
+ compile_options_ = compile_options;
+ }
+
+ DeclarationScope* script_scope() const { return script_scope_; }
+ void set_script_scope(DeclarationScope* script_scope) {
+ script_scope_ = script_scope;
+ }
+
+ AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
+ void set_ast_value_factory(AstValueFactory* ast_value_factory) {
+ ast_value_factory_ = ast_value_factory;
+ }
+
+ const AstRawString* function_name() const { return function_name_; }
+ void set_function_name(const AstRawString* function_name) {
+ function_name_ = function_name;
+ }
+
+ FunctionLiteral* literal() const { return literal_; }
+ void set_literal(FunctionLiteral* literal) { literal_ = literal; }
+
+ DeclarationScope* scope() const;
+
+ UnicodeCache* unicode_cache() const { return unicode_cache_; }
+ void set_unicode_cache(UnicodeCache* unicode_cache) {
+ unicode_cache_ = unicode_cache;
+ }
+
+ uintptr_t stack_limit() const { return stack_limit_; }
+ void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+
+ uint32_t hash_seed() const { return hash_seed_; }
+ void set_hash_seed(uint32_t hash_seed) { hash_seed_ = hash_seed; }
+
+ int compiler_hints() const { return compiler_hints_; }
+ void set_compiler_hints(int compiler_hints) {
+ compiler_hints_ = compiler_hints;
+ }
+
+ int start_position() const { return start_position_; }
+ void set_start_position(int start_position) {
+ start_position_ = start_position;
+ }
+
+ int end_position() const { return end_position_; }
+ void set_end_position(int end_position) { end_position_ = end_position; }
+
+ // Getters for individual compiler hints.
+ bool is_declaration() const;
+ bool is_arrow() const;
+ bool is_async() const;
+ bool is_default_constructor() const;
+ FunctionKind function_kind() const;
+
+ //--------------------------------------------------------------------------
+ // TODO(titzer): these should not be part of ParseInfo.
+ //--------------------------------------------------------------------------
+ Isolate* isolate() const { return isolate_; }
+ Handle<SharedFunctionInfo> shared_info() const { return shared_; }
+ Handle<Script> script() const { return script_; }
+ Handle<Context> context() const { return context_; }
+ void clear_script() { script_ = Handle<Script>::null(); }
+ void set_isolate(Isolate* isolate) { isolate_ = isolate; }
+ void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
+ void set_context(Handle<Context> context) { context_ = context; }
+ void set_script(Handle<Script> script) { script_ = script; }
+ //--------------------------------------------------------------------------
+
+ LanguageMode language_mode() const {
+ return construct_language_mode(is_strict_mode());
+ }
+ void set_language_mode(LanguageMode language_mode) {
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ set_strict_mode(is_strict(language_mode));
+ }
+
+ void ReopenHandlesInNewHandleScope() {
+ shared_ = Handle<SharedFunctionInfo>(*shared_);
+ script_ = Handle<Script>(*script_);
+ context_ = Handle<Context>(*context_);
+ }
+
+#ifdef DEBUG
+ bool script_is_native() const;
+#endif // DEBUG
+
+ private:
+ // Various configuration flags for parsing.
+ enum Flag {
+ // ---------- Input flags ---------------------------
+ kToplevel = 1 << 0,
+ kLazy = 1 << 1,
+ kEval = 1 << 2,
+ kGlobal = 1 << 3,
+ kStrictMode = 1 << 4,
+ kNative = 1 << 5,
+ kParseRestriction = 1 << 6,
+ kModule = 1 << 7,
+ kAllowLazyParsing = 1 << 8,
+ kIsNamedExpression = 1 << 9,
+ kCallsEval = 1 << 10,
+ // ---------- Output flags --------------------------
+ kAstValueFactoryOwned = 1 << 11
+ };
+
+ //------------- Inputs to parsing and scope analysis -----------------------
+ Zone* zone_;
+ unsigned flags_;
+ ScriptCompiler::ExternalSourceStream* source_stream_;
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
+ Utf16CharacterStream* character_stream_;
+ v8::Extension* extension_;
+ ScriptCompiler::CompileOptions compile_options_;
+ DeclarationScope* script_scope_;
+ UnicodeCache* unicode_cache_;
+ uintptr_t stack_limit_;
+ uint32_t hash_seed_;
+ int compiler_hints_;
+ int start_position_;
+ int end_position_;
+
+ // TODO(titzer): Move handles and isolate out of ParseInfo.
+ Isolate* isolate_;
+ Handle<SharedFunctionInfo> shared_;
+ Handle<Script> script_;
+ Handle<Context> context_;
+
+ //----------- Inputs+Outputs of parsing and scope analysis -----------------
+ ScriptData** cached_data_; // used if available, populated if requested.
+ AstValueFactory* ast_value_factory_; // used if available, otherwise new.
+ const AstRawString* function_name_;
+
+ //----------- Output of parsing and scope analysis ------------------------
+ FunctionLiteral* literal_;
+
+ void SetFlag(Flag f) { flags_ |= f; }
+ void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
+ bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PARSE_INFO_H_
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index dde6b1dd86..b8703d0691 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -7,7 +7,8 @@
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
+#include "src/globals.h"
#include "src/messages.h"
#include "src/parsing/expression-classifier.h"
#include "src/parsing/func-name-inferrer.h"
@@ -29,17 +30,143 @@ enum AllowLabelledFunctionStatement {
kDisallowLabelledFunctionStatement,
};
+enum class ParseFunctionFlags {
+ kIsNormal = 0,
+ kIsGenerator = 1,
+ kIsAsync = 2,
+ kIsDefault = 4
+};
+
+static inline ParseFunctionFlags operator|(ParseFunctionFlags lhs,
+ ParseFunctionFlags rhs) {
+ typedef unsigned char T;
+ return static_cast<ParseFunctionFlags>(static_cast<T>(lhs) |
+ static_cast<T>(rhs));
+}
+
+static inline ParseFunctionFlags& operator|=(ParseFunctionFlags& lhs,
+ const ParseFunctionFlags& rhs) {
+ lhs = lhs | rhs;
+ return lhs;
+}
+
+static inline bool operator&(ParseFunctionFlags bitfield,
+ ParseFunctionFlags mask) {
+ typedef unsigned char T;
+ return static_cast<T>(bitfield) & static_cast<T>(mask);
+}
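+
+// Usage sketch (illustrative): the overloads above let the flags be combined
+// and tested like a bit set:
+//
+//   ParseFunctionFlags flags = ParseFunctionFlags::kIsGenerator;
+//   flags |= ParseFunctionFlags::kIsDefault;
+//   if (flags & ParseFunctionFlags::kIsGenerator) { /* generator path */ }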
+
+enum class MethodKind {
+ kNormal = 0,
+ kStatic = 1 << 0,
+ kGenerator = 1 << 1,
+ kStaticGenerator = kStatic | kGenerator,
+ kAsync = 1 << 2,
+ kStaticAsync = kStatic | kAsync,
+
+ /* Any non-ordinary method kinds */
+ kSpecialMask = kGenerator | kAsync
+};
+
+inline bool IsValidMethodKind(MethodKind kind) {
+ return kind == MethodKind::kNormal || kind == MethodKind::kStatic ||
+ kind == MethodKind::kGenerator ||
+ kind == MethodKind::kStaticGenerator || kind == MethodKind::kAsync ||
+ kind == MethodKind::kStaticAsync;
+}
+
+static inline MethodKind operator|(MethodKind lhs, MethodKind rhs) {
+ typedef unsigned char T;
+ return static_cast<MethodKind>(static_cast<T>(lhs) | static_cast<T>(rhs));
+}
+
+static inline MethodKind& operator|=(MethodKind& lhs, const MethodKind& rhs) {
+ lhs = lhs | rhs;
+ DCHECK(IsValidMethodKind(lhs));
+ return lhs;
+}
+
+static inline bool operator&(MethodKind bitfield, MethodKind mask) {
+ typedef unsigned char T;
+ return static_cast<T>(bitfield) & static_cast<T>(mask);
+}
+
+inline bool IsNormalMethod(MethodKind kind) {
+ return kind == MethodKind::kNormal;
+}
+
+inline bool IsSpecialMethod(MethodKind kind) {
+ return kind & MethodKind::kSpecialMask;
+}
+
+inline bool IsStaticMethod(MethodKind kind) {
+ return kind & MethodKind::kStatic;
+}
+
+inline bool IsGeneratorMethod(MethodKind kind) {
+ return kind & MethodKind::kGenerator;
+}
+
+inline bool IsAsyncMethod(MethodKind kind) { return kind & MethodKind::kAsync; }
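+
+// Illustrative example: a static async method sets both bits, so
+//
+//   MethodKind kind = MethodKind::kStatic | MethodKind::kAsync;
+//   DCHECK(IsValidMethodKind(kind));  // kind == MethodKind::kStaticAsync
+//   DCHECK(IsStaticMethod(kind) && IsAsyncMethod(kind));
+//   DCHECK(IsSpecialMethod(kind));  // kAsync is part of kSpecialMask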
+
struct FormalParametersBase {
- explicit FormalParametersBase(Scope* scope) : scope(scope) {}
- Scope* scope;
+ explicit FormalParametersBase(DeclarationScope* scope) : scope(scope) {}
+ DeclarationScope* scope;
bool has_rest = false;
bool is_simple = true;
int materialized_literals_count = 0;
};
-// Common base class shared between parser and pre-parser. Traits encapsulate
-// the differences between Parser and PreParser:
+// ----------------------------------------------------------------------------
+// The CHECK_OK macro is a convenient shorthand for enforcing error
+// handling in functions that may fail (by returning !*ok).
+//
+// CAUTION: This macro appends extra statements after a call,
+// thus it must never be used where only a single statement
+// is correct (e.g. an if statement branch w/o braces)!
+
+#define CHECK_OK_CUSTOM(x) ok); \
+ if (!*ok) return this->x(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+// Used in functions where the return type is ExpressionT.
+#define CHECK_OK CHECK_OK_CUSTOM(EmptyExpression)
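+
+// For illustration, a call such as
+//
+//   ExpressionT expr = ParseExpression(true, CHECK_OK);
+//
+// expands to
+//
+//   ExpressionT expr = ParseExpression(true, ok);
+//   if (!*ok) return this->EmptyExpression();
+//   ((void)0);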
+
+// Common base class template shared between parser and pre-parser.
+// The Impl parameter is the actual class of the parser/pre-parser,
+// following the Curiously Recurring Template Pattern (CRTP).
+// The structure of the parser objects is roughly the following:
+//
+// // Common denominator, needed to avoid cyclic dependency.
+// // Instances of this template will end up with very minimal
+// // definitions, ideally containing just typedefs.
+// template <typename Impl>
+// class ParserBaseTraits;
+
+// // The parser base object, which should just implement pure
+// // parser behavior. The Impl parameter is the actual derived
+// // class (according to CRTP), which implements impure parser
+// // behavior.
+// template <typename Impl>
+// class ParserBase : public ParserBaseTraits<Impl> { ... };
+//
+// // And then, for each parser variant (e.g., parser, preparser, etc):
+// class Parser;
+//
+// template <>
+// class ParserBaseTraits<Parser> { ... };
+//
+// class Parser : public ParserBase<Parser> { ... };
+//
+// TODO(nikolaos): Currently the traits objects contain many things
+// that will be moved to the implementation objects or to the parser
+// base. The following comments will have to change when this happens.
+
+// The traits class template encapsulates the differences between
+// parser/pre-parser implementations. In particular:
// - Return types: For example, Parser functions return Expression* and
// PreParser functions return PreParserExpression.
@@ -57,33 +184,40 @@ struct FormalParametersBase {
// pretenured, and PreParser doesn't care.
// The traits are expected to contain the following typedefs:
-// struct Traits {
+// template <>
+// class ParserBaseTraits<Impl> {
// // In particular...
// struct Type {
-// // Used by FunctionState and BlockState.
-// typedef Scope;
// typedef GeneratorVariable;
+// typedef AstProperties;
+// typedef ExpressionClassifier;
// // Return types for traversing functions.
// typedef Identifier;
// typedef Expression;
+// typedef YieldExpression;
// typedef FunctionLiteral;
// typedef ClassLiteral;
-// typedef ObjectLiteralProperty;
// typedef Literal;
+// typedef ObjectLiteralProperty;
// typedef ExpressionList;
// typedef PropertyList;
// typedef FormalParameter;
// typedef FormalParameters;
+// typedef StatementList;
// // For constructing objects returned by the traversing functions.
// typedef Factory;
// };
// // ...
// };
-template <typename Traits>
-class ParserBase : public Traits {
+template <typename Impl>
+class ParserBaseTraits;
+
+template <typename Impl>
+class ParserBase : public ParserBaseTraits<Impl> {
public:
// Shorten type names defined by Traits.
+ typedef ParserBaseTraits<Impl> Traits;
typedef typename Traits::Type::Expression ExpressionT;
typedef typename Traits::Type::Identifier IdentifierT;
typedef typename Traits::Type::FormalParameter FormalParameterT;
@@ -94,18 +228,22 @@ class ParserBase : public Traits {
typedef typename Traits::Type::StatementList StatementListT;
typedef typename Traits::Type::ExpressionClassifier ExpressionClassifier;
+ // All implementation-specific methods must be called through this.
+ Impl* impl() { return static_cast<Impl*>(this); }
+ const Impl* impl() const { return static_cast<const Impl*>(this); }
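+ // (For example, ParsePrimaryExpression below calls
+ // impl()->ParseAsyncFunctionExpression(...), which dispatches statically to
+ // the derived parser with no virtual-call overhead.)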
+
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
- ParserRecorder* log, typename Traits::Type::Parser this_object)
- : Traits(this_object),
- parenthesized_function_(false),
- scope_(NULL),
- function_state_(NULL),
+ ParserRecorder* log)
+ : scope_state_(nullptr),
+ function_state_(nullptr),
extension_(extension),
- fni_(NULL),
+ fni_(nullptr),
ast_value_factory_(ast_value_factory),
+ ast_node_factory_(ast_value_factory),
log_(log),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
+ parsing_module_(false),
stack_limit_(stack_limit),
zone_(zone),
scanner_(scanner),
@@ -113,41 +251,35 @@ class ParserBase : public Traits {
allow_lazy_(false),
allow_natives_(false),
allow_tailcalls_(false),
- allow_harmony_sloppy_(false),
- allow_harmony_sloppy_function_(false),
- allow_harmony_sloppy_let_(false),
allow_harmony_restrictive_declarations_(false),
allow_harmony_do_expressions_(false),
- allow_harmony_function_name_(false),
- allow_harmony_function_sent_(false) {}
+ allow_harmony_for_in_(false),
+ allow_harmony_function_sent_(false),
+ allow_harmony_async_await_(false),
+ allow_harmony_restrictive_generators_(false),
+ allow_harmony_trailing_commas_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
void set_allow_##name(bool allow) { allow_##name##_ = allow; }
-#define SCANNER_ACCESSORS(name) \
- bool allow_##name() const { return scanner_->allow_##name(); } \
- void set_allow_##name(bool allow) { \
- return scanner_->set_allow_##name(allow); \
- }
-
ALLOW_ACCESSORS(lazy);
ALLOW_ACCESSORS(natives);
ALLOW_ACCESSORS(tailcalls);
- ALLOW_ACCESSORS(harmony_sloppy);
- ALLOW_ACCESSORS(harmony_sloppy_function);
- ALLOW_ACCESSORS(harmony_sloppy_let);
ALLOW_ACCESSORS(harmony_restrictive_declarations);
ALLOW_ACCESSORS(harmony_do_expressions);
- ALLOW_ACCESSORS(harmony_function_name);
+ ALLOW_ACCESSORS(harmony_for_in);
ALLOW_ACCESSORS(harmony_function_sent);
- SCANNER_ACCESSORS(harmony_exponentiation_operator);
+ ALLOW_ACCESSORS(harmony_async_await);
+ ALLOW_ACCESSORS(harmony_restrictive_generators);
+ ALLOW_ACCESSORS(harmony_trailing_commas);
-#undef SCANNER_ACCESSORS
#undef ALLOW_ACCESSORS
uintptr_t stack_limit() const { return stack_limit_; }
+ void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+
protected:
enum AllowRestrictedIdentifiers {
kAllowRestrictedIdentifiers,
@@ -169,21 +301,58 @@ class ParserBase : public Traits {
class ObjectLiteralCheckerBase;
// ---------------------------------------------------------------------------
- // FunctionState and BlockState together implement the parser's scope stack.
- // The parser's current scope is in scope_. BlockState and FunctionState
- // constructors push on the scope stack and the destructors pop. They are also
- // used to hold the parser's per-function and per-block state.
- class BlockState BASE_EMBEDDED {
+ // ScopeState and its subclasses implement the parser's scope stack.
+ // ScopeState keeps track of the current scope and the outer ScopeState. The
+ // parser's scope_state_ points to the top ScopeState. ScopeState
+ // constructors push onto the scope stack and the destructors pop. BlockState
+ // and FunctionState are used to hold additional per-block and per-function
+ // state.
+ class ScopeState BASE_EMBEDDED {
public:
- BlockState(Scope** scope_stack, Scope* scope)
- : scope_stack_(scope_stack), outer_scope_(*scope_stack) {
- *scope_stack_ = scope;
+ V8_INLINE Scope* scope() const { return scope_; }
+ Zone* zone() const { return scope_->zone(); }
+
+ protected:
+ ScopeState(ScopeState** scope_stack, Scope* scope)
+ : scope_stack_(scope_stack), outer_scope_(*scope_stack), scope_(scope) {
+ *scope_stack = this;
}
- ~BlockState() { *scope_stack_ = outer_scope_; }
+ ~ScopeState() { *scope_stack_ = outer_scope_; }
private:
- Scope** scope_stack_;
- Scope* outer_scope_;
+ ScopeState** const scope_stack_;
+ ScopeState* const outer_scope_;
+ Scope* const scope_;
+ };
+
+ class BlockState final : public ScopeState {
+ public:
+ BlockState(ScopeState** scope_stack, Scope* scope)
+ : ScopeState(scope_stack, scope) {}
+
+ // BlockState(ScopeState**) automatically manages Scope(BLOCK_SCOPE)
+ // allocation.
+ // TODO(verwaest): Move to a LazyBlockState class that only allocates the
+ // scope when needed.
+ explicit BlockState(ScopeState** scope_stack)
+ : ScopeState(scope_stack, NewScope(*scope_stack)) {}
+
+ void SetNonlinear() { this->scope()->SetNonlinear(); }
+ void set_start_position(int pos) { this->scope()->set_start_position(pos); }
+ void set_end_position(int pos) { this->scope()->set_end_position(pos); }
+ void set_is_hidden() { this->scope()->set_is_hidden(); }
+ Scope* FinalizedBlockScope() const {
+ return this->scope()->FinalizeBlockScope();
+ }
+ LanguageMode language_mode() const {
+ return this->scope()->language_mode();
+ }
+
+ private:
+ Scope* NewScope(ScopeState* outer_state) {
+ Scope* parent = outer_state->scope();
+ Zone* zone = outer_state->zone();
+ return new (zone) Scope(zone, parent, BLOCK_SCOPE);
+ }
};
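+
+ // Usage sketch (illustrative): callers typically stack-allocate a
+ // BlockState around a region that needs its own block scope, e.g.:
+ //
+ //   {
+ //     BlockState block_state(&scope_state_);  // pushes a fresh BLOCK_SCOPE
+ //     block_state.set_start_position(position());
+ //     // ... parse the block's contents ...
+ //     block_state.set_end_position(scanner()->location().end_pos);
+ //   }  // the destructor pops back to the outer scope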
struct DestructuringAssignment {
@@ -195,11 +364,68 @@ class ParserBase : public Traits {
Scope* scope;
};
- class FunctionState BASE_EMBEDDED {
+ class TailCallExpressionList {
public:
- FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
- Scope* scope, FunctionKind kind,
- typename Traits::Type::Factory* factory);
+ explicit TailCallExpressionList(Zone* zone)
+ : zone_(zone), expressions_(0, zone), has_explicit_tail_calls_(false) {}
+
+ const ZoneList<ExpressionT>& expressions() const { return expressions_; }
+ const Scanner::Location& location() const { return loc_; }
+
+ bool has_explicit_tail_calls() const { return has_explicit_tail_calls_; }
+
+ void Swap(TailCallExpressionList& other) {
+ expressions_.Swap(&other.expressions_);
+ std::swap(loc_, other.loc_);
+ std::swap(has_explicit_tail_calls_, other.has_explicit_tail_calls_);
+ }
+
+ void AddImplicitTailCall(ExpressionT expr) {
+ expressions_.Add(expr, zone_);
+ }
+
+ void AddExplicitTailCall(ExpressionT expr, const Scanner::Location& loc) {
+ if (!has_explicit_tail_calls()) {
+ loc_ = loc;
+ has_explicit_tail_calls_ = true;
+ }
+ expressions_.Add(expr, zone_);
+ }
+
+ void Append(const TailCallExpressionList& other) {
+ if (!has_explicit_tail_calls()) {
+ loc_ = other.loc_;
+ has_explicit_tail_calls_ = other.has_explicit_tail_calls_;
+ }
+ expressions_.AddAll(other.expressions_, zone_);
+ }
+
+ private:
+ Zone* zone_;
+ ZoneList<ExpressionT> expressions_;
+ Scanner::Location loc_;
+ bool has_explicit_tail_calls_;
+ };
+
+ // Defines whether tail call expressions are allowed or not.
+ enum class ReturnExprContext {
+ // We are inside a return statement, so tail call
+ // expressions are allowed.
+ kInsideValidReturnStatement,
+
+ // We are inside a block in which tail call expressions are allowed but
+ // not yet inside a return statement.
+ kInsideValidBlock,
+
+ // Tail call expressions are not allowed in the following blocks.
+ kInsideTryBlock,
+ kInsideForInOfBody,
+ };
+
+ class FunctionState final : public ScopeState {
+ public:
+ FunctionState(FunctionState** function_state_stack,
+ ScopeState** scope_stack, Scope* scope, FunctionKind kind);
~FunctionState();
int NextMaterializedLiteralIndex() {
@@ -216,20 +442,9 @@ class ParserBase : public Traits {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
- Scanner::Location this_location() const { return this_location_; }
- Scanner::Location super_location() const { return super_location_; }
- Scanner::Location return_location() const { return return_location_; }
- void set_this_location(Scanner::Location location) {
- this_location_ = location;
- }
- void set_super_location(Scanner::Location location) {
- super_location_ = location;
- }
- void set_return_location(Scanner::Location location) {
- return_location_ = location;
- }
-
bool is_generator() const { return IsGeneratorFunction(kind_); }
+ bool is_async_function() const { return IsAsyncFunction(kind_); }
+ bool is_resumable() const { return is_generator() || is_async_function(); }
FunctionKind kind() const { return kind_; }
FunctionState* outer() const { return outer_function_state_; }
@@ -237,7 +452,7 @@ class ParserBase : public Traits {
void set_generator_object_variable(
typename Traits::Type::GeneratorVariable* variable) {
DCHECK(variable != NULL);
- DCHECK(is_generator());
+ DCHECK(is_resumable());
generator_object_variable_ = variable;
}
typename Traits::Type::GeneratorVariable* generator_object_variable()
@@ -245,42 +460,66 @@ class ParserBase : public Traits {
return generator_object_variable_;
}
- typename Traits::Type::Factory* factory() { return factory_; }
-
- const List<DestructuringAssignment>& destructuring_assignments_to_rewrite()
- const {
+ const ZoneList<DestructuringAssignment>&
+ destructuring_assignments_to_rewrite() const {
return destructuring_assignments_to_rewrite_;
}
- List<ExpressionT>& expressions_in_tail_position() {
- return expressions_in_tail_position_;
+ TailCallExpressionList& tail_call_expressions() {
+ return tail_call_expressions_;
+ }
+ void AddImplicitTailCallExpression(ExpressionT expression) {
+ if (return_expr_context() ==
+ ReturnExprContext::kInsideValidReturnStatement) {
+ tail_call_expressions_.AddImplicitTailCall(expression);
+ }
}
- void AddExpressionInTailPosition(ExpressionT expression) {
- if (collect_expressions_in_tail_position_) {
- expressions_in_tail_position_.Add(expression);
+ void AddExplicitTailCallExpression(ExpressionT expression,
+ const Scanner::Location& loc) {
+ DCHECK(expression->IsCall());
+ if (return_expr_context() ==
+ ReturnExprContext::kInsideValidReturnStatement) {
+ tail_call_expressions_.AddExplicitTailCall(expression, loc);
}
}
- bool collect_expressions_in_tail_position() const {
- return collect_expressions_in_tail_position_;
+ ZoneList<typename ExpressionClassifier::Error>* GetReportedErrorList() {
+ return &reported_errors_;
+ }
+
+ ReturnExprContext return_expr_context() const {
+ return return_expr_context_;
}
- void set_collect_expressions_in_tail_position(bool collect) {
- collect_expressions_in_tail_position_ = collect;
+ void set_return_expr_context(ReturnExprContext context) {
+ return_expr_context_ = context;
}
ZoneList<ExpressionT>* non_patterns_to_rewrite() {
return &non_patterns_to_rewrite_;
}
+ bool next_function_is_parenthesized() const {
+ return next_function_is_parenthesized_;
+ }
+
+ void set_next_function_is_parenthesized(bool parenthesized) {
+ next_function_is_parenthesized_ = parenthesized;
+ }
+
+ bool this_function_is_parenthesized() const {
+ return this_function_is_parenthesized_;
+ }
+
private:
void AddDestructuringAssignment(DestructuringAssignment pair) {
- destructuring_assignments_to_rewrite_.Add(pair);
+ destructuring_assignments_to_rewrite_.Add(pair, this->zone());
}
- V8_INLINE Scope* scope() { return *scope_stack_; }
-
- void AddNonPatternForRewriting(ExpressionT expr) {
- non_patterns_to_rewrite_.Add(expr, (*scope_stack_)->zone());
+ void AddNonPatternForRewriting(ExpressionT expr, bool* ok) {
+ non_patterns_to_rewrite_.Add(expr, this->zone());
+ if (non_patterns_to_rewrite_.length() >=
+ std::numeric_limits<uint16_t>::max())
+ *ok = false;
}
// Used to assign an index to each literal that needs materialization in
@@ -291,15 +530,6 @@ class ParserBase : public Traits {
// Properties count estimation.
int expected_property_count_;
- // Location of most recent use of 'this' (invalid if none).
- Scanner::Location this_location_;
-
- // Location of most recent 'return' statement (invalid if none).
- Scanner::Location return_location_;
-
- // Location of call to the "super" constructor (invalid if none).
- Scanner::Location super_location_;
-
FunctionKind kind_;
 // For generators, this variable may hold the generator object. This variable
 // is used by yield expressions and return statements. It is not necessary
@@ -308,21 +538,68 @@ class ParserBase : public Traits {
FunctionState** function_state_stack_;
FunctionState* outer_function_state_;
- Scope** scope_stack_;
- Scope* outer_scope_;
- List<DestructuringAssignment> destructuring_assignments_to_rewrite_;
- List<ExpressionT> expressions_in_tail_position_;
- bool collect_expressions_in_tail_position_;
+ ZoneList<DestructuringAssignment> destructuring_assignments_to_rewrite_;
+ TailCallExpressionList tail_call_expressions_;
+ ReturnExprContext return_expr_context_;
ZoneList<ExpressionT> non_patterns_to_rewrite_;
- typename Traits::Type::Factory* factory_;
+ ZoneList<typename ExpressionClassifier::Error> reported_errors_;
+
+ // If true, the next (and immediately following) function literal is
+ // preceded by a parenthesis.
+ bool next_function_is_parenthesized_;
- friend class ParserTraits;
- friend class PreParserTraits;
+ // The value of the parent's next_function_is_parenthesized_, as it applies
+ // to this function. Filled in by the constructor.
+ bool this_function_is_parenthesized_;
+
+ friend Impl;
friend class Checkpoint;
};
+ // This scope sets the current ReturnExprContext to the given value.
+ class ReturnExprScope {
+ public:
+ explicit ReturnExprScope(FunctionState* function_state,
+ ReturnExprContext return_expr_context)
+ : function_state_(function_state),
+ sav_return_expr_context_(function_state->return_expr_context()) {
+ // Don't update the context if we are requested to enable tail call
+ // expressions but the current block does not allow them.
+ if (return_expr_context !=
+ ReturnExprContext::kInsideValidReturnStatement ||
+ sav_return_expr_context_ == ReturnExprContext::kInsideValidBlock) {
+ function_state->set_return_expr_context(return_expr_context);
+ }
+ }
+ ~ReturnExprScope() {
+ function_state_->set_return_expr_context(sav_return_expr_context_);
+ }
+
+ private:
+ FunctionState* function_state_;
+ ReturnExprContext sav_return_expr_context_;
+ };
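+
+ // Usage sketch (illustrative): parsing code that must not treat returns as
+ // tail calls (e.g. inside a try block) can temporarily override the context:
+ //
+ //   {
+ //     ReturnExprScope no_tail_calls(function_state_,
+ //                                   ReturnExprContext::kInsideTryBlock);
+ //     // ... parse the try block ...
+ //   }  // the previous ReturnExprContext is restored here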
+
+ // Collects all return expressions at tail call position in this scope
+ // into a separate list.
+ class CollectExpressionsInTailPositionToListScope {
+ public:
+ CollectExpressionsInTailPositionToListScope(FunctionState* function_state,
+ TailCallExpressionList* list)
+ : function_state_(function_state), list_(list) {
+ function_state->tail_call_expressions().Swap(*list_);
+ }
+ ~CollectExpressionsInTailPositionToListScope() {
+ function_state_->tail_call_expressions().Swap(*list_);
+ }
+
+ private:
+ FunctionState* function_state_;
+ TailCallExpressionList* list_;
+ };
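+
+ // Usage sketch (illustrative): collect the tail calls of a sub-parse into a
+ // local list and merge them back afterwards:
+ //
+ //   TailCallExpressionList tail_calls(zone());
+ //   {
+ //     CollectExpressionsInTailPositionToListScope collect(function_state_,
+ //                                                         &tail_calls);
+ //     // ... parse a return expression ...
+ //   }  // the function state's own list is swapped back in here
+ //   function_state_->tail_call_expressions().Append(tail_calls);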
+
// Annoyingly, arrow functions first parse as comma expressions, then when we
// see the => we have to go back and reinterpret the arguments as being formal
// parameters. To do so we need to reset some of the parser state back to
@@ -367,24 +644,55 @@ class ParserBase : public Traits {
Mode old_mode_;
};
- Scope* NewScope(Scope* parent, ScopeType scope_type) {
- // Must always pass the function kind for FUNCTION_SCOPE.
- DCHECK(scope_type != FUNCTION_SCOPE);
- return NewScope(parent, scope_type, kNormalFunction);
+ DeclarationScope* NewScriptScope() const {
+ return new (zone()) DeclarationScope(zone());
+ }
+
+ DeclarationScope* NewVarblockScope() const {
+ return new (zone()) DeclarationScope(zone(), scope(), BLOCK_SCOPE);
+ }
+
+ ModuleScope* NewModuleScope(DeclarationScope* parent) const {
+ return new (zone()) ModuleScope(zone(), parent, ast_value_factory());
+ }
+
+ DeclarationScope* NewEvalScope(Scope* parent) const {
+ return new (zone()) DeclarationScope(zone(), parent, EVAL_SCOPE);
+ }
+
+ Scope* NewScope(ScopeType scope_type) const {
+ return NewScopeWithParent(scope(), scope_type);
+ }
+
+ // This factory method should only be used when absolutely necessary. Most
+ // scopes should automatically use scope() as the parent, and be fine with
+ // NewScope(ScopeType) above.
+ Scope* NewScopeWithParent(Scope* parent, ScopeType scope_type) const {
+ // Must always use the specific constructors for the blacklisted scope
+ // types.
+ DCHECK_NE(FUNCTION_SCOPE, scope_type);
+ DCHECK_NE(SCRIPT_SCOPE, scope_type);
+ DCHECK_NE(MODULE_SCOPE, scope_type);
+ DCHECK_NOT_NULL(parent);
+ return new (zone()) Scope(zone(), parent, scope_type);
}
- Scope* NewScope(Scope* parent, ScopeType scope_type, FunctionKind kind) {
+ DeclarationScope* NewFunctionScope(FunctionKind kind) const {
DCHECK(ast_value_factory());
- Scope* result = new (zone())
- Scope(zone(), parent, scope_type, ast_value_factory(), kind);
- result->Initialize();
+ DeclarationScope* result =
+ new (zone()) DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
+ // TODO(verwaest): Move into the DeclarationScope constructor.
+ if (!IsArrowFunction(kind)) {
+ result->DeclareThis(ast_value_factory());
+ result->DeclareDefaultFunctionVariables(ast_value_factory());
+ }
return result;
}
Scanner* scanner() const { return scanner_; }
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
- int position() { return scanner_->location().beg_pos; }
- int peek_position() { return scanner_->peek_location().beg_pos; }
+ int position() const { return scanner_->location().beg_pos; }
+ int peek_position() const { return scanner_->peek_location().beg_pos; }
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
Mode mode() const { return mode_; }
@@ -453,12 +761,16 @@ class ParserBase : public Traits {
Expect(Token::SEMICOLON, ok);
}
- bool peek_any_identifier() {
- Token::Value next = peek();
- return next == Token::IDENTIFIER || next == Token::FUTURE_RESERVED_WORD ||
- next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
- next == Token::STATIC || next == Token::YIELD;
+ // A dummy function, just useful as an argument to CHECK_OK_CUSTOM.
+ static void Void() {}
+
+ bool is_any_identifier(Token::Value token) {
+ return token == Token::IDENTIFIER || token == Token::ENUM ||
+ token == Token::AWAIT || token == Token::ASYNC ||
+ token == Token::FUTURE_STRICT_RESERVED_WORD || token == Token::LET ||
+ token == Token::STATIC || token == Token::YIELD;
}
+ bool peek_any_identifier() { return is_any_identifier(peek()); }
bool CheckContextualKeyword(Vector<const char> keyword) {
if (PeekContextualKeyword(keyword)) {
@@ -477,8 +789,7 @@ class ParserBase : public Traits {
const char* full_name, int pos, bool* ok);
void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
- Expect(Token::IDENTIFIER, ok);
- if (!*ok) return;
+ Expect(Token::IDENTIFIER, CHECK_OK_CUSTOM(Void));
if (!scanner()->is_literal_contextual_keyword(keyword)) {
ReportUnexpectedToken(scanner()->current_token());
*ok = false;
@@ -512,6 +823,18 @@ class ParserBase : public Traits {
*ok = false;
}
}
+ // For now, this check just collects statistics.
+ void CheckDecimalLiteralWithLeadingZero(int* use_counts, int beg_pos,
+ int end_pos) {
+ Scanner::Location token_location =
+ scanner()->decimal_with_leading_zero_position();
+ if (token_location.IsValid() && beg_pos <= token_location.beg_pos &&
+ token_location.end_pos <= end_pos) {
+ scanner()->clear_decimal_with_leading_zero_position();
+ if (use_counts != nullptr)
+ ++use_counts[v8::Isolate::kDecimalWithLeadingZeroInStrictMode];
+ }
+ }
inline void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kStrictOctalLiteral,
@@ -557,20 +880,17 @@ class ParserBase : public Traits {
return Token::Precedence(token);
}
- typename Traits::Type::Factory* factory() {
- return function_state_->factory();
- }
+ typename Traits::Type::Factory* factory() { return &ast_node_factory_; }
- LanguageMode language_mode() { return scope_->language_mode(); }
- bool is_generator() const { return function_state_->is_generator(); }
-
- bool allow_const() {
- return is_strict(language_mode()) || allow_harmony_sloppy();
+ DeclarationScope* GetReceiverScope() const {
+ return scope()->GetReceiverScope();
}
-
- bool allow_let() {
- return is_strict(language_mode()) || allow_harmony_sloppy_let();
+ LanguageMode language_mode() { return scope()->language_mode(); }
+ bool is_generator() const { return function_state_->is_generator(); }
+ bool is_async_function() const {
+ return function_state_->is_async_function();
}
+ bool is_resumable() const { return function_state_->is_resumable(); }
// Report syntax errors.
void ReportMessage(MessageTemplate::Template message, const char* arg = NULL,
@@ -579,11 +899,31 @@ class ParserBase : public Traits {
Traits::ReportMessageAt(source_location, message, arg, error_type);
}
+ void ReportMessage(MessageTemplate::Template message, const AstRawString* arg,
+ ParseErrorType error_type = kSyntaxError) {
+ Scanner::Location source_location = scanner()->location();
+ Traits::ReportMessageAt(source_location, message, arg, error_type);
+ }
+
+ void ReportMessageAt(Scanner::Location location,
+ MessageTemplate::Template message,
+ const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError) {
+ Traits::ReportMessageAt(location, message, arg, error_type);
+ }
+
void ReportMessageAt(Scanner::Location location,
MessageTemplate::Template message,
+ const AstRawString* arg,
ParseErrorType error_type = kSyntaxError) {
- Traits::ReportMessageAt(location, message, reinterpret_cast<const char*>(0),
- error_type);
+ Traits::ReportMessageAt(location, message, arg, error_type);
+ }
+
+ void ReportMessageAt(Scanner::Location location,
+ MessageTemplate::Template message,
+ ParseErrorType error_type) {
+ ReportMessageAt(location, message, static_cast<const char*>(nullptr),
+ error_type);
}
void GetUnexpectedTokenMessage(
@@ -604,12 +944,12 @@ class ParserBase : public Traits {
void ValidateExpression(const ExpressionClassifier* classifier, bool* ok) {
if (!classifier->is_valid_expression() ||
- classifier->has_cover_initialized_name()) {
+ classifier->has_object_literal_error()) {
const Scanner::Location& a = classifier->expression_error().location;
const Scanner::Location& b =
- classifier->cover_initialized_name_error().location;
+ classifier->object_literal_error().location;
if (a.beg_pos < 0 || (b.beg_pos >= 0 && a.beg_pos > b.beg_pos)) {
- ReportClassifierError(classifier->cover_initialized_name_error());
+ ReportClassifierError(classifier->object_literal_error());
} else {
ReportClassifierError(classifier->expression_error());
}
@@ -655,9 +995,14 @@ class ParserBase : public Traits {
}
}
+ bool IsValidArrowFormalParametersStart(Token::Value token) {
+ return is_any_identifier(token) || token == Token::LPAREN;
+ }
+
void ValidateArrowFormalParameters(const ExpressionClassifier* classifier,
ExpressionT expr,
- bool parenthesized_formals, bool* ok) {
+ bool parenthesized_formals, bool is_async,
+ bool* ok) {
if (classifier->is_valid_binding_pattern()) {
// A simple arrow formal parameter: IDENTIFIER => BODY.
if (!this->IsIdentifier(expr)) {
@@ -677,6 +1022,12 @@ class ParserBase : public Traits {
ReportClassifierError(error);
*ok = false;
}
+ if (is_async && !classifier->is_valid_async_arrow_formal_parameters()) {
+ const typename ExpressionClassifier::Error& error =
+ classifier->async_arrow_formal_parameters_error();
+ ReportClassifierError(error);
+ *ok = false;
+ }
}
void ValidateLetPattern(const ExpressionClassifier* classifier, bool* ok) {
@@ -686,6 +1037,15 @@ class ParserBase : public Traits {
}
}
+ void CheckNoTailCallExpressions(const ExpressionClassifier* classifier,
+ bool* ok) {
+ if (FLAG_harmony_explicit_tailcalls &&
+ classifier->has_tail_call_expression()) {
+ ReportClassifierError(classifier->tail_call_expression_error());
+ *ok = false;
+ }
+ }
+
void ExpressionUnexpectedToken(ExpressionClassifier* classifier) {
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
const char* arg;
@@ -721,27 +1081,30 @@ class ParserBase : public Traits {
IdentifierT ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
bool* ok);
 // Parses an identifier or a strict mode future reserved word, and indicates
- // whether it is strict mode future reserved. Allows passing in is_generator
+ // whether it is strict mode future reserved. Allows passing in function_kind
// for the case of parsing the identifier in a function expression, where the
- // relevant "is_generator" bit is of the function being parsed, not the
- // containing
- // function.
- IdentifierT ParseIdentifierOrStrictReservedWord(bool is_generator,
+ // relevant "function_kind" bit is of the function being parsed, not the
+ // containing function.
+ IdentifierT ParseIdentifierOrStrictReservedWord(FunctionKind function_kind,
bool* is_strict_reserved,
bool* ok);
IdentifierT ParseIdentifierOrStrictReservedWord(bool* is_strict_reserved,
bool* ok) {
- return ParseIdentifierOrStrictReservedWord(this->is_generator(),
+ return ParseIdentifierOrStrictReservedWord(function_state_->kind(),
is_strict_reserved, ok);
}
IdentifierT ParseIdentifierName(bool* ok);
- ExpressionT ParseRegExpLiteral(bool seen_equal,
- ExpressionClassifier* classifier, bool* ok);
+ ExpressionT ParseRegExpLiteral(bool* ok);
ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
- bool* ok);
+ bool* is_async, bool* ok);
+ ExpressionT ParsePrimaryExpression(ExpressionClassifier* classifier,
+ bool* ok) {
+ bool is_async;
+ return ParsePrimaryExpression(classifier, &is_async, ok);
+ }
ExpressionT ParseExpression(bool accept_IN, bool* ok);
ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
bool* ok);
@@ -752,16 +1115,24 @@ class ParserBase : public Traits {
ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
ObjectLiteralPropertyT ParsePropertyDefinition(
ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
- bool is_static, bool* is_computed_name, bool* has_seen_constructor,
+ MethodKind kind, bool* is_computed_name, bool* has_seen_constructor,
ExpressionClassifier* classifier, IdentifierT* name, bool* ok);
typename Traits::Type::ExpressionList ParseArguments(
+ Scanner::Location* first_spread_pos, bool maybe_arrow,
+ ExpressionClassifier* classifier, bool* ok);
+ typename Traits::Type::ExpressionList ParseArguments(
Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
- bool* ok);
+ bool* ok) {
+ return ParseArguments(first_spread_pos, false, classifier, ok);
+ }
ExpressionT ParseAssignmentExpression(bool accept_IN,
ExpressionClassifier* classifier,
bool* ok);
- ExpressionT ParseYieldExpression(ExpressionClassifier* classifier, bool* ok);
+ ExpressionT ParseYieldExpression(bool accept_IN,
+ ExpressionClassifier* classifier, bool* ok);
+ ExpressionT ParseTailCallExpression(ExpressionClassifier* classifier,
+ bool* ok);
ExpressionT ParseConditionalExpression(bool accept_IN,
ExpressionClassifier* classifier,
bool* ok);
@@ -773,19 +1144,20 @@ class ParserBase : public Traits {
ExpressionT ParseLeftHandSideExpression(ExpressionClassifier* classifier,
bool* ok);
ExpressionT ParseMemberWithNewPrefixesExpression(
- ExpressionClassifier* classifier, bool* ok);
- ExpressionT ParseMemberExpression(ExpressionClassifier* classifier, bool* ok);
+ ExpressionClassifier* classifier, bool* is_async, bool* ok);
+ ExpressionT ParseMemberExpression(ExpressionClassifier* classifier,
+ bool* is_async, bool* ok);
ExpressionT ParseMemberExpressionContinuation(
- ExpressionT expression, ExpressionClassifier* classifier, bool* ok);
+ ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
+ bool* ok);
ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
const FormalParametersT& parameters,
+ bool is_async,
const ExpressionClassifier& classifier,
bool* ok);
ExpressionT ParseTemplateLiteral(ExpressionT tag, int start,
ExpressionClassifier* classifier, bool* ok);
- void AddTemplateExpression(ExpressionT);
- ExpressionT ParseSuperExpression(bool is_new,
- ExpressionClassifier* classifier, bool* ok);
+ ExpressionT ParseSuperExpression(bool is_new, bool* ok);
ExpressionT ParseNewTargetExpression(bool* ok);
void ParseFormalParameter(FormalParametersT* parameters,
@@ -797,6 +1169,7 @@ class ParserBase : public Traits {
int formals_end_pos, bool* ok);
bool IsNextLetKeyword();
+ bool IsTrivialExpression();
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
@@ -826,16 +1199,19 @@ class ParserBase : public Traits {
// Keep track of eval() calls since they disable all local variable
 // optimizations. This checks whether the expression is an eval call and, if
 // so, forwards the information to the scope.
- void CheckPossibleEvalCall(ExpressionT expression, Scope* scope) {
+ Call::PossiblyEval CheckPossibleEvalCall(ExpressionT expression,
+ Scope* scope) {
if (Traits::IsIdentifier(expression) &&
Traits::IsEval(Traits::AsIdentifier(expression))) {
scope->RecordEvalCall();
if (is_sloppy(scope->language_mode())) {
// For sloppy scopes we also have to record the call at function level,
// in case it includes declarations that will be hoisted.
- scope->DeclarationScope()->RecordEvalCall();
+ scope->GetDeclarationScope()->RecordEvalCall();
}
+ return Call::IS_POSSIBLY_EVAL;
}
+ return Call::NOT_EVAL;
}
// Used to validate property names in object literals and class literals
@@ -850,7 +1226,8 @@ class ParserBase : public Traits {
explicit ObjectLiteralCheckerBase(ParserBase* parser) : parser_(parser) {}
virtual void CheckProperty(Token::Value property, PropertyKind type,
- bool is_static, bool is_generator, bool* ok) = 0;
+ MethodKind method_type,
+ ExpressionClassifier* classifier, bool* ok) = 0;
virtual ~ObjectLiteralCheckerBase() {}
@@ -868,8 +1245,9 @@ class ParserBase : public Traits {
explicit ObjectLiteralChecker(ParserBase* parser)
: ObjectLiteralCheckerBase(parser), has_seen_proto_(false) {}
- void CheckProperty(Token::Value property, PropertyKind type, bool is_static,
- bool is_generator, bool* ok) override;
+ void CheckProperty(Token::Value property, PropertyKind type,
+ MethodKind method_type, ExpressionClassifier* classifier,
+ bool* ok) override;
private:
bool IsProto() { return this->scanner()->LiteralMatches("__proto__", 9); }
@@ -883,8 +1261,9 @@ class ParserBase : public Traits {
explicit ClassLiteralChecker(ParserBase* parser)
: ObjectLiteralCheckerBase(parser), has_seen_constructor_(false) {}
- void CheckProperty(Token::Value property, PropertyKind type, bool is_static,
- bool is_generator, bool* ok) override;
+ void CheckProperty(Token::Value property, PropertyKind type,
+ MethodKind method_type, ExpressionClassifier* classifier,
+ bool* ok) override;
private:
bool IsConstructor() {
@@ -897,19 +1276,20 @@ class ParserBase : public Traits {
bool has_seen_constructor_;
};
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- // Heuristically that means that the function will be called immediately,
- // so never lazily compile it.
- bool parenthesized_function_;
+ ModuleDescriptor* module() const {
+ return scope()->AsModuleScope()->module();
+ }
+ Scope* scope() const { return scope_state_->scope(); }
- Scope* scope_; // Scope stack.
+ ScopeState* scope_state_; // Scope stack.
FunctionState* function_state_; // Function state stack.
v8::Extension* extension_;
FuncNameInferrer* fni_;
AstValueFactory* ast_value_factory_; // Not owned.
+ typename Traits::Type::Factory ast_node_factory_;
ParserRecorder* log_;
Mode mode_;
+ bool parsing_module_;
uintptr_t stack_limit_;
private:
@@ -921,46 +1301,50 @@ class ParserBase : public Traits {
bool allow_lazy_;
bool allow_natives_;
bool allow_tailcalls_;
- bool allow_harmony_sloppy_;
- bool allow_harmony_sloppy_function_;
- bool allow_harmony_sloppy_let_;
bool allow_harmony_restrictive_declarations_;
bool allow_harmony_do_expressions_;
- bool allow_harmony_function_name_;
+ bool allow_harmony_for_in_;
bool allow_harmony_function_sent_;
+ bool allow_harmony_async_await_;
+ bool allow_harmony_restrictive_generators_;
+ bool allow_harmony_trailing_commas_;
+
+ friend class DiscardableZoneScope;
};
-template <class Traits>
-ParserBase<Traits>::FunctionState::FunctionState(
- FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
- FunctionKind kind, typename Traits::Type::Factory* factory)
- : next_materialized_literal_index_(0),
+template <typename Impl>
+ParserBase<Impl>::FunctionState::FunctionState(
+ FunctionState** function_state_stack, ScopeState** scope_stack,
+ Scope* scope, FunctionKind kind)
+ : ScopeState(scope_stack, scope),
+ next_materialized_literal_index_(0),
expected_property_count_(0),
- this_location_(Scanner::Location::invalid()),
- return_location_(Scanner::Location::invalid()),
- super_location_(Scanner::Location::invalid()),
kind_(kind),
generator_object_variable_(NULL),
function_state_stack_(function_state_stack),
outer_function_state_(*function_state_stack),
- scope_stack_(scope_stack),
- outer_scope_(*scope_stack),
- collect_expressions_in_tail_position_(true),
+ destructuring_assignments_to_rewrite_(16, scope->zone()),
+ tail_call_expressions_(scope->zone()),
+ return_expr_context_(ReturnExprContext::kInsideValidBlock),
non_patterns_to_rewrite_(0, scope->zone()),
- factory_(factory) {
- *scope_stack_ = scope;
+ reported_errors_(16, scope->zone()),
+ next_function_is_parenthesized_(false),
+ this_function_is_parenthesized_(false) {
*function_state_stack = this;
+ if (outer_function_state_) {
+ this_function_is_parenthesized_ =
+ outer_function_state_->next_function_is_parenthesized_;
+ outer_function_state_->next_function_is_parenthesized_ = false;
+ }
}
-
-template <class Traits>
-ParserBase<Traits>::FunctionState::~FunctionState() {
- *scope_stack_ = outer_scope_;
+template <typename Impl>
+ParserBase<Impl>::FunctionState::~FunctionState() {
*function_state_stack_ = outer_function_state_;
}
-template <class Traits>
-void ParserBase<Traits>::GetUnexpectedTokenMessage(
+template <typename Impl>
+void ParserBase<Impl>::GetUnexpectedTokenMessage(
Token::Value token, MessageTemplate::Template* message,
Scanner::Location* location, const char** arg,
MessageTemplate::Template default_) {
@@ -979,7 +1363,8 @@ void ParserBase<Traits>::GetUnexpectedTokenMessage(
case Token::IDENTIFIER:
*message = MessageTemplate::kUnexpectedTokenIdentifier;
break;
- case Token::FUTURE_RESERVED_WORD:
+ case Token::AWAIT:
+ case Token::ENUM:
*message = MessageTemplate::kUnexpectedReserved;
break;
case Token::LET:
@@ -1006,6 +1391,9 @@ void ParserBase<Traits>::GetUnexpectedTokenMessage(
*message = MessageTemplate::kInvalidOrUnexpectedToken;
}
break;
+ case Token::REGEXP_LITERAL:
+ *message = MessageTemplate::kUnexpectedTokenRegExp;
+ break;
default:
const char* name = Token::String(token);
DCHECK(name != NULL);
@@ -1014,15 +1402,13 @@ void ParserBase<Traits>::GetUnexpectedTokenMessage(
}
}
-
-template <class Traits>
-void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
+template <typename Impl>
+void ParserBase<Impl>::ReportUnexpectedToken(Token::Value token) {
return ReportUnexpectedTokenAt(scanner_->location(), token);
}
-
-template <class Traits>
-void ParserBase<Traits>::ReportUnexpectedTokenAt(
+template <typename Impl>
+void ParserBase<Impl>::ReportUnexpectedTokenAt(
Scanner::Location source_location, Token::Value token,
MessageTemplate::Template message) {
const char* arg;
@@ -1030,31 +1416,28 @@ void ParserBase<Traits>::ReportUnexpectedTokenAt(
Traits::ReportMessageAt(source_location, message, arg);
}
-
-template <class Traits>
-typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
+template <typename Impl>
+typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
AllowRestrictedIdentifiers allow_restricted_identifiers, bool* ok) {
ExpressionClassifier classifier(this);
- auto result = ParseAndClassifyIdentifier(&classifier, ok);
- if (!*ok) return Traits::EmptyIdentifier();
+ auto result =
+ ParseAndClassifyIdentifier(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
if (allow_restricted_identifiers == kDontAllowRestrictedIdentifiers) {
- ValidateAssignmentPattern(&classifier, ok);
- if (!*ok) return Traits::EmptyIdentifier();
- ValidateBindingPattern(&classifier, ok);
- if (!*ok) return Traits::EmptyIdentifier();
+ ValidateAssignmentPattern(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
+ ValidateBindingPattern(&classifier, CHECK_OK_CUSTOM(EmptyIdentifier));
}
return result;
}
-
-template <class Traits>
-typename ParserBase<Traits>::IdentifierT
-ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::IdentifierT
+ParserBase<Impl>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
+ bool* ok) {
Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
+ if (next == Token::IDENTIFIER || next == Token::ASYNC ||
+ (next == Token::AWAIT && !parsing_module_ && !is_async_function())) {
IdentifierT name = this->GetSymbol(scanner());
// When this function is used to read a formal parameter, we don't always
// know whether the function is going to be strict or sloppy. Indeed for
@@ -1062,22 +1445,16 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
// is actually a formal parameter. Therefore besides the errors that we
// must detect because we know we're in strict mode, we also record any
// error that we might make in the future once we know the language mode.
- if (this->IsEval(name)) {
- classifier->RecordStrictModeFormalParameterError(
- scanner()->location(), MessageTemplate::kStrictEvalArguments);
- if (is_strict(language_mode())) {
- classifier->RecordBindingPatternError(
- scanner()->location(), MessageTemplate::kStrictEvalArguments);
- }
- }
- if (this->IsArguments(name)) {
- scope_->RecordArgumentsUsage();
+ if (this->IsEvalOrArguments(name)) {
classifier->RecordStrictModeFormalParameterError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
if (is_strict(language_mode())) {
classifier->RecordBindingPatternError(
scanner()->location(), MessageTemplate::kStrictEvalArguments);
}
+ } else if (next == Token::AWAIT) {
+ classifier->RecordAsyncArrowFormalParametersError(
+ scanner()->location(), MessageTemplate::kAwaitBindingIdentifier);
}
if (classifier->duplicate_finder() != nullptr &&
@@ -1112,16 +1489,18 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
}
}
-
-template <class Traits>
-typename ParserBase<Traits>::IdentifierT
-ParserBase<Traits>::ParseIdentifierOrStrictReservedWord(
- bool is_generator, bool* is_strict_reserved, bool* ok) {
+template <class Impl>
+typename ParserBase<Impl>::IdentifierT
+ParserBase<Impl>::ParseIdentifierOrStrictReservedWord(
+ FunctionKind function_kind, bool* is_strict_reserved, bool* ok) {
Token::Value next = Next();
- if (next == Token::IDENTIFIER) {
+ if (next == Token::IDENTIFIER || (next == Token::AWAIT && !parsing_module_ &&
+ !IsAsyncFunction(function_kind)) ||
+ next == Token::ASYNC) {
*is_strict_reserved = false;
} else if (next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
- next == Token::STATIC || (next == Token::YIELD && !is_generator)) {
+ next == Token::STATIC ||
+ (next == Token::YIELD && !IsGeneratorFunction(function_kind))) {
*is_strict_reserved = true;
} else {
ReportUnexpectedToken(next);
@@ -1129,18 +1508,16 @@ ParserBase<Traits>::ParseIdentifierOrStrictReservedWord(
return Traits::EmptyIdentifier();
}
- IdentifierT name = this->GetSymbol(scanner());
- if (this->IsArguments(name)) scope_->RecordArgumentsUsage();
- return name;
+ return this->GetSymbol(scanner());
}
-
-template <class Traits>
-typename ParserBase<Traits>::IdentifierT
-ParserBase<Traits>::ParseIdentifierName(bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifierName(
+ bool* ok) {
Token::Value next = Next();
- if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
- next != Token::LET && next != Token::STATIC && next != Token::YIELD &&
+ if (next != Token::IDENTIFIER && next != Token::ASYNC &&
+ next != Token::ENUM && next != Token::AWAIT && next != Token::LET &&
+ next != Token::STATIC && next != Token::YIELD &&
next != Token::FUTURE_STRICT_RESERVED_WORD &&
next != Token::ESCAPED_KEYWORD &&
next != Token::ESCAPED_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
@@ -1149,17 +1526,14 @@ ParserBase<Traits>::ParseIdentifierName(bool* ok) {
return Traits::EmptyIdentifier();
}
- IdentifierT name = this->GetSymbol(scanner());
- if (this->IsArguments(name)) scope_->RecordArgumentsUsage();
- return name;
+ return this->GetSymbol(scanner());
}
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
- bool seen_equal, ExpressionClassifier* classifier, bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral(
+ bool* ok) {
int pos = peek_position();
- if (!scanner()->ScanRegExpPattern(seen_equal)) {
+ if (!scanner()->ScanRegExpPattern()) {
Next();
ReportMessage(MessageTemplate::kUnterminatedRegExp);
*ok = false;
@@ -1181,25 +1555,9 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
}
-
-#define CHECK_OK ok); \
- if (!*ok) return this->EmptyExpression(); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-// Used in functions where the return type is not ExpressionT.
-#define CHECK_OK_CUSTOM(x) ok); \
- if (!*ok) return this->x(); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
+ ExpressionClassifier* classifier, bool* is_async, bool* ok) {
// PrimaryExpression ::
// 'this'
// 'null'
@@ -1215,35 +1573,45 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
// '(' Expression ')'
// TemplateLiteral
// do Block
+ // AsyncFunctionExpression
int beg_pos = peek_position();
switch (peek()) {
case Token::THIS: {
BindingPatternUnexpectedToken(classifier);
Consume(Token::THIS);
- return this->ThisExpression(scope_, factory(), beg_pos);
+ return this->ThisExpression(beg_pos);
}
case Token::NULL_LITERAL:
case Token::TRUE_LITERAL:
case Token::FALSE_LITERAL:
- BindingPatternUnexpectedToken(classifier);
- return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
case Token::SMI:
case Token::NUMBER:
BindingPatternUnexpectedToken(classifier);
return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
+ case Token::ASYNC:
+ if (allow_harmony_async_await() &&
+ !scanner()->HasAnyLineTerminatorAfterNext() &&
+ PeekAhead() == Token::FUNCTION) {
+ Consume(Token::ASYNC);
+ return impl()->ParseAsyncFunctionExpression(CHECK_OK);
+ }
+ // CoverCallExpressionAndAsyncArrowHead
+ *is_async = true;
+ /* falls through */
case Token::IDENTIFIER:
case Token::LET:
case Token::STATIC:
case Token::YIELD:
+ case Token::AWAIT:
case Token::ESCAPED_STRICT_RESERVED_WORD:
case Token::FUTURE_STRICT_RESERVED_WORD: {
// Using eval or arguments in this context is OK even in strict mode.
IdentifierT name = ParseAndClassifyIdentifier(classifier, CHECK_OK);
- return this->ExpressionFromIdentifier(
- name, beg_pos, scanner()->location().end_pos, scope_, factory());
+ return this->ExpressionFromIdentifier(name, beg_pos,
+ scanner()->location().end_pos);
}
case Token::STRING: {
@@ -1253,14 +1621,10 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
}
case Token::ASSIGN_DIV:
- classifier->RecordBindingPatternError(
- scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- return this->ParseRegExpLiteral(true, classifier, ok);
-
case Token::DIV:
classifier->RecordBindingPatternError(
scanner()->peek_location(), MessageTemplate::kUnexpectedTokenRegExp);
- return this->ParseRegExpLiteral(false, classifier, ok);
+ return this->ParseRegExpLiteral(ok);
case Token::LBRACK:
return this->ParseArrayLiteral(classifier, ok);
@@ -1274,12 +1638,11 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
// Parentheses are not valid on the LHS of a BindingPattern, so we use the
// is_valid_binding_pattern() check to detect multiple levels of
// parenthesization.
- if (!classifier->is_valid_binding_pattern()) {
- ArrowFormalParametersUnexpectedToken(classifier);
- }
+ bool pattern_error = !classifier->is_valid_binding_pattern();
classifier->RecordPatternError(scanner()->peek_location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::LPAREN));
+ if (pattern_error) ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::LPAREN);
if (Check(Token::RPAREN)) {
// ()=>x. The continuation that looks for the => is in
@@ -1297,8 +1660,11 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
MessageTemplate::kUnexpectedToken,
Token::String(Token::ELLIPSIS));
classifier->RecordNonSimpleParameter();
- ExpressionT expr =
- this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ ExpressionClassifier binding_classifier(this);
+ ExpressionT expr = this->ParseAssignmentExpression(
+ true, &binding_classifier, CHECK_OK);
+ classifier->Accumulate(&binding_classifier,
+ ExpressionClassifier::AllProductions);
if (!this->IsIdentifier(expr) && !IsValidPattern(expr)) {
classifier->RecordArrowFormalParametersError(
Scanner::Location(ellipsis_pos, scanner()->location().end_pos),
@@ -1315,7 +1681,8 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
}
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
- parenthesized_function_ = (peek() == Token::FUNCTION);
+ function_state_->set_next_function_is_parenthesized(peek() ==
+ Token::FUNCTION);
ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
return expr;
@@ -1324,11 +1691,6 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::CLASS: {
BindingPatternUnexpectedToken(classifier);
Consume(Token::CLASS);
- if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
- ReportMessage(MessageTemplate::kSloppyLexical);
- *ok = false;
- return this->EmptyExpression();
- }
int class_token_position = position();
IdentifierT name = this->EmptyIdentifier();
bool is_strict_reserved_name = false;
@@ -1338,9 +1700,9 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
CHECK_OK);
class_name_location = scanner()->location();
}
- return this->ParseClassLiteral(classifier, name, class_name_location,
- is_strict_reserved_name,
- class_token_position, ok);
+ return impl()->ParseClassLiteral(classifier, name, class_name_location,
+ is_strict_reserved_name,
+ class_token_position, ok);
}
case Token::TEMPLATE_SPAN:
@@ -1352,14 +1714,14 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::MOD:
if (allow_natives() || extension_ != NULL) {
BindingPatternUnexpectedToken(classifier);
- return this->ParseV8Intrinsic(ok);
+ return impl()->ParseV8Intrinsic(ok);
}
break;
case Token::DO:
if (allow_harmony_do_expressions()) {
BindingPatternUnexpectedToken(classifier);
- return Traits::ParseDoExpression(ok);
+ return impl()->ParseDoExpression(ok);
}
break;
@@ -1372,32 +1734,34 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->EmptyExpression();
}
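
  A sketch of the 'async' disambiguation handled above (illustrative code,
  assuming an engine with async/await enabled; note the no-line-terminator
  check after 'async'):

    const f = async function () { return 1; };  // async function expression
    const g = async x => x;                     // async arrow: no line break
                                                // is allowed after 'async'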
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
bool accept_IN, bool* ok) {
ExpressionClassifier classifier(this);
ExpressionT result = ParseExpression(accept_IN, &classifier, CHECK_OK);
- Traits::RewriteNonPattern(&classifier, CHECK_OK);
+ impl()->RewriteNonPattern(&classifier, CHECK_OK);
return result;
}
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseExpression(
bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
// Expression ::
// AssignmentExpression
// Expression ',' AssignmentExpression
- ExpressionClassifier binding_classifier(this);
- ExpressionT result =
- this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
- classifier->Accumulate(&binding_classifier,
- ExpressionClassifier::AllProductions);
+ ExpressionT result;
+ {
+ ExpressionClassifier binding_classifier(this);
+ result = this->ParseAssignmentExpression(accept_IN, &binding_classifier,
+ CHECK_OK);
+ classifier->Accumulate(&binding_classifier,
+ ExpressionClassifier::AllProductions);
+ }
bool is_simple_parameter_list = this->IsIdentifier(result);
bool seen_rest = false;
while (peek() == Token::COMMA) {
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
if (seen_rest) {
// At this point the production can't possibly be valid, but we don't know
// which error to signal.
@@ -1406,7 +1770,11 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
}
Consume(Token::COMMA);
bool is_rest = false;
- if (peek() == Token::ELLIPSIS) {
+ if (allow_harmony_trailing_commas() && peek() == Token::RPAREN &&
+ PeekAhead() == Token::ARROW) {
+ // a trailing comma is allowed at the end of an arrow parameter list
+ break;
+ } else if (peek() == Token::ELLIPSIS) {
// 'x, y, ...z' in CoverParenthesizedExpressionAndArrowParameterList only
// as the formal parameters of '(x, y, ...z) => foo', and is not itself a

// valid expression or binding pattern.
@@ -1416,6 +1784,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
seen_rest = is_rest = true;
}
int pos = position(), expr_pos = peek_position();
+ ExpressionClassifier binding_classifier(this);
ExpressionT right = this->ParseAssignmentExpression(
accept_IN, &binding_classifier, CHECK_OK);
classifier->Accumulate(&binding_classifier,
@@ -1439,9 +1808,8 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
return result;
}
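
  The comma-expression loop above also carries the arrow-parameter special
  cases; a rough sketch of the shapes involved (illustrative TypeScript,
  trailing comma behind the allow_harmony_trailing_commas flag at the time):

    let i = 0;
    const last = (i++, i);          // comma expression: value of last operand
    const add = (a, b,) => a + b;   // trailing comma tolerated just before
                                    // ') =>', per the PeekAhead check above
    const tail = (x, ...rest) => rest;  // rest only as a formal parameter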
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseArrayLiteral(
ExpressionClassifier* classifier, bool* ok) {
// ArrayLiteral ::
// '[' Expression? (',' Expression?)* ']'
@@ -1452,7 +1820,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
int first_spread_index = -1;
Expect(Token::LBRACK, CHECK_OK);
while (peek() != Token::RBRACK) {
- ExpressionT elem = this->EmptyExpression();
+ ExpressionT elem;
if (peek() == Token::COMMA) {
elem = this->GetLiteralTheHole(peek_position(), factory());
} else if (peek() == Token::ELLIPSIS) {
@@ -1461,6 +1829,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
int expr_pos = peek_position();
ExpressionT argument =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
elem = factory()->NewSpread(argument, start_pos, expr_pos);
if (first_spread_index < 0) {
@@ -1484,6 +1853,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
} else {
int beg_pos = peek_position();
elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
CheckDestructuringElement(elem, classifier, beg_pos,
scanner()->location().end_pos);
}
@@ -1501,14 +1871,21 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
literal_index, pos);
if (first_spread_index >= 0) {
result = factory()->NewRewritableExpression(result);
- Traits::QueueNonPatternForRewriting(result);
+ impl()->QueueNonPatternForRewriting(result, ok);
+ if (!*ok) {
+ // If the non-pattern rewriting mechanism is used in the future for
+ // rewriting other things than spreads, this error message will have
+ // to change. Also, this error message will never appear while pre-
+ // parsing (this is OK, as it is an implementation limitation).
+ ReportMessage(MessageTemplate::kTooManySpreads);
+ return this->EmptyExpression();
+ }
}
return result;
}
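
  For illustration, the array-literal shapes this handles (plain JavaScript,
  valid TypeScript; the spread case is what takes the rewritable-expression
  path above):

    const xs = [1, 2];
    const ys = [0, ...xs, 3];   // spread element, queued for rewriting
    const zs = [, , 1];         // elisions become "the hole" literals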
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
+template <class Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
IdentifierT* name, bool* is_get, bool* is_set, bool* is_computed_name,
ExpressionClassifier* classifier, bool* ok) {
Token::Value token = peek();
@@ -1545,7 +1922,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
ExpressionClassifier computed_name_classifier(this);
ExpressionT expression =
ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
- Traits::RewriteNonPattern(&computed_name_classifier, CHECK_OK);
+ impl()->RewriteNonPattern(&computed_name_classifier, CHECK_OK);
classifier->Accumulate(&computed_name_classifier,
ExpressionClassifier::ExpressionProductions);
Expect(Token::RBRACK, CHECK_OK);
@@ -1564,20 +1941,30 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
: factory()->NewStringLiteral(*name, pos);
}
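
  A small sketch of the property-name forms parsed above (illustrative
  TypeScript; the computed form is the '[' AssignmentExpression ']' branch):

    const key = "k";
    const obj = {
      plain: 1,
      ["computed_" + key]: 2,   // computed name, classified separately
      42: 3,                    // numeric names become string literals
    };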
-
-template <class Traits>
-typename ParserBase<Traits>::ObjectLiteralPropertyT
-ParserBase<Traits>::ParsePropertyDefinition(
+template <typename Impl>
+typename ParserBase<Impl>::ObjectLiteralPropertyT
+ParserBase<Impl>::ParsePropertyDefinition(
ObjectLiteralCheckerBase* checker, bool in_class, bool has_extends,
- bool is_static, bool* is_computed_name, bool* has_seen_constructor,
+ MethodKind method_kind, bool* is_computed_name, bool* has_seen_constructor,
ExpressionClassifier* classifier, IdentifierT* name, bool* ok) {
- DCHECK(!in_class || is_static || has_seen_constructor != nullptr);
- ExpressionT value = this->EmptyExpression();
+ DCHECK(!in_class || IsStaticMethod(method_kind) ||
+ has_seen_constructor != nullptr);
bool is_get = false;
bool is_set = false;
bool is_generator = Check(Token::MUL);
+ bool is_async = false;
+ const bool is_static = IsStaticMethod(method_kind);
Token::Value name_token = peek();
+
+ if (is_generator) {
+ method_kind |= MethodKind::kGenerator;
+ } else if (allow_harmony_async_await() && name_token == Token::ASYNC &&
+ !scanner()->HasAnyLineTerminatorAfterNext() &&
+ PeekAhead() != Token::LPAREN && PeekAhead()) {
+ is_async = true;
+ }
+
int next_beg_pos = scanner()->peek_location().beg_pos;
int next_end_pos = scanner()->peek_location().end_pos;
ExpressionT name_expression =
@@ -1589,28 +1976,28 @@ ParserBase<Traits>::ParsePropertyDefinition(
}
if (!in_class && !is_generator) {
- DCHECK(!is_static);
-
+ DCHECK(!IsStaticMethod(method_kind));
if (peek() == Token::COLON) {
// PropertyDefinition
// PropertyName ':' AssignmentExpression
if (!*is_computed_name) {
- checker->CheckProperty(name_token, kValueProperty, false, false,
+ checker->CheckProperty(name_token, kValueProperty, MethodKind::kNormal,
+ classifier,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
Consume(Token::COLON);
int beg_pos = peek_position();
- value = this->ParseAssignmentExpression(
+ ExpressionT value = this->ParseAssignmentExpression(
true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
CheckDestructuringElement(value, classifier, beg_pos,
scanner()->location().end_pos);
- return factory()->NewObjectLiteralProperty(name_expression, value, false,
- *is_computed_name);
+ return factory()->NewObjectLiteralProperty(name_expression, value,
+ is_static, *is_computed_name);
}
- if (Token::IsIdentifier(name_token, language_mode(),
- this->is_generator()) &&
+ if (Token::IsIdentifier(name_token, language_mode(), this->is_generator(),
+ parsing_module_ || is_async_function()) &&
(peek() == Token::COMMA || peek() == Token::RBRACE ||
peek() == Token::ASSIGN)) {
// PropertyDefinition
@@ -1623,39 +2010,49 @@ ParserBase<Traits>::ParsePropertyDefinition(
scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
classifier->RecordDuplicateFormalParameterError(scanner()->location());
}
+
+ if (this->IsEvalOrArguments(*name) && is_strict(language_mode())) {
+ classifier->RecordBindingPatternError(
+ scanner()->location(), MessageTemplate::kStrictEvalArguments);
+ }
+
if (name_token == Token::LET) {
classifier->RecordLetPatternError(
scanner()->location(), MessageTemplate::kLetInLexicalBinding);
}
-
- ExpressionT lhs = this->ExpressionFromIdentifier(
- *name, next_beg_pos, next_end_pos, scope_, factory());
+ if (name_token == Token::AWAIT) {
+ DCHECK(!is_async_function());
+ classifier->RecordAsyncArrowFormalParametersError(
+ Scanner::Location(next_beg_pos, next_end_pos),
+ MessageTemplate::kAwaitBindingIdentifier);
+ }
+ ExpressionT lhs =
+ this->ExpressionFromIdentifier(*name, next_beg_pos, next_end_pos);
CheckDestructuringElement(lhs, classifier, next_beg_pos, next_end_pos);
+ ExpressionT value;
if (peek() == Token::ASSIGN) {
Consume(Token::ASSIGN);
ExpressionClassifier rhs_classifier(this);
ExpressionT rhs = this->ParseAssignmentExpression(
true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- Traits::RewriteNonPattern(&rhs_classifier,
+ impl()->RewriteNonPattern(&rhs_classifier,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
classifier->Accumulate(&rhs_classifier,
ExpressionClassifier::ExpressionProductions);
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
- RelocInfo::kNoPosition);
- classifier->RecordCoverInitializedNameError(
+ kNoSourcePosition);
+ classifier->RecordObjectLiteralError(
Scanner::Location(next_beg_pos, scanner()->location().end_pos),
MessageTemplate::kInvalidCoverInitializedName);
- if (allow_harmony_function_name()) {
- Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
- }
+ Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
} else {
value = lhs;
}
return factory()->NewObjectLiteralProperty(
- name_expression, value, ObjectLiteralProperty::COMPUTED, false,
+ name_expression, value, ObjectLiteralProperty::COMPUTED, is_static,
false);
}
}
@@ -1665,43 +2062,56 @@ ParserBase<Traits>::ParsePropertyDefinition(
Scanner::Location(next_beg_pos, scanner()->location().end_pos),
MessageTemplate::kInvalidDestructuringTarget);
+ if (is_async && !IsSpecialMethod(method_kind)) {
+ DCHECK(!is_get);
+ DCHECK(!is_set);
+ bool dont_care;
+ name_expression = ParsePropertyName(
+ name, &dont_care, &dont_care, is_computed_name, classifier,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ method_kind |= MethodKind::kAsync;
+ }
+
if (is_generator || peek() == Token::LPAREN) {
// MethodDefinition
// PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
// '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
if (!*is_computed_name) {
- checker->CheckProperty(name_token, kMethodProperty, is_static,
- is_generator,
+ checker->CheckProperty(name_token, kMethodProperty, method_kind,
+ classifier,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
- FunctionKind kind = is_generator ? FunctionKind::kConciseGeneratorMethod
- : FunctionKind::kConciseMethod;
+ FunctionKind kind = is_generator
+ ? FunctionKind::kConciseGeneratorMethod
+ : is_async ? FunctionKind::kAsyncConciseMethod
+ : FunctionKind::kConciseMethod;
- if (in_class && !is_static && this->IsConstructor(*name)) {
+ if (in_class && !IsStaticMethod(method_kind) &&
+ this->IsConstructor(*name)) {
*has_seen_constructor = true;
kind = has_extends ? FunctionKind::kSubclassConstructor
: FunctionKind::kBaseConstructor;
}
- value = this->ParseFunctionLiteral(
+ ExpressionT value = impl()->ParseFunctionLiteral(
*name, scanner()->location(), kSkipFunctionNameCheck, kind,
- RelocInfo::kNoPosition, FunctionLiteral::kAccessorOrMethod,
- language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ kNoSourcePosition, FunctionLiteral::kAccessorOrMethod, language_mode(),
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
return factory()->NewObjectLiteralProperty(name_expression, value,
ObjectLiteralProperty::COMPUTED,
is_static, *is_computed_name);
}
- if (in_class && name_token == Token::STATIC && !is_static) {
+ if (in_class && name_token == Token::STATIC && IsNormalMethod(method_kind)) {
// ClassElement (static)
// 'static' MethodDefinition
*name = this->EmptyIdentifier();
ObjectLiteralPropertyT property = ParsePropertyDefinition(
- checker, true, has_extends, true, is_computed_name, nullptr, classifier,
- name, ok);
- Traits::RewriteNonPattern(classifier, ok);
+ checker, true, has_extends, MethodKind::kStatic, is_computed_name,
+ nullptr, classifier, name, ok);
+ impl()->RewriteNonPattern(classifier, ok);
return property;
}
@@ -1718,16 +2128,16 @@ ParserBase<Traits>::ParsePropertyDefinition(
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (!*is_computed_name) {
- checker->CheckProperty(name_token, kAccessorProperty, is_static,
- is_generator,
+ checker->CheckProperty(name_token, kAccessorProperty, method_kind,
+ classifier,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
- typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
+ typename Traits::Type::FunctionLiteral value = impl()->ParseFunctionLiteral(
*name, scanner()->location(), kSkipFunctionNameCheck,
is_get ? FunctionKind::kGetterFunction : FunctionKind::kSetterFunction,
- RelocInfo::kNoPosition, FunctionLiteral::kAccessorOrMethod,
- language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ kNoSourcePosition, FunctionLiteral::kAccessorOrMethod, language_mode(),
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
// Make sure the name expression is a string since we need a Name for
// Runtime_DefineAccessorPropertyUnchecked and since we can determine this
@@ -1749,9 +2159,8 @@ ParserBase<Traits>::ParsePropertyDefinition(
return this->EmptyObjectLiteralProperty();
}
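
  Taken together, the branches above cover every object-literal property
  form; a compact illustrative sketch (valid TypeScript, async methods
  behind the harmony flag at the time):

    const shorthand = 2;
    const obj = {
      value: 1,               // PropertyName ':' AssignmentExpression
      shorthand,              // IdentifierReference shorthand
      method() {},            // MethodDefinition
      *gen() { yield 1; },    // generator method (MethodKind::kGenerator)
      async work() {},        // 'async', no line break, then the name
      get x() { return 1; },  // accessor pair
      set x(v: number) {},
    };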
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseObjectLiteral(
ExpressionClassifier* classifier, bool* ok) {
// ObjectLiteral ::
// '{' (PropertyDefinition (',' PropertyDefinition)* ','? )? '}'
@@ -1769,13 +2178,12 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
FuncNameInferrer::State fni_state(fni_);
const bool in_class = false;
- const bool is_static = false;
const bool has_extends = false;
bool is_computed_name = false;
IdentifierT name = this->EmptyIdentifier();
ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
- &checker, in_class, has_extends, is_static, &is_computed_name, NULL,
- classifier, &name, CHECK_OK);
+ &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
+ NULL, classifier, &name, CHECK_OK);
if (is_computed_name) {
has_computed_names = true;
@@ -1794,9 +2202,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
if (fni_ != nullptr) fni_->Infer();
- if (allow_harmony_function_name()) {
- Traits::SetFunctionNameFromPropertyName(property, name);
- }
+ Traits::SetFunctionNameFromPropertyName(property, name);
}
Expect(Token::RBRACE, CHECK_OK);
@@ -1809,11 +2215,11 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
pos);
}
-
-template <class Traits>
-typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
- Scanner::Location* first_spread_arg_loc, ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::Traits::Type::ExpressionList
+ParserBase<Impl>::ParseArguments(Scanner::Location* first_spread_arg_loc,
+ bool maybe_arrow,
+ ExpressionClassifier* classifier, bool* ok) {
// Arguments ::
// '(' (AssignmentExpression)*[','] ')'
@@ -1831,7 +2237,11 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
ExpressionT argument = this->ParseAssignmentExpression(
true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
- Traits::RewriteNonPattern(classifier, CHECK_OK_CUSTOM(NullExpressionList));
+ CheckNoTailCallExpressions(classifier, CHECK_OK_CUSTOM(NullExpressionList));
+ if (!maybe_arrow) {
+ impl()->RewriteNonPattern(classifier,
+ CHECK_OK_CUSTOM(NullExpressionList));
+ }
if (is_spread) {
if (!spread_arg.IsValid()) {
spread_arg.beg_pos = start_pos;
@@ -1858,6 +2268,10 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
done = (peek() != Token::COMMA);
if (!done) {
Next();
+ if (allow_harmony_trailing_commas() && peek() == Token::RPAREN) {
+ // allow trailing comma
+ done = true;
+ }
}
}
Scanner::Location location = scanner_->location();
@@ -1868,50 +2282,83 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
}
*first_spread_arg_loc = spread_arg;
- if (spread_arg.IsValid()) {
- // Unspread parameter sequences are translated into array literals in the
- // parser. Ensure that the number of materialized literals matches between
- // the parser and preparser
- Traits::MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
+ if (!maybe_arrow || peek() != Token::ARROW) {
+ if (maybe_arrow) {
+ impl()->RewriteNonPattern(classifier,
+ CHECK_OK_CUSTOM(NullExpressionList));
+ }
+ if (spread_arg.IsValid()) {
+ // Unspread parameter sequences are translated into array literals in the
+ // parser. Ensure that the number of materialized literals matches between
+ // the parser and preparser
+ Traits::MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
+ }
}
return result;
}
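
  Sketch of the argument lists this accepts (illustrative TypeScript;
  the trailing comma was behind allow_harmony_trailing_commas):

    function f(a: number, b: number, c: number) { return a + b + c; }
    f(1, 2, 3,);        // trailing comma before ')' ends the list
    f(...[1, 2], 3);    // spread argument: takes the SpreadCall path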
// Precedence = 2
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
- ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN,
+ ExpressionClassifier* classifier,
+ bool* ok) {
// AssignmentExpression ::
// ConditionalExpression
// ArrowFunction
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- bool is_destructuring_assignment = false;
int lhs_beg_pos = peek_position();
if (peek() == Token::YIELD && is_generator()) {
- return this->ParseYieldExpression(classifier, ok);
+ return this->ParseYieldExpression(accept_IN, classifier, ok);
}
FuncNameInferrer::State fni_state(fni_);
- ParserBase<Traits>::Checkpoint checkpoint(this);
+ Checkpoint checkpoint(this);
ExpressionClassifier arrow_formals_classifier(this,
classifier->duplicate_finder());
+
+ Scope::Snapshot scope_snapshot(scope());
+
+ bool is_async = allow_harmony_async_await() && peek() == Token::ASYNC &&
+ !scanner()->HasAnyLineTerminatorAfterNext() &&
+ IsValidArrowFormalParametersStart(PeekAhead());
+
bool parenthesized_formals = peek() == Token::LPAREN;
- if (!parenthesized_formals) {
+ if (!is_async && !parenthesized_formals) {
ArrowFormalParametersUnexpectedToken(&arrow_formals_classifier);
}
- ExpressionT expression = this->ParseConditionalExpression(
- accept_IN, &arrow_formals_classifier, CHECK_OK);
+
+ // Parse a simple, faster sub-grammar (primary expression) if it's evident
+ // that we have only a trivial expression to parse.
+ ExpressionT expression;
+ if (IsTrivialExpression()) {
+ expression = this->ParsePrimaryExpression(&arrow_formals_classifier,
+ &is_async, CHECK_OK);
+ } else {
+ expression = this->ParseConditionalExpression(
+ accept_IN, &arrow_formals_classifier, CHECK_OK);
+ }
+
+ if (is_async && this->IsIdentifier(expression) && peek_any_identifier() &&
+ PeekAhead() == Token::ARROW) {
+ // async Identifier => AsyncConciseBody
+ IdentifierT name =
+ ParseAndClassifyIdentifier(&arrow_formals_classifier, CHECK_OK);
+ expression = this->ExpressionFromIdentifier(
+ name, position(), scanner()->location().end_pos, InferName::kNo);
+ if (fni_) {
+ // Remove `async` keyword from inferred name stack.
+ fni_->RemoveAsyncKeywordFromEnd();
+ }
+ }
+
if (peek() == Token::ARROW) {
- classifier->RecordPatternError(scanner()->peek_location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::ARROW));
+ Scanner::Location arrow_loc = scanner()->peek_location();
ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
- parenthesized_formals, CHECK_OK);
+ parenthesized_formals, is_async, CHECK_OK);
// This reads strangely, but is correct: it checks whether any
// sub-expression of the parameter list failed to be a valid formal
// parameter initializer. Since YieldExpressions are banned anywhere
@@ -1919,13 +2366,15 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
// TODO(adamk): Rename "FormalParameterInitializerError" to refer to
// "YieldExpression", which is its only use.
ValidateFormalParameterInitializer(&arrow_formals_classifier, ok);
+
Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
- Scope* scope =
- this->NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+ DeclarationScope* scope =
+ this->NewFunctionScope(is_async ? FunctionKind::kAsyncArrowFunction
+ : FunctionKind::kArrowFunction);
// Because the arrow's parameters were parsed in the outer scope, any
// usage flags that might have been triggered there need to be copied
// to the arrow scope.
- scope_->PropagateUsageFlagsToScope(scope);
+ this->scope()->PropagateUsageFlagsToScope(scope);
FormalParametersT parameters(scope);
if (!arrow_formals_classifier.is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
@@ -1936,33 +2385,56 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
scope->set_start_position(lhs_beg_pos);
Scanner::Location duplicate_loc = Scanner::Location::invalid();
- this->ParseArrowFunctionFormalParameterList(&parameters, expression, loc,
- &duplicate_loc, CHECK_OK);
+ this->ParseArrowFunctionFormalParameterList(
+ &parameters, expression, loc, &duplicate_loc, scope_snapshot, CHECK_OK);
if (duplicate_loc.IsValid()) {
arrow_formals_classifier.RecordDuplicateFormalParameterError(
duplicate_loc);
}
expression = this->ParseArrowFunctionLiteral(
- accept_IN, parameters, arrow_formals_classifier, CHECK_OK);
+ accept_IN, parameters, is_async, arrow_formals_classifier, CHECK_OK);
+ arrow_formals_classifier.Discard();
+ classifier->RecordPatternError(arrow_loc,
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::ARROW));
if (fni_ != nullptr) fni_->Infer();
return expression;
}
- if (this->IsValidReferenceExpression(expression)) {
- arrow_formals_classifier.ForgiveAssignmentPatternError();
- }
-
// "expression" was not itself an arrow function parameter list, but it might
- // form part of one. Propagate speculative formal parameter error locations.
+ // form part of one. Propagate speculative formal parameter error locations
+ // (including those for binding patterns, since formal parameters can
+ // themselves contain binding patterns).
// Do not merge pending non-pattern expressions yet!
- classifier->Accumulate(
- &arrow_formals_classifier,
- ExpressionClassifier::StandardProductions |
+ unsigned productions =
ExpressionClassifier::FormalParametersProductions |
- ExpressionClassifier::CoverInitializedNameProduction,
- false);
+ ExpressionClassifier::AsyncArrowFormalParametersProduction |
+ ExpressionClassifier::FormalParameterInitializerProduction;
+
+ // Parenthesized identifiers and property references are allowed as part
+ // of a larger binding pattern, even though parenthesized patterns
+ // themselves are not allowed, e.g., "[(x)] = []". Only accumulate
+ // assignment pattern errors if the parsed expression is more complex.
+ if (this->IsValidReferenceExpression(expression)) {
+ productions |= ExpressionClassifier::PatternProductions &
+ ~ExpressionClassifier::AssignmentPatternProduction;
+ } else {
+ productions |= ExpressionClassifier::PatternProductions;
+ }
+
+ const bool is_destructuring_assignment =
+ IsValidPattern(expression) && peek() == Token::ASSIGN;
+ if (!is_destructuring_assignment) {
+ // This may be an expression or a pattern, so we must continue to
+ // accumulate expression-related errors.
+ productions |= ExpressionClassifier::ExpressionProduction |
+ ExpressionClassifier::TailCallExpressionProduction |
+ ExpressionClassifier::ObjectLiteralProduction;
+ }
+
+ classifier->Accumulate(&arrow_formals_classifier, productions, false);
if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
@@ -1974,10 +2446,10 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
// Now pending non-pattern expressions must be discarded.
arrow_formals_classifier.Discard();
- if (IsValidPattern(expression) && peek() == Token::ASSIGN) {
- classifier->ForgiveCoverInitializedNameError();
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+
+ if (is_destructuring_assignment) {
ValidateAssignmentPattern(classifier, CHECK_OK);
- is_destructuring_assignment = true;
} else {
expression = this->CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
@@ -1998,10 +2470,13 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
ExpressionT right =
this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
- Traits::RewriteNonPattern(&rhs_classifier, CHECK_OK);
+ CheckNoTailCallExpressions(&rhs_classifier, CHECK_OK);
+ impl()->RewriteNonPattern(&rhs_classifier, CHECK_OK);
classifier->Accumulate(
- &rhs_classifier, ExpressionClassifier::ExpressionProductions |
- ExpressionClassifier::CoverInitializedNameProduction);
+ &rhs_classifier,
+ ExpressionClassifier::ExpressionProductions |
+ ExpressionClassifier::ObjectLiteralProduction |
+ ExpressionClassifier::AsyncArrowFormalParametersProduction);
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
@@ -2026,29 +2501,28 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
}
}
- if (op == Token::ASSIGN && allow_harmony_function_name()) {
+ if (op == Token::ASSIGN) {
Traits::SetFunctionNameFromIdentifierRef(right, expression);
}
if (op == Token::ASSIGN_EXP) {
DCHECK(!is_destructuring_assignment);
- return Traits::RewriteAssignExponentiation(expression, right, pos);
+ return impl()->RewriteAssignExponentiation(expression, right, pos);
}
ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
if (is_destructuring_assignment) {
result = factory()->NewRewritableExpression(result);
- Traits::QueueDestructuringAssignmentForRewriting(result);
+ impl()->QueueDestructuringAssignmentForRewriting(result);
}
return result;
}
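
  The destructuring-assignment path above parses an expression first and
  only then validates it as a pattern; a minimal illustrative sketch:

    let a = 1, b = 2;
    [a, b] = [b, a];   // valid pattern + '=': queued for the destructuring
                       // rewriter rather than treated as a plain assignment
    const swap = ([x, y]: number[]) => [y, x];  // same shapes as formals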
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseYieldExpression(
+ bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
// YieldExpression ::
// 'yield' ([no line terminator] '*'? AssignmentExpression)?
int pos = peek_position();
@@ -2059,6 +2533,7 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
Expect(Token::YIELD, CHECK_OK);
ExpressionT generator_object =
factory()->NewVariableProxy(function_state_->generator_object_variable());
+ // The following initialization is necessary.
ExpressionT expression = Traits::EmptyExpression();
bool delegating = false; // yield*
if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
@@ -2078,31 +2553,98 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
if (!delegating) break;
// Delegating yields require an RHS; fall through.
default:
- expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ expression = ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
break;
}
}
if (delegating) {
- return Traits::RewriteYieldStar(generator_object, expression, pos);
+ return impl()->RewriteYieldStar(generator_object, expression, pos);
}
expression = Traits::BuildIteratorResult(expression, false);
// Hackily disambiguate o from o.next and o [Symbol.iterator]().
// TODO(verwaest): Come up with a better solution.
- typename Traits::Type::YieldExpression yield =
- factory()->NewYield(generator_object, expression, pos);
+ typename Traits::Type::YieldExpression yield = factory()->NewYield(
+ generator_object, expression, pos, Yield::kOnExceptionThrow);
return yield;
}
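
  For illustration, the yield forms this grammar covers (valid TypeScript):

    function* g() {
      yield;           // the operand is optional...
      yield 1;
      yield* [2, 3];   // ...but delegating 'yield*' requires an RHS
    }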
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseTailCallExpression(ExpressionClassifier* classifier,
+ bool* ok) {
+ // TailCallExpression::
+ // 'continue' MemberExpression Arguments
+ // 'continue' CallExpression Arguments
+ // 'continue' MemberExpression TemplateLiteral
+ // 'continue' CallExpression TemplateLiteral
+ Expect(Token::CONTINUE, CHECK_OK);
+ int pos = position();
+ int sub_expression_pos = peek_position();
+ ExpressionT expression =
+ this->ParseLeftHandSideExpression(classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+
+ Scanner::Location loc(pos, scanner()->location().end_pos);
+ if (!expression->IsCall()) {
+ Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
+ ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedInsideTailCall);
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ if (Traits::IsDirectEvalCall(expression)) {
+ Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
+ ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCallOfEval);
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ if (!is_strict(language_mode())) {
+ ReportMessageAt(loc, MessageTemplate::kUnexpectedSloppyTailCall);
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ if (is_resumable()) {
+ Scanner::Location sub_loc(sub_expression_pos, loc.end_pos);
+ ReportMessageAt(sub_loc, MessageTemplate::kUnexpectedTailCall);
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ ReturnExprContext return_expr_context =
+ function_state_->return_expr_context();
+ if (return_expr_context != ReturnExprContext::kInsideValidReturnStatement) {
+ MessageTemplate::Template msg = MessageTemplate::kNone;
+ switch (return_expr_context) {
+ case ReturnExprContext::kInsideValidReturnStatement:
+ UNREACHABLE();
+ return Traits::EmptyExpression();
+ case ReturnExprContext::kInsideValidBlock:
+ msg = MessageTemplate::kUnexpectedTailCall;
+ break;
+ case ReturnExprContext::kInsideTryBlock:
+ msg = MessageTemplate::kUnexpectedTailCallInTryBlock;
+ break;
+ case ReturnExprContext::kInsideForInOfBody:
+ msg = MessageTemplate::kUnexpectedTailCallInForInOf;
+ break;
+ }
+ ReportMessageAt(loc, msg);
+ *ok = false;
+ return Traits::EmptyExpression();
+ }
+ classifier->RecordTailCallExpressionError(
+ loc, MessageTemplate::kUnexpectedTailCall);
+ function_state_->AddExplicitTailCallExpression(expression, loc);
+ return expression;
+}
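
  The 'continue'-prefixed form above belonged to the experimental
  --harmony-explicit-tailcalls flag and never shipped in standard
  JavaScript; a hedged sketch showing the ordinary strict-mode tail call it
  would have annotated (illustrative TypeScript):

    "use strict";
    function loop(n: number): number {
      // Under the experimental flag the tail call would have been spelled
      //   return continue loop(n - 1);
      return n === 0 ? 0 : loop(n - 1);
    }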
// Precedence = 3
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
- ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseConditionalExpression(bool accept_IN,
+ ExpressionClassifier* classifier,
+ bool* ok) {
// ConditionalExpression ::
// LogicalOrExpression
// LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
@@ -2112,35 +2654,35 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
ExpressionT expression =
this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
- Traits::RewriteNonPattern(classifier, CHECK_OK);
- ArrowFormalParametersUnexpectedToken(classifier);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
+ ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::CONDITIONAL);
// In parsing the first assignment expression in conditional
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
ExpressionT left = ParseAssignmentExpression(true, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
ExpressionT right =
ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
return factory()->NewConditional(expression, left, right, pos);
}
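
  Illustration of the accept_IN asymmetry noted in the comment above: the
  middle of '?:' always accepts 'in', even where the surrounding context
  forbids it (sketch, valid TypeScript):

    const obj = { k: 1 };
    // In a for-initializer 'in' is normally rejected, yet the first
    // branch of '?:' accepts it unconditionally (ECMA-262 11.12):
    for (let ok = true ? "k" in obj : false; ok; ok = false) {}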
// Precedence >= 4
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
- ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseBinaryExpression(
+ int prec, bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
DCHECK(prec >= 4);
ExpressionT x = this->ParseUnaryExpression(classifier, CHECK_OK);
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Token::Value op = Next();
@@ -2150,7 +2692,10 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
const int next_prec = is_right_associative ? prec1 : prec1 + 1;
ExpressionT y =
ParseBinaryExpression(next_prec, accept_IN, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ if (op != Token::OR && op != Token::AND) {
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ }
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
factory())) {
@@ -2168,18 +2713,13 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
default: break;
}
- if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
- x = Traits::RewriteInstanceof(x, y, pos);
- } else {
- x = factory()->NewCompareOperation(cmp, x, y, pos);
- if (cmp != op) {
- // The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, pos);
- }
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
+ if (cmp != op) {
+ // The comparison was negated - add a NOT.
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
}
-
} else if (op == Token::EXP) {
- x = Traits::RewriteExponentiation(x, y, pos);
+ x = impl()->RewriteExponentiation(x, y, pos);
} else {
// We have a "normal" binary operation.
x = factory()->NewBinaryOperation(op, x, y, pos);
@@ -2189,11 +2729,9 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
return x;
}
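
  Two consequences of the loop above, sketched in illustrative TypeScript:
  right-associativity via next_prec, and negated comparisons built as
  NOT(compare):

    const r = 2 ** 3 ** 2;   // 512, not 64: '**' is right-associative
    let m = 1;
    const q = m != 2;        // '!=' is parsed as NOT(m == 2)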
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
+ ExpressionClassifier* classifier, bool* ok) {
// UnaryExpression ::
// PostfixExpression
// 'delete' UnaryExpression
@@ -2205,6 +2743,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
// '-' UnaryExpression
// '~' UnaryExpression
// '!' UnaryExpression
+ // [+Await] AwaitExpression[?Yield]
Token::Value op = peek();
if (Token::IsUnaryOp(op)) {
@@ -2214,7 +2753,8 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
op = Next();
int pos = position();
ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
if (this->IsIdentifier(expression)) {
@@ -2239,27 +2779,37 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
op = Next();
int beg_pos = peek_position();
ExpressionT expression = this->ParseUnaryExpression(classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
expression = this->CheckAndRewriteReferenceExpression(
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
this->MarkExpressionAsAssigned(expression);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
return factory()->NewCountOperation(op,
true /* prefix */,
expression,
position());
+ } else if (is_async_function() && peek() == Token::AWAIT) {
+ classifier->RecordFormalParameterInitializerError(
+ scanner()->peek_location(),
+ MessageTemplate::kAwaitExpressionFormalParameter);
+
+ int await_pos = peek_position();
+ Consume(Token::AWAIT);
+
+ ExpressionT value = ParseUnaryExpression(classifier, CHECK_OK);
+
+ return impl()->RewriteAwaitExpression(value, await_pos);
} else {
return this->ParsePostfixExpression(classifier, ok);
}
}
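
  A minimal sketch of the await branch added above (illustrative
  TypeScript; 'await' is treated as a unary operator only inside an async
  function, and is rejected in its own formal parameters):

    async function f(p: Promise<number>) {
      return (await p) + 1;   // unary 'await', rewritten by the impl
    }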
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
+ ExpressionClassifier* classifier, bool* ok) {
// PostfixExpression ::
// LeftHandSideExpression ('++' | '--')?
@@ -2268,6 +2818,7 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
this->ParseLeftHandSideExpression(classifier, CHECK_OK);
if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
Token::IsCountOp(peek())) {
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
@@ -2275,7 +2826,7 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
Token::Value next = Next();
expression =
@@ -2287,40 +2838,45 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
return expression;
}
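
  The HasAnyLineTerminatorBeforeNext guard above is what makes postfix
  operators newline-sensitive; an illustrative sketch:

    let x = 1;
    x
    ++x   // the line break before '++' ends the previous statement (ASI),
          // so this is a prefix increment, not postfix on the line above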
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseLeftHandSideExpression(
- ExpressionClassifier* classifier, bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseLeftHandSideExpression(ExpressionClassifier* classifier,
+ bool* ok) {
// LeftHandSideExpression ::
// (NewExpression | MemberExpression) ...
- ExpressionT result =
- this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
+ if (FLAG_harmony_explicit_tailcalls && peek() == Token::CONTINUE) {
+ return this->ParseTailCallExpression(classifier, ok);
+ }
+
+ bool is_async = false;
+ ExpressionT result = this->ParseMemberWithNewPrefixesExpression(
+ classifier, &is_async, CHECK_OK);
while (true) {
switch (peek()) {
case Token::LBRACK: {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpression(true, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
}
case Token::LPAREN: {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
- BindingPatternUnexpectedToken(classifier);
- ArrowFormalParametersUnexpectedToken(classifier);
-
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
int pos;
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
+ BindingPatternUnexpectedToken(classifier);
if (scanner()->current_token() == Token::IDENTIFIER ||
- scanner()->current_token() == Token::SUPER) {
+ scanner()->current_token() == Token::SUPER ||
+ scanner()->current_token() == Token::ASYNC) {
// For call of an identifier we want to report position of
// the identifier as position of the call in the stack trace.
pos = position();
@@ -2339,8 +2895,36 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
}
}
Scanner::Location spread_pos;
- typename Traits::Type::ExpressionList args =
- ParseArguments(&spread_pos, classifier, CHECK_OK);
+ typename Traits::Type::ExpressionList args;
+ if (V8_UNLIKELY(is_async && this->IsIdentifier(result))) {
+ ExpressionClassifier async_classifier(this);
+ args = ParseArguments(&spread_pos, true, &async_classifier, CHECK_OK);
+ if (peek() == Token::ARROW) {
+ if (fni_) {
+ fni_->RemoveAsyncKeywordFromEnd();
+ }
+ ValidateBindingPattern(&async_classifier, CHECK_OK);
+ if (!async_classifier.is_valid_async_arrow_formal_parameters()) {
+ ReportClassifierError(
+ async_classifier.async_arrow_formal_parameters_error());
+ *ok = false;
+ return this->EmptyExpression();
+ }
+ if (args->length()) {
+ // async ( Arguments ) => ...
+ return Traits::ExpressionListToExpression(args);
+ }
+ // async () => ...
+ return factory()->NewEmptyParentheses(pos);
+ } else {
+ classifier->Accumulate(&async_classifier,
+ ExpressionClassifier::AllProductions);
+ }
+ } else {
+ args = ParseArguments(&spread_pos, false, classifier, CHECK_OK);
+ }
+
+ ArrowFormalParametersUnexpectedToken(classifier);
// Keep track of eval() calls since they disable all local variable
// optimizations.
@@ -2349,20 +2933,21 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
// no explicit receiver.
// These calls are marked as potentially direct eval calls. Whether
// they are actually direct calls to eval is determined at run time.
- this->CheckPossibleEvalCall(result, scope_);
+ Call::PossiblyEval is_possibly_eval =
+ CheckPossibleEvalCall(result, scope());
bool is_super_call = result->IsSuperCallReference();
if (spread_pos.IsValid()) {
- args = Traits::PrepareSpreadArguments(args);
- result = Traits::SpreadCall(result, args, pos);
+ args = impl()->PrepareSpreadArguments(args);
+ result = impl()->SpreadCall(result, args, pos);
} else {
- result = factory()->NewCall(result, args, pos);
+ result = factory()->NewCall(result, args, pos, is_possibly_eval);
}
// Explicit calls to the super constructor using super() perform an
// implicit binding assignment to the 'this' variable.
if (is_super_call) {
- ExpressionT this_expr = this->ThisExpression(scope_, factory(), pos);
+ ExpressionT this_expr = this->ThisExpression(pos);
result =
factory()->NewAssignment(Token::INIT, this_expr, result, pos);
}
@@ -2372,7 +2957,8 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
}
case Token::PERIOD: {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::PERIOD);
@@ -2386,7 +2972,8 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
result = ParseTemplateLiteral(result, position(), classifier, CHECK_OK);
@@ -2399,11 +2986,10 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
}
}
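
  Sketch of the V8_UNLIKELY branch above, where a call whose callee is the
  identifier 'async' may retroactively become an async arrow head
  (illustrative TypeScript; names are illustrative):

    function async(a: number, b: number) { return a + b; }
    async(1, 2);                        // a plain call to a callee 'async'
    const f = async (a: number, b: number) => a + b;
                                        // same tokens + '=>': re-parsed as
                                        // async arrow formal parameters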
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
- ExpressionClassifier* classifier, bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(
+ ExpressionClassifier* classifier, bool* is_async, bool* ok) {
// NewExpression ::
// ('new')+ MemberExpression
//
@@ -2429,16 +3015,17 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::NEW);
int new_pos = position();
- ExpressionT result = this->EmptyExpression();
+ ExpressionT result;
if (peek() == Token::SUPER) {
const bool is_new = true;
- result = ParseSuperExpression(is_new, classifier, CHECK_OK);
+ result = ParseSuperExpression(is_new, CHECK_OK);
} else if (peek() == Token::PERIOD) {
return ParseNewTargetExpression(CHECK_OK);
} else {
- result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
+ result = this->ParseMemberWithNewPrefixesExpression(classifier, is_async,
+ CHECK_OK);
}
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
Scanner::Location spread_pos;
@@ -2446,14 +3033,14 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
this->ParseArguments(&spread_pos, classifier, CHECK_OK);
if (spread_pos.IsValid()) {
- args = Traits::PrepareSpreadArguments(args);
- result = Traits::SpreadCallNew(result, args, new_pos);
+ args = impl()->PrepareSpreadArguments(args);
+ result = impl()->SpreadCallNew(result, args, new_pos);
} else {
result = factory()->NewCallNew(result, args, new_pos);
}
// The expression can still continue with . or [ after the arguments.
- result =
- this->ParseMemberExpressionContinuation(result, classifier, CHECK_OK);
+ result = this->ParseMemberExpressionContinuation(result, is_async,
+ classifier, CHECK_OK);
return result;
}
// NewExpression without arguments.
@@ -2461,14 +3048,12 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
new_pos);
}
// No 'new' or 'super' keyword.
- return this->ParseMemberExpression(classifier, ok);
+ return this->ParseMemberExpression(classifier, is_async, ok);
}
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
+ ExpressionClassifier* classifier, bool* is_async, bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral | ClassLiteral)
// ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
@@ -2478,7 +3063,7 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
// caller.
// Parse the initial primary or function expression.
- ExpressionT result = this->EmptyExpression();
+ ExpressionT result;
if (peek() == Token::FUNCTION) {
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
@@ -2499,10 +3084,12 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
return this->EmptyExpression();
}
- return this->FunctionSentExpression(scope_, factory(), pos);
+ return this->FunctionSentExpression(factory(), pos);
}
- bool is_generator = Check(Token::MUL);
+ FunctionKind function_kind = Check(Token::MUL)
+ ? FunctionKind::kGeneratorFunction
+ : FunctionKind::kNormalFunction;
IdentifierT name = this->EmptyIdentifier();
bool is_strict_reserved_name = false;
Scanner::Location function_name_location = Scanner::Location::invalid();
@@ -2510,52 +3097,48 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
FunctionLiteral::kAnonymousExpression;
if (peek_any_identifier()) {
name = ParseIdentifierOrStrictReservedWord(
- is_generator, &is_strict_reserved_name, CHECK_OK);
+ function_kind, &is_strict_reserved_name, CHECK_OK);
function_name_location = scanner()->location();
function_type = FunctionLiteral::kNamedExpression;
}
- result = this->ParseFunctionLiteral(
+ result = impl()->ParseFunctionLiteral(
name, function_name_location,
is_strict_reserved_name ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
- is_generator ? FunctionKind::kGeneratorFunction
- : FunctionKind::kNormalFunction,
- function_token_position, function_type, language_mode(), CHECK_OK);
+ function_kind, function_token_position, function_type, language_mode(),
+ CHECK_OK);
} else if (peek() == Token::SUPER) {
const bool is_new = false;
- result = ParseSuperExpression(is_new, classifier, CHECK_OK);
+ result = ParseSuperExpression(is_new, CHECK_OK);
} else {
- result = ParsePrimaryExpression(classifier, CHECK_OK);
+ result = ParsePrimaryExpression(classifier, is_async, CHECK_OK);
}
- result = ParseMemberExpressionContinuation(result, classifier, CHECK_OK);
+ result =
+ ParseMemberExpressionContinuation(result, is_async, classifier, CHECK_OK);
return result;
}
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseSuperExpression(bool is_new,
- ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
+ bool is_new, bool* ok) {
Expect(Token::SUPER, CHECK_OK);
int pos = position();
- Scope* scope = scope_->ReceiverScope();
+ DeclarationScope* scope = GetReceiverScope();
FunctionKind kind = scope->function_kind();
if (IsConciseMethod(kind) || IsAccessorFunction(kind) ||
IsClassConstructor(kind)) {
if (peek() == Token::PERIOD || peek() == Token::LBRACK) {
scope->RecordSuperPropertyUsage();
- return this->SuperPropertyReference(scope_, factory(), pos);
+ return this->NewSuperPropertyReference(factory(), pos);
}
// new super() is never allowed.
// super() is only allowed in derived constructor
if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
// TODO(rossberg): This might not be the correct FunctionState for the
// method here.
- function_state_->set_super_location(scanner()->location());
- return this->SuperCallReference(scope_, factory(), pos);
+ return this->NewSuperCallReference(factory(), pos);
}
}
@@ -2564,13 +3147,12 @@ ParserBase<Traits>::ParseSuperExpression(bool is_new,
return this->EmptyExpression();
}
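
  Illustration of the two legal 'super' uses distinguished above (valid
  TypeScript):

    class Base {
      greet() { return "base"; }
    }
    class Derived extends Base {
      constructor() { super(); }               // super() only in a derived
                                               // constructor
      greet() { return super.greet() + "!"; }  // super.property in a method
    }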
-template <class Traits>
-void ParserBase<Traits>::ExpectMetaProperty(Vector<const char> property_name,
- const char* full_name, int pos,
- bool* ok) {
+template <typename Impl>
+void ParserBase<Impl>::ExpectMetaProperty(Vector<const char> property_name,
+ const char* full_name, int pos,
+ bool* ok) {
Consume(Token::PERIOD);
- ExpectContextualKeyword(property_name, ok);
- if (!*ok) return;
+ ExpectContextualKeyword(property_name, CHECK_OK_CUSTOM(Void));
if (scanner()->literal_contains_escapes()) {
Traits::ReportMessageAt(
Scanner::Location(pos, scanner()->location().end_pos),
@@ -2579,40 +3161,41 @@ void ParserBase<Traits>::ExpectMetaProperty(Vector<const char> property_name,
}
}
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseNewTargetExpression(bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseNewTargetExpression(bool* ok) {
int pos = position();
ExpectMetaProperty(CStrVector("target"), "new.target", pos, CHECK_OK);
- if (!scope_->ReceiverScope()->is_function_scope()) {
+ if (!GetReceiverScope()->is_function_scope()) {
ReportMessageAt(scanner()->location(),
MessageTemplate::kUnexpectedNewTarget);
*ok = false;
return this->EmptyExpression();
}
- return this->NewTargetExpression(scope_, factory(), pos);
+ return this->NewTargetExpression(pos);
}
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseMemberExpressionContinuation(
- ExpressionT expression, ExpressionClassifier* classifier, bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseMemberExpressionContinuation(
+ ExpressionT expression, bool* is_async, ExpressionClassifier* classifier,
+ bool* ok) {
// Parses this part of MemberExpression:
// ('[' Expression ']' | '.' Identifier | TemplateLiteral)*
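// For example (illustrative): a[b] uses '[' Expression ']', a.b uses
// '.' Identifier, and tag`x` is a tagged TemplateLiteral; these chain
// arbitrarily, as in a.b[c]`d`.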
while (true) {
switch (peek()) {
case Token::LBRACK: {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ *is_async = false;
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
expression = factory()->NewProperty(expression, index, pos);
if (fni_ != NULL) {
this->PushPropertyName(fni_, index);
@@ -2621,7 +3204,8 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
break;
}
case Token::PERIOD: {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ *is_async = false;
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
@@ -2637,7 +3221,8 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
}
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
- Traits::RewriteNonPattern(classifier, CHECK_OK);
+ *is_async = false;
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
int pos;
@@ -2668,88 +3253,83 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
return this->EmptyExpression();
}
-
-template <class Traits>
-void ParserBase<Traits>::ParseFormalParameter(
- FormalParametersT* parameters, ExpressionClassifier* classifier, bool* ok) {
+template <typename Impl>
+void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
+ ExpressionClassifier* classifier,
+ bool* ok) {
// FormalParameter[Yield,GeneratorParameter] :
// BindingElement[?Yield, ?GeneratorParameter]
bool is_rest = parameters->has_rest;
- ExpressionT pattern = ParsePrimaryExpression(classifier, ok);
- if (!*ok) return;
-
- ValidateBindingPattern(classifier, ok);
- if (!*ok) return;
+ ExpressionT pattern =
+ ParsePrimaryExpression(classifier, CHECK_OK_CUSTOM(Void));
+ ValidateBindingPattern(classifier, CHECK_OK_CUSTOM(Void));
if (!Traits::IsIdentifier(pattern)) {
parameters->is_simple = false;
- ValidateFormalParameterInitializer(classifier, ok);
- if (!*ok) return;
+ ValidateFormalParameterInitializer(classifier, CHECK_OK_CUSTOM(Void));
classifier->RecordNonSimpleParameter();
}
ExpressionT initializer = Traits::EmptyExpression();
if (!is_rest && Check(Token::ASSIGN)) {
ExpressionClassifier init_classifier(this);
- initializer = ParseAssignmentExpression(true, &init_classifier, ok);
- if (!*ok) return;
- Traits::RewriteNonPattern(&init_classifier, ok);
- ValidateFormalParameterInitializer(&init_classifier, ok);
- if (!*ok) return;
+ initializer = ParseAssignmentExpression(true, &init_classifier,
+ CHECK_OK_CUSTOM(Void));
+ impl()->RewriteNonPattern(&init_classifier, CHECK_OK_CUSTOM(Void));
+ ValidateFormalParameterInitializer(&init_classifier, CHECK_OK_CUSTOM(Void));
parameters->is_simple = false;
init_classifier.Discard();
classifier->RecordNonSimpleParameter();
- if (allow_harmony_function_name()) {
- Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
- }
+ Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
}
Traits::AddFormalParameter(parameters, pattern, initializer,
scanner()->location().end_pos, is_rest);
}
-
-template <class Traits>
-void ParserBase<Traits>::ParseFormalParameterList(
+template <typename Impl>
+void ParserBase<Impl>::ParseFormalParameterList(
FormalParametersT* parameters, ExpressionClassifier* classifier, bool* ok) {
- // FormalParameters[Yield,GeneratorParameter] :
+ // FormalParameters[Yield] :
// [empty]
- // FormalParameterList[?Yield, ?GeneratorParameter]
- //
- // FormalParameterList[Yield,GeneratorParameter] :
// FunctionRestParameter[?Yield]
- // FormalsList[?Yield, ?GeneratorParameter]
- // FormalsList[?Yield, ?GeneratorParameter] , FunctionRestParameter[?Yield]
+ // FormalParameterList[?Yield]
+ // FormalParameterList[?Yield] ,
+ // FormalParameterList[?Yield] , FunctionRestParameter[?Yield]
//
- // FormalsList[Yield,GeneratorParameter] :
- // FormalParameter[?Yield, ?GeneratorParameter]
- // FormalsList[?Yield, ?GeneratorParameter] ,
- // FormalParameter[?Yield,?GeneratorParameter]
+ // FormalParameterList[Yield] :
+ // FormalParameter[?Yield]
+ // FormalParameterList[?Yield] , FormalParameter[?Yield]
DCHECK_EQ(0, parameters->Arity());
if (peek() != Token::RPAREN) {
- do {
+ while (true) {
if (parameters->Arity() > Code::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyParameters);
*ok = false;
return;
}
parameters->has_rest = Check(Token::ELLIPSIS);
- ParseFormalParameter(parameters, classifier, ok);
- if (!*ok) return;
- } while (!parameters->has_rest && Check(Token::COMMA));
+ ParseFormalParameter(parameters, classifier, CHECK_OK_CUSTOM(Void));
- if (parameters->has_rest) {
- parameters->is_simple = false;
- classifier->RecordNonSimpleParameter();
- if (peek() == Token::COMMA) {
- ReportMessageAt(scanner()->peek_location(),
- MessageTemplate::kParamAfterRest);
- *ok = false;
- return;
+ if (parameters->has_rest) {
+ parameters->is_simple = false;
+ classifier->RecordNonSimpleParameter();
+ if (peek() == Token::COMMA) {
+ ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kParamAfterRest);
+ *ok = false;
+ return;
+ }
+ break;
+ }
+ if (!Check(Token::COMMA)) break;
+ if (allow_harmony_trailing_commas() && peek() == Token::RPAREN) {
+ // Allow the trailing comma.
+ break;
}
}
}
@@ -2760,12 +3340,12 @@ void ParserBase<Traits>::ParseFormalParameterList(
}
}
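// For example (illustrative, with allow_harmony_trailing_commas):
//   function f(a, b,) {}     // a trailing comma in the parameter list is fine
//   function f(...rest,) {}  // error: nothing may follow a rest parameter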
-template <class Traits>
-void ParserBase<Traits>::CheckArityRestrictions(int param_count,
- FunctionKind function_kind,
- bool has_rest,
- int formals_start_pos,
- int formals_end_pos, bool* ok) {
+template <typename Impl>
+void ParserBase<Impl>::CheckArityRestrictions(int param_count,
+ FunctionKind function_kind,
+ bool has_rest,
+ int formals_start_pos,
+ int formals_end_pos, bool* ok) {
if (IsGetterFunction(function_kind)) {
if (param_count != 0) {
ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
@@ -2786,32 +3366,53 @@ void ParserBase<Traits>::CheckArityRestrictions(int param_count,
}
}
-
-template <class Traits>
-bool ParserBase<Traits>::IsNextLetKeyword() {
+template <typename Impl>
+bool ParserBase<Impl>::IsNextLetKeyword() {
DCHECK(peek() == Token::LET);
- if (!allow_let()) {
- return false;
- }
Token::Value next_next = PeekAhead();
switch (next_next) {
case Token::LBRACE:
case Token::LBRACK:
case Token::IDENTIFIER:
case Token::STATIC:
- case Token::LET: // Yes, you can do let let = ... in sloppy mode
+ case Token::LET: // `let let;` is disallowed by static semantics, but the
+ // token must first be interpreted as a keyword in order
+ // for those semantics to apply. This ensures that ASI is
+ // not honored when a LineTerminator separates the
+ // tokens.
case Token::YIELD:
+ case Token::AWAIT:
+ case Token::ASYNC:
return true;
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return is_sloppy(language_mode());
default:
return false;
}
}
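// For example (illustrative):
//   let x = 1;   // LET then IDENTIFIER: a lexical declaration
//   let          // the newline does not trigger ASI; the tokens still form
//   let = 1;     // `let let`, which the static semantics then reject
//   let + 1;     // LET then '+': `let` is parsed as a plain identifier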
+template <typename Impl>
+bool ParserBase<Impl>::IsTrivialExpression() {
+ Token::Value peek_token = peek();
+ if (peek_token == Token::SMI || peek_token == Token::NUMBER ||
+ peek_token == Token::NULL_LITERAL || peek_token == Token::TRUE_LITERAL ||
+ peek_token == Token::FALSE_LITERAL || peek_token == Token::STRING ||
+ peek_token == Token::IDENTIFIER || peek_token == Token::THIS) {
+ // PeekAhead() is expensive and not always needed, so we only call it
+ // after checking peek().
+ Token::Value peek_ahead = PeekAhead();
+ if (peek_ahead == Token::COMMA || peek_ahead == Token::RPAREN ||
+ peek_ahead == Token::SEMICOLON || peek_ahead == Token::RBRACK) {
+ return true;
+ }
+ }
+ return false;
+}
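// For example (illustrative): in `(x, 42)` both `x` and `42` are trivial --
// a literal or identifier followed by ',' ')' ';' or ']' -- whereas in
// `(x + y)` the `x` is not, since PeekAhead() would see '+'.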
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseArrowFunctionLiteral(
- bool accept_IN, const FormalParametersT& formal_parameters,
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseArrowFunctionLiteral(
+ bool accept_IN, const FormalParametersT& formal_parameters, bool is_async,
const ExpressionClassifier& formals_classifier, bool* ok) {
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
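// For example (illustrative):
//   var f = (a, b)   // a LineTerminator before `=>` triggers ASI here, so
//       => a + b;    // this is a SyntaxError, not an arrow function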
@@ -2826,13 +3427,11 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
int num_parameters = formal_parameters.scope->num_parameters();
int materialized_literal_count = -1;
int expected_property_count = -1;
- Scanner::Location super_loc;
+ FunctionKind arrow_kind = is_async ? kAsyncArrowFunction : kArrowFunction;
{
- typename Traits::Type::Factory function_factory(ast_value_factory());
- FunctionState function_state(&function_state_, &scope_,
- formal_parameters.scope, kArrowFunction,
- &function_factory);
+ FunctionState function_state(&function_state_, &scope_state_,
+ formal_parameters.scope, arrow_kind);
function_state.SkipMaterializedLiterals(
formal_parameters.materialized_literals_count);
@@ -2844,20 +3443,21 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
if (peek() == Token::LBRACE) {
// Multiple statement body
Consume(Token::LBRACE);
- bool is_lazily_parsed =
- (mode() == PARSE_LAZILY && scope_->AllowsLazyParsing());
+ DCHECK_EQ(scope(), formal_parameters.scope);
+ bool is_lazily_parsed = (mode() == PARSE_LAZILY &&
+ formal_parameters.scope->AllowsLazyParsing());
if (is_lazily_parsed) {
body = this->NewStatementList(0, zone());
- this->SkipLazyFunctionBody(&materialized_literal_count,
- &expected_property_count, CHECK_OK);
+ impl()->SkipLazyFunctionBody(&materialized_literal_count,
+ &expected_property_count, CHECK_OK);
if (formal_parameters.materialized_literals_count > 0) {
materialized_literal_count +=
formal_parameters.materialized_literals_count;
}
} else {
- body = this->ParseEagerFunctionBody(
- this->EmptyIdentifier(), RelocInfo::kNoPosition, formal_parameters,
- kArrowFunction, FunctionLiteral::kAnonymousExpression, CHECK_OK);
+ body = impl()->ParseEagerFunctionBody(
+ this->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
+ arrow_kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
materialized_literal_count =
function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
@@ -2865,22 +3465,32 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
} else {
// Single-expression body
int pos = position();
- parenthesized_function_ = false;
- ExpressionClassifier classifier(this);
- ExpressionT expression =
- ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
- Traits::RewriteNonPattern(&classifier, CHECK_OK);
+ DCHECK(ReturnExprContext::kInsideValidBlock ==
+ function_state_->return_expr_context());
+ ReturnExprScope allow_tail_calls(
+ function_state_, ReturnExprContext::kInsideValidReturnStatement);
body = this->NewStatementList(1, zone());
- this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
- body->Add(factory()->NewReturnStatement(expression, pos), zone());
+ this->AddParameterInitializationBlock(formal_parameters, body, is_async,
+ CHECK_OK);
+ ExpressionClassifier classifier(this);
+ if (is_async) {
+ impl()->ParseAsyncArrowSingleExpressionBody(body, accept_IN,
+ &classifier, pos, CHECK_OK);
+ impl()->RewriteNonPattern(&classifier, CHECK_OK);
+ } else {
+ ExpressionT expression =
+ ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
+ impl()->RewriteNonPattern(&classifier, CHECK_OK);
+ body->Add(factory()->NewReturnStatement(expression, pos), zone());
+ if (allow_tailcalls() && !is_sloppy(language_mode())) {
+ // ES6 14.6.1 Static Semantics: IsInTailPosition
+ impl()->MarkTailPosition(expression);
+ }
+ }
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
- // ES6 14.6.1 Static Semantics: IsInTailPosition
- if (allow_tailcalls() && !is_sloppy(language_mode())) {
- this->MarkTailPosition(expression);
- }
+ impl()->MarkCollectedTailCallExpressions();
}
- super_loc = function_state.super_location();
formal_parameters.scope->set_end_position(scanner()->location().end_pos);
@@ -2897,11 +3507,9 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
CheckStrictOctalLiteral(formal_parameters.scope->start_position(),
scanner()->location().end_pos, CHECK_OK);
}
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
- this->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
- }
+ impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
- Traits::RewriteDestructuringAssignments();
+ impl()->RewriteDestructuringAssignments();
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
@@ -2909,24 +3517,20 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
materialized_literal_count, expected_property_count, num_parameters,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldLazyCompile, FunctionKind::kArrowFunction,
+ FunctionLiteral::kShouldLazyCompile, arrow_kind,
formal_parameters.scope->start_position());
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
- if (super_loc.IsValid()) function_state_->set_super_location(super_loc);
if (fni_ != NULL) this->InferFunctionName(fni_, function_literal);
return function_literal;
}
-
-template <typename Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
- ExpressionClassifier* classifier,
- bool* ok) {
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
+ ExpressionT tag, int start, ExpressionClassifier* classifier, bool* ok) {
// A TemplateLiteral is made up of 0 or more TEMPLATE_SPAN tokens (literal
// text followed by a substitution expression), finalized by a single
// TEMPLATE_TAIL.
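// For example (illustrative): `a${b}c${d}e` scans as TEMPLATE_SPAN "a",
// expression b, TEMPLATE_SPAN "c", expression d, TEMPLATE_TAIL "e"; a
// template without substitutions is a single TEMPLATE_TAIL.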
@@ -2946,15 +3550,15 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
Consume(Token::TEMPLATE_TAIL);
int pos = position();
CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
- typename Traits::TemplateLiteralState ts = Traits::OpenTemplateLiteral(pos);
- Traits::AddTemplateSpan(&ts, true);
- return Traits::CloseTemplateLiteral(&ts, start, tag);
+ typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
+ impl()->AddTemplateSpan(&ts, true);
+ return impl()->CloseTemplateLiteral(&ts, start, tag);
}
Consume(Token::TEMPLATE_SPAN);
int pos = position();
- typename Traits::TemplateLiteralState ts = Traits::OpenTemplateLiteral(pos);
- Traits::AddTemplateSpan(&ts, false);
+ typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
+ impl()->AddTemplateSpan(&ts, false);
Token::Value next;
// If we open with a TEMPLATE_SPAN, we must scan the subsequent expression,
@@ -2979,8 +3583,9 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
int expr_pos = peek_position();
ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
- Traits::RewriteNonPattern(classifier, CHECK_OK);
- Traits::AddTemplateExpression(&ts, expression);
+ CheckNoTailCallExpressions(classifier, CHECK_OK);
+ impl()->RewriteNonPattern(classifier, CHECK_OK);
+ impl()->AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
@@ -3008,29 +3613,27 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
return Traits::EmptyExpression();
}
- Traits::AddTemplateSpan(&ts, next == Token::TEMPLATE_TAIL);
+ impl()->AddTemplateSpan(&ts, next == Token::TEMPLATE_TAIL);
} while (next == Token::TEMPLATE_SPAN);
DCHECK_EQ(next, Token::TEMPLATE_TAIL);
CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
// Once we've reached a TEMPLATE_TAIL, we can close the TemplateLiteral.
- return Traits::CloseTemplateLiteral(&ts, start, tag);
+ return impl()->CloseTemplateLiteral(&ts, start, tag);
}
-
-template <typename Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::CheckAndRewriteReferenceExpression(
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, bool* ok) {
return this->CheckAndRewriteReferenceExpression(expression, beg_pos, end_pos,
message, kReferenceError, ok);
}
-
-template <typename Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::CheckAndRewriteReferenceExpression(
+template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok) {
if (this->IsIdentifier(expression) && is_strict(language_mode()) &&
@@ -3054,15 +3657,13 @@ ParserBase<Traits>::CheckAndRewriteReferenceExpression(
return this->EmptyExpression();
}
-
-template <typename Traits>
-bool ParserBase<Traits>::IsValidReferenceExpression(ExpressionT expression) {
+template <typename Impl>
+bool ParserBase<Impl>::IsValidReferenceExpression(ExpressionT expression) {
return this->IsAssignableIdentifier(expression) || expression->IsProperty();
}
-
-template <typename Traits>
-void ParserBase<Traits>::CheckDestructuringElement(
+template <typename Impl>
+void ParserBase<Impl>::CheckDestructuringElement(
ExpressionT expression, ExpressionClassifier* classifier, int begin,
int end) {
if (!IsValidPattern(expression) && !expression->IsAssignment() &&
@@ -3077,47 +3678,47 @@ void ParserBase<Traits>::CheckDestructuringElement(
#undef CHECK_OK
#undef CHECK_OK_CUSTOM
-
-template <typename Traits>
-void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
- Token::Value property, PropertyKind type, bool is_static, bool is_generator,
- bool* ok) {
- DCHECK(!is_static);
- DCHECK(!is_generator || type == kMethodProperty);
+template <typename Impl>
+void ParserBase<Impl>::ObjectLiteralChecker::CheckProperty(
+ Token::Value property, PropertyKind type, MethodKind method_type,
+ ExpressionClassifier* classifier, bool* ok) {
+ DCHECK(!IsStaticMethod(method_type));
+ DCHECK(!IsSpecialMethod(method_type) || type == kMethodProperty);
if (property == Token::SMI || property == Token::NUMBER) return;
if (type == kValueProperty && IsProto()) {
if (has_seen_proto_) {
- this->parser()->ReportMessage(MessageTemplate::kDuplicateProto);
- *ok = false;
+ classifier->RecordObjectLiteralError(
+ this->scanner()->location(), MessageTemplate::kDuplicateProto);
return;
}
has_seen_proto_ = true;
- return;
}
}
-
-template <typename Traits>
-void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
- Token::Value property, PropertyKind type, bool is_static, bool is_generator,
- bool* ok) {
+template <typename Impl>
+void ParserBase<Impl>::ClassLiteralChecker::CheckProperty(
+ Token::Value property, PropertyKind type, MethodKind method_type,
+ ExpressionClassifier* classifier, bool* ok) {
DCHECK(type == kMethodProperty || type == kAccessorProperty);
if (property == Token::SMI || property == Token::NUMBER) return;
- if (is_static) {
+ if (IsStaticMethod(method_type)) {
if (IsPrototype()) {
this->parser()->ReportMessage(MessageTemplate::kStaticPrototype);
*ok = false;
return;
}
} else if (IsConstructor()) {
- if (is_generator || type == kAccessorProperty) {
+ const bool is_generator = IsGeneratorMethod(method_type);
+ const bool is_async = IsAsyncMethod(method_type);
+ if (is_generator || is_async || type == kAccessorProperty) {
MessageTemplate::Template msg =
is_generator ? MessageTemplate::kConstructorIsGenerator
- : MessageTemplate::kConstructorIsAccessor;
+ : is_async ? MessageTemplate::kConstructorIsAsync
+ : MessageTemplate::kConstructorIsAccessor;
this->parser()->ReportMessage(msg);
*ok = false;
return;
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index c9897cdd92..25571470dc 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -4,21 +4,19 @@
#include "src/parsing/parser.h"
+#include <memory>
+
#include "src/api.h"
-#include "src/ast/ast.h"
#include "src/ast/ast-expression-rewriter.h"
-#include "src/ast/ast-expression-visitor.h"
#include "src/ast/ast-literal-reindexer.h"
-#include "src/ast/scopeinfo.h"
+#include "src/ast/ast-traversal-visitor.h"
+#include "src/ast/ast.h"
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
#include "src/char-predicates-inl.h"
-#include "src/codegen.h"
-#include "src/compiler.h"
#include "src/messages.h"
#include "src/parsing/parameter-initializer-rewriter.h"
-#include "src/parsing/parser-base.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/runtime/runtime.h"
@@ -39,64 +37,6 @@ ScriptData::ScriptData(const byte* data, int length)
}
}
-
-ParseInfo::ParseInfo(Zone* zone)
- : zone_(zone),
- flags_(0),
- source_stream_(nullptr),
- source_stream_encoding_(ScriptCompiler::StreamedSource::ONE_BYTE),
- extension_(nullptr),
- compile_options_(ScriptCompiler::kNoCompileOptions),
- script_scope_(nullptr),
- unicode_cache_(nullptr),
- stack_limit_(0),
- hash_seed_(0),
- cached_data_(nullptr),
- ast_value_factory_(nullptr),
- literal_(nullptr),
- scope_(nullptr) {}
-
-
-ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
- : ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
- set_closure(function);
- set_context(Handle<Context>(function->context()));
-}
-
-
-ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
- : ParseInfo(zone) {
- isolate_ = shared->GetIsolate();
-
- set_lazy();
- set_hash_seed(isolate_->heap()->HashSeed());
- set_stack_limit(isolate_->stack_guard()->real_climit());
- set_unicode_cache(isolate_->unicode_cache());
- set_language_mode(shared->language_mode());
- set_shared_info(shared);
-
- Handle<Script> script(Script::cast(shared->script()));
- set_script(script);
- if (!script.is_null() && script->type() == Script::TYPE_NATIVE) {
- set_native();
- }
-}
-
-
-ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
- isolate_ = script->GetIsolate();
-
- set_hash_seed(isolate_->heap()->HashSeed());
- set_stack_limit(isolate_->stack_guard()->real_climit());
- set_unicode_cache(isolate_->unicode_cache());
- set_script(script);
-
- if (script->type() == Script::TYPE_NATIVE) {
- set_native();
- }
-}
-
-
FunctionEntry ParseData::GetFunctionEntry(int start) {
// The current pre-data entry must be a FunctionEntry with the given
// start position.
@@ -168,6 +108,35 @@ int ParseData::FunctionsSize() {
return static_cast<int>(Data()[PreparseDataConstants::kFunctionsSizeOffset]);
}
+// Helper for putting parts of the parse results into a temporary zone when
+// parsing inner function bodies.
+class DiscardableZoneScope {
+ public:
+ DiscardableZoneScope(Parser* parser, Zone* temp_zone, bool use_temp_zone)
+ : ast_node_factory_scope_(parser->factory(), temp_zone, use_temp_zone),
+ fni_(parser->ast_value_factory_, temp_zone),
+ parser_(parser),
+ prev_fni_(parser->fni_),
+ prev_zone_(parser->zone_) {
+ if (use_temp_zone) {
+ parser_->fni_ = &fni_;
+ parser_->zone_ = temp_zone;
+ }
+ }
+ ~DiscardableZoneScope() {
+ parser_->fni_ = prev_fni_;
+ parser_->zone_ = prev_zone_;
+ }
+
+ private:
+ AstNodeFactory::BodyScope ast_node_factory_scope_;
+ FuncNameInferrer fni_;
+ Parser* parser_;
+ FuncNameInferrer* prev_fni_;
+ Zone* prev_zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(DiscardableZoneScope);
+};
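// A hypothetical use (illustrative only):
//   Zone temp_zone(...);  // some temporary zone
//   DiscardableZoneScope zone_scope(parser, &temp_zone, /*use_temp_zone=*/true);
//   // AST nodes created while the scope is live go to temp_zone; the
//   // destructor restores the parser's previous zone and FuncNameInferrer.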
void Parser::SetCachedData(ParseInfo* info) {
if (compile_options_ == ScriptCompiler::kNoCompileOptions) {
@@ -181,8 +150,8 @@ void Parser::SetCachedData(ParseInfo* info) {
}
FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
- bool call_super, Scope* scope,
- int pos, int end_pos,
+ bool call_super, int pos,
+ int end_pos,
LanguageMode language_mode) {
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -191,7 +160,7 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
: FunctionKind::kDefaultBaseConstructor;
- Scope* function_scope = NewScope(scope, FUNCTION_SCOPE, kind);
+ DeclarationScope* function_scope = NewFunctionScope(kind);
SetLanguageMode(function_scope,
static_cast<LanguageMode>(language_mode | STRICT));
// Set start and end position to the same value
@@ -200,32 +169,40 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
ZoneList<Statement*>* body = NULL;
{
- AstNodeFactory function_factory(ast_value_factory());
- FunctionState function_state(&function_state_, &scope_, function_scope,
- kind, &function_factory);
+ FunctionState function_state(&function_state_, &scope_state_,
+ function_scope, kind);
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
if (call_super) {
// $super_constructor = %_GetSuperConstructor(<this-function>)
- // %reflect_construct($super_constructor, arguments, new.target)
+ // %reflect_construct(
+ // $super_constructor, InternalArray(...args), new.target)
+ auto constructor_args_name = ast_value_factory()->empty_string();
+ bool is_duplicate;
+ bool is_rest = true;
+ bool is_optional = false;
+ Variable* constructor_args = function_scope->DeclareParameter(
+ constructor_args_name, TEMPORARY, is_optional, is_rest, &is_duplicate,
+ ast_value_factory());
+
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(2, zone());
- VariableProxy* this_function_proxy = scope_->NewUnresolved(
- factory(), ast_value_factory()->this_function_string(),
- Variable::NORMAL, pos);
+ VariableProxy* this_function_proxy =
+ NewUnresolved(ast_value_factory()->this_function_string(), pos);
ZoneList<Expression*>* tmp =
new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(this_function_proxy, zone());
Expression* super_constructor = factory()->NewCallRuntime(
Runtime::kInlineGetSuperConstructor, tmp, pos);
args->Add(super_constructor, zone());
- VariableProxy* arguments_proxy = scope_->NewUnresolved(
- factory(), ast_value_factory()->arguments_string(), Variable::NORMAL,
- pos);
- args->Add(arguments_proxy, zone());
- VariableProxy* new_target_proxy = scope_->NewUnresolved(
- factory(), ast_value_factory()->new_target_string(), Variable::NORMAL,
- pos);
+ Spread* spread_args = factory()->NewSpread(
+ factory()->NewVariableProxy(constructor_args), pos, pos);
+ ZoneList<Expression*>* spread_args_expr =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ spread_args_expr->Add(spread_args, zone());
+ args->AddAll(*PrepareSpreadArguments(spread_args_expr), zone());
+ VariableProxy* new_target_proxy =
+ NewUnresolved(ast_value_factory()->new_target_string(), pos);
args->Add(new_target_proxy, zone());
CallRuntime* call = factory()->NewCallRuntime(
Context::REFLECT_CONSTRUCT_INDEX, args, pos);
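// In effect (illustrative JS), a default derived constructor now behaves
// like `constructor(...args) { super(...args); }` instead of forwarding
// the `arguments` object directly.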
@@ -299,14 +276,20 @@ class TargetScope BASE_EMBEDDED {
// thus it must never be used where only a single statement
// is correct (e.g. an if statement branch w/o braces)!
-#define CHECK_OK ok); \
- if (!*ok) return NULL; \
+#define CHECK_OK ok); \
+ if (!*ok) return nullptr; \
((void)0
#define DUMMY ) // to make indentation work
#undef DUMMY
-#define CHECK_FAILED /**/); \
- if (failed_) return NULL; \
+#define CHECK_OK_VOID ok); \
+ if (!*ok) return; \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
+
+#define CHECK_FAILED /**/); \
+ if (failed_) return nullptr; \
((void)0
#define DUMMY ) // to make indentation work
#undef DUMMY
@@ -314,77 +297,74 @@ class TargetScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// Implementation of Parser
-bool ParserTraits::IsEval(const AstRawString* identifier) const {
- return identifier == parser_->ast_value_factory()->eval_string();
+bool ParserBaseTraits<Parser>::IsEval(const AstRawString* identifier) const {
+ return identifier == delegate()->ast_value_factory()->eval_string();
}
-
-bool ParserTraits::IsArguments(const AstRawString* identifier) const {
- return identifier == parser_->ast_value_factory()->arguments_string();
+bool ParserBaseTraits<Parser>::IsArguments(
+ const AstRawString* identifier) const {
+ return identifier == delegate()->ast_value_factory()->arguments_string();
}
-
-bool ParserTraits::IsEvalOrArguments(const AstRawString* identifier) const {
+bool ParserBaseTraits<Parser>::IsEvalOrArguments(
+ const AstRawString* identifier) const {
return IsEval(identifier) || IsArguments(identifier);
}
-bool ParserTraits::IsUndefined(const AstRawString* identifier) const {
- return identifier == parser_->ast_value_factory()->undefined_string();
+bool ParserBaseTraits<Parser>::IsUndefined(
+ const AstRawString* identifier) const {
+ return identifier == delegate()->ast_value_factory()->undefined_string();
}
-bool ParserTraits::IsPrototype(const AstRawString* identifier) const {
- return identifier == parser_->ast_value_factory()->prototype_string();
+bool ParserBaseTraits<Parser>::IsPrototype(
+ const AstRawString* identifier) const {
+ return identifier == delegate()->ast_value_factory()->prototype_string();
}
-
-bool ParserTraits::IsConstructor(const AstRawString* identifier) const {
- return identifier == parser_->ast_value_factory()->constructor_string();
+bool ParserBaseTraits<Parser>::IsConstructor(
+ const AstRawString* identifier) const {
+ return identifier == delegate()->ast_value_factory()->constructor_string();
}
-
-bool ParserTraits::IsThisProperty(Expression* expression) {
+bool ParserBaseTraits<Parser>::IsThisProperty(Expression* expression) {
DCHECK(expression != NULL);
Property* property = expression->AsProperty();
return property != NULL && property->obj()->IsVariableProxy() &&
property->obj()->AsVariableProxy()->is_this();
}
-
-bool ParserTraits::IsIdentifier(Expression* expression) {
+bool ParserBaseTraits<Parser>::IsIdentifier(Expression* expression) {
VariableProxy* operand = expression->AsVariableProxy();
return operand != NULL && !operand->is_this();
}
-
-void ParserTraits::PushPropertyName(FuncNameInferrer* fni,
- Expression* expression) {
+void ParserBaseTraits<Parser>::PushPropertyName(FuncNameInferrer* fni,
+ Expression* expression) {
if (expression->IsPropertyName()) {
fni->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
} else {
fni->PushLiteralName(
- parser_->ast_value_factory()->anonymous_function_string());
+ delegate()->ast_value_factory()->anonymous_function_string());
}
}
-
-void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
- Expression* right) {
+void ParserBaseTraits<Parser>::CheckAssigningFunctionLiteralToProperty(
+ Expression* left, Expression* right) {
DCHECK(left != NULL);
if (left->IsProperty() && right->IsFunctionLiteral()) {
right->AsFunctionLiteral()->set_pretenure();
}
}
-
-Expression* ParserTraits::MarkExpressionAsAssigned(Expression* expression) {
+Expression* ParserBaseTraits<Parser>::MarkExpressionAsAssigned(
+ Expression* expression) {
VariableProxy* proxy =
expression != NULL ? expression->AsVariableProxy() : NULL;
if (proxy != NULL) proxy->set_is_assigned();
return expression;
}
-
-bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
+bool ParserBaseTraits<Parser>::ShortcutNumericLiteralBinaryExpression(
Expression** x, Expression* y, Token::Value op, int pos,
AstNodeFactory* factory) {
if ((*x)->AsLiteral() && (*x)->AsLiteral()->raw_value()->IsNumber() &&
@@ -454,10 +434,8 @@ bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
return false;
}
-
-Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
- Token::Value op, int pos,
- AstNodeFactory* factory) {
+Expression* ParserBaseTraits<Parser>::BuildUnaryExpression(
+ Expression* expression, Token::Value op, int pos, AstNodeFactory* factory) {
DCHECK(expression != NULL);
if (expression->IsLiteral()) {
const AstValue* literal = expression->AsLiteral()->raw_value();
@@ -499,10 +477,11 @@ Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
return factory->NewUnaryOperation(op, expression, pos);
}
-Expression* ParserTraits::BuildIteratorResult(Expression* value, bool done) {
- int pos = RelocInfo::kNoPosition;
- AstNodeFactory* factory = parser_->factory();
- Zone* zone = parser_->zone();
+Expression* ParserBaseTraits<Parser>::BuildIteratorResult(Expression* value,
+ bool done) {
+ int pos = kNoSourcePosition;
+ AstNodeFactory* factory = delegate()->factory();
+ Zone* zone = delegate()->zone();
if (value == nullptr) value = factory->NewUndefinedLiteral(pos);
@@ -514,173 +493,136 @@ Expression* ParserTraits::BuildIteratorResult(Expression* value, bool done) {
pos);
}
-Expression* ParserTraits::NewThrowReferenceError(
+Expression* ParserBaseTraits<Parser>::NewThrowReferenceError(
MessageTemplate::Template message, int pos) {
- return NewThrowError(Runtime::kNewReferenceError, message,
- parser_->ast_value_factory()->empty_string(), pos);
+ return delegate()->NewThrowError(
+ Runtime::kNewReferenceError, message,
+ delegate()->ast_value_factory()->empty_string(), pos);
}
-
-Expression* ParserTraits::NewThrowSyntaxError(MessageTemplate::Template message,
- const AstRawString* arg,
- int pos) {
- return NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
+Expression* ParserBaseTraits<Parser>::NewThrowSyntaxError(
+ MessageTemplate::Template message, const AstRawString* arg, int pos) {
+ return delegate()->NewThrowError(Runtime::kNewSyntaxError, message, arg, pos);
}
-
-Expression* ParserTraits::NewThrowTypeError(MessageTemplate::Template message,
- const AstRawString* arg, int pos) {
- return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
+Expression* ParserBaseTraits<Parser>::NewThrowTypeError(
+ MessageTemplate::Template message, const AstRawString* arg, int pos) {
+ return delegate()->NewThrowError(Runtime::kNewTypeError, message, arg, pos);
}
-
-Expression* ParserTraits::NewThrowError(Runtime::FunctionId id,
- MessageTemplate::Template message,
- const AstRawString* arg, int pos) {
- Zone* zone = parser_->zone();
- ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
- args->Add(parser_->factory()->NewSmiLiteral(message, pos), zone);
- args->Add(parser_->factory()->NewStringLiteral(arg, pos), zone);
- CallRuntime* call_constructor =
- parser_->factory()->NewCallRuntime(id, args, pos);
- return parser_->factory()->NewThrow(call_constructor, pos);
+Expression* Parser::NewThrowError(Runtime::FunctionId id,
+ MessageTemplate::Template message,
+ const AstRawString* arg, int pos) {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewSmiLiteral(message, pos), zone());
+ args->Add(factory()->NewStringLiteral(arg, pos), zone());
+ CallRuntime* call_constructor = factory()->NewCallRuntime(id, args, pos);
+ return factory()->NewThrow(call_constructor, pos);
}
-
-void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const char* arg, ParseErrorType error_type) {
- if (parser_->stack_overflow()) {
+void ParserBaseTraits<Parser>::ReportMessageAt(
+ Scanner::Location source_location, MessageTemplate::Template message,
+ const char* arg, ParseErrorType error_type) {
+ if (delegate()->stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
// stack overflow. The isolate allows only one pending exception at a time
// and we want to report the stack overflow later.
return;
}
- parser_->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
- source_location.end_pos,
- message, arg, error_type);
-}
-
-
-void ParserTraits::ReportMessage(MessageTemplate::Template message,
- const char* arg, ParseErrorType error_type) {
- Scanner::Location source_location = parser_->scanner()->location();
- ReportMessageAt(source_location, message, arg, error_type);
-}
-
-
-void ParserTraits::ReportMessage(MessageTemplate::Template message,
- const AstRawString* arg,
- ParseErrorType error_type) {
- Scanner::Location source_location = parser_->scanner()->location();
- ReportMessageAt(source_location, message, arg, error_type);
+ delegate()->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos,
+ message, arg, error_type);
}
-
-void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- MessageTemplate::Template message,
- const AstRawString* arg,
- ParseErrorType error_type) {
- if (parser_->stack_overflow()) {
+void ParserBaseTraits<Parser>::ReportMessageAt(
+ Scanner::Location source_location, MessageTemplate::Template message,
+ const AstRawString* arg, ParseErrorType error_type) {
+ if (delegate()->stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
// stack overflow. The isolate allows only one pending exception at a time
// and we want to report the stack overflow later.
return;
}
- parser_->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
- source_location.end_pos,
- message, arg, error_type);
+ delegate()->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos,
+ message, arg, error_type);
}
-
-const AstRawString* ParserTraits::GetSymbol(Scanner* scanner) {
+const AstRawString* ParserBaseTraits<Parser>::GetSymbol(
+ Scanner* scanner) const {
const AstRawString* result =
- parser_->scanner()->CurrentSymbol(parser_->ast_value_factory());
+ delegate()->scanner()->CurrentSymbol(delegate()->ast_value_factory());
DCHECK(result != NULL);
return result;
}
-
-const AstRawString* ParserTraits::GetNumberAsSymbol(Scanner* scanner) {
- double double_value = parser_->scanner()->DoubleValue();
+const AstRawString* ParserBaseTraits<Parser>::GetNumberAsSymbol(
+ Scanner* scanner) const {
+ double double_value = delegate()->scanner()->DoubleValue();
char array[100];
- const char* string =
- DoubleToCString(double_value, Vector<char>(array, arraysize(array)));
- return parser_->ast_value_factory()->GetOneByteString(string);
+ const char* string = DoubleToCString(double_value, ArrayVector(array));
+ return delegate()->ast_value_factory()->GetOneByteString(string);
}
-
-const AstRawString* ParserTraits::GetNextSymbol(Scanner* scanner) {
- return parser_->scanner()->NextSymbol(parser_->ast_value_factory());
+const AstRawString* ParserBaseTraits<Parser>::GetNextSymbol(
+ Scanner* scanner) const {
+ return delegate()->scanner()->NextSymbol(delegate()->ast_value_factory());
}
-
-Expression* ParserTraits::ThisExpression(Scope* scope, AstNodeFactory* factory,
- int pos) {
- return scope->NewUnresolved(factory,
- parser_->ast_value_factory()->this_string(),
- Variable::THIS, pos, pos + 4);
+Expression* ParserBaseTraits<Parser>::ThisExpression(int pos) {
+ return delegate()->NewUnresolved(
+ delegate()->ast_value_factory()->this_string(), pos, pos + 4,
+ Variable::THIS);
}
-
-Expression* ParserTraits::SuperPropertyReference(Scope* scope,
- AstNodeFactory* factory,
- int pos) {
+Expression* ParserBaseTraits<Parser>::NewSuperPropertyReference(
+ AstNodeFactory* factory, int pos) {
// this_function[home_object_symbol]
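// For example (illustrative): `super.x` in a method reads `x` from the
// prototype of the method's home object; the home object itself is fetched
// here as this_function[home_object_symbol].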
- VariableProxy* this_function_proxy = scope->NewUnresolved(
- factory, parser_->ast_value_factory()->this_function_string(),
- Variable::NORMAL, pos);
+ VariableProxy* this_function_proxy = delegate()->NewUnresolved(
+ delegate()->ast_value_factory()->this_function_string(), pos);
Expression* home_object_symbol_literal =
- factory->NewSymbolLiteral("home_object_symbol", RelocInfo::kNoPosition);
+ factory->NewSymbolLiteral("home_object_symbol", kNoSourcePosition);
Expression* home_object = factory->NewProperty(
this_function_proxy, home_object_symbol_literal, pos);
return factory->NewSuperPropertyReference(
- ThisExpression(scope, factory, pos)->AsVariableProxy(), home_object, pos);
+ ThisExpression(pos)->AsVariableProxy(), home_object, pos);
}
-
-Expression* ParserTraits::SuperCallReference(Scope* scope,
- AstNodeFactory* factory, int pos) {
- VariableProxy* new_target_proxy = scope->NewUnresolved(
- factory, parser_->ast_value_factory()->new_target_string(),
- Variable::NORMAL, pos);
- VariableProxy* this_function_proxy = scope->NewUnresolved(
- factory, parser_->ast_value_factory()->this_function_string(),
- Variable::NORMAL, pos);
- return factory->NewSuperCallReference(
- ThisExpression(scope, factory, pos)->AsVariableProxy(), new_target_proxy,
- this_function_proxy, pos);
+Expression* ParserBaseTraits<Parser>::NewSuperCallReference(
+ AstNodeFactory* factory, int pos) {
+ VariableProxy* new_target_proxy = delegate()->NewUnresolved(
+ delegate()->ast_value_factory()->new_target_string(), pos);
+ VariableProxy* this_function_proxy = delegate()->NewUnresolved(
+ delegate()->ast_value_factory()->this_function_string(), pos);
+ return factory->NewSuperCallReference(ThisExpression(pos)->AsVariableProxy(),
+ new_target_proxy, this_function_proxy,
+ pos);
}
-
-Expression* ParserTraits::NewTargetExpression(Scope* scope,
- AstNodeFactory* factory,
- int pos) {
+Expression* ParserBaseTraits<Parser>::NewTargetExpression(int pos) {
static const int kNewTargetStringLength = 10;
- auto proxy = scope->NewUnresolved(
- factory, parser_->ast_value_factory()->new_target_string(),
- Variable::NORMAL, pos, pos + kNewTargetStringLength);
+ auto proxy = delegate()->NewUnresolved(
+ delegate()->ast_value_factory()->new_target_string(), pos,
+ pos + kNewTargetStringLength);
proxy->set_is_new_target();
return proxy;
}
-
-Expression* ParserTraits::FunctionSentExpression(Scope* scope,
- AstNodeFactory* factory,
- int pos) {
- // We desugar function.sent into %GeneratorGetInput(generator).
- Zone* zone = parser_->zone();
+Expression* ParserBaseTraits<Parser>::FunctionSentExpression(
+ AstNodeFactory* factory, int pos) const {
+ // We desugar function.sent into %_GeneratorGetInputOrDebugPos(generator).
+ Zone* zone = delegate()->zone();
ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(1, zone);
VariableProxy* generator = factory->NewVariableProxy(
- parser_->function_state_->generator_object_variable());
+ delegate()->function_state_->generator_object_variable());
args->Add(generator, zone);
- return factory->NewCallRuntime(Runtime::kGeneratorGetInput, args, pos);
+ return factory->NewCallRuntime(Runtime::kInlineGeneratorGetInputOrDebugPos,
+ args, pos);
}
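// For example (illustrative JS):
//   function* g() { return function.sent; }
//   let it = g(); it.next(42);  // the generator observes 42 via function.sent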
-
-Literal* ParserTraits::ExpressionFromLiteral(Token::Value token, int pos,
- Scanner* scanner,
- AstNodeFactory* factory) {
+Literal* ParserBaseTraits<Parser>::ExpressionFromLiteral(
+ Token::Value token, int pos, Scanner* scanner,
+ AstNodeFactory* factory) const {
switch (token) {
case Token::NULL_LITERAL:
return factory->NewNullLiteral(pos);
@@ -703,75 +645,46 @@ Literal* ParserTraits::ExpressionFromLiteral(Token::Value token, int pos,
return NULL;
}
-
-Expression* ParserTraits::ExpressionFromIdentifier(const AstRawString* name,
- int start_position,
- int end_position,
- Scope* scope,
- AstNodeFactory* factory) {
- if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
- return scope->NewUnresolved(factory, name, Variable::NORMAL, start_position,
- end_position);
+Expression* ParserBaseTraits<Parser>::ExpressionFromIdentifier(
+ const AstRawString* name, int start_position, int end_position,
+ InferName infer) {
+ if (infer == InferName::kYes && delegate()->fni_ != NULL) {
+ delegate()->fni_->PushVariableName(name);
+ }
+ return delegate()->NewUnresolved(name, start_position, end_position);
}
-
-Expression* ParserTraits::ExpressionFromString(int pos, Scanner* scanner,
- AstNodeFactory* factory) {
+Expression* ParserBaseTraits<Parser>::ExpressionFromString(
+ int pos, Scanner* scanner, AstNodeFactory* factory) const {
const AstRawString* symbol = GetSymbol(scanner);
- if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
+ if (delegate()->fni_ != NULL) delegate()->fni_->PushLiteralName(symbol);
return factory->NewStringLiteral(symbol, pos);
}
-
-Expression* ParserTraits::GetIterator(Expression* iterable,
- AstNodeFactory* factory, int pos) {
+Expression* ParserBaseTraits<Parser>::GetIterator(Expression* iterable,
+ AstNodeFactory* factory,
+ int pos) {
Expression* iterator_symbol_literal =
- factory->NewSymbolLiteral("iterator_symbol", RelocInfo::kNoPosition);
+ factory->NewSymbolLiteral("iterator_symbol", kNoSourcePosition);
Expression* prop =
factory->NewProperty(iterable, iterator_symbol_literal, pos);
- Zone* zone = parser_->zone();
+ Zone* zone = delegate()->zone();
ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(0, zone);
return factory->NewCall(prop, args, pos);
}
-
-Literal* ParserTraits::GetLiteralTheHole(int position,
- AstNodeFactory* factory) {
- return factory->NewTheHoleLiteral(RelocInfo::kNoPosition);
-}
-
-
-Expression* ParserTraits::ParseV8Intrinsic(bool* ok) {
- return parser_->ParseV8Intrinsic(ok);
-}
-
-
-FunctionLiteral* ParserTraits::ParseFunctionLiteral(
- const AstRawString* name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_position, FunctionLiteral::FunctionType type,
- LanguageMode language_mode, bool* ok) {
- return parser_->ParseFunctionLiteral(
- name, function_name_location, function_name_validity, kind,
- function_token_position, type, language_mode, ok);
+Literal* ParserBaseTraits<Parser>::GetLiteralTheHole(
+ int position, AstNodeFactory* factory) const {
+ return factory->NewTheHoleLiteral(kNoSourcePosition);
}
-ClassLiteral* ParserTraits::ParseClassLiteral(
- Type::ExpressionClassifier* classifier, const AstRawString* name,
- Scanner::Location class_name_location, bool name_is_strict_reserved,
- int pos, bool* ok) {
- return parser_->ParseClassLiteral(classifier, name, class_name_location,
- name_is_strict_reserved, pos, ok);
-}
-
-void ParserTraits::MarkTailPosition(Expression* expression) {
+void Parser::MarkTailPosition(Expression* expression) {
expression->MarkTail();
}
Parser::Parser(ParseInfo* info)
- : ParserBase<ParserTraits>(info->zone(), &scanner_, info->stack_limit(),
- info->extension(), info->ast_value_factory(),
- NULL, this),
+ : ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
+ info->extension(), info->ast_value_factory(), NULL),
scanner_(info->unicode_cache()),
reusable_preparser_(NULL),
original_scope_(NULL),
@@ -784,21 +697,20 @@ Parser::Parser(ParseInfo* info)
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
// ParseInfo during background parsing.
- DCHECK(!info->script().is_null() || info->source_stream() != NULL);
+ DCHECK(!info->script().is_null() || info->source_stream() != nullptr ||
+ info->character_stream() != nullptr);
set_allow_lazy(info->allow_lazy_parsing());
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
info->isolate()->is_tail_call_elimination_enabled());
- set_allow_harmony_sloppy(FLAG_harmony_sloppy);
- set_allow_harmony_sloppy_function(FLAG_harmony_sloppy_function);
- set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
- set_allow_harmony_function_name(FLAG_harmony_function_name);
+ set_allow_harmony_for_in(FLAG_harmony_for_in);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
set_allow_harmony_restrictive_declarations(
FLAG_harmony_restrictive_declarations);
- set_allow_harmony_exponentiation_operator(
- FLAG_harmony_exponentiation_operator);
+ set_allow_harmony_async_await(FLAG_harmony_async_await);
+ set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
+ set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -808,10 +720,38 @@ Parser::Parser(ParseInfo* info)
info->set_ast_value_factory(new AstValueFactory(zone(), info->hash_seed()));
info->set_ast_value_factory_owned();
ast_value_factory_ = info->ast_value_factory();
+ ast_node_factory_.set_ast_value_factory(ast_value_factory_);
+ }
+}
+
+void Parser::DeserializeScopeChain(
+ ParseInfo* info, Handle<Context> context,
+ Scope::DeserializationMode deserialization_mode) {
+ DCHECK(ThreadId::Current().Equals(info->isolate()->thread_id()));
+ // TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
+ // context, which will have the "this" binding for script scopes.
+ DeclarationScope* script_scope = NewScriptScope();
+ info->set_script_scope(script_scope);
+ Scope* scope = script_scope;
+ if (!context.is_null() && !context->IsNativeContext()) {
+ scope = Scope::DeserializeScopeChain(info->isolate(), zone(), *context,
+ script_scope, ast_value_factory(),
+ deserialization_mode);
+ if (info->context().is_null()) {
+ DCHECK(deserialization_mode ==
+ Scope::DeserializationMode::kDeserializeOffHeap);
+ } else {
+ // The Scope is backed up by ScopeInfo (which is in the V8 heap); this
+ // means the Parser cannot operate independent of the V8 heap. Tell the
+ // string table to internalize strings and values right after they're
+ // created. This kind of parsing can only be done in the main thread.
+ DCHECK(parsing_on_main_thread_);
+ ast_value_factory()->Internalize(info->isolate());
+ }
}
+ original_scope_ = scope;
}
-
FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
// TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
// see comment for HistogramTimerScope class.
@@ -821,7 +761,9 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
DCHECK(parsing_on_main_thread_);
HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
- TRACE_EVENT0("v8", "V8.Parse");
+ RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::Parse);
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::Parse);
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -839,20 +781,25 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
cached_parse_data_->Initialize();
}
+ DeserializeScopeChain(info, info->context(),
+ Scope::DeserializationMode::kKeepScopeInfo);
+
source = String::Flatten(source);
FunctionLiteral* result;
- if (source->IsExternalTwoByteString()) {
- // Notice that the stream is destroyed at the end of the branch block.
- // The last line of the blocks can't be moved outside, even though they're
- // identical calls.
- ExternalTwoByteStringUtf16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source), 0, source->length());
- scanner_.Initialize(&stream);
- result = DoParseProgram(info);
- } else {
- GenericStringUtf16CharacterStream stream(source, 0, source->length());
- scanner_.Initialize(&stream);
+ {
+ std::unique_ptr<Utf16CharacterStream> stream;
+ if (source->IsExternalTwoByteString()) {
+ stream.reset(new ExternalTwoByteStringUtf16CharacterStream(
+ Handle<ExternalTwoByteString>::cast(source), 0, source->length()));
+ } else if (source->IsExternalOneByteString()) {
+ stream.reset(new ExternalOneByteStringUtf16CharacterStream(
+ Handle<ExternalOneByteString>::cast(source), 0, source->length()));
+ } else {
+ stream.reset(
+ new GenericStringUtf16CharacterStream(source, 0, source->length()));
+ }
+ scanner_.Initialize(stream.get());
result = DoParseProgram(info);
}
if (result != NULL) {
@@ -866,7 +813,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
PrintF("[parsing eval");
} else if (info->script()->name()->IsString()) {
String* name = String::cast(info->script()->name());
- base::SmartArrayPointer<char> name_chars = name->ToCString();
+ std::unique_ptr<char[]> name_chars = name->ToCString();
PrintF("[parsing script: %s", name_chars.get());
} else {
PrintF("[parsing script");
@@ -885,55 +832,54 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
// Note that this function can be called from the main thread or from a
// background thread. We should not access anything Isolate / heap dependent
// via ParseInfo, and also not pass it forward.
- DCHECK(scope_ == NULL);
- DCHECK(target_stack_ == NULL);
+ DCHECK_NULL(scope_state_);
+ DCHECK_NULL(target_stack_);
Mode parsing_mode = FLAG_lazy && allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY;
if (allow_natives() || extension_ != NULL) parsing_mode = PARSE_EAGERLY;
FunctionLiteral* result = NULL;
{
- // TODO(wingo): Add an outer SCRIPT_SCOPE corresponding to the native
- // context, which will have the "this" binding for script scopes.
- Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
- info->set_script_scope(scope);
- if (!info->context().is_null() && !info->context()->IsNativeContext()) {
- scope = Scope::DeserializeScopeChain(info->isolate(), zone(),
- *info->context(), scope);
- // The Scope is backed up by ScopeInfo (which is in the V8 heap); this
- // means the Parser cannot operate independent of the V8 heap. Tell the
- // string table to internalize strings and values right after they're
- // created. This kind of parsing can only be done in the main thread.
- DCHECK(parsing_on_main_thread_);
- ast_value_factory()->Internalize(info->isolate());
+ Scope* outer = original_scope_;
+ // If there might be a reference to the global 'this', predeclare it as
+ // a dynamic global on the script scope.
+ if (outer->GetReceiverScope()->is_script_scope()) {
+ info->script_scope()->DeclareDynamicGlobal(
+ ast_value_factory()->this_string(), Variable::THIS);
}
- original_scope_ = scope;
+ DCHECK(outer);
if (info->is_eval()) {
- if (!scope->is_script_scope() || is_strict(info->language_mode())) {
+ if (!outer->is_script_scope() || is_strict(info->language_mode())) {
parsing_mode = PARSE_EAGERLY;
}
- scope = NewScope(scope, EVAL_SCOPE);
+ outer = NewEvalScope(outer);
} else if (info->is_module()) {
- scope = NewScope(scope, MODULE_SCOPE);
+ DCHECK_EQ(outer, info->script_scope());
+ outer = NewModuleScope(info->script_scope());
}
+ DeclarationScope* scope = outer->AsDeclarationScope();
+
scope->set_start_position(0);
// Enter 'scope' with the given parsing mode.
ParsingModeScope parsing_mode_scope(this, parsing_mode);
- AstNodeFactory function_factory(ast_value_factory());
- FunctionState function_state(&function_state_, &scope_, scope,
- kNormalFunction, &function_factory);
+ FunctionState function_state(&function_state_, &scope_state_, scope,
+ kNormalFunction);
- // Don't count the mode in the use counters--give the program a chance
- // to enable script/module-wide strict mode below.
- scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
bool ok = true;
int beg_pos = scanner()->location().beg_pos;
- if (info->is_module()) {
+ parsing_module_ = info->is_module();
+ if (parsing_module_) {
ParseModuleItemList(body, &ok);
+ ok = ok &&
+ module()->Validate(this->scope()->AsModuleScope(),
+ &pending_error_handler_, zone());
} else {
+ // Don't count the mode in the use counters--give the program a chance
+ // to enable script-wide strict mode below.
+ this->scope()->SetLanguageMode(info->language_mode());
ParseStatementList(body, Token::EOS, &ok);
}
@@ -943,16 +889,18 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
+ CheckDecimalLiteralWithLeadingZero(use_counts_, beg_pos,
+ scanner()->location().end_pos);
}
- if (ok && is_sloppy(language_mode()) && allow_harmony_sloppy_function()) {
+ if (ok && is_sloppy(language_mode())) {
// TODO(littledan): Function bindings on the global object that modify
// pre-existing bindings should be made writable, enumerable and
// nonconfigurable if possible, whereas this code will leave attributes
// unchanged if the property already exists.
- InsertSloppyBlockFunctionVarBindings(scope, &ok);
+ InsertSloppyBlockFunctionVarBindings(scope, nullptr, &ok);
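+    // This implements the Annex B hoisting that makes, e.g., sloppy-mode
+    // `{ function f() {} } f();` work at script scope.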
}
if (ok) {
- CheckConflictingVarDeclarations(scope_, &ok);
+ CheckConflictingVarDeclarations(scope, &ok);
}
if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) {
@@ -966,9 +914,9 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
}
if (ok) {
- ParserTraits::RewriteDestructuringAssignments();
+ RewriteDestructuringAssignments();
result = factory()->NewScriptOrEvalFunctionLiteral(
- scope_, body, function_state.materialized_literal_count(),
+ scope, body, function_state.materialized_literal_count(),
function_state.expected_property_count());
}
}
@@ -984,8 +932,10 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
+ RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::ParseLazy);
HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
- TRACE_EVENT0("v8", "V8.ParseLazy");
+ TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
+ isolate, &tracing::TraceEventStatsTable::ParseLazy);
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -993,100 +943,115 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
timer.Start();
}
Handle<SharedFunctionInfo> shared_info = info->shared_info();
+ DeserializeScopeChain(info, info->context(),
+ Scope::DeserializationMode::kKeepScopeInfo);
// Initialize parser state.
source = String::Flatten(source);
FunctionLiteral* result;
- if (source->IsExternalTwoByteString()) {
- ExternalTwoByteStringUtf16CharacterStream stream(
- Handle<ExternalTwoByteString>::cast(source),
- shared_info->start_position(),
- shared_info->end_position());
- result = ParseLazy(isolate, info, &stream);
- } else {
- GenericStringUtf16CharacterStream stream(source,
- shared_info->start_position(),
- shared_info->end_position());
- result = ParseLazy(isolate, info, &stream);
+ {
+ std::unique_ptr<Utf16CharacterStream> stream;
+ if (source->IsExternalTwoByteString()) {
+ stream.reset(new ExternalTwoByteStringUtf16CharacterStream(
+ Handle<ExternalTwoByteString>::cast(source),
+ shared_info->start_position(), shared_info->end_position()));
+ } else if (source->IsExternalOneByteString()) {
+ stream.reset(new ExternalOneByteStringUtf16CharacterStream(
+ Handle<ExternalOneByteString>::cast(source),
+ shared_info->start_position(), shared_info->end_position()));
+ } else {
+ stream.reset(new GenericStringUtf16CharacterStream(
+ source, shared_info->start_position(), shared_info->end_position()));
+ }
+ Handle<String> name(String::cast(shared_info->name()));
+ result =
+ DoParseLazy(info, ast_value_factory()->GetString(name), stream.get());
+ if (result != nullptr) {
+ Handle<String> inferred_name(shared_info->inferred_name());
+ result->set_inferred_name(inferred_name);
+ }
}
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
- base::SmartArrayPointer<char> name_chars =
- result->debug_name()->ToCString();
+ std::unique_ptr<char[]> name_chars = result->debug_name()->ToCString();
PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
}
return result;
}
-static FunctionLiteral::FunctionType ComputeFunctionType(
- Handle<SharedFunctionInfo> shared_info) {
- if (shared_info->is_declaration()) {
+static FunctionLiteral::FunctionType ComputeFunctionType(ParseInfo* info) {
+ if (info->is_declaration()) {
return FunctionLiteral::kDeclaration;
- } else if (shared_info->is_named_expression()) {
+ } else if (info->is_named_expression()) {
return FunctionLiteral::kNamedExpression;
- } else if (IsConciseMethod(shared_info->kind()) ||
- IsAccessorFunction(shared_info->kind())) {
+ } else if (IsConciseMethod(info->function_kind()) ||
+ IsAccessorFunction(info->function_kind())) {
return FunctionLiteral::kAccessorOrMethod;
}
return FunctionLiteral::kAnonymousExpression;
}
-FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
- Utf16CharacterStream* source) {
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
+FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
+ const AstRawString* raw_name,
+ Utf16CharacterStream* source) {
scanner_.Initialize(source);
- DCHECK(scope_ == NULL);
- DCHECK(target_stack_ == NULL);
+ DCHECK_NULL(scope_state_);
+ DCHECK_NULL(target_stack_);
- Handle<String> name(String::cast(shared_info->name()));
DCHECK(ast_value_factory());
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
- const AstRawString* raw_name = ast_value_factory()->GetString(name);
fni_->PushEnclosingName(raw_name);
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
  // Placeholder for the result.
- FunctionLiteral* result = NULL;
+ FunctionLiteral* result = nullptr;
{
// Parse the function literal.
- Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
- info->set_script_scope(scope);
- if (!info->closure().is_null()) {
- // Ok to use Isolate here, since lazy function parsing is only done in the
- // main thread.
- DCHECK(parsing_on_main_thread_);
- scope = Scope::DeserializeScopeChain(isolate, zone(),
- info->closure()->context(), scope);
+ Scope* scope = original_scope_;
+ DCHECK(scope);
+    // If there is any chance of a reference to the global 'this', predeclare
+    // it as a dynamic global on the script scope.
+ if (info->is_arrow() && scope->GetReceiverScope()->is_script_scope()) {
+ info->script_scope()->DeclareDynamicGlobal(
+ ast_value_factory()->this_string(), Variable::THIS);
}
- original_scope_ = scope;
- AstNodeFactory function_factory(ast_value_factory());
- FunctionState function_state(&function_state_, &scope_, scope,
- shared_info->kind(), &function_factory);
+ FunctionState function_state(&function_state_, &scope_state_, scope,
+ info->function_kind());
DCHECK(is_sloppy(scope->language_mode()) ||
is_strict(info->language_mode()));
- DCHECK(info->language_mode() == shared_info->language_mode());
- FunctionLiteral::FunctionType function_type =
- ComputeFunctionType(shared_info);
+ FunctionLiteral::FunctionType function_type = ComputeFunctionType(info);
bool ok = true;
- if (shared_info->is_arrow()) {
+ if (info->is_arrow()) {
+ bool is_async = allow_harmony_async_await() && info->is_async();
+ if (is_async) {
+ DCHECK(!scanner()->HasAnyLineTerminatorAfterNext());
+ if (!Check(Token::ASYNC)) {
+ CHECK(stack_overflow());
+ return nullptr;
+ }
+ if (!(peek_any_identifier() || peek() == Token::LPAREN)) {
+ CHECK(stack_overflow());
+ return nullptr;
+ }
+ }
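+      // Lazy re-parsing of, e.g., `async (a, b) => a + b` starts at `async`,
+      // which the checks above consume again before the arrow's parameter
+      // list.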
+
// TODO(adamk): We should construct this scope from the ScopeInfo.
- Scope* scope =
- NewScope(scope_, FUNCTION_SCOPE, FunctionKind::kArrowFunction);
+ DeclarationScope* scope = NewFunctionScope(FunctionKind::kArrowFunction);
// These two bits only need to be explicitly set because we're
// not passing the ScopeInfo to the Scope constructor.
// TODO(adamk): Remove these calls once the above NewScope call
// passes the ScopeInfo.
- if (shared_info->scope_info()->CallsEval()) {
+ if (info->calls_eval()) {
scope->RecordEvalCall();
}
- SetLanguageMode(scope, shared_info->language_mode());
+ SetLanguageMode(scope, info->language_mode());
- scope->set_start_position(shared_info->start_position());
+ scope->set_start_position(info->start_position());
ExpressionClassifier formals_classifier(this);
ParserFormalParameters formals(scope);
Checkpoint checkpoint(this);
@@ -1094,7 +1059,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
      // Parsing patterns as variable reference expressions creates
      // NewUnresolved references in the current scope. Enter the arrow
      // function scope for formal parameter parsing.
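+      // E.g. in `([a, b]) => a + b` the pattern is first parsed as an array
+      // literal, creating references that must land in the arrow scope.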
- BlockState block_state(&scope_, scope);
+ BlockState block_state(&scope_state_, scope);
if (Check(Token::LPAREN)) {
// '(' StrictFormalParameters ')'
ParseFormalParameterList(&formals, &formals_classifier, &ok);
@@ -1113,8 +1078,8 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
checkpoint.Restore(&formals.materialized_literals_count);
// Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
// not be observable, or else the preparser would have failed.
- Expression* expression =
- ParseArrowFunctionLiteral(true, formals, formals_classifier, &ok);
+ Expression* expression = ParseArrowFunctionLiteral(
+ true, formals, is_async, formals_classifier, &ok);
if (ok) {
// Scanning must end at the same position that was recorded
// previously. If not, parsing has been interrupted due to a stack
@@ -1122,7 +1087,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
// concise body happens to be a valid expression. This is a problem
// only for arrow functions with single expression bodies, since there
// is no end token such as "}" for normal functions.
- if (scanner()->location().end_pos == shared_info->end_position()) {
+ if (scanner()->location().end_pos == info->end_position()) {
// The pre-parser saw an arrow function here, so the full parser
// must produce a FunctionLiteral.
DCHECK(expression->IsFunctionLiteral());
@@ -1132,34 +1097,29 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
}
}
}
- } else if (shared_info->is_default_constructor()) {
+ } else if (info->is_default_constructor()) {
+ DCHECK_EQ(this->scope(), scope);
result = DefaultConstructor(
- raw_name, IsSubclassConstructor(shared_info->kind()), scope,
- shared_info->start_position(), shared_info->end_position(),
- shared_info->language_mode());
+ raw_name, IsSubclassConstructor(info->function_kind()),
+ info->start_position(), info->end_position(), info->language_mode());
} else {
result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
- kSkipFunctionNameCheck, shared_info->kind(),
- RelocInfo::kNoPosition, function_type,
- shared_info->language_mode(), &ok);
+ kSkipFunctionNameCheck,
+ info->function_kind(), kNoSourcePosition,
+ function_type, info->language_mode(), &ok);
}
// Make sure the results agree.
- DCHECK(ok == (result != NULL));
+ DCHECK(ok == (result != nullptr));
}
// Make sure the target stack is empty.
- DCHECK(target_stack_ == NULL);
-
- if (result != NULL) {
- Handle<String> inferred_name(shared_info->inferred_name());
- result->set_inferred_name(inferred_name);
- }
+ DCHECK_NULL(target_stack_);
return result;
}
-void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
- bool* ok) {
+void Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
+ bool* ok) {
// StatementList ::
// (StatementListItem)* <end_token>
@@ -1178,7 +1138,7 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
}
Scanner::Location token_loc = scanner()->peek_location();
- Statement* stat = ParseStatementListItem(CHECK_OK);
+ Statement* stat = ParseStatementListItem(CHECK_OK_VOID);
if (stat == NULL || stat->IsEmpty()) {
directive_prologue = false; // End of directive prologue.
continue;
@@ -1199,26 +1159,26 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
token_loc.end_pos - token_loc.beg_pos ==
ast_value_factory()->use_strict_string()->length() + 2;
if (use_strict_found) {
- if (is_sloppy(scope_->language_mode())) {
+ if (is_sloppy(language_mode())) {
RaiseLanguageMode(STRICT);
}
- if (!scope_->HasSimpleParameters()) {
+ if (!this->scope()->HasSimpleParameters()) {
// TC39 deemed "use strict" directives to be an error when occurring
// in the body of a function with non-simple parameter list, on
// 29/7/2015. https://goo.gl/ueA7Ln
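+          // E.g. `function f(a = 1) { "use strict"; }` must be rejected.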
const AstRawString* string = literal->raw_value()->AsString();
- ParserTraits::ReportMessageAt(
- token_loc, MessageTemplate::kIllegalLanguageModeDirective,
- string);
+ ReportMessageAt(token_loc,
+ MessageTemplate::kIllegalLanguageModeDirective,
+ string);
*ok = false;
- return nullptr;
+ return;
}
// Because declarations in strict eval code don't leak into the scope
// of the eval call, it is likely that functions declared in strict
// eval code will be used within the eval code, so lazy parsing is
// probably not a win.
- if (scope_->is_eval_scope()) mode_ = PARSE_EAGERLY;
+ if (this->scope()->is_eval_scope()) mode_ = PARSE_EAGERLY;
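+        // E.g. in `eval('"use strict"; function f() {} f();')` the binding
+        // for `f` cannot escape the eval, so eager parsing likely pays off.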
} else if (literal->raw_value()->AsString() ==
ast_value_factory()->use_asm_string() &&
token_loc.end_pos - token_loc.beg_pos ==
@@ -1226,7 +1186,8 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
        // Store the usage count; the actual use counter on the isolate is
// incremented after parsing is done.
++use_counts_[v8::Isolate::kUseAsm];
- scope_->SetAsmModule();
+ DCHECK(this->scope()->is_declaration_scope());
+ this->scope()->AsDeclarationScope()->set_asm_module();
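+        // E.g. `function m(stdlib) { "use asm"; ... }` marks its function
+        // scope as an asm.js module.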
} else {
// Should not change mode, but will increment UseCounter
// if appropriate. Ditto usages below.
@@ -1243,8 +1204,6 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
body->Add(stat, zone());
}
-
- return 0;
}
@@ -1253,18 +1212,15 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
// StatementListItem:
// Statement
// Declaration
-
- switch (peek()) {
+ const Token::Value peeked = peek();
+ switch (peeked) {
case Token::FUNCTION:
- return ParseFunctionDeclaration(NULL, ok);
+ return ParseHoistableDeclaration(NULL, false, ok);
case Token::CLASS:
Consume(Token::CLASS);
- return ParseClassDeclaration(NULL, ok);
+ return ParseClassDeclaration(NULL, false, ok);
case Token::CONST:
- if (allow_const()) {
- return ParseVariableStatement(kStatementListItem, NULL, ok);
- }
- break;
+ return ParseVariableStatement(kStatementListItem, NULL, ok);
case Token::VAR:
return ParseVariableStatement(kStatementListItem, NULL, ok);
case Token::LET:
@@ -1272,6 +1228,13 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
return ParseVariableStatement(kStatementListItem, NULL, ok);
}
break;
+ case Token::ASYNC:
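+      // `async function f() {}` is a declaration only when `function` follows
+      // `async` on the same line; otherwise `async` falls through and is
+      // parsed as an ordinary expression.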
+ if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+ !scanner()->HasAnyLineTerminatorAfterNext()) {
+ Consume(Token::ASYNC);
+ return ParseAsyncFunctionDeclaration(NULL, false, ok);
+ }
+ /* falls through */
default:
break;
}
@@ -1280,7 +1243,7 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
Statement* Parser::ParseModuleItem(bool* ok) {
- // (Ecma 262 6th Edition, 15.2):
+ // ecma262/#prod-ModuleItem
// ModuleItem :
// ImportDeclaration
// ExportDeclaration
@@ -1288,7 +1251,8 @@ Statement* Parser::ParseModuleItem(bool* ok) {
switch (peek()) {
case Token::IMPORT:
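+      // An import introduces no statement of its own; even a bare
+      // `import "mod";` only records a module request.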
- return ParseImportDeclaration(ok);
+ ParseImportDeclaration(CHECK_OK);
+ return factory()->NewEmptyStatement(kNoSourcePosition);
case Token::EXPORT:
return ParseExportDeclaration(ok);
default:
@@ -1297,40 +1261,22 @@ Statement* Parser::ParseModuleItem(bool* ok) {
}
-void* Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
- // (Ecma 262 6th Edition, 15.2):
+void Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
+ // ecma262/#prod-Module
// Module :
// ModuleBody?
//
+ // ecma262/#prod-ModuleItemList
// ModuleBody :
// ModuleItem*
- DCHECK(scope_->is_module_scope());
- RaiseLanguageMode(STRICT);
-
+ DCHECK(scope()->is_module_scope());
while (peek() != Token::EOS) {
- Statement* stat = ParseModuleItem(CHECK_OK);
+ Statement* stat = ParseModuleItem(CHECK_OK_VOID);
if (stat && !stat->IsEmpty()) {
body->Add(stat, zone());
}
}
-
- // Check that all exports are bound.
- ModuleDescriptor* descriptor = scope_->module();
- for (ModuleDescriptor::Iterator it = descriptor->iterator(); !it.done();
- it.Advance()) {
- if (scope_->LookupLocal(it.local_name()) == NULL) {
- // TODO(adamk): Pass both local_name and export_name once ParserTraits
- // supports multiple arg error messages.
- // Also try to report this at a better location.
- ParserTraits::ReportMessage(MessageTemplate::kModuleExportUndefined,
- it.local_name());
- *ok = false;
- return NULL;
- }
- }
-
- return NULL;
}
@@ -1343,10 +1289,10 @@ const AstRawString* Parser::ParseModuleSpecifier(bool* ok) {
}
-void* Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
- ZoneList<Scanner::Location>* export_locations,
- ZoneList<const AstRawString*>* local_names,
- Scanner::Location* reserved_loc, bool* ok) {
+void Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
+ ZoneList<Scanner::Location>* export_locations,
+ ZoneList<const AstRawString*>* local_names,
+ Scanner::Location* reserved_loc, bool* ok) {
// ExportClause :
// '{' '}'
// '{' ExportsList '}'
@@ -1360,20 +1306,20 @@ void* Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
// IdentifierName
// IdentifierName 'as' IdentifierName
- Expect(Token::LBRACE, CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK_VOID);
Token::Value name_tok;
while ((name_tok = peek()) != Token::RBRACE) {
// Keep track of the first reserved word encountered in case our
// caller needs to report an error.
if (!reserved_loc->IsValid() &&
- !Token::IsIdentifier(name_tok, STRICT, false)) {
+ !Token::IsIdentifier(name_tok, STRICT, false, parsing_module_)) {
*reserved_loc = scanner()->location();
}
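+    // Passing parsing_module_ additionally reserves `await`, as required in
+    // the module goal.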
- const AstRawString* local_name = ParseIdentifierName(CHECK_OK);
+ const AstRawString* local_name = ParseIdentifierName(CHECK_OK_VOID);
const AstRawString* export_name = NULL;
if (CheckContextualKeyword(CStrVector("as"))) {
- export_name = ParseIdentifierName(CHECK_OK);
+ export_name = ParseIdentifierName(CHECK_OK_VOID);
}
if (export_name == NULL) {
export_name = local_name;
@@ -1382,16 +1328,15 @@ void* Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
local_names->Add(local_name, zone());
export_locations->Add(scanner()->location(), zone());
if (peek() == Token::RBRACE) break;
- Expect(Token::COMMA, CHECK_OK);
+ Expect(Token::COMMA, CHECK_OK_VOID);
}
- Expect(Token::RBRACE, CHECK_OK);
-
- return 0;
+ Expect(Token::RBRACE, CHECK_OK_VOID);
}
-ZoneList<ImportDeclaration*>* Parser::ParseNamedImports(int pos, bool* ok) {
+ZoneList<const Parser::NamedImport*>* Parser::ParseNamedImports(
+ int pos, bool* ok) {
// NamedImports :
// '{' '}'
// '{' ImportsList '}'
@@ -1407,8 +1352,7 @@ ZoneList<ImportDeclaration*>* Parser::ParseNamedImports(int pos, bool* ok) {
Expect(Token::LBRACE, CHECK_OK);
- ZoneList<ImportDeclaration*>* result =
- new (zone()) ZoneList<ImportDeclaration*>(1, zone());
+ auto result = new (zone()) ZoneList<const NamedImport*>(1, zone());
while (peek() != Token::RBRACE) {
const AstRawString* import_name = ParseIdentifierName(CHECK_OK);
const AstRawString* local_name = import_name;
@@ -1418,39 +1362,42 @@ ZoneList<ImportDeclaration*>* Parser::ParseNamedImports(int pos, bool* ok) {
if (CheckContextualKeyword(CStrVector("as"))) {
local_name = ParseIdentifierName(CHECK_OK);
}
- if (!Token::IsIdentifier(scanner()->current_token(), STRICT, false)) {
+ if (!Token::IsIdentifier(scanner()->current_token(), STRICT, false,
+ parsing_module_)) {
*ok = false;
ReportMessage(MessageTemplate::kUnexpectedReserved);
- return NULL;
+ return nullptr;
} else if (IsEvalOrArguments(local_name)) {
*ok = false;
ReportMessage(MessageTemplate::kStrictEvalArguments);
- return NULL;
+ return nullptr;
}
- VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
- ImportDeclaration* declaration =
- factory()->NewImportDeclaration(proxy, import_name, NULL, scope_, pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
- result->Add(declaration, zone());
+
+ DeclareVariable(local_name, CONST, kNeedsInitialization, position(),
+ CHECK_OK);
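+    // Named imports are immutable: after `import { a } from "m";`, any
+    // assignment `a = 1` throws a TypeError.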
+
+ NamedImport* import = new (zone()) NamedImport(
+ import_name, local_name, scanner()->location());
+ result->Add(import, zone());
+
if (peek() == Token::RBRACE) break;
Expect(Token::COMMA, CHECK_OK);
}
Expect(Token::RBRACE, CHECK_OK);
-
return result;
}
-Statement* Parser::ParseImportDeclaration(bool* ok) {
+void Parser::ParseImportDeclaration(bool* ok) {
// ImportDeclaration :
// 'import' ImportClause 'from' ModuleSpecifier ';'
// 'import' ModuleSpecifier ';'
//
// ImportClause :
+ // ImportedDefaultBinding
// NameSpaceImport
// NamedImports
- // ImportedDefaultBinding
// ImportedDefaultBinding ',' NameSpaceImport
// ImportedDefaultBinding ',' NamedImports
//
@@ -1458,158 +1405,159 @@ Statement* Parser::ParseImportDeclaration(bool* ok) {
// '*' 'as' ImportedBinding
int pos = peek_position();
- Expect(Token::IMPORT, CHECK_OK);
+ Expect(Token::IMPORT, CHECK_OK_VOID);
Token::Value tok = peek();
// 'import' ModuleSpecifier ';'
if (tok == Token::STRING) {
- const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
- scope_->module()->AddModuleRequest(module_specifier, zone());
- ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement(pos);
+ const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
+ ExpectSemicolon(CHECK_OK_VOID);
+ module()->AddEmptyImport(module_specifier, scanner()->location(), zone());
+ return;
}
// Parse ImportedDefaultBinding if present.
- ImportDeclaration* import_default_declaration = NULL;
+ const AstRawString* import_default_binding = nullptr;
+ Scanner::Location import_default_binding_loc;
if (tok != Token::MUL && tok != Token::LBRACE) {
- const AstRawString* local_name =
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
- VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
- import_default_declaration = factory()->NewImportDeclaration(
- proxy, ast_value_factory()->default_string(), NULL, scope_, pos);
- Declare(import_default_declaration, DeclarationDescriptor::NORMAL, true,
- CHECK_OK);
- }
-
- const AstRawString* module_instance_binding = NULL;
- ZoneList<ImportDeclaration*>* named_declarations = NULL;
- if (import_default_declaration == NULL || Check(Token::COMMA)) {
+ import_default_binding =
+ ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK_VOID);
+ import_default_binding_loc = scanner()->location();
+ DeclareVariable(import_default_binding, CONST, kNeedsInitialization, pos,
+ CHECK_OK_VOID);
+ }
+
+ // Parse NameSpaceImport or NamedImports if present.
+ const AstRawString* module_namespace_binding = nullptr;
+ Scanner::Location module_namespace_binding_loc;
+ const ZoneList<const NamedImport*>* named_imports = nullptr;
+ if (import_default_binding == nullptr || Check(Token::COMMA)) {
switch (peek()) {
case Token::MUL: {
Consume(Token::MUL);
- ExpectContextualKeyword(CStrVector("as"), CHECK_OK);
- module_instance_binding =
- ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
- // TODO(ES6): Add an appropriate declaration.
+ ExpectContextualKeyword(CStrVector("as"), CHECK_OK_VOID);
+ module_namespace_binding =
+ ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK_VOID);
+ module_namespace_binding_loc = scanner()->location();
+ DeclareVariable(module_namespace_binding, CONST, kCreatedInitialized,
+ pos, CHECK_OK_VOID);
break;
}
case Token::LBRACE:
- named_declarations = ParseNamedImports(pos, CHECK_OK);
+ named_imports = ParseNamedImports(pos, CHECK_OK_VOID);
break;
default:
*ok = false;
ReportUnexpectedToken(scanner()->current_token());
- return NULL;
+ return;
}
}
- ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
- const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
- scope_->module()->AddModuleRequest(module_specifier, zone());
+ ExpectContextualKeyword(CStrVector("from"), CHECK_OK_VOID);
+ const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK_VOID);
+ ExpectSemicolon(CHECK_OK_VOID);
+
+ // Now that we have all the information, we can make the appropriate
+ // declarations.
- if (module_instance_binding != NULL) {
- // TODO(ES6): Set the module specifier for the module namespace binding.
+ // TODO(neis): Would prefer to call DeclareVariable for each case below rather
+ // than above and in ParseNamedImports, but then a possible error message
+ // would point to the wrong location. Maybe have a DeclareAt version of
+ // Declare that takes a location?
+
+ if (module_namespace_binding != nullptr) {
+ module()->AddStarImport(module_namespace_binding, module_specifier,
+ module_namespace_binding_loc, zone());
}
- if (import_default_declaration != NULL) {
- import_default_declaration->set_module_specifier(module_specifier);
+ if (import_default_binding != nullptr) {
+ module()->AddImport(ast_value_factory()->default_string(),
+ import_default_binding, module_specifier,
+ import_default_binding_loc, zone());
}
- if (named_declarations != NULL) {
- for (int i = 0; i < named_declarations->length(); ++i) {
- named_declarations->at(i)->set_module_specifier(module_specifier);
+ if (named_imports != nullptr) {
+ if (named_imports->length() == 0) {
+ module()->AddEmptyImport(module_specifier, scanner()->location(), zone());
+ } else {
+ for (int i = 0; i < named_imports->length(); ++i) {
+ const NamedImport* import = named_imports->at(i);
+ module()->AddImport(import->import_name, import->local_name,
+ module_specifier, import->location, zone());
+ }
}
}
-
- ExpectSemicolon(CHECK_OK);
- return factory()->NewEmptyStatement(pos);
}
Statement* Parser::ParseExportDefault(bool* ok) {
// Supports the following productions, starting after the 'default' token:
- // 'export' 'default' FunctionDeclaration
+ // 'export' 'default' HoistableDeclaration
// 'export' 'default' ClassDeclaration
// 'export' 'default' AssignmentExpression[In] ';'
Expect(Token::DEFAULT, CHECK_OK);
Scanner::Location default_loc = scanner()->location();
- const AstRawString* default_string = ast_value_factory()->default_string();
- ZoneList<const AstRawString*> names(1, zone());
+ ZoneList<const AstRawString*> local_names(1, zone());
Statement* result = nullptr;
- Expression* default_export = nullptr;
switch (peek()) {
- case Token::FUNCTION: {
- Consume(Token::FUNCTION);
- int pos = position();
- bool is_generator = Check(Token::MUL);
- if (peek() == Token::LPAREN) {
- // FunctionDeclaration[+Default] ::
- // 'function' '(' FormalParameters ')' '{' FunctionBody '}'
- //
- // GeneratorDeclaration[+Default] ::
- // 'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
- default_export = ParseFunctionLiteral(
- default_string, Scanner::Location::invalid(),
- kSkipFunctionNameCheck,
- is_generator ? FunctionKind::kGeneratorFunction
- : FunctionKind::kNormalFunction,
- pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
- result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
- } else {
- result = ParseFunctionDeclaration(pos, is_generator, &names, CHECK_OK);
- }
+ case Token::FUNCTION:
+ result = ParseHoistableDeclaration(&local_names, true, CHECK_OK);
break;
- }
case Token::CLASS:
Consume(Token::CLASS);
- if (peek() == Token::EXTENDS || peek() == Token::LBRACE) {
- // ClassDeclaration[+Default] ::
- // 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
- default_export = ParseClassLiteral(nullptr, default_string,
- Scanner::Location::invalid(), false,
- position(), CHECK_OK);
- result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
- } else {
- result = ParseClassDeclaration(&names, CHECK_OK);
- }
+ result = ParseClassDeclaration(&local_names, true, CHECK_OK);
break;
+ case Token::ASYNC:
+ if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+ !scanner()->HasAnyLineTerminatorAfterNext()) {
+ Consume(Token::ASYNC);
+ result = ParseAsyncFunctionDeclaration(&local_names, true, CHECK_OK);
+ break;
+ }
+ /* falls through */
+
default: {
- int pos = peek_position();
+ int pos = position();
ExpressionClassifier classifier(this);
- Expression* expr = ParseAssignmentExpression(true, &classifier, CHECK_OK);
+ Expression* value =
+ ParseAssignmentExpression(true, &classifier, CHECK_OK);
RewriteNonPattern(&classifier, CHECK_OK);
+ SetFunctionName(value, ast_value_factory()->default_string());
+
+ const AstRawString* local_name =
+ ast_value_factory()->star_default_star_string();
+ local_names.Add(local_name, zone());
+
+ // It's fine to declare this as CONST because the user has no way of
+ // writing to it.
+ Declaration* decl = DeclareVariable(local_name, CONST, pos, CHECK_OK);
+ decl->proxy()->var()->set_initializer_position(position());
+
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, decl->proxy(), value, kNoSourcePosition);
+ result = factory()->NewExpressionStatement(assignment, kNoSourcePosition);
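+      // E.g. `export default 2 + 2;` binds the value to the internal
+      // `*default*` name and exports it under the name "default".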
ExpectSemicolon(CHECK_OK);
- result = factory()->NewExpressionStatement(expr, pos);
break;
}
}
- DCHECK_LE(names.length(), 1);
- if (names.length() == 1) {
- scope_->module()->AddLocalExport(default_string, names.first(), zone(), ok);
- if (!*ok) {
- ParserTraits::ReportMessageAt(
- default_loc, MessageTemplate::kDuplicateExport, default_string);
- return nullptr;
- }
- } else {
- // TODO(ES6): Assign result to a const binding with the name "*default*"
- // and add an export entry with "*default*" as the local name.
- USE(default_export);
- }
+ DCHECK_EQ(local_names.length(), 1);
+ module()->AddExport(local_names.first(),
+ ast_value_factory()->default_string(), default_loc,
+ zone());
+ DCHECK_NOT_NULL(result);
return result;
}
-
Statement* Parser::ParseExportDeclaration(bool* ok) {
// ExportDeclaration:
// 'export' '*' 'from' ModuleSpecifier ';'
@@ -1621,7 +1569,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
int pos = peek_position();
Expect(Token::EXPORT, CHECK_OK);
- Statement* result = NULL;
+ Statement* result = nullptr;
ZoneList<const AstRawString*> names(1, zone());
switch (peek()) {
case Token::DEFAULT:
@@ -1631,9 +1579,8 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
Consume(Token::MUL);
ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
- scope_->module()->AddModuleRequest(module_specifier, zone());
- // TODO(ES6): scope_->module()->AddStarExport(...)
ExpectSemicolon(CHECK_OK);
+ module()->AddStarExport(module_specifier, scanner()->location(), zone());
return factory()->NewEmptyStatement(pos);
}
@@ -1652,50 +1599,46 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
Scanner::Location reserved_loc = Scanner::Location::invalid();
ZoneList<const AstRawString*> export_names(1, zone());
ZoneList<Scanner::Location> export_locations(1, zone());
- ZoneList<const AstRawString*> local_names(1, zone());
- ParseExportClause(&export_names, &export_locations, &local_names,
+ ZoneList<const AstRawString*> original_names(1, zone());
+ ParseExportClause(&export_names, &export_locations, &original_names,
&reserved_loc, CHECK_OK);
- const AstRawString* indirect_export_module_specifier = NULL;
+ const AstRawString* module_specifier = nullptr;
if (CheckContextualKeyword(CStrVector("from"))) {
- indirect_export_module_specifier = ParseModuleSpecifier(CHECK_OK);
+ module_specifier = ParseModuleSpecifier(CHECK_OK);
} else if (reserved_loc.IsValid()) {
// No FromClause, so reserved words are invalid in ExportClause.
*ok = false;
ReportMessageAt(reserved_loc, MessageTemplate::kUnexpectedReserved);
- return NULL;
+ return nullptr;
}
ExpectSemicolon(CHECK_OK);
const int length = export_names.length();
- DCHECK_EQ(length, local_names.length());
+ DCHECK_EQ(length, original_names.length());
DCHECK_EQ(length, export_locations.length());
- if (indirect_export_module_specifier == NULL) {
+ if (module_specifier == nullptr) {
for (int i = 0; i < length; ++i) {
- scope_->module()->AddLocalExport(export_names[i], local_names[i],
- zone(), ok);
- if (!*ok) {
- ParserTraits::ReportMessageAt(export_locations[i],
- MessageTemplate::kDuplicateExport,
- export_names[i]);
- return NULL;
- }
+ module()->AddExport(original_names[i], export_names[i],
+ export_locations[i], zone());
}
+ } else if (length == 0) {
+ module()->AddEmptyImport(module_specifier, scanner()->location(),
+ zone());
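+        // E.g. `export {} from "m"` creates no bindings but must still
+        // record the module request for "m".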
} else {
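+      // E.g. `export { a as b } from "m"` forwards "a" from "m" under the
+      // name "b" without creating any local binding.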
- scope_->module()->AddModuleRequest(indirect_export_module_specifier,
- zone());
for (int i = 0; i < length; ++i) {
- // TODO(ES6): scope_->module()->AddIndirectExport(...);(
+ module()->AddExport(original_names[i], export_names[i],
+ module_specifier, export_locations[i], zone());
}
}
return factory()->NewEmptyStatement(pos);
}
case Token::FUNCTION:
- result = ParseFunctionDeclaration(&names, CHECK_OK);
+ result = ParseHoistableDeclaration(&names, false, CHECK_OK);
break;
case Token::CLASS:
Consume(Token::CLASS);
- result = ParseClassDeclaration(&names, CHECK_OK);
+ result = ParseClassDeclaration(&names, false, CHECK_OK);
break;
case Token::VAR:
@@ -1704,21 +1647,26 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
result = ParseVariableStatement(kStatementListItem, &names, CHECK_OK);
break;
+ case Token::ASYNC:
+ if (allow_harmony_async_await()) {
+ // TODO(neis): Why don't we have the same check here as in
+ // ParseStatementListItem?
+ Consume(Token::ASYNC);
+ result = ParseAsyncFunctionDeclaration(&names, false, CHECK_OK);
+ break;
+ }
+ /* falls through */
+
default:
*ok = false;
ReportUnexpectedToken(scanner()->current_token());
- return NULL;
+ return nullptr;
}
- // Extract declared names into export declarations.
- ModuleDescriptor* descriptor = scope_->module();
+ ModuleDescriptor* descriptor = module();
for (int i = 0; i < names.length(); ++i) {
- descriptor->AddLocalExport(names[i], names[i], zone(), ok);
- if (!*ok) {
- // TODO(adamk): Possibly report this error at the right place.
- ParserTraits::ReportMessage(MessageTemplate::kDuplicateExport, names[i]);
- return NULL;
- }
+ // TODO(neis): Provide better location.
+ descriptor->AddExport(names[i], names[i], scanner()->location(), zone());
}
DCHECK_NOT_NULL(result);
@@ -1734,7 +1682,7 @@ Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
if (peek() == Token::SEMICOLON) {
Next();
- return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ return factory()->NewEmptyStatement(kNoSourcePosition);
}
return ParseSubStatement(labels, allow_function, ok);
}
@@ -1771,7 +1719,7 @@ Statement* Parser::ParseSubStatement(
case Token::SEMICOLON:
Next();
- return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ return factory()->NewEmptyStatement(kNoSourcePosition);
case Token::IF:
return ParseIfStatement(labels, ok);
@@ -1796,7 +1744,7 @@ Statement* Parser::ParseSubStatement(
return ParseStatementAsUnlabelled(labels, ok);
} else {
Block* result =
- factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition);
+ factory()->NewBlock(labels, 1, false, kNoSourcePosition);
Target target(&this->target_stack_, result);
Statement* statement = ParseStatementAsUnlabelled(labels, CHECK_OK);
if (result) result->statements()->Add(statement, zone());
@@ -1858,70 +1806,102 @@ Statement* Parser::ParseStatementAsUnlabelled(
}
}
+VariableProxy* Parser::NewUnresolved(const AstRawString* name, int begin_pos,
+ int end_pos, Variable::Kind kind) {
+ return scope()->NewUnresolved(factory(), name, begin_pos, end_pos, kind);
+}
-VariableProxy* Parser::NewUnresolved(const AstRawString* name,
- VariableMode mode) {
- // If we are inside a function, a declaration of a var/const variable is a
- // truly local variable, and the scope of the variable is always the function
- // scope.
- // Let/const variables in harmony mode are always added to the immediately
- // enclosing scope.
- Scope* scope =
- IsLexicalVariableMode(mode) ? scope_ : scope_->DeclarationScope();
- return scope->NewUnresolved(factory(), name, Variable::NORMAL,
- scanner()->location().beg_pos,
- scanner()->location().end_pos);
+VariableProxy* Parser::NewUnresolved(const AstRawString* name) {
+ return scope()->NewUnresolved(factory(), name, scanner()->location().beg_pos,
+ scanner()->location().end_pos);
}
+InitializationFlag Parser::DefaultInitializationFlag(VariableMode mode) {
+ DCHECK(IsDeclaredVariableMode(mode));
+ return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
+}
+
+Declaration* Parser::DeclareVariable(const AstRawString* name,
+ VariableMode mode, int pos, bool* ok) {
+ return DeclareVariable(name, mode, DefaultInitializationFlag(mode), pos, ok);
+}
+
+Declaration* Parser::DeclareVariable(const AstRawString* name,
+ VariableMode mode, InitializationFlag init,
+ int pos, bool* ok) {
+ DCHECK_NOT_NULL(name);
+ Scope* scope =
+ IsLexicalVariableMode(mode) ? this->scope() : GetDeclarationScope();
+ VariableProxy* proxy =
+ scope->NewUnresolved(factory(), name, scanner()->location().beg_pos,
+ scanner()->location().end_pos);
+ Declaration* declaration =
+ factory()->NewVariableDeclaration(proxy, this->scope(), pos);
+ Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, CHECK_OK);
+ return declaration;
+}
Variable* Parser::Declare(Declaration* declaration,
DeclarationDescriptor::Kind declaration_kind,
- bool resolve, bool* ok, Scope* scope) {
+ VariableMode mode, InitializationFlag init, bool* ok,
+ Scope* scope) {
+ DCHECK(IsDeclaredVariableMode(mode) && mode != CONST_LEGACY);
+
VariableProxy* proxy = declaration->proxy();
DCHECK(proxy->raw_name() != NULL);
const AstRawString* name = proxy->raw_name();
- VariableMode mode = declaration->mode();
+
+ if (scope == nullptr) scope = this->scope();
+ if (mode == VAR) scope = scope->GetDeclarationScope();
+ DCHECK(!scope->is_catch_scope());
+ DCHECK(!scope->is_with_scope());
+ DCHECK(scope->is_declaration_scope() ||
+ (IsLexicalVariableMode(mode) && scope->is_block_scope()));
+
bool is_function_declaration = declaration->IsFunctionDeclaration();
- if (scope == nullptr) scope = scope_;
- Scope* declaration_scope =
- IsLexicalVariableMode(mode) ? scope : scope->DeclarationScope();
- Variable* var = NULL;
- // If a suitable scope exists, then we can statically declare this
- // variable and also set its mode. In any case, a Declaration node
- // will be added to the scope so that the declaration can be added
- // to the corresponding activation frame at runtime if necessary.
- // For instance, var declarations inside a sloppy eval scope need
- // to be added to the calling function context. Similarly, strict
- // mode eval scope and lexical eval bindings do not leak variable
- // declarations to the caller's scope so we declare all locals, too.
- if (declaration_scope->is_function_scope() ||
- declaration_scope->is_block_scope() ||
- declaration_scope->is_module_scope() ||
- declaration_scope->is_script_scope() ||
- (declaration_scope->is_eval_scope() &&
- (is_strict(declaration_scope->language_mode()) ||
- IsLexicalVariableMode(mode)))) {
+ Variable* var = NULL;
+ if (scope->is_eval_scope() && is_sloppy(scope->language_mode()) &&
+ mode == VAR) {
+ // In a var binding in a sloppy direct eval, pollute the enclosing scope
+ // with this new binding by doing the following:
+ // The proxy is bound to a lookup variable to force a dynamic declaration
+ // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
+ Variable::Kind kind = Variable::NORMAL;
+ // TODO(sigurds) figure out if kNotAssigned is OK here
+ var = new (zone()) Variable(scope, name, mode, kind, init, kNotAssigned);
+ var->AllocateTo(VariableLocation::LOOKUP, -1);
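+    // E.g. a sloppy-mode `eval("var x = 1;")` must create `x` in the calling
+    // scope, which only those runtime functions can do.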
+ } else {
// Declare the variable in the declaration scope.
- var = declaration_scope->LookupLocal(name);
+ var = scope->LookupLocal(name);
if (var == NULL) {
// Declare the name.
Variable::Kind kind = Variable::NORMAL;
if (is_function_declaration) {
kind = Variable::FUNCTION;
}
- var = declaration_scope->DeclareLocal(
- name, mode, declaration->initialization(), kind, kNotAssigned);
- } else if ((IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode())) &&
- // Lexical bindings may appear for some parameters in sloppy
- // mode even with --harmony-sloppy off.
- (is_strict(language_mode()) || allow_harmony_sloppy())) {
+ var = scope->DeclareLocal(name, mode, init, kind, kNotAssigned);
+ } else if (IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode())) {
// Allow duplicate function decls for web compat, see bug 4693.
- if (is_sloppy(language_mode()) && is_function_declaration &&
+ bool duplicate_allowed = false;
+ if (is_sloppy(scope->language_mode()) && is_function_declaration &&
var->is_function()) {
DCHECK(IsLexicalVariableMode(mode) &&
IsLexicalVariableMode(var->mode()));
+      // Duplication is allowed only if the var shows up in the
+      // SloppyBlockFunctionMap and the new FunctionKind is a
+      // permitted duplicate kind.
+ FunctionKind function_kind =
+ declaration->AsFunctionDeclaration()->fun()->kind();
+ duplicate_allowed =
+ scope->GetDeclarationScope()->sloppy_block_function_map()->Lookup(
+ const_cast<AstRawString*>(name), name->hash()) != nullptr &&
+ !IsAsyncFunction(function_kind) &&
+ !(allow_harmony_restrictive_generators() &&
+ IsGeneratorFunction(function_kind));
+ }
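+      // E.g. sloppy `{ function f() {} function f() {} }` stays legal for
+      // web compat, while the same pattern with async functions does not.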
+ if (duplicate_allowed) {
++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
} else {
// The name was declared in this scope before; check for conflicting
@@ -1941,9 +1921,9 @@ Variable* Parser::Declare(Declaration* declaration,
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
if (declaration_kind == DeclarationDescriptor::NORMAL) {
- ParserTraits::ReportMessage(MessageTemplate::kVarRedeclaration, name);
+ ReportMessage(MessageTemplate::kVarRedeclaration, name);
} else {
- ParserTraits::ReportMessage(MessageTemplate::kParamDupe);
+ ReportMessage(MessageTemplate::kParamDupe);
}
*ok = false;
return nullptr;
@@ -1951,76 +1931,20 @@ Variable* Parser::Declare(Declaration* declaration,
} else if (mode == VAR) {
var->set_maybe_assigned();
}
- } else if (declaration_scope->is_eval_scope() &&
- is_sloppy(declaration_scope->language_mode()) &&
- !IsLexicalVariableMode(mode)) {
- // In a var binding in a sloppy direct eval, pollute the enclosing scope
- // with this new binding by doing the following:
- // The proxy is bound to a lookup variable to force a dynamic declaration
- // using the DeclareLookupSlot runtime function.
- Variable::Kind kind = Variable::NORMAL;
- // TODO(sigurds) figure out if kNotAssigned is OK here
- var = new (zone()) Variable(declaration_scope, name, mode, kind,
- declaration->initialization(), kNotAssigned);
- var->AllocateTo(VariableLocation::LOOKUP, -1);
- var->SetFromEval();
- resolve = true;
}
-
+ DCHECK_NOT_NULL(var);
// We add a declaration node for every declaration. The compiler
// will only generate code if necessary. In particular, declarations
// for inner local variables that do not represent functions won't
// result in any generated code.
//
- // Note that we always add an unresolved proxy even if it's not
- // used, simply because we don't know in this method (w/o extra
- // parameters) if the proxy is needed or not. The proxy will be
- // bound during variable resolution time unless it was pre-bound
- // below.
- //
- // WARNING: This will lead to multiple declaration nodes for the
+ // This will lead to multiple declaration nodes for the
// same variable if it is declared several times. This is not a
- // semantic issue as long as we keep the source order, but it may be
- // a performance issue since it may lead to repeated
- // RuntimeHidden_DeclareLookupSlot calls.
- declaration_scope->AddDeclaration(declaration);
-
- if (mode == CONST_LEGACY && declaration_scope->is_script_scope()) {
- // For global const variables we bind the proxy to a variable.
- DCHECK(resolve); // should be set by all callers
- Variable::Kind kind = Variable::NORMAL;
- var = new (zone()) Variable(declaration_scope, name, mode, kind,
- kNeedsInitialization, kNotAssigned);
- }
-
- // If requested and we have a local variable, bind the proxy to the variable
- // at parse-time. This is used for functions (and consts) declared inside
- // statements: the corresponding function (or const) variable must be in the
- // function scope and not a statement-local scope, e.g. as provided with a
- // 'with' statement:
- //
- // with (obj) {
- // function f() {}
- // }
- //
- // which is translated into:
- //
- // with (obj) {
- // // in this case this is not: 'var f; f = function () {};'
- // var f = function () {};
- // }
- //
- // Note that if 'f' is accessed from inside the 'with' statement, it
- // will be allocated in the context (because we must be able to look
- // it up dynamically) but it will also be accessed statically, i.e.,
- // with a context slot index and a context chain length for this
- // initialization code. Thus, inside the 'with' statement, we need
- // both access to the static and the dynamic context chain; the
- // runtime needs to provide both.
- if (resolve && var != NULL) {
- proxy->BindTo(var);
- }
+ // semantic issue, but it may be a performance issue since it may
+ // lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
+ scope->AddDeclaration(declaration);
+ proxy->BindTo(var);
return var;
}
@@ -2051,88 +1975,122 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
// isn't lazily compiled. The extension structures are only
// accessible while parsing the first time not when reparsing
// because of lazy compilation.
- // TODO(adamk): Should this be ClosureScope()?
- scope_->DeclarationScope()->ForceEagerCompilation();
+ GetClosureScope()->ForceEagerCompilation();
// TODO(1240846): It's weird that native function declarations are
// introduced dynamically when we meet their declarations, whereas
// other functions are set up when entering the surrounding scope.
- VariableProxy* proxy = NewUnresolved(name, VAR);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, VAR, scope_, pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
- NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral(
- name, extension_, RelocInfo::kNoPosition);
+ Declaration* decl = DeclareVariable(name, VAR, pos, CHECK_OK);
+ NativeFunctionLiteral* lit =
+ factory()->NewNativeFunctionLiteral(name, extension_, kNoSourcePosition);
return factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::INIT, proxy, lit, RelocInfo::kNoPosition),
+ factory()->NewAssignment(Token::INIT, decl->proxy(), lit,
+ kNoSourcePosition),
pos);
}
-
-Statement* Parser::ParseFunctionDeclaration(
- ZoneList<const AstRawString*>* names, bool* ok) {
+Statement* Parser::ParseHoistableDeclaration(
+ ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
Expect(Token::FUNCTION, CHECK_OK);
int pos = position();
- bool is_generator = Check(Token::MUL);
- return ParseFunctionDeclaration(pos, is_generator, names, ok);
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ if (Check(Token::MUL)) {
+ flags |= ParseFunctionFlags::kIsGenerator;
+ }
+ return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
}
+Statement* Parser::ParseAsyncFunctionDeclaration(
+ ZoneList<const AstRawString*>* names, bool default_export, bool* ok) {
+ DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+ int pos = position();
+ if (scanner()->HasAnyLineTerminatorBeforeNext()) {
+ *ok = false;
+ ReportUnexpectedToken(scanner()->current_token());
+ return nullptr;
+ }
+ Expect(Token::FUNCTION, CHECK_OK);
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
+ return ParseHoistableDeclaration(pos, flags, names, default_export, ok);
+}
-Statement* Parser::ParseFunctionDeclaration(
- int pos, bool is_generator, ZoneList<const AstRawString*>* names,
- bool* ok) {
+Statement* Parser::ParseHoistableDeclaration(
+ int pos, ParseFunctionFlags flags, ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok) {
// FunctionDeclaration ::
// 'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+ // 'function' '(' FormalParameters ')' '{' FunctionBody '}'
// GeneratorDeclaration ::
// 'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+ // 'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
+ //
+ // The anonymous forms are allowed iff [default_export] is true.
//
// 'function' and '*' (if present) have been consumed by the caller.
- bool is_strict_reserved = false;
- const AstRawString* name = ParseIdentifierOrStrictReservedWord(
- &is_strict_reserved, CHECK_OK);
+
+ const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
+ const bool is_async = flags & ParseFunctionFlags::kIsAsync;
+ DCHECK(!is_generator || !is_async);
+
+ const AstRawString* name;
+ FunctionNameValidity name_validity;
+ const AstRawString* variable_name;
+ if (default_export && peek() == Token::LPAREN) {
+ name = ast_value_factory()->default_string();
+ name_validity = kSkipFunctionNameCheck;
+ variable_name = ast_value_factory()->star_default_star_string();
+ } else {
+ bool is_strict_reserved;
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+ name_validity = is_strict_reserved ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown;
+ variable_name = name;
+ }
FuncNameInferrer::State fni_state(fni_);
if (fni_ != NULL) fni_->PushEnclosingName(name);
FunctionLiteral* fun = ParseFunctionLiteral(
- name, scanner()->location(),
- is_strict_reserved ? kFunctionNameIsStrictReserved
- : kFunctionNameValidityUnknown,
+ name, scanner()->location(), name_validity,
is_generator ? FunctionKind::kGeneratorFunction
- : FunctionKind::kNormalFunction,
+ : is_async ? FunctionKind::kAsyncFunction
+ : FunctionKind::kNormalFunction,
pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
- // Even if we're not at the top-level of the global or a function
- // scope, we treat it as such and introduce the function with its
- // initial value upon entering the corresponding scope.
// In ES6, a function behaves as a lexical binding, except in
// a script scope, or the initial scope of eval or another function.
VariableMode mode =
- (is_strict(language_mode()) || allow_harmony_sloppy_function()) &&
- !scope_->is_declaration_scope()
- ? LET
- : VAR;
- VariableProxy* proxy = NewUnresolved(name, mode);
+ (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
+ : VAR;
+ VariableProxy* proxy = NewUnresolved(variable_name);
Declaration* declaration =
- factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
- if (names) names->Add(name, zone());
- EmptyStatement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
- if (is_sloppy(language_mode()) && allow_harmony_sloppy_function() &&
- !scope_->is_declaration_scope()) {
+ factory()->NewFunctionDeclaration(proxy, fun, scope(), pos);
+ Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
+ CHECK_OK);
+ if (names) names->Add(variable_name, zone());
+ EmptyStatement* empty = factory()->NewEmptyStatement(kNoSourcePosition);
+  // Async functions don't undergo sloppy-mode block-scoped hoisting and don't
+  // allow duplicates in a block. Both behaviors go through the
+  // sloppy_block_function_map, so async functions are not added to it.
+  // Generators are also supposed to be prohibited; this is currently done
+  // behind a flag, with violations use-counted to assess web compatibility.
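+  // E.g. sloppy `{ function f() {} } f();` works via this extra hoisting,
+  // while `{ async function f() {} } f();` throws a ReferenceError.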
+ if (is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
+ !is_async && !(allow_harmony_restrictive_generators() && is_generator)) {
SloppyBlockFunctionStatement* delegate =
- factory()->NewSloppyBlockFunctionStatement(empty, scope_);
- scope_->DeclarationScope()->sloppy_block_function_map()->Declare(name,
- delegate);
+ factory()->NewSloppyBlockFunctionStatement(empty, scope());
+ DeclarationScope* target_scope = GetDeclarationScope();
+ target_scope->DeclareSloppyBlockFunction(variable_name, delegate);
return delegate;
}
return empty;
}
-
Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
- bool* ok) {
+ bool default_export, bool* ok) {
// ClassDeclaration ::
// 'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
+ // 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
+ //
+ // The anonymous form is allowed iff [default_export] is true.
//
// 'class' is expected to be consumed by the caller.
//
@@ -2146,49 +2104,47 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
//
// so rewrite it as such.
- if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
- ReportMessage(MessageTemplate::kSloppyLexical);
- *ok = false;
- return NULL;
+ int pos = position();
+
+ const AstRawString* name;
+ bool is_strict_reserved;
+ const AstRawString* variable_name;
+ if (default_export && (peek() == Token::EXTENDS || peek() == Token::LBRACE)) {
+ name = ast_value_factory()->default_string();
+ is_strict_reserved = false;
+ variable_name = ast_value_factory()->star_default_star_string();
+ } else {
+ name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+ variable_name = name;
}
- int pos = position();
- bool is_strict_reserved = false;
- const AstRawString* name =
- ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
- ClassLiteral* value = ParseClassLiteral(nullptr, name, scanner()->location(),
- is_strict_reserved, pos, CHECK_OK);
+ Expression* value = ParseClassLiteral(nullptr, name, scanner()->location(),
+ is_strict_reserved, pos, CHECK_OK);
- VariableProxy* proxy = NewUnresolved(name, LET);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, LET, scope_, pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
- proxy->var()->set_initializer_position(position());
+ Declaration* decl = DeclareVariable(variable_name, LET, pos, CHECK_OK);
+ decl->proxy()->var()->set_initializer_position(position());
Assignment* assignment =
- factory()->NewAssignment(Token::INIT, proxy, value, pos);
+ factory()->NewAssignment(Token::INIT, decl->proxy(), value, pos);
Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- if (names) names->Add(name, zone());
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
+ if (names) names->Add(variable_name, zone());
return assignment_statement;
}
-
-Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels,
- bool finalize_block_scope, bool* ok) {
+Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
// The harmony mode uses block elements instead of statements.
//
// Block ::
// '{' StatementList '}'
// Construct block expecting 16 statements.
- Block* body =
- factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ Block* body = factory()->NewBlock(labels, 16, false, kNoSourcePosition);
// Parse the statements and collect escaping labels.
Expect(Token::LBRACE, CHECK_OK);
- block_scope->set_start_position(scanner()->location().beg_pos);
- { BlockState block_state(&scope_, block_scope);
+ {
+ BlockState block_state(&scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
Target target(&this->target_stack_, body);
while (peek() != Token::RBRACE) {
@@ -2197,22 +2153,15 @@ Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels,
body->statements()->Add(stat, zone());
}
}
+
+ Expect(Token::RBRACE, CHECK_OK);
+ block_state.set_end_position(scanner()->location().end_pos);
+ body->set_scope(block_state.FinalizedBlockScope());
}
- Expect(Token::RBRACE, CHECK_OK);
- block_scope->set_end_position(scanner()->location().end_pos);
- if (finalize_block_scope) {
- block_scope = block_scope->FinalizeBlockScope();
- }
- body->set_scope(block_scope);
return body;
}
-Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
- return ParseBlock(labels, true, ok);
-}
-
-
Block* Parser::DeclarationParsingResult::BuildInitializationBlock(
ZoneList<const AstRawString*>* names, bool* ok) {
Block* result = descriptor.parser->factory()->NewBlock(
@@ -2231,11 +2180,11 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
// VariableStatement ::
// VariableDeclarations ';'
- // The scope of a var/const declared variable anywhere inside a function
+ // The scope of a var declared variable anywhere inside a function
// is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
- // transform a source-level var/const declaration into a (Function)
- // Scope declaration, and rewrite the source-level initialization into an
- // assignment statement. We use a block to collect multiple assignments.
+ // transform a source-level var declaration into a (Function) Scope
+ // declaration, and rewrite the source-level initialization into an assignment
+ // statement. We use a block to collect multiple assignments.
//
// We mark the block as initializer block because we don't want the
// rewriter to add a '.result' assignment to such a block (to get compliant
@@ -2282,12 +2231,11 @@ Block* Parser::ParseVariableDeclarations(
if (peek() == Token::VAR) {
Consume(Token::VAR);
- } else if (peek() == Token::CONST && allow_const()) {
+ } else if (peek() == Token::CONST) {
Consume(Token::CONST);
- DCHECK(is_strict(language_mode()) || allow_harmony_sloppy());
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = CONST;
- } else if (peek() == Token::LET && allow_let()) {
+ } else if (peek() == Token::LET) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = LET;
@@ -2295,7 +2243,7 @@ Block* Parser::ParseVariableDeclarations(
UNREACHABLE(); // by current callers
}
- parsing_result->descriptor.scope = scope_;
+ parsing_result->descriptor.scope = scope();
parsing_result->descriptor.hoist_scope = nullptr;
@@ -2327,7 +2275,7 @@ Block* Parser::ParseVariableDeclarations(
}
Expression* value = NULL;
- int initializer_position = RelocInfo::kNoPosition;
+ int initializer_position = kNoSourcePosition;
if (Check(Token::ASSIGN)) {
ExpressionClassifier classifier(this);
value = ParseAssignmentExpression(var_context != kForStatement,
@@ -2349,9 +2297,8 @@ Block* Parser::ParseVariableDeclarations(
}
}
- if (allow_harmony_function_name()) {
- ParserTraits::SetFunctionNameFromIdentifierRef(value, pattern);
- }
+ ParserBaseTraits<Parser>::SetFunctionNameFromIdentifierRef(value,
+ pattern);
// End position of the initializer is after the assignment expression.
initializer_position = scanner()->location().end_pos;
@@ -2362,7 +2309,7 @@ Block* Parser::ParseVariableDeclarations(
// ES6 'const' and binding patterns require initializers.
if (parsing_result->descriptor.mode == CONST ||
!pattern->IsVariableProxy()) {
- ParserTraits::ReportMessageAt(
+ ReportMessageAt(
Scanner::Location(decl_pos, scanner()->location().end_pos),
MessageTemplate::kDeclarationMissingInitializer,
!pattern->IsVariableProxy() ? "destructuring" : "const");
@@ -2370,9 +2317,8 @@ Block* Parser::ParseVariableDeclarations(
return nullptr;
}
- // 'let x' and (legacy) 'const x' initialize 'x' to undefined.
- if (parsing_result->descriptor.mode == LET ||
- parsing_result->descriptor.mode == CONST_LEGACY) {
+ // 'let x' initializes 'x' to undefined.
+ if (parsing_result->descriptor.mode == LET) {
value = GetLiteralUndefined(position());
}
}
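Concretely, the initializer rules enforced above shake out as follows (a sketch; the quoted strings are the arguments passed with kDeclarationMissingInitializer in this hunk):

    //   const x;    // error: kDeclarationMissingInitializer ("const")
    //   let [a];    // error: kDeclarationMissingInitializer ("destructuring")
    //   let x;      // ok; rewritten as  x = undefined
    //   var x;      // ok; no synthetic initializer is added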
@@ -2419,6 +2365,23 @@ static bool ContainsLabel(ZoneList<const AstRawString*>* labels,
return false;
}
+Statement* Parser::ParseFunctionDeclaration(bool* ok) {
+ Consume(Token::FUNCTION);
+ int pos = position();
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ if (Check(Token::MUL)) {
+ flags |= ParseFunctionFlags::kIsGenerator;
+ if (allow_harmony_restrictive_declarations()) {
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kGeneratorInLegacyContext);
+ *ok = false;
+ return nullptr;
+ }
+ }
+
+ return ParseHoistableDeclaration(pos, flags, nullptr, false, CHECK_OK);
+}
+
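This new ParseFunctionDeclaration overload is reached from legacy contexts such as the labelled-function path later in this hunk, so the generator check above amounts to, roughly:

    //   l: function f() {}    // still allowed; forwarded to
    //                         // ParseHoistableDeclaration
    //   l: function* g() {}   // kGeneratorInLegacyContext when
    //                         // --harmony-restrictive-declarations is on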
Statement* Parser::ParseExpressionOrLabelledStatement(
ZoneList<const AstRawString*>* labels,
AllowLabelledFunctionStatement allow_function, bool* ok) {
@@ -2439,7 +2402,6 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
ReportUnexpectedToken(Next());
*ok = false;
return nullptr;
-
default:
break;
}
@@ -2459,7 +2421,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// structured. However, these are probably changes we want to
// make later anyway so we should go back and fix this then.
if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
- ParserTraits::ReportMessage(MessageTemplate::kLabelRedeclaration, label);
+ ReportMessage(MessageTemplate::kLabelRedeclaration, label);
*ok = false;
return NULL;
}
@@ -2470,12 +2432,12 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// Remove the "ghost" variable that turned out to be a label
// from the top scope. This way, we don't try to resolve it
// during the scope processing.
- scope_->RemoveUnresolved(var);
+ scope()->RemoveUnresolved(var);
Expect(Token::COLON, CHECK_OK);
// ES#sec-labelled-function-declarations Labelled Function Declarations
if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
if (allow_function == kAllowLabelledFunctionStatement) {
- return ParseFunctionDeclaration(labels, ok);
+ return ParseFunctionDeclaration(ok);
} else {
return ParseScopedStatement(labels, true, ok);
}
@@ -2496,15 +2458,6 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
}
// Parsed expression statement, followed by semicolon.
- // Detect attempts at 'let' declarations in sloppy mode.
- if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
- expr->AsVariableProxy() != NULL &&
- expr->AsVariableProxy()->raw_name() ==
- ast_value_factory()->let_string()) {
- ReportMessage(MessageTemplate::kSloppyLexical, NULL);
- *ok = false;
- return NULL;
- }
ExpectSemicolon(CHECK_OK);
return factory()->NewExpressionStatement(expr, pos);
}
@@ -2526,7 +2479,7 @@ IfStatement* Parser::ParseIfStatement(ZoneList<const AstRawString*>* labels,
Next();
else_statement = ParseScopedStatement(labels, false, CHECK_OK);
} else {
- else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
}
return factory()->NewIfStatement(
condition, then_statement, else_statement, pos);
@@ -2553,7 +2506,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
if (label != NULL) {
message = MessageTemplate::kUnknownLabel;
}
- ParserTraits::ReportMessage(message, label);
+ ReportMessage(message, label);
*ok = false;
return NULL;
}
@@ -2590,7 +2543,7 @@ Statement* Parser::ParseBreakStatement(ZoneList<const AstRawString*>* labels,
if (label != NULL) {
message = MessageTemplate::kUnknownLabel;
}
- ParserTraits::ReportMessage(message, label);
+ ReportMessage(message, label);
*ok = false;
return NULL;
}
@@ -2608,7 +2561,6 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
Scanner::Location loc = scanner()->location();
- function_state_->set_return_location(loc);
Token::Value tok = peek();
Statement* result;
@@ -2618,15 +2570,19 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
tok == Token::RBRACE ||
tok == Token::EOS) {
if (IsSubclassConstructor(function_state_->kind())) {
- return_value = ThisExpression(scope_, factory(), loc.beg_pos);
+ return_value = ThisExpression(loc.beg_pos);
} else {
return_value = GetLiteralUndefined(position());
}
} else {
int pos = peek_position();
- return_value = ParseExpression(true, CHECK_OK);
if (IsSubclassConstructor(function_state_->kind())) {
+ // Because of the return code rewriting that happens for a subclass
+ // constructor we don't want to accept tail calls; therefore we don't set
+ // ReturnExprScope to kInsideValidReturnStatement here.
+ return_value = ParseExpression(true, CHECK_OK);
+
// For subclass constructors we need to return 'this' in case of an
// undefined return value, and a Smi (transformed into an exception in
// the ConstructStub) for a non-object.
@@ -2639,8 +2595,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// %_IsJSReceiver(temp) ? temp : 1;
// temp = expr
- Variable* temp = scope_->NewTemporary(
- ast_value_factory()->empty_string());
+ Variable* temp = NewTemporary(ast_value_factory()->empty_string());
Assignment* assign = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
@@ -2651,7 +2606,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Expression* is_spec_object_call = factory()->NewCallRuntime(
Runtime::kInlineIsJSReceiver, is_spec_object_args, pos);
- // %_IsJSReceiver(temp) ? temp : throw_expression
+ // %_IsJSReceiver(temp) ? temp : 1;
Expression* is_object_conditional = factory()->NewConditional(
is_spec_object_call, factory()->NewVariableProxy(temp),
factory()->NewSmiLiteral(1, pos), pos);
@@ -2659,28 +2614,33 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// temp === undefined
Expression* is_undefined = factory()->NewCompareOperation(
Token::EQ_STRICT, assign,
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition), pos);
+ factory()->NewUndefinedLiteral(kNoSourcePosition), pos);
// is_undefined ? this : is_object_conditional
return_value = factory()->NewConditional(
- is_undefined, ThisExpression(scope_, factory(), pos),
- is_object_conditional, pos);
- }
+ is_undefined, ThisExpression(pos), is_object_conditional, pos);
+ } else {
+ ReturnExprScope maybe_allow_tail_calls(
+ function_state_, ReturnExprContext::kInsideValidReturnStatement);
+ return_value = ParseExpression(true, CHECK_OK);
- // ES6 14.6.1 Static Semantics: IsInTailPosition
- if (allow_tailcalls() && !is_sloppy(language_mode())) {
- function_state_->AddExpressionInTailPosition(return_value);
+ if (allow_tailcalls() && !is_sloppy(language_mode()) && !is_resumable()) {
+ // ES6 14.6.1 Static Semantics: IsInTailPosition
+ function_state_->AddImplicitTailCallExpression(return_value);
+ }
}
}
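Reassembling the comment fragments above, the rewritten return value for a subclass constructor is roughly:

    //   return (temp = expr) === undefined
    //       ? this
    //       : (%_IsJSReceiver(temp) ? temp : 1);

where the Smi 1 is what the ConstructStub later converts into an exception.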
ExpectSemicolon(CHECK_OK);
if (is_generator()) {
return_value = BuildIteratorResult(return_value, true);
+ } else if (is_async_function()) {
+ return_value = BuildPromiseResolve(return_value, return_value->position());
}
result = factory()->NewReturnStatement(return_value, loc.beg_pos);
- Scope* decl_scope = scope_->DeclarationScope();
+ DeclarationScope* decl_scope = GetDeclarationScope();
if (decl_scope->is_script_scope() || decl_scope->is_eval_scope()) {
ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
*ok = false;
@@ -2708,9 +2668,10 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Scope* with_scope = NewScope(scope_, WITH_SCOPE);
+ Scope* with_scope = NewScope(WITH_SCOPE);
Statement* body;
- { BlockState block_state(&scope_, with_scope);
+ {
+ BlockState block_state(&scope_state_, with_scope);
with_scope->set_start_position(scanner()->peek_location().beg_pos);
body = ParseScopedStatement(labels, true, CHECK_OK);
with_scope->set_end_position(scanner()->location().end_pos);
@@ -2766,8 +2727,7 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
// }
// }
- Block* switch_block =
- factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
+ Block* switch_block = factory()->NewBlock(NULL, 2, false, kNoSourcePosition);
int switch_pos = peek_position();
Expect(Token::SWITCH, CHECK_OK);
@@ -2776,12 +2736,12 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
Expect(Token::RPAREN, CHECK_OK);
Variable* tag_variable =
- scope_->NewTemporary(ast_value_factory()->dot_switch_tag_string());
+ NewTemporary(ast_value_factory()->dot_switch_tag_string());
Assignment* tag_assign = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(tag_variable), tag,
tag->position());
Statement* tag_statement =
- factory()->NewExpressionStatement(tag_assign, RelocInfo::kNoPosition);
+ factory()->NewExpressionStatement(tag_assign, kNoSourcePosition);
switch_block->statements()->Add(tag_statement, zone());
// make statement: undefined;
@@ -2789,21 +2749,18 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
// statements don't have a value.
switch_block->statements()->Add(
factory()->NewExpressionStatement(
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition),
zone());
- Block* cases_block =
- factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
- Scope* cases_scope = NewScope(scope_, BLOCK_SCOPE);
- cases_scope->SetNonlinear();
+ Block* cases_block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
SwitchStatement* switch_statement =
factory()->NewSwitchStatement(labels, switch_pos);
- cases_scope->set_start_position(scanner()->location().beg_pos);
{
- BlockState cases_block_state(&scope_, cases_scope);
+ BlockState cases_block_state(&scope_state_);
+ cases_block_state.set_start_position(scanner()->location().beg_pos);
+ cases_block_state.SetNonlinear();
Target target(&this->target_stack_, switch_statement);
Expression* tag_read = factory()->NewVariableProxy(tag_variable);
@@ -2818,12 +2775,11 @@ Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
}
switch_statement->Initialize(tag_read, cases);
cases_block->statements()->Add(switch_statement, zone());
- }
- Expect(Token::RBRACE, CHECK_OK);
+ Expect(Token::RBRACE, CHECK_OK);
- cases_scope->set_end_position(scanner()->location().end_pos);
- cases_scope = cases_scope->FinalizeBlockScope();
- cases_block->set_scope(cases_scope);
+ cases_block_state.set_end_position(scanner()->location().end_pos);
+ cases_block->set_scope(cases_block_state.FinalizedBlockScope());
+ }
switch_block->statements()->Add(cases_block, zone());
@@ -2849,40 +2805,6 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
factory()->NewThrow(exception, pos), pos);
}
-class Parser::DontCollectExpressionsInTailPositionScope {
- public:
- DontCollectExpressionsInTailPositionScope(
- Parser::FunctionState* function_state)
- : function_state_(function_state),
- old_value_(function_state->collect_expressions_in_tail_position()) {
- function_state->set_collect_expressions_in_tail_position(false);
- }
- ~DontCollectExpressionsInTailPositionScope() {
- function_state_->set_collect_expressions_in_tail_position(old_value_);
- }
-
- private:
- Parser::FunctionState* function_state_;
- bool old_value_;
-};
-
-// Collects all return expressions at tail call position in this scope
-// to a separate list.
-class Parser::CollectExpressionsInTailPositionToListScope {
- public:
- CollectExpressionsInTailPositionToListScope(
- Parser::FunctionState* function_state, List<Expression*>* list)
- : function_state_(function_state), list_(list) {
- function_state->expressions_in_tail_position().Swap(list_);
- }
- ~CollectExpressionsInTailPositionToListScope() {
- function_state_->expressions_in_tail_position().Swap(list_);
- }
-
- private:
- Parser::FunctionState* function_state_;
- List<Expression*>* list_;
-};
TryStatement* Parser::ParseTryStatement(bool* ok) {
// TryStatement ::
@@ -2901,11 +2823,20 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Block* try_block;
{
- DontCollectExpressionsInTailPositionScope no_tail_calls(function_state_);
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideTryBlock);
try_block = ParseBlock(NULL, CHECK_OK);
}
Token::Value tok = peek();
+
+ bool catch_for_promise_reject = false;
+ if (allow_natives() && tok == Token::MOD) {
+ Consume(Token::MOD);
+ catch_for_promise_reject = true;
+ tok = peek();
+ }
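This check appears to accept a '%' between the try block and 'catch' under natives syntax; on that reading (an assumption based on the Token::MOD consumption above), runtime-internal code can write:

    //   try { ... } %catch (e) { ... }   // requires --allow-natives-syntax

which is routed to NewTryCatchStatementForPromiseReject further down.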
+
if (tok != Token::CATCH && tok != Token::FINALLY) {
ReportMessage(MessageTemplate::kNoCatchOrFinally);
*ok = false;
@@ -2915,81 +2846,94 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
- List<Expression*> expressions_in_tail_position_in_catch_block;
+ TailCallExpressionList tail_call_expressions_in_catch_block(zone());
if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- catch_scope = NewScope(scope_, CATCH_SCOPE);
+ catch_scope = NewScope(CATCH_SCOPE);
catch_scope->set_start_position(scanner()->location().beg_pos);
- ExpressionClassifier pattern_classifier(this);
- Expression* pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
- ValidateBindingPattern(&pattern_classifier, CHECK_OK);
-
- const AstRawString* name = ast_value_factory()->dot_catch_string();
- bool is_simple = pattern->IsVariableProxy();
- if (is_simple) {
- auto proxy = pattern->AsVariableProxy();
- scope_->RemoveUnresolved(proxy);
- name = proxy->raw_name();
- }
-
- catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
- Variable::NORMAL);
-
- Expect(Token::RPAREN, CHECK_OK);
-
{
CollectExpressionsInTailPositionToListScope
- collect_expressions_in_tail_position_scope(
- function_state_, &expressions_in_tail_position_in_catch_block);
- BlockState block_state(&scope_, catch_scope);
+ collect_tail_call_expressions_scope(
+ function_state_, &tail_call_expressions_in_catch_block);
+ BlockState block_state(&scope_state_, catch_scope);
- // TODO(adamk): Make a version of ParseBlock that takes a scope and
- // a block.
- catch_block =
- factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
- Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
- block_scope->set_start_position(scanner()->location().beg_pos);
+ // Create a block scope to hold any lexical declarations created
+ // as part of destructuring the catch parameter.
{
- BlockState block_state(&scope_, block_scope);
+ BlockState block_state(&scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
Target target(&this->target_stack_, catch_block);
- if (!is_simple) {
+ const AstRawString* name = ast_value_factory()->dot_catch_string();
+ Expression* pattern = nullptr;
+ if (peek_any_identifier()) {
+ name = ParseIdentifier(kDontAllowRestrictedIdentifiers, CHECK_OK);
+ } else {
+ ExpressionClassifier pattern_classifier(this);
+ pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ ValidateBindingPattern(&pattern_classifier, CHECK_OK);
+ }
+ catch_variable = catch_scope->DeclareLocal(
+ name, VAR, kCreatedInitialized, Variable::NORMAL);
+
+ Expect(Token::RPAREN, CHECK_OK);
+
+ ZoneList<const AstRawString*> bound_names(1, zone());
+ if (pattern != nullptr) {
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
descriptor.parser = this;
- descriptor.scope = scope_;
+ descriptor.scope = scope();
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
descriptor.declaration_pos = pattern->position();
descriptor.initialization_pos = pattern->position();
+ // Initializer position for variables declared by the pattern.
+ const int initializer_position = position();
+
DeclarationParsingResult::Declaration decl(
- pattern, pattern->position(),
+ pattern, initializer_position,
factory()->NewVariableProxy(catch_variable));
Block* init_block =
- factory()->NewBlock(nullptr, 8, true, RelocInfo::kNoPosition);
+ factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
PatternRewriter::DeclareAndInitializeVariables(
- init_block, &descriptor, &decl, nullptr, CHECK_OK);
+ init_block, &descriptor, &decl, &bound_names, CHECK_OK);
catch_block->statements()->Add(init_block, zone());
+ } else {
+ bound_names.Add(name, zone());
}
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- Statement* stat = ParseStatementListItem(CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- catch_block->statements()->Add(stat, zone());
+ Block* inner_block = ParseBlock(nullptr, CHECK_OK);
+ catch_block->statements()->Add(inner_block, zone());
+
+ // Check for `catch(e) { let e; }` and similar errors.
+ Scope* inner_block_scope = inner_block->scope();
+ if (inner_block_scope != nullptr) {
+ Declaration* decl =
+ inner_block_scope->CheckLexDeclarationsConflictingWith(
+ bound_names);
+ if (decl != nullptr) {
+ const AstRawString* name = decl->proxy()->raw_name();
+ int position = decl->proxy()->position();
+ Scanner::Location location =
+ position == kNoSourcePosition
+ ? Scanner::Location::invalid()
+ : Scanner::Location(position, position + 1);
+ ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
+ *ok = false;
+ return nullptr;
}
}
- Consume(Token::RBRACE);
+ block_state.set_end_position(scanner()->location().end_pos);
+ catch_block->set_scope(block_state.FinalizedBlockScope());
}
- block_scope->set_end_position(scanner()->location().end_pos);
- block_scope = block_scope->FinalizeBlockScope();
- catch_block->set_scope(block_scope);
}
catch_scope->set_end_position(scanner()->location().end_pos);
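Two consequences of the restructured catch parsing, sketched as source forms (the conflict test is the CheckLexDeclarationsConflictingWith call above):

    //   try {} catch (e)      { let e; }   // now an error: kVarRedeclaration
    //   try {} catch ([a, b]) { ... }      // exception bound as '.catch';
    //                                      // a and b become let-bindings in
    //                                      // the synthetic init block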
@@ -3011,10 +2955,18 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL && finally_block != NULL) {
// If we have both, create an inner try/catch.
DCHECK(catch_scope != NULL && catch_variable != NULL);
- TryCatchStatement* statement =
- factory()->NewTryCatchStatement(try_block, catch_scope, catch_variable,
- catch_block, RelocInfo::kNoPosition);
- try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
+ TryCatchStatement* statement;
+ if (catch_for_promise_reject) {
+ statement = factory()->NewTryCatchStatementForPromiseReject(
+ try_block, catch_scope, catch_variable, catch_block,
+ kNoSourcePosition);
+ } else {
+ statement = factory()->NewTryCatchStatement(try_block, catch_scope,
+ catch_variable, catch_block,
+ kNoSourcePosition);
+ }
+
+ try_block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
try_block->statements()->Add(statement, zone());
catch_block = NULL; // Clear to indicate it's been handled.
}
@@ -3023,14 +2975,23 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
// For a try-catch construct append return expressions from the catch block
// to the list of return expressions.
- function_state_->expressions_in_tail_position().AddAll(
- expressions_in_tail_position_in_catch_block);
+ function_state_->tail_call_expressions().Append(
+ tail_call_expressions_in_catch_block);
DCHECK(finally_block == NULL);
DCHECK(catch_scope != NULL && catch_variable != NULL);
result = factory()->NewTryCatchStatement(try_block, catch_scope,
catch_variable, catch_block, pos);
} else {
+ if (FLAG_harmony_explicit_tailcalls &&
+ tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
+ // TODO(ishell): update chapter number.
+ // ES8 XX.YY.ZZ
+ ReportMessageAt(tail_call_expressions_in_catch_block.location(),
+ MessageTemplate::kUnexpectedTailCallInCatchBlock);
+ *ok = false;
+ return NULL;
+ }
DCHECK(finally_block != NULL);
result = factory()->NewTryFinallyStatement(try_block, finally_block, pos);
}
@@ -3091,9 +3052,9 @@ WhileStatement* Parser::ParseWhileStatement(
Expression* Parser::BuildIteratorNextResult(Expression* iterator,
Variable* result, int pos) {
Expression* next_literal = factory()->NewStringLiteral(
- ast_value_factory()->next_string(), RelocInfo::kNoPosition);
+ ast_value_factory()->next_string(), kNoSourcePosition);
Expression* next_property =
- factory()->NewProperty(iterator, next_literal, RelocInfo::kNoPosition);
+ factory()->NewProperty(iterator, next_literal, kNoSourcePosition);
ZoneList<Expression*>* next_arguments =
new (zone()) ZoneList<Expression*>(0, zone());
Expression* next_call =
@@ -3123,97 +3084,159 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
throw_call, pos);
}
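In JS terms, BuildIteratorNextResult assembles the guarded next() call that is quoted at its use site below; a sketch:

    //   !%_IsJSReceiver(result = iterator.next()) &&
    //       %ThrowIteratorResultNotAnObject(result)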
-void Parser::InitializeForEachStatement(ForEachStatement* stmt,
- Expression* each, Expression* subject,
- Statement* body) {
+Statement* Parser::InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each,
+ Expression* subject,
+ Statement* body,
+ int each_keyword_pos) {
ForOfStatement* for_of = stmt->AsForOfStatement();
if (for_of != NULL) {
- InitializeForOfStatement(for_of, each, subject, body,
- RelocInfo::kNoPosition);
+ const bool finalize = true;
+ return InitializeForOfStatement(for_of, each, subject, body, finalize,
+ each_keyword_pos);
} else {
if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
- Variable* temp =
- scope_->NewTemporary(ast_value_factory()->empty_string());
+ Variable* temp = NewTemporary(ast_value_factory()->empty_string());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
Expression* assign_each = PatternRewriter::RewriteDestructuringAssignment(
this, factory()->NewAssignment(Token::ASSIGN, each, temp_proxy,
- RelocInfo::kNoPosition),
- scope_);
- auto block =
- factory()->NewBlock(nullptr, 2, false, RelocInfo::kNoPosition);
- block->statements()->Add(factory()->NewExpressionStatement(
- assign_each, RelocInfo::kNoPosition),
- zone());
+ kNoSourcePosition),
+ scope());
+ auto block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
+ block->statements()->Add(
+ factory()->NewExpressionStatement(assign_each, kNoSourcePosition),
+ zone());
block->statements()->Add(body, zone());
body = block;
each = factory()->NewVariableProxy(temp);
}
- stmt->Initialize(each, subject, body);
+ stmt->AsForInStatement()->Initialize(each, subject, body);
}
+ return stmt;
}
-void Parser::InitializeForOfStatement(ForOfStatement* for_of, Expression* each,
- Expression* iterable, Statement* body,
- int iterable_pos) {
- Variable* iterator =
- scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
- Variable* result =
- scope_->NewTemporary(ast_value_factory()->dot_result_string());
+Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
+ Expression* each,
+ Expression* iterable,
+ Statement* body, bool finalize,
+ int next_result_pos) {
+ // Create the auxiliary expressions needed for iterating over the iterable,
+ // and initialize the given ForOfStatement with them.
+ // If finalize is true, also instrument the loop with code that performs the
+ // proper ES6 iterator finalization. In that case, the result is not
+ // immediately a ForOfStatement.
- Expression* assign_iterator;
- Expression* next_result;
- Expression* result_done;
- Expression* assign_each;
+ const int nopos = kNoSourcePosition;
+ auto avfactory = ast_value_factory();
- // Hackily disambiguate o from o.next and o [Symbol.iterator]().
- // TODO(verwaest): Come up with a better solution.
- int get_iterator_pos = iterable_pos != RelocInfo::kNoPosition
- ? iterable_pos
- : iterable->position() - 2;
- int next_result_pos = iterable_pos != RelocInfo::kNoPosition
- ? iterable_pos
- : iterable->position() - 1;
+ Variable* iterator = NewTemporary(ast_value_factory()->dot_iterator_string());
+ Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
+ Variable* completion = NewTemporary(avfactory->empty_string());
// iterator = iterable[Symbol.iterator]()
- assign_iterator = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(iterator),
- GetIterator(iterable, factory(), get_iterator_pos), iterable->position());
+ Expression* assign_iterator;
+ {
+ assign_iterator = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(iterator),
+ GetIterator(iterable, factory(), iterable->position()),
+ iterable->position());
+ }
// !%_IsJSReceiver(result = iterator.next()) &&
// %ThrowIteratorResultNotAnObject(result)
+ Expression* next_result;
{
- // result = iterator.next()
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
next_result =
BuildIteratorNextResult(iterator_proxy, result, next_result_pos);
}
// result.done
+ Expression* result_done;
{
Expression* done_literal = factory()->NewStringLiteral(
- ast_value_factory()->done_string(), RelocInfo::kNoPosition);
+ ast_value_factory()->done_string(), kNoSourcePosition);
Expression* result_proxy = factory()->NewVariableProxy(result);
- result_done = factory()->NewProperty(result_proxy, done_literal,
- RelocInfo::kNoPosition);
+ result_done =
+ factory()->NewProperty(result_proxy, done_literal, kNoSourcePosition);
}
- // each = result.value
+ // result.value
+ Expression* result_value;
{
- Expression* value_literal = factory()->NewStringLiteral(
- ast_value_factory()->value_string(), RelocInfo::kNoPosition);
+ Expression* value_literal =
+ factory()->NewStringLiteral(avfactory->value_string(), nopos);
Expression* result_proxy = factory()->NewVariableProxy(result);
- Expression* result_value = factory()->NewProperty(
- result_proxy, value_literal, RelocInfo::kNoPosition);
- assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
- RelocInfo::kNoPosition);
+ result_value = factory()->NewProperty(result_proxy, value_literal, nopos);
+ }
+
+ // {{completion = kAbruptCompletion;}}
+ Statement* set_completion_abrupt;
+ if (finalize) {
+ Expression* proxy = factory()->NewVariableProxy(completion);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory()->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
+
+ Block* block = factory()->NewBlock(nullptr, 1, true, nopos);
+ block->statements()->Add(
+ factory()->NewExpressionStatement(assignment, nopos), zone());
+ set_completion_abrupt = block;
+ }
+
+ // do { let tmp = #result_value; #set_completion_abrupt; tmp }
+ // Expression* result_value (gets overwritten)
+ if (finalize) {
+ Variable* var_tmp = NewTemporary(avfactory->empty_string());
+ Expression* tmp = factory()->NewVariableProxy(var_tmp);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, tmp, result_value, nopos);
+
+ Block* block = factory()->NewBlock(nullptr, 2, false, nopos);
+ block->statements()->Add(
+ factory()->NewExpressionStatement(assignment, nopos), zone());
+ block->statements()->Add(set_completion_abrupt, zone());
+
+ result_value = factory()->NewDoExpression(block, var_tmp, nopos);
+ }
+
+ // each = #result_value;
+ Expression* assign_each;
+ {
+ assign_each =
+ factory()->NewAssignment(Token::ASSIGN, each, result_value, nopos);
if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
assign_each = PatternRewriter::RewriteDestructuringAssignment(
- this, assign_each->AsAssignment(), scope_);
+ this, assign_each->AsAssignment(), scope());
}
}
- for_of->Initialize(each, iterable, body, iterator, assign_iterator,
- next_result, result_done, assign_each);
+ // {{completion = kNormalCompletion;}}
+ Statement* set_completion_normal;
+ if (finalize) {
+ Expression* proxy = factory()->NewVariableProxy(completion);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
+
+ Block* block = factory()->NewBlock(nullptr, 1, true, nopos);
+ block->statements()->Add(
+ factory()->NewExpressionStatement(assignment, nopos), zone());
+ set_completion_normal = block;
+ }
+
+ // { #loop-body; #set_completion_normal }
+ // Statement* body (gets overwritten)
+ if (finalize) {
+ Block* block = factory()->NewBlock(nullptr, 2, false, nopos);
+ block->statements()->Add(body, zone());
+ block->statements()->Add(set_completion_normal, zone());
+ body = block;
+ }
+
+ for_of->Initialize(body, iterator, assign_iterator, next_result, result_done,
+ assign_each);
+ return finalize ? FinalizeForOfStatement(for_of, completion, nopos) : for_of;
}
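Putting the finalize-mode pieces together (keeping the '#' placeholder notation of the comments above), the initialized loop has roughly this shape; FinalizeForOfStatement is then presumably what closes the iterator when 'completion' still records an abrupt exit:

    //   iterator = iterable[Symbol.iterator]();
    //   for (; !(result = iterator.next()).done; ) {
    //     each = do { let tmp = result.value;
    //                 completion = kAbruptCompletion;
    //                 tmp };
    //     { #loop-body; completion = kNormalCompletion; }
    //   }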
Statement* Parser::DesugarLexicalBindingsInForStatement(
@@ -3260,8 +3283,8 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
DCHECK(names->length() > 0);
ZoneList<Variable*> temps(names->length(), zone());
- Block* outer_block = factory()->NewBlock(NULL, names->length() + 4, false,
- RelocInfo::kNoPosition);
+ Block* outer_block =
+ factory()->NewBlock(NULL, names->length() + 4, false, kNoSourcePosition);
// Add statement: let/const x = i.
outer_block->statements()->Add(init, zone());
@@ -3271,13 +3294,13 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// For each lexical variable x:
// make statement: temp_x = x.
for (int i = 0; i < names->length(); i++) {
- VariableProxy* proxy = NewUnresolved(names->at(i), LET);
- Variable* temp = scope_->NewTemporary(temp_name);
+ VariableProxy* proxy = NewUnresolved(names->at(i));
+ Variable* temp = NewTemporary(temp_name);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
- Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
- Statement* assignment_statement = factory()->NewExpressionStatement(
- assignment, RelocInfo::kNoPosition);
+ Assignment* assignment = factory()->NewAssignment(Token::ASSIGN, temp_proxy,
+ proxy, kNoSourcePosition);
+ Statement* assignment_statement =
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
outer_block->statements()->Add(assignment_statement, zone());
temps.Add(temp, zone());
}
@@ -3285,21 +3308,20 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Variable* first = NULL;
// Make statement: first = 1.
if (next) {
- first = scope_->NewTemporary(temp_name);
+ first = NewTemporary(temp_name);
VariableProxy* first_proxy = factory()->NewVariableProxy(first);
- Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, first_proxy, const1, RelocInfo::kNoPosition);
+ Token::ASSIGN, first_proxy, const1, kNoSourcePosition);
Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
outer_block->statements()->Add(assignment_statement, zone());
}
// make statement: undefined;
outer_block->statements()->Add(
factory()->NewExpressionStatement(
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition),
zone());
// Make statement: outer: for (;;)
@@ -3308,33 +3330,30 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// need to know about it. This should be safe because we don't run any code
// in this function that looks up break targets.
ForStatement* outer_loop =
- factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
+ factory()->NewForStatement(NULL, kNoSourcePosition);
outer_block->statements()->Add(outer_loop, zone());
- outer_block->set_scope(scope_);
+ outer_block->set_scope(scope());
- Block* inner_block =
- factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
+ Block* inner_block = factory()->NewBlock(NULL, 3, false, kNoSourcePosition);
{
- BlockState block_state(&scope_, inner_scope);
+ BlockState block_state(&scope_state_, inner_scope);
- Block* ignore_completion_block = factory()->NewBlock(
- NULL, names->length() + 3, true, RelocInfo::kNoPosition);
+ Block* ignore_completion_block =
+ factory()->NewBlock(NULL, names->length() + 3, true, kNoSourcePosition);
ZoneList<Variable*> inner_vars(names->length(), zone());
// For each let variable x:
// make statement: let/const x = temp_x.
for (int i = 0; i < names->length(); i++) {
- VariableProxy* proxy = NewUnresolved(names->at(i), mode);
- Declaration* declaration = factory()->NewVariableDeclaration(
- proxy, mode, scope_, RelocInfo::kNoPosition);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
- inner_vars.Add(declaration->proxy()->var(), zone());
+ Declaration* decl =
+ DeclareVariable(names->at(i), mode, kNoSourcePosition, CHECK_OK);
+ inner_vars.Add(decl->proxy()->var(), zone());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
Assignment* assignment = factory()->NewAssignment(
- Token::INIT, proxy, temp_proxy, RelocInfo::kNoPosition);
+ Token::INIT, decl->proxy(), temp_proxy, kNoSourcePosition);
Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- DCHECK(init->position() != RelocInfo::kNoPosition);
- proxy->var()->set_initializer_position(init->position());
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
+ DCHECK(init->position() != kNoSourcePosition);
+ decl->proxy()->var()->set_initializer_position(init->position());
ignore_completion_block->statements()->Add(assignment_statement, zone());
}
@@ -3344,45 +3363,43 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Expression* compare = NULL;
// Make compare expression: first == 1.
{
- Expression* const1 =
- factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
VariableProxy* first_proxy = factory()->NewVariableProxy(first);
compare = factory()->NewCompareOperation(Token::EQ, first_proxy, const1,
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
}
Statement* clear_first = NULL;
// Make statement: first = 0.
{
VariableProxy* first_proxy = factory()->NewVariableProxy(first);
- Expression* const0 =
- factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
+ Expression* const0 = factory()->NewSmiLiteral(0, kNoSourcePosition);
Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, first_proxy, const0, RelocInfo::kNoPosition);
- clear_first = factory()->NewExpressionStatement(assignment,
- RelocInfo::kNoPosition);
+ Token::ASSIGN, first_proxy, const0, kNoSourcePosition);
+ clear_first =
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
}
Statement* clear_first_or_next = factory()->NewIfStatement(
- compare, clear_first, next, RelocInfo::kNoPosition);
+ compare, clear_first, next, kNoSourcePosition);
ignore_completion_block->statements()->Add(clear_first_or_next, zone());
}
- Variable* flag = scope_->NewTemporary(temp_name);
+ Variable* flag = NewTemporary(temp_name);
// Make statement: flag = 1.
{
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
+ Token::ASSIGN, flag_proxy, const1, kNoSourcePosition);
Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
ignore_completion_block->statements()->Add(assignment_statement, zone());
}
// Make statement: if (!cond) break.
if (cond) {
Statement* stop =
- factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
- Statement* noop = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ factory()->NewBreakStatement(outer_loop, kNoSourcePosition);
+ Statement* noop = factory()->NewEmptyStatement(kNoSourcePosition);
ignore_completion_block->statements()->Add(
factory()->NewIfStatement(cond, noop, stop, cond->position()),
zone());
@@ -3392,10 +3409,10 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// Make cond expression for main loop: flag == 1.
Expression* flag_cond = NULL;
{
- Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
flag_cond = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
}
// Create chain of expressions "flag = 0, temp_x = x, ..."
@@ -3405,10 +3422,9 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// Make expression: flag = 0.
{
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- Expression* const0 =
- factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
- compound_next = factory()->NewAssignment(
- Token::ASSIGN, flag_proxy, const0, RelocInfo::kNoPosition);
+ Expression* const0 = factory()->NewSmiLiteral(0, kNoSourcePosition);
+ compound_next = factory()->NewAssignment(Token::ASSIGN, flag_proxy,
+ const0, kNoSourcePosition);
}
// Make the comma-separated list of temp_x = x assignments.
@@ -3418,13 +3434,13 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
VariableProxy* proxy =
factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
+ Token::ASSIGN, temp_proxy, proxy, kNoSourcePosition);
compound_next = factory()->NewBinaryOperation(
- Token::COMMA, compound_next, assignment, RelocInfo::kNoPosition);
+ Token::COMMA, compound_next, assignment, kNoSourcePosition);
}
- compound_next_statement = factory()->NewExpressionStatement(
- compound_next, RelocInfo::kNoPosition);
+ compound_next_statement =
+ factory()->NewExpressionStatement(compound_next, kNoSourcePosition);
}
// Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
@@ -3439,19 +3455,18 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Expression* compare = NULL;
// Make compare expression: flag == 1.
{
- Expression* const1 =
- factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ Expression* const1 = factory()->NewSmiLiteral(1, kNoSourcePosition);
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
compare = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
}
Statement* stop =
- factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
- Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
- Statement* if_flag_break = factory()->NewIfStatement(
- compare, stop, empty, RelocInfo::kNoPosition);
+ factory()->NewBreakStatement(outer_loop, kNoSourcePosition);
+ Statement* empty = factory()->NewEmptyStatement(kNoSourcePosition);
+ Statement* if_flag_break =
+ factory()->NewIfStatement(compare, stop, empty, kNoSourcePosition);
Block* ignore_completion_block =
- factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ factory()->NewBlock(NULL, 1, true, kNoSourcePosition);
ignore_completion_block->statements()->Add(if_flag_break, zone());
inner_block->statements()->Add(ignore_completion_block, zone());
}
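Collecting the statements built throughout this function, the desugaring these hunks implement for `for (let x = i; c; n) b` is approximately:

    //   {
    //     let x = i;
    //     temp_x = x;
    //     first = 1;
    //     undefined;
    //     outer: for (;;) {
    //       let x = temp_x;
    //       {{ if (first == 1) first = 0; else n;
    //          flag = 1;
    //          if (!c) break; }}
    //       labels: for (; flag == 1; flag = 0, temp_x = x) {
    //         b
    //       }
    //       {{ if (flag == 1) break; }}
    //     }
    //   }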
@@ -3475,14 +3490,13 @@ Statement* Parser::ParseScopedStatement(ZoneList<const AstRawString*>* labels,
}
// Make a block around the statement in case a lexical binding
// is introduced by a FunctionDeclaration.
- Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
- BlockState block_state(&scope_, body_scope);
- Block* block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
- Statement* body = ParseFunctionDeclaration(NULL, CHECK_OK);
+ BlockState block_state(&scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
+ Block* block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
+ Statement* body = ParseFunctionDeclaration(CHECK_OK);
block->statements()->Add(body, zone());
- body_scope->set_end_position(scanner()->location().end_pos);
- body_scope = body_scope->FinalizeBlockScope();
- block->set_scope(body_scope);
+ block_state.set_end_position(scanner()->location().end_pos);
+ block->set_scope(block_state.FinalizedBlockScope());
return block;
}
}
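In sloppy mode this path wraps a function statement so that its lexical binding gets a scope of its own; e.g. (a sketch):

    //   if (c) function f() {}
    //   // is parsed as if written
    //   if (c) { function f() {} }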
@@ -3491,19 +3505,18 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
bool* ok) {
int stmt_pos = peek_position();
Statement* init = NULL;
- ZoneList<const AstRawString*> lexical_bindings(1, zone());
+ ZoneList<const AstRawString*> bound_names(1, zone());
+ bool bound_names_are_lexical = false;
// Create an in-between scope for let-bound iteration variables.
- Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
-
- BlockState block_state(&scope_, for_scope);
+ BlockState for_state(&scope_state_);
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- for_scope->set_start_position(scanner()->location().beg_pos);
- bool is_let_identifier_expression = false;
+ for_state.set_start_position(scanner()->location().beg_pos);
+ for_state.set_is_hidden();
DeclarationParsingResult parsing_result;
if (peek() != Token::SEMICOLON) {
- if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
+ if (peek() == Token::VAR || peek() == Token::CONST ||
(peek() == Token::LET && IsNextLetKeyword())) {
ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
CHECK_OK);
@@ -3515,10 +3528,9 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
if (CheckInOrOf(&mode, ok)) {
if (!*ok) return nullptr;
if (parsing_result.declarations.length() != 1) {
- ParserTraits::ReportMessageAt(
- parsing_result.bindings_loc,
- MessageTemplate::kForInOfLoopMultiBindings,
- ForEachStatement::VisitModeString(mode));
+ ReportMessageAt(parsing_result.bindings_loc,
+ MessageTemplate::kForInOfLoopMultiBindings,
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return nullptr;
}
@@ -3527,33 +3539,38 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
if (parsing_result.first_initializer_loc.IsValid() &&
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
IsLexicalVariableMode(parsing_result.descriptor.mode) ||
- !decl.pattern->IsVariableProxy())) {
- ParserTraits::ReportMessageAt(
- parsing_result.first_initializer_loc,
- MessageTemplate::kForInOfLoopInitializer,
- ForEachStatement::VisitModeString(mode));
+ !decl.pattern->IsVariableProxy() || allow_harmony_for_in())) {
+ // Only increment the use count if we would have let this through
+ // without the flag.
+ if (allow_harmony_for_in()) {
+ ++use_counts_[v8::Isolate::kForInInitializer];
+ }
+ ReportMessageAt(parsing_result.first_initializer_loc,
+ MessageTemplate::kForInOfLoopInitializer,
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return nullptr;
}
Block* init_block = nullptr;
+ bound_names_are_lexical =
+ IsLexicalVariableMode(parsing_result.descriptor.mode);
- // special case for legacy for (var/const x =.... in)
- if (!IsLexicalVariableMode(parsing_result.descriptor.mode) &&
- decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+ // special case for legacy for (var ... = ... in ...)
+ if (!bound_names_are_lexical && decl.pattern->IsVariableProxy() &&
+ decl.initializer != nullptr) {
+ DCHECK(!allow_harmony_for_in());
++use_counts_[v8::Isolate::kForInInitializer];
const AstRawString* name =
decl.pattern->AsVariableProxy()->raw_name();
- VariableProxy* single_var = scope_->NewUnresolved(
- factory(), name, Variable::NORMAL, each_beg_pos, each_end_pos);
+ VariableProxy* single_var = NewUnresolved(name);
init_block = factory()->NewBlock(
nullptr, 2, true, parsing_result.descriptor.declaration_pos);
init_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewAssignment(Token::ASSIGN, single_var,
- decl.initializer,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
+ decl.initializer, kNoSourcePosition),
+ kNoSourcePosition),
zone());
}
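So legacy sloppy code such as `for (var x = 1 in o) b` still parses: the initializer is peeled off into init_block as a plain assignment ahead of the loop, roughly:

    //   { x = 1; for (var x in o) b }

while --harmony-for-in makes the same form a kForInOfLoopInitializer error via the check above.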
@@ -3573,12 +3590,13 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// let x; // for TDZ
// }
- Variable* temp =
- scope_->NewTemporary(ast_value_factory()->dot_for_string());
+ Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
Target target(&this->target_stack_, loop);
+ int each_keyword_position = scanner()->location().beg_pos;
+
Expression* enumerable;
if (mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
@@ -3590,72 +3608,95 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expect(Token::RPAREN, CHECK_OK);
- Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
- body_scope->set_start_position(scanner()->location().beg_pos);
Block* body_block =
- factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
+ factory()->NewBlock(NULL, 3, false, kNoSourcePosition);
+ Statement* final_loop;
{
- DontCollectExpressionsInTailPositionScope no_tail_calls(
- function_state_);
- BlockState block_state(&scope_, body_scope);
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideForInOfBody);
+ BlockState block_state(&scope_state_);
+ block_state.set_start_position(scanner()->location().beg_pos);
Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
auto each_initialization_block =
- factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
+ factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
{
auto descriptor = parsing_result.descriptor;
- descriptor.declaration_pos = RelocInfo::kNoPosition;
- descriptor.initialization_pos = RelocInfo::kNoPosition;
+ descriptor.declaration_pos = kNoSourcePosition;
+ descriptor.initialization_pos = kNoSourcePosition;
decl.initializer = factory()->NewVariableProxy(temp);
+ bool is_for_var_of =
+ mode == ForEachStatement::ITERATE &&
+ parsing_result.descriptor.mode == VariableMode::VAR;
+
PatternRewriter::DeclareAndInitializeVariables(
each_initialization_block, &descriptor, &decl,
- IsLexicalVariableMode(descriptor.mode) ? &lexical_bindings
- : nullptr,
+ bound_names_are_lexical || is_for_var_of ? &bound_names
+ : nullptr,
CHECK_OK);
+
+ // Annex B.3.5 prohibits the form
+ // `try {} catch(e) { for (var e of {}); }`
+ // So if we are parsing a statement like `for (var ... of ...)`
+ // we need to walk up the scope chain and look for catch scopes
+ // which have a simple binding, then compare their binding against
+ // all of the names declared in the init of the for-of we're
+ // parsing.
+ if (is_for_var_of) {
+ Scope* catch_scope = scope();
+ while (catch_scope != nullptr &&
+ !catch_scope->is_declaration_scope()) {
+ if (catch_scope->is_catch_scope()) {
+ auto name = catch_scope->catch_variable_name();
+ if (name !=
+ ast_value_factory()
+ ->dot_catch_string()) { // i.e. is a simple binding
+ if (bound_names.Contains(name)) {
+ ReportMessageAt(parsing_result.bindings_loc,
+ MessageTemplate::kVarRedeclaration, name);
+ *ok = false;
+ return nullptr;
+ }
+ }
+ }
+ catch_scope = catch_scope->outer_scope();
+ }
+ }
}
body_block->statements()->Add(each_initialization_block, zone());
body_block->statements()->Add(body, zone());
VariableProxy* temp_proxy =
factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
- InitializeForEachStatement(loop, temp_proxy, enumerable, body_block);
+ final_loop = InitializeForEachStatement(
+ loop, temp_proxy, enumerable, body_block, each_keyword_position);
+ block_state.set_end_position(scanner()->location().end_pos);
+ body_block->set_scope(block_state.FinalizedBlockScope());
}
- body_scope->set_end_position(scanner()->location().end_pos);
- body_scope = body_scope->FinalizeBlockScope();
- body_block->set_scope(body_scope);
// Create a TDZ for any lexically-bound names.
- if (IsLexicalVariableMode(parsing_result.descriptor.mode)) {
+ if (bound_names_are_lexical) {
DCHECK_NULL(init_block);
init_block =
- factory()->NewBlock(nullptr, 1, false, RelocInfo::kNoPosition);
+ factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
- for (int i = 0; i < lexical_bindings.length(); ++i) {
+ for (int i = 0; i < bound_names.length(); ++i) {
// TODO(adamk): This needs to be some sort of special
// INTERNAL variable that's invisible to the debugger
// but visible to everything else.
- VariableProxy* tdz_proxy =
- NewUnresolved(lexical_bindings[i], LET);
- Declaration* tdz_decl = factory()->NewVariableDeclaration(
- tdz_proxy, LET, scope_, RelocInfo::kNoPosition);
- Variable* tdz_var = Declare(
- tdz_decl, DeclarationDescriptor::NORMAL, true, CHECK_OK);
- tdz_var->set_initializer_position(position());
+ Declaration* tdz_decl = DeclareVariable(
+ bound_names[i], LET, kNoSourcePosition, CHECK_OK);
+ tdz_decl->proxy()->var()->set_initializer_position(position());
}
}
- Statement* final_loop = loop->IsForOfStatement()
- ? FinalizeForOfStatement(
- loop->AsForOfStatement(), RelocInfo::kNoPosition)
- : loop;
-
- for_scope->set_end_position(scanner()->location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
+ for_state.set_end_position(scanner()->location().end_pos);
+ Scope* for_scope = for_state.FinalizedBlockScope();
// Parsed for-in loop w/ variable declarations.
if (init_block != nullptr) {
init_block->statements()->Add(final_loop, zone());
@@ -3666,11 +3707,10 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
return final_loop;
}
} else {
+ bound_names_are_lexical =
+ IsLexicalVariableMode(parsing_result.descriptor.mode);
init = parsing_result.BuildInitializationBlock(
- IsLexicalVariableMode(parsing_result.descriptor.mode)
- ? &lexical_bindings
- : nullptr,
- CHECK_OK);
+ bound_names_are_lexical ? &bound_names : nullptr, CHECK_OK);
}
} else {
int lhs_beg_pos = peek_position();
@@ -3678,13 +3718,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
ForEachStatement::VisitMode mode = ForEachStatement::ENUMERATE;
- is_let_identifier_expression =
- expression->IsVariableProxy() &&
- expression->AsVariableProxy()->raw_name() ==
- ast_value_factory()->let_string();
- bool is_for_each = CheckInOrOf(&mode, ok);
- if (!*ok) return nullptr;
+ bool is_for_each = CheckInOrOf(&mode, CHECK_OK);
bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
expression->IsObjectLiteral());
@@ -3705,6 +3740,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
factory()->NewForEachStatement(mode, labels, stmt_pos);
Target target(&this->target_stack_, loop);
+ int each_keyword_position = scanner()->location().beg_pos;
+
Expression* enumerable;
if (mode == ForEachStatement::ITERATE) {
ExpressionClassifier classifier(this);
@@ -3719,16 +3756,10 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// For legacy compatibility reasons, give for loops the same treatment as
// if statements in allowing a function declaration as the body.
Statement* body = ParseScopedStatement(NULL, true, CHECK_OK);
- InitializeForEachStatement(loop, expression, enumerable, body);
+ Statement* final_loop = InitializeForEachStatement(
+ loop, expression, enumerable, body, each_keyword_position);
- Statement* final_loop = loop->IsForOfStatement()
- ? FinalizeForOfStatement(
- loop->AsForOfStatement(), RelocInfo::kNoPosition)
- : loop;
-
- for_scope->set_end_position(scanner()->location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
- DCHECK(for_scope == nullptr);
+ DCHECK_NULL(for_state.FinalizedBlockScope());
return final_loop;
} else {
@@ -3742,13 +3773,6 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Target target(&this->target_stack_, loop);
// Parsed initializer at this point.
- // Detect attempts at 'let' declarations in sloppy mode.
- if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
- is_sloppy(language_mode()) && is_let_identifier_expression) {
- ReportMessage(MessageTemplate::kSloppyLexical, NULL);
- *ok = false;
- return NULL;
- }
Expect(Token::SEMICOLON, CHECK_OK);
Expression* cond = NULL;
@@ -3757,13 +3781,14 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// If there are let bindings, then the condition and the next statement of
// the for loop must be parsed in a new scope.
- Scope* inner_scope = scope_;
- if (lexical_bindings.length() > 0) {
- inner_scope = NewScope(for_scope, BLOCK_SCOPE);
+ Scope* inner_scope = scope();
+ // TODO(verwaest): Allocate this through a ScopeState as well.
+ if (bound_names_are_lexical && bound_names.length() > 0) {
+ inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
inner_scope->set_start_position(scanner()->location().beg_pos);
}
{
- BlockState block_state(&scope_, inner_scope);
+ BlockState block_state(&scope_state_, inner_scope);
if (peek() != Token::SEMICOLON) {
cond = ParseExpression(true, CHECK_OK);
@@ -3780,15 +3805,14 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
Statement* result = NULL;
- if (lexical_bindings.length() > 0) {
- BlockState block_state(&scope_, for_scope);
+ if (bound_names_are_lexical && bound_names.length() > 0) {
result = DesugarLexicalBindingsInForStatement(
- inner_scope, parsing_result.descriptor.mode, &lexical_bindings, loop,
- init, cond, next, body, CHECK_OK);
- for_scope->set_end_position(scanner()->location().end_pos);
+ inner_scope, parsing_result.descriptor.mode, &bound_names, loop, init,
+ cond, next, body, CHECK_OK);
+ for_state.set_end_position(scanner()->location().end_pos);
} else {
- for_scope->set_end_position(scanner()->location().end_pos);
- for_scope = for_scope->FinalizeBlockScope();
+ for_state.set_end_position(scanner()->location().end_pos);
+ Scope* for_scope = for_state.FinalizedBlockScope();
if (for_scope) {
// Rewrite a for statement of the form
// for (const x = i; c; n) b
@@ -3808,8 +3832,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// }
// just in case b introduces a lexical binding some other way, e.g., if b
// is a FunctionDeclaration.
- Block* block =
- factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
+ Block* block = factory()->NewBlock(NULL, 2, false, kNoSourcePosition);
if (init != nullptr) {
block->statements()->Add(init, zone());
}
@@ -3882,16 +3905,9 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
}
-
-void ParserTraits::ParseArrowFunctionFormalParameters(
- ParserFormalParameters* parameters, Expression* expr,
- const Scanner::Location& params_loc, bool* ok) {
- if (parameters->Arity() >= Code::kMaxArguments) {
- ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
- *ok = false;
- return;
- }
-
+void Parser::ParseArrowFunctionFormalParameters(
+ ParserFormalParameters* parameters, Expression* expr, int end_pos,
+ bool* ok) {
// ArrowFunctionFormals ::
// Binary(Token::COMMA, NonTailArrowFunctionFormals, Tail)
// Tail
@@ -3912,8 +3928,9 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
DCHECK_EQ(binop->op(), Token::COMMA);
Expression* left = binop->left();
Expression* right = binop->right();
- ParseArrowFunctionFormalParameters(parameters, left, params_loc, ok);
- if (!*ok) return;
+ int comma_pos = binop->position();
+ ParseArrowFunctionFormalParameters(parameters, left, comma_pos,
+ CHECK_OK_VOID);
// LHS of comma expression should be unparenthesized.
expr = right;
}
@@ -3931,31 +3948,63 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
}
Expression* initializer = nullptr;
- if (expr->IsVariableProxy()) {
- // When the formal parameter was originally seen, it was parsed as a
- // VariableProxy and recorded as unresolved in the scope. Here we undo that
- // parse-time side-effect for parameters that are single-names (not
- // patterns; for patterns that happens uniformly in
- // PatternRewriter::VisitVariableProxy).
- parser_->scope_->RemoveUnresolved(expr->AsVariableProxy());
- } else if (expr->IsAssignment()) {
+ if (expr->IsAssignment()) {
Assignment* assignment = expr->AsAssignment();
DCHECK(!assignment->is_compound());
initializer = assignment->value();
expr = assignment->target();
-
- // TODO(adamk): Only call this if necessary.
- RewriteParameterInitializerScope(parser_->stack_limit(), initializer,
- parser_->scope_, parameters->scope);
}
- // TODO(adamk): params_loc.end_pos is not the correct initializer position,
- // but it should be conservative enough to trigger hole checks for variables
- // referenced in the initializer (if any).
- AddFormalParameter(parameters, expr, initializer, params_loc.end_pos,
- is_rest);
+ AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
}
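
For reference, the head of an arrow function reaches this point having been parsed as an ordinary expression; only when `=>` is seen is the comma-separated expression reinterpreted, pair by pair, as a formal parameter list. A minimal JavaScript sketch of heads that take this path (plain language semantics, not code from this patch):

    // Each parenthesized head is first parsed as an expression, then
    // reinterpreted as parameters once `=>` is reached:
    const f = (a, b) => a + b;       // comma expression `a, b`
    const g = (a, b = 2) => a * b;   // assignment `b = 2` becomes a default
    const h = (a, ...rest) => rest;  // trailing rest parameter
    console.log(f(1, 2), g(3), h(1, 2, 3));  // 3 6 [ 2, 3 ]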
+void Parser::DesugarAsyncFunctionBody(const AstRawString* function_name,
+ Scope* scope, ZoneList<Statement*>* body,
+ ExpressionClassifier* classifier,
+ FunctionKind kind,
+ FunctionBodyType body_type,
+ bool accept_IN, int pos, bool* ok) {
+ // function async_function() {
+ // try {
+ // .generator_object = %CreateGeneratorObject();
+ // ... function body ...
+ // } catch (e) {
+ // return Promise.reject(e);
+ // }
+ // }
+ scope->ForceContextAllocation();
+ Variable* temp =
+ NewTemporary(ast_value_factory()->dot_generator_object_string());
+ function_state_->set_generator_object_variable(temp);
+
+ Expression* init_generator_variable = factory()->NewAssignment(
+ Token::INIT, factory()->NewVariableProxy(temp),
+ BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition);
+ body->Add(factory()->NewExpressionStatement(init_generator_variable,
+ kNoSourcePosition),
+ zone());
+
+ Block* try_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
+
+ ZoneList<Statement*>* inner_body = try_block->statements();
+
+ Expression* return_value = nullptr;
+ if (body_type == FunctionBodyType::kNormal) {
+ ParseStatementList(inner_body, Token::RBRACE, CHECK_OK_VOID);
+ return_value = factory()->NewUndefinedLiteral(kNoSourcePosition);
+ } else {
+ return_value =
+ ParseAssignmentExpression(accept_IN, classifier, CHECK_OK_VOID);
+ RewriteNonPattern(classifier, CHECK_OK_VOID);
+ }
+
+ return_value = BuildPromiseResolve(return_value, return_value->position());
+ inner_body->Add(
+ factory()->NewReturnStatement(return_value, return_value->position()),
+ zone());
+ body->Add(BuildRejectPromiseOnException(try_block), zone());
+ scope->set_end_position(scanner()->location().end_pos);
+}
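
The desugaring sketched in the comment above is observable from plain JavaScript: an exception thrown synchronously in an async function body does not propagate to the caller, it surfaces as a rejection of the returned promise. A minimal sketch (assumes an engine with async/await enabled, which this patch guards behind harmony_async_await):

    async function f() {
      throw new Error("boom");  // caught by the implicit catch block
    }
    f().catch(e => console.log("rejected:", e.message));  // rejected: boom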
DoExpression* Parser::ParseDoExpression(bool* ok) {
// AssignmentExpression ::
@@ -3963,29 +4012,34 @@ DoExpression* Parser::ParseDoExpression(bool* ok) {
int pos = peek_position();
Expect(Token::DO, CHECK_OK);
- Variable* result =
- scope_->NewTemporary(ast_value_factory()->dot_result_string());
- Block* block = ParseBlock(nullptr, false, CHECK_OK);
+ Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
+ Block* block = ParseBlock(nullptr, CHECK_OK);
DoExpression* expr = factory()->NewDoExpression(block, result, pos);
- if (!Rewriter::Rewrite(this, expr, ast_value_factory())) {
+ if (!Rewriter::Rewrite(this, GetClosureScope(), expr, ast_value_factory())) {
*ok = false;
return nullptr;
}
- block->set_scope(block->scope()->FinalizeBlockScope());
return expr;
}
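
Do-expressions are a stage-1 proposal, parsed only behind the harmony_do_expressions flag that appears in the SET_ALLOW list later in this patch; the `.result` temporary above carries the completion value of the block. A hedged sketch of the proposal syntax (not standard JavaScript, so it requires the flag):

    // Requires --harmony-do-expressions:
    let x = do {
      let t = 10;
      t * t;   // the block's completion value becomes the value of `x`
    };
    // x === 100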
-
-void ParserTraits::ParseArrowFunctionFormalParameterList(
+void ParserBaseTraits<Parser>::ParseArrowFunctionFormalParameterList(
ParserFormalParameters* parameters, Expression* expr,
- const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok) {
+ const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
+ const Scope::Snapshot& scope_snapshot, bool* ok) {
if (expr->IsEmptyParentheses()) return;
- ParseArrowFunctionFormalParameters(parameters, expr, params_loc, ok);
- if (!*ok) return;
+ delegate()->ParseArrowFunctionFormalParameters(
+ parameters, expr, params_loc.end_pos, CHECK_OK_VOID);
- Type::ExpressionClassifier classifier(parser_);
+ scope_snapshot.Reparent(parameters->scope);
+
+ if (parameters->Arity() > Code::kMaxArguments) {
+ ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
+ *ok = false;
+ return;
+ }
+
+ Type::ExpressionClassifier classifier(delegate());
if (!parameters->is_simple) {
classifier.RecordNonSimpleParameter();
}
@@ -3999,9 +4053,9 @@ void ParserTraits::ParseArrowFunctionFormalParameterList(
DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
}
-
-void ParserTraits::ReindexLiterals(const ParserFormalParameters& parameters) {
- if (parser_->function_state_->materialized_literal_count() > 0) {
+void ParserBaseTraits<Parser>::ReindexLiterals(
+ const ParserFormalParameters& parameters) {
+ if (delegate()->function_state_->materialized_literal_count() > 0) {
AstLiteralReindexer reindexer;
for (const auto p : parameters.params) {
@@ -4010,7 +4064,7 @@ void ParserTraits::ReindexLiterals(const ParserFormalParameters& parameters) {
}
DCHECK(reindexer.count() <=
- parser_->function_state_->materialized_literal_count());
+ delegate()->function_state_->materialized_literal_count());
}
}
@@ -4029,8 +4083,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Setter ::
// '(' PropertySetParameterList ')' '{' FunctionBody '}'
- int pos = function_token_pos == RelocInfo::kNoPosition
- ? peek_position() : function_token_pos;
+ int pos = function_token_pos == kNoSourcePosition ? peek_position()
+ : function_token_pos;
bool is_generator = IsGeneratorFunction(kind);
@@ -4044,59 +4098,111 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name = ast_value_factory()->empty_string();
}
- // Function declarations are function scoped in normal mode, so they are
- // hoisted. In harmony block scoping mode they are block scoped, so they
- // are not hoisted.
- //
- // One tricky case are function declarations in a local sloppy-mode eval:
- // their declaration is hoisted, but they still see the local scope. E.g.,
- //
- // function() {
- // var x = 0
- // try { throw 1 } catch (x) { eval("function g() { return x }") }
- // return g()
- // }
- //
- // needs to return 1. To distinguish such cases, we need to detect
- // (1) whether a function stems from a sloppy eval, and
- // (2) whether it actually hoists across the eval.
- // Unfortunately, we do not represent sloppy eval scopes, so we do not have
- // either information available directly, especially not when lazily compiling
- // a function like 'g'. We hence rely on the following invariants:
- // - (1) is the case iff the innermost scope of the deserialized scope chain
- // under which we compile is _not_ a declaration scope. This holds because
- // in all normal cases, function declarations are fully hoisted to a
- // declaration scope and compiled relative to that.
- // - (2) is the case iff the current declaration scope is still the original
- // one relative to the deserialized scope chain. Otherwise we must be
- // compiling a function in an inner declaration scope in the eval, e.g. a
- // nested function, and hoisting works normally relative to that.
- Scope* declaration_scope = scope_->DeclarationScope();
- Scope* original_declaration_scope = original_scope_->DeclarationScope();
- Scope* scope = function_type == FunctionLiteral::kDeclaration &&
- is_sloppy(language_mode) &&
- !allow_harmony_sloppy_function() &&
- (original_scope_ == original_declaration_scope ||
- declaration_scope != original_declaration_scope)
- ? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
- : NewScope(scope_, FUNCTION_SCOPE, kind);
- SetLanguageMode(scope, language_mode);
- ZoneList<Statement*>* body = NULL;
+ FunctionLiteral::EagerCompileHint eager_compile_hint =
+ function_state_->next_function_is_parenthesized()
+ ? FunctionLiteral::kShouldEagerCompile
+ : FunctionLiteral::kShouldLazyCompile;
+
+ // Determine if the function can be parsed lazily. Lazy parsing is
+ // different from lazy compilation; we need to parse more eagerly than we
+ // compile.
+
+ // We can only parse lazily if we also compile lazily. The heuristics for lazy
+ // compilation are:
+ // - It must not have been prohibited by the caller to Parse (some callers
+ // need a full AST).
+ // - The outer scope must allow lazy compilation of inner functions.
+ // - The function mustn't be a function expression with an open parenthesis
+ // before; we consider that a hint that the function will be called
+ // immediately, and it would be a waste of time to make it lazily
+ // compiled.
+ // These are all things we can know at this point, without looking at the
+ // function itself.
+
+ // In addition, we need to distinguish between these cases:
+ // (function foo() {
+ // bar = function() { return 1; }
+ // })();
+ // and
+ // (function foo() {
+ // var a = 1;
+ // bar = function() { return a; }
+ // })();
+
+  // Now foo will be parsed eagerly and compiled eagerly (optimization: assume
+  // a parenthesis before the function means that it will be called
+ // immediately). The inner function *must* be parsed eagerly to resolve the
+ // possible reference to the variable in foo's scope. However, it's possible
+ // that it will be compiled lazily.
+
+ // To make this additional case work, both Parser and PreParser implement a
+ // logic where only top-level functions will be parsed lazily.
+ bool is_lazily_parsed = mode() == PARSE_LAZILY &&
+ this->scope()->AllowsLazyParsing() &&
+ !function_state_->next_function_is_parenthesized();
+
+ // Determine whether the function body can be discarded after parsing.
+ // The preconditions are:
+ // - Lazy compilation has to be enabled.
+ // - Neither V8 natives nor native function declarations can be allowed,
+ // since parsing one would retroactively force the function to be
+ // eagerly compiled.
+ // - The invoker of this parser can't depend on the AST being eagerly
+ // built (either because the function is about to be compiled, or
+ // because the AST is going to be inspected for some reason).
+ // - Because of the above, we can't be attempting to parse a
+ // FunctionExpression; even without enclosing parentheses it might be
+ // immediately invoked.
+ // - The function literal shouldn't be hinted to eagerly compile.
+ // - For asm.js functions the body needs to be available when module
+ // validation is active, because we examine the entire module at once.
+ bool use_temp_zone =
+ !is_lazily_parsed && FLAG_lazy && !allow_natives() &&
+ extension_ == NULL && allow_lazy() &&
+ function_type == FunctionLiteral::kDeclaration &&
+ eager_compile_hint != FunctionLiteral::kShouldEagerCompile &&
+ !(FLAG_validate_asm && scope()->IsAsmModule());
+
+ DeclarationScope* main_scope = nullptr;
+ if (use_temp_zone) {
+ // This Scope lives in the main Zone; we'll migrate data into it later.
+ main_scope = NewFunctionScope(kind);
+ }
+
+ ZoneList<Statement*>* body = nullptr;
int arity = -1;
int materialized_literal_count = -1;
int expected_property_count = -1;
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
- FunctionLiteral::EagerCompileHint eager_compile_hint =
- parenthesized_function_ ? FunctionLiteral::kShouldEagerCompile
- : FunctionLiteral::kShouldLazyCompile;
bool should_be_used_once_hint = false;
bool has_duplicate_parameters;
- // Parse function.
+
{
- AstNodeFactory function_factory(ast_value_factory());
- FunctionState function_state(&function_state_, &scope_, scope, kind,
- &function_factory);
- scope_->SetScopeName(function_name);
+ // Temporary zones can nest. When we migrate free variables (see below), we
+ // need to recreate them in the previous Zone.
+ AstNodeFactory previous_zone_ast_node_factory(ast_value_factory());
+ previous_zone_ast_node_factory.set_zone(zone());
+
+ // Open a new zone scope, which sets our AstNodeFactory to allocate in the
+ // new temporary zone if the preconditions are satisfied, and ensures that
+ // the previous zone is always restored after parsing the body. To be able
+ // to do scope analysis correctly after full parsing, we migrate needed
+ // information from scope into main_scope when the function has been parsed.
+ Zone temp_zone(zone()->allocator());
+ DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
+
+ DeclarationScope* scope = NewFunctionScope(kind);
+ SetLanguageMode(scope, language_mode);
+ if (!use_temp_zone) {
+ main_scope = scope;
+ } else {
+ DCHECK(main_scope->zone() != scope->zone());
+ }
+
+ FunctionState function_state(&function_state_, &scope_state_, scope, kind);
+#ifdef DEBUG
+ scope->SetScopeName(function_name);
+#endif
ExpressionClassifier formals_classifier(this, &duplicate_finder);
if (is_generator) {
@@ -4104,19 +4210,19 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// because it minimizes the work needed to suspend and resume an
// activation. The machine code produced for generators (by full-codegen)
// relies on this forced context allocation, but not in an essential way.
- scope_->ForceContextAllocation();
+ this->scope()->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
// expressions. This also marks the FunctionState as a generator.
- Variable* temp = scope_->NewTemporary(
- ast_value_factory()->dot_generator_object_string());
+ Variable* temp =
+ NewTemporary(ast_value_factory()->dot_generator_object_string());
function_state.set_generator_object_variable(temp);
}
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
- scope_->set_start_position(start_position);
+ this->scope()->set_start_position(start_position);
ParserFormalParameters formals(scope);
ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
arity = formals.Arity();
@@ -4126,50 +4232,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
formals_end_position, CHECK_OK);
Expect(Token::LBRACE, CHECK_OK);
-
// Don't include the rest parameter into the function's formal parameter
// count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
// which says whether we need to create an arguments adaptor frame).
if (formals.has_rest) arity--;
- // Determine if the function can be parsed lazily. Lazy parsing is different
- // from lazy compilation; we need to parse more eagerly than we compile.
-
- // We can only parse lazily if we also compile lazily. The heuristics for
- // lazy compilation are:
- // - It must not have been prohibited by the caller to Parse (some callers
- // need a full AST).
- // - The outer scope must allow lazy compilation of inner functions.
- // - The function mustn't be a function expression with an open parenthesis
- // before; we consider that a hint that the function will be called
- // immediately, and it would be a waste of time to make it lazily
- // compiled.
- // These are all things we can know at this point, without looking at the
- // function itself.
-
- // In addition, we need to distinguish between these cases:
- // (function foo() {
- // bar = function() { return 1; }
- // })();
- // and
- // (function foo() {
- // var a = 1;
- // bar = function() { return a; }
- // })();
-
- // Now foo will be parsed eagerly and compiled eagerly (optimization: assume
- // parenthesis before the function means that it will be called
- // immediately). The inner function *must* be parsed eagerly to resolve the
- // possible reference to the variable in foo's scope. However, it's possible
- // that it will be compiled lazily.
-
- // To make this additional case work, both Parser and PreParser implement a
- // logic where only top-level functions will be parsed lazily.
- bool is_lazily_parsed = mode() == PARSE_LAZILY &&
- scope_->AllowsLazyParsing() &&
- !parenthesized_function_;
- parenthesized_function_ = false; // The bit was set for this function only.
-
// Eager or lazy parse?
  // If is_lazily_parsed, we'll parse lazily. If we can set a bookmark, we'll
// pass it to SkipLazyFunctionBody, which may use it to abort lazy
@@ -4198,43 +4265,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
}
if (!is_lazily_parsed) {
- // Determine whether the function body can be discarded after parsing.
- // The preconditions are:
- // - Lazy compilation has to be enabled.
- // - Neither V8 natives nor native function declarations can be allowed,
- // since parsing one would retroactively force the function to be
- // eagerly compiled.
- // - The invoker of this parser can't depend on the AST being eagerly
- // built (either because the function is about to be compiled, or
- // because the AST is going to be inspected for some reason).
- // - Because of the above, we can't be attempting to parse a
- // FunctionExpression; even without enclosing parentheses it might be
- // immediately invoked.
- // - The function literal shouldn't be hinted to eagerly compile.
- bool use_temp_zone =
- FLAG_lazy && !allow_natives() && extension_ == NULL && allow_lazy() &&
- function_type == FunctionLiteral::kDeclaration &&
- eager_compile_hint != FunctionLiteral::kShouldEagerCompile;
- // Open a new BodyScope, which sets our AstNodeFactory to allocate in the
- // new temporary zone if the preconditions are satisfied, and ensures that
- // the previous zone is always restored after parsing the body.
- // For the purpose of scope analysis, some ZoneObjects allocated by the
- // factory must persist after the function body is thrown away and
- // temp_zone is deallocated. These objects are instead allocated in a
- // parser-persistent zone (see parser_zone_ in AstNodeFactory).
- {
- Zone temp_zone(zone()->allocator());
- AstNodeFactory::BodyScope inner(factory(), &temp_zone, use_temp_zone);
+ body = ParseEagerFunctionBody(function_name, pos, formals, kind,
+ function_type, CHECK_OK);
- body = ParseEagerFunctionBody(function_name, pos, formals, kind,
- function_type, CHECK_OK);
- }
materialized_literal_count = function_state.materialized_literal_count();
expected_property_count = function_state.expected_property_count();
if (use_temp_zone) {
// If the preconditions are correct the function body should never be
// accessed, but do this anyway for better behaviour if they're wrong.
- body = NULL;
+ body = nullptr;
}
}
@@ -4253,26 +4292,31 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
- }
- if (is_sloppy(language_mode) && allow_harmony_sloppy_function()) {
- InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
+ CheckDecimalLiteralWithLeadingZero(use_counts_, scope->start_position(),
+ scope->end_position());
}
CheckConflictingVarDeclarations(scope, CHECK_OK);
if (body) {
// If body can be inspected, rewrite queued destructuring assignments
- ParserTraits::RewriteDestructuringAssignments();
+ RewriteDestructuringAssignments();
}
has_duplicate_parameters =
!formals_classifier.is_valid_formal_parameter_list_without_duplicates();
- }
+
+ if (use_temp_zone) {
+ DCHECK(main_scope != scope);
+ scope->AnalyzePartially(main_scope, &previous_zone_ast_node_factory);
+ }
+ } // DiscardableZoneScope goes out of scope.
FunctionLiteral::ParameterFlag duplicate_parameters =
has_duplicate_parameters ? FunctionLiteral::kHasDuplicateParameters
: FunctionLiteral::kNoDuplicateParameters;
+ // Note that the FunctionLiteral needs to be created in the main Zone again.
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- function_name, scope, body, materialized_literal_count,
+ function_name, main_scope, body, materialized_literal_count,
expected_property_count, arity, duplicate_parameters, function_type,
eager_compile_hint, kind, pos);
function_literal->set_function_token_position(function_token_pos);
@@ -4283,6 +4327,31 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
return function_literal;
}
+Expression* Parser::ParseAsyncFunctionExpression(bool* ok) {
+ // AsyncFunctionDeclaration ::
+ // async [no LineTerminator here] function ( FormalParameters[Await] )
+ // { AsyncFunctionBody }
+ //
+ // async [no LineTerminator here] function BindingIdentifier[Await]
+ // ( FormalParameters[Await] ) { AsyncFunctionBody }
+ DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+ int pos = position();
+ Expect(Token::FUNCTION, CHECK_OK);
+ bool is_strict_reserved = false;
+ const AstRawString* name = nullptr;
+ FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
+
+ if (peek_any_identifier()) {
+ type = FunctionLiteral::kNamedExpression;
+ name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
+ &is_strict_reserved, CHECK_OK);
+ }
+ return ParseFunctionLiteral(name, scanner()->location(),
+ is_strict_reserved ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown,
+ FunctionKind::kAsyncFunction, pos, type,
+ language_mode(), CHECK_OK);
+}
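
The grammar quoted in the comment allows an optional BindingIdentifier, so both anonymous and named async function expressions go through this path. For example:

    const anon = async function () { return 1; };
    const named = async function inner() { return inner.name; };
    named().then(n => console.log(n));  // "inner"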
void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
int* expected_property_count, bool* ok,
@@ -4291,6 +4360,8 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
if (produce_cached_parse_data()) CHECK(log_);
int function_block_pos = position();
+ DeclarationScope* scope = this->scope()->AsDeclarationScope();
+ DCHECK(scope->is_function_scope());
if (consume_cached_parse_data() && !cached_parse_data_->rejected()) {
// If we have cached data, we use it to skip parsing the function body. The
// data contains the information we need to construct the lazy function.
@@ -4302,17 +4373,14 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
if (entry.is_valid() && entry.end_pos() > function_block_pos) {
scanner()->SeekForward(entry.end_pos() - 1);
- scope_->set_end_position(entry.end_pos());
- Expect(Token::RBRACE, ok);
- if (!*ok) {
- return;
- }
- total_preparse_skipped_ += scope_->end_position() - function_block_pos;
+ scope->set_end_position(entry.end_pos());
+ Expect(Token::RBRACE, CHECK_OK_VOID);
+ total_preparse_skipped_ += scope->end_position() - function_block_pos;
*materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
- SetLanguageMode(scope_, entry.language_mode());
- if (entry.uses_super_property()) scope_->RecordSuperPropertyUsage();
- if (entry.calls_eval()) scope_->RecordEvalCall();
+ SetLanguageMode(scope, entry.language_mode());
+ if (entry.uses_super_property()) scope->RecordSuperPropertyUsage();
+ if (entry.calls_eval()) scope->RecordEvalCall();
return;
}
cached_parse_data_->Reject();
@@ -4332,34 +4400,27 @@ void Parser::SkipLazyFunctionBody(int* materialized_literal_count,
return;
}
if (logger.has_error()) {
- ParserTraits::ReportMessageAt(
- Scanner::Location(logger.start(), logger.end()), logger.message(),
- logger.argument_opt(), logger.error_type());
+ ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
+ logger.message(), logger.argument_opt(),
+ logger.error_type());
*ok = false;
return;
}
- scope_->set_end_position(logger.end());
- Expect(Token::RBRACE, ok);
- if (!*ok) {
- return;
- }
- total_preparse_skipped_ += scope_->end_position() - function_block_pos;
+ scope->set_end_position(logger.end());
+ Expect(Token::RBRACE, CHECK_OK_VOID);
+ total_preparse_skipped_ += scope->end_position() - function_block_pos;
*materialized_literal_count = logger.literals();
*expected_property_count = logger.properties();
- SetLanguageMode(scope_, logger.language_mode());
- if (logger.uses_super_property()) {
- scope_->RecordSuperPropertyUsage();
- }
- if (logger.calls_eval()) {
- scope_->RecordEvalCall();
- }
+ SetLanguageMode(scope, logger.language_mode());
+ if (logger.uses_super_property()) scope->RecordSuperPropertyUsage();
+ if (logger.calls_eval()) scope->RecordEvalCall();
if (produce_cached_parse_data()) {
DCHECK(log_);
// Position right after terminal '}'.
int body_end = scanner()->location().end_pos;
log_->LogFunction(function_block_pos, body_end, *materialized_literal_count,
- *expected_property_count, scope_->language_mode(),
- scope_->uses_super_property(), scope_->calls_eval());
+ *expected_property_count, language_mode(),
+ scope->uses_super_property(), scope->calls_eval());
}
}
@@ -4369,49 +4430,50 @@ Statement* Parser::BuildAssertIsCoercible(Variable* var) {
  //   throw /* type error kNonCoercible */;
Expression* condition = factory()->NewBinaryOperation(
- Token::OR, factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(var),
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
+ Token::OR,
factory()->NewCompareOperation(
Token::EQ_STRICT, factory()->NewVariableProxy(var),
- factory()->NewNullLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition),
+ factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(var),
+ factory()->NewNullLiteral(kNoSourcePosition), kNoSourcePosition),
+ kNoSourcePosition);
Expression* throw_type_error = this->NewThrowTypeError(
MessageTemplate::kNonCoercible, ast_value_factory()->empty_string(),
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
IfStatement* if_statement = factory()->NewIfStatement(
- condition, factory()->NewExpressionStatement(throw_type_error,
- RelocInfo::kNoPosition),
- factory()->NewEmptyStatement(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ condition,
+ factory()->NewExpressionStatement(throw_type_error, kNoSourcePosition),
+ factory()->NewEmptyStatement(kNoSourcePosition), kNoSourcePosition);
return if_statement;
}
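
The synthetic if-statement built here is the coercibility check that destructuring performs before reading properties: roughly `if (v === undefined || v === null) throw TypeError(...)`. Its observable effect, as a sketch:

    function pick(obj) {
      const { a } = obj;  // coercibility of `obj` is checked first
      return a;
    }
    console.log(pick({ a: 1 }));  // 1
    try {
      pick(null);
    } catch (e) {
      console.log(e instanceof TypeError);  // true
    }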
-class InitializerRewriter : public AstExpressionVisitor {
+class InitializerRewriter final
+ : public AstTraversalVisitor<InitializerRewriter> {
public:
InitializerRewriter(uintptr_t stack_limit, Expression* root, Parser* parser,
Scope* scope)
- : AstExpressionVisitor(stack_limit, root),
+ : AstTraversalVisitor(stack_limit, root),
parser_(parser),
scope_(scope) {}
private:
- void VisitExpression(Expression* expr) override {
- RewritableExpression* to_rewrite = expr->AsRewritableExpression();
- if (to_rewrite == nullptr || to_rewrite->is_rewritten()) return;
+  // This is required so that the overridden Visit* methods can be
+ // called by the base class (template).
+ friend class AstTraversalVisitor<InitializerRewriter>;
+ // Just rewrite destructuring assignments wrapped in RewritableExpressions.
+ void VisitRewritableExpression(RewritableExpression* to_rewrite) {
+ if (to_rewrite->is_rewritten()) return;
Parser::PatternRewriter::RewriteDestructuringAssignment(parser_, to_rewrite,
scope_);
}
// Code in function literals does not need to be eagerly rewritten, it will be
// rewritten when scheduled.
- void VisitFunctionLiteral(FunctionLiteral* expr) override {}
+ void VisitFunctionLiteral(FunctionLiteral* expr) {}
- private:
Parser* parser_;
Scope* scope_;
};
@@ -4426,23 +4488,22 @@ void Parser::RewriteParameterInitializer(Expression* expr, Scope* scope) {
Block* Parser::BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok) {
DCHECK(!parameters.is_simple);
- DCHECK(scope_->is_function_scope());
- Block* init_block =
- factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ DCHECK(scope()->is_function_scope());
+ Block* init_block = factory()->NewBlock(NULL, 1, true, kNoSourcePosition);
for (int i = 0; i < parameters.params.length(); ++i) {
auto parameter = parameters.params[i];
if (parameter.is_rest && parameter.pattern->IsVariableProxy()) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.parser = this;
- descriptor.scope = scope_;
+ descriptor.scope = scope();
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
descriptor.declaration_pos = parameter.pattern->position();
// The position that will be used by the AssignmentExpression
// which copies from the temp parameter to the pattern.
//
- // TODO(adamk): Should this be RelocInfo::kNoPosition, since
+ // TODO(adamk): Should this be kNoSourcePosition, since
// it's just copying from a temp var to the real param var?
descriptor.initialization_pos = parameter.pattern->position();
// The initializer position which will end up in,
@@ -4454,43 +4515,44 @@ Block* Parser::BuildParameterInitializationBlock(
// IS_UNDEFINED($param) ? initializer : $param
// Ensure initializer is rewritten
- RewriteParameterInitializer(parameter.initializer, scope_);
+ RewriteParameterInitializer(parameter.initializer, scope());
auto condition = factory()->NewCompareOperation(
Token::EQ_STRICT,
factory()->NewVariableProxy(parameters.scope->parameter(i)),
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
initial_value = factory()->NewConditional(
- condition, parameter.initializer, initial_value,
- RelocInfo::kNoPosition);
+ condition, parameter.initializer, initial_value, kNoSourcePosition);
descriptor.initialization_pos = parameter.initializer->position();
initializer_position = parameter.initializer_end_position;
}
- Scope* param_scope = scope_;
+ Scope* param_scope = scope();
Block* param_block = init_block;
- if (!parameter.is_simple() && scope_->calls_sloppy_eval()) {
- param_scope = NewScope(scope_, BLOCK_SCOPE);
- param_scope->set_is_declaration_scope();
- param_scope->set_start_position(parameter.pattern->position());
- param_scope->set_end_position(RelocInfo::kNoPosition);
+ if (!parameter.is_simple() && scope()->calls_sloppy_eval()) {
+ param_scope = NewVarblockScope();
+ param_scope->set_start_position(descriptor.initialization_pos);
+ param_scope->set_end_position(parameter.initializer_end_position);
param_scope->RecordEvalCall();
- param_block = factory()->NewBlock(NULL, 8, true, RelocInfo::kNoPosition);
+ param_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
param_block->set_scope(param_scope);
- descriptor.hoist_scope = scope_;
+ descriptor.hoist_scope = scope();
+ // Pass the appropriate scope in so that PatternRewriter can appropriately
+ // rewrite inner initializers of the pattern to param_scope
+ descriptor.scope = param_scope;
+ // Rewrite the outer initializer to point to param_scope
+ ReparentParameterExpressionScope(stack_limit(), initial_value,
+ param_scope);
}
- {
- BlockState block_state(&scope_, param_scope);
- DeclarationParsingResult::Declaration decl(
- parameter.pattern, initializer_position, initial_value);
- PatternRewriter::DeclareAndInitializeVariables(param_block, &descriptor,
- &decl, nullptr, CHECK_OK);
- }
+ BlockState block_state(&scope_state_, param_scope);
+ DeclarationParsingResult::Declaration decl(
+ parameter.pattern, initializer_position, initial_value);
+ PatternRewriter::DeclareAndInitializeVariables(param_block, &descriptor,
+ &decl, nullptr, CHECK_OK);
- if (!parameter.is_simple() && scope_->calls_sloppy_eval()) {
- param_scope = param_scope->FinalizeBlockScope();
+ if (param_block != init_block) {
+ param_scope = block_state.FinalizedBlockScope();
if (param_scope != nullptr) {
CheckConflictingVarDeclarations(param_scope, CHECK_OK);
}
@@ -4500,6 +4562,54 @@ Block* Parser::BuildParameterInitializationBlock(
return init_block;
}
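
The conditional assembled above, `IS_UNDEFINED($param) ? initializer : $param`, matches the language rule that only `undefined` triggers a default value. A quick sketch:

    function f(a = "default") { return a; }
    console.log(f());           // "default" (a missing argument is undefined)
    console.log(f(undefined));  // "default" (explicit undefined also triggers it)
    console.log(f(null));       // null      (null does not)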
+Block* Parser::BuildRejectPromiseOnException(Block* block) {
+ // try { <block> } catch (error) { return Promise.reject(error); }
+ Block* try_block = block;
+ Scope* catch_scope = NewScope(CATCH_SCOPE);
+ catch_scope->set_is_hidden();
+ Variable* catch_variable =
+ catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
+ kCreatedInitialized, Variable::NORMAL);
+ Block* catch_block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+
+ Expression* promise_reject = BuildPromiseReject(
+ factory()->NewVariableProxy(catch_variable), kNoSourcePosition);
+
+ ReturnStatement* return_promise_reject =
+ factory()->NewReturnStatement(promise_reject, kNoSourcePosition);
+ catch_block->statements()->Add(return_promise_reject, zone());
+ TryStatement* try_catch_statement = factory()->NewTryCatchStatement(
+ try_block, catch_scope, catch_variable, catch_block, kNoSourcePosition);
+
+ block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
+ block->statements()->Add(try_catch_statement, zone());
+ return block;
+}
+
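Later in this patch the same wrapper is applied to the parameter-initialization block of async functions, so even a throw while binding parameters ends up as a rejection rather than a synchronous exception. A minimal sketch of that behaviour:

    async function f(a = (() => { throw new Error("bad default"); })()) {}
    // The throw happens while parameters are bound, yet the caller still
    // receives a rejected promise:
    f().catch(e => console.log(e.message));  // "bad default"
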
+Expression* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
+ DCHECK_NOT_NULL(function_state_->generator_object_variable());
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewThisFunction(pos), zone());
+ args->Add(IsArrowFunction(kind) ? GetLiteralUndefined(pos)
+ : ThisExpression(kNoSourcePosition),
+ zone());
+ return factory()->NewCallRuntime(Runtime::kCreateJSGeneratorObject, args,
+ pos);
+}
+
+Expression* Parser::BuildPromiseResolve(Expression* value, int pos) {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(value, zone());
+ return factory()->NewCallRuntime(Context::PROMISE_CREATE_RESOLVED_INDEX, args,
+ pos);
+}
+
+Expression* Parser::BuildPromiseReject(Expression* value, int pos) {
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(value, zone());
+ return factory()->NewCallRuntime(Context::PROMISE_CREATE_REJECTED_INDEX, args,
+ pos);
+}
ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
const AstRawString* function_name, int pos,
@@ -4523,25 +4633,25 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
}
ZoneList<Statement*>* body = result;
- Scope* inner_scope = scope_;
+ DeclarationScope* function_scope = scope()->AsDeclarationScope();
+ DeclarationScope* inner_scope = function_scope;
Block* inner_block = nullptr;
if (!parameters.is_simple) {
- inner_scope = NewScope(scope_, BLOCK_SCOPE);
- inner_scope->set_is_declaration_scope();
+ inner_scope = NewVarblockScope();
inner_scope->set_start_position(scanner()->location().beg_pos);
- inner_block = factory()->NewBlock(NULL, 8, true, RelocInfo::kNoPosition);
+ inner_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
inner_block->set_scope(inner_scope);
body = inner_block->statements();
}
{
- BlockState block_state(&scope_, inner_scope);
+ BlockState block_state(&scope_state_, inner_scope);
if (IsGeneratorFunction(kind)) {
// We produce:
//
// try { InitialYield; ...body...; return {value: undefined, done: true} }
- // finally { %GeneratorClose(generator) }
+ // finally { %_GeneratorClose(generator) }
//
// - InitialYield yields the actual generator object.
// - Any return statement inside the body will have its argument wrapped
@@ -4550,113 +4660,117 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
// Hence the finally clause.
Block* try_block =
- factory()->NewBlock(nullptr, 3, false, RelocInfo::kNoPosition);
+ factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
{
- ZoneList<Expression*>* arguments =
- new (zone()) ZoneList<Expression*>(0, zone());
- CallRuntime* allocation = factory()->NewCallRuntime(
- Runtime::kCreateJSGeneratorObject, arguments, pos);
+ Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
VariableProxy* init_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
- Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
+ Token::INIT, init_proxy, allocation, kNoSourcePosition);
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
- Yield* yield =
- factory()->NewYield(get_proxy, assignment, RelocInfo::kNoPosition);
+ // The position of the yield is important for reporting the exception
+ // caused by calling the .throw method on a generator suspended at the
+ // initial yield (i.e. right after generator instantiation).
+ Yield* yield = factory()->NewYield(get_proxy, assignment,
+ scope()->start_position(),
+ Yield::kOnExceptionThrow);
try_block->statements()->Add(
- factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
+ factory()->NewExpressionStatement(yield, kNoSourcePosition),
zone());
}
ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
Statement* final_return = factory()->NewReturnStatement(
- BuildIteratorResult(nullptr, true), RelocInfo::kNoPosition);
+ BuildIteratorResult(nullptr, true), kNoSourcePosition);
try_block->statements()->Add(final_return, zone());
Block* finally_block =
- factory()->NewBlock(nullptr, 1, false, RelocInfo::kNoPosition);
+ factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(1, zone());
VariableProxy* call_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
args->Add(call_proxy, zone());
Expression* call = factory()->NewCallRuntime(
- Runtime::kGeneratorClose, args, RelocInfo::kNoPosition);
+ Runtime::kInlineGeneratorClose, args, kNoSourcePosition);
finally_block->statements()->Add(
- factory()->NewExpressionStatement(call, RelocInfo::kNoPosition),
- zone());
+ factory()->NewExpressionStatement(call, kNoSourcePosition), zone());
body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
- RelocInfo::kNoPosition),
+ kNoSourcePosition),
zone());
+ } else if (IsAsyncFunction(kind)) {
+ const bool accept_IN = true;
+ DesugarAsyncFunctionBody(function_name, inner_scope, body, nullptr, kind,
+ FunctionBodyType::kNormal, accept_IN, pos,
+ CHECK_OK);
} else {
ParseStatementList(body, Token::RBRACE, CHECK_OK);
}
if (IsSubclassConstructor(kind)) {
- body->Add(
- factory()->NewReturnStatement(
- this->ThisExpression(scope_, factory(), RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- zone());
+ body->Add(factory()->NewReturnStatement(
+ this->ThisExpression(kNoSourcePosition), kNoSourcePosition),
+ zone());
}
}
Expect(Token::RBRACE, CHECK_OK);
- scope_->set_end_position(scanner()->location().end_pos);
+ scope()->set_end_position(scanner()->location().end_pos);
if (!parameters.is_simple) {
DCHECK_NOT_NULL(inner_scope);
+ DCHECK_EQ(function_scope, scope());
+ DCHECK_EQ(function_scope, inner_scope->outer_scope());
DCHECK_EQ(body, inner_block->statements());
- SetLanguageMode(scope_, inner_scope->language_mode());
+ SetLanguageMode(function_scope, inner_scope->language_mode());
Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
+
+ if (is_sloppy(inner_scope->language_mode())) {
+ InsertSloppyBlockFunctionVarBindings(inner_scope, function_scope,
+ CHECK_OK);
+ }
+
+ if (IsAsyncFunction(kind)) {
+ init_block = BuildRejectPromiseOnException(init_block);
+ }
+
DCHECK_NOT_NULL(init_block);
inner_scope->set_end_position(scanner()->location().end_pos);
- inner_scope = inner_scope->FinalizeBlockScope();
- if (inner_scope != nullptr) {
+ if (inner_scope->FinalizeBlockScope() != nullptr) {
CheckConflictingVarDeclarations(inner_scope, CHECK_OK);
InsertShadowingVarBindingInitializers(inner_block);
}
+ inner_scope = nullptr;
result->Add(init_block, zone());
result->Add(inner_block, zone());
+ } else {
+ DCHECK_EQ(inner_scope, function_scope);
+ if (is_sloppy(function_scope->language_mode())) {
+ InsertSloppyBlockFunctionVarBindings(function_scope, nullptr, CHECK_OK);
+ }
}
if (function_type == FunctionLiteral::kNamedExpression) {
// Now that we know the language mode, we can create the const assignment
// in the previously reserved spot.
- // NOTE: We create a proxy and resolve it here so that in the
- // future we can change the AST to only refer to VariableProxies
- // instead of Variables and Proxies as is the case now.
- VariableMode fvar_mode = is_strict(language_mode()) ? CONST : CONST_LEGACY;
- Variable* fvar = new (zone())
- Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
- kCreatedInitialized, kNotAssigned);
- VariableProxy* proxy = factory()->NewVariableProxy(fvar);
- VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
- proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
- scope_->DeclareFunctionVar(fvar_declaration);
-
+ DCHECK_EQ(function_scope, scope());
+ Variable* fvar = function_scope->DeclareFunctionVar(function_name);
VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
result->Set(kFunctionNameAssignmentIndex,
factory()->NewExpressionStatement(
factory()->NewAssignment(Token::INIT, fproxy,
factory()->NewThisFunction(pos),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition));
+ kNoSourcePosition),
+ kNoSourcePosition));
}
- // ES6 14.6.1 Static Semantics: IsInTailPosition
- // Mark collected return expressions that are in tail call position.
- const List<Expression*>& expressions_in_tail_position =
- function_state_->expressions_in_tail_position();
- for (int i = 0; i < expressions_in_tail_position.length(); ++i) {
- MarkTailPosition(expressions_in_tail_position[i]);
- }
+ MarkCollectedTailCallExpressions();
return result;
}
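
The source position given to the initial yield earlier in this function matters for diagnostics: calling .throw() on a generator that has never been resumed (so it is still suspended at the implicit initial yield) should be attributed to the function header rather than to a synthetic position. A sketch of the behaviour in question:

    function* g() { yield 1; }
    const it = g();              // suspended at the implicit initial yield
    try {
      it.throw(new Error("x"));  // resumes at that yield, which rethrows
    } catch (e) {
      console.log(e.message);    // "x"
    }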
@@ -4668,7 +4782,7 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
if (pre_parse_timer_ != NULL) {
pre_parse_timer_->Start();
}
- TRACE_EVENT0("v8", "V8.PreParse");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
DCHECK_EQ(Token::LBRACE, scanner()->current_token());
@@ -4678,74 +4792,77 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
reusable_preparser_->set_allow_lazy(true);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
- SET_ALLOW(harmony_sloppy);
- SET_ALLOW(harmony_sloppy_function);
- SET_ALLOW(harmony_sloppy_let);
SET_ALLOW(harmony_do_expressions);
- SET_ALLOW(harmony_function_name);
+ SET_ALLOW(harmony_for_in);
SET_ALLOW(harmony_function_sent);
- SET_ALLOW(harmony_exponentiation_operator);
SET_ALLOW(harmony_restrictive_declarations);
+ SET_ALLOW(harmony_async_await);
+ SET_ALLOW(harmony_trailing_commas);
#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
- language_mode(), function_state_->kind(), scope_->has_simple_parameters(),
- logger, bookmark);
+ language_mode(), function_state_->kind(),
+ scope()->AsDeclarationScope()->has_simple_parameters(), parsing_module_,
+ logger, bookmark, use_counts_);
if (pre_parse_timer_ != NULL) {
pre_parse_timer_->Stop();
}
return result;
}
-ClassLiteral* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
- const AstRawString* name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok) {
+Expression* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
+ const AstRawString* name,
+ Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int pos,
+ bool* ok) {
// All parts of a ClassDeclaration and ClassExpression are strict code.
if (name_is_strict_reserved) {
ReportMessageAt(class_name_location,
MessageTemplate::kUnexpectedStrictReserved);
*ok = false;
- return NULL;
+ return nullptr;
}
if (IsEvalOrArguments(name)) {
ReportMessageAt(class_name_location, MessageTemplate::kStrictEvalArguments);
*ok = false;
- return NULL;
+ return nullptr;
}
- Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
- BlockState block_state(&scope_, block_scope);
+ BlockState block_state(&scope_state_);
RaiseLanguageMode(STRICT);
- scope_->SetScopeName(name);
-
- VariableProxy* proxy = NULL;
- if (name != NULL) {
- proxy = NewUnresolved(name, CONST);
+#ifdef DEBUG
+ scope()->SetScopeName(name);
+#endif
+
+ VariableProxy* proxy = nullptr;
+ if (name != nullptr) {
+ proxy = NewUnresolved(name);
+ // TODO(verwaest): declare via block_state.
Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, CONST, block_scope, pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
+ factory()->NewVariableDeclaration(proxy, block_state.scope(), pos);
+ Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
+ DefaultInitializationFlag(CONST), CHECK_OK);
}
- Expression* extends = NULL;
+ Expression* extends = nullptr;
if (Check(Token::EXTENDS)) {
- block_scope->set_start_position(scanner()->location().end_pos);
+ block_state.set_start_position(scanner()->location().end_pos);
ExpressionClassifier extends_classifier(this);
extends = ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+ CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
RewriteNonPattern(&extends_classifier, CHECK_OK);
if (classifier != nullptr) {
classifier->Accumulate(&extends_classifier,
ExpressionClassifier::ExpressionProductions);
}
} else {
- block_scope->set_start_position(scanner()->location().end_pos);
+ block_state.set_start_position(scanner()->location().end_pos);
}
ClassLiteralChecker checker(this);
ZoneList<ObjectLiteral::Property*>* properties = NewPropertyList(4, zone());
- FunctionLiteral* constructor = NULL;
+ FunctionLiteral* constructor = nullptr;
bool has_seen_constructor = false;
Expect(Token::LBRACE, CHECK_OK);
@@ -4755,13 +4872,12 @@ ClassLiteral* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
if (Check(Token::SEMICOLON)) continue;
FuncNameInferrer::State fni_state(fni_);
const bool in_class = true;
- const bool is_static = false;
bool is_computed_name = false; // Classes do not care about computed
// property names here.
ExpressionClassifier property_classifier(this);
const AstRawString* property_name = nullptr;
ObjectLiteral::Property* property = ParsePropertyDefinition(
- &checker, in_class, has_extends, is_static, &is_computed_name,
+ &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
&has_seen_constructor, &property_classifier, &property_name, CHECK_OK);
RewriteNonPattern(&property_classifier, CHECK_OK);
if (classifier != nullptr) {
@@ -4769,7 +4885,7 @@ ClassLiteral* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
ExpressionClassifier::ExpressionProductions);
}
- if (has_seen_constructor && constructor == NULL) {
+ if (has_seen_constructor && constructor == nullptr) {
constructor = GetPropertyValue(property)->AsFunctionLiteral();
DCHECK_NOT_NULL(constructor);
constructor->set_raw_name(
@@ -4778,10 +4894,9 @@ ClassLiteral* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
properties->Add(property, zone());
}
- if (fni_ != NULL) fni_->Infer();
+ if (fni_ != nullptr) fni_->Infer();
- if (allow_harmony_function_name() &&
- property_name != ast_value_factory()->constructor_string()) {
+ if (property_name != ast_value_factory()->constructor_string()) {
SetFunctionNameFromPropertyName(property, property_name);
}
}
@@ -4789,22 +4904,34 @@ ClassLiteral* Parser::ParseClassLiteral(ExpressionClassifier* classifier,
Expect(Token::RBRACE, CHECK_OK);
int end_pos = scanner()->location().end_pos;
- if (constructor == NULL) {
- constructor = DefaultConstructor(name, extends != NULL, block_scope, pos,
- end_pos, block_scope->language_mode());
+ if (constructor == nullptr) {
+ constructor = DefaultConstructor(name, has_extends, pos, end_pos,
+ block_state.language_mode());
}
// Note that we do not finalize this block scope because it is
// used as a sentinel value indicating an anonymous class.
- block_scope->set_end_position(end_pos);
+ block_state.set_end_position(end_pos);
- if (name != NULL) {
+ if (name != nullptr) {
DCHECK_NOT_NULL(proxy);
proxy->var()->set_initializer_position(end_pos);
}
- return factory()->NewClassLiteral(block_scope, proxy, extends, constructor,
- properties, pos, end_pos);
+ Block* do_block = factory()->NewBlock(nullptr, 1, false, pos);
+ Variable* result_var = NewTemporary(ast_value_factory()->empty_string());
+ do_block->set_scope(block_state.FinalizedBlockScope());
+ DoExpression* do_expr = factory()->NewDoExpression(do_block, result_var, pos);
+
+ ClassLiteral* class_literal = factory()->NewClassLiteral(
+ proxy, extends, constructor, properties, pos, end_pos);
+
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(class_literal, pos), zone());
+ do_expr->set_represented_function(constructor);
+ Rewriter::Rewrite(this, GetClosureScope(), do_expr, ast_value_factory());
+
+ return do_expr;
}
@@ -4827,7 +4954,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
if (extension_ != NULL) {
// The extension structures are only accessible while parsing the
// very first time not when reparsing because of lazy compilation.
- scope_->DeclarationScope()->ForceEagerCompilation();
+ GetClosureScope()->ForceEagerCompilation();
}
const Runtime::Function* function = Runtime::FunctionForName(name->string());
@@ -4865,7 +4992,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
// Check that the function is defined.
if (context_index == Context::kNotFound) {
- ParserTraits::ReportMessage(MessageTemplate::kNotDefined, name);
+ ReportMessage(MessageTemplate::kNotDefined, name);
*ok = false;
return NULL;
}
@@ -4885,11 +5012,11 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
// In ES6, conflicting variable bindings are early errors.
const AstRawString* name = decl->proxy()->raw_name();
int position = decl->proxy()->position();
- Scanner::Location location = position == RelocInfo::kNoPosition
- ? Scanner::Location::invalid()
- : Scanner::Location(position, position + 1);
- ParserTraits::ReportMessageAt(location, MessageTemplate::kVarRedeclaration,
- name);
+ Scanner::Location location =
+ position == kNoSourcePosition
+ ? Scanner::Location::invalid()
+ : Scanner::Location(position, position + 1);
+ ReportMessageAt(location, MessageTemplate::kVarRedeclaration, name);
*ok = false;
}
}
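
The early error reported here is the ES6 rule that a var binding may not hoist across a conflicting lexical declaration. Since the error is raised at parse time, a sketch has to go through eval to observe it:

    try {
      eval("let x; { var x; }");  // the var hoists into the scope of `x`
    } catch (e) {
      console.log(e instanceof SyntaxError);  // true, rejected before running
    }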
@@ -4903,53 +5030,109 @@ void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
Scope* function_scope = inner_scope->outer_scope();
DCHECK(function_scope->is_function_scope());
ZoneList<Declaration*>* decls = inner_scope->declarations();
+ BlockState block_state(&scope_state_, inner_scope);
for (int i = 0; i < decls->length(); ++i) {
Declaration* decl = decls->at(i);
- if (decl->mode() != VAR || !decl->IsVariableDeclaration()) continue;
+ if (decl->proxy()->var()->mode() != VAR || !decl->IsVariableDeclaration()) {
+ continue;
+ }
const AstRawString* name = decl->proxy()->raw_name();
Variable* parameter = function_scope->LookupLocal(name);
if (parameter == nullptr) continue;
- VariableProxy* to = inner_scope->NewUnresolved(factory(), name);
+ VariableProxy* to = NewUnresolved(name);
VariableProxy* from = factory()->NewVariableProxy(parameter);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, to, from, RelocInfo::kNoPosition);
- Statement* statement = factory()->NewExpressionStatement(
- assignment, RelocInfo::kNoPosition);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, to, from, kNoSourcePosition);
+ Statement* statement =
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
inner_block->statements()->InsertAt(0, statement, zone());
}
}
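
The assignments inserted here preserve the standard behaviour for functions with non-simple parameter lists, where body-level vars live in a separate scope from the parameters: a var that redeclares a parameter must start out with the parameter's value. For example:

    function f(a, b = 0) {  // the default makes the parameter list non-simple
      var a;                // redeclares the parameter in the inner var scope
      return a;             // still 1: initialized from the parameter
    }
    console.log(f(1));      // 1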
-
-void Parser::InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok) {
+void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope,
+ Scope* complex_params_scope,
+ bool* ok) {
// For each variable which is used as a function declaration in a sloppy
// block,
- DCHECK(scope->is_declaration_scope());
SloppyBlockFunctionMap* map = scope->sloppy_block_function_map();
for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
AstRawString* name = static_cast<AstRawString*>(p->key);
- // If the variable wouldn't conflict with a lexical declaration,
- Variable* var = scope->LookupLocal(name);
- if (var == nullptr || !IsLexicalVariableMode(var->mode())) {
+
+ // If the variable wouldn't conflict with a lexical declaration
+ // or parameter,
+
+ // Check if there's a conflict with a parameter.
+ // This depends on the fact that functions always have a scope solely to
+ // hold complex parameters, and the names local to that scope are
+ // precisely the names of the parameters. IsDeclaredParameter(name) does
+ // not hold for names declared by complex parameters, nor are those
+ // bindings necessarily declared lexically, so we have to check for them
+    // explicitly. On the other hand, if there are no complex parameters,
+ // it is sufficient to just check IsDeclaredParameter.
+ if (complex_params_scope != nullptr) {
+ if (complex_params_scope->LookupLocal(name) != nullptr) {
+ continue;
+ }
+ } else {
+ if (scope->IsDeclaredParameter(name)) {
+ continue;
+ }
+ }
+
+ bool var_created = false;
+
+ // Write in assignments to var for each block-scoped function declaration
+ auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
+
+ DeclarationScope* decl_scope = scope;
+ while (decl_scope->is_eval_scope()) {
+ decl_scope = decl_scope->outer_scope()->GetDeclarationScope();
+ }
+ Scope* outer_scope = decl_scope->outer_scope();
+
+ for (SloppyBlockFunctionStatement* delegate = delegates;
+ delegate != nullptr; delegate = delegate->next()) {
+ // Check if there's a conflict with a lexical declaration
+ Scope* query_scope = delegate->scope()->outer_scope();
+ Variable* var = nullptr;
+ bool should_hoist = true;
+
+ // Note that we perform this loop for each delegate named 'name',
+ // which may duplicate work if those delegates share scopes.
+ // It is not sufficient to just do a Lookup on query_scope: for
+ // example, that does not prevent hoisting of the function in
+ // `{ let e; try {} catch (e) { function e(){} } }`
+ do {
+ var = query_scope->LookupLocal(name);
+ if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+ should_hoist = false;
+ break;
+ }
+ query_scope = query_scope->outer_scope();
+ } while (query_scope != outer_scope);
+
+ if (!should_hoist) continue;
+
// Declare a var-style binding for the function in the outer scope
- VariableProxy* proxy = scope->NewUnresolved(factory(), name);
- Declaration* declaration = factory()->NewVariableDeclaration(
- proxy, VAR, scope, RelocInfo::kNoPosition);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, ok, scope);
- DCHECK(ok); // Based on the preceding check, this should not fail
- if (!ok) return;
-
- // Write in assignments to var for each block-scoped function declaration
- auto delegates = static_cast<SloppyBlockFunctionMap::Vector*>(p->value);
- for (SloppyBlockFunctionStatement* delegate : *delegates) {
- // Read from the local lexical scope and write to the function scope
- VariableProxy* to = scope->NewUnresolved(factory(), name);
- VariableProxy* from = delegate->scope()->NewUnresolved(factory(), name);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, to, from, RelocInfo::kNoPosition);
- Statement* statement = factory()->NewExpressionStatement(
- assignment, RelocInfo::kNoPosition);
- delegate->set_statement(statement);
+ if (!var_created) {
+ var_created = true;
+ VariableProxy* proxy = scope->NewUnresolved(factory(), name);
+ Declaration* declaration =
+ factory()->NewVariableDeclaration(proxy, scope, kNoSourcePosition);
+ Declare(declaration, DeclarationDescriptor::NORMAL, VAR,
+ DefaultInitializationFlag(VAR), ok, scope);
+ DCHECK(ok); // Based on the preceding check, this should not fail
+ if (!ok) return;
}
+
+ // Read from the local lexical scope and write to the function scope
+ VariableProxy* to = scope->NewUnresolved(factory(), name);
+ VariableProxy* from = delegate->scope()->NewUnresolved(factory(), name);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, to, from, kNoSourcePosition);
+ Statement* statement =
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition);
+ delegate->set_statement(statement);
}
}
}
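
This is the Annex B sloppy-mode hoisting of block-level function declarations, minus the cases where a lexical binding intervenes (the catch-parameter example in the comment above). Observable behaviour, in sloppy mode:

    {
      function g() { return 1; }
    }
    console.log(g());  // 1: a var binding for g was created in the enclosing
                       // function scope and assigned when the block ran
    // By contrast, `{ let e; try {} catch (e) { function e(){} } }` creates
    // no outer binding for e, exactly the case the inner loop checks for.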
@@ -4997,13 +5180,12 @@ IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
- if (scanner_.source_url()->length() > 0) {
- Handle<String> source_url = scanner_.source_url()->Internalize(isolate);
+ Handle<String> source_url = scanner_.SourceUrl(isolate);
+ if (!source_url.is_null()) {
script->set_source_url(*source_url);
}
- if (scanner_.source_mapping_url()->length() > 0) {
- Handle<String> source_mapping_url =
- scanner_.source_mapping_url()->Internalize(isolate);
+ Handle<String> source_mapping_url = scanner_.SourceMappingUrl(isolate);
+ if (!source_mapping_url.is_null()) {
script->set_source_mapping_url(*source_mapping_url);
}
}
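
The URLs copied onto the script here come from the //# sourceURL and //# sourceMappingURL magic comments collected by the scanner. A typical use, with a hypothetical file name:

    // Gives eval'd code a name in stack traces and in the debugger:
    eval('function boom() { throw new Error("x"); }\n' +
         '//# sourceURL=injected.js');
    try { boom(); } catch (e) { console.log(e.stack); }  // frames cite injected.js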
@@ -5026,7 +5208,7 @@ void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
// Move statistics to Isolate.
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
- for (int i = 0; i < use_counts_[feature]; ++i) {
+ if (use_counts_[feature] > 0) {
isolate->CountUsage(v8::Isolate::UseCounterFeature(feature));
}
}
@@ -5093,24 +5275,38 @@ void Parser::ParseOnBackground(ParseInfo* info) {
DCHECK(info->literal() == NULL);
FunctionLiteral* result = NULL;
- fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
CompleteParserRecorder recorder;
if (produce_cached_parse_data()) log_ = &recorder;
- DCHECK(info->source_stream() != NULL);
- ExternalStreamingStream stream(info->source_stream(),
- info->source_stream_encoding());
- scanner_.Initialize(&stream);
+ std::unique_ptr<Utf16CharacterStream> stream;
+ Utf16CharacterStream* stream_ptr;
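+ // Prefer an externally provided character stream; otherwise wrap the raw
+ // source stream in a decoding stream owned by this function.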
+ if (info->character_stream()) {
+ DCHECK(info->source_stream() == nullptr);
+ stream_ptr = info->character_stream();
+ } else {
+ DCHECK(info->character_stream() == nullptr);
+ stream.reset(new ExternalStreamingStream(info->source_stream(),
+ info->source_stream_encoding()));
+ stream_ptr = stream.get();
+ }
DCHECK(info->context().is_null() || info->context()->IsNativeContext());
+ DCHECK(original_scope_);
+
// When streaming, we don't know the length of the source until we have parsed
// it. The raw data can be UTF-8, so we wouldn't know the source length until
// we have decoded it anyway even if we knew the raw data length (which we
// don't). We work around this by storing all the scopes which need their end
// position set at the end of the script (the top scope and possible eval
// scopes) and set their end position after we know the script length.
- result = DoParseProgram(info);
+ if (info->is_lazy()) {
+ result = DoParseLazy(info, info->function_name(), stream_ptr);
+ } else {
+ fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
+ scanner_.Initialize(stream_ptr);
+ result = DoParseProgram(info);
+ }
info->set_literal(result);
@@ -5123,9 +5319,8 @@ void Parser::ParseOnBackground(ParseInfo* info) {
}
}
-
-ParserTraits::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
- return new (zone()) ParserTraits::TemplateLiteral(zone(), pos);
+Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
+ return new (zone()) TemplateLiteral(zone(), pos);
}
@@ -5256,7 +5451,7 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
new (zone()) ZoneList<Expression*>(0, zone());
spread_list->Add(list->at(0)->AsSpread()->expression(), zone());
args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
- spread_list, RelocInfo::kNoPosition),
+ spread_list, kNoSourcePosition),
zone());
return args;
} else {
@@ -5280,7 +5475,7 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
}
int literal_index = function_state_->NextMaterializedLiteralIndex();
args->Add(factory()->NewArrayLiteral(unspread, literal_index,
- RelocInfo::kNoPosition),
+ kNoSourcePosition),
zone());
if (i == n) break;
@@ -5291,13 +5486,13 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
spread_list->Add(list->at(i++)->AsSpread()->expression(), zone());
args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
- spread_list, RelocInfo::kNoPosition),
+ spread_list, kNoSourcePosition),
zone());
}
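+ // E.g. for f(a, ...b, c) the loop above collects [a],
+ // %spread_iterable(b), [c]; the single %spread_arguments call below
+ // flattens them into one argument list (rough sketch).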
list = new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
list->Add(factory()->NewCallRuntime(Context::SPREAD_ARGUMENTS_INDEX, args,
- RelocInfo::kNoPosition),
+ kNoSourcePosition),
zone());
return list;
}
@@ -5324,19 +5519,17 @@ Expression* Parser::SpreadCall(Expression* function,
if (function->IsProperty()) {
// Method calls
if (function->AsProperty()->IsSuperAccess()) {
- Expression* home =
- ThisExpression(scope_, factory(), RelocInfo::kNoPosition);
+ Expression* home = ThisExpression(kNoSourcePosition);
args->InsertAt(0, function, zone());
args->InsertAt(1, home, zone());
} else {
- Variable* temp =
- scope_->NewTemporary(ast_value_factory()->empty_string());
+ Variable* temp = NewTemporary(ast_value_factory()->empty_string());
VariableProxy* obj = factory()->NewVariableProxy(temp);
Assignment* assign_obj = factory()->NewAssignment(
Token::ASSIGN, obj, function->AsProperty()->obj(),
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
function = factory()->NewProperty(
- assign_obj, function->AsProperty()->key(), RelocInfo::kNoPosition);
+ assign_obj, function->AsProperty()->key(), kNoSourcePosition);
args->InsertAt(0, function, zone());
obj = factory()->NewVariableProxy(temp);
args->InsertAt(1, obj, zone());
@@ -5344,7 +5537,7 @@ Expression* Parser::SpreadCall(Expression* function,
} else {
// Non-method calls
args->InsertAt(0, function, zone());
- args->InsertAt(1, factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ args->InsertAt(1, factory()->NewUndefinedLiteral(kNoSourcePosition),
zone());
}
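+ // Every branch above funnels into Reflect.apply(target, thisArgument,
+ // argumentsList) via the native-context entry.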
return factory()->NewCallRuntime(Context::REFLECT_APPLY_INDEX, args, pos);
@@ -5375,41 +5568,87 @@ void Parser::SetLanguageMode(Scope* scope, LanguageMode mode) {
void Parser::RaiseLanguageMode(LanguageMode mode) {
- LanguageMode old = scope_->language_mode();
- SetLanguageMode(scope_, old > mode ? old : mode);
+ LanguageMode old = scope()->language_mode();
+ SetLanguageMode(scope(), old > mode ? old : mode);
}
-
-void ParserTraits::RewriteDestructuringAssignments() {
- parser_->RewriteDestructuringAssignments();
+void Parser::MarkCollectedTailCallExpressions() {
+ const ZoneList<Expression*>& tail_call_expressions =
+ function_state_->tail_call_expressions().expressions();
+ for (int i = 0; i < tail_call_expressions.length(); ++i) {
+ Expression* expression = tail_call_expressions[i];
+ // If only FLAG_harmony_explicit_tailcalls is enabled, then the
+ // expression must be a Call expression.
+ DCHECK(FLAG_harmony_tailcalls || !FLAG_harmony_explicit_tailcalls ||
+ expression->IsCall());
+ MarkTailPosition(expression);
+ }
}
-Expression* ParserTraits::RewriteExponentiation(Expression* left,
- Expression* right, int pos) {
- return parser_->RewriteExponentiation(left, right, pos);
+Expression* ParserBaseTraits<Parser>::ExpressionListToExpression(
+ ZoneList<Expression*>* args) {
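+ // Folds [a, b, c] into the left-nested comma expression ((a, b), c).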
+ AstNodeFactory* factory = delegate()->factory();
+ Expression* expr = args->at(0);
+ for (int i = 1; i < args->length(); ++i) {
+ expr = factory->NewBinaryOperation(Token::COMMA, expr, args->at(i),
+ expr->position());
+ }
+ return expr;
}
-Expression* ParserTraits::RewriteAssignExponentiation(Expression* left,
- Expression* right,
- int pos) {
- return parser_->RewriteAssignExponentiation(left, right, pos);
-}
+Expression* Parser::RewriteAwaitExpression(Expression* value, int await_pos) {
+ // yield %AsyncFunctionAwait(.generator_object, <operand>)
+ Variable* generator_object_variable =
+ delegate()->function_state_->generator_object_variable();
-void ParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
- bool* ok) {
- parser_->RewriteNonPattern(classifier, ok);
-}
+ // If generator_object_variable is null, there is no generator object to
+ // thread the await through; leave the operand expression unchanged.
+ if (!generator_object_variable) return value;
+
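+ // Rough shape of the result (sketch; .tmp and .generator_object are
+ // compiler-internal temporaries, not literal output):
+ //   yield (do {
+ //     .tmp = <operand>;  // separate break location
+ //     .tmp = %AsyncFunctionAwait(.generator_object, .tmp);
+ //   })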
+ auto factory = delegate()->factory();
+ const int nopos = kNoSourcePosition;
+ Variable* temp_var =
+ delegate()->NewTemporary(delegate()->ast_value_factory()->empty_string());
+ VariableProxy* temp_proxy = factory->NewVariableProxy(temp_var);
+ Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
-Zone* ParserTraits::zone() const {
- return parser_->function_state_->scope()->zone();
+ // Wrap value evaluation to provide a break location.
+ Expression* value_assignment =
+ factory->NewAssignment(Token::ASSIGN, temp_proxy, value, nopos);
+ do_block->statements()->Add(
+ factory->NewExpressionStatement(value_assignment, value->position()),
+ zone());
+
+ ZoneList<Expression*>* async_function_await_args =
+ new (zone()) ZoneList<Expression*>(2, zone());
+ Expression* generator_object =
+ factory->NewVariableProxy(generator_object_variable);
+ async_function_await_args->Add(generator_object, zone());
+ async_function_await_args->Add(temp_proxy, zone());
+ Expression* async_function_await = delegate()->factory()->NewCallRuntime(
+ Context::ASYNC_FUNCTION_AWAIT_INDEX, async_function_await_args, nopos);
+ // Wrap await to provide a break location between value evaluation and yield.
+ Expression* await_assignment = factory->NewAssignment(
+ Token::ASSIGN, temp_proxy, async_function_await, nopos);
+ do_block->statements()->Add(
+ factory->NewExpressionStatement(await_assignment, await_pos), zone());
+ Expression* do_expr = factory->NewDoExpression(do_block, temp_var, nopos);
+
+ generator_object = factory->NewVariableProxy(generator_object_variable);
+ return factory->NewYield(generator_object, do_expr, nopos,
+ Yield::kOnExceptionRethrow);
}
+ZoneList<Expression*>* ParserBaseTraits<Parser>::GetNonPatternList() const {
+ return delegate()->function_state_->non_patterns_to_rewrite();
+}
-ZoneList<Expression*>* ParserTraits::GetNonPatternList() const {
- return parser_->function_state_->non_patterns_to_rewrite();
+ZoneList<typename ParserBaseTraits<Parser>::Type::ExpressionClassifier::Error>*
+ParserBaseTraits<Parser>::GetReportedErrorList() const {
+ return delegate()->function_state_->GetReportedErrorList();
}
+Zone* ParserBaseTraits<Parser>::zone() const { return delegate()->zone(); }
class NonPatternRewriter : public AstExpressionRewriter {
public:
@@ -5450,8 +5689,7 @@ class NonPatternRewriter : public AstExpressionRewriter {
void Parser::RewriteNonPattern(ExpressionClassifier* classifier, bool* ok) {
- ValidateExpression(classifier, ok);
- if (!*ok) return;
+ ValidateExpression(classifier, CHECK_OK_VOID);
auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
int begin = classifier->GetNonPatternBegin();
int end = non_patterns_to_rewrite->length();
@@ -5488,7 +5726,7 @@ Expression* Parser::RewriteExponentiation(Expression* left, Expression* right,
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
args->Add(left, zone());
args->Add(right, zone());
- return factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+ return factory()->NewCallRuntime(Context::MATH_POW_INDEX, args, pos);
}
Expression* Parser::RewriteAssignExponentiation(Expression* left,
@@ -5499,34 +5737,33 @@ Expression* Parser::RewriteAssignExponentiation(Expression* left,
Expression* result;
DCHECK_NOT_NULL(lhs->raw_name());
- result =
- this->ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
- lhs->end_position(), scope_, factory());
+ result = this->ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
+ lhs->end_position());
args->Add(left, zone());
args->Add(right, zone());
Expression* call =
- factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+ factory()->NewCallRuntime(Context::MATH_POW_INDEX, args, pos);
return factory()->NewAssignment(Token::ASSIGN, result, call, pos);
} else if (left->IsProperty()) {
Property* prop = left->AsProperty();
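+ // Rough shape of the rewrite for `o[k] **= v`, with .obj/.key as
+ // internal temporaries:
+ //   (.obj = o, (.key = k, .obj[.key] = Math.pow(.obj[.key], v)))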
- auto temp_obj = scope_->NewTemporary(ast_value_factory()->empty_string());
- auto temp_key = scope_->NewTemporary(ast_value_factory()->empty_string());
+ auto temp_obj = NewTemporary(ast_value_factory()->empty_string());
+ auto temp_key = NewTemporary(ast_value_factory()->empty_string());
Expression* assign_obj = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp_obj), prop->obj(),
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
Expression* assign_key = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp_key), prop->key(),
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
args->Add(factory()->NewProperty(factory()->NewVariableProxy(temp_obj),
factory()->NewVariableProxy(temp_key),
left->position()),
zone());
args->Add(right, zone());
Expression* call =
- factory()->NewCallRuntime(Context::MATH_POW_METHOD_INDEX, args, pos);
+ factory()->NewCallRuntime(Context::MATH_POW_INDEX, args, pos);
Expression* target = factory()->NewProperty(
factory()->NewVariableProxy(temp_obj),
- factory()->NewVariableProxy(temp_key), RelocInfo::kNoPosition);
+ factory()->NewVariableProxy(temp_key), kNoSourcePosition);
Expression* assign =
factory()->NewAssignment(Token::ASSIGN, target, call, pos);
return factory()->NewBinaryOperation(
@@ -5553,18 +5790,15 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
// where $R, $i and $j are fresh temporary variables.
ZoneList<Expression*>::iterator s = lit->FirstSpread();
if (s == lit->EndValue()) return nullptr; // no spread, no rewriting...
- Variable* result =
- scope_->NewTemporary(ast_value_factory()->dot_result_string());
+ Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
// NOTE: The value assigned to R is the whole original array literal,
// spreads included. This will be fixed before the rewritten AST is returned.
// $R = lit
- Expression* init_result =
- factory()->NewAssignment(Token::INIT, factory()->NewVariableProxy(result),
- lit, RelocInfo::kNoPosition);
- Block* do_block =
- factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
+ Expression* init_result = factory()->NewAssignment(
+ Token::INIT, factory()->NewVariableProxy(result), lit, kNoSourcePosition);
+ Block* do_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
do_block->statements()->Add(
- factory()->NewExpressionStatement(init_result, RelocInfo::kNoPosition),
+ factory()->NewExpressionStatement(init_result, kNoSourcePosition),
zone());
// Traverse the array literal starting from the first spread.
while (s != lit->EndValue()) {
@@ -5573,20 +5807,36 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
if (spread == nullptr) {
// If the element is not a spread, we're adding a single element:
// %AppendElement($R, value)
- ZoneList<Expression*>* append_element_args = NewExpressionList(2, zone());
- append_element_args->Add(factory()->NewVariableProxy(result), zone());
- append_element_args->Add(value, zone());
- do_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- zone());
+ // or, in case of a hole,
+ // ++($R.length)
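+ // (e.g. in [a, ...b, , c] the hole only bumps $R.length and appends
+ // no element)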
+ if (!value->IsLiteral() ||
+ !value->AsLiteral()->raw_value()->IsTheHole()) {
+ ZoneList<Expression*>* append_element_args =
+ NewExpressionList(2, zone());
+ append_element_args->Add(factory()->NewVariableProxy(result), zone());
+ append_element_args->Add(value, zone());
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(
+ factory()->NewCallRuntime(Runtime::kAppendElement,
+ append_element_args,
+ kNoSourcePosition),
+ kNoSourcePosition),
+ zone());
+ } else {
+ Property* length_property = factory()->NewProperty(
+ factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(ast_value_factory()->length_string(),
+ kNoSourcePosition),
+ kNoSourcePosition);
+ CountOperation* count_op = factory()->NewCountOperation(
+ Token::INC, true /* prefix */, length_property, kNoSourcePosition);
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(count_op, kNoSourcePosition),
+ zone());
+ }
} else {
// If it's a spread, we're adding a for/of loop iterating through it.
- Variable* each =
- scope_->NewTemporary(ast_value_factory()->dot_for_string());
+ Variable* each = NewTemporary(ast_value_factory()->dot_for_string());
Expression* subject = spread->expression();
// %AppendElement($R, each)
Statement* append_body;
@@ -5597,16 +5847,16 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
append_element_args->Add(factory()->NewVariableProxy(each), zone());
append_body = factory()->NewExpressionStatement(
factory()->NewCallRuntime(Runtime::kAppendElement,
- append_element_args,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ append_element_args, kNoSourcePosition),
+ kNoSourcePosition);
}
// for (each of spread) %AppendElement($R, each)
ForEachStatement* loop = factory()->NewForEachStatement(
- ForEachStatement::ITERATE, nullptr, RelocInfo::kNoPosition);
+ ForEachStatement::ITERATE, nullptr, kNoSourcePosition);
+ const bool finalize = false;
InitializeForOfStatement(loop->AsForOfStatement(),
factory()->NewVariableProxy(each), subject,
- append_body, spread->expression_position());
+ append_body, finalize);
do_block->statements()->Add(loop, zone());
}
}
@@ -5616,21 +5866,18 @@ Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
return factory()->NewDoExpression(do_block, result, lit->position());
}
-
-void ParserTraits::QueueDestructuringAssignmentForRewriting(Expression* expr) {
+void Parser::QueueDestructuringAssignmentForRewriting(Expression* expr) {
DCHECK(expr->IsRewritableExpression());
- parser_->function_state_->AddDestructuringAssignment(
- Parser::DestructuringAssignment(expr, parser_->scope_));
+ function_state_->AddDestructuringAssignment(
+ DestructuringAssignment(expr, delegate()->scope()));
}
-
-void ParserTraits::QueueNonPatternForRewriting(Expression* expr) {
+void Parser::QueueNonPatternForRewriting(Expression* expr, bool* ok) {
DCHECK(expr->IsRewritableExpression());
- parser_->function_state_->AddNonPatternForRewriting(expr);
+ function_state_->AddNonPatternForRewriting(expr, ok);
}
-
-void ParserTraits::SetFunctionNameFromPropertyName(
+void ParserBaseTraits<Parser>::SetFunctionNameFromPropertyName(
ObjectLiteralProperty* property, const AstRawString* name) {
Expression* value = property->value();
@@ -5646,46 +5893,38 @@ void ParserTraits::SetFunctionNameFromPropertyName(
if (is_getter || is_setter) {
DCHECK_NOT_NULL(name);
const AstRawString* prefix =
- is_getter ? parser_->ast_value_factory()->get_space_string()
- : parser_->ast_value_factory()->set_space_string();
+ is_getter ? delegate()->ast_value_factory()->get_space_string()
+ : delegate()->ast_value_factory()->set_space_string();
function->set_raw_name(
- parser_->ast_value_factory()->NewConsString(prefix, name));
+ delegate()->ast_value_factory()->NewConsString(prefix, name));
return;
}
}
- if (!value->IsAnonymousFunctionDefinition()) return;
- DCHECK_NOT_NULL(name);
-
// Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
// of an object literal.
if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
- if (function != nullptr) {
- function->set_raw_name(name);
- DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
- } else {
- DCHECK(value->IsClassLiteral());
- DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
- value->AsClassLiteral()->constructor()->set_raw_name(name);
- }
+ DCHECK(!value->IsAnonymousFunctionDefinition() ||
+ property->kind() == ObjectLiteralProperty::COMPUTED);
+ delegate()->SetFunctionName(value, name);
}
-
-void ParserTraits::SetFunctionNameFromIdentifierRef(Expression* value,
- Expression* identifier) {
- if (!value->IsAnonymousFunctionDefinition()) return;
+void ParserBaseTraits<Parser>::SetFunctionNameFromIdentifierRef(
+ Expression* value, Expression* identifier) {
if (!identifier->IsVariableProxy()) return;
+ delegate()->SetFunctionName(value, identifier->AsVariableProxy()->raw_name());
+}
- auto name = identifier->AsVariableProxy()->raw_name();
+void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
DCHECK_NOT_NULL(name);
-
+ if (!value->IsAnonymousFunctionDefinition()) return;
auto function = value->AsFunctionLiteral();
if (function != nullptr) {
function->set_raw_name(name);
} else {
- DCHECK(value->IsClassLiteral());
- value->AsClassLiteral()->constructor()->set_raw_name(name);
+ DCHECK(value->IsDoExpression());
+ value->AsDoExpression()->represented_function()->set_raw_name(name);
}
}
@@ -5749,15 +5988,19 @@ void ParserTraits::SetFunctionNameFromIdentifierRef(Expression* value,
// }
// }
//
-// output.value;
+// if (mode === kReturn) {
+// return {value: output.value, done: true};
+// }
+// output.value
// }
//
// IteratorClose(iterator) expands to the following:
//
// let iteratorReturn = iterator.return;
-// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return;
-// let output = %_Call(iteratorReturn, iterator);
-// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+// if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+// let output = %_Call(iteratorReturn, iterator);
+// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+// }
//
// IteratorClose(iterator, input, output) expands to the following:
//
@@ -5766,607 +6009,431 @@ void ParserTraits::SetFunctionNameFromIdentifierRef(Expression* value,
// output = %_Call(iteratorReturn, iterator, input);
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
-
-Expression* ParserTraits::RewriteYieldStar(
- Expression* generator, Expression* iterable, int pos) {
-
- const int nopos = RelocInfo::kNoPosition;
-
- auto factory = parser_->factory();
- auto avfactory = parser_->ast_value_factory();
- auto scope = parser_->scope_;
- auto zone = parser_->zone();
-
+Expression* Parser::RewriteYieldStar(Expression* generator,
+ Expression* iterable, int pos) {
+ const int nopos = kNoSourcePosition;
// Forward definition for break/continue statements.
- WhileStatement* loop = factory->NewWhileStatement(nullptr, nopos);
-
+ WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
// let input = undefined;
- Variable* var_input = scope->NewTemporary(avfactory->empty_string());
+ Variable* var_input = NewTemporary(ast_value_factory()->empty_string());
Statement* initialize_input;
{
- Expression* input_proxy = factory->NewVariableProxy(var_input);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, input_proxy, factory->NewUndefinedLiteral(nopos), nopos);
- initialize_input = factory->NewExpressionStatement(assignment, nopos);
+ Expression* input_proxy = factory()->NewVariableProxy(var_input);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, input_proxy,
+ factory()->NewUndefinedLiteral(nopos), nopos);
+ initialize_input = factory()->NewExpressionStatement(assignment, nopos);
}
-
// let mode = kNext;
- Variable* var_mode = scope->NewTemporary(avfactory->empty_string());
+ Variable* var_mode = NewTemporary(ast_value_factory()->empty_string());
Statement* initialize_mode;
{
- Expression* mode_proxy = factory->NewVariableProxy(var_mode);
- Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+ Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
+ Expression* knext =
+ factory()->NewSmiLiteral(JSGeneratorObject::kNext, nopos);
Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
- initialize_mode = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
+ initialize_mode = factory()->NewExpressionStatement(assignment, nopos);
}
-
// let output = undefined;
- Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+ Variable* var_output = NewTemporary(ast_value_factory()->empty_string());
Statement* initialize_output;
{
- Expression* output_proxy = factory->NewVariableProxy(var_output);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, output_proxy, factory->NewUndefinedLiteral(nopos),
- nopos);
- initialize_output = factory->NewExpressionStatement(assignment, nopos);
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, output_proxy,
+ factory()->NewUndefinedLiteral(nopos), nopos);
+ initialize_output = factory()->NewExpressionStatement(assignment, nopos);
}
-
// let iterator = iterable[Symbol.iterator];
- Variable* var_iterator = scope->NewTemporary(avfactory->empty_string());
+ Variable* var_iterator = NewTemporary(ast_value_factory()->empty_string());
Statement* get_iterator;
{
- Expression* iterator = GetIterator(iterable, factory, nopos);
- Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
- Expression* assignment = factory->NewAssignment(
+ Expression* iterator = GetIterator(iterable, factory(), nopos);
+ Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
+ Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, iterator_proxy, iterator, nopos);
- get_iterator = factory->NewExpressionStatement(assignment, nopos);
+ get_iterator = factory()->NewExpressionStatement(assignment, nopos);
}
-
// if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
Statement* validate_iterator;
{
Expression* is_receiver_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_iterator), zone);
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_iterator), zone());
is_receiver_call =
- factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
}
Statement* throw_call;
{
- Expression* call = NewThrowTypeError(
- MessageTemplate::kSymbolIteratorInvalid, avfactory->empty_string(),
- nopos);
- throw_call = factory->NewExpressionStatement(call, nopos);
+ Expression* call =
+ NewThrowTypeError(MessageTemplate::kSymbolIteratorInvalid,
+ ast_value_factory()->empty_string(), nopos);
+ throw_call = factory()->NewExpressionStatement(call, nopos);
}
- validate_iterator = factory->NewIfStatement(
- is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ validate_iterator = factory()->NewIfStatement(
+ is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
+ nopos);
}
-
// output = iterator.next(input);
Statement* call_next;
{
- Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+ Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
Expression* literal =
- factory->NewStringLiteral(avfactory->next_string(), nopos);
+ factory()->NewStringLiteral(ast_value_factory()->next_string(), nopos);
Expression* next_property =
- factory->NewProperty(iterator_proxy, literal, nopos);
- Expression* input_proxy = factory->NewVariableProxy(var_input);
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(input_proxy, zone);
- Expression* call = factory->NewCall(next_property, args, nopos);
- Expression* output_proxy = factory->NewVariableProxy(var_output);
+ factory()->NewProperty(iterator_proxy, literal, nopos);
+ Expression* input_proxy = factory()->NewVariableProxy(var_input);
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(input_proxy, zone());
+ Expression* call = factory()->NewCall(next_property, args, nopos);
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
- call_next = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
+ call_next = factory()->NewExpressionStatement(assignment, nopos);
}
-
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
Statement* validate_next_output;
{
Expression* is_receiver_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
is_receiver_call =
- factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
}
Statement* throw_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
- Expression* call = factory->NewCallRuntime(
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
+ Expression* call = factory()->NewCallRuntime(
Runtime::kThrowIteratorResultNotAnObject, args, nopos);
- throw_call = factory->NewExpressionStatement(call, nopos);
+ throw_call = factory()->NewExpressionStatement(call, nopos);
}
- validate_next_output = factory->NewIfStatement(
- is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ validate_next_output = factory()->NewIfStatement(
+ is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
+ nopos);
}
-
// let iteratorThrow = iterator.throw;
- Variable* var_throw = scope->NewTemporary(avfactory->empty_string());
+ Variable* var_throw = NewTemporary(ast_value_factory()->empty_string());
Statement* get_throw;
{
- Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+ Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
Expression* literal =
- factory->NewStringLiteral(avfactory->throw_string(), nopos);
+ factory()->NewStringLiteral(ast_value_factory()->throw_string(), nopos);
Expression* property =
- factory->NewProperty(iterator_proxy, literal, nopos);
- Expression* throw_proxy = factory->NewVariableProxy(var_throw);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, throw_proxy, property, nopos);
- get_throw = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewProperty(iterator_proxy, literal, nopos);
+ Expression* throw_proxy = factory()->NewVariableProxy(var_throw);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, throw_proxy, property, nopos);
+ get_throw = factory()->NewExpressionStatement(assignment, nopos);
}
-
// if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
// IteratorClose(iterator);
// throw MakeTypeError(kThrowMethodMissing);
// }
Statement* check_throw;
{
- Expression* condition = factory->NewCompareOperation(
- Token::EQ, factory->NewVariableProxy(var_throw),
- factory->NewNullLiteral(nopos), nopos);
-
- Expression* call = NewThrowTypeError(
- MessageTemplate::kThrowMethodMissing,
- avfactory->empty_string(), nopos);
- Statement* throw_call = factory->NewExpressionStatement(call, nopos);
+ Expression* condition = factory()->NewCompareOperation(
+ Token::EQ, factory()->NewVariableProxy(var_throw),
+ factory()->NewNullLiteral(nopos), nopos);
+ Expression* call =
+ NewThrowTypeError(MessageTemplate::kThrowMethodMissing,
+ ast_value_factory()->empty_string(), nopos);
+ Statement* throw_call = factory()->NewExpressionStatement(call, nopos);
- Block* then = factory->NewBlock(nullptr, 4+1, false, nopos);
- Variable* var_tmp = scope->NewTemporary(avfactory->empty_string());
- BuildIteratorClose(then->statements(), var_iterator, Nothing<Variable*>(),
- var_tmp);
- then->statements()->Add(throw_call, zone);
- check_throw = factory->NewIfStatement(
- condition, then, factory->NewEmptyStatement(nopos), nopos);
+ Block* then = factory()->NewBlock(nullptr, 4 + 1, false, nopos);
+ BuildIteratorCloseForCompletion(
+ then->statements(), var_iterator,
+ factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos));
+ then->statements()->Add(throw_call, zone());
+ check_throw = factory()->NewIfStatement(
+ condition, then, factory()->NewEmptyStatement(nopos), nopos);
}
-
// output = %_Call(iteratorThrow, iterator, input);
Statement* call_throw;
{
- auto args = new (zone) ZoneList<Expression*>(3, zone);
- args->Add(factory->NewVariableProxy(var_throw), zone);
- args->Add(factory->NewVariableProxy(var_iterator), zone);
- args->Add(factory->NewVariableProxy(var_input), zone);
+ auto args = new (zone()) ZoneList<Expression*>(3, zone());
+ args->Add(factory()->NewVariableProxy(var_throw), zone());
+ args->Add(factory()->NewVariableProxy(var_iterator), zone());
+ args->Add(factory()->NewVariableProxy(var_input), zone());
Expression* call =
- factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, factory->NewVariableProxy(var_output), call, nopos);
- call_throw = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(var_output), call, nopos);
+ call_throw = factory()->NewExpressionStatement(assignment, nopos);
}
-
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
Statement* validate_throw_output;
{
Expression* is_receiver_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
is_receiver_call =
- factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
}
Statement* throw_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
- Expression* call = factory->NewCallRuntime(
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
+ Expression* call = factory()->NewCallRuntime(
Runtime::kThrowIteratorResultNotAnObject, args, nopos);
- throw_call = factory->NewExpressionStatement(call, nopos);
+ throw_call = factory()->NewExpressionStatement(call, nopos);
}
- validate_throw_output = factory->NewIfStatement(
- is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ validate_throw_output = factory()->NewIfStatement(
+ is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
+ nopos);
}
-
// if (output.done) break;
Statement* if_done;
{
- Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
Expression* literal =
- factory->NewStringLiteral(avfactory->done_string(), nopos);
- Expression* property = factory->NewProperty(output_proxy, literal, nopos);
- BreakStatement* break_loop = factory->NewBreakStatement(loop, nopos);
- if_done = factory->NewIfStatement(
- property, break_loop, factory->NewEmptyStatement(nopos), nopos);
+ factory()->NewStringLiteral(ast_value_factory()->done_string(), nopos);
+ Expression* property = factory()->NewProperty(output_proxy, literal, nopos);
+ BreakStatement* break_loop = factory()->NewBreakStatement(loop, nopos);
+ if_done = factory()->NewIfStatement(
+ property, break_loop, factory()->NewEmptyStatement(nopos), nopos);
}
// mode = kReturn;
Statement* set_mode_return;
{
- Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+ Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
Expression* kreturn =
- factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+ factory()->NewSmiLiteral(JSGeneratorObject::kReturn, nopos);
Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, mode_proxy, kreturn, nopos);
- set_mode_return = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewAssignment(Token::ASSIGN, mode_proxy, kreturn, nopos);
+ set_mode_return = factory()->NewExpressionStatement(assignment, nopos);
}
// Yield(output);
Statement* yield_output;
{
- Expression* output_proxy = factory->NewVariableProxy(var_output);
- Yield* yield = factory->NewYield(generator, output_proxy, nopos);
- yield_output = factory->NewExpressionStatement(yield, nopos);
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
+ Yield* yield = factory()->NewYield(generator, output_proxy, nopos,
+ Yield::kOnExceptionThrow);
+ yield_output = factory()->NewExpressionStatement(yield, nopos);
}
-
// mode = kNext;
Statement* set_mode_next;
{
- Expression* mode_proxy = factory->NewVariableProxy(var_mode);
- Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+ Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
+ Expression* knext =
+ factory()->NewSmiLiteral(JSGeneratorObject::kNext, nopos);
Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
- set_mode_next = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
+ set_mode_next = factory()->NewExpressionStatement(assignment, nopos);
}
-
// mode = kThrow;
Statement* set_mode_throw;
{
- Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+ Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
Expression* kthrow =
- factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
+ factory()->NewSmiLiteral(JSGeneratorObject::kThrow, nopos);
Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, mode_proxy, kthrow, nopos);
- set_mode_throw = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewAssignment(Token::ASSIGN, mode_proxy, kthrow, nopos);
+ set_mode_throw = factory()->NewExpressionStatement(assignment, nopos);
}
-
// input = function.sent;
Statement* get_input;
{
- Expression* function_sent = FunctionSentExpression(scope, factory, nopos);
- Expression* input_proxy = factory->NewVariableProxy(var_input);
- Expression* assignment = factory->NewAssignment(
+ Expression* function_sent = FunctionSentExpression(factory(), nopos);
+ Expression* input_proxy = factory()->NewVariableProxy(var_input);
+ Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, input_proxy, function_sent, nopos);
- get_input = factory->NewExpressionStatement(assignment, nopos);
+ get_input = factory()->NewExpressionStatement(assignment, nopos);
}
+ // if (mode === kReturn) {
+ // return {value: output.value, done: true};
+ // }
+ Statement* maybe_return_value;
+ {
+ Expression* mode_proxy = factory()->NewVariableProxy(var_mode);
+ Expression* kreturn =
+ factory()->NewSmiLiteral(JSGeneratorObject::kReturn, nopos);
+ Expression* condition = factory()->NewCompareOperation(
+ Token::EQ_STRICT, mode_proxy, kreturn, nopos);
- // output.value;
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
+ Expression* literal =
+ factory()->NewStringLiteral(ast_value_factory()->value_string(), nopos);
+ Expression* property = factory()->NewProperty(output_proxy, literal, nopos);
+ Statement* return_value = factory()->NewReturnStatement(
+ BuildIteratorResult(property, true), nopos);
+
+ maybe_return_value = factory()->NewIfStatement(
+ condition, return_value, factory()->NewEmptyStatement(nopos), nopos);
+ }
+
+ // output.value
Statement* get_value;
{
- Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
Expression* literal =
- factory->NewStringLiteral(avfactory->value_string(), nopos);
- Expression* property = factory->NewProperty(output_proxy, literal, nopos);
- get_value = factory->NewExpressionStatement(property, nopos);
+ factory()->NewStringLiteral(ast_value_factory()->value_string(), nopos);
+ Expression* property = factory()->NewProperty(output_proxy, literal, nopos);
+ get_value = factory()->NewExpressionStatement(property, nopos);
}
-
// Now put things together.
-
// try { ... } catch(e) { ... }
Statement* try_catch;
{
- Block* try_block = factory->NewBlock(nullptr, 2, false, nopos);
- try_block->statements()->Add(yield_output, zone);
- try_block->statements()->Add(set_mode_next, zone);
+ Block* try_block = factory()->NewBlock(nullptr, 2, false, nopos);
+ try_block->statements()->Add(yield_output, zone());
+ try_block->statements()->Add(set_mode_next, zone());
- Block* catch_block = factory->NewBlock(nullptr, 1, false, nopos);
- catch_block->statements()->Add(set_mode_throw, zone);
+ Block* catch_block = factory()->NewBlock(nullptr, 1, false, nopos);
+ catch_block->statements()->Add(set_mode_throw, zone());
- Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
- const AstRawString* name = avfactory->dot_catch_string();
+ Scope* catch_scope = NewScope(CATCH_SCOPE);
+ catch_scope->set_is_hidden();
+ const AstRawString* name = ast_value_factory()->dot_catch_string();
Variable* catch_variable =
catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
Variable::NORMAL);
- try_catch = factory->NewTryCatchStatement(
+ try_catch = factory()->NewTryCatchStatementForDesugaring(
try_block, catch_scope, catch_variable, catch_block, nopos);
}
-
// try { ... } finally { ... }
Statement* try_finally;
{
- Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(try_catch, zone);
+ Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(try_catch, zone());
- Block* finally = factory->NewBlock(nullptr, 2, false, nopos);
- finally->statements()->Add(get_input, zone);
- finally->statements()->Add(
- factory->NewContinueStatement(loop, nopos), zone);
+ Block* finally = factory()->NewBlock(nullptr, 2, false, nopos);
+ finally->statements()->Add(get_input, zone());
+ finally->statements()->Add(factory()->NewContinueStatement(loop, nopos),
+ zone());
- try_finally = factory->NewTryFinallyStatement(try_block, finally, nopos);
+ try_finally = factory()->NewTryFinallyStatement(try_block, finally, nopos);
}
-
// switch (mode) { ... }
- SwitchStatement* switch_mode = factory->NewSwitchStatement(nullptr, nopos);
+ SwitchStatement* switch_mode = factory()->NewSwitchStatement(nullptr, nopos);
{
- auto case_next = new (zone) ZoneList<Statement*>(3, zone);
- case_next->Add(call_next, zone);
- case_next->Add(validate_next_output, zone);
- case_next->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
-
- auto case_return = new (zone) ZoneList<Statement*>(5, zone);
- BuildIteratorClose(case_return, var_iterator, Just(var_input), var_output);
- case_return->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
-
- auto case_throw = new (zone) ZoneList<Statement*>(5, zone);
- case_throw->Add(get_throw, zone);
- case_throw->Add(check_throw, zone);
- case_throw->Add(call_throw, zone);
- case_throw->Add(validate_throw_output, zone);
- case_throw->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
-
- auto cases = new (zone) ZoneList<CaseClause*>(3, zone);
- Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+ auto case_next = new (zone()) ZoneList<Statement*>(3, zone());
+ case_next->Add(call_next, zone());
+ case_next->Add(validate_next_output, zone());
+ case_next->Add(factory()->NewBreakStatement(switch_mode, nopos), zone());
+
+ auto case_return = new (zone()) ZoneList<Statement*>(5, zone());
+ BuildIteratorClose(case_return, var_iterator, var_input, var_output);
+ case_return->Add(factory()->NewBreakStatement(switch_mode, nopos), zone());
+
+ auto case_throw = new (zone()) ZoneList<Statement*>(5, zone());
+ case_throw->Add(get_throw, zone());
+ case_throw->Add(check_throw, zone());
+ case_throw->Add(call_throw, zone());
+ case_throw->Add(validate_throw_output, zone());
+ case_throw->Add(factory()->NewBreakStatement(switch_mode, nopos), zone());
+
+ auto cases = new (zone()) ZoneList<CaseClause*>(3, zone());
+ Expression* knext =
+ factory()->NewSmiLiteral(JSGeneratorObject::kNext, nopos);
Expression* kreturn =
- factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+ factory()->NewSmiLiteral(JSGeneratorObject::kReturn, nopos);
Expression* kthrow =
- factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
- cases->Add(factory->NewCaseClause(knext, case_next, nopos), zone);
- cases->Add(factory->NewCaseClause(kreturn, case_return, nopos), zone);
- cases->Add(factory->NewCaseClause(kthrow, case_throw, nopos), zone);
+ factory()->NewSmiLiteral(JSGeneratorObject::kThrow, nopos);
+ cases->Add(factory()->NewCaseClause(knext, case_next, nopos), zone());
+ cases->Add(factory()->NewCaseClause(kreturn, case_return, nopos), zone());
+ cases->Add(factory()->NewCaseClause(kthrow, case_throw, nopos), zone());
- switch_mode->Initialize(factory->NewVariableProxy(var_mode), cases);
+ switch_mode->Initialize(factory()->NewVariableProxy(var_mode), cases);
}
-
// while (true) { ... }
// Already defined earlier: WhileStatement* loop = ...
{
- Block* loop_body = factory->NewBlock(nullptr, 4, false, nopos);
- loop_body->statements()->Add(switch_mode, zone);
- loop_body->statements()->Add(if_done, zone);
- loop_body->statements()->Add(set_mode_return, zone);
- loop_body->statements()->Add(try_finally, zone);
+ Block* loop_body = factory()->NewBlock(nullptr, 4, false, nopos);
+ loop_body->statements()->Add(switch_mode, zone());
+ loop_body->statements()->Add(if_done, zone());
+ loop_body->statements()->Add(set_mode_return, zone());
+ loop_body->statements()->Add(try_finally, zone());
- loop->Initialize(factory->NewBooleanLiteral(true, nopos), loop_body);
+ loop->Initialize(factory()->NewBooleanLiteral(true, nopos), loop_body);
}
-
// do { ... }
DoExpression* yield_star;
{
// The rewriter needs to process the get_value statement only, hence we
// put the preceding statements into an init block.
- Block* do_block_ = factory->NewBlock(nullptr, 6, true, nopos);
- do_block_->statements()->Add(initialize_input, zone);
- do_block_->statements()->Add(initialize_mode, zone);
- do_block_->statements()->Add(initialize_output, zone);
- do_block_->statements()->Add(get_iterator, zone);
- do_block_->statements()->Add(validate_iterator, zone);
- do_block_->statements()->Add(loop, zone);
+ Block* do_block_ = factory()->NewBlock(nullptr, 7, true, nopos);
+ do_block_->statements()->Add(initialize_input, zone());
+ do_block_->statements()->Add(initialize_mode, zone());
+ do_block_->statements()->Add(initialize_output, zone());
+ do_block_->statements()->Add(get_iterator, zone());
+ do_block_->statements()->Add(validate_iterator, zone());
+ do_block_->statements()->Add(loop, zone());
+ do_block_->statements()->Add(maybe_return_value, zone());
- Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
- do_block->statements()->Add(do_block_, zone);
- do_block->statements()->Add(get_value, zone);
+ Block* do_block = factory()->NewBlock(nullptr, 2, false, nopos);
+ do_block->statements()->Add(do_block_, zone());
+ do_block->statements()->Add(get_value, zone());
- Variable* dot_result = scope->NewTemporary(avfactory->dot_result_string());
- yield_star = factory->NewDoExpression(do_block, dot_result, nopos);
- Rewriter::Rewrite(parser_, yield_star, avfactory);
+ Variable* dot_result =
+ NewTemporary(ast_value_factory()->dot_result_string());
+ yield_star = factory()->NewDoExpression(do_block, dot_result, nopos);
+ Rewriter::Rewrite(this, GetClosureScope(), yield_star, ast_value_factory());
}
return yield_star;
}
-// Desugaring of (lhs) instanceof (rhs)
-// ====================================
-//
-// We desugar instanceof into a load of property @@hasInstance on the rhs.
-// We end up with roughly the following code (O, C):
-//
-// do {
-// let O = lhs;
-// let C = rhs;
-// if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
-// let handler_result = C[Symbol.hasInstance];
-// if (handler_result === undefined) {
-// if (!IS_CALLABLE(C)) {
-// throw MakeTypeError(kCalledNonCallableInstanceOf);
-// }
-// handler_result = %_GetOrdinaryHasInstance()
-// handler_result = %_Call(handler_result, C, O);
-// } else {
-// handler_result = !!(%_Call(handler_result, C, O));
-// }
-// handler_result;
-// }
-//
-Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
- int pos) {
- const int nopos = RelocInfo::kNoPosition;
-
- auto factory = parser_->factory();
- auto avfactory = parser_->ast_value_factory();
- auto scope = parser_->scope_;
- auto zone = parser_->zone();
-
- // let O = lhs;
- Variable* var_O = scope->NewTemporary(avfactory->empty_string());
- Statement* get_O;
- {
- Expression* O_proxy = factory->NewVariableProxy(var_O);
- Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, O_proxy, lhs, nopos);
- get_O = factory->NewExpressionStatement(assignment, nopos);
- }
-
- // let C = lhs;
- Variable* var_C = scope->NewTemporary(avfactory->empty_string());
- Statement* get_C;
- {
- Expression* C_proxy = factory->NewVariableProxy(var_C);
- Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, C_proxy, rhs, nopos);
- get_C = factory->NewExpressionStatement(assignment, nopos);
- }
-
- // if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
- Statement* validate_C;
- {
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_C), zone);
- Expression* is_receiver_call =
- factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
- Expression* call =
- NewThrowTypeError(MessageTemplate::kNonObjectInInstanceOfCheck,
- avfactory->empty_string(), pos);
- Statement* throw_call = factory->NewExpressionStatement(call, pos);
-
- validate_C =
- factory->NewIfStatement(is_receiver_call,
- factory->NewEmptyStatement(nopos),
- throw_call,
- nopos);
- }
-
- // let handler_result = C[Symbol.hasInstance];
- Variable* var_handler_result = scope->NewTemporary(avfactory->empty_string());
- Statement* initialize_handler;
- {
- Expression* hasInstance_symbol_literal =
- factory->NewSymbolLiteral("hasInstance_symbol", RelocInfo::kNoPosition);
- Expression* prop = factory->NewProperty(factory->NewVariableProxy(var_C),
- hasInstance_symbol_literal, pos);
- Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
- Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, handler_proxy, prop, nopos);
- initialize_handler = factory->NewExpressionStatement(assignment, nopos);
- }
-
- // if (handler_result === undefined) {
- // if (!IS_CALLABLE(C)) {
- // throw MakeTypeError(kCalledNonCallableInstanceOf);
- // }
- // handler_result = %_GetOrdinaryHasInstance()
- // handler_result = %_Call(handler_result, C, O);
- // } else {
- // handler_result = !!%_Call(handler_result, C, O);
- // }
- Statement* call_handler;
- {
- Expression* condition = factory->NewCompareOperation(
- Token::EQ_STRICT, factory->NewVariableProxy(var_handler_result),
- factory->NewUndefinedLiteral(nopos), nopos);
-
- Block* then_side = factory->NewBlock(nullptr, 3, false, nopos);
- {
- Expression* throw_expr =
- NewThrowTypeError(MessageTemplate::kCalledNonCallableInstanceOf,
- avfactory->empty_string(), pos);
- Statement* validate_C = CheckCallable(var_C, throw_expr, pos);
-
- ZoneList<Expression*>* empty_args =
- new (zone) ZoneList<Expression*>(0, zone);
- Expression* ordinary_has_instance = factory->NewCallRuntime(
- Runtime::kInlineGetOrdinaryHasInstance, empty_args, pos);
- Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
- Expression* assignment_handler = factory->NewAssignment(
- Token::ASSIGN, handler_proxy, ordinary_has_instance, nopos);
- Statement* assignment_get_handler =
- factory->NewExpressionStatement(assignment_handler, nopos);
-
- ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(3, zone);
- args->Add(factory->NewVariableProxy(var_handler_result), zone);
- args->Add(factory->NewVariableProxy(var_C), zone);
- args->Add(factory->NewVariableProxy(var_O), zone);
- Expression* call =
- factory->NewCallRuntime(Runtime::kInlineCall, args, pos);
- Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
- Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, result_proxy, call, nopos);
- Statement* assignment_return =
- factory->NewExpressionStatement(assignment, nopos);
-
- then_side->statements()->Add(validate_C, zone);
- then_side->statements()->Add(assignment_get_handler, zone);
- then_side->statements()->Add(assignment_return, zone);
- }
-
- Statement* else_side;
- {
- auto args = new (zone) ZoneList<Expression*>(3, zone);
- args->Add(factory->NewVariableProxy(var_handler_result), zone);
- args->Add(factory->NewVariableProxy(var_C), zone);
- args->Add(factory->NewVariableProxy(var_O), zone);
- Expression* call =
- factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- Expression* inner_not =
- factory->NewUnaryOperation(Token::NOT, call, nopos);
- Expression* outer_not =
- factory->NewUnaryOperation(Token::NOT, inner_not, nopos);
- Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
- Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, result_proxy, outer_not, nopos);
-
- else_side = factory->NewExpressionStatement(assignment, nopos);
- }
- call_handler =
- factory->NewIfStatement(condition, then_side, else_side, nopos);
- }
-
- // do { ... }
- DoExpression* instanceof;
- {
- Block* block = factory->NewBlock(nullptr, 5, true, nopos);
- block->statements()->Add(get_O, zone);
- block->statements()->Add(get_C, zone);
- block->statements()->Add(validate_C, zone);
- block->statements()->Add(initialize_handler, zone);
- block->statements()->Add(call_handler, zone);
-
- // Here is the desugared instanceof.
- instanceof = factory->NewDoExpression(block, var_handler_result, nopos);
- Rewriter::Rewrite(parser_, instanceof, avfactory);
- }
-
- return instanceof;
-}
-
-Statement* ParserTraits::CheckCallable(Variable* var, Expression* error,
- int pos) {
- auto factory = parser_->factory();
- auto avfactory = parser_->ast_value_factory();
- const int nopos = RelocInfo::kNoPosition;
+Statement* Parser::CheckCallable(Variable* var, Expression* error, int pos) {
+ const int nopos = kNoSourcePosition;
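+ // Emits: if (typeof <var> === 'function') {} else <error-expression>;
+ // so the error expression is evaluated (and throws) for non-callables.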
Statement* validate_var;
{
- Expression* type_of = factory->NewUnaryOperation(
- Token::TYPEOF, factory->NewVariableProxy(var), nopos);
- Expression* function_literal =
- factory->NewStringLiteral(avfactory->function_string(), nopos);
- Expression* condition = factory->NewCompareOperation(
+ Expression* type_of = factory()->NewUnaryOperation(
+ Token::TYPEOF, factory()->NewVariableProxy(var), nopos);
+ Expression* function_literal = factory()->NewStringLiteral(
+ ast_value_factory()->function_string(), nopos);
+ Expression* condition = factory()->NewCompareOperation(
Token::EQ_STRICT, type_of, function_literal, nopos);
- Statement* throw_call = factory->NewExpressionStatement(error, pos);
+ Statement* throw_call = factory()->NewExpressionStatement(error, pos);
- validate_var = factory->NewIfStatement(
- condition, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ validate_var = factory()->NewIfStatement(
+ condition, factory()->NewEmptyStatement(nopos), throw_call, nopos);
}
return validate_var;
}
-void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
- Variable* iterator,
- Maybe<Variable*> input,
- Variable* var_output) {
+void Parser::BuildIteratorClose(ZoneList<Statement*>* statements,
+ Variable* iterator, Variable* input,
+ Variable* var_output) {
//
// This function adds four statements to [statements], corresponding to the
// following code:
@@ -6378,33 +6445,22 @@ void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
// output = %_Call(iteratorReturn, iterator, input);
// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
//
- // When the input variable is not given, the return statement becomes
- // return {value: undefined, done: true};
- // and %_Call has only two arguments:
- // output = %_Call(iteratorReturn, iterator);
- //
- // The reason for allowing input is that BuildIteratorClose
- // can then be reused to handle the return case in yield*.
- //
- const int nopos = RelocInfo::kNoPosition;
- auto factory = parser_->factory();
- auto avfactory = parser_->ast_value_factory();
- auto zone = parser_->zone();
+ const int nopos = kNoSourcePosition;
// let iteratorReturn = iterator.return;
Variable* var_return = var_output; // Reusing the output variable.
Statement* get_return;
{
- Expression* iterator_proxy = factory->NewVariableProxy(iterator);
- Expression* literal =
- factory->NewStringLiteral(avfactory->return_string(), nopos);
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ Expression* literal = factory()->NewStringLiteral(
+ ast_value_factory()->return_string(), nopos);
Expression* property =
- factory->NewProperty(iterator_proxy, literal, nopos);
- Expression* return_proxy = factory->NewVariableProxy(var_return);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, return_proxy, property, nopos);
- get_return = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewProperty(iterator_proxy, literal, nopos);
+ Expression* return_proxy = factory()->NewVariableProxy(var_return);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, return_proxy, property, nopos);
+ get_return = factory()->NewExpressionStatement(assignment, nopos);
}
// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) {
@@ -6412,38 +6468,33 @@ void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
// }
Statement* check_return;
{
- Expression* condition = factory->NewCompareOperation(
- Token::EQ, factory->NewVariableProxy(var_return),
- factory->NewNullLiteral(nopos), nopos);
+ Expression* condition = factory()->NewCompareOperation(
+ Token::EQ, factory()->NewVariableProxy(var_return),
+ factory()->NewNullLiteral(nopos), nopos);
- Expression* value = input.IsJust()
- ? static_cast<Expression*>(
- factory->NewVariableProxy(input.FromJust()))
- : factory->NewUndefinedLiteral(nopos);
+ Expression* value = factory()->NewVariableProxy(input);
Statement* return_input =
- factory->NewReturnStatement(BuildIteratorResult(value, true), nopos);
+ factory()->NewReturnStatement(BuildIteratorResult(value, true), nopos);
- check_return = factory->NewIfStatement(
- condition, return_input, factory->NewEmptyStatement(nopos), nopos);
+ check_return = factory()->NewIfStatement(
+ condition, return_input, factory()->NewEmptyStatement(nopos), nopos);
}
// output = %_Call(iteratorReturn, iterator, input);
Statement* call_return;
{
- auto args = new (zone) ZoneList<Expression*>(3, zone);
- args->Add(factory->NewVariableProxy(var_return), zone);
- args->Add(factory->NewVariableProxy(iterator), zone);
- if (input.IsJust()) {
- args->Add(factory->NewVariableProxy(input.FromJust()), zone);
- }
+ auto args = new (zone()) ZoneList<Expression*>(3, zone());
+ args->Add(factory()->NewVariableProxy(var_return), zone());
+ args->Add(factory()->NewVariableProxy(iterator), zone());
+ args->Add(factory()->NewVariableProxy(input), zone());
Expression* call =
- factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- Expression* output_proxy = factory->NewVariableProxy(var_output);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, output_proxy, call, nopos);
- call_return = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
+ call_return = factory()->NewExpressionStatement(assignment, nopos);
}
// if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
@@ -6451,36 +6502,35 @@ void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
{
Expression* is_receiver_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
is_receiver_call =
- factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
}
Statement* throw_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
- Expression* call = factory->NewCallRuntime(
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
+ Expression* call = factory()->NewCallRuntime(
Runtime::kThrowIteratorResultNotAnObject, args, nopos);
- throw_call = factory->NewExpressionStatement(call, nopos);
+ throw_call = factory()->NewExpressionStatement(call, nopos);
}
- validate_output = factory->NewIfStatement(
- is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ validate_output = factory()->NewIfStatement(
+ is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
+ nopos);
}
- statements->Add(get_return, zone);
- statements->Add(check_return, zone);
- statements->Add(call_return, zone);
- statements->Add(validate_output, zone);
+ statements->Add(get_return, zone());
+ statements->Add(check_return, zone());
+ statements->Add(call_return, zone());
+ statements->Add(validate_output, zone());
}
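
Taken together, the four synthesized statements behave like the following hand-written JavaScript. This is a sketch for illustration only: the function name is invented, and the %_Call and IS_RECEIVER intrinsics are approximated with Reflect.apply and an Object() identity test.

    function buildIteratorCloseSketch(iterator, input) {
      // let iteratorReturn = iterator.return;
      let iteratorReturn = iterator.return;
      // if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return {value: input, done: true};
      if (iteratorReturn == null) return { value: input, done: true };
      // output = %_Call(iteratorReturn, iterator, input);
      let output = Reflect.apply(iteratorReturn, iterator, [input]);
      // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
      if (Object(output) !== output) {
        throw new TypeError('iterator result is not an object');
      }
      return output; // var_output receives this in the generated AST
    }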
-void ParserTraits::FinalizeIteratorUse(Variable* completion,
- Expression* condition, Variable* iter,
- Block* iterator_use, Block* target) {
- if (!FLAG_harmony_iterator_close) return;
-
+void Parser::FinalizeIteratorUse(Variable* completion, Expression* condition,
+ Variable* iter, Block* iterator_use,
+ Block* target) {
//
// This function adds two statements to [target], corresponding to the
// following code:
@@ -6500,36 +6550,33 @@ void ParserTraits::FinalizeIteratorUse(Variable* completion,
// }
//
- const int nopos = RelocInfo::kNoPosition;
- auto factory = parser_->factory();
- auto avfactory = parser_->ast_value_factory();
- auto scope = parser_->scope_;
- auto zone = parser_->zone();
+ const int nopos = kNoSourcePosition;
// completion = kNormalCompletion;
Statement* initialize_completion;
{
- Expression* proxy = factory->NewVariableProxy(completion);
- Expression* assignment = factory->NewAssignment(
+ Expression* proxy = factory()->NewVariableProxy(completion);
+ Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, proxy,
- factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
- initialize_completion = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
+ initialize_completion =
+ factory()->NewExpressionStatement(assignment, nopos);
}
// if (completion === kAbruptCompletion) completion = kThrowCompletion;
Statement* set_completion_throw;
{
- Expression* condition = factory->NewCompareOperation(
- Token::EQ_STRICT, factory->NewVariableProxy(completion),
- factory->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
+ Expression* condition = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(completion),
+ factory()->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
- Expression* proxy = factory->NewVariableProxy(completion);
- Expression* assignment = factory->NewAssignment(
+ Expression* proxy = factory()->NewVariableProxy(completion);
+ Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, proxy,
- factory->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
- Statement* statement = factory->NewExpressionStatement(assignment, nopos);
- set_completion_throw = factory->NewIfStatement(
- condition, statement, factory->NewEmptyStatement(nopos), nopos);
+ factory()->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
+ Statement* statement = factory()->NewExpressionStatement(assignment, nopos);
+ set_completion_throw = factory()->NewIfStatement(
+ condition, statement, factory()->NewEmptyStatement(nopos), nopos);
}
// if (condition) {
@@ -6537,16 +6584,16 @@ void ParserTraits::FinalizeIteratorUse(Variable* completion,
// }
Block* maybe_close;
{
- Block* block = factory->NewBlock(nullptr, 2, true, nopos);
- parser_->BuildIteratorCloseForCompletion(block->statements(), iter,
- completion);
+ Block* block = factory()->NewBlock(nullptr, 2, true, nopos);
+ Expression* proxy = factory()->NewVariableProxy(completion);
+ BuildIteratorCloseForCompletion(block->statements(), iter, proxy);
DCHECK(block->statements()->length() == 2);
- maybe_close = factory->NewBlock(nullptr, 1, true, nopos);
+ maybe_close = factory()->NewBlock(nullptr, 1, true, nopos);
maybe_close->statements()->Add(
- factory->NewIfStatement(condition, block,
- factory->NewEmptyStatement(nopos), nopos),
- zone);
+ factory()->NewIfStatement(condition, block,
+ factory()->NewEmptyStatement(nopos), nopos),
+ zone());
}
// try { #try_block }
@@ -6556,10 +6603,11 @@ void ParserTraits::FinalizeIteratorUse(Variable* completion,
// }
Statement* try_catch;
{
- Scope* catch_scope = parser_->NewScope(scope, CATCH_SCOPE);
+ Scope* catch_scope = NewScopeWithParent(scope(), CATCH_SCOPE);
Variable* catch_variable =
- catch_scope->DeclareLocal(avfactory->dot_catch_string(), VAR,
+ catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
kCreatedInitialized, Variable::NORMAL);
+ catch_scope->set_is_hidden();
Statement* rethrow;
// We use %ReThrow rather than the ordinary throw because we want to
@@ -6567,37 +6615,37 @@ void ParserTraits::FinalizeIteratorUse(Variable* completion,
// TryCatchStatementForReThrow below (which does not clear the pending
// message), rather than a TryCatchStatement.
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(catch_variable), zone);
- rethrow = factory->NewExpressionStatement(
- factory->NewCallRuntime(Runtime::kReThrow, args, nopos), nopos);
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(catch_variable), zone());
+ rethrow = factory()->NewExpressionStatement(
+ factory()->NewCallRuntime(Runtime::kReThrow, args, nopos), nopos);
}
- Block* catch_block = factory->NewBlock(nullptr, 2, false, nopos);
- catch_block->statements()->Add(set_completion_throw, zone);
- catch_block->statements()->Add(rethrow, zone);
+ Block* catch_block = factory()->NewBlock(nullptr, 2, false, nopos);
+ catch_block->statements()->Add(set_completion_throw, zone());
+ catch_block->statements()->Add(rethrow, zone());
- try_catch = factory->NewTryCatchStatementForReThrow(
+ try_catch = factory()->NewTryCatchStatementForReThrow(
iterator_use, catch_scope, catch_variable, catch_block, nopos);
}
// try { #try_catch } finally { #maybe_close }
Statement* try_finally;
{
- Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(try_catch, zone);
+ Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(try_catch, zone());
try_finally =
- factory->NewTryFinallyStatement(try_block, maybe_close, nopos);
+ factory()->NewTryFinallyStatement(try_block, maybe_close, nopos);
}
- target->statements()->Add(initialize_completion, zone);
- target->statements()->Add(try_finally, zone);
+ target->statements()->Add(initialize_completion, zone());
+ target->statements()->Add(try_finally, zone());
}
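
The emitted wrapping corresponds to the JavaScript shape below. This is a sketch under the assumption that the iterator-using block mutates the completion temporary through a shared `state` object; withIteratorFinalization, iteratorUse, shouldClose, and closeIterator are illustrative names, and the plain `throw e` stands in for %ReThrow.

    const kNormalCompletion = 0;
    const kAbruptCompletion = 1;
    const kThrowCompletion = 2;

    function withIteratorFinalization(state, iteratorUse, shouldClose, closeIterator) {
      // completion = kNormalCompletion;
      state.completion = kNormalCompletion;
      try {
        try {
          iteratorUse(state); // #iterator_use
        } catch (e) {
          // if (completion === kAbruptCompletion) completion = kThrowCompletion;
          if (state.completion === kAbruptCompletion) {
            state.completion = kThrowCompletion;
          }
          throw e; // the generated code uses %ReThrow to keep the pending message
        }
      } finally {
        // if (condition) { #BuildIteratorCloseForCompletion }
        if (shouldClose(state)) closeIterator(state.completion);
      }
    }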
-void ParserTraits::BuildIteratorCloseForCompletion(
- ZoneList<Statement*>* statements, Variable* iterator,
- Variable* completion) {
+void Parser::BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+ Variable* iterator,
+ Expression* completion) {
//
// This function adds two statements to [statements], corresponding to the
// following code:
@@ -6618,26 +6666,20 @@ void ParserTraits::BuildIteratorCloseForCompletion(
// }
//
- const int nopos = RelocInfo::kNoPosition;
- auto factory = parser_->factory();
- auto avfactory = parser_->ast_value_factory();
- auto scope = parser_->scope_;
- auto zone = parser_->zone();
-
-
+ const int nopos = kNoSourcePosition;
// let iteratorReturn = iterator.return;
- Variable* var_return = scope->NewTemporary(avfactory->empty_string());
+ Variable* var_return = NewTemporary(ast_value_factory()->empty_string());
Statement* get_return;
{
- Expression* iterator_proxy = factory->NewVariableProxy(iterator);
- Expression* literal =
- factory->NewStringLiteral(avfactory->return_string(), nopos);
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ Expression* literal = factory()->NewStringLiteral(
+ ast_value_factory()->return_string(), nopos);
Expression* property =
- factory->NewProperty(iterator_proxy, literal, nopos);
- Expression* return_proxy = factory->NewVariableProxy(var_return);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, return_proxy, property, nopos);
- get_return = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewProperty(iterator_proxy, literal, nopos);
+ Expression* return_proxy = factory()->NewVariableProxy(var_return);
+ Expression* assignment =
+ factory()->NewAssignment(Token::ASSIGN, return_proxy, property, nopos);
+ get_return = factory()->NewExpressionStatement(assignment, nopos);
}
// if (!IS_CALLABLE(iteratorReturn)) {
@@ -6645,34 +6687,35 @@ void ParserTraits::BuildIteratorCloseForCompletion(
// }
Statement* check_return_callable;
{
- Expression* throw_expr = NewThrowTypeError(
- MessageTemplate::kReturnMethodNotCallable,
- avfactory->empty_string(), nopos);
+ Expression* throw_expr =
+ NewThrowTypeError(MessageTemplate::kReturnMethodNotCallable,
+ ast_value_factory()->empty_string(), nopos);
check_return_callable = CheckCallable(var_return, throw_expr, nopos);
}
// try { %_Call(iteratorReturn, iterator) } catch (_) { }
Statement* try_call_return;
{
- auto args = new (zone) ZoneList<Expression*>(2, zone);
- args->Add(factory->NewVariableProxy(var_return), zone);
- args->Add(factory->NewVariableProxy(iterator), zone);
+ auto args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewVariableProxy(var_return), zone());
+ args->Add(factory()->NewVariableProxy(iterator), zone());
Expression* call =
- factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(factory->NewExpressionStatement(call, nopos),
- zone);
+ Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(factory()->NewExpressionStatement(call, nopos),
+ zone());
- Block* catch_block = factory->NewBlock(nullptr, 0, false, nopos);
+ Block* catch_block = factory()->NewBlock(nullptr, 0, false, nopos);
- Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
- Variable* catch_variable = catch_scope->DeclareLocal(
- avfactory->dot_catch_string(), VAR, kCreatedInitialized,
- Variable::NORMAL);
+ Scope* catch_scope = NewScope(CATCH_SCOPE);
+ Variable* catch_variable =
+ catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
+ kCreatedInitialized, Variable::NORMAL);
+ catch_scope->set_is_hidden();
- try_call_return = factory->NewTryCatchStatement(
+ try_call_return = factory()->NewTryCatchStatement(
try_block, catch_scope, catch_variable, catch_block, nopos);
}
@@ -6682,44 +6725,45 @@ void ParserTraits::BuildIteratorCloseForCompletion(
// }
Block* validate_return;
{
- Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+ Variable* var_output = NewTemporary(ast_value_factory()->empty_string());
Statement* call_return;
{
- auto args = new (zone) ZoneList<Expression*>(2, zone);
- args->Add(factory->NewVariableProxy(var_return), zone);
- args->Add(factory->NewVariableProxy(iterator), zone);
+ auto args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(factory()->NewVariableProxy(var_return), zone());
+ args->Add(factory()->NewVariableProxy(iterator), zone());
Expression* call =
- factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
- Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* output_proxy = factory()->NewVariableProxy(var_output);
Expression* assignment =
- factory->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
- call_return = factory->NewExpressionStatement(assignment, nopos);
+ factory()->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
+ call_return = factory()->NewExpressionStatement(assignment, nopos);
}
Expression* is_receiver_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
is_receiver_call =
- factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
}
Statement* throw_call;
{
- auto args = new (zone) ZoneList<Expression*>(1, zone);
- args->Add(factory->NewVariableProxy(var_output), zone);
- Expression* call = factory->NewCallRuntime(
+ auto args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(factory()->NewVariableProxy(var_output), zone());
+ Expression* call = factory()->NewCallRuntime(
Runtime::kThrowIteratorResultNotAnObject, args, nopos);
- throw_call = factory->NewExpressionStatement(call, nopos);
+ throw_call = factory()->NewExpressionStatement(call, nopos);
}
- Statement* check_return = factory->NewIfStatement(
- is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ Statement* check_return = factory()->NewIfStatement(
+ is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
+ nopos);
- validate_return = factory->NewBlock(nullptr, 2, false, nopos);
- validate_return->statements()->Add(call_return, zone);
- validate_return->statements()->Add(check_return, zone);
+ validate_return = factory()->NewBlock(nullptr, 2, false, nopos);
+ validate_return->statements()->Add(call_return, zone());
+ validate_return->statements()->Add(check_return, zone());
}
// if (completion === kThrowCompletion) {
@@ -6730,44 +6774,40 @@ void ParserTraits::BuildIteratorCloseForCompletion(
// }
Statement* call_return_carefully;
{
- Expression* condition = factory->NewCompareOperation(
- Token::EQ_STRICT, factory->NewVariableProxy(completion),
- factory->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
+ Expression* condition = factory()->NewCompareOperation(
+ Token::EQ_STRICT, completion,
+ factory()->NewSmiLiteral(Parser::kThrowCompletion, nopos), nopos);
- Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
- then_block->statements()->Add(check_return_callable, zone);
- then_block->statements()->Add(try_call_return, zone);
+ Block* then_block = factory()->NewBlock(nullptr, 2, false, nopos);
+ then_block->statements()->Add(check_return_callable, zone());
+ then_block->statements()->Add(try_call_return, zone());
- call_return_carefully =
- factory->NewIfStatement(condition, then_block, validate_return, nopos);
+ call_return_carefully = factory()->NewIfStatement(condition, then_block,
+ validate_return, nopos);
}
// if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) { ... }
Statement* maybe_call_return;
{
- Expression* condition = factory->NewCompareOperation(
- Token::EQ, factory->NewVariableProxy(var_return),
- factory->NewNullLiteral(nopos), nopos);
+ Expression* condition = factory()->NewCompareOperation(
+ Token::EQ, factory()->NewVariableProxy(var_return),
+ factory()->NewNullLiteral(nopos), nopos);
- maybe_call_return =
- factory->NewIfStatement(condition, factory->NewEmptyStatement(nopos),
- call_return_carefully, nopos);
+ maybe_call_return = factory()->NewIfStatement(
+ condition, factory()->NewEmptyStatement(nopos), call_return_carefully,
+ nopos);
}
-
- statements->Add(get_return, zone);
- statements->Add(maybe_call_return, zone);
+ statements->Add(get_return, zone());
+ statements->Add(maybe_call_return, zone());
}
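
In JavaScript terms, and reusing the completion constants from the sketch above, the two synthesized statements amount to the following. IS_CALLABLE becomes a typeof test, %_Call becomes Reflect.apply, and the error messages are placeholders for the real message templates.

    function closeIteratorForCompletion(iterator, completion) {
      // let iteratorReturn = iterator.return;
      let iteratorReturn = iterator.return;
      // if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) { ... }
      if (iteratorReturn == null) return;
      if (completion === kThrowCompletion) {
        // kReturnMethodNotCallable if return is present but not callable.
        if (typeof iteratorReturn !== 'function') {
          throw new TypeError('return method is not callable');
        }
        // A failed close must not mask the exception already in flight,
        // so the call is wrapped in try { ... } catch (_) { }.
        try { Reflect.apply(iteratorReturn, iterator, []); } catch (_) {}
      } else {
        let output = Reflect.apply(iteratorReturn, iterator, []);
        if (Object(output) !== output) {
          throw new TypeError('iterator result is not an object');
        }
      }
    }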
-
-Statement* ParserTraits::FinalizeForOfStatement(ForOfStatement* loop, int pos) {
- if (!FLAG_harmony_iterator_close) return loop;
-
+Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
+ Variable* var_completion, int pos) {
//
// This function replaces the loop with the following wrapping:
//
- // let each;
- // let completion = kNormalCompletion;
+ // completion = kNormalCompletion;
// try {
// try {
// #loop;
@@ -6781,120 +6821,48 @@ Statement* ParserTraits::FinalizeForOfStatement(ForOfStatement* loop, int pos) {
// }
// }
//
- // where the loop's body is wrapped as follows:
+ // Note that the loop's body and its assign_each already contain appropriate
+ // assignments to completion (see InitializeForOfStatement).
//
- // {
- // #loop-body
- // {{completion = kNormalCompletion;}}
- // }
- //
- // and the loop's assign_each is wrapped as follows
- //
- // do {
- // {{completion = kAbruptCompletion;}}
- // #assign-each
- // }
- //
-
- const int nopos = RelocInfo::kNoPosition;
- auto factory = parser_->factory();
- auto avfactory = parser_->ast_value_factory();
- auto scope = parser_->scope_;
- auto zone = parser_->zone();
- Variable* var_completion = scope->NewTemporary(avfactory->empty_string());
-
- // let each;
- Variable* var_each = scope->NewTemporary(avfactory->empty_string());
- Statement* initialize_each;
- {
- Expression* proxy = factory->NewVariableProxy(var_each);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, proxy,
- factory->NewUndefinedLiteral(nopos), nopos);
- initialize_each =
- factory->NewExpressionStatement(assignment, nopos);
- }
+ const int nopos = kNoSourcePosition;
// !(completion === kNormalCompletion || IS_UNDEFINED(#iterator))
Expression* closing_condition;
{
- Expression* lhs = factory->NewCompareOperation(
- Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
- factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
- Expression* rhs = factory->NewCompareOperation(
- Token::EQ_STRICT, factory->NewVariableProxy(loop->iterator()),
- factory->NewUndefinedLiteral(nopos), nopos);
- closing_condition = factory->NewUnaryOperation(
- Token::NOT, factory->NewBinaryOperation(Token::OR, lhs, rhs, nopos),
+ Expression* lhs = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(var_completion),
+ factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
+ Expression* rhs = factory()->NewCompareOperation(
+ Token::EQ_STRICT, factory()->NewVariableProxy(loop->iterator()),
+ factory()->NewUndefinedLiteral(nopos), nopos);
+ closing_condition = factory()->NewUnaryOperation(
+ Token::NOT, factory()->NewBinaryOperation(Token::OR, lhs, rhs, nopos),
nopos);
}
- // {{completion = kNormalCompletion;}}
- Statement* set_completion_normal;
- {
- Expression* proxy = factory->NewVariableProxy(var_completion);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, proxy,
- factory->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
-
- Block* block = factory->NewBlock(nullptr, 1, true, nopos);
- block->statements()->Add(
- factory->NewExpressionStatement(assignment, nopos), zone);
- set_completion_normal = block;
- }
-
- // {{completion = kAbruptCompletion;}}
- Statement* set_completion_abrupt;
- {
- Expression* proxy = factory->NewVariableProxy(var_completion);
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, proxy,
- factory->NewSmiLiteral(Parser::kAbruptCompletion, nopos), nopos);
-
- Block* block = factory->NewBlock(nullptr, 1, true, nopos);
- block->statements()->Add(factory->NewExpressionStatement(assignment, nopos),
- zone);
- set_completion_abrupt = block;
- }
-
- // { #loop-body; #set_completion_normal }
- Block* new_body = factory->NewBlock(nullptr, 2, false, nopos);
+ Block* final_loop = factory()->NewBlock(nullptr, 2, false, nopos);
{
- new_body->statements()->Add(loop->body(), zone);
- new_body->statements()->Add(set_completion_normal, zone);
- }
-
- // { #set_completion_abrupt; #assign-each }
- Block* new_assign_each = factory->NewBlock(nullptr, 2, false, nopos);
- {
- new_assign_each->statements()->Add(set_completion_abrupt, zone);
- new_assign_each->statements()->Add(
- factory->NewExpressionStatement(loop->assign_each(), nopos), zone);
- }
-
- // Now put things together.
-
- loop->set_body(new_body);
- loop->set_assign_each(
- factory->NewDoExpression(new_assign_each, var_each, nopos));
-
- Statement* final_loop;
- {
- Block* target = factory->NewBlock(nullptr, 3, false, nopos);
- target->statements()->Add(initialize_each, zone);
-
- Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
- try_block->statements()->Add(loop, zone);
+ Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(loop, zone());
FinalizeIteratorUse(var_completion, closing_condition, loop->iterator(),
- try_block, target);
- final_loop = target;
+ try_block, final_loop);
}
return final_loop;
}
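
Putting the pieces together, a finalized for-of behaves like this hand-written equivalent. Again a sketch: forOfWithFinalization and eachFn are invented names, the completion writes mirror what InitializeForOfStatement plants in the loop's assign_each and body, and closeIteratorForCompletion is the sketch above.

    function forOfWithFinalization(iterable, eachFn) {
      let completion = kNormalCompletion;
      const iterator = iterable[Symbol.iterator]();
      try {
        try {
          let result;
          while (!(result = iterator.next()).done) {
            completion = kAbruptCompletion; // planted in assign_each
            eachFn(result.value);           // #loop-body
            completion = kNormalCompletion; // planted at the end of the body
          }
        } catch (e) {
          if (completion === kAbruptCompletion) completion = kThrowCompletion;
          throw e; // %ReThrow in the generated code
        }
      } finally {
        // !(completion === kNormalCompletion || IS_UNDEFINED(#iterator))
        if (!(completion === kNormalCompletion || iterator === undefined)) {
          closeIteratorForCompletion(iterator, completion);
        }
      }
    }

    forOfWithFinalization([1, 2, 3], v => console.log(v)); // usage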
+#ifdef DEBUG
+void Parser::Print(AstNode* node) {
+ ast_value_factory()->Internalize(Isolate::Current());
+ node->Print(Isolate::Current());
+}
+#endif // DEBUG
+
+#undef CHECK_OK
+#undef CHECK_OK_VOID
+#undef CHECK_FAILED
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index c82682e323..b069f9af98 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -5,10 +5,8 @@
#ifndef V8_PARSING_PARSER_H_
#define V8_PARSING_PARSER_H_
-#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/compiler.h" // TODO(titzer): remove this include dependency
#include "src/parsing/parser-base.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparse-data-format.h"
@@ -21,191 +19,10 @@ class ScriptCompiler;
namespace internal {
+class ParseInfo;
+class ScriptData;
class Target;
-// A container for the inputs, configuration options, and outputs of parsing.
-class ParseInfo {
- public:
- explicit ParseInfo(Zone* zone);
- ParseInfo(Zone* zone, Handle<JSFunction> function);
- ParseInfo(Zone* zone, Handle<Script> script);
- // TODO(all) Only used via Debug::FindSharedFunctionInfoInScript, remove?
- ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared);
-
- ~ParseInfo() {
- if (ast_value_factory_owned()) {
- delete ast_value_factory_;
- set_ast_value_factory_owned(false);
- }
- ast_value_factory_ = nullptr;
- }
-
- Zone* zone() { return zone_; }
-
-// Convenience accessor methods for flags.
-#define FLAG_ACCESSOR(flag, getter, setter) \
- bool getter() const { return GetFlag(flag); } \
- void setter() { SetFlag(flag); } \
- void setter(bool val) { SetFlag(flag, val); }
-
- FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
- FLAG_ACCESSOR(kLazy, is_lazy, set_lazy)
- FLAG_ACCESSOR(kEval, is_eval, set_eval)
- FLAG_ACCESSOR(kGlobal, is_global, set_global)
- FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
- FLAG_ACCESSOR(kNative, is_native, set_native)
- FLAG_ACCESSOR(kModule, is_module, set_module)
- FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
- FLAG_ACCESSOR(kAstValueFactoryOwned, ast_value_factory_owned,
- set_ast_value_factory_owned)
-
-#undef FLAG_ACCESSOR
-
- void set_parse_restriction(ParseRestriction restriction) {
- SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
- }
-
- ParseRestriction parse_restriction() const {
- return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
- : NO_PARSE_RESTRICTION;
- }
-
- ScriptCompiler::ExternalSourceStream* source_stream() {
- return source_stream_;
- }
- void set_source_stream(ScriptCompiler::ExternalSourceStream* source_stream) {
- source_stream_ = source_stream;
- }
-
- ScriptCompiler::StreamedSource::Encoding source_stream_encoding() {
- return source_stream_encoding_;
- }
- void set_source_stream_encoding(
- ScriptCompiler::StreamedSource::Encoding source_stream_encoding) {
- source_stream_encoding_ = source_stream_encoding;
- }
-
- v8::Extension* extension() { return extension_; }
- void set_extension(v8::Extension* extension) { extension_ = extension; }
-
- ScriptData** cached_data() { return cached_data_; }
- void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
-
- ScriptCompiler::CompileOptions compile_options() { return compile_options_; }
- void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
- compile_options_ = compile_options;
- }
-
- Scope* script_scope() { return script_scope_; }
- void set_script_scope(Scope* script_scope) { script_scope_ = script_scope; }
-
- AstValueFactory* ast_value_factory() { return ast_value_factory_; }
- void set_ast_value_factory(AstValueFactory* ast_value_factory) {
- ast_value_factory_ = ast_value_factory;
- }
-
- FunctionLiteral* literal() { return literal_; }
- void set_literal(FunctionLiteral* literal) { literal_ = literal; }
-
- Scope* scope() { return scope_; }
- void set_scope(Scope* scope) { scope_ = scope; }
-
- UnicodeCache* unicode_cache() { return unicode_cache_; }
- void set_unicode_cache(UnicodeCache* unicode_cache) {
- unicode_cache_ = unicode_cache;
- }
-
- uintptr_t stack_limit() { return stack_limit_; }
- void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
-
- uint32_t hash_seed() { return hash_seed_; }
- void set_hash_seed(uint32_t hash_seed) { hash_seed_ = hash_seed; }
-
- //--------------------------------------------------------------------------
- // TODO(titzer): these should not be part of ParseInfo.
- //--------------------------------------------------------------------------
- Isolate* isolate() { return isolate_; }
- Handle<JSFunction> closure() { return closure_; }
- Handle<SharedFunctionInfo> shared_info() { return shared_; }
- Handle<Script> script() { return script_; }
- Handle<Context> context() { return context_; }
- void clear_script() { script_ = Handle<Script>::null(); }
- void set_isolate(Isolate* isolate) { isolate_ = isolate; }
- void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
- void set_context(Handle<Context> context) { context_ = context; }
- void set_script(Handle<Script> script) { script_ = script; }
- //--------------------------------------------------------------------------
-
- LanguageMode language_mode() {
- return construct_language_mode(is_strict_mode());
- }
- void set_language_mode(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 3);
- set_strict_mode(is_strict(language_mode));
- }
-
- void ReopenHandlesInNewHandleScope() {
- closure_ = Handle<JSFunction>(*closure_);
- shared_ = Handle<SharedFunctionInfo>(*shared_);
- script_ = Handle<Script>(*script_);
- context_ = Handle<Context>(*context_);
- }
-
-#ifdef DEBUG
- bool script_is_native() { return script_->type() == Script::TYPE_NATIVE; }
-#endif // DEBUG
-
- private:
- // Various configuration flags for parsing.
- enum Flag {
- // ---------- Input flags ---------------------------
- kToplevel = 1 << 0,
- kLazy = 1 << 1,
- kEval = 1 << 2,
- kGlobal = 1 << 3,
- kStrictMode = 1 << 4,
- kNative = 1 << 5,
- kParseRestriction = 1 << 6,
- kModule = 1 << 7,
- kAllowLazyParsing = 1 << 8,
- // ---------- Output flags --------------------------
- kAstValueFactoryOwned = 1 << 9
- };
-
- //------------- Inputs to parsing and scope analysis -----------------------
- Zone* zone_;
- unsigned flags_;
- ScriptCompiler::ExternalSourceStream* source_stream_;
- ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
- v8::Extension* extension_;
- ScriptCompiler::CompileOptions compile_options_;
- Scope* script_scope_;
- UnicodeCache* unicode_cache_;
- uintptr_t stack_limit_;
- uint32_t hash_seed_;
-
- // TODO(titzer): Move handles and isolate out of ParseInfo.
- Isolate* isolate_;
- Handle<JSFunction> closure_;
- Handle<SharedFunctionInfo> shared_;
- Handle<Script> script_;
- Handle<Context> context_;
-
- //----------- Inputs+Outputs of parsing and scope analysis -----------------
- ScriptData** cached_data_; // used if available, populated if requested.
- AstValueFactory* ast_value_factory_; // used if available, otherwise new.
-
- //----------- Outputs of parsing and scope analysis ------------------------
- FunctionLiteral* literal_; // produced by full parser.
- Scope* scope_; // produced by scope analysis.
-
- void SetFlag(Flag f) { flags_ |= f; }
- void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
- bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
-
- void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
-};
-
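
For reference, the deleted flag machinery is a conventional bit set: one unsigned word plus macro-generated accessor pairs. A compact JavaScript rendering of the same pattern (illustrative only; the names follow the C++ above):

    const kToplevel = 1 << 0;
    const kLazy = 1 << 1;
    const kEval = 1 << 2; // further flags continue doubling, as in the enum

    class FlagSet {
      constructor() { this.flags = 0; }
      getFlag(f) { return (this.flags & f) !== 0; }
      setFlag(f, v = true) {
        this.flags = v ? (this.flags | f) : (this.flags & ~f);
      }
      // FLAG_ACCESSOR(kLazy, is_lazy, set_lazy) expands to a pair like:
      is_lazy() { return this.getFlag(kLazy); }
      set_lazy(v = true) { this.setFlag(kLazy, v); }
    }

A FlagSet starts with every flag clear; set_lazy() sets kLazy and set_lazy(false) clears it, matching the two C++ setter overloads the macro generates.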
class FunctionEntry BASE_EMBEDDED {
public:
enum {
@@ -312,7 +129,7 @@ struct ParserFormalParameters : FormalParametersBase {
}
};
- explicit ParserFormalParameters(Scope* scope)
+ explicit ParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope), params(4, scope->zone()) {}
ZoneList<Parameter> params;
@@ -320,14 +137,12 @@ struct ParserFormalParameters : FormalParametersBase {
const Parameter& at(int i) const { return params[i]; }
};
-
-class ParserTraits {
+template <>
+class ParserBaseTraits<Parser> {
public:
- struct Type {
- // TODO(marja): To be removed. The Traits object should contain all the data
- // it needs.
- typedef v8::internal::Parser* Parser;
+ typedef ParserBaseTraits<Parser> ParserTraits;
+ struct Type {
typedef Variable GeneratorVariable;
typedef v8::internal::AstProperties AstProperties;
@@ -353,7 +168,12 @@ class ParserTraits {
typedef AstNodeFactory Factory;
};
- explicit ParserTraits(Parser* parser) : parser_(parser) {}
+ // TODO(nikolaos): The traits methods should not need to call methods
+ // of the implementation object.
+ Parser* delegate() { return reinterpret_cast<Parser*>(this); }
+ const Parser* delegate() const {
+ return reinterpret_cast<const Parser*>(this);
+ }
// Helper functions for recursive descent.
bool IsEval(const AstRawString* identifier) const;
@@ -367,13 +187,19 @@ class ParserTraits {
static bool IsIdentifier(Expression* expression);
+ static const AstRawString* AsIdentifier(Expression* expression) {
+ DCHECK(IsIdentifier(expression));
+ return expression->AsVariableProxy()->raw_name();
+ }
+
bool IsPrototype(const AstRawString* identifier) const;
bool IsConstructor(const AstRawString* identifier) const;
- static const AstRawString* AsIdentifier(Expression* expression) {
- DCHECK(IsIdentifier(expression));
- return expression->AsVariableProxy()->raw_name();
+ bool IsDirectEvalCall(Expression* expression) const {
+ if (!expression->IsCall()) return false;
+ expression = expression->AsCall()->expression();
+ return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
}
static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
@@ -448,205 +274,91 @@ class ParserTraits {
Expression* NewThrowTypeError(MessageTemplate::Template message,
const AstRawString* arg, int pos);
- // Generic AST generator for throwing errors from compiled code.
- Expression* NewThrowError(Runtime::FunctionId function_id,
- MessageTemplate::Template message,
- const AstRawString* arg, int pos);
-
- void FinalizeIteratorUse(Variable* completion, Expression* condition,
- Variable* iter, Block* iterator_use, Block* result);
-
- Statement* FinalizeForOfStatement(ForOfStatement* loop, int pos);
-
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
MessageTemplate::Template message,
const char* arg = NULL,
ParseErrorType error_type = kSyntaxError);
- void ReportMessage(MessageTemplate::Template message, const char* arg = NULL,
- ParseErrorType error_type = kSyntaxError);
- void ReportMessage(MessageTemplate::Template message, const AstRawString* arg,
- ParseErrorType error_type = kSyntaxError);
void ReportMessageAt(Scanner::Location source_location,
MessageTemplate::Template message,
const AstRawString* arg,
ParseErrorType error_type = kSyntaxError);
// "null" return type creators.
- static const AstRawString* EmptyIdentifier() {
- return NULL;
- }
- static Expression* EmptyExpression() {
- return NULL;
- }
- static Literal* EmptyLiteral() {
- return NULL;
- }
- static ObjectLiteralProperty* EmptyObjectLiteralProperty() { return NULL; }
- static FunctionLiteral* EmptyFunctionLiteral() { return NULL; }
+ static const AstRawString* EmptyIdentifier() { return nullptr; }
+ static Expression* EmptyExpression() { return nullptr; }
+ static Literal* EmptyLiteral() { return nullptr; }
+ static ObjectLiteralProperty* EmptyObjectLiteralProperty() { return nullptr; }
+ static FunctionLiteral* EmptyFunctionLiteral() { return nullptr; }
// Used in error return values.
- static ZoneList<Expression*>* NullExpressionList() {
- return NULL;
- }
- static const AstRawString* EmptyFormalParameter() { return NULL; }
+ static ZoneList<Expression*>* NullExpressionList() { return nullptr; }
// Non-NULL empty string.
- V8_INLINE const AstRawString* EmptyIdentifierString();
+ V8_INLINE const AstRawString* EmptyIdentifierString() const;
// Odd-ball literal creators.
- Literal* GetLiteralTheHole(int position, AstNodeFactory* factory);
+ Literal* GetLiteralTheHole(int position, AstNodeFactory* factory) const;
// Producing data during the recursive descent.
- const AstRawString* GetSymbol(Scanner* scanner);
- const AstRawString* GetNextSymbol(Scanner* scanner);
- const AstRawString* GetNumberAsSymbol(Scanner* scanner);
-
- Expression* ThisExpression(Scope* scope, AstNodeFactory* factory,
- int pos = RelocInfo::kNoPosition);
- Expression* SuperPropertyReference(Scope* scope, AstNodeFactory* factory,
- int pos);
- Expression* SuperCallReference(Scope* scope, AstNodeFactory* factory,
- int pos);
- Expression* NewTargetExpression(Scope* scope, AstNodeFactory* factory,
- int pos);
- Expression* FunctionSentExpression(Scope* scope, AstNodeFactory* factory,
- int pos);
+ const AstRawString* GetSymbol(Scanner* scanner) const;
+ const AstRawString* GetNextSymbol(Scanner* scanner) const;
+ const AstRawString* GetNumberAsSymbol(Scanner* scanner) const;
+
+ Expression* ThisExpression(int pos = kNoSourcePosition);
+ Expression* NewSuperPropertyReference(AstNodeFactory* factory, int pos);
+ Expression* NewSuperCallReference(AstNodeFactory* factory, int pos);
+ Expression* NewTargetExpression(int pos);
+ Expression* FunctionSentExpression(AstNodeFactory* factory, int pos) const;
Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
- AstNodeFactory* factory);
+ AstNodeFactory* factory) const;
Expression* ExpressionFromIdentifier(const AstRawString* name,
int start_position, int end_position,
- Scope* scope, AstNodeFactory* factory);
+ InferName = InferName::kYes);
Expression* ExpressionFromString(int pos, Scanner* scanner,
- AstNodeFactory* factory);
+ AstNodeFactory* factory) const;
Expression* GetIterator(Expression* iterable, AstNodeFactory* factory,
int pos);
- ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
+ ZoneList<v8::internal::Expression*>* NewExpressionList(int size,
+ Zone* zone) const {
return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
}
- ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size, Zone* zone) {
+ ZoneList<ObjectLiteral::Property*>* NewPropertyList(int size,
+ Zone* zone) const {
return new(zone) ZoneList<ObjectLiteral::Property*>(size, zone);
}
- ZoneList<v8::internal::Statement*>* NewStatementList(int size, Zone* zone) {
+ ZoneList<v8::internal::Statement*>* NewStatementList(int size,
+ Zone* zone) const {
return new(zone) ZoneList<v8::internal::Statement*>(size, zone);
}
V8_INLINE void AddParameterInitializationBlock(
const ParserFormalParameters& parameters,
- ZoneList<v8::internal::Statement*>* body, bool* ok);
-
- V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type,
- FunctionKind kind = kNormalFunction);
+ ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok);
V8_INLINE void AddFormalParameter(ParserFormalParameters* parameters,
Expression* pattern,
Expression* initializer,
int initializer_end_position, bool is_rest);
V8_INLINE void DeclareFormalParameter(
- Scope* scope, const ParserFormalParameters::Parameter& parameter,
+ DeclarationScope* scope,
+ const ParserFormalParameters::Parameter& parameter,
Type::ExpressionClassifier* classifier);
- void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
- Expression* params,
- const Scanner::Location& params_loc,
- bool* ok);
void ParseArrowFunctionFormalParameterList(
ParserFormalParameters* parameters, Expression* params,
- const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok);
-
- V8_INLINE DoExpression* ParseDoExpression(bool* ok);
+ const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
+ const Scope::Snapshot& scope_snapshot, bool* ok);
void ReindexLiterals(const ParserFormalParameters& parameters);
- // Temporary glue; these functions will move to ParserBase.
- Expression* ParseV8Intrinsic(bool* ok);
- FunctionLiteral* ParseFunctionLiteral(
- const AstRawString* name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_position, FunctionLiteral::FunctionType type,
- LanguageMode language_mode, bool* ok);
- V8_INLINE void SkipLazyFunctionBody(
- int* materialized_literal_count, int* expected_property_count, bool* ok,
- Scanner::BookmarkScope* bookmark = nullptr);
- V8_INLINE ZoneList<Statement*>* ParseEagerFunctionBody(
- const AstRawString* name, int pos,
- const ParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
- ClassLiteral* ParseClassLiteral(Type::ExpressionClassifier* classifier,
- const AstRawString* name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
-
- V8_INLINE void MarkTailPosition(Expression* expression);
-
- V8_INLINE void CheckConflictingVarDeclarations(v8::internal::Scope* scope,
- bool* ok);
-
- class TemplateLiteral : public ZoneObject {
- public:
- TemplateLiteral(Zone* zone, int pos)
- : cooked_(8, zone), raw_(8, zone), expressions_(8, zone), pos_(pos) {}
-
- const ZoneList<Expression*>* cooked() const { return &cooked_; }
- const ZoneList<Expression*>* raw() const { return &raw_; }
- const ZoneList<Expression*>* expressions() const { return &expressions_; }
- int position() const { return pos_; }
-
- void AddTemplateSpan(Literal* cooked, Literal* raw, int end, Zone* zone) {
- DCHECK_NOT_NULL(cooked);
- DCHECK_NOT_NULL(raw);
- cooked_.Add(cooked, zone);
- raw_.Add(raw, zone);
- }
-
- void AddExpression(Expression* expression, Zone* zone) {
- DCHECK_NOT_NULL(expression);
- expressions_.Add(expression, zone);
- }
-
- private:
- ZoneList<Expression*> cooked_;
- ZoneList<Expression*> raw_;
- ZoneList<Expression*> expressions_;
- int pos_;
- };
-
- typedef TemplateLiteral* TemplateLiteralState;
-
- V8_INLINE TemplateLiteralState OpenTemplateLiteral(int pos);
- V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool tail);
- V8_INLINE void AddTemplateExpression(TemplateLiteralState* state,
- Expression* expression);
- V8_INLINE Expression* CloseTemplateLiteral(TemplateLiteralState* state,
- int start, Expression* tag);
V8_INLINE Expression* NoTemplateTag() { return NULL; }
V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
return tag != NULL;
}
- V8_INLINE ZoneList<v8::internal::Expression*>* PrepareSpreadArguments(
- ZoneList<v8::internal::Expression*>* list);
V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {}
- V8_INLINE Expression* SpreadCall(Expression* function,
- ZoneList<v8::internal::Expression*>* args,
- int pos);
- V8_INLINE Expression* SpreadCallNew(Expression* function,
- ZoneList<v8::internal::Expression*>* args,
- int pos);
-
- // Rewrite all DestructuringAssignments in the current FunctionState.
- V8_INLINE void RewriteDestructuringAssignments();
-
- V8_INLINE Expression* RewriteExponentiation(Expression* left,
- Expression* right, int pos);
- V8_INLINE Expression* RewriteAssignExponentiation(Expression* left,
- Expression* right, int pos);
- V8_INLINE void QueueDestructuringAssignmentForRewriting(
- Expression* assignment);
- V8_INLINE void QueueNonPatternForRewriting(Expression* expr);
+ Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
const AstRawString* name);
@@ -654,32 +366,14 @@ class ParserTraits {
void SetFunctionNameFromIdentifierRef(Expression* value,
Expression* identifier);
- // Rewrite expressions that are not used as patterns
- V8_INLINE void RewriteNonPattern(Type::ExpressionClassifier* classifier,
- bool* ok);
-
+ V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
+ GetReportedErrorList() const;
V8_INLINE Zone* zone() const;
V8_INLINE ZoneList<Expression*>* GetNonPatternList() const;
-
- Expression* RewriteYieldStar(
- Expression* generator, Expression* expression, int pos);
-
- Expression* RewriteInstanceof(Expression* lhs, Expression* rhs, int pos);
-
- private:
- Parser* parser_;
-
- void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
- Maybe<Variable*> input, Variable* output);
- void BuildIteratorCloseForCompletion(
- ZoneList<Statement*>* statements, Variable* iterator,
- Variable* body_threw);
- Statement* CheckCallable(Variable* var, Expression* error, int pos);
};
-
-class Parser : public ParserBase<ParserTraits> {
+class Parser : public ParserBase<Parser> {
public:
explicit Parser(ParseInfo* info);
~Parser() {
@@ -696,13 +390,19 @@ class Parser : public ParserBase<ParserTraits> {
bool Parse(ParseInfo* info);
void ParseOnBackground(ParseInfo* info);
+ void DeserializeScopeChain(ParseInfo* info, Handle<Context> context,
+ Scope::DeserializationMode deserialization_mode);
+
// Handle errors detected during parsing, move statistics to Isolate,
// internalize strings (move them to the heap).
void Internalize(Isolate* isolate, Handle<Script> script, bool error);
void HandleSourceURLComments(Isolate* isolate, Handle<Script> script);
private:
- friend class ParserTraits;
+ friend class ParserBase<Parser>;
+ // TODO(nikolaos): This should not be necessary. It will be removed
+ // when the traits object stops delegating to the implementation object.
+ friend class ParserBaseTraits<Parser>;
// Runtime encoding of different completion modes.
enum CompletionKind {
@@ -711,6 +411,18 @@ class Parser : public ParserBase<ParserTraits> {
kAbruptCompletion
};
+ enum class FunctionBodyType { kNormal, kSingleExpression };
+
+ DeclarationScope* GetDeclarationScope() const {
+ return scope()->GetDeclarationScope();
+ }
+ DeclarationScope* GetClosureScope() const {
+ return scope()->GetClosureScope();
+ }
+ Variable* NewTemporary(const AstRawString* name) {
+ return scope()->NewTemporary(name);
+ }
+
// Limit the allowed number of local variables in a function. The hard limit
// is that offsets computed by FullCodeGenerator::StackOperand and similar
// functions are ints, and they should not overflow. In addition, accessing
@@ -724,8 +436,8 @@ class Parser : public ParserBase<ParserTraits> {
FunctionLiteral* ParseProgram(Isolate* isolate, ParseInfo* info);
FunctionLiteral* ParseLazy(Isolate* isolate, ParseInfo* info);
- FunctionLiteral* ParseLazy(Isolate* isolate, ParseInfo* info,
- Utf16CharacterStream* source);
+ FunctionLiteral* DoParseLazy(ParseInfo* info, const AstRawString* raw_name,
+ Utf16CharacterStream* source);
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(ParseInfo* info);
@@ -747,19 +459,29 @@ class Parser : public ParserBase<ParserTraits> {
// which is set to false if parsing failed; it is unchanged otherwise.
// By making the 'exception handling' explicit, we are forced to check
// for failure at the call sites.
- void* ParseStatementList(ZoneList<Statement*>* body, int end_token, bool* ok);
+ void ParseStatementList(ZoneList<Statement*>* body, int end_token, bool* ok);
Statement* ParseStatementListItem(bool* ok);
- void* ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
+ void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
Statement* ParseModuleItem(bool* ok);
const AstRawString* ParseModuleSpecifier(bool* ok);
- Statement* ParseImportDeclaration(bool* ok);
+ void ParseImportDeclaration(bool* ok);
Statement* ParseExportDeclaration(bool* ok);
Statement* ParseExportDefault(bool* ok);
- void* ParseExportClause(ZoneList<const AstRawString*>* export_names,
- ZoneList<Scanner::Location>* export_locations,
- ZoneList<const AstRawString*>* local_names,
- Scanner::Location* reserved_loc, bool* ok);
- ZoneList<ImportDeclaration*>* ParseNamedImports(int pos, bool* ok);
+ void ParseExportClause(ZoneList<const AstRawString*>* export_names,
+ ZoneList<Scanner::Location>* export_locations,
+ ZoneList<const AstRawString*>* local_names,
+ Scanner::Location* reserved_loc, bool* ok);
+ struct NamedImport : public ZoneObject {
+ const AstRawString* import_name;
+ const AstRawString* local_name;
+ const Scanner::Location location;
+ NamedImport(const AstRawString* import_name, const AstRawString* local_name,
+ Scanner::Location location)
+ : import_name(import_name),
+ local_name(local_name),
+ location(location) {}
+ };
+ ZoneList<const NamedImport*>* ParseNamedImports(int pos, bool* ok);
Statement* ParseStatement(ZoneList<const AstRawString*>* labels,
AllowLabelledFunctionStatement allow_function,
bool* ok);
@@ -768,17 +490,19 @@ class Parser : public ParserBase<ParserTraits> {
bool* ok);
Statement* ParseStatementAsUnlabelled(ZoneList<const AstRawString*>* labels,
bool* ok);
- Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
- bool* ok);
- Statement* ParseFunctionDeclaration(int pos, bool is_generator,
- ZoneList<const AstRawString*>* names,
- bool* ok);
+ Statement* ParseFunctionDeclaration(bool* ok);
+ Statement* ParseHoistableDeclaration(ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok);
+ Statement* ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
+ ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok);
+ Statement* ParseAsyncFunctionDeclaration(ZoneList<const AstRawString*>* names,
+ bool default_export, bool* ok);
+ Expression* ParseAsyncFunctionExpression(bool* ok);
Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
- bool* ok);
+ bool default_export, bool* ok);
Statement* ParseNativeDeclaration(bool* ok);
Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
- Block* ParseBlock(ZoneList<const AstRawString*>* labels,
- bool finalize_block_scope, bool* ok);
Block* ParseVariableStatement(VariableDeclarationContext var_context,
ZoneList<const AstRawString*>* names,
bool* ok);
@@ -823,7 +547,7 @@ class Parser : public ParserBase<ParserTraits> {
Scanner::Location bindings_loc;
};
- class PatternRewriter : private AstVisitor {
+ class PatternRewriter final : public AstVisitor<PatternRewriter> {
public:
static void DeclareAndInitializeVariables(
Block* block, const DeclarationDescriptor* declaration_descriptor,
@@ -838,16 +562,13 @@ class Parser : public ParserBase<ParserTraits> {
Assignment* assignment,
Scope* scope);
- void set_initializer_position(int pos) { initializer_position_ = pos; }
-
private:
PatternRewriter() {}
-#define DECLARE_VISIT(type) void Visit##type(v8::internal::type* node) override;
+#define DECLARE_VISIT(type) void Visit##type(v8::internal::type* node);
// Visiting functions for AST nodes make this an AstVisitor.
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- void Visit(AstNode* node) override;
enum PatternContext {
BINDING,
@@ -863,7 +584,7 @@ class Parser : public ParserBase<ParserTraits> {
Expression* old_value = current_value_;
current_value_ = value;
recursion_level_++;
- pattern->Accept(this);
+ Visit(pattern);
recursion_level_--;
current_value_ = old_value;
}
@@ -880,6 +601,8 @@ class Parser : public ParserBase<ParserTraits> {
PatternContext SetAssignmentContextIfNeeded(Expression* node);
PatternContext SetInitializerContextIfNeeded(Expression* node);
+ void RewriteParameterScopes(Expression* expr);
+
Variable* CreateTempVar(Expression* value = nullptr);
AstNodeFactory* factory() const { return parser_->factory(); }
@@ -900,6 +623,8 @@ class Parser : public ParserBase<ParserTraits> {
Expression* current_value_;
int recursion_level_;
bool* ok_;
+
+ DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
};
Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
@@ -927,8 +652,6 @@ class Parser : public ParserBase<ParserTraits> {
Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
Statement* ParseThrowStatement(bool* ok);
Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
- class DontCollectExpressionsInTailPositionScope;
- class CollectExpressionsInTailPositionToListScope;
TryStatement* ParseTryStatement(bool* ok);
DebuggerStatement* ParseDebuggerStatement(bool* ok);
// Parse a SubStatement in strict mode, or with an extra block scope in
@@ -947,16 +670,24 @@ class Parser : public ParserBase<ParserTraits> {
// Initialize the components of a for-in / for-of statement.
- void InitializeForEachStatement(ForEachStatement* stmt, Expression* each,
- Expression* subject, Statement* body);
- void InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
- Expression* iterable, Statement* body,
- int iterable_pos);
+ Statement* InitializeForEachStatement(ForEachStatement* stmt,
+ Expression* each, Expression* subject,
+ Statement* body, int each_keyword_pos);
+ Statement* InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
+ Expression* iterable, Statement* body,
+ bool finalize,
+ int next_result_pos = kNoSourcePosition);
Statement* DesugarLexicalBindingsInForStatement(
Scope* inner_scope, VariableMode mode,
ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
Expression* cond, Statement* next, Statement* body, bool* ok);
+ void DesugarAsyncFunctionBody(const AstRawString* function_name, Scope* scope,
+ ZoneList<Statement*>* body,
+ Type::ExpressionClassifier* classifier,
+ FunctionKind kind, FunctionBodyType type,
+ bool accept_IN, int pos, bool* ok);
+
void RewriteDoExpression(Expression* expr, bool* ok);
FunctionLiteral* ParseFunctionLiteral(
@@ -965,11 +696,11 @@ class Parser : public ParserBase<ParserTraits> {
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode, bool* ok);
- ClassLiteral* ParseClassLiteral(ExpressionClassifier* classifier,
- const AstRawString* name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
+ Expression* ParseClassLiteral(ExpressionClassifier* classifier,
+ const AstRawString* name,
+ Scanner::Location class_name_location,
+ bool name_is_strict_reserved, int pos,
+ bool* ok);
// Magical syntax support.
Expression* ParseV8Intrinsic(bool* ok);
@@ -993,13 +724,23 @@ class Parser : public ParserBase<ParserTraits> {
void InsertShadowingVarBindingInitializers(Block* block);
// Implement sloppy block-scoped functions, ES2015 Annex B 3.3
- void InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok);
-
- // Parser support
- VariableProxy* NewUnresolved(const AstRawString* name, VariableMode mode);
+ void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope,
+ Scope* complex_params_scope,
+ bool* ok);
+
+ static InitializationFlag DefaultInitializationFlag(VariableMode mode);
+ VariableProxy* NewUnresolved(const AstRawString* name, int begin_pos,
+ int end_pos = kNoSourcePosition,
+ Variable::Kind kind = Variable::NORMAL);
+ VariableProxy* NewUnresolved(const AstRawString* name);
Variable* Declare(Declaration* declaration,
- DeclarationDescriptor::Kind declaration_kind, bool resolve,
- bool* ok, Scope* declaration_scope = nullptr);
+ DeclarationDescriptor::Kind declaration_kind,
+ VariableMode mode, InitializationFlag init, bool* ok,
+ Scope* declaration_scope = nullptr);
+ Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
+ int pos, bool* ok);
+ Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
+ InitializationFlag init, int pos, bool* ok);
bool TargetStackContainsLabel(const AstRawString* label);
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
@@ -1009,7 +750,7 @@ class Parser : public ParserBase<ParserTraits> {
// Factory methods.
FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
- Scope* scope, int pos, int end_pos,
+ int pos, int end_pos,
LanguageMode language_mode);
// Skip over a lazy function, either using cached data if we have it, or
@@ -1027,6 +768,7 @@ class Parser : public ParserBase<ParserTraits> {
Block* BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok);
+ Block* BuildRejectPromiseOnException(Block* block);
// Consumes the ending }.
ZoneList<Statement*>* ParseEagerFunctionBody(
@@ -1036,6 +778,37 @@ class Parser : public ParserBase<ParserTraits> {
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
+ class TemplateLiteral : public ZoneObject {
+ public:
+ TemplateLiteral(Zone* zone, int pos)
+ : cooked_(8, zone), raw_(8, zone), expressions_(8, zone), pos_(pos) {}
+
+ const ZoneList<Expression*>* cooked() const { return &cooked_; }
+ const ZoneList<Expression*>* raw() const { return &raw_; }
+ const ZoneList<Expression*>* expressions() const { return &expressions_; }
+ int position() const { return pos_; }
+
+ void AddTemplateSpan(Literal* cooked, Literal* raw, int end, Zone* zone) {
+ DCHECK_NOT_NULL(cooked);
+ DCHECK_NOT_NULL(raw);
+ cooked_.Add(cooked, zone);
+ raw_.Add(raw, zone);
+ }
+
+ void AddExpression(Expression* expression, Zone* zone) {
+ DCHECK_NOT_NULL(expression);
+ expressions_.Add(expression, zone);
+ }
+
+ private:
+ ZoneList<Expression*> cooked_;
+ ZoneList<Expression*> raw_;
+ ZoneList<Expression*> expressions_;
+ int pos_;
+ };
+
+ typedef TemplateLiteral* TemplateLiteralState;
+
TemplateLiteralState OpenTemplateLiteral(int pos);
void AddTemplateSpan(TemplateLiteralState* state, bool tail);
void AddTemplateExpression(TemplateLiteralState* state,
@@ -1044,6 +817,16 @@ class Parser : public ParserBase<ParserTraits> {
Expression* tag);
uint32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
+ void ParseAsyncArrowSingleExpressionBody(ZoneList<Statement*>* body,
+ bool accept_IN,
+ ExpressionClassifier* classifier,
+ int pos, bool* ok) {
+ DesugarAsyncFunctionBody(ast_value_factory()->empty_string(), scope(), body,
+ classifier, kAsyncArrowFunction,
+ FunctionBodyType::kSingleExpression, accept_IN,
+ pos, ok);
+ }
+
ZoneList<v8::internal::Expression*>* PrepareSpreadArguments(
ZoneList<v8::internal::Expression*>* list);
Expression* SpreadCall(Expression* function,
@@ -1054,6 +837,10 @@ class Parser : public ParserBase<ParserTraits> {
void SetLanguageMode(Scope* scope, LanguageMode mode);
void RaiseLanguageMode(LanguageMode mode);
+ V8_INLINE void MarkCollectedTailCallExpressions();
+ V8_INLINE void MarkTailPosition(Expression* expression);
+
+ // Rewrite all DestructuringAssignments in the current FunctionState.
V8_INLINE void RewriteDestructuringAssignments();
V8_INLINE Expression* RewriteExponentiation(Expression* left,
@@ -1064,11 +851,47 @@ class Parser : public ParserBase<ParserTraits> {
friend class NonPatternRewriter;
V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
+ // Rewrite expressions that are not used as patterns
V8_INLINE void RewriteNonPattern(ExpressionClassifier* classifier, bool* ok);
+ V8_INLINE void QueueDestructuringAssignmentForRewriting(
+ Expression* assignment);
+ V8_INLINE void QueueNonPatternForRewriting(Expression* expr, bool* ok);
+
friend class InitializerRewriter;
void RewriteParameterInitializer(Expression* expr, Scope* scope);
+ Expression* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
+ Expression* BuildPromiseResolve(Expression* value, int pos);
+ Expression* BuildPromiseReject(Expression* value, int pos);
+
+ // Generic AST generator for throwing errors from compiled code.
+ Expression* NewThrowError(Runtime::FunctionId function_id,
+ MessageTemplate::Template message,
+ const AstRawString* arg, int pos);
+
+ void FinalizeIteratorUse(Variable* completion, Expression* condition,
+ Variable* iter, Block* iterator_use, Block* result);
+
+ Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
+ int pos);
+ void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
+ Variable* input, Variable* output);
+ void BuildIteratorCloseForCompletion(ZoneList<Statement*>* statements,
+ Variable* iterator,
+ Expression* completion);
+ Statement* CheckCallable(Variable* var, Expression* error, int pos);
+
+ V8_INLINE Expression* RewriteAwaitExpression(Expression* value, int pos);
+
+ Expression* RewriteYieldStar(Expression* generator, Expression* expression,
+ int pos);
+
+ void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
+ Expression* params, int end_pos,
+ bool* ok);
+ void SetFunctionName(Expression* value, const AstRawString* name);
+
Scanner scanner_;
PreParser* reusable_preparser_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
@@ -1085,45 +908,19 @@ class Parser : public ParserBase<ParserTraits> {
HistogramTimer* pre_parse_timer_;
bool parsing_on_main_thread_;
-};
+#ifdef DEBUG
+ void Print(AstNode* node);
+#endif // DEBUG
+};
-bool ParserTraits::IsFutureStrictReserved(
+bool ParserBaseTraits<Parser>::IsFutureStrictReserved(
const AstRawString* identifier) const {
- return parser_->scanner()->IdentifierIsFutureStrictReserved(identifier);
-}
-
-
-Scope* ParserTraits::NewScope(Scope* parent_scope, ScopeType scope_type,
- FunctionKind kind) {
- return parser_->NewScope(parent_scope, scope_type, kind);
-}
-
-
-const AstRawString* ParserTraits::EmptyIdentifierString() {
- return parser_->ast_value_factory()->empty_string();
-}
-
-
-void ParserTraits::SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok,
- Scanner::BookmarkScope* bookmark) {
- return parser_->SkipLazyFunctionBody(materialized_literal_count,
- expected_property_count, ok, bookmark);
-}
-
-
-ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody(
- const AstRawString* name, int pos, const ParserFormalParameters& parameters,
- FunctionKind kind, FunctionLiteral::FunctionType function_type, bool* ok) {
- return parser_->ParseEagerFunctionBody(name, pos, parameters, kind,
- function_type, ok);
+ return delegate()->scanner()->IdentifierIsFutureStrictReserved(identifier);
}
-
-void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
- bool* ok) {
- parser_->CheckConflictingVarDeclarations(scope, ok);
+const AstRawString* ParserBaseTraits<Parser>::EmptyIdentifierString() const {
+ return delegate()->ast_value_factory()->empty_string();
}
@@ -1155,80 +952,36 @@ class CompileTimeValue: public AllStatic {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
};
-
-ParserTraits::TemplateLiteralState ParserTraits::OpenTemplateLiteral(int pos) {
- return parser_->OpenTemplateLiteral(pos);
-}
-
-
-void ParserTraits::AddTemplateSpan(TemplateLiteralState* state, bool tail) {
- parser_->AddTemplateSpan(state, tail);
-}
-
-
-void ParserTraits::AddTemplateExpression(TemplateLiteralState* state,
- Expression* expression) {
- parser_->AddTemplateExpression(state, expression);
-}
-
-
-Expression* ParserTraits::CloseTemplateLiteral(TemplateLiteralState* state,
- int start, Expression* tag) {
- return parser_->CloseTemplateLiteral(state, start, tag);
-}
-
-
-ZoneList<v8::internal::Expression*>* ParserTraits::PrepareSpreadArguments(
- ZoneList<v8::internal::Expression*>* list) {
- return parser_->PrepareSpreadArguments(list);
-}
-
-
-Expression* ParserTraits::SpreadCall(Expression* function,
- ZoneList<v8::internal::Expression*>* args,
- int pos) {
- return parser_->SpreadCall(function, args, pos);
-}
-
-
-Expression* ParserTraits::SpreadCallNew(
- Expression* function, ZoneList<v8::internal::Expression*>* args, int pos) {
- return parser_->SpreadCallNew(function, args, pos);
-}
-
-
-void ParserTraits::AddFormalParameter(ParserFormalParameters* parameters,
- Expression* pattern,
- Expression* initializer,
- int initializer_end_position,
- bool is_rest) {
+void ParserBaseTraits<Parser>::AddFormalParameter(
+ ParserFormalParameters* parameters, Expression* pattern,
+ Expression* initializer, int initializer_end_position, bool is_rest) {
bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
- const AstRawString* name = is_simple
- ? pattern->AsVariableProxy()->raw_name()
- : parser_->ast_value_factory()->empty_string();
+ const AstRawString* name =
+ is_simple ? pattern->AsVariableProxy()->raw_name()
+ : delegate()->ast_value_factory()->empty_string();
parameters->params.Add(
ParserFormalParameters::Parameter(name, pattern, initializer,
initializer_end_position, is_rest),
parameters->scope->zone());
}
-
-void ParserTraits::DeclareFormalParameter(
- Scope* scope, const ParserFormalParameters::Parameter& parameter,
+void ParserBaseTraits<Parser>::DeclareFormalParameter(
+ DeclarationScope* scope, const ParserFormalParameters::Parameter& parameter,
Type::ExpressionClassifier* classifier) {
bool is_duplicate = false;
bool is_simple = classifier->is_simple_parameter_list();
auto name = is_simple || parameter.is_rest
? parameter.name
- : parser_->ast_value_factory()->empty_string();
+ : delegate()->ast_value_factory()->empty_string();
auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
if (!is_simple) scope->SetHasNonSimpleParameters();
bool is_optional = parameter.initializer != nullptr;
- Variable* var = scope->DeclareParameter(
- name, mode, is_optional, parameter.is_rest, &is_duplicate);
+ Variable* var =
+ scope->DeclareParameter(name, mode, is_optional, parameter.is_rest,
+ &is_duplicate, delegate()->ast_value_factory());
if (is_duplicate) {
classifier->RecordDuplicateFormalParameterError(
- parser_->scanner()->location());
+ delegate()->scanner()->location());
}
if (is_sloppy(scope->language_mode())) {
// TODO(sigurds) Mark every parameter as maybe assigned. This is a
@@ -1238,26 +991,24 @@ void ParserTraits::DeclareFormalParameter(
}
}
-
-void ParserTraits::AddParameterInitializationBlock(
+void ParserBaseTraits<Parser>::AddParameterInitializationBlock(
const ParserFormalParameters& parameters,
- ZoneList<v8::internal::Statement*>* body, bool* ok) {
+ ZoneList<v8::internal::Statement*>* body, bool is_async, bool* ok) {
if (!parameters.is_simple) {
auto* init_block =
- parser_->BuildParameterInitializationBlock(parameters, ok);
+ delegate()->BuildParameterInitializationBlock(parameters, ok);
if (!*ok) return;
+
+ if (is_async) {
+ init_block = delegate()->BuildRejectPromiseOnException(init_block);
+ }
+
if (init_block != nullptr) {
- body->Add(init_block, parser_->zone());
+ body->Add(init_block, delegate()->zone());
}
}
}
-
-DoExpression* ParserTraits::ParseDoExpression(bool* ok) {
- return parser_->ParseDoExpression(ok);
-}
-
-
} // namespace internal
} // namespace v8
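
Throughout this file the patch replaces the hand-written ParserTraits forwarding shims with ParserBaseTraits<Parser> methods that reach the concrete parser through delegate(). A minimal CRTP-style sketch of that delegation shape, using toy names (Traits, ToyParser, Answer and AnswerImpl are illustrative, not V8's definitions):

#include <iostream>

// Toy model of the traits layer: it holds no state and forwards to the
// concrete parser via delegate(), like IsFutureStrictReserved above.
template <typename Impl>
class Traits {
 public:
  Impl* delegate() { return static_cast<Impl*>(this); }
  const Impl* delegate() const { return static_cast<const Impl*>(this); }

  int Answer() const { return delegate()->AnswerImpl(); }
};

class ToyParser : public Traits<ToyParser> {
 public:
  int AnswerImpl() const { return 42; }
};

int main() {
  ToyParser p;
  std::cout << p.Answer() << "\n";  // resolves through the traits layer
  return 0;
}
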
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index e699255cdb..1831a2927d 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -147,24 +147,28 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// pre-resolve the proxy because it resides in the same scope as the
// declaration.
const AstRawString* name = pattern->raw_name();
- VariableProxy* proxy = parser_->NewUnresolved(name, descriptor_->mode);
+ VariableProxy* proxy = descriptor_->scope->NewUnresolved(
+ factory(), name, parser_->scanner()->location().beg_pos,
+ parser_->scanner()->location().end_pos);
Declaration* declaration = factory()->NewVariableDeclaration(
- proxy, descriptor_->mode, descriptor_->scope,
- descriptor_->declaration_pos);
- Variable* var =
- parser_->Declare(declaration, descriptor_->declaration_kind,
- descriptor_->mode != VAR, ok_, descriptor_->hoist_scope);
+ proxy, descriptor_->scope, descriptor_->declaration_pos);
+ Variable* var = parser_->Declare(declaration, descriptor_->declaration_kind,
+ descriptor_->mode,
+ DefaultInitializationFlag(descriptor_->mode),
+ ok_, descriptor_->hoist_scope);
if (!*ok_) return;
DCHECK_NOT_NULL(var);
- DCHECK(!proxy->is_resolved() || proxy->var() == var);
+ DCHECK(proxy->is_resolved());
+ DCHECK(initializer_position_ != kNoSourcePosition);
var->set_initializer_position(initializer_position_);
- DCHECK(initializer_position_ != RelocInfo::kNoPosition);
-
+ // TODO(adamk): This should probably be checking hoist_scope.
+ // Move it to Parser::Declare() to make it easier to test
+ // the right scope.
Scope* declaration_scope = IsLexicalVariableMode(descriptor_->mode)
? descriptor_->scope
- : descriptor_->scope->DeclarationScope();
- if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
+ : descriptor_->scope->GetDeclarationScope();
+ if (declaration_scope->num_var() > kMaxNumFunctionLocals) {
parser_->ReportMessage(MessageTemplate::kTooManyVariables);
*ok_ = false;
return;
@@ -173,8 +177,10 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
names_->Add(name, zone());
}
- // Initialize variables if needed. A
- // declaration of the form:
+ // If there's no initializer, we're done.
+ if (value == nullptr) return;
+
+ // A declaration of the form:
//
// var v = x;
//
@@ -182,125 +188,57 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
//
// var v; v = x;
//
- // In particular, we need to re-lookup 'v' (in scope_, not
- // declaration_scope) as it may be a different 'v' than the 'v' in the
- // declaration (e.g., if we are inside a 'with' statement or 'catch'
- // block).
- //
- // However, note that const declarations are different! A const
- // declaration of the form:
- //
- // const c = x;
- //
- // is *not* syntactic sugar for:
- //
- // const c; c = x;
- //
- // The "variable" c initialized to x is the same as the declared
- // one - there is no re-lookup (see the last parameter of the
- // Declare() call above).
- Scope* initialization_scope = IsImmutableVariableMode(descriptor_->mode)
- ? declaration_scope
- : descriptor_->scope;
-
-
- // Global variable declarations must be compiled in a specific
- // way. When the script containing the global variable declaration
- // is entered, the global variable must be declared, so that if it
- // doesn't exist (on the global object itself, see ES5 errata) it
- // gets created with an initial undefined value. This is handled
- // by the declarations part of the function representing the
- // top-level global code; see Runtime::DeclareGlobalVariable. If
- // it already exists (in the object or in a prototype), it is
- // *not* touched until the variable declaration statement is
- // executed.
- //
- // Executing the variable declaration statement will always
- // guarantee to give the global object an own property.
- // This way, global variable declarations can shadow
- // properties in the prototype chain, but only after the variable
- // declaration statement has been executed. This is important in
- // browsers where the global object (window) has lots of
- // properties defined in prototype objects.
- if (initialization_scope->is_script_scope() &&
- !IsLexicalVariableMode(descriptor_->mode)) {
- // Compute the arguments for the runtime
-    // call.
+ // In particular, we need to re-lookup 'v' as it may be a different
+ // 'v' than the 'v' in the declaration (e.g., if we are inside a
+ // 'with' statement or 'catch' block). Global var declarations
+ // also need special treatment.
+ Scope* var_init_scope = descriptor_->scope;
+
+ if (descriptor_->mode == VAR && var_init_scope->is_script_scope()) {
+ // Global variable declarations must be compiled in a specific
+ // way. When the script containing the global variable declaration
+ // is entered, the global variable must be declared, so that if it
+ // doesn't exist (on the global object itself, see ES5 errata) it
+ // gets created with an initial undefined value. This is handled
+ // by the declarations part of the function representing the
+ // top-level global code; see Runtime::DeclareGlobalVariable. If
+ // it already exists (in the object or in a prototype), it is
+ // *not* touched until the variable declaration statement is
+ // executed.
+ //
+ // Executing the variable declaration statement will always
+ // guarantee to give the global object an own property.
+ // This way, global variable declarations can shadow
+ // properties in the prototype chain, but only after the variable
+ // declaration statement has been executed. This is important in
+ // browsers where the global object (window) has lots of
+ // properties defined in prototype objects.
+
ZoneList<Expression*>* arguments =
new (zone()) ZoneList<Expression*>(3, zone());
- // We have at least 1 parameter.
arguments->Add(
factory()->NewStringLiteral(name, descriptor_->declaration_pos),
zone());
- CallRuntime* initialize;
-
- if (IsImmutableVariableMode(descriptor_->mode)) {
- arguments->Add(value, zone());
- // Construct the call to Runtime_InitializeConstGlobal
- // and add it to the initialization statement block.
- // Note that the function does different things depending on
- // the number of arguments (1 or 2).
- initialize = factory()->NewCallRuntime(Runtime::kInitializeConstGlobal,
- arguments, value->position());
- value = NULL; // zap the value to avoid the unnecessary assignment
- } else {
- // Add language mode.
- // We may want to pass singleton to avoid Literal allocations.
- LanguageMode language_mode = initialization_scope->language_mode();
- arguments->Add(
- factory()->NewNumberLiteral(language_mode, RelocInfo::kNoPosition),
- zone());
+ arguments->Add(factory()->NewNumberLiteral(var_init_scope->language_mode(),
+ kNoSourcePosition),
+ zone());
+ arguments->Add(value, zone());
- // Be careful not to assign a value to the global variable if
- // we're in a with. The initialization value should not
- // necessarily be stored in the global object in that case,
- // which is why we need to generate a separate assignment node.
- if (value != NULL && !descriptor_->scope->inside_with()) {
- arguments->Add(value, zone());
- // Construct the call to Runtime_InitializeVarGlobal
- // and add it to the initialization statement block.
- initialize = factory()->NewCallRuntime(Runtime::kInitializeVarGlobal,
- arguments, value->position());
- value = NULL; // zap the value to avoid the unnecessary assignment
- } else {
- initialize = NULL;
- }
- }
-
- if (initialize != NULL) {
- block_->statements()->Add(
- factory()->NewExpressionStatement(initialize, initialize->position()),
- zone());
- }
- } else if (value != nullptr && (descriptor_->mode == CONST_LEGACY ||
- IsLexicalVariableMode(descriptor_->mode))) {
- // Constant initializations always assign to the declared constant which
- // is always at the function scope level. This is only relevant for
- // dynamically looked-up variables and constants (the
- // start context for constant lookups is always the function context,
- // while it is the top context for var declared variables). Sigh...
- // For 'let' and 'const' declared variables in harmony mode the
- // initialization also always assigns to the declared variable.
- DCHECK_NOT_NULL(proxy);
- DCHECK_NOT_NULL(proxy->var());
- DCHECK_NOT_NULL(value);
- // Add break location for destructured sub-pattern.
- int pos = IsSubPattern() ? pattern->position() : value->position();
- Assignment* assignment =
- factory()->NewAssignment(Token::INIT, proxy, value, pos);
+ CallRuntime* initialize = factory()->NewCallRuntime(
+ Runtime::kInitializeVarGlobal, arguments, value->position());
block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, pos), zone());
- value = NULL;
- }
-
- // Add an assignment node to the initialization statement block if we still
- // have a pending initialization value.
- if (value != NULL) {
- DCHECK(descriptor_->mode == VAR);
- // 'var' initializations are simply assignments (with all the consequences
- // if they are inside a 'with' statement - they may change a 'with' object
- // property).
- VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name);
+ factory()->NewExpressionStatement(initialize, initialize->position()),
+ zone());
+ } else {
+ // For 'let' and 'const' declared variables the initialization always
+ // assigns to the declared variable.
+ // But for var declarations we need to do a new lookup.
+ if (descriptor_->mode == VAR) {
+ proxy = var_init_scope->NewUnresolved(factory(), name);
+ } else {
+ DCHECK_NOT_NULL(proxy);
+ DCHECK_NOT_NULL(proxy->var());
+ }
// Add break location for destructured sub-pattern.
int pos = IsSubPattern() ? pattern->position() : value->position();
Assignment* assignment =
@@ -316,10 +254,10 @@ Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
if (value != nullptr) {
auto assignment = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp), value,
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition),
zone());
}
return temp;
@@ -334,7 +272,7 @@ void Parser::PatternRewriter::VisitRewritableExpression(
// perform BindingPattern rewriting
DCHECK(!node->is_rewritten());
node->Rewrite(node->expression());
- return node->expression()->Accept(this);
+ return Visit(node->expression());
}
if (node->is_rewritten()) return;
@@ -354,11 +292,10 @@ void Parser::PatternRewriter::VisitRewritableExpression(
auto temp_var = CreateTempVar(current_value_);
Expression* is_undefined = factory()->NewCompareOperation(
Token::EQ_STRICT, factory()->NewVariableProxy(temp_var),
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
value = factory()->NewConditional(is_undefined, initializer,
factory()->NewVariableProxy(temp_var),
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
}
PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
@@ -387,6 +324,21 @@ void Parser::PatternRewriter::VisitRewritableExpression(
return set_context(old_context);
}
+// When an extra declaration scope needs to be inserted to account for
+// a sloppy eval in a default parameter or function body, the expressions
+// need to be in that new inner scope, which was added after initial
+// parsing.
+void Parser::PatternRewriter::RewriteParameterScopes(Expression* expr) {
+ if (!IsBindingContext()) return;
+ if (descriptor_->declaration_kind != DeclarationDescriptor::PARAMETER) return;
+ if (!scope()->is_block_scope()) return;
+
+ DCHECK(scope()->is_declaration_scope());
+ DCHECK(scope()->outer_scope()->is_function_scope());
+ DCHECK(scope()->calls_sloppy_eval());
+
+ ReparentParameterExpressionScope(parser_->stack_limit(), expr, scope());
+}
void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
Variable** temp_var) {
@@ -396,10 +348,15 @@ void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
for (ObjectLiteralProperty* property : *pattern->properties()) {
PatternContext context = SetInitializerContextIfNeeded(property->value());
+
+ // Computed property names contain expressions which might require
+ // scope rewriting.
+ if (!property->key()->IsLiteral()) RewriteParameterScopes(property->key());
+
RecurseIntoSubpattern(
property->value(),
factory()->NewProperty(factory()->NewVariableProxy(temp),
- property->key(), RelocInfo::kNoPosition));
+ property->key(), kNoSourcePosition));
set_context(context);
}
}
@@ -417,13 +374,13 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
auto temp = *temp_var = CreateTempVar(current_value_);
auto iterator = CreateTempVar(parser_->GetIterator(
- factory()->NewVariableProxy(temp), factory(), RelocInfo::kNoPosition));
- auto done = CreateTempVar(
- factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition));
+ factory()->NewVariableProxy(temp), factory(), kNoSourcePosition));
+ auto done =
+ CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
auto result = CreateTempVar();
auto v = CreateTempVar();
auto completion = CreateTempVar();
- auto nopos = RelocInfo::kNoPosition;
+ auto nopos = kNoSourcePosition;
// For the purpose of iterator finalization, we temporarily set block_ to a
// new block. In the main body of this function, we write to block_ (both
@@ -431,9 +388,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// wrap this new block in a try-finally statement, restore block_ to its
// original value, and add the try-finally statement to block_.
auto target = block_;
- if (FLAG_harmony_iterator_close) {
- block_ = factory()->NewBlock(nullptr, 8, true, nopos);
- }
+ block_ = factory()->NewBlock(nullptr, 8, true, nopos);
Spread* spread = nullptr;
for (Expression* value : *node->values()) {
@@ -459,30 +414,29 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
auto result_done = factory()->NewProperty(
factory()->NewVariableProxy(result),
factory()->NewStringLiteral(ast_value_factory()->done_string(),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ kNoSourcePosition),
+ kNoSourcePosition);
auto assign_undefined = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(v),
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
auto assign_value = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(v),
factory()->NewProperty(
factory()->NewVariableProxy(result),
factory()->NewStringLiteral(ast_value_factory()->value_string(),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ kNoSourcePosition),
+ kNoSourcePosition),
+ kNoSourcePosition);
auto unset_done = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(done),
- factory()->NewBooleanLiteral(false, RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ factory()->NewBooleanLiteral(false, kNoSourcePosition),
+ kNoSourcePosition);
auto inner_else =
- factory()->NewBlock(nullptr, 2, true, RelocInfo::kNoPosition);
+ factory()->NewBlock(nullptr, 2, true, kNoSourcePosition);
inner_else->statements()->Add(
factory()->NewExpressionStatement(assign_value, nopos), zone());
inner_else->statements()->Add(
@@ -494,7 +448,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
inner_else, nopos);
auto next_block =
- factory()->NewBlock(nullptr, 3, true, RelocInfo::kNoPosition);
+ factory()->NewBlock(nullptr, 3, true, kNoSourcePosition);
next_block->statements()->Add(
factory()->NewExpressionStatement(
factory()->NewAssignment(
@@ -506,22 +460,21 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
factory()->NewExpressionStatement(
parser_->BuildIteratorNextResult(
factory()->NewVariableProxy(iterator), result,
- RelocInfo::kNoPosition),
- RelocInfo::kNoPosition),
+ kNoSourcePosition),
+ kNoSourcePosition),
zone());
next_block->statements()->Add(inner_if, zone());
if_not_done = factory()->NewIfStatement(
- factory()->NewUnaryOperation(Token::NOT,
- factory()->NewVariableProxy(done),
- RelocInfo::kNoPosition),
- next_block, factory()->NewEmptyStatement(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ factory()->NewUnaryOperation(
+ Token::NOT, factory()->NewVariableProxy(done), kNoSourcePosition),
+ next_block, factory()->NewEmptyStatement(kNoSourcePosition),
+ kNoSourcePosition);
}
block_->statements()->Add(if_not_done, zone());
if (!(value->IsLiteral() && value->AsLiteral()->raw_value()->IsTheHole())) {
- if (FLAG_harmony_iterator_close) {
+ {
// completion = kAbruptCompletion;
Expression* proxy = factory()->NewVariableProxy(completion);
Expression* assignment = factory()->NewAssignment(
@@ -533,7 +486,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
RecurseIntoSubpattern(value, factory()->NewVariableProxy(v));
- if (FLAG_harmony_iterator_close) {
+ {
// completion = kNormalCompletion;
Expression* proxy = factory()->NewVariableProxy(completion);
Expression* assignment = factory()->NewAssignment(
@@ -552,11 +505,11 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// let array = [];
// while (!done) {
+ // done = true; // If .next, .done or .value throws, don't close.
// result = IteratorNext(iterator);
- // if (result.done) {
- // done = true;
- // } else {
+ // if (!result.done) {
// %AppendElement(array, result.value);
+ // done = false;
// }
// }
@@ -568,15 +521,9 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
empty_exprs,
// Reuse pattern's literal index - it is unused since there is no
// actual literal allocated.
- node->literal_index(), RelocInfo::kNoPosition));
+ node->literal_index(), kNoSourcePosition));
}
- // result = IteratorNext(iterator);
- Statement* get_next = factory()->NewExpressionStatement(
- parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
- result, nopos),
- nopos);
-
// done = true;
Statement* set_done = factory()->NewExpressionStatement(
factory()->NewAssignment(
@@ -584,6 +531,12 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
factory()->NewBooleanLiteral(true, nopos), nopos),
nopos);
+ // result = IteratorNext(iterator);
+ Statement* get_next = factory()->NewExpressionStatement(
+ parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
+ result, nopos),
+ nopos);
+
// %AppendElement(array, result.value);
Statement* append_element;
{
@@ -600,29 +553,44 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
nopos);
}
- // if (result.done) { #set_done } else { #append_element }
- Statement* set_done_or_append;
+ // done = false;
+ Statement* unset_done = factory()->NewExpressionStatement(
+ factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(done),
+ factory()->NewBooleanLiteral(false, nopos), nopos),
+ nopos);
+
+ // if (!result.done) { #append_element; #unset_done }
+ Statement* maybe_append_and_unset_done;
{
Expression* result_done =
factory()->NewProperty(factory()->NewVariableProxy(result),
factory()->NewStringLiteral(
ast_value_factory()->done_string(), nopos),
nopos);
- set_done_or_append = factory()->NewIfStatement(result_done, set_done,
- append_element, nopos);
+
+ Block* then = factory()->NewBlock(nullptr, 2, true, nopos);
+ then->statements()->Add(append_element, zone());
+ then->statements()->Add(unset_done, zone());
+
+ maybe_append_and_unset_done = factory()->NewIfStatement(
+ factory()->NewUnaryOperation(Token::NOT, result_done, nopos), then,
+ factory()->NewEmptyStatement(nopos), nopos);
}
// while (!done) {
+ // #set_done;
// #get_next;
- // #set_done_or_append;
+ // #maybe_append_and_unset_done;
// }
WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
{
Expression* condition = factory()->NewUnaryOperation(
Token::NOT, factory()->NewVariableProxy(done), nopos);
- Block* body = factory()->NewBlock(nullptr, 2, true, nopos);
+ Block* body = factory()->NewBlock(nullptr, 3, true, nopos);
+ body->statements()->Add(set_done, zone());
body->statements()->Add(get_next, zone());
- body->statements()->Add(set_done_or_append, zone());
+ body->statements()->Add(maybe_append_and_unset_done, zone());
loop->Initialize(condition, body);
}
@@ -631,13 +599,11 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
factory()->NewVariableProxy(array));
}
- if (FLAG_harmony_iterator_close) {
- Expression* closing_condition = factory()->NewUnaryOperation(
- Token::NOT, factory()->NewVariableProxy(done), nopos);
- parser_->FinalizeIteratorUse(completion, closing_condition, iterator,
- block_, target);
- block_ = target;
- }
+ Expression* closing_condition = factory()->NewUnaryOperation(
+ Token::NOT, factory()->NewVariableProxy(done), nopos);
+ parser_->FinalizeIteratorUse(completion, closing_condition, iterator, block_,
+ target);
+ block_ = target;
}
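
The reordered loop above is the point of the new shape: done is assigned true before IteratorNext runs, so an exception thrown by .next(), .done or .value leaves done == true and the closing_condition (!done) built just above skips the iterator close. A stand-alone sketch of that pessimistic-flag pattern (StepResult, CountingIter and CollectRest are illustrative names, not V8 code):

#include <vector>

// Sketch of the rewritten rest-element loop: set `done` *before* the
// next() call, so that if next()/done/value throws, done stays true and
// the caller's (!done) check skips closing the iterator.
struct StepResult {
  bool done;
  int value;
};

struct CountingIter {  // yields 0, 1, 2, then reports done
  int n = 0;
  StepResult Next() { return StepResult{n >= 3, n++}; }
};

template <typename Iter>
std::vector<int> CollectRest(Iter& it) {
  std::vector<int> array;
  bool done = false;
  while (!done) {
    done = true;                      // pessimistic: a throw keeps done true
    StepResult result = it.Next();    // may throw in the general case
    if (!result.done) {
      array.push_back(result.value);  // %AppendElement(array, result.value)
      done = false;                   // step succeeded, keep looping
    }
  }
  return array;
}

int main() {
  CountingIter it;
  return CollectRest(it).size() == 3 ? 0 : 1;
}
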
@@ -661,19 +627,14 @@ void Parser::PatternRewriter::VisitAssignment(Assignment* node) {
if (IsInitializerContext()) {
Expression* is_undefined = factory()->NewCompareOperation(
Token::EQ_STRICT, factory()->NewVariableProxy(temp),
- factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
- RelocInfo::kNoPosition);
+ factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
value = factory()->NewConditional(is_undefined, initializer,
factory()->NewVariableProxy(temp),
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
}
- if (IsBindingContext() &&
- descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
- scope()->is_arrow_scope()) {
- RewriteParameterInitializerScope(parser_->stack_limit(), initializer,
- scope()->outer_scope(), scope());
- }
+  // The initializer may have been parsed in the wrong scope.
+ RewriteParameterScopes(initializer);
PatternContext old_context = SetAssignmentContextIfNeeded(initializer);
RecurseIntoSubpattern(node->target(), value);
@@ -691,15 +652,12 @@ void Parser::PatternRewriter::VisitProperty(v8::internal::Property* node) {
factory()->NewAssignment(Token::ASSIGN, node, value, node->position());
block_->statements()->Add(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
}
// =============== UNREACHABLE =============================
-void Parser::PatternRewriter::Visit(AstNode* node) { UNREACHABLE(); }
-
#define NOT_A_PATTERN(Node) \
void Parser::PatternRewriter::Visit##Node(v8::internal::Node*) { \
UNREACHABLE(); \
@@ -722,7 +680,6 @@ NOT_A_PATTERN(DoExpression)
NOT_A_PATTERN(DoWhileStatement)
NOT_A_PATTERN(EmptyStatement)
NOT_A_PATTERN(EmptyParentheses)
-NOT_A_PATTERN(ExportDeclaration)
NOT_A_PATTERN(ExpressionStatement)
NOT_A_PATTERN(ForInStatement)
NOT_A_PATTERN(ForOfStatement)
@@ -730,7 +687,6 @@ NOT_A_PATTERN(ForStatement)
NOT_A_PATTERN(FunctionDeclaration)
NOT_A_PATTERN(FunctionLiteral)
NOT_A_PATTERN(IfStatement)
-NOT_A_PATTERN(ImportDeclaration)
NOT_A_PATTERN(Literal)
NOT_A_PATTERN(NativeFunctionLiteral)
NOT_A_PATTERN(RegExpLiteral)
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index d02cd63d66..e1ef74c33c 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/parsing/preparse-data.h"
+#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/globals.h"
-#include "src/hashmap.h"
#include "src/parsing/parser.h"
-#include "src/parsing/preparse-data.h"
#include "src/parsing/preparse-data-format.h"
namespace v8 {
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 1c99450810..ddc4d03321 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -6,8 +6,8 @@
#define V8_PARSING_PREPARSE_DATA_H_
#include "src/allocation.h"
+#include "src/base/hashmap.h"
#include "src/collector.h"
-#include "src/hashmap.h"
#include "src/messages.h"
#include "src/parsing/preparse-data-format.h"
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index da1c35bcc0..b1bbbf60c8 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -9,11 +9,10 @@
#include "src/conversions-inl.h"
#include "src/conversions.h"
#include "src/globals.h"
-#include "src/hashmap.h"
#include "src/list.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data.h"
#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/unicode.h"
#include "src/utils.h"
@@ -21,103 +20,102 @@
namespace v8 {
namespace internal {
-void PreParserTraits::ReportMessageAt(Scanner::Location location,
- MessageTemplate::Template message,
- const char* arg,
- ParseErrorType error_type) {
- ReportMessageAt(location.beg_pos, location.end_pos, message, arg, error_type);
-}
+// ----------------------------------------------------------------------------
+// The CHECK_OK macro is a convenient way to enforce error
+// handling for functions that may fail (by setting *ok to false).
+//
+// CAUTION: This macro appends extra statements after a call,
+// thus it must never be used where only a single statement
+// is correct (e.g. an if statement branch w/o braces)!
+#define CHECK_OK ok); \
+ if (!*ok) return Statement::Default(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
-void PreParserTraits::ReportMessageAt(int start_pos, int end_pos,
- MessageTemplate::Template message,
- const char* arg,
- ParseErrorType error_type) {
- pre_parser_->log_->LogMessage(start_pos, end_pos, message, arg, error_type);
-}
-
+// Used in functions where the return type is not ExpressionT.
+#define CHECK_OK_CUSTOM(x) ok); \
+ if (!*ok) return this->x(); \
+ ((void)0
+#define DUMMY ) // to make indentation work
+#undef DUMMY
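
Both macros end mid-argument-list on purpose: the call site's own closing parenthesis and semicolon complete them. Hand-expanding one call makes the shape visible (an illustrative expansion following the definitions above, not additional source):

// Written in the pre-parser:
//   Expect(Token::LBRACE, CHECK_OK);
// After macro expansion:
//   Expect(Token::LBRACE, ok);
//   if (!*ok) return Statement::Default();
//   ((void)0);
// The dangling "((void)0" is closed by the call site's ')', and its only
// job is to soak up the trailing ';'. The hidden early return is why the
// CAUTION above forbids using CHECK_OK in an unbraced if-branch.
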
-PreParserIdentifier PreParserTraits::GetSymbol(Scanner* scanner) {
- if (scanner->current_token() == Token::FUTURE_RESERVED_WORD) {
- return PreParserIdentifier::FutureReserved();
- } else if (scanner->current_token() ==
- Token::FUTURE_STRICT_RESERVED_WORD) {
- return PreParserIdentifier::FutureStrictReserved();
- } else if (scanner->current_token() == Token::LET) {
- return PreParserIdentifier::Let();
- } else if (scanner->current_token() == Token::STATIC) {
- return PreParserIdentifier::Static();
- } else if (scanner->current_token() == Token::YIELD) {
- return PreParserIdentifier::Yield();
- }
- if (scanner->UnescapedLiteralMatches("eval", 4)) {
- return PreParserIdentifier::Eval();
- }
- if (scanner->UnescapedLiteralMatches("arguments", 9)) {
- return PreParserIdentifier::Arguments();
- }
- if (scanner->UnescapedLiteralMatches("undefined", 9)) {
- return PreParserIdentifier::Undefined();
- }
- if (scanner->LiteralMatches("prototype", 9)) {
- return PreParserIdentifier::Prototype();
- }
- if (scanner->LiteralMatches("constructor", 11)) {
- return PreParserIdentifier::Constructor();
- }
- return PreParserIdentifier::Default();
+void ParserBaseTraits<PreParser>::ReportMessageAt(
+ Scanner::Location source_location, MessageTemplate::Template message,
+ const char* arg, ParseErrorType error_type) {
+ delegate()->log_->LogMessage(source_location.beg_pos, source_location.end_pos,
+ message, arg, error_type);
}
-
-PreParserIdentifier PreParserTraits::GetNumberAsSymbol(Scanner* scanner) {
- return PreParserIdentifier::Default();
+void ParserBaseTraits<PreParser>::ReportMessageAt(
+ Scanner::Location source_location, MessageTemplate::Template message,
+ const AstRawString* arg, ParseErrorType error_type) {
+ UNREACHABLE();
}
+PreParserIdentifier ParserBaseTraits<PreParser>::GetSymbol(
+ Scanner* scanner) const {
+ switch (scanner->current_token()) {
+ case Token::ENUM:
+ return PreParserIdentifier::Enum();
+ case Token::AWAIT:
+ return PreParserIdentifier::Await();
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ return PreParserIdentifier::FutureStrictReserved();
+ case Token::LET:
+ return PreParserIdentifier::Let();
+ case Token::STATIC:
+ return PreParserIdentifier::Static();
+ case Token::YIELD:
+ return PreParserIdentifier::Yield();
+ case Token::ASYNC:
+ return PreParserIdentifier::Async();
+ default:
+ if (scanner->UnescapedLiteralMatches("eval", 4))
+ return PreParserIdentifier::Eval();
+ if (scanner->UnescapedLiteralMatches("arguments", 9))
+ return PreParserIdentifier::Arguments();
+ if (scanner->UnescapedLiteralMatches("undefined", 9))
+ return PreParserIdentifier::Undefined();
+ if (scanner->LiteralMatches("prototype", 9))
+ return PreParserIdentifier::Prototype();
+ if (scanner->LiteralMatches("constructor", 11))
+ return PreParserIdentifier::Constructor();
+ return PreParserIdentifier::Default();
+ }
+}
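
GetSymbol now classifies in two tiers: a switch over tokens the scanner already distinguishes, then spelling checks for identifiers that are only special by content. A toy version of that shape (the Tok/Kind enums and Classify are illustrative, not V8's token set):

#include <string>

enum class Tok { kLet, kStatic, kYield, kAsync, kIdentifier };
enum class Kind { kDefault, kLet, kStatic, kYield, kAsync, kEval, kArguments };

Kind Classify(Tok token, const std::string& literal) {
  switch (token) {
    case Tok::kLet:    return Kind::kLet;
    case Tok::kStatic: return Kind::kStatic;
    case Tok::kYield:  return Kind::kYield;
    case Tok::kAsync:  return Kind::kAsync;
    default:
      // Second tier, like scanner->UnescapedLiteralMatches("eval", 4) etc.
      if (literal == "eval") return Kind::kEval;
      if (literal == "arguments") return Kind::kArguments;
      return Kind::kDefault;
  }
}

int main() {
  return Classify(Tok::kIdentifier, "eval") == Kind::kEval ? 0 : 1;
}
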
-PreParserExpression PreParserTraits::ExpressionFromString(
- int pos, Scanner* scanner, PreParserFactory* factory) {
+PreParserExpression ParserBaseTraits<PreParser>::ExpressionFromString(
+ int pos, Scanner* scanner, PreParserFactory* factory) const {
if (scanner->UnescapedLiteralMatches("use strict", 10)) {
return PreParserExpression::UseStrictStringLiteral();
}
return PreParserExpression::StringLiteral();
}
-
-PreParserExpression PreParserTraits::ParseV8Intrinsic(bool* ok) {
- return pre_parser_->ParseV8Intrinsic(ok);
-}
-
-
-PreParserExpression PreParserTraits::ParseFunctionLiteral(
- PreParserIdentifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_position, FunctionLiteral::FunctionType type,
- LanguageMode language_mode, bool* ok) {
- return pre_parser_->ParseFunctionLiteral(
- name, function_name_location, function_name_validity, kind,
- function_token_position, type, language_mode, ok);
-}
-
-
PreParser::PreParseResult PreParser::PreParseLazyFunction(
LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
- ParserRecorder* log, Scanner::BookmarkScope* bookmark) {
+ bool parsing_module, ParserRecorder* log, Scanner::BookmarkScope* bookmark,
+ int* use_counts) {
+ parsing_module_ = parsing_module;
log_ = log;
+ use_counts_ = use_counts;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
- Scope* top_scope = NewScope(scope_, SCRIPT_SCOPE);
- PreParserFactory top_factory(NULL);
- FunctionState top_state(&function_state_, &scope_, top_scope, kNormalFunction,
- &top_factory);
- scope_->SetLanguageMode(language_mode);
- Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE, kind);
+ DCHECK_NULL(scope_state_);
+ DeclarationScope* top_scope = NewScriptScope();
+ FunctionState top_state(&function_state_, &scope_state_, top_scope,
+ kNormalFunction);
+ scope()->SetLanguageMode(language_mode);
+ DeclarationScope* function_scope = NewFunctionScope(kind);
if (!has_simple_parameters) function_scope->SetHasNonSimpleParameters();
- PreParserFactory function_factory(NULL);
- FunctionState function_state(&function_state_, &scope_, function_scope, kind,
- &function_factory);
+ FunctionState function_state(&function_state_, &scope_state_, function_scope,
+ kind);
DCHECK_EQ(Token::LBRACE, scanner()->current_token());
bool ok = true;
int start_position = peek_position();
ParseLazyFunctionLiteralBody(&ok, bookmark);
+ use_counts_ = nullptr;
if (bookmark && bookmark->HasBeenReset()) {
// Do nothing, as we've just aborted scanning this function.
} else if (stack_overflow()) {
@@ -126,23 +124,16 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
ReportUnexpectedToken(scanner()->current_token());
} else {
DCHECK_EQ(Token::RBRACE, scanner()->peek());
- if (is_strict(scope_->language_mode())) {
+ if (is_strict(scope()->language_mode())) {
int end_pos = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_pos, &ok);
+ CheckDecimalLiteralWithLeadingZero(use_counts, start_position, end_pos);
if (!ok) return kPreParseSuccess;
}
}
return kPreParseSuccess;
}
-PreParserExpression PreParserTraits::ParseClassLiteral(
- Type::ExpressionClassifier* classifier, PreParserIdentifier name,
- Scanner::Location class_name_location, bool name_is_strict_reserved,
- int pos, bool* ok) {
- return pre_parser_->ParseClassLiteral(classifier, name, class_name_location,
- name_is_strict_reserved, pos, ok);
-}
-
// Preparsing checks a JavaScript program and emits preparse-data that helps
// a later full parse run faster.
@@ -178,19 +169,23 @@ PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
switch (peek()) {
case Token::FUNCTION:
- return ParseFunctionDeclaration(ok);
+ return ParseHoistableDeclaration(ok);
case Token::CLASS:
return ParseClassDeclaration(ok);
case Token::CONST:
- if (allow_const()) {
- return ParseVariableStatement(kStatementListItem, ok);
- }
- break;
+ return ParseVariableStatement(kStatementListItem, ok);
case Token::LET:
if (IsNextLetKeyword()) {
return ParseVariableStatement(kStatementListItem, ok);
}
break;
+ case Token::ASYNC:
+ if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+ !scanner()->HasAnyLineTerminatorAfterNext()) {
+ Consume(Token::ASYNC);
+ return ParseAsyncFunctionDeclaration(ok);
+ }
+ /* falls through */
default:
break;
}
@@ -215,26 +210,25 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
}
bool starts_with_identifier = peek() == Token::IDENTIFIER;
Scanner::Location token_loc = scanner()->peek_location();
- Statement statement = ParseStatementListItem(ok);
- if (!*ok) return;
+ Statement statement = ParseStatementListItem(CHECK_OK_CUSTOM(Void));
if (directive_prologue) {
bool use_strict_found = statement.IsUseStrictLiteral();
if (use_strict_found) {
- scope_->SetLanguageMode(
- static_cast<LanguageMode>(scope_->language_mode() | STRICT));
+ scope()->SetLanguageMode(
+ static_cast<LanguageMode>(scope()->language_mode() | STRICT));
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
- if (use_strict_found && !scope_->HasSimpleParameters()) {
+ if (use_strict_found && !scope()->HasSimpleParameters()) {
// TC39 deemed "use strict" directives to be an error when occurring
      // in the body of a function with a non-simple parameter list, on
// 29/7/2015. https://goo.gl/ueA7Ln
- PreParserTraits::ReportMessageAt(
- token_loc, MessageTemplate::kIllegalLanguageModeDirective,
- "use strict");
+ ReportMessageAt(token_loc,
+ MessageTemplate::kIllegalLanguageModeDirective,
+ "use strict");
*ok = false;
return;
}
@@ -257,12 +251,6 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
}
-#define CHECK_OK ok); \
- if (!*ok) return Statement::Default(); \
- ((void)0
-#define DUMMY ) // to make indentation work
-#undef DUMMY
-
PreParser::Statement PreParser::ParseStatement(
AllowLabelledFunctionStatement allow_function, bool* ok) {
// Statement ::
@@ -281,7 +269,8 @@ PreParser::Statement PreParser::ParseScopedStatement(bool legacy, bool* ok) {
(legacy && allow_harmony_restrictive_declarations())) {
return ParseSubStatement(kDisallowLabelledFunctionStatement, ok);
} else {
- return ParseFunctionDeclaration(CHECK_OK);
+ BlockState block_state(&scope_state_);
+ return ParseFunctionDeclaration(ok);
}
}
@@ -377,37 +366,57 @@ PreParser::Statement PreParser::ParseSubStatement(
}
}
+PreParser::Statement PreParser::ParseHoistableDeclaration(
+ int pos, ParseFunctionFlags flags, bool* ok) {
+ const bool is_generator = flags & ParseFunctionFlags::kIsGenerator;
+ const bool is_async = flags & ParseFunctionFlags::kIsAsync;
+ DCHECK(!is_generator || !is_async);
-PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
- // FunctionDeclaration ::
- // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
- // GeneratorDeclaration ::
- // 'function' '*' Identifier '(' FormalParameterListopt ')'
- // '{' FunctionBody '}'
- Expect(Token::FUNCTION, CHECK_OK);
- int pos = position();
- bool is_generator = Check(Token::MUL);
bool is_strict_reserved = false;
Identifier name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
+
ParseFunctionLiteral(name, scanner()->location(),
is_strict_reserved ? kFunctionNameIsStrictReserved
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
- : FunctionKind::kNormalFunction,
+ : is_async ? FunctionKind::kAsyncFunction
+ : FunctionKind::kNormalFunction,
pos, FunctionLiteral::kDeclaration, language_mode(),
CHECK_OK);
return Statement::FunctionDeclaration();
}
+PreParser::Statement PreParser::ParseAsyncFunctionDeclaration(bool* ok) {
+ // AsyncFunctionDeclaration ::
+ // async [no LineTerminator here] function BindingIdentifier[Await]
+ // ( FormalParameters[Await] ) { AsyncFunctionBody }
+ DCHECK_EQ(scanner()->current_token(), Token::ASYNC);
+ int pos = position();
+ Expect(Token::FUNCTION, CHECK_OK);
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsAsync;
+ return ParseHoistableDeclaration(pos, flags, ok);
+}
+
+PreParser::Statement PreParser::ParseHoistableDeclaration(bool* ok) {
+ // FunctionDeclaration ::
+ // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
+ // GeneratorDeclaration ::
+ // 'function' '*' Identifier '(' FormalParameterListopt ')'
+ // '{' FunctionBody '}'
+
+ Expect(Token::FUNCTION, CHECK_OK);
+ int pos = position();
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ if (Check(Token::MUL)) {
+ flags |= ParseFunctionFlags::kIsGenerator;
+ }
+ return ParseHoistableDeclaration(pos, flags, ok);
+}
+
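
ParseFunctionFlags is used as a bitmask: the generator and async bits are set with |=, tested independently with &, and asserted mutually exclusive. A minimal sketch of that flag handling (the bit values are an assumption required by the |=/& usage above; the hunk only shows the names):

#include <cassert>

// Assumed layout: distinct powers of two.
enum ParseFunctionFlags {
  kIsNormal = 0,
  kIsGenerator = 1 << 0,
  kIsAsync = 1 << 1,
};

inline ParseFunctionFlags& operator|=(ParseFunctionFlags& lhs,
                                      ParseFunctionFlags rhs) {
  lhs = static_cast<ParseFunctionFlags>(static_cast<int>(lhs) |
                                        static_cast<int>(rhs));
  return lhs;
}

int main() {
  ParseFunctionFlags flags = kIsNormal;
  flags |= kIsGenerator;  // the Check(Token::MUL) path above
  const bool is_generator = (flags & kIsGenerator) != 0;
  const bool is_async = (flags & kIsAsync) != 0;
  assert(is_generator && !is_async);  // DCHECK(!is_generator || !is_async)
  return 0;
}
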
PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
Expect(Token::CLASS, CHECK_OK);
- if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
- ReportMessage(MessageTemplate::kSloppyLexical);
- *ok = false;
- return Statement::Default();
- }
int pos = position();
bool is_strict_reserved = false;
@@ -425,8 +434,11 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
Expect(Token::LBRACE, CHECK_OK);
Statement final = Statement::Default();
- while (peek() != Token::RBRACE) {
- final = ParseStatementListItem(CHECK_OK);
+ {
+ BlockState block_state(&scope_state_);
+ while (peek() != Token::RBRACE) {
+ final = ParseStatementListItem(CHECK_OK);
+ }
}
Expect(Token::RBRACE, ok);
return final;
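
ParseBlock now opens a real block scope for the statement list by constructing a BlockState on the stack; the scope pops automatically when the braced region ends, which is the pattern this patch repeats for switch cases, for loops and catch blocks below. A stand-alone RAII sketch of that push/pop discipline (Scope, ScopeStack and the toy BlockState are illustrative stand-ins for V8's scope chain):

#include <cassert>
#include <vector>

struct Scope {
  int depth;
};

struct ScopeStack {
  std::vector<Scope> scopes;
};

// RAII guard: construction pushes a scope, destruction pops it, so
// bracing a region is enough to scope it, even on early return.
class BlockState {
 public:
  explicit BlockState(ScopeStack* stack) : stack_(stack) {
    stack_->scopes.push_back(Scope{static_cast<int>(stack_->scopes.size())});
  }
  ~BlockState() { stack_->scopes.pop_back(); }

 private:
  ScopeStack* stack_;
};

int main() {
  ScopeStack stack;
  {
    BlockState block_state(&stack);  // entering '{' of a block
    assert(stack.scopes.size() == 1);
  }                                  // leaving '}' pops automatically
  assert(stack.scopes.empty());
  return 0;
}
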
@@ -473,7 +485,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
bool is_pattern = false;
if (peek() == Token::VAR) {
Consume(Token::VAR);
- } else if (peek() == Token::CONST && allow_const()) {
+ } else if (peek() == Token::CONST) {
// TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
//
// ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
@@ -485,12 +497,10 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
// existing pages. Therefore we keep allowing const with the old
// non-harmony semantics in sloppy mode.
Consume(Token::CONST);
- if (is_strict(language_mode()) || allow_harmony_sloppy()) {
- DCHECK(var_context != kStatement);
- require_initializer = true;
- lexical = true;
- }
- } else if (peek() == Token::LET && allow_let()) {
+ DCHECK(var_context != kStatement);
+ require_initializer = true;
+ lexical = true;
+ } else if (peek() == Token::LET) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
lexical = true;
@@ -536,7 +546,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
}
} else if ((require_initializer || is_pattern) &&
(var_context != kForStatement || !PeekInOrOf())) {
- PreParserTraits::ReportMessageAt(
+ ReportMessageAt(
Scanner::Location(decl_pos, scanner()->location().end_pos),
MessageTemplate::kDeclarationMissingInitializer,
is_pattern ? "destructuring" : "const");
@@ -556,6 +566,22 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
return Statement::Default();
}
+PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
+ Consume(Token::FUNCTION);
+ int pos = position();
+ ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
+ if (Check(Token::MUL)) {
+ flags |= ParseFunctionFlags::kIsGenerator;
+ if (allow_harmony_restrictive_declarations()) {
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kGeneratorInLegacyContext);
+ *ok = false;
+ return Statement::Default();
+ }
+ }
+ return ParseHoistableDeclaration(pos, flags, ok);
+}
+
PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
AllowLabelledFunctionStatement allow_function, bool* ok) {
// ExpressionStatement | LabelledStatement ::
@@ -586,7 +612,8 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) {
// Expression is a single identifier, and not, e.g., a parenthesized
// identifier.
- DCHECK(!expr.AsIdentifier().IsFutureReserved());
+ DCHECK(!expr.AsIdentifier().IsEnum());
+ DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
DCHECK(is_sloppy(language_mode()) ||
!IsFutureStrictReserved(expr.AsIdentifier()));
Consume(Token::COLON);
@@ -606,14 +633,6 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
// accept "native function" in the preparser.
}
// Parsed expression statement.
- // Detect attempts at 'let' declarations in sloppy mode.
- if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
- is_sloppy(language_mode()) && expr.IsIdentifier() &&
- expr.AsIdentifier().IsLet()) {
- ReportMessage(MessageTemplate::kSloppyLexical, NULL);
- *ok = false;
- return Statement::Default();
- }
ExpectSemicolon(CHECK_OK);
return Statement::ExpressionStatement(expr);
}
@@ -684,7 +703,6 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
// reporting any errors on it, because of the way errors are
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
- function_state_->set_return_location(scanner()->location());
// An ECMAScript program is considered syntactically incorrect if it
// contains a return statement that is not within the body of a
@@ -696,6 +714,16 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
tok != Token::SEMICOLON &&
tok != Token::RBRACE &&
tok != Token::EOS) {
+ // Because of the return code rewriting that happens in case of a subclass
+      // constructor, we don't want to accept tail calls; therefore we don't
+      // set ReturnExprScope to kInsideValidReturnStatement here.
+ ReturnExprContext return_expr_context =
+ IsSubclassConstructor(function_state_->kind())
+ ? function_state_->return_expr_context()
+ : ReturnExprContext::kInsideValidReturnStatement;
+
+ ReturnExprScope maybe_allow_tail_calls(function_state_,
+ return_expr_context);
ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -716,8 +744,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Scope* with_scope = NewScope(scope_, WITH_SCOPE);
- BlockState block_state(&scope_, with_scope);
+ Scope* with_scope = NewScope(WITH_SCOPE);
+ BlockState block_state(&scope_state_, with_scope);
ParseScopedStatement(true, CHECK_OK);
return Statement::Default();
}
@@ -732,23 +760,26 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- Expect(Token::LBRACE, CHECK_OK);
- Token::Value token = peek();
- while (token != Token::RBRACE) {
- if (token == Token::CASE) {
- Expect(Token::CASE, CHECK_OK);
- ParseExpression(true, CHECK_OK);
- } else {
- Expect(Token::DEFAULT, CHECK_OK);
- }
- Expect(Token::COLON, CHECK_OK);
- token = peek();
- Statement statement = Statement::Jump();
- while (token != Token::CASE &&
- token != Token::DEFAULT &&
- token != Token::RBRACE) {
- statement = ParseStatementListItem(CHECK_OK);
+ {
+ BlockState cases_block_state(&scope_state_);
+ Expect(Token::LBRACE, CHECK_OK);
+ Token::Value token = peek();
+ while (token != Token::RBRACE) {
+ if (token == Token::CASE) {
+ Expect(Token::CASE, CHECK_OK);
+ ParseExpression(true, CHECK_OK);
+ } else {
+ Expect(Token::DEFAULT, CHECK_OK);
+ }
+ Expect(Token::COLON, CHECK_OK);
token = peek();
+ Statement statement = Statement::Jump();
+ while (token != Token::CASE &&
+ token != Token::DEFAULT &&
+ token != Token::RBRACE) {
+ statement = ParseStatementListItem(CHECK_OK);
+ token = peek();
+ }
}
}
Expect(Token::RBRACE, ok);
@@ -788,12 +819,15 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// ForStatement ::
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
+ // Create an in-between scope for let-bound iteration variables.
+ bool has_lexical = false;
+
+ BlockState block_state(&scope_state_);
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
- bool is_let_identifier_expression = false;
if (peek() != Token::SEMICOLON) {
ForEachStatement::VisitMode mode;
- if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
+ if (peek() == Token::VAR || peek() == Token::CONST ||
(peek() == Token::LET && IsNextLetKeyword())) {
int decl_count;
bool is_lexical;
@@ -803,21 +837,27 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
&is_binding_pattern, &first_initializer_loc,
&bindings_loc, CHECK_OK);
+ if (is_lexical) has_lexical = true;
if (CheckInOrOf(&mode, ok)) {
if (!*ok) return Statement::Default();
if (decl_count != 1) {
- PreParserTraits::ReportMessageAt(
- bindings_loc, MessageTemplate::kForInOfLoopMultiBindings,
- ForEachStatement::VisitModeString(mode));
+ ReportMessageAt(bindings_loc,
+ MessageTemplate::kForInOfLoopMultiBindings,
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return Statement::Default();
}
if (first_initializer_loc.IsValid() &&
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
- is_lexical || is_binding_pattern)) {
- PreParserTraits::ReportMessageAt(
- first_initializer_loc, MessageTemplate::kForInOfLoopInitializer,
- ForEachStatement::VisitModeString(mode));
+ is_lexical || is_binding_pattern || allow_harmony_for_in())) {
+ // Only increment the use count if we would have let this through
+ // without the flag.
+ if (use_counts_ != nullptr && allow_harmony_for_in()) {
+ ++use_counts_[v8::Isolate::kForInInitializer];
+ }
+ ReportMessageAt(first_initializer_loc,
+ MessageTemplate::kForInOfLoopInitializer,
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return Statement::Default();
}
@@ -831,7 +871,11 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- ParseScopedStatement(true, CHECK_OK);
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideForInOfBody);
+ ParseScopedStatement(true, CHECK_OK);
+ }
return Statement::Default();
}
} else {
@@ -839,10 +883,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
ExpressionClassifier classifier(this);
Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
- is_let_identifier_expression =
- lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
- bool is_for_each = CheckInOrOf(&mode, ok);
- if (!*ok) return Statement::Default();
+ bool is_for_each = CheckInOrOf(&mode, CHECK_OK);
bool is_destructuring = is_for_each &&
(lhs->IsArrayLiteral() || lhs->IsObjectLiteral());
@@ -868,33 +909,39 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
}
Expect(Token::RPAREN, CHECK_OK);
- ParseScopedStatement(true, CHECK_OK);
+ {
+ BlockState block_state(&scope_state_);
+ ParseScopedStatement(true, CHECK_OK);
+ }
return Statement::Default();
}
}
}
// Parsed initializer at this point.
- // Detect attempts at 'let' declarations in sloppy mode.
- if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
- is_sloppy(language_mode()) && is_let_identifier_expression) {
- ReportMessage(MessageTemplate::kSloppyLexical, NULL);
- *ok = false;
- return Statement::Default();
- }
Expect(Token::SEMICOLON, CHECK_OK);
- if (peek() != Token::SEMICOLON) {
- ParseExpression(true, CHECK_OK);
- }
- Expect(Token::SEMICOLON, CHECK_OK);
+  // If there are let bindings, then the condition and the next statement of
+  // the for loop must be parsed in a new scope.
+ Scope* inner_scope = scope();
+ // TODO(verwaest): Allocate this through a ScopeState as well.
+ if (has_lexical) inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
- if (peek() != Token::RPAREN) {
- ParseExpression(true, CHECK_OK);
- }
- Expect(Token::RPAREN, CHECK_OK);
+ {
+ BlockState block_state(&scope_state_, inner_scope);
- ParseScopedStatement(true, ok);
+ if (peek() != Token::SEMICOLON) {
+ ParseExpression(true, CHECK_OK);
+ }
+ Expect(Token::SEMICOLON, CHECK_OK);
+
+ if (peek() != Token::RPAREN) {
+ ParseExpression(true, CHECK_OK);
+ }
+ Expect(Token::RPAREN, CHECK_OK);
+
+ ParseScopedStatement(true, ok);
+ }
return Statement::Default();
}
@@ -929,7 +976,11 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
Expect(Token::TRY, CHECK_OK);
- ParseBlock(CHECK_OK);
+ {
+ ReturnExprScope no_tail_calls(function_state_,
+ ReturnExprContext::kInsideTryBlock);
+ ParseBlock(CHECK_OK);
+ }
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
@@ -937,24 +988,41 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
*ok = false;
return Statement::Default();
}
+ TailCallExpressionList tail_call_expressions_in_catch_block(zone());
+ bool catch_block_exists = false;
if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
+ Scope* catch_scope = NewScope(CATCH_SCOPE);
ExpressionClassifier pattern_classifier(this);
ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
{
- // TODO(adamk): Make this CATCH_SCOPE
- Scope* with_scope = NewScope(scope_, WITH_SCOPE);
- BlockState block_state(&scope_, with_scope);
- ParseBlock(CHECK_OK);
+ CollectExpressionsInTailPositionToListScope
+ collect_tail_call_expressions_scope(
+ function_state_, &tail_call_expressions_in_catch_block);
+ BlockState block_state(&scope_state_, catch_scope);
+ {
+ BlockState block_state(&scope_state_);
+ ParseBlock(CHECK_OK);
+ }
}
+ catch_block_exists = true;
tok = peek();
}
if (tok == Token::FINALLY) {
Consume(Token::FINALLY);
ParseBlock(CHECK_OK);
+ if (FLAG_harmony_explicit_tailcalls && catch_block_exists &&
+ tail_call_expressions_in_catch_block.has_explicit_tail_calls()) {
+ // TODO(ishell): update chapter number.
+ // ES8 XX.YY.ZZ
+ ReportMessageAt(tail_call_expressions_in_catch_block.location(),
+ MessageTemplate::kUnexpectedTailCallInCatchBlock);
+ *ok = false;
+ return Statement::Default();
+ }
}
return Statement::Default();
}
@@ -973,6 +1041,7 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
}
+// Redefinition of CHECK_OK for parsing expressions.
#undef CHECK_OK
#define CHECK_OK ok); \
if (!*ok) return Expression::Default(); \
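
The redefinition above only swaps the macro's bail-out value from
Statement::Default() to Expression::Default(); the trailing ((void)0 that
completes the macro lies outside this hunk's context. A self-contained
sketch of the CHECK_OK idiom itself (names are illustrative):

    #include <cstdio>

    struct Expression {
      static Expression Default() { return Expression(); }
    };

    // The macro supplies the `ok` argument, tests it, and returns early on
    // failure; the dangling ((void)0 is closed by the `);` already present
    // at each call site.
    #define CHECK_OK ok);                        \
      if (!*ok) return Expression::Default();    \
      ((void)0

    Expression ParseInner(bool* ok) {
      *ok = false;  // simulate a syntax error
      return Expression::Default();
    }

    Expression ParseOuter(bool* ok) {
      ParseInner(CHECK_OK);  // ParseInner(ok); if (!*ok) return Default();
      std::puts("unreachable after a failed ParseInner");
      return Expression::Default();
    }

    int main() {
      bool ok = true;
      ParseOuter(&ok);
      std::printf("ok = %d\n", ok);  // prints ok = 0
    }
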
@@ -990,12 +1059,11 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// '(' FormalParameterList? ')' '{' FunctionBody '}'
// Parse function body.
- bool outer_is_script_scope = scope_->is_script_scope();
- Scope* function_scope = NewScope(scope_, FUNCTION_SCOPE, kind);
+ bool outer_is_script_scope = scope()->is_script_scope();
+ DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
- PreParserFactory factory(NULL);
- FunctionState function_state(&function_state_, &scope_, function_scope, kind,
- &factory);
+ FunctionState function_state(&function_state_, &scope_state_, function_scope,
+ kind);
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
ExpressionClassifier formals_classifier(this, &duplicate_finder);
@@ -1012,9 +1080,8 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// See Parser::ParseFunctionLiteral for more information about lazy parsing
// and lazy compilation.
- bool is_lazily_parsed =
- (outer_is_script_scope && allow_lazy() && !parenthesized_function_);
- parenthesized_function_ = false;
+ bool is_lazily_parsed = (outer_is_script_scope && allow_lazy() &&
+ !function_state_->this_function_is_parenthesized());
Expect(Token::LBRACE, CHECK_OK);
if (is_lazily_parsed) {
@@ -1039,11 +1106,39 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
if (is_strict(language_mode)) {
int end_position = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
+ CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
+ end_position);
}
return Expression::Default();
}
+PreParser::Expression PreParser::ParseAsyncFunctionExpression(bool* ok) {
+ // AsyncFunctionExpression ::
+ // async [no LineTerminator here] function ( FormalParameters[Await] )
+ // { AsyncFunctionBody }
+ //
+ // async [no LineTerminator here] function BindingIdentifier[Await]
+ // ( FormalParameters[Await] ) { AsyncFunctionBody }
+ int pos = position();
+ Expect(Token::FUNCTION, CHECK_OK);
+ bool is_strict_reserved = false;
+ Identifier name;
+ FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
+
+ if (peek_any_identifier()) {
+ type = FunctionLiteral::kNamedExpression;
+ name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
+ &is_strict_reserved, CHECK_OK);
+ }
+
+ ParseFunctionLiteral(name, scanner()->location(),
+ is_strict_reserved ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown,
+ FunctionKind::kAsyncFunction, pos, type, language_mode(),
+ CHECK_OK);
+ return Expression::Default();
+}
void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
Scanner::BookmarkScope* bookmark) {
@@ -1055,10 +1150,12 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok,
// Position right after terminal '}'.
DCHECK_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
+ DeclarationScope* scope = this->scope()->AsDeclarationScope();
+ DCHECK(scope->is_function_scope());
log_->LogFunction(body_start, body_end,
function_state_->materialized_literal_count(),
function_state_->expected_property_count(), language_mode(),
- scope_->uses_super_property(), scope_->calls_eval());
+ scope->uses_super_property(), scope->calls_eval());
}
PreParserExpression PreParser::ParseClassLiteral(
@@ -1079,17 +1176,17 @@ PreParserExpression PreParser::ParseClassLiteral(
}
LanguageMode class_language_mode = language_mode();
- Scope* scope = NewScope(scope_, BLOCK_SCOPE);
- BlockState block_state(&scope_, scope);
- scope_->SetLanguageMode(
+ BlockState block_state(&scope_state_);
+ scope()->SetLanguageMode(
static_cast<LanguageMode>(class_language_mode | STRICT));
// TODO(marja): Make PreParser use scope names too.
- // scope_->SetScopeName(name);
+ // this->scope()->SetScopeName(name);
bool has_extends = Check(Token::EXTENDS);
if (has_extends) {
ExpressionClassifier extends_classifier(this);
ParseLeftHandSideExpression(&extends_classifier, CHECK_OK);
+ CheckNoTailCallExpressions(&extends_classifier, CHECK_OK);
ValidateExpression(&extends_classifier, CHECK_OK);
if (classifier != nullptr) {
classifier->Accumulate(&extends_classifier,
@@ -1104,14 +1201,13 @@ PreParserExpression PreParser::ParseClassLiteral(
while (peek() != Token::RBRACE) {
if (Check(Token::SEMICOLON)) continue;
const bool in_class = true;
- const bool is_static = false;
bool is_computed_name = false; // Classes do not care about computed
// property names here.
Identifier name;
ExpressionClassifier property_classifier(this);
- ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
- &is_computed_name, &has_seen_constructor,
- &property_classifier, &name, CHECK_OK);
+ ParsePropertyDefinition(
+ &checker, in_class, has_extends, MethodKind::kNormal, &is_computed_name,
+ &has_seen_constructor, &property_classifier, &name, CHECK_OK);
ValidateExpression(&property_classifier, CHECK_OK);
if (classifier != nullptr) {
classifier->Accumulate(&property_classifier,
@@ -1151,18 +1247,26 @@ PreParserExpression PreParser::ParseDoExpression(bool* ok) {
// do '{' StatementList '}'
Expect(Token::DO, CHECK_OK);
Expect(Token::LBRACE, CHECK_OK);
- Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
- {
- BlockState block_state(&scope_, block_scope);
- while (peek() != Token::RBRACE) {
- ParseStatementListItem(CHECK_OK);
- }
- Expect(Token::RBRACE, CHECK_OK);
- return PreParserExpression::Default();
+ while (peek() != Token::RBRACE) {
+ ParseStatementListItem(CHECK_OK);
}
+ Expect(Token::RBRACE, CHECK_OK);
+ return PreParserExpression::Default();
+}
+
+void PreParser::ParseAsyncArrowSingleExpressionBody(
+ PreParserStatementList body, bool accept_IN,
+ ExpressionClassifier* classifier, int pos, bool* ok) {
+ scope()->ForceContextAllocation();
+
+ PreParserExpression return_value =
+ ParseAssignmentExpression(accept_IN, classifier, CHECK_OK_CUSTOM(Void));
+
+ body->Add(PreParserStatement::ExpressionStatement(return_value), zone());
}
#undef CHECK_OK
+#undef CHECK_OK_CUSTOM
} // namespace internal
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index f2f69517b2..3f268ee14a 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -7,7 +7,7 @@
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
#include "src/messages.h"
#include "src/parsing/expression-classifier.h"
#include "src/parsing/func-name-inferrer.h"
@@ -55,6 +55,15 @@ class PreParserIdentifier {
static PreParserIdentifier Constructor() {
return PreParserIdentifier(kConstructorIdentifier);
}
+ static PreParserIdentifier Enum() {
+ return PreParserIdentifier(kEnumIdentifier);
+ }
+ static PreParserIdentifier Await() {
+ return PreParserIdentifier(kAwaitIdentifier);
+ }
+ static PreParserIdentifier Async() {
+ return PreParserIdentifier(kAsyncIdentifier);
+ }
bool IsEval() const { return type_ == kEvalIdentifier; }
bool IsArguments() const { return type_ == kArgumentsIdentifier; }
bool IsEvalOrArguments() const { return IsEval() || IsArguments(); }
@@ -64,7 +73,8 @@ class PreParserIdentifier {
bool IsYield() const { return type_ == kYieldIdentifier; }
bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
bool IsConstructor() const { return type_ == kConstructorIdentifier; }
- bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
+ bool IsEnum() const { return type_ == kEnumIdentifier; }
+ bool IsAwait() const { return type_ == kAwaitIdentifier; }
bool IsFutureStrictReserved() const {
return type_ == kFutureStrictReservedIdentifier ||
type_ == kLetIdentifier || type_ == kStaticIdentifier ||
@@ -91,7 +101,10 @@ class PreParserIdentifier {
kArgumentsIdentifier,
kUndefinedIdentifier,
kPrototypeIdentifier,
- kConstructorIdentifier
+ kConstructorIdentifier,
+ kEnumIdentifier,
+ kAwaitIdentifier,
+ kAsyncIdentifier
};
explicit PreParserIdentifier(Type type) : type_(type) {}
@@ -103,8 +116,10 @@ class PreParserIdentifier {
class PreParserExpression {
public:
+ PreParserExpression() : code_(TypeField::encode(kExpression)) {}
+
static PreParserExpression Default() {
- return PreParserExpression(TypeField::encode(kExpression));
+ return PreParserExpression();
}
static PreParserExpression Spread(PreParserExpression expression) {
@@ -166,6 +181,12 @@ class PreParserExpression {
ExpressionTypeField::encode(kCallExpression));
}
+ static PreParserExpression CallEval() {
+ return PreParserExpression(
+ TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kCallEvalExpression));
+ }
+
static PreParserExpression SuperCallReference() {
return PreParserExpression(
TypeField::encode(kExpression) |
@@ -227,7 +248,13 @@ class PreParserExpression {
bool IsCall() const {
return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kCallExpression;
+ (ExpressionTypeField::decode(code_) == kCallExpression ||
+ ExpressionTypeField::decode(code_) == kCallEvalExpression);
+ }
+
+ bool IsDirectEvalCall() const {
+ return TypeField::decode(code_) == kExpression &&
+ ExpressionTypeField::decode(code_) == kCallEvalExpression;
}
bool IsSuperCallReference() const {
@@ -266,7 +293,7 @@ class PreParserExpression {
void set_index(int index) {} // For YieldExpressions
void set_should_eager_compile() {}
- int position() const { return RelocInfo::kNoPosition; }
+ int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
private:
@@ -285,6 +312,7 @@ class PreParserExpression {
kThisPropertyExpression,
kPropertyExpression,
kCallExpression,
+ kCallEvalExpression,
kSuperCallReference,
kNoTemplateTagExpression,
kAssignment
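
PreParserExpression carries no AST: the whole expression is one integer
code_, with a primary type tag and an expression subtype packed into
disjoint bit ranges, which is why IsCall() above can accept both
kCallExpression and the new kCallEvalExpression by decoding the same word.
A simplified stand-in for the real BitField helpers:

    #include <cstdint>
    #include <cstdio>

    // Packs a value into bits [shift, shift + size) of a 32-bit code word.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t code) {
        return static_cast<T>((code & kMask) >> shift);
      }
    };

    enum Type { kExpression, kIdentifierExpression };
    enum ExpressionType { kCallExpression, kCallEvalExpression };

    using TypeField = BitField<Type, 0, 4>;
    using ExpressionTypeField = BitField<ExpressionType, 4, 4>;

    int main() {
      // Mirrors PreParserExpression::CallEval(): one word, two tags.
      uint32_t code = TypeField::encode(kExpression) |
                      ExpressionTypeField::encode(kCallEvalExpression);
      bool is_call =
          TypeField::decode(code) == kExpression &&
          (ExpressionTypeField::decode(code) == kCallExpression ||
           ExpressionTypeField::decode(code) == kCallEvalExpression);
      std::printf("IsCall: %d\n", is_call);
    }
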
@@ -475,8 +503,8 @@ class PreParserFactory {
return PreParserExpression::Assignment();
}
PreParserExpression NewYield(PreParserExpression generator_object,
- PreParserExpression expression,
- int pos) {
+ PreParserExpression expression, int pos,
+ Yield::OnException on_exception) {
return PreParserExpression::Default();
}
PreParserExpression NewConditional(PreParserExpression condition,
@@ -491,9 +519,13 @@ class PreParserFactory {
int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewCall(PreParserExpression expression,
- PreParserExpressionList arguments,
- int pos) {
+ PreParserExpression NewCall(
+ PreParserExpression expression, PreParserExpressionList arguments,
+ int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
+ if (possibly_eval == Call::IS_POSSIBLY_EVAL) {
+ DCHECK(expression.IsIdentifier() && expression.AsIdentifier().IsEval());
+ return PreParserExpression::CallEval();
+ }
return PreParserExpression::Call();
}
PreParserExpression NewCallNew(PreParserExpression expression,
@@ -542,7 +574,7 @@ class PreParserFactory {
struct PreParserFormalParameters : FormalParametersBase {
- explicit PreParserFormalParameters(Scope* scope)
+ explicit PreParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
int arity = 0;
@@ -553,13 +585,12 @@ struct PreParserFormalParameters : FormalParametersBase {
class PreParser;
-class PreParserTraits {
+template <>
+class ParserBaseTraits<PreParser> {
public:
- struct Type {
- // TODO(marja): To be removed. The Traits object should contain all the data
- // it needs.
- typedef PreParser* Parser;
+ typedef ParserBaseTraits<PreParser> PreParserTraits;
+ struct Type {
// PreParser doesn't need to store generator variables.
typedef void GeneratorVariable;
@@ -574,8 +605,8 @@ class PreParserTraits {
typedef PreParserExpression YieldExpression;
typedef PreParserExpression FunctionLiteral;
typedef PreParserExpression ClassLiteral;
- typedef PreParserExpression ObjectLiteralProperty;
typedef PreParserExpression Literal;
+ typedef PreParserExpression ObjectLiteralProperty;
typedef PreParserExpressionList ExpressionList;
typedef PreParserExpressionList PropertyList;
typedef PreParserIdentifier FormalParameter;
@@ -586,31 +617,36 @@ class PreParserTraits {
typedef PreParserFactory Factory;
};
- explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
+ // TODO(nikolaos): The traits methods should not need to call methods
+ // of the implementation object.
+ PreParser* delegate() { return reinterpret_cast<PreParser*>(this); }
+ const PreParser* delegate() const {
+ return reinterpret_cast<const PreParser*>(this);
+ }
// Helper functions for recursive descent.
- static bool IsEval(PreParserIdentifier identifier) {
+ bool IsEval(PreParserIdentifier identifier) const {
return identifier.IsEval();
}
- static bool IsArguments(PreParserIdentifier identifier) {
+ bool IsArguments(PreParserIdentifier identifier) const {
return identifier.IsArguments();
}
- static bool IsEvalOrArguments(PreParserIdentifier identifier) {
+ bool IsEvalOrArguments(PreParserIdentifier identifier) const {
return identifier.IsEvalOrArguments();
}
- static bool IsUndefined(PreParserIdentifier identifier) {
+ bool IsUndefined(PreParserIdentifier identifier) const {
return identifier.IsUndefined();
}
- static bool IsPrototype(PreParserIdentifier identifier) {
- return identifier.IsPrototype();
+ bool IsAwait(PreParserIdentifier identifier) const {
+ return identifier.IsAwait();
}
- static bool IsConstructor(PreParserIdentifier identifier) {
- return identifier.IsConstructor();
+ bool IsFutureStrictReserved(PreParserIdentifier identifier) const {
+ return identifier.IsFutureStrictReserved();
}
// Returns true if the expression is of type "this.foo".
@@ -626,8 +662,16 @@ class PreParserTraits {
return expression.AsIdentifier();
}
- static bool IsFutureStrictReserved(PreParserIdentifier identifier) {
- return identifier.IsFutureStrictReserved();
+ bool IsPrototype(PreParserIdentifier identifier) const {
+ return identifier.IsPrototype();
+ }
+
+ bool IsConstructor(PreParserIdentifier identifier) const {
+ return identifier.IsConstructor();
+ }
+
+ bool IsDirectEvalCall(PreParserExpression expression) const {
+ return expression.IsDirectEvalCall();
}
static bool IsBoilerplateProperty(PreParserExpression property) {
@@ -650,8 +694,7 @@ class PreParserTraits {
UNREACHABLE();
}
- static void PushPropertyName(FuncNameInferrer* fni,
- PreParserExpression expression) {
+ void PushPropertyName(FuncNameInferrer* fni, PreParserExpression expression) {
// PreParser should not use FuncNameInferrer.
UNREACHABLE();
}
@@ -674,8 +717,7 @@ class PreParserTraits {
bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
PreParserExpression y,
- Token::Value op,
- int pos,
+ Token::Value op, int pos,
PreParserFactory* factory) {
return false;
}
@@ -690,36 +732,39 @@ class PreParserTraits {
bool done) {
return PreParserExpression::Default();
}
+
PreParserExpression NewThrowReferenceError(MessageTemplate::Template message,
int pos) {
return PreParserExpression::Default();
}
+
PreParserExpression NewThrowSyntaxError(MessageTemplate::Template message,
- Handle<Object> arg, int pos) {
+ PreParserIdentifier arg, int pos) {
return PreParserExpression::Default();
}
+
PreParserExpression NewThrowTypeError(MessageTemplate::Template message,
- Handle<Object> arg, int pos) {
+ PreParserIdentifier arg, int pos) {
return PreParserExpression::Default();
}
// Reporting errors.
- void ReportMessageAt(Scanner::Location location,
+ void ReportMessageAt(Scanner::Location source_location,
MessageTemplate::Template message,
const char* arg = NULL,
ParseErrorType error_type = kSyntaxError);
- void ReportMessageAt(int start_pos, int end_pos,
+ void ReportMessageAt(Scanner::Location source_location,
MessageTemplate::Template message,
- const char* arg = NULL,
+ const AstRawString* arg,
ParseErrorType error_type = kSyntaxError);
+ // A dummy function, just useful as an argument to CHECK_OK_CUSTOM.
+ static void Void() {}
+
// "null" return type creators.
static PreParserIdentifier EmptyIdentifier() {
return PreParserIdentifier::Default();
}
- static PreParserIdentifier EmptyIdentifierString() {
- return PreParserIdentifier::Default();
- }
static PreParserExpression EmptyExpression() {
return PreParserExpression::Default();
}
@@ -732,131 +777,90 @@ class PreParserTraits {
static PreParserExpression EmptyFunctionLiteral() {
return PreParserExpression::Default();
}
+
static PreParserExpressionList NullExpressionList() {
return PreParserExpressionList();
}
+ PreParserIdentifier EmptyIdentifierString() const {
+ return PreParserIdentifier::Default();
+ }
// Odd-ball literal creators.
- static PreParserExpression GetLiteralTheHole(int position,
- PreParserFactory* factory) {
+ PreParserExpression GetLiteralTheHole(int position,
+ PreParserFactory* factory) const {
return PreParserExpression::Default();
}
// Producing data during the recursive descent.
- PreParserIdentifier GetSymbol(Scanner* scanner);
- PreParserIdentifier GetNumberAsSymbol(Scanner* scanner);
+ PreParserIdentifier GetSymbol(Scanner* scanner) const;
- static PreParserIdentifier GetNextSymbol(Scanner* scanner) {
+ PreParserIdentifier GetNextSymbol(Scanner* scanner) const {
return PreParserIdentifier::Default();
}
- static PreParserExpression ThisExpression(Scope* scope,
- PreParserFactory* factory,
- int pos) {
+ PreParserIdentifier GetNumberAsSymbol(Scanner* scanner) const {
+ return PreParserIdentifier::Default();
+ }
+
+ PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
return PreParserExpression::This();
}
- static PreParserExpression SuperPropertyReference(Scope* scope,
- PreParserFactory* factory,
- int pos) {
+ PreParserExpression NewSuperPropertyReference(PreParserFactory* factory,
+ int pos) {
return PreParserExpression::Default();
}
- static PreParserExpression SuperCallReference(Scope* scope,
- PreParserFactory* factory,
- int pos) {
+ PreParserExpression NewSuperCallReference(PreParserFactory* factory,
+ int pos) {
return PreParserExpression::SuperCallReference();
}
- static PreParserExpression NewTargetExpression(Scope* scope,
- PreParserFactory* factory,
- int pos) {
+ PreParserExpression NewTargetExpression(int pos) {
return PreParserExpression::Default();
}
- static PreParserExpression FunctionSentExpression(Scope* scope,
- PreParserFactory* factory,
- int pos) {
+ PreParserExpression FunctionSentExpression(PreParserFactory* factory,
+ int pos) const {
return PreParserExpression::Default();
}
- static PreParserExpression ExpressionFromLiteral(
- Token::Value token, int pos, Scanner* scanner,
- PreParserFactory* factory) {
+ PreParserExpression ExpressionFromLiteral(Token::Value token, int pos,
+ Scanner* scanner,
+ PreParserFactory* factory) const {
return PreParserExpression::Default();
}
- static PreParserExpression ExpressionFromIdentifier(
- PreParserIdentifier name, int start_position, int end_position,
- Scope* scope, PreParserFactory* factory) {
+ PreParserExpression ExpressionFromIdentifier(PreParserIdentifier name,
+ int start_position,
+ int end_position,
+ InferName = InferName::kYes) {
return PreParserExpression::FromIdentifier(name);
}
- PreParserExpression ExpressionFromString(int pos,
- Scanner* scanner,
- PreParserFactory* factory = NULL);
+ PreParserExpression ExpressionFromString(int pos, Scanner* scanner,
+ PreParserFactory* factory) const;
PreParserExpression GetIterator(PreParserExpression iterable,
PreParserFactory* factory, int pos) {
return PreParserExpression::Default();
}
- static PreParserExpressionList NewExpressionList(int size, Zone* zone) {
+ PreParserExpressionList NewExpressionList(int size, Zone* zone) const {
return PreParserExpressionList();
}
- static PreParserStatementList NewStatementList(int size, Zone* zone) {
- return PreParserStatementList();
- }
-
- static PreParserExpressionList NewPropertyList(int size, Zone* zone) {
+ PreParserExpressionList NewPropertyList(int size, Zone* zone) const {
return PreParserExpressionList();
}
- static void AddParameterInitializationBlock(
- const PreParserFormalParameters& parameters,
- PreParserStatementList list, bool* ok) {}
-
- V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok) {
- UNREACHABLE();
+ PreParserStatementList NewStatementList(int size, Zone* zone) const {
+ return PreParserStatementList();
}
- V8_INLINE PreParserStatementList ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok);
-
- V8_INLINE void ParseArrowFunctionFormalParameterList(
- PreParserFormalParameters* parameters,
- PreParserExpression expression, const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok);
-
- void ReindexLiterals(const PreParserFormalParameters& paramaters) {}
-
- struct TemplateLiteralState {};
-
- TemplateLiteralState OpenTemplateLiteral(int pos) {
- return TemplateLiteralState();
- }
- void AddTemplateSpan(TemplateLiteralState*, bool) {}
- void AddTemplateExpression(TemplateLiteralState*, PreParserExpression) {}
- PreParserExpression CloseTemplateLiteral(TemplateLiteralState*, int,
- PreParserExpression tag) {
- if (IsTaggedTemplate(tag)) {
- // Emulate generation of array literals for tag callsite
- // 1st is array of cooked strings, second is array of raw strings
- MaterializeTemplateCallsiteLiterals();
- }
- return EmptyExpression();
- }
- inline void MaterializeTemplateCallsiteLiterals();
- PreParserExpression NoTemplateTag() {
- return PreParserExpression::NoTemplateTag();
- }
- static bool IsTaggedTemplate(const PreParserExpression tag) {
- return !tag.IsNoTemplateTag();
- }
+ void AddParameterInitializationBlock(
+ const PreParserFormalParameters& parameters, PreParserStatementList body,
+ bool is_async, bool* ok) {}
void AddFormalParameter(PreParserFormalParameters* parameters,
PreParserExpression pattern,
@@ -864,79 +868,45 @@ class PreParserTraits {
int initializer_end_position, bool is_rest) {
++parameters->arity;
}
- void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
+
+ void DeclareFormalParameter(DeclarationScope* scope,
+ PreParserIdentifier parameter,
Type::ExpressionClassifier* classifier) {
if (!classifier->is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
}
}
- void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
-
- // Temporary glue; these functions will move to ParserBase.
- PreParserExpression ParseV8Intrinsic(bool* ok);
- V8_INLINE PreParserExpression ParseDoExpression(bool* ok);
- PreParserExpression ParseFunctionLiteral(
- PreParserIdentifier name, Scanner::Location function_name_location,
- FunctionNameValidity function_name_validity, FunctionKind kind,
- int function_token_position, FunctionLiteral::FunctionType type,
- LanguageMode language_mode, bool* ok);
-
- PreParserExpression ParseClassLiteral(Type::ExpressionClassifier* classifier,
- PreParserIdentifier name,
- Scanner::Location class_name_location,
- bool name_is_strict_reserved, int pos,
- bool* ok);
+ V8_INLINE void ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters, PreParserExpression params,
+ const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
+ const Scope::Snapshot& scope_snapshot, bool* ok);
- V8_INLINE void MarkTailPosition(PreParserExpression) {}
+ void ReindexLiterals(const PreParserFormalParameters& parameters) {}
- PreParserExpressionList PrepareSpreadArguments(PreParserExpressionList list) {
- return list;
+ V8_INLINE PreParserExpression NoTemplateTag() {
+ return PreParserExpression::NoTemplateTag();
+ }
+ V8_INLINE static bool IsTaggedTemplate(const PreParserExpression tag) {
+ return !tag.IsNoTemplateTag();
}
inline void MaterializeUnspreadArgumentsLiterals(int count);
- inline PreParserExpression SpreadCall(PreParserExpression function,
- PreParserExpressionList args, int pos);
-
- inline PreParserExpression SpreadCallNew(PreParserExpression function,
- PreParserExpressionList args,
- int pos);
-
- inline void RewriteDestructuringAssignments() {}
-
- inline PreParserExpression RewriteExponentiation(PreParserExpression left,
- PreParserExpression right,
- int pos) {
- return left;
- }
- inline PreParserExpression RewriteAssignExponentiation(
- PreParserExpression left, PreParserExpression right, int pos) {
- return left;
+ inline PreParserExpression ExpressionListToExpression(
+ PreParserExpressionList args) {
+ return PreParserExpression::Default();
}
- inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
- inline void QueueNonPatternForRewriting(PreParserExpression) {}
-
- void SetFunctionNameFromPropertyName(PreParserExpression,
- PreParserIdentifier) {}
- void SetFunctionNameFromIdentifierRef(PreParserExpression,
- PreParserExpression) {}
-
- inline void RewriteNonPattern(Type::ExpressionClassifier* classifier,
- bool* ok);
+ void SetFunctionNameFromPropertyName(PreParserExpression property,
+ PreParserIdentifier name) {}
+ void SetFunctionNameFromIdentifierRef(PreParserExpression value,
+ PreParserExpression identifier) {}
+ V8_INLINE ZoneList<typename Type::ExpressionClassifier::Error>*
+ GetReportedErrorList() const;
V8_INLINE Zone* zone() const;
V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const;
-
- inline PreParserExpression RewriteYieldStar(
- PreParserExpression generator, PreParserExpression expr, int pos);
- inline PreParserExpression RewriteInstanceof(PreParserExpression lhs,
- PreParserExpression rhs,
- int pos);
-
- private:
- PreParser* pre_parser_;
};
@@ -952,7 +922,12 @@ class PreParserTraits {
// rather it is to speed up properly written and correct programs.
// That means that contextual checks (like a label being declared where
// it is used) are generally omitted.
-class PreParser : public ParserBase<PreParserTraits> {
+class PreParser : public ParserBase<PreParser> {
+ friend class ParserBase<PreParser>;
+ // TODO(nikolaos): This should not be necessary. It will be removed
+ // when the traits object stops delegating to the implementation object.
+ friend class ParserBaseTraits<PreParser>;
+
public:
typedef PreParserIdentifier Identifier;
typedef PreParserExpression Expression;
@@ -965,27 +940,38 @@ class PreParser : public ParserBase<PreParserTraits> {
PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
ParserRecorder* log, uintptr_t stack_limit)
- : ParserBase<PreParserTraits>(zone, scanner, stack_limit, NULL,
- ast_value_factory, log, this) {}
+ : ParserBase<PreParser>(zone, scanner, stack_limit, NULL,
+ ast_value_factory, log),
+ use_counts_(nullptr) {}
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- PreParseResult PreParseProgram(int* materialized_literals = 0) {
- Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
- PreParserFactory factory(NULL);
- FunctionState top_scope(&function_state_, &scope_, scope, kNormalFunction,
- &factory);
+ PreParseResult PreParseProgram(int* materialized_literals = 0,
+ bool is_module = false) {
+ DCHECK_NULL(scope_state_);
+ DeclarationScope* scope = NewScriptScope();
+
+ // ModuleDeclarationInstantiation for Source Text Module Records creates a
+ // new Module Environment Record whose outer lexical environment record is
+ // the global scope.
+ if (is_module) scope = NewModuleScope(scope);
+
+ FunctionState top_scope(&function_state_, &scope_state_, scope,
+ kNormalFunction);
bool ok = true;
int start_position = scanner()->peek_location().beg_pos;
+ parsing_module_ = is_module;
ParseStatementList(Token::EOS, &ok);
if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner()->current_token());
- } else if (is_strict(scope_->language_mode())) {
+ } else if (is_strict(this->scope()->language_mode())) {
CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
&ok);
+ CheckDecimalLiteralWithLeadingZero(use_counts_, start_position,
+ scanner()->location().end_pos);
}
if (materialized_literals) {
*materialized_literals = function_state_->materialized_literal_count();
@@ -1001,13 +987,14 @@ class PreParser : public ParserBase<PreParserTraits> {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
// the final '}'.
- PreParseResult PreParseLazyFunction(
- LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
- ParserRecorder* log, Scanner::BookmarkScope* bookmark = nullptr);
+ PreParseResult PreParseLazyFunction(LanguageMode language_mode,
+ FunctionKind kind,
+ bool has_simple_parameters,
+ bool parsing_module, ParserRecorder* log,
+ Scanner::BookmarkScope* bookmark,
+ int* use_counts);
private:
- friend class PreParserTraits;
-
static const int kLazyParseTrialLimit = 200;
// These types form an algebra over syntactic categories that is just
@@ -1027,7 +1014,12 @@ class PreParser : public ParserBase<PreParserTraits> {
Statement ParseSubStatement(AllowLabelledFunctionStatement allow_function,
bool* ok);
Statement ParseScopedStatement(bool legacy, bool* ok);
+ Statement ParseHoistableDeclaration(bool* ok);
+ Statement ParseHoistableDeclaration(int pos, ParseFunctionFlags flags,
+ bool* ok);
Statement ParseFunctionDeclaration(bool* ok);
+ Statement ParseAsyncFunctionDeclaration(bool* ok);
+ Expression ParseAsyncFunctionExpression(bool* ok);
Statement ParseClassDeclaration(bool* ok);
Statement ParseBlock(bool* ok);
Statement ParseVariableStatement(VariableDeclarationContext var_context,
@@ -1057,13 +1049,16 @@ class PreParser : public ParserBase<PreParserTraits> {
Expression ParseV8Intrinsic(bool* ok);
Expression ParseDoExpression(bool* ok);
- V8_INLINE void SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count, bool* ok);
V8_INLINE PreParserStatementList ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
const PreParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok);
+ V8_INLINE void SkipLazyFunctionBody(
+ int* materialized_literal_count, int* expected_property_count, bool* ok,
+ Scanner::BookmarkScope* bookmark = nullptr) {
+ UNREACHABLE();
+ }
Expression ParseFunctionLiteral(
Identifier name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
@@ -1077,74 +1072,113 @@ class PreParser : public ParserBase<PreParserTraits> {
Scanner::Location class_name_location,
bool name_is_strict_reserved, int pos,
bool* ok);
-};
+ struct TemplateLiteralState {};
-void PreParserTraits::MaterializeTemplateCallsiteLiterals() {
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
-}
+ V8_INLINE TemplateLiteralState OpenTemplateLiteral(int pos) {
+ return TemplateLiteralState();
+ }
+ V8_INLINE void AddTemplateExpression(TemplateLiteralState* state,
+ PreParserExpression expression) {}
+ V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool tail) {}
+ V8_INLINE PreParserExpression CloseTemplateLiteral(
+ TemplateLiteralState* state, int start, PreParserExpression tag);
+ V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
+ V8_INLINE void MarkCollectedTailCallExpressions() {}
+ V8_INLINE void MarkTailPosition(PreParserExpression expression) {}
-void PreParserTraits::MaterializeUnspreadArgumentsLiterals(int count) {
- for (int i = 0; i < count; ++i) {
- pre_parser_->function_state_->NextMaterializedLiteralIndex();
+ void ParseAsyncArrowSingleExpressionBody(PreParserStatementList body,
+ bool accept_IN,
+ ExpressionClassifier* classifier,
+ int pos, bool* ok);
+
+ V8_INLINE PreParserExpressionList
+ PrepareSpreadArguments(PreParserExpressionList list) {
+ return list;
}
-}
+ V8_INLINE PreParserExpression SpreadCall(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos);
+ V8_INLINE PreParserExpression SpreadCallNew(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos);
-PreParserExpression PreParserTraits::SpreadCall(PreParserExpression function,
- PreParserExpressionList args,
- int pos) {
- return pre_parser_->factory()->NewCall(function, args, pos);
-}
+ V8_INLINE void RewriteDestructuringAssignments() {}
-PreParserExpression PreParserTraits::SpreadCallNew(PreParserExpression function,
- PreParserExpressionList args,
- int pos) {
- return pre_parser_->factory()->NewCallNew(function, args, pos);
-}
+ V8_INLINE PreParserExpression RewriteExponentiation(PreParserExpression left,
+ PreParserExpression right,
+ int pos) {
+ return left;
+ }
+ V8_INLINE PreParserExpression RewriteAssignExponentiation(
+ PreParserExpression left, PreParserExpression right, int pos) {
+ return left;
+ }
+ V8_INLINE PreParserExpression
+ RewriteAwaitExpression(PreParserExpression value, int pos) {
+ return value;
+ }
+ V8_INLINE PreParserExpression RewriteYieldStar(PreParserExpression generator,
+ PreParserExpression expression,
+ int pos) {
+ return PreParserExpression::Default();
+ }
+ V8_INLINE void RewriteNonPattern(Type::ExpressionClassifier* classifier,
+ bool* ok) {
+ ValidateExpression(classifier, ok);
+ }
-void PreParserTraits::ParseArrowFunctionFormalParameterList(
- PreParserFormalParameters* parameters,
- PreParserExpression params, const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok) {
- // TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
- // lists that are too long.
-}
+ V8_INLINE void QueueDestructuringAssignmentForRewriting(
+ PreParserExpression assignment) {}
+ V8_INLINE void QueueNonPatternForRewriting(PreParserExpression expr,
+ bool* ok) {}
+ int* use_counts_;
+};
-PreParserExpression PreParserTraits::ParseDoExpression(bool* ok) {
- return pre_parser_->ParseDoExpression(ok);
+void ParserBaseTraits<PreParser>::MaterializeUnspreadArgumentsLiterals(
+ int count) {
+ for (int i = 0; i < count; ++i) {
+ delegate()->function_state_->NextMaterializedLiteralIndex();
+ }
}
-
-void PreParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
- bool* ok) {
- pre_parser_->ValidateExpression(classifier, ok);
+PreParserExpression PreParser::SpreadCall(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos) {
+ return factory()->NewCall(function, args, pos);
}
-
-Zone* PreParserTraits::zone() const {
- return pre_parser_->function_state_->scope()->zone();
+PreParserExpression PreParser::SpreadCallNew(PreParserExpression function,
+ PreParserExpressionList args,
+ int pos) {
+ return factory()->NewCallNew(function, args, pos);
}
-
-ZoneList<PreParserExpression>* PreParserTraits::GetNonPatternList() const {
- return pre_parser_->function_state_->non_patterns_to_rewrite();
+void ParserBaseTraits<PreParser>::ParseArrowFunctionFormalParameterList(
+ PreParserFormalParameters* parameters, PreParserExpression params,
+ const Scanner::Location& params_loc, Scanner::Location* duplicate_loc,
+ const Scope::Snapshot& scope_snapshot, bool* ok) {
+ // TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
+ // lists that are too long.
}
+ZoneList<PreParserExpression>* ParserBaseTraits<PreParser>::GetNonPatternList()
+ const {
+ return delegate()->function_state_->non_patterns_to_rewrite();
+}
-PreParserExpression PreParserTraits::RewriteYieldStar(
- PreParserExpression generator, PreParserExpression expression, int pos) {
- return PreParserExpression::Default();
+ZoneList<
+ typename ParserBaseTraits<PreParser>::Type::ExpressionClassifier::Error>*
+ParserBaseTraits<PreParser>::GetReportedErrorList() const {
+ return delegate()->function_state_->GetReportedErrorList();
}
-PreParserExpression PreParserTraits::RewriteInstanceof(PreParserExpression lhs,
- PreParserExpression rhs,
- int pos) {
- return PreParserExpression::Default();
+Zone* ParserBaseTraits<PreParser>::zone() const {
+ return delegate()->function_state_->scope()->zone();
}
PreParserStatementList PreParser::ParseEagerFunctionBody(
@@ -1153,20 +1187,29 @@ PreParserStatementList PreParser::ParseEagerFunctionBody(
FunctionLiteral::FunctionType function_type, bool* ok) {
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
- ParseStatementList(Token::RBRACE, ok);
- if (!*ok) return PreParserStatementList();
+ Scope* inner_scope = scope();
+ if (!parameters.is_simple) inner_scope = NewScope(BLOCK_SCOPE);
+
+ {
+ BlockState block_state(&scope_state_, inner_scope);
+ ParseStatementList(Token::RBRACE, ok);
+ if (!*ok) return PreParserStatementList();
+ }
Expect(Token::RBRACE, ok);
return PreParserStatementList();
}
-
-PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
- PreParserIdentifier function_name, int pos,
- const PreParserFormalParameters& parameters, FunctionKind kind,
- FunctionLiteral::FunctionType function_type, bool* ok) {
- return pre_parser_->ParseEagerFunctionBody(function_name, pos, parameters,
- kind, function_type, ok);
+PreParserExpression PreParser::CloseTemplateLiteral(TemplateLiteralState* state,
+ int start,
+ PreParserExpression tag) {
+ if (IsTaggedTemplate(tag)) {
+ // Emulate generation of array literals for the tagged template call site:
+ // the first is the array of cooked strings, the second the array of raw
+ // strings.
+ function_state_->NextMaterializedLiteralIndex();
+ function_state_->NextMaterializedLiteralIndex();
+ }
+ return EmptyExpression();
}
} // namespace internal
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 915a464bf4..51ff547017 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -6,44 +6,45 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
namespace v8 {
namespace internal {
-class Processor: public AstVisitor {
+class Processor final : public AstVisitor<Processor> {
public:
- Processor(Isolate* isolate, Scope* scope, Variable* result,
+ Processor(Isolate* isolate, DeclarationScope* closure_scope, Variable* result,
AstValueFactory* ast_value_factory)
: result_(result),
result_assigned_(false),
replacement_(nullptr),
is_set_(false),
zone_(ast_value_factory->zone()),
- scope_(scope),
+ closure_scope_(closure_scope),
factory_(ast_value_factory) {
+ DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
InitializeAstVisitor(isolate);
}
- Processor(Parser* parser, Scope* scope, Variable* result,
+ Processor(Parser* parser, DeclarationScope* closure_scope, Variable* result,
AstValueFactory* ast_value_factory)
: result_(result),
result_assigned_(false),
replacement_(nullptr),
is_set_(false),
zone_(ast_value_factory->zone()),
- scope_(scope),
+ closure_scope_(closure_scope),
factory_(ast_value_factory) {
+ DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
InitializeAstVisitor(parser->stack_limit());
}
- ~Processor() override {}
-
void Process(ZoneList<Statement*>* statements);
bool result_assigned() const { return result_assigned_; }
Zone* zone() { return zone_; }
- Scope* scope() { return scope_; }
+ DeclarationScope* closure_scope() { return closure_scope_; }
AstNodeFactory* factory() { return &factory_; }
// Returns ".result = value"
@@ -51,7 +52,7 @@ class Processor: public AstVisitor {
result_assigned_ = true;
VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
return factory()->NewAssignment(Token::ASSIGN, result_proxy, value,
- RelocInfo::kNoPosition);
+ kNoSourcePosition);
}
// Inserts '.result = undefined' in front of the given statement.
@@ -77,11 +78,11 @@ class Processor: public AstVisitor {
bool is_set_;
Zone* zone_;
- Scope* scope_;
+ DeclarationScope* closure_scope_;
AstNodeFactory factory_;
// Node visitors.
-#define DEF_VISIT(type) void Visit##type(type* node) override;
+#define DEF_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
@@ -93,13 +94,12 @@ class Processor: public AstVisitor {
Statement* Processor::AssignUndefinedBefore(Statement* s) {
Expression* result_proxy = factory()->NewVariableProxy(result_);
- Expression* undef = factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
- Expression* assignment = factory()->NewAssignment(
- Token::ASSIGN, result_proxy, undef, RelocInfo::kNoPosition);
- Block* b = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
+ Expression* undef = factory()->NewUndefinedLiteral(kNoSourcePosition);
+ Expression* assignment = factory()->NewAssignment(Token::ASSIGN, result_proxy,
+ undef, kNoSourcePosition);
+ Block* b = factory()->NewBlock(NULL, 2, false, kNoSourcePosition);
b->statements()->Add(
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
- zone());
+ factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
b->statements()->Add(s, zone());
return b;
}
@@ -226,21 +226,19 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
// at the end again: ".backup = .result; ...; .result = .backup"
// This is necessary because the finally block does not normally contribute
// to the completion value.
- CHECK(scope() != nullptr);
- Variable* backup = scope()->NewTemporary(
- factory()->ast_value_factory()->dot_result_string());
- Expression* backup_proxy = factory()->NewVariableProxy(backup);
- Expression* result_proxy = factory()->NewVariableProxy(result_);
- Expression* save = factory()->NewAssignment(
- Token::ASSIGN, backup_proxy, result_proxy, RelocInfo::kNoPosition);
- Expression* restore = factory()->NewAssignment(
- Token::ASSIGN, result_proxy, backup_proxy, RelocInfo::kNoPosition);
- node->finally_block()->statements()->InsertAt(
- 0, factory()->NewExpressionStatement(save, RelocInfo::kNoPosition),
- zone());
- node->finally_block()->statements()->Add(
- factory()->NewExpressionStatement(restore, RelocInfo::kNoPosition),
- zone());
+ CHECK_NOT_NULL(closure_scope());
+ Variable* backup = closure_scope()->NewTemporary(
+ factory()->ast_value_factory()->dot_result_string());
+ Expression* backup_proxy = factory()->NewVariableProxy(backup);
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* save = factory()->NewAssignment(
+ Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
+ Expression* restore = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
+ node->finally_block()->statements()->Add(
+ factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
}
is_set_ = set_after;
Visit(node->try_block());
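
The save/restore pair above exists because a finally block runs but does not
contribute to the completion value: eval('try { 1 } finally { 2 }') yields 1.
Statements inside the finally block still assign .result like any others, so
the rewriter brackets them with .backup. Modeled with plain variables (a
sketch of the effect, not the generated AST):

    #include <cstdio>

    int main() {
      int result = 0;
      result = 1;           // try block: completion value 1
      int backup = result;  // inserted: .backup = .result
      result = 2;           // finally body clobbers .result
      result = backup;      // inserted: .result = .backup
      std::printf("completion value: %d\n", result);  // prints 1
    }
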
@@ -338,24 +336,25 @@ DECLARATION_NODE_LIST(DEF_VISIT)
// continue to be used in the case of failure.
bool Rewriter::Rewrite(ParseInfo* info) {
FunctionLiteral* function = info->literal();
- DCHECK(function != NULL);
+ DCHECK_NOT_NULL(function);
Scope* scope = function->scope();
- DCHECK(scope != NULL);
+ DCHECK_NOT_NULL(scope);
if (!scope->is_script_scope() && !scope->is_eval_scope()) return true;
+ DeclarationScope* closure_scope = scope->GetClosureScope();
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
- Variable* result =
- scope->NewTemporary(info->ast_value_factory()->dot_result_string());
+ Variable* result = closure_scope->NewTemporary(
+ info->ast_value_factory()->dot_result_string());
// The name string must be internalized at this point.
DCHECK(!result->name().is_null());
- Processor processor(info->isolate(), scope, result,
+ Processor processor(info->isolate(), closure_scope, result,
info->ast_value_factory());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
if (processor.result_assigned()) {
- int pos = RelocInfo::kNoPosition;
+ int pos = kNoSourcePosition;
VariableProxy* result_proxy =
processor.factory()->NewVariableProxy(result, pos);
Statement* result_statement =
@@ -367,24 +366,24 @@ bool Rewriter::Rewrite(ParseInfo* info) {
return true;
}
-
-bool Rewriter::Rewrite(Parser* parser, DoExpression* expr,
- AstValueFactory* factory) {
+bool Rewriter::Rewrite(Parser* parser, DeclarationScope* closure_scope,
+ DoExpression* expr, AstValueFactory* factory) {
Block* block = expr->block();
- Scope* scope = block->scope();
+ DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
+ DCHECK(block->scope() == nullptr ||
+ block->scope()->GetClosureScope() == closure_scope);
ZoneList<Statement*>* body = block->statements();
VariableProxy* result = expr->result();
Variable* result_var = result->var();
if (!body->is_empty()) {
- Processor processor(parser, scope, result_var, factory);
+ Processor processor(parser, closure_scope, result_var, factory);
processor.Process(body);
if (processor.HasStackOverflow()) return false;
if (!processor.result_assigned()) {
AstNodeFactory* node_factory = processor.factory();
- Expression* undef =
- node_factory->NewUndefinedLiteral(RelocInfo::kNoPosition);
+ Expression* undef = node_factory->NewUndefinedLiteral(kNoSourcePosition);
Statement* completion = node_factory->NewExpressionStatement(
processor.SetResult(undef), expr->position());
body->Add(completion, factory->zone());
diff --git a/deps/v8/src/parsing/rewriter.h b/deps/v8/src/parsing/rewriter.h
index 477644a756..2dbfd32b7f 100644
--- a/deps/v8/src/parsing/rewriter.h
+++ b/deps/v8/src/parsing/rewriter.h
@@ -12,6 +12,8 @@ class AstValueFactory;
class DoExpression;
class ParseInfo;
class Parser;
+class DeclarationScope;
+class Scope;
class Rewriter {
public:
@@ -24,9 +26,13 @@ class Rewriter {
static bool Rewrite(ParseInfo* info);
// Rewrite a list of statements, using the same rules as a top-level program,
- // to ensure identical behaviour of completion result.
- static bool Rewrite(Parser* parser, DoExpression* expr,
- AstValueFactory* factory);
+ // to ensure identical behaviour of completion result. The temporary is added
+ // to the closure scope of the do-expression, which matches the closure scope
+ // of the outer scope (the do-expression itself runs in a block scope, not a
+ // closure scope). This closure scope needs to be passed in since the
+ // do-expression could have dropped its own block scope.
+ static bool Rewrite(Parser* parser, DeclarationScope* closure_scope,
+ DoExpression* expr, AstValueFactory* factory);
};
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 91ed54f7be..7cdef87c9c 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -8,7 +8,7 @@
#include "src/globals.h"
#include "src/handles.h"
#include "src/list-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
-#include "src/objects.h"
+#include "src/objects-inl.h"
#include "src/unicode-inl.h"
namespace v8 {
@@ -16,6 +16,33 @@ namespace internal {
namespace {
+size_t CopyUtf8CharsToUtf16Chars(uint16_t* dest, size_t length, const byte* src,
+ size_t* src_pos, size_t src_length) {
+ static const unibrow::uchar kMaxUtf16Character =
+ unibrow::Utf16::kMaxNonSurrogateCharCode;
+ size_t i = 0;
+ // Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
+ // one character early (in the normal case), because we need to have at least
+ // two free spaces in the buffer to be sure that the next character will fit.
+ while (i < length - 1) {
+ if (*src_pos == src_length) break;
+ unibrow::uchar c = src[*src_pos];
+ if (c <= unibrow::Utf8::kMaxOneByteChar) {
+ *src_pos = *src_pos + 1;
+ } else {
+ c = unibrow::Utf8::CalculateValue(src + *src_pos, src_length - *src_pos,
+ src_pos);
+ }
+ if (c > kMaxUtf16Character) {
+ dest[i++] = unibrow::Utf16::LeadSurrogate(c);
+ dest[i++] = unibrow::Utf16::TrailSurrogate(c);
+ } else {
+ dest[i++] = static_cast<uc16>(c);
+ }
+ }
+ return i;
+}
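
CopyUtf8CharsToUtf16Chars stops one slot early because a supplementary-plane
code point expands to two UTF-16 code units. A self-contained check of that
expansion for U+1F600 (the UTF-8 decoding is simplified and unvalidated,
unlike unibrow::Utf8::CalculateValue):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // U+1F600 in UTF-8 is F0 9F 98 80; decode the 4-byte sequence by hand.
      const unsigned char src[] = {0xF0, 0x9F, 0x98, 0x80};
      uint32_t c = ((src[0] & 0x07u) << 18) | ((src[1] & 0x3Fu) << 12) |
                   ((src[2] & 0x3Fu) << 6) | (src[3] & 0x3Fu);
      uint16_t dest[2] = {0, 0};
      if (c > 0xFFFF) {  // needs a lead + trail surrogate pair
        dest[0] = static_cast<uint16_t>(0xD800 + ((c - 0x10000) >> 10));
        dest[1] = static_cast<uint16_t>(0xDC00 + ((c - 0x10000) & 0x3FF));
      } else {
        dest[0] = static_cast<uint16_t>(c);
      }
      std::printf("U+%X -> 0x%04X 0x%04X\n", static_cast<unsigned>(c),
                  static_cast<unsigned>(dest[0]),
                  static_cast<unsigned>(dest[1]));  // U+1F600 -> 0xD83D 0xDE00
    }
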
+
size_t CopyCharsHelper(uint16_t* dest, size_t length, const uint8_t* src,
size_t* src_pos, size_t src_length,
ScriptCompiler::StreamedSource::Encoding encoding) {
@@ -24,8 +51,7 @@ size_t CopyCharsHelper(uint16_t* dest, size_t length, const uint8_t* src,
if (length == 0) return 0;
if (encoding == ScriptCompiler::StreamedSource::UTF8) {
- return v8::internal::Utf8ToUtf16CharacterStream::CopyChars(
- dest, length, src, src_pos, src_length);
+ return CopyUtf8CharsToUtf16Chars(dest, length, src, src_pos, src_length);
}
size_t to_fill = length;
@@ -175,163 +201,7 @@ size_t GenericStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
// ----------------------------------------------------------------------------
-// Utf8ToUtf16CharacterStream
-Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
- size_t length)
- : BufferedUtf16CharacterStream(),
- raw_data_(data),
- raw_data_length_(length),
- raw_data_pos_(0),
- raw_character_position_(0) {
- ReadBlock();
-}
-
-
-Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
-
-
-size_t Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, size_t length,
- const byte* src, size_t* src_pos,
- size_t src_length) {
- static const unibrow::uchar kMaxUtf16Character =
- unibrow::Utf16::kMaxNonSurrogateCharCode;
- size_t i = 0;
- // Because of the UTF-16 lead and trail surrogates, we stop filling the buffer
- // one character early (in the normal case), because we need to have at least
- // two free spaces in the buffer to be sure that the next character will fit.
- while (i < length - 1) {
- if (*src_pos == src_length) break;
- unibrow::uchar c = src[*src_pos];
- if (c <= unibrow::Utf8::kMaxOneByteChar) {
- *src_pos = *src_pos + 1;
- } else {
- c = unibrow::Utf8::CalculateValue(src + *src_pos, src_length - *src_pos,
- src_pos);
- }
- if (c > kMaxUtf16Character) {
- dest[i++] = unibrow::Utf16::LeadSurrogate(c);
- dest[i++] = unibrow::Utf16::TrailSurrogate(c);
- } else {
- dest[i++] = static_cast<uc16>(c);
- }
- }
- return i;
-}
-
-
-size_t Utf8ToUtf16CharacterStream::BufferSeekForward(size_t delta) {
- size_t old_pos = pos_;
- size_t target_pos = pos_ + delta;
- SetRawPosition(target_pos);
- pos_ = raw_character_position_;
- ReadBlock();
- return pos_ - old_pos;
-}
-
-
-size_t Utf8ToUtf16CharacterStream::FillBuffer(size_t char_position) {
- SetRawPosition(char_position);
- if (raw_character_position_ != char_position) {
- // char_position was not a valid position in the stream (hit the end
- // while spooling to it).
- return 0u;
- }
- size_t i = CopyChars(buffer_, kBufferSize, raw_data_, &raw_data_pos_,
- raw_data_length_);
- raw_character_position_ = char_position + i;
- return i;
-}
-
-
-static const byte kUtf8MultiByteMask = 0xC0;
-static const byte kUtf8MultiByteCharFollower = 0x80;
-
-
-#ifdef DEBUG
-static const byte kUtf8MultiByteCharStart = 0xC0;
-static bool IsUtf8MultiCharacterStart(byte first_byte) {
- return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
-}
-#endif
-
-
-static bool IsUtf8MultiCharacterFollower(byte later_byte) {
- return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
-}
-
-
-// Move the cursor back to point at the preceding UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterBack(const byte* buffer, size_t* cursor) {
- byte character = buffer[--*cursor];
- if (character > unibrow::Utf8::kMaxOneByteChar) {
- DCHECK(IsUtf8MultiCharacterFollower(character));
- // Last byte of a multi-byte character encoding. Step backwards until
- // pointing to the first byte of the encoding, recognized by having the
- // top two bits set.
- while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
- DCHECK(IsUtf8MultiCharacterStart(buffer[*cursor]));
- }
-}
-
-
-// Move the cursor forward to point at the next following UTF-8 character start
-// in the buffer.
-static inline void Utf8CharacterForward(const byte* buffer, size_t* cursor) {
- byte character = buffer[(*cursor)++];
- if (character > unibrow::Utf8::kMaxOneByteChar) {
- // First character of a multi-byte character encoding.
- // The number of most-significant one-bits determines the length of the
- // encoding:
- // 110..... - (0xCx, 0xDx) one additional byte (minimum).
- // 1110.... - (0xEx) two additional bytes.
- // 11110... - (0xFx) three additional bytes (maximum).
- DCHECK(IsUtf8MultiCharacterStart(character));
- // Additional bytes is:
- // 1 if value in range 0xC0 .. 0xDF.
- // 2 if value in range 0xE0 .. 0xEF.
- // 3 if value in range 0xF0 .. 0xF7.
- // Encode that in a single value.
- size_t additional_bytes =
- ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
- *cursor += additional_bytes;
- DCHECK(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
- }
-}
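
The deleted Utf8CharacterForward packed the continuation-byte counts for the
three multi-byte lead ranges into the hex digits of 0x3211, as its comment
explains. A standalone check of that lookup trick:

    #include <cstdio>

    int main() {
      const unsigned leads[] = {0xC2, 0xE0, 0xF0};  // 2-, 3-, 4-byte leads
      for (int i = 0; i < 3; ++i) {
        unsigned character = leads[i];
        // Nibbles 0/1 -> 1, nibble 2 -> 2, nibble 3 -> 3 continuation bytes.
        unsigned additional_bytes =
            ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
        std::printf("lead 0x%02X -> %u continuation bytes\n", character,
                    additional_bytes);
      }
    }
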
-
-
-// This can't set a raw position between two surrogate pairs, since there
-// is no position in the UTF8 stream that corresponds to that. This assumes
-// that the surrogate pair is correctly coded as a 4 byte UTF-8 sequence. If
-// it is illegally coded as two 3 byte sequences then there is no problem here.
-void Utf8ToUtf16CharacterStream::SetRawPosition(size_t target_position) {
- if (raw_character_position_ > target_position) {
- // Spool backwards in utf8 buffer.
- do {
- size_t old_pos = raw_data_pos_;
- Utf8CharacterBack(raw_data_, &raw_data_pos_);
- raw_character_position_--;
- DCHECK(old_pos - raw_data_pos_ <= 4);
- // Step back over both code units for surrogate pairs.
- if (old_pos - raw_data_pos_ == 4) raw_character_position_--;
- } while (raw_character_position_ > target_position);
- // No surrogate pair splitting.
- DCHECK(raw_character_position_ == target_position);
- return;
- }
- // Spool forwards in the utf8 buffer.
- while (raw_character_position_ < target_position) {
- if (raw_data_pos_ == raw_data_length_) return;
- size_t old_pos = raw_data_pos_;
- Utf8CharacterForward(raw_data_, &raw_data_pos_);
- raw_character_position_++;
- DCHECK(raw_data_pos_ - old_pos <= 4);
- if (raw_data_pos_ - old_pos == 4) raw_character_position_++;
- }
- // No surrogate pair splitting.
- DCHECK(raw_character_position_ == target_position);
-}
-
+// ExternalStreamingStream
size_t ExternalStreamingStream::FillBuffer(size_t position) {
// Ignore "position" which is the position in the decoded data. Instead,
@@ -559,15 +429,11 @@ void ExternalStreamingStream::HandleUtf8SplitCharacters(
ExternalTwoByteStringUtf16CharacterStream::
~ExternalTwoByteStringUtf16CharacterStream() { }
-
ExternalTwoByteStringUtf16CharacterStream::
ExternalTwoByteStringUtf16CharacterStream(
Handle<ExternalTwoByteString> data, int start_position,
int end_position)
- : Utf16CharacterStream(),
- source_(data),
- raw_data_(data->GetTwoByteData(start_position)),
- bookmark_(kNoBookmark) {
+ : raw_data_(data->GetTwoByteData(start_position)), bookmark_(kNoBookmark) {
buffer_cursor_ = raw_data_,
buffer_end_ = raw_data_ + (end_position - start_position);
pos_ = start_position;
@@ -585,5 +451,62 @@ void ExternalTwoByteStringUtf16CharacterStream::ResetToBookmark() {
pos_ = bookmark_;
buffer_cursor_ = raw_data_ + bookmark_;
}
+
+// ----------------------------------------------------------------------------
+// ExternalOneByteStringUtf16CharacterStream
+
+ExternalOneByteStringUtf16CharacterStream::
+ ~ExternalOneByteStringUtf16CharacterStream() {}
+
+ExternalOneByteStringUtf16CharacterStream::
+ ExternalOneByteStringUtf16CharacterStream(
+ Handle<ExternalOneByteString> data, int start_position,
+ int end_position)
+ : raw_data_(data->GetChars()),
+ length_(end_position),
+ bookmark_(kNoBookmark) {
+ DCHECK(end_position >= start_position);
+ pos_ = start_position;
+}
+
+ExternalOneByteStringUtf16CharacterStream::
+ ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length)
+ : raw_data_(reinterpret_cast<const uint8_t*>(data)),
+ length_(length),
+ bookmark_(kNoBookmark) {}
+
+ExternalOneByteStringUtf16CharacterStream::
+ ExternalOneByteStringUtf16CharacterStream(const char* data)
+ : ExternalOneByteStringUtf16CharacterStream(data, strlen(data)) {}
+
+bool ExternalOneByteStringUtf16CharacterStream::SetBookmark() {
+ bookmark_ = pos_;
+ return true;
+}
+
+void ExternalOneByteStringUtf16CharacterStream::ResetToBookmark() {
+ DCHECK(bookmark_ != kNoBookmark);
+ pos_ = bookmark_;
+ buffer_cursor_ = buffer_;
+ buffer_end_ = buffer_ + FillBuffer(pos_);
+}
+
+size_t ExternalOneByteStringUtf16CharacterStream::BufferSeekForward(
+ size_t delta) {
+ size_t old_pos = pos_;
+ pos_ = Min(pos_ + delta, length_);
+ ReadBlock();
+ return pos_ - old_pos;
+}
+
+size_t ExternalOneByteStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
+ if (from_pos >= length_) return 0;
+ size_t length = Min(kBufferSize, length_ - from_pos);
+ for (size_t i = 0; i < length; ++i) {
+ buffer_[i] = static_cast<uc16>(raw_data_[from_pos + i]);
+ }
+ return length;
+}
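+
+A usage sketch for the new stream, assuming the testing constructor declared further down in scanner-character-streams.h; FillBuffer widens at most kBufferSize Latin-1 bytes per refill, and the static_cast above is the entire "decoding" step:
+
+    // Hypothetical caller-side snippet, not part of this patch.
+    ExternalOneByteStringUtf16CharacterStream stream("var x = 1;");
+    uc32 c = stream.Advance();  // yields 'v', then 'a', 'r', ' ', ...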
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 603db93d02..94d8284f14 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -14,6 +14,7 @@ namespace internal {
// Forward declarations.
class ExternalTwoByteString;
+class ExternalOneByteString;
// A buffered character stream based on a random access character
// source (ReadBlock can be called with pos_ pointing to any position,
@@ -63,29 +64,6 @@ class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
};
-// Utf16 stream based on a literal UTF-8 string.
-class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
- public:
- Utf8ToUtf16CharacterStream(const byte* data, size_t length);
- ~Utf8ToUtf16CharacterStream() override;
-
- static size_t CopyChars(uint16_t* dest, size_t length, const byte* src,
- size_t* src_pos, size_t src_length);
-
- protected:
- size_t BufferSeekForward(size_t delta) override;
- size_t FillBuffer(size_t char_position) override;
- void SetRawPosition(size_t char_position);
-
- const byte* raw_data_;
- size_t raw_data_length_; // Measured in bytes, not characters.
- size_t raw_data_pos_;
- // The character position of the character at raw_data[raw_data_pos_].
- // Not necessarily the same as pos_.
- size_t raw_character_position_;
-};
-
-
// ExternalStreamingStream is a wrapper around an ExternalSourceStream (see
// include/v8.h) subclass implemented by the embedder.
class ExternalStreamingStream : public BufferedUtf16CharacterStream {
@@ -158,14 +136,16 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
void PushBack(uc32 character) override {
DCHECK(buffer_cursor_ > raw_data_);
- buffer_cursor_--;
pos_--;
+ if (character != kEndOfInput) {
+ buffer_cursor_--;
+ }
}
bool SetBookmark() override;
void ResetToBookmark() override;
- protected:
+ private:
size_t SlowSeekForward(size_t delta) override {
// Fast case always handles seeking.
return 0;
@@ -174,12 +154,37 @@ class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
// Entire string is read at start.
return false;
}
- Handle<ExternalTwoByteString> source_;
const uc16* raw_data_; // Pointer to the actual array of characters.
+ static const size_t kNoBookmark = -1;
+
+ size_t bookmark_;
+};
+
+// UTF-16 buffer to read characters from an external Latin-1 string.
+class ExternalOneByteStringUtf16CharacterStream
+ : public BufferedUtf16CharacterStream {
+ public:
+ ExternalOneByteStringUtf16CharacterStream(Handle<ExternalOneByteString> data,
+ int start_position,
+ int end_position);
+ ~ExternalOneByteStringUtf16CharacterStream() override;
+
+ // For testing:
+ explicit ExternalOneByteStringUtf16CharacterStream(const char* data);
+ ExternalOneByteStringUtf16CharacterStream(const char* data, size_t length);
+
+ bool SetBookmark() override;
+ void ResetToBookmark() override;
+
private:
static const size_t kNoBookmark = -1;
+ size_t BufferSeekForward(size_t delta) override;
+ size_t FillBuffer(size_t position) override;
+
+ const uint8_t* raw_data_; // Pointer to the actual array of characters.
+ size_t length_;
size_t bookmark_;
};
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 698cb5e905..06ead2e827 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -19,8 +19,7 @@
namespace v8 {
namespace internal {
-
-Handle<String> LiteralBuffer::Internalize(Isolate* isolate) const {
+Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
if (is_one_byte()) {
return isolate->factory()->InternalizeOneByteString(one_byte_literal());
}
@@ -40,8 +39,8 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
bookmark_c0_(kNoBookmark),
octal_pos_(Location::invalid()),
- found_html_comment_(false),
- allow_harmony_exponentiation_operator_(false) {
+ decimal_with_leading_zero_pos_(Location::invalid()),
+ found_html_comment_(false) {
bookmark_current_.literal_chars = &bookmark_current_literal_;
bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
bookmark_next_.literal_chars = &bookmark_next_literal_;
@@ -249,6 +248,7 @@ Token::Value Scanner::Next() {
if (V8_UNLIKELY(next_next_.token != Token::UNINITIALIZED)) {
next_ = next_next_;
next_next_.token = Token::UNINITIALIZED;
+ has_line_terminator_before_next_ = has_line_terminator_after_next_;
return current_.token;
}
has_line_terminator_before_next_ = false;
@@ -260,6 +260,8 @@ Token::Value Scanner::Next() {
next_.token = token;
next_.location.beg_pos = pos;
next_.location.end_pos = pos + 1;
+ next_.literal_chars = nullptr;
+ next_.raw_literal_chars = nullptr;
Advance();
return current_.token;
}
@@ -270,11 +272,19 @@ Token::Value Scanner::Next() {
Token::Value Scanner::PeekAhead() {
+ DCHECK(next_.token != Token::DIV);
+ DCHECK(next_.token != Token::ASSIGN_DIV);
+
if (next_next_.token != Token::UNINITIALIZED) {
return next_next_.token;
}
TokenDesc prev = current_;
+ bool has_line_terminator_before_next =
+ has_line_terminator_before_next_ || has_multiline_comment_before_next_;
Next();
+ has_line_terminator_after_next_ =
+ has_line_terminator_before_next_ || has_multiline_comment_before_next_;
+ has_line_terminator_before_next_ = has_line_terminator_before_next;
Token::Value ret = next_.token;
next_next_ = next_;
next_ = current_;
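
The flag juggling above keeps has_line_terminator_before_next_ describing the gap before next_, while the new has_line_terminator_after_next_ describes the gap before next_next_. A call-sequence sketch (assumed parser usage, shown as comments):

    // scanner.Next();       current_ <- next_, next_ <- freshly scanned token
    // scanner.PeekAhead();  scans one extra token into next_next_, then
    //                       restores the "before next_" terminator flag
    // scanner.Next();       next_next_ slides into next_, and the saved
    //                       has_line_terminator_after_next_ becomes "before"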
@@ -557,7 +567,7 @@ void Scanner::Scan() {
Advance();
if (c0_ == '-') {
Advance();
- if (c0_ == '>' && has_line_terminator_before_next_) {
+ if (c0_ == '>' && HasAnyLineTerminatorBeforeNext()) {
// For compatibility with SpiderMonkey, we skip lines that
// start with an HTML comment end '-->'.
token = SkipSingleLineComment();
@@ -574,7 +584,7 @@ void Scanner::Scan() {
case '*':
// * *=
Advance();
- if (c0_ == '*' && allow_harmony_exponentiation_operator()) {
+ if (c0_ == '*') {
token = Select('=', Token::ASSIGN_EXP, Token::EXP);
} else if (c0_ == '=') {
token = Select(Token::ASSIGN_MUL);
@@ -726,8 +736,50 @@ void Scanner::Scan() {
next_.location.end_pos = source_pos();
next_.token = token;
+
+#ifdef DEBUG
+ SanityCheckTokenDesc(current_);
+ SanityCheckTokenDesc(next_);
+ SanityCheckTokenDesc(next_next_);
+#endif
}
+#ifdef DEBUG
+void Scanner::SanityCheckTokenDesc(const TokenDesc& token) const {
+ // Most tokens should not have literal_chars or even raw_literal_chars.
+ // The rules are:
+ // - UNINITIALIZED: we don't care.
+ // - TEMPLATE_*: need both literal + raw literal chars.
+ // - IDENTIFIERS, STRINGS, etc.: need a literal, but no raw literal.
+ // - all others: should have neither.
+
+ switch (token.token) {
+ case Token::UNINITIALIZED:
+ // token.literal_chars & other members might be garbage. That's ok.
+ break;
+ case Token::TEMPLATE_SPAN:
+ case Token::TEMPLATE_TAIL:
+ DCHECK_NOT_NULL(token.raw_literal_chars);
+ DCHECK_NOT_NULL(token.literal_chars);
+ break;
+ case Token::ESCAPED_KEYWORD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::IDENTIFIER:
+ case Token::NUMBER:
+ case Token::REGEXP_LITERAL:
+ case Token::SMI:
+ case Token::STRING:
+ DCHECK_NOT_NULL(token.literal_chars);
+ DCHECK_NULL(token.raw_literal_chars);
+ break;
+ default:
+ DCHECK_NULL(token.literal_chars);
+ DCHECK_NULL(token.raw_literal_chars);
+ break;
+ }
+}
+#endif // DEBUG
void Scanner::SeekForward(int pos) {
// After this call, we will have the token at the given position as
@@ -832,9 +884,6 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
}
-const int kMaxAscii = 127;
-
-
Token::Value Scanner::ScanString() {
uc32 quote = c0_;
Advance<false, false>(); // consume quote
@@ -851,7 +900,7 @@ Token::Value Scanner::ScanString() {
Advance<false, false>();
return Token::STRING;
}
- uc32 c = c0_;
+ char c = static_cast<char>(c0_);
if (c == '\\') break;
Advance<false, false>();
AddLiteralChar(c);
@@ -952,6 +1001,7 @@ Token::Value Scanner::ScanTemplateSpan() {
Token::Value Scanner::ScanTemplateStart() {
+ DCHECK(next_next_.token == Token::UNINITIALIZED);
DCHECK(c0_ == '`');
next_.location.beg_pos = source_pos();
Advance(); // Consume `
@@ -975,10 +1025,18 @@ void Scanner::ScanDecimalDigits() {
Token::Value Scanner::ScanNumber(bool seen_period) {
DCHECK(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
- enum { DECIMAL, HEX, OCTAL, IMPLICIT_OCTAL, BINARY } kind = DECIMAL;
+ enum {
+ DECIMAL,
+ DECIMAL_WITH_LEADING_ZERO,
+ HEX,
+ OCTAL,
+ IMPLICIT_OCTAL,
+ BINARY
+ } kind = DECIMAL;
LiteralScope literal(this);
bool at_start = !seen_period;
+ int start_pos = source_pos(); // For reporting octal positions.
if (seen_period) {
// We have already seen the decimal point of the float.
AddLiteralChar('.');
@@ -987,7 +1045,6 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
} else {
// If the first character is '0' we must check for octal and hex numbers.
if (c0_ == '0') {
- int start_pos = source_pos(); // For reporting octal positions.
AddLiteralCharAdvance();
// either 0, 0exxx, 0Exxx, 0.xxx, a hex number, a binary number or
@@ -1029,7 +1086,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
while (true) {
if (c0_ == '8' || c0_ == '9') {
at_start = false;
- kind = DECIMAL;
+ kind = DECIMAL_WITH_LEADING_ZERO;
break;
}
if (c0_ < '0' || '7' < c0_) {
@@ -1039,11 +1096,13 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
}
AddLiteralCharAdvance();
}
+ } else if (c0_ == '8' || c0_ == '9') {
+ kind = DECIMAL_WITH_LEADING_ZERO;
}
}
// Parse decimal digits and allow trailing fractional part.
- if (kind == DECIMAL) {
+ if (kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO) {
if (at_start) {
uint64_t value = 0;
while (IsDecimalDigit(c0_)) {
@@ -1060,6 +1119,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
literal.Complete();
HandleLeadSurrogate();
+ if (kind == DECIMAL_WITH_LEADING_ZERO)
+ decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
return Token::SMI;
}
HandleLeadSurrogate();
@@ -1076,7 +1137,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// scan exponent, if any
if (c0_ == 'e' || c0_ == 'E') {
DCHECK(kind != HEX); // 'e'/'E' must be scanned as part of the hex number
- if (kind != DECIMAL) return Token::ILLEGAL;
+ if (!(kind == DECIMAL || kind == DECIMAL_WITH_LEADING_ZERO))
+ return Token::ILLEGAL;
// scan exponent
AddLiteralCharAdvance();
if (c0_ == '+' || c0_ == '-')
@@ -1098,6 +1160,8 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
literal.Complete();
+ if (kind == DECIMAL_WITH_LEADING_ZERO)
+ decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
return Token::NUMBER;
}
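
The new DECIMAL_WITH_LEADING_ZERO kind records a position instead of rejecting the literal, so the parser can report legacy-looking numbers later. A classification sketch, assuming the enum introduced above:

    // "0"     -> DECIMAL                    (plain zero, Token::SMI)
    // "0755"  -> IMPLICIT_OCTAL             (digits 0-7 only)
    // "0789"  -> DECIMAL_WITH_LEADING_ZERO  (an 8 or 9 forces decimal;
    //                                        decimal_with_leading_zero_pos_
    //                                        is set for later reporting)
    // "0x1F"  -> HEX
    // "0o17"  -> OCTAL
    // "0b101" -> BINARY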
@@ -1135,6 +1199,9 @@ uc32 Scanner::ScanUnicodeEscape() {
// Keyword Matcher
#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
+ KEYWORD_GROUP('a') \
+ KEYWORD("async", Token::ASYNC) \
+ KEYWORD("await", Token::AWAIT) \
KEYWORD_GROUP('b') \
KEYWORD("break", Token::BREAK) \
KEYWORD_GROUP('c') \
@@ -1150,7 +1217,7 @@ uc32 Scanner::ScanUnicodeEscape() {
KEYWORD("do", Token::DO) \
KEYWORD_GROUP('e') \
KEYWORD("else", Token::ELSE) \
- KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("enum", Token::ENUM) \
KEYWORD("export", Token::EXPORT) \
KEYWORD("extends", Token::EXTENDS) \
KEYWORD_GROUP('f') \
@@ -1196,9 +1263,8 @@ uc32 Scanner::ScanUnicodeEscape() {
KEYWORD_GROUP('y') \
KEYWORD("yield", Token::YIELD)
-
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
- int input_length, bool escaped) {
+ int input_length) {
DCHECK(input_length >= 1);
const int kMinLength = 2;
const int kMaxLength = 10;
@@ -1226,13 +1292,6 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
(keyword_length <= 7 || input[7] == keyword[7]) && \
(keyword_length <= 8 || input[8] == keyword[8]) && \
(keyword_length <= 9 || input[9] == keyword[9])) { \
- if (escaped) { \
- /* TODO(adamk): YIELD should be handled specially. */ \
- return (token == Token::FUTURE_STRICT_RESERVED_WORD || \
- token == Token::LET || token == Token::STATIC) \
- ? Token::ESCAPED_STRICT_RESERVED_WORD \
- : Token::ESCAPED_KEYWORD; \
- } \
return token; \
} \
}
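
The generated matcher first switches on the initial character group, then performs a length-guarded, fully unrolled comparison. Roughly what one KEYWORD("break", Token::BREAK) expansion checks once the 'b' group is selected (a simplified sketch, not the literal preprocessor output):

    if (input_length == 5 && input[1] == 'r' && input[2] == 'e' &&
        input[3] == 'a' && input[4] == 'k') {
      return Token::BREAK;
    }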
@@ -1251,7 +1310,7 @@ bool Scanner::IdentifierIsFutureStrictReserved(
return true;
}
return Token::FUTURE_STRICT_RESERVED_WORD ==
- KeywordOrIdentifierToken(string->raw_data(), string->length(), false);
+ KeywordOrIdentifierToken(string->raw_data(), string->length());
}
@@ -1260,7 +1319,7 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
LiteralScope literal(this);
if (IsInRange(c0_, 'a', 'z')) {
do {
- uc32 first_char = c0_;
+ char first_char = static_cast<char>(c0_);
Advance<false, false>();
AddLiteralChar(first_char);
} while (IsInRange(c0_, 'a', 'z'));
@@ -1268,11 +1327,11 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '_' ||
c0_ == '$') {
// Identifier starting with lowercase.
- uc32 first_char = c0_;
+ char first_char = static_cast<char>(c0_);
Advance<false, false>();
AddLiteralChar(first_char);
while (IsAsciiIdentifier(c0_)) {
- uc32 first_char = c0_;
+ char first_char = static_cast<char>(c0_);
Advance<false, false>();
AddLiteralChar(first_char);
}
@@ -1282,15 +1341,19 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
}
} else if (c0_ <= kMaxAscii && c0_ != '\\') {
// Only a-z+: could be a keyword or identifier.
- literal.Complete();
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length(), false);
+ Token::Value token =
+ KeywordOrIdentifierToken(chars.start(), chars.length());
+ if (token == Token::IDENTIFIER ||
+ token == Token::FUTURE_STRICT_RESERVED_WORD)
+ literal.Complete();
+ return token;
}
HandleLeadSurrogate();
} else if (IsInRange(c0_, 'A', 'Z') || c0_ == '_' || c0_ == '$') {
do {
- uc32 first_char = c0_;
+ char first_char = static_cast<char>(c0_);
Advance<false, false>();
AddLiteralChar(first_char);
} while (IsAsciiIdentifier(c0_));
@@ -1330,12 +1393,14 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
return ScanIdentifierSuffix(&literal, false);
}
- literal.Complete();
-
if (next_.literal_chars->is_one_byte()) {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length(), false);
+ Token::Value token =
+ KeywordOrIdentifierToken(chars.start(), chars.length());
+ if (token == Token::IDENTIFIER) literal.Complete();
+ return token;
}
+ literal.Complete();
return Token::IDENTIFIER;
}
@@ -1363,15 +1428,28 @@ Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal,
if (escaped && next_.literal_chars->is_one_byte()) {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length(), true);
+ Token::Value token =
+ KeywordOrIdentifierToken(chars.start(), chars.length());
+ /* TODO(adamk): YIELD should be handled specially. */
+ if (token == Token::IDENTIFIER) {
+ return Token::IDENTIFIER;
+ } else if (token == Token::FUTURE_STRICT_RESERVED_WORD ||
+ token == Token::LET || token == Token::STATIC) {
+ return Token::ESCAPED_STRICT_RESERVED_WORD;
+ } else {
+ return Token::ESCAPED_KEYWORD;
+ }
}
return Token::IDENTIFIER;
}
+bool Scanner::ScanRegExpPattern() {
+ DCHECK(next_next_.token == Token::UNINITIALIZED);
+ DCHECK(next_.token == Token::DIV || next_.token == Token::ASSIGN_DIV);
-bool Scanner::ScanRegExpPattern(bool seen_equal) {
// Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
bool in_character_class = false;
+ bool seen_equal = (next_.token == Token::ASSIGN_DIV);
// Previous token is either '/' or '/='; in the second case, the
// pattern starts at '='.
@@ -1411,14 +1489,15 @@ bool Scanner::ScanRegExpPattern(bool seen_equal) {
Advance(); // consume '/'
literal.Complete();
-
+ next_.token = Token::REGEXP_LITERAL;
return true;
}
Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
+ DCHECK(next_.token == Token::REGEXP_LITERAL);
+
// Scan regular expression flags.
- LiteralScope literal(this);
int flags = 0;
while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
RegExp::Flags flag = RegExp::kNone;
@@ -1433,7 +1512,6 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
flag = RegExp::kMultiline;
break;
case 'u':
- if (!FLAG_harmony_unicode_regexps) return Nothing<RegExp::Flags>();
flag = RegExp::kUnicode;
break;
case 'y':
@@ -1442,11 +1520,12 @@ Maybe<RegExp::Flags> Scanner::ScanRegExpFlags() {
default:
return Nothing<RegExp::Flags>();
}
- if (flags & flag) return Nothing<RegExp::Flags>();
- AddLiteralCharAdvance();
+ if (flags & flag) {
+ return Nothing<RegExp::Flags>();
+ }
+ Advance();
flags |= flag;
}
- literal.Complete();
next_.location.end_pos = source_pos();
return Just(RegExp::Flags(flags));
@@ -1519,14 +1598,9 @@ void Scanner::ResetToBookmark() {
source_->ResetToBookmark();
c0_ = bookmark_c0_;
- StartLiteral();
- StartRawLiteral();
- CopyTokenDesc(&next_, &bookmark_current_);
+ CopyToNextTokenDesc(&bookmark_current_);
current_ = next_;
- StartLiteral();
- StartRawLiteral();
- CopyTokenDesc(&next_, &bookmark_next_);
-
+ CopyToNextTokenDesc(&bookmark_next_);
bookmark_c0_ = kBookmarkWasApplied;
}
@@ -1541,6 +1615,13 @@ bool Scanner::BookmarkHasBeenReset() {
void Scanner::DropBookmark() { bookmark_c0_ = kNoBookmark; }
+void Scanner::CopyToNextTokenDesc(TokenDesc* from) {
+ StartLiteral();
+ StartRawLiteral();
+ CopyTokenDesc(&next_, from);
+ if (next_.literal_chars->length() == 0) next_.literal_chars = nullptr;
+ if (next_.raw_literal_chars->length() == 0) next_.raw_literal_chars = nullptr;
+}
void Scanner::CopyTokenDesc(TokenDesc* to, TokenDesc* from) {
DCHECK_NOT_NULL(to);
@@ -1567,7 +1648,7 @@ int DuplicateFinder::AddSymbol(Vector<const uint8_t> key,
int value) {
uint32_t hash = Hash(key, is_one_byte);
byte* encoding = BackupKey(key, is_one_byte);
- HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
+ base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
entry->value =
reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 22c504c98e..66c6ce8133 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -8,16 +8,16 @@
#define V8_PARSING_SCANNER_H_
#include "src/allocation.h"
+#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/char-predicates.h"
#include "src/collector.h"
#include "src/globals.h"
-#include "src/hashmap.h"
#include "src/list.h"
#include "src/messages.h"
#include "src/parsing/token.h"
-#include "src/unicode.h"
#include "src/unicode-decoder.h"
+#include "src/unicode.h"
namespace v8 {
namespace internal {
@@ -143,167 +143,17 @@ class DuplicateFinder {
UnicodeCache* unicode_constants_;
// Backing store used to store strings used as hashmap keys.
SequenceCollector<unsigned char> backing_store_;
- HashMap map_;
+ base::HashMap map_;
// Buffer used for string->number->canonical string conversions.
char number_buffer_[kBufferSize];
};
// ----------------------------------------------------------------------------
-// LiteralBuffer - Collector of chars of literals.
-
-class LiteralBuffer {
- public:
- LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() { }
-
- ~LiteralBuffer() { backing_store_.Dispose(); }
-
- INLINE(void AddChar(uint32_t code_unit)) {
- if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_one_byte_) {
- if (code_unit <= unibrow::Latin1::kMaxChar) {
- backing_store_[position_] = static_cast<byte>(code_unit);
- position_ += kOneByteSize;
- return;
- }
- ConvertToTwoByte();
- }
- if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
- position_ += kUC16Size;
- } else {
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
- unibrow::Utf16::LeadSurrogate(code_unit);
- position_ += kUC16Size;
- if (position_ >= backing_store_.length()) ExpandBuffer();
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
- unibrow::Utf16::TrailSurrogate(code_unit);
- position_ += kUC16Size;
- }
- }
-
- bool is_one_byte() const { return is_one_byte_; }
-
- bool is_contextual_keyword(Vector<const char> keyword) const {
- return is_one_byte() && keyword.length() == position_ &&
- (memcmp(keyword.start(), backing_store_.start(), position_) == 0);
- }
-
- Vector<const uint16_t> two_byte_literal() const {
- DCHECK(!is_one_byte_);
- DCHECK((position_ & 0x1) == 0);
- return Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(backing_store_.start()),
- position_ >> 1);
- }
-
- Vector<const uint8_t> one_byte_literal() const {
- DCHECK(is_one_byte_);
- return Vector<const uint8_t>(
- reinterpret_cast<const uint8_t*>(backing_store_.start()),
- position_);
- }
-
- int length() const {
- return is_one_byte_ ? position_ : (position_ >> 1);
- }
-
- void ReduceLength(int delta) {
- position_ -= delta * (is_one_byte_ ? kOneByteSize : kUC16Size);
- }
-
- void Reset() {
- position_ = 0;
- is_one_byte_ = true;
- }
-
- Handle<String> Internalize(Isolate* isolate) const;
-
- void CopyFrom(const LiteralBuffer* other) {
- if (other == nullptr) {
- Reset();
- } else {
- is_one_byte_ = other->is_one_byte_;
- position_ = other->position_;
- backing_store_.Dispose();
- backing_store_ = other->backing_store_.Clone();
- }
- }
-
- private:
- static const int kInitialCapacity = 16;
- static const int kGrowthFactory = 4;
- static const int kMinConversionSlack = 256;
- static const int kMaxGrowth = 1 * MB;
- inline int NewCapacity(int min_capacity) {
- int capacity = Max(min_capacity, backing_store_.length());
- int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
- return new_capacity;
- }
-
- void ExpandBuffer() {
- Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- MemCopy(new_store.start(), backing_store_.start(), position_);
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
-
- void ConvertToTwoByte() {
- DCHECK(is_one_byte_);
- Vector<byte> new_store;
- int new_content_size = position_ * kUC16Size;
- if (new_content_size >= backing_store_.length()) {
- // Ensure room for all currently read code units as UC16 as well
- // as the code unit about to be stored.
- new_store = Vector<byte>::New(NewCapacity(new_content_size));
- } else {
- new_store = backing_store_;
- }
- uint8_t* src = backing_store_.start();
- uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
- for (int i = position_ - 1; i >= 0; i--) {
- dst[i] = src[i];
- }
- if (new_store.start() != backing_store_.start()) {
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
- position_ = new_content_size;
- is_one_byte_ = false;
- }
-
- bool is_one_byte_;
- int position_;
- Vector<byte> backing_store_;
-
- DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
-};
-
-
-// ----------------------------------------------------------------------------
// JavaScript Scanner.
class Scanner {
public:
- // Scoped helper for literal recording. Automatically drops the literal
- // if aborting the scanning before it's complete.
- class LiteralScope {
- public:
- explicit LiteralScope(Scanner* self) : scanner_(self), complete_(false) {
- scanner_->StartLiteral();
- }
- ~LiteralScope() {
- if (!complete_) scanner_->DropLiteral();
- }
- void Complete() {
- complete_ = true;
- }
-
- private:
- Scanner* scanner_;
- bool complete_;
- };
-
// Scoped helper for a re-settable bookmark.
class BookmarkScope {
public:
@@ -369,10 +219,9 @@ class Scanner {
bool literal_contains_escapes() const {
return LiteralContainsEscapes(current_);
}
- bool next_literal_contains_escapes() const {
- return LiteralContainsEscapes(next_);
- }
bool is_literal_contextual_keyword(Vector<const char> keyword) {
+ DCHECK(current_.token == Token::IDENTIFIER ||
+ current_.token == Token::ESCAPED_STRICT_RESERVED_WORD);
DCHECK_NOT_NULL(current_.literal_chars);
return current_.literal_chars->is_contextual_keyword(keyword);
}
@@ -388,9 +237,10 @@ class Scanner {
double DoubleValue();
bool ContainsDot();
bool LiteralMatches(const char* data, int length, bool allow_escapes = true) {
- if (is_literal_one_byte() &&
- literal_length() == length &&
- (allow_escapes || !literal_contains_escapes())) {
+ if (!current_.literal_chars) {
+ return !strncmp(Token::Name(current_.token), data, length);
+ } else if (is_literal_one_byte() && literal_length() == length &&
+ (allow_escapes || !literal_contains_escapes())) {
const char* token =
reinterpret_cast<const char*>(literal_one_byte_string().start());
return !strncmp(token, data, length);
@@ -419,6 +269,13 @@ class Scanner {
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
void clear_octal_position() { octal_pos_ = Location::invalid(); }
+ // Returns the location of the last seen decimal literal with a leading zero.
+ Location decimal_with_leading_zero_position() const {
+ return decimal_with_leading_zero_pos_;
+ }
+ void clear_decimal_with_leading_zero_position() {
+ decimal_with_leading_zero_pos_ = Location::invalid();
+ }
// Returns the value of the last smi that was scanned.
int smi_value() const { return current_.smi_value_; }
@@ -436,9 +293,15 @@ class Scanner {
has_multiline_comment_before_next_;
}
- // Scans the input as a regular expression pattern, previous
- // character(s) must be /(=). Returns true if a pattern is scanned.
- bool ScanRegExpPattern(bool seen_equal);
+ bool HasAnyLineTerminatorAfterNext() {
+ Token::Value ensure_next_next = PeekAhead();
+ USE(ensure_next_next);
+ return has_line_terminator_after_next_;
+ }
+
+ // Scans the input as a regular expression pattern, next token must be /(=).
+ // Returns true if a pattern is scanned.
+ bool ScanRegExpPattern();
// Scans the input as regular expression flags. Returns the flags on success.
Maybe<RegExp::Flags> ScanRegExpFlags();
@@ -446,32 +309,201 @@ class Scanner {
Token::Value ScanTemplateStart();
Token::Value ScanTemplateContinuation();
- const LiteralBuffer* source_url() const { return &source_url_; }
- const LiteralBuffer* source_mapping_url() const {
- return &source_mapping_url_;
+ Handle<String> SourceUrl(Isolate* isolate) const {
+ Handle<String> tmp;
+ if (source_url_.length() > 0) tmp = source_url_.Internalize(isolate);
+ return tmp;
+ }
+
+ Handle<String> SourceMappingUrl(Isolate* isolate) const {
+ Handle<String> tmp;
+ if (source_mapping_url_.length() > 0)
+ tmp = source_mapping_url_.Internalize(isolate);
+ return tmp;
}
bool IdentifierIsFutureStrictReserved(const AstRawString* string) const;
bool FoundHtmlComment() const { return found_html_comment_; }
-#define DECLARE_ACCESSORS(name) \
- inline bool allow_##name() const { return allow_##name##_; } \
- inline void set_allow_##name(bool allow) { allow_##name##_ = allow; }
- DECLARE_ACCESSORS(harmony_exponentiation_operator)
-#undef ACCESSOR
-
private:
+ // Scoped helper for literal recording. Automatically drops the literal
+ // if aborting the scanning before it's complete.
+ class LiteralScope {
+ public:
+ explicit LiteralScope(Scanner* self) : scanner_(self), complete_(false) {
+ scanner_->StartLiteral();
+ }
+ ~LiteralScope() {
+ if (!complete_) scanner_->DropLiteral();
+ }
+ void Complete() { complete_ = true; }
+
+ private:
+ Scanner* scanner_;
+ bool complete_;
+ };
+
+ // LiteralBuffer - Collector of chars of literals.
+ class LiteralBuffer {
+ public:
+ LiteralBuffer() : is_one_byte_(true), position_(0), backing_store_() {}
+
+ ~LiteralBuffer() { backing_store_.Dispose(); }
+
+ INLINE(void AddChar(char code_unit)) {
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ DCHECK(is_one_byte_);
+ DCHECK(IsValidAscii(code_unit));
+ backing_store_[position_] = static_cast<byte>(code_unit);
+ position_ += kOneByteSize;
+ return;
+ }
+
+ INLINE(void AddChar(uc32 code_unit)) {
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ if (is_one_byte_) {
+ if (code_unit <= unibrow::Latin1::kMaxChar) {
+ backing_store_[position_] = static_cast<byte>(code_unit);
+ position_ += kOneByteSize;
+ return;
+ }
+ ConvertToTwoByte();
+ }
+ if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
+ position_ += kUC16Size;
+ } else {
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+ unibrow::Utf16::LeadSurrogate(code_unit);
+ position_ += kUC16Size;
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+ unibrow::Utf16::TrailSurrogate(code_unit);
+ position_ += kUC16Size;
+ }
+ }
+
+ bool is_one_byte() const { return is_one_byte_; }
+
+ bool is_contextual_keyword(Vector<const char> keyword) const {
+ return is_one_byte() && keyword.length() == position_ &&
+ (memcmp(keyword.start(), backing_store_.start(), position_) == 0);
+ }
+
+ Vector<const uint16_t> two_byte_literal() const {
+ DCHECK(!is_one_byte_);
+ DCHECK((position_ & 0x1) == 0);
+ return Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(backing_store_.start()),
+ position_ >> 1);
+ }
+
+ Vector<const uint8_t> one_byte_literal() const {
+ DCHECK(is_one_byte_);
+ return Vector<const uint8_t>(
+ reinterpret_cast<const uint8_t*>(backing_store_.start()), position_);
+ }
+
+ int length() const { return is_one_byte_ ? position_ : (position_ >> 1); }
+
+ void ReduceLength(int delta) {
+ position_ -= delta * (is_one_byte_ ? kOneByteSize : kUC16Size);
+ }
+
+ void Reset() {
+ position_ = 0;
+ is_one_byte_ = true;
+ }
+
+ Handle<String> Internalize(Isolate* isolate) const;
+
+ void CopyFrom(const LiteralBuffer* other) {
+ if (other == nullptr) {
+ Reset();
+ } else {
+ is_one_byte_ = other->is_one_byte_;
+ position_ = other->position_;
+ if (position_ < backing_store_.length()) {
+ std::copy(other->backing_store_.begin(),
+ other->backing_store_.begin() + position_,
+ backing_store_.begin());
+ } else {
+ backing_store_.Dispose();
+ backing_store_ = other->backing_store_.Clone();
+ }
+ }
+ }
+
+ private:
+ static const int kInitialCapacity = 16;
+ static const int kGrowthFactory = 4;
+ static const int kMinConversionSlack = 256;
+ static const int kMaxGrowth = 1 * MB;
+
+ inline bool IsValidAscii(char code_unit) {
+ // Control characters and printable characters span the range of
+ // valid ASCII characters (0-127). Chars are unsigned on some
+ // platforms, which causes compiler warnings if the validity check
+ // tests the lower bound >= 0 as it's always true.
+ return iscntrl(code_unit) || isprint(code_unit);
+ }
+
+ inline int NewCapacity(int min_capacity) {
+ int capacity = Max(min_capacity, backing_store_.length());
+ int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
+ return new_capacity;
+ }
+
+ void ExpandBuffer() {
+ Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
+ MemCopy(new_store.start(), backing_store_.start(), position_);
+ backing_store_.Dispose();
+ backing_store_ = new_store;
+ }
+
+ void ConvertToTwoByte() {
+ DCHECK(is_one_byte_);
+ Vector<byte> new_store;
+ int new_content_size = position_ * kUC16Size;
+ if (new_content_size >= backing_store_.length()) {
+ // Ensure room for all currently read code units as UC16 as well
+ // as the code unit about to be stored.
+ new_store = Vector<byte>::New(NewCapacity(new_content_size));
+ } else {
+ new_store = backing_store_;
+ }
+ uint8_t* src = backing_store_.start();
+ uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
+ for (int i = position_ - 1; i >= 0; i--) {
+ dst[i] = src[i];
+ }
+ if (new_store.start() != backing_store_.start()) {
+ backing_store_.Dispose();
+ backing_store_ = new_store;
+ }
+ position_ = new_content_size;
+ is_one_byte_ = false;
+ }
+
+ bool is_one_byte_;
+ int position_;
+ Vector<byte> backing_store_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
+ };
+
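+
+The uc32 overload of AddChar above splits supplementary-plane code points into surrogate pairs. A standalone sketch of that split, using the plain arithmetic that unibrow::Utf16::LeadSurrogate/TrailSurrogate compute:
+
+    #include <cassert>
+    #include <cstdint>
+
+    int main() {
+      uint32_t cp = 0x1F600;  // any code point above U+FFFF
+      uint16_t lead = static_cast<uint16_t>(0xD800 + ((cp - 0x10000) >> 10));
+      uint16_t trail = static_cast<uint16_t>(0xDC00 + ((cp - 0x10000) & 0x3FF));
+      assert(lead == 0xD83D && trail == 0xDE00);  // stored back to back
+    }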
// The current and look-ahead token.
struct TokenDesc {
- Token::Value token;
Location location;
LiteralBuffer* literal_chars;
LiteralBuffer* raw_literal_chars;
int smi_value_;
+ Token::Value token;
};
static const int kCharacterLookaheadBufferSize = 1;
+ const int kMaxAscii = 127;
// Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
template <bool capture_raw>
@@ -483,9 +515,15 @@ class Scanner {
STATIC_ASSERT(kCharacterLookaheadBufferSize == 1);
Advance();
// Initialize current_ to not refer to a literal.
+ current_.token = Token::UNINITIALIZED;
current_.literal_chars = NULL;
current_.raw_literal_chars = NULL;
+ next_.token = Token::UNINITIALIZED;
+ next_.literal_chars = NULL;
+ next_.raw_literal_chars = NULL;
next_next_.token = Token::UNINITIALIZED;
+ next_next_.literal_chars = NULL;
+ next_next_.raw_literal_chars = NULL;
found_html_comment_ = false;
scanner_error_ = MessageTemplate::kNone;
}
@@ -496,6 +534,7 @@ class Scanner {
bool BookmarkHasBeenSet();
bool BookmarkHasBeenReset();
void DropBookmark();
+ void CopyToNextTokenDesc(TokenDesc* from);
static void CopyTokenDesc(TokenDesc* to, TokenDesc* from);
void ReportScannerError(const Location& location,
@@ -538,6 +577,11 @@ class Scanner {
next_.literal_chars->AddChar(c);
}
+ INLINE(void AddLiteralChar(char c)) {
+ DCHECK_NOT_NULL(next_.literal_chars);
+ next_.literal_chars->AddChar(c);
+ }
+
INLINE(void AddRawLiteralChar(uc32 c)) {
DCHECK_NOT_NULL(next_.raw_literal_chars);
next_.raw_literal_chars->AddChar(c);
@@ -582,7 +626,7 @@ class Scanner {
}
void PushBack(uc32 ch) {
- if (ch > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ if (c0_ > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
source_->PushBack(unibrow::Utf16::TrailSurrogate(c0_));
source_->PushBack(unibrow::Utf16::LeadSurrogate(c0_));
} else {
@@ -613,21 +657,30 @@ class Scanner {
// form.
// These functions only give the correct result if the literal was scanned
// when a LiteralScope object is alive.
+ //
+ // Current usage of these functions is unfortunately a little undisciplined,
+ // and is_literal_one_byte() + literal_one_byte_string() are also
+ // requested for tokens that do not have a literal. Hence, we treat any
+ // token as a one-byte literal. E.g. Token::FUNCTION pretends to have a
+ // literal "function".
Vector<const uint8_t> literal_one_byte_string() {
- DCHECK_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->one_byte_literal();
+ if (current_.literal_chars)
+ return current_.literal_chars->one_byte_literal();
+ const char* str = Token::String(current_.token);
+ const uint8_t* str_as_uint8 = reinterpret_cast<const uint8_t*>(str);
+ return Vector<const uint8_t>(str_as_uint8,
+ Token::StringLength(current_.token));
}
Vector<const uint16_t> literal_two_byte_string() {
DCHECK_NOT_NULL(current_.literal_chars);
return current_.literal_chars->two_byte_literal();
}
bool is_literal_one_byte() {
- DCHECK_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->is_one_byte();
+ return !current_.literal_chars || current_.literal_chars->is_one_byte();
}
int literal_length() const {
- DCHECK_NOT_NULL(current_.literal_chars);
- return current_.literal_chars->length();
+ if (current_.literal_chars) return current_.literal_chars->length();
+ return Token::StringLength(current_.token);
}
// Returns the literal string for the next token (the token that
// would be returned if Next() were called).
@@ -709,9 +762,14 @@ class Scanner {
// Subtract delimiters.
source_length -= 2;
}
- return token.literal_chars->length() != source_length;
+ return token.literal_chars &&
+ (token.literal_chars->length() != source_length);
}
+#ifdef DEBUG
+ void SanityCheckTokenDesc(const TokenDesc&) const;
+#endif
+
UnicodeCache* unicode_cache_;
// Buffers collecting literal strings, numbers, etc.
@@ -766,9 +824,9 @@ class Scanner {
// Input stream. Must be initialized to an Utf16CharacterStream.
Utf16CharacterStream* source_;
-
- // Start position of the octal literal last scanned.
+ // Last-seen positions of potentially problematic tokens.
Location octal_pos_;
+ Location decimal_with_leading_zero_pos_;
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
@@ -780,12 +838,11 @@ class Scanner {
// Whether there is a multi-line comment that contains a
// line-terminator after the current token, and before the next.
bool has_multiline_comment_before_next_;
+ bool has_line_terminator_after_next_;
// Whether this scanner encountered an HTML comment.
bool found_html_comment_;
- bool allow_harmony_exponentiation_operator_;
-
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
};
diff --git a/deps/v8/src/parsing/token.cc b/deps/v8/src/parsing/token.cc
index 7edfefa821..35038ba766 100644
--- a/deps/v8/src/parsing/token.cc
+++ b/deps/v8/src/parsing/token.cc
@@ -22,6 +22,16 @@ const char* const Token::string_[NUM_TOKENS] = {
};
#undef T
+#if !V8_CC_MSVC
+// TODO(vogelheim): Remove #if once MSVC supports constexpr on functions.
+constexpr
+#endif
+uint8_t length(const char* str) {
+ return str ? static_cast<uint8_t>(strlen(str)) : 0;
+}
+#define T(name, string, precedence) length(string),
+const uint8_t Token::string_length_[NUM_TOKENS] = {TOKEN_LIST(T, T)};
+#undef T
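
With the table built above, Token::StringLength becomes a lookup rather than a runtime strlen on a possibly-NULL string. Expected values (sketch):

    // Token::String(Token::FUNCTION)         == "function"
    // Token::StringLength(Token::FUNCTION)   == 8
    // Token::StringLength(Token::IDENTIFIER) == 0   (string is NULL)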
#define T(name, string, precedence) precedence,
const int8_t Token::precedence_[NUM_TOKENS] = {
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index fae9ea8bff..64cc337afc 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -148,10 +148,13 @@ namespace internal {
T(IDENTIFIER, NULL, 0) \
\
/* Future reserved words (ECMA-262, section 7.6.1.2). */ \
- T(FUTURE_RESERVED_WORD, NULL, 0) \
T(FUTURE_STRICT_RESERVED_WORD, NULL, 0) \
+ K(ASYNC, "async", 0) \
+ /* `await` is a reserved word in module code only */ \
+ K(AWAIT, "await", 0) \
K(CLASS, "class", 0) \
K(CONST, "const", 0) \
+ K(ENUM, "enum", 0) \
K(EXPORT, "export", 0) \
K(EXTENDS, "extends", 0) \
K(IMPORT, "import", 0) \
@@ -168,12 +171,12 @@ namespace internal {
/* Scanner-internal use only. */ \
T(WHITESPACE, NULL, 0) \
T(UNINITIALIZED, NULL, 0) \
+ T(REGEXP_LITERAL, NULL, 0) \
\
/* ES6 Template Literals */ \
T(TEMPLATE_SPAN, NULL, 0) \
T(TEMPLATE_TAIL, NULL, 0)
-
class Token {
public:
// All token values.
@@ -197,9 +200,10 @@ class Token {
}
static bool IsIdentifier(Value tok, LanguageMode language_mode,
- bool is_generator) {
+ bool is_generator, bool disallow_await) {
switch (tok) {
case IDENTIFIER:
+ case ASYNC:
return true;
case ESCAPED_STRICT_RESERVED_WORD:
case FUTURE_STRICT_RESERVED_WORD:
@@ -208,6 +212,8 @@ class Token {
return is_sloppy(language_mode);
case YIELD:
return !is_generator && is_sloppy(language_mode);
+ case AWAIT:
+ return !disallow_await;
default:
return false;
}
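
The resulting identifier rules, summarized (a sketch; "sloppy" means non-strict code):

    // IDENTIFIER, ASYNC             -> always usable as an identifier
    // LET, STATIC, FUTURE_STRICT_*,
    // ESCAPED_STRICT_RESERVED_WORD  -> identifier only in sloppy mode
    // YIELD                         -> sloppy mode and not inside a generator
    // AWAIT                         -> identifier unless disallow_await
    //                                  (e.g. module code)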
@@ -320,6 +326,11 @@ class Token {
return string_[tok];
}
+ static uint8_t StringLength(Value tok) {
+ DCHECK(tok < NUM_TOKENS);
+ return string_length_[tok];
+ }
+
// Returns the precedence > 0 for binary and compare
// operators; returns 0 otherwise.
static int Precedence(Value tok) {
@@ -330,6 +341,7 @@ class Token {
private:
static const char* const name_[NUM_TOKENS];
static const char* const string_[NUM_TOKENS];
+ static const uint8_t string_length_[NUM_TOKENS];
static const int8_t precedence_[NUM_TOKENS];
static const char token_type[NUM_TOKENS];
};
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 6f3551468a..a8c7255396 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -27,8 +27,12 @@
#include "src/perf-jit.h"
+#include <memory>
+
#include "src/assembler.h"
+#include "src/eh-frame.h"
#include "src/objects-inl.h"
+#include "src/source-position-table.h"
#if V8_OS_LINUX
#include <fcntl.h>
@@ -56,7 +60,13 @@ struct PerfJitHeader {
};
struct PerfJitBase {
- enum PerfJitEvent { kLoad = 0, kMove = 1, kDebugInfo = 2, kClose = 3 };
+ enum PerfJitEvent {
+ kLoad = 0,
+ kMove = 1,
+ kDebugInfo = 2,
+ kClose = 3,
+ kUnwindingInfo = 4
+ };
uint32_t event_;
uint32_t size_;
@@ -85,6 +95,13 @@ struct PerfJitCodeDebugInfo : PerfJitBase {
// Followed by entry_count_ instances of PerfJitDebugEntry.
};
+struct PerfJitCodeUnwindingInfo : PerfJitBase {
+ uint64_t unwinding_size_;
+ uint64_t eh_frame_hdr_size_;
+ uint64_t mapped_size_;
+ // Followed by size_ - sizeof(PerfJitCodeUnwindingInfo) bytes of data.
+};
+
const char PerfJitLogger::kFilenameFormatString[] = "./jit-%d.dump";
// Extra padding for the PID in the filename
@@ -204,6 +221,9 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
uint32_t code_size = code->is_crankshafted() ? code->safepoint_table_offset()
: code->instruction_size();
+ // Unwinding info comes right after debug info.
+ if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
+
static const char string_terminator[] = "\0";
PerfJitCodeLoad code_load;
@@ -229,8 +249,8 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
// Compute the entry count and get the name of the script.
uint32_t entry_count = 0;
- for (RelocIterator it(code, RelocInfo::kPositionMask); !it.done();
- it.next()) {
+ for (SourcePositionTableIterator iterator(code->source_position_table());
+ !iterator.done(); iterator.Advance()) {
entry_count++;
}
if (entry_count == 0) return;
@@ -238,7 +258,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
Handle<Object> name_or_url(Script::GetNameOrSourceURL(script));
int name_length = 0;
- base::SmartArrayPointer<char> name_string;
+ std::unique_ptr<char[]> name_string;
if (name_or_url->IsString()) {
name_string =
Handle<String>::cast(name_or_url)
@@ -250,7 +270,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
char* buffer = NewArray<char>(name_length);
base::OS::StrNCpy(buffer, name_length + 1, unknown,
static_cast<size_t>(name_length));
- name_string = base::SmartArrayPointer<char>(buffer);
+ name_string = std::unique_ptr<char[]>(buffer);
}
DCHECK_EQ(name_length, strlen(name_string.get()));
@@ -275,10 +295,11 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
int script_line_offset = script->line_offset();
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+ Address code_start = code->instruction_start();
- for (RelocIterator it(code, RelocInfo::kPositionMask); !it.done();
- it.next()) {
- int position = static_cast<int>(it.rinfo()->data());
+ for (SourcePositionTableIterator iterator(code->source_position_table());
+ !iterator.done(); iterator.Advance()) {
+ int position = iterator.source_position();
int line_number = Script::GetLineNumber(script, position);
// Compute column.
int relative_line_number = line_number - script_line_offset;
@@ -293,7 +314,8 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
}
PerfJitDebugEntry entry;
- entry.address_ = reinterpret_cast<uint64_t>(it.rinfo()->pc());
+ entry.address_ =
+ reinterpret_cast<uint64_t>(code_start + iterator.code_offset());
entry.line_number_ = line_number;
entry.column_ = column_offset;
LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
@@ -303,6 +325,41 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
LogWriteBytes(padding_bytes, padding);
}
+void PerfJitLogger::LogWriteUnwindingInfo(Code* code) {
+ PerfJitCodeUnwindingInfo unwinding_info_header;
+ unwinding_info_header.event_ = PerfJitCodeLoad::kUnwindingInfo;
+ unwinding_info_header.time_stamp_ = GetTimestamp();
+ unwinding_info_header.eh_frame_hdr_size_ = EhFrameConstants::kEhFrameHdrSize;
+
+ if (code->has_unwinding_info()) {
+ unwinding_info_header.unwinding_size_ = code->unwinding_info_size();
+ unwinding_info_header.mapped_size_ = unwinding_info_header.unwinding_size_;
+ } else {
+ unwinding_info_header.unwinding_size_ = EhFrameConstants::kEhFrameHdrSize;
+ unwinding_info_header.mapped_size_ = 0;
+ }
+
+ int content_size = static_cast<int>(sizeof(unwinding_info_header) +
+ unwinding_info_header.unwinding_size_);
+ int padding_size = RoundUp(content_size, 8) - content_size;
+ unwinding_info_header.size_ = content_size + padding_size;
+
+ LogWriteBytes(reinterpret_cast<const char*>(&unwinding_info_header),
+ sizeof(unwinding_info_header));
+
+ if (code->has_unwinding_info()) {
+ LogWriteBytes(reinterpret_cast<const char*>(code->unwinding_info_start()),
+ code->unwinding_info_size());
+ } else {
+ OFStream perf_output_stream(perf_output_handle_);
+ EhFrameWriter::WriteEmptyEhFrame(perf_output_stream);
+ }
+
+ char padding_bytes[] = "\0\0\0\0\0\0\0\0";
+ DCHECK_LT(padding_size, sizeof(padding_bytes));
+ LogWriteBytes(padding_bytes, padding_size);
+}
+
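+
+perf expects each dump record to be 8-byte aligned, hence the rounding above. A standalone check of the arithmetic (RoundUp is V8's helper; the equivalent bit math is shown here):
+
+    #include <cassert>
+
+    int main() {
+      int content_size = 42;
+      int padding_size = ((content_size + 7) & ~7) - content_size;
+      assert(padding_size == 6);  // header.size_ becomes 48
+    }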
void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
// Code relocation not supported.
UNREACHABLE();
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
index 25cc3b3686..6efa4bbd6b 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/perf-jit.h
@@ -66,11 +66,13 @@ class PerfJitLogger : public CodeEventLogger {
void LogWriteBytes(const char* bytes, int size);
void LogWriteHeader();
void LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared);
+ void LogWriteUnwindingInfo(Code* code);
static const uint32_t kElfMachIA32 = 3;
static const uint32_t kElfMachX64 = 62;
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 10;
+ static const uint32_t kElfMachARM64 = 183;
uint32_t GetElfMach() {
#if V8_TARGET_ARCH_IA32
@@ -81,6 +83,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachARM;
#elif V8_TARGET_ARCH_MIPS
return kElfMachMIPS;
+#elif V8_TARGET_ARCH_ARM64
+ return kElfMachARM64;
#else
UNIMPLEMENTED();
return 0;
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/ppc/OWNERS
+++ b/deps/v8/src/ppc/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index c495fee182..12201daf21 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -49,6 +49,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
@@ -89,11 +90,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
@@ -136,21 +132,6 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
- IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -183,19 +164,6 @@ Address Assembler::return_address_from_call_start(Address pc) {
return pc + (len + 2) * kInstrSize;
}
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
- icache_flush_mode);
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -220,6 +188,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -343,7 +312,7 @@ void RelocInfo::WipeOut() {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 507eec11af..08a8005ee1 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -155,6 +155,33 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(size), flush_mode);
+}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
@@ -166,7 +193,6 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -194,12 +220,10 @@ MemOperand::MemOperand(Register ra, Register rb) {
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
- constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits),
- positions_recorder_(this) {
+ constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
no_trampoline_pool_before_ = 0;
@@ -230,6 +254,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->constant_pool_size =
(constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
desc->origin = this;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
@@ -678,13 +704,11 @@ int Assembler::link(Label* L) {
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
- positions_recorder()->WriteRecordedPositions();
emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
- positions_recorder()->WriteRecordedPositions();
emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}
@@ -701,9 +725,6 @@ void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
- if (lk == SetLK) {
- positions_recorder()->WriteRecordedPositions();
- }
int imm16 = branch_offset;
CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
@@ -711,9 +732,6 @@ void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
void Assembler::b(int branch_offset, LKBit lk) {
- if (lk == SetLK) {
- positions_recorder()->WriteRecordedPositions();
- }
int imm26 = branch_offset;
CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
emit(BX | (imm26 & kImm26Mask) | lk);
@@ -1213,6 +1231,21 @@ void Assembler::lwax(Register rt, const MemOperand& src) {
}
+void Assembler::ldbrx(Register dst, const MemOperand& src) {
+ x_form(EXT2 | LDBRX, src.ra(), dst, src.rb(), LeaveRC);
+}
+
+
+void Assembler::lwbrx(Register dst, const MemOperand& src) {
+ x_form(EXT2 | LWBRX, src.ra(), dst, src.rb(), LeaveRC);
+}
+
+
+void Assembler::lhbrx(Register dst, const MemOperand& src) {
+ x_form(EXT2 | LHBRX, src.ra(), dst, src.rb(), LeaveRC);
+}
+
+
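+
+ldbrx/lwbrx/lhbrx emit the Power ISA byte-reversed indexed loads (doubleword, word, halfword), which give cheap little-endian access on big-endian PPC. A usage sketch inside generated code, assuming the usual ACCESS_MASM setup and the MemOperand(ra, rb) indexed form defined earlier in this file:
+
+    // __ lwbrx(r3, MemOperand(r4, r5));  // r3 = byteswap32(mem[r4 + r5])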
void Assembler::stb(Register dst, const MemOperand& src) {
DCHECK(!src.ra_.is(r0));
d_form(STB, dst, src.ra(), src.offset(), true);
@@ -2445,8 +2478,6 @@ void Assembler::EmitRelocations() {
reloc_info_writer.Write(&rinfo);
}
-
- reloc_info_writer.Finish();
}
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 3e8be7d75a..7843e2e07d 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -109,6 +109,9 @@ namespace internal {
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
@@ -164,8 +167,6 @@ struct Register {
Register r = {code};
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -204,6 +205,8 @@ const Register kConstantPoolRegister = r28; // Constant pool.
const Register kRootRegister = r29; // Roots array pointer.
const Register cp = r30; // JavaScript context pointer.
+static const bool kSimpleFPAliasing = true;
+
// Double word FP register.
struct DoubleRegister {
enum Code {
@@ -217,8 +220,6 @@ struct DoubleRegister {
static const int kNumRegisters = Code::kAfterLast;
static const int kMaxNumRegisters = kNumRegisters;
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -238,6 +239,11 @@ struct DoubleRegister {
int reg_code;
};
+typedef DoubleRegister FloatRegister;
+
+// TODO(ppc) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
DOUBLE_REGISTERS(DECLARE_REGISTER)
@@ -283,9 +289,6 @@ const CRegister cr5 = {5};
const CRegister cr6 = {6};
const CRegister cr7 = {7};
-// TODO(ppc) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -874,6 +877,9 @@ class Assembler : public AssemblerBase {
void lwzux(Register dst, const MemOperand& src);
void lwa(Register dst, const MemOperand& src);
void lwax(Register dst, const MemOperand& src);
+ void ldbrx(Register dst, const MemOperand& src);
+ void lwbrx(Register dst, const MemOperand& src);
+ void lhbrx(Register dst, const MemOperand& src);
void stb(Register dst, const MemOperand& src);
void stbx(Register dst, const MemOperand& src);
void stbux(Register dst, const MemOperand& src);
@@ -1210,7 +1216,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
@@ -1219,10 +1225,6 @@ class Assembler : public AssemblerBase {
void dq(uint64_t data);
void dp(uintptr_t data);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
@@ -1468,8 +1470,6 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockTrampolinePoolScope;
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 0671f990e8..6dd897b031 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -21,60 +21,15 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r0));
+ __ push(r4);
+ __ push(r5);
+ __ addi(r3, r3, Operand(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -82,20 +37,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -787,11 +734,8 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- const Register base = r4;
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(r5));
- const Register heapnumbermap = r8;
- const Register heapnumber = r3;
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
@@ -800,36 +744,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = r10;
Label call_runtime, done, int_exponent;
- if (exponent_type() == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
- __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
- __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
-
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
- __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ bne(&call_runtime);
-
- __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ b(&unpack_exponent);
-
- __ bind(&base_is_smi);
- __ ConvertIntToDouble(scratch, double_base);
- __ bind(&unpack_exponent);
-
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ cmp(scratch, heapnumbermap);
- __ bne(&call_runtime);
-
- __ lfd(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -843,53 +758,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
double_scratch);
__ beq(&int_exponent);
- if (exponent_type() == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half, not_minus_inf1, not_minus_inf2;
-
- // Test for 0.5.
- __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
- __ fcmpu(double_exponent, double_scratch);
- __ bne(&not_plus_half);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
- __ fcmpu(double_base, double_scratch);
- __ bne(&not_minus_inf1);
- __ fneg(double_result, double_scratch);
- __ b(&done);
- __ bind(&not_minus_inf1);
-
- // Add +0 to convert -0 to +0.
- __ fadd(double_scratch, double_base, kDoubleRegZero);
- __ fsqrt(double_result, double_scratch);
- __ b(&done);
-
- __ bind(&not_plus_half);
- __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
- __ fcmpu(double_exponent, double_scratch);
- __ bne(&call_runtime);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
- __ fcmpu(double_base, double_scratch);
- __ bne(&not_minus_inf2);
- __ fmr(double_result, kDoubleRegZero);
- __ b(&done);
- __ bind(&not_minus_inf2);
-
- // Add +0 to convert -0 to +0.
- __ fadd(double_scratch, double_base, kDoubleRegZero);
- __ LoadDoubleLiteral(double_result, 1.0, scratch);
- __ fsqrt(double_scratch, double_scratch);
- __ fdiv(double_result, double_result, double_scratch);
- __ b(&done);
- }
-
__ mflr(r0);
__ push(r0);
{
@@ -937,7 +805,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ beq(&no_carry, cr0);
__ fmul(double_result, double_result, double_scratch);
__ bind(&no_carry);
- __ ShiftRightArithImm(scratch, scratch, 1, SetRC);
+ __ ShiftRightImm(scratch, scratch, Operand(1), SetRC);
__ beq(&loop_end, cr0);
__ fmul(double_scratch, double_scratch, double_scratch);
__ b(&while_true);
@@ -958,37 +826,21 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ConvertIntToDouble(exponent, double_exponent);
// Returning or bailing out.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
- &call_runtime);
- __ stfd(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- DCHECK(heapnumber.is(r3));
- __ Ret(2);
- } else {
- __ mflr(r0);
- __ push(r0);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- }
- __ pop(r0);
- __ mtlr(r0);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
+ __ mflr(r0);
+ __ push(r0);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
}
+ __ pop(r0);
+ __ mtlr(r0);
+ __ MovFromFloatResult(double_result);
+
+ __ bind(&done);
+ __ Ret();
}
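The part of MathPowStub that survives this cleanup is the integer-exponent loop, a textbook exponentiation by squaring; the ShiftRightImm/fmul pair above consumes one exponent bit per iteration. The same algorithm in plain C++ (the stub's register use and Smi handling omitted):

#include <cstdint>

// Square-and-multiply: multiply the result by the current power whenever the
// low exponent bit is set, then square the power and shift the bits right.
double PowInt(double base, int exponent) {
  uint32_t bits = exponent < 0 ? -static_cast<uint32_t>(exponent)
                               : static_cast<uint32_t>(exponent);
  double result = 1.0;
  double power = base;
  while (bits != 0) {
    if (bits & 1) result *= power;
    power *= power;
    bits >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}

The switch from ShiftRightArithImm to a logical ShiftRightImm likely serves termination: once the register holds the exponent's unsigned magnitude, a logical shift always reaches zero, while an arithmetic shift of a negative pattern such as negated INT_MIN never would.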
@@ -999,7 +851,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1007,7 +859,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1076,7 +927,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
arg_stack_space += result_size();
}
- __ EnterExitFrame(save_doubles(), arg_stack_space);
+ __ EnterExitFrame(save_doubles(), arg_stack_space, is_builtin_exit()
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
__ mr(r14, r3);
@@ -1316,12 +1169,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
- // Clear any pending exceptions.
- __ mov(r8, Operand(isolate()->factory()->the_hole_value()));
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- __ StoreP(r8, MemOperand(ip));
-
// Invoke the function by calling through JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
@@ -1383,126 +1230,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = r4; // Object (lhs).
- Register const function = r3; // Function (rhs).
- Register const object_map = r5; // Map of {object}.
- Register const function_map = r6; // Map of {function}.
- Register const function_prototype = r7; // Prototype of {function}.
- Register const scratch = r8;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ bne(&fast_case);
- __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
- __ bne(&fast_case);
- __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ bne(&slow_case);
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ Ret();
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ bne(&slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
- __ TestBit(scratch, Map::kIsConstructor, r0);
- __ beq(&slow_case, cr0);
-
- // Ensure that {function} has an instance prototype.
- __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
- __ bne(&slow_case, cr0);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ LoadP(function_prototype,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
- __ bne(&function_prototype_valid);
- __ LoadP(function_prototype,
- FieldMemOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Register const object_instance_type = function_map;
- Register const map_bit_field = function_map;
- Register const null = scratch;
- Register const result = r3;
-
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ LoadRoot(null, Heap::kNullValueRootIndex);
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ lbz(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
- __ bne(&fast_runtime_fallback, cr0);
- // Check if the current object is a Proxy.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- __ beq(&fast_runtime_fallback);
-
- __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object, function_prototype);
- __ beq(&done);
- __ cmp(object, null);
- __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ bne(&loop);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // Found Proxy or access check needed: Call the runtime
- __ bind(&fast_runtime_fallback);
- __ Push(object, function_prototype);
- // Invalidate the instanceof cache.
- __ LoadSmiLiteral(scratch, Smi::FromInt(0));
- __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ Push(object, function);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -1538,7 +1265,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1852,9 +1578,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(r3, &runtime);
- __ CompareObjectType(r3, r5, r5, JS_ARRAY_TYPE);
+ __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
__ bne(&runtime);
- // Check that the JSArray is in fast case.
+ // Check that the object has fast elements.
__ LoadP(last_match_info_elements,
FieldMemOperand(r3, JSArray::kElementsOffset));
__ LoadP(r3,
@@ -1974,9 +1700,11 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r3);
__ Push(r6, r5, r4, r3);
+ __ Push(cp);
__ CallStub(stub);
+ __ Pop(cp);
__ Pop(r6, r5, r4, r3);
__ SmiUntag(r3);
}
@@ -1991,12 +1719,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r5 : feedback vector
// r6 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+
// Load the cache state into r8.
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
@@ -2011,7 +1742,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register weak_value = r10;
__ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
__ cmp(r4, weak_value);
- __ beq(&done);
+ __ beq(&done_increment_count);
__ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
__ beq(&done);
__ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
@@ -2034,7 +1765,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
__ cmp(r4, r8);
__ bne(&megamorphic);
- __ b(&done);
+ __ b(&done_increment_count);
__ bind(&miss);
@@ -2064,12 +1795,31 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ b(&done);
+ __ b(&done_initialize_count);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+ __ bind(&done_initialize_count);
+ // Initialize the call counter.
+ __ LoadSmiLiteral(r8, Smi::FromInt(1));
+ __ SmiToPtrArrayOffset(r7, r6);
+ __ add(r7, r5, r7);
+ __ StoreP(r8, FieldMemOperand(r7, count_offset), r0);
+ __ b(&done);
+
+ __ bind(&done_increment_count);
+
+ // Increment the call count for monomorphic function calls.
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r8, r5, r8);
+
+ __ LoadP(r7, FieldMemOperand(r8, count_offset));
+ __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+ __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
+
__ bind(&done);
}
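GenerateRecordCallTarget now maintains a per-slot call count next to the cached target: count_offset points one pointer past the feedback slot, new targets start the counter at Smi(1), and monomorphic hits bump it by one (the CallICStub hunks below drop CallICNexus::kCallCountIncrement the same way). A sketch of the tagged arithmetic, assuming the usual 64-bit Smi layout with the value in the upper 32 bits:

#include <cstdint>

constexpr int kSmiShift = 32;  // PPC64 Smi layout assumed: value in the high word

inline int64_t SmiFromInt(int value) {
  return static_cast<int64_t>(value) << kSmiShift;
}

// AddSmiLiteral(r7, r7, Smi::FromInt(1), r0) reduces to adding the raw bits
// of Smi(1); no untag/retag round trip is needed.
inline int64_t IncrementCallCount(int64_t tagged_count) {
  return tagged_count + SmiFromInt(1);
}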
@@ -2139,7 +1889,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ SmiToPtrArrayOffset(r8, r6);
__ add(r5, r5, r8);
__ LoadP(r6, FieldMemOperand(r5, count_offset));
- __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
__ StoreP(r6, FieldMemOperand(r5, count_offset), r0);
__ mr(r5, r7);
@@ -2187,7 +1937,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
const int count_offset = FixedArray::kHeaderSize + kPointerSize;
__ LoadP(r6, FieldMemOperand(r9, count_offset));
- __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
__ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
__ bind(&call_function);
@@ -2257,7 +2007,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
// Initialize the call counter.
- __ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement));
+ __ LoadSmiLiteral(r8, Smi::FromInt(1));
__ StoreP(r8, FieldMemOperand(r9, count_offset), r0);
// Store the function. Use a stub since we need a frame for allocation.
@@ -2267,9 +2017,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(r4);
+ __ Push(cp, r4);
__ CallStub(&create_stub);
- __ Pop(r4);
+ __ Pop(cp, r4);
}
__ b(&call_function);
@@ -2348,13 +2098,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
}
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r3);
@@ -2683,67 +2427,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r6: from index (untagged)
__ SmiTag(r6, r6);
StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
- STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
generator.SkipSlow(masm, &runtime);
}
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in r3.
- STATIC_ASSERT(kSmiTag == 0);
- __ TestIfSmi(r3, r0);
- __ Ret(eq, cr0);
-
- __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
- // r3: receiver
- // r4: receiver instance type
- __ Ret(eq);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in r3.
- __ AssertNotNumber(r3);
-
- __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
- // r3: receiver
- // r4: receiver instance type
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub, lt);
-
- Label not_oddball;
- __ cmpi(r4, Operand(ODDBALL_TYPE));
- __ bne(&not_oddball);
- __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
- __ blr();
- __ bind(&not_oddball);
-
- __ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in r3.
- __ AssertString(r3);
-
- // Check if string has a cached array index.
- Label runtime;
- __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
- __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
- __ bne(&runtime, cr0);
- __ IndexFromHash(r5, r3);
- __ blr();
-
- __ bind(&runtime);
- __ push(r3); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r3.
Label is_number;
@@ -2934,7 +2624,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load r5 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(r5, handle(isolate()->heap()->undefined_value()));
+ __ Move(r5, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3808,14 +3498,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -3960,11 +3650,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, feedback,
- receiver_map, scratch1, r10);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, name, feedback, receiver_map, scratch1, r10);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -4045,37 +3732,30 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ b(&compare_map);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
- Register key = VectorStoreICDescriptor::NameRegister(); // r5
- Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
- Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r4
+ Register key = StoreWithVectorDescriptor::NameRegister(); // r5
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // r6
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // r7
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3)); // r3
Register feedback = r8;
Register receiver_map = r9;
Register scratch1 = r10;
@@ -4105,11 +3785,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
- scratch1, scratch2);
+ masm->isolate()->store_stub_cache()->GenerateProbe(
+ masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
__ bind(&miss);
StoreIC::GenerateMiss(masm);
@@ -4119,13 +3796,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ b(&compare_map);
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4194,13 +3869,12 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ b(miss);
}
-
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
- Register key = VectorStoreICDescriptor::NameRegister(); // r5
- Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
- Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r4
+ Register key = StoreWithVectorDescriptor::NameRegister(); // r5
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // r6
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // r7
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3)); // r3
Register feedback = r8;
Register receiver_map = r9;
Register scratch1 = r10;
@@ -4470,18 +4144,11 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
@@ -4489,8 +4156,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
@@ -4509,13 +4174,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4599,7 +4266,7 @@ void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0, lt);
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN, gt);
if (IsFastPackedElementsKind(kind)) {
@@ -4698,15 +4365,15 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ StoreP(r5, MemOperand(r3, JSObject::kMapOffset));
+ __ StoreP(r5, FieldMemOperand(r3, JSObject::kMapOffset), r0);
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, MemOperand(r3, JSObject::kPropertiesOffset));
- __ StoreP(r6, MemOperand(r3, JSObject::kElementsOffset));
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ addi(r4, r3, Operand(JSObject::kHeaderSize));
+ __ addi(r4, r3, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
- // -- r3 : result (untagged)
+ // -- r3 : result (tagged)
// -- r4 : result fields (untagged)
// -- r8 : result end (untagged)
// -- r5 : initial map
@@ -4724,9 +4391,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(r4, r8, r9);
-
- // Add the object tag to make the JSObject real.
- __ addi(r3, r3, Operand(kHeapObjectTag));
__ Ret();
}
__ bind(&slack_tracking);
@@ -4746,9 +4410,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(r4, r8, r9);
- // Add the object tag to make the JSObject real.
- __ addi(r3, r3, Operand(kHeapObjectTag));
-
// Check if we can finalize the instance size.
__ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
__ Ret(ne);
@@ -4774,10 +4435,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(r5);
}
- __ subi(r3, r3, Operand(kHeapObjectTag));
__ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
__ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
__ add(r8, r3, r8);
+ __ subi(r8, r8, Operand(kHeapObjectTag));
__ b(&done_allocate);
// Fall back to %NewObject.
@@ -4795,20 +4456,20 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r4);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r5 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mr(r5, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r5 point to the JavaScript frame.
+ __ mr(r5, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r4);
- __ bne(&loop);
+ __ beq(&ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
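This skip_stub_frame() pattern replaces the old frame-walking loop in all three FastNew*ArgumentsStub ports in this file: rather than chasing caller frame pointers until the function slot matches, the stub hops at most one frame, decided statically per stub instance. Roughly, with hypothetical types:

// Hypothetical frame layout, enough for the sketch; the real offsets are
// StandardFrameConstants::kCallerFPOffset and kFunctionOffset.
struct Frame {
  Frame* caller_fp;
  void* function;
};

// One static decision instead of the old loop: Ignition enters the stub
// through a handler/stub frame, full codegen does not.
Frame* JavaScriptFrameFor(Frame* fp, bool skip_stub_frame) {
  return skip_stub_frame ? fp->caller_fp : fp;
}

The FLAG_debug_code block then only asserts that the chosen frame really belongs to the function, aborting otherwise.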
@@ -4823,13 +4484,13 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// specified by the function's internal formal parameter count.
Label rest_parameters;
__ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadWordArith(
- r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+ r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_PPC64
- __ SmiTag(r4);
+ __ SmiTag(r6);
#endif
- __ sub(r3, r3, r4, LeaveOE, SetRC);
+ __ sub(r3, r3, r6, LeaveOE, SetRC);
__ bgt(&rest_parameters, cr0);
// Return an empty rest parameter array.
@@ -4842,7 +4503,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Set up the rest parameter array in r3.
@@ -4876,6 +4537,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- r3 : number of rest parameters (tagged)
+ // -- r4 : function
// -- r5 : pointer just past first rest parameters
// -- r9 : size of rest parameters
// -- lr : return address
@@ -4883,9 +4545,9 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
- __ mov(r4, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ add(r4, r4, r9);
- __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+ __ mov(r10, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ add(r10, r10, r9);
+ __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r6.
@@ -4916,17 +4578,25 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ addi(r3, r7, Operand(kHeapObjectTag));
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+ __ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r4);
- __ Push(r3, r5, r4);
+ __ SmiTag(r10);
+ __ Push(r3, r5, r10);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ mr(r6, r3);
__ Pop(r3, r5);
}
__ b(&done_allocate);
+
+ // Fall back to %NewRestParameter.
+ __ bind(&too_big_for_new_space);
+ __ push(r4);
+ __ TailCallRuntime(Runtime::kNewRestParameter);
}
}
@@ -4939,6 +4609,23 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r4);
+ // Make r10 point to the JavaScript frame.
+ __ mr(r10, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ LoadP(r10, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ LoadP(ip, MemOperand(r10, StandardFrameConstants::kFunctionOffset));
+ __ cmp(ip, r4);
+ __ beq(&ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadWordArith(
@@ -4947,19 +4634,20 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ SmiTag(r5);
#endif
__ SmiToPtrArrayOffset(r6, r5);
- __ add(r6, fp, r6);
+ __ add(r6, r10, r6);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
// r4 : function
// r5 : number of parameters (tagged)
// r6 : parameters pointer
+ // r10 : JavaScript frame pointer
// Registers used over whole function:
// r8 : arguments count (tagged)
// r9 : mapped parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r7, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
__ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
@@ -5022,7 +4710,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
+ __ Allocate(r11, r3, r11, r7, &runtime, NO_ALLOCATION_FLAGS);
// r3 = address of new object(s) (tagged)
// r5 = argument count (smi-tagged)
@@ -5191,20 +4879,20 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r4);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r5 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mr(r5, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r5 point to the JavaScript frame.
+ __ mr(r5, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r4);
- __ bne(&loop);
+ __ beq(&ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -5214,10 +4902,10 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&arguments_adaptor);
{
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadWordArith(
r3,
- FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_PPC64
__ SmiTag(r3);
#endif
@@ -5237,6 +4925,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- r3 : number of rest parameters (tagged)
+ // -- r4 : function
// -- r5 : pointer just past first rest parameters
// -- r9 : size of rest parameters
// -- lr : return address
@@ -5244,9 +4933,10 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Allocate space for the strict arguments object plus the backing store.
Label allocate, done_allocate;
- __ mov(r4, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ add(r4, r4, r9);
- __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+ __ mov(r10,
+ Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ add(r10, r10, r9);
+ __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r6.
@@ -5279,47 +4969,27 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ addi(r3, r7, Operand(kHeapObjectTag));
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ Cmpi(r10, Operand(Page::kMaxRegularHeapObjectSize), r0);
+ __ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r4);
- __ Push(r3, r5, r4);
+ __ SmiTag(r10);
+ __ Push(r3, r5, r10);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ mr(r6, r3);
__ Pop(r3, r5);
}
__ b(&done_allocate);
-}
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context = cp;
- Register result = r3;
- Register slot = r5;
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = result;
- }
-
- // Load the PropertyCell value at the specified slot.
- __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
- __ add(result, context, r0);
- __ LoadP(result, ContextMemOperand(result));
- __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
- // If the result is not the_hole, return. Otherwise, handle in the runtime.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- __ Ret(ne);
-
- // Fallback to runtime.
- __ SmiTag(slot);
- __ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ push(r4);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
-
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register value = r3;
Register slot = r5;
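Both arguments stubs above now gate their %AllocateInNewSpace retry on the request size: anything larger than Page::kMaxRegularHeapObjectSize goes straight to the full runtime path (%NewRestParameter or %NewStrictArguments). The guard in isolation, parameterized so the sketch stays self-contained:

#include <cstdint>

// Mirrors the Cmpi/bgt pair added in both fallback paths: only sizes that
// still fit a regular page retry allocation in new space.
bool CanRetryAllocationInNewSpace(int64_t size_in_bytes,
                                  int64_t max_regular_heap_object_size) {
  return size_in_bytes <= max_regular_heap_object_size;
}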
@@ -5623,7 +5293,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// context save
__ push(context);
@@ -5659,10 +5333,10 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// it's not controlled by GC.
// PPC LINUX ABI:
//
- // Create 5 extra slots on stack:
+ // Create 4 extra slots on stack:
// [0] space for DirectCEntryStub's LR save
- // [1-4] FunctionCallbackInfo
- const int kApiStackSpace = 5;
+ // [1-3] FunctionCallbackInfo
+ const int kApiStackSpace = 4;
const int kFunctionCallbackInfoOffset =
(kStackFrameExtraParamSlot + 1) * kPointerSize;
@@ -5681,9 +5355,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::length_ = argc
__ li(ip, Operand(argc()));
__ stw(ip, MemOperand(r3, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ li(ip, Operand::Zero());
- __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5700,9 +5371,9 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
- MemOperand is_construct_call_operand =
- MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
- MemOperand* stack_space_operand = &is_construct_call_operand;
+ MemOperand length_operand =
+ MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
+ MemOperand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@@ -5712,18 +5383,39 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- r5 : api_function_address
- // -----------------------------------
-
- Register api_function_address = ApiGetterDescriptor::function_address();
int arg0Slot = 0;
int accessorInfoSlot = 0;
int apiStackSpace = 0;
- DCHECK(api_function_address.is(r5));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = r7;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = r5;
+
+ __ push(receiver);
+ // Push data from AccessorInfo.
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ push(scratch);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch, scratch);
+ __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch, holder);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5771,6 +5463,10 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadP(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
@@ -5778,7 +5474,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
kStackUnwindSpace, NULL, return_value_operand, NULL);
}
-
#undef __
} // namespace internal
} // namespace v8
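CallApiGetterStub now builds the v8::PropertyCallbackInfo::args_ block itself instead of receiving a precomputed one; the pushes above lay out the seven slots so the property name handle ends up on top of them. A hypothetical reconstruction, with index names taken from the STATIC_ASSERTs in the hunk:

#include <array>

// Hypothetical reconstruction of PropertyCallbackInfo::args_ as the pushes
// above leave it; index names come from the STATIC_ASSERTs in the hunk.
std::array<void*, 7> BuildPropertyCallbackArgs(void* smi_false, void* holder,
                                               void* isolate, void* undefined,
                                               void* data, void* receiver) {
  return {{smi_false,   // kShouldThrowOnErrorIndex (Smi 0 == false)
           holder,      // kHolderIndex
           isolate,     // kIsolateIndex
           undefined,   // kReturnValueDefaultValueIndex
           undefined,   // kReturnValueOffset
           data,        // kDataIndex
           receiver}};  // kThisIndex
}

Since the name handle is pushed last, just above args_[0], kStackUnwindSpace is kArgsLength + 1.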
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 5642e91f9d..07853edc20 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_PPC
+#include <memory>
+
#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"
@@ -16,62 +18,6 @@ namespace internal {
#define __ masm.
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_ppc_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
- return Simulator::current(isolate)
- ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
-
- {
- DoubleRegister input = d1;
- DoubleRegister result = d2;
- DoubleRegister double_scratch1 = d3;
- DoubleRegister double_scratch2 = d4;
- Register temp1 = r7;
- Register temp2 = r8;
- Register temp3 = r9;
-
-// Called from C
- __ function_descriptor();
-
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
- double_scratch2, temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- __ fmr(d1, result);
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
- fast_exp_ppc_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
@@ -185,6 +131,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiToDoubleArrayOffset(scratch3, length);
__ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
+ __ subi(array, array, Operand(kHeapObjectTag));
// array: destination FixedDoubleArray, not tagged as heap object.
// elements: source FixedArray.
@@ -313,12 +260,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(array_size, array_size, r0);
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, not tagged as heap object
+ // array: destination FixedArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
- __ addi(array, array, Operand(kHeapObjectTag));
+ __ StoreP(length, FieldMemOperand(array,
+ FixedDoubleArray::kLengthOffset), r0);
+ __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
// Prepare for conversion loop.
Register src_elements = elements;
@@ -512,96 +459,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ bind(&done);
}
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1, Register temp2,
- Register temp3) {
- DCHECK(!input.is(result));
- DCHECK(!input.is(double_scratch1));
- DCHECK(!input.is(double_scratch2));
- DCHECK(!result.is(double_scratch1));
- DCHECK(!result.is(double_scratch2));
- DCHECK(!double_scratch1.is(double_scratch2));
- DCHECK(!temp1.is(temp2));
- DCHECK(!temp1.is(temp3));
- DCHECK(!temp2.is(temp3));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label zero, infinity, done;
-
- __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ lfd(double_scratch1, ExpConstant(0, temp3));
- __ fcmpu(double_scratch1, input);
- __ fmr(result, input);
- __ bunordered(&done);
- __ bge(&zero);
-
- __ lfd(double_scratch2, ExpConstant(1, temp3));
- __ fcmpu(input, double_scratch2);
- __ bge(&infinity);
-
- __ lfd(double_scratch1, ExpConstant(3, temp3));
- __ lfd(result, ExpConstant(4, temp3));
- __ fmul(double_scratch1, double_scratch1, input);
- __ fadd(double_scratch1, double_scratch1, result);
- __ MovDoubleLowToInt(temp2, double_scratch1);
- __ fsub(double_scratch1, double_scratch1, result);
- __ lfd(result, ExpConstant(6, temp3));
- __ lfd(double_scratch2, ExpConstant(5, temp3));
- __ fmul(double_scratch1, double_scratch1, double_scratch2);
- __ fsub(double_scratch1, double_scratch1, input);
- __ fsub(result, result, double_scratch1);
- __ fmul(double_scratch2, double_scratch1, double_scratch1);
- __ fmul(result, result, double_scratch2);
- __ lfd(double_scratch2, ExpConstant(7, temp3));
- __ fmul(result, result, double_scratch2);
- __ fsub(result, result, double_scratch1);
- __ lfd(double_scratch2, ExpConstant(8, temp3));
- __ fadd(result, result, double_scratch2);
- __ srwi(temp1, temp2, Operand(11));
- __ andi(temp2, temp2, Operand(0x7ff));
- __ addi(temp1, temp1, Operand(0x3ff));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ slwi(temp2, temp2, Operand(3));
-#if V8_TARGET_ARCH_PPC64
- __ ldx(temp2, MemOperand(temp3, temp2));
- __ sldi(temp1, temp1, Operand(52));
- __ orx(temp2, temp1, temp2);
- __ MovInt64ToDouble(double_scratch1, temp2);
-#else
- __ add(ip, temp3, temp2);
- __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
- __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
- __ slwi(temp1, temp1, Operand(20));
- __ orx(temp3, temp1, temp3);
- __ MovInt64ToDouble(double_scratch1, temp3, temp2);
-#endif
-
- __ fmul(result, result, double_scratch1);
- __ b(&done);
-
- __ bind(&zero);
- __ fmr(result, kDoubleRegZero);
- __ b(&done);
-
- __ bind(&infinity);
- __ lfd(result, ExpConstant(2, temp3));
-
- __ bind(&done);
-}
-
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
@@ -611,7 +468,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(
+ std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
index c3cd9b39a0..b0d344a013 100644
--- a/deps/v8/src/ppc/codegen-ppc.h
+++ b/deps/v8/src/ppc/codegen-ppc.h
@@ -5,7 +5,6 @@
#ifndef V8_PPC_CODEGEN_PPC_H_
#define V8_PPC_CODEGEN_PPC_H_
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -23,18 +22,6 @@ class StringCharLoadGenerator : public AllStatic {
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- // Register input isn't modified. All other registers are clobbered.
- static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
- DoubleRegister result, DoubleRegister double_scratch1,
- DoubleRegister double_scratch2, Register temp1,
- Register temp2, Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 4c404ae911..393f039e27 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -241,6 +241,8 @@ enum OpcodeExt2 {
POPCNTD = 506 << 1, // Population Count Doubleword
// Below represent bits 10-1 (any value >= 512)
+ LDBRX = 532 << 1, // load double word byte reversed w/ x-form
+ LWBRX = 534 << 1, // load word byte reversed w/ x-form
LFSX = 535 << 1, // load float-single w/ x-form
SRWX = 536 << 1, // Shift Right Word
SRDX = 539 << 1, // Shift Right Double Word
@@ -252,6 +254,7 @@ enum OpcodeExt2 {
STFSUX = 695 << 1, // store float-single w/ update x-form
STFDX = 727 << 1, // store float-double w/ x-form
STFDUX = 759 << 1, // store float-double w/ update x-form
+ LHBRX = 790 << 1, // load half word byte reversed w/ x-form
SRAW = 792 << 1, // Shift Right Algebraic Word
SRAD = 794 << 1, // Shift Right Algebraic Double Word
SRAWIX = 824 << 1, // Shift Right Algebraic Word Immediate
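LDBRX, LWBRX and LHBRX, added above, are PowerPC byte-reversed indexed loads: they fetch a doubleword, word or halfword with its bytes swapped, the cheap way to read data of the opposite endianness. A host C++ sketch of the lwbrx semantics, illustrative only:

    #include <cstdint>
    #include <cstring>

    // lwbrx: load a 32-bit word with its byte order reversed.
    uint32_t LoadWordByteReversed(const void* addr) {
      uint32_t v;
      std::memcpy(&v, addr, sizeof(v));
      return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
             ((v & 0x00ff0000u) >> 8) | ((v & 0xff000000u) >> 24);
    }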
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index ead877e149..39102a1ef0 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -124,8 +124,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all double registers before messing with them.
__ subi(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index baba14643f..c0a02a8b9c 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -39,6 +39,7 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
//------------------------------------------------------------------------------
@@ -118,7 +119,7 @@ void Decoder::PrintRegister(int reg) {
// Print the double FP register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
- Print(DoubleRegister::from_code(reg).ToString());
+ Print(GetRegConfig()->GetDoubleRegisterName(reg));
}
@@ -1401,7 +1402,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -1412,7 +1413,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Register::from_code(reg).ToString();
+ return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
@@ -1461,7 +1462,7 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- v8::internal::PrintF(f, "%p %08x %s\n", prev_pc,
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
*reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
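The static_cast<void*> additions above are not cosmetic: in the C printf family, %p is only defined for void*, so passing byte* is technically undefined and newer compilers flag it under -Wformat. A minimal sketch:

    #include <cstdio>

    void PrintAddress(unsigned char* pc) {
      std::printf("%p\n", static_cast<void*>(pc));  // %p requires void*
    }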
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 48b6cdcf0c..bc188f4be1 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -11,6 +11,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return r4;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r3; }
const Register LoadDescriptor::ReceiverRegister() { return r4; }
const Register LoadDescriptor::NameRegister() { return r5; }
@@ -23,13 +36,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
const Register StoreDescriptor::ReceiverRegister() { return r4; }
const Register StoreDescriptor::NameRegister() { return r5; }
const Register StoreDescriptor::ValueRegister() { return r3; }
+const Register StoreDescriptor::SlotRegister() { return r7; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r7; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return r6; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return r7; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return r6; }
@@ -39,23 +48,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return r8; }
const Register StoreTransitionDescriptor::MapRegister() { return r6; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r5; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
-const Register InstanceOfDescriptor::LeftRegister() { return r4; }
-const Register InstanceOfDescriptor::RightRegister() { return r3; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return r4; }
const Register StringCompareDescriptor::RightRegister() { return r3; }
-
-const Register ApiGetterDescriptor::function_address() { return r5; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return r3; }
+const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
const Register MathPowTaggedDescriptor::exponent() { return r5; }
@@ -68,20 +69,12 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r6};
@@ -244,50 +237,36 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r3 -- number of arguments
// r4 -- function
// r5 -- allocation site with elements kind
- Register registers[] = {r4, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
Register registers[] = {r4, r5, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// register state
// r3 -- number of arguments
- // r4 -- constructor function
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // r4 -- function
+ // r5 -- allocation site with elements kind
+ Register registers[] = {r4, r5, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer and single argument)
- Register registers[] = {r4, r3};
+ Register registers[] = {r4, r5, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {r3};
@@ -314,6 +293,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // r4 -- lhs
+ // r3 -- rhs
+ // r7 -- slot id
+ // r6 -- vector
+ Register registers[] = {r4, r3, r7, r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -374,9 +369,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -410,6 +404,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // the value to pass to the generator
+ r4, // the JSGeneratorObject to resume
+ r5 // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
} // namespace internal
} // namespace v8
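The descriptor hunks above pass register counts via arraysize(registers). V8's real helper is a macro over a function template; a minimal sketch of the idea (ArraySize is a stand-in name):

    #include <cstddef>

    // Compile-time array length, as used by the InitializePlatformSpecific calls.
    template <typename T, size_t N>
    constexpr size_t ArraySize(T (&)[N]) {
      return N;
    }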
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 42e5a13157..4e39d967af 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -84,10 +84,6 @@ void MacroAssembler::Call(Register target) {
Label start;
bind(&start);
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
-
// branch via link register and set LK bit for return point
mtctr(target);
bctrl();
@@ -128,11 +124,6 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
Label start;
bind(&start);
#endif
-
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
-
// This can likely be optimized to use bc() with a 24-bit relative offset
//
// RecordRelocInfo(x.rmode_, x.imm_);
@@ -198,19 +189,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
- } else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- mov(dst, Operand(cell));
- LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- mov(dst, Operand(value));
- }
- }
+ mov(dst, Operand(value));
}
@@ -725,8 +704,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
@@ -1047,9 +1025,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- LoadP(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -1095,6 +1072,49 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
return frame_ends;
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ int fp_delta = 0;
+ mflr(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ if (target.is_valid()) {
+ Push(r0, fp, kConstantPoolRegister, context, target);
+ fp_delta = 3;
+ } else {
+ Push(r0, fp, kConstantPoolRegister, context);
+ fp_delta = 2;
+ }
+ } else {
+ if (target.is_valid()) {
+ Push(r0, fp, context, target);
+ fp_delta = 2;
+ } else {
+ Push(r0, fp, context);
+ fp_delta = 1;
+ }
+ }
+ addi(fp, sp, Operand(fp_delta * kPointerSize));
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ if (FLAG_enable_embedded_constant_pool) {
+ if (target.is_valid()) {
+ Pop(r0, fp, kConstantPoolRegister, context, target);
+ } else {
+ Pop(r0, fp, kConstantPoolRegister, context);
+ }
+ } else {
+ if (target.is_valid()) {
+ Pop(r0, fp, context, target);
+ } else {
+ Pop(r0, fp, context);
+ }
+ }
+ mtlr(r0);
+}
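For reference, with the embedded constant pool disabled and a valid target, the push order in EnterBuiltinFrame above yields the following layout (derived from the code; a sketch, not authoritative):

    fp + 1 * kPointerSize : saved lr (from r0)
    fp                    : caller's fp
    fp - 1 * kPointerSize : context
    fp - 2 * kPointerSize : target
    fp - 3 * kPointerSize : argc   <-- sp

LeaveBuiltinFrame pops in the mirror order and restores the link register.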
// ExitFrame layout (probably somewhat wrong; needs updating)
//
@@ -1112,7 +1132,10 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// in the fp register (r31)
// Then - we buy a new frame
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -1123,7 +1146,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// all of the pushes that have happened inside of V8
// since we were called from C code
- LoadSmiLiteral(ip, Smi::FromInt(StackFrame::EXIT));
+ LoadSmiLiteral(ip, Smi::FromInt(frame_type));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1387,12 +1410,14 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- mov(r7, Operand(step_in_enabled));
- lbz(r7, MemOperand(r7));
- cmpi(r7, Operand::Zero());
- beq(&skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ mov(r7, Operand(last_step_action));
+ LoadByte(r7, MemOperand(r7), r0);
+ extsb(r7, r7);
+ cmpi(r7, Operand(StepIn));
+ blt(&skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1790,6 +1815,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1875,18 +1901,21 @@ void MacroAssembler::Allocate(int object_size, Register result,
blt(gc_required);
add(result_end, result, result_end);
}
- StoreP(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addi(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ StoreP(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ addi(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1976,12 +2005,114 @@ void MacroAssembler::Allocate(Register object_size, Register result,
andi(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ StoreP(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ addi(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ Register top_address = scratch;
+ mov(top_address, Operand(allocation_top));
+ LoadP(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
+ addi(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2));
+ add(result_end, result, result_end);
+ } else {
+ add(result_end, result, object_size);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ andi(r0, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, cr0);
+ }
StoreP(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addi(result, result, Operand(kHeapObjectTag));
+ // Tag object.
+ addi(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address register.
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+ LoadP(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_PPC64
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ andi(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ stw(result_end, MemOperand(result));
+ addi(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
}
+
+ // Calculate new top using result.
+ Add(result_end, result, object_size, r0);
+
+ // Unlike an allocation folding dominator, a folded allocation updates the top pointer.
+ StoreP(result_end, MemOperand(top_address));
+
+ // Tag object.
+ addi(result, result, Operand(kHeapObjectTag));
}
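FastAllocate above is a pure pointer bump with no limit check, which is only sound because a dominating Allocate (flagged ALLOCATION_FOLDING_DOMINATOR) already reserved space for the whole folded group. The idea in host C++ (names hypothetical, illustrative only):

    #include <cstddef>
    #include <cstdint>

    struct BumpArena {
      uintptr_t top;  // current allocation top

      // Bump without a limit check; callers must have proved, via a prior
      // reservation, that `size` bytes are available below the limit.
      void* FastAllocate(size_t size) {
        void* result = reinterpret_cast<void*>(top);
        top += size;
        return result;
      }
    };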
@@ -1999,7 +2130,8 @@ void MacroAssembler::AllocateTwoByteString(Register result, Register length,
and_(scratch1, scratch1, r0);
// Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
@@ -2021,7 +2153,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
and_(scratch1, scratch1, r0);
// Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -2034,7 +2167,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
scratch2);
@@ -2046,7 +2179,7 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2059,7 +2192,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
scratch2);
@@ -2072,7 +2205,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
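The repeated TAG_OBJECT to NO_ALLOCATION_FLAGS changes above go with Allocate now tagging unconditionally: V8 tells heap pointers and Smis apart by the low bit, so tagging is a single add of kHeapObjectTag. A sketch of the invariant:

    #include <cstdint>

    const uintptr_t kHeapObjectTag = 1;  // low bit set on heap object pointers

    uintptr_t TagHeapObject(uintptr_t untagged) { return untagged + kHeapObjectTag; }
    bool HasHeapObjectTag(uintptr_t value) { return (value & kHeapObjectTag) != 0; }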
@@ -2667,9 +2800,11 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
mov(r4, Operand(builtin));
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2761,16 +2896,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- LoadSmiLiteral(r0, Smi::FromInt(reason));
- push(r0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
}
@@ -2994,6 +3132,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAGeneratorObject);
+ }
+}
+
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -3087,12 +3237,11 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -3100,12 +3249,8 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
- StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
- r0);
- } else {
- StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
+ r0);
}
@@ -3126,7 +3271,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3183,50 +3329,9 @@ void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
LoadP(scratch, MemOperand(src));
addi(src, src, Operand(kPointerSize));
subi(length, length, Operand(kPointerSize));
- if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
- // currently false for PPC - but possible future opt
- StoreP(scratch, MemOperand(dst));
- addi(dst, dst, Operand(kPointerSize));
- } else {
-#if V8_TARGET_LITTLE_ENDIAN
- stb(scratch, MemOperand(dst, 0));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 1));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 2));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 3));
-#if V8_TARGET_ARCH_PPC64
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 4));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 5));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 6));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 7));
-#endif
-#else
-#if V8_TARGET_ARCH_PPC64
- stb(scratch, MemOperand(dst, 7));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 6));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 5));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 4));
- ShiftRightImm(scratch, scratch, Operand(8));
-#endif
- stb(scratch, MemOperand(dst, 3));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 2));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 1));
- ShiftRightImm(scratch, scratch, Operand(8));
- stb(scratch, MemOperand(dst, 0));
-#endif
- addi(dst, dst, Operand(kPointerSize));
- }
+
+ StoreP(scratch, MemOperand(dst));
+ addi(dst, dst, Operand(kPointerSize));
bdnz(&word_loop);
// Copy the last bytes if any left.
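The simplified loop above drops the per-byte big/little-endian fallback and stores one pointer-sized word per iteration, assuming unaligned pointer stores are acceptable. Equivalent host logic, a sketch:

    #include <cstddef>
    #include <cstring>

    // Copy a word per iteration, like the word_loop above; the caller
    // handles the sub-word tail.
    void CopyWords(unsigned char* dst, const unsigned char* src, size_t length) {
      while (length >= sizeof(void*)) {
        std::memcpy(dst, src, sizeof(void*));  // tolerates unaligned addresses
        dst += sizeof(void*);
        src += sizeof(void*);
        length -= sizeof(void*);
      }
    }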
@@ -4202,11 +4307,7 @@ void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
/* cannot use d-form */
DCHECK(!scratch.is(no_reg));
mov(scratch, Operand(offset));
-#if V8_TARGET_ARCH_PPC64
- ldx(dst, MemOperand(mem.ra(), scratch));
-#else
- lwzx(dst, MemOperand(mem.ra(), scratch));
-#endif
+ LoadPX(dst, MemOperand(mem.ra(), scratch));
} else {
#if V8_TARGET_ARCH_PPC64
int misaligned = (offset & 3);
@@ -4225,9 +4326,7 @@ void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
}
}
-
-// Store a "pointer" sized value to the memory location
-void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+void MacroAssembler::LoadPU(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -4235,11 +4334,26 @@ void MacroAssembler::StoreP(Register src, const MemOperand& mem,
/* cannot use d-form */
DCHECK(!scratch.is(no_reg));
mov(scratch, Operand(offset));
+ LoadPUX(dst, MemOperand(mem.ra(), scratch));
+ } else {
#if V8_TARGET_ARCH_PPC64
- stdx(src, MemOperand(mem.ra(), scratch));
+ ldu(dst, mem);
#else
- stwx(src, MemOperand(mem.ra(), scratch));
+ lwzu(dst, mem);
#endif
+ }
+}
+
+// Store a "pointer" sized value to the memory location
+void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ /* cannot use d-form */
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
+ StorePX(src, MemOperand(mem.ra(), scratch));
} else {
#if V8_TARGET_ARCH_PPC64
int misaligned = (offset & 3);
@@ -4263,6 +4377,24 @@ void MacroAssembler::StoreP(Register src, const MemOperand& mem,
}
}
+void MacroAssembler::StorePU(Register src, const MemOperand& mem,
+ Register scratch) {
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ /* cannot use d-form */
+ DCHECK(!scratch.is(no_reg));
+ mov(scratch, Operand(offset));
+ StorePUX(src, MemOperand(mem.ra(), scratch));
+ } else {
+#if V8_TARGET_ARCH_PPC64
+ stdu(src, mem);
+#else
+ stwu(src, mem);
+#endif
+ }
+}
+
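LoadPU/StorePU above wrap the PowerPC "update" forms (ldu/lwzu, stdu/stwu), which write the effective address back into the base register: stdu src, offset(base) roughly means base += offset; *base = src. A host-side sketch of that semantics:

    #include <cstdint>

    // stdu src, offset(base): update base, then store at the new address.
    inline void StoreWithUpdate(intptr_t& base, intptr_t offset, intptr_t src) {
      base += offset;
      *reinterpret_cast<intptr_t*>(base) = src;
    }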
void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -4457,6 +4589,44 @@ void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
}
}
+void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ lfdux(dst, MemOperand(base, scratch));
+ } else {
+ lfdu(dst, mem);
+ }
+}
+
+void MacroAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ lfsx(dst, MemOperand(base, scratch));
+ } else {
+ lfs(dst, mem);
+ }
+}
+
+void MacroAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ lfsux(dst, MemOperand(base, scratch));
+ } else {
+ lfsu(dst, mem);
+ }
+}
void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch) {
@@ -4471,13 +4641,52 @@ void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
}
}
+void MacroAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ stfdux(src, MemOperand(base, scratch));
+ } else {
+ stfdu(src, mem);
+ }
+}
+
+void MacroAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ stfsx(src, MemOperand(base, scratch));
+ } else {
+ stfs(src, mem);
+ }
+}
+
+void MacroAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch) {
+ Register base = mem.ra();
+ int offset = mem.offset();
+
+ if (!is_int16(offset)) {
+ mov(scratch, Operand(offset));
+ stfsux(src, MemOperand(base, scratch));
+ } else {
+ stfsu(src, mem);
+ }
+}
+
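Each new FP helper above branches on is_int16(offset): PowerPC D-form encodings carry a signed 16-bit displacement, so larger offsets must go through a scratch register and the indexed X-form. The check, as a sketch:

    #include <cstdint>

    // True if `offset` fits the signed 16-bit immediate of a D-form instruction.
    bool IsInt16(int64_t offset) { return offset >= -32768 && offset <= 32767; }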
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg,
Register scratch2_reg,
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -4494,7 +4703,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- Xor(r0, scratch_reg, Operand(new_space_allocation_top));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ LoadP(ip, MemOperand(ip));
+ Xor(r0, scratch_reg, Operand(ip));
and_(r0, r0, mask, SetRC);
beq(&top_check, cr0);
// The object is on a different page than allocation top. Bail out if the
@@ -4508,7 +4719,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- Cmpi(scratch_reg, Operand(new_space_allocation_top), r0);
+ cmp(scratch_reg, ip);
bgt(no_memento_found);
// Memento map check.
bind(&map_check);
@@ -4528,8 +4739,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index a529b627f2..cf9d4b5719 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -19,8 +19,8 @@ const Register kReturnRegister1 = {Register::kCode_r4};
const Register kReturnRegister2 = {Register::kCode_r5};
const Register kJSFunctionRegister = {Register::kCode_r4};
const Register kContextRegister = {Register::kCode_r30};
+const Register kAllocateSizeRegister = {Register::kCode_r4};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r14};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
@@ -73,10 +73,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
// These exist to provide portability between 32-bit and 64-bit targets
#if V8_TARGET_ARCH_PPC64
-#define LoadPU ldu
#define LoadPX ldx
#define LoadPUX ldux
-#define StorePU stdu
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
@@ -90,10 +88,8 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
#define Mul mulld
#define Div divd
#else
-#define LoadPU lwzu
#define LoadPX lwzx
#define LoadPUX lwzux
-#define StorePU stwu
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
@@ -437,7 +433,8 @@ class MacroAssembler : public Assembler {
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
- void EnterExitFrame(bool save_doubles, int stack_space = 1);
+ void EnterExitFrame(bool save_doubles, int stack_space = 1,
+ StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
@@ -515,8 +512,25 @@ class MacroAssembler : public Assembler {
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
- void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
- void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
+ void LoadDouble(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void LoadSingle(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+ void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreDouble(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+
+ void StoreSingle(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
+ void StoreSingleU(DoubleRegister src, const MemOperand& mem,
+ Register scratch = no_reg);
// Move values between integer and floating point registers.
void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
@@ -573,7 +587,9 @@ class MacroAssembler : public Assembler {
// These exist to provide portability between 32-bit and 64-bit targets
void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+ void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -696,6 +712,15 @@ class MacroAssembler : public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is currently used only for folded allocations. It just
+ // increments the top pointer without checking against the limit, which is
+ // safe only because an earlier check proved the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register scratch1,
+ Register scratch2, AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ Register scratch, AllocationFlags flags);
+
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
@@ -720,7 +745,6 @@ class MacroAssembler : public Assembler {
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Register heap_number_map, Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
Register scratch1, Register scratch2,
@@ -1049,7 +1073,8 @@ class MacroAssembler : public Assembler {
void MovFromFloatResult(DoubleRegister dst);
// Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -1380,6 +1405,10 @@ class MacroAssembler : public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -1497,6 +1526,9 @@ class MacroAssembler : public Assembler {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in r3 and returns map with validated enum cache
// in r3. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -1632,17 +1664,8 @@ inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
-
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) \
- masm->stop(__FILE_LINE__); \
- masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 79dc8252b7..2816a87751 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -23,6 +23,8 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
@@ -34,7 +36,6 @@ namespace internal {
class PPCDebugger {
public:
explicit PPCDebugger(Simulator* sim) : sim_(sim) {}
- ~PPCDebugger();
void Stop(Instruction* instr);
void Debug();
@@ -61,53 +62,6 @@ class PPCDebugger {
void RedoBreakpoints();
};
-
-PPCDebugger::~PPCDebugger() {}
-
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void PPCDebugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
- char* msg = *msg_address;
- DCHECK(msg != NULL);
-
- // Update this stop description.
- if (isWatchedStop(code) && !watched_stops_[code].desc) {
- watched_stops_[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", msg);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
-}
-
-#else // ndef GENERATED_CODE_COVERAGE
-
-static void InitializeCoverage() {}
-
-
void PPCDebugger::Stop(Instruction* instr) {
// Get the stop code.
// use of kStopCodeMask not right on PowerPC
@@ -128,8 +82,6 @@ void PPCDebugger::Stop(Instruction* instr) {
sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
Debug();
}
-#endif
-
intptr_t PPCDebugger::GetRegisterValue(int regnum) {
return sim_->get_register(regnum);
@@ -315,7 +267,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR,
- Register::from_code(i).ToString(), value);
+ GetRegConfig()->GetGeneralRegisterName(i), value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -334,7 +286,7 @@ void PPCDebugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
- Register::from_code(i).ToString(), value, value);
+ GetRegConfig()->GetGeneralRegisterName(i), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -354,7 +306,7 @@ void PPCDebugger::Debug() {
dvalue = GetFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
- DoubleRegister::from_code(i).ToString(), dvalue,
+ GetRegConfig()->GetDoubleRegisterName(i), dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
}
@@ -707,7 +659,7 @@ void Simulator::set_last_debugger_input(char* input) {
}
-void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
@@ -729,9 +681,8 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
}
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry =
- i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@@ -741,8 +692,7 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -754,9 +704,7 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -789,7 +737,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ i_cache_ = new base::HashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -828,7 +776,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// some buffer below.
registers_[sp] =
reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
- InitializeCoverage();
last_debugger_input_ = NULL;
}
@@ -925,10 +872,10 @@ class Redirection {
// static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
- for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
@@ -1284,15 +1231,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(generic_target), dval0, dval1);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(generic_target), dval0);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
- FUNCTION_ADDR(generic_target), dval0, ival);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, ival);
break;
default:
UNREACHABLE();
@@ -1434,8 +1384,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
"Call to host function at %p,\n"
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
- FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5]);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
+ arg[2], arg[3], arg[4], arg[5]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
get_register(sp));
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index a3b03dc506..d3163e8a50 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -66,7 +66,7 @@ class SimulatorStack : public v8::internal::AllStatic {
// Running with a simulator.
#include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
#include "src/ppc/constants-ppc.h"
namespace v8 {
@@ -217,7 +217,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(HashMap* i_cache, Redirection* first);
+ static void TearDown(base::HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -239,8 +239,7 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
+ static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
@@ -330,10 +329,9 @@ class Simulator {
void ExecuteInstruction(Instruction* instr);
// ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+ static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+ static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
@@ -371,7 +369,7 @@ class Simulator {
char* last_debugger_input_;
// ICache simulation
- v8::internal::HashMap* i_cache_;
+ base::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
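The simulator hunks above only move HashMap from v8::internal to base; the call pattern is unchanged. As it appears inside the GetCachePage hunk (identifiers taken from the patch, so this fragment assumes V8 internals):

    base::HashMap map(&ICacheMatch);
    base::HashMap::Entry* entry = map.LookupOrInsert(page, ICacheHash(page));
    if (entry->value == NULL) {
      entry->value = new CachePage();  // first touch of this cache page
    }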
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 791cdf03f0..d094d0ecc6 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -149,11 +149,10 @@ void AddressToTraceMap::Clear() {
void AddressToTraceMap::Print() {
- PrintF("[AddressToTraceMap (%" V8_SIZET_PREFIX V8PRIuPTR "): \n",
- ranges_.size());
+ PrintF("[AddressToTraceMap (%" PRIuS "): \n", ranges_.size());
for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
- PrintF("[%p - %p] => %u\n", it->second.start, it->first,
- it->second.trace_node_id);
+ PrintF("[%p - %p] => %u\n", static_cast<void*>(it->second.start),
+ static_cast<void*>(it->first), it->second.trace_node_id);
}
PrintF("]\n");
}
@@ -191,12 +190,10 @@ void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
delete *info;
}
-
-AllocationTracker::AllocationTracker(
- HeapObjectsMap* ids, StringsStorage* names)
+AllocationTracker::AllocationTracker(HeapObjectsMap* ids, StringsStorage* names)
: ids_(ids),
names_(names),
- id_to_function_info_index_(HashMap::PointersMatch),
+ id_to_function_info_index_(base::HashMap::PointersMatch),
info_index_for_other_state_(0) {
FunctionInfo* info = new FunctionInfo();
info->name = "(root)";
@@ -231,7 +228,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
Isolate* isolate = heap->isolate();
int length = 0;
- StackTraceFrameIterator it(isolate);
+ JavaScriptFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
SharedFunctionInfo* shared = frame->function()->shared();
@@ -262,7 +259,7 @@ static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) {
unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
SnapshotObjectId id) {
- HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
+ base::HashMap::Entry* entry = id_to_function_info_index_.LookupOrInsert(
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
if (entry->value == NULL) {
FunctionInfo* info = new FunctionInfo();
@@ -307,9 +304,8 @@ AllocationTracker::UnresolvedLocation::UnresolvedLocation(
info_(info) {
script_ = Handle<Script>::cast(
script->GetIsolate()->global_handles()->Create(script));
- GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()),
- this,
- &HandleWeakScript);
+ GlobalHandles::MakeWeak(reinterpret_cast<Object**>(script_.location()), this,
+ &HandleWeakScript, v8::WeakCallbackType::kParameter);
}
@@ -327,9 +323,8 @@ void AllocationTracker::UnresolvedLocation::Resolve() {
info_->column = Script::GetColumnNumber(script_, start_position_);
}
-
void AllocationTracker::UnresolvedLocation::HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+ const v8::WeakCallbackInfo<void>& data) {
UnresolvedLocation* loc =
reinterpret_cast<UnresolvedLocation*>(data.GetParameter());
GlobalHandles::Destroy(reinterpret_cast<Object**>(loc->script_.location()));
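The tracker hunks above migrate from WeakCallbackData to WeakCallbackInfo and register with v8::WeakCallbackType::kParameter, so the callback receives only the void* parameter, not the dying handle. The public-API analogue, a sketch:

    #include "include/v8.h"

    static void OnWeak(const v8::WeakCallbackInfo<void>& data) {
      void* parameter = data.GetParameter();  // all that survives under kParameter
      (void)parameter;
    }

    // Registration: persistent.SetWeak(parameter, OnWeak,
    //                                  v8::WeakCallbackType::kParameter);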
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index 03802a5c66..45bd446714 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -8,8 +8,8 @@
#include <map>
#include "include/v8-profiler.h"
+#include "src/base/hashmap.h"
#include "src/handles.h"
-#include "src/hashmap.h"
#include "src/list.h"
#include "src/vector.h"
@@ -129,8 +129,7 @@ class AllocationTracker {
void Resolve();
private:
- static void HandleWeakScript(
- const v8::WeakCallbackData<v8::Value, void>& data);
+ static void HandleWeakScript(const v8::WeakCallbackInfo<void>& data);
Handle<Script> script_;
int start_position_;
@@ -144,7 +143,7 @@ class AllocationTracker {
AllocationTraceTree trace_tree_;
unsigned allocation_trace_buffer_[kMaxAllocationTraceLength];
List<FunctionInfo*> function_info_list_;
- HashMap id_to_function_info_index_;
+ base::HashMap id_to_function_info_index_;
List<UnresolvedLocation*> unresolved_locations_;
unsigned info_index_for_other_state_;
AddressToTraceMap address_to_trace_;
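Aside: the header now takes HashMap from src/base. A minimal sketch of the lookup-or-create idiom the tracker uses with it; the key and hash arguments mirror the hunks above, but the surrounding function and counter are illustrative, not from the patch:

#include "src/base/hashmap.h"

static intptr_t next_index = 1;  // 1-based so that NULL still means "absent"

unsigned IndexFor(base::HashMap* map, void* key, uint32_t hash) {
  base::HashMap::Entry* entry = map->LookupOrInsert(key, hash);
  if (entry->value == NULL) {
    // First sighting of this key: record a fresh index in the entry.
    entry->value = reinterpret_cast<void*>(next_index++);
  }
  return static_cast<unsigned>(reinterpret_cast<intptr_t>(entry->value));
}

// Construction mirrors the initializer list in the hunk above:
//   base::HashMap map(base::HashMap::PointersMatch);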
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 45e4ccf136..504c3f6e1a 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -35,7 +35,7 @@ void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(start);
- if (entry != NULL) entry->set_deopt_info(deopt_reason, position, pc_offset);
+ if (entry != NULL) entry->set_deopt_info(deopt_reason, position, deopt_id);
}
@@ -50,17 +50,6 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
-TickSample* CpuProfiler::StartTickSample() {
- if (is_profiling_) return processor_->StartTickSample();
- return NULL;
-}
-
-
-void CpuProfiler::FinishTickSample() {
- processor_->FinishTickSample();
-}
-
-
TickSample* ProfilerEventsProcessor::StartTickSample() {
void* address = ticks_buffer_.StartEnqueue();
if (address == NULL) return NULL;
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 47585b7b08..7a0cf9c8bf 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -12,28 +12,49 @@
#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
-#include "include/v8-profiler.h"
-
namespace v8 {
namespace internal {
static const int kProfilerStackSize = 64 * KB;
+class CpuSampler : public sampler::Sampler {
+ public:
+ CpuSampler(Isolate* isolate, ProfilerEventsProcessor* processor)
+ : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
+ processor_(processor) {}
+
+ void SampleStack(const v8::RegisterState& regs) override {
+ TickSample* sample = processor_->StartTickSample();
+ if (sample == nullptr) return;
+ Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
+ sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
+ if (is_counting_samples_ && !sample->timestamp.IsNull()) {
+ if (sample->state == JS) ++js_sample_count_;
+ if (sample->state == EXTERNAL) ++external_sample_count_;
+ }
+ processor_->FinishTickSample();
+ }
+
+ private:
+ ProfilerEventsProcessor* processor_;
+};
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
+ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
+ ProfileGenerator* generator,
base::TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
- sampler_(sampler),
+ sampler_(new CpuSampler(isolate, this)),
running_(1),
period_(period),
last_code_event_id_(0),
- last_processed_code_event_id_(0) {}
-
-
-ProfilerEventsProcessor::~ProfilerEventsProcessor() {}
+ last_processed_code_event_id_(0) {
+ sampler_->IncreaseProfilingDepth();
+}
+ProfilerEventsProcessor::~ProfilerEventsProcessor() {
+ sampler_->DecreaseProfilingDepth();
+}
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
event.generic.order = last_code_event_id_.Increment(1);
@@ -49,7 +70,7 @@ void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
regs.sp = fp - fp_to_sp_delta;
regs.fp = fp;
regs.pc = from;
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false, false);
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -64,7 +85,8 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
regs.fp = frame->fp();
regs.pc = frame->pc();
}
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats,
+ false);
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -199,257 +221,23 @@ void CpuProfiler::DeleteProfile(CpuProfile* profile) {
}
}
-
-void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = entry_point;
- rec->entry = profiles_->NewCodeEntry(
- Logger::CALLBACK_TAG,
- profiles_->GetName(name));
- rec->size = 1;
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code, const char* name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code, Name* name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(shared->DebugName()),
- CodeEntry::kEmptyNamePrefix,
- profiles_->GetName(InferScriptName(script_name, shared)),
- CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- if (info) {
- rec->entry->set_inlined_function_infos(info->inlined_function_infos());
- }
- rec->entry->FillFunctionInfo(shared);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* abstract_code,
- SharedFunctionInfo* shared,
- CompilationInfo* info, Name* script_name,
- int line, int column) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = abstract_code->address();
- Script* script = Script::cast(shared->script());
- JITLineInfoTable* line_table = NULL;
- if (script) {
- if (abstract_code->IsCode()) {
- Code* code = abstract_code->GetCode();
- int start_position = shared->start_position();
- int end_position = shared->end_position();
- line_table = new JITLineInfoTable();
- for (RelocIterator it(code); !it.done(); it.next()) {
- RelocInfo* reloc_info = it.rinfo();
- if (!RelocInfo::IsPosition(reloc_info->rmode())) continue;
- int position = static_cast<int>(reloc_info->data());
- // TODO(alph): in case of inlining the position may correspond
- // to an inlined function source code. Do not collect positions
- // that fall beyond the function source code. There's however a
- // chance the inlined function has similar positions but in another
- // script. So the proper fix is to store script_id in some form
- // along with the inlined function positions.
- if (position < start_position || position >= end_position) continue;
- int pc_offset = static_cast<int>(reloc_info->pc() - code->address());
- int line_number = script->GetLineNumber(position) + 1;
- line_table->SetPosition(pc_offset, line_number);
- }
- } else {
- BytecodeArray* bytecode = abstract_code->GetBytecodeArray();
- line_table = new JITLineInfoTable();
- interpreter::SourcePositionTableIterator it(
- bytecode->source_position_table());
- for (; !it.done(); it.Advance()) {
- int line_number = script->GetLineNumber(it.source_position()) + 1;
- int pc_offset = it.bytecode_offset() + BytecodeArray::kHeaderSize;
- line_table->SetPosition(pc_offset, line_number);
- }
- }
- }
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetFunctionName(shared->DebugName()),
- CodeEntry::kEmptyNamePrefix,
- profiles_->GetName(InferScriptName(script_name, shared)), line, column,
- line_table, abstract_code->instruction_start());
- RecordInliningInfo(rec->entry, abstract_code);
- if (info) {
- rec->entry->set_inlined_function_infos(info->inlined_function_infos());
- }
- rec->entry->FillFunctionInfo(shared);
- rec->size = abstract_code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
- AbstractCode* code, int args_count) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- tag, profiles_->GetName(args_count), "args_count: ",
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- RecordInliningInfo(rec->entry, code);
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeMoveEvent(AbstractCode* from, Address to) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
- CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->from = from->address();
- rec->to = to;
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
- CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
- rec->start = code->address();
- rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
- CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
- Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
- rec->start = code->address();
- rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
- rec->position = info.position;
- rec->pc_offset = pc - code->instruction_start();
- processor_->Enqueue(evt_rec);
- processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
-}
-
-void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = entry_point;
- rec->entry = profiles_->NewCodeEntry(
- Logger::CALLBACK_TAG,
- profiles_->GetName(name),
- "get ");
- rec->size = 1;
- processor_->Enqueue(evt_rec);
-}
-
-void CpuProfiler::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = code->address();
- rec->entry = profiles_->NewCodeEntry(
- Logger::REG_EXP_TAG, profiles_->GetName(source), "RegExp: ",
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- rec->size = code->ExecutableSize();
- processor_->Enqueue(evt_rec);
-}
-
-
-void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = entry_point;
- rec->entry = profiles_->NewCodeEntry(
- Logger::CALLBACK_TAG,
- profiles_->GetName(name),
- "set ");
- rec->size = 1;
- processor_->Enqueue(evt_rec);
-}
-
-Name* CpuProfiler::InferScriptName(Name* name, SharedFunctionInfo* info) {
- if (name->IsString() && String::cast(name)->length()) return name;
- if (!info->script()->IsScript()) return name;
- Object* source_url = Script::cast(info->script())->source_url();
- return source_url->IsName() ? Name::cast(source_url) : name;
-}
-
-void CpuProfiler::RecordInliningInfo(CodeEntry* entry,
- AbstractCode* abstract_code) {
- if (!abstract_code->IsCode()) return;
- Code* code = abstract_code->GetCode();
- if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
- DeoptimizationInputData* deopt_input_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int deopt_count = deopt_input_data->DeoptCount();
- for (int i = 0; i < deopt_count; i++) {
- int pc_offset = deopt_input_data->Pc(i)->value();
- if (pc_offset == -1) continue;
- int translation_index = deopt_input_data->TranslationIndex(i)->value();
- TranslationIterator it(deopt_input_data->TranslationByteArray(),
- translation_index);
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- int depth = 0;
- std::vector<CodeEntry*> inline_stack;
- while (it.HasNext() &&
- Translation::BEGIN !=
- (opcode = static_cast<Translation::Opcode>(it.Next()))) {
- if (opcode != Translation::JS_FRAME &&
- opcode != Translation::INTERPRETED_FRAME) {
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- continue;
- }
- it.Next(); // Skip ast_id
- int shared_info_id = it.Next();
- it.Next(); // Skip height
- SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
- deopt_input_data->LiteralArray()->get(shared_info_id));
- if (!depth++) continue; // Skip the current function itself.
- CodeEntry* inline_entry = new CodeEntry(
- entry->tag(), profiles_->GetFunctionName(shared_info->DebugName()),
- CodeEntry::kEmptyNamePrefix, entry->resource_name(),
- CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
- inline_entry->FillFunctionInfo(shared_info);
- inline_stack.push_back(inline_entry);
- }
- if (!inline_stack.empty()) {
- entry->AddInlineStack(pc_offset, inline_stack);
- DCHECK(inline_stack.empty());
+void CpuProfiler::CodeEventHandler(const CodeEventsContainer& evt_rec) {
+ switch (evt_rec.generic.type) {
+ case CodeEventRecord::CODE_CREATION:
+ case CodeEventRecord::CODE_MOVE:
+ case CodeEventRecord::CODE_DISABLE_OPT:
+ processor_->Enqueue(evt_rec);
+ break;
+ case CodeEventRecord::CODE_DEOPT: {
+ const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+ Address pc = reinterpret_cast<Address>(rec->pc);
+ int fp_to_sp_delta = rec->fp_to_sp_delta;
+ processor_->Enqueue(evt_rec);
+ processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
+ break;
}
+ default:
+ UNREACHABLE();
}
}
@@ -457,15 +245,12 @@ CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
- profiles_(new CpuProfilesCollection(isolate->heap())),
- generator_(NULL),
- processor_(NULL),
+ profiles_(new CpuProfilesCollection(isolate)),
is_profiling_(false) {
+ profiles_->set_cpu_profiler(this);
}
-
-CpuProfiler::CpuProfiler(Isolate* isolate,
- CpuProfilesCollection* test_profiles,
+CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
@@ -475,28 +260,25 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
generator_(test_generator),
processor_(test_processor),
is_profiling_(false) {
+ profiles_->set_cpu_profiler(this);
}
-
CpuProfiler::~CpuProfiler() {
DCHECK(!is_profiling_);
- delete profiles_;
}
-
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
DCHECK(!is_profiling_);
sampling_interval_ = value;
}
-
void CpuProfiler::ResetProfiles() {
- delete profiles_;
- profiles_ = new CpuProfilesCollection(isolate()->heap());
+ profiles_.reset(new CpuProfilesCollection(isolate_));
+ profiles_->set_cpu_profiler(this);
}
void CpuProfiler::CollectSample() {
- if (processor_ != NULL) {
+ if (processor_) {
processor_->AddCurrentStack(isolate_);
}
}
@@ -515,7 +297,7 @@ void CpuProfiler::StartProfiling(String* title, bool record_samples) {
void CpuProfiler::StartProcessorIfNotStarted() {
- if (processor_ != NULL) {
+ if (processor_) {
processor_->AddCurrentStack(isolate_);
return;
}
@@ -523,11 +305,14 @@ void CpuProfiler::StartProcessorIfNotStarted() {
// Disable logging when using the new implementation.
saved_is_logging_ = logger->is_logging_;
logger->is_logging_ = false;
- generator_ = new ProfileGenerator(profiles_);
- Sampler* sampler = logger->sampler();
- processor_ = new ProfilerEventsProcessor(
- generator_, sampler, sampling_interval_);
+ generator_.reset(new ProfileGenerator(profiles_.get()));
+ processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
+ sampling_interval_));
+ logger->SetUpProfilerListener();
+ ProfilerListener* profiler_listener = logger->profiler_listener();
+ profiler_listener->AddObserver(this);
is_profiling_ = true;
+ isolate_->set_is_profiling(true);
// Enumerate stuff we already have in the heap.
DCHECK(isolate_->heap()->HasBeenSetUp());
if (!FLAG_prof_browser_mode) {
@@ -537,18 +322,16 @@ void CpuProfiler::StartProcessorIfNotStarted() {
logger->LogAccessorCallbacks();
LogBuiltins();
// Enable stack sampling.
- sampler->SetHasProcessingThread(true);
- sampler->IncreaseProfilingDepth();
processor_->AddCurrentStack(isolate_);
processor_->StartSynchronously();
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
- if (!is_profiling_) return NULL;
+ if (!is_profiling_) return nullptr;
StopProcessorIfLastProfile(title);
CpuProfile* result = profiles_->StopProfiling(title);
- if (result != NULL) {
+ if (result) {
result->Print();
}
return result;
@@ -556,7 +339,7 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
CpuProfile* CpuProfiler::StopProfiling(String* title) {
- if (!is_profiling_) return NULL;
+ if (!is_profiling_) return nullptr;
const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
return profiles_->StopProfiling(profile_title);
@@ -564,21 +347,22 @@ CpuProfile* CpuProfiler::StopProfiling(String* title) {
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
- if (profiles_->IsLastProfile(title)) StopProcessor();
+ if (profiles_->IsLastProfile(title)) {
+ StopProcessor();
+ }
}
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
- Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
is_profiling_ = false;
+ isolate_->set_is_profiling(false);
+ ProfilerListener* profiler_listener = logger->profiler_listener();
+ profiler_listener->RemoveObserver(this);
processor_->StopSynchronously();
- delete processor_;
- delete generator_;
- processor_ = NULL;
- generator_ = NULL;
- sampler->SetHasProcessingThread(false);
- sampler->DecreaseProfilingDepth();
+ logger->TearDownProfilerListener();
+ processor_.reset();
+ generator_.reset();
logger->is_logging_ = saved_is_logging_;
}
@@ -596,6 +380,5 @@ void CpuProfiler::LogBuiltins() {
}
}
-
} // namespace internal
} // namespace v8
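Aside: the rewrite above collapses a dozen per-event CodeCreateEvent overloads into a single CodeEventHandler dispatched over evt_rec.generic.type, with CpuProfiler registering itself as an observer at start and deregistering at stop. ProfilerListener's internals are not part of this patch, so the container and fan-out below are assumptions; only the observer interface mirrors the hunks:

#include <algorithm>
#include <vector>

class CodeEventsContainer;  // defined in cpu-profiler.h (see below)

class CodeEventObserver {
 public:
  virtual ~CodeEventObserver() = default;
  virtual void CodeEventHandler(const CodeEventsContainer& evt_rec) = 0;
};

class ProfilerListener {
 public:
  void AddObserver(CodeEventObserver* observer) {
    observers_.push_back(observer);
  }
  void RemoveObserver(CodeEventObserver* observer) {
    observers_.erase(
        std::remove(observers_.begin(), observers_.end(), observer),
        observers_.end());
  }
  void Dispatch(const CodeEventsContainer& evt_rec) {
    // Fans each code event out to e.g. CpuProfiler::CodeEventHandler.
    for (CodeEventObserver* observer : observers_)
      observer->CodeEventHandler(evt_rec);
  }

 private:
  std::vector<CodeEventObserver*> observers_;
};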
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index a04ee3c3a8..e3df609f89 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -5,14 +5,19 @@
#ifndef V8_PROFILER_CPU_PROFILER_H_
#define V8_PROFILER_CPU_PROFILER_H_
+#include <memory>
+
#include "src/allocation.h"
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/compiler.h"
+#include "src/isolate.h"
+#include "src/libsampler/sampler.h"
#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
-#include "src/profiler/sampler.h"
+#include "src/profiler/profiler-listener.h"
+#include "src/profiler/tick-sample.h"
namespace v8 {
namespace internal {
@@ -20,7 +25,6 @@ namespace internal {
// Forward declarations.
class CodeEntry;
class CodeMap;
-class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
@@ -81,7 +85,9 @@ class CodeDeoptEventRecord : public CodeEventRecord {
Address start;
const char* deopt_reason;
SourcePosition position;
- size_t pc_offset;
+ int deopt_id;
+ void* pc;
+ int fp_to_sp_delta;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -127,8 +133,7 @@ class CodeEventsContainer {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public base::Thread {
public:
- ProfilerEventsProcessor(ProfileGenerator* generator,
- Sampler* sampler,
+ ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator,
base::TimeDelta period);
virtual ~ProfilerEventsProcessor();
@@ -154,6 +159,8 @@ class ProfilerEventsProcessor : public base::Thread {
void* operator new(size_t size);
void operator delete(void* ptr);
+ sampler::Sampler* sampler() { return sampler_.get(); }
+
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
@@ -166,7 +173,7 @@ class ProfilerEventsProcessor : public base::Thread {
SampleProcessingResult ProcessOneSample();
ProfileGenerator* generator_;
- Sampler* sampler_;
+ std::unique_ptr<sampler::Sampler> sampler_;
base::Atomic32 running_;
const base::TimeDelta period_; // Samples & code events processing period.
LockedQueue<CodeEventsContainer> events_buffer_;
@@ -176,28 +183,15 @@ class ProfilerEventsProcessor : public base::Thread {
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
LockedQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
- AtomicNumber<unsigned> last_code_event_id_;
+ base::AtomicNumber<unsigned> last_code_event_id_;
unsigned last_processed_code_event_id_;
};
-
-#define PROFILE(IsolateGetter, Call) \
- do { \
- Isolate* cpu_profiler_isolate = (IsolateGetter); \
- v8::internal::Logger* logger = cpu_profiler_isolate->logger(); \
- CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \
- if (logger->is_logging_code_events() || cpu_profiler->is_profiling()) { \
- logger->Call; \
- } \
- } while (false)
-
-
-class CpuProfiler : public CodeEventListener {
+class CpuProfiler : public CodeEventObserver {
public:
explicit CpuProfiler(Isolate* isolate);
- CpuProfiler(Isolate* isolate,
- CpuProfilesCollection* test_collection,
+ CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor);
@@ -214,42 +208,12 @@ class CpuProfiler : public CodeEventListener {
void DeleteAllProfiles();
void DeleteProfile(CpuProfile* profile);
- // Invoked from stack sampler (thread or signal handler.)
- inline TickSample* StartTickSample();
- inline void FinishTickSample();
+ void CodeEventHandler(const CodeEventsContainer& evt_rec) override;
- // Must be called via PROFILE macro, otherwise will crash when
- // profiling is not enabled.
- void CallbackEvent(Name* name, Address entry_point) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- const char* comment) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- Name* name) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* script_name) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, CompilationInfo* info,
- Name* script_name, int line, int column) override;
- void CodeCreateEvent(Logger::LogEventsAndTags tag, AbstractCode* code,
- int args_count) override;
- void CodeMovingGCEvent() override {}
- void CodeMoveEvent(AbstractCode* from, Address to) override;
- void CodeDisableOptEvent(AbstractCode* code,
- SharedFunctionInfo* shared) override;
- void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
- void GetterCallbackEvent(Name* name, Address entry_point) override;
- void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
- void SetterCallbackEvent(Name* name, Address entry_point) override;
- void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
-
- INLINE(bool is_profiling() const) { return is_profiling_; }
- bool* is_profiling_address() {
- return &is_profiling_;
- }
+ bool is_profiling() const { return is_profiling_; }
- ProfileGenerator* generator() const { return generator_; }
- ProfilerEventsProcessor* processor() const { return processor_; }
+ ProfileGenerator* generator() const { return generator_.get(); }
+ ProfilerEventsProcessor* processor() const { return processor_.get(); }
Isolate* isolate() const { return isolate_; }
private:
@@ -258,14 +222,12 @@ class CpuProfiler : public CodeEventListener {
void StopProcessor();
void ResetProfiles();
void LogBuiltins();
- void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
- Name* InferScriptName(Name* name, SharedFunctionInfo* info);
- Isolate* isolate_;
+ Isolate* const isolate_;
base::TimeDelta sampling_interval_;
- CpuProfilesCollection* profiles_;
- ProfileGenerator* generator_;
- ProfilerEventsProcessor* processor_;
+ std::unique_ptr<CpuProfilesCollection> profiles_;
+ std::unique_ptr<ProfileGenerator> generator_;
+ std::unique_ptr<ProfilerEventsProcessor> processor_;
bool saved_is_logging_;
bool is_profiling_;
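Aside: the header changes above also make the sampler's lifetime RAII: the processor owns its CpuSampler through std::unique_ptr and arms/disarms it in the constructor/destructor pair, replacing the explicit SetHasProcessingThread/IncreaseProfilingDepth calls at start/stop. Condensed from the hunks, with bodies paraphrased and the generator/period parameters omitted:

#include <memory>

class ProfilerEventsProcessor {
 public:
  explicit ProfilerEventsProcessor(Isolate* isolate)
      : sampler_(new CpuSampler(isolate, this)) {
    sampler_->IncreaseProfilingDepth();  // arm sampling for this isolate
  }
  ~ProfilerEventsProcessor() {
    sampler_->DecreaseProfilingDepth();  // disarm before teardown
  }

 private:
  std::unique_ptr<sampler::Sampler> sampler_;
};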
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 1305cae66e..2df28a7958 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -34,7 +34,7 @@ HeapProfiler::~HeapProfiler() {
void HeapProfiler::DeleteAllSnapshots() {
snapshots_.Iterate(DeleteHeapSnapshot);
snapshots_.Clear();
- names_.Reset(new StringsStorage(heap()));
+ names_.reset(new StringsStorage(heap()));
}
@@ -84,20 +84,20 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
return result;
}
-
-bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
- int stack_depth) {
+bool HeapProfiler::StartSamplingHeapProfiler(
+ uint64_t sample_interval, int stack_depth,
+ v8::HeapProfiler::SamplingFlags flags) {
if (sampling_heap_profiler_.get()) {
return false;
}
- sampling_heap_profiler_.Reset(new SamplingHeapProfiler(
- heap(), names_.get(), sample_interval, stack_depth));
+ sampling_heap_profiler_.reset(new SamplingHeapProfiler(
+ heap(), names_.get(), sample_interval, stack_depth, flags));
return true;
}
void HeapProfiler::StopSamplingHeapProfiler() {
- sampling_heap_profiler_.Reset(nullptr);
+ sampling_heap_profiler_.reset();
}
@@ -115,7 +115,7 @@ void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
is_tracking_object_moves_ = true;
DCHECK(!is_tracking_allocations());
if (track_allocations) {
- allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
+ allocation_tracker_.reset(new AllocationTracker(ids_.get(), names_.get()));
heap()->DisableInlineAllocation();
heap()->isolate()->debug()->feature_tracker()->Track(
DebugFeatureTracker::kAllocationTracking);
@@ -132,7 +132,7 @@ SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream,
void HeapProfiler::StopHeapObjectsTracking() {
ids_->StopHeapObjectsTracking();
if (is_tracking_allocations()) {
- allocation_tracker_.Reset(NULL);
+ allocation_tracker_.reset();
heap()->EnableInlineAllocation();
}
}
@@ -170,7 +170,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
base::LockGuard<base::Mutex> guard(&profiler_mutex_);
bool known_object = ids_->MoveObject(from, to, size);
- if (!known_object && !allocation_tracker_.is_empty()) {
+ if (!known_object && allocation_tracker_) {
allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
}
}
@@ -178,7 +178,7 @@ void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
void HeapProfiler::AllocationEvent(Address addr, int size) {
DisallowHeapAllocation no_allocation;
- if (!allocation_tracker_.is_empty()) {
+ if (allocation_tracker_) {
allocation_tracker_->AllocationEvent(addr, size);
}
}
@@ -214,7 +214,7 @@ Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
void HeapProfiler::ClearHeapObjectMap() {
- ids_.Reset(new HeapObjectsMap(heap()));
+ ids_.reset(new HeapObjectsMap(heap()));
if (!is_tracking_allocations()) is_tracking_object_moves_ = false;
}
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 32e143c74f..3e1dcb54f9 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -5,7 +5,8 @@
#ifndef V8_PROFILER_HEAP_PROFILER_H_
#define V8_PROFILER_HEAP_PROFILER_H_
-#include "src/base/smart-pointers.h"
+#include <memory>
+
#include "src/isolate.h"
#include "src/list.h"
@@ -30,9 +31,10 @@ class HeapProfiler {
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
- bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth);
+ bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth,
+ v8::HeapProfiler::SamplingFlags);
void StopSamplingHeapProfiler();
- bool is_sampling_allocations() { return !sampling_heap_profiler_.is_empty(); }
+ bool is_sampling_allocations() { return !!sampling_heap_profiler_; }
AllocationProfile* GetAllocationProfile();
void StartHeapObjectsTracking(bool track_allocations);
@@ -65,9 +67,7 @@ class HeapProfiler {
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
- bool is_tracking_allocations() const {
- return !allocation_tracker_.is_empty();
- }
+ bool is_tracking_allocations() const { return !!allocation_tracker_; }
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
void ClearHeapObjectMap();
@@ -78,14 +78,16 @@ class HeapProfiler {
Heap* heap() const;
// Mapping from HeapObject addresses to objects' uids.
- base::SmartPointer<HeapObjectsMap> ids_;
+ std::unique_ptr<HeapObjectsMap> ids_;
List<HeapSnapshot*> snapshots_;
- base::SmartPointer<StringsStorage> names_;
+ std::unique_ptr<StringsStorage> names_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
- base::SmartPointer<AllocationTracker> allocation_tracker_;
+ std::unique_ptr<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
- base::SmartPointer<SamplingHeapProfiler> sampling_heap_profiler_;
+ std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
};
} // namespace internal
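Aside: heap-profiler.{h,cc} swap base::SmartPointer for std::unique_ptr, which renames Reset() to reset() and replaces is_empty() with the pointer's boolean conversion. The mechanical mapping, using the allocation-tracker member from the hunks above (the function shell is illustrative):

#include <memory>

std::unique_ptr<AllocationTracker> allocation_tracker_;

void Sketch(HeapObjectsMap* ids, StringsStorage* names, Address addr, int size) {
  allocation_tracker_.reset(new AllocationTracker(ids, names));
  // was: allocation_tracker_.Reset(new AllocationTracker(ids, names));

  if (allocation_tracker_) allocation_tracker_->AllocationEvent(addr, size);
  // was: if (!allocation_tracker_.is_empty()) ...

  allocation_tracker_.reset();
  // was: allocation_tracker_.Reset(NULL);
}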
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 748f3074a1..9273168f80 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -80,8 +80,8 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
- base::OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", self_size(), id(), indent,
- ' ', prefix, edge_name);
+ base::OS::Print("%6" PRIuS " @%6u %*c %s%s: ", self_size(), id(), indent, ' ',
+ prefix, edge_name);
if (type() != kString) {
base::OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -392,7 +392,7 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
entries_.at(to_entry_info_index).addr = NULL;
}
} else {
- HashMap::Entry* to_entry =
+ base::HashMap::Entry* to_entry =
entries_map_.LookupOrInsert(to, ComputePointerHash(to));
if (to_entry->value != NULL) {
// We found the existing entry with to address for an old object.
@@ -412,10 +412,8 @@ bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
// object is migrated.
if (FLAG_heap_profiler_trace_objects) {
PrintF("Move object from %p to %p old size %6d new size %6d\n",
- from,
- to,
- entries_.at(from_entry_info_index).size,
- object_size);
+ static_cast<void*>(from), static_cast<void*>(to),
+ entries_.at(from_entry_info_index).size, object_size);
}
entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
@@ -430,7 +428,8 @@ void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr));
+ base::HashMap::Entry* entry =
+ entries_map_.Lookup(addr, ComputePointerHash(addr));
if (entry == NULL) return 0;
int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
@@ -443,7 +442,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
unsigned int size,
bool accessed) {
DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
entries_map_.LookupOrInsert(addr, ComputePointerHash(addr));
if (entry->value != NULL) {
int entry_index =
@@ -452,9 +451,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
entry_info.accessed = accessed;
if (FLAG_heap_profiler_trace_objects) {
PrintF("Update object size : %p with old size %d and new size %d\n",
- addr,
- entry_info.size,
- size);
+ static_cast<void*>(addr), entry_info.size, size);
}
entry_info.size = size;
return entry_info.id;
@@ -487,9 +484,8 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
FindOrAddEntry(obj->address(), obj->Size());
if (FLAG_heap_profiler_trace_objects) {
PrintF("Update object : %p %6d. Next address is %p\n",
- obj->address(),
- obj->Size(),
- obj->address() + obj->Size());
+ static_cast<void*>(obj->address()), obj->Size(),
+ static_cast<void*>(obj->address() + obj->Size()));
}
}
RemoveDeadEntries();
@@ -517,20 +513,16 @@ struct HeapObjectInfo {
void Print() const {
if (expected_size == 0) {
PrintF("Untracked object : %p %6d. Next address is %p\n",
- obj->address(),
- obj->Size(),
- obj->address() + obj->Size());
+ static_cast<void*>(obj->address()), obj->Size(),
+ static_cast<void*>(obj->address() + obj->Size()));
} else if (obj->Size() != expected_size) {
- PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
- expected_size,
- obj->address(),
- obj->Size(),
- obj->address() + obj->Size());
+ PrintF("Wrong size %6d: %p %6d. Next address is %p\n", expected_size,
+ static_cast<void*>(obj->address()), obj->Size(),
+ static_cast<void*>(obj->address() + obj->Size()));
} else {
PrintF("Good object : %p %6d. Next address is %p\n",
- obj->address(),
- expected_size,
- obj->address() + obj->Size());
+ static_cast<void*>(obj->address()), expected_size,
+ static_cast<void*>(obj->address() + obj->Size()));
}
}
};
@@ -554,7 +546,7 @@ int HeapObjectsMap::FindUntrackedObjects() {
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
entries_map_.Lookup(obj->address(), ComputePointerHash(obj->address()));
if (entry == NULL) {
++untracked;
@@ -674,7 +666,7 @@ void HeapObjectsMap::RemoveDeadEntries() {
entries_.at(first_free_entry) = entry_info;
}
entries_.at(first_free_entry).accessed = false;
- HashMap::Entry* entry = entries_map_.Lookup(
+ base::HashMap::Entry* entry = entries_map_.Lookup(
entry_info.addr, ComputePointerHash(entry_info.addr));
DCHECK(entry);
entry->value = reinterpret_cast<void*>(first_free_entry);
@@ -707,37 +699,28 @@ SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
size_t HeapObjectsMap::GetUsedMemorySize() const {
- return
- sizeof(*this) +
- sizeof(HashMap::Entry) * entries_map_.capacity() +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(time_intervals_);
-}
-
-
-HeapEntriesMap::HeapEntriesMap()
- : entries_(HashMap::PointersMatch) {
+ return sizeof(*this) +
+ sizeof(base::HashMap::Entry) * entries_map_.capacity() +
+ GetMemoryUsedByList(entries_) + GetMemoryUsedByList(time_intervals_);
}
+HeapEntriesMap::HeapEntriesMap() : entries_(base::HashMap::PointersMatch) {}
int HeapEntriesMap::Map(HeapThing thing) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
+ base::HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing));
if (cache_entry == NULL) return HeapEntry::kNoEntry;
return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
}
void HeapEntriesMap::Pair(HeapThing thing, int entry) {
- HashMap::Entry* cache_entry = entries_.LookupOrInsert(thing, Hash(thing));
+ base::HashMap::Entry* cache_entry =
+ entries_.LookupOrInsert(thing, Hash(thing));
DCHECK(cache_entry->value == NULL);
cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
}
-
-HeapObjectsSet::HeapObjectsSet()
- : entries_(HashMap::PointersMatch) {
-}
-
+HeapObjectsSet::HeapObjectsSet() : entries_(base::HashMap::PointersMatch) {}
void HeapObjectsSet::Clear() {
entries_.Clear();
@@ -760,7 +743,7 @@ void HeapObjectsSet::Insert(Object* obj) {
const char* HeapObjectsSet::GetTag(Object* obj) {
HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
+ base::HashMap::Entry* cache_entry =
entries_.Lookup(object, HeapEntriesMap::Hash(object));
return cache_entry != NULL
? reinterpret_cast<const char*>(cache_entry->value)
@@ -768,10 +751,10 @@ const char* HeapObjectsSet::GetTag(Object* obj) {
}
-void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
+V8_NOINLINE void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
if (!obj->IsHeapObject()) return;
HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
+ base::HashMap::Entry* cache_entry =
entries_.LookupOrInsert(object, HeapEntriesMap::Hash(object));
cache_entry->value = const_cast<char*>(tag);
}
@@ -1003,8 +986,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
- intptr_t index =
- static_cast<intptr_t>(p - HeapObject::RawField(parent_obj_, 0));
+ int index = static_cast<int>(p - HeapObject::RawField(parent_obj_, 0));
++next_index_;
// |p| could be outside of the object, e.g., while visiting RelocInfo of
// code objects.
@@ -1012,7 +994,8 @@ class IndexedReferencesExtractor : public ObjectVisitor {
generator_->marks_[index] = false;
continue;
}
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p);
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p,
+ index * kPointerSize);
}
}
@@ -1058,14 +1041,14 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractAccessorInfoReferences(entry, AccessorInfo::cast(obj));
} else if (obj->IsAccessorPair()) {
ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
- } else if (obj->IsCodeCache()) {
- ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
} else if (obj->IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
} else if (obj->IsBox()) {
ExtractBoxReferences(entry, Box::cast(obj));
} else if (obj->IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
+ } else if (obj->IsWeakCell()) {
+ ExtractWeakCellReferences(entry, WeakCell::cast(obj));
} else if (obj->IsPropertyCell()) {
ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
} else if (obj->IsAllocationSite()) {
@@ -1108,9 +1091,11 @@ void V8HeapExplorer::ExtractJSObjectReferences(
TagObject(js_fun->bound_arguments(), "(bound arguments)");
SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
JSBoundFunction::kBoundArgumentsOffset);
- SetNativeBindReference(js_obj, entry, "bound_this", js_fun->bound_this());
- SetNativeBindReference(js_obj, entry, "bound_function",
- js_fun->bound_target_function());
+ SetInternalReference(js_obj, entry, "bound_this", js_fun->bound_this(),
+ JSBoundFunction::kBoundThisOffset);
+ SetInternalReference(js_obj, entry, "bound_function",
+ js_fun->bound_target_function(),
+ JSBoundFunction::kBoundTargetFunctionOffset);
FixedArray* bindings = js_fun->bound_arguments();
for (int i = 0; i < bindings->length(); i++) {
const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
@@ -1119,7 +1104,7 @@ void V8HeapExplorer::ExtractJSObjectReferences(
} else if (obj->IsJSFunction()) {
JSFunction* js_fun = JSFunction::cast(js_obj);
Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole()) {
+ if (!proto_or_map->IsTheHole(heap_->isolate())) {
if (!proto_or_map->IsMap()) {
SetPropertyReference(
obj, entry,
@@ -1147,9 +1132,6 @@ void V8HeapExplorer::ExtractJSObjectReferences(
SetInternalReference(js_fun, entry,
"context", js_fun->context(),
JSFunction::kContextOffset);
- SetWeakReference(js_fun, entry,
- "next_function_link", js_fun->next_function_link(),
- JSFunction::kNextFunctionLinkOffset);
// Ensure no new weak references appeared in JSFunction.
STATIC_ASSERT(JSFunction::kCodeEntryOffset ==
JSFunction::kNonWeakFieldsEndOffset);
@@ -1211,16 +1193,16 @@ void V8HeapExplorer::ExtractJSCollectionReferences(int entry,
JSCollection::kTableOffset);
}
-
-void V8HeapExplorer::ExtractJSWeakCollectionReferences(
- int entry, JSWeakCollection* collection) {
- MarkAsWeakContainer(collection->table());
- SetInternalReference(collection, entry,
- "table", collection->table(),
+void V8HeapExplorer::ExtractJSWeakCollectionReferences(int entry,
+ JSWeakCollection* obj) {
+ if (obj->table()->IsHashTable()) {
+ ObjectHashTable* table = ObjectHashTable::cast(obj->table());
+ TagFixedArraySubType(table, JS_WEAK_COLLECTION_SUB_TYPE);
+ }
+ SetInternalReference(obj, entry, "table", obj->table(),
JSWeakCollection::kTableOffset);
}
-
void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
if (context == context->declaration_context()) {
ScopeInfo* scope_info = context->closure()->shared()->scope_info();
@@ -1264,7 +1246,6 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
optimized_functions_list);
EXTRACT_CONTEXT_FIELD(OPTIMIZED_CODE_LIST, unused, optimized_code_list);
EXTRACT_CONTEXT_FIELD(DEOPTIMIZED_CODE_LIST, unused, deoptimized_code_list);
- EXTRACT_CONTEXT_FIELD(NEXT_CONTEXT_LINK, unused, next_context_link);
#undef EXTRACT_CONTEXT_FIELD
STATIC_ASSERT(Context::OPTIMIZED_FUNCTIONS_LIST ==
Context::FIRST_WEAK_SLOT);
@@ -1282,19 +1263,9 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
raw_transitions_or_prototype_info)) {
TransitionArray* transitions =
TransitionArray::cast(raw_transitions_or_prototype_info);
- int transitions_entry = GetEntry(transitions)->index();
-
- if (map->CanTransition()) {
- if (transitions->HasPrototypeTransitions()) {
- FixedArray* prototype_transitions =
- transitions->GetPrototypeTransitions();
- MarkAsWeakContainer(prototype_transitions);
- TagObject(prototype_transitions, "(prototype transitions");
- SetInternalReference(transitions, transitions_entry,
- "prototype_transitions", prototype_transitions);
- }
- // TODO(alph): transitions keys are strong links.
- MarkAsWeakContainer(transitions);
+ if (map->CanTransition() && transitions->HasPrototypeTransitions()) {
+ TagObject(transitions->GetPrototypeTransitions(),
+ "(prototype transitions)");
}
TagObject(transitions, "(transition array)");
@@ -1314,16 +1285,19 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
}
DescriptorArray* descriptors = map->instance_descriptors();
TagObject(descriptors, "(map descriptors)");
- SetInternalReference(map, entry,
- "descriptors", descriptors,
+ SetInternalReference(map, entry, "descriptors", descriptors,
Map::kDescriptorsOffset);
-
- MarkAsWeakContainer(map->code_cache());
- SetInternalReference(map, entry,
- "code_cache", map->code_cache(),
+ SetInternalReference(map, entry, "code_cache", map->code_cache(),
Map::kCodeCacheOffset);
- SetInternalReference(map, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
+ SetInternalReference(map, entry, "prototype", map->prototype(),
+ Map::kPrototypeOffset);
+#if V8_DOUBLE_FIELDS_UNBOXING
+ if (FLAG_unbox_double_fields) {
+ SetInternalReference(map, entry, "layout_descriptor",
+ map->layout_descriptor(),
+ Map::kLayoutDescriptorOffset);
+ }
+#endif
Object* constructor_or_backpointer = map->constructor_or_backpointer();
if (constructor_or_backpointer->IsMap()) {
TagObject(constructor_or_backpointer, "(back pointer)");
@@ -1334,10 +1308,11 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
Map::kConstructorOrBackPointerOffset);
}
TagObject(map->dependent_code(), "(dependent code)");
- MarkAsWeakContainer(map->dependent_code());
- SetInternalReference(map, entry,
- "dependent_code", map->dependent_code(),
+ SetInternalReference(map, entry, "dependent_code", map->dependent_code(),
Map::kDependentCodeOffset);
+ TagObject(map->weak_cell_cache(), "(weak cell)");
+ SetInternalReference(map, entry, "weak_cell_cache", map->weak_cell_cache(),
+ Map::kWeakCellCacheOffset);
}
@@ -1389,9 +1364,9 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"optimized_code_map", shared->optimized_code_map(),
SharedFunctionInfo::kOptimizedCodeMapOffset);
- SetInternalReference(obj, entry,
- "feedback_vector", shared->feedback_vector(),
- SharedFunctionInfo::kFeedbackVectorOffset);
+ SetInternalReference(obj, entry, "feedback_metadata",
+ shared->feedback_metadata(),
+ SharedFunctionInfo::kFeedbackMetadataOffset);
}
@@ -1444,19 +1419,6 @@ void V8HeapExplorer::ExtractAccessorPairReferences(
}
-void V8HeapExplorer::ExtractCodeCacheReferences(
- int entry, CodeCache* code_cache) {
- TagObject(code_cache->default_cache(), "(default code cache)");
- SetInternalReference(code_cache, entry,
- "default_cache", code_cache->default_cache(),
- CodeCache::kDefaultCacheOffset);
- TagObject(code_cache->normal_type_cache(), "(code type cache)");
- SetInternalReference(code_cache, entry,
- "type_cache", code_cache->normal_type_cache(),
- CodeCache::kNormalTypeCacheOffset);
-}
-
-
void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
TagObject(code, names_->GetFormatted("(%s builtin)", name));
}
@@ -1484,37 +1446,38 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"deoptimization_data", code->deoptimization_data(),
Code::kDeoptimizationDataOffset);
+ TagObject(code->source_position_table(), "(source position table)");
+ SetInternalReference(code, entry, "source_position_table",
+ code->source_position_table(),
+ Code::kSourcePositionTableOffset);
if (code->kind() == Code::FUNCTION) {
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
+ SetInternalReference(code, entry, "type_feedback_info",
+ code->type_feedback_info(),
Code::kTypeFeedbackInfoOffset);
}
- SetInternalReference(code, entry,
- "gc_metadata", code->gc_metadata(),
+ SetInternalReference(code, entry, "gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
- SetWeakReference(code, entry,
- "next_code_link", code->next_code_link(),
- Code::kNextCodeLinkOffset);
- }
}
-
void V8HeapExplorer::ExtractBoxReferences(int entry, Box* box) {
SetInternalReference(box, entry, "value", box->value(), Box::kValueOffset);
}
-
void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
}
+void V8HeapExplorer::ExtractWeakCellReferences(int entry, WeakCell* weak_cell) {
+ TagObject(weak_cell, "(weak cell)");
+ SetWeakReference(weak_cell, entry, "value", weak_cell->value(),
+ WeakCell::kValueOffset);
+}
void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
PropertyCell* cell) {
SetInternalReference(cell, entry, "value", cell->value(),
PropertyCell::kValueOffset);
- MarkAsWeakContainer(cell->dependent_code());
+ TagObject(cell->dependent_code(), "(dependent code)");
SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(),
PropertyCell::kDependentCodeOffset);
}
@@ -1526,7 +1489,7 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
AllocationSite::kTransitionInfoOffset);
SetInternalReference(site, entry, "nested_site", site->nested_site(),
AllocationSite::kNestedSiteOffset);
- MarkAsWeakContainer(site->dependent_code());
+ TagObject(site->dependent_code(), "(dependent code)");
SetInternalReference(site, entry, "dependent_code", site->dependent_code(),
AllocationSite::kDependentCodeOffset);
// Do not visit weak_next as it is not visited by the StaticVisitor,
@@ -1558,7 +1521,7 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(
// Setup a reference to a native memory backing_store object.
if (!buffer->backing_store())
return;
- size_t data_size = NumberToSize(heap_->isolate(), buffer->byte_length());
+ size_t data_size = NumberToSize(buffer->byte_length());
JSArrayBufferDataEntryAllocator allocator(data_size, this);
HeapEntry* data_entry =
filler_->FindOrAddEntry(buffer->backing_store(), &allocator);
@@ -1566,22 +1529,36 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(
entry, "backing_store", data_entry);
}
-
void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
- bool is_weak = weak_containers_.Contains(array);
- for (int i = 0, l = array->length(); i < l; ++i) {
- if (is_weak) {
- SetWeakReference(array, entry,
- i, array->get(i), array->OffsetOfElementAt(i));
- } else {
- SetInternalReference(array, entry,
- i, array->get(i), array->OffsetOfElementAt(i));
+ auto it = array_types_.find(array);
+ if (it == array_types_.end()) {
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ SetInternalReference(array, entry, i, array->get(i),
+ array->OffsetOfElementAt(i));
}
+ return;
}
-}
+ switch (it->second) {
+ case JS_WEAK_COLLECTION_SUB_TYPE:
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ SetWeakReference(array, entry, i, array->get(i),
+ array->OffsetOfElementAt(i));
+ }
+ break;
+ // TODO(alph): Add special processing for other types of FixedArrays.
+
+ default:
+ for (int i = 0, l = array->length(); i < l; ++i) {
+ SetInternalReference(array, entry, i, array->get(i),
+ array->OffsetOfElementAt(i));
+ }
+ break;
+ }
+}
void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
+ Isolate* isolate = js_obj->GetIsolate();
if (js_obj->HasFastProperties()) {
DescriptorArray* descs = js_obj->map()->instance_descriptors();
int real_size = js_obj->map()->NumberOfOwnDescriptors();
@@ -1598,14 +1575,8 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
- if (k != heap_->hidden_properties_symbol()) {
- SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
- value, NULL, field_offset);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value,
- field_offset);
- }
+ SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
+ value, NULL, field_offset);
break;
}
case kDescriptor:
@@ -1621,15 +1592,10 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
+ if (dictionary->IsKey(isolate, k)) {
DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
Object* value = cell->value();
- if (k == heap_->hidden_properties_symbol()) {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- continue;
- }
PropertyDetails details = cell->property_details();
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
Name::cast(k), value);
@@ -1640,13 +1606,8 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
+ if (dictionary->IsKey(isolate, k)) {
Object* value = dictionary->ValueAt(i);
- if (k == heap_->hidden_properties_symbol()) {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- continue;
- }
PropertyDetails details = dictionary->DetailsAt(i);
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
Name::cast(k), value);
@@ -1675,13 +1636,14 @@ void V8HeapExplorer::ExtractAccessorPairProperty(JSObject* js_obj, int entry,
void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
+ Isolate* isolate = js_obj->GetIsolate();
if (js_obj->HasFastObjectElements()) {
FixedArray* elements = FixedArray::cast(js_obj->elements());
int length = js_obj->IsJSArray() ?
Smi::cast(JSArray::cast(js_obj)->length())->value() :
elements->length();
for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole()) {
+ if (!elements->get(i)->IsTheHole(isolate)) {
SetElementReference(js_obj, entry, i, elements->get(i));
}
}
@@ -1690,7 +1652,7 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
int length = dictionary->Capacity();
for (int i = 0; i < length; ++i) {
Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
+ if (dictionary->IsKey(isolate, k)) {
DCHECK(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
@@ -1877,6 +1839,23 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
object != heap_->two_pointer_filler_map();
}
+bool V8HeapExplorer::IsEssentialHiddenReference(Object* parent,
+ int field_offset) {
+ if (parent->IsAllocationSite() &&
+ field_offset == AllocationSite::kWeakNextOffset)
+ return false;
+ if (parent->IsJSFunction() &&
+ field_offset == JSFunction::kNextFunctionLinkOffset)
+ return false;
+ if (parent->IsCode() && field_offset == Code::kNextCodeLinkOffset)
+ return false;
+ if (parent->IsContext() &&
+ field_offset == Context::OffsetOfElementAt(Context::NEXT_CONTEXT_LINK))
+ return false;
+ if (parent->IsWeakCell() && field_offset == WeakCell::kNextOffset)
+ return false;
+ return true;
+}
void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
int parent_entry,
@@ -1968,17 +1947,14 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
MarkVisitedField(parent_obj, field_offset);
}
-
void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
+ int parent_entry, int index,
+ Object* child_obj, int field_offset) {
DCHECK(parent_entry == GetEntry(parent_obj)->index());
HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL && IsEssentialObject(child_obj)) {
- filler_->SetIndexedReference(HeapGraphEdge::kHidden,
- parent_entry,
- index,
+ if (child_entry != nullptr && IsEssentialObject(child_obj) &&
+ IsEssentialHiddenReference(parent_obj, field_offset)) {
+ filler_->SetIndexedReference(HeapGraphEdge::kHidden, parent_entry, index,
child_entry);
}
}
@@ -2165,14 +2141,12 @@ void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
}
}
-
-void V8HeapExplorer::MarkAsWeakContainer(Object* object) {
- if (IsEssentialObject(object) && object->IsFixedArray()) {
- weak_containers_.Insert(object);
- }
+void V8HeapExplorer::TagFixedArraySubType(const FixedArray* array,
+ FixedArraySubInstanceType type) {
+ DCHECK(array_types_.find(array) == array_types_.end());
+ array_types_[array] = type;
}
-
class GlobalObjectsEnumerator : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) override {
@@ -2262,9 +2236,9 @@ HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
intptr_t elements = info->GetElementCount();
intptr_t size = info->GetSizeInBytes();
const char* name = elements != -1
- ? names_->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : names_->GetCopy(info->GetLabel());
+ ? names_->GetFormatted("%s / %" V8PRIdPTR " entries",
+ info->GetLabel(), elements)
+ : names_->GetCopy(info->GetLabel());
return snapshot_->AddEntry(
entries_type_,
name,
@@ -2292,8 +2266,7 @@ NativeObjectsExplorer::NativeObjectsExplorer(
NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
p = objects_by_info_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2302,8 +2275,7 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
reinterpret_cast<List<HeapObject*>* >(p->value);
delete objects;
}
- for (HashMap::Entry* p = native_groups_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = native_groups_.Start(); p != NULL;
p = native_groups_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
@@ -2375,7 +2347,8 @@ void NativeObjectsExplorer::FillImplicitReferences() {
List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
v8::RetainedObjectInfo* info) {
- HashMap::Entry* entry = objects_by_info_.LookupOrInsert(info, InfoHash(info));
+ base::HashMap::Entry* entry =
+ objects_by_info_.LookupOrInsert(info, InfoHash(info));
if (entry->value != NULL) {
info->Dispose();
} else {
@@ -2391,8 +2364,7 @@ bool NativeObjectsExplorer::IterateAndExtractReferences(
FillRetainedObjects();
FillImplicitReferences();
if (EstimateObjectsCount() > 0) {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
p = objects_by_info_.Next(p)) {
v8::RetainedObjectInfo* info =
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
@@ -2444,7 +2416,7 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
label_copy,
static_cast<int>(strlen(label_copy)),
isolate_->heap()->HashSeed());
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
native_groups_.LookupOrInsert(const_cast<char*>(label_copy), hash);
if (entry->value == NULL) {
entry->value = new NativeGroupRetainedObjectInfo(label);
@@ -2490,8 +2462,7 @@ void NativeObjectsExplorer::SetWrapperNativeReferences(
void NativeObjectsExplorer::SetRootNativeRootsReference() {
- for (HashMap::Entry* entry = native_groups_.Start();
- entry;
+ for (base::HashMap::Entry* entry = native_groups_.Start(); entry;
entry = native_groups_.Next(entry)) {
NativeGroupRetainedObjectInfo* group_info =
static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
@@ -2759,7 +2730,7 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
- HashMap::Entry* cache_entry =
+ base::HashMap::Entry* cache_entry =
strings_.LookupOrInsert(const_cast<char*>(s), StringHash(s));
if (cache_entry->value == NULL) {
cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
@@ -3144,8 +3115,7 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
void HeapSnapshotJSONSerializer::SerializeStrings() {
ScopedVector<const unsigned char*> sorted_strings(
strings_.occupancy() + 1);
- for (HashMap::Entry* entry = strings_.Start();
- entry != NULL;
+ for (base::HashMap::Entry* entry = strings_.Start(); entry != NULL;
entry = strings_.Next(entry)) {
int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value));
sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key);
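Note on the IsEssentialHiddenReference change above: the predicate prunes known weak-list link slots (AllocationSite's weak_next, JSFunction's next_function_link, Code's next_code_link, the context chain, WeakCell's next) so those bookkeeping pointers no longer show up as retaining edges in heap snapshots. A minimal standalone sketch of the same idea, with hypothetical types and offsets rather than V8's real object layout:

    #include <cstdio>

    // Hypothetical stand-ins; V8's real objects and field offsets differ.
    enum class ObjType { kAllocationSite, kJSFunction, kCode, kWeakCell, kOther };
    struct FakeObject { ObjType type; };

    constexpr int kWeakNextOffset = 8;           // assumed offset values,
    constexpr int kNextFunctionLinkOffset = 16;  // for illustration only
    constexpr int kNextCodeLinkOffset = 24;
    constexpr int kWeakCellNextOffset = 4;

    // A hidden reference is "essential" unless it is one of the known
    // weak-list link fields that merely chain objects together.
    bool IsEssentialHiddenReference(const FakeObject& parent, int field_offset) {
      if (parent.type == ObjType::kAllocationSite &&
          field_offset == kWeakNextOffset) return false;
      if (parent.type == ObjType::kJSFunction &&
          field_offset == kNextFunctionLinkOffset) return false;
      if (parent.type == ObjType::kCode &&
          field_offset == kNextCodeLinkOffset) return false;
      if (parent.type == ObjType::kWeakCell &&
          field_offset == kWeakCellNextOffset) return false;
      return true;
    }

    int main() {
      FakeObject site{ObjType::kAllocationSite};
      std::printf("%d %d\n", IsEssentialHiddenReference(site, 8),   // 0: pruned
                             IsEssentialHiddenReference(site, 0));  // 1: kept
    }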
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 857f2401bf..b870fbe324 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -5,6 +5,8 @@
#ifndef V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
#define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
+#include <unordered_map>
+
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
#include "src/objects.h"
@@ -259,7 +261,7 @@ class HeapObjectsMap {
};
SnapshotObjectId next_id_;
- HashMap entries_map_;
+ base::HashMap entries_map_;
List<EntryInfo> entries_;
List<TimeInterval> time_intervals_;
Heap* heap_;
@@ -297,7 +299,7 @@ class HeapEntriesMap {
v8::internal::kZeroHashSeed);
}
- HashMap entries_;
+ base::HashMap entries_;
friend class HeapObjectsSet;
@@ -316,7 +318,7 @@ class HeapObjectsSet {
bool is_empty() const { return entries_.occupancy() == 0; }
private:
- HashMap entries_;
+ base::HashMap entries_;
DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
};
@@ -382,10 +384,10 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractScriptReferences(int entry, Script* script);
void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
- void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
void ExtractCodeReferences(int entry, Code* code);
void ExtractBoxReferences(int entry, Box* box);
void ExtractCellReferences(int entry, Cell* cell);
+ void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
void ExtractAllocationSiteReferences(int entry, AllocationSite* site);
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
@@ -397,6 +399,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractInternalReferences(JSObject* js_obj, int entry);
bool IsEssentialObject(Object* object);
+ bool IsEssentialHiddenReference(Object* parent, int field_offset);
+
void SetContextReference(HeapObject* parent_obj,
int parent,
String* reference_name,
@@ -420,10 +424,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
int index,
Object* child,
int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
+ void SetHiddenReference(HeapObject* parent_obj, int parent, int index,
+ Object* child, int field_offset);
void SetWeakReference(HeapObject* parent_obj,
int parent,
const char* reference_name,
@@ -453,7 +455,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
const char* GetStrongGcSubrootName(Object* object);
void TagObject(Object* obj, const char* tag);
- void MarkAsWeakContainer(Object* object);
+ void TagFixedArraySubType(const FixedArray* array,
+ FixedArraySubInstanceType type);
HeapEntry* GetEntry(Object* obj);
@@ -466,7 +469,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
HeapObjectsSet user_roots_;
- HeapObjectsSet weak_containers_;
+ std::unordered_map<const FixedArray*, FixedArraySubInstanceType> array_types_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
std::vector<bool> marks_;
@@ -522,8 +525,8 @@ class NativeObjectsExplorer {
bool embedder_queried_;
HeapObjectsSet in_groups_;
// RetainedObjectInfo* -> List<HeapObject*>*
- HashMap objects_by_info_;
- HashMap native_groups_;
+ base::HashMap objects_by_info_;
+ base::HashMap native_groups_;
HeapEntriesAllocator* synthetic_entries_allocator_;
HeapEntriesAllocator* native_entries_allocator_;
// Used during references extraction.
@@ -610,7 +613,7 @@ class HeapSnapshotJSONSerializer {
static const int kNodeFieldsCount;
HeapSnapshot* snapshot_;
- HashMap strings_;
+ base::HashMap strings_;
int next_node_id_;
int next_string_id_;
OutputStreamWriter* writer_;
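In the header above, the weak_containers_ set is replaced by an std::unordered_map that records a concrete FixedArraySubInstanceType per array. A hedged sketch of that tagging scheme, with a made-up SubType enum standing in for V8's real one:

    #include <cassert>
    #include <unordered_map>

    enum SubType { kContextArray, kMapCache, kOther };  // hypothetical values

    // One tag per array, asserted to be set at most once, mirroring the
    // DCHECK in TagFixedArraySubType above.
    std::unordered_map<const void*, SubType> array_types;

    void TagFixedArraySubType(const void* array, SubType type) {
      assert(array_types.find(array) == array_types.end());
      array_types[array] = type;
    }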
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 85edce2663..c50964d990 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
+CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix, const char* resource_name,
int line_number, int column_number,
JITLineInfoTable* line_info, Address instruction_start)
@@ -26,10 +26,10 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
deopt_position_(SourcePosition::Unknown()),
+ deopt_id_(kNoDeoptimizationId),
line_info_(line_info),
instruction_start_(instruction_start) {}
-
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
: tree_(tree),
entry_(entry),
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index abcd9e5d88..583ef0f4e3 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -4,13 +4,12 @@
#include "src/profiler/profile-generator.h"
-#include "src/ast/scopeinfo.h"
+#include "src/base/adapters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
-#include "src/profiler/sampler.h"
-#include "src/splay-tree-inl.h"
#include "src/unicode.h"
namespace v8 {
@@ -47,6 +46,41 @@ const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
const char* const CodeEntry::kNoDeoptReason = "";
+const char* const CodeEntry::kProgramEntryName = "(program)";
+const char* const CodeEntry::kIdleEntryName = "(idle)";
+const char* const CodeEntry::kGarbageCollectorEntryName = "(garbage collector)";
+const char* const CodeEntry::kUnresolvedFunctionName = "(unresolved function)";
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::ProgramEntryCreateTrait>::type
+ CodeEntry::kProgramEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::IdleEntryCreateTrait>::type
+ CodeEntry::kIdleEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry, CodeEntry::GCEntryCreateTrait>::type
+ CodeEntry::kGCEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+base::LazyDynamicInstance<CodeEntry,
+ CodeEntry::UnresolvedEntryCreateTrait>::type
+ CodeEntry::kUnresolvedEntry = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+CodeEntry* CodeEntry::ProgramEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kProgramEntryName);
+}
+
+CodeEntry* CodeEntry::IdleEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::FUNCTION_TAG, CodeEntry::kIdleEntryName);
+}
+
+CodeEntry* CodeEntry::GCEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::BUILTIN_TAG,
+ CodeEntry::kGarbageCollectorEntryName);
+}
+
+CodeEntry* CodeEntry::UnresolvedEntryCreateTrait::Create() {
+ return new CodeEntry(Logger::FUNCTION_TAG,
+ CodeEntry::kUnresolvedFunctionName);
+}
CodeEntry::~CodeEntry() {
delete line_info_;
@@ -93,7 +127,7 @@ bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
void CodeEntry::SetBuiltinId(Builtins::Name id) {
- bit_field_ = TagField::update(bit_field_, Logger::BUILTIN_TAG);
+ bit_field_ = TagField::update(bit_field_, CodeEventListener::BUILTIN_TAG);
bit_field_ = BuiltinIdField::update(bit_field_, id);
}
@@ -118,6 +152,19 @@ const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
return it != inline_locations_.end() ? &it->second : NULL;
}
+void CodeEntry::AddDeoptInlinedFrames(
+ int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
+ // It's better to use std::move to place the vector into the map,
+ // but it's not supported by the current libstdc++ on MacOS.
+ deopt_inlined_frames_
+ .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
+ .first->second.swap(inlined_frames);
+}
+
+bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
+ return deopt_inlined_frames_.find(deopt_id) != deopt_inlined_frames_.end();
+}
+
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
if (!shared->script()->IsScript()) return;
Script* script = Script::cast(shared->script());
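The comment in AddDeoptInlinedFrames explains why the vector is swapped into the map rather than moved. A self-contained sketch of that insert-then-swap idiom with generic types (not V8's):

    #include <cassert>
    #include <map>
    #include <utility>
    #include <vector>

    std::map<int, std::vector<int>> table;

    // Insert an empty vector under the key, then swap the payload into the
    // freshly inserted slot; the source vector is left empty, which is what
    // the assert below relies on.
    void AddFrames(int key, std::vector<int>& frames) {
      table.insert(std::make_pair(key, std::vector<int>()))
          .first->second.swap(frames);
    }

    int main() {
      std::vector<int> frames = {1, 2, 3};
      AddFrames(7, frames);
      assert(frames.empty() && table[7].size() == 3);
    }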
@@ -131,30 +178,20 @@ CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
CpuProfileDeoptInfo info;
info.deopt_reason = deopt_reason_;
- if (inlined_function_infos_.empty()) {
+ DCHECK_NE(kNoDeoptimizationId, deopt_id_);
+ if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
info.stack.push_back(CpuProfileDeoptFrame(
{script_id_, position_ + deopt_position_.position()}));
- return info;
- }
- // Copy the only branch from the inlining tree where the deopt happened.
- SourcePosition position = deopt_position_;
- int inlining_id = InlinedFunctionInfo::kNoParentId;
- for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
- InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
- if (std::binary_search(current_info.deopt_pc_offsets.begin(),
- current_info.deopt_pc_offsets.end(), pc_offset_)) {
- inlining_id = static_cast<int>(i);
- break;
+ } else {
+ size_t deopt_position = deopt_position_.raw();
+ // Copy stack of inlined frames where the deopt happened.
+ std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
+ for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
+ info.stack.push_back(CpuProfileDeoptFrame(
+ {inlined_frame.script_id, deopt_position + inlined_frame.position}));
+ deopt_position = 0; // Done with innermost frame.
}
}
- while (inlining_id != InlinedFunctionInfo::kNoParentId) {
- InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
- info.stack.push_back(
- CpuProfileDeoptFrame({inlined_info.script_id,
- inlined_info.start_position + position.raw()}));
- position = inlined_info.inline_position;
- inlining_id = inlined_info.parent_id;
- }
return info;
}
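GetDeoptInfo now walks the recorded frames in reverse (via base::Reversed) so the stack is reported innermost-first, and only the innermost frame keeps the precise deopt position. A sketch of that walk using plain reverse iterators and a hypothetical Frame record:

    #include <vector>

    struct Frame { int script_id; int position; };  // hypothetical record

    std::vector<Frame> BuildDeoptStack(const std::vector<Frame>& frames,
                                       int deopt_position) {
      std::vector<Frame> stack;
      // Frames are stored outermost-first; report them innermost-first.
      for (auto it = frames.rbegin(); it != frames.rend(); ++it) {
        stack.push_back({it->script_id, deopt_position + it->position});
        deopt_position = 0;  // done with the innermost frame
      }
      return stack;
    }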
@@ -166,14 +203,15 @@ void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
- HashMap::Entry* map_entry = children_.Lookup(entry, CodeEntryHash(entry));
+ base::HashMap::Entry* map_entry =
+ children_.Lookup(entry, CodeEntryHash(entry));
return map_entry != NULL ?
reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
- HashMap::Entry* map_entry =
+ base::HashMap::Entry* map_entry =
children_.LookupOrInsert(entry, CodeEntryHash(entry));
ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
if (node == NULL) {
@@ -190,7 +228,7 @@ void ProfileNode::IncrementLineTicks(int src_line) {
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) return;
// Increment a hit counter of a certain source line.
// Add a new source line if not found.
- HashMap::Entry* e =
+ base::HashMap::Entry* e =
line_ticks_.LookupOrInsert(reinterpret_cast<void*>(src_line), src_line);
DCHECK(e);
e->value = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(e->value) + 1);
@@ -208,7 +246,7 @@ bool ProfileNode::GetLineTicks(v8::CpuProfileNode::LineTick* entries,
v8::CpuProfileNode::LineTick* entry = entries;
- for (HashMap::Entry* p = line_ticks_.Start(); p != NULL;
+ for (base::HashMap::Entry* p = line_ticks_.Start(); p != NULL;
p = line_ticks_.Next(p), entry++) {
entry->line =
static_cast<unsigned int>(reinterpret_cast<uintptr_t>(p->key));
@@ -229,12 +267,13 @@ void ProfileNode::Print(int indent) {
base::OS::Print("\n");
for (size_t i = 0; i < deopt_infos_.size(); ++i) {
CpuProfileDeoptInfo& info = deopt_infos_[i];
- base::OS::Print(
- "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
- indent + 10, "", info.stack[0].script_id, info.stack[0].position,
- info.deopt_reason);
+ base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
+ " with reason '%s'.\n",
+ indent + 10, "", info.stack[0].script_id,
+ info.stack[0].position, info.deopt_reason);
for (size_t index = 1; index < info.stack.size(); ++index) {
- base::OS::Print("%*s;;; Inline point: script_id %d position: %d.\n",
+ base::OS::Print("%*s;;; Inline point: script_id %d position: %" PRIuS
+ ".\n",
indent + 10, "", info.stack[index].script_id,
info.stack[index].position);
}
@@ -245,8 +284,7 @@ void ProfileNode::Print(int indent) {
base::OS::Print("%*s bailed out due to '%s'\n", indent + 10, "",
bailout_reason);
}
- for (HashMap::Entry* p = children_.Start();
- p != NULL;
+ for (base::HashMap::Entry* p = children_.Start(); p != NULL;
p = children_.Next(p)) {
reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
}
@@ -264,16 +302,14 @@ class DeleteNodesCallback {
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
-
ProfileTree::ProfileTree(Isolate* isolate)
- : root_entry_(Logger::FUNCTION_TAG, "(root)"),
+ : root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
next_node_id_(1),
root_(new ProfileNode(this, &root_entry_)),
isolate_(isolate),
next_function_id_(1),
function_ids_(ProfileNode::CodeEntriesMatch) {}
-
ProfileTree::~ProfileTree() {
DeleteNodesCallback cb;
TraverseDepthFirst(&cb);
@@ -282,7 +318,7 @@ ProfileTree::~ProfileTree() {
unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
CodeEntry* code_entry = node->entry();
- HashMap::Entry* entry =
+ base::HashMap::Entry* entry =
function_ids_.LookupOrInsert(code_entry, code_entry->GetHash());
if (!entry->value) {
entry->value = reinterpret_cast<void*>(next_function_id_++);
@@ -361,12 +397,13 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
}
-
-CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
+CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
+ bool record_samples)
: title_(title),
record_samples_(record_samples),
start_time_(base::TimeTicks::HighResolutionNow()),
- top_down_(isolate) {}
+ top_down_(profiler->isolate()),
+ profiler_(profiler) {}
void CpuProfile::AddPath(base::TimeTicks timestamp,
const std::vector<CodeEntry*>& path, int src_line,
@@ -379,92 +416,60 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
}
}
-
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
end_time_ = base::TimeTicks::HighResolutionNow();
}
-
void CpuProfile::Print() {
base::OS::Print("[Top down]:\n");
top_down_.Print();
}
-
-CodeMap::~CodeMap() {}
-
-
-const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
-
-
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
DeleteAllCoveredCode(addr, addr + size);
- CodeTree::Locator locator;
- tree_.Insert(addr, &locator);
- locator.set_value(CodeEntryInfo(entry, size));
+ code_map_.insert({addr, CodeEntryInfo(entry, size)});
}
-
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
- List<Address> to_delete;
- Address addr = end - 1;
- while (addr >= start) {
- CodeTree::Locator locator;
- if (!tree_.FindGreatestLessThan(addr, &locator)) break;
- Address start2 = locator.key(), end2 = start2 + locator.value().size;
- if (start2 < end && start < end2) to_delete.Add(start2);
- addr = start2 - 1;
+ auto left = code_map_.upper_bound(start);
+ if (left != code_map_.begin()) {
+ --left;
+ if (left->first + left->second.size <= start) ++left;
}
- for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
+ auto right = left;
+ while (right != code_map_.end() && right->first < end) ++right;
+ code_map_.erase(left, right);
}
-
CodeEntry* CodeMap::FindEntry(Address addr) {
- CodeTree::Locator locator;
- if (tree_.FindGreatestLessThan(addr, &locator)) {
- // locator.key() <= addr. Need to check that addr is within entry.
- const CodeEntryInfo& entry = locator.value();
- if (addr < (locator.key() + entry.size)) {
- return entry.entry;
- }
- }
- return NULL;
+ auto it = code_map_.upper_bound(addr);
+ if (it == code_map_.begin()) return nullptr;
+ --it;
+ Address end_address = it->first + it->second.size;
+ return addr < end_address ? it->second.entry : nullptr;
}
-
void CodeMap::MoveCode(Address from, Address to) {
if (from == to) return;
- CodeTree::Locator locator;
- if (!tree_.Find(from, &locator)) return;
- CodeEntryInfo entry = locator.value();
- tree_.Remove(from);
- AddCode(to, entry.entry, entry.size);
+ auto it = code_map_.find(from);
+ if (it == code_map_.end()) return;
+ CodeEntryInfo info = it->second;
+ code_map_.erase(it);
+ AddCode(to, info.entry, info.size);
}
-
-void CodeMap::CodeTreePrinter::Call(
- const Address& key, const CodeMap::CodeEntryInfo& value) {
- base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
-}
-
-
void CodeMap::Print() {
- CodeTreePrinter printer;
- tree_.ForEach(&printer);
+ for (auto it = code_map_.begin(); it != code_map_.end(); ++it) {
+ base::OS::Print("%p %5d %s\n", static_cast<void*>(it->first),
+ it->second.size, it->second.entry->name());
+ }
}
-
-CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
- : function_and_resource_names_(heap),
- isolate_(heap->isolate()),
+CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
+ : resource_names_(isolate->heap()),
+ profiler_(nullptr),
current_profiles_semaphore_(1) {}
-
-static void DeleteCodeEntry(CodeEntry** entry_ptr) {
- delete *entry_ptr;
-}
-
-
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
delete *profile_ptr;
}
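The SplayTree-backed CodeMap is replaced above by a std::map keyed by code start address. FindEntry uses upper_bound to locate the first region starting after addr; its predecessor is the only region that can contain addr. A standalone sketch of that lookup:

    #include <cstdint>
    #include <cstdio>
    #include <map>

    using Address = std::uintptr_t;
    struct CodeEntryInfo { const char* name; unsigned size; };

    std::map<Address, CodeEntryInfo> code_map;

    const CodeEntryInfo* FindEntry(Address addr) {
      auto it = code_map.upper_bound(addr);  // first region starting > addr
      if (it == code_map.begin()) return nullptr;
      --it;  // now the last region starting <= addr
      return addr < it->first + it->second.size ? &it->second : nullptr;
    }

    int main() {
      code_map.insert({0x1000, {"foo", 0x100}});
      std::printf("%s\n", FindEntry(0x10ff) ? "hit" : "miss");  // hit
      std::printf("%s\n", FindEntry(0x1100) ? "hit" : "miss");  // miss
    }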
@@ -473,7 +478,6 @@ static void DeleteCpuProfile(CpuProfile** profile_ptr) {
CpuProfilesCollection::~CpuProfilesCollection() {
finished_profiles_.Iterate(DeleteCpuProfile);
current_profiles_.Iterate(DeleteCpuProfile);
- code_entries_.Iterate(DeleteCodeEntry);
}
@@ -492,7 +496,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
return true;
}
}
- current_profiles_.Add(new CpuProfile(isolate_, title, record_samples));
+ current_profiles_.Add(new CpuProfile(profiler_, title, record_samples));
current_profiles_semaphore_.Signal();
return true;
}
@@ -550,43 +554,8 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
-
-CodeEntry* CpuProfilesCollection::NewCodeEntry(
- Logger::LogEventsAndTags tag, const char* name, const char* name_prefix,
- const char* resource_name, int line_number, int column_number,
- JITLineInfoTable* line_info, Address instruction_start) {
- CodeEntry* code_entry =
- new CodeEntry(tag, name, name_prefix, resource_name, line_number,
- column_number, line_info, instruction_start);
- code_entries_.Add(code_entry);
- return code_entry;
-}
-
-
-const char* const ProfileGenerator::kProgramEntryName =
- "(program)";
-const char* const ProfileGenerator::kIdleEntryName =
- "(idle)";
-const char* const ProfileGenerator::kGarbageCollectorEntryName =
- "(garbage collector)";
-const char* const ProfileGenerator::kUnresolvedFunctionName =
- "(unresolved function)";
-
-
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
- : profiles_(profiles),
- program_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
- idle_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
- gc_entry_(
- profiles->NewCodeEntry(Logger::BUILTIN_TAG,
- kGarbageCollectorEntryName)),
- unresolved_entry_(
- profiles->NewCodeEntry(Logger::FUNCTION_TAG,
- kUnresolvedFunctionName)) {
-}
-
+ : profiles_(profiles) {}
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
std::vector<CodeEntry*> entries;
@@ -602,30 +571,29 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
int src_line = v8::CpuProfileNode::kNoLineNumberInfo;
bool src_line_not_found = true;
- if (sample.pc != NULL) {
- if (sample.has_external_callback && sample.state == EXTERNAL &&
- sample.top_frame_type == StackFrame::EXIT) {
+ if (sample.pc != nullptr) {
+ if (sample.has_external_callback && sample.state == EXTERNAL) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
- entries.push_back(code_map_.FindEntry(sample.external_callback_entry));
+ entries.push_back(code_map_.FindEntry(
+ reinterpret_cast<Address>(sample.external_callback_entry)));
} else {
- CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
+ CodeEntry* pc_entry =
+ code_map_.FindEntry(reinterpret_cast<Address>(sample.pc));
// If there is no pc_entry we're likely in native code.
// Find out, if top of stack was pointing inside a JS function
// meaning that we have encountered a frameless invocation.
- if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
- sample.top_frame_type == StackFrame::INTERPRETED ||
- sample.top_frame_type == StackFrame::OPTIMIZED)) {
- pc_entry = code_map_.FindEntry(sample.tos);
+ if (!pc_entry && !sample.has_external_callback) {
+ pc_entry = code_map_.FindEntry(reinterpret_cast<Address>(sample.tos));
}
// If pc is in the function code before it has set up the stack frame or
// after the frame was destroyed, SafeStackFrameIterator incorrectly thinks
// that ebp contains the return address of the current function and skips
// the caller's frame. Check for this case and just skip such samples.
if (pc_entry) {
- int pc_offset =
- static_cast<int>(sample.pc - pc_entry->instruction_start());
+ int pc_offset = static_cast<int>(reinterpret_cast<Address>(sample.pc) -
+ pc_entry->instruction_start());
src_line = pc_entry->GetSourceLine(pc_offset);
if (src_line == v8::CpuProfileNode::kNoLineNumberInfo) {
src_line = pc_entry->line_number();
@@ -641,22 +609,21 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// In the latter case we know the caller for sure but in the
// former case we don't so we simply replace the frame with
// 'unresolved' entry.
- if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
- entries.push_back(unresolved_entry_);
+ if (!sample.has_external_callback) {
+ entries.push_back(CodeEntry::unresolved_entry());
}
}
}
}
- for (const Address *stack_pos = sample.stack,
- *stack_end = stack_pos + sample.frames_count;
- stack_pos != stack_end; ++stack_pos) {
- CodeEntry* entry = code_map_.FindEntry(*stack_pos);
+ for (unsigned i = 0; i < sample.frames_count; ++i) {
+ Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
+ CodeEntry* entry = code_map_.FindEntry(stack_pos);
if (entry) {
// Find out if the entry has an inlining stack associated.
int pc_offset =
- static_cast<int>(*stack_pos - entry->instruction_start());
+ static_cast<int>(stack_pos - entry->instruction_start());
const std::vector<CodeEntry*>* inline_stack =
entry->GetInlineStack(pc_offset);
if (inline_stack) {
@@ -699,7 +666,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
switch (tag) {
case GC:
- return gc_entry_;
+ return CodeEntry::gc_entry();
case JS:
case COMPILER:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
@@ -707,9 +674,9 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
// one bucket.
case OTHER:
case EXTERNAL:
- return program_entry_;
+ return CodeEntry::program_entry();
case IDLE:
- return idle_entry_;
+ return CodeEntry::idle_entry();
default: return NULL;
}
}
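EntryForVMState now hands out process-wide, lazily created entries (CodeEntry::gc_entry() and friends) instead of entries owned by the profiles collection. A simplified sketch of the trait-based lazy-instance pattern, approximated here with C++11 function-local statics rather than V8's base::LazyDynamicInstance:

    #include <cstdio>

    struct Entry { const char* name; };  // hypothetical stand-in for CodeEntry

    struct ProgramEntryCreateTrait {
      static Entry* Create() { return new Entry{"(program)"}; }
    };
    struct IdleEntryCreateTrait {
      static Entry* Create() { return new Entry{"(idle)"}; }
    };

    // One shared instance per trait, created on first use and intentionally
    // never destroyed.
    template <typename Trait>
    Entry* LazyInstance() {
      static Entry* instance = Trait::Create();
      return instance;
    }

    int main() {
      std::printf("%s\n", LazyInstance<ProgramEntryCreateTrait>()->name);
      std::printf("%s\n", LazyInstance<IdleEntryCreateTrait>()->name);
    }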
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 194b490929..b785eaaf5f 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -6,15 +6,16 @@
#define V8_PROFILER_PROFILE_GENERATOR_H_
#include <map>
-#include "include/v8-profiler.h"
#include "src/allocation.h"
+#include "src/base/hashmap.h"
#include "src/compiler.h"
-#include "src/hashmap.h"
#include "src/profiler/strings-storage.h"
namespace v8 {
namespace internal {
+struct TickSample;
+
// Provides a mapping from the offsets within generated code to
// the source line.
class JITLineInfoTable : public Malloced {
@@ -38,7 +39,7 @@ class JITLineInfoTable : public Malloced {
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
- inline CodeEntry(Logger::LogEventsAndTags tag, const char* name,
+ inline CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
const char* name_prefix = CodeEntry::kEmptyNamePrefix,
const char* resource_name = CodeEntry::kEmptyResourceName,
int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
@@ -47,6 +48,13 @@ class CodeEntry {
Address instruction_start = NULL);
~CodeEntry();
+ // Container describing inlined frames at eager deopt points. It is
+ // eventually translated into v8::CpuProfileDeoptFrame by the profiler.
+ struct DeoptInlinedFrame {
+ int position;
+ int script_id;
+ };
+
const char* name_prefix() const { return name_prefix_; }
bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
const char* name() const { return name_; }
@@ -64,31 +72,22 @@ class CodeEntry {
const char* bailout_reason() const { return bailout_reason_; }
void set_deopt_info(const char* deopt_reason, SourcePosition position,
- size_t pc_offset) {
- DCHECK(deopt_position_.IsUnknown());
+ int deopt_id) {
+ DCHECK(!has_deopt_info());
deopt_reason_ = deopt_reason;
deopt_position_ = position;
- pc_offset_ = pc_offset;
+ deopt_id_ = deopt_id;
}
CpuProfileDeoptInfo GetDeoptInfo();
- const char* deopt_reason() const { return deopt_reason_; }
- SourcePosition deopt_position() const { return deopt_position_; }
- bool has_deopt_info() const { return !deopt_position_.IsUnknown(); }
+ bool has_deopt_info() const { return deopt_id_ != kNoDeoptimizationId; }
void clear_deopt_info() {
deopt_reason_ = kNoDeoptReason;
deopt_position_ = SourcePosition::Unknown();
+ deopt_id_ = kNoDeoptimizationId;
}
void FillFunctionInfo(SharedFunctionInfo* shared);
- void set_inlined_function_infos(
- const std::vector<InlinedFunctionInfo>& infos) {
- inlined_function_infos_ = infos;
- }
- const std::vector<InlinedFunctionInfo> inlined_function_infos() {
- return inlined_function_infos_;
- }
-
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const {
return BuiltinIdField::decode(bit_field_);
@@ -102,17 +101,60 @@ class CodeEntry {
void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
+ void AddDeoptInlinedFrames(int deopt_id, std::vector<DeoptInlinedFrame>&);
+ bool HasDeoptInlinedFramesFor(int deopt_id) const;
+
Address instruction_start() const { return instruction_start_; }
- Logger::LogEventsAndTags tag() const { return TagField::decode(bit_field_); }
+ CodeEventListener::LogEventsAndTags tag() const {
+ return TagField::decode(bit_field_);
+ }
static const char* const kEmptyNamePrefix;
static const char* const kEmptyResourceName;
static const char* const kEmptyBailoutReason;
static const char* const kNoDeoptReason;
+ static const char* const kProgramEntryName;
+ static const char* const kIdleEntryName;
+ static const char* const kGarbageCollectorEntryName;
+ // Used to represent frames for which we have no reliable way to
+ // detect the function.
+ static const char* const kUnresolvedFunctionName;
+
+ V8_INLINE static CodeEntry* program_entry() {
+ return kProgramEntry.Pointer();
+ }
+ V8_INLINE static CodeEntry* idle_entry() { return kIdleEntry.Pointer(); }
+ V8_INLINE static CodeEntry* gc_entry() { return kGCEntry.Pointer(); }
+ V8_INLINE static CodeEntry* unresolved_entry() {
+ return kUnresolvedEntry.Pointer();
+ }
+
private:
+ struct ProgramEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+ struct IdleEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+ struct GCEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+ struct UnresolvedEntryCreateTrait {
+ static CodeEntry* Create();
+ };
+
+ static base::LazyDynamicInstance<CodeEntry, ProgramEntryCreateTrait>::type
+ kProgramEntry;
+ static base::LazyDynamicInstance<CodeEntry, IdleEntryCreateTrait>::type
+ kIdleEntry;
+ static base::LazyDynamicInstance<CodeEntry, GCEntryCreateTrait>::type
+ kGCEntry;
+ static base::LazyDynamicInstance<CodeEntry, UnresolvedEntryCreateTrait>::type
+ kUnresolvedEntry;
+
class TagField : public BitField<Logger::LogEventsAndTags, 0, 8> {};
- class BuiltinIdField : public BitField<Builtins::Name, 8, 8> {};
+ class BuiltinIdField : public BitField<Builtins::Name, 8, 24> {};
uint32_t bit_field_;
const char* name_prefix_;
@@ -125,13 +167,12 @@ class CodeEntry {
const char* bailout_reason_;
const char* deopt_reason_;
SourcePosition deopt_position_;
- size_t pc_offset_;
+ int deopt_id_;
JITLineInfoTable* line_info_;
Address instruction_start_;
// Should be an unordered_map, but it doesn't currently work on Win & MacOS.
std::map<int, std::vector<CodeEntry*>> inline_locations_;
-
- std::vector<InlinedFunctionInfo> inlined_function_infos_;
+ std::map<int, std::vector<DeoptInlinedFrame>> deopt_inlined_frames_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -179,10 +220,10 @@ class ProfileNode {
CodeEntry* entry_;
unsigned self_ticks_;
// Mapping from CodeEntry* to ProfileNode*
- HashMap children_;
+ base::HashMap children_;
List<ProfileNode*> children_list_;
unsigned id_;
- HashMap line_ticks_;
+ base::HashMap line_ticks_;
std::vector<CpuProfileDeoptInfo> deopt_infos_;
@@ -219,7 +260,7 @@ class ProfileTree {
Isolate* isolate_;
unsigned next_function_id_;
- HashMap function_ids_;
+ base::HashMap function_ids_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
@@ -227,7 +268,7 @@ class ProfileTree {
class CpuProfile {
public:
- CpuProfile(Isolate* isolate, const char* title, bool record_samples);
+ CpuProfile(CpuProfiler* profiler, const char* title, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
@@ -245,6 +286,7 @@ class CpuProfile {
base::TimeTicks start_time() const { return start_time_; }
base::TimeTicks end_time() const { return end_time_; }
+ CpuProfiler* cpu_profiler() const { return profiler_; }
void UpdateTicksScale();
@@ -258,20 +300,18 @@ class CpuProfile {
List<ProfileNode*> samples_;
List<base::TimeTicks> timestamps_;
ProfileTree top_down_;
+ CpuProfiler* const profiler_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
-
class CodeMap {
public:
CodeMap() {}
- ~CodeMap();
+
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
CodeEntry* FindEntry(Address addr);
- int GetSharedId(Address addr);
-
void Print();
private:
@@ -282,61 +322,26 @@ class CodeMap {
unsigned size;
};
- struct CodeTreeConfig {
- typedef Address Key;
- typedef CodeEntryInfo Value;
- static const Key kNoKey;
- static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
- static int Compare(const Key& a, const Key& b) {
- return a < b ? -1 : (a > b ? 1 : 0);
- }
- };
- typedef SplayTree<CodeTreeConfig> CodeTree;
-
- class CodeTreePrinter {
- public:
- void Call(const Address& key, const CodeEntryInfo& value);
- };
-
void DeleteAllCoveredCode(Address start, Address end);
- CodeTree tree_;
+ std::map<Address, CodeEntryInfo> code_map_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
};
-
class CpuProfilesCollection {
public:
- explicit CpuProfilesCollection(Heap* heap);
+ explicit CpuProfilesCollection(Isolate* isolate);
~CpuProfilesCollection();
+ void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
bool StartProfiling(const char* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
List<CpuProfile*>* profiles() { return &finished_profiles_; }
- const char* GetName(Name* name) {
- return function_and_resource_names_.GetName(name);
- }
- const char* GetName(int args_count) {
- return function_and_resource_names_.GetName(args_count);
- }
- const char* GetFunctionName(Name* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
- const char* GetFunctionName(const char* name) {
- return function_and_resource_names_.GetFunctionName(name);
- }
+ const char* GetName(Name* name) { return resource_names_.GetName(name); }
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
- CodeEntry* NewCodeEntry(
- Logger::LogEventsAndTags tag, const char* name,
- const char* name_prefix = CodeEntry::kEmptyNamePrefix,
- const char* resource_name = CodeEntry::kEmptyResourceName,
- int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
- int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
- JITLineInfoTable* line_info = NULL, Address instruction_start = NULL);
-
// Called from profile generator thread.
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
const std::vector<CodeEntry*>& path,
@@ -346,11 +351,9 @@ class CpuProfilesCollection {
static const int kMaxSimultaneousProfiles = 100;
private:
- StringsStorage function_and_resource_names_;
- List<CodeEntry*> code_entries_;
+ StringsStorage resource_names_;
List<CpuProfile*> finished_profiles_;
-
- Isolate* isolate_;
+ CpuProfiler* profiler_;
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
@@ -368,22 +371,11 @@ class ProfileGenerator {
CodeMap* code_map() { return &code_map_; }
- static const char* const kProgramEntryName;
- static const char* const kIdleEntryName;
- static const char* const kGarbageCollectorEntryName;
- // Used to represent frames for which we have no reliable way to
- // detect function.
- static const char* const kUnresolvedFunctionName;
-
private:
CodeEntry* EntryForVMState(StateTag tag);
CpuProfilesCollection* profiles_;
CodeMap code_map_;
- CodeEntry* program_entry_;
- CodeEntry* idle_entry_;
- CodeEntry* gc_entry_;
- CodeEntry* unresolved_entry_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
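The header widens BuiltinIdField from 8 to 24 bits inside the 32-bit bit_field_, with TagField keeping bits [0, 8). A minimal sketch of that encode/decode packing, simplified from V8's BitField template:

    #include <cstdint>

    template <int kShift, int kBits>
    struct BitField {
      static constexpr std::uint32_t kMask =
          ((std::uint32_t{1} << kBits) - 1) << kShift;
      static std::uint32_t update(std::uint32_t previous, std::uint32_t value) {
        return (previous & ~kMask) | (value << kShift);
      }
      static std::uint32_t decode(std::uint32_t field) {
        return (field & kMask) >> kShift;
      }
    };

    using TagField = BitField<0, 8>;         // bits [0, 8)
    using BuiltinIdField = BitField<8, 24>;  // bits [8, 32), previously 8 wide

    static_assert((TagField::kMask & BuiltinIdField::kMask) == 0,
                  "fields must not overlap");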
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
new file mode 100644
index 0000000000..7ce874e6c1
--- /dev/null
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -0,0 +1,335 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/profiler-listener.h"
+
+#include "src/deoptimizer.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/profile-generator-inl.h"
+#include "src/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+
+ProfilerListener::ProfilerListener(Isolate* isolate)
+ : function_and_resource_names_(isolate->heap()) {}
+
+ProfilerListener::~ProfilerListener() {
+ for (auto code_entry : code_entries_) {
+ delete code_entry;
+ }
+}
+
+void ProfilerListener::CallbackEvent(Name* name, Address entry_point) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry = NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name));
+ rec->size = 1;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, const char* name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, Name* name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code,
+ SharedFunctionInfo* shared,
+ Name* script_name) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
+ GetName(InferScriptName(script_name, shared)),
+ CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+ NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->entry->FillFunctionInfo(shared);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* abstract_code,
+ SharedFunctionInfo* shared,
+ Name* script_name, int line,
+ int column) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = abstract_code->address();
+ Script* script = Script::cast(shared->script());
+ JITLineInfoTable* line_table = NULL;
+ if (script) {
+ line_table = new JITLineInfoTable();
+ int offset = abstract_code->IsCode() ? Code::kHeaderSize
+ : BytecodeArray::kHeaderSize;
+ int start_position = shared->start_position();
+ int end_position = shared->end_position();
+ for (SourcePositionTableIterator it(abstract_code->source_position_table());
+ !it.done(); it.Advance()) {
+ int position = it.source_position();
+ // TODO(alph): in case of inlining the position may correspond to the
+ // source code of an inlined function. Do not collect positions that fall
+ // beyond the function source code. There's however a chance the
+ // inlined function has similar positions but in another script. So
+ // the proper fix is to store script_id in some form along with the
+ // inlined function positions.
+ if (position < start_position || position >= end_position) continue;
+ int line_number = script->GetLineNumber(position) + 1;
+ int pc_offset = it.code_offset() + offset;
+ line_table->SetPosition(pc_offset, line_number);
+ }
+ }
+ rec->entry = NewCodeEntry(
+ tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
+ GetName(InferScriptName(script_name, shared)), line, column, line_table,
+ abstract_code->instruction_start());
+ RecordInliningInfo(rec->entry, abstract_code);
+ RecordDeoptInlinedFrames(rec->entry, abstract_code);
+ rec->entry->FillFunctionInfo(shared);
+ rec->size = abstract_code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, int args_count) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ tag, GetName(args_count), "args_count: ", CodeEntry::kEmptyResourceName,
+ CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
+ NULL, code->instruction_start());
+ RecordInliningInfo(rec->entry, code);
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeMoveEvent(AbstractCode* from, Address to) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
+ CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
+ rec->from = from->address();
+ rec->to = to;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
+ CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
+ rec->start = code->address();
+ rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::CodeDeoptEvent(Code* code, Address pc,
+ int fp_to_sp_delta) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
+ CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
+ rec->start = code->address();
+ rec->deopt_reason = DeoptimizeReasonToString(info.deopt_reason);
+ rec->position = info.position;
+ rec->deopt_id = info.deopt_id;
+ rec->pc = reinterpret_cast<void*>(pc);
+ rec->fp_to_sp_delta = fp_to_sp_delta;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::GetterCallbackEvent(Name* name, Address entry_point) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry =
+ NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name), "get ");
+ rec->size = 1;
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
+ String* source) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = code->address();
+ rec->entry = NewCodeEntry(
+ CodeEventListener::REG_EXP_TAG, GetName(source), "RegExp: ",
+ CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ rec->size = code->ExecutableSize();
+ DispatchCodeEvent(evt_rec);
+}
+
+void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
+ CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
+ CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
+ rec->start = entry_point;
+ rec->entry =
+ NewCodeEntry(CodeEventListener::CALLBACK_TAG, GetName(name), "set ");
+ rec->size = 1;
+ DispatchCodeEvent(evt_rec);
+}
+
+Name* ProfilerListener::InferScriptName(Name* name, SharedFunctionInfo* info) {
+ if (name->IsString() && String::cast(name)->length()) return name;
+ if (!info->script()->IsScript()) return name;
+ Object* source_url = Script::cast(info->script())->source_url();
+ return source_url->IsName() ? Name::cast(source_url) : name;
+}
+
+void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
+ AbstractCode* abstract_code) {
+ if (!abstract_code->IsCode()) return;
+ Code* code = abstract_code->GetCode();
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) return;
+ DeoptimizationInputData* deopt_input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int deopt_count = deopt_input_data->DeoptCount();
+ for (int i = 0; i < deopt_count; i++) {
+ int pc_offset = deopt_input_data->Pc(i)->value();
+ if (pc_offset == -1) continue;
+ int translation_index = deopt_input_data->TranslationIndex(i)->value();
+ TranslationIterator it(deopt_input_data->TranslationByteArray(),
+ translation_index);
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, opcode);
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ int depth = 0;
+ std::vector<CodeEntry*> inline_stack;
+ while (it.HasNext() &&
+ Translation::BEGIN !=
+ (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+ if (opcode != Translation::JS_FRAME &&
+ opcode != Translation::INTERPRETED_FRAME) {
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ continue;
+ }
+ it.Next(); // Skip ast_id
+ int shared_info_id = it.Next();
+ it.Next(); // Skip height
+ SharedFunctionInfo* shared_info = SharedFunctionInfo::cast(
+ deopt_input_data->LiteralArray()->get(shared_info_id));
+ if (!depth++) continue; // Skip the current function itself.
+ CodeEntry* inline_entry = new CodeEntry(
+ entry->tag(), GetFunctionName(shared_info->DebugName()),
+ CodeEntry::kEmptyNamePrefix, entry->resource_name(),
+ CpuProfileNode::kNoLineNumberInfo,
+ CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
+ inline_entry->FillFunctionInfo(shared_info);
+ inline_stack.push_back(inline_entry);
+ }
+ if (!inline_stack.empty()) {
+ entry->AddInlineStack(pc_offset, inline_stack);
+ DCHECK(inline_stack.empty());
+ }
+ }
+}
+
+void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
+ AbstractCode* abstract_code) {
+ if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
+ Code* code = abstract_code->GetCode();
+ DeoptimizationInputData* deopt_input_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
+ for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
+ RelocInfo* reloc_info = rit.rinfo();
+ DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
+ int deopt_id = static_cast<int>(reloc_info->data());
+ int translation_index =
+ deopt_input_data->TranslationIndex(deopt_id)->value();
+ TranslationIterator it(deopt_input_data->TranslationByteArray(),
+ translation_index);
+ Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+ DCHECK_EQ(Translation::BEGIN, opcode);
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
+ while (it.HasNext() &&
+ Translation::BEGIN !=
+ (opcode = static_cast<Translation::Opcode>(it.Next()))) {
+ if (opcode != Translation::JS_FRAME &&
+ opcode != Translation::INTERPRETED_FRAME) {
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
+ continue;
+ }
+ BailoutId ast_id = BailoutId(it.Next());
+ int shared_info_id = it.Next();
+ it.Next(); // Skip height
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(
+ deopt_input_data->LiteralArray()->get(shared_info_id));
+ int source_position;
+ if (opcode == Translation::INTERPRETED_FRAME) {
+ source_position =
+ Deoptimizer::ComputeSourcePositionFromBytecodeArray(shared, ast_id);
+ } else {
+ DCHECK(opcode == Translation::JS_FRAME);
+ source_position =
+ Deoptimizer::ComputeSourcePositionFromBaselineCode(shared, ast_id);
+ }
+ int script_id = v8::UnboundScript::kNoScriptId;
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ script_id = script->id();
+ }
+ CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
+ inlined_frames.push_back(frame);
+ }
+ if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
+ entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
+ DCHECK(inlined_frames.empty());
+ }
+ }
+}
+
+CodeEntry* ProfilerListener::NewCodeEntry(
+ CodeEventListener::LogEventsAndTags tag, const char* name,
+ const char* name_prefix, const char* resource_name, int line_number,
+ int column_number, JITLineInfoTable* line_info, Address instruction_start) {
+ CodeEntry* code_entry =
+ new CodeEntry(tag, name, name_prefix, resource_name, line_number,
+ column_number, line_info, instruction_start);
+ code_entries_.push_back(code_entry);
+ return code_entry;
+}
+
+void ProfilerListener::AddObserver(CodeEventObserver* observer) {
+ if (std::find(observers_.begin(), observers_.end(), observer) !=
+ observers_.end())
+ return;
+ observers_.push_back(observer);
+}
+
+void ProfilerListener::RemoveObserver(CodeEventObserver* observer) {
+ auto it = std::find(observers_.begin(), observers_.end(), observer);
+ if (it == observers_.end()) return;
+ observers_.erase(it);
+}
+
+} // namespace internal
+} // namespace v8
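The new ProfilerListener fans code events out to registered CodeEventObservers. A minimal standalone sketch of that duplicate-free register/unregister/broadcast plumbing, with an int standing in for CodeEventsContainer:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Observer {  // hypothetical stand-in for CodeEventObserver
      virtual void CodeEventHandler(int evt_rec) = 0;
      virtual ~Observer() {}
    };

    std::vector<Observer*> observers;

    void AddObserver(Observer* observer) {
      if (std::find(observers.begin(), observers.end(), observer) !=
          observers.end()) return;  // already registered
      observers.push_back(observer);
    }

    void RemoveObserver(Observer* observer) {
      auto it = std::find(observers.begin(), observers.end(), observer);
      if (it != observers.end()) observers.erase(it);
    }

    void DispatchCodeEvent(int evt_rec) {
      for (auto* observer : observers) observer->CodeEventHandler(evt_rec);
    }

    struct Printer : Observer {
      void CodeEventHandler(int evt_rec) override { std::printf("%d\n", evt_rec); }
    };

    int main() {
      Printer p;
      AddObserver(&p);
      AddObserver(&p);        // ignored: already registered
      DispatchCodeEvent(42);  // prints 42 once
      RemoveObserver(&p);
    }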
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
new file mode 100644
index 0000000000..7e24ceaa86
--- /dev/null
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -0,0 +1,97 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_PROFILER_LISTENER_H_
+#define V8_PROFILER_PROFILER_LISTENER_H_
+
+#include <vector>
+
+#include "src/code-events.h"
+#include "src/profiler/profile-generator.h"
+
+namespace v8 {
+namespace internal {
+
+class CodeEventsContainer;
+
+class CodeEventObserver {
+ public:
+ virtual void CodeEventHandler(const CodeEventsContainer& evt_rec) = 0;
+ virtual ~CodeEventObserver() {}
+};
+
+class ProfilerListener : public CodeEventListener {
+ public:
+ explicit ProfilerListener(Isolate* isolate);
+ ~ProfilerListener() override;
+
+ void CallbackEvent(Name* name, Address entry_point) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, const char* comment) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, Name* name) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
+ Name* script_name) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, SharedFunctionInfo* shared,
+ Name* script_name, int line, int column) override;
+ void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
+ AbstractCode* code, int args_count) override;
+ void CodeMovingGCEvent() override {}
+ void CodeMoveEvent(AbstractCode* from, Address to) override;
+ void CodeDisableOptEvent(AbstractCode* code,
+ SharedFunctionInfo* shared) override;
+ void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) override;
+ void GetterCallbackEvent(Name* name, Address entry_point) override;
+ void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
+ void SetterCallbackEvent(Name* name, Address entry_point) override;
+ void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+
+ CodeEntry* NewCodeEntry(
+ CodeEventListener::LogEventsAndTags tag, const char* name,
+ const char* name_prefix = CodeEntry::kEmptyNamePrefix,
+ const char* resource_name = CodeEntry::kEmptyResourceName,
+ int line_number = v8::CpuProfileNode::kNoLineNumberInfo,
+ int column_number = v8::CpuProfileNode::kNoColumnNumberInfo,
+ JITLineInfoTable* line_info = NULL, Address instruction_start = NULL);
+
+ void AddObserver(CodeEventObserver* observer);
+ void RemoveObserver(CodeEventObserver* observer);
+ V8_INLINE bool HasObservers() { return !observers_.empty(); }
+
+ const char* GetName(Name* name) {
+ return function_and_resource_names_.GetName(name);
+ }
+ const char* GetName(int args_count) {
+ return function_and_resource_names_.GetName(args_count);
+ }
+ const char* GetFunctionName(Name* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+ const char* GetFunctionName(const char* name) {
+ return function_and_resource_names_.GetFunctionName(name);
+ }
+
+ private:
+ void RecordInliningInfo(CodeEntry* entry, AbstractCode* abstract_code);
+ void RecordDeoptInlinedFrames(CodeEntry* entry, AbstractCode* abstract_code);
+ Name* InferScriptName(Name* name, SharedFunctionInfo* info);
+ V8_INLINE void DispatchCodeEvent(const CodeEventsContainer& evt_rec) {
+ for (auto observer : observers_) {
+ observer->CodeEventHandler(evt_rec);
+ }
+ }
+
+ StringsStorage function_and_resource_names_;
+ std::vector<CodeEntry*> code_entries_;
+ std::vector<CodeEventObserver*> observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(ProfilerListener);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_PROFILER_LISTENER_H_
diff --git a/deps/v8/src/profiler/sampler.cc b/deps/v8/src/profiler/sampler.cc
deleted file mode 100644
index a34042453c..0000000000
--- a/deps/v8/src/profiler/sampler.cc
+++ /dev/null
@@ -1,898 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/profiler/sampler.h"
-
-#if V8_OS_POSIX && !V8_OS_CYGWIN
-
-#define USE_SIGNALS
-
-#include <errno.h>
-#include <pthread.h>
-#include <signal.h>
-#include <sys/time.h>
-
-#if !V8_OS_QNX && !V8_OS_NACL && !V8_OS_AIX
-#include <sys/syscall.h> // NOLINT
-#endif
-
-#if V8_OS_MACOSX
-#include <mach/mach.h>
-// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
-// and is a typedef for struct sigcontext. There is no uc_mcontext.
-#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \
- !V8_OS_OPENBSD && !V8_OS_NACL
-#include <ucontext.h>
-#endif
-
-#include <unistd.h>
-
-// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
- !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h> // NOLINT
-#endif
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-#include "src/base/win32-headers.h"
-
-#endif
-
-#include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/frames-inl.h"
-#include "src/log.h"
-#include "src/profiler/cpu-profiler-inl.h"
-#include "src/simulator.h"
-#include "src/v8threads.h"
-#include "src/vm-state-inl.h"
-
-
-#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-// Not all versions of Android's C library provide ucontext_t.
-// Detect this and provide custom but compatible definitions. Note that these
-// follow the GLibc naming convention to access register values from
-// mcontext_t.
-//
-// See http://code.google.com/p/android/issues/detail?id=34784
-
-#if defined(__arm__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__aarch64__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint64_t uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__mips__)
-// MIPS version of sigcontext, for Android bionic.
-typedef struct {
- uint32_t regmask;
- uint32_t status;
- uint64_t pc;
- uint64_t gregs[32];
- uint64_t fpregs[32];
- uint32_t acx;
- uint32_t fpc_csr;
- uint32_t fpc_eir;
- uint32_t used_math;
- uint32_t dsp;
- uint64_t mdhi;
- uint64_t mdlo;
- uint32_t hi1;
- uint32_t lo1;
- uint32_t hi2;
- uint32_t lo2;
- uint32_t hi3;
- uint32_t lo3;
-} mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__i386__)
-// x86 version for Android.
-typedef struct {
- uint32_t gregs[19];
- void* fpregs;
- uint32_t oldmask;
- uint32_t cr2;
-} mcontext_t;
-
-typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
-
-#elif defined(__x86_64__)
-// x64 version for Android.
-typedef struct {
- uint64_t gregs[23];
- void* fpregs;
- uint64_t __reserved1[8];
-} mcontext_t;
-
-typedef struct ucontext {
- uint64_t uc_flags;
- struct ucontext *uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
-#endif
-
-#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-class PlatformDataCommon : public Malloced {
- public:
- PlatformDataCommon() : profiled_thread_id_(ThreadId::Current()) {}
- ThreadId profiled_thread_id() { return profiled_thread_id_; }
-
- protected:
- ~PlatformDataCommon() {}
-
- private:
- ThreadId profiled_thread_id_;
-};
-
-
-bool IsSamePage(byte* ptr1, byte* ptr2) {
- const uint32_t kPageSize = 4096;
- uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
- return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
- (reinterpret_cast<uintptr_t>(ptr2) & mask);
-}
-
-
-// Check if the code at the specified address could potentially be
-// frame setup code.
-bool IsNoFrameRegion(Address address) {
- struct Pattern {
- int bytes_count;
- byte bytes[8];
- int offsets[4];
- };
- byte* pc = reinterpret_cast<byte*>(address);
- static Pattern patterns[] = {
-#if V8_HOST_ARCH_IA32
- // push %ebp
- // mov %esp,%ebp
- {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
- // pop %ebp
- // ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
- // pop %ebp
- // ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
-#elif V8_HOST_ARCH_X64
- // pushq %rbp
- // movq %rsp,%rbp
- {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
- // popq %rbp
- // ret N
- {2, {0x5d, 0xc2}, {0, 1, -1}},
- // popq %rbp
- // ret
- {2, {0x5d, 0xc3}, {0, 1, -1}},
-#endif
- {0, {}, {}}
- };
- for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
- for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
- int offset = *offset_ptr;
- if (!offset || IsSamePage(pc, pc - offset)) {
- MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
- if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
- return true;
- } else {
-      // It is not safe to examine bytes on another page, as it might not be
-      // allocated, thus causing a SEGFAULT.
- // Check the pattern part that's on the same page and
- // pessimistically assume it could be the entire pattern match.
- MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
- if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
- return true;
- }
- }
- }
- return false;
-}
-
-} // namespace
-
-#if defined(USE_SIGNALS)
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
- PlatformData() : vm_tid_(pthread_self()) {}
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-// ----------------------------------------------------------------------------
-// Win32 profiler support. On Cygwin we use the same sampler implementation as
-// on Win32.
-
-class Sampler::PlatformData : public PlatformDataCommon {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData()
- : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-#endif
-
-
-#if defined(USE_SIMULATOR)
-class SimulatorHelper {
- public:
- inline bool Init(Isolate* isolate) {
- simulator_ = isolate->thread_local_top()->simulator_;
-    // Check if there is an active simulator.
- return simulator_ != NULL;
- }
-
- inline void FillRegisters(v8::RegisterState* state) {
-#if V8_TARGET_ARCH_ARM
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::sp));
- state->fp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::r11));
-#elif V8_TARGET_ARCH_ARM64
- if (simulator_->sp() == 0 || simulator_->fp() == 0) {
- // It's possible that the simulator is interrupted while it is updating
-      // the sp or fp register. The ARM64 simulator does this in two steps:
-      // first setting it to zero and then setting it to a new value.
-      // Bail out if sp/fp doesn't contain the new value.
- //
- // FIXME: The above doesn't really solve the issue.
-      // If a 64-bit target is executed on a 32-bit host, even the final
-      // write is non-atomic, so it might obtain half of the result.
-      // Moreover, as long as the register-set code uses memcpy (as of now),
-      // it is not guaranteed to be atomic even when both host and target
-      // are of the same bitness.
- return;
- }
- state->pc = reinterpret_cast<Address>(simulator_->pc());
- state->sp = reinterpret_cast<Address>(simulator_->sp());
- state->fp = reinterpret_cast<Address>(simulator_->fp());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::sp));
- state->fp = reinterpret_cast<Address>(simulator_->get_register(
- Simulator::fp));
-#elif V8_TARGET_ARCH_PPC
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
- state->fp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#elif V8_TARGET_ARCH_S390
- if (!simulator_->has_bad_pc()) {
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
- }
- state->sp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
- state->fp =
- reinterpret_cast<Address>(simulator_->get_register(Simulator::fp));
-#endif
- }
-
- private:
- Simulator* simulator_;
-};
-#endif // USE_SIMULATOR
-
-
-#if defined(USE_SIGNALS)
-
-class SignalHandler : public AllStatic {
- public:
- static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
- static void TearDown() { delete mutex_; mutex_ = NULL; }
-
- static void IncreaseSamplerCount() {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (++client_count_ == 1) Install();
- }
-
- static void DecreaseSamplerCount() {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (--client_count_ == 0) Restore();
- }
-
- static bool Installed() {
- return signal_handler_installed_;
- }
-
- private:
- static void Install() {
-#if !V8_OS_NACL
- struct sigaction sa;
- sa.sa_sigaction = &HandleProfilerSignal;
- sigemptyset(&sa.sa_mask);
-#if V8_OS_QNX
- sa.sa_flags = SA_SIGINFO;
-#else
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
-#endif
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-#endif
- }
-
- static void Restore() {
-#if !V8_OS_NACL
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
-#endif
- }
-
-#if !V8_OS_NACL
- static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
-#endif
- // Protects the process wide state below.
- static base::Mutex* mutex_;
- static int client_count_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-};
-
-
-base::Mutex* SignalHandler::mutex_ = NULL;
-int SignalHandler::client_count_ = 0;
-struct sigaction SignalHandler::old_signal_handler_;
-bool SignalHandler::signal_handler_installed_ = false;
-
-
-// As Native Client does not support signal handling, profiling is disabled.
-#if !V8_OS_NACL
-void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
- void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UnsafeCurrent();
- if (isolate == NULL || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL) return;
-
- v8::RegisterState state;
-
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(isolate)) return;
- helper.FillRegisters(&state);
-  // It is possible that the simulator is interrupted while it is updating
-  // the sp or fp register. The ARM64 simulator does this in two steps:
-  // first setting it to zero and then setting it to the new value.
-  // Bail out if sp/fp doesn't contain the new value.
- if (state.sp == 0 || state.fp == 0) return;
-#else
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !(V8_OS_OPENBSD || (V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_S390)))
- mcontext_t& mcontext = ucontext->uc_mcontext;
-#endif
-#if V8_OS_LINUX
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
- // Old GLibc ARM versions used a gregs[] array to access the register
- // values from mcontext_t.
- state.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
- state.pc = reinterpret_cast<Address>(mcontext.arm_pc);
- state.sp = reinterpret_cast<Address>(mcontext.arm_sp);
- state.fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
-#elif V8_HOST_ARCH_ARM64
- state.pc = reinterpret_cast<Address>(mcontext.pc);
- state.sp = reinterpret_cast<Address>(mcontext.sp);
- // FP is an alias for x29.
- state.fp = reinterpret_cast<Address>(mcontext.regs[29]);
-#elif V8_HOST_ARCH_MIPS
- state.pc = reinterpret_cast<Address>(mcontext.pc);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#elif V8_HOST_ARCH_MIPS64
- state.pc = reinterpret_cast<Address>(mcontext.pc);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#elif V8_HOST_ARCH_PPC
- state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
- state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
- state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
-#elif V8_HOST_ARCH_S390
-#if V8_TARGET_ARCH_32_BIT
- // 31-bit target will have bit 0 (MSB) of the PSW set to denote addressing
- // mode. This bit needs to be masked out to resolve actual address.
- state.pc =
- reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr & 0x7FFFFFFF);
-#else
- state.pc = reinterpret_cast<Address>(ucontext->uc_mcontext.psw.addr);
-#endif // V8_TARGET_ARCH_32_BIT
- state.sp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[15]);
- state.fp = reinterpret_cast<Address>(ucontext->uc_mcontext.gregs[11]);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_MACOSX
-#if V8_HOST_ARCH_X64
-#if __DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
- state.sp = reinterpret_cast<Address>(mcontext->__ss.__rsp);
- state.fp = reinterpret_cast<Address>(mcontext->__ss.__rbp);
-#else // !__DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->ss.rip);
- state.sp = reinterpret_cast<Address>(mcontext->ss.rsp);
- state.fp = reinterpret_cast<Address>(mcontext->ss.rbp);
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
-#if __DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->__ss.__eip);
- state.sp = reinterpret_cast<Address>(mcontext->__ss.__esp);
- state.fp = reinterpret_cast<Address>(mcontext->__ss.__ebp);
-#else // !__DARWIN_UNIX03
- state.pc = reinterpret_cast<Address>(mcontext->ss.eip);
- state.sp = reinterpret_cast<Address>(mcontext->ss.esp);
- state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
-#endif // __DARWIN_UNIX03
-#endif // V8_HOST_ARCH_IA32
-#elif V8_OS_FREEBSD
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
- state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
- state.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(mcontext.mc_rip);
- state.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- state.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
- state.pc = reinterpret_cast<Address>(mcontext.mc_r15);
- state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
- state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_NETBSD
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
- state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
- state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
- state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
- state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_OPENBSD
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
- state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
- state.fp = reinterpret_cast<Address>(ucontext->sc_ebp);
-#elif V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(ucontext->sc_rip);
- state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
- state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_SOLARIS
- state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
- state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
- state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#elif V8_OS_QNX
-#if V8_HOST_ARCH_IA32
- state.pc = reinterpret_cast<Address>(mcontext.cpu.eip);
- state.sp = reinterpret_cast<Address>(mcontext.cpu.esp);
- state.fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
-#elif V8_HOST_ARCH_ARM
- state.pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
- state.sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
- state.fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
-#endif // V8_HOST_ARCH_*
-#elif V8_OS_AIX
- state.pc = reinterpret_cast<Address>(mcontext.jmp_context.iar);
- state.sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]);
- state.fp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[31]);
-#endif // V8_OS_AIX
-#endif // USE_SIMULATOR
- sampler->SampleStack(state);
-}
-#endif // V8_OS_NACL
-
-#endif
-
-
-class SamplerThread : public base::Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
- static void TearDown() { delete mutex_; mutex_ = NULL; }
-
- static void AddActiveSampler(Sampler* sampler) {
- bool need_to_start = false;
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (instance_ == NULL) {
-      // Start a thread that will send the SIGPROF signal to VM threads
-      // when CPU profiling is enabled.
- instance_ = new SamplerThread(sampler->interval());
- need_to_start = true;
- }
-
- DCHECK(sampler->IsActive());
- DCHECK(!instance_->active_samplers_.Contains(sampler));
- DCHECK(instance_->interval_ == sampler->interval());
- instance_->active_samplers_.Add(sampler);
-
- if (need_to_start) instance_->StartSynchronously();
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- SamplerThread* instance_to_remove = NULL;
- {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
-
- DCHECK(sampler->IsActive());
- bool removed = instance_->active_samplers_.RemoveElement(sampler);
- DCHECK(removed);
- USE(removed);
-
- // We cannot delete the instance immediately as we need to Join() the
- // thread but we are holding mutex_ and the thread may try to acquire it.
- if (instance_->active_samplers_.is_empty()) {
- instance_to_remove = instance_;
- instance_ = NULL;
- }
- }
-
- if (!instance_to_remove) return;
- instance_to_remove->Join();
- delete instance_to_remove;
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- while (true) {
- {
- base::LockGuard<base::Mutex> lock_guard(mutex_);
- if (active_samplers_.is_empty()) break;
-      // When CPU profiling is enabled, both JavaScript and C++ code are
-      // profiled. We must not suspend.
- for (int i = 0; i < active_samplers_.length(); ++i) {
- Sampler* sampler = active_samplers_.at(i);
- if (!sampler->IsProfiling()) continue;
- sampler->DoSample();
- }
- }
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(interval_));
- }
- }
-
- private:
- // Protects the process wide state below.
- static base::Mutex* mutex_;
- static SamplerThread* instance_;
-
- const int interval_;
- List<Sampler*> active_samplers_;
-
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-base::Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
-//
-// StackTracer implementation
-//
-DISABLE_ASAN void TickSample::Init(Isolate* isolate,
- const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame,
- bool update_stats) {
- timestamp = base::TimeTicks::HighResolutionNow();
- pc = reinterpret_cast<Address>(regs.pc);
- state = isolate->current_vm_state();
- this->update_stats = update_stats;
-
- // Avoid collecting traces while doing GC.
- if (state == GC) return;
-
- Address js_entry_sp = isolate->js_entry_sp();
- if (js_entry_sp == 0) return; // Not executing JS now.
-
- if (pc && IsNoFrameRegion(pc)) {
- // Can't collect stack. Mark the sample as spoiled.
- timestamp = base::TimeTicks();
- pc = 0;
- return;
- }
-
- ExternalCallbackScope* scope = isolate->external_callback_scope();
- Address handler = Isolate::handler(isolate->thread_local_top());
-  // If there is a handler on top of the external callback scope, then
-  // we have already entered JavaScript again and the external callback
- // is not the top function.
- if (scope && scope->scope_address() < handler) {
- external_callback_entry = *scope->callback_entrypoint_address();
- has_external_callback = true;
- } else {
-    // The sp register may point at an arbitrary place in memory; make
-    // sure MSAN doesn't complain about it.
- MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
-    // Sample a potential return address value for frameless invocation of
-    // stubs (we'll figure out later if this value makes sense).
- tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
- has_external_callback = false;
- }
-
- SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
- reinterpret_cast<Address>(regs.sp), js_entry_sp);
- top_frame_type = it.top_frame_type();
-
- SampleInfo info;
- GetStackSample(isolate, regs, record_c_entry_frame,
- reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
- frames_count = static_cast<unsigned>(info.frames_count);
- if (!frames_count) {
- // It is executing JS but failed to collect a stack trace.
- // Mark the sample as spoiled.
- timestamp = base::TimeTicks();
- pc = 0;
- }
-}
-
-
-void TickSample::GetStackSample(Isolate* isolate, const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame,
- void** frames, size_t frames_limit,
- v8::SampleInfo* sample_info) {
- sample_info->frames_count = 0;
- sample_info->vm_state = isolate->current_vm_state();
- if (sample_info->vm_state == GC) return;
-
- Address js_entry_sp = isolate->js_entry_sp();
- if (js_entry_sp == 0) return; // Not executing JS now.
-
- SafeStackFrameIterator it(isolate, reinterpret_cast<Address>(regs.fp),
- reinterpret_cast<Address>(regs.sp), js_entry_sp);
- size_t i = 0;
- if (record_c_entry_frame == kIncludeCEntryFrame && !it.done() &&
- it.top_frame_type() == StackFrame::EXIT) {
- frames[i++] = isolate->c_function();
- }
- while (!it.done() && i < frames_limit) {
- if (it.frame()->is_interpreted()) {
- // For interpreted frames use the bytecode array pointer as the pc.
- InterpretedFrame* frame = static_cast<InterpretedFrame*>(it.frame());
-      // Since the sampler can interrupt execution at any point, the
-      // bytecode_array might be garbage, so don't dereference it.
- Address bytecode_array =
- reinterpret_cast<Address>(frame->GetBytecodeArray()) - kHeapObjectTag;
- frames[i++] = bytecode_array + BytecodeArray::kHeaderSize +
- frame->GetBytecodeOffset();
- } else {
- frames[i++] = it.frame()->pc();
- }
- it.Advance();
- }
- sample_info->frames_count = i;
-}
-
-
-void Sampler::SetUp() {
-#if defined(USE_SIGNALS)
- SignalHandler::SetUp();
-#endif
- SamplerThread::SetUp();
-}
-
-
-void Sampler::TearDown() {
- SamplerThread::TearDown();
-#if defined(USE_SIGNALS)
- SignalHandler::TearDown();
-#endif
-}
-
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- has_processing_thread_(false),
- active_(false),
- is_counting_samples_(false),
- js_sample_count_(0),
- external_sample_count_(0) {
- data_ = new PlatformData;
-}
-
-Sampler::~Sampler() {
- DCHECK(!IsActive());
- delete data_;
-}
-
-void Sampler::Start() {
- DCHECK(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- DCHECK(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
-void Sampler::IncreaseProfilingDepth() {
- base::NoBarrier_AtomicIncrement(&profiling_, 1);
-#if defined(USE_SIGNALS)
- SignalHandler::IncreaseSamplerCount();
-#endif
-}
-
-
-void Sampler::DecreaseProfilingDepth() {
-#if defined(USE_SIGNALS)
- SignalHandler::DecreaseSamplerCount();
-#endif
- base::NoBarrier_AtomicIncrement(&profiling_, -1);
-}
-
-
-void Sampler::SampleStack(const v8::RegisterState& state) {
- TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
- TickSample sample_obj;
- if (sample == NULL) sample = &sample_obj;
- sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
- if (is_counting_samples_ && !sample->timestamp.IsNull()) {
- if (sample->state == JS) ++js_sample_count_;
- if (sample->state == EXTERNAL) ++external_sample_count_;
- }
- Tick(sample);
- if (sample != &sample_obj) {
- isolate_->cpu_profiler()->FinishTickSample();
- }
-}
-
-
-#if defined(USE_SIGNALS)
-
-void Sampler::DoSample() {
- if (!SignalHandler::Installed()) return;
- pthread_kill(platform_data()->vm_tid(), SIGPROF);
-}
-
-#elif V8_OS_WIN || V8_OS_CYGWIN
-
-void Sampler::DoSample() {
- HANDLE profiled_thread = platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
-#if defined(USE_SIMULATOR)
- SimulatorHelper helper;
- if (!helper.Init(isolate())) return;
-#endif
-
- const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
- v8::RegisterState state;
-#if defined(USE_SIMULATOR)
- helper.FillRegisters(&state);
-#else
-#if V8_HOST_ARCH_X64
- state.pc = reinterpret_cast<Address>(context.Rip);
- state.sp = reinterpret_cast<Address>(context.Rsp);
- state.fp = reinterpret_cast<Address>(context.Rbp);
-#else
- state.pc = reinterpret_cast<Address>(context.Eip);
- state.sp = reinterpret_cast<Address>(context.Esp);
- state.fp = reinterpret_cast<Address>(context.Ebp);
-#endif
-#endif // USE_SIMULATOR
- SampleStack(state);
- }
- ResumeThread(profiled_thread);
-}
-
-#endif // USE_SIGNALS
-
-
-} // namespace internal
-} // namespace v8
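
The deleted sampler.cc combined two concerns that the new tick-sample.cc below takes apart: platform register capture and the sampling loop. Its POSIX half worked by having a dedicated SamplerThread raise SIGPROF on the VM thread via pthread_kill, with the signal handler reading pc/sp/fp out of the interrupted thread's ucontext. A minimal stand-alone sketch of just that plumbing, assuming a Linux/x86-64 host and using no V8 types:

    // Stand-alone sketch of SIGPROF-based sampling (Linux/x86-64 assumed).
    // Illustrative only: V8's handler also checks the isolate, the Locker,
    // and simulator state before touching the context.
    #include <pthread.h>
    #include <signal.h>
    #include <ucontext.h>
    #include <unistd.h>
    #include <atomic>
    #include <cstdio>

    static std::atomic<bool> running{true};

    // Runs on the profiled thread; pull the instruction pointer out of the
    // signal context, as HandleProfilerSignal did per platform.
    static void HandleProf(int sig, siginfo_t*, void* context) {
      if (sig != SIGPROF) return;
      ucontext_t* uc = static_cast<ucontext_t*>(context);
      void* pc = reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_RIP]);
      fprintf(stderr, "sample pc=%p\n", pc);  // Demo only: not async-signal-safe.
    }

    // The "SamplerThread": signal the VM thread at a fixed interval,
    // mirroring SamplerThread::Run() calling Sampler::DoSample().
    static void* SamplerLoop(void* arg) {
      pthread_t vm_tid = *static_cast<pthread_t*>(arg);
      while (running.load()) {
        pthread_kill(vm_tid, SIGPROF);
        usleep(1000);  // ~1ms, a typical profiling interval.
      }
      return nullptr;
    }

    int main() {
      struct sigaction sa = {};
      sa.sa_sigaction = &HandleProf;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;  // As SignalHandler::Install() sets.
      sigaction(SIGPROF, &sa, nullptr);

      pthread_t self = pthread_self(), sampler;
      pthread_create(&sampler, nullptr, &SamplerLoop, &self);
      for (volatile long i = 0; i < 100000000; ++i) {}  // Busy "VM" work.
      running = false;
      pthread_join(sampler, nullptr);
      return 0;
    }

The real handler additionally verifies that the isolate is fully entered, respects v8::Locker, and routes through SimulatorHelper when V8 runs on a simulated architecture.
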
diff --git a/deps/v8/src/profiler/sampler.h b/deps/v8/src/profiler/sampler.h
deleted file mode 100644
index dcd1255d75..0000000000
--- a/deps/v8/src/profiler/sampler.h
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PROFILER_SAMPLER_H_
-#define V8_PROFILER_SAMPLER_H_
-
-#include "include/v8.h"
-
-#include "src/base/atomicops.h"
-#include "src/base/platform/time.h"
-#include "src/frames.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-
-// ----------------------------------------------------------------------------
-// Sampler
-//
-// A sampler periodically samples the state of the VM and optionally
-// (if used for profiling) the program counter and stack pointer for
-// the thread that created it.
-
-// TickSample captures the information collected for each sample.
-struct TickSample {
- // Internal profiling (with --prof + tools/$OS-tick-processor) wants to
- // include the runtime function we're calling. Externally exposed tick
- // samples don't care.
- enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
-
- TickSample()
- : state(OTHER),
- pc(NULL),
- external_callback_entry(NULL),
- frames_count(0),
- has_external_callback(false),
- update_stats(true),
- top_frame_type(StackFrame::NONE) {}
- void Init(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame, bool update_stats);
- static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame,
- void** frames, size_t frames_limit,
- v8::SampleInfo* sample_info);
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- union {
- Address tos; // Top stack value (*sp).
- Address external_callback_entry;
- };
- static const unsigned kMaxFramesCountLog2 = 8;
- static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
- Address stack[kMaxFramesCount]; // Call stack.
- base::TimeTicks timestamp;
- unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
- bool has_external_callback : 1;
- bool update_stats : 1; // Whether the sample should update aggregated stats.
- StackFrame::Type top_frame_type : 5;
-};
-
-class Sampler {
- public:
- // Initializes the Sampler support. Called once at VM startup.
- static void SetUp();
- static void TearDown();
-
- // Initialize sampler.
- Sampler(Isolate* isolate, int interval);
- virtual ~Sampler();
-
- Isolate* isolate() const { return isolate_; }
- int interval() const { return interval_; }
-
- // Performs stack sampling.
- void SampleStack(const v8::RegisterState& regs);
-
- // Start and stop sampler.
- void Start();
- void Stop();
-
-  // Whether the sampling thread should use this Sampler for CPU profiling.
- bool IsProfiling() const {
- return base::NoBarrier_Load(&profiling_) > 0 &&
- !base::NoBarrier_Load(&has_processing_thread_);
- }
- void IncreaseProfilingDepth();
- void DecreaseProfilingDepth();
-
- // Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return base::NoBarrier_Load(&active_); }
-
- void DoSample();
-  // If true, the next sample must be initiated on the profiler event
-  // processor thread right after the latest sample is processed.
- void SetHasProcessingThread(bool value) {
- base::NoBarrier_Store(&has_processing_thread_, value);
- }
-
- // Used in tests to make sure that stack sampling is performed.
- unsigned js_sample_count() const { return js_sample_count_; }
- unsigned external_sample_count() const { return external_sample_count_; }
- void StartCountingSamples() {
- js_sample_count_ = 0;
- external_sample_count_ = 0;
- is_counting_samples_ = true;
- }
-
- class PlatformData;
- PlatformData* platform_data() const { return data_; }
-
- protected:
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
-
- private:
- void SetActive(bool value) { base::NoBarrier_Store(&active_, value); }
-
- Isolate* isolate_;
- const int interval_;
- base::Atomic32 profiling_;
- base::Atomic32 has_processing_thread_;
- base::Atomic32 active_;
- PlatformData* data_; // Platform specific data.
- // Counts stack samples taken in various VM states.
- bool is_counting_samples_;
- unsigned js_sample_count_;
- unsigned external_sample_count_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PROFILER_SAMPLER_H_
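
Note how the deleted header packed each sample: frames_count, the two flags, and top_frame_type share one word via C++ bitfields, which is why kMaxFramesCount is (1 << 8) - 1 = 255 rather than a round 256: the count itself must fit in 8 bits. A stand-alone illustration of that layout:

    // Stand-alone illustration of the TickSample bitfield packing from the
    // deleted header: an 8-bit frames_count caps the stack at 255 frames.
    #include <cstdint>

    struct DemoSample {
      static const unsigned kMaxFramesCountLog2 = 8;
      static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
      void* stack[kMaxFramesCount];                 // Captured call stack.
      unsigned frames_count : kMaxFramesCountLog2;  // 0..255 frames.
      bool has_external_callback : 1;
      bool update_stats : 1;
    };

    int main() {
      DemoSample s = {};
      s.frames_count = DemoSample::kMaxFramesCount;  // 255 still fits.
      return s.frames_count == 255 ? 0 : 1;
    }
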
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index a32cae3ef9..b4361ee849 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -7,6 +7,7 @@
#include <stdint.h>
#include <memory>
#include "src/api.h"
+#include "src/base/ieee754.h"
#include "src/base/utils/random-number-generator.h"
#include "src/frames-inl.h"
#include "src/heap/heap.h"
@@ -27,7 +28,7 @@ intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
return static_cast<intptr_t>(rate);
}
double u = random_->NextDouble();
- double next = (-std::log(u)) * rate;
+ double next = (-base::ieee754::log(u)) * rate;
return next < kPointerSize
? kPointerSize
: (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
@@ -47,8 +48,9 @@ v8::AllocationProfile::Allocation SamplingHeapProfiler::ScaleSample(
return {size, static_cast<unsigned int>(count * scale + 0.5)};
}
-SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
- uint64_t rate, int stack_depth)
+SamplingHeapProfiler::SamplingHeapProfiler(
+ Heap* heap, StringsStorage* names, uint64_t rate, int stack_depth,
+ v8::HeapProfiler::SamplingFlags flags)
: isolate_(heap->isolate()),
heap_(heap),
new_space_observer_(new SamplingAllocationObserver(
@@ -58,14 +60,15 @@ SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
heap_, static_cast<intptr_t>(rate), rate, this,
heap->isolate()->random_number_generator())),
names_(names),
- profile_root_("(root)", v8::UnboundScript::kNoScriptId, 0),
+ profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0),
samples_(),
stack_depth_(stack_depth),
- rate_(rate) {
+ rate_(rate),
+ flags_(flags) {
CHECK_GT(rate_, 0);
heap->new_space()->AddAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
if (space != heap->new_space()) {
space->AddAllocationObserver(other_spaces_observer_.get());
}
@@ -76,7 +79,7 @@ SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
SamplingHeapProfiler::~SamplingHeapProfiler() {
heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap_);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
if (space != heap_->new_space()) {
space->RemoveAllocationObserver(other_spaces_observer_.get());
}
@@ -109,6 +112,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
Sample* sample = new Sample(size, node, loc, this);
samples_.insert(sample);
sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+ sample->global.MarkIndependent();
}
void SamplingHeapProfiler::OnWeakCallback(
@@ -117,22 +121,34 @@ void SamplingHeapProfiler::OnWeakCallback(
AllocationNode* node = sample->owner;
DCHECK(node->allocations_[sample->size] > 0);
node->allocations_[sample->size]--;
+ if (node->allocations_[sample->size] == 0) {
+ node->allocations_.erase(sample->size);
+ while (node->allocations_.empty() && node->children_.empty() &&
+ node->parent_ && !node->parent_->pinned_) {
+ AllocationNode* parent = node->parent_;
+ AllocationNode::FunctionId id = AllocationNode::function_id(
+ node->script_id_, node->script_position_, node->name_);
+ parent->children_.erase(id);
+ delete node;
+ node = parent;
+ }
+ }
sample->profiler->samples_.erase(sample);
delete sample;
}
-SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
- AllocationNode* parent, const char* name, int script_id,
- int start_position) {
- for (AllocationNode* child : parent->children_) {
- if (child->script_id_ == script_id &&
- child->script_position_ == start_position &&
- strcmp(child->name_, name) == 0) {
- return child;
- }
+SamplingHeapProfiler::AllocationNode*
+SamplingHeapProfiler::AllocationNode::FindOrAddChildNode(const char* name,
+ int script_id,
+ int start_position) {
+ FunctionId id = function_id(script_id, start_position, name);
+ auto it = children_.find(id);
+ if (it != children_.end()) {
+ DCHECK(strcmp(it->second->name_, name) == 0);
+ return it->second;
}
- AllocationNode* child = new AllocationNode(name, script_id, start_position);
- parent->children_.push_back(child);
+ auto child = new AllocationNode(this, name, script_id, start_position);
+ children_.insert(std::make_pair(id, child));
return child;
}
@@ -140,7 +156,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
AllocationNode* node = &profile_root_;
std::vector<SharedFunctionInfo*> stack;
- StackTraceFrameIterator it(isolate_);
+ JavaScriptFrameIterator it(isolate_);
int frames_captured = 0;
while (!it.done() && frames_captured < stack_depth_) {
JavaScriptFrame* frame = it.frame();
@@ -173,7 +189,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
name = "(JS)";
break;
}
- return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
+ return node->FindOrAddChildNode(name, v8::UnboundScript::kNoScriptId, 0);
}
// We need to process the stack in reverse order as the top of the stack is
@@ -186,14 +202,17 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
Script* script = Script::cast(shared->script());
script_id = script->id();
}
- node = FindOrAddChildNode(node, name, script_id, shared->start_position());
+ node = node->FindOrAddChildNode(name, script_id, shared->start_position());
}
return node;
}
v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
- const std::map<int, Script*>& scripts) {
+ const std::map<int, Handle<Script>>& scripts) {
+ // By pinning the node we make sure its children won't get disposed if
+ // a GC kicks in during the tree retrieval.
+ node->pinned_ = true;
Local<v8::String> script_name =
ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(""));
int line = v8::AllocationProfile::kNoLineNumberInfo;
@@ -203,23 +222,22 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
if (node->script_id_ != v8::UnboundScript::kNoScriptId &&
scripts.find(node->script_id_) != scripts.end()) {
// Cannot use std::map<T>::at because it is not available on android.
- auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
- Script* script = non_const_scripts[node->script_id_];
- if (script) {
+ auto non_const_scripts =
+ const_cast<std::map<int, Handle<Script>>&>(scripts);
+ Handle<Script> script = non_const_scripts[node->script_id_];
+ if (!script.is_null()) {
if (script->name()->IsName()) {
Name* name = Name::cast(script->name());
script_name = ToApiHandle<v8::String>(
isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
}
- Handle<Script> script_handle(script);
- line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
- column =
- 1 + Script::GetColumnNumber(script_handle, node->script_position_);
- }
- for (auto alloc : node->allocations_) {
- allocations.push_back(ScaleSample(alloc.first, alloc.second));
+ line = 1 + Script::GetLineNumber(script, node->script_position_);
+ column = 1 + Script::GetColumnNumber(script, node->script_position_);
}
}
+ for (auto alloc : node->allocations_) {
+ allocations.push_back(ScaleSample(alloc.first, alloc.second));
+ }
profile->nodes().push_back(v8::AllocationProfile::Node(
{ToApiHandle<v8::String>(
@@ -227,35 +245,34 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
script_name, node->script_id_, node->script_position_, line, column,
std::vector<v8::AllocationProfile::Node*>(), allocations}));
v8::AllocationProfile::Node* current = &profile->nodes().back();
- size_t child_len = node->children_.size();
- // The children vector may have nodes appended to it during translation
+ // The children map may have nodes inserted into it during translation
// because the translation may allocate strings on the JS heap that have
- // the potential to be sampled. We cache the length of the vector before
- // iteration so that nodes appended to the vector during iteration are
- // not processed.
- for (size_t i = 0; i < child_len; i++) {
+ // the potential to be sampled. That's ok since map iterators are not
+ // invalidated upon std::map insertion.
+ for (auto it : node->children_) {
current->children.push_back(
- TranslateAllocationNode(profile, node->children_[i], scripts));
+ TranslateAllocationNode(profile, it.second, scripts));
}
+ node->pinned_ = false;
return current;
}
v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
+ if (flags_ & v8::HeapProfiler::kSamplingForceGC) {
+ isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+ "SamplingHeapProfiler");
+ }
// To resolve positions to line/column numbers, we will need to look up
// scripts. Build a map to allow fast mapping from script id to script.
- std::map<int, Script*> scripts;
+ std::map<int, Handle<Script>> scripts;
{
Script::Iterator iterator(isolate_);
- Script* script;
- while ((script = iterator.Next())) {
- scripts[script->id()] = script;
+ while (Script* script = iterator.Next()) {
+ scripts[script->id()] = handle(script);
}
}
-
auto profile = new v8::internal::AllocationProfile();
-
TranslateAllocationNode(profile, &profile_root_, scripts);
-
return profile;
}
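
One hunk above is easy to miss: replacing std::log with base::ieee754::log makes GetNextSampleInterval bit-identical across platforms, since V8's fdlibm-derived implementation doesn't vary with the host libm. The formula itself is the standard inverse-transform draw of an exponential gap, which makes allocation sampling a Poisson process with a mean of `rate` bytes between samples. A self-contained rendering of the draw, with std::log standing in for the ieee754 version:

    // Self-contained version of GetNextSampleInterval's draw: the gap to
    // the next sampled allocation is exponential with mean `rate` bytes,
    // computed as -log(u) * rate for uniform u in (0, 1].
    #include <climits>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <random>

    intptr_t NextSampleInterval(std::mt19937_64& rng, uint64_t rate) {
      std::uniform_real_distribution<double> uniform(
          std::numeric_limits<double>::min(), 1.0);  // Never feed log() a zero.
      double next = -std::log(uniform(rng)) * static_cast<double>(rate);
      // Clamp as the V8 code does: at least one pointer width, at most INT_MAX.
      if (next < sizeof(void*)) return sizeof(void*);
      if (next > INT_MAX) return INT_MAX;
      return static_cast<intptr_t>(next);
    }

    int main() {
      std::mt19937_64 rng(42);
      const uint64_t rate = 512 * 1024;  // Sample every ~512 KiB on average.
      for (int i = 0; i < 3; ++i)
        printf("%ld\n", static_cast<long>(NextSampleInterval(rng, rate)));
      return 0;
    }
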
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
index 0b538b070c..07840244ce 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.h
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -7,6 +7,7 @@
#include <deque>
#include <map>
+#include <memory>
#include <set>
#include "include/v8-profiler.h"
#include "src/heap/heap.h"
@@ -41,7 +42,7 @@ class AllocationProfile : public v8::AllocationProfile {
class SamplingHeapProfiler {
public:
SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
- int stack_depth);
+ int stack_depth, v8::HeapProfiler::SamplingFlags flags);
~SamplingHeapProfiler();
v8::AllocationProfile* GetAllocationProfile();
@@ -71,23 +72,47 @@ class SamplingHeapProfiler {
class AllocationNode {
public:
- AllocationNode(const char* const name, int script_id,
- const int start_position)
- : script_id_(script_id),
+ AllocationNode(AllocationNode* parent, const char* name, int script_id,
+ int start_position)
+ : parent_(parent),
+ script_id_(script_id),
script_position_(start_position),
- name_(name) {}
+ name_(name),
+ pinned_(false) {}
~AllocationNode() {
for (auto child : children_) {
- delete child;
+ delete child.second;
}
}
private:
+ typedef uint64_t FunctionId;
+ static FunctionId function_id(int script_id, int start_position,
+ const char* name) {
+ // script_id == kNoScriptId case:
+ // Use function name pointer as an id. Names derived from VM state
+ // must not collide with the builtin names. The least significant bit
+ // of the id is set to 1.
+ if (script_id == v8::UnboundScript::kNoScriptId) {
+ return reinterpret_cast<intptr_t>(name) | 1;
+ }
+ // script_id != kNoScriptId case:
+      // Use the script_id, start_position pair to uniquely identify the node.
+ // The least significant bit of the id is set to 0.
+ DCHECK(static_cast<unsigned>(start_position) < (1u << 31));
+ return (static_cast<uint64_t>(script_id) << 32) + (start_position << 1);
+ }
+ AllocationNode* FindOrAddChildNode(const char* name, int script_id,
+ int start_position);
+  // TODO(alph): make use of unordered_map here. Pay attention to
+ // iterator invalidation during TranslateAllocationNode.
std::map<size_t, unsigned int> allocations_;
- std::vector<AllocationNode*> children_;
+ std::map<FunctionId, AllocationNode*> children_;
+ AllocationNode* const parent_;
const int script_id_;
const int script_position_;
const char* const name_;
+ bool pinned_;
friend class SamplingHeapProfiler;
@@ -110,24 +135,25 @@ class SamplingHeapProfiler {
// loaded scripts keyed by their script id.
v8::AllocationProfile::Node* TranslateAllocationNode(
AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
- const std::map<int, Script*>& scripts);
+ const std::map<int, Handle<Script>>& scripts);
v8::AllocationProfile::Allocation ScaleSample(size_t size,
unsigned int count);
AllocationNode* AddStack();
- AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
- int script_id, int start_position);
Isolate* const isolate_;
Heap* const heap_;
- base::SmartPointer<SamplingAllocationObserver> new_space_observer_;
- base::SmartPointer<SamplingAllocationObserver> other_spaces_observer_;
+ std::unique_ptr<SamplingAllocationObserver> new_space_observer_;
+ std::unique_ptr<SamplingAllocationObserver> other_spaces_observer_;
StringsStorage* const names_;
AllocationNode profile_root_;
std::set<Sample*> samples_;
const int stack_depth_;
const uint64_t rate_;
+ v8::HeapProfiler::SamplingFlags flags_;
friend class SamplingAllocationObserver;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplingHeapProfiler);
};
class SamplingAllocationObserver : public AllocationObserver {
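
The FunctionId scheme added above keys children on a single 64-bit value and discriminates on the low bit: packed (script_id, start_position) ids always have bit 0 clear, while the no-script case tags the interned name pointer with bit 0 set; since those names come from the profiler's heap-allocated string storage, the tagged ids stay distinct in practice. The same encoding extracted into a stand-alone form (kNoScriptId standing in for v8::UnboundScript::kNoScriptId, which is 0):

    // Stand-alone copy of the FunctionId encoding: low bit 1 tags a name
    // pointer (the no-script case); low bit 0 means the high 32 bits hold
    // the script id and bits 1..31 hold the function's start position.
    #include <cassert>
    #include <cstdint>

    typedef uint64_t FunctionId;
    const int kNoScriptId = 0;  // Stand-in for v8::UnboundScript::kNoScriptId.

    FunctionId function_id(int script_id, int start_position, const char* name) {
      if (script_id == kNoScriptId) {
        // Tag the name pointer; packed ids below always have bit 0 clear.
        return reinterpret_cast<uintptr_t>(name) | 1;
      }
      assert(static_cast<unsigned>(start_position) < (1u << 31));
      return (static_cast<uint64_t>(script_id) << 32) +
             (static_cast<uint32_t>(start_position) << 1);
    }

    int main() {
      FunctionId packed = function_id(7, 100, "foo");
      FunctionId tagged = function_id(kNoScriptId, 0, "(GC)");
      assert((packed & 1) == 0 && (packed >> 32) == 7);  // Packed case.
      assert((tagged & 1) == 1);                         // Tagged-pointer case.
      return 0;
    }
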
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 9f095b8866..edb01b5fd0 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -4,7 +4,8 @@
#include "src/profiler/strings-storage.h"
-#include "src/base/smart-pointers.h"
+#include <memory>
+
#include "src/objects-inl.h"
namespace v8 {
@@ -22,7 +23,8 @@ StringsStorage::StringsStorage(Heap* heap)
StringsStorage::~StringsStorage() {
- for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ for (base::HashMap::Entry* p = names_.Start(); p != NULL;
+ p = names_.Next(p)) {
DeleteArray(reinterpret_cast<const char*>(p->value));
}
}
@@ -30,7 +32,7 @@ StringsStorage::~StringsStorage() {
const char* StringsStorage::GetCopy(const char* src) {
int len = static_cast<int>(strlen(src));
- HashMap::Entry* entry = GetEntry(src, len);
+ base::HashMap::Entry* entry = GetEntry(src, len);
if (entry->value == NULL) {
Vector<char> dst = Vector<char>::New(len + 1);
StrNCpy(dst, src, len);
@@ -52,7 +54,7 @@ const char* StringsStorage::GetFormatted(const char* format, ...) {
const char* StringsStorage::AddOrDisposeString(char* str, int len) {
- HashMap::Entry* entry = GetEntry(str, len);
+ base::HashMap::Entry* entry = GetEntry(str, len);
if (entry->value == NULL) {
// New entry added.
entry->key = str;
@@ -80,9 +82,9 @@ const char* StringsStorage::GetName(Name* name) {
String* str = String::cast(name);
int length = Min(kMaxNameSize, str->length());
int actual_length = 0;
- base::SmartArrayPointer<char> data = str->ToCString(
+ std::unique_ptr<char[]> data = str->ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
- return AddOrDisposeString(data.Detach(), actual_length);
+ return AddOrDisposeString(data.release(), actual_length);
} else if (name->IsSymbol()) {
return "<symbol>";
}
@@ -107,15 +109,15 @@ const char* StringsStorage::GetFunctionName(const char* name) {
size_t StringsStorage::GetUsedMemorySize() const {
size_t size = sizeof(*this);
- size += sizeof(HashMap::Entry) * names_.capacity();
- for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ size += sizeof(base::HashMap::Entry) * names_.capacity();
+ for (base::HashMap::Entry* p = names_.Start(); p != NULL;
+ p = names_.Next(p)) {
size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
}
return size;
}
-
-HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
+base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
}
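
StringsStorage is where those name pointers come from: every Get* call funnels into GetEntry, and AddOrDisposeString either adopts a freshly formatted buffer or frees it and returns the copy already stored, so equal names always share one stable allocation. The same contract in a few lines of standard C++, with std::unordered_set standing in for base::HashMap:

    // Sketch of the StringsStorage contract: one canonical, owned copy per
    // distinct string, valid for the storage's lifetime. std::unordered_set
    // stands in for base::HashMap plus the manual buffer ownership.
    #include <string>
    #include <unordered_set>

    class Interner {
     public:
      // Like GetCopy(): returns a stable pointer, identical for equal input.
      const char* GetCopy(const char* src) {
        return names_.insert(src).first->c_str();
      }

     private:
      std::unordered_set<std::string> names_;  // Node-based: pointers stay valid.
    };

    int main() {
      Interner interner;
      const char* a = interner.GetCopy("anonymous");
      const char* b = interner.GetCopy("anonymous");
      return a == b ? 0 : 1;  // Same pointer: one stored copy, as in V8.
    }
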
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 7164caef63..f98aa5e038 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -5,8 +5,11 @@
#ifndef V8_PROFILER_STRINGS_STORAGE_H_
#define V8_PROFILER_STRINGS_STORAGE_H_
+#include <stdarg.h>
+
#include "src/allocation.h"
-#include "src/hashmap.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/hashmap.h"
namespace v8 {
namespace internal {
@@ -19,7 +22,8 @@ class StringsStorage {
~StringsStorage();
const char* GetCopy(const char* src);
- const char* GetFormatted(const char* format, ...);
+ PRINTF_FORMAT(2, 3) const char* GetFormatted(const char* format, ...);
+ PRINTF_FORMAT(2, 0)
const char* GetVFormatted(const char* format, va_list args);
const char* GetName(Name* name);
const char* GetName(int index);
@@ -32,10 +36,10 @@ class StringsStorage {
static bool StringsMatch(void* key1, void* key2);
const char* AddOrDisposeString(char* str, int len);
- HashMap::Entry* GetEntry(const char* str, int len);
+ base::HashMap::Entry* GetEntry(const char* str, int len);
uint32_t hash_seed_;
- HashMap names_;
+ base::HashMap names_;
DISALLOW_COPY_AND_ASSIGN(StringsStorage);
};
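
The PRINTF_FORMAT annotations added to this header expand, on GCC and Clang, to __attribute__((format(printf, N, M))), where argument indices count the implicit this as 1; hence (2, 3) for the variadic GetFormatted and (2, 0) for the va_list-based GetVFormatted (0 meaning there are no varargs to walk, but the format string is still validated). A stand-alone equivalent:

    // Stand-alone equivalent of PRINTF_FORMAT's effect (the real macro is
    // defined in src/base/compiler-specific.h). The attribute makes the
    // compiler type-check printf-style calls at compile time.
    #include <cstdarg>
    #include <cstdio>

    #if defined(__GNUC__) || defined(__clang__)
    #define MY_PRINTF_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
    #else
    #define MY_PRINTF_FORMAT(fmt, args)
    #endif

    class Log {
     public:
      // On a member function the implicit `this` is argument 1, so the
      // format string is argument 2 and checked varargs begin at 3.
      MY_PRINTF_FORMAT(2, 3) void Printf(const char* format, ...) {
        va_list args;
        va_start(args, format);
        vprintf(format, args);
        va_end(args);
      }
    };

    int main() {
      Log log;
      log.Printf("%s:%d\n", "tick", 42);
      // log.Printf("%s\n", 42);  // Would trigger -Wformat at compile time.
      return 0;
    }
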
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
new file mode 100644
index 0000000000..ecb2bf46f7
--- /dev/null
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -0,0 +1,272 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/tick-sample.h"
+
+#include "include/v8-profiler.h"
+#include "src/frames-inl.h"
+#include "src/msan.h"
+#include "src/simulator.h"
+#include "src/vm-state-inl.h"
+
+namespace v8 {
+namespace {
+
+bool IsSamePage(i::byte* ptr1, i::byte* ptr2) {
+ const uint32_t kPageSize = 4096;
+ uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
+ return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
+ (reinterpret_cast<uintptr_t>(ptr2) & mask);
+}
+
+// Check if the code at the specified address could potentially be
+// frame setup code.
+bool IsNoFrameRegion(i::Address address) {
+ struct Pattern {
+ int bytes_count;
+ i::byte bytes[8];
+ int offsets[4];
+ };
+ i::byte* pc = reinterpret_cast<i::byte*>(address);
+ static Pattern patterns[] = {
+#if V8_HOST_ARCH_IA32
+ // push %ebp
+ // mov %esp,%ebp
+ {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
+ // pop %ebp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // pop %ebp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#elif V8_HOST_ARCH_X64
+ // pushq %rbp
+ // movq %rsp,%rbp
+ {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
+ // popq %rbp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // popq %rbp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#endif
+ {0, {}, {}}
+ };
+ for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
+ for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
+ int offset = *offset_ptr;
+ if (!offset || IsSamePage(pc, pc - offset)) {
+ MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
+ if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
+ return true;
+ } else {
+      // It is not safe to examine bytes on another page, as it might not be
+      // allocated, thus causing a SEGFAULT.
+ // Check the pattern part that's on the same page and
+ // pessimistically assume it could be the entire pattern match.
+ MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
+ if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+namespace internal {
+namespace {
+
+#if defined(USE_SIMULATOR)
+class SimulatorHelper {
+ public:
+ // Returns true if register values were successfully retrieved
+ // from the simulator, otherwise returns false.
+ static bool FillRegisters(Isolate* isolate, v8::RegisterState* state);
+};
+
+bool SimulatorHelper::FillRegisters(Isolate* isolate,
+ v8::RegisterState* state) {
+ Simulator* simulator = isolate->thread_local_top()->simulator_;
+  // Check if there is an active simulator.
+ if (simulator == NULL) return false;
+#if V8_TARGET_ARCH_ARM
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp =
+ reinterpret_cast<Address>(simulator->get_register(Simulator::r11));
+#elif V8_TARGET_ARCH_ARM64
+ state->pc = reinterpret_cast<Address>(simulator->pc());
+ state->sp = reinterpret_cast<Address>(simulator->sp());
+ state->fp = reinterpret_cast<Address>(simulator->fp());
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_PPC
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#elif V8_TARGET_ARCH_S390
+ if (!simulator->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator->get_pc());
+ }
+ state->sp = reinterpret_cast<Address>(simulator->get_register(Simulator::sp));
+ state->fp = reinterpret_cast<Address>(simulator->get_register(Simulator::fp));
+#endif
+ if (state->sp == 0 || state->fp == 0) {
+    // It is possible that the simulator is interrupted while it is updating
+    // the sp or fp register. The ARM64 simulator does this in two steps:
+    // first setting it to zero and then setting it to the new value.
+    // Bail out if sp/fp doesn't contain the new value.
+ //
+ // FIXME: The above doesn't really solve the issue.
+    // If a 64-bit target is executed on a 32-bit host, even the final
+    // write is non-atomic, so it might obtain half of the result.
+    // Moreover, as long as the register-set code uses memcpy (as of now),
+    // it is not guaranteed to be atomic even when both host and target
+    // are of the same bitness.
+ return false;
+ }
+ return true;
+}
+#endif // USE_SIMULATOR
+
+} // namespace
+} // namespace internal
+
+//
+// StackTracer implementation
+//
+DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
+ const RegisterState& reg_state,
+ RecordCEntryFrame record_c_entry_frame,
+ bool update_stats,
+ bool use_simulator_reg_state) {
+ this->update_stats = update_stats;
+ SampleInfo info;
+ RegisterState regs = reg_state;
+ if (!GetStackSample(v8_isolate, &regs, record_c_entry_frame, stack,
+ kMaxFramesCount, &info, use_simulator_reg_state)) {
+ // It is executing JS but failed to collect a stack trace.
+ // Mark the sample as spoiled.
+ pc = nullptr;
+ return;
+ }
+
+ state = info.vm_state;
+ pc = regs.pc;
+ frames_count = static_cast<unsigned>(info.frames_count);
+ has_external_callback = info.external_callback_entry != nullptr;
+ if (has_external_callback) {
+ external_callback_entry = info.external_callback_entry;
+ } else if (frames_count) {
+    // The sp register may point at an arbitrary place in memory; make
+    // sure MSAN doesn't complain about it.
+ MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(void*));
+    // Sample a potential return address value for frameless invocation of
+    // stubs (we'll figure out later if this value makes sense).
+ tos = i::Memory::Address_at(reinterpret_cast<i::Address>(regs.sp));
+ } else {
+ tos = nullptr;
+ }
+}
+
+bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
+ RecordCEntryFrame record_c_entry_frame,
+ void** frames, size_t frames_limit,
+ v8::SampleInfo* sample_info,
+ bool use_simulator_reg_state) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ sample_info->frames_count = 0;
+ sample_info->vm_state = isolate->current_vm_state();
+ sample_info->external_callback_entry = nullptr;
+ if (sample_info->vm_state == GC) return true;
+
+ i::Address js_entry_sp = isolate->js_entry_sp();
+ if (js_entry_sp == nullptr) return true; // Not executing JS now.
+
+#if defined(USE_SIMULATOR)
+ if (use_simulator_reg_state) {
+ if (!i::SimulatorHelper::FillRegisters(isolate, regs)) return false;
+ }
+#else
+ USE(use_simulator_reg_state);
+#endif
+ DCHECK(regs->sp);
+
+ if (regs->pc && IsNoFrameRegion(static_cast<i::Address>(regs->pc))) {
+    // The frame is not set up, so it'd be hard to iterate the stack. Bail out.
+ return false;
+ }
+
+ i::ExternalCallbackScope* scope = isolate->external_callback_scope();
+ i::Address handler = i::Isolate::handler(isolate->thread_local_top());
+  // If there is a handler on top of the external callback scope, then
+  // we have already entered JavaScript again and the external callback
+ // is not the top function.
+ if (scope && scope->scope_address() < handler) {
+ i::Address* external_callback_entry_ptr =
+ scope->callback_entrypoint_address();
+ sample_info->external_callback_entry =
+ external_callback_entry_ptr == nullptr ? nullptr
+ : *external_callback_entry_ptr;
+ }
+
+ i::SafeStackFrameIterator it(isolate, reinterpret_cast<i::Address>(regs->fp),
+ reinterpret_cast<i::Address>(regs->sp),
+ js_entry_sp);
+
+  // If at this point the iterator does not see any frames,
+  // it usually means something is wrong with the FP,
+  // e.g. it is used as a general-purpose register in the function.
+  // Bail out.
+ if (it.done()) return false;
+
+ size_t i = 0;
+ if (record_c_entry_frame == kIncludeCEntryFrame &&
+ (it.top_frame_type() == internal::StackFrame::EXIT ||
+ it.top_frame_type() == internal::StackFrame::BUILTIN_EXIT)) {
+ frames[i++] = isolate->c_function();
+ }
+ for (; !it.done() && i < frames_limit; it.Advance()) {
+ if (!it.frame()->is_interpreted()) {
+ frames[i++] = it.frame()->pc();
+ continue;
+ }
+ // For interpreted frames use the bytecode array pointer as the pc.
+ i::InterpretedFrame* frame = static_cast<i::InterpretedFrame*>(it.frame());
+    // Since the sampler can interrupt execution at any point, the
+    // bytecode_array might be garbage, so don't dereference it.
+ i::Address bytecode_array =
+ reinterpret_cast<i::Address>(frame->GetBytecodeArray()) -
+ i::kHeapObjectTag;
+ frames[i++] = bytecode_array + i::BytecodeArray::kHeaderSize +
+ frame->GetBytecodeOffset();
+ }
+ sample_info->frames_count = i;
+ return true;
+}
+
+namespace internal {
+
+void TickSample::Init(Isolate* isolate, const v8::RegisterState& state,
+ RecordCEntryFrame record_c_entry_frame, bool update_stats,
+ bool use_simulator_reg_state) {
+ v8::TickSample::Init(reinterpret_cast<v8::Isolate*>(isolate), state,
+ record_c_entry_frame, update_stats,
+ use_simulator_reg_state);
+ if (pc == nullptr) return;
+ timestamp = base::TimeTicks::HighResolutionNow();
+}
+
+} // namespace internal
+} // namespace v8
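
The subtlest part of the new file is the interpreted-frame branch of GetStackSample: a bytecode frame has no meaningful machine pc, so the code fabricates one inside the BytecodeArray, namely the untagged object address plus the fixed header plus the current bytecode offset, which later symbolization can map back to a bytecode position. The pointer is only ever used for arithmetic, never dereferenced, so a stale value can't crash the handler. The computation on stand-in values:

    // The synthetic-pc arithmetic from the interpreted-frame branch above,
    // on stand-in values: kHeapObjectTag is 1 in V8; the header size here
    // is illustrative.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kHeapObjectTag = 1;  // Tagged heap pointers end in 1.
      const intptr_t kHeaderSize = 16;    // Stand-in for BytecodeArray header.

      intptr_t tagged_array = 0x1000 + kHeapObjectTag;  // From the frame.
      intptr_t bytecode_offset = 7;       // Current interpreter position.

      // Untag the pointer, then index past the header to the active bytecode.
      // No dereference happens anywhere, so a stale pointer is harmless here.
      intptr_t synthetic_pc =
          (tagged_array - kHeapObjectTag) + kHeaderSize + bytecode_offset;
      printf("synthetic pc = %#lx\n",
             static_cast<unsigned long>(synthetic_pc));  // 0x1017
      return 0;
    }
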
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
new file mode 100644
index 0000000000..819b862388
--- /dev/null
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -0,0 +1,27 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_TICK_SAMPLE_H_
+#define V8_PROFILER_TICK_SAMPLE_H_
+
+#include "include/v8-profiler.h"
+#include "src/base/platform/time.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+struct TickSample : public v8::TickSample {
+ void Init(Isolate* isolate, const v8::RegisterState& state,
+ RecordCEntryFrame record_c_entry_frame, bool update_stats,
+ bool use_simulator_reg_state = true);
+ base::TimeTicks timestamp;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_TICK_SAMPLE_H_
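
A note on GetStackSample above: interpreted frames have no machine pc, so the
sampler synthesizes one from the (possibly stale) BytecodeArray pointer using
pure address arithmetic, never dereferencing it. A minimal standalone sketch of
that pattern; the tag and header-size constants here are illustrative
assumptions, not V8's actual values.

    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kHeapObjectTag = 1;  // assumed one-bit heap tag
    constexpr intptr_t kHeaderSize = 16;    // assumed fixed object header

    int main() {
      intptr_t tagged = 0x100001;  // pretend tagged BytecodeArray pointer
      intptr_t offset = 7;         // current bytecode offset
      // Untag and index into the array -- no memory access needed.
      intptr_t synthetic_pc = tagged - kHeapObjectTag + kHeaderSize + offset;
      std::printf("synthetic pc: %#lx\n",
                  static_cast<unsigned long>(synthetic_pc));
      return 0;
    }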
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 31efb413b6..f22a2630e2 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -249,7 +249,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
if (!getter.is_null()) {
// 18c. If IsCallable(getter) is false and getter is not undefined,
// throw a TypeError exception.
- if (!getter->IsCallable() && !getter->IsUndefined()) {
+ if (!getter->IsCallable() && !getter->IsUndefined(isolate)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kObjectGetterCallable, getter));
return false;
@@ -267,7 +267,7 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
if (!setter.is_null()) {
// 21c. If IsCallable(setter) is false and setter is not undefined,
// throw a TypeError exception.
- if (!setter->IsCallable() && !setter->IsUndefined()) {
+ if (!setter->IsCallable() && !setter->IsUndefined(isolate)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kObjectSetterCallable, setter));
return false;
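
The IsUndefined(isolate) signature above recurs throughout this update: oddball
checks now take the isolate so the test can compare against a per-isolate root.
A hedged sketch of the idea, with invented names rather than V8's real classes:

    // Sketch only: caching the canonical 'undefined' on the isolate turns
    // the check into a single pointer comparison, with no global lookup.
    struct Isolate {
      const void* undefined_value;
    };

    struct Object {
      bool IsUndefined(const Isolate* isolate) const {
        return this == isolate->undefined_value;
      }
    };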
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index fdf2c6c4ab..87df02d08e 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -28,11 +28,6 @@ enum PropertyAttributes {
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
// a non-existent property.
-
- // When creating a property, EVAL_DECLARED used to indicate that the property
- // came from a sloppy-mode direct eval, and certain checks need to be done.
- // Cannot be stored in or returned from a descriptor's attributes bitfield.
- EVAL_DECLARED = 128
};
@@ -53,7 +48,18 @@ STATIC_ASSERT(ONLY_ENUMERABLE == static_cast<PropertyFilter>(DONT_ENUM));
STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(DONT_DELETE));
STATIC_ASSERT(((SKIP_STRINGS | SKIP_SYMBOLS | ONLY_ALL_CAN_READ) &
ALL_ATTRIBUTES_MASK) == 0);
-
+STATIC_ASSERT(ALL_PROPERTIES ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::ALL_PROPERTIES));
+STATIC_ASSERT(ONLY_WRITABLE ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::ONLY_WRITABLE));
+STATIC_ASSERT(ONLY_ENUMERABLE ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::ONLY_ENUMERABLE));
+STATIC_ASSERT(ONLY_CONFIGURABLE == static_cast<PropertyFilter>(
+ v8::PropertyFilter::ONLY_CONFIGURABLE));
+STATIC_ASSERT(SKIP_STRINGS ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_STRINGS));
+STATIC_ASSERT(SKIP_SYMBOLS ==
+ static_cast<PropertyFilter>(v8::PropertyFilter::SKIP_SYMBOLS));
class Smi;
class Type;
@@ -203,7 +209,6 @@ static const int kMaxNumberOfDescriptors =
static const int kInvalidEnumCacheSentinel =
(1 << kDescriptorIndexBitCount) - 1;
-
enum class PropertyCellType {
// Meaningful when a property cell does not contain the hole.
kUndefined, // The PREMONOMORPHIC of property cells.
@@ -213,13 +218,13 @@ enum class PropertyCellType {
// Meaningful when a property cell contains the hole.
kUninitialized = kUndefined, // Cell has never been initialized.
- kInvalidated = kConstant, // Cell has been deleted or invalidated.
+ kInvalidated = kConstant, // Cell has been deleted, invalidated or never
+ // existed.
// For dictionaries not holding cells.
kNoCell = kMutable,
};
-
enum class PropertyCellConstantType {
kSmi,
kStableMap,
@@ -259,8 +264,9 @@ class PropertyDetails BASE_EMBEDDED {
FieldIndexField::encode(field_index);
}
- static PropertyDetails Empty() {
- return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+ static PropertyDetails Empty(
+ PropertyCellType cell_type = PropertyCellType::kNoCell) {
+ return PropertyDetails(NONE, DATA, 0, cell_type);
}
int pointer() const { return DescriptorPointer::decode(value_); }
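
The STATIC_ASSERT block added above pins the internal PropertyFilter values to
the public v8::PropertyFilter ones, which is what makes a bare static_cast
between the two enums safe at the API boundary. The same mechanism in
miniature, with illustrative enums rather than the V8 definitions:

    enum PublicFilter : int { kPublicOnlyWritable = 1 << 0 };
    enum InternalFilter : int { kInternalOnlyWritable = 1 << 0 };

    // If this holds, static_cast between the enums cannot produce a value
    // the other side does not understand.
    static_assert(kPublicOnlyWritable ==
                      static_cast<PublicFilter>(kInternalOnlyWritable),
                  "public and internal filter bits must stay in sync");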
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index e09ff0ff0c..032d9b6b34 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -25,14 +25,12 @@ namespace internal {
class PrototypeIterator {
public:
- enum WhereToStart { START_AT_RECEIVER, START_AT_PROTOTYPE };
-
enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
const int kProxyPrototypeLimit = 100 * 1000;
PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
- WhereToStart where_to_start = START_AT_PROTOTYPE,
+ WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL)
: object_(NULL),
handle_(receiver),
@@ -41,32 +39,34 @@ class PrototypeIterator {
is_at_end_(false),
seen_proxies_(0) {
CHECK(!handle_.is_null());
- if (where_to_start == START_AT_PROTOTYPE) Advance();
+ if (where_to_start == kStartAtPrototype) Advance();
}
PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
- WhereToStart where_to_start = START_AT_PROTOTYPE,
+ WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL)
: object_(receiver),
isolate_(isolate),
where_to_end_(where_to_end),
is_at_end_(false),
seen_proxies_(0) {
- if (where_to_start == START_AT_PROTOTYPE) Advance();
+ if (where_to_start == kStartAtPrototype) Advance();
}
explicit PrototypeIterator(Map* receiver_map)
: object_(receiver_map->prototype()),
isolate_(receiver_map->GetIsolate()),
where_to_end_(END_AT_NULL),
- is_at_end_(object_->IsNull()) {}
+ is_at_end_(object_->IsNull(isolate_)),
+ seen_proxies_(0) {}
explicit PrototypeIterator(Handle<Map> receiver_map)
: object_(NULL),
handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
isolate_(receiver_map->GetIsolate()),
where_to_end_(END_AT_NULL),
- is_at_end_(handle_->IsNull()) {}
+ is_at_end_(handle_->IsNull(isolate_)),
+ seen_proxies_(0) {}
~PrototypeIterator() {}
@@ -114,7 +114,7 @@ class PrototypeIterator {
Object* prototype = map->prototype();
is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN
? !map->has_hidden_prototype()
- : prototype->IsNull();
+ : prototype->IsNull(isolate_);
if (handle_.is_null()) {
object_ = prototype;
@@ -153,7 +153,8 @@ class PrototypeIterator {
MaybeHandle<Object> proto =
JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
if (!proto.ToHandle(&handle_)) return false;
- is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull();
+ is_at_end_ =
+ where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull(isolate_);
return true;
}
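
For orientation, PrototypeIterator keeps its usual walk shape after this
change: optionally start at the receiver, Advance() through prototypes, and
stop at null (or at the first non-hidden prototype). A simplified standalone
sketch of that loop, outside V8:

    struct Obj { Obj* prototype; };

    // Count the prototypes above 'receiver', mirroring the
    // kStartAtPrototype / Advance() / is_at_end_ shape.
    int PrototypeChainLength(const Obj* receiver) {
      int length = 0;
      for (const Obj* o = receiver->prototype; o != nullptr;
           o = o->prototype) {
        ++length;
      }
      return length;
    }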
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index f8dfc97c9c..351d34c576 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -9,7 +9,6 @@
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
@@ -1201,11 +1200,6 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
}
-bool RegExpMacroAssemblerARM::CanReadUnaligned() {
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
-}
-
-
void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index f808538a44..6c910644b2 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -86,7 +86,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index e8bdad8e14..49a81a78ec 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -9,7 +9,6 @@
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
@@ -1364,12 +1363,6 @@ void RegExpMacroAssemblerARM64::CheckPosition(int cp_offset,
}
-bool RegExpMacroAssemblerARM64::CanReadUnaligned() {
- // TODO(pielan): See whether or not we should disable unaligned accesses.
- return !slow_safe();
-}
-
-
// Private methods:
void RegExpMacroAssemblerARM64::CallCheckStackGuardState(Register scratch) {
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 69624f606e..5db220e962 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -91,7 +91,6 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 9c55af6645..6b4ea247ef 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -8,7 +8,6 @@
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index bbbfbeb799..0fd1a76e82 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -4,10 +4,12 @@
#include "src/regexp/jsregexp.h"
-#include "src/ast/ast.h"
+#include <memory>
+
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
+#include "src/elements.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/isolate-inl.h"
@@ -15,9 +17,9 @@
#include "src/ostreams.h"
#include "src/regexp/interpreter-irregexp.h"
#include "src/regexp/jsregexp-inl.h"
-#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/regexp/regexp-macro-assembler-tracer.h"
+#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-parser.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
@@ -191,11 +193,9 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
return re;
}
-
MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> last_match_info) {
+ Handle<String> subject, int index,
+ Handle<JSObject> last_match_info) {
switch (regexp->TypeTag()) {
case JSRegExp::ATOM:
return AtomExec(regexp, subject, index, last_match_info);
@@ -288,11 +288,9 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
return output_size / 2;
}
-
-Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
- Handle<String> subject,
+Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, Handle<String> subject,
int index,
- Handle<JSArray> last_match_info) {
+ Handle<JSObject> last_match_info) {
Isolate* isolate = re->GetIsolate();
static const int kNumRegisters = 2;
@@ -397,6 +395,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
data->set(JSRegExp::code_index(is_one_byte), result.code);
+ SetIrregexpCaptureNameMap(*data, compile_data.capture_name_map);
int register_max = IrregexpMaxRegisterCount(*data);
if (result.num_registers > register_max) {
SetIrregexpMaxRegisterCount(*data, result.num_registers);
@@ -416,6 +415,14 @@ void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) {
re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
}
+void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re,
+ Handle<FixedArray> value) {
+ if (value.is_null()) {
+ re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::FromInt(0));
+ } else {
+ re->set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
+ }
+}
int RegExpImpl::IrregexpNumberOfCaptures(FixedArray* re) {
return Smi::cast(re->get(JSRegExp::kIrregexpCaptureCountIndex))->value();
@@ -560,11 +567,10 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
#endif // V8_INTERPRETED_REGEXP
}
-
MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
Handle<String> subject,
int previous_index,
- Handle<JSArray> last_match_info) {
+ Handle<JSObject> last_match_info) {
Isolate* isolate = regexp->GetIsolate();
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
@@ -587,7 +593,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
if (required_registers > Isolate::kJSRegexpStaticOffsetsVectorSize) {
output_registers = NewArray<int32_t>(required_registers);
}
- base::SmartArrayPointer<int32_t> auto_release(output_registers);
+ std::unique_ptr<int32_t[]> auto_release(output_registers);
if (output_registers == NULL) {
output_registers = isolate->jsregexp_static_offsets_vector();
}
@@ -608,18 +614,16 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
return isolate->factory()->null_value();
}
-
-static void EnsureSize(Handle<JSArray> array, uint32_t minimum_size) {
+static void EnsureSize(Handle<JSObject> array, uint32_t minimum_size) {
if (static_cast<uint32_t>(array->elements()->length()) < minimum_size) {
- JSArray::SetLength(array, minimum_size);
+ array->GetElementsAccessor()->GrowCapacityAndConvert(array, minimum_size);
}
}
-
-Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
- Handle<String> subject,
- int capture_count,
- int32_t* match) {
+Handle<JSObject> RegExpImpl::SetLastMatchInfo(Handle<JSObject> last_match_info,
+ Handle<String> subject,
+ int capture_count,
+ int32_t* match) {
DCHECK(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
EnsureSize(last_match_info, capture_register_count + kLastMatchOverhead);
@@ -5159,8 +5163,10 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
ranges = negated;
}
if (ranges->length() == 0) {
- // No matches possible.
- return new (zone) EndNode(EndNode::BACKTRACK, zone);
+ ranges->Add(CharacterRange::Everything(), zone);
+ RegExpCharacterClass* fail =
+ new (zone) RegExpCharacterClass(ranges, true);
+ return new (zone) TextNode(fail, compiler->read_backward(), on_success);
}
if (standard_type() == '*') {
return UnanchoredAdvance(compiler, on_success);
@@ -6763,7 +6769,7 @@ bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
Heap* heap = pattern->GetHeap();
bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
if (heap->total_regexp_code_generated() > RegExpImpl::kRegExpCompiledLimit &&
- heap->isolate()->memory_allocator()->SizeExecutable() >
+ heap->memory_allocator()->SizeExecutable() >
RegExpImpl::kRegExpExecutableMemoryLimit) {
too_much = true;
}
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index e55d650fab..31c427ac0a 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -47,10 +47,8 @@ class RegExpImpl {
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
MUST_USE_RESULT static MaybeHandle<Object> Exec(
- Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
+ Handle<JSRegExp> regexp, Handle<String> subject, int index,
+ Handle<JSObject> lastMatchInfo);
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Handle<JSRegExp> re,
@@ -71,11 +69,9 @@ class RegExpImpl {
int32_t* output,
int output_size);
-
static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
+ Handle<String> subject, int index,
+ Handle<JSObject> lastMatchInfo);
enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
@@ -106,17 +102,13 @@ class RegExpImpl {
// captured positions. On a failure, the result is the null value.
// Returns an empty handle in case of an exception.
MUST_USE_RESULT static MaybeHandle<Object> IrregexpExec(
- Handle<JSRegExp> regexp,
- Handle<String> subject,
- int index,
- Handle<JSArray> lastMatchInfo);
+ Handle<JSRegExp> regexp, Handle<String> subject, int index,
+ Handle<JSObject> lastMatchInfo);
// Set last match info. If match is NULL, then setting captures is omitted.
- static Handle<JSArray> SetLastMatchInfo(Handle<JSArray> last_match_info,
- Handle<String> subject,
- int capture_count,
- int32_t* match);
-
+ static Handle<JSObject> SetLastMatchInfo(Handle<JSObject> last_match_info,
+ Handle<String> subject,
+ int capture_count, int32_t* match);
class GlobalCache {
public:
@@ -196,6 +188,8 @@ class RegExpImpl {
// For acting on the JSRegExp data FixedArray.
static int IrregexpMaxRegisterCount(FixedArray* re);
static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
+ static void SetIrregexpCaptureNameMap(FixedArray* re,
+ Handle<FixedArray> value);
static int IrregexpNumberOfCaptures(FixedArray* re);
static int IrregexpNumberOfRegisters(FixedArray* re);
static ByteArray* IrregexpByteCode(FixedArray* re, bool is_one_byte);
@@ -1530,6 +1524,7 @@ struct RegExpCompileData {
RegExpNode* node;
bool simple;
bool contains_anchor;
+ Handle<FixedArray> capture_name_map;
Handle<String> error;
int capture_count;
};
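
One behavioral detail from the jsregexp.cc hunk above: with last-match info
typed as JSObject, EnsureSize grows the elements store directly through the
elements accessor instead of via JSArray::SetLength. The underlying pattern is
a plain grow-if-too-small check; a stand-in sketch with a standard container:

    #include <cstddef>
    #include <vector>

    // Grow the backing store only when the required capacity exceeds the
    // current one; never shrink.
    void EnsureSize(std::vector<int>& elements, std::size_t minimum_size) {
      if (elements.size() < minimum_size) elements.resize(minimum_size);
    }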
diff --git a/deps/v8/src/regexp/ppc/OWNERS b/deps/v8/src/regexp/ppc/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/regexp/ppc/OWNERS
+++ b/deps/v8/src/regexp/ppc/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 70842f5a2c..531eac10d7 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -10,7 +10,6 @@
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
@@ -1270,11 +1269,6 @@ void RegExpMacroAssemblerPPC::CheckStackLimit() {
}
-bool RegExpMacroAssemblerPPC::CanReadUnaligned() {
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
-}
-
-
void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
@@ -1288,14 +1282,47 @@ void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
  // On PPC we can use word and halfword loads (byte-reversed on big-endian
  // targets), so this function may load up to four characters at a time.
- DCHECK(characters == 1);
__ add(current_character(), end_of_input_address(), offset);
+#if V8_TARGET_LITTLE_ENDIAN
if (mode_ == LATIN1) {
- __ lbz(current_character(), MemOperand(current_character()));
+ if (characters == 4) {
+ __ lwz(current_character(), MemOperand(current_character()));
+ } else if (characters == 2) {
+ __ lhz(current_character(), MemOperand(current_character()));
+ } else {
+ DCHECK(characters == 1);
+ __ lbz(current_character(), MemOperand(current_character()));
+ }
} else {
DCHECK(mode_ == UC16);
- __ lhz(current_character(), MemOperand(current_character()));
+ if (characters == 2) {
+ __ lwz(current_character(), MemOperand(current_character()));
+ } else {
+ DCHECK(characters == 1);
+ __ lhz(current_character(), MemOperand(current_character()));
+ }
+ }
+#else
+ if (mode_ == LATIN1) {
+ if (characters == 4) {
+ __ lwbrx(current_character(), MemOperand(r0, current_character()));
+ } else if (characters == 2) {
+ __ lhbrx(current_character(), MemOperand(r0, current_character()));
+ } else {
+ DCHECK(characters == 1);
+ __ lbz(current_character(), MemOperand(current_character()));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ if (characters == 2) {
+ __ lwz(current_character(), MemOperand(current_character()));
+ __ rlwinm(current_character(), current_character(), 16, 0, 31);
+ } else {
+ DCHECK(characters == 1);
+ __ lhz(current_character(), MemOperand(current_character()));
+ }
}
+#endif
}
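
The new PPC load paths hinge on byte order: fetching two Latin1 characters with
one halfword load only puts the first character in the low byte on
little-endian hardware, so the big-endian path uses the byte-reversing
lhbrx/lwbrx forms. The same idea in portable C++, as a sketch:

    #include <cstdint>
    #include <cstring>

    // Load two Latin1 characters with a single 16-bit load, normalizing so
    // that p[0] always ends up in the low byte of the result. 'big_endian'
    // describes the host.
    uint16_t LoadTwoLatin1Chars(const unsigned char* p, bool big_endian) {
      uint16_t raw;
      std::memcpy(&raw, p, sizeof raw);  // one halfword load
      if (big_endian) {
        raw = static_cast<uint16_t>((raw >> 8) | (raw << 8));  // byte swap
      }
      return raw;
    }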
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index d2813872c4..9151bf7b07 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -78,7 +78,6 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 0e718d3b4d..406bf84233 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -7,6 +7,7 @@
#include "src/objects.h"
#include "src/utils.h"
+#include "src/zone-containers.h"
#include "src/zone.h"
namespace v8 {
@@ -296,7 +297,10 @@ class RegExpCharacterClass final : public RegExpTree {
bool IsCharacterClass() override;
bool IsTextElement() override { return true; }
int min_match() override { return 1; }
- int max_match() override { return 1; }
+ // The character class may match two code units for unicode regexps.
+ // TODO(yangguo): we should split this class for usage in TextElement, and
+ // make max_match() dependent on the character class content.
+ int max_match() override { return 2; }
void AppendToText(RegExpText* text, Zone* zone) override;
CharacterSet character_set() { return set_; }
// TODO(lrn): Remove need for complex version if is_standard that
@@ -409,7 +413,8 @@ class RegExpQuantifier final : public RegExpTree {
class RegExpCapture final : public RegExpTree {
public:
- explicit RegExpCapture(int index) : body_(NULL), index_(index) {}
+ explicit RegExpCapture(int index)
+ : body_(NULL), index_(index), name_(nullptr) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
static RegExpNode* ToNode(RegExpTree* body, int index,
@@ -424,12 +429,15 @@ class RegExpCapture final : public RegExpTree {
RegExpTree* body() { return body_; }
void set_body(RegExpTree* body) { body_ = body; }
int index() { return index_; }
+ const ZoneVector<uc16>* name() const { return name_; }
+ void set_name(const ZoneVector<uc16>* name) { name_ = name; }
static int StartRegister(int index) { return index * 2; }
static int EndRegister(int index) { return index * 2 + 1; }
private:
RegExpTree* body_;
int index_;
+ const ZoneVector<uc16>* name_;
};
@@ -486,7 +494,9 @@ class RegExpLookaround final : public RegExpTree {
class RegExpBackReference final : public RegExpTree {
public:
- explicit RegExpBackReference(RegExpCapture* capture) : capture_(capture) {}
+ RegExpBackReference() : capture_(nullptr), name_(nullptr) {}
+ explicit RegExpBackReference(RegExpCapture* capture)
+ : capture_(capture), name_(nullptr) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpBackReference* AsBackReference() override;
@@ -497,9 +507,13 @@ class RegExpBackReference final : public RegExpTree {
int max_match() override { return kInfinity; }
int index() { return capture_->index(); }
RegExpCapture* capture() { return capture_; }
+ void set_capture(RegExpCapture* capture) { capture_ = capture; }
+ const ZoneVector<uc16>* name() const { return name_; }
+ void set_name(const ZoneVector<uc16>* name) { name_ = name; }
private:
RegExpCapture* capture_;
+ const ZoneVector<uc16>* name_;
};
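
The default constructor added to RegExpBackReference above (capture_ starts
null) is what enables two-pass handling of \k<name>: references are recorded
during parsing and patched to their captures afterwards, so forward references
still resolve. A hedged standalone sketch of that patch step:

    #include <string>
    #include <vector>

    struct Capture { std::string name; int index; };
    struct BackRef { std::string name; const Capture* capture = nullptr; };

    // Resolve every named back reference once all captures are known.
    // Returns false if any name has no matching capture.
    bool PatchNamedBackReferences(std::vector<BackRef>& refs,
                                  const std::vector<Capture>& captures) {
      for (BackRef& ref : refs) {
        for (const Capture& c : captures) {
          if (c.name == ref.name) {
            ref.capture = &c;
            break;
          }
        }
        if (ref.capture == nullptr) return false;  // unknown group name
      }
      return true;
    }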
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 9bb5073a8b..0a7f5c1b9e 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -100,6 +100,15 @@ void RegExpMacroAssembler::CheckNotInSurrogatePair(int cp_offset,
Bind(&ok);
}
+void RegExpMacroAssembler::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ LoadCurrentCharacter(cp_offset, on_outside_input, true);
+}
+
+bool RegExpMacroAssembler::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ return false;
+}
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
@@ -113,7 +122,7 @@ NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
bool NativeRegExpMacroAssembler::CanReadUnaligned() {
- return FLAG_enable_unaligned_accesses && !slow_safe();
+ return FLAG_enable_regexp_unaligned_accesses && !slow_safe();
}
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
@@ -168,7 +177,7 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
return_value = RETRY;
} else {
Object* result = isolate->stack_guard()->HandleInterrupts();
- if (result->IsException()) return_value = EXCEPTION;
+ if (result->IsException(isolate)) return_value = EXCEPTION;
}
DisallowHeapAllocation no_gc;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 2aa439eceb..76efdf910f 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -113,12 +113,12 @@ class RegExpMacroAssembler {
// Checks whether the given offset from the current position is before
// the end of the string. May overwrite the current character.
- virtual void CheckPosition(int cp_offset, Label* on_outside_input) = 0;
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
// Check whether a standard/default character class matches the current
// character. Returns false if the type of special character class does
// not have custom support.
// May clobber the current loaded character.
- virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match) = 0;
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
virtual void Fail() = 0;
virtual Handle<HeapObject> GetCode(Handle<String> source) = 0;
virtual void GoTo(Label* label) = 0;
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index d433fc8578..dba81ae9a7 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -25,6 +25,8 @@ RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
zone_(zone),
error_(error),
captures_(NULL),
+ named_captures_(NULL),
+ named_back_references_(NULL),
in_(in),
current_(kEndMarker),
ignore_case_(flags & JSRegExp::kIgnoreCase),
@@ -73,7 +75,8 @@ void RegExpParser::Advance() {
if (has_next()) {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
- ReportError(CStrVector(Isolate::kStackOverflowMessage));
+ ReportError(CStrVector(
+ MessageTemplate::TemplateString(MessageTemplate::kStackOverflow)));
} else if (zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
@@ -130,6 +133,7 @@ bool RegExpParser::IsSyntaxCharacterOrSlash(uc32 c) {
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
+ if (failed_) return NULL; // Do not overwrite any existing error.
failed_ = true;
*error_ = isolate()->factory()->NewStringFromAscii(message).ToHandleChecked();
   // Zip to the end to make sure that no more input is read.
@@ -148,6 +152,7 @@ RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
// Disjunction
RegExpTree* RegExpParser::ParsePattern() {
RegExpTree* result = ParseDisjunction(CHECK_FAILED);
+ PatchNamedBackReferences(CHECK_FAILED);
DCHECK(!has_more());
// If the result of parsing is a literal string atom, and it has the
// same length as the input, then the atom is identical to the input.
@@ -171,7 +176,7 @@ RegExpTree* RegExpParser::ParsePattern() {
RegExpTree* RegExpParser::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
RegExpParserState initial_state(NULL, INITIAL, RegExpLookaround::LOOKAHEAD, 0,
- ignore_case(), unicode(), zone());
+ nullptr, ignore_case(), unicode(), zone());
RegExpParserState* state = &initial_state;
// Cache the builder in a local variable for quick access.
RegExpBuilder* builder = initial_state.builder();
@@ -203,6 +208,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// Build result of subexpression.
if (group_type == CAPTURE) {
+ if (state->IsNamedCapture()) {
+ CreateNamedCaptureAtIndex(state->capture_name(),
+ capture_index CHECK_FAILED);
+ }
RegExpCapture* capture = GetCapture(capture_index);
capture->set_body(body);
body = capture;
@@ -267,47 +276,65 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '(': {
SubexpressionType subexpr_type = CAPTURE;
RegExpLookaround::Type lookaround_type = state->lookaround_type();
+ bool is_named_capture = false;
Advance();
if (current() == '?') {
switch (Next()) {
case ':':
subexpr_type = GROUPING;
+ Advance(2);
break;
case '=':
lookaround_type = RegExpLookaround::LOOKAHEAD;
subexpr_type = POSITIVE_LOOKAROUND;
+ Advance(2);
break;
case '!':
lookaround_type = RegExpLookaround::LOOKAHEAD;
subexpr_type = NEGATIVE_LOOKAROUND;
+ Advance(2);
break;
case '<':
+ Advance();
if (FLAG_harmony_regexp_lookbehind) {
- Advance();
- lookaround_type = RegExpLookaround::LOOKBEHIND;
if (Next() == '=') {
subexpr_type = POSITIVE_LOOKAROUND;
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ Advance(2);
break;
} else if (Next() == '!') {
subexpr_type = NEGATIVE_LOOKAROUND;
+ lookaround_type = RegExpLookaround::LOOKBEHIND;
+ Advance(2);
break;
}
}
+ if (FLAG_harmony_regexp_named_captures && unicode()) {
+ is_named_capture = true;
+ Advance();
+ break;
+ }
// Fall through.
default:
return ReportError(CStrVector("Invalid group"));
}
- Advance(2);
- } else {
+ }
+
+ const ZoneVector<uc16>* capture_name = nullptr;
+ if (subexpr_type == CAPTURE) {
if (captures_started_ >= kMaxCaptures) {
return ReportError(CStrVector("Too many captures"));
}
captures_started_++;
+
+ if (is_named_capture) {
+ capture_name = ParseCaptureGroupName(CHECK_FAILED);
+ }
}
// Store current state and begin new disjunction parsing.
state = new (zone()) RegExpParserState(
state, subexpr_type, lookaround_type, captures_started_,
- ignore_case(), unicode(), zone());
+ capture_name, ignore_case(), unicode(), zone());
builder = state->builder();
continue;
}
@@ -361,11 +388,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
if (FLAG_harmony_regexp_property) {
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
- if (!ParsePropertyClass(ranges)) {
+ if (!ParsePropertyClass(ranges, p == 'P')) {
return ReportError(CStrVector("Invalid property name"));
}
RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, p == 'P');
+ new (zone()) RegExpCharacterClass(ranges, false);
builder->AddCharacterClass(cc);
} else {
// With /u, no identity escapes except for syntax characters
@@ -415,7 +442,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
break;
}
}
- // FALLTHROUGH
+ // Fall through.
case '0': {
Advance();
if (unicode() && Next() >= '0' && Next() <= '9') {
@@ -496,6 +523,13 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
break;
}
+ case 'k':
+ if (FLAG_harmony_regexp_named_captures && unicode()) {
+ Advance(2);
+ ParseNamedBackReference(builder, state CHECK_FAILED);
+ break;
+ }
+ // Fall through.
default:
Advance();
// With /u, no identity escapes except for syntax characters
@@ -511,17 +545,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
break;
case '{': {
int dummy;
- if (ParseIntervalQuantifier(&dummy, &dummy)) {
- return ReportError(CStrVector("Nothing to repeat"));
- }
- // fallthrough
+ bool parsed = ParseIntervalQuantifier(&dummy, &dummy CHECK_FAILED);
+ if (parsed) return ReportError(CStrVector("Nothing to repeat"));
+ // Fall through.
}
case '}':
case ']':
if (unicode()) {
return ReportError(CStrVector("Lone quantifier brackets"));
}
- // fallthrough
+ // Fall through.
default:
builder->AddUnicodeCharacter(current());
Advance();
@@ -675,6 +708,148 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
return true;
}
+static void push_code_unit(ZoneVector<uc16>* v, uint32_t code_unit) {
+ if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ v->push_back(code_unit);
+ } else {
+ v->push_back(unibrow::Utf16::LeadSurrogate(code_unit));
+ v->push_back(unibrow::Utf16::TrailSurrogate(code_unit));
+ }
+}
+
+const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
+ DCHECK(FLAG_harmony_regexp_named_captures);
+ DCHECK(unicode());
+
+ ZoneVector<uc16>* name =
+ new (zone()->New(sizeof(ZoneVector<uc16>))) ZoneVector<uc16>(zone());
+
+ bool at_start = true;
+ while (true) {
+ uc32 c = current();
+ Advance();
+
+ // Convert unicode escapes.
+ if (c == '\\' && current() == 'u') {
+ Advance();
+ if (!ParseUnicodeEscape(&c)) {
+ ReportError(CStrVector("Invalid Unicode escape sequence"));
+ return nullptr;
+ }
+ }
+
+ if (at_start) {
+ if (!IdentifierStart::Is(c)) {
+ ReportError(CStrVector("Invalid capture group name"));
+ return nullptr;
+ }
+ push_code_unit(name, c);
+ at_start = false;
+ } else {
+ if (c == '>') {
+ break;
+ } else if (IdentifierPart::Is(c)) {
+ push_code_unit(name, c);
+ } else {
+ ReportError(CStrVector("Invalid capture group name"));
+ return nullptr;
+ }
+ }
+ }
+
+ return name;
+}
+
+bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name,
+ int index) {
+ DCHECK(FLAG_harmony_regexp_named_captures);
+ DCHECK(unicode());
+ DCHECK(0 < index && index <= captures_started_);
+ DCHECK_NOT_NULL(name);
+
+ if (named_captures_ == nullptr) {
+ named_captures_ = new (zone()) ZoneList<RegExpCapture*>(1, zone());
+ } else {
+ // Check for duplicates and bail if we find any.
+ for (const auto& named_capture : *named_captures_) {
+ if (*named_capture->name() == *name) {
+ ReportError(CStrVector("Duplicate capture group name"));
+ return false;
+ }
+ }
+ }
+
+ RegExpCapture* capture = GetCapture(index);
+ DCHECK(capture->name() == nullptr);
+
+ capture->set_name(name);
+ named_captures_->Add(capture, zone());
+
+ return true;
+}
+
+bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
+ RegExpParserState* state) {
+ // The parser is assumed to be on the '<' in \k<name>.
+ if (current() != '<') {
+ ReportError(CStrVector("Invalid named reference"));
+ return false;
+ }
+
+ Advance();
+ const ZoneVector<uc16>* name = ParseCaptureGroupName();
+ if (name == nullptr) {
+ return false;
+ }
+
+ if (state->IsInsideCaptureGroup(name)) {
+ builder->AddEmpty();
+ } else {
+ RegExpBackReference* atom = new (zone()) RegExpBackReference();
+ atom->set_name(name);
+
+ builder->AddAtom(atom);
+
+ if (named_back_references_ == nullptr) {
+ named_back_references_ =
+ new (zone()) ZoneList<RegExpBackReference*>(1, zone());
+ }
+ named_back_references_->Add(atom, zone());
+ }
+
+ return true;
+}
+
+void RegExpParser::PatchNamedBackReferences() {
+ if (named_back_references_ == nullptr) return;
+
+ if (named_captures_ == nullptr) {
+ ReportError(CStrVector("Invalid named capture referenced"));
+ return;
+ }
+
+ // Look up and patch the actual capture for each named back reference.
+ // TODO(jgruber): O(n^2), optimize if necessary.
+
+ for (int i = 0; i < named_back_references_->length(); i++) {
+ RegExpBackReference* ref = named_back_references_->at(i);
+
+ int index = -1;
+ for (const auto& capture : *named_captures_) {
+ if (*capture->name() == *ref->name()) {
+ index = capture->index();
+ break;
+ }
+ }
+
+ if (index == -1) {
+ ReportError(CStrVector("Invalid named capture referenced"));
+ return;
+ }
+
+ ref->set_capture(GetCapture(index));
+ }
+}
RegExpCapture* RegExpParser::GetCapture(int index) {
   // The index for the capture groups is one-based. Its index in the list is
@@ -691,6 +866,24 @@ RegExpCapture* RegExpParser::GetCapture(int index) {
return captures_->at(index - 1);
}
+Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
+ if (named_captures_ == nullptr || named_captures_->is_empty())
+ return Handle<FixedArray>();
+
+ Factory* factory = isolate()->factory();
+
+ int len = named_captures_->length() * 2;
+ Handle<FixedArray> array = factory->NewFixedArray(len);
+
+ for (int i = 0; i < named_captures_->length(); i++) {
+ RegExpCapture* capture = named_captures_->at(i);
+ MaybeHandle<String> name = factory->NewStringFromTwoByte(capture->name());
+ array->set(i * 2, *name.ToHandleChecked());
+ array->set(i * 2 + 1, Smi::FromInt(capture->index()));
+ }
+
+ return array;
+}
bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
@@ -703,6 +896,15 @@ bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
return false;
}
+bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
+ const ZoneVector<uc16>* name) {
+ DCHECK_NOT_NULL(name);
+ for (RegExpParserState* s = this; s != NULL; s = s->previous_state()) {
+ if (s->capture_name() == nullptr) continue;
+ if (*s->capture_name() == *name) return true;
+ }
+ return false;
+}
// QuantifierPrefix ::
// { DecimalDigits }
@@ -845,29 +1047,49 @@ bool RegExpParser::ParseUnicodeEscape(uc32* value) {
}
#ifdef V8_I18N_SUPPORT
-bool IsExactPropertyValueAlias(const char* property_name, UProperty property,
- int32_t property_value) {
+
+namespace {
+
+bool IsExactPropertyAlias(const char* property_name, UProperty property) {
+ const char* short_name = u_getPropertyName(property, U_SHORT_PROPERTY_NAME);
+ if (short_name != NULL && strcmp(property_name, short_name) == 0) return true;
+ for (int i = 0;; i++) {
+ const char* long_name = u_getPropertyName(
+ property, static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
+ if (long_name == NULL) break;
+ if (strcmp(property_name, long_name) == 0) return true;
+ }
+ return false;
+}
+
+bool IsExactPropertyValueAlias(const char* property_value_name,
+ UProperty property, int32_t property_value) {
const char* short_name =
u_getPropertyValueName(property, property_value, U_SHORT_PROPERTY_NAME);
- if (short_name != NULL && strcmp(property_name, short_name) == 0) return true;
+ if (short_name != NULL && strcmp(property_value_name, short_name) == 0) {
+ return true;
+ }
for (int i = 0;; i++) {
const char* long_name = u_getPropertyValueName(
property, property_value,
static_cast<UPropertyNameChoice>(U_LONG_PROPERTY_NAME + i));
if (long_name == NULL) break;
- if (strcmp(property_name, long_name) == 0) return true;
+ if (strcmp(property_value_name, long_name) == 0) return true;
}
return false;
}
-bool LookupPropertyClass(UProperty property, const char* property_name,
- ZoneList<CharacterRange>* result, Zone* zone) {
- int32_t property_value = u_getPropertyValueEnum(property, property_name);
+bool LookupPropertyValueName(UProperty property,
+ const char* property_value_name, bool negate,
+ ZoneList<CharacterRange>* result, Zone* zone) {
+ int32_t property_value =
+ u_getPropertyValueEnum(property, property_value_name);
if (property_value == UCHAR_INVALID_CODE) return false;
   // We require the property name to exactly match one of the property value
// aliases. However, u_getPropertyValueEnum uses loose matching.
- if (!IsExactPropertyValueAlias(property_name, property, property_value)) {
+ if (!IsExactPropertyValueAlias(property_value_name, property,
+ property_value)) {
return false;
}
@@ -878,6 +1100,7 @@ bool LookupPropertyClass(UProperty property, const char* property_name,
if (success) {
uset_removeAllStrings(set);
+ if (negate) uset_complement(set);
int item_count = uset_getItemCount(set);
int item_result = 0;
for (int i = 0; i < item_count; i++) {
@@ -892,49 +1115,104 @@ bool LookupPropertyClass(UProperty property, const char* property_name,
uset_close(set);
return success;
}
-#endif // V8_I18N_SUPPORT
-bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result) {
-#ifdef V8_I18N_SUPPORT
- List<char> property_name_list;
+template <size_t N>
+inline bool NameEquals(const char* name, const char (&literal)[N]) {
+ return strncmp(name, literal, N + 1) == 0;
+}
+
+bool LookupSpecialPropertyValueName(const char* name,
+ ZoneList<CharacterRange>* result,
+ bool negate, Zone* zone) {
+ if (NameEquals(name, "Any")) {
+ if (!negate) result->Add(CharacterRange::Everything(), zone);
+ } else if (NameEquals(name, "ASCII")) {
+ result->Add(negate ? CharacterRange::Range(0x80, String::kMaxCodePoint)
+ : CharacterRange::Range(0x0, 0x7f),
+ zone);
+ } else if (NameEquals(name, "Assigned")) {
+ return LookupPropertyValueName(UCHAR_GENERAL_CATEGORY, "Unassigned",
+ !negate, result, zone);
+ } else {
+ return false;
+ }
+ return true;
+}
+
+} // anonymous namespace
+
+bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
+ bool negate) {
+ // Parse the property class as follows:
+ // - In \p{name}, 'name' is interpreted
+ // - either as a general category property value name.
+ // - or as a binary property name.
+ // - In \p{name=value}, 'name' is interpreted as an enumerated property name,
+ // and 'value' is interpreted as one of the available property value names.
+ // - Aliases in PropertyAlias.txt and PropertyValueAlias.txt can be used.
+ // - Loose matching is not applied.
+ List<char> first_part;
+ List<char> second_part;
if (current() == '{') {
- for (Advance(); current() != '}'; Advance()) {
+ // Parse \p{[PropertyName=]PropertyNameValue}
+ for (Advance(); current() != '}' && current() != '='; Advance()) {
if (!has_next()) return false;
- property_name_list.Add(static_cast<char>(current()));
+ first_part.Add(static_cast<char>(current()));
+ }
+ if (current() == '=') {
+ for (Advance(); current() != '}'; Advance()) {
+ if (!has_next()) return false;
+ second_part.Add(static_cast<char>(current()));
+ }
+ second_part.Add(0); // null-terminate string.
}
- } else if (current() != kEndMarker) {
- property_name_list.Add(static_cast<char>(current()));
} else {
return false;
}
Advance();
- property_name_list.Add(0); // null-terminate string.
-
- const char* property_name = property_name_list.ToConstVector().start();
-
-#define PROPERTY_NAME_LOOKUP(PROPERTY) \
- do { \
- if (LookupPropertyClass(PROPERTY, property_name, result, zone())) { \
- return true; \
- } \
- } while (false)
-
- // General_Category (gc) found in PropertyValueAliases.txt
- PROPERTY_NAME_LOOKUP(UCHAR_GENERAL_CATEGORY_MASK);
- // Script (sc) found in Scripts.txt
- PROPERTY_NAME_LOOKUP(UCHAR_SCRIPT);
- // To disambiguate from script names, block names have an "In"-prefix.
- if (property_name_list.length() > 3 && property_name[0] == 'I' &&
- property_name[1] == 'n') {
- // Block (blk) found in Blocks.txt
- property_name += 2;
- PROPERTY_NAME_LOOKUP(UCHAR_BLOCK);
+ first_part.Add(0); // null-terminate string.
+
+ if (second_part.is_empty()) {
+ // First attempt to interpret as a general category property value name.
+ const char* name = first_part.ToConstVector().start();
+ if (LookupPropertyValueName(UCHAR_GENERAL_CATEGORY_MASK, name, negate,
+ result, zone())) {
+ return true;
+ }
+ // Interpret "Any", "ASCII", and "Assigned".
+ if (LookupSpecialPropertyValueName(name, result, negate, zone())) {
+ return true;
+ }
+ // Then attempt to interpret as a binary property name with value name 'Y'.
+ UProperty property = u_getPropertyEnum(name);
+ if (property < UCHAR_BINARY_START) return false;
+ if (property >= UCHAR_BINARY_LIMIT) return false;
+ if (!IsExactPropertyAlias(name, property)) return false;
+ return LookupPropertyValueName(property, negate ? "N" : "Y", false, result,
+ zone());
+ } else {
+ // Both property name and value name are specified. Attempt to interpret
+ // the property name as an enumerated property.
+ const char* property_name = first_part.ToConstVector().start();
+ const char* value_name = second_part.ToConstVector().start();
+ UProperty property = u_getPropertyEnum(property_name);
+ if (property < UCHAR_INT_START) return false;
+ if (property >= UCHAR_INT_LIMIT) return false;
+ if (!IsExactPropertyAlias(property_name, property)) return false;
+ return LookupPropertyValueName(property, value_name, negate, result,
+ zone());
}
-#undef PROPERTY_NAME_LOOKUP
-#endif // V8_I18N_SUPPORT
+}
+
+#else // V8_I18N_SUPPORT
+
+bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
+ bool negate) {
return false;
}
+#endif // V8_I18N_SUPPORT
+
bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
uc32 x = 0;
int d = HexValue(current());
@@ -1096,7 +1374,6 @@ CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
return CharacterRange::Singleton(first);
}
-
static const uc16 kNoCharClass = 0;
// Adds range or pre-defined character class to character ranges.
@@ -1120,19 +1397,10 @@ bool RegExpParser::ParseClassProperty(ZoneList<CharacterRange>* ranges) {
bool parse_success = false;
if (next == 'p') {
Advance(2);
- parse_success = ParsePropertyClass(ranges);
+ parse_success = ParsePropertyClass(ranges, false);
} else if (next == 'P') {
Advance(2);
- ZoneList<CharacterRange>* property_class =
- new (zone()) ZoneList<CharacterRange>(2, zone());
- parse_success = ParsePropertyClass(property_class);
- if (parse_success) {
- ZoneList<CharacterRange>* negated =
- new (zone()) ZoneList<CharacterRange>(2, zone());
- CharacterRange::Negate(property_class, negated, zone());
- const Vector<CharacterRange> negated_vector = negated->ToVector();
- ranges->AddAll(negated_vector, zone());
- }
+ parse_success = ParsePropertyClass(ranges, true);
} else {
return false;
}
@@ -1229,6 +1497,7 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
int capture_count = parser.captures_started();
result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
result->contains_anchor = parser.contains_anchor();
+ result->capture_name_map = parser.CreateCaptureNameMap();
result->capture_count = capture_count;
}
return !parser.failed();
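
CreateCaptureNameMap above lays the map out as a flat array of (name, index)
pairs: slot 2*i holds the group name and slot 2*i+1 its capture index. A
lookup over that layout, sketched with standard containers in place of
FixedArray:

    #include <string>
    #include <utility>
    #include <vector>

    // Return the capture index for 'name', or -1 if no such group exists.
    int LookupCaptureIndex(
        const std::vector<std::pair<std::string, int>>& name_map,
        const std::string& name) {
      for (const auto& entry : name_map) {
        if (entry.first == name) return entry.second;
      }
      return -1;
    }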
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 6142a9ea53..a0b975d79e 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -174,7 +174,7 @@ class RegExpParser BASE_EMBEDDED {
bool ParseHexEscape(int length, uc32* value);
bool ParseUnicodeEscape(uc32* value);
bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
- bool ParsePropertyClass(ZoneList<CharacterRange>* result);
+ bool ParsePropertyClass(ZoneList<CharacterRange>* result, bool negate);
uc32 ParseOctalLiteral();
@@ -222,13 +222,15 @@ class RegExpParser BASE_EMBEDDED {
RegExpParserState(RegExpParserState* previous_state,
SubexpressionType group_type,
RegExpLookaround::Type lookaround_type,
- int disjunction_capture_index, bool ignore_case,
+ int disjunction_capture_index,
+ const ZoneVector<uc16>* capture_name, bool ignore_case,
bool unicode, Zone* zone)
: previous_state_(previous_state),
builder_(new (zone) RegExpBuilder(zone, ignore_case, unicode)),
group_type_(group_type),
lookaround_type_(lookaround_type),
- disjunction_capture_index_(disjunction_capture_index) {}
+ disjunction_capture_index_(disjunction_capture_index),
+ capture_name_(capture_name) {}
// Parser state of containing expression, if any.
RegExpParserState* previous_state() { return previous_state_; }
bool IsSubexpression() { return previous_state_ != NULL; }
@@ -242,9 +244,16 @@ class RegExpParser BASE_EMBEDDED {
// Also the capture index of this sub-expression itself, if group_type
// is CAPTURE.
int capture_index() { return disjunction_capture_index_; }
+ // The name of the current sub-expression, if group_type is CAPTURE. Only
+ // used for named captures.
+ const ZoneVector<uc16>* capture_name() { return capture_name_; }
+
+ bool IsNamedCapture() const { return capture_name_ != nullptr; }
// Check whether the parser is inside a capture group with the given index.
bool IsInsideCaptureGroup(int index);
+ // Check whether the parser is inside a capture group with the given name.
+ bool IsInsideCaptureGroup(const ZoneVector<uc16>* name);
private:
// Linked list implementation of stack of states.
@@ -257,11 +266,32 @@ class RegExpParser BASE_EMBEDDED {
RegExpLookaround::Type lookaround_type_;
// Stored disjunction's capture index (if any).
int disjunction_capture_index_;
+ // Stored capture name (if any).
+ const ZoneVector<uc16>* capture_name_;
};
// Return the 1-indexed RegExpCapture object, allocate if necessary.
RegExpCapture* GetCapture(int index);
+ // Creates a new named capture at the specified index. Must be called exactly
+ // once for each named capture. Fails if a capture with the same name is
+ // encountered.
+ bool CreateNamedCaptureAtIndex(const ZoneVector<uc16>* name, int index);
+
+ // Parses the name of a capture group (?<name>pattern). The name must adhere
+ // to IdentifierName in the ECMAScript standard.
+ const ZoneVector<uc16>* ParseCaptureGroupName();
+
+ bool ParseNamedBackReference(RegExpBuilder* builder,
+ RegExpParserState* state);
+
+ // After the initial parsing pass, patch corresponding RegExpCapture objects
+ // into all RegExpBackReferences. This is done after initial parsing in order
+ // to avoid complicating cases in which a reference comes before the capture.
+ void PatchNamedBackReferences();
+
+ Handle<FixedArray> CreateCaptureNameMap();
+
Isolate* isolate() { return isolate_; }
Zone* zone() const { return zone_; }
@@ -278,6 +308,8 @@ class RegExpParser BASE_EMBEDDED {
Zone* zone_;
Handle<String>* error_;
ZoneList<RegExpCapture*>* captures_;
+ ZoneList<RegExpCapture*>* named_captures_;
+ ZoneList<RegExpBackReference*>* named_back_references_;
FlatStringReader* in_;
uc32 current_;
bool ignore_case_;
diff --git a/deps/v8/src/regexp/s390/OWNERS b/deps/v8/src/regexp/s390/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/regexp/s390/OWNERS
+++ b/deps/v8/src/regexp/s390/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 9dac534636..e5fd90f53f 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -10,7 +10,6 @@
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
@@ -1227,23 +1226,54 @@ void RegExpMacroAssemblerS390::CallCFunctionUsingStub(
__ mov(code_pointer(), Operand(masm_->CodeObject()));
}
-bool RegExpMacroAssemblerS390::CanReadUnaligned() {
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
-}
void RegExpMacroAssemblerS390::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
- DCHECK(characters == 1);
if (mode_ == LATIN1) {
- __ LoadlB(current_character(),
- MemOperand(current_input_offset(), end_of_input_address(),
- cp_offset * char_size()));
+ // Use load-reverse instructions on big-endian platforms.
+ if (characters == 4) {
+#if V8_TARGET_LITTLE_ENDIAN
+ __ LoadlW(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+#else
+ __ LoadLogicalReversedWordP(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+#endif
+ } else if (characters == 2) {
+#if V8_TARGET_LITTLE_ENDIAN
+ __ LoadLogicalHalfWordP(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+#else
+ __ LoadLogicalReversedHalfWordP(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+#endif
+ } else {
+ DCHECK(characters == 1);
+ __ LoadlB(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+ }
} else {
DCHECK(mode_ == UC16);
- __ LoadLogicalHalfWordP(
- current_character(),
- MemOperand(current_input_offset(), end_of_input_address(),
- cp_offset * char_size()));
+ if (characters == 2) {
+ __ LoadlW(current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+#if !V8_TARGET_LITTLE_ENDIAN
+ // Need to swap the order of the characters on big-endian platforms.
+ __ rll(current_character(), current_character(), Operand(16));
+#endif
+ } else {
+ DCHECK(characters == 1);
+ __ LoadLogicalHalfWordP(
+ current_character(),
+ MemOperand(current_input_offset(), end_of_input_address(),
+ cp_offset * char_size()));
+ }
}
}
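
The rll by 16 above is the UC16 analogue of the byte-reversed loads: on
big-endian S390, a single 32-bit load leaves the two UC16 code units in the
wrong order, and a 16-bit rotate swaps them. In portable terms:

    #include <cstdint>

    // Swap the two 16-bit halves of a 32-bit value -- exactly what a
    // rotate by 16 bits accomplishes.
    uint32_t SwapHalfwords(uint32_t v) {
      return (v << 16) | (v >> 16);
    }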
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 60ca890f12..755bc89066 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -77,7 +77,6 @@ class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
- virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 5d73b436f8..aafc840680 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -8,7 +8,6 @@
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index 9f15b1c952..4a1c3a889a 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -8,7 +8,6 @@
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 2df825afc0..1a198ea42c 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -33,131 +33,243 @@ static const char* const kGeneralRegisterNames[] = {
#undef REGISTER_NAME
};
+static const char* const kFloatRegisterNames[] = {
+#define REGISTER_NAME(R) #R,
+ FLOAT_REGISTERS(REGISTER_NAME)
+#undef REGISTER_NAME
+};
+
static const char* const kDoubleRegisterNames[] = {
#define REGISTER_NAME(R) #R,
DOUBLE_REGISTERS(REGISTER_NAME)
#undef REGISTER_NAME
};
+static const char* const kSimd128RegisterNames[] = {
+#define REGISTER_NAME(R) #R,
+ SIMD128_REGISTERS(REGISTER_NAME)
+#undef REGISTER_NAME
+};
+
STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
Register::kNumRegisters);
-STATIC_ASSERT(RegisterConfiguration::kMaxDoubleRegisters >=
+STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
+ FloatRegister::kMaxNumRegisters);
+STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
DoubleRegister::kMaxNumRegisters);
+STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
+ Simd128Register::kMaxNumRegisters);
+
+enum CompilerSelector { CRANKSHAFT, TURBOFAN };
class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
public:
explicit ArchDefaultRegisterConfiguration(CompilerSelector compiler)
- : RegisterConfiguration(Register::kNumRegisters,
- DoubleRegister::kMaxNumRegisters,
+ : RegisterConfiguration(
+ Register::kNumRegisters, DoubleRegister::kMaxNumRegisters,
#if V8_TARGET_ARCH_IA32
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X87
- kMaxAllocatableGeneralRegisterCount,
- compiler == TURBOFAN
- ? 1
- : kMaxAllocatableDoubleRegisterCount,
- compiler == TURBOFAN
- ? 1
- : kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
+ compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X64
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_ARM
- FLAG_enable_embedded_constant_pool
- ? (kMaxAllocatableGeneralRegisterCount - 1)
- : kMaxAllocatableGeneralRegisterCount,
- CpuFeatures::IsSupported(VFP32DREGS)
- ? kMaxAllocatableDoubleRegisterCount
- : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
- REGISTER_COUNT)0),
- ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(
- REGISTER_COUNT)0,
+ FLAG_enable_embedded_constant_pool
+ ? (kMaxAllocatableGeneralRegisterCount - 1)
+ : kMaxAllocatableGeneralRegisterCount,
+ CpuFeatures::IsSupported(VFP32DREGS)
+ ? kMaxAllocatableDoubleRegisterCount
+ : (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0),
+ ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0,
#elif V8_TARGET_ARCH_ARM64
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS64
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_PPC
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_S390
- kMaxAllocatableGeneralRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
+ kMaxAllocatableDoubleRegisterCount,
#else
#error Unsupported target architecture.
#endif
- kAllocatableGeneralCodes, kAllocatableDoubleCodes,
- kGeneralRegisterNames, kDoubleRegisterNames) {
+ kAllocatableGeneralCodes, kAllocatableDoubleCodes,
+ kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
+ kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
+ kSimd128RegisterNames) {
}
};
-
-template <RegisterConfiguration::CompilerSelector compiler>
+template <CompilerSelector compiler>
struct RegisterConfigurationInitializer {
static void Construct(ArchDefaultRegisterConfiguration* config) {
new (config) ArchDefaultRegisterConfiguration(compiler);
}
};
-static base::LazyInstance<
- ArchDefaultRegisterConfiguration,
- RegisterConfigurationInitializer<RegisterConfiguration::CRANKSHAFT>>::type
+static base::LazyInstance<ArchDefaultRegisterConfiguration,
+ RegisterConfigurationInitializer<CRANKSHAFT>>::type
kDefaultRegisterConfigurationForCrankshaft = LAZY_INSTANCE_INITIALIZER;
-
-static base::LazyInstance<
- ArchDefaultRegisterConfiguration,
- RegisterConfigurationInitializer<RegisterConfiguration::TURBOFAN>>::type
+static base::LazyInstance<ArchDefaultRegisterConfiguration,
+ RegisterConfigurationInitializer<TURBOFAN>>::type
kDefaultRegisterConfigurationForTurboFan = LAZY_INSTANCE_INITIALIZER;
} // namespace
-
-const RegisterConfiguration* RegisterConfiguration::ArchDefault(
- CompilerSelector compiler) {
- return compiler == TURBOFAN
- ? &kDefaultRegisterConfigurationForTurboFan.Get()
- : &kDefaultRegisterConfigurationForCrankshaft.Get();
+const RegisterConfiguration* RegisterConfiguration::Crankshaft() {
+ return &kDefaultRegisterConfigurationForCrankshaft.Get();
}
+const RegisterConfiguration* RegisterConfiguration::Turbofan() {
+ return &kDefaultRegisterConfigurationForTurboFan.Get();
+}
RegisterConfiguration::RegisterConfiguration(
int num_general_registers, int num_double_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers,
int num_allocatable_aliased_double_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
- const char* const* general_register_names,
- const char* const* double_register_names)
+ AliasingKind fp_aliasing_kind, const char* const* general_register_names,
+ const char* const* float_register_names,
+ const char* const* double_register_names,
+ const char* const* simd128_register_names)
: num_general_registers_(num_general_registers),
+ num_float_registers_(0),
num_double_registers_(num_double_registers),
+ num_simd128_registers_(0),
num_allocatable_general_registers_(num_allocatable_general_registers),
+ num_allocatable_float_registers_(0),
num_allocatable_double_registers_(num_allocatable_double_registers),
num_allocatable_aliased_double_registers_(
num_allocatable_aliased_double_registers),
+ num_allocatable_simd128_registers_(0),
allocatable_general_codes_mask_(0),
+ allocatable_float_codes_mask_(0),
allocatable_double_codes_mask_(0),
+ allocatable_simd128_codes_mask_(0),
allocatable_general_codes_(allocatable_general_codes),
allocatable_double_codes_(allocatable_double_codes),
+ fp_aliasing_kind_(fp_aliasing_kind),
general_register_names_(general_register_names),
- double_register_names_(double_register_names) {
+ float_register_names_(float_register_names),
+ double_register_names_(double_register_names),
+ simd128_register_names_(simd128_register_names) {
+ DCHECK(num_general_registers_ <= RegisterConfiguration::kMaxGeneralRegisters);
+ DCHECK(num_double_registers_ <= RegisterConfiguration::kMaxFPRegisters);
for (int i = 0; i < num_allocatable_general_registers_; ++i) {
allocatable_general_codes_mask_ |= (1 << allocatable_general_codes_[i]);
}
for (int i = 0; i < num_allocatable_double_registers_; ++i) {
allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
}
+
+ if (fp_aliasing_kind_ == COMBINE) {
+ num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
+ ? num_double_registers_ * 2
+ : kMaxFPRegisters;
+ num_allocatable_float_registers_ = 0;
+ for (int i = 0; i < num_allocatable_double_registers_; i++) {
+ int base_code = allocatable_double_codes_[i] * 2;
+ if (base_code >= kMaxFPRegisters) continue;
+ allocatable_float_codes_[num_allocatable_float_registers_++] = base_code;
+ allocatable_float_codes_[num_allocatable_float_registers_++] =
+ base_code + 1;
+ allocatable_float_codes_mask_ |= (0x3 << base_code);
+ }
+ num_simd128_registers_ = num_double_registers_ / 2;
+ num_allocatable_simd128_registers_ = 0;
+ int last_simd128_code = allocatable_double_codes_[0] / 2;
+ for (int i = 1; i < num_allocatable_double_registers_; i++) {
+ int next_simd128_code = allocatable_double_codes_[i] / 2;
+ // This scheme assumes allocatable_double_codes_ are strictly increasing.
+ DCHECK_GE(next_simd128_code, last_simd128_code);
+ if (last_simd128_code == next_simd128_code) {
+ allocatable_simd128_codes_[num_allocatable_simd128_registers_++] =
+ next_simd128_code;
+ allocatable_simd128_codes_mask_ |= (0x1 << next_simd128_code);
+ }
+ last_simd128_code = next_simd128_code;
+ }
+ } else {
+ DCHECK(fp_aliasing_kind_ == OVERLAP);
+ num_float_registers_ = num_simd128_registers_ = num_double_registers_;
+ num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
+ num_allocatable_double_registers_;
+ for (int i = 0; i < num_allocatable_float_registers_; ++i) {
+ allocatable_float_codes_[i] = allocatable_simd128_codes_[i] =
+ allocatable_double_codes_[i];
+ }
+ allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
+ allocatable_double_codes_mask_;
+ }
+}
+
+// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values.
+STATIC_ASSERT(static_cast<int>(MachineRepresentation::kSimd128) ==
+ static_cast<int>(MachineRepresentation::kFloat64) + 1);
+STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) ==
+ static_cast<int>(MachineRepresentation::kFloat32) + 1);
+
+int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
+ MachineRepresentation other_rep,
+ int* alias_base_index) const {
+ DCHECK(fp_aliasing_kind_ == COMBINE);
+ DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
+ if (rep == other_rep) {
+ *alias_base_index = index;
+ return 1;
+ }
+ int rep_int = static_cast<int>(rep);
+ int other_rep_int = static_cast<int>(other_rep);
+ if (rep_int > other_rep_int) {
+ int shift = rep_int - other_rep_int;
+ int base_index = index << shift;
+ if (base_index >= kMaxFPRegisters) {
+ // Alias indices would be out of FP register range.
+ return 0;
+ }
+ *alias_base_index = base_index;
+ return 1 << shift;
+ }
+ int shift = other_rep_int - rep_int;
+ *alias_base_index = index >> shift;
+ return 1;
+}
+
+bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
+ MachineRepresentation other_rep,
+ int other_index) const {
+ DCHECK(fp_aliasing_kind_ == COMBINE);
+ DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
+ if (rep == other_rep) {
+ return index == other_index;
+ }
+ int rep_int = static_cast<int>(rep);
+ int other_rep_int = static_cast<int>(other_rep);
+ if (rep_int > other_rep_int) {
+ int shift = rep_int - other_rep_int;
+ return index == other_index >> shift;
+ }
+ int shift = other_rep_int - rep_int;
+ return index >> shift == other_index;
}
#undef REGISTER_COUNT
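
A note on the COMBINE branch of the constructor above: each allocatable double code yields a pair of float codes, while a simd128 code becomes allocatable only when both of its double halves are. A minimal standalone sketch of that derivation (the double-code list below is hypothetical, not any real architecture's):

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical allocatable double codes; the constructor above assumes
  // they are strictly increasing.
  std::vector<int> double_codes = {0, 1, 2, 3, 6, 7};
  std::vector<int> float_codes;
  std::vector<int> simd128_codes;
  for (int code : double_codes) {
    // Each double register dN combines the float pair s(2N) and s(2N+1).
    float_codes.push_back(code * 2);
    float_codes.push_back(code * 2 + 1);
  }
  // A simd128 register qM is allocatable only when both double halves d(2M)
  // and d(2M+1) are, i.e. two adjacent double codes share M = code / 2.
  for (int i = 1; i < static_cast<int>(double_codes.size()); ++i) {
    if (double_codes[i - 1] / 2 == double_codes[i] / 2) {
      simd128_codes.push_back(double_codes[i] / 2);
    }
  }
  for (int c : float_codes) std::printf("s%d ", c);
  std::printf("\n");
  for (int c : simd128_codes) std::printf("q%d ", c);  // prints: q0 q1 q3
  std::printf("\n");
  return 0;
}
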
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index 8ad1d78304..2cb7c91eec 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_REGISTER_CONFIGURATION_H_
#include "src/base/macros.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -14,18 +15,22 @@ namespace internal {
// for instruction creation.
class RegisterConfiguration {
public:
- // Define the optimized compiler selector for register configuration
- // selection.
- //
- // TODO(X87): This distinction in RegisterConfigurations is temporary
- // until x87 TF supports all of the registers that Crankshaft does.
- enum CompilerSelector { CRANKSHAFT, TURBOFAN };
+ enum AliasingKind {
+ // Registers alias a single register of every other size (e.g. Intel).
+ OVERLAP,
+ // Registers alias two registers of the next smaller size (e.g. ARM).
+ COMBINE
+ };
// Architecture independent maxes.
static const int kMaxGeneralRegisters = 32;
- static const int kMaxDoubleRegisters = 32;
+ static const int kMaxFPRegisters = 32;
- static const RegisterConfiguration* ArchDefault(CompilerSelector compiler);
+ // Default RegisterConfigurations for the target architecture.
+ // TODO(X87): This distinction in RegisterConfigurations is temporary
+ // until x87 TF supports all of the registers that Crankshaft does.
+ static const RegisterConfiguration* Crankshaft();
+ static const RegisterConfiguration* Turbofan();
RegisterConfiguration(int num_general_registers, int num_double_registers,
int num_allocatable_general_registers,
@@ -33,23 +38,35 @@ class RegisterConfiguration {
int num_allocatable_aliased_double_registers,
const int* allocatable_general_codes,
const int* allocatable_double_codes,
+ AliasingKind fp_aliasing_kind,
char const* const* general_names,
- char const* const* double_names);
+ char const* const* float_names,
+ char const* const* double_names,
+ char const* const* simd128_names);
int num_general_registers() const { return num_general_registers_; }
+ int num_float_registers() const { return num_float_registers_; }
int num_double_registers() const { return num_double_registers_; }
+ int num_simd128_registers() const { return num_simd128_registers_; }
int num_allocatable_general_registers() const {
return num_allocatable_general_registers_;
}
+ int num_allocatable_float_registers() const {
+ return num_allocatable_float_registers_;
+ }
int num_allocatable_double_registers() const {
return num_allocatable_double_registers_;
}
- // TODO(turbofan): This is a temporary work-around required because our
+ // TODO(bbudge): This is a temporary work-around required because our
// register allocator does not yet support the aliasing of single/double
// registers on ARM.
int num_allocatable_aliased_double_registers() const {
return num_allocatable_aliased_double_registers_;
}
+ int num_allocatable_simd128_registers() const {
+ return num_allocatable_simd128_registers_;
+ }
+ AliasingKind fp_aliasing_kind() const { return fp_aliasing_kind_; }
int32_t allocatable_general_codes_mask() const {
return allocatable_general_codes_mask_;
}
@@ -59,34 +76,87 @@ class RegisterConfiguration {
int GetAllocatableGeneralCode(int index) const {
return allocatable_general_codes_[index];
}
+ bool IsAllocatableGeneralCode(int index) const {
+ return ((1 << index) & allocatable_general_codes_mask_) != 0;
+ }
+ int GetAllocatableFloatCode(int index) const {
+ return allocatable_float_codes_[index];
+ }
+ bool IsAllocatableFloatCode(int index) const {
+ return ((1 << index) & allocatable_float_codes_mask_) != 0;
+ }
int GetAllocatableDoubleCode(int index) const {
return allocatable_double_codes_[index];
}
+ bool IsAllocatableDoubleCode(int index) const {
+ return ((1 << index) & allocatable_double_codes_mask_) != 0;
+ }
+ int GetAllocatableSimd128Code(int index) const {
+ return allocatable_simd128_codes_[index];
+ }
+ bool IsAllocatableSimd128Code(int index) const {
+ return ((1 << index) & allocatable_simd128_codes_mask_) != 0;
+ }
const char* GetGeneralRegisterName(int code) const {
return general_register_names_[code];
}
+ const char* GetFloatRegisterName(int code) const {
+ return float_register_names_[code];
+ }
const char* GetDoubleRegisterName(int code) const {
return double_register_names_[code];
}
+ const char* GetSimd128RegisterName(int code) const {
+ return simd128_register_names_[code];
+ }
const int* allocatable_general_codes() const {
return allocatable_general_codes_;
}
+ const int* allocatable_float_codes() const {
+ return allocatable_float_codes_;
+ }
const int* allocatable_double_codes() const {
return allocatable_double_codes_;
}
+ const int* allocatable_simd128_codes() const {
+ return allocatable_simd128_codes_;
+ }
+
+ // Aliasing calculations for floating point registers, when fp_aliasing_kind()
+ // is COMBINE. Currently only implemented for kFloat32, kFloat64, or kSimd128
+ // reps. Returns the number of aliases, and if > 0, alias_base_index is set to
+ // the index of the first alias.
+ int GetAliases(MachineRepresentation rep, int index,
+ MachineRepresentation other_rep, int* alias_base_index) const;
+ // Returns a value indicating whether two registers alias each other, when
+ // fp_aliasing_kind() is COMBINE. Currently implemented for kFloat32,
+ // kFloat64, or kSimd128 reps.
+ bool AreAliases(MachineRepresentation rep, int index,
+ MachineRepresentation other_rep, int other_index) const;
private:
const int num_general_registers_;
+ int num_float_registers_;
const int num_double_registers_;
+ int num_simd128_registers_;
int num_allocatable_general_registers_;
+ int num_allocatable_float_registers_;
int num_allocatable_double_registers_;
int num_allocatable_aliased_double_registers_;
+ int num_allocatable_simd128_registers_;
int32_t allocatable_general_codes_mask_;
+ int32_t allocatable_float_codes_mask_;
int32_t allocatable_double_codes_mask_;
+ int32_t allocatable_simd128_codes_mask_;
const int* allocatable_general_codes_;
+ int allocatable_float_codes_[kMaxFPRegisters];
const int* allocatable_double_codes_;
+ int allocatable_simd128_codes_[kMaxFPRegisters];
+ AliasingKind fp_aliasing_kind_;
char const* const* general_register_names_;
+ char const* const* float_register_names_;
char const* const* double_register_names_;
+ char const* const* simd128_register_names_;
};
} // namespace internal
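
The GetAliases/AreAliases contract declared above leans on the STATIC_ASSERTs in register-configuration.cc: kFloat32, kFloat64 and kSimd128 are consecutive, so the width ratio of two representations is 1 << (enum distance). A standalone sketch of the index arithmetic, using stand-in enum values rather than V8's actual MachineRepresentation numbering:

#include <cassert>
#include <cstdio>

// Stand-in values; they only need to be consecutive, mirroring
// kFloat32 < kFloat64 < kSimd128 in MachineRepresentation.
enum class Rep : int { kFloat32 = 0, kFloat64 = 1, kSimd128 = 2 };

const int kMaxFPRegisters = 32;

// Mirrors GetAliases under COMBINE: returns the number of aliases that
// (rep, index) has in other_rep, writing the first alias to *alias_base_index.
int GetAliases(Rep rep, int index, Rep other_rep, int* alias_base_index) {
  if (rep == other_rep) {
    *alias_base_index = index;
    return 1;
  }
  int shift = static_cast<int>(rep) - static_cast<int>(other_rep);
  if (shift > 0) {
    // Wider register: it covers 1 << shift narrower ones starting at
    // index << shift, unless that range falls outside the register file.
    int base_index = index << shift;
    if (base_index >= kMaxFPRegisters) return 0;
    *alias_base_index = base_index;
    return 1 << shift;
  }
  // Narrower register: exactly one wider register contains it.
  *alias_base_index = index >> -shift;
  return 1;
}

int main() {
  int base = 0;
  // On an ARM-style (COMBINE) register file, d3 aliases the float pair s6/s7.
  assert(GetAliases(Rep::kFloat64, 3, Rep::kFloat32, &base) == 2 && base == 6);
  // Conversely, s7 lives inside d3.
  assert(GetAliases(Rep::kFloat32, 7, Rep::kFloat64, &base) == 1 && base == 3);
  std::printf("ok\n");
  return 0;
}
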
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index b76785deeb..fb05690b91 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -5,7 +5,6 @@
#include "src/runtime-profiler.h"
#include "src/assembler.h"
-#include "src/ast/scopeinfo.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
@@ -14,12 +13,16 @@
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
+#include "src/interpreter/interpreter.h"
namespace v8 {
namespace internal {
// Number of times a function has to be seen on the stack before it is
+// compiled for baseline.
+static const int kProfilerTicksBeforeBaseline = 1;
+// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
// If the function optimization was disabled due to high deoptimization count,
@@ -38,9 +41,13 @@ STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRCodeSizeAllowanceBase =
100 * FullCodeGenerator::kCodeSizeMultiplier;
+static const int kOSRCodeSizeAllowanceBaseIgnition =
+ 100 * interpreter::Interpreter::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTick =
4 * FullCodeGenerator::kCodeSizeMultiplier;
+static const int kOSRCodeSizeAllowancePerTickIgnition =
+ 4 * interpreter::Interpreter::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
@@ -53,16 +60,14 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
any_ic_changed_(false) {
}
-
-static void GetICCounts(SharedFunctionInfo* shared,
- int* ic_with_type_info_count, int* ic_generic_count,
- int* ic_total_count, int* type_info_percentage,
- int* generic_percentage) {
+static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
+ int* ic_generic_count, int* ic_total_count,
+ int* type_info_percentage, int* generic_percentage) {
*ic_total_count = 0;
*ic_generic_count = 0;
*ic_with_type_info_count = 0;
- if (shared->code()->kind() == Code::FUNCTION) {
- Code* shared_code = shared->code();
+ if (function->code()->kind() == Code::FUNCTION) {
+ Code* shared_code = function->shared()->code();
Object* raw_info = shared_code->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
@@ -73,7 +78,7 @@ static void GetICCounts(SharedFunctionInfo* shared,
}
// Harvest vector-ics as well
- TypeFeedbackVector* vector = shared->feedback_vector();
+ TypeFeedbackVector* vector = function->feedback_vector();
int with = 0, gen = 0;
vector->ComputeCounts(&with, &gen);
*ic_with_type_info_count += with;
@@ -88,30 +93,43 @@ static void GetICCounts(SharedFunctionInfo* shared,
}
}
-
-void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
+static void TraceRecompile(JSFunction* function, const char* reason,
+ const char* type) {
if (FLAG_trace_opt &&
function->shared()->PassesFilter(FLAG_hydrogen_filter)) {
PrintF("[marking ");
function->ShortPrint();
- PrintF(" for recompilation, reason: %s", reason);
+ PrintF(" for %s recompilation, reason: %s", type, reason);
if (FLAG_type_info_threshold > 0) {
int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(function->shared(), &typeinfo, &generic, &total,
- &type_percentage, &generic_percentage);
+ GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
+ &generic_percentage);
PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total,
type_percentage);
PrintF(", generic ICs: %d/%d (%d%%)", generic, total, generic_percentage);
}
PrintF("]\n");
}
+}
+void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
+ TraceRecompile(function, reason, "optimized");
function->AttemptConcurrentOptimization();
}
+void RuntimeProfiler::Baseline(JSFunction* function, const char* reason) {
+ TraceRecompile(function, reason, "baseline");
-void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
+  // TODO(4280): Fix this to check that the function is compiled for the
+  // interpreter once we have a standard way to check that. For now, a
+  // function will only have a bytecode array if compiled for the interpreter.
+ DCHECK(function->shared()->HasBytecodeArray());
+ function->MarkForBaseline();
+}
+
+void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
int loop_nesting_levels) {
+ JSFunction* function = frame->function();
SharedFunctionInfo* shared = function->shared();
if (!FLAG_use_osr || function->shared()->IsBuiltin()) {
return;
@@ -125,32 +143,46 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
// arguments accesses, which is unsound. Don't try OSR.
if (shared->uses_arguments()) return;
- // We're using on-stack replacement: patch the unoptimized code so that
- // any back edge in any unoptimized frame will trigger on-stack
+ // We're using on-stack replacement: modify unoptimized code so that
+ // certain back edges in any unoptimized frame will trigger on-stack
// replacement for that frame.
+ // - Ignition: Store new loop nesting level in BytecodeArray header.
+ // - FullCodegen: Patch back edges up to new level using BackEdgeTable.
if (FLAG_trace_osr) {
- PrintF("[OSR - patching back edges in ");
+ PrintF("[OSR - arming back edges in ");
function->PrintName();
PrintF("]\n");
}
- for (int i = 0; i < loop_nesting_levels; i++) {
- BackEdgeTable::Patch(isolate_, shared->code());
+ if (frame->type() == StackFrame::JAVA_SCRIPT) {
+ DCHECK(shared->HasBaselineCode());
+ DCHECK(BackEdgeTable::Verify(shared->GetIsolate(), shared->code()));
+ for (int i = 0; i < loop_nesting_levels; i++) {
+ BackEdgeTable::Patch(isolate_, shared->code());
+ }
+ } else if (frame->type() == StackFrame::INTERPRETED) {
+ DCHECK(shared->HasBytecodeArray());
+ if (!FLAG_ignition_osr) return; // Only use this when enabled.
+ int level = shared->bytecode_array()->osr_loop_nesting_level();
+ shared->bytecode_array()->set_osr_loop_nesting_level(
+ Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
+ } else {
+ UNREACHABLE();
}
}
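
Unlike the FullCodegen path, which patches back edges, the interpreted path above arms OSR by raising a saturating nesting-level marker in the BytecodeArray header. A minimal sketch of that saturating update; kMaxLoopNestingMarker here is a stand-in value, not read from V8's headers:

#include <algorithm>
#include <cassert>

// Stand-in for AbstractCode::kMaxLoopNestingMarker (the real constant lives
// in V8's headers); the interpreter compares against it on back edges.
const int kMaxLoopNestingMarker = 6;

// Back edges of loops nested up to the marker's depth trigger OSR; the
// update saturates so repeated arming never exceeds the checked maximum.
int ArmOsrMarker(int current_level, int loop_nesting_levels) {
  return std::min(current_level + loop_nesting_levels, kMaxLoopNestingMarker);
}

int main() {
  assert(ArmOsrMarker(0, 1) == 1);
  assert(ArmOsrMarker(5, 3) == kMaxLoopNestingMarker);  // saturates
  return 0;
}
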
void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
- int frame_count,
- bool frame_optimized) {
+ JavaScriptFrame* frame,
+ int frame_count) {
SharedFunctionInfo* shared = function->shared();
Code* shared_code = shared->code();
if (shared_code->kind() != Code::FUNCTION) return;
if (function->IsInOptimizationQueue()) return;
if (FLAG_always_osr) {
- AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
+ AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
- } else if (!frame_optimized &&
+ } else if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->IsOptimized())) {
@@ -164,7 +196,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
ticks < Code::ProfilerTicksField::kMax) {
shared_code->set_profiler_ticks(ticks + 1);
} else {
- AttemptOnStackReplacement(function);
+ AttemptOnStackReplacement(frame);
}
return;
}
@@ -199,7 +231,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+ GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
@@ -222,7 +254,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+ GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
@@ -235,8 +267,8 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
}
}
-void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
- bool frame_optimized) {
+void RuntimeProfiler::MaybeBaselineIgnition(JSFunction* function,
+ JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) return;
SharedFunctionInfo* shared = function->shared();
@@ -244,18 +276,67 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
- // TODO(rmcilroy): Consider whether we should optimize small functions when
- // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
- if (!frame_optimized && (function->IsMarkedForOptimization() ||
- function->IsMarkedForConcurrentOptimization() ||
- function->IsOptimized())) {
- // TODO(rmcilroy): Support OSR in these cases.
+ if (FLAG_always_osr) {
+ AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
+ // Fall through and do a normal baseline compile as well.
+ } else if (!frame->is_optimized() &&
+ (function->IsMarkedForBaseline() ||
+ function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
+ function->IsOptimized())) {
+    // Attempt OSR if we are still running interpreted code even though
+    // the function has long been marked or even already been optimized.
+ int64_t allowance =
+ kOSRCodeSizeAllowanceBaseIgnition +
+ static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
+ if (shared->bytecode_array()->Size() <= allowance) {
+ AttemptOnStackReplacement(frame);
+ }
+ return;
+ }
+
+ if (shared->optimization_disabled() &&
+ shared->disable_optimization_reason() == kOptimizationDisabledForTest) {
+ // Don't baseline functions which have been marked by NeverOptimizeFunction
+ // in a test.
+ return;
+ }
+
+ if (ticks >= kProfilerTicksBeforeBaseline) {
+ Baseline(function, "hot enough for baseline");
+ }
+}
+
+void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
+ JavaScriptFrame* frame) {
+ if (function->IsInOptimizationQueue()) return;
+
+ SharedFunctionInfo* shared = function->shared();
+ int ticks = shared->profiler_ticks();
+
+ // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
+ // than kMaxToplevelSourceSize.
+ if (FLAG_always_osr) {
+ AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
+ // Fall through and do a normal optimized compile as well.
+ } else if (!frame->is_optimized() &&
+ (function->IsMarkedForBaseline() ||
+ function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
+ function->IsOptimized())) {
+    // Attempt OSR if we are still running interpreted code even though
+    // the function has long been marked or even already been optimized.
+ int64_t allowance =
+ kOSRCodeSizeAllowanceBaseIgnition +
+ static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
+ if (shared->bytecode_array()->Size() <= allowance) {
+ AttemptOnStackReplacement(frame);
+ }
return;
}
- // Do not optimize non-optimizable functions.
if (shared->optimization_disabled()) {
if (shared->deopt_count() >= FLAG_max_opt_count) {
// If optimization was disabled due to many deoptimizations,
@@ -267,12 +348,11 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
}
return;
}
-
if (function->IsOptimized()) return;
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+ GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
@@ -290,6 +370,8 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
}
}
}
+ // TODO(rmcilroy): Consider whether we should optimize small functions when
+ // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
}
void RuntimeProfiler::MarkCandidatesForOptimization() {
@@ -320,10 +402,18 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
}
}
- if (FLAG_ignition) {
- MaybeOptimizeIgnition(function, frame->is_optimized());
+ Compiler::CompilationTier next_tier =
+ Compiler::NextCompilationTier(function);
+ if (function->shared()->code()->is_interpreter_trampoline_builtin()) {
+ if (next_tier == Compiler::BASELINE) {
+ MaybeBaselineIgnition(function, frame);
+ } else {
+ DCHECK_EQ(next_tier, Compiler::OPTIMIZED);
+ MaybeOptimizeIgnition(function, frame);
+ }
} else {
- MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized());
+ DCHECK_EQ(next_tier, Compiler::OPTIMIZED);
+ MaybeOptimizeFullCodegen(function, frame, frame_count);
}
}
any_ic_changed_ = false;
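
The Ignition OSR gate in MaybeBaselineIgnition/MaybeOptimizeIgnition above is a linear budget in profiler ticks: OSR is only attempted while the bytecode still fits the allowance. A standalone sketch of that check; kCodeSizeMultiplier is a stand-in constant chosen for illustration, not Interpreter::kCodeSizeMultiplier's real value:

#include <cstdint>
#include <cstdio>

// Stand-in for interpreter::Interpreter::kCodeSizeMultiplier.
const int kCodeSizeMultiplier = 24;
const int64_t kOSRCodeSizeAllowanceBaseIgnition = 100 * kCodeSizeMultiplier;
const int64_t kOSRCodeSizeAllowancePerTickIgnition = 4 * kCodeSizeMultiplier;

// OSR is attempted only while the bytecode fits a budget that grows with
// every profiler tick the function stays hot, mirroring the checks above.
bool OsrBudgetAllows(int ticks, int bytecode_size) {
  int64_t allowance =
      kOSRCodeSizeAllowanceBaseIgnition +
      static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
  return bytecode_size <= allowance;
}

int main() {
  std::printf("%d\n", OsrBudgetAllows(2, 2500));  // 1: 2500 <= 2592
  std::printf("%d\n", OsrBudgetAllows(0, 5000));  // 0: over the base budget
  return 0;
}
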
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index aa2f65eb29..7f2c9024bf 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -8,16 +8,11 @@
#include "src/allocation.h"
namespace v8 {
-
-namespace base {
-class Semaphore;
-}
-
namespace internal {
class Isolate;
+class JavaScriptFrame;
class JSFunction;
-class Object;
class RuntimeProfiler {
public:
@@ -27,18 +22,18 @@ class RuntimeProfiler {
void NotifyICChanged() { any_ic_changed_ = true; }
- void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
+ void AttemptOnStackReplacement(JavaScriptFrame* frame,
+ int nesting_levels = 1);
private:
- void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count,
- bool frame_optimized);
- void MaybeOptimizeIgnition(JSFunction* function, bool frame_optimized);
+ void MaybeOptimizeFullCodegen(JSFunction* function, JavaScriptFrame* frame,
+ int frame_count);
+ void MaybeBaselineIgnition(JSFunction* function, JavaScriptFrame* frame);
+ void MaybeOptimizeIgnition(JSFunction* function, JavaScriptFrame* frame);
void Optimize(JSFunction* function, const char* reason);
-
- bool CodeSizeOKForOSR(Code* shared_code);
+ void Baseline(JSFunction* function, const char* reason);
Isolate* isolate_;
-
bool any_ic_changed_;
};
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index ab436c2237..4b7cd39835 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -22,8 +22,9 @@ RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
Object* length = prototype->length();
- RUNTIME_ASSERT(length->IsSmi() && Smi::cast(length)->value() == 0);
- RUNTIME_ASSERT(prototype->HasFastSmiOrObjectElements());
+ CHECK(length->IsSmi());
+ CHECK(Smi::cast(length)->value() == 0);
+ CHECK(prototype->HasFastSmiOrObjectElements());
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
prototype->set_elements(isolate->heap()->empty_fixed_array());
@@ -31,18 +32,24 @@ RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
}
static void InstallCode(Isolate* isolate, Handle<JSObject> holder,
- const char* name, Handle<Code> code) {
+ const char* name, Handle<Code> code, int argc = -1) {
Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<JSFunction> optimized =
isolate->factory()->NewFunctionWithoutPrototype(key, code);
- optimized->shared()->DontAdaptArguments();
+ if (argc < 0) {
+ optimized->shared()->DontAdaptArguments();
+ } else {
+ optimized->shared()->set_internal_formal_parameter_count(argc);
+ }
JSObject::AddProperty(holder, key, optimized, NONE);
}
static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
- const char* name, Builtins::Name builtin_name) {
+ const char* name, Builtins::Name builtin_name,
+ int argc = -1) {
InstallCode(isolate, holder, name,
- handle(isolate->builtins()->builtin(builtin_name), isolate));
+ handle(isolate->builtins()->builtin(builtin_name), isolate),
+ argc);
}
RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
@@ -52,12 +59,18 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
isolate->factory()->NewJSObject(isolate->object_function());
InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
- FastArrayPushStub stub(isolate);
- InstallCode(isolate, holder, "push", stub.GetCode());
+ if (FLAG_minimal) {
+ InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
+ } else {
+ FastArrayPushStub stub(isolate);
+ InstallCode(isolate, holder, "push", stub.GetCode());
+ }
InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
+ InstallBuiltin(isolate, holder, "includes", Builtins::kArrayIncludes, 2);
+ InstallBuiltin(isolate, holder, "indexOf", Builtins::kArrayIndexOf, 2);
return *holder;
}
@@ -85,11 +98,12 @@ RUNTIME_FUNCTION(Runtime_FixedArraySet) {
RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- CONVERT_ARG_HANDLE_CHECKED(Map, map, 1);
- JSObject::TransitionElementsKind(array, map->elements_kind());
- return *array;
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Map, to_map, 1);
+ ElementsKind to_kind = to_map->elements_kind();
+ ElementsAccessor::ForKind(to_kind)->TransitionElementsKind(object, to_map);
+ return *object;
}
@@ -182,8 +196,14 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
+ ElementsKind kind = array->GetElementsKind();
- if (array->HasFastStringWrapperElements()) {
+ if (IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind)) {
+ uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
+ return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
+ }
+
+ if (kind == FAST_STRING_WRAPPER_ELEMENTS) {
int string_length =
String::cast(Handle<JSValue>::cast(array)->value())->length();
int backing_store_length = array->elements()->length();
@@ -192,17 +212,9 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
static_cast<uint32_t>(Max(string_length, backing_store_length))));
}
- if (!array->elements()->IsDictionary()) {
- RUNTIME_ASSERT(array->HasFastSmiOrObjectElements() ||
- array->HasFastDoubleElements());
- uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
- return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
- }
-
- KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
- // No need to separate prototype levels since we only get element keys.
- for (PrototypeIterator iter(isolate, array,
- PrototypeIterator::START_AT_RECEIVER);
+ KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
+ ALL_PROPERTIES);
+ for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy() ||
PrototypeIterator::GetCurrent<JSObject>(iter)
@@ -211,12 +223,12 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
// collecting keys in that case.
return *isolate->factory()->NewNumberFromUint(length);
}
- accumulator.NextPrototype();
Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
- JSObject::CollectOwnElementKeys(current, &accumulator, ALL_PROPERTIES);
+ accumulator.CollectOwnElementIndices(array, current);
}
// Erase any keys >= length.
- Handle<FixedArray> keys = accumulator.GetKeys(KEEP_NUMBERS);
+ Handle<FixedArray> keys =
+ accumulator.GetKeys(GetKeysConversion::kKeepNumbers);
int j = 0;
for (int i = 0; i < keys->length(); i++) {
if (NumberToUint32(keys->get(i)) >= length) continue;
@@ -321,7 +333,6 @@ Object* ArrayConstructorCommon(Isolate* isolate, Handle<JSFunction> constructor,
} // namespace
-
RUNTIME_FUNCTION(Runtime_NewArray) {
HandleScope scope(isolate);
DCHECK_LE(3, args.length());
@@ -338,66 +349,12 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
return ArrayConstructorCommon(isolate, constructor, new_target, site, &argv);
}
-
-RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
- HandleScope scope(isolate);
- // If we get 2 arguments then they are the stub parameters (constructor, type
- // info). If we get 4, then the first one is a pointer to the arguments
- // passed by the caller, and the last one is the length of the arguments
- // passed to the caller (redundant, but useful to check on the deoptimizer
- // with an assert).
- Arguments empty_args(0, NULL);
- bool no_caller_args = args.length() == 2;
- DCHECK(no_caller_args || args.length() == 4);
- int parameters_start = no_caller_args ? 0 : 1;
- Arguments* caller_args =
- no_caller_args ? &empty_args : reinterpret_cast<Arguments*>(args[0]);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
- CONVERT_ARG_HANDLE_CHECKED(Object, type_info, parameters_start + 1);
-#ifdef DEBUG
- if (!no_caller_args) {
- CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 2);
- DCHECK(arg_count == caller_args->length());
- }
-#endif
-
- Handle<AllocationSite> site;
- if (!type_info.is_null() &&
- *type_info != isolate->heap()->undefined_value()) {
- site = Handle<AllocationSite>::cast(type_info);
- DCHECK(!site->SitePointsToLiteral());
- }
-
- return ArrayConstructorCommon(isolate, constructor, constructor, site,
- caller_args);
-}
-
-RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
- HandleScope scope(isolate);
- Arguments empty_args(0, NULL);
- bool no_caller_args = args.length() == 1;
- DCHECK(no_caller_args || args.length() == 3);
- int parameters_start = no_caller_args ? 0 : 1;
- Arguments* caller_args =
- no_caller_args ? &empty_args : reinterpret_cast<Arguments*>(args[0]);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, parameters_start);
-#ifdef DEBUG
- if (!no_caller_args) {
- CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 1);
- DCHECK(arg_count == caller_args->length());
- }
-#endif
- return ArrayConstructorCommon(isolate, constructor, constructor,
- Handle<AllocationSite>::null(), caller_args);
-}
-
-
RUNTIME_FUNCTION(Runtime_NormalizeElements) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
- RUNTIME_ASSERT(!array->HasFixedTypedArrayElements() &&
- !array->IsJSGlobalProxy());
+ CHECK(!array->HasFixedTypedArrayElements());
+ CHECK(!array->IsJSGlobalProxy());
JSObject::NormalizeElements(array);
return *array;
}
@@ -418,7 +375,8 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
uint32_t index = static_cast<uint32_t>(key);
if (index >= capacity) {
- if (object->WouldConvertToSlowElements(index)) {
+ if (object->map()->is_prototype_map() ||
+ object->WouldConvertToSlowElements(index)) {
// We don't want to allow operations that cause lazy deopt. Return a Smi
// as a signal that optimized code should eagerly deoptimize.
return Smi::FromInt(0);
@@ -437,8 +395,7 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
- for (PrototypeIterator iter(isolate, array,
- PrototypeIterator::START_AT_RECEIVER);
+ for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
return isolate->heap()->true_value();
@@ -455,6 +412,15 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
return isolate->heap()->false_value();
}
+// ES6 22.1.2.2 Array.isArray
+RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Maybe<bool> result = Object::IsArray(object);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
RUNTIME_FUNCTION(Runtime_IsArray) {
SealHandleScope shs(isolate);
@@ -463,7 +429,6 @@ RUNTIME_FUNCTION(Runtime_IsArray) {
return isolate->heap()->ToBoolean(obj->IsJSArray());
}
-
RUNTIME_FUNCTION(Runtime_HasCachedArrayIndex) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -483,11 +448,201 @@ RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, original_array, 0);
- Handle<Object> constructor;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Object::ArraySpeciesConstructor(isolate, original_array));
+}
+
+// ES7 22.1.3.11 Array.prototype.includes
+RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
+
+ // Let O be ? ToObject(this value).
+ Handle<JSReceiver> object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, constructor,
- Object::ArraySpeciesConstructor(isolate, original_array));
- return *constructor;
+ isolate, object, Object::ToObject(isolate, handle(args[0], isolate)));
+
+ // Let len be ? ToLength(? Get(O, "length")).
+ int64_t len;
+ {
+ if (object->map()->instance_type() == JS_ARRAY_TYPE) {
+ uint32_t len32 = 0;
+ bool success = JSArray::cast(*object)->length()->ToArrayLength(&len32);
+ DCHECK(success);
+ USE(success);
+ len = len32;
+ } else {
+ Handle<Object> len_;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, len_,
+ Object::GetProperty(object, isolate->factory()->length_string()));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len_,
+ Object::ToLength(isolate, len_));
+ len = static_cast<int64_t>(len_->Number());
+ DCHECK_EQ(len, len_->Number());
+ }
+ }
+
+ if (len == 0) return isolate->heap()->false_value();
+
+ // Let n be ? ToInteger(fromIndex). (If fromIndex is undefined, this step
+ // produces the value 0.)
+ int64_t start_from;
+ {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, from_index,
+ Object::ToInteger(isolate, from_index));
+ double fp = from_index->Number();
+ if (fp > len) return isolate->heap()->false_value();
+ start_from = static_cast<int64_t>(fp);
+ }
+
+ int64_t index;
+ if (start_from >= 0) {
+ index = start_from;
+ } else {
+ index = len + start_from;
+ if (index < 0) {
+ index = 0;
+ }
+ }
+
+  // If the receiver is not a special receiver type and the length is a valid
+  // element index, perform a fast operation tailored to the specific
+  // ElementsKinds.
+ if (object->map()->instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
+ len < kMaxUInt32 &&
+ JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
+ Handle<JSObject> obj = Handle<JSObject>::cast(object);
+ ElementsAccessor* elements = obj->GetElementsAccessor();
+ Maybe<bool> result = elements->IncludesValue(isolate, obj, search_element,
+ static_cast<uint32_t>(index),
+ static_cast<uint32_t>(len));
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->ToBoolean(result.FromJust());
+ }
+
+  // Otherwise, perform slow lookups for special receiver types.
+ for (; index < len; ++index) {
+ // Let elementK be the result of ? Get(O, ! ToString(k)).
+ Handle<Object> element_k;
+ {
+ Handle<Object> index_obj = isolate->factory()->NewNumberFromInt64(index);
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, index_obj, &success);
+ DCHECK(success);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_k,
+ Object::GetProperty(&it));
+ }
+
+ // If SameValueZero(searchElement, elementK) is true, return true.
+ if (search_element->SameValueZero(*element_k)) {
+ return isolate->heap()->true_value();
+ }
+ }
+ return isolate->heap()->false_value();
+}
+
+RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
+
+ // Let O be ? ToObject(this value).
+ Handle<Object> receiver_obj = args.at<Object>(0);
+ if (receiver_obj->IsNull(isolate) || receiver_obj->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Array.prototype.indexOf")));
+ }
+ Handle<JSReceiver> object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, object, Object::ToObject(isolate, args.at<Object>(0)));
+
+ // Let len be ? ToLength(? Get(O, "length")).
+ int64_t len;
+ {
+ if (object->IsJSArray()) {
+ uint32_t len32 = 0;
+ bool success = JSArray::cast(*object)->length()->ToArrayLength(&len32);
+ DCHECK(success);
+ USE(success);
+ len = len32;
+ } else {
+ Handle<Object> len_;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, len_,
+ Object::GetProperty(object, isolate->factory()->length_string()));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len_,
+ Object::ToLength(isolate, len_));
+ len = static_cast<int64_t>(len_->Number());
+ DCHECK_EQ(len, len_->Number());
+ }
+ }
+
+ if (len == 0) return Smi::FromInt(-1);
+
+ // Let n be ? ToInteger(fromIndex). (If fromIndex is undefined, this step
+ // produces the value 0.)
+ int64_t start_from;
+ {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, from_index,
+ Object::ToInteger(isolate, from_index));
+ double fp = from_index->Number();
+ if (fp > len) return Smi::FromInt(-1);
+ start_from = static_cast<int64_t>(fp);
+ }
+
+ int64_t index;
+ if (start_from >= 0) {
+ index = start_from;
+ } else {
+ index = len + start_from;
+ if (index < 0) {
+ index = 0;
+ }
+ }
+
+  // If the receiver is not a special receiver type and the length is a valid
+  // element index, perform a fast operation tailored to the specific
+  // ElementsKinds.
+ if (object->map()->instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
+ len < kMaxUInt32 &&
+ JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
+ Handle<JSObject> obj = Handle<JSObject>::cast(object);
+ ElementsAccessor* elements = obj->GetElementsAccessor();
+ Maybe<int64_t> result = elements->IndexOfValue(isolate, obj, search_element,
+ static_cast<uint32_t>(index),
+ static_cast<uint32_t>(len));
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return *isolate->factory()->NewNumberFromInt64(result.FromJust());
+ }
+
+  // Otherwise, perform slow lookups for special receiver types.
+ for (; index < len; ++index) {
+ // Let elementK be the result of ? Get(O, ! ToString(k)).
+ Handle<Object> element_k;
+ {
+ Handle<Object> index_obj = isolate->factory()->NewNumberFromInt64(index);
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, index_obj, &success);
+ DCHECK(success);
+ if (!JSReceiver::HasProperty(&it).FromJust()) {
+ continue;
+ }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, element_k,
+ Object::GetProperty(&it));
+ if (search_element->StrictEquals(*element_k)) {
+ return *index_obj;
+ }
+ }
+ }
+ return Smi::FromInt(-1);
}
} // namespace internal
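
Runtime_ArrayIncludes_Slow and Runtime_ArrayIndexOf above normalize fromIndex identically before scanning: a start past the end short-circuits, and a negative start counts back from the end and clamps to zero. A standalone sketch of just that normalization (the -1 sentinel for "past the end" is this sketch's convention, not V8's):

#include <cassert>
#include <cstdint>

// Mirrors the fromIndex handling above: a start past the end means nothing
// to scan (signalled here with -1, this sketch's convention); a negative
// start counts back from the end and clamps to zero.
int64_t NormalizeFromIndex(double from_index, int64_t len) {
  if (from_index > len) return -1;
  int64_t start_from = static_cast<int64_t>(from_index);
  if (start_from >= 0) return start_from;
  int64_t index = len + start_from;
  return index < 0 ? 0 : index;
}

int main() {
  assert(NormalizeFromIndex(2, 10) == 2);
  assert(NormalizeFromIndex(-3, 10) == 7);   // counts back from the end
  assert(NormalizeFromIndex(-42, 10) == 0);  // clamped to the start
  assert(NormalizeFromIndex(11, 10) == -1);  // past the end: no scan
  return 0;
}
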
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 94d98d4ffa..3bd0738dd2 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -12,7 +12,7 @@
// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
-// https://github.com/lars-t-hansen/ecmascript_sharedmem
+// https://github.com/tc39/ecmascript_sharedmem
namespace v8 {
namespace internal {
@@ -33,18 +33,6 @@ inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
}
template <typename T>
-inline T LoadSeqCst(T* p) {
- T result;
- __atomic_load(p, &result, __ATOMIC_SEQ_CST);
- return result;
-}
-
-template <typename T>
-inline void StoreSeqCst(T* p, T value) {
- __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
inline T AddSeqCst(T* p, T value) {
return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}
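
With the Atomics load/store runtime entries deleted above, the surviving helpers all wrap the GCC/Clang __atomic builtins at sequentially consistent ordering. A minimal standalone sketch of the same pattern; it requires GCC or Clang (std::atomic would be the portable route):

#include <cstdint>
#include <cstdio>

// Same shape as the helpers kept above; __atomic_* are GCC/Clang builtins.
template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  // On failure, oldval is overwritten with the value actually observed, so
  // the previously stored value is returned either way.
  __atomic_compare_exchange_n(p, &oldval, newval, false, __ATOMIC_SEQ_CST,
                              __ATOMIC_SEQ_CST);
  return oldval;
}

int main() {
  int32_t cell = 5;
  std::printf("%d\n", AddSeqCst<int32_t>(&cell, 3));  // 5; cell is now 8
  std::printf("%d\n", CompareExchangeSeqCst<int32_t>(&cell, 8, 1));  // 8
  std::printf("%d\n", cell);  // 1
  return 0;
}
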
@@ -116,11 +104,6 @@ inline T ExchangeSeqCst(T* p, T value) {
return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(newval), \
bit_cast<vctype>(oldval)); \
- } \
- inline type LoadSeqCst(type* p) { return *p; } \
- inline void StoreSeqCst(type* p, type value) { \
- InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
- bit_cast<vctype>(value)); \
}
ATOMIC_OPS(int8_t, 8, char)
@@ -216,22 +199,6 @@ inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
template <typename T>
-inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
- T result = LoadSeqCst(static_cast<T*>(buffer) + index);
- return ToObject(isolate, result);
-}
-
-
-template <typename T>
-inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- T value = FromObject<T>(obj);
- StoreSeqCst(static_cast<T*>(buffer) + index, value);
- return *obj;
-}
-
-
-template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
@@ -307,15 +274,6 @@ inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
}
-inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
- Handle<Object> obj) {
- typedef int32_t convert_type;
- uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
- StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
- return *obj;
-}
-
-
#define DO_UINT8_CLAMPED_OP(name, op) \
inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \
size_t index, Handle<Object> obj) { \
@@ -365,6 +323,29 @@ inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
V(Uint32, uint32, UINT32, uint32_t, 4) \
V(Int32, int32, INT32, int32_t, 4)
+RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex));
+}
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
HandleScope scope(isolate);
@@ -373,11 +354,11 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
+ NumberToSize(sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
@@ -400,80 +381,17 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
}
-RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
-
- uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
-
- switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return DoLoad<ctype>(isolate, source, index);
-
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- case kExternalUint8ClampedArray:
- return DoLoad<uint8_t>(isolate, source, index);
-
- default:
- break;
- }
-
- UNREACHABLE();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_AtomicsStore) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
-
- uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
-
- switch (sta->type()) {
-#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return DoStore<ctype>(isolate, source, index, value);
-
- INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- case kExternalUint8ClampedArray:
- return DoStoreUint8Clamped(isolate, source, index, value);
-
- default:
- break;
- }
-
- UNREACHABLE();
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
+ NumberToSize(sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
@@ -501,11 +419,11 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
+ NumberToSize(sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
@@ -533,11 +451,11 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
+ NumberToSize(sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
@@ -565,11 +483,11 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
+ NumberToSize(sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
@@ -597,11 +515,11 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
+ NumberToSize(sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
@@ -629,11 +547,11 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
- NumberToSize(isolate, sta->byte_offset());
+ NumberToSize(sta->byte_offset());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
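
Editor's note: the atomics hunks above all apply the same two changes: RUNTIME_ASSERT, which used to fail softly back into JavaScript, becomes CHECK/CHECK_LT, which aborts the process, and NumberToSize drops its isolate parameter. Below is a minimal standalone sketch of the hardened validation pattern; all names are hypothetical stand-ins, not V8's, and the atomic op uses a GCC/Clang builtin.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for CHECK: abort on violation instead of throwing.
#define SKETCH_CHECK(cond)                               \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (0)

struct SharedTypedArraySketch {
  bool buffer_is_shared;  // must be backed by a SharedArrayBuffer
  std::size_t length;     // element count
  int* backing_store;
};

// Mirrors the prologue of each Runtime_Atomics* function above.
int AtomicsAddSketch(SharedTypedArraySketch* sta, std::size_t index,
                     int value) {
  SKETCH_CHECK(sta->buffer_is_shared);  // was RUNTIME_ASSERT: soft failure
  SKETCH_CHECK(index < sta->length);    // now CHECK_LT: hard invariant
  return __atomic_fetch_add(&sta->backing_store[index], value,
                            __ATOMIC_SEQ_CST);
}
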
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 3f102256bf..5448159513 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -88,25 +88,21 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
Handle<Object> prototype_parent;
Handle<Object> constructor_parent;
- if (super_class->IsTheHole()) {
+ if (super_class->IsTheHole(isolate)) {
prototype_parent = isolate->initial_object_prototype();
} else {
- if (super_class->IsNull()) {
+ if (super_class->IsNull(isolate)) {
prototype_parent = isolate->factory()->null_value();
} else if (super_class->IsConstructor()) {
- if (super_class->IsJSFunction() &&
- Handle<JSFunction>::cast(super_class)->shared()->is_generator()) {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kExtendsValueGenerator, super_class),
- Object);
- }
+ DCHECK(!super_class->IsJSFunction() ||
+ !Handle<JSFunction>::cast(super_class)->shared()->is_resumable());
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype_parent,
Runtime::GetObjectProperty(isolate, super_class,
isolate->factory()->prototype_string()),
Object);
- if (!prototype_parent->IsNull() && !prototype_parent->IsJSReceiver()) {
+ if (!prototype_parent->IsNull(isolate) &&
+ !prototype_parent->IsJSReceiver()) {
THROW_NEW_ERROR(
isolate, NewTypeError(MessageTemplate::kPrototypeParentNotAnObject,
prototype_parent),
@@ -114,10 +110,10 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
}
constructor_parent = super_class;
} else {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kExtendsValueNotFunction, super_class),
- Object);
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kExtendsValueNotConstructor,
+ super_class),
+ Object);
}
}
@@ -128,13 +124,13 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
map->SetConstructor(*constructor);
Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
- if (!super_class->IsTheHole()) {
+ if (!super_class->IsTheHole(isolate)) {
// Derived classes, just like builtins, don't create implicit receivers in
// [[construct]]. Instead they just set up new.target and call into the
// constructor. Hence we can reuse the builtins construct stub for derived
// classes.
Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStubForDerived());
- constructor->shared()->set_construct_stub(*stub);
+ constructor->shared()->SetConstructStub(*stub);
}
JSFunction::SetPrototype(constructor, prototype);
@@ -186,22 +182,12 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
CONVERT_SMI_ARG_CHECKED(start_position, 2);
CONVERT_SMI_ARG_CHECKED(end_position, 3);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, DefineClass(isolate, super_class, constructor,
- start_position, end_position));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, DefineClass(isolate, super_class, constructor, start_position,
+ end_position));
}
-RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, constructor, 0);
- JSObject::MigrateSlowToFast(constructor, 0, "RuntimeToFastProperties");
- return *constructor;
-}
-
static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
Handle<Object> receiver,
Handle<JSObject> home_object,
@@ -255,10 +241,8 @@ RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ LoadFromSuper(isolate, receiver, home_object, name));
}
@@ -270,13 +254,10 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
uint32_t index = 0;
- Handle<Object> result;
if (key->ToArrayIndex(&index)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- LoadElementFromSuper(isolate, receiver, home_object, index));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, LoadElementFromSuper(isolate, receiver, home_object, index));
}
Handle<Name> name;
@@ -284,14 +265,11 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
Object::ToName(isolate, key));
// TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- LoadElementFromSuper(isolate, receiver, home_object, index));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, LoadElementFromSuper(isolate, receiver, home_object, index));
}
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ LoadFromSuper(isolate, receiver, home_object, name));
}
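
Editor's note: a pattern repeated throughout this file (and the ones below) replaces the declare-assign-dereference idiom with RETURN_RESULT_OR_FAILURE. The following self-contained sketch shows what such a macro has to do; every name here is a hypothetical stand-in for the V8 original.

struct ObjectSketch {};
static ObjectSketch the_exception;  // stand-in for isolate->heap()->exception()

// Minimal MaybeHandle: either holds a result or signals a pending exception.
template <typename T>
struct MaybeHandleSketch {
  T* ptr = nullptr;
  bool ToHandle(T** out) const {
    if (ptr == nullptr) return false;
    *out = ptr;
    return true;
  }
};

// One macro instead of a local Handle, an assignment, and a dereference.
#define RETURN_RESULT_OR_FAILURE_SKETCH(call) \
  do {                                        \
    ObjectSketch* result;                     \
    if (!(call).ToHandle(&result)) {          \
      return &the_exception;                  \
    }                                         \
    return result;                            \
  } while (0)

// Always reports a pending exception in this sketch.
MaybeHandleSketch<ObjectSketch> DefineClassSketch() { return {}; }

ObjectSketch* Runtime_DefineClassSketch() {
  RETURN_RESULT_OR_FAILURE_SKETCH(DefineClassSketch());
}
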
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 32340e5acb..b25a5ef5c2 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -31,7 +31,7 @@ RUNTIME_FUNCTION(Runtime_JSCollectionGetTable) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
- RUNTIME_ASSERT(object->IsJSSet() || object->IsJSMap());
+ CHECK(object->IsJSSet() || object->IsJSMap());
return static_cast<JSCollection*>(object)->table();
}
@@ -40,8 +40,8 @@ RUNTIME_FUNCTION(Runtime_GenericHash) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- Handle<Smi> hash = Object::GetOrCreateHash(isolate, object);
- return *hash;
+ Smi* hash = Object::GetOrCreateHash(isolate, object);
+ return hash;
}
@@ -91,8 +91,8 @@ RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
CONVERT_SMI_ARG_CHECKED(kind, 2)
- RUNTIME_ASSERT(kind == JSSetIterator::kKindValues ||
- kind == JSSetIterator::kKindEntries);
+ CHECK(kind == JSSetIterator::kKindValues ||
+ kind == JSSetIterator::kKindEntries);
Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
holder->set_table(*table);
holder->set_index(Smi::FromInt(0));
@@ -186,9 +186,9 @@ RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
CONVERT_SMI_ARG_CHECKED(kind, 2)
- RUNTIME_ASSERT(kind == JSMapIterator::kKindKeys ||
- kind == JSMapIterator::kKindValues ||
- kind == JSMapIterator::kKindEntries);
+ CHECK(kind == JSMapIterator::kKindKeys ||
+ kind == JSMapIterator::kKindValues ||
+ kind == JSMapIterator::kKindEntries);
Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
holder->set_table(*table);
holder->set_index(Smi::FromInt(0));
@@ -232,7 +232,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
- RUNTIME_ASSERT(max_entries >= 0);
+ CHECK(max_entries >= 0);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
if (max_entries == 0 || max_entries > table->NumberOfElements()) {
@@ -250,7 +250,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
int count = 0;
for (int i = 0; count / 2 < max_entries && i < table->Capacity(); i++) {
Handle<Object> key(table->KeyAt(i), isolate);
- if (table->IsKey(*key)) {
+ if (table->IsKey(isolate, *key)) {
entries->set(count++, *key);
Object* value = table->Lookup(key);
entries->set(count++, value);
@@ -286,12 +286,13 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_SMI_ARG_CHECKED(hash, 2)
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ CHECK(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
+ CHECK(table->IsKey(isolate, *key));
Handle<Object> lookup(table->Lookup(key, hash), isolate);
- return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
+ return lookup->IsTheHole(isolate) ? isolate->heap()->undefined_value()
+ : *lookup;
}
@@ -301,12 +302,12 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_SMI_ARG_CHECKED(hash, 2)
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ CHECK(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
+ CHECK(table->IsKey(isolate, *key));
Handle<Object> lookup(table->Lookup(key, hash), isolate);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+ return isolate->heap()->ToBoolean(!lookup->IsTheHole(isolate));
}
@@ -316,10 +317,10 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_SMI_ARG_CHECKED(hash, 2)
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ CHECK(key->IsJSReceiver() || key->IsSymbol());
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
+ CHECK(table->IsKey(isolate, *key));
bool was_present = JSWeakCollection::Delete(weak_collection, key, hash);
return isolate->heap()->ToBoolean(was_present);
}
@@ -330,12 +331,12 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
+ CHECK(key->IsJSReceiver() || key->IsSymbol());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_SMI_ARG_CHECKED(hash, 3)
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
- RUNTIME_ASSERT(table->IsKey(*key));
+ CHECK(table->IsKey(isolate, *key));
JSWeakCollection::Set(weak_collection, key, value, hash);
return *weak_collection;
}
@@ -346,7 +347,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
- RUNTIME_ASSERT(max_values >= 0);
+ CHECK(max_values >= 0);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
if (max_values == 0 || max_values > table->NumberOfElements()) {
@@ -361,21 +362,12 @@ RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
DisallowHeapAllocation no_gc;
int count = 0;
for (int i = 0; count < max_values && i < table->Capacity(); i++) {
- Handle<Object> key(table->KeyAt(i), isolate);
- if (table->IsKey(*key)) values->set(count++, *key);
+ Object* key = table->KeyAt(i);
+ if (table->IsKey(isolate, key)) values->set(count++, key);
}
DCHECK_EQ(max_values, count);
}
return *isolate->factory()->NewJSArrayWithElements(values);
}
-
-
-RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
- JSWeakCollection::Initialize(weakmap, isolate);
- return *weakmap;
-}
} // namespace internal
} // namespace v8
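
Editor's note: Runtime_GenericHash above can return a bare Smi* instead of a Handle<Smi> because a Smi is an immediate value: the integer lives in the tagged word itself, so producing one never allocates and never needs GC protection. A rough, simplified sketch of 64-bit Smi tagging (constants simplified, names hypothetical):

#include <cstdint>
#include <cstdio>

// On 64-bit V8 a Smi keeps its 32-bit payload in the upper half of the word;
// the low bits carry the tag, so no heap object backs the value.
constexpr int kSmiShiftSketch = 32;

std::intptr_t MakeSmiSketch(std::int32_t value) {
  return static_cast<std::intptr_t>(value) << kSmiShiftSketch;
}

std::int32_t SmiValueSketch(std::intptr_t tagged) {
  return static_cast<std::int32_t>(tagged >> kSmiShiftSketch);
}

int main() {
  std::intptr_t hash = MakeSmiSketch(0x5eed);
  std::printf("decoded smi: %d\n", SmiValueSketch(hash));  // prints 24301
  return 0;
}
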
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 89a6fa15d2..b5910e4d3b 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -5,6 +5,8 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/asmjs/asm-js.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
@@ -19,7 +21,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_CompileLazy) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
#ifdef DEBUG
@@ -39,10 +41,22 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
return function->code();
}
+RUNTIME_FUNCTION(Runtime_CompileBaseline) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+ if (!Compiler::CompileBaseline(function)) {
+ return isolate->heap()->exception();
+ }
+ DCHECK(function->is_compiled());
+ return function->code();
+}
RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
@@ -56,7 +70,7 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
@@ -67,6 +81,49 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
return function->code();
}
+RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ Handle<JSReceiver> stdlib;
+ if (args[1]->IsJSReceiver()) {
+ stdlib = args.at<JSReceiver>(1);
+ }
+ Handle<JSObject> foreign;
+ if (args[2]->IsJSObject()) {
+ foreign = args.at<i::JSObject>(2);
+ }
+ Handle<JSArrayBuffer> memory;
+ if (args[3]->IsJSArrayBuffer()) {
+ memory = args.at<i::JSArrayBuffer>(3);
+ }
+ if (function->shared()->HasAsmWasmData() &&
+ AsmJs::IsStdlibValid(isolate, handle(function->shared()->asm_wasm_data()),
+ stdlib)) {
+ MaybeHandle<Object> result;
+ result = AsmJs::InstantiateAsmWasm(
+ isolate, handle(function->shared()->asm_wasm_data()), memory, foreign);
+ if (!result.is_null()) {
+ return *result.ToHandleChecked();
+ }
+ }
+ // Remove wasm data, mark as broken for asm->wasm,
+ // replace code with CompileLazy, and return a smi 0 to indicate failure.
+ if (function->shared()->HasAsmWasmData()) {
+ function->shared()->ClearAsmWasmData();
+ }
+ function->shared()->set_is_asm_wasm_broken(true);
+ DCHECK(function->code() ==
+ isolate->builtins()->builtin(Builtins::kInstantiateAsmJs));
+ function->ReplaceCode(isolate->builtins()->builtin(Builtins::kCompileLazy));
+ if (function->shared()->code() ==
+ isolate->builtins()->builtin(Builtins::kInstantiateAsmJs)) {
+ function->shared()->ReplaceCode(
+ isolate->builtins()->builtin(Builtins::kCompileLazy));
+ }
+ return Smi::FromInt(0);
+}
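
Editor's note: Runtime_InstantiateAsmJs, added above, first tries the cached asm->wasm module and only falls back to regular compilation when validation or linking fails. Reduced to a hedged sketch with hypothetical types, the control flow is:

enum class InstantiateResultSketch { kOk, kFailed };

struct SharedInfoSketch {
  bool has_asm_wasm_data = true;
  bool is_asm_wasm_broken = false;
};

// Returns kOk when the cached asm->wasm data matches the stdlib and links.
InstantiateResultSketch TryInstantiateAsmJsSketch(SharedInfoSketch* shared,
                                                  bool stdlib_valid,
                                                  bool link_ok) {
  if (shared->has_asm_wasm_data && stdlib_valid && link_ok) {
    return InstantiateResultSketch::kOk;  // fast path: reuse the module
  }
  // Fallback: drop the cached data, mark the function broken for asm->wasm,
  // and hand the function (and its shared info) back to the lazy compiler.
  shared->has_asm_wasm_data = false;
  shared->is_asm_wasm_broken = true;
  return InstantiateResultSketch::kFailed;  // caller returns Smi 0
}
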
RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
HandleScope scope(isolate);
@@ -77,7 +134,6 @@ RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
return isolate->heap()->undefined_value();
}
-
class ActivationsFinder : public ThreadVisitor {
public:
Code* code_;
@@ -180,38 +236,75 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
return true;
}
+namespace {
-RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- Handle<Code> caller_code(function->shared()->code());
-
- // We're not prepared to handle a function with arguments object.
- DCHECK(!function->shared()->uses_arguments());
+BailoutId DetermineEntryAndDisarmOSRForBaseline(JavaScriptFrame* frame) {
+ Handle<Code> caller_code(frame->function()->shared()->code());
- RUNTIME_ASSERT(FLAG_use_osr);
-
- // Passing the PC in the javascript frame from the caller directly is
+ // Passing the PC in the JavaScript frame from the caller directly is
// not GC safe, so we walk the stack to get it.
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
if (!caller_code->contains(frame->pc())) {
// Code on the stack may not be the code object referenced by the shared
// function info. It may have been replaced to include deoptimization data.
caller_code = Handle<Code>(frame->LookupCode());
}
+ DCHECK_EQ(frame->LookupCode(), *caller_code);
+ DCHECK_EQ(Code::FUNCTION, caller_code->kind());
+ DCHECK(caller_code->contains(frame->pc()));
+
+ // Revert the patched back edge table, regardless of whether OSR succeeds.
+ BackEdgeTable::Revert(frame->isolate(), *caller_code);
+
uint32_t pc_offset =
static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
-#ifdef DEBUG
+ return caller_code->TranslatePcOffsetToAstId(pc_offset);
+}
+
+BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
+ InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+
+ // Note that the bytecode array active on the stack might be different from
+ // the one installed on the function (e.g. patched by debugger). This however
+ // is fine because we guarantee the layout to be in sync, hence any BailoutId
+ // representing the entry point will be valid for any copy of the bytecode.
+ Handle<BytecodeArray> bytecode(iframe->GetBytecodeArray());
+
+ DCHECK(frame->LookupCode()->is_interpreter_trampoline_builtin());
+ DCHECK(frame->function()->shared()->HasBytecodeArray());
+ DCHECK(frame->is_interpreted());
+ DCHECK(FLAG_ignition_osr);
+
+ // Reset the OSR loop nesting depth to disarm back edges.
+ bytecode->set_osr_loop_nesting_level(0);
+
+ return BailoutId(iframe->GetBytecodeOffset());
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ // We're not prepared to handle a function with arguments object.
+ DCHECK(!function->shared()->uses_arguments());
+
+ // Only reachable when OSR is enabled.

+ CHECK(FLAG_use_osr);
+
+ // Determine frame triggering OSR request.
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
DCHECK_EQ(frame->function(), *function);
- DCHECK_EQ(frame->LookupCode(), *caller_code);
- DCHECK(caller_code->contains(frame->pc()));
-#endif // DEBUG
- BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
+ // Determine the entry point for which this OSR request has been fired and
+ // also disarm all back edges in the calling code to stop new requests.
+ BailoutId ast_id = frame->is_interpreted()
+ ? DetermineEntryAndDisarmOSRForInterpreter(frame)
+ : DetermineEntryAndDisarmOSRForBaseline(frame);
DCHECK(!ast_id.IsNone());
MaybeHandle<Code> maybe_result;
@@ -224,9 +317,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
maybe_result = Compiler::GetOptimizedCodeForOSR(function, ast_id, frame);
}
- // Revert the patched back edge table, regardless of whether OSR succeeds.
- BackEdgeTable::Revert(isolate, *caller_code);
-
// Check whether we ended up with usable optimized code.
Handle<Code> result;
if (maybe_result.ToHandle(&result) &&
@@ -291,7 +381,7 @@ RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
bool CodeGenerationFromStringsAllowed(Isolate* isolate,
Handle<Context> context) {
- DCHECK(context->allow_code_gen_from_strings()->IsFalse());
+ DCHECK(context->allow_code_gen_from_strings()->IsFalse(isolate));
// Check with callback if set.
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
@@ -305,17 +395,16 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
}
}
-
static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
Handle<SharedFunctionInfo> outer_info,
LanguageMode language_mode,
- int scope_position) {
+ int eval_scope_position, int eval_position) {
Handle<Context> context = Handle<Context>(isolate->context());
Handle<Context> native_context = Handle<Context>(context->native_context());
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings()->IsFalse() &&
+ if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
!CodeGenerationFromStringsAllowed(isolate, native_context)) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
@@ -331,9 +420,9 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
Handle<JSFunction> compiled;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, compiled,
- Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
- restriction, scope_position),
+ isolate, compiled, Compiler::GetFunctionFromEval(
+ source, outer_info, context, language_mode,
+ restriction, eval_scope_position, eval_position),
isolate->heap()->exception());
return *compiled;
}
@@ -341,7 +430,7 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
- DCHECK(args.length() == 5);
+ DCHECK(args.length() == 6);
Handle<Object> callee = args.at<Object>(0);
@@ -362,7 +451,7 @@ RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
isolate);
return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
- language_mode, args.smi_at(4));
+ language_mode, args.smi_at(4), args.smi_at(5));
}
} // namespace internal
} // namespace v8
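
Editor's note: the OSR rewrite above splits entry-point discovery by frame kind: interpreted frames read the current bytecode offset and clear the bytecode's OSR loop nesting level, while baseline frames revert the patched back-edge table and translate the return PC into an AST id. Schematically, with hypothetical types:

struct BailoutIdSketch { int value; };

struct OsrFrameSketch {
  bool is_interpreted;
  int bytecode_offset;       // meaningful for interpreted frames
  int pc_translated_ast_id;  // meaningful for baseline (full-codegen) frames
};

// Mirrors Runtime_CompileForOnStackReplacement: choose the entry point and
// disarm further OSR requests for whichever tier fired this one.
BailoutIdSketch DetermineOsrEntrySketch(OsrFrameSketch* frame) {
  if (frame->is_interpreted) {
    // Interpreter path: reset osr_loop_nesting_level, use the bytecode offset.
    return BailoutIdSketch{frame->bytecode_offset};
  }
  // Baseline path: revert the back-edge table, map the PC to an AST id.
  return BailoutIdSketch{frame->pc_translated_ast_id};
}
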
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 3263a89809..a8c465a380 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -10,10 +10,13 @@
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
+#include "src/globals.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -42,6 +45,9 @@ RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
JavaScriptFrameIterator it(isolate);
isolate->debug()->Break(it.frame());
+ // If live-edit has dropped frames, we are not going back to dispatch.
+ if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::FromInt(0);
+
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
InterpretedFrame* interpreted_frame =
@@ -73,8 +79,8 @@ RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
- RUNTIME_ASSERT(args[0]->IsJSFunction() || args[0]->IsUndefined() ||
- args[0]->IsNull());
+ CHECK(args[0]->IsJSFunction() || args[0]->IsUndefined(isolate) ||
+ args[0]->IsNull(isolate));
CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
isolate->debug()->SetEventListener(callback, data);
@@ -142,7 +148,7 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
Isolate* isolate, Handle<IteratorType> object) {
Factory* factory = isolate->factory();
Handle<IteratorType> iterator = Handle<IteratorType>::cast(object);
- RUNTIME_ASSERT_HANDLIFIED(iterator->kind()->IsSmi(), JSArray);
+ CHECK(iterator->kind()->IsSmi());
const char* kind = NULL;
switch (Smi::cast(iterator->kind())->value()) {
case IteratorType::kKindKeys:
@@ -155,7 +161,7 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
kind = "entries";
break;
default:
- RUNTIME_ASSERT_HANDLIFIED(false, JSArray);
+ UNREACHABLE();
}
Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
@@ -240,12 +246,12 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(4, *receiver);
result->set(5, generator->receiver());
return factory->NewJSArrayWithElements(result);
- } else if (Object::IsPromise(object)) {
+ } else if (object->IsJSPromise()) {
Handle<JSObject> promise = Handle<JSObject>::cast(object);
Handle<Object> status_obj =
- DebugGetProperty(promise, isolate->factory()->promise_status_symbol());
- RUNTIME_ASSERT_HANDLIFIED(status_obj->IsSmi(), JSArray);
+ DebugGetProperty(promise, isolate->factory()->promise_state_symbol());
+ CHECK(status_obj->IsSmi());
const char* status = "rejected";
int status_val = Handle<Smi>::cast(status_obj)->value();
switch (status_val) {
@@ -267,12 +273,31 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(1, *status_str);
Handle<Object> value_obj =
- DebugGetProperty(promise, isolate->factory()->promise_value_symbol());
+ DebugGetProperty(promise, isolate->factory()->promise_result_symbol());
Handle<String> promise_value =
factory->NewStringFromAsciiChecked("[[PromiseValue]]");
result->set(2, *promise_value);
result->set(3, *value_obj);
return factory->NewJSArrayWithElements(result);
+ } else if (object->IsJSProxy()) {
+ Handle<JSProxy> js_proxy = Handle<JSProxy>::cast(object);
+ Handle<FixedArray> result = factory->NewFixedArray(3 * 2);
+
+ Handle<String> handler_str =
+ factory->NewStringFromAsciiChecked("[[Handler]]");
+ result->set(0, *handler_str);
+ result->set(1, js_proxy->handler());
+
+ Handle<String> target_str =
+ factory->NewStringFromAsciiChecked("[[Target]]");
+ result->set(2, *target_str);
+ result->set(3, js_proxy->target());
+
+ Handle<String> is_revoked_str =
+ factory->NewStringFromAsciiChecked("[[IsRevoked]]");
+ result->set(4, *is_revoked_str);
+ result->set(5, isolate->heap()->ToBoolean(js_proxy->IsRevoked()));
+ return factory->NewJSArrayWithElements(result);
} else if (object->IsJSValue()) {
Handle<JSValue> js_value = Handle<JSValue>::cast(object);
@@ -291,10 +316,8 @@ RUNTIME_FUNCTION(Runtime_DebugGetInternalProperties) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- Handle<JSArray> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Runtime::GetInternalProperties(isolate, obj));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Runtime::GetInternalProperties(isolate, obj));
}
@@ -307,11 +330,14 @@ RUNTIME_FUNCTION(Runtime_DebugGetInternalProperties) {
// Items 2-4 are only filled if the property has either a getter or a setter.
RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, name_obj, 1);
+
+ // Convert the {name_obj} to a Name.
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, name_obj));
// Make sure to set the current context to the context before the debugger was
// entered (if the debugger is entered). The reason for switching context here
@@ -339,7 +365,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
return *isolate->factory()->NewJSArrayWithElements(details);
}
- LookupIterator it(obj, name, LookupIterator::HIDDEN);
+ LookupIterator it(obj, name, LookupIterator::OWN);
bool has_caught = false;
Handle<Object> value = DebugGetProperty(&it, &has_caught);
if (!it.IsFound()) return isolate->heap()->undefined_value();
@@ -382,7 +408,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
LookupIterator it(obj, name);
@@ -410,55 +436,11 @@ RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) {
}
-// Return the property insertion index calculated from the property details.
-// args[0]: smi with property details.
-RUNTIME_FUNCTION(Runtime_DebugPropertyIndexFromDetails) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- // TODO(verwaest): Works only for dictionary mode holders.
- return Smi::FromInt(details.dictionary_index());
-}
-
-
-// Return property value from named interceptor.
-// args[0]: object
-// args[1]: property name
-RUNTIME_FUNCTION(Runtime_DebugNamedInterceptorPropertyValue) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(obj->HasNamedInterceptor());
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::GetProperty(obj, name));
- return *result;
-}
-
-
-// Return element value from indexed interceptor.
-// args[0]: object
-// args[1]: index
-RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(obj->HasIndexedInterceptor());
- CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSReceiver::GetElement(isolate, obj, index));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
return isolate->heap()->true_value();
}
@@ -467,7 +449,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
// Count all frames which are relevant to debugging stack trace.
int n = 0;
@@ -477,12 +459,16 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
return Smi::FromInt(0);
}
- for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
+ for (StackTraceFrameIterator it(isolate, id); !it.done(); it.Advance()) {
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- it.frame()->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- // Omit functions from native and extension scripts.
- if (frames[i].function()->shared()->IsSubjectToDebugging()) n++;
+ if (it.is_wasm()) {
+ n++;
+ } else {
+ it.javascript_frame()->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ // Omit functions from native and extension scripts.
+ if (frames[i].function()->shared()->IsSubjectToDebugging()) n++;
+ }
}
}
return Smi::FromInt(n);
@@ -492,14 +478,14 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
static const int kFrameDetailsFrameIdIndex = 0;
static const int kFrameDetailsReceiverIndex = 1;
static const int kFrameDetailsFunctionIndex = 2;
-static const int kFrameDetailsArgumentCountIndex = 3;
-static const int kFrameDetailsLocalCountIndex = 4;
-static const int kFrameDetailsSourcePositionIndex = 5;
-static const int kFrameDetailsConstructCallIndex = 6;
-static const int kFrameDetailsAtReturnIndex = 7;
-static const int kFrameDetailsFlagsIndex = 8;
-static const int kFrameDetailsFirstDynamicIndex = 9;
-
+static const int kFrameDetailsScriptIndex = 3;
+static const int kFrameDetailsArgumentCountIndex = 4;
+static const int kFrameDetailsLocalCountIndex = 5;
+static const int kFrameDetailsSourcePositionIndex = 6;
+static const int kFrameDetailsConstructCallIndex = 7;
+static const int kFrameDetailsAtReturnIndex = 8;
+static const int kFrameDetailsFlagsIndex = 9;
+static const int kFrameDetailsFirstDynamicIndex = 10;
// Return an array with frame details
// args[0]: number: break id
@@ -509,12 +495,13 @@ static const int kFrameDetailsFirstDynamicIndex = 9;
// 0: Frame id
// 1: Receiver
// 2: Function
-// 3: Argument count
-// 4: Local count
-// 5: Source position
-// 6: Constructor call
-// 7: Is at return
-// 8: Flags
+// 3: Script
+// 4: Argument count
+// 5: Local count
+// 6: Source position
+// 7: Constructor call
+// 8: Is at return
+// 9: Flags
// Arguments name, value
// Locals name, value
// Return value if any
@@ -522,7 +509,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
Heap* heap = isolate->heap();
@@ -534,14 +521,13 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
return heap->undefined_value();
}
- JavaScriptFrameIterator it(isolate, id);
+ StackTraceFrameIterator it(isolate, id);
// Inlined frame index in optimized frame, starting from outer function.
int inlined_jsframe_index =
DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
if (inlined_jsframe_index == -1) return heap->undefined_value();
FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
- bool is_optimized = it.frame()->is_optimized();
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -555,58 +541,116 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Find source position in unoptimized code.
int position = frame_inspector.GetSourcePosition();
+ if (it.is_wasm()) {
+ // Create the details array (no dynamic information for wasm).
+ Handle<FixedArray> details =
+ isolate->factory()->NewFixedArray(kFrameDetailsFirstDynamicIndex);
+
+ // Add the frame id.
+ details->set(kFrameDetailsFrameIdIndex, *frame_id);
+
+ // Add the function name.
+ Handle<Object> wasm_obj(it.wasm_frame()->wasm_obj(), isolate);
+ int func_index = it.wasm_frame()->function_index();
+ Handle<String> func_name =
+ wasm::GetWasmFunctionName(isolate, wasm_obj, func_index);
+ details->set(kFrameDetailsFunctionIndex, *func_name);
+
+ // Add the script wrapper.
+ Handle<Object> script_wrapper =
+ Script::GetWrapper(frame_inspector.GetScript());
+ details->set(kFrameDetailsScriptIndex, *script_wrapper);
+
+ // Add the arguments count.
+ details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(0));
+
+ // Add the locals count.
+ details->set(kFrameDetailsLocalCountIndex, Smi::FromInt(0));
+
+ // Add the source position.
+ if (position != kNoSourcePosition) {
+ details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
+ }
+
+ // Add the constructor information.
+ details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(false));
+
+ // Add the at return information.
+ details->set(kFrameDetailsAtReturnIndex, heap->ToBoolean(false));
+
+ // Add flags to indicate information on whether this frame is
+ // bit 0: invoked in the debugger context.
+ // bit 1: optimized frame.
+ // bit 2: inlined in optimized frame.
+ int flags = 0;
+ if (*save->context() == *isolate->debug()->debug_context()) {
+ flags |= 1 << 0;
+ }
+ details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
+
+ return *isolate->factory()->NewJSArrayWithElements(details);
+ }
+
+ // Handle JavaScript frames.
+ bool is_optimized = it.frame()->is_optimized();
+
// Check for constructor frame.
bool constructor = frame_inspector.IsConstructor();
// Get scope info and read from it for local variable information.
Handle<JSFunction> function =
Handle<JSFunction>::cast(frame_inspector.GetFunction());
- RUNTIME_ASSERT(function->shared()->IsSubjectToDebugging());
+ CHECK(function->shared()->IsSubjectToDebugging());
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
DCHECK(*scope_info != ScopeInfo::Empty(isolate));
// Get the locals names and values into a temporary array.
- int local_count = scope_info->LocalCount();
- for (int slot = 0; slot < scope_info->LocalCount(); ++slot) {
+ Handle<Object> maybe_context = frame_inspector.GetContext();
+ const int local_count_with_synthetic = maybe_context->IsContext()
+ ? scope_info->LocalCount()
+ : scope_info->StackLocalCount();
+ int local_count = local_count_with_synthetic;
+ for (int slot = 0; slot < local_count_with_synthetic; ++slot) {
// Hide compiler-introduced temporary variables, whether on the stack or on
// the context.
- if (scope_info->LocalIsSynthetic(slot)) local_count--;
+ if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(slot))) {
+ local_count--;
+ }
}
- Handle<FixedArray> locals =
- isolate->factory()->NewFixedArray(local_count * 2);
-
+ List<Handle<Object>> locals;
// Fill in the values of the locals.
- int local = 0;
int i = 0;
for (; i < scope_info->StackLocalCount(); ++i) {
// Use the value from the stack.
- if (scope_info->LocalIsSynthetic(i)) continue;
- locals->set(local * 2, scope_info->LocalName(i));
- Handle<Object> value = frame_inspector.GetExpression(i);
+ if (ScopeInfo::VariableIsSynthetic(scope_info->LocalName(i))) continue;
+ locals.Add(Handle<String>(scope_info->LocalName(i), isolate));
+ Handle<Object> value =
+ frame_inspector.GetExpression(scope_info->StackLocalIndex(i));
// TODO(yangguo): We convert optimized out values to {undefined} when they
// are passed to the debugger. Eventually we should handle them somehow.
- if (value->IsOptimizedOut()) value = isolate->factory()->undefined_value();
- locals->set(local * 2 + 1, *value);
- local++;
+ if (value->IsOptimizedOut(isolate)) {
+ value = isolate->factory()->undefined_value();
+ }
+ locals.Add(value);
}
- if (local < local_count) {
+ if (locals.length() < local_count * 2) {
// Get the context containing declarations.
- Handle<Context> context(
- Handle<Context>::cast(frame_inspector.GetContext())->closure_context());
+ DCHECK(maybe_context->IsContext());
+ Handle<Context> context(Context::cast(*maybe_context)->closure_context());
+
for (; i < scope_info->LocalCount(); ++i) {
- if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->LocalName(i));
+ if (ScopeInfo::VariableIsSynthetic(*name)) continue;
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- locals->set(local * 2, *name);
+ locals.Add(name);
int context_slot_index = ScopeInfo::ContextSlotIndex(
scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
Object* value = context->get(context_slot_index);
- locals->set(local * 2 + 1, value);
- local++;
+ locals.Add(Handle<Object>(value, isolate));
}
}
@@ -614,7 +658,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// frame or if the frame is optimized it cannot be at a return.
bool at_return = false;
if (!is_optimized && index == 0) {
- at_return = isolate->debug()->IsBreakAtReturn(it.frame());
+ at_return = isolate->debug()->IsBreakAtReturn(it.javascript_frame());
}
// If positioned just before return find the value to be returned and add it
@@ -628,7 +672,8 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// the provided parameters whereas the function frame always have the number
// of arguments matching the functions parameters. The rest of the
// information (except for what is collected above) is the same.
- if ((inlined_jsframe_index == 0) && it.frame()->has_adapted_arguments()) {
+ if ((inlined_jsframe_index == 0) &&
+ it.javascript_frame()->has_adapted_arguments()) {
it.AdvanceToArgumentsFrame();
frame_inspector.SetArgumentsFrame(it.frame());
}
@@ -651,6 +696,11 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Add the function (same as in function frame).
details->set(kFrameDetailsFunctionIndex, *(frame_inspector.GetFunction()));
+ // Add the script wrapper.
+ Handle<Object> script_wrapper =
+ Script::GetWrapper(frame_inspector.GetScript());
+ details->set(kFrameDetailsScriptIndex, *script_wrapper);
+
// Add the arguments count.
details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
@@ -658,7 +708,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
details->set(kFrameDetailsLocalCountIndex, Smi::FromInt(local_count));
// Add the source position.
- if (position != RelocInfo::kNoPosition) {
+ if (position != kNoSourcePosition) {
details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
} else {
details->set(kFrameDetailsSourcePositionIndex, heap->undefined_value());
@@ -706,9 +756,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
}
// Add locals name and value from the temporary copy from the function frame.
- for (int i = 0; i < local_count * 2; i++) {
- details->set(details_index++, locals->get(i));
- }
+ for (const auto& local : locals) details->set(details_index++, *local);
// Add the value being returned.
if (at_return) {
@@ -730,7 +778,7 @@ RUNTIME_FUNCTION(Runtime_GetScopeCount) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
@@ -763,7 +811,7 @@ RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
@@ -784,10 +832,7 @@ RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
if (it.Done()) {
return isolate->heap()->undefined_value();
}
- Handle<JSObject> details;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
- it.MaterializeScopeDetails());
- return *details;
+ RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
}
@@ -804,7 +849,7 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
HandleScope scope(isolate);
DCHECK(args.length() == 3 || args.length() == 4);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
@@ -817,8 +862,8 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
// Get the frame where the debugging is performed.
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- JavaScriptFrame* frame = frame_it.frame();
+ StackTraceFrameIterator frame_it(isolate, id);
+ StandardFrame* frame = frame_it.frame();
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
List<Handle<JSObject> > result(4);
@@ -876,12 +921,51 @@ RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
return isolate->heap()->undefined_value();
}
- Handle<JSObject> details;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, details,
- it.MaterializeScopeDetails());
- return *details;
+ RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
+}
+
+RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+
+ if (!args[0]->IsJSGeneratorObject()) return Smi::FromInt(0);
+
+ // Check arguments.
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
+
+ // Count the visible scopes.
+ int n = 0;
+ for (ScopeIterator it(isolate, gen); !it.Done(); it.Next()) {
+ n++;
+ }
+
+ return Smi::FromInt(n);
}
+RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+
+ if (!args[0]->IsJSGeneratorObject()) {
+ return *isolate->factory()->undefined_value();
+ }
+
+ // Check arguments.
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+
+ // Find the requested scope.
+ int n = 0;
+ ScopeIterator it(isolate, gen);
+ for (; !it.Done() && n < index; it.Next()) {
+ n++;
+ }
+ if (it.Done()) {
+ return isolate->heap()->undefined_value();
+ }
+
+ RETURN_RESULT_OR_FAILURE(isolate, it.MaterializeScopeDetails());
+}
static bool SetScopeVariableValue(ScopeIterator* it, int index,
Handle<String> variable_name,
@@ -917,7 +1001,7 @@ RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
bool res;
if (args[0]->IsNumber()) {
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
@@ -930,10 +1014,14 @@ RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
ScopeIterator it(isolate, &frame_inspector);
res = SetScopeVariableValue(&it, index, variable_name, new_value);
- } else {
+ } else if (args[0]->IsJSFunction()) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
ScopeIterator it(isolate, fun);
res = SetScopeVariableValue(&it, index, variable_name, new_value);
+ } else {
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
+ ScopeIterator it(isolate, gen);
+ res = SetScopeVariableValue(&it, index, variable_name, new_value);
}
return isolate->heap()->ToBoolean(res);
@@ -958,78 +1046,6 @@ RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
}
-RUNTIME_FUNCTION(Runtime_GetThreadCount) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
-
- // Count all archived V8 threads.
- int n = 0;
- for (ThreadState* thread = isolate->thread_manager()->FirstThreadStateInUse();
- thread != NULL; thread = thread->Next()) {
- n++;
- }
-
- // Total number of threads is current thread and archived threads.
- return Smi::FromInt(n + 1);
-}
-
-
-static const int kThreadDetailsCurrentThreadIndex = 0;
-static const int kThreadDetailsThreadIdIndex = 1;
-static const int kThreadDetailsSize = 2;
-
-// Return an array with thread details
-// args[0]: number: break id
-// args[1]: number: thread index
-//
-// The array returned contains the following information:
-// 0: Is current thread?
-// 1: Thread id
-RUNTIME_FUNCTION(Runtime_GetThreadDetails) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
-
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
-
- // Allocate array for result.
- Handle<FixedArray> details =
- isolate->factory()->NewFixedArray(kThreadDetailsSize);
-
- // Thread index 0 is current thread.
- if (index == 0) {
- // Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex,
- isolate->heap()->true_value());
- details->set(kThreadDetailsThreadIdIndex,
- Smi::FromInt(ThreadId::Current().ToInteger()));
- } else {
- // Find the thread with the requested index.
- int n = 1;
- ThreadState* thread = isolate->thread_manager()->FirstThreadStateInUse();
- while (index != n && thread != NULL) {
- thread = thread->Next();
- n++;
- }
- if (thread == NULL) {
- return isolate->heap()->undefined_value();
- }
-
- // Fill the details.
- details->set(kThreadDetailsCurrentThreadIndex,
- isolate->heap()->false_value());
- details->set(kThreadDetailsThreadIdIndex,
- Smi::FromInt(thread->id().ToInteger()));
- }
-
- // Convert to JS array and return.
- return *isolate->factory()->NewJSArrayWithElements(details);
-}
-
-
// Sets the disable break state
// args[0]: disable break state
RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
@@ -1049,7 +1065,7 @@ static bool IsPositionAlignmentCodeCorrect(int alignment) {
RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- RUNTIME_ASSERT(isolate->debug()->is_active());
+ CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
@@ -1063,7 +1079,9 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
// Find the number of break points
Handle<Object> break_locations =
Debug::GetSourceBreakLocations(shared, alignment);
- if (break_locations->IsUndefined()) return isolate->heap()->undefined_value();
+ if (break_locations->IsUndefined(isolate)) {
+ return isolate->heap()->undefined_value();
+ }
// Return array as JS array
return *isolate->factory()->NewJSArrayWithElements(
Handle<FixedArray>::cast(break_locations));
@@ -1077,16 +1095,16 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
- RUNTIME_ASSERT(isolate->debug()->is_active());
+ CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- RUNTIME_ASSERT(source_position >= function->shared()->start_position() &&
- source_position <= function->shared()->end_position());
+ CHECK(source_position >= function->shared()->start_position() &&
+ source_position <= function->shared()->end_position());
CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 2);
// Set break point.
- RUNTIME_ASSERT(isolate->debug()->SetBreakPoint(
- function, break_point_object_arg, &source_position));
+ CHECK(isolate->debug()->SetBreakPoint(function, break_point_object_arg,
+ &source_position));
return Smi::FromInt(source_position);
}
@@ -1102,10 +1120,10 @@ RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
- RUNTIME_ASSERT(isolate->debug()->is_active());
+ CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
- RUNTIME_ASSERT(source_position >= 0);
+ CHECK(source_position >= 0);
CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 3);
@@ -1116,7 +1134,7 @@ RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
static_cast<BreakPositionAlignment>(statement_aligned_code);
// Get the script from the script wrapper.
- RUNTIME_ASSERT(wrapper->value()->IsScript());
+ CHECK(wrapper->value()->IsScript());
Handle<Script> script(Script::cast(wrapper->value()));
// Set break point.
@@ -1134,7 +1152,7 @@ RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- RUNTIME_ASSERT(isolate->debug()->is_active());
+ CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
// Clear break point.
@@ -1184,7 +1202,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
if (!args[1]->IsNumber()) {
return isolate->Throw(isolate->heap()->illegal_argument_string());
@@ -1210,7 +1228,7 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
RUNTIME_FUNCTION(Runtime_ClearStepping) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
- RUNTIME_ASSERT(isolate->debug()->is_active());
+ CHECK(isolate->debug()->is_active());
isolate->debug()->ClearStepping();
return isolate->heap()->undefined_value();
}
@@ -1223,7 +1241,7 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
// evaluated.
DCHECK(args.length() == 6);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
@@ -1233,12 +1251,9 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
- disable_break, context_extension));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
+ disable_break, context_extension));
}
@@ -1249,24 +1264,25 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
// evaluated.
DCHECK(args.length() == 4);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 3);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
DebugEvaluate::Global(isolate, source, disable_break, context_extension));
- return *result;
}
RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
- RUNTIME_ASSERT(isolate->debug()->is_active());
+
+ // This runtime function is used by the debugger to determine whether the
+ // debugger is active or not. Hence we fail gracefully here and don't crash.
+ if (!isolate->debug()->is_active()) return isolate->ThrowIllegalOperation();
Handle<FixedArray> instances;
{
@@ -1292,16 +1308,13 @@ RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
}
// Return result as a JS array.
- Handle<JSObject> result =
- isolate->factory()->NewJSObject(isolate->array_function());
- JSArray::SetContent(Handle<JSArray>::cast(result), instances);
- return *result;
+ return *isolate->factory()->NewJSArrayWithElements(instances);
}
static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate,
JSObject* object,
Object* proto) {
- PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
+ PrototypeIterator iter(isolate, object, kStartAtReceiver);
while (true) {
iter.AdvanceIgnoringProxies();
if (iter.IsAtEnd()) return false;
@@ -1319,9 +1332,9 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, filter, 1);
- RUNTIME_ASSERT(filter->IsUndefined() || filter->IsJSObject());
+ CHECK(filter->IsUndefined(isolate) || filter->IsJSObject());
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
- RUNTIME_ASSERT(max_references >= 0);
+ CHECK(max_references >= 0);
List<Handle<JSObject> > instances;
Heap* heap = isolate->heap();
@@ -1338,7 +1351,7 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
if (!obj->ReferencesObject(*target)) continue;
// Check filter if supplied. This is normally used to avoid
// references from mirror objects.
- if (!filter->IsUndefined() &&
+ if (!filter->IsUndefined(isolate) &&
HasInPrototypeChainIgnoringProxies(isolate, obj, *filter)) {
continue;
}
@@ -1376,7 +1389,7 @@ RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
- RUNTIME_ASSERT(max_references >= 0);
+ CHECK(max_references >= 0);
List<Handle<JSObject> > instances;
Heap* heap = isolate->heap();
@@ -1408,12 +1421,9 @@ RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
HandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- Handle<Object> prototype;
// TODO(1543): Come up with a solution for clients to handle potential errors
// thrown by an intermediate proxy.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
- JSReceiver::GetPrototype(isolate, obj));
- return *prototype;
+ RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
}
@@ -1425,11 +1435,15 @@ RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- RUNTIME_ASSERT(script_wrapper->value()->IsScript());
+ CHECK(script_wrapper->value()->IsScript());
Handle<Script> script(Script::cast(script_wrapper->value()));
- int compilation_state = script->compilation_state();
- RUNTIME_ASSERT(compilation_state == Script::COMPILATION_STATE_INITIAL);
+ // The following condition is not guaranteed to hold and a failure is also
+ // propagated to callers. Hence we fail gracefully here and don't crash.
+ if (script->compilation_state() != Script::COMPILATION_STATE_INITIAL) {
+ return isolate->ThrowIllegalOperation();
+ }
+
script->set_source(*source);
return isolate->heap()->undefined_value();
@@ -1455,49 +1469,12 @@ RUNTIME_FUNCTION(Runtime_FunctionGetDebugName) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSBoundFunction()) {
- return Handle<JSBoundFunction>::cast(function)->name();
- }
- Handle<Object> name =
- JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
- return *name;
-}
-
-
-// A testing entry. Returns statement position which is the closest to
-// source_position.
-RUNTIME_FUNCTION(Runtime_GetFunctionCodePositionFromSource) {
- HandleScope scope(isolate);
- CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
- RUNTIME_ASSERT(isolate->debug()->is_active());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
-
- Handle<Code> code(function->code(), isolate);
-
- if (code->kind() != Code::FUNCTION &&
- code->kind() != Code::OPTIMIZED_FUNCTION) {
- return isolate->heap()->undefined_value();
- }
-
- RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
- int closest_pc = 0;
- int distance = kMaxInt;
- while (!it.done()) {
- int statement_position = static_cast<int>(it.rinfo()->data());
- // Check if this break point is closer that what was previously found.
- if (source_position <= statement_position &&
- statement_position - source_position < distance) {
- closest_pc =
- static_cast<int>(it.rinfo()->pc() - code->instruction_start());
- distance = statement_position - source_position;
- // Check whether we can't get any closer.
- if (distance == 0) break;
- }
- it.next();
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSBoundFunction::GetName(
+ isolate, Handle<JSBoundFunction>::cast(function)));
+ } else {
+ return *JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
}
-
- return Smi::FromInt(closest_pc);
}
@@ -1515,12 +1492,9 @@ RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
return isolate->heap()->exception();
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, function, handle(function->global_proxy()), 0,
- NULL));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Execution::Call(isolate, function,
+ handle(function->global_proxy()), 0, NULL));
}
@@ -1593,36 +1567,248 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
return *Script::GetWrapper(found);
}
+RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSValue, script, 0);
+
+ CHECK(script->value()->IsScript());
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ Script::InitLineEnds(script_handle);
+
+ FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+ return Smi::FromInt(line_ends_array->length());
+}
+
+RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSValue, script, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
+
+ CHECK(script->value()->IsScript());
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ Script::InitLineEnds(script_handle);
+
+ FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+ const int line_count = line_ends_array->length();
+
+ // If line == line_count, we return the first position beyond the last line.
+ if (line < 0 || line > line_count) {
+ return Smi::FromInt(-1);
+ } else if (line == 0) {
+ return Smi::FromInt(0);
+ } else {
+ DCHECK(0 < line && line <= line_count);
+ const int pos = Smi::cast(line_ends_array->get(line - 1))->value() + 1;
+ return Smi::FromInt(pos);
+ }
+}
+
+RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSValue, script, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
+
+ CHECK(script->value()->IsScript());
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ Script::InitLineEnds(script_handle);
+
+ FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+ const int line_count = line_ends_array->length();
+
+ if (line < 0 || line >= line_count) {
+ return Smi::FromInt(-1);
+ } else {
+ return Smi::cast(line_ends_array->get(line));
+ }
+}
+
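
The three line-based runtime functions above all lean on one convention: after
Script::InitLineEnds, line_ends[i] holds the source position of the terminator
of line i, so line i starts at line_ends[i - 1] + 1 (or at 0 for the first
line) and the script has as many lines as the array has entries. A minimal
standalone sketch of that bookkeeping, with std::string and std::vector
standing in for V8's Script and FixedArray (names are illustrative, not V8
API):

    #include <cassert>
    #include <string>
    #include <vector>

    // Collect the position of each line terminator, mirroring what
    // Script::InitLineEnds caches on the script.
    std::vector<int> ComputeLineEnds(const std::string& source) {
      std::vector<int> line_ends;
      for (int i = 0; i < static_cast<int>(source.size()); ++i) {
        if (source[i] == '\n') line_ends.push_back(i);
      }
      // Treat an unterminated last line as ending at the end of the source.
      if (source.empty() || source.back() != '\n')
        line_ends.push_back(static_cast<int>(source.size()));
      return line_ends;
    }

    int LineStartPosition(const std::vector<int>& line_ends, int line) {
      // line == size() yields the first position beyond the last line,
      // matching the comment in Runtime_ScriptLineStartPosition.
      if (line < 0 || line > static_cast<int>(line_ends.size())) return -1;
      return (line == 0) ? 0 : line_ends[line - 1] + 1;
    }

    int LineEndPosition(const std::vector<int>& line_ends, int line) {
      if (line < 0 || line >= static_cast<int>(line_ends.size())) return -1;
      return line_ends[line];
    }

    int main() {
      const std::string source = "first\nsecond\nthird";
      std::vector<int> line_ends = ComputeLineEnds(source);
      assert(line_ends.size() == 3);  // Runtime_ScriptLineCount analogue.
      assert(LineStartPosition(line_ends, 1) == 6);
      assert(LineEndPosition(line_ends, 1) == 12);
      // Extracting the line text, as Runtime_ScriptSourceLine does below:
      assert(source.substr(6, 12 - 6) == "second");
      return 0;
    }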
+static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
+ Script::OffsetFlag offset_flag,
+ Isolate* isolate) {
+ Script::PositionInfo info;
+ if (!script->GetPositionInfo(position, &info, offset_flag)) {
+ return isolate->factory()->null_value();
+ }
+
+ Handle<String> source = handle(String::cast(script->source()), isolate);
+ Handle<String> sourceText = script->type() == Script::TYPE_WASM
+ ? isolate->factory()->empty_string()
+ : isolate->factory()->NewSubString(
+ source, info.line_start, info.line_end);
+
+ Handle<JSObject> jsinfo =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ JSObject::AddProperty(jsinfo, isolate->factory()->script_string(), script,
+ NONE);
+ JSObject::AddProperty(jsinfo, isolate->factory()->position_string(),
+ handle(Smi::FromInt(position), isolate), NONE);
+ JSObject::AddProperty(jsinfo, isolate->factory()->line_string(),
+ handle(Smi::FromInt(info.line), isolate), NONE);
+ JSObject::AddProperty(jsinfo, isolate->factory()->column_string(),
+ handle(Smi::FromInt(info.column), isolate), NONE);
+ JSObject::AddProperty(jsinfo, isolate->factory()->sourceText_string(),
+ sourceText, NONE);
+
+ return jsinfo;
+}
+
+// Get information on a specific source line and column possibly offset by a
+// fixed source position. This function is used to find a source position from
+// a line and column position. The fixed source position offset is typically
+// used to find a source position in a function based on a line and column in
+// the source for the function alone. The offset passed will then be the
+// start position of the source for the function within the full script source.
+// Note that incoming line and column parameters may be undefined, and are
+// assumed to be passed *with* offsets.
+RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_CHECKED(JSValue, script, 0);
+
+ CHECK(script->value()->IsScript());
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ // Line and column are possibly undefined and we need to handle these cases,
+ // additionally subtracting corresponding offsets.
+
+ int32_t line;
+ if (args[1]->IsNull(isolate) || args[1]->IsUndefined(isolate)) {
+ line = 0;
+ } else {
+ CHECK(args[1]->IsNumber());
+ line = NumberToInt32(args[1]) - script_handle->line_offset();
+ }
+
+ int32_t column;
+ if (args[2]->IsNull(isolate) || args[2]->IsUndefined(isolate)) {
+ column = 0;
+ } else {
+ CHECK(args[2]->IsNumber());
+ column = NumberToInt32(args[2]);
+ if (line == 0) column -= script_handle->column_offset();
+ }
+
+ CONVERT_NUMBER_CHECKED(int32_t, offset_position, Int32, args[3]);
+
+ if (line < 0 || column < 0 || offset_position < 0) {
+ return isolate->heap()->null_value();
+ }
+
+ Script::InitLineEnds(script_handle);
+
+ FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+ const int line_count = line_ends_array->length();
+
+ int position;
+ if (line == 0) {
+ position = offset_position + column;
+ } else {
+ Script::PositionInfo info;
+ if (!script_handle->GetPositionInfo(offset_position, &info,
+ Script::NO_OFFSET) ||
+ info.line + line >= line_count) {
+ return isolate->heap()->null_value();
+ }
+
+ const int offset_line = info.line + line;
+ const int offset_line_position =
+ (offset_line == 0)
+ ? 0
+ : Smi::cast(line_ends_array->get(offset_line - 1))->value() + 1;
+ position = offset_line_position + column;
+ }
+
+ return *GetJSPositionInfo(script_handle, position, Script::NO_OFFSET,
+ isolate);
+}
+
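
Runtime_ScriptLocationFromLine above reduces a (line, column) pair that is
relative to offset_position to an absolute source position (offset_position
plus column when the target is the offset line itself, otherwise the start of
the target line plus column) before handing off to GetJSPositionInfo. The same
arithmetic in isolation, under the line_ends convention sketched earlier
(illustrative, not V8 API; offset_line is the line on which offset_position
falls, which the runtime derives via GetPositionInfo):

    #include <vector>

    // Absolute source position for a (line, column) pair relative to
    // offset_position, or -1 when the target line is out of range.
    int LocationFromLine(const std::vector<int>& line_ends, int offset_line,
                         int offset_position, int line, int column) {
      if (line == 0) return offset_position + column;
      const int target_line = offset_line + line;
      if (target_line >= static_cast<int>(line_ends.size())) return -1;
      const int line_start =
          (target_line == 0) ? 0 : line_ends[target_line - 1] + 1;
      return line_start + column;
    }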
+RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_CHECKED(JSValue, script, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
+ CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
+
+ CHECK(script->value()->IsScript());
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ const Script::OffsetFlag offset_flag =
+ with_offset ? Script::WITH_OFFSET : Script::NO_OFFSET;
+ return *GetJSPositionInfo(script_handle, position, offset_flag, isolate);
+}
+
+// Returns the given line as a string, or null if line is out of bounds.
+// The parameter line is expected to include the script's line offset.
+RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(JSValue, script, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
+
+ CHECK(script->value()->IsScript());
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ Script::InitLineEnds(script_handle);
+
+ FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+ const int line_count = line_ends_array->length();
+
+ line -= script_handle->line_offset();
+ if (line < 0 || line_count <= line) {
+ return isolate->heap()->null_value();
+ }
+
+ const int start =
+ (line == 0) ? 0 : Smi::cast(line_ends_array->get(line - 1))->value() + 1;
+ const int end = Smi::cast(line_ends_array->get(line))->value();
+
+ Handle<String> source =
+ handle(String::cast(script_handle->source()), isolate);
+ Handle<String> str = isolate->factory()->NewSubString(source, start, end);
+
+ return *str;
+}
// Set one shot breakpoints for the callback function that is passed to a
// built-in function such as Array.forEach to enable stepping into the callback,
// if we are indeed stepping and the callback is subject to debugging.
RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
- DCHECK(args.length() == 1);
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
- Handle<JSFunction> fun;
- if (object->IsJSFunction()) {
- fun = Handle<JSFunction>::cast(object);
- } else {
- fun = Handle<JSFunction>(
- Handle<JSGeneratorObject>::cast(object)->function(), isolate);
- }
-
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
isolate->debug()->PrepareStepIn(fun);
return isolate->heap()->undefined_value();
}
+// Set one shot breakpoints for the suspended generator object.
+RUNTIME_FUNCTION(Runtime_DebugPrepareStepInSuspendedGenerator) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ isolate->debug()->PrepareStepInSuspendedGenerator();
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugRecordAsyncFunction) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+ CHECK(isolate->debug()->last_step_action() >= StepNext);
+ isolate->debug()->RecordAsyncFunction(generator);
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 1);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
- isolate->PushPromise(promise, function);
- // If we are in step-in mode, flood the handler.
- isolate->debug()->EnableStepIn();
+ isolate->PushPromise(promise);
return isolate->heap()->undefined_value();
}
@@ -1635,15 +1821,6 @@ RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
}
-RUNTIME_FUNCTION(Runtime_DebugPromiseEvent) {
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0);
- isolate->debug()->OnPromiseEvent(data);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
DCHECK(args.length() == 1);
HandleScope scope(isolate);
@@ -1663,5 +1840,35 @@ RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
UNIMPLEMENTED();
return NULL;
}
+
+RUNTIME_FUNCTION(Runtime_GetWasmFunctionOffsetTable) {
+ DCHECK(args.length() == 1);
+ HandleScope scope(isolate);
+ CONVERT_ARG_CHECKED(JSValue, script_val, 0);
+
+ CHECK(script_val->value()->IsScript());
+ Handle<Script> script = Handle<Script>(Script::cast(script_val->value()));
+
+ Handle<wasm::WasmDebugInfo> debug_info =
+ wasm::GetDebugInfo(handle(script->wasm_object(), isolate));
+ Handle<FixedArray> elements = wasm::WasmDebugInfo::GetFunctionOffsetTable(
+ debug_info, script->wasm_function_index());
+ return *isolate->factory()->NewJSArrayWithElements(elements);
+}
+
+RUNTIME_FUNCTION(Runtime_DisassembleWasmFunction) {
+ DCHECK(args.length() == 1);
+ HandleScope scope(isolate);
+ CONVERT_ARG_CHECKED(JSValue, script_val, 0);
+
+ CHECK(script_val->value()->IsScript());
+ Handle<Script> script = Handle<Script>(Script::cast(script_val->value()));
+
+ Handle<wasm::WasmDebugInfo> debug_info =
+ wasm::GetDebugInfo(handle(script->wasm_object(), isolate));
+ return *wasm::WasmDebugInfo::DisassembleFunction(
+ debug_info, script->wasm_function_index());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-error.cc b/deps/v8/src/runtime/runtime-error.cc
new file mode 100644
index 0000000000..3a9b192029
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-error.cc
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+#include "src/base/platform/time.h"
+#include "src/conversions-inl.h"
+#include "src/futex-emulation.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_ErrorToString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, recv, 0);
+ RETURN_RESULT_OR_FAILURE(isolate, ErrorUtils::ToString(isolate, recv));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index 4b558d124f..0d624e97d6 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -22,14 +22,17 @@ namespace {
// deletions during a for-in.
MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
Isolate* const isolate = receiver->GetIsolate();
- FastKeyAccumulator accumulator(isolate, receiver, INCLUDE_PROTOS,
+ JSObject::MakePrototypesFast(receiver, kStartAtReceiver, isolate);
+ FastKeyAccumulator accumulator(isolate, receiver,
+ KeyCollectionMode::kIncludePrototypes,
ENUMERABLE_STRINGS);
- accumulator.set_filter_proxy_keys(false);
+ accumulator.set_is_for_in(true);
// Test if we have an enum cache for {receiver}.
if (!accumulator.is_receiver_simple_enum()) {
Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, keys, accumulator.GetKeys(KEEP_NUMBERS),
- HeapObject);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, keys, accumulator.GetKeys(GetKeysConversion::kKeepNumbers),
+ HeapObject);
// Test again, since cache may have been built by GetKeys() calls above.
if (!accumulator.is_receiver_simple_enum()) return keys;
}
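
Enumerate above collects the enumerable string-valued keys of the receiver and
everything on its prototype chain, taking the enum-cache fast path when it
can. As a rough standalone model of what that walk produces (first occurrence
wins, so shadowing own properties suppress prototype duplicates), with plain
containers in place of real JS objects; illustrative only, not V8 API:

    #include <set>
    #include <string>
    #include <vector>

    struct Obj {
      std::vector<std::string> own_enumerable_keys;
      const Obj* prototype = nullptr;  // nullptr plays the role of null.
    };

    // Walk the prototype chain, keeping only the first occurrence of each
    // key: a property shadowed further up the chain is not reported again.
    std::vector<std::string> EnumerateKeys(const Obj* receiver) {
      std::vector<std::string> keys;
      std::set<std::string> seen;
      for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
        for (const std::string& key : o->own_enumerable_keys) {
          if (seen.insert(key).second) keys.push_back(key);
        }
      }
      return keys;
    }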
@@ -61,7 +64,9 @@ MaybeHandle<Object> HasEnumerableProperty(Isolate* isolate,
Handle<Object> prototype;
ASSIGN_RETURN_ON_EXCEPTION(isolate, prototype,
JSProxy::GetPrototype(proxy), Object);
- if (prototype->IsNull()) break;
+ if (prototype->IsNull(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
// We already have a stack-check in JSProxy::GetPrototype.
return HasEnumerableProperty(
isolate, Handle<JSReceiver>::cast(prototype), key);
@@ -95,11 +100,6 @@ MaybeHandle<Object> HasEnumerableProperty(Isolate* isolate,
return isolate->factory()->undefined_value();
}
-MaybeHandle<Object> Filter(Handle<JSReceiver> receiver, Handle<Object> key) {
- Isolate* const isolate = receiver->GetIsolate();
- return HasEnumerableProperty(isolate, receiver, key);
-}
-
} // namespace
@@ -107,9 +107,7 @@ RUNTIME_FUNCTION(Runtime_ForInEnumerate) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- Handle<HeapObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Enumerate(receiver));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Enumerate(receiver));
}
@@ -153,15 +151,24 @@ RUNTIME_FUNCTION(Runtime_ForInDone) {
return isolate->heap()->ToBoolean(index == length);
}
+RUNTIME_FUNCTION(Runtime_ForInHasProperty) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, HasEnumerableProperty(isolate, receiver, key));
+ return isolate->heap()->ToBoolean(!result->IsUndefined(isolate));
+}
RUNTIME_FUNCTION(Runtime_ForInFilter) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ HasEnumerableProperty(isolate, receiver, key));
}
@@ -177,9 +184,8 @@ RUNTIME_FUNCTION(Runtime_ForInNext) {
if (receiver->map() == *cache_type) {
return *key;
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ HasEnumerableProperty(isolate, receiver, key));
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 011f9ff820..298f1a1d11 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -10,7 +10,7 @@
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
-#include "src/profiler/cpu-profiler.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -21,14 +21,11 @@ RUNTIME_FUNCTION(Runtime_FunctionGetName) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSBoundFunction()) {
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSBoundFunction::GetName(
- isolate, Handle<JSBoundFunction>::cast(function)));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSBoundFunction::GetName(
+ isolate, Handle<JSBoundFunction>::cast(function)));
} else {
- RUNTIME_ASSERT(function->IsJSFunction());
- return Handle<JSFunction>::cast(function)->shared()->name();
+ return *JSFunction::GetName(isolate, Handle<JSFunction>::cast(function));
}
}
@@ -51,8 +48,8 @@ RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
- RUNTIME_ASSERT(f->RemovePrototype());
- f->shared()->set_construct_stub(
+ CHECK(f->RemovePrototype());
+ f->shared()->SetConstructStub(
*isolate->builtins()->ConstructedNonConstructable());
return isolate->heap()->undefined_value();
@@ -95,16 +92,6 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
return Smi::FromInt(pos);
}
-
-RUNTIME_FUNCTION(Runtime_FunctionGetPositionForOffset) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_CHECKED(AbstractCode, abstract_code, 0);
- CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
- return Smi::FromInt(abstract_code->SourcePosition(offset));
-}
-
RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -131,8 +118,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
CONVERT_SMI_ARG_CHECKED(length, 1);
- RUNTIME_ASSERT((length & 0xC0000000) == 0xC0000000 ||
- (length & 0xC0000000) == 0x0);
+ CHECK((length & 0xC0000000) == 0xC0000000 || (length & 0xC0000000) == 0x0);
fun->shared()->set_length(length);
return isolate->heap()->undefined_value();
}
@@ -144,7 +130,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- RUNTIME_ASSERT(fun->IsConstructor());
+ CHECK(fun->IsConstructor());
RETURN_FAILURE_ON_EXCEPTION(isolate,
Accessors::FunctionSetPrototype(fun, value));
return args[0]; // return TOS
@@ -189,7 +175,8 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
}
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
- target_shared->set_feedback_vector(source_shared->feedback_vector());
+ target_shared->set_num_literals(source_shared->num_literals());
+ target_shared->set_feedback_metadata(source_shared->feedback_metadata());
target_shared->set_internal_formal_parameter_count(
source_shared->internal_formal_parameter_count());
target_shared->set_start_position_and_type(
@@ -206,21 +193,17 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
- DCHECK(target->next_function_link()->IsUndefined());
+ DCHECK(target->next_function_link()->IsUndefined(isolate));
- // Make sure we get a fresh copy of the literal vector to avoid cross
- // context contamination.
Handle<Context> context(source->context());
target->set_context(*context);
- int number_of_literals = source->NumberOfLiterals();
- Handle<LiteralsArray> literals =
- LiteralsArray::New(isolate, handle(target_shared->feedback_vector()),
- number_of_literals, TENURED);
- target->set_literals(*literals);
+ // Make sure we get a fresh copy of the literal vector to avoid cross
+ // context contamination, and that the literal vector makes its way into
+ // the target_shared optimized code map.
+ JSFunction::EnsureLiterals(target);
- if (isolate->logger()->is_logging_code_events() ||
- isolate->cpu_profiler()->is_profiling()) {
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
isolate->logger()->LogExistingFunction(
source_shared, Handle<AbstractCode>(source_shared->abstract_code()));
}
@@ -234,7 +217,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
// into the global object when doing call and apply.
RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
SealHandleScope shs(isolate);
- RUNTIME_ASSERT(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, object, 0);
@@ -255,7 +238,7 @@ RUNTIME_FUNCTION(Runtime_IsConstructor) {
RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
SealHandleScope shs(isolate);
- RUNTIME_ASSERT(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (object->IsJSFunction()) {
@@ -276,11 +259,8 @@ RUNTIME_FUNCTION(Runtime_Call) {
for (int i = 0; i < argc; ++i) {
argv[i] = args.at<Object>(2 + i);
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, target, receiver, argc, argv.start()));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Execution::Call(isolate, target, receiver, argc, argv.start()));
}
@@ -289,10 +269,7 @@ RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
- if (receiver->IsNull() || receiver->IsUndefined()) {
- return isolate->global_proxy();
- }
- return *Object::ToObject(isolate, receiver).ToHandleChecked();
+ return *Object::ConvertReceiver(isolate, receiver).ToHandleChecked();
}
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index f4ef679bf6..a93bb23645 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -12,80 +12,56 @@
// Implement Futex API for SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here:
-// https://github.com/lars-t-hansen/ecmascript_sharedmem
+// https://github.com/tc39/ecmascript_sharedmem
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_AtomicsFutexWait) {
+RUNTIME_FUNCTION(Runtime_AtomicsWait) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_INT32_ARG_CHECKED(value, 2);
CONVERT_DOUBLE_ARG_CHECKED(timeout, 3);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
- RUNTIME_ASSERT(timeout == V8_INFINITY || !std::isnan(timeout));
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_EQ(sta->type(), kExternalInt32Array);
+ CHECK(timeout == V8_INFINITY || !std::isnan(timeout));
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
+ size_t addr = (index << 2) + NumberToSize(sta->byte_offset());
return FutexEmulation::Wait(isolate, array_buffer, addr, value, timeout);
}
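
In the Atomics entry points here, (index << 2) + byte_offset is simply the
byte address of element index of the Int32Array within its SharedArrayBuffer:
four bytes per element, plus the view's offset into the buffer. Spelled out as
a standalone sketch (illustrative, not V8 API):

    #include <cassert>
    #include <cstddef>

    // Byte address, relative to the start of the shared buffer, of element
    // `index` of an Int32Array starting `byte_offset` bytes into the buffer.
    size_t FutexAddress(size_t index, size_t byte_offset) {
      return (index << 2) + byte_offset;  // index * sizeof(int32_t) + offset
    }

    int main() {
      // A view starting 8 bytes into the buffer: element 3 lives at
      // byte 8 + 3 * 4 = 20.
      assert(FutexAddress(3, 8) == 20);
      return 0;
    }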
-
-RUNTIME_FUNCTION(Runtime_AtomicsFutexWake) {
+RUNTIME_FUNCTION(Runtime_AtomicsWake) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_INT32_ARG_CHECKED(count, 2);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_EQ(sta->type(), kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
+ size_t addr = (index << 2) + NumberToSize(sta->byte_offset());
return FutexEmulation::Wake(isolate, array_buffer, addr, count);
}
-
-RUNTIME_FUNCTION(Runtime_AtomicsFutexWakeOrRequeue) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
- CONVERT_SIZE_ARG_CHECKED(index1, 1);
- CONVERT_INT32_ARG_CHECKED(count, 2);
- CONVERT_INT32_ARG_CHECKED(value, 3);
- CONVERT_SIZE_ARG_CHECKED(index2, 4);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index1 < NumberToSize(isolate, sta->length()));
- RUNTIME_ASSERT(index2 < NumberToSize(isolate, sta->length()));
- RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
-
- Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr1 = (index1 << 2) + NumberToSize(isolate, sta->byte_offset());
- size_t addr2 = (index2 << 2) + NumberToSize(isolate, sta->byte_offset());
-
- return FutexEmulation::WakeOrRequeue(isolate, array_buffer, addr1, count,
- value, addr2);
-}
-
-
-RUNTIME_FUNCTION(Runtime_AtomicsFutexNumWaitersForTesting) {
+RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
- RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
- RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));
- RUNTIME_ASSERT(sta->type() == kExternalInt32Array);
+ CHECK(sta->GetBuffer()->is_shared());
+ CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_EQ(sta->type(), kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
- size_t addr = (index << 2) + NumberToSize(isolate, sta->byte_offset());
+ size_t addr = (index << 2) + NumberToSize(sta->byte_offset());
return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
}
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 181b5f9540..dcc48c5c9e 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -5,6 +5,7 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/frames-inl.h"
#include "src/objects-inl.h"
@@ -14,26 +15,33 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
-
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- Handle<JSFunction> function(frame->function());
- RUNTIME_ASSERT(function->shared()->is_generator());
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ CHECK(function->shared()->is_resumable());
+
+ Handle<FixedArray> operand_stack;
+ if (function->shared()->HasBytecodeArray()) {
+ // New-style generators.
+ DCHECK(!function->shared()->HasBaselineCode());
+ int size = function->shared()->bytecode_array()->register_count();
+ operand_stack = isolate->factory()->NewFixedArray(size);
+ } else {
+ // Old-style generators.
+ DCHECK(function->shared()->HasBaselineCode());
+ operand_stack = isolate->factory()->empty_fixed_array();
+ }
- Handle<JSGeneratorObject> generator;
- DCHECK(!frame->IsConstructor());
- generator = isolate->factory()->NewJSGeneratorObject(function);
+ Handle<JSGeneratorObject> generator =
+ isolate->factory()->NewJSGeneratorObject(function);
generator->set_function(*function);
- generator->set_context(Context::cast(frame->context()));
- generator->set_receiver(frame->receiver());
- generator->set_continuation(0);
- generator->set_operand_stack(isolate->heap()->empty_fixed_array());
-
+ generator->set_context(isolate->context());
+ generator->set_receiver(*receiver);
+ generator->set_operand_stack(*operand_stack);
+ generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
return *generator;
}
-
RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 1);
@@ -41,11 +49,13 @@ RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
JavaScriptFrameIterator stack_iterator(isolate);
JavaScriptFrame* frame = stack_iterator.frame();
- RUNTIME_ASSERT(frame->function()->shared()->is_generator());
+ CHECK(frame->function()->shared()->is_resumable());
DCHECK_EQ(frame->function(), generator_object->function());
DCHECK(frame->function()->shared()->is_compiled());
DCHECK(!frame->function()->IsOptimized());
+ isolate->debug()->RecordAsyncFunction(generator_object);
+
// The caller should have saved the context and continuation already.
DCHECK_EQ(generator_object->context(), Context::cast(frame->context()));
DCHECK_LT(0, generator_object->continuation());
@@ -72,63 +82,6 @@ RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
return isolate->heap()->undefined_value();
}
-
-// Note that this function is the slow path for resuming generators. It is only
-// called if the suspended activation had operands on the stack, stack handlers
-// needing rewinding, or if the resume should throw an exception. The fast path
-// is handled directly in FullCodeGenerator::EmitGeneratorResume(), which is
-// inlined into GeneratorNext, GeneratorReturn, and GeneratorThrow.
-// EmitGeneratorResume is called in any case, as it needs to reconstruct the
-// stack frame and make space for arguments and operands.
-RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
- CONVERT_ARG_CHECKED(Object, value, 1);
- CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2);
- JavaScriptFrameIterator stack_iterator(isolate);
- JavaScriptFrame* frame = stack_iterator.frame();
-
- DCHECK_EQ(frame->function(), generator_object->function());
- DCHECK(frame->function()->shared()->is_compiled());
- DCHECK(!frame->function()->IsOptimized());
-
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-
- Code* code = generator_object->function()->shared()->code();
- int offset = generator_object->continuation();
- DCHECK_GT(offset, 0);
- frame->set_pc(code->instruction_start() + offset);
- if (FLAG_enable_embedded_constant_pool) {
- frame->set_constant_pool(code->constant_pool());
- }
- generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting);
-
- FixedArray* operand_stack = generator_object->operand_stack();
- int operands_count = operand_stack->length();
- if (operands_count != 0) {
- frame->RestoreOperandStack(operand_stack);
- generator_object->set_operand_stack(isolate->heap()->empty_fixed_array());
- }
-
- JSGeneratorObject::ResumeMode resume_mode =
- static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
- switch (resume_mode) {
- // Note: this looks like NEXT and RETURN are the same but RETURN receives
- // special treatment in the generator code (to which we return here).
- case JSGeneratorObject::NEXT:
- case JSGeneratorObject::RETURN:
- return value;
- case JSGeneratorObject::THROW:
- return isolate->Throw(value);
- }
-
- UNREACHABLE();
- return isolate->ThrowIllegalOperation();
-}
-
-
RUNTIME_FUNCTION(Runtime_GeneratorClose) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -139,8 +92,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorClose) {
return isolate->heap()->undefined_value();
}
-
-// Returns function of generator activation.
RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -149,8 +100,6 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
return generator->function();
}
-
-// Returns receiver of generator activation.
RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -159,18 +108,22 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
return generator->receiver();
}
-
-// Returns input of generator activation.
-RUNTIME_FUNCTION(Runtime_GeneratorGetInput) {
+RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
- return generator->input();
+ return generator->input_or_debug_pos();
}
+RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ return Smi::FromInt(generator->resume_mode());
+}
-// Returns generator continuation as a PC offset, or the magic -1 or 0 values.
RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -179,38 +132,13 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
return Smi::FromInt(generator->continuation());
}
-
RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
- if (generator->is_suspended()) {
- Handle<Code> code(generator->function()->code(), isolate);
- int offset = generator->continuation();
- RUNTIME_ASSERT(0 <= offset && offset < code->instruction_size());
- return Smi::FromInt(code->SourcePosition(offset));
- }
-
- return isolate->heap()->undefined_value();
-}
-
-// Optimization for builtins calling any of the following three functions is
-// disabled in js/generator.js and compiler.cc, hence they are unreachable.
-
-RUNTIME_FUNCTION(Runtime_GeneratorNext) {
- UNREACHABLE();
- return nullptr;
-}
-
-RUNTIME_FUNCTION(Runtime_GeneratorReturn) {
- UNREACHABLE();
- return nullptr;
-}
-
-RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
- UNREACHABLE();
- return nullptr;
+ if (!generator->is_suspended()) return isolate->heap()->undefined_value();
+ return Smi::FromInt(generator->source_position());
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 27f970bdb4..8b9d92ec00 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -6,6 +6,8 @@
#ifdef V8_I18N_SUPPORT
#include "src/runtime/runtime-utils.h"
+#include <memory>
+
#include "src/api.h"
#include "src/api-natives.h"
#include "src/arguments.h"
@@ -24,22 +26,44 @@
#include "unicode/dtfmtsym.h"
#include "unicode/dtptngen.h"
#include "unicode/locid.h"
+#include "unicode/normalizer2.h"
#include "unicode/numfmt.h"
#include "unicode/numsys.h"
#include "unicode/rbbi.h"
#include "unicode/smpdtfmt.h"
#include "unicode/timezone.h"
+#include "unicode/translit.h"
#include "unicode/uchar.h"
#include "unicode/ucol.h"
#include "unicode/ucurr.h"
#include "unicode/uloc.h"
+#include "unicode/unistr.h"
#include "unicode/unum.h"
#include "unicode/uversion.h"
namespace v8 {
namespace internal {
+namespace {
+
+const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
+ std::unique_ptr<uc16[]>* dest,
+ int32_t length) {
+ DCHECK(flat.IsFlat());
+ if (flat.IsOneByte()) {
+ if (!*dest) {
+ dest->reset(NewArray<uc16>(length));
+ CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
+ }
+ return reinterpret_cast<const UChar*>(dest->get());
+ } else {
+ return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
+ }
+}
+
+} // namespace
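
GetUCharBufferFromFlat hands ICU a UTF-16 view of a flat string: two-byte
content is aliased in place with no copy, while one-byte (Latin-1) content is
widened once into a caller-owned scratch buffer that stays alive across the
call. A standalone analogue of the widening half, against plain ICU types (the
function name and shape here are assumptions for illustration):

    #include <cstddef>
    #include <memory>
    #include <string>
    #include <unicode/umachine.h>  // UChar

    // Widens Latin-1 bytes into a caller-owned UTF-16 scratch buffer. A
    // genuinely two-byte string could be aliased without any copy, which is
    // the case GetUCharBufferFromFlat optimizes for.
    const UChar* AsUChars(const std::string& latin1,
                          std::unique_ptr<UChar[]>* scratch) {
      if (!*scratch) {
        scratch->reset(new UChar[latin1.size()]);
        for (size_t i = 0; i < latin1.size(); ++i) {
          (*scratch)[i] =
              static_cast<UChar>(static_cast<unsigned char>(latin1[i]));
        }
      }
      return scratch->get();
    }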
+// ECMA 402 6.2.3
RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
@@ -50,6 +74,8 @@ RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
// Return value which denotes invalid language tag.
+ // TODO(jshin): Can uloc_{for,to}LanguageTag fail even for structurally valid
+ // language tags? If not, just add CHECK instead of returning 'invalid-tag'.
const char* const kInvalidTag = "invalid-tag";
UErrorCode error = U_ZERO_ERROR;
@@ -152,7 +178,7 @@ RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
uint32_t length = static_cast<uint32_t>(input->length()->Number());
// Set some limit to prevent fuzz tests from going OOM.
// Can be bumped when callers' requirements change.
- RUNTIME_ASSERT(length < 100);
+ if (length >= 100) return isolate->ThrowIllegalOperation();
Handle<FixedArray> output = factory->NewFixedArray(length);
Handle<Name> maximized = factory->NewStringFromStaticChars("maximized");
Handle<Name> base = factory->NewStringFromStaticChars("base");
@@ -238,7 +264,7 @@ RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
Handle<Object> tag = JSReceiver::GetDataProperty(obj, marker);
- return isolate->heap()->ToBoolean(!tag->IsUndefined());
+ return isolate->heap()->ToBoolean(!tag->IsUndefined(isolate));
}
@@ -296,7 +322,7 @@ RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
Handle<Symbol> marker = isolate->factory()->intl_impl_object_symbol();
Handle<Object> impl = JSReceiver::GetDataProperty(obj, marker);
- if (impl->IsTheHole()) {
+ if (impl->IsTheHole(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotIntlObject, obj));
}
@@ -336,9 +362,9 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
// Make object handle weak so we can delete the data format once GC kicks in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- DateFormat::DeleteDateFormat);
+ GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+ DateFormat::DeleteDateFormat,
+ WeakCallbackType::kInternalFields);
return *local_object;
}
@@ -361,13 +387,10 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
icu::UnicodeString result;
date_format->format(value->Number(), result);
- Handle<String> result_str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_str,
- isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
- return *result_str;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length())));
}
@@ -389,12 +412,9 @@ RUNTIME_FUNCTION(Runtime_InternalDateParse) {
UDate date = date_format->parse(u_date, status);
if (U_FAILURE(status)) return isolate->heap()->undefined_value();
- Handle<JSDate> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSDate::New(isolate->date_function(), isolate->date_function(),
- static_cast<double>(date)));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSDate::New(isolate->date_function(), isolate->date_function(),
+ static_cast<double>(date)));
}
@@ -430,9 +450,9 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
JSObject::AddProperty(local_object, key, value, NONE);
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- NumberFormat::DeleteNumberFormat);
+ GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+ NumberFormat::DeleteNumberFormat,
+ WeakCallbackType::kInternalFields);
return *local_object;
}
@@ -455,13 +475,10 @@ RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
icu::UnicodeString result;
number_format->format(value->Number(), result);
- Handle<String> result_str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_str,
- isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
- return *result_str;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length())));
}
@@ -536,9 +553,9 @@ RUNTIME_FUNCTION(Runtime_CreateCollator) {
JSObject::AddProperty(local_object, key, value, NONE);
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- Collator::DeleteCollator);
+ GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+ Collator::DeleteCollator,
+ WeakCallbackType::kInternalFields);
return *local_object;
}
@@ -555,14 +572,24 @@ RUNTIME_FUNCTION(Runtime_InternalCompare) {
icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
if (!collator) return isolate->ThrowIllegalOperation();
- v8::String::Value string_value1(v8::Utils::ToLocal(string1));
- v8::String::Value string_value2(v8::Utils::ToLocal(string2));
- const UChar* u_string1 = reinterpret_cast<const UChar*>(*string_value1);
- const UChar* u_string2 = reinterpret_cast<const UChar*>(*string_value2);
+ string1 = String::Flatten(string1);
+ string2 = String::Flatten(string2);
+
+ UCollationResult result;
UErrorCode status = U_ZERO_ERROR;
- UCollationResult result =
- collator->compare(u_string1, string_value1.length(), u_string2,
- string_value2.length(), status);
+ {
+ DisallowHeapAllocation no_gc;
+ int32_t length1 = string1->length();
+ int32_t length2 = string2->length();
+ String::FlatContent flat1 = string1->GetFlatContent();
+ String::FlatContent flat2 = string2->GetFlatContent();
+ std::unique_ptr<uc16[]> sap1;
+ std::unique_ptr<uc16[]> sap2;
+ const UChar* string_val1 = GetUCharBufferFromFlat(flat1, &sap1, length1);
+ const UChar* string_val2 = GetUCharBufferFromFlat(flat2, &sap2, length2);
+ result =
+ collator->compare(string_val1, length1, string_val2, length2, status);
+ }
if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
return *isolate->factory()->NewNumberFromInt(result);
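
The rewritten comparison avoids v8::String::Value, which always copies, by
feeding flat string content straight to the collator. Outside V8 the same ICU
call sequence looks roughly like this; a minimal sketch against the public
icu::Collator API, with error handling reduced to a sentinel:

    #include <memory>
    #include <unicode/coll.h>
    #include <unicode/unistr.h>

    // Compares two strings under a locale's collation rules; returns a value
    // <0, ==0 or >0 in the manner of strcmp.
    int Collate(const icu::UnicodeString& a, const icu::UnicodeString& b,
                const char* locale_name) {
      UErrorCode status = U_ZERO_ERROR;
      std::unique_ptr<icu::Collator> collator(
          icu::Collator::createInstance(icu::Locale(locale_name), status));
      if (U_FAILURE(status)) return 0;
      UCollationResult result = collator->compare(a, b, status);
      if (U_FAILURE(status)) return 0;
      return result;  // UCOL_LESS (-1), UCOL_EQUAL (0) or UCOL_GREATER (1).
    }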
@@ -571,36 +598,59 @@ RUNTIME_FUNCTION(Runtime_InternalCompare) {
RUNTIME_FUNCTION(Runtime_StringNormalize) {
HandleScope scope(isolate);
- static const UNormalizationMode normalizationForms[] = {
- UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD};
+ static const struct {
+ const char* name;
+ UNormalization2Mode mode;
+ } normalizationForms[] = {
+ {"nfc", UNORM2_COMPOSE},
+ {"nfc", UNORM2_DECOMPOSE},
+ {"nfkc", UNORM2_COMPOSE},
+ {"nfkc", UNORM2_DECOMPOSE},
+ };
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
- RUNTIME_ASSERT(form_id >= 0 &&
- static_cast<size_t>(form_id) < arraysize(normalizationForms));
+ CHECK(form_id >= 0 &&
+ static_cast<size_t>(form_id) < arraysize(normalizationForms));
- v8::String::Value string_value(v8::Utils::ToLocal(stringValue));
- const UChar* u_value = reinterpret_cast<const UChar*>(*string_value);
-
- // TODO(mnita): check Normalizer2 (not available in ICU 46)
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString input(false, u_value, string_value.length());
+ int length = s->length();
+ s = String::Flatten(s);
icu::UnicodeString result;
- icu::Normalizer::normalize(input, normalizationForms[form_id], 0, result,
- status);
+ std::unique_ptr<uc16[]> sap;
+ UErrorCode status = U_ZERO_ERROR;
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+ const UChar* src = GetUCharBufferFromFlat(flat, &sap, length);
+ icu::UnicodeString input(false, src, length);
+ // Getting a singleton. Should not free it.
+ const icu::Normalizer2* normalizer =
+ icu::Normalizer2::getInstance(nullptr, normalizationForms[form_id].name,
+ normalizationForms[form_id].mode, status);
+ DCHECK(U_SUCCESS(status));
+ CHECK(normalizer != nullptr);
+ int32_t normalized_prefix_length =
+ normalizer->spanQuickCheckYes(input, status);
+ // Quick return if the input is already normalized.
+ if (length == normalized_prefix_length) return *s;
+ icu::UnicodeString unnormalized =
+ input.tempSubString(normalized_prefix_length);
+ // Read-only alias of the normalized prefix.
+ result.setTo(false, input.getBuffer(), normalized_prefix_length);
+ // Appending copies on write; normalize the suffix and append to |result|.
+ normalizer->normalizeSecondAndAppend(result, unnormalized, status);
+ }
+
if (U_FAILURE(status)) {
return isolate->heap()->undefined_value();
}
- Handle<String> result_str;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result_str,
- isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()),
- result.length())));
- return *result_str;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(result.getBuffer()),
+ result.length())));
}
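
The new normalization path relies on two Normalizer2 facilities:
spanQuickCheckYes reports the longest prefix that is already in the requested
form (so already-normalized input returns without any allocation), and
normalizeSecondAndAppend normalizes only the remainder and appends it to the
read-only-aliased prefix. A minimal standalone sketch of that incremental
pattern, using ICU's NFC singleton accessor for brevity (the code above
instead selects the form by name via getInstance so the mode can be chosen at
runtime):

    #include <unicode/normalizer2.h>
    #include <unicode/unistr.h>

    // NFC-normalizes |input|, doing no work beyond the quick check when the
    // input is already normalized.
    icu::UnicodeString NormalizeNFC(const icu::UnicodeString& input,
                                    UErrorCode& status) {
      const icu::Normalizer2* nfc = icu::Normalizer2::getNFCInstance(status);
      if (U_FAILURE(status)) return input;
      // Longest prefix that is already NFC; a fully normalized input
      // returns as-is.
      int32_t ok_prefix = nfc->spanQuickCheckYes(input, status);
      if (U_FAILURE(status) || ok_prefix == input.length()) return input;
      // Read-only alias of the normalized prefix, copied on the first write.
      icu::UnicodeString result;
      result.setTo(false, input.getBuffer(), ok_prefix);
      nfc->normalizeSecondAndAppend(result, input.tempSubString(ok_prefix),
                                    status);
      return U_FAILURE(status) ? input : result;
    }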
@@ -640,9 +690,9 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
// Make object handle weak so we can delete the break iterator once GC kicks
// in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(),
- reinterpret_cast<void*>(wrapper.location()),
- BreakIterator::DeleteBreakIterator);
+ GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
+ BreakIterator::DeleteBreakIterator,
+ WeakCallbackType::kInternalFields);
return *local_object;
}
@@ -663,9 +713,13 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
break_iterator_holder->GetInternalField(1));
delete u_text;
- v8::String::Value text_value(v8::Utils::ToLocal(text));
- u_text = new icu::UnicodeString(reinterpret_cast<const UChar*>(*text_value),
- text_value.length());
+ int length = text->length();
+ text = String::Flatten(text);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = text->GetFlatContent();
+ std::unique_ptr<uc16[]> sap;
+ const UChar* text_value = GetUCharBufferFromFlat(flat, &sap, length);
+ u_text = new icu::UnicodeString(text_value, length);
break_iterator_holder->SetInternalField(1, reinterpret_cast<Smi*>(u_text));
break_iterator->setText(*u_text);
@@ -749,6 +803,362 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
return *isolate->factory()->NewStringFromStaticChars("unknown");
}
}
+
+namespace {
+void ConvertCaseWithTransliterator(icu::UnicodeString* input,
+ const char* transliterator_id) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::Transliterator> translit(
+ icu::Transliterator::createInstance(
+ icu::UnicodeString(transliterator_id, -1, US_INV), UTRANS_FORWARD,
+ status));
+ if (U_FAILURE(status)) return;
+ translit->transliterate(*input);
+}
+
+MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
+ bool is_to_upper, const char* lang) {
+ int32_t src_length = s->length();
+
+ // Greek uppercasing has to be done via transliteration.
+ // TODO(jshin): Drop this special-casing once ICU's regular case conversion
+ // API supports Greek uppercasing. See
+ // http://bugs.icu-project.org/trac/ticket/10582 .
+ // In the meantime, if there's no Greek character in |s|, call this
+ // function again with the root locale (lang="").
+ // ICU's C API for transliteration is nasty, so we just use the C++ API.
+ if (V8_UNLIKELY(is_to_upper && lang[0] == 'e' && lang[1] == 'l')) {
+ icu::UnicodeString converted;
+ std::unique_ptr<uc16[]> sap;
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+ const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
+ // Starts with the source string (read-only alias with copy-on-write
+ // semantics) and will be modified to contain the converted result.
+ // Using read-only alias at first saves one copy operation if
+ // transliteration does not change the input, which is rather rare.
+ // Moreover, transliteration takes rather long, so saving one copy
+ // helps only a little.
+ converted.setTo(false, src, src_length);
+ ConvertCaseWithTransliterator(&converted, "el-Upper");
+ // If no change is made, just return |s|.
+ if (converted.getBuffer() == src) return *s;
+ }
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(converted.getBuffer()),
+ converted.length())));
+ }
+
+ auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
+
+ int32_t dest_length = src_length;
+ UErrorCode status;
+ Handle<SeqTwoByteString> result;
+ std::unique_ptr<uc16[]> sap;
+
+ // This is not a real loop. It'll be executed only once (no overflow) or
+ // twice (overflow).
+ for (int i = 0; i < 2; ++i) {
+ // Case conversion can increase the string length (e.g. sharp-S => SS) so
+ // that we have to handle RangeError exceptions here.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(dest_length));
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+ const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
+ status = U_ZERO_ERROR;
+ dest_length = case_converter(reinterpret_cast<UChar*>(result->GetChars()),
+ dest_length, src, src_length, lang, &status);
+ if (status != U_BUFFER_OVERFLOW_ERROR) break;
+ }
+
+ // In most cases, the output fills the destination buffer completely,
+ // leading to an unterminated string (U_STRING_NOT_TERMINATED_WARNING).
+ // Only in rare cases is it shorter than the destination buffer, in which
+ // case |result| has to be truncated.
+ DCHECK(U_SUCCESS(status));
+ if (V8_LIKELY(status == U_STRING_NOT_TERMINATED_WARNING)) {
+ DCHECK(dest_length == result->length());
+ return *result;
+ }
+ if (U_SUCCESS(status)) {
+ DCHECK(dest_length < result->length());
+ return *Handle<SeqTwoByteString>::cast(
+ SeqString::Truncate(result, dest_length));
+ }
+ return *s;
+}
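
Because case conversion can change a string's length (sharp-s becoming "SS"
is the canonical example), LocaleConvertCase calls the converter at most
twice: once with a destination as long as the source, and once more with the
exact length ICU reported if the first call overflowed. The same
grow-and-retry pattern sketched against the public ICU C API:

    #include <unicode/ustring.h>
    #include <vector>

    // Uppercases a UTF-16 string for |locale|, growing the destination once
    // if ICU reports that the result did not fit.
    std::vector<UChar> ToUpper(const UChar* src, int32_t src_length,
                               const char* locale) {
      std::vector<UChar> dest(src_length);  // First guess: the input length.
      UErrorCode status = U_ZERO_ERROR;
      int32_t needed =
          u_strToUpper(dest.data(), static_cast<int32_t>(dest.size()), src,
                       src_length, locale, &status);
      if (status == U_BUFFER_OVERFLOW_ERROR) {
        dest.resize(needed);  // ICU reported the exact required length.
        status = U_ZERO_ERROR;
        needed = u_strToUpper(dest.data(), static_cast<int32_t>(dest.size()),
                              src, src_length, locale, &status);
      }
      if (U_FAILURE(status)) return {};
      dest.resize(needed);  // The result may also be shorter than the guess.
      return dest;
    }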
+
+inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
+
+const uint8_t kToLower[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
+ 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73,
+ 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
+ 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
+ 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B,
+ 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3,
+ 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB,
+ 0xEC, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xD7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3,
+ 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB,
+ 0xFC, 0xFD, 0xFE, 0xFF,
+};
+
+inline uint16_t ToLatin1Lower(uint16_t ch) {
+ return static_cast<uint16_t>(kToLower[ch]);
+}
+
+inline uint16_t ToASCIIUpper(uint16_t ch) {
+ return ch & ~((ch >= 'a' && ch <= 'z') << 5);
+}
+
+// Does not work for U+00DF (sharp-s), U+00B5 (micro sign), or U+00FF (ÿ).
+inline uint16_t ToLatin1Upper(uint16_t ch) {
+ DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
+ return ch &
+ ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
+ << 5);
+}
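
ToASCIIUpper and ToLatin1Upper exploit the ASCII/Latin-1 layout in which a
lower-case letter differs from its upper-case partner only in bit 0x20: the
comparison yields 0 or 1, shifting by 5 turns that into 0 or 0x20, and masking
with the complement clears the bit exactly for characters that have such a
partner (the 0xF7 guard keeps the division sign from turning into the
multiplication sign). A sanity-check sketch of the ASCII half:

    #include <cassert>
    #include <cstdint>

    // Clears bit 0x20 iff ch is an ASCII lower-case letter.
    inline uint16_t ToASCIIUpper(uint16_t ch) {
      return ch & ~((ch >= 'a' && ch <= 'z') << 5);
    }

    int main() {
      assert(ToASCIIUpper('a') == 'A');
      assert(ToASCIIUpper('z') == 'Z');
      assert(ToASCIIUpper('A') == 'A');  // Already upper: the mask is zero.
      assert(ToASCIIUpper('{') == '{');  // 'z' + 1 stays untouched.
      assert(ToASCIIUpper('0') == '0');
      return 0;
    }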
+
+template <typename Char>
+bool ToUpperFastASCII(const Vector<const Char>& src,
+ Handle<SeqOneByteString> result) {
+ // Do a faster loop for the case where all the characters are ASCII.
+ uint16_t ored = 0;
+ int32_t index = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ ored |= ch;
+ result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
+ }
+ return !(ored & ~0x7F);
+}
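
ToUpperFastASCII folds the ASCII check into the conversion itself: every
character is OR-ed into `ored`, and a single test of `ored & ~0x7F` at the end
says whether anything above 0x7F was seen, with no per-character branch. If
the check fails, the caller simply redoes the work on the one-byte path. The
trick in isolation:

    #include <cassert>
    #include <cstdint>
    #include <string>

    // True iff every code unit in |s| is ASCII; a single accumulator
    // replaces a per-character early-exit branch.
    bool IsAllASCII(const std::u16string& s) {
      uint16_t ored = 0;
      for (char16_t ch : s) ored |= static_cast<uint16_t>(ch);
      return !(ored & ~0x7F);
    }

    int main() {
      assert(IsAllASCII(u"hello"));
      assert(!IsAllASCII(u"h\u00E9llo"));  // e-acute has bits above 0x7F.
      return 0;
    }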
+
+const uint16_t sharp_s = 0xDF;
+
+template <typename Char>
+bool ToUpperOneByte(const Vector<const Char>& src,
+ Handle<SeqOneByteString> result, int* sharp_s_count) {
+ // Still a pretty fast path for input with non-ASCII Latin-1 characters.
+
+ // There are two special cases.
+ // 1. U+00B5 and U+00FF are mapped to a character beyond U+00FF.
+ // 2. Lower-case sharp-s converts to "SS" (two characters).
+ *sharp_s_count = 0;
+ int32_t index = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ if (V8_UNLIKELY(ch == sharp_s)) {
+ ++(*sharp_s_count);
+ continue;
+ }
+ if (V8_UNLIKELY(ch == 0xB5 || ch == 0xFF)) {
+ // Since this upper-cased character does not fit in an 8-bit string, we
+ // need to take the 16-bit path.
+ return false;
+ }
+ result->SeqOneByteStringSet(index++, ToLatin1Upper(ch));
+ }
+
+ return true;
+}
+
+template <typename Char>
+void ToUpperWithSharpS(const Vector<const Char>& src,
+ Handle<SeqOneByteString> result) {
+ int32_t dest_index = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ if (ch == sharp_s) {
+ result->SeqOneByteStringSet(dest_index++, 'S');
+ result->SeqOneByteStringSet(dest_index++, 'S');
+ } else {
+ result->SeqOneByteStringSet(dest_index++, ToLatin1Upper(ch));
+ }
+ }
+}
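
Sharp-s is the one case in which an upper-cased Latin-1 string stays within
Latin-1 but grows, which is why the fast path first counts occurrences and
only then allocates length + sharp_s_count and expands in a second pass. The
counting-then-expanding shape on its own, with std::string standing in for
SeqOneByteString (illustrative, not V8 API; real code would also upper-case
the other characters):

    #include <algorithm>
    #include <cassert>
    #include <string>

    // 0xDF is sharp-s in Latin-1; uppercasing turns each occurrence into
    // "SS", so the result is exactly sharp_s_count characters longer.
    std::string ExpandSharpS(const std::string& latin1) {
      const size_t sharp_s_count = static_cast<size_t>(
          std::count(latin1.begin(), latin1.end(), '\xDF'));
      std::string result;
      result.reserve(latin1.size() + sharp_s_count);
      for (unsigned char ch : latin1) {
        if (ch == 0xDF) {
          result += "SS";
        } else {
          result += static_cast<char>(ch);
        }
      }
      return result;
    }

    int main() {
      assert(ExpandSharpS("stra\xDF" "e") == "straSSe");
      return 0;
    }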
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+
+ int length = s->length();
+ s = String::Flatten(s);
+ // First scan the string for uppercase and non-ASCII characters:
+ if (s->HasOnlyOneByteChars()) {
+ unsigned first_index_to_lower = length;
+ for (int index = 0; index < length; ++index) {
+ // Blink specializes this path for one-byte strings, so it
+ // does not need to do a generic get, but can do the equivalent
+ // of SeqOneByteStringGet.
+ uint16_t ch = s->Get(index);
+ if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+ first_index_to_lower = index;
+ break;
+ }
+ }
+
+ // Nothing to do if the string is all ASCII with no uppercase.
+ if (first_index_to_lower == length) return *s;
+
+ // We depend here on the invariant that the length of a Latin1
+ // string is invariant under ToLowerCase, and the result always
+ // fits in the Latin1 range in the *root locale*. It does not hold
+ // for ToUpperCase even in the root locale.
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+ if (flat.IsOneByte()) {
+ const uint8_t* src = flat.ToOneByteVector().start();
+ CopyChars(result->GetChars(), src, first_index_to_lower);
+ for (int index = first_index_to_lower; index < length; ++index) {
+ uint16_t ch = static_cast<uint16_t>(src[index]);
+ result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
+ }
+ } else {
+ const uint16_t* src = flat.ToUC16Vector().start();
+ CopyChars(result->GetChars(), src, first_index_to_lower);
+ for (int index = first_index_to_lower; index < length; ++index) {
+ uint16_t ch = src[index];
+ result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
+ }
+ }
+
+ return *result;
+ }
+
+ // Blink had an additional case here for ASCII 2-byte strings, but
+ // that is subsumed by the above code (assuming there isn't a false
+ // negative for HasOnlyOneByteChars).
+
+ // Do a slower implementation for cases that include non-ASCII characters.
+ return LocaleConvertCase(s, isolate, false, "");
+}
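
A hypothetical stand-in for V8's ToLatin1Lower (the real helper is defined
earlier in this file) makes the invariant above concrete: in the root locale,
every Latin-1 upper-case letter lower-cases by adding 0x20, so the result has
the same length and stays within Latin-1.

    #include <cstdint>

    // Sketch only, not the V8 implementation. A-Z and U+00C0-U+00DE (minus
    // the multiplication sign U+00D7) are the only Latin-1 upper-case
    // letters; each lower-cases to its own code point plus 0x20.
    uint8_t ToLatin1LowerSketch(uint16_t ch) {
      if ((ch >= 'A' && ch <= 'Z') ||
          (ch >= 0xC0 && ch <= 0xDE && ch != 0xD7)) {
        return static_cast<uint8_t>(ch + 0x20);
      }
      return static_cast<uint8_t>(ch);
    }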
+
+RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+
+  // This function could be optimized for no-op cases the way its lowercase
+  // counterpart is, but in empirical testing, few actual calls to upper()
+  // are no-ops, so pre-scanning would not be worth the extra time.
+
+ int32_t length = s->length();
+ s = String::Flatten(s);
+
+ if (s->HasOnlyOneByteChars()) {
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+
+ int sharp_s_count;
+ bool is_result_single_byte;
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+      // If it were OK to slow down ASCII-only input slightly, ToUpperFastASCII
+      // could be removed, because ToUpperOneByte is pretty fast now (it no
+      // longer calls the ICU API).
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> src = flat.ToOneByteVector();
+ if (ToUpperFastASCII(src, result)) return *result;
+ is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+ } else {
+ DCHECK(flat.IsTwoByte());
+ Vector<const uint16_t> src = flat.ToUC16Vector();
+ if (ToUpperFastASCII(src, result)) return *result;
+ is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+ }
+ }
+
+ // Go to the full Unicode path if there are characters whose uppercase
+ // is beyond the Latin-1 range (cannot be represented in OneByteString).
+ if (V8_UNLIKELY(!is_result_single_byte)) {
+ return LocaleConvertCase(s, isolate, true, "");
+ }
+
+ if (sharp_s_count == 0) return *result;
+
+ // We have sharp_s_count sharp-s characters, but the result is still
+ // in the Latin-1 range.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewRawOneByteString(length + sharp_s_count));
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+ if (flat.IsOneByte()) {
+ ToUpperWithSharpS(flat.ToOneByteVector(), result);
+ } else {
+ ToUpperWithSharpS(flat.ToUC16Vector(), result);
+ }
+
+ return *result;
+ }
+
+ return LocaleConvertCase(s, isolate, true, "");
+}
+
+RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 3);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_upper, 1);
+ CONVERT_ARG_HANDLE_CHECKED(SeqOneByteString, lang, 2);
+
+ // All the languages requiring special handling ("az", "el", "lt", "tr")
+ // have a 2-letter language code.
+ DCHECK(lang->length() == 2);
+ uint8_t lang_str[3];
+ memcpy(lang_str, lang->GetChars(), 2);
+ lang_str[2] = 0;
+ s = String::Flatten(s);
+  // TODO(jshin): Consider adding a fast path for ASCII or Latin-1. The fast
+  // path in the root locale needs to be adjusted for az, lt and tr, because
+  // even the case mapping of ASCII-range characters differs in those locales.
+  // Greek (el) does not require any adjustment, though.
+ return LocaleConvertCase(s, isolate, is_upper,
+ reinterpret_cast<const char*>(lang_str));
+}
+
+RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ if (isolate->serializer_enabled()) return isolate->heap()->undefined_value();
+ if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
+ Handle<FixedArray> date_cache_version =
+ isolate->factory()->NewFixedArray(1, TENURED);
+ date_cache_version->set(0, Smi::FromInt(0));
+ isolate->eternal_handles()->CreateSingleton(
+ isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
+ }
+ Handle<FixedArray> date_cache_version =
+ Handle<FixedArray>::cast(isolate->eternal_handles()->GetSingleton(
+ EternalHandles::DATE_CACHE_VERSION));
+ return date_cache_version->get(0);
+}
+
} // namespace internal
} // namespace v8
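
Before moving on to the next file: the one-byte fast paths added above rest on
two facts that can be demonstrated in isolation. The sketch below is
illustrative only (AllAscii and UppercaseStaysLatin1 are hypothetical names,
not V8 helpers): it shows the OR-accumulate ASCII test used by
ToUpperFastASCII, and the three Latin-1 code points that force either a longer
result or the two-byte path when upper-casing.

    #include <cstdint>
    #include <string>

    // OR all code units together; any unit >= 0x80 sets a bit above 0x7F,
    // so one final mask test detects a non-ASCII character anywhere.
    bool AllAscii(const std::u16string& s) {
      uint16_t ored = 0;
      for (char16_t c : s) ored |= static_cast<uint16_t>(c);
      return !(ored & ~0x7F);
    }

    // Upper-casing stays within Latin-1 for every Latin-1 input except:
    //   U+00DF (sharp s)          -> "SS"   (one extra output character)
    //   U+00B5 (micro sign)       -> U+039C (beyond Latin-1)
    //   U+00FF (y with diaeresis) -> U+0178 (beyond Latin-1)
    bool UppercaseStaysLatin1(uint16_t ch, int* extra_length) {
      if (ch == 0xDF) {  // sharp s expands to two characters
        ++(*extra_length);
        return true;
      }
      return ch != 0xB5 && ch != 0xFF;
    }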
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index d871fc7f5a..3de0f16b1e 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -4,6 +4,8 @@
#include "src/runtime/runtime-utils.h"
+#include <memory>
+
#include "src/arguments.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
@@ -12,7 +14,9 @@
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -20,7 +24,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
- RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ CHECK(isolate->bootstrapper()->IsActive());
return isolate->heap()->undefined_value();
}
@@ -29,7 +33,7 @@ RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
- RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ CHECK(isolate->bootstrapper()->IsActive());
JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
"ExportFromRuntime");
Bootstrapper::ExportFromRuntime(isolate, container);
@@ -42,7 +46,7 @@ RUNTIME_FUNCTION(Runtime_ExportExperimentalFromRuntime) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
- RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ CHECK(isolate->bootstrapper()->IsActive());
JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
"ExportExperimentalFromRuntime");
Bootstrapper::ExportExperimentalFromRuntime(isolate, container);
@@ -55,21 +59,21 @@ RUNTIME_FUNCTION(Runtime_InstallToContext) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- RUNTIME_ASSERT(array->HasFastElements());
- RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+ CHECK(array->HasFastElements());
+ CHECK(isolate->bootstrapper()->IsActive());
Handle<Context> native_context = isolate->native_context();
Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
int length = Smi::cast(array->length())->value();
for (int i = 0; i < length; i += 2) {
- RUNTIME_ASSERT(fixed_array->get(i)->IsString());
+ CHECK(fixed_array->get(i)->IsString());
Handle<String> name(String::cast(fixed_array->get(i)));
- RUNTIME_ASSERT(fixed_array->get(i + 1)->IsJSObject());
+ CHECK(fixed_array->get(i + 1)->IsJSObject());
Handle<JSObject> object(JSObject::cast(fixed_array->get(i + 1)));
int index = Context::ImportedFieldIndexForName(name);
if (index == Context::kNotFound) {
index = Context::IntrinsicIndexForName(name);
}
- RUNTIME_ASSERT(index != Context::kNotFound);
+ CHECK(index != Context::kNotFound);
native_context->set(index, *object);
}
return isolate->heap()->undefined_value();
@@ -96,6 +100,60 @@ RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
return isolate->StackOverflow();
}
+RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
+ Handle<Object> error_obj = isolate->factory()->NewError(
+ static_cast<MessageTemplate::Template>(message_id));
+
+  // For wasm traps, the byte offset (a.k.a. source position) cannot be
+  // determined from relocation info, since the explicit checks for traps
+  // converge in one single block which calls this runtime function.
+  // We hence pass the byte offset explicitly, and patch it into the top-most
+  // frame (a wasm frame) on the collected stack trace.
+ // TODO(wasm): This implementation is temporary, see bug #5007:
+ // https://bugs.chromium.org/p/v8/issues/detail?id=5007
+ Handle<JSObject> error = Handle<JSObject>::cast(error_obj);
+ Handle<Object> stack_trace_obj = JSReceiver::GetDataProperty(
+ error, isolate->factory()->stack_trace_symbol());
+ // Patch the stack trace (array of <receiver, function, code, position>).
+ if (stack_trace_obj->IsJSArray()) {
+ Handle<FixedArray> stack_elements(
+ FixedArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
+ DCHECK_EQ(1, stack_elements->length() % 4);
+ DCHECK(Code::cast(stack_elements->get(3))->kind() == Code::WASM_FUNCTION);
+ DCHECK(stack_elements->get(4)->IsSmi() &&
+ Smi::cast(stack_elements->get(4))->value() >= 0);
+ stack_elements->set(4, Smi::FromInt(-1 - byte_offset));
+ }
+ Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
+ error, isolate->factory()->detailed_stack_trace_symbol());
+ // Patch the detailed stack trace (array of JSObjects with various
+ // properties).
+ if (detailed_stack_trace_obj->IsJSArray()) {
+ Handle<FixedArray> stack_elements(
+ FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
+ DCHECK_GE(stack_elements->length(), 1);
+ Handle<JSObject> top_frame(JSObject::cast(stack_elements->get(0)));
+ Handle<String> wasm_offset_key =
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("column"));
+ LookupIterator it(top_frame, wasm_offset_key, top_frame,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (it.IsFound()) {
+ DCHECK(JSReceiver::GetDataProperty(&it)->IsSmi());
+ // Make column number 1-based here.
+ Maybe<bool> data_set = JSReceiver::SetDataProperty(
+ &it, handle(Smi::FromInt(byte_offset + 1), isolate));
+ DCHECK(data_set.IsJust() && data_set.FromJust() == true);
+ USE(data_set);
+ }
+ }
+
+ return isolate->Throw(*error_obj);
+}
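
A consumer of the simple stack trace can distinguish the patched slot from a
regular code offset by its sign. A minimal decoding sketch, assuming such a
consumer (DecodeTracePosition is a hypothetical name, not part of this patch):

    // A non-negative value is an ordinary code offset; a negative value is
    // the wasm byte offset stored above as Smi::FromInt(-1 - byte_offset).
    int DecodeTracePosition(int stored, bool* is_wasm_byte_offset) {
      *is_wasm_byte_offset = stored < 0;
      return *is_wasm_byte_offset ? -1 - stored : stored;
    }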
RUNTIME_FUNCTION(Runtime_UnwindAndFindExceptionHandler) {
SealHandleScope shs(isolate);
@@ -152,6 +210,11 @@ RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
return *isolate->factory()->NewSyntaxError(message_template, arg0);
}
+RUNTIME_FUNCTION(Runtime_ThrowCannotConvertToPrimitive) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCannotConvertToPrimitive));
+}
RUNTIME_FUNCTION(Runtime_ThrowIllegalInvocation) {
HandleScope scope(isolate);
@@ -160,6 +223,21 @@ RUNTIME_FUNCTION(Runtime_ThrowIllegalInvocation) {
isolate, NewTypeError(MessageTemplate::kIllegalInvocation));
}
+RUNTIME_FUNCTION(Runtime_ThrowIncompatibleMethodReceiver) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg1, 1);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, arg0, arg1));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidStringLength));
+}
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
HandleScope scope(isolate);
@@ -170,6 +248,20 @@ RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
NewTypeError(MessageTemplate::kIteratorResultNotAnObject, value));
}
+RUNTIME_FUNCTION(Runtime_ThrowNotGeneric) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric, arg0));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowGeneratorRunning) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kGeneratorRunning));
+}
RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
HandleScope scope(isolate);
@@ -190,7 +282,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
if (debug_event) isolate->debug()->OnPromiseReject(promise, value);
Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
// Do not report if we actually have a handler.
- if (JSReceiver::GetDataProperty(promise, key)->IsUndefined()) {
+ if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
isolate->ReportPromiseReject(promise, value,
v8::kPromiseRejectWithNoHandler);
}
@@ -204,7 +296,7 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
// At this point, no revocation has been issued before
- RUNTIME_ASSERT(JSReceiver::GetDataProperty(promise, key)->IsUndefined());
+ CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
isolate->ReportPromiseReject(promise, Handle<Object>(),
v8::kPromiseHandlerAddedAfterReject);
return isolate->heap()->undefined_value();
@@ -236,9 +328,9 @@ RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_SMI_ARG_CHECKED(size, 0);
- RUNTIME_ASSERT(IsAligned(size, kPointerSize));
- RUNTIME_ASSERT(size > 0);
- RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(IsAligned(size, kPointerSize));
+ CHECK(size > 0);
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
return *isolate->factory()->NewFillerObject(size, false, NEW_SPACE);
}
@@ -248,108 +340,34 @@ RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
DCHECK(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
- RUNTIME_ASSERT(IsAligned(size, kPointerSize));
- RUNTIME_ASSERT(size > 0);
- RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
+ CHECK(IsAligned(size, kPointerSize));
+ CHECK(size > 0);
+ CHECK(size <= Page::kMaxRegularHeapObjectSize);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
return *isolate->factory()->NewFillerObject(size, double_align, space);
}
-
-// Collect the raw data for a stack trace. Returns an array of 4
-// element segments each containing a receiver, function, code and
-// native code offset.
-RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
+RUNTIME_FUNCTION(Runtime_AllocateSeqOneByteString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, error_object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, caller, 1);
-
- if (!isolate->bootstrapper()->IsActive()) {
- // Optionally capture a more detailed stack trace for the message.
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, isolate->CaptureAndSetDetailedStackTrace(error_object));
- // Capture a simple stack trace for the stack property.
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, isolate->CaptureAndSetSimpleStackTrace(error_object, caller));
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_MessageGetStartPosition) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return Smi::FromInt(message->start_position());
-}
-
-
-RUNTIME_FUNCTION(Runtime_MessageGetScript) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
- return message->script();
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(length, 0);
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawOneByteString(length));
+ return *result;
}
-
-RUNTIME_FUNCTION(Runtime_FormatMessageString) {
+RUNTIME_FUNCTION(Runtime_AllocateSeqTwoByteString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_INT32_ARG_CHECKED(template_index, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, arg0, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, arg1, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, arg2, 3);
- Handle<String> result;
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(length, 0);
+ Handle<SeqTwoByteString> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- MessageTemplate::FormatMessage(template_index, arg0, arg1, arg2));
- isolate->native_context()->IncrementErrorsThrown();
+ isolate, result, isolate->factory()->NewRawTwoByteString(length));
return *result;
}
-#define CALLSITE_GET(NAME, RETURN) \
- RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
- CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
- Handle<String> result; \
- CallSite call_site(isolate, call_site_obj); \
- RUNTIME_ASSERT(call_site.IsValid()); \
- return RETURN(call_site.NAME(), isolate); \
- }
-
-static inline Object* ReturnDereferencedHandle(Handle<Object> obj,
- Isolate* isolate) {
- return *obj;
-}
-
-
-static inline Object* ReturnPositiveNumberOrNull(int value, Isolate* isolate) {
- if (value >= 0) return *isolate->factory()->NewNumberFromInt(value);
- return isolate->heap()->null_value();
-}
-
-
-static inline Object* ReturnBoolean(bool value, Isolate* isolate) {
- return isolate->heap()->ToBoolean(value);
-}
-
-
-CALLSITE_GET(GetFileName, ReturnDereferencedHandle)
-CALLSITE_GET(GetFunctionName, ReturnDereferencedHandle)
-CALLSITE_GET(GetScriptNameOrSourceUrl, ReturnDereferencedHandle)
-CALLSITE_GET(GetMethodName, ReturnDereferencedHandle)
-CALLSITE_GET(GetLineNumber, ReturnPositiveNumberOrNull)
-CALLSITE_GET(GetColumnNumber, ReturnPositiveNumberOrNull)
-CALLSITE_GET(IsNative, ReturnBoolean)
-CALLSITE_GET(IsToplevel, ReturnBoolean)
-CALLSITE_GET(IsEval, ReturnBoolean)
-CALLSITE_GET(IsConstructor, ReturnBoolean)
-
-#undef CALLSITE_GET
-
RUNTIME_FUNCTION(Runtime_IS_VAR) {
UNREACHABLE(); // implemented as macro in the parser
@@ -366,8 +384,8 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
JSFunction* fun = frame->function();
Object* script = fun->shared()->script();
if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined())) {
- Handle<Script> casted_script(Script::cast(script));
+ !(Script::cast(script)->source()->IsUndefined(isolate))) {
+ Handle<Script> casted_script(Script::cast(script), isolate);
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
@@ -387,16 +405,14 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
if (ComputeLocation(isolate, &location)) {
Zone zone(isolate->allocator());
- base::SmartPointer<ParseInfo> info(
+ std::unique_ptr<ParseInfo> info(
location.function()->shared()->is_function()
? new ParseInfo(&zone, location.function())
: new ParseInfo(&zone, location.script()));
if (Parser::ParseStatic(info.get())) {
CallPrinter printer(isolate, location.function()->shared()->IsBuiltin());
- const char* string = printer.Print(info->literal(), location.start_pos());
- if (strlen(string) > 0) {
- return isolate->factory()->NewStringFromAsciiChecked(string);
- }
+ Handle<String> str = printer.Print(info->literal(), location.start_pos());
+ if (str->length() > 0) return str;
} else {
isolate->clear_pending_exception();
}
@@ -416,6 +432,13 @@ RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callsite));
}
+RUNTIME_FUNCTION(Runtime_ThrowCalledOnNullOrUndefined) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, name));
+}
RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
HandleScope scope(isolate);
@@ -426,7 +449,6 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
isolate, NewTypeError(MessageTemplate::kNotConstructor, callsite));
}
-
RUNTIME_FUNCTION(Runtime_ThrowDerivedConstructorReturnedNonObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -434,17 +456,21 @@ RUNTIME_FUNCTION(Runtime_ThrowDerivedConstructorReturnedNonObject) {
isolate, NewTypeError(MessageTemplate::kDerivedConstructorReturn));
}
+RUNTIME_FUNCTION(Runtime_ThrowUndefinedOrNullToObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject, name));
+}
// ES6 section 7.3.17 CreateListFromArrayLike (obj)
RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- Handle<FixedArray> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::CreateListFromArrayLike(isolate, object, ElementTypes::kAll));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::CreateListFromArrayLike(
+ isolate, object, ElementTypes::kAll));
}
@@ -456,22 +482,90 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_GetOrdinaryHasInstance) {
+RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
+ if (args.length() == 0) {
+ // Without arguments, the result is returned as a string.
+ DCHECK_EQ(0, args.length());
+ std::stringstream stats_stream;
+ isolate->counters()->runtime_call_stats()->Print(stats_stream);
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(
+ stats_stream.str().c_str());
+ isolate->counters()->runtime_call_stats()->Reset();
+ return *result;
+ } else {
+ DCHECK_LE(args.length(), 2);
+ std::FILE* f;
+ if (args[0]->IsString()) {
+ // With a string argument, the results are appended to that file.
+ CONVERT_ARG_HANDLE_CHECKED(String, arg0, 0);
+ String::FlatContent flat = arg0->GetFlatContent();
+ const char* filename =
+ reinterpret_cast<const char*>(&(flat.ToOneByteVector()[0]));
+ f = std::fopen(filename, "a");
+ DCHECK_NOT_NULL(f);
+ } else {
+ // With an integer argument, the results are written to stdout/stderr.
+ CONVERT_SMI_ARG_CHECKED(fd, 0);
+ DCHECK(fd == 1 || fd == 2);
+ f = fd == 1 ? stdout : stderr;
+ }
+ // The second argument (if any) is a message header to be printed.
+ if (args.length() >= 2) {
+ CONVERT_ARG_HANDLE_CHECKED(String, arg1, 1);
+ arg1->PrintOn(f);
+ std::fputc('\n', f);
+ std::fflush(f);
+ }
+ OFStream stats_stream(f);
+ isolate->counters()->runtime_call_stats()->Print(stats_stream);
+ isolate->counters()->runtime_call_stats()->Reset();
+ if (args[0]->IsString())
+ std::fclose(f);
+ else
+ std::fflush(f);
+ return isolate->heap()->undefined_value();
+ }
+}
- return isolate->native_context()->ordinary_has_instance();
+RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
+ isolate->EnqueueMicrotask(microtask);
+ return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
+RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
- std::stringstream stats_stream;
- isolate->counters()->runtime_call_stats()->Print(stats_stream);
- Handle<String> result =
- isolate->factory()->NewStringFromAsciiChecked(stats_stream.str().c_str());
- isolate->counters()->runtime_call_stats()->Reset();
- return *result;
+ DCHECK(args.length() == 0);
+ isolate->RunMicrotasks();
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, callable, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Object::OrdinaryHasInstance(isolate, callable, object));
+}
+
+RUNTIME_FUNCTION(Runtime_IsWasmObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ bool is_wasm_object =
+ object->IsJSObject() && wasm::IsWasmObject(JSObject::cast(object));
+ return *isolate->factory()->ToBoolean(is_wasm_object);
+}
+
+RUNTIME_FUNCTION(Runtime_Typeof) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ return *Object::TypeOf(isolate, object);
}
} // namespace internal
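
Throughout this file (and the ones that follow) the patch collapses the
three-line pattern of declaring a Handle, running
ASSIGN_RETURN_FAILURE_ON_EXCEPTION, and returning the dereferenced result into
a single RETURN_RESULT_OR_FAILURE. A simplified reconstruction of what such a
macro has to do (the real definition lives elsewhere in the V8 tree and may
differ in detail):

    #define RETURN_RESULT_OR_FAILURE(isolate, call)    \
      do {                                             \
        Handle<Object> __result__;                     \
        if (!(call).ToHandle(&__result__)) {           \
          DCHECK((isolate)->has_pending_exception());  \
          return (isolate)->heap()->exception();       \
        }                                              \
        return *__result__;                            \
      } while (false)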
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 22ae9113d8..ce71e2c52d 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -9,6 +9,9 @@
#include "src/arguments.h"
#include "src/frames-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/bytecode-flags.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
@@ -64,14 +67,11 @@ void PrintRegisters(std::ostream& os, bool is_input,
os << " ]" << std::endl;
}
- // Find the location of the register file.
+ // Print the registers.
JavaScriptFrameIterator frame_iterator(
bytecode_iterator.bytecode_array()->GetIsolate());
- JavaScriptFrame* frame = frame_iterator.frame();
- Address register_file =
- frame->fp() + InterpreterFrameConstants::kRegisterFilePointerFromFp;
-
- // Print the registers.
+ InterpretedFrame* frame =
+ reinterpret_cast<InterpretedFrame*>(frame_iterator.frame());
int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
for (int operand_index = 0; operand_index < operand_count; operand_index++) {
interpreter::OperandType operand_type =
@@ -86,8 +86,7 @@ void PrintRegisters(std::ostream& os, bool is_input,
int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
for (int reg_index = first_reg.index();
reg_index < first_reg.index() + range; reg_index++) {
- Address reg_location = register_file - reg_index * kPointerSize;
- Object* reg_object = Memory::Object_at(reg_location);
+ Object* reg_object = frame->ReadInterpreterRegister(reg_index);
os << " [ " << std::setw(kRegFieldWidth)
<< interpreter::Register(reg_index).ToString(
bytecode_iterator.bytecode_array()->parameter_count())
@@ -117,12 +116,12 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
AdvanceToOffsetForTracing(bytecode_iterator, offset);
if (offset == bytecode_iterator.current_offset()) {
// Print bytecode.
- const uint8_t* bytecode_address =
- reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
- os << " -> " << static_cast<const void*>(bytecode_address)
- << " (" << bytecode_offset << ") : ";
- interpreter::Bytecodes::Decode(os, bytecode_address,
- bytecode_array->parameter_count());
+ const uint8_t* base_address = bytecode_array->GetFirstBytecodeAddress();
+ const uint8_t* bytecode_address = base_address + offset;
+ os << " -> " << static_cast<const void*>(bytecode_address) << " @ "
+ << std::setw(4) << offset << " : ";
+ interpreter::BytecodeDecoder::Decode(os, bytecode_address,
+ bytecode_array->parameter_count());
os << std::endl;
// Print all input registers and accumulator.
PrintRegisters(os, true, bytecode_iterator, accumulator);
diff --git a/deps/v8/src/runtime/runtime-json.cc b/deps/v8/src/runtime/runtime-json.cc
deleted file mode 100644
index 07232d59b8..0000000000
--- a/deps/v8/src/runtime/runtime-json.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/char-predicates-inl.h"
-#include "src/isolate-inl.h"
-#include "src/json-parser.h"
-#include "src/json-stringifier.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_QuoteJSONString) {
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- DCHECK(args.length() == 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, BasicJsonStringifier::StringifyString(isolate, string));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- BasicJsonStringifier stringifier(isolate);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- stringifier.Stringify(object));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_ParseJson) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- Handle<String> source;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
- Object::ToString(isolate, object));
- source = String::Flatten(source);
- // Optimized fast case where we only have Latin1 characters.
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- source->IsSeqOneByteString()
- ? JsonParser<true>::Parse(source)
- : JsonParser<false>::Parse(source));
- return *result;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 34feeba2b4..a0dd3e8de9 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -6,7 +6,6 @@
#include "src/allocation-site-scopes.h"
#include "src/arguments.h"
-#include "src/ast/ast.h"
#include "src/isolate-inl.h"
#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
@@ -85,7 +84,9 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
uint32_t element_index = 0;
if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
- if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
+ if (value->IsUninitialized(isolate)) {
+ value = handle(Smi::FromInt(0), isolate);
+ }
maybe_result = JSObject::SetOwnElementIgnoreAttributes(
boilerplate, element_index, value, NONE);
} else {
@@ -209,7 +210,7 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(closure->literals()->literal(index), isolate);
- if (boilerplate->IsUndefined()) {
+ if (boilerplate->IsUndefined(isolate)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
closure->literals()->set_literal(index, *boilerplate);
@@ -229,14 +230,14 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
- RUNTIME_ASSERT(literals_index >= 0 &&
- literals_index < literals->literals_count());
+ CHECK(literals_index >= 0);
+ CHECK(literals_index < literals->literals_count());
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(literals->literal(literals_index), isolate);
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
- if (*literal_site == isolate->heap()->undefined_value()) {
+ if (literal_site->IsUndefined(isolate)) {
Handle<Object> raw_boilerplate;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, raw_boilerplate,
@@ -263,9 +264,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
MaybeHandle<Object> maybe_copy =
JSObject::DeepCopy(boilerplate, &usage_context);
usage_context.ExitScope(site, boilerplate);
- Handle<Object> copy;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, copy, maybe_copy);
- return *copy;
+ RETURN_RESULT_OR_FAILURE(isolate, maybe_copy);
}
MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
@@ -274,7 +273,7 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(literals->literal(literals_index), isolate);
Handle<AllocationSite> site;
- if (*literal_site == isolate->heap()->undefined_value()) {
+ if (literal_site->IsUndefined(isolate)) {
DCHECK(*elements != isolate->heap()->empty_fixed_array());
Handle<Object> boilerplate;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -302,9 +301,7 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
static MaybeHandle<JSObject> CreateArrayLiteralImpl(
Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
Handle<FixedArray> elements, int flags) {
- RUNTIME_ASSERT_HANDLIFIED(
- literals_index >= 0 && literals_index < literals->literals_count(),
- JSObject);
+ CHECK(literals_index >= 0 && literals_index < literals->literals_count());
Handle<AllocationSite> site;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, site,
@@ -333,12 +330,10 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
- Handle<JSObject> result;
Handle<LiteralsArray> literals(closure->literals(), isolate);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, CreateArrayLiteralImpl(isolate, literals, literals_index,
- elements, flags));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, CreateArrayLiteralImpl(isolate, literals, literals_index,
+ elements, flags));
}
@@ -349,13 +344,11 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
- Handle<JSObject> result;
Handle<LiteralsArray> literals(closure->literals(), isolate);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
ArrayLiteral::kShallowElements));
- return *result;
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index da342de9d8..a19ccaa584 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -24,7 +24,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSValue, script_value, 0);
- RUNTIME_ASSERT(script_value->value()->IsScript());
+ CHECK(script_value->value()->IsScript());
Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
List<Handle<SharedFunctionInfo> > found;
@@ -67,13 +67,11 @@ RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) {
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- RUNTIME_ASSERT(script->value()->IsScript());
+ CHECK(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
- Handle<JSArray> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, LiveEdit::GatherCompileInfo(script_handle, source));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ LiveEdit::GatherCompileInfo(script_handle, source));
}
@@ -88,7 +86,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, old_script_name, 2);
- RUNTIME_ASSERT(original_script_value->value()->IsScript());
+ CHECK(original_script_value->value()->IsScript());
Handle<Script> original_script(Script::cast(original_script_value->value()));
Handle<Object> old_script = LiveEdit::ChangeScriptSource(
@@ -108,7 +106,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) {
CHECK(isolate->debug()->live_edit_enabled());
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
- RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info));
+ CHECK(SharedInfoWrapper::IsInstance(shared_info));
LiveEdit::FunctionSourceUpdated(shared_info);
return isolate->heap()->undefined_value();
@@ -122,7 +120,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
- RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info));
+ CHECK(SharedInfoWrapper::IsInstance(shared_info));
LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
return isolate->heap()->undefined_value();
@@ -140,11 +138,11 @@ RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) {
if (function_object->IsJSValue()) {
Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
if (script_object->IsJSValue()) {
- RUNTIME_ASSERT(JSValue::cast(*script_object)->value()->IsScript());
+ CHECK(JSValue::cast(*script_object)->value()->IsScript());
Script* script = Script::cast(JSValue::cast(*script_object)->value());
script_object = Handle<Object>(script, isolate);
}
- RUNTIME_ASSERT(function_wrapper->value()->IsSharedFunctionInfo());
+ CHECK(function_wrapper->value()->IsSharedFunctionInfo());
LiveEdit::SetFunctionScript(function_wrapper, script_object);
} else {
// Just ignore this. We may not have a SharedFunctionInfo for some functions
@@ -165,9 +163,9 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2);
- RUNTIME_ASSERT(parent_wrapper->value()->IsSharedFunctionInfo());
- RUNTIME_ASSERT(orig_wrapper->value()->IsSharedFunctionInfo());
- RUNTIME_ASSERT(subst_wrapper->value()->IsSharedFunctionInfo());
+ CHECK(parent_wrapper->value()->IsSharedFunctionInfo());
+ CHECK(orig_wrapper->value()->IsSharedFunctionInfo());
+ CHECK(subst_wrapper->value()->IsSharedFunctionInfo());
LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
subst_wrapper);
@@ -186,7 +184,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
- RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array));
+ CHECK(SharedInfoWrapper::IsInstance(shared_array));
LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
return isolate->heap()->undefined_value();
@@ -205,10 +203,10 @@ RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_shared_array, 1);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 2);
USE(new_shared_array);
- RUNTIME_ASSERT(old_shared_array->length()->IsSmi());
- RUNTIME_ASSERT(new_shared_array->length() == old_shared_array->length());
- RUNTIME_ASSERT(old_shared_array->HasFastElements());
- RUNTIME_ASSERT(new_shared_array->HasFastElements());
+ CHECK(old_shared_array->length()->IsSmi());
+ CHECK(new_shared_array->length() == old_shared_array->length());
+ CHECK(old_shared_array->HasFastElements());
+ CHECK(new_shared_array->HasFastElements());
int array_length = Smi::cast(old_shared_array->length())->value();
for (int i = 0; i < array_length; i++) {
Handle<Object> old_element;
@@ -216,14 +214,13 @@ RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, old_element,
JSReceiver::GetElement(isolate, old_shared_array, i));
- RUNTIME_ASSERT(
- old_element->IsJSValue() &&
- Handle<JSValue>::cast(old_element)->value()->IsSharedFunctionInfo());
+ CHECK(old_element->IsJSValue() &&
+ Handle<JSValue>::cast(old_element)->value()->IsSharedFunctionInfo());
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, new_element,
JSReceiver::GetElement(isolate, new_shared_array, i));
- RUNTIME_ASSERT(
- new_element->IsUndefined() ||
+ CHECK(
+ new_element->IsUndefined(isolate) ||
(new_element->IsJSValue() &&
Handle<JSValue>::cast(new_element)->value()->IsSharedFunctionInfo()));
}
@@ -261,7 +258,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
CHECK(isolate->debug()->live_edit_enabled());
DCHECK(args.length() == 2);
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
- RUNTIME_ASSERT(isolate->debug()->CheckExecutionState(break_id));
+ CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
Heap* heap = isolate->heap();
@@ -273,13 +270,16 @@ RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
return heap->undefined_value();
}
- JavaScriptFrameIterator it(isolate, id);
+ StackTraceFrameIterator it(isolate, id);
int inlined_jsframe_index =
DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
- if (inlined_jsframe_index == -1) return heap->undefined_value();
+ // Liveedit is not supported on Wasm.
+ if (inlined_jsframe_index == -1 || it.is_wasm()) {
+ return heap->undefined_value();
+ }
// We don't really care what the inlined frame index is, since we are
// throwing away the entire frame anyways.
- const char* error_message = LiveEdit::RestartFrame(it.frame());
+ const char* error_message = LiveEdit::RestartFrame(it.javascript_frame());
if (error_message) {
return *(isolate->factory()->InternalizeUtf8String(error_message));
}
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 91b6181ab7..47e560d022 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -9,147 +9,10 @@
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/third_party/fdlibm/fdlibm.h"
namespace v8 {
namespace internal {
-#define RUNTIME_UNARY_MATH(Name, name) \
- RUNTIME_FUNCTION(Runtime_Math##Name) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
- isolate->counters()->math_##name##_runtime()->Increment(); \
- CONVERT_DOUBLE_ARG_CHECKED(x, 0); \
- return *isolate->factory()->NewHeapNumber(std::name(x)); \
- }
-
-RUNTIME_UNARY_MATH(LogRT, log)
-#undef RUNTIME_UNARY_MATH
-
-
-RUNTIME_FUNCTION(Runtime_DoubleHi) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- uint64_t unsigned64 = double_to_uint64(x);
- uint32_t unsigned32 = static_cast<uint32_t>(unsigned64 >> 32);
- int32_t signed32 = bit_cast<int32_t, uint32_t>(unsigned32);
- return *isolate->factory()->NewNumber(signed32);
-}
-
-
-RUNTIME_FUNCTION(Runtime_DoubleLo) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- uint64_t unsigned64 = double_to_uint64(x);
- uint32_t unsigned32 = static_cast<uint32_t>(unsigned64);
- int32_t signed32 = bit_cast<int32_t, uint32_t>(unsigned32);
- return *isolate->factory()->NewNumber(signed32);
-}
-
-
-RUNTIME_FUNCTION(Runtime_ConstructDouble) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
- CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
- uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
- return *isolate->factory()->NewNumber(uint64_to_double(result));
-}
-
-
-RUNTIME_FUNCTION(Runtime_RemPiO2) {
- SealHandleScope shs(isolate);
- DisallowHeapAllocation no_gc;
- DCHECK(args.length() == 2);
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_ARG_CHECKED(JSTypedArray, result, 1);
- RUNTIME_ASSERT(result->byte_length() == Smi::FromInt(2 * sizeof(double)));
- FixedFloat64Array* array = FixedFloat64Array::cast(result->elements());
- double* y = static_cast<double*>(array->DataPtr());
- return Smi::FromInt(fdlibm::rempio2(x, y));
-}
-
-
-static const double kPiDividedBy4 = 0.78539816339744830962;
-
-
-RUNTIME_FUNCTION(Runtime_MathAtan2) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- isolate->counters()->math_atan2_runtime()->Increment();
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result;
- if (std::isinf(x) && std::isinf(y)) {
- // Make sure that the result in case of two infinite arguments
- // is a multiple of Pi / 4. The sign of the result is determined
- // by the first argument (x) and the sign of the second argument
- // determines the multiplier: one or three.
- int multiplier = (x < 0) ? -1 : 1;
- if (y < 0) multiplier *= 3;
- result = multiplier * kPiDividedBy4;
- } else {
- result = std::atan2(x, y);
- }
- return *isolate->factory()->NewNumber(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_MathExpRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- isolate->counters()->math_exp_runtime()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- lazily_initialize_fast_exp(isolate);
- return *isolate->factory()->NewNumber(fast_exp(x, isolate));
-}
-
-
-// Slow version of Math.pow. We check for fast paths for special cases.
-// Used if VFP3 is not available.
-RUNTIME_FUNCTION(Runtime_MathPow) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- isolate->counters()->math_pow_runtime()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-
- // If the second argument is a smi, it is much faster to call the
- // custom powi() function than the generic pow().
- if (args[1]->IsSmi()) {
- int y = args.smi_at(1);
- return *isolate->factory()->NewNumber(power_double_int(x, y));
- }
-
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- double result = power_helper(isolate, x, y);
- if (std::isnan(result)) return isolate->heap()->nan_value();
- return *isolate->factory()->NewNumber(result);
-}
-
-
-// Fast version of Math.pow if we know that y is not an integer and y is not
-// -0.5 or 0.5. Used as slow case from full codegen.
-RUNTIME_FUNCTION(Runtime_MathPowRT) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- isolate->counters()->math_pow_runtime()->Increment();
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (y == 0) {
- return Smi::FromInt(1);
- } else {
- double result = power_double_double(x, y);
- if (std::isnan(result)) return isolate->heap()->nan_value();
- return *isolate->factory()->NewNumber(result);
- }
-}
-
-
RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index efbdeb2f33..9f43c0acfc 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -13,90 +13,6 @@
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_NumberToRadixString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(radix, 1);
- RUNTIME_ASSERT(2 <= radix && radix <= 36);
-
- // Fast case where the result is a one character string.
- if (args[0]->IsSmi()) {
- int value = args.smi_at(0);
- if (value >= 0 && value < radix) {
- // Character array used for conversion.
- static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- return *isolate->factory()->LookupSingleCharacterStringFromCode(
- kCharTable[value]);
- }
- }
-
- // Slow case.
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (std::isnan(value)) {
- return isolate->heap()->nan_string();
- }
- if (std::isinf(value)) {
- if (value < 0) {
- return isolate->heap()->minus_infinity_string();
- }
- return isolate->heap()->infinity_string();
- }
- char* str = DoubleToRadixCString(value, radix);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToFixed) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- // See DoubleToFixedCString for these constants:
- RUNTIME_ASSERT(f >= 0 && f <= 20);
- RUNTIME_ASSERT(!Double(value).IsSpecial());
- char* str = DoubleToFixedCString(value, f);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToExponential) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= -1 && f <= 20);
- RUNTIME_ASSERT(!Double(value).IsSpecial());
- char* str = DoubleToExponentialCString(value, f);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberToPrecision) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
- int f = FastD2IChecked(f_number);
- RUNTIME_ASSERT(f >= 1 && f <= 21);
- RUNTIME_ASSERT(!Double(value).IsSpecial());
- char* str = DoubleToPrecisionCString(value, f);
- Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(str);
- DeleteArray(str);
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_IsValidSmi) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -121,7 +37,7 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
// Step 8.a. is already handled in the JS function.
- RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
+ CHECK(radix == 0 || (2 <= radix && radix <= 36));
subject = String::Flatten(subject);
double value;
@@ -174,20 +90,6 @@ RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
}
-// TODO(bmeurer): Kill this runtime entry. Uses in date.js are wrong anyway.
-RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, input, Object::ToNumber(input));
- double double_value = DoubleToInteger(input->Number());
- // Map both -0 and +0 to +0.
- if (double_value == 0) double_value = 0;
-
- return *isolate->factory()->NewNumber(double_value);
-}
-
-
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
RUNTIME_FUNCTION(Runtime_NumberToSmi) {
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 5bdb08541f..7908c6295c 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -17,8 +17,9 @@ namespace internal {
MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
- Handle<Object> key) {
- if (object->IsUndefined() || object->IsNull()) {
+ Handle<Object> key,
+ bool* is_found_out) {
+ if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kNonObjectPropertyLoad, key, object),
@@ -30,7 +31,9 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
LookupIterator::PropertyOrElement(isolate, object, key, &success);
if (!success) return MaybeHandle<Object>();
- return Object::GetProperty(&it);
+ MaybeHandle<Object> result = Object::GetProperty(&it);
+ if (is_found_out) *is_found_out = it.IsFound();
+ return result;
}
static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
@@ -62,7 +65,9 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
if (cell->property_details().type() == DATA) {
Object* value = cell->value();
- if (!value->IsTheHole()) return Handle<Object>(value, isolate);
+ if (!value->IsTheHole(isolate)) {
+ return Handle<Object>(value, isolate);
+ }
// If value is the hole (meaning, absent) do the general lookup.
}
}
@@ -119,7 +124,7 @@ Maybe<bool> Runtime::DeleteObjectProperty(Isolate* isolate,
LanguageMode language_mode) {
bool success = false;
LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, receiver, key, &success, LookupIterator::HIDDEN);
+ isolate, receiver, key, &success, LookupIterator::OWN);
if (!success) return Nothing<bool>();
return JSReceiver::DeleteProperty(&it, language_mode);
@@ -168,7 +173,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
}
// Slow case.
- LookupIterator::Configuration c = LookupIterator::HIDDEN;
+ LookupIterator::Configuration c = LookupIterator::OWN;
LookupIterator it = key_is_array_index
? LookupIterator(isolate, js_obj, index, js_obj, c)
: LookupIterator(js_obj, key, js_obj, c);
@@ -194,7 +199,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
key_is_array_index
? index < static_cast<uint32_t>(String::cast(*object)->length())
: key->Equals(isolate->heap()->length_string()));
- } else if (object->IsNull() || object->IsUndefined()) {
+ } else if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
}
@@ -207,7 +212,7 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> key,
Handle<Object> value,
LanguageMode language_mode) {
- if (object->IsUndefined() || object->IsNull()) {
+ if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kNonObjectPropertyStore, key, object),
@@ -230,10 +235,7 @@ RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
- Handle<Object> prototype;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
- JSReceiver::GetPrototype(isolate, obj));
- return *prototype;
+ RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
}
@@ -260,92 +262,13 @@ RUNTIME_FUNCTION(Runtime_SetPrototype) {
return *obj;
}
-
-// Enumerator used as indices into the array returned from GetOwnProperty
-enum PropertyDescriptorIndices {
- IS_ACCESSOR_INDEX,
- VALUE_INDEX,
- GETTER_INDEX,
- SETTER_INDEX,
- WRITABLE_INDEX,
- ENUMERABLE_INDEX,
- CONFIGURABLE_INDEX,
- DESCRIPTOR_SIZE
-};
-
-
-MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
- Handle<JSObject> obj,
- Handle<Name> name) {
- Heap* heap = isolate->heap();
- Factory* factory = isolate->factory();
-
- // Get attributes.
- LookupIterator it = LookupIterator::PropertyOrElement(isolate, obj, name, obj,
- LookupIterator::HIDDEN);
- Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
-
- if (!maybe.IsJust()) return MaybeHandle<Object>();
- PropertyAttributes attrs = maybe.FromJust();
- if (attrs == ABSENT) return factory->undefined_value();
-
- DCHECK(!isolate->has_pending_exception());
- Handle<FixedArray> elms = factory->NewFixedArray(DESCRIPTOR_SIZE);
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
-
- bool is_accessor_pair = it.state() == LookupIterator::ACCESSOR &&
- it.GetAccessors()->IsAccessorPair();
- elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(is_accessor_pair));
-
- if (is_accessor_pair) {
- Handle<AccessorPair> accessors =
- Handle<AccessorPair>::cast(it.GetAccessors());
- Handle<Object> getter =
- AccessorPair::GetComponent(accessors, ACCESSOR_GETTER);
- Handle<Object> setter =
- AccessorPair::GetComponent(accessors, ACCESSOR_SETTER);
- elms->set(GETTER_INDEX, *getter);
- elms->set(SETTER_INDEX, *setter);
- } else {
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, value, Object::GetProperty(&it),
- Object);
- elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
- elms->set(VALUE_INDEX, *value);
- }
-
- return factory->NewJSArrayWithElements(elms);
-}
-
-
-// Returns an array with the property description:
-// if args[1] is not a property on args[0]
-// returns undefined
-// if args[1] is a data property on args[0]
-// [false, value, Writeable, Enumerable, Configurable]
-// if args[1] is an accessor on args[0]
-// [true, GetFunction, SetFunction, Enumerable, Configurable]
-// TODO(jkummerow): Deprecated. Remove all callers and delete.
-RUNTIME_FUNCTION(Runtime_GetOwnProperty_Legacy) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- GetOwnProperty(isolate, obj, name));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(properties, 1);
// Conservative upper limit to prevent fuzz tests from going OOM.
- RUNTIME_ASSERT(properties <= 100000);
+ if (properties > 100000) return isolate->ThrowIllegalOperation();
if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties,
"OptimizeForAdding");
@@ -354,42 +277,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
}
-RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_SMI_ARG_CHECKED(slot, 0);
-
- // Go up context chain to the script context.
- Handle<Context> script_context(isolate->context()->script_context(), isolate);
- DCHECK(script_context->IsScriptContext());
- DCHECK(script_context->get(slot)->IsPropertyCell());
-
- // Lookup the named property on the global object.
- Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
- Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
- Handle<JSGlobalObject> global_object(script_context->global_object(),
- isolate);
- LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
-
- // Switch to fast mode only if there is a data property and it's not on
- // a hidden prototype.
- if (it.state() == LookupIterator::DATA &&
- it.GetHolder<Object>().is_identical_to(global_object)) {
- // Now update the cell in the script context.
- Handle<PropertyCell> cell = it.GetPropertyCell();
- script_context->set(slot, *cell);
- } else {
- // This is not a fast case, so keep this access in a slow mode.
- // Store empty_property_cell here to release the outdated property cell.
- script_context->set(slot, isolate->heap()->empty_property_cell());
- }
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
- return *result;
-}
-
-
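// Editor's note: the deleted Runtime_LoadGlobalViaContext documented the
// script-context fast path for globals: each slot caches either the global's
// PropertyCell (fast) or the empty_property_cell sentinel (slow). Only the
// load half is removed; StoreGlobalViaContext below keeps the protocol:
//
//   if (it.state() == LookupIterator::DATA &&
//       it.GetHolder<Object>().is_identical_to(global_object)) {
//     script_context->set(slot, *it.GetPropertyCell());        // fast path
//   } else {
//     script_context->set(slot, heap->empty_property_cell());  // slow path
//   }
//
// The HIDDEN -> OWN change on the LookupIterator appears to track the removal
// of hidden-prototype lookups in this release (an inference from this diff,
// not a documented note).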
namespace {
Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
@@ -404,7 +291,7 @@ Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
Handle<JSGlobalObject> global_object(script_context->global_object(),
isolate);
- LookupIterator it(global_object, name, global_object, LookupIterator::HIDDEN);
+ LookupIterator it(global_object, name, global_object, LookupIterator::OWN);
// Switch to fast mode only if there is a data property and it's not on
// a hidden prototype.
@@ -455,13 +342,10 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Runtime::GetObjectProperty(isolate, object, key));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Runtime::GetObjectProperty(isolate, object, key));
}
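// Editor's note: RETURN_RESULT_OR_FAILURE collapses the recurring "declare a
// handle, ASSIGN_RETURN_FAILURE_ON_EXCEPTION, return *result" sequence seen
// on the removed lines. A minimal sketch, approximating the definition in
// src/runtime/runtime-utils.h:

#define RETURN_RESULT_OR_FAILURE(isolate, call)   \
  do {                                            \
    Handle<Object> __result__;                    \
    if (!(call).ToHandle(&__result__)) {          \
      DCHECK((isolate)->has_pending_exception()); \
      return (isolate)->heap()->exception();      \
    }                                             \
    return *__result__;                           \
  } while (false)

// The same mechanical rewrite accounts for most of the churn below in
// runtime-object.cc, runtime-operators.cc, and runtime-proxy.cc.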
-
// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
HandleScope scope(isolate);
@@ -470,16 +354,13 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, KeyedGetObjectProperty(isolate, receiver_obj, key_obj));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, KeyedGetObjectProperty(isolate, receiver_obj, key_obj));
}
-
RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -492,14 +373,11 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
LookupIterator it(object, name, object, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
- RUNTIME_ASSERT(!it.IsFound());
+ CHECK(!it.IsFound());
#endif
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetOwnPropertyIgnoreAttributes(object, name, value, attrs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+ object, name, value, attrs));
}
@@ -507,7 +385,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
// This is used to create an indexed data property into an array.
RUNTIME_FUNCTION(Runtime_AddElement) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
@@ -521,35 +399,32 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
- RUNTIME_ASSERT(!it.IsFound());
+ CHECK(!it.IsFound());
if (object->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(object);
- RUNTIME_ASSERT(!JSArray::WouldChangeReadOnlyLength(array, index));
+ CHECK(!JSArray::WouldChangeReadOnlyLength(array, index));
}
#endif
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::SetOwnElementIgnoreAttributes(object, index, value, NONE));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, JSObject::SetOwnElementIgnoreAttributes(
+ object, index, value, NONE));
}
RUNTIME_FUNCTION(Runtime_AppendElement) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ CHECK(!value->IsTheHole(isolate));
uint32_t index;
CHECK(array->length()->ToArrayIndex(&index));
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::AddDataElement(array, index, value, NONE));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSObject::AddDataElement(array, index, value, NONE));
JSObject::ValidateElements(array);
return *array;
}
@@ -557,19 +432,16 @@ RUNTIME_FUNCTION(Runtime_AppendElement) {
RUNTIME_FUNCTION(Runtime_SetProperty) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode_arg, 3);
- LanguageMode language_mode = language_mode_arg;
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
- return *result;
}
@@ -612,8 +484,8 @@ RUNTIME_FUNCTION(Runtime_DeleteProperty_Strict) {
RUNTIME_FUNCTION(Runtime_HasProperty) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
// Check that {object} is actually a receiver.
if (!object->IsJSReceiver()) {
@@ -635,21 +507,6 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
}
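// Editor's note: Runtime_HasProperty's convention flips from (key, object) to
// (object, key), so its callers must change in the same commit; both slots
// are plain Objects, so a stale caller would only be caught by the
// IsJSReceiver() check below. The CONVERT macro itself is approximately:
//
//   #define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
//     CHECK(args[index]->Is##Type());                     \
//     Handle<Type> name = args.at<Type>(index);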
-RUNTIME_FUNCTION(Runtime_PropertyIsEnumerable) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
-
- Maybe<PropertyAttributes> maybe =
- JSReceiver::GetOwnPropertyAttributes(object, key);
- if (!maybe.IsJust()) return isolate->heap()->exception();
- if (maybe.FromJust() == ABSENT) return isolate->heap()->false_value();
- return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
-}
-
-
RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -660,7 +517,8 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
Handle<FixedArray> keys;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, keys,
- JSReceiver::GetKeys(object, OWN_ONLY, filter, CONVERT_TO_STRING));
+ KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly, filter,
+ GetKeysConversion::kConvertToString));
return *isolate->factory()->NewJSArrayWithElements(keys);
}
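// Editor's note: key collection now calls KeyAccumulator directly, with enums
// spelling out what the old OWN_ONLY and CONVERT_TO_STRING constants meant.
// A hedged usage sketch (ENUMERABLE_STRINGS stands in for whatever filter the
// caller built):
//
//   Handle<FixedArray> keys;
//   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
//       isolate, keys,
//       KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
//                               ENUMERABLE_STRINGS,
//                               GetKeysConversion::kConvertToString));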
@@ -708,10 +566,7 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, JSObject::New(target, new_target));
}
@@ -726,44 +581,19 @@ RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
}
-RUNTIME_FUNCTION(Runtime_GlobalProxy) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- return function->context()->global_proxy();
-}
-
-
-RUNTIME_FUNCTION(Runtime_LookupAccessor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_SMI_ARG_CHECKED(flag, 2);
- AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
- if (!receiver->IsJSObject()) return isolate->heap()->undefined_value();
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
- RUNTIME_ASSERT((index->value() & 1) == 1);
+ CHECK((index->value() & 1) == 1);
FieldIndex field_index =
FieldIndex::ForLoadByFieldIndex(object->map(), index->value());
if (field_index.is_inobject()) {
- RUNTIME_ASSERT(field_index.property_index() <
- object->map()->GetInObjectProperties());
+ CHECK(field_index.property_index() <
+ object->map()->GetInObjectProperties());
} else {
- RUNTIME_ASSERT(field_index.outobject_array_index() <
- object->properties()->length());
+ CHECK(field_index.outobject_array_index() < object->properties()->length());
}
return *JSObject::FastPropertyAt(object, Representation::Double(),
field_index);
@@ -793,9 +623,8 @@ RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
}
-
-static bool IsValidAccessor(Handle<Object> obj) {
- return obj->IsUndefined() || obj->IsCallable() || obj->IsNull();
+static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
+ return obj->IsUndefined(isolate) || obj->IsCallable() || obj->IsNull(isolate);
}
@@ -809,12 +638,12 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
HandleScope scope(isolate);
DCHECK(args.length() == 5);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
- RUNTIME_ASSERT(!obj->IsNull());
+ CHECK(!obj->IsNull(isolate));
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
- RUNTIME_ASSERT(IsValidAccessor(getter));
+ CHECK(IsValidAccessor(isolate, getter));
CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
- RUNTIME_ASSERT(IsValidAccessor(setter));
+ CHECK(IsValidAccessor(isolate, setter));
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 4);
RETURN_FAILURE_ON_EXCEPTION(
@@ -823,34 +652,6 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
}
-// Implements part of 8.12.9 DefineOwnProperty.
-// There are 3 cases that lead here:
-// Step 4a - define a new data property.
-// Steps 9b & 12 - replace an existing accessor property with a data property.
-// Step 12 - update an existing data property with a data or generic
-// descriptor.
-RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
-
- LookupIterator it = LookupIterator::PropertyOrElement(
- isolate, object, name, object, LookupIterator::OWN);
- if (it.state() == LookupIterator::ACCESS_CHECK && !it.HasAccess()) {
- return isolate->heap()->undefined_value();
- }
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, JSObject::DefineOwnPropertyIgnoreAttributes(
- &it, value, attrs, JSObject::DONT_FORCE_FIELD));
-
- return *result;
-}
-
RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
HandleScope scope(isolate);
DCHECK(args.length() == 5);
@@ -860,7 +661,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
- if (FLAG_harmony_function_name && set_function_name) {
+ if (set_function_name) {
DCHECK(value->IsJSFunction());
JSFunction::SetName(Handle<JSFunction>::cast(value), name,
isolate->factory()->empty_string());
@@ -885,6 +686,15 @@ RUNTIME_FUNCTION(Runtime_GetDataProperty) {
return *JSReceiver::GetDataProperty(object, name);
}
+RUNTIME_FUNCTION(Runtime_GetConstructorName) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+
+ CHECK(!object->IsUndefined(isolate) && !object->IsNull(isolate));
+ Handle<JSReceiver> recv = Object::ToObject(isolate, object).ToHandleChecked();
+ return *JSReceiver::GetConstructorName(recv);
+}
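// Editor's note: a new runtime entry. The CHECK guards matter because
// Object::ToObject throws for undefined and null, which would make the
// ToHandleChecked() call crash; for every other value ToObject cannot fail.
// For an instance created by `new Foo()` this is expected to yield "Foo".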
RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
SealHandleScope shs(isolate);
@@ -929,8 +739,7 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- if (FLAG_harmony_function_name &&
- String::cast(getter->shared()->name())->length() == 0) {
+ if (String::cast(getter->shared()->name())->length() == 0) {
JSFunction::SetName(getter, name, isolate->factory()->get_string());
}
@@ -950,8 +759,7 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- if (FLAG_harmony_function_name &&
- String::cast(setter->shared()->name())->length() == 0) {
+ if (String::cast(setter->shared()->name())->length() == 0) {
JSFunction::SetName(setter, name, isolate->factory()->set_string());
}
@@ -967,10 +775,7 @@ RUNTIME_FUNCTION(Runtime_ToObject) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
- return *receiver;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToObject(isolate, object));
}
@@ -978,10 +783,7 @@ RUNTIME_FUNCTION(Runtime_ToPrimitive) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::ToPrimitive(input));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToPrimitive(input));
}
@@ -989,31 +791,15 @@ RUNTIME_FUNCTION(Runtime_ToPrimitive_Number) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::ToPrimitive(input, ToPrimitiveHint::kNumber));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_ToPrimitive_String) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::ToPrimitive(input, ToPrimitiveHint::kString));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Object::ToPrimitive(input, ToPrimitiveHint::kNumber));
}
-
RUNTIME_FUNCTION(Runtime_ToNumber) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::ToNumber(input));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToNumber(input));
}
@@ -1021,10 +807,7 @@ RUNTIME_FUNCTION(Runtime_ToInteger) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::ToInteger(isolate, input));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToInteger(isolate, input));
}
@@ -1032,10 +815,7 @@ RUNTIME_FUNCTION(Runtime_ToLength) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::ToLength(isolate, input));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToLength(isolate, input));
}
@@ -1043,10 +823,7 @@ RUNTIME_FUNCTION(Runtime_ToString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::ToString(isolate, input));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToString(isolate, input));
}
@@ -1054,10 +831,7 @@ RUNTIME_FUNCTION(Runtime_ToName) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::ToName(isolate, input));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ToName(isolate, input));
}
@@ -1104,97 +878,6 @@ RUNTIME_FUNCTION(Runtime_Compare) {
return isolate->heap()->exception();
}
-
-RUNTIME_FUNCTION(Runtime_InstanceOf) {
- // TODO(4447): Remove this function when ES6 instanceof ships for good.
- DCHECK(!FLAG_harmony_instanceof);
-
- // ECMA-262, section 11.8.6, page 54.
- HandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
- // {callable} must have a [[Call]] internal method.
- if (!callable->IsCallable()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kInstanceofFunctionExpected, callable));
- }
- // If {object} is not a receiver, return false.
- if (!object->IsJSReceiver()) {
- return isolate->heap()->false_value();
- }
- // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
- // and use that instead of {callable}.
- while (callable->IsJSBoundFunction()) {
- callable =
- handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
- isolate);
- }
- DCHECK(callable->IsCallable());
- // Get the "prototype" of {callable}; raise an error if it's not a receiver.
- Handle<Object> prototype;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, prototype,
- JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
- isolate->factory()->prototype_string()));
- if (!prototype->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
- }
- // Return whether or not {prototype} is in the prototype chain of {object}.
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
- Maybe<bool> result =
- JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
-
-RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
- // ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
- HandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
- // {callable} must have a [[Call]] internal method.
- if (!callable->IsCallable()) {
- return isolate->heap()->false_value();
- }
- // If {object} is not a receiver, return false.
- if (!object->IsJSReceiver()) {
- return isolate->heap()->false_value();
- }
- // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
- // and use that instead of {callable}.
- while (callable->IsJSBoundFunction()) {
- callable =
- handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
- isolate);
- }
- DCHECK(callable->IsCallable());
- // Get the "prototype" of {callable}; raise an error if it's not a receiver.
- Handle<Object> prototype;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, prototype,
- JSReceiver::GetProperty(Handle<JSReceiver>::cast(callable),
- isolate->factory()->prototype_string()));
- if (!prototype->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
- }
- // Return whether or not {prototype} is in the prototype chain of {object}.
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
- Maybe<bool> result =
- JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
- MAYBE_RETURN(result, isolate->heap()->exception());
- return isolate->heap()->ToBoolean(result.FromJust());
-}
-
-
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -1229,24 +912,20 @@ RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
}
-RUNTIME_FUNCTION(Runtime_ObjectDefineProperty) {
+RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, attributes, 2);
- return JSReceiver::DefineProperty(isolate, o, name, attributes);
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectDefineProperties) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, o, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, properties, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, o, JSReceiver::DefineProperties(isolate, o, properties));
- return *o;
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, o, key, &success, LookupIterator::OWN);
+ if (!success) return isolate->heap()->exception();
+ MAYBE_RETURN(
+ JSReceiver::CreateDataProperty(&it, value, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+ return *value;
}
} // namespace internal
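// Editor's note: the Maybe<bool> plus MAYBE_RETURN pattern used by the new
// Runtime_CreateDataProperty is the standard way to propagate exceptions
// from spec operations that answer a yes/no question. Approximately:
//
//   #define MAYBE_RETURN(call, value)         \
//     do {                                    \
//       if ((call).IsNothing()) return value; \
//     } while (false)
//
// Since CreateDataProperty runs with Object::THROW_ON_ERROR, a failed define
// has already scheduled a TypeError by the time IsNothing() reports true.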
diff --git a/deps/v8/src/runtime/runtime-observe.cc b/deps/v8/src/runtime/runtime-observe.cc
deleted file mode 100644
index 0407b8a9df..0000000000
--- a/deps/v8/src/runtime/runtime-observe.cc
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/debug/debug.h"
-#include "src/isolate-inl.h"
-
-namespace v8 {
-namespace internal {
-
-RUNTIME_FUNCTION(Runtime_IsObserved) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
-
- if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value();
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
- DCHECK(!obj->IsJSGlobalProxy() || !obj->map()->is_observed());
- return isolate->heap()->ToBoolean(obj->map()->is_observed());
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetIsObserved) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
- RUNTIME_ASSERT(!obj->IsJSGlobalProxy());
- if (obj->IsJSProxy()) return isolate->heap()->undefined_value();
- RUNTIME_ASSERT(!obj->map()->is_observed());
-
- DCHECK(obj->IsJSObject());
- JSObject::SetObserved(Handle<JSObject>::cast(obj));
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
- isolate->EnqueueMicrotask(microtask);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- isolate->RunMicrotasks();
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callback, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, argument, 1);
- v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
- // We should send a message on uncaught exception thrown during
- // Object.observe delivery while not interrupting further delivery, thus
- // we make a call inside a verbose TryCatch.
- catcher.SetVerbose(true);
- Handle<Object> argv[] = {argument};
-
- // If we are in step-in mode, flood the handler.
- isolate->debug()->EnableStepIn();
-
- USE(Execution::Call(isolate, callback, isolate->factory()->undefined_value(),
- arraysize(argv), argv));
- if (isolate->has_pending_exception()) {
- isolate->ReportPendingMessages();
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObservationState) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- isolate->CountUsage(v8::Isolate::kObjectObserve);
- return isolate->heap()->observation_state();
-}
-
-
-static bool ContextsHaveSameOrigin(Handle<Context> context1,
- Handle<Context> context2) {
- return context1->security_token() == context2->security_token();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObserverObjectAndRecordHaveSameOrigin) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, observer, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, record, 2);
-
- while (observer->IsJSBoundFunction()) {
- observer = handle(
- Handle<JSBoundFunction>::cast(observer)->bound_target_function());
- }
- if (!observer->IsJSFunction()) return isolate->heap()->false_value();
-
- Handle<Context> observer_context(
- Handle<JSFunction>::cast(observer)->context()->native_context());
- Handle<Context> object_context(object->GetCreationContext());
- Handle<Context> record_context(record->GetCreationContext());
-
- return isolate->heap()->ToBoolean(
- ContextsHaveSameOrigin(object_context, observer_context) &&
- ContextsHaveSameOrigin(object_context, record_context));
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectWasCreatedInCurrentOrigin) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- Handle<Context> creation_context(object->GetCreationContext(), isolate);
- return isolate->heap()->ToBoolean(
- ContextsHaveSameOrigin(creation_context, isolate->native_context()));
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObjectContextObjectObserve) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- Handle<Context> context(object->GetCreationContext(), isolate);
- return context->native_object_observe();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObjectContextObjectGetNotifier) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-
- Handle<Context> context(object->GetCreationContext(), isolate);
- return context->native_object_get_notifier();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetObjectContextNotifierPerformChange) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object_info, 0);
-
- Handle<Context> context(object_info->GetCreationContext(), isolate);
- return context->native_object_notifier_perform_change();
-}
-} // namespace internal
-} // namespace v8
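// Editor's note: runtime-observe.cc is deleted wholesale; Object.observe was
// withdrawn from TC39 and this release drops its runtime support. The
// microtask entries removed with it are not the only queueing mechanism: the
// public embedder API remains, e.g. (a sketch against include/v8.h):

#include "include/v8.h"

void QueueAndDrain(v8::Isolate* isolate, v8::Local<v8::Function> fn) {
  isolate->EnqueueMicrotask(fn);  // queue behind any pending microtasks
  isolate->RunMicrotasks();       // drain the queue explicitly
}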
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index e55ab7c542..2a9255b77e 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -14,10 +14,7 @@ RUNTIME_FUNCTION(Runtime_Multiply) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::Multiply(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::Multiply(isolate, lhs, rhs));
}
@@ -26,10 +23,7 @@ RUNTIME_FUNCTION(Runtime_Divide) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::Divide(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::Divide(isolate, lhs, rhs));
}
@@ -38,10 +32,7 @@ RUNTIME_FUNCTION(Runtime_Modulus) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::Modulus(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::Modulus(isolate, lhs, rhs));
}
@@ -50,10 +41,7 @@ RUNTIME_FUNCTION(Runtime_Add) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::Add(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::Add(isolate, lhs, rhs));
}
@@ -62,10 +50,7 @@ RUNTIME_FUNCTION(Runtime_Subtract) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::Subtract(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::Subtract(isolate, lhs, rhs));
}
@@ -74,10 +59,7 @@ RUNTIME_FUNCTION(Runtime_ShiftLeft) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::ShiftLeft(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftLeft(isolate, lhs, rhs));
}
@@ -86,10 +68,7 @@ RUNTIME_FUNCTION(Runtime_ShiftRight) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::ShiftRight(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::ShiftRight(isolate, lhs, rhs));
}
@@ -98,10 +77,8 @@ RUNTIME_FUNCTION(Runtime_ShiftRightLogical) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::ShiftRightLogical(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Object::ShiftRightLogical(isolate, lhs, rhs));
}
@@ -110,10 +87,7 @@ RUNTIME_FUNCTION(Runtime_BitwiseAnd) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::BitwiseAnd(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseAnd(isolate, lhs, rhs));
}
@@ -122,10 +96,7 @@ RUNTIME_FUNCTION(Runtime_BitwiseOr) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::BitwiseOr(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseOr(isolate, lhs, rhs));
}
@@ -134,10 +105,7 @@ RUNTIME_FUNCTION(Runtime_BitwiseXor) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Object::BitwiseXor(isolate, lhs, rhs));
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, Object::BitwiseXor(isolate, lhs, rhs));
}
RUNTIME_FUNCTION(Runtime_Equal) {
@@ -216,5 +184,14 @@ RUNTIME_FUNCTION(Runtime_GreaterThanOrEqual) {
return isolate->heap()->ToBoolean(result.FromJust());
}
+RUNTIME_FUNCTION(Runtime_InstanceOf) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ Object::InstanceOf(isolate, object, callable));
+}
+
} // namespace internal
} // namespace v8
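// Editor's note: Runtime_InstanceOf reappears above as a thin wrapper. The
// spec steps deleted from runtime-object.cc (callable check, bound-function
// unwrapping, "prototype" lookup, prototype-chain walk) now sit behind
// Object::InstanceOf, which follows the ES2015 InstanceofOperator algorithm,
// abridged:
//
//   1. If Type(C) is not Object, throw a TypeError.
//   2. Let instOfHandler be GetMethod(C, @@hasInstance).
//   3. If instOfHandler is not undefined, return ToBoolean(Call(...)).
//   4. If IsCallable(C) is false, throw a TypeError.
//   5. Return OrdinaryHasInstance(C, O).
//
// That this ships unconditionally is inferred from the deleted
// FLAG_harmony_instanceof guard, not from release notes.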
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 7764d25c58..87c7c9112b 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -40,17 +40,15 @@ RUNTIME_FUNCTION(Runtime_JSProxyCall) {
Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
// 6. If trap is undefined, then
int const arguments_length = args.length() - 2;
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
// 6.a. Return Call(target, thisArgument, argumentsList).
ScopedVector<Handle<Object>> argv(arguments_length);
for (int i = 0; i < arguments_length; ++i) {
argv[i] = args.at<Object>(i + 1);
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, target, receiver,
- arguments_length, argv.start()));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Execution::Call(isolate, target, receiver, arguments_length,
+ argv.start()));
}
// 7. Let argArray be CreateArrayFromList(argumentsList).
Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
@@ -63,12 +61,10 @@ RUNTIME_FUNCTION(Runtime_JSProxyCall) {
}
}
// 8. Return Call(trap, handler, «target, thisArgument, argArray»).
- Handle<Object> trap_result;
Handle<Object> trap_args[] = {target, receiver, arg_array};
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, trap_result,
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
Execution::Call(isolate, trap, handler, arraysize(trap_args), trap_args));
- return *trap_result;
}
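// Editor's note: IsUndefined() and IsNull() grow an Isolate* parameter across
// this update. A plausible reading, not documented in the diff: the oddball
// singletons are heap roots, and passing the isolate lets the predicate
// compare against the root directly instead of deriving the heap from the
// object. Roughly the shape of such a check:
//
//   bool IsUndefinedSketch(Object* obj, Isolate* isolate) {
//     return obj == isolate->heap()->undefined_value();
//   }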
@@ -98,7 +94,7 @@ RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
Object::GetMethod(Handle<JSReceiver>::cast(handler), trap_name));
// 6. If trap is undefined, then
int const arguments_length = args.length() - 3;
- if (trap->IsUndefined()) {
+ if (trap->IsUndefined(isolate)) {
// 6.a. Assert: target has a [[Construct]] internal method.
DCHECK(target->IsConstructor());
// 6.b. Return Construct(target, argumentsList, newTarget).
@@ -106,11 +102,9 @@ RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
for (int i = 0; i < arguments_length; ++i) {
argv[i] = args.at<Object>(i + 1);
}
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::New(isolate, target, new_target,
- arguments_length, argv.start()));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Execution::New(isolate, target, new_target, arguments_length,
+ argv.start()));
}
// 7. Let argArray be CreateArrayFromList(argumentsList).
Handle<JSArray> arg_array = isolate->factory()->NewJSArray(
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index aead0171ce..b36e5e66cb 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -386,11 +386,10 @@ void FindStringIndicesDispatch(Isolate* isolate, String* subject,
}
}
-
template <typename ResultSeqString>
MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp,
- Handle<String> replacement, Handle<JSArray> last_match_info) {
+ Handle<String> replacement, Handle<JSObject> last_match_info) {
DCHECK(subject->IsFlat());
DCHECK(replacement->IsFlat());
@@ -465,10 +464,9 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
return *result;
}
-
MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
- Handle<String> replacement, Handle<JSArray> last_match_info) {
+ Handle<String> replacement, Handle<JSObject> last_match_info) {
DCHECK(subject->IsFlat());
DCHECK(replacement->IsFlat());
@@ -544,16 +542,13 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
RegExpImpl::SetLastMatchInfo(last_match_info, subject, capture_count,
global_cache.LastSuccessfulMatch());
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.ToString());
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, builder.ToString());
}
-
template <typename ResultSeqString>
MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_info) {
+ Handle<JSObject> last_match_info) {
DCHECK(subject->IsFlat());
// Shortcut for simple non-regexp global replacements
@@ -656,10 +651,10 @@ RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 3);
- RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
- RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
+ CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
+ CHECK(last_match_info->HasFastObjectElements());
subject = String::Flatten(subject);
@@ -686,11 +681,11 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
- RUNTIME_ASSERT(limit > 0);
+ CHECK(limit > 0);
int subject_length = subject->length();
int pattern_length = pattern->length();
- RUNTIME_ASSERT(pattern_length > 0);
+ CHECK(pattern_length > 0);
if (limit == 0xffffffffu) {
FixedArray* last_match_cache_unused;
@@ -773,17 +768,14 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_INT32_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 3);
// Due to the way the JS calls are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
- RUNTIME_ASSERT(index >= 0);
- RUNTIME_ASSERT(index <= subject->length());
+ CHECK(index >= 0);
+ CHECK(index <= subject->length());
isolate->counters()->regexp_entry_runtime()->Increment();
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- RegExpImpl::Exec(regexp, subject, index, last_match_info));
- return *result;
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RegExpImpl::Exec(regexp, subject, index, last_match_info));
}
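// Editor's note: every RegExp entry in this file widens last_match_info from
// Handle<JSArray> to Handle<JSObject>, so callers may pass any suitable
// object; the retained CHECK(last_match_info->HasFastObjectElements()) is
// what actually protects the in-place writes done by SetLastMatchInfo.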
@@ -807,7 +799,7 @@ RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(size, 0);
- RUNTIME_ASSERT(size >= 0 && size <= FixedArray::kMaxLength);
+ CHECK(size >= 0 && size <= FixedArray::kMaxLength);
CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, input, 2);
Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
@@ -843,7 +835,7 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
template <bool has_capture>
static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Handle<JSRegExp> regexp,
- Handle<JSArray> last_match_array,
+ Handle<JSObject> last_match_array,
Handle<JSArray> result_array) {
DCHECK(subject->IsFlat());
DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
@@ -993,13 +985,13 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
- RUNTIME_ASSERT(result_array->HasFastObjectElements());
+ CHECK(last_match_info->HasFastObjectElements());
+ CHECK(result_array->HasFastObjectElements());
subject = String::Flatten(subject);
- RUNTIME_ASSERT(regexp->GetFlags() & JSRegExp::kGlobal);
+ CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
if (regexp->CaptureCount() == 0) {
return SearchRegExpMultiple<false>(isolate, subject, regexp,
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 5f3cdf2682..26bfb29d93 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -4,9 +4,10 @@
#include "src/runtime/runtime-utils.h"
+#include <memory>
+
#include "src/accessors.h"
#include "src/arguments.h"
-#include "src/ast/scopeinfo.h"
#include "src/ast/scopes.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
@@ -16,43 +17,56 @@
namespace v8 {
namespace internal {
-static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) {
+RUNTIME_FUNCTION(Runtime_ThrowConstAssignError) {
HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kVarRedeclaration, name));
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(MessageTemplate::kConstAssign));
}
+namespace {
-RUNTIME_FUNCTION(Runtime_ThrowConstAssignError) {
+enum class RedeclarationType { kSyntaxError = 0, kTypeError = 1 };
+
+Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name,
+ RedeclarationType redeclaration_type) {
HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(isolate,
- NewTypeError(MessageTemplate::kConstAssign));
+ if (redeclaration_type == RedeclarationType::kSyntaxError) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewSyntaxError(MessageTemplate::kVarRedeclaration, name));
+ } else {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kVarRedeclaration, name));
+ }
}
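// Editor's note: the error kind is now chosen per spec call site instead of
// being hard-coded: GlobalDeclarationInstantiation redeclarations raise
// SyntaxError, while the eval path (fnDefinable false) raises TypeError.
// Callers pick via the enum:
//
//   return ThrowRedeclarationError(isolate, name,
//                                  RedeclarationType::kSyntaxError);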
// May throw a RedeclarationError.
-static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
- Handle<String> name, Handle<Object> value,
- PropertyAttributes attr, bool is_var,
- bool is_const, bool is_function) {
+Object* DeclareGlobal(
+ Isolate* isolate, Handle<JSGlobalObject> global, Handle<String> name,
+ Handle<Object> value, PropertyAttributes attr, bool is_var,
+ bool is_function, RedeclarationType redeclaration_type,
+ Handle<TypeFeedbackVector> feedback_vector = Handle<TypeFeedbackVector>(),
+ FeedbackVectorSlot slot = FeedbackVectorSlot::Invalid()) {
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
ScriptContextTable::LookupResult lookup;
if (ScriptContextTable::Lookup(script_contexts, name, &lookup) &&
IsLexicalVariableMode(lookup.mode)) {
- return ThrowRedeclarationError(isolate, name);
+ // ES#sec-globaldeclarationinstantiation 6.a:
+ // If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ return ThrowRedeclarationError(isolate, name,
+ RedeclarationType::kSyntaxError);
}
  // Do the lookup on own properties only; see ES5 erratum.
- LookupIterator it(global, name, global,
- LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ LookupIterator it(global, name, global, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
if (it.IsFound()) {
PropertyAttributes old_attributes = maybe.FromJust();
// The name was declared before; check for conflicting re-declarations.
- if (is_const) return ThrowRedeclarationError(isolate, name);
// Skip var re-declarations.
if (is_var) return isolate->heap()->undefined_value();
@@ -69,7 +83,11 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
(it.state() == LookupIterator::ACCESSOR &&
it.GetAccessors()->IsAccessorPair())) {
- return ThrowRedeclarationError(isolate, name);
+ // ES#sec-globaldeclarationinstantiation 5.d:
+ // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+ // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+ // If fnDefinable is false, throw a TypeError exception.
+ return ThrowRedeclarationError(isolate, name, redeclaration_type);
}
// If the existing property is not configurable, keep its attributes. Do
attr = old_attributes;
@@ -88,33 +106,35 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
+ if (!feedback_vector.is_null()) {
+ DCHECK_EQ(*global, *it.GetHolder<Object>());
+ // Preinitialize the feedback slot if the global object does not have
+ // named interceptor or the interceptor is not masking.
+ if (!global->HasNamedInterceptor() ||
+ global->GetNamedInterceptor()->non_masking()) {
+ LoadGlobalICNexus nexus(feedback_vector, slot);
+ nexus.ConfigurePropertyCellMode(it.GetPropertyCell());
+ }
+ }
return isolate->heap()->undefined_value();
}
-
-RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
+Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> pairs, int flags,
+ Handle<TypeFeedbackVector> feedback_vector) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
Handle<JSGlobalObject> global(isolate->global_object());
Handle<Context> context(isolate->context());
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
- CONVERT_SMI_ARG_CHECKED(flags, 1);
-
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 2, {
- Handle<String> name(String::cast(pairs->get(i)));
+ FeedbackVectorSlot slot(Smi::cast(pairs->get(i))->value());
+ Handle<String> name(feedback_vector->GetName(slot), isolate);
Handle<Object> initial_value(pairs->get(i + 1), isolate);
- // We have to declare a global const property. To capture we only
- // assign to it when evaluating the assignment for "const x =
- // <expr>" the initial value is the hole.
- bool is_var = initial_value->IsUndefined();
- bool is_const = initial_value->IsTheHole();
+ bool is_var = initial_value->IsUndefined(isolate);
bool is_function = initial_value->IsSharedFunctionInfo();
- DCHECK_EQ(1,
- BoolToInt(is_var) + BoolToInt(is_const) + BoolToInt(is_function));
+ DCHECK_EQ(1, BoolToInt(is_var) + BoolToInt(is_function));
Handle<Object> value;
if (is_function) {
@@ -134,154 +154,126 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
bool is_native = DeclareGlobalsNativeFlag::decode(flags);
bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
int attr = NONE;
- if (is_const) attr |= READ_ONLY;
if (is_function && is_native) attr |= READ_ONLY;
- if (!is_const && !is_eval) attr |= DONT_DELETE;
-
- Object* result = DeclareGlobals(isolate, global, name, value,
- static_cast<PropertyAttributes>(attr),
- is_var, is_const, is_function);
+ if (!is_eval) attr |= DONT_DELETE;
+
+ // ES#sec-globaldeclarationinstantiation 5.d:
+ // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+ Object* result = DeclareGlobal(
+ isolate, global, name, value, static_cast<PropertyAttributes>(attr),
+ is_var, is_function, RedeclarationType::kSyntaxError, feedback_vector,
+ slot);
if (isolate->has_pending_exception()) return result;
});
return isolate->heap()->undefined_value();
}
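// Editor's note: the pairs array changes encoding here: even slots used to
// hold the declared name and now hold a Smi-tagged FeedbackVectorSlot, from
// which the name is recovered:
//
//   FeedbackVectorSlot slot(Smi::cast(pairs->get(i))->value());
//   Handle<String> name(feedback_vector->GetName(slot), isolate);
//
// Carrying the slot is what lets DeclareGlobal preinitialize the
// LoadGlobalIC's property cell at declaration time rather than on first load.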
+} // namespace
-RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
+RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
HandleScope scope(isolate);
- // args[0] == name
- // args[1] == language_mode
- // args[2] == value (optional)
-
- // Determine if we need to assign to the variable if it already
- // exists (based on the number of arguments).
- RUNTIME_ASSERT(args.length() == 3);
+ DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, feedback_vector, 2);
- Handle<JSGlobalObject> global(isolate->context()->global_object());
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::SetProperty(global, name, value, language_mode));
- return *result;
+ return DeclareGlobals(isolate, pairs, flags, feedback_vector);
}
+// TODO(ishell): merge this with Runtime::kDeclareGlobals once interpreter
+// is able to pass feedback vector.
+RUNTIME_FUNCTION(Runtime_DeclareGlobalsForInterpreter) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
-RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
- HandleScope handle_scope(isolate);
- // All constants are declared with an initial value. The name
- // of the constant is the first argument and the initial value
- // is the second.
- RUNTIME_ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
- Handle<JSGlobalObject> global = isolate->global_object();
-
- // Lookup the property as own on the global object.
- LookupIterator it(global, name, global,
- LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- DCHECK(maybe.IsJust());
- PropertyAttributes old_attributes = maybe.FromJust();
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
+ CONVERT_SMI_ARG_CHECKED(flags, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 2);
- PropertyAttributes attr =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
- // Set the value if the property is either missing, or the property attributes
- // allow setting the value without invoking an accessor.
- if (it.IsFound()) {
- // Ignore if we can't reconfigure the value.
- if ((old_attributes & DONT_DELETE) != 0) {
- if ((old_attributes & READ_ONLY) != 0 ||
- it.state() == LookupIterator::ACCESSOR) {
- return *value;
- }
- attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY);
- }
- }
+ Handle<TypeFeedbackVector> feedback_vector(closure->feedback_vector(),
+ isolate);
+ return DeclareGlobals(isolate, pairs, flags, feedback_vector);
+}
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
+RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- return *value;
+ Handle<JSGlobalObject> global(isolate->global_object());
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Object::SetProperty(global, name, value, language_mode));
}
-
namespace {
-Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
- Handle<Object> initial_value,
- PropertyAttributes attr) {
- // Declarations are always made in a function, eval or script context, or
- // a declaration block scope.
- // In the case of eval code, the context passed is the context of the caller,
- // which may be some nested context and not the declaration context.
+Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
+ Handle<Object> value) {
+ // Declarations are always made in a function, native, or script context, or
+ // a declaration block scope. Since this is called from eval, the context
+ // passed is the context of the caller, which may be some nested context and
+ // not the declaration context.
Handle<Context> context_arg(isolate->context(), isolate);
Handle<Context> context(context_arg->declaration_context(), isolate);
- // TODO(verwaest): Unify the encoding indicating "var" with DeclareGlobals.
- bool is_var = *initial_value == NULL;
- bool is_const = initial_value->IsTheHole();
- bool is_function = initial_value->IsJSFunction();
- DCHECK_EQ(1,
- BoolToInt(is_var) + BoolToInt(is_const) + BoolToInt(is_function));
+ DCHECK(context->IsFunctionContext() || context->IsNativeContext() ||
+ context->IsScriptContext() ||
+ (context->IsBlockContext() && context->has_extension()));
+
+ bool is_function = value->IsJSFunction();
+ bool is_var = !is_function;
+ DCHECK(!is_var || value->IsUndefined(isolate));
int index;
PropertyAttributes attributes;
- BindingFlags binding_flags;
-
- if ((attr & EVAL_DECLARED) != 0) {
- // Check for a conflict with a lexically scoped variable
- context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes,
- &binding_flags);
- if (attributes != ABSENT &&
- (binding_flags == MUTABLE_CHECK_INITIALIZED ||
- binding_flags == IMMUTABLE_CHECK_INITIALIZED ||
- binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY)) {
- return ThrowRedeclarationError(isolate, name);
- }
- attr = static_cast<PropertyAttributes>(attr & ~EVAL_DECLARED);
+ InitializationFlag init_flag;
+ VariableMode mode;
+
+ // Check for a conflict with a lexically scoped variable
+ context_arg->Lookup(name, LEXICAL_TEST, &index, &attributes, &init_flag,
+ &mode);
+ if (attributes != ABSENT && IsLexicalVariableMode(mode)) {
+ // ES#sec-evaldeclarationinstantiation 5.a.i.1:
+ // If varEnvRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ // ES#sec-evaldeclarationinstantiation 5.d.ii.2.a.i:
+ // Throw a SyntaxError exception.
+ return ThrowRedeclarationError(isolate, name,
+ RedeclarationType::kSyntaxError);
}
Handle<Object> holder = context->Lookup(name, DONT_FOLLOW_CHAINS, &index,
- &attributes, &binding_flags);
- if (holder.is_null()) {
- // In case of JSProxy, an exception might have been thrown.
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- }
+ &attributes, &init_flag, &mode);
+ DCHECK(!isolate->has_pending_exception());
Handle<JSObject> object;
- Handle<Object> value =
- is_function ? initial_value
- : Handle<Object>::cast(isolate->factory()->undefined_value());
- // TODO(verwaest): This case should probably not be covered by this function,
- // but by DeclareGlobals instead.
if (attributes != ABSENT && holder->IsJSGlobalObject()) {
- return DeclareGlobals(isolate, Handle<JSGlobalObject>::cast(holder), name,
- value, attr, is_var, is_const, is_function);
+ // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+ // If fnDefinable is false, throw a TypeError exception.
+ return DeclareGlobal(isolate, Handle<JSGlobalObject>::cast(holder), name,
+ value, NONE, is_var, is_function,
+ RedeclarationType::kTypeError);
}
if (context_arg->extension()->IsJSGlobalObject()) {
Handle<JSGlobalObject> global(
JSGlobalObject::cast(context_arg->extension()), isolate);
- return DeclareGlobals(isolate, global, name, value, attr, is_var, is_const,
- is_function);
+ return DeclareGlobal(isolate, global, name, value, NONE, is_var,
+ is_function, RedeclarationType::kTypeError);
} else if (context->IsScriptContext()) {
DCHECK(context->global_object()->IsJSGlobalObject());
Handle<JSGlobalObject> global(
JSGlobalObject::cast(context->global_object()), isolate);
- return DeclareGlobals(isolate, global, name, value, attr, is_var, is_const,
- is_function);
+ return DeclareGlobal(isolate, global, name, value, NONE, is_var,
+ is_function, RedeclarationType::kTypeError);
}
if (attributes != ABSENT) {
- // The name was declared before; check for conflicting re-declarations.
- if (is_const || (attributes & READ_ONLY) != 0) {
- return ThrowRedeclarationError(isolate, name);
- }
+ DCHECK_EQ(NONE, attributes);
// Skip var re-declarations.
if (is_var) return isolate->heap()->undefined_value();
@@ -289,7 +281,7 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
DCHECK(is_function);
if (index != Context::kNotFound) {
DCHECK(holder.is_identical_to(context));
- context->set(index, *initial_value);
+ context->set(index, *value);
return isolate->heap()->undefined_value();
}
@@ -318,113 +310,35 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
}
RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- object, name, value, attr));
+ object, name, value, NONE));
return isolate->heap()->undefined_value();
}
} // namespace
-
-RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
+RUNTIME_FUNCTION(Runtime_DeclareEvalFunction) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 1);
- CONVERT_ARG_HANDLE_CHECKED(Smi, property_attributes, 2);
-
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(property_attributes->value());
- return DeclareLookupSlot(isolate, name, initial_value, attributes);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ return DeclareEvalHelper(isolate, name, value);
}
-
-RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
+RUNTIME_FUNCTION(Runtime_DeclareEvalVar) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- DCHECK(!value->IsTheHole());
- // Initializations are always done in a function or native context.
- CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 1);
- Handle<Context> context(context_arg->declaration_context());
- CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
-
- int index;
- PropertyAttributes attributes;
- ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
- if (holder.is_null()) {
- // In case of JSProxy, an exception might have been thrown.
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- }
-
- if (index != Context::kNotFound) {
- DCHECK(holder->IsContext());
- // Property was found in a context. Perform the assignment if the constant
- // was uninitialized.
- Handle<Context> context = Handle<Context>::cast(holder);
- DCHECK((attributes & READ_ONLY) != 0);
- if (context->get(index)->IsTheHole()) context->set(index, *value);
- return *value;
- }
-
- PropertyAttributes attr =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
-
- // Strict mode handling not needed (legacy const is disallowed in strict
- // mode).
-
- // The declared const was configurable, and may have been deleted in the
- // meanwhile. If so, re-introduce the variable in the context extension.
- if (attributes == ABSENT) {
- Handle<Context> declaration_context(context_arg->declaration_context());
- if (declaration_context->IsScriptContext()) {
- holder = handle(declaration_context->global_object(), isolate);
- } else {
- holder = handle(declaration_context->extension_object(), isolate);
- DCHECK(!holder.is_null());
- }
- CHECK(holder->IsJSObject());
- } else {
- // For JSContextExtensionObjects, the initializer can be run multiple times
- // if in a for loop: for (var i = 0; i < 2; i++) { const x = i; }. Only the
- // first assignment should go through. For JSGlobalObjects, additionally any
- // code can run in between that modifies the declared property.
- DCHECK(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject());
-
- LookupIterator it(holder, name, Handle<JSReceiver>::cast(holder),
- LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.IsJust()) return isolate->heap()->exception();
- PropertyAttributes old_attributes = maybe.FromJust();
-
- // Ignore if we can't reconfigure the value.
- if ((old_attributes & DONT_DELETE) != 0) {
- if ((old_attributes & READ_ONLY) != 0 ||
- it.state() == LookupIterator::ACCESSOR) {
- return *value;
- }
- attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY);
- }
- }
-
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- Handle<JSObject>::cast(holder), name, value, attr));
-
- return *value;
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ return DeclareEvalHelper(isolate, name,
+ isolate->factory()->undefined_value());
}
-
namespace {
// Find the arguments of the JavaScript function invocation that called
// into C++ code. Collect these in a newly allocated array of handles.
-base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
- int* total_argc) {
+std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
+ int* total_argc) {
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
@@ -449,7 +363,7 @@ base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
argument_count--;
*total_argc = argument_count;
- base::SmartArrayPointer<Handle<Object>> param_data(
+ std::unique_ptr<Handle<Object>[]> param_data(
NewArray<Handle<Object>>(*total_argc));
bool should_deoptimize = false;
for (int i = 0; i < argument_count; i++) {
@@ -470,7 +384,7 @@ base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
int args_count = frame->ComputeParametersCount();
*total_argc = args_count;
- base::SmartArrayPointer<Handle<Object>> param_data(
+ std::unique_ptr<Handle<Object>[]> param_data(
NewArray<Handle<Object>>(*total_argc));
for (int i = 0; i < args_count; i++) {
Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
@@ -480,7 +394,6 @@ base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
}
}
-
template <typename T>
Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
T parameters, int argument_count) {
@@ -599,7 +512,7 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
// This generic runtime function can also be used when the caller has been
// inlined; we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
+ std::unique_ptr<Handle<Object>[]> arguments =
GetCallerArguments(isolate, &argument_count);
HandleArguments argument_getter(arguments.get());
return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
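
The SmartArrayPointer-to-std::unique_ptr swap above relies on the array specialization, which releases its buffer with delete[] rather than delete. A minimal standalone sketch of the same ownership hand-off (illustrative names, not code from this patch):

#include <memory>

std::unique_ptr<int[]> MakeBuffer(int n) {
  std::unique_ptr<int[]> data(new int[n]);  // T[] specialization => delete[]
  for (int i = 0; i < n; i++) data[i] = i;
  return data;  // ownership moves to the caller, as in GetCallerArguments
}
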
@@ -613,7 +526,7 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
// This generic runtime function can also be used when the caller has been
// inlined; we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
+ std::unique_ptr<Handle<Object>[]> arguments =
GetCallerArguments(isolate, &argument_count);
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -639,7 +552,7 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
// This generic runtime function can also be used when the caller has been
// inlined; we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
+ std::unique_ptr<Handle<Object>[]> arguments =
GetCallerArguments(isolate, &argument_count);
int num_elements = std::max(0, argument_count - start_index);
Handle<JSObject> result =
@@ -663,12 +576,6 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
Object** parameters = reinterpret_cast<Object**>(args[1]);
CONVERT_SMI_ARG_CHECKED(argument_count, 2);
-#ifdef DEBUG
- // This runtime function does not materialize the correct arguments when the
- // caller has been inlined, better make sure we are not hitting that case.
- JavaScriptFrameIterator it(isolate);
- DCHECK(!it.frame()->HasInlinedFrames());
-#endif // DEBUG
ParameterArguments argument_getter(parameters);
return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
}
@@ -705,17 +612,27 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
ScriptContextTable::LookupResult lookup;
if (ScriptContextTable::Lookup(script_context, name, &lookup)) {
if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
- return ThrowRedeclarationError(isolate, name);
+ // ES#sec-globaldeclarationinstantiation 5.b:
+ // If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ return ThrowRedeclarationError(isolate, name,
+ RedeclarationType::kSyntaxError);
}
}
if (IsLexicalVariableMode(mode)) {
LookupIterator it(global_object, name, global_object,
- LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
if (!maybe.IsJust()) return isolate->heap()->exception();
if ((maybe.FromJust() & DONT_DELETE) != 0) {
- return ThrowRedeclarationError(isolate, name);
+ // ES#sec-globaldeclarationinstantiation 5.a:
+ // If envRec.HasVarDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ // ES#sec-globaldeclarationinstantiation 5.d:
+ // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+ return ThrowRedeclarationError(isolate, name,
+ RedeclarationType::kSyntaxError);
}
JSGlobalObject::InvalidatePropertyCell(global_object, name);
@@ -812,100 +729,6 @@ RUNTIME_FUNCTION(Runtime_PushBlockContext) {
}
-RUNTIME_FUNCTION(Runtime_IsJSModule) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSModule());
-}
-
-
-RUNTIME_FUNCTION(Runtime_PushModuleContext) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(index, 0);
-
- if (!args[1]->IsScopeInfo()) {
- // Module already initialized. Find hosting context and retrieve context.
- Context* host = Context::cast(isolate->context())->script_context();
- Context* context = Context::cast(host->get(index));
- DCHECK(context->previous() == isolate->context());
- isolate->set_context(context);
- return context;
- }
-
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
-
- // Allocate module context.
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
- Handle<Context> context = factory->NewModuleContext(scope_info);
- Handle<JSModule> module = factory->NewJSModule(context, scope_info);
- context->set_module(*module);
- Context* previous = isolate->context();
- context->set_previous(previous);
- context->set_closure(previous->closure());
- context->set_native_context(previous->native_context());
- isolate->set_context(*context);
-
- // Find hosting scope and initialize internal variable holding module there.
- previous->script_context()->set(index, *context);
-
- return *context;
-}
-
-
-RUNTIME_FUNCTION(Runtime_DeclareModules) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
- Context* host_context = isolate->context();
-
- for (int i = 0; i < descriptions->length(); ++i) {
- Handle<ModuleInfo> description(ModuleInfo::cast(descriptions->get(i)));
- int host_index = description->host_index();
- Handle<Context> context(Context::cast(host_context->get(host_index)));
- Handle<JSModule> module(context->module());
-
- for (int j = 0; j < description->length(); ++j) {
- Handle<String> name(description->name(j));
- VariableMode mode = description->mode(j);
- int index = description->index(j);
- switch (mode) {
- case VAR:
- case LET:
- case CONST:
- case CONST_LEGACY:
- case IMPORT: {
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? FROZEN : SEALED;
- Handle<AccessorInfo> info =
- Accessors::MakeModuleExport(name, index, attr);
- Handle<Object> result =
- JSObject::SetAccessor(module, info).ToHandleChecked();
- DCHECK(!result->IsUndefined());
- USE(result);
- break;
- }
- case TEMPORARY:
- case DYNAMIC:
- case DYNAMIC_GLOBAL:
- case DYNAMIC_LOCAL:
- UNREACHABLE();
- }
- }
-
- if (JSObject::PreventExtensions(module, Object::THROW_ON_ERROR)
- .IsNothing()) {
- DCHECK(false);
- }
- }
-
- DCHECK(!isolate->has_pending_exception());
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -913,9 +736,10 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
int index;
PropertyAttributes attributes;
- BindingFlags flags;
+ InitializationFlag flag;
+ VariableMode mode;
Handle<Object> holder = isolate->context()->Lookup(
- name, FOLLOW_CHAINS, &index, &attributes, &flags);
+ name, FOLLOW_CHAINS, &index, &attributes, &flag, &mode);
// If the slot was not found the result is true.
if (holder.is_null()) {
@@ -948,9 +772,10 @@ MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
int index;
PropertyAttributes attributes;
- BindingFlags flags;
+ InitializationFlag flag;
+ VariableMode mode;
Handle<Object> holder = isolate->context()->Lookup(
- name, FOLLOW_CHAINS, &index, &attributes, &flags);
+ name, FOLLOW_CHAINS, &index, &attributes, &flag, &mode);
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
if (index != Context::kNotFound) {
@@ -960,31 +785,14 @@ MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
Handle<Object> receiver = isolate->factory()->undefined_value();
Handle<Object> value = handle(Context::cast(*holder)->get(index), isolate);
// Check for uninitialized bindings.
- switch (flags) {
- case MUTABLE_CHECK_INITIALIZED:
- case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
- if (value->IsTheHole()) {
- THROW_NEW_ERROR(isolate,
- NewReferenceError(MessageTemplate::kNotDefined, name),
- Object);
- }
- // FALLTHROUGH
- case IMMUTABLE_CHECK_INITIALIZED:
- if (value->IsTheHole()) {
- DCHECK(attributes & READ_ONLY);
- value = isolate->factory()->undefined_value();
- }
- // FALLTHROUGH
- case MUTABLE_IS_INITIALIZED:
- case IMMUTABLE_IS_INITIALIZED:
- case IMMUTABLE_IS_INITIALIZED_HARMONY:
- DCHECK(!value->IsTheHole());
- if (receiver_return) *receiver_return = receiver;
- return value;
- case MISSING_BINDING:
- break;
+ if (flag == kNeedsInitialization && value->IsTheHole(isolate)) {
+ THROW_NEW_ERROR(isolate,
+ NewReferenceError(MessageTemplate::kNotDefined, name),
+ Object);
}
- UNREACHABLE();
+ DCHECK(!value->IsTheHole(isolate));
+ if (receiver_return) *receiver_return = receiver;
+ return value;
}
// Otherwise, if the slot was found the holder is a context extension
@@ -1024,10 +832,8 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlot) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value, LoadLookupSlot(name, Object::THROW_ON_ERROR));
- return *value;
+ RETURN_RESULT_OR_FAILURE(isolate,
+ LoadLookupSlot(name, Object::THROW_ON_ERROR));
}
@@ -1035,10 +841,7 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value, LoadLookupSlot(name, Object::DONT_THROW));
- return *value;
+ RETURN_RESULT_OR_FAILURE(isolate, LoadLookupSlot(name, Object::DONT_THROW));
}
@@ -1065,9 +868,10 @@ MaybeHandle<Object> StoreLookupSlot(Handle<String> name, Handle<Object> value,
int index;
PropertyAttributes attributes;
- BindingFlags flags;
+ InitializationFlag flag;
+ VariableMode mode;
Handle<Object> holder =
- context->Lookup(name, FOLLOW_CHAINS, &index, &attributes, &flags);
+ context->Lookup(name, FOLLOW_CHAINS, &index, &attributes, &flag, &mode);
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
if (isolate->has_pending_exception()) return MaybeHandle<Object>();
@@ -1075,8 +879,7 @@ MaybeHandle<Object> StoreLookupSlot(Handle<String> name, Handle<Object> value,
// The property was found in a context slot.
if (index != Context::kNotFound) {
- if ((flags == MUTABLE_CHECK_INITIALIZED ||
- flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
+ if (flag == kNeedsInitialization &&
Handle<Context>::cast(holder)->is_the_hole(index)) {
THROW_NEW_ERROR(isolate,
NewReferenceError(MessageTemplate::kNotDefined, name),
@@ -1123,9 +926,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- StoreLookupSlot(name, value, SLOPPY));
- return *value;
+ RETURN_RESULT_OR_FAILURE(isolate, StoreLookupSlot(name, value, SLOPPY));
}
@@ -1134,9 +935,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- StoreLookupSlot(name, value, STRICT));
- return *value;
+ RETURN_RESULT_OR_FAILURE(isolate, StoreLookupSlot(name, value, STRICT));
}
} // namespace internal
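
Earlier in this file's hunks, the three-argument DeclareLookupSlot entry point is replaced by two slimmer runtime entries, DeclareEvalVar and DeclareEvalFunction, which share DeclareEvalHelper. A toy of that shape (only the names mirror the patch; the bodies are placeholders):

#include <string>

static int DeclareEvalHelper(const std::string& name, int value) {
  // The real helper declares `name` in the enclosing eval scope; elided here.
  return value;
}

int DeclareEvalVar(const std::string& name) {
  return DeclareEvalHelper(name, 0);  // vars start out undefined
}

int DeclareEvalFunction(const std::string& name, int value) {
  return DeclareEvalHelper(name, value);
}
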
diff --git a/deps/v8/src/runtime/runtime-simd.cc b/deps/v8/src/runtime/runtime-simd.cc
index 9e5614242a..9542a4420a 100644
--- a/deps/v8/src/runtime/runtime-simd.cc
+++ b/deps/v8/src/runtime/runtime-simd.cc
@@ -26,6 +26,7 @@ static bool CanCast(F from) {
// A float can't represent 2^31 - 1 or 2^32 - 1 exactly, so promote the limits
// to double. Otherwise, the limit is truncated and numbers like 2^31 or 2^32
// get through, causing any static_cast to be undefined.
+ from = trunc(from);
return from >= static_cast<double>(std::numeric_limits<T>::min()) &&
from <= static_cast<double>(std::numeric_limits<T>::max());
}
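
A standalone re-creation of this guard with the added truncation, assuming the target type is int32_t (a sketch, not the literal V8 code): fractional inputs are classified by their integer part, so the subsequent static_cast never sees an out-of-range value.

#include <cmath>
#include <cstdint>
#include <limits>

bool CanCastToInt32(double from) {
  from = std::trunc(from);  // e.g. 2147483647.5 -> 2147483647, which passes
  return from >= static_cast<double>(std::numeric_limits<int32_t>::min()) &&
         from <= static_cast<double>(std::numeric_limits<int32_t>::max());
}
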
@@ -168,9 +169,19 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
// Utility macros.
-#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes) \
- CONVERT_INT32_ARG_CHECKED(name, index); \
- RUNTIME_ASSERT(name >= 0 && name < lanes);
+// TODO(gdeepti): Fix to use ToNumber conversion once polyfill is updated.
+#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes) \
+ Handle<Object> name_object = args.at<Object>(index); \
+ if (!name_object->IsNumber()) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex)); \
+ } \
+ double number = name_object->Number(); \
+ if (number < 0 || number >= lanes || !IsInt32Double(number)) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
+ } \
+ uint32_t name = static_cast<uint32_t>(number);
#define CONVERT_SIMD_ARG_HANDLE_THROW(Type, name, index) \
Handle<Type> name; \
@@ -217,8 +228,10 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
// Common functions.
-#define GET_NUMERIC_ARG(lane_type, name, index) \
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(a, index); \
+#define GET_NUMERIC_ARG(lane_type, name, index) \
+ Handle<Object> a; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, a, Object::ToNumber(args.at<Object>(index))); \
name = ConvertNumber<lane_type>(a->Number());
#define GET_BOOLEAN_ARG(lane_type, name, index) \
@@ -395,10 +408,14 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
FUNCTION(Uint16x8, uint16_t, 16, 8) \
FUNCTION(Uint8x16, uint8_t, 8, 16)
-#define CONVERT_SHIFT_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
- int32_t signed_shift = 0; \
- RUNTIME_ASSERT(args[index]->ToInt32(&signed_shift)); \
+#define CONVERT_SHIFT_ARG_CHECKED(name, index) \
+ Handle<Object> name_object = args.at<Object>(index); \
+ if (!name_object->IsNumber()) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
+ } \
+ int32_t signed_shift = 0; \
+ args[index]->ToInt32(&signed_shift); \
uint32_t name = bit_cast<uint32_t>(signed_shift);
#define SIMD_LSL_FUNCTION(type, lane_type, lane_bits, lane_count) \
@@ -409,31 +426,29 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
lane_type lanes[kLaneCount] = {0}; \
- if (shift < lane_bits) { \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = a->get_lane(i) << shift; \
- } \
+ shift &= lane_bits - 1; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = a->get_lane(i) << shift; \
} \
Handle<type> result = isolate->factory()->New##type(lanes); \
return *result; \
}
-#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
- static const int kLaneCount = lane_count; \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 2); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
- CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
- lane_type lanes[kLaneCount] = {0}; \
- if (shift < lane_bits) { \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = static_cast<lane_type>( \
- bit_cast<lane_type>(a->get_lane(i)) >> shift); \
- } \
- } \
- Handle<type> result = isolate->factory()->New##type(lanes); \
- return *result; \
+#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
+ static const int kLaneCount = lane_count; \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 2); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
+ CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
+ lane_type lanes[kLaneCount] = {0}; \
+ shift &= lane_bits - 1; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = static_cast<lane_type>(bit_cast<lane_type>(a->get_lane(i)) >> \
+ shift); \
+ } \
+ Handle<type> result = isolate->factory()->New##type(lanes); \
+ return *result; \
}
#define SIMD_ASR_FUNCTION(type, lane_type, lane_bits, lane_count) \
@@ -443,7 +458,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
DCHECK(args.length() == 2); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
- if (shift >= lane_bits) shift = lane_bits - 1; \
+ shift &= lane_bits - 1; \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
int64_t shifted = static_cast<int64_t>(a->get_lane(i)) >> shift; \
@@ -785,8 +800,10 @@ SIMD_SIGNED_TYPES(SIMD_NEG_FUNCTION)
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
from_ctype a_value = a->get_lane(i); \
- if (a_value != a_value) a_value = 0; \
- RUNTIME_ASSERT(CanCast<lane_type>(a_value)); \
+ if (a_value != a_value || !CanCast<lane_type>(a_value)) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewRangeError(MessageTemplate::kInvalidSimdLaneValue)); \
+ } \
lanes[i] = static_cast<lane_type>(a_value); \
} \
Handle<type> result = isolate->factory()->New##type(lanes); \
@@ -863,48 +880,63 @@ SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
FUNCTION(Int32x4, int32_t, 4) \
FUNCTION(Uint32x4, uint32_t, 4)
+#define SIMD_COERCE_INDEX(name, i) \
+ Handle<Object> length_object, number_object; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
+ isolate, length_object, Object::ToLength(isolate, args.at<Object>(i))); \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_object, \
+ Object::ToNumber(args.at<Object>(i))); \
+ if (number_object->Number() != length_object->Number()) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex)); \
+ } \
+ int32_t name = number_object->Number();
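
SIMD_COERCE_INDEX accepts an index only when ToLength and ToNumber agree on its value, which rejects fractional indices such as 2.5 (ToLength yields 2, ToNumber yields 2.5). A hypothetical standalone version of that predicate (IsValidSimdIndex is not a V8 name):

#include <algorithm>
#include <cmath>

bool IsValidSimdIndex(double number) {
  // Approximates ToLength: truncate, then clamp to [0, 2^53 - 1].
  double length = std::min(std::max(std::trunc(number), 0.0),
                           9007199254740991.0);
  return number == length;  // 2.0 passes; 2.5 and -1 do not
}
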
// Common Load and Store Functions
-#define SIMD_LOAD(type, lane_type, lane_count, count, result) \
- static const int kLaneCount = lane_count; \
- DCHECK(args.length() == 2); \
- CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
- CONVERT_INT32_ARG_CHECKED(index, 1) \
- size_t bpe = tarray->element_size(); \
- uint32_t bytes = count * sizeof(lane_type); \
- size_t byte_length = NumberToSize(isolate, tarray->byte_length()); \
- RUNTIME_ASSERT(index >= 0 && index * bpe + bytes <= byte_length); \
- size_t tarray_offset = NumberToSize(isolate, tarray->byte_offset()); \
- uint8_t* tarray_base = \
- static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
- tarray_offset; \
- lane_type lanes[kLaneCount] = {0}; \
- memcpy(lanes, tarray_base + index * bpe, bytes); \
+#define SIMD_LOAD(type, lane_type, lane_count, count, result) \
+ static const int kLaneCount = lane_count; \
+ DCHECK(args.length() == 2); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
+ SIMD_COERCE_INDEX(index, 1); \
+ size_t bpe = tarray->element_size(); \
+ uint32_t bytes = count * sizeof(lane_type); \
+ size_t byte_length = NumberToSize(tarray->byte_length()); \
+ if (index < 0 || index * bpe + bytes > byte_length) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
+ } \
+ size_t tarray_offset = NumberToSize(tarray->byte_offset()); \
+ uint8_t* tarray_base = \
+ static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
+ tarray_offset; \
+ lane_type lanes[kLaneCount] = {0}; \
+ memcpy(lanes, tarray_base + index * bpe, bytes); \
Handle<type> result = isolate->factory()->New##type(lanes);
-
-#define SIMD_STORE(type, lane_type, lane_count, count, a) \
- static const int kLaneCount = lane_count; \
- DCHECK(args.length() == 3); \
- CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
- CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2); \
- CONVERT_INT32_ARG_CHECKED(index, 1) \
- size_t bpe = tarray->element_size(); \
- uint32_t bytes = count * sizeof(lane_type); \
- size_t byte_length = NumberToSize(isolate, tarray->byte_length()); \
- RUNTIME_ASSERT(index >= 0 && index * bpe + bytes <= byte_length); \
- size_t tarray_offset = NumberToSize(isolate, tarray->byte_offset()); \
- uint8_t* tarray_base = \
- static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
- tarray_offset; \
- lane_type lanes[kLaneCount]; \
- for (int i = 0; i < kLaneCount; i++) { \
- lanes[i] = a->get_lane(i); \
- } \
+#define SIMD_STORE(type, lane_type, lane_count, count, a) \
+ static const int kLaneCount = lane_count; \
+ DCHECK(args.length() == 3); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
+ CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2); \
+ SIMD_COERCE_INDEX(index, 1); \
+ size_t bpe = tarray->element_size(); \
+ uint32_t bytes = count * sizeof(lane_type); \
+ size_t byte_length = NumberToSize(tarray->byte_length()); \
+ if (index < 0 || byte_length < index * bpe + bytes) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
+ } \
+ size_t tarray_offset = NumberToSize(tarray->byte_offset()); \
+ uint8_t* tarray_base = \
+ static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
+ tarray_offset; \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = a->get_lane(i); \
+ } \
memcpy(tarray_base + index * bpe, lanes, bytes);
-
#define SIMD_LOAD_FUNCTION(type, lane_type, lane_count) \
RUNTIME_FUNCTION(Runtime_##type##Load) { \
HandleScope scope(isolate); \
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 6786fa99fb..517513ed4e 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -5,58 +5,13 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/conversions-inl.h"
-#include "src/isolate-inl.h"
#include "src/regexp/jsregexp-inl.h"
-#include "src/regexp/jsregexp.h"
#include "src/string-builder.h"
#include "src/string-search.h"
namespace v8 {
namespace internal {
-
-// Perform string match of pattern on subject, starting at start index.
-// Caller must ensure that 0 <= start_index <= sub->length(),
-// and should check that pat->length() + start_index <= sub->length().
-int StringMatch(Isolate* isolate, Handle<String> sub, Handle<String> pat,
- int start_index) {
- DCHECK(0 <= start_index);
- DCHECK(start_index <= sub->length());
-
- int pattern_length = pat->length();
- if (pattern_length == 0) return start_index;
-
- int subject_length = sub->length();
- if (start_index + pattern_length > subject_length) return -1;
-
- sub = String::Flatten(sub);
- pat = String::Flatten(pat);
-
- DisallowHeapAllocation no_gc; // ensure vectors stay valid
- // Extract flattened substrings of cons strings before getting encoding.
- String::FlatContent seq_sub = sub->GetFlatContent();
- String::FlatContent seq_pat = pat->GetFlatContent();
-
- // dispatch on type of strings
- if (seq_pat.IsOneByte()) {
- Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
- if (seq_sub.IsOneByte()) {
- return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
- start_index);
- }
- return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector,
- start_index);
- }
- Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
- if (seq_sub.IsOneByte()) {
- return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
- start_index);
- }
- return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector, start_index);
-}
-
-
// This may return an empty MaybeHandle if an exception is thrown or
// we abort due to reaching the recursion limit.
MaybeHandle<String> StringReplaceOneCharWithString(
@@ -88,7 +43,7 @@ MaybeHandle<String> StringReplaceOneCharWithString(
return subject;
} else {
- int index = StringMatch(isolate, subject, search, 0);
+ int index = String::IndexOf(isolate, subject, search, 0);
if (index == -1) return subject;
*found = true;
Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
@@ -143,8 +98,8 @@ RUNTIME_FUNCTION(Runtime_StringIndexOf) {
uint32_t start_index = 0;
if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
- RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
- int position = StringMatch(isolate, sub, pat, start_index);
+ CHECK(start_index <= static_cast<uint32_t>(sub->length()));
+ int position = String::IndexOf(isolate, sub, pat, start_index);
return Smi::FromInt(position);
}
@@ -295,15 +250,19 @@ RUNTIME_FUNCTION(Runtime_SubString) {
CONVERT_SMI_ARG_CHECKED(to_number, 2);
start = from_number;
end = to_number;
- } else {
+ } else if (args[1]->IsNumber() && args[2]->IsNumber()) {
CONVERT_DOUBLE_ARG_CHECKED(from_number, 1);
CONVERT_DOUBLE_ARG_CHECKED(to_number, 2);
start = FastD2IChecked(from_number);
end = FastD2IChecked(to_number);
+ } else {
+ return isolate->ThrowIllegalOperation();
+ }
+ // The following condition is intentionally robust because the SubStringStub
+ // delegates here and we test this in cctest/test-strings/RobustSubStringStub.
+ if (end < start || start < 0 || end > string->length()) {
+ return isolate->ThrowIllegalOperation();
}
- RUNTIME_ASSERT(end >= start);
- RUNTIME_ASSERT(start >= 0);
- RUNTIME_ASSERT(end <= string->length());
isolate->counters()->sub_string_runtime()->Increment();
return *isolate->factory()->NewSubString(string, start, end);
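
The rewritten guard folds the three removed RUNTIME_ASSERTs into one branch that rejects every invalid combination at once. As a standalone predicate (a sketch; the real code throws instead of returning a bool):

bool IsValidSubStringRange(int start, int end, int length) {
  return 0 <= start && start <= end && end <= length;
}
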
@@ -313,19 +272,23 @@ RUNTIME_FUNCTION(Runtime_SubString) {
RUNTIME_FUNCTION(Runtime_StringAdd) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj1, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj2, 1);
isolate->counters()->string_add_runtime()->Increment();
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewConsString(str1, str2));
- return *result;
+ MaybeHandle<String> maybe_str1(Object::ToString(isolate, obj1));
+ MaybeHandle<String> maybe_str2(Object::ToString(isolate, obj2));
+ Handle<String> str1;
+ Handle<String> str2;
+ maybe_str1.ToHandle(&str1);
+ maybe_str2.ToHandle(&str2);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ isolate->factory()->NewConsString(str1, str2));
}
RUNTIME_FUNCTION(Runtime_InternalizeString) {
HandleScope handles(isolate);
- RUNTIME_ASSERT(args.length() == 1);
+ DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
return *isolate->factory()->InternalizeString(string);
}
@@ -339,7 +302,7 @@ RUNTIME_FUNCTION(Runtime_StringMatch) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
- RUNTIME_ASSERT(regexp_info->HasFastObjectElements());
+ CHECK(regexp_info->HasFastObjectElements());
RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return isolate->heap()->exception();
@@ -436,15 +399,14 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
size_t actual_array_length = 0;
- RUNTIME_ASSERT(
- TryNumberToSize(isolate, array->length(), &actual_array_length));
- RUNTIME_ASSERT(array_length >= 0);
- RUNTIME_ASSERT(static_cast<size_t>(array_length) <= actual_array_length);
+ CHECK(TryNumberToSize(array->length(), &actual_array_length));
+ CHECK(array_length >= 0);
+ CHECK(static_cast<size_t>(array_length) <= actual_array_length);
// This assumption is used by the slice encoding in one or two smis.
DCHECK(Smi::kMaxValue >= String::kMaxLength);
- RUNTIME_ASSERT(array->HasFastElements());
+ CHECK(array->HasFastElements());
JSObject::EnsureCanContainHeapObjectElements(array);
int special_length = special->length();
@@ -505,8 +467,8 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
}
CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
- RUNTIME_ASSERT(array->HasFastObjectElements());
- RUNTIME_ASSERT(array_length >= 0);
+ CHECK(array->HasFastObjectElements());
+ CHECK(array_length >= 0);
Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
if (fixed_array->length() < array_length) {
@@ -517,12 +479,12 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
return isolate->heap()->empty_string();
} else if (array_length == 1) {
Object* first = fixed_array->get(0);
- RUNTIME_ASSERT(first->IsString());
+ CHECK(first->IsString());
return first;
}
int separator_length = separator->length();
- RUNTIME_ASSERT(separator_length > 0);
+ CHECK(separator_length > 0);
int max_nof_separators =
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
@@ -531,7 +493,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
int length = (array_length - 1) * separator_length;
for (int i = 0; i < array_length; i++) {
Object* element_obj = fixed_array->get(i);
- RUNTIME_ASSERT(element_obj->IsString());
+ CHECK(element_obj->IsString());
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
@@ -553,7 +515,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
uc16* end = sink + length;
#endif
- RUNTIME_ASSERT(fixed_array->get(0)->IsString());
+ CHECK(fixed_array->get(0)->IsString());
String* first = String::cast(fixed_array->get(0));
String* separator_raw = *separator;
@@ -566,7 +528,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
String::WriteToFlat(separator_raw, sink, 0, separator_length);
sink += separator_length;
- RUNTIME_ASSERT(fixed_array->get(i)->IsString());
+ CHECK(fixed_array->get(i)->IsString());
String* element = String::cast(fixed_array->get(i));
int element_length = element->length();
DCHECK(sink + element_length <= end);
@@ -645,18 +607,18 @@ RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
// elements_array is a fast-mode JSArray of alternating positions
// (increasing order) and strings.
- RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
+ CHECK(elements_array->HasFastSmiOrObjectElements());
// array_length is length of original array (used to add separators);
// separator is string to put between elements. Assumed to be non-empty.
- RUNTIME_ASSERT(array_length > 0);
+ CHECK(array_length > 0);
// Find total length of join result.
int string_length = 0;
bool is_one_byte = separator->IsOneByteRepresentation();
bool overflow = false;
CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
- RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
- RUNTIME_ASSERT((elements_length & 1) == 0); // Even length.
+ CHECK(elements_length <= elements_array->elements()->length());
+ CHECK((elements_length & 1) == 0); // Even length.
FixedArray* elements = FixedArray::cast(elements_array->elements());
{
DisallowHeapAllocation no_gc;
@@ -1062,7 +1024,7 @@ MUST_USE_RESULT static Object* ConvertCase(
}
Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
- if (answer->IsException() || answer->IsString()) return answer;
+ if (answer->IsException(isolate) || answer->IsString()) return answer;
DCHECK(answer->IsSmi());
length = Smi::cast(answer)->value();
@@ -1080,7 +1042,7 @@ MUST_USE_RESULT static Object* ConvertCase(
RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
return ConvertCase(s, isolate, isolate->runtime_state()->to_lower_mapping());
}
@@ -1088,72 +1050,11 @@ RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
return ConvertCase(s, isolate, isolate->runtime_state()->to_upper_mapping());
}
-
-RUNTIME_FUNCTION(Runtime_StringTrim) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
-
- string = String::Flatten(string);
- int length = string->length();
-
- int left = 0;
- UnicodeCache* unicode_cache = isolate->unicode_cache();
- if (trimLeft) {
- while (left < length &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(left))) {
- left++;
- }
- }
-
- int right = length;
- if (trimRight) {
- while (
- right > left &&
- unicode_cache->IsWhiteSpaceOrLineTerminator(string->Get(right - 1))) {
- right--;
- }
- }
-
- return *isolate->factory()->NewSubString(string, left, right);
-}
-
-
-RUNTIME_FUNCTION(Runtime_TruncateString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0);
- CONVERT_INT32_ARG_CHECKED(new_length, 1);
- RUNTIME_ASSERT(new_length >= 0);
- return *SeqString::Truncate(string, new_length);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_INT32_ARG_CHECKED(length, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
- if (length == 0) return isolate->heap()->empty_string();
- Handle<String> result;
- if (is_one_byte) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawOneByteString(length));
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(length));
- }
- return *result;
-}
-
RUNTIME_FUNCTION(Runtime_StringLessThan) {
HandleScope handle_scope(isolate);
DCHECK_EQ(2, args.length());
@@ -1261,59 +1162,14 @@ RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
return isolate->heap()->empty_string();
}
-
-RUNTIME_FUNCTION(Runtime_StringCharAt) {
+RUNTIME_FUNCTION(Runtime_ExternalStringGetChar) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- if (!args[0]->IsString()) return Smi::FromInt(0);
- if (!args[1]->IsNumber()) return Smi::FromInt(0);
- if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
- Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
- if (code->IsNaN()) return isolate->heap()->empty_string();
- return __RT_impl_Runtime_StringCharFromCode(Arguments(1, &code), isolate);
-}
-
-
-RUNTIME_FUNCTION(Runtime_OneByteSeqStringGetChar) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(SeqOneByteString, string, 0);
- CONVERT_INT32_ARG_CHECKED(index, 1);
- return Smi::FromInt(string->SeqOneByteStringGet(index));
-}
-
-
-RUNTIME_FUNCTION(Runtime_OneByteSeqStringSetChar) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
- CONVERT_INT32_ARG_CHECKED(index, 0);
- CONVERT_INT32_ARG_CHECKED(value, 1);
- CONVERT_ARG_CHECKED(SeqOneByteString, string, 2);
- string->SeqOneByteStringSet(index, value);
- return string;
-}
-
-
-RUNTIME_FUNCTION(Runtime_TwoByteSeqStringGetChar) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(SeqTwoByteString, string, 0);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(ExternalString, string, 0);
CONVERT_INT32_ARG_CHECKED(index, 1);
- return Smi::FromInt(string->SeqTwoByteStringGet(index));
-}
-
-
-RUNTIME_FUNCTION(Runtime_TwoByteSeqStringSetChar) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
- CONVERT_INT32_ARG_CHECKED(index, 0);
- CONVERT_INT32_ARG_CHECKED(value, 1);
- CONVERT_ARG_CHECKED(SeqTwoByteString, string, 2);
- string->SeqTwoByteStringSet(index, value);
- return string;
+ return Smi::FromInt(string->Get(index));
}
-
RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 234b45606d..300a6439b1 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -16,7 +16,7 @@ RUNTIME_FUNCTION(Runtime_CreateSymbol) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+ CHECK(name->IsString() || name->IsUndefined(isolate));
Handle<Symbol> symbol = isolate->factory()->NewSymbol();
if (name->IsString()) symbol->set_name(*name);
return *symbol;
@@ -27,7 +27,7 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+ CHECK(name->IsString() || name->IsUndefined(isolate));
Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
if (name->IsString()) symbol->set_name(*name);
return *symbol;
@@ -52,9 +52,7 @@ RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
builder.AppendString(handle(String::cast(symbol->name()), isolate));
}
builder.AppendCharacter(')');
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.Finish());
- return *result;
+ RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
}
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index a0f05665a3..0d6cb0efdd 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -4,19 +4,44 @@
#include "src/runtime/runtime-utils.h"
+#include <memory>
+
#include "src/arguments.h"
+#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/isolate-inl.h"
+#include "src/runtime-profiler.h"
+#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
+RUNTIME_FUNCTION(Runtime_ConstructDouble) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
+ CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
+ uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
+ return *isolate->factory()->NewNumber(uint64_to_double(result));
+}
+
RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ // This function is used by fuzzers to get coverage in the compiler.
+ // Ignore calls on non-function objects to avoid runtime errors.
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) {
+ return isolate->heap()->undefined_value();
+ }
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
+ // If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
// TODO(turbofan): Deoptimization is not supported yet.
@@ -37,17 +62,12 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
Handle<JSFunction> function;
- // If the argument is 'undefined', deoptimize the topmost
- // function.
+ // Find the JavaScript function on the top of the stack.
JavaScriptFrameIterator it(isolate);
- while (!it.done()) {
- if (it.frame()->is_java_script()) {
- function = Handle<JSFunction>(it.frame()->function());
- break;
- }
- }
+ if (!it.done()) function = Handle<JSFunction>(it.frame()->function());
if (function.is_null()) return isolate->heap()->undefined_value();
+ // If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
// TODO(turbofan): Deoptimization is not supported yet.
@@ -83,13 +103,27 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // The following assertion was lifted from the DCHECK inside
+
+ // This function is used by fuzzers; ignore calls with a bogus argument count.
+ if (args.length() != 1 && args.length() != 2) {
+ return isolate->heap()->undefined_value();
+ }
+
+ // This function is used by fuzzers to get coverage for optimizations in the
+ // compiler. Ignore calls on non-function objects to avoid runtime errors.
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) {
+ return isolate->heap()->undefined_value();
+ }
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
+ // The following condition was lifted from the DCHECK inside
// JSFunction::MarkForOptimization().
- RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
- (function->code()->kind() == Code::FUNCTION &&
- !function->shared()->optimization_disabled()));
+ if (!(function->shared()->allows_lazy_compilation() ||
+ (function->code()->kind() == Code::FUNCTION &&
+ !function->shared()->optimization_disabled()))) {
+ return isolate->heap()->undefined_value();
+ }
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
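
The pattern above recurs throughout this file: take the argument as a plain Object and return undefined instead of asserting when a fuzzer passes the wrong type. Reduced to a self-contained toy (the types here are stand-ins, not V8's):

struct Object { virtual ~Object() = default; };
struct JSFunction : Object { void MarkForOptimization() {} };

void OptimizeIfFunction(Object* arg) {
  JSFunction* fn = dynamic_cast<JSFunction*>(arg);
  if (fn == nullptr) return;  // wrong type: ignore rather than crash
  fn->MarkForOptimization();
}
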
@@ -108,41 +142,70 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return isolate->heap()->undefined_value();
}
-
-RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
+RUNTIME_FUNCTION(Runtime_InterpretFunctionOnNextCall) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 0 || args.length() == 1);
- Handle<JSFunction> function = Handle<JSFunction>::null();
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) {
+ return isolate->heap()->undefined_value();
+ }
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- if (args.length() == 0) {
- // Find the JavaScript function on the top of the stack.
- JavaScriptFrameIterator it(isolate);
- while (!it.done()) {
- if (it.frame()->is_java_script()) {
- function = Handle<JSFunction>(it.frame()->function());
- break;
- }
+ // Do not tier down if we are already on optimized code. Replacing optimized
+ // code without actual deoptimization can lead to funny bugs.
+ if (function->code()->kind() != Code::OPTIMIZED_FUNCTION &&
+ function->shared()->HasBytecodeArray()) {
+ function->ReplaceCode(*isolate->builtins()->InterpreterEntryTrampoline());
+ }
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_BaselineFunctionOnNextCall) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) {
+ return isolate->heap()->undefined_value();
+ }
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
+ // Do not tier down if we are already on optimized code. Replacing optimized
+ // code without actual deoptimization can lead to funny bugs.
+ if (function->code()->kind() != Code::OPTIMIZED_FUNCTION &&
+ function->code()->kind() != Code::FUNCTION) {
+ if (function->shared()->HasBaselineCode()) {
+ function->ReplaceCode(function->shared()->code());
+ } else {
+ function->MarkForBaseline();
}
- if (function.is_null()) return isolate->heap()->undefined_value();
- } else {
- // Function was passed as an argument.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, arg, 0);
- function = arg;
}
- // The following assertion was lifted from the DCHECK inside
- // JSFunction::MarkForOptimization().
- RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
- !function->shared()->optimization_disabled());
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0 || args.length() == 1);
+
+ Handle<JSFunction> function;
+
+ // The optional parameter determines the frame being targeted.
+ int stack_depth = args.length() == 1 ? args.smi_at(0) : 0;
+
+ // Find the JavaScript function on the top of the stack.
+ JavaScriptFrameIterator it(isolate);
+ while (!it.done() && stack_depth--) it.Advance();
+ if (!it.done()) function = Handle<JSFunction>(it.frame()->function());
+ if (function.is_null()) return isolate->heap()->undefined_value();
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
- Code* unoptimized = function->shared()->code();
- if (unoptimized->kind() == Code::FUNCTION) {
- DCHECK(BackEdgeTable::Verify(isolate, unoptimized));
+ // Make the profiler arm all back edges in unoptimized code.
+ if (it.frame()->type() == StackFrame::JAVA_SCRIPT ||
+ it.frame()->type() == StackFrame::INTERPRETED) {
isolate->runtime_profiler()->AttemptOnStackReplacement(
- *function, Code::kMaxLoopNestingMarker);
+ it.frame(), AbstractCode::kMaxLoopNestingMarker);
}
return isolate->heap()->undefined_value();
@@ -153,7 +216,8 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- function->shared()->set_disable_optimization_reason(kOptimizationDisabled);
+ function->shared()->set_disable_optimization_reason(
+ kOptimizationDisabledForTest);
function->shared()->set_optimization_disabled(true);
return isolate->heap()->undefined_value();
}
@@ -161,18 +225,29 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
+ DCHECK(args.length() == 1 || args.length() == 2);
if (!isolate->use_crankshaft()) {
return Smi::FromInt(4); // 4 == "never".
}
+
+ // This function is used by fuzzers to get coverage for optimizations in the
+ // compiler. Ignore calls on non-function objects to avoid runtime errors.
+ CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
+ if (!function_object->IsJSFunction()) {
+ return isolate->heap()->undefined_value();
+ }
+ Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+
bool sync_with_compiler_thread = true;
if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(String, sync, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
+ if (!sync_object->IsString()) return isolate->heap()->undefined_value();
+ Handle<String> sync = Handle<String>::cast(sync_object);
if (sync->IsOneByteEqualTo(STATIC_CHAR_VECTOR("no sync"))) {
sync_with_compiler_thread = false;
}
}
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
if (isolate->concurrent_recompilation_enabled() &&
sync_with_compiler_thread) {
while (function->IsInOptimizationQueue()) {
@@ -198,9 +273,10 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
DCHECK(args.length() == 0);
- RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
- RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->Unblock();
+ if (FLAG_block_concurrent_recompilation &&
+ isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->Unblock();
+ }
return isolate->heap()->undefined_value();
}
@@ -227,12 +303,40 @@ RUNTIME_FUNCTION(Runtime_GetUndetectable) {
return *Utils::OpenHandle(*obj);
}
+static void call_as_function(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ double v1 = args[0]
+ ->NumberValue(v8::Isolate::GetCurrent()->GetCurrentContext())
+ .ToChecked();
+ double v2 = args[1]
+ ->NumberValue(v8::Isolate::GetCurrent()->GetCurrentContext())
+ .ToChecked();
+ args.GetReturnValue().Set(
+ v8::Number::New(v8::Isolate::GetCurrent(), v1 - v2));
+}
+
+// Returns a callable object. The object returns the difference of its two
+// parameters when it is called.
+RUNTIME_FUNCTION(Runtime_GetCallable) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(v8_isolate);
+ Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+ instance_template->SetCallAsFunctionHandler(call_as_function);
+ v8_isolate->GetCurrentContext();
+ Local<v8::Object> instance =
+ t->GetFunction(v8_isolate->GetCurrentContext())
+ .ToLocalChecked()
+ ->NewInstance(v8_isolate->GetCurrentContext())
+ .ToLocalChecked();
+ return *Utils::OpenHandle(*instance);
+}
RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->shared()->ClearTypeFeedbackInfo();
+ function->ClearTypeFeedbackInfo();
Code* unoptimized = function->shared()->code();
if (unoptimized->kind() == Code::FUNCTION) {
unoptimized->ClearInlineCaches();
@@ -240,6 +344,68 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
+ // This only supports the case where the function being exported
+ // calls an intermediate function, and the intermediate function
+ // calls exactly one imported function.
+ HandleScope scope(isolate);
+ CHECK(args.length() == 2);
+ // It takes two parameters: the first one is the JSFunction,
+ // the second one is the type.
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ // If type is 0, it is supposed to be a direct call into a WASM function;
+ // if type is 1, it is supposed to have wrappers.
+ CONVERT_ARG_HANDLE_CHECKED(Smi, type, 1);
+ Handle<Code> export_code = handle(function->code());
+ CHECK(export_code->kind() == Code::JS_TO_WASM_FUNCTION);
+ int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ // Check the type of the $export_fct.
+ Handle<Code> export_fct;
+ int count = 0;
+ for (RelocIterator it(*export_code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == Code::WASM_FUNCTION) {
+ ++count;
+ export_fct = handle(target);
+ }
+ }
+ CHECK(count == 1);
+ // Check the type of the intermediate_fct.
+ Handle<Code> intermediate_fct;
+ count = 0;
+ for (RelocIterator it(*export_fct, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == Code::WASM_FUNCTION) {
+ ++count;
+ intermediate_fct = handle(target);
+ }
+ }
+ CHECK(count == 1);
+ // Check the type of the imported function; in our case it should also be a
+ // WASM function.
+ Handle<Code> imported_fct;
+ CHECK(type->value() == 0 || type->value() == 1);
+
+ Code::Kind target_kind =
+ type->value() == 0 ? Code::WASM_FUNCTION : Code::WASM_TO_JS_FUNCTION;
+ count = 0;
+ for (RelocIterator it(*intermediate_fct, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == target_kind) {
+ ++count;
+ imported_fct = handle(target);
+ }
+ }
+ CHECK_LE(count, 1);
+ return isolate->heap()->ToBoolean(count == 1);
+}
RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
HandleScope scope(isolate);
@@ -342,7 +508,7 @@ RUNTIME_FUNCTION(Runtime_SetFlags) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(String, arg, 0);
- base::SmartArrayPointer<char> flags =
+ std::unique_ptr<char[]> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
return isolate->heap()->undefined_value();
@@ -457,6 +623,31 @@ RUNTIME_FUNCTION(Runtime_TraceTailCall) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, exception_obj, 0);
+
+ Factory* factory = isolate->factory();
+ Handle<JSMessageObject> message_obj =
+ isolate->CreateMessage(exception_obj, nullptr);
+
+ Handle<JSObject> message = factory->NewJSObject(isolate->object_function());
+
+ Handle<String> key;
+ Handle<Object> value;
+
+ key = factory->NewStringFromAsciiChecked("start_pos");
+ value = handle(Smi::FromInt(message_obj->start_position()), isolate);
+ JSObject::SetProperty(message, key, value, STRICT).Assert();
+
+ key = factory->NewStringFromAsciiChecked("end_pos");
+ value = handle(Smi::FromInt(message_obj->end_position()), isolate);
+ JSObject::SetProperty(message, key, value, STRICT).Assert();
+
+ return *message;
+}
+
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
@@ -473,6 +664,37 @@ RUNTIME_FUNCTION(Runtime_InNewSpace) {
return isolate->heap()->ToBoolean(isolate->heap()->InNewSpace(obj));
}
+static bool IsAsmWasmCode(Isolate* isolate, Handle<JSFunction> function) {
+ if (!function->shared()->HasAsmWasmData()) {
+ // Doesn't have wasm data.
+ return false;
+ }
+ if (function->shared()->code() !=
+ isolate->builtins()->builtin(Builtins::kInstantiateAsmJs)) {
+ // Hasn't been compiled yet.
+ return false;
+ }
+ return true;
+}
+
+RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ // TODO(mstarzinger): --always-opt should still allow asm.js->wasm,
+ // but currently does not. For now, pretend asm.js->wasm is on for
+ // this case. Be more accurate once this is corrected.
+ return isolate->heap()->ToBoolean(
+ ((FLAG_always_opt || FLAG_prepare_always_opt) && FLAG_validate_asm) ||
+ IsAsmWasmCode(isolate, function));
+}
+
+RUNTIME_FUNCTION(Runtime_IsNotAsmWasmCode) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ return isolate->heap()->ToBoolean(!IsAsmWasmCode(isolate, function));
+}
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
RUNTIME_FUNCTION(Runtime_Has##Name) { \
@@ -511,6 +733,43 @@ RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
}
+// Take a compiled wasm module, serialize it and copy the buffer into an array
+// buffer, which is then returned.
+RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+
+ Handle<FixedArray> orig =
+ handle(FixedArray::cast(module_obj->GetInternalField(0)));
+ std::unique_ptr<ScriptData> data =
+ WasmCompiledModuleSerializer::SerializeWasmModule(isolate, orig);
+ void* buff = isolate->array_buffer_allocator()->Allocate(data->length());
+ Handle<JSArrayBuffer> ret = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(ret, isolate, false, buff, data->length());
+ memcpy(buff, data->data(), data->length());
+ return *ret;
+}
+
+// Take an array buffer and attempt to reconstruct a compiled wasm module.
+// Return undefined if unsuccessful.
+RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
+ HandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
+
+ Address mem_start = static_cast<Address>(buffer->backing_store());
+ int mem_size = static_cast<int>(buffer->byte_length()->Number());
+
+ ScriptData sc(mem_start, mem_size);
+ MaybeHandle<FixedArray> maybe_compiled_module =
+ WasmCompiledModuleSerializer::DeserializeWasmModule(isolate, &sc);
+ Handle<FixedArray> compiled_module;
+ if (!maybe_compiled_module.ToHandle(&compiled_module)) {
+ return isolate->heap()->undefined_value();
+ }
+ return *wasm::CreateCompiledModuleObject(isolate, compiled_module);
+}
} // namespace internal
} // namespace v8
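[editor's note] Runtime_SerializeWasmModule copies the serializer's bytes into a freshly allocated backing store before wrapping it in a JSArrayBuffer, and Runtime_DeserializeWasmModule reads the same bytes back through a ScriptData view. A standalone sketch of that allocate-copy-wrap round trip, with plain malloc standing in for the isolate's array buffer allocator:

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    // Minimal stand-in for the serialized-payload view used by the runtime.
    struct ScriptData {
      const unsigned char* data;
      int length;
    };

    int main() {
      const unsigned char payload[] = {0xDE, 0xAD, 0xBE, 0xEF};
      ScriptData serialized{payload, static_cast<int>(sizeof(payload))};

      // Allocate a backing store and copy the serialized bytes into it,
      // as the runtime does before JSArrayBuffer::Setup.
      void* buff = std::malloc(serialized.length);
      std::memcpy(buff, serialized.data, serialized.length);

      // Deserialization views the buffer's contents as a ScriptData again.
      ScriptData roundtrip{static_cast<const unsigned char*>(buff),
                           serialized.length};
      assert(std::memcmp(roundtrip.data, payload, serialized.length) == 0);
      std::free(buff);
      return 0;
    }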
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index bf0ee9f1c1..04bf368974 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -28,17 +28,25 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(new_length, 3);
- RUNTIME_ASSERT(!source.is_identical_to(target));
+
+ if (source->was_neutered() || target->was_neutered()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "ArrayBuffer.prototype.slice")));
+ }
+
+ CHECK(!source.is_identical_to(target));
size_t start = 0, target_length = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *first, &start));
- RUNTIME_ASSERT(TryNumberToSize(isolate, *new_length, &target_length));
- RUNTIME_ASSERT(NumberToSize(isolate, target->byte_length()) >= target_length);
+ CHECK(TryNumberToSize(*first, &start));
+ CHECK(TryNumberToSize(*new_length, &target_length));
+ CHECK(NumberToSize(target->byte_length()) >= target_length);
if (target_length == 0) return isolate->heap()->undefined_value();
- size_t source_byte_length = NumberToSize(isolate, source->byte_length());
- RUNTIME_ASSERT(start <= source_byte_length);
- RUNTIME_ASSERT(source_byte_length - start >= target_length);
+ size_t source_byte_length = NumberToSize(source->byte_length());
+ CHECK(start <= source_byte_length);
+ CHECK(source_byte_length - start >= target_length);
uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
CopyBytes(target_data, source_data + start, target_length);
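[editor's note] Note how the new CHECKs keep the bounds tests in subtraction form: once start <= source_byte_length is known, source_byte_length - start >= target_length cannot wrap, whereas the naive start + target_length <= length form can overflow and pass. A standalone sketch of the difference:

    #include <cstddef>
    #include <cstdio>

    // Overflow-safe: start <= len is established first, so the subtraction
    // cannot wrap.
    bool InBoundsSafe(size_t len, size_t start, size_t count) {
      return start <= len && len - start >= count;
    }

    // Naive form: start + count can wrap to a small value and wrongly pass.
    bool InBoundsNaive(size_t len, size_t start, size_t count) {
      return start + count <= len;
    }

    int main() {
      size_t huge = static_cast<size_t>(-1);
      std::printf("safe=%d naive=%d\n", InBoundsSafe(100, 1, huge),
                  InBoundsNaive(100, 1, huge));  // prints safe=0 naive=1
      return 0;
    }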
@@ -55,10 +63,10 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
return isolate->heap()->undefined_value();
}
// Shared array buffers should never be neutered.
- RUNTIME_ASSERT(!array_buffer->is_shared());
+ CHECK(!array_buffer->is_shared());
DCHECK(!array_buffer->is_external());
void* backing_store = array_buffer->backing_store();
- size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
+ size_t byte_length = NumberToSize(array_buffer->byte_length());
array_buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*array_buffer);
array_buffer->Neuter();
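[editor's note] Neutering detaches the backing store and zeroes the buffer's length; the runtime first marks the buffer external and unregisters it so the heap no longer accounts for the memory. A standalone sketch of just the detach step, using a toy buffer type rather than V8's JSArrayBuffer:

    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    // Toy array buffer: owns a heap allocation until neutered.
    struct ToyArrayBuffer {
      void* backing_store;
      size_t byte_length;

      void Neuter() {
        backing_store = nullptr;  // detach: views now see a zero-length buffer
        byte_length = 0;
      }
    };

    int main() {
      ToyArrayBuffer buf{std::malloc(16), 16};
      void* store = buf.backing_store;  // the embedder now owns this memory
      buf.Neuter();
      assert(buf.byte_length == 0 && buf.backing_store == nullptr);
      std::free(store);
      return 0;
    }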
@@ -97,32 +105,31 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length_object, 4);
CONVERT_BOOLEAN_ARG_CHECKED(initialize, 5);
- RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
- arrayId <= Runtime::ARRAY_ID_LAST);
+ CHECK(arrayId >= Runtime::ARRAY_ID_FIRST &&
+ arrayId <= Runtime::ARRAY_ID_LAST);
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
ElementsKind fixed_elements_kind = INT8_ELEMENTS; // Bogus initialization.
Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &fixed_elements_kind,
&element_size);
- RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
+ CHECK(holder->map()->elements_kind() == fixed_elements_kind);
size_t byte_offset = 0;
size_t byte_length = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset_object, &byte_offset));
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length_object, &byte_length));
+ CHECK(TryNumberToSize(*byte_offset_object, &byte_offset));
+ CHECK(TryNumberToSize(*byte_length_object, &byte_length));
if (maybe_buffer->IsJSArrayBuffer()) {
Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
- size_t array_buffer_byte_length =
- NumberToSize(isolate, buffer->byte_length());
- RUNTIME_ASSERT(byte_offset <= array_buffer_byte_length);
- RUNTIME_ASSERT(array_buffer_byte_length - byte_offset >= byte_length);
+ size_t array_buffer_byte_length = NumberToSize(buffer->byte_length());
+ CHECK(byte_offset <= array_buffer_byte_length);
+ CHECK(array_buffer_byte_length - byte_offset >= byte_length);
} else {
- RUNTIME_ASSERT(maybe_buffer->IsNull());
+ CHECK(maybe_buffer->IsNull(isolate));
}
- RUNTIME_ASSERT(byte_length % element_size == 0);
+ CHECK(byte_length % element_size == 0);
size_t length = byte_length / element_size;
if (length > static_cast<unsigned>(Smi::kMaxValue)) {
@@ -142,7 +149,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
holder->set_byte_offset(*byte_offset_object);
holder->set_byte_length(*byte_length_object);
- if (!maybe_buffer->IsNull()) {
+ if (!maybe_buffer->IsNull(isolate)) {
Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
holder->set_buffer(*buffer);
@@ -178,8 +185,8 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 3);
- RUNTIME_ASSERT(arrayId >= Runtime::ARRAY_ID_FIRST &&
- arrayId <= Runtime::ARRAY_ID_LAST);
+ CHECK(arrayId >= Runtime::ARRAY_ID_FIRST &&
+ arrayId <= Runtime::ARRAY_ID_LAST);
ExternalArrayType array_type = kExternalInt8Array; // Bogus initialization.
size_t element_size = 1; // Bogus initialization.
@@ -187,7 +194,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
Runtime::ArrayIdToTypeAndSize(arrayId, &array_type, &fixed_elements_kind,
&element_size);
- RUNTIME_ASSERT(holder->map()->elements_kind() == fixed_elements_kind);
+ CHECK(holder->map()->elements_kind() == fixed_elements_kind);
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
size_t length = 0;
@@ -196,7 +203,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
length_obj = handle(JSTypedArray::cast(*source)->length(), isolate);
length = JSTypedArray::cast(*source)->length_value();
} else {
- RUNTIME_ASSERT(TryNumberToSize(isolate, *length_obj, &length));
+ CHECK(TryNumberToSize(*length_obj, &length));
}
if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
@@ -253,8 +260,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
if (typed_array->type() == holder->type()) {
uint8_t* backing_store =
static_cast<uint8_t*>(typed_array->GetBuffer()->backing_store());
- size_t source_byte_offset =
- NumberToSize(isolate, typed_array->byte_offset());
+ size_t source_byte_offset = NumberToSize(typed_array->byte_offset());
memcpy(buffer->backing_store(), backing_store + source_byte_offset,
byte_length);
return isolate->heap()->true_value();
@@ -276,7 +282,6 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
BUFFER_VIEW_GETTER(ArrayBufferView, ByteLength, byte_length)
BUFFER_VIEW_GETTER(ArrayBufferView, ByteOffset, byte_offset)
BUFFER_VIEW_GETTER(TypedArray, Length, length)
-BUFFER_VIEW_GETTER(DataView, Buffer, buffer)
#undef BUFFER_VIEW_GETTER
@@ -321,19 +326,19 @@ RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
Handle<JSTypedArray> target(JSTypedArray::cast(*target_obj));
Handle<JSTypedArray> source(JSTypedArray::cast(*source_obj));
size_t offset = 0;
- RUNTIME_ASSERT(TryNumberToSize(isolate, *offset_obj, &offset));
+ CHECK(TryNumberToSize(*offset_obj, &offset));
size_t target_length = target->length_value();
size_t source_length = source->length_value();
- size_t target_byte_length = NumberToSize(isolate, target->byte_length());
- size_t source_byte_length = NumberToSize(isolate, source->byte_length());
+ size_t target_byte_length = NumberToSize(target->byte_length());
+ size_t source_byte_length = NumberToSize(source->byte_length());
if (offset > target_length || offset + source_length > target_length ||
offset + source_length < offset) { // overflow
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kTypedArraySetSourceTooLarge));
}
- size_t target_offset = NumberToSize(isolate, target->byte_offset());
- size_t source_offset = NumberToSize(isolate, source->byte_offset());
+ size_t target_offset = NumberToSize(target->byte_offset());
+ size_t source_offset = NumberToSize(source->byte_offset());
uint8_t* target_base =
static_cast<uint8_t*>(target->GetBuffer()->backing_store()) +
target_offset;
@@ -397,7 +402,8 @@ RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
Handle<JSTypedArray> obj(JSTypedArray::cast(args[0]));
return isolate->heap()->ToBoolean(obj->GetBuffer()->is_shared() &&
obj->type() != kExternalFloat32Array &&
- obj->type() != kExternalFloat64Array);
+ obj->type() != kExternalFloat64Array &&
+ obj->type() != kExternalUint8ClampedArray);
}
@@ -446,15 +452,13 @@ inline static bool DataViewGetValue(Isolate* isolate,
Handle<Object> byte_offset_obj,
bool is_little_endian, T* result) {
size_t byte_offset = 0;
- if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ if (!TryNumberToSize(*byte_offset_obj, &byte_offset)) {
return false;
}
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
- size_t data_view_byte_offset =
- NumberToSize(isolate, data_view->byte_offset());
- size_t data_view_byte_length =
- NumberToSize(isolate, data_view->byte_length());
+ size_t data_view_byte_offset = NumberToSize(data_view->byte_offset());
+ size_t data_view_byte_length = NumberToSize(data_view->byte_length());
if (byte_offset + sizeof(T) > data_view_byte_length ||
byte_offset + sizeof(T) < byte_offset) { // overflow
return false;
@@ -467,8 +471,7 @@ inline static bool DataViewGetValue(Isolate* isolate,
Value value;
size_t buffer_offset = data_view_byte_offset + byte_offset;
- DCHECK(NumberToSize(isolate, buffer->byte_length()) >=
- buffer_offset + sizeof(T));
+ DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
uint8_t* source =
static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
if (NeedToFlipBytes(is_little_endian)) {
@@ -486,15 +489,13 @@ static bool DataViewSetValue(Isolate* isolate, Handle<JSDataView> data_view,
Handle<Object> byte_offset_obj,
bool is_little_endian, T data) {
size_t byte_offset = 0;
- if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+ if (!TryNumberToSize(*byte_offset_obj, &byte_offset)) {
return false;
}
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
- size_t data_view_byte_offset =
- NumberToSize(isolate, data_view->byte_offset());
- size_t data_view_byte_length =
- NumberToSize(isolate, data_view->byte_length());
+ size_t data_view_byte_offset = NumberToSize(data_view->byte_offset());
+ size_t data_view_byte_length = NumberToSize(data_view->byte_length());
if (byte_offset + sizeof(T) > data_view_byte_length ||
byte_offset + sizeof(T) < byte_offset) { // overflow
return false;
@@ -508,8 +509,7 @@ static bool DataViewSetValue(Isolate* isolate, Handle<JSDataView> data_view,
Value value;
value.data = data;
size_t buffer_offset = data_view_byte_offset + byte_offset;
- DCHECK(NumberToSize(isolate, buffer->byte_length()) >=
- buffer_offset + sizeof(T));
+ DCHECK(NumberToSize(buffer->byte_length()) >= buffer_offset + sizeof(T));
uint8_t* target =
static_cast<uint8_t*>(buffer->backing_store()) + buffer_offset;
if (NeedToFlipBytes(is_little_endian)) {
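[editor's note] NeedToFlipBytes and the flip helper (not shown in this hunk) implement DataView's explicit endianness: bytes are copied straight through when the requested order matches the host and reversed otherwise. A standalone sketch of the same idea, assuming a little-endian host such as x86 (the function name is illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Read sizeof(T) bytes from `source`, reversing them when the requested
    // order differs from the host's. This sketch assumes a little-endian
    // host; V8 detects the host order instead.
    template <typename T>
    T ReadWithEndianness(const uint8_t* source, bool is_little_endian) {
      uint8_t bytes[sizeof(T)];
      if (is_little_endian) {
        std::memcpy(bytes, source, sizeof(T));
      } else {
        for (size_t i = 0; i < sizeof(T); i++) {
          bytes[i] = source[sizeof(T) - 1 - i];
        }
      }
      T value;
      std::memcpy(&value, bytes, sizeof(T));
      return value;
    }

    int main() {
      const uint8_t buf[4] = {0x12, 0x34, 0x56, 0x78};
      assert(ReadWithEndianness<uint32_t>(buf, true) == 0x78563412u);
      assert(ReadWithEndianness<uint32_t>(buf, false) == 0x12345678u);
      return 0;
    }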
diff --git a/deps/v8/src/runtime/runtime-uri.cc b/deps/v8/src/runtime/runtime-uri.cc
deleted file mode 100644
index e64e9dcea7..0000000000
--- a/deps/v8/src/runtime/runtime-uri.cc
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/conversions.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
-#include "src/string-search.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-class URIUnescape : public AllStatic {
- public:
- template <typename Char>
- MUST_USE_RESULT static MaybeHandle<String> Unescape(Isolate* isolate,
- Handle<String> source);
-
- private:
- static const signed char kHexValue['g'];
-
- template <typename Char>
- MUST_USE_RESULT static MaybeHandle<String> UnescapeSlow(Isolate* isolate,
- Handle<String> string,
- int start_index);
-
- static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2));
-
- template <typename Char>
- static INLINE(int UnescapeChar(Vector<const Char> vector, int i, int length,
- int* step));
-};
-
-
-const signed char URIUnescape::kHexValue[] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -0, 1, 2, 3, 4, 5,
- 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15};
-
-
-template <typename Char>
-MaybeHandle<String> URIUnescape::Unescape(Isolate* isolate,
- Handle<String> source) {
- int index;
- {
- DisallowHeapAllocation no_allocation;
- StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
- index = search.Search(source->GetCharVector<Char>(), 0);
- if (index < 0) return source;
- }
- return UnescapeSlow<Char>(isolate, source, index);
-}
-
-
-template <typename Char>
-MaybeHandle<String> URIUnescape::UnescapeSlow(Isolate* isolate,
- Handle<String> string,
- int start_index) {
- bool one_byte = true;
- int length = string->length();
-
- int unescaped_length = 0;
- {
- DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
- for (int i = start_index; i < length; unescaped_length++) {
- int step;
- if (UnescapeChar(vector, i, length, &step) >
- String::kMaxOneByteCharCode) {
- one_byte = false;
- }
- i += step;
- }
- }
-
- DCHECK(start_index < length);
- Handle<String> first_part =
- isolate->factory()->NewProperSubString(string, 0, start_index);
-
- int dest_position = 0;
- Handle<String> second_part;
- DCHECK(unescaped_length <= String::kMaxLength);
- if (one_byte) {
- Handle<SeqOneByteString> dest = isolate->factory()
- ->NewRawOneByteString(unescaped_length)
- .ToHandleChecked();
- DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
- for (int i = start_index; i < length; dest_position++) {
- int step;
- dest->SeqOneByteStringSet(dest_position,
- UnescapeChar(vector, i, length, &step));
- i += step;
- }
- second_part = dest;
- } else {
- Handle<SeqTwoByteString> dest = isolate->factory()
- ->NewRawTwoByteString(unescaped_length)
- .ToHandleChecked();
- DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
- for (int i = start_index; i < length; dest_position++) {
- int step;
- dest->SeqTwoByteStringSet(dest_position,
- UnescapeChar(vector, i, length, &step));
- i += step;
- }
- second_part = dest;
- }
- return isolate->factory()->NewConsString(first_part, second_part);
-}
-
-
-int URIUnescape::TwoDigitHex(uint16_t character1, uint16_t character2) {
- if (character1 > 'f') return -1;
- int hi = kHexValue[character1];
- if (hi == -1) return -1;
- if (character2 > 'f') return -1;
- int lo = kHexValue[character2];
- if (lo == -1) return -1;
- return (hi << 4) + lo;
-}
-
-
-template <typename Char>
-int URIUnescape::UnescapeChar(Vector<const Char> vector, int i, int length,
- int* step) {
- uint16_t character = vector[i];
- int32_t hi = 0;
- int32_t lo = 0;
- if (character == '%' && i <= length - 6 && vector[i + 1] == 'u' &&
- (hi = TwoDigitHex(vector[i + 2], vector[i + 3])) != -1 &&
- (lo = TwoDigitHex(vector[i + 4], vector[i + 5])) != -1) {
- *step = 6;
- return (hi << 8) + lo;
- } else if (character == '%' && i <= length - 3 &&
- (lo = TwoDigitHex(vector[i + 1], vector[i + 2])) != -1) {
- *step = 3;
- return lo;
- } else {
- *step = 1;
- return character;
- }
-}
-
-
-class URIEscape : public AllStatic {
- public:
- template <typename Char>
- MUST_USE_RESULT static MaybeHandle<String> Escape(Isolate* isolate,
- Handle<String> string);
-
- private:
- static const char kHexChars[17];
- static const char kNotEscaped[256];
-
- static bool IsNotEscaped(uint16_t c) { return kNotEscaped[c] != 0; }
-};
-
-
-const char URIEscape::kHexChars[] = "0123456789ABCDEF";
-
-
-// kNotEscaped is generated by the following:
-//
-// #!/bin/perl
-// for (my $i = 0; $i < 256; $i++) {
-// print "\n" if $i % 16 == 0;
-// my $c = chr($i);
-// my $escaped = 1;
-// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
-// print $escaped ? "0, " : "1, ";
-// }
-
-const char URIEscape::kNotEscaped[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-
-
-template <typename Char>
-MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
- DCHECK(string->IsFlat());
- int escaped_length = 0;
- int length = string->length();
-
- {
- DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
- for (int i = 0; i < length; i++) {
- uint16_t c = vector[i];
- if (c >= 256) {
- escaped_length += 6;
- } else if (IsNotEscaped(c)) {
- escaped_length++;
- } else {
- escaped_length += 3;
- }
-
- // We don't allow strings that are longer than a maximal length.
- DCHECK(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) break; // Provoke exception.
- }
- }
-
- // No length change implies no change. Return original string if no change.
- if (escaped_length == length) return string;
-
- Handle<SeqOneByteString> dest;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, dest, isolate->factory()->NewRawOneByteString(escaped_length),
- String);
- int dest_position = 0;
-
- {
- DisallowHeapAllocation no_allocation;
- Vector<const Char> vector = string->GetCharVector<Char>();
- for (int i = 0; i < length; i++) {
- uint16_t c = vector[i];
- if (c >= 256) {
- dest->SeqOneByteStringSet(dest_position, '%');
- dest->SeqOneByteStringSet(dest_position + 1, 'u');
- dest->SeqOneByteStringSet(dest_position + 2, kHexChars[c >> 12]);
- dest->SeqOneByteStringSet(dest_position + 3, kHexChars[(c >> 8) & 0xf]);
- dest->SeqOneByteStringSet(dest_position + 4, kHexChars[(c >> 4) & 0xf]);
- dest->SeqOneByteStringSet(dest_position + 5, kHexChars[c & 0xf]);
- dest_position += 6;
- } else if (IsNotEscaped(c)) {
- dest->SeqOneByteStringSet(dest_position, c);
- dest_position++;
- } else {
- dest->SeqOneByteStringSet(dest_position, '%');
- dest->SeqOneByteStringSet(dest_position + 1, kHexChars[c >> 4]);
- dest->SeqOneByteStringSet(dest_position + 2, kHexChars[c & 0xf]);
- dest_position += 3;
- }
- }
- }
-
- return dest;
-}
-
-
-RUNTIME_FUNCTION(Runtime_URIEscape) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<String> source;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
- Object::ToString(isolate, input));
- source = String::Flatten(source);
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, source->IsOneByteRepresentationUnderneath()
- ? URIEscape::Escape<uint8_t>(isolate, source)
- : URIEscape::Escape<uc16>(isolate, source));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_URIUnescape) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
- Handle<String> source;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
- Object::ToString(isolate, input));
- source = String::Flatten(source);
- Handle<String> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, source->IsOneByteRepresentationUnderneath()
- ? URIUnescape::Unescape<uint8_t>(isolate, source)
- : URIUnescape::Unescape<uc16>(isolate, source));
- return *result;
-}
-
-} // namespace internal
-} // namespace v8
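[editor's note] The deleted unescape path decodes %XX pairs with a lookup table indexed by character, and (hi << 4) + lo reassembles the byte. A standalone sketch of the same two-digit hex decode, using arithmetic in place of the kHexValue table:

    #include <cassert>

    // Decode one hex digit; -1 for non-hex, mirroring kHexValue's sentinel.
    int HexValue(unsigned char c) {
      if (c >= '0' && c <= '9') return c - '0';
      if (c >= 'A' && c <= 'F') return c - 'A' + 10;
      if (c >= 'a' && c <= 'f') return c - 'a' + 10;
      return -1;
    }

    // Combine two digits as TwoDigitHex does; -1 if either digit is bad.
    int TwoDigitHex(unsigned char c1, unsigned char c2) {
      int hi = HexValue(c1), lo = HexValue(c2);
      if (hi == -1 || lo == -1) return -1;
      return (hi << 4) + lo;
    }

    int main() {
      assert(TwoDigitHex('2', '0') == 0x20);  // "%20" decodes to a space
      assert(TwoDigitHex('g', '0') == -1);
      return 0;
    }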
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 17c78d5a0b..0d84354f44 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -11,134 +11,91 @@
namespace v8 {
namespace internal {
-#ifdef DEBUG
-
-#define RUNTIME_ASSERT(value) \
- do { \
- if (!(value)) { \
- V8_RuntimeError(__FILE__, __LINE__, #value); \
- return isolate->ThrowIllegalOperation(); \
- } \
- } while (0)
-
-#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
- do { \
- if (!(value)) { \
- V8_RuntimeError(__FILE__, __LINE__, #value); \
- isolate->ThrowIllegalOperation(); \
- return MaybeHandle<T>(); \
- } \
- } while (0)
-
-#else
-
-#define RUNTIME_ASSERT(value) \
- do { \
- if (!(value)) { \
- return isolate->ThrowIllegalOperation(); \
- } \
- } while (0)
-
-#define RUNTIME_ASSERT_HANDLIFIED(value, T) \
- do { \
- if (!(value)) { \
- isolate->ThrowIllegalOperation(); \
- return MaybeHandle<T>(); \
- } \
- } while (0)
-
-#endif
-
// Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the
-// expected type call IllegalOperation and return.
+// expected type we crash safely.
#define CONVERT_ARG_CHECKED(Type, name, index) \
- RUNTIME_ASSERT(args[index]->Is##Type()); \
+ CHECK(args[index]->Is##Type()); \
Type* name = Type::cast(args[index]);
#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
- RUNTIME_ASSERT(args[index]->Is##Type()); \
+ CHECK(args[index]->Is##Type()); \
Handle<Type> name = args.at<Type>(index);
#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
+ CHECK(args[index]->IsNumber()); \
Handle<Object> name = args.at<Object>(index);
// Cast the given object to a boolean and store it in a variable with
-// the given name. If the object is not a boolean call IllegalOperation
-// and return.
+// the given name. If the object is not a boolean we crash safely.
#define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsBoolean()); \
- bool name = args[index]->IsTrue();
+ CHECK(args[index]->IsBoolean()); \
+ bool name = args[index]->IsTrue(isolate);
// Cast the given argument to a Smi and store its value in an int variable
-// with the given name. If the argument is not a Smi call IllegalOperation
-// and return.
+// with the given name. If the argument is not a Smi we crash safely.
#define CONVERT_SMI_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
+ CHECK(args[index]->IsSmi()); \
int name = args.smi_at(index);
// Cast the given argument to a double and store it in a variable with
// the given name. If the argument is not a number (as opposed to
-// the number not-a-number) call IllegalOperation and return.
+// the number not-a-number) we crash safely.
#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
+ CHECK(args[index]->IsNumber()); \
double name = args.number_at(index);
-
// Cast the given argument to a size_t and store its value in a variable with
-// the given name. If the argument is not a size_t call IllegalOperation and
-// return.
+// the given name. If the argument is not a size_t we crash safely.
#define CONVERT_SIZE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
+ CHECK(args[index]->IsNumber()); \
Handle<Object> name##_object = args.at<Object>(index); \
size_t name = 0; \
- RUNTIME_ASSERT(TryNumberToSize(isolate, *name##_object, &name));
-
+ CHECK(TryNumberToSize(*name##_object, &name));
// Call the specified converter on the object and store the result in
// a variable of the specified type with the given name. If the
-// object is not a Number call IllegalOperation and return.
+// object is not a Number we crash safely.
#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
- RUNTIME_ASSERT(obj->IsNumber()); \
+ CHECK(obj->IsNumber()); \
type name = NumberTo##Type(obj);
-
// Cast the given argument to PropertyDetails and store its value in a
-// variable with the given name. If the argument is not a Smi call
-// IllegalOperation and return.
+// variable with the given name. If the argument is not a Smi we crash safely.
#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
+ CHECK(args[index]->IsSmi()); \
PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-
// Assert that the given argument has a valid value for a LanguageMode
// and store it in a LanguageMode variable with the given name.
-#define CONVERT_LANGUAGE_MODE_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT(is_valid_language_mode(args.smi_at(index))); \
+#define CONVERT_LANGUAGE_MODE_ARG_CHECKED(name, index) \
+ CHECK(args[index]->IsSmi()); \
+ CHECK(is_valid_language_mode(args.smi_at(index))); \
LanguageMode name = static_cast<LanguageMode>(args.smi_at(index));
-
// Assert that the given argument is a number within the Int32 range
-// and convert it to int32_t. If the argument is not an Int32 call
-// IllegalOperation and return.
+// and convert it to int32_t. If the argument is not an Int32 we crash safely.
#define CONVERT_INT32_ARG_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsNumber()); \
+ CHECK(args[index]->IsNumber()); \
int32_t name = 0; \
- RUNTIME_ASSERT(args[index]->ToInt32(&name));
+ CHECK(args[index]->ToInt32(&name));
+// Assert that the given argument is a number within the Uint32 range
+// and convert it to uint32_t. If the argument is not a Uint32 we crash
+// safely.
+#define CONVERT_UINT32_ARG_CHECKED(name, index) \
+ CHECK(args[index]->IsNumber()); \
+ uint32_t name = 0; \
+ CHECK(args[index]->ToUint32(&name));
// Cast the given argument to PropertyAttributes and store its value in a
-// variable with the given name. If the argument is not a Smi call or the
-// enum value is out of range, call IllegalOperation and return.
-#define CONVERT_PROPERTY_ATTRIBUTES_CHECKED(name, index) \
- RUNTIME_ASSERT(args[index]->IsSmi()); \
- RUNTIME_ASSERT( \
- (args.smi_at(index) & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); \
+// variable with the given name. If the argument is not a Smi or the
+// enum value is out of range, we crash safely.
+#define CONVERT_PROPERTY_ATTRIBUTES_CHECKED(name, index) \
+ CHECK(args[index]->IsSmi()); \
+ CHECK((args.smi_at(index) & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); \
PropertyAttributes name = static_cast<PropertyAttributes>(args.smi_at(index));
-
// A mechanism to return a pair of Object pointers in registers (if possible).
// How this is achieved is calling convention-dependent.
// All currently supported x86 compilers use calling conventions that are cdecl
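[editor's note] With RUNTIME_ASSERT gone, a failed CONVERT_* check now terminates the process instead of raising an illegal-operation exception. A standalone model of the new pattern, with simplified CHECK and argument types (not V8's actual definitions):

    #include <cstdio>
    #include <cstdlib>

    // Simplified stand-in for V8's CHECK: abort on failure instead of
    // returning an illegal-operation exception as RUNTIME_ASSERT did.
    #define CHECK(condition)                                      \
      do {                                                        \
        if (!(condition)) {                                       \
          std::fprintf(stderr, "Check failed: %s\n", #condition); \
          std::abort();                                           \
        }                                                         \
      } while (0)

    // Toy argument wrapper; the real macros check heap-object types.
    struct Arg {
      bool is_smi;
      int value;
    };

    #define CONVERT_SMI_ARG_CHECKED(name, index) \
      CHECK(args[index].is_smi);                 \
      int name = args[index].value;

    int main() {
      Arg args[1] = {{true, 42}};
      CONVERT_SMI_ARG_CHECKED(x, 0);
      std::printf("%d\n", x);  // prints 42; a non-Smi argument would abort
      return 0;
    }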
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
new file mode 100644
index 0000000000..37608e61cd
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -0,0 +1,120 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+#include "src/assembler.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/conversions.h"
+#include "src/debug/debug.h"
+#include "src/factory.h"
+#include "src/frames-inl.h"
+#include "src/objects-inl.h"
+#include "src/v8memory.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+const int kWasmMemArrayBuffer = 2;
+}
+
+RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ uint32_t delta_pages = 0;
+ CHECK(args[0]->ToUint32(&delta_pages));
+ Handle<JSObject> module_object;
+
+ {
+ // Get the module JSObject
+ DisallowHeapAllocation no_allocation;
+ const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
+ Address pc =
+ Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
+ Code* code =
+ isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+ FixedArray* deopt_data = code->deoptimization_data();
+ DCHECK(deopt_data->length() == 2);
+ module_object = Handle<JSObject>::cast(handle(deopt_data->get(0), isolate));
+ CHECK(!module_object->IsNull(isolate));
+ }
+
+ Address old_mem_start, new_mem_start;
+ uint32_t old_size, new_size;
+
+ // Get mem buffer associated with module object
+ Handle<Object> obj(module_object->GetInternalField(kWasmMemArrayBuffer),
+ isolate);
+
+ if (obj->IsUndefined(isolate)) {
+ // If the module object has no linear memory associated with it,
+ // allocate a new array buffer of the given size.
+ old_mem_start = nullptr;
+ old_size = 0;
+ // TODO(gdeepti): Fix bounds check to take into account size of memtype.
+ new_size = delta_pages * wasm::WasmModule::kPageSize;
+ if (delta_pages > wasm::WasmModule::kMaxMemPages) {
+ return *isolate->factory()->NewNumberFromInt(-1);
+ }
+ new_mem_start =
+ static_cast<Address>(isolate->array_buffer_allocator()->Allocate(
+ static_cast<uint32_t>(new_size)));
+ if (new_mem_start == NULL) {
+ return *isolate->factory()->NewNumberFromInt(-1);
+ }
+#if DEBUG
+ // Double check the API allocator actually zero-initialized the memory.
+ for (size_t i = old_size; i < new_size; i++) {
+ DCHECK_EQ(0, new_mem_start[i]);
+ }
+#endif
+ } else {
+ Handle<JSArrayBuffer> old_buffer = Handle<JSArrayBuffer>::cast(obj);
+ old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ old_size = old_buffer->byte_length()->Number();
+ // If the old memory was zero-sized, we should have been in the
+ // "undefined" case above.
+ DCHECK_NOT_NULL(old_mem_start);
+ DCHECK_NE(0, old_size);
+
+ new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
+ if (new_size >
+ wasm::WasmModule::kMaxMemPages * wasm::WasmModule::kPageSize) {
+ return *isolate->factory()->NewNumberFromInt(-1);
+ }
+ new_mem_start = static_cast<Address>(realloc(old_mem_start, new_size));
+ if (new_mem_start == NULL) {
+ return *isolate->factory()->NewNumberFromInt(-1);
+ }
+ old_buffer->set_is_external(true);
+ isolate->heap()->UnregisterArrayBuffer(*old_buffer);
+ // Zero-initialize the uninitialized tail returned by realloc.
+ memset(new_mem_start + old_size, 0, new_size - old_size);
+ }
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(buffer, isolate, false, new_mem_start, new_size);
+ buffer->set_is_neuterable(false);
+
+ // Set new buffer to be wasm memory
+ module_object->SetInternalField(kWasmMemArrayBuffer, *buffer);
+
+ CHECK(wasm::UpdateWasmModuleMemory(module_object, old_mem_start,
+ new_mem_start, old_size, new_size));
+
+ return *isolate->factory()->NewNumberFromInt(old_size /
+ wasm::WasmModule::kPageSize);
+}
+
+RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kWasmTrapTypeError));
+}
+} // namespace internal
+} // namespace v8
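[editor's note] Runtime_WasmGrowMemory returns the previous size in pages on success and -1 when the request would exceed the engine's page limit or allocation fails. A standalone sketch of just that page arithmetic; the 64 KiB page size matches wasm, while the page cap below is an assumed illustrative value, not necessarily V8 5.4's exact kMaxMemPages:

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t kPageSize = 64 * 1024;
    constexpr uint32_t kMaxMemPages = 16384;  // assumed for the sketch

    // Mirrors the runtime contract: returns the old size in pages, or -1
    // if the grown size would exceed the cap.
    int32_t GrowMemory(uint32_t* size_in_bytes, uint32_t delta_pages) {
      uint64_t new_size =
          uint64_t{*size_in_bytes} + uint64_t{delta_pages} * kPageSize;
      if (new_size > uint64_t{kMaxMemPages} * kPageSize) return -1;
      uint32_t old_pages = *size_in_bytes / kPageSize;
      *size_in_bytes = static_cast<uint32_t>(new_size);
      return static_cast<int32_t>(old_pages);
    }

    int main() {
      uint32_t size = 2 * kPageSize;
      std::printf("%d\n", GrowMemory(&size, 3));  // prints 2; now 5 pages
      return 0;
    }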
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 2dadf25fa5..38eb51d5a3 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -5,6 +5,8 @@
#ifndef V8_RUNTIME_RUNTIME_H_
#define V8_RUNTIME_RUNTIME_H_
+#include <memory>
+
#include "src/allocation.h"
#include "src/base/platform/time.h"
#include "src/objects.h"
@@ -30,6 +32,8 @@ namespace internal {
// Entries have the form F(name, number of arguments, number of values):
+// A variable number of arguments is specified by a -1; additional
+// restrictions are noted in inline comments. (A sketch of how these
+// X-macro lists expand follows the array list below.)
#define FOR_EACH_INTRINSIC_ARRAY(F) \
F(FinishArrayPrototypeSetup, 1, 1) \
@@ -39,38 +43,37 @@ namespace internal {
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
F(GetArrayKeys, 2, 1) \
- F(ArrayConstructor, -1, 1) \
F(NewArray, -1 /* >= 3 */, 1) \
- F(InternalArrayConstructor, -1, 1) \
F(ArrayPush, -1, 1) \
+ F(FunctionBind, -1, 1) \
F(NormalizeElements, 1, 1) \
F(GrowArrayElements, 2, 1) \
F(HasComplexElements, 1, 1) \
F(IsArray, 1, 1) \
+ F(ArrayIsArray, 1, 1) \
F(HasCachedArrayIndex, 1, 1) \
F(GetCachedArrayIndex, 1, 1) \
F(FixedArrayGet, 2, 1) \
F(FixedArraySet, 3, 1) \
- F(ArraySpeciesConstructor, 1, 1)
-
-#define FOR_EACH_INTRINSIC_ATOMICS(F) \
- F(AtomicsCompareExchange, 4, 1) \
- F(AtomicsLoad, 2, 1) \
- F(AtomicsStore, 3, 1) \
- F(AtomicsAdd, 3, 1) \
- F(AtomicsSub, 3, 1) \
- F(AtomicsAnd, 3, 1) \
- F(AtomicsOr, 3, 1) \
- F(AtomicsXor, 3, 1) \
- F(AtomicsExchange, 3, 1) \
- F(AtomicsIsLockFree, 1, 1)
-
-
-#define FOR_EACH_INTRINSIC_FUTEX(F) \
- F(AtomicsFutexWait, 4, 1) \
- F(AtomicsFutexWake, 3, 1) \
- F(AtomicsFutexWakeOrRequeue, 5, 1) \
- F(AtomicsFutexNumWaitersForTesting, 2, 1)
+ F(ArraySpeciesConstructor, 1, 1) \
+ F(ArrayIncludes_Slow, 3, 1) \
+ F(ArrayIndexOf, 3, 1)
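[editor's note] Each FOR_EACH_INTRINSIC_* list is an X-macro: the caller supplies an F(name, nargs, nresults) that is expanded once per entry, so one table can generate enums, declarations, and name strings in lockstep. A standalone sketch of the technique with an invented two-entry list:

    #include <cstdio>

    // Invented two-entry list in the same (name, nargs, nresults) shape.
    #define FOR_EACH_DEMO_INTRINSIC(F) \
      F(Add, 2, 1)                     \
      F(Negate, 1, 1)

    // One expansion builds an enum, another a parallel name table.
    #define DECLARE_ID(Name, nargs, nresults) kDemo##Name,
    enum DemoIntrinsicId { FOR_EACH_DEMO_INTRINSIC(DECLARE_ID) kDemoCount };

    #define DECLARE_NAME(Name, nargs, nresults) #Name,
    const char* kDemoNames[] = {FOR_EACH_DEMO_INTRINSIC(DECLARE_NAME)};

    int main() {
      for (int i = 0; i < kDemoCount; i++) std::printf("%s\n", kDemoNames[i]);
      return 0;
    }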
+
+#define FOR_EACH_INTRINSIC_ATOMICS(F) \
+ F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
+ F(ThrowNotInt32SharedTypedArrayError, 1, 1) \
+ F(ThrowInvalidAtomicAccessIndexError, 0, 1) \
+ F(AtomicsCompareExchange, 4, 1) \
+ F(AtomicsAdd, 3, 1) \
+ F(AtomicsSub, 3, 1) \
+ F(AtomicsAnd, 3, 1) \
+ F(AtomicsOr, 3, 1) \
+ F(AtomicsXor, 3, 1) \
+ F(AtomicsExchange, 3, 1) \
+ F(AtomicsIsLockFree, 1, 1) \
+ F(AtomicsWait, 4, 1) \
+ F(AtomicsWake, 3, 1) \
+ F(AtomicsNumWaitersForTesting, 2, 1)
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowNonMethodError, 0, 1) \
@@ -81,7 +84,6 @@ namespace internal {
F(ThrowIfStaticPrototype, 1, 1) \
F(HomeObjectSymbol, 0, 1) \
F(DefineClass, 4, 1) \
- F(FinalizeClassDefinition, 2, 1) \
F(LoadFromSuper, 3, 1) \
F(LoadKeyedFromSuper, 3, 1) \
F(StoreToSuper_Strict, 4, 1) \
@@ -117,88 +119,95 @@ namespace internal {
F(WeakCollectionHas, 3, 1) \
F(WeakCollectionDelete, 3, 1) \
F(WeakCollectionSet, 4, 1) \
- F(GetWeakSetValues, 2, 1) \
- F(ObservationWeakMapCreate, 0, 1)
-
+ F(GetWeakSetValues, 2, 1)
#define FOR_EACH_INTRINSIC_COMPILER(F) \
F(CompileLazy, 1, 1) \
+ F(CompileBaseline, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
F(NotifyStubFailure, 0, 1) \
F(NotifyDeoptimized, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
F(TryInstallOptimizedCode, 1, 1) \
- F(ResolvePossiblyDirectEval, 5, 1)
-
+ F(ResolvePossiblyDirectEval, 6, 1) \
+ F(InstantiateAsmJs, 4, 1)
#define FOR_EACH_INTRINSIC_DATE(F) \
F(IsDate, 1, 1) \
F(DateCurrentTime, 0, 1) \
F(ThrowNotDateError, 0, 1)
-#define FOR_EACH_INTRINSIC_DEBUG(F) \
- F(HandleDebuggerStatement, 0, 1) \
- F(DebugBreak, 1, 1) \
- F(DebugBreakOnBytecode, 1, 1) \
- F(SetDebugEventListener, 2, 1) \
- F(ScheduleBreak, 0, 1) \
- F(DebugGetInternalProperties, 1, 1) \
- F(DebugGetPropertyDetails, 2, 1) \
- F(DebugGetProperty, 2, 1) \
- F(DebugPropertyTypeFromDetails, 1, 1) \
- F(DebugPropertyAttributesFromDetails, 1, 1) \
- F(DebugPropertyIndexFromDetails, 1, 1) \
- F(DebugNamedInterceptorPropertyValue, 2, 1) \
- F(DebugIndexedInterceptorElementValue, 2, 1) \
- F(CheckExecutionState, 1, 1) \
- F(GetFrameCount, 1, 1) \
- F(GetFrameDetails, 2, 1) \
- F(GetScopeCount, 2, 1) \
- F(GetScopeDetails, 4, 1) \
- F(GetAllScopesDetails, 4, 1) \
- F(GetFunctionScopeCount, 1, 1) \
- F(GetFunctionScopeDetails, 2, 1) \
- F(SetScopeVariableValue, 6, 1) \
- F(DebugPrintScopes, 0, 1) \
- F(GetThreadCount, 1, 1) \
- F(GetThreadDetails, 2, 1) \
- F(SetBreakPointsActive, 1, 1) \
- F(GetBreakLocations, 2, 1) \
- F(SetFunctionBreakPoint, 3, 1) \
- F(SetScriptBreakPoint, 4, 1) \
- F(ClearBreakPoint, 1, 1) \
- F(ChangeBreakOnException, 2, 1) \
- F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 2, 1) \
- F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 6, 1) \
- F(DebugEvaluateGlobal, 4, 1) \
- F(DebugGetLoadedScripts, 0, 1) \
- F(DebugReferencedBy, 3, 1) \
- F(DebugConstructedBy, 2, 1) \
- F(DebugGetPrototype, 1, 1) \
- F(DebugSetScriptSource, 2, 1) \
- F(FunctionGetInferredName, 1, 1) \
- F(FunctionGetDebugName, 1, 1) \
- F(GetFunctionCodePositionFromSource, 2, 1) \
- F(ExecuteInDebugContext, 1, 1) \
- F(GetDebugContext, 0, 1) \
- F(CollectGarbage, 1, 1) \
- F(GetHeapUsage, 0, 1) \
- F(GetScript, 1, 1) \
- F(DebugPrepareStepInIfStepping, 1, 1) \
- F(DebugPushPromise, 2, 1) \
- F(DebugPopPromise, 0, 1) \
- F(DebugPromiseEvent, 1, 1) \
- F(DebugAsyncTaskEvent, 1, 1) \
- F(DebugIsActive, 0, 1) \
- F(DebugBreakInOptimizedCode, 0, 1)
+#define FOR_EACH_INTRINSIC_DEBUG(F) \
+ F(HandleDebuggerStatement, 0, 1) \
+ F(DebugBreak, 1, 1) \
+ F(DebugBreakOnBytecode, 1, 1) \
+ F(SetDebugEventListener, 2, 1) \
+ F(ScheduleBreak, 0, 1) \
+ F(DebugGetInternalProperties, 1, 1) \
+ F(DebugGetPropertyDetails, 2, 1) \
+ F(DebugGetProperty, 2, 1) \
+ F(DebugPropertyTypeFromDetails, 1, 1) \
+ F(DebugPropertyAttributesFromDetails, 1, 1) \
+ F(CheckExecutionState, 1, 1) \
+ F(GetFrameCount, 1, 1) \
+ F(GetFrameDetails, 2, 1) \
+ F(GetScopeCount, 2, 1) \
+ F(GetScopeDetails, 4, 1) \
+ F(GetAllScopesDetails, 4, 1) \
+ F(GetFunctionScopeCount, 1, 1) \
+ F(GetFunctionScopeDetails, 2, 1) \
+ F(GetGeneratorScopeCount, 1, 1) \
+ F(GetGeneratorScopeDetails, 2, 1) \
+ F(SetScopeVariableValue, 6, 1) \
+ F(DebugPrintScopes, 0, 1) \
+ F(SetBreakPointsActive, 1, 1) \
+ F(GetBreakLocations, 2, 1) \
+ F(SetFunctionBreakPoint, 3, 1) \
+ F(SetScriptBreakPoint, 4, 1) \
+ F(ClearBreakPoint, 1, 1) \
+ F(ChangeBreakOnException, 2, 1) \
+ F(IsBreakOnException, 1, 1) \
+ F(PrepareStep, 2, 1) \
+ F(ClearStepping, 0, 1) \
+ F(DebugEvaluate, 6, 1) \
+ F(DebugEvaluateGlobal, 4, 1) \
+ F(DebugGetLoadedScripts, 0, 1) \
+ F(DebugReferencedBy, 3, 1) \
+ F(DebugConstructedBy, 2, 1) \
+ F(DebugGetPrototype, 1, 1) \
+ F(DebugSetScriptSource, 2, 1) \
+ F(FunctionGetInferredName, 1, 1) \
+ F(FunctionGetDebugName, 1, 1) \
+ F(ExecuteInDebugContext, 1, 1) \
+ F(GetDebugContext, 0, 1) \
+ F(CollectGarbage, 1, 1) \
+ F(GetHeapUsage, 0, 1) \
+ F(GetScript, 1, 1) \
+ F(ScriptLineCount, 1, 1) \
+ F(ScriptLineStartPosition, 2, 1) \
+ F(ScriptLineEndPosition, 2, 1) \
+ F(ScriptLocationFromLine, 4, 1) \
+ F(ScriptPositionInfo, 3, 1) \
+ F(ScriptSourceLine, 2, 1) \
+ F(DebugPrepareStepInIfStepping, 1, 1) \
+ F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
+ F(DebugRecordAsyncFunction, 1, 1) \
+ F(DebugPushPromise, 1, 1) \
+ F(DebugPopPromise, 0, 1) \
+ F(DebugAsyncTaskEvent, 1, 1) \
+ F(DebugIsActive, 0, 1) \
+ F(DebugBreakInOptimizedCode, 0, 1) \
+ F(GetWasmFunctionOffsetTable, 1, 1) \
+ F(DisassembleWasmFunction, 1, 1)
+
+#define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
#define FOR_EACH_INTRINSIC_FORIN(F) \
F(ForInDone, 2, 1) \
F(ForInEnumerate, 1, 1) \
F(ForInFilter, 2, 1) \
+ F(ForInHasProperty, 2, 1) \
F(ForInNext, 4, 1) \
F(ForInStep, 1, 1)
@@ -216,7 +225,6 @@ namespace internal {
F(FunctionGetScript, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
- F(FunctionGetPositionForOffset, 2, 1) \
F(FunctionGetContextData, 1, 1) \
F(FunctionSetInstanceClassName, 2, 1) \
F(FunctionSetLength, 2, 1) \
@@ -232,19 +240,15 @@ namespace internal {
F(FunctionToString, 1, 1)
#define FOR_EACH_INTRINSIC_GENERATOR(F) \
- F(CreateJSGeneratorObject, 0, 1) \
+ F(CreateJSGeneratorObject, 2, 1) \
F(SuspendJSGeneratorObject, 1, 1) \
- F(ResumeJSGeneratorObject, 3, 1) \
F(GeneratorClose, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
F(GeneratorGetReceiver, 1, 1) \
- F(GeneratorGetInput, 1, 1) \
+ F(GeneratorGetInputOrDebugPos, 1, 1) \
F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetSourcePosition, 1, 1) \
- F(GeneratorNext, 2, 1) \
- F(GeneratorReturn, 2, 1) \
- F(GeneratorThrow, 2, 1)
-
+ F(GeneratorGetResumeMode, 1, 1)
#ifdef V8_I18N_SUPPORT
#define FOR_EACH_INTRINSIC_I18N(F) \
@@ -270,62 +274,60 @@ namespace internal {
F(BreakIteratorFirst, 1, 1) \
F(BreakIteratorNext, 1, 1) \
F(BreakIteratorCurrent, 1, 1) \
- F(BreakIteratorBreakType, 1, 1)
+ F(BreakIteratorBreakType, 1, 1) \
+ F(StringToLowerCaseI18N, 1, 1) \
+ F(StringToUpperCaseI18N, 1, 1) \
+ F(StringLocaleConvertCase, 3, 1) \
+ F(DateCacheVersion, 0, 1)
#else
#define FOR_EACH_INTRINSIC_I18N(F)
#endif
#define FOR_EACH_INTRINSIC_INTERNAL(F) \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateSeqOneByteString, 1, 1) \
+ F(AllocateSeqTwoByteString, 1, 1) \
F(CheckIsBootstrapping, 0, 1) \
- F(ExportFromRuntime, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(EnqueueMicrotask, 1, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(ExportExperimentalFromRuntime, 1, 1) \
+ F(ExportFromRuntime, 1, 1) \
+ F(IncrementUseCounter, 1, 1) \
F(InstallToContext, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(UnwindAndFindExceptionHandler, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(NewTypeError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
+ F(Interrupt, 0, 1) \
+ F(IS_VAR, 1, 1) \
+ F(IsWasmObject, 1, 1) \
F(NewReferenceError, 2, 1) \
- F(ThrowIllegalInvocation, 0, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewTypeError, 2, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
F(PromiseRejectEvent, 3, 1) \
F(PromiseRevokeReject, 1, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ReThrow, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
F(StackGuard, 0, 1) \
- F(Interrupt, 0, 1) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
- F(CollectStackTrace, 2, 1) \
- F(MessageGetStartPosition, 1, 1) \
- F(MessageGetScript, 1, 1) \
- F(FormatMessageString, 4, 1) \
- F(CallSiteGetFileNameRT, 1, 1) \
- F(CallSiteGetFunctionNameRT, 1, 1) \
- F(CallSiteGetScriptNameOrSourceUrlRT, 1, 1) \
- F(CallSiteGetMethodNameRT, 1, 1) \
- F(CallSiteGetLineNumberRT, 1, 1) \
- F(CallSiteGetColumnNumberRT, 1, 1) \
- F(CallSiteIsNativeRT, 1, 1) \
- F(CallSiteIsToplevelRT, 1, 1) \
- F(CallSiteIsEvalRT, 1, 1) \
- F(CallSiteIsConstructorRT, 1, 1) \
- F(IS_VAR, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(ThrowCannotConvertToPrimitive, 0, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(ThrowCalledOnNullOrUndefined, 1, 1) \
F(ThrowConstructedNonConstructable, 1, 1) \
F(ThrowDerivedConstructorReturnedNonObject, 0, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(IncrementUseCounter, 1, 1) \
- F(GetOrdinaryHasInstance, 0, 1) \
- F(GetAndResetRuntimeCallStats, 0, 1)
-
-#define FOR_EACH_INTRINSIC_JSON(F) \
- F(QuoteJSONString, 1, 1) \
- F(BasicJSONStringify, 1, 1) \
- F(ParseJson, 1, 1)
-
+ F(ThrowGeneratorRunning, 0, 1) \
+ F(ThrowIllegalInvocation, 0, 1) \
+ F(ThrowIncompatibleMethodReceiver, 2, 1) \
+ F(ThrowInvalidStringLength, 0, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowNotGeneric, 1, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowWasmError, 2, 1) \
+ F(ThrowUndefinedOrNullToObject, 1, 1) \
+ F(Typeof, 1, 1) \
+ F(UnwindAndFindExceptionHandler, 0, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
F(CreateRegExpLiteral, 4, 1) \
@@ -347,32 +349,15 @@ namespace internal {
F(LiveEditCompareStrings, 2, 1) \
F(LiveEditRestartFrame, 2, 1)
-
-#define FOR_EACH_INTRINSIC_MATHS(F) \
- F(MathLogRT, 1, 1) \
- F(DoubleHi, 1, 1) \
- F(DoubleLo, 1, 1) \
- F(ConstructDouble, 2, 1) \
- F(RemPiO2, 2, 1) \
- F(MathAtan2, 2, 1) \
- F(MathExpRT, 1, 1) \
- F(MathPow, 2, 1) \
- F(MathPowRT, 2, 1) \
- F(GenerateRandomNumbers, 1, 1)
-
+#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 1, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F) \
- F(NumberToRadixString, 2, 1) \
- F(NumberToFixed, 2, 1) \
- F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1) \
F(IsValidSmi, 1, 1) \
F(StringToNumber, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
- F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToSmi, 1, 1) \
F(SmiLexicographicCompare, 2, 1) \
F(MaxSmi, 0, 1) \
@@ -386,11 +371,9 @@ namespace internal {
F(ObjectHasOwnProperty, 2, 1) \
F(InternalSetPrototype, 2, 1) \
F(SetPrototype, 2, 1) \
- F(GetOwnProperty_Legacy, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
- F(LoadGlobalViaContext, 1, 1) \
F(StoreGlobalViaContext_Sloppy, 2, 1) \
F(StoreGlobalViaContext_Strict, 2, 1) \
F(AddNamedProperty, 4, 1) \
@@ -400,22 +383,19 @@ namespace internal {
F(DeleteProperty_Sloppy, 2, 1) \
F(DeleteProperty_Strict, 2, 1) \
F(HasProperty, 2, 1) \
- F(PropertyIsEnumerable, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
F(GetInterceptorInfo, 1, 1) \
F(ToFastProperties, 1, 1) \
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
F(FinalizeInstanceSize, 1, 1) \
- F(GlobalProxy, 1, 1) \
- F(LookupAccessor, 3, 1) \
F(LoadMutableDouble, 2, 1) \
F(TryMigrateInstance, 1, 1) \
F(IsJSGlobalProxy, 1, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
- F(DefineDataPropertyUnchecked, 4, 1) \
F(DefineDataPropertyInLiteral, 5, 1) \
F(GetDataProperty, 2, 1) \
+ F(GetConstructorName, 1, 1) \
F(HasFastPackedElements, 1, 1) \
F(ValueOf, 1, 1) \
F(IsJSReceiver, 1, 1) \
@@ -425,7 +405,6 @@ namespace internal {
F(ToObject, 1, 1) \
F(ToPrimitive, 1, 1) \
F(ToPrimitive_Number, 1, 1) \
- F(ToPrimitive_String, 1, 1) \
F(ToNumber, 1, 1) \
F(ToInteger, 1, 1) \
F(ToLength, 1, 1) \
@@ -434,26 +413,10 @@ namespace internal {
F(SameValue, 2, 1) \
F(SameValueZero, 2, 1) \
F(Compare, 3, 1) \
- F(InstanceOf, 2, 1) \
- F(OrdinaryHasInstance, 2, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
F(IsAccessCheckNeeded, 1, 1) \
- F(ObjectDefineProperties, 2, 1) \
- F(ObjectDefineProperty, 3, 1)
-
-#define FOR_EACH_INTRINSIC_OBSERVE(F) \
- F(IsObserved, 1, 1) \
- F(SetIsObserved, 1, 1) \
- F(EnqueueMicrotask, 1, 1) \
- F(RunMicrotasks, 0, 1) \
- F(DeliverObservationChangeRecords, 2, 1) \
- F(GetObservationState, 0, 1) \
- F(ObserverObjectAndRecordHaveSameOrigin, 3, 1) \
- F(ObjectWasCreatedInCurrentOrigin, 1, 1) \
- F(GetObjectContextObjectObserve, 1, 1) \
- F(GetObjectContextObjectGetNotifier, 1, 1) \
- F(GetObjectContextNotifierPerformChange, 1, 1)
+ F(CreateDataProperty, 3, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
F(Multiply, 2, 1) \
@@ -474,7 +437,8 @@ namespace internal {
F(LessThan, 2, 1) \
F(GreaterThan, 2, 1) \
F(LessThanOrEqual, 2, 1) \
- F(GreaterThanOrEqual, 2, 1)
+ F(GreaterThanOrEqual, 2, 1) \
+ F(InstanceOf, 2, 1)
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
@@ -496,31 +460,28 @@ namespace internal {
F(RegExpExecReThrow, 4, 1) \
F(IsRegExp, 1, 1)
-#define FOR_EACH_INTRINSIC_SCOPES(F) \
- F(ThrowConstAssignError, 0, 1) \
- F(DeclareGlobals, 2, 1) \
- F(InitializeVarGlobal, 3, 1) \
- F(InitializeConstGlobal, 2, 1) \
- F(DeclareLookupSlot, 3, 1) \
- F(InitializeLegacyConstLookupSlot, 3, 1) \
- F(NewSloppyArguments_Generic, 1, 1) \
- F(NewStrictArguments, 1, 1) \
- F(NewRestParameter, 1, 1) \
- F(NewSloppyArguments, 3, 1) \
- F(NewClosure, 1, 1) \
- F(NewClosure_Tenured, 1, 1) \
- F(NewScriptContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
- F(PushBlockContext, 2, 1) \
- F(IsJSModule, 1, 1) \
- F(PushModuleContext, 2, 1) \
- F(DeclareModules, 1, 1) \
- F(DeleteLookupSlot, 1, 1) \
- F(LoadLookupSlot, 1, 1) \
- F(LoadLookupSlotInsideTypeof, 1, 1) \
- F(StoreLookupSlot_Sloppy, 2, 1) \
+#define FOR_EACH_INTRINSIC_SCOPES(F) \
+ F(ThrowConstAssignError, 0, 1) \
+ F(DeclareGlobals, 3, 1) \
+ F(DeclareGlobalsForInterpreter, 3, 1) \
+ F(InitializeVarGlobal, 3, 1) \
+ F(DeclareEvalFunction, 2, 1) \
+ F(DeclareEvalVar, 1, 1) \
+ F(NewSloppyArguments_Generic, 1, 1) \
+ F(NewStrictArguments, 1, 1) \
+ F(NewRestParameter, 1, 1) \
+ F(NewSloppyArguments, 3, 1) \
+ F(NewClosure, 1, 1) \
+ F(NewClosure_Tenured, 1, 1) \
+ F(NewScriptContext, 2, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(DeleteLookupSlot, 1, 1) \
+ F(LoadLookupSlot, 1, 1) \
+ F(LoadLookupSlotInsideTypeof, 1, 1) \
+ F(StoreLookupSlot_Sloppy, 2, 1) \
F(StoreLookupSlot_Strict, 2, 1)
#define FOR_EACH_INTRINSIC_SIMD(F) \
@@ -850,9 +811,6 @@ namespace internal {
F(StringToArray, 2, 1) \
F(StringToLowerCase, 1, 1) \
F(StringToUpperCase, 1, 1) \
- F(StringTrim, 3, 1) \
- F(TruncateString, 2, 1) \
- F(NewString, 2, 1) \
F(StringLessThan, 2, 1) \
F(StringLessThanOrEqual, 2, 1) \
F(StringGreaterThan, 2, 1) \
@@ -861,11 +819,7 @@ namespace internal {
F(StringNotEqual, 2, 1) \
F(FlattenString, 1, 1) \
F(StringCharFromCode, 1, 1) \
- F(StringCharAt, 2, 1) \
- F(OneByteSeqStringGetChar, 2, 1) \
- F(OneByteSeqStringSetChar, 3, 1) \
- F(TwoByteSeqStringGetChar, 2, 1) \
- F(TwoByteSeqStringSetChar, 3, 1) \
+ F(ExternalStringGetChar, 2, 1) \
F(StringCharCodeAt, 2, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
@@ -877,22 +831,28 @@ namespace internal {
F(SymbolIsPrivate, 1, 1)
#define FOR_EACH_INTRINSIC_TEST(F) \
+ F(ConstructDouble, 2, 1) \
F(DeoptimizeFunction, 1, 1) \
F(DeoptimizeNow, 0, 1) \
F(RunningInSimulator, 0, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(InterpretFunctionOnNextCall, 1, 1) \
+ F(BaselineFunctionOnNextCall, 1, 1) \
F(OptimizeOsr, -1, 1) \
F(NeverOptimizeFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(UnblockConcurrentRecompilation, 0, 1) \
F(GetOptimizationCount, 1, 1) \
F(GetUndetectable, 0, 1) \
+ F(GetCallable, 0, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
+ F(CheckWasmWrapperElision, 2, 1) \
F(NotifyContextDisposed, 0, 1) \
F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
F(DebugPrint, 1, 1) \
F(DebugTrace, 0, 1) \
+ F(GetExceptionDetails, 1, 1) \
F(GlobalPrint, 1, 1) \
F(SystemBreak, 0, 1) \
F(SetFlags, 1, 1) \
@@ -924,7 +884,11 @@ namespace internal {
F(HasFixedFloat32Elements, 1, 1) \
F(HasFixedFloat64Elements, 1, 1) \
F(HasFixedUint8ClampedElements, 1, 1) \
- F(SpeciesProtector, 0, 1)
+ F(SpeciesProtector, 0, 1) \
+ F(SerializeWasmModule, 1, 1) \
+ F(DeserializeWasmModule, 1, 1) \
+ F(IsAsmWasmCode, 1, 1) \
+ F(IsNotAsmWasmCode, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -935,7 +899,6 @@ namespace internal {
F(ArrayBufferViewGetByteLength, 1, 1) \
F(ArrayBufferViewGetByteOffset, 1, 1) \
F(TypedArrayGetLength, 1, 1) \
- F(DataViewGetBuffer, 1, 1) \
F(TypedArrayGetBuffer, 1, 1) \
F(TypedArraySetFastCases, 3, 1) \
F(TypedArrayMaxSizeInHeap, 0, 1) \
@@ -960,10 +923,9 @@ namespace internal {
F(DataViewSetFloat32, 4, 1) \
F(DataViewSetFloat64, 4, 1)
-
-#define FOR_EACH_INTRINSIC_URI(F) \
- F(URIEscape, 1, 1) \
- F(URIUnescape, 1, 1)
+#define FOR_EACH_INTRINSIC_WASM(F) \
+ F(WasmGrowMemory, 1, 1) \
+ F(WasmThrowTypeError, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
F(LoadLookupSlotForCall, 1, 2)
@@ -973,31 +935,32 @@ namespace internal {
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
-#define FOR_EACH_INTRINSIC_IC(F) \
- F(BinaryOpIC_Miss, 2, 1) \
- F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
- F(CallIC_Miss, 3, 1) \
- F(CompareIC_Miss, 3, 1) \
- F(ElementsTransitionAndStoreIC_Miss, 5, 1) \
- F(KeyedLoadIC_Miss, 4, 1) \
- F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
- F(KeyedStoreIC_Miss, 5, 1) \
- F(KeyedStoreIC_MissFromStubFailure, 5, 1) \
- F(KeyedStoreIC_Slow, 5, 1) \
- F(LoadElementWithInterceptor, 2, 1) \
- F(LoadIC_Miss, 4, 1) \
- F(LoadIC_MissFromStubFailure, 4, 1) \
- F(LoadPropertyWithInterceptor, 3, 1) \
- F(LoadPropertyWithInterceptorOnly, 3, 1) \
- F(StoreCallbackProperty, 6, 1) \
- F(StoreIC_Miss, 5, 1) \
- F(StoreIC_MissFromStubFailure, 5, 1) \
- F(StoreIC_Slow, 5, 1) \
- F(StorePropertyWithInterceptor, 3, 1) \
- F(ToBooleanIC_Miss, 1, 1) \
+#define FOR_EACH_INTRINSIC_IC(F) \
+ F(BinaryOpIC_Miss, 2, 1) \
+ F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
+ F(CallIC_Miss, 3, 1) \
+ F(CompareIC_Miss, 3, 1) \
+ F(ElementsTransitionAndStoreIC_Miss, 5, 1) \
+ F(KeyedLoadIC_Miss, 4, 1) \
+ F(KeyedLoadIC_MissFromStubFailure, 4, 1) \
+ F(KeyedStoreIC_Miss, 5, 1) \
+ F(KeyedStoreIC_MissFromStubFailure, 5, 1) \
+ F(KeyedStoreIC_Slow, 5, 1) \
+ F(LoadElementWithInterceptor, 2, 1) \
+ F(LoadGlobalIC_Miss, 2, 1) \
+ F(LoadGlobalIC_Slow, 2, 1) \
+ F(LoadIC_Miss, 4, 1) \
+ F(LoadIC_MissFromStubFailure, 4, 1) \
+ F(LoadPropertyWithInterceptor, 3, 1) \
+ F(LoadPropertyWithInterceptorOnly, 3, 1) \
+ F(StoreCallbackProperty, 6, 1) \
+ F(StoreIC_Miss, 5, 1) \
+ F(StoreIC_MissFromStubFailure, 5, 1) \
+ F(TransitionStoreIC_MissFromStubFailure, 6, 1) \
+ F(StorePropertyWithInterceptor, 3, 1) \
+ F(ToBooleanIC_Miss, 1, 1) \
F(Unreachable, 0, 1)
-
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
FOR_EACH_INTRINSIC_IC(F) \
FOR_EACH_INTRINSIC_ARRAY(F) \
@@ -1007,20 +970,18 @@ namespace internal {
FOR_EACH_INTRINSIC_COMPILER(F) \
FOR_EACH_INTRINSIC_DATE(F) \
FOR_EACH_INTRINSIC_DEBUG(F) \
+ FOR_EACH_INTRINSIC_ERROR(F) \
FOR_EACH_INTRINSIC_FORIN(F) \
FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_FUNCTION(F) \
- FOR_EACH_INTRINSIC_FUTEX(F) \
FOR_EACH_INTRINSIC_GENERATOR(F) \
FOR_EACH_INTRINSIC_I18N(F) \
FOR_EACH_INTRINSIC_INTERNAL(F) \
- FOR_EACH_INTRINSIC_JSON(F) \
FOR_EACH_INTRINSIC_LITERALS(F) \
FOR_EACH_INTRINSIC_LIVEEDIT(F) \
FOR_EACH_INTRINSIC_MATHS(F) \
FOR_EACH_INTRINSIC_NUMBERS(F) \
FOR_EACH_INTRINSIC_OBJECT(F) \
- FOR_EACH_INTRINSIC_OBSERVE(F) \
FOR_EACH_INTRINSIC_OPERATORS(F) \
FOR_EACH_INTRINSIC_PROXY(F) \
FOR_EACH_INTRINSIC_REGEXP(F) \
@@ -1030,7 +991,7 @@ namespace internal {
FOR_EACH_INTRINSIC_SYMBOL(F) \
FOR_EACH_INTRINSIC_TEST(F) \
FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- FOR_EACH_INTRINSIC_URI(F)
+ FOR_EACH_INTRINSIC_WASM(F)
// FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 2 flavors,
// either returning an object or a pair.
@@ -1109,7 +1070,8 @@ class Runtime : public AllStatic {
Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
- Isolate* isolate, Handle<Object> object, Handle<Object> key);
+ Isolate* isolate, Handle<Object> object, Handle<Object> key,
+ bool* is_found_out = nullptr);
enum TypedArrayId {
// arrayIds below should be synchronized with typedarray.js natives.
@@ -1150,7 +1112,7 @@ class RuntimeState {
void set_redirected_intrinsic_functions(
Runtime::Function* redirected_intrinsic_functions) {
- redirected_intrinsic_functions_.Reset(redirected_intrinsic_functions);
+ redirected_intrinsic_functions_.reset(redirected_intrinsic_functions);
}
private:
@@ -1158,8 +1120,7 @@ class RuntimeState {
unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
-
- base::SmartArrayPointer<Runtime::Function> redirected_intrinsic_functions_;
+ std::unique_ptr<Runtime::Function[]> redirected_intrinsic_functions_;
friend class Isolate;
friend class Runtime;
@@ -1178,8 +1139,8 @@ class AllocateTargetSpace : public BitField<AllocationSpace, 1, 3> {};
class DeclareGlobalsEvalFlag : public BitField<bool, 0, 1> {};
class DeclareGlobalsNativeFlag : public BitField<bool, 1, 1> {};
-STATIC_ASSERT(LANGUAGE_END == 3);
-class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 2> {};
+STATIC_ASSERT(LANGUAGE_END == 2);
+class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 1> {};
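[editor's note] With only two language modes left after the STATIC_ASSERT change, DeclareGlobalsLanguageMode fits in a single bit at position 2. A standalone sketch of the shift-and-mask arithmetic such a BitField performs (toy names; V8's template adds more helpers):

    #include <cassert>
    #include <cstdint>

    // Simplified encode/decode of a bit field `kSize` bits wide starting
    // at bit `kShift`.
    template <typename T, int kShift, int kSize>
    struct ToyBitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> kShift);
      }
    };

    enum ToyLanguageMode : uint32_t { kSloppy = 0, kStrict = 1 };
    using ToyDeclareGlobalsLanguageMode = ToyBitField<ToyLanguageMode, 2, 1>;

    int main() {
      uint32_t flags = ToyDeclareGlobalsLanguageMode::encode(kStrict);
      assert(ToyDeclareGlobalsLanguageMode::decode(flags) == kStrict);
      return 0;
    }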
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/OWNERS b/deps/v8/src/s390/OWNERS
index eb007cb908..752e8e3d81 100644
--- a/deps/v8/src/s390/OWNERS
+++ b/deps/v8/src/s390/OWNERS
@@ -3,3 +3,4 @@ dstence@us.ibm.com
joransiu@ca.ibm.com
mbrandy@us.ibm.com
michael_dawson@ca.ibm.com
+bjaideep@ca.ibm.com
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index 400d5436a1..189b89c258 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -47,6 +47,8 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSimd128() { return false; }
+
void RelocInfo::apply(intptr_t delta) {
// Absolute code pointer inside code object moves with the code object.
if (IsInternalReference(rmode_)) {
@@ -93,11 +95,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Assembler::target_address_at(pc_, host_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
@@ -121,20 +118,6 @@ Address RelocInfo::constant_pool_entry_address() {
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
- IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
@@ -156,19 +139,6 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
return code_targets_[index];
}
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
- icache_flush_mode);
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -195,6 +165,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -312,6 +283,7 @@ void RelocInfo::WipeOut() {
}
}
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 35ba4315db..a448947307 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -217,6 +217,33 @@ bool RelocInfo::IsCodedSpecially() {
bool RelocInfo::IsInConstantPool() { return false; }
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Assembler::target_address_at(pc_, host_);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Assembler::set_target_address_at(isolate_, pc_, host_,
+ reinterpret_cast<Address>(size), flush_mode);
+}
+
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-s390-inl.h for inlined constructors
@@ -227,7 +254,6 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -255,8 +281,7 @@ MemOperand::MemOperand(Register rx, Register rb, int32_t offset) {
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
- code_targets_(100),
- positions_recorder_(this) {
+ code_targets_(100) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_bound_pos_ = 0;
@@ -273,6 +298,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
void Assembler::Align(int m) {
@@ -943,6 +970,20 @@ void Assembler::rxy_form(Opcode op, Register r1, Register x2, Register b2,
emit6bytes(code);
}
+void Assembler::rxy_form(Opcode op, Register r1, Condition m3, Register b2,
+ Disp d2) {
+ DCHECK(is_int20(d2));
+ DCHECK(is_uint16(op));
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1.code())) * B36 |
+ (static_cast<uint64_t>(m3 & 0xF)) * B32 |
+ (static_cast<uint64_t>(b2.code())) * B28 |
+ (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+}
+
void Assembler::rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
Disp d2) {
DCHECK(is_int20(d2));
@@ -1379,7 +1420,6 @@ void Assembler::rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
RX_FORM_EMIT(bc, BC)
RR_FORM_EMIT(bctr, BCTR)
RXE_FORM_EMIT(ceb, CEB)
-RRE_FORM_EMIT(cefbr, CEFBR)
SS1_FORM_EMIT(ed, ED)
RX_FORM_EMIT(ex, EX)
RRE_FORM_EMIT(flogr, FLOGR)
@@ -1391,8 +1431,10 @@ RIL1_FORM_EMIT(llihf, LLIHF)
RIL1_FORM_EMIT(llilf, LLILF)
RRE_FORM_EMIT(lngr, LNGR)
RR_FORM_EMIT(lnr, LNR)
-RSY1_FORM_EMIT(loc, LOC)
+RRE_FORM_EMIT(lrvr, LRVR)
+RRE_FORM_EMIT(lrvgr, LRVGR)
RXY_FORM_EMIT(lrv, LRV)
+RXY_FORM_EMIT(lrvg, LRVG)
RXY_FORM_EMIT(lrvh, LRVH)
SS1_FORM_EMIT(mvn, MVN)
SS1_FORM_EMIT(nc, NC)
@@ -1408,7 +1450,9 @@ RRE_FORM_EMIT(popcnt, POPCNT_Z)
RIL1_FORM_EMIT(slfi, SLFI)
RXY_FORM_EMIT(slgf, SLGF)
RIL1_FORM_EMIT(slgfi, SLGFI)
+RXY_FORM_EMIT(strvh, STRVH)
RXY_FORM_EMIT(strv, STRV)
+RXY_FORM_EMIT(strvg, STRVG)
RI1_FORM_EMIT(tmll, TMLL)
SS1_FORM_EMIT(tr, TR)
S_FORM_EMIT(ts, TS)
@@ -1572,6 +1616,26 @@ void Assembler::llhr(Register r1, Register r2) { rre_form(LLHR, r1, r2); }
// Load Logical halfword Register-Register (64)
void Assembler::llghr(Register r1, Register r2) { rre_form(LLGHR, r1, r2); }
+// Load On Condition R-R (32)
+void Assembler::locr(Condition m3, Register r1, Register r2) {
+ rrf2_form(LOCR << 16 | m3 * B12 | r1.code() * B4 | r2.code());
+}
+
+// Load On Condition R-R (64)
+void Assembler::locgr(Condition m3, Register r1, Register r2) {
+ rrf2_form(LOCGR << 16 | m3 * B12 | r1.code() * B4 | r2.code());
+}
+
+// Load On Condition R-M (32)
+void Assembler::loc(Condition m3, Register r1, const MemOperand& src) {
+ rxy_form(LOC, r1, m3, src.rb(), src.offset());
+}
+
+// Load On Condition R-M (64)
+void Assembler::locg(Condition m3, Register r1, const MemOperand& src) {
+ rxy_form(LOCG, r1, m3, src.rb(), src.offset());
+}
+
// -------------------
// Branch Instructions
// -------------------
@@ -1855,7 +1919,7 @@ void Assembler::agf(Register r1, const MemOperand& opnd) {
// Add Immediate (64)
void Assembler::agfi(Register r1, const Operand& opnd) {
- ril_form(ALFI, r1, opnd);
+ ril_form(AGFI, r1, opnd);
}
// Add Register-Register (64<-32)
@@ -2034,9 +2098,15 @@ void Assembler::slgrk(Register r1, Register r2, Register r3) {
// ----------------------------
// Multiply Register-Storage (64<32)
void Assembler::m(Register r1, const MemOperand& opnd) {
+ DCHECK(r1.code() % 2 == 0);
rx_form(M, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
+void Assembler::mfy(Register r1, const MemOperand& opnd) {
+ DCHECK(r1.code() % 2 == 0);
+ rxy_form(MFY, r1, opnd.rx(), opnd.rb(), opnd.offset());
+}
+
// Multiply Register (64<32)
void Assembler::mr_z(Register r1, Register r2) {
DCHECK(r1.code() % 2 == 0);
@@ -2466,7 +2536,6 @@ void Assembler::srdl(Register r1, const Operand& opnd) {
void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
int32_t target_index = emit_code_target(target, rmode, ast_id);
@@ -2582,6 +2651,11 @@ void Assembler::iill(Register r1, const Operand& opnd) {
ri_form(IILL, r1, opnd);
}
+// Load Immediate 32->64
+void Assembler::lgfi(Register r1, const Operand& opnd) {
+ ril_form(LGFI, r1, opnd);
+}
+
// GPR <-> FPR Instructions
// Floating point instructions
@@ -2717,6 +2791,12 @@ void Assembler::lcdbr(DoubleRegister r1, DoubleRegister r2) {
Register::from_code(r2.code()));
}
+// Load Complement Register-Register (LB)
+void Assembler::lcebr(DoubleRegister r1, DoubleRegister r2) {
+ rre_form(LCEBR, Register::from_code(r1.code()),
+ Register::from_code(r2.code()));
+}
+
// Load Positive Register-Register (LB)
void Assembler::lpebr(DoubleRegister r1, DoubleRegister r2) {
rre_form(LPEBR, Register::from_code(r1.code()),
@@ -2862,10 +2942,8 @@ void Assembler::celgbr(Condition m3, Condition m4, DoubleRegister r1,
// Convert from Fixed Logical (F32<-32)
void Assembler::celfbr(Condition m3, Condition m4, DoubleRegister r1,
Register r2) {
- DCHECK_EQ(m3, Condition(0));
DCHECK_EQ(m4, Condition(0));
- rrfe_form(CELFBR, Condition(0), Condition(0), Register::from_code(r1.code()),
- r2);
+ rrfe_form(CELFBR, m3, Condition(0), Register::from_code(r1.code()), r2);
}
// Convert from Fixed Logical (L<-64)
@@ -2885,8 +2963,8 @@ void Assembler::cdlfbr(Condition m3, Condition m4, DoubleRegister r1,
}
// Convert from Fixed point (S<-32)
-void Assembler::cefbr(DoubleRegister r1, Register r2) {
- rre_form(CEFBR, Register::from_code(r1.code()), r2);
+void Assembler::cefbr(Condition m3, DoubleRegister r1, Register r2) {
+ rrfe_form(CEFBR, m3, Condition(0), Register::from_code(r1.code()), r2);
}
// Convert to Fixed point (32<-S)
@@ -3052,8 +3130,6 @@ void Assembler::EmitRelocations() {
reloc_info_writer.Write(&rinfo);
}
-
- reloc_info_writer.Finish();
}
} // namespace internal
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 0b9fa38539..ffe0ac4621 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -90,6 +90,9 @@ namespace internal {
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) V(d0)
@@ -143,8 +146,6 @@ struct Register {
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -185,6 +186,8 @@ const Register kLithiumScratch = r1; // lithium scratch.
const Register kRootRegister = r10; // Roots array pointer.
const Register cp = r13; // JavaScript context pointer.
+static const bool kSimpleFPAliasing = true;
+
// Double word FP register.
struct DoubleRegister {
enum Code {
@@ -198,8 +201,6 @@ struct DoubleRegister {
static const int kNumRegisters = Code::kAfterLast;
static const int kMaxNumRegisters = kNumRegisters;
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
@@ -221,7 +222,10 @@ struct DoubleRegister {
int reg_code;
};
-typedef DoubleRegister DoubleRegister;
+typedef DoubleRegister FloatRegister;
+
+// TODO(john.yan) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
@@ -265,9 +269,6 @@ const CRegister cr5 = {5};
const CRegister cr6 = {6};
const CRegister cr7 = {7};
-// TODO(john.yan) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -546,7 +547,6 @@ class Assembler : public AssemblerBase {
// Helper for unconditional branch to Label with update to save register
void b(Register r, Label* l) {
- positions_recorder()->WriteRecordedPositions();
int32_t halfwords = branch_offset(l) / 2;
brasl(r, Operand(halfwords));
}
@@ -607,7 +607,7 @@ class Assembler : public AssemblerBase {
void breakpoint(bool do_print) {
if (do_print) {
- printf("DebugBreak is inserted to %p\n", pc_);
+ PrintF("DebugBreak is inserted to %p\n", static_cast<void*>(pc_));
}
#if V8_HOST_ARCH_64_BIT
int64_t value = reinterpret_cast<uint64_t>(&v8::base::OS::DebugBreak);
@@ -765,7 +765,6 @@ class Assembler : public AssemblerBase {
RRE_FORM(cdr);
RXE_FORM(cdb);
RXE_FORM(ceb);
- RRE_FORM(cefbr);
RXE_FORM(ddb);
RRE_FORM(ddbr);
SS1_FORM(ed);
@@ -779,6 +778,7 @@ class Assembler : public AssemblerBase {
RI1_FORM(iihh);
RI1_FORM(iihl);
RIL1_FORM(iilf);
+ RIL1_FORM(lgfi);
RI1_FORM(iilh);
RI1_FORM(iill);
RRE_FORM(lcgr);
@@ -791,7 +791,10 @@ class Assembler : public AssemblerBase {
RR_FORM(lnr);
RSY1_FORM(loc);
RXY_FORM(lrv);
+ RRE_FORM(lrvr);
+ RRE_FORM(lrvgr);
RXY_FORM(lrvh);
+ RXY_FORM(lrvg);
RXE_FORM(mdb);
RRE_FORM(mdbr);
SS4_FORM(mvck);
@@ -817,6 +820,8 @@ class Assembler : public AssemblerBase {
RX_FORM(ste);
RXY_FORM(stey);
RXY_FORM(strv);
+ RXY_FORM(strvh);
+ RXY_FORM(strvg);
RI1_FORM(tmll);
SS1_FORM(tr);
S_FORM(ts);
@@ -871,6 +876,12 @@ class Assembler : public AssemblerBase {
void lmy(Register r1, Register r2, const MemOperand& src);
void lmg(Register r1, Register r2, const MemOperand& src);
+ // Load On Condition Instructions
+ void locr(Condition m3, Register r1, Register r2);
+ void locgr(Condition m3, Register r1, Register r2);
+ void loc(Condition m3, Register r1, const MemOperand& src);
+ void locg(Condition m3, Register r1, const MemOperand& src);
+
// Store Instructions
void st(Register r, const MemOperand& src);
void stc(Register r, const MemOperand& src);
@@ -1044,6 +1055,7 @@ class Assembler : public AssemblerBase {
// 32-bit Multiply Instructions
void m(Register r1, const MemOperand& opnd);
+ void mfy(Register r1, const MemOperand& opnd);
void mr_z(Register r1, Register r2);
void ml(Register r1, const MemOperand& opnd);
void mlr(Register r1, Register r2);
@@ -1144,7 +1156,7 @@ class Assembler : public AssemblerBase {
void cegbr(DoubleRegister fltReg, Register fixReg);
void cdgbr(DoubleRegister fltReg, Register fixReg);
void cfebr(Condition m3, Register fixReg, DoubleRegister fltReg);
- void cefbr(DoubleRegister fltReg, Register fixReg);
+ void cefbr(Condition m3, DoubleRegister fltReg, Register fixReg);
// Floating Point Compare Instructions
void cebr(DoubleRegister r1, DoubleRegister r2);
@@ -1171,6 +1183,7 @@ class Assembler : public AssemblerBase {
void sqdb(DoubleRegister r1, const MemOperand& opnd);
void sqdbr(DoubleRegister r1, DoubleRegister r2);
void lcdbr(DoubleRegister r1, DoubleRegister r2);
+ void lcebr(DoubleRegister r1, DoubleRegister r2);
void ldeb(DoubleRegister r1, const MemOperand& opnd);
enum FIDBRA_MASK3 {
@@ -1241,7 +1254,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
@@ -1250,10 +1263,6 @@ class Assembler : public AssemblerBase {
void dq(uint64_t data);
void dp(uintptr_t data);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@@ -1416,6 +1425,8 @@ class Assembler : public AssemblerBase {
inline void rxy_form(Opcode op, Register r1, Register x2, Register b2,
Disp d2);
+ inline void rxy_form(Opcode op, Register r1, Condition m3, Register b2,
+ Disp d2);
inline void rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
Disp d2);
@@ -1449,9 +1460,6 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
List<Handle<Code> > code_targets_;
-
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 1c7d27b5ca..ce8038418d 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -21,54 +21,15 @@
namespace v8 {
namespace internal {
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
+#define __ ACCESS_MASM(masm)
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+ __ StoreP(r3, MemOperand(sp, r1));
+ __ push(r3);
+ __ push(r4);
+ __ AddP(r2, r2, Operand(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -76,18 +37,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -756,11 +711,8 @@ void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
}
void MathPowStub::Generate(MacroAssembler* masm) {
- const Register base = r3;
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(r4));
- const Register heapnumbermap = r7;
- const Register heapnumber = r2;
const DoubleRegister double_base = d1;
const DoubleRegister double_exponent = d2;
const DoubleRegister double_result = d3;
@@ -769,36 +721,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = r9;
Label call_runtime, done, int_exponent;
- if (exponent_type() == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack to double registers.
- __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
- __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
-
- __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
- __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
- __ CmpP(scratch, heapnumbermap);
- __ bne(&call_runtime);
-
- __ LoadDouble(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
- __ b(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ ConvertIntToDouble(scratch, double_base);
- __ bind(&unpack_exponent);
-
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
- __ CmpP(scratch, heapnumbermap);
- __ bne(&call_runtime);
-
- __ LoadDouble(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
@@ -812,57 +735,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
double_scratch);
__ beq(&int_exponent, Label::kNear);
- if (exponent_type() == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label not_plus_half, not_minus_inf1, not_minus_inf2;
-
- // Test for 0.5.
- __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
- __ cdbr(double_exponent, double_scratch);
- __ bne(&not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
- __ cdbr(double_base, double_scratch);
- __ bne(&not_minus_inf1, Label::kNear);
- __ lcdbr(double_result, double_scratch);
- __ b(&done);
- __ bind(&not_minus_inf1);
-
- // Add +0 to convert -0 to +0.
- __ ldr(double_scratch, double_base);
- __ lzdr(kDoubleRegZero);
- __ adbr(double_scratch, kDoubleRegZero);
- __ sqdbr(double_result, double_scratch);
- __ b(&done);
-
- __ bind(&not_plus_half);
- __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
- __ cdbr(double_exponent, double_scratch);
- __ bne(&call_runtime);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
- __ cdbr(double_base, double_scratch);
- __ bne(&not_minus_inf2, Label::kNear);
- __ ldr(double_result, kDoubleRegZero);
- __ b(&done);
- __ bind(&not_minus_inf2);
-
- // Add +0 to convert -0 to +0.
- __ ldr(double_scratch, double_base);
- __ lzdr(kDoubleRegZero);
- __ adbr(double_scratch, kDoubleRegZero);
- __ LoadDoubleLiteral(double_result, 1.0, scratch);
- __ sqdbr(double_scratch, double_scratch);
- __ ddbr(double_result, double_scratch);
- __ b(&done);
- }
-
__ push(r14);
{
AllowExternalCallThatCantCauseGC scope(masm);
@@ -904,7 +776,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ beq(&no_carry, Label::kNear);
__ mdbr(double_result, double_scratch);
__ bind(&no_carry);
- __ ShiftRightArithP(scratch, scratch, Operand(1));
+ __ ShiftRightP(scratch, scratch, Operand(1));
+ __ LoadAndTestP(scratch, scratch);
__ beq(&loop_end, Label::kNear);
__ mdbr(double_scratch, double_scratch);
__ b(&while_true);
@@ -929,35 +802,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ConvertIntToDouble(exponent, double_exponent);
// Returning or bailing out.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ bind(&done);
- __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
- &call_runtime);
- __ StoreDouble(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- DCHECK(heapnumber.is(r2));
- __ Ret(2);
- } else {
- __ push(r14);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- }
- __ pop(r14);
- __ MovFromFloatResult(double_result);
-
- __ bind(&done);
- __ Ret();
+ __ push(r14);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(isolate()), 0, 2);
}
+ __ pop(r14);
+ __ MovFromFloatResult(double_result);
+
+ __ bind(&done);
+ __ Ret();
}
bool CEntryStub::NeedsImmovableCode() { return true; }
@@ -966,7 +823,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -974,7 +831,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -1041,7 +897,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
arg_stack_space += 2;
#endif
- __ EnterExitFrame(save_doubles(), arg_stack_space);
+ __ EnterExitFrame(save_doubles(), arg_stack_space, is_builtin_exit()
+ ? StackFrame::BUILTIN_EXIT
+ : StackFrame::EXIT);
// Store a copy of argc, argv in callee-saved registers for later.
__ LoadRR(r6, r2);
@@ -1080,7 +938,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// zLinux ABI requires caller's frame to have sufficient space for callee
  // preserved register save area.
// __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize));
- __ positions_recorder()->WriteRecordedPositions();
__ b(target);
__ bind(&return_label);
// __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize));
@@ -1293,12 +1150,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
- // Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- __ mov(r7, Operand(isolate()->factory()->the_hole_value()));
- __ StoreP(r7, MemOperand(ip));
-
// Invoke the function by calling through JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
@@ -1377,125 +1228,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(r14);
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = r3; // Object (lhs).
- Register const function = r2; // Function (rhs).
- Register const object_map = r4; // Map of {object}.
- Register const function_map = r5; // Map of {function}.
- Register const function_prototype = r6; // Prototype of {function}.
- Register const scratch = r7;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ bne(&fast_case);
- __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
- __ bne(&fast_case);
- __ LoadRoot(r2, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ bne(&slow_case);
- __ LoadRoot(r2, Heap::kFalseValueRootIndex);
- __ Ret();
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
- __ bne(&slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ LoadlB(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
- __ TestBit(scratch, Map::kIsConstructor, r0);
- __ beq(&slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
- __ bne(&slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ LoadP(function_prototype,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
- __ bne(&function_prototype_valid);
- __ LoadP(function_prototype,
- FieldMemOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Register const object_instance_type = function_map;
- Register const map_bit_field = function_map;
- Register const null = scratch;
- Register const result = r2;
-
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ LoadRoot(null, Heap::kNullValueRootIndex);
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ LoadlB(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
- __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0);
- __ bne(&fast_runtime_fallback);
- // Check if the current object is a Proxy.
- __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- __ beq(&fast_runtime_fallback);
-
- __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
- __ CmpP(object, function_prototype);
- __ beq(&done);
- __ CmpP(object, null);
- __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ bne(&loop);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
-
- // Found Proxy or access check needed: Call the runtime
- __ bind(&fast_runtime_fallback);
- __ Push(object, function_prototype);
- // Invalidate the instanceof cache.
- __ LoadSmiLiteral(scratch, Smi::FromInt(0));
- __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ Push(object, function);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
@@ -1530,7 +1262,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
@@ -1857,9 +1588,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(r2, &runtime);
- __ CompareObjectType(r2, r4, r4, JS_ARRAY_TYPE);
+ __ CompareObjectType(r2, r4, r4, JS_OBJECT_TYPE);
__ bne(&runtime);
- // Check that the JSArray is in fast case.
+ // Check that the object has fast elements.
__ LoadP(last_match_info_elements,
FieldMemOperand(r2, JSArray::kElementsOffset));
__ LoadP(r2,
@@ -1977,9 +1708,11 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// Number-of-arguments register must be smi-tagged to call out.
__ SmiTag(r2);
__ Push(r5, r4, r3, r2);
+ __ Push(cp);
__ CallStub(stub);
+ __ Pop(cp);
__ Pop(r5, r4, r3, r2);
__ SmiUntag(r2);
}
@@ -1993,12 +1726,15 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// r4 : feedback vector
// r5 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_initialize_count, done_increment_count;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
+ const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+
// Load the cache state into r7.
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r7, r4, r7);
@@ -2013,9 +1749,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Register weak_value = r9;
__ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset));
__ CmpP(r3, weak_value);
- __ beq(&done);
+ __ beq(&done_increment_count, Label::kNear);
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
- __ beq(&done);
+ __ beq(&done, Label::kNear);
__ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
__ bne(&check_allocation_site);
@@ -2036,7 +1772,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
__ CmpP(r3, r7);
__ bne(&megamorphic);
- __ b(&done);
+ __ b(&done_increment_count, Label::kNear);
__ bind(&miss);
@@ -2066,12 +1802,31 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub);
- __ b(&done);
+ __ b(&done_initialize_count, Label::kNear);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+ __ bind(&done_initialize_count);
+ // Initialize the call counter.
+ __ LoadSmiLiteral(r7, Smi::FromInt(1));
+ __ SmiToPtrArrayOffset(r6, r5);
+ __ AddP(r6, r4, r6);
+ __ StoreP(r7, FieldMemOperand(r6, count_offset), r0);
+ __ b(&done, Label::kNear);
+
+ __ bind(&done_increment_count);
+
+ // Increment the call count for monomorphic function calls.
+ __ SmiToPtrArrayOffset(r7, r5);
+ __ AddP(r7, r4, r7);
+
+ __ LoadP(r6, FieldMemOperand(r7, count_offset));
+ __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
+ __ StoreP(r6, FieldMemOperand(r7, count_offset), r0);
+
__ bind(&done);
}
@@ -2134,7 +1889,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ SmiToPtrArrayOffset(r7, r5);
__ AddP(r4, r4, r7);
__ LoadP(r5, FieldMemOperand(r4, count_offset));
- __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
__ StoreP(r5, FieldMemOperand(r4, count_offset), r0);
__ LoadRR(r4, r6);
@@ -2181,7 +1936,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
const int count_offset = FixedArray::kHeaderSize + kPointerSize;
__ LoadP(r5, FieldMemOperand(r8, count_offset));
- __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
+ __ AddSmiLiteral(r5, r5, Smi::FromInt(1), r0);
__ StoreP(r5, FieldMemOperand(r8, count_offset), r0);
__ bind(&call_function);
@@ -2251,7 +2006,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
// Initialize the call counter.
- __ LoadSmiLiteral(r7, Smi::FromInt(CallICNexus::kCallCountIncrement));
+ __ LoadSmiLiteral(r7, Smi::FromInt(1));
__ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
// Store the function. Use a stub since we need a frame for allocation.
@@ -2261,9 +2016,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(r3);
+ __ Push(cp, r3);
__ CallStub(&create_stub);
- __ Pop(r3);
+ __ Pop(cp, r3);
}
__ b(&call_function);
@@ -2340,13 +2095,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
}
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r2);
@@ -2677,69 +2426,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r5: from index (untagged)
__ SmiTag(r5, r5);
StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime,
- STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+ RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ Drop(3);
__ Ret();
generator.SkipSlow(masm, &runtime);
}
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in r2.
- STATIC_ASSERT(kSmiTag == 0);
- __ TestIfSmi(r2);
- __ Ret(eq);
-
- __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE);
- // r2: receiver
- // r3: receiver instance type
- Label not_heap_number;
- __ bne(&not_heap_number);
- __ Ret();
- __ bind(&not_heap_number);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in r2.
- __ AssertNotNumber(r2);
-
- __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE);
- // r2: receiver
- // r3: receiver instance type
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub, lt);
-
- Label not_oddball;
- __ CmpP(r3, Operand(ODDBALL_TYPE));
- __ bne(&not_oddball, Label::kNear);
- __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset));
- __ b(r14);
- __ bind(&not_oddball);
-
- __ push(r2); // Push argument.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in r2.
- __ AssertString(r2);
-
- // Check if string has a cached array index.
- Label runtime;
- __ LoadlW(r4, FieldMemOperand(r2, String::kHashFieldOffset));
- __ And(r0, r4, Operand(String::kContainsCachedArrayIndexMask));
- __ bne(&runtime);
- __ IndexFromHash(r4, r2);
- __ Ret();
-
- __ bind(&runtime);
- __ push(r2); // Push argument.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in r2.
Label done;
@@ -2917,7 +2610,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load r4 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(r4, handle(isolate()->heap()->undefined_value()));
+ __ Move(r4, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3299,10 +2992,6 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
__ CleanseP(r14);
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- __ positions_recorder()->WriteRecordedPositions();
-
__ b(ip); // Callee will return to R14 directly
}
@@ -3756,13 +3445,13 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -3898,11 +3587,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, feedback,
- receiver_map, scratch1, r9);
+ masm->isolate()->load_stub_cache()->GenerateProbe(
+ masm, receiver, name, feedback, receiver_map, scratch1, r9);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -3978,32 +3664,30 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ b(&compare_map);
}
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r3
- Register key = VectorStoreICDescriptor::NameRegister(); // r4
- Register vector = VectorStoreICDescriptor::VectorRegister(); // r5
- Register slot = VectorStoreICDescriptor::SlotRegister(); // r6
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2)); // r2
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r3
+ Register key = StoreWithVectorDescriptor::NameRegister(); // r4
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // r5
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // r6
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2)); // r2
Register feedback = r7;
Register receiver_map = r8;
Register scratch1 = r9;
@@ -4033,11 +3717,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
- scratch1, scratch2);
+ masm->isolate()->store_stub_cache()->GenerateProbe(
+ masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
__ bind(&miss);
StoreIC::GenerateMiss(masm);
@@ -4047,11 +3728,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ b(&compare_map);
}
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -4119,12 +3800,12 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
__ b(miss);
}
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r3
- Register key = VectorStoreICDescriptor::NameRegister(); // r4
- Register vector = VectorStoreICDescriptor::VectorRegister(); // r5
- Register slot = VectorStoreICDescriptor::SlotRegister(); // r6
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2)); // r2
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r3
+ Register key = StoreWithVectorDescriptor::NameRegister(); // r4
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // r5
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // r6
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2)); // r2
Register feedback = r7;
Register receiver_map = r8;
Register scratch1 = r9;
@@ -4402,17 +4083,11 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
- isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
@@ -4420,8 +4095,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
@@ -4439,13 +4112,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4527,7 +4202,7 @@ void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0, lt);
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN, gt);
if (IsFastPackedElementsKind(kind)) {
@@ -4625,15 +4300,15 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ StoreP(r4, MemOperand(r2, JSObject::kMapOffset));
+ __ StoreP(r4, FieldMemOperand(r2, JSObject::kMapOffset));
__ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r5, MemOperand(r2, JSObject::kPropertiesOffset));
- __ StoreP(r5, MemOperand(r2, JSObject::kElementsOffset));
+ __ StoreP(r5, FieldMemOperand(r2, JSObject::kPropertiesOffset));
+ __ StoreP(r5, FieldMemOperand(r2, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ AddP(r3, r2, Operand(JSObject::kHeaderSize));
+ __ AddP(r3, r2, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
- // -- r2 : result (untagged)
+ // -- r2 : result (tagged)
// -- r3 : result fields (untagged)
// -- r7 : result end (untagged)
// -- r4 : initial map
@@ -4653,8 +4328,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(r3, r7, r8);
- // Add the object tag to make the JSObject real.
- __ AddP(r2, r2, Operand(kHeapObjectTag));
__ Ret();
}
__ bind(&slack_tracking);
@@ -4674,9 +4347,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(r3, r7, r8);
- // Add the object tag to make the JSObject real.
- __ AddP(r2, r2, Operand(kHeapObjectTag));
-
// Check if we can finalize the instance size.
__ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
__ Ret(ne);
@@ -4702,10 +4372,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(r4);
}
- __ SubP(r2, r2, Operand(kHeapObjectTag));
__ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
__ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
__ AddP(r7, r2, r7);
+ __ SubP(r7, r7, Operand(kHeapObjectTag));
__ b(&done_allocate);
// Fall back to %NewObject.
@@ -4723,20 +4393,20 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r3);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r4 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ LoadRR(r4, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r4 point to the JavaScript frame.
+ __ LoadRR(r4, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
__ CmpP(ip, r3);
- __ bne(&loop);
+ __ b(&ok, Label::kNear);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
@@ -4751,13 +4421,13 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// specified by the function's internal formal parameter count.
Label rest_parameters;
__ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadW(
- r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_S390X
- __ SmiTag(r3);
+ __ SmiTag(r5);
#endif
- __ SubP(r2, r2, r3);
+ __ SubP(r2, r2, r5);
__ bgt(&rest_parameters);
// Return an empty rest parameter array.
@@ -4770,7 +4440,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in r0.
@@ -4804,6 +4474,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- r2 : number of rest parameters (tagged)
+ // -- r3 : function
// -- r4 : pointer just past first rest parameters
// -- r8 : size of rest parameters
// -- lr : return address
@@ -4811,9 +4482,9 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
- __ mov(r3, Operand(JSArray::kSize + FixedArray::kHeaderSize));
- __ AddP(r3, r3, r8);
- __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+ __ mov(r9, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ AddP(r9, r9, r8);
+ __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r5.
@@ -4847,17 +4518,25 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ AddP(r2, r6, Operand(kHeapObjectTag));
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+ __ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r3);
- __ Push(r2, r4, r3);
+ __ SmiTag(r9);
+ __ Push(r2, r4, r9);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ LoadRR(r5, r2);
__ Pop(r2, r4);
}
__ b(&done_allocate);
+
+ // Fall back to %NewRestParameter.
+ __ bind(&too_big_for_new_space);
+ __ push(r3);
+ __ TailCallRuntime(Runtime::kNewRestParameter);
}
}
@@ -4870,6 +4549,22 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r3);
+ // Make r9 point to the JavaScript frame.
+ __ LoadRR(r9, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ LoadP(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ LoadP(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
+ __ CmpP(ip, r3);
+ __ beq(&ok, Label::kNear);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadW(
@@ -4878,19 +4573,20 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ SmiTag(r4);
#endif
__ SmiToPtrArrayOffset(r5, r4);
- __ AddP(r5, fp, r5);
+ __ AddP(r5, r9, r5);
__ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
// r3 : function
// r4 : number of parameters (tagged)
// r5 : parameters pointer
+ // r9 : JavaScript frame pointer
// Registers used over whole function:
// r7 : arguments count (tagged)
// r8 : mapped parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
__ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
@@ -4943,7 +4639,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r1, r2, r1, r6, &runtime, TAG_OBJECT);
+ __ Allocate(r1, r2, r1, r6, &runtime, NO_ALLOCATION_FLAGS);
// r2 = address of new object(s) (tagged)
// r4 = argument count (smi-tagged)
@@ -5107,20 +4803,20 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(r3);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make r4 point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ LoadRR(r4, fp);
- __ b(&loop_entry);
- __ bind(&loop);
+ // Make r4 point to the JavaScript frame.
+ __ LoadRR(r4, fp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
__ CmpP(ip, r3);
- __ bne(&loop);
+ __ beq(&ok, Label::kNear);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -5130,9 +4826,9 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&arguments_adaptor);
{
- __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadW(r2, FieldMemOperand(
- r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ r6, SharedFunctionInfo::kFormalParameterCountOffset));
#if V8_TARGET_ARCH_S390X
__ SmiTag(r2);
#endif
@@ -5152,6 +4848,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- cp : context
// -- r2 : number of rest parameters (tagged)
+ // -- r3 : function
// -- r4 : pointer just past first rest parameters
// -- r8 : size of rest parameters
// -- lr : return address
@@ -5159,9 +4856,9 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// Allocate space for the strict arguments object plus the backing store.
Label allocate, done_allocate;
- __ mov(r3, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ AddP(r3, r3, r8);
- __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT);
+ __ mov(r9, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ AddP(r9, r9, r8);
+ __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r5.
@@ -5196,47 +4893,25 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ AddP(r2, r6, Operand(kHeapObjectTag));
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ CmpP(r9, Operand(Page::kMaxRegularHeapObjectSize));
+ __ bgt(&too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r3);
- __ Push(r2, r4, r3);
+ __ SmiTag(r9);
+ __ Push(r2, r4, r9);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ LoadRR(r5, r2);
__ Pop(r2, r4);
}
__ b(&done_allocate);
-}
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context = cp;
- Register result = r2;
- Register slot = r4;
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = result;
- }
-
- // Load the PropertyCell value at the specified slot.
- __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
- __ AddP(result, context, r0);
- __ LoadP(result, ContextMemOperand(result));
- __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
-
- // If the result is not the_hole, return. Otherwise, handle in the runtime.
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- Label runtime;
- __ beq(&runtime);
- __ Ret();
- __ bind(&runtime);
-
- // Fallback to runtime.
- __ SmiTag(slot);
- __ Push(slot);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ push(r3);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
@@ -5533,7 +5208,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// context save
__ push(context);
@@ -5569,10 +5248,10 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// it's not controlled by GC.
// S390 LINUX ABI:
//
- // Create 5 extra slots on stack:
+ // Create 4 extra slots on stack:
// [0] space for DirectCEntryStub's LR save
- // [1-4] FunctionCallbackInfo
- const int kApiStackSpace = 5;
+ // [1-3] FunctionCallbackInfo
+ const int kApiStackSpace = 4;
const int kFunctionCallbackInfoOffset =
(kStackFrameExtraParamSlot + 1) * kPointerSize;
@@ -5591,9 +5270,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::length_ = argc
__ LoadImmP(ip, Operand(argc()));
__ StoreW(ip, MemOperand(r2, 2 * kPointerSize));
- // FunctionCallbackInfo::is_construct_call_ = 0
- __ LoadImmP(ip, Operand::Zero());
- __ StoreW(ip, MemOperand(r2, 2 * kPointerSize + kIntSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -5610,9 +5286,9 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
int stack_space = 0;
- MemOperand is_construct_call_operand =
- MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
- MemOperand* stack_space_operand = &is_construct_call_operand;
+ MemOperand length_operand =
+ MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
+ MemOperand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = NULL;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
@@ -5621,18 +5297,39 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
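
Two coordinated API-callback changes above: the implicit argument array gains a slot for new.target (kArgsLength goes from 7 to 8, with undefined pushed as the placeholder), and FunctionCallbackInfo drops its is_construct_call_ field, shrinking the scratch frame from five slots to four. A sketch of the new accounting, assuming the three remaining FunctionCallbackInfo words are implicit_args_, values_ and length_:

// One slot for DirectCEntryStub's LR save plus the three remaining
// FunctionCallbackInfo words; is_construct_call_ no longer exists.
const int kApiStackSpace = 1 /* LR save */ + 3 /* FunctionCallbackInfo */;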
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- r4 : api_function_address
- // -----------------------------------
-
- Register api_function_address = ApiGetterDescriptor::function_address();
int arg0Slot = 0;
int accessorInfoSlot = 0;
int apiStackSpace = 0;
- DCHECK(api_function_address.is(r4));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = r6;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = r4;
+
+ __ push(receiver);
+ // Push data from AccessorInfo.
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ push(scratch);
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+ __ Push(scratch, scratch);
+ __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch, holder);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5680,6 +5377,10 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ LoadP(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
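
The rewritten getter stub builds v8::PropertyCallbackInfo::args_ directly on the stack. Reading the pushes together with the STATIC_ASSERTs gives this layout (the stack grows downward, so the receiver pushed first lands at the highest index):

//   args_[6]  receiver ('this')
//   args_[5]  data                   (from AccessorInfo::kDataOffset)
//   args_[4]  return value           (undefined)
//   args_[3]  return value default   (undefined)
//   args_[2]  isolate
//   args_[1]  holder
//   args_[0]  should_throw_on_error  (Smi 0 == false)
//   name handle                      (from AccessorInfo::kNameOffset)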
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index 6636a7ca1d..d92cc54ab2 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_S390
+#include <memory>
+
#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"
@@ -15,56 +17,6 @@ namespace internal {
#define __ masm.
-#if defined(USE_SIMULATOR)
-byte* fast_exp_s390_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
- return Simulator::current(isolate)->CallFPReturnsDouble(
- fast_exp_s390_machine_code, x, 0);
-}
-#endif
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
-
- {
- DoubleRegister input = d0;
- DoubleRegister result = d2;
- DoubleRegister double_scratch1 = d3;
- DoubleRegister double_scratch2 = d4;
- Register temp1 = r6;
- Register temp2 = r7;
- Register temp3 = r8;
-
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
- double_scratch2, temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- __ ldr(d0, result);
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
- fast_exp_s390_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
@@ -172,7 +124,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ SmiToDoubleArrayOffset(r14, length);
__ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
-
+ __ SubP(array, array, Operand(kHeapObjectTag));
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
@@ -302,12 +254,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ AddP(array_size, r0);
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, not tagged as heap object
+ // array: destination FixedArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
- __ AddP(array, Operand(kHeapObjectTag));
+ __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
+ r0);
+ __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
// Prepare for conversion loop.
Register src_elements = elements;
@@ -507,95 +459,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ bind(&done);
}
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1, Register temp2,
- Register temp3) {
- DCHECK(!input.is(result));
- DCHECK(!input.is(double_scratch1));
- DCHECK(!input.is(double_scratch2));
- DCHECK(!result.is(double_scratch1));
- DCHECK(!result.is(double_scratch2));
- DCHECK(!double_scratch1.is(double_scratch2));
- DCHECK(!temp1.is(temp2));
- DCHECK(!temp1.is(temp3));
- DCHECK(!temp2.is(temp3));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label zero, infinity, done;
-
- __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ LoadDouble(double_scratch1, ExpConstant(0, temp3));
- __ cdbr(double_scratch1, input);
- __ ldr(result, input);
- __ bunordered(&done, Label::kNear);
- __ bge(&zero, Label::kNear);
-
- __ LoadDouble(double_scratch2, ExpConstant(1, temp3));
- __ cdbr(input, double_scratch2);
- __ bge(&infinity, Label::kNear);
-
- __ LoadDouble(double_scratch1, ExpConstant(3, temp3));
- __ LoadDouble(result, ExpConstant(4, temp3));
-
- // Do not generate madbr, as intermediate result are not
- // rounded properly
- __ mdbr(double_scratch1, input);
- __ adbr(double_scratch1, result);
-
- // Move low word of double_scratch1 to temp2
- __ lgdr(temp2, double_scratch1);
- __ nihf(temp2, Operand::Zero());
-
- __ sdbr(double_scratch1, result);
- __ LoadDouble(result, ExpConstant(6, temp3));
- __ LoadDouble(double_scratch2, ExpConstant(5, temp3));
- __ mdbr(double_scratch1, double_scratch2);
- __ sdbr(double_scratch1, input);
- __ sdbr(result, double_scratch1);
- __ ldr(double_scratch2, double_scratch1);
- __ mdbr(double_scratch2, double_scratch2);
- __ mdbr(result, double_scratch2);
- __ LoadDouble(double_scratch2, ExpConstant(7, temp3));
- __ mdbr(result, double_scratch2);
- __ sdbr(result, double_scratch1);
- __ LoadDouble(double_scratch2, ExpConstant(8, temp3));
- __ adbr(result, double_scratch2);
- __ ShiftRight(temp1, temp2, Operand(11));
- __ AndP(temp2, Operand(0x7ff));
- __ AddP(temp1, Operand(0x3ff));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ShiftLeft(temp2, temp2, Operand(3));
-
- __ lg(temp2, MemOperand(temp2, temp3));
- __ sllg(temp1, temp1, Operand(52));
- __ ogr(temp2, temp1);
- __ ldgr(double_scratch1, temp2);
-
- __ mdbr(result, double_scratch1);
- __ b(&done, Label::kNear);
-
- __ bind(&zero);
- __ lzdr(kDoubleRegZero);
- __ ldr(result, kDoubleRegZero);
- __ b(&done, Label::kNear);
-
- __ bind(&infinity);
- __ LoadDouble(result, ExpConstant(2, temp3));
-
- __ bind(&done);
-}
-
#undef __
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
@@ -605,7 +468,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
- base::SmartPointer<CodePatcher> patcher(
+ std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length(), CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
diff --git a/deps/v8/src/s390/codegen-s390.h b/deps/v8/src/s390/codegen-s390.h
index 18cf8e29d1..3001bc13f8 100644
--- a/deps/v8/src/s390/codegen-s390.h
+++ b/deps/v8/src/s390/codegen-s390.h
@@ -8,7 +8,6 @@
#ifndef V8_S390_CODEGEN_S390_H_
#define V8_S390_CODEGEN_S390_H_
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -25,19 +24,6 @@ class StringCharLoadGenerator : public AllStatic {
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- // Register input isn't modified. All other registers are clobbered.
- static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
- DoubleRegister result, DoubleRegister double_scratch1,
- DoubleRegister double_scratch2, Register temp1,
- Register temp2, Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/constants-s390.cc b/deps/v8/src/s390/constants-s390.cc
index a958082a8f..da53613bc7 100644
--- a/deps/v8/src/s390/constants-s390.cc
+++ b/deps/v8/src/s390/constants-s390.cc
@@ -9,6 +9,267 @@
namespace v8 {
namespace internal {
+Instruction::OpcodeFormatType Instruction::OpcodeFormatTable[] = {
+ // Based on Figure B-3 in z/Architecture Principles of
+ // Operation.
+ TWO_BYTE_OPCODE, // 0x00
+ TWO_BYTE_OPCODE, // 0x01
+ TWO_BYTE_DISJOINT_OPCODE, // 0x02
+ TWO_BYTE_DISJOINT_OPCODE, // 0x03
+ ONE_BYTE_OPCODE, // 0x04
+ ONE_BYTE_OPCODE, // 0x05
+ ONE_BYTE_OPCODE, // 0x06
+ ONE_BYTE_OPCODE, // 0x07
+ ONE_BYTE_OPCODE, // 0x08
+ ONE_BYTE_OPCODE, // 0x09
+ ONE_BYTE_OPCODE, // 0x0a
+ ONE_BYTE_OPCODE, // 0x0b
+ ONE_BYTE_OPCODE, // 0x0c
+ ONE_BYTE_OPCODE, // 0x0d
+ ONE_BYTE_OPCODE, // 0x0e
+ ONE_BYTE_OPCODE, // 0x0f
+ ONE_BYTE_OPCODE, // 0x10
+ ONE_BYTE_OPCODE, // 0x11
+ ONE_BYTE_OPCODE, // 0x12
+ ONE_BYTE_OPCODE, // 0x13
+ ONE_BYTE_OPCODE, // 0x14
+ ONE_BYTE_OPCODE, // 0x15
+ ONE_BYTE_OPCODE, // 0x16
+ ONE_BYTE_OPCODE, // 0x17
+ ONE_BYTE_OPCODE, // 0x18
+ ONE_BYTE_OPCODE, // 0x19
+ ONE_BYTE_OPCODE, // 0x1a
+ ONE_BYTE_OPCODE, // 0x1b
+ ONE_BYTE_OPCODE, // 0x1c
+ ONE_BYTE_OPCODE, // 0x1d
+ ONE_BYTE_OPCODE, // 0x1e
+ ONE_BYTE_OPCODE, // 0x1f
+ ONE_BYTE_OPCODE, // 0x20
+ ONE_BYTE_OPCODE, // 0x21
+ ONE_BYTE_OPCODE, // 0x22
+ ONE_BYTE_OPCODE, // 0x23
+ ONE_BYTE_OPCODE, // 0x24
+ ONE_BYTE_OPCODE, // 0x25
+ ONE_BYTE_OPCODE, // 0x26
+ ONE_BYTE_OPCODE, // 0x27
+ ONE_BYTE_OPCODE, // 0x28
+ ONE_BYTE_OPCODE, // 0x29
+ ONE_BYTE_OPCODE, // 0x2a
+ ONE_BYTE_OPCODE, // 0x2b
+ ONE_BYTE_OPCODE, // 0x2c
+ ONE_BYTE_OPCODE, // 0x2d
+ ONE_BYTE_OPCODE, // 0x2e
+ ONE_BYTE_OPCODE, // 0x2f
+ ONE_BYTE_OPCODE, // 0x30
+ ONE_BYTE_OPCODE, // 0x31
+ ONE_BYTE_OPCODE, // 0x32
+ ONE_BYTE_OPCODE, // 0x33
+ ONE_BYTE_OPCODE, // 0x34
+ ONE_BYTE_OPCODE, // 0x35
+ ONE_BYTE_OPCODE, // 0x36
+ ONE_BYTE_OPCODE, // 0x37
+ ONE_BYTE_OPCODE, // 0x38
+ ONE_BYTE_OPCODE, // 0x39
+ ONE_BYTE_OPCODE, // 0x3a
+ ONE_BYTE_OPCODE, // 0x3b
+ ONE_BYTE_OPCODE, // 0x3c
+ ONE_BYTE_OPCODE, // 0x3d
+ ONE_BYTE_OPCODE, // 0x3e
+ ONE_BYTE_OPCODE, // 0x3f
+ ONE_BYTE_OPCODE, // 0x40
+ ONE_BYTE_OPCODE, // 0x41
+ ONE_BYTE_OPCODE, // 0x42
+ ONE_BYTE_OPCODE, // 0x43
+ ONE_BYTE_OPCODE, // 0x44
+ ONE_BYTE_OPCODE, // 0x45
+ ONE_BYTE_OPCODE, // 0x46
+ ONE_BYTE_OPCODE, // 0x47
+ ONE_BYTE_OPCODE, // 0x48
+ ONE_BYTE_OPCODE, // 0x49
+ ONE_BYTE_OPCODE, // 0x4a
+ ONE_BYTE_OPCODE, // 0x4b
+ ONE_BYTE_OPCODE, // 0x4c
+ ONE_BYTE_OPCODE, // 0x4d
+ ONE_BYTE_OPCODE, // 0x4e
+ ONE_BYTE_OPCODE, // 0x4f
+ ONE_BYTE_OPCODE, // 0x50
+ ONE_BYTE_OPCODE, // 0x51
+ ONE_BYTE_OPCODE, // 0x52
+ ONE_BYTE_OPCODE, // 0x53
+ ONE_BYTE_OPCODE, // 0x54
+ ONE_BYTE_OPCODE, // 0x55
+ ONE_BYTE_OPCODE, // 0x56
+ ONE_BYTE_OPCODE, // 0x57
+ ONE_BYTE_OPCODE, // 0x58
+ ONE_BYTE_OPCODE, // 0x59
+ ONE_BYTE_OPCODE, // 0x5a
+ ONE_BYTE_OPCODE, // 0x5b
+ ONE_BYTE_OPCODE, // 0x5c
+ ONE_BYTE_OPCODE, // 0x5d
+ ONE_BYTE_OPCODE, // 0x5e
+ ONE_BYTE_OPCODE, // 0x5f
+ ONE_BYTE_OPCODE, // 0x60
+ ONE_BYTE_OPCODE, // 0x61
+ ONE_BYTE_OPCODE, // 0x62
+ ONE_BYTE_OPCODE, // 0x63
+ ONE_BYTE_OPCODE, // 0x64
+ ONE_BYTE_OPCODE, // 0x65
+ ONE_BYTE_OPCODE, // 0x66
+ ONE_BYTE_OPCODE, // 0x67
+ ONE_BYTE_OPCODE, // 0x68
+ ONE_BYTE_OPCODE, // 0x69
+ ONE_BYTE_OPCODE, // 0x6a
+ ONE_BYTE_OPCODE, // 0x6b
+ ONE_BYTE_OPCODE, // 0x6c
+ ONE_BYTE_OPCODE, // 0x6d
+ ONE_BYTE_OPCODE, // 0x6e
+ ONE_BYTE_OPCODE, // 0x6f
+ ONE_BYTE_OPCODE, // 0x70
+ ONE_BYTE_OPCODE, // 0x71
+ ONE_BYTE_OPCODE, // 0x72
+ ONE_BYTE_OPCODE, // 0x73
+ ONE_BYTE_OPCODE, // 0x74
+ ONE_BYTE_OPCODE, // 0x75
+ ONE_BYTE_OPCODE, // 0x76
+ ONE_BYTE_OPCODE, // 0x77
+ ONE_BYTE_OPCODE, // 0x78
+ ONE_BYTE_OPCODE, // 0x79
+ ONE_BYTE_OPCODE, // 0x7a
+ ONE_BYTE_OPCODE, // 0x7b
+ ONE_BYTE_OPCODE, // 0x7c
+ ONE_BYTE_OPCODE, // 0x7d
+ ONE_BYTE_OPCODE, // 0x7e
+ ONE_BYTE_OPCODE, // 0x7f
+ ONE_BYTE_OPCODE, // 0x80
+ ONE_BYTE_OPCODE, // 0x81
+ ONE_BYTE_OPCODE, // 0x82
+ ONE_BYTE_OPCODE, // 0x83
+ ONE_BYTE_OPCODE, // 0x84
+ ONE_BYTE_OPCODE, // 0x85
+ ONE_BYTE_OPCODE, // 0x86
+ ONE_BYTE_OPCODE, // 0x87
+ ONE_BYTE_OPCODE, // 0x88
+ ONE_BYTE_OPCODE, // 0x89
+ ONE_BYTE_OPCODE, // 0x8a
+ ONE_BYTE_OPCODE, // 0x8b
+ ONE_BYTE_OPCODE, // 0x8c
+ ONE_BYTE_OPCODE, // 0x8d
+ ONE_BYTE_OPCODE, // 0x8e
+ ONE_BYTE_OPCODE, // 0x8f
+ ONE_BYTE_OPCODE, // 0x90
+ ONE_BYTE_OPCODE, // 0x91
+ ONE_BYTE_OPCODE, // 0x92
+ ONE_BYTE_OPCODE, // 0x93
+ ONE_BYTE_OPCODE, // 0x94
+ ONE_BYTE_OPCODE, // 0x95
+ ONE_BYTE_OPCODE, // 0x96
+ ONE_BYTE_OPCODE, // 0x97
+ ONE_BYTE_OPCODE, // 0x98
+ ONE_BYTE_OPCODE, // 0x99
+ ONE_BYTE_OPCODE, // 0x9a
+ ONE_BYTE_OPCODE, // 0x9b
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9c
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9d
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9e
+ TWO_BYTE_DISJOINT_OPCODE, // 0x9f
+ TWO_BYTE_DISJOINT_OPCODE, // 0xa0
+ TWO_BYTE_DISJOINT_OPCODE, // 0xa1
+ TWO_BYTE_DISJOINT_OPCODE, // 0xa2
+ TWO_BYTE_DISJOINT_OPCODE, // 0xa3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xa4
+ THREE_NIBBLE_OPCODE, // 0xa5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xa6
+ THREE_NIBBLE_OPCODE, // 0xa7
+ ONE_BYTE_OPCODE, // 0xa8
+ ONE_BYTE_OPCODE, // 0xa9
+ ONE_BYTE_OPCODE, // 0xaa
+ ONE_BYTE_OPCODE, // 0xab
+ ONE_BYTE_OPCODE, // 0xac
+ ONE_BYTE_OPCODE, // 0xad
+ ONE_BYTE_OPCODE, // 0xae
+ ONE_BYTE_OPCODE, // 0xaf
+ ONE_BYTE_OPCODE, // 0xb0
+ ONE_BYTE_OPCODE, // 0xb1
+ TWO_BYTE_OPCODE, // 0xb2
+ TWO_BYTE_OPCODE, // 0xb3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xb4
+ TWO_BYTE_DISJOINT_OPCODE, // 0xb5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xb6
+ TWO_BYTE_DISJOINT_OPCODE, // 0xb7
+ TWO_BYTE_DISJOINT_OPCODE, // 0xb8
+ TWO_BYTE_OPCODE, // 0xb9
+ ONE_BYTE_OPCODE, // 0xba
+ ONE_BYTE_OPCODE, // 0xbb
+ ONE_BYTE_OPCODE, // 0xbc
+ ONE_BYTE_OPCODE, // 0xbd
+ ONE_BYTE_OPCODE, // 0xbe
+ ONE_BYTE_OPCODE, // 0xbf
+ THREE_NIBBLE_OPCODE, // 0xc0
+ THREE_NIBBLE_OPCODE, // 0xc1
+ THREE_NIBBLE_OPCODE, // 0xc2
+ THREE_NIBBLE_OPCODE, // 0xc3
+ THREE_NIBBLE_OPCODE, // 0xc4
+ THREE_NIBBLE_OPCODE, // 0xc5
+ THREE_NIBBLE_OPCODE, // 0xc6
+ ONE_BYTE_OPCODE, // 0xc7
+ THREE_NIBBLE_OPCODE, // 0xc8
+ THREE_NIBBLE_OPCODE, // 0xc9
+ THREE_NIBBLE_OPCODE, // 0xca
+ THREE_NIBBLE_OPCODE, // 0xcb
+ THREE_NIBBLE_OPCODE, // 0xcc
+ TWO_BYTE_DISJOINT_OPCODE, // 0xcd
+ TWO_BYTE_DISJOINT_OPCODE, // 0xce
+ TWO_BYTE_DISJOINT_OPCODE, // 0xcf
+ ONE_BYTE_OPCODE, // 0xd0
+ ONE_BYTE_OPCODE, // 0xd1
+ ONE_BYTE_OPCODE, // 0xd2
+ ONE_BYTE_OPCODE, // 0xd3
+ ONE_BYTE_OPCODE, // 0xd4
+ ONE_BYTE_OPCODE, // 0xd5
+ ONE_BYTE_OPCODE, // 0xd6
+ ONE_BYTE_OPCODE, // 0xd7
+ ONE_BYTE_OPCODE, // 0xd8
+ ONE_BYTE_OPCODE, // 0xd9
+ ONE_BYTE_OPCODE, // 0xda
+ ONE_BYTE_OPCODE, // 0xdb
+ ONE_BYTE_OPCODE, // 0xdc
+ ONE_BYTE_OPCODE, // 0xdd
+ ONE_BYTE_OPCODE, // 0xde
+ ONE_BYTE_OPCODE, // 0xdf
+ ONE_BYTE_OPCODE, // 0xe0
+ ONE_BYTE_OPCODE, // 0xe1
+ ONE_BYTE_OPCODE, // 0xe2
+ TWO_BYTE_DISJOINT_OPCODE, // 0xe3
+ TWO_BYTE_DISJOINT_OPCODE, // 0xe4
+ TWO_BYTE_OPCODE, // 0xe5
+ TWO_BYTE_DISJOINT_OPCODE, // 0xe6
+ TWO_BYTE_DISJOINT_OPCODE, // 0xe7
+ ONE_BYTE_OPCODE, // 0xe8
+ ONE_BYTE_OPCODE, // 0xe9
+ ONE_BYTE_OPCODE, // 0xea
+ TWO_BYTE_DISJOINT_OPCODE, // 0xeb
+ TWO_BYTE_DISJOINT_OPCODE, // 0xec
+ TWO_BYTE_DISJOINT_OPCODE, // 0xed
+ ONE_BYTE_OPCODE, // 0xee
+ ONE_BYTE_OPCODE, // 0xef
+ ONE_BYTE_OPCODE, // 0xf0
+ ONE_BYTE_OPCODE, // 0xf1
+ ONE_BYTE_OPCODE, // 0xf2
+ ONE_BYTE_OPCODE, // 0xf3
+ ONE_BYTE_OPCODE, // 0xf4
+ ONE_BYTE_OPCODE, // 0xf5
+ ONE_BYTE_OPCODE, // 0xf6
+ ONE_BYTE_OPCODE, // 0xf7
+ ONE_BYTE_OPCODE, // 0xf8
+ ONE_BYTE_OPCODE, // 0xf9
+ ONE_BYTE_OPCODE, // 0xfa
+ ONE_BYTE_OPCODE, // 0xfb
+ ONE_BYTE_OPCODE, // 0xfc
+ ONE_BYTE_OPCODE, // 0xfd
+ TWO_BYTE_DISJOINT_OPCODE, // 0xfe
+ TWO_BYTE_DISJOINT_OPCODE, // 0xff
+};
+
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumRegisters] = {
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index c313c929e6..9dfb32c7e7 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -1080,6 +1080,7 @@ class Instruction {
THREE_NIBBLE_OPCODE // Three Nibbles - Bits 0 to 7, 12 to 15
};
+ static OpcodeFormatType OpcodeFormatTable[256];
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
@@ -1254,37 +1255,7 @@ class Instruction {
// Get Instruction Format Type
static OpcodeFormatType getOpcodeFormatType(const byte* instr) {
const byte firstByte = *instr;
- // Based on Figure B-3 in z/Architecture Principles of
- // Operation.
-
- // 1-byte opcodes
- // I, RR, RS, RSI, RX, SS Formats
- if ((0x04 <= firstByte && 0x9B >= firstByte) ||
- (0xA8 <= firstByte && 0xB1 >= firstByte) ||
- (0xBA <= firstByte && 0xBF >= firstByte) || (0xC5 == firstByte) ||
- (0xC7 == firstByte) || (0xD0 <= firstByte && 0xE2 >= firstByte) ||
- (0xE8 <= firstByte && 0xEA >= firstByte) ||
- (0xEE <= firstByte && 0xFD >= firstByte)) {
- return ONE_BYTE_OPCODE;
- }
-
- // 2-byte opcodes
- // E, IE, RRD, RRE, RRF, SIL, S, SSE Formats
- if ((0x00 == firstByte) || // Software breakpoint 0x0001
- (0x01 == firstByte) || (0xB2 == firstByte) || (0xB3 == firstByte) ||
- (0xB9 == firstByte) || (0xE5 == firstByte)) {
- return TWO_BYTE_OPCODE;
- }
-
- // 3-nibble opcodes
- // RI, RIL, SSF Formats
- if ((0xA5 == firstByte) || (0xA7 == firstByte) ||
- (0xC0 <= firstByte && 0xCC >= firstByte)) { // C5,C7 handled above
- return THREE_NIBBLE_OPCODE;
- }
- // Remaining ones are all TWO_BYTE_DISJOINT OPCODES.
- DCHECK(InstructionLength(instr) == 6);
- return TWO_BYTE_DISJOINT_OPCODE;
+ return OpcodeFormatTable[firstByte];
}
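
With the 256-entry table from constants-s390.cc, classifying an opcode is a single indexed load instead of a cascade of range comparisons. A self-contained analogue of the new dispatch:

#include <cstdint>

enum OpcodeFormatType {
  ONE_BYTE_OPCODE,
  TWO_BYTE_OPCODE,
  TWO_BYTE_DISJOINT_OPCODE,
  THREE_NIBBLE_OPCODE
};

// Filled once from Figure B-3 of the z/Architecture Principles of
// Operation, exactly like the table added above.
extern OpcodeFormatType kOpcodeFormatTable[256];

inline OpcodeFormatType FormatOf(const uint8_t* instr) {
  return kOpcodeFormatTable[instr[0]];  // branch-free, O(1)
}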
// Extract the full opcode from the instruction.
@@ -1304,11 +1275,10 @@ class Instruction {
case TWO_BYTE_DISJOINT_OPCODE:
// Two Bytes - Bits 0 to 7, 40 to 47
return static_cast<Opcode>((*instr << 8) | (*(instr + 5) & 0xFF));
- case THREE_NIBBLE_OPCODE:
+ default:
+ // case THREE_NIBBLE_OPCODE:
// Three Nibbles - Bits 0 to 7, 12 to 15
return static_cast<Opcode>((*instr << 4) | (*(instr + 1) & 0xF));
- default:
- break;
}
UNREACHABLE();
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index 44062d6e79..6ee8c74213 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -116,8 +116,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all double registers before messing with them.
__ lay(sp, MemOperand(sp, -kDoubleRegsSize));
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index 5bab604b7b..26079b9992 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -37,6 +37,8 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
//------------------------------------------------------------------------------
// Decoder decodes and disassembles instructions into an output buffer.
@@ -111,7 +113,7 @@ void Decoder::PrintRegister(int reg) {
// Print the double FP register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
- Print(DoubleRegister::from_code(reg).ToString());
+ Print(GetRegConfig()->GetDoubleRegisterName(reg));
}
// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
@@ -734,6 +736,12 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case LTEBR:
Format(instr, "ltebr\t'f5,'f6");
break;
+ case LRVR:
+ Format(instr, "lrvr\t'r5,'r6");
+ break;
+ case LRVGR:
+ Format(instr, "lrvgr\t'r5,'r6");
+ break;
case LGR:
Format(instr, "lgr\t'r5,'r6");
break;
@@ -812,6 +820,12 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case LLGHR:
Format(instr, "llghr\t'r5,'r6");
break;
+ case LOCR:
+ Format(instr, "locr\t'm1,'r5,'r6");
+ break;
+ case LOCGR:
+ Format(instr, "locgr\t'm1,'r5,'r6");
+ break;
case LNGR:
Format(instr, "lngr\t'r5,'r6");
break;
@@ -893,6 +907,9 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case LDGR:
Format(instr, "ldgr\t'f5,'r6");
break;
+ case MS:
+ Format(instr, "ms\t'r1,'d1('r2d,'r3)");
+ break;
case STE:
Format(instr, "ste\t'f1,'d1('r2d,'r3)");
break;
@@ -911,6 +928,9 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case CEFBR:
Format(instr, "cefbr\t'f5,'m2,'r6");
break;
+ case CELFBR:
+ Format(instr, "celfbr\t'f5,'m2,'r6");
+ break;
case CGEBR:
Format(instr, "cgebr\t'r5,'m2,'f6");
break;
@@ -935,6 +955,12 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case CLFDBR:
Format(instr, "clfdbr\t'r5,'m2,'f6");
break;
+ case CLFEBR:
+ Format(instr, "clfebr\t'r5,'m2,'f6");
+ break;
+ case CLGEBR:
+ Format(instr, "clgebr\t'r5,'m2,'f6");
+ break;
case CLGDBR:
Format(instr, "clgdbr\t'r5,'m2,'f6");
break;
@@ -977,6 +1003,9 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case LCDBR:
Format(instr, "lcdbr\t'f5,'f6");
break;
+ case LCEBR:
+ Format(instr, "lcebr\t'f5,'f6");
+ break;
case STH:
Format(instr, "sth\t'r1,'d1('r2d,'r3)");
break;
@@ -1068,6 +1097,9 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case IIHF:
Format(instr, "iihf\t'r1,'i7");
break;
+ case LGFI:
+ Format(instr, "lgfi\t'r1,'i7");
+ break;
case IILF:
Format(instr, "iilf\t'r1,'i7");
break;
@@ -1113,6 +1145,12 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case RISBGN:
Format(instr, "risbgn\t'r1,'r2,'i9,'ia,'ib");
break;
+ case LOCG:
+ Format(instr, "locg\t'm2,'r1,'d2('r3)");
+ break;
+ case LOC:
+ Format(instr, "loc\t'm2,'r1,'d2('r3)");
+ break;
case LMY:
Format(instr, "lmy\t'r1,'r2,'d2('r3)");
break;
@@ -1185,6 +1223,15 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case LB:
Format(instr, "lb\t'r1,'d2('r2d,'r3)");
break;
+ case LRVH:
+ Format(instr, "lrvh\t'r1,'d2('r2d,'r3)");
+ break;
+ case LRV:
+ Format(instr, "lrv\t'r1,'d2('r2d,'r3)");
+ break;
+ case LRVG:
+ Format(instr, "lrvg\t'r1,'d2('r2d,'r3)");
+ break;
case LG:
Format(instr, "lg\t'r1,'d2('r2d,'r3)");
break;
@@ -1257,6 +1304,15 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case STY:
Format(instr, "sty\t'r1,'d2('r2d,'r3)");
break;
+ case STRVH:
+ Format(instr, "strvh\t'r1,'d2('r2d,'r3)");
+ break;
+ case STRV:
+ Format(instr, "strv\t'r1,'d2('r2d,'r3)");
+ break;
+ case STRVG:
+ Format(instr, "strvg\t'r1,'d2('r2d,'r3)");
+ break;
case STG:
Format(instr, "stg\t'r1,'d2('r2d,'r3)");
break;
@@ -1305,6 +1361,12 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case LEY:
Format(instr, "ley\t'f1,'d2('r2d,'r3)");
break;
+ case MSG:
+ Format(instr, "msg\t'r1,'d2('r2d,'r3)");
+ break;
+ case MSY:
+ Format(instr, "msy\t'r1,'d2('r2d,'r3)");
+ break;
case STEY:
Format(instr, "stey\t'f1,'d2('r2d,'r3)");
break;
@@ -1357,7 +1419,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -1366,7 +1428,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
}
const char* NameConverter::NameOfCPURegister(int reg) const {
- return v8::internal::Register::from_code(reg).ToString();
+ return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
@@ -1411,7 +1473,7 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- v8::internal::PrintF(f, "%p %08x %s\n", prev_pc,
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
*reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 63afca85ee..4cdcd54521 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -11,6 +11,20 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return r3;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r2; }
+
const Register LoadDescriptor::ReceiverRegister() { return r3; }
const Register LoadDescriptor::NameRegister() { return r4; }
const Register LoadDescriptor::SlotRegister() { return r2; }
@@ -20,10 +34,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
const Register StoreDescriptor::ReceiverRegister() { return r3; }
const Register StoreDescriptor::NameRegister() { return r4; }
const Register StoreDescriptor::ValueRegister() { return r2; }
+const Register StoreDescriptor::SlotRegister() { return r6; }
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r6; }
-
-const Register VectorStoreICDescriptor::VectorRegister() { return r5; }
+const Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return r6; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return r5; }
@@ -31,18 +44,14 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return r7; }
const Register StoreTransitionDescriptor::MapRegister() { return r5; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r4; }
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
-const Register InstanceOfDescriptor::LeftRegister() { return r3; }
-const Register InstanceOfDescriptor::RightRegister() { return r2; }
-
const Register StringCompareDescriptor::LeftRegister() { return r3; }
const Register StringCompareDescriptor::RightRegister() { return r2; }
-const Register ApiGetterDescriptor::function_address() { return r4; }
+const Register ApiGetterDescriptor::HolderRegister() { return r2; }
+const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
const Register MathPowTaggedDescriptor::exponent() { return r4; }
@@ -59,12 +68,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r5};
@@ -211,46 +214,34 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r2 -- number of arguments
// r3 -- function
// r4 -- allocation site with elements kind
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
Register registers[] = {r3, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// register state
// r2 -- number of arguments
- // r3 -- constructor function
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // r3 -- function
+ // r4 -- allocation site with elements kind
+ Register registers[] = {r3, r4, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
- Register registers[] = {r3, r2};
+ Register registers[] = {r3, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {r2};
@@ -275,6 +266,23 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // r3 -- lhs
+ // r2 -- rhs
+ // r6 -- slot id
+ // r5 -- vector
+ Register registers[] = {r3, r2, r6, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2};
@@ -330,9 +338,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -367,6 +374,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r2, // the value to pass to the generator
+ r3, // the JSGeneratorObject to resume
+ r4 // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 21058f420f..8b708de734 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -70,10 +70,6 @@ void MacroAssembler::Call(Register target) {
Label start;
bind(&start);
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
-
// Branch to target via indirect branch
basr(r14, target);
@@ -122,10 +118,6 @@ void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
bind(&start);
#endif
- // Statement positions are expected to be recorded when the target
- // address is loaded.
- positions_recorder()->WriteRecordedPositions();
-
mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
basr(r14, ip);
@@ -178,19 +170,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
}
void MacroAssembler::Move(Register dst, Handle<Object> value) {
- AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
- } else {
- DCHECK(value->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*value)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(value);
- mov(dst, Operand(cell));
- LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
- } else {
- mov(dst, Operand(value));
- }
- }
+ mov(dst, Operand(value));
}
void MacroAssembler::Move(Register dst, Register src, Condition cond) {
@@ -480,8 +460,8 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
// Save caller-saved registers. js_function and code_entry are in the
// caller-saved register list.
DCHECK(kJSCallerSaved & js_function.bit());
- DCHECK(kJSCallerSaved & code_entry.bit());
- MultiPush(kJSCallerSaved | r14.bit());
+ // DCHECK(kJSCallerSaved & code_entry.bit());
+ MultiPush(kJSCallerSaved | code_entry.bit() | r14.bit());
int argument_count = 3;
PrepareCallCFunction(argument_count, code_entry);
@@ -499,7 +479,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
}
// Restore caller-saved registers (including js_function and code_entry).
- MultiPop(kJSCallerSaved | r14.bit());
+ MultiPop(kJSCallerSaved | code_entry.bit() | r14.bit());
bind(&done);
}
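
code_entry is no longer assumed to be JS caller-saved (the old DCHECK is commented out), so it is added to the saved set explicitly, and the pop mirrors the push so the restore stays balanced:

// Register set used by both the MultiPush and the matching MultiPop.
RegList saved = kJSCallerSaved | code_entry.bit() | r14.bit();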
@@ -645,8 +625,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
@@ -677,12 +656,12 @@ void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
}
void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
- cefbr(dst, src);
+ cefbr(Condition(4), dst, src);
}
void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
DoubleRegister dst) {
- celfbr(Condition(0), Condition(0), dst, src);
+ celfbr(Condition(4), Condition(0), dst, src);
}
#if V8_TARGET_ARCH_S390X
@@ -781,7 +760,7 @@ void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
m = Condition(5);
break;
case kRoundToNearest:
- UNIMPLEMENTED();
+ m = Condition(4);
break;
case kRoundToPlusInf:
m = Condition(6);
@@ -794,6 +773,10 @@ void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input,
break;
}
cfebr(m, dst, double_input);
+ Label done;
+ b(Condition(0xe), &done, Label::kNear); // special case
+ LoadImmP(dst, Operand::Zero());
+ bind(&done);
ldgr(double_dst, dst);
}
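
Two fixes in the float-to-int conversions: kRoundToNearest now maps to Condition(4) in the instruction's M3 field (round to nearest, ties to even, per the z/Architecture BFP rounding encodings), and the added branch cleans up the special case. cfebr/clfebr set condition code 3 for NaN or out-of-range inputs; mask 0xe accepts CC 0-2, so only the special case falls through and substitutes zero:

//   cfebr(m, dst, input);             sets CC3 on NaN / out of range
//   b(Condition(0xe), &done);         CC0-CC2: conversion result is valid
//   LoadImmP(dst, Operand::Zero());   CC3: force a zero result
//   bind(&done);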
@@ -819,6 +802,10 @@ void MacroAssembler::ConvertFloat32ToUnsignedInt32(
break;
}
clfebr(m, Condition(0), dst, double_input);
+ Label done;
+ b(Condition(0xe), &done, Label::kNear); // special case
+ LoadImmP(dst, Operand::Zero());
+ bind(&done);
ldgr(double_dst, dst);
}
@@ -987,9 +974,8 @@ void MacroAssembler::Prologue(bool code_pre_aging, Register base,
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- LoadP(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+ LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
@@ -1022,6 +1008,20 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
return frame_ends;
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ CleanseP(r14);
+ Push(r14, fp, context, target);
+ la(fp, MemOperand(sp, 2 * kPointerSize));
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(r14, fp, context, target);
+}
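
The new builtin-frame helpers produce the following layout; fp is re-pointed two slots above the post-push sp so it lands on the saved fp, and LeaveBuiltinFrame pops everything back in reverse order:

// Frame built by EnterBuiltinFrame (higher addresses first):
//   fp + kPointerSize     : saved r14 (cleansed return address)
//   fp                    : saved fp
//   fp - kPointerSize     : context
//   fp - 2 * kPointerSize : target
//   fp - 3 * kPointerSize : argc        <- sp on exit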
+
// ExitFrame layout (probably wrongish.. needs updating)
//
// SP -> previousSP
@@ -1046,7 +1046,10 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// gaps
// Args
// ABIRes <- newSP
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -1057,7 +1060,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// all of the pushes that have happened inside of V8
// since we were called from C code
CleanseP(r14);
- LoadSmiLiteral(r1, Smi::FromInt(StackFrame::EXIT));
+ LoadSmiLiteral(r1, Smi::FromInt(frame_type));
PushCommonFrame(r1);
// Reserve room for saved entry sp and code object.
lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1310,12 +1313,13 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- mov(r6, Operand(step_in_enabled));
- LoadlB(r6, MemOperand(r6));
- CmpP(r6, Operand::Zero());
- beq(&skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ mov(r6, Operand(last_step_action));
+ LoadB(r6, MemOperand(r6));
+ CmpP(r6, Operand(StepIn));
+ blt(&skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1706,6 +1710,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1767,7 +1772,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- beq(&aligned);
+ beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
CmpLogicalP(result, alloc_limit);
bge(gc_required);
@@ -1792,17 +1797,20 @@ void MacroAssembler::Allocate(int object_size, Register result,
blt(gc_required);
AddP(result_end, result, result_end);
}
- StoreP(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- AddP(result, result, Operand(kHeapObjectTag));
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ StoreP(result_end, MemOperand(top_address));
}
+
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
}
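
This is why TAG_OBJECT disappears from every Allocate call site in this diff: tagging is now unconditional, so callers pass NO_ALLOCATION_FLAGS instead, and the rare caller that wants a raw address strips the tag itself (as GenerateSmiToDouble now does with SubP). In effect:

// Before: result += kHeapObjectTag only when TAG_OBJECT was set.
// After:  every successful Allocate() returns a tagged pointer.
constexpr intptr_t kHeapObjectTag = 1;  // V8's heap-object low-bit tag
intptr_t Tagged(intptr_t raw) { return raw + kHeapObjectTag; }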
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1860,7 +1868,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
AndP(result_end, result, Operand(kDoubleAlignmentMask));
Label aligned;
- beq(&aligned);
+ beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
CmpLogicalP(result, alloc_limit);
bge(gc_required);
@@ -1892,12 +1900,114 @@ void MacroAssembler::Allocate(Register object_size, Register result,
AndP(r0, result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace, cr0);
}
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ StoreP(result_end, MemOperand(top_address));
+ }
+
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, Register scratch,
+ AllocationFlags flags) {
+ // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+ // is not specified. Other registers must not overlap.
+ DCHECK(!AreAliased(object_size, result, scratch, ip));
+ DCHECK(!AreAliased(result_end, result, scratch, ip));
+ DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ Register top_address = scratch;
+ mov(top_address, Operand(allocation_top));
+ LoadP(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ AndP(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned, Label::kNear);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ StoreW(result_end, MemOperand(result));
+ AddP(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top using result. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
+ AddP(result_end, result, result_end);
+ } else {
+ AddP(result_end, result, object_size);
+ }
+
+ // Update allocation top. result temporarily holds the new top.
+ if (emit_debug_code()) {
+ AndP(r0, result_end, Operand(kObjectAlignmentMask));
+ Check(eq, kUnalignedAllocationInNewSpace, cr0);
+ }
StoreP(result_end, MemOperand(top_address));
- // Tag object if requested.
- if ((flags & TAG_OBJECT) != 0) {
- AddP(result, result, Operand(kHeapObjectTag));
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register scratch1, Register scratch2,
+ AllocationFlags flags) {
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(!AreAliased(result, scratch1, scratch2, ip));
+
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
}
+ DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+ // Set up allocation top address register.
+ Register top_address = scratch1;
+ Register result_end = scratch2;
+ mov(top_address, Operand(allocation_top));
+ LoadP(result, MemOperand(top_address));
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+// Align the next allocation. Storing the filler map without checking top is
+// safe in new-space because the limit of the heap is aligned there.
+#if V8_TARGET_ARCH_S390X
+ STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ AndP(result_end, result, Operand(kDoubleAlignmentMask));
+ Label aligned;
+ beq(&aligned, Label::kNear);
+ mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+ StoreW(result_end, MemOperand(result));
+ AddP(result, result, Operand(kDoubleSize / 2));
+ bind(&aligned);
+#endif
+ }
+
+ // Calculate new top using result.
+ AddP(result_end, result, Operand(object_size));
+
+ // The top pointer is not updated for allocation folding dominators.
+ StoreP(result_end, MemOperand(top_address));
+
+ // Tag object.
+ AddP(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::AllocateTwoByteString(Register result, Register length,
@@ -1914,7 +2024,8 @@ void MacroAssembler::AllocateTwoByteString(Register result, Register length,
AndP(scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
@@ -1934,7 +2045,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
AndP(scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+ Allocate(scratch1, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -1946,7 +2058,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
scratch2);
@@ -1957,7 +2069,7 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -1969,7 +2081,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
scratch2);
@@ -1981,7 +2093,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@@ -2076,89 +2188,6 @@ void MacroAssembler::StoreNumberToDoubleElements(
FixedDoubleArray::kHeaderSize - elements_offset));
}
-void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- // TODO(joransiu): Optimize paths for left == right.
- bool left_is_right = left.is(right);
-
- // C = A+B; C overflows if A/B have same sign and C has diff sign than A
- if (dst.is(left)) {
- LoadRR(scratch, left); // Preserve left.
- AddP(dst, left, right); // Left is overwritten.
- XorP(overflow_dst, scratch, dst); // Original left.
- if (!left_is_right) XorP(scratch, dst, right);
- } else if (dst.is(right)) {
- LoadRR(scratch, right); // Preserve right.
- AddP(dst, left, right); // Right is overwritten.
- XorP(overflow_dst, dst, left);
- if (!left_is_right) XorP(scratch, dst, scratch);
- } else {
- AddP(dst, left, right);
- XorP(overflow_dst, dst, left);
- if (!left_is_right) XorP(scratch, dst, right);
- }
- if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst);
- LoadAndTestRR(overflow_dst, overflow_dst);
-}
-
-void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
- intptr_t right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
-
- mov(r1, Operand(right));
- AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch);
-}
-
-void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
- Register right,
- Register overflow_dst,
- Register scratch) {
- DCHECK(!dst.is(overflow_dst));
- DCHECK(!dst.is(scratch));
- DCHECK(!overflow_dst.is(scratch));
- DCHECK(!overflow_dst.is(left));
- DCHECK(!overflow_dst.is(right));
-
- // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
- if (dst.is(left)) {
- LoadRR(scratch, left); // Preserve left.
- SubP(dst, left, right); // Left is overwritten.
- XorP(overflow_dst, dst, scratch);
- XorP(scratch, right);
- AndP(overflow_dst, scratch /*, SetRC*/);
- LoadAndTestRR(overflow_dst, overflow_dst);
- // Should be okay to remove rc
- } else if (dst.is(right)) {
- LoadRR(scratch, right); // Preserve right.
- SubP(dst, left, right); // Right is overwritten.
- XorP(overflow_dst, dst, left);
- XorP(scratch, left);
- AndP(overflow_dst, scratch /*, SetRC*/);
- LoadAndTestRR(overflow_dst, overflow_dst);
- // Should be okay to remove rc
- } else {
- SubP(dst, left, right);
- XorP(overflow_dst, dst, left);
- XorP(scratch, left, right);
- AndP(overflow_dst, scratch /*, SetRC*/);
- LoadAndTestRR(overflow_dst, overflow_dst);
- // Should be okay to remove rc
- }
-}
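
For reference, a hedged sketch of the XOR trick the deleted AddAndCheckForOverflow/SubAndCheckForOverflow helpers relied on: for C = A + B, signed overflow occurs exactly when A and B share a sign that differs from C's; for C = A - B, when A and B have different signs and C's sign differs from A's. V8 computed the same predicates in registers and tested the sign bit via LoadAndTestRR.

#include <cstdint>

bool AddOverflows(int32_t a, int32_t b) {
  int32_t c = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                   static_cast<uint32_t>(b));
  return ((a ^ c) & (b ^ c)) < 0;  // sign bit set => overflow
}

bool SubOverflows(int32_t a, int32_t b) {
  int32_t c = static_cast<int32_t>(static_cast<uint32_t>(a) -
                                   static_cast<uint32_t>(b));
  return ((a ^ b) & (a ^ c)) < 0;  // operands differed in sign
}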
-
void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
Label* early_success) {
LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
@@ -2527,9 +2556,11 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
mov(r3, Operand(builtin));
- CEntryStub stub(isolate(), 1);
+ CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2613,16 +2644,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- LoadSmiLiteral(r0, Smi::FromInt(reason));
- push(r0);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
}
@@ -2847,6 +2881,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, kOperandIsASmiAndNotAGeneratorObject, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAGeneratorObject);
+ }
+}
+
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2935,12 +2981,11 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -2948,11 +2993,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- if (tagging_mode == TAG_RESULT) {
StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
}
void MacroAssembler::AllocateHeapNumberWithValue(
@@ -2971,7 +3012,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -3539,7 +3581,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
Label* no_memento_found) {
Label map_check;
Label top_check;
- ExternalReference new_space_allocation_top =
+ ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -3550,11 +3592,13 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
- AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- XorP(r0, scratch_reg, Operand(new_space_allocation_top));
+ AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ mov(ip, Operand(new_space_allocation_top_adr));
+ LoadP(ip, MemOperand(ip));
+ XorP(r0, scratch_reg, ip);
AndP(r0, r0, Operand(~Page::kPageAlignmentMask));
beq(&top_check, Label::kNear);
// The object is on a different page than allocation top. Bail out if the
@@ -3568,7 +3612,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- CmpP(scratch_reg, Operand(new_space_allocation_top));
+ CmpP(scratch_reg, ip);
bgt(no_memento_found);
// Memento map check.
bind(&map_check);
@@ -3587,8 +3631,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
@@ -3654,6 +3697,36 @@ void MacroAssembler::mov(Register dst, const Operand& src) {
#endif
}
+void MacroAssembler::Mul32(Register dst, const MemOperand& src1) {
+ if (is_uint12(src1.offset())) {
+ ms(dst, src1);
+ } else if (is_int20(src1.offset())) {
+ msy(dst, src1);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void MacroAssembler::Mul32(Register dst, Register src1) { msr(dst, src1); }
+
+void MacroAssembler::Mul32(Register dst, const Operand& src1) {
+ msfi(dst, src1);
+}
+
+void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
+ if (is_int20(src1.offset())) {
+ msg(dst, src1);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+void MacroAssembler::Mul64(Register dst, Register src1) { msgr(dst, src1); }
+
+void MacroAssembler::Mul64(Register dst, const Operand& src1) {
+ msgfi(dst, src1);
+}
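
A hedged sketch of the displacement-range dispatch the new Mul32/Mul64 memory-operand overloads above perform: the base MS encoding only accepts a 12-bit unsigned displacement, while the long-displacement MSY/MSG forms take a 20-bit signed one. Names below are illustrative.

#include <cstdint>

bool IsUint12(int64_t d) { return d >= 0 && d < (int64_t{1} << 12); }
bool IsInt20(int64_t d) {
  return d >= -(int64_t{1} << 19) && d < (int64_t{1} << 19);
}

const char* SelectMul32Form(int64_t displacement) {
  if (IsUint12(displacement)) return "ms";   // short form
  if (IsInt20(displacement)) return "msy";   // long-displacement form
  return "unencodable";                      // Mul32 hits UNIMPLEMENTED()
}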
+
void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
if (dst.is(src2)) {
MulP(dst, src1);
@@ -4033,15 +4106,18 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
// Subtract 32-bit (Register = Register - Register)
void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
- if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
srk(dst, src1, src2);
return;
}
if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
// In the scenario where dst = src - dst, we need to swap and negate
if (!dst.is(src1) && dst.is(src2)) {
- sr(dst, src1); // dst = (dst - src)
+ Label done;
lcr(dst, dst); // dst = -dst
+ b(overflow, &done);
+ ar(dst, src1); // dst = dst + src
+ bind(&done);
} else {
sr(dst, src2);
}
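
A hedged sketch of the corrected dst-aliases-src2 path above: src1 - dst is recomposed as (-dst) + src1, and because negating INT32_MIN itself overflows, the hunk branches past the add so the overflow condition code is preserved rather than clobbered by a second arithmetic op.

#include <cstdint>
#include <limits>

// Sketch of the lcr / b(overflow) / ar sequence; illustrative names.
int32_t SubViaNegateAdd(int32_t src1, int32_t dst) {
  if (dst == std::numeric_limits<int32_t>::min()) {
    return dst;  // lcr overflowed; the add is skipped via b(overflow, &done)
  }
  dst = -dst;                                              // lcr(dst, dst)
  return static_cast<int32_t>(static_cast<uint32_t>(dst) +
                              static_cast<uint32_t>(src1));  // ar(dst, src1)
}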
@@ -4050,15 +4126,18 @@ void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
// Subtract Pointer Sized (Register = Register - Register)
void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
// Use non-clobbering version if possible
- if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
SubP_RRR(dst, src1, src2);
return;
}
if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
// In the scenario where dst = src - dst, we need to swap and negate
if (!dst.is(src1) && dst.is(src2)) {
- SubP(dst, src1); // dst = (dst - src)
+ Label done;
LoadComplementRR(dst, dst); // dst = -dst
+ b(overflow, &done);
+ AddP(dst, src1); // dst = dst + src
+ bind(&done);
} else {
SubP(dst, src2);
}
@@ -4076,8 +4155,8 @@ void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
// In the scenario where dst = src - dst, we need to swap and negate
if (!dst.is(src1) && dst.is(src2)) {
lgfr(dst, dst); // Sign extend this operand first.
- SubP(dst, src1); // dst = (dst - src)
LoadComplementRR(dst, dst); // dst = -dst
+ AddP(dst, src1); // dst = -dst + src
} else {
sgfr(dst, src2);
}
@@ -4459,12 +4538,22 @@ void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
XorP(dst, opnd);
}
-void MacroAssembler::NotP(Register dst) {
-#if V8_TARGET_ARCH_S390X
+void MacroAssembler::Not32(Register dst, Register src) {
+ if (!src.is(no_reg) && !src.is(dst)) lr(dst, src);
+ xilf(dst, Operand(0xFFFFFFFF));
+}
+
+void MacroAssembler::Not64(Register dst, Register src) {
+ if (!src.is(no_reg) && !src.is(dst)) lgr(dst, src);
xihf(dst, Operand(0xFFFFFFFF));
xilf(dst, Operand(0xFFFFFFFF));
+}
+
+void MacroAssembler::NotP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ Not64(dst, src);
#else
- XorP(dst, Operand(0xFFFFFFFF));
+ Not32(dst, src);
#endif
}
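
A hedged sketch of what the new Not32/Not64 split computes: a bitwise NOT expressed as XOR with all ones, with the 64-bit variant flipping the high and low halves separately (xihf then xilf), since the immediate logical instructions operate on 32-bit halves.

#include <cstdint>

uint32_t Not32(uint32_t x) { return x ^ 0xFFFFFFFFu; }  // xilf

uint64_t Not64(uint64_t x) {
  x ^= uint64_t{0xFFFFFFFF} << 32;  // xihf: flip the high half
  x ^= uint64_t{0xFFFFFFFF};        // xilf: flip the low half
  return x;
}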
@@ -4639,7 +4728,6 @@ void MacroAssembler::Branch(Condition c, const Operand& opnd) {
// Branch On Count. Decrement R1, and branch if R1 != 0.
void MacroAssembler::BranchOnCount(Register r1, Label* l) {
int32_t offset = branch_offset(l);
- positions_recorder()->WriteRecordedPositions();
if (is_int16(offset)) {
#if V8_TARGET_ARCH_S390X
brctg(r1, Operand(offset));
@@ -4953,6 +5041,22 @@ void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
#endif
}
+void MacroAssembler::LoadLogicalHalfWordP(Register dst, const MemOperand& mem) {
+#if V8_TARGET_ARCH_S390X
+ llgh(dst, mem);
+#else
+ llh(dst, mem);
+#endif
+}
+
+void MacroAssembler::LoadLogicalHalfWordP(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ llghr(dst, src);
+#else
+ llhr(dst, src);
+#endif
+}
+
void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
lgb(dst, mem);
@@ -4961,6 +5065,14 @@ void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#endif
}
+void MacroAssembler::LoadB(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+ lgbr(dst, src);
+#else
+ lbr(dst, src);
+#endif
+}
+
void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
llgc(dst, mem);
@@ -4969,6 +5081,20 @@ void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#endif
}
+void MacroAssembler::LoadLogicalReversedWordP(Register dst,
+ const MemOperand& mem) {
+ lrv(dst, mem);
+ LoadlW(dst, dst);
+}
+
+
+void MacroAssembler::LoadLogicalReversedHalfWordP(Register dst,
+ const MemOperand& mem) {
+ lrvh(dst, mem);
+ LoadLogicalHalfWordP(dst, dst);
+}
+
+
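A hedged sketch of the two reversed-load helpers above: lrv/lrvh fetch the value with its bytes reversed (a little-endian load on a big-endian machine), and the following logical load zero-extends it into the full register.

#include <cstdint>

// Illustrative equivalent of LoadLogicalReversedWordP on a byte buffer.
uint64_t LoadLittleEndianU32(const uint8_t* p) {
  uint32_t v = uint32_t{p[0]} | uint32_t{p[1]} << 8 |
               uint32_t{p[2]} << 16 | uint32_t{p[3]} << 24;  // lrv
  return v;  // LoadlW zero-extends into the 64-bit register
}

uint64_t LoadLittleEndianU16(const uint8_t* p) {
  uint16_t v = static_cast<uint16_t>(p[0] | p[1] << 8);      // lrvh
  return v;  // LoadLogicalHalfWordP zero-extends
}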
// Load And Test (Reg <- Reg)
void MacroAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
@@ -5009,6 +5135,16 @@ void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#endif
}
+// Load On Condition Pointer Sized (Reg <- Reg)
+void MacroAssembler::LoadOnConditionP(Condition cond, Register dst,
+ Register src) {
+#if V8_TARGET_ARCH_S390X
+ locgr(cond, dst, src);
+#else
+ locr(cond, dst, src);
+#endif
+}
+
// Load Double Precision (64-bit) Floating Point number from memory
void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) {
// for both 32-bit and 64-bit we always use 64-bit floating point regs
@@ -5300,7 +5436,7 @@ void MacroAssembler::Popcnt32(Register dst, Register src) {
ar(dst, r0);
ShiftRight(r0, dst, Operand(8));
ar(dst, r0);
- lbr(dst, dst);
+ LoadB(dst, dst);
}
#ifdef V8_TARGET_ARCH_S390X
@@ -5315,7 +5451,7 @@ void MacroAssembler::Popcnt64(Register dst, Register src) {
AddP(dst, r0);
ShiftRightP(r0, dst, Operand(8));
AddP(dst, r0);
- lbr(dst, dst);
+ LoadB(dst, dst);
}
#endif
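
Context for the two Popcnt hunks above: the z/Architecture POPCNT instruction leaves a per-byte population count in each byte of the result, the shift/add tail folds those byte counts into the low byte, and the final LoadB (replacing the bare lbr) extracts it. A hedged sketch of the 32-bit fold:

#include <cstdint>

uint32_t FoldPerByteCounts32(uint32_t per_byte) {
  per_byte += per_byte >> 16;  // ShiftRight + ar
  per_byte += per_byte >> 8;   // ShiftRight + ar
  return per_byte & 0xFF;      // LoadB of the low byte
}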
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 77fcccb182..b8ed3a057d 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -19,10 +19,10 @@ const Register kReturnRegister1 = {Register::kCode_r3};
const Register kReturnRegister2 = {Register::kCode_r4};
const Register kJSFunctionRegister = {Register::kCode_r3};
const Register kContextRegister = {Register::kCode_r13};
+const Register kAllocateSizeRegister = {Register::kCode_r3};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
-const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
-const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
+const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r6};
+const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r7};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
@@ -112,7 +112,6 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
#define LoadRR lgr
#define LoadAndTestRR ltgr
#define LoadImmP lghi
-#define LoadLogicalHalfWordP llgh
// Compare
#define CmpPH cghi
@@ -150,7 +149,6 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
#define LoadRR lr
#define LoadAndTestRR ltr
#define LoadImmP lhi
-#define LoadLogicalHalfWordP llh
// Compare
#define CmpPH chi
@@ -303,6 +301,12 @@ class MacroAssembler : public Assembler {
void MulP(Register dst, Register src);
void MulP(Register dst, const MemOperand& opnd);
void Mul(Register dst, Register src1, Register src2);
+ void Mul32(Register dst, const MemOperand& src1);
+ void Mul32(Register dst, Register src1);
+ void Mul32(Register dst, const Operand& src1);
+ void Mul64(Register dst, const MemOperand& src1);
+ void Mul64(Register dst, Register src1);
+ void Mul64(Register dst, const Operand& src1);
// Divide
void DivP(Register dividend, Register divider);
@@ -333,9 +337,15 @@ class MacroAssembler : public Assembler {
void LoadW(Register dst, Register src);
void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
void LoadlW(Register dst, Register src);
+ void LoadLogicalHalfWordP(Register dst, const MemOperand& opnd);
+ void LoadLogicalHalfWordP(Register dst, Register src);
void LoadB(Register dst, const MemOperand& opnd);
+ void LoadB(Register dst, Register src);
void LoadlB(Register dst, const MemOperand& opnd);
+ void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
+ void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);
+
// Load And Test
void LoadAndTest32(Register dst, Register src);
void LoadAndTestP_ExtendSrc(Register dst, Register src);
@@ -349,6 +359,9 @@ class MacroAssembler : public Assembler {
void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
+ // Load On Condition
+ void LoadOnConditionP(Condition cond, Register dst, Register src);
+
// Store Floating Point
void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
@@ -402,15 +415,22 @@ class MacroAssembler : public Assembler {
void Xor(Register dst, Register src, const Operand& opnd);
void XorP(Register dst, Register src, const Operand& opnd);
void Popcnt32(Register dst, Register src);
+ void Not32(Register dst, Register src = no_reg);
+ void Not64(Register dst, Register src = no_reg);
+ void NotP(Register dst, Register src = no_reg);
#ifdef V8_TARGET_ARCH_S390X
void Popcnt64(Register dst, Register src);
#endif
- void NotP(Register dst);
-
void mov(Register dst, const Operand& src);
+ void CleanUInt32(Register x) {
+#ifdef V8_TARGET_ARCH_S390X
+ llgfr(x, x);
+#endif
+ }
+
// ---------------------------------------------------------------------------
// GC Support
@@ -685,7 +705,7 @@ class MacroAssembler : public Assembler {
void ConvertFloat32ToInt32(const DoubleRegister double_input,
const Register dst,
const DoubleRegister double_dst,
- FPRoundingMode rounding_mode = kRoundToZero);
+ FPRoundingMode rounding_mode);
void ConvertFloat32ToUnsignedInt32(
const DoubleRegister double_input, const Register dst,
const DoubleRegister double_dst,
@@ -727,7 +747,8 @@ class MacroAssembler : public Assembler {
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
- void EnterExitFrame(bool save_doubles, int stack_space = 1);
+ void EnterExitFrame(bool save_doubles, int stack_space = 1,
+ StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
@@ -958,6 +979,15 @@ class MacroAssembler : public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is currently only used for folded allocations. It just
+ // increments the top pointer without checking against the limit. This can
+ // only be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register scratch1,
+ Register scratch2, AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ Register scratch, AllocationFlags flags);
+
void AllocateTwoByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
@@ -982,7 +1012,6 @@ class MacroAssembler : public Assembler {
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Register heap_number_map, Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
Register scratch1, Register scratch2,
@@ -1206,44 +1235,6 @@ class MacroAssembler : public Assembler {
Register heap_number_map, Register scratch1,
Label* not_int32);
- // Overflow handling functions.
- // Usage: call the appropriate arithmetic function and then call one of the
- // flow control functions with the corresponding label.
-
- // Compute dst = left + right, setting condition codes. dst may be same as
- // either left or right (or a unique register). left and right must not be
- // the same register.
- void AddAndCheckForOverflow(Register dst, Register left, Register right,
- Register overflow_dst, Register scratch = r0);
- void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
- Register overflow_dst, Register scratch = r0);
-
- // Compute dst = left - right, setting condition codes. dst may be same as
- // either left or right (or a unique register). left and right must not be
- // the same register.
- void SubAndCheckForOverflow(Register dst, Register left, Register right,
- Register overflow_dst, Register scratch = r0);
-
- void BranchOnOverflow(Label* label) { blt(label /*, cr0*/); }
-
- void BranchOnNoOverflow(Label* label) { bge(label /*, cr0*/); }
-
- void RetOnOverflow(void) {
- Label label;
-
- blt(&label /*, cr0*/);
- Ret();
- bind(&label);
- }
-
- void RetOnNoOverflow(void) {
- Label label;
-
- bge(&label /*, cr0*/);
- Ret();
- bind(&label);
- }
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -1322,7 +1313,8 @@ class MacroAssembler : public Assembler {
void MovFromFloatResult(DoubleRegister dst);
// Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& builtin);
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -1567,17 +1559,29 @@ class MacroAssembler : public Assembler {
}
void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
- bool isSmi) {
+ bool isSmi, bool keyMaybeNegative) {
if (isSmi) {
SmiToArrayOffset(dst, src, elementSizeLog2);
- } else {
+ } else if (keyMaybeNegative ||
+ !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
#if V8_TARGET_ARCH_S390X
+ // If array access is dehoisted, the key, being an int32, can contain
+ // a negative value and needs to be sign-extended to 64-bit for
+ // memory access.
+ //
// src (key) is a 32-bit integer. Sign extension ensures
// the upper 32 bits do not contain garbage before being used to
// reference memory.
lgfr(src, src);
#endif
ShiftLeftP(dst, src, Operand(elementSizeLog2));
+ } else {
+ // Small optimization to reduce path length. After the bounds check,
+ // the key is guaranteed to be non-negative. Leverage RISBG,
+ // which also performs zero-extension.
+ risbg(dst, src, Operand(32 - elementSizeLog2),
+ Operand(63 - elementSizeLog2), Operand(elementSizeLog2),
+ true);
}
}
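
A hedged sketch of what the RISBG fast path above computes: because the key is known to be non-negative after the bounds check, zero-extension and the element-size shift collapse into a single rotate-then-insert, i.e.:

#include <cstdint>

// dst = zero_extend_32_to_64(key) << elementSizeLog2, in one instruction.
uint64_t IndexToArrayOffset(uint32_t key, unsigned elementSizeLog2) {
  return static_cast<uint64_t>(key) << elementSizeLog2;
}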
@@ -1659,6 +1663,10 @@ class MacroAssembler : public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -1759,6 +1767,9 @@ class MacroAssembler : public Assembler {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in r2 and returns map with validated enum cache
// in r2. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -1871,16 +1882,8 @@ inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
-#ifdef GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) \
- masm->stop(__FILE_LINE__); \
- masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 06e52a7626..91db78226b 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -10,6 +10,7 @@
#include "src/assembler.h"
#include "src/base/bits.h"
+#include "src/base/once.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/runtime/runtime-utils.h"
@@ -22,6 +23,8 @@
namespace v8 {
namespace internal {
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
+
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
@@ -33,7 +36,6 @@ namespace internal {
class S390Debugger {
public:
explicit S390Debugger(Simulator* sim) : sim_(sim) {}
- ~S390Debugger();
void Stop(Instruction* instr);
void Debug();
@@ -66,48 +68,6 @@ class S390Debugger {
void RedoBreakpoints();
};
-S390Debugger::~S390Debugger() {}
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-static void InitializeCoverage() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-void S390Debugger::Stop(Instruction* instr) {
- // Get the stop code.
- uint32_t code = instr->SvcValue() & kStopCodeMask;
- // Retrieve the encoded address, which comes just after this stop.
- char** msg_address =
- reinterpret_cast<char**>(sim_->get_pc() + sizeof(FourByteInstr));
- char* msg = *msg_address;
- DCHECK(msg != NULL);
-
- // Update this stop description.
- if (isWatchedStop(code) && !watched_stops_[code].desc) {
- watched_stops_[code].desc = msg;
- }
-
- if (strlen(msg) > 0) {
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", msg);
- fflush(coverage_log);
- }
- // Overwrite the instruction and address with nops.
- instr->SetInstructionBits(kNopInstr);
- reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
- }
- sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr) + kPointerSize);
-}
-
-#else // ndef GENERATED_CODE_COVERAGE
-
-static void InitializeCoverage() {}
-
void S390Debugger::Stop(Instruction* instr) {
// Get the stop code.
// use of kStopCodeMask not right on PowerPC
@@ -127,7 +87,6 @@ void S390Debugger::Stop(Instruction* instr) {
sim_->set_pc(sim_->get_pc() + sizeof(FourByteInstr) + kPointerSize);
Debug();
}
-#endif
intptr_t S390Debugger::GetRegisterValue(int regnum) {
return sim_->get_register(regnum);
@@ -274,18 +233,40 @@ void S390Debugger::Debug() {
reinterpret_cast<Instruction*>(sim_->get_pc()));
}
- if (argc == 2 && last_pc != sim_->get_pc() && GetValue(arg1, &value)) {
- for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
- buffer.start());
- sim_->ExecuteInstruction(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
+ if (argc == 2 && last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ if (GetValue(arg1, &value)) {
+ // Interpret a numeric argument as the number of instructions to
+ // step past.
+ for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
+ buffer.start());
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
+ } else {
+ // Otherwise treat it as the mnemonic of the opcode to stop at.
+ char mnemonic[256];
+ while (!sim_->has_bad_pc()) {
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte*>(sim_->get_pc()));
+ char* mnemonicStart = buffer.start();
+ while (*mnemonicStart != 0 && *mnemonicStart != ' ')
+ mnemonicStart++;
+ SScanF(mnemonicStart, "%s", mnemonic);
+ if (!strcmp(arg1, mnemonic)) break;
+
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
+ buffer.start());
+ sim_->ExecuteInstruction(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ }
}
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
@@ -308,7 +289,7 @@ void S390Debugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR,
- Register::from_code(i).ToString(), value);
+ GetRegConfig()->GetGeneralRegisterName(i), value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -323,7 +304,7 @@ void S390Debugger::Debug() {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF(" %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
- Register::from_code(i).ToString(), value, value);
+ GetRegConfig()->GetGeneralRegisterName(i), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
@@ -339,14 +320,15 @@ void S390Debugger::Debug() {
float fvalue = GetFPFloatRegisterValue(i);
uint32_t as_words = bit_cast<uint32_t>(fvalue);
PrintF("%3s: %f 0x%08x\n",
- DoubleRegister::from_code(i).ToString(), fvalue, as_words);
+ GetRegConfig()->GetDoubleRegisterName(i), fvalue,
+ as_words);
}
} else if (strcmp(arg1, "alld") == 0) {
for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
dvalue = GetFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
- DoubleRegister::from_code(i).ToString(), dvalue,
+ GetRegConfig()->GetDoubleRegisterName(i), dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
}
@@ -574,6 +556,8 @@ void S390Debugger::Debug() {
} else {
PrintF("Wrong usage. Use help command for more information.\n");
}
+ } else if (strcmp(cmd, "icount") == 0) {
+ PrintF("%05" PRId64 "\n", sim_->icount_);
} else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
PrintF("Trace of executed instructions is %s\n",
@@ -676,7 +660,7 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
-void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
@@ -697,9 +681,8 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
}
}
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
- v8::internal::HashMap::Entry* entry =
- i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@@ -708,8 +691,7 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
}
// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@@ -721,8 +703,7 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
- Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -748,12 +729,747 @@ void Simulator::Initialize(Isolate* isolate) {
isolate->set_simulator_initialized(true);
::v8::internal::ExternalReference::set_redirector(isolate,
&RedirectExternalReference);
+ static base::OnceType once = V8_ONCE_INIT;
+ base::CallOnce(&once, &Simulator::EvalTableInit);
}
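
The base::CallOnce guard added above ensures the dispatch table is built exactly once even when several isolates initialize simulators concurrently. A hedged standard-library analogue of the same pattern:

#include <mutex>

static std::once_flag eval_table_once;

void EnsureEvalTableInitialized() {
  // Equivalent in spirit to base::CallOnce(&once, &Simulator::EvalTableInit).
  std::call_once(eval_table_once, [] {
    // ... populate the dispatch table, as EvalTableInit does below ...
  });
}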
+Simulator::EvaluateFuncType Simulator::EvalTable[] = {NULL};
+
+void Simulator::EvalTableInit() {
+ for (int i = 0; i < MAX_NUM_OPCODES; i++) {
+ EvalTable[i] = &Simulator::Evaluate_Unknown;
+ }
+
+ EvalTable[BKPT] = &Simulator::Evaluate_BKPT;
+ EvalTable[SPM] = &Simulator::Evaluate_SPM;
+ EvalTable[BALR] = &Simulator::Evaluate_BALR;
+ EvalTable[BCTR] = &Simulator::Evaluate_BCTR;
+ EvalTable[BCR] = &Simulator::Evaluate_BCR;
+ EvalTable[SVC] = &Simulator::Evaluate_SVC;
+ EvalTable[BSM] = &Simulator::Evaluate_BSM;
+ EvalTable[BASSM] = &Simulator::Evaluate_BASSM;
+ EvalTable[BASR] = &Simulator::Evaluate_BASR;
+ EvalTable[MVCL] = &Simulator::Evaluate_MVCL;
+ EvalTable[CLCL] = &Simulator::Evaluate_CLCL;
+ EvalTable[LPR] = &Simulator::Evaluate_LPR;
+ EvalTable[LNR] = &Simulator::Evaluate_LNR;
+ EvalTable[LTR] = &Simulator::Evaluate_LTR;
+ EvalTable[LCR] = &Simulator::Evaluate_LCR;
+ EvalTable[NR] = &Simulator::Evaluate_NR;
+ EvalTable[CLR] = &Simulator::Evaluate_CLR;
+ EvalTable[OR] = &Simulator::Evaluate_OR;
+ EvalTable[XR] = &Simulator::Evaluate_XR;
+ EvalTable[LR] = &Simulator::Evaluate_LR;
+ EvalTable[CR] = &Simulator::Evaluate_CR;
+ EvalTable[AR] = &Simulator::Evaluate_AR;
+ EvalTable[SR] = &Simulator::Evaluate_SR;
+ EvalTable[MR] = &Simulator::Evaluate_MR;
+ EvalTable[DR] = &Simulator::Evaluate_DR;
+ EvalTable[ALR] = &Simulator::Evaluate_ALR;
+ EvalTable[SLR] = &Simulator::Evaluate_SLR;
+ EvalTable[LDR] = &Simulator::Evaluate_LDR;
+ EvalTable[CDR] = &Simulator::Evaluate_CDR;
+ EvalTable[LER] = &Simulator::Evaluate_LER;
+ EvalTable[STH] = &Simulator::Evaluate_STH;
+ EvalTable[LA] = &Simulator::Evaluate_LA;
+ EvalTable[STC] = &Simulator::Evaluate_STC;
+ EvalTable[IC_z] = &Simulator::Evaluate_IC_z;
+ EvalTable[EX] = &Simulator::Evaluate_EX;
+ EvalTable[BAL] = &Simulator::Evaluate_BAL;
+ EvalTable[BCT] = &Simulator::Evaluate_BCT;
+ EvalTable[BC] = &Simulator::Evaluate_BC;
+ EvalTable[LH] = &Simulator::Evaluate_LH;
+ EvalTable[CH] = &Simulator::Evaluate_CH;
+ EvalTable[AH] = &Simulator::Evaluate_AH;
+ EvalTable[SH] = &Simulator::Evaluate_SH;
+ EvalTable[MH] = &Simulator::Evaluate_MH;
+ EvalTable[BAS] = &Simulator::Evaluate_BAS;
+ EvalTable[CVD] = &Simulator::Evaluate_CVD;
+ EvalTable[CVB] = &Simulator::Evaluate_CVB;
+ EvalTable[ST] = &Simulator::Evaluate_ST;
+ EvalTable[LAE] = &Simulator::Evaluate_LAE;
+ EvalTable[N] = &Simulator::Evaluate_N;
+ EvalTable[CL] = &Simulator::Evaluate_CL;
+ EvalTable[O] = &Simulator::Evaluate_O;
+ EvalTable[X] = &Simulator::Evaluate_X;
+ EvalTable[L] = &Simulator::Evaluate_L;
+ EvalTable[C] = &Simulator::Evaluate_C;
+ EvalTable[A] = &Simulator::Evaluate_A;
+ EvalTable[S] = &Simulator::Evaluate_S;
+ EvalTable[M] = &Simulator::Evaluate_M;
+ EvalTable[D] = &Simulator::Evaluate_D;
+ EvalTable[AL] = &Simulator::Evaluate_AL;
+ EvalTable[SL] = &Simulator::Evaluate_SL;
+ EvalTable[STD] = &Simulator::Evaluate_STD;
+ EvalTable[LD] = &Simulator::Evaluate_LD;
+ EvalTable[CD] = &Simulator::Evaluate_CD;
+ EvalTable[STE] = &Simulator::Evaluate_STE;
+ EvalTable[MS] = &Simulator::Evaluate_MS;
+ EvalTable[LE] = &Simulator::Evaluate_LE;
+ EvalTable[BRXH] = &Simulator::Evaluate_BRXH;
+ EvalTable[BRXLE] = &Simulator::Evaluate_BRXLE;
+ EvalTable[BXH] = &Simulator::Evaluate_BXH;
+ EvalTable[BXLE] = &Simulator::Evaluate_BXLE;
+ EvalTable[SRL] = &Simulator::Evaluate_SRL;
+ EvalTable[SLL] = &Simulator::Evaluate_SLL;
+ EvalTable[SRA] = &Simulator::Evaluate_SRA;
+ EvalTable[SLA] = &Simulator::Evaluate_SLA;
+ EvalTable[SRDL] = &Simulator::Evaluate_SRDL;
+ EvalTable[SLDL] = &Simulator::Evaluate_SLDL;
+ EvalTable[SRDA] = &Simulator::Evaluate_SRDA;
+ EvalTable[SLDA] = &Simulator::Evaluate_SLDA;
+ EvalTable[STM] = &Simulator::Evaluate_STM;
+ EvalTable[TM] = &Simulator::Evaluate_TM;
+ EvalTable[MVI] = &Simulator::Evaluate_MVI;
+ EvalTable[TS] = &Simulator::Evaluate_TS;
+ EvalTable[NI] = &Simulator::Evaluate_NI;
+ EvalTable[CLI] = &Simulator::Evaluate_CLI;
+ EvalTable[OI] = &Simulator::Evaluate_OI;
+ EvalTable[XI] = &Simulator::Evaluate_XI;
+ EvalTable[LM] = &Simulator::Evaluate_LM;
+ EvalTable[MVCLE] = &Simulator::Evaluate_MVCLE;
+ EvalTable[CLCLE] = &Simulator::Evaluate_CLCLE;
+ EvalTable[MC] = &Simulator::Evaluate_MC;
+ EvalTable[CDS] = &Simulator::Evaluate_CDS;
+ EvalTable[STCM] = &Simulator::Evaluate_STCM;
+ EvalTable[ICM] = &Simulator::Evaluate_ICM;
+ EvalTable[BPRP] = &Simulator::Evaluate_BPRP;
+ EvalTable[BPP] = &Simulator::Evaluate_BPP;
+ EvalTable[TRTR] = &Simulator::Evaluate_TRTR;
+ EvalTable[MVN] = &Simulator::Evaluate_MVN;
+ EvalTable[MVC] = &Simulator::Evaluate_MVC;
+ EvalTable[MVZ] = &Simulator::Evaluate_MVZ;
+ EvalTable[NC] = &Simulator::Evaluate_NC;
+ EvalTable[CLC] = &Simulator::Evaluate_CLC;
+ EvalTable[OC] = &Simulator::Evaluate_OC;
+ EvalTable[XC] = &Simulator::Evaluate_XC;
+ EvalTable[MVCP] = &Simulator::Evaluate_MVCP;
+ EvalTable[TR] = &Simulator::Evaluate_TR;
+ EvalTable[TRT] = &Simulator::Evaluate_TRT;
+ EvalTable[ED] = &Simulator::Evaluate_ED;
+ EvalTable[EDMK] = &Simulator::Evaluate_EDMK;
+ EvalTable[PKU] = &Simulator::Evaluate_PKU;
+ EvalTable[UNPKU] = &Simulator::Evaluate_UNPKU;
+ EvalTable[MVCIN] = &Simulator::Evaluate_MVCIN;
+ EvalTable[PKA] = &Simulator::Evaluate_PKA;
+ EvalTable[UNPKA] = &Simulator::Evaluate_UNPKA;
+ EvalTable[PLO] = &Simulator::Evaluate_PLO;
+ EvalTable[LMD] = &Simulator::Evaluate_LMD;
+ EvalTable[SRP] = &Simulator::Evaluate_SRP;
+ EvalTable[MVO] = &Simulator::Evaluate_MVO;
+ EvalTable[PACK] = &Simulator::Evaluate_PACK;
+ EvalTable[UNPK] = &Simulator::Evaluate_UNPK;
+ EvalTable[ZAP] = &Simulator::Evaluate_ZAP;
+ EvalTable[AP] = &Simulator::Evaluate_AP;
+ EvalTable[SP] = &Simulator::Evaluate_SP;
+ EvalTable[MP] = &Simulator::Evaluate_MP;
+ EvalTable[DP] = &Simulator::Evaluate_DP;
+ EvalTable[UPT] = &Simulator::Evaluate_UPT;
+ EvalTable[PFPO] = &Simulator::Evaluate_PFPO;
+ EvalTable[IIHH] = &Simulator::Evaluate_IIHH;
+ EvalTable[IIHL] = &Simulator::Evaluate_IIHL;
+ EvalTable[IILH] = &Simulator::Evaluate_IILH;
+ EvalTable[IILL] = &Simulator::Evaluate_IILL;
+ EvalTable[NIHH] = &Simulator::Evaluate_NIHH;
+ EvalTable[NIHL] = &Simulator::Evaluate_NIHL;
+ EvalTable[NILH] = &Simulator::Evaluate_NILH;
+ EvalTable[NILL] = &Simulator::Evaluate_NILL;
+ EvalTable[OIHH] = &Simulator::Evaluate_OIHH;
+ EvalTable[OIHL] = &Simulator::Evaluate_OIHL;
+ EvalTable[OILH] = &Simulator::Evaluate_OILH;
+ EvalTable[OILL] = &Simulator::Evaluate_OILL;
+ EvalTable[LLIHH] = &Simulator::Evaluate_LLIHH;
+ EvalTable[LLIHL] = &Simulator::Evaluate_LLIHL;
+ EvalTable[LLILH] = &Simulator::Evaluate_LLILH;
+ EvalTable[LLILL] = &Simulator::Evaluate_LLILL;
+ EvalTable[TMLH] = &Simulator::Evaluate_TMLH;
+ EvalTable[TMLL] = &Simulator::Evaluate_TMLL;
+ EvalTable[TMHH] = &Simulator::Evaluate_TMHH;
+ EvalTable[TMHL] = &Simulator::Evaluate_TMHL;
+ EvalTable[BRC] = &Simulator::Evaluate_BRC;
+ EvalTable[BRAS] = &Simulator::Evaluate_BRAS;
+ EvalTable[BRCT] = &Simulator::Evaluate_BRCT;
+ EvalTable[BRCTG] = &Simulator::Evaluate_BRCTG;
+ EvalTable[LHI] = &Simulator::Evaluate_LHI;
+ EvalTable[LGHI] = &Simulator::Evaluate_LGHI;
+ EvalTable[AHI] = &Simulator::Evaluate_AHI;
+ EvalTable[AGHI] = &Simulator::Evaluate_AGHI;
+ EvalTable[MHI] = &Simulator::Evaluate_MHI;
+ EvalTable[MGHI] = &Simulator::Evaluate_MGHI;
+ EvalTable[CHI] = &Simulator::Evaluate_CHI;
+ EvalTable[CGHI] = &Simulator::Evaluate_CGHI;
+ EvalTable[LARL] = &Simulator::Evaluate_LARL;
+ EvalTable[LGFI] = &Simulator::Evaluate_LGFI;
+ EvalTable[BRCL] = &Simulator::Evaluate_BRCL;
+ EvalTable[BRASL] = &Simulator::Evaluate_BRASL;
+ EvalTable[XIHF] = &Simulator::Evaluate_XIHF;
+ EvalTable[XILF] = &Simulator::Evaluate_XILF;
+ EvalTable[IIHF] = &Simulator::Evaluate_IIHF;
+ EvalTable[IILF] = &Simulator::Evaluate_IILF;
+ EvalTable[NIHF] = &Simulator::Evaluate_NIHF;
+ EvalTable[NILF] = &Simulator::Evaluate_NILF;
+ EvalTable[OIHF] = &Simulator::Evaluate_OIHF;
+ EvalTable[OILF] = &Simulator::Evaluate_OILF;
+ EvalTable[LLIHF] = &Simulator::Evaluate_LLIHF;
+ EvalTable[LLILF] = &Simulator::Evaluate_LLILF;
+ EvalTable[MSGFI] = &Simulator::Evaluate_MSGFI;
+ EvalTable[MSFI] = &Simulator::Evaluate_MSFI;
+ EvalTable[SLGFI] = &Simulator::Evaluate_SLGFI;
+ EvalTable[SLFI] = &Simulator::Evaluate_SLFI;
+ EvalTable[AGFI] = &Simulator::Evaluate_AGFI;
+ EvalTable[AFI] = &Simulator::Evaluate_AFI;
+ EvalTable[ALGFI] = &Simulator::Evaluate_ALGFI;
+ EvalTable[ALFI] = &Simulator::Evaluate_ALFI;
+ EvalTable[CGFI] = &Simulator::Evaluate_CGFI;
+ EvalTable[CFI] = &Simulator::Evaluate_CFI;
+ EvalTable[CLGFI] = &Simulator::Evaluate_CLGFI;
+ EvalTable[CLFI] = &Simulator::Evaluate_CLFI;
+ EvalTable[LLHRL] = &Simulator::Evaluate_LLHRL;
+ EvalTable[LGHRL] = &Simulator::Evaluate_LGHRL;
+ EvalTable[LHRL] = &Simulator::Evaluate_LHRL;
+ EvalTable[LLGHRL] = &Simulator::Evaluate_LLGHRL;
+ EvalTable[STHRL] = &Simulator::Evaluate_STHRL;
+ EvalTable[LGRL] = &Simulator::Evaluate_LGRL;
+ EvalTable[STGRL] = &Simulator::Evaluate_STGRL;
+ EvalTable[LGFRL] = &Simulator::Evaluate_LGFRL;
+ EvalTable[LRL] = &Simulator::Evaluate_LRL;
+ EvalTable[LLGFRL] = &Simulator::Evaluate_LLGFRL;
+ EvalTable[STRL] = &Simulator::Evaluate_STRL;
+ EvalTable[EXRL] = &Simulator::Evaluate_EXRL;
+ EvalTable[PFDRL] = &Simulator::Evaluate_PFDRL;
+ EvalTable[CGHRL] = &Simulator::Evaluate_CGHRL;
+ EvalTable[CHRL] = &Simulator::Evaluate_CHRL;
+ EvalTable[CGRL] = &Simulator::Evaluate_CGRL;
+ EvalTable[CGFRL] = &Simulator::Evaluate_CGFRL;
+ EvalTable[ECTG] = &Simulator::Evaluate_ECTG;
+ EvalTable[CSST] = &Simulator::Evaluate_CSST;
+ EvalTable[LPD] = &Simulator::Evaluate_LPD;
+ EvalTable[LPDG] = &Simulator::Evaluate_LPDG;
+ EvalTable[BRCTH] = &Simulator::Evaluate_BRCTH;
+ EvalTable[AIH] = &Simulator::Evaluate_AIH;
+ EvalTable[ALSIH] = &Simulator::Evaluate_ALSIH;
+ EvalTable[ALSIHN] = &Simulator::Evaluate_ALSIHN;
+ EvalTable[CIH] = &Simulator::Evaluate_CIH;
+ EvalTable[STCK] = &Simulator::Evaluate_STCK;
+ EvalTable[CFC] = &Simulator::Evaluate_CFC;
+ EvalTable[IPM] = &Simulator::Evaluate_IPM;
+ EvalTable[HSCH] = &Simulator::Evaluate_HSCH;
+ EvalTable[MSCH] = &Simulator::Evaluate_MSCH;
+ EvalTable[SSCH] = &Simulator::Evaluate_SSCH;
+ EvalTable[STSCH] = &Simulator::Evaluate_STSCH;
+ EvalTable[TSCH] = &Simulator::Evaluate_TSCH;
+ EvalTable[TPI] = &Simulator::Evaluate_TPI;
+ EvalTable[SAL] = &Simulator::Evaluate_SAL;
+ EvalTable[RSCH] = &Simulator::Evaluate_RSCH;
+ EvalTable[STCRW] = &Simulator::Evaluate_STCRW;
+ EvalTable[STCPS] = &Simulator::Evaluate_STCPS;
+ EvalTable[RCHP] = &Simulator::Evaluate_RCHP;
+ EvalTable[SCHM] = &Simulator::Evaluate_SCHM;
+ EvalTable[CKSM] = &Simulator::Evaluate_CKSM;
+ EvalTable[SAR] = &Simulator::Evaluate_SAR;
+ EvalTable[EAR] = &Simulator::Evaluate_EAR;
+ EvalTable[MSR] = &Simulator::Evaluate_MSR;
+ EvalTable[MVST] = &Simulator::Evaluate_MVST;
+ EvalTable[CUSE] = &Simulator::Evaluate_CUSE;
+ EvalTable[SRST] = &Simulator::Evaluate_SRST;
+ EvalTable[XSCH] = &Simulator::Evaluate_XSCH;
+ EvalTable[STCKE] = &Simulator::Evaluate_STCKE;
+ EvalTable[STCKF] = &Simulator::Evaluate_STCKF;
+ EvalTable[SRNM] = &Simulator::Evaluate_SRNM;
+ EvalTable[STFPC] = &Simulator::Evaluate_STFPC;
+ EvalTable[LFPC] = &Simulator::Evaluate_LFPC;
+ EvalTable[TRE] = &Simulator::Evaluate_TRE;
+ EvalTable[CUUTF] = &Simulator::Evaluate_CUUTF;
+ EvalTable[CUTFU] = &Simulator::Evaluate_CUTFU;
+ EvalTable[STFLE] = &Simulator::Evaluate_STFLE;
+ EvalTable[SRNMB] = &Simulator::Evaluate_SRNMB;
+ EvalTable[SRNMT] = &Simulator::Evaluate_SRNMT;
+ EvalTable[LFAS] = &Simulator::Evaluate_LFAS;
+ EvalTable[PPA] = &Simulator::Evaluate_PPA;
+ EvalTable[ETND] = &Simulator::Evaluate_ETND;
+ EvalTable[TEND] = &Simulator::Evaluate_TEND;
+ EvalTable[NIAI] = &Simulator::Evaluate_NIAI;
+ EvalTable[TABORT] = &Simulator::Evaluate_TABORT;
+ EvalTable[TRAP4] = &Simulator::Evaluate_TRAP4;
+ EvalTable[LPEBR] = &Simulator::Evaluate_LPEBR;
+ EvalTable[LNEBR] = &Simulator::Evaluate_LNEBR;
+ EvalTable[LTEBR] = &Simulator::Evaluate_LTEBR;
+ EvalTable[LCEBR] = &Simulator::Evaluate_LCEBR;
+ EvalTable[LDEBR] = &Simulator::Evaluate_LDEBR;
+ EvalTable[LXDBR] = &Simulator::Evaluate_LXDBR;
+ EvalTable[LXEBR] = &Simulator::Evaluate_LXEBR;
+ EvalTable[MXDBR] = &Simulator::Evaluate_MXDBR;
+ EvalTable[KEBR] = &Simulator::Evaluate_KEBR;
+ EvalTable[CEBR] = &Simulator::Evaluate_CEBR;
+ EvalTable[AEBR] = &Simulator::Evaluate_AEBR;
+ EvalTable[SEBR] = &Simulator::Evaluate_SEBR;
+ EvalTable[MDEBR] = &Simulator::Evaluate_MDEBR;
+ EvalTable[DEBR] = &Simulator::Evaluate_DEBR;
+ EvalTable[MAEBR] = &Simulator::Evaluate_MAEBR;
+ EvalTable[MSEBR] = &Simulator::Evaluate_MSEBR;
+ EvalTable[LPDBR] = &Simulator::Evaluate_LPDBR;
+ EvalTable[LNDBR] = &Simulator::Evaluate_LNDBR;
+ EvalTable[LTDBR] = &Simulator::Evaluate_LTDBR;
+ EvalTable[LCDBR] = &Simulator::Evaluate_LCDBR;
+ EvalTable[SQEBR] = &Simulator::Evaluate_SQEBR;
+ EvalTable[SQDBR] = &Simulator::Evaluate_SQDBR;
+ EvalTable[SQXBR] = &Simulator::Evaluate_SQXBR;
+ EvalTable[MEEBR] = &Simulator::Evaluate_MEEBR;
+ EvalTable[KDBR] = &Simulator::Evaluate_KDBR;
+ EvalTable[CDBR] = &Simulator::Evaluate_CDBR;
+ EvalTable[ADBR] = &Simulator::Evaluate_ADBR;
+ EvalTable[SDBR] = &Simulator::Evaluate_SDBR;
+ EvalTable[MDBR] = &Simulator::Evaluate_MDBR;
+ EvalTable[DDBR] = &Simulator::Evaluate_DDBR;
+ EvalTable[MADBR] = &Simulator::Evaluate_MADBR;
+ EvalTable[MSDBR] = &Simulator::Evaluate_MSDBR;
+ EvalTable[LPXBR] = &Simulator::Evaluate_LPXBR;
+ EvalTable[LNXBR] = &Simulator::Evaluate_LNXBR;
+ EvalTable[LTXBR] = &Simulator::Evaluate_LTXBR;
+ EvalTable[LCXBR] = &Simulator::Evaluate_LCXBR;
+ EvalTable[LEDBRA] = &Simulator::Evaluate_LEDBRA;
+ EvalTable[LDXBRA] = &Simulator::Evaluate_LDXBRA;
+ EvalTable[LEXBRA] = &Simulator::Evaluate_LEXBRA;
+ EvalTable[FIXBRA] = &Simulator::Evaluate_FIXBRA;
+ EvalTable[KXBR] = &Simulator::Evaluate_KXBR;
+ EvalTable[CXBR] = &Simulator::Evaluate_CXBR;
+ EvalTable[AXBR] = &Simulator::Evaluate_AXBR;
+ EvalTable[SXBR] = &Simulator::Evaluate_SXBR;
+ EvalTable[MXBR] = &Simulator::Evaluate_MXBR;
+ EvalTable[DXBR] = &Simulator::Evaluate_DXBR;
+ EvalTable[TBEDR] = &Simulator::Evaluate_TBEDR;
+ EvalTable[TBDR] = &Simulator::Evaluate_TBDR;
+ EvalTable[DIEBR] = &Simulator::Evaluate_DIEBR;
+ EvalTable[FIEBRA] = &Simulator::Evaluate_FIEBRA;
+ EvalTable[THDER] = &Simulator::Evaluate_THDER;
+ EvalTable[THDR] = &Simulator::Evaluate_THDR;
+ EvalTable[DIDBR] = &Simulator::Evaluate_DIDBR;
+ EvalTable[FIDBRA] = &Simulator::Evaluate_FIDBRA;
+ EvalTable[LXR] = &Simulator::Evaluate_LXR;
+ EvalTable[LPDFR] = &Simulator::Evaluate_LPDFR;
+ EvalTable[LNDFR] = &Simulator::Evaluate_LNDFR;
+ EvalTable[LCDFR] = &Simulator::Evaluate_LCDFR;
+ EvalTable[LZER] = &Simulator::Evaluate_LZER;
+ EvalTable[LZDR] = &Simulator::Evaluate_LZDR;
+ EvalTable[LZXR] = &Simulator::Evaluate_LZXR;
+ EvalTable[SFPC] = &Simulator::Evaluate_SFPC;
+ EvalTable[SFASR] = &Simulator::Evaluate_SFASR;
+ EvalTable[EFPC] = &Simulator::Evaluate_EFPC;
+ EvalTable[CELFBR] = &Simulator::Evaluate_CELFBR;
+ EvalTable[CDLFBR] = &Simulator::Evaluate_CDLFBR;
+ EvalTable[CXLFBR] = &Simulator::Evaluate_CXLFBR;
+ EvalTable[CEFBRA] = &Simulator::Evaluate_CEFBRA;
+ EvalTable[CDFBRA] = &Simulator::Evaluate_CDFBRA;
+ EvalTable[CXFBRA] = &Simulator::Evaluate_CXFBRA;
+ EvalTable[CFEBRA] = &Simulator::Evaluate_CFEBRA;
+ EvalTable[CFDBRA] = &Simulator::Evaluate_CFDBRA;
+ EvalTable[CFXBRA] = &Simulator::Evaluate_CFXBRA;
+ EvalTable[CLFEBR] = &Simulator::Evaluate_CLFEBR;
+ EvalTable[CLFDBR] = &Simulator::Evaluate_CLFDBR;
+ EvalTable[CLFXBR] = &Simulator::Evaluate_CLFXBR;
+ EvalTable[CELGBR] = &Simulator::Evaluate_CELGBR;
+ EvalTable[CDLGBR] = &Simulator::Evaluate_CDLGBR;
+ EvalTable[CXLGBR] = &Simulator::Evaluate_CXLGBR;
+ EvalTable[CEGBRA] = &Simulator::Evaluate_CEGBRA;
+ EvalTable[CDGBRA] = &Simulator::Evaluate_CDGBRA;
+ EvalTable[CXGBRA] = &Simulator::Evaluate_CXGBRA;
+ EvalTable[CGEBRA] = &Simulator::Evaluate_CGEBRA;
+ EvalTable[CGDBRA] = &Simulator::Evaluate_CGDBRA;
+ EvalTable[CGXBRA] = &Simulator::Evaluate_CGXBRA;
+ EvalTable[CLGEBR] = &Simulator::Evaluate_CLGEBR;
+ EvalTable[CLGDBR] = &Simulator::Evaluate_CLGDBR;
+ EvalTable[CFER] = &Simulator::Evaluate_CFER;
+ EvalTable[CFDR] = &Simulator::Evaluate_CFDR;
+ EvalTable[CFXR] = &Simulator::Evaluate_CFXR;
+ EvalTable[LDGR] = &Simulator::Evaluate_LDGR;
+ EvalTable[CGER] = &Simulator::Evaluate_CGER;
+ EvalTable[CGDR] = &Simulator::Evaluate_CGDR;
+ EvalTable[CGXR] = &Simulator::Evaluate_CGXR;
+ EvalTable[LGDR] = &Simulator::Evaluate_LGDR;
+ EvalTable[MDTR] = &Simulator::Evaluate_MDTR;
+ EvalTable[MDTRA] = &Simulator::Evaluate_MDTRA;
+ EvalTable[DDTRA] = &Simulator::Evaluate_DDTRA;
+ EvalTable[ADTRA] = &Simulator::Evaluate_ADTRA;
+ EvalTable[SDTRA] = &Simulator::Evaluate_SDTRA;
+ EvalTable[LDETR] = &Simulator::Evaluate_LDETR;
+ EvalTable[LEDTR] = &Simulator::Evaluate_LEDTR;
+ EvalTable[LTDTR] = &Simulator::Evaluate_LTDTR;
+ EvalTable[FIDTR] = &Simulator::Evaluate_FIDTR;
+ EvalTable[MXTRA] = &Simulator::Evaluate_MXTRA;
+ EvalTable[DXTRA] = &Simulator::Evaluate_DXTRA;
+ EvalTable[AXTRA] = &Simulator::Evaluate_AXTRA;
+ EvalTable[SXTRA] = &Simulator::Evaluate_SXTRA;
+ EvalTable[LXDTR] = &Simulator::Evaluate_LXDTR;
+ EvalTable[LDXTR] = &Simulator::Evaluate_LDXTR;
+ EvalTable[LTXTR] = &Simulator::Evaluate_LTXTR;
+ EvalTable[FIXTR] = &Simulator::Evaluate_FIXTR;
+ EvalTable[KDTR] = &Simulator::Evaluate_KDTR;
+ EvalTable[CGDTRA] = &Simulator::Evaluate_CGDTRA;
+ EvalTable[CUDTR] = &Simulator::Evaluate_CUDTR;
+ EvalTable[CDTR] = &Simulator::Evaluate_CDTR;
+ EvalTable[EEDTR] = &Simulator::Evaluate_EEDTR;
+ EvalTable[ESDTR] = &Simulator::Evaluate_ESDTR;
+ EvalTable[KXTR] = &Simulator::Evaluate_KXTR;
+ EvalTable[CGXTRA] = &Simulator::Evaluate_CGXTRA;
+ EvalTable[CUXTR] = &Simulator::Evaluate_CUXTR;
+ EvalTable[CSXTR] = &Simulator::Evaluate_CSXTR;
+ EvalTable[CXTR] = &Simulator::Evaluate_CXTR;
+ EvalTable[EEXTR] = &Simulator::Evaluate_EEXTR;
+ EvalTable[ESXTR] = &Simulator::Evaluate_ESXTR;
+ EvalTable[CDGTRA] = &Simulator::Evaluate_CDGTRA;
+ EvalTable[CDUTR] = &Simulator::Evaluate_CDUTR;
+ EvalTable[CDSTR] = &Simulator::Evaluate_CDSTR;
+ EvalTable[CEDTR] = &Simulator::Evaluate_CEDTR;
+ EvalTable[QADTR] = &Simulator::Evaluate_QADTR;
+ EvalTable[IEDTR] = &Simulator::Evaluate_IEDTR;
+ EvalTable[RRDTR] = &Simulator::Evaluate_RRDTR;
+ EvalTable[CXGTRA] = &Simulator::Evaluate_CXGTRA;
+ EvalTable[CXUTR] = &Simulator::Evaluate_CXUTR;
+ EvalTable[CXSTR] = &Simulator::Evaluate_CXSTR;
+ EvalTable[CEXTR] = &Simulator::Evaluate_CEXTR;
+ EvalTable[QAXTR] = &Simulator::Evaluate_QAXTR;
+ EvalTable[IEXTR] = &Simulator::Evaluate_IEXTR;
+ EvalTable[RRXTR] = &Simulator::Evaluate_RRXTR;
+ EvalTable[LPGR] = &Simulator::Evaluate_LPGR;
+ EvalTable[LNGR] = &Simulator::Evaluate_LNGR;
+ EvalTable[LTGR] = &Simulator::Evaluate_LTGR;
+ EvalTable[LCGR] = &Simulator::Evaluate_LCGR;
+ EvalTable[LGR] = &Simulator::Evaluate_LGR;
+ EvalTable[LGBR] = &Simulator::Evaluate_LGBR;
+ EvalTable[LGHR] = &Simulator::Evaluate_LGHR;
+ EvalTable[AGR] = &Simulator::Evaluate_AGR;
+ EvalTable[SGR] = &Simulator::Evaluate_SGR;
+ EvalTable[ALGR] = &Simulator::Evaluate_ALGR;
+ EvalTable[SLGR] = &Simulator::Evaluate_SLGR;
+ EvalTable[MSGR] = &Simulator::Evaluate_MSGR;
+ EvalTable[DSGR] = &Simulator::Evaluate_DSGR;
+ EvalTable[LRVGR] = &Simulator::Evaluate_LRVGR;
+ EvalTable[LPGFR] = &Simulator::Evaluate_LPGFR;
+ EvalTable[LNGFR] = &Simulator::Evaluate_LNGFR;
+ EvalTable[LTGFR] = &Simulator::Evaluate_LTGFR;
+ EvalTable[LCGFR] = &Simulator::Evaluate_LCGFR;
+ EvalTable[LGFR] = &Simulator::Evaluate_LGFR;
+ EvalTable[LLGFR] = &Simulator::Evaluate_LLGFR;
+ EvalTable[LLGTR] = &Simulator::Evaluate_LLGTR;
+ EvalTable[AGFR] = &Simulator::Evaluate_AGFR;
+ EvalTable[SGFR] = &Simulator::Evaluate_SGFR;
+ EvalTable[ALGFR] = &Simulator::Evaluate_ALGFR;
+ EvalTable[SLGFR] = &Simulator::Evaluate_SLGFR;
+ EvalTable[MSGFR] = &Simulator::Evaluate_MSGFR;
+ EvalTable[DSGFR] = &Simulator::Evaluate_DSGFR;
+ EvalTable[KMAC] = &Simulator::Evaluate_KMAC;
+ EvalTable[LRVR] = &Simulator::Evaluate_LRVR;
+ EvalTable[CGR] = &Simulator::Evaluate_CGR;
+ EvalTable[CLGR] = &Simulator::Evaluate_CLGR;
+ EvalTable[LBR] = &Simulator::Evaluate_LBR;
+ EvalTable[LHR] = &Simulator::Evaluate_LHR;
+ EvalTable[KMF] = &Simulator::Evaluate_KMF;
+ EvalTable[KMO] = &Simulator::Evaluate_KMO;
+ EvalTable[PCC] = &Simulator::Evaluate_PCC;
+ EvalTable[KMCTR] = &Simulator::Evaluate_KMCTR;
+ EvalTable[KM] = &Simulator::Evaluate_KM;
+ EvalTable[KMC] = &Simulator::Evaluate_KMC;
+ EvalTable[CGFR] = &Simulator::Evaluate_CGFR;
+ EvalTable[KIMD] = &Simulator::Evaluate_KIMD;
+ EvalTable[KLMD] = &Simulator::Evaluate_KLMD;
+ EvalTable[CFDTR] = &Simulator::Evaluate_CFDTR;
+ EvalTable[CLGDTR] = &Simulator::Evaluate_CLGDTR;
+ EvalTable[CLFDTR] = &Simulator::Evaluate_CLFDTR;
+ EvalTable[BCTGR] = &Simulator::Evaluate_BCTGR;
+ EvalTable[CFXTR] = &Simulator::Evaluate_CFXTR;
+ EvalTable[CLFXTR] = &Simulator::Evaluate_CLFXTR;
+ EvalTable[CDFTR] = &Simulator::Evaluate_CDFTR;
+ EvalTable[CDLGTR] = &Simulator::Evaluate_CDLGTR;
+ EvalTable[CDLFTR] = &Simulator::Evaluate_CDLFTR;
+ EvalTable[CXFTR] = &Simulator::Evaluate_CXFTR;
+ EvalTable[CXLGTR] = &Simulator::Evaluate_CXLGTR;
+ EvalTable[CXLFTR] = &Simulator::Evaluate_CXLFTR;
+ EvalTable[CGRT] = &Simulator::Evaluate_CGRT;
+ EvalTable[NGR] = &Simulator::Evaluate_NGR;
+ EvalTable[OGR] = &Simulator::Evaluate_OGR;
+ EvalTable[XGR] = &Simulator::Evaluate_XGR;
+ EvalTable[FLOGR] = &Simulator::Evaluate_FLOGR;
+ EvalTable[LLGCR] = &Simulator::Evaluate_LLGCR;
+ EvalTable[LLGHR] = &Simulator::Evaluate_LLGHR;
+ EvalTable[MLGR] = &Simulator::Evaluate_MLGR;
+ EvalTable[DLGR] = &Simulator::Evaluate_DLGR;
+ EvalTable[ALCGR] = &Simulator::Evaluate_ALCGR;
+ EvalTable[SLBGR] = &Simulator::Evaluate_SLBGR;
+ EvalTable[EPSW] = &Simulator::Evaluate_EPSW;
+ EvalTable[TRTT] = &Simulator::Evaluate_TRTT;
+ EvalTable[TRTO] = &Simulator::Evaluate_TRTO;
+ EvalTable[TROT] = &Simulator::Evaluate_TROT;
+ EvalTable[TROO] = &Simulator::Evaluate_TROO;
+ EvalTable[LLCR] = &Simulator::Evaluate_LLCR;
+ EvalTable[LLHR] = &Simulator::Evaluate_LLHR;
+ EvalTable[MLR] = &Simulator::Evaluate_MLR;
+ EvalTable[DLR] = &Simulator::Evaluate_DLR;
+ EvalTable[ALCR] = &Simulator::Evaluate_ALCR;
+ EvalTable[SLBR] = &Simulator::Evaluate_SLBR;
+ EvalTable[CU14] = &Simulator::Evaluate_CU14;
+ EvalTable[CU24] = &Simulator::Evaluate_CU24;
+ EvalTable[CU41] = &Simulator::Evaluate_CU41;
+ EvalTable[CU42] = &Simulator::Evaluate_CU42;
+ EvalTable[TRTRE] = &Simulator::Evaluate_TRTRE;
+ EvalTable[SRSTU] = &Simulator::Evaluate_SRSTU;
+ EvalTable[TRTE] = &Simulator::Evaluate_TRTE;
+ EvalTable[AHHHR] = &Simulator::Evaluate_AHHHR;
+ EvalTable[SHHHR] = &Simulator::Evaluate_SHHHR;
+ EvalTable[ALHHHR] = &Simulator::Evaluate_ALHHHR;
+ EvalTable[SLHHHR] = &Simulator::Evaluate_SLHHHR;
+ EvalTable[CHHR] = &Simulator::Evaluate_CHHR;
+ EvalTable[AHHLR] = &Simulator::Evaluate_AHHLR;
+ EvalTable[SHHLR] = &Simulator::Evaluate_SHHLR;
+ EvalTable[ALHHLR] = &Simulator::Evaluate_ALHHLR;
+ EvalTable[SLHHLR] = &Simulator::Evaluate_SLHHLR;
+ EvalTable[CHLR] = &Simulator::Evaluate_CHLR;
+ EvalTable[POPCNT_Z] = &Simulator::Evaluate_POPCNT_Z;
+ EvalTable[LOCGR] = &Simulator::Evaluate_LOCGR;
+ EvalTable[NGRK] = &Simulator::Evaluate_NGRK;
+ EvalTable[OGRK] = &Simulator::Evaluate_OGRK;
+ EvalTable[XGRK] = &Simulator::Evaluate_XGRK;
+ EvalTable[AGRK] = &Simulator::Evaluate_AGRK;
+ EvalTable[SGRK] = &Simulator::Evaluate_SGRK;
+ EvalTable[ALGRK] = &Simulator::Evaluate_ALGRK;
+ EvalTable[SLGRK] = &Simulator::Evaluate_SLGRK;
+ EvalTable[LOCR] = &Simulator::Evaluate_LOCR;
+ EvalTable[NRK] = &Simulator::Evaluate_NRK;
+ EvalTable[ORK] = &Simulator::Evaluate_ORK;
+ EvalTable[XRK] = &Simulator::Evaluate_XRK;
+ EvalTable[ARK] = &Simulator::Evaluate_ARK;
+ EvalTable[SRK] = &Simulator::Evaluate_SRK;
+ EvalTable[ALRK] = &Simulator::Evaluate_ALRK;
+ EvalTable[SLRK] = &Simulator::Evaluate_SLRK;
+ EvalTable[LTG] = &Simulator::Evaluate_LTG;
+ EvalTable[LG] = &Simulator::Evaluate_LG;
+ EvalTable[CVBY] = &Simulator::Evaluate_CVBY;
+ EvalTable[AG] = &Simulator::Evaluate_AG;
+ EvalTable[SG] = &Simulator::Evaluate_SG;
+ EvalTable[ALG] = &Simulator::Evaluate_ALG;
+ EvalTable[SLG] = &Simulator::Evaluate_SLG;
+ EvalTable[MSG] = &Simulator::Evaluate_MSG;
+ EvalTable[DSG] = &Simulator::Evaluate_DSG;
+ EvalTable[CVBG] = &Simulator::Evaluate_CVBG;
+ EvalTable[LRVG] = &Simulator::Evaluate_LRVG;
+ EvalTable[LT] = &Simulator::Evaluate_LT;
+ EvalTable[LGF] = &Simulator::Evaluate_LGF;
+ EvalTable[LGH] = &Simulator::Evaluate_LGH;
+ EvalTable[LLGF] = &Simulator::Evaluate_LLGF;
+ EvalTable[LLGT] = &Simulator::Evaluate_LLGT;
+ EvalTable[AGF] = &Simulator::Evaluate_AGF;
+ EvalTable[SGF] = &Simulator::Evaluate_SGF;
+ EvalTable[ALGF] = &Simulator::Evaluate_ALGF;
+ EvalTable[SLGF] = &Simulator::Evaluate_SLGF;
+ EvalTable[MSGF] = &Simulator::Evaluate_MSGF;
+ EvalTable[DSGF] = &Simulator::Evaluate_DSGF;
+ EvalTable[LRV] = &Simulator::Evaluate_LRV;
+ EvalTable[LRVH] = &Simulator::Evaluate_LRVH;
+ EvalTable[CG] = &Simulator::Evaluate_CG;
+ EvalTable[CLG] = &Simulator::Evaluate_CLG;
+ EvalTable[STG] = &Simulator::Evaluate_STG;
+ EvalTable[NTSTG] = &Simulator::Evaluate_NTSTG;
+ EvalTable[CVDY] = &Simulator::Evaluate_CVDY;
+ EvalTable[CVDG] = &Simulator::Evaluate_CVDG;
+ EvalTable[STRVG] = &Simulator::Evaluate_STRVG;
+ EvalTable[CGF] = &Simulator::Evaluate_CGF;
+ EvalTable[CLGF] = &Simulator::Evaluate_CLGF;
+ EvalTable[LTGF] = &Simulator::Evaluate_LTGF;
+ EvalTable[CGH] = &Simulator::Evaluate_CGH;
+ EvalTable[PFD] = &Simulator::Evaluate_PFD;
+ EvalTable[STRV] = &Simulator::Evaluate_STRV;
+ EvalTable[STRVH] = &Simulator::Evaluate_STRVH;
+ EvalTable[BCTG] = &Simulator::Evaluate_BCTG;
+ EvalTable[STY] = &Simulator::Evaluate_STY;
+ EvalTable[MSY] = &Simulator::Evaluate_MSY;
+ EvalTable[NY] = &Simulator::Evaluate_NY;
+ EvalTable[CLY] = &Simulator::Evaluate_CLY;
+ EvalTable[OY] = &Simulator::Evaluate_OY;
+ EvalTable[XY] = &Simulator::Evaluate_XY;
+ EvalTable[LY] = &Simulator::Evaluate_LY;
+ EvalTable[CY] = &Simulator::Evaluate_CY;
+ EvalTable[AY] = &Simulator::Evaluate_AY;
+ EvalTable[SY] = &Simulator::Evaluate_SY;
+ EvalTable[MFY] = &Simulator::Evaluate_MFY;
+ EvalTable[ALY] = &Simulator::Evaluate_ALY;
+ EvalTable[SLY] = &Simulator::Evaluate_SLY;
+ EvalTable[STHY] = &Simulator::Evaluate_STHY;
+ EvalTable[LAY] = &Simulator::Evaluate_LAY;
+ EvalTable[STCY] = &Simulator::Evaluate_STCY;
+ EvalTable[ICY] = &Simulator::Evaluate_ICY;
+ EvalTable[LAEY] = &Simulator::Evaluate_LAEY;
+ EvalTable[LB] = &Simulator::Evaluate_LB;
+ EvalTable[LGB] = &Simulator::Evaluate_LGB;
+ EvalTable[LHY] = &Simulator::Evaluate_LHY;
+ EvalTable[CHY] = &Simulator::Evaluate_CHY;
+ EvalTable[AHY] = &Simulator::Evaluate_AHY;
+ EvalTable[SHY] = &Simulator::Evaluate_SHY;
+ EvalTable[MHY] = &Simulator::Evaluate_MHY;
+ EvalTable[NG] = &Simulator::Evaluate_NG;
+ EvalTable[OG] = &Simulator::Evaluate_OG;
+ EvalTable[XG] = &Simulator::Evaluate_XG;
+ EvalTable[LGAT] = &Simulator::Evaluate_LGAT;
+ EvalTable[MLG] = &Simulator::Evaluate_MLG;
+ EvalTable[DLG] = &Simulator::Evaluate_DLG;
+ EvalTable[ALCG] = &Simulator::Evaluate_ALCG;
+ EvalTable[SLBG] = &Simulator::Evaluate_SLBG;
+ EvalTable[STPQ] = &Simulator::Evaluate_STPQ;
+ EvalTable[LPQ] = &Simulator::Evaluate_LPQ;
+ EvalTable[LLGC] = &Simulator::Evaluate_LLGC;
+ EvalTable[LLGH] = &Simulator::Evaluate_LLGH;
+ EvalTable[LLC] = &Simulator::Evaluate_LLC;
+ EvalTable[LLH] = &Simulator::Evaluate_LLH;
+ EvalTable[ML] = &Simulator::Evaluate_ML;
+ EvalTable[DL] = &Simulator::Evaluate_DL;
+ EvalTable[ALC] = &Simulator::Evaluate_ALC;
+ EvalTable[SLB] = &Simulator::Evaluate_SLB;
+ EvalTable[LLGTAT] = &Simulator::Evaluate_LLGTAT;
+ EvalTable[LLGFAT] = &Simulator::Evaluate_LLGFAT;
+ EvalTable[LAT] = &Simulator::Evaluate_LAT;
+ EvalTable[LBH] = &Simulator::Evaluate_LBH;
+ EvalTable[LLCH] = &Simulator::Evaluate_LLCH;
+ EvalTable[STCH] = &Simulator::Evaluate_STCH;
+ EvalTable[LHH] = &Simulator::Evaluate_LHH;
+ EvalTable[LLHH] = &Simulator::Evaluate_LLHH;
+ EvalTable[STHH] = &Simulator::Evaluate_STHH;
+ EvalTable[LFHAT] = &Simulator::Evaluate_LFHAT;
+ EvalTable[LFH] = &Simulator::Evaluate_LFH;
+ EvalTable[STFH] = &Simulator::Evaluate_STFH;
+ EvalTable[CHF] = &Simulator::Evaluate_CHF;
+ EvalTable[MVCDK] = &Simulator::Evaluate_MVCDK;
+ EvalTable[MVHHI] = &Simulator::Evaluate_MVHHI;
+ EvalTable[MVGHI] = &Simulator::Evaluate_MVGHI;
+ EvalTable[MVHI] = &Simulator::Evaluate_MVHI;
+ EvalTable[CHHSI] = &Simulator::Evaluate_CHHSI;
+ EvalTable[CGHSI] = &Simulator::Evaluate_CGHSI;
+ EvalTable[CHSI] = &Simulator::Evaluate_CHSI;
+ EvalTable[CLFHSI] = &Simulator::Evaluate_CLFHSI;
+ EvalTable[TBEGIN] = &Simulator::Evaluate_TBEGIN;
+ EvalTable[TBEGINC] = &Simulator::Evaluate_TBEGINC;
+ EvalTable[LMG] = &Simulator::Evaluate_LMG;
+ EvalTable[SRAG] = &Simulator::Evaluate_SRAG;
+ EvalTable[SLAG] = &Simulator::Evaluate_SLAG;
+ EvalTable[SRLG] = &Simulator::Evaluate_SRLG;
+ EvalTable[SLLG] = &Simulator::Evaluate_SLLG;
+ EvalTable[CSY] = &Simulator::Evaluate_CSY;
+ EvalTable[RLLG] = &Simulator::Evaluate_RLLG;
+ EvalTable[RLL] = &Simulator::Evaluate_RLL;
+ EvalTable[STMG] = &Simulator::Evaluate_STMG;
+ EvalTable[STMH] = &Simulator::Evaluate_STMH;
+ EvalTable[STCMH] = &Simulator::Evaluate_STCMH;
+ EvalTable[STCMY] = &Simulator::Evaluate_STCMY;
+ EvalTable[CDSY] = &Simulator::Evaluate_CDSY;
+ EvalTable[CDSG] = &Simulator::Evaluate_CDSG;
+ EvalTable[BXHG] = &Simulator::Evaluate_BXHG;
+ EvalTable[BXLEG] = &Simulator::Evaluate_BXLEG;
+ EvalTable[ECAG] = &Simulator::Evaluate_ECAG;
+ EvalTable[TMY] = &Simulator::Evaluate_TMY;
+ EvalTable[MVIY] = &Simulator::Evaluate_MVIY;
+ EvalTable[NIY] = &Simulator::Evaluate_NIY;
+ EvalTable[CLIY] = &Simulator::Evaluate_CLIY;
+ EvalTable[OIY] = &Simulator::Evaluate_OIY;
+ EvalTable[XIY] = &Simulator::Evaluate_XIY;
+ EvalTable[ASI] = &Simulator::Evaluate_ASI;
+ EvalTable[ALSI] = &Simulator::Evaluate_ALSI;
+ EvalTable[AGSI] = &Simulator::Evaluate_AGSI;
+ EvalTable[ALGSI] = &Simulator::Evaluate_ALGSI;
+ EvalTable[ICMH] = &Simulator::Evaluate_ICMH;
+ EvalTable[ICMY] = &Simulator::Evaluate_ICMY;
+ EvalTable[MVCLU] = &Simulator::Evaluate_MVCLU;
+ EvalTable[CLCLU] = &Simulator::Evaluate_CLCLU;
+ EvalTable[STMY] = &Simulator::Evaluate_STMY;
+ EvalTable[LMH] = &Simulator::Evaluate_LMH;
+ EvalTable[LMY] = &Simulator::Evaluate_LMY;
+ EvalTable[TP] = &Simulator::Evaluate_TP;
+ EvalTable[SRAK] = &Simulator::Evaluate_SRAK;
+ EvalTable[SLAK] = &Simulator::Evaluate_SLAK;
+ EvalTable[SRLK] = &Simulator::Evaluate_SRLK;
+ EvalTable[SLLK] = &Simulator::Evaluate_SLLK;
+ EvalTable[LOCG] = &Simulator::Evaluate_LOCG;
+ EvalTable[STOCG] = &Simulator::Evaluate_STOCG;
+ EvalTable[LANG] = &Simulator::Evaluate_LANG;
+ EvalTable[LAOG] = &Simulator::Evaluate_LAOG;
+ EvalTable[LAXG] = &Simulator::Evaluate_LAXG;
+ EvalTable[LAAG] = &Simulator::Evaluate_LAAG;
+ EvalTable[LAALG] = &Simulator::Evaluate_LAALG;
+ EvalTable[LOC] = &Simulator::Evaluate_LOC;
+ EvalTable[STOC] = &Simulator::Evaluate_STOC;
+ EvalTable[LAN] = &Simulator::Evaluate_LAN;
+ EvalTable[LAO] = &Simulator::Evaluate_LAO;
+ EvalTable[LAX] = &Simulator::Evaluate_LAX;
+ EvalTable[LAA] = &Simulator::Evaluate_LAA;
+ EvalTable[LAAL] = &Simulator::Evaluate_LAAL;
+ EvalTable[BRXHG] = &Simulator::Evaluate_BRXHG;
+ EvalTable[BRXLG] = &Simulator::Evaluate_BRXLG;
+ EvalTable[RISBLG] = &Simulator::Evaluate_RISBLG;
+ EvalTable[RNSBG] = &Simulator::Evaluate_RNSBG;
+ EvalTable[RISBG] = &Simulator::Evaluate_RISBG;
+ EvalTable[ROSBG] = &Simulator::Evaluate_ROSBG;
+ EvalTable[RXSBG] = &Simulator::Evaluate_RXSBG;
+ EvalTable[RISBGN] = &Simulator::Evaluate_RISBGN;
+ EvalTable[RISBHG] = &Simulator::Evaluate_RISBHG;
+ EvalTable[CGRJ] = &Simulator::Evaluate_CGRJ;
+ EvalTable[CGIT] = &Simulator::Evaluate_CGIT;
+ EvalTable[CIT] = &Simulator::Evaluate_CIT;
+ EvalTable[CLFIT] = &Simulator::Evaluate_CLFIT;
+ EvalTable[CGIJ] = &Simulator::Evaluate_CGIJ;
+ EvalTable[CIJ] = &Simulator::Evaluate_CIJ;
+ EvalTable[AHIK] = &Simulator::Evaluate_AHIK;
+ EvalTable[AGHIK] = &Simulator::Evaluate_AGHIK;
+ EvalTable[ALHSIK] = &Simulator::Evaluate_ALHSIK;
+ EvalTable[ALGHSIK] = &Simulator::Evaluate_ALGHSIK;
+ EvalTable[CGRB] = &Simulator::Evaluate_CGRB;
+ EvalTable[CGIB] = &Simulator::Evaluate_CGIB;
+ EvalTable[CIB] = &Simulator::Evaluate_CIB;
+ EvalTable[LDEB] = &Simulator::Evaluate_LDEB;
+ EvalTable[LXDB] = &Simulator::Evaluate_LXDB;
+ EvalTable[LXEB] = &Simulator::Evaluate_LXEB;
+ EvalTable[MXDB] = &Simulator::Evaluate_MXDB;
+ EvalTable[KEB] = &Simulator::Evaluate_KEB;
+ EvalTable[CEB] = &Simulator::Evaluate_CEB;
+ EvalTable[AEB] = &Simulator::Evaluate_AEB;
+ EvalTable[SEB] = &Simulator::Evaluate_SEB;
+ EvalTable[MDEB] = &Simulator::Evaluate_MDEB;
+ EvalTable[DEB] = &Simulator::Evaluate_DEB;
+ EvalTable[MAEB] = &Simulator::Evaluate_MAEB;
+ EvalTable[MSEB] = &Simulator::Evaluate_MSEB;
+ EvalTable[TCEB] = &Simulator::Evaluate_TCEB;
+ EvalTable[TCDB] = &Simulator::Evaluate_TCDB;
+ EvalTable[TCXB] = &Simulator::Evaluate_TCXB;
+ EvalTable[SQEB] = &Simulator::Evaluate_SQEB;
+ EvalTable[SQDB] = &Simulator::Evaluate_SQDB;
+ EvalTable[MEEB] = &Simulator::Evaluate_MEEB;
+ EvalTable[KDB] = &Simulator::Evaluate_KDB;
+ EvalTable[CDB] = &Simulator::Evaluate_CDB;
+ EvalTable[ADB] = &Simulator::Evaluate_ADB;
+ EvalTable[SDB] = &Simulator::Evaluate_SDB;
+ EvalTable[MDB] = &Simulator::Evaluate_MDB;
+ EvalTable[DDB] = &Simulator::Evaluate_DDB;
+ EvalTable[MADB] = &Simulator::Evaluate_MADB;
+ EvalTable[MSDB] = &Simulator::Evaluate_MSDB;
+ EvalTable[SLDT] = &Simulator::Evaluate_SLDT;
+ EvalTable[SRDT] = &Simulator::Evaluate_SRDT;
+ EvalTable[SLXT] = &Simulator::Evaluate_SLXT;
+ EvalTable[SRXT] = &Simulator::Evaluate_SRXT;
+ EvalTable[TDCET] = &Simulator::Evaluate_TDCET;
+ EvalTable[TDGET] = &Simulator::Evaluate_TDGET;
+ EvalTable[TDCDT] = &Simulator::Evaluate_TDCDT;
+ EvalTable[TDGDT] = &Simulator::Evaluate_TDGDT;
+ EvalTable[TDCXT] = &Simulator::Evaluate_TDCXT;
+ EvalTable[TDGXT] = &Simulator::Evaluate_TDGXT;
+ EvalTable[LEY] = &Simulator::Evaluate_LEY;
+ EvalTable[LDY] = &Simulator::Evaluate_LDY;
+ EvalTable[STEY] = &Simulator::Evaluate_STEY;
+ EvalTable[STDY] = &Simulator::Evaluate_STDY;
+ EvalTable[CZDT] = &Simulator::Evaluate_CZDT;
+ EvalTable[CZXT] = &Simulator::Evaluate_CZXT;
+ EvalTable[CDZT] = &Simulator::Evaluate_CDZT;
+ EvalTable[CXZT] = &Simulator::Evaluate_CXZT;
+} // NOLINT
+
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
- i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ i_cache_ = new base::HashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@@ -795,7 +1511,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// some buffer below.
registers_[sp] =
reinterpret_cast<intptr_t>(stack_) + stack_size - stack_protection_size_;
- InitializeCoverage();
last_debugger_input_ = NULL;
}
@@ -894,10 +1609,10 @@ class Redirection {
};
// static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
- for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+ for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
@@ -1033,6 +1748,11 @@ uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
return *ptr;
}
+int64_t Simulator::ReadW64(intptr_t addr, Instruction* instr) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ return *ptr;
+}
+
int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return *ptr;
@@ -1268,15 +1988,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(generic_target), dval0, dval1);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(generic_target), dval0);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
- FUNCTION_ADDR(generic_target), dval0, ival);
+ static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+ ival);
break;
default:
UNREACHABLE();
@@ -1418,8 +2140,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
"Call to host function at %p,\n"
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
- FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
- arg[5]);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1], arg[2],
+ arg[3], arg[4], arg[5]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
static_cast<intptr_t>(get_register(sp)));
@@ -2587,6 +3309,7 @@ bool Simulator::DecodeFourByteArithmetic64Bit(Instruction* instr) {
SetS390OverflowCode(isOF);
set_register(r1, r2_val - r3_val);
}
+ break;
}
case AGHI:
case MGHI: {
@@ -2987,17 +3710,19 @@ bool Simulator::DecodeFourByteArithmetic(Instruction* instr) {
RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
int r1 = rrinst->R1Value();
int r2 = rrinst->R2Value();
-#ifdef V8_TARGET_ARCH_S390X
+ if (op == LGBR) {
int64_t r2_val = get_low_register<int64_t>(r2);
r2_val <<= 56;
r2_val >>= 56;
set_register(r1, r2_val);
-#else
+ } else if (op == LBR) {
int32_t r2_val = get_low_register<int32_t>(r2);
r2_val <<= 24;
r2_val >>= 24;
set_low_register(r1, r2_val);
-#endif
+ } else {
+ UNREACHABLE();
+ }
break;
}
case LGHR:
@@ -3005,17 +3730,19 @@ bool Simulator::DecodeFourByteArithmetic(Instruction* instr) {
RREInstruction* rrinst = reinterpret_cast<RREInstruction*>(instr);
int r1 = rrinst->R1Value();
int r2 = rrinst->R2Value();
-#ifdef V8_TARGET_ARCH_S390X
+ if (op == LGHR) {
int64_t r2_val = get_low_register<int64_t>(r2);
r2_val <<= 48;
r2_val >>= 48;
set_register(r1, r2_val);
-#else
+ } else if (op == LHR) {
int32_t r2_val = get_low_register<int32_t>(r2);
r2_val <<= 16;
r2_val >>= 16;
set_low_register(r1, r2_val);
-#endif
+ } else {
+ UNREACHABLE();
+ }
break;
}
case ALCR: {
@@ -3427,6 +4154,7 @@ bool Simulator::DecodeFourByteFloatingPoint(Instruction* instr) {
case CFEBR:
case CEFBR:
case LCDBR:
+ case LCEBR:
case LPDBR:
case LPEBR: {
RREInstruction* rreInstr = reinterpret_cast<RREInstruction*>(instr);
@@ -3533,6 +4261,18 @@ bool Simulator::DecodeFourByteFloatingPoint(Instruction* instr) {
} else if (r2_val > 0) {
condition_reg_ = CC_GT;
}
+ } else if (op == LCEBR) {
+ fr1_val = -fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ if (fr2_val != fr2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (fr2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else if (fr2_val < 0) {
+ condition_reg_ = CC_LT;
+ } else if (fr2_val > 0) {
+ condition_reg_ = CC_GT;
+ }
} else if (op == LPDBR) {
r1_val = std::fabs(r2_val);
set_d_register_from_double(r1, r1_val);
@@ -4791,58 +5531,83 @@ bool Simulator::DecodeSixByteArithmetic(Instruction* instr) {
}
int16_t Simulator::ByteReverse(int16_t hword) {
+#if defined(__GNUC__)
+ return __builtin_bswap16(hword);
+#else
return (hword << 8) | ((hword >> 8) & 0x00ff);
+#endif
}
int32_t Simulator::ByteReverse(int32_t word) {
+#if defined(__GNUC__)
+ return __builtin_bswap32(word);
+#else
int32_t result = word << 24;
result |= (word << 8) & 0x00ff0000;
result |= (word >> 8) & 0x0000ff00;
result |= (word >> 24) & 0x00000ff;
return result;
+#endif
+}
+
+int64_t Simulator::ByteReverse(int64_t dword) {
+#if defined(__GNUC__)
+ return __builtin_bswap64(dword);
+#else
+#error unsupported: __builtin_bswap64 is not available
+#endif
+}
+
+int Simulator::DecodeInstructionOriginal(Instruction* instr) {
+ int instrLength = instr->InstructionLength();
+ bool processed = true;
+ if (instrLength == 2)
+ processed = DecodeTwoByte(instr);
+ else if (instrLength == 4)
+ processed = DecodeFourByte(instr);
+ else if (instrLength == 6)
+ processed = DecodeSixByte(instr);
+  USE(processed);
+  return instrLength;
+}
+
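+// Table-driven dispatch: look up the handler registered for this opcode in
+// EvalTable and invoke it. Each handler returns the number of bytes the
+// instruction occupies.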
+int Simulator::DecodeInstruction(Instruction* instr) {
+ Opcode op = instr->S390OpcodeValue();
+ DCHECK(EvalTable[op] != NULL);
+ return (this->*EvalTable[op])(instr);
}
// Executes the current instruction.
void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
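+  // Count every simulated instruction here so each one is counted exactly
+  // once (the Execute loops no longer increment icount_ themselves).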
+ icount_++;
+
if (v8::internal::FLAG_check_icache) {
CheckICache(isolate_->simulator_i_cache(), instr);
}
+
pc_modified_ = false;
+
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
-#ifdef V8_TARGET_ARCH_S390X
- PrintF("%05ld %08" V8PRIxPTR " %s\n", icount_,
+ PrintF("%05" PRId64 " %08" V8PRIxPTR " %s\n", icount_,
reinterpret_cast<intptr_t>(instr), buffer.start());
-#else
- PrintF("%05lld %08" V8PRIxPTR " %s\n", icount_,
- reinterpret_cast<intptr_t>(instr), buffer.start());
-#endif
+
// Flush stdout to prevent incomplete file output during abnormal exits.
// This is caused by the output being buffered before being written to the
// file.
fflush(stdout);
}
// Try to simulate as S390 Instruction first.
- bool processed = true;
-
- int instrLength = instr->InstructionLength();
- if (instrLength == 2)
- processed = DecodeTwoByte(instr);
- else if (instrLength == 4)
- processed = DecodeFourByte(instr);
- else if (instrLength == 6)
- processed = DecodeSixByte(instr);
+ int length = DecodeInstruction(instr);
- if (processed) {
- if (!pc_modified_ && auto_incr_pc) {
- set_pc(reinterpret_cast<intptr_t>(instr) + instrLength);
- }
- return;
+ if (!pc_modified_ && auto_incr_pc) {
+ DCHECK(length == instr->InstructionLength());
+ set_pc(reinterpret_cast<intptr_t>(instr) + length);
}
+ return;
}
void Simulator::DebugStart() {
@@ -4860,7 +5625,6 @@ void Simulator::Execute() {
// should be stopping at a particular executed instruction.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
ExecuteInstruction(instr);
program_counter = get_pc();
}
@@ -4869,7 +5633,6 @@ void Simulator::Execute() {
// we reach the particular instruction count.
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
- icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
S390Debugger dbg(this);
dbg.Debug();
@@ -4882,6 +5645,9 @@ void Simulator::Execute() {
}
void Simulator::CallInternal(byte* entry, int reg_arg_count) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
// Prepare to execute the code at entry
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// entry is the function descriptor
@@ -4964,6 +5730,9 @@ void Simulator::CallInternal(byte* entry, int reg_arg_count) {
}
intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
// Remember the values of non-volatile registers.
int64_t r6_val = get_register(r6);
int64_t r7_val = get_register(r7);
@@ -5121,6 +5890,6752 @@ uintptr_t Simulator::PopAddress() {
return address;
}
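+
+// Each EVALUATE(name) body below implements one S390 opcode. The
+// DECODE_*_INSTRUCTION macros extract the operand fields for the named
+// instruction format and define `length`, the instruction size in bytes,
+// which every handler returns so the dispatcher can advance the PC.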
+#define EVALUATE(name) \
+ int Simulator::Evaluate_##name(Instruction* instr)
+
+#define DCHECK_OPCODE(op) DCHECK(instr->S390OpcodeValue() == op)
+
+#define AS(type) reinterpret_cast<type*>(instr)
+
+#define DECODE_RIL_A_INSTRUCTION(r1, i2) \
+ int r1 = AS(RILInstruction)->R1Value(); \
+ uint32_t i2 = AS(RILInstruction)->I2UnsignedValue(); \
+ int length = 6;
+
+#define DECODE_RIL_B_INSTRUCTION(r1, i2) \
+ int r1 = AS(RILInstruction)->R1Value(); \
+ int32_t i2 = AS(RILInstruction)->I2Value(); \
+ int length = 6;
+
+#define DECODE_RIL_C_INSTRUCTION(m1, ri2) \
+ Condition m1 = static_cast<Condition>(AS(RILInstruction)->R1Value()); \
+ uint64_t ri2 = AS(RILInstruction)->I2Value(); \
+ int length = 6;
+
+#define DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2) \
+ int r1 = AS(RXYInstruction)->R1Value(); \
+ int x2 = AS(RXYInstruction)->X2Value(); \
+ int b2 = AS(RXYInstruction)->B2Value(); \
+ int d2 = AS(RXYInstruction)->D2Value(); \
+ int length = 6;
+
+#define DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val) \
+ int x2 = AS(RXInstruction)->X2Value(); \
+ int b2 = AS(RXInstruction)->B2Value(); \
+ int r1 = AS(RXInstruction)->R1Value(); \
+ intptr_t d2_val = AS(RXInstruction)->D2Value(); \
+ int length = 4;
+
+#define DECODE_RS_A_INSTRUCTION(r1, r3, b2, d2) \
+ int r3 = AS(RSInstruction)->R3Value(); \
+ int b2 = AS(RSInstruction)->B2Value(); \
+ int r1 = AS(RSInstruction)->R1Value(); \
+ intptr_t d2 = AS(RSInstruction)->D2Value(); \
+ int length = 4;
+
+#define DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2) \
+ int b2 = AS(RSInstruction)->B2Value(); \
+ int r1 = AS(RSInstruction)->R1Value(); \
+ int d2 = AS(RSInstruction)->D2Value(); \
+ int length = 4;
+
+#define DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val) \
+ int b1 = AS(SIInstruction)->B1Value(); \
+ intptr_t d1_val = AS(SIInstruction)->D1Value(); \
+ uint8_t imm_val = AS(SIInstruction)->I2Value(); \
+ int length = 4;
+
+#define DECODE_SIL_INSTRUCTION(b1, d1, i2) \
+ int b1 = AS(SILInstruction)->B1Value(); \
+ intptr_t d1 = AS(SILInstruction)->D1Value(); \
+ int16_t i2 = AS(SILInstruction)->I2Value(); \
+ int length = 6;
+
+#define DECODE_SIY_INSTRUCTION(b1, d1, i2) \
+ int b1 = AS(SIYInstruction)->B1Value(); \
+ intptr_t d1 = AS(SIYInstruction)->D1Value(); \
+ uint8_t i2 = AS(SIYInstruction)->I2Value(); \
+ int length = 6;
+
+#define DECODE_RRE_INSTRUCTION(r1, r2) \
+ int r1 = AS(RREInstruction)->R1Value(); \
+ int r2 = AS(RREInstruction)->R2Value(); \
+ int length = 4;
+
+#define DECODE_RRE_INSTRUCTION_M3(r1, r2, m3) \
+ int r1 = AS(RREInstruction)->R1Value(); \
+ int r2 = AS(RREInstruction)->R2Value(); \
+ int m3 = AS(RREInstruction)->M3Value(); \
+ int length = 4;
+
+#define DECODE_RRE_INSTRUCTION_NO_R2(r1) \
+ int r1 = AS(RREInstruction)->R1Value(); \
+ int length = 4;
+
+#define DECODE_RRD_INSTRUCTION(r1, r2, r3) \
+ int r1 = AS(RRDInstruction)->R1Value(); \
+ int r2 = AS(RRDInstruction)->R2Value(); \
+ int r3 = AS(RRDInstruction)->R3Value(); \
+ int length = 4;
+
+#define DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4) \
+ int r1 = AS(RRFInstruction)->R1Value(); \
+ int r2 = AS(RRFInstruction)->R2Value(); \
+ int m3 = AS(RRFInstruction)->M3Value(); \
+ int m4 = AS(RRFInstruction)->M4Value(); \
+ int length = 4;
+
+#define DECODE_RRF_A_INSTRUCTION(r1, r2, r3) \
+ int r1 = AS(RRFInstruction)->R1Value(); \
+ int r2 = AS(RRFInstruction)->R2Value(); \
+ int r3 = AS(RRFInstruction)->R3Value(); \
+ int length = 4;
+
+#define DECODE_RRF_C_INSTRUCTION(r1, r2, m3) \
+ int r1 = AS(RRFInstruction)->R1Value(); \
+ int r2 = AS(RRFInstruction)->R2Value(); \
+ Condition m3 = static_cast<Condition>(AS(RRFInstruction)->M3Value()); \
+ int length = 4;
+
+#define DECODE_RR_INSTRUCTION(r1, r2) \
+ int r1 = AS(RRInstruction)->R1Value(); \
+ int r2 = AS(RRInstruction)->R2Value(); \
+ int length = 2;
+
+#define DECODE_RIE_D_INSTRUCTION(r1, r2, i2) \
+ int r1 = AS(RIEInstruction)->R1Value(); \
+ int r2 = AS(RIEInstruction)->R2Value(); \
+ int32_t i2 = AS(RIEInstruction)->I6Value(); \
+ int length = 6;
+
+#define DECODE_RIE_F_INSTRUCTION(r1, r2, i3, i4, i5) \
+ int r1 = AS(RIEInstruction)->R1Value(); \
+ int r2 = AS(RIEInstruction)->R2Value(); \
+ uint32_t i3 = AS(RIEInstruction)->I3Value(); \
+ uint32_t i4 = AS(RIEInstruction)->I4Value(); \
+ uint32_t i5 = AS(RIEInstruction)->I5Value(); \
+ int length = 6;
+
+#define DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2) \
+ int r1 = AS(RSYInstruction)->R1Value(); \
+ int r3 = AS(RSYInstruction)->R3Value(); \
+ int b2 = AS(RSYInstruction)->B2Value(); \
+ intptr_t d2 = AS(RSYInstruction)->D2Value(); \
+ int length = 6;
+
+#define DECODE_RI_A_INSTRUCTION(instr, r1, i2) \
+ int32_t r1 = AS(RIInstruction)->R1Value(); \
+ int16_t i2 = AS(RIInstruction)->I2Value(); \
+ int length = 4;
+
+#define DECODE_RI_B_INSTRUCTION(instr, r1, i2) \
+ int32_t r1 = AS(RILInstruction)->R1Value(); \
+ int16_t i2 = AS(RILInstruction)->I2Value(); \
+ int length = 4;
+
+#define DECODE_RI_C_INSTRUCTION(instr, m1, i2) \
+ Condition m1 = static_cast<Condition>(AS(RIInstruction)->R1Value()); \
+ int16_t i2 = AS(RIInstruction)->I2Value(); \
+ int length = 4;
+
+#define DECODE_RXE_INSTRUCTION(r1, b2, x2, d2) \
+ int r1 = AS(RXEInstruction)->R1Value(); \
+ int b2 = AS(RXEInstruction)->B2Value(); \
+ int x2 = AS(RXEInstruction)->X2Value(); \
+ int d2 = AS(RXEInstruction)->D2Value(); \
+ int length = 6;
+
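+// Effective-address helper: index + base + displacement, where a register
+// field of 0 contributes 0 rather than the contents of r0.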
+#define GET_ADDRESS(index_reg, base_reg, offset)        \
+  ((((index_reg) == 0) ? 0 : get_register(index_reg)) + \
+   (((base_reg) == 0) ? 0 : get_register(base_reg)) + (offset))
+
+int Simulator::Evaluate_Unknown(Instruction* instr) {
+ UNREACHABLE();
+ return 0;
+}
+
+EVALUATE(CLR) {
+ DCHECK_OPCODE(CLR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ SetS390ConditionCode<uint32_t>(r1_val, r2_val);
+ return length;
+}
+
+EVALUATE(LR) {
+ DCHECK_OPCODE(LR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ set_low_register(r1, get_low_register<int32_t>(r2));
+ return length;
+}
+
+EVALUATE(AR) {
+ DCHECK_OPCODE(AR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int32_t);
+ r1_val += r2_val;
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(L) {
+ DCHECK_OPCODE(L);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = ReadW(addr, instr);
+ set_low_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(BRC) {
+ DCHECK_OPCODE(BRC);
+ DECODE_RI_C_INSTRUCTION(instr, m1, i2);
+
+ if (TestConditionCode(m1)) {
+ intptr_t offset = 2 * i2;
+ set_pc(get_pc() + offset);
+ }
+ return length;
+}
+
+EVALUATE(AHI) {
+ DCHECK_OPCODE(AHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ bool isOF = CheckOverflowForIntAdd(r1_val, i2, int32_t);
+ r1_val += i2;
+ set_low_register(r1, r1_val);
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(AGHI) {
+ DCHECK_OPCODE(AGHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int64_t r1_val = get_register(r1);
+ bool isOF = false;
+ isOF = CheckOverflowForIntAdd(r1_val, i2, int64_t);
+ r1_val += i2;
+ set_register(r1, r1_val);
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(BRCL) {
+ DCHECK_OPCODE(BRCL);
+ DECODE_RIL_C_INSTRUCTION(m1, ri2);
+
+ if (TestConditionCode(m1)) {
+ intptr_t offset = 2 * ri2;
+ set_pc(get_pc() + offset);
+ }
+ return length;
+}
+
+EVALUATE(IIHF) {
+ DCHECK_OPCODE(IIHF);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ set_high_register(r1, imm);
+ return length;
+}
+
+EVALUATE(IILF) {
+ DCHECK_OPCODE(IILF);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ set_low_register(r1, imm);
+ return length;
+}
+
+EVALUATE(LGR) {
+ DCHECK_OPCODE(LGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ set_register(r1, get_register(r2));
+ return length;
+}
+
+EVALUATE(LG) {
+ DCHECK_OPCODE(LG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int64_t mem_val = ReadDW(addr);
+ set_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(AGR) {
+ DCHECK_OPCODE(AGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+ r1_val += r2_val;
+ set_register(r1, r1_val);
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(LGFR) {
+ DCHECK_OPCODE(LGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int64_t result = static_cast<int64_t>(r2_val);
+ set_register(r1, result);
+
+ return length;
+}
+
+EVALUATE(LBR) {
+ DCHECK_OPCODE(LBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r2_val <<= 24;
+ r2_val >>= 24;
+ set_low_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LGBR) {
+ DCHECK_OPCODE(LGBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_low_register<int64_t>(r2);
+ r2_val <<= 56;
+ r2_val >>= 56;
+ set_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LHR) {
+ DCHECK_OPCODE(LHR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r2_val <<= 16;
+ r2_val >>= 16;
+ set_low_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LGHR) {
+ DCHECK_OPCODE(LGHR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_low_register<int64_t>(r2);
+ r2_val <<= 48;
+ r2_val >>= 48;
+ set_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LGF) {
+ DCHECK_OPCODE(LGF);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int64_t mem_val = static_cast<int64_t>(ReadW(addr, instr));
+ set_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(ST) {
+ DCHECK_OPCODE(ST);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ WriteW(addr, r1_val, instr);
+ return length;
+}
+
+EVALUATE(STG) {
+ DCHECK_OPCODE(STG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ uint64_t value = get_register(r1);
+ WriteDW(addr, value);
+ return length;
+}
+
+EVALUATE(STY) {
+ DCHECK_OPCODE(STY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ uint32_t value = get_low_register<uint32_t>(r1);
+ WriteW(addr, value, instr);
+ return length;
+}
+
+EVALUATE(LY) {
+ DCHECK_OPCODE(LY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ uint32_t mem_val = ReadWU(addr, instr);
+ set_low_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(LLGC) {
+ DCHECK_OPCODE(LLGC);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ uint8_t mem_val = ReadBU(GET_ADDRESS(x2, b2, d2));
+ set_register(r1, static_cast<uint64_t>(mem_val));
+ return length;
+}
+
+EVALUATE(LLC) {
+ DCHECK_OPCODE(LLC);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ uint8_t mem_val = ReadBU(GET_ADDRESS(x2, b2, d2));
+ set_low_register(r1, static_cast<uint32_t>(mem_val));
+ return length;
+}
+
+EVALUATE(RLL) {
+ DCHECK_OPCODE(RLL);
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // The rotate amount is the rightmost 6 bits of the address; a 32-bit
+  // rotate is modulo 32, so mask to 5 bits and guard the zero case to
+  // avoid an undefined shift by 32.
+  int shiftBits = GET_ADDRESS(0, b2, d2) & 0x1F;
+  uint32_t r3_val = get_low_register<uint32_t>(r3);
+  uint32_t alu_out = shiftBits == 0
+                         ? r3_val
+                         : (r3_val << shiftBits) | (r3_val >> (32 - shiftBits));
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(RISBG) {
+ DCHECK_OPCODE(RISBG);
+ DECODE_RIE_F_INSTRUCTION(r1, r2, i3, i4, i5);
+ // Starting Bit Position is Bits 2-7 of I3 field
+ uint32_t start_bit = i3 & 0x3F;
+ // Ending Bit Position is Bits 2-7 of I4 field
+ uint32_t end_bit = i4 & 0x3F;
+ // Shift Amount is Bits 2-7 of I5 field
+ uint32_t shift_amount = i5 & 0x3F;
+  // Zero out remaining (unselected) bits if bit 0 of I4 is 1.
+ bool zero_remaining = (0 != (i4 & 0x80));
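+  // Example: i3 = 40, i4 = 0xBF (end_bit = 63, zero-remaining set) and
+  // i5 = 0 select the low 24 bits of r2 and zero bits 0-39 of r1.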
+
+ uint64_t src_val = get_register(r2);
+
+  // Rotate left by the shift amount first; guard shift_amount == 0, where
+  // a 64-bit right shift by 64 would be undefined behavior.
+  uint64_t rotated_val =
+      shift_amount == 0
+          ? src_val
+          : (src_val << shift_amount) | (src_val >> (64 - shift_amount));
+ int32_t width = end_bit - start_bit + 1;
+
+ uint64_t selection_mask = 0;
+ if (width < 64) {
+ selection_mask = (static_cast<uint64_t>(1) << width) - 1;
+ } else {
+ selection_mask = static_cast<uint64_t>(static_cast<int64_t>(-1));
+ }
+ selection_mask = selection_mask << (63 - end_bit);
+
+ uint64_t selected_val = rotated_val & selection_mask;
+
+ if (!zero_remaining) {
+    // Merge the unselected bits from the original value
+ selected_val = (src_val & ~selection_mask) | selected_val;
+ }
+
+ // Condition code is set by treating result as 64-bit signed int
+ SetS390ConditionCode<int64_t>(selected_val, 0);
+ set_register(r1, selected_val);
+ return length;
+}
+
+EVALUATE(AHIK) {
+ DCHECK_OPCODE(AHIK);
+ DECODE_RIE_D_INSTRUCTION(r1, r2, i2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t imm = static_cast<int32_t>(i2);
+ bool isOF = CheckOverflowForIntAdd(r2_val, imm, int32_t);
+ set_low_register(r1, r2_val + imm);
+ SetS390ConditionCode<int32_t>(r2_val + imm, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(AGHIK) {
+ // 64-bit Add
+ DCHECK_OPCODE(AGHIK);
+ DECODE_RIE_D_INSTRUCTION(r1, r2, i2);
+ int64_t r2_val = get_register(r2);
+ int64_t imm = static_cast<int64_t>(i2);
+ bool isOF = CheckOverflowForIntAdd(r2_val, imm, int64_t);
+ set_register(r1, r2_val + imm);
+ SetS390ConditionCode<int64_t>(r2_val + imm, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(BKPT) {
+ DCHECK_OPCODE(BKPT);
+ set_pc(get_pc() + 2);
+ S390Debugger dbg(this);
+ dbg.Debug();
+ int length = 2;
+ return length;
+}
+
+EVALUATE(SPM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BALR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BCTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BCR) {
+ DCHECK_OPCODE(BCR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ if (TestConditionCode(Condition(r1))) {
+ intptr_t r2_val = get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+    // On 31-bit, the topmost bit may be 0 or 1 but is ignored by the
+    // hardware. Cleanse the top bit before jumping to it, unless it's one
+    // of the special PCs.
+ if (r2_val != bad_lr && r2_val != end_sim_pc) r2_val &= 0x7FFFFFFF;
+#endif
+ set_pc(r2_val);
+ }
+
+ return length;
+}
+
+EVALUATE(SVC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BSM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BASSM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BASR) {
+ DCHECK_OPCODE(BASR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ intptr_t link_addr = get_pc() + 2;
+ // If R2 is zero, the BASR does not branch.
+ int64_t r2_val = (r2 == 0) ? link_addr : get_register(r2);
+#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
+  // On 31-bit, the topmost bit may be 0 or 1, which can cause issues for
+  // the stack walker. The top bit should either be cleansed before being
+  // pushed onto the stack, or cleansed during stack walking when it is
+  // dereferenced. For the simulator, we take the worst-case scenario and
+  // always tag the high bit, to flush out more problems.
+ link_addr |= 0x80000000;
+#endif
+ set_register(r1, link_addr);
+ set_pc(r2_val);
+ return length;
+}
+
+EVALUATE(MVCL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLCL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LNR) {
+ DCHECK_OPCODE(LNR);
+ // Load Negative (32)
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r2_val = (r2_val >= 0) ? -r2_val : r2_val; // If pos, then negate it.
+ set_low_register(r1, r2_val);
+  condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT;  // CC0 - result is zero
+                                                   // CC1 - result is negative
+ return length;
+}
+
+EVALUATE(LTR) {
+ DCHECK_OPCODE(LTR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ SetS390ConditionCode<int32_t>(r2_val, 0);
+ set_low_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LCR) {
+ DCHECK_OPCODE(LCR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t original_r2_val = r2_val;
+ r2_val = ~r2_val;
+ r2_val = r2_val + 1;
+ set_low_register(r1, r2_val);
+ SetS390ConditionCode<int32_t>(r2_val, 0);
+  // Check for overflow, which occurs only when r2_val == INT32_MIN
+  // (-2147483648). A direct comparison against the constant trips a
+  // GCC 4.8 bug on x86, so detect INT_MIN indirectly: it is the only
+  // value for which both the original and the negated result are negative.
+ if (r2_val < 0 && original_r2_val < 0) {
+ SetS390OverflowCode(true);
+ }
+ return length;
+}
+
+EVALUATE(NR) {
+ DCHECK_OPCODE(NR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r1_val &= r2_val;
+ SetS390BitWiseConditionCode<uint32_t>(r1_val);
+ set_low_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(OR) {
+ DCHECK_OPCODE(OR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r1_val |= r2_val;
+ SetS390BitWiseConditionCode<uint32_t>(r1_val);
+ set_low_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(XR) {
+ DCHECK_OPCODE(XR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ r1_val ^= r2_val;
+ SetS390BitWiseConditionCode<uint32_t>(r1_val);
+ set_low_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CR) {
+ DCHECK_OPCODE(CR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ SetS390ConditionCode<int32_t>(r1_val, r2_val);
+ return length;
+}
+
+EVALUATE(SR) {
+ DCHECK_OPCODE(SR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ bool isOF = false;
+ isOF = CheckOverflowForIntSub(r1_val, r2_val, int32_t);
+ r1_val -= r2_val;
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(MR) {
+ DCHECK_OPCODE(MR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  // R1 designates an even-odd register pair; the multiplicand comes from
+  // the odd register, and the 64-bit product is stored across the pair.
+  DCHECK(r1 % 2 == 0);
+  int32_t r1_val = get_low_register<int32_t>(r1 + 1);
+  int64_t product = static_cast<int64_t>(r1_val) * static_cast<int64_t>(r2_val);
+  int32_t high_bits = product >> 32;
+  int32_t low_bits = product & 0x00000000FFFFFFFF;
+  set_low_register(r1, high_bits);
+  set_low_register(r1 + 1, low_bits);
+ return length;
+}
+
+EVALUATE(DR) {
+ DCHECK_OPCODE(DR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ // reg-reg pair should be even-odd pair, assert r1 is an even register
+ DCHECK(r1 % 2 == 0);
+ // leftmost 32 bits of the dividend are in r1
+ // rightmost 32 bits of the dividend are in r1+1
+ // get the signed value from r1
+ int64_t dividend = static_cast<int64_t>(r1_val) << 32;
+ // get unsigned value from r1+1
+ // avoid addition with sign-extended r1+1 value
+ dividend += get_low_register<uint32_t>(r1 + 1);
+ int32_t remainder = dividend % r2_val;
+ int32_t quotient = dividend / r2_val;
+  set_low_register(r1, remainder);
+  set_low_register(r1 + 1, quotient);
+ return length;
+}
+
+EVALUATE(ALR) {
+ DCHECK_OPCODE(ALR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t alu_out = 0;
+ bool isOF = false;
+ alu_out = r1_val + r2_val;
+ isOF = CheckOverflowForUIntAdd(r1_val, r2_val);
+ set_low_register(r1, alu_out);
+ SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+ return length;
+}
+
+EVALUATE(SLR) {
+ DCHECK_OPCODE(SLR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t alu_out = 0;
+ bool isOF = false;
+ alu_out = r1_val - r2_val;
+ isOF = CheckOverflowForUIntSub(r1_val, r2_val);
+ set_low_register(r1, alu_out);
+ SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+ return length;
+}
+
+EVALUATE(LDR) {
+ DCHECK_OPCODE(LDR);
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_d_register(r2);
+ set_d_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(CDR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LER) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STH) {
+ DCHECK_OPCODE(STH);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int16_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t mem_addr = b2_val + x2_val + d2_val;
+ WriteH(mem_addr, r1_val, instr);
+
+ return length;
+}
+
+EVALUATE(LA) {
+ DCHECK_OPCODE(LA);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ set_register(r1, addr);
+ return length;
+}
+
+EVALUATE(STC) {
+ DCHECK_OPCODE(STC);
+ // Store Character/Byte
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ uint8_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t mem_addr = b2_val + x2_val + d2_val;
+ WriteB(mem_addr, r1_val);
+ return length;
+}
+
+EVALUATE(IC_z) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EX) {
+ DCHECK_OPCODE(EX);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+
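+  // Fetch the target instruction, OR the low byte of r1 into its second
+  // byte (bits 8-15), and execute the modified copy; the PC then advances
+  // past the EX itself, not past the target.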
+ SixByteInstr the_instr = Instruction::InstructionBits(
+ reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+ int inst_length = Instruction::InstructionLength(
+ reinterpret_cast<const byte*>(b2_val + x2_val + d2_val));
+
+ char new_instr_buf[8];
+ char* addr = reinterpret_cast<char*>(&new_instr_buf[0]);
+ the_instr |= static_cast<SixByteInstr>(r1_val & 0xff)
+ << (8 * inst_length - 16);
+ Instruction::SetInstructionBits<SixByteInstr>(
+ reinterpret_cast<byte*>(addr), static_cast<SixByteInstr>(the_instr));
+ ExecuteInstruction(reinterpret_cast<Instruction*>(addr), false);
+ return length;
+}
+
+EVALUATE(BAL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BCT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LH) {
+ DCHECK_OPCODE(LH);
+ // Load Halfword
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = x2_val + b2_val + d2_val;
+
+ int32_t result = static_cast<int32_t>(ReadH(mem_addr, instr));
+ set_low_register(r1, result);
+ return length;
+}
+
+EVALUATE(CH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AH) {
+ DCHECK_OPCODE(AH);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t alu_out = 0;
+ bool isOF = false;
+ isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+ alu_out = r1_val + mem_val;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+
+ return length;
+}
+
+EVALUATE(SH) {
+ DCHECK_OPCODE(SH);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t alu_out = 0;
+ bool isOF = false;
+ isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+  alu_out = r1_val - mem_val;
+  // SH places the difference at the first-operand location.
+  set_low_register(r1, alu_out);
+  SetS390ConditionCode<int32_t>(alu_out, 0);
+  SetS390OverflowCode(isOF);
+
+ return length;
+}
+
+EVALUATE(MH) {
+ DCHECK_OPCODE(MH);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t alu_out = 0;
+ alu_out = r1_val * mem_val;
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(BAS) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CVD) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CVB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(N) {
+ DCHECK_OPCODE(N);
+ // 32-bit Reg-Mem instructions
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t alu_out = 0;
+ alu_out = r1_val & mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(CL) {
+ DCHECK_OPCODE(CL);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = ReadW(addr, instr);
+ SetS390ConditionCode<uint32_t>(r1_val, mem_val);
+ return length;
+}
+
+EVALUATE(O) {
+ DCHECK_OPCODE(O);
+ // 32-bit Reg-Mem instructions
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t alu_out = 0;
+ alu_out = r1_val | mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(X) {
+ DCHECK_OPCODE(X);
+ // 32-bit Reg-Mem instructions
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t alu_out = 0;
+ alu_out = r1_val ^ mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(C) {
+ DCHECK_OPCODE(C);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int32_t mem_val = ReadW(addr, instr);
+ SetS390ConditionCode<int32_t>(r1_val, mem_val);
+ return length;
+}
+
+EVALUATE(A) {
+ DCHECK_OPCODE(A);
+ // 32-bit Reg-Mem instructions
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+ alu_out = r1_val + mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(S) {
+ DCHECK_OPCODE(S);
+ // 32-bit Reg-Mem instructions
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+ alu_out = r1_val - mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(M) {
+ DCHECK_OPCODE(M);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ DCHECK(r1 % 2 == 0);
+ int32_t mem_val = ReadW(addr, instr);
+ int32_t r1_val = get_low_register<int32_t>(r1 + 1);
+ int64_t product =
+ static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
+ int32_t high_bits = product >> 32;
+ r1_val = high_bits;
+ int32_t low_bits = product & 0x00000000FFFFFFFF;
+ set_low_register(r1, high_bits);
+ set_low_register(r1 + 1, low_bits);
+ return length;
+}
+
+EVALUATE(D) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STD) {
+ DCHECK_OPCODE(STD);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int64_t frs_val = get_d_register(r1);
+ WriteDW(addr, frs_val);
+ return length;
+}
+
+EVALUATE(LD) {
+ DCHECK_OPCODE(LD);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int64_t dbl_val = *reinterpret_cast<int64_t*>(addr);
+ set_d_register(r1, dbl_val);
+ return length;
+}
+
+EVALUATE(CD) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STE) {
+ DCHECK_OPCODE(STE);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ int64_t frs_val = get_d_register(r1) >> 32;
+ WriteW(addr, static_cast<int32_t>(frs_val), instr);
+ return length;
+}
+
+EVALUATE(MS) {
+ DCHECK_OPCODE(MS);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ set_low_register(r1, r1_val * mem_val);
+ return length;
+}
+
+EVALUATE(LE) {
+ DCHECK_OPCODE(LE);
+ DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t addr = b2_val + x2_val + d2_val;
+ float float_val = *reinterpret_cast<float*>(addr);
+ set_d_register_from_float32(r1, float_val);
+ return length;
+}
+
+EVALUATE(BRXH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BRXLE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BXH) {
+ DCHECK_OPCODE(BXH);
+ DECODE_RS_A_INSTRUCTION(r1, r3, b2, d2);
+
+ // r1_val is the first operand, r3_val is the increment
+ int32_t r1_val = r1 == 0 ? 0 : get_register(r1);
+  int32_t r3_val = r3 == 0 ? 0 : get_register(r3);
+ intptr_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ intptr_t branch_address = b2_val + d2;
+ // increment r1_val
+ r1_val += r3_val;
+
+ // if the increment is even, then it designates a pair of registers
+ // and the contents of the even and odd registers of the pair are used as
+ // the increment and compare value respectively. If the increment is odd,
+ // the increment itself is used as both the increment and compare value
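+  // (e.g. r3 = 4 takes the increment from r4 and the compare value from
+  // r5, while r3 = 5 uses the value of r5 for both).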
+ int32_t compare_val = r3 % 2 == 0 ? get_register(r3 + 1) : r3_val;
+ if (r1_val > compare_val) {
+ // branch to address if r1_val is greater than compare value
+ set_pc(branch_address);
+ }
+
+ // update contents of register in r1 with the new incremented value
+ set_register(r1, r1_val);
+
+ return length;
+}
+
+EVALUATE(BXLE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRL) {
+ DCHECK_OPCODE(SRL);
+ DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // only takes rightmost 6 bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t alu_out = 0;
+ alu_out = r1_val >> shiftBits;
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SLL) {
+ DCHECK_OPCODE(SLL);
+  DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // only takes rightmost 6 bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t alu_out = 0;
+ alu_out = r1_val << shiftBits;
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SRA) {
+ DCHECK_OPCODE(SRA);
+ DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // only takes rightmost 6 bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ alu_out = r1_val >> shiftBits;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(SLA) {
+ DCHECK_OPCODE(SLA);
+ DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // only takes rightmost 6 bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ isOF = CheckOverflowForShiftLeft(r1_val, shiftBits);
+ alu_out = r1_val << shiftBits;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(SRDL) {
+ DCHECK_OPCODE(SRDL);
+ DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+ DCHECK(r1 % 2 == 0); // must be a reg pair
+  // only takes rightmost 6 bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ uint64_t opnd1 = static_cast<uint64_t>(get_low_register<uint32_t>(r1)) << 32;
+ uint64_t opnd2 = static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+ uint64_t r1_val = opnd1 | opnd2;
+ uint64_t alu_out = r1_val >> shiftBits;
+ set_low_register(r1, alu_out >> 32);
+ set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ return length;
+}
+
+EVALUATE(SLDL) {
+ DCHECK_OPCODE(SLDL);
+ DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+  // only takes rightmost 6 bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+
+ DCHECK(r1 % 2 == 0);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r1_next_val = get_low_register<uint32_t>(r1 + 1);
+ uint64_t alu_out = (static_cast<uint64_t>(r1_val) << 32) |
+ (static_cast<uint64_t>(r1_next_val));
+ alu_out <<= shiftBits;
+ set_low_register(r1 + 1, static_cast<uint32_t>(alu_out));
+ set_low_register(r1, static_cast<uint32_t>(alu_out >> 32));
+ return length;
+}
+
+EVALUATE(SRDA) {
+ DCHECK_OPCODE(SRDA);
+ DECODE_RS_A_INSTRUCTION_NO_R3(r1, b2, d2);
+ DCHECK(r1 % 2 == 0); // must be a reg pair
+  // only takes rightmost 6 bits
+ int64_t b2_val = b2 == 0 ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int64_t opnd1 = static_cast<int64_t>(get_low_register<int32_t>(r1)) << 32;
+ int64_t opnd2 = static_cast<uint64_t>(get_low_register<uint32_t>(r1 + 1));
+ int64_t r1_val = opnd1 + opnd2;
+ int64_t alu_out = r1_val >> shiftBits;
+ set_low_register(r1, alu_out >> 32);
+ set_low_register(r1 + 1, alu_out & 0x00000000FFFFFFFF);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ return length;
+}
+
+EVALUATE(SLDA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STM) {
+ DCHECK_OPCODE(STM);
+ DECODE_RS_A_INSTRUCTION(r1, r3, rb, d2);
+ // Store Multiple 32-bits.
+ int offset = d2;
+ // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+ // the number of regs stored properly.
+ if (r3 < r1) r3 += 16;
+
+ int32_t rb_val = (rb == 0) ? 0 : get_low_register<int32_t>(rb);
+
+ // Store each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+ WriteW(rb_val + offset + 4 * i, value, instr);
+ }
+ return length;
+}
+
+EVALUATE(TM) {
+ DCHECK_OPCODE(TM);
+ // Test Under Mask (Mem - Imm) (8)
+ DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t selected_bits = mem_val & imm_val;
+ // CC0: Selected bits are zero
+ // CC1: Selected bits mixed zeros and ones
+ // CC3: Selected bits all ones
+ if (0 == selected_bits) {
+ condition_reg_ = CC_EQ; // CC0
+ } else if (selected_bits == imm_val) {
+ condition_reg_ = 0x1; // CC3
+ } else {
+ condition_reg_ = 0x4; // CC1
+ }
+ return length;
+}
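+
+// Illustrative TM outcomes (memory and mask values chosen arbitrarily):
+//   mem = 0x00, mask = 0xF0 -> selected bits 0x00 (all zero)   -> CC0
+//   mem = 0xF0, mask = 0xF0 -> selected bits == mask (all one) -> CC3
+//   mem = 0x90, mask = 0xF0 -> selected bits 0x90 (mixed)      -> CC1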
+
+EVALUATE(MVI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TS) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLI) {
+ DCHECK_OPCODE(CLI);
+ // Compare Immediate (Mem - Imm) (8)
+ DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val)
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+ return length;
+}
+
+EVALUATE(OI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(XI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LM) {
+ DCHECK_OPCODE(LM);
+ DECODE_RS_A_INSTRUCTION(r1, r3, rb, d2);
+  // Load Multiple 32-bits.
+  int offset = d2;
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs loaded properly.
+  if (r3 < r1) r3 += 16;
+
+  int32_t rb_val = (rb == 0) ? 0 : get_low_register<int32_t>(rb);
+
+  // Load each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ int32_t value = ReadW(rb_val + offset + 4 * i, instr);
+ set_low_register((r1 + i) % 16, value);
+ }
+ return length;
+}
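+
+// Illustrative LM/STM register-range wraparound: with r1 = 14 and r3 = 2
+// the biased range covers r14, r15, r0, r1, r2 (five registers), each one
+// 4 bytes apart in memory starting at rb_val + d2.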
+
+EVALUATE(MVCLE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLCLE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDS) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ICM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BPRP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BPP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVN) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVC) {
+ DCHECK_OPCODE(MVC);
+ // Move Character
+ SSInstruction* ssInstr = reinterpret_cast<SSInstruction*>(instr);
+ int b1 = ssInstr->B1Value();
+ intptr_t d1 = ssInstr->D1Value();
+ int b2 = ssInstr->B2Value();
+ intptr_t d2 = ssInstr->D2Value();
+ int length = ssInstr->Length();
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t src_addr = b2_val + d2;
+ intptr_t dst_addr = b1_val + d1;
+  // The encoded Length() is the number of bytes to copy minus one.
+  for (int i = 0; i < length + 1; ++i) {
+    WriteB(dst_addr++, ReadB(src_addr++));
+  }
+  length = 6;  // MVC itself is a 6-byte SS-a instruction.
+  return length;
+}
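+
+// Illustrative MVC trace: an encoded Length() of 7 copies 8 bytes from
+// b2_val + d2 to b1_val + d1; the returned instruction length is always 6.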
+
+EVALUATE(MVZ) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(OC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(XC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVCP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ED) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EDMK) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PKU) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(UNPKU) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVCIN) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PKA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(UNPKA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PLO) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LMD) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVO) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PACK) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(UNPK) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ZAP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(UPT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PFPO) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(IIHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(IIHL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(IILH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(IILL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NIHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NIHL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NILH) {
+ DCHECK_OPCODE(NILH);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ // CC is set based on the 16 bits that are AND'd
+ SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) & i);
+ i = (i << 16) | 0x0000FFFF;
+ set_low_register(r1, r1_val & i);
+ return length;
+}
+
+EVALUATE(NILL) {
+ DCHECK_OPCODE(NILL);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ // CC is set based on the 16 bits that are AND'd
+ SetS390BitWiseConditionCode<uint16_t>(r1_val & i);
+ i |= 0xFFFF0000;
+ set_low_register(r1, r1_val & i);
+ return length;
+}
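+
+// Illustrative NILH/NILL traces (r1 value chosen arbitrarily):
+//   r1 = 0x12345678, NILH with i = 0xFF00 -> r1 = 0x12005678, CC from 0x1200
+//   r1 = 0x12345678, NILL with i = 0x00FF -> r1 = 0x12340078, CC from 0x0078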
+
+EVALUATE(OIHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(OIHL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(OILH) {
+ DCHECK_OPCODE(OILH);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+  // CC is set based on the 16 bits that are OR'ed
+ SetS390BitWiseConditionCode<uint16_t>((r1_val >> 16) | i);
+ i = i << 16;
+ set_low_register(r1, r1_val | i);
+ return length;
+}
+
+EVALUATE(OILL) {
+ DCHECK_OPCODE(OILL);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+  // CC is set based on the 16 bits that are OR'ed
+ SetS390BitWiseConditionCode<uint16_t>(r1_val | i);
+ set_low_register(r1, r1_val | i);
+ return length;
+}
+
+EVALUATE(LLIHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLIHL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLILH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLILL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TMLH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TMLL) {
+ DCHECK_OPCODE(TMLL);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int mask = i2 & 0x0000FFFF;
+ if (mask == 0) {
+ condition_reg_ = 0x0;
+ return length;
+ }
+  uint32_t r1_val = get_low_register<uint32_t>(r1);
+  r1_val = r1_val & 0x0000FFFF;  // uses only the low 16 bits
+
+  // Test if all selected bits are zero (all 16 mask bits are examined).
+  bool allSelectedBitsAreZeros = true;
+  for (int i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ if (r1_val & (1 << i)) {
+ allSelectedBitsAreZeros = false;
+ break;
+ }
+ }
+ }
+ if (allSelectedBitsAreZeros) {
+ condition_reg_ = 0x8;
+ return length; // Done!
+ }
+
+ // Test if all selected bits are one
+ bool allSelectedBitsAreOnes = true;
+  for (int i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ if (!(r1_val & (1 << i))) {
+ allSelectedBitsAreOnes = false;
+ break;
+ }
+ }
+ }
+ if (allSelectedBitsAreOnes) {
+ condition_reg_ = 0x1;
+ return length; // Done!
+ }
+
+ // Now we know selected bits mixed zeros and ones
+ // Test if the leftmost bit is zero or one
+  for (int i = 15; i >= 0; i--) {
+ if (mask & (1 << i)) {
+ if (r1_val & (1 << i)) {
+ // leftmost bit is one
+ condition_reg_ = 0x2;
+ } else {
+ // leftmost bit is zero
+ condition_reg_ = 0x4;
+ }
+ return length; // Done!
+ }
+ }
+ return length;
+}
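+
+// Illustrative TMLL traces with mask 0x00F0 (register values arbitrary):
+//   low 16 bits 0x0000 -> all selected bits zero         -> condition_reg_ 0x8
+//   low 16 bits 0x00F0 -> all selected bits one          -> condition_reg_ 0x1
+//   low 16 bits 0x0090 -> mixed, leftmost selected bit 1 -> condition_reg_ 0x2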
+
+EVALUATE(TMHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TMHL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BRAS) {
+ DCHECK_OPCODE(BRAS);
+ // Branch Relative and Save
+ DECODE_RI_B_INSTRUCTION(instr, r1, d2)
+ intptr_t pc = get_pc();
+  // Save the address of the next instruction in r1
+ set_register(r1, pc + sizeof(FourByteInstr));
+ // Update PC to branch target
+ set_pc(pc + d2 * 2);
+ return length;
+}
+
+EVALUATE(BRCT) {
+ DCHECK_OPCODE(BRCT);
+  // Branch On Count (32).
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int64_t value = get_low_register<int32_t>(r1);
+ set_low_register(r1, --value);
+ // Branch if value != 0
+ if (value != 0) {
+ intptr_t offset = i2 * 2;
+ set_pc(get_pc() + offset);
+ }
+ return length;
+}
+
+EVALUATE(BRCTG) {
+ DCHECK_OPCODE(BRCTG);
+  // Branch On Count (64).
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int64_t value = get_register(r1);
+ set_register(r1, --value);
+ // Branch if value != 0
+ if (value != 0) {
+ intptr_t offset = i2 * 2;
+ set_pc(get_pc() + offset);
+ }
+ return length;
+}
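+
+// Illustrative BRCT/BRCTG loop: with r1 = 3 and i2 = -2 (an offset of -4
+// bytes) the branch is taken twice (r1: 3 -> 2 -> 1) and falls through once
+// the decremented count reaches zero.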
+
+EVALUATE(LHI) {
+ DCHECK_OPCODE(LHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i);
+ set_low_register(r1, i);
+ return length;
+}
+
+EVALUATE(LGHI) {
+ DCHECK_OPCODE(LGHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int64_t i = static_cast<int64_t>(i2);
+ set_register(r1, i);
+ return length;
+}
+
+EVALUATE(MHI) {
+ DCHECK_OPCODE(MHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ bool isOF = false;
+ isOF = CheckOverflowForMul(r1_val, i);
+ r1_val *= i;
+ set_low_register(r1, r1_val);
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(MGHI) {
+ DCHECK_OPCODE(MGHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int64_t i = static_cast<int64_t>(i2);
+ int64_t r1_val = get_register(r1);
+ bool isOF = false;
+ isOF = CheckOverflowForMul(r1_val, i);
+ r1_val *= i;
+ set_register(r1, r1_val);
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(CHI) {
+ DCHECK_OPCODE(CHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ SetS390ConditionCode<int32_t>(r1_val, i);
+ return length;
+}
+
+EVALUATE(CGHI) {
+ DCHECK_OPCODE(CGHI);
+ DECODE_RI_A_INSTRUCTION(instr, r1, i2);
+ int64_t i = static_cast<int64_t>(i2);
+ int64_t r1_val = get_register(r1);
+ SetS390ConditionCode<int64_t>(r1_val, i);
+ return length;
+}
+
+EVALUATE(LARL) {
+ DCHECK_OPCODE(LARL);
+ DECODE_RIL_B_INSTRUCTION(r1, i2);
+ intptr_t offset = i2 * 2;
+ set_register(r1, get_pc() + offset);
+ return length;
+}
+
+EVALUATE(LGFI) {
+ DCHECK_OPCODE(LGFI);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ set_register(r1, static_cast<int64_t>(static_cast<int32_t>(imm)));
+ return length;
+}
+
+EVALUATE(BRASL) {
+ DCHECK_OPCODE(BRASL);
+ // Branch and Save Relative Long
+ DECODE_RIL_B_INSTRUCTION(r1, i2);
+ intptr_t d2 = i2;
+ intptr_t pc = get_pc();
+  set_register(r1, pc + 6);  // save the address of the next instruction
+  set_pc(pc + d2 * 2);       // branch to the relative target
+ return length;
+}
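+
+// Illustrative BRAS/BRASL linkage: the immediate counts halfwords, so
+// d2 = -3 branches to pc - 6; BRAS saves pc + 4 (a 4-byte instruction)
+// while BRASL saves pc + 6 in r1.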
+
+EVALUATE(XIHF) {
+ DCHECK_OPCODE(XIHF);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ uint32_t alu_out = 0;
+ alu_out = get_high_register<uint32_t>(r1);
+ alu_out = alu_out ^ imm;
+ set_high_register(r1, alu_out);
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ return length;
+}
+
+EVALUATE(XILF) {
+ DCHECK_OPCODE(XILF);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ uint32_t alu_out = 0;
+ alu_out = get_low_register<uint32_t>(r1);
+ alu_out = alu_out ^ imm;
+ set_low_register(r1, alu_out);
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ return length;
+}
+
+EVALUATE(NIHF) {
+ DCHECK_OPCODE(NIHF);
+ // Bitwise Op on upper 32-bits
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ uint32_t alu_out = get_high_register<uint32_t>(r1);
+ alu_out &= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_high_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(NILF) {
+ DCHECK_OPCODE(NILF);
+ // Bitwise Op on lower 32-bits
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ alu_out &= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(OIHF) {
+ DCHECK_OPCODE(OIHF);
+ // Bitwise Op on upper 32-bits
+ DECODE_RIL_B_INSTRUCTION(r1, imm);
+ uint32_t alu_out = get_high_register<uint32_t>(r1);
+ alu_out |= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_high_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(OILF) {
+ DCHECK_OPCODE(OILF);
+ // Bitwise Op on lower 32-bits
+ DECODE_RIL_B_INSTRUCTION(r1, imm);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ alu_out |= imm;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(LLIHF) {
+ DCHECK_OPCODE(LLIHF);
+ // Load Logical Immediate into high word
+ DECODE_RIL_A_INSTRUCTION(r1, i2);
+ uint64_t imm = static_cast<uint64_t>(i2);
+ set_register(r1, imm << 32);
+ return length;
+}
+
+EVALUATE(LLILF) {
+ DCHECK_OPCODE(LLILF);
+ // Load Logical into lower 32-bits (zero extend upper 32-bits)
+ DECODE_RIL_A_INSTRUCTION(r1, i2);
+ uint64_t imm = static_cast<uint64_t>(i2);
+ set_register(r1, imm);
+ return length;
+}
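+
+// Illustrative LLIHF/LLILF traces (immediate chosen arbitrarily):
+//   LLIHF r1, 0xDEADBEEF -> r1 = 0xDEADBEEF00000000
+//   LLILF r1, 0xDEADBEEF -> r1 = 0x00000000DEADBEEF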
+
+EVALUATE(MSGFI) {
+ DCHECK_OPCODE(MSGFI);
+ DECODE_RIL_B_INSTRUCTION(r1, i2);
+ int64_t alu_out = get_register(r1);
+ alu_out = alu_out * i2;
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(MSFI) {
+ DCHECK_OPCODE(MSFI);
+ DECODE_RIL_B_INSTRUCTION(r1, i2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ alu_out = alu_out * i2;
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SLGFI) {
+ DCHECK_OPCODE(SLGFI);
+#ifndef V8_TARGET_ARCH_S390X
+  // This instruction is only valid on 64-bit targets.
+  DCHECK(false);
+#endif
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  uint64_t r1_val = static_cast<uint64_t>(get_register(r1));
+  uint64_t alu_out = r1_val - i2;
+  set_register(r1, static_cast<intptr_t>(alu_out));
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+ return length;
+}
+
+EVALUATE(SLFI) {
+ DCHECK_OPCODE(SLFI);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ alu_out -= imm;
+ SetS390ConditionCode<uint32_t>(alu_out, 0);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(AGFI) {
+ DCHECK_OPCODE(AGFI);
+  // Add Immediate (64 <- 32)
+ DECODE_RIL_B_INSTRUCTION(r1, i2_val);
+ bool isOF = false;
+ // 64-bit Add (Register + 32-bit Imm)
+ int64_t r1_val = get_register(r1);
+ int64_t i2 = static_cast<int64_t>(i2_val);
+ isOF = CheckOverflowForIntAdd(r1_val, i2, int64_t);
+ int64_t alu_out = r1_val + i2;
+ set_register(r1, alu_out);
+ SetS390ConditionCode<int64_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(AFI) {
+ DCHECK_OPCODE(AFI);
+  // Add Immediate (32)
+ DECODE_RIL_B_INSTRUCTION(r1, i2);
+ bool isOF = false;
+ // 32-bit Add (Register + 32-bit Immediate)
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ isOF = CheckOverflowForIntAdd(r1_val, i2, int32_t);
+ int32_t alu_out = r1_val + i2;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(ALGFI) {
+ DCHECK_OPCODE(ALGFI);
+#ifndef V8_TARGET_ARCH_S390X
+  // This instruction is only valid on 64-bit targets.
+  DCHECK(false);
+#endif
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  uint64_t r1_val = static_cast<uint64_t>(get_register(r1));
+  uint64_t alu_out = r1_val + i2;
+  set_register(r1, static_cast<intptr_t>(alu_out));
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+
+ return length;
+}
+
+EVALUATE(ALFI) {
+ DCHECK_OPCODE(ALFI);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ alu_out += imm;
+ SetS390ConditionCode<uint32_t>(alu_out, 0);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(CGFI) {
+ DCHECK_OPCODE(CGFI);
+ // Compare with Immediate (64)
+ DECODE_RIL_B_INSTRUCTION(r1, i2);
+ int64_t imm = static_cast<int64_t>(i2);
+ SetS390ConditionCode<int64_t>(get_register(r1), imm);
+ return length;
+}
+
+EVALUATE(CFI) {
+ DCHECK_OPCODE(CFI);
+ // Compare with Immediate (32)
+ DECODE_RIL_B_INSTRUCTION(r1, imm);
+ SetS390ConditionCode<int32_t>(get_low_register<int32_t>(r1), imm);
+ return length;
+}
+
+EVALUATE(CLGFI) {
+ DCHECK_OPCODE(CLGFI);
+ // Compare Logical with Immediate (64)
+ DECODE_RIL_A_INSTRUCTION(r1, i2);
+ uint64_t imm = static_cast<uint64_t>(i2);
+ SetS390ConditionCode<uint64_t>(get_register(r1), imm);
+ return length;
+}
+
+EVALUATE(CLFI) {
+ DCHECK_OPCODE(CLFI);
+ // Compare Logical with Immediate (32)
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ SetS390ConditionCode<uint32_t>(get_low_register<uint32_t>(r1), imm);
+ return length;
+}
+
+EVALUATE(LLHRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LGHRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LHRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLGHRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STHRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LGRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STGRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LGFRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLGFRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EXRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PFDRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGHRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CHRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGFRL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ECTG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CSST) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPD) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPDG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BRCTH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AIH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALSIH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALSIHN) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CIH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCK) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CFC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(IPM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(HSCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SSCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STSCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TSCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TPI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SAL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RSCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCRW) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCPS) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RCHP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SCHM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CKSM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SAR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EAR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSR) {
+ DCHECK_OPCODE(MSR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ set_low_register(r1, r1_val * r2_val);
+ return length;
+}
+
+EVALUATE(MVST) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CUSE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRST) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(XSCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCKE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCKF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRNM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STFPC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LFPC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CUUTF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CUTFU) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STFLE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRNMB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRNMT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LFAS) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PPA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ETND) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TEND) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NIAI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TABORT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRAP4) {
+ DCHECK_OPCODE(TRAP4);
+ int length = 4;
+  // Zap the caller-allocated register save area on the stack.
+  int64_t sp_addr = get_register(sp);
+  for (int i = 0; i < kCalleeRegisterSaveAreaSize / kPointerSize; ++i) {
+    // Don't clobber the saved return address (r14).
+    if (i != 14) (reinterpret_cast<intptr_t*>(sp_addr))[i] = 0xdeadbabe;
+  }
+ SoftwareInterrupt(instr);
+ return length;
+}
+
+EVALUATE(LPEBR) {
+ DCHECK_OPCODE(LPEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ fr1_val = std::fabs(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ if (fr2_val != fr2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (fr2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else {
+ condition_reg_ = CC_GT;
+ }
+
+ return length;
+}
+
+EVALUATE(LNEBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LTEBR) {
+ DCHECK_OPCODE(LTEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_d_register(r2);
+ float fr2_val = get_float32_from_d_register(r2);
+ SetS390ConditionCode<float>(fr2_val, 0.0);
+ set_d_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LCEBR) {
+ DCHECK_OPCODE(LCEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ fr1_val = -fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ if (fr2_val != fr2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (fr2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else if (fr2_val < 0) {
+ condition_reg_ = CC_LT;
+ } else if (fr2_val > 0) {
+ condition_reg_ = CC_GT;
+ }
+ return length;
+}
+
+EVALUATE(LDEBR) {
+ DCHECK_OPCODE(LDEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fp_val = get_float32_from_d_register(r2);
+ double db_val = static_cast<double>(fp_val);
+ set_d_register_from_double(r1, db_val);
+ return length;
+}
+
+EVALUATE(LXDBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LXEBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MXDBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KEBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CEBR) {
+ DCHECK_OPCODE(CEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ if (isNaN(fr1_val) || isNaN(fr2_val)) {
+ condition_reg_ = CC_OF;
+ } else {
+ SetS390ConditionCode<float>(fr1_val, fr2_val);
+ }
+
+ return length;
+}
+
+EVALUATE(AEBR) {
+ DCHECK_OPCODE(AEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ fr1_val += fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+
+ return length;
+}
+
+EVALUATE(SEBR) {
+ DCHECK_OPCODE(SEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ fr1_val -= fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+
+ return length;
+}
+
+EVALUATE(MDEBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DEBR) {
+ DCHECK_OPCODE(DEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ fr1_val /= fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+
+ return length;
+}
+
+EVALUATE(MAEBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSEBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPDBR) {
+ DCHECK_OPCODE(LPDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ r1_val = std::fabs(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ if (r2_val != r2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (r2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else {
+ condition_reg_ = CC_GT;
+ }
+ return length;
+}
+
+EVALUATE(LNDBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LTDBR) {
+ DCHECK_OPCODE(LTDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_d_register(r2);
+ SetS390ConditionCode<double>(bit_cast<double, int64_t>(r2_val), 0.0);
+ set_d_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LCDBR) {
+ DCHECK_OPCODE(LCDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ r1_val = -r2_val;
+ set_d_register_from_double(r1, r1_val);
+ if (r2_val != r2_val) { // input is NaN
+ condition_reg_ = CC_OF;
+ } else if (r2_val == 0) {
+ condition_reg_ = CC_EQ;
+ } else if (r2_val < 0) {
+ condition_reg_ = CC_LT;
+ } else if (r2_val > 0) {
+ condition_reg_ = CC_GT;
+ }
+ return length;
+}
+
+EVALUATE(SQEBR) {
+ DCHECK_OPCODE(SQEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ fr1_val = std::sqrt(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ return length;
+}
+
+EVALUATE(SQDBR) {
+ DCHECK_OPCODE(SQDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ r1_val = std::sqrt(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(SQXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MEEBR) {
+ DCHECK_OPCODE(MEEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float fr1_val = get_float32_from_d_register(r1);
+ float fr2_val = get_float32_from_d_register(r2);
+ fr1_val *= fr2_val;
+ set_d_register_from_float32(r1, fr1_val);
+ SetS390ConditionCode<float>(fr1_val, 0);
+ return length;
+}
+
+EVALUATE(KDBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDBR) {
+ DCHECK_OPCODE(CDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ if (isNaN(r1_val) || isNaN(r2_val)) {
+ condition_reg_ = CC_OF;
+ } else {
+ SetS390ConditionCode<double>(r1_val, r2_val);
+ }
+ return length;
+}
+
+EVALUATE(ADBR) {
+ DCHECK_OPCODE(ADBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ r1_val += r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(SDBR) {
+ DCHECK_OPCODE(SDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ r1_val -= r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(MDBR) {
+ DCHECK_OPCODE(MDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ r1_val *= r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(DDBR) {
+ DCHECK_OPCODE(DDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ r1_val /= r2_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(MADBR) {
+ DCHECK_OPCODE(MADBR);
+ DECODE_RRD_INSTRUCTION(r1, r2, r3);
+ double r1_val = get_double_from_d_register(r1);
+ double r2_val = get_double_from_d_register(r2);
+ double r3_val = get_double_from_d_register(r3);
+ r1_val += r2_val * r3_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(MSDBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LNXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LTXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LCXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LEDBRA) {
+ DCHECK_OPCODE(LEDBRA);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r2_val = get_double_from_d_register(r2);
+ set_d_register_from_float32(r1, static_cast<float>(r2_val));
+ return length;
+}
+
+EVALUATE(LDXBRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LEXBRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(FIXBRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TBEDR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TBDR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DIEBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(FIEBRA) {
+ DCHECK_OPCODE(FIEBRA);
+ DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
+ float r2_val = get_float32_from_d_register(r2);
+ CHECK(m4 == 0);
+ switch (m3) {
+    case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+      set_d_register_from_float32(r1, std::round(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_0:
+      set_d_register_from_float32(r1, std::trunc(r2_val));
+      break;
+ case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+ set_d_register_from_float32(r1, std::ceil(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+ set_d_register_from_float32(r1, std::floor(r2_val));
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ return length;
+}
+
+EVALUATE(THDER) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(THDR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DIDBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(FIDBRA) {
+ DCHECK_OPCODE(FIDBRA);
+ DECODE_RRF_E_INSTRUCTION(r1, r2, m3, m4);
+ double r2_val = get_double_from_d_register(r2);
+ CHECK(m4 == 0);
+ switch (m3) {
+    case Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0:
+      set_d_register_from_double(r1, std::round(r2_val));
+      break;
+    case Assembler::FIDBRA_ROUND_TOWARD_0:
+      set_d_register_from_double(r1, std::trunc(r2_val));
+      break;
+ case Assembler::FIDBRA_ROUND_TOWARD_POS_INF:
+ set_d_register_from_double(r1, std::ceil(r2_val));
+ break;
+ case Assembler::FIDBRA_ROUND_TOWARD_NEG_INF:
+ set_d_register_from_double(r1, std::floor(r2_val));
+ break;
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ return length;
+}
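+
+// Illustrative FIEBRA/FIDBRA rounding of -2.5 under each supported mask:
+//   round-to-nearest-away-from-0 -> -3, round-toward-0 -> -2,
+//   round-toward-+inf -> -2, round-toward--inf -> -3.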
+
+EVALUATE(LXR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPDFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LNDFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LCDFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LZER) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LZDR) {
+ DCHECK_OPCODE(LZDR);
+ DECODE_RRE_INSTRUCTION_NO_R2(r1);
+ set_d_register_from_double(r1, 0.0);
+ return length;
+}
+
+EVALUATE(LZXR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SFPC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SFASR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EFPC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CELFBR) {
+ DCHECK_OPCODE(CELFBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ float r1_val = static_cast<float>(r2_val);
+ set_d_register_from_float32(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CDLFBR) {
+ DCHECK_OPCODE(CDLFBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CXLFBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CEFBRA) {
+ DCHECK_OPCODE(CEFBRA);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t fr2_val = get_low_register<int32_t>(r2);
+ float fr1_val = static_cast<float>(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ return length;
+}
+
+EVALUATE(CDFBRA) {
+ DCHECK_OPCODE(CDFBRA);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CXFBRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CFEBRA) {
+ DCHECK_OPCODE(CFEBRA);
+ DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+ float r2_fval = get_float32_from_d_register(r2);
+ int32_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_fval, INT32_MAX, INT32_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ r1_val = static_cast<int32_t>(r2_fval);
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+ float ceil_val = std::ceil(r2_fval);
+ float floor_val = std::floor(r2_fval);
+ float sub_val1 = std::fabs(r2_fval - floor_val);
+ float sub_val2 = std::fabs(r2_fval - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // round away from zero:
+ if (r2_fval > 0.0) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else {
+ r1_val = static_cast<int32_t>(floor_val);
+ }
+ }
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ float ceil_val = std::ceil(r2_fval);
+ float floor_val = std::floor(r2_fval);
+ float sub_val1 = std::fabs(r2_fval - floor_val);
+ float sub_val2 = std::fabs(r2_fval - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // check which one is even:
+ int32_t c_v = static_cast<int32_t>(ceil_val);
+ int32_t f_v = static_cast<int32_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+      // Check for overflow: cast r2_fval to a 64-bit integer and verify it
+      // lies within [INT_MIN, INT_MAX]; set CC3 (overflow) otherwise.
+ int64_t temp = static_cast<int64_t>(r2_fval);
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(r2_fval);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int32_t>(std::ceil(r2_fval));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+      // Check for overflow: cast the floor of r2_fval to a 64-bit integer
+      // and verify it lies within [INT_MIN, INT_MAX]; set CC3 otherwise.
+ int64_t temp = static_cast<int64_t>(std::floor(r2_fval));
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(std::floor(r2_fval));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_low_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CFDBRA) {
+ DCHECK_OPCODE(CFDBRA);
+ DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+ double r2_val = get_double_from_d_register(r2);
+ int32_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_val, INT32_MAX, INT32_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ r1_val = static_cast<int32_t>(r2_val);
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0: {
+ double ceil_val = std::ceil(r2_val);
+ double floor_val = std::floor(r2_val);
+ double sub_val1 = std::fabs(r2_val - floor_val);
+ double sub_val2 = std::fabs(r2_val - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // round away from zero:
+ if (r2_val > 0.0) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else {
+ r1_val = static_cast<int32_t>(floor_val);
+ }
+ }
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ double ceil_val = std::ceil(r2_val);
+ double floor_val = std::floor(r2_val);
+ double sub_val1 = std::fabs(r2_val - floor_val);
+ double sub_val2 = std::fabs(r2_val - ceil_val);
+ if (sub_val1 > sub_val2) {
+ r1_val = static_cast<int32_t>(ceil_val);
+ } else if (sub_val1 < sub_val2) {
+ r1_val = static_cast<int32_t>(floor_val);
+ } else { // check which one is even:
+ int32_t c_v = static_cast<int32_t>(ceil_val);
+ int32_t f_v = static_cast<int32_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+      // Check for overflow: cast r2_val to a 64-bit integer and verify it
+      // lies within [INT_MIN, INT_MAX]; set CC3 (overflow) otherwise.
+ int64_t temp = static_cast<int64_t>(r2_val);
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(r2_val);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int32_t>(std::ceil(r2_val));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+      // Check for overflow: cast the floor of r2_val to a 64-bit integer
+      // and verify it lies within [INT_MIN, INT_MAX]; set CC3 otherwise.
+ int64_t temp = static_cast<int64_t>(std::floor(r2_val));
+ if (temp < INT_MIN || temp > INT_MAX) {
+ condition_reg_ = CC_OF;
+ }
+ r1_val = static_cast<int32_t>(std::floor(r2_val));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_low_register(r1, r1_val);
+ return length;
+}
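+
+// Illustrative ties-to-even rounding in CFEBRA/CFDBRA (values arbitrary):
+//   2.5 is equidistant from 2 and 3, so the even candidate 2 is chosen;
+//   3.5 likewise rounds to 4, while 2.6 is simply nearer to 3.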
+
+EVALUATE(CFXBRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLFEBR) {
+ DCHECK_OPCODE(CLFEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float r2_val = get_float32_from_d_register(r2);
+ uint32_t r1_val = static_cast<uint32_t>(r2_val);
+ set_low_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+ return length;
+}
+
+EVALUATE(CLFDBR) {
+ DCHECK_OPCODE(CLFDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r2_val = get_double_from_d_register(r2);
+ uint32_t r1_val = static_cast<uint32_t>(r2_val);
+ set_low_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT32_MAX);
+ return length;
+}
+
+EVALUATE(CLFXBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CELGBR) {
+ DCHECK_OPCODE(CELGBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint64_t r2_val = get_register(r2);
+ float r1_val = static_cast<float>(r2_val);
+ set_d_register_from_float32(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CDLGBR) {
+ DCHECK_OPCODE(CDLGBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint64_t r2_val = get_register(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CXLGBR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CEGBRA) {
+ DCHECK_OPCODE(CEGBRA);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t fr2_val = get_register(r2);
+ float fr1_val = static_cast<float>(fr2_val);
+ set_d_register_from_float32(r1, fr1_val);
+ return length;
+}
+
+EVALUATE(CDGBRA) {
+ DCHECK_OPCODE(CDGBRA);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_register(r2);
+ double r1_val = static_cast<double>(r2_val);
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CXGBRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGEBRA) {
+ DCHECK_OPCODE(CGEBRA);
+ DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+ float r2_fval = get_float32_from_d_register(r2);
+ int64_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_fval, INT64_MAX, INT64_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ float ceil_val = std::ceil(r2_fval);
+ float floor_val = std::floor(r2_fval);
+ if (std::abs(r2_fval - floor_val) > std::abs(r2_fval - ceil_val)) {
+ r1_val = static_cast<int64_t>(ceil_val);
+ } else if (std::abs(r2_fval - floor_val) < std::abs(r2_fval - ceil_val)) {
+ r1_val = static_cast<int64_t>(floor_val);
+ } else { // check which one is even:
+ int64_t c_v = static_cast<int64_t>(ceil_val);
+ int64_t f_v = static_cast<int64_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+ r1_val = static_cast<int64_t>(r2_fval);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::ceil(r2_fval));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::floor(r2_fval));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CGDBRA) {
+ DCHECK_OPCODE(CGDBRA);
+ DECODE_RRE_INSTRUCTION_M3(r1, r2, mask_val);
+ double r2_val = get_double_from_d_register(r2);
+ int64_t r1_val = 0;
+
+ SetS390RoundConditionCode(r2_val, INT64_MAX, INT64_MIN);
+
+ switch (mask_val) {
+ case CURRENT_ROUNDING_MODE:
+ case ROUND_TO_NEAREST_WITH_TIES_AWAY_FROM_0:
+ case ROUND_TO_PREPARE_FOR_SHORTER_PRECISION: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case ROUND_TO_NEAREST_WITH_TIES_TO_EVEN: {
+ double ceil_val = std::ceil(r2_val);
+ double floor_val = std::floor(r2_val);
+ if (std::abs(r2_val - floor_val) > std::abs(r2_val - ceil_val)) {
+ r1_val = static_cast<int64_t>(ceil_val);
+ } else if (std::abs(r2_val - floor_val) < std::abs(r2_val - ceil_val)) {
+ r1_val = static_cast<int64_t>(floor_val);
+ } else { // check which one is even:
+ int64_t c_v = static_cast<int64_t>(ceil_val);
+ int64_t f_v = static_cast<int64_t>(floor_val);
+ if (f_v % 2 == 0)
+ r1_val = f_v;
+ else
+ r1_val = c_v;
+ }
+ break;
+ }
+ case ROUND_TOWARD_0: {
+ r1_val = static_cast<int64_t>(r2_val);
+ break;
+ }
+ case ROUND_TOWARD_PLUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::ceil(r2_val));
+ break;
+ }
+ case ROUND_TOWARD_MINUS_INFINITE: {
+ r1_val = static_cast<int64_t>(std::floor(r2_val));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CGXBRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLGEBR) {
+ DCHECK_OPCODE(CLGEBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ float r2_val = get_float32_from_d_register(r2);
+ uint64_t r1_val = static_cast<uint64_t>(r2_val);
+ set_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+ return length;
+}
+
+EVALUATE(CLGDBR) {
+ DCHECK_OPCODE(CLGDBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ double r2_val = get_double_from_d_register(r2);
+ uint64_t r1_val = static_cast<uint64_t>(r2_val);
+ set_register(r1, r1_val);
+ SetS390ConvertConditionCode<double>(r2_val, r1_val, UINT64_MAX);
+ return length;
+}
+
+EVALUATE(CFER) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CFDR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CFXR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LDGR) {
+ DCHECK_OPCODE(LDGR);
+  // Load FPR from GPR (L <- 64): the raw 64-bit bit pattern is moved
+  // unchanged; no integer-to-double value conversion takes place.
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint64_t int_val = get_register(r2);
+  set_d_register(r1, int_val);
+ return length;
+}
+
+EVALUATE(CGER) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGDR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGXR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LGDR) {
+ DCHECK_OPCODE(LGDR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ // Load GPR from FPR (64 <- L)
+ int64_t double_val = get_d_register(r2);
+ set_register(r1, double_val);
+ return length;
+}
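+
+// Illustrative LDGR/LGDR round trip: both moves copy the raw 64-bit bit
+// pattern between a GPR and an FPR, so LGDR after LDGR returns the original
+// integer unchanged.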
+
+EVALUATE(MDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MDTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DDTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ADTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SDTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LDETR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LEDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LTDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(FIDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MXTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DXTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AXTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SXTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LXDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LDXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LTXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(FIXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGDTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CUDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EEDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ESDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGXTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CUXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CSXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EEXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ESXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDGTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDUTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDSTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CEDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(QADTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(IEDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RRDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXGTRA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXUTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXSTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CEXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(QAXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(IEXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RRXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPGR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LNGR) {
+ DCHECK_OPCODE(LNGR);
+ // Load Negative (64)
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_register(r2);
+ r2_val = (r2_val >= 0) ? -r2_val : r2_val; // If pos, then negate it.
+ set_register(r1, r2_val);
+ condition_reg_ = (r2_val == 0) ? CC_EQ : CC_LT; // CC0 - result is zero
+ // CC1 - result is negative
+ return length;
+}
+
+EVALUATE(LTGR) {
+ DCHECK_OPCODE(LTGR);
+  // Load and Test Register (64)
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_register(r2);
+  SetS390ConditionCode<int64_t>(r2_val, 0);
+  set_register(r1, r2_val);
+ return length;
+}
+
+EVALUATE(LCGR) {
+ DCHECK_OPCODE(LCGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_register(r2);
+ r2_val = ~r2_val;
+ r2_val = r2_val + 1;
+ set_register(r1, r2_val);
+ SetS390ConditionCode<int64_t>(r2_val, 0);
+  // If the input is INT64_MIN, its two's complement overflows and the
+  // result wraps back to INT64_MIN.
+  if (r2_val == INT64_MIN) {
+    SetS390OverflowCode(true);
+  }
+ return length;
+}
+
+EVALUATE(SGR) {
+ DCHECK_OPCODE(SGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ bool isOF = false;
+ isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+ r1_val -= r2_val;
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(ALGR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLGR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSGR) {
+ DCHECK_OPCODE(MSGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ set_register(r1, r1_val * r2_val);
+ return length;
+}
+
+EVALUATE(DSGR) {
+ DCHECK_OPCODE(DSGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+
+ DCHECK(r1 % 2 == 0);
+
+ int64_t dividend = get_register(r1 + 1);
+ int64_t divisor = get_register(r2);
+ set_register(r1, dividend % divisor);
+ set_register(r1 + 1, dividend / divisor);
+ return length;
+}
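+
+// Illustrative DSGR trace (r1 must be even): with the dividend 17 in r1 + 1
+// and the divisor 5 in r2, the pair receives remainder 2 in r1 and
+// quotient 3 in r1 + 1.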
+
+EVALUATE(LRVGR) {
+ DCHECK_OPCODE(LRVGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_register(r2);
+ int64_t r1_val = ByteReverse(r2_val);
+
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(LPGFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LNGFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LTGFR) {
+ DCHECK_OPCODE(LTGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+  // Load and Test Register (64 <- 32) (Sign Extends 32-bit val)
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int64_t result = static_cast<int64_t>(r2_val);
+ set_register(r1, result);
+ SetS390ConditionCode<int64_t>(result, 0);
+ return length;
+}
+
+EVALUATE(LCGFR) {
+ DCHECK_OPCODE(LCGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+  // Load Complement Register (64 <- 32) (Sign Extends 32-bit val, negates)
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int64_t result = -static_cast<int64_t>(r2_val);
+ set_register(r1, result);
+ return length;
+}
+
+EVALUATE(LLGFR) {
+ DCHECK_OPCODE(LLGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ uint64_t r2_finalval = (static_cast<uint64_t>(r2_val) & 0x00000000ffffffff);
+ set_register(r1, r2_finalval);
+ return length;
+}
+
+EVALUATE(LLGTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AGFR) {
+ DCHECK_OPCODE(AGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ // Add Register (64 <- 32) (Sign Extends 32-bit val)
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+ bool isOF = CheckOverflowForIntAdd(r1_val, r2_val, int64_t);
+ r1_val += r2_val;
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(SGFR) {
+ DCHECK_OPCODE(SGFR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ // Sub Reg (64 <- 32)
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+ bool isOF = false;
+ isOF = CheckOverflowForIntSub(r1_val, r2_val, int64_t);
+ r1_val -= r2_val;
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(ALGFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLGFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSGFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DSGFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KMAC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LRVR) {
+ DCHECK_OPCODE(LRVR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r1_val = ByteReverse(r2_val);
+
+ set_low_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(CGR) {
+ DCHECK_OPCODE(CGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ // Compare (64)
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ SetS390ConditionCode<int64_t>(r1_val, r2_val);
+ return length;
+}
+
+EVALUATE(CLGR) {
+ DCHECK_OPCODE(CLGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ // Compare Logical (64)
+ uint64_t r1_val = static_cast<uint64_t>(get_register(r1));
+ uint64_t r2_val = static_cast<uint64_t>(get_register(r2));
+ SetS390ConditionCode<uint64_t>(r1_val, r2_val);
+ return length;
+}
+
+EVALUATE(KMF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KMO) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PCC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KMCTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KM) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KMC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGFR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KIMD) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KLMD) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CFDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLGDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLFDTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BCTGR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CFXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLFXTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDFTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDLGTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDLFTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXFTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXLGTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXLFTR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGRT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NGR) {
+ DCHECK_OPCODE(NGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ r1_val &= r2_val;
+ SetS390BitWiseConditionCode<uint64_t>(r1_val);
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(OGR) {
+ DCHECK_OPCODE(OGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ r1_val |= r2_val;
+ SetS390BitWiseConditionCode<uint64_t>(r1_val);
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(XGR) {
+ DCHECK_OPCODE(XGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r1_val = get_register(r1);
+ int64_t r2_val = get_register(r2);
+ r1_val ^= r2_val;
+ SetS390BitWiseConditionCode<uint64_t>(r1_val);
+ set_register(r1, r1_val);
+ return length;
+}
+
+EVALUATE(FLOGR) {
+ DCHECK_OPCODE(FLOGR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+
+ DCHECK(r1 % 2 == 0);
+
+ int64_t r2_val = get_register(r2);
+
+ int i = 0;
+ for (; i < 64; i++) {
+ if (r2_val < 0) break;
+ r2_val <<= 1;
+ }
+
+ r2_val = get_register(r2);
+
+  // Use 64-bit arithmetic for the mask (1 << 63 overflows a 32-bit int),
+  // and guard against i == 64 (zero input), where 63 - i would be negative.
+  uint64_t mask =
+      (i == 64) ? 0 : ~(static_cast<uint64_t>(1) << (63 - i));
+  set_register(r1, i);
+  set_register(r1 + 1, r2_val & mask);
+ return length;
+}
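+
+// Illustrative FLOGR traces: input 0x8000000000000000 gives r1 = 0; input 1
+// gives r1 = 63; in both cases r1 + 1 receives the input with the leftmost
+// one bit cleared (here 0).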
+
+EVALUATE(LLGCR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLGHR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MLGR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DLGR) {
+ DCHECK_OPCODE(DLGR);
+#ifdef V8_TARGET_ARCH_S390X
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint64_t r1_val = get_register(r1);
+ uint64_t r2_val = get_register(r2);
+ DCHECK(r1 % 2 == 0);
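+  // Divide Logical (64 <- 128): the even/odd pair <r1, r1 + 1> forms the
+  // 128-bit dividend; GCC/Clang's unsigned __int128 extension carries it
+  // through the division below.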
+ unsigned __int128 dividend = static_cast<unsigned __int128>(r1_val) << 64;
+  // Zero-extend the odd register; sign extension would corrupt the high word.
+  dividend += static_cast<uint64_t>(get_register(r1 + 1));
+ uint64_t remainder = dividend % r2_val;
+ uint64_t quotient = dividend / r2_val;
+ set_register(r1, remainder);
+ set_register(r1 + 1, quotient);
+ return length;
+#else
+ UNREACHABLE();
+#endif
+}
+
+EVALUATE(ALCGR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLBGR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(EPSW) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRTT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRTO) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TROT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TROO) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLCR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLHR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MLR) {
+ DCHECK_OPCODE(MLR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ DCHECK(r1 % 2 == 0);
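+  // Multiply Logical (64 <- 32): the 64-bit product of r1 + 1 and r2 is
+  // split across the even/odd pair <r1, r1 + 1>.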
+
+ uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint64_t product =
+ static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(r2_val);
+ int32_t high_bits = product >> 32;
+ int32_t low_bits = product & 0x00000000FFFFFFFF;
+ set_low_register(r1, high_bits);
+ set_low_register(r1 + 1, low_bits);
+ return length;
+}
+
+EVALUATE(DLR) {
+ DCHECK_OPCODE(DLR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ DCHECK(r1 % 2 == 0);
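+  // The even/odd pair <r1, r1 + 1> forms the 64-bit dividend; the remainder
+  // lands in the even register and the quotient in the odd one.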
+ uint64_t dividend = static_cast<uint64_t>(r1_val) << 32;
+ dividend += get_low_register<uint32_t>(r1 + 1);
+ uint32_t remainder = dividend % r2_val;
+ uint32_t quotient = dividend / r2_val;
+ set_low_register(r1, remainder);
+ set_low_register(r1 + 1, quotient);
+ return length;
+}
+
+EVALUATE(ALCR) {
+ DCHECK_OPCODE(ALCR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t alu_out = 0;
+ bool isOF = false;
+
+ alu_out = r1_val + r2_val;
+ bool isOF_original = CheckOverflowForUIntAdd(r1_val, r2_val);
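+  // Condition codes 2 and 3 after a logical add mean a carry came out of
+  // the previous operation, so the carry bit is added in as well.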
+ if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+ alu_out = alu_out + 1;
+ isOF = isOF_original || CheckOverflowForUIntAdd(alu_out, 1);
+ } else {
+ isOF = isOF_original;
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+ return length;
+}
+
+EVALUATE(SLBR) {
+ DCHECK_OPCODE(SLBR);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ uint32_t r1_val = get_low_register<uint32_t>(r1);
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t alu_out = 0;
+ bool isOF = false;
+
+ alu_out = r1_val - r2_val;
+ bool isOF_original = CheckOverflowForUIntSub(r1_val, r2_val);
+ if (TestConditionCode((Condition)2) || TestConditionCode((Condition)3)) {
+ alu_out = alu_out - 1;
+ isOF = isOF_original || CheckOverflowForUIntSub(alu_out, 1);
+ } else {
+ isOF = isOF_original;
+ }
+ set_low_register(r1, alu_out);
+ SetS390ConditionCodeCarry<uint32_t>(alu_out, isOF);
+ return length;
+}
+
+EVALUATE(CU14) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CU24) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CU41) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CU42) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRTRE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRSTU) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TRTE) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AHHHR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SHHHR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALHHHR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLHHHR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CHHR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AHHLR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SHHLR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALHHLR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLHHLR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CHLR) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(POPCNT_Z) {
+ DCHECK_OPCODE(POPCNT_Z);
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_register(r2);
+ int64_t r1_val = 0;
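+  // Population Count: each byte of r1 receives the number of one bits in
+  // the corresponding byte of r2.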
+
+ uint8_t* r2_val_ptr = reinterpret_cast<uint8_t*>(&r2_val);
+ uint8_t* r1_val_ptr = reinterpret_cast<uint8_t*>(&r1_val);
+ for (int i = 0; i < 8; i++) {
+ uint32_t x = static_cast<uint32_t>(r2_val_ptr[i]);
+#if defined(__GNUC__)
+ r1_val_ptr[i] = __builtin_popcount(x);
+#else
+#error unsupported __builtin_popcount
+#endif
+ }
+ set_register(r1, static_cast<uint64_t>(r1_val));
+ return length;
+}
+
+EVALUATE(LOCGR) {
+ DCHECK_OPCODE(LOCGR);
+ DECODE_RRF_C_INSTRUCTION(r1, r2, m3);
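+  // Load (64) On Condition: r2 is copied only when the current condition
+  // code is selected by mask m3.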
+ if (TestConditionCode(m3)) {
+ set_register(r1, get_register(r2));
+ }
+ return length;
+}
+
+EVALUATE(NGRK) {
+ DCHECK_OPCODE(NGRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit non-clobbering arithmetic / bitwise ops.
+ int64_t r2_val = get_register(r2);
+ int64_t r3_val = get_register(r3);
+ uint64_t bitwise_result = 0;
+ bitwise_result = r2_val & r3_val;
+ SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+ set_register(r1, bitwise_result);
+ return length;
+}
+
+EVALUATE(OGRK) {
+ DCHECK_OPCODE(OGRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit non-clobbering arithmetic / bitwise ops.
+ int64_t r2_val = get_register(r2);
+ int64_t r3_val = get_register(r3);
+ uint64_t bitwise_result = 0;
+ bitwise_result = r2_val | r3_val;
+ SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+ set_register(r1, bitwise_result);
+ return length;
+}
+
+EVALUATE(XGRK) {
+ DCHECK_OPCODE(XGRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit non-clobbering arithmetic / bitwise ops.
+ int64_t r2_val = get_register(r2);
+ int64_t r3_val = get_register(r3);
+ uint64_t bitwise_result = 0;
+ bitwise_result = r2_val ^ r3_val;
+ SetS390BitWiseConditionCode<uint64_t>(bitwise_result);
+ set_register(r1, bitwise_result);
+ return length;
+}
+
+EVALUATE(AGRK) {
+ DCHECK_OPCODE(AGRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit non-clobbering arithmetic / bitwise ops.
+ int64_t r2_val = get_register(r2);
+ int64_t r3_val = get_register(r3);
+ bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int64_t);
+ SetS390ConditionCode<int64_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val + r3_val);
+ return length;
+}
+
+EVALUATE(SGRK) {
+ DCHECK_OPCODE(SGRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit non-clobbering arithmetic / bitwise ops.
+ int64_t r2_val = get_register(r2);
+ int64_t r3_val = get_register(r3);
+ bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int64_t);
+ SetS390ConditionCode<int64_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val - r3_val);
+ return length;
+}
+
+EVALUATE(ALGRK) {
+ DCHECK_OPCODE(ALGRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit non-clobbering unsigned arithmetic
+ uint64_t r2_val = get_register(r2);
+ uint64_t r3_val = get_register(r3);
+ bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+ SetS390ConditionCode<uint64_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val + r3_val);
+ return length;
+}
+
+EVALUATE(SLGRK) {
+ DCHECK_OPCODE(SLGRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 64-bit non-clobbering unsigned arithmetic
+ uint64_t r2_val = get_register(r2);
+ uint64_t r3_val = get_register(r3);
+ bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+ SetS390ConditionCode<uint64_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, r2_val - r3_val);
+ return length;
+}
+
+EVALUATE(LOCR) {
+ DCHECK_OPCODE(LOCR);
+ DECODE_RRF_C_INSTRUCTION(r1, r2, m3);
+ if (TestConditionCode(m3)) {
+ set_low_register(r1, get_low_register<int32_t>(r2));
+ }
+ return length;
+}
+
+EVALUATE(NRK) {
+ DCHECK_OPCODE(NRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit non-clobbering arithmetic / bitwise ops
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ // Assume bitwise operation here
+ uint32_t bitwise_result = 0;
+ bitwise_result = r2_val & r3_val;
+ SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+ set_low_register(r1, bitwise_result);
+ return length;
+}
+
+EVALUATE(ORK) {
+ DCHECK_OPCODE(ORK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit non-clobbering arithmetic / bitwise ops
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ // Assume bitwise operation here
+ uint32_t bitwise_result = 0;
+ bitwise_result = r2_val | r3_val;
+ SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+ set_low_register(r1, bitwise_result);
+ return length;
+}
+
+EVALUATE(XRK) {
+ DCHECK_OPCODE(XRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit non-clobbering arithmetic / bitwise ops
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ // Assume bitwise operation here
+ uint32_t bitwise_result = 0;
+ bitwise_result = r2_val ^ r3_val;
+ SetS390BitWiseConditionCode<uint32_t>(bitwise_result);
+ set_low_register(r1, bitwise_result);
+ return length;
+}
+
+EVALUATE(ARK) {
+ DCHECK_OPCODE(ARK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit non-clobbering arithmetic / bitwise ops
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ bool isOF = CheckOverflowForIntAdd(r2_val, r3_val, int32_t);
+ SetS390ConditionCode<int32_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val + r3_val);
+ return length;
+}
+
+EVALUATE(SRK) {
+ DCHECK_OPCODE(SRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit non-clobbering arithmetic / bitwise ops
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ bool isOF = CheckOverflowForIntSub(r2_val, r3_val, int32_t);
+ SetS390ConditionCode<int32_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val - r3_val);
+ return length;
+}
+
+EVALUATE(ALRK) {
+ DCHECK_OPCODE(ALRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit non-clobbering unsigned arithmetic
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t r3_val = get_low_register<uint32_t>(r3);
+ bool isOF = CheckOverflowForUIntAdd(r2_val, r3_val);
+ SetS390ConditionCode<uint32_t>(r2_val + r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val + r3_val);
+ return length;
+}
+
+EVALUATE(SLRK) {
+ DCHECK_OPCODE(SLRK);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  // 32-bit non-clobbering unsigned arithmetic
+ uint32_t r2_val = get_low_register<uint32_t>(r2);
+ uint32_t r3_val = get_low_register<uint32_t>(r3);
+ bool isOF = CheckOverflowForUIntSub(r2_val, r3_val);
+ SetS390ConditionCode<uint32_t>(r2_val - r3_val, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, r2_val - r3_val);
+ return length;
+}
+
+EVALUATE(LTG) {
+ DCHECK_OPCODE(LTG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int64_t value = ReadDW(addr);
+ set_register(r1, value);
+ SetS390ConditionCode<int64_t>(value, 0);
+ return length;
+}
+
+EVALUATE(CVBY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AG) {
+ DCHECK_OPCODE(AG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+ alu_out += mem_val;
+  SetS390ConditionCode<int64_t>(alu_out, 0);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SG) {
+ DCHECK_OPCODE(SG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+ alu_out -= mem_val;
+  SetS390ConditionCode<int64_t>(alu_out, 0);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(ALG) {
+ DCHECK_OPCODE(ALG);
+#ifndef V8_TARGET_ARCH_S390X
+ DCHECK(false);
+#endif
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ uint64_t r1_val = get_register(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ uint64_t alu_out = r1_val;
+ uint64_t mem_val = static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+ alu_out += mem_val;
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SLG) {
+ DCHECK_OPCODE(SLG);
+#ifndef V8_TARGET_ARCH_S390X
+ DCHECK(false);
+#endif
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ uint64_t r1_val = get_register(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ uint64_t alu_out = r1_val;
+ uint64_t mem_val = static_cast<uint64_t>(ReadDW(b2_val + d2_val + x2_val));
+ alu_out -= mem_val;
+ SetS390ConditionCode<uint64_t>(alu_out, 0);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(MSG) {
+ DCHECK_OPCODE(MSG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ int64_t mem_val = ReadDW(b2_val + d2_val + x2_val);
+ int64_t r1_val = get_register(r1);
+ set_register(r1, mem_val * r1_val);
+ return length;
+}
+
+EVALUATE(DSG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CVBG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LT) {
+ DCHECK_OPCODE(LT);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int32_t value = ReadW(addr, instr);
+ set_low_register(r1, value);
+ SetS390ConditionCode<int32_t>(value, 0);
+ return length;
+}
+
+EVALUATE(LGH) {
+ DCHECK_OPCODE(LGH);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int64_t mem_val = static_cast<int64_t>(ReadH(addr, instr));
+ set_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(LLGF) {
+ DCHECK_OPCODE(LLGF);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr, instr));
+ set_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(LLGT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AGF) {
+ DCHECK_OPCODE(AGF);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ uint64_t r1_val = get_register(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ uint64_t alu_out = r1_val;
+  // The 32-bit second operand is sign-extended before the 64-bit add.
+  int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ alu_out += mem_val;
+ SetS390ConditionCode<int64_t>(alu_out, 0);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SGF) {
+ DCHECK_OPCODE(SGF);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ uint64_t r1_val = get_register(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ uint64_t alu_out = r1_val;
+  // The 32-bit second operand is sign-extended before the 64-bit subtract.
+  int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ alu_out -= mem_val;
+ SetS390ConditionCode<int64_t>(alu_out, 0);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(ALGF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLGF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSGF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DSGF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LRVG) {
+ DCHECK_OPCODE(LRVG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = b2_val + x2_val + d2;
+ int64_t mem_val = ReadW64(mem_addr, instr);
+ set_register(r1, ByteReverse(mem_val));
+ return length;
+}
+
+EVALUATE(LRV) {
+ DCHECK_OPCODE(LRV);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = b2_val + x2_val + d2;
+ int32_t mem_val = ReadW(mem_addr, instr);
+ set_low_register(r1, ByteReverse(mem_val));
+ return length;
+}
+
+EVALUATE(LRVH) {
+ DCHECK_OPCODE(LRVH);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
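+  // Only the low halfword of r1 is replaced with the byte-reversed value;
+  // the upper halfword is preserved.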
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = b2_val + x2_val + d2;
+ int16_t mem_val = ReadH(mem_addr, instr);
+ int32_t result = ByteReverse(mem_val) & 0x0000ffff;
+ result |= r1_val & 0xffff0000;
+ set_low_register(r1, result);
+ return length;
+}
+
+EVALUATE(CG) {
+ DCHECK_OPCODE(CG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+ SetS390ConditionCode<int64_t>(alu_out, mem_val);
+ return length;
+}
+
+EVALUATE(CLG) {
+ DCHECK_OPCODE(CLG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+ SetS390ConditionCode<uint64_t>(alu_out, mem_val);
+ return length;
+}
+
+EVALUATE(NTSTG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CVDY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CVDG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLGF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LTGF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(PFD) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STRV) {
+ DCHECK_OPCODE(STRV);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = b2_val + x2_val + d2;
+ WriteW(mem_addr, ByteReverse(r1_val), instr);
+ return length;
+}
+
+EVALUATE(STRVG) {
+ DCHECK_OPCODE(STRVG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t r1_val = get_register(r1);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = b2_val + x2_val + d2;
+ WriteDW(mem_addr, ByteReverse(r1_val));
+ return length;
+}
+
+EVALUATE(STRVH) {
+ DCHECK_OPCODE(STRVH);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t mem_addr = b2_val + x2_val + d2;
+ int16_t result = static_cast<int16_t>(r1_val >> 16);
+ WriteH(mem_addr, ByteReverse(result), instr);
+ return length;
+}
+
+EVALUATE(BCTG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSY) {
+ DCHECK_OPCODE(MSY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ set_low_register(r1, mem_val * r1_val);
+ return length;
+}
+
+EVALUATE(NY) {
+ DCHECK_OPCODE(NY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ alu_out &= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(CLY) {
+ DCHECK_OPCODE(CLY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ SetS390ConditionCode<uint32_t>(alu_out, mem_val);
+ return length;
+}
+
+EVALUATE(OY) {
+ DCHECK_OPCODE(OY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ alu_out |= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(XY) {
+ DCHECK_OPCODE(XY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ alu_out ^= mem_val;
+ SetS390BitWiseConditionCode<uint32_t>(alu_out);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(CY) {
+ DCHECK_OPCODE(CY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ SetS390ConditionCode<int32_t>(alu_out, mem_val);
+ return length;
+}
+
+EVALUATE(AY) {
+ DCHECK_OPCODE(AY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ bool isOF = false;
+ isOF = CheckOverflowForIntAdd(alu_out, mem_val, int32_t);
+ alu_out += mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SY) {
+ DCHECK_OPCODE(SY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int32_t alu_out = get_low_register<int32_t>(r1);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ bool isOF = false;
+ isOF = CheckOverflowForIntSub(alu_out, mem_val, int32_t);
+ alu_out -= mem_val;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(MFY) {
+ DCHECK_OPCODE(MFY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ DCHECK(r1 % 2 == 0);
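+  // The 64-bit product is split across the even/odd pair <r1, r1 + 1>.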
+ int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t r1_val = get_low_register<int32_t>(r1 + 1);
+ int64_t product =
+ static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
+ int32_t high_bits = product >> 32;
+ int32_t low_bits = product & 0x00000000FFFFFFFF;
+ set_low_register(r1, high_bits);
+ set_low_register(r1 + 1, low_bits);
+ return length;
+}
+
+EVALUATE(ALY) {
+ DCHECK_OPCODE(ALY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ alu_out += mem_val;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<uint32_t>(alu_out, 0);
+ return length;
+}
+
+EVALUATE(SLY) {
+ DCHECK_OPCODE(SLY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ uint32_t alu_out = get_low_register<uint32_t>(r1);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ alu_out -= mem_val;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<uint32_t>(alu_out, 0);
+ return length;
+}
+
+EVALUATE(STHY) {
+ DCHECK_OPCODE(STHY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ uint16_t value = get_low_register<uint32_t>(r1);
+ WriteH(addr, value, instr);
+ return length;
+}
+
+EVALUATE(LAY) {
+ DCHECK_OPCODE(LAY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Load Address
+ int rb = b2;
+ int rx = x2;
+ int offset = d2;
+ int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+ int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+ set_register(r1, rx_val + rb_val + offset);
+ return length;
+}
+
+EVALUATE(STCY) {
+ DCHECK_OPCODE(STCY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ uint8_t value = get_low_register<uint32_t>(r1);
+ WriteB(addr, value);
+ return length;
+}
+
+EVALUATE(ICY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAEY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LB) {
+ DCHECK_OPCODE(LB);
+ // Miscellaneous Loads and Stores
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int32_t mem_val = ReadB(addr);
+ set_low_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(LGB) {
+ DCHECK_OPCODE(LGB);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int64_t mem_val = ReadB(addr);
+ set_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(LHY) {
+ DCHECK_OPCODE(LHY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int32_t result = static_cast<int32_t>(ReadH(addr, instr));
+ set_low_register(r1, result);
+ return length;
+}
+
+EVALUATE(CHY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AHY) {
+ DCHECK_OPCODE(AHY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ int32_t mem_val =
+ static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t alu_out = 0;
+ bool isOF = false;
+ alu_out = r1_val + mem_val;
+ isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(SHY) {
+ DCHECK_OPCODE(SHY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int32_t r1_val = get_low_register<int32_t>(r1);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ int32_t mem_val =
+ static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t alu_out = 0;
+ bool isOF = false;
+ alu_out = r1_val - mem_val;
+  isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(MHY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NG) {
+ DCHECK_OPCODE(NG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+ alu_out &= mem_val;
+  SetS390BitWiseConditionCode<uint64_t>(alu_out);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(OG) {
+ DCHECK_OPCODE(OG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+ alu_out |= mem_val;
+  SetS390BitWiseConditionCode<uint64_t>(alu_out);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(XG) {
+ DCHECK_OPCODE(XG);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t alu_out = get_register(r1);
+ int64_t mem_val = ReadDW(b2_val + x2_val + d2);
+ alu_out ^= mem_val;
+  SetS390BitWiseConditionCode<uint64_t>(alu_out);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(LGAT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MLG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DLG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALCG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLBG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STPQ) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LPQ) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLGH) {
+ DCHECK_OPCODE(LLGH);
+ // Load Logical Halfword
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ set_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(LLH) {
+ DCHECK_OPCODE(LLH);
+ // Load Logical Halfword
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ set_low_register(r1, mem_val);
+ return length;
+}
+
+EVALUATE(ML) {
+ DCHECK_OPCODE(ML);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ DCHECK(r1 % 2 == 0);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
+ uint64_t product =
+ static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(mem_val);
+ uint32_t high_bits = product >> 32;
+ uint32_t low_bits = product & 0x00000000FFFFFFFF;
+ set_low_register(r1, high_bits);
+ set_low_register(r1 + 1, low_bits);
+ return length;
+}
+
+EVALUATE(DL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLGTAT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLGFAT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LBH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LLHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STHH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LFHAT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LFH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STFH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CHF) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVCDK) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVHHI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVGHI) {
+ DCHECK_OPCODE(MVGHI);
+ // Move Integer (64)
+ DECODE_SIL_INSTRUCTION(b1, d1, i2);
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t dst_addr = b1_val + d1;
+  WriteDW(dst_addr, i2);
+ return length;
+}
+
+EVALUATE(MVHI) {
+ DCHECK_OPCODE(MVHI);
+ // Move Integer (32)
+ DECODE_SIL_INSTRUCTION(b1, d1, i2);
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+  intptr_t dst_addr = b1_val + d1;
+  WriteW(dst_addr, i2, instr);
+ return length;
+}
+
+EVALUATE(CHHSI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGHSI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CHSI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLFHSI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TBEGIN) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TBEGINC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LMG) {
+ DCHECK_OPCODE(LMG);
+  // Load Multiple (64).
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ int rb = b2;
+ int offset = d2;
+
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs loaded properly.
+ if (r3 < r1) r3 += 16;
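+  // e.g. r1 = 14, r3 = 3 loads r14, r15, r0, r1, r2 and r3.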
+
+ int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+
+  // Load each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ int64_t value = ReadDW(rb_val + offset + 8 * i);
+ set_register((r1 + i) % 16, value);
+ }
+ return length;
+}
+
+EVALUATE(SRAG) {
+ DCHECK_OPCODE(SRAG);
+ // 64-bit non-clobbering shift-left/right arithmetic
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int64_t r3_val = get_register(r3);
+ intptr_t alu_out = 0;
+ bool isOF = false;
+ alu_out = r3_val >> shiftBits;
+ set_register(r1, alu_out);
+ SetS390ConditionCode<intptr_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(SLAG) {
+ DCHECK_OPCODE(SLAG);
+ // 64-bit non-clobbering shift-left/right arithmetic
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int64_t r3_val = get_register(r3);
+ intptr_t alu_out = 0;
+ bool isOF = false;
+ isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+ alu_out = r3_val << shiftBits;
+ set_register(r1, alu_out);
+ SetS390ConditionCode<intptr_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(SRLG) {
+ DCHECK_OPCODE(SRLG);
+ // For SLLG/SRLG, the 64-bit third operand is shifted the number
+ // of bits specified by the second-operand address, and the result is
+ // placed at the first-operand location. Except for when the R1 and R3
+ // fields designate the same register, the third operand remains
+ // unchanged in general register R3.
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
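+  // e.g. a second-operand address of 0x41 shifts by 1: only bits 58-63 count.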
+ // unsigned
+ uint64_t r3_val = get_register(r3);
+ uint64_t alu_out = 0;
+ alu_out = r3_val >> shiftBits;
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SLLG) {
+ DCHECK_OPCODE(SLLG);
+ // For SLLG/SRLG, the 64-bit third operand is shifted the number
+ // of bits specified by the second-operand address, and the result is
+ // placed at the first-operand location. Except for when the R1 and R3
+ // fields designate the same register, the third operand remains
+ // unchanged in general register R3.
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ // unsigned
+ uint64_t r3_val = get_register(r3);
+ uint64_t alu_out = 0;
+ alu_out = r3_val << shiftBits;
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(CSY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RLLG) {
+ DCHECK_OPCODE(RLLG);
+  // For RLLG, the 64-bit third operand is rotated left the number of bits
+  // specified by the second-operand address, and the result is placed at
+  // the first-operand location. Except for when the R1 and R3 fields
+  // designate the same register, the third operand remains unchanged in
+  // general register R3.
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ // unsigned
+ uint64_t r3_val = get_register(r3);
+ uint64_t alu_out = 0;
+  // A rotate count of zero must not produce a shift by 64, which would be
+  // undefined behaviour.
+  uint64_t rotateBits = (shiftBits == 0) ? 0 : r3_val >> (64 - shiftBits);
+  alu_out = (r3_val << shiftBits) | (rotateBits);
+ set_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(STMG) {
+ DCHECK_OPCODE(STMG);
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ int rb = b2;
+ int offset = d2;
+
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs stored properly.
+ if (r3 < r1) r3 += 16;
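+  // e.g. r1 = 14, r3 = 3 stores r14, r15, r0, r1, r2 and r3.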
+
+ int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+
+ // Store each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ int64_t value = get_register((r1 + i) % 16);
+ WriteDW(rb_val + offset + 8 * i, value);
+ }
+ return length;
+}
+
+EVALUATE(STMH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCMH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STCMY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDSY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDSG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BXHG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BXLEG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ECAG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TMY) {
+ DCHECK_OPCODE(TMY);
+ // Test Under Mask (Mem - Imm) (8)
+ DECODE_SIY_INSTRUCTION(b1, d1, i2);
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t d1_val = d1;
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t imm_val = i2;
+ uint8_t selected_bits = mem_val & imm_val;
+ // CC0: Selected bits are zero
+ // CC1: Selected bits mixed zeros and ones
+ // CC3: Selected bits all ones
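+  // e.g. mem = 0b1010, imm = 0b0011: selected = 0b0010, mixed, so CC1.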
+ if (0 == selected_bits) {
+ condition_reg_ = CC_EQ; // CC0
+ } else if (selected_bits == imm_val) {
+ condition_reg_ = 0x1; // CC3
+ } else {
+ condition_reg_ = 0x4; // CC1
+ }
+ return length;
+}
+
+EVALUATE(MVIY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(NIY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLIY) {
+ DCHECK_OPCODE(CLIY);
+ DECODE_SIY_INSTRUCTION(b1, d1, i2);
+ // Compare Immediate (Mem - Imm) (8)
+ int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+ intptr_t d1_val = d1;
+ intptr_t addr = b1_val + d1_val;
+ uint8_t mem_val = ReadB(addr);
+ uint8_t imm_val = i2;
+ SetS390ConditionCode<uint8_t>(mem_val, imm_val);
+ return length;
+}
+
+EVALUATE(OIY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(XIY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ASI) {
+ DCHECK_OPCODE(ASI);
+ // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+ // The below static cast to 8 bit and then to 32 bit is necessary
+ // because siyInstr->I2Value() returns a uint8_t, which a direct
+ // cast to int32_t could incorrectly interpret.
+ DECODE_SIY_INSTRUCTION(b1, d1, i2_unsigned);
+ int8_t i2_8bit = static_cast<int8_t>(i2_unsigned);
+ int32_t i2 = static_cast<int32_t>(i2_8bit);
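+  // e.g. an immediate byte of 0xFF is interpreted as -1 rather than 255.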
+ intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+
+ int d1_val = d1;
+ intptr_t addr = b1_val + d1_val;
+
+ int32_t mem_val = ReadW(addr, instr);
+ bool isOF = CheckOverflowForIntAdd(mem_val, i2, int32_t);
+ int32_t alu_out = mem_val + i2;
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ WriteW(addr, alu_out, instr);
+ return length;
+}
+
+EVALUATE(ALSI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AGSI) {
+ DCHECK_OPCODE(AGSI);
+ // TODO(bcleung): Change all fooInstr->I2Value() to template functions.
+ // The below static cast to 8 bit and then to 32 bit is necessary
+ // because siyInstr->I2Value() returns a uint8_t, which a direct
+ // cast to int32_t could incorrectly interpret.
+ DECODE_SIY_INSTRUCTION(b1, d1, i2_unsigned);
+ int8_t i2_8bit = static_cast<int8_t>(i2_unsigned);
+ int64_t i2 = static_cast<int64_t>(i2_8bit);
+ intptr_t b1_val = (b1 == 0) ? 0 : get_register(b1);
+
+ int d1_val = d1;
+ intptr_t addr = b1_val + d1_val;
+
+ int64_t mem_val = ReadDW(addr);
+  bool isOF = CheckOverflowForIntAdd(mem_val, i2, int64_t);
+  int64_t alu_out = mem_val + i2;
+  SetS390ConditionCode<int64_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ WriteDW(addr, alu_out);
+ return length;
+}
+
+EVALUATE(ALGSI) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ICMH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ICMY) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MVCLU) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLCLU) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STMY) {
+ DCHECK_OPCODE(STMY);
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // Store Multiple (32)
+ int offset = d2;
+
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs stored properly.
+ if (r3 < r1) r3 += 16;
+
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+
+ // Store each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ int32_t value = get_low_register<int32_t>((r1 + i) % 16);
+ WriteW(b2_val + offset + 4 * i, value, instr);
+ }
+ return length;
+}
+
+EVALUATE(LMH) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LMY) {
+ DCHECK_OPCODE(LMY);
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+  // Load Multiple (32)
+ int offset = d2;
+
+  // Regs roll around if r3 is less than r1.
+  // Artificially increase r3 by 16 so we can calculate
+  // the number of regs loaded properly.
+ if (r3 < r1) r3 += 16;
+
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+
+  // Load each register in ascending order.
+ for (int i = 0; i <= r3 - r1; i++) {
+ int32_t value = ReadW(b2_val + offset + 4 * i, instr);
+ set_low_register((r1 + i) % 16, value);
+ }
+ return length;
+}
+
+EVALUATE(TP) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRAK) {
+ DCHECK_OPCODE(SRAK);
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // 32-bit non-clobbering shift-left/right arithmetic
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ alu_out = r3_val >> shiftBits;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(SLAK) {
+ DCHECK_OPCODE(SLAK);
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // 32-bit non-clobbering shift-left/right arithmetic
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ int32_t alu_out = 0;
+ bool isOF = false;
+ isOF = CheckOverflowForShiftLeft(r3_val, shiftBits);
+ alu_out = r3_val << shiftBits;
+ set_low_register(r1, alu_out);
+ SetS390ConditionCode<int32_t>(alu_out, 0);
+ SetS390OverflowCode(isOF);
+ return length;
+}
+
+EVALUATE(SRLK) {
+ DCHECK_OPCODE(SRLK);
+  // For SLLK/SRLK, the 32-bit third operand is shifted the number
+ // of bits specified by the second-operand address, and the result is
+ // placed at the first-operand location. Except for when the R1 and R3
+ // fields designate the same register, the third operand remains
+ // unchanged in general register R3.
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ // unsigned
+ uint32_t r3_val = get_low_register<uint32_t>(r3);
+ uint32_t alu_out = 0;
+ alu_out = r3_val >> shiftBits;
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(SLLK) {
+ DCHECK_OPCODE(SLLK);
+  // For SLLK/SRLK, the 32-bit third operand is shifted the number
+ // of bits specified by the second-operand address, and the result is
+ // placed at the first-operand location. Except for when the R1 and R3
+ // fields designate the same register, the third operand remains
+ // unchanged in general register R3.
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2);
+ // only takes rightmost 6 bits
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int shiftBits = (b2_val + d2) & 0x3F;
+ // unsigned
+ uint32_t r3_val = get_low_register<uint32_t>(r3);
+ uint32_t alu_out = 0;
+ alu_out = r3_val << shiftBits;
+ set_low_register(r1, alu_out);
+ return length;
+}
+
+EVALUATE(LOCG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STOCG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LANG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAOG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAXG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAAG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAALG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LOC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(STOC) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAN) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAO) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAX) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAA) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LAAL) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BRXHG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(BRXLG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RISBLG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RNSBG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ROSBG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RXSBG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RISBGN) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(RISBHG) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGRJ) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGIT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CIT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CLFIT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGIJ) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CIJ) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALHSIK) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(ALGHSIK) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGRB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CGIB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CIB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LDEB) {
+ DCHECK_OPCODE(LDEB);
+  // Load Lengthened (short BFP to long BFP)
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int rb = b2;
+ int rx = x2;
+ int offset = d2;
+ int64_t rb_val = (rb == 0) ? 0 : get_register(rb);
+ int64_t rx_val = (rx == 0) ? 0 : get_register(rx);
+ double ret =
+ static_cast<double>(*reinterpret_cast<float*>(rx_val + rb_val + offset));
+ set_d_register_from_double(r1, ret);
+ return length;
+}
+
+EVALUATE(LXDB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LXEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MXDB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(AEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MDEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(DEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MAEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TCEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TCDB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TCXB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SQEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SQDB) {
+ DCHECK_OPCODE(SQDB);
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+  double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+  double r1_val = std::sqrt(dbl_val);
+  set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(MEEB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(KDB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDB) {
+ DCHECK_OPCODE(CDB);
+
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ double r1_val = get_double_from_d_register(r1);
+ double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+ SetS390ConditionCode<double>(r1_val, dbl_val);
+ return length;
+}
+
+EVALUATE(ADB) {
+ DCHECK_OPCODE(ADB);
+
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ double r1_val = get_double_from_d_register(r1);
+ double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+ r1_val += dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(SDB) {
+ DCHECK_OPCODE(SDB);
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ double r1_val = get_double_from_d_register(r1);
+ double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+ r1_val -= dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(MDB) {
+ DCHECK_OPCODE(MDB);
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ double r1_val = get_double_from_d_register(r1);
+ double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+ r1_val *= dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(DDB) {
+ DCHECK_OPCODE(DDB);
+ DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ intptr_t d2_val = d2;
+ double r1_val = get_double_from_d_register(r1);
+ double dbl_val = ReadDouble(b2_val + x2_val + d2_val);
+ r1_val /= dbl_val;
+ set_d_register_from_double(r1, r1_val);
+ SetS390ConditionCode<double>(r1_val, 0);
+ return length;
+}
+
+EVALUATE(MADB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(MSDB) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLDT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRDT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SLXT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(SRXT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TDCET) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TDGET) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TDCDT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TDGDT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TDCXT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(TDGXT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(LEY) {
+ DCHECK_OPCODE(LEY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ float float_val = *reinterpret_cast<float*>(addr);
+ set_d_register_from_float32(r1, float_val);
+ return length;
+}
+
+EVALUATE(LDY) {
+ DCHECK_OPCODE(LDY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ uint64_t dbl_val = *reinterpret_cast<uint64_t*>(addr);
+ set_d_register(r1, dbl_val);
+ return length;
+}
+
+EVALUATE(STEY) {
+ DCHECK_OPCODE(STEY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int64_t frs_val = get_d_register(r1) >> 32;
+ WriteW(addr, static_cast<int32_t>(frs_val), instr);
+ return length;
+}
+
+EVALUATE(STDY) {
+ DCHECK_OPCODE(STDY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ // Miscellaneous Loads and Stores
+ int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ intptr_t addr = x2_val + b2_val + d2;
+ int64_t frs_val = get_d_register(r1);
+ WriteDW(addr, frs_val);
+ return length;
+}
+
+EVALUATE(CZDT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CZXT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CDZT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+EVALUATE(CXZT) {
+ UNIMPLEMENTED();
+ USE(instr);
+ return 0;
+}
+
+#undef EVALUATE
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index ae3dd58209..7af00ee25f 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -64,7 +64,7 @@ class SimulatorStack : public v8::internal::AllStatic {
// Running with a simulator.
#include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
#include "src/s390/constants-s390.h"
namespace v8 {
@@ -211,7 +211,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
- static void TearDown(HashMap* i_cache, Redirection* first);
+ static void TearDown(base::HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@@ -233,8 +233,7 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
- static void FlushICache(v8::internal::HashMap* i_cache, void* start,
- size_t size);
+ static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
@@ -282,6 +281,7 @@ class Simulator {
// Byte Reverse
inline int16_t ByteReverse(int16_t hword);
inline int32_t ByteReverse(int32_t word);
+ inline int64_t ByteReverse(int64_t dword);
// Read and write memory.
inline uint8_t ReadBU(intptr_t addr);
@@ -297,6 +297,7 @@ class Simulator {
inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
inline int32_t ReadW(intptr_t addr, Instruction* instr);
+ inline int64_t ReadW64(intptr_t addr, Instruction* instr);
inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
@@ -444,10 +445,9 @@ class Simulator {
void ExecuteInstruction(Instruction* instr, bool auto_incr_pc = true);
// ICache.
- static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
- static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+ static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+ static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
@@ -482,7 +482,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation
- v8::internal::HashMap* i_cache_;
+ base::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
@@ -507,6 +507,742 @@ class Simulator {
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
void DebugStart();
+
+ int DecodeInstructionOriginal(Instruction* instr);
+ int DecodeInstruction(Instruction* instr);
+ int Evaluate_Unknown(Instruction* instr);
+#define MAX_NUM_OPCODES (1 << 16)
+ typedef int (Simulator::*EvaluateFuncType)(Instruction*);
+
+ static EvaluateFuncType EvalTable[MAX_NUM_OPCODES];
+ static void EvalTableInit();
+
+#define EVALUATE(name) int Evaluate_##name(Instruction* instr)
+ EVALUATE(BKPT);
+ EVALUATE(SPM);
+ EVALUATE(BALR);
+ EVALUATE(BCTR);
+ EVALUATE(BCR);
+ EVALUATE(SVC);
+ EVALUATE(BSM);
+ EVALUATE(BASSM);
+ EVALUATE(BASR);
+ EVALUATE(MVCL);
+ EVALUATE(CLCL);
+ EVALUATE(LPR);
+ EVALUATE(LNR);
+ EVALUATE(LTR);
+ EVALUATE(LCR);
+ EVALUATE(NR);
+ EVALUATE(CLR);
+ EVALUATE(OR);
+ EVALUATE(XR);
+ EVALUATE(LR);
+ EVALUATE(CR);
+ EVALUATE(AR);
+ EVALUATE(SR);
+ EVALUATE(MR);
+ EVALUATE(DR);
+ EVALUATE(ALR);
+ EVALUATE(SLR);
+ EVALUATE(LDR);
+ EVALUATE(CDR);
+ EVALUATE(LER);
+ EVALUATE(STH);
+ EVALUATE(LA);
+ EVALUATE(STC);
+ EVALUATE(IC_z);
+ EVALUATE(EX);
+ EVALUATE(BAL);
+ EVALUATE(BCT);
+ EVALUATE(BC);
+ EVALUATE(LH);
+ EVALUATE(CH);
+ EVALUATE(AH);
+ EVALUATE(SH);
+ EVALUATE(MH);
+ EVALUATE(BAS);
+ EVALUATE(CVD);
+ EVALUATE(CVB);
+ EVALUATE(ST);
+ EVALUATE(LAE);
+ EVALUATE(N);
+ EVALUATE(CL);
+ EVALUATE(O);
+ EVALUATE(X);
+ EVALUATE(L);
+ EVALUATE(C);
+ EVALUATE(A);
+ EVALUATE(S);
+ EVALUATE(M);
+ EVALUATE(D);
+ EVALUATE(AL);
+ EVALUATE(SL);
+ EVALUATE(STD);
+ EVALUATE(LD);
+ EVALUATE(CD);
+ EVALUATE(STE);
+ EVALUATE(MS);
+ EVALUATE(LE);
+ EVALUATE(BRXH);
+ EVALUATE(BRXLE);
+ EVALUATE(BXH);
+ EVALUATE(BXLE);
+ EVALUATE(SRL);
+ EVALUATE(SLL);
+ EVALUATE(SRA);
+ EVALUATE(SLA);
+ EVALUATE(SRDL);
+ EVALUATE(SLDL);
+ EVALUATE(SRDA);
+ EVALUATE(SLDA);
+ EVALUATE(STM);
+ EVALUATE(TM);
+ EVALUATE(MVI);
+ EVALUATE(TS);
+ EVALUATE(NI);
+ EVALUATE(CLI);
+ EVALUATE(OI);
+ EVALUATE(XI);
+ EVALUATE(LM);
+ EVALUATE(MVCLE);
+ EVALUATE(CLCLE);
+ EVALUATE(MC);
+ EVALUATE(CDS);
+ EVALUATE(STCM);
+ EVALUATE(ICM);
+ EVALUATE(BPRP);
+ EVALUATE(BPP);
+ EVALUATE(TRTR);
+ EVALUATE(MVN);
+ EVALUATE(MVC);
+ EVALUATE(MVZ);
+ EVALUATE(NC);
+ EVALUATE(CLC);
+ EVALUATE(OC);
+ EVALUATE(XC);
+ EVALUATE(MVCP);
+ EVALUATE(TR);
+ EVALUATE(TRT);
+ EVALUATE(ED);
+ EVALUATE(EDMK);
+ EVALUATE(PKU);
+ EVALUATE(UNPKU);
+ EVALUATE(MVCIN);
+ EVALUATE(PKA);
+ EVALUATE(UNPKA);
+ EVALUATE(PLO);
+ EVALUATE(LMD);
+ EVALUATE(SRP);
+ EVALUATE(MVO);
+ EVALUATE(PACK);
+ EVALUATE(UNPK);
+ EVALUATE(ZAP);
+ EVALUATE(AP);
+ EVALUATE(SP);
+ EVALUATE(MP);
+ EVALUATE(DP);
+ EVALUATE(UPT);
+ EVALUATE(PFPO);
+ EVALUATE(IIHH);
+ EVALUATE(IIHL);
+ EVALUATE(IILH);
+ EVALUATE(IILL);
+ EVALUATE(NIHH);
+ EVALUATE(NIHL);
+ EVALUATE(NILH);
+ EVALUATE(NILL);
+ EVALUATE(OIHH);
+ EVALUATE(OIHL);
+ EVALUATE(OILH);
+ EVALUATE(OILL);
+ EVALUATE(LLIHH);
+ EVALUATE(LLIHL);
+ EVALUATE(LLILH);
+ EVALUATE(LLILL);
+ EVALUATE(TMLH);
+ EVALUATE(TMLL);
+ EVALUATE(TMHH);
+ EVALUATE(TMHL);
+ EVALUATE(BRC);
+ EVALUATE(BRAS);
+ EVALUATE(BRCT);
+ EVALUATE(BRCTG);
+ EVALUATE(LHI);
+ EVALUATE(LGHI);
+ EVALUATE(AHI);
+ EVALUATE(AGHI);
+ EVALUATE(MHI);
+ EVALUATE(MGHI);
+ EVALUATE(CHI);
+ EVALUATE(CGHI);
+ EVALUATE(LARL);
+ EVALUATE(LGFI);
+ EVALUATE(BRCL);
+ EVALUATE(BRASL);
+ EVALUATE(XIHF);
+ EVALUATE(XILF);
+ EVALUATE(IIHF);
+ EVALUATE(IILF);
+ EVALUATE(NIHF);
+ EVALUATE(NILF);
+ EVALUATE(OIHF);
+ EVALUATE(OILF);
+ EVALUATE(LLIHF);
+ EVALUATE(LLILF);
+ EVALUATE(MSGFI);
+ EVALUATE(MSFI);
+ EVALUATE(SLGFI);
+ EVALUATE(SLFI);
+ EVALUATE(AGFI);
+ EVALUATE(AFI);
+ EVALUATE(ALGFI);
+ EVALUATE(ALFI);
+ EVALUATE(CGFI);
+ EVALUATE(CFI);
+ EVALUATE(CLGFI);
+ EVALUATE(CLFI);
+ EVALUATE(LLHRL);
+ EVALUATE(LGHRL);
+ EVALUATE(LHRL);
+ EVALUATE(LLGHRL);
+ EVALUATE(STHRL);
+ EVALUATE(LGRL);
+ EVALUATE(STGRL);
+ EVALUATE(LGFRL);
+ EVALUATE(LRL);
+ EVALUATE(LLGFRL);
+ EVALUATE(STRL);
+ EVALUATE(EXRL);
+ EVALUATE(PFDRL);
+ EVALUATE(CGHRL);
+ EVALUATE(CHRL);
+ EVALUATE(CGRL);
+ EVALUATE(CGFRL);
+ EVALUATE(ECTG);
+ EVALUATE(CSST);
+ EVALUATE(LPD);
+ EVALUATE(LPDG);
+ EVALUATE(BRCTH);
+ EVALUATE(AIH);
+ EVALUATE(ALSIH);
+ EVALUATE(ALSIHN);
+ EVALUATE(CIH);
+ EVALUATE(STCK);
+ EVALUATE(CFC);
+ EVALUATE(IPM);
+ EVALUATE(HSCH);
+ EVALUATE(MSCH);
+ EVALUATE(SSCH);
+ EVALUATE(STSCH);
+ EVALUATE(TSCH);
+ EVALUATE(TPI);
+ EVALUATE(SAL);
+ EVALUATE(RSCH);
+ EVALUATE(STCRW);
+ EVALUATE(STCPS);
+ EVALUATE(RCHP);
+ EVALUATE(SCHM);
+ EVALUATE(CKSM);
+ EVALUATE(SAR);
+ EVALUATE(EAR);
+ EVALUATE(MSR);
+ EVALUATE(MVST);
+ EVALUATE(CUSE);
+ EVALUATE(SRST);
+ EVALUATE(XSCH);
+ EVALUATE(STCKE);
+ EVALUATE(STCKF);
+ EVALUATE(SRNM);
+ EVALUATE(STFPC);
+ EVALUATE(LFPC);
+ EVALUATE(TRE);
+ EVALUATE(CUUTF);
+ EVALUATE(CUTFU);
+ EVALUATE(STFLE);
+ EVALUATE(SRNMB);
+ EVALUATE(SRNMT);
+ EVALUATE(LFAS);
+ EVALUATE(PPA);
+ EVALUATE(ETND);
+ EVALUATE(TEND);
+ EVALUATE(NIAI);
+ EVALUATE(TABORT);
+ EVALUATE(TRAP4);
+ EVALUATE(LPEBR);
+ EVALUATE(LNEBR);
+ EVALUATE(LTEBR);
+ EVALUATE(LCEBR);
+ EVALUATE(LDEBR);
+ EVALUATE(LXDBR);
+ EVALUATE(LXEBR);
+ EVALUATE(MXDBR);
+ EVALUATE(KEBR);
+ EVALUATE(CEBR);
+ EVALUATE(AEBR);
+ EVALUATE(SEBR);
+ EVALUATE(MDEBR);
+ EVALUATE(DEBR);
+ EVALUATE(MAEBR);
+ EVALUATE(MSEBR);
+ EVALUATE(LPDBR);
+ EVALUATE(LNDBR);
+ EVALUATE(LTDBR);
+ EVALUATE(LCDBR);
+ EVALUATE(SQEBR);
+ EVALUATE(SQDBR);
+ EVALUATE(SQXBR);
+ EVALUATE(MEEBR);
+ EVALUATE(KDBR);
+ EVALUATE(CDBR);
+ EVALUATE(ADBR);
+ EVALUATE(SDBR);
+ EVALUATE(MDBR);
+ EVALUATE(DDBR);
+ EVALUATE(MADBR);
+ EVALUATE(MSDBR);
+ EVALUATE(LPXBR);
+ EVALUATE(LNXBR);
+ EVALUATE(LTXBR);
+ EVALUATE(LCXBR);
+ EVALUATE(LEDBRA);
+ EVALUATE(LDXBRA);
+ EVALUATE(LEXBRA);
+ EVALUATE(FIXBRA);
+ EVALUATE(KXBR);
+ EVALUATE(CXBR);
+ EVALUATE(AXBR);
+ EVALUATE(SXBR);
+ EVALUATE(MXBR);
+ EVALUATE(DXBR);
+ EVALUATE(TBEDR);
+ EVALUATE(TBDR);
+ EVALUATE(DIEBR);
+ EVALUATE(FIEBRA);
+ EVALUATE(THDER);
+ EVALUATE(THDR);
+ EVALUATE(DIDBR);
+ EVALUATE(FIDBRA);
+ EVALUATE(LXR);
+ EVALUATE(LPDFR);
+ EVALUATE(LNDFR);
+ EVALUATE(LCDFR);
+ EVALUATE(LZER);
+ EVALUATE(LZDR);
+ EVALUATE(LZXR);
+ EVALUATE(SFPC);
+ EVALUATE(SFASR);
+ EVALUATE(EFPC);
+ EVALUATE(CELFBR);
+ EVALUATE(CDLFBR);
+ EVALUATE(CXLFBR);
+ EVALUATE(CEFBRA);
+ EVALUATE(CDFBRA);
+ EVALUATE(CXFBRA);
+ EVALUATE(CFEBRA);
+ EVALUATE(CFDBRA);
+ EVALUATE(CFXBRA);
+ EVALUATE(CLFEBR);
+ EVALUATE(CLFDBR);
+ EVALUATE(CLFXBR);
+ EVALUATE(CELGBR);
+ EVALUATE(CDLGBR);
+ EVALUATE(CXLGBR);
+ EVALUATE(CEGBRA);
+ EVALUATE(CDGBRA);
+ EVALUATE(CXGBRA);
+ EVALUATE(CGEBRA);
+ EVALUATE(CGDBRA);
+ EVALUATE(CGXBRA);
+ EVALUATE(CLGEBR);
+ EVALUATE(CLGDBR);
+ EVALUATE(CFER);
+ EVALUATE(CFDR);
+ EVALUATE(CFXR);
+ EVALUATE(LDGR);
+ EVALUATE(CGER);
+ EVALUATE(CGDR);
+ EVALUATE(CGXR);
+ EVALUATE(LGDR);
+ EVALUATE(MDTR);
+ EVALUATE(MDTRA);
+ EVALUATE(DDTRA);
+ EVALUATE(ADTRA);
+ EVALUATE(SDTRA);
+ EVALUATE(LDETR);
+ EVALUATE(LEDTR);
+ EVALUATE(LTDTR);
+ EVALUATE(FIDTR);
+ EVALUATE(MXTRA);
+ EVALUATE(DXTRA);
+ EVALUATE(AXTRA);
+ EVALUATE(SXTRA);
+ EVALUATE(LXDTR);
+ EVALUATE(LDXTR);
+ EVALUATE(LTXTR);
+ EVALUATE(FIXTR);
+ EVALUATE(KDTR);
+ EVALUATE(CGDTRA);
+ EVALUATE(CUDTR);
+ EVALUATE(CDTR);
+ EVALUATE(EEDTR);
+ EVALUATE(ESDTR);
+ EVALUATE(KXTR);
+ EVALUATE(CGXTRA);
+ EVALUATE(CUXTR);
+ EVALUATE(CSXTR);
+ EVALUATE(CXTR);
+ EVALUATE(EEXTR);
+ EVALUATE(ESXTR);
+ EVALUATE(CDGTRA);
+ EVALUATE(CDUTR);
+ EVALUATE(CDSTR);
+ EVALUATE(CEDTR);
+ EVALUATE(QADTR);
+ EVALUATE(IEDTR);
+ EVALUATE(RRDTR);
+ EVALUATE(CXGTRA);
+ EVALUATE(CXUTR);
+ EVALUATE(CXSTR);
+ EVALUATE(CEXTR);
+ EVALUATE(QAXTR);
+ EVALUATE(IEXTR);
+ EVALUATE(RRXTR);
+ EVALUATE(LPGR);
+ EVALUATE(LNGR);
+ EVALUATE(LTGR);
+ EVALUATE(LCGR);
+ EVALUATE(LGR);
+ EVALUATE(LGBR);
+ EVALUATE(LGHR);
+ EVALUATE(AGR);
+ EVALUATE(SGR);
+ EVALUATE(ALGR);
+ EVALUATE(SLGR);
+ EVALUATE(MSGR);
+ EVALUATE(DSGR);
+ EVALUATE(LRVGR);
+ EVALUATE(LPGFR);
+ EVALUATE(LNGFR);
+ EVALUATE(LTGFR);
+ EVALUATE(LCGFR);
+ EVALUATE(LGFR);
+ EVALUATE(LLGFR);
+ EVALUATE(LLGTR);
+ EVALUATE(AGFR);
+ EVALUATE(SGFR);
+ EVALUATE(ALGFR);
+ EVALUATE(SLGFR);
+ EVALUATE(MSGFR);
+ EVALUATE(DSGFR);
+ EVALUATE(KMAC);
+ EVALUATE(LRVR);
+ EVALUATE(CGR);
+ EVALUATE(CLGR);
+ EVALUATE(LBR);
+ EVALUATE(LHR);
+ EVALUATE(KMF);
+ EVALUATE(KMO);
+ EVALUATE(PCC);
+ EVALUATE(KMCTR);
+ EVALUATE(KM);
+ EVALUATE(KMC);
+ EVALUATE(CGFR);
+ EVALUATE(KIMD);
+ EVALUATE(KLMD);
+ EVALUATE(CFDTR);
+ EVALUATE(CLGDTR);
+ EVALUATE(CLFDTR);
+ EVALUATE(BCTGR);
+ EVALUATE(CFXTR);
+ EVALUATE(CLFXTR);
+ EVALUATE(CDFTR);
+ EVALUATE(CDLGTR);
+ EVALUATE(CDLFTR);
+ EVALUATE(CXFTR);
+ EVALUATE(CXLGTR);
+ EVALUATE(CXLFTR);
+ EVALUATE(CGRT);
+ EVALUATE(NGR);
+ EVALUATE(OGR);
+ EVALUATE(XGR);
+ EVALUATE(FLOGR);
+ EVALUATE(LLGCR);
+ EVALUATE(LLGHR);
+ EVALUATE(MLGR);
+ EVALUATE(DLGR);
+ EVALUATE(ALCGR);
+ EVALUATE(SLBGR);
+ EVALUATE(EPSW);
+ EVALUATE(TRTT);
+ EVALUATE(TRTO);
+ EVALUATE(TROT);
+ EVALUATE(TROO);
+ EVALUATE(LLCR);
+ EVALUATE(LLHR);
+ EVALUATE(MLR);
+ EVALUATE(DLR);
+ EVALUATE(ALCR);
+ EVALUATE(SLBR);
+ EVALUATE(CU14);
+ EVALUATE(CU24);
+ EVALUATE(CU41);
+ EVALUATE(CU42);
+ EVALUATE(TRTRE);
+ EVALUATE(SRSTU);
+ EVALUATE(TRTE);
+ EVALUATE(AHHHR);
+ EVALUATE(SHHHR);
+ EVALUATE(ALHHHR);
+ EVALUATE(SLHHHR);
+ EVALUATE(CHHR);
+ EVALUATE(AHHLR);
+ EVALUATE(SHHLR);
+ EVALUATE(ALHHLR);
+ EVALUATE(SLHHLR);
+ EVALUATE(CHLR);
+ EVALUATE(POPCNT_Z);
+ EVALUATE(LOCGR);
+ EVALUATE(NGRK);
+ EVALUATE(OGRK);
+ EVALUATE(XGRK);
+ EVALUATE(AGRK);
+ EVALUATE(SGRK);
+ EVALUATE(ALGRK);
+ EVALUATE(SLGRK);
+ EVALUATE(LOCR);
+ EVALUATE(NRK);
+ EVALUATE(ORK);
+ EVALUATE(XRK);
+ EVALUATE(ARK);
+ EVALUATE(SRK);
+ EVALUATE(ALRK);
+ EVALUATE(SLRK);
+ EVALUATE(LTG);
+ EVALUATE(LG);
+ EVALUATE(CVBY);
+ EVALUATE(AG);
+ EVALUATE(SG);
+ EVALUATE(ALG);
+ EVALUATE(SLG);
+ EVALUATE(MSG);
+ EVALUATE(DSG);
+ EVALUATE(CVBG);
+ EVALUATE(LRVG);
+ EVALUATE(LT);
+ EVALUATE(LGF);
+ EVALUATE(LGH);
+ EVALUATE(LLGF);
+ EVALUATE(LLGT);
+ EVALUATE(AGF);
+ EVALUATE(SGF);
+ EVALUATE(ALGF);
+ EVALUATE(SLGF);
+ EVALUATE(MSGF);
+ EVALUATE(DSGF);
+ EVALUATE(LRV);
+ EVALUATE(LRVH);
+ EVALUATE(CG);
+ EVALUATE(CLG);
+ EVALUATE(STG);
+ EVALUATE(NTSTG);
+ EVALUATE(CVDY);
+ EVALUATE(CVDG);
+ EVALUATE(STRVG);
+ EVALUATE(CGF);
+ EVALUATE(CLGF);
+ EVALUATE(LTGF);
+ EVALUATE(CGH);
+ EVALUATE(PFD);
+ EVALUATE(STRV);
+ EVALUATE(STRVH);
+ EVALUATE(BCTG);
+ EVALUATE(STY);
+ EVALUATE(MSY);
+ EVALUATE(NY);
+ EVALUATE(CLY);
+ EVALUATE(OY);
+ EVALUATE(XY);
+ EVALUATE(LY);
+ EVALUATE(CY);
+ EVALUATE(AY);
+ EVALUATE(SY);
+ EVALUATE(MFY);
+ EVALUATE(ALY);
+ EVALUATE(SLY);
+ EVALUATE(STHY);
+ EVALUATE(LAY);
+ EVALUATE(STCY);
+ EVALUATE(ICY);
+ EVALUATE(LAEY);
+ EVALUATE(LB);
+ EVALUATE(LGB);
+ EVALUATE(LHY);
+ EVALUATE(CHY);
+ EVALUATE(AHY);
+ EVALUATE(SHY);
+ EVALUATE(MHY);
+ EVALUATE(NG);
+ EVALUATE(OG);
+ EVALUATE(XG);
+ EVALUATE(LGAT);
+ EVALUATE(MLG);
+ EVALUATE(DLG);
+ EVALUATE(ALCG);
+ EVALUATE(SLBG);
+ EVALUATE(STPQ);
+ EVALUATE(LPQ);
+ EVALUATE(LLGC);
+ EVALUATE(LLGH);
+ EVALUATE(LLC);
+ EVALUATE(LLH);
+ EVALUATE(ML);
+ EVALUATE(DL);
+ EVALUATE(ALC);
+ EVALUATE(SLB);
+ EVALUATE(LLGTAT);
+ EVALUATE(LLGFAT);
+ EVALUATE(LAT);
+ EVALUATE(LBH);
+ EVALUATE(LLCH);
+ EVALUATE(STCH);
+ EVALUATE(LHH);
+ EVALUATE(LLHH);
+ EVALUATE(STHH);
+ EVALUATE(LFHAT);
+ EVALUATE(LFH);
+ EVALUATE(STFH);
+ EVALUATE(CHF);
+ EVALUATE(MVCDK);
+ EVALUATE(MVHHI);
+ EVALUATE(MVGHI);
+ EVALUATE(MVHI);
+ EVALUATE(CHHSI);
+ EVALUATE(CGHSI);
+ EVALUATE(CHSI);
+ EVALUATE(CLFHSI);
+ EVALUATE(TBEGIN);
+ EVALUATE(TBEGINC);
+ EVALUATE(LMG);
+ EVALUATE(SRAG);
+ EVALUATE(SLAG);
+ EVALUATE(SRLG);
+ EVALUATE(SLLG);
+ EVALUATE(CSY);
+ EVALUATE(RLLG);
+ EVALUATE(RLL);
+ EVALUATE(STMG);
+ EVALUATE(STMH);
+ EVALUATE(STCMH);
+ EVALUATE(STCMY);
+ EVALUATE(CDSY);
+ EVALUATE(CDSG);
+ EVALUATE(BXHG);
+ EVALUATE(BXLEG);
+ EVALUATE(ECAG);
+ EVALUATE(TMY);
+ EVALUATE(MVIY);
+ EVALUATE(NIY);
+ EVALUATE(CLIY);
+ EVALUATE(OIY);
+ EVALUATE(XIY);
+ EVALUATE(ASI);
+ EVALUATE(ALSI);
+ EVALUATE(AGSI);
+ EVALUATE(ALGSI);
+ EVALUATE(ICMH);
+ EVALUATE(ICMY);
+ EVALUATE(MVCLU);
+ EVALUATE(CLCLU);
+ EVALUATE(STMY);
+ EVALUATE(LMH);
+ EVALUATE(LMY);
+ EVALUATE(TP);
+ EVALUATE(SRAK);
+ EVALUATE(SLAK);
+ EVALUATE(SRLK);
+ EVALUATE(SLLK);
+ EVALUATE(LOCG);
+ EVALUATE(STOCG);
+ EVALUATE(LANG);
+ EVALUATE(LAOG);
+ EVALUATE(LAXG);
+ EVALUATE(LAAG);
+ EVALUATE(LAALG);
+ EVALUATE(LOC);
+ EVALUATE(STOC);
+ EVALUATE(LAN);
+ EVALUATE(LAO);
+ EVALUATE(LAX);
+ EVALUATE(LAA);
+ EVALUATE(LAAL);
+ EVALUATE(BRXHG);
+ EVALUATE(BRXLG);
+ EVALUATE(RISBLG);
+ EVALUATE(RNSBG);
+ EVALUATE(RISBG);
+ EVALUATE(ROSBG);
+ EVALUATE(RXSBG);
+ EVALUATE(RISBGN);
+ EVALUATE(RISBHG);
+ EVALUATE(CGRJ);
+ EVALUATE(CGIT);
+ EVALUATE(CIT);
+ EVALUATE(CLFIT);
+ EVALUATE(CGIJ);
+ EVALUATE(CIJ);
+ EVALUATE(AHIK);
+ EVALUATE(AGHIK);
+ EVALUATE(ALHSIK);
+ EVALUATE(ALGHSIK);
+ EVALUATE(CGRB);
+ EVALUATE(CGIB);
+ EVALUATE(CIB);
+ EVALUATE(LDEB);
+ EVALUATE(LXDB);
+ EVALUATE(LXEB);
+ EVALUATE(MXDB);
+ EVALUATE(KEB);
+ EVALUATE(CEB);
+ EVALUATE(AEB);
+ EVALUATE(SEB);
+ EVALUATE(MDEB);
+ EVALUATE(DEB);
+ EVALUATE(MAEB);
+ EVALUATE(MSEB);
+ EVALUATE(TCEB);
+ EVALUATE(TCDB);
+ EVALUATE(TCXB);
+ EVALUATE(SQEB);
+ EVALUATE(SQDB);
+ EVALUATE(MEEB);
+ EVALUATE(KDB);
+ EVALUATE(CDB);
+ EVALUATE(ADB);
+ EVALUATE(SDB);
+ EVALUATE(MDB);
+ EVALUATE(DDB);
+ EVALUATE(MADB);
+ EVALUATE(MSDB);
+ EVALUATE(SLDT);
+ EVALUATE(SRDT);
+ EVALUATE(SLXT);
+ EVALUATE(SRXT);
+ EVALUATE(TDCET);
+ EVALUATE(TDGET);
+ EVALUATE(TDCDT);
+ EVALUATE(TDGDT);
+ EVALUATE(TDCXT);
+ EVALUATE(TDGXT);
+ EVALUATE(LEY);
+ EVALUATE(LDY);
+ EVALUATE(STEY);
+ EVALUATE(STDY);
+ EVALUATE(CZDT);
+ EVALUATE(CZXT);
+ EVALUATE(CDZT);
+ EVALUATE(CXZT);
+#undef EVALUATE
};
// When running with the simulator transition into simulated execution at this
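
Note: the header above declares a 64K-entry table of member-function pointers (`EvalTable`, sized `MAX_NUM_OPCODES`), indexed by opcode, with `Evaluate_Unknown` as the fallback. A minimal sketch of that dispatch shape is given below under the assumption of a toy two-byte opcode; the class and handler names are hypothetical.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    class MiniSim {
     public:
      typedef int (MiniSim::*EvalFn)(uint16_t);

      MiniSim() : table_(1 << 16, &MiniSim::EvaluateUnknown) {
        table_[0x1800] = &MiniSim::EvaluateNop;  // hypothetical opcode slot
      }

      // Returns the instruction length, as the EVALUATE handlers above do.
      int Dispatch(uint16_t opcode) { return (this->*table_[opcode])(opcode); }

     private:
      int EvaluateUnknown(uint16_t op) {
        std::fprintf(stderr, "unimplemented opcode %04x\n", op);
        return 0;
      }
      int EvaluateNop(uint16_t) { return 2; }

      std::vector<EvalFn> table_;
    };
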
diff --git a/deps/v8/src/signature.h b/deps/v8/src/signature.h
index 076d17a7e2..3fa5f8290f 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/signature.h
@@ -14,7 +14,7 @@ namespace internal {
template <typename T>
class Signature : public ZoneObject {
public:
- Signature(size_t return_count, size_t parameter_count, T* reps)
+ Signature(size_t return_count, size_t parameter_count, const T* reps)
: return_count_(return_count),
parameter_count_(parameter_count),
reps_(reps) {}
@@ -32,6 +32,8 @@ class Signature : public ZoneObject {
return reps_[index];
}
+ const T* raw_data() const { return reps_; }
+
// For incrementally building signatures.
class Builder {
public:
@@ -71,7 +73,7 @@ class Signature : public ZoneObject {
protected:
size_t return_count_;
size_t parameter_count_;
- T* reps_;
+ const T* reps_;
};
} // namespace internal
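
Note: the `Signature` change above only adds const-ness and a `raw_data()` accessor; the layout itself is unchanged: one flat array holding the return types first, then the parameter types, which is why `GetParam` indexes past `return_count_`. A simplified sketch of that indexing scheme (no Zone allocation):

    #include <cassert>
    #include <cstddef>

    // Simplified Signature: reps points at return_count returns followed by
    // parameter_count parameters, so GetParam skips past the returns.
    template <typename T>
    class SimpleSignature {
     public:
      SimpleSignature(size_t return_count, size_t parameter_count,
                      const T* reps)
          : return_count_(return_count),
            parameter_count_(parameter_count),
            reps_(reps) {}

      T GetReturn(size_t index = 0) const {
        assert(index < return_count_);
        return reps_[index];
      }
      T GetParam(size_t index) const {
        assert(index < parameter_count_);
        return reps_[return_count_ + index];
      }

     private:
      size_t return_count_;
      size_t parameter_count_;
      const T* reps_;
    };

    // Usage: one return type (0) followed by two parameter types (1, 2).
    // static const int kReps[] = {0, 1, 2};
    // SimpleSignature<int> sig(1, 2, kReps);
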
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 84a08c103d..8d2f5d9339 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -4,11 +4,13 @@
#include "src/snapshot/code-serializer.h"
+#include <memory>
+
#include "src/code-stubs.h"
#include "src/log.h"
#include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/snapshot/deserializer.h"
+#include "src/snapshot/snapshot.h"
#include "src/version.h"
namespace v8 {
@@ -27,35 +29,43 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
}
// Serialize code object.
- SnapshotByteSink sink(info->code()->CodeSize() * 2);
- CodeSerializer cs(isolate, &sink, *source);
+ CodeSerializer cs(isolate, SerializedCodeData::SourceHash(source));
DisallowHeapAllocation no_gc;
- Object** location = Handle<Object>::cast(info).location();
- cs.VisitPointer(location);
- cs.SerializeDeferredObjects();
- cs.Pad();
-
- SerializedCodeData data(sink.data(), cs);
- ScriptData* script_data = data.GetScriptData();
+ cs.reference_map()->AddAttachedReference(*source);
+ ScriptData* ret = cs.Serialize(info);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
- int length = script_data->length();
+ int length = ret->length();
PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
}
- return script_data;
+ return ret;
+}
+
+ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
+ DisallowHeapAllocation no_gc;
+
+ VisitPointer(Handle<Object>::cast(obj).location());
+ SerializeDeferredObjects();
+ Pad();
+
+ SerializedCodeData data(sink()->data(), this);
+
+ return data.GetScriptData();
}
void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
+ if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
int root_index = root_index_map_.Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
}
- if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
FlushSkip(skip);
@@ -73,21 +83,17 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
where_to_point);
return;
case Code::STUB:
- SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
- return;
#define IC_KIND_CASE(KIND) case Code::KIND:
IC_KIND_LIST(IC_KIND_CASE)
#undef IC_KIND_CASE
- SerializeIC(code_object, how_to_code, where_to_point);
+ SerializeCodeStub(code_object, how_to_code, where_to_point);
return;
case Code::FUNCTION:
DCHECK(code_object->has_reloc_info_for_serialization());
SerializeGeneric(code_object, how_to_code, where_to_point);
return;
- case Code::WASM_FUNCTION:
- case Code::WASM_TO_JS_FUNCTION:
- case Code::JS_TO_WASM_FUNCTION:
- UNREACHABLE();
+ default:
+ return SerializeCodeObject(code_object, how_to_code, where_to_point);
}
UNREACHABLE();
}
@@ -108,7 +114,7 @@ void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
HowToCode how_to_code,
WhereToPoint where_to_point) {
// Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
+ ObjectSerializer serializer(this, heap_object, &sink_, how_to_code,
where_to_point);
serializer.Serialize();
}
@@ -126,75 +132,27 @@ void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
isolate()->builtins()->name(builtin_index));
}
- sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
- sink_->PutInt(builtin_index, "builtin_index");
+ sink_.Put(kBuiltin + how_to_code + where_to_point, "Builtin");
+ sink_.PutInt(builtin_index, "builtin_index");
}
-void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
WhereToPoint where_to_point) {
- DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
- (how_to_code == kPlain && where_to_point == kInnerPointer) ||
- (how_to_code == kFromCode && where_to_point == kInnerPointer));
+ // We only arrive here if we have not encountered this code stub before.
+ DCHECK(!reference_map()->Lookup(code_stub).is_valid());
+ uint32_t stub_key = code_stub->stub_key();
DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
+ stub_keys_.Add(stub_key);
- int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
-
+ SerializerReference reference =
+ reference_map()->AddAttachedReference(code_stub);
if (FLAG_trace_serializer) {
- PrintF(" Encoding code stub %s as %d\n",
- CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)), index);
+ PrintF(" Encoding code stub %s as attached reference %d\n",
+ CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)),
+ reference.attached_reference_index());
}
-
- sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
- sink_->PutInt(index, "CodeStub key");
-}
-
-void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
- WhereToPoint where_to_point) {
- // The IC may be implemented as a stub.
- uint32_t stub_key = ic->stub_key();
- if (stub_key != CodeStub::NoCacheKey()) {
- if (FLAG_trace_serializer) {
- PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
- }
- SerializeCodeStub(stub_key, how_to_code, where_to_point);
- return;
- }
- // The IC may be implemented as builtin. Only real builtins have an
- // actual builtin_index value attached (otherwise it's just garbage).
- // Compare to make sure we are really dealing with a builtin.
- int builtin_index = ic->builtin_index();
- if (builtin_index < Builtins::builtin_count) {
- Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
- Code* builtin = isolate()->builtins()->builtin(name);
- if (builtin == ic) {
- if (FLAG_trace_serializer) {
- PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
- }
- DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
- ic->kind() == Code::KEYED_STORE_IC);
- SerializeBuiltin(builtin_index, how_to_code, where_to_point);
- return;
- }
- }
- // The IC may also just be a piece of code kept in the non_monomorphic_cache.
- // In that case, just serialize as a normal code object.
- if (FLAG_trace_serializer) {
- PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
- }
- DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
- SerializeGeneric(ic, how_to_code, where_to_point);
-}
-
-int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
- // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
- int index = 0;
- while (index < stub_keys_.length()) {
- if (stub_keys_[index] == stub_key) return index;
- index++;
- }
- stub_keys_.Add(stub_key);
- return index;
+ PutAttachedReference(reference, how_to_code, where_to_point);
}
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
@@ -204,35 +162,37 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
HandleScope scope(isolate);
- base::SmartPointer<SerializedCodeData> scd(
- SerializedCodeData::FromCachedData(isolate, cached_data, *source));
- if (scd.is_empty()) {
+ SerializedCodeData::SanityCheckResult sanity_check_result =
+ SerializedCodeData::CHECK_SUCCESS;
+ const SerializedCodeData scd = SerializedCodeData::FromCachedData(
+ isolate, cached_data, SerializedCodeData::SourceHash(source),
+ &sanity_check_result);
+ if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
DCHECK(cached_data->rejected());
+ source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(
+ sanity_check_result);
return MaybeHandle<SharedFunctionInfo>();
}
- // Prepare and register list of attached objects.
- Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
- Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
- code_stub_keys.length() + kCodeStubsBaseIndex);
- attached_objects[kSourceObjectIndex] = source;
+ Deserializer deserializer(&scd);
+ deserializer.AddAttachedObject(source);
+ Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys();
for (int i = 0; i < code_stub_keys.length(); i++) {
- attached_objects[i + kCodeStubsBaseIndex] =
- CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
+ deserializer.AddAttachedObject(
+ CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked());
}
- Deserializer deserializer(scd.get());
- deserializer.SetAttachedObjects(attached_objects);
-
// Deserialize.
- Handle<SharedFunctionInfo> result;
- if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
+ Handle<HeapObject> as_heap_object;
+ if (!deserializer.DeserializeObject(isolate).ToHandle(&as_heap_object)) {
// Deserializing may fail if the reservations cannot be fulfilled.
if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
return MaybeHandle<SharedFunctionInfo>();
}
+ Handle<SharedFunctionInfo> result =
+ Handle<SharedFunctionInfo>::cast(as_heap_object);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int length = cached_data->length();
@@ -240,19 +200,52 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
}
result->set_deserialized(true);
- if (isolate->logger()->is_logging_code_events() ||
- isolate->cpu_profiler()->is_profiling()) {
+ if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
String* name = isolate->heap()->empty_string();
if (result->script()->IsScript()) {
Script* script = Script::cast(result->script());
if (script->name()->IsString()) name = String::cast(script->name());
}
- isolate->logger()->CodeCreateEvent(
- Logger::SCRIPT_TAG, result->abstract_code(), *result, NULL, name);
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
+ result->abstract_code(), *result, name));
}
return scope.CloseAndEscape(result);
}
+std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
+ Isolate* isolate, Handle<FixedArray> compiled_module) {
+ WasmCompiledModuleSerializer wasm_cs(isolate, 0);
+ wasm_cs.reference_map()->AddAttachedReference(*isolate->native_context());
+ ScriptData* data = wasm_cs.Serialize(compiled_module);
+ return std::unique_ptr<ScriptData>(data);
+}
+
+MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
+ Isolate* isolate, ScriptData* data) {
+ SerializedCodeData::SanityCheckResult sanity_check_result =
+ SerializedCodeData::CHECK_SUCCESS;
+ MaybeHandle<FixedArray> nothing;
+ const SerializedCodeData scd = SerializedCodeData::FromCachedData(
+ isolate, data, 0, &sanity_check_result);
+
+ if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
+ return nothing;
+ }
+
+ Deserializer deserializer(&scd, true);
+ deserializer.AddAttachedObject(isolate->native_context());
+
+ Vector<const uint32_t> stub_keys = scd.CodeStubKeys();
+ for (int i = 0; i < stub_keys.length(); ++i) {
+ deserializer.AddAttachedObject(
+ CodeStub::GetCode(isolate, stub_keys[i]).ToHandleChecked());
+ }
+
+ MaybeHandle<HeapObject> obj = deserializer.DeserializeObject(isolate);
+ if (obj.is_null() || !obj.ToHandleChecked()->IsFixedArray()) return nothing;
+ return Handle<FixedArray>::cast(obj.ToHandleChecked());
+}
+
class Checksum {
public:
explicit Checksum(Vector<const byte> payload) {
@@ -292,13 +285,13 @@ class Checksum {
DISALLOW_COPY_AND_ASSIGN(Checksum);
};
-SerializedCodeData::SerializedCodeData(const List<byte>& payload,
- const CodeSerializer& cs) {
+SerializedCodeData::SerializedCodeData(const List<byte>* payload,
+ const CodeSerializer* cs) {
DisallowHeapAllocation no_gc;
- const List<uint32_t>* stub_keys = cs.stub_keys();
+ const List<uint32_t>* stub_keys = cs->stub_keys();
List<Reservation> reservations;
- cs.EncodeReservations(&reservations);
+ cs->EncodeReservations(&reservations);
// Calculate sizes.
int reservation_size = reservations.length() * kInt32Size;
@@ -306,25 +299,21 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
int stub_keys_size = stub_keys->length() * kInt32Size;
int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- int size = padded_payload_offset + payload.length();
+ int size = padded_payload_offset + payload->length();
// Allocate backing store and create result data.
AllocateData(size);
// Set header values.
- SetMagicNumber(cs.isolate());
+ SetMagicNumber(cs->isolate());
SetHeaderValue(kVersionHashOffset, Version::Hash());
- SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
+ SetHeaderValue(kSourceHashOffset, cs->source_hash());
SetHeaderValue(kCpuFeaturesOffset,
static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
SetHeaderValue(kNumReservationsOffset, reservations.length());
SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
- SetHeaderValue(kPayloadLengthOffset, payload.length());
-
- Checksum checksum(payload.ToConstVector());
- SetHeaderValue(kChecksum1Offset, checksum.a());
- SetHeaderValue(kChecksum2Offset, checksum.b());
+ SetHeaderValue(kPayloadLengthOffset, payload->length());
// Copy reservation chunk sizes.
CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
@@ -337,12 +326,16 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
// Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload.begin(),
- static_cast<size_t>(payload.length()));
+ CopyBytes(data_ + padded_payload_offset, payload->begin(),
+ static_cast<size_t>(payload->length()));
+
+ Checksum checksum(DataWithoutHeader());
+ SetHeaderValue(kChecksum1Offset, checksum.a());
+ SetHeaderValue(kChecksum2Offset, checksum.b());
}
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
- Isolate* isolate, String* source) const {
+ Isolate* isolate, uint32_t expected_source_hash) const {
uint32_t magic_number = GetMagicNumber();
if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
@@ -352,16 +345,16 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
uint32_t c1 = GetHeaderValue(kChecksum1Offset);
uint32_t c2 = GetHeaderValue(kChecksum2Offset);
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
- if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
+ if (source_hash != expected_source_hash) return SOURCE_MISMATCH;
if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
return CPU_FEATURES_MISMATCH;
}
if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
- if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
+ if (!Checksum(DataWithoutHeader()).Check(c1, c2)) return CHECKSUM_MISMATCH;
return CHECK_SUCCESS;
}
-uint32_t SerializedCodeData::SourceHash(String* source) const {
+uint32_t SerializedCodeData::SourceHash(Handle<String> source) {
return source->length();
}
@@ -404,17 +397,17 @@ Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
SerializedCodeData::SerializedCodeData(ScriptData* data)
: SerializedData(const_cast<byte*>(data->data()), data->length()) {}
-SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
- ScriptData* cached_data,
- String* source) {
+const SerializedCodeData SerializedCodeData::FromCachedData(
+ Isolate* isolate, ScriptData* cached_data, uint32_t expected_source_hash,
+ SanityCheckResult* rejection_result) {
DisallowHeapAllocation no_gc;
- SerializedCodeData* scd = new SerializedCodeData(cached_data);
- SanityCheckResult r = scd->SanityCheck(isolate, source);
- if (r == CHECK_SUCCESS) return scd;
- cached_data->Reject();
- source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
- delete scd;
- return NULL;
+ SerializedCodeData scd(cached_data);
+ *rejection_result = scd.SanityCheck(isolate, expected_source_hash);
+ if (*rejection_result != CHECK_SUCCESS) {
+ cached_data->Reject();
+ return SerializedCodeData(nullptr, 0);
+ }
+ return scd;
}
} // namespace internal
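
Note: the hunk above moves the checksum so it now covers everything after the header (`DataWithoutHeader()`), and it is computed only after the payload bytes have been copied in. The internals of the `Checksum` class are not shown in this diff; as an assumption, a Fletcher-style pair of running sums is sketched below purely to illustrate the two-word `a()`/`b()`/`Check()` interface.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for the Checksum class: a Fletcher-style pair of
    // 32-bit sums over the payload, verified against two stored header words.
    class SketchChecksum {
     public:
      SketchChecksum(const uint8_t* data, size_t size) : a_(1), b_(0) {
        for (size_t i = 0; i < size; ++i) {
          a_ += data[i];
          b_ += a_;
        }
      }
      bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
      uint32_t a() const { return a_; }
      uint32_t b() const { return b_; }

     private:
      uint32_t a_;
      uint32_t b_;
    };
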
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index b217fff52b..e82a7d5dd6 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -17,58 +17,88 @@ class CodeSerializer : public Serializer {
Handle<SharedFunctionInfo> info,
Handle<String> source);
+ ScriptData* Serialize(Handle<HeapObject> obj);
+
MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
Isolate* isolate, ScriptData* cached_data, Handle<String> source);
- static const int kSourceObjectIndex = 0;
- STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
-
- static const int kCodeStubsBaseIndex = 1;
+ const List<uint32_t>* stub_keys() const { return &stub_keys_; }
- String* source() const {
- DCHECK(!AllowHeapAllocation::IsAllowed());
- return source_;
- }
+ uint32_t source_hash() const { return source_hash_; }
- const List<uint32_t>* stub_keys() const { return &stub_keys_; }
+ protected:
+ explicit CodeSerializer(Isolate* isolate, uint32_t source_hash)
+ : Serializer(isolate), source_hash_(source_hash) {}
+ ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
- private:
- CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
- : Serializer(isolate, sink), source_(source) {
- back_reference_map_.AddSourceString(source);
+ virtual void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ UNREACHABLE();
}
- ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }
+ void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
+ WhereToPoint where_to_point);
+ private:
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
WhereToPoint where_to_point);
- void SerializeIC(Code* ic, HowToCode how_to_code,
- WhereToPoint where_to_point);
- void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
+ void SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
WhereToPoint where_to_point);
- void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
- WhereToPoint where_to_point);
- int AddCodeStubKey(uint32_t stub_key);
DisallowHeapAllocation no_gc_;
- String* source_;
+ uint32_t source_hash_;
List<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
+class WasmCompiledModuleSerializer : public CodeSerializer {
+ public:
+ static std::unique_ptr<ScriptData> SerializeWasmModule(
+ Isolate* isolate, Handle<FixedArray> compiled_module);
+ static MaybeHandle<FixedArray> DeserializeWasmModule(Isolate* isolate,
+ ScriptData* data);
+
+ protected:
+ void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
+ WhereToPoint where_to_point) override {
+ Code::Kind kind = code_object->kind();
+ if (kind == Code::WASM_FUNCTION || kind == Code::WASM_TO_JS_FUNCTION ||
+ kind == Code::JS_TO_WASM_FUNCTION) {
+ SerializeGeneric(code_object, how_to_code, where_to_point);
+ } else {
+ UNREACHABLE();
+ }
+ }
+
+ private:
+ WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash)
+ : CodeSerializer(isolate, source_hash) {}
+ DISALLOW_COPY_AND_ASSIGN(WasmCompiledModuleSerializer);
+};
+
// Wrapper around ScriptData to provide code-serializer-specific functionality.
class SerializedCodeData : public SerializedData {
public:
+ enum SanityCheckResult {
+ CHECK_SUCCESS = 0,
+ MAGIC_NUMBER_MISMATCH = 1,
+ VERSION_MISMATCH = 2,
+ SOURCE_MISMATCH = 3,
+ CPU_FEATURES_MISMATCH = 4,
+ FLAGS_MISMATCH = 5,
+ CHECKSUM_MISMATCH = 6
+ };
+
// Used when consuming.
- static SerializedCodeData* FromCachedData(Isolate* isolate,
- ScriptData* cached_data,
- String* source);
+ static const SerializedCodeData FromCachedData(
+ Isolate* isolate, ScriptData* cached_data, uint32_t expected_source_hash,
+ SanityCheckResult* rejection_result);
// Used when producing.
- SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
+ SerializedCodeData(const List<byte>* payload, const CodeSerializer* cs);
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* GetScriptData();
@@ -78,23 +108,19 @@ class SerializedCodeData : public SerializedData {
Vector<const uint32_t> CodeStubKeys() const;
+ static uint32_t SourceHash(Handle<String> source);
+
private:
explicit SerializedCodeData(ScriptData* data);
+ SerializedCodeData(const byte* data, int size)
+ : SerializedData(const_cast<byte*>(data), size) {}
- enum SanityCheckResult {
- CHECK_SUCCESS = 0,
- MAGIC_NUMBER_MISMATCH = 1,
- VERSION_MISMATCH = 2,
- SOURCE_MISMATCH = 3,
- CPU_FEATURES_MISMATCH = 4,
- FLAGS_MISMATCH = 5,
- CHECKSUM_MISMATCH = 6
- };
-
- SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
-
- uint32_t SourceHash(String* source) const;
+ Vector<const byte> DataWithoutHeader() const {
+ return Vector<const byte>(data_ + kHeaderSize, size_ - kHeaderSize);
+ }
+ SanityCheckResult SanityCheck(Isolate* isolate,
+ uint32_t expected_source_hash) const;
// The data header consists of uint32_t-sized entries:
// [0] magic number and external reference count
// [1] version hash
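
Note: the header described in the comment above is a flat sequence of uint32_t slots at the front of the byte buffer. A minimal sketch of the `SetHeaderValue`/`GetHeaderValue` style of access follows; the slot indices stand in for the real `k...Offset` constants, which are not all shown in this hunk.

    #include <cstdint>
    #include <cstring>

    // Illustrative uint32 header slots at the front of a serialized buffer.
    // 'slot' is a slot index, mirroring the kFooOffset constants above.
    inline void SetHeaderValue(uint8_t* data, int slot, uint32_t value) {
      std::memcpy(data + slot * sizeof(uint32_t), &value, sizeof(value));
    }

    inline uint32_t GetHeaderValue(const uint8_t* data, int slot) {
      uint32_t value;
      std::memcpy(data + slot * sizeof(uint32_t), &value, sizeof(value));
      return value;
    }
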
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 0a21feffa1..7a2df28f62 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -31,17 +31,18 @@ void Deserializer::DecodeReservation(
void Deserializer::FlushICacheForNewIsolate() {
DCHECK(!deserializing_user_code_);
// The entire isolate is newly deserialized. Simply flush all code pages.
- PageIterator it(isolate_->heap()->code_space());
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *isolate_->heap()->code_space()) {
Assembler::FlushICache(isolate_, p->area_start(),
p->area_end() - p->area_start());
}
}
-void Deserializer::FlushICacheForNewCodeObjects() {
+void Deserializer::FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects() {
DCHECK(deserializing_user_code_);
for (Code* code : new_code_objects_) {
+ // Record all references to embedded objects in the new code object.
+ isolate_->heap()->RecordWritesIntoCode(code);
+
if (FLAG_serialize_age_code) code->PreAge(isolate_);
Assembler::FlushICache(isolate_, code->instruction_start(),
code->instruction_size());
@@ -54,7 +55,9 @@ bool Deserializer::ReserveSpace() {
CHECK(reservations_[i].length() > 0);
}
#endif // DEBUG
- if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
+ DCHECK(allocated_maps_.is_empty());
+ if (!isolate_->heap()->ReserveSpace(reservations_, &allocated_maps_))
+ return false;
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
high_water_[i] = reservations_[i][0].start;
}
@@ -101,10 +104,6 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->undefined_value());
}
- // Update data pointers to the external strings containing natives sources.
- Natives::UpdateSourceCache(isolate_->heap());
- ExtraNatives::UpdateSourceCache(isolate_->heap());
-
// Issue code events for newly deserialized code objects.
LOG_CODE_EVENT(isolate_, LogCodeObjects());
LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
@@ -119,9 +118,7 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
return MaybeHandle<Object>();
}
- Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
- attached_objects[kGlobalProxyReference] = global_proxy;
- SetAttachedObjects(attached_objects);
+ AddAttachedObject(global_proxy);
DisallowHeapAllocation no_gc;
// Keep track of the code space start and end pointers in case new
@@ -141,22 +138,21 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
return Handle<Object>(root, isolate);
}
-MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
- Isolate* isolate) {
+MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
Initialize(isolate);
if (!ReserveSpace()) {
- return Handle<SharedFunctionInfo>();
+ return MaybeHandle<HeapObject>();
} else {
deserializing_user_code_ = true;
HandleScope scope(isolate);
- Handle<SharedFunctionInfo> result;
+ Handle<HeapObject> result;
{
DisallowHeapAllocation no_gc;
Object* root;
VisitPointer(&root);
DeserializeDeferredObjects();
- FlushICacheForNewCodeObjects();
- result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
+ FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
+ result = Handle<HeapObject>(HeapObject::cast(root));
isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
}
CommitPostProcessedObjects(isolate);
@@ -167,7 +163,14 @@ MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
Deserializer::~Deserializer() {
// TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
// DCHECK(source_.AtEOF());
- attached_objects_.Dispose();
+#ifdef DEBUG
+ for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+ int chunk_index = current_chunk_[space];
+ CHECK_EQ(reservations_[space].length(), chunk_index + 1);
+ CHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
+ }
+ CHECK_EQ(allocated_maps_.length(), next_map_index_);
+#endif // DEBUG
}
// This is called on the roots. It is the driver of the deserialization
@@ -315,11 +318,15 @@ void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
HeapObject* Deserializer::GetBackReferencedObject(int space) {
HeapObject* obj;
- BackReference back_reference(source_.GetInt());
+ SerializerReference back_reference =
+ SerializerReference::FromBitfield(source_.GetInt());
if (space == LO_SPACE) {
- CHECK(back_reference.chunk_index() == 0);
uint32_t index = back_reference.large_object_index();
obj = deserialized_large_objects_[index];
+ } else if (space == MAP_SPACE) {
+ int index = back_reference.map_index();
+ DCHECK(index < next_map_index_);
+ obj = HeapObject::FromAddress(allocated_maps_[index]);
} else {
DCHECK(space < kNumberOfPreallocatedSpaces);
uint32_t chunk_index = back_reference.chunk_index();
@@ -410,6 +417,9 @@ Address Deserializer::Allocate(int space_index, int size) {
HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
deserialized_large_objects_.Add(obj);
return obj->address();
+ } else if (space_index == MAP_SPACE) {
+ DCHECK_EQ(Map::kSize, size);
+ return allocated_maps_[next_map_index_++];
} else {
DCHECK(space_index < kNumberOfPreallocatedSpaces);
Address address = high_water_[space_index];
@@ -483,6 +493,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
new_object = isolate->heap()->root(root_index); \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
+ hot_objects_.Add(HeapObject::cast(new_object)); \
} else if (where == kPartialSnapshotCache) { \
int cache_index = source_.GetInt(); \
new_object = isolate->partial_snapshot_cache()->at(cache_index); \
@@ -496,7 +507,6 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
new_object = reinterpret_cast<Object*>(address); \
} else if (where == kAttachedReference) { \
int index = source_.GetInt(); \
- DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \
new_object = *attached_objects_[index]; \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else { \
@@ -510,12 +520,11 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
emit_write_barrier = false; \
} \
if (within == kInnerPointer) { \
- if (space_number != CODE_SPACE || new_object->IsCode()) { \
- Code* new_code_object = reinterpret_cast<Code*>(new_object); \
+ if (new_object->IsCode()) { \
+ Code* new_code_object = Code::cast(new_object); \
new_object = \
reinterpret_cast<Object*>(new_code_object->instruction_start()); \
} else { \
- DCHECK(space_number == CODE_SPACE); \
Cell* cell = Cell::cast(new_object); \
new_object = reinterpret_cast<Object*>(cell->ValueAddress()); \
} \
@@ -582,6 +591,9 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
// pointer because it points at the entry point, not at the start of the
// code object.
SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
+ // Support for pointers into a cell. It's an inner pointer because it
+ // points directly at the value field, not the start of the cell object.
+ SINGLE_CASE(kNewObject, kPlain, kInnerPointer, OLD_SPACE)
// Deserialize a new code object and write a pointer to its first
// instruction to the current code object.
ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
@@ -608,8 +620,12 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
// object.
ALL_SPACES(kBackref, kFromCode, kInnerPointer)
ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
- ALL_SPACES(kBackref, kPlain, kInnerPointer)
- ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
+ // Support for direct instruction pointers in functions.
+ SINGLE_CASE(kBackref, kPlain, kInnerPointer, CODE_SPACE)
+ SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, CODE_SPACE)
+ // Support for pointers into a cell.
+ SINGLE_CASE(kBackref, kPlain, kInnerPointer, OLD_SPACE)
+ SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, OLD_SPACE)
// Find an object in the roots array and write a pointer to it to the
// current object.
SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
@@ -633,6 +649,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
// the current object.
SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
SINGLE_CASE(kAttachedReference, kPlain, kInnerPointer, 0)
+ SINGLE_CASE(kAttachedReference, kFromCode, kStartOfObject, 0)
SINGLE_CASE(kAttachedReference, kFromCode, kInnerPointer, 0)
// Find a builtin and write a pointer to it to the current object.
SINGLE_CASE(kBuiltin, kPlain, kStartOfObject, 0)
@@ -770,9 +787,8 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
int index = data & kHotObjectMask;
Object* hot_object = hot_objects_.Get(index);
UnalignedCopy(current, &hot_object);
- if (write_barrier_needed) {
+ if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
Address current_address = reinterpret_cast<Address>(current);
- SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
isolate->heap()->RecordWrite(
HeapObject::FromAddress(current_object_address),
static_cast<int>(current_address - current_object_address),
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 58c481cc79..634d80e3ab 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -30,13 +30,14 @@ class Deserializer : public SerializerDeserializer {
public:
// Create a deserializer from a snapshot byte source.
template <class Data>
- explicit Deserializer(Data* data)
+ explicit Deserializer(Data* data, bool deserializing_user_code = false)
: isolate_(NULL),
source_(data->Payload()),
magic_number_(data->GetMagicNumber()),
+ next_map_index_(0),
external_reference_table_(NULL),
deserialized_large_objects_(0),
- deserializing_user_code_(false),
+ deserializing_user_code_(deserializing_user_code),
next_alignment_(kWordAligned) {
DecodeReservation(data->Reservations());
}
@@ -50,13 +51,13 @@ class Deserializer : public SerializerDeserializer {
MaybeHandle<Object> DeserializePartial(Isolate* isolate,
Handle<JSGlobalProxy> global_proxy);
- // Deserialize a shared function info. Fail gracefully.
- MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
+ // Deserialize an object graph. Fail gracefully.
+ MaybeHandle<HeapObject> DeserializeObject(Isolate* isolate);
- // Pass a vector of externally-provided objects referenced by the snapshot.
- // The ownership to its backing store is handed over as well.
- void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
- attached_objects_ = attached_objects;
+ // Add an object to back an attached reference. Objects must be added in the
+ // same order in which they were added in the serializer.
+ void AddAttachedObject(Handle<HeapObject> attached_object) {
+ attached_objects_.Add(attached_object);
}
private:
@@ -89,7 +90,7 @@ class Deserializer : public SerializerDeserializer {
void DeserializeDeferredObjects();
void FlushICacheForNewIsolate();
- void FlushICacheForNewCodeObjects();
+ void FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
void CommitPostProcessedObjects(Isolate* isolate);
@@ -117,7 +118,7 @@ class Deserializer : public SerializerDeserializer {
Isolate* isolate_;
// Objects from the attached object descriptions in the serialized user code.
- Vector<Handle<Object> > attached_objects_;
+ List<Handle<HeapObject> > attached_objects_;
SnapshotByteSource source_;
uint32_t magic_number_;
@@ -129,6 +130,8 @@ class Deserializer : public SerializerDeserializer {
Heap::Reservation reservations_[kNumberOfSpaces];
uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
Address high_water_[kNumberOfPreallocatedSpaces];
+ int next_map_index_;
+ List<Address> allocated_maps_;
ExternalReferenceTable* external_reference_table_;
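
Note: `AddAttachedObject` replaces the old vector hand-off, and attached references are now resolved purely by position: the deserializer must register objects in exactly the order in which the serializer assigned `kAttachedReference` indices. A small sketch of that positional contract, with simplified types:

    #include <cassert>
    #include <vector>

    // Simplified attached-reference registry: indices handed out at
    // serialization time are looked up positionally at deserialization time.
    template <typename T>
    class AttachedObjects {
     public:
      // Serializer side: returns the index encoded into the snapshot.
      int Add(T object) {
        objects_.push_back(object);
        return static_cast<int>(objects_.size()) - 1;
      }
      // Deserializer side: must have been populated in the same order.
      T Get(int index) const {
        assert(index >= 0 && index < static_cast<int>(objects_.size()));
        return objects_[index];
      }

     private:
      std::vector<T> objects_;
    };
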
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index c38f92f5b1..f4362e5077 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -150,7 +150,7 @@ int main(int argc, char** argv) {
}
i::CpuFeatures::Probe(true);
- V8::InitializeICU();
+ V8::InitializeICUDefaultLocation(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
index f30e794009..338b92bd16 100644
--- a/deps/v8/src/snapshot/natives-common.cc
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -34,24 +34,5 @@ FixedArray* NativesCollection<EXPERIMENTAL_EXTRAS>::GetSourceCache(Heap* heap) {
return heap->experimental_extra_natives_source_cache();
}
-
-template <NativeType type>
-void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
- for (int i = 0; i < GetBuiltinsCount(); i++) {
- Object* source = GetSourceCache(heap)->get(i);
- if (!source->IsUndefined()) {
- ExternalOneByteString::cast(source)->update_data_cache();
- }
- }
-}
-
-
-// Explicit template instantiations.
-template void NativesCollection<CORE>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXPERIMENTAL>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXTRAS>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXPERIMENTAL_EXTRAS>::UpdateSourceCache(
- Heap* heap);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index 07f6b1aed3..e44751537f 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -44,7 +44,6 @@ class NativesCollection {
// The following methods are implemented in natives-common.cc:
static FixedArray* GetSourceCache(Heap* heap);
- static void UpdateSourceCache(Heap* heap);
};
typedef NativesCollection<CORE> Natives;
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 0f1f133edc..b46f6755f0 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/startup-serializer.h"
#include "src/objects-inl.h"
@@ -10,12 +11,8 @@ namespace v8 {
namespace internal {
PartialSerializer::PartialSerializer(Isolate* isolate,
- Serializer* startup_snapshot_serializer,
- SnapshotByteSink* sink)
- : Serializer(isolate, sink),
- startup_serializer_(startup_snapshot_serializer),
- global_object_(NULL),
- next_partial_cache_index_(0) {
+ StartupSerializer* startup_serializer)
+ : Serializer(isolate), startup_serializer_(startup_serializer) {
InitializeCodeAddressMap();
}
@@ -26,8 +23,7 @@ PartialSerializer::~PartialSerializer() {
void PartialSerializer::Serialize(Object** o) {
if ((*o)->IsContext()) {
Context* context = Context::cast(*o);
- global_object_ = context->global_object();
- back_reference_map()->AddGlobalProxy(context->global_proxy());
+ reference_map()->AddAttachedReference(context->global_proxy());
// The bootstrap snapshot has a code-stub context. When serializing the
// partial snapshot, it is chained into the weak context list on the isolate
// and its next context pointer may point to the code-stub context. Clear
@@ -36,7 +32,7 @@ void PartialSerializer::Serialize(Object** o) {
if (context->IsNativeContext()) {
context->set(Context::NEXT_CONTEXT_LINK,
isolate_->heap()->undefined_value());
- DCHECK(!context->global_object()->IsUndefined());
+ DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
}
}
VisitPointer(o);
@@ -55,58 +51,53 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Replace typed arrays by undefined.
if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
+ if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
int root_index = root_index_map_.Lookup(obj);
if (root_index != RootIndexMap::kInvalidRootIndex) {
PutRoot(root_index, obj, how_to_code, where_to_point, skip);
return;
}
+ if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+
if (ShouldBeInThePartialSnapshotCache(obj)) {
FlushSkip(skip);
- int cache_index = PartialSnapshotCacheIndex(obj);
- sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
- "PartialSnapshotCache");
- sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+ int cache_index = startup_serializer_->PartialSnapshotCacheIndex(obj);
+ sink_.Put(kPartialSnapshotCache + how_to_code + where_to_point,
+ "PartialSnapshotCache");
+ sink_.PutInt(cache_index, "partial_snapshot_cache_index");
return;
}
// Pointers from the partial snapshot to the objects in the startup snapshot
// should go through the root array or through the partial snapshot cache.
// If this is not the case you may have to add something to the root array.
- DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
+ DCHECK(!startup_serializer_->reference_map()->Lookup(obj).is_valid());
// All the internalized strings that the partial snapshot needs should be
// either in the root table or in the partial snapshot cache.
DCHECK(!obj->IsInternalizedString());
-
- if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+ // Function and object templates are not context specific.
+ DCHECK(!obj->IsTemplateInfo());
FlushSkip(skip);
// Clear literal boilerplates.
if (obj->IsJSFunction()) {
- FixedArray* literals = JSFunction::cast(obj)->literals();
- for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
+ JSFunction* function = JSFunction::cast(obj);
+ LiteralsArray* literals = function->literals();
+ for (int i = 0; i < literals->literals_count(); i++) {
+ literals->set_literal_undefined(i);
+ }
+ function->ClearTypeFeedbackInfo();
}
// Object has not yet been serialized. Serialize it here.
- ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
+ ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
serializer.Serialize();
}
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
- int index = partial_cache_index_map_.LookupOrInsert(
- heap_object, next_partial_cache_index_);
- if (index == PartialCacheIndexMap::kInvalidIndex) {
- // This object is not part of the partial snapshot cache yet. Add it to the
- // startup snapshot so we can refer to it via partial snapshot index from
- // the partial snapshot.
- startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
- return next_partial_cache_index_++;
- }
- return index;
-}
-
bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
// Scripts should be referred only through shared function infos. We can't
// allow them to be part of the partial snapshot because they contain a
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 0bf61dd055..282f76e78f 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -11,10 +11,11 @@
namespace v8 {
namespace internal {
+class StartupSerializer;
+
class PartialSerializer : public Serializer {
public:
- PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
- SnapshotByteSink* sink);
+ PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
~PartialSerializer() override;
@@ -22,37 +23,12 @@ class PartialSerializer : public Serializer {
void Serialize(Object** o);
private:
- class PartialCacheIndexMap : public AddressMapBase {
- public:
- PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
-
- static const int kInvalidIndex = -1;
-
- // Lookup object in the map. Return its index if found, or create
- // a new entry with new_index as value, and return kInvalidIndex.
- int LookupOrInsert(HeapObject* obj, int new_index) {
- HashMap::Entry* entry = LookupEntry(&map_, obj, false);
- if (entry != NULL) return GetValue(entry);
- SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
- return kInvalidIndex;
- }
-
- private:
- HashMap map_;
-
- DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
- };
-
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
- int PartialSnapshotCacheIndex(HeapObject* o);
bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
- Serializer* startup_serializer_;
- Object* global_object_;
- PartialCacheIndexMap partial_cache_index_map_;
- int next_partial_cache_index_;
+ StartupSerializer* startup_serializer_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index eeb7eb73fa..bb3cc5c535 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -14,13 +14,16 @@ namespace internal {
ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
map_ = isolate->external_reference_map();
if (map_ != NULL) return;
- map_ = new HashMap(HashMap::PointersMatch);
+ map_ = new base::HashMap(base::HashMap::PointersMatch);
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
for (int i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
if (addr == ExternalReferenceTable::NotAvailable()) continue;
// We expect no duplicate external reference entries in the table.
- DCHECK_NULL(map_->Lookup(addr, Hash(addr)));
+ // AccessorRefTable getters may have duplicates, indicated by an empty
+ // string as the name.
+ DCHECK(table->name(i)[0] == '\0' ||
+ map_->Lookup(addr, Hash(addr)) == nullptr);
map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
}
isolate->set_external_reference_map(map_);
@@ -28,16 +31,16 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
uint32_t ExternalReferenceEncoder::Encode(Address address) const {
DCHECK_NOT_NULL(address);
- HashMap::Entry* entry =
- const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+ base::HashMap::Entry* entry =
+ const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
DCHECK_NOT_NULL(entry);
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
}
const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
Address address) const {
- HashMap::Entry* entry =
- const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+ base::HashMap::Entry* entry =
+ const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
if (entry == NULL) return "<unknown>";
uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
return ExternalReferenceTable::instance(isolate)->name(i);
@@ -56,6 +59,7 @@ void SerializedData::AllocateData(int size) {
// - during deserialization to populate it.
// - during normal GC to keep its content alive.
// - not during serialization. The partial serializer adds to it explicitly.
+DISABLE_CFI_PERF
void SerializerDeserializer::Iterate(Isolate* isolate, ObjectVisitor* visitor) {
List<Object*>* cache = isolate->partial_snapshot_cache();
for (int i = 0;; ++i) {
@@ -64,7 +68,7 @@ void SerializerDeserializer::Iterate(Isolate* isolate, ObjectVisitor* visitor) {
// During deserialization, the visitor populates the partial snapshot cache
// and eventually terminates the cache with undefined.
visitor->VisitPointer(&cache->at(i));
- if (cache->at(i)->IsUndefined()) break;
+ if (cache->at(i)->IsUndefined(isolate)) break;
}
}
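
A note on the hunks above: ExternalReferenceEncoder is a one-time reverse map. Every entry of the ExternalReferenceTable is keyed by pointer identity into a base::HashMap, after which Encode() is a single lookup. A standalone sketch of that contract (std::unordered_map stands in for base::HashMap; the table contents are invented):

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    int main() {
      // Stand-in external reference table: index -> address.
      void* table[] = {reinterpret_cast<void*>(0x1000),
                       reinterpret_cast<void*>(0x2000),
                       reinterpret_cast<void*>(0x3000)};
      // Build the reverse map once, as the constructor does.
      std::unordered_map<void*, uint32_t> map;
      for (uint32_t i = 0; i < 3; ++i) map.emplace(table[i], i);
      // Encode() is then a single hash lookup from address to index.
      std::printf("encoded: %u\n", map.at(reinterpret_cast<void*>(0x2000)));
      return 0;
    }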
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 645a9af3bf..74b0218073 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -28,7 +28,7 @@ class ExternalReferenceEncoder {
kPointerSizeLog2);
}
- HashMap* map_;
+ base::HashMap* map_;
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
};
@@ -40,11 +40,13 @@ class HotObjectsList {
}
void Add(HeapObject* object) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
circular_queue_[index_] = object;
index_ = (index_ + 1) & kSizeMask;
}
HeapObject* Get(int index) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
DCHECK_NOT_NULL(circular_queue_[index]);
return circular_queue_[index];
}
@@ -52,6 +54,7 @@ class HotObjectsList {
static const int kNotFound = -1;
int Find(HeapObject* object) {
+ DCHECK(!AllowHeapAllocation::IsAllowed());
for (int i = 0; i < kSize; i++) {
if (circular_queue_[i] == object) return i;
}
@@ -77,7 +80,9 @@ class SerializerDeserializer : public ObjectVisitor {
static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
// No reservation for large object space necessary.
- static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
+ // We also handle map space differently.
+ STATIC_ASSERT(MAP_SPACE == CODE_SPACE + 1);
+ static const int kNumberOfPreallocatedSpaces = CODE_SPACE + 1;
static const int kNumberOfSpaces = LAST_SPACE + 1;
protected:
@@ -91,31 +96,26 @@ class SerializerDeserializer : public ObjectVisitor {
STATIC_ASSERT(5 == kNumberOfSpaces);
enum Where {
// 0x00..0x04 Allocate new object, in specified space.
- kNewObject = 0,
- // 0x05 Unused (including 0x25, 0x45, 0x65).
- // 0x06 Unused (including 0x26, 0x46, 0x66).
- // 0x07 Unused (including 0x27, 0x47, 0x67).
+ kNewObject = 0x00,
// 0x08..0x0c Reference to previous object from space.
kBackref = 0x08,
- // 0x0d Unused (including 0x2d, 0x4d, 0x6d).
- // 0x0e Unused (including 0x2e, 0x4e, 0x6e).
- // 0x0f Unused (including 0x2f, 0x4f, 0x6f).
// 0x10..0x14 Reference to previous object from space after skip.
kBackrefWithSkip = 0x10,
- // 0x15 Unused (including 0x35, 0x55, 0x75).
- // 0x16 Unused (including 0x36, 0x56, 0x76).
- // 0x17 Misc (including 0x37, 0x57, 0x77).
- // 0x18 Root array item.
- kRootArray = 0x18,
- // 0x19 Object in the partial snapshot cache.
- kPartialSnapshotCache = 0x19,
- // 0x1a External reference referenced by id.
- kExternalReference = 0x1a,
- // 0x1b Object provided in the attached list.
- kAttachedReference = 0x1b,
- // 0x1c Builtin code referenced by index.
- kBuiltin = 0x1c
- // 0x1d..0x1f Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
+
+ // 0x05 Root array item.
+ kRootArray = 0x05,
+ // 0x06 Object in the partial snapshot cache.
+ kPartialSnapshotCache = 0x06,
+ // 0x07 External reference referenced by id.
+ kExternalReference = 0x07,
+
+ // 0x0d Object provided in the attached list.
+ kAttachedReference = 0x0d,
+ // 0x0e Builtin code referenced by index.
+ kBuiltin = 0x0e,
+
+ // 0x0f Misc, see below (incl. 0x2f, 0x4f, 0x6f).
+ // 0x15..0x1f Misc, see below (incl. 0x35..0x3f, 0x55..0x5f, 0x75..0x7f).
};
static const int kWhereMask = 0x1f;
@@ -144,36 +144,45 @@ class SerializerDeserializer : public ObjectVisitor {
// ---------- Misc ----------
// Skip.
- static const int kSkip = 0x1d;
- // Internal reference encoded as offsets of pc and target from code entry.
- static const int kInternalReference = 0x1e;
- static const int kInternalReferenceEncoded = 0x1f;
+ static const int kSkip = 0x0f;
// Do nothing, used for padding.
- static const int kNop = 0x3d;
+ static const int kNop = 0x2f;
// Move to next reserved chunk.
- static const int kNextChunk = 0x3e;
+ static const int kNextChunk = 0x4f;
// Deferring object content.
- static const int kDeferred = 0x3f;
- // Used for the source code of the natives, which is in the executable, but
- // is referred to from external strings in the snapshot.
- static const int kNativesStringResource = 0x5d;
- // Used for the source code for compiled stubs, which is in the executable,
- // but is referred to from external strings in the snapshot.
- static const int kExtraNativesStringResource = 0x5e;
+ static const int kDeferred = 0x6f;
+ // Alignment prefixes 0x15..0x17
+ static const int kAlignmentPrefix = 0x15;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
- static const int kSynchronize = 0x17;
+ static const int kSynchronize = 0x18;
// Repeats of variable length.
- static const int kVariableRepeat = 0x37;
+ static const int kVariableRepeat = 0x19;
// Raw data of variable length.
- static const int kVariableRawData = 0x57;
- // Alignment prefixes 0x7d..0x7f
- static const int kAlignmentPrefix = 0x7d;
+ static const int kVariableRawData = 0x1a;
+ // Internal reference encoded as offsets of pc and target from code entry.
+ static const int kInternalReference = 0x1b;
+ static const int kInternalReferenceEncoded = 0x1c;
+ // Used for the source code of the natives, which is in the executable, but
+ // is referred to from external strings in the snapshot.
+ static const int kNativesStringResource = 0x1d;
+ // Used for the source code for compiled stubs, which is in the executable,
+ // but is referred to from external strings in the snapshot.
+ static const int kExtraNativesStringResource = 0x1e;
+
+ // 8 hot (recently seen or back-referenced) objects with optional skip.
+ static const int kNumberOfHotObjects = 8;
+ STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
+ // 0x38..0x3f
+ static const int kHotObject = 0x38;
+ // 0x58..0x5f
+ static const int kHotObjectWithSkip = 0x58;
+ static const int kHotObjectMask = 0x07;
- // 0x77 unused
+ // 0x1f, 0x35..0x37, 0x55..0x57, 0x75..0x7f unused.
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
@@ -184,39 +193,27 @@ class SerializerDeserializer : public ObjectVisitor {
static const int kRootArrayConstantsWithSkip = 0xa0;
static const int kRootArrayConstantsMask = 0x1f;
- // 8 hot (recently seen or back-referenced) objects with optional skip.
- static const int kNumberOfHotObjects = 0x08;
- // 0xc0..0xc7
- static const int kHotObject = 0xc0;
- // 0xc8..0xcf
- static const int kHotObjectWithSkip = 0xc8;
- static const int kHotObjectMask = 0x07;
-
// 32 common raw data lengths.
static const int kNumberOfFixedRawData = 0x20;
- // 0xd0..0xef
- static const int kFixedRawData = 0xd0;
+ // 0xc0..0xdf
+ static const int kFixedRawData = 0xc0;
static const int kOnePointerRawData = kFixedRawData;
static const int kFixedRawDataStart = kFixedRawData - 1;
// 16 repeats lengths.
static const int kNumberOfFixedRepeat = 0x10;
- // 0xf0..0xff
- static const int kFixedRepeat = 0xf0;
+ // 0xe0..0xef
+ static const int kFixedRepeat = 0xe0;
static const int kFixedRepeatStart = kFixedRepeat - 1;
+ // 0xf0..0xff unused.
+
// ---------- special values ----------
static const int kAnyOldSpace = -1;
// Sentinel after a new object to indicate that double alignment is needed.
static const int kDoubleAlignmentSentinel = 0;
- // Used as index for the attached reference representing the source object.
- static const int kSourceObjectReference = 0;
-
- // Used as index for the attached reference representing the global proxy.
- static const int kGlobalProxyReference = 0;
-
// ---------- member variable ----------
HotObjectsList hot_objects_;
};
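
A worked example for the repacked bytecode space above: the low five bits (kWhereMask = 0x1f) select the Where case, and for kNewObject/kBackref/kBackrefWithSkip also the allocation space, while the 0x20 and 0x40 bits carry the HowToCode/WhereToPoint variants (the Misc codes reuse those bit positions, which is why kSkip = 0x0f, kNop = 0x2f, kNextChunk = 0x4f and kDeferred = 0x6f differ only in those two bits). The 0x20/0x40 reading of the variant bits is inferred from that comment pattern, not stated in this hunk:

    #include <cstdio>

    int main() {
      const int kBackref = 0x08;    // Where: back reference, 0x08..0x0c
      const int kHotObject = 0x38;  // hot objects, 0x38..0x3f
      const int kWhereMask = 0x1f;
      const int kHotObjectMask = 0x07;
      int space = 2;       // hypothetical AllocationSpace index (0..4)
      int variant = 0x20;  // assumed HowToCode/WhereToPoint bit

      int b1 = kBackref + variant + space;  // as in sink_.Put(kBackref + ...)
      int b2 = kHotObject + 5;              // hot object at working-set index 5
      std::printf("0x%02x -> where 0x%02x\n", b1, b1 & kWhereMask);      // 0x2a -> 0x0a
      std::printf("0x%02x -> hot index %d\n", b2, b2 & kHotObjectMask);  // 0x3d -> 5
      return 0;
    }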
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 41693384f3..d7a7f89278 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -10,13 +10,13 @@
namespace v8 {
namespace internal {
-Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+Serializer::Serializer(Isolate* isolate)
: isolate_(isolate),
- sink_(sink),
external_reference_encoder_(isolate),
root_index_map_(isolate),
recursion_depth_(0),
code_address_map_(NULL),
+ num_maps_(0),
large_objects_total_size_(0),
seen_large_objects_index_(0) {
// The serializer is meant to be used only to generate initial heap images
@@ -71,15 +71,15 @@ void Serializer::OutputStatistics(const char* name) {
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
- PrintF("%16" V8_SIZET_PREFIX V8_PTR_PREFIX "d", s);
+ PrintF("%16" PRIuS, s);
}
PrintF("%16d\n", large_objects_total_size_);
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
-#define PRINT_INSTANCE_TYPE(Name) \
- if (instance_type_count_[Name]) { \
- PrintF("%10d %10" V8_SIZET_PREFIX V8_PTR_PREFIX "d %s\n", \
- instance_type_count_[Name], instance_type_size_[Name], #Name); \
+#define PRINT_INSTANCE_TYPE(Name) \
+ if (instance_type_count_[Name]) { \
+ PrintF("%10d %10" PRIuS " %s\n", instance_type_count_[Name], \
+ instance_type_size_[Name], #Name); \
}
INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
@@ -90,10 +90,10 @@ void Serializer::OutputStatistics(const char* name) {
void Serializer::SerializeDeferredObjects() {
while (deferred_objects_.length() > 0) {
HeapObject* obj = deferred_objects_.RemoveLast();
- ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
+ ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
obj_serializer.SerializeDeferred();
}
- sink_->Put(kSynchronize, "Finished with deferred objects");
+ sink_.Put(kSynchronize, "Finished with deferred objects");
}
void Serializer::VisitPointers(Object** start, Object** end) {
@@ -118,90 +118,88 @@ void Serializer::EncodeReservations(
}
out->last().mark_as_last();
}
-
+ out->Add(SerializedData::Reservation(num_maps_ * Map::kSize));
+ out->last().mark_as_last();
out->Add(SerializedData::Reservation(large_objects_total_size_));
out->last().mark_as_last();
}
#ifdef DEBUG
-bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
- DCHECK(reference.is_valid());
- DCHECK(!reference.is_source());
- DCHECK(!reference.is_global_proxy());
+bool Serializer::BackReferenceIsAlreadyAllocated(
+ SerializerReference reference) {
+ DCHECK(reference.is_back_reference());
AllocationSpace space = reference.space();
- int chunk_index = reference.chunk_index();
if (space == LO_SPACE) {
- return chunk_index == 0 &&
- reference.large_object_index() < seen_large_objects_index_;
- } else if (chunk_index == completed_chunks_[space].length()) {
- return reference.chunk_offset() < pending_chunk_[space];
+ return reference.large_object_index() < seen_large_objects_index_;
+ } else if (space == MAP_SPACE) {
+ return reference.map_index() < num_maps_;
} else {
- return chunk_index < completed_chunks_[space].length() &&
- reference.chunk_offset() < completed_chunks_[space][chunk_index];
+ int chunk_index = reference.chunk_index();
+ if (chunk_index == completed_chunks_[space].length()) {
+ return reference.chunk_offset() < pending_chunk_[space];
+ } else {
+ return chunk_index < completed_chunks_[space].length() &&
+ reference.chunk_offset() < completed_chunks_[space][chunk_index];
+ }
}
}
#endif // DEBUG
-bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) {
- if (how_to_code == kPlain && where_to_point == kStartOfObject) {
- // Encode a reference to a hot object by its index in the working set.
- int index = hot_objects_.Find(obj);
- if (index != HotObjectsList::kNotFound) {
- DCHECK(index >= 0 && index < kNumberOfHotObjects);
- if (FLAG_trace_serializer) {
- PrintF(" Encoding hot object %d:", index);
- obj->ShortPrint();
- PrintF("\n");
- }
- if (skip != 0) {
- sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
- sink_->PutInt(skip, "HotObjectSkipDistance");
- } else {
- sink_->Put(kHotObject + index, "HotObject");
- }
- return true;
- }
+bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
+ // Encode a reference to a hot object by its index in the working set.
+ int index = hot_objects_.Find(obj);
+ if (index == HotObjectsList::kNotFound) return false;
+ DCHECK(index >= 0 && index < kNumberOfHotObjects);
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding hot object %d:", index);
+ obj->ShortPrint();
+ PrintF("\n");
}
- BackReference back_reference = back_reference_map_.Lookup(obj);
- if (back_reference.is_valid()) {
- // Encode the location of an already deserialized object in order to write
- // its location into a later object. We can encode the location as an
- // offset fromthe start of the deserialized objects or as an offset
- // backwards from thecurrent allocation pointer.
- if (back_reference.is_source()) {
- FlushSkip(skip);
- if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
- DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
- sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
- sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
- } else if (back_reference.is_global_proxy()) {
- FlushSkip(skip);
- if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
- DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
- sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
- sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
- } else {
- if (FLAG_trace_serializer) {
- PrintF(" Encoding back reference to: ");
- obj->ShortPrint();
- PrintF("\n");
- }
+ if (skip != 0) {
+ sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
+ sink_.PutInt(skip, "HotObjectSkipDistance");
+ } else {
+ sink_.Put(kHotObject + index, "HotObject");
+ }
+ return true;
+}
+bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) {
+ SerializerReference reference = reference_map_.Lookup(obj);
+ if (!reference.is_valid()) return false;
+ // Encode the location of an already deserialized object in order to write
+ // its location into a later object. We can encode the location as an
+ // offset from the start of the deserialized objects or as an offset
+ // backwards from the current allocation pointer.
+ if (reference.is_attached_reference()) {
+ FlushSkip(skip);
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding attached reference %d\n",
+ reference.attached_reference_index());
+ }
+ PutAttachedReference(reference, how_to_code, where_to_point);
+ } else {
+ DCHECK(reference.is_back_reference());
+ if (FLAG_trace_serializer) {
+ PrintF(" Encoding back reference to: ");
+ obj->ShortPrint();
+ PrintF("\n");
+ }
- PutAlignmentPrefix(obj);
- AllocationSpace space = back_reference.space();
- if (skip == 0) {
- sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
- } else {
- sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
- "BackRefWithSkip");
- sink_->PutInt(skip, "BackRefSkipDistance");
- }
- PutBackReference(obj, back_reference);
+ PutAlignmentPrefix(obj);
+ AllocationSpace space = reference.space();
+ if (skip == 0) {
+ sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
+ } else {
+ sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
+ "BackRefWithSkip");
+ sink_.PutInt(skip, "BackRefSkipDistance");
}
- return true;
+ PutBackReference(obj, reference);
}
- return false;
+ return true;
}
void Serializer::PutRoot(int root_index, HeapObject* object,
@@ -218,77 +216,95 @@ void Serializer::PutRoot(int root_index, HeapObject* object,
root_index < kNumberOfRootArrayConstants &&
!isolate()->heap()->InNewSpace(object)) {
if (skip == 0) {
- sink_->Put(kRootArrayConstants + root_index, "RootConstant");
+ sink_.Put(kRootArrayConstants + root_index, "RootConstant");
} else {
- sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
- sink_->PutInt(skip, "SkipInPutRoot");
+ sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
+ sink_.PutInt(skip, "SkipInPutRoot");
}
} else {
FlushSkip(skip);
- sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
+ sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+ sink_.PutInt(root_index, "root_index");
+ hot_objects_.Add(object);
}
}
void Serializer::PutSmi(Smi* smi) {
- sink_->Put(kOnePointerRawData, "Smi");
+ sink_.Put(kOnePointerRawData, "Smi");
byte* bytes = reinterpret_cast<byte*>(&smi);
- for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
+ for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
}
-void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
+void Serializer::PutBackReference(HeapObject* object,
+ SerializerReference reference) {
DCHECK(BackReferenceIsAlreadyAllocated(reference));
- sink_->PutInt(reference.reference(), "BackRefValue");
+ sink_.PutInt(reference.back_reference(), "BackRefValue");
hot_objects_.Add(object);
}
+void Serializer::PutAttachedReference(SerializerReference reference,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point) {
+ DCHECK(reference.is_attached_reference());
+ DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+ (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+ (how_to_code == kFromCode && where_to_point == kStartOfObject) ||
+ (how_to_code == kFromCode && where_to_point == kInnerPointer));
+ sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
+ sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
+}
+
int Serializer::PutAlignmentPrefix(HeapObject* object) {
AllocationAlignment alignment = object->RequiredAlignment();
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
byte prefix = (kAlignmentPrefix - 1) + alignment;
- sink_->Put(prefix, "Alignment");
+ sink_.Put(prefix, "Alignment");
return Heap::GetMaximumFillToAlign(alignment);
}
return 0;
}
-BackReference Serializer::AllocateLargeObject(int size) {
+SerializerReference Serializer::AllocateLargeObject(int size) {
// Large objects are allocated one-by-one when deserializing. We do not
// have to keep track of multiple chunks.
large_objects_total_size_ += size;
- return BackReference::LargeObjectReference(seen_large_objects_index_++);
+ return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
+}
+
+SerializerReference Serializer::AllocateMap() {
+ // Maps are allocated one-by-one when deserializing.
+ return SerializerReference::MapReference(num_maps_++);
}
-BackReference Serializer::Allocate(AllocationSpace space, int size) {
+SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
uint32_t new_chunk_size = pending_chunk_[space] + size;
if (new_chunk_size > max_chunk_size(space)) {
// The new chunk size would not fit onto a single page. Complete the
// current chunk and start a new one.
- sink_->Put(kNextChunk, "NextChunk");
- sink_->Put(space, "NextChunkSpace");
+ sink_.Put(kNextChunk, "NextChunk");
+ sink_.Put(space, "NextChunkSpace");
completed_chunks_[space].Add(pending_chunk_[space]);
- DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
pending_chunk_[space] = 0;
new_chunk_size = size;
}
uint32_t offset = pending_chunk_[space];
pending_chunk_[space] = new_chunk_size;
- return BackReference::Reference(space, completed_chunks_[space].length(),
- offset);
+ return SerializerReference::BackReference(
+ space, completed_chunks_[space].length(), offset);
}
void Serializer::Pad() {
// The non-branching GetInt will read up to 3 bytes too far, so we need
// to pad the snapshot to make sure we don't read over the end.
for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
- sink_->Put(kNop, "Padding");
+ sink_.Put(kNop, "Padding");
}
// Pad up to pointer size for checksum.
- while (!IsAligned(sink_->Position(), kPointerAlignment)) {
- sink_->Put(kNop, "Padding");
+ while (!IsAligned(sink_.Position(), kPointerAlignment)) {
+ sink_.Put(kNop, "Padding");
}
}
@@ -320,7 +336,7 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
CodeNameEvent(object_->address(), sink_->Position(), code_name));
}
- BackReference back_reference;
+ SerializerReference back_reference;
if (space == LO_SPACE) {
sink_->Put(kNewObject + reference_representation_ + space,
"NewLargeObject");
@@ -331,6 +347,12 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
sink_->Put(NOT_EXECUTABLE, "not executable large object");
}
back_reference = serializer_->AllocateLargeObject(size);
+ } else if (space == MAP_SPACE) {
+ DCHECK_EQ(Map::kSize, size);
+ back_reference = serializer_->AllocateMap();
+ sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
+ // This is redundant, but we include it anyway.
+ sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
} else {
int fill = serializer_->PutAlignmentPrefix(object_);
back_reference = serializer_->Allocate(space, size + fill);
@@ -345,7 +367,7 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
#endif // OBJECT_PRINT
// Mark this object as already serialized.
- serializer_->back_reference_map()->Add(object_, back_reference);
+ serializer_->reference_map()->Add(object_, back_reference);
// Serialize the map (first word of the object).
serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
@@ -513,15 +535,17 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
int size = object_->Size();
Map* map = object_->map();
- BackReference reference = serializer_->back_reference_map()->Lookup(object_);
+ SerializerReference back_reference =
+ serializer_->reference_map()->Lookup(object_);
+ DCHECK(back_reference.is_back_reference());
// Serialize the rest of the object.
CHECK_EQ(0, bytes_processed_so_far_);
bytes_processed_so_far_ = kPointerSize;
serializer_->PutAlignmentPrefix(object_);
- sink_->Put(kNewObject + reference.space(), "deferred object");
- serializer_->PutBackReference(object_, reference);
+ sink_->Put(kNewObject + back_reference.space(), "deferred object");
+ serializer_->PutBackReference(object_, back_reference);
sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
UnlinkWeakNextScope unlink_weak_next(object_);
@@ -660,9 +684,10 @@ bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
int builtin_count,
v8::String::ExternalOneByteStringResource** resource_pointer,
FixedArray* source_cache, int resource_index) {
+ Isolate* isolate = serializer_->isolate();
for (int i = 0; i < builtin_count; i++) {
Object* source = source_cache->get(i);
- if (!source->IsUndefined()) {
+ if (!source->IsUndefined(isolate)) {
ExternalOneByteString* string = ExternalOneByteString::cast(source);
typedef v8::String::ExternalOneByteStringResource Resource;
const Resource* resource = string->resource();
@@ -679,6 +704,9 @@ bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
void Serializer::ObjectSerializer::VisitExternalOneByteString(
v8::String::ExternalOneByteStringResource** resource_pointer) {
+ DCHECK_EQ(serializer_->isolate()->heap()->native_source_string_map(),
+ object_->map());
+ DCHECK(ExternalOneByteString::cast(object_)->is_short());
Address references_start = reinterpret_cast<Address>(resource_pointer);
OutputRawData(references_start);
if (SerializeExternalNativeSourceString(
@@ -699,25 +727,27 @@ void Serializer::ObjectSerializer::VisitExternalOneByteString(
}
Address Serializer::ObjectSerializer::PrepareCode() {
- // To make snapshots reproducible, we make a copy of the code object
- // and wipe all pointers in the copy, which we then serialize.
- Code* original = Code::cast(object_);
- Code* code = serializer_->CopyCode(original);
+ Code* code = Code::cast(object_);
+ if (FLAG_predictable) {
+ // To make snapshots reproducible, we make a copy of the code object
+ // and wipe all pointers in the copy, which we then serialize.
+ code = serializer_->CopyCode(code);
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ rinfo->WipeOut();
+ }
+ // We need to wipe out the header fields *after* wiping out the
+ // relocations, because some of these fields are needed for the latter.
+ code->WipeOutHeader();
+ }
// Code age headers are not serializable.
code->MakeYoung(serializer_->isolate());
- int mode_mask = RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
- for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- rinfo->WipeOut();
- }
- // We need to wipe out the header fields *after* wiping out the
- // relocations, because some of these fields are needed for the latter.
- code->WipeOutHeader();
return code->address();
}
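
For reference, Serializer::Allocate above keeps one pending chunk per space and closes it (emitting kNextChunk) whenever the next object would overflow the page; the reference it hands out is (space, chunk_index, chunk_offset). A minimal standalone model of just that bookkeeping, with an invented 100-byte page:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct ChunkAllocator {
      uint32_t max_chunk_size;          // page area size for this space
      uint32_t pending = 0;             // bytes used in the open chunk
      std::vector<uint32_t> completed;  // sizes of finished chunks

      // Returns {chunk_index, chunk_offset}, the per-space components
      // of a back reference.
      std::pair<uint32_t, uint32_t> Allocate(uint32_t size) {
        if (pending + size > max_chunk_size) {
          completed.push_back(pending);  // serializer also emits kNextChunk
          pending = 0;
        }
        uint32_t offset = pending;
        pending += size;
        return {static_cast<uint32_t>(completed.size()), offset};
      }
    };

    int main() {
      ChunkAllocator alloc{/*max_chunk_size=*/100};
      auto a = alloc.Allocate(60);  // fits: chunk 0, offset 0
      auto b = alloc.Allocate(60);  // would overflow: chunk 1, offset 0
      std::printf("a=(%u,%u) b=(%u,%u)\n", a.first, a.second, b.first, b.second);
      return 0;
    }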
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index eccbaabf5b..ff2c6a979d 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -38,28 +38,29 @@ class CodeAddressMap : public CodeEventLogger {
private:
class NameMap {
public:
- NameMap() : impl_(HashMap::PointersMatch) {}
+ NameMap() : impl_(base::HashMap::PointersMatch) {}
~NameMap() {
- for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+ for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
+ p = impl_.Next(p)) {
DeleteArray(static_cast<const char*>(p->value));
}
}
void Insert(Address code_address, const char* name, int name_size) {
- HashMap::Entry* entry = FindOrCreateEntry(code_address);
+ base::HashMap::Entry* entry = FindOrCreateEntry(code_address);
if (entry->value == NULL) {
entry->value = CopyName(name, name_size);
}
}
const char* Lookup(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
+ base::HashMap::Entry* entry = FindEntry(code_address);
return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
}
void Remove(Address code_address) {
- HashMap::Entry* entry = FindEntry(code_address);
+ base::HashMap::Entry* entry = FindEntry(code_address);
if (entry != NULL) {
DeleteArray(static_cast<char*>(entry->value));
RemoveEntry(entry);
@@ -68,11 +69,11 @@ class CodeAddressMap : public CodeEventLogger {
void Move(Address from, Address to) {
if (from == to) return;
- HashMap::Entry* from_entry = FindEntry(from);
+ base::HashMap::Entry* from_entry = FindEntry(from);
DCHECK(from_entry != NULL);
void* value = from_entry->value;
RemoveEntry(from_entry);
- HashMap::Entry* to_entry = FindOrCreateEntry(to);
+ base::HashMap::Entry* to_entry = FindOrCreateEntry(to);
DCHECK(to_entry->value == NULL);
to_entry->value = value;
}
@@ -89,20 +90,20 @@ class CodeAddressMap : public CodeEventLogger {
return result;
}
- HashMap::Entry* FindOrCreateEntry(Address code_address) {
+ base::HashMap::Entry* FindOrCreateEntry(Address code_address) {
return impl_.LookupOrInsert(code_address,
ComputePointerHash(code_address));
}
- HashMap::Entry* FindEntry(Address code_address) {
+ base::HashMap::Entry* FindEntry(Address code_address) {
return impl_.Lookup(code_address, ComputePointerHash(code_address));
}
- void RemoveEntry(HashMap::Entry* entry) {
+ void RemoveEntry(base::HashMap::Entry* entry) {
impl_.Remove(entry->key, entry->hash);
}
- HashMap impl_;
+ base::HashMap impl_;
DISALLOW_COPY_AND_ASSIGN(NameMap);
};
@@ -119,7 +120,7 @@ class CodeAddressMap : public CodeEventLogger {
// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
public:
- Serializer(Isolate* isolate, SnapshotByteSink* sink);
+ explicit Serializer(Isolate* isolate);
~Serializer() override;
void EncodeReservations(List<SerializedData::Reservation>* out) const;
@@ -128,7 +129,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate() const { return isolate_; }
- BackReferenceMap* back_reference_map() { return &back_reference_map_; }
+ SerializerReferenceMap* reference_map() { return &reference_map_; }
RootIndexMap* root_index_map() { return &root_index_map_; }
#ifdef OBJECT_PRINT
@@ -162,27 +163,35 @@ class Serializer : public SerializerDeserializer {
void PutSmi(Smi* smi);
- void PutBackReference(HeapObject* object, BackReference reference);
+ void PutBackReference(HeapObject* object, SerializerReference reference);
+
+ void PutAttachedReference(SerializerReference reference,
+ HowToCode how_to_code, WhereToPoint where_to_point);
// Emit alignment prefix if necessary, return required padding space in bytes.
int PutAlignmentPrefix(HeapObject* object);
- // Returns true if the object was successfully serialized.
- bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip);
+ // Returns true if the object was successfully serialized as hot object.
+ bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
+
+ // Returns true if the object was successfully serialized as back reference.
+ bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
inline void FlushSkip(int skip) {
if (skip != 0) {
- sink_->Put(kSkip, "SkipFromSerializeObject");
- sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+ sink_.Put(kSkip, "SkipFromSerializeObject");
+ sink_.PutInt(skip, "SkipDistanceFromSerializeObject");
}
}
- bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
+ bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
// This will return the space for an object.
- BackReference AllocateLargeObject(int size);
- BackReference Allocate(AllocationSpace space, int size);
+ SerializerReference AllocateLargeObject(int size);
+ SerializerReference AllocateMap();
+ SerializerReference Allocate(AllocationSpace space, int size);
int EncodeExternalReference(Address addr) {
return external_reference_encoder_.Encode(addr);
}
@@ -204,10 +213,10 @@ class Serializer : public SerializerDeserializer {
return max_chunk_size_[space];
}
- SnapshotByteSink* sink() const { return sink_; }
+ const SnapshotByteSink* sink() const { return &sink_; }
void QueueDeferredObject(HeapObject* obj) {
- DCHECK(back_reference_map_.Lookup(obj).is_valid());
+ DCHECK(reference_map_.Lookup(obj).is_back_reference());
deferred_objects_.Add(obj);
}
@@ -215,10 +224,10 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate_;
- SnapshotByteSink* sink_;
+ SnapshotByteSink sink_;
ExternalReferenceEncoder external_reference_encoder_;
- BackReferenceMap back_reference_map_;
+ SerializerReferenceMap reference_map_;
RootIndexMap root_index_map_;
int recursion_depth_;
@@ -237,6 +246,8 @@ class Serializer : public SerializerDeserializer {
uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
+ // Number of maps that we need to allocate.
+ uint32_t num_maps_;
// We map serialized large objects to indexes for back-referencing.
uint32_t large_objects_total_size_;
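
The SerializeKnownObject split declared above turns each SerializeObject override into an early-return chain: hot object first, then back or attached reference, then full serialization. A simplified standalone model of that control flow (the predicates are placeholders, not V8 behavior):

    #include <cstdio>

    // Placeholders; the real helpers consult the 8-entry hot-object
    // working set and the SerializerReferenceMap respectively.
    bool SerializeHotObject(int obj) { return obj == 1; }
    bool SerializeBackReference(int obj) { return obj == 2; }

    void SerializeObject(int obj) {
      if (SerializeHotObject(obj)) { std::puts("hot-object byte"); return; }
      if (SerializeBackReference(obj)) { std::puts("back reference"); return; }
      std::puts("full object serialization");
    }

    int main() {
      SerializeObject(1);  // hot-object byte
      SerializeObject(2);  // back reference
      SerializeObject(3);  // full object serialization
      return 0;
    }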
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index eb3bdb5604..fed45d16b6 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -18,22 +18,17 @@ namespace internal {
#ifdef DEBUG
bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
- return !Snapshot::ExtractStartupData(snapshot_blob).is_empty() &&
- !Snapshot::ExtractContextData(snapshot_blob).is_empty();
+ return Snapshot::ExtractNumContexts(snapshot_blob) > 0;
}
#endif // DEBUG
-
-bool Snapshot::HaveASnapshotToStartFrom(Isolate* isolate) {
+bool Snapshot::HasContextSnapshot(Isolate* isolate, size_t index) {
// Do not use snapshots if the isolate is used to create snapshots.
- return isolate->snapshot_blob() != NULL &&
- isolate->snapshot_blob()->data != NULL;
-}
-
-
-bool Snapshot::EmbedsScript(Isolate* isolate) {
- if (!isolate->snapshot_available()) return false;
- return ExtractMetadata(isolate->snapshot_blob()).embeds_script();
+ const v8::StartupData* blob = isolate->snapshot_blob();
+ if (blob == nullptr) return false;
+ if (blob->data == nullptr) return false;
+ size_t num_contexts = static_cast<size_t>(ExtractNumContexts(blob));
+ return index < num_contexts;
}
@@ -67,15 +62,16 @@ bool Snapshot::Initialize(Isolate* isolate) {
return success;
}
-
MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ size_t context_index) {
if (!isolate->snapshot_available()) return Handle<Context>();
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
const v8::StartupData* blob = isolate->snapshot_blob();
- Vector<const byte> context_data = ExtractContextData(blob);
+ Vector<const byte> context_data =
+ ExtractContextData(blob, static_cast<int>(context_index));
SnapshotData snapshot_data(context_data);
Deserializer deserializer(&snapshot_data);
@@ -87,177 +83,192 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = context_data.length();
- PrintF("[Deserializing context (%d bytes) took %0.3f ms]\n", bytes, ms);
+ PrintF("[Deserializing context #%zu (%d bytes) took %0.3f ms]\n",
+ context_index, bytes, ms);
}
return Handle<Context>::cast(result);
}
+void UpdateMaxRequirementPerPage(
+ uint32_t* requirements,
+ Vector<const SerializedData::Reservation> reservations) {
+ int space = 0;
+ uint32_t current_requirement = 0;
+ for (const auto& reservation : reservations) {
+ current_requirement += reservation.chunk_size();
+ if (reservation.is_last()) {
+ requirements[space] = std::max(requirements[space], current_requirement);
+ current_requirement = 0;
+ space++;
+ }
+ }
+ DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
+}
-void CalculateFirstPageSizes(bool is_default_snapshot,
- const SnapshotData& startup_snapshot,
- const SnapshotData& context_snapshot,
+void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
+ const List<SnapshotData*>* context_snapshots,
uint32_t* sizes_out) {
- Vector<const SerializedData::Reservation> startup_reservations =
- startup_snapshot.Reservations();
- Vector<const SerializedData::Reservation> context_reservations =
- context_snapshot.Reservations();
- int startup_index = 0;
- int context_index = 0;
-
if (FLAG_profile_deserialization) {
int startup_total = 0;
- int context_total = 0;
- for (auto& reservation : startup_reservations) {
+ PrintF("Deserialization will reserve:\n");
+ for (const auto& reservation : startup_snapshot->Reservations()) {
startup_total += reservation.chunk_size();
}
- for (auto& reservation : context_reservations) {
- context_total += reservation.chunk_size();
+ PrintF("%10d bytes per isolate\n", startup_total);
+ for (int i = 0; i < context_snapshots->length(); i++) {
+ int context_total = 0;
+ for (const auto& reservation : context_snapshots->at(i)->Reservations()) {
+ context_total += reservation.chunk_size();
+ }
+ PrintF("%10d bytes per context #%d\n", context_total, i);
}
- PrintF(
- "Deserialization will reserve:\n"
- "%10d bytes per isolate\n"
- "%10d bytes per context\n",
- startup_total, context_total);
}
+ uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
+ uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
- bool single_chunk = true;
- while (!startup_reservations[startup_index].is_last()) {
- single_chunk = false;
- startup_index++;
- }
- while (!context_reservations[context_index].is_last()) {
- single_chunk = false;
- context_index++;
- }
+ startup_requirements[space] = 0;
+ context_requirements[space] = 0;
+ }
- uint32_t required = kMaxUInt32;
- if (single_chunk) {
- // If both the startup snapshot data and the context snapshot data on
- // this space fit in a single page, then we consider limiting the size
- // of the first page. For this, we add the chunk sizes and some extra
- // allowance. This way we achieve a smaller startup memory footprint.
- required = (startup_reservations[startup_index].chunk_size() +
- 2 * context_reservations[context_index].chunk_size()) +
- Page::kObjectStartOffset;
- // Add a small allowance to the code space for small scripts.
- if (space == CODE_SPACE) required += 32 * KB;
- } else {
- // We expect the vanilla snapshot to only require on page per space.
- DCHECK(!is_default_snapshot);
- }
+ UpdateMaxRequirementPerPage(startup_requirements,
+ startup_snapshot->Reservations());
+ for (const auto& context_snapshot : *context_snapshots) {
+ UpdateMaxRequirementPerPage(context_requirements,
+ context_snapshot->Reservations());
+ }
+
+ for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
+ // If the space requirement for a page is less than a page size, we consider
+ // limiting the size of the first page in order to save memory on startup.
+ uint32_t required = startup_requirements[space] +
+ 2 * context_requirements[space] +
+ Page::kObjectStartOffset;
+ // Add a small allowance to the code space for small scripts.
+ if (space == CODE_SPACE) required += 32 * KB;
if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
uint32_t max_size =
MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
- sizes_out[space - FIRST_PAGED_SPACE] = Min(required, max_size);
- } else {
- DCHECK(single_chunk);
+ sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
}
- startup_index++;
- context_index++;
}
-
- DCHECK_EQ(startup_reservations.length(), startup_index);
- DCHECK_EQ(context_reservations.length(), context_index);
}
-
v8::StartupData Snapshot::CreateSnapshotBlob(
- const i::StartupSerializer& startup_ser,
- const i::PartialSerializer& context_ser, Snapshot::Metadata metadata) {
- SnapshotData startup_snapshot(startup_ser);
- SnapshotData context_snapshot(context_ser);
- Vector<const byte> startup_data = startup_snapshot.RawData();
- Vector<const byte> context_data = context_snapshot.RawData();
+ const SnapshotData* startup_snapshot,
+ const List<SnapshotData*>* context_snapshots) {
+ int num_contexts = context_snapshots->length();
+ int startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
+ int total_length = startup_snapshot_offset;
+ total_length += startup_snapshot->RawData().length();
+ for (const auto& context_snapshot : *context_snapshots) {
+ total_length += context_snapshot->RawData().length();
+ }
uint32_t first_page_sizes[kNumPagedSpaces];
+ CalculateFirstPageSizes(startup_snapshot, context_snapshots,
+ first_page_sizes);
- CalculateFirstPageSizes(!metadata.embeds_script(), startup_snapshot,
- context_snapshot, first_page_sizes);
-
- int startup_length = startup_data.length();
- int context_length = context_data.length();
- int context_offset = ContextOffset(startup_length);
-
- int length = context_offset + context_length;
- char* data = new char[length];
-
- memcpy(data + kMetadataOffset, &metadata.RawValue(), kInt32Size);
+ char* data = new char[total_length];
memcpy(data + kFirstPageSizesOffset, first_page_sizes,
kNumPagedSpaces * kInt32Size);
- memcpy(data + kStartupLengthOffset, &startup_length, kInt32Size);
- memcpy(data + kStartupDataOffset, startup_data.begin(), startup_length);
- memcpy(data + context_offset, context_data.begin(), context_length);
- v8::StartupData result = {data, length};
-
+ memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
+ int payload_offset = StartupSnapshotOffset(num_contexts);
+ int payload_length = startup_snapshot->RawData().length();
+ memcpy(data + payload_offset, startup_snapshot->RawData().start(),
+ payload_length);
if (FLAG_profile_deserialization) {
- PrintF(
- "Snapshot blob consists of:\n"
- "%10d bytes for startup\n"
- "%10d bytes for context\n",
- startup_length, context_length);
+ PrintF("Snapshot blob consists of:\n%10d bytes for startup\n",
+ payload_length);
+ }
+ payload_offset += payload_length;
+ for (int i = 0; i < num_contexts; i++) {
+ memcpy(data + ContextSnapshotOffsetOffset(i), &payload_offset, kInt32Size);
+ SnapshotData* context_snapshot = context_snapshots->at(i);
+ payload_length = context_snapshot->RawData().length();
+ memcpy(data + payload_offset, context_snapshot->RawData().start(),
+ payload_length);
+ if (FLAG_profile_deserialization) {
+ PrintF("%10d bytes for context #%d\n", payload_length, i);
+ }
+ payload_offset += payload_length;
}
+
+ v8::StartupData result = {data, total_length};
return result;
}
-
-Snapshot::Metadata Snapshot::ExtractMetadata(const v8::StartupData* data) {
- uint32_t raw;
- memcpy(&raw, data->data + kMetadataOffset, kInt32Size);
- return Metadata(raw);
+int Snapshot::ExtractNumContexts(const v8::StartupData* data) {
+ CHECK_LT(kNumberOfContextsOffset, data->raw_size);
+ int num_contexts;
+ memcpy(&num_contexts, data->data + kNumberOfContextsOffset, kInt32Size);
+ return num_contexts;
}
-
Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
- DCHECK_LT(kIntSize, data->raw_size);
- int startup_length;
- memcpy(&startup_length, data->data + kStartupLengthOffset, kInt32Size);
- DCHECK_LT(startup_length, data->raw_size);
+ int num_contexts = ExtractNumContexts(data);
+ int startup_offset = StartupSnapshotOffset(num_contexts);
+ CHECK_LT(startup_offset, data->raw_size);
+ int first_context_offset;
+ memcpy(&first_context_offset, data->data + ContextSnapshotOffsetOffset(0),
+ kInt32Size);
+ CHECK_LT(first_context_offset, data->raw_size);
+ int startup_length = first_context_offset - startup_offset;
const byte* startup_data =
- reinterpret_cast<const byte*>(data->data + kStartupDataOffset);
+ reinterpret_cast<const byte*>(data->data + startup_offset);
return Vector<const byte>(startup_data, startup_length);
}
+Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
+ int index) {
+ int num_contexts = ExtractNumContexts(data);
+ CHECK_LT(index, num_contexts);
+
+ int context_offset;
+ memcpy(&context_offset, data->data + ContextSnapshotOffsetOffset(index),
+ kInt32Size);
+ int next_context_offset;
+ if (index == num_contexts - 1) {
+ next_context_offset = data->raw_size;
+ } else {
+ memcpy(&next_context_offset,
+ data->data + ContextSnapshotOffsetOffset(index + 1), kInt32Size);
+ CHECK_LT(next_context_offset, data->raw_size);
+ }
-Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) {
- DCHECK_LT(kIntSize, data->raw_size);
- int startup_length;
- memcpy(&startup_length, data->data + kStartupLengthOffset, kIntSize);
- int context_offset = ContextOffset(startup_length);
const byte* context_data =
reinterpret_cast<const byte*>(data->data + context_offset);
- DCHECK_LT(context_offset, data->raw_size);
- int context_length = data->raw_size - context_offset;
+ int context_length = next_context_offset - context_offset;
return Vector<const byte>(context_data, context_length);
}
-SnapshotData::SnapshotData(const Serializer& ser) {
+SnapshotData::SnapshotData(const Serializer* serializer) {
DisallowHeapAllocation no_gc;
List<Reservation> reservations;
- ser.EncodeReservations(&reservations);
- const List<byte>& payload = ser.sink()->data();
+ serializer->EncodeReservations(&reservations);
+ const List<byte>* payload = serializer->sink()->data();
// Calculate sizes.
int reservation_size = reservations.length() * kInt32Size;
- int size = kHeaderSize + reservation_size + payload.length();
+ int size = kHeaderSize + reservation_size + payload->length();
// Allocate backing store and create result data.
AllocateData(size);
// Set header values.
- SetMagicNumber(ser.isolate());
+ SetMagicNumber(serializer->isolate());
SetHeaderValue(kCheckSumOffset, Version::Hash());
SetHeaderValue(kNumReservationsOffset, reservations.length());
- SetHeaderValue(kPayloadLengthOffset, payload.length());
+ SetHeaderValue(kPayloadLengthOffset, payload->length());
// Copy reservation chunk sizes.
CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
reservation_size);
// Copy serialized data.
- CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
- static_cast<size_t>(payload.length()));
+ CopyBytes(data_ + kHeaderSize + reservation_size, payload->begin(),
+ static_cast<size_t>(payload->length()));
}
bool SnapshotData::IsSane() {
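
UpdateMaxRequirementPerPage above folds a flat reservation list into per-space maxima: chunk sizes accumulate until an is_last() entry closes the current space, and the running total is maxed against the requirements of previously processed snapshots. A standalone sketch with two spaces and invented sizes:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Reservation { uint32_t chunk_size; bool is_last; };

    void UpdateMax(uint32_t* requirements, const std::vector<Reservation>& rs) {
      int space = 0;
      uint32_t current = 0;
      for (const auto& r : rs) {
        current += r.chunk_size;
        if (r.is_last) {  // this space's run of chunks is complete
          requirements[space] = std::max(requirements[space], current);
          current = 0;
          ++space;
        }
      }
    }

    int main() {
      uint32_t req[2] = {0, 0};
      // One snapshot needs 10+20 in space 0 and 5 in space 1.
      UpdateMax(req, {{10, false}, {20, true}, {5, true}});
      // A second needs less in space 0 but more in space 1.
      UpdateMax(req, {{15, true}, {40, true}});
      std::printf("space0=%u space1=%u\n", req[0], req[1]);  // 30 40
      return 0;
    }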
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 360ec76bb6..5d4c08d43a 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -94,7 +94,7 @@ class SnapshotByteSink {
void PutRaw(const byte* data, int number_of_bytes, const char* description);
int Position() { return data_.length(); }
- const List<byte>& data() const { return data_; }
+ const List<byte>* data() const { return &data_; }
private:
List<byte> data_;
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index c648d7595e..a541592fee 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -16,32 +16,55 @@ class Isolate;
class PartialSerializer;
class StartupSerializer;
-class Snapshot : public AllStatic {
+// Wrapper around reservation sizes and the serialization payload.
+class SnapshotData : public SerializedData {
public:
- class Metadata {
- public:
- explicit Metadata(uint32_t data = 0) : data_(data) {}
- bool embeds_script() { return EmbedsScriptBits::decode(data_); }
- void set_embeds_script(bool v) {
- data_ = EmbedsScriptBits::update(data_, v);
- }
+ // Used when producing.
+ explicit SnapshotData(const Serializer* serializer);
+
+ // Used when consuming.
+ explicit SnapshotData(const Vector<const byte> snapshot)
+ : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
+ CHECK(IsSane());
+ }
- uint32_t& RawValue() { return data_; }
+ Vector<const Reservation> Reservations() const;
+ Vector<const byte> Payload() const;
- private:
- class EmbedsScriptBits : public BitField<bool, 0, 1> {};
- uint32_t data_;
- };
+ Vector<const byte> RawData() const {
+ return Vector<const byte>(data_, size_);
+ }
+
+ private:
+ bool IsSane();
+ // The data header consists of uint32_t-sized entries:
+ // [0] magic number and external reference count
+ // [1] version hash
+ // [2] number of reservation size entries
+ // [3] payload length
+ // ... reservations
+ // ... serialized payload
+ static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
+ static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
+ static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
+ static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
+};
+
+class Snapshot : public AllStatic {
+ public:
// Initialize the Isolate from the internal snapshot. Returns false if no
// snapshot could be found.
static bool Initialize(Isolate* isolate);
// Create a new context using the internal partial snapshot.
static MaybeHandle<Context> NewContextFromSnapshot(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy);
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ size_t context_index);
static bool HaveASnapshotToStartFrom(Isolate* isolate);
+ static bool HasContextSnapshot(Isolate* isolate, size_t index);
+
static bool EmbedsScript(Isolate* isolate);
static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
@@ -51,35 +74,44 @@ class Snapshot : public AllStatic {
static const v8::StartupData* DefaultSnapshotBlob();
static v8::StartupData CreateSnapshotBlob(
- const StartupSerializer& startup_ser,
- const PartialSerializer& context_ser, Snapshot::Metadata metadata);
+ const SnapshotData* startup_snapshot,
+ const List<SnapshotData*>* context_snapshots);
#ifdef DEBUG
static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
#endif // DEBUG
private:
+ static int ExtractNumContexts(const v8::StartupData* data);
static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
- static Vector<const byte> ExtractContextData(const v8::StartupData* data);
- static Metadata ExtractMetadata(const v8::StartupData* data);
+ static Vector<const byte> ExtractContextData(const v8::StartupData* data,
+ int index);
// Snapshot blob layout:
- // [0] metadata
- // [1 - 6] pre-calculated first page sizes for paged spaces
- // [7] serialized start up data length
- // ... serialized start up data
- // ... serialized context data
+ // [0 - 5] pre-calculated first page sizes for paged spaces
+ // [6] number of contexts N
+ // [7] offset to context 0
+ // [8] offset to context 1
+ // ...
+ // ... offset to context N - 1
+ // ... startup snapshot data
+ // ... context 0 snapshot data
+ // ... context 1 snapshot data
static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- static const int kMetadataOffset = 0;
- static const int kFirstPageSizesOffset = kMetadataOffset + kInt32Size;
- static const int kStartupLengthOffset =
+ static const int kFirstPageSizesOffset = 0;
+ static const int kNumberOfContextsOffset =
kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
- static const int kStartupDataOffset = kStartupLengthOffset + kInt32Size;
+ static const int kFirstContextOffsetOffset =
+ kNumberOfContextsOffset + kInt32Size;
+
+ static int StartupSnapshotOffset(int num_contexts) {
+ return kFirstContextOffsetOffset + num_contexts * kInt32Size;
+ }
- static int ContextOffset(int startup_length) {
- return kStartupDataOffset + startup_length;
+ static int ContextSnapshotOffsetOffset(int index) {
+ return kFirstContextOffsetOffset + index * kInt32Size;
}
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
@@ -89,41 +121,6 @@ class Snapshot : public AllStatic {
void SetSnapshotFromFile(StartupData* snapshot_blob);
#endif
-// Wrapper around reservation sizes and the serialization payload.
-class SnapshotData : public SerializedData {
- public:
- // Used when producing.
- explicit SnapshotData(const Serializer& ser);
-
- // Used when consuming.
- explicit SnapshotData(const Vector<const byte> snapshot)
- : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
- CHECK(IsSane());
- }
-
- Vector<const Reservation> Reservations() const;
- Vector<const byte> Payload() const;
-
- Vector<const byte> RawData() const {
- return Vector<const byte>(data_, size_);
- }
-
- private:
- bool IsSane();
-
- // The data header consists of uint32_t-sized entries:
- // [0] magic number and external reference count
- // [1] version hash
- // [2] number of reservation size entries
- // [3] payload length
- // ... reservations
- // ... serialized payload
- static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
- static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
- static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
- static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
-};
-
} // namespace internal
} // namespace v8
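
Since every header field in the layout above is an int32 slot, the two offset helpers are pure arithmetic. A standalone check of that arithmetic for a blob with two contexts, assuming six paged-space size slots as suggested by the "[0 - 5]" layout comment:

    #include <cstdio>

    const int kInt32Size = 4;
    const int kNumPagedSpaces = 6;  // assumed, per the [0 - 5] comment
    const int kFirstPageSizesOffset = 0;
    const int kNumberOfContextsOffset =
        kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;  // 24
    const int kFirstContextOffsetOffset =
        kNumberOfContextsOffset + kInt32Size;  // 28

    int ContextSnapshotOffsetOffset(int index) {
      return kFirstContextOffsetOffset + index * kInt32Size;
    }
    int StartupSnapshotOffset(int num_contexts) {
      return kFirstContextOffsetOffset + num_contexts * kInt32Size;
    }

    int main() {
      int num_contexts = 2;
      // The offset slot for context 1 sits at byte 32; startup data then
      // begins right after the offset table, at byte 36.
      std::printf("context 1 offset slot: %d\n", ContextSnapshotOffsetOffset(1));
      std::printf("startup data offset:   %d\n", StartupSnapshotOffset(num_contexts));
      return 0;
    }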
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index fab01f51f8..80598e80bd 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -11,10 +11,11 @@ namespace v8 {
namespace internal {
StartupSerializer::StartupSerializer(
- Isolate* isolate, SnapshotByteSink* sink,
- FunctionCodeHandling function_code_handling)
- : Serializer(isolate, sink),
- function_code_handling_(function_code_handling),
+ Isolate* isolate,
+ v8::SnapshotCreator::FunctionCodeHandling function_code_handling)
+ : Serializer(isolate),
+ clear_function_code_(function_code_handling ==
+ v8::SnapshotCreator::FunctionCodeHandling::kClear),
serializing_builtins_(false) {
InitializeCodeAddressMap();
}
@@ -27,21 +28,21 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!obj->IsJSFunction());
- if (function_code_handling_ == CLEAR_FUNCTION_CODE) {
+ if (clear_function_code_) {
if (obj->IsCode()) {
Code* code = Code::cast(obj);
// If the function code is compiled (either as native code or bytecode),
// replace it with lazy-compile builtin. Only exception is when we are
// serializing the canonical interpreter-entry-trampoline builtin.
if (code->kind() == Code::FUNCTION ||
- (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+ (!serializing_builtins_ &&
+ code->is_interpreter_trampoline_builtin())) {
obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
}
} else if (obj->IsBytecodeArray()) {
obj = isolate()->heap()->undefined_value();
}
} else if (obj->IsCode()) {
- DCHECK_EQ(KEEP_FUNCTION_CODE, function_code_handling_);
Code* code = Code::cast(obj);
if (code->kind() == Code::FUNCTION) {
code->ClearInlineCaches();
@@ -49,6 +50,8 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
+ if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
int root_index = root_index_map_.Lookup(obj);
// We can only encode roots as such if it has already been serialized.
// That applies to root indices below the wave front.
@@ -59,12 +62,12 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
- if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
FlushSkip(skip);
// Object has not yet been serialized. Serialize it here.
- ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
+ ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
where_to_point);
object_serializer.Serialize();
@@ -73,8 +76,8 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
// Make sure that the immortal immovable root has been included in the first
// chunk of its reserved space, so that it is deserialized onto the first
// page of its space and stays immortal immovable.
- BackReference ref = back_reference_map_.Lookup(obj);
- CHECK(ref.is_valid() && ref.chunk_index() == 0);
+ SerializerReference ref = reference_map_.Lookup(obj);
+ CHECK(ref.is_back_reference() && ref.chunk_index() == 0);
}
}
@@ -89,11 +92,22 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
Pad();
}
+int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+ int index;
+ if (!partial_cache_index_map_.LookupOrInsert(heap_object, &index)) {
+ // This object is not part of the partial snapshot cache yet. Add it to the
+ // startup snapshot so we can refer to it via partial snapshot index from
+ // the partial snapshot.
+ VisitPointer(reinterpret_cast<Object**>(&heap_object));
+ }
+ return index;
+}
+
void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
// We expect the builtins tag after builtins have been serialized.
DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
- sink_->Put(kSynchronize, "Synchronize");
+ sink_.Put(kSynchronize, "Synchronize");
}
void StartupSerializer::SerializeStrongReferences() {
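
With the enum gone, the kClear path above boils down to: replace compiled function code with the lazy-compile builtin and drop bytecode arrays, so snapshotted functions recompile on first call. A hedged sketch of just that decision, with hypothetical stand-in types (Obj, Kind, and the two stub helpers are not V8 names):

  enum class Kind { kFunctionCode, kBytecode, kBuiltin, kOther };

  struct Obj { Kind kind; };

  Obj* LazyCompileStub() { static Obj o{Kind::kBuiltin}; return &o; }  // ~ Builtins::kCompileLazy
  Obj* UndefinedValue() { static Obj o{Kind::kOther}; return &o; }     // ~ heap undefined

  Obj* MaybeClear(Obj* obj, bool clear_function_code) {
    if (!clear_function_code) return obj;  // kKeep: serialize code as-is
    if (obj->kind == Kind::kFunctionCode) return LazyCompileStub();
    if (obj->kind == Kind::kBytecode) return UndefinedValue();
    return obj;  // builtins and other objects pass through unchanged
  }
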
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 71b8475469..cc66f71ae9 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -6,6 +6,7 @@
#define V8_SNAPSHOT_STARTUP_SERIALIZER_H_
#include <bitset>
+#include "include/v8.h"
#include "src/snapshot/serializer.h"
namespace v8 {
@@ -13,11 +14,9 @@ namespace internal {
class StartupSerializer : public Serializer {
public:
- enum FunctionCodeHandling { CLEAR_FUNCTION_CODE, KEEP_FUNCTION_CODE };
-
StartupSerializer(
- Isolate* isolate, SnapshotByteSink* sink,
- FunctionCodeHandling function_code_handling = CLEAR_FUNCTION_CODE);
+ Isolate* isolate,
+ v8::SnapshotCreator::FunctionCodeHandling function_code_handling);
~StartupSerializer() override;
// Serialize the current state of the heap. The order is:
@@ -28,7 +27,34 @@ class StartupSerializer : public Serializer {
void SerializeStrongReferences();
void SerializeWeakReferencesAndDeferred();
+ int PartialSnapshotCacheIndex(HeapObject* o);
+
private:
+ class PartialCacheIndexMap : public AddressMapBase {
+ public:
+ PartialCacheIndexMap()
+ : map_(base::HashMap::PointersMatch), next_index_(0) {}
+
+ // Look up the object in the map. If it is found, set *index_out to its
+ // existing index and return true; otherwise insert it under the next free
+ // index, set *index_out to that index, and return false.
+ bool LookupOrInsert(HeapObject* obj, int* index_out) {
+ base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+ if (entry != NULL) {
+ *index_out = GetValue(entry);
+ return true;
+ }
+ *index_out = next_index_;
+ SetValue(LookupEntry(&map_, obj, true), next_index_++);
+ return false;
+ }
+
+ private:
+ base::HashMap map_;
+ int next_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
+ };
+
// The StartupSerializer has to serialize the root array, which is slightly
// different.
void VisitPointers(Object** start, Object** end) override;
@@ -42,10 +68,11 @@ class StartupSerializer : public Serializer {
// roots. In the second pass, we serialize the rest.
bool RootShouldBeSkipped(int root_index);
- FunctionCodeHandling function_code_handling_;
+ bool clear_function_code_;
bool serializing_builtins_;
bool serializing_immortal_immovables_roots_;
std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
+ PartialCacheIndexMap partial_cache_index_map_;
DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};
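
PartialCacheIndexMap is a find-or-assign-next-index map over heap objects. The same contract, sketched with std::unordered_map in place of base::HashMap:

  #include <cassert>
  #include <unordered_map>

  class IndexMap {
   public:
    // Returns true and the existing index on a hit; otherwise assigns the
    // next free index and returns false.
    bool LookupOrInsert(const void* obj, int* index_out) {
      auto it = map_.find(obj);
      if (it != map_.end()) {
        *index_out = it->second;
        return true;
      }
      *index_out = next_index_;
      map_.emplace(obj, next_index_++);
      return false;
    }

   private:
    std::unordered_map<const void*, int> map_;
    int next_index_ = 0;
  };

  // First sight inserts, second sight finds:
  //   IndexMap m; int i;
  //   assert(!m.LookupOrInsert(&m, &i) && i == 0);
  //   assert(m.LookupOrInsert(&m, &i) && i == 0);
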
diff --git a/deps/v8/src/interpreter/source-position-table.cc b/deps/v8/src/source-position-table.cc
index 99a865b84e..ef6d0773f9 100644
--- a/deps/v8/src/interpreter/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/interpreter/source-position-table.h"
+#include "src/source-position-table.h"
+#include "src/log.h"
#include "src/objects-inl.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
-namespace interpreter {
// We'll use a simple encoding scheme to record the source positions.
// Conceptually, each position consists of:
-// - bytecode_offset: An integer index into the BytecodeArray
+// - code_offset: An integer index into the BytecodeArray or code.
// - source_position: An integer index into the source string.
// - position type: Each position is either a statement or an expression.
//
@@ -21,26 +21,20 @@ namespace interpreter {
// where each byte contains 7 bits of payload data, and 1 'more' bit that
// determines whether additional bytes follow. Additionally:
// - we record the difference from the previous position,
-// - we just stuff one bit for the type into the bytecode offset,
+// - we just stuff one bit for the type into the code offset,
// - we write least-significant bits first,
-// - negative numbers occur only rarely, so we use a denormalized
-// most-significant byte (a byte with all zeros, which normally wouldn't
-// make any sense) to encode a negative sign, so that we 'pay' nothing for
-// positive numbers, but have to pay a full byte for negative integers.
+// - we use zig-zag encoding to encode both positive and negative numbers.
namespace {
-// A zero-value in the most-significant byte is used to mark negative numbers.
-const int kNegativeSignMarker = 0;
-
// Each byte is encoded as MoreBit | ValueBits.
class MoreBit : public BitField8<bool, 7, 1> {};
-class ValueBits : public BitField8<int, 0, 7> {};
+class ValueBits : public BitField8<unsigned, 0, 7> {};
// Helper: Add the offsets from 'other' to 'value'. Also set is_statement.
void AddAndSetEntry(PositionTableEntry& value,
const PositionTableEntry& other) {
- value.bytecode_offset += other.bytecode_offset;
+ value.code_offset += other.code_offset;
value.source_position += other.source_position;
value.is_statement = other.is_statement;
}
@@ -48,127 +42,108 @@ void AddAndSetEntry(PositionTableEntry& value,
// Helper: Subtract the offsets of 'other' from 'value'.
void SubtractFromEntry(PositionTableEntry& value,
const PositionTableEntry& other) {
- value.bytecode_offset -= other.bytecode_offset;
+ value.code_offset -= other.code_offset;
value.source_position -= other.source_position;
}
// Helper: Encode an integer.
void EncodeInt(ZoneVector<byte>& bytes, int value) {
- bool sign = false;
- if (value < 0) {
- sign = true;
- value = -value;
- }
-
+ // Zig-zag encoding.
+ static const int kShift = kIntSize * kBitsPerByte - 1;
+ value = ((value << 1) ^ (value >> kShift));
+ DCHECK_GE(value, 0);
+ unsigned int encoded = static_cast<unsigned int>(value);
bool more;
do {
- more = value > ValueBits::kMax;
- bytes.push_back(MoreBit::encode(more || sign) |
- ValueBits::encode(value & ValueBits::kMax));
- value >>= ValueBits::kSize;
+ more = encoded > ValueBits::kMax;
+ bytes.push_back(MoreBit::encode(more) |
+ ValueBits::encode(encoded & ValueBits::kMask));
+ encoded >>= ValueBits::kSize;
} while (more);
-
- if (sign) {
- bytes.push_back(MoreBit::encode(false) |
- ValueBits::encode(kNegativeSignMarker));
- }
}
// Encode a PositionTableEntry.
void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
- // 1 bit for sign + is_statement each, which leaves 30b for the value.
- DCHECK(abs(entry.bytecode_offset) < (1 << 30));
- EncodeInt(bytes, (entry.is_statement ? 1 : 0) | (entry.bytecode_offset << 1));
+ // We only accept ascending code offsets.
+ DCHECK(entry.code_offset >= 0);
+ // Since code_offset is not negative, we use sign to encode is_statement.
+ EncodeInt(bytes,
+ entry.is_statement ? entry.code_offset : -entry.code_offset - 1);
EncodeInt(bytes, entry.source_position);
}
// Helper: Decode an integer.
void DecodeInt(ByteArray* bytes, int* index, int* v) {
byte current;
- int n = 0;
- int value = 0;
+ int shift = 0;
+ int decoded = 0;
bool more;
do {
current = bytes->get((*index)++);
- value |= ValueBits::decode(current) << (n * ValueBits::kSize);
- n++;
+ decoded |= ValueBits::decode(current) << shift;
more = MoreBit::decode(current);
+ shift += ValueBits::kSize;
} while (more);
-
- if (ValueBits::decode(current) == kNegativeSignMarker) {
- value = -value;
- }
- *v = value;
+ DCHECK_GE(decoded, 0);
+ decoded = (decoded >> 1) ^ (-(decoded & 1));
+ *v = decoded;
}
void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
int tmp;
DecodeInt(bytes, index, &tmp);
- entry->is_statement = (tmp & 1);
-
- // Note that '>>' needs to be arithmetic shift in order to handle negative
- // numbers properly.
- entry->bytecode_offset = (tmp >> 1);
-
+ if (tmp >= 0) {
+ entry->is_statement = true;
+ entry->code_offset = tmp;
+ } else {
+ entry->is_statement = false;
+ entry->code_offset = -(tmp + 1);
+ }
DecodeInt(bytes, index, &entry->source_position);
}
} // namespace
-void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
- int source_position) {
- int offset = static_cast<int>(bytecode_offset);
- AddEntry({offset, source_position, true});
+SourcePositionTableBuilder::SourcePositionTableBuilder(
+ Zone* zone, SourcePositionTableBuilder::RecordingMode mode)
+ : mode_(mode),
+ bytes_(zone),
+#ifdef ENABLE_SLOW_DCHECKS
+ raw_entries_(zone),
+#endif
+ previous_() {
}
-void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
- int source_position) {
- int offset = static_cast<int>(bytecode_offset);
- AddEntry({offset, source_position, false});
+void SourcePositionTableBuilder::AddPosition(size_t code_offset,
+ int source_position,
+ bool is_statement) {
+ if (Omit()) return;
+ int offset = static_cast<int>(code_offset);
+ AddEntry({offset, source_position, is_statement});
}
void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
- // Don't encode a new entry if this bytecode already has a source position
- // assigned.
- if (candidate_.bytecode_offset == entry.bytecode_offset) {
- if (entry.is_statement) candidate_ = entry;
- return;
- }
-
- CommitEntry();
- candidate_ = entry;
-}
-
-void SourcePositionTableBuilder::CommitEntry() {
- if (candidate_.bytecode_offset == kUninitializedCandidateOffset) return;
- PositionTableEntry tmp(candidate_);
+ PositionTableEntry tmp(entry);
SubtractFromEntry(tmp, previous_);
EncodeEntry(bytes_, tmp);
- previous_ = candidate_;
-
- if (candidate_.is_statement) {
- LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddStatementPositionEvent(
- jit_handler_data_, candidate_.bytecode_offset,
- candidate_.source_position));
- }
- LOG_CODE_EVENT(isolate_, CodeLinePosInfoAddPositionEvent(
- jit_handler_data_, candidate_.bytecode_offset,
- candidate_.source_position));
-
+ previous_ = entry;
#ifdef ENABLE_SLOW_DCHECKS
- raw_entries_.push_back(candidate_);
+ raw_entries_.push_back(entry);
#endif
}
-Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
- CommitEntry();
- if (bytes_.empty()) return isolate_->factory()->empty_byte_array();
+Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
+ Isolate* isolate, Handle<AbstractCode> code) {
+ if (bytes_.empty()) return isolate->factory()->empty_byte_array();
+ DCHECK(!Omit());
- Handle<ByteArray> table = isolate_->factory()->NewByteArray(
+ Handle<ByteArray> table = isolate->factory()->NewByteArray(
static_cast<int>(bytes_.size()), TENURED);
MemCopy(table->GetDataStartAddress(), &*bytes_.begin(), bytes_.size());
+ LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(*code, *table));
+
#ifdef ENABLE_SLOW_DCHECKS
// Brute force testing: Record all positions and decode
// the entire table to verify they are identical.
@@ -176,13 +151,14 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable() {
for (SourcePositionTableIterator encoded(*table); !encoded.done();
encoded.Advance(), raw++) {
DCHECK(raw != raw_entries_.end());
- DCHECK_EQ(encoded.bytecode_offset(), raw->bytecode_offset);
+ DCHECK_EQ(encoded.code_offset(), raw->code_offset);
DCHECK_EQ(encoded.source_position(), raw->source_position);
DCHECK_EQ(encoded.is_statement(), raw->is_statement);
}
DCHECK(raw == raw_entries_.end());
+ // No additional source positions after creating the table.
+ mode_ = OMIT_SOURCE_POSITIONS;
#endif
-
return table;
}
@@ -203,6 +179,5 @@ void SourcePositionTableIterator::Advance() {
}
}
-} // namespace interpreter
} // namespace internal
} // namespace v8
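
The new scheme combines zig-zag mapping (0, -1, 1, -2, ... onto 0, 1, 2, 3, ...) with a little-endian base-128 varint: each byte carries 7 payload bits plus a 'more' flag in bit 7. A self-contained round-trip sketch, simplified from the code above (plain std::vector instead of ZoneVector, and assuming the usual arithmetic right shift for signed ints):

  #include <cstdint>
  #include <vector>

  void EncodeInt(std::vector<uint8_t>& bytes, int value) {
    // Zig-zag: fold the sign into the low bit.
    uint32_t encoded =
        (static_cast<uint32_t>(value) << 1) ^ static_cast<uint32_t>(value >> 31);
    do {
      uint8_t payload = encoded & 0x7F;
      encoded >>= 7;
      bytes.push_back(payload | (encoded ? 0x80 : 0));  // bit 7 = 'more'
    } while (encoded != 0);
  }

  int DecodeInt(const std::vector<uint8_t>& bytes, size_t* index) {
    uint32_t decoded = 0;
    int shift = 0;
    uint8_t current;
    do {
      current = bytes[(*index)++];
      decoded |= static_cast<uint32_t>(current & 0x7F) << shift;
      shift += 7;
    } while (current & 0x80);
    return static_cast<int>((decoded >> 1) ^ (0u - (decoded & 1u)));  // un-zig-zag
  }

  // std::vector<uint8_t> b; size_t i = 0;
  // EncodeInt(b, -1234);  // DecodeInt(b, &i) == -1234
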
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
new file mode 100644
index 0000000000..76ae4a0759
--- /dev/null
+++ b/deps/v8/src/source-position-table.h
@@ -0,0 +1,90 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SOURCE_POSITION_TABLE_H_
+#define V8_SOURCE_POSITION_TABLE_H_
+
+#include "src/assert-scope.h"
+#include "src/checks.h"
+#include "src/handles.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class AbstractCode;
+class BytecodeArray;
+class ByteArray;
+class Isolate;
+class Zone;
+
+struct PositionTableEntry {
+ PositionTableEntry()
+ : code_offset(0), source_position(0), is_statement(false) {}
+ PositionTableEntry(int offset, int source, bool statement)
+ : code_offset(offset), source_position(source), is_statement(statement) {}
+
+ int code_offset;
+ int source_position;
+ bool is_statement;
+};
+
+class SourcePositionTableBuilder {
+ public:
+ enum RecordingMode { OMIT_SOURCE_POSITIONS, RECORD_SOURCE_POSITIONS };
+
+ SourcePositionTableBuilder(Zone* zone,
+ RecordingMode mode = RECORD_SOURCE_POSITIONS);
+
+ void AddPosition(size_t code_offset, int source_position, bool is_statement);
+
+ Handle<ByteArray> ToSourcePositionTable(Isolate* isolate,
+ Handle<AbstractCode> code);
+
+ private:
+ void AddEntry(const PositionTableEntry& entry);
+
+ inline bool Omit() const { return mode_ == OMIT_SOURCE_POSITIONS; }
+
+ RecordingMode mode_;
+ ZoneVector<byte> bytes_;
+#ifdef ENABLE_SLOW_DCHECKS
+ ZoneVector<PositionTableEntry> raw_entries_;
+#endif
+ PositionTableEntry previous_; // Previously written entry, to compute delta.
+};
+
+class SourcePositionTableIterator {
+ public:
+ explicit SourcePositionTableIterator(ByteArray* byte_array);
+
+ void Advance();
+
+ int code_offset() const {
+ DCHECK(!done());
+ return current_.code_offset;
+ }
+ int source_position() const {
+ DCHECK(!done());
+ return current_.source_position;
+ }
+ bool is_statement() const {
+ DCHECK(!done());
+ return current_.is_statement;
+ }
+ bool done() const { return index_ == kDone; }
+
+ private:
+ static const int kDone = -1;
+
+ ByteArray* table_;
+ int index_;
+ PositionTableEntry current_;
+ DisallowHeapAllocation no_gc;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SOURCE_POSITION_TABLE_H_
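
The matching EncodeEntry/DecodeEntry in the .cc above pack is_statement into the sign of the (always non-negative) code_offset, biasing by -1 so that offset 0 stays representable for both flag values. That packing in isolation:

  #include <cassert>

  int PackOffset(int code_offset, bool is_statement) {
    assert(code_offset >= 0);  // only ascending, non-negative offsets occur
    return is_statement ? code_offset : -code_offset - 1;
  }

  void UnpackOffset(int packed, int* code_offset, bool* is_statement) {
    *is_statement = packed >= 0;
    *code_offset = packed >= 0 ? packed : -(packed + 1);
  }

  int main() {
    int off; bool stmt;
    UnpackOffset(PackOffset(0, false), &off, &stmt);
    assert(off == 0 && !stmt);
    UnpackOffset(PackOffset(42, true), &off, &stmt);
    assert(off == 42 && stmt);
  }
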
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/source-position.h
index 46ee9820b2..2d36e97521 100644
--- a/deps/v8/src/source-position.h
+++ b/deps/v8/src/source-position.h
@@ -7,8 +7,8 @@
#include <ostream>
-#include "src/assembler.h"
#include "src/flags.h"
+#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
@@ -49,8 +49,7 @@ class SourcePosition {
uint32_t raw() const { return value_; }
private:
- static const uint32_t kNoPosition =
- static_cast<uint32_t>(RelocInfo::kNoPosition);
+ static const uint32_t kNoPosition = static_cast<uint32_t>(kNoSourcePosition);
typedef BitField<uint32_t, 0, 9> InliningIdField;
// Offset from the start of the inlined function.
diff --git a/deps/v8/src/startup-data-util.cc b/deps/v8/src/startup-data-util.cc
index e20ec218d5..7c6d9ebf21 100644
--- a/deps/v8/src/startup-data-util.cc
+++ b/deps/v8/src/startup-data-util.cc
@@ -7,9 +7,9 @@
#include <stdlib.h>
#include <string.h>
+#include "src/base/file-utils.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
-#include "src/flags.h"
#include "src/utils.h"
@@ -78,27 +78,6 @@ void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) {
atexit(&FreeStartupData);
}
-
-char* RelativePath(char** buffer, const char* exec_path, const char* name) {
- DCHECK(exec_path);
- int path_separator = static_cast<int>(strlen(exec_path)) - 1;
- while (path_separator >= 0 &&
- !base::OS::isDirectorySeparator(exec_path[path_separator])) {
- path_separator--;
- }
- if (path_separator >= 0) {
- int name_length = static_cast<int>(strlen(name));
- *buffer =
- reinterpret_cast<char*>(calloc(path_separator + name_length + 2, 1));
- *buffer[0] = '\0';
- strncat(*buffer, exec_path, path_separator + 1);
- strncat(*buffer, name, name_length);
- } else {
- *buffer = strdup(name);
- }
- return *buffer;
-}
-
} // namespace
#endif // V8_USE_EXTERNAL_STARTUP_DATA
@@ -108,9 +87,7 @@ void InitializeExternalStartupData(const char* directory_path) {
char* natives;
char* snapshot;
LoadFromFiles(RelativePath(&natives, directory_path, "natives_blob.bin"),
- RelativePath(&snapshot, directory_path,
- FLAG_ignition ? "snapshot_blob_ignition.bin"
- : "snapshot_blob.bin"));
+ RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
free(natives);
free(snapshot);
#endif // V8_USE_EXTERNAL_STARTUP_DATA
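
The removed RelativePath helper (now supplied by src/base/file-utils) joins a blob file name onto the directory portion of the executable path, falling back to the bare name when there is no separator. The same logic sketched with std::string, assuming '/' and '\' as the possible separators:

  #include <string>

  std::string RelativePath(const std::string& exec_path, const std::string& name) {
    size_t sep = exec_path.find_last_of("/\\");
    if (sep == std::string::npos) return name;    // no directory component
    return exec_path.substr(0, sep + 1) + name;   // keep the trailing separator
  }

  // RelativePath("/opt/node/bin/node", "snapshot_blob.bin")
  //   == "/opt/node/bin/snapshot_blob.bin"
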
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 98bd82b97a..192603f33f 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -293,6 +293,14 @@ class IncrementalStringBuilder {
}
}
+ INLINE(void AppendCString(const uc16* s)) {
+ if (encoding_ == String::ONE_BYTE_ENCODING) {
+ while (*s != '\0') Append<uc16, uint8_t>(*(s++));
+ } else {
+ while (*s != '\0') Append<uc16, uc16>(*(s++));
+ }
+ }
+
INLINE(bool CurrentPartCanFit(int length)) {
return part_length_ - current_index_ > length;
}
@@ -301,6 +309,8 @@ class IncrementalStringBuilder {
MaybeHandle<String> Finish();
+ INLINE(bool HasOverflowed()) const { return overflowed_; }
+
// Change encoding to two-byte.
void ChangeEncoding() {
DCHECK_EQ(String::ONE_BYTE_ENCODING, encoding_);
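
The new two-byte AppendCString walks a NUL-terminated uc16 string and appends it unit by unit, narrowing each unit to a single byte while the builder is still in one-byte mode. A simplified model of that dispatch (Builder and its two buffers are stand-ins for the real class, which switches encodings on demand):

  #include <cstdint>
  #include <vector>

  using uc16 = uint16_t;

  struct Builder {
    bool one_byte = true;          // current encoding
    std::vector<uint8_t> narrow;   // one-byte part
    std::vector<uc16> wide;        // two-byte part

    void AppendCString(const uc16* s) {
      if (one_byte) {
        while (*s != 0) narrow.push_back(static_cast<uint8_t>(*(s++)));
      } else {
        while (*s != 0) wide.push_back(*(s++));
      }
    }
  };
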
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 02f6f1c2bb..3ae4580709 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -4,6 +4,8 @@
#include "src/string-stream.h"
+#include <memory>
+
#include "src/handles-inl.h"
#include "src/prototype.h"
@@ -249,12 +251,11 @@ void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
Add(CStrVector(format), Vector<FmtElm>(argv, argc));
}
-
-base::SmartArrayPointer<const char> StringStream::ToCString() const {
+std::unique_ptr<char[]> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
MemCopy(str, buffer_, length_);
str[length_] = '\0';
- return base::SmartArrayPointer<const char>(str);
+ return std::unique_ptr<char[]>(str);
}
@@ -378,14 +379,14 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
void StringStream::PrintFixedArray(FixedArray* array, unsigned int limit) {
- Heap* heap = array->GetHeap();
+ Isolate* isolate = array->GetIsolate();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
Object* element = array->get(i);
- if (element != heap->the_hole_value()) {
- for (int len = 1; len < 18; len++)
- Put(' ');
- Add("%d: %o\n", i, array->get(i));
+ if (element->IsTheHole(isolate)) continue;
+ for (int len = 1; len < 18; len++) {
+ Put(' ');
}
+ Add("%d: %o\n", i, array->get(i));
}
if (limit >= 10) {
Add(" ...\n");
@@ -527,19 +528,20 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
Object* name = fun->shared()->name();
bool print_name = false;
Isolate* isolate = fun->GetIsolate();
- if (receiver->IsNull() || receiver->IsUndefined() || receiver->IsJSProxy()) {
+ if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate) ||
+ receiver->IsTheHole(isolate) || receiver->IsJSProxy()) {
print_name = true;
- } else {
+ } else if (isolate->context() != nullptr) {
if (!receiver->IsJSObject()) {
receiver = receiver->GetRootMap(isolate)->prototype();
}
for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
- PrototypeIterator::START_AT_RECEIVER);
+ kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent()->IsJSProxy()) break;
Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
- if (!key->IsUndefined()) {
+ if (!key->IsUndefined(isolate)) {
if (!name->IsString() ||
!key->IsString() ||
!String::cast(name)->Equals(String::cast(key))) {
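
ToCString now hands the caller a std::unique_ptr<char[]> instead of base::SmartArrayPointer, so the heap copy is released with delete[] automatically when the owning pointer goes out of scope. The same ownership transfer in isolation:

  #include <cstring>
  #include <memory>

  std::unique_ptr<char[]> ToCString(const char* buffer, size_t length) {
    std::unique_ptr<char[]> str(new char[length + 1]);
    std::memcpy(str.get(), buffer, length);
    str[length] = '\0';
    return str;  // caller owns the copy; no manual delete[] required
  }
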
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 03ea0620ad..1c1d27a16b 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -5,8 +5,9 @@
#ifndef V8_STRING_STREAM_H_
#define V8_STRING_STREAM_H_
+#include <memory>
+
#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
#include "src/handles.h"
#include "src/vector.h"
@@ -135,7 +136,7 @@ class StringStream final {
void OutputToStdOut() { OutputToFile(stdout); }
void Log(Isolate* isolate);
Handle<String> ToString(Isolate* isolate);
- base::SmartArrayPointer<const char> ToCString() const;
+ std::unique_ptr<char[]> ToCString() const;
int length() const { return length_; }
// Object printing support.
diff --git a/deps/v8/src/third_party/fdlibm/README.v8 b/deps/v8/src/third_party/fdlibm/README.v8
deleted file mode 100644
index ea8fdb6ce1..0000000000
--- a/deps/v8/src/third_party/fdlibm/README.v8
+++ /dev/null
@@ -1,18 +0,0 @@
-Name: Freely Distributable LIBM
-Short Name: fdlibm
-URL: http://www.netlib.org/fdlibm/
-Version: 5.3
-License: Freely Distributable.
-License File: LICENSE.
-Security Critical: yes.
-License Android Compatible: yes.
-
-Description:
-This is used to provide an accurate implementation of the trigonometric functions
-used in V8.
-
-Local Modifications:
-For the use in V8, fdlibm has been reduced to include only sine, cosine and
-tangent. To make inlining into generated code possible, a large portion of
-that has been translated to JavaScript. The rest remains in C, but has been
-refactored and reformatted to interoperate with the rest of V8.
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.cc b/deps/v8/src/third_party/fdlibm/fdlibm.cc
deleted file mode 100644
index 0ef2301ae3..0000000000
--- a/deps/v8/src/third_party/fdlibm/fdlibm.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunSoft, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2014 the V8 project authors. All rights reserved.
-
-#include "src/third_party/fdlibm/fdlibm.h"
-
-#include <stdint.h>
-#include <cmath>
-#include <limits>
-
-#include "src/base/macros.h"
-#include "src/double.h"
-
-namespace v8 {
-namespace fdlibm {
-
-#ifdef _MSC_VER
-inline double scalbn(double x, int y) { return _scalb(x, y); }
-#endif // _MSC_VER
-
-
-// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
-static const int two_over_pi[] = {
- 0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C,
- 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649,
- 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129, 0xA73EE8, 0x8235F5, 0x2EBB44,
- 0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C, 0x845F8B,
- 0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D,
- 0x367ECF, 0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
- 0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330,
- 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3, 0x91615E, 0xE61B08,
- 0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA,
- 0x73A8C9, 0x60E27B, 0xC08C6B};
-
-static const double zero = 0.0;
-static const double two24 = 1.6777216e+07;
-static const double one = 1.0;
-static const double twon24 = 5.9604644775390625e-08;
-
-static const double PIo2[] = {
- 1.57079625129699707031e+00, // 0x3FF921FB, 0x40000000
- 7.54978941586159635335e-08, // 0x3E74442D, 0x00000000
- 5.39030252995776476554e-15, // 0x3CF84698, 0x80000000
- 3.28200341580791294123e-22, // 0x3B78CC51, 0x60000000
- 1.27065575308067607349e-29, // 0x39F01B83, 0x80000000
- 1.22933308981111328932e-36, // 0x387A2520, 0x40000000
- 2.73370053816464559624e-44, // 0x36E38222, 0x80000000
- 2.16741683877804819444e-51 // 0x3569F31D, 0x00000000
-};
-
-
-INLINE(int __kernel_rem_pio2(double* x, double* y, int e0, int nx)) {
- static const int32_t jk = 3;
- double fw;
- int32_t jx = nx - 1;
- int32_t jv = (e0 - 3) / 24;
- if (jv < 0) jv = 0;
- int32_t q0 = e0 - 24 * (jv + 1);
- int32_t m = jx + jk;
-
- double f[20];
- for (int i = 0, j = jv - jx; i <= m; i++, j++) {
- f[i] = (j < 0) ? zero : static_cast<double>(two_over_pi[j]);
- }
-
- double q[20];
- for (int i = 0; i <= jk; i++) {
- fw = 0.0;
- for (int j = 0; j <= jx; j++) fw += x[j] * f[jx + i - j];
- q[i] = fw;
- }
-
- int32_t jz = jk;
-
-recompute:
-
- int32_t iq[20];
- double z = q[jz];
- for (int i = 0, j = jz; j > 0; i++, j--) {
- fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
- iq[i] = static_cast<int32_t>(z - two24 * fw);
- z = q[j - 1] + fw;
- }
-
- z = scalbn(z, q0);
- z -= 8.0 * std::floor(z * 0.125);
- int32_t n = static_cast<int32_t>(z);
- z -= static_cast<double>(n);
- int32_t ih = 0;
- if (q0 > 0) {
- int32_t i = (iq[jz - 1] >> (24 - q0));
- n += i;
- iq[jz - 1] -= i << (24 - q0);
- ih = iq[jz - 1] >> (23 - q0);
- } else if (q0 == 0) {
- ih = iq[jz - 1] >> 23;
- } else if (z >= 0.5) {
- ih = 2;
- }
-
- if (ih > 0) {
- n += 1;
- int32_t carry = 0;
- for (int i = 0; i < jz; i++) {
- int32_t j = iq[i];
- if (carry == 0) {
- if (j != 0) {
- carry = 1;
- iq[i] = 0x1000000 - j;
- }
- } else {
- iq[i] = 0xffffff - j;
- }
- }
- if (q0 == 1) {
- iq[jz - 1] &= 0x7fffff;
- } else if (q0 == 2) {
- iq[jz - 1] &= 0x3fffff;
- }
- if (ih == 2) {
- z = one - z;
- if (carry != 0) z -= scalbn(one, q0);
- }
- }
-
- if (z == zero) {
- int32_t j = 0;
- for (int i = jz - 1; i >= jk; i--) j |= iq[i];
- if (j == 0) {
- int32_t k = 1;
- while (iq[jk - k] == 0) k++;
- for (int i = jz + 1; i <= jz + k; i++) {
- f[jx + i] = static_cast<double>(two_over_pi[jv + i]);
- for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j];
- q[i] = fw;
- }
- jz += k;
- goto recompute;
- }
- }
-
- if (z == 0.0) {
- jz -= 1;
- q0 -= 24;
- while (iq[jz] == 0) {
- jz--;
- q0 -= 24;
- }
- } else {
- z = scalbn(z, -q0);
- if (z >= two24) {
- fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
- iq[jz] = static_cast<int32_t>(z - two24 * fw);
- jz += 1;
- q0 += 24;
- iq[jz] = static_cast<int32_t>(fw);
- } else {
- iq[jz] = static_cast<int32_t>(z);
- }
- }
-
- fw = scalbn(one, q0);
- for (int i = jz; i >= 0; i--) {
- q[i] = fw * static_cast<double>(iq[i]);
- fw *= twon24;
- }
-
- double fq[20];
- for (int i = jz; i >= 0; i--) {
- fw = 0.0;
- for (int k = 0; k <= jk && k <= jz - i; k++) fw += PIo2[k] * q[i + k];
- fq[jz - i] = fw;
- }
-
- fw = 0.0;
- for (int i = jz; i >= 0; i--) fw += fq[i];
- y[0] = (ih == 0) ? fw : -fw;
- fw = fq[0] - fw;
- for (int i = 1; i <= jz; i++) fw += fq[i];
- y[1] = (ih == 0) ? fw : -fw;
- return n & 7;
-}
-
-
-int rempio2(double x, double* y) {
- int32_t hx = static_cast<int32_t>(internal::double_to_uint64(x) >> 32);
- int32_t ix = hx & 0x7fffffff;
-
- if (ix >= 0x7ff00000) {
- *y = std::numeric_limits<double>::quiet_NaN();
- return 0;
- }
-
- int32_t e0 = (ix >> 20) - 1046;
- uint64_t zi = internal::double_to_uint64(x) & 0xFFFFFFFFu;
- zi |= static_cast<uint64_t>(ix - (e0 << 20)) << 32;
- double z = internal::uint64_to_double(zi);
-
- double tx[3];
- for (int i = 0; i < 2; i++) {
- tx[i] = static_cast<double>(static_cast<int32_t>(z));
- z = (z - tx[i]) * two24;
- }
- tx[2] = z;
-
- int nx = 3;
- while (tx[nx - 1] == zero) nx--;
- int n = __kernel_rem_pio2(tx, y, e0, nx);
- if (hx < 0) {
- y[0] = -y[0];
- y[1] = -y[1];
- return -n;
- }
- return n;
-}
-} // namespace fdlibm
-} // namespace v8
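
The deleted rempio2 performs a full Payne-Hanek reduction of x modulo pi/2 through __kernel_rem_pio2. For moderately sized |x| the underlying idea can be shown with a much simpler Cody-Waite style reduction that splits pi/2 into a head and a tail constant (the values below come from the companion fdlibm.js); this is an illustrative sketch, not the deleted algorithm:

  #include <cmath>

  // Reduce x to y[0] + y[1] with |y[0] + y[1]| <= pi/4; returns the quadrant
  // (n mod 4). Accurate only while n * PIO2_HI stays exactly representable.
  int rempio2_small(double x, double* y) {
    static const double INVPIO2 = 6.36619772367581382433e-01;  // 2/pi
    static const double PIO2_HI = 1.57079632673412561417e+00;
    static const double PIO2_LO = 6.07710050650619224932e-11;
    int n = static_cast<int>(std::round(x * INVPIO2));
    double r = x - n * PIO2_HI;   // first round of the subtraction
    double w = n * PIO2_LO;       // tail term compensates the head's rounding
    y[0] = r - w;
    y[1] = (r - y[0]) - w;        // bits lost when forming y[0]
    return n & 3;
  }
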
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.h b/deps/v8/src/third_party/fdlibm/fdlibm.h
deleted file mode 100644
index e417c8ce59..0000000000
--- a/deps/v8/src/third_party/fdlibm/fdlibm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
-//
-// ====================================================
-// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunSoft, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2014 the V8 project authors. All rights reserved.
-
-#ifndef V8_FDLIBM_H_
-#define V8_FDLIBM_H_
-
-namespace v8 {
-namespace fdlibm {
-
-int rempio2(double x, double* y);
-
-} // namespace fdlibm
-} // namespace v8
-
-#endif // V8_FDLIBM_H_
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.js b/deps/v8/src/third_party/fdlibm/fdlibm.js
deleted file mode 100644
index a5e789f38a..0000000000
--- a/deps/v8/src/third_party/fdlibm/fdlibm.js
+++ /dev/null
@@ -1,1117 +0,0 @@
-// The following is adapted from fdlibm (http://www.netlib.org/fdlibm),
-//
-// ====================================================
-// Copyright (C) 1993-2004 by Sun Microsystems, Inc. All rights reserved.
-//
-// Developed at SunSoft, a Sun Microsystems, Inc. business.
-// Permission to use, copy, modify, and distribute this
-// software is freely granted, provided that this notice
-// is preserved.
-// ====================================================
-//
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2014 the V8 project authors. All rights reserved.
-//
-// The following is a straightforward translation of fdlibm routines
-// by Raymond Toy (rtoy@google.com).
-
-// rempio2result is used as a container for return values of %RemPiO2. It is
-// initialized to a two-element Float64Array during genesis.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalFloat64Array = global.Float64Array;
-var GlobalMath = global.Math;
-var MathAbs;
-var MathExp;
-var NaN = %GetRootNaN();
-var rempio2result;
-
-utils.Import(function(from) {
- MathAbs = from.MathAbs;
- MathExp = from.MathExp;
-});
-
-utils.CreateDoubleResultArray = function(global) {
- rempio2result = new GlobalFloat64Array(2);
-};
-
-// -------------------------------------------------------------------
-
-define INVPIO2 = 6.36619772367581382433e-01;
-define PIO2_1 = 1.57079632673412561417;
-define PIO2_1T = 6.07710050650619224932e-11;
-define PIO2_2 = 6.07710050630396597660e-11;
-define PIO2_2T = 2.02226624879595063154e-21;
-define PIO2_3 = 2.02226624871116645580e-21;
-define PIO2_3T = 8.47842766036889956997e-32;
-define PIO4 = 7.85398163397448278999e-01;
-define PIO4LO = 3.06161699786838301793e-17;
-
-// Compute k and r such that x - k*pi/2 = r where |r| < pi/4. For
-// precision, r is returned as two values y0 and y1 such that r = y0 + y1
-// to more than double precision.
-
-macro REMPIO2(X)
- var n, y0, y1;
- var hx = %_DoubleHi(X);
- var ix = hx & 0x7fffffff;
-
- if (ix < 0x4002d97c) {
- // |X| ~< 3*pi/4, special case with n = +/- 1
- if (hx > 0) {
- var z = X - PIO2_1;
- if (ix != 0x3ff921fb) {
- // 33+53 bit pi is good enough
- y0 = z - PIO2_1T;
- y1 = (z - y0) - PIO2_1T;
- } else {
- // near pi/2, use 33+33+53 bit pi
- z -= PIO2_2;
- y0 = z - PIO2_2T;
- y1 = (z - y0) - PIO2_2T;
- }
- n = 1;
- } else {
- // Negative X
- var z = X + PIO2_1;
- if (ix != 0x3ff921fb) {
- // 33+53 bit pi is good enough
- y0 = z + PIO2_1T;
- y1 = (z - y0) + PIO2_1T;
- } else {
- // near pi/2, use 33+33+53 bit pi
- z += PIO2_2;
- y0 = z + PIO2_2T;
- y1 = (z - y0) + PIO2_2T;
- }
- n = -1;
- }
- } else if (ix <= 0x413921fb) {
- // |X| ~<= 2^19*(pi/2), medium size
- var t = MathAbs(X);
- n = (t * INVPIO2 + 0.5) | 0;
- var r = t - n * PIO2_1;
- var w = n * PIO2_1T;
- // First round good to 85 bit
- y0 = r - w;
- if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x1000000) {
- // 2nd iteration needed, good to 118
- t = r;
- w = n * PIO2_2;
- r = t - w;
- w = n * PIO2_2T - ((t - r) - w);
- y0 = r - w;
- if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x3100000) {
- // 3rd iteration needed. 151 bits accuracy
- t = r;
- w = n * PIO2_3;
- r = t - w;
- w = n * PIO2_3T - ((t - r) - w);
- y0 = r - w;
- }
- }
- y1 = (r - y0) - w;
- if (hx < 0) {
- n = -n;
- y0 = -y0;
- y1 = -y1;
- }
- } else {
- // Need to do full Payne-Hanek reduction here.
- n = %RemPiO2(X, rempio2result);
- y0 = rempio2result[0];
- y1 = rempio2result[1];
- }
-endmacro
-
-
-// __kernel_sin(X, Y, IY)
-// kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
-// Input X is assumed to be bounded by ~pi/4 in magnitude.
-// Input Y is the tail of X so that x = X + Y.
-//
-// Algorithm
-// 1. Since ieee_sin(-x) = -ieee_sin(x), we need only to consider positive x.
-// 2. ieee_sin(x) is approximated by a polynomial of degree 13 on
-// [0,pi/4]
-// 3 13
-// sin(x) ~ x + S1*x + ... + S6*x
-// where
-//
-// |ieee_sin(x) 2 4 6 8 10 12 | -58
-// |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x +S6*x )| <= 2
-// | x |
-//
-// 3. ieee_sin(X+Y) = ieee_sin(X) + sin'(X')*Y
-// ~ ieee_sin(X) + (1-X*X/2)*Y
-// For better accuracy, let
-// 3 2 2 2 2
-// r = X *(S2+X *(S3+X *(S4+X *(S5+X *S6))))
-// then 3 2
-// sin(x) = X + (S1*X + (X *(r-Y/2)+Y))
-//
-define S1 = -1.66666666666666324348e-01;
-define S2 = 8.33333333332248946124e-03;
-define S3 = -1.98412698298579493134e-04;
-define S4 = 2.75573137070700676789e-06;
-define S5 = -2.50507602534068634195e-08;
-define S6 = 1.58969099521155010221e-10;
-
-macro RETURN_KERNELSIN(X, Y, SIGN)
- var z = X * X;
- var v = z * X;
- var r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
- return (X - ((z * (0.5 * Y - v * r) - Y) - v * S1)) SIGN;
-endmacro
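
RETURN_KERNELSIN evaluates the degree-13 sine polynomial in Horner form. A direct C++ transliteration of the no-tail case (Y = 0), using the S1-S6 coefficients listed above; it is only meaningful for |x| up to roughly pi/4:

  double kernel_sin(double x) {
    const double S1 = -1.66666666666666324348e-01;
    const double S2 = 8.33333333332248946124e-03;
    const double S3 = -1.98412698298579493134e-04;
    const double S4 = 2.75573137070700676789e-06;
    const double S5 = -2.50507602534068634195e-08;
    const double S6 = 1.58969099521155010221e-10;
    double z = x * x;
    double v = z * x;  // x^3
    double r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
    return x + v * (S1 + z * r);  // x + S1*x^3 + x^3 * z * r
  }
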
-
-// __kernel_cos(X, Y)
-// kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
-// Input X is assumed to be bounded by ~pi/4 in magnitude.
-// Input Y is the tail of X so that x = X + Y.
-//
-// Algorithm
-// 1. Since ieee_cos(-x) = ieee_cos(x), we need only to consider positive x.
-// 2. ieee_cos(x) is approximated by a polynomial of degree 14 on
-// [0,pi/4]
-// 4 14
-// cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
-// where the remez error is
-//
-// | 2 4 6 8 10 12 14 | -58
-// |ieee_cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2
-// | |
-//
-// 4 6 8 10 12 14
-// 3. let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then
-// ieee_cos(x) = 1 - x*x/2 + r
-// since ieee_cos(X+Y) ~ ieee_cos(X) - ieee_sin(X)*Y
-// ~ ieee_cos(X) - X*Y,
-// a correction term is necessary in ieee_cos(x) and hence
-// cos(X+Y) = 1 - (X*X/2 - (r - X*Y))
-// For better accuracy when x > 0.3, let qx = |x|/4 with
-// the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
-// Then
-// cos(X+Y) = (1-qx) - ((X*X/2-qx) - (r-X*Y)).
-// Note that 1-qx and (X*X/2-qx) is EXACT here, and the
-// magnitude of the latter is at least a quarter of X*X/2,
-// thus, reducing the rounding error in the subtraction.
-//
-define C1 = 4.16666666666666019037e-02;
-define C2 = -1.38888888888741095749e-03;
-define C3 = 2.48015872894767294178e-05;
-define C4 = -2.75573143513906633035e-07;
-define C5 = 2.08757232129817482790e-09;
-define C6 = -1.13596475577881948265e-11;
-
-macro RETURN_KERNELCOS(X, Y, SIGN)
- var ix = %_DoubleHi(X) & 0x7fffffff;
- var z = X * X;
- var r = z * (C1 + z * (C2 + z * (C3 + z * (C4 + z * (C5 + z * C6)))));
- if (ix < 0x3fd33333) { // |x| ~< 0.3
- return (1 - (0.5 * z - (z * r - X * Y))) SIGN;
- } else {
- var qx;
- if (ix > 0x3fe90000) { // |x| > 0.78125
- qx = 0.28125;
- } else {
- qx = %_ConstructDouble(%_DoubleHi(0.25 * X), 0);
- }
- var hz = 0.5 * z - qx;
- return (1 - qx - (hz - (z * r - X * Y))) SIGN;
- }
-endmacro
-
-
-// kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
-// Input x is assumed to be bounded by ~pi/4 in magnitude.
-// Input y is the tail of x.
-// Input k indicates whether ieee_tan (if k = 1) or -1/tan (if k = -1)
-// is returned.
-//
-// Algorithm
-// 1. Since ieee_tan(-x) = -ieee_tan(x), we need only to consider positive x.
-// 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
-// 3. ieee_tan(x) is approximated by a odd polynomial of degree 27 on
-// [0,0.67434]
-// 3 27
-// tan(x) ~ x + T1*x + ... + T13*x
-// where
-//
-// |ieee_tan(x) 2 4 26 | -59.2
-// |----- - (1+T1*x +T2*x +.... +T13*x )| <= 2
-// | x |
-//
-// Note: ieee_tan(x+y) = ieee_tan(x) + tan'(x)*y
-// ~ ieee_tan(x) + (1+x*x)*y
-// Therefore, for better accuracy in computing ieee_tan(x+y), let
-// 3 2 2 2 2
-// r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
-// then
-// 3 2
-// tan(x+y) = x + (T1*x + (x *(r+y)+y))
-//
-// 4. For x in [0.67434,pi/4], let y = pi/4 - x, then
-// tan(x) = ieee_tan(pi/4-y) = (1-ieee_tan(y))/(1+ieee_tan(y))
-// = 1 - 2*(ieee_tan(y) - (ieee_tan(y)^2)/(1+ieee_tan(y)))
-//
-// Set returnTan to 1 for tan; -1 for cot. Anything else is illegal
-// and will cause incorrect results.
-//
-define T00 = 3.33333333333334091986e-01;
-define T01 = 1.33333333333201242699e-01;
-define T02 = 5.39682539762260521377e-02;
-define T03 = 2.18694882948595424599e-02;
-define T04 = 8.86323982359930005737e-03;
-define T05 = 3.59207910759131235356e-03;
-define T06 = 1.45620945432529025516e-03;
-define T07 = 5.88041240820264096874e-04;
-define T08 = 2.46463134818469906812e-04;
-define T09 = 7.81794442939557092300e-05;
-define T10 = 7.14072491382608190305e-05;
-define T11 = -1.85586374855275456654e-05;
-define T12 = 2.59073051863633712884e-05;
-
-function KernelTan(x, y, returnTan) {
- var z;
- var w;
- var hx = %_DoubleHi(x);
- var ix = hx & 0x7fffffff;
-
- if (ix < 0x3e300000) { // |x| < 2^-28
- if (((ix | %_DoubleLo(x)) | (returnTan + 1)) == 0) {
- // x == 0 && returnTan = -1
- return 1 / MathAbs(x);
- } else {
- if (returnTan == 1) {
- return x;
- } else {
- // Compute -1/(x + y) carefully
- var w = x + y;
- var z = %_ConstructDouble(%_DoubleHi(w), 0);
- var v = y - (z - x);
- var a = -1 / w;
- var t = %_ConstructDouble(%_DoubleHi(a), 0);
- var s = 1 + t * z;
- return t + a * (s + t * v);
- }
- }
- }
- if (ix >= 0x3fe59428) { // |x| > .6744
- if (x < 0) {
- x = -x;
- y = -y;
- }
- z = PIO4 - x;
- w = PIO4LO - y;
- x = z + w;
- y = 0;
- }
- z = x * x;
- w = z * z;
-
- // Break x^5 * (T1 + x^2*T2 + ...) into
- // x^5 * (T1 + x^4*T3 + ... + x^20*T11) +
- // x^5 * (x^2 * (T2 + x^4*T4 + ... + x^22*T12))
- var r = T01 + w * (T03 + w * (T05 +
- w * (T07 + w * (T09 + w * T11))));
- var v = z * (T02 + w * (T04 + w * (T06 +
- w * (T08 + w * (T10 + w * T12)))));
- var s = z * x;
- r = y + z * (s * (r + v) + y);
- r = r + T00 * s;
- w = x + r;
- if (ix >= 0x3fe59428) {
- return (1 - ((hx >> 30) & 2)) *
- (returnTan - 2.0 * (x - (w * w / (w + returnTan) - r)));
- }
- if (returnTan == 1) {
- return w;
- } else {
- z = %_ConstructDouble(%_DoubleHi(w), 0);
- v = r - (z - x);
- var a = -1 / w;
- var t = %_ConstructDouble(%_DoubleHi(a), 0);
- s = 1 + t * z;
- return t + a * (s + t * v);
- }
-}
-
-function MathSinSlow(x) {
- REMPIO2(x);
- var sign = 1 - (n & 2);
- if (n & 1) {
- RETURN_KERNELCOS(y0, y1, * sign);
- } else {
- RETURN_KERNELSIN(y0, y1, * sign);
- }
-}
-
-function MathCosSlow(x) {
- REMPIO2(x);
- if (n & 1) {
- var sign = (n & 2) - 1;
- RETURN_KERNELSIN(y0, y1, * sign);
- } else {
- var sign = 1 - (n & 2);
- RETURN_KERNELCOS(y0, y1, * sign);
- }
-}
-
-// ECMA 262 - 15.8.2.16
-function MathSin(x) {
- x = +x; // Convert to number.
- if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
- // |x| < pi/4, approximately. No reduction needed.
- RETURN_KERNELSIN(x, 0, /* empty */);
- }
- return +MathSinSlow(x);
-}
-
-// ECMA 262 - 15.8.2.7
-function MathCos(x) {
- x = +x; // Convert to number.
- if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
- // |x| < pi/4, approximately. No reduction needed.
- RETURN_KERNELCOS(x, 0, /* empty */);
- }
- return +MathCosSlow(x);
-}
-
-// ECMA 262 - 15.8.2.18
-function MathTan(x) {
- x = x * 1; // Convert to number.
- if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
- // |x| < pi/4, approximately. No reduction needed.
- return KernelTan(x, 0, 1);
- }
- REMPIO2(x);
- return KernelTan(y0, y1, (n & 1) ? -1 : 1);
-}
-
-// ES6 draft 09-27-13, section 20.2.2.20.
-// Math.log1p
-//
-// Method :
-// 1. Argument Reduction: find k and f such that
-// 1+x = 2^k * (1+f),
-// where sqrt(2)/2 < 1+f < sqrt(2) .
-//
-// Note. If k=0, then f=x is exact. However, if k!=0, then f
-// may not be representable exactly. In that case, a correction
-// term is need. Let u=1+x rounded. Let c = (1+x)-u, then
-// log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
-// and add back the correction term c/u.
-// (Note: when x > 2**53, one can simply return log(x))
-//
-// 2. Approximation of log1p(f).
-// Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
-// = 2s + 2/3 s**3 + 2/5 s**5 + .....,
-// = 2s + s*R
-// We use a special Remez algorithm on [0,0.1716] to generate
-// a polynomial of degree 14 to approximate R The maximum error
-// of this polynomial approximation is bounded by 2**-58.45. In
-// other words,
-// 2 4 6 8 10 12 14
-// R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s +Lp6*s +Lp7*s
-// (the values of Lp1 to Lp7 are listed in the program)
-// and
-// | 2 14 | -58.45
-// | Lp1*s +...+Lp7*s - R(z) | <= 2
-// | |
-// Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
-// In order to guarantee error in log below 1ulp, we compute log
-// by
-// log1p(f) = f - (hfsq - s*(hfsq+R)).
-//
-// 3. Finally, log1p(x) = k*ln2 + log1p(f).
-// = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
-// Here ln2 is split into two floating point number:
-// ln2_hi + ln2_lo,
-// where n*ln2_hi is always exact for |n| < 2000.
-//
-// Special cases:
-// log1p(x) is NaN with signal if x < -1 (including -INF) ;
-// log1p(+INF) is +INF; log1p(-1) is -INF with signal;
-// log1p(NaN) is that NaN with no signal.
-//
-// Accuracy:
-// according to an error analysis, the error is always less than
-// 1 ulp (unit in the last place).
-//
-// Constants:
-// Constants are found in fdlibm.cc. We assume the C++ compiler to convert
-// from decimal to binary accurately enough to produce the intended values.
-//
-// Note: Assuming log() return accurate answer, the following
-// algorithm can be used to compute log1p(x) to within a few ULP:
-//
-// u = 1+x;
-// if (u==1.0) return x ; else
-// return log(u)*(x/(u-1.0));
-//
-// See HP-15C Advanced Functions Handbook, p.193.
-//
-define LN2_HI = 6.93147180369123816490e-01;
-define LN2_LO = 1.90821492927058770002e-10;
-define TWO_THIRD = 6.666666666666666666e-01;
-define LP1 = 6.666666666666735130e-01;
-define LP2 = 3.999999999940941908e-01;
-define LP3 = 2.857142874366239149e-01;
-define LP4 = 2.222219843214978396e-01;
-define LP5 = 1.818357216161805012e-01;
-define LP6 = 1.531383769920937332e-01;
-define LP7 = 1.479819860511658591e-01;
-
-// 2^54
-define TWO54 = 18014398509481984;
-
-function MathLog1p(x) {
- x = x * 1; // Convert to number.
- var hx = %_DoubleHi(x);
- var ax = hx & 0x7fffffff;
- var k = 1;
- var f = x;
- var hu = 1;
- var c = 0;
- var u = x;
-
- if (hx < 0x3fda827a) {
- // x < 0.41422
- if (ax >= 0x3ff00000) { // |x| >= 1
- if (x === -1) {
- return -INFINITY; // log1p(-1) = -inf
- } else {
- return NaN; // log1p(x<-1) = NaN
- }
- } else if (ax < 0x3c900000) {
- // For |x| < 2^-54 we can return x.
- return x;
- } else if (ax < 0x3e200000) {
- // For |x| < 2^-29 we can use a simple two-term Taylor series.
- return x - x * x * 0.5;
- }
-
- if ((hx > 0) || (hx <= -0x402D413D)) { // (int) 0xbfd2bec3 = -0x402d413d
- // -.2929 < x < 0.41422
- k = 0;
- }
- }
-
- // Handle Infinity and NaN
- if (hx >= 0x7ff00000) return x;
-
- if (k !== 0) {
- if (hx < 0x43400000) {
- // x < 2^53
- u = 1 + x;
- hu = %_DoubleHi(u);
- k = (hu >> 20) - 1023;
- c = (k > 0) ? 1 - (u - x) : x - (u - 1);
- c = c / u;
- } else {
- hu = %_DoubleHi(u);
- k = (hu >> 20) - 1023;
- }
- hu = hu & 0xfffff;
- if (hu < 0x6a09e) {
- u = %_ConstructDouble(hu | 0x3ff00000, %_DoubleLo(u)); // Normalize u.
- } else {
- ++k;
- u = %_ConstructDouble(hu | 0x3fe00000, %_DoubleLo(u)); // Normalize u/2.
- hu = (0x00100000 - hu) >> 2;
- }
- f = u - 1;
- }
-
- var hfsq = 0.5 * f * f;
- if (hu === 0) {
- // |f| < 2^-20;
- if (f === 0) {
- if (k === 0) {
- return 0.0;
- } else {
- return k * LN2_HI + (c + k * LN2_LO);
- }
- }
- var R = hfsq * (1 - TWO_THIRD * f);
- if (k === 0) {
- return f - R;
- } else {
- return k * LN2_HI - ((R - (k * LN2_LO + c)) - f);
- }
- }
-
- var s = f / (2 + f);
- var z = s * s;
- var R = z * (LP1 + z * (LP2 + z * (LP3 + z * (LP4 +
- z * (LP5 + z * (LP6 + z * LP7))))));
- if (k === 0) {
- return f - (hfsq - s * (hfsq + R));
- } else {
- return k * LN2_HI - ((hfsq - (s * (hfsq + R) + (k * LN2_LO + c))) - f);
- }
-}
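
The notes above also record a compact alternative that stays within a few ulp whenever log() itself is accurate: the c/u correction term collapses into the x/(u-1) rescaling. Rendered directly in C++:

  #include <cmath>

  double log1p_simple(double x) {
    double u = 1.0 + x;
    if (u == 1.0) return x;                  // 1+x rounded back to 1: log1p(x) ~ x
    return std::log(u) * (x / (u - 1.0));    // rescale by how 1+x was rounded
  }
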
-
-// ES6 draft 09-27-13, section 20.2.2.14.
-// Math.expm1
-// Returns exp(x)-1, the exponential of x minus 1.
-//
-// Method
-// 1. Argument reduction:
-// Given x, find r and integer k such that
-//
-// x = k*ln2 + r, |r| <= 0.5*ln2 ~ 0.34658
-//
-// Here a correction term c will be computed to compensate
-// the error in r when rounded to a floating-point number.
-//
-// 2. Approximating expm1(r) by a special rational function on
-// the interval [0,0.34658]:
-// Since
-// r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
-// we define R1(r*r) by
-// r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
-// That is,
-// R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
-// = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
-// = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
-// We use a special Remez algorithm on [0,0.347] to generate
-// a polynomial of degree 5 in r*r to approximate R1. The
-// maximum error of this polynomial approximation is bounded
-// by 2**-61. In other words,
-// R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
-// where Q1 = -1.6666666666666567384E-2,
-// Q2 = 3.9682539681370365873E-4,
-// Q3 = -9.9206344733435987357E-6,
-// Q4 = 2.5051361420808517002E-7,
-// Q5 = -6.2843505682382617102E-9;
-// (where z=r*r, and the values of Q1 to Q5 are listed below)
-// with error bounded by
-// | 5 | -61
-// | 1.0+Q1*z+...+Q5*z - R1(z) | <= 2
-// | |
-//
-// expm1(r) = exp(r)-1 is then computed by the following
-// specific way which minimizes the accumulated rounding error:
-// 2 3
-// r r [ 3 - (R1 + R1*r/2) ]
-// expm1(r) = r + --- + --- * [--------------------]
-// 2 2 [ 6 - r*(3 - R1*r/2) ]
-//
-// To compensate the error in the argument reduction, we use
-// expm1(r+c) = expm1(r) + c + expm1(r)*c
-// ~ expm1(r) + c + r*c
-// Thus c+r*c will be added in as the correction terms for
-// expm1(r+c). Now rearrange the term to avoid optimization
-// screw up:
-// ( 2 2 )
-// ({ ( r [ R1 - (3 - R1*r/2) ] ) } r )
-// expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
-// ({ ( 2 [ 6 - r*(3 - R1*r/2) ] ) } 2 )
-// ( )
-//
-// = r - E
-// 3. Scale back to obtain expm1(x):
-// From step 1, we have
-// expm1(x) = either 2^k*[expm1(r)+1] - 1
-// = or 2^k*[expm1(r) + (1-2^-k)]
-// 4. Implementation notes:
-// (A). To save one multiplication, we scale the coefficient Qi
-// to Qi*2^i, and replace z by (x^2)/2.
-// (B). To achieve maximum accuracy, we compute expm1(x) by
-// (i) if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
-// (ii) if k=0, return r-E
-// (iii) if k=-1, return 0.5*(r-E)-0.5
-// (iv) if k=1 if r < -0.25, return 2*((r+0.5)- E)
-// else return 1.0+2.0*(r-E);
-// (v) if (k<-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
-// (vi) if k <= 20, return 2^k((1-2^-k)-(E-r)), else
-// (vii) return 2^k(1-((E+2^-k)-r))
-//
-// Special cases:
-// expm1(INF) is INF, expm1(NaN) is NaN;
-// expm1(-INF) is -1, and
-// for finite argument, only expm1(0)=0 is exact.
-//
-// Accuracy:
-// according to an error analysis, the error is always less than
-// 1 ulp (unit in the last place).
-//
-// Misc. info.
-// For IEEE double
-// if x > 7.09782712893383973096e+02 then expm1(x) overflow
-//
-define KEXPM1_OVERFLOW = 7.09782712893383973096e+02;
-define INVLN2 = 1.44269504088896338700;
-define EXPM1_1 = -3.33333333333331316428e-02;
-define EXPM1_2 = 1.58730158725481460165e-03;
-define EXPM1_3 = -7.93650757867487942473e-05;
-define EXPM1_4 = 4.00821782732936239552e-06;
-define EXPM1_5 = -2.01099218183624371326e-07;
-
-function MathExpm1(x) {
- x = x * 1; // Convert to number.
- var y;
- var hi;
- var lo;
- var k;
- var t;
- var c;
-
- var hx = %_DoubleHi(x);
- var xsb = hx & 0x80000000; // Sign bit of x
- var y = (xsb === 0) ? x : -x; // y = |x|
- hx &= 0x7fffffff; // High word of |x|
-
- // Filter out huge and non-finite argument
- if (hx >= 0x4043687a) { // if |x| ~=> 56 * ln2
- if (hx >= 0x40862e42) { // if |x| >= 709.78
- if (hx >= 0x7ff00000) {
- // expm1(inf) = inf; expm1(-inf) = -1; expm1(nan) = nan;
- return (x === -INFINITY) ? -1 : x;
- }
- if (x > KEXPM1_OVERFLOW) return INFINITY; // Overflow
- }
- if (xsb != 0) return -1; // x < -56 * ln2, return -1.
- }
-
- // Argument reduction
- if (hx > 0x3fd62e42) { // if |x| > 0.5 * ln2
- if (hx < 0x3ff0a2b2) { // and |x| < 1.5 * ln2
- if (xsb === 0) {
- hi = x - LN2_HI;
- lo = LN2_LO;
- k = 1;
- } else {
- hi = x + LN2_HI;
- lo = -LN2_LO;
- k = -1;
- }
- } else {
- k = (INVLN2 * x + ((xsb === 0) ? 0.5 : -0.5)) | 0;
- t = k;
- // t * ln2_hi is exact here.
- hi = x - t * LN2_HI;
- lo = t * LN2_LO;
- }
- x = hi - lo;
- c = (hi - x) - lo;
- } else if (hx < 0x3c900000) {
- // When |x| < 2^-54, we can return x.
- return x;
- } else {
- // Fall through.
- k = 0;
- }
-
- // x is now in primary range
- var hfx = 0.5 * x;
- var hxs = x * hfx;
- var r1 = 1 + hxs * (EXPM1_1 + hxs * (EXPM1_2 + hxs *
- (EXPM1_3 + hxs * (EXPM1_4 + hxs * EXPM1_5))));
- t = 3 - r1 * hfx;
- var e = hxs * ((r1 - t) / (6 - x * t));
- if (k === 0) { // c is 0
- return x - (x*e - hxs);
- } else {
- e = (x * (e - c) - c);
- e -= hxs;
- if (k === -1) return 0.5 * (x - e) - 0.5;
- if (k === 1) {
- if (x < -0.25) return -2 * (e - (x + 0.5));
- return 1 + 2 * (x - e);
- }
-
- if (k <= -2 || k > 56) {
- // suffice to return exp(x) + 1
- y = 1 - (e - x);
- // Add k to y's exponent
- y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
- return y - 1;
- }
- if (k < 20) {
- // t = 1 - 2^k
- t = %_ConstructDouble(0x3ff00000 - (0x200000 >> k), 0);
- y = t - (e - x);
- // Add k to y's exponent
- y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
- } else {
- // t = 2^-k
- t = %_ConstructDouble((0x3ff - k) << 20, 0);
- y = x - (e + t);
- y += 1;
- // Add k to y's exponent
- y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
- }
- }
- return y;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.30.
-// Math.sinh
-// Method :
-// mathematically sinh(x) is defined to be (exp(x)-exp(-x))/2
-// 1. Replace x by |x| (sinh(-x) = -sinh(x)).
-// 2.
-// E + E/(E+1)
-// 0 <= x <= 22 : sinh(x) := --------------, E=expm1(x)
-// 2
-//
-// 22 <= x <= lnovft : sinh(x) := exp(x)/2
-// lnovft <= x <= ln2ovft: sinh(x) := exp(x/2)/2 * exp(x/2)
-// ln2ovft < x : sinh(x) := x*shuge (overflow)
-//
-// Special cases:
-// sinh(x) is |x| if x is +Infinity, -Infinity, or NaN.
-// only sinh(0)=0 is exact for finite x.
-//
-define KSINH_OVERFLOW = 710.4758600739439;
-define TWO_M28 = 3.725290298461914e-9; // 2^-28, empty lower half
-define LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
-
-function MathSinh(x) {
- x = x * 1; // Convert to number.
- var h = (x < 0) ? -0.5 : 0.5;
- // |x| in [0, 22]. return sign(x)*0.5*(E+E/(E+1))
- var ax = MathAbs(x);
- if (ax < 22) {
- // For |x| < 2^-28, sinh(x) = x
- if (ax < TWO_M28) return x;
- var t = MathExpm1(ax);
- if (ax < 1) return h * (2 * t - t * t / (t + 1));
- return h * (t + t / (t + 1));
- }
- // |x| in [22, log(maxdouble)], return 0.5 * exp(|x|)
- if (ax < LOG_MAXD) return h * MathExp(ax);
- // |x| in [log(maxdouble), overflowthreshold]
- // overflowthreshold = 710.4758600739426
- if (ax <= KSINH_OVERFLOW) {
- var w = MathExp(0.5 * ax);
- var t = h * w;
- return t * w;
- }
- // |x| > overflowthreshold or is NaN.
- // Return Infinity of the appropriate sign or NaN.
- return x * INFINITY;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.12.
-// Math.cosh
-// Method :
-// mathematically cosh(x) is defined to be (exp(x)+exp(-x))/2
-// 1. Replace x by |x| (cosh(x) = cosh(-x)).
-// 2.
-// [ exp(x) - 1 ]^2
-// 0 <= x <= ln2/2 : cosh(x) := 1 + -------------------
-// 2*exp(x)
-//
-// exp(x) + 1/exp(x)
-// ln2/2 <= x <= 22 : cosh(x) := -------------------
-// 2
-// 22 <= x <= lnovft : cosh(x) := exp(x)/2
-// lnovft <= x <= ln2ovft: cosh(x) := exp(x/2)/2 * exp(x/2)
-// ln2ovft < x : cosh(x) := huge*huge (overflow)
-//
-// Special cases:
-// cosh(x) is |x| if x is +INF, -INF, or NaN.
-// only cosh(0)=1 is exact for finite x.
-//
-define KCOSH_OVERFLOW = 710.4758600739439;
-
-function MathCosh(x) {
- x = x * 1; // Convert to number.
- var ix = %_DoubleHi(x) & 0x7fffffff;
- // |x| in [0,0.5*log2], return 1+expm1(|x|)^2/(2*exp(|x|))
- if (ix < 0x3fd62e43) {
- var t = MathExpm1(MathAbs(x));
- var w = 1 + t;
- // For |x| < 2^-55, cosh(x) = 1
- if (ix < 0x3c800000) return w;
- return 1 + (t * t) / (w + w);
- }
-// |x| in [0.5*log2, 22], return (exp(|x|)+1/exp(|x|))/2
- if (ix < 0x40360000) {
- var t = MathExp(MathAbs(x));
- return 0.5 * t + 0.5 / t;
- }
- // |x| in [22, log(maxdouble)], return half*exp(|x|)
- if (ix < 0x40862e42) return 0.5 * MathExp(MathAbs(x));
- // |x| in [log(maxdouble), overflowthreshold]
- if (MathAbs(x) <= KCOSH_OVERFLOW) {
- var w = MathExp(0.5 * MathAbs(x));
- var t = 0.5 * w;
- return t * w;
- }
- if (NUMBER_IS_NAN(x)) return x;
- // |x| > overflowthreshold.
- return INFINITY;
-}
-
-// ES6 draft 09-27-13, section 20.2.2.33.
-// Math.tanh(x)
-// Method :
-// x -x
-// e - e
-// 0. tanh(x) is defined to be -----------
-// x -x
-// e + e
-// 1. reduce x to non-negative by tanh(-x) = -tanh(x).
-// 2. 0 <= x <= 2**-55 : tanh(x) := x*(one+x)
-// -t
-// 2**-55 < x <= 1 : tanh(x) := -----; t = expm1(-2x)
-// t + 2
-// 2
-// 1 <= x <= 22.0 : tanh(x) := 1- ----- ; t = expm1(2x)
-// t + 2
-// 22.0 < x <= INF : tanh(x) := 1.
-//
-// Special cases:
-// tanh(NaN) is NaN;
-// only tanh(0) = 0 is exact for finite argument.
-//
-
-define TWO_M55 = 2.77555756156289135105e-17; // 2^-55, empty lower half
-
-function MathTanh(x) {
- x = x * 1; // Convert to number.
- // x is Infinity or NaN
- if (!NUMBER_IS_FINITE(x)) {
- if (x > 0) return 1;
- if (x < 0) return -1;
- return x;
- }
-
- var ax = MathAbs(x);
- var z;
- // |x| < 22
- if (ax < 22) {
- if (ax < TWO_M55) {
- // |x| < 2^-55, tanh(small) = small.
- return x;
- }
- if (ax >= 1) {
- // |x| >= 1
- var t = MathExpm1(2 * ax);
- z = 1 - 2 / (t + 2);
- } else {
- var t = MathExpm1(-2 * ax);
- z = -t / (t + 2);
- }
- } else {
- // |x| > 22, return +/- 1
- z = 1;
- }
- return (x >= 0) ? z : -z;
-}
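
The method table above reduces every finite case of tanh to expm1 of +/-2|x|. A C++ transliteration of those branches, using the listed thresholds:

  #include <cmath>

  double tanh_via_expm1(double x) {
    if (std::isnan(x)) return x;                   // tanh(NaN) is NaN
    double ax = std::fabs(x);
    double z;
    if (ax < 2.77555756156289135105e-17) {         // |x| < 2^-55
      z = ax;                                      // tanh(small) = small
    } else if (ax < 1) {
      double t = std::expm1(-2 * ax);
      z = -t / (t + 2);
    } else if (ax < 22) {
      double t = std::expm1(2 * ax);
      z = 1 - 2 / (t + 2);
    } else {
      z = 1;                                       // saturates for |x| > 22 (incl. inf)
    }
    return std::signbit(x) ? -z : z;               // restore the sign, incl. -0
  }
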
-
-// ES6 draft 09-27-13, section 20.2.2.21.
-// Return the base 10 logarithm of x
-//
-// Method :
-// Let log10_2hi = leading 40 bits of log10(2) and
-// log10_2lo = log10(2) - log10_2hi,
-// ivln10 = 1/log(10) rounded.
-// Then
-// n = ilogb(x),
-// if(n<0) n = n+1;
-// x = scalbn(x,-n);
-// log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
-//
-// Note 1:
-// To guarantee log10(10**n)=n, where 10**n is normal, the rounding
-// mode must be set to Round-to-Nearest.
-// Note 2:
-// [1/log(10)] rounded to 53 bits has error .198 ulps;
-// log10 is monotonic at all binary break points.
-//
-// Special cases:
-// log10(x) is NaN if x < 0;
-// log10(+INF) is +INF; log10(0) is -INF;
-// log10(NaN) is that NaN;
-// log10(10**N) = N for N=0,1,...,22.
-//
-
-define IVLN10 = 4.34294481903251816668e-01;
-define LOG10_2HI = 3.01029995663611771306e-01;
-define LOG10_2LO = 3.69423907715893078616e-13;
-
-function MathLog10(x) {
- x = x * 1; // Convert to number.
- var hx = %_DoubleHi(x);
- var lx = %_DoubleLo(x);
- var k = 0;
-
- if (hx < 0x00100000) {
- // x < 2^-1022
- // log10(+/- 0) = -Infinity.
- if (((hx & 0x7fffffff) | lx) === 0) return -INFINITY;
- // log10 of negative number is NaN.
- if (hx < 0) return NaN;
- // Subnormal number. Scale up x.
- k -= 54;
- x *= TWO54;
- hx = %_DoubleHi(x);
- lx = %_DoubleLo(x);
- }
-
- // Infinity or NaN.
- if (hx >= 0x7ff00000) return x;
-
- k += (hx >> 20) - 1023;
- var i = (k & 0x80000000) >>> 31;
- hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
- var y = k + i;
- x = %_ConstructDouble(hx, lx);
-
- var z = y * LOG10_2LO + IVLN10 * %_MathLogRT(x);
- return z + y * LOG10_2HI;
-}
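The hi/lo split of log10(2) keeps the n*log10(2) contribution nearly exact for the integer exponent n. A small C++ check of the identity, with plain std::log standing in for the higher-precision %_MathLogRT kernel used above:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double kIvln10   = 4.34294481903251816668e-01;  // 1/ln(10), rounded
      const double kLog102Hi = 3.01029995663611771306e-01;  // leading bits of log10(2)
      const double kLog102Lo = 3.69423907715893078616e-13;  // log10(2) - kLog102Hi
      double x = 1000.0;
      int n = std::ilogb(x);            // 9
      double m = std::scalbn(x, -n);    // 1.953125, in [1, 2)
      double log10x = n * kLog102Hi + (n * kLog102Lo + kIvln10 * std::log(m));
      std::printf("%.17g\n", log10x);   // should print 3 under round-to-nearest
      return 0;
    }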
-
-
-// ES6 draft 09-27-13, section 20.2.2.22.
-// Return the base 2 logarithm of x
-//
-// fdlibm does not have an explicit log2 function, but fdlibm's pow
-// function does implement an accurate log2 function as part of the
-// pow implementation. This extracts the core parts of that as a
-// separate log2 function.
-
-// Method:
-// Compute log2(x) in two pieces:
-// log2(x) = w1 + w2
-// where w1 has 53-24 = 29 bits of trailing zeroes.
-
-define DP_H = 5.84962487220764160156e-01;
-define DP_L = 1.35003920212974897128e-08;
-
-// Polynomial coefficients for (3/2)*(log2(x) - 2*s - 2/3*s^3)
-define LOG2_1 = 5.99999999999994648725e-01;
-define LOG2_2 = 4.28571428578550184252e-01;
-define LOG2_3 = 3.33333329818377432918e-01;
-define LOG2_4 = 2.72728123808534006489e-01;
-define LOG2_5 = 2.30660745775561754067e-01;
-define LOG2_6 = 2.06975017800338417784e-01;
-
-// cp = 2/(3*ln(2)). Note that cp_h + cp_l is cp, but with more accuracy.
-define CP = 9.61796693925975554329e-01;
-define CP_H = 9.61796700954437255859e-01;
-define CP_L = -7.02846165095275826516e-09;
-// 2^53
-define TWO53 = 9007199254740992;
-
-function MathLog2(x) {
- x = x * 1; // Convert to number.
- var ax = MathAbs(x);
- var hx = %_DoubleHi(x);
- var lx = %_DoubleLo(x);
- var ix = hx & 0x7fffffff;
-
- // Handle special cases.
- // log2(+/- 0) = -Infinity
- if ((ix | lx) == 0) return -INFINITY;
-
- // log(x) = NaN, if x < 0
- if (hx < 0) return NaN;
-
- // log2(Infinity) = Infinity, log2(NaN) = NaN
- if (ix >= 0x7ff00000) return x;
-
- var n = 0;
-
- // Take care of subnormal number.
- if (ix < 0x00100000) {
- ax *= TWO53;
- n -= 53;
- ix = %_DoubleHi(ax);
- }
-
- n += (ix >> 20) - 0x3ff;
- var j = ix & 0x000fffff;
-
- // Determine interval.
- ix = j | 0x3ff00000; // normalize ix.
-
- var bp = 1;
- var dp_h = 0;
- var dp_l = 0;
- if (j > 0x3988e) { // |x| > sqrt(3/2)
- if (j < 0xbb67a) { // |x| < sqrt(3)
- bp = 1.5;
- dp_h = DP_H;
- dp_l = DP_L;
- } else {
- n += 1;
- ix -= 0x00100000;
- }
- }
-
- ax = %_ConstructDouble(ix, %_DoubleLo(ax));
-
- // Compute ss = s_h + s_l = (x - 1)/(x+1) or (x - 1.5)/(x + 1.5)
- var u = ax - bp;
- var v = 1 / (ax + bp);
- var ss = u * v;
- var s_h = %_ConstructDouble(%_DoubleHi(ss), 0);
-
- // t_h = ax + bp[k] High
-  var t_h = %_ConstructDouble(%_DoubleHi(ax + bp), 0);
- var t_l = ax - (t_h - bp);
- var s_l = v * ((u - s_h * t_h) - s_h * t_l);
-
- // Compute log2(ax)
- var s2 = ss * ss;
- var r = s2 * s2 * (LOG2_1 + s2 * (LOG2_2 + s2 * (LOG2_3 + s2 * (
- LOG2_4 + s2 * (LOG2_5 + s2 * LOG2_6)))));
- r += s_l * (s_h + ss);
- s2 = s_h * s_h;
- t_h = %_ConstructDouble(%_DoubleHi(3.0 + s2 + r), 0);
- t_l = r - ((t_h - 3.0) - s2);
- // u + v = ss * (1 + ...)
- u = s_h * t_h;
- v = s_l * t_h + t_l * ss;
-
- // 2 / (3 * log(2)) * (ss + ...)
- var p_h = %_ConstructDouble(%_DoubleHi(u + v), 0);
- var p_l = v - (p_h - u);
- var z_h = CP_H * p_h;
- var z_l = CP_L * p_h + p_l * CP + dp_l;
-
- // log2(ax) = (ss + ...) * 2 / (3 * log(2)) = n + dp_h + z_h + z_l
- var t = n;
- var t1 = %_ConstructDouble(%_DoubleHi(((z_h + z_l) + dp_h) + t), 0);
- var t2 = z_l - (((t1 - t) - dp_h) - z_h);
-
- // t1 + t2 = log2(ax), sum up because we do not care about extra precision.
- return t1 + t2;
-}
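The recurring %_ConstructDouble(%_DoubleHi(v), 0) idiom in MathLog2 truncates the low 32 mantissa bits so that subsequent "head" products round exactly, which is what makes the p_h/p_l and t1/t2 compensated sums work. A standalone C++ sketch of that split (truncate_low_word is an illustrative name):

    #include <cstdint>
    #include <cstring>

    // Zero the low 32 bits of a double, keeping sign, exponent and the top
    // 20 mantissa bits -- the C++ analogue of %_ConstructDouble(%_DoubleHi(v), 0).
    static double truncate_low_word(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      bits &= 0xffffffff00000000ull;
      std::memcpy(&v, &bits, sizeof bits);
      return v;
    }

    // Usage: split v into a short head and a small tail, v == head + tail;
    // products of two heads then incur no rounding error in double precision.
    //   double head = truncate_low_word(v);
    //   double tail = v - head;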
-
-//-------------------------------------------------------------------
-
-utils.InstallFunctions(GlobalMath, DONT_ENUM, [
- "cos", MathCos,
- "sin", MathSin,
- "tan", MathTan,
- "sinh", MathSinh,
- "cosh", MathCosh,
- "tanh", MathTanh,
- "log10", MathLog10,
- "log2", MathLog2,
- "log1p", MathLog1p,
- "expm1", MathExpm1
-]);
-
-%SetForceInlineFlag(MathSin);
-%SetForceInlineFlag(MathCos);
-
-})
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
index 6adf365689..aaf521f310 100644
--- a/deps/v8/src/third_party/vtune/v8vtune.gyp
+++ b/deps/v8/src/third_party/vtune/v8vtune.gyp
@@ -29,13 +29,13 @@
'variables': {
'v8_code': 1,
},
- 'includes': ['../../../build/toolchain.gypi', '../../../build/features.gypi'],
+ 'includes': ['../../../gypfiles/toolchain.gypi', '../../../gypfiles/features.gypi'],
'targets': [
{
'target_name': 'v8_vtune',
'type': 'static_library',
'dependencies': [
- '../../../tools/gyp/v8.gyp:v8',
+ '../../v8.gyp:v8',
],
'sources': [
'ittnotify_config.h',
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 30f6196001..0bd19546ac 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -55,26 +55,12 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <string.h>
-#ifdef WIN32
-#include <hash_map>
-using namespace std;
-#else
-// To avoid GCC 4.4 compilation warning about hash_map being deprecated.
-#define OLD_DEPRECATED __DEPRECATED
-#undef __DEPRECATED
-#if defined (ANDROID)
-#include <hash_map>
-using namespace std;
-#else
-#include <ext/hash_map>
-using namespace __gnu_cxx;
-#endif
-#define __DEPRECATED OLD_DEPRECATED
-#endif
+#include <stdlib.h>
+#include <string.h>
#include <list>
+#include <unordered_map>
#include "v8-vtune.h"
#include "vtune-jit.h"
@@ -126,11 +112,8 @@ struct HashForCodeObject {
}
};
-#ifdef WIN32
-typedef hash_map<void*, void*> JitInfoMap;
-#else
-typedef hash_map<void*, void*, HashForCodeObject, SameCodeObjects> JitInfoMap;
-#endif
+typedef std::unordered_map<void*, void*, HashForCodeObject, SameCodeObjects>
+ JitInfoMap;
static JitInfoMap* GetEntries() {
static JitInfoMap* entries;
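The replacement container is a drop-in for the old hash_map; a minimal sketch of std::unordered_map with custom hash and equality functors, using illustrative stand-ins for the HashForCodeObject/SameCodeObjects functors this file defines:

    #include <unordered_map>

    struct PointerHash {
      size_t operator()(void* p) const {
        return reinterpret_cast<size_t>(p) >> 4;  // drop low alignment bits
      }
    };

    struct PointerEq {
      bool operator()(void* a, void* b) const { return a == b; }
    };

    using DemoJitInfoMap =
        std::unordered_map<void*, void*, PointerHash, PointerEq>;

    // Insertion and lookup read the same as with the old GNU/MSVC hash_map:
    //   DemoJitInfoMap m;
    //   m[code_address] = line_info;
    //   auto it = m.find(code_address);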
diff --git a/deps/v8/src/tracing/trace-event.cc b/deps/v8/src/tracing/trace-event.cc
index 04f1f2e2ea..3e0a0fab21 100644
--- a/deps/v8/src/tracing/trace-event.cc
+++ b/deps/v8/src/tracing/trace-event.cc
@@ -4,16 +4,137 @@
#include "src/tracing/trace-event.h"
+#include <string.h>
+
+#include "src/isolate.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
namespace tracing {
+// A global flag used as a shortcut to check for the
+// v8.runtime-call-stats category due to its high-frequency use.
+base::Atomic32 kRuntimeCallStatsTracingEnabled = false;
+
v8::Platform* TraceEventHelper::GetCurrentPlatform() {
return v8::internal::V8::GetCurrentPlatform();
}
+void CallStatsScopedTracer::AddEndTraceEvent() {
+ if (!has_parent_scope_ && p_data_->isolate) {
+ v8::internal::tracing::AddTraceEvent(
+ TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
+ v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
+ v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_COPY,
+ "runtime-call-stat",
+ TRACE_STR_COPY(p_data_->isolate->trace_event_stats_table()->Dump()));
+ } else {
+ v8::internal::tracing::AddTraceEvent(
+ TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
+ v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
+ v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE);
+ }
+}
+
+void CallStatsScopedTracer::Initialize(Isolate* isolate,
+ const uint8_t* category_group_enabled,
+ const char* name) {
+ data_.isolate = isolate;
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ p_data_ = &data_;
+ TraceEventStatsTable* table = isolate->trace_event_stats_table();
+ has_parent_scope_ = table->InUse();
+ if (!has_parent_scope_) table->Reset();
+ v8::internal::tracing::AddTraceEvent(
+ TRACE_EVENT_PHASE_BEGIN, category_group_enabled, name,
+ v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
+ TRACE_EVENT_FLAG_NONE, v8::internal::tracing::kNoId);
+}
+
+void TraceEventStatsTable::Enter(Isolate* isolate,
+ TraceEventCallStatsTimer* timer,
+ CounterId counter_id) {
+ TraceEventStatsTable* table = isolate->trace_event_stats_table();
+ RuntimeCallCounter* counter = &(table->*counter_id);
+ timer->Start(counter, table->current_timer_);
+ table->current_timer_ = timer;
+}
+
+void TraceEventStatsTable::Leave(Isolate* isolate,
+ TraceEventCallStatsTimer* timer) {
+ TraceEventStatsTable* table = isolate->trace_event_stats_table();
+ if (table->current_timer_ == timer) {
+ table->current_timer_ = timer->Stop();
+ }
+}
+
+void TraceEventStatsTable::Reset() {
+ in_use_ = true;
+ current_timer_ = nullptr;
+
+#define RESET_COUNTER(name) this->name.Reset();
+ FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
+
+#define RESET_COUNTER(name, nargs, result_size) this->Runtime_##name.Reset();
+ FOR_EACH_INTRINSIC(RESET_COUNTER)
+#undef RESET_COUNTER
+
+#define RESET_COUNTER(name) this->Builtin_##name.Reset();
+ BUILTIN_LIST_C(RESET_COUNTER)
+#undef RESET_COUNTER
+
+#define RESET_COUNTER(name) this->API_##name.Reset();
+ FOR_EACH_API_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
+
+#define RESET_COUNTER(name) this->Handler_##name.Reset();
+ FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
+#undef RESET_COUNTER
+}
+
+const char* TraceEventStatsTable::Dump() {
+ buffer_.str(std::string());
+ buffer_.clear();
+ buffer_ << "{";
+#define DUMP_COUNTER(name) \
+ if (this->name.count > 0) this->name.Dump(buffer_);
+ FOR_EACH_MANUAL_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name, nargs, result_size) \
+ if (this->Runtime_##name.count > 0) this->Runtime_##name.Dump(buffer_);
+ FOR_EACH_INTRINSIC(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+ if (this->Builtin_##name.count > 0) this->Builtin_##name.Dump(buffer_);
+ BUILTIN_LIST_C(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+ if (this->API_##name.count > 0) this->API_##name.Dump(buffer_);
+ FOR_EACH_API_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+
+#define DUMP_COUNTER(name) \
+ if (this->Handler_##name.count > 0) this->Handler_##name.Dump(buffer_);
+ FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
+#undef DUMP_COUNTER
+ buffer_ << "\"END\":[]}";
+ const std::string& buffer_str = buffer_.str();
+ size_t length = buffer_str.size();
+ if (length > len_) {
+ buffer_c_str_.reset(new char[length + 1]);
+ len_ = length;
+ }
+ strncpy(buffer_c_str_.get(), buffer_str.c_str(), length + 1);
+ in_use_ = false;
+ return buffer_c_str_.get();
+}
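The RESET_COUNTER/DUMP_COUNTER blocks above are the classic X-macro pattern: one list macro (FOR_EACH_MANUAL_COUNTER and friends) is expanded with a different per-entry payload each time. A self-contained miniature with made-up counter names:

    #include <iostream>

    #define FOR_EACH_DEMO_COUNTER(V) V(Parse) V(Compile) V(Execute)

    struct DemoCounters {
    #define DECLARE_COUNTER(name) int name = 0;
      FOR_EACH_DEMO_COUNTER(DECLARE_COUNTER)
    #undef DECLARE_COUNTER

      void Reset() {
    #define RESET_COUNTER(name) name = 0;
        FOR_EACH_DEMO_COUNTER(RESET_COUNTER)
    #undef RESET_COUNTER
      }

      void Dump() {
    #define DUMP_COUNTER(name) \
      if (name > 0) std::cout << #name << ": " << name << "\n";
        FOR_EACH_DEMO_COUNTER(DUMP_COUNTER)
    #undef DUMP_COUNTER
      }
    };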
+
} // namespace tracing
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 246ddd3aba..25ccd8045a 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -11,6 +11,7 @@
#include "include/v8-platform.h"
#include "src/base/atomicops.h"
#include "src/base/macros.h"
+#include "src/counters.h"
// This header file defines implementation details of how the trace macros in
// trace_event_common.h collect and store trace events. Anything not
@@ -282,6 +283,37 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
INTERNAL_TRACE_EVENT_UID(ScopedContext) \
INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() \
+ base::NoBarrier_Load(&v8::internal::tracing::kRuntimeCallStatsTracingEnabled)
+
+#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
+ INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
+
+#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id) \
+ INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id)
+
+#define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
+ { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO( \
+ TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")); \
+ base::NoBarrier_Store( \
+ &v8::internal::tracing::kRuntimeCallStatsTracingEnabled, \
+ INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()); \
+ } \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ v8::internal::tracing::CallStatsScopedTracer INTERNAL_TRACE_EVENT_UID( \
+ tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ INTERNAL_TRACE_EVENT_UID(tracer) \
+ .Initialize(isolate, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name); \
+ }
+
+#define INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, \
+ counter_id) \
+ v8::internal::tracing::CounterScope INTERNAL_TRACE_EVENT_UID(scope)( \
+ isolate, counter_id);
+
namespace v8 {
namespace internal {
namespace tracing {
@@ -292,6 +324,8 @@ const int kZeroNumArgs = 0;
const decltype(nullptr) kGlobalScope = nullptr;
const uint64_t kNoId = 0;
+extern base::Atomic32 kRuntimeCallStatsTracingEnabled;
+
class TraceEventHelper {
public:
static v8::Platform* GetCurrentPlatform();
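Expanded, TRACE_EVENT_CALL_STATS_SCOPED gives RAII-style tracing around a block; a hedged usage sketch (the category and event names are illustrative):

    void RunSomething(v8::internal::Isolate* isolate) {
      TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunSomething");
      // ... work. If the category is enabled, a BEGIN event was emitted by
      // Initialize() above, and the matching END event -- carrying the stats
      // dump when this is the outermost scope -- is emitted when the
      // CallStatsScopedTracer destructs at the closing brace.
    }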
@@ -478,7 +512,7 @@ static V8_INLINE uint64_t AddTraceEvent(char phase,
uint64_t id, uint64_t bind_id,
unsigned int flags) {
return TRACE_EVENT_API_ADD_TRACE_EVENT(phase, category_group_enabled, name,
- id, bind_id, kZeroNumArgs, NULL,
+ scope, id, bind_id, kZeroNumArgs, NULL,
NULL, NULL, flags);
}
@@ -492,7 +526,7 @@ static V8_INLINE uint64_t AddTraceEvent(
uint64_t arg_values[1];
SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
return TRACE_EVENT_API_ADD_TRACE_EVENT(
- phase, category_group_enabled, name, id, bind_id, num_args,
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
&arg1_name, arg_types, arg_values, flags);
}
@@ -509,7 +543,7 @@ static V8_INLINE uint64_t AddTraceEvent(
SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
return TRACE_EVENT_API_ADD_TRACE_EVENT(
- phase, category_group_enabled, name, id, bind_id, num_args,
+ phase, category_group_enabled, name, scope, id, bind_id, num_args,
arg_names, arg_types, arg_values, flags);
}
@@ -590,6 +624,146 @@ class TraceEventSamplingStateScope {
const char* previous_state_;
};
+// Do not use directly.
+class CallStatsScopedTracer {
+ public:
+ CallStatsScopedTracer() : p_data_(nullptr) {}
+ ~CallStatsScopedTracer() {
+ if (V8_UNLIKELY(p_data_ && *data_.category_group_enabled)) {
+ AddEndTraceEvent();
+ }
+ }
+
+ void Initialize(Isolate* isolate, const uint8_t* category_group_enabled,
+ const char* name);
+
+ private:
+ void AddEndTraceEvent();
+ struct Data {
+ const uint8_t* category_group_enabled;
+ const char* name;
+ Isolate* isolate;
+ };
+ bool has_parent_scope_;
+ Data* p_data_;
+ Data data_;
+};
+
+// TraceEventCallStatsTimer keeps track of the stack of currently active
+// timers, which is needed to properly measure the own time of a
+// RuntimeCallCounter.
+class TraceEventCallStatsTimer {
+ public:
+ TraceEventCallStatsTimer() : counter_(nullptr), parent_(nullptr) {}
+ RuntimeCallCounter* counter() { return counter_; }
+ base::ElapsedTimer timer() { return timer_; }
+
+ private:
+ friend class TraceEventStatsTable;
+
+ V8_INLINE void Start(RuntimeCallCounter* counter,
+ TraceEventCallStatsTimer* parent) {
+ counter_ = counter;
+ parent_ = parent;
+ timer_.Start();
+ }
+
+ V8_INLINE TraceEventCallStatsTimer* Stop() {
+ base::TimeDelta delta = timer_.Elapsed();
+ timer_.Stop();
+ counter_->count++;
+ counter_->time += delta;
+ if (parent_ != nullptr) {
+ // Adjust parent timer so that it does not include sub timer's time.
+ parent_->counter_->time -= delta;
+ }
+ return parent_;
+ }
+
+ RuntimeCallCounter* counter_;
+ TraceEventCallStatsTimer* parent_;
+ base::ElapsedTimer timer_;
+};
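The Stop() bookkeeping is what turns total time into own time: every child subtracts its elapsed time from its parent's counter before handing the stack back. A worked timeline with illustrative numbers:

    // parent timer starts ............................. t = 0 ms
    //   child timer starts ............................ t = 3 ms
    //   child timer stops, delta = 4 ms ............... t = 7 ms
    //     child.counter->time  += 4 ms
    //     parent.counter->time -= 4 ms   // remove the child's share
    // parent timer stops, delta = 10 ms ............... t = 10 ms
    //   parent.counter->time += 10 ms
    // Net effect: parent own time = 10 - 4 = 6 ms, child own time = 4 ms.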
+
+class TraceEventStatsTable {
+ public:
+ typedef RuntimeCallCounter TraceEventStatsTable::*CounterId;
+
+#define CALL_RUNTIME_COUNTER(name) \
+ RuntimeCallCounter name = RuntimeCallCounter(#name);
+ FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
+ RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
+ FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
+#define CALL_BUILTIN_COUNTER(name) \
+ RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
+ BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) \
+ RuntimeCallCounter API_##name = RuntimeCallCounter("API_" #name);
+ FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) \
+ RuntimeCallCounter Handler_##name = RuntimeCallCounter(#name);
+ FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+
+  // Start measuring the time for a function. This establishes the
+  // connection to the parent counter needed to calculate own times properly.
+ static void Enter(Isolate* isolate, TraceEventCallStatsTimer* timer,
+ CounterId counter_id);
+
+  // Leave a scope for a measured runtime function. This adds the time delta
+  // to the current counter and subtracts the delta from its parent.
+ static void Leave(Isolate* isolate, TraceEventCallStatsTimer* timer);
+
+ void Reset();
+ const char* Dump();
+
+ TraceEventStatsTable() {
+ Reset();
+ in_use_ = false;
+ }
+
+ TraceEventCallStatsTimer* current_timer() { return current_timer_; }
+ bool InUse() { return in_use_; }
+
+ private:
+ std::stringstream buffer_;
+ std::unique_ptr<char[]> buffer_c_str_;
+ size_t len_ = 0;
+ // Counter to track recursive time events.
+ TraceEventCallStatsTimer* current_timer_ = nullptr;
+ bool in_use_;
+};
+
+class CounterScope {
+ public:
+ CounterScope(Isolate* isolate, TraceEventStatsTable::CounterId counter_id)
+ : isolate_(nullptr) {
+ if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())) {
+ isolate_ = isolate;
+ TraceEventStatsTable::Enter(isolate_, &timer_, counter_id);
+ }
+ }
+ ~CounterScope() {
+    // A non-null isolate_ means the stats table already entered the scope
+    // and started the timer. We must leave the scope and stop the timer even
+    // if tracing has since been disabled; otherwise we risk leaving a
+    // dangling pointer behind.
+ if (V8_UNLIKELY(isolate_ != nullptr)) {
+ TraceEventStatsTable::Leave(isolate_, &timer_);
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ TraceEventCallStatsTimer timer_;
+};
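CounterScope is the cheap per-call entry point: when the category is off it costs one atomic load. A hedged sketch of how a runtime function would use it via the macro; Runtime_DemoFunction is an invented counter id (the real ones come from FOR_EACH_INTRINSIC and friends):

    void DemoRuntimeFunction(v8::internal::Isolate* isolate) {
      TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
          isolate,
          &v8::internal::tracing::TraceEventStatsTable::Runtime_DemoFunction);
      // ... body. Enter() ran above only if tracing was enabled, and the
      // CounterScope destructor calls Leave() on the way out.
    }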
+
} // namespace tracing
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index ea02d61031..828a673d7f 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -113,8 +113,7 @@ bool TransitionArray::IsSpecialTransition(Name* name) {
return name == heap->nonextensible_symbol() ||
name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
name == heap->elements_transition_symbol() ||
- name == heap->strict_function_transition_symbol() ||
- name == heap->observed_symbol();
+ name == heap->strict_function_transition_symbol();
}
#endif
diff --git a/deps/v8/src/type-cache.h b/deps/v8/src/type-cache.h
index 2a95df9f8c..f83f3bdb71 100644
--- a/deps/v8/src/type-cache.h
+++ b/deps/v8/src/type-cache.h
@@ -38,25 +38,43 @@ class TypeCache final {
Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
Type* const kSmi = CreateNative(Type::SignedSmall(), Type::TaggedSigned());
+ Type* const kHoleySmi = Type::Union(kSmi, Type::Hole(), zone());
Type* const kHeapNumber = CreateNative(Type::Number(), Type::TaggedPointer());
Type* const kSingletonZero = CreateRange(0.0, 0.0);
Type* const kSingletonOne = CreateRange(1.0, 1.0);
+ Type* const kSingletonTen = CreateRange(10.0, 10.0);
+ Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
+ Type* const kZeroOrUndefined =
+ Type::Union(kSingletonZero, Type::Undefined(), zone());
+ Type* const kTenOrUndefined =
+ Type::Union(kSingletonTen, Type::Undefined(), zone());
+ Type* const kMinusOneOrZero = CreateRange(-1.0, 0.0);
+ Type* const kMinusOneToOne = CreateRange(-1.0, 1.0);
Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+ Type* const kZeroOrOneOrNaN = Type::Union(kZeroOrOne, Type::NaN(), zone());
Type* const kZeroToThirtyOne = CreateRange(0.0, 31.0);
Type* const kZeroToThirtyTwo = CreateRange(0.0, 32.0);
Type* const kZeroish =
Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
- Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
Type* const kIntegerOrMinusZero =
Type::Union(kInteger, Type::MinusZero(), zone());
Type* const kIntegerOrMinusZeroOrNaN =
Type::Union(kIntegerOrMinusZero, Type::NaN(), zone());
+ Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
+ Type* const kPositiveIntegerOrMinusZero =
+ Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+ Type* const kPositiveIntegerOrMinusZeroOrNaN =
+ Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
Type* const kAdditiveSafeInteger =
CreateRange(-4503599627370496.0, 4503599627370496.0);
Type* const kSafeInteger = CreateRange(-kMaxSafeInteger, kMaxSafeInteger);
+ Type* const kAdditiveSafeIntegerOrMinusZero =
+ Type::Union(kAdditiveSafeInteger, Type::MinusZero(), zone());
+ Type* const kSafeIntegerOrMinusZero =
+ Type::Union(kSafeInteger, Type::MinusZero(), zone());
Type* const kPositiveSafeInteger = CreateRange(0.0, kMaxSafeInteger);
Type* const kUntaggedUndefined =
@@ -108,6 +126,11 @@ class TypeCache final {
Type* const kJSArrayLengthType =
CreateNative(Type::Unsigned32(), Type::Tagged());
+  // The JSTypedArray::length property always contains a tagged number in
+  // the range [0, kMaxSmiValue].
+ Type* const kJSTypedArrayLengthType =
+ CreateNative(Type::UnsignedSmall(), Type::TaggedSigned());
+
// The String::length property always contains a smi in the range
// [0, String::kMaxLength].
Type* const kStringLengthType =
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 015104e96a..771021fb99 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -14,13 +14,11 @@ namespace internal {
template <typename Derived>
FeedbackVectorSlot FeedbackVectorSpecBase<Derived>::AddSlot(
FeedbackVectorSlotKind kind) {
- Derived* derived = static_cast<Derived*>(this);
-
- int slot = derived->slots();
+ int slot = This()->slots();
int entries_per_slot = TypeFeedbackMetadata::GetSlotSize(kind);
- derived->append(kind);
+ This()->append(kind);
for (int i = 1; i < entries_per_slot; i++) {
- derived->append(FeedbackVectorSlotKind::INVALID);
+ This()->append(FeedbackVectorSlotKind::INVALID);
}
return FeedbackVectorSlot(slot);
}
@@ -32,6 +30,10 @@ TypeFeedbackMetadata* TypeFeedbackMetadata::cast(Object* obj) {
return reinterpret_cast<TypeFeedbackMetadata*>(obj);
}
+bool TypeFeedbackMetadata::is_empty() const {
+  return length() == 0;
+}
int TypeFeedbackMetadata::slot_count() const {
if (length() == 0) return 0;
@@ -53,6 +55,26 @@ int TypeFeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
return kind == FeedbackVectorSlotKind::GENERAL ? 1 : 2;
}
+bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
+ switch (kind) {
+ case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
+ return true;
+
+ case FeedbackVectorSlotKind::CALL_IC:
+ case FeedbackVectorSlotKind::LOAD_IC:
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+ case FeedbackVectorSlotKind::STORE_IC:
+ case FeedbackVectorSlotKind::KEYED_STORE_IC:
+ case FeedbackVectorSlotKind::GENERAL:
+ case FeedbackVectorSlotKind::INVALID:
+ return false;
+
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ break;
+ }
+ UNREACHABLE();
+ return false;
+}
bool TypeFeedbackVector::is_empty() const {
if (length() == 0) return true;
@@ -73,24 +95,10 @@ TypeFeedbackMetadata* TypeFeedbackVector::metadata() const {
: TypeFeedbackMetadata::cast(get(kMetadataIndex));
}
-
-FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
- FeedbackVectorSlot slot) const {
- DCHECK(!is_empty());
- return metadata()->GetKind(slot);
-}
-
-
-int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
- DCHECK(slot.ToInt() < slot_count());
- return kReservedIndexCount + slot.ToInt();
-}
-
-
-// Conversion from an integer index to either a slot or an ic slot. The caller
-// should know what kind she expects.
-FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) const {
- DCHECK(index >= kReservedIndexCount && index < length());
+// Conversion from an integer index to either a slot or an ic slot.
+// static
+FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) {
+ DCHECK(index >= kReservedIndexCount);
return FeedbackVectorSlot(index - kReservedIndexCount);
}
@@ -149,6 +157,21 @@ Symbol* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
return isolate->heap()->uninitialized_symbol();
}
+bool TypeFeedbackMetadataIterator::HasNext() const {
+ return next_slot_.ToInt() < metadata()->slot_count();
+}
+
+FeedbackVectorSlot TypeFeedbackMetadataIterator::Next() {
+ DCHECK(HasNext());
+ cur_slot_ = next_slot_;
+ slot_kind_ = metadata()->GetKind(cur_slot_);
+ next_slot_ = FeedbackVectorSlot(next_slot_.ToInt() + entry_size());
+ return cur_slot_;
+}
+
+int TypeFeedbackMetadataIterator::entry_size() const {
+ return TypeFeedbackMetadata::GetSlotSize(kind());
+}
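The split into cur_slot_/next_slot_ gives the usual Java-style iterator, where kind(), entry_size() and name() refer to the slot most recently returned by Next(). A minimal traversal sketch (metadata obtained from the surrounding context):

    TypeFeedbackMetadataIterator iter(metadata);
    while (iter.HasNext()) {
      FeedbackVectorSlot slot = iter.Next();
      FeedbackVectorSlotKind kind = iter.kind();  // kind of the slot just returned
      int size = iter.entry_size();               // 1 for GENERAL, 2 otherwise
      // For LOAD_GLOBAL_IC slots, iter.name() is valid here as well.
      (void)slot; (void)kind; (void)size;
    }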
Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 4519bd65c2..61f5e8b9c7 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -37,6 +37,16 @@ FeedbackVectorSlotKind TypeFeedbackMetadata::GetKind(
return VectorICComputer::decode(data, slot.ToInt());
}
+String* TypeFeedbackMetadata::GetName(FeedbackVectorSlot slot) const {
+ DCHECK(SlotRequiresName(GetKind(slot)));
+ UnseededNumberDictionary* names =
+ UnseededNumberDictionary::cast(get(kNamesTableIndex));
+ int entry = names->FindEntry(GetIsolate(), slot.ToInt());
+ CHECK_NE(UnseededNumberDictionary::kNotFound, entry);
+ Object* name = names->ValueAt(entry);
+ DCHECK(name->IsString());
+ return String::cast(name);
+}
void TypeFeedbackMetadata::SetKind(FeedbackVectorSlot slot,
FeedbackVectorSlotKind kind) {
@@ -57,12 +67,13 @@ template Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(
template <typename Spec>
Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
const Spec* spec) {
+ Factory* factory = isolate->factory();
+
const int slot_count = spec->slots();
const int slot_kinds_length = VectorICComputer::word_count(slot_count);
const int length = slot_kinds_length + kReservedIndexCount;
if (length == kReservedIndexCount) {
- return Handle<TypeFeedbackMetadata>::cast(
- isolate->factory()->empty_fixed_array());
+ return Handle<TypeFeedbackMetadata>::cast(factory->empty_fixed_array());
}
#ifdef DEBUG
for (int i = 0; i < slot_count;) {
@@ -76,7 +87,7 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
}
#endif
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(length, TENURED);
+ Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set(kSlotsCountIndex, Smi::FromInt(slot_count));
// Fill the bit-vector part with zeros.
for (int i = 0; i < slot_kinds_length; i++) {
@@ -85,9 +96,39 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
Handle<TypeFeedbackMetadata> metadata =
Handle<TypeFeedbackMetadata>::cast(array);
+
+ // Add names to NamesTable.
+ const int name_count = spec->name_count();
+
+ Handle<UnseededNumberDictionary> names;
+ if (name_count) {
+ names = UnseededNumberDictionary::New(
+ isolate, base::bits::RoundUpToPowerOfTwo32(name_count), TENURED,
+ USE_CUSTOM_MINIMUM_CAPACITY);
+ }
+
+ int name_index = 0;
for (int i = 0; i < slot_count; i++) {
- metadata->SetKind(FeedbackVectorSlot(i), spec->GetKind(i));
+ FeedbackVectorSlotKind kind = spec->GetKind(i);
+ metadata->SetKind(FeedbackVectorSlot(i), kind);
+ if (SlotRequiresName(kind)) {
+ Handle<String> name = spec->GetName(name_index);
+ DCHECK(!name.is_null());
+ names = UnseededNumberDictionary::AtNumberPut(names, i, name);
+ name_index++;
+ }
}
+ DCHECK_EQ(name_count, name_index);
+ metadata->set(kNamesTableIndex,
+ name_count ? static_cast<Object*>(*names) : Smi::FromInt(0));
+
+ // It's important that the TypeFeedbackMetadata have a COW map, since it's
+ // pointed to by both a SharedFunctionInfo and indirectly by closures through
+  // the TypeFeedbackVector. The serializer uses the COW map type to decide
+  // that this object belongs in the startup snapshot and not the partial
+ // snapshot(s).
+ metadata->set_map(isolate->heap()->fixed_cow_array_map());
+
return metadata;
}
@@ -99,14 +140,51 @@ bool TypeFeedbackMetadata::SpecDiffersFrom(
}
int slots = slot_count();
- for (int i = 0; i < slots; i++) {
- if (GetKind(FeedbackVectorSlot(i)) != other_spec->GetKind(i)) {
+ int name_index = 0;
+ for (int i = 0; i < slots;) {
+ FeedbackVectorSlot slot(i);
+ FeedbackVectorSlotKind kind = GetKind(slot);
+ int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+
+ if (kind != other_spec->GetKind(i)) {
return true;
}
+ if (SlotRequiresName(kind)) {
+ String* name = GetName(slot);
+ DCHECK(name != GetHeap()->empty_string());
+ String* other_name = *other_spec->GetName(name_index++);
+ if (name != other_name) {
+ return true;
+ }
+ }
+ i += entry_size;
}
return false;
}
+bool TypeFeedbackMetadata::DiffersFrom(
+ const TypeFeedbackMetadata* other_metadata) const {
+ if (other_metadata->slot_count() != slot_count()) {
+ return true;
+ }
+
+ int slots = slot_count();
+ for (int i = 0; i < slots;) {
+ FeedbackVectorSlot slot(i);
+ FeedbackVectorSlotKind kind = GetKind(slot);
+ int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+ if (GetKind(slot) != other_metadata->GetKind(slot)) {
+ return true;
+ }
+ if (SlotRequiresName(kind)) {
+ if (GetName(slot) != other_metadata->GetName(slot)) {
+ return true;
+ }
+ }
+ i += entry_size;
+ }
+ return false;
+}
const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
switch (kind) {
@@ -116,6 +194,8 @@ const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
return "CALL_IC";
case FeedbackVectorSlotKind::LOAD_IC:
return "LOAD_IC";
+ case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
+ return "LOAD_GLOBAL_IC";
case FeedbackVectorSlotKind::KEYED_LOAD_IC:
return "KEYED_LOAD_IC";
case FeedbackVectorSlotKind::STORE_IC:
@@ -131,6 +211,16 @@ const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
return "?";
}
+FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
+ FeedbackVectorSlot slot) const {
+ DCHECK(!is_empty());
+ return metadata()->GetKind(slot);
+}
+
+String* TypeFeedbackVector::GetName(FeedbackVectorSlot slot) const {
+ DCHECK(!is_empty());
+ return metadata()->GetName(slot);
+}
// static
Handle<TypeFeedbackVector> TypeFeedbackVector::New(
@@ -146,13 +236,29 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set(kMetadataIndex, *metadata);
+ DisallowHeapAllocation no_gc;
+
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
DCHECK_EQ(*factory->uninitialized_symbol(), *uninitialized_sentinel);
- for (int i = kReservedIndexCount; i < length; i++) {
- array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
- }
+ for (int i = 0; i < slot_count;) {
+ FeedbackVectorSlot slot(i);
+ FeedbackVectorSlotKind kind = metadata->GetKind(slot);
+ int index = TypeFeedbackVector::GetIndex(slot);
+ int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+ Object* value;
+ if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
+ value = *factory->empty_weak_cell();
+ } else {
+ value = *uninitialized_sentinel;
+ }
+ array->set(index, value, SKIP_WRITE_BARRIER);
+ for (int j = 1; j < entry_size; j++) {
+ array->set(index + j, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+ }
+ i += entry_size;
+ }
return Handle<TypeFeedbackVector>::cast(array);
}
@@ -208,6 +314,11 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
nexus.Clear(shared->code());
break;
}
+ case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+ LoadGlobalICNexus nexus(this, slot);
+ nexus.Clear(shared->code());
+ break;
+ }
case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
KeyedLoadICNexus nexus(this, slot);
nexus.Clear(shared->code());
@@ -251,8 +362,20 @@ void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
SharedFunctionInfo::Iterator iterator(isolate);
SharedFunctionInfo* shared;
while ((shared = iterator.Next())) {
- TypeFeedbackVector* vector = shared->feedback_vector();
- vector->ClearKeyedStoreICs(shared);
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ FixedArray* optimized_code_map = shared->optimized_code_map();
+ int length = optimized_code_map->length();
+ for (int i = SharedFunctionInfo::kEntriesStart; i < length;
+ i += SharedFunctionInfo::kEntryLength) {
+ WeakCell* cell = WeakCell::cast(
+ optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
+ if (cell->value()->IsLiteralsArray()) {
+ TypeFeedbackVector* vector =
+ LiteralsArray::cast(cell->value())->feedback_vector();
+ vector->ClearKeyedStoreICs(shared);
+ }
+ }
+ }
}
}
@@ -309,10 +432,9 @@ Handle<FixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
return Handle<FixedArray>::cast(feedback_extra);
}
-
void FeedbackNexus::InstallHandlers(Handle<FixedArray> array,
MapHandleList* maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
int receiver_count = maps->length();
for (int current = 0; current < receiver_count; ++current) {
Handle<Map> map = maps->at(current);
@@ -389,6 +511,17 @@ InlineCacheState LoadICNexus::StateFromFeedback() const {
return UNINITIALIZED;
}
+InlineCacheState LoadGlobalICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+
+ Object* extra = GetFeedbackExtra();
+ if (!WeakCell::cast(feedback)->cleared() ||
+ extra != *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+ return MONOMORPHIC;
+ }
+ return UNINITIALIZED;
+}
InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
@@ -488,7 +621,7 @@ InlineCacheState CallICNexus::StateFromFeedback() const {
int CallICNexus::ExtractCallCount() {
Object* call_count = GetFeedbackExtra();
if (call_count->IsSmi()) {
- int value = Smi::cast(call_count)->value() / 2;
+ int value = Smi::cast(call_count)->value();
return value;
}
return -1;
@@ -505,14 +638,14 @@ void CallICNexus::ConfigureMonomorphicArray() {
GetIsolate()->factory()->NewAllocationSite();
SetFeedback(*new_site);
}
- SetFeedbackExtra(Smi::FromInt(kCallCountIncrement), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::FromInt(1), SKIP_WRITE_BARRIER);
}
void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
SetFeedback(*new_cell);
- SetFeedbackExtra(Smi::FromInt(kCallCountIncrement), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::FromInt(1), SKIP_WRITE_BARRIER);
}
@@ -524,22 +657,38 @@ void CallICNexus::ConfigureMegamorphic() {
void CallICNexus::ConfigureMegamorphic(int call_count) {
SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
- SetFeedbackExtra(Smi::FromInt(call_count * kCallCountIncrement),
- SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::FromInt(call_count), SKIP_WRITE_BARRIER);
}
-
void LoadICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
- Handle<Code> handler) {
+ Handle<Object> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
SetFeedback(*cell);
SetFeedbackExtra(*handler);
}
+void LoadGlobalICNexus::ConfigureUninitialized() {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+void LoadGlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
+ Isolate* isolate = GetIsolate();
+ SetFeedback(*isolate->factory()->NewWeakCell(cell));
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
+}
+
+void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Code> handler) {
+ SetFeedback(GetIsolate()->heap()->empty_weak_cell());
+ SetFeedbackExtra(*handler);
+}
void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
- Handle<Code> handler) {
+ Handle<Object> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
if (name.is_null()) {
SetFeedback(*cell);
@@ -576,9 +725,8 @@ void KeyedStoreICNexus::ConfigureMonomorphic(Handle<Name> name,
}
}
-
void LoadICNexus::ConfigurePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
Isolate* isolate = GetIsolate();
int receiver_count = maps->length();
Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 2);
@@ -587,10 +735,9 @@ void LoadICNexus::ConfigurePolymorphic(MapHandleList* maps,
SKIP_WRITE_BARRIER);
}
-
void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
MapHandleList* maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
int receiver_count = maps->length();
DCHECK(receiver_count > 1);
Handle<FixedArray> array;
@@ -606,9 +753,8 @@ void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
InstallHandlers(array, maps, handlers);
}
-
void StoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
Isolate* isolate = GetIsolate();
int receiver_count = maps->length();
Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 2);
@@ -617,10 +763,9 @@ void StoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
SKIP_WRITE_BARRIER);
}
-
void KeyedStoreICNexus::ConfigurePolymorphic(Handle<Name> name,
MapHandleList* maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
int receiver_count = maps->length();
DCHECK(receiver_count > 1);
Handle<FixedArray> array;
@@ -662,6 +807,30 @@ void KeyedStoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
}
}
+namespace {
+
+int GetStepSize(FixedArray* array, Isolate* isolate) {
+ // The array should be of the form
+ // [map, handler, map, handler, ...]
+ // or
+ // [map, map, handler, map, map, handler, ...]
+ // where "map" is either a WeakCell or |undefined|,
+ // and "handler" is either a Code object or a Smi.
+ DCHECK(array->length() >= 2);
+ Object* second = array->get(1);
+ if (second->IsWeakCell() || second->IsUndefined(isolate)) return 3;
+ DCHECK(second->IsCode() || second->IsSmi());
+ return 2;
+}
+
+#ifdef DEBUG // Only used by DCHECKs below.
+bool IsHandler(Object* object) {
+ return object->IsSmi() ||
+ (object->IsCode() && Code::cast(object)->is_handler());
+}
+#endif
+
+} // namespace
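GetStepSize infers the layout from element 1 alone; the two shapes it distinguishes, spelled out with illustrative contents:

    // step == 2: [ WeakCell(map0), handler0, WeakCell(map1), handler1, ... ]
    //   element 1 is a handler (Code object or Smi).
    //
    // step == 3: [ WeakCell(map0), WeakCell(target_map0), handler0, ... ]
    //   element 1 is another map slot (WeakCell or undefined), the shape used
    //   for transitioning keyed stores.
    //
    // Either way the handler of group i sits at index i + step - 1, which is
    // exactly what FindHandlerForMap and FindHandlers read below.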
int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
Isolate* isolate = GetIsolate();
@@ -673,12 +842,7 @@ int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
- // The array should be of the form
- // [map, handler, map, handler, ...]
- // or
- // [map, map, handler, map, map, handler, ...]
- DCHECK(array->length() >= 2);
- int increment = array->get(1)->IsCode() ? 2 : 3;
+ int increment = GetStepSize(array, isolate);
for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
@@ -701,26 +865,25 @@ int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
return 0;
}
-
-MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
+MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Object* feedback = GetFeedback();
+ Isolate* isolate = GetIsolate();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
if (feedback->IsFixedArray() || is_named_feedback) {
if (is_named_feedback) {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
- DCHECK(array->length() >= 2);
- int increment = array->get(1)->IsCode() ? 2 : 3;
+ int increment = GetStepSize(array, isolate);
for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
Map* array_map = Map::cast(cell->value());
if (array_map == *map) {
- Code* code = Code::cast(array->get(i + increment - 1));
- DCHECK(code->kind() == Code::HANDLER);
- return handle(code);
+ Object* code = array->get(i + increment - 1);
+ DCHECK(IsHandler(code));
+ return handle(code, isolate);
}
}
}
@@ -729,9 +892,9 @@ MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
if (!cell->cleared()) {
Map* cell_map = Map::cast(cell->value());
if (cell_map == *map) {
- Code* code = Code::cast(GetFeedbackExtra());
- DCHECK(code->kind() == Code::HANDLER);
- return handle(code);
+ Object* code = GetFeedbackExtra();
+ DCHECK(IsHandler(code));
+ return handle(code, isolate);
}
}
}
@@ -739,9 +902,10 @@ MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
return MaybeHandle<Code>();
}
-
-bool FeedbackNexus::FindHandlers(CodeHandleList* code_list, int length) const {
+bool FeedbackNexus::FindHandlers(List<Handle<Object>>* code_list,
+ int length) const {
Object* feedback = GetFeedback();
+ Isolate* isolate = GetIsolate();
int count = 0;
bool is_named_feedback = IsPropertyNameFeedback(feedback);
if (feedback->IsFixedArray() || is_named_feedback) {
@@ -749,29 +913,24 @@ bool FeedbackNexus::FindHandlers(CodeHandleList* code_list, int length) const {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
- // The array should be of the form
- // [map, handler, map, handler, ...]
- // or
- // [map, map, handler, map, map, handler, ...]
- // Be sure to skip handlers whose maps have been cleared.
- DCHECK(array->length() >= 2);
- int increment = array->get(1)->IsCode() ? 2 : 3;
+ int increment = GetStepSize(array, isolate);
for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
+ // Be sure to skip handlers whose maps have been cleared.
if (!cell->cleared()) {
- Code* code = Code::cast(array->get(i + increment - 1));
- DCHECK(code->kind() == Code::HANDLER);
- code_list->Add(handle(code));
+ Object* code = array->get(i + increment - 1);
+ DCHECK(IsHandler(code));
+ code_list->Add(handle(code, isolate));
count++;
}
}
} else if (feedback->IsWeakCell()) {
WeakCell* cell = WeakCell::cast(feedback);
if (!cell->cleared()) {
- Code* code = Code::cast(GetFeedbackExtra());
- DCHECK(code->kind() == Code::HANDLER);
- code_list->Add(handle(code));
+ Object* code = GetFeedbackExtra();
+ DCHECK(IsHandler(code));
+ code_list->Add(handle(code, isolate));
count++;
}
}
@@ -781,6 +940,9 @@ bool FeedbackNexus::FindHandlers(CodeHandleList* code_list, int length) const {
void LoadICNexus::Clear(Code* host) { LoadIC::Clear(GetIsolate(), host, this); }
+void LoadGlobalICNexus::Clear(Code* host) {
+ LoadGlobalIC::Clear(GetIsolate(), host, this);
+}
void KeyedLoadICNexus::Clear(Code* host) {
KeyedLoadIC::Clear(GetIsolate(), host, this);
@@ -818,7 +980,7 @@ void KeyedStoreICNexus::Clear(Code* host) {
KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
KeyedAccessStoreMode mode = STANDARD_STORE;
MapHandleList maps;
- CodeHandleList handlers;
+ List<Handle<Object>> handlers;
if (GetKeyType() == PROPERTY) return mode;
@@ -826,7 +988,7 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
FindHandlers(&handlers, maps.length());
for (int i = 0; i < handlers.length(); i++) {
// The first handler that isn't the slow handler will have the bits we need.
- Handle<Code> handler = handlers.at(i);
+ Handle<Code> handler = Handle<Code>::cast(handlers.at(i));
CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index 770b5e5ded..5355ee7188 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -15,7 +15,6 @@
namespace v8 {
namespace internal {
-
enum class FeedbackVectorSlotKind {
// This kind means that the slot points to the middle of other slot
// which occupies more than one feedback vector element.
@@ -24,6 +23,7 @@ enum class FeedbackVectorSlotKind {
CALL_IC,
LOAD_IC,
+ LOAD_GLOBAL_IC,
KEYED_LOAD_IC,
STORE_IC,
KEYED_STORE_IC,
@@ -34,7 +34,6 @@ enum class FeedbackVectorSlotKind {
KINDS_NUMBER // Last value indicating number of kinds.
};
-
std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind);
@@ -51,6 +50,11 @@ class FeedbackVectorSpecBase {
return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
}
+ FeedbackVectorSlot AddLoadGlobalICSlot(Handle<String> name) {
+ This()->append_name(name);
+ return AddSlot(FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
+ }
+
FeedbackVectorSlot AddKeyedLoadICSlot() {
return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
}
@@ -66,40 +70,65 @@ class FeedbackVectorSpecBase {
FeedbackVectorSlot AddGeneralSlot() {
return AddSlot(FeedbackVectorSlotKind::GENERAL);
}
+
+#ifdef OBJECT_PRINT
+ // For gdb debugging.
+ void Print();
+#endif // OBJECT_PRINT
+
+ DECLARE_PRINTER(FeedbackVectorSpec)
+
+ private:
+ Derived* This() { return static_cast<Derived*>(this); }
};
class StaticFeedbackVectorSpec
: public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
public:
- StaticFeedbackVectorSpec() : slots_(0) {}
+ StaticFeedbackVectorSpec() : slot_count_(0), name_count_(0) {}
- int slots() const { return slots_; }
+ int slots() const { return slot_count_; }
FeedbackVectorSlotKind GetKind(int slot) const {
- DCHECK(slot >= 0 && slot < slots_);
+ DCHECK(slot >= 0 && slot < slot_count_);
return kinds_[slot];
}
+ int name_count() const { return name_count_; }
+
+ Handle<String> GetName(int index) const {
+ DCHECK(index >= 0 && index < name_count_);
+ return names_[index];
+ }
+
private:
friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
void append(FeedbackVectorSlotKind kind) {
- DCHECK(slots_ < kMaxLength);
- kinds_[slots_++] = kind;
+ DCHECK(slot_count_ < kMaxLength);
+ kinds_[slot_count_++] = kind;
+ }
+
+ void append_name(Handle<String> name) {
+ DCHECK(name_count_ < kMaxLength);
+ names_[name_count_++] = name;
}
static const int kMaxLength = 12;
- int slots_;
+ int slot_count_;
FeedbackVectorSlotKind kinds_[kMaxLength];
+ int name_count_;
+ Handle<String> names_[kMaxLength];
};
class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
public:
- explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
+ explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone), names_(zone) {
slot_kinds_.reserve(16);
+ names_.reserve(8);
}
int slots() const { return static_cast<int>(slot_kinds_.size()); }
@@ -108,6 +137,10 @@ class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
return static_cast<FeedbackVectorSlotKind>(slot_kinds_.at(slot));
}
+ int name_count() const { return static_cast<int>(names_.size()); }
+
+ Handle<String> GetName(int index) const { return names_.at(index); }
+
private:
friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
@@ -115,13 +148,17 @@ class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
slot_kinds_.push_back(static_cast<unsigned char>(kind));
}
+ void append_name(Handle<String> name) { names_.push_back(name); }
+
ZoneVector<unsigned char> slot_kinds_;
+ ZoneVector<Handle<String>> names_;
};
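Putting the spec API together, a hedged sketch of building metadata with one named slot; the zone, isolate, and string setup are illustrative context, not code from this change:

    FeedbackVectorSpec spec(zone);
    FeedbackVectorSlot load = spec.AddLoadICSlot();
    FeedbackVectorSlot global = spec.AddLoadGlobalICSlot(
        isolate->factory()->InternalizeUtf8String("x"));
    Handle<TypeFeedbackMetadata> metadata =
        TypeFeedbackMetadata::New(isolate, &spec);
    // metadata->GetName(global) now returns "x"; the plain load slot has no name.
    (void)load;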
// The shape of the TypeFeedbackMetadata is an array with:
// 0: slot_count
-// 1..N: slot kinds packed into a bit vector
+// 1: names table
+// 2..N: slot kinds packed into a bit vector
//
class TypeFeedbackMetadata : public FixedArray {
public:
@@ -129,19 +166,34 @@ class TypeFeedbackMetadata : public FixedArray {
static inline TypeFeedbackMetadata* cast(Object* obj);
static const int kSlotsCountIndex = 0;
- static const int kReservedIndexCount = 1;
+ static const int kNamesTableIndex = 1;
+ static const int kReservedIndexCount = 2;
+
+ static const int kNameTableEntrySize = 2;
+ static const int kNameTableSlotIndex = 0;
+ static const int kNameTableNameIndex = 1;
// Returns number of feedback vector elements used by given slot kind.
static inline int GetSlotSize(FeedbackVectorSlotKind kind);
+  // Returns whether slots of the given kind require a "name".
+ static inline bool SlotRequiresName(FeedbackVectorSlotKind kind);
+
bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
+ bool DiffersFrom(const TypeFeedbackMetadata* other_metadata) const;
+
+ inline bool is_empty() const;
+
// Returns number of slots in the vector.
inline int slot_count() const;
// Returns slot kind for given slot.
FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
+ // Returns name for given slot.
+ String* GetName(FeedbackVectorSlot slot) const;
+
template <typename Spec>
static Handle<TypeFeedbackMetadata> New(Isolate* isolate, const Spec* spec);
@@ -155,7 +207,7 @@ class TypeFeedbackMetadata : public FixedArray {
static const char* Kind2String(FeedbackVectorSlotKind kind);
private:
- static const int kFeedbackVectorSlotKindBits = 3;
+ static const int kFeedbackVectorSlotKindBits = 4;
STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
(1 << kFeedbackVectorSlotKindBits));
@@ -172,9 +224,9 @@ class TypeFeedbackMetadata : public FixedArray {
// 0: feedback metadata
// 1: ics_with_types
// 2: ics_with_generic_info
-// 3: feedback slot #0 (N >= 3)
+// 3: feedback slot #0
// ...
-// N + slot_count - 1: feedback slot #(slot_count-1)
+// 3 + slot_count - 1: feedback slot #(slot_count-1)
//
class TypeFeedbackVector : public FixedArray {
public:
@@ -194,18 +246,22 @@ class TypeFeedbackVector : public FixedArray {
inline TypeFeedbackMetadata* metadata() const;
// Conversion from a slot to an integer index to the underlying array.
- inline int GetIndex(FeedbackVectorSlot slot) const;
+ static int GetIndex(FeedbackVectorSlot slot) {
+ return kReservedIndexCount + slot.ToInt();
+ }
static int GetIndexFromSpec(const FeedbackVectorSpec* spec,
FeedbackVectorSlot slot);
// Conversion from an integer index to the underlying array to a slot.
- inline FeedbackVectorSlot ToSlot(int index) const;
+ static inline FeedbackVectorSlot ToSlot(int index);
inline Object* Get(FeedbackVectorSlot slot) const;
inline void Set(FeedbackVectorSlot slot, Object* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Returns slot kind for given slot.
- inline FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
+ FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
+ // Returns name corresponding to given slot or an empty string.
+ String* GetName(FeedbackVectorSlot slot) const;
static Handle<TypeFeedbackVector> New(Isolate* isolate,
Handle<TypeFeedbackMetadata> metadata);
@@ -280,23 +336,17 @@ class TypeFeedbackMetadataIterator {
public:
explicit TypeFeedbackMetadataIterator(Handle<TypeFeedbackMetadata> metadata)
: metadata_handle_(metadata),
- slot_(FeedbackVectorSlot(0)),
+ next_slot_(FeedbackVectorSlot(0)),
slot_kind_(FeedbackVectorSlotKind::INVALID) {}
explicit TypeFeedbackMetadataIterator(TypeFeedbackMetadata* metadata)
: metadata_(metadata),
- slot_(FeedbackVectorSlot(0)),
+ next_slot_(FeedbackVectorSlot(0)),
slot_kind_(FeedbackVectorSlotKind::INVALID) {}
- bool HasNext() const { return slot_.ToInt() < metadata()->slot_count(); }
+ inline bool HasNext() const;
- FeedbackVectorSlot Next() {
- DCHECK(HasNext());
- FeedbackVectorSlot slot = slot_;
- slot_kind_ = metadata()->GetKind(slot);
- slot_ = FeedbackVectorSlot(slot_.ToInt() + entry_size());
- return slot;
- }
+ inline FeedbackVectorSlot Next();
// Returns slot kind of the last slot returned by Next().
FeedbackVectorSlotKind kind() const {
@@ -306,7 +356,12 @@ class TypeFeedbackMetadataIterator {
}
// Returns entry size of the last slot returned by Next().
- int entry_size() const { return TypeFeedbackMetadata::GetSlotSize(kind()); }
+ inline int entry_size() const;
+
+ String* name() const {
+ DCHECK(TypeFeedbackMetadata::SlotRequiresName(kind()));
+ return metadata()->GetName(cur_slot_);
+ }
private:
TypeFeedbackMetadata* metadata() const {
@@ -318,7 +373,8 @@ class TypeFeedbackMetadataIterator {
// pointer use cases.
Handle<TypeFeedbackMetadata> metadata_handle_;
TypeFeedbackMetadata* metadata_;
- FeedbackVectorSlot slot_;
+ FeedbackVectorSlot cur_slot_;
+ FeedbackVectorSlot next_slot_;
FeedbackVectorSlotKind slot_kind_;
};
@@ -356,8 +412,9 @@ class FeedbackNexus {
virtual InlineCacheState StateFromFeedback() const = 0;
virtual int ExtractMaps(MapHandleList* maps) const;
- virtual MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const;
- virtual bool FindHandlers(CodeHandleList* code_list, int length = -1) const;
+ virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
+ virtual bool FindHandlers(List<Handle<Object>>* code_list,
+ int length = -1) const;
virtual Name* FindFirstName() const { return NULL; }
virtual void ConfigureUninitialized();
@@ -378,7 +435,7 @@ class FeedbackNexus {
Handle<FixedArray> EnsureArrayOfSize(int length);
Handle<FixedArray> EnsureExtraArrayOfSize(int length);
void InstallHandlers(Handle<FixedArray> array, MapHandleList* maps,
- CodeHandleList* handlers);
+ List<Handle<Object>>* handlers);
private:
// The reason for having a vector handle and a raw pointer is that we can and
@@ -393,10 +450,6 @@ class FeedbackNexus {
class CallICNexus final : public FeedbackNexus {
public:
- // Monomorphic call ics store call counts. Platform code needs to increment
- // the count appropriately (ie, by 2).
- static const int kCallCountIncrement = 2;
-
CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
: FeedbackNexus(vector, slot) {
DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
@@ -419,10 +472,11 @@ class CallICNexus final : public FeedbackNexus {
// CallICs don't record map feedback.
return 0;
}
- MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const final {
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
return MaybeHandle<Code>();
}
- bool FindHandlers(CodeHandleList* code_list, int length = -1) const final {
+ bool FindHandlers(List<Handle<Object>>* code_list,
+ int length = -1) const final {
return length == 0;
}
@@ -447,13 +501,46 @@ class LoadICNexus : public FeedbackNexus {
void Clear(Code* host);
- void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Code> handler);
+ void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
- void ConfigurePolymorphic(MapHandleList* maps, CodeHandleList* handlers);
+ void ConfigurePolymorphic(MapHandleList* maps,
+ List<Handle<Object>>* handlers);
InlineCacheState StateFromFeedback() const override;
};
+class LoadGlobalICNexus : public FeedbackNexus {
+ public:
+ LoadGlobalICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
+ }
+ LoadGlobalICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
+ }
+
+ int ExtractMaps(MapHandleList* maps) const final {
+ // LoadGlobalICs don't record map feedback.
+ return 0;
+ }
+ MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+ return MaybeHandle<Code>();
+ }
+ bool FindHandlers(List<Handle<Object>>* code_list,
+ int length = -1) const final {
+ return length == 0;
+ }
+
+ void ConfigureMegamorphic() override { UNREACHABLE(); }
+ void Clear(Code* host);
+
+ void ConfigureUninitialized() override;
+ void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
+ void ConfigureHandlerMode(Handle<Code> handler);
+
+ InlineCacheState StateFromFeedback() const override;
+};
class KeyedLoadICNexus : public FeedbackNexus {
public:
@@ -470,10 +557,10 @@ class KeyedLoadICNexus : public FeedbackNexus {
// name can be a null handle for element loads.
void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
- Handle<Code> handler);
+ Handle<Object> handler);
// name can be null.
void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
- CodeHandleList* handlers);
+ List<Handle<Object>>* handlers);
void ConfigureMegamorphicKeyed(IcCheckType property_type);
@@ -502,7 +589,8 @@ class StoreICNexus : public FeedbackNexus {
void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Code> handler);
- void ConfigurePolymorphic(MapHandleList* maps, CodeHandleList* handlers);
+ void ConfigurePolymorphic(MapHandleList* maps,
+ List<Handle<Object>>* handlers);
InlineCacheState StateFromFeedback() const override;
};
@@ -530,7 +618,7 @@ class KeyedStoreICNexus : public FeedbackNexus {
Handle<Code> handler);
// name can be null.
void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
- CodeHandleList* handlers);
+ List<Handle<Object>>* handlers);
void ConfigurePolymorphic(MapHandleList* maps,
MapHandleList* transitioned_maps,
CodeHandleList* handlers);
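The recurring change in these feedback-vector hunks is the widening of IC handler storage: APIs that previously accepted only compiled stubs (CodeHandleList*) now take List<Handle<Object>>*, so the same slots can also hold non-Code, data-driven handlers. A rough standalone sketch of that generalization follows; the types here are illustrative stand-ins, not V8's handle classes.

#include <iostream>
#include <memory>
#include <vector>

// Illustrative stand-ins for V8's handle hierarchy; not the real classes.
struct Object { virtual ~Object() = default; };
struct Code : Object {};         // a compiled IC stub
struct DataHandler : Object {};  // e.g. Smi-encoded dispatch data

// Before: handler lists were fixed to compiled stubs.
using CodeHandlerList = std::vector<std::shared_ptr<Code>>;

// After: widening to the base type admits both kinds of handler.
using ObjectHandlerList = std::vector<std::shared_ptr<Object>>;

int main() {
  ObjectHandlerList handlers;
  handlers.push_back(std::make_shared<Code>());         // still accepted
  handlers.push_back(std::make_shared<DataHandler>());  // newly possible
  std::cout << handlers.size() << " handlers installed\n";
}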
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 9087576f01..8289d91125 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -20,7 +20,7 @@ TypeFeedbackOracle::TypeFeedbackOracle(
Handle<TypeFeedbackVector> feedback_vector, Handle<Context> native_context)
: native_context_(native_context), isolate_(isolate), zone_(zone) {
BuildDictionary(code);
- DCHECK(dictionary_->IsDictionary());
+ DCHECK(dictionary_->IsUnseededNumberDictionary());
// We make a copy of the feedback vector because a GC could clear
// the type feedback info contained therein.
// TODO(mvstanton): revisit the decision to copy when we weakly
@@ -108,7 +108,7 @@ bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorSlot slot) {
bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorSlot slot) {
Handle<Object> value = GetInfo(slot);
- return value->IsUndefined() ||
+ return value->IsUndefined(isolate()) ||
value.is_identical_to(
TypeFeedbackVector::UninitializedSentinel(isolate()));
}
@@ -280,8 +280,8 @@ void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorSlot slot,
receiver_types->Clear();
if (!slot.IsInvalid()) {
LoadICNexus nexus(feedback_vector_, slot);
- Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
- CollectReceiverTypes(&nexus, name, flags, receiver_types);
+ CollectReceiverTypes(isolate()->load_stub_cache(), &nexus, name,
+ receiver_types);
}
}
@@ -295,7 +295,7 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
*key_type = ELEMENT;
} else {
KeyedLoadICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes<FeedbackNexus>(&nexus, receiver_types);
+ CollectReceiverTypes(&nexus, receiver_types);
*is_string = HasOnlyStringMaps(receiver_types);
*key_type = nexus.GetKeyType();
}
@@ -306,8 +306,8 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
- Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
- CollectReceiverTypes(slot, name, flags, receiver_types);
+ CollectReceiverTypes(isolate()->store_stub_cache(), slot, name,
+ receiver_types);
}
@@ -326,27 +326,24 @@ void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorSlot slot,
if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
}
-
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
+ FeedbackVectorSlot slot,
Handle<Name> name,
- Code::Flags flags,
SmallMapList* types) {
StoreICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes<FeedbackNexus>(&nexus, name, flags, types);
+ CollectReceiverTypes(stub_cache, &nexus, name, types);
}
-
-template <class T>
-void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<Name> name,
- Code::Flags flags,
+void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
+ FeedbackNexus* nexus,
+ Handle<Name> name,
SmallMapList* types) {
if (FLAG_collect_megamorphic_maps_from_stub_cache &&
- obj->ic_state() == MEGAMORPHIC) {
+ nexus->ic_state() == MEGAMORPHIC) {
types->Reserve(4, zone());
- isolate()->stub_cache()->CollectMatchingMaps(
- types, name, flags, native_context_, zone());
+ stub_cache->CollectMatchingMaps(types, name, native_context_, zone());
} else {
- CollectReceiverTypes<T>(obj, types);
+ CollectReceiverTypes(nexus, types);
}
}
@@ -356,23 +353,22 @@ void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
if (kind == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes<FeedbackNexus>(&nexus, types);
+ CollectReceiverTypes(&nexus, types);
} else {
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, kind);
KeyedStoreICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes<FeedbackNexus>(&nexus, types);
+ CollectReceiverTypes(&nexus, types);
}
}
-
-template <class T>
-void TypeFeedbackOracle::CollectReceiverTypes(T* obj, SmallMapList* types) {
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackNexus* nexus,
+ SmallMapList* types) {
MapHandleList maps;
- if (obj->ic_state() == MONOMORPHIC) {
- Map* map = obj->FindFirstMap();
+ if (nexus->ic_state() == MONOMORPHIC) {
+ Map* map = nexus->FindFirstMap();
if (map != NULL) maps.Add(handle(map));
- } else if (obj->ic_state() == POLYMORPHIC) {
- obj->FindAllMaps(&maps);
+ } else if (nexus->ic_state() == POLYMORPHIC) {
+ nexus->FindAllMaps(&maps);
} else {
return;
}
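Both rewrites in type-info.cc follow one mechanical shape: a template<class T> whose only instantiation was FeedbackNexus becomes a plain overload taking FeedbackNexus*, and the stub cache to probe is passed in explicitly instead of being recovered from Code::Flags, which is what lets load and store sites use their now-separate caches. A minimal model of the refactored shape, with invented names:

#include <iostream>
#include <vector>

struct Map {};
struct StubCache {
  // Stand-in for CollectMatchingMaps: maps recorded megamorphically.
  std::vector<Map*> matching() const { return {}; }
};

struct FeedbackNexus {
  bool megamorphic = false;
  std::vector<Map*> maps;  // monomorphic/polymorphic feedback
};

// After the refactoring: no template, and the cache to probe is an
// explicit parameter, so callers pick the load or store cache.
void CollectReceiverTypes(const StubCache* cache, const FeedbackNexus* nexus,
                          std::vector<Map*>* out) {
  if (nexus->megamorphic) {
    *out = cache->matching();
  } else {
    *out = nexus->maps;
  }
}

int main() {
  StubCache load_cache, store_cache;  // mirrors the load/store split
  FeedbackNexus nexus;
  std::vector<Map*> types;
  CollectReceiverTypes(&load_cache, &nexus, &types);
  std::cout << types.size() << " receiver maps\n";
}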
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index c4b0928fc4..4e8dc54d02 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -17,7 +17,8 @@ namespace internal {
// Forward declarations.
class SmallMapList;
-
+class FeedbackNexus;
+class StubCache;
class TypeFeedbackOracle: public ZoneObject {
public:
@@ -56,8 +57,7 @@ class TypeFeedbackOracle: public ZoneObject {
SmallMapList* receiver_types);
void CollectReceiverTypes(FeedbackVectorSlot slot, SmallMapList* types);
- template <class T>
- void CollectReceiverTypes(T* obj, SmallMapList* types);
+ void CollectReceiverTypes(FeedbackNexus* nexus, SmallMapList* types);
static bool IsRelevantFeedback(Map* map, Context* native_context) {
Object* constructor = map->GetConstructor();
@@ -96,11 +96,10 @@ class TypeFeedbackOracle: public ZoneObject {
Isolate* isolate() const { return isolate_; }
private:
- void CollectReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
- Code::Flags flags, SmallMapList* types);
- template <class T>
- void CollectReceiverTypes(T* obj, Handle<Name> name, Code::Flags flags,
- SmallMapList* types);
+ void CollectReceiverTypes(StubCache* stub_cache, FeedbackVectorSlot slot,
+ Handle<Name> name, SmallMapList* types);
+ void CollectReceiverTypes(StubCache* stub_cache, FeedbackNexus* nexus,
+ Handle<Name> name, SmallMapList* types);
// Returns true if there is at least one string map and if
// all maps are string maps.
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 49c941816e..c978dac5c2 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -147,10 +147,10 @@ Type::bitset BitsetType::Lub(Type* type) {
if (type->IsClass()) return type->AsClass()->Lub();
if (type->IsConstant()) return type->AsConstant()->Lub();
if (type->IsRange()) return type->AsRange()->Lub();
- if (type->IsContext()) return kInternal & kTaggedPointer;
+ if (type->IsContext()) return kOtherInternal & kTaggedPointer;
if (type->IsArray()) return kOtherObject;
if (type->IsFunction()) return kFunction;
- if (type->IsTuple()) return kInternal;
+ if (type->IsTuple()) return kOtherInternal;
UNREACHABLE();
return kNone;
}
@@ -187,21 +187,25 @@ Type::bitset BitsetType::Lub(i::Map* map) {
if (map == heap->undefined_map()) return kUndefined;
if (map == heap->null_map()) return kNull;
if (map == heap->boolean_map()) return kBoolean;
- DCHECK(map == heap->the_hole_map() ||
- map == heap->uninitialized_map() ||
+ if (map == heap->the_hole_map()) return kHole;
+ DCHECK(map == heap->uninitialized_map() ||
map == heap->no_interceptor_result_sentinel_map() ||
map == heap->termination_exception_map() ||
map == heap->arguments_marker_map() ||
- map == heap->optimized_out_map());
- return kInternal & kTaggedPointer;
+ map == heap->optimized_out_map() ||
+ map == heap->stale_register_map());
+ return kOtherInternal & kTaggedPointer;
}
case HEAP_NUMBER_TYPE:
return kNumber & kTaggedPointer;
case SIMD128_VALUE_TYPE:
return kSimd;
case JS_OBJECT_TYPE:
+ case JS_ARGUMENTS_TYPE:
+ case JS_ERROR_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_GLOBAL_PROXY_TYPE:
+ case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
if (map->is_undetectable()) return kOtherUndetectable;
return kOtherObject;
@@ -246,10 +250,10 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
- return kInternal & kTaggedPointer;
+ return kOtherInternal & kTaggedPointer;
// Remaining instance types are unsupported for now. If any of them do
- // require bit set types, they should get kInternal & kTaggedPointer.
+ // require bit set types, they should get kOtherInternal & kTaggedPointer.
case MUTABLE_HEAP_NUMBER_TYPE:
case FREE_SPACE_TYPE:
#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -266,8 +270,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case SIGNATURE_INFO_TYPE:
case TYPE_SWITCH_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
- case CODE_CACHE_TYPE:
- case POLYMORPHIC_CODE_CACHE_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case BOX_TYPE:
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 8061410429..746cca764e 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -199,38 +199,49 @@ namespace internal {
V(OtherUndetectable, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
V(Function, 1u << 19 | REPRESENTATION(kTaggedPointer)) \
- V(Internal, 1u << 20 | REPRESENTATION(kTagged | kUntagged)) \
+ V(Hole, 1u << 20 | REPRESENTATION(kTaggedPointer)) \
+ V(OtherInternal, 1u << 21 | REPRESENTATION(kTagged | kUntagged)) \
\
- V(Signed31, kUnsigned30 | kNegative31) \
- V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
- V(Negative32, kNegative31 | kOtherSigned32) \
- V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
- V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
- kOtherUnsigned32) \
- V(Integral32, kSigned32 | kUnsigned32) \
- V(PlainNumber, kIntegral32 | kOtherNumber) \
- V(OrderedNumber, kPlainNumber | kMinusZero) \
- V(MinusZeroOrNaN, kMinusZero | kNaN) \
- V(Number, kOrderedNumber | kNaN) \
- V(String, kInternalizedString | kOtherString) \
- V(UniqueName, kSymbol | kInternalizedString) \
- V(Name, kSymbol | kString) \
- V(BooleanOrNumber, kBoolean | kNumber) \
- V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
- V(NullOrUndefined, kNull | kUndefined) \
- V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
- V(NumberOrString, kNumber | kString) \
- V(NumberOrUndefined, kNumber | kUndefined) \
- V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
- V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
- V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
- V(Object, kFunction | kOtherObject | kOtherUndetectable) \
- V(Receiver, kObject | kProxy) \
- V(StringOrReceiver, kString | kReceiver) \
- V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
- kReceiver) \
- V(NonNumber, kUnique | kString | kInternal) \
- V(Any, 0xfffffffeu)
+ V(Signed31, kUnsigned30 | kNegative31) \
+ V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
+ V(Signed32OrMinusZero, kSigned32 | kMinusZero) \
+ V(Signed32OrMinusZeroOrNaN, kSigned32 | kMinusZero | kNaN) \
+ V(Negative32, kNegative31 | kOtherSigned32) \
+ V(Unsigned31, kUnsigned30 | kOtherUnsigned31) \
+ V(Unsigned32, kUnsigned30 | kOtherUnsigned31 | \
+ kOtherUnsigned32) \
+ V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
+ V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
+ V(Integral32, kSigned32 | kUnsigned32) \
+ V(PlainNumber, kIntegral32 | kOtherNumber) \
+ V(OrderedNumber, kPlainNumber | kMinusZero) \
+ V(MinusZeroOrNaN, kMinusZero | kNaN) \
+ V(Number, kOrderedNumber | kNaN) \
+ V(String, kInternalizedString | kOtherString) \
+ V(UniqueName, kSymbol | kInternalizedString) \
+ V(Name, kSymbol | kString) \
+ V(BooleanOrNumber, kBoolean | kNumber) \
+ V(BooleanOrNullOrNumber, kBooleanOrNumber | kNull) \
+ V(BooleanOrNullOrUndefined, kBoolean | kNull | kUndefined) \
+ V(NullOrNumber, kNull | kNumber) \
+ V(NullOrUndefined, kNull | kUndefined) \
+ V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
+ V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | kHole) \
+ V(NumberOrSimdOrString, kNumber | kSimd | kString) \
+ V(NumberOrString, kNumber | kString) \
+ V(NumberOrUndefined, kNumber | kUndefined) \
+ V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
+ V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
+ V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
+ V(Object, kFunction | kOtherObject | kOtherUndetectable) \
+ V(Receiver, kObject | kProxy) \
+ V(StringOrReceiver, kString | kReceiver) \
+ V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
+ kReceiver) \
+ V(Internal, kHole | kOtherInternal) \
+ V(NonInternal, kPrimitive | kReceiver) \
+ V(NonNumber, kUnique | kString | kInternal) \
+ V(Any, 0xfffffffeu)
// clang-format on
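The rewritten macro preserves the table's invariant: every composite entry is a bitwise union of leaf bits, so the new Internal is exactly kHole | kOtherInternal and subtyping remains a subset test on the bits. A compact standalone model of that lattice (a small subset of the real table, same arithmetic; the names are borrowed for illustration):

#include <cassert>
#include <cstdint>

// A few leaf bits, mirroring the macro's 1u << n entries.
constexpr uint32_t kNone          = 0;
constexpr uint32_t kHole          = 1u << 20;
constexpr uint32_t kOtherInternal = 1u << 21;

// Composite entries are unions of leaves, exactly as in the macro.
constexpr uint32_t kInternal = kHole | kOtherInternal;

// Subtyping on bitset types is a subset test on the bits.
constexpr bool Is(uint32_t sub, uint32_t super) {
  return (sub & super) == sub;
}

int main() {
  static_assert(Is(kHole, kInternal), "Hole <= Internal");
  static_assert(Is(kOtherInternal, kInternal), "OtherInternal <= Internal");
  static_assert(!Is(kInternal, kHole), "Internal is strictly wider");
  // Union and intersection are plain bit operations on this encoding.
  assert((kHole | kOtherInternal) == kInternal);
  assert((kHole & kOtherInternal) == kNone);
}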
@@ -740,8 +751,8 @@ class Type {
SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
#undef CONSTRUCT_SIMD_TYPE
- static Type* Union(Type* type1, Type* type2, Zone* reg);
- static Type* Intersect(Type* type1, Type* type2, Zone* reg);
+ static Type* Union(Type* type1, Type* type2, Zone* zone);
+ static Type* Intersect(Type* type1, Type* type2, Zone* zone);
static Type* Of(double value, Zone* zone) {
return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
diff --git a/deps/v8/src/typing-asm.cc b/deps/v8/src/typing-asm.cc
deleted file mode 100644
index 7482c4f651..0000000000
--- a/deps/v8/src/typing-asm.cc
+++ /dev/null
@@ -1,1622 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/typing-asm.h"
-
-#include <limits>
-
-#include "src/v8.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/codegen.h"
-#include "src/type-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define FAIL(node, msg) \
- do { \
- valid_ = false; \
- int line = node->position() == RelocInfo::kNoPosition \
- ? -1 \
- : script_->GetLineNumber(node->position()); \
- base::OS::SNPrintF(error_message_, sizeof(error_message_), \
- "asm: line %d: %s\n", line + 1, msg); \
- return; \
- } while (false)
-
-
-#define RECURSE(call) \
- do { \
- DCHECK(!HasStackOverflow()); \
- call; \
- if (HasStackOverflow()) return; \
- if (!valid_) return; \
- } while (false)
-
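The deleted validator is organized around these two macros: FAIL records a message and returns out of the current visitor, and RECURSE propagates failure (or stack overflow) out of nested visits, which keeps the sixteen hundred lines of visitor code free of explicit error plumbing. A stripped-down version of the same idiom, outside any V8 types:

#include <cstdio>

// FAIL: record a message, mark the check invalid, and bail out of the
// current function. RECURSE: stop unwinding once any nested check failed.
#define CHECK_FAIL(msg)                             \
  do {                                              \
    valid = false;                                  \
    std::snprintf(error, sizeof(error), "%s", msg); \
    return;                                         \
  } while (false)

#define CHECK_RECURSE(call) \
  do {                      \
    call;                   \
    if (!valid) return;     \
  } while (false)

struct Checker {
  bool valid = true;
  char error[128] = {0};

  void CheckLeaf(int x) {
    if (x < 0) CHECK_FAIL("negative value");
  }
  void CheckAll() {
    CHECK_RECURSE(CheckLeaf(1));
    CHECK_RECURSE(CheckLeaf(-1));  // fails; the next line never runs
    CHECK_RECURSE(CheckLeaf(2));
  }
};

int main() {
  Checker c;
  c.CheckAll();
  std::printf("valid=%d error=%s\n", c.valid, c.error);
}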
-AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
- FunctionLiteral* root)
- : zone_(zone),
- isolate_(isolate),
- script_(script),
- root_(root),
- valid_(true),
- allow_simd_(false),
- property_info_(NULL),
- intish_(0),
- stdlib_types_(zone),
- stdlib_heap_types_(zone),
- stdlib_math_types_(zone),
-#define V(NAME, Name, name, lane_count, lane_type) \
- stdlib_simd_##name##_types_(zone),
- SIMD128_TYPES(V)
-#undef V
- global_variable_type_(HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- local_variable_type_(HashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- in_function_(false),
- building_function_tables_(false),
- visiting_exports_(false),
- cache_(TypeCache::Get()) {
- InitializeAstVisitor(isolate);
- InitializeStdlib();
-}
-
-
-bool AsmTyper::Validate() {
- VisitAsmModule(root_);
- return valid_ && !HasStackOverflow();
-}
-
-
-void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
- Scope* scope = fun->scope();
- if (!scope->is_function_scope()) FAIL(fun, "not at function scope");
-
- ExpressionStatement* use_asm = fun->body()->first()->AsExpressionStatement();
- if (use_asm == NULL) FAIL(fun, "missing \"use asm\"");
- Literal* use_asm_literal = use_asm->expression()->AsLiteral();
- if (use_asm_literal == NULL) FAIL(fun, "missing \"use asm\"");
- if (!use_asm_literal->raw_value()->AsString()->IsOneByteEqualTo("use asm"))
- FAIL(fun, "missing \"use asm\"");
-
- // Module parameters.
- for (int i = 0; i < scope->num_parameters(); ++i) {
- Variable* param = scope->parameter(i);
- DCHECK(GetType(param) == NULL);
- SetType(param, Type::None());
- }
-
- ZoneList<Declaration*>* decls = scope->declarations();
-
- // Set all globals to type Any.
- VariableDeclaration* decl = scope->function();
- if (decl != NULL) SetType(decl->proxy()->var(), Type::None());
- RECURSE(VisitDeclarations(scope->declarations()));
-
- // Validate global variables.
- RECURSE(VisitStatements(fun->body()));
-
- // Validate function annotations.
- for (int i = 0; i < decls->length(); ++i) {
- FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
- if (decl != NULL) {
- RECURSE(VisitFunctionAnnotation(decl->fun()));
- Variable* var = decl->proxy()->var();
- if (property_info_ != NULL) {
- SetVariableInfo(var, property_info_);
- property_info_ = NULL;
- }
- SetType(var, computed_type_);
- DCHECK(GetType(var) != NULL);
- }
- }
-
- // Build function tables.
- building_function_tables_ = true;
- RECURSE(VisitStatements(fun->body()));
- building_function_tables_ = false;
-
- // Validate function bodies.
- for (int i = 0; i < decls->length(); ++i) {
- FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
- if (decl != NULL) {
- RECURSE(VisitWithExpectation(decl->fun(), Type::Any(), "UNREACHABLE"));
- if (!computed_type_->IsFunction()) {
- FAIL(decl->fun(), "function literal expected to be a function");
- }
- }
- }
-
- // Validate exports.
- visiting_exports_ = true;
- ReturnStatement* stmt = fun->body()->last()->AsReturnStatement();
- if (stmt == nullptr) {
- FAIL(fun->body()->last(), "last statement in module is not a return");
- }
- RECURSE(VisitWithExpectation(stmt->expression(), Type::Object(),
- "expected object export"));
-}
-
-
-void AsmTyper::VisitVariableDeclaration(VariableDeclaration* decl) {
- Variable* var = decl->proxy()->var();
- if (var->location() != VariableLocation::PARAMETER) {
- if (GetType(var) == NULL) {
- SetType(var, Type::Any());
- } else {
- DCHECK(!GetType(var)->IsFunction());
- }
- }
- DCHECK(GetType(var) != NULL);
- intish_ = 0;
-}
-
-
-void AsmTyper::VisitFunctionDeclaration(FunctionDeclaration* decl) {
- if (in_function_) {
- FAIL(decl, "function declared inside another");
- }
- // Set function type so global references to functions have some type
- // (so they can give a more useful error).
- Variable* var = decl->proxy()->var();
- SetType(var, Type::Function());
-}
-
-
-void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
- // Extract result type.
- ZoneList<Statement*>* body = fun->body();
- Type* result_type = Type::Undefined();
- if (body->length() > 0) {
- ReturnStatement* stmt = body->last()->AsReturnStatement();
- if (stmt != NULL) {
- Literal* literal = stmt->expression()->AsLiteral();
- Type* old_expected = expected_type_;
- expected_type_ = Type::Any();
- if (literal) {
- RECURSE(VisitLiteral(literal, true));
- } else {
- RECURSE(VisitExpressionAnnotation(stmt->expression(), NULL, true));
- }
- expected_type_ = old_expected;
- result_type = computed_type_;
- }
- }
- Type* type =
- Type::Function(result_type, Type::Any(), fun->parameter_count(), zone());
-
- // Extract parameter types.
- bool good = true;
- for (int i = 0; i < fun->parameter_count(); ++i) {
- good = false;
- if (i >= body->length()) break;
- ExpressionStatement* stmt = body->at(i)->AsExpressionStatement();
- if (stmt == NULL) break;
- Assignment* expr = stmt->expression()->AsAssignment();
- if (expr == NULL || expr->is_compound()) break;
- VariableProxy* proxy = expr->target()->AsVariableProxy();
- if (proxy == NULL) break;
- Variable* var = proxy->var();
- if (var->location() != VariableLocation::PARAMETER || var->index() != i)
- break;
- RECURSE(VisitExpressionAnnotation(expr->value(), var, false));
- if (property_info_ != NULL) {
- SetVariableInfo(var, property_info_);
- property_info_ = NULL;
- }
- SetType(var, computed_type_);
- type->AsFunction()->InitParameter(i, computed_type_);
- good = true;
- }
- if (!good) FAIL(fun, "missing parameter type annotations");
-
- SetResult(fun, type);
-}
-
-
-void AsmTyper::VisitExpressionAnnotation(Expression* expr, Variable* var,
- bool is_return) {
- // Normal +x or x|0 annotations.
- BinaryOperation* bin = expr->AsBinaryOperation();
- if (bin != NULL) {
- if (var != NULL) {
- VariableProxy* proxy = bin->left()->AsVariableProxy();
- if (proxy == NULL) {
- FAIL(bin->left(), "expected variable for type annotation");
- }
- if (proxy->var() != var) {
- FAIL(proxy, "annotation source doesn't match destination");
- }
- }
- Literal* right = bin->right()->AsLiteral();
- if (right != NULL) {
- switch (bin->op()) {
- case Token::MUL: // We encode +x as x*1.0
- if (right->raw_value()->ContainsDot() &&
- right->raw_value()->AsNumber() == 1.0) {
- SetResult(expr, cache_.kAsmDouble);
- return;
- }
- break;
- case Token::BIT_OR:
- if (!right->raw_value()->ContainsDot() &&
- right->raw_value()->AsNumber() == 0.0) {
- if (is_return) {
- SetResult(expr, cache_.kAsmSigned);
- } else {
- SetResult(expr, cache_.kAsmInt);
- }
- return;
- }
- break;
- default:
- break;
- }
- }
- FAIL(expr, "invalid type annotation on binary op");
- }
-
- // Numbers or the undefined literal (for empty returns).
- if (expr->IsLiteral()) {
- RECURSE(VisitWithExpectation(expr, Type::Any(), "invalid literal"));
- return;
- }
-
- Call* call = expr->AsCall();
- if (call != NULL) {
- VariableProxy* proxy = call->expression()->AsVariableProxy();
- if (proxy != NULL) {
- VariableInfo* info = GetVariableInfo(proxy->var(), false);
- if (!info ||
- (!info->is_check_function && !info->is_constructor_function)) {
- if (allow_simd_) {
- FAIL(call->expression(),
- "only fround/SIMD.checks allowed on expression annotations");
- } else {
- FAIL(call->expression(),
- "only fround allowed on expression annotations");
- }
- }
- Type* type = info->type;
- DCHECK(type->IsFunction());
- if (info->is_check_function) {
- DCHECK(type->AsFunction()->Arity() == 1);
- }
- if (call->arguments()->length() != type->AsFunction()->Arity()) {
- FAIL(call, "invalid argument count calling function");
- }
- SetResult(expr, type->AsFunction()->Result());
- return;
- }
- }
-
- FAIL(expr, "invalid type annotation");
-}
-
-
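VisitExpressionAnnotation (above) encodes asm.js's two core type annotations: the parser delivers +x as x * 1.0, which marks a double, and x | 0 marks an int (signed in return position). A self-contained sketch of that classification, with stand-in enums rather than V8's Token values and type cache:

#include <iostream>

enum class Op { Mul, BitOr };
enum class AsmType { Double, Signed, Int, Invalid };

// Classify an annotation of the form (x <op> <literal>); contains_dot
// marks a literal written with a decimal point, as in the code above.
AsmType ClassifyAnnotation(Op op, double literal, bool contains_dot,
                           bool is_return) {
  if (op == Op::Mul && contains_dot && literal == 1.0)
    return AsmType::Double;  // +x is encoded as x * 1.0
  if (op == Op::BitOr && !contains_dot && literal == 0.0)
    return is_return ? AsmType::Signed : AsmType::Int;  // x | 0
  return AsmType::Invalid;
}

int main() {
  std::cout << (ClassifyAnnotation(Op::Mul, 1.0, true, false) ==
                AsmType::Double)  // "+x" parameter: double
            << (ClassifyAnnotation(Op::BitOr, 0.0, false, true) ==
                AsmType::Signed)  // "return x|0": signed
            << '\n';
}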
-void AsmTyper::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0; i < stmts->length(); ++i) {
- Statement* stmt = stmts->at(i);
- RECURSE(Visit(stmt));
- }
-}
-
-
-void AsmTyper::VisitBlock(Block* stmt) {
- RECURSE(VisitStatements(stmt->statements()));
-}
-
-
-void AsmTyper::VisitExpressionStatement(ExpressionStatement* stmt) {
- RECURSE(VisitWithExpectation(stmt->expression(), Type::Any(),
- "expression statement expected to be any"));
-}
-
-
-void AsmTyper::VisitEmptyStatement(EmptyStatement* stmt) {}
-
-
-void AsmTyper::VisitSloppyBlockFunctionStatement(
- SloppyBlockFunctionStatement* stmt) {
- Visit(stmt->statement());
-}
-
-
-void AsmTyper::VisitEmptyParentheses(EmptyParentheses* expr) { UNREACHABLE(); }
-
-
-void AsmTyper::VisitIfStatement(IfStatement* stmt) {
- if (!in_function_) {
- FAIL(stmt, "if statement inside module body");
- }
- RECURSE(VisitWithExpectation(stmt->condition(), cache_.kAsmSigned,
- "if condition expected to be integer"));
- RECURSE(Visit(stmt->then_statement()));
- RECURSE(Visit(stmt->else_statement()));
-}
-
-
-void AsmTyper::VisitContinueStatement(ContinueStatement* stmt) {
- if (!in_function_) {
- FAIL(stmt, "continue statement inside module body");
- }
-}
-
-
-void AsmTyper::VisitBreakStatement(BreakStatement* stmt) {
- if (!in_function_) {
- FAIL(stmt, "break statement inside module body");
- }
-}
-
-
-void AsmTyper::VisitReturnStatement(ReturnStatement* stmt) {
- // Handle module return statement in VisitAsmModule.
- if (!in_function_) {
- return;
- }
- Literal* literal = stmt->expression()->AsLiteral();
- if (literal) {
- VisitLiteral(literal, true);
- } else {
- RECURSE(
- VisitWithExpectation(stmt->expression(), Type::Any(),
- "return expression expected to have return type"));
- }
- if (!computed_type_->Is(return_type_) || !return_type_->Is(computed_type_)) {
- FAIL(stmt->expression(), "return type does not match function signature");
- }
-}
-
-
-void AsmTyper::VisitWithStatement(WithStatement* stmt) {
- FAIL(stmt, "bad with statement");
-}
-
-
-void AsmTyper::VisitSwitchStatement(SwitchStatement* stmt) {
- if (!in_function_) {
- FAIL(stmt, "switch statement inside module body");
- }
- RECURSE(VisitWithExpectation(stmt->tag(), cache_.kAsmSigned,
- "switch expression non-integer"));
- ZoneList<CaseClause*>* clauses = stmt->cases();
- ZoneSet<int32_t> cases(zone());
- for (int i = 0; i < clauses->length(); ++i) {
- CaseClause* clause = clauses->at(i);
- if (clause->is_default()) {
- if (i != clauses->length() - 1) {
- FAIL(clause, "default case out of order");
- }
- } else {
- Expression* label = clause->label();
- RECURSE(VisitWithExpectation(label, cache_.kAsmSigned,
- "case label non-integer"));
- if (!label->IsLiteral()) FAIL(label, "non-literal case label");
- Handle<Object> value = label->AsLiteral()->value();
- int32_t value32;
- if (!value->ToInt32(&value32)) FAIL(label, "illegal case label value");
- if (cases.find(value32) != cases.end()) {
- FAIL(label, "duplicate case value");
- }
- cases.insert(value32);
- }
- // TODO(bradnelson): Detect duplicates.
- ZoneList<Statement*>* stmts = clause->statements();
- RECURSE(VisitStatements(stmts));
- }
- if (cases.size() > 0) {
- int64_t min_case = *cases.begin();
- int64_t max_case = *cases.rbegin();
- if (max_case - min_case > std::numeric_limits<int32_t>::max()) {
- FAIL(stmt, "case range too large");
- }
- }
-}
-
-
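VisitSwitchStatement also bounds the spread of case labels: labels are collected as int32 values, and the distance between the smallest and largest must itself fit in an int32, which keeps a jump-table lowering feasible. The guard in isolation (the helper name is invented):

#include <cstdint>
#include <iostream>
#include <limits>
#include <set>

// Returns false when the span of case labels cannot fit in an int32,
// mirroring the max_case - min_case check above.
bool CaseRangeOk(const std::set<int32_t>& cases) {
  if (cases.empty()) return true;
  int64_t min_case = *cases.begin();
  int64_t max_case = *cases.rbegin();
  return max_case - min_case <= std::numeric_limits<int32_t>::max();
}

int main() {
  std::cout << CaseRangeOk({0, 10, 100})  // small span: fine
            << CaseRangeOk({std::numeric_limits<int32_t>::min(),
                            std::numeric_limits<int32_t>::max()})  // too wide
            << '\n';
}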
-void AsmTyper::VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
-
-
-void AsmTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
- if (!in_function_) {
- FAIL(stmt, "do statement inside module body");
- }
- RECURSE(Visit(stmt->body()));
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
- "do condition expected to be integer"));
-}
-
-
-void AsmTyper::VisitWhileStatement(WhileStatement* stmt) {
- if (!in_function_) {
- FAIL(stmt, "while statement inside module body");
- }
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
- "while condition expected to be integer"));
- RECURSE(Visit(stmt->body()));
-}
-
-
-void AsmTyper::VisitForStatement(ForStatement* stmt) {
- if (!in_function_) {
- FAIL(stmt, "for statement inside module body");
- }
- if (stmt->init() != NULL) {
- RECURSE(Visit(stmt->init()));
- }
- if (stmt->cond() != NULL) {
- RECURSE(VisitWithExpectation(stmt->cond(), cache_.kAsmSigned,
- "for condition expected to be integer"));
- }
- if (stmt->next() != NULL) {
- RECURSE(Visit(stmt->next()));
- }
- RECURSE(Visit(stmt->body()));
-}
-
-
-void AsmTyper::VisitForInStatement(ForInStatement* stmt) {
- FAIL(stmt, "for-in statement encountered");
-}
-
-
-void AsmTyper::VisitForOfStatement(ForOfStatement* stmt) {
- FAIL(stmt, "for-of statement encountered");
-}
-
-
-void AsmTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
- FAIL(stmt, "try statement encountered");
-}
-
-
-void AsmTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- FAIL(stmt, "try statement encountered");
-}
-
-
-void AsmTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
- FAIL(stmt, "debugger statement encountered");
-}
-
-
-void AsmTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
- if (in_function_) {
- FAIL(expr, "invalid nested function");
- }
- Scope* scope = expr->scope();
- DCHECK(scope->is_function_scope());
-
- if (!expr->bounds().upper->IsFunction()) {
- FAIL(expr, "invalid function literal");
- }
-
- Type* type = expr->bounds().upper;
- Type* save_return_type = return_type_;
- return_type_ = type->AsFunction()->Result();
- in_function_ = true;
- local_variable_type_.Clear();
- RECURSE(VisitDeclarations(scope->declarations()));
- RECURSE(VisitStatements(expr->body()));
- in_function_ = false;
- return_type_ = save_return_type;
- IntersectResult(expr, type);
-}
-
-
-void AsmTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
- FAIL(expr, "function info literal encountered");
-}
-
-
-void AsmTyper::VisitDoExpression(DoExpression* expr) {
- FAIL(expr, "do-expression encountered");
-}
-
-
-void AsmTyper::VisitConditional(Conditional* expr) {
- if (!in_function_) {
- FAIL(expr, "ternary operator inside module body");
- }
- RECURSE(VisitWithExpectation(expr->condition(), Type::Number(),
- "condition expected to be integer"));
- if (!computed_type_->Is(cache_.kAsmInt)) {
- FAIL(expr->condition(), "condition must be of type int");
- }
-
- RECURSE(VisitWithExpectation(
- expr->then_expression(), expected_type_,
- "conditional then branch type mismatch with enclosing expression"));
- Type* then_type = StorageType(computed_type_);
- if (intish_ != 0 || !then_type->Is(cache_.kAsmComparable)) {
- FAIL(expr->then_expression(), "invalid type in ? then expression");
- }
-
- RECURSE(VisitWithExpectation(
- expr->else_expression(), expected_type_,
- "conditional else branch type mismatch with enclosing expression"));
- Type* else_type = StorageType(computed_type_);
- if (intish_ != 0 || !else_type->Is(cache_.kAsmComparable)) {
- FAIL(expr->else_expression(), "invalid type in ? else expression");
- }
-
- if (!then_type->Is(else_type) || !else_type->Is(then_type)) {
- FAIL(expr, "then and else expressions in ? must have the same type");
- }
-
- IntersectResult(expr, then_type);
-}
-
-
-void AsmTyper::VisitVariableProxy(VariableProxy* expr) {
- VisitVariableProxy(expr, false);
-}
-
-void AsmTyper::VisitVariableProxy(VariableProxy* expr, bool assignment) {
- Variable* var = expr->var();
- VariableInfo* info = GetVariableInfo(var, false);
- if (!assignment && !in_function_ && !building_function_tables_ &&
- !visiting_exports_) {
- if (var->location() != VariableLocation::PARAMETER || var->index() >= 3) {
- FAIL(expr, "illegal variable reference in module body");
- }
- }
- if (info == NULL || info->type == NULL) {
- if (var->mode() == TEMPORARY) {
- SetType(var, Type::Any());
- info = GetVariableInfo(var, false);
- } else {
- FAIL(expr, "unbound variable");
- }
- }
- if (property_info_ != NULL) {
- SetVariableInfo(var, property_info_);
- property_info_ = NULL;
- }
- Type* type = Type::Intersect(info->type, expected_type_, zone());
- if (type->Is(cache_.kAsmInt)) {
- type = cache_.kAsmInt;
- }
- info->type = type;
- intish_ = 0;
- IntersectResult(expr, type);
-}
-
-
-void AsmTyper::VisitLiteral(Literal* expr, bool is_return) {
- intish_ = 0;
- Handle<Object> value = expr->value();
- if (value->IsNumber()) {
- int32_t i;
- uint32_t u;
- if (expr->raw_value()->ContainsDot()) {
- IntersectResult(expr, cache_.kAsmDouble);
- } else if (!is_return && value->ToUint32(&u)) {
- if (u <= 0x7fffffff) {
- IntersectResult(expr, cache_.kAsmFixnum);
- } else {
- IntersectResult(expr, cache_.kAsmUnsigned);
- }
- } else if (value->ToInt32(&i)) {
- IntersectResult(expr, cache_.kAsmSigned);
- } else {
- FAIL(expr, "illegal number");
- }
- } else if (!is_return && value->IsString()) {
- IntersectResult(expr, Type::String());
- } else if (value->IsUndefined()) {
- IntersectResult(expr, Type::Undefined());
- } else {
- FAIL(expr, "illegal literal");
- }
-}
-
-
-void AsmTyper::VisitLiteral(Literal* expr) { VisitLiteral(expr, false); }
-
-
-void AsmTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
- FAIL(expr, "regular expression encountered");
-}
-
-
-void AsmTyper::VisitObjectLiteral(ObjectLiteral* expr) {
- if (in_function_) {
- FAIL(expr, "object literal in function");
- }
- // Allowed for asm module's export declaration.
- ZoneList<ObjectLiteralProperty*>* props = expr->properties();
- for (int i = 0; i < props->length(); ++i) {
- ObjectLiteralProperty* prop = props->at(i);
- RECURSE(VisitWithExpectation(prop->value(), Type::Any(),
- "object property expected to be a function"));
- if (!computed_type_->IsFunction()) {
- FAIL(prop->value(), "non-function in function table");
- }
- }
- IntersectResult(expr, Type::Object());
-}
-
-
-void AsmTyper::VisitArrayLiteral(ArrayLiteral* expr) {
- if (in_function_) {
- FAIL(expr, "array literal inside a function");
- }
- // Allowed for function tables.
- ZoneList<Expression*>* values = expr->values();
- Type* elem_type = Type::None();
- for (int i = 0; i < values->length(); ++i) {
- Expression* value = values->at(i);
- RECURSE(VisitWithExpectation(value, Type::Any(), "UNREACHABLE"));
- if (!computed_type_->IsFunction()) {
- FAIL(value, "array component expected to be a function");
- }
- elem_type = Type::Union(elem_type, computed_type_, zone());
- }
- array_size_ = values->length();
- IntersectResult(expr, Type::Array(elem_type, zone()));
-}
-
-
-void AsmTyper::VisitAssignment(Assignment* expr) {
- // Handle function tables and everything else in different passes.
- if (!in_function_) {
- if (expr->value()->IsArrayLiteral()) {
- if (!building_function_tables_) {
- return;
- }
- } else {
- if (building_function_tables_) {
- return;
- }
- }
- }
- if (expr->is_compound()) FAIL(expr, "compound assignment encountered");
- Type* type = expected_type_;
- RECURSE(VisitWithExpectation(
- expr->value(), type, "assignment value expected to match surrounding"));
- Type* target_type = StorageType(computed_type_);
- if (expr->target()->IsVariableProxy()) {
- if (intish_ != 0) {
- FAIL(expr, "intish or floatish assignment");
- }
- expected_type_ = target_type;
- VisitVariableProxy(expr->target()->AsVariableProxy(), true);
- } else if (expr->target()->IsProperty()) {
- int32_t value_intish = intish_;
- Property* property = expr->target()->AsProperty();
- RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
- "bad property object"));
- if (!computed_type_->IsArray()) {
- FAIL(property->obj(), "array expected");
- }
- if (value_intish != 0 && computed_type_->Is(cache_.kFloat64Array)) {
- FAIL(expr, "floatish assignment to double array");
- }
- VisitHeapAccess(property, true, target_type);
- }
- IntersectResult(expr, target_type);
-}
-
-
-void AsmTyper::VisitYield(Yield* expr) {
- FAIL(expr, "yield expression encountered");
-}
-
-
-void AsmTyper::VisitThrow(Throw* expr) {
- FAIL(expr, "throw statement encountered");
-}
-
-
-int AsmTyper::ElementShiftSize(Type* type) {
- if (type->Is(cache_.kAsmSize8)) return 0;
- if (type->Is(cache_.kAsmSize16)) return 1;
- if (type->Is(cache_.kAsmSize32)) return 2;
- if (type->Is(cache_.kAsmSize64)) return 3;
- return -1;
-}
-
-
-Type* AsmTyper::StorageType(Type* type) {
- if (type->Is(cache_.kAsmInt)) {
- return cache_.kAsmInt;
- } else {
- return type;
- }
-}
-
-
-void AsmTyper::VisitHeapAccess(Property* expr, bool assigning,
- Type* assignment_type) {
- ArrayType* array_type = computed_type_->AsArray();
- // size_t size = array_size_;
- Type* type = array_type->Element();
- if (type->IsFunction()) {
- if (assigning) {
- FAIL(expr, "assigning to function table is illegal");
- }
- // TODO(bradnelson): Fix the parser and then un-comment this part
- // BinaryOperation* bin = expr->key()->AsBinaryOperation();
- // if (bin == NULL || bin->op() != Token::BIT_AND) {
- // FAIL(expr->key(), "expected & in call");
- // }
- // RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
- // "array index expected to be integer"));
- // Literal* right = bin->right()->AsLiteral();
- // if (right == NULL || right->raw_value()->ContainsDot()) {
- // FAIL(right, "call mask must be integer");
- // }
- // RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
- // "call mask expected to be integer"));
- // if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
- // FAIL(right, "call mask must match function table");
- // }
- // bin->set_bounds(Bounds(cache_.kAsmSigned));
- RECURSE(VisitWithExpectation(expr->key(), cache_.kAsmSigned,
- "must be integer"));
- IntersectResult(expr, type);
- } else {
- Literal* literal = expr->key()->AsLiteral();
- if (literal) {
- RECURSE(VisitWithExpectation(literal, cache_.kAsmSigned,
- "array index expected to be integer"));
- } else {
- int expected_shift = ElementShiftSize(type);
- if (expected_shift == 0) {
- RECURSE(Visit(expr->key()));
- } else {
- BinaryOperation* bin = expr->key()->AsBinaryOperation();
- if (bin == NULL || bin->op() != Token::SAR) {
- FAIL(expr->key(), "expected >> in heap access");
- }
- RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
- "array index expected to be integer"));
- Literal* right = bin->right()->AsLiteral();
- if (right == NULL || right->raw_value()->ContainsDot()) {
- FAIL(bin->right(), "heap access shift must be integer");
- }
- RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
- "array shift expected to be integer"));
- int n = static_cast<int>(right->raw_value()->AsNumber());
- if (expected_shift < 0 || n != expected_shift) {
- FAIL(right, "heap access shift must match element size");
- }
- }
- expr->key()->set_bounds(Bounds(cache_.kAsmSigned));
- }
- Type* result_type;
- if (type->Is(cache_.kAsmIntArrayElement)) {
- result_type = cache_.kAsmIntQ;
- intish_ = kMaxUncombinedAdditiveSteps;
- } else if (type->Is(cache_.kAsmFloat)) {
- if (assigning) {
- result_type = cache_.kAsmFloatDoubleQ;
- } else {
- result_type = cache_.kAsmFloatQ;
- }
- intish_ = 0;
- } else if (type->Is(cache_.kAsmDouble)) {
- if (assigning) {
- result_type = cache_.kAsmFloatDoubleQ;
- if (intish_ != 0) {
- FAIL(expr, "Assignment of floatish to Float64Array");
- }
- } else {
- result_type = cache_.kAsmDoubleQ;
- }
- intish_ = 0;
- } else {
- UNREACHABLE();
- }
- if (assigning) {
- if (!assignment_type->Is(result_type)) {
- FAIL(expr, "illegal type in assignment");
- }
- } else {
- IntersectResult(expr, expected_type_);
- IntersectResult(expr, result_type);
- }
- }
-}
-
-
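The non-function branch of VisitHeapAccess enforces the asm.js heap-index shape: an access into a view with 2^k-byte elements must be written heap[expr >> k], so the shift doubles as a static statement about element size and alignment. The check reduces to the arithmetic below (a sketch; the helper names are invented):

#include <iostream>

// log2 of the element size, as in ElementShiftSize: 0 for 1-byte views,
// 1 for 2-byte, 2 for 4-byte, 3 for 8-byte; -1 for unsupported types.
int ExpectedShift(int element_bytes) {
  switch (element_bytes) {
    case 1: return 0;
    case 2: return 1;
    case 4: return 2;
    case 8: return 3;
    default: return -1;
  }
}

// heap[index >> shift] is only valid when shift matches the element size.
bool ValidHeapAccess(int element_bytes, int shift_in_source) {
  int expected = ExpectedShift(element_bytes);
  return expected >= 0 && shift_in_source == expected;
}

int main() {
  std::cout << ValidHeapAccess(4, 2)   // f32[i >> 2]: accepted
            << ValidHeapAccess(8, 2)   // f64[i >> 2]: rejected
            << '\n';
}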
-bool AsmTyper::IsStdlibObject(Expression* expr) {
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy == NULL) {
- return false;
- }
- Variable* var = proxy->var();
- VariableInfo* info = GetVariableInfo(var, false);
- if (info) {
- if (info->standard_member == kStdlib) return true;
- }
- if (var->location() != VariableLocation::PARAMETER || var->index() != 0) {
- return false;
- }
- info = GetVariableInfo(var, true);
- info->type = Type::Object();
- info->standard_member = kStdlib;
- return true;
-}
-
-
-Expression* AsmTyper::GetReceiverOfPropertyAccess(Expression* expr,
- const char* name) {
- Property* property = expr->AsProperty();
- if (property == NULL) {
- return NULL;
- }
- Literal* key = property->key()->AsLiteral();
- if (key == NULL || !key->IsPropertyName() ||
- !key->AsPropertyName()->IsUtf8EqualTo(CStrVector(name))) {
- return NULL;
- }
- return property->obj();
-}
-
-
-bool AsmTyper::IsMathObject(Expression* expr) {
- Expression* obj = GetReceiverOfPropertyAccess(expr, "Math");
- return obj && IsStdlibObject(obj);
-}
-
-
-bool AsmTyper::IsSIMDObject(Expression* expr) {
- Expression* obj = GetReceiverOfPropertyAccess(expr, "SIMD");
- return obj && IsStdlibObject(obj);
-}
-
-
-bool AsmTyper::IsSIMDTypeObject(Expression* expr, const char* name) {
- Expression* obj = GetReceiverOfPropertyAccess(expr, name);
- return obj && IsSIMDObject(obj);
-}
-
-
-void AsmTyper::VisitProperty(Property* expr) {
- if (IsMathObject(expr->obj())) {
- VisitLibraryAccess(&stdlib_math_types_, expr);
- return;
- }
-#define V(NAME, Name, name, lane_count, lane_type) \
- if (IsSIMDTypeObject(expr->obj(), #Name)) { \
- VisitLibraryAccess(&stdlib_simd_##name##_types_, expr); \
- return; \
- } \
- if (IsSIMDTypeObject(expr, #Name)) { \
- VariableInfo* info = stdlib_simd_##name##_constructor_type_; \
- SetResult(expr, info->type); \
- property_info_ = info; \
- return; \
- }
- SIMD128_TYPES(V)
-#undef V
- if (IsStdlibObject(expr->obj())) {
- VisitLibraryAccess(&stdlib_types_, expr);
- return;
- }
-
- property_info_ = NULL;
-
- // Only recurse at this point so that we avoid needing
- // stdlib.Math to have a real type.
- RECURSE(
- VisitWithExpectation(expr->obj(), Type::Any(), "bad property object"));
-
- // For heap view or function table access.
- if (computed_type_->IsArray()) {
- VisitHeapAccess(expr, false, NULL);
- return;
- }
-
- VariableProxy* proxy = expr->obj()->AsVariableProxy();
- if (proxy != NULL) {
- Variable* var = proxy->var();
- if (var->location() == VariableLocation::PARAMETER && var->index() == 1) {
- // foreign.x - functions are represented as () -> Any
- if (Type::Any()->Is(expected_type_)) {
- SetResult(expr, Type::Function(Type::Any(), zone()));
- } else {
- SetResult(expr, expected_type_);
- }
- return;
- }
- }
-
- FAIL(expr, "invalid property access");
-}
-
-void AsmTyper::CheckPolymorphicStdlibArguments(
- enum StandardMember standard_member, ZoneList<Expression*>* args) {
- if (args->length() == 0) {
- return;
- }
- // Handle polymorphic stdlib functions specially.
- Expression* arg0 = args->at(0);
- Type* arg0_type = arg0->bounds().upper;
- switch (standard_member) {
- case kMathFround: {
- if (!arg0_type->Is(cache_.kAsmFloat) &&
- !arg0_type->Is(cache_.kAsmDouble) &&
- !arg0_type->Is(cache_.kAsmSigned) &&
- !arg0_type->Is(cache_.kAsmUnsigned)) {
- FAIL(arg0, "illegal function argument type");
- }
- break;
- }
- case kMathCeil:
- case kMathFloor:
- case kMathSqrt: {
- if (!arg0_type->Is(cache_.kAsmFloat) &&
- !arg0_type->Is(cache_.kAsmDouble)) {
- FAIL(arg0, "illegal function argument type");
- }
- break;
- }
- case kMathAbs:
- case kMathMin:
- case kMathMax: {
- if (!arg0_type->Is(cache_.kAsmFloat) &&
- !arg0_type->Is(cache_.kAsmDouble) &&
- !arg0_type->Is(cache_.kAsmSigned)) {
- FAIL(arg0, "illegal function argument type");
- }
- if (args->length() > 1) {
- Type* other = Type::Intersect(args->at(0)->bounds().upper,
- args->at(1)->bounds().upper, zone());
- if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
- !other->Is(cache_.kAsmSigned)) {
- FAIL(arg0, "function arguments types don't match");
- }
- }
- break;
- }
- default: { break; }
- }
-}
-
-void AsmTyper::VisitCall(Call* expr) {
- Type* expected_type = expected_type_;
- RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
- "callee expected to be any"));
- StandardMember standard_member = kNone;
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy) {
- standard_member = VariableAsStandardMember(proxy->var());
- }
- if (!in_function_ && (proxy == NULL || standard_member != kMathFround)) {
- FAIL(expr, "calls forbidden outside function bodies");
- }
- if (proxy == NULL && !expr->expression()->IsProperty()) {
- FAIL(expr, "calls must be to bound variables or function tables");
- }
- if (computed_type_->IsFunction()) {
- FunctionType* fun_type = computed_type_->AsFunction();
- Type* result_type = fun_type->Result();
- ZoneList<Expression*>* args = expr->arguments();
- if (Type::Any()->Is(result_type)) {
- // For foreign calls.
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(VisitWithExpectation(
- arg, Type::Any(), "foreign call argument expected to be any"));
- // Checking for asm extern types explicitly, as the type system
- // doesn't correctly check their inheritance relationship.
- if (!computed_type_->Is(cache_.kAsmSigned) &&
- !computed_type_->Is(cache_.kAsmFixnum) &&
- !computed_type_->Is(cache_.kAsmDouble)) {
- FAIL(arg,
- "foreign call argument expected to be int, double, or fixnum");
- }
- }
- intish_ = 0;
- expr->expression()->set_bounds(
- Bounds(Type::Function(Type::Any(), zone())));
- IntersectResult(expr, expected_type);
- } else {
- if (fun_type->Arity() != args->length()) {
- FAIL(expr, "call with wrong arity");
- }
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(VisitWithExpectation(
- arg, fun_type->Parameter(i),
- "call argument expected to match callee parameter"));
- if (standard_member != kNone && standard_member != kMathFround &&
- i == 0) {
- result_type = computed_type_;
- }
- }
- RECURSE(CheckPolymorphicStdlibArguments(standard_member, args));
- intish_ = 0;
- IntersectResult(expr, result_type);
- }
- } else {
- FAIL(expr, "invalid callee");
- }
-}
-
-
-void AsmTyper::VisitCallNew(CallNew* expr) {
- if (in_function_) {
- FAIL(expr, "new not allowed in module function");
- }
- RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
- "expected stdlib function"));
- if (computed_type_->IsFunction()) {
- FunctionType* fun_type = computed_type_->AsFunction();
- ZoneList<Expression*>* args = expr->arguments();
- if (fun_type->Arity() != args->length())
- FAIL(expr, "call with wrong arity");
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(VisitWithExpectation(
- arg, fun_type->Parameter(i),
- "constructor argument expected to match callee parameter"));
- }
- IntersectResult(expr, fun_type->Result());
- return;
- }
-
- FAIL(expr, "ill-typed new operator");
-}
-
-
-void AsmTyper::VisitCallRuntime(CallRuntime* expr) {
- // Allow runtime calls for now.
-}
-
-
-void AsmTyper::VisitUnaryOperation(UnaryOperation* expr) {
- if (!in_function_) {
- FAIL(expr, "unary operator inside module body");
- }
- switch (expr->op()) {
- case Token::NOT: // Used to encode != and !==
- RECURSE(VisitWithExpectation(expr->expression(), cache_.kAsmInt,
- "operand expected to be integer"));
- IntersectResult(expr, cache_.kAsmSigned);
- return;
- case Token::DELETE:
- FAIL(expr, "delete operator encountered");
- case Token::VOID:
- FAIL(expr, "void operator encountered");
- case Token::TYPEOF:
- FAIL(expr, "typeof operator encountered");
- default:
- UNREACHABLE();
- }
-}
-
-
-void AsmTyper::VisitCountOperation(CountOperation* expr) {
- FAIL(expr, "increment or decrement operator encountered");
-}
-
-
-void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
- Type* left_expected,
- Type* right_expected,
- Type* result_type, bool conversion) {
- RECURSE(VisitWithExpectation(expr->left(), Type::Number(),
- "left bitwise operand expected to be a number"));
- int32_t left_intish = intish_;
- Type* left_type = computed_type_;
- if (!left_type->Is(left_expected)) {
- FAIL(expr->left(), "left bitwise operand expected to be an integer");
- }
- if (left_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr->left(), "too many consecutive additive ops");
- }
-
- RECURSE(
- VisitWithExpectation(expr->right(), Type::Number(),
- "right bitwise operand expected to be a number"));
- int32_t right_intish = intish_;
- Type* right_type = computed_type_;
- if (!right_type->Is(right_expected)) {
- FAIL(expr->right(), "right bitwise operand expected to be an integer");
- }
- if (right_intish > kMaxUncombinedAdditiveSteps) {
- FAIL(expr->right(), "too many consecutive additive ops");
- }
-
- intish_ = 0;
-
- if (left_type->Is(cache_.kAsmFixnum) && right_type->Is(cache_.kAsmInt)) {
- left_type = right_type;
- }
- if (right_type->Is(cache_.kAsmFixnum) && left_type->Is(cache_.kAsmInt)) {
- right_type = left_type;
- }
- if (!conversion) {
- if (!left_type->Is(cache_.kAsmIntQ) || !right_type->Is(cache_.kAsmIntQ)) {
- FAIL(expr, "ill-typed bitwise operation");
- }
- }
- IntersectResult(expr, result_type);
-}
-
-
-void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
- if (!in_function_) {
- if (expr->op() != Token::BIT_OR && expr->op() != Token::MUL) {
- FAIL(expr, "illegal binary operator inside module body");
- }
- if (!(expr->left()->IsProperty() || expr->left()->IsVariableProxy()) ||
- !expr->right()->IsLiteral()) {
- FAIL(expr, "illegal computation inside module body");
- }
- DCHECK(expr->right()->AsLiteral() != nullptr);
- const AstValue* right_value = expr->right()->AsLiteral()->raw_value();
- if (expr->op() == Token::BIT_OR) {
- if (right_value->AsNumber() != 0.0 || right_value->ContainsDot()) {
- FAIL(expr, "illegal integer annotation value");
- }
- }
- if (expr->op() == Token::MUL) {
- if (right_value->AsNumber() != 1.0 && right_value->ContainsDot()) {
- FAIL(expr, "illegal double annotation value");
- }
- }
- }
- switch (expr->op()) {
- case Token::COMMA: {
- RECURSE(VisitWithExpectation(expr->left(), Type::Any(),
- "left comma operand expected to be any"));
- RECURSE(VisitWithExpectation(expr->right(), Type::Any(),
- "right comma operand expected to be any"));
- IntersectResult(expr, computed_type_);
- return;
- }
- case Token::OR:
- case Token::AND:
- FAIL(expr, "illegal logical operator");
- case Token::BIT_OR: {
- // BIT_OR allows Any since it is used as a type coercion.
- RECURSE(VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmIntQ,
- cache_.kAsmSigned, true));
- if (expr->left()->IsCall() && expr->op() == Token::BIT_OR &&
- Type::Number()->Is(expr->left()->bounds().upper)) {
- // Force the return types of foreign functions.
- expr->left()->set_bounds(Bounds(cache_.kAsmSigned));
- }
- if (in_function_ && !expr->left()->bounds().upper->Is(cache_.kAsmIntQ)) {
- FAIL(expr->left(), "intish required");
- }
- return;
- }
- case Token::BIT_XOR: {
- // Handle booleans specially to handle de-sugared !
- Literal* left = expr->left()->AsLiteral();
- if (left && left->value()->IsBoolean()) {
- if (left->ToBooleanIsTrue()) {
- left->set_bounds(Bounds(cache_.kSingletonOne));
- RECURSE(VisitWithExpectation(expr->right(), cache_.kAsmIntQ,
- "not operator expects an integer"));
- IntersectResult(expr, cache_.kAsmSigned);
- return;
- } else {
- FAIL(left, "unexpected false");
- }
- }
- // BIT_XOR allows Any since it is used as a type coercion (via ~~).
- RECURSE(VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmIntQ,
- cache_.kAsmSigned, true));
- return;
- }
- case Token::SHR: {
- RECURSE(VisitIntegerBitwiseOperator(
- expr, cache_.kAsmIntQ, cache_.kAsmIntQ, cache_.kAsmUnsigned, false));
- return;
- }
- case Token::SHL:
- case Token::SAR:
- case Token::BIT_AND: {
- RECURSE(VisitIntegerBitwiseOperator(
- expr, cache_.kAsmIntQ, cache_.kAsmIntQ, cache_.kAsmSigned, false));
- return;
- }
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- RECURSE(VisitWithExpectation(
- expr->left(), Type::Number(),
- "left arithmetic operand expected to be number"));
- Type* left_type = computed_type_;
- int32_t left_intish = intish_;
- RECURSE(VisitWithExpectation(
- expr->right(), Type::Number(),
- "right arithmetic operand expected to be number"));
- Type* right_type = computed_type_;
- int32_t right_intish = intish_;
- Type* type = Type::Union(left_type, right_type, zone());
- if (type->Is(cache_.kAsmInt)) {
- if (expr->op() == Token::MUL) {
- int32_t i;
- Literal* left = expr->left()->AsLiteral();
- Literal* right = expr->right()->AsLiteral();
- if (left != nullptr && left->value()->IsNumber() &&
- left->value()->ToInt32(&i)) {
- if (right_intish != 0) {
- FAIL(expr, "intish not allowed in multiply");
- }
- } else if (right != nullptr && right->value()->IsNumber() &&
- right->value()->ToInt32(&i)) {
- if (left_intish != 0) {
- FAIL(expr, "intish not allowed in multiply");
- }
- } else {
- FAIL(expr, "multiply must be by an integer literal");
- }
- i = abs(i);
- if (i >= (1 << 20)) {
- FAIL(expr, "multiply must be by value in -2^20 < n < 2^20");
- }
- intish_ = i;
- IntersectResult(expr, cache_.kAsmInt);
- return;
- } else {
- intish_ = left_intish + right_intish + 1;
- if (expr->op() == Token::ADD || expr->op() == Token::SUB) {
- if (intish_ > kMaxUncombinedAdditiveSteps) {
- FAIL(expr, "too many consecutive additive ops");
- }
- } else {
- if (intish_ > kMaxUncombinedMultiplicativeSteps) {
- FAIL(expr, "too many consecutive multiplicative ops");
- }
- }
- IntersectResult(expr, cache_.kAsmInt);
- return;
- }
- } else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
- right_type->Is(cache_.kAsmDouble) &&
- expr->right()->AsLiteral()->raw_value()->ContainsDot() &&
- expr->right()->AsLiteral()->raw_value()->AsNumber() == 1.0) {
- // For unary +, expressed as x * 1.0
- if (expr->left()->IsCall() &&
- Type::Number()->Is(expr->left()->bounds().upper)) {
- // Force the return types of foreign functions.
- expr->left()->set_bounds(Bounds(cache_.kAsmDouble));
- left_type = expr->left()->bounds().upper;
- }
- if (!(expr->left()->IsProperty() &&
- Type::Number()->Is(expr->left()->bounds().upper))) {
- if (!left_type->Is(cache_.kAsmSigned) &&
- !left_type->Is(cache_.kAsmUnsigned) &&
- !left_type->Is(cache_.kAsmFixnum) &&
- !left_type->Is(cache_.kAsmFloatQ) &&
- !left_type->Is(cache_.kAsmDoubleQ)) {
- FAIL(
- expr->left(),
- "unary + only allowed on signed, unsigned, float?, or double?");
- }
- }
- IntersectResult(expr, cache_.kAsmDouble);
- return;
- } else if (expr->op() == Token::MUL && left_type->Is(cache_.kAsmDouble) &&
- expr->right()->IsLiteral() &&
- !expr->right()->AsLiteral()->raw_value()->ContainsDot() &&
- expr->right()->AsLiteral()->raw_value()->AsNumber() == -1.0) {
- // For unary -, expressed as x * -1
- expr->right()->set_bounds(Bounds(cache_.kAsmDouble));
- IntersectResult(expr, cache_.kAsmDouble);
- return;
- } else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
- if (left_intish != 0 || right_intish != 0) {
- FAIL(expr, "float operation before required fround");
- }
- IntersectResult(expr, cache_.kAsmFloat);
- intish_ = 1;
- return;
- } else if (type->Is(cache_.kAsmDouble)) {
- IntersectResult(expr, cache_.kAsmDouble);
- return;
- } else {
- FAIL(expr, "ill-typed arithmetic operation");
- }
- }
- default:
- UNREACHABLE();
- }
-}
-
-
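The arithmetic cases above implement the validator's "intish" accounting: each int add or sub bumps a running counter, and once the total exceeds the uncombined-steps budget the validator demands an explicit |0 truncation before the value is used again, since that many 32-bit additions can overflow. A simplified model of the bookkeeping (the constant and struct are stand-ins, not V8's kMaxUncombinedAdditiveSteps machinery):

#include <iostream>

// Budget of additive steps that may combine before |0 is required.
constexpr long long kMaxAdditiveSteps = 1 << 20;

struct Intish {
  long long steps = 0;
  bool ok = true;

  // a + b or a - b on ints: the pending step counts accumulate, plus one.
  Intish Add(const Intish& other) const {
    Intish r;
    r.steps = steps + other.steps + 1;
    r.ok = ok && other.ok && r.steps <= kMaxAdditiveSteps;
    return r;
  }
  // x | 0 resets the count: the value is a genuine int32 again.
  Intish Truncate() const { return Intish{}; }
};

int main() {
  Intish x;  // a plain int: zero pending additive steps
  Intish sum = x;
  for (int i = 0; i < 3; ++i) sum = sum.Add(x);
  std::cout << "steps=" << sum.steps << " ok=" << sum.ok << '\n';
  sum = sum.Truncate();  // after |0, the accounting restarts
  std::cout << "steps=" << sum.steps << '\n';
}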
-void AsmTyper::VisitCompareOperation(CompareOperation* expr) {
- if (!in_function_) {
- FAIL(expr, "comparison inside module body");
- }
- Token::Value op = expr->op();
- if (op != Token::EQ && op != Token::NE && op != Token::LT &&
- op != Token::LTE && op != Token::GT && op != Token::GTE) {
- FAIL(expr, "illegal comparison operator");
- }
-
- RECURSE(
- VisitWithExpectation(expr->left(), Type::Number(),
- "left comparison operand expected to be number"));
- Type* left_type = computed_type_;
- if (!left_type->Is(cache_.kAsmComparable)) {
- FAIL(expr->left(), "bad type on left side of comparison");
- }
-
- RECURSE(
- VisitWithExpectation(expr->right(), Type::Number(),
- "right comparison operand expected to be number"));
- Type* right_type = computed_type_;
- if (!right_type->Is(cache_.kAsmComparable)) {
- FAIL(expr->right(), "bad type on right side of comparison");
- }
-
- if (!left_type->Is(right_type) && !right_type->Is(left_type)) {
- FAIL(expr, "left and right side of comparison must match");
- }
-
- IntersectResult(expr, cache_.kAsmSigned);
-}
-
-
-void AsmTyper::VisitThisFunction(ThisFunction* expr) {
- FAIL(expr, "this function not allowed");
-}
-
-
-void AsmTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); ++i) {
- Declaration* decl = decls->at(i);
- RECURSE(Visit(decl));
- }
-}
-
-
-void AsmTyper::VisitImportDeclaration(ImportDeclaration* decl) {
- FAIL(decl, "import declaration encountered");
-}
-
-
-void AsmTyper::VisitExportDeclaration(ExportDeclaration* decl) {
- FAIL(decl, "export declaration encountered");
-}
-
-
-void AsmTyper::VisitClassLiteral(ClassLiteral* expr) {
- FAIL(expr, "class literal not allowed");
-}
-
-
-void AsmTyper::VisitSpread(Spread* expr) { FAIL(expr, "spread not allowed"); }
-
-
-void AsmTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {
- FAIL(expr, "super property reference not allowed");
-}
-
-
-void AsmTyper::VisitSuperCallReference(SuperCallReference* expr) {
- FAIL(expr, "call reference not allowed");
-}
-
-
-void AsmTyper::InitializeStdlibSIMD() {
-#define V(NAME, Name, name, lane_count, lane_type) \
- { \
- Type* type = Type::Function(Type::Name(isolate_, zone()), Type::Any(), \
- lane_count, zone()); \
- for (int i = 0; i < lane_count; ++i) { \
- type->AsFunction()->InitParameter(i, Type::Number()); \
- } \
- stdlib_simd_##name##_constructor_type_ = new (zone()) VariableInfo(type); \
- stdlib_simd_##name##_constructor_type_->is_constructor_function = true; \
- }
- SIMD128_TYPES(V)
-#undef V
-}
-
-
-void AsmTyper::InitializeStdlib() {
- if (allow_simd_) {
- InitializeStdlibSIMD();
- }
- Type* number_type = Type::Number();
- Type* double_type = cache_.kAsmDouble;
- Type* double_fn1_type = Type::Function(double_type, double_type, zone());
- Type* double_fn2_type =
- Type::Function(double_type, double_type, double_type, zone());
-
- Type* fround_type = Type::Function(cache_.kAsmFloat, number_type, zone());
- Type* imul_type =
- Type::Function(cache_.kAsmSigned, cache_.kAsmInt, cache_.kAsmInt, zone());
- // TODO(bradnelson): currently only approximating the proper intersection type
- // (which we cannot currently represent).
- Type* number_fn1_type = Type::Function(number_type, number_type, zone());
- Type* number_fn2_type =
- Type::Function(number_type, number_type, number_type, zone());
-
- struct Assignment {
- const char* name;
- StandardMember standard_member;
- Type* type;
- };
-
- const Assignment math[] = {{"PI", kMathPI, double_type},
- {"E", kMathE, double_type},
- {"LN2", kMathLN2, double_type},
- {"LN10", kMathLN10, double_type},
- {"LOG2E", kMathLOG2E, double_type},
- {"LOG10E", kMathLOG10E, double_type},
- {"SQRT2", kMathSQRT2, double_type},
- {"SQRT1_2", kMathSQRT1_2, double_type},
- {"imul", kMathImul, imul_type},
- {"abs", kMathAbs, number_fn1_type},
- {"ceil", kMathCeil, number_fn1_type},
- {"floor", kMathFloor, number_fn1_type},
- {"fround", kMathFround, fround_type},
- {"pow", kMathPow, double_fn2_type},
- {"exp", kMathExp, double_fn1_type},
- {"log", kMathLog, double_fn1_type},
- {"min", kMathMin, number_fn2_type},
- {"max", kMathMax, number_fn2_type},
- {"sqrt", kMathSqrt, number_fn1_type},
- {"cos", kMathCos, double_fn1_type},
- {"sin", kMathSin, double_fn1_type},
- {"tan", kMathTan, double_fn1_type},
- {"acos", kMathAcos, double_fn1_type},
- {"asin", kMathAsin, double_fn1_type},
- {"atan", kMathAtan, double_fn1_type},
- {"atan2", kMathAtan2, double_fn2_type}};
- for (unsigned i = 0; i < arraysize(math); ++i) {
- stdlib_math_types_[math[i].name] = new (zone()) VariableInfo(math[i].type);
- stdlib_math_types_[math[i].name]->standard_member = math[i].standard_member;
- }
- stdlib_math_types_["fround"]->is_check_function = true;
-
- stdlib_types_["Infinity"] = new (zone()) VariableInfo(double_type);
- stdlib_types_["Infinity"]->standard_member = kInfinity;
- stdlib_types_["NaN"] = new (zone()) VariableInfo(double_type);
- stdlib_types_["NaN"]->standard_member = kNaN;
- Type* buffer_type = Type::Any();
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- stdlib_types_[#TypeName "Array"] = new (zone()) VariableInfo( \
- Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
- TYPED_ARRAYS(TYPED_ARRAY)
-#undef TYPED_ARRAY
-
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- stdlib_heap_types_[#TypeName "Array"] = new (zone()) VariableInfo( \
- Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
- TYPED_ARRAYS(TYPED_ARRAY)
-#undef TYPED_ARRAY
-}
-
-
-void AsmTyper::VisitLibraryAccess(ObjectTypeMap* map, Property* expr) {
- Literal* key = expr->key()->AsLiteral();
- if (key == NULL || !key->IsPropertyName())
- FAIL(expr, "invalid key used on stdlib member");
- Handle<String> name = key->AsPropertyName();
- VariableInfo* info = LibType(map, name);
- if (info == NULL || info->type == NULL) FAIL(expr, "unknown stdlib function");
- SetResult(expr, info->type);
- property_info_ = info;
-}
-
-
-AsmTyper::VariableInfo* AsmTyper::LibType(ObjectTypeMap* map,
- Handle<String> name) {
- base::SmartArrayPointer<char> aname = name->ToCString();
- ObjectTypeMap::iterator i = map->find(std::string(aname.get()));
- if (i == map->end()) {
- return NULL;
- }
- return i->second;
-}
-
-
-void AsmTyper::SetType(Variable* variable, Type* type) {
- VariableInfo* info = GetVariableInfo(variable, true);
- info->type = type;
-}
-
-
-Type* AsmTyper::GetType(Variable* variable) {
- VariableInfo* info = GetVariableInfo(variable, false);
- if (!info) return NULL;
- return info->type;
-}
-
-
-AsmTyper::VariableInfo* AsmTyper::GetVariableInfo(Variable* variable,
- bool setting) {
- ZoneHashMap::Entry* entry;
- ZoneHashMap* map;
- if (in_function_) {
- map = &local_variable_type_;
- } else {
- map = &global_variable_type_;
- }
- if (setting) {
- entry = map->LookupOrInsert(variable, ComputePointerHash(variable),
- ZoneAllocationPolicy(zone()));
- } else {
- entry = map->Lookup(variable, ComputePointerHash(variable));
- if (!entry && in_function_) {
- entry =
- global_variable_type_.Lookup(variable, ComputePointerHash(variable));
- }
- }
- if (!entry) return NULL;
- if (!entry->value) {
- if (!setting) return NULL;
- entry->value = new (zone()) VariableInfo;
- }
- return reinterpret_cast<VariableInfo*>(entry->value);
-}
-
-
-void AsmTyper::SetVariableInfo(Variable* variable, const VariableInfo* info) {
- VariableInfo* dest = GetVariableInfo(variable, true);
- dest->type = info->type;
- dest->is_check_function = info->is_check_function;
- dest->is_constructor_function = info->is_constructor_function;
- dest->standard_member = info->standard_member;
-}
-
-
-AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(
- Variable* variable) {
- VariableInfo* info = GetVariableInfo(variable, false);
- if (!info) return kNone;
- return info->standard_member;
-}
-
-
-void AsmTyper::SetResult(Expression* expr, Type* type) {
- computed_type_ = type;
- expr->set_bounds(Bounds(computed_type_));
-}
-
-
-void AsmTyper::IntersectResult(Expression* expr, Type* type) {
- computed_type_ = type;
- Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
- expr->set_bounds(Bounds(bounded_type));
-}
-
-
-void AsmTyper::VisitWithExpectation(Expression* expr, Type* expected_type,
- const char* msg) {
- Type* save = expected_type_;
- expected_type_ = expected_type;
- RECURSE(Visit(expr));
- Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
- if (bounded_type->Is(Type::None())) {
-#ifdef DEBUG
- PrintF("Computed type: ");
- computed_type_->Print();
- PrintF("Expected type: ");
- expected_type_->Print();
-#endif
- FAIL(expr, msg);
- }
- expected_type_ = save;
-}
-
-
-void AsmTyper::VisitRewritableExpression(RewritableExpression* expr) {
- RECURSE(Visit(expr->expression()));
-}
-
-
-} // namespace internal
-} // namespace v8
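
The AsmTyper removed above validated asm.js modules by bidirectional typing: VisitWithExpectation() saves the type the surrounding context expects, visits the subexpression to obtain computed_type_, and fails validation when Type::Intersect() of the two is empty. A standalone sketch of that intersect-and-fail pattern, using a hypothetical bitmask lattice in place of V8's real Type system:

    #include <cstdio>

    // Toy type lattice; V8's actual types live in src/types.h and are
    // far richer than these illustrative bits.
    enum TypeBits {
      kNone = 0,
      kSigned = 1 << 0,
      kUnsigned = 1 << 1,
      kDouble = 1 << 2,
      kInt = kSigned | kUnsigned,
      kNumber = kInt | kDouble,
    };

    // Analogue of VisitWithExpectation(): intersect the computed type
    // with the expected one and treat an empty intersection as a
    // validation failure (the FAIL(expr, msg) path).
    bool CheckWithExpectation(unsigned computed, unsigned expected) {
      return (computed & expected) != kNone;
    }

    int main() {
      std::printf("int where double expected: %s\n",
                  CheckWithExpectation(kInt, kDouble) ? "ok" : "fail");
      std::printf("signed where number expected: %s\n",
                  CheckWithExpectation(kSigned, kNumber) ? "ok" : "fail");
    }
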
diff --git a/deps/v8/src/typing-asm.h b/deps/v8/src/typing-asm.h
deleted file mode 100644
index c7984b2965..0000000000
--- a/deps/v8/src/typing-asm.h
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPING_ASM_H_
-#define V8_TYPING_ASM_H_
-
-#include "src/allocation.h"
-#include "src/ast/ast.h"
-#include "src/effects.h"
-#include "src/type-info.h"
-#include "src/types.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class TypeCache;
-
-class AsmTyper : public AstVisitor {
- public:
- explicit AsmTyper(Isolate* isolate, Zone* zone, Script* script,
- FunctionLiteral* root);
- bool Validate();
- void set_allow_simd(bool simd) { allow_simd_ = simd; }
- const char* error_message() { return error_message_; }
-
- enum StandardMember {
- kNone = 0,
- kStdlib,
- kInfinity,
- kNaN,
- kMathAcos,
- kMathAsin,
- kMathAtan,
- kMathCos,
- kMathSin,
- kMathTan,
- kMathExp,
- kMathLog,
- kMathCeil,
- kMathFloor,
- kMathSqrt,
- kMathAbs,
- kMathMin,
- kMathMax,
- kMathAtan2,
- kMathPow,
- kMathImul,
- kMathFround,
- kMathE,
- kMathLN10,
- kMathLN2,
- kMathLOG2E,
- kMathLOG10E,
- kMathPI,
- kMathSQRT1_2,
- kMathSQRT2,
- };
-
- StandardMember VariableAsStandardMember(Variable* variable);
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- Zone* zone_;
- Isolate* isolate_;
- Script* script_;
- FunctionLiteral* root_;
- bool valid_;
- bool allow_simd_;
-
- struct VariableInfo : public ZoneObject {
- Type* type;
- bool is_check_function;
- bool is_constructor_function;
- StandardMember standard_member;
-
- VariableInfo()
- : type(NULL),
- is_check_function(false),
- is_constructor_function(false),
- standard_member(kNone) {}
- explicit VariableInfo(Type* t)
- : type(t),
- is_check_function(false),
- is_constructor_function(false),
- standard_member(kNone) {}
- };
-
- // Information for bi-directional typing with a cap on nesting depth.
- Type* expected_type_;
- Type* computed_type_;
- VariableInfo* property_info_;
- int32_t intish_; // How many ops we've gone without a x|0.
-
- Type* return_type_; // Return type of last function.
- size_t array_size_; // Array size of last ArrayLiteral.
-
- typedef ZoneMap<std::string, VariableInfo*> ObjectTypeMap;
- ObjectTypeMap stdlib_types_;
- ObjectTypeMap stdlib_heap_types_;
- ObjectTypeMap stdlib_math_types_;
-#define V(NAME, Name, name, lane_count, lane_type) \
- ObjectTypeMap stdlib_simd_##name##_types_; \
- VariableInfo* stdlib_simd_##name##_constructor_type_;
- SIMD128_TYPES(V)
-#undef V
-
- // Map from Variable* to global/local variable Type*.
- ZoneHashMap global_variable_type_;
- ZoneHashMap local_variable_type_;
-
- bool in_function_; // In module function?
- bool building_function_tables_;
- bool visiting_exports_;
-
- TypeCache const& cache_;
-
- static const int kErrorMessageLimit = 100;
- char error_message_[kErrorMessageLimit];
-
- static const int kMaxUncombinedAdditiveSteps = 1 << 20;
- static const int kMaxUncombinedMultiplicativeSteps = 1;
-
- void InitializeStdlib();
- void InitializeStdlibSIMD();
-
- void VisitDeclarations(ZoneList<Declaration*>* d) override;
- void VisitStatements(ZoneList<Statement*>* s) override;
-
- void VisitExpressionAnnotation(Expression* e, Variable* var, bool is_return);
- void VisitFunctionAnnotation(FunctionLiteral* f);
- void VisitAsmModule(FunctionLiteral* f);
-
- void VisitHeapAccess(Property* expr, bool assigning, Type* assignment_type);
-
- void CheckPolymorphicStdlibArguments(enum StandardMember standard_member,
- ZoneList<Expression*>* args);
-
- Expression* GetReceiverOfPropertyAccess(Expression* expr, const char* name);
- bool IsMathObject(Expression* expr);
- bool IsSIMDObject(Expression* expr);
- bool IsSIMDTypeObject(Expression* expr, const char* name);
- bool IsStdlibObject(Expression* expr);
-
- void VisitSIMDProperty(Property* expr);
-
- int ElementShiftSize(Type* type);
- Type* StorageType(Type* type);
-
- void SetType(Variable* variable, Type* type);
- Type* GetType(Variable* variable);
- VariableInfo* GetVariableInfo(Variable* variable, bool setting);
- void SetVariableInfo(Variable* variable, const VariableInfo* info);
-
- VariableInfo* LibType(ObjectTypeMap* map, Handle<String> name);
- void VisitLibraryAccess(ObjectTypeMap* map, Property* expr);
-
- void SetResult(Expression* expr, Type* type);
- void IntersectResult(Expression* expr, Type* type);
-
- void VisitWithExpectation(Expression* expr, Type* expected_type,
- const char* msg);
-
- void VisitLiteral(Literal* expr, bool is_return);
-
- void VisitVariableProxy(VariableProxy* expr, bool assignment);
-
- void VisitIntegerBitwiseOperator(BinaryOperation* expr, Type* left_expected,
- Type* right_expected, Type* result_type,
- bool conversion);
-
- Zone* zone() const { return zone_; }
-
-#define DECLARE_VISIT(type) void Visit##type(type* node) override;
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- DISALLOW_COPY_AND_ASSIGN(AsmTyper);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TYPING_ASM_H_
diff --git a/deps/v8/src/typing-reset.cc b/deps/v8/src/typing-reset.cc
deleted file mode 100644
index c22f7a9276..0000000000
--- a/deps/v8/src/typing-reset.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/typing-reset.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/codegen.h"
-
-namespace v8 {
-namespace internal {
-
-
-TypingReseter::TypingReseter(Isolate* isolate, FunctionLiteral* root)
- : AstExpressionVisitor(isolate, root) {}
-
-
-void TypingReseter::VisitExpression(Expression* expression) {
- expression->set_bounds(Bounds::Unbounded());
-}
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/typing-reset.h b/deps/v8/src/typing-reset.h
deleted file mode 100644
index 3e1969d9ed..0000000000
--- a/deps/v8/src/typing-reset.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPING_RESET_H_
-#define V8_TYPING_RESET_H_
-
-#include "src/ast/ast-expression-visitor.h"
-
-namespace v8 {
-namespace internal {
-
-// A Visitor over a CompilationInfo's AST that resets
-// typing bounds back to their default.
-
-class TypingReseter : public AstExpressionVisitor {
- public:
- TypingReseter(Isolate* isolate, FunctionLiteral* root);
-
- protected:
- void VisitExpression(Expression* expression) override;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TYPING_RESET_H_
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index b22e482528..ebebfaa1bd 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -137,6 +137,12 @@ unsigned Utf8::Length(uchar c, int previous) {
}
}
+bool Utf8::IsValidCharacter(uchar c) {
+ return c < 0xD800u || (c >= 0xE000u && c < 0xFDD0u) ||
+ (c > 0xFDEFu && c <= 0x10FFFFu && (c & 0xFFFEu) != 0xFFFEu &&
+ c != kBadChar);
+}
+
} // namespace unibrow
#endif // V8_UNICODE_INL_H_
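
IsValidCharacter() above rejects UTF-16 surrogate halves (0xD800-0xDFFF), the 0xFDD0-0xFDEF noncharacter block, the two per-plane noncharacters U+xFFFE/U+xFFFF (the (c & 0xFFFEu) test), anything beyond U+10FFFF, and kBadChar (U+FFFD), since the decoder returns that replacement character for malformed input. A self-contained restatement with a few probe values, hard-coding 0xFFFD for kBadChar:

    #include <cstdint>
    #include <cstdio>

    // Same predicate as unibrow::Utf8::IsValidCharacter, with kBadChar
    // (the U+FFFD decode-error marker) written out literally.
    bool IsValidCharacter(uint32_t c) {
      return c < 0xD800u || (c >= 0xE000u && c < 0xFDD0u) ||
             (c > 0xFDEFu && c <= 0x10FFFFu && (c & 0xFFFEu) != 0xFFFEu &&
              c != 0xFFFDu);
    }

    int main() {
      const uint32_t samples[] = {
          0x0041,   // 'A': valid
          0xD800,   // lone surrogate: rejected
          0xFDD0,   // noncharacter block: rejected
          0x1FFFE,  // per-plane noncharacter: rejected
          0x1F600,  // astral code point: valid
      };
      for (uint32_t c : samples)
        std::printf("U+%05X -> %d\n", static_cast<unsigned>(c),
                    IsValidCharacter(c));
    }
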
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index de5e36038b..db98be8675 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -305,6 +305,20 @@ uchar Utf8::CalculateValue(const byte* str, size_t max_length, size_t* cursor) {
0x03C82080;
}
+bool Utf8::Validate(const byte* bytes, size_t length) {
+ size_t cursor = 0;
+
+ // Performance optimization: Skip over single-byte values first.
+ while (cursor < length && bytes[cursor] <= kMaxOneByteChar) {
+ ++cursor;
+ }
+
+ while (cursor < length) {
+ uchar c = ValueOf(bytes + cursor, length - cursor, &cursor);
+ if (!IsValidCharacter(c)) return false;
+ }
+ return true;
+}
// Uppercase: point.category == 'Lu'
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 7471a638c0..35717bca86 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -155,6 +155,11 @@ class Utf8 {
// UTF-8.
static const unsigned kMax16BitCodeUnitSize = 3;
static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
+
+ // Excludes non-characters from the set of valid code points.
+ static inline bool IsValidCharacter(uchar c);
+
+ static bool Validate(const byte* str, size_t length);
};
struct Uppercase {
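
Utf8::Validate() scans a raw byte buffer in two phases: a fast loop that steps over the ASCII prefix, then a decode loop that bails out on the first value IsValidCharacter() rejects. A standalone analogue with the same two-phase structure, using a minimal decoder in place of unibrow::Utf8::ValueOf (it checks lead/continuation structure and truncation only, not overlong forms or noncharacters):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    const uint32_t kBad = 0xFFFFFFFFu;  // decode-error marker

    // Decode one UTF-8 sequence starting at *cursor, advancing *cursor.
    uint32_t DecodeOne(const uint8_t* p, size_t n, size_t* cursor) {
      uint8_t b = p[*cursor];
      int extra;
      uint32_t v;
      if (b < 0x80) { extra = 0; v = b; }
      else if ((b & 0xE0) == 0xC0) { extra = 1; v = b & 0x1F; }
      else if ((b & 0xF0) == 0xE0) { extra = 2; v = b & 0x0F; }
      else if ((b & 0xF8) == 0xF0) { extra = 3; v = b & 0x07; }
      else { ++*cursor; return kBad; }  // stray continuation byte
      if (*cursor + extra >= n) { *cursor = n; return kBad; }  // truncated
      for (int i = 1; i <= extra; ++i) {
        uint8_t c = p[*cursor + i];
        if ((c & 0xC0) != 0x80) { ++*cursor; return kBad; }
        v = (v << 6) | (c & 0x3F);
      }
      *cursor += extra + 1;
      return v;
    }

    bool Validate(const uint8_t* bytes, size_t length) {
      size_t cursor = 0;
      // Phase 1: skip over single-byte (ASCII) values.
      while (cursor < length && bytes[cursor] < 0x80) ++cursor;
      // Phase 2: decode the remainder, rejecting on the first bad value.
      while (cursor < length)
        if (DecodeOne(bytes, length, &cursor) == kBad) return false;
      return true;
    }

    int main() {
      const uint8_t ok[] = {'h', 'i', 0xE2, 0x82, 0xAC};  // "hi" + U+20AC
      const uint8_t bad[] = {0xE2, 0x82};                 // truncated sequence
      std::printf("%d %d\n", Validate(ok, sizeof ok), Validate(bad, sizeof bad));
    }
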
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
new file mode 100644
index 0000000000..de7bd9bf57
--- /dev/null
+++ b/deps/v8/src/uri.cc
@@ -0,0 +1,505 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/uri.h"
+
+#include "src/char-predicates-inl.h"
+#include "src/handles.h"
+#include "src/isolate-inl.h"
+#include "src/list.h"
+#include "src/string-search.h"
+
+namespace v8 {
+namespace internal {
+
+namespace { // anonymous namespace for DecodeURI helper functions
+bool IsReservedPredicate(uc16 c) {
+ switch (c) {
+ case '#':
+ case '$':
+ case '&':
+ case '+':
+ case ',':
+ case '/':
+ case ':':
+ case ';':
+ case '=':
+ case '?':
+ case '@':
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsReplacementCharacter(const uint8_t* octets, int length) {
+ // The replacement character is at codepoint U+FFFD in the Unicode Specials
+ // table. Its UTF-8 encoding is 0xEF 0xBF 0xBD.
+ if (length != 3 || octets[0] != 0xef || octets[1] != 0xbf ||
+ octets[2] != 0xbd) {
+ return false;
+ }
+ return true;
+}
+
+bool DecodeOctets(const uint8_t* octets, int length, List<uc16>* buffer) {
+ size_t cursor = 0;
+ uc32 value = unibrow::Utf8::ValueOf(octets, length, &cursor);
+ if (value == unibrow::Utf8::kBadChar &&
+ !IsReplacementCharacter(octets, length)) {
+ return false;
+ }
+
+ if (value <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ buffer->Add(value);
+ } else {
+ buffer->Add(unibrow::Utf16::LeadSurrogate(value));
+ buffer->Add(unibrow::Utf16::TrailSurrogate(value));
+ }
+ return true;
+}
+
+int TwoDigitHex(uc16 character1, uc16 character2) {
+ if (character1 > 'f') return -1;
+ int high = HexValue(character1);
+ if (high == -1) return -1;
+ if (character2 > 'f') return -1;
+ int low = HexValue(character2);
+ if (low == -1) return -1;
+ return (high << 4) + low;
+}
+
+template <typename T>
+void AddToBuffer(uc16 decoded, String::FlatContent* uri_content, int index,
+ bool is_uri, List<T>* buffer) {
+ if (is_uri && IsReservedPredicate(decoded)) {
+ buffer->Add('%');
+ uc16 first = uri_content->Get(index + 1);
+ uc16 second = uri_content->Get(index + 2);
+ DCHECK_GT(std::numeric_limits<T>::max(), first);
+ DCHECK_GT(std::numeric_limits<T>::max(), second);
+
+ buffer->Add(first);
+ buffer->Add(second);
+ } else {
+ buffer->Add(decoded);
+ }
+}
+
+bool IntoTwoByte(int index, bool is_uri, int uri_length,
+ String::FlatContent* uri_content, List<uc16>* buffer) {
+ for (int k = index; k < uri_length; k++) {
+ uc16 code = uri_content->Get(k);
+ if (code == '%') {
+ int two_digits;
+ if (k + 2 >= uri_length ||
+ (two_digits = TwoDigitHex(uri_content->Get(k + 1),
+ uri_content->Get(k + 2))) < 0) {
+ return false;
+ }
+ k += 2;
+ uc16 decoded = static_cast<uc16>(two_digits);
+ if (decoded > unibrow::Utf8::kMaxOneByteChar) {
+ uint8_t octets[unibrow::Utf8::kMaxEncodedSize];
+ octets[0] = decoded;
+
+ int number_of_continuation_bytes = 0;
+ while ((decoded << ++number_of_continuation_bytes) & 0x80) {
+ if (number_of_continuation_bytes > 3 || k + 3 >= uri_length) {
+ return false;
+ }
+ if (uri_content->Get(++k) != '%' ||
+ (two_digits = TwoDigitHex(uri_content->Get(k + 1),
+ uri_content->Get(k + 2))) < 0) {
+ return false;
+ }
+ k += 2;
+ uc16 continuation_byte = static_cast<uc16>(two_digits);
+ octets[number_of_continuation_bytes] = continuation_byte;
+ }
+
+ if (!DecodeOctets(octets, number_of_continuation_bytes, buffer)) {
+ return false;
+ }
+ } else {
+ AddToBuffer(decoded, uri_content, k - 2, is_uri, buffer);
+ }
+ } else {
+ buffer->Add(code);
+ }
+ }
+ return true;
+}
+
+bool IntoOneAndTwoByte(Handle<String> uri, bool is_uri,
+ List<uint8_t>* one_byte_buffer,
+ List<uc16>* two_byte_buffer) {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent uri_content = uri->GetFlatContent();
+
+ int uri_length = uri->length();
+ for (int k = 0; k < uri_length; k++) {
+ uc16 code = uri_content.Get(k);
+ if (code == '%') {
+ int two_digits;
+ if (k + 2 >= uri_length ||
+ (two_digits = TwoDigitHex(uri_content.Get(k + 1),
+ uri_content.Get(k + 2))) < 0) {
+ return false;
+ }
+
+ uc16 decoded = static_cast<uc16>(two_digits);
+ if (decoded > unibrow::Utf8::kMaxOneByteChar) {
+ return IntoTwoByte(k, is_uri, uri_length, &uri_content,
+ two_byte_buffer);
+ }
+
+ AddToBuffer(decoded, &uri_content, k, is_uri, one_byte_buffer);
+ k += 2;
+ } else {
+ if (code > unibrow::Utf8::kMaxOneByteChar) {
+ return IntoTwoByte(k, is_uri, uri_length, &uri_content,
+ two_byte_buffer);
+ }
+ one_byte_buffer->Add(code);
+ }
+ }
+ return true;
+}
+
+} // anonymous namespace
+
+MaybeHandle<String> Uri::Decode(Isolate* isolate, Handle<String> uri,
+ bool is_uri) {
+ uri = String::Flatten(uri);
+ List<uint8_t> one_byte_buffer;
+ List<uc16> two_byte_buffer;
+
+ if (!IntoOneAndTwoByte(uri, is_uri, &one_byte_buffer, &two_byte_buffer)) {
+ THROW_NEW_ERROR(isolate, NewURIError(), String);
+ }
+
+ if (two_byte_buffer.is_empty()) {
+ return isolate->factory()->NewStringFromOneByte(
+ one_byte_buffer.ToConstVector());
+ }
+
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(
+ one_byte_buffer.length() + two_byte_buffer.length()),
+ String);
+
+ CopyChars(result->GetChars(), one_byte_buffer.ToConstVector().start(),
+ one_byte_buffer.length());
+ CopyChars(result->GetChars() + one_byte_buffer.length(),
+ two_byte_buffer.ToConstVector().start(), two_byte_buffer.length());
+
+ return result;
+}
+
+namespace { // anonymous namespace for EncodeURI helper functions
+bool IsUnescapePredicateInUriComponent(uc16 c) {
+ if (IsAlphaNumeric(c)) {
+ return true;
+ }
+
+ switch (c) {
+ case '!':
+ case '\'':
+ case '(':
+ case ')':
+ case '*':
+ case '-':
+ case '.':
+ case '_':
+ case '~':
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool IsUriSeparator(uc16 c) {
+ switch (c) {
+ case '#':
+ case ':':
+ case ';':
+ case '/':
+ case '?':
+ case '$':
+ case '&':
+ case '+':
+ case ',':
+ case '@':
+ case '=':
+ return true;
+ default:
+ return false;
+ }
+}
+
+void AddEncodedOctetToBuffer(uint8_t octet, List<uint8_t>* buffer) {
+ buffer->Add('%');
+ buffer->Add(HexCharOfValue(octet >> 4));
+ buffer->Add(HexCharOfValue(octet & 0x0F));
+}
+
+void EncodeSingle(uc16 c, List<uint8_t>* buffer) {
+ char s[4] = {};
+ int number_of_bytes;
+ number_of_bytes =
+ unibrow::Utf8::Encode(s, c, unibrow::Utf16::kNoPreviousCharacter, false);
+ for (int k = 0; k < number_of_bytes; k++) {
+ AddEncodedOctetToBuffer(s[k], buffer);
+ }
+}
+
+void EncodePair(uc16 cc1, uc16 cc2, List<uint8_t>* buffer) {
+ char s[4] = {};
+ int number_of_bytes =
+ unibrow::Utf8::Encode(s, unibrow::Utf16::CombineSurrogatePair(cc1, cc2),
+ unibrow::Utf16::kNoPreviousCharacter, false);
+ for (int k = 0; k < number_of_bytes; k++) {
+ AddEncodedOctetToBuffer(s[k], buffer);
+ }
+}
+
+} // anonymous namespace
+
+MaybeHandle<String> Uri::Encode(Isolate* isolate, Handle<String> uri,
+ bool is_uri) {
+ uri = String::Flatten(uri);
+ int uri_length = uri->length();
+ List<uint8_t> buffer(uri_length);
+
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent uri_content = uri->GetFlatContent();
+
+ for (int k = 0; k < uri_length; k++) {
+ uc16 cc1 = uri_content.Get(k);
+ if (unibrow::Utf16::IsLeadSurrogate(cc1)) {
+ k++;
+ if (k < uri_length) {
+ uc16 cc2 = uri->Get(k);
+ if (unibrow::Utf16::IsTrailSurrogate(cc2)) {
+ EncodePair(cc1, cc2, &buffer);
+ continue;
+ }
+ }
+ } else if (!unibrow::Utf16::IsTrailSurrogate(cc1)) {
+ if (IsUnescapePredicateInUriComponent(cc1) ||
+ (is_uri && IsUriSeparator(cc1))) {
+ buffer.Add(cc1);
+ } else {
+ EncodeSingle(cc1, &buffer);
+ }
+ continue;
+ }
+
+ AllowHeapAllocation allocate_error_and_return;
+ THROW_NEW_ERROR(isolate, NewURIError(), String);
+ }
+ }
+
+ return isolate->factory()->NewStringFromOneByte(buffer.ToConstVector());
+}
+
+namespace { // Anonymous namespace for Escape and Unescape
+
+template <typename Char>
+int UnescapeChar(Vector<const Char> vector, int i, int length, int* step) {
+ uint16_t character = vector[i];
+ int32_t hi = 0;
+ int32_t lo = 0;
+ if (character == '%' && i <= length - 6 && vector[i + 1] == 'u' &&
+ (hi = TwoDigitHex(vector[i + 2], vector[i + 3])) > -1 &&
+ (lo = TwoDigitHex(vector[i + 4], vector[i + 5])) > -1) {
+ *step = 6;
+ return (hi << 8) + lo;
+ } else if (character == '%' && i <= length - 3 &&
+ (lo = TwoDigitHex(vector[i + 1], vector[i + 2])) > -1) {
+ *step = 3;
+ return lo;
+ } else {
+ *step = 1;
+ return character;
+ }
+}
+
+template <typename Char>
+MaybeHandle<String> UnescapeSlow(Isolate* isolate, Handle<String> string,
+ int start_index) {
+ bool one_byte = true;
+ int length = string->length();
+
+ int unescaped_length = 0;
+ {
+ DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = string->GetCharVector<Char>();
+ for (int i = start_index; i < length; unescaped_length++) {
+ int step;
+ if (UnescapeChar(vector, i, length, &step) >
+ String::kMaxOneByteCharCode) {
+ one_byte = false;
+ }
+ i += step;
+ }
+ }
+
+ DCHECK(start_index < length);
+ Handle<String> first_part =
+ isolate->factory()->NewProperSubString(string, 0, start_index);
+
+ int dest_position = 0;
+ Handle<String> second_part;
+ DCHECK(unescaped_length <= String::kMaxLength);
+ if (one_byte) {
+ Handle<SeqOneByteString> dest = isolate->factory()
+ ->NewRawOneByteString(unescaped_length)
+ .ToHandleChecked();
+ DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = string->GetCharVector<Char>();
+ for (int i = start_index; i < length; dest_position++) {
+ int step;
+ dest->SeqOneByteStringSet(dest_position,
+ UnescapeChar(vector, i, length, &step));
+ i += step;
+ }
+ second_part = dest;
+ } else {
+ Handle<SeqTwoByteString> dest = isolate->factory()
+ ->NewRawTwoByteString(unescaped_length)
+ .ToHandleChecked();
+ DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = string->GetCharVector<Char>();
+ for (int i = start_index; i < length; dest_position++) {
+ int step;
+ dest->SeqTwoByteStringSet(dest_position,
+ UnescapeChar(vector, i, length, &step));
+ i += step;
+ }
+ second_part = dest;
+ }
+ return isolate->factory()->NewConsString(first_part, second_part);
+}
+
+bool IsNotEscaped(uint16_t c) {
+ if (IsAlphaNumeric(c)) {
+ return true;
+ }
+ // @*_+-./
+ switch (c) {
+ case '@':
+ case '*':
+ case '_':
+ case '+':
+ case '-':
+ case '.':
+ case '/':
+ return true;
+ default:
+ return false;
+ }
+}
+
+template <typename Char>
+static MaybeHandle<String> UnescapePrivate(Isolate* isolate,
+ Handle<String> source) {
+ int index;
+ {
+ DisallowHeapAllocation no_allocation;
+ StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
+ index = search.Search(source->GetCharVector<Char>(), 0);
+ if (index < 0) return source;
+ }
+ return UnescapeSlow<Char>(isolate, source, index);
+}
+
+template <typename Char>
+static MaybeHandle<String> EscapePrivate(Isolate* isolate,
+ Handle<String> string) {
+ DCHECK(string->IsFlat());
+ int escaped_length = 0;
+ int length = string->length();
+
+ {
+ DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = string->GetCharVector<Char>();
+ for (int i = 0; i < length; i++) {
+ uint16_t c = vector[i];
+ if (c >= 256) {
+ escaped_length += 6;
+ } else if (IsNotEscaped(c)) {
+ escaped_length++;
+ } else {
+ escaped_length += 3;
+ }
+
+ // We don't allow strings that are longer than a maximal length.
+ DCHECK(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
+ if (escaped_length > String::kMaxLength) break; // Provoke exception.
+ }
+ }
+
+ // No length change implies no change. Return original string if no change.
+ if (escaped_length == length) return string;
+
+ Handle<SeqOneByteString> dest;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, dest, isolate->factory()->NewRawOneByteString(escaped_length),
+ String);
+ int dest_position = 0;
+
+ {
+ DisallowHeapAllocation no_allocation;
+ Vector<const Char> vector = string->GetCharVector<Char>();
+ for (int i = 0; i < length; i++) {
+ uint16_t c = vector[i];
+ if (c >= 256) {
+ dest->SeqOneByteStringSet(dest_position, '%');
+ dest->SeqOneByteStringSet(dest_position + 1, 'u');
+ dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c >> 12));
+ dest->SeqOneByteStringSet(dest_position + 3,
+ HexCharOfValue((c >> 8) & 0xf));
+ dest->SeqOneByteStringSet(dest_position + 4,
+ HexCharOfValue((c >> 4) & 0xf));
+ dest->SeqOneByteStringSet(dest_position + 5, HexCharOfValue(c & 0xf));
+ dest_position += 6;
+ } else if (IsNotEscaped(c)) {
+ dest->SeqOneByteStringSet(dest_position, c);
+ dest_position++;
+ } else {
+ dest->SeqOneByteStringSet(dest_position, '%');
+ dest->SeqOneByteStringSet(dest_position + 1, HexCharOfValue(c >> 4));
+ dest->SeqOneByteStringSet(dest_position + 2, HexCharOfValue(c & 0xf));
+ dest_position += 3;
+ }
+ }
+ }
+
+ return dest;
+}
+
+} // Anonymous namespace
+
+MaybeHandle<String> Uri::Escape(Isolate* isolate, Handle<String> string) {
+ Handle<String> result;
+ string = String::Flatten(string);
+ return string->IsOneByteRepresentationUnderneath()
+ ? EscapePrivate<uint8_t>(isolate, string)
+ : EscapePrivate<uc16>(isolate, string);
+}
+
+MaybeHandle<String> Uri::Unescape(Isolate* isolate, Handle<String> string) {
+ Handle<String> result;
+ string = String::Flatten(string);
+ return string->IsOneByteRepresentationUnderneath()
+ ? UnescapePrivate<uint8_t>(isolate, string)
+ : UnescapePrivate<uc16>(isolate, string);
+}
+
+} // namespace internal
+} // namespace v8
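
The decode path above pairs TwoDigitHex() with the reserved-set check in AddToBuffer(): decodeURI (is_uri == true) must leave escapes of reserved separators such as "%2F" intact, while decodeURIComponent unescapes everything. A standalone sketch of the one-byte fast path only (the real code additionally reassembles multi-byte UTF-8 escapes and throws a URIError on malformed input, which this sketch skips):

    #include <cstdio>
    #include <string>

    int HexValue(char c) {
      if (c >= '0' && c <= '9') return c - '0';
      if (c >= 'a' && c <= 'f') return c - 'a' + 10;
      if (c >= 'A' && c <= 'F') return c - 'A' + 10;
      return -1;
    }

    // Combine two hex digits into one octet; -1 signals a bad escape.
    int TwoDigitHex(char a, char b) {
      int hi = HexValue(a), lo = HexValue(b);
      return (hi < 0 || lo < 0) ? -1 : (hi << 4) + lo;
    }

    // Mirrors IsReservedPredicate() above.
    bool IsReserved(char c) {
      switch (c) {
        case '#': case '$': case '&': case '+': case ',': case '/':
        case ':': case ';': case '=': case '?': case '@':
          return true;
        default:
          return false;
      }
    }

    std::string Decode(const std::string& s, bool is_uri) {
      std::string out;
      for (size_t k = 0; k < s.size(); ++k) {
        int v;
        if (s[k] == '%' && k + 2 < s.size() &&
            (v = TwoDigitHex(s[k + 1], s[k + 2])) >= 0) {
          char decoded = static_cast<char>(v);
          if (is_uri && IsReserved(decoded)) {
            out.append(s, k, 3);  // keep "%2F" etc. escaped, as decodeURI does
          } else {
            out.push_back(decoded);
          }
          k += 2;
        } else {
          out.push_back(s[k]);
        }
      }
      return out;
    }

    int main() {
      std::printf("%s\n", Decode("a%2Fb%20c", true).c_str());   // a%2Fb c
      std::printf("%s\n", Decode("a%2Fb%20c", false).c_str());  // a/b c
    }
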
diff --git a/deps/v8/src/uri.h b/deps/v8/src/uri.h
new file mode 100644
index 0000000000..dfa057fd09
--- /dev/null
+++ b/deps/v8/src/uri.h
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_URI_H_
+#define V8_URI_H_
+
+#include "src/allocation.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class Uri : public AllStatic {
+ public:
+ // ES6 section 18.2.6.2 decodeURI (encodedURI)
+ static MaybeHandle<String> DecodeUri(Isolate* isolate, Handle<String> uri) {
+ return Decode(isolate, uri, true);
+ }
+
+ // ES6 section 18.2.6.3 decodeURIComponent (encodedURIComponent)
+ static MaybeHandle<String> DecodeUriComponent(Isolate* isolate,
+ Handle<String> component) {
+ return Decode(isolate, component, false);
+ }
+
+ // ES6 section 18.2.6.4 encodeURI (uri)
+ static MaybeHandle<String> EncodeUri(Isolate* isolate, Handle<String> uri) {
+ return Encode(isolate, uri, true);
+ }
+
+  // ES6 section 18.2.6.5 encodeURIComponent (uriComponent)
+ static MaybeHandle<String> EncodeUriComponent(Isolate* isolate,
+ Handle<String> component) {
+ return Encode(isolate, component, false);
+ }
+
+ // ES6 section B.2.1.1 escape (string)
+ static MaybeHandle<String> Escape(Isolate* isolate, Handle<String> string);
+
+ // ES6 section B.2.1.2 unescape (string)
+ static MaybeHandle<String> Unescape(Isolate* isolate, Handle<String> string);
+
+ private:
+ static MaybeHandle<String> Decode(Isolate* isolate, Handle<String> uri,
+ bool is_uri);
+ static MaybeHandle<String> Encode(Isolate* isolate, Handle<String> uri,
+ bool is_uri);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_URI_H_
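
The Annex B escape() entry point declared here is implemented by EscapePrivate() in uri.cc: alphanumerics and "@*_+-./" pass through, other code units below 256 become "%XX", and everything else becomes the non-standard "%uXXXX" form (hence the 1/3/6-character length precomputation). A standalone sketch of that classification:

    #include <cstdio>
    #include <string>

    char HexChar(int v) { return v < 10 ? '0' + v : 'A' + (v - 10); }

    // Mirrors IsNotEscaped(): alphanumerics plus @*_+-./ stay literal.
    bool IsNotEscaped(char16_t c) {
      if ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
          (c >= 'a' && c <= 'z'))
        return true;
      switch (c) {
        case '@': case '*': case '_': case '+':
        case '-': case '.': case '/':
          return true;
        default:
          return false;
      }
    }

    std::string Escape(const std::u16string& s) {
      std::string out;
      for (char16_t c : s) {
        if (c >= 256) {  // six-character %uXXXX form
          out += "%u";
          out += HexChar(c >> 12);
          out += HexChar((c >> 8) & 0xF);
          out += HexChar((c >> 4) & 0xF);
          out += HexChar(c & 0xF);
        } else if (IsNotEscaped(c)) {  // one character, unchanged
          out += static_cast<char>(c);
        } else {  // three-character %XX form
          out += '%';
          out += HexChar(c >> 4);
          out += HexChar(c & 0xF);
        }
      }
      return out;
    }

    int main() {
      std::printf("%s\n", Escape(u"a b/\u20AC").c_str());  // a%20b/%u20AC
    }
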
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index c46028f059..16b5b7c61f 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -430,11 +430,8 @@ void init_memcopy_functions(Isolate* isolate) {
bool DoubleToBoolean(double d) {
// NaN, +0, and -0 should return the false object
-#if V8_TARGET_LITTLE_ENDIAN
- union IeeeDoubleLittleEndianArchType u;
-#else
- union IeeeDoubleBigEndianArchType u;
-#endif
+ IeeeDoubleArchType u;
+
u.d = d;
if (u.bits.exp == 2047) {
// Detect NaN for IEEE double precision floating point.
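
Behaviorally, DoubleToBoolean() implements JavaScript ToBoolean for numbers: NaN, +0, and -0 map to false, every other value (including the infinities) to true. The code above gets there by inspecting the IEEE-754 bit pattern through the endian-agnostic IeeeDoubleArchType union (exponent 2047 with a nonzero mantissa is NaN); a portable restatement of the same semantics:

    #include <cmath>
    #include <cstdio>

    bool DoubleToBoolean(double d) {
      if (std::isnan(d)) return false;  // exp == 2047, mantissa != 0
      return d != 0.0;                  // +0 and -0 both compare equal to 0
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  DoubleToBoolean(0.0), DoubleToBoolean(-0.0),
                  DoubleToBoolean(NAN), DoubleToBoolean(3.14));  // 0 0 0 1
    }
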
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 44865edede..8eca39207d 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -13,6 +13,7 @@
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/bits.h"
+#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -36,6 +37,11 @@ inline int HexValue(uc32 c) {
return -1;
}
+inline char HexCharOfValue(int value) {
+  DCHECK(0 <= value && value < 16);
+ if (value < 10) return value + '0';
+ return value - 10 + 'A';
+}
inline int BoolToInt(bool b) { return b ? 1 : 0; }
@@ -194,6 +200,23 @@ T Min(T a, T b) {
return a < b ? a : b;
}
+// Returns the maximum of the two parameters according to JavaScript semantics.
+template <typename T>
+T JSMax(T x, T y) {
+ if (std::isnan(x)) return x;
+ if (std::isnan(y)) return y;
+ if (std::signbit(x) < std::signbit(y)) return x;
+ return x > y ? x : y;
+}
+
+// Returns the minimum of the two parameters according to JavaScript semantics.
+template <typename T>
+T JSMin(T x, T y) {
+ if (std::isnan(x)) return x;
+ if (std::isnan(y)) return y;
+ if (std::signbit(x) < std::signbit(y)) return y;
+ return x > y ? y : x;
+}
// Returns the absolute value of its argument.
template <typename T>
@@ -299,8 +322,9 @@ class BitFieldBase {
static T decode(U value) {
return static_cast<T>((value & kMask) >> shift);
}
-};
+ STATIC_ASSERT((kNext - 1) / 8 < sizeof(U));
+};
template <class T, int shift, int size>
class BitField8 : public BitFieldBase<T, shift, size, uint8_t> {};
@@ -932,42 +956,21 @@ class TokenDispenserForFinally {
// ----------------------------------------------------------------------------
// I/O support.
-#if __GNUC__ >= 4
-// On gcc we can ask the compiler to check the types of %d-style format
-// specifiers and their associated arguments. TODO(erikcorry) fix this
-// so it works on MacOSX.
-#if defined(__MACH__) && defined(__APPLE__)
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#define PRINTF_METHOD_CHECKING
-#define FPRINTF_METHOD_CHECKING
-#else // MacOsX.
-#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
-#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
-#define PRINTF_METHOD_CHECKING __attribute__ ((format (printf, 2, 3)))
-#define FPRINTF_METHOD_CHECKING __attribute__ ((format (printf, 3, 4)))
-#endif
-#else
-#define PRINTF_CHECKING
-#define FPRINTF_CHECKING
-#define PRINTF_METHOD_CHECKING
-#define FPRINTF_METHOD_CHECKING
-#endif
-
// Our version of printf().
-void PRINTF_CHECKING PrintF(const char* format, ...);
-void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
+void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
+void PRINTF_FORMAT(2, 3) PrintF(FILE* out, const char* format, ...);
// Prepends the current process ID to the output.
-void PRINTF_CHECKING PrintPID(const char* format, ...);
+void PRINTF_FORMAT(1, 2) PrintPID(const char* format, ...);
// Prepends the current process ID and given isolate pointer to the output.
-void PrintIsolate(void* isolate, const char* format, ...);
+void PRINTF_FORMAT(2, 3) PrintIsolate(void* isolate, const char* format, ...);
// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
-int FPRINTF_CHECKING SNPrintF(Vector<char> str, const char* format, ...);
-int VSNPrintF(Vector<char> str, const char* format, va_list args);
+int PRINTF_FORMAT(2, 3) SNPrintF(Vector<char> str, const char* format, ...);
+int PRINTF_FORMAT(2, 0)
+ VSNPrintF(Vector<char> str, const char* format, va_list args);
void StrNCpy(Vector<char> dest, const char* src, size_t n);
@@ -1114,13 +1117,6 @@ inline void MemsetPointer(T** dest, U* value, int counter) {
#define STOS "stosq"
#endif
#endif
-#if defined(__native_client__)
- // This STOS sequence does not validate for x86_64 Native Client.
- // Here we #undef STOS to force use of the slower C version.
- // TODO(bradchen): Profile V8 and implement a faster REP STOS
- // here if the profile indicates it matters.
-#undef STOS
-#endif
#if defined(MEMORY_SANITIZER)
// MemorySanitizer does not understand inline assembly.
@@ -1470,10 +1466,11 @@ class StringBuilder : public SimpleStringBuilder {
StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
// Add formatted contents to the builder just like printf().
- void AddFormatted(const char* format, ...);
+ void PRINTF_FORMAT(2, 3) AddFormatted(const char* format, ...);
// Add formatted contents like printf based on a va_list.
- void AddFormattedList(const char* format, va_list list);
+ void PRINTF_FORMAT(2, 0) AddFormattedList(const char* format, va_list list);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};
@@ -1520,22 +1517,26 @@ inline uintptr_t GetCurrentStackPosition() {
template <typename V>
static inline V ReadUnalignedValue(const void* p) {
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
return *reinterpret_cast<const V*>(p);
-#else
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
V r;
memmove(&r, p, sizeof(V));
return r;
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
}
template <typename V>
static inline void WriteUnalignedValue(void* p, V value) {
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
*(reinterpret_cast<V*>(p)) = value;
-#else // V8_TARGET_ARCH_MIPS
+#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
memmove(p, &value, sizeof(V));
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
+}
+
+static inline double ReadFloatValue(const void* p) {
+ return ReadUnalignedValue<float>(p);
}
static inline double ReadDoubleValue(const void* p) {
@@ -1562,6 +1563,33 @@ static inline void WriteUnalignedUInt32(void* p, uint32_t value) {
WriteUnalignedValue(p, value);
}
+template <typename V>
+static inline V ReadLittleEndianValue(const void* p) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ return ReadUnalignedValue<V>(p);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ V ret = 0;
+ const byte* src = reinterpret_cast<const byte*>(p);
+ byte* dst = reinterpret_cast<byte*>(&ret);
+ for (size_t i = 0; i < sizeof(V); i++) {
+ dst[i] = src[sizeof(V) - i - 1];
+ }
+ return ret;
+#endif // V8_TARGET_LITTLE_ENDIAN
+}
+
+template <typename V>
+static inline void WriteLittleEndianValue(void* p, V value) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ WriteUnalignedValue<V>(p, value);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ byte* src = reinterpret_cast<byte*>(&value);
+ byte* dst = reinterpret_cast<byte*>(p);
+ for (size_t i = 0; i < sizeof(V); i++) {
+ dst[i] = src[sizeof(V) - i - 1];
+ }
+#endif // V8_TARGET_LITTLE_ENDIAN
+}
} // namespace internal
} // namespace v8
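
JSMax() and JSMin() exist because std::max/std::min don't match JavaScript's Math.max/Math.min: JavaScript propagates NaN and orders -0 below +0, which is what the std::signbit comparison encodes. A standalone copy of the two templates exercising the divergent cases:

    #include <cmath>
    #include <cstdio>

    template <typename T>
    T JSMax(T x, T y) {
      if (std::isnan(x)) return x;
      if (std::isnan(y)) return y;
      if (std::signbit(x) < std::signbit(y)) return x;  // prefer +0 over -0
      return x > y ? x : y;
    }

    template <typename T>
    T JSMin(T x, T y) {
      if (std::isnan(x)) return x;
      if (std::isnan(y)) return y;
      if (std::signbit(x) < std::signbit(y)) return y;  // prefer -0 over +0
      return x > y ? y : x;
    }

    int main() {
      // Math.min(0, -0) is -0 in JavaScript; std::min(0.0, -0.0) is +0.
      std::printf("%g %g\n", JSMin(0.0, -0.0), JSMax(0.0, -0.0));  // -0 0
      std::printf("%g\n", JSMin(1.0, std::nan("")));  // nan, not 1
    }
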
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 154cf6201d..08796f3f0e 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -14,9 +14,9 @@
#include "src/elements.h"
#include "src/frames.h"
#include "src/isolate.h"
+#include "src/libsampler/sampler.h"
#include "src/objects.h"
#include "src/profiler/heap-profiler.h"
-#include "src/profiler/sampler.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
@@ -45,10 +45,9 @@ void V8::TearDown() {
Bootstrapper::TearDownExtensions();
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
- ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
Isolate::GlobalTearDown();
- Sampler::TearDown();
+ sampler::Sampler::TearDown();
FlagList::ResetAllFlags(); // Frees memory held by string arguments.
}
@@ -76,7 +75,7 @@ void V8::InitializeOncePerProcessImpl() {
Isolate::InitializeOncePerProcess();
- Sampler::SetUp();
+ sampler::Sampler::SetUp();
CpuFeatures::Probe(false);
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
new file mode 100644
index 0000000000..1adb2fe8a1
--- /dev/null
+++ b/deps/v8/src/v8.gyp
@@ -0,0 +1,2419 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ 'v8_random_seed%': 314159265,
+ 'v8_vector_stores%': 0,
+ 'embed_script%': "",
+ 'warmup_script%': "",
+ 'v8_extra_library_files%': [],
+ 'v8_experimental_extra_library_files%': [],
+ 'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+ 'mkpeephole_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mkpeephole<(EXECUTABLE_SUFFIX)',
+ },
+ 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'v8',
+ 'dependencies_traverse': 1,
+ 'dependencies': ['v8_maybe_snapshot'],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['component=="shared_library"', {
+ 'type': '<(component)',
+ 'sources': [
+ # Note: on non-Windows we still build this file so that gyp
+ # has some sources to link into the component.
+ 'v8dll-main.cc',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'defines': [
+ 'V8_SHARED',
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'V8_SHARED',
+ 'USING_V8_SHARED',
+ ],
+ },
+ 'target_conditions': [
+ ['OS=="android" and _toolset=="target"', {
+ 'libraries': [
+ '-llog',
+ ],
+ 'include_dirs': [
+ 'src/common/android/include',
+ ],
+ }],
+ ],
+ 'conditions': [
+ ['OS=="mac"', {
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
+ },
+ }],
+ ['soname_version!=""', {
+ 'product_extension': 'so.<(soname_version)',
+ }],
+ ],
+ },
+ {
+ 'type': 'none',
+ }],
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include',
+ ],
+ },
+ },
+ {
+ # This rule delegates to either v8_snapshot, v8_nosnapshot, or
+ # v8_external_snapshot, depending on the current variables.
+ # The intention is to make the 'calling' rules a bit simpler.
+ 'target_name': 'v8_maybe_snapshot',
+ 'type': 'none',
+ 'conditions': [
+ ['v8_use_snapshot!="true"', {
+        # The dependency on v8_base should come from a transitive
+        # dependency; however, the Android toolchain requires libv8_base.a
+        # to appear before libv8_snapshot.a, so it's listed explicitly.
+ 'dependencies': ['v8_base', 'v8_nosnapshot'],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
+        # The dependency on v8_base should come from a transitive
+        # dependency; however, the Android toolchain requires libv8_base.a
+        # to appear before libv8_snapshot.a, so it's listed explicitly.
+ 'dependencies': ['v8_base', 'v8_snapshot'],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
+ 'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
+ }],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
+ 'dependencies': ['v8_base', 'v8_external_snapshot'],
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ ],
+ }, {
+ 'inputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ }],
+ ],
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ]
+ },
+ {
+ 'target_name': 'v8_snapshot',
+ 'type': 'static_library',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ 'dependencies': [
+ 'mksnapshot#host',
+ 'js2c#host',
+ ],
+ }, {
+ 'toolsets': ['target'],
+ 'dependencies': [
+ 'mksnapshot',
+ 'js2c',
+ ],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'V8_SHARED',
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'V8_SHARED',
+ 'USING_V8_SHARED',
+ ],
+ },
+ }],
+ ],
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'run_mksnapshot',
+ 'inputs': [
+ '<(mksnapshot_exec)',
+ ],
+ 'conditions': [
+ ['embed_script!=""', {
+ 'inputs': [
+ '<(embed_script)',
+ ],
+ }],
+ ['warmup_script!=""', {
+ 'inputs': [
+ '<(warmup_script)',
+ ],
+ }],
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/snapshot.cc',
+ ],
+ 'variables': {
+ 'mksnapshot_flags': [],
+ 'conditions': [
+ ['v8_random_seed!=0', {
+ 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+ }],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
+ ],
+ },
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'v8_nosnapshot',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'snapshot/snapshot-empty.cc',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ 'dependencies': ['js2c#host'],
+ }, {
+ 'toolsets': ['target'],
+ 'dependencies': ['js2c'],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ 'V8_SHARED',
+ ],
+ }],
+ ]
+ },
+ {
+ 'target_name': 'v8_external_snapshot',
+ 'type': 'static_library',
+ 'conditions': [
+ [ 'v8_use_external_startup_data==1', {
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ 'dependencies': [
+ 'mksnapshot#host',
+ 'js2c#host',
+ 'natives_blob',
+ ]}, {
+ 'toolsets': ['target'],
+ 'dependencies': [
+ 'mksnapshot',
+ 'js2c',
+ 'natives_blob',
+ ],
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'V8_SHARED',
+ 'BUILDING_V8_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'V8_SHARED',
+ 'USING_V8_SHARED',
+ ],
+ },
+ }],
+ ],
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ 'snapshot/natives-external.cc',
+ 'snapshot/snapshot-external.cc',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'run_mksnapshot (external)',
+ 'inputs': [
+ '<(mksnapshot_exec)',
+ ],
+ 'variables': {
+ 'mksnapshot_flags': [],
+ 'conditions': [
+ ['v8_random_seed!=0', {
+ 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+ }],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
+ ],
+ },
+ 'conditions': [
+ ['embed_script!=""', {
+ 'inputs': [
+ '<(embed_script)',
+ ],
+ }],
+ ['warmup_script!=""', {
+ 'inputs': [
+ '<(warmup_script)',
+ ],
+ }],
+ ['want_separate_host_toolset==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'outputs': [
+ '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ ],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ }],
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+ '<(embed_script)',
+ '<(warmup_script)',
+ ],
+ }],
+ ],
+ },
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_base',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_libbase',
+ 'v8_libsampler',
+ ],
+ 'objs': ['foo.o'],
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ '<(SHARED_INTERMEDIATE_DIR)'
+ ],
+ 'actions':[{
+ 'action_name': 'run mkpeephole',
+ 'inputs': ['<(mkpeephole_exec)'],
+ 'outputs': ['<(INTERMEDIATE_DIR)/bytecode-peephole-table.cc'],
+ 'action': ['<(mkpeephole_exec)', '<(INTERMEDIATE_DIR)/bytecode-peephole-table.cc' ],
+ 'process_outputs_as_sources': 1,
+ }],
+ 'sources': [ ### gcmole(all) ###
+ '../include/v8-debug.h',
+ '../include/v8-experimental.h',
+ '../include/v8-platform.h',
+ '../include/v8-profiler.h',
+ '../include/v8-testing.h',
+ '../include/v8-util.h',
+ '../include/v8-version.h',
+ '../include/v8.h',
+ '../include/v8config.h',
+ 'accessors.cc',
+ 'accessors.h',
+ 'address-map.cc',
+ 'address-map.h',
+ 'allocation.cc',
+ 'allocation.h',
+ 'allocation-site-scopes.cc',
+ 'allocation-site-scopes.h',
+ 'api-experimental.cc',
+ 'api-experimental.h',
+ 'api.cc',
+ 'api.h',
+ 'api-arguments-inl.h',
+ 'api-arguments.cc',
+ 'api-arguments.h',
+ 'api-natives.cc',
+ 'api-natives.h',
+ 'arguments.cc',
+ 'arguments.h',
+ 'asmjs/asm-js.cc',
+ 'asmjs/asm-js.h',
+ 'asmjs/asm-typer.cc',
+ 'asmjs/asm-typer.h',
+ 'asmjs/asm-types.cc',
+ 'asmjs/asm-types.h',
+ 'asmjs/asm-wasm-builder.cc',
+ 'asmjs/asm-wasm-builder.h',
+ 'assembler.cc',
+ 'assembler.h',
+ 'assert-scope.h',
+ 'assert-scope.cc',
+ 'ast/ast-expression-rewriter.cc',
+ 'ast/ast-expression-rewriter.h',
+ 'ast/ast-literal-reindexer.cc',
+ 'ast/ast-literal-reindexer.h',
+ 'ast/ast-numbering.cc',
+ 'ast/ast-numbering.h',
+ 'ast/ast-traversal-visitor.h',
+ 'ast/ast-type-bounds.h',
+ 'ast/ast-value-factory.cc',
+ 'ast/ast-value-factory.h',
+ 'ast/ast.cc',
+ 'ast/ast.h',
+ 'ast/context-slot-cache.cc',
+ 'ast/context-slot-cache.h',
+ 'ast/modules.cc',
+ 'ast/modules.h',
+ 'ast/prettyprinter.cc',
+ 'ast/prettyprinter.h',
+ 'ast/scopeinfo.cc',
+ 'ast/scopeinfo.h',
+ 'ast/scopes.cc',
+ 'ast/scopes.h',
+ 'ast/variables.cc',
+ 'ast/variables.h',
+ 'background-parsing-task.cc',
+ 'background-parsing-task.h',
+ 'bailout-reason.cc',
+ 'bailout-reason.h',
+ 'basic-block-profiler.cc',
+ 'basic-block-profiler.h',
+ 'bignum-dtoa.cc',
+ 'bignum-dtoa.h',
+ 'bignum.cc',
+ 'bignum.h',
+ 'bit-vector.cc',
+ 'bit-vector.h',
+ 'bootstrapper.cc',
+ 'bootstrapper.h',
+ 'builtins/builtins-api.cc',
+ 'builtins/builtins-arraybuffer.cc',
+ 'builtins/builtins-array.cc',
+ 'builtins/builtins-boolean.cc',
+ 'builtins/builtins-call.cc',
+ 'builtins/builtins-callsite.cc',
+ 'builtins/builtins-conversion.cc',
+ 'builtins/builtins-dataview.cc',
+ 'builtins/builtins-date.cc',
+ 'builtins/builtins-debug.cc',
+ 'builtins/builtins-error.cc',
+ 'builtins/builtins-function.cc',
+ 'builtins/builtins-generator.cc',
+ 'builtins/builtins-global.cc',
+ 'builtins/builtins-handler.cc',
+ 'builtins/builtins-internal.cc',
+ 'builtins/builtins-interpreter.cc',
+ 'builtins/builtins-json.cc',
+ 'builtins/builtins-math.cc',
+ 'builtins/builtins-number.cc',
+ 'builtins/builtins-object.cc',
+ 'builtins/builtins-proxy.cc',
+ 'builtins/builtins-reflect.cc',
+ 'builtins/builtins-sharedarraybuffer.cc',
+ 'builtins/builtins-string.cc',
+ 'builtins/builtins-symbol.cc',
+ 'builtins/builtins-typedarray.cc',
+ 'builtins/builtins-utils.h',
+ 'builtins/builtins.cc',
+ 'builtins/builtins.h',
+ 'cached-powers.cc',
+ 'cached-powers.h',
+ 'cancelable-task.cc',
+ 'cancelable-task.h',
+ 'char-predicates.cc',
+ 'char-predicates-inl.h',
+ 'char-predicates.h',
+ 'checks.h',
+ 'code-events.h',
+ 'code-factory.cc',
+ 'code-factory.h',
+ 'code-stub-assembler.cc',
+ 'code-stub-assembler.h',
+ 'code-stubs.cc',
+ 'code-stubs.h',
+ 'code-stubs-hydrogen.cc',
+ 'codegen.cc',
+ 'codegen.h',
+ 'collector.h',
+ 'compilation-cache.cc',
+ 'compilation-cache.h',
+ 'compilation-dependencies.cc',
+ 'compilation-dependencies.h',
+ 'compilation-statistics.cc',
+ 'compilation-statistics.h',
+ 'compiler/access-builder.cc',
+ 'compiler/access-builder.h',
+ 'compiler/access-info.cc',
+ 'compiler/access-info.h',
+ 'compiler/all-nodes.cc',
+ 'compiler/all-nodes.h',
+ 'compiler/ast-graph-builder.cc',
+ 'compiler/ast-graph-builder.h',
+ 'compiler/ast-loop-assignment-analyzer.cc',
+ 'compiler/ast-loop-assignment-analyzer.h',
+ 'compiler/basic-block-instrumentor.cc',
+ 'compiler/basic-block-instrumentor.h',
+ 'compiler/branch-elimination.cc',
+ 'compiler/branch-elimination.h',
+ 'compiler/bytecode-branch-analysis.cc',
+ 'compiler/bytecode-branch-analysis.h',
+ 'compiler/bytecode-graph-builder.cc',
+ 'compiler/bytecode-graph-builder.h',
+ 'compiler/bytecode-loop-analysis.cc',
+ 'compiler/bytecode-loop-analysis.h',
+ 'compiler/c-linkage.cc',
+ 'compiler/checkpoint-elimination.cc',
+ 'compiler/checkpoint-elimination.h',
+ 'compiler/code-generator-impl.h',
+ 'compiler/code-generator.cc',
+ 'compiler/code-generator.h',
+ 'compiler/code-assembler.cc',
+ 'compiler/code-assembler.h',
+ 'compiler/common-node-cache.cc',
+ 'compiler/common-node-cache.h',
+ 'compiler/common-operator-reducer.cc',
+ 'compiler/common-operator-reducer.h',
+ 'compiler/common-operator.cc',
+ 'compiler/common-operator.h',
+ 'compiler/control-builders.cc',
+ 'compiler/control-builders.h',
+ 'compiler/control-equivalence.cc',
+ 'compiler/control-equivalence.h',
+ 'compiler/control-flow-optimizer.cc',
+ 'compiler/control-flow-optimizer.h',
+ 'compiler/dead-code-elimination.cc',
+ 'compiler/dead-code-elimination.h',
+ 'compiler/diamond.h',
+ 'compiler/effect-control-linearizer.cc',
+ 'compiler/effect-control-linearizer.h',
+ 'compiler/escape-analysis.cc',
+ 'compiler/escape-analysis.h',
+ "compiler/escape-analysis-reducer.cc",
+ "compiler/escape-analysis-reducer.h",
+ 'compiler/frame.cc',
+ 'compiler/frame.h',
+ 'compiler/frame-elider.cc',
+ 'compiler/frame-elider.h',
+ "compiler/frame-states.cc",
+ "compiler/frame-states.h",
+ 'compiler/gap-resolver.cc',
+ 'compiler/gap-resolver.h',
+ 'compiler/graph-reducer.cc',
+ 'compiler/graph-reducer.h',
+ 'compiler/graph-replay.cc',
+ 'compiler/graph-replay.h',
+ 'compiler/graph-trimmer.cc',
+ 'compiler/graph-trimmer.h',
+ 'compiler/graph-visualizer.cc',
+ 'compiler/graph-visualizer.h',
+ 'compiler/graph.cc',
+ 'compiler/graph.h',
+ 'compiler/instruction-codes.h',
+ 'compiler/instruction-selector-impl.h',
+ 'compiler/instruction-selector.cc',
+ 'compiler/instruction-selector.h',
+ 'compiler/instruction-scheduler.cc',
+ 'compiler/instruction-scheduler.h',
+ 'compiler/instruction.cc',
+ 'compiler/instruction.h',
+ 'compiler/int64-lowering.cc',
+ 'compiler/int64-lowering.h',
+ 'compiler/js-builtin-reducer.cc',
+ 'compiler/js-builtin-reducer.h',
+ 'compiler/js-call-reducer.cc',
+ 'compiler/js-call-reducer.h',
+ 'compiler/js-context-specialization.cc',
+ 'compiler/js-context-specialization.h',
+ 'compiler/js-create-lowering.cc',
+ 'compiler/js-create-lowering.h',
+ 'compiler/js-frame-specialization.cc',
+ 'compiler/js-frame-specialization.h',
+ 'compiler/js-generic-lowering.cc',
+ 'compiler/js-generic-lowering.h',
+ 'compiler/js-global-object-specialization.cc',
+ 'compiler/js-global-object-specialization.h',
+ 'compiler/js-graph.cc',
+ 'compiler/js-graph.h',
+ 'compiler/js-inlining.cc',
+ 'compiler/js-inlining.h',
+ 'compiler/js-inlining-heuristic.cc',
+ 'compiler/js-inlining-heuristic.h',
+ 'compiler/js-intrinsic-lowering.cc',
+ 'compiler/js-intrinsic-lowering.h',
+ 'compiler/js-native-context-specialization.cc',
+ 'compiler/js-native-context-specialization.h',
+ 'compiler/js-operator.cc',
+ 'compiler/js-operator.h',
+ 'compiler/js-typed-lowering.cc',
+ 'compiler/js-typed-lowering.h',
+ 'compiler/jump-threading.cc',
+ 'compiler/jump-threading.h',
+ 'compiler/linkage.cc',
+ 'compiler/linkage.h',
+ 'compiler/liveness-analyzer.cc',
+ 'compiler/liveness-analyzer.h',
+ 'compiler/live-range-separator.cc',
+ 'compiler/live-range-separator.h',
+ 'compiler/load-elimination.cc',
+ 'compiler/load-elimination.h',
+ 'compiler/loop-analysis.cc',
+ 'compiler/loop-analysis.h',
+ 'compiler/loop-peeling.cc',
+ 'compiler/loop-peeling.h',
+ 'compiler/loop-variable-optimizer.cc',
+ 'compiler/loop-variable-optimizer.h',
+ 'compiler/machine-operator-reducer.cc',
+ 'compiler/machine-operator-reducer.h',
+ 'compiler/machine-operator.cc',
+ 'compiler/machine-operator.h',
+ 'compiler/memory-optimizer.cc',
+ 'compiler/memory-optimizer.h',
+ 'compiler/move-optimizer.cc',
+ 'compiler/move-optimizer.h',
+ 'compiler/node-aux-data.h',
+ 'compiler/node-cache.cc',
+ 'compiler/node-cache.h',
+ 'compiler/node-marker.cc',
+ 'compiler/node-marker.h',
+ 'compiler/node-matchers.cc',
+ 'compiler/node-matchers.h',
+ 'compiler/node-properties.cc',
+ 'compiler/node-properties.h',
+ 'compiler/node.cc',
+ 'compiler/node.h',
+ 'compiler/opcodes.cc',
+ 'compiler/opcodes.h',
+ 'compiler/operation-typer.cc',
+ 'compiler/operation-typer.h',
+ 'compiler/operator-properties.cc',
+ 'compiler/operator-properties.h',
+ 'compiler/operator.cc',
+ 'compiler/operator.h',
+ 'compiler/osr.cc',
+ 'compiler/osr.h',
+ 'compiler/pipeline.cc',
+ 'compiler/pipeline.h',
+ 'compiler/pipeline-statistics.cc',
+ 'compiler/pipeline-statistics.h',
+ 'compiler/raw-machine-assembler.cc',
+ 'compiler/raw-machine-assembler.h',
+ 'compiler/redundancy-elimination.cc',
+ 'compiler/redundancy-elimination.h',
+ 'compiler/register-allocator.cc',
+ 'compiler/register-allocator.h',
+ 'compiler/register-allocator-verifier.cc',
+ 'compiler/register-allocator-verifier.h',
+ 'compiler/representation-change.cc',
+ 'compiler/representation-change.h',
+ 'compiler/schedule.cc',
+ 'compiler/schedule.h',
+ 'compiler/scheduler.cc',
+ 'compiler/scheduler.h',
+ 'compiler/select-lowering.cc',
+ 'compiler/select-lowering.h',
+ 'compiler/simplified-lowering.cc',
+ 'compiler/simplified-lowering.h',
+ 'compiler/simplified-operator-reducer.cc',
+ 'compiler/simplified-operator-reducer.h',
+ 'compiler/simplified-operator.cc',
+ 'compiler/simplified-operator.h',
+ 'compiler/source-position.cc',
+ 'compiler/source-position.h',
+ 'compiler/state-values-utils.cc',
+ 'compiler/state-values-utils.h',
+ 'compiler/store-store-elimination.cc',
+ 'compiler/store-store-elimination.h',
+ 'compiler/tail-call-optimization.cc',
+ 'compiler/tail-call-optimization.h',
+ 'compiler/type-hint-analyzer.cc',
+ 'compiler/type-hint-analyzer.h',
+ 'compiler/type-hints.cc',
+ 'compiler/type-hints.h',
+ 'compiler/typer.cc',
+ 'compiler/typer.h',
+ 'compiler/unwinding-info-writer.h',
+ 'compiler/value-numbering-reducer.cc',
+ 'compiler/value-numbering-reducer.h',
+ 'compiler/verifier.cc',
+ 'compiler/verifier.h',
+ 'compiler/wasm-compiler.cc',
+ 'compiler/wasm-compiler.h',
+ 'compiler/wasm-linkage.cc',
+ 'compiler/zone-pool.cc',
+ 'compiler/zone-pool.h',
+ 'compiler-dispatcher/compiler-dispatcher-job.cc',
+ 'compiler-dispatcher/compiler-dispatcher-job.h',
+ 'compiler-dispatcher/optimizing-compile-dispatcher.cc',
+ 'compiler-dispatcher/optimizing-compile-dispatcher.h',
+ 'compiler.cc',
+ 'compiler.h',
+ 'context-measure.cc',
+ 'context-measure.h',
+ 'contexts-inl.h',
+ 'contexts.cc',
+ 'contexts.h',
+ 'conversions-inl.h',
+ 'conversions.cc',
+ 'conversions.h',
+ 'counters-inl.h',
+ 'counters.cc',
+ 'counters.h',
+ 'crankshaft/compilation-phase.cc',
+ 'crankshaft/compilation-phase.h',
+ 'crankshaft/hydrogen-alias-analysis.h',
+ 'crankshaft/hydrogen-bce.cc',
+ 'crankshaft/hydrogen-bce.h',
+ 'crankshaft/hydrogen-canonicalize.cc',
+ 'crankshaft/hydrogen-canonicalize.h',
+ 'crankshaft/hydrogen-check-elimination.cc',
+ 'crankshaft/hydrogen-check-elimination.h',
+ 'crankshaft/hydrogen-dce.cc',
+ 'crankshaft/hydrogen-dce.h',
+ 'crankshaft/hydrogen-dehoist.cc',
+ 'crankshaft/hydrogen-dehoist.h',
+ 'crankshaft/hydrogen-environment-liveness.cc',
+ 'crankshaft/hydrogen-environment-liveness.h',
+ 'crankshaft/hydrogen-escape-analysis.cc',
+ 'crankshaft/hydrogen-escape-analysis.h',
+ 'crankshaft/hydrogen-flow-engine.h',
+ 'crankshaft/hydrogen-gvn.cc',
+ 'crankshaft/hydrogen-gvn.h',
+ 'crankshaft/hydrogen-infer-representation.cc',
+ 'crankshaft/hydrogen-infer-representation.h',
+ 'crankshaft/hydrogen-infer-types.cc',
+ 'crankshaft/hydrogen-infer-types.h',
+ 'crankshaft/hydrogen-instructions.cc',
+ 'crankshaft/hydrogen-instructions.h',
+ 'crankshaft/hydrogen-load-elimination.cc',
+ 'crankshaft/hydrogen-load-elimination.h',
+ 'crankshaft/hydrogen-mark-deoptimize.cc',
+ 'crankshaft/hydrogen-mark-deoptimize.h',
+ 'crankshaft/hydrogen-mark-unreachable.cc',
+ 'crankshaft/hydrogen-mark-unreachable.h',
+ 'crankshaft/hydrogen-osr.cc',
+ 'crankshaft/hydrogen-osr.h',
+ 'crankshaft/hydrogen-range-analysis.cc',
+ 'crankshaft/hydrogen-range-analysis.h',
+ 'crankshaft/hydrogen-redundant-phi.cc',
+ 'crankshaft/hydrogen-redundant-phi.h',
+ 'crankshaft/hydrogen-removable-simulates.cc',
+ 'crankshaft/hydrogen-removable-simulates.h',
+ 'crankshaft/hydrogen-representation-changes.cc',
+ 'crankshaft/hydrogen-representation-changes.h',
+ 'crankshaft/hydrogen-sce.cc',
+ 'crankshaft/hydrogen-sce.h',
+ 'crankshaft/hydrogen-store-elimination.cc',
+ 'crankshaft/hydrogen-store-elimination.h',
+ 'crankshaft/hydrogen-types.cc',
+ 'crankshaft/hydrogen-types.h',
+ 'crankshaft/hydrogen-uint32-analysis.cc',
+ 'crankshaft/hydrogen-uint32-analysis.h',
+ 'crankshaft/hydrogen.cc',
+ 'crankshaft/hydrogen.h',
+ 'crankshaft/lithium-allocator-inl.h',
+ 'crankshaft/lithium-allocator.cc',
+ 'crankshaft/lithium-allocator.h',
+ 'crankshaft/lithium-codegen.cc',
+ 'crankshaft/lithium-codegen.h',
+ 'crankshaft/lithium.cc',
+ 'crankshaft/lithium.h',
+ 'crankshaft/lithium-inl.h',
+ 'crankshaft/typing.cc',
+ 'crankshaft/typing.h',
+ 'crankshaft/unique.h',
+ 'date.cc',
+ 'date.h',
+ 'dateparser-inl.h',
+ 'dateparser.cc',
+ 'dateparser.h',
+ 'debug/debug-evaluate.cc',
+ 'debug/debug-evaluate.h',
+ 'debug/debug-frames.cc',
+ 'debug/debug-frames.h',
+ 'debug/debug-scopes.cc',
+ 'debug/debug-scopes.h',
+ 'debug/debug.cc',
+ 'debug/debug.h',
+ 'debug/liveedit.cc',
+ 'debug/liveedit.h',
+ 'deoptimize-reason.cc',
+ 'deoptimize-reason.h',
+ 'deoptimizer.cc',
+ 'deoptimizer.h',
+ 'disasm.h',
+ 'disassembler.cc',
+ 'disassembler.h',
+ 'diy-fp.cc',
+ 'diy-fp.h',
+ 'double.h',
+ 'dtoa.cc',
+ 'dtoa.h',
+ 'effects.h',
+ 'eh-frame.cc',
+ 'eh-frame.h',
+ 'elements-kind.cc',
+ 'elements-kind.h',
+ 'elements.cc',
+ 'elements.h',
+ 'execution.cc',
+ 'execution.h',
+ 'extensions/externalize-string-extension.cc',
+ 'extensions/externalize-string-extension.h',
+ 'extensions/free-buffer-extension.cc',
+ 'extensions/free-buffer-extension.h',
+ 'extensions/gc-extension.cc',
+ 'extensions/gc-extension.h',
+ 'extensions/ignition-statistics-extension.cc',
+ 'extensions/ignition-statistics-extension.h',
+ 'extensions/statistics-extension.cc',
+ 'extensions/statistics-extension.h',
+ 'extensions/trigger-failure-extension.cc',
+ 'extensions/trigger-failure-extension.h',
+ 'external-reference-table.cc',
+ 'external-reference-table.h',
+ 'factory.cc',
+ 'factory.h',
+ 'fast-accessor-assembler.cc',
+ 'fast-accessor-assembler.h',
+ 'fast-dtoa.cc',
+ 'fast-dtoa.h',
+ 'field-index.h',
+ 'field-index-inl.h',
+ 'field-type.cc',
+ 'field-type.h',
+ 'fixed-dtoa.cc',
+ 'fixed-dtoa.h',
+ 'flag-definitions.h',
+ 'flags.cc',
+ 'flags.h',
+ 'frames-inl.h',
+ 'frames.cc',
+ 'frames.h',
+ 'full-codegen/full-codegen.cc',
+ 'full-codegen/full-codegen.h',
+ 'futex-emulation.cc',
+ 'futex-emulation.h',
+ 'gdb-jit.cc',
+ 'gdb-jit.h',
+ 'global-handles.cc',
+ 'global-handles.h',
+ 'globals.h',
+ 'handles-inl.h',
+ 'handles.cc',
+ 'handles.h',
+ 'heap-symbols.h',
+ 'heap/array-buffer-tracker-inl.h',
+ 'heap/array-buffer-tracker.cc',
+ 'heap/array-buffer-tracker.h',
+ 'heap/code-stats.cc',
+ 'heap/code-stats.h',
+ 'heap/memory-reducer.cc',
+ 'heap/memory-reducer.h',
+ 'heap/gc-idle-time-handler.cc',
+ 'heap/gc-idle-time-handler.h',
+ 'heap/gc-tracer.cc',
+ 'heap/gc-tracer.h',
+ 'heap/heap-inl.h',
+ 'heap/heap.cc',
+ 'heap/heap.h',
+ 'heap/incremental-marking-inl.h',
+ 'heap/incremental-marking-job.cc',
+ 'heap/incremental-marking-job.h',
+ 'heap/incremental-marking.cc',
+ 'heap/incremental-marking.h',
+ 'heap/mark-compact-inl.h',
+ 'heap/mark-compact.cc',
+ 'heap/mark-compact.h',
+ 'heap/marking.h',
+ 'heap/object-stats.cc',
+ 'heap/object-stats.h',
+ 'heap/objects-visiting-inl.h',
+ 'heap/objects-visiting.cc',
+ 'heap/objects-visiting.h',
+ 'heap/page-parallel-job.h',
+ 'heap/remembered-set.cc',
+ 'heap/remembered-set.h',
+ 'heap/scavenge-job.h',
+ 'heap/scavenge-job.cc',
+ 'heap/scavenger-inl.h',
+ 'heap/scavenger.cc',
+ 'heap/scavenger.h',
+ 'heap/slot-set.h',
+ 'heap/spaces-inl.h',
+ 'heap/spaces.cc',
+ 'heap/spaces.h',
+ 'heap/store-buffer.cc',
+ 'heap/store-buffer.h',
+ 'i18n.cc',
+ 'i18n.h',
+ 'icu_util.cc',
+ 'icu_util.h',
+ 'ic/access-compiler.cc',
+ 'ic/access-compiler.h',
+ 'ic/call-optimization.cc',
+ 'ic/call-optimization.h',
+ 'ic/handler-compiler.cc',
+ 'ic/handler-compiler.h',
+ 'ic/ic-inl.h',
+ 'ic/ic-state.cc',
+ 'ic/ic-state.h',
+ 'ic/ic.cc',
+ 'ic/ic.h',
+ 'ic/ic-compiler.cc',
+ 'ic/ic-compiler.h',
+ 'identity-map.cc',
+ 'identity-map.h',
+ 'interface-descriptors.cc',
+ 'interface-descriptors.h',
+ 'interpreter/bytecodes.cc',
+ 'interpreter/bytecodes.h',
+ 'interpreter/bytecode-array-builder.cc',
+ 'interpreter/bytecode-array-builder.h',
+ 'interpreter/bytecode-array-iterator.cc',
+ 'interpreter/bytecode-array-iterator.h',
+ 'interpreter/bytecode-array-writer.cc',
+ 'interpreter/bytecode-array-writer.h',
+ 'interpreter/bytecode-dead-code-optimizer.cc',
+ 'interpreter/bytecode-dead-code-optimizer.h',
+ 'interpreter/bytecode-decoder.cc',
+ 'interpreter/bytecode-decoder.h',
+ 'interpreter/bytecode-flags.cc',
+ 'interpreter/bytecode-flags.h',
+ 'interpreter/bytecode-generator.cc',
+ 'interpreter/bytecode-generator.h',
+ 'interpreter/bytecode-label.cc',
+ 'interpreter/bytecode-label.h',
+ 'interpreter/bytecode-peephole-optimizer.cc',
+ 'interpreter/bytecode-peephole-optimizer.h',
+ 'interpreter/bytecode-peephole-table.h',
+ 'interpreter/bytecode-pipeline.cc',
+ 'interpreter/bytecode-pipeline.h',
+ 'interpreter/bytecode-register.cc',
+ 'interpreter/bytecode-register.h',
+ 'interpreter/bytecode-register-allocator.cc',
+ 'interpreter/bytecode-register-allocator.h',
+ 'interpreter/bytecode-register-optimizer.cc',
+ 'interpreter/bytecode-register-optimizer.h',
+ 'interpreter/bytecode-traits.h',
+ 'interpreter/constant-array-builder.cc',
+ 'interpreter/constant-array-builder.h',
+ 'interpreter/control-flow-builders.cc',
+ 'interpreter/control-flow-builders.h',
+ 'interpreter/handler-table-builder.cc',
+ 'interpreter/handler-table-builder.h',
+ 'interpreter/interpreter.cc',
+ 'interpreter/interpreter.h',
+ 'interpreter/interpreter-assembler.cc',
+ 'interpreter/interpreter-assembler.h',
+ 'interpreter/interpreter-intrinsics.cc',
+ 'interpreter/interpreter-intrinsics.h',
+ 'isolate-inl.h',
+ 'isolate.cc',
+ 'isolate.h',
+ 'json-parser.cc',
+ 'json-parser.h',
+ 'json-stringifier.cc',
+ 'json-stringifier.h',
+ 'keys.h',
+ 'keys.cc',
+ 'layout-descriptor-inl.h',
+ 'layout-descriptor.cc',
+ 'layout-descriptor.h',
+ 'list-inl.h',
+ 'list.h',
+ 'locked-queue-inl.h',
+ 'locked-queue.h',
+ 'log-inl.h',
+ 'log-utils.cc',
+ 'log-utils.h',
+ 'log.cc',
+ 'log.h',
+ 'lookup.cc',
+ 'lookup.h',
+ 'macro-assembler.h',
+ 'machine-type.cc',
+ 'machine-type.h',
+ 'messages.cc',
+ 'messages.h',
+ 'msan.h',
+ 'objects-body-descriptors-inl.h',
+ 'objects-body-descriptors.h',
+ 'objects-debug.cc',
+ 'objects-inl.h',
+ 'objects-printer.cc',
+ 'objects.cc',
+ 'objects.h',
+ 'ostreams.cc',
+ 'ostreams.h',
+ 'parsing/expression-classifier.h',
+ 'parsing/func-name-inferrer.cc',
+ 'parsing/func-name-inferrer.h',
+ 'parsing/parameter-initializer-rewriter.cc',
+ 'parsing/parameter-initializer-rewriter.h',
+ 'parsing/parse-info.cc',
+ 'parsing/parse-info.h',
+ 'parsing/parser-base.h',
+ 'parsing/parser.cc',
+ 'parsing/parser.h',
+ 'parsing/pattern-rewriter.cc',
+ 'parsing/preparse-data-format.h',
+ 'parsing/preparse-data.cc',
+ 'parsing/preparse-data.h',
+ 'parsing/preparser.cc',
+ 'parsing/preparser.h',
+ 'parsing/rewriter.cc',
+ 'parsing/rewriter.h',
+ 'parsing/scanner-character-streams.cc',
+ 'parsing/scanner-character-streams.h',
+ 'parsing/scanner.cc',
+ 'parsing/scanner.h',
+ 'parsing/token.cc',
+ 'parsing/token.h',
+ 'pending-compilation-error-handler.cc',
+ 'pending-compilation-error-handler.h',
+ 'perf-jit.cc',
+ 'perf-jit.h',
+ 'profiler/allocation-tracker.cc',
+ 'profiler/allocation-tracker.h',
+ 'profiler/circular-queue-inl.h',
+ 'profiler/circular-queue.h',
+ 'profiler/cpu-profiler-inl.h',
+ 'profiler/cpu-profiler.cc',
+ 'profiler/cpu-profiler.h',
+ 'profiler/heap-profiler.cc',
+ 'profiler/heap-profiler.h',
+ 'profiler/heap-snapshot-generator-inl.h',
+ 'profiler/heap-snapshot-generator.cc',
+ 'profiler/heap-snapshot-generator.h',
+ 'profiler/profiler-listener.cc',
+ 'profiler/profiler-listener.h',
+ 'profiler/profile-generator-inl.h',
+ 'profiler/profile-generator.cc',
+ 'profiler/profile-generator.h',
+ 'profiler/sampling-heap-profiler.cc',
+ 'profiler/sampling-heap-profiler.h',
+ 'profiler/strings-storage.cc',
+ 'profiler/strings-storage.h',
+ 'profiler/tick-sample.cc',
+ 'profiler/tick-sample.h',
+ 'profiler/unbound-queue-inl.h',
+ 'profiler/unbound-queue.h',
+ 'property-descriptor.cc',
+ 'property-descriptor.h',
+ 'property-details.h',
+ 'property.cc',
+ 'property.h',
+ 'prototype.h',
+ 'regexp/bytecodes-irregexp.h',
+ 'regexp/interpreter-irregexp.cc',
+ 'regexp/interpreter-irregexp.h',
+ 'regexp/jsregexp-inl.h',
+ 'regexp/jsregexp.cc',
+ 'regexp/jsregexp.h',
+ 'regexp/regexp-ast.cc',
+ 'regexp/regexp-ast.h',
+ 'regexp/regexp-macro-assembler-irregexp-inl.h',
+ 'regexp/regexp-macro-assembler-irregexp.cc',
+ 'regexp/regexp-macro-assembler-irregexp.h',
+ 'regexp/regexp-macro-assembler-tracer.cc',
+ 'regexp/regexp-macro-assembler-tracer.h',
+ 'regexp/regexp-macro-assembler.cc',
+ 'regexp/regexp-macro-assembler.h',
+ 'regexp/regexp-parser.cc',
+ 'regexp/regexp-parser.h',
+ 'regexp/regexp-stack.cc',
+ 'regexp/regexp-stack.h',
+ 'register-configuration.cc',
+ 'register-configuration.h',
+ 'runtime-profiler.cc',
+ 'runtime-profiler.h',
+ 'runtime/runtime-array.cc',
+ 'runtime/runtime-atomics.cc',
+ 'runtime/runtime-classes.cc',
+ 'runtime/runtime-collections.cc',
+ 'runtime/runtime-compiler.cc',
+ 'runtime/runtime-date.cc',
+ 'runtime/runtime-debug.cc',
+ 'runtime/runtime-forin.cc',
+ 'runtime/runtime-function.cc',
+ 'runtime/runtime-error.cc',
+ 'runtime/runtime-futex.cc',
+ 'runtime/runtime-generator.cc',
+ 'runtime/runtime-i18n.cc',
+ 'runtime/runtime-internal.cc',
+ 'runtime/runtime-interpreter.cc',
+ 'runtime/runtime-literals.cc',
+ 'runtime/runtime-liveedit.cc',
+ 'runtime/runtime-maths.cc',
+ 'runtime/runtime-numbers.cc',
+ 'runtime/runtime-object.cc',
+ 'runtime/runtime-operators.cc',
+ 'runtime/runtime-proxy.cc',
+ 'runtime/runtime-regexp.cc',
+ 'runtime/runtime-scopes.cc',
+ 'runtime/runtime-simd.cc',
+ 'runtime/runtime-strings.cc',
+ 'runtime/runtime-symbol.cc',
+ 'runtime/runtime-test.cc',
+ 'runtime/runtime-typedarray.cc',
+ 'runtime/runtime-utils.h',
+ 'runtime/runtime-wasm.cc',
+ 'runtime/runtime.cc',
+ 'runtime/runtime.h',
+ 'safepoint-table.cc',
+ 'safepoint-table.h',
+ 'signature.h',
+ 'simulator.h',
+ 'small-pointer-list.h',
+ 'snapshot/code-serializer.cc',
+ 'snapshot/code-serializer.h',
+ 'snapshot/deserializer.cc',
+ 'snapshot/deserializer.h',
+ 'snapshot/natives.h',
+ 'snapshot/natives-common.cc',
+ 'snapshot/partial-serializer.cc',
+ 'snapshot/partial-serializer.h',
+ 'snapshot/serializer.cc',
+ 'snapshot/serializer.h',
+ 'snapshot/serializer-common.cc',
+ 'snapshot/serializer-common.h',
+ 'snapshot/snapshot.h',
+ 'snapshot/snapshot-common.cc',
+ 'snapshot/snapshot-source-sink.cc',
+ 'snapshot/snapshot-source-sink.h',
+ 'snapshot/startup-serializer.cc',
+ 'snapshot/startup-serializer.h',
+ 'source-position-table.cc',
+ 'source-position-table.h',
+ 'source-position.h',
+ 'splay-tree.h',
+ 'splay-tree-inl.h',
+ 'startup-data-util.cc',
+ 'startup-data-util.h',
+ 'string-builder.cc',
+ 'string-builder.h',
+ 'string-search.h',
+ 'string-stream.cc',
+ 'string-stream.h',
+ 'strtod.cc',
+ 'strtod.h',
+ 'ic/stub-cache.cc',
+ 'ic/stub-cache.h',
+ 'tracing/trace-event.cc',
+ 'tracing/trace-event.h',
+ 'transitions-inl.h',
+ 'transitions.cc',
+ 'transitions.h',
+ 'type-cache.cc',
+ 'type-cache.h',
+ 'type-feedback-vector-inl.h',
+ 'type-feedback-vector.cc',
+ 'type-feedback-vector.h',
+ 'type-info.cc',
+ 'type-info.h',
+ 'types.cc',
+ 'types.h',
+ 'unicode-inl.h',
+ 'unicode.cc',
+ 'unicode.h',
+ 'unicode-cache-inl.h',
+ 'unicode-cache.h',
+ 'unicode-decoder.cc',
+ 'unicode-decoder.h',
+ 'uri.cc',
+ 'uri.h',
+ 'utils-inl.h',
+ 'utils.cc',
+ 'utils.h',
+ 'v8.cc',
+ 'v8.h',
+ 'v8memory.h',
+ 'v8threads.cc',
+ 'v8threads.h',
+ 'value-serializer.cc',
+ 'value-serializer.h',
+ 'vector.h',
+ 'version.cc',
+ 'version.h',
+ 'vm-state-inl.h',
+ 'vm-state.h',
+ 'wasm/ast-decoder.cc',
+ 'wasm/ast-decoder.h',
+ 'wasm/decoder.h',
+ 'wasm/encoder.cc',
+ 'wasm/encoder.h',
+ 'wasm/leb-helper.h',
+ 'wasm/module-decoder.cc',
+ 'wasm/module-decoder.h',
+ 'wasm/switch-logic.h',
+ 'wasm/switch-logic.cc',
+ 'wasm/wasm-debug.cc',
+ 'wasm/wasm-debug.h',
+ 'wasm/wasm-external-refs.cc',
+ 'wasm/wasm-external-refs.h',
+ 'wasm/wasm-function-name-table.cc',
+ 'wasm/wasm-function-name-table.h',
+ 'wasm/wasm-js.cc',
+ 'wasm/wasm-js.h',
+ 'wasm/wasm-macro-gen.h',
+ 'wasm/wasm-module.cc',
+ 'wasm/wasm-module.h',
+ 'wasm/wasm-interpreter.cc',
+ 'wasm/wasm-interpreter.h',
+ 'wasm/wasm-opcodes.cc',
+ 'wasm/wasm-opcodes.h',
+ 'wasm/wasm-result.cc',
+ 'wasm/wasm-result.h',
+ 'zone.cc',
+ 'zone.h',
+ 'zone-allocator.h',
+ 'zone-containers.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['want_separate_host_toolset_mkpeephole==1', {
+ 'dependencies': ['mkpeephole#host'],
+ }, {
+ 'dependencies': ['mkpeephole'],
+ }],
+ ['v8_target_arch=="arm"', {
+ 'sources': [ ### gcmole(arch:arm) ###
+ 'arm/assembler-arm-inl.h',
+ 'arm/assembler-arm.cc',
+ 'arm/assembler-arm.h',
+ 'arm/code-stubs-arm.cc',
+ 'arm/code-stubs-arm.h',
+ 'arm/codegen-arm.cc',
+ 'arm/codegen-arm.h',
+ 'arm/constants-arm.h',
+ 'arm/constants-arm.cc',
+ 'arm/cpu-arm.cc',
+ 'arm/deoptimizer-arm.cc',
+ 'arm/disasm-arm.cc',
+ 'arm/frames-arm.cc',
+ 'arm/frames-arm.h',
+ 'arm/interface-descriptors-arm.cc',
+ 'arm/interface-descriptors-arm.h',
+ 'arm/macro-assembler-arm.cc',
+ 'arm/macro-assembler-arm.h',
+ 'arm/simulator-arm.cc',
+ 'arm/simulator-arm.h',
+ 'arm/eh-frame-arm.cc',
+ 'builtins/arm/builtins-arm.cc',
+ 'compiler/arm/code-generator-arm.cc',
+ 'compiler/arm/instruction-codes-arm.h',
+ 'compiler/arm/instruction-scheduler-arm.cc',
+ 'compiler/arm/instruction-selector-arm.cc',
+ 'compiler/arm/unwinding-info-writer-arm.h',
+ 'compiler/arm/unwinding-info-writer-arm.cc',
+ 'crankshaft/arm/lithium-arm.cc',
+ 'crankshaft/arm/lithium-arm.h',
+ 'crankshaft/arm/lithium-codegen-arm.cc',
+ 'crankshaft/arm/lithium-codegen-arm.h',
+ 'crankshaft/arm/lithium-gap-resolver-arm.cc',
+ 'crankshaft/arm/lithium-gap-resolver-arm.h',
+ 'debug/arm/debug-arm.cc',
+ 'full-codegen/arm/full-codegen-arm.cc',
+ 'ic/arm/access-compiler-arm.cc',
+ 'ic/arm/handler-compiler-arm.cc',
+ 'ic/arm/ic-arm.cc',
+ 'ic/arm/ic-compiler-arm.cc',
+ 'ic/arm/stub-cache-arm.cc',
+ 'regexp/arm/regexp-macro-assembler-arm.cc',
+ 'regexp/arm/regexp-macro-assembler-arm.h',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [ ### gcmole(arch:arm64) ###
+ 'arm64/assembler-arm64.cc',
+ 'arm64/assembler-arm64.h',
+ 'arm64/assembler-arm64-inl.h',
+ 'arm64/codegen-arm64.cc',
+ 'arm64/codegen-arm64.h',
+ 'arm64/code-stubs-arm64.cc',
+ 'arm64/code-stubs-arm64.h',
+ 'arm64/constants-arm64.h',
+ 'arm64/cpu-arm64.cc',
+ 'arm64/decoder-arm64.cc',
+ 'arm64/decoder-arm64.h',
+ 'arm64/decoder-arm64-inl.h',
+ 'arm64/deoptimizer-arm64.cc',
+ 'arm64/disasm-arm64.cc',
+ 'arm64/disasm-arm64.h',
+ 'arm64/frames-arm64.cc',
+ 'arm64/frames-arm64.h',
+ 'arm64/instructions-arm64.cc',
+ 'arm64/instructions-arm64.h',
+ 'arm64/instrument-arm64.cc',
+ 'arm64/instrument-arm64.h',
+ 'arm64/interface-descriptors-arm64.cc',
+ 'arm64/interface-descriptors-arm64.h',
+ 'arm64/macro-assembler-arm64.cc',
+ 'arm64/macro-assembler-arm64.h',
+ 'arm64/macro-assembler-arm64-inl.h',
+ 'arm64/simulator-arm64.cc',
+ 'arm64/simulator-arm64.h',
+ 'arm64/utils-arm64.cc',
+ 'arm64/utils-arm64.h',
+ 'arm64/eh-frame-arm64.cc',
+ 'builtins/arm64/builtins-arm64.cc',
+ 'compiler/arm64/code-generator-arm64.cc',
+ 'compiler/arm64/instruction-codes-arm64.h',
+ 'compiler/arm64/instruction-scheduler-arm64.cc',
+ 'compiler/arm64/instruction-selector-arm64.cc',
+ 'compiler/arm64/unwinding-info-writer-arm64.h',
+ 'compiler/arm64/unwinding-info-writer-arm64.cc',
+ 'crankshaft/arm64/delayed-masm-arm64.cc',
+ 'crankshaft/arm64/delayed-masm-arm64.h',
+ 'crankshaft/arm64/delayed-masm-arm64-inl.h',
+ 'crankshaft/arm64/lithium-arm64.cc',
+ 'crankshaft/arm64/lithium-arm64.h',
+ 'crankshaft/arm64/lithium-codegen-arm64.cc',
+ 'crankshaft/arm64/lithium-codegen-arm64.h',
+ 'crankshaft/arm64/lithium-gap-resolver-arm64.cc',
+ 'crankshaft/arm64/lithium-gap-resolver-arm64.h',
+ 'debug/arm64/debug-arm64.cc',
+ 'full-codegen/arm64/full-codegen-arm64.cc',
+ 'ic/arm64/access-compiler-arm64.cc',
+ 'ic/arm64/handler-compiler-arm64.cc',
+ 'ic/arm64/ic-arm64.cc',
+ 'ic/arm64/ic-compiler-arm64.cc',
+ 'ic/arm64/stub-cache-arm64.cc',
+ 'regexp/arm64/regexp-macro-assembler-arm64.cc',
+ 'regexp/arm64/regexp-macro-assembler-arm64.h',
+ ],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'sources': [ ### gcmole(arch:ia32) ###
+ 'ia32/assembler-ia32-inl.h',
+ 'ia32/assembler-ia32.cc',
+ 'ia32/assembler-ia32.h',
+ 'ia32/code-stubs-ia32.cc',
+ 'ia32/code-stubs-ia32.h',
+ 'ia32/codegen-ia32.cc',
+ 'ia32/codegen-ia32.h',
+ 'ia32/cpu-ia32.cc',
+ 'ia32/deoptimizer-ia32.cc',
+ 'ia32/disasm-ia32.cc',
+ 'ia32/frames-ia32.cc',
+ 'ia32/frames-ia32.h',
+ 'ia32/interface-descriptors-ia32.cc',
+ 'ia32/macro-assembler-ia32.cc',
+ 'ia32/macro-assembler-ia32.h',
+ 'builtins/ia32/builtins-ia32.cc',
+ 'compiler/ia32/code-generator-ia32.cc',
+ 'compiler/ia32/instruction-codes-ia32.h',
+ 'compiler/ia32/instruction-scheduler-ia32.cc',
+ 'compiler/ia32/instruction-selector-ia32.cc',
+ 'crankshaft/ia32/lithium-codegen-ia32.cc',
+ 'crankshaft/ia32/lithium-codegen-ia32.h',
+ 'crankshaft/ia32/lithium-gap-resolver-ia32.cc',
+ 'crankshaft/ia32/lithium-gap-resolver-ia32.h',
+ 'crankshaft/ia32/lithium-ia32.cc',
+ 'crankshaft/ia32/lithium-ia32.h',
+ 'debug/ia32/debug-ia32.cc',
+ 'full-codegen/ia32/full-codegen-ia32.cc',
+ 'ic/ia32/access-compiler-ia32.cc',
+ 'ic/ia32/handler-compiler-ia32.cc',
+ 'ic/ia32/ic-ia32.cc',
+ 'ic/ia32/ic-compiler-ia32.cc',
+ 'ic/ia32/stub-cache-ia32.cc',
+ 'regexp/ia32/regexp-macro-assembler-ia32.cc',
+ 'regexp/ia32/regexp-macro-assembler-ia32.h',
+ ],
+ }],
+ ['v8_target_arch=="x87"', {
+ 'sources': [ ### gcmole(arch:x87) ###
+ 'x87/assembler-x87-inl.h',
+ 'x87/assembler-x87.cc',
+ 'x87/assembler-x87.h',
+ 'x87/code-stubs-x87.cc',
+ 'x87/code-stubs-x87.h',
+ 'x87/codegen-x87.cc',
+ 'x87/codegen-x87.h',
+ 'x87/cpu-x87.cc',
+ 'x87/deoptimizer-x87.cc',
+ 'x87/disasm-x87.cc',
+ 'x87/frames-x87.cc',
+ 'x87/frames-x87.h',
+ 'x87/interface-descriptors-x87.cc',
+ 'x87/macro-assembler-x87.cc',
+ 'x87/macro-assembler-x87.h',
+ 'builtins/x87/builtins-x87.cc',
+ 'compiler/x87/code-generator-x87.cc',
+ 'compiler/x87/instruction-codes-x87.h',
+ 'compiler/x87/instruction-scheduler-x87.cc',
+ 'compiler/x87/instruction-selector-x87.cc',
+ 'crankshaft/x87/lithium-codegen-x87.cc',
+ 'crankshaft/x87/lithium-codegen-x87.h',
+ 'crankshaft/x87/lithium-gap-resolver-x87.cc',
+ 'crankshaft/x87/lithium-gap-resolver-x87.h',
+ 'crankshaft/x87/lithium-x87.cc',
+ 'crankshaft/x87/lithium-x87.h',
+ 'debug/x87/debug-x87.cc',
+ 'full-codegen/x87/full-codegen-x87.cc',
+ 'ic/x87/access-compiler-x87.cc',
+ 'ic/x87/handler-compiler-x87.cc',
+ 'ic/x87/ic-x87.cc',
+ 'ic/x87/ic-compiler-x87.cc',
+ 'ic/x87/stub-cache-x87.cc',
+ 'regexp/x87/regexp-macro-assembler-x87.cc',
+ 'regexp/x87/regexp-macro-assembler-x87.h',
+ ],
+ }],
+ ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
+ 'sources': [ ### gcmole(arch:mipsel) ###
+ 'mips/assembler-mips.cc',
+ 'mips/assembler-mips.h',
+ 'mips/assembler-mips-inl.h',
+ 'mips/codegen-mips.cc',
+ 'mips/codegen-mips.h',
+ 'mips/code-stubs-mips.cc',
+ 'mips/code-stubs-mips.h',
+ 'mips/constants-mips.cc',
+ 'mips/constants-mips.h',
+ 'mips/cpu-mips.cc',
+ 'mips/deoptimizer-mips.cc',
+ 'mips/disasm-mips.cc',
+ 'mips/frames-mips.cc',
+ 'mips/frames-mips.h',
+ 'mips/interface-descriptors-mips.cc',
+ 'mips/macro-assembler-mips.cc',
+ 'mips/macro-assembler-mips.h',
+ 'mips/simulator-mips.cc',
+ 'mips/simulator-mips.h',
+ 'builtins/mips/builtins-mips.cc',
+ 'compiler/mips/code-generator-mips.cc',
+ 'compiler/mips/instruction-codes-mips.h',
+ 'compiler/mips/instruction-scheduler-mips.cc',
+ 'compiler/mips/instruction-selector-mips.cc',
+ 'crankshaft/mips/lithium-codegen-mips.cc',
+ 'crankshaft/mips/lithium-codegen-mips.h',
+ 'crankshaft/mips/lithium-gap-resolver-mips.cc',
+ 'crankshaft/mips/lithium-gap-resolver-mips.h',
+ 'crankshaft/mips/lithium-mips.cc',
+ 'crankshaft/mips/lithium-mips.h',
+ 'full-codegen/mips/full-codegen-mips.cc',
+ 'debug/mips/debug-mips.cc',
+ 'ic/mips/access-compiler-mips.cc',
+ 'ic/mips/handler-compiler-mips.cc',
+ 'ic/mips/ic-mips.cc',
+ 'ic/mips/ic-compiler-mips.cc',
+ 'ic/mips/stub-cache-mips.cc',
+ 'regexp/mips/regexp-macro-assembler-mips.cc',
+ 'regexp/mips/regexp-macro-assembler-mips.h',
+ ],
+ }],
+ ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
+ 'sources': [ ### gcmole(arch:mips64el) ###
+ 'mips64/assembler-mips64.cc',
+ 'mips64/assembler-mips64.h',
+ 'mips64/assembler-mips64-inl.h',
+ 'mips64/codegen-mips64.cc',
+ 'mips64/codegen-mips64.h',
+ 'mips64/code-stubs-mips64.cc',
+ 'mips64/code-stubs-mips64.h',
+ 'mips64/constants-mips64.cc',
+ 'mips64/constants-mips64.h',
+ 'mips64/cpu-mips64.cc',
+ 'mips64/deoptimizer-mips64.cc',
+ 'mips64/disasm-mips64.cc',
+ 'mips64/frames-mips64.cc',
+ 'mips64/frames-mips64.h',
+ 'mips64/interface-descriptors-mips64.cc',
+ 'mips64/macro-assembler-mips64.cc',
+ 'mips64/macro-assembler-mips64.h',
+ 'mips64/simulator-mips64.cc',
+ 'mips64/simulator-mips64.h',
+ 'builtins/mips64/builtins-mips64.cc',
+ 'compiler/mips64/code-generator-mips64.cc',
+ 'compiler/mips64/instruction-codes-mips64.h',
+ 'compiler/mips64/instruction-scheduler-mips64.cc',
+ 'compiler/mips64/instruction-selector-mips64.cc',
+ 'crankshaft/mips64/lithium-codegen-mips64.cc',
+ 'crankshaft/mips64/lithium-codegen-mips64.h',
+ 'crankshaft/mips64/lithium-gap-resolver-mips64.cc',
+ 'crankshaft/mips64/lithium-gap-resolver-mips64.h',
+ 'crankshaft/mips64/lithium-mips64.cc',
+ 'crankshaft/mips64/lithium-mips64.h',
+ 'debug/mips64/debug-mips64.cc',
+ 'full-codegen/mips64/full-codegen-mips64.cc',
+ 'ic/mips64/access-compiler-mips64.cc',
+ 'ic/mips64/handler-compiler-mips64.cc',
+ 'ic/mips64/ic-mips64.cc',
+ 'ic/mips64/ic-compiler-mips64.cc',
+ 'ic/mips64/stub-cache-mips64.cc',
+ 'regexp/mips64/regexp-macro-assembler-mips64.cc',
+ 'regexp/mips64/regexp-macro-assembler-mips64.h',
+ ],
+ }],
+ ['v8_target_arch=="x64" or v8_target_arch=="x32"', {
+ 'sources': [ ### gcmole(arch:x64) ###
+ 'builtins/x64/builtins-x64.cc',
+ 'crankshaft/x64/lithium-codegen-x64.cc',
+ 'crankshaft/x64/lithium-codegen-x64.h',
+ 'crankshaft/x64/lithium-gap-resolver-x64.cc',
+ 'crankshaft/x64/lithium-gap-resolver-x64.h',
+ 'crankshaft/x64/lithium-x64.cc',
+ 'crankshaft/x64/lithium-x64.h',
+ 'x64/assembler-x64-inl.h',
+ 'x64/assembler-x64.cc',
+ 'x64/assembler-x64.h',
+ 'x64/code-stubs-x64.cc',
+ 'x64/code-stubs-x64.h',
+ 'x64/codegen-x64.cc',
+ 'x64/codegen-x64.h',
+ 'x64/cpu-x64.cc',
+ 'x64/deoptimizer-x64.cc',
+ 'x64/disasm-x64.cc',
+ 'x64/frames-x64.cc',
+ 'x64/frames-x64.h',
+ 'x64/interface-descriptors-x64.cc',
+ 'x64/macro-assembler-x64.cc',
+ 'x64/macro-assembler-x64.h',
+ 'debug/x64/debug-x64.cc',
+ 'full-codegen/x64/full-codegen-x64.cc',
+ 'ic/x64/access-compiler-x64.cc',
+ 'ic/x64/handler-compiler-x64.cc',
+ 'ic/x64/ic-x64.cc',
+ 'ic/x64/ic-compiler-x64.cc',
+ 'ic/x64/stub-cache-x64.cc',
+ 'regexp/x64/regexp-macro-assembler-x64.cc',
+ 'regexp/x64/regexp-macro-assembler-x64.h',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [
+ 'compiler/x64/code-generator-x64.cc',
+ 'compiler/x64/instruction-codes-x64.h',
+ 'compiler/x64/instruction-scheduler-x64.cc',
+ 'compiler/x64/instruction-selector-x64.cc',
+ 'compiler/x64/unwinding-info-writer-x64.h',
+ 'compiler/x64/unwinding-info-writer-x64.cc',
+ 'x64/eh-frame-x64.cc',
+ ],
+ }],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ 'builtins/ppc/builtins-ppc.cc',
+ 'compiler/ppc/code-generator-ppc.cc',
+ 'compiler/ppc/instruction-codes-ppc.h',
+ 'compiler/ppc/instruction-scheduler-ppc.cc',
+ 'compiler/ppc/instruction-selector-ppc.cc',
+ 'crankshaft/ppc/lithium-ppc.cc',
+ 'crankshaft/ppc/lithium-ppc.h',
+ 'crankshaft/ppc/lithium-codegen-ppc.cc',
+ 'crankshaft/ppc/lithium-codegen-ppc.h',
+ 'crankshaft/ppc/lithium-gap-resolver-ppc.cc',
+ 'crankshaft/ppc/lithium-gap-resolver-ppc.h',
+ 'debug/ppc/debug-ppc.cc',
+ 'full-codegen/ppc/full-codegen-ppc.cc',
+ 'ic/ppc/access-compiler-ppc.cc',
+ 'ic/ppc/handler-compiler-ppc.cc',
+ 'ic/ppc/ic-ppc.cc',
+ 'ic/ppc/ic-compiler-ppc.cc',
+ 'ic/ppc/stub-cache-ppc.cc',
+ 'ppc/assembler-ppc-inl.h',
+ 'ppc/assembler-ppc.cc',
+ 'ppc/assembler-ppc.h',
+ 'ppc/code-stubs-ppc.cc',
+ 'ppc/code-stubs-ppc.h',
+ 'ppc/codegen-ppc.cc',
+ 'ppc/codegen-ppc.h',
+ 'ppc/constants-ppc.h',
+ 'ppc/constants-ppc.cc',
+ 'ppc/cpu-ppc.cc',
+ 'ppc/deoptimizer-ppc.cc',
+ 'ppc/disasm-ppc.cc',
+ 'ppc/frames-ppc.cc',
+ 'ppc/frames-ppc.h',
+ 'ppc/interface-descriptors-ppc.cc',
+ 'ppc/macro-assembler-ppc.cc',
+ 'ppc/macro-assembler-ppc.h',
+ 'ppc/simulator-ppc.cc',
+ 'ppc/simulator-ppc.h',
+ 'regexp/ppc/regexp-macro-assembler-ppc.cc',
+ 'regexp/ppc/regexp-macro-assembler-ppc.h',
+ ],
+ }],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'sources': [ ### gcmole(arch:s390) ###
+ 'builtins/s390/builtins-s390.cc',
+ 'compiler/s390/code-generator-s390.cc',
+ 'compiler/s390/instruction-codes-s390.h',
+ 'compiler/s390/instruction-scheduler-s390.cc',
+ 'compiler/s390/instruction-selector-s390.cc',
+ 'crankshaft/s390/lithium-codegen-s390.cc',
+ 'crankshaft/s390/lithium-codegen-s390.h',
+ 'crankshaft/s390/lithium-gap-resolver-s390.cc',
+ 'crankshaft/s390/lithium-gap-resolver-s390.h',
+ 'crankshaft/s390/lithium-s390.cc',
+ 'crankshaft/s390/lithium-s390.h',
+ 'debug/s390/debug-s390.cc',
+ 'full-codegen/s390/full-codegen-s390.cc',
+ 'ic/s390/access-compiler-s390.cc',
+ 'ic/s390/handler-compiler-s390.cc',
+ 'ic/s390/ic-compiler-s390.cc',
+ 'ic/s390/ic-s390.cc',
+ 'ic/s390/stub-cache-s390.cc',
+ 'regexp/s390/regexp-macro-assembler-s390.cc',
+ 'regexp/s390/regexp-macro-assembler-s390.h',
+ 's390/assembler-s390.cc',
+ 's390/assembler-s390.h',
+ 's390/assembler-s390-inl.h',
+ 's390/codegen-s390.cc',
+ 's390/codegen-s390.h',
+ 's390/code-stubs-s390.cc',
+ 's390/code-stubs-s390.h',
+ 's390/constants-s390.cc',
+ 's390/constants-s390.h',
+ 's390/cpu-s390.cc',
+ 's390/deoptimizer-s390.cc',
+ 's390/disasm-s390.cc',
+ 's390/frames-s390.cc',
+ 's390/frames-s390.h',
+ 's390/interface-descriptors-s390.cc',
+ 's390/macro-assembler-s390.cc',
+ 's390/macro-assembler-s390.h',
+ 's390/simulator-s390.cc',
+ 's390/simulator-s390.h',
+ ],
+ }],
+ ['OS=="win"', {
+ 'variables': {
+ 'gyp_generators': '<!(echo $GYP_GENERATORS)',
+ },
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
+          # When building Official, the generated .lib exceeds the 2G limit,
+          # so shard it into multiple pieces to stay under the limit.
+          # See http://crbug.com/485155.
+ 'msvs_shard': 4,
+ }],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_SHARED',
+ 'V8_SHARED',
+ ],
+ }],
+ ['v8_postmortem_support=="true"', {
+ 'sources': [
+ '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+ ]
+ }],
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ 'conditions': [
+ ['icu_use_data_file_flag==1', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
+ }, { # else icu_use_data_file_flag !=1
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
+ }, {
+ 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
+ }],
+ ],
+ }],
+ ],
+ }, { # v8_enable_i18n_support==0
+ 'sources!': [
+ 'i18n.cc',
+ 'i18n.h',
+ ],
+ }],
+ ['OS=="win" and v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icudata',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_libbase',
+ 'type': 'static_library',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ ],
+ 'sources': [
+ 'base/accounting-allocator.cc',
+ 'base/accounting-allocator.h',
+ 'base/adapters.h',
+ 'base/atomic-utils.h',
+ 'base/atomicops.h',
+ 'base/atomicops_internals_arm64_gcc.h',
+ 'base/atomicops_internals_arm_gcc.h',
+ 'base/atomicops_internals_atomicword_compat.h',
+ 'base/atomicops_internals_mac.h',
+ 'base/atomicops_internals_mips_gcc.h',
+ 'base/atomicops_internals_mips64_gcc.h',
+ 'base/atomicops_internals_ppc_gcc.h',
+ 'base/atomicops_internals_s390_gcc.h',
+ 'base/atomicops_internals_tsan.h',
+ 'base/atomicops_internals_x86_gcc.cc',
+ 'base/atomicops_internals_x86_gcc.h',
+ 'base/atomicops_internals_x86_msvc.h',
+ 'base/bits.cc',
+ 'base/bits.h',
+ 'base/build_config.h',
+ 'base/compiler-specific.h',
+ 'base/cpu.cc',
+ 'base/cpu.h',
+ 'base/division-by-constant.cc',
+ 'base/division-by-constant.h',
+ 'base/debug/stack_trace.cc',
+ 'base/debug/stack_trace.h',
+ 'base/file-utils.cc',
+ 'base/file-utils.h',
+ 'base/flags.h',
+ 'base/format-macros.h',
+ 'base/free_deleter.h',
+ 'base/functional.cc',
+ 'base/functional.h',
+ 'base/hashmap.h',
+ 'base/ieee754.cc',
+ 'base/ieee754.h',
+ 'base/iterator.h',
+ 'base/lazy-instance.h',
+ 'base/logging.cc',
+ 'base/logging.h',
+ 'base/macros.h',
+ 'base/once.cc',
+ 'base/once.h',
+ 'base/platform/elapsed-timer.h',
+ 'base/platform/time.cc',
+ 'base/platform/time.h',
+ 'base/platform/condition-variable.cc',
+ 'base/platform/condition-variable.h',
+ 'base/platform/mutex.cc',
+ 'base/platform/mutex.h',
+ 'base/platform/platform.h',
+ 'base/platform/semaphore.cc',
+ 'base/platform/semaphore.h',
+ 'base/safe_conversions.h',
+ 'base/safe_conversions_impl.h',
+ 'base/safe_math.h',
+ 'base/safe_math_impl.h',
+ 'base/sys-info.cc',
+ 'base/sys-info.h',
+ 'base/utils/random-number-generator.cc',
+ 'base/utils/random-number-generator.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1 or \
+ want_separate_host_toolset_mkpeephole==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['OS=="linux"', {
+ 'link_settings': {
+ 'libraries': [
+ '-ldl',
+ '-lrt'
+ ],
+ },
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-linux.cc',
+ 'base/platform/platform-posix.cc',
+ ],
+ }
+ ],
+ ['OS=="android"', {
+ 'sources': [
+ 'base/debug/stack_trace_android.cc',
+ 'base/platform/platform-posix.cc',
+ ],
+ 'link_settings': {
+ 'target_conditions': [
+ ['_toolset=="host" and host_os!="mac"', {
+ # Only include libdl and librt on host builds because they
+ # are included by default on Android target builds, and we
+              # don't want to re-include them here since doing so would
+              # change the library order and break the build (see
+              # crbug.com/469973).
+ # These libraries do not exist on Mac hosted builds.
+ 'libraries': [
+ '-ldl',
+ '-lrt'
+ ]
+ }]
+ ]
+ },
+ 'conditions': [
+ ['host_os=="mac"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'sources': [
+ 'base/platform/platform-macos.cc'
+ ]
+ }, {
+ 'sources': [
+ 'base/platform/platform-linux.cc'
+ ]
+ }],
+ ],
+ }, {
+ 'sources': [
+ 'base/platform/platform-linux.cc'
+ ]
+ }],
+ ],
+ },
+ ],
+ ['OS=="qnx"', {
+ 'link_settings': {
+ 'target_conditions': [
+ ['_toolset=="host" and host_os=="linux"', {
+ 'libraries': [
+ '-lrt'
+ ],
+ }],
+ ['_toolset=="target"', {
+ 'libraries': [
+ '-lbacktrace'
+ ],
+ }],
+ ],
+ },
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-posix.cc',
+ 'base/qnx-math.h'
+ ],
+ 'target_conditions': [
+ ['_toolset=="host" and host_os=="linux"', {
+ 'sources': [
+ 'base/platform/platform-linux.cc'
+ ],
+ }],
+ ['_toolset=="host" and host_os=="mac"', {
+ 'sources': [
+ 'base/platform/platform-macos.cc'
+ ],
+ }],
+ ['_toolset=="target"', {
+ 'sources': [
+ 'base/platform/platform-qnx.cc'
+ ],
+ }],
+ ],
+ },
+ ],
+ ['OS=="freebsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ]},
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-freebsd.cc',
+ 'base/platform/platform-posix.cc',
+ ],
+ }
+ ],
+ ['OS=="openbsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/local/lib -lexecinfo',
+ ]},
+ 'sources': [
+ 'base/platform/platform-openbsd.cc',
+ 'base/platform/platform-posix.cc'
+ ],
+ }
+ ],
+ ['OS=="netbsd"', {
+ 'link_settings': {
+ 'libraries': [
+ '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
+ ]},
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-openbsd.cc',
+ 'base/platform/platform-posix.cc',
+ ],
+ }
+ ],
+ ['OS=="aix"', {
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-aix.cc',
+ 'base/platform/platform-posix.cc'
+ ]},
+ ],
+ ['OS=="solaris"', {
+ 'link_settings': {
+ 'libraries': [
+ '-lnsl -lrt',
+ ]},
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-solaris.cc',
+ 'base/platform/platform-posix.cc',
+ ],
+ }
+ ],
+ ['OS=="mac"', {
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-macos.cc',
+ 'base/platform/platform-posix.cc',
+ ]},
+ ],
+ ['OS=="win"', {
+ 'defines': [
+ '_CRT_RAND_S' # for rand_s()
+ ],
+ 'variables': {
+ 'gyp_generators': '<!(echo $GYP_GENERATORS)',
+ },
+ 'conditions': [
+ ['gyp_generators=="make"', {
+ 'variables': {
+ 'build_env': '<!(uname -o)',
+ },
+ 'conditions': [
+ ['build_env=="Cygwin"', {
+ 'sources': [
+ 'base/debug/stack_trace_posix.cc',
+ 'base/platform/platform-cygwin.cc',
+ 'base/platform/platform-posix.cc',
+ ],
+ }, {
+ 'sources': [
+ 'base/debug/stack_trace_win.cc',
+ 'base/platform/platform-win32.cc',
+ 'base/win32-headers.h',
+ ],
+ }],
+ ],
+ 'link_settings': {
+ 'libraries': [ '-lwinmm', '-lws2_32' ],
+ },
+ }, {
+ 'sources': [
+ 'base/debug/stack_trace_win.cc',
+ 'base/platform/platform-win32.cc',
+ 'base/win32-headers.h',
+ ],
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
+ 'link_settings': {
+ 'libraries': [
+ '-ldbghelp.lib',
+ '-lshlwapi.lib',
+ '-lwinmm.lib',
+ '-lws2_32.lib'
+ ],
+ },
+ }],
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_libplatform',
+ 'type': 'static_library',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ 'v8_libbase',
+ ],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ '../include',
+ ],
+ 'sources': [
+ '../include/libplatform/libplatform.h',
+ '../include/libplatform/v8-tracing.h',
+ 'libplatform/default-platform.cc',
+ 'libplatform/default-platform.h',
+ 'libplatform/task-queue.cc',
+ 'libplatform/task-queue.h',
+ 'libplatform/tracing/trace-buffer.cc',
+ 'libplatform/tracing/trace-buffer.h',
+ 'libplatform/tracing/trace-config.cc',
+ 'libplatform/tracing/trace-object.cc',
+ 'libplatform/tracing/trace-writer.cc',
+ 'libplatform/tracing/trace-writer.h',
+ 'libplatform/tracing/tracing-controller.cc',
+ 'libplatform/worker-thread.cc',
+ 'libplatform/worker-thread.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include',
+ ],
+ },
+ },
+ {
+ 'target_name': 'v8_libsampler',
+ 'type': 'static_library',
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'dependencies': [
+ 'v8_libbase',
+ ],
+ 'include_dirs+': [
+ '..',
+ '../include',
+ ],
+ 'sources': [
+ 'libsampler/sampler.cc',
+ 'libsampler/sampler.h'
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../include',
+ ],
+ },
+ },
+ {
+ 'target_name': 'natives_blob',
+ 'type': 'none',
+ 'conditions': [
+ [ 'v8_use_external_startup_data==1', {
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'dependencies': ['js2c#host'],
+ }, {
+ 'dependencies': ['js2c'],
+ }],
+ ],
+ 'actions': [{
+ 'action_name': 'concatenate_natives_blob',
+ 'inputs': [
+ '../tools/concatenate-files.py',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob_host.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+ ],
+ }],
+ ],
+ }, {
+ 'outputs': [
+ '<(PRODUCT_DIR)/natives_blob.bin',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+ ],
+ }],
+ ],
+ }],
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ]
+ },
+ {
+ 'target_name': 'js2c',
+ 'type': 'none',
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ 'variables': {
+ 'library_files': [
+ 'js/macros.py',
+ 'messages.h',
+ 'js/prologue.js',
+ 'js/runtime.js',
+ 'js/v8natives.js',
+ 'js/symbol.js',
+ 'js/array.js',
+ 'js/string.js',
+ 'js/math.js',
+ 'js/regexp.js',
+ 'js/arraybuffer.js',
+ 'js/typedarray.js',
+ 'js/iterator-prototype.js',
+ 'js/collection.js',
+ 'js/weak-collection.js',
+ 'js/collection-iterator.js',
+ 'js/promise.js',
+ 'js/messages.js',
+ 'js/array-iterator.js',
+ 'js/string-iterator.js',
+ 'js/templates.js',
+ 'js/spread.js',
+ 'js/proxy.js',
+ 'debug/mirrors.js',
+ 'debug/debug.js',
+ 'debug/liveedit.js',
+ ],
+ 'experimental_library_files': [
+ 'js/macros.py',
+ 'messages.h',
+ 'js/harmony-atomics.js',
+ 'js/harmony-simd.js',
+ 'js/harmony-string-padding.js',
+ 'js/harmony-async-await.js'
+ ],
+ 'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+ 'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
+ 'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ 'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'library_files': ['js/i18n.js'],
+ 'experimental_library_files': [
+ 'js/icu-case-mapping.js',
+ 'js/intl-extra.js',
+ ],
+ }],
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'js2c',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(library_files)',
+ ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ 'CORE',
+ '<@(library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_bin',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(library_files)',
+ ],
+ 'outputs': ['<@(libraries_bin_file)'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ 'CORE',
+ '<@(library_files)',
+ '--startup_blob', '<@(libraries_bin_file)',
+ '--nojs',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(experimental_library_files)',
+ ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ 'EXPERIMENTAL',
+ '<@(experimental_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_bin',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(experimental_library_files)',
+ ],
+ 'outputs': ['<@(libraries_experimental_bin_file)'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ 'EXPERIMENTAL',
+ '<@(experimental_library_files)',
+ '--startup_blob', '<@(libraries_experimental_bin_file)',
+ '--nojs',
+ ],
+ },
+ {
+ 'action_name': 'js2c_extras',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_extra_library_files)',
+ ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ 'EXTRAS',
+ '<@(v8_extra_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_extras_bin',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_extras_bin_file)'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ 'EXTRAS',
+ '<@(v8_extra_library_files)',
+ '--startup_blob', '<@(libraries_extras_bin_file)',
+ '--nojs',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ ],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras_bin',
+ 'inputs': [
+ '../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_experimental_extras_bin_file)'],
+ 'action': [
+ 'python',
+ '../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
+ '--nojs',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'postmortem-metadata',
+ 'type': 'none',
+ 'variables': {
+ 'heapobject_files': [
+ 'objects.h',
+ 'objects-inl.h',
+ ],
+ },
+ 'actions': [
+ {
+ 'action_name': 'gen-postmortem-metadata',
+ 'inputs': [
+ '../tools/gen-postmortem-metadata.py',
+ '<@(heapobject_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
+ ],
+ 'action': [
+ 'python',
+ '../tools/gen-postmortem-metadata.py',
+ '<@(_outputs)',
+ '<@(heapobject_files)'
+ ]
+ }
+ ]
+ },
+ {
+ 'target_name': 'mksnapshot',
+ 'type': 'executable',
+ 'dependencies': ['v8_base', 'v8_nosnapshot', 'v8_libplatform'],
+ 'include_dirs+': [
+ '..',
+ '<(DEPTH)',
+ ],
+ 'sources': [
+ 'snapshot/mksnapshot.cc',
+ ],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ]
+ }],
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'mkpeephole',
+ 'type': 'executable',
+ 'dependencies': [ 'v8_libbase' ],
+ 'include_dirs+': [
+ '..',
+ ],
+ 'sources': [
+ 'interpreter/bytecode-peephole-table.h',
+ 'interpreter/bytecodes.h',
+ 'interpreter/bytecodes.cc',
+ 'interpreter/mkpeephole.cc'
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset_mkpeephole==1', {
+ 'toolsets': ['host'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index b1ae939f84..d34bce7746 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -64,6 +64,13 @@ class Memory {
static Handle<Object>& Object_Handle_at(Address addr) {
return *reinterpret_cast<Handle<Object>*>(addr);
}
+
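+  // Returns true if |address| lies in the half-open range [base, base + size).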
+ static bool IsAddressInRange(Address base, Address address, uint32_t size) {
+ uintptr_t numeric_base = reinterpret_cast<uintptr_t>(base);
+ uintptr_t numeric_address = reinterpret_cast<uintptr_t>(address);
+ return numeric_base <= numeric_address &&
+ numeric_address < numeric_base + size;
+ }
};
} // namespace internal
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
new file mode 100644
index 0000000000..0af4838abf
--- /dev/null
+++ b/deps/v8/src/value-serializer.cc
@@ -0,0 +1,967 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/value-serializer.h"
+
+#include <type_traits>
+
+#include "src/base/logging.h"
+#include "src/factory.h"
+#include "src/handles-inl.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+static const uint32_t kLatestVersion = 9;
+
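+// Returns the number of bytes the base-128 varint encoding of |value|
+// occupies: one byte per seven bits of payload (values below 128 take one
+// byte; 300 takes two).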
+template <typename T>
+static size_t BytesNeededForVarint(T value) {
+ static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+ "Only unsigned integer types can be written as varints.");
+ size_t result = 0;
+ do {
+ result++;
+ value >>= 7;
+ } while (value);
+ return result;
+}
+
+enum class SerializationTag : uint8_t {
+ // version:uint32_t (if at beginning of data, sets version > 0)
+ kVersion = 0xFF,
+ // ignore
+ kPadding = '\0',
+ // refTableSize:uint32_t (previously used for sanity checks; safe to ignore)
+ kVerifyObjectCount = '?',
+ // Oddballs (no data).
+ kUndefined = '_',
+ kNull = '0',
+ kTrue = 'T',
+ kFalse = 'F',
+ // Number represented as 32-bit integer, ZigZag-encoded
+ // (like sint32 in protobuf)
+ kInt32 = 'I',
+ // Number represented as 32-bit unsigned integer, varint-encoded
+ // (like uint32 in protobuf)
+ kUint32 = 'U',
+ // Number represented as a 64-bit double.
+ // Host byte order is used (N.B. this makes the format non-portable).
+ kDouble = 'N',
+ // byteLength:uint32_t, then raw data
+ kUtf8String = 'S',
+ kTwoByteString = 'c',
+ // Reference to a serialized object. objectID:uint32_t
+ kObjectReference = '^',
+ // Beginning of a JS object.
+ kBeginJSObject = 'o',
+ // End of a JS object. numProperties:uint32_t
+ kEndJSObject = '{',
+ // Beginning of a sparse JS array. length:uint32_t
+ // Elements and properties are written as key/value pairs, like objects.
+ kBeginSparseJSArray = 'a',
+ // End of a sparse JS array. numProperties:uint32_t length:uint32_t
+ kEndSparseJSArray = '@',
+ // Beginning of a dense JS array. length:uint32_t
+ // |length| elements, followed by properties as key/value pairs
+ kBeginDenseJSArray = 'A',
+ // End of a dense JS array. numProperties:uint32_t length:uint32_t
+ kEndDenseJSArray = '$',
+ // Date. millisSinceEpoch:double
+ kDate = 'D',
+ // Boolean object. No data.
+ kTrueObject = 'y',
+ kFalseObject = 'x',
+ // Number object. value:double
+ kNumberObject = 'n',
+ // String object, UTF-8 encoding. byteLength:uint32_t, then raw data.
+ kStringObject = 's',
+ // Regular expression, UTF-8 encoding. byteLength:uint32_t, raw data,
+ // flags:uint32_t.
+ kRegExp = 'R',
+};
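+
+// Example: a stream holding just the value `true` is the three bytes
+// 0xFF 0x09 0x54 -- the kVersion tag, the varint-encoded format version (9),
+// and the kTrue tag ('T').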
+
+ValueSerializer::ValueSerializer(Isolate* isolate)
+ : isolate_(isolate),
+ zone_(isolate->allocator()),
+ id_map_(isolate->heap(), &zone_) {}
+
+ValueSerializer::~ValueSerializer() {}
+
+void ValueSerializer::WriteHeader() {
+ WriteTag(SerializationTag::kVersion);
+ WriteVarint(kLatestVersion);
+}
+
+void ValueSerializer::WriteTag(SerializationTag tag) {
+ buffer_.push_back(static_cast<uint8_t>(tag));
+}
+
+template <typename T>
+void ValueSerializer::WriteVarint(T value) {
+ // Writes an unsigned integer as a base-128 varint.
+ // The number is written, 7 bits at a time, from the least significant to the
+ // most significant 7 bits. Each byte, except the last, has the MSB set.
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
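+  // For example, 300 (binary 10 0101100) is written as 0xAC 0x02: the low
+  // seven bits with the continuation bit set, then the remaining bits.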
+ static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+ "Only unsigned integer types can be written as varints.");
+ uint8_t stack_buffer[sizeof(T) * 8 / 7 + 1];
+ uint8_t* next_byte = &stack_buffer[0];
+ do {
+ *next_byte = (value & 0x7f) | 0x80;
+ next_byte++;
+ value >>= 7;
+ } while (value);
+ *(next_byte - 1) &= 0x7f;
+ buffer_.insert(buffer_.end(), stack_buffer, next_byte);
+}
+
+template <typename T>
+void ValueSerializer::WriteZigZag(T value) {
+ // Writes a signed integer as a varint using ZigZag encoding (i.e. 0 is
+ // encoded as 0, -1 as 1, 1 as 2, -2 as 3, and so on).
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
+ // Note that this implementation relies on the right shift being arithmetic.
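+  // For example, -2 maps to 3 under ZigZag and is therefore written as the
+  // single varint byte 0x03.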
+ static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+ "Only signed integer types can be written as zigzag.");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ WriteVarint((static_cast<UnsignedT>(value) << 1) ^
+ (value >> (8 * sizeof(T) - 1)));
+}
+
+void ValueSerializer::WriteDouble(double value) {
+ // Warning: this uses host endianness.
+ buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(&value),
+ reinterpret_cast<const uint8_t*>(&value + 1));
+}
+
+void ValueSerializer::WriteOneByteString(Vector<const uint8_t> chars) {
+ WriteVarint<uint32_t>(chars.length());
+ buffer_.insert(buffer_.end(), chars.begin(), chars.end());
+}
+
+void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
+ // Warning: this uses host endianness.
+ WriteVarint<uint32_t>(chars.length() * sizeof(uc16));
+ buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(chars.begin()),
+ reinterpret_cast<const uint8_t*>(chars.end()));
+}
+
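+// Reserves |bytes| bytes at the end of the buffer and returns a pointer to
+// them; the pointer is invalidated by any subsequent write that grows the
+// buffer.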
+uint8_t* ValueSerializer::ReserveRawBytes(size_t bytes) {
+ if (!bytes) return nullptr;
+ auto old_size = buffer_.size();
+ buffer_.resize(buffer_.size() + bytes);
+ return &buffer_[old_size];
+}
+
+Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
+ if (object->IsSmi()) {
+ WriteSmi(Smi::cast(*object));
+ return Just(true);
+ }
+
+ DCHECK(object->IsHeapObject());
+ switch (HeapObject::cast(*object)->map()->instance_type()) {
+ case ODDBALL_TYPE:
+ WriteOddball(Oddball::cast(*object));
+ return Just(true);
+ case HEAP_NUMBER_TYPE:
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ WriteHeapNumber(HeapNumber::cast(*object));
+ return Just(true);
+ default:
+ if (object->IsString()) {
+ WriteString(Handle<String>::cast(object));
+ return Just(true);
+ } else if (object->IsJSReceiver()) {
+ return WriteJSReceiver(Handle<JSReceiver>::cast(object));
+ }
+ UNIMPLEMENTED();
+ return Nothing<bool>();
+ }
+}
+
+void ValueSerializer::WriteOddball(Oddball* oddball) {
+ SerializationTag tag = SerializationTag::kUndefined;
+ switch (oddball->kind()) {
+ case Oddball::kUndefined:
+ tag = SerializationTag::kUndefined;
+ break;
+ case Oddball::kFalse:
+ tag = SerializationTag::kFalse;
+ break;
+ case Oddball::kTrue:
+ tag = SerializationTag::kTrue;
+ break;
+ case Oddball::kNull:
+ tag = SerializationTag::kNull;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ WriteTag(tag);
+}
+
+void ValueSerializer::WriteSmi(Smi* smi) {
+ static_assert(kSmiValueSize <= 32, "Expected SMI <= 32 bits.");
+ WriteTag(SerializationTag::kInt32);
+ WriteZigZag<int32_t>(smi->value());
+}
+
+void ValueSerializer::WriteHeapNumber(HeapNumber* number) {
+ WriteTag(SerializationTag::kDouble);
+ WriteDouble(number->value());
+}
+
+void ValueSerializer::WriteString(Handle<String> string) {
+ string = String::Flatten(string);
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = string->GetFlatContent();
+ DCHECK(flat.IsFlat());
+ if (flat.IsOneByte()) {
+ // The existing format uses UTF-8, rather than Latin-1. As a result we must
+    // do extra work to encode strings that have characters outside ASCII.
+ // TODO(jbroman): In a future format version, consider adding a tag for
+ // Latin-1 strings, so that this can be skipped.
+ WriteTag(SerializationTag::kUtf8String);
+ Vector<const uint8_t> chars = flat.ToOneByteVector();
+ if (String::IsAscii(chars.begin(), chars.length())) {
+ WriteOneByteString(chars);
+ } else {
+ v8::Local<v8::String> api_string = Utils::ToLocal(string);
+ uint32_t utf8_length = api_string->Utf8Length();
+ WriteVarint(utf8_length);
+ api_string->WriteUtf8(
+ reinterpret_cast<char*>(ReserveRawBytes(utf8_length)), utf8_length,
+ nullptr, v8::String::NO_NULL_TERMINATION);
+ }
+ } else if (flat.IsTwoByte()) {
+ Vector<const uc16> chars = flat.ToUC16Vector();
+ uint32_t byte_length = chars.length() * sizeof(uc16);
+    // The existing reading code expects two-byte string data to start on an
+    // even offset, so write a padding byte if the tag and the length varint
+    // would otherwise leave the data misaligned.
+ if ((buffer_.size() + 1 + BytesNeededForVarint(byte_length)) & 1)
+ WriteTag(SerializationTag::kPadding);
+ WriteTag(SerializationTag::kTwoByteString);
+ WriteTwoByteString(chars);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
+ // If the object has already been serialized, just write its ID.
+ uint32_t* id_map_entry = id_map_.Get(receiver);
+ if (uint32_t id = *id_map_entry) {
+ WriteTag(SerializationTag::kObjectReference);
+ WriteVarint(id - 1);
+ return Just(true);
+ }
+
+ // Otherwise, allocate an ID for it.
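+  // IDs are stored in the map with a +1 offset so that a zero entry means
+  // "not yet serialized"; the offset is stripped again when a reference is
+  // written above.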
+ uint32_t id = next_id_++;
+ *id_map_entry = id + 1;
+
+ // Eliminate callable and exotic objects, which should not be serialized.
+ InstanceType instance_type = receiver->map()->instance_type();
+ if (receiver->IsCallable() || instance_type <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return Nothing<bool>();
+ }
+
+ // If we are at the end of the stack, abort. This function may recurse.
+ if (StackLimitCheck(isolate_).HasOverflowed()) return Nothing<bool>();
+
+ HandleScope scope(isolate_);
+ switch (instance_type) {
+ case JS_ARRAY_TYPE:
+ return WriteJSArray(Handle<JSArray>::cast(receiver));
+ case JS_OBJECT_TYPE:
+ case JS_API_OBJECT_TYPE:
+ return WriteJSObject(Handle<JSObject>::cast(receiver));
+ case JS_DATE_TYPE:
+ WriteJSDate(JSDate::cast(*receiver));
+ return Just(true);
+ case JS_VALUE_TYPE:
+ return WriteJSValue(Handle<JSValue>::cast(receiver));
+ case JS_REGEXP_TYPE:
+ WriteJSRegExp(JSRegExp::cast(*receiver));
+ return Just(true);
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ return Nothing<bool>();
+}
+
+Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
+ WriteTag(SerializationTag::kBeginJSObject);
+ Handle<FixedArray> keys;
+ uint32_t properties_written;
+ if (!KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS)
+ .ToHandle(&keys) ||
+ !WriteJSObjectProperties(object, keys).To(&properties_written)) {
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kEndJSObject);
+ WriteVarint<uint32_t>(properties_written);
+ return Just(true);
+}
+
+Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
+ uint32_t length = 0;
+ bool valid_length = array->length()->ToArrayLength(&length);
+ DCHECK(valid_length);
+ USE(valid_length);
+
+ // To keep things simple, for now we decide between dense and sparse
+ // serialization based on elements kind. A more principled heuristic could
+ // count the elements, but would need to take care to note which indices
+ // existed (as only indices which were enumerable own properties at this point
+ // should be serialized).
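+  // (For instance, [1, 2, 3] has fast packed elements and is written densely
+  // below, while new Array(1000) with only a few entries set has holey
+  // elements and takes the sparse key-value path.)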
+ const bool should_serialize_densely =
+ array->HasFastElements() && !array->HasFastHoleyElements();
+
+ if (should_serialize_densely) {
+ // TODO(jbroman): Distinguish between undefined and a hole (this can happen
+ // if serializing one of the elements deletes another). This requires wire
+ // format changes.
+ WriteTag(SerializationTag::kBeginDenseJSArray);
+ WriteVarint<uint32_t>(length);
+ for (uint32_t i = 0; i < length; i++) {
+      // Serializing the array's elements can have arbitrary side effects, so
+      // we cannot rely on the array still having fast elements, even if it
+      // did to begin with.
+ Handle<Object> element;
+ LookupIterator it(isolate_, array, i, array, LookupIterator::OWN);
+ if (!Object::GetProperty(&it).ToHandle(&element) ||
+ !WriteObject(element).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ }
+ KeyAccumulator accumulator(isolate_, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS);
+ if (!accumulator.CollectOwnPropertyNames(array, array).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ Handle<FixedArray> keys =
+ accumulator.GetKeys(GetKeysConversion::kConvertToString);
+ uint32_t properties_written;
+ if (!WriteJSObjectProperties(array, keys).To(&properties_written)) {
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kEndDenseJSArray);
+ WriteVarint<uint32_t>(properties_written);
+ WriteVarint<uint32_t>(length);
+ } else {
+ WriteTag(SerializationTag::kBeginSparseJSArray);
+ WriteVarint<uint32_t>(length);
+ Handle<FixedArray> keys;
+ uint32_t properties_written;
+ if (!KeyAccumulator::GetKeys(array, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS)
+ .ToHandle(&keys) ||
+ !WriteJSObjectProperties(array, keys).To(&properties_written)) {
+ return Nothing<bool>();
+ }
+ WriteTag(SerializationTag::kEndSparseJSArray);
+ WriteVarint<uint32_t>(properties_written);
+ WriteVarint<uint32_t>(length);
+ }
+ return Just(true);
+}
+
+void ValueSerializer::WriteJSDate(JSDate* date) {
+ WriteTag(SerializationTag::kDate);
+ WriteDouble(date->value()->Number());
+}
+
+Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
+ Object* inner_value = value->value();
+ if (inner_value->IsTrue(isolate_)) {
+ WriteTag(SerializationTag::kTrueObject);
+ } else if (inner_value->IsFalse(isolate_)) {
+ WriteTag(SerializationTag::kFalseObject);
+ } else if (inner_value->IsNumber()) {
+ WriteTag(SerializationTag::kNumberObject);
+ WriteDouble(inner_value->Number());
+ } else if (inner_value->IsString()) {
+ // TODO(jbroman): Replace UTF-8 encoding with the same options available for
+ // ordinary strings.
+ WriteTag(SerializationTag::kStringObject);
+ v8::Local<v8::String> api_string =
+ Utils::ToLocal(handle(String::cast(inner_value), isolate_));
+ uint32_t utf8_length = api_string->Utf8Length();
+ WriteVarint(utf8_length);
+ api_string->WriteUtf8(reinterpret_cast<char*>(ReserveRawBytes(utf8_length)),
+ utf8_length, nullptr,
+ v8::String::NO_NULL_TERMINATION);
+ } else {
+ DCHECK(inner_value->IsSymbol());
+ return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+void ValueSerializer::WriteJSRegExp(JSRegExp* regexp) {
+ WriteTag(SerializationTag::kRegExp);
+ v8::Local<v8::String> api_string =
+ Utils::ToLocal(handle(regexp->Pattern(), isolate_));
+ uint32_t utf8_length = api_string->Utf8Length();
+ WriteVarint(utf8_length);
+ api_string->WriteUtf8(reinterpret_cast<char*>(ReserveRawBytes(utf8_length)),
+ utf8_length, nullptr, v8::String::NO_NULL_TERMINATION);
+ WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
+}
+
+Maybe<uint32_t> ValueSerializer::WriteJSObjectProperties(
+ Handle<JSObject> object, Handle<FixedArray> keys) {
+ uint32_t properties_written = 0;
+ int length = keys->length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> key(keys->get(i), isolate_);
+
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, object, key, &success, LookupIterator::OWN);
+ DCHECK(success);
+ Handle<Object> value;
+ if (!Object::GetProperty(&it).ToHandle(&value)) return Nothing<uint32_t>();
+
+ // If the property is no longer found, do not serialize it.
+ // This could happen if a getter deleted the property.
+ if (!it.IsFound()) continue;
+
+ if (!WriteObject(key).FromMaybe(false) ||
+ !WriteObject(value).FromMaybe(false)) {
+ return Nothing<uint32_t>();
+ }
+
+ properties_written++;
+ }
+ return Just(properties_written);
+}
+
+ValueDeserializer::ValueDeserializer(Isolate* isolate,
+ Vector<const uint8_t> data)
+ : isolate_(isolate),
+ position_(data.start()),
+ end_(data.start() + data.length()),
+ id_map_(Handle<SeededNumberDictionary>::cast(
+ isolate->global_handles()->Create(
+ *SeededNumberDictionary::New(isolate, 0)))) {}
+
+ValueDeserializer::~ValueDeserializer() {
+ GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
+}
+
+Maybe<bool> ValueDeserializer::ReadHeader() {
+ if (position_ < end_ &&
+ *position_ == static_cast<uint8_t>(SerializationTag::kVersion)) {
+ ReadTag().ToChecked();
+ if (!ReadVarint<uint32_t>().To(&version_)) return Nothing<bool>();
+ if (version_ > kLatestVersion) return Nothing<bool>();
+ }
+ return Just(true);
+}
+
+Maybe<SerializationTag> ValueDeserializer::PeekTag() const {
+ const uint8_t* peek_position = position_;
+ SerializationTag tag;
+ do {
+ if (peek_position >= end_) return Nothing<SerializationTag>();
+ tag = static_cast<SerializationTag>(*peek_position);
+ peek_position++;
+ } while (tag == SerializationTag::kPadding);
+ return Just(tag);
+}
+
+void ValueDeserializer::ConsumeTag(SerializationTag peeked_tag) {
+ SerializationTag actual_tag = ReadTag().ToChecked();
+ DCHECK(actual_tag == peeked_tag);
+ USE(actual_tag);
+}
+
+Maybe<SerializationTag> ValueDeserializer::ReadTag() {
+ SerializationTag tag;
+ do {
+ if (position_ >= end_) return Nothing<SerializationTag>();
+ tag = static_cast<SerializationTag>(*position_);
+ position_++;
+ } while (tag == SerializationTag::kPadding);
+ return Just(tag);
+}
+
+template <typename T>
+Maybe<T> ValueDeserializer::ReadVarint() {
+ // Reads an unsigned integer as a base-128 varint.
+ // The number is written, 7 bits at a time, from the least significant to the
+ // most significant 7 bits. Each byte, except the last, has the MSB set.
+ // If the varint is larger than T, any more significant bits are discarded.
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
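+  // (Worked example: 300 arrives as 0xAC 0x02; the first byte contributes
+  // 0x2C, the second contributes 0x02 << 7 == 256, and its clear MSB ends
+  // the loop with value == 300.)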
+ static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+ "Only unsigned integer types can be read as varints.");
+ T value = 0;
+ unsigned shift = 0;
+ bool has_another_byte;
+ do {
+ if (position_ >= end_) return Nothing<T>();
+ uint8_t byte = *position_;
+ if (V8_LIKELY(shift < sizeof(T) * 8)) {
+ value |= (byte & 0x7f) << shift;
+ shift += 7;
+ }
+ has_another_byte = byte & 0x80;
+ position_++;
+ } while (has_another_byte);
+ return Just(value);
+}
+
+template <typename T>
+Maybe<T> ValueDeserializer::ReadZigZag() {
+  // Reads a signed integer as a varint using ZigZag encoding (i.e. 0 is
+ // encoded as 0, -1 as 1, 1 as 2, -2 as 3, and so on).
+ // See also https://developers.google.com/protocol-buffers/docs/encoding
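+  // (Worked example: the varint value 5 decodes as (5 >> 1) ^ -(5 & 1) ==
+  // 2 ^ -1 == -3, the inverse of encoding -3 as 5.)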
+ static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
+ "Only signed integer types can be read as zigzag.");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ UnsignedT unsigned_value;
+ if (!ReadVarint<UnsignedT>().To(&unsigned_value)) return Nothing<T>();
+ return Just(static_cast<T>((unsigned_value >> 1) ^
+ -static_cast<T>(unsigned_value & 1)));
+}
+
+Maybe<double> ValueDeserializer::ReadDouble() {
+ // Warning: this uses host endianness.
+ if (position_ > end_ - sizeof(double)) return Nothing<double>();
+ double value;
+ memcpy(&value, position_, sizeof(double));
+ position_ += sizeof(double);
+ if (std::isnan(value)) value = std::numeric_limits<double>::quiet_NaN();
+ return Just(value);
+}
+
+Maybe<Vector<const uint8_t>> ValueDeserializer::ReadRawBytes(int size) {
+ if (size > end_ - position_) return Nothing<Vector<const uint8_t>>();
+ const uint8_t* start = position_;
+ position_ += size;
+ return Just(Vector<const uint8_t>(start, size));
+}
+
+MaybeHandle<Object> ValueDeserializer::ReadObject() {
+ SerializationTag tag;
+ if (!ReadTag().To(&tag)) return MaybeHandle<Object>();
+ switch (tag) {
+ case SerializationTag::kVerifyObjectCount:
+ // Read the count and ignore it.
+ if (ReadVarint<uint32_t>().IsNothing()) return MaybeHandle<Object>();
+ return ReadObject();
+ case SerializationTag::kUndefined:
+ return isolate_->factory()->undefined_value();
+ case SerializationTag::kNull:
+ return isolate_->factory()->null_value();
+ case SerializationTag::kTrue:
+ return isolate_->factory()->true_value();
+ case SerializationTag::kFalse:
+ return isolate_->factory()->false_value();
+ case SerializationTag::kInt32: {
+ Maybe<int32_t> number = ReadZigZag<int32_t>();
+ if (number.IsNothing()) return MaybeHandle<Object>();
+ return isolate_->factory()->NewNumberFromInt(number.FromJust());
+ }
+ case SerializationTag::kUint32: {
+ Maybe<uint32_t> number = ReadVarint<uint32_t>();
+ if (number.IsNothing()) return MaybeHandle<Object>();
+ return isolate_->factory()->NewNumberFromUint(number.FromJust());
+ }
+ case SerializationTag::kDouble: {
+ Maybe<double> number = ReadDouble();
+ if (number.IsNothing()) return MaybeHandle<Object>();
+ return isolate_->factory()->NewNumber(number.FromJust());
+ }
+ case SerializationTag::kUtf8String:
+ return ReadUtf8String();
+ case SerializationTag::kTwoByteString:
+ return ReadTwoByteString();
+ case SerializationTag::kObjectReference: {
+ uint32_t id;
+ if (!ReadVarint<uint32_t>().To(&id)) return MaybeHandle<Object>();
+ return GetObjectWithID(id);
+ }
+ case SerializationTag::kBeginJSObject:
+ return ReadJSObject();
+ case SerializationTag::kBeginSparseJSArray:
+ return ReadSparseJSArray();
+ case SerializationTag::kBeginDenseJSArray:
+ return ReadDenseJSArray();
+ case SerializationTag::kDate:
+ return ReadJSDate();
+ case SerializationTag::kTrueObject:
+ case SerializationTag::kFalseObject:
+ case SerializationTag::kNumberObject:
+ case SerializationTag::kStringObject:
+ return ReadJSValue(tag);
+ case SerializationTag::kRegExp:
+ return ReadJSRegExp();
+ default:
+ return MaybeHandle<Object>();
+ }
+}
+
+MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
+ uint32_t utf8_length;
+ Vector<const uint8_t> utf8_bytes;
+ if (!ReadVarint<uint32_t>().To(&utf8_length) ||
+ utf8_length >
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+ !ReadRawBytes(utf8_length).To(&utf8_bytes))
+ return MaybeHandle<String>();
+ return isolate_->factory()->NewStringFromUtf8(
+ Vector<const char>::cast(utf8_bytes));
+}
+
+MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
+ uint32_t byte_length;
+ Vector<const uint8_t> bytes;
+ if (!ReadVarint<uint32_t>().To(&byte_length) ||
+ byte_length >
+ static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+ byte_length % sizeof(uc16) != 0 || !ReadRawBytes(byte_length).To(&bytes))
+ return MaybeHandle<String>();
+
+ // Allocate an uninitialized string so that we can do a raw memcpy into the
+ // string on the heap (regardless of alignment).
+ Handle<SeqTwoByteString> string;
+ if (!isolate_->factory()
+ ->NewRawTwoByteString(byte_length / sizeof(uc16))
+ .ToHandle(&string))
+ return MaybeHandle<String>();
+
+ // Copy the bytes directly into the new string.
+ // Warning: this uses host endianness.
+ memcpy(string->GetChars(), bytes.begin(), bytes.length());
+ return string;
+}
+
+MaybeHandle<JSObject> ValueDeserializer::ReadJSObject() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSObject>();
+
+ uint32_t id = next_id_++;
+ HandleScope scope(isolate_);
+ Handle<JSObject> object =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+ AddObjectWithID(id, object);
+
+ uint32_t num_properties;
+ uint32_t expected_num_properties;
+ if (!ReadJSObjectProperties(object, SerializationTag::kEndJSObject)
+ .To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_num_properties) ||
+ num_properties != expected_num_properties) {
+ return MaybeHandle<JSObject>();
+ }
+
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(object);
+}
+
+MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSArray>();
+
+ uint32_t length;
+ if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
+
+ uint32_t id = next_id_++;
+ HandleScope scope(isolate_);
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
+ JSArray::SetLength(array, length);
+ AddObjectWithID(id, array);
+
+ uint32_t num_properties;
+ uint32_t expected_num_properties;
+ uint32_t expected_length;
+ if (!ReadJSObjectProperties(array, SerializationTag::kEndSparseJSArray)
+ .To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_length) ||
+ num_properties != expected_num_properties || length != expected_length) {
+ return MaybeHandle<JSArray>();
+ }
+
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(array);
+}
+
+MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
+ // If we are at the end of the stack, abort. This function may recurse.
+ if (StackLimitCheck(isolate_).HasOverflowed()) return MaybeHandle<JSArray>();
+
+ uint32_t length;
+ if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
+
+ uint32_t id = next_id_++;
+ HandleScope scope(isolate_);
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(
+ FAST_HOLEY_ELEMENTS, length, length, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ AddObjectWithID(id, array);
+
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_);
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<Object> element;
+ if (!ReadObject().ToHandle(&element)) return MaybeHandle<JSArray>();
+ // TODO(jbroman): Distinguish between undefined and a hole.
+ if (element->IsUndefined(isolate_)) continue;
+ elements->set(i, *element);
+ }
+
+ uint32_t num_properties;
+ uint32_t expected_num_properties;
+ uint32_t expected_length;
+ if (!ReadJSObjectProperties(array, SerializationTag::kEndDenseJSArray)
+ .To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_num_properties) ||
+ !ReadVarint<uint32_t>().To(&expected_length) ||
+ num_properties != expected_num_properties || length != expected_length) {
+ return MaybeHandle<JSArray>();
+ }
+
+ DCHECK(HasObjectWithID(id));
+ return scope.CloseAndEscape(array);
+}
+
+MaybeHandle<JSDate> ValueDeserializer::ReadJSDate() {
+ double value;
+ if (!ReadDouble().To(&value)) return MaybeHandle<JSDate>();
+ uint32_t id = next_id_++;
+ Handle<JSDate> date;
+ if (!JSDate::New(isolate_->date_function(), isolate_->date_function(), value)
+ .ToHandle(&date)) {
+ return MaybeHandle<JSDate>();
+ }
+ AddObjectWithID(id, date);
+ return date;
+}
+
+MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
+ uint32_t id = next_id_++;
+ Handle<JSValue> value;
+ switch (tag) {
+ case SerializationTag::kTrueObject:
+ value = Handle<JSValue>::cast(
+ isolate_->factory()->NewJSObject(isolate_->boolean_function()));
+ value->set_value(isolate_->heap()->true_value());
+ break;
+ case SerializationTag::kFalseObject:
+ value = Handle<JSValue>::cast(
+ isolate_->factory()->NewJSObject(isolate_->boolean_function()));
+ value->set_value(isolate_->heap()->false_value());
+ break;
+ case SerializationTag::kNumberObject: {
+ double number;
+ if (!ReadDouble().To(&number)) return MaybeHandle<JSValue>();
+ value = Handle<JSValue>::cast(
+ isolate_->factory()->NewJSObject(isolate_->number_function()));
+ Handle<Object> number_object = isolate_->factory()->NewNumber(number);
+ value->set_value(*number_object);
+ break;
+ }
+ case SerializationTag::kStringObject: {
+ Handle<String> string;
+ if (!ReadUtf8String().ToHandle(&string)) return MaybeHandle<JSValue>();
+ value = Handle<JSValue>::cast(
+ isolate_->factory()->NewJSObject(isolate_->string_function()));
+ value->set_value(*string);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ return MaybeHandle<JSValue>();
+ }
+ AddObjectWithID(id, value);
+ return value;
+}
+
+MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
+ uint32_t id = next_id_++;
+ Handle<String> pattern;
+ uint32_t raw_flags;
+ Handle<JSRegExp> regexp;
+ if (!ReadUtf8String().ToHandle(&pattern) ||
+ !ReadVarint<uint32_t>().To(&raw_flags) ||
+ !JSRegExp::New(pattern, static_cast<JSRegExp::Flags>(raw_flags))
+ .ToHandle(&regexp)) {
+ return MaybeHandle<JSRegExp>();
+ }
+ AddObjectWithID(id, regexp);
+ return regexp;
+}
+
+Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
+ Handle<JSObject> object, SerializationTag end_tag) {
+ for (uint32_t num_properties = 0;; num_properties++) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) return Nothing<uint32_t>();
+ if (tag == end_tag) {
+ ConsumeTag(end_tag);
+ return Just(num_properties);
+ }
+
+ Handle<Object> key;
+ if (!ReadObject().ToHandle(&key)) return Nothing<uint32_t>();
+ Handle<Object> value;
+ if (!ReadObject().ToHandle(&value)) return Nothing<uint32_t>();
+
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate_, object, key, &success, LookupIterator::OWN);
+ if (!success ||
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
+ .is_null()) {
+ return Nothing<uint32_t>();
+ }
+ }
+}
+
+bool ValueDeserializer::HasObjectWithID(uint32_t id) {
+ return id_map_->Has(isolate_, id);
+}
+
+MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
+ int index = id_map_->FindEntry(isolate_, id);
+ if (index == SeededNumberDictionary::kNotFound) {
+ return MaybeHandle<JSReceiver>();
+ }
+ Object* value = id_map_->ValueAt(index);
+ DCHECK(value->IsJSReceiver());
+ return Handle<JSReceiver>(JSReceiver::cast(value), isolate_);
+}
+
+void ValueDeserializer::AddObjectWithID(uint32_t id,
+ Handle<JSReceiver> object) {
+ DCHECK(!HasObjectWithID(id));
+ const bool used_as_prototype = false;
+ Handle<SeededNumberDictionary> new_dictionary =
+ SeededNumberDictionary::AtNumberPut(id_map_, id, object,
+ used_as_prototype);
+
+ // If the dictionary was reallocated, update the global handle.
+ if (!new_dictionary.is_identical_to(id_map_)) {
+ GlobalHandles::Destroy(Handle<Object>::cast(id_map_).location());
+ id_map_ = Handle<SeededNumberDictionary>::cast(
+ isolate_->global_handles()->Create(*new_dictionary));
+ }
+}
+
+static Maybe<bool> SetPropertiesFromKeyValuePairs(Isolate* isolate,
+ Handle<JSObject> object,
+ Handle<Object>* data,
+ uint32_t num_properties) {
+ for (unsigned i = 0; i < 2 * num_properties; i += 2) {
+ Handle<Object> key = data[i];
+ Handle<Object> value = data[i + 1];
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, object, key, &success, LookupIterator::OWN);
+ if (!success ||
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE)
+ .is_null()) {
+ return Nothing<bool>();
+ }
+ }
+ return Just(true);
+}
+
+MaybeHandle<Object>
+ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
+ if (version_ > 0) return MaybeHandle<Object>();
+
+ HandleScope scope(isolate_);
+ std::vector<Handle<Object>> stack;
+ while (position_ < end_) {
+ SerializationTag tag;
+ if (!PeekTag().To(&tag)) break;
+
+ Handle<Object> new_object;
+ switch (tag) {
+ case SerializationTag::kEndJSObject: {
+ ConsumeTag(SerializationTag::kEndJSObject);
+
+ // JS Object: Read the last 2*n values from the stack and use them as
+ // key-value pairs.
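+        // (For example, a version-0 stream for {a: 1} pushes "a" and 1 onto
+        // the stack before this tag; num_properties == 1 then consumes them
+        // as a single key-value pair.)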
+ uint32_t num_properties;
+ if (!ReadVarint<uint32_t>().To(&num_properties) ||
+ stack.size() / 2 < num_properties) {
+ return MaybeHandle<Object>();
+ }
+
+ size_t begin_properties =
+ stack.size() - 2 * static_cast<size_t>(num_properties);
+ Handle<JSObject> js_object =
+ isolate_->factory()->NewJSObject(isolate_->object_function());
+ if (num_properties &&
+ !SetPropertiesFromKeyValuePairs(
+ isolate_, js_object, &stack[begin_properties], num_properties)
+ .FromMaybe(false)) {
+ return MaybeHandle<Object>();
+ }
+
+ stack.resize(begin_properties);
+ new_object = js_object;
+ break;
+ }
+ case SerializationTag::kEndSparseJSArray: {
+ ConsumeTag(SerializationTag::kEndSparseJSArray);
+
+        // Sparse JS Array: Read the last 2*|num_properties| values from the
+        // stack.
+ uint32_t num_properties;
+ uint32_t length;
+ if (!ReadVarint<uint32_t>().To(&num_properties) ||
+ !ReadVarint<uint32_t>().To(&length) ||
+ stack.size() / 2 < num_properties) {
+ return MaybeHandle<Object>();
+ }
+
+ Handle<JSArray> js_array = isolate_->factory()->NewJSArray(0);
+ JSArray::SetLength(js_array, length);
+ size_t begin_properties =
+ stack.size() - 2 * static_cast<size_t>(num_properties);
+ if (num_properties &&
+ !SetPropertiesFromKeyValuePairs(
+ isolate_, js_array, &stack[begin_properties], num_properties)
+ .FromMaybe(false)) {
+ return MaybeHandle<Object>();
+ }
+
+ stack.resize(begin_properties);
+ new_object = js_array;
+ break;
+ }
+ case SerializationTag::kEndDenseJSArray:
+ // This was already broken in Chromium, and apparently wasn't missed.
+ return MaybeHandle<Object>();
+ default:
+ if (!ReadObject().ToHandle(&new_object)) return MaybeHandle<Object>();
+ break;
+ }
+ stack.push_back(new_object);
+ }
+
+// Nothing remains but padding.
+#ifdef DEBUG
+ while (position_ < end_) {
+ DCHECK(*position_++ == static_cast<uint8_t>(SerializationTag::kPadding));
+ }
+#endif
+ position_ = end_;
+
+ if (stack.size() != 1) return MaybeHandle<Object>();
+ return scope.CloseAndEscape(stack[0]);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
new file mode 100644
index 0000000000..ab9c664899
--- /dev/null
+++ b/deps/v8/src/value-serializer.h
@@ -0,0 +1,181 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_VALUE_SERIALIZER_H_
+#define V8_VALUE_SERIALIZER_H_
+
+#include <cstdint>
+#include <vector>
+
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
+#include "src/identity-map.h"
+#include "src/vector.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapNumber;
+class Isolate;
+class JSDate;
+class JSRegExp;
+class JSValue;
+class Object;
+class Oddball;
+class Smi;
+
+enum class SerializationTag : uint8_t;
+
+/**
+ * Writes V8 objects in a binary format that allows the objects to be cloned
+ * according to the HTML structured clone algorithm.
+ *
+ * Format is based on Blink's previous serialization logic.
+ */
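+//
+// A minimal usage sketch (illustrative only; assumes an internal Isolate* and
+// a Handle<Object> are in scope):
+//
+//   ValueSerializer serializer(isolate);
+//   serializer.WriteHeader();
+//   if (serializer.WriteObject(object).FromMaybe(false)) {
+//     std::vector<uint8_t> wire_data = serializer.ReleaseBuffer();
+//   }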
+class ValueSerializer {
+ public:
+ explicit ValueSerializer(Isolate* isolate);
+ ~ValueSerializer();
+
+ /*
+ * Writes out a header, which includes the format version.
+ */
+ void WriteHeader();
+
+ /*
+ * Serializes a V8 object into the buffer.
+ */
+ Maybe<bool> WriteObject(Handle<Object> object) WARN_UNUSED_RESULT;
+
+ /*
+ * Returns the stored data. This serializer should not be used once the buffer
+ * is released. The contents are undefined if a previous write has failed.
+ */
+ std::vector<uint8_t> ReleaseBuffer() { return std::move(buffer_); }
+
+ private:
+ // Writing the wire format.
+ void WriteTag(SerializationTag tag);
+ template <typename T>
+ void WriteVarint(T value);
+ template <typename T>
+ void WriteZigZag(T value);
+ void WriteDouble(double value);
+ void WriteOneByteString(Vector<const uint8_t> chars);
+ void WriteTwoByteString(Vector<const uc16> chars);
+ uint8_t* ReserveRawBytes(size_t bytes);
+
+ // Writing V8 objects of various kinds.
+ void WriteOddball(Oddball* oddball);
+ void WriteSmi(Smi* smi);
+ void WriteHeapNumber(HeapNumber* number);
+ void WriteString(Handle<String> string);
+ Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSArray(Handle<JSArray> array) WARN_UNUSED_RESULT;
+ void WriteJSDate(JSDate* date);
+ Maybe<bool> WriteJSValue(Handle<JSValue> value) WARN_UNUSED_RESULT;
+ void WriteJSRegExp(JSRegExp* regexp);
+
+ /*
+ * Reads the specified keys from the object and writes key-value pairs to the
+ * buffer. Returns the number of keys actually written, which may be smaller
+ * if some keys are not own properties when accessed.
+ */
+ Maybe<uint32_t> WriteJSObjectProperties(
+ Handle<JSObject> object, Handle<FixedArray> keys) WARN_UNUSED_RESULT;
+
+ Isolate* const isolate_;
+ std::vector<uint8_t> buffer_;
+ Zone zone_;
+
+  // To avoid extra lookups in the identity map, ID+1 is actually stored in the
+  // map (a stored value of zero is then the fast way of checking whether an
+  // entry is new).
+ IdentityMap<uint32_t> id_map_;
+ uint32_t next_id_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
+};
+
+/*
+ * Deserializes values from data written with ValueSerializer, or a compatible
+ * implementation.
+ */
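+//
+// A matching read-side sketch (illustrative; |wire_data| as produced by
+// ValueSerializer above):
+//
+//   ValueDeserializer deserializer(
+//       isolate, Vector<const uint8_t>(wire_data.data(),
+//                                      static_cast<int>(wire_data.size())));
+//   Handle<Object> result;
+//   if (deserializer.ReadHeader().FromMaybe(false) &&
+//       deserializer.ReadObject().ToHandle(&result)) {
+//     // |result| now holds the deserialized value.
+//   }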
+class ValueDeserializer {
+ public:
+ ValueDeserializer(Isolate* isolate, Vector<const uint8_t> data);
+ ~ValueDeserializer();
+
+ /*
+ * Runs version detection logic, which may fail if the format is invalid.
+ */
+ Maybe<bool> ReadHeader() WARN_UNUSED_RESULT;
+
+ /*
+ * Deserializes a V8 object from the buffer.
+ */
+ MaybeHandle<Object> ReadObject() WARN_UNUSED_RESULT;
+
+ /*
+ * Reads an object, consuming the entire buffer.
+ *
+ * This is required for the legacy "version 0" format, which did not allow
+ * reference deduplication, and instead relied on a "stack" model for
+ * deserializing, with the contents of objects and arrays provided first.
+ */
+ MaybeHandle<Object> ReadObjectUsingEntireBufferForLegacyFormat()
+ WARN_UNUSED_RESULT;
+
+ private:
+ // Reading the wire format.
+ Maybe<SerializationTag> PeekTag() const WARN_UNUSED_RESULT;
+ void ConsumeTag(SerializationTag peeked_tag);
+ Maybe<SerializationTag> ReadTag() WARN_UNUSED_RESULT;
+ template <typename T>
+ Maybe<T> ReadVarint() WARN_UNUSED_RESULT;
+ template <typename T>
+ Maybe<T> ReadZigZag() WARN_UNUSED_RESULT;
+ Maybe<double> ReadDouble() WARN_UNUSED_RESULT;
+ Maybe<Vector<const uint8_t>> ReadRawBytes(int size) WARN_UNUSED_RESULT;
+
+ // Reading V8 objects of specific kinds.
+ // The tag is assumed to have already been read.
+ MaybeHandle<String> ReadUtf8String() WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadTwoByteString() WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadJSObject() WARN_UNUSED_RESULT;
+ MaybeHandle<JSArray> ReadSparseJSArray() WARN_UNUSED_RESULT;
+ MaybeHandle<JSArray> ReadDenseJSArray() WARN_UNUSED_RESULT;
+ MaybeHandle<JSDate> ReadJSDate() WARN_UNUSED_RESULT;
+ MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) WARN_UNUSED_RESULT;
+ MaybeHandle<JSRegExp> ReadJSRegExp() WARN_UNUSED_RESULT;
+
+ /*
+ * Reads key-value pairs into the object until the specified end tag is
+ * encountered. If successful, returns the number of properties read.
+ */
+ Maybe<uint32_t> ReadJSObjectProperties(Handle<JSObject> object,
+ SerializationTag end_tag);
+
+ // Manipulating the map from IDs to reified objects.
+ bool HasObjectWithID(uint32_t id);
+ MaybeHandle<JSReceiver> GetObjectWithID(uint32_t id);
+ void AddObjectWithID(uint32_t id, Handle<JSReceiver> object);
+
+ Isolate* const isolate_;
+ const uint8_t* position_;
+ const uint8_t* const end_;
+ uint32_t version_ = 0;
+ Handle<SeededNumberDictionary> id_map_; // Always a global handle.
+ uint32_t next_id_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(ValueDeserializer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_VALUE_SERIALIZER_H_
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index e4637c91c9..d120dfc4ac 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -24,6 +24,9 @@ class Vector {
DCHECK(length == 0 || (length > 0 && data != NULL));
}
+ template <int N>
+ explicit Vector(T (&arr)[N]) : start_(arr), length_(N) {}
+
static Vector<T> New(int length) {
return Vector<T>(NewArray<T>(length), length);
}
@@ -201,6 +204,10 @@ inline Vector<char> MutableCStrVector(char* data, int max) {
return Vector<char>(data, (length < max) ? length : max);
}
+template <typename T, int N>
+inline Vector<T> ArrayVector(T (&arr)[N]) {
+ return Vector<T>(arr);
+}
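+// For example (an illustrative sketch):
+//   int primes[] = {2, 3, 5};
+//   Vector<int> vec = ArrayVector(primes);  // length 3, wraps the array.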
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index c8bd4e8082..35b69a1ddc 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -63,19 +63,11 @@ ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
- if (FLAG_runtime_call_stats) {
- RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
- timer_.Initialize(&stats->ExternalCallback, stats->current_timer());
- stats->Enter(&timer_);
- }
TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
"V8.ExternalCallback");
}
ExternalCallbackScope::~ExternalCallbackScope() {
- if (FLAG_runtime_call_stats) {
- isolate_->counters()->runtime_call_stats()->Leave(&timer_);
- }
isolate_->set_external_callback_scope(previous_scope_);
TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"),
"V8.ExternalCallback");
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index 3f8d3811b3..29cbf39593 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -49,7 +49,6 @@ class ExternalCallbackScope BASE_EMBEDDED {
Isolate* isolate_;
Address callback_;
ExternalCallbackScope* previous_scope_;
- RuntimeCallTimer timer_;
#ifdef USE_SIMULATOR
Address scope_address_;
#endif
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index a9d24ade28..2822c29819 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,5 +1,7 @@
set noparent
-titzer@chromium.org
-bradnelson@chromium.org
ahaas@chromium.org
+bradnelson@chromium.org
+mtrofin@chromium.org
+rossberg@chromium.org
+titzer@chromium.org
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/ast-decoder.cc
index e2f6a046b3..0f192508ba 100644
--- a/deps/v8/src/wasm/ast-decoder.cc
+++ b/deps/v8/src/wasm/ast-decoder.cc
@@ -31,27 +31,11 @@ namespace wasm {
#define TRACE(...)
#endif
-// The root of a decoded tree.
-struct Tree {
- LocalType type; // tree type.
- uint32_t count; // number of children.
- const byte* pc; // start of the syntax tree.
- TFNode* node; // node in the TurboFan graph.
- Tree* children[1]; // pointers to children.
-
- WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc); }
-};
-
-// A production represents an incomplete decoded tree in the LR decoder.
-struct Production {
- Tree* tree; // the root of the syntax tree.
- int index; // the current index into the children of the tree.
-
- WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc()); }
- const byte* pc() const { return tree->pc; }
- bool done() const { return index >= static_cast<int>(tree->count); }
- Tree* last() const { return index > 0 ? tree->children[index - 1] : nullptr; }
-};
+#define CHECK_PROTOTYPE_OPCODE(flag) \
+ if (!FLAG_##flag) { \
+ error("Invalid opcode (enable with --" #flag ")"); \
+ break; \
+ }
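+// (For instance, CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype) below rejects the
+// exception-handling opcodes unless the --wasm_eh_prototype flag is enabled.)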
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
@@ -72,19 +56,67 @@ struct SsaEnv {
control = nullptr;
effect = nullptr;
}
+ void SetNotMerged() {
+ if (state == kMerged) state = kReached;
+ }
};
-// An entry in the stack of blocks during decoding.
-struct Block {
- SsaEnv* ssa_env; // SSA renaming environment.
- int stack_depth; // production stack depth.
+// An entry on the value stack.
+struct Value {
+ const byte* pc;
+ TFNode* node;
+ LocalType type;
};
-// An entry in the stack of ifs during decoding.
-struct IfEnv {
- SsaEnv* false_env;
- SsaEnv* merge_env;
- SsaEnv** case_envs;
+// An entry on the control stack (i.e. if, block, loop).
+struct Control {
+ const byte* pc;
+ int stack_depth; // stack height at the beginning of the construct.
+ SsaEnv* end_env; // end environment for the construct.
+ SsaEnv* false_env; // false environment (only for if).
+ SsaEnv* catch_env; // catch environment (only for try with catch).
+ SsaEnv* finish_try_env; // the environment where a try with finally lives.
+ TFNode* node; // result node for the construct.
+ LocalType type; // result type for the construct.
+ bool is_loop; // true if this is the inner label of a loop.
+
+ bool is_if() const { return *pc == kExprIf; }
+
+ bool is_try() const {
+ return *pc == kExprTryCatch || *pc == kExprTryCatchFinally ||
+ *pc == kExprTryFinally;
+ }
+
+ bool has_catch() const {
+ return *pc == kExprTryCatch || *pc == kExprTryCatchFinally;
+ }
+
+ bool has_finally() const {
+ return *pc == kExprTryCatchFinally || *pc == kExprTryFinally;
+ }
+
+ // Named constructors.
+ static Control Block(const byte* pc, int stack_depth, SsaEnv* end_env) {
+ return {pc, stack_depth, end_env, nullptr, nullptr,
+ nullptr, nullptr, kAstEnd, false};
+ }
+
+ static Control If(const byte* pc, int stack_depth, SsaEnv* end_env,
+ SsaEnv* false_env) {
+ return {pc, stack_depth, end_env, false_env, nullptr,
+ nullptr, nullptr, kAstStmt, false};
+ }
+
+ static Control Loop(const byte* pc, int stack_depth, SsaEnv* end_env) {
+ return {pc, stack_depth, end_env, nullptr, nullptr,
+ nullptr, nullptr, kAstEnd, true};
+ }
+
+ static Control Try(const byte* pc, int stack_depth, SsaEnv* end_env,
+ SsaEnv* catch_env, SsaEnv* finish_try_env) {
+ return {pc, stack_depth, end_env, nullptr, catch_env, finish_try_env,
+ nullptr, kAstEnd, false};
+ }
};
// Macros that build nodes only if there is a graph and the current SSA
@@ -109,30 +141,6 @@ class WasmDecoder : public Decoder {
size_t total_locals_;
ZoneVector<LocalType>* local_types_;
- byte ByteOperand(const byte* pc, const char* msg = "missing 1-byte operand") {
- if ((pc + sizeof(byte)) >= limit_) {
- error(pc, msg);
- return 0;
- }
- return pc[1];
- }
-
- uint32_t Uint32Operand(const byte* pc) {
- if ((pc + sizeof(uint32_t)) >= limit_) {
- error(pc, "missing 4-byte operand");
- return 0;
- }
- return read_u32(pc + 1);
- }
-
- uint64_t Uint64Operand(const byte* pc) {
- if ((pc + sizeof(uint64_t)) >= limit_) {
- error(pc, "missing 8-byte operand");
- return 0;
- }
- return read_u64(pc + 1);
- }
-
inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
if (operand.index < total_locals_) {
if (local_types_) {
@@ -149,48 +157,92 @@ class WasmDecoder : public Decoder {
inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
ModuleEnv* m = module_;
if (m && m->module && operand.index < m->module->globals.size()) {
- operand.machine_type = m->module->globals[operand.index].type;
- operand.type = WasmOpcodes::LocalTypeFor(operand.machine_type);
+ operand.type = m->module->globals[operand.index].type;
return true;
}
error(pc, pc + 1, "invalid global index");
return false;
}
- inline bool Validate(const byte* pc, FunctionIndexOperand& operand) {
+ inline bool Complete(const byte* pc, CallFunctionOperand& operand) {
ModuleEnv* m = module_;
if (m && m->module && operand.index < m->module->functions.size()) {
operand.sig = m->module->functions[operand.index].sig;
return true;
}
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, CallFunctionOperand& operand) {
+ if (Complete(pc, operand)) {
+ uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
+ if (operand.arity != expected) {
+ error(pc, pc + 1,
+ "arity mismatch in direct function call (expected %u, got %u)",
+ expected, operand.arity);
+ return false;
+ }
+ return true;
+ }
error(pc, pc + 1, "invalid function index");
return false;
}
- inline bool Validate(const byte* pc, SignatureIndexOperand& operand) {
+ inline bool Complete(const byte* pc, CallIndirectOperand& operand) {
ModuleEnv* m = module_;
if (m && m->module && operand.index < m->module->signatures.size()) {
operand.sig = m->module->signatures[operand.index];
return true;
}
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
+ if (Complete(pc, operand)) {
+ uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
+ if (operand.arity != expected) {
+ error(pc, pc + 1,
+ "arity mismatch in indirect function call (expected %u, got %u)",
+ expected, operand.arity);
+ return false;
+ }
+ return true;
+ }
error(pc, pc + 1, "invalid signature index");
return false;
}
- inline bool Validate(const byte* pc, ImportIndexOperand& operand) {
+ inline bool Complete(const byte* pc, CallImportOperand& operand) {
ModuleEnv* m = module_;
if (m && m->module && operand.index < m->module->import_table.size()) {
operand.sig = m->module->import_table[operand.index].sig;
return true;
}
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, CallImportOperand& operand) {
+ if (Complete(pc, operand)) {
+ uint32_t expected = static_cast<uint32_t>(operand.sig->parameter_count());
+ if (operand.arity != expected) {
+ error(pc, pc + 1, "arity mismatch in import call (expected %u, got %u)",
+ expected, operand.arity);
+ return false;
+ }
+ return true;
+ }
error(pc, pc + 1, "invalid signature index");
return false;
}
inline bool Validate(const byte* pc, BreakDepthOperand& operand,
- ZoneVector<Block>& blocks) {
- if (operand.depth < blocks.size()) {
- operand.target = &blocks[blocks.size() - operand.depth - 1];
+ ZoneVector<Control>& control) {
+ if (operand.arity > 1) {
+ error(pc, pc + 1, "invalid arity for br or br_if");
+ return false;
+ }
+ if (operand.depth < control.size()) {
+ operand.target = &control[control.size() - operand.depth - 1];
return true;
}
error(pc, pc + 1, "invalid break depth");
@@ -199,8 +251,12 @@ class WasmDecoder : public Decoder {
bool Validate(const byte* pc, BranchTableOperand& operand,
size_t block_depth) {
+ if (operand.arity > 1) {
+ error(pc, pc + 1, "invalid arity for break");
+ return false;
+ }
// Verify table.
- for (uint32_t i = 0; i < operand.table_count + 1; i++) {
+ for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
uint32_t target = operand.read_entry(this, i);
if (target >= block_depth) {
error(operand.table + i * 2, "improper branch in br_table");
@@ -210,7 +266,7 @@ class WasmDecoder : public Decoder {
return true;
}
- int OpcodeArity(const byte* pc) {
+ unsigned OpcodeArity(const byte* pc) {
#define DECLARE_ARITY(name, ...) \
static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
static const int kArity_##name = \
@@ -226,49 +282,58 @@ class WasmDecoder : public Decoder {
case kExprF64Const:
case kExprF32Const:
case kExprGetLocal:
- case kExprLoadGlobal:
+ case kExprGetGlobal:
case kExprNop:
case kExprUnreachable:
+ case kExprEnd:
+ case kExprBlock:
+ case kExprThrow:
+ case kExprTryCatch:
+ case kExprTryCatchFinally:
+ case kExprTryFinally:
+ case kExprFinally:
+ case kExprLoop:
return 0;
- case kExprBr:
- case kExprStoreGlobal:
+ case kExprSetGlobal:
case kExprSetLocal:
+ case kExprElse:
+ case kExprCatch:
return 1;
+ case kExprBr: {
+ BreakDepthOperand operand(this, pc);
+ return operand.arity;
+ }
+ case kExprBrIf: {
+ BreakDepthOperand operand(this, pc);
+ return 1 + operand.arity;
+ }
+ case kExprBrTable: {
+ BranchTableOperand operand(this, pc);
+ return 1 + operand.arity;
+ }
+
case kExprIf:
- case kExprBrIf:
- return 2;
- case kExprIfElse:
+ return 1;
case kExprSelect:
return 3;
- case kExprBlock:
- case kExprLoop: {
- BlockCountOperand operand(this, pc);
- return operand.count;
- }
-
case kExprCallFunction: {
- FunctionIndexOperand operand(this, pc);
- return static_cast<int>(
- module_->GetFunctionSignature(operand.index)->parameter_count());
+ CallFunctionOperand operand(this, pc);
+ return operand.arity;
}
case kExprCallIndirect: {
- SignatureIndexOperand operand(this, pc);
- return 1 + static_cast<int>(
- module_->GetSignature(operand.index)->parameter_count());
+ CallIndirectOperand operand(this, pc);
+ return 1 + operand.arity;
}
case kExprCallImport: {
- ImportIndexOperand operand(this, pc);
- return static_cast<int>(
- module_->GetImportSignature(operand.index)->parameter_count());
+ CallImportOperand operand(this, pc);
+ return operand.arity;
}
case kExprReturn: {
- return static_cast<int>(sig_->return_count());
- }
- case kExprBrTable: {
- return 1;
+ ReturnArityOperand operand(this, pc);
+ return operand.arity;
}
#define DECLARE_OPCODE_CASE(name, opcode, sig) \
@@ -279,16 +344,17 @@ class WasmDecoder : public Decoder {
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_SIMPLE_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_ASMJS_COMPAT_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_SIMD_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
- case kExprDeclLocals:
default:
UNREACHABLE();
return 0;
}
}
- int OpcodeLength(const byte* pc) {
+ unsigned OpcodeLength(const byte* pc) {
switch (static_cast<WasmOpcode>(*pc)) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
@@ -298,37 +364,33 @@ class WasmDecoder : public Decoder {
MemoryAccessOperand operand(this, pc);
return 1 + operand.length;
}
- case kExprBlock:
- case kExprLoop: {
- BlockCountOperand operand(this, pc);
- return 1 + operand.length;
- }
case kExprBr:
case kExprBrIf: {
BreakDepthOperand operand(this, pc);
return 1 + operand.length;
}
- case kExprStoreGlobal:
- case kExprLoadGlobal: {
+ case kExprSetGlobal:
+ case kExprGetGlobal: {
GlobalIndexOperand operand(this, pc);
return 1 + operand.length;
}
case kExprCallFunction: {
- FunctionIndexOperand operand(this, pc);
+ CallFunctionOperand operand(this, pc);
return 1 + operand.length;
}
case kExprCallIndirect: {
- SignatureIndexOperand operand(this, pc);
+ CallIndirectOperand operand(this, pc);
return 1 + operand.length;
}
case kExprCallImport: {
- ImportIndexOperand operand(this, pc);
+ CallImportOperand operand(this, pc);
return 1 + operand.length;
}
case kExprSetLocal:
- case kExprGetLocal: {
+ case kExprGetLocal:
+ case kExprCatch: {
LocalIndexOperand operand(this, pc);
return 1 + operand.length;
}
@@ -350,6 +412,10 @@ class WasmDecoder : public Decoder {
return 5;
case kExprF64Const:
return 9;
+ case kExprReturn: {
+ ReturnArityOperand operand(this, pc);
+ return 1 + operand.length;
+ }
default:
return 1;
@@ -357,66 +423,72 @@ class WasmDecoder : public Decoder {
}
};
-
-// A shift-reduce-parser strategy for decoding Wasm code that uses an explicit
-// shift-reduce strategy with multiple internal stacks.
-class SR_WasmDecoder : public WasmDecoder {
+// The full WASM decoder for bytecode. It both verifies bytecode and
+// generates a TurboFan IR graph.
+class WasmFullDecoder : public WasmDecoder {
public:
- SR_WasmDecoder(Zone* zone, TFBuilder* builder, FunctionBody& body)
+ WasmFullDecoder(Zone* zone, TFBuilder* builder, const FunctionBody& body)
: WasmDecoder(body.module, body.sig, body.start, body.end),
zone_(zone),
builder_(builder),
base_(body.base),
local_type_vec_(zone),
- trees_(zone),
stack_(zone),
- blocks_(zone),
- ifs_(zone) {
+ control_(zone) {
local_types_ = &local_type_vec_;
}
- TreeResult Decode() {
+ bool Decode() {
+ base::ElapsedTimer decode_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ decode_timer.Start();
+ }
+ stack_.clear();
+ control_.clear();
+
if (end_ < pc_) {
error(pc_, "function body end < start");
- return result_;
+ return false;
}
DecodeLocalDecls();
InitSsaEnv();
DecodeFunctionBody();
- Tree* tree = nullptr;
- if (ok()) {
- if (ssa_env_->go()) {
- if (stack_.size() > 0) {
- error(stack_.back().pc(), end_, "fell off end of code");
- }
- AddImplicitReturnAtEnd();
- }
- if (trees_.size() == 0) {
- if (sig_->return_count() > 0) {
- error(start_, "no trees created");
- }
- } else {
- tree = trees_[0];
- }
+ if (failed()) return TraceFailed();
+
+ if (!control_.empty()) {
+ error(pc_, control_.back().pc, "unterminated control structure");
+ return TraceFailed();
+ }
+
+ if (ssa_env_->go()) {
+ TRACE(" @%-6d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+ DoReturn();
+ if (failed()) return TraceFailed();
+ TRACE("\n");
}
- if (ok()) {
- TRACE("wasm-decode ok\n");
+ if (FLAG_trace_wasm_decode_time) {
+ double ms = decode_timer.Elapsed().InMillisecondsF();
+ PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
} else {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
- startrel(error_pc_), error_msg_.get());
+ TRACE("wasm-decode ok\n\n");
}
- return toResult(tree);
+ return true;
+ }
+
+ bool TraceFailed() {
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
+ startrel(error_pc_), error_msg_.get());
+ return false;
}
bool DecodeLocalDecls(AstLocalDecls& decls) {
DecodeLocalDecls();
if (failed()) return false;
decls.decls_encoded_size = pc_offset();
- decls.total_local_count = 0;
decls.local_types.reserve(local_type_vec_.size());
for (size_t pos = 0; pos < local_type_vec_.size();) {
uint32_t count = 0;
@@ -425,9 +497,9 @@ class SR_WasmDecoder : public WasmDecoder {
pos++;
count++;
}
- decls.total_local_count += count;
decls.local_types.push_back(std::pair<LocalType, uint32_t>(type, count));
}
+ decls.total_local_count = static_cast<uint32_t>(local_type_vec_.size());
return true;
}
@@ -448,15 +520,12 @@ class SR_WasmDecoder : public WasmDecoder {
Zone* zone_;
TFBuilder* builder_;
const byte* base_;
- TreeResult result_;
SsaEnv* ssa_env_;
- ZoneVector<LocalType> local_type_vec_;
- ZoneVector<Tree*> trees_;
- ZoneVector<Production> stack_;
- ZoneVector<Block> blocks_;
- ZoneVector<IfEnv> ifs_;
+ ZoneVector<LocalType> local_type_vec_; // types of local variables.
+ ZoneVector<Value> stack_; // stack of values.
+ ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
inline bool build() { return builder_ && ssa_env_->go(); }
@@ -490,6 +559,9 @@ class SR_WasmDecoder : public WasmDecoder {
ssa_env->control = start;
ssa_env->effect = start;
SetEnv("initial", ssa_env);
+ if (builder_) {
+ builder_->StackCheck(position());
+ }
}
TFNode* DefaultValue(LocalType type) {
@@ -508,57 +580,10 @@ class SR_WasmDecoder : public WasmDecoder {
}
}
- void Leaf(LocalType type, TFNode* node = nullptr) {
- size_t size = sizeof(Tree);
- Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
- tree->type = type;
- tree->count = 0;
- tree->pc = pc_;
- tree->node = node;
- tree->children[0] = nullptr;
- Reduce(tree);
- }
-
- void Shift(LocalType type, uint32_t count) {
- size_t size =
- sizeof(Tree) + (count == 0 ? 0 : ((count - 1) * sizeof(Tree*)));
- Tree* tree = reinterpret_cast<Tree*>(zone_->New(size));
- tree->type = type;
- tree->count = count;
- tree->pc = pc_;
- tree->node = nullptr;
- for (uint32_t i = 0; i < count; i++) tree->children[i] = nullptr;
- if (count == 0) {
- Production p = {tree, 0};
- Reduce(&p);
- Reduce(tree);
- } else {
- stack_.push_back({tree, 0});
- }
- }
-
- void Reduce(Tree* tree) {
- while (true) {
- if (stack_.size() == 0) {
- trees_.push_back(tree);
- break;
- }
- Production* p = &stack_.back();
- p->tree->children[p->index++] = tree;
- Reduce(p);
- if (p->done()) {
- tree = p->tree;
- stack_.pop_back();
- } else {
- break;
- }
- }
- }
-
char* indentation() {
static const int kMaxIndent = 64;
static char bytes[kMaxIndent + 1];
- for (int i = 0; i < kMaxIndent; i++) bytes[i] = ' ';
+ for (int i = 0; i < kMaxIndent; ++i) bytes[i] = ' ';
bytes[kMaxIndent] = 0;
if (stack_.size() < kMaxIndent / 2) {
bytes[stack_.size() * 2] = 0;
@@ -572,15 +597,14 @@ class SR_WasmDecoder : public WasmDecoder {
// Initialize {local_type_vec} from signature.
if (sig_) {
local_type_vec_.reserve(sig_->parameter_count());
- for (size_t i = 0; i < sig_->parameter_count(); i++) {
+ for (size_t i = 0; i < sig_->parameter_count(); ++i) {
local_type_vec_.push_back(sig_->GetParam(i));
}
}
// Decode local declarations, if any.
- int length;
- uint32_t entries = consume_u32v(&length, "local decls count");
+ uint32_t entries = consume_u32v("local decls count");
while (entries-- > 0 && pc_ < limit_) {
- uint32_t count = consume_u32v(&length, "local count");
+ uint32_t count = consume_u32v("local count");
byte code = consume_u8("local type");
LocalType type;
switch (code) {
@@ -605,769 +629,811 @@ class SR_WasmDecoder : public WasmDecoder {
total_locals_ = local_type_vec_.size();
}
- // Decodes the body of a function, producing reduced trees into {result}.
+ // Decodes the body of a function.
void DecodeFunctionBody() {
- TRACE("wasm-decode %p...%p (%d bytes) %s\n",
+ TRACE("wasm-decode %p...%p (module+%d, %d bytes) %s\n",
reinterpret_cast<const void*>(start_),
- reinterpret_cast<const void*>(limit_),
+ reinterpret_cast<const void*>(limit_), baserel(pc_),
static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
if (pc_ >= limit_) return; // Nothing to do.
while (true) { // decoding loop.
- int len = 1;
+ unsigned len = 1;
WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
- TRACE("wasm-decode module+%-6d %s func+%d: 0x%02x %s\n", baserel(pc_),
- indentation(), startrel(pc_), opcode,
- WasmOpcodes::OpcodeName(opcode));
+ TRACE(" @%-6d #%02x:%-20s|", startrel(pc_), opcode,
+ WasmOpcodes::ShortOpcodeName(opcode));
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig) {
- // A simple expression with a fixed signature.
- Shift(sig->GetReturn(), static_cast<uint32_t>(sig->parameter_count()));
- pc_ += len;
- if (pc_ >= limit_) {
- // End of code reached or exceeded.
- if (pc_ > limit_ && ok()) {
- error("Beyond end of code");
+ // Fast case of a simple operator.
+ TFNode* node;
+ switch (sig->parameter_count()) {
+ case 1: {
+ Value val = Pop(0, sig->GetParam(0));
+ node = BUILD(Unop, opcode, val.node, position());
+ break;
}
- return;
+ case 2: {
+ Value rval = Pop(1, sig->GetParam(1));
+ Value lval = Pop(0, sig->GetParam(0));
+ node = BUILD(Binop, opcode, lval.node, rval.node, position());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ node = nullptr;
+ break;
}
- continue; // back to decoding loop.
- }
-
- switch (opcode) {
- case kExprNop:
- Leaf(kAstStmt);
- break;
- case kExprBlock: {
- BlockCountOperand operand(this, pc_);
- if (operand.count < 1) {
- Leaf(kAstStmt);
- } else {
- Shift(kAstEnd, operand.count);
+ Push(GetReturnType(sig), node);
+ } else {
+ // Complex bytecode.
+ switch (opcode) {
+ case kExprNop:
+ Push(kAstStmt, nullptr);
+ break;
+ case kExprBlock: {
// The break environment is the outer environment.
SsaEnv* break_env = ssa_env_;
PushBlock(break_env);
SetEnv("block:start", Steal(break_env));
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprLoop: {
- BlockCountOperand operand(this, pc_);
- if (operand.count < 1) {
- Leaf(kAstStmt);
- } else {
- Shift(kAstEnd, operand.count);
+ case kExprThrow: {
+ CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ Pop(0, kAstI32);
+
+ // TODO(jpp): start exception propagation.
+ break;
+ }
+ case kExprTryCatch: {
+ CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ SsaEnv* outer_env = ssa_env_;
+ SsaEnv* try_env = Steal(outer_env);
+ SsaEnv* catch_env = Split(try_env);
+ PushTry(outer_env, catch_env, nullptr);
+ SetEnv("try_catch:start", try_env);
+ break;
+ }
+ case kExprTryCatchFinally: {
+ CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ SsaEnv* outer_env = ssa_env_;
+ SsaEnv* try_env = Steal(outer_env);
+ SsaEnv* catch_env = Split(try_env);
+ SsaEnv* finally_env = Split(try_env);
+ PushTry(finally_env, catch_env, outer_env);
+ SetEnv("try_catch_finally:start", try_env);
+ break;
+ }
+ case kExprTryFinally: {
+ CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ SsaEnv* outer_env = ssa_env_;
+ SsaEnv* try_env = Steal(outer_env);
+ SsaEnv* finally_env = Split(outer_env);
+ PushTry(finally_env, nullptr, outer_env);
+ SetEnv("try_finally:start", try_env);
+ break;
+ }
+ case kExprCatch: {
+ CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ LocalIndexOperand operand(this, pc_);
+ len = 1 + operand.length;
+
+ if (control_.empty()) {
+              error(pc_, "catch does not match any try");
+ break;
+ }
+
+ Control* c = &control_.back();
+ if (!c->has_catch()) {
+ error(pc_, "catch does not match a try with catch");
+ break;
+ }
+
+ if (c->catch_env == nullptr) {
+ error(pc_, "catch already present for try with catch");
+ break;
+ }
+
+ Goto(ssa_env_, c->end_env);
+
+ SsaEnv* catch_env = c->catch_env;
+ c->catch_env = nullptr;
+ SetEnv("catch:begin", catch_env);
+
+ if (Validate(pc_, operand)) {
+              // TODO(jpp): figure out how the thrown value is propagated. It
+              // is unlikely to be a value on the stack.
+ if (ssa_env_->locals) {
+ ssa_env_->locals[operand.index] = nullptr;
+ }
+ }
+
+ PopUpTo(c->stack_depth);
+
+ break;
+ }
+ case kExprFinally: {
+ CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
+ if (control_.empty()) {
+              error(pc_, "finally does not match any try");
+ break;
+ }
+
+ Control* c = &control_.back();
+ if (c->has_catch() && c->catch_env != nullptr) {
+ error(pc_, "missing catch for try with catch and finally");
+ break;
+ }
+
+ if (!c->has_finally()) {
+ error(pc_, "finally does not match a try with finally");
+ break;
+ }
+
+ if (c->finish_try_env == nullptr) {
+ error(pc_, "finally already present for try with finally");
+ break;
+ }
+
+            // ssa_env_ is the env for either the try or the catch, but it
+            // does not matter: either way we need to direct the control flow
+            // to the end_env, which is the env for the finally.
+            // c->finish_try_env is the environment enclosing the try block.
+ Goto(ssa_env_, c->end_env);
+
+ PopUpTo(c->stack_depth);
+
+ // The current environment becomes end_env, and finish_try_env
+ // becomes the new end_env. This ensures that any control flow
+ // leaving a try block up to now will do so by branching to the
+ // finally block. Setting the end_env to be finish_try_env ensures
+ // that kExprEnd below can handle the try block as it would any
+ // other block construct.
+ SsaEnv* finally_env = c->end_env;
+ c->end_env = c->finish_try_env;
+ SetEnv("finally:begin", finally_env);
+ c->finish_try_env = nullptr;
+
+ break;
+ }
+ case kExprLoop: {
// The break environment is the outer environment.
SsaEnv* break_env = ssa_env_;
PushBlock(break_env);
- SsaEnv* cont_env = Steal(break_env);
+ SsaEnv* finish_try_env = Steal(break_env);
// The continue environment is the inner environment.
- PrepareForLoop(pc_, cont_env);
- SetEnv("loop:start", Split(cont_env));
- if (ssa_env_->go()) ssa_env_->state = SsaEnv::kReached;
- PushBlock(cont_env);
- blocks_.back().stack_depth = -1; // no production for inner block.
+ PrepareForLoop(pc_, finish_try_env);
+ SetEnv("loop:start", Split(finish_try_env));
+ ssa_env_->SetNotMerged();
+ PushLoop(finish_try_env);
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprIf:
- Shift(kAstStmt, 2);
- break;
- case kExprIfElse:
- Shift(kAstEnd, 3); // Result type is typeof(x) in {c ? x : y}.
- break;
- case kExprSelect:
- Shift(kAstStmt, 3); // Result type is typeof(x) in {c ? x : y}.
- break;
- case kExprBr: {
- BreakDepthOperand operand(this, pc_);
- if (Validate(pc_, operand, blocks_)) {
- Shift(kAstEnd, 1);
+ case kExprIf: {
+ // Condition on top of stack. Split environments for branches.
+ Value cond = Pop(0, kAstI32);
+ TFNode* if_true = nullptr;
+ TFNode* if_false = nullptr;
+ BUILD(Branch, cond.node, &if_true, &if_false);
+ SsaEnv* end_env = ssa_env_;
+ SsaEnv* false_env = Split(ssa_env_);
+ false_env->control = if_false;
+ SsaEnv* true_env = Steal(ssa_env_);
+ true_env->control = if_true;
+ PushIf(end_env, false_env);
+ SetEnv("if:true", true_env);
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprBrIf: {
- BreakDepthOperand operand(this, pc_);
- if (Validate(pc_, operand, blocks_)) {
- Shift(kAstStmt, 2);
+ case kExprElse: {
+ if (control_.empty()) {
+ error(pc_, "else does not match any if");
+ break;
+ }
+ Control* c = &control_.back();
+ if (!c->is_if()) {
+ error(pc_, c->pc, "else does not match an if");
+ break;
+ }
+ if (c->false_env == nullptr) {
+ error(pc_, c->pc, "else already present for if");
+ break;
+ }
+ Value val = PopUpTo(c->stack_depth);
+ MergeInto(c->end_env, &c->node, &c->type, val);
+ // Switch to environment for false branch.
+ SetEnv("if_else:false", c->false_env);
+ c->false_env = nullptr; // record that an else is already seen
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprBrTable: {
- BranchTableOperand operand(this, pc_);
- if (Validate(pc_, operand, blocks_.size())) {
- Shift(kAstEnd, 1);
+ case kExprEnd: {
+ if (control_.empty()) {
+ error(pc_, "end does not match any if or block");
+ break;
+ }
+ const char* name = "block:end";
+ Control* c = &control_.back();
+ Value val = PopUpTo(c->stack_depth);
+ if (c->is_loop) {
+ // Loops always push control in pairs.
+ control_.pop_back();
+ c = &control_.back();
+ name = "loop:end";
+ } else if (c->is_if()) {
+ if (c->false_env != nullptr) {
+ // End the true branch of a one-armed if.
+ Goto(c->false_env, c->end_env);
+ val = {val.pc, nullptr, kAstStmt};
+ name = "if:merge";
+ } else {
+ // End the false branch of a two-armed if.
+ name = "if_else:merge";
+ }
+ } else if (c->is_try()) {
+ name = "try:end";
+
+ // Try blocks do not yield a value.
+ val = {val.pc, nullptr, kAstStmt};
+
+ // Validate that catch/finally were seen.
+ if (c->catch_env != nullptr) {
+ error(pc_, "missing catch in try with catch");
+ break;
+ }
+
+ if (c->finish_try_env != nullptr) {
+ error(pc_, "missing finally in try with finally");
+ break;
+ }
+ }
+
+ if (ssa_env_->go()) {
+ MergeInto(c->end_env, &c->node, &c->type, val);
+ }
+ SetEnv(name, c->end_env);
+ stack_.resize(c->stack_depth);
+ Push(c->type, c->node);
+ control_.pop_back();
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprReturn: {
- int count = static_cast<int>(sig_->return_count());
- if (count == 0) {
- BUILD(Return, 0, builder_->Buffer(0));
- ssa_env_->Kill();
- Leaf(kAstEnd);
- } else {
- Shift(kAstEnd, count);
+ case kExprSelect: {
+ Value cond = Pop(2, kAstI32);
+ Value fval = Pop();
+ Value tval = Pop();
+ if (tval.type == kAstStmt || tval.type != fval.type) {
+ if (tval.type != kAstEnd && fval.type != kAstEnd) {
+ error(pc_, "type mismatch in select");
+ break;
+ }
+ }
+ if (build()) {
+ DCHECK(tval.type != kAstEnd);
+ DCHECK(fval.type != kAstEnd);
+ DCHECK(cond.type != kAstEnd);
+ TFNode* controls[2];
+ builder_->Branch(cond.node, &controls[0], &controls[1]);
+ TFNode* merge = builder_->Merge(2, controls);
+ TFNode* vals[2] = {tval.node, fval.node};
+ TFNode* phi = builder_->Phi(tval.type, 2, vals, merge);
+ Push(tval.type, phi);
+ ssa_env_->control = merge;
+ } else {
+ Push(tval.type, nullptr);
+ }
+ break;
}
- break;
- }
- case kExprUnreachable: {
- BUILD0(Unreachable);
- ssa_env_->Kill(SsaEnv::kControlEnd);
- Leaf(kAstEnd, nullptr);
- break;
- }
- case kExprI8Const: {
- ImmI8Operand operand(this, pc_);
- Leaf(kAstI32, BUILD(Int32Constant, operand.value));
- len = 1 + operand.length;
- break;
- }
- case kExprI32Const: {
- ImmI32Operand operand(this, pc_);
- Leaf(kAstI32, BUILD(Int32Constant, operand.value));
- len = 1 + operand.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Operand operand(this, pc_);
- Leaf(kAstI64, BUILD(Int64Constant, operand.value));
- len = 1 + operand.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Operand operand(this, pc_);
- Leaf(kAstF32, BUILD(Float32Constant, operand.value));
- len = 1 + operand.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Operand operand(this, pc_);
- Leaf(kAstF64, BUILD(Float64Constant, operand.value));
- len = 1 + operand.length;
- break;
- }
- case kExprGetLocal: {
- LocalIndexOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
- TFNode* val = build() ? ssa_env_->locals[operand.index] : nullptr;
- Leaf(operand.type, val);
+ case kExprBr: {
+ BreakDepthOperand operand(this, pc_);
+ Value val = {pc_, nullptr, kAstStmt};
+ if (operand.arity) val = Pop();
+ if (Validate(pc_, operand, control_)) {
+ BreakTo(operand.target, val);
+ }
+ len = 1 + operand.length;
+ Push(kAstEnd, nullptr);
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprSetLocal: {
- LocalIndexOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
- Shift(operand.type, 1);
+ case kExprBrIf: {
+ BreakDepthOperand operand(this, pc_);
+ Value cond = Pop(operand.arity, kAstI32);
+ Value val = {pc_, nullptr, kAstStmt};
+ if (operand.arity == 1) val = Pop();
+ if (Validate(pc_, operand, control_)) {
+ SsaEnv* fenv = ssa_env_;
+ SsaEnv* tenv = Split(fenv);
+ fenv->SetNotMerged();
+ BUILD(Branch, cond.node, &tenv->control, &fenv->control);
+ ssa_env_ = tenv;
+ BreakTo(operand.target, val);
+ ssa_env_ = fenv;
+ }
+ len = 1 + operand.length;
+ Push(kAstStmt, nullptr);
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprLoadGlobal: {
- GlobalIndexOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
- Leaf(operand.type, BUILD(LoadGlobal, operand.index));
+ case kExprBrTable: {
+ BranchTableOperand operand(this, pc_);
+ if (Validate(pc_, operand, control_.size())) {
+ Value key = Pop(operand.arity, kAstI32);
+ Value val = {pc_, nullptr, kAstStmt};
+ if (operand.arity == 1) val = Pop();
+ if (failed()) break;
+
+ SsaEnv* break_env = ssa_env_;
+ if (operand.table_count > 0) {
+ // Build branches to the various blocks based on the table.
+ TFNode* sw = BUILD(Switch, operand.table_count + 1, key.node);
+
+ SsaEnv* copy = Steal(break_env);
+ ssa_env_ = copy;
+ for (uint32_t i = 0; i < operand.table_count + 1; ++i) {
+ uint16_t target = operand.read_entry(this, i);
+ ssa_env_ = Split(copy);
+ ssa_env_->control = (i == operand.table_count)
+ ? BUILD(IfDefault, sw)
+ : BUILD(IfValue, i, sw);
+ int depth = target;
+ Control* c = &control_[control_.size() - depth - 1];
+ MergeInto(c->end_env, &c->node, &c->type, val);
+ }
+ } else {
+ // Only a default target. Do the equivalent of br.
+ uint16_t target = operand.read_entry(this, 0);
+ int depth = target;
+ Control* c = &control_[control_.size() - depth - 1];
+ MergeInto(c->end_env, &c->node, &c->type, val);
+ }
+ // br_table ends the control flow like br.
+ ssa_env_ = break_env;
+ Push(kAstStmt, nullptr);
+ }
+ len = 1 + operand.length;
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprStoreGlobal: {
- GlobalIndexOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
- Shift(operand.type, 1);
+ case kExprReturn: {
+ ReturnArityOperand operand(this, pc_);
+ if (operand.arity != sig_->return_count()) {
+ error(pc_, pc_ + 1, "arity mismatch in return");
+ }
+ DoReturn();
+ len = 1 + operand.length;
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprI32LoadMem8S:
- case kExprI32LoadMem8U:
- case kExprI32LoadMem16S:
- case kExprI32LoadMem16U:
- case kExprI32LoadMem:
- len = DecodeLoadMem(pc_, kAstI32);
- break;
- case kExprI64LoadMem8S:
- case kExprI64LoadMem8U:
- case kExprI64LoadMem16S:
- case kExprI64LoadMem16U:
- case kExprI64LoadMem32S:
- case kExprI64LoadMem32U:
- case kExprI64LoadMem:
- len = DecodeLoadMem(pc_, kAstI64);
- break;
- case kExprF32LoadMem:
- len = DecodeLoadMem(pc_, kAstF32);
- break;
- case kExprF64LoadMem:
- len = DecodeLoadMem(pc_, kAstF64);
- break;
- case kExprI32StoreMem8:
- case kExprI32StoreMem16:
- case kExprI32StoreMem:
- len = DecodeStoreMem(pc_, kAstI32);
- break;
- case kExprI64StoreMem8:
- case kExprI64StoreMem16:
- case kExprI64StoreMem32:
- case kExprI64StoreMem:
- len = DecodeStoreMem(pc_, kAstI64);
- break;
- case kExprF32StoreMem:
- len = DecodeStoreMem(pc_, kAstF32);
- break;
- case kExprF64StoreMem:
- len = DecodeStoreMem(pc_, kAstF64);
- break;
- case kExprMemorySize:
- Leaf(kAstI32, BUILD(MemSize, 0));
- break;
- case kExprGrowMemory:
- Shift(kAstI32, 1);
- break;
- case kExprCallFunction: {
- FunctionIndexOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
- LocalType type = operand.sig->return_count() == 0
- ? kAstStmt
- : operand.sig->GetReturn();
- Shift(type, static_cast<int>(operand.sig->parameter_count()));
+ case kExprUnreachable: {
+ Push(kAstEnd, BUILD(Unreachable, position()));
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ break;
}
- len = 1 + operand.length;
- break;
- }
- case kExprCallIndirect: {
- SignatureIndexOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
- LocalType type = operand.sig->return_count() == 0
- ? kAstStmt
- : operand.sig->GetReturn();
- Shift(type, static_cast<int>(1 + operand.sig->parameter_count()));
+ case kExprI8Const: {
+ ImmI8Operand operand(this, pc_);
+ Push(kAstI32, BUILD(Int32Constant, operand.value));
+ len = 1 + operand.length;
+ break;
}
- len = 1 + operand.length;
- break;
+ case kExprI32Const: {
+ ImmI32Operand operand(this, pc_);
+ Push(kAstI32, BUILD(Int32Constant, operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprI64Const: {
+ ImmI64Operand operand(this, pc_);
+ Push(kAstI64, BUILD(Int64Constant, operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprF32Const: {
+ ImmF32Operand operand(this, pc_);
+ Push(kAstF32, BUILD(Float32Constant, operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprF64Const: {
+ ImmF64Operand operand(this, pc_);
+ Push(kAstF64, BUILD(Float64Constant, operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprGetLocal: {
+ LocalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ if (build()) {
+ Push(operand.type, ssa_env_->locals[operand.index]);
+ } else {
+ Push(operand.type, nullptr);
+ }
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprSetLocal: {
+ LocalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ Value val = Pop(0, local_type_vec_[operand.index]);
+ if (ssa_env_->locals) ssa_env_->locals[operand.index] = val.node;
+ Push(val.type, val.node);
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprGetGlobal: {
+ GlobalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ Push(operand.type, BUILD(GetGlobal, operand.index));
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprSetGlobal: {
+ GlobalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ Value val = Pop(0, operand.type);
+ BUILD(SetGlobal, operand.index, val.node);
+ Push(val.type, val.node);
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprI32LoadMem8S:
+ len = DecodeLoadMem(kAstI32, MachineType::Int8());
+ break;
+ case kExprI32LoadMem8U:
+ len = DecodeLoadMem(kAstI32, MachineType::Uint8());
+ break;
+ case kExprI32LoadMem16S:
+ len = DecodeLoadMem(kAstI32, MachineType::Int16());
+ break;
+ case kExprI32LoadMem16U:
+ len = DecodeLoadMem(kAstI32, MachineType::Uint16());
+ break;
+ case kExprI32LoadMem:
+ len = DecodeLoadMem(kAstI32, MachineType::Int32());
+ break;
+
+ case kExprI64LoadMem8S:
+ len = DecodeLoadMem(kAstI64, MachineType::Int8());
+ break;
+ case kExprI64LoadMem8U:
+ len = DecodeLoadMem(kAstI64, MachineType::Uint8());
+ break;
+ case kExprI64LoadMem16S:
+ len = DecodeLoadMem(kAstI64, MachineType::Int16());
+ break;
+ case kExprI64LoadMem16U:
+ len = DecodeLoadMem(kAstI64, MachineType::Uint16());
+ break;
+ case kExprI64LoadMem32S:
+ len = DecodeLoadMem(kAstI64, MachineType::Int32());
+ break;
+ case kExprI64LoadMem32U:
+ len = DecodeLoadMem(kAstI64, MachineType::Uint32());
+ break;
+ case kExprI64LoadMem:
+ len = DecodeLoadMem(kAstI64, MachineType::Int64());
+ break;
+ case kExprF32LoadMem:
+ len = DecodeLoadMem(kAstF32, MachineType::Float32());
+ break;
+ case kExprF64LoadMem:
+ len = DecodeLoadMem(kAstF64, MachineType::Float64());
+ break;
+ case kExprI32StoreMem8:
+ len = DecodeStoreMem(kAstI32, MachineType::Int8());
+ break;
+ case kExprI32StoreMem16:
+ len = DecodeStoreMem(kAstI32, MachineType::Int16());
+ break;
+ case kExprI32StoreMem:
+ len = DecodeStoreMem(kAstI32, MachineType::Int32());
+ break;
+ case kExprI64StoreMem8:
+ len = DecodeStoreMem(kAstI64, MachineType::Int8());
+ break;
+ case kExprI64StoreMem16:
+ len = DecodeStoreMem(kAstI64, MachineType::Int16());
+ break;
+ case kExprI64StoreMem32:
+ len = DecodeStoreMem(kAstI64, MachineType::Int32());
+ break;
+ case kExprI64StoreMem:
+ len = DecodeStoreMem(kAstI64, MachineType::Int64());
+ break;
+ case kExprF32StoreMem:
+ len = DecodeStoreMem(kAstF32, MachineType::Float32());
+ break;
+ case kExprF64StoreMem:
+ len = DecodeStoreMem(kAstF64, MachineType::Float64());
+ break;
+
+ case kExprMemorySize:
+ Push(kAstI32, BUILD(MemSize, 0));
+ break;
+ case kExprCallFunction: {
+ CallFunctionOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ TFNode** buffer = PopArgs(operand.sig);
+ TFNode* call =
+ BUILD(CallDirect, operand.index, buffer, position());
+ Push(GetReturnType(operand.sig), call);
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprCallIndirect: {
+ CallIndirectOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ TFNode** buffer = PopArgs(operand.sig);
+ Value index = Pop(0, kAstI32);
+ if (buffer) buffer[0] = index.node;
+ TFNode* call =
+ BUILD(CallIndirect, operand.index, buffer, position());
+ Push(GetReturnType(operand.sig), call);
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprCallImport: {
+ CallImportOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ TFNode** buffer = PopArgs(operand.sig);
+ TFNode* call =
+ BUILD(CallImport, operand.index, buffer, position());
+ Push(GetReturnType(operand.sig), call);
+ }
+ len = 1 + operand.length;
+ break;
+ }
+ case kSimdPrefix: {
+ CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
+ len++;
+ byte simd_index = *(pc_ + 1);
+ opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
+ DecodeSimdOpcode(opcode);
+ break;
+ }
+ default:
+ error("Invalid opcode");
+ return;
}
- case kExprCallImport: {
- ImportIndexOperand operand(this, pc_);
- if (Validate(pc_, operand)) {
- LocalType type = operand.sig->return_count() == 0
- ? kAstStmt
- : operand.sig->GetReturn();
- Shift(type, static_cast<int>(operand.sig->parameter_count()));
+ } // end complex bytecode
+
+#if DEBUG
+ if (FLAG_trace_wasm_decoder) {
+ for (size_t i = 0; i < stack_.size(); ++i) {
+ Value& val = stack_[i];
+ WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
+ PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
+ static_cast<int>(val.pc - start_),
+ WasmOpcodes::ShortOpcodeName(opcode));
+ switch (opcode) {
+ case kExprI32Const: {
+ ImmI32Operand operand(this, val.pc);
+ PrintF("[%d]", operand.value);
+ break;
+ }
+ case kExprGetLocal: {
+ LocalIndexOperand operand(this, val.pc);
+ PrintF("[%u]", operand.index);
+ break;
+ }
+ case kExprSetLocal: {
+ LocalIndexOperand operand(this, val.pc);
+ PrintF("[%u]", operand.index);
+ break;
+ }
+ default:
+ break;
}
- len = 1 + operand.length;
- break;
}
- case kExprDeclLocals:
- default:
- error("Invalid opcode");
- return;
+ PrintF("\n");
}
+#endif
pc_ += len;
if (pc_ >= limit_) {
// End of code reached or exceeded.
- if (pc_ > limit_ && ok()) {
- error("Beyond end of code");
- }
+ if (pc_ > limit_ && ok()) error("Beyond end of code");
return;
}
+ } // end decode loop
+ } // end DecodeFunctionBody()
+
+ TFNode** PopArgs(FunctionSig* sig) {
+ if (build()) {
+ int count = static_cast<int>(sig->parameter_count());
+ TFNode** buffer = builder_->Buffer(count + 1);
+ buffer[0] = nullptr; // reserved for code object or function index.
+ for (int i = count - 1; i >= 0; i--) {
+ buffer[i + 1] = Pop(i, sig->GetParam(i)).node;
+ }
+ return buffer;
+ } else {
+ int count = static_cast<int>(sig->parameter_count());
+ for (int i = count - 1; i >= 0; i--) {
+ Pop(i, sig->GetParam(i));
+ }
+ return nullptr;
}
}
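PopArgs walks the signature right to left so the deepest stack slot ends up in buffer[1], and slot 0 stays free for the callee. A standalone sketch with plain ints standing in for TFNode* (assumptions mine):

#include <vector>

// Pops `count` arguments off `stack` into a buffer whose slot 0 is
// reserved (for the code object or function index), preserving
// left-to-right argument order in slots 1..count.
std::vector<int> PopCallArgs(std::vector<int>& stack, int count) {
  std::vector<int> buffer(static_cast<size_t>(count) + 1, 0);
  for (int i = count - 1; i >= 0; i--) {
    buffer[i + 1] = stack.back();
    stack.pop_back();
  }
  return buffer;
}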
- void PushBlock(SsaEnv* ssa_env) {
- blocks_.push_back({ssa_env, static_cast<int>(stack_.size() - 1)});
+ LocalType GetReturnType(FunctionSig* sig) {
+ return sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ }
+
+ void PushBlock(SsaEnv* end_env) {
+ const int stack_depth = static_cast<int>(stack_.size());
+ control_.emplace_back(Control::Block(pc_, stack_depth, end_env));
}
- int DecodeLoadMem(const byte* pc, LocalType type) {
- MemoryAccessOperand operand(this, pc);
- Shift(type, 1);
+ void PushLoop(SsaEnv* end_env) {
+ const int stack_depth = static_cast<int>(stack_.size());
+ control_.emplace_back(Control::Loop(pc_, stack_depth, end_env));
+ }
+
+ void PushIf(SsaEnv* end_env, SsaEnv* false_env) {
+ const int stack_depth = static_cast<int>(stack_.size());
+ control_.emplace_back(Control::If(pc_, stack_depth, end_env, false_env));
+ }
+
+ void PushTry(SsaEnv* end_env, SsaEnv* catch_env, SsaEnv* finish_try_env) {
+ const int stack_depth = static_cast<int>(stack_.size());
+ control_.emplace_back(
+ Control::Try(pc_, stack_depth, end_env, catch_env, finish_try_env));
+ }
+
+ int DecodeLoadMem(LocalType type, MachineType mem_type) {
+ MemoryAccessOperand operand(this, pc_);
+ Value index = Pop(0, kAstI32);
+ TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
+ operand.alignment, position());
+ Push(type, node);
return 1 + operand.length;
}
- int DecodeStoreMem(const byte* pc, LocalType type) {
- MemoryAccessOperand operand(this, pc);
- Shift(type, 2);
+ int DecodeStoreMem(LocalType type, MachineType mem_type) {
+ MemoryAccessOperand operand(this, pc_);
+ Value val = Pop(1, type);
+ Value index = Pop(0, kAstI32);
+ BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
+ val.node, position());
+ Push(type, val.node);
return 1 + operand.length;
}
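Both memory helpers pop an i32 index and fold in the operand's static offset; the store additionally re-pushes the stored value, since stores yield their value in this encoding. A standalone sketch of the effective-address computation with a bounds check (toy linear memory, assumptions mine):

#include <cstdint>
#include <cstring>
#include <vector>

// Bounds-checked 32-bit load at index + offset, the effective address
// the LoadMem/StoreMem nodes above compute. Widening to 64 bits keeps
// the address arithmetic from overflowing.
bool LoadU32(const std::vector<uint8_t>& mem, uint32_t index,
             uint32_t offset, uint32_t* out) {
  uint64_t ea = uint64_t{index} + offset;
  if (ea + sizeof(uint32_t) > mem.size()) return false;  // would trap
  std::memcpy(out, mem.data() + ea, sizeof(uint32_t));
  return true;
}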
- void AddImplicitReturnAtEnd() {
- int retcount = static_cast<int>(sig_->return_count());
- if (retcount == 0) {
- BUILD0(ReturnVoid);
- return;
+ void DecodeSimdOpcode(WasmOpcode opcode) {
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ compiler::NodeVector inputs(sig->parameter_count(), zone_);
+ for (size_t i = sig->parameter_count(); i > 0; i--) {
+ Value val = Pop(static_cast<int>(i - 1), sig->GetParam(i - 1));
+ inputs[i - 1] = val.node;
}
+ TFNode* node = BUILD(SimdOp, opcode, inputs);
+ Push(GetReturnType(sig), node);
+ }
- if (static_cast<int>(trees_.size()) < retcount) {
- error(limit_, nullptr,
- "ImplicitReturn expects %d arguments, only %d remain", retcount,
- static_cast<int>(trees_.size()));
- return;
- }
+ void DoReturn() {
+ int count = static_cast<int>(sig_->return_count());
+ TFNode** buffer = nullptr;
+ if (build()) buffer = builder_->Buffer(count);
- TRACE("wasm-decode implicit return of %d args\n", retcount);
-
- TFNode** buffer = BUILD(Buffer, retcount);
- for (int index = 0; index < retcount; index++) {
- Tree* tree = trees_[trees_.size() - 1 - index];
- if (buffer) buffer[index] = tree->node;
- LocalType expected = sig_->GetReturn(index);
- if (tree->type != expected) {
- error(limit_, tree->pc,
- "ImplicitReturn[%d] expected type %s, found %s of type %s", index,
- WasmOpcodes::TypeName(expected),
- WasmOpcodes::OpcodeName(tree->opcode()),
- WasmOpcodes::TypeName(tree->type));
- return;
- }
+ // Pop return values off the stack in reverse order.
+ for (int i = count - 1; i >= 0; i--) {
+ Value val = Pop(i, sig_->GetReturn(i));
+ if (buffer) buffer[i] = val.node;
}
- BUILD(Return, retcount, buffer);
+ Push(kAstEnd, BUILD(Return, count, buffer));
+ ssa_env_->Kill(SsaEnv::kControlEnd);
}
- int baserel(const byte* ptr) {
- return base_ ? static_cast<int>(ptr - base_) : 0;
+ void Push(LocalType type, TFNode* node) {
+ stack_.push_back({pc_, node, type});
}
- int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
+ const char* SafeOpcodeNameAt(const byte* pc) {
+ if (pc >= end_) return "<end>";
+ return WasmOpcodes::ShortOpcodeName(static_cast<WasmOpcode>(*pc));
+ }
- void Reduce(Production* p) {
- WasmOpcode opcode = p->opcode();
- TRACE("-----reduce module+%-6d %s func+%d: 0x%02x %s\n", baserel(p->pc()),
- indentation(), startrel(p->pc()), opcode,
- WasmOpcodes::OpcodeName(opcode));
- FunctionSig* sig = WasmOpcodes::Signature(opcode);
- if (sig) {
- // A simple expression with a fixed signature.
- TypeCheckLast(p, sig->GetParam(p->index - 1));
- if (p->done() && build()) {
- if (sig->parameter_count() == 2) {
- p->tree->node = builder_->Binop(opcode, p->tree->children[0]->node,
- p->tree->children[1]->node);
- } else if (sig->parameter_count() == 1) {
- p->tree->node = builder_->Unop(opcode, p->tree->children[0]->node);
- } else {
- UNREACHABLE();
- }
+ Value Pop(int index, LocalType expected) {
+ Value val = Pop();
+ if (val.type != expected) {
+ if (val.type != kAstEnd) {
+ error(pc_, val.pc, "%s[%d] expected type %s, found %s of type %s",
+ SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
+ SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
}
- return;
}
+ return val;
+ }
- switch (opcode) {
- case kExprBlock: {
- if (p->done()) {
- Block* last = &blocks_.back();
- DCHECK_EQ(stack_.size() - 1, last->stack_depth);
- // fallthrough with the last expression.
- ReduceBreakToExprBlock(p, last);
- SetEnv("block:end", last->ssa_env);
- blocks_.pop_back();
- }
- break;
- }
- case kExprLoop: {
- if (p->done()) {
- // Pop the continue environment.
- blocks_.pop_back();
- // Get the break environment.
- Block* last = &blocks_.back();
- DCHECK_EQ(stack_.size() - 1, last->stack_depth);
- // fallthrough with the last expression.
- ReduceBreakToExprBlock(p, last);
- SetEnv("loop:end", last->ssa_env);
- blocks_.pop_back();
- }
- break;
- }
- case kExprIf: {
- if (p->index == 1) {
- // Condition done. Split environment for true branch.
- TypeCheckLast(p, kAstI32);
- SsaEnv* false_env = ssa_env_;
- SsaEnv* true_env = Split(ssa_env_);
- ifs_.push_back({nullptr, false_env, nullptr});
- BUILD(Branch, p->last()->node, &true_env->control,
- &false_env->control);
- SetEnv("if:true", true_env);
- } else if (p->index == 2) {
- // True block done. Merge true and false environments.
- IfEnv* env = &ifs_.back();
- SsaEnv* merge = env->merge_env;
- if (merge->go()) {
- merge->state = SsaEnv::kReached;
- Goto(ssa_env_, merge);
- }
- SetEnv("if:merge", merge);
- ifs_.pop_back();
- }
- break;
- }
- case kExprIfElse: {
- if (p->index == 1) {
- // Condition done. Split environment for true and false branches.
- TypeCheckLast(p, kAstI32);
- SsaEnv* merge_env = ssa_env_;
- TFNode* if_true = nullptr;
- TFNode* if_false = nullptr;
- BUILD(Branch, p->last()->node, &if_true, &if_false);
- SsaEnv* false_env = Split(ssa_env_);
- SsaEnv* true_env = Steal(ssa_env_);
- false_env->control = if_false;
- true_env->control = if_true;
- ifs_.push_back({false_env, merge_env, nullptr});
- SetEnv("if_else:true", true_env);
- } else if (p->index == 2) {
- // True expr done.
- IfEnv* env = &ifs_.back();
- MergeIntoProduction(p, env->merge_env, p->last());
- // Switch to environment for false branch.
- SsaEnv* false_env = ifs_.back().false_env;
- SetEnv("if_else:false", false_env);
- } else if (p->index == 3) {
- // False expr done.
- IfEnv* env = &ifs_.back();
- MergeIntoProduction(p, env->merge_env, p->last());
- SetEnv("if_else:merge", env->merge_env);
- ifs_.pop_back();
- }
- break;
- }
- case kExprSelect: {
- if (p->index == 1) {
- // True expression done.
- p->tree->type = p->last()->type;
- if (p->tree->type == kAstStmt) {
- error(p->pc(), p->tree->children[1]->pc,
- "select operand should be expression");
- }
- } else if (p->index == 2) {
- // False expression done.
- TypeCheckLast(p, p->tree->type);
- } else {
- // Condition done.
- DCHECK(p->done());
- TypeCheckLast(p, kAstI32);
- if (build()) {
- TFNode* controls[2];
- builder_->Branch(p->tree->children[2]->node, &controls[0],
- &controls[1]);
- TFNode* merge = builder_->Merge(2, controls);
- TFNode* vals[2] = {p->tree->children[0]->node,
- p->tree->children[1]->node};
- TFNode* phi = builder_->Phi(p->tree->type, 2, vals, merge);
- p->tree->node = phi;
- ssa_env_->control = merge;
- }
- }
- break;
- }
- case kExprBr: {
- BreakDepthOperand operand(this, p->pc());
- CHECK(Validate(p->pc(), operand, blocks_));
- ReduceBreakToExprBlock(p, operand.target);
- break;
- }
- case kExprBrIf: {
- if (p->done()) {
- TypeCheckLast(p, kAstI32);
- BreakDepthOperand operand(this, p->pc());
- CHECK(Validate(p->pc(), operand, blocks_));
- SsaEnv* fenv = ssa_env_;
- SsaEnv* tenv = Split(fenv);
- BUILD(Branch, p->tree->children[1]->node, &tenv->control,
- &fenv->control);
- ssa_env_ = tenv;
- ReduceBreakToExprBlock(p, operand.target, p->tree->children[0]);
- ssa_env_ = fenv;
- }
- break;
- }
- case kExprBrTable: {
- if (p->index == 1) {
- // Switch key finished.
- TypeCheckLast(p, kAstI32);
- if (failed()) break;
-
- BranchTableOperand operand(this, p->pc());
- DCHECK(Validate(p->pc(), operand, blocks_.size()));
-
- // Build a switch only if it has more than just a default target.
- bool build_switch = operand.table_count > 0;
- TFNode* sw = nullptr;
- if (build_switch) {
- sw = BUILD(Switch, operand.table_count + 1, p->last()->node);
- }
-
- // Process the targets of the break table.
- SsaEnv* prev = ssa_env_;
- SsaEnv* copy = Steal(prev);
- for (uint32_t i = 0; i < operand.table_count + 1; i++) {
- uint32_t target = operand.read_entry(this, i);
- SsaEnv* env = copy;
- if (build_switch) {
- ssa_env_ = env = Split(env);
- env->control = i == operand.table_count ? BUILD(IfDefault, sw)
- : BUILD(IfValue, i, sw);
- }
- SsaEnv* tenv = blocks_[blocks_.size() - target - 1].ssa_env;
- Goto(env, tenv);
- }
- ssa_env_ = prev;
- }
- break;
- }
- case kExprReturn: {
- TypeCheckLast(p, sig_->GetReturn(p->index - 1));
- if (p->done()) {
- if (build()) {
- int count = p->tree->count;
- TFNode** buffer = builder_->Buffer(count);
- for (int i = 0; i < count; i++) {
- buffer[i] = p->tree->children[i]->node;
- }
- BUILD(Return, count, buffer);
- }
- ssa_env_->Kill(SsaEnv::kControlEnd);
- }
- break;
- }
- case kExprSetLocal: {
- LocalIndexOperand operand(this, p->pc());
- CHECK(Validate(p->pc(), operand));
- Tree* val = p->last();
- if (operand.type == val->type) {
- if (build()) ssa_env_->locals[operand.index] = val->node;
- p->tree->node = val->node;
- } else {
- error(p->pc(), val->pc, "Typecheck failed in SetLocal");
- }
- break;
- }
- case kExprStoreGlobal: {
- GlobalIndexOperand operand(this, p->pc());
- CHECK(Validate(p->pc(), operand));
- Tree* val = p->last();
- if (operand.type == val->type) {
- BUILD(StoreGlobal, operand.index, val->node);
- p->tree->node = val->node;
- } else {
- error(p->pc(), val->pc, "Typecheck failed in StoreGlobal");
- }
- break;
- }
-
- case kExprI32LoadMem8S:
- return ReduceLoadMem(p, kAstI32, MachineType::Int8());
- case kExprI32LoadMem8U:
- return ReduceLoadMem(p, kAstI32, MachineType::Uint8());
- case kExprI32LoadMem16S:
- return ReduceLoadMem(p, kAstI32, MachineType::Int16());
- case kExprI32LoadMem16U:
- return ReduceLoadMem(p, kAstI32, MachineType::Uint16());
- case kExprI32LoadMem:
- return ReduceLoadMem(p, kAstI32, MachineType::Int32());
-
- case kExprI64LoadMem8S:
- return ReduceLoadMem(p, kAstI64, MachineType::Int8());
- case kExprI64LoadMem8U:
- return ReduceLoadMem(p, kAstI64, MachineType::Uint8());
- case kExprI64LoadMem16S:
- return ReduceLoadMem(p, kAstI64, MachineType::Int16());
- case kExprI64LoadMem16U:
- return ReduceLoadMem(p, kAstI64, MachineType::Uint16());
- case kExprI64LoadMem32S:
- return ReduceLoadMem(p, kAstI64, MachineType::Int32());
- case kExprI64LoadMem32U:
- return ReduceLoadMem(p, kAstI64, MachineType::Uint32());
- case kExprI64LoadMem:
- return ReduceLoadMem(p, kAstI64, MachineType::Int64());
-
- case kExprF32LoadMem:
- return ReduceLoadMem(p, kAstF32, MachineType::Float32());
-
- case kExprF64LoadMem:
- return ReduceLoadMem(p, kAstF64, MachineType::Float64());
-
- case kExprI32StoreMem8:
- return ReduceStoreMem(p, kAstI32, MachineType::Int8());
- case kExprI32StoreMem16:
- return ReduceStoreMem(p, kAstI32, MachineType::Int16());
- case kExprI32StoreMem:
- return ReduceStoreMem(p, kAstI32, MachineType::Int32());
-
- case kExprI64StoreMem8:
- return ReduceStoreMem(p, kAstI64, MachineType::Int8());
- case kExprI64StoreMem16:
- return ReduceStoreMem(p, kAstI64, MachineType::Int16());
- case kExprI64StoreMem32:
- return ReduceStoreMem(p, kAstI64, MachineType::Int32());
- case kExprI64StoreMem:
- return ReduceStoreMem(p, kAstI64, MachineType::Int64());
-
- case kExprF32StoreMem:
- return ReduceStoreMem(p, kAstF32, MachineType::Float32());
-
- case kExprF64StoreMem:
- return ReduceStoreMem(p, kAstF64, MachineType::Float64());
-
- case kExprGrowMemory:
- TypeCheckLast(p, kAstI32);
- // TODO(titzer): build node for GrowMemory
- p->tree->node = BUILD(Int32Constant, 0);
- return;
+ Value Pop() {
+ size_t limit = control_.empty() ? 0 : control_.back().stack_depth;
+ if (stack_.size() <= limit) {
+ Value val = {pc_, nullptr, kAstStmt};
+ error(pc_, pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
+ return val;
+ }
+ Value val = stack_.back();
+ stack_.pop_back();
+ return val;
+ }
- case kExprCallFunction: {
- FunctionIndexOperand operand(this, p->pc());
- CHECK(Validate(p->pc(), operand));
- if (p->index > 0) {
- TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
- }
- if (p->done() && build()) {
- uint32_t count = p->tree->count + 1;
- TFNode** buffer = builder_->Buffer(count);
- buffer[0] = nullptr; // reserved for code object.
- for (uint32_t i = 1; i < count; i++) {
- buffer[i] = p->tree->children[i - 1]->node;
- }
- p->tree->node = builder_->CallDirect(operand.index, buffer);
- }
- break;
- }
- case kExprCallIndirect: {
- SignatureIndexOperand operand(this, p->pc());
- CHECK(Validate(p->pc(), operand));
- if (p->index == 1) {
- TypeCheckLast(p, kAstI32);
- } else {
- TypeCheckLast(p, operand.sig->GetParam(p->index - 2));
- }
- if (p->done() && build()) {
- uint32_t count = p->tree->count;
- TFNode** buffer = builder_->Buffer(count);
- for (uint32_t i = 0; i < count; i++) {
- buffer[i] = p->tree->children[i]->node;
- }
- p->tree->node = builder_->CallIndirect(operand.index, buffer);
- }
- break;
- }
- case kExprCallImport: {
- ImportIndexOperand operand(this, p->pc());
- CHECK(Validate(p->pc(), operand));
- if (p->index > 0) {
- TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
- }
- if (p->done() && build()) {
- uint32_t count = p->tree->count + 1;
- TFNode** buffer = builder_->Buffer(count);
- buffer[0] = nullptr; // reserved for code object.
- for (uint32_t i = 1; i < count; i++) {
- buffer[i] = p->tree->children[i - 1]->node;
- }
- p->tree->node = builder_->CallImport(operand.index, buffer);
- }
- break;
- }
- default:
- break;
+ Value PopUpTo(int stack_depth) {
+ if (stack_depth == static_cast<int>(stack_.size())) {
+ Value val = {pc_, nullptr, kAstStmt};
+ return val;
+ } else {
+ DCHECK_LE(stack_depth, static_cast<int>(stack_.size()));
+ Value val = Pop();
+ stack_.resize(stack_depth);
+ return val;
}
}
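Pop never reaches below the enclosing block's stack_depth, reporting an error instead, and PopUpTo drains a block back to its entry depth. A minimal typed-stack sketch of the Pop(index, expected) check (toy types, not V8's Value):

#include <string>
#include <vector>

enum ToyType { kStmt, kI32, kF64 };
struct ToyValue { ToyType type; };

// Pops one value; reports (via the return flag and err) an empty stack
// for the current block or a type mismatch, mirroring Pop above.
bool ToyPop(std::vector<ToyValue>& stack, size_t block_limit,
            ToyType expected, ToyValue* out, std::string* err) {
  if (stack.size() <= block_limit) {
    *out = {kStmt};
    *err = "found empty stack";
    return false;
  }
  *out = stack.back();
  stack.pop_back();
  if (out->type != expected) {
    *err = "type mismatch";
    return false;
  }
  return true;
}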
- void ReduceBreakToExprBlock(Production* p, Block* block) {
- ReduceBreakToExprBlock(p, block, p->tree->count > 0 ? p->last() : nullptr);
+ int baserel(const byte* ptr) {
+ return base_ ? static_cast<int>(ptr - base_) : 0;
}
- void ReduceBreakToExprBlock(Production* p, Block* block, Tree* val) {
- if (block->stack_depth < 0) {
+ int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
+
+ void BreakTo(Control* block, Value& val) {
+ if (block->is_loop) {
// This is the inner loop block, which does not have a value.
- Goto(ssa_env_, block->ssa_env);
+ Goto(ssa_env_, block->end_env);
} else {
// Merge the value into the production for the block.
- Production* bp = &stack_[block->stack_depth];
- MergeIntoProduction(bp, block->ssa_env, val);
+ MergeInto(block->end_env, &block->node, &block->type, val);
}
}
- void MergeIntoProduction(Production* p, SsaEnv* target, Tree* expr) {
+ void MergeInto(SsaEnv* target, TFNode** node, LocalType* type, Value& val) {
if (!ssa_env_->go()) return;
+ DCHECK_NE(kAstEnd, val.type);
bool first = target->state == SsaEnv::kUnreachable;
Goto(ssa_env_, target);
- if (expr == nullptr || expr->type == kAstEnd) return;
if (first) {
// first merge to this environment; set the type and the node.
- p->tree->type = expr->type;
- p->tree->node = expr->node;
- } else {
+ *type = val.type;
+ *node = val.node;
+ } else if (val.type == *type && val.type != kAstStmt) {
// merge with the existing value for this block.
- LocalType type = p->tree->type;
- if (expr->type != type) {
- type = kAstStmt;
- p->tree->type = kAstStmt;
- p->tree->node = nullptr;
- } else if (type != kAstStmt) {
- p->tree->node = CreateOrMergeIntoPhi(type, target->control,
- p->tree->node, expr->node);
- }
- }
- }
-
- void ReduceLoadMem(Production* p, LocalType type, MachineType mem_type) {
- DCHECK_EQ(1, p->index);
- TypeCheckLast(p, kAstI32); // index
- if (build()) {
- MemoryAccessOperand operand(this, p->pc());
- p->tree->node =
- builder_->LoadMem(type, mem_type, p->last()->node, operand.offset);
- }
- }
-
- void ReduceStoreMem(Production* p, LocalType type, MachineType mem_type) {
- if (p->index == 1) {
- TypeCheckLast(p, kAstI32); // index
+ *node = CreateOrMergeIntoPhi(*type, target->control, *node, val.node);
} else {
- DCHECK_EQ(2, p->index);
- TypeCheckLast(p, type);
- if (build()) {
- MemoryAccessOperand operand(this, p->pc());
- TFNode* val = p->tree->children[1]->node;
- builder_->StoreMem(mem_type, p->tree->children[0]->node, operand.offset,
- val);
- p->tree->node = val;
- }
- }
- }
-
- void TypeCheckLast(Production* p, LocalType expected) {
- LocalType result = p->last()->type;
- if (result == expected) return;
- if (result == kAstEnd) return;
- if (expected != kAstStmt) {
- error(p->pc(), p->last()->pc,
- "%s[%d] expected type %s, found %s of type %s",
- WasmOpcodes::OpcodeName(p->opcode()), p->index - 1,
- WasmOpcodes::TypeName(expected),
- WasmOpcodes::OpcodeName(p->last()->opcode()),
- WasmOpcodes::TypeName(p->last()->type));
+ // types don't match, or block is already a stmt.
+ *type = kAstStmt;
+ *node = nullptr;
}
}
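The merge rule deserves stating on its own: the first edge into a block fixes its type and node; later edges keep them only on an exact non-void match, otherwise the block degrades to a statement. A standalone sketch of just the type half (toy enum, assumptions mine):

#include <optional>

enum MergeType { kVoid, kInt32, kFloat64 };

// First merge fixes the block type; later merges keep it only when the
// incoming type matches exactly and is not void.
MergeType MergeBlockType(std::optional<MergeType> current,
                         MergeType incoming) {
  if (!current.has_value()) return incoming;  // first edge into the block
  if (incoming == *current && incoming != kVoid) return incoming;
  return kVoid;  // mismatch: block carries no value
}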
void SetEnv(const char* reason, SsaEnv* env) {
#if DEBUG
- TRACE(" env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
- static_cast<int>(blocks_.size()), reason);
- if (FLAG_trace_wasm_decoder && env && env->control) {
- TRACE(", control = ");
- compiler::WasmGraphBuilder::PrintDebugName(env->control);
+ if (FLAG_trace_wasm_decoder) {
+ char state = 'X';
+ if (env) {
+ switch (env->state) {
+ case SsaEnv::kReached:
+ state = 'R';
+ break;
+ case SsaEnv::kUnreachable:
+ state = 'U';
+ break;
+ case SsaEnv::kMerged:
+ state = 'M';
+ break;
+ case SsaEnv::kControlEnd:
+ state = 'E';
+ break;
+ }
+ }
+ PrintF(" env = %p, state = %c, reason = %s", static_cast<void*>(env),
+ state, reason);
+ if (env && env->control) {
+ PrintF(", control = ");
+ compiler::WasmGraphBuilder::PrintDebugName(env->control);
+ }
+ PrintF("\n");
}
- TRACE("\n");
#endif
ssa_env_ = env;
if (builder_) {
@@ -1417,7 +1483,7 @@ class SR_WasmDecoder : public WasmDecoder {
builder_->AppendToMerge(merge, from->control);
// Merge effects.
if (builder_->IsPhiWithMerge(to->effect, merge)) {
- builder_->AppendToPhi(merge, to->effect, from->effect);
+ builder_->AppendToPhi(to->effect, from->effect);
} else if (to->effect != from->effect) {
uint32_t count = builder_->InputCount(merge);
TFNode** effects = builder_->Buffer(count);
@@ -1432,7 +1498,7 @@ class SR_WasmDecoder : public WasmDecoder {
TFNode* tnode = to->locals[i];
TFNode* fnode = from->locals[i];
if (builder_->IsPhiWithMerge(tnode, merge)) {
- builder_->AppendToPhi(merge, tnode, fnode);
+ builder_->AppendToPhi(tnode, fnode);
} else if (tnode != fnode) {
uint32_t count = builder_->InputCount(merge);
TFNode** vals = builder_->Buffer(count);
@@ -1455,7 +1521,7 @@ class SR_WasmDecoder : public WasmDecoder {
TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
TFNode* fnode) {
if (builder_->IsPhiWithMerge(tnode, merge)) {
- builder_->AppendToPhi(merge, tnode, fnode);
+ builder_->AppendToPhi(tnode, fnode);
} else if (tnode != fnode) {
uint32_t count = builder_->InputCount(merge);
TFNode** vals = builder_->Buffer(count);
@@ -1501,8 +1567,6 @@ class SR_WasmDecoder : public WasmDecoder {
size_t size = sizeof(TFNode*) * EnvironmentCount();
result->control = from->control;
result->effect = from->effect;
- result->state = from->state == SsaEnv::kUnreachable ? SsaEnv::kUnreachable
- : SsaEnv::kReached;
if (from->go()) {
result->state = SsaEnv::kReached;
@@ -1549,88 +1613,56 @@ class SR_WasmDecoder : public WasmDecoder {
virtual void onFirstError() {
limit_ = start_; // Terminate decoding loop.
builder_ = nullptr; // Don't build any more nodes.
-#if DEBUG
- PrintStackForDebugging();
-#endif
- }
-
-#if DEBUG
- void PrintStackForDebugging() { PrintProduction(0); }
-
- void PrintProduction(size_t depth) {
- if (depth >= stack_.size()) return;
- Production* p = &stack_[depth];
- for (size_t d = 0; d < depth; d++) PrintF(" ");
-
- PrintF("@%d %s [%d]\n", static_cast<int>(p->tree->pc - start_),
- WasmOpcodes::OpcodeName(p->opcode()), p->tree->count);
- for (int i = 0; i < p->index; i++) {
- Tree* child = p->tree->children[i];
- for (size_t d = 0; d <= depth; d++) PrintF(" ");
- PrintF("@%d %s [%d]", static_cast<int>(child->pc - start_),
- WasmOpcodes::OpcodeName(child->opcode()), child->count);
- if (child->node) {
- PrintF(" => TF");
- compiler::WasmGraphBuilder::PrintDebugName(child->node);
- }
- PrintF("\n");
- }
- PrintProduction(depth + 1);
+ TRACE(" !%s\n", error_msg_.get());
}
-#endif
-
BitVector* AnalyzeLoopAssignment(const byte* pc) {
if (pc >= limit_) return nullptr;
if (*pc != kExprLoop) return nullptr;
BitVector* assigned =
- new (zone_) BitVector(static_cast<int>(total_locals_), zone_);
- // Keep a stack to model the nesting of expressions.
- std::vector<int> arity_stack;
- arity_stack.push_back(OpcodeArity(pc));
- pc += OpcodeLength(pc);
-
+ new (zone_) BitVector(static_cast<int>(local_type_vec_.size()), zone_);
+ int depth = 0;
// Iteratively process all AST nodes nested inside the loop.
- while (pc < limit_) {
+ while (pc < limit_ && ok()) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
- int arity = 0;
- int length = 1;
- int assigned_index = -1;
- if (opcode == kExprSetLocal) {
- LocalIndexOperand operand(this, pc);
- if (assigned->length() > 0 &&
- static_cast<int>(operand.index) < assigned->length()) {
- // Unverified code might have an out-of-bounds index.
- // Ignore out-of-bounds indices, as the main verification will fail.
- assigned->Add(operand.index);
- assigned_index = operand.index;
+ unsigned length = 1;
+ switch (opcode) {
+ case kExprLoop:
+ case kExprIf:
+ case kExprBlock:
+ case kExprTryCatch:
+ case kExprTryCatchFinally:
+ case kExprTryFinally:
+ depth++;
+ DCHECK_EQ(1, OpcodeLength(pc));
+ break;
+ case kExprSetLocal: {
+ LocalIndexOperand operand(this, pc);
+ if (assigned->length() > 0 &&
+ static_cast<int>(operand.index) < assigned->length()) {
+ // Unverified code might have an out-of-bounds index.
+ assigned->Add(operand.index);
+ }
+ length = 1 + operand.length;
+ break;
}
- arity = 1;
- length = 1 + operand.length;
- } else {
- arity = OpcodeArity(pc);
- length = OpcodeLength(pc);
- }
-
- TRACE("loop-assign module+%-6d %s func+%d: 0x%02x %s", baserel(pc),
- indentation(), startrel(pc), opcode,
- WasmOpcodes::OpcodeName(opcode));
-
- if (assigned_index >= 0) {
- TRACE(" (assigned local #%d)\n", assigned_index);
- } else {
- TRACE("\n");
+ case kExprEnd:
+ depth--;
+ break;
+ default:
+ length = OpcodeLength(pc);
+ break;
}
-
+ if (depth <= 0) break;
pc += length;
- arity_stack.push_back(arity);
- while (arity_stack.back() == 0) {
- arity_stack.pop_back();
- if (arity_stack.empty()) return assigned; // reached end of loop
- arity_stack.back()--;
- }
}
- return assigned;
+ return ok() ? assigned : nullptr;
+ }
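The rewritten analysis replaces the old arity stack with a plain nesting counter over the flat bytecode. A standalone model using a hypothetical toy encoding (one-byte opcodes, one-byte set_local index; assumptions mine):

#include <bitset>
#include <cstdint>
#include <vector>

enum ToyOp : uint8_t { kLoop, kBlock, kEnd, kSetLocal, kNop };

// Collects locals assigned inside a loop body by tracking block depth;
// the scan stops once the loop's matching kEnd brings depth back to 0.
std::bitset<64> AssignedInLoop(const std::vector<uint8_t>& code) {
  std::bitset<64> assigned;
  int depth = 0;
  size_t pc = 0;
  while (pc < code.size()) {
    size_t length = 1;
    switch (code[pc]) {
      case kLoop:
      case kBlock:
        depth++;
        break;
      case kSetLocal:
        if (pc + 1 < code.size() && code[pc + 1] < 64) {
          assigned.set(code[pc + 1]);
        }
        length = 2;  // opcode plus one-byte index in this toy encoding
        break;
      case kEnd:
        depth--;
        break;
      default:
        break;
    }
    if (depth <= 0) break;  // left the loop construct
    pc += length;
  }
  return assigned;
}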
+
+ inline wasm::WasmCodePosition position() {
+ int offset = static_cast<int>(pc_ - start_);
+ DCHECK_EQ(pc_ - start_, offset); // overflows cannot happen
+ return offset;
}
};
@@ -1639,156 +1671,188 @@ bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
base::AccountingAllocator allocator;
Zone tmp(&allocator);
FunctionBody body = {nullptr, nullptr, nullptr, start, end};
- SR_WasmDecoder decoder(&tmp, nullptr, body);
+ WasmFullDecoder decoder(&tmp, nullptr, body);
return decoder.DecodeLocalDecls(decls);
}
-TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
- FunctionBody& body) {
- Zone zone(allocator);
- SR_WasmDecoder decoder(&zone, nullptr, body);
- TreeResult result = decoder.Decode();
- return result;
+BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
+ AstLocalDecls* decls)
+ : Decoder(start, end) {
+ if (decls != nullptr) {
+ if (DecodeLocalDecls(*decls, start, end)) {
+ pc_ += decls->decls_encoded_size;
+ if (pc_ > end_) pc_ = end_;
+ }
+ }
}
-TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
- TFBuilder* builder, FunctionBody& body) {
+DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+ FunctionBody& body) {
Zone zone(allocator);
- SR_WasmDecoder decoder(&zone, builder, body);
- TreeResult result = decoder.Decode();
- return result;
+ WasmFullDecoder decoder(&zone, nullptr, body);
+ decoder.Decode();
+ return decoder.toResult<DecodeStruct*>(nullptr);
}
-
-std::ostream& operator<<(std::ostream& os, const Tree& tree) {
- if (tree.pc == nullptr) {
- os << "null";
- return os;
- }
- PrintF("%s", WasmOpcodes::OpcodeName(tree.opcode()));
- if (tree.count > 0) os << "(";
- for (uint32_t i = 0; i < tree.count; i++) {
- if (i > 0) os << ", ";
- os << *tree.children[i];
- }
- if (tree.count > 0) os << ")";
- return os;
-}
-
-
-ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte* pc,
- const byte* limit,
- int* length,
- uint32_t* result) {
- Decoder decoder(pc, limit);
- *result = decoder.checked_read_u32v(pc, 0, length);
- if (decoder.ok()) return kNoError;
- return (limit - pc) > 1 ? kInvalidLEB128 : kMissingLEB128;
+DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
+ TFBuilder* builder, FunctionBody& body) {
+ Zone zone(allocator);
+ WasmFullDecoder decoder(&zone, builder, body);
+ decoder.Decode();
+ return decoder.toResult<DecodeStruct*>(nullptr);
}
-int OpcodeLength(const byte* pc, const byte* end) {
+unsigned OpcodeLength(const byte* pc, const byte* end) {
WasmDecoder decoder(nullptr, nullptr, pc, end);
return decoder.OpcodeLength(pc);
}
-int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
- const byte* end) {
- WasmDecoder decoder(module, sig, pc, end);
+unsigned OpcodeArity(const byte* pc, const byte* end) {
+ WasmDecoder decoder(nullptr, nullptr, pc, end);
return decoder.OpcodeArity(pc);
}
-void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body) {
- Zone zone(allocator);
- SR_WasmDecoder decoder(&zone, nullptr, body);
-
+void PrintAstForDebugging(const byte* start, const byte* end) {
+ base::AccountingAllocator allocator;
OFStream os(stdout);
+ PrintAst(&allocator, FunctionBodyForTesting(start, end), os, nullptr);
+}
+
+bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+ std::ostream& os,
+ std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
+ Zone zone(allocator);
+ WasmFullDecoder decoder(&zone, nullptr, body);
+ int line_nr = 0;
// Print the function signature.
if (body.sig) {
os << "// signature: " << *body.sig << std::endl;
+ ++line_nr;
}
// Print the local declarations.
AstLocalDecls decls(&zone);
- decoder.DecodeLocalDecls(decls);
- const byte* pc = decoder.pc();
- if (body.start != decoder.pc()) {
- printf("// locals:");
+ BytecodeIterator i(body.start, body.end, &decls);
+ if (body.start != i.pc()) {
+ os << "// locals: ";
for (auto p : decls.local_types) {
LocalType type = p.first;
uint32_t count = p.second;
os << " " << count << " " << WasmOpcodes::TypeName(type);
}
os << std::endl;
+ ++line_nr;
- for (const byte* locals = body.start; locals < pc; locals++) {
- printf(" 0x%02x,", *locals);
+ for (const byte* locals = body.start; locals < i.pc(); locals++) {
+ os << (locals == body.start ? "0x" : " 0x") << AsHex(*locals, 2) << ",";
}
- printf("\n");
+ os << std::endl;
+ ++line_nr;
}
- printf("// body: \n");
- std::vector<int> arity_stack;
- while (pc < body.end) {
- int arity = decoder.OpcodeArity(pc);
- size_t length = decoder.OpcodeLength(pc);
+ os << "// body: " << std::endl;
+ ++line_nr;
+ unsigned control_depth = 0;
+ for (; i.has_next(); i.next()) {
+ unsigned length = decoder.OpcodeLength(i.pc());
- for (auto arity : arity_stack) {
- printf(" ");
- USE(arity);
+ WasmOpcode opcode = i.current();
+ if (opcode == kExprElse) control_depth--;
+
+ int num_whitespaces = control_depth < 32 ? 2 * control_depth : 64;
+ if (offset_table) {
+ offset_table->push_back(
+ std::make_tuple(i.pc_offset(), line_nr, num_whitespaces));
}
- WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
- printf("k%s,", WasmOpcodes::OpcodeName(opcode));
+ // 64 whitespaces
+ const char* padding =
+ " ";
+ os.write(padding, num_whitespaces);
+ os << "k" << WasmOpcodes::OpcodeName(opcode) << ",";
- for (size_t i = 1; i < length; i++) {
- printf(" 0x%02x,", pc[i]);
+ for (size_t j = 1; j < length; ++j) {
+ os << " " << AsHex(i.pc()[j], 2) << ",";
}
- if (body.module) {
- switch (opcode) {
- case kExprCallIndirect: {
- SignatureIndexOperand operand(&decoder, pc);
- if (decoder.Validate(pc, operand)) {
- os << " // sig #" << operand.index << ": " << *operand.sig;
- }
- break;
+ switch (opcode) {
+ case kExprIf:
+ case kExprElse:
+ case kExprLoop:
+ case kExprBlock:
+ case kExprTryCatch:
+ case kExprTryCatchFinally:
+ case kExprTryFinally:
+ os << " // @" << i.pc_offset();
+ control_depth++;
+ break;
+ case kExprEnd:
+ os << " // @" << i.pc_offset();
+ control_depth--;
+ break;
+ case kExprBr: {
+ BreakDepthOperand operand(&i, i.pc());
+ os << " // arity=" << operand.arity << " depth=" << operand.depth;
+ break;
+ }
+ case kExprBrIf: {
+ BreakDepthOperand operand(&i, i.pc());
+ os << " // arity=" << operand.arity << " depth" << operand.depth;
+ break;
+ }
+ case kExprBrTable: {
+ BranchTableOperand operand(&i, i.pc());
+ os << " // arity=" << operand.arity
+ << " entries=" << operand.table_count;
+ break;
+ }
+ case kExprCallIndirect: {
+ CallIndirectOperand operand(&i, i.pc());
+ if (decoder.Complete(i.pc(), operand)) {
+ os << " // sig #" << operand.index << ": " << *operand.sig;
+ } else {
+ os << " // arity=" << operand.arity << " sig #" << operand.index;
}
- case kExprCallImport: {
- ImportIndexOperand operand(&decoder, pc);
- if (decoder.Validate(pc, operand)) {
- os << " // import #" << operand.index << ": " << *operand.sig;
- }
- break;
+ break;
+ }
+ case kExprCallImport: {
+ CallImportOperand operand(&i, i.pc());
+ if (decoder.Complete(i.pc(), operand)) {
+ os << " // import #" << operand.index << ": " << *operand.sig;
+ } else {
+ os << " // arity=" << operand.arity << " import #" << operand.index;
}
- case kExprCallFunction: {
- FunctionIndexOperand operand(&decoder, pc);
- if (decoder.Validate(pc, operand)) {
- os << " // function #" << operand.index << ": " << *operand.sig;
- }
- break;
+ break;
+ }
+ case kExprCallFunction: {
+ CallFunctionOperand operand(&i, i.pc());
+ if (decoder.Complete(i.pc(), operand)) {
+ os << " // function #" << operand.index << ": " << *operand.sig;
+ } else {
+ os << " // arity=" << operand.arity << " function #" << operand.index;
}
- default:
- break;
+ break;
}
- }
-
- pc += length;
- printf("\n");
-
- arity_stack.push_back(arity);
- while (arity_stack.back() == 0) {
- arity_stack.pop_back();
- if (arity_stack.empty()) break;
- arity_stack.back()--;
- }
+ case kExprReturn: {
+ ReturnArityOperand operand(&i, i.pc());
+ os << " // arity=" << operand.arity;
+ break;
+ }
+ default:
+ break;
+ }
+ os << std::endl;
+ ++line_nr;
}
+
+ return decoder.ok();
}
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
FunctionBody body = {nullptr, nullptr, nullptr, start, end};
- SR_WasmDecoder decoder(zone, nullptr, body);
+ WasmFullDecoder decoder(zone, nullptr, body);
return decoder.AnalyzeLoopAssignmentForTesting(start, num_locals);
}
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/ast-decoder.h
index 5376e7bfdd..c4f6c1679a 100644
--- a/deps/v8/src/wasm/ast-decoder.h
+++ b/deps/v8/src/wasm/ast-decoder.h
@@ -25,7 +25,7 @@ namespace wasm {
struct LocalIndexOperand {
uint32_t index;
LocalType type;
- int length;
+ unsigned length;
inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
index = decoder->checked_read_u32v(pc, 1, &length, "local index");
@@ -35,7 +35,7 @@ struct LocalIndexOperand {
struct ImmI8Operand {
int8_t value;
- int length;
+ unsigned length;
inline ImmI8Operand(Decoder* decoder, const byte* pc) {
value = bit_cast<int8_t>(decoder->checked_read_u8(pc, 1, "immi8"));
length = 1;
@@ -44,7 +44,7 @@ struct ImmI8Operand {
struct ImmI32Operand {
int32_t value;
- int length;
+ unsigned length;
inline ImmI32Operand(Decoder* decoder, const byte* pc) {
value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
}
@@ -52,7 +52,7 @@ struct ImmI32Operand {
struct ImmI64Operand {
int64_t value;
- int length;
+ unsigned length;
inline ImmI64Operand(Decoder* decoder, const byte* pc) {
value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
}
@@ -60,7 +60,7 @@ struct ImmI64Operand {
struct ImmF32Operand {
float value;
- int length;
+ unsigned length;
inline ImmF32Operand(Decoder* decoder, const byte* pc) {
value = bit_cast<float>(decoder->checked_read_u32(pc, 1, "immf32"));
length = 4;
@@ -69,7 +69,7 @@ struct ImmF32Operand {
struct ImmF64Operand {
double value;
- int length;
+ unsigned length;
inline ImmF64Operand(Decoder* decoder, const byte* pc) {
value = bit_cast<double>(decoder->checked_read_u64(pc, 1, "immf64"));
length = 8;
@@ -79,76 +79,93 @@ struct ImmF64Operand {
struct GlobalIndexOperand {
uint32_t index;
LocalType type;
- MachineType machine_type;
- int length;
+ unsigned length;
inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
index = decoder->checked_read_u32v(pc, 1, &length, "global index");
type = kAstStmt;
- machine_type = MachineType::None();
}
};
-struct Block;
+struct Control;
struct BreakDepthOperand {
+ uint32_t arity;
uint32_t depth;
- Block* target;
- int length;
+ Control* target;
+ unsigned length;
inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
- depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
+ unsigned len1 = 0;
+ unsigned len2 = 0;
+ arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+ depth = decoder->checked_read_u32v(pc, 1 + len1, &len2, "break depth");
+ length = len1 + len2;
target = nullptr;
}
};
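All the reworked operands follow the same pattern: an arity varint at pc + 1 and the payload varint right after it, both LEB128. A minimal standalone reader for one unsigned LEB128 u32 (hypothetical helper, not V8's checked_read_u32v):

#include <cstdint>

// Returns the number of bytes consumed, or 0 on error (truncated input
// or more than the 5 bytes a u32 varint may occupy).
unsigned ReadU32LEB(const uint8_t* pc, const uint8_t* end, uint32_t* out) {
  uint32_t result = 0;
  unsigned shift = 0;
  unsigned len = 0;
  while (pc + len < end && len < 5) {
    uint8_t b = pc[len++];
    result |= static_cast<uint32_t>(b & 0x7f) << shift;
    if ((b & 0x80) == 0) {
      *out = result;
      return len;
    }
    shift += 7;
  }
  return 0;
}

BreakDepthOperand then simply chains two such reads: arity at pc + 1, depth at pc + 1 + len1, with length = len1 + len2.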
-struct BlockCountOperand {
- uint32_t count;
- int length;
- inline BlockCountOperand(Decoder* decoder, const byte* pc) {
- count = decoder->checked_read_u32v(pc, 1, &length, "block count");
- }
-};
-
-struct SignatureIndexOperand {
+struct CallIndirectOperand {
+ uint32_t arity;
uint32_t index;
FunctionSig* sig;
- int length;
- inline SignatureIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u32v(pc, 1, &length, "signature index");
+ unsigned length;
+ inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
+ unsigned len1 = 0;
+ unsigned len2 = 0;
+ arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+ index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "signature index");
+ length = len1 + len2;
sig = nullptr;
}
};
-struct FunctionIndexOperand {
+struct CallFunctionOperand {
+ uint32_t arity;
uint32_t index;
FunctionSig* sig;
- int length;
- inline FunctionIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u32v(pc, 1, &length, "function index");
+ unsigned length;
+ inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
+ unsigned len1 = 0;
+ unsigned len2 = 0;
+ arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+ index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
+ length = len1 + len2;
sig = nullptr;
}
};
-struct ImportIndexOperand {
+struct CallImportOperand {
+ uint32_t arity;
uint32_t index;
FunctionSig* sig;
- int length;
- inline ImportIndexOperand(Decoder* decoder, const byte* pc) {
- index = decoder->checked_read_u32v(pc, 1, &length, "import index");
+ unsigned length;
+ inline CallImportOperand(Decoder* decoder, const byte* pc) {
+ unsigned len1 = 0;
+ unsigned len2 = 0;
+ arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
+ index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "import index");
+ length = len1 + len2;
sig = nullptr;
}
};
struct BranchTableOperand {
+ uint32_t arity;
uint32_t table_count;
const byte* table;
- int length;
+ unsigned length;
inline BranchTableOperand(Decoder* decoder, const byte* pc) {
- int varint_length;
+ unsigned len1 = 0;
+ unsigned len2 = 0;
+ arity = decoder->checked_read_u32v(pc, 1, &len1, "argument count");
table_count =
- decoder->checked_read_u32v(pc, 1, &varint_length, "expected #entries");
- length = varint_length + (table_count + 1) * sizeof(uint32_t);
+ decoder->checked_read_u32v(pc, 1 + len1, &len2, "table count");
+ if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
+ len1 + len2 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
+ decoder->error(pc, "branch table size overflow");
+ }
+ length = len1 + len2 + (table_count + 1) * sizeof(uint32_t);
- uint32_t table_start = 1 + varint_length;
+ uint32_t table_start = 1 + len1 + len2;
if (decoder->check(pc, table_start, (table_count + 1) * sizeof(uint32_t),
"expected <table entries>")) {
table = pc + table_start;
@@ -156,8 +173,8 @@ struct BranchTableOperand {
table = nullptr;
}
}
- inline uint32_t read_entry(Decoder* decoder, int i) {
- DCHECK(i >= 0 && static_cast<uint32_t>(i) <= table_count);
+ inline uint32_t read_entry(Decoder* decoder, unsigned i) {
+ DCHECK(i <= table_count);
return table ? decoder->read_u32(table + i * sizeof(uint32_t)) : 0;
}
};
@@ -165,18 +182,27 @@ struct BranchTableOperand {
struct MemoryAccessOperand {
uint32_t alignment;
uint32_t offset;
- int length;
+ unsigned length;
inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
- int alignment_length;
+ unsigned alignment_length;
alignment =
decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
- int offset_length;
+ unsigned offset_length;
offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
&offset_length, "offset");
length = alignment_length + offset_length;
}
};
+struct ReturnArityOperand {
+ uint32_t arity;
+ unsigned length;
+
+ inline ReturnArityOperand(Decoder* decoder, const byte* pc) {
+ arity = decoder->checked_read_u32v(pc, 1, &length, "return count");
+ }
+};
+
typedef compiler::WasmGraphBuilder TFBuilder;
struct ModuleEnv; // forward declaration of module interface.
@@ -189,37 +215,45 @@ struct FunctionBody {
const byte* end; // end of the function body
};
-struct Tree;
-typedef Result<Tree*> TreeResult;
+static inline FunctionBody FunctionBodyForTesting(const byte* start,
+ const byte* end) {
+ return {nullptr, nullptr, start, start, end};
+}
+
+struct DecodeStruct {
+ int unused;
+};
+typedef Result<DecodeStruct*> DecodeResult;
+inline std::ostream& operator<<(std::ostream& os, const DecodeStruct& tree) {
+ return os;
+}
-std::ostream& operator<<(std::ostream& os, const Tree& tree);
+DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+ FunctionBody& body);
+DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
+ TFBuilder* builder, FunctionBody& body);
+bool PrintAst(base::AccountingAllocator* allocator, const FunctionBody& body,
+ std::ostream& os,
+ std::vector<std::tuple<uint32_t, int, int>>* offset_table);
-TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
- FunctionBody& body);
-TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
- TFBuilder* builder, FunctionBody& body);
-void PrintAst(base::AccountingAllocator* allocator, FunctionBody& body);
+// A simplified form of AST printing, e.g. from a debugger.
+void PrintAstForDebugging(const byte* start, const byte* end);
-inline TreeResult VerifyWasmCode(base::AccountingAllocator* allocator,
- ModuleEnv* module, FunctionSig* sig,
- const byte* start, const byte* end) {
+inline DecodeResult VerifyWasmCode(base::AccountingAllocator* allocator,
+ ModuleEnv* module, FunctionSig* sig,
+ const byte* start, const byte* end) {
FunctionBody body = {module, sig, nullptr, start, end};
return VerifyWasmCode(allocator, body);
}
-inline TreeResult BuildTFGraph(base::AccountingAllocator* allocator,
- TFBuilder* builder, ModuleEnv* module,
- FunctionSig* sig, const byte* start,
- const byte* end) {
+inline DecodeResult BuildTFGraph(base::AccountingAllocator* allocator,
+ TFBuilder* builder, ModuleEnv* module,
+ FunctionSig* sig, const byte* start,
+ const byte* end) {
FunctionBody body = {module, sig, nullptr, start, end};
return BuildTFGraph(allocator, builder, body);
}
-enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
-
-ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
- int*, uint32_t*);
-
struct AstLocalDecls {
// The size of the encoded declarations.
uint32_t decls_encoded_size; // size of encoded declarations
@@ -240,11 +274,64 @@ BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end);
// Computes the length of the opcode at the given address.
-int OpcodeLength(const byte* pc, const byte* end);
+unsigned OpcodeLength(const byte* pc, const byte* end);
// Computes the arity (number of sub-nodes) of the opcode at the given address.
-int OpcodeArity(ModuleEnv* module, FunctionSig* sig, const byte* pc,
- const byte* end);
+unsigned OpcodeArity(const byte* pc, const byte* end);
+
+// A simple forward iterator for bytecodes.
+class BytecodeIterator : public Decoder {
+ public:
+ // A nested iterator for walking the bytecode without tracking {pc_offset()}.
+ class iterator {
+ public:
+ inline iterator& operator++() {
+ DCHECK_LT(ptr_, end_);
+ ptr_ += OpcodeLength(ptr_, end_);
+ return *this;
+ }
+ inline WasmOpcode operator*() {
+ DCHECK_LT(ptr_, end_);
+ return static_cast<WasmOpcode>(*ptr_);
+ }
+ inline bool operator==(const iterator& that) {
+ return this->ptr_ == that.ptr_;
+ }
+ inline bool operator!=(const iterator& that) {
+ return this->ptr_ != that.ptr_;
+ }
+
+ private:
+ friend class BytecodeIterator;
+ const byte* ptr_;
+ const byte* end_;
+ iterator(const byte* ptr, const byte* end) : ptr_(ptr), end_(end) {}
+ };
+
+ // Create a new {BytecodeIterator}. If the {decls} pointer is non-null,
+ // assume the bytecode starts with local declarations and decode them.
+ // Otherwise, do not decode local decls.
+ BytecodeIterator(const byte* start, const byte* end,
+ AstLocalDecls* decls = nullptr);
+
+ inline iterator begin() const { return iterator(pc_, end_); }
+ inline iterator end() const { return iterator(end_, end_); }
+
+ WasmOpcode current() {
+ return static_cast<WasmOpcode>(
+ checked_read_u8(pc_, 0, "expected bytecode"));
+ }
+
+ void next() {
+ if (pc_ < end_) {
+ pc_ += OpcodeLength(pc_, end_);
+ if (pc_ >= end_) pc_ = end_;
+ }
+ }
+
+ bool has_next() { return pc_ < end_; }
+};
+
} // namespace wasm
} // namespace internal
} // namespace v8
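
[Editor's note, not part of the patch: the new {BytecodeIterator} replaces ad-hoc pointer walking over a function body. A minimal usage sketch, assuming the surrounding v8::internal::wasm types and a valid code buffer; the {decls} struct is presumably populated by the constructor, per the comment above.]

// Sketch only: count the opcodes in a function body whose bytes begin with
// encoded local declarations, using just the interface added above.
int CountOpcodes(const byte* start, const byte* end) {
  AstLocalDecls decls;                      // decoded by the constructor
  BytecodeIterator it(start, end, &decls);  // skips past local declarations
  int count = 0;
  while (it.has_next()) {
    ++count;     // it.current() would yield the WasmOpcode here
    it.next();
  }
  return count;
}

[The begin()/end() pair additionally makes the iterator usable directly in a range-based for loop when byte offsets are not needed.]
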
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index f9de2e1143..a6ede54bec 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -5,9 +5,12 @@
#ifndef V8_WASM_DECODER_H_
#define V8_WASM_DECODER_H_
-#include "src/base/smart-pointers.h"
+#include <memory>
+
+#include "src/base/compiler-specific.h"
#include "src/flags.h"
#include "src/signature.h"
+#include "src/utils.h"
#include "src/wasm/wasm-result.h"
#include "src/zone-containers.h"
@@ -24,12 +27,6 @@ namespace wasm {
#define TRACE(...)
#endif
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
-#define UNALIGNED_ACCESS_OK 1
-#else
-#define UNALIGNED_ACCESS_OK 0
-#endif
-
// A helper utility to decode bytes, integers, fields, varints, etc, from
// a buffer of bytes.
class Decoder {
@@ -44,47 +41,49 @@ class Decoder {
virtual ~Decoder() {}
- inline bool check(const byte* base, int offset, int length, const char* msg) {
+ inline bool check(const byte* base, unsigned offset, unsigned length,
+ const char* msg) {
DCHECK_GE(base, start_);
if ((base + offset + length) > limit_) {
- error(base, base + offset, msg);
+ error(base, base + offset, "%s", msg);
return false;
}
return true;
}
// Reads a single 8-bit byte, reporting an error if out of bounds.
- inline uint8_t checked_read_u8(const byte* base, int offset,
+ inline uint8_t checked_read_u8(const byte* base, unsigned offset,
const char* msg = "expected 1 byte") {
return check(base, offset, 1, msg) ? base[offset] : 0;
}
// Reads 16-bit word, reporting an error if out of bounds.
- inline uint16_t checked_read_u16(const byte* base, int offset,
+ inline uint16_t checked_read_u16(const byte* base, unsigned offset,
const char* msg = "expected 2 bytes") {
return check(base, offset, 2, msg) ? read_u16(base + offset) : 0;
}
// Reads 32-bit word, reporting an error if out of bounds.
- inline uint32_t checked_read_u32(const byte* base, int offset,
+ inline uint32_t checked_read_u32(const byte* base, unsigned offset,
const char* msg = "expected 4 bytes") {
return check(base, offset, 4, msg) ? read_u32(base + offset) : 0;
}
// Reads 64-bit word, reporting an error if out of bounds.
- inline uint64_t checked_read_u64(const byte* base, int offset,
+ inline uint64_t checked_read_u64(const byte* base, unsigned offset,
const char* msg = "expected 8 bytes") {
return check(base, offset, 8, msg) ? read_u64(base + offset) : 0;
}
// Reads a variable-length unsigned integer (little endian).
- uint32_t checked_read_u32v(const byte* base, int offset, int* length,
+ uint32_t checked_read_u32v(const byte* base, unsigned offset,
+ unsigned* length,
const char* msg = "expected LEB32") {
return checked_read_leb<uint32_t, false>(base, offset, length, msg);
}
// Reads a variable-length signed integer (little endian).
- int32_t checked_read_i32v(const byte* base, int offset, int* length,
+ int32_t checked_read_i32v(const byte* base, unsigned offset, unsigned* length,
const char* msg = "expected SLEB32") {
uint32_t result =
checked_read_leb<uint32_t, true>(base, offset, length, msg);
@@ -98,13 +97,14 @@ class Decoder {
}
// Reads a variable-length unsigned integer (little endian).
- uint64_t checked_read_u64v(const byte* base, int offset, int* length,
+ uint64_t checked_read_u64v(const byte* base, unsigned offset,
+ unsigned* length,
const char* msg = "expected LEB64") {
return checked_read_leb<uint64_t, false>(base, offset, length, msg);
}
// Reads a variable-length signed integer (little endian).
- int64_t checked_read_i64v(const byte* base, int offset, int* length,
+ int64_t checked_read_i64v(const byte* base, unsigned offset, unsigned* length,
const char* msg = "expected SLEB64") {
uint64_t result =
checked_read_leb<uint64_t, true>(base, offset, length, msg);
@@ -120,47 +120,19 @@ class Decoder {
// Reads a single 16-bit unsigned integer (little endian).
inline uint16_t read_u16(const byte* ptr) {
DCHECK(ptr >= start_ && (ptr + 2) <= end_);
-#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
- return *reinterpret_cast<const uint16_t*>(ptr);
-#else
- uint16_t b0 = ptr[0];
- uint16_t b1 = ptr[1];
- return (b1 << 8) | b0;
-#endif
+ return ReadLittleEndianValue<uint16_t>(ptr);
}
// Reads a single 32-bit unsigned integer (little endian).
inline uint32_t read_u32(const byte* ptr) {
DCHECK(ptr >= start_ && (ptr + 4) <= end_);
-#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
- return *reinterpret_cast<const uint32_t*>(ptr);
-#else
- uint32_t b0 = ptr[0];
- uint32_t b1 = ptr[1];
- uint32_t b2 = ptr[2];
- uint32_t b3 = ptr[3];
- return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
-#endif
+ return ReadLittleEndianValue<uint32_t>(ptr);
}
// Reads a single 64-bit unsigned integer (little endian).
inline uint64_t read_u64(const byte* ptr) {
DCHECK(ptr >= start_ && (ptr + 8) <= end_);
-#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
- return *reinterpret_cast<const uint64_t*>(ptr);
-#else
- uint32_t b0 = ptr[0];
- uint32_t b1 = ptr[1];
- uint32_t b2 = ptr[2];
- uint32_t b3 = ptr[3];
- uint32_t low = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
- uint32_t b4 = ptr[4];
- uint32_t b5 = ptr[5];
- uint32_t b6 = ptr[6];
- uint32_t b7 = ptr[7];
- uint64_t high = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
- return (high << 32) | low;
-#endif
+ return ReadLittleEndianValue<uint64_t>(ptr);
}
// Reads a 8-bit unsigned integer (byte) and advances {pc_}.
@@ -202,10 +174,9 @@ class Decoder {
}
// Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
- uint32_t consume_u32v(int* length, const char* name = nullptr) {
+ uint32_t consume_u32v(const char* name = nullptr) {
TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
name ? name : "varint");
-
if (checkAvailable(1)) {
const byte* pos = pc_;
const byte* end = pc_ + 5;
@@ -222,10 +193,10 @@ class Decoder {
shift += 7;
}
- *length = static_cast<int>(pc_ - pos);
+ int length = static_cast<int>(pc_ - pos);
if (pc_ == end && (b & 0x80)) {
error(pc_ - 1, "varint too large");
- } else if (*length == 0) {
+ } else if (length == 0) {
error(pc_, "varint of length 0");
} else {
TRACE("= %u\n", result);
@@ -258,12 +229,13 @@ class Decoder {
}
}
- void error(const char* msg) { error(pc_, nullptr, msg); }
+ void error(const char* msg) { error(pc_, nullptr, "%s", msg); }
- void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
+ void error(const byte* pc, const char* msg) { error(pc, nullptr, "%s", msg); }
// Sets internal error state.
- void error(const byte* pc, const byte* pt, const char* format, ...) {
+ void PRINTF_FORMAT(4, 5)
+ error(const byte* pc, const byte* pt, const char* format, ...) {
if (ok()) {
#if DEBUG
if (FLAG_wasm_break_on_decoder_error) {
@@ -276,7 +248,7 @@ class Decoder {
va_start(arguments, format);
base::OS::VSNPrintF(buffer, kMaxErrorMsg - 1, format, arguments);
va_end(arguments);
- error_msg_.Reset(buffer);
+ error_msg_.reset(buffer);
error_pc_ = pc;
error_pt_ = pt;
onFirstError();
@@ -309,11 +281,11 @@ class Decoder {
result.error_pc = error_pc_;
result.error_pt = error_pt_;
// transfer ownership of the error to the result.
- result.error_msg.Reset(error_msg_.Detach());
+ result.error_msg.reset(error_msg_.release());
} else {
result.error_code = kSuccess;
}
- result.val = val;
+ result.val = std::move(val);
return result;
}
@@ -325,11 +297,11 @@ class Decoder {
end_ = end;
error_pc_ = nullptr;
error_pt_ = nullptr;
- error_msg_.Reset(nullptr);
+ error_msg_.reset();
}
bool ok() const { return error_pc_ == nullptr; }
- bool failed() const { return !error_msg_.is_empty(); }
+ bool failed() const { return !!error_msg_; }
bool more() const { return pc_ < limit_; }
const byte* start() { return start_; }
@@ -343,11 +315,11 @@ class Decoder {
const byte* end_;
const byte* error_pc_;
const byte* error_pt_;
- base::SmartArrayPointer<char> error_msg_;
+ std::unique_ptr<char[]> error_msg_;
private:
template <typename IntType, bool is_signed>
- IntType checked_read_leb(const byte* base, int offset, int* length,
+ IntType checked_read_leb(const byte* base, unsigned offset, unsigned* length,
const char* msg) {
if (!check(base, offset, 1, msg)) {
*length = 0;
@@ -368,7 +340,7 @@ class Decoder {
shift += 7;
}
DCHECK_LE(ptr - (base + offset), kMaxLength);
- *length = static_cast<int>(ptr - (base + offset));
+ *length = static_cast<unsigned>(ptr - (base + offset));
if (ptr == end) {
// Check there are no bits set beyond the bitwidth of {IntType}.
const int kExtraBits = (1 + kMaxLength * 7) - (sizeof(IntType) * 8);
@@ -392,7 +364,7 @@ class Decoder {
return 0;
}
if ((b & 0x80) != 0) {
- error(base, ptr, msg);
+ error(base, ptr, "%s", msg);
return 0;
}
}
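
[Editor's note, not part of the patch: the core of checked_read_leb above is ordinary LEB128 decoding; the surrounding code adds the bounds and excess-bit checks. A self-contained sketch of the same loop, runnable outside V8, decoding {0xAC, 0x02} back to 300. The function name is hypothetical; the unsigned out-parameter mirrors the new API signature.]

#include <cstdint>
#include <cstdio>

// Decode an unsigned LEB128 value; no bounds or overflow checks (sketch only).
uint32_t DecodeU32LEB(const uint8_t* p, unsigned* length) {
  uint32_t result = 0;
  int shift = 0;
  unsigned count = 0;
  uint8_t b;
  do {
    b = p[count++];
    result |= static_cast<uint32_t>(b & 0x7F) << shift;  // 7 payload bits
    shift += 7;
  } while (b & 0x80);  // high bit set means another byte follows
  *length = count;
  return result;
}

int main() {
  const uint8_t bytes[] = {0xAC, 0x02};  // 0x2C | (0x02 << 7) == 300
  unsigned len;
  printf("%u (%u bytes)\n", DecodeU32LEB(bytes, &len), len);  // 300 (2 bytes)
  return 0;
}
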
diff --git a/deps/v8/src/wasm/encoder.cc b/deps/v8/src/wasm/encoder.cc
index 92e6b1145c..ef0bddc836 100644
--- a/deps/v8/src/wasm/encoder.cc
+++ b/deps/v8/src/wasm/encoder.cc
@@ -10,6 +10,7 @@
#include "src/wasm/ast-decoder.h"
#include "src/wasm/encoder.h"
+#include "src/wasm/leb-helper.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -29,146 +30,72 @@ namespace v8 {
namespace internal {
namespace wasm {
-/*TODO: add error cases for adding too many locals, too many functions and bad
- indices in body */
+// Emit a section name and the size as a padded varint that can be patched
+// later.
+size_t EmitSection(WasmSection::Code code, ZoneBuffer& buffer) {
+ // Emit the section name.
+ const char* name = WasmSection::getName(code);
+ TRACE("emit section: %s\n", name);
+ size_t length = WasmSection::getNameLength(code);
+ buffer.write_size(length); // Section name string size.
+ buffer.write(reinterpret_cast<const byte*>(name), length);
-namespace {
-void EmitUint8(byte** b, uint8_t x) {
- Memory::uint8_at(*b) = x;
- *b += 1;
+ // Emit a placeholder for the length.
+ return buffer.reserve_u32v();
}
-
-void EmitUint16(byte** b, uint16_t x) {
- WriteUnalignedUInt16(*b, x);
- *b += 2;
+// Patch the size of a section after it's finished.
+void FixupSection(ZoneBuffer& buffer, size_t start) {
+ buffer.patch_u32v(start, static_cast<uint32_t>(buffer.offset() - start -
+ kPaddedVarInt32Size));
}
+WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
+ : builder_(builder),
+ locals_(builder->zone()),
+ signature_index_(0),
+ exported_(0),
+ body_(builder->zone()),
+ name_(builder->zone()) {}
-void EmitUint32(byte** b, uint32_t x) {
- WriteUnalignedUInt32(*b, x);
- *b += 4;
-}
-
-// Sections all start with a size, but it's unknown at the start.
-// We generate a large varint which we then fixup later when the size is known.
-//
-// TODO(jfb) Not strictly necessary since sizes are calculated ahead of time.
-const size_t padded_varint = 5;
-
-void EmitVarInt(byte** b, size_t val) {
- while (true) {
- size_t next = val >> 7;
- byte out = static_cast<byte>(val & 0x7f);
- if (next) {
- *((*b)++) = 0x80 | out;
- val = next;
- } else {
- *((*b)++) = out;
- break;
- }
+void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
+ byte buffer[8];
+ byte* ptr = buffer;
+ LEBHelper::write_u32v(&ptr, val);
+ for (byte* p = buffer; p < ptr; p++) {
+ body_.push_back(*p);
}
}
-size_t SizeOfVarInt(size_t value) {
- size_t size = 0;
- do {
- size++;
- value = value >> 7;
- } while (value > 0);
- return size;
+void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
+ DCHECK(!locals_.has_sig());
+ locals_.set_sig(sig);
+ signature_index_ = builder_->AddSignature(sig);
}
-void FixupSection(byte* start, byte* end) {
- // Same as EmitVarInt, but fixed-width with zeroes in the MSBs.
- size_t val = end - start - padded_varint;
- TRACE(" fixup %u\n", (unsigned)val);
- for (size_t pos = 0; pos != padded_varint; ++pos) {
- size_t next = val >> 7;
- byte out = static_cast<byte>(val & 0x7f);
- if (pos != padded_varint - 1) {
- *(start++) = 0x80 | out;
- val = next;
- } else {
- *(start++) = out;
- // TODO(jfb) check that the pre-allocated fixup size isn't overflowed.
- }
- }
+uint32_t WasmFunctionBuilder::AddLocal(LocalType type) {
+ DCHECK(locals_.has_sig());
+ return locals_.AddLocals(1, type);
}
-// Returns the start of the section, where the section VarInt size is.
-byte* EmitSection(WasmSection::Code code, byte** b) {
- byte* start = *b;
- const char* name = WasmSection::getName(code);
- size_t length = WasmSection::getNameLength(code);
- TRACE("emit section: %s\n", name);
- for (size_t padding = 0; padding != padded_varint; ++padding) {
- EmitUint8(b, 0xff); // Will get fixed up later.
- }
- EmitVarInt(b, length); // Section name string size.
- for (size_t i = 0; i != length; ++i) EmitUint8(b, name[i]);
- return start;
+void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) {
+ EmitWithVarInt(kExprGetLocal, local_index);
}
-} // namespace
-
-struct WasmFunctionBuilder::Type {
- bool param_;
- LocalType type_;
-};
-
-WasmFunctionBuilder::WasmFunctionBuilder(Zone* zone)
- : return_type_(kAstI32),
- locals_(zone),
- exported_(0),
- external_(0),
- body_(zone),
- local_indices_(zone),
- name_(zone) {}
-
-
-uint16_t WasmFunctionBuilder::AddParam(LocalType type) {
- return AddVar(type, true);
+void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
+ EmitWithVarInt(kExprSetLocal, local_index);
}
-
-uint16_t WasmFunctionBuilder::AddLocal(LocalType type) {
- return AddVar(type, false);
-}
-
-
-uint16_t WasmFunctionBuilder::AddVar(LocalType type, bool param) {
- locals_.push_back({param, type});
- return static_cast<uint16_t>(locals_.size() - 1);
-}
-
-
-void WasmFunctionBuilder::ReturnType(LocalType type) { return_type_ = type; }
-
-
void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
- EmitCode(code, code_size, nullptr, 0);
-}
-
-
-void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size,
- const uint32_t* local_indices,
- uint32_t indices_size) {
- size_t size = body_.size();
- for (size_t i = 0; i < code_size; i++) {
+ for (size_t i = 0; i < code_size; ++i) {
body_.push_back(code[i]);
}
- for (size_t i = 0; i < indices_size; i++) {
- local_indices_.push_back(local_indices[i] + static_cast<uint32_t>(size));
- }
}
-
void WasmFunctionBuilder::Emit(WasmOpcode opcode) {
body_.push_back(static_cast<byte>(opcode));
}
-
void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
body_.push_back(static_cast<byte>(opcode));
body_.push_back(immediate);
@@ -184,251 +111,75 @@ void WasmFunctionBuilder::EmitWithU8U8(WasmOpcode opcode, const byte imm1,
void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode,
uint32_t immediate) {
body_.push_back(static_cast<byte>(opcode));
- size_t immediate_size = SizeOfVarInt(immediate);
- body_.insert(body_.end(), immediate_size, 0);
- byte* p = &body_[body_.size() - immediate_size];
- EmitVarInt(&p, immediate);
-}
-
-uint32_t WasmFunctionBuilder::EmitEditableVarIntImmediate() {
- // Guess that the immediate will be 1 byte. If it is more, we'll have to
- // shift everything down.
- body_.push_back(0);
- return static_cast<uint32_t>(body_.size()) - 1;
+ EmitVarInt(immediate);
}
-void WasmFunctionBuilder::EditVarIntImmediate(uint32_t offset,
- const uint32_t immediate) {
- uint32_t immediate_size = static_cast<uint32_t>(SizeOfVarInt(immediate));
- // In EmitEditableVarIntImmediate, we guessed that we'd only need one byte.
- // If we need more, shift everything down to make room for the larger
- // immediate.
- if (immediate_size > 1) {
- uint32_t diff = immediate_size - 1;
- body_.insert(body_.begin() + offset, diff, 0);
-
- for (size_t i = 0; i < local_indices_.size(); ++i) {
- if (local_indices_[i] >= offset) {
- local_indices_[i] += diff;
- }
- }
+void WasmFunctionBuilder::EmitI32Const(int32_t value) {
+ // TODO(titzer): variable-length signed and unsigned i32 constants.
+ if (-128 <= value && value <= 127) {
+ EmitWithU8(kExprI8Const, static_cast<byte>(value));
+ } else {
+ byte code[] = {WASM_I32V_5(value)};
+ EmitCode(code, sizeof(code));
}
- DCHECK(offset + immediate_size <= body_.size());
- byte* p = &body_[offset];
- EmitVarInt(&p, immediate);
}
+void WasmFunctionBuilder::SetExported() { exported_ = true; }
-void WasmFunctionBuilder::Exported(uint8_t flag) { exported_ = flag; }
-
-
-void WasmFunctionBuilder::External(uint8_t flag) { external_ = flag; }
-
-void WasmFunctionBuilder::SetName(const unsigned char* name, int name_length) {
+void WasmFunctionBuilder::SetName(const char* name, int name_length) {
name_.clear();
if (name_length > 0) {
- for (int i = 0; i < name_length; i++) {
+ for (int i = 0; i < name_length; ++i) {
name_.push_back(*(name + i));
}
}
}
-
-WasmFunctionEncoder* WasmFunctionBuilder::Build(Zone* zone,
- WasmModuleBuilder* mb) const {
- WasmFunctionEncoder* e =
- new (zone) WasmFunctionEncoder(zone, return_type_, exported_, external_);
- uint16_t* var_index = zone->NewArray<uint16_t>(locals_.size());
- IndexVars(e, var_index);
- if (body_.size() > 0) {
- // TODO(titzer): iterate over local indexes, not the bytes.
- const byte* start = &body_[0];
- const byte* end = start + body_.size();
- size_t local_index = 0;
- for (size_t i = 0; i < body_.size();) {
- if (local_index < local_indices_.size() &&
- i == local_indices_[local_index]) {
- int length = 0;
- uint32_t index;
- ReadUnsignedLEB128Operand(start + i, end, &length, &index);
- uint16_t new_index = var_index[index];
- const std::vector<uint8_t>& index_vec = UnsignedLEB128From(new_index);
- for (size_t j = 0; j < index_vec.size(); j++) {
- e->body_.push_back(index_vec.at(j));
- }
- i += length;
- local_index++;
- } else {
- e->body_.push_back(*(start + i));
- i++;
- }
- }
- }
- FunctionSig::Builder sig(zone, return_type_ == kAstStmt ? 0 : 1,
- e->params_.size());
- if (return_type_ != kAstStmt) {
- sig.AddReturn(static_cast<LocalType>(return_type_));
- }
- for (size_t i = 0; i < e->params_.size(); i++) {
- sig.AddParam(static_cast<LocalType>(e->params_[i]));
- }
- e->signature_index_ = mb->AddSignature(sig.Build());
- e->name_.insert(e->name_.begin(), name_.begin(), name_.end());
- return e;
+void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
+ buffer.write_u32v(signature_index_);
}
-
-void WasmFunctionBuilder::IndexVars(WasmFunctionEncoder* e,
- uint16_t* var_index) const {
- uint16_t param = 0;
- uint16_t i32 = 0;
- uint16_t i64 = 0;
- uint16_t f32 = 0;
- uint16_t f64 = 0;
- for (size_t i = 0; i < locals_.size(); i++) {
- if (locals_.at(i).param_) {
- param++;
- } else if (locals_.at(i).type_ == kAstI32) {
- i32++;
- } else if (locals_.at(i).type_ == kAstI64) {
- i64++;
- } else if (locals_.at(i).type_ == kAstF32) {
- f32++;
- } else if (locals_.at(i).type_ == kAstF64) {
- f64++;
- }
- }
- e->local_i32_count_ = i32;
- e->local_i64_count_ = i64;
- e->local_f32_count_ = f32;
- e->local_f64_count_ = f64;
- f64 = param + i32 + i64 + f32;
- f32 = param + i32 + i64;
- i64 = param + i32;
- i32 = param;
- param = 0;
- for (size_t i = 0; i < locals_.size(); i++) {
- if (locals_.at(i).param_) {
- e->params_.push_back(locals_.at(i).type_);
- var_index[i] = param++;
- } else if (locals_.at(i).type_ == kAstI32) {
- var_index[i] = i32++;
- } else if (locals_.at(i).type_ == kAstI64) {
- var_index[i] = i64++;
- } else if (locals_.at(i).type_ == kAstF32) {
- var_index[i] = f32++;
- } else if (locals_.at(i).type_ == kAstF64) {
- var_index[i] = f64++;
+void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer,
+ uint32_t func_index) const {
+ if (exported_) {
+ buffer.write_u32v(func_index);
+ buffer.write_size(name_.size());
+ if (name_.size() > 0) {
+ buffer.write(reinterpret_cast<const byte*>(&name_[0]), name_.size());
}
}
}
-
-WasmFunctionEncoder::WasmFunctionEncoder(Zone* zone, LocalType return_type,
- bool exported, bool external)
- : params_(zone),
- exported_(exported),
- external_(external),
- body_(zone),
- name_(zone) {}
-
-
-uint32_t WasmFunctionEncoder::HeaderSize() const {
- uint32_t size = 3;
- if (!external_) size += 2;
- if (HasName()) {
- uint32_t name_size = NameSize();
- size += static_cast<uint32_t>(SizeOfVarInt(name_size)) + name_size;
- }
- return size;
-}
-
-
-uint32_t WasmFunctionEncoder::BodySize(void) const {
- // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
- LocalDeclEncoder local_decl;
- local_decl.AddLocals(local_i32_count_, kAstI32);
- local_decl.AddLocals(local_i64_count_, kAstI64);
- local_decl.AddLocals(local_f32_count_, kAstF32);
- local_decl.AddLocals(local_f64_count_, kAstF64);
-
- return external_ ? 0
- : static_cast<uint32_t>(body_.size() + local_decl.Size());
-}
-
-
-uint32_t WasmFunctionEncoder::NameSize() const {
- return HasName() ? static_cast<uint32_t>(name_.size()) : 0;
-}
-
-
-void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
- byte** body) const {
- uint8_t decl_bits = (exported_ ? kDeclFunctionExport : 0) |
- (external_ ? kDeclFunctionImport : 0) |
- (HasName() ? kDeclFunctionName : 0);
-
- EmitUint8(header, decl_bits);
- EmitUint16(header, signature_index_);
-
- if (HasName()) {
- EmitVarInt(header, NameSize());
- for (size_t i = 0; i < name_.size(); ++i) {
- EmitUint8(header, name_[i]);
- }
- }
-
-
- if (!external_) {
- // TODO(titzer): embed a LocalDeclEncoder in the WasmFunctionEncoder
- LocalDeclEncoder local_decl;
- local_decl.AddLocals(local_i32_count_, kAstI32);
- local_decl.AddLocals(local_i64_count_, kAstI64);
- local_decl.AddLocals(local_f32_count_, kAstF32);
- local_decl.AddLocals(local_f64_count_, kAstF64);
-
- EmitUint16(header, static_cast<uint16_t>(body_.size() + local_decl.Size()));
- (*header) += local_decl.Emit(*header);
- if (body_.size() > 0) {
- std::memcpy(*header, &body_[0], body_.size());
- (*header) += body_.size();
- }
+void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
+ size_t locals_size = locals_.Size();
+ buffer.write_size(locals_size + body_.size());
+ buffer.EnsureSpace(locals_size);
+ byte** ptr = buffer.pos_ptr();
+ locals_.Emit(*ptr);
+ (*ptr) += locals_size; // UGLY: manual bump of position pointer
+ if (body_.size() > 0) {
+ buffer.write(&body_[0], body_.size());
}
}
-
WasmDataSegmentEncoder::WasmDataSegmentEncoder(Zone* zone, const byte* data,
uint32_t size, uint32_t dest)
: data_(zone), dest_(dest) {
- for (size_t i = 0; i < size; i++) {
+ for (size_t i = 0; i < size; ++i) {
data_.push_back(data[i]);
}
}
-
-uint32_t WasmDataSegmentEncoder::HeaderSize() const {
- static const int kDataSegmentSize = 13;
- return kDataSegmentSize;
-}
-
-
-uint32_t WasmDataSegmentEncoder::BodySize() const {
- return static_cast<uint32_t>(data_.size());
-}
-
-
-void WasmDataSegmentEncoder::Serialize(byte* buffer, byte** header,
- byte** body) const {
- EmitVarInt(header, dest_);
- EmitVarInt(header, static_cast<uint32_t>(data_.size()));
-
- std::memcpy(*header, &data_[0], data_.size());
- (*header) += data_.size();
+void WasmDataSegmentEncoder::Write(ZoneBuffer& buffer) const {
+ buffer.write_u32v(dest_);
+ buffer.write_u32v(static_cast<uint32_t>(data_.size()));
+ buffer.write(&data_[0], data_.size());
}
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
+ imports_(zone),
functions_(zone),
data_segments_(zone),
indirect_functions_(zone),
@@ -436,12 +187,11 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
signature_map_(zone),
start_function_index_(-1) {}
-uint16_t WasmModuleBuilder::AddFunction() {
- functions_.push_back(new (zone_) WasmFunctionBuilder(zone_));
- return static_cast<uint16_t>(functions_.size() - 1);
+uint32_t WasmModuleBuilder::AddFunction() {
+ functions_.push_back(new (zone_) WasmFunctionBuilder(this));
+ return static_cast<uint32_t>(functions_.size() - 1);
}
-
WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
if (functions_.size() > index) {
return functions_.at(index);
@@ -450,12 +200,10 @@ WasmFunctionBuilder* WasmModuleBuilder::FunctionAt(size_t index) {
}
}
-
void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
data_segments_.push_back(data);
}
-
bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
FunctionSig* b) const {
if (a->return_count() < b->return_count()) return true;
@@ -473,266 +221,161 @@ bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
return false;
}
-
-uint16_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
+uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
SignatureMap::iterator pos = signature_map_.find(sig);
if (pos != signature_map_.end()) {
return pos->second;
} else {
- uint16_t index = static_cast<uint16_t>(signatures_.size());
+ uint32_t index = static_cast<uint32_t>(signatures_.size());
signature_map_[sig] = index;
signatures_.push_back(sig);
return index;
}
}
-
-void WasmModuleBuilder::AddIndirectFunction(uint16_t index) {
+void WasmModuleBuilder::AddIndirectFunction(uint32_t index) {
indirect_functions_.push_back(index);
}
-void WasmModuleBuilder::MarkStartFunction(uint16_t index) {
- start_function_index_ = index;
+uint32_t WasmModuleBuilder::AddImport(const char* name, int name_length,
+ FunctionSig* sig) {
+ imports_.push_back({AddSignature(sig), name, name_length});
+ return static_cast<uint32_t>(imports_.size() - 1);
}
-WasmModuleWriter* WasmModuleBuilder::Build(Zone* zone) {
- WasmModuleWriter* writer = new (zone) WasmModuleWriter(zone);
- for (auto function : functions_) {
- writer->functions_.push_back(function->Build(zone, this));
- }
- for (auto segment : data_segments_) {
- writer->data_segments_.push_back(segment);
- }
- for (auto sig : signatures_) {
- writer->signatures_.push_back(sig);
- }
- for (auto index : indirect_functions_) {
- writer->indirect_functions_.push_back(index);
- }
- for (auto global : globals_) {
- writer->globals_.push_back(global);
- }
- writer->start_function_index_ = start_function_index_;
- return writer;
+void WasmModuleBuilder::MarkStartFunction(uint32_t index) {
+ start_function_index_ = index;
}
-
-uint32_t WasmModuleBuilder::AddGlobal(MachineType type, bool exported) {
+uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported) {
globals_.push_back(std::make_pair(type, exported));
return static_cast<uint32_t>(globals_.size() - 1);
}
+void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
+ uint32_t exports = 0;
-WasmModuleWriter::WasmModuleWriter(Zone* zone)
- : functions_(zone),
- data_segments_(zone),
- signatures_(zone),
- indirect_functions_(zone),
- globals_(zone) {}
-
-struct Sizes {
- size_t header_size;
- size_t body_size;
-
- size_t total() { return header_size + body_size; }
+ // == Emit magic =============================================================
+ TRACE("emit magic\n");
+ buffer.write_u32(kWasmMagic);
+ buffer.write_u32(kWasmVersion);
- void Add(size_t header, size_t body) {
- header_size += header;
- body_size += body;
- }
+ // == Emit signatures ========================================================
+ if (signatures_.size() > 0) {
+ size_t start = EmitSection(WasmSection::Code::Signatures, buffer);
+ buffer.write_size(signatures_.size());
- void AddSection(WasmSection::Code code, size_t other_size) {
- Add(padded_varint + SizeOfVarInt(WasmSection::getNameLength(code)) +
- WasmSection::getNameLength(code),
- 0);
- if (other_size) Add(SizeOfVarInt(other_size), 0);
+ for (FunctionSig* sig : signatures_) {
+ buffer.write_u8(kWasmFunctionTypeForm);
+ buffer.write_size(sig->parameter_count());
+ for (size_t j = 0; j < sig->parameter_count(); j++) {
+ buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
+ }
+ buffer.write_size(sig->return_count());
+ for (size_t j = 0; j < sig->return_count(); j++) {
+ buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetReturn(j)));
+ }
+ }
+ FixupSection(buffer, start);
}
-};
-
-WasmModuleIndex* WasmModuleWriter::WriteTo(Zone* zone) const {
- Sizes sizes = {0, 0};
-
- sizes.Add(2 * sizeof(uint32_t), 0); // header
-
- sizes.AddSection(WasmSection::Code::Memory, 0);
- sizes.Add(kDeclMemorySize, 0);
- TRACE("Size after memory: %u, %u\n", (unsigned)sizes.header_size,
- (unsigned)sizes.body_size);
+ // == Emit globals ===========================================================
if (globals_.size() > 0) {
- sizes.AddSection(WasmSection::Code::Globals, globals_.size());
- /* These globals never have names, so are always 3 bytes. */
- sizes.Add(3 * globals_.size(), 0);
- TRACE("Size after globals: %u, %u\n", (unsigned)sizes.header_size,
- (unsigned)sizes.body_size);
- }
+ size_t start = EmitSection(WasmSection::Code::Globals, buffer);
+ buffer.write_size(globals_.size());
- if (signatures_.size() > 0) {
- sizes.AddSection(WasmSection::Code::Signatures, signatures_.size());
- for (auto sig : signatures_) {
- sizes.Add(
- 1 + SizeOfVarInt(sig->parameter_count()) + sig->parameter_count(), 0);
+ for (auto global : globals_) {
+ buffer.write_u32v(0); // Length of the global name.
+ buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.first));
+ buffer.write_u8(global.second);
+ }
+ FixupSection(buffer, start);
+ }
+
+ // == Emit imports ===========================================================
+ if (imports_.size() > 0) {
+ size_t start = EmitSection(WasmSection::Code::ImportTable, buffer);
+ buffer.write_size(imports_.size());
+ for (auto import : imports_) {
+ buffer.write_u32v(import.sig_index);
+ buffer.write_u32v(import.name_length);
+ buffer.write(reinterpret_cast<const byte*>(import.name),
+ import.name_length);
+ buffer.write_u32v(0);
}
- TRACE("Size after signatures: %u, %u\n", (unsigned)sizes.header_size,
- (unsigned)sizes.body_size);
+ FixupSection(buffer, start);
}
+ // == Emit function signatures ===============================================
if (functions_.size() > 0) {
- sizes.AddSection(WasmSection::Code::Functions, functions_.size());
+ size_t start = EmitSection(WasmSection::Code::FunctionSignatures, buffer);
+ buffer.write_size(functions_.size());
for (auto function : functions_) {
- sizes.Add(function->HeaderSize() + function->BodySize(),
- function->NameSize());
+ function->WriteSignature(buffer);
+ if (function->exported()) exports++;
}
- TRACE("Size after functions: %u, %u\n", (unsigned)sizes.header_size,
- (unsigned)sizes.body_size);
- }
-
- if (start_function_index_ >= 0) {
- sizes.AddSection(WasmSection::Code::StartFunction, 0);
- sizes.Add(SizeOfVarInt(start_function_index_), 0);
- TRACE("Size after start: %u, %u\n", (unsigned)sizes.header_size,
- (unsigned)sizes.body_size);
- }
-
- if (data_segments_.size() > 0) {
- sizes.AddSection(WasmSection::Code::DataSegments, data_segments_.size());
- for (auto segment : data_segments_) {
- sizes.Add(segment->HeaderSize(), segment->BodySize());
- }
- TRACE("Size after data segments: %u, %u\n", (unsigned)sizes.header_size,
- (unsigned)sizes.body_size);
+ FixupSection(buffer, start);
}
+ // == emit function table ====================================================
if (indirect_functions_.size() > 0) {
- sizes.AddSection(WasmSection::Code::FunctionTable,
- indirect_functions_.size());
- for (auto function_index : indirect_functions_) {
- sizes.Add(SizeOfVarInt(function_index), 0);
- }
- TRACE("Size after indirect functions: %u, %u\n",
- (unsigned)sizes.header_size, (unsigned)sizes.body_size);
- }
+ size_t start = EmitSection(WasmSection::Code::FunctionTable, buffer);
+ buffer.write_size(indirect_functions_.size());
- if (sizes.body_size > 0) {
- sizes.AddSection(WasmSection::Code::End, 0);
- TRACE("Size after end: %u, %u\n", (unsigned)sizes.header_size,
- (unsigned)sizes.body_size);
+ for (auto index : indirect_functions_) {
+ buffer.write_u32v(index);
+ }
+ FixupSection(buffer, start);
}
- ZoneVector<uint8_t> buffer_vector(sizes.total(), zone);
- byte* buffer = &buffer_vector[0];
- byte* header = buffer;
- byte* body = buffer + sizes.header_size;
-
- // -- emit magic -------------------------------------------------------------
- TRACE("emit magic\n");
- EmitUint32(&header, kWasmMagic);
- EmitUint32(&header, kWasmVersion);
-
- // -- emit memory declaration ------------------------------------------------
+ // == emit memory declaration ================================================
{
- byte* section = EmitSection(WasmSection::Code::Memory, &header);
- EmitVarInt(&header, 16); // min memory size
- EmitVarInt(&header, 16); // max memory size
- EmitUint8(&header, 0); // memory export
+ size_t start = EmitSection(WasmSection::Code::Memory, buffer);
+ buffer.write_u32v(16); // min memory size
+ buffer.write_u32v(16); // max memory size
+ buffer.write_u8(0); // memory export
static_assert(kDeclMemorySize == 3, "memory size must match emit above");
- FixupSection(section, header);
+ FixupSection(buffer, start);
}
- // -- emit globals -----------------------------------------------------------
- if (globals_.size() > 0) {
- byte* section = EmitSection(WasmSection::Code::Globals, &header);
- EmitVarInt(&header, globals_.size());
-
- for (auto global : globals_) {
- EmitVarInt(&header, 0); // Length of the global name.
- EmitUint8(&header, WasmOpcodes::MemTypeCodeFor(global.first));
- EmitUint8(&header, global.second);
+ // == emit exports ===========================================================
+ if (exports > 0) {
+ size_t start = EmitSection(WasmSection::Code::ExportTable, buffer);
+ buffer.write_u32v(exports);
+ uint32_t index = 0;
+ for (auto function : functions_) {
+ function->WriteExport(buffer, index++);
}
- FixupSection(section, header);
+ FixupSection(buffer, start);
}
- // -- emit signatures --------------------------------------------------------
- if (signatures_.size() > 0) {
- byte* section = EmitSection(WasmSection::Code::Signatures, &header);
- EmitVarInt(&header, signatures_.size());
-
- for (FunctionSig* sig : signatures_) {
- EmitVarInt(&header, sig->parameter_count());
- if (sig->return_count() > 0) {
- EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetReturn()));
- } else {
- EmitUint8(&header, kLocalVoid);
- }
- for (size_t j = 0; j < sig->parameter_count(); j++) {
- EmitUint8(&header, WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
- }
- }
- FixupSection(section, header);
+ // == emit start function index ==============================================
+ if (start_function_index_ >= 0) {
+ size_t start = EmitSection(WasmSection::Code::StartFunction, buffer);
+ buffer.write_u32v(start_function_index_);
+ FixupSection(buffer, start);
}
- // -- emit functions ---------------------------------------------------------
+ // == emit code ==============================================================
if (functions_.size() > 0) {
- byte* section = EmitSection(WasmSection::Code::Functions, &header);
- EmitVarInt(&header, functions_.size());
-
- for (auto func : functions_) {
- func->Serialize(buffer, &header, &body);
+ size_t start = EmitSection(WasmSection::Code::FunctionBodies, buffer);
+ buffer.write_size(functions_.size());
+ for (auto function : functions_) {
+ function->WriteBody(buffer);
}
- FixupSection(section, header);
- }
-
- // -- emit start function index ----------------------------------------------
- if (start_function_index_ >= 0) {
- byte* section = EmitSection(WasmSection::Code::StartFunction, &header);
- EmitVarInt(&header, start_function_index_);
- FixupSection(section, header);
+ FixupSection(buffer, start);
}
- // -- emit data segments -----------------------------------------------------
+ // == emit data segments =====================================================
if (data_segments_.size() > 0) {
- byte* section = EmitSection(WasmSection::Code::DataSegments, &header);
- EmitVarInt(&header, data_segments_.size());
+ size_t start = EmitSection(WasmSection::Code::DataSegments, buffer);
+ buffer.write_size(data_segments_.size());
for (auto segment : data_segments_) {
- segment->Serialize(buffer, &header, &body);
- }
- FixupSection(section, header);
- }
-
- // -- emit function table ----------------------------------------------------
- if (indirect_functions_.size() > 0) {
- byte* section = EmitSection(WasmSection::Code::FunctionTable, &header);
- EmitVarInt(&header, indirect_functions_.size());
-
- for (auto index : indirect_functions_) {
- EmitVarInt(&header, index);
+ segment->Write(buffer);
}
- FixupSection(section, header);
- }
-
- if (sizes.body_size > 0) {
- byte* section = EmitSection(WasmSection::Code::End, &header);
- FixupSection(section, header);
+ FixupSection(buffer, start);
}
-
- return new (zone) WasmModuleIndex(buffer, buffer + sizes.total());
-}
-
-
-std::vector<uint8_t> UnsignedLEB128From(uint32_t result) {
- std::vector<uint8_t> output;
- uint8_t next = 0;
- int shift = 0;
- do {
- next = static_cast<uint8_t>(result >> shift);
- if (((result >> shift) & 0xFFFFFF80) != 0) {
- next = next | 0x80;
- }
- output.push_back(next);
- shift += 7;
- } while ((next & 0x80) != 0);
- return output;
}
} // namespace wasm
} // namespace internal
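
[Editor's note, not part of the patch: EmitSection reserves a 5-byte padded varint for the section size, and FixupSection patches it once the payload length is known. The same reserve-then-patch idea in a standalone form; only {kPaddedVarInt32Size} is taken from the patch, the rest is illustrative.]

#include <cstddef>
#include <cstdint>
#include <vector>

static const size_t kPaddedVarInt32Size = 5;

// Reserve a fixed-width varint slot and return its offset.
size_t ReserveSize(std::vector<uint8_t>& buf) {
  size_t at = buf.size();
  buf.insert(buf.end(), kPaddedVarInt32Size, 0);  // placeholder bytes
  return at;
}

// Patch the slot with the number of bytes written after it.
void PatchSize(std::vector<uint8_t>& buf, size_t at) {
  uint32_t val = static_cast<uint32_t>(buf.size() - at - kPaddedVarInt32Size);
  for (size_t i = 0; i < kPaddedVarInt32Size; ++i) {
    uint8_t out = val & 0x7F;
    val >>= 7;
    // Every byte but the last keeps the continuation bit, even when zero,
    // so the field is always exactly five bytes wide.
    buf[at + i] = (i != kPaddedVarInt32Size - 1) ? uint8_t(0x80 | out) : out;
  }
}

[The padding trades a few bytes per section for a single forward pass: the encoder never has to precompute payload sizes, which is exactly the bookkeeping the deleted Sizes struct used to do.]
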
diff --git a/deps/v8/src/wasm/encoder.h b/deps/v8/src/wasm/encoder.h
index 49a7bf7d05..eb8aa64abd 100644
--- a/deps/v8/src/wasm/encoder.h
+++ b/deps/v8/src/wasm/encoder.h
@@ -8,8 +8,8 @@
#include "src/signature.h"
#include "src/zone-containers.h"
-#include "src/base/smart-pointers.h"
-
+#include "src/wasm/leb-helper.h"
+#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -18,137 +18,186 @@ namespace v8 {
namespace internal {
namespace wasm {
-class WasmModuleBuilder;
-
-class WasmFunctionEncoder : public ZoneObject {
+class ZoneBuffer : public ZoneObject {
public:
- uint32_t HeaderSize() const;
- uint32_t BodySize() const;
- uint32_t NameSize() const;
- void Serialize(byte* buffer, byte** header, byte** body) const;
+ static const uint32_t kInitialSize = 4096;
+ explicit ZoneBuffer(Zone* zone, size_t initial = kInitialSize)
+ : zone_(zone), buffer_(reinterpret_cast<byte*>(zone->New(initial))) {
+ pos_ = buffer_;
+ end_ = buffer_ + initial;
+ }
+
+ void write_u8(uint8_t x) {
+ EnsureSpace(1);
+ *(pos_++) = x;
+ }
+
+ void write_u16(uint16_t x) {
+ EnsureSpace(2);
+ WriteLittleEndianValue<uint16_t>(pos_, x);
+ pos_ += 2;
+ }
+
+ void write_u32(uint32_t x) {
+ EnsureSpace(4);
+ WriteLittleEndianValue<uint32_t>(pos_, x);
+ pos_ += 4;
+ }
+
+ void write_u32v(uint32_t val) {
+ EnsureSpace(kMaxVarInt32Size);
+ LEBHelper::write_u32v(&pos_, val);
+ }
+
+ void write_size(size_t val) {
+ EnsureSpace(kMaxVarInt32Size);
+ DCHECK_EQ(val, static_cast<uint32_t>(val));
+ LEBHelper::write_u32v(&pos_, static_cast<uint32_t>(val));
+ }
+
+ void write(const byte* data, size_t size) {
+ EnsureSpace(size);
+ memcpy(pos_, data, size);
+ pos_ += size;
+ }
+
+ size_t reserve_u32v() {
+ size_t off = offset();
+ EnsureSpace(kMaxVarInt32Size);
+ pos_ += kMaxVarInt32Size;
+ return off;
+ }
+
+ // Patch a (padded) u32v at the given offset to be the given value.
+ void patch_u32v(size_t offset, uint32_t val) {
+ byte* ptr = buffer_ + offset;
+ for (size_t pos = 0; pos != kPaddedVarInt32Size; ++pos) {
+ uint32_t next = val >> 7;
+ byte out = static_cast<byte>(val & 0x7f);
+ if (pos != kPaddedVarInt32Size - 1) {
+ *(ptr++) = 0x80 | out;
+ val = next;
+ } else {
+ *(ptr++) = out;
+ }
+ }
+ }
+
+ size_t offset() { return static_cast<size_t>(pos_ - buffer_); }
+ size_t size() { return static_cast<size_t>(pos_ - buffer_); }
+ const byte* begin() { return buffer_; }
+ const byte* end() { return pos_; }
+
+ void EnsureSpace(size_t size) {
+ if ((pos_ + size) > end_) {
+ size_t new_size = 4096 + (end_ - buffer_) * 3;
+ byte* new_buffer = reinterpret_cast<byte*>(zone_->New(new_size));
+ memcpy(new_buffer, buffer_, (pos_ - buffer_));
+ pos_ = new_buffer + (pos_ - buffer_);
+ buffer_ = new_buffer;
+ end_ = new_buffer + new_size;
+ }
+ }
+
+ byte** pos_ptr() { return &pos_; }
private:
- WasmFunctionEncoder(Zone* zone, LocalType return_type, bool exported,
- bool external);
- friend class WasmFunctionBuilder;
- uint16_t signature_index_;
- ZoneVector<LocalType> params_;
- uint16_t local_i32_count_;
- uint16_t local_i64_count_;
- uint16_t local_f32_count_;
- uint16_t local_f64_count_;
- bool exported_;
- bool external_;
- ZoneVector<uint8_t> body_;
- ZoneVector<char> name_;
-
- bool HasName() const { return (exported_ || external_) && name_.size() > 0; }
+ Zone* zone_;
+ byte* buffer_;
+ byte* pos_;
+ byte* end_;
};
+class WasmModuleBuilder;
+
class WasmFunctionBuilder : public ZoneObject {
public:
- uint16_t AddParam(LocalType type);
- uint16_t AddLocal(LocalType type);
- void ReturnType(LocalType type);
+ // Building methods.
+ void SetSignature(FunctionSig* sig);
+ uint32_t AddLocal(LocalType type);
+ void EmitVarInt(uint32_t val);
void EmitCode(const byte* code, uint32_t code_size);
- void EmitCode(const byte* code, uint32_t code_size,
- const uint32_t* local_indices, uint32_t indices_size);
void Emit(WasmOpcode opcode);
+ void EmitGetLocal(uint32_t index);
+ void EmitSetLocal(uint32_t index);
+ void EmitI32Const(int32_t val);
void EmitWithU8(WasmOpcode opcode, const byte immediate);
void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
- uint32_t EmitEditableVarIntImmediate();
- void EditVarIntImmediate(uint32_t offset, const uint32_t immediate);
- void Exported(uint8_t flag);
- void External(uint8_t flag);
- void SetName(const unsigned char* name, int name_length);
- WasmFunctionEncoder* Build(Zone* zone, WasmModuleBuilder* mb) const;
+ void SetExported();
+ void SetName(const char* name, int name_length);
+ bool exported() { return exported_; }
+
+ // Writing methods.
+ void WriteSignature(ZoneBuffer& buffer) const;
+ void WriteExport(ZoneBuffer& buffer, uint32_t func_index) const;
+ void WriteBody(ZoneBuffer& buffer) const;
private:
- explicit WasmFunctionBuilder(Zone* zone);
+ explicit WasmFunctionBuilder(WasmModuleBuilder* builder);
friend class WasmModuleBuilder;
- LocalType return_type_;
- struct Type;
- ZoneVector<Type> locals_;
- uint8_t exported_;
- uint8_t external_;
+ WasmModuleBuilder* builder_;
+ LocalDeclEncoder locals_;
+ uint32_t signature_index_;
+ bool exported_;
ZoneVector<uint8_t> body_;
- ZoneVector<uint32_t> local_indices_;
ZoneVector<char> name_;
- uint16_t AddVar(LocalType type, bool param);
- void IndexVars(WasmFunctionEncoder* e, uint16_t* var_index) const;
};
+// TODO(titzer): kill!
class WasmDataSegmentEncoder : public ZoneObject {
public:
WasmDataSegmentEncoder(Zone* zone, const byte* data, uint32_t size,
uint32_t dest);
- uint32_t HeaderSize() const;
- uint32_t BodySize() const;
- void Serialize(byte* buffer, byte** header, byte** body) const;
+ void Write(ZoneBuffer& buffer) const;
private:
ZoneVector<byte> data_;
uint32_t dest_;
};
-class WasmModuleIndex : public ZoneObject {
- public:
- const byte* Begin() const { return begin_; }
- const byte* End() const { return end_; }
-
- private:
- friend class WasmModuleWriter;
- WasmModuleIndex(const byte* begin, const byte* end)
- : begin_(begin), end_(end) {}
- const byte* begin_;
- const byte* end_;
-};
-
-class WasmModuleWriter : public ZoneObject {
- public:
- WasmModuleIndex* WriteTo(Zone* zone) const;
-
- private:
- friend class WasmModuleBuilder;
- explicit WasmModuleWriter(Zone* zone);
- ZoneVector<WasmFunctionEncoder*> functions_;
- ZoneVector<WasmDataSegmentEncoder*> data_segments_;
- ZoneVector<FunctionSig*> signatures_;
- ZoneVector<uint16_t> indirect_functions_;
- ZoneVector<std::pair<MachineType, bool>> globals_;
- int start_function_index_;
+struct WasmFunctionImport {
+ uint32_t sig_index;
+ const char* name;
+ int name_length;
};
class WasmModuleBuilder : public ZoneObject {
public:
explicit WasmModuleBuilder(Zone* zone);
- uint16_t AddFunction();
- uint32_t AddGlobal(MachineType type, bool exported);
+
+ // Building methods.
+ uint32_t AddFunction();
+ uint32_t AddGlobal(LocalType type, bool exported);
WasmFunctionBuilder* FunctionAt(size_t index);
void AddDataSegment(WasmDataSegmentEncoder* data);
- uint16_t AddSignature(FunctionSig* sig);
- void AddIndirectFunction(uint16_t index);
- void MarkStartFunction(uint16_t index);
- WasmModuleWriter* Build(Zone* zone);
+ uint32_t AddSignature(FunctionSig* sig);
+ void AddIndirectFunction(uint32_t index);
+ void MarkStartFunction(uint32_t index);
+ uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
+
+ // Writing methods.
+ void WriteTo(ZoneBuffer& buffer) const;
struct CompareFunctionSigs {
bool operator()(FunctionSig* a, FunctionSig* b) const;
};
- typedef ZoneMap<FunctionSig*, uint16_t, CompareFunctionSigs> SignatureMap;
+ typedef ZoneMap<FunctionSig*, uint32_t, CompareFunctionSigs> SignatureMap;
+
+ Zone* zone() { return zone_; }
private:
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
+ ZoneVector<WasmFunctionImport> imports_;
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmDataSegmentEncoder*> data_segments_;
- ZoneVector<uint16_t> indirect_functions_;
- ZoneVector<std::pair<MachineType, bool>> globals_;
+ ZoneVector<uint32_t> indirect_functions_;
+ ZoneVector<std::pair<LocalType, bool>> globals_;
SignatureMap signature_map_;
int start_function_index_;
};
-std::vector<uint8_t> UnsignedLEB128From(uint32_t result);
} // namespace wasm
} // namespace internal
} // namespace v8
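
[Editor's note, not part of the patch: a hypothetical end-to-end use of the rewritten builder API above, assuming a live Zone, the FunctionSig::Builder seen elsewhere in this diff, and the kAstI32 type constant. A sketch, not the canonical embedder usage.]

// Builds a one-function module exporting an i32 identity function.
void BuildIdentityModule(Zone* zone, ZoneBuffer& buffer) {
  WasmModuleBuilder builder(zone);
  uint32_t index = builder.AddFunction();
  WasmFunctionBuilder* f = builder.FunctionAt(index);

  FunctionSig::Builder sig(zone, 1, 1);  // one return, one parameter
  sig.AddReturn(kAstI32);
  sig.AddParam(kAstI32);
  f->SetSignature(sig.Build());

  f->EmitGetLocal(0);  // body: yield the first (and only) parameter
  f->SetExported();
  f->SetName("id", 2);

  builder.WriteTo(buffer);  // buffer.begin()..buffer.end() holds the module
}
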
diff --git a/deps/v8/src/wasm/leb-helper.h b/deps/v8/src/wasm/leb-helper.h
new file mode 100644
index 0000000000..0e4ba3418c
--- /dev/null
+++ b/deps/v8/src/wasm/leb-helper.h
@@ -0,0 +1,134 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_LEB_HELPER_H_
+#define V8_WASM_LEB_HELPER_H_
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static const size_t kPaddedVarInt32Size = 5;
+static const size_t kMaxVarInt32Size = 5;
+
+class LEBHelper {
+ public:
+ // Write a 32-bit unsigned LEB to {dest}, updating {dest} to point after
+ // the last uint8_t written. No safety checks.
+ static void write_u32v(uint8_t** dest, uint32_t val) {
+ while (val >= 0x80) {
+ *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+ val >>= 7;
+ }
+ *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+ }
+
+ // Write a 32-bit signed LEB to {dest}, updating {dest} to point after
+ // the last uint8_t written. No safety checks.
+ static void write_i32v(uint8_t** dest, int32_t val) {
+ if (val >= 0) {
+ while (val >= 0x40) { // prevent sign extension.
+ *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+ val >>= 7;
+ }
+ *((*dest)++) = static_cast<uint8_t>(val & 0xFF);
+ } else {
+ while ((val >> 6) != -1) {
+ *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+ val >>= 7;
+ }
+ *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+ }
+ }
+
+ // Write a 64-bit unsigned LEB to {dest}, updating {dest} to point after
+ // the last uint8_t written. No safety checks.
+ static void write_u64v(uint8_t** dest, uint64_t val) {
+ while (val >= 0x80) {
+ *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+ val >>= 7;
+ }
+ *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+ }
+
+ // Write a 64-bit signed LEB to {dest}, updating {dest} to point after
+ // the last uint8_t written. No safety checks.
+ static void write_i64v(uint8_t** dest, int64_t val) {
+ if (val >= 0) {
+ while (val >= 0x40) { // prevent sign extension.
+ *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+ val >>= 7;
+ }
+ *((*dest)++) = static_cast<uint8_t>(val & 0xFF);
+ } else {
+ while ((val >> 6) != -1) {
+ *((*dest)++) = static_cast<uint8_t>(0x80 | (val & 0x7F));
+ val >>= 7;
+ }
+ *((*dest)++) = static_cast<uint8_t>(val & 0x7F);
+ }
+ }
+
+ // TODO(titzer): move core logic for decoding LEBs from decoder.h to here.
+
+ // Compute the size of {val} if emitted as an LEB32.
+ static inline size_t sizeof_u32v(size_t val) {
+ size_t size = 0;
+ do {
+ size++;
+ val = val >> 7;
+ } while (val > 0);
+ return size;
+ }
+
+ // Compute the size of {val} if emitted as an LEB32.
+ static inline size_t sizeof_i32v(int32_t val) {
+ size_t size = 1;
+ if (val >= 0) {
+ while (val >= 0x40) { // prevent sign extension.
+ size++;
+ val >>= 7;
+ }
+ } else {
+ while ((val >> 6) != -1) {
+ size++;
+ val >>= 7;
+ }
+ }
+ return size;
+ }
+
+ // Compute the size of {val} if emitted as an unsigned LEB64.
+ static inline size_t sizeof_u64v(uint64_t val) {
+ size_t size = 0;
+ do {
+ size++;
+ val = val >> 7;
+ } while (val > 0);
+ return size;
+ }
+
+ // Compute the size of {val} if emitted as a signed LEB64.
+ static inline size_t sizeof_i64v(int64_t val) {
+ size_t size = 1;
+ if (val >= 0) {
+ while (val >= 0x40) { // prevent sign extension.
+ size++;
+ val >>= 7;
+ }
+ } else {
+ while ((val >> 6) != -1) {
+ size++;
+ val >>= 7;
+ }
+ }
+ return size;
+ }
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_LEB_HELPER_H_
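
[Editor's note, not part of the header: a worked check of the signed encoder. By the write_i32v loop above, -65 takes two bytes, 0xBF then 0x7F, because the final byte must still carry the sign in bit 6. The snippet inlines only the negative branch of that loop and is standalone.]

#include <cassert>
#include <cstdint>

int main() {
  uint8_t buf[5];
  uint8_t* p = buf;
  int32_t val = -65;
  // Negative branch of write_i32v: stop once only sign bits remain.
  while ((val >> 6) != -1) {
    *p++ = static_cast<uint8_t>(0x80 | (val & 0x7F));  // 0xBF
    val >>= 7;                                         // arithmetic shift: -1
  }
  *p++ = static_cast<uint8_t>(val & 0x7F);             // 0x7F
  assert(p - buf == 2 && buf[0] == 0xBF && buf[1] == 0x7F);
  return 0;
}
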
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 3e85a1b53c..542c47ca15 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -25,6 +25,7 @@ namespace wasm {
#define TRACE(...)
#endif
+namespace {
// The main logic for decoding the bytes of a module.
class ModuleDecoder : public Decoder {
@@ -43,7 +44,7 @@ class ModuleDecoder : public Decoder {
pc_ = limit_; // On error, terminate section decoding loop.
}
- static void DumpModule(WasmModule* module, ModuleResult result) {
+ static void DumpModule(WasmModule* module, const ModuleResult& result) {
std::string path;
if (FLAG_dump_wasm_module_path) {
path = FLAG_dump_wasm_module_path;
@@ -79,9 +80,8 @@ class ModuleDecoder : public Decoder {
module->mem_external = false;
module->origin = origin_;
- bool sections[(size_t)WasmSection::Code::Max] = {false};
-
const byte* pos = pc_;
+ int current_order = 0;
uint32_t magic_word = consume_u32("wasm magic");
#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
if (magic_word != kWasmMagic) {
@@ -109,91 +109,85 @@ class ModuleDecoder : public Decoder {
TRACE("DecodeSection\n");
pos = pc_;
- int length;
- uint32_t section_length = consume_u32v(&length, "section size");
-
- int section_string_leb_length = 0;
- uint32_t section_string_length = 0;
- WasmSection::Code section = consume_section_name(
- &section_string_leb_length, &section_string_length);
- uint32_t string_and_leb_length =
- section_string_leb_length + section_string_length;
- if (string_and_leb_length > section_length) {
- error(pos, pos,
- "section string of size %u longer than total section bytes %u",
- string_and_leb_length, section_length);
+ // Read the section name.
+ uint32_t string_length = consume_u32v("section name length");
+ const byte* section_name_start = pc_;
+ consume_bytes(string_length);
+ if (failed()) {
+ TRACE("Section name of length %u couldn't be read\n", string_length);
break;
}
- if (section == WasmSection::Code::Max) {
- // Skip unknown section.
- uint32_t skip = section_length - string_and_leb_length;
- TRACE("skipping %u bytes from unknown section\n", skip);
- consume_bytes(skip);
- continue;
+ TRACE(" +%d section name : \"%.*s\"\n",
+ static_cast<int>(section_name_start - start_),
+ string_length < 20 ? string_length : 20, section_name_start);
+
+ WasmSection::Code section =
+ WasmSection::lookup(section_name_start, string_length);
+
+ // Read and check the section size.
+ uint32_t section_length = consume_u32v("section length");
+ if (!checkAvailable(section_length)) {
+ // The section would extend beyond the end of the module.
+ break;
}
+ const byte* section_start = pc_;
+ const byte* expected_section_end = pc_ + section_length;
- // Each section should appear at most once.
- CheckForPreviousSection(sections, section, false);
- sections[(size_t)section] = true;
+ current_order = CheckSectionOrder(current_order, section);
switch (section) {
case WasmSection::Code::End:
// Terminate section decoding.
limit_ = pc_;
break;
- case WasmSection::Code::Memory:
- int length;
- module->min_mem_pages = consume_u32v(&length, "min memory");
- module->max_mem_pages = consume_u32v(&length, "max memory");
+ case WasmSection::Code::Memory: {
+ module->min_mem_pages = consume_u32v("min memory");
+ module->max_mem_pages = consume_u32v("max memory");
module->mem_export = consume_u8("export memory") != 0;
break;
+ }
case WasmSection::Code::Signatures: {
- int length;
- uint32_t signatures_count = consume_u32v(&length, "signatures count");
+ uint32_t signatures_count = consume_u32v("signatures count");
module->signatures.reserve(SafeReserve(signatures_count));
// Decode signatures.
- for (uint32_t i = 0; i < signatures_count; i++) {
+ for (uint32_t i = 0; i < signatures_count; ++i) {
if (failed()) break;
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- FunctionSig* s = consume_sig(); // read function sig.
+ FunctionSig* s = consume_sig();
module->signatures.push_back(s);
}
break;
}
case WasmSection::Code::FunctionSignatures: {
- // Functions require a signature table first.
- CheckForPreviousSection(sections, WasmSection::Code::Signatures,
- true);
- int length;
- uint32_t functions_count = consume_u32v(&length, "functions count");
+ uint32_t functions_count = consume_u32v("functions count");
module->functions.reserve(SafeReserve(functions_count));
- for (uint32_t i = 0; i < functions_count; i++) {
- module->functions.push_back(
- {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
+ for (uint32_t i = 0; i < functions_count; ++i) {
+ module->functions.push_back({nullptr, // sig
+ i, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0}); // code_end_offset
WasmFunction* function = &module->functions.back();
function->sig_index = consume_sig_index(module, &function->sig);
}
break;
}
case WasmSection::Code::FunctionBodies: {
- // Function bodies should follow signatures.
- CheckForPreviousSection(sections,
- WasmSection::Code::FunctionSignatures, true);
- int length;
const byte* pos = pc_;
- uint32_t functions_count = consume_u32v(&length, "functions count");
+ uint32_t functions_count = consume_u32v("functions count");
if (functions_count != module->functions.size()) {
error(pos, pos, "function body count %u mismatch (%u expected)",
functions_count,
static_cast<uint32_t>(module->functions.size()));
break;
}
- for (uint32_t i = 0; i < functions_count; i++) {
+ for (uint32_t i = 0; i < functions_count; ++i) {
WasmFunction* function = &module->functions[i];
- int length;
- uint32_t size = consume_u32v(&length, "body size");
+ uint32_t size = consume_u32v("body size");
function->code_start_offset = pc_offset();
function->code_end_offset = pc_offset() + size;
@@ -206,49 +200,9 @@ class ModuleDecoder : public Decoder {
}
break;
}
- case WasmSection::Code::Functions: {
- // Functions require a signature table first.
- CheckForPreviousSection(sections, WasmSection::Code::Signatures,
- true);
- int length;
- uint32_t functions_count = consume_u32v(&length, "functions count");
- module->functions.reserve(SafeReserve(functions_count));
- // Set up module environment for verification.
- ModuleEnv menv;
- menv.module = module;
- menv.instance = nullptr;
- menv.origin = origin_;
- // Decode functions.
- for (uint32_t i = 0; i < functions_count; i++) {
- if (failed()) break;
- TRACE("DecodeFunction[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
-
- module->functions.push_back(
- {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
- WasmFunction* function = &module->functions.back();
- DecodeFunctionInModule(module, function, false);
- }
- if (ok() && verify_functions) {
- for (uint32_t i = 0; i < functions_count; i++) {
- if (failed()) break;
- WasmFunction* function = &module->functions[i];
- if (!function->external) {
- VerifyFunctionBody(i, &menv, function);
- if (result_.failed())
- error(result_.error_pc, result_.error_msg.get());
- }
- }
- }
- break;
- }
case WasmSection::Code::Names: {
- // Names correspond to functions.
- CheckForPreviousSection(sections,
- WasmSection::Code::FunctionSignatures, true);
- int length;
const byte* pos = pc_;
- uint32_t functions_count = consume_u32v(&length, "functions count");
+ uint32_t functions_count = consume_u32v("functions count");
if (functions_count != module->functions.size()) {
error(pos, pos, "function name count %u mismatch (%u expected)",
functions_count,
@@ -256,16 +210,15 @@ class ModuleDecoder : public Decoder {
break;
}
- for (uint32_t i = 0; i < functions_count; i++) {
+ for (uint32_t i = 0; i < functions_count; ++i) {
WasmFunction* function = &module->functions[i];
function->name_offset =
- consume_string(&function->name_length, "function name");
+ consume_string(&function->name_length, false);
- uint32_t local_names_count =
- consume_u32v(&length, "local names count");
+ uint32_t local_names_count = consume_u32v("local names count");
for (uint32_t j = 0; j < local_names_count; j++) {
uint32_t unused = 0;
- uint32_t offset = consume_string(&unused, "local name");
+ uint32_t offset = consume_string(&unused, false);
USE(unused);
USE(offset);
}
@@ -273,31 +226,32 @@ class ModuleDecoder : public Decoder {
break;
}
case WasmSection::Code::Globals: {
- int length;
- uint32_t globals_count = consume_u32v(&length, "globals count");
+ uint32_t globals_count = consume_u32v("globals count");
module->globals.reserve(SafeReserve(globals_count));
// Decode globals.
- for (uint32_t i = 0; i < globals_count; i++) {
+ for (uint32_t i = 0; i < globals_count; ++i) {
if (failed()) break;
TRACE("DecodeGlobal[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->globals.push_back({0, 0, MachineType::Int32(), 0, false});
+ // Add an uninitialized global and pass a pointer to it.
+ module->globals.push_back({0, 0, kAstStmt, 0, false});
WasmGlobal* global = &module->globals.back();
DecodeGlobalInModule(global);
}
break;
}
case WasmSection::Code::DataSegments: {
- int length;
- uint32_t data_segments_count =
- consume_u32v(&length, "data segments count");
+ uint32_t data_segments_count = consume_u32v("data segments count");
module->data_segments.reserve(SafeReserve(data_segments_count));
// Decode data segments.
- for (uint32_t i = 0; i < data_segments_count; i++) {
+ for (uint32_t i = 0; i < data_segments_count; ++i) {
if (failed()) break;
TRACE("DecodeDataSegment[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->data_segments.push_back({0, 0, 0});
+ module->data_segments.push_back({0, // dest_addr
+ 0, // source_offset
+ 0, // source_size
+ false}); // init
WasmDataSegment* segment = &module->data_segments.back();
DecodeDataSegmentInModule(module, segment);
}
@@ -306,21 +260,16 @@ class ModuleDecoder : public Decoder {
case WasmSection::Code::FunctionTable: {
// An indirect function table requires functions first.
CheckForFunctions(module, section);
- int length;
- uint32_t function_table_count =
- consume_u32v(&length, "function table count");
- module->function_table.reserve(SafeReserve(function_table_count));
+ // Assume only one table for now.
+ static const uint32_t kSupportedTableCount = 1;
+ module->function_tables.reserve(SafeReserve(kSupportedTableCount));
// Decode function table.
- for (uint32_t i = 0; i < function_table_count; i++) {
+ for (uint32_t i = 0; i < kSupportedTableCount; ++i) {
if (failed()) break;
TRACE("DecodeFunctionTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- uint16_t index = consume_u32v(&length);
- if (index >= module->functions.size()) {
- error(pc_ - 2, "invalid function index");
- break;
- }
- module->function_table.push_back(index);
+ module->function_tables.push_back({0, 0, std::vector<uint16_t>()});
+ DecodeFunctionTableInModule(module, &module->function_tables[i]);
}
break;
}
@@ -341,63 +290,109 @@ class ModuleDecoder : public Decoder {
break;
}
case WasmSection::Code::ImportTable: {
- // Declares an import table.
- CheckForPreviousSection(sections, WasmSection::Code::Signatures,
- true);
- int length;
- uint32_t import_table_count =
- consume_u32v(&length, "import table count");
+ uint32_t import_table_count = consume_u32v("import table count");
module->import_table.reserve(SafeReserve(import_table_count));
// Decode import table.
- for (uint32_t i = 0; i < import_table_count; i++) {
+ for (uint32_t i = 0; i < import_table_count; ++i) {
if (failed()) break;
TRACE("DecodeImportTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->import_table.push_back({nullptr, 0, 0});
+ module->import_table.push_back({nullptr, // sig
+ 0, // sig_index
+ 0, // module_name_offset
+ 0, // module_name_length
+ 0, // function_name_offset
+ 0}); // function_name_length
WasmImport* import = &module->import_table.back();
import->sig_index = consume_sig_index(module, &import->sig);
const byte* pos = pc_;
- import->module_name_offset = consume_string(
- &import->module_name_length, "import module name");
+ import->module_name_offset =
+ consume_string(&import->module_name_length, true);
if (import->module_name_length == 0) {
error(pos, "import module name cannot be NULL");
}
- import->function_name_offset = consume_string(
- &import->function_name_length, "import function name");
+ import->function_name_offset =
+ consume_string(&import->function_name_length, true);
}
break;
}
case WasmSection::Code::ExportTable: {
// Declares an export table.
CheckForFunctions(module, section);
- int length;
- uint32_t export_table_count =
- consume_u32v(&length, "export table count");
+ uint32_t export_table_count = consume_u32v("export table count");
module->export_table.reserve(SafeReserve(export_table_count));
// Decode export table.
- for (uint32_t i = 0; i < export_table_count; i++) {
+ for (uint32_t i = 0; i < export_table_count; ++i) {
if (failed()) break;
TRACE("DecodeExportTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- module->export_table.push_back({0, 0});
+ module->export_table.push_back({0, // func_index
+ 0, // name_offset
+ 0}); // name_length
WasmExport* exp = &module->export_table.back();
WasmFunction* func;
exp->func_index = consume_func_index(module, &func);
- exp->name_offset = consume_string(&exp->name_length, "export name");
+ exp->name_offset = consume_string(&exp->name_length, true);
+ }
+ // Check for duplicate exports.
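+          // E.g. names {"b", "aa", "a"} sort (shorter first, then bytewise)
+          // to {"a", "b", "aa"}; any two equal names end up adjacent, so a
+          // single pass over neighbours finds every duplicate.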
+ if (ok() && module->export_table.size() > 1) {
+ std::vector<WasmExport> sorted_exports(module->export_table);
+ const byte* base = start_;
+ auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
+ // Return true if a < b.
+ uint32_t len = a.name_length;
+ if (len != b.name_length) return len < b.name_length;
+ return memcmp(base + a.name_offset, base + b.name_offset, len) <
+ 0;
+ };
+ std::stable_sort(sorted_exports.begin(), sorted_exports.end(),
+ cmp_less);
+ auto it = sorted_exports.begin();
+ WasmExport* last = &*it++;
+ for (auto end = sorted_exports.end(); it != end; last = &*it++) {
+ DCHECK(!cmp_less(*it, *last)); // Vector must be sorted.
+ if (!cmp_less(*last, *it)) {
+ const byte* pc = start_ + it->name_offset;
+ error(pc, pc,
+ "Duplicate export name '%.*s' for functions %d and %d",
+ it->name_length, pc, last->func_index, it->func_index);
+ break;
+ }
+ }
}
break;
}
case WasmSection::Code::Max:
- UNREACHABLE(); // Already skipped unknown sections.
+ // Skip unknown sections.
+ TRACE("Unknown section: '");
+ for (uint32_t i = 0; i != string_length; ++i) {
+ TRACE("%c", *(section_name_start + i));
+ }
+ TRACE("'\n");
+ consume_bytes(section_length);
+ break;
+ }
+
+ if (pc_ != expected_section_end) {
+ const char* diff = pc_ < expected_section_end ? "shorter" : "longer";
+ size_t expected_length = static_cast<size_t>(section_length);
+ size_t actual_length = static_cast<size_t>(pc_ - section_start);
+ error(pc_, pc_,
+ "section \"%s\" %s (%zu bytes) than specified (%zu bytes)",
+ WasmSection::getName(section), diff, actual_length,
+ expected_length);
+ break;
}
}
done:
- ModuleResult result = toResult(module);
+ if (ok()) CalculateGlobalsOffsets(module);
+ const WasmModule* finished_module = module;
+ ModuleResult result = toResult(finished_module);
if (FLAG_dump_wasm_module) {
DumpModule(module, result);
}
@@ -417,17 +412,18 @@ class ModuleDecoder : public Decoder {
}
}
- void CheckForPreviousSection(bool* sections, WasmSection::Code section,
- bool present) {
- if (section >= WasmSection::Code::Max) return;
- if (sections[(size_t)section] == present) return;
- if (present) {
- error(pc_ - 1, nullptr, "required %s section missing",
+ int CheckSectionOrder(int current_order, WasmSection::Code section) {
+ int next_order = WasmSection::getOrder(section);
+ if (next_order == 0) return current_order;
+ if (next_order == current_order) {
+ error(pc_, pc_, "section \"%s\" already defined",
WasmSection::getName(section));
- } else {
- error(pc_ - 1, nullptr, "%s section already present",
+ }
+ if (next_order < current_order) {
+ error(pc_, pc_, "section \"%s\" out of order",
WasmSection::getName(section));
}
+ return next_order;
}
// Decodes a single anonymous function starting at {start_}.
@@ -439,13 +435,11 @@ class ModuleDecoder : public Decoder {
function->name_length = 0; // ---- name length
function->code_start_offset = off(pc_); // ---- code start
function->code_end_offset = off(limit_); // ---- code end
- function->exported = false; // ---- exported
- function->external = false; // ---- external
if (ok()) VerifyFunctionBody(0, module_env, function);
FunctionResult result;
- result.CopyFrom(result_); // Copy error code and location.
+    result.MoveFrom(result_);  // Move error code and location.
result.val = function;
return result;
}
@@ -466,69 +460,16 @@ class ModuleDecoder : public Decoder {
// Decodes a single global entry inside a module starting at {pc_}.
void DecodeGlobalInModule(WasmGlobal* global) {
- global->name_offset = consume_string(&global->name_length, "global name");
- global->type = mem_type();
+ global->name_offset = consume_string(&global->name_length, false);
+ if (!unibrow::Utf8::Validate(start_ + global->name_offset,
+ global->name_length)) {
+ error("global name is not valid utf8");
+ }
+ global->type = consume_local_type();
global->offset = 0;
global->exported = consume_u8("exported") != 0;
}
- // Decodes a single function entry inside a module starting at {pc_}.
- // TODO(titzer): legacy function body; remove
- void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
- bool verify_body = true) {
- byte decl_bits = consume_u8("function decl");
-
- const byte* sigpos = pc_;
- function->sig_index = consume_u16("signature index");
-
- if (function->sig_index >= module->signatures.size()) {
- return error(sigpos, "invalid signature index");
- } else {
- function->sig = module->signatures[function->sig_index];
- }
-
- TRACE(" +%d <function attributes:%s%s%s%s%s>\n",
- static_cast<int>(pc_ - start_),
- decl_bits & kDeclFunctionName ? " name" : "",
- decl_bits & kDeclFunctionImport ? " imported" : "",
- decl_bits & kDeclFunctionLocals ? " locals" : "",
- decl_bits & kDeclFunctionExport ? " exported" : "",
- (decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
-
- if (decl_bits & kDeclFunctionName) {
- function->name_offset =
- consume_string(&function->name_length, "function name");
- }
-
- function->exported = decl_bits & kDeclFunctionExport;
-
- // Imported functions have no locals or body.
- if (decl_bits & kDeclFunctionImport) {
- function->external = true;
- return;
- }
-
- if (decl_bits & kDeclFunctionLocals) {
- function->local_i32_count = consume_u16("i32 count");
- function->local_i64_count = consume_u16("i64 count");
- function->local_f32_count = consume_u16("f32 count");
- function->local_f64_count = consume_u16("f64 count");
- }
-
- uint16_t size = consume_u16("body size");
- if (ok()) {
- if ((pc_ + size) > limit_) {
- return error(pc_, limit_,
- "expected %d bytes for function body, fell off end", size);
- }
- function->code_start_offset = static_cast<uint32_t>(pc_ - start_);
- function->code_end_offset = function->code_start_offset + size;
- TRACE(" +%d %-20s: (%d bytes)\n", static_cast<int>(pc_ - start_),
- "function body", size);
- pc_ += size;
- }
- }
-
bool IsWithinLimit(uint32_t limit, uint32_t offset, uint32_t size) {
if (offset > limit) return false;
if ((offset + size) < offset) return false; // overflow
@@ -538,9 +479,8 @@ class ModuleDecoder : public Decoder {
// Decodes a single data segment entry inside a module starting at {pc_}.
void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
const byte* start = pc_;
- int length;
- segment->dest_addr = consume_u32v(&length, "destination");
- segment->source_size = consume_u32v(&length, "source size");
+ segment->dest_addr = consume_u32v("destination");
+ segment->source_size = consume_u32v("source size");
segment->source_offset = static_cast<uint32_t>(pc_ - start_);
segment->init = true;
@@ -563,19 +503,55 @@ class ModuleDecoder : public Decoder {
consume_bytes(segment->source_size);
}
+ // Decodes a single function table inside a module starting at {pc_}.
+ void DecodeFunctionTableInModule(WasmModule* module,
+ WasmIndirectFunctionTable* table) {
+ table->size = consume_u32v("function table entry count");
+ table->max_size = table->size;
+
+ if (table->max_size != table->size) {
+ error("invalid table maximum size");
+ }
+
+ for (uint32_t i = 0; i < table->size; ++i) {
+ uint16_t index = consume_u32v();
+ if (index >= module->functions.size()) {
+ error(pc_ - sizeof(index), "invalid function index");
+ break;
+ }
+ table->values.push_back(index);
+ }
+ }
+
+ // Calculate individual global offsets and total size of globals table.
+ void CalculateGlobalsOffsets(WasmModule* module) {
+ uint32_t offset = 0;
+ if (module->globals.size() == 0) {
+ module->globals_size = 0;
+ return;
+ }
+ for (WasmGlobal& global : module->globals) {
+ byte size =
+ WasmOpcodes::MemSize(WasmOpcodes::MachineTypeFor(global.type));
+ offset = (offset + size - 1) & ~(size - 1); // align
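+      // This rounds offset up to a multiple of size, which is always a
+      // power of two here; e.g. offset 5 with an 8-byte global becomes
+      // (5 + 7) & ~7 == 8.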
+ global.offset = offset;
+ offset += size;
+ }
+ module->globals_size = offset;
+ }
+
// Verifies the body (code) of a given function.
void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
WasmFunction* function) {
- if (FLAG_trace_wasm_decode_time) {
+ if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
OFStream os(stdout);
os << "Verifying WASM function " << WasmFunctionName(function, menv)
<< std::endl;
- os << std::endl;
}
FunctionBody body = {menv, function->sig, start_,
start_ + function->code_start_offset,
start_ + function->code_end_offset};
- TreeResult result = VerifyWasmCode(module_zone->allocator(), body);
+ DecodeResult result = VerifyWasmCode(module_zone->allocator(), body);
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream str;
@@ -589,8 +565,8 @@ class ModuleDecoder : public Decoder {
buffer[len - 1] = 0;
// Copy error code and location.
- result_.CopyFrom(result);
- result_.error_msg.Reset(buffer);
+ result_.MoveFrom(result);
+ result_.error_msg.reset(buffer);
}
}
@@ -606,19 +582,20 @@ class ModuleDecoder : public Decoder {
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
- uint32_t consume_string(uint32_t* length, const char* name = nullptr) {
- int varint_length;
- *length = consume_u32v(&varint_length, "string length");
+ uint32_t consume_string(uint32_t* length, bool validate_utf8) {
+ *length = consume_u32v("string length");
uint32_t offset = pc_offset();
TRACE(" +%u %-20s: (%u bytes)\n", offset, "string", *length);
+ if (validate_utf8 && !unibrow::Utf8::Validate(pc_, *length)) {
+ error(pc_, "no valid UTF-8 string");
+ }
consume_bytes(*length);
return offset;
}
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
const byte* pos = pc_;
- int length;
- uint32_t sig_index = consume_u32v(&length, "signature index");
+ uint32_t sig_index = consume_u32v("signature index");
if (sig_index >= module->signatures.size()) {
error(pos, pos, "signature index %u out of bounds (%d signatures)",
sig_index, static_cast<int>(module->signatures.size()));
@@ -631,8 +608,7 @@ class ModuleDecoder : public Decoder {
uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
const byte* pos = pc_;
- int length;
- uint32_t func_index = consume_u32v(&length, "function index");
+ uint32_t func_index = consume_u32v("function index");
if (func_index >= module->functions.size()) {
error(pos, pos, "function index %u out of bounds (%d functions)",
func_index, static_cast<int>(module->functions.size()));
@@ -643,30 +619,6 @@ class ModuleDecoder : public Decoder {
return func_index;
}
- // Reads a section name.
- WasmSection::Code consume_section_name(int* string_leb_length,
- uint32_t* string_length) {
- *string_length = consume_u32v(string_leb_length, "name length");
- const byte* start = pc_;
- consume_bytes(*string_length);
- if (failed()) {
- TRACE("Section name of length %u couldn't be read\n", *string_length);
- return WasmSection::Code::Max;
- }
- // TODO(jfb) Linear search, it may be better to do a common-prefix search.
- for (WasmSection::Code i = WasmSection::begin(); i != WasmSection::end();
- i = WasmSection::next(i)) {
- if (WasmSection::getNameLength(i) == *string_length &&
- 0 == memcmp(WasmSection::getName(i), start, *string_length)) {
- return i;
- }
- }
- TRACE("Unknown section: '");
- for (uint32_t i = 0; i != *string_length; ++i) TRACE("%c", *(start + i));
- TRACE("'\n");
- return WasmSection::Code::Max;
- }
-
// Reads a single 8-bit integer, interpreting it as a local type.
LocalType consume_local_type() {
byte val = consume_u8("local type");
@@ -688,55 +640,50 @@ class ModuleDecoder : public Decoder {
}
}
- // Reads a single 8-bit integer, interpreting it as a memory type.
- MachineType mem_type() {
- byte val = consume_u8("memory type");
- MemTypeCode t = static_cast<MemTypeCode>(val);
- switch (t) {
- case kMemI8:
- return MachineType::Int8();
- case kMemU8:
- return MachineType::Uint8();
- case kMemI16:
- return MachineType::Int16();
- case kMemU16:
- return MachineType::Uint16();
- case kMemI32:
- return MachineType::Int32();
- case kMemU32:
- return MachineType::Uint32();
- case kMemI64:
- return MachineType::Int64();
- case kMemU64:
- return MachineType::Uint64();
- case kMemF32:
- return MachineType::Float32();
- case kMemF64:
- return MachineType::Float64();
- default:
- error(pc_ - 1, "invalid memory type");
- return MachineType::None();
- }
- }
-
- // Parses an inline function signature.
+ // Parses a type entry, which is currently limited to functions only.
FunctionSig* consume_sig() {
- int length;
- byte count = consume_u32v(&length, "param count");
- LocalType ret = consume_local_type();
- FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
- if (ret != kAstStmt) builder.AddReturn(ret);
-
- for (int i = 0; i < count; i++) {
+ const byte* pos = pc_;
+ byte form = consume_u8("type form");
+ if (form != kWasmFunctionTypeForm) {
+ error(pos, pos, "expected function type form (0x%02x), got: 0x%02x",
+ kWasmFunctionTypeForm, form);
+ return nullptr;
+ }
+ // parse parameter types
+ uint32_t param_count = consume_u32v("param count");
+ std::vector<LocalType> params;
+ for (uint32_t i = 0; i < param_count; ++i) {
LocalType param = consume_local_type();
if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
- builder.AddParam(param);
+ params.push_back(param);
+ }
+
+ // parse return types
+ const byte* pt = pc_;
+ uint32_t return_count = consume_u32v("return count");
+ if (return_count > kMaxReturnCount) {
+ error(pt, pt, "return count of %u exceeds maximum of %u", return_count,
+ kMaxReturnCount);
+ return nullptr;
}
- return builder.Build();
+ std::vector<LocalType> returns;
+ for (uint32_t i = 0; i < return_count; ++i) {
+ LocalType ret = consume_local_type();
+ if (ret == kAstStmt) error(pc_ - 1, "invalid void return type");
+ returns.push_back(ret);
+ }
+
+ // FunctionSig stores the return types first.
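+    // E.g. a signature (i64, f32) -> f64 is laid out as [f64, i64, f32]
+    // with return_count 1 and param_count 2.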
+ LocalType* buffer =
+ module_zone->NewArray<LocalType>(param_count + return_count);
+ uint32_t b = 0;
+ for (uint32_t i = 0; i < return_count; ++i) buffer[b++] = returns[i];
+ for (uint32_t i = 0; i < param_count; ++i) buffer[b++] = params[i];
+
+ return new (module_zone) FunctionSig(return_count, param_count, buffer);
}
};
-
// Helpers for nice error messages.
class ModuleError : public ModuleResult {
public:
@@ -746,11 +693,10 @@ class ModuleError : public ModuleResult {
char* result = new char[len];
strncpy(result, msg, len);
result[len - 1] = 0;
- error_msg.Reset(result);
+ error_msg.reset(result);
}
};
-
// Helpers for nice error messages.
class FunctionError : public FunctionResult {
public:
@@ -760,41 +706,115 @@ class FunctionError : public FunctionResult {
char* result = new char[len];
strncpy(result, msg, len);
result[len - 1] = 0;
- error_msg.Reset(result);
+ error_msg.reset(result);
}
};
+Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
+ WasmSection::Code code) {
+ Decoder decoder(module_start, module_end);
+
+ uint32_t magic_word = decoder.consume_u32("wasm magic");
+ if (magic_word != kWasmMagic) decoder.error("wrong magic word");
+
+ uint32_t magic_version = decoder.consume_u32("wasm version");
+ if (magic_version != kWasmVersion) decoder.error("wrong wasm version");
+
+ while (decoder.more() && decoder.ok()) {
+ // Read the section name.
+ uint32_t string_length = decoder.consume_u32v("section name length");
+ const byte* section_name_start = decoder.pc();
+ decoder.consume_bytes(string_length);
+ if (decoder.failed()) break;
+
+ WasmSection::Code section =
+ WasmSection::lookup(section_name_start, string_length);
+
+ // Read and check the section size.
+ uint32_t section_length = decoder.consume_u32v("section length");
+
+ const byte* section_start = decoder.pc();
+ decoder.consume_bytes(section_length);
+ if (section == code && decoder.ok()) {
+ return Vector<const uint8_t>(section_start, section_length);
+ }
+ }
+
+ return Vector<const uint8_t>();
+}
+
+} // namespace
+
ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
const byte* module_start, const byte* module_end,
bool verify_functions, ModuleOrigin origin) {
+ size_t decode_memory_start = zone->allocation_size();
+ HistogramTimerScope wasm_decode_module_time_scope(
+ isolate->counters()->wasm_decode_module_time());
size_t size = module_end - module_start;
if (module_start > module_end) return ModuleError("start > end");
if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ isolate->counters()->wasm_module_size_bytes()->AddSample(
+ static_cast<int>(size));
WasmModule* module = new WasmModule();
ModuleDecoder decoder(zone, module_start, module_end, origin);
- return decoder.DecodeModule(module, verify_functions);
+ ModuleResult result = decoder.DecodeModule(module, verify_functions);
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ isolate->counters()->wasm_decode_module_peak_memory_bytes()->AddSample(
+ static_cast<int>(zone->allocation_size() - decode_memory_start));
+ return result;
}
-
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
const byte* end) {
ModuleDecoder decoder(zone, start, end, kWasmOrigin);
return decoder.DecodeFunctionSignature(start);
}
-
FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
ModuleEnv* module_env,
const byte* function_start,
const byte* function_end) {
+ HistogramTimerScope wasm_decode_function_time_scope(
+ isolate->counters()->wasm_decode_function_time());
size_t size = function_end - function_start;
if (function_start > function_end) return FunctionError("start > end");
if (size > kMaxFunctionSize)
return FunctionError("size > maximum function size");
+ isolate->counters()->wasm_function_size_bytes()->AddSample(
+ static_cast<int>(size));
WasmFunction* function = new WasmFunction();
ModuleDecoder decoder(zone, function_start, function_end, kWasmOrigin);
return decoder.DecodeSingleFunction(module_env, function);
}
+
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+ const byte* module_end) {
+ Vector<const byte> code_section =
+ FindSection(module_start, module_end, WasmSection::Code::FunctionBodies);
+ Decoder decoder(code_section.start(), code_section.end());
+ if (!code_section.start()) decoder.error("no code section");
+
+ uint32_t functions_count = decoder.consume_u32v("functions count");
+ FunctionOffsets table;
+  // Guard against a bogus count: reserve only if the claimed number of
+  // functions is plausible for the size of the code section.
+ if (functions_count < static_cast<unsigned>(code_section.length()) / 2)
+ table.reserve(functions_count);
+ int section_offset = static_cast<int>(code_section.start() - module_start);
+ DCHECK_LE(0, section_offset);
+ for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
+ uint32_t size = decoder.consume_u32v("body size");
+ int offset = static_cast<int>(section_offset + decoder.pc_offset());
+ table.push_back(std::make_pair(offset, static_cast<int>(size)));
+ DCHECK(table.back().first >= 0 && table.back().second >= 0);
+ decoder.consume_bytes(size);
+ }
+ if (decoder.more()) decoder.error("unexpected additional bytes");
+
+ return decoder.toResult(std::move(table));
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 00a9b878c6..dd6bd3bc86 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -26,6 +26,13 @@ FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
const byte* function_start,
const byte* function_end);
+
+// Extracts the function offset table from the wasm module bytes.
+// Returns a vector with <offset, length> entries, or failure if the wasm bytes
+// are detected as invalid. Note that this validation is not complete.
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+ const byte* module_end);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/switch-logic.cc b/deps/v8/src/wasm/switch-logic.cc
new file mode 100644
index 0000000000..9ebc0b3452
--- /dev/null
+++ b/deps/v8/src/wasm/switch-logic.cc
@@ -0,0 +1,63 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/switch-logic.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+CaseNode* CreateBst(ZoneVector<CaseNode*>* nodes, size_t begin, size_t end) {
+ if (end < begin) {
+ return nullptr;
+ } else if (end == begin) {
+ return nodes->at(begin);
+ } else {
+ size_t root_index = (begin + end) / 2;
+ CaseNode* root = nodes->at(root_index);
+ if (root_index != 0) {
+ root->left = CreateBst(nodes, begin, root_index - 1);
+ }
+ root->right = CreateBst(nodes, root_index + 1, end);
+ return root;
+ }
+}
+} // namespace
+
+CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone) {
+ const int max_distance = 2;
+ const int min_size = 4;
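+  // E.g. cases {1, 2, 3, 4, 5, 40, 42}: the gap from 5 to 40 exceeds
+  // max_distance, the dense run 1..5 meets min_size and becomes a single
+  // range node, and 40 and 42 become individual leaf nodes before the
+  // balanced tree is built.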
+ if (cases->empty()) {
+ return nullptr;
+ }
+ std::sort(cases->begin(), cases->end());
+ ZoneVector<size_t> table_breaks(zone);
+ for (size_t i = 1; i < cases->size(); ++i) {
+ if (cases->at(i) - cases->at(i - 1) > max_distance) {
+ table_breaks.push_back(i);
+ }
+ }
+ table_breaks.push_back(cases->size());
+ ZoneVector<CaseNode*> nodes(zone);
+ size_t curr_pos = 0;
+ for (size_t i = 0; i < table_breaks.size(); ++i) {
+ size_t break_pos = table_breaks[i];
+ if (break_pos - curr_pos >= min_size) {
+ int begin = cases->at(curr_pos);
+ int end = cases->at(break_pos - 1);
+ nodes.push_back(new (zone) CaseNode(begin, end));
+ curr_pos = break_pos;
+ } else {
+ for (; curr_pos < break_pos; curr_pos++) {
+ nodes.push_back(new (zone)
+ CaseNode(cases->at(curr_pos), cases->at(curr_pos)));
+ }
+ }
+ }
+ return CreateBst(&nodes, 0, nodes.size() - 1);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/switch-logic.h b/deps/v8/src/wasm/switch-logic.h
new file mode 100644
index 0000000000..8cef08b98b
--- /dev/null
+++ b/deps/v8/src/wasm/switch-logic.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_SWITCH_LOGIC_H
+#define V8_WASM_SWITCH_LOGIC_H
+
+#include "src/zone-containers.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+struct CaseNode : public ZoneObject {
+ const int begin;
+ const int end;
+ CaseNode* left;
+ CaseNode* right;
+ CaseNode(int begin, int end) : begin(begin), end(end) {
+ left = nullptr;
+ right = nullptr;
+ }
+};
+
+CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_WASM_SWITCH_LOGIC_H
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
new file mode 100644
index 0000000000..54e7100935
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -0,0 +1,234 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-debug.h"
+
+#include "src/assert-scope.h"
+#include "src/debug/debug.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module.h"
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+namespace {
+
+enum {
+ kWasmDebugInfoWasmObj,
+ kWasmDebugInfoWasmBytesHash,
+ kWasmDebugInfoFunctionByteOffsets,
+ kWasmDebugInfoFunctionScripts,
+ kWasmDebugInfoNumEntries
+};
+
+ByteArray *GetOrCreateFunctionOffsetTable(Handle<WasmDebugInfo> debug_info) {
+ Object *offset_table = debug_info->get(kWasmDebugInfoFunctionByteOffsets);
+ Isolate *isolate = debug_info->GetIsolate();
+ if (!offset_table->IsUndefined(isolate)) return ByteArray::cast(offset_table);
+
+ FunctionOffsetsResult function_offsets;
+ {
+ DisallowHeapAllocation no_gc;
+ SeqOneByteString *wasm_bytes =
+ wasm::GetWasmBytes(debug_info->wasm_object());
+ const byte *bytes_start = wasm_bytes->GetChars();
+ const byte *bytes_end = bytes_start + wasm_bytes->length();
+ function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end);
+ }
+ DCHECK(function_offsets.ok());
+ size_t array_size = 2 * kIntSize * function_offsets.val.size();
+ CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
+ ByteArray *arr =
+ *isolate->factory()->NewByteArray(static_cast<int>(array_size));
+ int idx = 0;
+ for (std::pair<int, int> p : function_offsets.val) {
+ arr->set_int(idx++, p.first);
+ arr->set_int(idx++, p.second);
+ }
+ DCHECK_EQ(arr->length(), idx * kIntSize);
+ debug_info->set(kWasmDebugInfoFunctionByteOffsets, arr);
+
+ return arr;
+}
+
+std::pair<int, int> GetFunctionOffsetAndLength(Handle<WasmDebugInfo> debug_info,
+ int func_index) {
+ ByteArray *arr = GetOrCreateFunctionOffsetTable(debug_info);
+ DCHECK(func_index >= 0 && func_index < arr->length() / kIntSize / 2);
+
+ int offset = arr->get_int(2 * func_index);
+ int length = arr->get_int(2 * func_index + 1);
+ // Assert that it's distinguishable from the "illegal function index" return.
+ DCHECK(offset > 0 && length > 0);
+ return {offset, length};
+}
+
+Vector<const uint8_t> GetFunctionBytes(Handle<WasmDebugInfo> debug_info,
+ int func_index) {
+ SeqOneByteString *module_bytes =
+ wasm::GetWasmBytes(debug_info->wasm_object());
+ std::pair<int, int> offset_and_length =
+ GetFunctionOffsetAndLength(debug_info, func_index);
+ return Vector<const uint8_t>(
+ module_bytes->GetChars() + offset_and_length.first,
+ offset_and_length.second);
+}
+
+} // namespace
+
+Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
+ Isolate *isolate = wasm->GetIsolate();
+ Factory *factory = isolate->factory();
+ Handle<FixedArray> arr =
+ factory->NewFixedArray(kWasmDebugInfoNumEntries, TENURED);
+ arr->set(kWasmDebugInfoWasmObj, *wasm);
+ int hash = 0;
+ Handle<SeqOneByteString> wasm_bytes(GetWasmBytes(*wasm), isolate);
+ {
+ DisallowHeapAllocation no_gc;
+ hash = StringHasher::HashSequentialString(
+ wasm_bytes->GetChars(), wasm_bytes->length(), kZeroHashSeed);
+ }
+ Handle<Object> hash_obj = factory->NewNumberFromInt(hash, TENURED);
+ arr->set(kWasmDebugInfoWasmBytesHash, *hash_obj);
+
+ return Handle<WasmDebugInfo>::cast(arr);
+}
+
+bool WasmDebugInfo::IsDebugInfo(Object *object) {
+ if (!object->IsFixedArray()) return false;
+ FixedArray *arr = FixedArray::cast(object);
+ Isolate *isolate = arr->GetIsolate();
+ return arr->length() == kWasmDebugInfoNumEntries &&
+ IsWasmObject(arr->get(kWasmDebugInfoWasmObj)) &&
+ arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber() &&
+ (arr->get(kWasmDebugInfoFunctionByteOffsets)->IsUndefined(isolate) ||
+ arr->get(kWasmDebugInfoFunctionByteOffsets)->IsByteArray()) &&
+ (arr->get(kWasmDebugInfoFunctionScripts)->IsUndefined(isolate) ||
+ arr->get(kWasmDebugInfoFunctionScripts)->IsFixedArray());
+}
+
+WasmDebugInfo *WasmDebugInfo::cast(Object *object) {
+ DCHECK(IsDebugInfo(object));
+ return reinterpret_cast<WasmDebugInfo *>(object);
+}
+
+JSObject *WasmDebugInfo::wasm_object() {
+ return JSObject::cast(get(kWasmDebugInfoWasmObj));
+}
+
+Script *WasmDebugInfo::GetFunctionScript(Handle<WasmDebugInfo> debug_info,
+ int func_index) {
+ Isolate *isolate = debug_info->GetIsolate();
+ Object *scripts_obj = debug_info->get(kWasmDebugInfoFunctionScripts);
+ Handle<FixedArray> scripts;
+ if (scripts_obj->IsUndefined(isolate)) {
+ int num_functions = wasm::GetNumberOfFunctions(debug_info->wasm_object());
+ scripts = isolate->factory()->NewFixedArray(num_functions, TENURED);
+ debug_info->set(kWasmDebugInfoFunctionScripts, *scripts);
+ } else {
+ scripts = handle(FixedArray::cast(scripts_obj), isolate);
+ }
+
+ DCHECK(func_index >= 0 && func_index < scripts->length());
+ Object *script_or_undef = scripts->get(func_index);
+ if (!script_or_undef->IsUndefined(isolate)) {
+ return Script::cast(script_or_undef);
+ }
+
+ Handle<Script> script =
+ isolate->factory()->NewScript(isolate->factory()->empty_string());
+ scripts->set(func_index, *script);
+
+ script->set_type(Script::TYPE_WASM);
+ script->set_wasm_object(debug_info->wasm_object());
+ script->set_wasm_function_index(func_index);
+
+ int hash = 0;
+ debug_info->get(kWasmDebugInfoWasmBytesHash)->ToInt32(&hash);
+ char buffer[32];
+ SNPrintF(ArrayVector(buffer), "wasm://%08x/%d", hash, func_index);
+ Handle<String> source_url =
+ isolate->factory()->NewStringFromAsciiChecked(buffer, TENURED);
+ script->set_source_url(*source_url);
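+  // The resulting URL, e.g. "wasm://0012abcd/3", names function 3 of the
+  // module whose bytes hash to 0x0012abcd.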
+
+ int func_bytes_len =
+ GetFunctionOffsetAndLength(debug_info, func_index).second;
+ Handle<FixedArray> line_ends = isolate->factory()->NewFixedArray(1, TENURED);
+ line_ends->set(0, Smi::FromInt(func_bytes_len));
+ line_ends->set_map(isolate->heap()->fixed_cow_array_map());
+ script->set_line_ends(*line_ends);
+
+ // TODO(clemensh): Register with the debugger. Note that we cannot call into
+ // JS at this point since this function is called from within stack trace
+ // collection (which means we cannot call Debug::OnAfterCompile in its
+ // current form). See crbug.com/641065.
+ if (false) isolate->debug()->OnAfterCompile(script);
+
+ return *script;
+}
+
+Handle<String> WasmDebugInfo::DisassembleFunction(
+ Handle<WasmDebugInfo> debug_info, int func_index) {
+ std::ostringstream disassembly_os;
+
+ {
+ Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
+ DisallowHeapAllocation no_gc;
+
+ base::AccountingAllocator allocator;
+ bool ok = PrintAst(
+ &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
+ disassembly_os, nullptr);
+ DCHECK(ok);
+ USE(ok);
+ }
+
+ // Unfortunately, we have to copy the string here.
+ std::string code_str = disassembly_os.str();
+ CHECK_LE(code_str.length(), static_cast<size_t>(kMaxInt));
+ Factory *factory = debug_info->GetIsolate()->factory();
+ Vector<const char> code_vec(code_str.data(),
+ static_cast<int>(code_str.length()));
+ return factory->NewStringFromAscii(code_vec).ToHandleChecked();
+}
+
+Handle<FixedArray> WasmDebugInfo::GetFunctionOffsetTable(
+ Handle<WasmDebugInfo> debug_info, int func_index) {
+ class NullBuf : public std::streambuf {};
+ NullBuf null_buf;
+ std::ostream null_stream(&null_buf);
+
+ std::vector<std::tuple<uint32_t, int, int>> offset_table_vec;
+
+ {
+ Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
+ DisallowHeapAllocation no_gc;
+
+ v8::base::AccountingAllocator allocator;
+ bool ok = PrintAst(
+ &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
+ null_stream, &offset_table_vec);
+ DCHECK(ok);
+ USE(ok);
+ }
+
+ size_t arr_size = 3 * offset_table_vec.size();
+ CHECK_LE(arr_size, static_cast<size_t>(kMaxInt));
+ Factory *factory = debug_info->GetIsolate()->factory();
+ Handle<FixedArray> offset_table =
+ factory->NewFixedArray(static_cast<int>(arr_size), TENURED);
+
+ int idx = 0;
+ for (std::tuple<uint32_t, int, int> elem : offset_table_vec) {
+ offset_table->set(idx++, Smi::FromInt(std::get<0>(elem)));
+ offset_table->set(idx++, Smi::FromInt(std::get<1>(elem)));
+ offset_table->set(idx++, Smi::FromInt(std::get<2>(elem)));
+ }
+ DCHECK_EQ(idx, offset_table->length());
+
+ return offset_table;
+}
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
new file mode 100644
index 0000000000..9659951271
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-debug.h
@@ -0,0 +1,46 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_DEBUG_H_
+#define V8_WASM_DEBUG_H_
+
+#include "src/handles.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmDebugInfo : public FixedArray {
+ public:
+ static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
+
+ static bool IsDebugInfo(Object* object);
+ static WasmDebugInfo* cast(Object* object);
+
+ JSObject* wasm_object();
+
+ bool SetBreakPoint(int byte_offset);
+
+ // Get the Script for the specified function.
+ static Script* GetFunctionScript(Handle<WasmDebugInfo> debug_info,
+ int func_index);
+
+ // Disassemble the specified function from this module.
+ static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
+ int func_index);
+
+ // Get the offset table for the specified function, mapping from byte offsets
+ // to position in the disassembly.
+ // Returns an array with three entries per instruction: byte offset, line and
+ // column.
+ static Handle<FixedArray> GetFunctionOffsetTable(
+ Handle<WasmDebugInfo> debug_info, int func_index);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_DEBUG_H_
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
new file mode 100644
index 0000000000..09294c2c28
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -0,0 +1,216 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <math.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <limits>
+
+#include "include/v8config.h"
+
+#include "src/base/bits.h"
+#include "src/utils.h"
+#include "src/wasm/wasm-external-refs.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+
+void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+
+void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+
+void f32_nearest_int_wrapper(float* param) { *param = nearbyintf(*param); }
+
+void f64_trunc_wrapper(double* param) {
+ WriteDoubleValue(param, trunc(ReadDoubleValue(param)));
+}
+
+void f64_floor_wrapper(double* param) {
+ WriteDoubleValue(param, floor(ReadDoubleValue(param)));
+}
+
+void f64_ceil_wrapper(double* param) {
+ WriteDoubleValue(param, ceil(ReadDoubleValue(param)));
+}
+
+void f64_nearest_int_wrapper(double* param) {
+ WriteDoubleValue(param, nearbyint(ReadDoubleValue(param)));
+}
+
+void int64_to_float32_wrapper(int64_t* input, float* output) {
+ *output = static_cast<float>(*input);
+}
+
+void uint64_to_float32_wrapper(uint64_t* input, float* output) {
+#if V8_CC_MSVC
+ // With MSVC we use static_cast<float>(uint32_t) instead of
+ // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
+ // semantics. The idea is to calculate
+ // static_cast<float>(high_word) * 2^32 + static_cast<float>(low_word). To
+ // achieve proper rounding in all cases we have to adjust the high_word
+ // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
+ // the high_word if the low_word may affect the rounding of the high_word.
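+  // (A float significand holds 24 bits, so once high_word needs more than
+  // that, low_word can only influence the rounding direction; the rounding
+  // bit encodes exactly that influence.)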
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+ float shift = static_cast<float>(1ull << 32);
+ // If the MSB of the high_word is set, then we make space for a rounding bit.
+ if (high_word < 0x80000000) {
+ high_word <<= 1;
+ shift = static_cast<float>(1ull << 31);
+ }
+
+ if ((high_word & 0xfe000000) && low_word) {
+ // Set the rounding bit.
+ high_word |= 1;
+ }
+
+ float result = static_cast<float>(high_word);
+ result *= shift;
+ result += static_cast<float>(low_word);
+ *output = result;
+
+#else
+ *output = static_cast<float>(*input);
+#endif
+}
+
+void int64_to_float64_wrapper(int64_t* input, double* output) {
+ *output = static_cast<double>(*input);
+}
+
+void uint64_to_float64_wrapper(uint64_t* input, double* output) {
+#if V8_CC_MSVC
+ // With MSVC we use static_cast<double>(uint32_t) instead of
+ // static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
+ // semantics. The idea is to calculate
+ // static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
+ uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
+ uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+
+ double shift = static_cast<double>(1ull << 32);
+
+ double result = static_cast<double>(high_word);
+ result *= shift;
+ result += static_cast<double>(low_word);
+ *output = result;
+
+#else
+ *output = static_cast<double>(*input);
+#endif
+}
+
+int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
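+  // E.g. static_cast<float>(int64_t max) rounds up to exactly 2^63, which
+  // is already outside int64 range, so "<=" would accept inputs that
+  // overflow on conversion.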
+ if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
+ *input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+ *output = static_cast<int64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (*input > -1.0 &&
+ *input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+ *output = static_cast<uint64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
+ if (*input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
+ *input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ *output = static_cast<int64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (*input > -1.0 &&
+ *input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+ *output = static_cast<uint64_t>(*input);
+ return 1;
+ }
+ return 0;
+}
+
+int32_t int64_div_wrapper(int64_t* dst, int64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
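+  // INT64_MIN / -1 overflows (the true quotient 2^63 is unrepresentable),
+  // so it is reported separately from division by zero.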
+ if (*src == -1 && *dst == std::numeric_limits<int64_t>::min()) {
+ return -1;
+ }
+ *dst /= *src;
+ return 1;
+}
+
+int32_t int64_mod_wrapper(int64_t* dst, int64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
+ *dst %= *src;
+ return 1;
+}
+
+int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
+ *dst /= *src;
+ return 1;
+}
+
+int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
+ if (*src == 0) {
+ return 0;
+ }
+ *dst %= *src;
+ return 1;
+}
+
+uint32_t word32_ctz_wrapper(uint32_t* input) {
+ return static_cast<uint32_t>(base::bits::CountTrailingZeros32(*input));
+}
+
+uint32_t word64_ctz_wrapper(uint64_t* input) {
+ return static_cast<uint32_t>(base::bits::CountTrailingZeros64(*input));
+}
+
+uint32_t word32_popcnt_wrapper(uint32_t* input) {
+ return static_cast<uint32_t>(base::bits::CountPopulation(*input));
+}
+
+uint32_t word64_popcnt_wrapper(uint64_t* input) {
+ return static_cast<uint32_t>(base::bits::CountPopulation(*input));
+}
+
+void float64_pow_wrapper(double* param0, double* param1) {
+ double x = ReadDoubleValue(param0);
+ double y = ReadDoubleValue(param1);
+ if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
+    WriteDoubleValue(param0, std::numeric_limits<double>::quiet_NaN());
+    return;
+  }
+ WriteDoubleValue(param0, Pow(x, y));
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 4aa452bbf5..d9539ce71a 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <stdint.h>
+
#ifndef WASM_EXTERNAL_REFS_H
#define WASM_EXTERNAL_REFS_H
@@ -9,173 +11,57 @@ namespace v8 {
namespace internal {
namespace wasm {
-static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+void f32_trunc_wrapper(float* param);
-static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+void f32_floor_wrapper(float* param);
-static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+void f32_ceil_wrapper(float* param);
-static void f32_nearest_int_wrapper(float* param) {
- *param = nearbyintf(*param);
-}
+void f32_nearest_int_wrapper(float* param);
-static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+void f64_trunc_wrapper(double* param);
-static void f64_floor_wrapper(double* param) { *param = floor(*param); }
+void f64_floor_wrapper(double* param);
-static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+void f64_ceil_wrapper(double* param);
-static void f64_nearest_int_wrapper(double* param) {
- *param = nearbyint(*param);
-}
+void f64_nearest_int_wrapper(double* param);
-static void int64_to_float32_wrapper(int64_t* input, float* output) {
- *output = static_cast<float>(*input);
-}
+void int64_to_float32_wrapper(int64_t* input, float* output);
-static void uint64_to_float32_wrapper(uint64_t* input, float* output) {
-#if V8_CC_MSVC
- // With MSVC we use static_cast<float>(uint32_t) instead of
- // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
- // semantics. The idea is to calculate
- // static_cast<float>(high_word) * 2^32 + static_cast<float>(low_word). To
- // achieve proper rounding in all cases we have to adjust the high_word
- // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
- // the high_word if the low_word may affect the rounding of the high_word.
- uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
- uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+void uint64_to_float32_wrapper(uint64_t* input, float* output);
- float shift = static_cast<float>(1ull << 32);
- // If the MSB of the high_word is set, then we make space for a rounding bit.
- if (high_word < 0x80000000) {
- high_word <<= 1;
- shift = static_cast<float>(1ull << 31);
- }
+void int64_to_float64_wrapper(int64_t* input, double* output);
- if ((high_word & 0xfe000000) && low_word) {
- // Set the rounding bit.
- high_word |= 1;
- }
+void uint64_to_float64_wrapper(uint64_t* input, double* output);
- float result = static_cast<float>(high_word);
- result *= shift;
- result += static_cast<float>(low_word);
- *output = result;
+int32_t float32_to_int64_wrapper(float* input, int64_t* output);
+
+int32_t float32_to_uint64_wrapper(float* input, uint64_t* output);
+
+int32_t float64_to_int64_wrapper(double* input, int64_t* output);
+
+int32_t float64_to_uint64_wrapper(double* input, uint64_t* output);
+
+int32_t int64_div_wrapper(int64_t* dst, int64_t* src);
+
+int32_t int64_mod_wrapper(int64_t* dst, int64_t* src);
+
+int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src);
+
+int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src);
+
+uint32_t word32_ctz_wrapper(uint32_t* input);
+
+uint32_t word64_ctz_wrapper(uint64_t* input);
+
+uint32_t word32_popcnt_wrapper(uint32_t* input);
+
+uint32_t word64_popcnt_wrapper(uint64_t* input);
+
+void float64_pow_wrapper(double* param0, double* param1);
-#else
- *output = static_cast<float>(*input);
-#endif
-}
-
-static void int64_to_float64_wrapper(int64_t* input, double* output) {
- *output = static_cast<double>(*input);
-}
-
-static void uint64_to_float64_wrapper(uint64_t* input, double* output) {
-#if V8_CC_MSVC
- // With MSVC we use static_cast<double>(uint32_t) instead of
- // static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
- // semantics. The idea is to calculate
- // static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
- uint32_t low_word = static_cast<uint32_t>(*input & 0xffffffff);
- uint32_t high_word = static_cast<uint32_t>(*input >> 32);
-
- double shift = static_cast<double>(1ull << 32);
-
- double result = static_cast<double>(high_word);
- result *= shift;
- result += static_cast<double>(low_word);
- *output = result;
-
-#else
- *output = static_cast<double>(*input);
-#endif
-}
-
-static int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
- *input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
- *output = static_cast<int64_t>(*input);
- return 1;
- }
- return 0;
-}
-
-static int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (*input > -1.0 &&
- *input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
- *output = static_cast<uint64_t>(*input);
- return 1;
- }
- return 0;
-}
-
-static int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within int64 range which are actually
- // not within int64 range.
- if (*input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
- *input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
- *output = static_cast<int64_t>(*input);
- return 1;
- }
- return 0;
-}
-
-static int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
- // We use "<" here to check the upper bound because of rounding problems: With
- // "<=" some inputs would be considered within uint64 range which are actually
- // not within uint64 range.
- if (*input > -1.0 &&
- *input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
- *output = static_cast<uint64_t>(*input);
- return 1;
- }
- return 0;
-}
-
-static int32_t int64_div_wrapper(int64_t* dst, int64_t* src) {
- if (*src == 0) {
- return 0;
- }
- if (*src == -1 && *dst == std::numeric_limits<int64_t>::min()) {
- return -1;
- }
- *dst /= *src;
- return 1;
-}
-
-static int32_t int64_mod_wrapper(int64_t* dst, int64_t* src) {
- if (*src == 0) {
- return 0;
- }
- *dst %= *src;
- return 1;
-}
-
-static int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src) {
- if (*src == 0) {
- return 0;
- }
- *dst /= *src;
- return 1;
-}
-
-static int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
- if (*src == 0) {
- return 0;
- }
- *dst %= *src;
- return 1;
-}
} // namespace wasm
} // namespace internal
} // namespace v8
-
#endif
diff --git a/deps/v8/src/wasm/wasm-function-name-table.cc b/deps/v8/src/wasm/wasm-function-name-table.cc
new file mode 100644
index 0000000000..cc52125500
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-function-name-table.cc
@@ -0,0 +1,71 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-function-name-table.h"
+
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Build an array with all function names. If there are N functions in the
+// module, then the first (kIntSize * (N+1)) bytes are integer entries.
+// The first integer entry encodes the number of functions in the module.
+// The entries 1 to N contain offsets into the second part of this array.
+// If a function is unnamed (not to be confused with an empty name), then the
+// integer entry is the negative offset of the next function name.
+// After these N+1 integer entries, the second part begins, which holds a
+// concatenation of all function names.
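+// Example with two functions where only function 0 is named "add" and
+// kIntSize is 4: the integer part is [2, 12, -15] and the bytes "add"
+// start at offset 12; the -15 entry marks function 1 as unnamed while
+// still encoding where the preceding name data ends.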
+Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
+ const WasmModule* module) {
+ uint64_t func_names_length = 0;
+ for (auto& func : module->functions) func_names_length += func.name_length;
+ int num_funcs_int = static_cast<int>(module->functions.size());
+ int current_offset = (num_funcs_int + 1) * kIntSize;
+ uint64_t total_array_length = current_offset + func_names_length;
+ int total_array_length_int = static_cast<int>(total_array_length);
+ // Check for overflow.
+ CHECK(total_array_length_int == total_array_length && num_funcs_int >= 0 &&
+ num_funcs_int == module->functions.size());
+ Handle<ByteArray> func_names_array =
+ isolate->factory()->NewByteArray(total_array_length_int, TENURED);
+ func_names_array->set_int(0, num_funcs_int);
+ int func_index = 0;
+ for (const WasmFunction& fun : module->functions) {
+ WasmName name = module->GetNameOrNull(&fun);
+ if (name.start() == nullptr) {
+ func_names_array->set_int(func_index + 1, -current_offset);
+ } else {
+ func_names_array->copy_in(current_offset,
+ reinterpret_cast<const byte*>(name.start()),
+ name.length());
+ func_names_array->set_int(func_index + 1, current_offset);
+ current_offset += name.length();
+ }
+ ++func_index;
+ }
+ return func_names_array;
+}
+
+MaybeHandle<String> GetWasmFunctionNameFromTable(
+ Handle<ByteArray> func_names_array, uint32_t func_index) {
+ uint32_t num_funcs = static_cast<uint32_t>(func_names_array->get_int(0));
+ DCHECK(static_cast<int>(num_funcs) >= 0);
+ Factory* factory = func_names_array->GetIsolate()->factory();
+ DCHECK(func_index < num_funcs);
+ int offset = func_names_array->get_int(func_index + 1);
+ if (offset < 0) return {};
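+  // The end of this name is given by the following entry's offset (stored
+  // negated when that function is unnamed); the last name extends to the
+  // end of the array.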
+ int next_offset = func_index == num_funcs - 1
+ ? func_names_array->length()
+ : abs(func_names_array->get_int(func_index + 2));
+ ScopedVector<byte> buffer(next_offset - offset);
+ func_names_array->copy_out(offset, buffer.start(), next_offset - offset);
+ if (!unibrow::Utf8::Validate(buffer.start(), buffer.length())) return {};
+ return factory->NewStringFromUtf8(Vector<const char>::cast(buffer));
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-function-name-table.h b/deps/v8/src/wasm/wasm-function-name-table.h
new file mode 100644
index 0000000000..ffee782413
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-function-name-table.h
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_FUNCTION_NAME_TABLE_H_
+#define V8_WASM_FUNCTION_NAME_TABLE_H_
+
+#include "src/handles.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Forward declarations for some WASM data structures.
+struct WasmModule;
+
+// Encode all function names of the WasmModule into one ByteArray.
+Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
+ const WasmModule* module);
+
+// Extract the function name for the given func_index from the function name
+// table.
+// Returns a null handle if the respective function is unnamed (not to be
+// confused with empty names) or the function name is not a valid UTF-8 string.
+MaybeHandle<String> GetWasmFunctionNameFromTable(
+ Handle<ByteArray> wasm_names_table, uint32_t func_index);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_FUNCTION_NAME_TABLE_H_
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
new file mode 100644
index 0000000000..7e3127dd53
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -0,0 +1,1813 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-interpreter.h"
+
+#include "src/utils.h"
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-external-refs.h"
+#include "src/wasm/wasm-module.h"
+
+#include "src/base/accounting-allocator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#if DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
+
+#define FOREACH_SIMPLE_BINOP(V) \
+ V(I32Add, uint32_t, +) \
+ V(I32Sub, uint32_t, -) \
+ V(I32Mul, uint32_t, *) \
+ V(I32And, uint32_t, &) \
+ V(I32Ior, uint32_t, |) \
+ V(I32Xor, uint32_t, ^) \
+ V(I32Eq, uint32_t, ==) \
+ V(I32Ne, uint32_t, !=) \
+ V(I32LtU, uint32_t, <) \
+ V(I32LeU, uint32_t, <=) \
+ V(I32GtU, uint32_t, >) \
+ V(I32GeU, uint32_t, >=) \
+ V(I32LtS, int32_t, <) \
+ V(I32LeS, int32_t, <=) \
+ V(I32GtS, int32_t, >) \
+ V(I32GeS, int32_t, >=) \
+ V(I64Add, uint64_t, +) \
+ V(I64Sub, uint64_t, -) \
+ V(I64Mul, uint64_t, *) \
+ V(I64And, uint64_t, &) \
+ V(I64Ior, uint64_t, |) \
+ V(I64Xor, uint64_t, ^) \
+ V(I64Eq, uint64_t, ==) \
+ V(I64Ne, uint64_t, !=) \
+ V(I64LtU, uint64_t, <) \
+ V(I64LeU, uint64_t, <=) \
+ V(I64GtU, uint64_t, >) \
+ V(I64GeU, uint64_t, >=) \
+ V(I64LtS, int64_t, <) \
+ V(I64LeS, int64_t, <=) \
+ V(I64GtS, int64_t, >) \
+ V(I64GeS, int64_t, >=) \
+ V(F32Add, float, +) \
+ V(F32Mul, float, *) \
+ V(F32Div, float, /) \
+ V(F32Eq, float, ==) \
+ V(F32Ne, float, !=) \
+ V(F32Lt, float, <) \
+ V(F32Le, float, <=) \
+ V(F32Gt, float, >) \
+ V(F32Ge, float, >=) \
+ V(F64Add, double, +) \
+ V(F64Mul, double, *) \
+ V(F64Div, double, /) \
+ V(F64Eq, double, ==) \
+ V(F64Ne, double, !=) \
+ V(F64Lt, double, <) \
+ V(F64Le, double, <=) \
+ V(F64Gt, double, >) \
+ V(F64Ge, double, >=)
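+
+// Each V(name, ctype, op) entry above expands (via EXECUTE_SIMPLE_BINOP in
+// the interpreter loop below) into a case that pops two {ctype} operands and
+// pushes the result of {lval op rval}.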
+
+#define FOREACH_OTHER_BINOP(V) \
+ V(I32DivS, int32_t) \
+ V(I32DivU, uint32_t) \
+ V(I32RemS, int32_t) \
+ V(I32RemU, uint32_t) \
+ V(I32Shl, uint32_t) \
+ V(I32ShrU, uint32_t) \
+ V(I32ShrS, int32_t) \
+ V(I64DivS, int64_t) \
+ V(I64DivU, uint64_t) \
+ V(I64RemS, int64_t) \
+ V(I64RemU, uint64_t) \
+ V(I64Shl, uint64_t) \
+ V(I64ShrU, uint64_t) \
+ V(I64ShrS, int64_t) \
+ V(I32Ror, int32_t) \
+ V(I32Rol, int32_t) \
+ V(I64Ror, int64_t) \
+ V(I64Rol, int64_t) \
+ V(F32Sub, float) \
+ V(F32Min, float) \
+ V(F32Max, float) \
+ V(F32CopySign, float) \
+ V(F64Min, double) \
+ V(F64Max, double) \
+ V(F64Sub, double) \
+ V(F64CopySign, double) \
+ V(I32AsmjsDivS, int32_t) \
+ V(I32AsmjsDivU, uint32_t) \
+ V(I32AsmjsRemS, int32_t) \
+ V(I32AsmjsRemU, uint32_t)
+
+#define FOREACH_OTHER_UNOP(V) \
+ V(I32Clz, uint32_t) \
+ V(I32Ctz, uint32_t) \
+ V(I32Popcnt, uint32_t) \
+ V(I32Eqz, uint32_t) \
+ V(I64Clz, uint64_t) \
+ V(I64Ctz, uint64_t) \
+ V(I64Popcnt, uint64_t) \
+ V(I64Eqz, uint64_t) \
+ V(F32Abs, float) \
+ V(F32Neg, float) \
+ V(F32Ceil, float) \
+ V(F32Floor, float) \
+ V(F32Trunc, float) \
+ V(F32NearestInt, float) \
+ V(F32Sqrt, float) \
+ V(F64Abs, double) \
+ V(F64Neg, double) \
+ V(F64Ceil, double) \
+ V(F64Floor, double) \
+ V(F64Trunc, double) \
+ V(F64NearestInt, double) \
+ V(F64Sqrt, double) \
+ V(I32SConvertF32, float) \
+ V(I32SConvertF64, double) \
+ V(I32UConvertF32, float) \
+ V(I32UConvertF64, double) \
+ V(I32ConvertI64, int64_t) \
+ V(I64SConvertF32, float) \
+ V(I64SConvertF64, double) \
+ V(I64UConvertF32, float) \
+ V(I64UConvertF64, double) \
+ V(I64SConvertI32, int32_t) \
+ V(I64UConvertI32, uint32_t) \
+ V(F32SConvertI32, int32_t) \
+ V(F32UConvertI32, uint32_t) \
+ V(F32SConvertI64, int64_t) \
+ V(F32UConvertI64, uint64_t) \
+ V(F32ConvertF64, double) \
+ V(F32ReinterpretI32, int32_t) \
+ V(F64SConvertI32, int32_t) \
+ V(F64UConvertI32, uint32_t) \
+ V(F64SConvertI64, int64_t) \
+ V(F64UConvertI64, uint64_t) \
+ V(F64ConvertF32, float) \
+ V(F64ReinterpretI64, int64_t) \
+ V(I32ReinterpretF32, float) \
+ V(I64ReinterpretF64, double) \
+ V(I32AsmjsSConvertF32, float) \
+ V(I32AsmjsUConvertF32, float) \
+ V(I32AsmjsSConvertF64, double) \
+ V(I32AsmjsUConvertF64, double)
+
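+// The division and remainder helpers below fold the WASM trapping rules into
+// an out-parameter; e.g. ExecuteI32DivS(INT32_MIN, -1, &trap) sets
+// kTrapDivUnrepresentable because +2^31 is not representable as int32_t, and
+// any zero divisor sets kTrapDivByZero.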
+static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapDivByZero;
+ return 0;
+ }
+ if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
+ *trap = kTrapDivUnrepresentable;
+ return 0;
+ }
+ return a / b;
+}
+
+static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b,
+ TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapDivByZero;
+ return 0;
+ }
+ return a / b;
+}
+
+static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapRemByZero;
+ return 0;
+ }
+ if (b == -1) return 0;
+ return a % b;
+}
+
+static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b,
+ TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapRemByZero;
+ return 0;
+ }
+ return a % b;
+}
+
+static inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
+ return a << (b & 0x1f);
+}
+
+static inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b,
+ TrapReason* trap) {
+ return a >> (b & 0x1f);
+}
+
+static inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
+ return a >> (b & 0x1f);
+}
+
+static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapDivByZero;
+ return 0;
+ }
+ if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
+ *trap = kTrapDivUnrepresentable;
+ return 0;
+ }
+ return a / b;
+}
+
+static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b,
+ TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapDivByZero;
+ return 0;
+ }
+ return a / b;
+}
+
+static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapRemByZero;
+ return 0;
+ }
+ if (b == -1) return 0;
+ return a % b;
+}
+
+static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b,
+ TrapReason* trap) {
+ if (b == 0) {
+ *trap = kTrapRemByZero;
+ return 0;
+ }
+ return a % b;
+}
+
+static inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
+ return a << (b & 0x3f);
+}
+
+static inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b,
+ TrapReason* trap) {
+ return a >> (b & 0x3f);
+}
+
+static inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
+ return a >> (b & 0x3f);
+}
+
+static inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
+ uint32_t shift = (b & 0x1f);
+ // Mask the complementary shift as well: when {shift} is 0, shifting a
+ // 32-bit value by 32 would be undefined behavior.
+ return (a >> shift) | (a << ((32 - shift) & 0x1f));
+}
+
+static inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
+ uint32_t shift = (b & 0x1f);
+ return (a << shift) | (a >> ((32 - shift) & 0x1f));
+}
+
+static inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
+ uint32_t shift = (b & 0x3f);
+ return (a >> shift) | (a << ((64 - shift) & 0x3f));
+}
+
+static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
+ uint32_t shift = (b & 0x3f);
+ return (a << shift) | (a >> ((64 - shift) & 0x3f));
+}
+
+static float quiet(float a) {
+ static const uint32_t kSignalingBit = 1 << 22;
+ uint32_t q = bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
+ if ((q & kSignalingBit) != 0) {
+ // On this platform the bit is set in quiet NaNs, so setting it quiets {a}.
+ return bit_cast<float>(bit_cast<uint32_t>(a) | kSignalingBit);
+ } else {
+ // On this platform the bit is set in signaling NaNs, so clear it.
+ return bit_cast<float>(bit_cast<uint32_t>(a) & ~kSignalingBit);
+ }
+}
+
+static double quiet(double a) {
+ static const uint64_t kSignalingBit = 1ULL << 51;
+ uint64_t q = bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
+ if ((q & kSignalingBit) != 0) {
+ // On this platform the bit is set in quiet NaNs, so setting it quiets {a}.
+ return bit_cast<double>(bit_cast<uint64_t>(a) | kSignalingBit);
+ } else {
+ // On this platform the bit is set in signaling NaNs, so clear it.
+ return bit_cast<double>(bit_cast<uint64_t>(a) & ~kSignalingBit);
+ }
+}
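+
+// Note: which branch the quiet() helpers above take is a platform property;
+// on x64 and ARM, quiet_NaN() has the discriminator bit set, while e.g.
+// legacy MIPS uses the opposite convention, hence the runtime check.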
+
+static inline float ExecuteF32Sub(float a, float b, TrapReason* trap) {
+ float result = a - b;
+ // Some architectures (e.g. MIPS) need extra checking to preserve the payload
+ // of a NaN operand.
+ if (result - result != 0) {
+ if (std::isnan(a)) return quiet(a);
+ if (std::isnan(b)) return quiet(b);
+ }
+ return result;
+}
+
+static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
+ return JSMin(a, b);
+}
+
+static inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
+ return JSMax(a, b);
+}
+
+static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
+ return copysignf(a, b);
+}
+
+static inline double ExecuteF64Sub(double a, double b, TrapReason* trap) {
+ double result = a - b;
+ // Some architectures (e.g. MIPS) need extra checking to preserve the payload
+ // of a NaN operand.
+ if (result - result != 0) {
+ if (std::isnan(a)) return quiet(a);
+ if (std::isnan(b)) return quiet(b);
+ }
+ return result;
+}
+
+static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
+ return JSMin(a, b);
+}
+
+static inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
+ return JSMax(a, b);
+}
+
+static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
+ return copysign(a, b);
+}
+
+static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b,
+ TrapReason* trap) {
+ if (b == 0) return 0;
+ if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
+ return std::numeric_limits<int32_t>::min();
+ }
+ return a / b;
+}
+
+static inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b,
+ TrapReason* trap) {
+ if (b == 0) return 0;
+ return a / b;
+}
+
+static inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b,
+ TrapReason* trap) {
+ if (b == 0) return 0;
+ if (b == -1) return 0;
+ return a % b;
+}
+
+static inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b,
+ TrapReason* trap) {
+ if (b == 0) return 0;
+ return a % b;
+}
+
+static inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
+ return DoubleToInt32(a);
+}
+
+static inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
+ return DoubleToUint32(a);
+}
+
+static inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
+ return DoubleToInt32(a);
+}
+
+static inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
+ return DoubleToUint32(a);
+}
+
+static int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
+ return base::bits::CountLeadingZeros32(val);
+}
+
+static uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
+ return base::bits::CountTrailingZeros32(val);
+}
+
+static uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
+ return word32_popcnt_wrapper(&val);
+}
+
+static inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
+ return val == 0 ? 1 : 0;
+}
+
+static int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
+ return base::bits::CountLeadingZeros64(val);
+}
+
+static inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
+ return base::bits::CountTrailingZeros64(val);
+}
+
+static inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
+ return word64_popcnt_wrapper(&val);
+}
+
+static inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
+ return val == 0 ? 1 : 0;
+}
+
+static inline float ExecuteF32Abs(float a, TrapReason* trap) {
+ return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
+}
+
+static inline float ExecuteF32Neg(float a, TrapReason* trap) {
+ return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
+}
+
+static inline float ExecuteF32Ceil(float a, TrapReason* trap) {
+ return ceilf(a);
+}
+
+static inline float ExecuteF32Floor(float a, TrapReason* trap) {
+ return floorf(a);
+}
+
+static inline float ExecuteF32Trunc(float a, TrapReason* trap) {
+ return truncf(a);
+}
+
+static inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
+ return nearbyintf(a);
+}
+
+static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
+ return sqrtf(a);
+}
+
+static inline double ExecuteF64Abs(double a, TrapReason* trap) {
+ return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
+}
+
+static inline double ExecuteF64Neg(double a, TrapReason* trap) {
+ return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
+}
+
+static inline double ExecuteF64Ceil(double a, TrapReason* trap) {
+ return ceil(a);
+}
+
+static inline double ExecuteF64Floor(double a, TrapReason* trap) {
+ return floor(a);
+}
+
+static inline double ExecuteF64Trunc(double a, TrapReason* trap) {
+ return trunc(a);
+}
+
+static inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
+ return nearbyint(a);
+}
+
+static inline double ExecuteF64Sqrt(double a, TrapReason* trap) {
+ return sqrt(a);
+}
+
+static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
+ // The upper bound is (INT32_MAX + 1), which is the lowest float-representable
+ // number above INT32_MAX which cannot be represented as int32.
+ float upper_bound = 2147483648.0f;
+ // We use INT32_MIN as a lower bound because (INT32_MIN - 1) is not
+ // representable as float, and no number between (INT32_MIN - 1) and INT32_MIN
+ // is.
+ float lower_bound = static_cast<float>(INT32_MIN);
+ if (a < upper_bound && a >= lower_bound) {
+ return static_cast<int32_t>(a);
+ }
+ *trap = kTrapFloatUnrepresentable;
+ return 0;
+}
+
+static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
+ // The upper bound is (INT32_MAX + 1), which is the lowest double-
+ // representable number above INT32_MAX which cannot be represented as int32.
+ double upper_bound = 2147483648.0;
+ // The lower bound is (INT32_MIN - 1), which is the greatest double-
+ // representable number below INT32_MIN which cannot be represented as int32.
+ double lower_bound = -2147483649.0;
+ if (a < upper_bound && a > lower_bound) {
+ return static_cast<int32_t>(a);
+ }
+ *trap = kTrapFloatUnrepresentable;
+ return 0;
+}
+
+static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
+ // The upper bound is (UINT32_MAX + 1), which is the lowest
+ // float-representable number above UINT32_MAX which cannot be represented as
+ // uint32.
+ double upper_bound = 4294967296.0;
+ double lower_bound = -1.0;
+ if (a < upper_bound && a > lower_bound) {
+ return static_cast<uint32_t>(a);
+ }
+ *trap = kTrapFloatUnrepresentable;
+ return 0;
+}
+
+static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
+ // The upper bound is (UINT32_MAX + 1), which is the lowest
+ // double-representable number above UINT32_MAX which cannot be represented as
+ // uint32.
+ double upper_bound = 4294967296.0;
+ double lower_bound = -1.0;
+ if (a < upper_bound && a > lower_bound) {
+ return static_cast<uint32_t>(a);
+ }
+ *trap = kTrapFloatUnrepresentable;
+ return 0;
+}
+
+static inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
+ return static_cast<uint32_t>(a & 0xFFFFFFFF);
+}
+
+static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
+ int64_t output;
+ if (!float32_to_int64_wrapper(&a, &output)) {
+ *trap = kTrapFloatUnrepresentable;
+ }
+ return output;
+}
+
+static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
+ int64_t output;
+ if (!float64_to_int64_wrapper(&a, &output)) {
+ *trap = kTrapFloatUnrepresentable;
+ }
+ return output;
+}
+
+static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
+ uint64_t output;
+ if (!float32_to_uint64_wrapper(&a, &output)) {
+ *trap = kTrapFloatUnrepresentable;
+ }
+ return output;
+}
+
+static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
+ uint64_t output;
+ if (!float64_to_uint64_wrapper(&a, &output)) {
+ *trap = kTrapFloatUnrepresentable;
+ }
+ return output;
+}
+
+static inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
+ return static_cast<int64_t>(a);
+}
+
+static inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
+ return static_cast<uint64_t>(a);
+}
+
+static inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
+ return static_cast<float>(a);
+}
+
+static inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
+ return static_cast<float>(a);
+}
+
+static inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
+ float output;
+ int64_to_float32_wrapper(&a, &output);
+ return output;
+}
+
+static inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
+ float output;
+ uint64_to_float32_wrapper(&a, &output);
+ return output;
+}
+
+static inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
+ return static_cast<float>(a);
+}
+
+static inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
+ return bit_cast<float>(a);
+}
+
+static inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
+ return static_cast<double>(a);
+}
+
+static inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
+ return static_cast<double>(a);
+}
+
+static inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
+ double output;
+ int64_to_float64_wrapper(&a, &output);
+ return output;
+}
+
+static inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
+ double output;
+ uint64_to_float64_wrapper(&a, &output);
+ return output;
+}
+
+static inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
+ return static_cast<double>(a);
+}
+
+static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
+ return bit_cast<double>(a);
+}
+
+static inline int32_t ExecuteI32ReinterpretF32(float a, TrapReason* trap) {
+ return bit_cast<int32_t>(a);
+}
+
+static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
+ return bit_cast<int64_t>(a);
+}
+
+enum InternalOpcode {
+#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
+ FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
+#undef DECL_INTERNAL_ENUM
+};
+
+static const char* OpcodeName(uint32_t val) {
+ switch (val) {
+#define DECL_INTERNAL_CASE(name, value) \
+ case kInternal##name: \
+ return "Internal" #name;
+ FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
+#undef DECL_INTERNAL_CASE
+ }
+ return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
+}
+
+static const int kRunSteps = 1000;
+
+// A helper class to compute the control transfers for each bytecode offset.
+// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
+// be directly executed without the need to dynamically track blocks.
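+// For example (illustrative), in the bytecode sequence
+//   block ... br 0 ... end
+// the map entry for the {br} records the pc delta to just past the matching
+// {end}, plus the number of stack slots to pop when the branch is taken.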
+class ControlTransfers : public ZoneObject {
+ public:
+ ControlTransferMap map_;
+
+ ControlTransfers(Zone* zone, size_t locals_encoded_size, const byte* start,
+ const byte* end)
+ : map_(zone) {
+ // A control reference, consisting of the source PC, the value depth at
+ // that point, and whether a value is explicitly passed
+ // (e.g. br/br_if/br_table with value).
+ struct CRef {
+ const byte* pc;
+ sp_t value_depth;
+ bool explicit_value;
+ };
+
+ // Represents a control flow label.
+ struct CLabel : public ZoneObject {
+ const byte* target;
+ size_t value_depth;
+ ZoneVector<CRef> refs;
+
+ CLabel(Zone* zone, size_t v)
+ : target(nullptr), value_depth(v), refs(zone) {}
+
+ // Bind this label to the given PC.
+ void Bind(ControlTransferMap* map, const byte* start, const byte* pc,
+ bool expect_value) {
+ DCHECK_NULL(target);
+ target = pc;
+ for (auto from : refs) {
+ auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
+ auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
+ ControlTransfer::StackAction action = ControlTransfer::kNoAction;
+ if (expect_value && !from.explicit_value) {
+ action = spdiff == 0 ? ControlTransfer::kPushVoid
+ : ControlTransfer::kPopAndRepush;
+ }
+ pc_t offset = static_cast<size_t>(from.pc - start);
+ (*map)[offset] = {pcdiff, spdiff, action};
+ }
+ }
+
+ // Reference this label from the given location.
+ void Ref(ControlTransferMap* map, const byte* start, CRef from) {
+ DCHECK_GE(from.value_depth, value_depth);
+ if (target) {
+ auto pcdiff = static_cast<pcdiff_t>(target - from.pc);
+ auto spdiff = static_cast<spdiff_t>(from.value_depth - value_depth);
+ pc_t offset = static_cast<size_t>(from.pc - start);
+ (*map)[offset] = {pcdiff, spdiff, ControlTransfer::kNoAction};
+ } else {
+ refs.push_back(from);
+ }
+ }
+ };
+
+ // An entry in the control stack.
+ struct Control {
+ const byte* pc;
+ CLabel* end_label;
+ CLabel* else_label;
+
+ void Ref(ControlTransferMap* map, const byte* start, const byte* from_pc,
+ size_t from_value_depth, bool explicit_value) {
+ end_label->Ref(map, start, {from_pc, from_value_depth, explicit_value});
+ }
+ };
+
+ // Compute the ControlTransfer map.
+ // This works by maintaining a stack of control constructs similar to the
+ // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
+ // bytecodes with their target, as well as determining whether the current
+ // bytecodes are within the true or false block of an if.
+ // The value stack depth is tracked as {value_depth} and is needed to
+ // determine how many values to pop off the stack for explicit and
+ // implicit control flow.
+
+ std::vector<Control> control_stack;
+ size_t value_depth = 0;
+ for (BytecodeIterator i(start + locals_encoded_size, end); i.has_next();
+ i.next()) {
+ WasmOpcode opcode = i.current();
+ TRACE("@%u: control %s (depth = %zu)\n", i.pc_offset(),
+ WasmOpcodes::OpcodeName(opcode), value_depth);
+ switch (opcode) {
+ case kExprBlock: {
+ TRACE("control @%u $%zu: Block\n", i.pc_offset(), value_depth);
+ CLabel* label = new (zone) CLabel(zone, value_depth);
+ control_stack.push_back({i.pc(), label, nullptr});
+ break;
+ }
+ case kExprLoop: {
+ TRACE("control @%u $%zu: Loop\n", i.pc_offset(), value_depth);
+ CLabel* label1 = new (zone) CLabel(zone, value_depth);
+ CLabel* label2 = new (zone) CLabel(zone, value_depth);
+ control_stack.push_back({i.pc(), label1, nullptr});
+ control_stack.push_back({i.pc(), label2, nullptr});
+ label2->Bind(&map_, start, i.pc(), false);
+ break;
+ }
+ case kExprIf: {
+ TRACE("control @%u $%zu: If\n", i.pc_offset(), value_depth);
+ value_depth--;
+ CLabel* end_label = new (zone) CLabel(zone, value_depth);
+ CLabel* else_label = new (zone) CLabel(zone, value_depth);
+ control_stack.push_back({i.pc(), end_label, else_label});
+ else_label->Ref(&map_, start, {i.pc(), value_depth, false});
+ break;
+ }
+ case kExprElse: {
+ Control* c = &control_stack.back();
+ TRACE("control @%u $%zu: Else\n", i.pc_offset(), value_depth);
+ c->end_label->Ref(&map_, start, {i.pc(), value_depth, false});
+ value_depth = c->end_label->value_depth;
+ DCHECK_NOT_NULL(c->else_label);
+ c->else_label->Bind(&map_, start, i.pc() + 1, false);
+ c->else_label = nullptr;
+ break;
+ }
+ case kExprEnd: {
+ Control* c = &control_stack.back();
+ TRACE("control @%u $%zu: End\n", i.pc_offset(), value_depth);
+ if (c->end_label->target) {
+ // Only loops have bound labels.
+ DCHECK_EQ(kExprLoop, *c->pc);
+ control_stack.pop_back();
+ c = &control_stack.back();
+ }
+ if (c->else_label)
+ c->else_label->Bind(&map_, start, i.pc() + 1, true);
+ c->end_label->Ref(&map_, start, {i.pc(), value_depth, false});
+ c->end_label->Bind(&map_, start, i.pc() + 1, true);
+ value_depth = c->end_label->value_depth + 1;
+ control_stack.pop_back();
+ break;
+ }
+ case kExprBr: {
+ BreakDepthOperand operand(&i, i.pc());
+ TRACE("control @%u $%zu: Br[arity=%u, depth=%u]\n", i.pc_offset(),
+ value_depth, operand.arity, operand.depth);
+ value_depth -= operand.arity;
+ control_stack[control_stack.size() - operand.depth - 1].Ref(
+ &map_, start, i.pc(), value_depth, operand.arity > 0);
+ value_depth++;
+ break;
+ }
+ case kExprBrIf: {
+ BreakDepthOperand operand(&i, i.pc());
+ TRACE("control @%u $%zu: BrIf[arity=%u, depth=%u]\n", i.pc_offset(),
+ value_depth, operand.arity, operand.depth);
+ value_depth -= (operand.arity + 1);
+ control_stack[control_stack.size() - operand.depth - 1].Ref(
+ &map_, start, i.pc(), value_depth, operand.arity > 0);
+ value_depth++;
+ break;
+ }
+ case kExprBrTable: {
+ BranchTableOperand operand(&i, i.pc());
+ TRACE("control @%u $%zu: BrTable[arity=%u count=%u]\n", i.pc_offset(),
+ value_depth, operand.arity, operand.table_count);
+ value_depth -= (operand.arity + 1);
+ for (uint32_t j = 0; j < operand.table_count + 1; ++j) {
+ uint32_t target = operand.read_entry(&i, j);
+ control_stack[control_stack.size() - target - 1].Ref(
+ &map_, start, i.pc() + j, value_depth, operand.arity > 0);
+ }
+ value_depth++;
+ break;
+ }
+ default: {
+ value_depth = value_depth - OpcodeArity(i.pc(), end) + 1;
+ break;
+ }
+ }
+ }
+ }
+
+ ControlTransfer Lookup(pc_t from) {
+ auto result = map_.find(from);
+ if (result == map_.end()) {
+ V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
+ }
+ return result->second;
+ }
+};
+
+// Code and metadata needed to execute a function.
+struct InterpreterCode {
+ const WasmFunction* function; // wasm function
+ AstLocalDecls locals; // local declarations
+ const byte* orig_start; // start of original code
+ const byte* orig_end; // end of original code
+ byte* start; // start of (maybe altered) code
+ byte* end; // end of (maybe altered) code
+ ControlTransfers* targets; // helper for control flow.
+
+ const byte* at(pc_t pc) { return start + pc; }
+};
+
+// The main storage for interpreter code. It maps {WasmFunction} to the
+// metadata needed to execute each function.
+class CodeMap {
+ public:
+ Zone* zone_;
+ const WasmModule* module_;
+ ZoneVector<InterpreterCode> interpreter_code_;
+
+ CodeMap(const WasmModule* module, Zone* zone)
+ : zone_(zone), module_(module), interpreter_code_(zone) {
+ if (module == nullptr) return;
+ for (size_t i = 0; i < module->functions.size(); ++i) {
+ const WasmFunction* function = &module->functions[i];
+ const byte* code_start =
+ module->module_start + function->code_start_offset;
+ const byte* code_end = module->module_start + function->code_end_offset;
+ AddFunction(function, code_start, code_end);
+ }
+ }
+
+ InterpreterCode* FindCode(const WasmFunction* function) {
+ if (function->func_index < interpreter_code_.size()) {
+ InterpreterCode* code = &interpreter_code_[function->func_index];
+ DCHECK_EQ(function, code->function);
+ return code;
+ }
+ return nullptr;
+ }
+
+ InterpreterCode* GetCode(uint32_t function_index) {
+ CHECK_LT(function_index, interpreter_code_.size());
+ return Preprocess(&interpreter_code_[function_index]);
+ }
+
+ InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
+ if (table_index >= module_->function_tables.size()) return nullptr;
+ const WasmIndirectFunctionTable* table =
+ &module_->function_tables[table_index];
+ if (entry_index >= table->values.size()) return nullptr;
+ uint32_t index = table->values[entry_index];
+ if (index >= interpreter_code_.size()) return nullptr;
+ return GetCode(index);
+ }
+
+ InterpreterCode* Preprocess(InterpreterCode* code) {
+ if (code->targets == nullptr && code->start) {
+ // Compute the control targets map and the local declarations.
+ CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
+ code->targets =
+ new (zone_) ControlTransfers(zone_, code->locals.decls_encoded_size,
+ code->orig_start, code->orig_end);
+ }
+ return code;
+ }
+
+ int AddFunction(const WasmFunction* function, const byte* code_start,
+ const byte* code_end) {
+ InterpreterCode code = {
+ function, AstLocalDecls(zone_), code_start,
+ code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
+ nullptr};
+
+ DCHECK_EQ(interpreter_code_.size(), function->func_index);
+ interpreter_code_.push_back(code);
+ return static_cast<int>(interpreter_code_.size()) - 1;
+ }
+
+ bool SetFunctionCode(const WasmFunction* function, const byte* start,
+ const byte* end) {
+ InterpreterCode* code = FindCode(function);
+ if (code == nullptr) return false;
+ code->targets = nullptr;
+ code->orig_start = start;
+ code->orig_end = end;
+ code->start = const_cast<byte*>(start);
+ code->end = const_cast<byte*>(end);
+ Preprocess(code);
+ return true;
+ }
+};
+
+// Responsible for executing code directly.
+class ThreadImpl : public WasmInterpreter::Thread {
+ public:
+ ThreadImpl(Zone* zone, CodeMap* codemap, WasmModuleInstance* instance)
+ : codemap_(codemap),
+ instance_(instance),
+ stack_(zone),
+ frames_(zone),
+ state_(WasmInterpreter::STOPPED),
+ break_pc_(kInvalidPc),
+ trap_reason_(kTrapCount) {}
+
+ virtual ~ThreadImpl() {}
+
+ //==========================================================================
+ // Implementation of public interface for WasmInterpreter::Thread.
+ //==========================================================================
+
+ virtual WasmInterpreter::State state() { return state_; }
+
+ virtual void PushFrame(const WasmFunction* function, WasmVal* args) {
+ InterpreterCode* code = codemap()->FindCode(function);
+ CHECK_NOT_NULL(code);
+ frames_.push_back({code, 0, 0, stack_.size()});
+ for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
+ stack_.push_back(args[i]);
+ }
+ frames_.back().ret_pc = InitLocals(code);
+ TRACE(" => PushFrame(#%u @%zu)\n", code->function->func_index,
+ frames_.back().ret_pc);
+ }
+
+ virtual WasmInterpreter::State Run() {
+ do {
+ TRACE(" => Run()\n");
+ if (state_ == WasmInterpreter::STOPPED ||
+ state_ == WasmInterpreter::PAUSED) {
+ state_ = WasmInterpreter::RUNNING;
+ Execute(frames_.back().code, frames_.back().ret_pc, kRunSteps);
+ }
+ } while (state_ == WasmInterpreter::STOPPED);
+ return state_;
+ }
+
+ virtual WasmInterpreter::State Step() {
+ TRACE(" => Step()\n");
+ if (state_ == WasmInterpreter::STOPPED ||
+ state_ == WasmInterpreter::PAUSED) {
+ state_ = WasmInterpreter::RUNNING;
+ Execute(frames_.back().code, frames_.back().ret_pc, 1);
+ }
+ return state_;
+ }
+
+ virtual void Pause() { UNIMPLEMENTED(); }
+
+ virtual void Reset() {
+ TRACE("----- RESET -----\n");
+ stack_.clear();
+ frames_.clear();
+ state_ = WasmInterpreter::STOPPED;
+ trap_reason_ = kTrapCount;
+ }
+
+ virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }
+
+ virtual const WasmFrame* GetFrame(int index) {
+ UNIMPLEMENTED();
+ return nullptr;
+ }
+
+ virtual WasmFrame* GetMutableFrame(int index) {
+ UNIMPLEMENTED();
+ return nullptr;
+ }
+
+ virtual WasmVal GetReturnValue() {
+ if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
+ CHECK_EQ(WasmInterpreter::FINISHED, state_);
+ CHECK_EQ(1, stack_.size());
+ return stack_[0];
+ }
+
+ virtual pc_t GetBreakpointPc() { return break_pc_; }
+
+ bool Terminated() {
+ return state_ == WasmInterpreter::TRAPPED ||
+ state_ == WasmInterpreter::FINISHED;
+ }
+
+ private:
+ // Entries on the stack of functions being evaluated.
+ struct Frame {
+ InterpreterCode* code;
+ pc_t call_pc;
+ pc_t ret_pc;
+ sp_t sp;
+
+ // Limit of parameters.
+ sp_t plimit() { return sp + code->function->sig->parameter_count(); }
+ // Limit of locals.
+ sp_t llimit() { return plimit() + code->locals.total_local_count; }
+ };
+
+ CodeMap* codemap_;
+ WasmModuleInstance* instance_;
+ ZoneVector<WasmVal> stack_;
+ ZoneVector<Frame> frames_;
+ WasmInterpreter::State state_;
+ pc_t break_pc_;
+ TrapReason trap_reason_;
+
+ CodeMap* codemap() { return codemap_; }
+ WasmModuleInstance* instance() { return instance_; }
+ const WasmModule* module() { return instance_->module; }
+
+ void DoTrap(TrapReason trap, pc_t pc) {
+ state_ = WasmInterpreter::TRAPPED;
+ trap_reason_ = trap;
+ CommitPc(pc);
+ }
+
+ // Push a frame with arguments already on the stack.
+ void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
+ CHECK_NOT_NULL(code);
+ DCHECK(!frames_.empty());
+ frames_.back().call_pc = call_pc;
+ frames_.back().ret_pc = ret_pc;
+ size_t arity = code->function->sig->parameter_count();
+ DCHECK_GE(stack_.size(), arity);
+ // The parameters will overlap the arguments already on the stack.
+ frames_.push_back({code, 0, 0, stack_.size() - arity});
+ frames_.back().ret_pc = InitLocals(code);
+ TRACE(" => push func#%u @%zu\n", code->function->func_index,
+ frames_.back().ret_pc);
+ }
+
+ pc_t InitLocals(InterpreterCode* code) {
+ for (auto p : code->locals.local_types) {
+ WasmVal val;
+ switch (p.first) {
+ case kAstI32:
+ val = WasmVal(static_cast<int32_t>(0));
+ break;
+ case kAstI64:
+ val = WasmVal(static_cast<int64_t>(0));
+ break;
+ case kAstF32:
+ val = WasmVal(static_cast<float>(0));
+ break;
+ case kAstF64:
+ val = WasmVal(static_cast<double>(0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ stack_.insert(stack_.end(), p.second, val);
+ }
+ return code->locals.decls_encoded_size;
+ }
+
+ void CommitPc(pc_t pc) {
+ if (!frames_.empty()) {
+ frames_.back().ret_pc = pc;
+ }
+ }
+
+ bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
+ if (pc == break_pc_) {
+ break_pc_ = kInvalidPc;
+ return true;
+ }
+ return false;
+ }
+
+ bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, WasmVal val) {
+ DCHECK_GT(frames_.size(), 0u);
+ stack_.resize(frames_.back().sp);
+ frames_.pop_back();
+ if (frames_.size() == 0) {
+ // A return from the top frame terminates the execution.
+ state_ = WasmInterpreter::FINISHED;
+ stack_.clear();
+ stack_.push_back(val);
+ TRACE(" => finish\n");
+ return false;
+ } else {
+ // Return to caller frame.
+ Frame* top = &frames_.back();
+ *code = top->code;
+ *pc = top->ret_pc;
+ *limit = top->code->end - top->code->start;
+ if (top->code->start[top->call_pc] == kExprCallIndirect ||
+ (top->code->orig_start &&
+ top->code->orig_start[top->call_pc] == kExprCallIndirect)) {
+ // UGLY: An indirect call has the additional function index on the
+ // stack.
+ stack_.pop_back();
+ }
+ TRACE(" => pop func#%u @%zu\n", (*code)->function->func_index, *pc);
+
+ stack_.push_back(val);
+ return true;
+ }
+ }
+
+ void DoCall(InterpreterCode* target, pc_t* pc, pc_t ret_pc, pc_t* limit) {
+ PushFrame(target, *pc, ret_pc);
+ *pc = frames_.back().ret_pc;
+ *limit = target->end - target->start;
+ }
+
+ // Adjust the program counter {pc} and the stack contents according to the
+ // code's precomputed control transfer map. Returns the difference between
+ // the new pc and the old pc.
+ int DoControlTransfer(InterpreterCode* code, pc_t pc) {
+ auto target = code->targets->Lookup(pc);
+ switch (target.action) {
+ case ControlTransfer::kNoAction:
+ TRACE(" action [sp-%u]\n", target.spdiff);
+ PopN(target.spdiff);
+ break;
+ case ControlTransfer::kPopAndRepush: {
+ WasmVal val = Pop();
+ TRACE(" action [pop x, sp-%u, push x]\n", target.spdiff - 1);
+ DCHECK_GE(target.spdiff, 1u);
+ PopN(target.spdiff - 1);
+ Push(pc, val);
+ break;
+ }
+ case ControlTransfer::kPushVoid:
+ TRACE(" action [sp-%u, push void]\n", target.spdiff);
+ PopN(target.spdiff);
+ Push(pc, WasmVal());
+ break;
+ }
+ return target.pcdiff;
+ }
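+ // Example: a {kPopAndRepush} with {spdiff} == 2 pops the branch value,
+ // drops one more slot, and re-pushes the value, turning [.. a b v] into
+ // [.. a v].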
+
+ void Execute(InterpreterCode* code, pc_t pc, int max) {
+ Decoder decoder(code->start, code->end);
+ pc_t limit = code->end - code->start;
+ while (true) {
+ if (max-- <= 0) {
+ // Maximum number of instructions reached.
+ state_ = WasmInterpreter::PAUSED;
+ return CommitPc(pc);
+ }
+
+ if (pc >= limit) {
+ // Fell off the end of the code; do an implicit return.
+ TRACE("@%-3zu: ImplicitReturn\n", pc);
+ WasmVal val = PopArity(code->function->sig->return_count());
+ if (!DoReturn(&code, &pc, &limit, val)) return;
+ decoder.Reset(code->start, code->end);
+ continue;
+ }
+
+ const char* skip = " ";
+ int len = 1;
+ byte opcode = code->start[pc];
+ byte orig = opcode;
+ if (opcode == kInternalBreakpoint) {
+ orig = code->orig_start[pc];
+ if (SkipBreakpoint(code, pc)) {
+ // Skip the breakpoint by dispatching on the original opcode.
+ skip = "[skip] ";
+ } else {
+ state_ = WasmInterpreter::PAUSED;
+ TRACE("@%-3zu: [break] %-24s:", pc,
+ WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+ TraceValueStack();
+ TRACE("\n");
+ break_pc_ = pc;
+ return CommitPc(pc);
+ }
+ }
+
+ USE(skip);
+ TRACE("@%-3zu: %s%-24s:", pc, skip,
+ WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
+ TraceValueStack();
+ TRACE("\n");
+
+ switch (orig) {
+ case kExprNop:
+ Push(pc, WasmVal());
+ break;
+ case kExprBlock:
+ case kExprLoop: {
+ // Do nothing.
+ break;
+ }
+ case kExprIf: {
+ WasmVal cond = Pop();
+ bool is_true = cond.to<uint32_t>() != 0;
+ if (is_true) {
+ // fall through to the true block.
+ TRACE(" true => fallthrough\n");
+ } else {
+ len = DoControlTransfer(code, pc);
+ TRACE(" false => @%zu\n", pc + len);
+ }
+ break;
+ }
+ case kExprElse: {
+ len = DoControlTransfer(code, pc);
+ TRACE(" end => @%zu\n", pc + len);
+ break;
+ }
+ case kExprSelect: {
+ WasmVal cond = Pop();
+ WasmVal fval = Pop();
+ WasmVal tval = Pop();
+ Push(pc, cond.to<int32_t>() != 0 ? tval : fval);
+ break;
+ }
+ case kExprBr: {
+ BreakDepthOperand operand(&decoder, code->at(pc));
+ WasmVal val = PopArity(operand.arity);
+ len = DoControlTransfer(code, pc);
+ TRACE(" br => @%zu\n", pc + len);
+ if (operand.arity > 0) Push(pc, val);
+ break;
+ }
+ case kExprBrIf: {
+ BreakDepthOperand operand(&decoder, code->at(pc));
+ WasmVal cond = Pop();
+ WasmVal val = PopArity(operand.arity);
+ bool is_true = cond.to<uint32_t>() != 0;
+ if (is_true) {
+ len = DoControlTransfer(code, pc);
+ TRACE(" br_if => @%zu\n", pc + len);
+ if (operand.arity > 0) Push(pc, val);
+ } else {
+ TRACE(" false => fallthrough\n");
+ len = 1 + operand.length;
+ Push(pc, WasmVal());
+ }
+ break;
+ }
+ case kExprBrTable: {
+ BranchTableOperand operand(&decoder, code->at(pc));
+ uint32_t key = Pop().to<uint32_t>();
+ WasmVal val = PopArity(operand.arity);
+ if (key >= operand.table_count) key = operand.table_count;
+ len = DoControlTransfer(code, pc + key) + key;
+ TRACE(" br[%u] => @%zu\n", key, pc + len);
+ if (operand.arity > 0) Push(pc, val);
+ break;
+ }
+ case kExprReturn: {
+ ReturnArityOperand operand(&decoder, code->at(pc));
+ WasmVal val = PopArity(operand.arity);
+ if (!DoReturn(&code, &pc, &limit, val)) return;
+ decoder.Reset(code->start, code->end);
+ continue;
+ }
+ case kExprUnreachable: {
+ DoTrap(kTrapUnreachable, pc);
+ return CommitPc(pc);
+ }
+ case kExprEnd: {
+ len = DoControlTransfer(code, pc);
+ DCHECK_EQ(1, len);
+ break;
+ }
+ case kExprI8Const: {
+ ImmI8Operand operand(&decoder, code->at(pc));
+ Push(pc, WasmVal(operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprI32Const: {
+ ImmI32Operand operand(&decoder, code->at(pc));
+ Push(pc, WasmVal(operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprI64Const: {
+ ImmI64Operand operand(&decoder, code->at(pc));
+ Push(pc, WasmVal(operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprF32Const: {
+ ImmF32Operand operand(&decoder, code->at(pc));
+ Push(pc, WasmVal(operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprF64Const: {
+ ImmF64Operand operand(&decoder, code->at(pc));
+ Push(pc, WasmVal(operand.value));
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprGetLocal: {
+ LocalIndexOperand operand(&decoder, code->at(pc));
+ Push(pc, stack_[frames_.back().sp + operand.index]);
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprSetLocal: {
+ LocalIndexOperand operand(&decoder, code->at(pc));
+ WasmVal val = Pop();
+ stack_[frames_.back().sp + operand.index] = val;
+ Push(pc, val);
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprCallFunction: {
+ CallFunctionOperand operand(&decoder, code->at(pc));
+ InterpreterCode* target = codemap()->GetCode(operand.index);
+ DoCall(target, &pc, pc + 1 + operand.length, &limit);
+ code = target;
+ decoder.Reset(code->start, code->end);
+ continue;
+ }
+ case kExprCallIndirect: {
+ CallIndirectOperand operand(&decoder, code->at(pc));
+ size_t index = stack_.size() - operand.arity - 1;
+ DCHECK_LT(index, stack_.size());
+ uint32_t entry_index = stack_[index].to<uint32_t>();
+ // Assume only one table for now.
+ DCHECK_LE(module()->function_tables.size(), 1u);
+ InterpreterCode* target = codemap()->GetIndirectCode(0, entry_index);
+ if (target == nullptr) {
+ return DoTrap(kTrapFuncInvalid, pc);
+ } else if (target->function->sig_index != operand.index) {
+ return DoTrap(kTrapFuncSigMismatch, pc);
+ }
+
+ DoCall(target, &pc, pc + 1 + operand.length, &limit);
+ code = target;
+ decoder.Reset(code->start, code->end);
+ continue;
+ }
+ case kExprCallImport: {
+ UNIMPLEMENTED();
+ break;
+ }
+ case kExprGetGlobal: {
+ GlobalIndexOperand operand(&decoder, code->at(pc));
+ const WasmGlobal* global = &module()->globals[operand.index];
+ byte* ptr = instance()->globals_start + global->offset;
+ LocalType type = global->type;
+ WasmVal val;
+ if (type == kAstI32) {
+ val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
+ } else if (type == kAstI64) {
+ val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
+ } else if (type == kAstF32) {
+ val = WasmVal(*reinterpret_cast<float*>(ptr));
+ } else if (type == kAstF64) {
+ val = WasmVal(*reinterpret_cast<double*>(ptr));
+ } else {
+ UNREACHABLE();
+ }
+ Push(pc, val);
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprSetGlobal: {
+ GlobalIndexOperand operand(&decoder, code->at(pc));
+ const WasmGlobal* global = &module()->globals[operand.index];
+ byte* ptr = instance()->globals_start + global->offset;
+ LocalType type = global->type;
+ WasmVal val = Pop();
+ if (type == kAstI32) {
+ *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
+ } else if (type == kAstI64) {
+ *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
+ } else if (type == kAstF32) {
+ *reinterpret_cast<float*>(ptr) = val.to<float>();
+ } else if (type == kAstF64) {
+ *reinterpret_cast<double*>(ptr) = val.to<double>();
+ } else {
+ UNREACHABLE();
+ }
+ Push(pc, val);
+ len = 1 + operand.length;
+ break;
+ }
+
+#define LOAD_CASE(name, ctype, mtype) \
+ case kExpr##name: { \
+ MemoryAccessOperand operand(&decoder, code->at(pc)); \
+ uint32_t index = Pop().to<uint32_t>(); \
+ size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
+ if (operand.offset > effective_mem_size || \
+ index > (effective_mem_size - operand.offset)) { \
+ return DoTrap(kTrapMemOutOfBounds, pc); \
+ } \
+ byte* addr = instance()->mem_start + operand.offset + index; \
+ WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr))); \
+ Push(pc, result); \
+ len = 1 + operand.length; \
+ break; \
+ }
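+// The two comparisons above are written to avoid overflow in
+// {offset + index + sizeof(mtype)}: e.g. with mem_size 65536 and a 4-byte
+// access, effective_mem_size is 65532, so any {offset + index} <= 65532 is
+// in bounds.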
+
+ LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
+ LOAD_CASE(I32LoadMem8U, int32_t, uint8_t);
+ LOAD_CASE(I32LoadMem16S, int32_t, int16_t);
+ LOAD_CASE(I32LoadMem16U, int32_t, uint16_t);
+ LOAD_CASE(I64LoadMem8S, int64_t, int8_t);
+ LOAD_CASE(I64LoadMem8U, int64_t, uint8_t);
+ LOAD_CASE(I64LoadMem16S, int64_t, int16_t);
+ LOAD_CASE(I64LoadMem16U, int64_t, uint16_t);
+ LOAD_CASE(I64LoadMem32S, int64_t, int32_t);
+ LOAD_CASE(I64LoadMem32U, int64_t, uint32_t);
+ LOAD_CASE(I32LoadMem, int32_t, int32_t);
+ LOAD_CASE(I64LoadMem, int64_t, int64_t);
+ LOAD_CASE(F32LoadMem, float, float);
+ LOAD_CASE(F64LoadMem, double, double);
+#undef LOAD_CASE
+
+#define STORE_CASE(name, ctype, mtype) \
+ case kExpr##name: { \
+ MemoryAccessOperand operand(&decoder, code->at(pc)); \
+ WasmVal val = Pop(); \
+ uint32_t index = Pop().to<uint32_t>(); \
+ size_t effective_mem_size = instance()->mem_size - sizeof(mtype); \
+ if (operand.offset > effective_mem_size || \
+ index > (effective_mem_size - operand.offset)) { \
+ return DoTrap(kTrapMemOutOfBounds, pc); \
+ } \
+ byte* addr = instance()->mem_start + operand.offset + index; \
+ WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \
+ Push(pc, val); \
+ len = 1 + operand.length; \
+ break; \
+ }
+
+ STORE_CASE(I32StoreMem8, int32_t, int8_t);
+ STORE_CASE(I32StoreMem16, int32_t, int16_t);
+ STORE_CASE(I64StoreMem8, int64_t, int8_t);
+ STORE_CASE(I64StoreMem16, int64_t, int16_t);
+ STORE_CASE(I64StoreMem32, int64_t, int32_t);
+ STORE_CASE(I32StoreMem, int32_t, int32_t);
+ STORE_CASE(I64StoreMem, int64_t, int64_t);
+ STORE_CASE(F32StoreMem, float, float);
+ STORE_CASE(F64StoreMem, double, double);
+#undef STORE_CASE
+
+#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
+ case kExpr##name: { \
+ uint32_t index = Pop().to<uint32_t>(); \
+ ctype result; \
+ if (index >= (instance()->mem_size - sizeof(mtype))) { \
+ result = defval; \
+ } else { \
+ byte* addr = instance()->mem_start + index; \
+ /* TODO(titzer): alignment for asmjs load mem? */ \
+ result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
+ } \
+ Push(pc, WasmVal(result)); \
+ break; \
+ }
+ ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
+ ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
+ ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
+ ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
+ ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
+ ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
+ std::numeric_limits<float>::quiet_NaN());
+ ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
+ std::numeric_limits<double>::quiet_NaN());
+#undef ASMJS_LOAD_CASE
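+ // Note: unlike the trapping WASM loads above, the asm.js loads yield a
+ // default value (0 or NaN) on an out-of-bounds index, and the asm.js
+ // stores below silently ignore out-of-bounds writes.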
+
+#define ASMJS_STORE_CASE(name, ctype, mtype) \
+ case kExpr##name: { \
+ WasmVal val = Pop(); \
+ uint32_t index = Pop().to<uint32_t>(); \
+ if (index < (instance()->mem_size - sizeof(mtype))) { \
+ byte* addr = instance()->mem_start + index; \
+ /* TODO(titzer): alignment for asmjs store mem? */ \
+ *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
+ } \
+ Push(pc, val); \
+ break; \
+ }
+
+ ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
+ ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
+ ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
+ ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
+ ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
+#undef ASMJS_STORE_CASE
+
+ case kExprMemorySize: {
+ Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size)));
+ break;
+ }
+#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
+ case kExpr##name: { \
+ WasmVal rval = Pop(); \
+ WasmVal lval = Pop(); \
+ WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \
+ Push(pc, result); \
+ break; \
+ }
+ FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
+#undef EXECUTE_SIMPLE_BINOP
+
+#define EXECUTE_OTHER_BINOP(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ volatile ctype rval = Pop().to<ctype>(); \
+ volatile ctype lval = Pop().to<ctype>(); \
+ WasmVal result(Execute##name(lval, rval, &trap)); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(pc, result); \
+ break; \
+ }
+ FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
+#undef EXECUTE_OTHER_BINOP
+
+#define EXECUTE_OTHER_UNOP(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ volatile ctype val = Pop().to<ctype>(); \
+ WasmVal result(Execute##name(val, &trap)); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(pc, result); \
+ break; \
+ }
+ FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
+#undef EXECUTE_OTHER_UNOP
+
+ default:
+ V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
+ code->start[pc], OpcodeName(code->start[pc]));
+ UNREACHABLE();
+ }
+
+ pc += len;
+ }
+ UNREACHABLE(); // The decoding loop above can only be exited via return.
+ }
+
+ WasmVal Pop() {
+ DCHECK_GT(stack_.size(), 0u);
+ DCHECK_GT(frames_.size(), 0u);
+ DCHECK_GT(stack_.size(), frames_.back().llimit()); // can't pop into locals
+ WasmVal val = stack_.back();
+ stack_.pop_back();
+ return val;
+ }
+
+ void PopN(int n) {
+ DCHECK_GE(stack_.size(), static_cast<size_t>(n));
+ DCHECK_GT(frames_.size(), 0u);
+ size_t nsize = stack_.size() - n;
+ DCHECK_GE(nsize, frames_.back().llimit()); // can't pop into locals
+ stack_.resize(nsize);
+ }
+
+ WasmVal PopArity(size_t arity) {
+ if (arity == 0) return WasmVal();
+ CHECK_EQ(1, arity);
+ return Pop();
+ }
+
+ void Push(pc_t pc, WasmVal val) {
+ // TODO(titzer): store PC as well?
+ stack_.push_back(val);
+ }
+
+ void TraceStack(const char* phase, pc_t pc) {
+ if (FLAG_trace_wasm_interpreter) {
+ PrintF("%s @%zu", phase, pc);
+ UNIMPLEMENTED();
+ PrintF("\n");
+ }
+ }
+
+ void TraceValueStack() {
+ Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
+ sp_t sp = top ? top->sp : 0;
+ sp_t plimit = top ? top->plimit() : 0;
+ sp_t llimit = top ? top->llimit() : 0;
+ if (FLAG_trace_wasm_interpreter) {
+ for (size_t i = sp; i < stack_.size(); ++i) {
+ if (i < plimit)
+ PrintF(" p%zu:", i);
+ else if (i < llimit)
+ PrintF(" l%zu:", i);
+ else
+ PrintF(" s%zu:", i);
+ WasmVal val = stack_[i];
+ switch (val.type) {
+ case kAstI32:
+ PrintF("i32:%d", val.to<int32_t>());
+ break;
+ case kAstI64:
+ PrintF("i64:%" PRId64 "", val.to<int64_t>());
+ break;
+ case kAstF32:
+ PrintF("f32:%f", val.to<float>());
+ break;
+ case kAstF64:
+ PrintF("f64:%lf", val.to<double>());
+ break;
+ case kAstStmt:
+ PrintF("void");
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ }
+};
+
+//============================================================================
+// The implementation details of the interpreter.
+//============================================================================
+class WasmInterpreterInternals : public ZoneObject {
+ public:
+ WasmModuleInstance* instance_;
+ CodeMap codemap_;
+ ZoneVector<ThreadImpl*> threads_;
+
+ WasmInterpreterInternals(Zone* zone, WasmModuleInstance* instance)
+ : instance_(instance),
+ codemap_(instance_ ? instance_->module : nullptr, zone),
+ threads_(zone) {
+ threads_.push_back(new ThreadImpl(zone, &codemap_, instance));
+ }
+
+ void Delete() {
+ // TODO(titzer): CFI doesn't like threads in the ZoneVector.
+ for (auto t : threads_) delete t;
+ threads_.resize(0);
+ }
+};
+
+//============================================================================
+// Implementation of the public interface of the interpreter.
+//============================================================================
+WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance,
+ base::AccountingAllocator* allocator)
+ : zone_(allocator),
+ internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
+
+WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
+
+void WasmInterpreter::Run() { internals_->threads_[0]->Run(); }
+
+void WasmInterpreter::Pause() { internals_->threads_[0]->Pause(); }
+
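+// Breakpoints are implemented by copying the function code on first use and
+// patching the byte at {pc} with the internal opcode kInternalBreakpoint
+// (0xFF); {orig_start} keeps the original opcode for resuming execution.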
+bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
+ bool enabled) {
+ InterpreterCode* code = internals_->codemap_.FindCode(function);
+ if (!code) return false;
+ size_t size = static_cast<size_t>(code->end - code->start);
+ // Check bounds for {pc}.
+ if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+ // Make a copy of the code before enabling a breakpoint.
+ if (enabled && code->orig_start == code->start) {
+ code->start = reinterpret_cast<byte*>(zone_.New(size));
+ memcpy(code->start, code->orig_start, size);
+ code->end = code->start + size;
+ }
+ bool prev = code->start[pc] == kInternalBreakpoint;
+ if (enabled) {
+ code->start[pc] = kInternalBreakpoint;
+ } else {
+ code->start[pc] = code->orig_start[pc];
+ }
+ return prev;
+}
+
+bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
+ InterpreterCode* code = internals_->codemap_.FindCode(function);
+ if (!code) return false;
+ size_t size = static_cast<size_t>(code->end - code->start);
+ // Check bounds for {pc}.
+ if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+ // Check if a breakpoint is present at that place in the code.
+ return code->start[pc] == kInternalBreakpoint;
+}
+
+bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+int WasmInterpreter::GetThreadCount() {
+ return 1; // only one thread for now.
+}
+
+WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
+ CHECK_EQ(0, id); // only one thread for now.
+ return internals_->threads_[id];
+}
+
+WasmVal WasmInterpreter::GetLocalVal(const WasmFrame* frame, int index) {
+ CHECK_GE(index, 0);
+ UNIMPLEMENTED();
+ WasmVal none;
+ none.type = kAstStmt;
+ return none;
+}
+
+WasmVal WasmInterpreter::GetExprVal(const WasmFrame* frame, int pc) {
+ UNIMPLEMENTED();
+ WasmVal none;
+ none.type = kAstStmt;
+ return none;
+}
+
+void WasmInterpreter::SetLocalVal(WasmFrame* frame, int index, WasmVal val) {
+ UNIMPLEMENTED();
+}
+
+void WasmInterpreter::SetExprVal(WasmFrame* frame, int pc, WasmVal val) {
+ UNIMPLEMENTED();
+}
+
+size_t WasmInterpreter::GetMemorySize() {
+ return internals_->instance_->mem_size;
+}
+
+WasmVal WasmInterpreter::ReadMemory(size_t offset) {
+ UNIMPLEMENTED();
+ return WasmVal();
+}
+
+void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
+ UNIMPLEMENTED();
+}
+
+int WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
+ return internals_->codemap_.AddFunction(function, nullptr, nullptr);
+}
+
+bool WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
+ const byte* start,
+ const byte* end) {
+ return internals_->codemap_.SetFunctionCode(function, start, end);
+}
+
+ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
+ Zone* zone, const byte* start, const byte* end) {
+ ControlTransfers targets(zone, 0, start, end);
+ return targets.map_;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
new file mode 100644
index 0000000000..b106a202d2
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -0,0 +1,209 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_INTERPRETER_H_
+#define V8_WASM_INTERPRETER_H_
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace base {
+class AccountingAllocator;
+}
+
+namespace internal {
+namespace wasm {
+
+// Forward declarations.
+struct WasmFunction;
+struct WasmModuleInstance;
+class WasmInterpreterInternals;
+
+typedef size_t pc_t;
+typedef size_t sp_t;
+typedef int32_t pcdiff_t;
+typedef uint32_t spdiff_t;
+
+const pc_t kInvalidPc = 0x80000000;
+
+// Visible for testing. A {ControlTransfer} helps the interpreter figure out
+// the target program counter and stack manipulations for a branch.
+struct ControlTransfer {
+ enum StackAction { kNoAction, kPopAndRepush, kPushVoid };
+ pcdiff_t pcdiff; // adjustment to the program counter (positive or negative).
+ spdiff_t spdiff; // number of elements to pop off the stack.
+ StackAction action; // action to perform on the stack.
+};
+typedef ZoneMap<pc_t, ControlTransfer> ControlTransferMap;
+
+// Macro for defining union members.
+#define FOREACH_UNION_MEMBER(V) \
+ V(i32, kAstI32, int32_t) \
+ V(u32, kAstI32, uint32_t) \
+ V(i64, kAstI64, int64_t) \
+ V(u64, kAstI64, uint64_t) \
+ V(f32, kAstF32, float) \
+ V(f64, kAstF64, double)
+
+// Representation of values within the interpreter.
+struct WasmVal {
+ LocalType type;
+ union {
+#define DECLARE_FIELD(field, localtype, ctype) ctype field;
+ FOREACH_UNION_MEMBER(DECLARE_FIELD)
+#undef DECLARE_FIELD
+ } val;
+
+ WasmVal() : type(kAstStmt) {}
+
+#define DECLARE_CONSTRUCTOR(field, localtype, ctype) \
+ explicit WasmVal(ctype v) : type(localtype) { val.field = v; }
+ FOREACH_UNION_MEMBER(DECLARE_CONSTRUCTOR)
+#undef DECLARE_CONSTRUCTOR
+
+ template <typename T>
+ T to() {
+ UNREACHABLE();
+ }
+};
+
+#define DECLARE_CAST(field, localtype, ctype) \
+ template <> \
+ inline ctype WasmVal::to() { \
+ CHECK_EQ(localtype, type); \
+ return val.field; \
+ }
+FOREACH_UNION_MEMBER(DECLARE_CAST)
+#undef DECLARE_CAST
+
+template <>
+inline void WasmVal::to() {
+ CHECK_EQ(kAstStmt, type);
+}
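+
+// Usage (illustrative):
+//   WasmVal v(static_cast<int32_t>(7));
+//   v.to<int32_t>();  // returns 7.
+//   v.to<float>();    // CHECK-fails: {v} holds a kAstI32.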
+
+// Representation of frames within the interpreter.
+class WasmFrame {
+ public:
+ const WasmFunction* function() const { return function_; }
+ int pc() const { return pc_; }
+
+ private:
+ friend class WasmInterpreter;
+
+ WasmFrame(const WasmFunction* function, int pc, int fp, int sp)
+ : function_(function), pc_(pc), fp_(fp), sp_(sp) {}
+
+ const WasmFunction* function_;
+ int pc_;
+ int fp_;
+ int sp_;
+};
+
+// An interpreter capable of executing WASM.
+class WasmInterpreter {
+ public:
+ // State machine for a Thread:
+ // +---------------Run()-----------+
+ // V |
+ // STOPPED ---Run()--> RUNNING ------Pause()-----+-> PAUSED <------+
+ // | | | / | |
+ // | | +---- Breakpoint ---+ +-- Step() --+
+ // | |
+ // | +------------ Trap --------------> TRAPPED
+ // +------------- Finish -------------> FINISHED
+ enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
+
+ // Representation of a thread in the interpreter.
+ class Thread {
+ public:
+ // Execution control.
+ virtual State state() = 0;
+ virtual void PushFrame(const WasmFunction* function, WasmVal* args) = 0;
+ virtual State Run() = 0;
+ virtual State Step() = 0;
+ virtual void Pause() = 0;
+ virtual void Reset() = 0;
+ virtual ~Thread() {}
+
+ // Stack inspection and modification.
+ virtual pc_t GetBreakpointPc() = 0;
+ virtual int GetFrameCount() = 0;
+ virtual const WasmFrame* GetFrame(int index) = 0;
+ virtual WasmFrame* GetMutableFrame(int index) = 0;
+ virtual WasmVal GetReturnValue() = 0;
+
+ // Thread-specific breakpoints.
+ bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
+ bool GetBreakpoint(const WasmFunction* function, int pc);
+ };
+
+ WasmInterpreter(WasmModuleInstance* instance,
+ base::AccountingAllocator* allocator);
+ ~WasmInterpreter();
+
+ //==========================================================================
+ // Execution controls.
+ //==========================================================================
+ void Run();
+ void Pause();
+
+ // Set a breakpoint at {pc} in {function} to be {enabled}. Returns the
+ // previous state of the breakpoint at {pc}.
+ bool SetBreakpoint(const WasmFunction* function, pc_t pc, bool enabled);
+
+ // Gets the current state of the breakpoint at {pc} in {function}.
+ bool GetBreakpoint(const WasmFunction* function, pc_t pc);
+
+ // Enable or disable tracing for {function}. Returns the previous state.
+ bool SetTracing(const WasmFunction* function, bool enabled);
+
+ //==========================================================================
+ // Thread iteration and inspection.
+ //==========================================================================
+ int GetThreadCount();
+ Thread* GetThread(int id);
+
+ //==========================================================================
+ // Stack frame inspection.
+ //==========================================================================
+ WasmVal GetLocalVal(const WasmFrame* frame, int index);
+ WasmVal GetExprVal(const WasmFrame* frame, int pc);
+ void SetLocalVal(WasmFrame* frame, int index, WasmVal val);
+ void SetExprVal(WasmFrame* frame, int pc, WasmVal val);
+
+ //==========================================================================
+ // Memory access.
+ //==========================================================================
+ size_t GetMemorySize();
+ WasmVal ReadMemory(size_t offset);
+ void WriteMemory(size_t offset, WasmVal val);
+
+ //==========================================================================
+ // Testing functionality.
+ //==========================================================================
+ // Manually adds a function to this interpreter, returning the index of the
+ // function.
+ int AddFunctionForTesting(const WasmFunction* function);
+ // Manually adds code to the interpreter for the given function.
+ bool SetFunctionCodeForTesting(const WasmFunction* function,
+ const byte* start, const byte* end);
+
+ // Computes the control targets for the given bytecode as
+ // {pc offset, sp offset} pairs. Used internally in the interpreter,
+ // but exposed for testing.
+ static ControlTransferMap ComputeControlTransfersForTesting(Zone* zone,
+ const byte* start,
+ const byte* end);
+
+ private:
+ Zone zone_;
+ WasmInterpreterInternals* internals_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_INTERPRETER_H_
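
For illustration only (a sketch, not part of the patch): one way a caller might drive the thread state machine documented above. `instance` and `func` are assumed to come from an already-decoded module; setup and error handling are elided, and the helper name is hypothetical.

    void RunToCompletion(WasmModuleInstance* instance, const WasmFunction* func,
                         v8::base::AccountingAllocator* allocator) {
      WasmInterpreter interpreter(instance, allocator);
      WasmInterpreter::Thread* thread = interpreter.GetThread(0);
      WasmVal args[] = {WasmVal(int32_t{21}), WasmVal(int32_t{2})};
      thread->PushFrame(func, args);  // thread is STOPPED, frame ready to run
      if (thread->Run() == WasmInterpreter::FINISHED) {
        int32_t result = thread->GetReturnValue().to<int32_t>();
        // result holds func's return value; a trapping body yields TRAPPED.
      }
    }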
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 83009d7c81..10ae43c78b 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -2,19 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
#include "src/api-natives.h"
+#include "src/api.h"
+#include "src/asmjs/asm-js.h"
+#include "src/asmjs/asm-typer.h"
+#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/compiler.h"
+#include "src/execution.h"
#include "src/factory.h"
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects.h"
-#include "src/parsing/parser.h"
-#include "src/typing-asm.h"
+#include "src/parsing/parse-info.h"
-#include "src/wasm/asm-wasm-builder.h"
#include "src/wasm/encoder.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
@@ -34,31 +37,25 @@ struct RawBuffer {
size_t size() { return static_cast<size_t>(end - start); }
};
-
-RawBuffer GetRawBufferArgument(
- ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() < 1) {
- thrower.Error("Argument 0 must be an array buffer");
- return {nullptr, nullptr};
- }
-
+RawBuffer GetRawBufferSource(
+ v8::Local<v8::Value> source, ErrorThrower* thrower) {
const byte* start = nullptr;
const byte* end = nullptr;
- if (args[0]->IsArrayBuffer()) {
+ if (source->IsArrayBuffer()) {
// A raw array buffer was passed.
- Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
+ Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source);
ArrayBuffer::Contents contents = buffer->GetContents();
start = reinterpret_cast<const byte*>(contents.Data());
end = start + contents.ByteLength();
if (start == nullptr || end == start) {
- thrower.Error("ArrayBuffer argument is empty");
+ thrower->Error("ArrayBuffer argument is empty");
}
- } else if (args[0]->IsTypedArray()) {
+ } else if (source->IsTypedArray()) {
// A TypedArray was passed.
- Local<TypedArray> array = Local<TypedArray>::Cast(args[0]);
+ Local<TypedArray> array = Local<TypedArray>::Cast(source);
Local<ArrayBuffer> buffer = array->Buffer();
ArrayBuffer::Contents contents = buffer->GetContents();
@@ -68,22 +65,25 @@ RawBuffer GetRawBufferArgument(
end = start + array->ByteLength();
if (start == nullptr || end == start) {
- thrower.Error("ArrayBuffer argument is empty");
+ thrower->Error("ArrayBuffer argument is empty");
}
} else {
- thrower.Error("Argument 0 must be an ArrayBuffer or Uint8Array");
+ thrower->Error("Argument 0 must be an ArrayBuffer or Uint8Array");
}
return {start, end};
}
-
void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "WASM.verifyModule()");
+ ErrorThrower thrower(isolate, "Wasm.verifyModule()");
- RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (args.Length() < 1) {
+ thrower.Error("Argument 0 must be a buffer source");
+ return;
+ }
+ RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
if (thrower.error()) return;
i::Zone zone(isolate->allocator());
@@ -98,13 +98,16 @@ void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (result.val) delete result.val;
}
-
void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "WASM.verifyFunction()");
+ ErrorThrower thrower(isolate, "Wasm.verifyFunction()");
- RawBuffer buffer = GetRawBufferArgument(thrower, args);
+ if (args.Length() < 1) {
+ thrower.Error("Argument 0 must be a buffer source");
+ return;
+ }
+ RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
if (thrower.error()) return;
internal::wasm::FunctionResult result;
@@ -123,60 +126,20 @@ void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (result.val) delete result.val;
}
-v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(
- i::ParseInfo* info, i::Handle<i::Object> foreign, ErrorThrower* thrower) {
- info->set_global();
- info->set_lazy(false);
- info->set_allow_lazy_parsing(false);
- info->set_toplevel(true);
-
- if (!i::Compiler::ParseAndAnalyze(info)) {
- return nullptr;
- }
-
- info->set_literal(
- info->scope()->declarations()->at(0)->AsFunctionDeclaration()->fun());
-
- v8::internal::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
- info->literal());
- if (i::FLAG_enable_simd_asmjs) {
- typer.set_allow_simd(true);
- }
- if (!typer.Validate()) {
- thrower->Error("Asm.js validation failed: %s", typer.error_message());
- return nullptr;
- }
-
- auto module =
- v8::internal::wasm::AsmWasmBuilder(info->isolate(), info->zone(),
- info->literal(), foreign, &typer)
- .Run();
-
- return module;
-}
-
-void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
- const byte* start, const byte* end,
- ErrorThrower* thrower,
- internal::wasm::ModuleOrigin origin) {
+i::MaybeHandle<i::JSObject> InstantiateModule(
+ const v8::FunctionCallbackInfo<v8::Value>& args, const byte* start,
+ const byte* end, ErrorThrower* thrower,
+ internal::wasm::ModuleOrigin origin = i::wasm::kWasmOrigin) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
- if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
- Local<Object> obj = Local<Object>::Cast(args[2]);
- i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
- memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
- }
-
// Decode but avoid a redundant pass over function bodies for verification.
// Verification will happen during compilation.
i::Zone zone(isolate->allocator());
internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
isolate, &zone, start, end, false, origin);
- if (result.failed() && origin == internal::wasm::kAsmJsOrigin) {
- thrower->Error("Asm.js converted module failed to decode");
- } else if (result.failed()) {
+ i::MaybeHandle<i::JSObject> object;
+ if (result.failed()) {
thrower->Failed("", result);
} else {
// Success. Instantiate the module and return the object.
@@ -186,64 +149,172 @@ void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
}
- i::MaybeHandle<i::JSObject> object =
- result.val->Instantiate(isolate, ffi, memory);
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+ Local<Object> obj = Local<Object>::Cast(args[2]);
+ i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
+ memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+ }
- if (!object.is_null()) {
- args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ i::MaybeHandle<i::FixedArray> compiled_module =
+ result.val->CompileFunctions(isolate, thrower);
+ if (!thrower->error()) {
+ DCHECK(!compiled_module.is_null());
+ object = i::wasm::WasmModule::Instantiate(
+ isolate, compiled_module.ToHandleChecked(), ffi, memory);
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
}
}
if (result.val) delete result.val;
+ return object;
}
-
-void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
+ ErrorThrower thrower(isolate, "Wasm.instantiateModule()");
- if (!args[0]->IsString()) {
- thrower.Error("Asm module text should be a string");
+ if (args.Length() < 1) {
+ thrower.Error("Argument 0 must be a buffer source");
return;
}
+ RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
+ if (buffer.start == nullptr) return;
- i::Factory* factory = isolate->factory();
- i::Zone zone(isolate->allocator());
- Local<String> source = Local<String>::Cast(args[0]);
- i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
- i::ParseInfo info(&zone, script);
+ InstantiateModule(args, buffer.start, buffer.end, &thrower);
+}
- i::Handle<i::Object> foreign;
- if (args.Length() > 1 && args[1]->IsObject()) {
- Local<Object> local_foreign = Local<Object>::Cast(args[1]);
- foreign = v8::Utils::OpenHandle(*local_foreign);
+static i::MaybeHandle<i::JSObject> CreateModuleObject(
+ v8::Isolate* isolate, const v8::Local<v8::Value> source,
+ ErrorThrower* thrower) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::MaybeHandle<i::JSObject> nothing;
+
+ RawBuffer buffer = GetRawBufferSource(source, thrower);
+ if (buffer.start == nullptr) return i::MaybeHandle<i::JSObject>();
+
+ DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
+ i::Zone zone(i_isolate->allocator());
+ i::wasm::ModuleResult result = i::wasm::DecodeWasmModule(
+ i_isolate, &zone, buffer.start, buffer.end, false, i::wasm::kWasmOrigin);
+ std::unique_ptr<const i::wasm::WasmModule> decoded_module(result.val);
+ if (result.failed()) {
+ thrower->Failed("", result);
+ return nothing;
}
+ i::MaybeHandle<i::FixedArray> compiled_module =
+ decoded_module->CompileFunctions(i_isolate, thrower);
+ if (compiled_module.is_null()) return nothing;
+
+ return i::wasm::CreateCompiledModuleObject(i_isolate,
+ compiled_module.ToHandleChecked());
+}
+
+void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+ "WebAssembly.compile()");
- auto module = TranslateAsmModule(&info, foreign, &thrower);
- if (module == nullptr) {
+ if (args.Length() < 1) {
+ thrower.Error("Argument 0 must be a buffer source");
return;
}
-
- InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower,
- internal::wasm::kAsmJsOrigin);
+ i::MaybeHandle<i::JSObject> module_obj =
+ CreateModuleObject(isolate, args[0], &thrower);
+
+ Local<Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Promise::Resolver> resolver;
+ if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
+ if (thrower.error()) {
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ } else {
+ resolver->Resolve(context, Utils::ToLocal(module_obj.ToHandleChecked()));
+ }
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(resolver->GetPromise());
}
+void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
+ "WebAssembly.Module()");
-void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() < 1) {
+ thrower.Error("Argument 0 must be a buffer source");
+ return;
+ }
+ i::MaybeHandle<i::JSObject> module_obj =
+ CreateModuleObject(isolate, args[0], &thrower);
+ if (module_obj.is_null()) return;
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
+}
+
+void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "WASM.instantiateModule()");
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- RawBuffer buffer = GetRawBufferArgument(thrower, args);
- if (buffer.start == nullptr) return;
+ ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+
+ if (args.Length() < 1) {
+ thrower.Error(
+ "Argument 0 must be provided, and must be a WebAssembly.Module object");
+ return;
+ }
- InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower,
- internal::wasm::kWasmOrigin);
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ i::Handle<i::Symbol> module_sym(i_context->wasm_module_sym());
+ i::MaybeHandle<i::Object> source =
+ i::Object::GetProperty(Utils::OpenHandle(*args[0]), module_sym);
+ if (source.is_null() || source.ToHandleChecked()->IsUndefined(i_isolate)) {
+ thrower.Error("Argument 0 must be a WebAssembly.Module");
+ return;
+ }
+
+ Local<Object> obj = Local<Object>::Cast(args[0]);
+
+ i::Handle<i::JSObject> module_obj =
+ i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
+ if (module_obj->GetInternalFieldCount() < 1 ||
+ !module_obj->GetInternalField(0)->IsFixedArray()) {
+ thrower.Error("Argument 0 is an invalid WebAssembly.Module");
+ return;
+ }
+
+ i::Handle<i::FixedArray> compiled_code = i::Handle<i::FixedArray>(
+ i::FixedArray::cast(module_obj->GetInternalField(0)));
+
+ i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
+ if (args.Length() > 1 && args[1]->IsObject()) {
+ Local<Object> obj = Local<Object>::Cast(args[1]);
+ ffi = i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
+ }
+
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+ Local<Object> obj = Local<Object>::Cast(args[2]);
+ i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
+ memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+ }
+ i::MaybeHandle<i::JSObject> instance =
+ i::wasm::WasmModule::Instantiate(i_isolate, compiled_code, ffi, memory);
+ if (instance.is_null()) {
+ thrower.Error("Could not instantiate module");
+ return;
+ }
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(Utils::ToLocal(instance.ToHandleChecked()));
}
} // namespace
-
// TODO(titzer): we use the API to create the function template because the
// internal guts are too ugly to replicate here.
static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
@@ -253,15 +324,13 @@ static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
return v8::Utils::OpenHandle(*local);
}
-
namespace internal {
static Handle<String> v8_str(Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
}
-
-static void InstallFunc(Isolate* isolate, Handle<JSObject> object,
- const char* str, FunctionCallback func) {
+static Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
+ const char* str, FunctionCallback func) {
Handle<String> name = v8_str(isolate, str);
Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
Handle<JSFunction> function =
@@ -269,17 +338,60 @@ static void InstallFunc(Isolate* isolate, Handle<JSObject> object,
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
JSObject::AddProperty(object, name, function, attributes);
+ return function;
}
-
void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
+ if (!FLAG_expose_wasm && !FLAG_validate_asm) {
+ return;
+ }
+
+ Factory* factory = isolate->factory();
+
// Setup wasm function map.
Handle<Context> context(global->native_context(), isolate);
InstallWasmFunctionMap(isolate, context);
- // Bind the WASM object.
- Factory* factory = isolate->factory();
- Handle<String> name = v8_str(isolate, "Wasm");
+ if (!FLAG_expose_wasm) {
+ return;
+ }
+
+ // Bind the experimental WASM object.
+ // TODO(rossberg, titzer): remove once it's no longer needed.
+ {
+ Handle<String> name = v8_str(isolate, "Wasm");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons, Handle<Object>(context->initial_object_prototype(), isolate));
+ cons->shared()->set_instance_class_name(*name);
+ Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
+ JSObject::AddProperty(global, name, wasm_object, attributes);
+
+ // Install functions on the WASM object.
+ InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
+ InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
+ InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
+
+ {
+ // Add the Wasm.experimentalVersion property.
+ Handle<String> name = v8_str(isolate, "experimentalVersion");
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ Handle<Smi> value =
+ Handle<Smi>(Smi::FromInt(wasm::kWasmVersion), isolate);
+ JSObject::AddProperty(wasm_object, name, value, attributes);
+ }
+ }
+
+ // Create private symbols.
+ Handle<Symbol> module_sym = isolate->factory()->NewPrivateSymbol();
+ Handle<Symbol> instance_sym = isolate->factory()->NewPrivateSymbol();
+ context->set_wasm_module_sym(*module_sym);
+ context->set_wasm_instance_sym(*instance_sym);
+
+ // Bind the WebAssembly object.
+ Handle<String> name = v8_str(isolate, "WebAssembly");
Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(
cons, Handle<Object>(context->initial_object_prototype(), isolate));
@@ -288,15 +400,21 @@ void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
JSObject::AddProperty(global, name, wasm_object, attributes);
- // Install functions on the WASM object.
- InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
- InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
- InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
- InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
- InstantiateModuleFromAsm);
+ // Install static methods on WebAssembly object.
+ InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
+ Handle<JSFunction> module_constructor =
+ InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
+ Handle<JSFunction> instance_constructor =
+ InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
+ i::Handle<i::Map> map = isolate->factory()->NewMap(
+ i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + i::kPointerSize);
+ module_constructor->set_prototype_or_initial_map(*map);
+ map->SetConstructor(*module_constructor);
+
+ context->set_wasm_module_constructor(*module_constructor);
+ context->set_wasm_instance_constructor(*instance_constructor);
}
-
void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
// TODO(titzer): Move this to bootstrapper.cc??
@@ -308,9 +426,12 @@ void WasmJs::InstallWasmFunctionMap(Isolate* isolate, Handle<Context> context) {
CHECK_EQ(0, internal_fields);
int pre_allocated =
prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
- int instance_size;
- int in_object_properties;
- JSFunction::CalculateInstanceSizeHelper(instance_type, internal_fields + 1,
+ int instance_size = 0;
+ int in_object_properties = 0;
+ int wasm_internal_fields = internal_fields + 1 // module instance object
+ + 1 // function arity
+ + 1; // function signature
+ JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_internal_fields,
0, &instance_size,
&in_object_properties);
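
For illustration only (a sketch, not part of the patch): reading back the three wasm-specific internal fields reserved in InstallWasmFunctionMap above. The indices follow the JSFunctionExportInternalField enum in wasm-module.cc; the helper name is hypothetical.

    void InspectExportedFunction(i::Handle<i::JSFunction> fn) {
      i::Object* instance = fn->GetInternalField(0);   // module instance object
      int arity = i::Smi::cast(fn->GetInternalField(1))->value();  // arity
      i::Object* signature = fn->GetInternalField(2);  // ByteArray signature
      // A real caller would consume these values; shown here for layout only.
    }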
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index e7305aa164..ded9a1a90b 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -7,7 +7,7 @@
#ifndef V8_SHARED
#include "src/allocation.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
#else
#include "include/v8.h"
#include "src/base/compiler-specific.h"
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index d9199e82fb..abd57d505a 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -7,6 +7,8 @@
#include "src/wasm/wasm-opcodes.h"
+#include "src/zone-containers.h"
+
#define U32_LE(v) \
static_cast<byte>(v), static_cast<byte>((v) >> 8), \
static_cast<byte>((v) >> 16), static_cast<byte>((v) >> 24)
@@ -58,27 +60,38 @@
//------------------------------------------------------------------------------
#define WASM_NOP kExprNop
-#define WASM_BLOCK(count, ...) kExprBlock, static_cast<byte>(count), __VA_ARGS__
-#define WASM_INFINITE_LOOP kExprLoop, 1, kExprBr, 0, kExprNop
-#define WASM_LOOP(count, ...) kExprLoop, static_cast<byte>(count), __VA_ARGS__
-#define WASM_IF(cond, tstmt) kExprIf, cond, tstmt
-#define WASM_IF_ELSE(cond, tstmt, fstmt) kExprIfElse, cond, tstmt, fstmt
-#define WASM_SELECT(cond, tval, fval) kExprSelect, cond, tval, fval
-#define WASM_BR(depth) kExprBr, static_cast<byte>(depth), kExprNop
+#define ARITY_0 0
+#define ARITY_1 1
+#define DEPTH_0 0
+#define DEPTH_1 1
+
+#define WASM_BLOCK(...) kExprBlock, __VA_ARGS__, kExprEnd
+#define WASM_INFINITE_LOOP kExprLoop, kExprBr, ARITY_0, DEPTH_0, kExprEnd
+#define WASM_LOOP(...) kExprLoop, __VA_ARGS__, kExprEnd
+#define WASM_IF(cond, tstmt) cond, kExprIf, tstmt, kExprEnd
+#define WASM_IF_ELSE(cond, tstmt, fstmt) \
+ cond, kExprIf, tstmt, kExprElse, fstmt, kExprEnd
+#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
+#define WASM_BR(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
#define WASM_BR_IF(depth, cond) \
- kExprBrIf, static_cast<byte>(depth), kExprNop, cond
-#define WASM_BRV(depth, val) kExprBr, static_cast<byte>(depth), val
+ cond, kExprBrIf, ARITY_0, static_cast<byte>(depth)
+#define WASM_BRV(depth, val) val, kExprBr, ARITY_1, static_cast<byte>(depth)
#define WASM_BRV_IF(depth, val, cond) \
- kExprBrIf, static_cast<byte>(depth), val, cond
-#define WASM_BREAK(depth) kExprBr, static_cast<byte>(depth + 1), kExprNop
-#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth), kExprNop
-#define WASM_BREAKV(depth, val) kExprBr, static_cast<byte>(depth + 1), val
-#define WASM_RETURN0 kExprReturn
-#define WASM_RETURN(...) kExprReturn, __VA_ARGS__
+ val, cond, kExprBrIf, ARITY_1, static_cast<byte>(depth)
+#define WASM_BREAK(depth) kExprBr, ARITY_0, static_cast<byte>(depth + 1)
+#define WASM_CONTINUE(depth) kExprBr, ARITY_0, static_cast<byte>(depth)
+#define WASM_BREAKV(depth, val) \
+ val, kExprBr, ARITY_1, static_cast<byte>(depth + 1)
+#define WASM_RETURN0 kExprReturn, ARITY_0
+#define WASM_RETURN1(val) val, kExprReturn, ARITY_1
+#define WASM_RETURNN(count, ...) __VA_ARGS__, kExprReturn, count
#define WASM_UNREACHABLE kExprUnreachable
#define WASM_BR_TABLE(key, count, ...) \
- kExprBrTable, U32V_1(count), __VA_ARGS__, key
+ key, kExprBrTable, ARITY_0, U32V_1(count), __VA_ARGS__
+
+#define WASM_BR_TABLEV(val, key, count, ...) \
+ val, key, kExprBrTable, ARITY_1, U32V_1(count), __VA_ARGS__
#define WASM_CASE(x) static_cast<byte>(x), static_cast<byte>(x >> 8)
#define WASM_CASE_BR(x) static_cast<byte>(x), static_cast<byte>(0x80 | (x) >> 8)
@@ -119,13 +132,17 @@ inline void CheckI64v(int64_t value, int length) {
// A helper for encoding local declarations prepended to the body of a
// function.
+// TODO(titzer): move this to an appropriate header.
class LocalDeclEncoder {
public:
+ explicit LocalDeclEncoder(Zone* zone, FunctionSig* s = nullptr)
+ : sig(s), local_decls(zone), total(0) {}
+
// Prepend local declarations by creating a new buffer and copying data
// over. The new buffer must be delete[]'d by the caller.
- void Prepend(const byte** start, const byte** end) const {
+ void Prepend(Zone* zone, const byte** start, const byte** end) const {
size_t size = (*end - *start);
- byte* buffer = new byte[Size() + size];
+ byte* buffer = reinterpret_cast<byte*>(zone->New(Size() + size));
size_t pos = Emit(buffer);
memcpy(buffer + pos, *start, size);
pos += size;
@@ -136,7 +153,7 @@ class LocalDeclEncoder {
size_t Emit(byte* buffer) const {
size_t pos = 0;
pos = WriteUint32v(buffer, pos, static_cast<uint32_t>(local_decls.size()));
- for (size_t i = 0; i < local_decls.size(); i++) {
+ for (size_t i = 0; i < local_decls.size(); ++i) {
pos = WriteUint32v(buffer, pos, local_decls[i].first);
buffer[pos++] = WasmOpcodes::LocalTypeCodeFor(local_decls[i].second);
}
@@ -146,19 +163,16 @@ class LocalDeclEncoder {
// Add locals declarations to this helper. Return the index of the newly added
// local(s), with an optional adjustment for the parameters.
- uint32_t AddLocals(uint32_t count, LocalType type,
- FunctionSig* sig = nullptr) {
- if (count == 0) {
- return static_cast<uint32_t>((sig ? sig->parameter_count() : 0) +
- local_decls.size());
- }
- size_t pos = local_decls.size();
+ uint32_t AddLocals(uint32_t count, LocalType type) {
+ uint32_t result =
+ static_cast<uint32_t>(total + (sig ? sig->parameter_count() : 0));
+ total += count;
if (local_decls.size() > 0 && local_decls.back().second == type) {
count += local_decls.back().first;
local_decls.pop_back();
}
local_decls.push_back(std::pair<uint32_t, LocalType>(count, type));
- return static_cast<uint32_t>(pos + (sig ? sig->parameter_count() : 0));
+ return result;
}
size_t Size() const {
@@ -167,8 +181,14 @@ class LocalDeclEncoder {
return size;
}
+ bool has_sig() const { return sig != nullptr; }
+ FunctionSig* get_sig() const { return sig; }
+ void set_sig(FunctionSig* s) { sig = s; }
+
private:
- std::vector<std::pair<uint32_t, LocalType>> local_decls;
+ FunctionSig* sig;
+ ZoneVector<std::pair<uint32_t, LocalType>> local_decls;
+ size_t total;
size_t SizeofUint32v(uint32_t val) const {
size_t size = 1;
@@ -322,193 +342,267 @@ class LocalDeclEncoder {
static_cast<byte>(bit_cast<uint64_t>(val) >> 48), \
static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
-#define WASM_SET_LOCAL(index, val) kExprSetLocal, static_cast<byte>(index), val
-#define WASM_LOAD_GLOBAL(index) kExprLoadGlobal, static_cast<byte>(index)
-#define WASM_STORE_GLOBAL(index, val) \
- kExprStoreGlobal, static_cast<byte>(index), val
-#define WASM_LOAD_MEM(type, index) \
- static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
- ZERO_ALIGNMENT, ZERO_OFFSET, index
-#define WASM_STORE_MEM(type, index, val) \
- static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
- ZERO_ALIGNMENT, ZERO_OFFSET, index, val
-#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
- static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
- ZERO_ALIGNMENT, U32V_1(offset), index
-#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
- static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
- ZERO_ALIGNMENT, U32V_1(offset), index, val
-#define WASM_CALL_FUNCTION(index, ...) \
- kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
-#define WASM_CALL_IMPORT(index, ...) \
- kExprCallImport, static_cast<byte>(index), __VA_ARGS__
-#define WASM_CALL_INDIRECT(index, func, ...) \
- kExprCallIndirect, static_cast<byte>(index), func, __VA_ARGS__
-#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
-#define WASM_CALL_IMPORT0(index) kExprCallImport, static_cast<byte>(index)
+#define WASM_SET_LOCAL(index, val) val, kExprSetLocal, static_cast<byte>(index)
+#define WASM_GET_GLOBAL(index) kExprGetGlobal, static_cast<byte>(index)
+#define WASM_SET_GLOBAL(index, val) \
+ val, kExprSetGlobal, static_cast<byte>(index)
+#define WASM_LOAD_MEM(type, index) \
+ index, static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_STORE_MEM(type, index, val) \
+ index, val, \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ ZERO_ALIGNMENT, ZERO_OFFSET
+#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
+ index, static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ ZERO_ALIGNMENT, static_cast<byte>(offset)
+#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
+ index, val, \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ ZERO_ALIGNMENT, static_cast<byte>(offset)
+#define WASM_LOAD_MEM_ALIGNMENT(type, index, alignment) \
+ index, static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+ alignment, ZERO_OFFSET
+#define WASM_STORE_MEM_ALIGNMENT(type, index, alignment, val) \
+ index, val, \
+ static_cast<byte>( \
+ v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+ alignment, ZERO_OFFSET
+
+#define WASM_CALL_FUNCTION0(index) \
+ kExprCallFunction, 0, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION1(index, a) \
+ a, kExprCallFunction, 1, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION2(index, a, b) \
+ a, b, kExprCallFunction, 2, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION3(index, a, b, c) \
+ a, b, c, kExprCallFunction, 3, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION4(index, a, b, c, d) \
+ a, b, c, d, kExprCallFunction, 4, static_cast<byte>(index)
+#define WASM_CALL_FUNCTION5(index, a, b, c, d, e) \
+ a, b, c, d, e, kExprCallFunction, 5, static_cast<byte>(index)
+#define WASM_CALL_FUNCTIONN(arity, index, ...) \
+ __VA_ARGS__, kExprCallFunction, arity, static_cast<byte>(index)
+
+#define WASM_CALL_IMPORT0(index) kExprCallImport, 0, static_cast<byte>(index)
+#define WASM_CALL_IMPORT1(index, a) \
+ a, kExprCallImport, 1, static_cast<byte>(index)
+#define WASM_CALL_IMPORT2(index, a, b) \
+ a, b, kExprCallImport, 2, static_cast<byte>(index)
+#define WASM_CALL_IMPORT3(index, a, b, c) \
+ a, b, c, kExprCallImport, 3, static_cast<byte>(index)
+#define WASM_CALL_IMPORT4(index, a, b, c, d) \
+ a, b, c, d, kExprCallImport, 4, static_cast<byte>(index)
+#define WASM_CALL_IMPORT5(index, a, b, c, d, e) \
+ a, b, c, d, e, kExprCallImport, 5, static_cast<byte>(index)
+#define WASM_CALL_IMPORTN(arity, index, ...) \
+ __VA_ARGS__, kExprCallImport, U32V_1(arity), static_cast<byte>(index)
+
#define WASM_CALL_INDIRECT0(index, func) \
- kExprCallIndirect, static_cast<byte>(index), func
-#define WASM_NOT(x) kExprI32Eqz, x
+ func, kExprCallIndirect, 0, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT1(index, func, a) \
+ func, a, kExprCallIndirect, 1, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT2(index, func, a, b) \
+ func, a, b, kExprCallIndirect, 2, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT3(index, func, a, b, c) \
+ func, a, b, c, kExprCallIndirect, 3, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT4(index, func, a, b, c, d) \
+ func, a, b, c, d, kExprCallIndirect, 4, static_cast<byte>(index)
+#define WASM_CALL_INDIRECT5(index, func, a, b, c, d, e) \
+ func, a, b, c, d, e, kExprCallIndirect, 5, static_cast<byte>(index)
+#define WASM_CALL_INDIRECTN(arity, index, func, ...) \
+ func, __VA_ARGS__, kExprCallIndirect, U32V_1(arity), static_cast<byte>(index)
+
+#define WASM_NOT(x) x, kExprI32Eqz
+#define WASM_SEQ(...) __VA_ARGS__
//------------------------------------------------------------------------------
// Constructs that are composed of multiple bytecodes.
//------------------------------------------------------------------------------
-#define WASM_WHILE(x, y) kExprLoop, 1, kExprIf, x, kExprBr, 0, y
-#define WASM_INC_LOCAL(index) \
- kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
- static_cast<byte>(index), kExprI8Const, 1
-#define WASM_INC_LOCAL_BY(index, count) \
- kExprSetLocal, static_cast<byte>(index), kExprI32Add, kExprGetLocal, \
- static_cast<byte>(index), kExprI8Const, static_cast<int8_t>(count)
-
-#define WASM_UNOP(opcode, x) static_cast<byte>(opcode), x
-#define WASM_BINOP(opcode, x, y) static_cast<byte>(opcode), x, y
+#define WASM_WHILE(x, y) \
+ kExprLoop, x, kExprIf, y, kExprBr, ARITY_1, DEPTH_1, kExprEnd, kExprEnd
+#define WASM_INC_LOCAL(index) \
+ kExprGetLocal, static_cast<byte>(index), kExprI8Const, 1, kExprI32Add, \
+ kExprSetLocal, static_cast<byte>(index)
+#define WASM_INC_LOCAL_BY(index, count) \
+ kExprGetLocal, static_cast<byte>(index), kExprI8Const, \
+ static_cast<byte>(count), kExprI32Add, kExprSetLocal, \
+ static_cast<byte>(index)
+#define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
+#define WASM_BINOP(opcode, x, y) x, y, static_cast<byte>(opcode)
//------------------------------------------------------------------------------
// Int32 operations
//------------------------------------------------------------------------------
-#define WASM_I32_ADD(x, y) kExprI32Add, x, y
-#define WASM_I32_SUB(x, y) kExprI32Sub, x, y
-#define WASM_I32_MUL(x, y) kExprI32Mul, x, y
-#define WASM_I32_DIVS(x, y) kExprI32DivS, x, y
-#define WASM_I32_DIVU(x, y) kExprI32DivU, x, y
-#define WASM_I32_REMS(x, y) kExprI32RemS, x, y
-#define WASM_I32_REMU(x, y) kExprI32RemU, x, y
-#define WASM_I32_AND(x, y) kExprI32And, x, y
-#define WASM_I32_IOR(x, y) kExprI32Ior, x, y
-#define WASM_I32_XOR(x, y) kExprI32Xor, x, y
-#define WASM_I32_SHL(x, y) kExprI32Shl, x, y
-#define WASM_I32_SHR(x, y) kExprI32ShrU, x, y
-#define WASM_I32_SAR(x, y) kExprI32ShrS, x, y
-#define WASM_I32_ROR(x, y) kExprI32Ror, x, y
-#define WASM_I32_ROL(x, y) kExprI32Rol, x, y
-#define WASM_I32_EQ(x, y) kExprI32Eq, x, y
-#define WASM_I32_NE(x, y) kExprI32Ne, x, y
-#define WASM_I32_LTS(x, y) kExprI32LtS, x, y
-#define WASM_I32_LES(x, y) kExprI32LeS, x, y
-#define WASM_I32_LTU(x, y) kExprI32LtU, x, y
-#define WASM_I32_LEU(x, y) kExprI32LeU, x, y
-#define WASM_I32_GTS(x, y) kExprI32GtS, x, y
-#define WASM_I32_GES(x, y) kExprI32GeS, x, y
-#define WASM_I32_GTU(x, y) kExprI32GtU, x, y
-#define WASM_I32_GEU(x, y) kExprI32GeU, x, y
-#define WASM_I32_CLZ(x) kExprI32Clz, x
-#define WASM_I32_CTZ(x) kExprI32Ctz, x
-#define WASM_I32_POPCNT(x) kExprI32Popcnt, x
-#define WASM_I32_EQZ(x) kExprI32Eqz, x
+#define WASM_I32_ADD(x, y) x, y, kExprI32Add
+#define WASM_I32_SUB(x, y) x, y, kExprI32Sub
+#define WASM_I32_MUL(x, y) x, y, kExprI32Mul
+#define WASM_I32_DIVS(x, y) x, y, kExprI32DivS
+#define WASM_I32_DIVU(x, y) x, y, kExprI32DivU
+#define WASM_I32_REMS(x, y) x, y, kExprI32RemS
+#define WASM_I32_REMU(x, y) x, y, kExprI32RemU
+#define WASM_I32_AND(x, y) x, y, kExprI32And
+#define WASM_I32_IOR(x, y) x, y, kExprI32Ior
+#define WASM_I32_XOR(x, y) x, y, kExprI32Xor
+#define WASM_I32_SHL(x, y) x, y, kExprI32Shl
+#define WASM_I32_SHR(x, y) x, y, kExprI32ShrU
+#define WASM_I32_SAR(x, y) x, y, kExprI32ShrS
+#define WASM_I32_ROR(x, y) x, y, kExprI32Ror
+#define WASM_I32_ROL(x, y) x, y, kExprI32Rol
+#define WASM_I32_EQ(x, y) x, y, kExprI32Eq
+#define WASM_I32_NE(x, y) x, y, kExprI32Ne
+#define WASM_I32_LTS(x, y) x, y, kExprI32LtS
+#define WASM_I32_LES(x, y) x, y, kExprI32LeS
+#define WASM_I32_LTU(x, y) x, y, kExprI32LtU
+#define WASM_I32_LEU(x, y) x, y, kExprI32LeU
+#define WASM_I32_GTS(x, y) x, y, kExprI32GtS
+#define WASM_I32_GES(x, y) x, y, kExprI32GeS
+#define WASM_I32_GTU(x, y) x, y, kExprI32GtU
+#define WASM_I32_GEU(x, y) x, y, kExprI32GeU
+#define WASM_I32_CLZ(x) x, kExprI32Clz
+#define WASM_I32_CTZ(x) x, kExprI32Ctz
+#define WASM_I32_POPCNT(x) x, kExprI32Popcnt
+#define WASM_I32_EQZ(x) x, kExprI32Eqz
//------------------------------------------------------------------------------
// Int64 operations
//------------------------------------------------------------------------------
-#define WASM_I64_ADD(x, y) kExprI64Add, x, y
-#define WASM_I64_SUB(x, y) kExprI64Sub, x, y
-#define WASM_I64_MUL(x, y) kExprI64Mul, x, y
-#define WASM_I64_DIVS(x, y) kExprI64DivS, x, y
-#define WASM_I64_DIVU(x, y) kExprI64DivU, x, y
-#define WASM_I64_REMS(x, y) kExprI64RemS, x, y
-#define WASM_I64_REMU(x, y) kExprI64RemU, x, y
-#define WASM_I64_AND(x, y) kExprI64And, x, y
-#define WASM_I64_IOR(x, y) kExprI64Ior, x, y
-#define WASM_I64_XOR(x, y) kExprI64Xor, x, y
-#define WASM_I64_SHL(x, y) kExprI64Shl, x, y
-#define WASM_I64_SHR(x, y) kExprI64ShrU, x, y
-#define WASM_I64_SAR(x, y) kExprI64ShrS, x, y
-#define WASM_I64_ROR(x, y) kExprI64Ror, x, y
-#define WASM_I64_ROL(x, y) kExprI64Rol, x, y
-#define WASM_I64_EQ(x, y) kExprI64Eq, x, y
-#define WASM_I64_NE(x, y) kExprI64Ne, x, y
-#define WASM_I64_LTS(x, y) kExprI64LtS, x, y
-#define WASM_I64_LES(x, y) kExprI64LeS, x, y
-#define WASM_I64_LTU(x, y) kExprI64LtU, x, y
-#define WASM_I64_LEU(x, y) kExprI64LeU, x, y
-#define WASM_I64_GTS(x, y) kExprI64GtS, x, y
-#define WASM_I64_GES(x, y) kExprI64GeS, x, y
-#define WASM_I64_GTU(x, y) kExprI64GtU, x, y
-#define WASM_I64_GEU(x, y) kExprI64GeU, x, y
-#define WASM_I64_CLZ(x) kExprI64Clz, x
-#define WASM_I64_CTZ(x) kExprI64Ctz, x
-#define WASM_I64_POPCNT(x) kExprI64Popcnt, x
-#define WASM_I64_EQZ(x) kExprI64Eqz, x
+#define WASM_I64_ADD(x, y) x, y, kExprI64Add
+#define WASM_I64_SUB(x, y) x, y, kExprI64Sub
+#define WASM_I64_MUL(x, y) x, y, kExprI64Mul
+#define WASM_I64_DIVS(x, y) x, y, kExprI64DivS
+#define WASM_I64_DIVU(x, y) x, y, kExprI64DivU
+#define WASM_I64_REMS(x, y) x, y, kExprI64RemS
+#define WASM_I64_REMU(x, y) x, y, kExprI64RemU
+#define WASM_I64_AND(x, y) x, y, kExprI64And
+#define WASM_I64_IOR(x, y) x, y, kExprI64Ior
+#define WASM_I64_XOR(x, y) x, y, kExprI64Xor
+#define WASM_I64_SHL(x, y) x, y, kExprI64Shl
+#define WASM_I64_SHR(x, y) x, y, kExprI64ShrU
+#define WASM_I64_SAR(x, y) x, y, kExprI64ShrS
+#define WASM_I64_ROR(x, y) x, y, kExprI64Ror
+#define WASM_I64_ROL(x, y) x, y, kExprI64Rol
+#define WASM_I64_EQ(x, y) x, y, kExprI64Eq
+#define WASM_I64_NE(x, y) x, y, kExprI64Ne
+#define WASM_I64_LTS(x, y) x, y, kExprI64LtS
+#define WASM_I64_LES(x, y) x, y, kExprI64LeS
+#define WASM_I64_LTU(x, y) x, y, kExprI64LtU
+#define WASM_I64_LEU(x, y) x, y, kExprI64LeU
+#define WASM_I64_GTS(x, y) x, y, kExprI64GtS
+#define WASM_I64_GES(x, y) x, y, kExprI64GeS
+#define WASM_I64_GTU(x, y) x, y, kExprI64GtU
+#define WASM_I64_GEU(x, y) x, y, kExprI64GeU
+#define WASM_I64_CLZ(x) x, kExprI64Clz
+#define WASM_I64_CTZ(x) x, kExprI64Ctz
+#define WASM_I64_POPCNT(x) x, kExprI64Popcnt
+#define WASM_I64_EQZ(x) x, kExprI64Eqz
//------------------------------------------------------------------------------
// Float32 operations
//------------------------------------------------------------------------------
-#define WASM_F32_ADD(x, y) kExprF32Add, x, y
-#define WASM_F32_SUB(x, y) kExprF32Sub, x, y
-#define WASM_F32_MUL(x, y) kExprF32Mul, x, y
-#define WASM_F32_DIV(x, y) kExprF32Div, x, y
-#define WASM_F32_MIN(x, y) kExprF32Min, x, y
-#define WASM_F32_MAX(x, y) kExprF32Max, x, y
-#define WASM_F32_ABS(x) kExprF32Abs, x
-#define WASM_F32_NEG(x) kExprF32Neg, x
-#define WASM_F32_COPYSIGN(x, y) kExprF32CopySign, x, y
-#define WASM_F32_CEIL(x) kExprF32Ceil, x
-#define WASM_F32_FLOOR(x) kExprF32Floor, x
-#define WASM_F32_TRUNC(x) kExprF32Trunc, x
-#define WASM_F32_NEARESTINT(x) kExprF32NearestInt, x
-#define WASM_F32_SQRT(x) kExprF32Sqrt, x
-#define WASM_F32_EQ(x, y) kExprF32Eq, x, y
-#define WASM_F32_NE(x, y) kExprF32Ne, x, y
-#define WASM_F32_LT(x, y) kExprF32Lt, x, y
-#define WASM_F32_LE(x, y) kExprF32Le, x, y
-#define WASM_F32_GT(x, y) kExprF32Gt, x, y
-#define WASM_F32_GE(x, y) kExprF32Ge, x, y
+#define WASM_F32_ADD(x, y) x, y, kExprF32Add
+#define WASM_F32_SUB(x, y) x, y, kExprF32Sub
+#define WASM_F32_MUL(x, y) x, y, kExprF32Mul
+#define WASM_F32_DIV(x, y) x, y, kExprF32Div
+#define WASM_F32_MIN(x, y) x, y, kExprF32Min
+#define WASM_F32_MAX(x, y) x, y, kExprF32Max
+#define WASM_F32_ABS(x) x, kExprF32Abs
+#define WASM_F32_NEG(x) x, kExprF32Neg
+#define WASM_F32_COPYSIGN(x, y) x, y, kExprF32CopySign
+#define WASM_F32_CEIL(x) x, kExprF32Ceil
+#define WASM_F32_FLOOR(x) x, kExprF32Floor
+#define WASM_F32_TRUNC(x) x, kExprF32Trunc
+#define WASM_F32_NEARESTINT(x) x, kExprF32NearestInt
+#define WASM_F32_SQRT(x) x, kExprF32Sqrt
+#define WASM_F32_EQ(x, y) x, y, kExprF32Eq
+#define WASM_F32_NE(x, y) x, y, kExprF32Ne
+#define WASM_F32_LT(x, y) x, y, kExprF32Lt
+#define WASM_F32_LE(x, y) x, y, kExprF32Le
+#define WASM_F32_GT(x, y) x, y, kExprF32Gt
+#define WASM_F32_GE(x, y) x, y, kExprF32Ge
//------------------------------------------------------------------------------
// Float64 operations
//------------------------------------------------------------------------------
-#define WASM_F64_ADD(x, y) kExprF64Add, x, y
-#define WASM_F64_SUB(x, y) kExprF64Sub, x, y
-#define WASM_F64_MUL(x, y) kExprF64Mul, x, y
-#define WASM_F64_DIV(x, y) kExprF64Div, x, y
-#define WASM_F64_MIN(x, y) kExprF64Min, x, y
-#define WASM_F64_MAX(x, y) kExprF64Max, x, y
-#define WASM_F64_ABS(x) kExprF64Abs, x
-#define WASM_F64_NEG(x) kExprF64Neg, x
-#define WASM_F64_COPYSIGN(x, y) kExprF64CopySign, x, y
-#define WASM_F64_CEIL(x) kExprF64Ceil, x
-#define WASM_F64_FLOOR(x) kExprF64Floor, x
-#define WASM_F64_TRUNC(x) kExprF64Trunc, x
-#define WASM_F64_NEARESTINT(x) kExprF64NearestInt, x
-#define WASM_F64_SQRT(x) kExprF64Sqrt, x
-#define WASM_F64_EQ(x, y) kExprF64Eq, x, y
-#define WASM_F64_NE(x, y) kExprF64Ne, x, y
-#define WASM_F64_LT(x, y) kExprF64Lt, x, y
-#define WASM_F64_LE(x, y) kExprF64Le, x, y
-#define WASM_F64_GT(x, y) kExprF64Gt, x, y
-#define WASM_F64_GE(x, y) kExprF64Ge, x, y
+#define WASM_F64_ADD(x, y) x, y, kExprF64Add
+#define WASM_F64_SUB(x, y) x, y, kExprF64Sub
+#define WASM_F64_MUL(x, y) x, y, kExprF64Mul
+#define WASM_F64_DIV(x, y) x, y, kExprF64Div
+#define WASM_F64_MIN(x, y) x, y, kExprF64Min
+#define WASM_F64_MAX(x, y) x, y, kExprF64Max
+#define WASM_F64_ABS(x) x, kExprF64Abs
+#define WASM_F64_NEG(x) x, kExprF64Neg
+#define WASM_F64_COPYSIGN(x, y) x, y, kExprF64CopySign
+#define WASM_F64_CEIL(x) x, kExprF64Ceil
+#define WASM_F64_FLOOR(x) x, kExprF64Floor
+#define WASM_F64_TRUNC(x) x, kExprF64Trunc
+#define WASM_F64_NEARESTINT(x) x, kExprF64NearestInt
+#define WASM_F64_SQRT(x) x, kExprF64Sqrt
+#define WASM_F64_EQ(x, y) x, y, kExprF64Eq
+#define WASM_F64_NE(x, y) x, y, kExprF64Ne
+#define WASM_F64_LT(x, y) x, y, kExprF64Lt
+#define WASM_F64_LE(x, y) x, y, kExprF64Le
+#define WASM_F64_GT(x, y) x, y, kExprF64Gt
+#define WASM_F64_GE(x, y) x, y, kExprF64Ge
//------------------------------------------------------------------------------
// Type conversions.
//------------------------------------------------------------------------------
-#define WASM_I32_SCONVERT_F32(x) kExprI32SConvertF32, x
-#define WASM_I32_SCONVERT_F64(x) kExprI32SConvertF64, x
-#define WASM_I32_UCONVERT_F32(x) kExprI32UConvertF32, x
-#define WASM_I32_UCONVERT_F64(x) kExprI32UConvertF64, x
-#define WASM_I32_CONVERT_I64(x) kExprI32ConvertI64, x
-#define WASM_I64_SCONVERT_F32(x) kExprI64SConvertF32, x
-#define WASM_I64_SCONVERT_F64(x) kExprI64SConvertF64, x
-#define WASM_I64_UCONVERT_F32(x) kExprI64UConvertF32, x
-#define WASM_I64_UCONVERT_F64(x) kExprI64UConvertF64, x
-#define WASM_I64_SCONVERT_I32(x) kExprI64SConvertI32, x
-#define WASM_I64_UCONVERT_I32(x) kExprI64UConvertI32, x
-#define WASM_F32_SCONVERT_I32(x) kExprF32SConvertI32, x
-#define WASM_F32_UCONVERT_I32(x) kExprF32UConvertI32, x
-#define WASM_F32_SCONVERT_I64(x) kExprF32SConvertI64, x
-#define WASM_F32_UCONVERT_I64(x) kExprF32UConvertI64, x
-#define WASM_F32_CONVERT_F64(x) kExprF32ConvertF64, x
-#define WASM_F32_REINTERPRET_I32(x) kExprF32ReinterpretI32, x
-#define WASM_F64_SCONVERT_I32(x) kExprF64SConvertI32, x
-#define WASM_F64_UCONVERT_I32(x) kExprF64UConvertI32, x
-#define WASM_F64_SCONVERT_I64(x) kExprF64SConvertI64, x
-#define WASM_F64_UCONVERT_I64(x) kExprF64UConvertI64, x
-#define WASM_F64_CONVERT_F32(x) kExprF64ConvertF32, x
-#define WASM_F64_REINTERPRET_I64(x) kExprF64ReinterpretI64, x
-#define WASM_I32_REINTERPRET_F32(x) kExprI32ReinterpretF32, x
-#define WASM_I64_REINTERPRET_F64(x) kExprI64ReinterpretF64, x
+#define WASM_I32_SCONVERT_F32(x) x, kExprI32SConvertF32
+#define WASM_I32_SCONVERT_F64(x) x, kExprI32SConvertF64
+#define WASM_I32_UCONVERT_F32(x) x, kExprI32UConvertF32
+#define WASM_I32_UCONVERT_F64(x) x, kExprI32UConvertF64
+#define WASM_I32_CONVERT_I64(x) x, kExprI32ConvertI64
+#define WASM_I64_SCONVERT_F32(x) x, kExprI64SConvertF32
+#define WASM_I64_SCONVERT_F64(x) x, kExprI64SConvertF64
+#define WASM_I64_UCONVERT_F32(x) x, kExprI64UConvertF32
+#define WASM_I64_UCONVERT_F64(x) x, kExprI64UConvertF64
+#define WASM_I64_SCONVERT_I32(x) x, kExprI64SConvertI32
+#define WASM_I64_UCONVERT_I32(x) x, kExprI64UConvertI32
+#define WASM_F32_SCONVERT_I32(x) x, kExprF32SConvertI32
+#define WASM_F32_UCONVERT_I32(x) x, kExprF32UConvertI32
+#define WASM_F32_SCONVERT_I64(x) x, kExprF32SConvertI64
+#define WASM_F32_UCONVERT_I64(x) x, kExprF32UConvertI64
+#define WASM_F32_CONVERT_F64(x) x, kExprF32ConvertF64
+#define WASM_F32_REINTERPRET_I32(x) x, kExprF32ReinterpretI32
+#define WASM_F64_SCONVERT_I32(x) x, kExprF64SConvertI32
+#define WASM_F64_UCONVERT_I32(x) x, kExprF64UConvertI32
+#define WASM_F64_SCONVERT_I64(x) x, kExprF64SConvertI64
+#define WASM_F64_UCONVERT_I64(x) x, kExprF64UConvertI64
+#define WASM_F64_CONVERT_F32(x) x, kExprF64ConvertF32
+#define WASM_F64_REINTERPRET_I64(x) x, kExprF64ReinterpretI64
+#define WASM_I32_REINTERPRET_F32(x) x, kExprI32ReinterpretF32
+#define WASM_I64_REINTERPRET_F64(x) x, kExprI64ReinterpretF64
+
+//------------------------------------------------------------------------------
+// Simd Operations.
+//------------------------------------------------------------------------------
+#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
+#define WASM_SIMD_I32x4_EXTRACT_LANE(x, y) \
+ x, y, kSimdPrefix, kExprI32x4ExtractLane & 0xff
+
+#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
+#define SIZEOF_SIG_ENTRY_v_v 3
+
+#define SIG_ENTRY_v_x(a) kWasmFunctionTypeForm, 1, a, 0
+#define SIG_ENTRY_v_xx(a, b) kWasmFunctionTypeForm, 2, a, b, 0
+#define SIG_ENTRY_v_xxx(a, b, c) kWasmFunctionTypeForm, 3, a, b, c, 0
+#define SIZEOF_SIG_ENTRY_v_x 4
+#define SIZEOF_SIG_ENTRY_v_xx 5
+#define SIZEOF_SIG_ENTRY_v_xxx 6
+
+#define SIG_ENTRY_x(r) kWasmFunctionTypeForm, 0, 1, r
+#define SIG_ENTRY_x_x(r, a) kWasmFunctionTypeForm, 1, a, 1, r
+#define SIG_ENTRY_x_xx(r, a, b) kWasmFunctionTypeForm, 2, a, b, 1, r
+#define SIG_ENTRY_x_xxx(r, a, b, c) kWasmFunctionTypeForm, 3, a, b, c, 1, r
+#define SIZEOF_SIG_ENTRY_x 4
+#define SIZEOF_SIG_ENTRY_x_x 5
+#define SIZEOF_SIG_ENTRY_x_xx 6
+#define SIZEOF_SIG_ENTRY_x_xxx 7
#endif // V8_WASM_MACRO_GEN_H_
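
For illustration only (a sketch, not part of the patch): with the postfix encoding above, operands are emitted before their opcode, matching the wasm stack machine. A function body computing local[0] + local[1]:

    byte code[] = {WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
    // ...expands to the byte sequence:
    // {kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add}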
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index a1c2a7a3e1..94bf998e53 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -2,14 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
+#include "src/base/atomic-utils.h"
+#include "src/code-stubs.h"
+
#include "src/macro-assembler.h"
#include "src/objects.h"
-#include "src/v8.h"
-
+#include "src/property-descriptor.h"
#include "src/simulator.h"
+#include "src/snapshot/snapshot.h"
+#include "src/v8.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-debug.h"
+#include "src/wasm/wasm-function-name-table.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-result.h"
@@ -19,20 +27,37 @@ namespace v8 {
namespace internal {
namespace wasm {
+enum JSFunctionExportInternalField {
+ kInternalModuleInstance,
+ kInternalArity,
+ kInternalSignature
+};
+
+static const int kPlaceholderMarker = 1000000000;
+
static const char* wasmSections[] = {
-#define F(enumerator, string) string,
+#define F(enumerator, order, string) string,
FOR_EACH_WASM_SECTION_TYPE(F)
#undef F
+ "<unknown>" // entry for "Max"
};
static uint8_t wasmSectionsLengths[]{
-#define F(enumerator, string) sizeof(string) - 1,
+#define F(enumerator, order, string) sizeof(string) - 1,
+ FOR_EACH_WASM_SECTION_TYPE(F)
+#undef F
+ 9 // entry for "Max" (length of "<unknown>")
+};
+
+static uint8_t wasmSectionsOrders[]{
+#define F(enumerator, order, string) order,
FOR_EACH_WASM_SECTION_TYPE(F)
#undef F
+ 0 // entry for "Max"
};
static_assert(sizeof(wasmSections) / sizeof(wasmSections[0]) ==
- (size_t)WasmSection::Code::Max,
+ (size_t)WasmSection::Code::Max + 1,
"expected enum WasmSection::Code to be monotonic from 0");
WasmSection::Code WasmSection::begin() { return (WasmSection::Code)0; }
@@ -49,6 +74,20 @@ size_t WasmSection::getNameLength(WasmSection::Code code) {
return wasmSectionsLengths[(size_t)code];
}
+int WasmSection::getOrder(WasmSection::Code code) {
+ return wasmSectionsOrders[(size_t)code];
+}
+
+WasmSection::Code WasmSection::lookup(const byte* string, uint32_t length) {
+ // TODO(jfb): Linear search; it may be better to do a common-prefix search.
+ for (Code i = begin(); i != end(); i = next(i)) {
+ if (getNameLength(i) == length && 0 == memcmp(getName(i), string, length)) {
+ return i;
+ }
+ }
+ return Code::Max;
+}
+
std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
os << "WASM module with ";
os << (module.min_mem_pages * module.kPageSize) << " min mem";
@@ -59,16 +98,9 @@ std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
return os;
}
-
std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
os << "WASM function with signature " << *function.sig;
- os << " locals: ";
- if (function.local_i32_count) os << function.local_i32_count << " i32s ";
- if (function.local_i64_count) os << function.local_i64_count << " i64s ";
- if (function.local_f32_count) os << function.local_f32_count << " f32s ";
- if (function.local_f64_count) os << function.local_f64_count << " f64s ";
-
os << " code bytes: "
<< (function.code_end_offset - function.code_start_offset);
return os;
@@ -80,7 +112,7 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
if (pair.module_) {
WasmName name = pair.module_->GetName(pair.function_->name_offset,
pair.function_->name_length);
- os.write(name.name, name.length);
+ os.write(name.start(), name.length());
} else {
os << "+" << pair.function_->func_index;
}
@@ -90,159 +122,189 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
return os;
}
-// A helper class for compiling multiple wasm functions that offers
-// placeholder code objects for calling functions that are not yet compiled.
-class WasmLinker {
- public:
- WasmLinker(Isolate* isolate, size_t size)
- : isolate_(isolate), placeholder_code_(size), function_code_(size) {}
-
- // Get the code object for a function, allocating a placeholder if it has
- // not yet been compiled.
- Handle<Code> GetFunctionCode(uint32_t index) {
- DCHECK(index < function_code_.size());
- if (function_code_[index].is_null()) {
- // Create a placeholder code object and encode the corresponding index in
- // the {constant_pool_offset} field of the code object.
- // TODO(titzer): placeholder code objects are somewhat dangerous.
- Handle<Code> self(nullptr, isolate_);
- byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fake instructions.
- CodeDesc desc = {buffer, 8, 8, 0, 0, nullptr};
- Handle<Code> code = isolate_->factory()->NewCode(
- desc, Code::KindField::encode(Code::WASM_FUNCTION), self);
- code->set_constant_pool_offset(index + kPlaceholderMarker);
- placeholder_code_[index] = code;
- function_code_[index] = code;
- }
- return function_code_[index];
- }
-
- void Finish(uint32_t index, Handle<Code> code) {
- DCHECK(index < function_code_.size());
- function_code_[index] = code;
- }
-
- void Link(Handle<FixedArray> function_table,
- std::vector<uint16_t>& functions) {
- for (size_t i = 0; i < function_code_.size(); i++) {
- LinkFunction(function_code_[i]);
- }
- if (!function_table.is_null()) {
- int table_size = static_cast<int>(functions.size());
- DCHECK_EQ(function_table->length(), table_size * 2);
- for (int i = 0; i < table_size; i++) {
- function_table->set(i + table_size, *function_code_[functions[i]]);
- }
- }
- }
-
- private:
- static const int kPlaceholderMarker = 1000000000;
-
- Isolate* isolate_;
- std::vector<Handle<Code>> placeholder_code_;
- std::vector<Handle<Code>> function_code_;
-
- void LinkFunction(Handle<Code> code) {
- bool modified = false;
- int mode_mask = RelocInfo::kCodeTargetMask;
- AllowDeferredHandleDereference embedding_raw_address;
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- Code* target =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == Code::WASM_FUNCTION &&
- target->constant_pool_offset() >= kPlaceholderMarker) {
- // Patch direct calls to placeholder code objects.
- uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
- CHECK(index < function_code_.size());
- Handle<Code> new_target = function_code_[index];
- if (target != *new_target) {
- CHECK_EQ(*placeholder_code_[index], target);
- it.rinfo()->set_target_address(new_target->instruction_start(),
- SKIP_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
- modified = true;
- }
- }
- }
- }
- if (modified) {
- Assembler::FlushICache(isolate_, code->instruction_start(),
- code->instruction_size());
- }
+Handle<JSFunction> WrapExportCodeAsJSFunction(
+ Isolate* isolate, Handle<Code> export_code, Handle<String> name, int arity,
+ MaybeHandle<ByteArray> maybe_signature, Handle<JSObject> module_instance) {
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, export_code, false);
+ shared->set_length(arity);
+ shared->set_internal_formal_parameter_count(arity);
+ Handle<JSFunction> function = isolate->factory()->NewFunction(
+ isolate->wasm_function_map(), name, export_code);
+ function->set_shared(*shared);
+
+ function->SetInternalField(kInternalModuleInstance, *module_instance);
+  // Add an internal field recording the function arity.
+  function->SetInternalField(kInternalArity, Smi::FromInt(arity));
+  // Add an internal field holding the signature of the wrapped function.
+ Handle<ByteArray> signature;
+ if (maybe_signature.ToHandle(&signature)) {
+ function->SetInternalField(kInternalSignature, *signature);
}
-};
+ return function;
+}
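+// Note: the internal fields set above (instance, arity, signature) are what
+// CompileWrappersToImportedFunctions() reads back when such a function is
+// passed in again via the FFI, allowing it to skip the wasm-to-JS wrapper
+// for wasm-to-wasm calls.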
namespace {
// Internal constants for the layout of the module object.
-const int kWasmModuleInternalFieldCount = 4;
const int kWasmModuleFunctionTable = 0;
const int kWasmModuleCodeTable = 1;
const int kWasmMemArrayBuffer = 2;
const int kWasmGlobalsArrayBuffer = 3;
+// TODO(clemensh): Remove function name array, extract names from module bytes.
+const int kWasmFunctionNamesArray = 4;
+const int kWasmModuleBytesString = 5;
+const int kWasmDebugInfo = 6;
+const int kWasmModuleInternalFieldCount = 7;
+
+// TODO(mtrofin): Unnecessary once we stop using the JS heap for wasm code.
+// For now, each field is expected to have the type noted in the comment by
+// its side. Fields typed as "maybe" are optional; the others are mandatory.
+// Since the compiled module is obtained either from the current v8 instance
+// or from a snapshot produced by a compatible (== identical) v8 instance, we
+// simply fail at instantiation time if the data is invalid.
+enum CompiledWasmObjectFields {
+ kFunctions, // FixedArray of Code
+ kImportData, // maybe FixedArray of FixedArray respecting the
+ // WasmImportMetadata structure.
+ kExports, // maybe FixedArray of FixedArray of WasmExportMetadata
+ // structure
+ kStartupFunction, // maybe FixedArray of WasmExportMetadata structure
+ kTableOfIndirectFunctionTables, // maybe FixedArray of FixedArray of
+ // WasmIndirectFunctionTableMetadata
+ kModuleBytes, // maybe String
+ kFunctionNameTable, // maybe ByteArray
+  kMinRequiredMemory, // Smi. a uint32_t
+  // The following two fields are either both present or both absent:
+ kDataSegmentsInfo, // maybe FixedArray of FixedArray respecting the
+ // WasmSegmentInfo structure
+ kDataSegments, // maybe ByteArray.
+
+  kGlobalsSize, // Smi. a uint32_t
+ kExportMem, // Smi. bool
+ kOrigin, // Smi. ModuleOrigin
+ kCompiledWasmObjectTableSize // Sentinel value.
+};
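+// Illustrative sketch (not part of this patch): a consumer reads a mandatory
+// field with GetValueChecked and an optional ("maybe") field with GetValue,
+// e.g.:
+//   Handle<FixedArray> fns =
+//       compiled_module->GetValueChecked<FixedArray>(isolate, kFunctions);
+//   MaybeHandle<String> bytes =
+//       compiled_module->GetValue<String>(isolate, kModuleBytes);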
-size_t AllocateGlobalsOffsets(std::vector<WasmGlobal>& globals) {
- uint32_t offset = 0;
- if (globals.size() == 0) return 0;
- for (WasmGlobal& global : globals) {
- byte size = WasmOpcodes::MemSize(global.type);
- offset = (offset + size - 1) & ~(size - 1); // align
- global.offset = offset;
- offset += size;
- }
- return offset;
+enum WasmImportMetadata {
+ kModuleName, // String
+ kFunctionName, // maybe String
+  kOutputCount, // Smi. a uint32_t
+ kSignature, // ByteArray. A copy of the data in FunctionSig
+ kWasmImportDataTableSize // Sentinel value.
+};
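+// E.g. an import of module "m", function "f", with one return value, would
+// be encoded here as ["m", "f", Smi(1), <copy of the FunctionSig data>].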
+
+enum WasmExportMetadata {
+ kExportCode, // Code
+ kExportName, // String
+  kExportArity, // Smi. an int
+  kExportedFunctionIndex, // Smi. a uint32_t
+ kExportedSignature, // ByteArray. A copy of the data in FunctionSig
+ kWasmExportMetadataTableSize // Sentinel value.
+};
+
+enum WasmSegmentInfo {
+  kDestAddr, // Smi. a uint32_t
+  kSourceSize, // Smi. a uint32_t
+ kWasmSegmentInfoSize // Sentinel value.
+};
+
+enum WasmIndirectFunctionTableMetadata {
+  kSize, // Smi. a uint32_t
+ kTable, // FixedArray of indirect function table
+ kWasmIndirectFunctionTableMetadataSize // Sentinel value.
+};
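+// Each entry pairs the declared table size with the FixedArray backing the
+// table; SetupIndirectFunctionTable() clones and patches these per instance.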
+
+uint32_t GetMinModuleMemSize(const WasmModule* module) {
+ return WasmModule::kPageSize * module->min_mem_pages;
}
+void LoadDataSegments(Handle<FixedArray> compiled_module, Address mem_addr,
+ size_t mem_size) {
+ Isolate* isolate = compiled_module->GetIsolate();
+ MaybeHandle<ByteArray> maybe_data =
+ compiled_module->GetValue<ByteArray>(isolate, kDataSegments);
+ MaybeHandle<FixedArray> maybe_segments =
+ compiled_module->GetValue<FixedArray>(isolate, kDataSegmentsInfo);
+
+ // We either have both or neither.
+ CHECK(maybe_data.is_null() == maybe_segments.is_null());
+ // If we have neither, we're done.
+ if (maybe_data.is_null()) return;
+
+ Handle<ByteArray> data = maybe_data.ToHandleChecked();
+ Handle<FixedArray> segments = maybe_segments.ToHandleChecked();
+
+ uint32_t last_extraction_pos = 0;
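+  // Segment payloads were concatenated in segment order into {data} by
+  // SaveDataSegmentInfo(), so they are extracted back in the same order.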
+ for (int i = 0; i < segments->length(); ++i) {
+ Handle<ByteArray> segment =
+ Handle<ByteArray>(ByteArray::cast(segments->get(i)));
+ uint32_t dest_addr = static_cast<uint32_t>(segment->get_int(kDestAddr));
+ uint32_t source_size = static_cast<uint32_t>(segment->get_int(kSourceSize));
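+    // Note: checking dest_addr <= mem_size - source_size (instead of summing
+    // dest_addr + source_size) avoids a potential uint32_t overflow.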
+ CHECK_LT(dest_addr, mem_size);
+ CHECK_LE(source_size, mem_size);
+ CHECK_LE(dest_addr, mem_size - source_size);
+ byte* addr = mem_addr + dest_addr;
+ data->copy_out(last_extraction_pos, addr, source_size);
+ last_extraction_pos += source_size;
+ }
+}
-void LoadDataSegments(WasmModule* module, byte* mem_addr, size_t mem_size) {
+void SaveDataSegmentInfo(Factory* factory, const WasmModule* module,
+ Handle<FixedArray> compiled_module) {
+ Handle<FixedArray> segments = factory->NewFixedArray(
+ static_cast<int>(module->data_segments.size()), TENURED);
+ uint32_t data_size = 0;
for (const WasmDataSegment& segment : module->data_segments) {
if (!segment.init) continue;
- if (!segment.source_size) continue;
- CHECK_LT(segment.dest_addr, mem_size);
- CHECK_LE(segment.source_size, mem_size);
- CHECK_LE(segment.dest_addr + segment.source_size, mem_size);
- byte* addr = mem_addr + segment.dest_addr;
- memcpy(addr, module->module_start + segment.source_offset,
- segment.source_size);
+ if (segment.source_size == 0) continue;
+ data_size += segment.source_size;
}
-}
-
+ Handle<ByteArray> data = factory->NewByteArray(data_size, TENURED);
-Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
- if (module->function_table.size() == 0) {
- return Handle<FixedArray>::null();
+ uint32_t last_insertion_pos = 0;
+ for (uint32_t i = 0; i < module->data_segments.size(); ++i) {
+ const WasmDataSegment& segment = module->data_segments[i];
+ if (!segment.init) continue;
+ if (segment.source_size == 0) continue;
+ Handle<ByteArray> js_segment =
+ factory->NewByteArray(kWasmSegmentInfoSize * sizeof(uint32_t), TENURED);
+ js_segment->set_int(kDestAddr, segment.dest_addr);
+ js_segment->set_int(kSourceSize, segment.source_size);
+ segments->set(i, *js_segment);
+ data->copy_in(last_insertion_pos,
+ module->module_start + segment.source_offset,
+ segment.source_size);
+ last_insertion_pos += segment.source_size;
}
- int table_size = static_cast<int>(module->function_table.size());
- Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
- for (int i = 0; i < table_size; i++) {
- WasmFunction* function = &module->functions[module->function_table[i]];
- fixed->set(i, Smi::FromInt(function->sig_index));
+ compiled_module->set(kDataSegmentsInfo, *segments);
+ compiled_module->set(kDataSegments, *data);
+}
+
+void PatchFunctionTable(Handle<Code> code,
+ Handle<FixedArray> old_indirect_table,
+ Handle<FixedArray> new_indirect_table) {
+  int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ if (it.rinfo()->target_object() == *old_indirect_table) {
+ it.rinfo()->set_target_object(*new_indirect_table);
+ }
}
- return fixed;
}
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
- byte** backing_store) {
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
if (size > (WasmModule::kMaxMemPages * WasmModule::kPageSize)) {
// TODO(titzer): lift restriction on maximum memory allocated here.
- *backing_store = nullptr;
return Handle<JSArrayBuffer>::null();
}
- void* memory =
- isolate->array_buffer_allocator()->Allocate(static_cast<int>(size));
- if (!memory) {
- *backing_store = nullptr;
+ void* memory = isolate->array_buffer_allocator()->Allocate(size);
+ if (memory == nullptr) {
return Handle<JSArrayBuffer>::null();
}
- *backing_store = reinterpret_cast<byte*>(memory);
-
#if DEBUG
// Double check the API allocator actually zero-initialized the memory.
- byte* bytes = reinterpret_cast<byte*>(*backing_store);
- for (size_t i = 0; i < size; i++) {
+ const byte* bytes = reinterpret_cast<const byte*>(memory);
+ for (size_t i = 0; i < size; ++i) {
DCHECK_EQ(0, bytes[i]);
}
#endif
@@ -253,91 +315,173 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
return buffer;
}
-// Set the memory for a module instance to be the {memory} array buffer.
-void SetMemory(WasmModuleInstance* instance, Handle<JSArrayBuffer> memory) {
- memory->set_is_neuterable(false);
- instance->mem_start = reinterpret_cast<byte*>(memory->backing_store());
- instance->mem_size = memory->byte_length()->Number();
- instance->mem_buffer = memory;
+void RelocateInstanceCode(Handle<JSObject> instance, Address start,
+ uint32_t prev_size, uint32_t new_size) {
+ Handle<FixedArray> functions = Handle<FixedArray>(
+ FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
+ for (int i = 0; i < functions->length(); ++i) {
+ Handle<Code> function = Handle<Code>(Code::cast(functions->get(i)));
+ AllowDeferredHandleDereference embedding_raw_address;
+ int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
+ (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+ it.rinfo()->update_wasm_memory_reference(nullptr, start, prev_size,
+ new_size);
+ }
+ }
}
// Allocate memory for a module instance as a new JSArrayBuffer.
-bool AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
- WasmModuleInstance* instance) {
- DCHECK(instance->module);
- DCHECK(instance->mem_buffer.is_null());
-
- if (instance->module->min_mem_pages > WasmModule::kMaxMemPages) {
+Handle<JSArrayBuffer> AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
+ uint32_t min_mem_pages) {
+ if (min_mem_pages > WasmModule::kMaxMemPages) {
thrower->Error("Out of memory: wasm memory too large");
- return false;
+ return Handle<JSArrayBuffer>::null();
}
- instance->mem_size = WasmModule::kPageSize * instance->module->min_mem_pages;
- instance->mem_buffer =
- NewArrayBuffer(isolate, instance->mem_size, &instance->mem_start);
- if (!instance->mem_start) {
+ Handle<JSArrayBuffer> mem_buffer =
+ NewArrayBuffer(isolate, min_mem_pages * WasmModule::kPageSize);
+
+ if (mem_buffer.is_null()) {
thrower->Error("Out of memory: wasm memory");
- instance->mem_size = 0;
- return false;
}
- return true;
+ return mem_buffer;
}
-bool AllocateGlobals(ErrorThrower* thrower, Isolate* isolate,
- WasmModuleInstance* instance) {
- instance->globals_size = AllocateGlobalsOffsets(instance->module->globals);
+void RelocateGlobals(Handle<JSObject> instance, Address globals_start) {
+ Handle<FixedArray> functions = Handle<FixedArray>(
+ FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
+ uint32_t function_count = static_cast<uint32_t>(functions->length());
+ for (uint32_t i = 0; i < function_count; ++i) {
+ Handle<Code> function = Handle<Code>(Code::cast(functions->get(i)));
+ AllowDeferredHandleDereference embedding_raw_address;
+ int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
+ for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+ it.rinfo()->update_wasm_global_reference(nullptr, globals_start);
+ }
+ }
+}
- if (instance->globals_size > 0) {
- instance->globals_buffer = NewArrayBuffer(isolate, instance->globals_size,
- &instance->globals_start);
- if (!instance->globals_start) {
- // Not enough space for backing store of globals.
- thrower->Error("Out of memory: wasm globals");
- return false;
+Handle<Code> CreatePlaceholder(Factory* factory, uint32_t index,
+ Code::Kind kind) {
+ // Create a placeholder code object and encode the corresponding index in
+ // the {constant_pool_offset} field of the code object.
+ // TODO(titzer): placeholder code objects are somewhat dangerous.
+ static byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fake instructions.
+ static CodeDesc desc = {
+ buffer, arraysize(buffer), arraysize(buffer), 0, 0, nullptr, 0, nullptr};
+ Handle<Code> code = factory->NewCode(desc, Code::KindField::encode(kind),
+ Handle<Object>::null());
+ code->set_constant_pool_offset(static_cast<int>(index) + kPlaceholderMarker);
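+  // E.g. the placeholder for function index 3 stores kPlaceholderMarker + 3;
+  // LinkFunction() later recovers the index by subtracting the marker.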
+ return code;
+}
+
+// TODO(mtrofin): remove when we stop relying on placeholders.
+void InitializePlaceholders(Factory* factory,
+ std::vector<Handle<Code>>* placeholders,
+ size_t size) {
+ DCHECK(placeholders->empty());
+ placeholders->reserve(size);
+
+ for (uint32_t i = 0; i < size; ++i) {
+ placeholders->push_back(CreatePlaceholder(factory, i, Code::WASM_FUNCTION));
+ }
+}
+
+bool LinkFunction(Handle<Code> unlinked,
+ const std::vector<Handle<Code>>& code_targets,
+ Code::Kind kind) {
+ bool modified = false;
+ int mode_mask = RelocInfo::kCodeTargetMask;
+ AllowDeferredHandleDereference embedding_raw_address;
+ for (RelocIterator it(*unlinked, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsCodeTarget(mode)) {
+ Code* target =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == kind &&
+ target->constant_pool_offset() >= kPlaceholderMarker) {
+ // Patch direct calls to placeholder code objects.
+ uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
+      CHECK_LT(index, code_targets.size());
+ Handle<Code> new_target = code_targets[index];
+ if (target != *new_target) {
+ it.rinfo()->set_target_address(new_target->instruction_start(),
+ UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+ modified = true;
+ }
+ }
}
}
- return true;
+ return modified;
}
+
+void LinkModuleFunctions(Isolate* isolate,
+ std::vector<Handle<Code>>& functions) {
+ for (size_t i = 0; i < functions.size(); ++i) {
+ Handle<Code> code = functions[i];
+ LinkFunction(code, functions, Code::WASM_FUNCTION);
+ }
+}
+
+void LinkImports(Isolate* isolate, std::vector<Handle<Code>>& functions,
+ const std::vector<Handle<Code>>& imports) {
+ for (uint32_t i = 0; i < functions.size(); ++i) {
+ Handle<Code> code = functions[i];
+ LinkFunction(code, imports, Code::WASM_TO_JS_FUNCTION);
+ }
+}
+
+void FlushAssemblyCache(Isolate* isolate, Handle<FixedArray> functions) {
+ for (int i = 0; i < functions->length(); ++i) {
+ Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
+}
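+// LinkModuleFunctions()/LinkImports() patch call sites with SKIP_ICACHE_FLUSH,
+// so Instantiate() calls this afterwards to make the patched instructions
+// visible to the instruction cache.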
+
} // namespace
-WasmModule::WasmModule()
- : shared_isolate(nullptr),
- module_start(nullptr),
+WasmModule::WasmModule(byte* module_start)
+ : module_start(module_start),
module_end(nullptr),
min_mem_pages(0),
max_mem_pages(0),
mem_export(false),
mem_external(false),
start_function_index(-1),
- origin(kWasmOrigin) {}
-
-static MaybeHandle<JSFunction> ReportFFIError(ErrorThrower& thrower,
- const char* error, uint32_t index,
- wasm::WasmName module_name,
- wasm::WasmName function_name) {
- if (function_name.name) {
+ origin(kWasmOrigin),
+ globals_size(0),
+ pending_tasks(new base::Semaphore(0)) {}
+
+static MaybeHandle<JSFunction> ReportFFIError(
+ ErrorThrower& thrower, const char* error, uint32_t index,
+ Handle<String> module_name, MaybeHandle<String> function_name) {
+ Handle<String> function_name_handle;
+ if (function_name.ToHandle(&function_name_handle)) {
thrower.Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
- index, module_name.length, module_name.name,
- function_name.length, function_name.name, error);
+ index, module_name->length(), module_name->ToCString().get(),
+ function_name_handle->length(),
+ function_name_handle->ToCString().get(), error);
} else {
thrower.Error("Import #%d module=\"%.*s\" error: %s", index,
- module_name.length, module_name.name, error);
+ module_name->length(), module_name->ToCString().get(), error);
}
thrower.Error("Import ");
return MaybeHandle<JSFunction>();
}
-static MaybeHandle<JSFunction> LookupFunction(
- ErrorThrower& thrower, Factory* factory, Handle<JSObject> ffi,
- uint32_t index, wasm::WasmName module_name, wasm::WasmName function_name) {
+static MaybeHandle<JSReceiver> LookupFunction(
+ ErrorThrower& thrower, Factory* factory, Handle<JSReceiver> ffi,
+ uint32_t index, Handle<String> module_name,
+ MaybeHandle<String> function_name) {
if (ffi.is_null()) {
return ReportFFIError(thrower, "FFI is not an object", index, module_name,
function_name);
}
// Look up the module first.
- Handle<String> name = factory->InternalizeUtf8String(
- Vector<const char>(module_name.name, module_name.length));
- MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+ MaybeHandle<Object> result = Object::GetProperty(ffi, module_name);
if (result.is_null()) {
return ReportFFIError(thrower, "module not found", index, module_name,
function_name);
@@ -351,11 +495,10 @@ static MaybeHandle<JSFunction> LookupFunction(
}
Handle<Object> function;
- if (function_name.name) {
+ if (!function_name.is_null()) {
// Look up the function in the module.
- Handle<String> name = factory->InternalizeUtf8String(
- Vector<const char>(function_name.name, function_name.length));
- MaybeHandle<Object> result = Object::GetProperty(module, name);
+ MaybeHandle<Object> result =
+ Object::GetProperty(module, function_name.ToHandleChecked());
if (result.is_null()) {
return ReportFFIError(thrower, "function not found", index, module_name,
function_name);
@@ -366,209 +509,915 @@ static MaybeHandle<JSFunction> LookupFunction(
function = module;
}
- if (!function->IsJSFunction()) {
- return ReportFFIError(thrower, "not a function", index, module_name,
+ if (!function->IsCallable()) {
+ return ReportFFIError(thrower, "not a callable", index, module_name,
function_name);
}
- return Handle<JSFunction>::cast(function);
+ return Handle<JSReceiver>::cast(function);
}
-// Instantiates a wasm module as a JSObject.
-// * allocates a backing store of {mem_size} bytes.
-// * installs a named property "memory" for that buffer if exported
-// * installs named properties on the object for exported functions
-// * compiles wasm code to machine code
-MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
- Handle<JSObject> ffi,
- Handle<JSArrayBuffer> memory) {
- this->shared_isolate = isolate; // TODO(titzer): have a real shared isolate.
- ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
- Factory* factory = isolate->factory();
+namespace {
+// Fetches the compilation unit of a wasm function and executes its parallel
+// phase.
+bool FetchAndExecuteCompilationUnit(
+ Isolate* isolate,
+ std::vector<compiler::WasmCompilationUnit*>* compilation_units,
+ std::queue<compiler::WasmCompilationUnit*>* executed_units,
+ base::Mutex* result_mutex, base::AtomicNumber<size_t>* next_unit) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
+
+  // Subtract 1 because AtomicNumber::Increment() returns the value after the
+  // atomic increment.
+ size_t index = next_unit->Increment(1) - 1;
+ if (index >= compilation_units->size()) {
+ return false;
+ }
- //-------------------------------------------------------------------------
- // Allocate the instance and its JS counterpart.
- //-------------------------------------------------------------------------
- Handle<Map> map = factory->NewMap(
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
- WasmModuleInstance instance(this);
- instance.context = isolate->native_context();
- instance.js_object = factory->NewJSObjectFromMap(map, TENURED);
- Handle<FixedArray> code_table =
- factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
- instance.js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
+ compiler::WasmCompilationUnit* unit = compilation_units->at(index);
+ if (unit != nullptr) {
+ unit->ExecuteCompilation();
+ {
+ base::LockGuard<base::Mutex> guard(result_mutex);
+ executed_units->push(unit);
+ }
+ }
+ return true;
+}
- //-------------------------------------------------------------------------
- // Allocate and initialize the linear memory.
- //-------------------------------------------------------------------------
- if (memory.is_null()) {
- if (!AllocateMemory(&thrower, isolate, &instance)) {
- return MaybeHandle<JSObject>();
+class WasmCompilationTask : public CancelableTask {
+ public:
+ WasmCompilationTask(
+ Isolate* isolate,
+ std::vector<compiler::WasmCompilationUnit*>* compilation_units,
+ std::queue<compiler::WasmCompilationUnit*>* executed_units,
+ base::Semaphore* on_finished, base::Mutex* result_mutex,
+ base::AtomicNumber<size_t>* next_unit)
+ : CancelableTask(isolate),
+ isolate_(isolate),
+ compilation_units_(compilation_units),
+ executed_units_(executed_units),
+ on_finished_(on_finished),
+ result_mutex_(result_mutex),
+ next_unit_(next_unit) {}
+
+ void RunInternal() override {
+ while (FetchAndExecuteCompilationUnit(isolate_, compilation_units_,
+ executed_units_, result_mutex_,
+ next_unit_)) {
}
- } else {
- SetMemory(&instance, memory);
+ on_finished_->Signal();
}
- instance.js_object->SetInternalField(kWasmMemArrayBuffer,
- *instance.mem_buffer);
- LoadDataSegments(this, instance.mem_start, instance.mem_size);
- //-------------------------------------------------------------------------
- // Allocate the globals area if necessary.
- //-------------------------------------------------------------------------
- if (!AllocateGlobals(&thrower, isolate, &instance)) {
- return MaybeHandle<JSObject>();
+ Isolate* isolate_;
+ std::vector<compiler::WasmCompilationUnit*>* compilation_units_;
+ std::queue<compiler::WasmCompilationUnit*>* executed_units_;
+ base::Semaphore* on_finished_;
+ base::Mutex* result_mutex_;
+ base::AtomicNumber<size_t>* next_unit_;
+};
+
+static void RecordStats(Isolate* isolate, Code* code) {
+ isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ code->relocation_info()->length());
+}
+
+static void RecordStats(Isolate* isolate,
+ const std::vector<Handle<Code>>& functions) {
+ for (Handle<Code> c : functions) RecordStats(isolate, *c);
+}
+
+static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
+ DisallowHeapAllocation no_gc;
+ for (int i = 0; i < functions->length(); ++i) {
+ RecordStats(isolate, Code::cast(functions->get(i)));
}
- if (!instance.globals_buffer.is_null()) {
- instance.js_object->SetInternalField(kWasmGlobalsArrayBuffer,
- *instance.globals_buffer);
+}
+
+Handle<FixedArray> GetImportsMetadata(Factory* factory,
+ const WasmModule* module) {
+ Handle<FixedArray> ret = factory->NewFixedArray(
+ static_cast<int>(module->import_table.size()), TENURED);
+ for (size_t i = 0; i < module->import_table.size(); ++i) {
+ const WasmImport& import = module->import_table[i];
+ WasmName module_name = module->GetNameOrNull(import.module_name_offset,
+ import.module_name_length);
+ WasmName function_name = module->GetNameOrNull(import.function_name_offset,
+ import.function_name_length);
+
+ Handle<String> module_name_string =
+ factory->InternalizeUtf8String(module_name);
+ Handle<String> function_name_string =
+ function_name.is_empty()
+ ? Handle<String>::null()
+ : factory->InternalizeUtf8String(function_name);
+ Handle<ByteArray> sig =
+ factory->NewByteArray(static_cast<int>(import.sig->parameter_count() +
+ import.sig->return_count()),
+ TENURED);
+ sig->copy_in(0, reinterpret_cast<const byte*>(import.sig->raw_data()),
+ sig->length());
+ Handle<FixedArray> encoded_import =
+ factory->NewFixedArray(kWasmImportDataTableSize, TENURED);
+ encoded_import->set(kModuleName, *module_name_string);
+ if (!function_name_string.is_null()) {
+ encoded_import->set(kFunctionName, *function_name_string);
+ }
+ encoded_import->set(
+ kOutputCount,
+ Smi::FromInt(static_cast<int>(import.sig->return_count())));
+ encoded_import->set(kSignature, *sig);
+ ret->set(static_cast<int>(i), *encoded_import);
}
+ return ret;
+}
- //-------------------------------------------------------------------------
- // Compile wrappers to imported functions.
- //-------------------------------------------------------------------------
- uint32_t index = 0;
- instance.function_table = BuildFunctionTable(isolate, this);
- WasmLinker linker(isolate, functions.size());
- ModuleEnv module_env;
- module_env.module = this;
- module_env.instance = &instance;
- module_env.linker = &linker;
- module_env.origin = origin;
+bool CompileWrappersToImportedFunctions(Isolate* isolate,
+ const Handle<JSReceiver> ffi,
+ std::vector<Handle<Code>>& imports,
+ Handle<FixedArray> import_data,
+ ErrorThrower* thrower) {
+ uint32_t import_count = static_cast<uint32_t>(import_data->length());
+ if (import_count > 0) {
+ imports.reserve(import_count);
+ for (uint32_t index = 0; index < import_count; ++index) {
+ Handle<FixedArray> data =
+ import_data->GetValueChecked<FixedArray>(isolate, index);
+ Handle<String> module_name =
+ data->GetValueChecked<String>(isolate, kModuleName);
+ MaybeHandle<String> function_name =
+ data->GetValue<String>(isolate, kFunctionName);
+
+      // TODO(mtrofin): this is actually a uint32_t. We should rationalize
+      // it when we rationalize signed/unsigned stuff.
+      int ret_count = Smi::cast(data->get(kOutputCount))->value();
+      CHECK_GE(ret_count, 0);
+      Handle<ByteArray> sig_data =
+          data->GetValueChecked<ByteArray>(isolate, kSignature);
+      int sig_data_size = sig_data->length();
+      int param_count = sig_data_size - ret_count;
+      CHECK_GE(param_count, 0);
+
+ MaybeHandle<JSReceiver> function = LookupFunction(
+ *thrower, isolate->factory(), ffi, index, module_name, function_name);
+ if (function.is_null()) return false;
+ Handle<Code> code;
+ Handle<JSReceiver> target = function.ToHandleChecked();
+      bool is_match = false;
+ Handle<Code> export_wrapper_code;
+ if (target->IsJSFunction()) {
+ Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+ export_wrapper_code = handle(func->code());
+ if (export_wrapper_code->kind() == Code::JS_TO_WASM_FUNCTION) {
+ int exported_param_count =
+ Smi::cast(func->GetInternalField(kInternalArity))->value();
+          Handle<ByteArray> exported_sig = Handle<ByteArray>(
+              ByteArray::cast(func->GetInternalField(kInternalSignature)));
+          if (exported_param_count == param_count &&
+              exported_sig->length() == sig_data->length() &&
+              memcmp(exported_sig->data(), sig_data->data(),
+                     exported_sig->length()) == 0) {
+            is_match = true;
+ }
+ }
+ }
+      if (is_match) {
+ int wasm_count = 0;
+ int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ for (RelocIterator it(*export_wrapper_code, mask); !it.done();
+ it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == Code::WASM_FUNCTION) {
+ ++wasm_count;
+ code = handle(target);
+ }
+ }
+        DCHECK_EQ(1, wasm_count);
+ } else {
+        // Copy the signature to avoid holding a raw pointer into a heap
+        // object across a possible GC.
+ Zone zone(isolate->allocator());
+ MachineRepresentation* reps =
+ zone.NewArray<MachineRepresentation>(sig_data_size);
+ memcpy(reps, sig_data->data(),
+ sizeof(MachineRepresentation) * sig_data_size);
+ FunctionSig sig(ret_count, param_count, reps);
+
+ code = compiler::CompileWasmToJSWrapper(isolate, target, &sig, index,
+ module_name, function_name);
+ }
+ imports.push_back(code);
+ }
+ }
+ return true;
+}
- if (import_table.size() > 0) {
- instance.import_code.reserve(import_table.size());
- for (const WasmImport& import : import_table) {
- WasmName module_name =
- GetNameOrNull(import.module_name_offset, import.module_name_length);
- WasmName function_name = GetNameOrNull(import.function_name_offset,
- import.function_name_length);
- MaybeHandle<JSFunction> function = LookupFunction(
- thrower, factory, ffi, index, module_name, function_name);
- if (function.is_null()) return MaybeHandle<JSObject>();
- Handle<Code> code = compiler::CompileWasmToJSWrapper(
- isolate, &module_env, function.ToHandleChecked(), import.sig,
- module_name, function_name);
- instance.import_code.push_back(code);
- index++;
+void InitializeParallelCompilation(
+ Isolate* isolate, const std::vector<WasmFunction>& functions,
+ std::vector<compiler::WasmCompilationUnit*>& compilation_units,
+ ModuleEnv& module_env, ErrorThrower& thrower) {
+ for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
+ compilation_units[i] = new compiler::WasmCompilationUnit(
+ &thrower, isolate, &module_env, &functions[i], i);
+ }
+}
+
+uint32_t* StartCompilationTasks(
+ Isolate* isolate,
+ std::vector<compiler::WasmCompilationUnit*>& compilation_units,
+ std::queue<compiler::WasmCompilationUnit*>& executed_units,
+ base::Semaphore* pending_tasks, base::Mutex& result_mutex,
+ base::AtomicNumber<size_t>& next_unit) {
+ const size_t num_tasks =
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+ uint32_t* task_ids = new uint32_t[num_tasks];
+ for (size_t i = 0; i < num_tasks; ++i) {
+ WasmCompilationTask* task =
+ new WasmCompilationTask(isolate, &compilation_units, &executed_units,
+ pending_tasks, &result_mutex, &next_unit);
+ task_ids[i] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+ return task_ids;
+}
+
+void WaitForCompilationTasks(Isolate* isolate, uint32_t* task_ids,
+ base::Semaphore* pending_tasks) {
+ const size_t num_tasks =
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+ for (size_t i = 0; i < num_tasks; ++i) {
+ // If the task has not started yet, then we abort it. Otherwise we wait for
+ // it to finish.
+ if (!isolate->cancelable_task_manager()->TryAbort(task_ids[i])) {
+ pending_tasks->Wait();
}
}
+}
- //-------------------------------------------------------------------------
- // Compile all functions in the module.
- //-------------------------------------------------------------------------
+void FinishCompilationUnits(
+ std::queue<compiler::WasmCompilationUnit*>& executed_units,
+ std::vector<Handle<Code>>& results, base::Mutex& result_mutex) {
+ while (true) {
+ compiler::WasmCompilationUnit* unit = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&result_mutex);
+ if (executed_units.empty()) {
+ break;
+ }
+ unit = executed_units.front();
+ executed_units.pop();
+ }
+ int j = unit->index();
+ results[j] = unit->FinishCompilation();
+ delete unit;
+ }
+}
- // First pass: compile each function and initialize the code table.
- index = FLAG_skip_compiling_wasm_funcs;
- while (index < functions.size()) {
- const WasmFunction& func = functions[index];
- if (thrower.error()) break;
- DCHECK_EQ(index, func.func_index);
-
- WasmName str = GetName(func.name_offset, func.name_length);
- WasmName str_null = {nullptr, 0};
- Handle<String> name = factory->InternalizeUtf8String(
- Vector<const char>(str.name, str.length));
+void CompileInParallel(Isolate* isolate, const WasmModule* module,
+ std::vector<Handle<Code>>& functions,
+ ErrorThrower* thrower, ModuleEnv* module_env) {
+ // Data structures for the parallel compilation.
+ std::vector<compiler::WasmCompilationUnit*> compilation_units(
+ module->functions.size());
+ std::queue<compiler::WasmCompilationUnit*> executed_units;
+
+ //-----------------------------------------------------------------------
+ // For parallel compilation:
+ // 1) The main thread allocates a compilation unit for each wasm function
+ // and stores them in the vector {compilation_units}.
+ // 2) The main thread spawns {WasmCompilationTask} instances which run on
+ // the background threads.
+ // 3.a) The background threads and the main thread pick one compilation
+ // unit at a time and execute the parallel phase of the compilation
+ // unit. After finishing the execution of the parallel phase, the
+ // result is enqueued in {executed_units}.
+ // 3.b) If {executed_units} contains a compilation unit, the main thread
+ // dequeues it and finishes the compilation.
+ // 4) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {WasmCompilationTask} instances to finish.
+ // 5) The main thread finishes the compilation.
+
+ // Turn on the {CanonicalHandleScope} so that the background threads can
+ // use the node cache.
+ CanonicalHandleScope canonical(isolate);
+
+ // 1) The main thread allocates a compilation unit for each wasm function
+ // and stores them in the vector {compilation_units}.
+ InitializeParallelCompilation(isolate, module->functions, compilation_units,
+ *module_env, *thrower);
+
+ // Objects for the synchronization with the background threads.
+ base::Mutex result_mutex;
+ base::AtomicNumber<size_t> next_unit(
+ static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
+
+ // 2) The main thread spawns {WasmCompilationTask} instances which run on
+ // the background threads.
+ std::unique_ptr<uint32_t[]> task_ids(StartCompilationTasks(
+ isolate, compilation_units, executed_units, module->pending_tasks.get(),
+ result_mutex, next_unit));
+
+ // 3.a) The background threads and the main thread pick one compilation
+ // unit at a time and execute the parallel phase of the compilation
+ // unit. After finishing the execution of the parallel phase, the
+ // result is enqueued in {executed_units}.
+ while (FetchAndExecuteCompilationUnit(isolate, &compilation_units,
+ &executed_units, &result_mutex,
+ &next_unit)) {
+ // 3.b) If {executed_units} contains a compilation unit, the main thread
+ // dequeues it and finishes the compilation unit. Compilation units
+ // are finished concurrently to the background threads to save
+ // memory.
+ FinishCompilationUnits(executed_units, functions, result_mutex);
+ }
+ // 4) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {WasmCompilationTask} instances to finish.
+ WaitForCompilationTasks(isolate, task_ids.get(), module->pending_tasks.get());
+ // Finish the compilation of the remaining compilation units.
+ FinishCompilationUnits(executed_units, functions, result_mutex);
+}
+
+void CompileSequentially(Isolate* isolate, const WasmModule* module,
+ std::vector<Handle<Code>>& functions,
+ ErrorThrower* thrower, ModuleEnv* module_env) {
+ DCHECK(!thrower->error());
+
+ for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
+ i < module->functions.size(); ++i) {
+ const WasmFunction& func = module->functions[i];
+
+ DCHECK_EQ(i, func.func_index);
+ WasmName str = module->GetName(func.name_offset, func.name_length);
Handle<Code> code = Handle<Code>::null();
- Handle<JSFunction> function = Handle<JSFunction>::null();
- if (func.external) {
- // Lookup external function in FFI object.
- MaybeHandle<JSFunction> function =
- LookupFunction(thrower, factory, ffi, index, str, str_null);
- if (function.is_null()) return MaybeHandle<JSObject>();
- code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
- function.ToHandleChecked(),
- func.sig, str, str_null);
- } else {
- // Compile the function.
- code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
- if (code.is_null()) {
- thrower.Error("Compilation of #%d:%.*s failed.", index, str.length,
- str.name);
- return MaybeHandle<JSObject>();
- }
- if (func.exported) {
- function = compiler::CompileJSToWasmWrapper(
- isolate, &module_env, name, code, instance.js_object, index);
- }
+ // Compile the function.
+ code = compiler::WasmCompilationUnit::CompileWasmFunction(
+ thrower, isolate, module_env, &func);
+ if (code.is_null()) {
+ thrower->Error("Compilation of #%d:%.*s failed.", i, str.length(),
+ str.start());
+ break;
}
- if (!code.is_null()) {
// Install the code into the linker table.
- linker.Finish(index, code);
- code_table->set(index, *code);
+ functions[i] = code;
+ }
+}
+
+void SetDebugSupport(Factory* factory, Handle<FixedArray> compiled_module,
+ Handle<JSObject> js_object) {
+ Isolate* isolate = compiled_module->GetIsolate();
+ MaybeHandle<String> module_bytes_string =
+ compiled_module->GetValue<String>(isolate, kModuleBytes);
+ if (!module_bytes_string.is_null()) {
+ js_object->SetInternalField(kWasmModuleBytesString,
+ *module_bytes_string.ToHandleChecked());
+ }
+ Handle<FixedArray> functions = Handle<FixedArray>(
+ FixedArray::cast(js_object->GetInternalField(kWasmModuleCodeTable)));
+
+ for (int i = FLAG_skip_compiling_wasm_funcs; i < functions->length(); ++i) {
+ Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
+ DCHECK(code->deoptimization_data() == nullptr ||
+ code->deoptimization_data()->length() == 0);
+ Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
+ if (!js_object.is_null()) {
+ deopt_data->set(0, *js_object);
}
- if (func.exported) {
- // Exported functions are installed as read-only properties on the module.
- JSObject::AddProperty(instance.js_object, name, function, READ_ONLY);
+ deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
+ deopt_data->set_length(2);
+ code->set_deoptimization_data(*deopt_data);
+ }
+
+ MaybeHandle<ByteArray> function_name_table =
+ compiled_module->GetValue<ByteArray>(isolate, kFunctionNameTable);
+ if (!function_name_table.is_null()) {
+ js_object->SetInternalField(kWasmFunctionNamesArray,
+ *function_name_table.ToHandleChecked());
+ }
+}
+
+bool SetupGlobals(Isolate* isolate, Handle<FixedArray> compiled_module,
+ Handle<JSObject> instance, ErrorThrower* thrower) {
+ uint32_t globals_size = static_cast<uint32_t>(
+ Smi::cast(compiled_module->get(kGlobalsSize))->value());
+ if (globals_size > 0) {
+ Handle<JSArrayBuffer> globals_buffer =
+ NewArrayBuffer(isolate, globals_size);
+ if (globals_buffer.is_null()) {
+ thrower->Error("Out of memory: wasm globals");
+ return false;
+ }
+ RelocateGlobals(instance,
+ static_cast<Address>(globals_buffer->backing_store()));
+ instance->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
+ }
+ return true;
+}
+
+bool SetupInstanceHeap(Isolate* isolate, Handle<FixedArray> compiled_module,
+ Handle<JSObject> instance, Handle<JSArrayBuffer> memory,
+ ErrorThrower* thrower) {
+ uint32_t min_mem_pages = static_cast<uint32_t>(
+ Smi::cast(compiled_module->get(kMinRequiredMemory))->value());
+ isolate->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
+ // TODO(wasm): re-enable counter for max_mem_pages when we use that field.
+
+ if (memory.is_null() && min_mem_pages > 0) {
+ memory = AllocateMemory(thrower, isolate, min_mem_pages);
+ if (memory.is_null()) {
+ return false;
}
- index++;
}
- // Second pass: patch all direct call sites.
- linker.Link(instance.function_table, this->function_table);
- instance.js_object->SetInternalField(kWasmModuleFunctionTable,
- Smi::FromInt(0));
+ if (!memory.is_null()) {
+ instance->SetInternalField(kWasmMemArrayBuffer, *memory);
+ Address mem_start = static_cast<Address>(memory->backing_store());
+ uint32_t mem_size = static_cast<uint32_t>(memory->byte_length()->Number());
+ RelocateInstanceCode(instance, mem_start,
+ WasmModule::kPageSize * min_mem_pages, mem_size);
+ LoadDataSegments(compiled_module, mem_start, mem_size);
+ }
+ return true;
+}
+bool SetupImports(Isolate* isolate, Handle<FixedArray> compiled_module,
+ Handle<JSObject> instance, ErrorThrower* thrower,
+ Handle<JSReceiver> ffi) {
//-------------------------------------------------------------------------
- // Create and populate the exports object.
+ // Compile wrappers to imported functions.
//-------------------------------------------------------------------------
- if (export_table.size() > 0 || mem_export) {
- index = 0;
- // Create the "exports" object.
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate->native_context()->object_function(), isolate);
- Handle<JSObject> exports_object =
- factory->NewJSObject(object_function, TENURED);
- Handle<String> exports_name = factory->InternalizeUtf8String("exports");
- JSObject::AddProperty(instance.js_object, exports_name, exports_object,
- READ_ONLY);
-
- // Compile wrappers and add them to the exports object.
- for (const WasmExport& exp : export_table) {
- if (thrower.error()) break;
- WasmName str = GetName(exp.name_offset, exp.name_length);
- Handle<String> name = factory->InternalizeUtf8String(
- Vector<const char>(str.name, str.length));
- Handle<Code> code = linker.GetFunctionCode(exp.func_index);
- Handle<JSFunction> function = compiler::CompileJSToWasmWrapper(
- isolate, &module_env, name, code, instance.js_object, exp.func_index);
- JSObject::AddProperty(exports_object, name, function, READ_ONLY);
+ std::vector<Handle<Code>> import_code;
+ MaybeHandle<FixedArray> maybe_import_data =
+ compiled_module->GetValue<FixedArray>(isolate, kImportData);
+ Handle<FixedArray> import_data;
+ if (maybe_import_data.ToHandle(&import_data)) {
+ if (!CompileWrappersToImportedFunctions(isolate, ffi, import_code,
+ import_data, thrower)) {
+ return false;
}
+ }
+ RecordStats(isolate, import_code);
+
+ Handle<FixedArray> code_table = Handle<FixedArray>(
+ FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
+  // TODO(mtrofin): move the code off std::vector and onto FixedArray, for
+  // consistency.
+ std::vector<Handle<Code>> function_code(code_table->length());
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
+ function_code[i] = code;
+ }
+
+ LinkImports(isolate, function_code, import_code);
+ return true;
+}
+
+bool SetupExportsObject(Handle<FixedArray> compiled_module, Isolate* isolate,
+ Handle<JSObject> instance, ErrorThrower* thrower) {
+ Factory* factory = isolate->factory();
+ bool mem_export =
+ static_cast<bool>(Smi::cast(compiled_module->get(kExportMem))->value());
+ ModuleOrigin origin = static_cast<ModuleOrigin>(
+ Smi::cast(compiled_module->get(kOrigin))->value());
+
+ MaybeHandle<FixedArray> maybe_exports =
+ compiled_module->GetValue<FixedArray>(isolate, kExports);
+ if (!maybe_exports.is_null() || mem_export) {
+ PropertyDescriptor desc;
+ desc.set_writable(false);
+
+ Handle<JSObject> exports_object = instance;
+ if (origin == kWasmOrigin) {
+ // Create the "exports" object.
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate->native_context()->object_function(), isolate);
+ exports_object = factory->NewJSObject(object_function, TENURED);
+ Handle<String> exports_name = factory->InternalizeUtf8String("exports");
+ JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+ }
+ Handle<FixedArray> exports;
+ if (maybe_exports.ToHandle(&exports)) {
+ int exports_size = exports->length();
+ for (int i = 0; i < exports_size; ++i) {
+ if (thrower->error()) return false;
+ Handle<FixedArray> export_metadata =
+ exports->GetValueChecked<FixedArray>(isolate, i);
+ Handle<Code> export_code =
+ export_metadata->GetValueChecked<Code>(isolate, kExportCode);
+ RecordStats(isolate, *export_code);
+ Handle<String> name =
+ export_metadata->GetValueChecked<String>(isolate, kExportName);
+ int arity = Smi::cast(export_metadata->get(kExportArity))->value();
+ MaybeHandle<ByteArray> signature =
+ export_metadata->GetValue<ByteArray>(isolate, kExportedSignature);
+ Handle<JSFunction> function = WrapExportCodeAsJSFunction(
+ isolate, export_code, name, arity, signature, instance);
+ desc.set_value(function);
+ Maybe<bool> status = JSReceiver::DefineOwnProperty(
+ isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
+ if (!status.IsJust()) {
+ thrower->Error("export of %.*s failed.", name->length(),
+ name->ToCString().get());
+ return false;
+ }
+ }
+ }
if (mem_export) {
// Export the memory as a named property.
Handle<String> name = factory->InternalizeUtf8String("memory");
- JSObject::AddProperty(exports_object, name, instance.mem_buffer,
- READ_ONLY);
+ Handle<JSArrayBuffer> memory = Handle<JSArrayBuffer>(
+ JSArrayBuffer::cast(instance->GetInternalField(kWasmMemArrayBuffer)));
+ JSObject::AddProperty(exports_object, name, memory, READ_ONLY);
}
}
+ return true;
+}
- // Run the start function if one was specified.
- if (this->start_function_index >= 0) {
+} // namespace
+
+MaybeHandle<FixedArray> WasmModule::CompileFunctions(
+ Isolate* isolate, ErrorThrower* thrower) const {
+ Factory* factory = isolate->factory();
+
+ MaybeHandle<FixedArray> nothing;
+
+ WasmModuleInstance temp_instance_for_compilation(this);
+ temp_instance_for_compilation.context = isolate->native_context();
+ temp_instance_for_compilation.mem_size = GetMinModuleMemSize(this);
+ temp_instance_for_compilation.mem_start = nullptr;
+ temp_instance_for_compilation.globals_start = nullptr;
+
+ MaybeHandle<FixedArray> indirect_table =
+ function_tables.size()
+ ? factory->NewFixedArray(static_cast<int>(function_tables.size()),
+ TENURED)
+ : MaybeHandle<FixedArray>();
+ for (uint32_t i = 0; i < function_tables.size(); ++i) {
+ Handle<FixedArray> values = wasm::BuildFunctionTable(isolate, i, this);
+ temp_instance_for_compilation.function_tables[i] = values;
+
+ Handle<FixedArray> metadata = isolate->factory()->NewFixedArray(
+ kWasmIndirectFunctionTableMetadataSize, TENURED);
+ metadata->set(kSize, Smi::FromInt(function_tables[i].size));
+ metadata->set(kTable, *values);
+ indirect_table.ToHandleChecked()->set(i, *metadata);
+ }
+
+ HistogramTimerScope wasm_compile_module_time_scope(
+ isolate->counters()->wasm_compile_module_time());
+
+ ModuleEnv module_env;
+ module_env.module = this;
+ module_env.instance = &temp_instance_for_compilation;
+ module_env.origin = origin;
+ InitializePlaceholders(factory, &module_env.placeholders, functions.size());
+
+ Handle<FixedArray> compiled_functions =
+ factory->NewFixedArray(static_cast<int>(functions.size()), TENURED);
+
+ temp_instance_for_compilation.import_code.resize(import_table.size());
+ for (uint32_t i = 0; i < import_table.size(); ++i) {
+ temp_instance_for_compilation.import_code[i] =
+ CreatePlaceholder(factory, i, Code::WASM_TO_JS_FUNCTION);
+ }
+ isolate->counters()->wasm_functions_per_module()->AddSample(
+ static_cast<int>(functions.size()));
+ if (FLAG_wasm_num_compilation_tasks != 0) {
+ CompileInParallel(isolate, this,
+ temp_instance_for_compilation.function_code, thrower,
+ &module_env);
+ } else {
+ CompileSequentially(isolate, this,
+ temp_instance_for_compilation.function_code, thrower,
+ &module_env);
+ }
+ if (thrower->error()) return nothing;
+
+ // At this point, compilation has completed. Update the code table.
+ for (size_t i = FLAG_skip_compiling_wasm_funcs;
+ i < temp_instance_for_compilation.function_code.size(); ++i) {
+ Code* code = *temp_instance_for_compilation.function_code[i];
+ compiled_functions->set(static_cast<int>(i), code);
+ }
+
+ // Create the compiled module object, and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of this
+ // object.
+ Handle<FixedArray> ret =
+ factory->NewFixedArray(kCompiledWasmObjectTableSize, TENURED);
+ ret->set(kFunctions, *compiled_functions);
+ if (!indirect_table.is_null()) {
+ ret->set(kTableOfIndirectFunctionTables, *indirect_table.ToHandleChecked());
+ }
+ Handle<FixedArray> import_data = GetImportsMetadata(factory, this);
+ ret->set(kImportData, *import_data);
+
+ // Compile export functions.
+ int export_size = static_cast<int>(export_table.size());
+ Handle<Code> startup_fct;
+ if (export_size > 0) {
+ Handle<FixedArray> exports = factory->NewFixedArray(export_size, TENURED);
+ for (int i = 0; i < export_size; ++i) {
+ Handle<FixedArray> export_metadata =
+ factory->NewFixedArray(kWasmExportMetadataTableSize, TENURED);
+ const WasmExport& exp = export_table[i];
+      FunctionSig* func_sig = functions[exp.func_index].sig;
+      Handle<ByteArray> exported_sig =
+          factory->NewByteArray(static_cast<int>(func_sig->parameter_count() +
+                                                 func_sig->return_count()),
+                                TENURED);
+      exported_sig->copy_in(0,
+                            reinterpret_cast<const byte*>(func_sig->raw_data()),
+                            exported_sig->length());
+      export_metadata->set(kExportedSignature, *exported_sig);
+ WasmName str = GetName(exp.name_offset, exp.name_length);
+ Handle<String> name = factory->InternalizeUtf8String(str);
+ Handle<Code> code =
+ temp_instance_for_compilation.function_code[exp.func_index];
+ Handle<Code> export_code = compiler::CompileJSToWasmWrapper(
+ isolate, &module_env, code, exp.func_index);
+ if (thrower->error()) return nothing;
+ export_metadata->set(kExportCode, *export_code);
+ export_metadata->set(kExportName, *name);
+ export_metadata->set(
+ kExportArity, Smi::FromInt(static_cast<int>(
+ functions[exp.func_index].sig->parameter_count())));
+ export_metadata->set(kExportedFunctionIndex,
+ Smi::FromInt(static_cast<int>(exp.func_index)));
+ exports->set(i, *export_metadata);
+ if (exp.func_index == start_function_index) {
+ startup_fct = export_code;
+ }
+ }
+ ret->set(kExports, *exports);
+ }
+
+ // Compile startup function, if we haven't already.
+ if (start_function_index >= 0) {
+ uint32_t index = static_cast<uint32_t>(start_function_index);
HandleScope scope(isolate);
- uint32_t index = static_cast<uint32_t>(this->start_function_index);
- Handle<String> name = isolate->factory()->NewStringFromStaticChars("start");
- Handle<Code> code = linker.GetFunctionCode(index);
- Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
- isolate, &module_env, name, code, instance.js_object, index);
+ if (startup_fct.is_null()) {
+ Handle<Code> code = temp_instance_for_compilation.function_code[index];
+ DCHECK_EQ(0, functions[index].sig->parameter_count());
+ startup_fct =
+ compiler::CompileJSToWasmWrapper(isolate, &module_env, code, index);
+ }
+ Handle<FixedArray> metadata =
+ factory->NewFixedArray(kWasmExportMetadataTableSize, TENURED);
+ metadata->set(kExportCode, *startup_fct);
+ metadata->set(kExportArity, Smi::FromInt(0));
+ metadata->set(kExportedFunctionIndex, Smi::FromInt(start_function_index));
+ ret->set(kStartupFunction, *metadata);
+ }
+
+ // TODO(wasm): saving the module bytes for debugging is wasteful. We should
+ // consider downloading this on-demand.
+ {
+ size_t module_bytes_len = module_end - module_start;
+ DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
+ Vector<const uint8_t> module_bytes_vec(module_start,
+ static_cast<int>(module_bytes_len));
+ Handle<String> module_bytes_string =
+ factory->NewStringFromOneByte(module_bytes_vec, TENURED)
+ .ToHandleChecked();
+ ret->set(kModuleBytes, *module_bytes_string);
+ }
+ Handle<ByteArray> function_name_table =
+ BuildFunctionNamesTable(isolate, module_env.module);
+ ret->set(kFunctionNameTable, *function_name_table);
+ ret->set(kMinRequiredMemory, Smi::FromInt(min_mem_pages));
+ if (data_segments.size() > 0) SaveDataSegmentInfo(factory, this, ret);
+ ret->set(kGlobalsSize, Smi::FromInt(globals_size));
+ ret->set(kExportMem, Smi::FromInt(mem_export));
+ ret->set(kOrigin, Smi::FromInt(origin));
+ return ret;
+}
+
+void PatchJSWrapper(Isolate* isolate, Handle<Code> wrapper,
+ Handle<Code> new_target) {
+ AllowDeferredHandleDereference embedding_raw_address;
+ bool seen = false;
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+  for (RelocIterator it(*wrapper, mask); !it.done(); it.next()) {
+ Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target->kind() == Code::WASM_FUNCTION) {
+ DCHECK(!seen);
+ seen = true;
+ it.rinfo()->set_target_address(new_target->instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ }
+ }
+ CHECK(seen);
+ Assembler::FlushICache(isolate, wrapper->instruction_start(),
+ wrapper->instruction_size());
+}
+
+Handle<FixedArray> SetupIndirectFunctionTable(
+ Isolate* isolate, Handle<FixedArray> wasm_functions,
+ Handle<FixedArray> indirect_table_template) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> cloned_indirect_tables =
+ factory->CopyFixedArray(indirect_table_template);
+ for (int i = 0; i < cloned_indirect_tables->length(); ++i) {
+ Handle<FixedArray> orig_metadata =
+ cloned_indirect_tables->GetValueChecked<FixedArray>(isolate, i);
+ Handle<FixedArray> cloned_metadata = factory->CopyFixedArray(orig_metadata);
+ cloned_indirect_tables->set(i, *cloned_metadata);
+
+ Handle<FixedArray> orig_table =
+ cloned_metadata->GetValueChecked<FixedArray>(isolate, kTable);
+ Handle<FixedArray> cloned_table = factory->CopyFixedArray(orig_table);
+ cloned_metadata->set(kTable, *cloned_table);
+ // Patch the cloned code to refer to the cloned kTable.
+    for (int j = 0; j < wasm_functions->length(); ++j) {
+      Handle<Code> wasm_function =
+          wasm_functions->GetValueChecked<Code>(isolate, j);
+ PatchFunctionTable(wasm_function, orig_table, cloned_table);
+ }
+ }
+ return cloned_indirect_tables;
+}
+
+Handle<FixedArray> CloneModuleForInstance(Isolate* isolate,
+ Handle<FixedArray> original) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> clone = factory->CopyFixedArray(original);
+
+ // Clone each wasm code object.
+ Handle<FixedArray> orig_wasm_functions =
+ original->GetValueChecked<FixedArray>(isolate, kFunctions);
+ Handle<FixedArray> clone_wasm_functions =
+ factory->CopyFixedArray(orig_wasm_functions);
+ clone->set(kFunctions, *clone_wasm_functions);
+ for (int i = 0; i < clone_wasm_functions->length(); ++i) {
+ Handle<Code> orig_code =
+ clone_wasm_functions->GetValueChecked<Code>(isolate, i);
+ Handle<Code> cloned_code = factory->CopyCode(orig_code);
+ clone_wasm_functions->set(i, *cloned_code);
+ }
+
+ MaybeHandle<FixedArray> maybe_orig_exports =
+ original->GetValue<FixedArray>(isolate, kExports);
+ Handle<FixedArray> orig_exports;
+ if (maybe_orig_exports.ToHandle(&orig_exports)) {
+ Handle<FixedArray> cloned_exports = factory->CopyFixedArray(orig_exports);
+ clone->set(kExports, *cloned_exports);
+ for (int i = 0; i < orig_exports->length(); ++i) {
+ Handle<FixedArray> export_metadata =
+ orig_exports->GetValueChecked<FixedArray>(isolate, i);
+ Handle<FixedArray> clone_metadata =
+ factory->CopyFixedArray(export_metadata);
+ cloned_exports->set(i, *clone_metadata);
+ Handle<Code> orig_code =
+ export_metadata->GetValueChecked<Code>(isolate, kExportCode);
+ Handle<Code> cloned_code = factory->CopyCode(orig_code);
+ clone_metadata->set(kExportCode, *cloned_code);
+      // TODO(wasm): This is actually a uint32_t, but since FixedArray is
+      // indexed with int, we risk storing invalid values here.
+ int exported_fct_index =
+ Smi::cast(export_metadata->get(kExportedFunctionIndex))->value();
+ CHECK_GE(exported_fct_index, 0);
+ CHECK_LT(exported_fct_index, clone_wasm_functions->length());
+ Handle<Code> new_target = clone_wasm_functions->GetValueChecked<Code>(
+ isolate, exported_fct_index);
+ PatchJSWrapper(isolate, cloned_code, new_target);
+ }
+ }
+
+ MaybeHandle<FixedArray> maybe_startup =
+ original->GetValue<FixedArray>(isolate, kStartupFunction);
+ if (!maybe_startup.is_null()) {
+ Handle<FixedArray> startup_metadata =
+ factory->CopyFixedArray(maybe_startup.ToHandleChecked());
+ Handle<Code> startup_fct_clone = factory->CopyCode(
+ startup_metadata->GetValueChecked<Code>(isolate, kExportCode));
+ startup_metadata->set(kExportCode, *startup_fct_clone);
+ clone->set(kStartupFunction, *startup_metadata);
+    // TODO(wasm): see the TODO above about int vs uint32_t indexing in
+    // FixedArray.
+ int startup_fct_index =
+ Smi::cast(startup_metadata->get(kExportedFunctionIndex))->value();
+ CHECK_GE(startup_fct_index, 0);
+ CHECK_LT(startup_fct_index, clone_wasm_functions->length());
+ Handle<Code> new_target =
+ clone_wasm_functions->GetValueChecked<Code>(isolate, startup_fct_index);
+ PatchJSWrapper(isolate, startup_fct_clone, new_target);
+ }
+ return clone;
+}
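+// Cloning gives each instance its own copy of the code objects, so the
+// per-instance patching in Instantiate() below (memory, globals, indirect
+// tables) does not modify the original compiled module.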
+
+// Instantiates a wasm module as a JSObject.
+// * allocates a backing store of {mem_size} bytes.
+// * installs a named property "memory" for that buffer if exported
+// * installs named properties on the object for exported functions
+// * compiles wasm code to machine code
+MaybeHandle<JSObject> WasmModule::Instantiate(
+ Isolate* isolate, Handle<FixedArray> compiled_module,
+ Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory) {
+ HistogramTimerScope wasm_instantiate_module_time_scope(
+ isolate->counters()->wasm_instantiate_module_time());
+ ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
+ Factory* factory = isolate->factory();
+
+ compiled_module = CloneModuleForInstance(isolate, compiled_module);
+
+ // These fields are compulsory.
+ Handle<FixedArray> code_table =
+ compiled_module->GetValueChecked<FixedArray>(isolate, kFunctions);
+
+ std::vector<Handle<Code>> functions(
+ static_cast<size_t>(code_table->length()));
+ for (int i = 0; i < code_table->length(); ++i) {
+ functions[static_cast<size_t>(i)] =
+ code_table->GetValueChecked<Code>(isolate, i);
+ }
+ LinkModuleFunctions(isolate, functions);
+
+ RecordStats(isolate, code_table);
+
+ MaybeHandle<JSObject> nothing;
+
+ Handle<Map> map = factory->NewMap(
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
+ Handle<JSObject> js_object = factory->NewJSObjectFromMap(map, TENURED);
+ js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
+
+ if (!(SetupInstanceHeap(isolate, compiled_module, js_object, memory,
+ &thrower) &&
+ SetupGlobals(isolate, compiled_module, js_object, &thrower) &&
+ SetupImports(isolate, compiled_module, js_object, &thrower, ffi) &&
+ SetupExportsObject(compiled_module, isolate, js_object, &thrower))) {
+ return nothing;
+ }
+
+ SetDebugSupport(factory, compiled_module, js_object);
+
+ FlushAssemblyCache(isolate, code_table);
+
+ MaybeHandle<FixedArray> maybe_indirect_tables =
+ compiled_module->GetValue<FixedArray>(isolate,
+ kTableOfIndirectFunctionTables);
+ Handle<FixedArray> indirect_tables_template;
+ if (maybe_indirect_tables.ToHandle(&indirect_tables_template)) {
+ Handle<FixedArray> indirect_tables = SetupIndirectFunctionTable(
+ isolate, code_table, indirect_tables_template);
+ for (int i = 0; i < indirect_tables->length(); ++i) {
+ Handle<FixedArray> metadata =
+ indirect_tables->GetValueChecked<FixedArray>(isolate, i);
+ uint32_t size = Smi::cast(metadata->get(kSize))->value();
+ Handle<FixedArray> table =
+ metadata->GetValueChecked<FixedArray>(isolate, kTable);
+ wasm::PopulateFunctionTable(table, size, &functions);
+ }
+ js_object->SetInternalField(kWasmModuleFunctionTable, *indirect_tables);
+ }
+
+ // Run the start function if one was specified.
+ MaybeHandle<FixedArray> maybe_startup_fct =
+ compiled_module->GetValue<FixedArray>(isolate, kStartupFunction);
+ Handle<FixedArray> metadata;
+ if (maybe_startup_fct.ToHandle(&metadata)) {
+ HandleScope scope(isolate);
+ Handle<Code> startup_code =
+ metadata->GetValueChecked<Code>(isolate, kExportCode);
+ int arity = Smi::cast(metadata->get(kExportArity))->value();
+ MaybeHandle<ByteArray> startup_signature =
+ metadata->GetValue<ByteArray>(isolate, kExportedSignature);
+ Handle<JSFunction> startup_fct = WrapExportCodeAsJSFunction(
+ isolate, startup_code, factory->InternalizeUtf8String("start"), arity,
+ startup_signature, js_object);
+ RecordStats(isolate, *startup_code);
// Call the JS function.
- Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ Handle<Object> undefined = isolate->factory()->undefined_value();
MaybeHandle<Object> retval =
- Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
+ Execution::Call(isolate, startup_fct, undefined, 0, nullptr);
if (retval.is_null()) {
thrower.Error("WASM.instantiateModule(): start function failed");
+ return nothing;
}
}
- return instance.js_object;
-}
+ DCHECK(wasm::IsWasmObject(*js_object));
+ return js_object;
+}
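// Call shape of the static entry point above, with illustrative handles; a
// complete end-to-end use appears in testing::CompileAndRunWasmModule further
// down in this file:
//   MaybeHandle<JSObject> instance = WasmModule::Instantiate(
//       isolate, compiled_module, Handle<JSReceiver>::null(),
//       Handle<JSArrayBuffer>::null());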
-Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
+// TODO(mtrofin): remove this once we move to WASM_DIRECT_CALL
+Handle<Code> ModuleEnv::GetCodeOrPlaceholder(uint32_t index) const {
DCHECK(IsValidFunction(index));
- if (linker) return linker->GetFunctionCode(index);
- return instance ? instance->function_code[index] : Handle<Code>::null();
+ if (!placeholders.empty()) return placeholders[index];
+ DCHECK_NOT_NULL(instance);
+ return instance->function_code[index];
}
Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
@@ -581,106 +1430,229 @@ compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
DCHECK(IsValidFunction(index));
// Always make a direct call to whatever is in the table at that location.
// A wrapper will be generated for FFI calls.
- WasmFunction* function = &module->functions[index];
+ const WasmFunction* function = &module->functions[index];
return GetWasmCallDescriptor(zone, function->sig);
}
-
-int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
- const byte* module_end, bool asm_js) {
- HandleScope scope(isolate);
- Zone zone(isolate->allocator());
- // Decode the module, but don't verify function bodies, since we'll
- // be compiling them anyway.
- ModuleResult result = DecodeWasmModule(isolate, &zone, module_start,
- module_end, false, kWasmOrigin);
- if (result.failed()) {
- if (result.val) {
- delete result.val;
+Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
+ uint32_t func_index) {
+ if (!wasm->IsUndefined(isolate)) {
+ Handle<ByteArray> func_names_arr_obj(
+ ByteArray::cast(Handle<JSObject>::cast(wasm)->GetInternalField(
+ kWasmFunctionNamesArray)),
+ isolate);
+ // TODO(clemens): Extract this from the module bytes; skip whole function
+ // name table.
+ Handle<Object> name;
+ if (GetWasmFunctionNameFromTable(func_names_arr_obj, func_index)
+ .ToHandle(&name)) {
+ return name;
}
- // Module verification failed. throw.
- std::ostringstream str;
- str << "WASM.compileRun() failed: " << result;
- isolate->Throw(
- *isolate->factory()->NewStringFromAsciiChecked(str.str().c_str()));
- return -1;
}
+ return isolate->factory()->null_value();
+}
- int32_t retval = CompileAndRunWasmModule(isolate, result.val);
- delete result.val;
- return retval;
+Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
+ uint32_t func_index) {
+ Handle<Object> name_or_null =
+ GetWasmFunctionNameOrNull(isolate, wasm, func_index);
+ if (!name_or_null->IsNull(isolate)) {
+ return Handle<String>::cast(name_or_null);
+ }
+ return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
}
+bool IsWasmObject(Object* object) {
+ if (!object->IsJSObject()) return false;
-int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
- ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
- WasmModuleInstance instance(module);
+ JSObject* obj = JSObject::cast(object);
+ Isolate* isolate = obj->GetIsolate();
+ if (obj->GetInternalFieldCount() != kWasmModuleInternalFieldCount) {
+ return false;
+ }
- // Allocate and initialize the linear memory.
- if (!AllocateMemory(&thrower, isolate, &instance)) {
- return -1;
+ Object* mem = obj->GetInternalField(kWasmMemArrayBuffer);
+ if (obj->GetInternalField(kWasmModuleCodeTable)->IsFixedArray() &&
+ (mem->IsUndefined(isolate) || mem->IsJSArrayBuffer()) &&
+ obj->GetInternalField(kWasmFunctionNamesArray)->IsByteArray()) {
+ Object* debug_bytes = obj->GetInternalField(kWasmModuleBytesString);
+ if (!debug_bytes->IsUndefined(isolate)) {
+ if (!debug_bytes->IsSeqOneByteString()) {
+ return false;
+ }
+ DisallowHeapAllocation no_gc;
+ SeqOneByteString* bytes = SeqOneByteString::cast(debug_bytes);
+ if (bytes->length() < 4) return false;
+ if (memcmp(bytes->GetChars(), "\0asm", 4)) return false;
+ // All checks passed.
+ }
+ return true;
}
- LoadDataSegments(module, instance.mem_start, instance.mem_size);
+ return false;
+}
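// The "\0asm" comparison above is the little-endian byte sequence of
// kWasmMagic (0x6d736100): {0x00, 'a', 's', 'm'} when read byte by byte.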
- // Allocate the globals area if necessary.
- if (!AllocateGlobals(&thrower, isolate, &instance)) {
- return -1;
+SeqOneByteString* GetWasmBytes(JSObject* wasm) {
+ return SeqOneByteString::cast(wasm->GetInternalField(kWasmModuleBytesString));
+}
+
+Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm) {
+ Handle<Object> info(wasm->GetInternalField(kWasmDebugInfo),
+ wasm->GetIsolate());
+ if (!info->IsUndefined(wasm->GetIsolate()))
+ return Handle<WasmDebugInfo>::cast(info);
+ Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(wasm);
+ wasm->SetInternalField(kWasmDebugInfo, *new_info);
+ return new_info;
+}
+
+bool UpdateWasmModuleMemory(Handle<JSObject> object, Address old_start,
+ Address new_start, uint32_t old_size,
+ uint32_t new_size) {
+ DisallowHeapAllocation no_allocation;
+ if (!IsWasmObject(*object)) {
+ return false;
}
- // Build the function table.
- instance.function_table = BuildFunctionTable(isolate, module);
+ // Get code table associated with the module js_object
+ Object* obj = object->GetInternalField(kWasmModuleCodeTable);
+ Handle<FixedArray> code_table(FixedArray::cast(obj));
- // Create module environment.
- WasmLinker linker(isolate, module->functions.size());
- ModuleEnv module_env;
- module_env.module = module;
- module_env.instance = &instance;
- module_env.linker = &linker;
- module_env.origin = module->origin;
-
- // Compile all functions.
- Handle<Code> main_code = Handle<Code>::null(); // record last code.
- uint32_t index = 0;
- int main_index = 0;
- for (const WasmFunction& func : module->functions) {
- DCHECK_EQ(index, func.func_index);
- if (!func.external) {
- // Compile the function and install it in the code table.
- Handle<Code> code =
- compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
- if (!code.is_null()) {
- if (func.exported) {
- main_code = code;
- main_index = index;
- }
- linker.Finish(index, code);
+ // Iterate through the code objects in the code table and update relocation
+ // information
+ for (int i = 0; i < code_table->length(); i++) {
+ obj = code_table->get(i);
+ Handle<Code> code(Code::cast(obj));
+
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode) ||
+ RelocInfo::IsWasmMemorySizeReference(mode)) {
+ it.rinfo()->update_wasm_memory_reference(old_start, new_start, old_size,
+ new_size);
}
- if (thrower.error()) return -1;
}
- index++;
}
+ return true;
+}
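// Each matching RelocInfo entry above rebases one embedded memory address;
// the arithmetic inside the per-architecture update_wasm_memory_reference
// helpers is essentially:
//   Address updated = new_start + (embedded_reference - old_start);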
+
+Handle<FixedArray> BuildFunctionTable(Isolate* isolate, uint32_t index,
+ const WasmModule* module) {
+ const WasmIndirectFunctionTable* table = &module->function_tables[index];
+ DCHECK_EQ(table->size, table->values.size());
+ DCHECK_GE(table->max_size, table->size);
+ Handle<FixedArray> values =
+ isolate->factory()->NewFixedArray(2 * table->max_size, TENURED);
+ for (uint32_t i = 0; i < table->size; ++i) {
+ const WasmFunction* function = &module->functions[table->values[i]];
+ values->set(i, Smi::FromInt(function->sig_index));
+ values->set(i + table->max_size, Smi::FromInt(table->values[i]));
+ }
+ // Set the remaining elements to -1 (instead of "undefined"). These
+ // elements are accessed directly as SMIs (without a check). On 64-bit
+ // platforms, it is possible to have the top bits of "undefined" take
+ // small integer values (or zero), which are more likely to be equal to
+ // the signature index we check against.
+ for (uint32_t i = table->size; i < table->max_size; i++) {
+ values->set(i, Smi::FromInt(-1));
+ }
+ return values;
+}
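// Worked example of the double-size layout above, with illustrative numbers:
// for size 2, max_size 3, and entries calling functions 5 and 7, the length-6
// FixedArray holds
//   slots [0, 3): sig_index_of(5), sig_index_of(7), -1  // signature half
//   slots [3, 6): 5, 7, <default value>                 // function-index half
// PopulateFunctionTable() below then overwrites the populated part of the
// function-index half with the compiled Code objects.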
+
+void PopulateFunctionTable(Handle<FixedArray> table, uint32_t table_size,
+ const std::vector<Handle<Code>>* code_table) {
+ uint32_t max_size = table->length() / 2;
+ for (uint32_t i = max_size; i < max_size + table_size; ++i) {
+ int index = Smi::cast(table->get(static_cast<int>(i)))->value();
+ DCHECK_GE(index, 0);
+ DCHECK_LT(static_cast<size_t>(index), code_table->size());
+ table->set(static_cast<int>(i), *(*code_table)[index]);
+ }
+}
+
+int GetNumberOfFunctions(JSObject* wasm) {
+ Object* func_names_obj = wasm->GetInternalField(kWasmFunctionNamesArray);
+ // TODO(clemensh): this looks inside an array constructed elsewhere. Refactor.
+ return ByteArray::cast(func_names_obj)->get_int(0);
+}
- if (main_code.is_null()) {
- thrower.Error("WASM.compileRun() failed: no main code found");
+Handle<JSObject> CreateCompiledModuleObject(
+ Isolate* isolate, Handle<FixedArray> compiled_module) {
+ Handle<JSFunction> module_cons(
+ isolate->native_context()->wasm_module_constructor());
+ Handle<JSObject> module_obj = isolate->factory()->NewJSObject(module_cons);
+ module_obj->SetInternalField(0, *compiled_module);
+ Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
+ Object::SetProperty(module_obj, module_sym, module_obj, STRICT).Check();
+ return module_obj;
+}
+
+namespace testing {
+
+int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool asm_js) {
+ HandleScope scope(isolate);
+ Zone zone(isolate->allocator());
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+
+ // Decode the module, but don't verify function bodies, since we'll
+ // be compiling them anyway.
+ ModuleResult decoding_result =
+ DecodeWasmModule(isolate, &zone, module_start, module_end, false,
+ asm_js ? kAsmJsOrigin : kWasmOrigin);
+
+ std::unique_ptr<const WasmModule> module(decoding_result.val);
+ if (decoding_result.failed()) {
+    // Module verification failed; throw.
+ thrower.Error("WASM.compileRun() failed: %s",
+ decoding_result.error_msg.get());
return -1;
}
- linker.Link(instance.function_table, instance.module->function_table);
+ if (module->import_table.size() > 0) {
+ thrower.Error("Not supported: module has imports.");
+ }
+ if (module->export_table.size() == 0) {
+ thrower.Error("Not supported: module has no exports.");
+ }
+
+ if (thrower.error()) return -1;
+ MaybeHandle<FixedArray> compiled_module =
+ module->CompileFunctions(isolate, &thrower);
- // Wrap the main code so it can be called as a JS function.
- Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
- Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
- Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
- isolate, &module_env, name, main_code, module_object, main_index);
+ if (compiled_module.is_null()) return -1;
+ Handle<JSObject> instance =
+ WasmModule::Instantiate(isolate, compiled_module.ToHandleChecked(),
+ Handle<JSReceiver>::null(),
+ Handle<JSArrayBuffer>::null())
+ .ToHandleChecked();
+
+ return CallFunction(isolate, instance, &thrower, "main", 0, nullptr);
+}
+
+int32_t CallFunction(Isolate* isolate, Handle<JSObject> instance,
+ ErrorThrower* thrower, const char* name, int argc,
+ Handle<Object> argv[]) {
+ Handle<Name> exports = isolate->factory()->InternalizeUtf8String("exports");
+ Handle<JSObject> exports_object = Handle<JSObject>::cast(
+ JSObject::GetProperty(instance, exports).ToHandleChecked());
+ Handle<Name> main_name = isolate->factory()->NewStringFromAsciiChecked(name);
+ PropertyDescriptor desc;
+ Maybe<bool> property_found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, exports_object, main_name, &desc);
+ if (!property_found.FromMaybe(false)) return -1;
+
+ Handle<JSFunction> main_export = Handle<JSFunction>::cast(desc.value());
// Call the JS function.
- Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ Handle<Object> undefined = isolate->factory()->undefined_value();
MaybeHandle<Object> retval =
- Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
+ Execution::Call(isolate, main_export, undefined, argc, argv);
// The result should be a number.
if (retval.is_null()) {
- thrower.Error("WASM.compileRun() failed: Invocation was null");
+ thrower->Error("WASM.compileRun() failed: Invocation was null");
return -1;
}
Handle<Object> result = retval.ToHandleChecked();
@@ -690,9 +1662,11 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
if (result->IsHeapNumber()) {
return static_cast<int32_t>(HeapNumber::cast(*result)->value());
}
- thrower.Error("WASM.compileRun() failed: Return value should be number");
+ thrower->Error("WASM.compileRun() failed: Return value should be number");
return -1;
}
+
+} // namespace testing
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 4e5aa78486..0c3df51d76 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -5,17 +5,21 @@
#ifndef V8_WASM_MODULE_H_
#define V8_WASM_MODULE_H_
-#include "src/wasm/wasm-opcodes.h"
-#include "src/wasm/wasm-result.h"
+#include <memory>
#include "src/api.h"
#include "src/handles.h"
+#include "src/parsing/preparse-data.h"
+
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
namespace v8 {
namespace internal {
namespace compiler {
class CallDescriptor;
+class WasmCompilationUnit;
}
namespace wasm {
@@ -23,69 +27,61 @@ const size_t kMaxModuleSize = 1024 * 1024 * 1024;
const size_t kMaxFunctionSize = 128 * 1024;
const size_t kMaxStringSize = 256;
const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0a;
+const uint32_t kWasmVersion = 0x0b;
+const uint8_t kWasmFunctionTypeForm = 0x40;
// WebAssembly sections are named as strings in the binary format, but
// internally V8 uses an enum to handle them.
//
// Entries have the form F(enumerator, string).
-#define FOR_EACH_WASM_SECTION_TYPE(F) \
- F(Memory, "memory") \
- F(Signatures, "signatures") \
- F(Functions, "functions") \
- F(Globals, "globals") \
- F(DataSegments, "data_segments") \
- F(FunctionTable, "function_table") \
- F(End, "end") \
- F(StartFunction, "start_function") \
- F(ImportTable, "import_table") \
- F(ExportTable, "export_table") \
- F(FunctionSignatures, "function_signatures") \
- F(FunctionBodies, "function_bodies") \
- F(Names, "names")
+#define FOR_EACH_WASM_SECTION_TYPE(F) \
+ F(Signatures, 1, "type") \
+ F(ImportTable, 2, "import") \
+ F(FunctionSignatures, 3, "function") \
+ F(FunctionTable, 4, "table") \
+ F(Memory, 5, "memory") \
+ F(ExportTable, 6, "export") \
+ F(StartFunction, 7, "start") \
+ F(FunctionBodies, 8, "code") \
+ F(DataSegments, 9, "data") \
+ F(Names, 10, "name") \
+ F(Globals, 0, "global") \
+ F(End, 0, "end")
// Constants for the above section types: {LEB128 length, characters...}.
#define WASM_SECTION_MEMORY 6, 'm', 'e', 'm', 'o', 'r', 'y'
-#define WASM_SECTION_SIGNATURES \
- 10, 's', 'i', 'g', 'n', 'a', 't', 'u', 'r', 'e', 's'
-#define WASM_SECTION_FUNCTIONS 9, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', 's'
-#define WASM_SECTION_GLOBALS 7, 'g', 'l', 'o', 'b', 'a', 'l', 's'
-#define WASM_SECTION_DATA_SEGMENTS \
- 13, 'd', 'a', 't', 'a', '_', 's', 'e', 'g', 'm', 'e', 'n', 't', 's'
-#define WASM_SECTION_FUNCTION_TABLE \
- 14, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 't', 'a', 'b', 'l', 'e'
+#define WASM_SECTION_SIGNATURES 4, 't', 'y', 'p', 'e'
+#define WASM_SECTION_GLOBALS 6, 'g', 'l', 'o', 'b', 'a', 'l'
+#define WASM_SECTION_DATA_SEGMENTS 4, 'd', 'a', 't', 'a'
+#define WASM_SECTION_FUNCTION_TABLE 5, 't', 'a', 'b', 'l', 'e'
#define WASM_SECTION_END 3, 'e', 'n', 'd'
-#define WASM_SECTION_START_FUNCTION \
- 14, 's', 't', 'a', 'r', 't', '_', 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
-#define WASM_SECTION_IMPORT_TABLE \
- 12, 'i', 'm', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
-#define WASM_SECTION_EXPORT_TABLE \
- 12, 'e', 'x', 'p', 'o', 'r', 't', '_', 't', 'a', 'b', 'l', 'e'
-#define WASM_SECTION_FUNCTION_SIGNATURES \
- 19, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 's', 'i', 'g', 'n', 'a', \
- 't', 'u', 'r', 'e', 's'
-#define WASM_SECTION_FUNCTION_BODIES \
- 15, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n', '_', 'b', 'o', 'd', 'i', 'e', 's'
-#define WASM_SECTION_NAMES 5, 'n', 'a', 'm', 'e', 's'
+#define WASM_SECTION_START_FUNCTION 5, 's', 't', 'a', 'r', 't'
+#define WASM_SECTION_IMPORT_TABLE 6, 'i', 'm', 'p', 'o', 'r', 't'
+#define WASM_SECTION_EXPORT_TABLE 6, 'e', 'x', 'p', 'o', 'r', 't'
+#define WASM_SECTION_FUNCTION_SIGNATURES \
+ 8, 'f', 'u', 'n', 'c', 't', 'i', 'o', 'n'
+#define WASM_SECTION_FUNCTION_BODIES 4, 'c', 'o', 'd', 'e'
+#define WASM_SECTION_NAMES 4, 'n', 'a', 'm', 'e'
// Constants for the above section headers' size (LEB128 + characters).
#define WASM_SECTION_MEMORY_SIZE ((size_t)7)
-#define WASM_SECTION_SIGNATURES_SIZE ((size_t)11)
-#define WASM_SECTION_FUNCTIONS_SIZE ((size_t)10)
-#define WASM_SECTION_GLOBALS_SIZE ((size_t)8)
-#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)14)
-#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)15)
+#define WASM_SECTION_SIGNATURES_SIZE ((size_t)5)
+#define WASM_SECTION_GLOBALS_SIZE ((size_t)7)
+#define WASM_SECTION_DATA_SEGMENTS_SIZE ((size_t)5)
+#define WASM_SECTION_FUNCTION_TABLE_SIZE ((size_t)6)
#define WASM_SECTION_END_SIZE ((size_t)4)
-#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)15)
-#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)13)
-#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)13)
-#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)20)
-#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)16)
-#define WASM_SECTION_NAMES_SIZE ((size_t)6)
+#define WASM_SECTION_START_FUNCTION_SIZE ((size_t)6)
+#define WASM_SECTION_IMPORT_TABLE_SIZE ((size_t)7)
+#define WASM_SECTION_EXPORT_TABLE_SIZE ((size_t)7)
+#define WASM_SECTION_FUNCTION_SIGNATURES_SIZE ((size_t)9)
+#define WASM_SECTION_FUNCTION_BODIES_SIZE ((size_t)5)
+#define WASM_SECTION_NAMES_SIZE ((size_t)5)
+
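// Example of the encoding described above (illustrative): the export section
// name is a length-prefixed ASCII string, so WASM_SECTION_EXPORT_TABLE
// expands to the bytes {6, 'e', 'x', 'p', 'o', 'r', 't'} and the matching
// size constant counts all seven of them:
//   static const byte kExportHeader[] = {WASM_SECTION_EXPORT_TABLE};
//   static_assert(sizeof(kExportHeader) == WASM_SECTION_EXPORT_TABLE_SIZE,
//                 "size constant matches the emitted bytes");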
+class WasmDebugInfo;
struct WasmSection {
enum class Code : uint32_t {
-#define F(enumerator, string) enumerator,
+#define F(enumerator, order, string) enumerator,
FOR_EACH_WASM_SECTION_TYPE(F)
#undef F
Max
@@ -94,13 +90,13 @@ struct WasmSection {
static WasmSection::Code end();
static WasmSection::Code next(WasmSection::Code code);
static const char* getName(Code code);
+ static int getOrder(Code code);
static size_t getNameLength(Code code);
+ static WasmSection::Code lookup(const byte* string, uint32_t length);
};
enum WasmFunctionDeclBit {
kDeclFunctionName = 0x01,
- kDeclFunctionImport = 0x02,
- kDeclFunctionLocals = 0x04,
kDeclFunctionExport = 0x08
};
@@ -108,6 +104,8 @@ enum WasmFunctionDeclBit {
static const size_t kDeclMemorySize = 3;
static const size_t kDeclDataSegmentSize = 13;
+static const uint32_t kMaxReturnCount = 1;
+
// Static representation of a WASM function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
@@ -117,12 +115,6 @@ struct WasmFunction {
uint32_t name_length; // length in bytes of the name.
uint32_t code_start_offset; // offset in the module bytes of code start.
uint32_t code_end_offset; // offset in the module bytes of code end.
- uint16_t local_i32_count; // number of i32 local variables.
- uint16_t local_i64_count; // number of i64 local variables.
- uint16_t local_f32_count; // number of f32 local variables.
- uint16_t local_f64_count; // number of f64 local variables.
- bool exported; // true if this function is exported.
- bool external; // true if this function is externally supplied.
};
// Static representation of an imported WASM function.
@@ -146,7 +138,7 @@ struct WasmExport {
struct WasmGlobal {
uint32_t name_offset; // offset in the module bytes of the name, if any.
uint32_t name_length; // length in bytes of the global name.
- MachineType type; // type of the global.
+ LocalType type; // type of the global.
uint32_t offset; // offset from beginning of globals area.
bool exported; // true if this global is exported.
};
@@ -159,6 +151,13 @@ struct WasmDataSegment {
bool init; // true if loaded upon instantiation.
};
+// Static representation of a wasm indirect call table.
+struct WasmIndirectFunctionTable {
+ uint32_t size; // initial table size.
+ uint32_t max_size; // maximum table size.
+ std::vector<uint16_t> values; // function table.
+};
+
enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
// Static representation of a module.
@@ -167,101 +166,138 @@ struct WasmModule {
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
static const uint32_t kMaxMemPages = 16384; // Maximum memory size = 1gb
- Isolate* shared_isolate; // isolate for storing shared code.
const byte* module_start; // starting address for the module bytes.
const byte* module_end; // end address for the module bytes.
uint32_t min_mem_pages; // minimum size of the memory in 64k pages.
uint32_t max_mem_pages; // maximum size of the memory in 64k pages.
bool mem_export; // true if the memory is exported.
bool mem_external; // true if the memory is external.
+  // TODO(wasm): reconcile the start function index being an int with
+  // the fact that we index with uint32_t, so some start_function_index
+  // values may technically not be representable.
int start_function_index; // start function, if any.
ModuleOrigin origin; // origin of the module
std::vector<WasmGlobal> globals; // globals in this module.
+ uint32_t globals_size; // size of globals table.
std::vector<FunctionSig*> signatures; // signatures in this module.
std::vector<WasmFunction> functions; // functions in this module.
std::vector<WasmDataSegment> data_segments; // data segments in this module.
- std::vector<uint16_t> function_table; // function table.
+ std::vector<WasmIndirectFunctionTable> function_tables; // function tables.
std::vector<WasmImport> import_table; // import table.
std::vector<WasmExport> export_table; // export table.
-
- WasmModule();
+ // We store the semaphore here to extend its lifetime. In <libc-2.21, which we
+ // use on the try bots, semaphore::Wait() can return while some compilation
+ // tasks are still executing semaphore::Signal(). If the semaphore is cleaned
+ // up right after semaphore::Wait() returns, then this can cause an
+ // invalid-semaphore error in the compilation tasks.
+ // TODO(wasm): Move this semaphore back to CompileInParallel when the try bots
+ // switch to libc-2.21 or higher.
+ std::unique_ptr<base::Semaphore> pending_tasks;
+
+ WasmModule() : WasmModule(nullptr) {}
+ explicit WasmModule(byte* module_start);
// Get a string stored in the module bytes representing a name.
WasmName GetName(uint32_t offset, uint32_t length) const {
if (length == 0) return {"<?>", 3}; // no name.
CHECK(BoundsCheck(offset, offset + length));
- return {reinterpret_cast<const char*>(module_start + offset), length};
+ DCHECK_GE(static_cast<int>(length), 0);
+ return {reinterpret_cast<const char*>(module_start + offset),
+ static_cast<int>(length)};
+ }
+
+ // Get a string stored in the module bytes representing a function name.
+ WasmName GetName(WasmFunction* function) const {
+ return GetName(function->name_offset, function->name_length);
}
// Get a string stored in the module bytes representing a name.
WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
- if (length == 0) return {NULL, 0}; // no name.
+ if (offset == 0 && length == 0) return {NULL, 0}; // no name.
CHECK(BoundsCheck(offset, offset + length));
- return {reinterpret_cast<const char*>(module_start + offset), length};
+ DCHECK_GE(static_cast<int>(length), 0);
+ return {reinterpret_cast<const char*>(module_start + offset),
+ static_cast<int>(length)};
+ }
+
+ // Get a string stored in the module bytes representing a function name.
+ WasmName GetNameOrNull(const WasmFunction* function) const {
+ return GetNameOrNull(function->name_offset, function->name_length);
}
// Checks the given offset range is contained within the module bytes.
bool BoundsCheck(uint32_t start, uint32_t end) const {
size_t size = module_end - module_start;
- return start < size && end < size;
+ return start <= size && end <= size;
}
// Creates a new instantiation of the module in the given isolate.
- MaybeHandle<JSObject> Instantiate(Isolate* isolate, Handle<JSObject> ffi,
- Handle<JSArrayBuffer> memory);
+ static MaybeHandle<JSObject> Instantiate(Isolate* isolate,
+ Handle<FixedArray> compiled_module,
+ Handle<JSReceiver> ffi,
+ Handle<JSArrayBuffer> memory);
+
+ MaybeHandle<FixedArray> CompileFunctions(Isolate* isolate,
+ ErrorThrower* thrower) const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(WasmModule);
};
// An instantiated WASM module, including memory, function table, etc.
struct WasmModuleInstance {
- WasmModule* module; // static representation of the module.
+ const WasmModule* module; // static representation of the module.
// -- Heap allocated --------------------------------------------------------
Handle<JSObject> js_object; // JavaScript module object.
Handle<Context> context; // JavaScript native context.
Handle<JSArrayBuffer> mem_buffer; // Handle to array buffer of memory.
Handle<JSArrayBuffer> globals_buffer; // Handle to array buffer of globals.
- Handle<FixedArray> function_table; // indirect function table.
+ std::vector<Handle<FixedArray>> function_tables; // indirect function tables.
std::vector<Handle<Code>> function_code; // code objects for each function.
std::vector<Handle<Code>> import_code; // code objects for each import.
// -- raw memory ------------------------------------------------------------
byte* mem_start; // start of linear memory.
- size_t mem_size; // size of the linear memory.
+ uint32_t mem_size; // size of the linear memory.
// -- raw globals -----------------------------------------------------------
byte* globals_start; // start of the globals area.
- size_t globals_size; // size of the globals area.
- explicit WasmModuleInstance(WasmModule* m)
+ explicit WasmModuleInstance(const WasmModule* m)
: module(m),
+ function_tables(m->function_tables.size()),
+ function_code(m->functions.size()),
+ import_code(m->import_table.size()),
mem_start(nullptr),
mem_size(0),
- globals_start(nullptr),
- globals_size(0) {}
+ globals_start(nullptr) {}
};
-// forward declaration.
-class WasmLinker;
-
// Interface provided to the decoder/graph builder which contains only
// minimal information about the globals, functions, and function tables.
struct ModuleEnv {
- WasmModule* module;
+ const WasmModule* module;
WasmModuleInstance* instance;
- WasmLinker* linker;
ModuleOrigin origin;
+ // TODO(mtrofin): remove this once we introduce WASM_DIRECT_CALL
+ // reloc infos.
+ std::vector<Handle<Code>> placeholders;
- bool IsValidGlobal(uint32_t index) {
+ bool IsValidGlobal(uint32_t index) const {
return module && index < module->globals.size();
}
- bool IsValidFunction(uint32_t index) {
+ bool IsValidFunction(uint32_t index) const {
return module && index < module->functions.size();
}
- bool IsValidSignature(uint32_t index) {
+ bool IsValidSignature(uint32_t index) const {
return module && index < module->signatures.size();
}
- bool IsValidImport(uint32_t index) {
+ bool IsValidImport(uint32_t index) const {
return module && index < module->import_table.size();
}
- MachineType GetGlobalType(uint32_t index) {
+ bool IsValidTable(uint32_t index) const {
+ return module && index < module->function_tables.size();
+ }
+ LocalType GetGlobalType(uint32_t index) {
DCHECK(IsValidGlobal(index));
return module->globals[index].type;
}
@@ -277,15 +313,15 @@ struct ModuleEnv {
DCHECK(IsValidSignature(index));
return module->signatures[index];
}
- size_t FunctionTableSize() {
- return module ? module->function_table.size() : 0;
+ const WasmIndirectFunctionTable* GetTable(uint32_t index) const {
+ DCHECK(IsValidTable(index));
+ return &module->function_tables[index];
}
bool asm_js() { return origin == kAsmJsOrigin; }
- Handle<Code> GetFunctionCode(uint32_t index);
+ Handle<Code> GetCodeOrPlaceholder(uint32_t index) const;
Handle<Code> GetImportCode(uint32_t index);
- Handle<FixedArray> GetFunctionTable();
static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
FunctionSig* sig);
@@ -306,18 +342,76 @@ std::ostream& operator<<(std::ostream& os, const WasmModule& module);
std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
-typedef Result<WasmModule*> ModuleResult;
+typedef Result<const WasmModule*> ModuleResult;
typedef Result<WasmFunction*> FunctionResult;
-
-// For testing. Decode, verify, and run the last exported function in the
-// given encoded module.
+typedef std::vector<std::pair<int, int>> FunctionOffsets;
+typedef Result<FunctionOffsets> FunctionOffsetsResult;
+
+// Extract a function name from the given wasm object.
+// Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
+// valid UTF-8 string.
+Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
+ uint32_t func_index);
+
+// Extract a function name from the given wasm object.
+// Returns a null handle if the function is unnamed or the name is not a valid
+// UTF-8 string.
+Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
+ uint32_t func_index);
+
+// Return the binary source bytes of a wasm module.
+SeqOneByteString* GetWasmBytes(JSObject* wasm);
+
+// Get the debug info associated with the given wasm object.
+// If no debug info exists yet, it is created automatically.
+Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
+
+// Return the number of functions in the given wasm object.
+int GetNumberOfFunctions(JSObject* wasm);
+
+// Create and export a JSFunction.
+Handle<JSFunction> WrapExportCodeAsJSFunction(Isolate* isolate,
+ Handle<Code> export_code,
+ Handle<String> name, int arity,
+ MaybeHandle<ByteArray> signature,
+ Handle<JSObject> module_instance);
+
+// Check whether the given object is a wasm object.
+// This checks the number and type of internal fields, so it's not 100 percent
+// secure. If it turns out that we need more complete checks, we could add a
+// special marker as internal field, which will definitely never occur anywhere
+// else.
+bool IsWasmObject(Object* object);
+
+// Update memory references of code objects associated with the module
+bool UpdateWasmModuleMemory(Handle<JSObject> object, Address old_start,
+ Address new_start, uint32_t old_size,
+ uint32_t new_size);
+
+// Constructs a single function table as a FixedArray of double size,
+// populating it with function signature indices and function indices.
+Handle<FixedArray> BuildFunctionTable(Isolate* isolate, uint32_t index,
+ const WasmModule* module);
+
+// Populates a function table by replacing function indices with handles to
+// the compiled code.
+void PopulateFunctionTable(Handle<FixedArray> table, uint32_t table_size,
+ const std::vector<Handle<Code>>* code_table);
+
+Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
+ Handle<FixedArray> compiled_module);
+
+namespace testing {
+
+// Decode, verify, and run the function labeled "main" in the
+// given encoded module. The module should have no imports.
int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end, bool asm_js = false);
-// For testing. Decode, verify, and run the last exported function in the
-// given decoded module.
-int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
-
+int32_t CallFunction(Isolate* isolate, Handle<JSObject> instance,
+ ErrorThrower* thrower, const char* name, int argc,
+ Handle<Object> argv[]);
+} // namespace testing
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 736c4d9609..8f54207661 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/wasm/wasm-opcodes.h"
+#include "src/messages.h"
#include "src/signature.h"
namespace v8 {
@@ -24,27 +25,36 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
return "Unknown";
}
+const char* WasmOpcodes::ShortOpcodeName(WasmOpcode opcode) {
+ switch (opcode) {
+#define DECLARE_NAME_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return #name;
+ FOREACH_OPCODE(DECLARE_NAME_CASE)
+#undef DECLARE_NAME_CASE
+ default:
+ break;
+ }
+ return "Unknown";
+}
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
- for (size_t i = 0; i < sig.return_count(); i++) {
+ for (size_t i = 0; i < sig.return_count(); ++i) {
os << WasmOpcodes::ShortNameOf(sig.GetReturn(i));
}
os << "_";
if (sig.parameter_count() == 0) os << "v";
- for (size_t i = 0; i < sig.parameter_count(); i++) {
+ for (size_t i = 0; i < sig.parameter_count(); ++i) {
os << WasmOpcodes::ShortNameOf(sig.GetParam(i));
}
return os;
}
-
#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
-
enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
-
// TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
#define DECLARE_SIG(name, ...) \
static LocalType kTypes_##name[] = {__VA_ARGS__}; \
@@ -58,27 +68,56 @@ FOREACH_SIGNATURE(DECLARE_SIG)
static const FunctionSig* kSimpleExprSigs[] = {
nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
-static byte kSimpleExprSigTable[256];
+#define DECLARE_SIMD_SIG_ENTRY(name, ...) &kSig_##name,
+
+static const FunctionSig* kSimdExprSigs[] = {
+ nullptr, FOREACH_SIMD_SIGNATURE(DECLARE_SIMD_SIG_ENTRY)};
+static byte kSimpleExprSigTable[256];
+static byte kSimdExprSigTable[256];
// Initialize the signature table.
-static void InitSigTable() {
+static void InitSigTables() {
#define SET_SIG_TABLE(name, opcode, sig) \
kSimpleExprSigTable[opcode] = static_cast<int>(kSigEnum_##sig) + 1;
FOREACH_SIMPLE_OPCODE(SET_SIG_TABLE);
+ FOREACH_SIMPLE_MEM_OPCODE(SET_SIG_TABLE);
FOREACH_ASMJS_COMPAT_OPCODE(SET_SIG_TABLE);
#undef SET_SIG_TABLE
+ byte simd_index;
+#define SET_SIG_TABLE(name, opcode, sig) \
+ simd_index = opcode & 0xff; \
+ kSimdExprSigTable[simd_index] = static_cast<int>(kSigEnum_##sig) + 1;
+ FOREACH_SIMD_OPCODE(SET_SIG_TABLE)
+#undef SET_SIG_TABLE
}
+class SigTable {
+ public:
+ SigTable() {
+    // TODO(ahaas): Move {InitSigTables} into the class.
+ InitSigTables();
+ }
+ FunctionSig* Signature(WasmOpcode opcode) const {
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
+ }
+ FunctionSig* SimdSignature(WasmOpcode opcode) const {
+ return const_cast<FunctionSig*>(
+ kSimdExprSigs[kSimdExprSigTable[static_cast<byte>(opcode & 0xff)]]);
+ }
+};
+
+static base::LazyInstance<SigTable>::type sig_table = LAZY_INSTANCE_INITIALIZER;
FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
- // TODO(titzer): use LazyInstance to make this thread safe.
- if (kSimpleExprSigTable[kExprI32Add] == 0) InitSigTable();
- return const_cast<FunctionSig*>(
- kSimpleExprSigs[kSimpleExprSigTable[static_cast<byte>(opcode)]]);
+ if (opcode >> 8 == kSimdPrefix) {
+ return sig_table.Get().SimdSignature(opcode);
+ } else {
+ return sig_table.Get().Signature(opcode);
+ }
}
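// Dispatch example for the scheme above: kExprF32x4Add is 0xe508, so
// (opcode >> 8) == 0xe5 == kSimdPrefix selects the SIMD table, and
// (opcode & 0xff) == 0x08 indexes kSimdExprSigTable; non-prefixed opcodes
// fit in a single byte and are looked up directly in kSimpleExprSigTable.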
-
// TODO(titzer): pull WASM_64 up to a common header.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
#define WASM_64 1
@@ -86,64 +125,20 @@ FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
#define WASM_64 0
#endif
-
-bool WasmOpcodes::IsSupported(WasmOpcode opcode) {
-#if !WASM_64
- switch (opcode) {
- // Opcodes not supported on 32-bit platforms.
- case kExprI64Add:
- case kExprI64Sub:
- case kExprI64Mul:
- case kExprI64DivS:
- case kExprI64DivU:
- case kExprI64RemS:
- case kExprI64RemU:
- case kExprI64And:
- case kExprI64Ior:
- case kExprI64Xor:
- case kExprI64Shl:
- case kExprI64ShrU:
- case kExprI64ShrS:
- case kExprI64Ror:
- case kExprI64Rol:
- case kExprI64Eq:
- case kExprI64Ne:
- case kExprI64LtS:
- case kExprI64LeS:
- case kExprI64LtU:
- case kExprI64LeU:
- case kExprI64GtS:
- case kExprI64GeS:
- case kExprI64GtU:
- case kExprI64GeU:
-
- case kExprI32ConvertI64:
- case kExprI64SConvertI32:
- case kExprI64UConvertI32:
-
- case kExprF64ReinterpretI64:
- case kExprI64ReinterpretF64:
-
- case kExprI64Clz:
- case kExprI64Ctz:
- case kExprI64Popcnt:
-
- case kExprF32SConvertI64:
- case kExprF32UConvertI64:
- case kExprF64SConvertI64:
- case kExprF64UConvertI64:
- case kExprI64SConvertF32:
- case kExprI64SConvertF64:
- case kExprI64UConvertF32:
- case kExprI64UConvertF64:
-
- return false;
+int WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
+ switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+ case k##name: \
+ return MessageTemplate::kWasm##name;
+ FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
default:
- return true;
+ return MessageTemplate::kNone;
}
-#else
- return true;
-#endif
+}
+
+const char* WasmOpcodes::TrapReasonMessage(TrapReason reason) {
+ return MessageTemplate::TemplateString(TrapReasonToMessageId(reason));
}
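// Usage sketch (illustrative): a trap reason maps to its user-facing message
// template, e.g.
//   const char* msg = WasmOpcodes::TrapReasonMessage(kTrapDivByZero);
// resolves through MessageTemplate::kWasmTrapDivByZero.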
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 52f85aab0a..4d66e567ef 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -18,21 +18,8 @@ enum LocalTypeCode {
kLocalI32 = 1,
kLocalI64 = 2,
kLocalF32 = 3,
- kLocalF64 = 4
-};
-
-// Binary encoding of memory types.
-enum MemTypeCode {
- kMemI8 = 0,
- kMemU8 = 1,
- kMemI16 = 2,
- kMemU16 = 3,
- kMemI32 = 4,
- kMemU32 = 5,
- kMemI64 = 6,
- kMemU64 = 7,
- kMemF32 = 8,
- kMemF64 = 9
+ kLocalF64 = 4,
+ kLocalS128 = 5
};
// We reuse the internal machine type to represent WebAssembly AST types.
@@ -43,18 +30,17 @@ const LocalType kAstI32 = MachineRepresentation::kWord32;
const LocalType kAstI64 = MachineRepresentation::kWord64;
const LocalType kAstF32 = MachineRepresentation::kFloat32;
const LocalType kAstF64 = MachineRepresentation::kFloat64;
+const LocalType kAstS128 = MachineRepresentation::kSimd128;
// We use kTagged here because kNone is already used by kAstStmt.
const LocalType kAstEnd = MachineRepresentation::kTagged;
typedef Signature<LocalType> FunctionSig;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
-struct WasmName {
- const char* name;
- uint32_t length;
-};
+typedef Vector<const char> WasmName;
-// TODO(titzer): Renumber all the opcodes to fill in holes.
+typedef int WasmCodePosition;
+const WasmCodePosition kNoCodePosition = -1;
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
@@ -62,29 +48,35 @@ struct WasmName {
V(Block, 0x01, _) \
V(Loop, 0x02, _) \
V(If, 0x03, _) \
- V(IfElse, 0x04, _) \
+ V(Else, 0x04, _) \
V(Select, 0x05, _) \
V(Br, 0x06, _) \
V(BrIf, 0x07, _) \
V(BrTable, 0x08, _) \
- V(Return, 0x14, _) \
- V(Unreachable, 0x15, _)
+ V(Return, 0x09, _) \
+ V(Unreachable, 0x0a, _) \
+ V(Throw, 0xfa, _) \
+ V(TryCatch, 0xfb, _) \
+ V(TryCatchFinally, 0xfc, _) \
+ V(TryFinally, 0xfd, _) \
+ V(Catch, 0xfe, _) \
+ V(Finally, 0xff, _) \
+ V(End, 0x0F, _)
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
- V(I8Const, 0x09, _) \
- V(I32Const, 0x0a, _) \
- V(I64Const, 0x0b, _) \
- V(F64Const, 0x0c, _) \
- V(F32Const, 0x0d, _) \
- V(GetLocal, 0x0e, _) \
- V(SetLocal, 0x0f, _) \
- V(LoadGlobal, 0x10, _) \
- V(StoreGlobal, 0x11, _) \
- V(CallFunction, 0x12, _) \
- V(CallIndirect, 0x13, _) \
- V(CallImport, 0x1F, _) \
- V(DeclLocals, 0x1E, _)
+ V(I32Const, 0x10, _) \
+ V(I64Const, 0x11, _) \
+ V(F64Const, 0x12, _) \
+ V(F32Const, 0x13, _) \
+ V(GetLocal, 0x14, _) \
+ V(SetLocal, 0x15, _) \
+ V(CallFunction, 0x16, _) \
+ V(CallIndirect, 0x17, _) \
+ V(CallImport, 0x18, _) \
+ V(I8Const, 0xcb, _) \
+ V(GetGlobal, 0xbb, _) \
+ V(SetGlobal, 0xbc, _)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -115,10 +107,11 @@ struct WasmName {
V(F32StoreMem, 0x35, f_if) \
V(F64StoreMem, 0x36, d_id)
+#define FOREACH_SIMPLE_MEM_OPCODE(V) V(GrowMemory, 0x39, i_i)
+
// Load memory expressions.
#define FOREACH_MISC_MEM_OPCODE(V) \
- V(MemorySize, 0x3b, i_v) \
- V(GrowMemory, 0x39, i_i)
+ V(MemorySize, 0x3b, i_v)
// Expressions with signatures.
#define FOREACH_SIMPLE_OPCODE(V) \
@@ -258,42 +251,167 @@ struct WasmName {
V(F64Log, 0xc7, d_d) \
V(F64Atan2, 0xc8, d_dd) \
V(F64Pow, 0xc9, d_dd) \
- V(F64Mod, 0xca, d_dd)
-
-// TODO(titzer): sketch of asm-js compatibility bytecodes
-/* V(I32AsmjsDivS, 0xd0, i_ii) \ */
-/* V(I32AsmjsDivU, 0xd1, i_ii) \ */
-/* V(I32AsmjsRemS, 0xd2, i_ii) \ */
-/* V(I32AsmjsRemU, 0xd3, i_ii) \ */
-/* V(I32AsmjsLoad8S, 0xd4, i_i) \ */
-/* V(I32AsmjsLoad8U, 0xd5, i_i) \ */
-/* V(I32AsmjsLoad16S, 0xd6, i_i) \ */
-/* V(I32AsmjsLoad16U, 0xd7, i_i) \ */
-/* V(I32AsmjsLoad, 0xd8, i_i) \ */
-/* V(F32AsmjsLoad, 0xd9, f_i) \ */
-/* V(F64AsmjsLoad, 0xda, d_i) \ */
-/* V(I32AsmjsStore8, 0xdb, i_i) \ */
-/* V(I32AsmjsStore16, 0xdc, i_i) \ */
-/* V(I32AsmjsStore, 0xdd, i_ii) \ */
-/* V(F32AsmjsStore, 0xde, i_if) \ */
-/* V(F64AsmjsStore, 0xdf, i_id) \ */
-/* V(I32SAsmjsConvertF32, 0xe0, i_f) \ */
-/* V(I32UAsmjsConvertF32, 0xe1, i_f) \ */
-/* V(I32SAsmjsConvertF64, 0xe2, i_d) \ */
-/* V(I32SAsmjsConvertF64, 0xe3, i_d) */
+ V(F64Mod, 0xca, d_dd) \
+ V(I32AsmjsDivS, 0xd0, i_ii) \
+ V(I32AsmjsDivU, 0xd1, i_ii) \
+ V(I32AsmjsRemS, 0xd2, i_ii) \
+ V(I32AsmjsRemU, 0xd3, i_ii) \
+ V(I32AsmjsLoadMem8S, 0xd4, i_i) \
+ V(I32AsmjsLoadMem8U, 0xd5, i_i) \
+ V(I32AsmjsLoadMem16S, 0xd6, i_i) \
+ V(I32AsmjsLoadMem16U, 0xd7, i_i) \
+ V(I32AsmjsLoadMem, 0xd8, i_i) \
+ V(F32AsmjsLoadMem, 0xd9, f_i) \
+ V(F64AsmjsLoadMem, 0xda, d_i) \
+ V(I32AsmjsStoreMem8, 0xdb, i_ii) \
+ V(I32AsmjsStoreMem16, 0xdc, i_ii) \
+ V(I32AsmjsStoreMem, 0xdd, i_ii) \
+ V(F32AsmjsStoreMem, 0xde, f_if) \
+ V(F64AsmjsStoreMem, 0xdf, d_id) \
+ V(I32AsmjsSConvertF32, 0xe0, i_f) \
+ V(I32AsmjsUConvertF32, 0xe1, i_f) \
+ V(I32AsmjsSConvertF64, 0xe2, i_d) \
+ V(I32AsmjsUConvertF64, 0xe3, i_d)
+
+#define FOREACH_SIMD_OPCODE(V) \
+ V(F32x4Splat, 0xe500, s_f) \
+ V(F32x4ExtractLane, 0xe501, f_si) \
+ V(F32x4ReplaceLane, 0xe502, s_sif) \
+ V(F32x4Abs, 0xe503, s_s) \
+ V(F32x4Neg, 0xe504, s_s) \
+ V(F32x4Sqrt, 0xe505, s_s) \
+ V(F32x4RecipApprox, 0xe506, s_s) \
+ V(F32x4SqrtApprox, 0xe507, s_s) \
+ V(F32x4Add, 0xe508, s_ss) \
+ V(F32x4Sub, 0xe509, s_ss) \
+ V(F32x4Mul, 0xe50a, s_ss) \
+ V(F32x4Div, 0xe50b, s_ss) \
+ V(F32x4Min, 0xe50c, s_ss) \
+ V(F32x4Max, 0xe50d, s_ss) \
+ V(F32x4MinNum, 0xe50e, s_ss) \
+ V(F32x4MaxNum, 0xe50f, s_ss) \
+ V(F32x4Eq, 0xe510, s_ss) \
+ V(F32x4Ne, 0xe511, s_ss) \
+ V(F32x4Lt, 0xe512, s_ss) \
+ V(F32x4Le, 0xe513, s_ss) \
+ V(F32x4Gt, 0xe514, s_ss) \
+ V(F32x4Ge, 0xe515, s_ss) \
+ V(F32x4Select, 0xe516, s_sss) \
+ V(F32x4Swizzle, 0xe517, s_s) \
+ V(F32x4Shuffle, 0xe518, s_ss) \
+ V(F32x4FromInt32x4, 0xe519, s_s) \
+ V(F32x4FromUint32x4, 0xe51a, s_s) \
+ V(I32x4Splat, 0xe51b, s_i) \
+ V(I32x4ExtractLane, 0xe51c, i_si) \
+ V(I32x4ReplaceLane, 0xe51d, s_sii) \
+ V(I32x4Neg, 0xe51e, s_s) \
+ V(I32x4Add, 0xe51f, s_ss) \
+ V(I32x4Sub, 0xe520, s_ss) \
+ V(I32x4Mul, 0xe521, s_ss) \
+ V(I32x4Min_s, 0xe522, s_ss) \
+ V(I32x4Max_s, 0xe523, s_ss) \
+ V(I32x4Shl, 0xe524, s_si) \
+ V(I32x4Shr_s, 0xe525, s_si) \
+ V(I32x4Eq, 0xe526, s_ss) \
+ V(I32x4Ne, 0xe527, s_ss) \
+ V(I32x4Lt_s, 0xe528, s_ss) \
+ V(I32x4Le_s, 0xe529, s_ss) \
+ V(I32x4Gt_s, 0xe52a, s_ss) \
+ V(I32x4Ge_s, 0xe52b, s_ss) \
+ V(I32x4Select, 0xe52c, s_sss) \
+ V(I32x4Swizzle, 0xe52d, s_s) \
+ V(I32x4Shuffle, 0xe52e, s_ss) \
+ V(I32x4FromFloat32x4, 0xe52f, s_s) \
+ V(I32x4Min_u, 0xe530, s_ss) \
+ V(I32x4Max_u, 0xe531, s_ss) \
+ V(I32x4Shr_u, 0xe532, s_ss) \
+ V(I32x4Lt_u, 0xe533, s_ss) \
+ V(I32x4Le_u, 0xe534, s_ss) \
+ V(I32x4Gt_u, 0xe535, s_ss) \
+ V(I32x4Ge_u, 0xe536, s_ss) \
+ V(Ui32x4FromFloat32x4, 0xe537, s_s) \
+ V(I16x8Splat, 0xe538, s_i) \
+ V(I16x8ExtractLane, 0xe539, i_si) \
+ V(I16x8ReplaceLane, 0xe53a, s_sii) \
+ V(I16x8Neg, 0xe53b, s_s) \
+ V(I16x8Add, 0xe53c, s_ss) \
+ V(I16x8AddSaturate_s, 0xe53d, s_ss) \
+ V(I16x8Sub, 0xe53e, s_ss) \
+ V(I16x8SubSaturate_s, 0xe53f, s_ss) \
+ V(I16x8Mul, 0xe540, s_ss) \
+ V(I16x8Min_s, 0xe541, s_ss) \
+ V(I16x8Max_s, 0xe542, s_ss) \
+ V(I16x8Shl, 0xe543, s_si) \
+ V(I16x8Shr_s, 0xe544, s_si) \
+ V(I16x8Eq, 0xe545, s_ss) \
+ V(I16x8Ne, 0xe546, s_ss) \
+ V(I16x8Lt_s, 0xe547, s_ss) \
+ V(I16x8Le_s, 0xe548, s_ss) \
+ V(I16x8Gt_s, 0xe549, s_ss) \
+ V(I16x8Ge_s, 0xe54a, s_ss) \
+ V(I16x8Select, 0xe54b, s_sss) \
+ V(I16x8Swizzle, 0xe54c, s_s) \
+ V(I16x8Shuffle, 0xe54d, s_ss) \
+ V(I16x8AddSaturate_u, 0xe54e, s_ss) \
+ V(I16x8SubSaturate_u, 0xe54f, s_ss) \
+ V(I16x8Min_u, 0xe550, s_ss) \
+ V(I16x8Max_u, 0xe551, s_ss) \
+ V(I16x8Shr_u, 0xe552, s_si) \
+ V(I16x8Lt_u, 0xe553, s_ss) \
+ V(I16x8Le_u, 0xe554, s_ss) \
+ V(I16x8Gt_u, 0xe555, s_ss) \
+ V(I16x8Ge_u, 0xe556, s_ss) \
+ V(I8x16Splat, 0xe557, s_i) \
+ V(I8x16ExtractLane, 0xe558, i_si) \
+ V(I8x16ReplaceLane, 0xe559, s_sii) \
+ V(I8x16Neg, 0xe55a, s_s) \
+ V(I8x16Add, 0xe55b, s_ss) \
+ V(I8x16AddSaturate_s, 0xe55c, s_ss) \
+ V(I8x16Sub, 0xe55d, s_ss) \
+ V(I8x16SubSaturate_s, 0xe55e, s_ss) \
+ V(I8x16Mul, 0xe55f, s_ss) \
+ V(I8x16Min_s, 0xe560, s_ss) \
+ V(I8x16Max_s, 0xe561, s_ss) \
+ V(I8x16Shl, 0xe562, s_si) \
+ V(I8x16Shr_s, 0xe563, s_si) \
+ V(I8x16Eq, 0xe564, s_ss) \
+ V(I8x16Neq, 0xe565, s_ss) \
+ V(I8x16Lt_s, 0xe566, s_ss) \
+ V(I8x16Le_s, 0xe567, s_ss) \
+ V(I8x16Gt_s, 0xe568, s_ss) \
+ V(I8x16Ge_s, 0xe569, s_ss) \
+ V(I8x16Select, 0xe56a, s_sss) \
+ V(I8x16Swizzle, 0xe56b, s_s) \
+ V(I8x16Shuffle, 0xe56c, s_ss) \
+ V(I8x16AddSaturate_u, 0xe56d, s_ss) \
+ V(I8x16Sub_saturate_u, 0xe56e, s_ss) \
+ V(I8x16Min_u, 0xe56f, s_ss) \
+ V(I8x16Max_u, 0xe570, s_ss) \
+ V(I8x16Shr_u, 0xe571, s_ss) \
+ V(I8x16Lt_u, 0xe572, s_ss) \
+ V(I8x16Le_u, 0xe573, s_ss) \
+ V(I8x16Gt_u, 0xe574, s_ss) \
+ V(I8x16Ge_u, 0xe575, s_ss) \
+ V(S128And, 0xe576, s_ss) \
+ V(S128Ior, 0xe577, s_ss) \
+ V(S128Xor, 0xe578, s_ss) \
+ V(S128Not, 0xe579, s_s)
// All opcodes.
-#define FOREACH_OPCODE(V) \
- FOREACH_CONTROL_OPCODE(V) \
- FOREACH_MISC_OPCODE(V) \
- FOREACH_SIMPLE_OPCODE(V) \
- FOREACH_STORE_MEM_OPCODE(V) \
- FOREACH_LOAD_MEM_OPCODE(V) \
- FOREACH_MISC_MEM_OPCODE(V) \
- FOREACH_ASMJS_COMPAT_OPCODE(V)
+#define FOREACH_OPCODE(V) \
+ FOREACH_CONTROL_OPCODE(V) \
+ FOREACH_MISC_OPCODE(V) \
+ FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_SIMPLE_MEM_OPCODE(V) \
+ FOREACH_STORE_MEM_OPCODE(V) \
+ FOREACH_LOAD_MEM_OPCODE(V) \
+ FOREACH_MISC_MEM_OPCODE(V) \
+ FOREACH_ASMJS_COMPAT_OPCODE(V) \
+ FOREACH_SIMD_OPCODE(V)
// All signatures.
#define FOREACH_SIGNATURE(V) \
+ FOREACH_SIMD_SIGNATURE(V) \
V(i_ii, kAstI32, kAstI32, kAstI32) \
V(i_i, kAstI32, kAstI32) \
V(i_v, kAstI32) \
@@ -322,33 +440,59 @@ struct WasmName {
V(f_if, kAstF32, kAstI32, kAstF32) \
V(l_il, kAstI64, kAstI32, kAstI64)
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kAstS128, kAstS128) \
+ V(s_f, kAstS128, kAstF32) \
+ V(f_si, kAstF32, kAstS128, kAstI32) \
+ V(s_sif, kAstS128, kAstS128, kAstI32, kAstF32) \
+ V(s_ss, kAstS128, kAstS128, kAstS128) \
+ V(s_sss, kAstS128, kAstS128, kAstS128, kAstS128) \
+ V(s_i, kAstS128, kAstI32) \
+ V(i_si, kAstI32, kAstS128, kAstI32) \
+ V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32) \
+ V(s_si, kAstS128, kAstS128, kAstI32)
+
+#define FOREACH_PREFIX(V) V(Simd, 0xe5)
+
enum WasmOpcode {
// Declare expression opcodes.
#define DECLARE_NAMED_ENUM(name, opcode, sig) kExpr##name = opcode,
FOREACH_OPCODE(DECLARE_NAMED_ENUM)
#undef DECLARE_NAMED_ENUM
+#define DECLARE_PREFIX(name, opcode) k##name##Prefix = opcode,
+ FOREACH_PREFIX(DECLARE_PREFIX)
+#undef DECLARE_PREFIX
};
// The reason for a trap.
+#define FOREACH_WASM_TRAPREASON(V) \
+ V(TrapUnreachable) \
+ V(TrapMemOutOfBounds) \
+ V(TrapDivByZero) \
+ V(TrapDivUnrepresentable) \
+ V(TrapRemByZero) \
+ V(TrapFloatUnrepresentable) \
+ V(TrapFuncInvalid) \
+ V(TrapFuncSigMismatch) \
+ V(TrapInvalidIndex)
+
enum TrapReason {
- kTrapUnreachable,
- kTrapMemOutOfBounds,
- kTrapDivByZero,
- kTrapDivUnrepresentable,
- kTrapRemByZero,
- kTrapFloatUnrepresentable,
- kTrapFuncInvalid,
- kTrapFuncSigMismatch,
+#define DECLARE_ENUM(name) k##name,
+ FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
kTrapCount
+#undef DECLARE_ENUM
};
// A collection of opcode-related static methods.
class WasmOpcodes {
public:
- static bool IsSupported(WasmOpcode opcode);
static const char* OpcodeName(WasmOpcode opcode);
+ static const char* ShortOpcodeName(WasmOpcode opcode);
static FunctionSig* Signature(WasmOpcode opcode);
+ static int TrapReasonToMessageId(TrapReason reason);
+ static const char* TrapReasonMessage(TrapReason reason);
+
static byte MemSize(MachineType type) {
return 1 << ElementSizeLog2Of(type.representation());
}
@@ -365,39 +509,14 @@ class WasmOpcodes {
return kLocalF64;
case kAstStmt:
return kLocalVoid;
+ case kAstS128:
+ return kLocalS128;
default:
UNREACHABLE();
return kLocalVoid;
}
}
- static MemTypeCode MemTypeCodeFor(MachineType type) {
- if (type == MachineType::Int8()) {
- return kMemI8;
- } else if (type == MachineType::Uint8()) {
- return kMemU8;
- } else if (type == MachineType::Int16()) {
- return kMemI16;
- } else if (type == MachineType::Uint16()) {
- return kMemU16;
- } else if (type == MachineType::Int32()) {
- return kMemI32;
- } else if (type == MachineType::Uint32()) {
- return kMemU32;
- } else if (type == MachineType::Int64()) {
- return kMemI64;
- } else if (type == MachineType::Uint64()) {
- return kMemU64;
- } else if (type == MachineType::Float32()) {
- return kMemF32;
- } else if (type == MachineType::Float64()) {
- return kMemF64;
- } else {
- UNREACHABLE();
- return kMemI32;
- }
- }
-
static MachineType MachineTypeFor(LocalType type) {
switch (type) {
case kAstI32:
@@ -408,6 +527,8 @@ class WasmOpcodes {
return MachineType::Float32();
case kAstF64:
return MachineType::Float64();
+ case kAstS128:
+ return MachineType::Simd128();
case kAstStmt:
return MachineType::None();
default:
@@ -437,6 +558,8 @@ class WasmOpcodes {
return kAstF32;
} else if (type == MachineType::Float64()) {
return kAstF64;
+ } else if (type == MachineType::Simd128()) {
+ return kAstS128;
} else {
UNREACHABLE();
return kAstI32;
@@ -480,6 +603,8 @@ class WasmOpcodes {
return 'f';
case kAstF64:
return 'd';
+ case kAstS128:
+ return 's';
case kAstStmt:
return 'v';
case kAstEnd:
@@ -500,6 +625,8 @@ class WasmOpcodes {
return "f32";
case kAstF64:
return "f64";
+ case kAstS128:
+ return "s128";
case kAstStmt:
return "<stmt>";
case kAstEnd:
@@ -508,29 +635,6 @@ class WasmOpcodes {
return "<unknown>";
}
}
-
- static const char* TrapReasonName(TrapReason reason) {
- switch (reason) {
- case kTrapUnreachable:
- return "unreachable";
- case kTrapMemOutOfBounds:
- return "memory access out of bounds";
- case kTrapDivByZero:
- return "divide by zero";
- case kTrapDivUnrepresentable:
- return "divide result unrepresentable";
- case kTrapRemByZero:
- return "remainder by zero";
- case kTrapFloatUnrepresentable:
- return "integer result unrepresentable";
- case kTrapFuncInvalid:
- return "invalid function";
- case kTrapFuncSigMismatch:
- return "function signature mismatch";
- default:
- return "<?>";
- }
- }
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 4fd17ee364..30268ac8ad 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -6,8 +6,7 @@
#include "src/factory.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/isolate-inl.h"
#include "src/objects.h"
#include "src/base/platform/platform.h"
@@ -28,12 +27,11 @@ std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code) {
return os;
}
-
void ErrorThrower::Error(const char* format, ...) {
- if (error_) return; // only report the first error.
- error_ = true;
- char buffer[256];
+ // Only report the first error.
+ if (error()) return;
+ char buffer[256];
va_list arguments;
va_start(arguments, format);
base::OS::VSNPrintF(buffer, 255, format, arguments);
@@ -45,8 +43,13 @@ void ErrorThrower::Error(const char* format, ...) {
}
str << buffer;
- isolate_->ScheduleThrow(
- *isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str()));
+ message_ = isolate_->factory()->NewStringFromAsciiChecked(str.str().c_str());
+}
+
+ErrorThrower::~ErrorThrower() {
+ if (error() && !isolate_->has_pending_exception()) {
+ isolate_->ScheduleThrow(*message_);
+ }
}
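// Sketch of the deferred-throw contract above: an error recorded through the
// thrower is only scheduled once the scope unwinds, so a caller can record a
// message and still clean up or return first (illustrative caller):
//   {
//     ErrorThrower thrower(isolate, "example");
//     if (failed) thrower.Error("%s", "something went wrong");
//     // ... early returns, cleanup ...
//   }  // ~ErrorThrower() schedules the exception here, if none is pending.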
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 59ab29ebe4..f16c15906d 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -5,8 +5,11 @@
#ifndef V8_WASM_RESULT_H_
#define V8_WASM_RESULT_H_
-#include "src/base/smart-pointers.h"
+#include <memory>
+#include "src/base/compiler-specific.h"
+
+#include "src/handles.h"
#include "src/globals.h"
namespace v8 {
@@ -37,9 +40,12 @@ enum ErrorCode {
// The overall result of decoding a function or a module.
template <typename T>
struct Result {
- Result()
- : val(nullptr), error_code(kSuccess), start(nullptr), error_pc(nullptr) {
- error_msg.Reset(nullptr);
+ Result() : val(), error_code(kSuccess), start(nullptr), error_pc(nullptr) {}
+ Result(Result&& other) { *this = std::move(other); }
+ Result& operator=(Result&& other) {
+ MoveFrom(other);
+ val = other.val;
+ return *this;
}
T val;
@@ -47,19 +53,22 @@ struct Result {
const byte* start;
const byte* error_pc;
const byte* error_pt;
- base::SmartArrayPointer<char> error_msg;
+ std::unique_ptr<char[]> error_msg;
bool ok() const { return error_code == kSuccess; }
bool failed() const { return error_code != kSuccess; }
template <typename V>
- void CopyFrom(Result<V>& that) {
+ void MoveFrom(Result<V>& that) {
error_code = that.error_code;
start = that.start;
error_pc = that.error_pc;
error_pt = that.error_pt;
- error_msg = that.error_msg;
+ error_msg = std::move(that.error_msg);
}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Result);
};
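// Result<T> is now move-only (error_msg is a std::unique_ptr), so decode
// results transfer ownership instead of copying (illustrative):
//   ModuleResult decoded = DecodeWasmModule(...);  // returned by value
//   ModuleResult moved = std::move(decoded);       // move ctor -> MoveFrom()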
template <typename T>
@@ -91,23 +100,30 @@ std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
class ErrorThrower {
public:
ErrorThrower(Isolate* isolate, const char* context)
- : isolate_(isolate), context_(context), error_(false) {}
+ : isolate_(isolate), context_(context) {}
+ ~ErrorThrower();
- void Error(const char* fmt, ...);
+ PRINTF_FORMAT(2, 3) void Error(const char* fmt, ...);
template <typename T>
void Failed(const char* error, Result<T>& result) {
std::ostringstream str;
str << error << result;
- return Error(str.str().c_str());
+ return Error("%s", str.str().c_str());
+ }
+
+ i::Handle<i::String> Reify() {
+ auto result = message_;
+ message_ = i::Handle<i::String>();
+ return result;
}
- bool error() const { return error_; }
+ bool error() const { return !message_.is_null(); }
private:
Isolate* isolate_;
const char* context_;
- bool error_;
+ i::Handle<i::String> message_;
};
} // namespace wasm
} // namespace internal
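
Note: wasm-result.h replaces base::SmartArrayPointer with std::unique_ptr<char[]> and renames CopyFrom to MoveFrom, making Result<T> a move-only type. The sketch below shows the pattern in isolation (member names simplified from V8's); holding the message in a unique_ptr makes copies ill-formed, so the struct gains move operations instead.

#include <cstring>
#include <memory>
#include <utility>

template <typename T>
struct Result {
  Result() = default;
  Result(Result&& other) { *this = std::move(other); }
  Result& operator=(Result&& other) {
    val = std::move(other.val);
    error_msg = std::move(other.error_msg);  // Transfers ownership.
    return *this;
  }
  Result(const Result&) = delete;             // What DISALLOW_COPY_AND_ASSIGN
  Result& operator=(const Result&) = delete;  // boils down to.

  T val{};
  std::unique_ptr<char[]> error_msg;
};

int main() {
  Result<int> a;
  a.error_msg.reset(new char[16]);
  std::strcpy(a.error_msg.get(), "decode failed");
  Result<int> b = std::move(a);  // OK: ownership of the message moves.
  // Result<int> c = b;          // Would not compile: copying is deleted.
}
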
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index f32f407a8d..518df5a47c 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -16,6 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSimd128() { return false; }
// -----------------------------------------------------------------------------
// Implementation of Assembler
@@ -65,7 +66,7 @@ void Assembler::emit_code_target(Handle<Code> target,
RecordRelocInfo(rmode);
}
int current = code_targets_.length();
- if (current > 0 && code_targets_.last().is_identical_to(target)) {
+ if (current > 0 && code_targets_.last().address() == target.address()) {
// Optimization if we keep jumping to the same code target.
emitl(current - 1);
} else {
@@ -78,7 +79,8 @@ void Assembler::emit_code_target(Handle<Code> target,
void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
RecordRelocInfo(rmode);
- emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
+ emitl(static_cast<uint32_t>(
+ entry - isolate()->heap()->memory_allocator()->code_range()->start()));
}
@@ -178,12 +180,10 @@ void Assembler::emit_optional_rex_32(Register rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
-
void Assembler::emit_optional_rex_32(XMMRegister rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
-
void Assembler::emit_optional_rex_32(const Operand& op) {
if (op.rex_ != 0) emit(0x40 | op.rex_);
}
@@ -299,7 +299,8 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
Address Assembler::runtime_entry_at(Address pc) {
- return Memory::int32_at(pc) + isolate()->code_range()->start();
+ return Memory::int32_at(pc) +
+ isolate()->heap()->memory_allocator()->code_range()->start();
}
// -----------------------------------------------------------------------------
@@ -326,11 +327,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Memory::Address_at(pc_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -354,35 +350,6 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
- IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Memory::Address_at(pc_) = updated_reference;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
- }
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(pc_);
@@ -430,6 +397,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@@ -538,7 +506,7 @@ void RelocInfo::set_debug_call_address(Address target) {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
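
Note: besides routing code_range() through heap()->memory_allocator() and moving the wasm reloc accessors out of the inline header, the last hunk above turns RelocInfo::Visit into a template on the visitor type. A small sketch of that shape, with illustrative types rather than V8's:

#include <cstdio>

struct RelocSite { int mode; };

struct PrintingVisitor {
  void VisitEmbeddedObject(RelocSite* site) {
    std::printf("embedded object, mode %d\n", site->mode);
  }
};

// Templating on ObjectVisitor lets the call below be resolved statically and
// inlined per visitor; a virtual base class would force an indirect call for
// every relocation entry walked.
template <typename ObjectVisitor>
void Visit(RelocSite* site, ObjectVisitor* visitor) {
  visitor->VisitEmbeddedObject(site);
}

int main() {
  RelocSite site{1};
  PrintingVisitor v;
  Visit(&site, &v);
}
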
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 214b786fed..9a0d18e9c2 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -114,6 +114,33 @@ void CpuFeatures::PrintFeatures() {
CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
}
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Memory::Address_at(pc_) = address;
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Memory::uint32_at(pc_) = size;
+}
// -----------------------------------------------------------------------------
// Implementation of Operand
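
Note: the new unchecked_update_wasm_memory_reference/size helpers above patch a value stored inline in the instruction stream with a raw store at pc, leaving range checks to the caller. A toy illustration of that in-place patching, with a plain byte buffer standing in for emitted code:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Stand-ins for Memory::Address_at(pc): the address lives inline in the code.
void PatchAddressAt(uint8_t* pc, uintptr_t address) {
  std::memcpy(pc, &address, sizeof(address));  // Raw in-place store, no checks.
}

uintptr_t AddressAt(const uint8_t* pc) {
  uintptr_t address;
  std::memcpy(&address, pc, sizeof(address));
  return address;
}

int main() {
  uint8_t code[16] = {0};                // Pretend this is emitted machine code.
  PatchAddressAt(code + 2, 0xDEADBEEF);  // E.g. a relocated wasm memory base.
  std::printf("%#zx\n", static_cast<size_t>(AddressAt(code + 2)));
}
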
@@ -253,17 +280,11 @@ bool Operand::AddressUsesRegister(Register reg) const {
// -----------------------------------------------------------------------------
// Implementation of Assembler.
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- code_targets_(100),
- positions_recorder_(this) {
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it.
+ : AssemblerBase(isolate, buffer, buffer_size), code_targets_(100) {
+// Clear the buffer in debug mode unless it was provided by the
+// caller in which case we can't be sure it's okay to overwrite
+// existing code in it.
#ifdef DEBUG
if (own_buffer_) {
memset(buffer_, 0xCC, buffer_size_); // int3
@@ -271,18 +292,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
#endif
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
}
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
- reloc_info_writer.Finish();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -293,6 +308,8 @@ void Assembler::GetCode(CodeDesc* desc) {
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
@@ -560,17 +577,17 @@ void Assembler::immediate_arithmetic_op(byte subcode,
int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
- if (is_int8(src.value_)) {
+ if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
} else if (dst.is(rax)) {
emit(0x05 | (subcode << 3));
- emitl(src.value_);
+ emit(src);
} else {
emit(0x81);
emit_modrm(subcode, dst);
- emitl(src.value_);
+ emit(src);
}
}
@@ -583,11 +600,14 @@ void Assembler::immediate_arithmetic_op(byte subcode,
if (is_int8(src.value_)) {
emit(0x83);
emit_operand(subcode, dst);
+ if (!RelocInfo::IsNone(src.rmode_)) {
+ RecordRelocInfo(src.rmode_);
+ }
emit(src.value_);
} else {
emit(0x81);
emit_operand(subcode, dst);
- emitl(src.value_);
+ emit(src);
}
}
@@ -803,7 +823,6 @@ void Assembler::bsfq(Register dst, const Operand& src) {
void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
@@ -825,7 +844,6 @@ void Assembler::call(Label* L) {
void Assembler::call(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
@@ -836,7 +854,6 @@ void Assembler::call(Address entry, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> target,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
@@ -845,7 +862,6 @@ void Assembler::call(Handle<Code> target,
void Assembler::call(Register adr) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// Opcode: FF /2 r64.
emit_optional_rex_32(adr);
@@ -855,7 +871,6 @@ void Assembler::call(Register adr) {
void Assembler::call(const Operand& op) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// Opcode: FF /2 m64.
emit_optional_rex_32(op);
@@ -869,7 +884,6 @@ void Assembler::call(const Operand& op) {
// same Code object. Should not be used when generating new code (use labels),
// but only when patching existing code.
void Assembler::call(Address target) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
// 1110 1000 #32-bit disp.
emit(0xE8);
@@ -971,6 +985,40 @@ void Assembler::cmpb_al(Immediate imm8) {
emit(imm8.value_);
}
+void Assembler::lock() {
+ EnsureSpace ensure_space(this);
+ emit(0xf0);
+}
+
+void Assembler::cmpxchgb(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ if (!src.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(src, dst);
+ } else {
+ emit_optional_rex_32(src, dst);
+ }
+ emit(0x0f);
+ emit(0xb0);
+ emit_operand(src, dst);
+}
+
+void Assembler::cmpxchgw(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0f);
+ emit(0xb1);
+ emit_operand(src, dst);
+}
+
+void Assembler::emit_cmpxchg(const Operand& dst, Register src, int size) {
+ EnsureSpace ensure_space(this);
+ emit_rex(src, dst, size);
+ emit(0x0f);
+ emit(0xb1);
+ emit_operand(src, dst);
+}
void Assembler::cpuid() {
EnsureSpace ensure_space(this);
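
Note: the new lock()/cmpxchgb()/cmpxchgw()/emit_cmpxchg() emitters above encode x64 compare-exchange; prefixed with lock, the sequence is the atomic compare-and-swap primitive. For reference, the C++11 equivalent below compiles to lock cmpxchg on x64:

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> cell{5};
  int expected = 5;
  // On x64 this lowers to roughly: mov eax, expected; lock cmpxchg [cell], 7.
  bool swapped = cell.compare_exchange_strong(expected, 7);
  std::printf("swapped=%d cell=%d\n", swapped, cell.load());  // swapped=1 cell=7
}
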
@@ -1484,7 +1532,6 @@ void Assembler::movq(Register dst, uint64_t value, RelocInfo::Mode rmode) {
movq(dst, static_cast<int64_t>(value), rmode);
}
-
// Loads the ip-relative location of the src label into the target location
// (as a 32-bit offset sign extended to 64-bit).
void Assembler::movl(const Operand& dst, Label* src) {
@@ -1539,6 +1586,13 @@ void Assembler::movsxbq(Register dst, const Operand& src) {
emit_operand(dst, src);
}
+void Assembler::movsxbq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBE);
+ emit_modrm(dst, src);
+}
void Assembler::movsxwl(Register dst, Register src) {
EnsureSpace ensure_space(this);
@@ -1566,6 +1620,13 @@ void Assembler::movsxwq(Register dst, const Operand& src) {
emit_operand(dst, src);
}
+void Assembler::movsxwq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xBF);
+ emit_modrm(dst, src);
+}
void Assembler::movsxlq(Register dst, Register src) {
EnsureSpace ensure_space(this);
@@ -1909,6 +1970,25 @@ void Assembler::shrd(Register dst, Register src) {
emit_modrm(src, dst);
}
+void Assembler::xchgb(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ if (!reg.is_byte_register()) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, op);
+ } else {
+ emit_optional_rex_32(reg, op);
+ }
+ emit(0x86);
+ emit_operand(reg, op);
+}
+
+void Assembler::xchgw(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(reg, op);
+ emit(0x87);
+ emit_operand(reg, op);
+}
void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
@@ -2032,14 +2112,14 @@ void Assembler::testw(Register reg, Immediate mask) {
emit(0x66);
if (reg.is(rax)) {
emit(0xA9);
- emit(mask.value_);
+ emitw(mask.value_);
} else {
if (reg.low_bits() == 4) {
emit_rex_32(reg);
}
emit(0xF7);
emit_modrm(0x0, reg);
- emit(mask.value_);
+ emitw(mask.value_);
}
}
@@ -2050,7 +2130,7 @@ void Assembler::testw(const Operand& op, Immediate mask) {
emit_optional_rex_32(rax, op);
emit(0xF7);
emit_operand(rax, op);
- emit(mask.value_);
+ emitw(mask.value_);
}
void Assembler::testw(const Operand& op, Register reg) {
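
Note: the testw hunks above fix a real truncation bug: the 16-bit immediate mask was previously written with the byte-sized emit(), dropping the high byte, and is now written with emitw(). A toy emitter showing the difference:

#include <cstdint>
#include <cstdio>
#include <vector>

void emit(std::vector<uint8_t>& code, uint8_t b) { code.push_back(b); }

void emitw(std::vector<uint8_t>& code, uint16_t w) {
  emit(code, static_cast<uint8_t>(w));       // Low byte first (little-endian),
  emit(code, static_cast<uint8_t>(w >> 8));  // then the high byte.
}

int main() {
  std::vector<uint8_t> bad, good;
  emit(bad, static_cast<uint8_t>(0x1234));  // Truncates the mask to 0x34.
  emitw(good, 0x1234);                      // Emits 34 12 as intended.
  std::printf("bad=%zu byte(s), good=%zu bytes\n", bad.size(), good.size());
}
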
@@ -2793,6 +2873,18 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
emit(imm8);
}
+void Assembler::insertps(XMMRegister dst, XMMRegister src, byte imm8) {
+ DCHECK(CpuFeatures::IsSupported(SSE4_1));
+ DCHECK(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x21);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
void Assembler::movsd(const Operand& dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
@@ -2876,6 +2968,24 @@ void Assembler::movapd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::movupd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movupd(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11);
+ emit_sse_operand(src, dst);
+}
+
void Assembler::addss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3114,6 +3224,43 @@ void Assembler::psrld(XMMRegister reg, byte imm8) {
emit(imm8);
}
+void Assembler::cmpps(XMMRegister dst, XMMRegister src, int8_t cmp) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(cmp);
+}
+
+void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(cmp);
+}
+
+void Assembler::cmppd(XMMRegister dst, XMMRegister src, int8_t cmp) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x66);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(cmp);
+}
+
+void Assembler::cmppd(XMMRegister dst, const Operand& src, int8_t cmp) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x66);
+ emit(0x0F);
+ emit(0xC2);
+ emit_sse_operand(dst, src);
+ emit(cmp);
+}
void Assembler::cvttss2si(Register dst, const Operand& src) {
DCHECK(!IsEnabled(AVX));
@@ -3486,6 +3633,16 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::andpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x54);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3496,6 +3653,16 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::orpd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x56);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3507,6 +3674,17 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::xorpd(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3631,6 +3809,14 @@ void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::punpckldq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x62);
+ emit_sse_operand(dst, src);
+}
void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
@@ -4095,6 +4281,246 @@ void Assembler::rorxl(Register dst, const Operand& src, byte imm8) {
emit(imm8);
}
+void Assembler::minps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::minps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::maxps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::maxps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x53);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x53);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::rsqrtps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x52);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x52);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::sqrtps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::sqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x51);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movups(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ if (src.low_bits() == 4) {
+ // Try to avoid an unnecessary SIB byte.
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11);
+ emit_sse_operand(src, dst);
+ } else {
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10);
+ emit_sse_operand(dst, src);
+ }
+}
+
+void Assembler::movups(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x10);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::movups(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x11);
+ emit_sse_operand(src, dst);
+}
+
+void Assembler::paddd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFE);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::paddd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFE);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::psubd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFA);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::psubd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xFA);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x38);
+ emit(0x40);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pmulld(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x38);
+ emit(0x40);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pmuludq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xF4);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pmuludq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xF4);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(dst);
+ emit(shift);
+}
+
+void Assembler::cvtps2dq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::cvtps2dq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x70);
+ emit_sse_operand(dst, src);
+ emit(shuffle);
+}
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
@@ -4122,6 +4548,10 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
+void Assembler::emit_sse_operand(XMMRegister dst) {
+ emit(0xD8 | dst.low_bits());
+}
+
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
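
Note: the bulk of the assembler-x64.cc additions above follow one SSE encoding template: an optional 0x66 operand-size prefix for the packed-integer/double forms, an optional REX byte when xmm8-xmm15 are involved, the 0x0F opcode escape, the opcode itself, then a ModRM byte. The toy emitter below mirrors the register form of paddd only; it is an illustration, not V8's emitter.

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> EncodePaddd(int dst, int src) {
  std::vector<uint8_t> out;
  out.push_back(0x66);  // Packed-integer prefix.
  uint8_t rex = 0x40 | ((dst >> 3) << 2) | (src >> 3);
  if (rex != 0x40) out.push_back(rex);  // REX only for xmm8-xmm15,
                                        // like emit_optional_rex_32.
  out.push_back(0x0F);  // Two-byte opcode escape.
  out.push_back(0xFE);  // paddd opcode.
  out.push_back(0xC0 | ((dst & 7) << 3) | (src & 7));  // ModRM, register-direct.
  return out;
}

int main() {
  for (uint8_t b : EncodePaddd(1, 2)) std::printf("%02x ", b);
  std::printf("\n");  // Prints: 66 0f fe ca  (paddd xmm1, xmm2)
}
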
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index e48f3586d3..b2154fbaf4 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -117,8 +117,6 @@ struct Register {
Register r = {code};
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -183,7 +181,11 @@ const Register arg_reg_4 = {Register::kCode_rcx};
V(xmm14) \
V(xmm15)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(xmm0) \
V(xmm1) \
V(xmm2) \
V(xmm3) \
@@ -197,11 +199,11 @@ const Register arg_reg_4 = {Register::kCode_rcx};
V(xmm11) \
V(xmm12) \
V(xmm13) \
- V(xmm14) \
- V(xmm15)
+ V(xmm14)
+static const bool kSimpleFPAliasing = true;
-struct DoubleRegister {
+struct XMMRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -212,15 +214,13 @@ struct DoubleRegister {
static const int kMaxNumRegisters = Code::kAfterLast;
- static DoubleRegister from_code(int code) {
- DoubleRegister result = {code};
+ static XMMRegister from_code(int code) {
+ XMMRegister result = {code};
return result;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
+ bool is(XMMRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
@@ -238,6 +238,11 @@ struct DoubleRegister {
int reg_code;
};
+typedef XMMRegister FloatRegister;
+
+typedef XMMRegister DoubleRegister;
+
+typedef XMMRegister Simd128Register;
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
@@ -245,11 +250,6 @@ DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-
-typedef DoubleRegister XMMRegister;
-
-typedef DoubleRegister Simd128Register;
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -334,6 +334,8 @@ enum RoundingMode {
class Immediate BASE_EMBEDDED {
public:
explicit Immediate(int32_t value) : value_(value) {}
+ explicit Immediate(int32_t value, RelocInfo::Mode rmode)
+ : value_(value), rmode_(rmode) {}
explicit Immediate(Smi* value) {
DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
@@ -341,6 +343,7 @@ class Immediate BASE_EMBEDDED {
private:
int32_t value_;
+ RelocInfo::Mode rmode_ = RelocInfo::NONE32;
friend class Assembler;
};
@@ -417,11 +420,11 @@ class Operand BASE_EMBEDDED {
friend class Assembler;
};
-
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
V(and) \
V(cmp) \
+ V(cmpxchg) \
V(dec) \
V(idiv) \
V(div) \
@@ -441,7 +444,6 @@ class Operand BASE_EMBEDDED {
V(xchg) \
V(xor)
-
// Shift instructions on operands/registers with kPointerSize, kInt32Size and
// kInt64Size.
#define SHIFT_INSTRUCTION_LIST(V) \
@@ -706,9 +708,11 @@ class Assembler : public AssemblerBase {
void movsxbl(Register dst, Register src);
void movsxbl(Register dst, const Operand& src);
+ void movsxbq(Register dst, Register src);
void movsxbq(Register dst, const Operand& src);
void movsxwl(Register dst, Register src);
void movsxwl(Register dst, const Operand& src);
+ void movsxwq(Register dst, Register src);
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
@@ -784,6 +788,15 @@ class Assembler : public AssemblerBase {
void decb(Register dst);
void decb(const Operand& dst);
+ // Lock prefix.
+ void lock();
+
+ void xchgb(Register reg, const Operand& op);
+ void xchgw(Register reg, const Operand& op);
+
+ void cmpxchgb(const Operand& dst, Register src);
+ void cmpxchgw(const Operand& dst, Register src);
+
// Sign-extends rax into rdx:rax.
void cqo();
// Sign-extends eax into edx:eax.
@@ -1083,6 +1096,8 @@ class Assembler : public AssemblerBase {
void movdqu(XMMRegister dst, const Operand& src);
void movapd(XMMRegister dst, XMMRegister src);
+ void movupd(XMMRegister dst, const Operand& src);
+ void movupd(const Operand& dst, XMMRegister src);
void psllq(XMMRegister reg, byte imm8);
void psrlq(XMMRegister reg, byte imm8);
@@ -1129,8 +1144,11 @@ class Assembler : public AssemblerBase {
void minsd(XMMRegister dst, const Operand& src);
void andpd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, const Operand& src);
void orpd(XMMRegister dst, XMMRegister src);
+ void orpd(XMMRegister dst, const Operand& src);
void xorpd(XMMRegister dst, XMMRegister src);
+ void xorpd(XMMRegister dst, const Operand& src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, const Operand& src);
@@ -1142,19 +1160,69 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
void punpckldq(XMMRegister dst, XMMRegister src);
+ void punpckldq(XMMRegister dst, const Operand& src);
void punpckhdq(XMMRegister dst, XMMRegister src);
// SSE 4.1 instruction
+ void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void extractps(Register dst, XMMRegister src, byte imm8);
-
void pextrd(Register dst, XMMRegister src, int8_t imm8);
-
void pinsrd(XMMRegister dst, Register src, int8_t imm8);
void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+ void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
+ void cmpps(XMMRegister dst, const Operand& src, int8_t cmp);
+ void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp);
+ void cmppd(XMMRegister dst, const Operand& src, int8_t cmp);
+
+#define SSE_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
+ void instr##ps(XMMRegister dst, const Operand& src) { \
+ cmpps(dst, src, imm8); \
+ } \
+ void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
+ void instr##pd(XMMRegister dst, const Operand& src) { cmppd(dst, src, imm8); }
+
+ SSE_CMP_P(cmpeq, 0x0);
+ SSE_CMP_P(cmplt, 0x1);
+ SSE_CMP_P(cmple, 0x2);
+ SSE_CMP_P(cmpneq, 0x4);
+ SSE_CMP_P(cmpnlt, 0x5);
+ SSE_CMP_P(cmpnle, 0x6);
+
+#undef SSE_CMP_P
+
+ void minps(XMMRegister dst, XMMRegister src);
+ void minps(XMMRegister dst, const Operand& src);
+ void maxps(XMMRegister dst, XMMRegister src);
+ void maxps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, XMMRegister src);
+ void rcpps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, XMMRegister src);
+ void rsqrtps(XMMRegister dst, const Operand& src);
+ void sqrtps(XMMRegister dst, XMMRegister src);
+ void sqrtps(XMMRegister dst, const Operand& src);
+ void movups(XMMRegister dst, XMMRegister src);
+ void movups(XMMRegister dst, const Operand& src);
+ void movups(const Operand& dst, XMMRegister src);
+ void paddd(XMMRegister dst, XMMRegister src);
+ void paddd(XMMRegister dst, const Operand& src);
+ void psubd(XMMRegister dst, XMMRegister src);
+ void psubd(XMMRegister dst, const Operand& src);
+ void pmulld(XMMRegister dst, XMMRegister src);
+ void pmulld(XMMRegister dst, const Operand& src);
+ void pmuludq(XMMRegister dst, XMMRegister src);
+ void pmuludq(XMMRegister dst, const Operand& src);
+ void psrldq(XMMRegister dst, uint8_t shift);
+ void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+ void cvtps2dq(XMMRegister dst, XMMRegister src);
+ void cvtps2dq(XMMRegister dst, const Operand& src);
+ void cvtdq2ps(XMMRegister dst, XMMRegister src);
+ void cvtdq2ps(XMMRegister dst, const Operand& src);
+
// AVX instruction
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmasd(0x99, dst, src1, src2);
@@ -1484,11 +1552,69 @@ class Assembler : public AssemblerBase {
void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
+ void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
+ void vmovups(XMMRegister dst, const Operand& src) {
+ vps(0x10, dst, xmm0, src);
+ }
+ void vmovups(const Operand& dst, XMMRegister src) {
+ vps(0x11, src, xmm0, dst);
+ }
void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
+ void vmovupd(XMMRegister dst, const Operand& src) {
+ vpd(0x10, dst, xmm0, src);
+ }
+ void vmovupd(const Operand& dst, XMMRegister src) {
+ vpd(0x11, src, xmm0, dst);
+ }
+ void vmovmskps(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vps(0x50, idst, xmm0, src);
+ }
void vmovmskpd(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
vpd(0x50, idst, xmm0, src);
}
+ void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
+ vps(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
+ void vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t cmp) {
+ vps(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
+ void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
+ void vcmppd(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t cmp) {
+ vpd(0xC2, dst, src1, src2);
+ emit(cmp);
+ }
+
+#define AVX_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ } \
+ void instr##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ vcmppd(dst, src1, src2, imm8); \
+ }
+
+ AVX_CMP_P(vcmpeq, 0x0);
+ AVX_CMP_P(vcmplt, 0x1);
+ AVX_CMP_P(vcmple, 0x2);
+ AVX_CMP_P(vcmpneq, 0x4);
+ AVX_CMP_P(vcmpnlt, 0x5);
+ AVX_CMP_P(vcmpnle, 0x6);
+
+#undef AVX_CMP_P
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
@@ -1689,7 +1815,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
@@ -1706,10 +1832,6 @@ class Assembler : public AssemblerBase {
void dp(uintptr_t data) { dq(data); }
void dq(Label* label);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@@ -1755,7 +1877,12 @@ class Assembler : public AssemblerBase {
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
- void emit(Immediate x) { emitl(x.value_); }
+ void emit(Immediate x) {
+ if (!RelocInfo::IsNone(x.rmode_)) {
+ RecordRelocInfo(x.rmode_);
+ }
+ emitl(x.value_);
+ }
// Emits a REX prefix that encodes a 64-bit operand size and
// the top bit of both register codes.
@@ -1926,6 +2053,7 @@ class Assembler : public AssemblerBase {
void emit_sse_operand(Register reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
+ void emit_sse_operand(XMMRegister dst);
// Emit machine code for one of the operations ADD, ADC, SUB, SBC,
// AND, OR, XOR, or CMP. The encodings of these operations are all
@@ -2042,6 +2170,11 @@ class Assembler : public AssemblerBase {
immediate_arithmetic_op(0x7, dst, src, size);
}
+ // Compare {al,ax,eax,rax} with dst. If equal, set ZF and write src into
+ // dst. Otherwise clear ZF and write dst into {al,ax,eax,rax}. This
+ // operation is only atomic if prefixed by the lock instruction.
+ void emit_cmpxchg(const Operand& dst, Register src, int size);
+
void emit_dec(Register dst, int size);
void emit_dec(const Operand& dst, int size);
@@ -2190,9 +2323,6 @@ class Assembler : public AssemblerBase {
std::deque<int> internal_reference_positions_;
List< Handle<Code> > code_targets_;
-
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
};
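
Note: in the header hunks above, Immediate gains an optional RelocInfo::Mode and emit(Immediate) records a relocation entry before writing the value; this is what lets the immediate forms of the arithmetic ops carry wasm memory-size references that can be patched later. A stripped-down model of that flow, with toy types in place of V8's RelocInfo machinery:

#include <cstdint>
#include <cstdio>
#include <vector>

enum class RelocMode { kNone, kWasmMemorySizeReference };

struct Immediate {
  int32_t value;
  RelocMode rmode = RelocMode::kNone;
};

struct ToyAssembler {
  std::vector<uint8_t> code;
  std::vector<size_t> reloc_offsets;  // Where patchable immediates live.

  void emit(Immediate x) {
    // Record the reloc entry first, at the current pc, then emit 4 bytes.
    if (x.rmode != RelocMode::kNone) reloc_offsets.push_back(code.size());
    for (int i = 0; i < 4; ++i) {
      code.push_back(static_cast<uint8_t>(x.value >> (8 * i)));
    }
  }
};

int main() {
  ToyAssembler masm;
  masm.code.push_back(0x81);  // Pretend opcode bytes precede the immediate.
  masm.emit(Immediate{0x10000, RelocMode::kWasmMemorySizeReference});
  std::printf("reloc entries: %zu, first at offset %zu\n",
              masm.reloc_offsets.size(), masm.reloc_offsets[0]);
}
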
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index e737801f58..4b5165a0be 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -20,60 +20,16 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ popq(rcx);
+ __ movq(MemOperand(rsp, rax, times_8, 0), rdi);
+ __ pushq(rdi);
+ __ pushq(rbx);
+ __ pushq(rcx);
+ __ addq(rax, Immediate(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -81,21 +37,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -185,7 +132,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
- __ Movsd(xmm0, mantissa_operand);
+ __ Movsd(kScratchDoubleReg, mantissa_operand);
__ movl(rcx, exponent_operand);
if (stash_exponent_copy) __ pushq(rcx);
@@ -205,7 +152,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ jmp(&check_negative);
__ bind(&process_64_bits);
- __ Cvttsd2siq(result_reg, xmm0);
+ __ Cvttsd2siq(result_reg, kScratchDoubleReg);
__ jmp(&done, Label::kNear);
// If the double was negative, negate the integer result.
@@ -267,7 +214,6 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(rdx));
- const Register base = rax;
const Register scratch = rcx;
const XMMRegister double_result = xmm3;
const XMMRegister double_base = xmm2;
@@ -280,37 +226,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movp(scratch, Immediate(1));
__ Cvtlsi2sd(double_result, scratch);
- if (exponent_type() == ON_STACK) {
- Label base_is_smi, unpack_exponent;
- // The exponent and base are supplied as arguments on the stack.
- // This can only happen if the stub is called from non-optimized code.
- // Load input parameters from stack.
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(base, args.GetArgumentOperand(0));
- __ movp(exponent, args.GetArgumentOperand(1));
- __ JumpIfSmi(base, &base_is_smi, Label::kNear);
- __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
-
- __ Movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
- __ jmp(&unpack_exponent, Label::kNear);
-
- __ bind(&base_is_smi);
- __ SmiToInteger32(base, base);
- __ Cvtlsi2sd(double_base, base);
- __ bind(&unpack_exponent);
-
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiToInteger32(exponent, exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
- } else if (exponent_type() == TAGGED) {
+ if (exponent_type() == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ SmiToInteger32(exponent, exponent);
__ jmp(&int_exponent);
@@ -334,76 +250,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmpl(exponent, Immediate(0x1));
__ j(overflow, &call_runtime);
- if (exponent_type() == ON_STACK) {
- // Detect square root case. Crankshaft detects constant +/-0.5 at
- // compile time and uses DoMathPowHalf instead. We then skip this check
- // for non-constant cases of +/-0.5 as these hardly occur.
- Label continue_sqrt, continue_rsqrt, not_plus_half;
- // Test for 0.5.
- // Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
- __ Movq(double_scratch, scratch);
- // Already ruled out NaNs for exponent.
- __ Ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &not_plus_half, Label::kNear);
-
- // Calculates square root of base. Check for the special case of
- // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
- // According to IEEE-754, double-precision -Infinity has the highest
- // 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
- __ Movq(double_scratch, scratch);
- __ Ucomisd(double_scratch, double_base);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_sqrt, Label::kNear);
- __ j(carry, &continue_sqrt, Label::kNear);
-
- // Set result to Infinity in the special case.
- __ Xorpd(double_result, double_result);
- __ Subsd(double_result, double_scratch);
- __ jmp(&done);
-
- __ bind(&continue_sqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ Xorpd(double_scratch, double_scratch);
- __ Addsd(double_scratch, double_base); // Convert -0 to 0.
- __ Sqrtsd(double_result, double_scratch);
- __ jmp(&done);
-
- // Test for -0.5.
- __ bind(&not_plus_half);
- // Load double_scratch with -0.5 by substracting 1.
- __ Subsd(double_scratch, double_result);
- // Already ruled out NaNs for exponent.
- __ Ucomisd(double_scratch, double_exponent);
- __ j(not_equal, &fast_power, Label::kNear);
-
- // Calculates reciprocal of square root of base. Check for the special
- // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
- // According to IEEE-754, double-precision -Infinity has the highest
- // 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
- __ Movq(double_scratch, scratch);
- __ Ucomisd(double_scratch, double_base);
- // Comparing -Infinity with NaN results in "unordered", which sets the
- // zero flag as if both were equal. However, it also sets the carry flag.
- __ j(not_equal, &continue_rsqrt, Label::kNear);
- __ j(carry, &continue_rsqrt, Label::kNear);
-
- // Set result to 0 in the special case.
- __ Xorpd(double_result, double_result);
- __ jmp(&done);
-
- __ bind(&continue_rsqrt);
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ Xorpd(double_exponent, double_exponent);
- __ Addsd(double_exponent, double_base); // Convert -0 to +0.
- __ Sqrtsd(double_exponent, double_exponent);
- __ Divsd(double_result, double_exponent);
- __ jmp(&done);
- }
-
// Using FPU instructions to calculate power.
Label fast_power_failed;
__ bind(&fast_power);
@@ -492,34 +338,21 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in rax.
- __ bind(&done);
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
- __ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
- __ ret(2 * kPointerSize);
- } else {
- __ bind(&call_runtime);
- // Move base to the correct argument register. Exponent is already in xmm1.
- __ Movsd(xmm0, double_base);
- DCHECK(double_exponent.is(xmm1));
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(2);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
- }
- // Return value is in xmm0.
- __ Movsd(double_result, xmm0);
-
- __ bind(&done);
- __ ret(0);
+ __ bind(&call_runtime);
+ // Move base to the correct argument register. Exponent is already in xmm1.
+ __ Movsd(xmm0, double_base);
+ DCHECK(double_exponent.is(xmm1));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(2);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 2);
}
+ // Return value is in xmm0.
+ __ Movsd(double_result, xmm0);
+
+ __ bind(&done);
+ __ ret(0);
}
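
Note: the MathPowStub hunks above delete the Crankshaft-era ON_STACK path, including its hand-rolled fast paths for exponents of exactly +/-0.5. Those paths existed because plain sqrt does not match JS Math.pow at the edges; a small illustration (plain C++, not V8 code):

#include <cmath>
#include <cstdio>

double PowHalf(double base) {
  if (base == -INFINITY) return INFINITY;  // pow(-inf, 0.5) is +inf per spec.
  return std::sqrt(base + 0.0);            // Adding +0.0 turns -0 into +0 first.
}

int main() {
  // sqrt(-inf) is NaN and sqrt(-0) is -0, so Math.pow(x, 0.5) cannot simply
  // lower to sqrtsd without the two guards above.
  std::printf("%f vs %f\n", PowHalf(-INFINITY), std::sqrt(-INFINITY));  // inf vs nan
  std::printf("%f\n", PowHalf(-0.0));                                   // 0.000000
}
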
@@ -558,7 +391,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ ret(0);
@@ -861,12 +693,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ leal(rdx, Operand(rax, rax, times_1, 2));
// rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
+ // Check that the fourth object is a JSObject.
__ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
__ JumpIfSmi(r15, &runtime);
- __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
+ __ CmpObjectType(r15, JS_OBJECT_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
+ // Check that the object has fast elements.
__ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
__ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
@@ -1334,9 +1166,11 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
__ Integer32ToSmi(rdx, rdx);
__ Push(rdx);
__ Push(rbx);
+ __ Push(rsi);
__ CallStub(stub);
+ __ Pop(rsi);
__ Pop(rbx);
__ Pop(rdx);
__ Pop(rdi);
@@ -1354,8 +1188,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function,
- done_no_smi_convert;
+ Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_initialize_count, done_increment_count;
// Load the cache state into r11.
__ SmiToInteger32(rdx, rdx);
@@ -1369,7 +1203,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// type-feedback-vector.h).
Label check_allocation_site;
__ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
- __ j(equal, &done, Label::kFar);
+ __ j(equal, &done_increment_count, Label::kFar);
__ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
@@ -1393,7 +1227,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
__ cmpp(rdi, r11);
__ j(not_equal, &megamorphic);
- __ jmp(&done);
+ __ jmp(&done_increment_count);
__ bind(&miss);
@@ -1419,17 +1253,29 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
- __ jmp(&done_no_smi_convert);
+ __ jmp(&done_initialize_count);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
- __ jmp(&done_no_smi_convert);
+
+ __ bind(&done_initialize_count);
+ // Initialize the call counter.
+ __ SmiToInteger32(rdx, rdx);
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(1));
+ __ jmp(&done);
+
+ __ bind(&done_increment_count);
+
+ // Increment the call count for monomorphic function calls.
+ __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Smi::FromInt(1));
__ bind(&done);
__ Integer32ToSmi(rdx, rdx);
-
- __ bind(&done_no_smi_convert);
}
@@ -1490,7 +1336,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// Increment the call count for monomorphic function calls.
__ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(CallICNexus::kCallCountIncrement));
+ Smi::FromInt(1));
__ movp(rbx, rcx);
__ movp(rdx, rdi);
@@ -1540,7 +1386,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
__ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(CallICNexus::kCallCountIncrement));
+ Smi::FromInt(1));
__ bind(&call_function);
__ Set(rax, argc);
@@ -1610,7 +1456,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Initialize the call counter.
__ Move(FieldOperand(rbx, rdx, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Smi::FromInt(CallICNexus::kCallCountIncrement));
+ Smi::FromInt(1));
// Store the function. Use a stub since we need a frame for allocation.
// rbx - vector
@@ -1622,7 +1468,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Integer32ToSmi(rdx, rdx);
__ Push(rdi);
+ __ Push(rsi);
__ CallStub(&create_stub);
+ __ Pop(rsi);
__ Pop(rdi);
}
@@ -1667,13 +1515,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1731,11 +1578,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
(result_size() <= kMaxRegisterResultSize ? 0 : result_size());
if (argv_in_register()) {
DCHECK(!save_doubles());
+ DCHECK(!is_builtin_exit());
__ EnterApiExitFrame(arg_stack_space);
// Move argc into r14 (argv is already in r15).
__ movp(r14, rax);
} else {
- __ EnterExitFrame(arg_stack_space, save_doubles());
+ __ EnterExitFrame(
+ arg_stack_space, save_doubles(),
+ is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
// rbx: pointer to builtin function (C callee-saved).
@@ -1939,10 +1789,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&invoke);
__ PushStackHandler();
- // Clear any pending exceptions.
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
- __ Store(pending_exception, rax);
-
// Fake a receiver (NULL).
__ Push(Immediate(0)); // receiver
@@ -2013,125 +1859,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = rdx; // Object (lhs).
- Register const function = rax; // Function (rhs).
- Register const object_map = rcx; // Map of {object}.
- Register const function_map = r8; // Map of {function}.
- Register const function_prototype = rdi; // Prototype of {function}.
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ ret(0);
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ movp(function_prototype,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- Register const function_prototype_map = kScratchRegister;
- __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
- __ j(not_equal, &function_prototype_valid, Label::kNear);
- __ movp(function_prototype,
- FieldOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Label done, loop, fast_runtime_fallback;
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ bind(&loop);
-
- __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &fast_runtime_fallback, Label::kNear);
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- __ j(equal, &fast_runtime_fallback, Label::kNear);
-
- __ movp(object, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmpp(object, function_prototype);
- __ j(equal, &done, Label::kNear);
- __ CompareRoot(object, Heap::kNullValueRootIndex);
- __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ j(not_equal, &loop);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ bind(&done);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // Found Proxy or access check needed: Call the runtime.
- __ bind(&fast_runtime_fallback);
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(object);
- __ Push(function_prototype);
- __ PushReturnAddressFrom(kScratchRegister);
- // Invalidate the instanceof cache.
- __ Move(rax, Smi::FromInt(0));
- __ StoreRoot(rax, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(object);
- __ Push(function);
- __ PushReturnAddressFrom(kScratchRegister);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
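Note: the block removed above is the inlined instanceof fast path, dropped in favor of the Runtime::kOrdinaryHasInstance / kHasInPrototypeChain fallbacks it already tail-called. As a reference for what the loop at &loop computed, here is a minimal standalone C++ model; every name in it is a hypothetical stand-in, not a V8 internal:

    // Illustrative model of the prototype walk the deleted stub inlined.
    #include <cstdio>

    struct Obj {
      const Obj* prototype;  // null terminates the chain (null/hole above)
    };

    // Mirrors the loop at &loop: load the prototype, compare against the
    // resolved function prototype, stop at the end of the chain.
    bool HasInPrototypeChain(const Obj* object, const Obj* proto) {
      for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
        if (p == proto) return true;
      }
      return false;
    }

    int main() {
      Obj base{nullptr}, derived{&base}, instance{&derived};
      std::printf("%d\n", HasInPrototypeChain(&instance, &base));  // 1
      std::printf("%d\n", HasInPrototypeChain(&base, &derived));   // 0
    }

The real stub also consulted the global instanceof cache roots and bailed to the runtime for proxies and access-checked receivers; the model keeps only the core walk.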
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -2186,13 +1913,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
}
__ Push(object_);
__ Push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
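Note: both index-conversion modes now funnel through Runtime::kNumberToSmi; the deleted comment's caveat still applies, namely that non-exact integers are discarded rather than rounded. A toy model of that contract (not the runtime function itself):

    #include <climits>
    #include <cmath>
    #include <optional>

    // Only exact integral indices survive; NaN, fractions, and
    // out-of-range values are discarded (nullopt), never rounded.
    std::optional<int> NumberToSmiModel(double n) {
      if (std::trunc(n) != n) return std::nullopt;
      if (n < INT_MIN || n > INT_MAX) return std::nullopt;
      return static_cast<int>(n);
    }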
@@ -2513,78 +2234,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rcx: sub string length (smi)
// rdx: from index (smi)
StringCharAtGenerator generator(rax, rdx, rcx, rax, &runtime, &runtime,
- &runtime, STRING_INDEX_IS_NUMBER,
- RECEIVER_IS_STRING);
+ &runtime, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
generator.SkipSlow(masm, &runtime);
}
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in rax.
- Label not_smi;
- __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
- __ Ret();
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ Ret();
- __ bind(&not_heap_number);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in rax.
- __ AssertNotNumber(rax);
-
- Label not_string;
- __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
- // rax: object
- // rdi: object map
- __ j(above_equal, &not_string, Label::kNear);
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
- __ bind(&not_string);
-
- Label not_oddball;
- __ CmpInstanceType(rdi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ PopReturnAddressTo(rcx); // Pop return address.
- __ Push(rax); // Push argument.
- __ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in rax.
- __ AssertString(rax);
-
- // Check if string has a cached array index.
- Label runtime;
- __ testl(FieldOperand(rax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- __ j(not_zero, &runtime, Label::kNear);
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- __ IndexFromHash(rax, rax);
- __ Ret();
-
- __ bind(&runtime);
- __ PopReturnAddressTo(rcx); // Pop return address.
- __ Push(rax); // Push argument.
- __ PushReturnAddressFrom(rcx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
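Note: the three stubs deleted above formed a dispatch chain: smi, then heap number, then string, then oddball, then the kToNumber runtime call. A hedged sketch of that ordering with stand-in C++ types; the cached-array-index fast path for strings is elided:

    #include <string>
    #include <variant>

    // smi | heap number | string | oddball(true/false), respectively.
    using Value = std::variant<int, double, std::string, bool>;

    double ToNumberModel(const Value& v) {
      if (auto* smi = std::get_if<int>(&v)) return *smi;     // smi fast path
      if (auto* num = std::get_if<double>(&v)) return *num;  // heap number
      if (auto* str = std::get_if<std::string>(&v))
        return std::stod(*str);  // stands in for the runtime parse
      return std::get<bool>(v) ? 1.0 : 0.0;  // oddball's to-number slot
    }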
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in rax.
Label is_number;
@@ -2619,7 +2274,6 @@ void ToStringStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kToString);
}
-
void ToNameStub::Generate(MacroAssembler* masm) {
// The ToName stub takes one argument in rax.
Label is_number;
@@ -2802,7 +2456,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load rcx with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ Move(rcx, handle(isolate()->heap()->undefined_value()));
+ __ Move(rcx, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3612,14 +3266,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -3735,10 +3389,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(
- masm, Code::LOAD_IC, code_flags, receiver, name, feedback, no_reg);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
+ feedback, no_reg);
__ bind(&miss);
LoadIC::GenerateMiss(masm);
@@ -3818,37 +3470,30 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ jmp(&compare_map);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // rdx
- Register key = VectorStoreICDescriptor::NameRegister(); // rcx
- Register vector = VectorStoreICDescriptor::VectorRegister(); // rbx
- Register slot = VectorStoreICDescriptor::SlotRegister(); // rdi
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax)); // rax
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // rdx
+ Register key = StoreWithVectorDescriptor::NameRegister(); // rcx
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // rbx
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // rdi
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(rax)); // rax
Register feedback = r8;
Register integer_slot = r9;
Register receiver_map = r11;
@@ -3877,10 +3522,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ j(not_equal, &miss);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
- receiver, key, feedback, no_reg);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key,
+ feedback, no_reg);
__ bind(&miss);
StoreIC::GenerateMiss(masm);
@@ -3890,13 +3533,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ jmp(&compare_map);
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
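Note: the Vector* prefix is dropped but the two-stage shape is kept: the trampoline stub loads the feedback vector into its descriptor register, then runs the shared body with in_frame set. Control flow sketched with stand-in types only:

    struct Masm {};  // stand-in for MacroAssembler

    struct StoreICStubModel {
      // Shared body; in_frame says whether a stub frame is on the stack.
      static void GenerateImpl(Masm*, bool /*in_frame*/) { /* map dispatch */ }
      // Direct entry from generated code: no extra frame.
      static void Generate(Masm* m) { GenerateImpl(m, false); }
      // Entry via StoreICTrampolineStub, which first loaded the feedback
      // vector into StoreWithVectorDescriptor::VectorRegister().
      static void GenerateForTrampoline(Masm* m) { GenerateImpl(m, true); }
    };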
@@ -3953,13 +3594,12 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ jmp(miss);
}
-
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // rdx
- Register key = VectorStoreICDescriptor::NameRegister(); // rcx
- Register vector = VectorStoreICDescriptor::VectorRegister(); // rbx
- Register slot = VectorStoreICDescriptor::SlotRegister(); // rdi
- DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax)); // rax
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // rdx
+ Register key = StoreWithVectorDescriptor::NameRegister(); // rcx
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // rbx
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // rdi
+ DCHECK(StoreWithVectorDescriptor::ValueRegister().is(rax)); // rax
Register feedback = r8;
Register integer_slot = r9;
Register receiver_map = r11;
@@ -4100,9 +3740,6 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// rdi - constructor?
// rsp[0] - return address
// rsp[8] - last argument
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
@@ -4193,19 +3830,14 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
-
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
@@ -4213,8 +3845,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
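Note: ArrayNArgumentsConstructorStub is now elements-kind independent, so a single instance is precompiled instead of one per kind, and the Internal*NArguments variant disappears entirely. The consolidated pass in shape only; a free function stands in for CodeStub::GetCode():

    #include <cstdio>

    // Stand-in for CodeStub::GetCode(); just records what is precompiled.
    void GetCode(const char* stub) { std::printf("compiled %s\n", stub); }

    void GenerateStubsAheadOfTime() {
      GetCode("ArrayNoArgumentConstructor");      // per elements kind
      GetCode("ArraySingleArgumentConstructor");  // per elements kind
      GetCode("ArrayNArgumentsConstructor");      // once, kind-independent
      for (const char* kind : {"FAST_ELEMENTS", "FAST_HOLEY_ELEMENTS"}) {
        std::printf("internal array stubs for %s\n", kind);
        GetCode("InternalArrayNoArgumentConstructor");
        GetCode("InternalArraySingleArgumentConstructor");
        // No InternalArrayNArgumentsConstructor anymore: the shared
        // ArrayNArgumentsConstructorStub covers that case.
      }
    }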
@@ -4234,13 +3864,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4359,7 +3991,7 @@ void InternalArrayConstructorStub::GenerateCase(
__ TailCallStub(&stub1);
__ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN);
}
@@ -4449,15 +4081,15 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ movp(Operand(rax, JSObject::kMapOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kMapOffset), rcx);
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(Operand(rax, JSObject::kPropertiesOffset), rbx);
- __ movp(Operand(rax, JSObject::kElementsOffset), rbx);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ leap(rbx, Operand(rax, JSObject::kHeaderSize));
+ __ leap(rbx, FieldOperand(rax, JSObject::kHeaderSize));
// ----------- S t a t e -------------
- // -- rax : result (untagged)
+ // -- rax : result (tagged)
// -- rbx : result fields (untagged)
// -- rdi : result end (untagged)
// -- rcx : initial map
@@ -4475,10 +4107,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(rbx, rdi, r11);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ incp(rax);
__ Ret();
}
__ bind(&slack_tracking);
@@ -4498,10 +4126,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(rdx, rdi, r11);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ incp(rax);
-
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4532,10 +4156,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(rcx);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ decp(rax);
__ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
__ leap(rdi, Operand(rax, rbx, times_pointer_size, 0));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ decp(rdi); // Remove the tag from the end address.
__ jmp(&done_allocate);
// Fall back to %NewObject.
@@ -4557,19 +4181,19 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(rdi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make rdx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ movp(rdx, rbp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make rdx point to the JavaScript frame.
+ __ movp(rdx, rbp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
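Note: the unbounded walk up the frame chain is replaced by at most one hop, decided at stub-generation time by skip_stub_frame(), with the old loop condition demoted to a debug-only assert; the same pattern recurs in FastNewSloppyArgumentsStub and FastNewStrictArgumentsStub below. Equivalent logic as plain C++ with hypothetical frame links:

    // Only the control flow matches the stub; the types are made up.
    struct Frame {
      Frame* caller;
      const void* function;
    };

    Frame* FindJsFrame(Frame* fp, const void* fn, bool skip_stub_frame) {
      Frame* f = fp;
      if (skip_stub_frame) f = f->caller;  // Ignition: one handler/stub frame
      // Release builds trust this; debug builds Abort() if it is wrong,
      // where the old code would have kept walking up the chain.
      return f->function == fn ? f : nullptr;
    }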
// Check if we have rest parameters (only possible if we have an
@@ -4601,7 +4225,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in rax.
@@ -4632,6 +4256,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
1 * kPointerSize));
// ----------- S t a t e -------------
+ // -- rdi : function
// -- rsi : context
// -- rax : number of rest parameters
// -- rbx : pointer to first rest parameters
@@ -4642,7 +4267,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label allocate, done_allocate;
__ leal(rcx, Operand(rax, times_pointer_size,
JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Compute the arguments.length in rdi.
@@ -4680,8 +4305,11 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
@@ -4696,6 +4324,13 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(rax, rax);
}
__ jmp(&done_allocate);
+
+ // Fall back to %NewRestParameter.
+ __ bind(&too_big_for_new_space);
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(rdi);
+ __ PushReturnAddressFrom(kScratchRegister);
+ __ TailCallRuntime(Runtime::kNewRestParameter);
}
}
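Note: the retry path now tail-calls the full %NewRestParameter runtime when the requested size exceeds Page::kMaxRegularHeapObjectSize, since %AllocateInNewSpace can only serve regular-sized objects; the strict-arguments stub below gains the identical guard. The decision in miniature, where the constant's value is a placeholder, not the real limit:

    enum class AllocPath { kInlined, kAllocateInNewSpace, kRuntime };

    // Placeholder; the real bound is Page::kMaxRegularHeapObjectSize.
    constexpr int kMaxRegularHeapObjectSizeModel = 512 * 1024;

    AllocPath ChoosePath(int size_in_bytes, bool inline_alloc_failed) {
      if (!inline_alloc_failed) return AllocPath::kInlined;
      return size_in_bytes > kMaxRegularHeapObjectSizeModel
                 ? AllocPath::kRuntime              // %NewRestParameter
                 : AllocPath::kAllocateInNewSpace;  // GC-able retry
    }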
@@ -4709,11 +4344,26 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(rdi);
+ // Make r9 point to the JavaScript frame.
+ __ movp(r9, rbp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ movp(r9, Operand(r9, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmpp(rdi, Operand(r9, StandardFrameConstants::kFunctionOffset));
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ LoadSharedFunctionInfoSpecialField(
rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
- __ leap(rdx, Operand(rbp, rcx, times_pointer_size,
+ __ leap(rdx, Operand(r9, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ Integer32ToSmi(rcx, rcx);
@@ -4721,6 +4371,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// rdx : parameters pointer
// rdi : function
// rsp[0] : return address
+ // r9 : JavaScript frame pointer.
// Registers used over the whole function:
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
@@ -4731,7 +4382,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(r9, StandardFrameConstants::kCallerFPOffset));
__ movp(r8, Operand(rax, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
@@ -4774,7 +4425,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ addp(r8, Immediate(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
+ __ Allocate(r8, rax, r9, no_reg, &runtime, NO_ALLOCATION_FLAGS);
// rax = address of new object(s) (tagged)
// r11 = argument count (untagged)
@@ -4927,19 +4578,19 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(rdi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make rdx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ movp(rdx, rbp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make rdx point to the JavaScript frame.
+ __ movp(rdx, rbp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -4970,6 +4621,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rbx : pointer to the first argument
+ // -- rdi : function
// -- rsi : context
// -- rsp[0] : return address
// -----------------------------------
@@ -4978,7 +4630,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
Label allocate, done_allocate;
__ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
FixedArray::kHeaderSize));
- __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Compute the arguments.length in rdi.
@@ -5016,8 +4668,11 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmpl(rcx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
@@ -5032,37 +4687,13 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(rax, rax);
}
__ jmp(&done_allocate);
-}
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = rsi;
- Register slot_reg = rbx;
- Register result_reg = rax;
- Label slow_case;
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = rdi;
- }
-
- // Load the PropertyCell value at the specified slot.
- __ movp(result_reg, ContextOperand(context_reg, slot_reg));
- __ movp(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
-
- // Check that value is not the_hole.
- __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow_case, Label::kNear);
- __ Ret();
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ Integer32ToSmi(slot_reg, slot_reg);
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
__ PopReturnAddressTo(kScratchRegister);
- __ Push(slot_reg);
- __ Push(kScratchRegister);
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ __ Push(rdi);
+ __ PushReturnAddressFrom(kScratchRegister);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
@@ -5404,10 +5035,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
__ PopReturnAddressTo(return_address);
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+
// context save
__ Push(context);
@@ -5441,7 +5076,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
PrepareCallApiFunction(masm, kApiStackSpace);
@@ -5453,8 +5088,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ movp(StackSpaceOperand(1), scratch);
// FunctionCallbackInfo::length_.
__ Set(StackSpaceOperand(2), argc);
- // FunctionCallbackInfo::is_construct_call_.
- __ Set(StackSpaceOperand(3), 0);
#if defined(__MINGW64__) || defined(_WIN64)
Register arguments_arg = rcx;
@@ -5479,11 +5112,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
FCA::kArgsLength - FCA::kContextSaveIndex);
- Operand is_construct_call_operand = StackSpaceOperand(3);
+ Operand length_operand = StackSpaceOperand(2);
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
this->is_store() ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
int stack_space = 0;
- Operand* stack_space_operand = &is_construct_call_operand;
+ Operand* stack_space_operand = &length_operand;
stack_space = argc + FCA::kArgsLength + 1;
stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
@@ -5493,14 +5126,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16 .. (16 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- r8 : api_function_address
- // -----------------------------------
-
#if defined(__MINGW64__) || defined(_WIN64)
Register getter_arg = r8;
Register accessor_info_arg = rdx;
@@ -5510,9 +5135,36 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Register accessor_info_arg = rsi;
Register name_arg = rdi;
#endif
- Register api_function_address = ApiGetterDescriptor::function_address();
- DCHECK(api_function_address.is(r8));
+ Register api_function_address = r8;
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
Register scratch = rax;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ // Insert additional parameters into the stack frame above return address.
+ __ PopReturnAddressTo(scratch);
+ __ Push(receiver);
+ __ Push(FieldOperand(callback, AccessorInfo::kDataOffset));
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
+ __ PushAddress(ExternalReference::isolate_address(isolate()));
+ __ Push(holder);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(FieldOperand(callback, AccessorInfo::kNameOffset));
+ __ PushReturnAddressFrom(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5539,8 +5191,11 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// It's okay if api_function_address == getter_arg
// but not accessor_info_arg or name_arg
- DCHECK(!api_function_address.is(accessor_info_arg) &&
- !api_function_address.is(name_arg));
+ DCHECK(!api_function_address.is(accessor_info_arg));
+ DCHECK(!api_function_address.is(name_arg));
+ __ movp(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ movp(api_function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
@@ -5550,7 +5205,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
NULL);
}
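Note: CallApiGetterStub now takes receiver, holder, and callback in registers and builds the v8::PropertyCallbackInfo::args_ array inline; the pushes above fill the array from index 6 (this) down to 0 (should_throw_on_error), with the name handle pushed last, just below the array. The index map, with values copied from the STATIC_ASSERTs:

    // Indices into v8::PropertyCallbackInfo::args_, highest pushed first.
    enum PropertyCallbackArgsIndex {
      kShouldThrowOnErrorIndex = 0,
      kHolderIndex = 1,
      kIsolateIndex = 2,
      kReturnValueDefaultValueIndex = 3,
      kReturnValueIndex = 4,
      kDataIndex = 5,
      kThisIndex = 6,
      kArgsLength = 7  // the property name handle sits just below args_
    };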
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index d4f8b29dbc..a181377221 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -295,8 +295,8 @@ class RecordWriteStub: public PlatformCodeStub {
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumRegisters; i++) {
- Register candidate = Register::from_code(i);
- if (candidate.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
+ Register candidate = Register::from_code(i);
if (candidate.is(rcx)) continue;
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 33e987e248..911f3cb64a 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -32,38 +32,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- size_t actual_size;
- byte* buffer =
- static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == nullptr) return nullptr;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
- CodeObjectRequired::kNo);
- // xmm0: raw double input.
- XMMRegister input = xmm0;
- XMMRegister result = xmm1;
- __ pushq(rax);
- __ pushq(rbx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
-
- __ popq(rbx);
- __ popq(rax);
- __ Movsd(xmm0, result);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-
- Assembler::FlushICache(isolate, buffer, actual_size);
- base::OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-}
-
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
// Allocate buffer in executable space.
@@ -204,7 +172,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store.
__ bind(&new_backing_store);
__ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
- __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
+ __ Allocate(rdi, r14, r11, r15, fail, NO_ALLOCATION_FLAGS);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
__ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
@@ -243,8 +211,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// rbx: current element (smi-tagged)
__ JumpIfNotSmi(rbx, &convert_hole);
__ SmiToInteger32(rbx, rbx);
- __ Cvtlsi2sd(xmm0, rbx);
- __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0);
+ __ Cvtlsi2sd(kScratchDoubleReg, rbx);
+ __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
+ kScratchDoubleReg);
__ jmp(&entry);
__ bind(&convert_hole);
@@ -296,7 +265,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// r8 : source FixedDoubleArray
// r9 : number of elements
__ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
+ __ Allocate(rdi, r11, r14, r15, &gc_required, NO_ALLOCATION_FLAGS);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
__ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
@@ -498,59 +467,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- DCHECK(!input.is(result));
- DCHECK(!input.is(double_scratch));
- DCHECK(!result.is(double_scratch));
- DCHECK(!temp1.is(temp2));
- DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
- DCHECK(!masm->serializer_enabled()); // External references not serializable.
-
- Label done;
-
- __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ Movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
- __ Xorpd(result, result);
- __ Ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ Ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
- __ Movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
- __ j(above_equal, &done);
- __ Movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
- __ Movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
- __ Mulsd(double_scratch, input);
- __ Addsd(double_scratch, result);
- __ Movq(temp2, double_scratch);
- __ Subsd(double_scratch, result);
- __ Movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ leaq(temp1, Operand(temp2, 0x1ff800));
- __ andq(temp2, Immediate(0x7ff));
- __ shrq(temp1, Immediate(11));
- __ Mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
- __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
- __ shlq(temp1, Immediate(52));
- __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
- __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ Subsd(double_scratch, input);
- __ Movsd(input, double_scratch);
- __ Subsd(result, double_scratch);
- __ Mulsd(input, double_scratch);
- __ Mulsd(result, input);
- __ Movq(input, temp1);
- __ Mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
- __ Subsd(result, double_scratch);
- __ Addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
- __ Mulsd(result, input);
-
- __ bind(&done);
-}
-
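Note: both CreateExpFunction above and this MathExpGenerator body are deleted with no replacement visible in this section; presumably the 5.4 update moves Math.exp onto a shared C++ implementation such as src/base/ieee754, but that is an assumption this diff alone does not confirm. The call shape the deleted factory produced, as a trivial stand-in:

    #include <cmath>

    // Same signature shape as the deleted factory, minus generated code.
    typedef double (*UnaryMathFunction)(double x);

    static double ExpFallback(double x) { return std::exp(x); }

    UnaryMathFunction CreateExpFunction() { return &ExpFallback; }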
#undef __
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 1403781c67..799187869e 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -5,7 +5,6 @@
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -28,20 +27,6 @@ class StringCharLoadGenerator : public AllStatic {
};
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
-
enum StackArgumentsAccessorReceiverMode {
ARGUMENTS_CONTAIN_RECEIVER,
ARGUMENTS_DONT_CONTAIN_RECEIVER
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 9d70c3236b..35da7a2c00 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -116,8 +116,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kMaxNumRegisters;
__ subp(rsp, Immediate(kDoubleRegsSize));
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index a9532dc3ad..83f34d07a0 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -8,6 +8,7 @@
#if V8_TARGET_ARCH_X64
+#include "src/base/compiler-specific.h"
#include "src/base/lazy-instance.h"
#include "src/disasm.h"
@@ -141,19 +142,18 @@ enum InstructionType {
SHORT_IMMEDIATE_INSTR
};
-
enum Prefixes {
ESCAPE_PREFIX = 0x0F,
OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
VEX3_PREFIX = 0xC4,
VEX2_PREFIX = 0xC5,
+ LOCK_PREFIX = 0xF0,
REPNE_PREFIX = 0xF2,
REP_PREFIX = 0xF3,
REPEQ_PREFIX = REP_PREFIX
};
-
struct InstructionDesc {
const char* mnem;
InstructionType type;
@@ -359,7 +359,7 @@ class DisassemblerX64 {
bool vex_128() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
- return (checked & 4) != 1;
+ return (checked & 4) == 0;
}
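Note: this one is a behavior fix, not a rename. The masked value is either 0 or 4, so the old `(checked & 4) != 1` was vacuously true and 256-bit VEX encodings were misreported as 128-bit. A two-line regression check:

    #include <cassert>

    bool vex_128_old(unsigned checked) { return (checked & 4) != 1; }
    bool vex_128_new(unsigned checked) { return (checked & 4) == 0; }

    int main() {
      // VEX.L set (256-bit): the old test still claimed 128-bit.
      assert(vex_128_old(0x04) && !vex_128_new(0x04));
      // VEX.L clear (128-bit): both agree.
      assert(vex_128_old(0x00) && vex_128_new(0x00));
    }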
bool vex_none() {
@@ -479,7 +479,7 @@ class DisassemblerX64 {
int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
int AVXInstruction(byte* data);
- void AppendToBuffer(const char* format, ...);
+ PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
if (abort_on_unimplemented_) {
@@ -618,7 +618,7 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
value = 0; // Initialize variables on all paths to satisfy the compiler.
count = 0;
}
- AppendToBuffer("%" V8_PTR_PREFIX "x", value);
+ AppendToBuffer("%" PRIx64, value);
return count;
}
@@ -1227,6 +1227,15 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x10:
+ AppendToBuffer("vmovups %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x11:
+ AppendToBuffer("vmovups ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x28:
AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1240,6 +1249,10 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x50:
+ AppendToBuffer("vmovmskps %s,", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x54:
AppendToBuffer("vandps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1250,6 +1263,16 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0xC2: {
+ AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer(", (%s)", pseudo_op[*current]);
+ current += 1;
+ break;
+ }
default:
UnimplementedInstruction();
}
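Note: for the new vcmpps/vcmppd cases (and the SSE cmpps/cmppd paths later in this file), the trailing imm8 selects one of eight comparison predicates, which the disassembler now prints by name instead of as a raw byte. The 0..7 table as a standalone lookup:

    #include <cstdio>

    // imm8 0..7 on (v)cmpps/(v)cmppd selects the comparison predicate.
    const char* CmpPredicateName(unsigned imm8) {
      static const char* const names[] = {"eq",  "lt",  "le",  "unord",
                                          "neq", "nlt", "nle", "ord"};
      return imm8 < 8 ? names[imm8] : "?";
    }

    int main() { std::printf("%s\n", CmpPredicateName(4)); }  // neq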
@@ -1257,6 +1280,15 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x10:
+ AppendToBuffer("vmovupd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x11:
+ AppendToBuffer("vmovupd ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ break;
case 0x28:
AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1310,6 +1342,16 @@ int DisassemblerX64::AVXInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
+ case 0xC2: {
+ AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer(", (%s)", pseudo_op[*current]);
+ current += 1;
+ break;
+ }
default:
UnimplementedInstruction();
}
@@ -1513,7 +1555,16 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
if (operand_size_ == 0x66) {
// 0x66 0x0F prefix.
int mod, regop, rm;
- if (opcode == 0x3A) {
+ if (opcode == 0x38) {
+ byte third_byte = *current;
+ current = data + 3;
+ if (third_byte == 0x40) {
+ // pmulld xmm, xmm/m128
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pmulld %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ }
+ } else if (opcode == 0x3A) {
byte third_byte = *current;
current = data + 3;
if (third_byte == 0x17) {
@@ -1536,11 +1587,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(",0x%x", (*current) & 3);
current += 1;
} else if (third_byte == 0x16) {
- get_modrm(*current, &mod, &rm, &regop);
+ get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pextrd "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x21) {
+ get_modrm(*current, &mod, &regop, &rm);
+ // insertps xmm, xmm/m32, imm8
+ AppendToBuffer("insertps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 3);
+ current += 1;
} else if (third_byte == 0x22) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
@@ -1564,6 +1622,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 4;
} // else no immediate displacement.
AppendToBuffer("nop");
+ } else if (opcode == 0x10) {
+ AppendToBuffer("movupd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x11) {
+ AppendToBuffer("movupd ");
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x28) {
AppendToBuffer("movapd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
@@ -1596,6 +1661,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x70) {
+ AppendToBuffer("pshufd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current);
+ current += 1;
} else if (opcode == 0x72) {
current += 1;
AppendToBuffer("%s %s,%d", (regop == 6) ? "pslld" : "psrld",
@@ -1606,6 +1676,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("%s %s,%d", (regop == 6) ? "psllq" : "psrlq",
NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
+ } else if (opcode == 0xB1) {
+ current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
@@ -1614,6 +1686,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "orpd";
} else if (opcode == 0x57) {
mnemonic = "xorpd";
+ } else if (opcode == 0x5B) {
+ mnemonic = "cvtps2dq";
} else if (opcode == 0x2E) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
@@ -1624,11 +1698,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "punpckldq";
} else if (opcode == 0x6A) {
mnemonic = "punpckhdq";
+ } else if (opcode == 0xF4) {
+ mnemonic = "pmuludq";
+ } else if (opcode == 0xFA) {
+ mnemonic = "psubd";
+ } else if (opcode == 0xFE) {
+ mnemonic = "paddd";
+ } else if (opcode == 0xC2) {
+ mnemonic = "cmppd";
} else {
UnimplementedInstruction();
}
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ if (opcode == 0xC2) {
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer(", (%s)", pseudo_op[*current]);
+ current += 1;
+ }
}
}
} else if (group_1_prefix_ == 0xF2) {
@@ -1765,6 +1853,19 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
UnimplementedInstruction();
}
+ } else if (opcode == 0x10 || opcode == 0x11) {
+ // movups xmm, xmm/m128
+ // movups xmm/m128, xmm
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movups ");
+ if (opcode == 0x11) {
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ }
} else if (opcode == 0x1F) {
// NOP
int mod, regop, rm;
@@ -1811,29 +1912,28 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
byte_size_operand_ = idesc.byte_size_operation;
current += PrintOperands(idesc.mnem, idesc.op_order_, current);
- } else if (opcode >= 0x53 && opcode <= 0x5F) {
+ } else if (opcode >= 0x51 && opcode <= 0x5F) {
const char* const pseudo_op[] = {
- "rcpps",
- "andps",
- "andnps",
- "orps",
- "xorps",
- "addps",
- "mulps",
- "cvtps2pd",
- "cvtdq2ps",
- "subps",
- "minps",
- "divps",
- "maxps",
+ "sqrtps", "rsqrtps", "rcpps", "andps", "andnps",
+ "orps", "xorps", "addps", "mulps", "cvtps2pd",
+ "cvtdq2ps", "subps", "minps", "divps", "maxps",
};
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("%s %s,",
- pseudo_op[opcode - 0x53],
+ AppendToBuffer("%s %s,", pseudo_op[opcode - 0x51],
NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0xC2) {
+ // cmpps xmm, xmm/m128, imm8
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %s", pseudo_op[*current]);
+ current += 1;
} else if (opcode == 0xC6) {
// shufps xmm, xmm/m128, imm8
int mod, regop, rm;
@@ -1842,7 +1942,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(", %d", (*current) & 3);
current += 1;
-
} else if (opcode == 0x50) {
// movmskps reg, xmm
int mod, regop, rm;
@@ -1883,6 +1982,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightOperand(current);
} else if (opcode == 0x0B) {
AppendToBuffer("ud2");
+ } else if (opcode == 0xB0 || opcode == 0xB1) {
+ // CMPXCHG.
+ if (opcode == 0xB0) {
+ byte_size_operand_ = true;
+ }
+ current += PrintOperands(mnemonic, OPER_REG_OP_ORDER, current);
} else {
UnimplementedInstruction();
}
@@ -1925,6 +2030,9 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "shrd";
case 0xAF:
return "imul";
+ case 0xB0:
+ case 0xB1:
+ return "cmpxchg";
case 0xB6:
return "movzxb";
case 0xB7:
@@ -1962,6 +2070,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (rex_w()) AppendToBuffer("REX.W ");
} else if ((current & 0xFE) == 0xF2) { // Group 1 prefix (0xF2 or 0xF3).
group_1_prefix_ = current;
+ } else if (current == LOCK_PREFIX) {
+ AppendToBuffer("lock ");
} else if (current == VEX3_PREFIX) {
vex_byte0_ = current;
vex_byte1_ = *(data + 1);
@@ -1999,7 +2109,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (rex_w()) AppendToBuffer("REX.W ");
AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
} else {
- AppendToBuffer("%s", idesc.mnem, operand_size_code());
+ AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
}
data++;
break;
@@ -2141,9 +2251,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
default:
mnem = "???";
}
- AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
- mnem,
- operand_size_code());
+ if (regop <= 1) {
+ AppendToBuffer("%s%c ", mnem, operand_size_code());
+ } else {
+ AppendToBuffer("%s ", mnem);
+ }
data += PrintRightOperand(data);
}
break;
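Note: with AppendToBuffer now annotated PRINTF_FORMAT(2, 3) (see the declaration change earlier in this file), the old single call that chose between "%s%c " and "%s " at runtime while always passing both arguments trips -Wformat-extra-args; splitting the branches keeps each format string matched to its argument list. A minimal reproduction of the fixed pattern, using the GCC/Clang attribute directly:

    #include <cstdarg>
    #include <cstdio>

    // GCC/Clang attribute; V8 wraps the equivalent as PRINTF_FORMAT.
    __attribute__((format(printf, 1, 2)))
    void Append(const char* fmt, ...) {
      va_list args;
      va_start(args, fmt);
      std::vprintf(fmt, args);
      va_end(args);
    }

    void Demo(bool has_size_suffix, char size_code) {
      if (has_size_suffix) {
        Append("%s%c ", "mnem", size_code);  // both arguments consumed
      } else {
        Append("%s ", "mnem");  // no unused trailing argument to warn on
      }
    }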
@@ -2334,9 +2446,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
default:
UNREACHABLE();
}
- AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x",
- operand_size_code(),
- value);
+ AppendToBuffer("test%c rax,0x%" PRIx64, operand_size_code(), value);
break;
}
case 0xD1: // fall through
@@ -2426,7 +2536,7 @@ static const char* const xmm_regs[16] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -2493,7 +2603,7 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
+ fprintf(f, "%p", static_cast<void*>(prev_pc));
fprintf(f, " ");
for (byte* bp = prev_pc; bp < pc; bp++) {
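Note: the printf %p conversion is only specified for void*, so passing a byte* is formally undefined and warns once format checking is enforced, hence the static_casts in this hunk and the previous one. For example:

    #include <cstdio>

    int main() {
      unsigned char buffer[4];
      unsigned char* pc = buffer;
      // Without the cast this is formally undefined and -Wformat flags it.
      std::printf("%p\n", static_cast<void*>(pc));
    }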
diff --git a/deps/v8/src/x64/eh-frame-x64.cc b/deps/v8/src/x64/eh-frame-x64.cc
new file mode 100644
index 0000000000..afbcf2167e
--- /dev/null
+++ b/deps/v8/src/x64/eh-frame-x64.cc
@@ -0,0 +1,63 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kRaxDwarfCode = 0;
+static const int kRbpDwarfCode = 6;
+static const int kRspDwarfCode = 7;
+static const int kRipDwarfCode = 16;
+
+const int EhFrameConstants::kCodeAlignmentFactor = 1;
+const int EhFrameConstants::kDataAlignmentFactor = -8;
+
+void EhFrameWriter::WriteReturnAddressRegisterCode() {
+ WriteULeb128(kRipDwarfCode);
+}
+
+void EhFrameWriter::WriteInitialStateInCie() {
+ SetBaseAddressRegisterAndOffset(rsp, kPointerSize);
+ // x64 rip (r16) has no Register instance associated.
+ RecordRegisterSavedToStack(kRipDwarfCode, -kPointerSize);
+}
+
+// static
+int EhFrameWriter::RegisterToDwarfCode(Register name) {
+ switch (name.code()) {
+ case Register::kCode_rbp:
+ return kRbpDwarfCode;
+ case Register::kCode_rsp:
+ return kRspDwarfCode;
+ case Register::kCode_rax:
+ return kRaxDwarfCode;
+ default:
+ UNIMPLEMENTED();
+ return -1;
+ }
+}
+
+#ifdef ENABLE_DISASSEMBLER
+
+// static
+const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
+ switch (code) {
+ case kRbpDwarfCode:
+ return "rbp";
+ case kRspDwarfCode:
+ return "rsp";
+ case kRipDwarfCode:
+ return "rip";
+ default:
+ UNIMPLEMENTED();
+ return nullptr;
+ }
+}
+
+#endif
+
+} // namespace internal
+} // namespace v8
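Note: the new eh-frame-x64.cc maps V8 registers onto the System V x86-64 DWARF numbering (rax=0, rbp=6, rsp=7, rip=16) used by .eh_frame unwind tables; kDataAlignmentFactor is -8 so factored stack offsets count 8-byte slots on a downward-growing stack. The subset of the mapping as a standalone lookup:

    #include <cstring>

    // System V AMD64 DWARF register numbers, subset used by the writer.
    int DwarfRegisterCode(const char* reg) {
      if (std::strcmp(reg, "rax") == 0) return 0;
      if (std::strcmp(reg, "rbp") == 0) return 6;
      if (std::strcmp(reg, "rsp") == 0) return 7;
      if (std::strcmp(reg, "rip") == 0) return 16;  // return-address column
      return -1;  // the real writer hits UNIMPLEMENTED() instead
    }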
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index b10b52298b..7d39b42ac3 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -11,6 +11,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return rsi; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {rax, rbx, rcx, rdx, rdi};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return rdi;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return rax; }
const Register LoadDescriptor::ReceiverRegister() { return rdx; }
const Register LoadDescriptor::NameRegister() { return rcx; }
@@ -23,13 +36,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
const Register StoreDescriptor::ReceiverRegister() { return rdx; }
const Register StoreDescriptor::NameRegister() { return rcx; }
const Register StoreDescriptor::ValueRegister() { return rax; }
+const Register StoreDescriptor::SlotRegister() { return rdi; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return rdi; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return rbx; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return rdi; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return rbx; }
@@ -39,23 +48,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return r11; }
const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return rbx; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
-const Register InstanceOfDescriptor::LeftRegister() { return rdx; }
-const Register InstanceOfDescriptor::RightRegister() { return rax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return rdx; }
const Register StringCompareDescriptor::RightRegister() { return rax; }
-
-const Register ApiGetterDescriptor::function_address() { return r8; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
const Register MathPowTaggedDescriptor::exponent() { return rdx; }
@@ -75,13 +76,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi, rdx};
@@ -243,50 +237,37 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// rax -- number of arguments
// rdi -- function
// rbx -- allocation site with elements kind
- Register registers[] = {rdi, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
Register registers[] = {rdi, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
// register state
// rax -- number of arguments
- // rdi -- constructor function
- Register registers[] = {rdi};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // rdi -- function
+ // rbx -- allocation site with elements kind
+ Register registers[] = {rdi, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {rdi, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // register state
+ // rax -- number of arguments
+ // rdi -- function
+ // rbx -- allocation site with elements kind
+ Register registers[] = {rdi, rbx, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {rax};
@@ -313,6 +294,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // rdx -- lhs
+ // rax -- rhs
+ // rdi -- slot id
+ // rbx -- vector
+ Register registers[] = {rdx, rax, rdi, rbx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -373,9 +370,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
- kInterpreterDispatchTableRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -410,6 +406,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rax, // the value to pass to the generator
+ rbx, // the JSGeneratorObject to resume
+ rdx // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 566091df4e..6dacc011df 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -489,7 +489,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
// easier.
DCHECK(js_function.is(rdi));
DCHECK(code_entry.is(rcx));
- DCHECK(scratch.is(rax));
+ DCHECK(scratch.is(r15));
// Since a code entry (value) is always in old space, we don't need to update
// remembered set. If incremental marking is off, there is nothing for us to
@@ -537,13 +537,13 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
movp(arg_reg_1, js_function); // rcx gets rdi.
- movp(arg_reg_2, dst); // rdx gets rax.
+ movp(arg_reg_2, dst); // rdx gets r15.
} else {
// AMD64 calling convention.
DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
// rdi is already loaded with js_function.
- movp(arg_reg_2, dst); // rsi gets rax.
+ movp(arg_reg_2, dst); // rsi gets r15.
}
Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
@@ -638,17 +638,18 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
- Assembler::RelocInfoNone());
- Push(kScratchRegister);
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(rdx, Smi::FromInt(static_cast<int>(reason)));
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// Control will not return here.
int3();
@@ -738,15 +739,15 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
+ bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- CEntryStub ces(isolate(), 1);
+ CEntryStub ces(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
-
#define REG(Name) \
{ Register::kCode_##Name }
@@ -1116,15 +1117,6 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
}
-void MacroAssembler::Set(Register dst, int64_t x, RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::WASM_MEMORY_REFERENCE) {
- DCHECK(x != 0);
- movq(dst, x, rmode);
- } else {
- DCHECK(RelocInfo::IsNone(rmode));
- }
-}
-
void MacroAssembler::Set(const Operand& dst, intptr_t x) {
if (kPointerSize == kInt64Size) {
if (is_int32(x)) {
@@ -2727,6 +2719,32 @@ void MacroAssembler::Movaps(XMMRegister dst, XMMRegister src) {
}
}
+void MacroAssembler::Movups(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovups(dst, src);
+ } else {
+ movups(dst, src);
+ }
+}
+
+void MacroAssembler::Movups(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovups(dst, src);
+ } else {
+ movups(dst, src);
+ }
+}
+
+void MacroAssembler::Movups(const Operand& dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovups(dst, src);
+ } else {
+ movups(dst, src);
+ }
+}
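
Every SSE/AVX wrapper added in this hunk follows the same dispatch shape: probe CpuFeatures::IsSupported(AVX), open a CpuFeatureScope so the assembler will accept VEX encodings, and otherwise fall back to the legacy SSE instruction. A minimal sketch of the skeleton, with OpName/vopname/opname as placeholders rather than emitters this patch adds:

    void MacroAssembler::OpName(XMMRegister dst, const Operand& src) {
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(this, AVX);  // permit VEX-encoded instructions
        vopname(dst, src);                 // AVX form
      } else {
        opname(dst, src);                  // legacy SSE form
      }
    }
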
void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2737,6 +2755,23 @@ void MacroAssembler::Movapd(XMMRegister dst, XMMRegister src) {
}
}
+void MacroAssembler::Movupd(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovupd(dst, src);
+ } else {
+ movupd(dst, src);
+ }
+}
+
+void MacroAssembler::Movupd(const Operand& dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovupd(dst, src);
+ } else {
+ movupd(dst, src);
+ }
+}
void MacroAssembler::Movsd(XMMRegister dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2847,6 +2882,14 @@ void MacroAssembler::Movq(Register dst, XMMRegister src) {
}
}
+void MacroAssembler::Movmskps(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmovmskps(dst, src);
+ } else {
+ movmskps(dst, src);
+ }
+}
void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2857,6 +2900,23 @@ void MacroAssembler::Movmskpd(Register dst, XMMRegister src) {
}
}
+void MacroAssembler::Xorps(XMMRegister dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, src);
+ } else {
+ xorps(dst, src);
+ }
+}
+
+void MacroAssembler::Xorps(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, src);
+ } else {
+ xorps(dst, src);
+ }
+}
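
Note the asymmetry in the Xorps bodies: AVX arithmetic instructions use a non-destructive three-operand form, so the wrapper passes dst twice to reproduce the destructive two-operand SSE behavior:

    ; SSE (destructive):      xorps  xmm1, xmm2        ; xmm1 ^= xmm2
    ; AVX (non-destructive):  vxorps xmm1, xmm1, xmm2  ; same effect
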
void MacroAssembler::Roundss(XMMRegister dst, XMMRegister src,
RoundingMode mode) {
@@ -2939,6 +2999,27 @@ void MacroAssembler::Ucomisd(XMMRegister src1, const Operand& src2) {
}
}
+// ----------------------------------------------------------------------------
+
+void MacroAssembler::Absps(XMMRegister dst) {
+ Andps(dst,
+ ExternalOperand(ExternalReference::address_of_float_abs_constant()));
+}
+
+void MacroAssembler::Negps(XMMRegister dst) {
+ Xorps(dst,
+ ExternalOperand(ExternalReference::address_of_float_neg_constant()));
+}
+
+void MacroAssembler::Abspd(XMMRegister dst) {
+ Andps(dst,
+ ExternalOperand(ExternalReference::address_of_double_abs_constant()));
+}
+
+void MacroAssembler::Negpd(XMMRegister dst) {
+ Xorps(dst,
+ ExternalOperand(ExternalReference::address_of_double_neg_constant()));
+}
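
These four helpers implement packed abs/neg with nothing but bitwise masking: ANDing against a constant that clears each lane's sign bit yields |x|, and XORing against a constant with only the sign bits set flips the sign. (The pd variants can reuse Andps/Xorps because bitwise operations ignore lane type.) The same trick on a scalar float, as a self-contained illustration rather than V8 code:

    #include <cstdint>
    #include <cstring>

    float AbsViaMask(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits &= 0x7FFFFFFFu;  // clear the sign bit
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }

    float NegViaMask(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits ^= 0x80000000u;  // flip the sign bit
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }
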
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
AllowDeferredHandleDereference smi_check;
@@ -2975,15 +3056,8 @@ void MacroAssembler::Push(Handle<Object> source) {
void MacroAssembler::MoveHeapObject(Register result,
Handle<Object> object) {
- AllowDeferredHandleDereference using_raw_address;
DCHECK(object->IsHeapObject());
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- Move(result, cell, RelocInfo::CELL);
- movp(result, Operand(result, 0));
- } else {
- Move(result, object, RelocInfo::EMBEDDED_OBJECT);
- }
+ Move(result, object, RelocInfo::EMBEDDED_OBJECT);
}
@@ -3268,12 +3342,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
pinsrd(dst, src, imm8);
return;
}
- Movd(xmm0, src);
+ Movd(kScratchDoubleReg, src);
if (imm8 == 1) {
- punpckldq(dst, xmm0);
+ punpckldq(dst, kScratchDoubleReg);
} else {
DCHECK_EQ(0, imm8);
- Movss(dst, xmm0);
+ Movss(dst, kScratchDoubleReg);
}
}
@@ -3285,12 +3359,12 @@ void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
pinsrd(dst, src, imm8);
return;
}
- Movd(xmm0, src);
+ Movd(kScratchDoubleReg, src);
if (imm8 == 1) {
- punpckldq(dst, xmm0);
+ punpckldq(dst, kScratchDoubleReg);
} else {
DCHECK_EQ(0, imm8);
- Movss(dst, xmm0);
+ Movss(dst, kScratchDoubleReg);
}
}
@@ -3752,15 +3826,15 @@ void MacroAssembler::SlowTruncateToI(Register result_reg,
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Register input_reg) {
Label done;
- Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- Cvttsd2siq(result_reg, xmm0);
+ Movsd(kScratchDoubleReg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ Cvttsd2siq(result_reg, kScratchDoubleReg);
cmpq(result_reg, Immediate(1));
j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
subp(rsp, Immediate(kDoubleSize));
- Movsd(MemOperand(rsp, 0), xmm0);
+ Movsd(MemOperand(rsp, 0), kScratchDoubleReg);
SlowTruncateToI(result_reg, rsp, 0);
addp(rsp, Immediate(kDoubleSize));
} else {
@@ -3797,8 +3871,8 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
Label* lost_precision, Label* is_nan,
Label* minus_zero, Label::Distance dst) {
Cvttsd2si(result_reg, input_reg);
- Cvtlsi2sd(xmm0, result_reg);
- Ucomisd(xmm0, input_reg);
+ Cvtlsi2sd(kScratchDoubleReg, result_reg);
+ Ucomisd(kScratchDoubleReg, input_reg);
j(not_equal, lost_precision, dst);
j(parity_even, is_nan, dst); // NaN.
if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
@@ -3970,6 +4044,16 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Push(object);
+ CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAGeneratorObject);
+ }
+}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@@ -4337,11 +4421,12 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- Operand step_in_enabled_operand = ExternalOperand(step_in_enabled);
- cmpb(step_in_enabled_operand, Immediate(0));
- j(equal, &skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ Operand last_step_action_operand = ExternalOperand(last_step_action);
+ STATIC_ASSERT(StepFrame > StepIn);
+ cmpb(last_step_action_operand, Immediate(StepIn));
+ j(less, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4400,8 +4485,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- movp(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ movp(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+ movp(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -4441,8 +4526,28 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(rbp);
+ Move(rbp, rsp);
+ Push(context);
+ Push(target);
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(target);
+ Pop(context);
+ leave();
+}
+
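For reference, the frame EnterBuiltinFrame builds (x64 pushes are 8 bytes; offsets relative to the new rbp, derived from the push sequence above):

    rbp + 8  : return address
    rbp + 0  : saved rbp      <- rbp
    rbp - 8  : context
    rbp - 16 : target
    rbp - 24 : argc           <- rsp on exit

LeaveBuiltinFrame pops the three slots in reverse order, and leave() restores rsp and rbp in one step.
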
+void MacroAssembler::EnterExitFramePrologue(bool save_rax,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
-void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
@@ -4453,11 +4558,11 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
- Push(Smi::FromInt(StackFrame::EXIT));
+ Push(Smi::FromInt(frame_type));
DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- Push(kScratchRegister); // Accessed from EditFrame::code_slot.
+ Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
@@ -4482,8 +4587,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
DoubleRegister reg =
DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
@@ -4505,9 +4609,9 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
-
-void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
- EnterExitFramePrologue(true);
+void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
+ StackFrame::Type frame_type) {
+ EnterExitFramePrologue(true, frame_type);
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
@@ -4519,7 +4623,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
- EnterExitFramePrologue(false);
+ EnterExitFramePrologue(false, StackFrame::EXIT);
EnterExitFrameEpilogue(arg_stack_space, false);
}
@@ -4529,8 +4633,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// r15 : argv
if (save_doubles) {
int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
DoubleRegister reg =
DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
@@ -4829,7 +4932,7 @@ void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
Label aligned;
testl(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
- if ((flags & PRETENURE) != 0) {
+ if (((flags & ALLOCATION_FOLDED) == 0) && ((flags & PRETENURE) != 0)) {
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
cmpp(result, ExternalOperand(allocation_limit));
@@ -4872,6 +4975,7 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4905,23 +5009,19 @@ void MacroAssembler::Allocate(int object_size,
movp(top_reg, result);
}
addp(top_reg, Immediate(object_size));
- j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpp(top_reg, limit_operand);
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+ }
- bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if (tag_result) {
- subp(result, Immediate(object_size - kHeapObjectTag));
- } else {
- subp(result, Immediate(object_size));
- }
- } else if (tag_result) {
- // Tag the result if requested.
+ subp(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ // Tag the result.
DCHECK(kHeapObjectTag == 1);
incp(result);
}
@@ -4937,6 +5037,8 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4949,6 +5051,7 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -4971,34 +5074,66 @@ void MacroAssembler::Allocate(Register object_size,
MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
- // Calculate new top and bail out if new space is exhausted.
ExternalReference allocation_limit =
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
addp(result_end, result);
- j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpp(result_end, limit_operand);
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch, flags);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
+ }
- // Tag the result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- addp(result, Immediate(kHeapObjectTag));
+ // Tag the result.
+ addp(result, Immediate(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
}
+
+ leap(result_end, Operand(result, object_size));
+
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ addp(result, Immediate(kHeapObjectTag));
}
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, no_reg, NULL, flags);
+ }
+
+ leap(result_end, Operand(result, object_size, times_1, 0));
+
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ addp(result, Immediate(kHeapObjectTag));
+}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
Label* gc_required,
MutableMode mode) {
// Allocate heap number in new space.
- Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required,
+ NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@@ -5030,14 +5165,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
}
// Allocate two byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, result, scratch2,
+ scratch3, gc_required, NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
@@ -5066,14 +5195,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
}
// Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, result, scratch2,
+ scratch3, gc_required, NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
@@ -5091,7 +5214,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
@@ -5103,12 +5226,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
@@ -5122,7 +5241,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
@@ -5136,7 +5255,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
@@ -5152,7 +5271,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index af3dd031ca..d5e411f36f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -21,8 +21,8 @@ const Register kReturnRegister1 = {Register::kCode_rdx};
const Register kReturnRegister2 = {Register::kCode_r8};
const Register kJSFunctionRegister = {Register::kCode_rdi};
const Register kContextRegister = {Register::kCode_rsi};
+const Register kAllocateSizeRegister = {Register::kCode_rdx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_r11};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
@@ -34,8 +34,9 @@ const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
-const Register kScratchRegister = { 10 }; // r10.
-const Register kRootRegister = { 13 }; // r13 (callee save).
+const Register kScratchRegister = {10}; // r10.
+const XMMRegister kScratchDoubleReg = {15}; // xmm15.
+const Register kRootRegister = {13}; // r13 (callee save).
// Actual value of root register is offset from the root array's start
 // to take advantage of negative 8-bit displacement values.
const int kRootRegisterBias = 128;
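
Since x86-64 displacements are signed, biasing the register by +128 makes the first 256 bytes of the root array reachable with a one-byte displacement in [-128, 127], twice what an unbiased pointer would cover. A hypothetical helper, not part of this patch, to make the arithmetic concrete:

    int32_t RootSlotDisplacement(int index) {
      const int kRootRegisterBias = 128;
      const int kPointerSize = 8;
      // Unbiased, only slots 0..15 fit in an int8 displacement; with the
      // bias, slots 0..31 map to displacements -128..120.
      return index * kPointerSize - kRootRegisterBias;
    }
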
@@ -338,7 +339,8 @@ class MacroAssembler: public Assembler {
//
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
+ void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false,
+ StackFrame::Type frame_type = StackFrame::EXIT);
// Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
// memory (not GCed) on the stack accessible via StackSpaceOperand.
@@ -818,7 +820,6 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(Register dst, int64_t x, RelocInfo::Mode rmode);
void Set(const Operand& dst, intptr_t x);
void Cvtss2sd(XMMRegister dst, XMMRegister src);
@@ -927,7 +928,6 @@ class MacroAssembler: public Assembler {
AllowDeferredHandleDereference using_raw_address;
DCHECK(!RelocInfo::IsNone(rmode));
DCHECK(value->IsHeapObject());
- DCHECK(!isolate()->heap()->InNewSpace(*value));
movp(dst, reinterpret_cast<void*>(value.location()), rmode);
}
@@ -956,10 +956,24 @@ class MacroAssembler: public Assembler {
AVX_OP2_XO(Addsd, addsd)
AVX_OP2_XO(Subsd, subsd)
AVX_OP2_XO(Mulsd, mulsd)
+ AVX_OP2_XO(Divss, divss)
AVX_OP2_XO(Divsd, divsd)
- AVX_OP2_X(Andpd, andpd)
- AVX_OP2_X(Orpd, orpd)
- AVX_OP2_X(Xorpd, xorpd)
+ AVX_OP2_XO(Andps, andps)
+ AVX_OP2_XO(Andpd, andpd)
+ AVX_OP2_XO(Orpd, orpd)
+ AVX_OP2_XO(Xorpd, xorpd)
+ AVX_OP2_XO(Cmpeqps, cmpeqps)
+ AVX_OP2_XO(Cmpltps, cmpltps)
+ AVX_OP2_XO(Cmpleps, cmpleps)
+ AVX_OP2_XO(Cmpneqps, cmpneqps)
+ AVX_OP2_XO(Cmpnltps, cmpnltps)
+ AVX_OP2_XO(Cmpnleps, cmpnleps)
+ AVX_OP2_XO(Cmpeqpd, cmpeqpd)
+ AVX_OP2_XO(Cmpltpd, cmpltpd)
+ AVX_OP2_XO(Cmplepd, cmplepd)
+ AVX_OP2_XO(Cmpneqpd, cmpneqpd)
+ AVX_OP2_XO(Cmpnltpd, cmpnltpd)
+ AVX_OP2_XO(Cmpnlepd, cmpnlepd)
AVX_OP2_X(Pcmpeqd, pcmpeqd)
AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)
@@ -983,9 +997,18 @@ class MacroAssembler: public Assembler {
void Movq(Register dst, XMMRegister src);
void Movaps(XMMRegister dst, XMMRegister src);
+ void Movups(XMMRegister dst, XMMRegister src);
+ void Movups(XMMRegister dst, const Operand& src);
+ void Movups(const Operand& dst, XMMRegister src);
+ void Movmskps(Register dst, XMMRegister src);
void Movapd(XMMRegister dst, XMMRegister src);
+ void Movupd(XMMRegister dst, const Operand& src);
+ void Movupd(const Operand& dst, XMMRegister src);
void Movmskpd(Register dst, XMMRegister src);
+ void Xorps(XMMRegister dst, XMMRegister src);
+ void Xorps(XMMRegister dst, const Operand& src);
+
void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void Sqrtsd(XMMRegister dst, XMMRegister src);
@@ -996,6 +1019,13 @@ class MacroAssembler: public Assembler {
void Ucomisd(XMMRegister src1, XMMRegister src2);
void Ucomisd(XMMRegister src1, const Operand& src2);
+ // ---------------------------------------------------------------------------
+ // SIMD macros.
+ void Absps(XMMRegister dst);
+ void Negps(XMMRegister dst);
+ void Abspd(XMMRegister dst);
+ void Negpd(XMMRegister dst);
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
@@ -1224,6 +1254,10 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -1304,6 +1338,15 @@ class MacroAssembler: public Assembler {
Label* gc_required,
AllocationFlags flags);
+ // FastAllocate is right now only used for folded allocations. It just
+ // increments the top pointer without checking against limit. This can only
+ // be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register result_end,
+ AllocationFlags flags);
+
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ AllocationFlags flags);
+
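The fast path is plain bump-pointer allocation: the limit check already happened at the allocation folding dominator, so FastAllocate only loads the top pointer, advances it, and tags the result. A freestanding sketch of the emitted logic, with illustrative names:

    #include <cstdint>
    #include <cstddef>

    struct NewSpace { uintptr_t top; };  // stand-in for the allocation top

    uintptr_t FastAllocateSketch(NewSpace* space, size_t object_size) {
      uintptr_t result = space->top;  // LoadAllocationTopHelper
      space->top += object_size;      // bump; no limit check on this path
      return result + 1;              // tag the result (kHeapObjectTag == 1)
    }
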
// Allocate a heap number in new space with undefined value. Returns
// tagged pointer in result register, or jumps to gc_required if new
// space is full.
@@ -1456,7 +1499,8 @@ class MacroAssembler: public Assembler {
void TailCallRuntime(Runtime::FunctionId fid);
 // Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext);
+ void JumpToExternalReference(const ExternalReference& ext,
+ bool builtin_exit_frame = false);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in rsp[0], rsp[8],
@@ -1561,6 +1605,9 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in rax and returns map with validated enum cache
// in rax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -1621,7 +1668,7 @@ class MacroAssembler: public Assembler {
Label::Distance near_jump,
const CallWrapper& call_wrapper);
- void EnterExitFramePrologue(bool save_rax);
+ void EnterExitFramePrologue(bool save_rax, StackFrame::Type frame_type);
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
@@ -1743,26 +1790,7 @@ inline Operand StackOperandForReturnAddress(int32_t disp) {
return Operand(rsp, disp);
}
-
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
- masm->pushfq(); \
- masm->Pushad(); \
- masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->Pop(rax); \
- masm->Popad(); \
- masm->popfq(); \
- } \
- masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 802c80fa71..fa9b5a40d4 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -47,6 +47,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
+bool CpuFeatures::SupportsSimd128() { return false; }
static const byte kCallOpcode = 0xE8;
static const int kNoCodeAgeSequenceLength = 5;
@@ -81,11 +82,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
-Address RelocInfo::wasm_memory_reference() {
- DCHECK(IsWasmMemoryReference(rmode_));
- return Memory::Address_at(pc_);
-}
-
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -105,36 +101,6 @@ int RelocInfo::target_address_size() {
}
-void RelocInfo::set_target_address(Address target,
- WriteBarrierMode write_barrier_mode,
- ICacheFlushMode icache_flush_mode) {
- Assembler::set_target_address_at(isolate_, pc_, host_, target,
- icache_flush_mode);
- Assembler::set_target_address_at(isolate_, pc_, host_, target);
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
- IsCodeTarget(rmode_)) {
- Object* target_code = Code::GetCodeFromTargetAddress(target);
- host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
- host(), this, HeapObject::cast(target_code));
- }
-}
-
-void RelocInfo::update_wasm_memory_reference(
- Address old_base, Address new_base, size_t old_size, size_t new_size,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmMemoryReference(rmode_));
- DCHECK(old_base <= wasm_memory_reference() &&
- wasm_memory_reference() < old_base + old_size);
- Address updated_reference = new_base + (wasm_memory_reference() - old_base);
- DCHECK(new_base <= updated_reference &&
- updated_reference < new_base + new_size);
- Memory::Address_at(pc_) = updated_reference;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- Assembler::FlushICache(isolate_, pc_, sizeof(int32_t));
- }
-}
-
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(pc_);
@@ -158,6 +124,7 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
+ host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
}
@@ -284,7 +251,7 @@ void RelocInfo::WipeOut() {
}
}
-
+template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
@@ -362,7 +329,6 @@ Immediate::Immediate(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
- DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
x_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@@ -401,7 +367,6 @@ void Assembler::emit(Handle<Object> handle) {
AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- DCHECK(!isolate()->heap()->InNewSpace(obj));
if (obj->IsHeapObject()) {
emit(reinterpret_cast<intptr_t>(handle.location()),
RelocInfo::EMBEDDED_OBJECT);
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index e74d77030a..62b662f285 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -101,6 +101,30 @@ bool RelocInfo::IsInConstantPool() {
return false;
}
+Address RelocInfo::wasm_memory_reference() {
+ DCHECK(IsWasmMemoryReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+Address RelocInfo::wasm_global_reference() {
+ DCHECK(IsWasmGlobalReference(rmode_));
+ return Memory::Address_at(pc_);
+}
+
+uint32_t RelocInfo::wasm_memory_size_reference() {
+ DCHECK(IsWasmMemorySizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_reference(
+ Address address, ICacheFlushMode flush_mode) {
+ Memory::Address_at(pc_) = address;
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
+ Memory::uint32_at(pc_) = size;
+}
// -----------------------------------------------------------------------------
// Implementation of Operand
@@ -186,17 +210,11 @@ Register Operand::reg() const {
#define EMIT(x) \
*pc_++ = (x)
-
-#ifdef GENERATED_CODE_COVERAGE
-static void InitCoverageLog();
-#endif
-
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- positions_recorder_(this) {
- // Clear the buffer in debug mode unless it was provided by the
- // caller in which case we can't be sure it's okay to overwrite
- // existing code in it; see CodePatcher::CodePatcher(...).
+ : AssemblerBase(isolate, buffer, buffer_size) {
+// Clear the buffer in debug mode unless it was provided by the
+// caller in which case we can't be sure it's okay to overwrite
+// existing code in it; see CodePatcher::CodePatcher(...).
#ifdef DEBUG
if (own_buffer_) {
memset(buffer_, 0xCC, buffer_size_); // int3
@@ -204,17 +222,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
#endif
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-
-#ifdef GENERATED_CODE_COVERAGE
- InitCoverageLog();
-#endif
}
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
- reloc_info_writer.Finish();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -223,6 +236,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
desc->constant_pool_size = 0;
+ desc->unwinding_info_size = 0;
+ desc->unwinding_info = nullptr;
}
@@ -552,6 +567,45 @@ void Assembler::xchg(Register dst, const Operand& src) {
emit_operand(dst, src);
}
+void Assembler::xchg_b(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x86);
+ emit_operand(reg, op);
+}
+
+void Assembler::xchg_w(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x87);
+ emit_operand(reg, op);
+}
+
+void Assembler::lock() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF0);
+}
+
+void Assembler::cmpxchg(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB1);
+ emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_b(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xB0);
+ emit_operand(src, dst);
+}
+
+void Assembler::cmpxchg_w(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xB1);
+ emit_operand(src, dst);
+}
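
Together these emitters cover atomic read-modify-write sequences: lock() emits the 0xF0 prefix that makes the following instruction atomic, and cmpxchg compares eax against the destination, storing src on a match (ZF is set on success, and eax receives the current value on failure). An illustrative use, not code from this patch:

    // Atomic compare-and-swap of a 32-bit field; expected value in eax,
    // replacement in ebx.
    __ lock();
    __ cmpxchg(Operand(edx, 0), ebx);
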
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
@@ -658,14 +712,14 @@ void Assembler::cmpw(const Operand& op, Immediate imm16) {
void Assembler::cmpw(Register reg, const Operand& op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
- EMIT(0x39);
+ EMIT(0x3B);
emit_operand(reg, op);
}
void Assembler::cmpw(const Operand& op, Register reg) {
EnsureSpace ensure_space(this);
EMIT(0x66);
- EMIT(0x3B);
+ EMIT(0x39);
emit_operand(reg, op);
}
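
This hunk fixes swapped opcodes: per the x86 opcode map, 0x3B is CMP r16, r/m16 and 0x39 is CMP r/m16, r16 (both under the 0x66 operand-size prefix). So, for example:

    cmpw(eax, Operand(ebx, 0))   must emit 66 3B 03   ; cmp ax, word ptr [ebx]
    cmpw(Operand(ebx, 0), eax)   must emit 66 39 03   ; cmp word ptr [ebx], ax
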
@@ -1339,7 +1393,6 @@ void Assembler::bind(Label* L) {
void Assembler::call(Label* L) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
if (L->is_bound()) {
const int long_size = 5;
@@ -1357,7 +1410,6 @@ void Assembler::call(Label* L) {
void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
DCHECK(!RelocInfo::IsCodeTarget(rmode));
EMIT(0xE8);
@@ -1376,7 +1428,6 @@ int Assembler::CallSize(const Operand& adr) {
void Assembler::call(const Operand& adr) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
EMIT(0xFF);
emit_operand(edx, adr);
@@ -1391,7 +1442,6 @@ int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::call(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id) {
- positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode)
|| rmode == RelocInfo::CODE_AGE_SEQUENCE);
@@ -2149,32 +2199,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
reloc_info_writer.Write(&rinfo);
}
-
-#ifdef GENERATED_CODE_COVERAGE
-static FILE* coverage_log = NULL;
-
-
-static void InitCoverageLog() {
- char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
- if (file_name != NULL) {
- coverage_log = fopen(file_name, "aw+");
- }
-}
-
-
-void LogGeneratedCodeCoverage(const char* file_line) {
- const char* return_address = (&file_line)[-1];
- char* push_insn = const_cast<char*>(return_address - 12);
- push_insn[0] = 0xeb; // Relative branch insn.
- push_insn[1] = 13; // Skip over coverage insns.
- if (coverage_log != NULL) {
- fprintf(coverage_log, "%s\n", file_line);
- fflush(coverage_log);
- }
-}
-
-#endif
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 96eced9624..4111e8df12 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -74,6 +74,9 @@ namespace internal {
V(stX_6) \
V(stX_7)
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS DOUBLE_REGISTERS
+
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(stX_0) \
V(stX_1) \
@@ -120,8 +123,6 @@ struct Register {
Register r = {code};
return r;
}
- const char* ToString();
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@@ -145,8 +146,9 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
+static const bool kSimpleFPAliasing = true;
-struct DoubleRegister {
+struct X87Register {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@@ -158,12 +160,11 @@ struct DoubleRegister {
static const int kMaxNumRegisters = Code::kAfterLast;
static const int kMaxNumAllocatableRegisters = 6;
- static DoubleRegister from_code(int code) {
- DoubleRegister result = {code};
+ static X87Register from_code(int code) {
+ X87Register result = {code};
return result;
}
- bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
int code() const {
@@ -171,24 +172,24 @@ struct DoubleRegister {
return reg_code;
}
- bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
-
- const char* ToString();
+ bool is(X87Register reg) const { return reg_code == reg.reg_code; }
int reg_code;
};
+typedef X87Register FloatRegister;
+
+typedef X87Register DoubleRegister;
+
+// TODO(x87) Define SIMD registers.
+typedef X87Register Simd128Register;
+
#define DECLARE_REGISTER(R) \
const DoubleRegister R = {DoubleRegister::kCode_##R};
DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
-typedef DoubleRegister X87Register;
-
-// TODO(x87) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
-
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -648,6 +649,16 @@ class Assembler : public AssemblerBase {
// Exchange
void xchg(Register dst, Register src);
void xchg(Register dst, const Operand& src);
+ void xchg_b(Register reg, const Operand& op);
+ void xchg_w(Register reg, const Operand& op);
+
+ // Lock prefix
+ void lock();
+
+ // CompareExchange
+ void cmpxchg(const Operand& dst, Register src);
+ void cmpxchg_b(const Operand& dst, Register src);
+ void cmpxchg_w(const Operand& dst, Register src);
// Arithmetics
void adc(Register dst, int32_t imm32);
@@ -958,7 +969,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, int raw_position);
+ void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
@@ -980,10 +991,6 @@ class Assembler : public AssemblerBase {
static bool IsNop(Address addr);
- AssemblerPositionsRecorder* positions_recorder() {
- return &positions_recorder_;
- }
-
int relocation_writer_size() {
return (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
@@ -1069,9 +1076,6 @@ class Assembler : public AssemblerBase {
// code generation
RelocInfoWriter reloc_info_writer;
-
- AssemblerPositionsRecorder positions_recorder_;
- friend class AssemblerPositionsRecorder;
};
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 71adfd3531..02de67afbc 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -22,67 +22,16 @@
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
-static void InitializeArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- // register state
- // eax -- number of arguments
- // edi -- function
- // ebx -- allocation site with elements kind
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-static void InitializeInternalArrayConstructorDescriptor(
- Isolate* isolate, CodeStubDescriptor* descriptor,
- int constant_stack_parameter_count) {
- // register state
- // eax -- number of arguments
- // edi -- constructor function
- Address deopt_handler = Runtime::FunctionForId(
- Runtime::kInternalArrayConstructor)->entry;
-
- if (constant_stack_parameter_count == 0) {
- descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- } else {
- descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
- JS_FUNCTION_STUB_MODE);
- }
-}
-
-
-void ArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
+ __ pop(ecx);
+ __ mov(MemOperand(esp, eax, times_4, 0), edi);
+ __ push(edi);
+ __ push(ebx);
+ __ push(ecx);
+ __ add(eax, Immediate(3));
+ __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
@@ -90,21 +39,12 @@ void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
- InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+ Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+ descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
-
-#define __ ACCESS_MASM(masm)
-
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -330,26 +270,26 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- const Register base = edx;
const Register scratch = ecx;
- Label call_runtime;
-
- // We will call runtime helper function directly.
- if (exponent_type() == ON_STACK) {
- // The arguments are still on the stack.
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMathPowRT);
-
- // The stub is called from non-optimized code, which expects the result
- // as heap number in exponent.
- __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
- } else {
- // Currently it's only called from full-compiler and exponent type is
- // ON_STACK.
- UNIMPLEMENTED();
+
+ // Load the double_exponent into x87 FPU
+ __ fld_d(Operand(esp, 0 * kDoubleSize + 4));
+ // Load the double_base into x87 FPU
+ __ fld_d(Operand(esp, 1 * kDoubleSize + 4));
+
+ // Call ieee754 runtime directly.
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(4, scratch);
+ // Put the double_base parameter in call stack
+ __ fstp_d(Operand(esp, 0 * kDoubleSize));
+ // Put the double_exponent parameter in call stack
+ __ fstp_d(Operand(esp, 1 * kDoubleSize));
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 4);
}
+ // Return value is in st(0) on ia32.
+ __ ret(0);
}
@@ -392,7 +332,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
- STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ ret(0);
@@ -692,13 +631,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(edx, Immediate(2)); // edx was a smi.
// edx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- // Check that the fourth object is a JSArray object.
+ // Load last_match_info which is still known to be a fast-elements JSObject.
+ // Check that the fourth object is a JSObject.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
__ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
+ // Check that the object has fast elements.
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
__ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(eax, factory->fixed_array_map());
@@ -1167,9 +1106,11 @@ static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
__ push(edi);
__ push(edx);
__ push(ebx);
+ __ push(esi);
__ CallStub(stub);
+ __ pop(esi);
__ pop(ebx);
__ pop(edx);
__ pop(edi);
@@ -1189,6 +1130,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// edi : the function to call
Isolate* isolate = masm->isolate();
Label initialize, done, miss, megamorphic, not_array_function;
+ Label done_increment_count, done_initialize_count;
// Load the cache state into ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1201,7 +1143,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// type-feedback-vector.h).
Label check_allocation_site;
__ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
- __ j(equal, &done, Label::kFar);
+ __ j(equal, &done_increment_count, Label::kFar);
__ CompareRoot(ecx, Heap::kmegamorphic_symbolRootIndex);
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
@@ -1224,7 +1166,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ j(not_equal, &megamorphic);
- __ jmp(&done, Label::kFar);
+ __ jmp(&done_increment_count, Label::kFar);
__ bind(&miss);
@@ -1253,11 +1195,25 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// slot.
CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub);
- __ jmp(&done);
+ __ jmp(&done_initialize_count);
__ bind(&not_array_function);
CreateWeakCellStub weak_cell_stub(isolate);
CallStubInRecordCallTarget(masm, &weak_cell_stub);
+ __ bind(&done_initialize_count);
+
+ // Initialize the call counter.
+ __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(1)));
+ __ jmp(&done);
+
+ __ bind(&done_increment_count);
+ // Increment the call count for monomorphic function calls.
+ __ add(FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize),
+ Immediate(Smi::FromInt(1)));
+
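Adding Immediate(Smi::FromInt(1)) to the tagged slot bumps the raw word by 2, the ia32 smi encoding of 1 (smis are value << 1), so the stored count goes up by exactly 1 without untagging:

    // raw word = count << 1;  (count << 1) + (1 << 1) == (count + 1) << 1
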
__ bind(&done);
}
@@ -1321,7 +1277,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// Increment the call count for monomorphic function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ Immediate(Smi::FromInt(1)));
__ mov(ebx, ecx);
__ mov(edx, edi);
@@ -1369,7 +1325,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ Immediate(Smi::FromInt(1)));
__ bind(&call_function);
__ Set(eax, argc);
@@ -1440,7 +1396,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Initialize the call counter.
__ mov(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
- Immediate(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+ Immediate(Smi::FromInt(1)));
// Store the function. Use a stub since we need a frame for allocation.
// ebx - vector
@@ -1450,7 +1406,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(isolate);
__ push(edi);
+ __ push(esi);
__ CallStub(&create_stub);
+ __ pop(esi);
__ pop(edi);
}
@@ -1494,13 +1452,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+ CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
- TypeofStub::GenerateAheadOfTime(isolate);
}
@@ -1544,13 +1501,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Enter the exit frame that transitions from JavaScript to C++.
if (argv_in_register()) {
DCHECK(!save_doubles());
+ DCHECK(!is_builtin_exit());
__ EnterApiExitFrame(arg_stack_space);
// Move argc and argv into the correct registers.
__ mov(esi, ecx);
__ mov(edi, eax);
} else {
- __ EnterExitFrame(arg_stack_space, save_doubles());
+ __ EnterExitFrame(
+ arg_stack_space, save_doubles(),
+ is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
// ebx: pointer to C function (C callee-saved)
@@ -1731,10 +1691,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bind(&invoke);
__ PushStackHandler();
- // Clear any pending exceptions.
- __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
// Fake a receiver (NULL).
__ push(Immediate(0)); // receiver
@@ -1781,129 +1737,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-void InstanceOfStub::Generate(MacroAssembler* masm) {
- Register const object = edx; // Object (lhs).
- Register const function = eax; // Function (rhs).
- Register const object_map = ecx; // Map of {object}.
- Register const function_map = ebx; // Map of {function}.
- Register const function_prototype = function_map; // Prototype of {function}.
- Register const scratch = edi;
-
- DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
- DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
-
- // Check if {object} is a smi.
- Label object_is_smi;
- __ JumpIfSmi(object, &object_is_smi, Label::kNear);
-
- // Lookup the {function} and the {object} map in the global instanceof cache.
- // Note: This is safe because we clear the global instanceof cache whenever
- // we change the prototype of any object.
- Label fast_case, slow_case;
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &fast_case, Label::kNear);
- __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // If {object} is a smi we can safely return false if {function} is a JS
- // function, otherwise we have to miss to the runtime and throw an exception.
- __ bind(&object_is_smi);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
- __ LoadRoot(eax, Heap::kFalseValueRootIndex);
- __ ret(0);
-
- // Fast-case: The {function} must be a valid JSFunction.
- __ bind(&fast_case);
- __ JumpIfSmi(function, &slow_case);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
- __ j(not_equal, &slow_case);
-
- // Go to the runtime if the function is not a constructor.
- __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsConstructor));
- __ j(zero, &slow_case);
-
- // Ensure that {function} has an instance prototype.
- __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &slow_case);
-
- // Get the "prototype" (or initial map) of the {function}.
- __ mov(function_prototype,
- FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
- __ AssertNotSmi(function_prototype);
-
- // Resolve the prototype if the {function} has an initial map. Afterwards the
- // {function_prototype} will be either the JSReceiver prototype object or the
- // hole value, which means that no instances of the {function} were created so
- // far and hence we should return false.
- Label function_prototype_valid;
- Register const function_prototype_map = scratch;
- __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
- __ j(not_equal, &function_prototype_valid, Label::kNear);
- __ mov(function_prototype,
- FieldOperand(function_prototype, Map::kPrototypeOffset));
- __ bind(&function_prototype_valid);
- __ AssertNotSmi(function_prototype);
-
- // Update the global instanceof cache with the current {object} map and
- // {function}. The cached answer will be set when it is known below.
- __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
-
- // Loop through the prototype chain looking for the {function} prototype.
- // Assume true, and change to false if not found.
- Label done, loop, fast_runtime_fallback;
- __ mov(eax, isolate()->factory()->true_value());
- __ bind(&loop);
-
- // Check if the object needs to be access checked.
- __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &fast_runtime_fallback, Label::kNear);
- // Check if the current object is a Proxy.
- __ CmpInstanceType(object_map, JS_PROXY_TYPE);
- __ j(equal, &fast_runtime_fallback, Label::kNear);
-
- __ mov(object, FieldOperand(object_map, Map::kPrototypeOffset));
- __ cmp(object, function_prototype);
- __ j(equal, &done, Label::kNear);
- __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
- __ cmp(object, isolate()->factory()->null_value());
- __ j(not_equal, &loop);
- __ mov(eax, isolate()->factory()->false_value());
-
- __ bind(&done);
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret(0);
-
- // Found Proxy or access check needed: Call the runtime.
- __ bind(&fast_runtime_fallback);
- __ PopReturnAddressTo(scratch);
- __ Push(object);
- __ Push(function_prototype);
- __ PushReturnAddressFrom(scratch);
- // Invalidate the instanceof cache.
- __ Move(eax, Immediate(Smi::FromInt(0)));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ TailCallRuntime(Runtime::kHasInPrototypeChain);
-
- // Slow-case: Call the %InstanceOf runtime function.
- __ bind(&slow_case);
- __ PopReturnAddressTo(scratch);
- __ Push(object);
- __ Push(function);
- __ PushReturnAddressFrom(scratch);
- __ TailCallRuntime(is_es6_instanceof() ? Runtime::kOrdinaryHasInstance
- : Runtime::kInstanceOf);
-}
-
-
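The deleted InstanceOfStub implemented the prototype-chain walk (plus a global result cache) in hand-written assembly; this update moves that work out of the stub. As a reference for what the walk computes, a hedged C++ sketch over a toy object model (Obj and its proto field are illustrative, not V8 types):

#include <cassert>

struct Obj {
  const Obj* proto = nullptr;  // [[Prototype]]; null ends the chain
};

// True iff `prototype` occurs on the prototype chain of `object` --
// the core of OrdinaryHasInstance once the function's "prototype"
// property has been resolved.
bool HasInPrototypeChain(const Obj* object, const Obj* prototype) {
  for (const Obj* p = object->proto; p != nullptr; p = p->proto) {
    if (p == prototype) return true;
  }
  return false;
}

int main() {
  Obj grandparent;
  Obj parent{&grandparent};
  Obj child{&parent};
  assert(HasInPrototypeChain(&child, &grandparent));
  assert(!HasInPrototypeChain(&grandparent, &child));
}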
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -1958,13 +1791,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
}
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
- if (index_flags_ == STRING_INDEX_IS_NUMBER) {
- __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
- } else {
- DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
- // NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi);
- }
+ __ CallRuntime(Runtime::kNumberToSmi);
if (!index_.is(eax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
@@ -2297,77 +2124,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// ecx: sub string length (smi)
// edx: from index (smi)
StringCharAtGenerator generator(eax, edx, ecx, eax, &runtime, &runtime,
- &runtime, STRING_INDEX_IS_NUMBER,
- RECEIVER_IS_STRING);
+ &runtime, RECEIVER_IS_STRING);
generator.GenerateFast(masm);
__ ret(3 * kPointerSize);
generator.SkipSlow(masm, &runtime);
}
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
- // The ToNumber stub takes one argument in eax.
- Label not_smi;
- __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
- __ Ret();
- __ bind(&not_smi);
-
- Label not_heap_number;
- __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &not_heap_number, Label::kNear);
- __ Ret();
- __ bind(&not_heap_number);
-
- NonNumberToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
- // The NonNumberToNumber stub takes one argument in eax.
- __ AssertNotNumber(eax);
-
- Label not_string;
- __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
- // eax: object
- // edi: object map
- __ j(above_equal, &not_string, Label::kNear);
- StringToNumberStub stub(masm->isolate());
- __ TailCallStub(&stub);
- __ bind(&not_string);
-
- Label not_oddball;
- __ CmpInstanceType(edi, ODDBALL_TYPE);
- __ j(not_equal, &not_oddball, Label::kNear);
- __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
- __ Ret();
- __ bind(&not_oddball);
-
- __ pop(ecx); // Pop return address.
- __ push(eax); // Push argument.
- __ push(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
- // The StringToNumber stub takes one argument in eax.
- __ AssertString(eax);
-
- // Check if string has a cached array index.
- Label runtime;
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- __ j(not_zero, &runtime, Label::kNear);
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
- __ Ret();
-
- __ bind(&runtime);
- __ PopReturnAddressTo(ecx); // Pop return address.
- __ Push(eax); // Push argument.
- __ PushReturnAddressFrom(ecx); // Push return address.
- __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
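The three deleted stubs formed a dispatch chain: smi and heap-number inputs return unchanged, strings try a cached array index before calling the runtime, and oddballs load a precomputed to-number value. A rough C++ sketch of that decision tree (Value and its tags are stand-ins, not V8's real value representation):

#include <cassert>
#include <string>

struct Value {
  enum Kind { kSmi, kHeapNumber, kString, kOddball, kOther } kind;
  double number = 0;   // payload for kSmi / kHeapNumber / kOddball
  std::string str;     // payload for kString
};

// Mirrors the fast paths the stubs special-cased before falling back.
double ToNumber(const Value& v) {
  switch (v.kind) {
    case Value::kSmi:
    case Value::kHeapNumber:
      return v.number;           // already a number: return as-is
    case Value::kOddball:
      return v.number;           // e.g. true -> 1, null -> 0
    case Value::kString:
      return std::stod(v.str);   // stand-in for the runtime call
    default:
      return 0;                  // full ToNumber lives in the runtime
  }
}

int main() {
  assert(ToNumber({Value::kSmi, 7}) == 7);
  assert(ToNumber({Value::kString, 0, "42"}) == 42);
}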
void ToStringStub::Generate(MacroAssembler* masm) {
// The ToString stub takes one argument in eax.
Label is_number;
@@ -2574,7 +2336,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// Load ecx with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
- __ mov(ecx, handle(isolate()->heap()->undefined_value()));
+ __ mov(ecx, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
@@ -3375,14 +3137,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate(), state());
+ LoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate(), state());
+ KeyedLoadICStub stub(isolate());
stub.GenerateForTrampoline(masm);
}
@@ -3534,10 +3296,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ j(not_equal, &miss);
__ push(slot);
__ push(vector);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::LOAD_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
- receiver, name, vector, scratch);
+ masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
+ vector, scratch);
__ pop(vector);
__ pop(slot);
@@ -3603,27 +3363,21 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
KeyedLoadIC::GenerateMiss(masm);
}
-
-void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorStoreICStub stub(isolate(), state());
+void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ StoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-
-void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
- VectorKeyedStoreICStub stub(isolate(), state());
+void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+ __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
+ KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
+void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-void VectorStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -3661,7 +3415,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
// found, now call handler.
Register handler = feedback;
- DCHECK(handler.is(VectorStoreICDescriptor::ValueRegister()));
+ DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
__ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ pop(vector);
__ pop(receiver);
@@ -3721,7 +3475,7 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
Register slot, Register weak_cell,
Label* miss) {
// The store ic value is on the stack.
- DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
+ DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
ExternalReference virtual_register =
ExternalReference::virtual_handler_register(masm->isolate());
@@ -3759,13 +3513,12 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(Operand::StaticVariable(virtual_register));
}
-
-void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
- Register key = VectorStoreICDescriptor::NameRegister(); // ecx
- Register value = VectorStoreICDescriptor::ValueRegister(); // eax
- Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
- Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
+void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
+ Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
+ Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
__ push(value);
@@ -3795,10 +3548,8 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ pop(value);
__ push(slot);
__ push(vector);
- Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
- Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
- receiver, key, slot, no_reg);
+ masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
+ no_reg);
__ pop(vector);
__ pop(slot);
Label no_pop_miss;
@@ -3810,13 +3561,11 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
StoreIC::GenerateMiss(masm);
}
-
-void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
-
-void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@@ -3929,12 +3678,12 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ jmp(&compare_map);
}
-void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
- Register key = VectorStoreICDescriptor::NameRegister(); // ecx
- Register value = VectorStoreICDescriptor::ValueRegister(); // eax
- Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
- Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
+void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
+ Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
+ Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
+ Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
+ Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
Label miss;
__ push(value);
@@ -4147,17 +3896,14 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
- ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
- isolate);
-}
+ ArrayNArgumentsConstructorStub stub(isolate);
+ stub.GetCode();
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
- Isolate* isolate) {
ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
@@ -4165,8 +3911,6 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
- InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
- stubh3.GetCode();
}
}
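GenerateStubsAheadOfTime now pre-generates one shared N-arguments stub plus per-kind zero/one-argument variants. Abstractly the pattern is a loop over a small configuration space, calling GetCode on each specialization so nothing has to be compiled lazily at startup; a schematic sketch (Stub and the kinds are illustrative):

#include <cstdio>

enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };

template <typename Stub>
void GenerateForAllKinds() {
  // One specialization per elements kind, materialized eagerly.
  const ElementsKind kinds[] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
  for (ElementsKind kind : kinds) Stub(kind).GetCode();
}

struct NoArgumentStub {
  explicit NoArgumentStub(ElementsKind kind) : kind_(kind) {}
  void GetCode() { std::printf("compiled stub for kind %d\n", kind_); }
  ElementsKind kind_;
};

int main() { GenerateForAllKinds<NoArgumentStub>(); }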
@@ -4184,13 +3928,15 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else if (argument_count() == NONE) {
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
} else if (argument_count() == ONE) {
CreateArrayDispatchOneArgument(masm, mode);
} else if (argument_count() == MORE_THAN_ONE) {
- CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
} else {
UNREACHABLE();
}
@@ -4300,7 +4046,7 @@ void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
__ TailCallStub(&stub1);
__ bind(&not_one_case);
- InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+ ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN);
}
@@ -4386,16 +4132,16 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ bind(&done_allocate);
// Initialize the JSObject fields.
- __ mov(Operand(eax, JSObject::kMapOffset), ecx);
- __ mov(Operand(eax, JSObject::kPropertiesOffset),
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
masm->isolate()->factory()->empty_fixed_array());
- __ mov(Operand(eax, JSObject::kElementsOffset),
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
masm->isolate()->factory()->empty_fixed_array());
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+ __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
// ----------- S t a t e -------------
- // -- eax : result (untagged)
+ // -- eax : result (tagged)
// -- ebx : result fields (untagged)
// -- edi : result end (untagged)
// -- ecx : initial map
@@ -4413,10 +4159,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
// Initialize all in-object fields with undefined.
__ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
__ InitializeFieldsWithFiller(ebx, edi, edx);
-
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ inc(eax);
__ Ret();
}
__ bind(&slack_tracking);
@@ -4439,10 +4181,6 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(ebx, edx, edi);
- // Add the object tag to make the JSObject real.
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ inc(eax);
-
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
@@ -4473,10 +4211,10 @@ void FastNewObjectStub::Generate(MacroAssembler* masm) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(ecx);
}
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ dec(eax);
__ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
__ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ dec(edi);
__ jmp(&done_allocate);
// Fall back to %NewObject.
@@ -4497,19 +4235,19 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(edi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make edx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(edx, ebp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make edx point to the JavaScript frame.
+ __ mov(edx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
+ __ bind(&ok);
}
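The unbounded frame-walking loop is replaced by a static decision: when the stub is instantiated for Ignition it hops exactly one caller frame, and a debug-only check verifies the function slot matches. A sketch of that control flow over a linked frame list (Frame and skip_stub_frame are stand-ins for the fp-chain offsets used above):

#include <cassert>

struct Frame {
  const Frame* caller;   // cf. StandardFrameConstants::kCallerFPOffset
  const void* function;  // cf. StandardFrameConstants::kFunctionOffset
};

// Locates the JavaScript frame for `function`. With skip_stub_frame the
// stub knows statically that exactly one stub frame sits in between.
const Frame* FindJsFrame(const Frame* fp, const void* function,
                         bool skip_stub_frame) {
  const Frame* frame = fp;
  if (skip_stub_frame) frame = frame->caller;  // hop the stub frame
  assert(frame->function == function);         // FLAG_debug_code check
  return frame;
}

int main() {
  int fn = 0;
  Frame js{nullptr, &fn};
  Frame stub{&js, nullptr};
  assert(FindJsFrame(&stub, &fn, /*skip_stub_frame=*/true) == &js);
  assert(FindJsFrame(&js, &fn, /*skip_stub_frame=*/false) == &js);
}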
// Check if we have rest parameters (only possible if we have an
@@ -4539,7 +4277,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
- __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+ __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in eax.
@@ -4581,7 +4319,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label allocate, done_allocate;
__ lea(ecx, Operand(eax, times_half_pointer_size,
JSArray::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in edx.
@@ -4617,8 +4355,11 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ mov(eax, edi);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(ecx);
@@ -4631,6 +4372,22 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ Pop(eax);
}
__ jmp(&done_allocate);
+
+ // Fall back to %NewRestParameter.
+ __ bind(&too_big_for_new_space);
+ __ PopReturnAddressTo(ecx);
+ // We reload the function from the caller frame due to register pressure
+ // within this stub. This is the slow path, hence reloading is preferable.
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+ } else {
+ __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ }
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kNewRestParameter);
}
}
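The new fallback first compares the requested size against the largest object new space will accept; only oversized requests take the full %NewRestParameter runtime path. Schematically (the constant and helper here are illustrative, not V8's exact values):

#include <cassert>
#include <cstddef>

constexpr size_t kMaxRegularHeapObjectSize = 512 * 1024;  // illustrative

enum class Path { kNewSpaceRetry, kRuntime };

// Mirrors the branch added above: small requests retry the new-space
// allocator under a frame scope; large ones tail-call the runtime.
Path ChooseAllocationFallback(size_t byte_size) {
  return byte_size > kMaxRegularHeapObjectSize ? Path::kRuntime
                                               : Path::kNewSpaceRetry;
}

int main() {
  assert(ChooseAllocationFallback(64) == Path::kNewSpaceRetry);
  assert(ChooseAllocationFallback(4u << 20) == Path::kRuntime);
}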
@@ -4643,35 +4400,50 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(edi);
+ // Make ecx point to the JavaScript frame.
+ __ mov(ecx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+ }
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
+ __ bind(&ok);
+ }
+
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx,
+ FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- // ecx : number of parameters (tagged)
+ // ebx : number of parameters (tagged)
// edx : parameters pointer
// edi : function
+ // ecx : JavaScript frame pointer.
// esp[0] : return address
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
- __ mov(ebx, ecx);
- __ push(ecx);
+ __ mov(ecx, ebx);
+ __ push(ebx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ mov(ebx, ecx);
- __ push(ecx);
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ push(ebx);
+ __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(edx,
Operand(edx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
@@ -4705,7 +4477,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+ __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
// eax = address of new object(s) (tagged)
// ecx = argument count (smi-tagged)
@@ -4883,19 +4655,19 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// -----------------------------------
__ AssertFunction(edi);
- // For Ignition we need to skip all possible handler/stub frames until
- // we reach the JavaScript frame for the function (similar to what the
- // runtime fallback implementation does). So make edx point to that
- // JavaScript frame.
- {
- Label loop, loop_entry;
- __ mov(edx, ebp);
- __ jmp(&loop_entry, Label::kNear);
- __ bind(&loop);
+ // Make edx point to the JavaScript frame.
+ __ mov(edx, ebp);
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
__ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
- __ bind(&loop_entry);
+ }
+ if (FLAG_debug_code) {
+ Label ok;
__ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
- __ j(not_equal, &loop);
+ __ j(equal, &ok);
+ __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
+ __ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
@@ -4934,7 +4706,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ lea(ecx,
Operand(eax, times_half_pointer_size,
JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
- __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in edx.
@@ -4970,8 +4742,11 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ mov(eax, edi);
__ Ret();
- // Fall back to %AllocateInNewSpace.
+ // Fall back to %AllocateInNewSpace (if not too big).
+ Label too_big_for_new_space;
__ bind(&allocate);
+ __ cmp(ecx, Immediate(Page::kMaxRegularHeapObjectSize));
+ __ j(greater, &too_big_for_new_space);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(ecx);
@@ -4984,39 +4759,24 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ Pop(eax);
}
__ jmp(&done_allocate);
-}
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = esi;
- Register slot_reg = ebx;
- Register result_reg = eax;
- Label slow_case;
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = result_reg;
+ // Fall back to %NewStrictArguments.
+ __ bind(&too_big_for_new_space);
+ __ PopReturnAddressTo(ecx);
+ // We reload the function from the caller frame due to register pressure
+ // within this stub. This is the slow path, hence reloading is preferable.
+ if (skip_stub_frame()) {
+ // For Ignition we need to skip the handler/stub frame to reach the
+ // JavaScript frame for the function.
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
+ } else {
+ __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
}
-
- // Load the PropertyCell value at the specified slot.
- __ mov(result_reg, ContextOperand(context_reg, slot_reg));
- __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
-
- // Check that value is not the_hole.
- __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
- __ j(equal, &slow_case, Label::kNear);
- __ Ret();
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Pop(result_reg); // Pop return address.
- __ Push(slot_reg);
- __ Push(result_reg); // Push return address.
- __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kNewStrictArguments);
}
-
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = esi;
Register slot_reg = ebx;
@@ -5354,9 +5114,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
STATIC_ASSERT(FCA::kIsolateIndex == 1);
STATIC_ASSERT(FCA::kHolderIndex == 0);
- STATIC_ASSERT(FCA::kArgsLength == 7);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 7);
+ STATIC_ASSERT(FCA::kArgsLength == 8);
__ pop(return_address);
+
+ // new target
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+
// context save.
__ push(context);
@@ -5401,7 +5166,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
- const int kApiStackSpace = 4;
+ const int kApiStackSpace = 3;
PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace);
@@ -5412,8 +5177,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ mov(ApiParameterOperand(3), scratch);
// FunctionCallbackInfo::length_.
__ Move(ApiParameterOperand(4), Immediate(argc()));
- // FunctionCallbackInfo::is_construct_call_.
- __ Move(ApiParameterOperand(5), Immediate(0));
// v8::InvocationCallback's argument.
__ lea(scratch, ApiParameterOperand(2));
@@ -5433,8 +5196,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
Operand return_value_operand(ebp, return_value_offset * kPointerSize);
int stack_space = 0;
- Operand is_construct_call_operand = ApiParameterOperand(5);
- Operand* stack_space_operand = &is_construct_call_operand;
+ Operand length_operand = ApiParameterOperand(4);
+ Operand* stack_space_operand = &length_operand;
stack_space = argc() + FCA::kArgsLength + 1;
stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
@@ -5445,14 +5208,34 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
- // -- ...
- // -- edx : api_function_address
- // -----------------------------------
- DCHECK(edx.is(ApiGetterDescriptor::function_address()));
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = ebx;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ __ pop(scratch); // Pop return address to extend the frame.
+ __ push(receiver);
+ __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
+ __ PushRoot(Heap::kUndefinedValueRootIndex); // ReturnValue
+ // ReturnValue default value
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ push(Immediate(ExternalReference::isolate_address(isolate())));
+ __ push(holder);
+ __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
+ __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
+ __ push(scratch); // Restore return address.
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -5462,9 +5245,6 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// active) in non-GCed stack space.
const int kApiArgc = 3 + 1;
- Register api_function_address = edx;
- Register scratch = ebx;
-
// Load address of v8::PropertyAccessorInfo::args_ array.
__ lea(scratch, Operand(esp, 2 * kPointerSize));
@@ -5474,25 +5254,30 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
Operand info_object = ApiParameterOperand(3);
__ mov(info_object, scratch);
+ // Name as handle.
__ sub(scratch, Immediate(kPointerSize));
- __ mov(ApiParameterOperand(0), scratch); // name.
+ __ mov(ApiParameterOperand(0), scratch);
+ // Arguments pointer.
__ lea(scratch, info_object);
- __ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+ __ mov(ApiParameterOperand(1), scratch);
// Reserve space for optional callback address parameter.
Operand thunk_last_arg = ApiParameterOperand(2);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
+ Register function_address = edx;
+ __ mov(function_address,
+ FieldOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- thunk_last_arg, kStackUnwindSpace, nullptr,
- return_value_operand, NULL);
+ CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
+ kStackUnwindSpace, nullptr, return_value_operand,
+ NULL);
}
-
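CallApiGetterStub now builds the whole PropertyCallbackArguments array inline, pushing the seven slots in reverse index order so slot k lands at args_[k] in memory. A sketch of that layout invariant with a plain array (the indices mirror the STATIC_ASSERTs above; the pointer payloads are placeholders):

#include <cassert>

// Indices from the STATIC_ASSERTs in the stub above.
enum {
  kShouldThrowOnErrorIndex = 0,
  kHolderIndex = 1,
  kIsolateIndex = 2,
  kReturnValueDefaultValueIndex = 3,
  kReturnValueOffset = 4,
  kDataIndex = 5,
  kThisIndex = 6,
  kArgsLength = 7
};

int main() {
  // Pushing on a downward-growing stack in the order the stub uses
  // (receiver first, should_throw last) yields args_[0..6] in memory.
  const void* args[kArgsLength];
  int receiver = 0, data = 1, isolate = 2, undef = 3, holder = 4;
  args[kThisIndex] = &receiver;
  args[kDataIndex] = &data;
  args[kReturnValueOffset] = &undef;
  args[kReturnValueDefaultValueIndex] = &undef;
  args[kIsolateIndex] = &isolate;
  args[kHolderIndex] = &holder;
  args[kShouldThrowOnErrorIndex] = nullptr;  // Smi 0 -> false
  assert(args[kThisIndex] == &receiver && args[kHolderIndex] == &holder);
}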
#undef __
} // namespace internal
diff --git a/deps/v8/src/x87/code-stubs-x87.h b/deps/v8/src/x87/code-stubs-x87.h
index 39a4603626..6290cfed1c 100644
--- a/deps/v8/src/x87/code-stubs-x87.h
+++ b/deps/v8/src/x87/code-stubs-x87.h
@@ -298,8 +298,8 @@ class RecordWriteStub: public PlatformCodeStub {
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumRegisters; i++) {
- Register candidate = Register::from_code(i);
- if (candidate.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(i)) {
+ Register candidate = Register::from_code(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index 776edeb646..5cda23dcea 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -33,10 +33,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
- return nullptr;
-}
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
size_t actual_size;
@@ -269,14 +265,14 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ push(eax);
__ push(ebx);
+ __ push(esi);
__ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
- AllocationFlags flags =
- static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+ AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
__ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
@@ -302,8 +298,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Call into runtime if GC is required.
__ bind(&gc_required);
+
// Restore registers before jumping into runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(esi);
__ pop(ebx);
__ pop(eax);
__ jmp(fail);
@@ -339,12 +336,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ sub(edi, Immediate(Smi::FromInt(1)));
__ j(not_sign, &loop);
+ // Restore registers.
+ __ pop(esi);
__ pop(ebx);
__ pop(eax);
- // Restore esi.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
__ bind(&only_change_map);
// eax: value
// ebx: target map
@@ -391,7 +387,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Allocate new FixedArray.
// ebx: length of source FixedDoubleArray (smi-tagged)
__ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
+ __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
// eax: destination FixedArray
// ebx: number of elements
diff --git a/deps/v8/src/x87/codegen-x87.h b/deps/v8/src/x87/codegen-x87.h
index 170b40397a..f034a9c2fa 100644
--- a/deps/v8/src/x87/codegen-x87.h
+++ b/deps/v8/src/x87/codegen-x87.h
@@ -5,7 +5,6 @@
#ifndef V8_X87_CODEGEN_X87_H_
#define V8_X87_CODEGEN_X87_H_
-#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 9d4645e782..8df66bcfac 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -35,6 +35,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
int pc_offset = deopt_data->Pc(i)->value();
if (pc_offset == -1) continue;
+ pc_offset = pc_offset + 1; // We will encode the pc offset after the call.
DCHECK_GE(pc_offset, prev_pc_offset);
int pc_delta = pc_offset - prev_pc_offset;
// We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
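EnsureRelocSpaceForLazyDeoptimization sizes the reloc buffer from the successive (now +1-adjusted) pc offsets, charging a fixed-size RUNTIME_ENTRY record per deopt point. A small sketch of that sizing computation, assuming a flat two-byte entry cost as the comment states (the exact V8 encoding also handles long deltas, which this omits):

#include <cassert>
#include <vector>

// Estimates reloc space for a sorted list of pc offsets; constants
// are illustrative, per the "size of 2 bytes" comment above.
int EstimateRelocSpace(const std::vector<int>& pc_offsets) {
  const int kEntrySize = 2;
  int prev = 0, total = 0;
  for (int pc : pc_offsets) {
    if (pc == -1) continue;    // entry without a pc: skip
    int adjusted = pc + 1;     // offset is encoded after the call
    assert(adjusted >= prev);  // offsets must be non-decreasing
    prev = adjusted;
    total += kEntrySize;
  }
  return total;
}

int main() {
  assert(EstimateRelocSpace({-1, 4, 9}) == 4);
}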
@@ -277,8 +278,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
// Fill in the double input registers.
for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
int code = config->GetAllocatableDoubleCode(i);
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
index 91ce2272e9..657dc7be24 100644
--- a/deps/v8/src/x87/disasm-x87.cc
+++ b/deps/v8/src/x87/disasm-x87.cc
@@ -8,6 +8,7 @@
#if V8_TARGET_ARCH_X87
+#include "src/base/compiler-specific.h"
#include "src/disasm.h"
namespace disasm {
@@ -29,18 +30,19 @@ struct ByteMnemonic {
};
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
- {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER}, {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER}, {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER}, {-1, "", UNSET_OP_ORDER}};
+ {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x13, "adc", REG_OPER_OP_ORDER}, {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER}, {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER}, {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER}, {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER}, {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x39, "cmp", OPER_REG_OP_ORDER}, {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER}, {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER}, {0x86, "xchg_b", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER}, {0x8A, "mov_b", REG_OPER_OP_ORDER},
+ {0x8B, "mov", REG_OPER_OP_ORDER}, {0x8D, "lea", REG_OPER_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
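The reflowed table above is a sentinel-terminated array of {opcode, mnemonic, operand order} rows that the decoder scans linearly. A minimal sketch of that lookup scheme (struct mirrored from the source, table contents abbreviated):

#include <cassert>
#include <cstring>

enum OperandOrder { UNSET_OP_ORDER, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER };

struct ByteMnemonic {
  int b;  // opcode byte; -1 terminates the table
  const char* mnem;
  OperandOrder order;
};

static const ByteMnemonic kTwoOperands[] = {
    {0x01, "add", OPER_REG_OP_ORDER},
    {0x8B, "mov", REG_OPER_OP_ORDER},
    {-1, "", UNSET_OP_ORDER}};

// Linear scan until the -1 sentinel; small tables make this cheap.
const ByteMnemonic* Lookup(int opcode) {
  for (const ByteMnemonic* e = kTwoOperands; e->b != -1; ++e) {
    if (e->b == opcode) return e;
  }
  return nullptr;  // not a two-operand instruction
}

int main() {
  assert(std::strcmp(Lookup(0x8B)->mnem, "mov") == 0);
  assert(Lookup(0x90) == nullptr);
}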
static const ByteMnemonic zero_operands_instr[] = {
{0xC3, "ret", UNSET_OP_ORDER},
@@ -325,8 +327,7 @@ class DisassemblerX87 {
int FPUInstruction(byte* data);
int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
- void AppendToBuffer(const char* format, ...);
-
+ PRINTF_FORMAT(2, 3) void AppendToBuffer(const char* format, ...);
void UnimplementedInstruction() {
if (abort_on_unimplemented_) {
@@ -919,6 +920,10 @@ static const char* F0Mnem(byte f0byte) {
return "shrd"; // 3-operand version.
case 0xAB:
return "bts";
+ case 0xB0:
+ return "cmpxchg_b";
+ case 0xB1:
+ return "cmpxchg";
case 0xBC:
return "bsf";
case 0xBD:
@@ -942,13 +947,17 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*data == 0x2E /*cs*/) {
branch_hint = "predicted not taken";
data++;
+ } else if (*data == 0xF0 /*lock*/) {
+ AppendToBuffer("lock ");
+ data++;
}
+
bool processed = true; // Will be set to false if the current instruction
// is not in 'instructions' table.
const InstructionDesc& idesc = instruction_table_->Get(*data);
switch (idesc.type) {
case ZERO_OPERANDS_INSTR:
- AppendToBuffer(idesc.mnem);
+ AppendToBuffer("%s", idesc.mnem);
data++;
break;
@@ -1161,6 +1170,24 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (f0byte == 0xB0) {
+ // cmpxchg_b
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else if (f0byte == 0xB1) {
+ // cmpxchg
+ data += 2;
+ data += PrintOperands(f0mnem, OPER_REG_OP_ORDER, data);
+ } else if (f0byte == 0xBC) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
} else if (f0byte == 0xBD) {
data += 2;
int mod, regop, rm;
@@ -1262,11 +1289,25 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
while (*data == 0x66) data++;
if (*data == 0xf && data[1] == 0x1f) {
AppendToBuffer("nop"); // 0x66 prefix
- } else if (*data == 0x90) {
- AppendToBuffer("nop"); // 0x66 prefix
- } else if (*data == 0x8B) {
+ } else if (*data == 0x39) {
data++;
- data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+ data += PrintOperands("cmpw", OPER_REG_OP_ORDER, data);
+ } else if (*data == 0x3B) {
+ data++;
+ data += PrintOperands("cmpw", REG_OPER_OP_ORDER, data);
+ } else if (*data == 0x81) {
+ data++;
+ AppendToBuffer("cmpw ");
+ data += PrintRightOperand(data);
+ int imm = *reinterpret_cast<int16_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 2;
+ } else if (*data == 0x87) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("xchg_w %s,", NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
} else if (*data == 0x89) {
data++;
int mod, regop, rm;
@@ -1274,6 +1315,11 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("mov_w ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else if (*data == 0x8B) {
+ data++;
+ data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
+ } else if (*data == 0x90) {
+ AppendToBuffer("nop"); // 0x66 prefix
} else if (*data == 0xC7) {
data++;
AppendToBuffer("%s ", "mov_w");
@@ -1505,6 +1551,9 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0xB1) {
+ data++;
+ data += PrintOperands("cmpxchg_w", OPER_REG_OP_ORDER, data);
} else {
UnimplementedInstruction();
}
@@ -1744,7 +1793,7 @@ static const char* const xmm_regs[8] = {
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@@ -1807,7 +1856,7 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p", prev_pc);
+ fprintf(f, "%p", static_cast<void*>(prev_pc));
fprintf(f, " ");
for (byte* bp = prev_pc; bp < pc; bp++) {
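The two fprintf fixes matter because %p is only defined for void*; passing a byte* is undefined behavior and trips -Wformat on newer compilers. The safe pattern:

#include <cstdio>

int main() {
  unsigned char buffer[16];
  unsigned char* pc = buffer;
  // Wrong (UB, format warning): std::printf("%p\n", pc);
  std::printf("%p\n", static_cast<void*>(pc));  // cast to void* first
}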
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index e41d42cdf5..4ef88e87dc 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -11,6 +11,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+ CallInterfaceDescriptorData* data, int register_parameter_count) {
+ const Register default_stub_registers[] = {eax, ebx, ecx, edx, edi};
+ CHECK_LE(static_cast<size_t>(register_parameter_count),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(register_parameter_count,
+ default_stub_registers);
+}
+
+const Register FastNewFunctionContextDescriptor::FunctionRegister() {
+ return edi;
+}
+const Register FastNewFunctionContextDescriptor::SlotsRegister() { return eax; }
const Register LoadDescriptor::ReceiverRegister() { return edx; }
const Register LoadDescriptor::NameRegister() { return ecx; }
@@ -22,13 +35,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
const Register StoreDescriptor::ValueRegister() { return eax; }
+const Register StoreDescriptor::SlotRegister() { return edi; }
-
-const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
-
-
-const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
-
+const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
const Register VectorStoreTransitionDescriptor::SlotRegister() {
return no_reg;
@@ -44,23 +53,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return edi; }
const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-
-
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-const Register InstanceOfDescriptor::LeftRegister() { return edx; }
-const Register InstanceOfDescriptor::RightRegister() { return eax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
-
-const Register ApiGetterDescriptor::function_address() { return edx; }
-
+const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
+const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
const Register MathPowTaggedDescriptor::exponent() { return eax; }
@@ -80,13 +81,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void FastNewContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi, edx};
@@ -250,50 +244,37 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
-void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
// ebx -- allocation site with elements kind
- Register registers[] = {edi, ebx};
+ Register registers[] = {edi, ebx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {edi, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::
- InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// eax -- number of arguments
// edi -- function
- Register registers[] = {edi};
+ // ebx -- allocation site with elements kind
+ Register registers[] = {edi, ebx, eax};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // stack param count needs (constructor pointer, and single argument)
- Register registers[] = {edi, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
+ // register state
+ // eax -- number of arguments
+ // edi -- function
+ // ebx -- allocation site with elements kind
+ Register registers[] = {edi, ebx, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {eax};
@@ -320,6 +301,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // register state
+ // edx -- lhs
+ // eax -- rhs
+ // edi -- slot id
+ // ebx -- vector
+ Register registers[] = {edx, eax, edi, ebx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void CountOpDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -380,8 +377,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
- kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister};
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -416,6 +413,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void ResumeGeneratorDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // the value to pass to the generator
+ ebx, // the JSGeneratorObject to resume
+ edx // the resume mode (tagged)
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index b46167d1f9..9ffbf9f34b 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -973,6 +973,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAGeneratorObject);
+ Push(object);
+ CmpObjectType(object, JS_GENERATOR_OBJECT_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAGeneratorObject);
+ }
+}
+
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -1032,8 +1043,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
+ mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@@ -1067,8 +1078,27 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
+void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Push(ebp);
+ Move(ebp, esp);
+ Push(context);
+ Push(target);
+ Push(argc);
+}
+
+void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
+ Register argc) {
+ Pop(argc);
+ Pop(target);
+ Pop(context);
+ leave();
+}
+
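EnterBuiltinFrame and LeaveBuiltinFrame must push and pop the same three slots in mirror order around ebp. In C++ that pairing discipline is naturally expressed as a scope guard; a sketch under that framing (BuiltinFrameScope and Stack are illustrative, not V8 classes):

#include <cassert>
#include <vector>

struct Stack {
  std::vector<int> s;
  void push(int v) { s.push_back(v); }
  int pop() { int v = s.back(); s.pop_back(); return v; }
};

// Pushes context/target/argc on entry and pops them in reverse on
// exit, mirroring Enter/LeaveBuiltinFrame.
class BuiltinFrameScope {
 public:
  BuiltinFrameScope(Stack& st, int context, int target, int argc)
      : st_(st) {
    st_.push(context);
    st_.push(target);
    st_.push(argc);
  }
  ~BuiltinFrameScope() {
    st_.pop();  // argc
    st_.pop();  // target
    st_.pop();  // context
  }

 private:
  Stack& st_;
};

int main() {
  Stack st;
  { BuiltinFrameScope scope(st, /*context=*/1, /*target=*/2, /*argc=*/3); }
  assert(st.s.empty());  // frame fully unwound
}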
+void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
-void MacroAssembler::EnterExitFramePrologue() {
// Set up the frame structure on the stack.
DCHECK_EQ(+2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(+1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
@@ -1077,7 +1107,7 @@ void MacroAssembler::EnterExitFramePrologue() {
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
- push(Immediate(Smi::FromInt(StackFrame::EXIT)));
+ push(Immediate(Smi::FromInt(frame_type)));
DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
push(Immediate(0)); // Saved entry sp, patched before call.
DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
@@ -1116,9 +1146,9 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
mov(Operand(ebp, ExitFrameConstants::kSPOffset), esp);
}
-
-void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
- EnterExitFramePrologue();
+void MacroAssembler::EnterExitFrame(int argc, bool save_doubles,
+ StackFrame::Type frame_type) {
+ EnterExitFramePrologue(frame_type);
// Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
@@ -1131,7 +1161,7 @@ void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
void MacroAssembler::EnterApiExitFrame(int argc) {
- EnterExitFramePrologue();
+ EnterExitFramePrologue(StackFrame::EXIT);
EnterExitFrameEpilogue(argc, false);
}
@@ -1458,6 +1488,7 @@ void MacroAssembler::Allocate(int object_size,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1499,26 +1530,23 @@ void MacroAssembler::Allocate(int object_size,
// Calculate new top and bail out if space is exhausted.
Register top_reg = result_end.is_valid() ? result_end : result;
+
if (!top_reg.is(result)) {
mov(top_reg, result);
}
add(top_reg, Immediate(object_size));
- j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch, flags);
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
+ }
- // Tag result if requested.
- bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if (tag_result) {
- sub(result, Immediate(object_size - kHeapObjectTag));
- } else {
- sub(result, Immediate(object_size));
- }
- } else if (tag_result) {
+ sub(result, Immediate(object_size - kHeapObjectTag));
+ } else {
+ // Tag the result.
DCHECK(kHeapObjectTag == 1);
inc(result);
}
@@ -1535,6 +1563,8 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & ALLOCATION_FOLDING_DOMINATOR) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1592,16 +1622,14 @@ void MacroAssembler::Allocate(int header_size,
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- if ((flags & TAG_OBJECT) != 0) {
- DCHECK(kHeapObjectTag == 1);
- inc(result);
- }
+ // Tag result.
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch, flags);
}
-
void MacroAssembler::Allocate(Register object_size,
Register result,
Register result_end,
@@ -1609,6 +1637,7 @@ void MacroAssembler::Allocate(Register object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1652,20 +1681,66 @@ void MacroAssembler::Allocate(Register object_size,
mov(result_end, object_size);
}
add(result_end, result);
- j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
- // Tag result if requested.
- if ((flags & TAG_OBJECT) != 0) {
- DCHECK(kHeapObjectTag == 1);
- inc(result);
+ // Tag result.
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
+
+ if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+ // The top pointer is not updated for allocation folding dominators.
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
+}
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch, flags);
+void MacroAssembler::FastAllocate(int object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ lea(result_end, Operand(result, object_size));
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
}
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+ Register result_end, AllocationFlags flags) {
+ DCHECK(!result.is(result_end));
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result, no_reg, flags);
+
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
+ lea(result_end, Operand(result, object_size, times_1, 0));
+ UpdateAllocationTopHelper(result_end, no_reg, flags);
+
+ DCHECK(kHeapObjectTag == 1);
+ inc(result);
+}
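Taken together, the constrained Allocate variants and the new FastAllocate helpers implement allocation folding: a dominator performs a single limit check for the combined size but leaves the allocation top untouched, and the folded allocations that follow bump the top without rechecking it. A minimal sketch of the intended pairing, assuming hypothetical sizes kFirstSize and kSecondSize, free ia32 registers, and a gc_required bailout label (an illustration, not code from this patch):

    // Dominator: limit-checked for the combined size; top is not updated.
    masm->Allocate(kFirstSize + kSecondSize, eax, ebx, ecx, &gc_required,
                   ALLOCATION_FOLDING_DOMINATOR);
    // Folded allocations: each bumps top without a limit check, which is
    // safe only because the dominator already proved the space exists.
    masm->FastAllocate(kFirstSize, eax, ebx, NO_ALLOCATION_FLAGS);
    masm->FastAllocate(kSecondSize, eax, ebx, NO_ALLOCATION_FLAGS);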
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
@@ -1674,7 +1749,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
MutableMode mode) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
Handle<Map> map = mode == MUTABLE
? isolate()->factory()->mutable_heap_number_map()
@@ -1700,15 +1775,9 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
+ REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1734,15 +1803,9 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize,
- times_1,
- scratch1,
- REGISTER_VALUE_IS_INT32,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
+ REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1762,7 +1825,7 @@ void MacroAssembler::AllocateOneByteString(Register result, int length,
// Allocate one-byte string in new space.
Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
- gc_required, TAG_OBJECT);
+ gc_required, NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1780,7 +1843,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1792,12 +1855,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1811,7 +1870,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1825,7 +1884,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Label* gc_required) {
// Allocate heap number in new space.
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
+ NO_ALLOCATION_FLAGS);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
@@ -1841,7 +1900,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
- Allocate(JSValue::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
+ Allocate(JSValue::kSize, result, scratch, no_reg, gc_required,
+ NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch);
@@ -2100,11 +2160,12 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
+void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
+ bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
- CEntryStub ces(isolate(), 1);
+ CEntryStub ces(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
+ builtin_exit_frame);
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
@@ -2261,10 +2322,11 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
- ExternalReference step_in_enabled =
- ExternalReference::debug_step_in_enabled_address(isolate());
- cmpb(Operand::StaticVariable(step_in_enabled), Immediate(0));
- j(equal, &skip_flooding);
+ ExternalReference last_step_action =
+ ExternalReference::debug_last_step_action_address(isolate());
+ STATIC_ASSERT(StepFrame > StepIn);
+ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
+ j(less, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
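The rewritten guard compares the debugger's last step action against StepIn instead of testing a boolean, so flooding now happens for StepIn and any stronger action. A C-level restatement of what the cmpb/j(less) pair encodes, assuming V8's StepAction ordering where StepNone = -1 and StepOut, StepNext, StepIn, StepFrame follow in that order:

    // Sketch of the predicate behind the generated compare-and-branch.
    bool ShouldFloodWithOneShot(int last_step_action) {
      // j(less, &skip_flooding) skips when last_step_action < StepIn; the
      // STATIC_ASSERT above guarantees StepFrame also takes the flood path.
      return last_step_action >= StepIn;
    }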
@@ -2492,37 +2554,15 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
- AllowDeferredHandleDereference embedding_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- mov(result, Operand::ForCell(cell));
- } else {
- mov(result, object);
- }
+ mov(result, object);
}
void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- cmp(reg, Operand::ForCell(cell));
- } else {
- cmp(reg, object);
- }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
- AllowDeferredHandleDereference using_raw_address;
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<Cell> cell = isolate()->factory()->NewCell(object);
- push(Operand::ForCell(cell));
- } else {
- Push(object);
- }
+ cmp(reg, object);
}
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { Push(object); }
void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
Register scratch) {
@@ -2600,7 +2640,7 @@ void MacroAssembler::Move(Register dst, Register src) {
void MacroAssembler::Move(Register dst, const Immediate& x) {
- if (x.is_zero()) {
+ if (x.is_zero() && RelocInfo::IsNone(x.rmode_)) {
xor_(dst, dst); // Shorter than mov of 32-bit immediate 0.
} else {
mov(dst, x);
@@ -2770,15 +2810,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(reason))));
+ // Check if Abort() has already been initialized.
+ DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
+
+ Move(edx, Smi::FromInt(static_cast<int>(reason)));
+
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
- CallRuntime(Runtime::kAbort);
+ Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
int3();
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index 55714132f7..13988aee67 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -19,10 +19,11 @@ const Register kReturnRegister1 = {Register::kCode_edx};
const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
+const Register kAllocateSizeRegister = {Register::kCode_edx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
-const Register kInterpreterRegisterFileRegister = {Register::kCode_edx};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
+const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
@@ -243,7 +244,7 @@ class MacroAssembler: public Assembler {
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
// esi.
- void EnterExitFrame(int argc, bool save_doubles);
+ void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);
void EnterApiExitFrame(int argc);
@@ -499,6 +500,23 @@ class MacroAssembler: public Assembler {
j(not_zero, not_smi_label, distance);
}
+ // Jump if the value cannot be represented by a smi.
+ inline void JumpIfNotValidSmiValue(Register value, Register scratch,
+ Label* on_invalid,
+ Label::Distance distance = Label::kFar) {
+ mov(scratch, value);
+ add(scratch, Immediate(0x40000000U));
+ j(sign, on_invalid, distance);
+ }
+
+ // Jump if the unsigned integer value cannot be represented by a smi.
+ inline void JumpIfUIntNotValidSmiValue(
+ Register value, Label* on_invalid,
+ Label::Distance distance = Label::kFar) {
+ cmp(value, Immediate(0x40000000U));
+ j(above_equal, on_invalid, distance);
+ }
+
void LoadInstanceDescriptors(Register map, Register descriptors);
void EnumLength(Register dst, Register map);
void NumberOfOwnDescriptors(Register dst, Register map);
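Both helpers fold the 31-bit smi range test into a single arithmetic operation instead of two comparisons. A standalone restatement of the checks they emit (a sketch for illustration, not V8 code):

    #include <cstdint>

    // On ia32/x87, valid smi payloads are -2^30 .. 2^30 - 1.
    bool IsValidSmiValue(int32_t v) {
      // Adding 2^30 shifts the valid range to [0, 2^31), i.e. the sign bit
      // is clear, which is exactly what add + j(sign, ...) tests above.
      return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
    }

    bool IsUIntValidSmiValue(uint32_t v) {
      // Unsigned values fit only below 2^30 (the cmp + j(above_equal) test).
      return v < 0x40000000u;
    }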
@@ -552,6 +570,10 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSGeneratorObject,
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@@ -607,6 +629,14 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
+ // FastAllocate is currently only used for folded allocations. It just
+ // increments the top pointer without checking against the limit. This can
+ // only be done if it was proved earlier that the allocation will succeed.
+ void FastAllocate(int object_size, Register result, Register result_end,
+ AllocationFlags flags);
+ void FastAllocate(Register object_size, Register result, Register result_end,
+ AllocationFlags flags);
+
// Allocate a heap number in new space with undefined value. The
// register scratch2 can be passed as no_reg; the others must be
// valid registers. Returns tagged pointer in result register, or
@@ -745,7 +775,8 @@ class MacroAssembler: public Assembler {
void CallCFunction(Register function, int num_arguments);
// Jump to a runtime routine.
- void JumpToExternalReference(const ExternalReference& ext);
+ void JumpToExternalReference(const ExternalReference& ext,
+ bool builtin_exit_frame = false);
// ---------------------------------------------------------------------------
// Utilities
@@ -880,6 +911,9 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
+ void EnterBuiltinFrame(Register context, Register target, Register argc);
+ void LeaveBuiltinFrame(Register context, Register target, Register argc);
+
// Expects object in eax and returns map with validated enum cache
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@@ -921,7 +955,7 @@ class MacroAssembler: public Assembler {
Label::Distance done_distance,
const CallWrapper& call_wrapper);
- void EnterExitFramePrologue();
+ void EnterExitFramePrologue(StackFrame::Type frame_type);
void EnterExitFrameEpilogue(int argc, bool save_doubles);
void LeaveExitFrameEpilogue(bool restore_context);
@@ -1005,26 +1039,7 @@ inline Operand NativeContextOperand() {
return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}
-#ifdef GENERATED_CODE_COVERAGE
-extern void LogGeneratedCodeCoverage(const char* file_line);
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* ia32_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(eax); \
- masm->popad(); \
- masm->popfd(); \
- } \
- masm->
-#else
#define ACCESS_MASM(masm) masm->
-#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h
index fa21155fe1..29055cb70d 100644
--- a/deps/v8/src/zone.h
+++ b/deps/v8/src/zone.h
@@ -8,9 +8,9 @@
#include <limits>
#include "src/base/accounting-allocator.h"
+#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/globals.h"
-#include "src/hashmap.h"
#include "src/list.h"
#include "src/splay-tree.h"
@@ -244,8 +244,7 @@ class ZoneSplayTree final : public SplayTree<Config, ZoneAllocationPolicy> {
void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
};
-
-typedef TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
+typedef base::TemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
new file mode 100644
index 0000000000..36ca7a2049
--- /dev/null
+++ b/deps/v8/test/BUILD.gn
@@ -0,0 +1,194 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../gni/isolate.gni")
+
+group("gn_all") {
+ testonly = true
+
+ deps = [
+ ":default_tests",
+ ]
+
+ if (host_os != "mac" || !is_android) {
+ # These items don't compile for Android on Mac.
+ deps += [
+ "cctest:cctest",
+ "cctest:generate-bytecode-expectations",
+ "unittests:unittests",
+ ]
+ }
+
+ if (v8_test_isolation_mode != "noop") {
+ deps += [
+ ":benchmarks_run",
+ ":bot_default_run",
+ ":default_run",
+ ":mozilla_run",
+ ":simdjs_run",
+ "test262:test262_run",
+ ]
+ }
+}
+
+###############################################################################
+# Test groups
+#
+
+group("default_tests") {
+ testonly = true
+
+ if (v8_test_isolation_mode != "noop") {
+ deps = [
+ ":cctest_run",
+ ":fuzzer_run",
+ ":intl_run",
+ ":message_run",
+ ":mjsunit_run",
+ ":preparser_run",
+ ":unittests_run",
+ ]
+ }
+}
+
+v8_isolate_run("bot_default") {
+ deps = [
+ ":default_tests",
+ ":webkit_run",
+ ]
+
+ isolate = "bot_default.isolate"
+}
+
+v8_isolate_run("default") {
+ deps = [
+ ":default_tests",
+ ]
+
+ isolate = "default.isolate"
+}
+
+v8_isolate_run("optimize_for_size") {
+ deps = [
+ ":cctest_run",
+ ":intl_run",
+ ":mjsunit_run",
+ ":webkit_run",
+ ]
+
+ isolate = "optimize_for_size.isolate"
+}
+
+v8_isolate_run("perf") {
+ deps = [
+ ":cctest_exe_run",
+ "..:d8_run",
+ ]
+
+ isolate = "perf.isolate"
+}
+
+###############################################################################
+# Subtests
+#
+
+v8_isolate_run("benchmarks") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "benchmarks/benchmarks.isolate"
+}
+
+v8_isolate_run("cctest") {
+ deps = [
+ ":cctest_exe_run",
+ ]
+
+ isolate = "cctest/cctest.isolate"
+}
+
+v8_isolate_run("cctest_exe") {
+ deps = [
+ "cctest:cctest",
+ ]
+
+ isolate = "cctest/cctest_exe.isolate"
+}
+
+v8_isolate_run("fuzzer") {
+ deps = [
+ "..:v8_simple_json_fuzzer",
+ "..:v8_simple_parser_fuzzer",
+ "..:v8_simple_regexp_fuzzer",
+ "..:v8_simple_wasm_asmjs_fuzzer",
+ "..:v8_simple_wasm_fuzzer",
+ ]
+
+ isolate = "fuzzer/fuzzer.isolate"
+}
+
+v8_isolate_run("intl") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "intl/intl.isolate"
+}
+
+v8_isolate_run("message") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "message/message.isolate"
+}
+
+v8_isolate_run("mjsunit") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "mjsunit/mjsunit.isolate"
+}
+
+v8_isolate_run("mozilla") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "mozilla/mozilla.isolate"
+}
+
+v8_isolate_run("preparser") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "preparser/preparser.isolate"
+}
+
+v8_isolate_run("simdjs") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "simdjs/simdjs.isolate"
+}
+
+v8_isolate_run("unittests") {
+ deps = [
+ "unittests:unittests",
+ ]
+
+ isolate = "unittests/unittests.isolate"
+}
+
+v8_isolate_run("webkit") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "webkit/webkit.isolate"
+}
diff --git a/deps/v8/test/benchmarks/benchmarks.gyp b/deps/v8/test/benchmarks/benchmarks.gyp
index 3884b0901f..0822ee4ecb 100644
--- a/deps/v8/test/benchmarks/benchmarks.gyp
+++ b/deps/v8/test/benchmarks/benchmarks.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'benchmarks.isolate',
diff --git a/deps/v8/test/bot_default.gyp b/deps/v8/test/bot_default.gyp
index 9b39f58412..04679183b4 100644
--- a/deps/v8/test/bot_default.gyp
+++ b/deps/v8/test/bot_default.gyp
@@ -20,8 +20,8 @@
'webkit/webkit.gyp:webkit_run',
],
'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'bot_default.isolate',
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
new file mode 100644
index 0000000000..db94e0c7ff
--- /dev/null
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -0,0 +1,173 @@
+# Copyright 2016 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# The sources are kept automatically in sync with cctest.gyp.
+
+import("../../gni/v8.gni")
+
+gypi_values = exec_script("//build/gypi_to_gn.py",
+ [ rebase_path("cctest.gyp") ],
+ "scope",
+ [ "cctest.gyp" ])
+
+v8_executable("cctest") {
+ testonly = true
+
+ sources = [ "$target_gen_dir/resources.cc" ] + gypi_values.cctest_sources
+
+ if (v8_current_cpu == "arm") {
+ sources += gypi_values.cctest_sources_arm
+ } else if (v8_current_cpu == "arm64") {
+ sources += gypi_values.cctest_sources_arm64
+ } else if (v8_current_cpu == "x86") {
+ sources += gypi_values.cctest_sources_ia32
+ } else if (v8_current_cpu == "mips") {
+ sources += gypi_values.cctest_sources_mips
+ } else if (v8_current_cpu == "mipsel") {
+ sources += gypi_values.cctest_sources_mipsel
+ } else if (v8_current_cpu == "mips64") {
+ sources += gypi_values.cctest_sources_mips64
+ } else if (v8_current_cpu == "mips64el") {
+ sources += gypi_values.cctest_sources_mips64el
+ } else if (v8_current_cpu == "x64") {
+ sources += gypi_values.cctest_sources_x64
+ } else if (v8_current_cpu == "x87") {
+ sources += gypi_values.cctest_sources_x87
+ } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+ sources += gypi_values.cctest_sources_ppc
+ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ sources += gypi_values.cctest_sources_s390
+ }
+
+ if (is_linux) {
+ # TODO(machenbach): Translate 'or OS=="qnx"' from gyp.
+ sources += [ "test-platform-linux.cc" ]
+ } else if (is_win) {
+ sources += [ "test-platform-win32.cc" ]
+ }
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ # TODO(machenbach): Translate from gyp.
+ #["OS=="aix"", {
+ # "ldflags": [ "-Wl,-bbigtoc" ],
+ #}],
+
+ deps = [
+ ":resources",
+ "../..:v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ if (is_component_build) {
+ # cctest can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ deps += [ "../..:v8_maybe_snapshot" ]
+ } else {
+ deps += [ "../..:v8" ]
+ }
+
+ cflags = []
+ ldflags = []
+
+ if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
+ v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
+ v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ # Disable fmadd/fmsub so that expected results match generated code in
+ # RunFloat64MulAndFloat64Add1 and friends.
+ cflags += [ "-ffp-contract=off" ]
+ }
+
+ if (is_win) {
+ # This warning is benignly triggered by the U16 and U32 macros in
+ # bytecode-utils.h.
+ # C4309: 'static_cast': truncation of constant value
+ cflags += [ "/wd4309" ]
+
+ # MSVS wants this for gay-{precision,shortest}.cc.
+ cflags += [ "/bigobj" ]
+
+ # Suppress warnings about importing locally defined symbols.
+ if (is_component_build) {
+ ldflags += [
+ "/ignore:4049",
+ "/ignore:4217",
+ ]
+ }
+ }
+}
+
+action("resources") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ script = "../../tools/js2c.py"
+
+ # The script depends on this other script; listing it as an input causes a
+ # rebuild if it changes.
+ inputs = [
+ "../../tools/jsmin.py",
+ ]
+
+ # NOSORT
+ sources = [
+ "../../tools/splaytree.js",
+ "../../tools/codemap.js",
+ "../../tools/csvparser.js",
+ "../../tools/consarray.js",
+ "../../tools/profile.js",
+ "../../tools/profile_view.js",
+ "../../tools/logreader.js",
+ "log-eq-of-logging-and-traversal.js",
+ ]
+
+ outputs = [
+ "$target_gen_dir/resources.cc",
+ ]
+
+ args = [
+ rebase_path("$target_gen_dir/resources.cc", root_build_dir),
+ "TEST",
+ ]
+ args += rebase_path(sources, root_build_dir)
+}
+
+v8_executable("generate-bytecode-expectations") {
+ sources = [
+ "interpreter/bytecode-expectations-printer.cc",
+ "interpreter/bytecode-expectations-printer.h",
+ "interpreter/generate-bytecode-expectations.cc",
+ ]
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ deps = [
+ "../..:v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
+
+ if (is_component_build) {
+ # Same as cctest, we need to depend on the underlying static target.
+ deps += [ "../..:v8_maybe_snapshot" ]
+ } else {
+ deps += [ "../..:v8" ]
+ }
+
+ if (is_win) {
+ # Suppress warnings about importing locally defined symbols.
+ if (is_component_build) {
+ ldflags = [
+ "/ignore:4049",
+ "/ignore:4217",
+ ]
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 4b2b7c51e5..06141ef626 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -16,7 +16,3 @@ per-file *-s390*=mbrandy@us.ibm.com
per-file *-s390*=michael_dawson@ca.ibm.com
per-file *-x87*=chunyang.dai@intel.com
per-file *-x87*=weiliang.lin@intel.com
-per-file expression-type-collector*=aseemgarg@chromium.org
-per-file expression-type-collector*=bradnelson@chromium.org
-per-file test-asm-validator.cc=aseemgarg@chromium.org
-per-file test-asm-validator.cc=bradnelson@chromium.org
diff --git a/deps/v8/test/cctest/asmjs/OWNERS b/deps/v8/test/cctest/asmjs/OWNERS
new file mode 100644
index 0000000000..d8fad3059a
--- /dev/null
+++ b/deps/v8/test/cctest/asmjs/OWNERS
@@ -0,0 +1,10 @@
+# Keep in sync with src/asmjs/OWNERS.
+
+set noparent
+
+ahaas@chromium.org
+bradnelson@chromium.org
+jpp@chromium.org
+mtrofin@chromium.org
+rossberg@chromium.org
+titzer@chromium.org
diff --git a/deps/v8/test/cctest/asmjs/test-asm-typer.cc b/deps/v8/test/cctest/asmjs/test-asm-typer.cc
new file mode 100644
index 0000000000..dcb778533d
--- /dev/null
+++ b/deps/v8/test/cctest/asmjs/test-asm-typer.cc
@@ -0,0 +1,2003 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+#include <functional>
+#include <iostream>
+#include <memory>
+
+#include "src/asmjs/asm-typer.h"
+#include "src/asmjs/asm-types.h"
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/base/platform/platform.h"
+#include "src/compiler.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+namespace iw = v8::internal::wasm;
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+enum ValidationType {
+ ValidateModule,
+ ValidateGlobals,
+ ValidateFunctionTables,
+ ValidateExport,
+ ValidateFunction,
+ ValidateStatement,
+ ValidateExpression,
+};
+} // namespace
+
+class AsmTyperHarnessBuilder {
+ public:
+ AsmTyperHarnessBuilder(const char* source, ValidationType type)
+ : source_(source),
+ validation_type_(type),
+ handles_(),
+ zone_(handles_.main_zone()),
+ isolate_(CcTest::i_isolate()),
+ ast_value_factory_(zone_, isolate_->heap()->HashSeed()),
+ factory_(isolate_->factory()),
+ source_code_(
+ factory_->NewStringFromUtf8(CStrVector(source)).ToHandleChecked()),
+ script_(factory_->NewScript(source_code_)) {
+ ParseInfo info(zone_, script_);
+ info.set_global();
+ info.set_lazy(false);
+ info.set_allow_lazy_parsing(false);
+ info.set_toplevel(true);
+ info.set_ast_value_factory(&ast_value_factory_);
+ info.set_ast_value_factory_owned(false);
+ Parser parser(&info);
+
+ if (!Compiler::ParseAndAnalyze(&info)) {
+ std::cerr << "Failed to parse:\n" << source_ << "\n";
+ CHECK(false);
+ }
+
+ outer_scope_ = info.script_scope();
+ module_ =
+ info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun();
+ typer_.reset(new AsmTyper(isolate_, zone_, *script_, module_));
+
+ if (validation_type_ == ValidateStatement ||
+ validation_type_ == ValidateExpression) {
+ fun_scope_.reset(new AsmTyper::FunctionScope(typer_.get()));
+
+ auto* decls = module_->scope()->declarations();
+ for (int ii = 0; ii < decls->length(); ++ii) {
+ Declaration* decl = decls->at(ii);
+ if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
+ fun_decl_ = fun_decl;
+ break;
+ }
+ }
+ CHECK_NOT_NULL(fun_decl_);
+ }
+ }
+
+ struct VariableName {
+ VariableName(const char* name, VariableMode mode)
+ : name_(name), mode_(mode) {}
+ VariableName(const VariableName&) = default;
+ VariableName& operator=(const VariableName&) = default;
+
+ const char* name_;
+ const VariableMode mode_;
+ };
+
+ AsmTyperHarnessBuilder* WithLocal(VariableName var_name, AsmType* type) {
+ CHECK(validation_type_ == ValidateStatement ||
+ validation_type_ == ValidateExpression);
+ auto* var = DeclareVariable(var_name);
+ auto* var_info = new (zone_) AsmTyper::VariableInfo(type);
+ var_info->set_mutability(AsmTyper::VariableInfo::kLocal);
+ CHECK(typer_->AddLocal(var, var_info));
+ return this;
+ }
+
+ AsmTyperHarnessBuilder* WithGlobal(VariableName var_name, AsmType* type) {
+ auto* var = DeclareVariable(var_name);
+ auto* var_info = new (zone_) AsmTyper::VariableInfo(type);
+ var_info->set_mutability(AsmTyper::VariableInfo::kMutableGlobal);
+ CHECK(typer_->AddGlobal(var, var_info));
+ return this;
+ }
+
+ AsmTyperHarnessBuilder* WithGlobal(
+ VariableName var_name, std::function<AsmType*(Zone*)> type_creator) {
+ return WithGlobal(var_name, type_creator(zone_));
+ }
+
+ AsmTyperHarnessBuilder* WithUndefinedGlobal(
+ VariableName var_name, std::function<AsmType*(Zone*)> type_creator) {
+ auto* type = type_creator(zone_);
+ CHECK(type->AsFunctionType() != nullptr ||
+ type->AsFunctionTableType() != nullptr);
+ WithGlobal(var_name, type);
+ auto* var_info = typer_->Lookup(DeclareVariable(var_name));
+ CHECK(var_info);
+ var_info->FirstForwardUseIs(nullptr);
+ return this;
+ }
+
+ AsmTyperHarnessBuilder* WithImport(VariableName var_name,
+ AsmTyper::StandardMember standard_member) {
+ auto* var = DeclareVariable(var_name);
+ AsmTyper::VariableInfo* var_info = nullptr;
+ auto* stdlib_map = &typer_->stdlib_math_types_;
+ switch (standard_member) {
+ case AsmTyper::kHeap:
+ case AsmTyper::kStdlib:
+ case AsmTyper::kModule:
+ case AsmTyper::kNone:
+ CHECK(false);
+ case AsmTyper::kFFI:
+ stdlib_map = nullptr;
+ var_info = new (zone_) AsmTyper::VariableInfo(AsmType::FFIType(zone_));
+ var_info->set_mutability(AsmTyper::VariableInfo::kImmutableGlobal);
+ break;
+ case AsmTyper::kInfinity:
+ case AsmTyper::kNaN:
+ stdlib_map = &typer_->stdlib_types_;
+ default:
+ break;
+ }
+
+ if (var_info == nullptr) {
+ for (auto iter : *stdlib_map) {
+ if (iter.second->standard_member() == standard_member) {
+ var_info = iter.second;
+ break;
+ }
+ }
+
+ CHECK(var_info != nullptr);
+ var_info = var_info->Clone(zone_);
+ }
+
+ CHECK(typer_->AddGlobal(var, var_info));
+ return this;
+ }
+
+ AsmTyperHarnessBuilder* WithReturnType(AsmType* type) {
+ CHECK(type->IsReturnType());
+ CHECK(typer_->return_type_ == AsmType::None());
+ typer_->return_type_ = type;
+ return this;
+ }
+
+ AsmTyperHarnessBuilder* WithStdlib(VariableName var_name) {
+ auto* var = DeclareVariable(var_name);
+ auto* var_info =
+ AsmTyper::VariableInfo::ForSpecialSymbol(zone_, AsmTyper::kStdlib);
+ CHECK(typer_->AddGlobal(var, var_info));
+ return this;
+ }
+
+ AsmTyperHarnessBuilder* WithHeap(VariableName var_name) {
+ auto* var = DeclareVariable(var_name);
+ auto* var_info =
+ AsmTyper::VariableInfo::ForSpecialSymbol(zone_, AsmTyper::kHeap);
+ CHECK(typer_->AddGlobal(var, var_info));
+ return this;
+ }
+
+ AsmTyperHarnessBuilder* WithFFI(VariableName var_name) {
+ auto* var = DeclareVariable(var_name);
+ auto* var_info =
+ AsmTyper::VariableInfo::ForSpecialSymbol(zone_, AsmTyper::kFFI);
+ CHECK(typer_->AddGlobal(var, var_info));
+ return this;
+ }
+
+ bool Succeeds() {
+ CHECK(validation_type_ == ValidateModule ||
+ validation_type_ == ValidateGlobals ||
+ validation_type_ == ValidateFunctionTables ||
+ validation_type_ == ValidateExport ||
+ validation_type_ == ValidateFunction ||
+ validation_type_ == ValidateStatement);
+
+ if (validation_type_ == ValidateStatement) {
+ CHECK(typer_->return_type_ != AsmType::None());
+ if (ValidateAllStatements(fun_decl_)) {
+ return true;
+ }
+ } else if (typer_->Validate()) {
+ return true;
+ }
+
+ std::cerr << "Asm validation failed: " << typer_->error_message() << "\n";
+ return false;
+ }
+
+ bool SucceedsWithExactType(AsmType* type) {
+ CHECK(validation_type_ == ValidateExpression);
+ auto* validated_as = ValidateExpressionStatement(fun_decl_);
+ if (validated_as == AsmType::None()) {
+ std::cerr << "Validation failure: " << typer_->error_message() << "\n";
+ return false;
+ } else if (validated_as != type) {
+ std::cerr << "Validation succeeded with wrong type "
+ << validated_as->Name() << " (vs. " << type->Name() << ").\n";
+ return false;
+ }
+
+ return true;
+ }
+
+ bool FailsWithMessage(const char* error_message) {
+ CHECK(validation_type_ == ValidateModule ||
+ validation_type_ == ValidateGlobals ||
+ validation_type_ == ValidateFunctionTables ||
+ validation_type_ == ValidateExport ||
+ validation_type_ == ValidateFunction ||
+ validation_type_ == ValidateStatement ||
+ validation_type_ == ValidateExpression);
+
+ bool success;
+ if (validation_type_ == ValidateStatement) {
+ CHECK(typer_->return_type_ != AsmType::None());
+ success = ValidateAllStatements(fun_decl_);
+ } else if (validation_type_ == ValidateExpression) {
+ success = ValidateExpressionStatement(fun_decl_) != AsmType::None();
+ } else {
+ success = typer_->Validate();
+ }
+
+ if (success) {
+ std::cerr << "Asm validation succeeded\n";
+ return false;
+ }
+
+ if (std::strstr(typer_->error_message(), error_message) == nullptr) {
+ std::cerr << "Asm validation failed with the wrong error message:\n"
+ "Expected to contain '"
+ << error_message << "'\n"
+ " Actually is '"
+ << typer_->error_message() << "'\n";
+ return false;
+ }
+
+ return true;
+ }
+
+ private:
+ Variable* DeclareVariable(VariableName var_name) {
+ auto* name_ast_string = ast_value_factory_.GetOneByteString(var_name.name_);
+ return var_name.mode_ == DYNAMIC_GLOBAL
+ ? outer_scope_->DeclareDynamicGlobal(name_ast_string,
+ Variable::NORMAL)
+ : module_->scope()->DeclareLocal(name_ast_string, VAR,
+ kCreatedInitialized,
+ Variable::NORMAL);
+ }
+
+ bool ValidateAllStatements(FunctionDeclaration* fun_decl) {
+ AsmTyper::FlattenedStatements iter(zone_, fun_decl->fun()->body());
+ while (auto* curr = iter.Next()) {
+ if (typer_->ValidateStatement(curr) == AsmType::None()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ AsmType* ValidateExpressionStatement(FunctionDeclaration* fun_decl) {
+ AsmTyper::FlattenedStatements iter(zone_, fun_decl->fun()->body());
+ AsmType* ret = AsmType::None();
+ bool last_was_expression_statement = false;
+ while (auto* curr = iter.Next()) {
+ if (auto* expr_stmt = curr->AsExpressionStatement()) {
+ last_was_expression_statement = true;
+ if ((ret = typer_->ValidateExpression(expr_stmt->expression())) ==
+ AsmType::None()) {
+ break;
+ }
+ } else {
+ ret = AsmType::None();
+ last_was_expression_statement = false;
+ if (typer_->ValidateStatement(curr) == AsmType::None()) {
+ break;
+ }
+ }
+ }
+ CHECK(last_was_expression_statement || ret == AsmType::None());
+ return ret;
+ }
+
+ std::string source_;
+ ValidationType validation_type_;
+ HandleAndZoneScope handles_;
+ Zone* zone_;
+ Isolate* isolate_;
+ AstValueFactory ast_value_factory_;
+ Factory* factory_;
+ Handle<String> source_code_;
+ Handle<Script> script_;
+
+ DeclarationScope* outer_scope_;
+ FunctionLiteral* module_;
+ FunctionDeclaration* fun_decl_;
+ std::unique_ptr<AsmTyper> typer_;
+ std::unique_ptr<AsmTyper::FunctionScope> fun_scope_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+namespace {
+
+struct ValidationInput {
+ ValidationInput(const std::string& source, iw::ValidationType type)
+ : source_(source), type_(type) {}
+
+ const std::string source_;
+ const iw::ValidationType type_;
+};
+
+std::unique_ptr<iw::AsmTyperHarnessBuilder> ValidationOf(
+ ValidationInput input) {
+ return std::unique_ptr<iw::AsmTyperHarnessBuilder>(
+ new iw::AsmTyperHarnessBuilder(input.source_.c_str(), input.type_));
+}
+
+ValidationInput Module(const char* source) {
+ return ValidationInput(source, iw::ValidateModule);
+}
+
+std::string WrapInFunction(const char* source, bool needs_use_asm) {
+ if (needs_use_asm) {
+ return std::string(
+ "function foo() {\n"
+ " 'use asm';\n"
+ " ") +
+ source +
+ "\n"
+ "}";
+ }
+
+ return std::string(
+ "function bar() {\n"
+ " ") +
+ source +
+ "\n"
+ "}\n"
+ "return {b: bar};\n";
+}
+
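+// For reference, Statement() and Expression() below apply WrapInFunction
+// twice, producing a module whose inner function holds the code under test.
+// For example, Statement("return 0;") yields roughly this source (whitespace
+// simplified):
+//
+//   function foo() {
+//     'use asm';
+//     function bar() {
+//       return 0;
+//     }
+//     return {b: bar};
+//   }
+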
+ValidationInput Globals(const char* source) {
+ static const bool kNeedsUseAsm = true;
+ return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
+ iw::ValidateGlobals);
+}
+
+ValidationInput FunctionTables(const char* source) {
+ static const bool kNeedsUseAsm = true;
+ return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
+ iw::ValidateFunctionTables);
+}
+
+ValidationInput Export(const char* source) {
+ static const bool kNeedsUseAsm = true;
+ return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
+ iw::ValidateExport);
+}
+
+ValidationInput Function(const char* source) {
+ static const bool kNeedsUseAsm = true;
+ return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
+ iw::ValidateFunction);
+}
+
+ValidationInput Statement(const char* source) {
+ static const bool kDoesNotNeedUseAsm = false;
+ static const bool kNeedsUseAsm = true;
+ return ValidationInput(
+ WrapInFunction(WrapInFunction(source, kDoesNotNeedUseAsm).c_str(),
+ kNeedsUseAsm),
+ iw::ValidateStatement);
+}
+
+ValidationInput Expression(const char* source) {
+ static const bool kDoesNotNeedUseAsm = false;
+ static const bool kNeedsUseAsm = true;
+ return ValidationInput(
+ WrapInFunction(WrapInFunction(source, kDoesNotNeedUseAsm).c_str(),
+ kNeedsUseAsm),
+ iw::ValidateExpression);
+}
+
+iw::AsmTyperHarnessBuilder::VariableName Var(const char* name) {
+ return iw::AsmTyperHarnessBuilder::VariableName(name, VAR);
+}
+
+iw::AsmTyperHarnessBuilder::VariableName DynamicGlobal(const char* name) {
+ return iw::AsmTyperHarnessBuilder::VariableName(name, DYNAMIC_GLOBAL);
+}
+
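+// The helpers above compose into the pattern used by every test in this
+// file: build a harness for some wrapped source, populate its environment
+// with With* calls, then assert the outcome. An illustrative positive case
+// (hypothetical input, mirroring the negative tests below):
+//
+//   CHECK(ValidationOf(Statement("return 0;"))
+//             ->WithReturnType(iw::AsmType::Signed())
+//             ->Succeeds());
+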
+TEST(MissingUseAsmDirective) {
+ v8::V8::Initialize();
+
+ // We can't test the empty input ("") because the AsmTyperHarnessBuilder will
+ // CHECK if there's no function in the top-level scope.
+ const char* kTests[] = {"function module(){}",
+ "function module(){ use_asm; }",
+ "function module(){ \"use asm \"; }",
+ "function module(){ \" use asm \"; }",
+ "function module(){ \"use Asm\"; }"};
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const char* module = kTests[ii];
+ if (!ValidationOf(Module(module))
+ ->FailsWithMessage("Missing \"use asm\"")) {
+ std::cerr << "Test:\n" << module;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(InvalidModuleSignature) {
+ v8::V8::Initialize();
+
+ const struct {
+ const char* module;
+ const char* error_message;
+ } kTests[] = {
+ {"function eval(){ \"use asm\"; }",
+ "Invalid asm.js identifier in module name"},
+ {"function arguments(){ \"use asm\"; }",
+ "Invalid asm.js identifier in module name"},
+ {"function module(eval){ \"use asm\"; }",
+ "Invalid asm.js identifier in module parameter"},
+ {"function module(arguments){ \"use asm\"; }",
+ "Invalid asm.js identifier in module parameter"},
+ {"function module(stdlib, eval){ \"use asm\"; }",
+ "Invalid asm.js identifier in module parameter"},
+ {"function module(stdlib, arguments){ \"use asm\"; }",
+ "Invalid asm.js identifier in module parameter"},
+ {"function module(stdlib, foreign, eval){ \"use asm\"; }",
+ "Invalid asm.js identifier in module parameter"},
+ {"function module(stdlib, foreign, arguments){ \"use asm\"; }",
+ "Invalid asm.js identifier in module parameter"},
+ {"function module(stdlib, foreign, heap, eval){ \"use asm\"; }",
+ "asm.js modules may not have more than three parameters"},
+ {"function module(stdlib, foreign, heap, arguments){ \"use asm\"; }",
+ "asm.js modules may not have more than three parameters"},
+ {"function module(module){ \"use asm\"; }",
+ "Redeclared identifier in module parameter"},
+ {"function module(stdlib, module){ \"use asm\"; }",
+ "Redeclared identifier in module parameter"},
+ {"function module(stdlib, stdlib){ \"use asm\"; }",
+ "Redeclared identifier in module parameter"},
+ {"function module(stdlib, foreign, module){ \"use asm\"; }",
+ "Redeclared identifier in module parameter"},
+ {"function module(stdlib, foreign, stdlib){ \"use asm\"; }",
+ "Redeclared identifier in module parameter"},
+ {"function module(stdlib, foreign, foreign){ \"use asm\"; }",
+ "Redeclared identifier in module parameter"},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Module(test->module))
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->module;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ErrorsInGlobalVariableDefinition) {
+ const struct {
+ const char* decl;
+ const char* error_message;
+ } kTests[] = {
+ {"var v;", "Global variable missing initializer"},
+ {"var v = uninitialized;", "Invalid global variable initializer"},
+ {"var v = 'use asm';", "type annotation - forbidden literal"},
+ {"var v = 4294967296;", " - forbidden literal"},
+ {"var v = not_fround;", "Invalid global variable initializer"},
+ {"var v = not_fround(1);", "expected call fround(literal)"},
+ {"var v = __fround__(1.0);", "expected call fround(literal)"},
+ {"var v = fround(1.0, 1.0);", "expected call fround(literal)"},
+ {"var v = fround(not_fround);", "literal argument for call to fround"},
+ {"var v = fround(1);", "literal argument to be a floating point"},
+ {"var v = stdlib.nan", "Invalid import"},
+ {"var v = stdlib.Math.nan", "Invalid import"},
+ {"var v = stdlib.Mathh.E", "Invalid import"},
+ {"var v = stdlib.Math", "Invalid import"},
+ {"var v = Stdlib.Math.E", "Invalid import"},
+ {"var v = stdlib.Math.E[0]", "Invalid import"},
+ {"var v = stdlibb.NaN", "Invalid import"},
+ {"var v = ffi.NaN[0]", "Invalid import"},
+ {"var v = heap.NaN[0]", "Invalid import"},
+ {"var v = ffi.foo * 2.0;", "unrecognized annotation"},
+ {"var v = ffi.foo|1;", "unrecognized annotation"},
+ {"var v = ffi()|0;", "must import member"},
+ {"var v = +ffi();", "must import member"},
+ {"var v = ffi().a|0;", "object lookup failed"},
+ {"var v = +ffi().a;", "object lookup failed"},
+ {"var v = sstdlib.a|0;", "object lookup failed"},
+ {"var v = +sstdlib.a;", "object lookup failed"},
+ {"var v = stdlib.NaN|0;", "object is not the ffi"},
+ {"var v = +stdlib.NaN;", "object is not the ffi"},
+ {"var v = new f()", "Invalid type after new"},
+ {"var v = new stdli.Uint8Array(heap)", "Unknown stdlib member in heap"},
+ {"var v = new stdlib.dd(heap)", "Unknown stdlib member in heap"},
+ {"var v = new stdlib.Math.fround(heap)", "Type is not a heap view type"},
+ {"var v = new stdlib.Uint8Array(a, b)", "Invalid number of arguments"},
+ {"var v = new stdlib.Uint8Array(heap())", "should be the module's heap"},
+ {"var v = new stdlib.Uint8Array(heap_)", "instead of heap parameter"},
+ {"var v = new stdlib.Uint8Array(ffi)", "should be the module's heap"},
+ {"var eval = 0;", "in global variable"},
+ {"var eval = 0.0;", "in global variable"},
+ {"var eval = fround(0.0);", "in global variable"},
+ {"var eval = +ffi.a;", "in global variable"},
+ {"var eval = ffi.a|0;", "in global variable"},
+ {"var eval = ffi.a;", "in global variable"},
+ {"var eval = new stdlib.Uint8Array(heap);", "in global variable"},
+ {"var arguments = 0;", "in global variable"},
+ {"var arguments = 0.0;", "in global variable"},
+ {"var arguments = fround(0.0);", "in global variable"},
+ {"var arguments = +ffi.a;", "in global variable"},
+ {"var arguments = ffi.a|0;", "in global variable"},
+ {"var arguments = ffi.a;", "in global variable"},
+ {"var arguments = new stdlib.Uint8Array(heap);", "in global variable"},
+ {"var a = 0, a = 0.0;", "Redefined global variable"},
+ {"var a = 0; var a = 0;", "Redefined global variable"},
+ {"var a = 0, b = 0; var a = 0;", "Redefined global variable"},
+ {"var a = 0, b = 0; var b = 0, a = 0.0;", "Redefined global variable"},
+ {"var a = stdlib.Int8Array", "Heap view types can not be aliased"},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Globals(test->decl))
+ ->WithStdlib(DynamicGlobal("stdlib"))
+ ->WithFFI(DynamicGlobal("ffi"))
+ ->WithHeap(DynamicGlobal("heap"))
+ ->WithGlobal(DynamicGlobal("not_fround"), iw::AsmType::Int())
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->decl;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ErrorsInFunctionTableDefinition) {
+ const struct {
+ const char* tables;
+ const char* error_message;
+ } kTests[] = {
+ {"var a = [a, a, a];", "Invalid length for function pointer table"},
+ {"var a = [d2s0()];", "must be a function name"},
+ {"var a = [d2s44];", "Undefined identifier in function pointer"},
+ {"var a = [fround];", "not be a member of the standard library"},
+ {"var a = [imul];", "not be a member of the standard library"},
+ {"var a = [ffi_import];", "must be an asm.js function"},
+ {"var a = [dI];", "must be an asm.js function"},
+ {"var a = [d2s0, d2s1, d2s0, f2s0];", "mismatch in function pointer"},
+ {"var eval = [d2s0, d2s1];", "asm.js identifier in function table name"},
+ {"var arguments = [d2s0, d2s1];", "asm.js identifier in function table"},
+ {"var foo = [d2s0, d2s1];",
+ "Identifier redefined as function pointer table"},
+ {"var I = [d2s0, d2s1];",
+ "Identifier redefined as function pointer table"},
+ {"var d2s = [d2f0, d2f1];", "redefined as function pointer table"},
+ {"var d2s_t = [d2s0];", "Function table size mismatch"},
+ {"var d2s_t = [d2f0, d2f1];", "initializer does not match previous"},
+ };
+
+ auto d2s = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
+ return ret;
+ };
+
+ auto d2s_tbl = [](Zone* zone) -> iw::AsmType* {
+ auto* d2s = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ d2s->AsFunctionType()->AddArgument(iw::AsmType::Double());
+
+ auto* ret = iw::AsmType::FunctionTableType(zone, 2, d2s);
+ return ret;
+ };
+
+ auto f2s = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Float());
+ return ret;
+ };
+
+ auto d2f = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Float());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
+ return ret;
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(FunctionTables(test->tables))
+ ->WithImport(DynamicGlobal("ffi_import"), iw::AsmTyper::kFFI)
+ ->WithImport(DynamicGlobal("imul"), iw::AsmTyper::kMathImul)
+ ->WithImport(DynamicGlobal("E"), iw::AsmTyper::kMathE)
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithImport(DynamicGlobal("floor"), iw::AsmTyper::kMathFround)
+ ->WithGlobal(DynamicGlobal("d2s0"), d2s)
+ ->WithGlobal(DynamicGlobal("d2s1"), d2s)
+ ->WithGlobal(DynamicGlobal("f2s0"), f2s)
+ ->WithGlobal(DynamicGlobal("f2s1"), f2s)
+ ->WithGlobal(DynamicGlobal("d2f0"), d2f)
+ ->WithGlobal(DynamicGlobal("d2f1"), d2f)
+ ->WithGlobal(DynamicGlobal("dI"), iw::AsmType::Int())
+ ->WithGlobal(Var("I"), iw::AsmType::Int())
+ ->WithUndefinedGlobal(Var("d2s"), d2s)
+ ->WithUndefinedGlobal(Var("d2s_t"), d2s_tbl)
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->tables;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ErrorsInModuleExport) {
+ const struct {
+ const char* module_export;
+ const char* error_message;
+ } kTests[] = {
+ {"", "Missing asm.js module export"},
+ {"return;", "Unrecognized expression in asm.js module export expression"},
+ {"return f;", "Undefined identifier in asm.js module export"},
+ {"return f();", "Unrecognized expression in asm.js module export"},
+ {"return d2s_tbl;", "cannot export function tables"},
+ {"return min;", "cannot export standard library functions"},
+ {"return ffi;", "cannot export foreign functions"},
+ {"return I;", "is not an asm.js function"},
+ {"return {'a': d2s_tbl}", "cannot export function tables"},
+ {"return {'a': min}", "cannot export standard library functions"},
+ {"return {'a': ffi}", "cannot export foreign functions"},
+ {"return {'a': f()}", "must be an asm.js function name"},
+ {"return {'a': f}", "Undefined identifier in asm.js module export"},
+ {"function v() { a(); } return {b: d2s}", "Missing definition for forw"},
+ {"return {b: d2s, 'a': d2s_tbl}", "cannot export function tables"},
+ {"return {b: d2s, 'a': min}", "cannot export standard library"},
+ {"return {b: d2s, 'a': ffi}", "cannot export foreign functions"},
+ {"return {b: d2s, 'a': f()}", "must be an asm.js function name"},
+ {"return {b: d2s, 'a': f}", "Undefined identifier in asm.js module"},
+ };
+
+ auto d2s_tbl = [](Zone* zone) -> iw::AsmType* {
+ auto* d2s = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ d2s->AsFunctionType()->AddArgument(iw::AsmType::Double());
+
+ auto* ret = iw::AsmType::FunctionTableType(zone, 2, d2s);
+ return ret;
+ };
+
+ auto d2s = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
+ return ret;
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Export(test->module_export))
+ ->WithGlobal(DynamicGlobal("d2s_tbl"), d2s_tbl)
+ ->WithGlobal(DynamicGlobal("d2s"), d2s)
+ ->WithImport(DynamicGlobal("min"), iw::AsmTyper::kMathMin)
+ ->WithImport(DynamicGlobal("ffi"), iw::AsmTyper::kFFI)
+ ->WithGlobal(DynamicGlobal("I"), iw::AsmType::Int())
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->module_export;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ErrorsInFunction) {
+ auto d2s = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
+ return ret;
+ };
+
+ const struct {
+ const char* function;
+ const char* error_message;
+ } kTests[] = {
+ {"function f(eval) {"
+ " eval = eval|0;"
+ "}\n",
+ "Invalid asm.js identifier in parameter name"},
+ {"function f(arguments) {"
+ " arguments = arguments|0;"
+ "}\n",
+ "Invalid asm.js identifier in parameter name"},
+ // The following error should actually be a "redeclared local," but the
+ // AST "hides" the first parameter from us, so the parameter type checking
+ // will fail because the validator will think that the a = a|0 is
+ // annotating the second parameter.
+ {"function f(a, a) {\n"
+ " a = a|0;\n"
+ " a = +a;\n"
+ "}\n",
+ "Incorrect parameter type annotations"},
+ {"function f(b, a) {\n"
+ " if (0) return;\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ "}\n",
+ "Incorrect parameter type annotations"},
+ {"function f(b, a) {\n"
+ " f();\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ "}\n",
+ "Incorrect parameter type annotations"},
+ {"function f(b, a) {\n"
+ " f.a = 0;\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ "}\n",
+ "Incorrect parameter type annotations"},
+ {"function f(b, a) {\n"
+ " a = a|0;\n"
+ " b = +b;\n"
+ "}\n",
+ "Incorrect parameter type annotations"},
+ {"function f(b, a) {\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ " var eval = 0;\n"
+ "}\n",
+ "Invalid asm.js identifier in local variable"},
+ {"function f(b, a) {\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ " var b = 0;\n"
+ "}\n",
+ "Redeclared local"},
+ {"function f(b, a) {\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ " var c = 0, c = 1.0;\n"
+ "}\n",
+ "Redeclared local"},
+ {"function f(b, a) {\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ " var c = 0; var c = 1.0;\n"
+ "}\n",
+ "Redeclared local"},
+ {"function f(b, a) {\n"
+ " b = +b;\n"
+ " a = a|0;\n"
+ " f();\n"
+ " var c = 0;\n"
+ "}\n",
+ "Local variable missing initializer in asm.js module"},
+ {"function f() {\n"
+ " function ff() {}\n"
+ "}\n",
+ "Functions may only define inner variables"},
+ {"function f() {\n"
+ " return a+1;\n"
+ "}\n",
+ "Invalid return type annotation"},
+ {"function f() {\n"
+ " return ~~x;\n"
+ "}\n",
+ "Invalid return type annotation"},
+ {"function f() {\n"
+ " return d();\n"
+ "}\n",
+ "Invalid function call in return statement"},
+ {"function f() {\n"
+ " return 'use asm';\n"
+ "}\n",
+ "Invalid literal in return statement"},
+ {"function f() {\n"
+ " return 2147483648;\n"
+ "}\n",
+ "Invalid literal in return statement"},
+ {"function f() {\n"
+ " return stdlib.Math.E;"
+ "}\n",
+ "Invalid return type expression"},
+ {"function f() {\n"
+ " return E[0];"
+ "}\n",
+ "Invalid return type expression"},
+ {"function I() {}\n", "Identifier redefined as function"},
+ {"function foo() {}\n", "Identifier redefined as function"},
+ {"function d2s() {}\n", "Identifier redefined (function name)"},
+ {"function d2s(x) {\n"
+ " x = x|0;\n"
+ " return -1;\n"
+ "}\n",
+ "Identifier redefined (function name)"},
+ {"function d2s(x) {\n"
+ " x = +x;\n"
+ " return -1.0;\n"
+ "}\n",
+ "Identifier redefined (function name)"},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Function(test->function))
+ ->WithGlobal(Var("I"), iw::AsmType::Int())
+ ->WithGlobal(Var("d2s"), d2s)
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->function;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ErrorsInStatement) {
+ const struct {
+ const char* statement;
+ const char* error_message;
+ } kTests[] = {
+ {"if (fround(1));", "If condition must be type int"},
+ {"return;", "Type mismatch in return statement"},
+ {"return +1.0;", "Type mismatch in return statement"},
+ {"return +d()", "Type mismatch in return statement"},
+ {"while (fround(1));", "While condition must be type int"},
+ {"do {} while (fround(1));", "Do {} While condition must be type int"},
+ {"for (;fround(1););", "For condition must be type int"},
+ {"switch(flocal){ case 0: return 0; }", "Switch tag must be signed"},
+ {"switch(slocal){ default: case 0: return 0; }",
+ "Switch default must appear last"},
+ {"switch(slocal){ case 1: case 1: return 0; }", "Duplicated case label"},
+ {"switch(slocal){ case 1: case 0: break; case 1: return 0; }",
+ "Duplicated case label"},
+ {"switch(slocal){ case 1.0: return 0; }",
+ "Case label must be a 32-bit signed integer"},
+ {"switch(slocal){ case 1.0: return 0; }",
+ "Case label must be a 32-bit signed integer"},
+ {"switch(slocal){ case -100000: case 2147483647: return 0; }",
+ "Out-of-bounds case"},
+ {"switch(slocal){ case 2147483648: return 0; }",
+ "Case label must be a 32-bit signed"},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Statement(test->statement))
+ ->WithReturnType(iw::AsmType::Signed())
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithLocal(DynamicGlobal("flocal"), iw::AsmType::Float())
+ ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->statement;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ErrorsInExpression) {
+ auto d2d = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Double());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
+ return ret;
+ };
+
+ auto d2s_tbl = [](Zone* zone) -> iw::AsmType* {
+ auto* d2s = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ d2s->AsFunctionType()->AddArgument(iw::AsmType::Double());
+
+ auto* ret = iw::AsmType::FunctionTableType(zone, 2, d2s);
+ return ret;
+ };
+
+ const struct {
+ const char* expression;
+ const char* error_message;
+ } kTests[] = {
+ {"noy_a_function();", "Unanotated call to a function must be a call to"},
+ {"a = 0;", "Undeclared identifier"},
+ // we can't verify the module's name being referenced here because
+ // expression validation does not invoke ValidateModule, which sets up the
+ // module information in the AsmTyper.
+ {"stdlib", "accessed by ordinary expressions"},
+ {"ffi", "accessed by ordinary expressions"},
+ {"heap", "accessed by ordinary expressions"},
+ {"d2d", "accessed by ordinary expression"},
+ {"fround", "accessed by ordinary expression"},
+ {"d2s_tbl", "accessed by ordinary expression"},
+ {"ilocal = +1.0", "Type mismatch in assignment"},
+ {"!dlocal", "Invalid type for !"},
+ {"2 * dlocal", "Invalid types for intish *"},
+ {"dlocal * 2", "Invalid types for intish *"},
+ {"1048577 * ilocal", "Invalid operands for *"},
+ {"1048577 / ilocal", "Invalid operands for /"},
+ {"1048577 % dlocal", "Invalid operands for %"},
+ {"1048577 * dlocal", "Invalid operands for *"},
+ {"1048577 / dlocal", "Invalid operands for /"},
+ {"1048577 % ilocal", "Invalid operands for %"},
+ {"ilocal * dlocal", "Invalid operands for *"},
+ {"ilocal / dlocal", "Invalid operands for /"},
+ {"ilocal % dlocal", "Invalid operands for %"},
+ {"1048577 + dlocal", "Invalid operands for additive expression"},
+ {"1048577 - dlocal", "Invalid operands for additive expression"},
+ {"ilocal + dlocal", "Invalid operands for additive expression"},
+ {"ilocal - dlocal", "Invalid operands for additive expression"},
+ {"1048577 << dlocal", "Invalid operands for <<"},
+ {"1048577 >> dlocal", "Invalid operands for >>"},
+ {"1048577 >>> dlocal", "Invalid operands for >>"},
+ {"ilocal << dlocal", "Invalid operands for <<"},
+ {"ilocal >> dlocal", "Invalid operands for >>"},
+ {"ilocal >>> dlocal", "Invalid operands for >>>"},
+ {"1048577 < dlocal", "Invalid operands for <"},
+ {"ilocal < dlocal", "Invalid operands for <"},
+ {"1048577 > dlocal", "Invalid operands for >"},
+ {"ilocal > dlocal", "Invalid operands for >"},
+ {"1048577 <= dlocal", "Invalid operands for <="},
+ {"ilocal <= dlocal", "Invalid operands for <="},
+ {"1048577 >= dlocal", "Invalid operands for >="},
+ {"ilocal >= dlocal", "Invalid operands for >="},
+ {"1048577 == dlocal", "Invalid operands for =="},
+ {"ilocal == dlocal", "Invalid operands for =="},
+ /* NOTE: the parser converts a != b to !(a == b). */
+ {"1048577 != dlocal", "Invalid operands for =="},
+ {"ilocal != dlocal", "Invalid operands for =="},
+ {"dlocal & dlocal", "Invalid operands for &"},
+ {"1048577 & dlocal", "Invalid operands for &"},
+ {"ilocal & dlocal", "Invalid operands for &"},
+ {"dlocal | dlocal2", "Invalid operands for |"},
+ {"1048577 | dlocal", "Invalid operands for |"},
+ {"ilocal | dlocal", "Invalid operands for |"},
+ {"dlocal ^ dlocal2", "Invalid operands for ^"},
+ {"1048577 ^ dlocal", "Invalid operands for ^"},
+ {"ilocal ^ dlocal", "Invalid operands for ^"},
+ {"dlocal ? 0 : 1", "Ternary operation condition should be int"},
+ {"ilocal ? dlocal : 1", "Type mismatch for ternary operation result"},
+ {"ilocal ? 1 : dlocal", "Type mismatch for ternary operation result"},
+ {"eval(10)|0", "Invalid asm.js identifier in (forward) function"},
+ {"arguments(10)|0", "Invalid asm.js identifier in (forward) function"},
+ {"not_a_function(10)|0", "Calling something that's not a function"},
+ {"fround(FFI())", "Foreign functions can't return float"},
+ {"FFI(fround(0))|0", "Function invocation does not match function type"},
+ {"FFI(2147483648)|0", "Function invocation does not match function type"},
+ {"d2d(2.0)|0", "Function invocation does not match function type"},
+ {"+d2d(2)", "Function invocation does not match function type"},
+ {"eval[ilocal & 3]()|0", "Invalid asm.js identifier in (forward)"},
+ {"arguments[ilocal & 3]()|0", "Invalid asm.js identifier in (forward)"},
+ {"not_a_function[ilocal & 3]()|0", "Identifier does not name a function"},
+ {"d2s_tbl[ilocal & 3](0.0)|0", "Function table size does not match"},
+ {"+d2s_tbl[ilocal & 1](0.0)", "does not match previous signature"},
+ {"d2s_tbl[ilocal & 1](0)|0", "does not match previous signature"},
+ {"a.b()|0", "Indirect call index must be in the expr & mask form"},
+ {"HEAP32[0][0] = 0", "Invalid heap access"},
+ {"heap32[0] = 0", "Undeclared identifier in heap access"},
+ {"not_a_function[0] = 0", "Identifier does not represent a heap view"},
+ {"HEAP32[0.0] = 0", "Heap access index must be int"},
+ {"HEAP32[-1] = 0", "Heap access index must be a 32-bit unsigned integer"},
+ {"HEAP32[ilocal >> 1] = 0", "Invalid heap access index"},
+ // *VIOLATION* the following is invalid, but because of desugaring it is
+ // accepted.
+ // {"HEAP32[0 >> 1] = 0", "Invalid heap access index"},
+ {"HEAP8[fround(0.0)] = 0", "Invalid heap access index for byte array"},
+ {"HEAP8[iish] = 0", "Invalid heap access index for byte array"},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithStdlib(DynamicGlobal("stdlib"))
+ ->WithFFI(DynamicGlobal("ffi"))
+ ->WithHeap(DynamicGlobal("heap"))
+ ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
+ ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
+ ->WithLocal(DynamicGlobal("dlocal"), iw::AsmType::Double())
+ ->WithLocal(DynamicGlobal("dlocal2"), iw::AsmType::Double())
+ ->WithLocal(DynamicGlobal("not_a_function"), iw::AsmType::Int())
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithImport(DynamicGlobal("FFI"), iw::AsmTyper::kFFI)
+ ->WithGlobal(DynamicGlobal("d2d"), d2d)
+ ->WithGlobal(DynamicGlobal("d2s_tbl"), d2s_tbl)
+ ->WithGlobal(DynamicGlobal("HEAP32"), iw::AsmType::Int32Array())
+ ->WithGlobal(DynamicGlobal("HEAP8"), iw::AsmType::Int8Array())
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
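+// The Validate* tests below exercise the success paths: SucceedsWithExactType
+// checks that validation succeeds and that the expression's computed type is
+// exactly the expected AsmType.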
+TEST(ValidateNumericLiteral) {
+ const struct {
+ const char* expression;
+ iw::AsmType* expected_type;
+ } kTests[] = {
+ {"0", iw::AsmType::FixNum()},
+ {"-1", iw::AsmType::Signed()},
+ {"2147483648", iw::AsmType::Unsigned()},
+ {"0.0", iw::AsmType::Double()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->SucceedsWithExactType(test->expected_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateIdentifier) {
+ const struct {
+ const char* expression;
+ iw::AsmType* expected_type;
+ } kTests[] = {{"afixnum", iw::AsmType::FixNum()},
+ {"adouble", iw::AsmType::Double()},
+ {"afloat", iw::AsmType::Float()},
+ {"anextern", iw::AsmType::Extern()},
+ {"avoid", iw::AsmType::Void()}};
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithLocal(DynamicGlobal(test->expression), test->expected_type)
+ ->WithGlobal(DynamicGlobal(test->expression),
+ iw::AsmType::Floatish())
+ ->SucceedsWithExactType(test->expected_type)) {
+ std::cerr << "Test (local identifiers):\n" << test->expression;
+ CHECK(false);
+ }
+ }
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithGlobal(DynamicGlobal(test->expression), test->expected_type)
+ ->SucceedsWithExactType(test->expected_type)) {
+ std::cerr << "Test (global identifiers):\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateCallExpression) {
+ auto v2f = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Float());
+ return ret;
+ };
+
+ const struct {
+ const char* expression;
+ } kTests[] = {
+ {"a_float_function()"},
+ {"fround(0)"},
+ {"slocal"},
+ {"ulocal"},
+ {"dqlocal"},
+ {"fishlocal"},
+ };
+
+ char full_test[200];
+ static const size_t kFullTestSize = arraysize(full_test);
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ CHECK(v8::base::OS::SNPrintF(full_test, kFullTestSize, "fround(%s)",
+ test->expression) < kFullTestSize);
+ if (!ValidationOf(Expression(full_test))
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithGlobal(DynamicGlobal("a_float_function"), v2f)
+ ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
+ ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
+ ->WithLocal(DynamicGlobal("dqlocal"), iw::AsmType::DoubleQ())
+ ->WithLocal(DynamicGlobal("fishlocal"), iw::AsmType::Floatish())
+ ->SucceedsWithExactType(iw::AsmType::Float())) {
+ std::cerr << "Test:\n" << full_test;
+ CHECK(false);
+ }
+ }
+
+ const struct {
+ const char* expression;
+ const char* error_message;
+ } kFailureTests[] = {
+ {"vlocal", "Invalid argument type to fround"},
+ {"ilocal", "Invalid argument type to fround"},
+ {"a_double_function()", "Function invocation does not match"},
+ };
+
+ auto v2d = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Double());
+ return ret;
+ };
+
+ for (size_t ii = 0; ii < arraysize(kFailureTests); ++ii) {
+ const auto* test = kFailureTests + ii;
+ CHECK(v8::base::OS::SNPrintF(full_test, kFullTestSize, "fround(%s)",
+ test->expression) < kFullTestSize);
+ if (!ValidationOf(Expression(full_test))
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
+ ->WithLocal(DynamicGlobal("vlocal"), iw::AsmType::Void())
+ ->WithGlobal(DynamicGlobal("a_double_function"), v2d)
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << full_test;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateMemberExpression) {
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ {"I8[i]", iw::AsmType::Intish()}, // Legacy: no shift for 8-bit view.
+ {"I8[iish >> 0]", iw::AsmType::Intish()},
+ {"I8[0]", iw::AsmType::Intish()},
+ {"I8[2147483648]", iw::AsmType::Intish()},
+ {"U8[iish >> 0]", iw::AsmType::Intish()},
+ {"U8[i]", iw::AsmType::Intish()}, // Legacy: no shift for 8-bit view.
+ {"U8[0]", iw::AsmType::Intish()},
+ {"U8[2147483648]", iw::AsmType::Intish()},
+ {"I16[iish >> 1]", iw::AsmType::Intish()},
+ {"I16[0]", iw::AsmType::Intish()},
+ {"I16[1073741824]", iw::AsmType::Intish()},
+ {"U16[iish >> 1]", iw::AsmType::Intish()},
+ {"U16[0]", iw::AsmType::Intish()},
+ {"U16[1073741824]", iw::AsmType::Intish()},
+ {"I32[iish >> 2]", iw::AsmType::Intish()},
+ {"I32[0]", iw::AsmType::Intish()},
+ {"I32[536870912]", iw::AsmType::Intish()},
+ {"U32[iish >> 2]", iw::AsmType::Intish()},
+ {"U32[0]", iw::AsmType::Intish()},
+ {"U32[536870912]", iw::AsmType::Intish()},
+ {"F32[iish >> 2]", iw::AsmType::FloatQ()},
+ {"F32[0]", iw::AsmType::FloatQ()},
+ {"F32[536870912]", iw::AsmType::FloatQ()},
+ {"F64[iish >> 3]", iw::AsmType::DoubleQ()},
+ {"F64[0]", iw::AsmType::DoubleQ()},
+ {"F64[268435456]", iw::AsmType::DoubleQ()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithGlobal(DynamicGlobal("I8"), iw::AsmType::Int8Array())
+ ->WithGlobal(DynamicGlobal("U8"), iw::AsmType::Uint8Array())
+ ->WithGlobal(DynamicGlobal("I16"), iw::AsmType::Int16Array())
+ ->WithGlobal(DynamicGlobal("U16"), iw::AsmType::Uint16Array())
+ ->WithGlobal(DynamicGlobal("I32"), iw::AsmType::Int32Array())
+ ->WithGlobal(DynamicGlobal("U32"), iw::AsmType::Uint32Array())
+ ->WithGlobal(DynamicGlobal("F32"), iw::AsmType::Float32Array())
+ ->WithGlobal(DynamicGlobal("F64"), iw::AsmType::Float64Array())
+ ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
+ ->WithLocal(DynamicGlobal("i"), iw::AsmType::Int())
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateAssignmentExpression) {
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ // -----------------------------------------------------------------------
+ // Array assignments.
+ // Storing signed to int heap view.
+ {"I8[1024] = -1024", iw::AsmType::Signed()},
+ {"I8[1024 >> 0] = -1024", iw::AsmType::Signed()},
+ {"I8[0] = -1024", iw::AsmType::Signed()},
+ {"I8[2147483648] = -1024", iw::AsmType::Signed()},
+ {"U8[1024 >> 0] = -1024", iw::AsmType::Signed()},
+ {"U8[0] = -1024", iw::AsmType::Signed()},
+ {"U8[2147483648] = -1024", iw::AsmType::Signed()},
+ {"I16[1024 >> 1] = -1024", iw::AsmType::Signed()},
+ {"I16[0] = -1024", iw::AsmType::Signed()},
+ {"I16[1073741824] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
+ {"U16[1024 >> 1] = -1024", iw::AsmType::Signed()},
+ {"U16[0] = -1024", iw::AsmType::Signed()},
+ {"U16[1073741824] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
+ {"I32[1024 >> 2] = -1024", iw::AsmType::Signed()},
+ {"I32[0] = -1024", iw::AsmType::Signed()},
+ {"I32[536870912] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
+ {"U32[1024 >> 2] = -1024", iw::AsmType::Signed()},
+ {"U32[0] = -1024", iw::AsmType::Signed()},
+ {"U32[536870912] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
+ // Storing fixnum to int heap view.
+ {"I8[1024] = 1024", iw::AsmType::FixNum()},
+ {"I8[1024 >> 0] = 1024", iw::AsmType::FixNum()},
+ {"I8[0] = 1024", iw::AsmType::FixNum()},
+ {"I8[2147483648] = 1024", iw::AsmType::FixNum()},
+ {"U8[1024 >> 0] = 1024", iw::AsmType::FixNum()},
+ {"U8[0] = 1024", iw::AsmType::FixNum()},
+ {"U8[2147483648] = 1024", iw::AsmType::FixNum()},
+ {"I16[1024 >> 1] = 1024", iw::AsmType::FixNum()},
+ {"I16[0] = 1024", iw::AsmType::FixNum()},
+ {"I16[1073741824] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
+ {"U16[1024 >> 1] = 1024", iw::AsmType::FixNum()},
+ {"U16[0] = 1024", iw::AsmType::FixNum()},
+ {"U16[1073741824] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
+ {"I32[1024 >> 2] = 1024", iw::AsmType::FixNum()},
+ {"I32[0] = 1024", iw::AsmType::FixNum()},
+ {"I32[536870912] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
+ {"U32[1024 >> 2] = 1024", iw::AsmType::FixNum()},
+ {"U32[0] = 1024", iw::AsmType::FixNum()},
+ {"U32[536870912] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
+ // Storing int to int heap view.
+ {"I8[ilocal] = ilocal", iw::AsmType::Int()},
+ {"I8[ilocal >> 0] = ilocal", iw::AsmType::Int()},
+ {"I8[0] = ilocal", iw::AsmType::Int()},
+ {"I8[2147483648] = ilocal", iw::AsmType::Int()},
+ {"U8[ilocal >> 0] = ilocal", iw::AsmType::Int()},
+ {"U8[0] = ilocal", iw::AsmType::Int()},
+ {"U8[2147483648] = ilocal", iw::AsmType::Int()},
+ {"I16[ilocal >> 1] = ilocal", iw::AsmType::Int()},
+ {"I16[0] = ilocal", iw::AsmType::Int()},
+ {"I16[1073741824] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
+ {"U16[ilocal >> 1] = ilocal", iw::AsmType::Int()},
+ {"U16[0] = ilocal", iw::AsmType::Int()},
+ {"U16[1073741824] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
+ {"I32[ilocal >> 2] = ilocal", iw::AsmType::Int()},
+ {"I32[0] = ilocal", iw::AsmType::Int()},
+ {"I32[536870912] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
+ {"U32[ilocal >> 2] = ilocal", iw::AsmType::Int()},
+ {"U32[0] = ilocal", iw::AsmType::Int()},
+ {"U32[536870912] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
+ // Storing intish to int heap view.
+ {"I8[ilocal] = iish", iw::AsmType::Intish()},
+ {"I8[iish >> 0] = iish", iw::AsmType::Intish()},
+ {"I8[0] = iish", iw::AsmType::Intish()},
+ {"I8[2147483648] = iish", iw::AsmType::Intish()},
+ {"U8[iish >> 0] = iish", iw::AsmType::Intish()},
+ {"U8[0] = iish", iw::AsmType::Intish()},
+ {"U8[2147483648] = iish", iw::AsmType::Intish()},
+ {"I16[iish >> 1] = iish", iw::AsmType::Intish()},
+ {"I16[0] = iish", iw::AsmType::Intish()},
+ {"I16[1073741824] = iish", iw::AsmType::Intish()}, // not pre-shifted.
+ {"U16[iish >> 1] = iish", iw::AsmType::Intish()},
+ {"U16[0] = iish", iw::AsmType::Intish()},
+ {"U16[1073741824] = iish", iw::AsmType::Intish()}, // not pre-shifted.
+ {"I32[iish >> 2] = iish", iw::AsmType::Intish()},
+ {"I32[0] = iish", iw::AsmType::Intish()},
+ {"I32[536870912] = iish", iw::AsmType::Intish()}, // not pre-shifted.
+ {"U32[iish >> 2] = iish", iw::AsmType::Intish()},
+ {"U32[0] = iish", iw::AsmType::Intish()},
+ {"U32[536870912] = iish", iw::AsmType::Intish()}, // not pre-shifted.
+ // Storing floatish to f32 heap view.
+ {"F32[iish >> 2] = fish", iw::AsmType::Floatish()},
+ {"F32[0] = fish", iw::AsmType::Floatish()},
+ {"F32[536870912] = fish ", iw::AsmType::Floatish()}, // not pre-shifted.
+ // Storing double? to f32 heap view.
+ {"F32[iish >> 2] = dq", iw::AsmType::DoubleQ()},
+ {"F32[0] = dq", iw::AsmType::DoubleQ()},
+ {"F32[536870912] = dq", iw::AsmType::DoubleQ()}, // not pre-shifted.
+ // Storing float? to f64 heap view.
+ {"F64[iish >> 3] = fq", iw::AsmType::FloatQ()},
+ {"F64[0] = fq", iw::AsmType::FloatQ()},
+ {"F64[268435456] = fq", iw::AsmType::FloatQ()}, // not pre-shifted.
+ // Storing double? to f64 heap view.
+ {"F64[iish >> 3] = dq", iw::AsmType::DoubleQ()},
+ {"F64[0] = dq", iw::AsmType::DoubleQ()},
+ {"F64[268435456] = dq", iw::AsmType::DoubleQ()}, // not pre-shifted.
+ // -----------------------------------------------------------------------
+ // Scalar assignments.
+ {"ilocal = 1024", iw::AsmType::FixNum()},
+ {"ilocal = -1024", iw::AsmType::Signed()},
+ {"ilocal = 2147483648", iw::AsmType::Unsigned()},
+ {"ilocal = iglobal", iw::AsmType::Int()},
+ {"iglobal = 1024", iw::AsmType::FixNum()},
+ {"iglobal = -1024", iw::AsmType::Signed()},
+ {"iglobal = 2147483648", iw::AsmType::Unsigned()},
+ {"iglobal = ilocal", iw::AsmType::Int()},
+ {"dlocal = 0.0", iw::AsmType::Double()},
+ {"dlocal = +make_double()", iw::AsmType::Double()},
+ {"dglobal = 0.0", iw::AsmType::Double()},
+ {"dglobal = +make_double()", iw::AsmType::Double()},
+ {"flocal = fround(0)", iw::AsmType::Float()},
+ {"flocal = fround(make_float())", iw::AsmType::Float()},
+ {"fglobal = fround(0)", iw::AsmType::Float()},
+ {"fglobal = fround(make_float())", iw::AsmType::Float()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
+ ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
+ ->WithLocal(DynamicGlobal("fish"), iw::AsmType::Floatish())
+ ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
+ ->WithGlobal(DynamicGlobal("iglobal"), iw::AsmType::Int())
+ ->WithGlobal(DynamicGlobal("dglobal"), iw::AsmType::Double())
+ ->WithGlobal(DynamicGlobal("fglobal"), iw::AsmType::Float())
+ ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
+ ->WithLocal(DynamicGlobal("dlocal"), iw::AsmType::Double())
+ ->WithLocal(DynamicGlobal("flocal"), iw::AsmType::Float())
+ ->WithGlobal(DynamicGlobal("I8"), iw::AsmType::Int8Array())
+ ->WithGlobal(DynamicGlobal("U8"), iw::AsmType::Uint8Array())
+ ->WithGlobal(DynamicGlobal("I16"), iw::AsmType::Int16Array())
+ ->WithGlobal(DynamicGlobal("U16"), iw::AsmType::Uint16Array())
+ ->WithGlobal(DynamicGlobal("I32"), iw::AsmType::Int32Array())
+ ->WithGlobal(DynamicGlobal("U32"), iw::AsmType::Uint32Array())
+ ->WithGlobal(DynamicGlobal("F32"), iw::AsmType::Float32Array())
+ ->WithGlobal(DynamicGlobal("F64"), iw::AsmType::Float64Array())
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateUnaryExpression) {
+ auto v2d = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Double());
+ return ret;
+ };
+
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ {"-2147483648", iw::AsmType::Signed()},
+ {"-1024", iw::AsmType::Signed()},
+ {"-1", iw::AsmType::Signed()},
+ {"-2147483648.0", iw::AsmType::Double()},
+ {"+make_double()", iw::AsmType::Double()},
+ {"+dbl()", iw::AsmType::Double()},
+ {"make_double() * 1.0", iw::AsmType::Double()}, // Violation.
+ {"~~fq", iw::AsmType::Signed()},
+ {"~~dglobal", iw::AsmType::Signed()},
+ {"+slocal", iw::AsmType::Double()},
+ {"slocal * 1.0", iw::AsmType::Double()}, // Violation.
+ {"+ulocal", iw::AsmType::Double()},
+ {"ulocal * 1.0", iw::AsmType::Double()}, // Violation.
+ {"+dq", iw::AsmType::Double()},
+ {"dq * 1.0", iw::AsmType::Double()}, // Violation.
+ {"+fq", iw::AsmType::Double()},
+ {"fq * 1.0", iw::AsmType::Double()}, // Violation.
+ {"-ilocal", iw::AsmType::Intish()},
+ {"ilocal * -1", iw::AsmType::Intish()}, // Violation.
+ {"-dq", iw::AsmType::Double()},
+ {"dq * -1", iw::AsmType::Double()}, // Violation.
+ {"-fq", iw::AsmType::Floatish()},
+ {"fq * -1", iw::AsmType::Floatish()}, // Violation.
+ {"~iish", iw::AsmType::Signed()},
+ {"iish ^ -1", iw::AsmType::Signed()}, // Violation, but OK.
+ {"!ilocal", iw::AsmType::Int()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
+ ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
+ ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
+ ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
+ ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
+ ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
+ ->WithGlobal(DynamicGlobal("dglobal"), iw::AsmType::Double())
+ ->WithGlobal(DynamicGlobal("dbl"), v2d)
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateMultiplicativeExpression) {
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ {"dq * dq", iw::AsmType::Double()},
+ {"fq * fq", iw::AsmType::Floatish()},
+ {"slocal / slocal", iw::AsmType::Intish()},
+ {"ulocal / ulocal", iw::AsmType::Intish()},
+ {"dq / dq", iw::AsmType::Double()},
+ {"fq / fq", iw::AsmType::Floatish()},
+ {"slocal % slocal", iw::AsmType::Intish()},
+ {"ulocal % ulocal", iw::AsmType::Intish()},
+ {"dq % dq", iw::AsmType::Double()},
+ {"-1048575 * ilocal", iw::AsmType::Intish()},
+ {"ilocal * -1048575", iw::AsmType::Intish()},
+ {"1048575 * ilocal", iw::AsmType::Intish()},
+ {"ilocal * 1048575", iw::AsmType::Intish()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
+ ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
+ ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
+ ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
+ ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
+ ->WithGlobal(DynamicGlobal("dglobal"), iw::AsmType::Double())
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateAdditiveExpression) {
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ {"dlocal + dlocal", iw::AsmType::Double()},
+ {"fq + fq", iw::AsmType::Floatish()},
+ {"dq - dq", iw::AsmType::Double()},
+ {"fq - fq", iw::AsmType::Floatish()},
+ {"ilocal + 1", iw::AsmType::Intish()},
+ {"ilocal - 1", iw::AsmType::Intish()},
+ {"slocal + ilocal + 1", iw::AsmType::Intish()},
+ {"slocal - ilocal + 1", iw::AsmType::Intish()},
+ {"ulocal + ilocal + 1", iw::AsmType::Intish()},
+ {"ulocal - ilocal + 1", iw::AsmType::Intish()},
+ {"ulocal + slocal + ilocal + 1", iw::AsmType::Intish()},
+ {"ulocal + slocal - ilocal + 1", iw::AsmType::Intish()},
+ {"ulocal - slocal + ilocal + 1", iw::AsmType::Intish()},
+ {"ulocal - slocal - ilocal + 1", iw::AsmType::Intish()},
+ {"1 + 1", iw::AsmType::FixNum()}, // Violation: intish.
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
+ ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
+ ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
+ ->WithLocal(DynamicGlobal("dlocal"), iw::AsmType::Double())
+ ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
+ ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
+ ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateShiftExpression) {
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ {"iish << iish", iw::AsmType::Signed()},
+ {"iish >> iish", iw::AsmType::Signed()},
+ {"iish >>> iish", iw::AsmType::Unsigned()},
+ {"1 << 0", iw::AsmType::FixNum()}, // Violation: signed.
+ {"1 >> 0", iw::AsmType::FixNum()}, // Violation: signed.
+ {"4294967295 >>> 0", iw::AsmType::Unsigned()},
+ {"-1 >>> 0", iw::AsmType::Unsigned()},
+ {"2147483647 >>> 0", iw::AsmType::FixNum()}, // Violation: unsigned.
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateComparisonExpression) {
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ // -----------------------------------------------------------------------
+ // Non const <op> Non const
+ {"s0 == s1", iw::AsmType::Int()},
+ {"u0 == u1", iw::AsmType::Int()},
+ {"f0 == f1", iw::AsmType::Int()},
+ {"d0 == d1", iw::AsmType::Int()},
+ {"s0 != s1", iw::AsmType::Int()},
+ {"u0 != u1", iw::AsmType::Int()},
+ {"f0 != f1", iw::AsmType::Int()},
+ {"d0 != d1", iw::AsmType::Int()},
+ {"s0 < s1", iw::AsmType::Int()},
+ {"u0 < u1", iw::AsmType::Int()},
+ {"f0 < f1", iw::AsmType::Int()},
+ {"d0 < d1", iw::AsmType::Int()},
+ {"s0 <= s1", iw::AsmType::Int()},
+ {"u0 <= u1", iw::AsmType::Int()},
+ {"f0 <= f1", iw::AsmType::Int()},
+ {"d0 <= d1", iw::AsmType::Int()},
+ {"s0 > s1", iw::AsmType::Int()},
+ {"u0 > u1", iw::AsmType::Int()},
+ {"f0 > f1", iw::AsmType::Int()},
+ {"d0 > d1", iw::AsmType::Int()},
+ {"s0 >= s1", iw::AsmType::Int()},
+ {"u0 >= u1", iw::AsmType::Int()},
+ {"f0 >= f1", iw::AsmType::Int()},
+ {"d0 >= d1", iw::AsmType::Int()},
+ // -----------------------------------------------------------------------
+ // Non const <op> Const
+ {"s0 == -1025", iw::AsmType::Int()},
+ {"u0 == 123456789", iw::AsmType::Int()},
+ {"f0 == fround(123456.78)", iw::AsmType::Int()},
+ {"d0 == 9876543.201", iw::AsmType::Int()},
+ {"s0 != -1025", iw::AsmType::Int()},
+ {"u0 != 123456789", iw::AsmType::Int()},
+ {"f0 != fround(123456.78)", iw::AsmType::Int()},
+ {"d0 != 9876543.201", iw::AsmType::Int()},
+ {"s0 < -1025", iw::AsmType::Int()},
+ {"u0 < 123456789", iw::AsmType::Int()},
+ {"f0 < fround(123456.78)", iw::AsmType::Int()},
+ {"d0 < 9876543.201", iw::AsmType::Int()},
+ {"s0 <= -1025", iw::AsmType::Int()},
+ {"u0 <= 123456789", iw::AsmType::Int()},
+ {"f0 <= fround(123456.78)", iw::AsmType::Int()},
+ {"d0 <= 9876543.201", iw::AsmType::Int()},
+ {"s0 > -1025", iw::AsmType::Int()},
+ {"u0 > 123456789", iw::AsmType::Int()},
+ {"f0 > fround(123456.78)", iw::AsmType::Int()},
+ {"d0 > 9876543.201", iw::AsmType::Int()},
+ {"s0 >= -1025", iw::AsmType::Int()},
+ {"u0 >= 123456789", iw::AsmType::Int()},
+ {"f0 >= fround(123456.78)", iw::AsmType::Int()},
+ {"d0 >= 9876543.201", iw::AsmType::Int()},
+ // -----------------------------------------------------------------------
+ // Const <op> Non const
+ {"-1025 == s0", iw::AsmType::Int()},
+ {"123456789 == u0", iw::AsmType::Int()},
+ {"fround(123456.78) == f0", iw::AsmType::Int()},
+ {"9876543.201 == d0", iw::AsmType::Int()},
+ {"-1025 != s0", iw::AsmType::Int()},
+ {"123456789 != u0", iw::AsmType::Int()},
+ {"fround(123456.78) != f0", iw::AsmType::Int()},
+ {"9876543.201 != d0", iw::AsmType::Int()},
+ {"-1025 < s0", iw::AsmType::Int()},
+ {"123456789 < u0", iw::AsmType::Int()},
+ {"fround(123456.78) < f0", iw::AsmType::Int()},
+ {"9876543.201 < d0", iw::AsmType::Int()},
+ {"-1025 <= s0", iw::AsmType::Int()},
+ {"123456789 <= u0", iw::AsmType::Int()},
+ {"fround(123456.78) <= f0", iw::AsmType::Int()},
+ {"9876543.201 <= d0", iw::AsmType::Int()},
+ {"-1025 > s0", iw::AsmType::Int()},
+ {"123456789 > u0", iw::AsmType::Int()},
+ {"fround(123456.78) > f0", iw::AsmType::Int()},
+ {"9876543.201 > d0", iw::AsmType::Int()},
+ {"-1025 >= s0", iw::AsmType::Int()},
+ {"123456789 >= u0", iw::AsmType::Int()},
+ {"fround(123456.78) >= f0", iw::AsmType::Int()},
+ {"9876543.201 >= d0", iw::AsmType::Int()},
+ // TODO(jpp): maybe add Const <op> Const.
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithLocal(DynamicGlobal("u0"), iw::AsmType::Unsigned())
+ ->WithLocal(DynamicGlobal("u1"), iw::AsmType::Unsigned())
+ ->WithLocal(DynamicGlobal("s0"), iw::AsmType::Signed())
+ ->WithLocal(DynamicGlobal("s1"), iw::AsmType::Signed())
+ ->WithLocal(DynamicGlobal("f0"), iw::AsmType::Float())
+ ->WithLocal(DynamicGlobal("f1"), iw::AsmType::Float())
+ ->WithLocal(DynamicGlobal("d0"), iw::AsmType::Double())
+ ->WithLocal(DynamicGlobal("d1"), iw::AsmType::Double())
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateBitwiseExpression) {
+ auto v2s = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
+ return ret;
+ };
+
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ {"iish0 & iish1", iw::AsmType::Signed()},
+ {"iish0 | iish1", iw::AsmType::Signed()},
+ {"iish0 ^ iish1", iw::AsmType::Signed()},
+ {"iish0 & -1", iw::AsmType::Signed()},
+ {"iish0 | -1", iw::AsmType::Signed()},
+ {"iish0 ^ -1", iw::AsmType::Signed()},
+ {"2147483648 & iish1", iw::AsmType::Signed()},
+ {"2147483648 | iish1", iw::AsmType::Signed()},
+ {"2147483648 ^ iish1", iw::AsmType::Signed()},
+ {"2147483648 & 0", iw::AsmType::FixNum()}, // Violation: signed.
+ {"2147483648 | 0", iw::AsmType::Signed()},
+ {"2147483648 ^ 0", iw::AsmType::Signed()},
+ {"2134651 & 123", iw::AsmType::FixNum()}, // Violation: signed.
+ {"2134651 | 123", iw::AsmType::FixNum()}, // Violation: signed.
+ {"2134651 ^ 123", iw::AsmType::FixNum()}, // Violation: signed.
+ {"make_signed()|0", iw::AsmType::Signed()},
+ {"signed()|0", iw::AsmType::Signed()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithLocal(DynamicGlobal("iish1"), iw::AsmType::Intish())
+ ->WithLocal(DynamicGlobal("iish0"), iw::AsmType::Intish())
+ ->WithGlobal(DynamicGlobal("signed"), v2s)
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateConditionalExpression) {
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ {"i0 ? i0 : i1", iw::AsmType::Int()},
+ {"i0 ? f0 : f1", iw::AsmType::Float()},
+ {"i0 ? d0 : d1", iw::AsmType::Double()},
+ {"0 ? -1 : 2147483648", iw::AsmType::Int()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithLocal(DynamicGlobal("i0"), iw::AsmType::Int())
+ ->WithLocal(DynamicGlobal("i1"), iw::AsmType::Int())
+ ->WithLocal(DynamicGlobal("f0"), iw::AsmType::Float())
+ ->WithLocal(DynamicGlobal("f1"), iw::AsmType::Float())
+ ->WithLocal(DynamicGlobal("d0"), iw::AsmType::Double())
+ ->WithLocal(DynamicGlobal("d1"), iw::AsmType::Double())
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(ValidateCall) {
+ auto v2f = [](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, iw::AsmType::Float());
+ return ret;
+ };
+
+ // ifd2_ is a helper function that returns a lambda for creating a function
+ // type that accepts an int, a float, and a double. ret_type_factory is a
+ // pointer to an AsmType*() function, and (*ret_type_factory)() returns the
+ // desired return type. For example,
+ //
+ // ifd2_(&iw::AsmType::Float)
+ //
+ // returns an AsmType representing an asm.js function with the following
+ // signature:
+ //
+ // float(int, float, double)
+ auto ifd2_ = [](iw::AsmType* (
+ *ret_type_factory)()) -> std::function<iw::AsmType*(Zone*)> {
+ return [ret_type_factory](Zone* zone) -> iw::AsmType* {
+ auto* ret = iw::AsmType::Function(zone, (*ret_type_factory)());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Int());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Float());
+ ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
+ return ret;
+ };
+ };
+ auto ifd2f = ifd2_(&iw::AsmType::Float);
+ auto ifd2d = ifd2_(&iw::AsmType::Double);
+ auto ifd2i = ifd2_(&iw::AsmType::Signed);
+
+ // Just like ifd2_, but this one returns a type representing a function table.
+ auto tbl_ifd2_ = [](size_t tbl_size, iw::AsmType* (*ret_type_factory)())
+ -> std::function<iw::AsmType*(Zone*)> {
+ return [tbl_size, ret_type_factory](Zone* zone) -> iw::AsmType* {
+ auto* signature = iw::AsmType::Function(zone, (*ret_type_factory)());
+ signature->AsFunctionType()->AddArgument(iw::AsmType::Int());
+ signature->AsFunctionType()->AddArgument(iw::AsmType::Float());
+ signature->AsFunctionType()->AddArgument(iw::AsmType::Double());
+
+ auto* ret = iw::AsmType::FunctionTableType(zone, tbl_size, signature);
+ return ret;
+ };
+ };
+ auto ifd2f_tbl = tbl_ifd2_(32, &iw::AsmType::Float);
+ auto ifd2d_tbl = tbl_ifd2_(64, &iw::AsmType::Double);
+ auto ifd2i_tbl = tbl_ifd2_(4096, &iw::AsmType::Signed);
+
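+ // The masks in the function-table tests below (& 31, & 63, and & 4095)
+ // match the table sizes chosen above (32, 64, and 4096).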
+ const struct {
+ const char* expression;
+ iw::AsmType* load_type;
+ } kTests[] = {
+ // -----------------------------------------------------------------------
+ // Functions.
+ {"fround(v2f())", iw::AsmType::Float()},
+ {"fround(fish)", iw::AsmType::Float()},
+ {"fround(dq)", iw::AsmType::Float()},
+ {"fround(s)", iw::AsmType::Float()},
+ {"fround(u)", iw::AsmType::Float()},
+ {"ffi()|0", iw::AsmType::Signed()},
+ {"ffi(1.0)|0", iw::AsmType::Signed()},
+ {"ffi(1.0, 2.0)|0", iw::AsmType::Signed()},
+ {"ffi(1.0, 2.0, 3)|0", iw::AsmType::Signed()},
+ {"ffi(1.0, 2.0, 3, 4)|0", iw::AsmType::Signed()},
+ {"+ffi()", iw::AsmType::Double()},
+ {"+ffi(1.0)", iw::AsmType::Double()},
+ {"+ffi(1.0, 2.0)", iw::AsmType::Double()},
+ {"+ffi(1.0, 2.0, 3)", iw::AsmType::Double()},
+ {"+ffi(1.0, 2.0, 3, 4)", iw::AsmType::Double()},
+ {"fround(ifd2f(1, fround(1), 1.0))", iw::AsmType::Float()},
+ {"+ifd2d(1, fround(1), 1.0)", iw::AsmType::Double()},
+ {"ifd2i(1, fround(1), 1.0)|0", iw::AsmType::Signed()},
+ // -----------------------------------------------------------------------
+ // Function tables.
+ {"fround(ifd2f_tbl[iish & 31](1, fround(1), 1.0))", iw::AsmType::Float()},
+ {"+ifd2d_tbl[iish & 63](1, fround(1), 1.0)", iw::AsmType::Double()},
+ {"ifd2i_tbl[iish & 4095](1, fround(1), 1.0)|0", iw::AsmType::Signed()},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Expression(test->expression))
+ ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
+ ->WithImport(DynamicGlobal("ffi"), iw::AsmTyper::kFFI)
+ ->WithLocal(DynamicGlobal("fish"), iw::AsmType::Floatish())
+ ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
+ ->WithLocal(DynamicGlobal("s"), iw::AsmType::Signed())
+ ->WithLocal(DynamicGlobal("u"), iw::AsmType::Unsigned())
+ ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
+ ->WithGlobal(DynamicGlobal("v2f"), v2f)
+ ->WithGlobal(DynamicGlobal("ifd2f_tbl"), ifd2f_tbl)
+ ->WithGlobal(DynamicGlobal("ifd2d_tbl"), ifd2d_tbl)
+ ->WithGlobal(DynamicGlobal("ifd2i_tbl"), ifd2i_tbl)
+ ->SucceedsWithExactType(test->load_type)) {
+ std::cerr << "Test:\n" << test->expression;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(CannotReferenceModuleName) {
+ v8::V8::Initialize();
+
+ const struct {
+ const char* module;
+ const char* error_message;
+ } kTests[] = {
+ {"function asm() {\n"
+ " 'use asm';\n"
+ " function f() { asm; }\n"
+ "}",
+ "accessed by ordinary expressions"},
+ {"function asm() { 'use asm'; return asm; }", "Module cannot export"},
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ const auto* test = kTests + ii;
+ if (!ValidationOf(Module(test->module))
+ ->FailsWithMessage(test->error_message)) {
+ std::cerr << "Test:\n" << test->module;
+ CHECK(false);
+ }
+ }
+}
+
+TEST(InvalidSourceLayout) {
+ const char* kTests[] = {
+ "function asm() {\n"
+ " 'use asm';\n"
+ " function f() {}\n"
+ " var v = 0;\n"
+ " var v_v = [f];\n"
+ " return f;\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " function f() {}\n"
+ " var v_v = [f];\n"
+ " var v = 0;\n"
+ " return f;\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " function f() {}\n"
+ " var v_v = [f];\n"
+ " return f;\n"
+ " var v = 0;\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " var v = 0;\n"
+ " var v_v = [f];\n"
+ " function f() {}\n"
+ " return f;\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " var v = 0;\n"
+ " var v_v = [f];\n"
+ " return f;\n"
+ " function f() {}\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " var v = 0;\n"
+ " function f() {}\n"
+ " return f;\n"
+ " var v_v = [f];\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " var v = 0;\n"
+ " function f() {}\n"
+ " var v1 = 0;\n"
+ " var v_v = [f];\n"
+ " return f;\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " var v = 0;\n"
+ " function f() {}\n"
+ " var v_v = [f];\n"
+ " var v1 = 0;\n"
+ " return f;\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " var v = 0;\n"
+ " function f() {}\n"
+ " var v_v = [f];\n"
+ " return f;\n"
+ " var v1 = 0;\n"
+ "}",
+ "function asm() {\n"
+ " function f() {}\n"
+ " 'use asm';\n"
+ " var v_v = [f];\n"
+ " return f;\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " return f;\n"
+ " var v = 0;\n"
+ " function f() {}\n"
+ " var v_v = [f];\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " return f;\n"
+ " function f() {}\n"
+ "}",
+ "function __f_59() {\n"
+ " 'use asm';\n"
+ " function __f_110() {\n"
+ " return 71;\n"
+ " }\n"
+ " function __f_21() {\n"
+ " var __v_38 = 0;\n"
+ " return __v_23[__v_38&0]() | 0;\n"
+ " }\n"
+ " return {__f_21:__f_21};\n"
+ " var __v_23 = [__f_110];\n"
+ "}",
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ if (!ValidationOf(Module(kTests[ii]))
+ ->FailsWithMessage("Invalid asm.js source code layout")) {
+ std::cerr << "Test:\n" << kTests[ii];
+ CHECK(false);
+ }
+ }
+}
+
+// This issue was triggered because of the "lenient" 8-bit heap access code
+// path. The canonical heap access index validation fails because __34 is not an
+// intish. Then, during the "lenient" code path for accessing elements in 8-bit
+// heap views, the __34 node in the indexing expression would be re-tagged, thus
+// causing the assertion failure.
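+//
+// For reference, the triggering index expression in the tests below is
+// __v_34[__v_34 >> 0]: the heap view itself appears as the shift operand,
+// which is why the canonical intish check rejects the index.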
+TEST(B63099) {
+ const char* kTests[] = {
+ "function __f_109(stdlib, __v_36, buffer) {\n"
+ " 'use asm';\n"
+ " var __v_34 = new stdlib.Uint8Array(buffer);\n"
+ " function __f_22() {__v_34[__v_34>>0]|0 + 1 | 0;\n"
+ " }\n"
+ "}",
+ "function __f_109(stdlib, __v_36, buffer) {\n"
+ " 'use asm';\n"
+ " var __v_34 = new stdlib.Int8Array(buffer);\n"
+ " function __f_22() {__v_34[__v_34>>0]|0 + 1 | 0;\n"
+ " }\n"
+ "}",
+ };
+
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ if (!ValidationOf(Module(kTests[ii]))
+ ->FailsWithMessage("Invalid heap access index")) {
+ std::cerr << "Test:\n" << kTests[ii];
+ CHECK(false);
+ }
+ }
+}
+
+// This issue was triggered because assignments to immutable symbols (e.g., the
+// module's name, or any of the asm.js module's parameters) were not being
+// handled.
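+//
+// The tests below each assign to one such symbol from inside a module
+// function: the module's name, the stdlib/foreign/heap parameters, and
+// variables bound to stdlib imports (Math.fround, Math.E).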
+TEST(B640194) {
+ const char* kTests[] = {
+ "function asm() {\n"
+ " 'use asm';\n"
+ " function f() {\n"
+ " asm = 0;\n"
+ " }\n"
+ " return f;\n"
+ "}",
+ "function asm(stdlib) {\n"
+ " 'use asm';\n"
+ " function f() {\n"
+ " stdlib = 0;\n"
+ " }\n"
+ " return f;\n"
+ "}",
+ "function asm(stdlib, foreign) {\n"
+ " 'use asm';\n"
+ " function f() {\n"
+ " foreign = 0;\n"
+ " }\n"
+ " return f;\n"
+ "}",
+ "function asm(stdlib, foreign, heap) {\n"
+ " 'use asm';\n"
+ " function f() {\n"
+ " heap = 0;\n"
+ " }\n"
+ " return f;\n"
+ "}",
+ "function asm(stdlib, foreign, heap) {\n"
+ " 'use asm';\n"
+ " var f = stdlib.Math.fround;\n"
+ " function f() {\n"
+ " f = 0;\n"
+ " }\n"
+ " return f;\n"
+ "}",
+ "function asm(stdlib, foreign, heap) {\n"
+ " 'use asm';\n"
+ " var E = stdlib.Math.E;\n"
+ " function f() {\n"
+ " E = 0;\n"
+ " }\n"
+ " return f;\n"
+ "}",
+ };
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ if (!ValidationOf(Module(kTests[ii]))
+ ->FailsWithMessage("Can't assign to immutable symbol")) {
+ std::cerr << "Test:\n" << kTests[ii];
+ CHECK(false);
+ }
+ }
+}
+
+} // namespace
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 5681f704e1..312001a35b 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -186,7 +186,7 @@ int main(int argc, char* argv[]) {
}
}
- v8::V8::InitializeICU();
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 2e00e88b5b..217d74b6dc 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -25,277 +25,344 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# The sources are kept automatically in sync with BUILD.gn.
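+# Each 'cctest_sources*' list below is spliced into the matching target via
+# '<@(list_name)'.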
+
{
'variables': {
'v8_code': 1,
'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
+ 'cctest_sources': [ ### gcmole(all) ###
+ 'asmjs/test-asm-typer.cc',
+ 'compiler/c-signature.h',
+ 'compiler/codegen-tester.cc',
+ 'compiler/codegen-tester.h',
+ 'compiler/code-assembler-tester.h',
+ 'compiler/function-tester.h',
+ 'compiler/graph-builder-tester.h',
+ 'compiler/test-basic-block-profiler.cc',
+ 'compiler/test-branch-combine.cc',
+ 'compiler/test-run-unwinding-info.cc',
+ 'compiler/test-gap-resolver.cc',
+ 'compiler/test-graph-visualizer.cc',
+ 'compiler/test-code-assembler.cc',
+ 'compiler/test-instruction.cc',
+ 'compiler/test-js-context-specialization.cc',
+ 'compiler/test-js-constant-cache.cc',
+ 'compiler/test-js-typed-lowering.cc',
+ 'compiler/test-jump-threading.cc',
+ 'compiler/test-linkage.cc',
+ 'compiler/test-loop-assignment-analysis.cc',
+ 'compiler/test-loop-analysis.cc',
+ 'compiler/test-machine-operator-reducer.cc',
+ 'compiler/test-multiple-return.cc',
+ 'compiler/test-node.cc',
+ 'compiler/test-operator.cc',
+ 'compiler/test-osr.cc',
+ 'compiler/test-representation-change.cc',
+ 'compiler/test-run-bytecode-graph-builder.cc',
+ 'compiler/test-run-calls-to-external-references.cc',
+ 'compiler/test-run-deopt.cc',
+ 'compiler/test-run-inlining.cc',
+ 'compiler/test-run-intrinsics.cc',
+ 'compiler/test-run-jsbranches.cc',
+ 'compiler/test-run-jscalls.cc',
+ 'compiler/test-run-jsexceptions.cc',
+ 'compiler/test-run-jsobjects.cc',
+ 'compiler/test-run-jsops.cc',
+ 'compiler/test-run-load-store.cc',
+ 'compiler/test-run-machops.cc',
+ 'compiler/test-run-native-calls.cc',
+ 'compiler/test-run-stackcheck.cc',
+ 'compiler/test-run-stubs.cc',
+ 'compiler/test-run-variables.cc',
+ 'compiler/test-run-wasm-machops.cc',
+ 'compiler/test-simplified-lowering.cc',
+ 'cctest.cc',
+ 'interpreter/interpreter-tester.cc',
+ 'interpreter/source-position-matcher.cc',
+ 'interpreter/source-position-matcher.h',
+ 'interpreter/test-bytecode-generator.cc',
+ 'interpreter/test-interpreter.cc',
+ 'interpreter/test-interpreter-intrinsics.cc',
+ 'interpreter/test-source-positions.cc',
+ 'interpreter/bytecode-expectations-printer.cc',
+ 'interpreter/bytecode-expectations-printer.h',
+ 'gay-fixed.cc',
+ 'gay-precision.cc',
+ 'gay-shortest.cc',
+ 'heap/heap-tester.h',
+ 'heap/heap-utils.cc',
+ 'heap/heap-utils.h',
+ 'heap/test-alloc.cc',
+ 'heap/test-array-buffer-tracker.cc',
+ 'heap/test-compaction.cc',
+ 'heap/test-heap.cc',
+ 'heap/test-incremental-marking.cc',
+ 'heap/test-lab.cc',
+ 'heap/test-mark-compact.cc',
+ 'heap/test-page-promotion.cc',
+ 'heap/test-spaces.cc',
+ 'libplatform/test-tracing.cc',
+ 'libsampler/test-sampler.cc',
+ 'print-extension.cc',
+ 'profiler-extension.cc',
+ 'test-access-checks.cc',
+ 'test-accessors.cc',
+ 'test-api.cc',
+ 'test-api.h',
+ 'test-api-accessors.cc',
+ 'test-api-interceptors.cc',
+ 'test-api-fast-accessor-builder.cc',
+ 'test-array-list.cc',
+ 'test-ast.cc',
+ 'test-atomicops.cc',
+ 'test-bignum.cc',
+ 'test-bignum-dtoa.cc',
+ 'test-bit-vector.cc',
+ 'test-circular-queue.cc',
+ 'test-code-cache.cc',
+ 'test-code-layout.cc',
+ 'test-code-stub-assembler.cc',
+ 'test-compiler.cc',
+ 'test-constantpool.cc',
+ 'test-conversions.cc',
+ 'test-cpu-profiler.cc',
+ 'test-date.cc',
+ 'test-debug.cc',
+ 'test-decls.cc',
+ 'test-deoptimization.cc',
+ 'test-dictionary.cc',
+ 'test-diy-fp.cc',
+ 'test-double.cc',
+ 'test-dtoa.cc',
+ 'test-elements-kind.cc',
+ 'test-fast-dtoa.cc',
+ 'test-feedback-vector.cc',
+ 'test-field-type-tracking.cc',
+ 'test-fixed-dtoa.cc',
+ 'test-flags.cc',
+ 'test-func-name-inference.cc',
+ 'test-global-handles.cc',
+ 'test-global-object.cc',
+ 'test-hashing.cc',
+ 'test-hashmap.cc',
+ 'test-heap-profiler.cc',
+ 'test-hydrogen-types.cc',
+ 'test-identity-map.cc',
+ 'test-inobject-slack-tracking.cc',
+ 'test-list.cc',
+ 'test-liveedit.cc',
+ 'test-lockers.cc',
+ 'test-log.cc',
+ 'test-mementos.cc',
+ 'test-object.cc',
+ 'test-parsing.cc',
+ 'test-platform.cc',
+ 'test-profile-generator.cc',
+ 'test-random-number-generator.cc',
+ 'test-receiver-check-hidden-prototype.cc',
+ 'test-regexp.cc',
+ 'test-representation.cc',
+ 'test-sampler-api.cc',
+ 'test-serialize.cc',
+ 'test-simd.cc',
+ 'test-strings.cc',
+ 'test-symbols.cc',
+ 'test-strtod.cc',
+ 'test-thread-termination.cc',
+ 'test-threads.cc',
+ 'test-trace-event.cc',
+ 'test-transitions.cc',
+ 'test-typedarrays.cc',
+ 'test-types.cc',
+ 'test-unbound-queue.cc',
+ 'test-unboxed-doubles.cc',
+ 'test-unique.cc',
+ 'test-unscopables-hidden-prototype.cc',
+ 'test-usecounters.cc',
+ 'test-utils.cc',
+ 'test-version.cc',
+ 'test-weakmaps.cc',
+ 'test-weaksets.cc',
+ 'trace-extension.cc',
+ 'wasm/test-run-wasm.cc',
+ 'wasm/test-run-wasm-64.cc',
+ 'wasm/test-run-wasm-asmjs.cc',
+ 'wasm/test-run-wasm-interpreter.cc',
+ 'wasm/test-run-wasm-js.cc',
+ 'wasm/test-run-wasm-module.cc',
+ 'wasm/test-run-wasm-relocation.cc',
+ 'wasm/test-signatures.h',
+ 'wasm/test-wasm-function-name-table.cc',
+ 'wasm/test-wasm-stack.cc',
+ 'wasm/test-wasm-trap-position.cc',
+ 'wasm/wasm-run-utils.h',
+ ],
+ 'cctest_sources_ia32': [ ### gcmole(arch:ia32) ###
+ 'test-assembler-ia32.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-ia32.cc',
+ 'test-disasm-ia32.cc',
+ 'test-macro-assembler-ia32.cc',
+ 'test-log-stack-tracer.cc',
+ 'test-run-wasm-relocation-ia32.cc'
+ ],
+ 'cctest_sources_x64': [ ### gcmole(arch:x64) ###
+ 'test-assembler-x64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-x64.cc',
+ 'test-disasm-x64.cc',
+ 'test-macro-assembler-x64.cc',
+ 'test-log-stack-tracer.cc',
+ 'test-run-wasm-relocation-x64.cc'
+ ],
+ 'cctest_sources_arm': [ ### gcmole(arch:arm) ###
+ 'test-assembler-arm.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-arm.cc',
+ 'test-disasm-arm.cc',
+ 'test-macro-assembler-arm.cc',
+ 'test-run-wasm-relocation-arm.cc'
+ ],
+ 'cctest_sources_arm64': [ ### gcmole(arch:arm64) ###
+ 'test-utils-arm64.cc',
+ 'test-assembler-arm64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-arm64.cc',
+ 'test-disasm-arm64.cc',
+ 'test-fuzz-arm64.cc',
+ 'test-javascript-arm64.cc',
+ 'test-js-arm64-variables.cc',
+ 'test-run-wasm-relocation-arm64.cc'
+ ],
+ 'cctest_sources_s390': [ ### gcmole(arch:s390) ###
+ 'test-assembler-s390.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-s390.cc'
+ ],
+ 'cctest_sources_ppc': [ ### gcmole(arch:ppc) ###
+ 'test-assembler-ppc.cc',
+ 'test-code-stubs.cc',
+ 'test-disasm-ppc.cc'
+ ],
+ 'cctest_sources_mips': [ ### gcmole(arch:mips) ###
+ 'test-assembler-mips.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-mips.cc',
+ 'test-disasm-mips.cc',
+ 'test-macro-assembler-mips.cc'
+ ],
+ 'cctest_sources_mipsel': [ ### gcmole(arch:mipsel) ###
+ 'test-assembler-mips.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-mips.cc',
+ 'test-disasm-mips.cc',
+ 'test-macro-assembler-mips.cc'
+ ],
+ 'cctest_sources_mips64': [ ### gcmole(arch:mips64) ###
+ 'test-assembler-mips64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-mips64.cc',
+ 'test-disasm-mips64.cc',
+ 'test-macro-assembler-mips64.cc'
+ ],
+ 'cctest_sources_mips64el': [ ### gcmole(arch:mips64el) ###
+ 'test-assembler-mips64.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-mips64.cc',
+ 'test-disasm-mips64.cc',
+ 'test-macro-assembler-mips64.cc'
+ ],
+ 'cctest_sources_x87': [ ### gcmole(arch:x87) ###
+ 'test-assembler-x87.cc',
+ 'test-code-stubs.cc',
+ 'test-code-stubs-x87.cc',
+ 'test-disasm-x87.cc',
+ 'test-macro-assembler-x87.cc',
+ 'test-log-stack-tracer.cc',
+ 'test-run-wasm-relocation-x87.cc'
+ ],
},
- 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
'targets': [
{
'target_name': 'cctest',
'type': 'executable',
'dependencies': [
'resources',
- '../../tools/gyp/v8.gyp:v8_libplatform',
+ '../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
'../..',
],
- 'sources': [ ### gcmole(all) ###
+ 'sources': [
+ '<@(cctest_sources)',
'<(generated_file)',
- 'compiler/c-signature.h',
- 'compiler/codegen-tester.cc',
- 'compiler/codegen-tester.h',
- 'compiler/function-tester.h',
- 'compiler/graph-builder-tester.h',
- 'compiler/test-basic-block-profiler.cc',
- 'compiler/test-branch-combine.cc',
- 'compiler/test-changes-lowering.cc',
- 'compiler/test-code-stub-assembler.cc',
- 'compiler/test-gap-resolver.cc',
- 'compiler/test-graph-visualizer.cc',
- 'compiler/test-instruction.cc',
- 'compiler/test-js-context-specialization.cc',
- 'compiler/test-js-constant-cache.cc',
- 'compiler/test-js-typed-lowering.cc',
- 'compiler/test-jump-threading.cc',
- 'compiler/test-linkage.cc',
- 'compiler/test-loop-assignment-analysis.cc',
- 'compiler/test-loop-analysis.cc',
- 'compiler/test-machine-operator-reducer.cc',
- 'compiler/test-multiple-return.cc',
- 'compiler/test-node.cc',
- 'compiler/test-operator.cc',
- 'compiler/test-osr.cc',
- 'compiler/test-pipeline.cc',
- 'compiler/test-representation-change.cc',
- 'compiler/test-run-bytecode-graph-builder.cc',
- 'compiler/test-run-calls-to-external-references.cc',
- 'compiler/test-run-deopt.cc',
- 'compiler/test-run-inlining.cc',
- 'compiler/test-run-intrinsics.cc',
- 'compiler/test-run-jsbranches.cc',
- 'compiler/test-run-jscalls.cc',
- 'compiler/test-run-jsexceptions.cc',
- 'compiler/test-run-jsobjects.cc',
- 'compiler/test-run-jsops.cc',
- 'compiler/test-run-machops.cc',
- 'compiler/test-run-native-calls.cc',
- 'compiler/test-run-stackcheck.cc',
- 'compiler/test-run-stubs.cc',
- 'compiler/test-run-variables.cc',
- 'compiler/test-simplified-lowering.cc',
- 'cctest.cc',
- 'expression-type-collector.cc',
- 'expression-type-collector.h',
- 'interpreter/interpreter-tester.cc',
- 'interpreter/test-bytecode-generator.cc',
- 'interpreter/test-interpreter.cc',
- 'interpreter/test-interpreter-intrinsics.cc',
- 'interpreter/bytecode-expectations-printer.cc',
- 'interpreter/bytecode-expectations-printer.h',
- 'gay-fixed.cc',
- 'gay-precision.cc',
- 'gay-shortest.cc',
- 'heap/heap-tester.h',
- 'heap/test-alloc.cc',
- 'heap/test-compaction.cc',
- 'heap/test-heap.cc',
- 'heap/test-incremental-marking.cc',
- 'heap/test-lab.cc',
- 'heap/test-mark-compact.cc',
- 'heap/test-spaces.cc',
- 'heap/utils-inl.h',
- 'print-extension.cc',
- 'profiler-extension.cc',
- 'test-accessors.cc',
- 'test-api.cc',
- 'test-api.h',
- 'test-api-accessors.cc',
- 'test-api-interceptors.cc',
- 'test-api-fast-accessor-builder.cc',
- 'test-array-list.cc',
- 'test-ast.cc',
- 'test-ast-expression-visitor.cc',
- 'test-asm-validator.cc',
- 'test-atomicops.cc',
- 'test-bignum.cc',
- 'test-bignum-dtoa.cc',
- 'test-bit-vector.cc',
- 'test-circular-queue.cc',
- 'test-compiler.cc',
- 'test-constantpool.cc',
- 'test-conversions.cc',
- 'test-cpu-profiler.cc',
- 'test-date.cc',
- 'test-debug.cc',
- 'test-decls.cc',
- 'test-deoptimization.cc',
- 'test-dictionary.cc',
- 'test-diy-fp.cc',
- 'test-double.cc',
- 'test-dtoa.cc',
- 'test-elements-kind.cc',
- 'test-fast-dtoa.cc',
- 'test-feedback-vector.cc',
- 'test-field-type-tracking.cc',
- 'test-fixed-dtoa.cc',
- 'test-flags.cc',
- 'test-func-name-inference.cc',
- 'test-global-handles.cc',
- 'test-global-object.cc',
- 'test-hashing.cc',
- 'test-hashmap.cc',
- 'test-heap-profiler.cc',
- 'test-hydrogen-types.cc',
- 'test-identity-map.cc',
- 'test-inobject-slack-tracking.cc',
- 'test-list.cc',
- 'test-liveedit.cc',
- 'test-lockers.cc',
- 'test-log.cc',
- 'test-microtask-delivery.cc',
- 'test-mementos.cc',
- 'test-object-observe.cc',
- 'test-parsing.cc',
- 'test-platform.cc',
- 'test-profile-generator.cc',
- 'test-random-number-generator.cc',
- 'test-receiver-check-hidden-prototype.cc',
- 'test-regexp.cc',
- 'test-reloc-info.cc',
- 'test-representation.cc',
- 'test-sampler-api.cc',
- 'test-serialize.cc',
- 'test-simd.cc',
- 'test-strings.cc',
- 'test-symbols.cc',
- 'test-strtod.cc',
- 'test-thread-termination.cc',
- 'test-threads.cc',
- 'test-trace-event.cc',
- 'test-transitions.cc',
- 'test-typedarrays.cc',
- 'test-types.cc',
- 'test-typing-reset.cc',
- 'test-unbound-queue.cc',
- 'test-unboxed-doubles.cc',
- 'test-unique.cc',
- 'test-unscopables-hidden-prototype.cc',
- 'test-utils.cc',
- 'test-version.cc',
- 'test-weakmaps.cc',
- 'test-weaksets.cc',
- 'trace-extension.cc',
- 'wasm/test-run-wasm.cc',
- 'wasm/test-run-wasm-64.cc',
- 'wasm/test-run-wasm-js.cc',
- 'wasm/test-run-wasm-module.cc',
- 'wasm/test-signatures.h',
- 'wasm/wasm-run-utils.h',
],
'conditions': [
['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- 'test-assembler-ia32.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs-ia32.cc',
- 'test-disasm-ia32.cc',
- 'test-macro-assembler-ia32.cc',
- 'test-log-stack-tracer.cc',
- 'test-run-wasm-relocation-ia32.cc'
+ 'sources': [
+ '<@(cctest_sources_ia32)',
],
}],
['v8_target_arch=="x64"', {
- 'sources': [ ### gcmole(arch:x64) ###
- 'test-assembler-x64.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs-x64.cc',
- 'test-disasm-x64.cc',
- 'test-macro-assembler-x64.cc',
- 'test-log-stack-tracer.cc',
- 'test-run-wasm-relocation-x64.cc'
+ 'sources': [
+ '<@(cctest_sources_x64)',
],
}],
['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'test-assembler-arm.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs-arm.cc',
- 'test-disasm-arm.cc',
- 'test-macro-assembler-arm.cc',
- 'test-run-wasm-relocation-arm.cc'
+ 'sources': [
+ '<@(cctest_sources_arm)',
],
}],
['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- 'test-utils-arm64.cc',
- 'test-assembler-arm64.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs-arm64.cc',
- 'test-disasm-arm64.cc',
- 'test-fuzz-arm64.cc',
- 'test-javascript-arm64.cc',
- 'test-js-arm64-variables.cc',
- 'test-run-wasm-relocation-arm64.cc'
+ 'sources': [
+ '<@(cctest_sources_arm64)',
],
}],
['v8_target_arch=="s390"', {
- 'sources': [ ### gcmole(arch:s390) ###
- 'test-assembler-s390.cc',
- 'test-code-stubs.cc',
- 'test-disasm-s390.cc'
+ 'sources': [
+ '<@(cctest_sources_s390)',
],
}],
['v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390x) ###
- 'test-assembler-s390.cc',
- 'test-code-stubs.cc',
- 'test-disasm-s390.cc'
+ 'sources': [
+ '<@(cctest_sources_s390)',
],
}],
['v8_target_arch=="ppc"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- 'test-assembler-ppc.cc',
- 'test-code-stubs.cc',
- 'test-disasm-ppc.cc'
+ 'sources': [
+ '<@(cctest_sources_ppc)',
],
}],
['v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc64) ###
- 'test-assembler-ppc.cc',
- 'test-code-stubs.cc',
- 'test-disasm-ppc.cc'
+ 'sources': [
+ '<@(cctest_sources_ppc)',
+ ],
+ }],
+ ['v8_target_arch=="mips"', {
+ 'sources': [
+ '<@(cctest_sources_mips)',
],
}],
['v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- 'test-assembler-mips.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs-mips.cc',
- 'test-disasm-mips.cc',
- 'test-macro-assembler-mips.cc'
+ 'sources': [
+ '<@(cctest_sources_mipsel)',
+ ],
+ }],
+ ['v8_target_arch=="mips64"', {
+ 'sources': [
+ '<@(cctest_sources_mips64)',
],
}],
['v8_target_arch=="mips64el"', {
'sources': [
- 'test-assembler-mips64.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs-mips64.cc',
- 'test-disasm-mips64.cc',
- 'test-macro-assembler-mips64.cc'
+ '<@(cctest_sources_mips64el)',
],
}],
['v8_target_arch=="x87"', {
- 'sources': [ ### gcmole(arch:x87) ###
- 'test-assembler-x87.cc',
- 'test-code-stubs.cc',
- 'test-code-stubs-x87.cc',
- 'test-disasm-x87.cc',
- 'test-macro-assembler-x87.cc',
- 'test-log-stack-tracer.cc',
- 'test-run-wasm-relocation-x87.cc'
+ 'sources': [
+ '<@(cctest_sources_x87)',
],
}],
[ 'OS=="linux" or OS=="qnx"', {
@@ -327,9 +394,9 @@
['component=="shared_library"', {
# cctest can't be built against a shared library, so we need to
# depend on the underlying static target in that case.
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
+ 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
}, {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ 'dependencies': ['../../src/v8.gyp:v8'],
}],
],
},
@@ -372,14 +439,14 @@
'target_name': 'generate-bytecode-expectations',
'type': 'executable',
'dependencies': [
- '../../tools/gyp/v8.gyp:v8_libplatform',
+ '../../src/v8.gyp:v8_libplatform',
],
'conditions': [
['component=="shared_library"', {
# Same as cctest, we need to depend on the underlying static target.
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
+ 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
}, {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ 'dependencies': ['../../src/v8.gyp:v8'],
}],
],
'include_dirs+': [
@@ -402,7 +469,7 @@
'cctest',
],
'includes': [
- '../../build/isolate.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'cctest_exe.isolate',
@@ -415,7 +482,7 @@
'cctest_exe_run',
],
'includes': [
- '../../build/isolate.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'cctest.isolate',
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index c992ab6160..5a88f0f701 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -86,43 +86,9 @@
'test-func-name-inference/UpperCaseClass': [FAIL],
'test-func-name-inference/LowerCaseClass': [FAIL],
- ##############################################################################
- # TurboFan compiler failures.
-
- # Some tests are just too slow to run for now.
- 'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [PASS, NO_VARIANTS],
- 'test-heap-profiler/ManyLocalsInSharedContext': [PASS, NO_VARIANTS],
- 'test-serialize/CodeSerializerLargeCodeObject': [PASS, NO_VARIANTS],
- 'test-debug/ThreadedDebugging': [PASS, NO_VARIANTS],
# BUG(3742).
'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
- # TODO(mstarzinger): The rewriter is not being called when top-level code is
- # optimized and hence scripts don't "return" the correct value. Fix this.
- 'test-compiler/CompileFunctionInContext*': [PASS, NO_VARIANTS],
-
- # TODO(bmeurer): TurboFan embeds strong references to all kinds of objects
- # via deoptimization data (Crankshaft also does this, but lack proper test
- # coverage).
- 'test-heap/ObjectsInOptimizedCodeAreWeak': [PASS, NO_VARIANTS],
-
- # TurboFan doesn't support allocation sites currently.
- 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringAllocationFolding': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringdoubleArrayLiterals': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringDoubleArrayProperties': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringMixedInObjectProperties': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringNestedDoubleLiterals': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringNestedObjectLiterals': [PASS, NO_VARIANTS],
- 'test-heap/OptimizedPretenuringObjectArrayLiterals': [PASS, NO_VARIANTS],
-
- # TurboFan cpu profiler result is different.
- 'test-cpu-profiler/CollectDeoptEvents': [PASS, NO_VARIANTS],
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [PASS, NO_VARIANTS],
- 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [PASS, NO_VARIANTS],
- 'test-cpu-profiler/DeoptUntrackedFunction': [PASS, NO_VARIANTS],
-
############################################################################
# Slow tests.
'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
@@ -159,18 +125,6 @@
# BUG(v8:3434).
'test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
-
- # BUG(v8:4795).
- 'test-run-wasm-js/Run_JSSelectAlign_0': [SKIP],
- 'test-run-wasm-js/Run_JSSelectAlign_2': [SKIP],
- 'test-run-wasm-js/Run_JSSelectAlign_4': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_0': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_1': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_2': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_3': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_4': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_5': [SKIP],
- 'test-run-wasm-js/Run_JSSelect_6': [SKIP],
}], # 'arch == arm64'
['arch == arm64 and simulator_run == True', {
@@ -245,27 +199,10 @@
'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [SKIP],
'test-cpu-profiler/JsNativeJsSample': [SKIP],
'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
-}], # 'system == windows'
-##############################################################################
-['byteorder == big', {
- # TODO(mips-team): Fix Wasm for big-endian.
- 'test-run-wasm-module/Run_WasmModule_CallAdd': [SKIP],
- 'test-run-wasm-module/Run_WasmModule_CallMain_recursive': [SKIP],
- 'test-run-wasm-module/Run_WasmModule_ReadLoadedDataSegment': [SKIP],
- 'test-run-wasm-module/Run_WasmModule_Return114': [SKIP],
- 'test-run-wasm-module/Run_WasmModule_CheckMemoryIsZero': [SKIP],
- 'test-run-wasm-module/Run_WasmModule_Global': [SKIP],
- 'test-run-wasm/Run_Wasm_Int32LoadInt16_signext': [SKIP],
- 'test-run-wasm/Run_Wasm_Int32LoadInt16_zeroext': [SKIP],
- 'test-run-wasm/Run_WasmMixedGlobals': [SKIP],
- 'test-run-wasm-64/Run_WasmI64*': [SKIP],
- 'test-run-wasm-64/Run_Wasm_I64*': [SKIP],
- 'test-run-wasm-64/Run_Wasm_LoadStoreI64_sx': [SKIP],
- 'test-run-wasm-64/Run_TestI64WasmRunner': [SKIP],
- 'test-run-wasm-64/Run_WasmCall_Int64Sub': [SKIP],
- 'test-run-wasm-64/Run_Wasm_MemI64_Sum': [SKIP],
-}], # 'byteorder == big'
+ # BUG(5193): Flaky timeout.
+ 'test-sampler/LibSamplerCollectSample': [PASS, ['arch == x64', SKIP]],
+}], # 'system == windows'
##############################################################################
['arch == arm', {
@@ -341,7 +278,12 @@
'test-run-machops/RunFloat64MulAndFloat64Add2': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub1': [SKIP],
'test-run-machops/RunFloat64MulAndFloat64Sub2': [SKIP],
+ 'test-run-machops/RunFloat64Sin': [SKIP],
+ 'test-run-machops/RunFloat64Cos': [SKIP],
+ 'test-run-machops/RunFloat64Expm1': [SKIP],
+ 'test-run-machops/RunFloat64Tan': [SKIP],
'test-cpu-profiler/Inlining': [SKIP],
+ 'test-gap-resolver/FuzzResolver': [SKIP],
}], # 'arch == x87'
##############################################################################
@@ -354,111 +296,6 @@
}], # 'arch == android_arm or arch == android_ia32'
##############################################################################
-['arch == nacl_ia32 or arch == nacl_x64', {
-
- # NaCl builds have problems with threaded tests since Pepper_28.
- # V8 Issue 2786
- 'test-api/Threading1': [SKIP],
- 'test-lockers/ExtensionsRegistration': [SKIP],
-
- # These tests fail as there is no /tmp directory in Native Client.
- 'test-log/LogAccessorCallbacks': [SKIP],
- 'test-log/LogCallbacks': [SKIP],
- 'test-log/ProfLazyMode': [SKIP],
-
- # Native Client doesn't support sockets.
- 'test-debug/DebuggerAgent': [SKIP],
- 'test-debug/DebuggerAgentProtocolOverflowHeader': [SKIP],
- 'test-socket/Socket': [SKIP],
-
- # Profiling doesn't work on Native Client.
- 'test-cpu-profiler/*': [SKIP],
-
- # Fails since 16322 (new test).
- 'test-code-stubs-arm/ConvertDToI': [SKIP],
-
- # BUG(2998).
- 'test-macro-assembler-arm/LoadAndStoreWithRepresentation': [SKIP],
-
- # BUG(3150).
- 'test-api/PreCompileInvalidPreparseDataError': [SKIP],
-
- 'test-types/Convert' : [SKIP],
- 'test-symbols/Create' : [SKIP],
- 'test-parsing/ParserSync' : [SKIP],
- 'test-parsing/ErrorsEvalAndArguments' : [SKIP],
- 'test-parsing/ErrorsFutureStrictReservedWords' : [SKIP],
- 'test-parsing/ErrorsReservedWords' : [SKIP],
- 'test-parsing/ErrorsYieldStrict' : [SKIP],
- 'test-parsing/ErrorsNotAnIdentifierName' : [SKIP],
- 'test-parsing/FunctionDeclaresItselfStrict' : [SKIP],
- 'test-parsing/ErrorsObjectLiteralChecking' : [SKIP],
- 'test-parsing/InvalidLeftHandSide' : [SKIP],
- 'test-heap/GarbageCollection' : [SKIP],
- 'test-heap/GlobalHandles' : [SKIP],
- 'test-heap/WeakGlobalHandlesScavenge' : [SKIP],
- 'test-heap/DeleteWeakGlobalHandle' : [SKIP],
- 'test-heap/GrowAndShrinkNewSpace' : [SKIP],
- 'test-heap/OptimizedAllocationAlwaysInNewSpace' : [SKIP],
- 'test-heap/OptimizedPretenuringAllocationFolding' : [SKIP],
- 'test-heap/OptimizedPretenuringObjectArrayLiterals' : [SKIP],
- 'test-heap/OptimizedPretenuringAllocationFoldingBlocks' : [SKIP],
- 'test-heap/OptimizedPretenuringMixedInObjectProperties' : [SKIP],
- 'test-heap/OptimizedPretenuringDoubleArrayProperties' : [SKIP],
- 'test-heap/OptimizedPretenuringdoubleArrayLiterals' : [SKIP],
- 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals' : [SKIP],
- 'test-heap/OptimizedPretenuringNestedObjectLiterals' : [SKIP],
- 'test-heap/OptimizedPretenuringNestedDoubleLiterals' : [SKIP],
- 'test-heap/Regress169928' : [SKIP],
- 'test-decls/Unknown' : [SKIP],
- 'test-decls/Present' : [SKIP],
- 'test-decls/Absent' : [SKIP],
- 'test-decls/Appearing' : [SKIP],
- 'test-decls/Reappearing' : [SKIP],
- 'test-decls/ExistsInPrototype' : [SKIP],
- 'test-decls/AbsentInPrototype' : [SKIP],
- 'test-decls/ExistsInHiddenPrototype' : [SKIP],
- 'test-debug/ConditionalScriptBreakPoint' : [SKIP],
- 'test-debug/DebugEvaluate' : [SKIP],
- 'test-debug/ConditionalBreakpointWithCodeGenerationDisallowed' : [SKIP],
- 'test-debug/DebugEvaluateWithCodeGenerationDisallowed' : [SKIP],
- 'test-debug/DebugBreak' : [SKIP],
- 'test-debug/ThreadedDebugging' : [SKIP],
- 'test-debug/RecursiveBreakpoints' : [SKIP],
- 'test-dictionary/HashMap' : [SKIP],
- 'test-debug/Backtrace' : [SKIP],
- 'test-debug/DebugBreakLoop' : [SKIP],
- 'test-constantpool/ConstantPool' : [SKIP],
- 'test-compiler/GetScriptLineNumber' : [SKIP],
- 'test-api/ScriptMakingExternalString' : [SKIP],
- 'test-api/ScriptMakingExternalOneByteString' : [SKIP],
- 'test-api/MakingExternalStringConditions' : [SKIP],
- 'test-api/MakingExternalOneByteStringConditions' : [SKIP],
- 'test-api/MakingExternalUnalignedOneByteString' : [SKIP],
- 'test-api/IndexedInterceptorUnboxedDoubleWithIndexedAccessor' : [SKIP],
- 'test-api/IndependentWeakHandle' : [SKIP],
- 'test-api/GCFromWeakCallbacks' : [SKIP],
- 'test-api/IndependentHandleRevival' : [SKIP],
- 'test-api/StringWrite' : [SKIP],
- 'test-api/Threading3' : [SKIP],
- 'test-api/Threading4' : [SKIP],
- 'test-api/Threading2' : [SKIP],
- 'test-api/FixedFloat32Array' : [SKIP],
- 'test-api/FixedFloat64Array' : [SKIP],
- 'test-api/ExternalFloat32Array' : [SKIP],
- 'test-api/ExternalFloat64Array' : [SKIP],
- 'test-api/ExternalArrays' : [SKIP],
- 'test-api/Float32Array' : [SKIP],
- 'test-api/Float64Array' : [SKIP],
- 'test-api/Regress2333' : [SKIP],
- 'test-alloc/StressHandles' : [SKIP],
- 'test-alloc/StressJS' : [SKIP],
- 'test-accessors/HandleScopePop' : [SKIP],
- 'test-accessors/Gc' : [SKIP],
-
-}], # 'arch == nacl_ia32 or arch == nacl_x64'
-
-##############################################################################
['system == aix and arch == ppc64', {
# BUG 2857
@@ -488,35 +325,42 @@
}], # 'arch == ppc64 and simulator_run == True'
##############################################################################
-['ignition == True', {
+['variant == turbofan', {
+
+ # TODO(bmeurer): TurboFan embeds strong references to all kinds of objects
+ # via deoptimization data (Crankshaft also does this, but lacks proper test
+ # coverage).
+ 'test-heap/ObjectsInOptimizedCodeAreWeak': [FAIL],
+
+ # TurboFan cpu profiler result is different.
+ 'test-cpu-profiler/CollectDeoptEvents': [FAIL],
+ 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
+ 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
- # TODO(rmcilroy,4680): The function_data field should be a BytecodeArray on interpreter entry
- 'test-api/SetFunctionEntryHook': [FAIL],
+}], # variant == turbofan
- # TODO(rmcilroy,4680): Check failed: !function->shared()->is_compiled() || function->IsOptimized().
+##############################################################################
+['variant == turbofan_opt', {
+ # BUG(5193): Flaky.
+ 'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
+}], # variant == turbofan_opt
+
+##############################################################################
+['variant == ignition', {
+ # TODO(rmcilroy,4680): Related to lack of code flushing. Check failed: !function->shared()->is_compiled() || function->IsOptimized().
'test-heap/TestCodeFlushingPreAged': [FAIL],
'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
'test-heap/TestCodeFlushing': [FAIL],
'test-heap/TestCodeFlushingIncremental': [FAIL],
'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
- # TODO(rmcilroy,4766): Requires BytecodeGraphBuilder to track source position
- # on nodes (behind --turbo_source_positions flag).
- 'test-cpu-profiler/TickLinesOptimized': [FAIL],
-
- # TODO(rmcilroy,4680): Fails to find the correct function name for the
- # anonymous function. Fails without ignition but with --no-lazy also, so seems
- # to be an issue when eagerly parsing.
- 'test-func-name-inference/ReturnAnonymousFunction': [FAIL],
+ # TODO(mythria,4780): Related to type feedback support for Array function.
+ 'test-feedback-vector/VectorCallFeedbackForArray': [FAIL],
- # TODO(mythria,4780): Related to type feedback support for calls.
- 'test-feedback-vector/VectorCallICStates': [FAIL],
- 'test-compiler/FeedbackVectorPreservedAcrossRecompiles': [FAIL],
+ # TODO(mythria,4780): Related to type feedback support for constructor.
+ 'test-feedback-vector/VectorConstructCounts': [FAIL],
'test-heap/WeakFunctionInConstructor': [FAIL],
- 'test-heap/IncrementalMarkingClearsMonomorphicConstructor': [FAIL],
'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
- 'test-heap/IncrementalMarkingPreservesMonomorphicCallIC': [FAIL],
- 'test-heap/CellsInOptimizedCodeAreWeak': [FAIL],
# TODO(mythria,4680): Lack of code-ageing in interpreter.
'test-heap/Regress169209': [FAIL],
@@ -525,13 +369,95 @@
# in interpreter.
'test-heap/CompilationCacheCachingBehavior': [FAIL],
- # TODO(mstarzinger,4680): Fails due to the turbo-asm pipeline only being taken
- # in compiler.cc GetLazyCode for uncompiled code, and no similar path for eager
- # code.
- 'test-api/TurboAsmDisablesNeuter': [FAIL],
+ # BUG(rmcilroy,4680): Function is optimized without type feedback and so immediately deopts again, causing check failure in the test.
+ 'test-heap/ResetSharedFunctionInfoCountersDuringIncrementalMarking': [FAIL],
+ 'test-heap/ResetSharedFunctionInfoCountersDuringMarkSweep': [FAIL],
+
+ # BUG(4680): Missing type feedback makes optimistic optimizations fail.
+ 'test-cpu-profiler/CollectDeoptEvents': [FAIL],
+ 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
+
+ # BUG(4680): Ignition doesn't support allocation sites currently.
+ 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [FAIL],
+ 'test-heap/OptimizedPretenuringAllocationFolding': [FAIL],
+ 'test-heap/OptimizedPretenuringdoubleArrayLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedDoubleLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedObjectLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringObjectArrayLiterals': [FAIL],
+
+ # BUG(4751). Flaky with Ignition.
+ 'test-cpu-profiler/JsNativeJsSample': [PASS, FAIL],
+
+ # TODO(ignition): Fails due to missing type info when optimizing from bytecode
+ # with crankshaft.
+ 'test-cpu-profiler/TickLinesOptimized': [SKIP],
+
+ # TurboFan cpu profiler result is different.
+ 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
+ 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
+
+ # BUG(5193): Flaky.
+ 'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
+}], # variant == ignition
- # TODO(rmcilroy,4837): We don't set a LoadContextSlot for a function as
- # immutable in the BytecodeGraphBuilder, therefore no inlining happens.
+##############################################################################
+['variant == ignition_staging', {
+ 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
+ 'test-cpu-profiler/TickLinesOptimized': [SKIP],
+ 'test-cpu-profiler/CollectDeoptEvents': [FAIL],
+ 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
+ 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
+ 'test-feedback-vector/VectorCallFeedbackForArray': [FAIL],
+ 'test-feedback-vector/VectorConstructCounts': [FAIL],
+ 'test-heap/CompilationCacheCachingBehavior': [FAIL],
+ 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [FAIL],
+ 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
+ 'test-heap/OptimizedPretenuringAllocationFolding': [FAIL],
+ 'test-heap/OptimizedPretenuringdoubleArrayLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedDoubleLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedObjectLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringObjectArrayLiterals': [FAIL],
+ 'test-heap/Regress169209': [FAIL],
+ 'test-heap/ResetSharedFunctionInfoCountersDuringIncrementalMarking': [FAIL],
+ 'test-heap/ResetSharedFunctionInfoCountersDuringMarkSweep': [FAIL],
+ 'test-heap/TestCodeFlushing': [FAIL],
+ 'test-heap/TestCodeFlushingIncremental': [FAIL],
+ 'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
+ 'test-heap/TestCodeFlushingPreAged': [FAIL],
+ 'test-heap/WeakFunctionInConstructor': [FAIL],
+ 'test-run-inlining/InlineBuiltin': [FAIL],
+ 'test-run-inlining/InlineLoopGuardedEmpty': [FAIL],
+ 'test-run-inlining/InlineLoopGuardedOnce': [FAIL],
+ 'test-run-inlining/InlineLoopGuardedTwice': [FAIL],
+ 'test-run-inlining/InlineLoopUnguardedEmpty': [FAIL],
+ 'test-run-inlining/InlineLoopUnguardedOnce': [FAIL],
+ 'test-run-inlining/InlineLoopUnguardedTwice': [FAIL],
+ 'test-run-inlining/InlineMutuallyRecursive': [FAIL],
+ 'test-run-inlining/InlineNestedBuiltin': [FAIL],
+ 'test-run-inlining/InlineOmitArgumentsDeopt': [FAIL],
+ 'test-run-inlining/InlineOmitArguments': [FAIL],
+ 'test-run-inlining/InlineOmitArgumentsObject': [FAIL],
+ 'test-run-inlining/InlineSurplusArgumentsDeopt': [FAIL],
+ 'test-run-inlining/InlineSurplusArguments': [FAIL],
+ 'test-run-inlining/InlineSurplusArgumentsObject': [FAIL],
+ 'test-run-inlining/InlineTwiceDependentDiamondDifferent': [FAIL],
+ 'test-run-inlining/InlineTwiceDependentDiamond': [FAIL],
+ 'test-run-inlining/InlineTwiceDependent': [FAIL],
+ 'test-run-inlining/InlineTwice': [FAIL],
+ 'test-run-inlining/InlineWithArguments': [FAIL],
+ 'test-run-inlining/SimpleInliningContextDeopt': [FAIL],
+ 'test-run-inlining/SimpleInliningContext': [FAIL],
+ 'test-run-inlining/SimpleInlining': [FAIL],
+
+ # BUG(5193): Flaky.
+ 'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
+}], # variant == ignition_staging
+
+##############################################################################
+['variant == ignition_turbofan', {
+ # TODO(5251): Inlining is currently disabled for the BytecodeGraphBuilder.
'test-run-inlining/InlineLoopGuardedTwice': [FAIL],
'test-run-inlining/InlineSurplusArgumentsDeopt': [FAIL],
'test-run-inlining/InlineTwice': [FAIL],
@@ -555,6 +481,60 @@
'test-run-inlining/InlineBuiltin': [FAIL],
'test-run-inlining/InlineTwiceDependent': [FAIL],
'test-run-inlining/SimpleInliningContextDeopt': [FAIL],
-}], # ignition == True
+
+ # TODO(rmcilroy,4766): Requires BytecodeGraphBuilder to track source position
+ # on nodes (behind --turbo_source_positions flag).
+ 'test-cpu-profiler/TickLinesOptimized': [FAIL],
+
+ # TODO(rmcilroy,4680): Related to lack of code flushing. Check failed: !function->shared()->is_compiled() || function->IsOptimized().
+ 'test-heap/TestCodeFlushingPreAged': [FAIL],
+ 'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
+ 'test-heap/TestCodeFlushing': [FAIL],
+ 'test-heap/TestCodeFlushingIncremental': [FAIL],
+ 'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
+
+ # TODO(mythria,4780): Related to type feedback support for Array function.
+ 'test-feedback-vector/VectorCallFeedbackForArray': [FAIL],
+
+ # TODO(mythria,4780): Related to type feedback support for constructor.
+ 'test-feedback-vector/VectorConstructCounts': [FAIL],
+ 'test-heap/WeakFunctionInConstructor': [FAIL],
+ 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
+
+ # TODO(mythria,4680): Lack of code-ageing in interpreter.
+ 'test-heap/Regress169209': [FAIL],
+
+ # TODO(mythria,4680): Lack of code-ageing and/or lack of compilation cache
+ # in interpreter.
+ 'test-heap/CompilationCacheCachingBehavior': [FAIL],
+
+ # BUG(4680): Missing type feedback makes optimistic optimizations fail.
+ 'test-cpu-profiler/CollectDeoptEvents': [FAIL],
+ 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
+
+ # BUG(4680): Ignition doesn't support allocation sites currently.
+ 'test-heap/EnsureAllocationSiteDependentCodesProcessed': [FAIL],
+ 'test-heap/OptimizedPretenuringAllocationFolding': [FAIL],
+ 'test-heap/OptimizedPretenuringdoubleArrayLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedDoubleLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedMixedArrayLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringNestedObjectLiterals': [FAIL],
+ 'test-heap/OptimizedPretenuringObjectArrayLiterals': [FAIL],
+
+ # BUG(4751). Flaky with Ignition.
+ 'test-cpu-profiler/JsNativeJsSample': [SKIP],
+
+ # TODO(bmeurer): TurboFan embeds strong references to all kinds of objects
+ # via deoptimization data (Crankshaft also does this, but lacks proper test
+ # coverage).
+ 'test-heap/ObjectsInOptimizedCodeAreWeak': [FAIL],
+
+ # TurboFan cpu profiler result is different.
+ 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
+ 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
+
+ # BUG(5193): Flaky.
+ 'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
+}], # variant == ignition_turbofan
]
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index c75bde1e91..77d2ce1e95 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -106,12 +106,14 @@ struct ParameterTraits<int32_t> {
static int64_t Cast(int32_t r) { return static_cast<int64_t>(r); }
};
+#if !V8_TARGET_ARCH_PPC64
template <>
struct ParameterTraits<uint32_t> {
static int64_t Cast(uint32_t r) {
return static_cast<int64_t>(static_cast<int32_t>(r));
}
};
+#endif
#endif // !V8_TARGET_ARCH_64_BIT
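
Editor's note: the ParameterTraits<uint32_t> specialization above sign-extends a 32-bit value through int32_t so it matches how 32-bit results sit in a 64-bit register; the new V8_TARGET_ARCH_PPC64 guard presumably opts PPC64 out because its calling convention treats 32-bit unsigned arguments differently. A minimal standalone sketch of what the cast computes (plain C++, no V8 types):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t r = 0xFFFFFFFFu;
      // Sign-extend through int32_t, as ParameterTraits<uint32_t>::Cast does:
      int64_t v = static_cast<int64_t>(static_cast<int32_t>(r));
      assert(v == INT64_C(-1));  // the all-ones pattern reads back as -1
      // A plain zero-extending cast would instead yield 4294967295:
      assert(static_cast<int64_t>(r) == INT64_C(4294967295));
      return 0;
    }
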
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
new file mode 100644
index 0000000000..eb2d77a171
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/handles.h"
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ZoneHolder {
+ public:
+ explicit ZoneHolder(Isolate* isolate) : zone_(isolate->allocator()) {}
+ Zone* zone() { return &zone_; }
+
+ private:
+ Zone zone_;
+};
+
+// Inherit from ZoneHolder in order to create a zone that can be passed to
+// the CodeAssembler base class constructor.
+template <typename CodeAssemblerT>
+class CodeAssemblerTesterImpl : private ZoneHolder, public CodeAssemblerT {
+ public:
+ // Test generating code for a stub.
+ CodeAssemblerTesterImpl(Isolate* isolate,
+ const CallInterfaceDescriptor& descriptor)
+ : ZoneHolder(isolate),
+ CodeAssemblerT(isolate, ZoneHolder::zone(), descriptor,
+ Code::ComputeFlags(Code::STUB), "test"),
+ scope_(isolate) {}
+
+ // Test generating code for a JS function (e.g. builtins).
+ CodeAssemblerTesterImpl(Isolate* isolate, int parameter_count)
+ : ZoneHolder(isolate),
+ CodeAssemblerT(isolate, ZoneHolder::zone(), parameter_count,
+ Code::ComputeFlags(Code::FUNCTION), "test"),
+ scope_(isolate) {}
+
+ // This constructor is intended to be used for creating code objects with
+ // specific flags.
+ CodeAssemblerTesterImpl(Isolate* isolate, Code::Flags flags)
+ : ZoneHolder(isolate),
+ CodeAssemblerT(isolate, ZoneHolder::zone(), 0, flags, "test"),
+ scope_(isolate) {}
+
+ Handle<Code> GenerateCodeCloseAndEscape() {
+ return scope_.CloseAndEscape(CodeAssemblerT::GenerateCode());
+ }
+
+ private:
+ HandleScope scope_;
+ LocalContext context_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
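
Editor's note: the ZoneHolder base in the new header is the classic base-from-member idiom: base classes are constructed in declaration order, so listing ZoneHolder first guarantees the Zone exists before the CodeAssemblerT base constructor consumes it. A hedged sketch of the idiom with hypothetical stand-in types (Resource, Consumer, Holder, and Tester are illustrative names, not V8 API):

    struct Resource { int value = 42; };

    struct Consumer {
      explicit Consumer(Resource* r) : resource_(r) {}
      Resource* resource_;
    };

    // Holder is declared (and therefore constructed) before Consumer, so
    // handing &resource_ to the Consumer base constructor is safe.
    struct Holder { Resource resource_; };

    struct Tester : private Holder, public Consumer {
      Tester() : Holder(), Consumer(&this->resource_) {}
    };

    int main() {
      Tester t;
      return t.resource_->value == 42 ? 0 : 1;
    }
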
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 5d670bfee8..3d115454b9 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -5,6 +5,7 @@
#ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
#define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+#include "src/compiler.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
@@ -38,7 +39,8 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
p1, p2, p3, p4),
true),
MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags()) {}
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()) {}
virtual ~RawMachineAssemblerTester() {}
@@ -65,7 +67,7 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
Schedule* schedule = this->Export();
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
- CompilationInfo info("testing", main_isolate(), main_zone());
+ CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone());
code_ = Pipeline::GenerateCodeForTesting(&info, call_descriptor, graph,
schedule);
}
@@ -256,7 +258,7 @@ class BufferedRawMachineAssemblerTester<void>
// parameters from memory. Thereby it is possible to pass 64 bit parameters
// to the IR graph.
Node* Parameter(size_t index) {
- CHECK(index >= 0 && index < 4);
+ CHECK(index < 4);
return parameter_nodes_[index];
}
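
Editor's note: the CHECK change above drops `index >= 0` because Parameter takes a size_t, and an unsigned value is never negative; the old conjunct was tautologically true (the kind of comparison -Wtype-limits flags). The surviving upper-bound check carries all the information on its own:

    #include <cassert>
    #include <cstddef>

    bool ParameterIndexInRange(size_t index) {
      // For an unsigned type, index >= 0 always holds, so only the upper
      // bound matters:
      return index < 4;
    }

    int main() {
      assert(ParameterIndexInRange(0));
      assert(!ParameterIndexInRange(4));
      return 0;
    }
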
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 631bdde5ce..c1473ac960 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -6,7 +6,6 @@
#define V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
#include "src/ast/ast-numbering.h"
-#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
@@ -14,6 +13,7 @@
#include "src/full-codegen/full-codegen.h"
#include "src/handles.h"
#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
@@ -30,8 +30,8 @@ class FunctionTester : public InitializedHandleScope {
flags_(flags) {
Compile(function);
const uint32_t supported_flags =
- CompilationInfo::kFunctionContextSpecializing |
- CompilationInfo::kInliningEnabled | CompilationInfo::kTypingEnabled;
+ CompilationInfo::kNativeContextSpecializing |
+ CompilationInfo::kInliningEnabled;
CHECK_EQ(0u, flags_ & ~supported_flags);
}
@@ -42,16 +42,18 @@ class FunctionTester : public InitializedHandleScope {
CompileGraph(graph);
}
- FunctionTester(const CallInterfaceDescriptor& descriptor, Handle<Code> code)
+ FunctionTester(Handle<Code> code, int param_count)
: isolate(main_isolate()),
- function(
- (FLAG_allow_natives_syntax = true,
- NewFunction(BuildFunctionFromDescriptor(descriptor).c_str()))),
+ function((FLAG_allow_natives_syntax = true,
+ NewFunction(BuildFunction(param_count).c_str()))),
flags_(0) {
Compile(function);
function->ReplaceCode(*code);
}
+ FunctionTester(const CallInterfaceDescriptor& descriptor, Handle<Code> code)
+ : FunctionTester(code, descriptor.GetParameterCount()) {}
+
Isolate* isolate;
Handle<JSFunction> function;
@@ -59,11 +61,22 @@ class FunctionTester : public InitializedHandleScope {
return Execution::Call(isolate, function, undefined(), 0, nullptr);
}
+ MaybeHandle<Object> Call(Handle<Object> a) {
+ Handle<Object> args[] = {a};
+ return Execution::Call(isolate, function, undefined(), 1, args);
+ }
+
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
Handle<Object> args[] = {a, b};
return Execution::Call(isolate, function, undefined(), 2, args);
}
+ MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b,
+ Handle<Object> c) {
+ Handle<Object> args[] = {a, b, c};
+ return Execution::Call(isolate, function, undefined(), 3, args);
+ }
+
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b, Handle<Object> c,
Handle<Object> d) {
Handle<Object> args[] = {a, b, c, d};
@@ -91,41 +104,56 @@ class FunctionTester : public InitializedHandleScope {
return try_catch.Message();
}
- void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b) {
- Handle<Object> result = Call(a, b).ToHandleChecked();
+ void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b,
+ Handle<Object> c, Handle<Object> d) {
+ Handle<Object> result = Call(a, b, c, d).ToHandleChecked();
CHECK(expected->SameValue(*result));
}
+ void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b,
+ Handle<Object> c) {
+ return CheckCall(expected, a, b, c, undefined());
+ }
+
+ void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b) {
+ return CheckCall(expected, a, b, undefined());
+ }
+
void CheckCall(Handle<Object> expected, Handle<Object> a) {
CheckCall(expected, a, undefined());
}
- void CheckCall(Handle<Object> expected) {
- CheckCall(expected, undefined(), undefined());
- }
+ void CheckCall(Handle<Object> expected) { CheckCall(expected, undefined()); }
void CheckCall(double expected, double a, double b) {
CheckCall(Val(expected), Val(a), Val(b));
}
+ void CheckTrue(Handle<Object> a) { CheckCall(true_value(), a); }
+
void CheckTrue(Handle<Object> a, Handle<Object> b) {
CheckCall(true_value(), a, b);
}
- void CheckTrue(Handle<Object> a) { CheckCall(true_value(), a, undefined()); }
+ void CheckTrue(Handle<Object> a, Handle<Object> b, Handle<Object> c) {
+ CheckCall(true_value(), a, b, c);
+ }
+
+ void CheckTrue(Handle<Object> a, Handle<Object> b, Handle<Object> c,
+ Handle<Object> d) {
+ CheckCall(true_value(), a, b, c, d);
+ }
void CheckTrue(double a, double b) {
CheckCall(true_value(), Val(a), Val(b));
}
+ void CheckFalse(Handle<Object> a) { CheckCall(false_value(), a); }
+
void CheckFalse(Handle<Object> a, Handle<Object> b) {
CheckCall(false_value(), a, b);
}
- void CheckFalse(Handle<Object> a) {
- CheckCall(false_value(), a, undefined());
- }
-
void CheckFalse(double a, double b) {
CheckCall(false_value(), Val(a), Val(b));
}
@@ -177,25 +205,29 @@ class FunctionTester : public InitializedHandleScope {
Handle<JSFunction> Compile(Handle<JSFunction> function) {
Zone zone(function->GetIsolate()->allocator());
ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
info.MarkAsDeoptimizationEnabled();
- CHECK(Parser::ParseStatic(info.parse_info()));
+ if (!FLAG_turbo_from_bytecode) {
+ CHECK(Parser::ParseStatic(info.parse_info()));
+ }
info.SetOptimizing();
- if (flags_ & CompilationInfo::kFunctionContextSpecializing) {
- info.MarkAsFunctionContextSpecializing();
+ if (flags_ & CompilationInfo::kNativeContextSpecializing) {
+ info.MarkAsNativeContextSpecializing();
}
if (flags_ & CompilationInfo::kInliningEnabled) {
info.MarkAsInliningEnabled();
}
- if (flags_ & CompilationInfo::kTypingEnabled) {
- info.MarkAsTypingEnabled();
+ if (FLAG_turbo_from_bytecode) {
+ CHECK(Compiler::EnsureBytecode(&info));
+ info.MarkAsOptimizeFromBytecode();
+ } else {
+ CHECK(Compiler::Analyze(info.parse_info()));
+ CHECK(Compiler::EnsureDeoptimizationSupport(&info));
}
- CHECK(Compiler::Analyze(info.parse_info()));
- CHECK(Compiler::EnsureDeoptimizationSupport(&info));
+ JSFunction::EnsureLiterals(function);
- Pipeline pipeline(&info);
- Handle<Code> code = pipeline.GenerateCode();
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&info);
CHECK(!code.is_null());
info.dependencies()->Commit(code);
info.context()->native_context()->AddOptimizedCode(*code);
@@ -216,22 +248,15 @@ class FunctionTester : public InitializedHandleScope {
return function_string;
}
- std::string BuildFunctionFromDescriptor(
- const CallInterfaceDescriptor& descriptor) {
- return BuildFunction(descriptor.GetParameterCount());
- }
-
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
Handle<JSFunction> CompileGraph(Graph* graph) {
Zone zone(function->GetIsolate()->allocator());
ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CHECK(Parser::ParseStatic(info.parse_info()));
info.SetOptimizing();
- CHECK(Compiler::Analyze(info.parse_info()));
- CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
CHECK(!code.is_null());
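
Editor's note: the new Call and CheckCall overloads in FunctionTester form a forwarding ladder: each lower-arity variant pads the argument list with undefined() and delegates upward, so only the widest overload actually invokes Execution::Call. A hedged sketch of the pattern with placeholder types (Value, Undefined, and Call are illustrative stand-ins, not V8 API):

    #include <cassert>

    struct Value { int v; };
    static Value Undefined() { return Value{-1}; }

    // Pretend call into generated code; the real tester invokes the function.
    static Value Call(Value a, Value b, Value c, Value d) { return a; }

    // Only the widest overload does the work; the rest pad and forward.
    static void CheckCall(Value e, Value a, Value b, Value c, Value d) {
      assert(e.v == Call(a, b, c, d).v);
    }
    static void CheckCall(Value e, Value a, Value b, Value c) {
      CheckCall(e, a, b, c, Undefined());
    }
    static void CheckCall(Value e, Value a, Value b) {
      CheckCall(e, a, b, Undefined());
    }
    static void CheckCall(Value e, Value a) { CheckCall(e, a, Undefined()); }

    int main() {
      CheckCall(Value{7}, Value{7});  // one-arg form reaches the 4-arg core
      return 0;
    }
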
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index de2713a5ac..c870a3e84e 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -5,6 +5,7 @@
#ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
#define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+#include "src/compiler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
@@ -25,7 +26,8 @@ class GraphAndBuilders {
: main_graph_(new (zone) Graph(zone)),
main_common_(zone),
main_machine_(zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags()),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()),
main_simplified_(zone) {}
Graph* graph() const { return main_graph_; }
@@ -168,11 +170,11 @@ class GraphBuilderTester : public HandleAndZoneScope,
Node* ChangeFloat64ToTagged(Node* a) {
return NewNode(simplified()->ChangeFloat64ToTagged(), a);
}
- Node* ChangeBoolToBit(Node* a) {
- return NewNode(simplified()->ChangeBoolToBit(), a);
+ Node* ChangeTaggedToBit(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToBit(), a);
}
- Node* ChangeBitToBool(Node* a) {
- return NewNode(simplified()->ChangeBitToBool(), a);
+ Node* ChangeBitToTagged(Node* a) {
+ return NewNode(simplified()->ChangeBitToTagged(), a);
}
Node* LoadField(const FieldAccess& access, Node* object) {
@@ -238,7 +240,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
CHECK_EQ(op->ValueInputCount(), value_input_count);
CHECK(!OperatorProperties::HasContextInput(op));
- CHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
+ CHECK(!OperatorProperties::HasFrameStateInput(op));
bool has_control = op->ControlInputCount() == 1;
bool has_effect = op->EffectInputCount() == 1;
@@ -277,7 +279,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
Zone* zone = graph()->zone();
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
- CompilationInfo info("testing", main_isolate(), main_zone());
+ CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone());
code_ = Pipeline::GenerateCodeForTesting(&info, desc, graph());
#ifdef ENABLE_DISASSEMBLER
if (!code_.is_null() && FLAG_print_opt_code) {
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index c5c41667a0..edaf7b6ac1 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -478,6 +478,454 @@ TEST(BranchCombineEffectLevel) {
CHECK_EQ(42, m.Call());
}
+TEST(BranchCombineInt32AddLessThanZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Int32LessThan(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = (a + b < 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineInt32AddGreaterThanOrEqualZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Int32GreaterThanOrEqual(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = (a + b >= 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineInt32ZeroGreaterThanAdd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Int32GreaterThan(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = (0 > a + b) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineInt32ZeroLessThanOrEqualAdd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Int32LessThanOrEqual(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = (0 <= a + b) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32AddLessThanOrEqualZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Uint32LessThanOrEqual(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = (a + b <= 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32AddGreaterThanZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Uint32GreaterThan(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = (a + b > 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32ZeroGreaterThanOrEqualAdd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Uint32GreaterThanOrEqual(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = (0 >= a + b) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32ZeroLessThanAdd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Int32Add(a, b);
+ Node* compare = m.Uint32LessThan(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = (0 < a + b) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineWord32AndLessThanZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Int32LessThan(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = ((a & b) < 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineWord32AndGreaterThanOrEqualZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Int32GreaterThanOrEqual(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = ((a & b) >= 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineInt32ZeroGreaterThanAnd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Int32GreaterThan(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = (0 > (a & b)) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineInt32ZeroLessThanOrEqualAnd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Int32LessThanOrEqual(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t a = *i;
+ int32_t b = *j;
+ int32_t expect = (0 <= (a & b)) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32AndLessThanOrEqualZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Uint32LessThanOrEqual(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = ((a & b) <= 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32AndGreaterThanZero) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Uint32GreaterThan(add, m.Int32Constant(0));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = ((a & b) > 0) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32ZeroGreaterThanOrEqualAnd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Uint32GreaterThanOrEqual(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = (0 >= (a & b)) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
+TEST(BranchCombineUint32ZeroLessThanAnd) {
+ int32_t t_constant = -1033;
+ int32_t f_constant = 825118;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* a = m.Parameter(0);
+ Node* b = m.Parameter(1);
+ Node* add = m.Word32And(a, b);
+ Node* compare = m.Uint32LessThan(m.Int32Constant(0), add);
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(compare, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(t_constant));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(f_constant));
+
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ uint32_t a = *i;
+ uint32_t b = *j;
+ int32_t expect = (0 < (a & b)) ? t_constant : f_constant;
+ CHECK_EQ(expect, m.Call(a, b));
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
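
Editor's note: every new BranchCombine test above follows the same shape: build a compare-against-zero feeding a branch, let the instruction selector fuse the pair, then check the generated code against a plain C++ oracle over all input pairs. A standalone sketch of one such oracle, `(a + b) < 0`; the detour through uint32_t is deliberate, since the wraparound the generated Int32Add performs is well-defined for unsigned arithmetic but undefined for a raw signed `a + b` in standard C++:

    #include <cassert>
    #include <cstdint>

    int32_t Expect(int32_t a, int32_t b, int32_t t_constant,
                   int32_t f_constant) {
      // Wraparound addition, matching the machine-level Int32Add:
      int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                         static_cast<uint32_t>(b));
      return (sum < 0) ? t_constant : f_constant;
    }

    int main() {
      assert(Expect(INT32_MAX, 1, -1033, 825118) == -1033);  // wraps negative
      assert(Expect(1, 1, -1033, 825118) == 825118);
      return 0;
    }
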
diff --git a/deps/v8/test/cctest/compiler/test-changes-lowering.cc b/deps/v8/test/cctest/compiler/test-changes-lowering.cc
deleted file mode 100644
index ddeabe479b..0000000000
--- a/deps/v8/test/cctest/compiler/test-changes-lowering.cc
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "src/ast/scopes.h"
-#include "src/compiler/change-lowering.h"
-#include "src/compiler/control-builders.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/select-lowering.h"
-#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/typer.h"
-#include "src/compiler/verifier.h"
-#include "src/execution.h"
-#include "src/globals.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/rewriter.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/function-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
-#include "test/cctest/compiler/value-helper.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <typename ReturnType>
-class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
- public:
- explicit ChangesLoweringTester(MachineType p0 = MachineType::None())
- : GraphBuilderTester<ReturnType>(p0),
- javascript(this->zone()),
- jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
- nullptr, this->machine()),
- function(Handle<JSFunction>::null()) {}
-
- JSOperatorBuilder javascript;
- JSGraph jsgraph;
- Handle<JSFunction> function;
-
- Node* start() { return this->graph()->start(); }
-
- template <typename T>
- T* CallWithPotentialGC() {
- // TODO(titzer): we wrap the code in a JSFunction here to reuse the
- // JSEntryStub; that could be done with a special prologue or other stub.
- if (function.is_null()) {
- function = FunctionTester::ForMachineGraph(this->graph());
- }
- Handle<Object>* args = NULL;
- MaybeHandle<Object> result =
- Execution::Call(this->isolate(), function, factory()->undefined_value(),
- 0, args, false);
- return T::cast(*result.ToHandleChecked());
- }
-
- void StoreFloat64(Node* node, double* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- this->Store(MachineType::Float64(), ptr_node, node);
- }
-
- Node* LoadInt32(int32_t* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(MachineType::Int32(), ptr_node);
- }
-
- Node* LoadUint32(uint32_t* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(MachineType::Uint32(), ptr_node);
- }
-
- Node* LoadFloat64(double* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(MachineType::Float64(), ptr_node);
- }
-
- void CheckNumber(double expected, Object* number) {
- CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
- }
-
- void BuildAndLower(const Operator* op) {
- // We build a graph by hand here, because the raw machine assembler
- // does not add the correct control and effect nodes.
- Node* p0 = this->Parameter(0);
- Node* change = this->graph()->NewNode(op, p0);
- Node* ret = this->graph()->NewNode(this->common()->Return(), change,
- this->start(), this->start());
- Node* end = this->graph()->NewNode(this->common()->End(1), ret);
- this->graph()->SetEnd(end);
- LowerChange(change);
- }
-
- void BuildStoreAndLower(const Operator* op, const Operator* store_op,
- void* location) {
- // We build a graph by hand here, because the raw machine assembler
- // does not add the correct control and effect nodes.
- Node* p0 = this->Parameter(0);
- Node* change = this->graph()->NewNode(op, p0);
- Node* store = this->graph()->NewNode(
- store_op, this->PointerConstant(location), this->Int32Constant(0),
- change, this->start(), this->start());
- Node* ret = this->graph()->NewNode(
- this->common()->Return(), this->Int32Constant(0), store, this->start());
- Node* end = this->graph()->NewNode(this->common()->End(1), ret);
- this->graph()->SetEnd(end);
- LowerChange(change);
- }
-
- void BuildLoadAndLower(const Operator* op, const Operator* load_op,
- void* location) {
- // We build a graph by hand here, because the raw machine assembler
- // does not add the correct control and effect nodes.
- Node* load = this->graph()->NewNode(
- load_op, this->PointerConstant(location), this->Int32Constant(0),
- this->start(), this->start());
- Node* change = this->graph()->NewNode(op, load);
- Node* ret = this->graph()->NewNode(this->common()->Return(), change,
- this->start(), this->start());
- Node* end = this->graph()->NewNode(this->common()->End(1), ret);
- this->graph()->SetEnd(end);
- LowerChange(change);
- }
-
- void LowerChange(Node* change) {
- // Run the graph reducer with changes lowering on a single node.
- Typer typer(this->isolate(), this->graph());
- typer.Run();
- ChangeLowering change_lowering(&jsgraph);
- SelectLowering select_lowering(this->graph(), this->common());
- GraphReducer reducer(this->zone(), this->graph());
- reducer.AddReducer(&change_lowering);
- reducer.AddReducer(&select_lowering);
- reducer.ReduceNode(change);
- Verifier::Run(this->graph(), Verifier::UNTYPED);
- }
-
- Factory* factory() { return this->isolate()->factory(); }
- Heap* heap() { return this->isolate()->heap(); }
-};
-
-
-TEST(RunChangeTaggedToInt32) {
- // Build and lower a graph by hand.
- ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
- t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
-
- FOR_INT32_INPUTS(i) {
- int32_t input = *i;
-
- if (Smi::IsValid(input)) {
- int32_t result = t.Call(Smi::FromInt(input));
- CHECK_EQ(input, result);
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(input, result);
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(input, result);
- }
- }
-}
-
-
-TEST(RunChangeTaggedToUint32) {
- // Build and lower a graph by hand.
- ChangesLoweringTester<uint32_t> t(MachineType::AnyTagged());
- t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
-
- FOR_UINT32_INPUTS(i) {
- uint32_t input = *i;
-
- if (Smi::IsValid(input)) {
- uint32_t result = t.Call(Smi::FromInt(input));
- CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- uint32_t result = t.Call(*number);
- CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- uint32_t result = t.Call(*number);
- CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
- }
- }
-}
-
-
-TEST(RunChangeTaggedToFloat64) {
- ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
- double result;
-
- t.BuildStoreAndLower(t.simplified()->ChangeTaggedToFloat64(),
- t.machine()->Store(StoreRepresentation(
- MachineRepresentation::kFloat64, kNoWriteBarrier)),
- &result);
-
- {
- FOR_INT32_INPUTS(i) {
- int32_t input = *i;
-
- if (Smi::IsValid(input)) {
- t.Call(Smi::FromInt(input));
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- t.Call(*number);
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- t.Call(*number);
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
- }
- }
-
- {
- FOR_FLOAT64_INPUTS(i) {
- double input = *i;
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- t.Call(*number);
- CHECK_DOUBLE_EQ(input, result);
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- t.Call(*number);
- CHECK_DOUBLE_EQ(input, result);
- }
- }
- }
-}
-
-
-TEST(RunChangeBoolToBit) {
- ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
- t.BuildAndLower(t.simplified()->ChangeBoolToBit());
-
- {
- Object* true_obj = t.heap()->true_value();
- int32_t result = t.Call(true_obj);
- CHECK_EQ(1, result);
- }
-
- {
- Object* false_obj = t.heap()->false_value();
- int32_t result = t.Call(false_obj);
- CHECK_EQ(0, result);
- }
-}
-
-
-TEST(RunChangeBitToBool) {
- ChangesLoweringTester<Object*> t(MachineType::Int32());
- t.BuildAndLower(t.simplified()->ChangeBitToBool());
-
- {
- Object* result = t.Call(1);
- Object* true_obj = t.heap()->true_value();
- CHECK_EQ(true_obj, result);
- }
-
- {
- Object* result = t.Call(0);
- Object* false_obj = t.heap()->false_value();
- CHECK_EQ(false_obj, result);
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
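
Editor's note: the deleted RunChangeBoolToBit/RunChangeBitToBool tests pinned down the value-level contract of the conversions that graph-builder-tester.h now spells ChangeTaggedToBit and ChangeBitToTagged: the tagged true/false values map to machine 1/0 and back. A trivial stand-in sketch of that contract (plain bool replaces the tagged heap values; these helpers are illustrative, not V8 API):

    #include <cassert>

    int TaggedToBit(bool tagged_boolean) { return tagged_boolean ? 1 : 0; }
    bool BitToTagged(int bit) { return bit != 0; }

    int main() {
      assert(TaggedToBit(true) == 1);            // mirrors RunChangeBoolToBit
      assert(BitToTagged(0) == false);           // mirrors RunChangeBitToBool
      assert(TaggedToBit(BitToTagged(1)) == 1);  // round trip
      return 0;
    }
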
diff --git a/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index ff02cc9b44..d9bb9346f5 100644
--- a/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -2,52 +2,54 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/interface-descriptors.h"
+#include "src/compiler/code-assembler.h"
#include "src/isolate.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
namespace internal {
namespace compiler {
+typedef CodeAssemblerTesterImpl<CodeAssembler> CodeAssemblerTester;
-class CodeStubAssemblerTester : public CodeStubAssembler {
- public:
- // Test generating code for a stub.
- CodeStubAssemblerTester(Isolate* isolate,
- const CallInterfaceDescriptor& descriptor)
- : CodeStubAssembler(isolate, isolate->runtime_zone(), descriptor,
- Code::ComputeFlags(Code::STUB), "test"),
- scope_(isolate) {}
+namespace {
+
+Node* SmiTag(CodeAssemblerTester& m, Node* value) {
+ int32_t constant_value;
+ if (m.ToInt32Constant(value, constant_value) &&
+ Smi::IsValid(constant_value)) {
+ return m.SmiConstant(Smi::FromInt(constant_value));
+ }
+ return m.WordShl(value, m.IntPtrConstant(kSmiShiftSize + kSmiTagSize));
+}
- // Test generating code for a JS function (e.g. builtins).
- CodeStubAssemblerTester(Isolate* isolate, int parameter_count)
- : CodeStubAssembler(isolate, isolate->runtime_zone(), parameter_count,
- Code::ComputeFlags(Code::FUNCTION), "test"),
- scope_(isolate) {}
+Node* UndefinedConstant(CodeAssemblerTester& m) {
+ return m.LoadRoot(Heap::kUndefinedValueRootIndex);
+}
- private:
- HandleScope scope_;
- LocalContext context_;
-};
+Node* LoadObjectField(CodeAssemblerTester& m, Node* object, int offset,
+ MachineType rep = MachineType::AnyTagged()) {
+ return m.Load(rep, object, m.IntPtrConstant(offset - kHeapObjectTag));
+}
+} // namespace
TEST(SimpleSmiReturn) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
- m.Return(m.SmiTag(m.Int32Constant(37)));
+ CodeAssemblerTester m(isolate, descriptor);
+ m.Return(SmiTag(m, m.Int32Constant(37)));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
CHECK_EQ(37, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
-
TEST(SimpleIntPtrReturn) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
int test;
m.Return(m.IntPtrConstant(reinterpret_cast<intptr_t>(&test)));
Handle<Code> code = m.GenerateCode();
@@ -57,11 +59,10 @@ TEST(SimpleIntPtrReturn) {
reinterpret_cast<intptr_t>(*result.ToHandleChecked()));
}
-
TEST(SimpleDoubleReturn) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
m.Return(m.NumberConstant(0.5));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
@@ -69,13 +70,12 @@ TEST(SimpleDoubleReturn) {
CHECK_EQ(0.5, Handle<HeapNumber>::cast(result.ToHandleChecked())->value());
}
-
TEST(SimpleCallRuntime1Arg) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = m.SmiTag(m.Int32Constant(0));
+ Node* b = SmiTag(m, m.Int32Constant(0));
m.Return(m.CallRuntime(Runtime::kNumberToSmi, context, b));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
@@ -83,13 +83,12 @@ TEST(SimpleCallRuntime1Arg) {
CHECK_EQ(0, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
-
TEST(SimpleTailCallRuntime1Arg) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* b = m.SmiTag(m.Int32Constant(0));
+ Node* b = SmiTag(m, m.Int32Constant(0));
m.TailCallRuntime(Runtime::kNumberToSmi, context, b);
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
@@ -97,40 +96,123 @@ TEST(SimpleTailCallRuntime1Arg) {
CHECK_EQ(0, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
-
TEST(SimpleCallRuntime2Arg) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* a = m.SmiTag(m.Int32Constant(2));
- Node* b = m.SmiTag(m.Int32Constant(4));
- m.Return(m.CallRuntime(Runtime::kMathPow, context, a, b));
+ Node* a = SmiTag(m, m.Int32Constant(2));
+ Node* b = SmiTag(m, m.Int32Constant(4));
+ m.Return(m.CallRuntime(Runtime::kAdd, context, a, b));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+ CHECK_EQ(6, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
-
TEST(SimpleTailCallRuntime2Arg) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
- Node* a = m.SmiTag(m.Int32Constant(2));
- Node* b = m.SmiTag(m.Int32Constant(4));
- m.TailCallRuntime(Runtime::kMathPow, context, a, b);
+ Node* a = SmiTag(m, m.Int32Constant(2));
+ Node* b = SmiTag(m, m.Int32Constant(4));
+ m.TailCallRuntime(Runtime::kAdd, context, a, b);
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
+ CHECK_EQ(6, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+namespace {
+
+Handle<JSFunction> CreateSumAllArgumentsFunction(FunctionTester& ft) {
+ const char* source =
+ "(function() {\n"
+ " var sum = 0 + this;\n"
+ " for (var i = 0; i < arguments.length; i++) {\n"
+ " sum += arguments[i];\n"
+ " }\n"
+ " return sum;\n"
+ "})";
+ return ft.NewFunction(source);
+}
+
+} // namespace
+
+TEST(SimpleCallJSFunction0Arg) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeAssemblerTester m(isolate, kNumParams);
+ {
+ Node* function = m.Parameter(0);
+ Node* context = m.Parameter(kNumParams + 2);
+
+ Node* receiver = SmiTag(m, m.Int32Constant(42));
+
+ Callable callable = CodeFactory::Call(isolate);
+ Node* result = m.CallJS(callable, context, function, receiver);
+ m.Return(result);
+ }
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<JSFunction> sum = CreateSumAllArgumentsFunction(ft);
+ MaybeHandle<Object> result = ft.Call(sum);
+ CHECK_EQ(Smi::FromInt(42), *result.ToHandleChecked());
+}
+
+TEST(SimpleCallJSFunction1Arg) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 2;
+ CodeAssemblerTester m(isolate, kNumParams);
+ {
+ Node* function = m.Parameter(0);
+ Node* context = m.Parameter(1);
+
+ Node* receiver = SmiTag(m, m.Int32Constant(42));
+ Node* a = SmiTag(m, m.Int32Constant(13));
+
+ Callable callable = CodeFactory::Call(isolate);
+ Node* result = m.CallJS(callable, context, function, receiver, a);
+ m.Return(result);
+ }
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<JSFunction> sum = CreateSumAllArgumentsFunction(ft);
+ MaybeHandle<Object> result = ft.Call(sum);
+ CHECK_EQ(Smi::FromInt(55), *result.ToHandleChecked());
+}
+
+TEST(SimpleCallJSFunction2Arg) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 2;
+ CodeAssemblerTester m(isolate, kNumParams);
+ {
+ Node* function = m.Parameter(0);
+ Node* context = m.Parameter(1);
+
+ Node* receiver = SmiTag(m, m.Int32Constant(42));
+ Node* a = SmiTag(m, m.Int32Constant(13));
+ Node* b = SmiTag(m, m.Int32Constant(153));
+
+ Callable callable = CodeFactory::Call(isolate);
+ Node* result = m.CallJS(callable, context, function, receiver, a, b);
+ m.Return(result);
+ }
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<JSFunction> sum = CreateSumAllArgumentsFunction(ft);
+ MaybeHandle<Object> result = ft.Call(sum);
+ CHECK_EQ(Smi::FromInt(208), *result.ToHandleChecked());
}
TEST(VariableMerge1) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
CodeStubAssembler::Label l1(&m), l2(&m), merge(&m);
Node* temp = m.Int32Constant(0);
@@ -149,7 +231,7 @@ TEST(VariableMerge1) {
TEST(VariableMerge2) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
CodeStubAssembler::Label l1(&m), l2(&m), merge(&m);
Node* temp = m.Int32Constant(0);
@@ -170,7 +252,7 @@ TEST(VariableMerge2) {
TEST(VariableMerge3) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
CodeStubAssembler::Variable var2(&m, MachineRepresentation::kTagged);
CodeStubAssembler::Label l1(&m), l2(&m), merge(&m);
@@ -195,7 +277,7 @@ TEST(VariableMerge3) {
TEST(VariableMergeBindFirst) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
CodeStubAssembler::Label l1(&m), l2(&m), merge(&m, &var1), end(&m);
Node* temp = m.Int32Constant(0);
@@ -221,7 +303,7 @@ TEST(VariableMergeBindFirst) {
TEST(VariableMergeSwitch) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
CodeStubAssembler::Label l1(&m), l2(&m), default_label(&m);
CodeStubAssembler::Label* labels[] = {&l1, &l2};
@@ -240,101 +322,10 @@ TEST(VariableMergeSwitch) {
m.Return(temp);
}
-TEST(FixedArrayAccessSmiIndex) {
- Isolate* isolate(CcTest::InitIsolateOnce());
- VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(5);
- array->set(4, Smi::FromInt(733));
- m.Return(m.LoadFixedArrayElementSmiIndex(m.HeapConstant(array),
- m.SmiTag(m.Int32Constant(4))));
- Handle<Code> code = m.GenerateCode();
- FunctionTester ft(descriptor, code);
- MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(733, Handle<Smi>::cast(result.ToHandleChecked())->value());
-}
-
-TEST(LoadHeapNumberValue) {
- Isolate* isolate(CcTest::InitIsolateOnce());
- VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
- Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(1234);
- m.Return(m.SmiTag(
- m.ChangeFloat64ToUint32(m.LoadHeapNumberValue(m.HeapConstant(number)))));
- Handle<Code> code = m.GenerateCode();
- FunctionTester ft(descriptor, code);
- MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(1234, Handle<Smi>::cast(result.ToHandleChecked())->value());
-}
-
-TEST(LoadInstanceType) {
- Isolate* isolate(CcTest::InitIsolateOnce());
- VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
- Handle<HeapObject> undefined = isolate->factory()->undefined_value();
- m.Return(m.SmiTag(m.LoadInstanceType(m.HeapConstant(undefined))));
- Handle<Code> code = m.GenerateCode();
- FunctionTester ft(descriptor, code);
- MaybeHandle<Object> result = ft.Call();
- CHECK_EQ(InstanceType::ODDBALL_TYPE,
- Handle<Smi>::cast(result.ToHandleChecked())->value());
-}
-
-namespace {
-
-class TestBitField : public BitField<unsigned, 3, 3> {};
-
-} // namespace
-
-TEST(BitFieldDecode) {
- Isolate* isolate(CcTest::InitIsolateOnce());
- VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
- m.Return(m.SmiTag(m.BitFieldDecode<TestBitField>(m.Int32Constant(0x2f))));
- Handle<Code> code = m.GenerateCode();
- FunctionTester ft(descriptor, code);
- MaybeHandle<Object> result = ft.Call();
- // value = 00101111
- // mask = 00111000
- // result = 101
- CHECK_EQ(5, Handle<Smi>::cast(result.ToHandleChecked())->value());
-}
-
-namespace {
-
-Handle<JSFunction> CreateFunctionFromCode(int parameter_count_with_receiver,
- Handle<Code> code) {
- Isolate* isolate = code->GetIsolate();
- Handle<String> name = isolate->factory()->InternalizeUtf8String("test");
- Handle<JSFunction> function =
- isolate->factory()->NewFunctionWithoutPrototype(name, code);
- function->shared()->set_internal_formal_parameter_count(
- parameter_count_with_receiver - 1); // Implicit undefined receiver.
- return function;
-}
-
-} // namespace
-
-TEST(JSFunction) {
- const int kNumParams = 3; // Receiver, left, right.
- Isolate* isolate(CcTest::InitIsolateOnce());
- CodeStubAssemblerTester m(isolate, kNumParams);
- m.Return(m.SmiTag(m.Int32Add(m.SmiToWord32(m.Parameter(1)),
- m.SmiToWord32(m.Parameter(2)))));
- Handle<Code> code = m.GenerateCode();
- Handle<JSFunction> function = CreateFunctionFromCode(kNumParams, code);
- Handle<Object> args[] = {Handle<Smi>(Smi::FromInt(23), isolate),
- Handle<Smi>(Smi::FromInt(34), isolate)};
- MaybeHandle<Object> result =
- Execution::Call(isolate, function, isolate->factory()->undefined_value(),
- arraysize(args), args);
- CHECK_EQ(57, Handle<Smi>::cast(result.ToHandleChecked())->value());
-}
-
TEST(SplitEdgeBranchMerge) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
CodeStubAssembler::Label l1(&m), merge(&m);
m.Branch(m.Int32Constant(1), &l1, &merge);
m.Bind(&l1);
@@ -346,7 +337,7 @@ TEST(SplitEdgeBranchMerge) {
TEST(SplitEdgeSwitchMerge) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
- CodeStubAssemblerTester m(isolate, descriptor);
+ CodeAssemblerTester m(isolate, descriptor);
CodeStubAssembler::Label l1(&m), l2(&m), l3(&m), default_label(&m);
CodeStubAssembler::Label* labels[] = {&l1, &l2};
int32_t values[] = {1, 2};
@@ -361,6 +352,87 @@ TEST(SplitEdgeSwitchMerge) {
USE(m.GenerateCode());
}
+TEST(TestToConstant) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeAssemblerTester m(isolate, descriptor);
+ int32_t value32;
+ int64_t value64;
+ Node* a = m.Int32Constant(5);
+ CHECK(m.ToInt32Constant(a, value32));
+ CHECK(m.ToInt64Constant(a, value64));
+
+ a = m.Int64Constant(static_cast<int64_t>(1) << 32);
+ CHECK(!m.ToInt32Constant(a, value32));
+ CHECK(m.ToInt64Constant(a, value64));
+
+ a = m.Int64Constant(13);
+ CHECK(m.ToInt32Constant(a, value32));
+ CHECK(m.ToInt64Constant(a, value64));
+
+ a = UndefinedConstant(m);
+ CHECK(!m.ToInt32Constant(a, value32));
+ CHECK(!m.ToInt64Constant(a, value64));
+
+ a = UndefinedConstant(m);
+ CHECK(!m.ToInt32Constant(a, value32));
+ CHECK(!m.ToInt64Constant(a, value64));
+}
+
+TEST(DeferredCodePhiHints) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeAssemblerTester m(isolate, descriptor);
+ Label block1(&m, Label::kDeferred);
+ m.Goto(&block1);
+ m.Bind(&block1);
+ {
+ Variable var_object(&m, MachineRepresentation::kTagged);
+ Label loop(&m, &var_object);
+ var_object.Bind(m.IntPtrConstant(0));
+ m.Goto(&loop);
+ m.Bind(&loop);
+ {
+ Node* map = LoadObjectField(m, var_object.value(), JSObject::kMapOffset);
+ var_object.Bind(map);
+ m.Goto(&loop);
+ }
+ }
+ CHECK(!m.GenerateCode().is_null());
+}
+
+TEST(TestOutOfScopeVariable) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeAssemblerTester m(isolate, descriptor);
+ Label block1(&m);
+ Label block2(&m);
+ Label block3(&m);
+ Label block4(&m);
+ m.Branch(m.WordEqual(m.Parameter(0), m.IntPtrConstant(0)), &block1, &block4);
+ m.Bind(&block4);
+ {
+ Variable var_object(&m, MachineRepresentation::kTagged);
+ m.Branch(m.WordEqual(m.Parameter(0), m.IntPtrConstant(0)), &block2,
+ &block3);
+
+ m.Bind(&block2);
+ var_object.Bind(m.IntPtrConstant(55));
+ m.Goto(&block1);
+
+ m.Bind(&block3);
+ var_object.Bind(m.IntPtrConstant(66));
+ m.Goto(&block1);
+ }
+ m.Bind(&block1);
+ CHECK(!m.GenerateCode().is_null());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
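The new local SmiTag helper above tags a small integer by shifting it left by kSmiShiftSize + kSmiTagSize. A self-contained sketch of that tagging scheme, assuming the common 64-bit layout (kSmiTagSize == 1, kSmiShiftSize == 31; these constant values are assumptions for illustration, not taken from this patch):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;    // low bit 0 marks a Smi (assumed 64-bit layout)
constexpr int kSmiShiftSize = 31;
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;  // 32 on 64-bit targets

intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiShift;
}

int32_t SmiUntag(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> kSmiShift);
}

int main() {
  assert(SmiUntag(SmiTag(37)) == 37);  // round-trips, cf. SimpleSmiReturn
  assert((SmiTag(37) & 1) == 0);       // tag bit stays clear
  return 0;
}

Under this layout the payload lands in the upper 32 bits of the word, which is why the helper can implement tagging as a plain word shift.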
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index 7f85088809..b8b1251724 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -33,7 +33,7 @@ class InterpreterState {
private:
struct Key {
bool is_constant;
- bool is_float;
+ MachineRepresentation rep;
LocationOperand::LocationKind kind;
int index;
@@ -41,8 +41,8 @@ class InterpreterState {
if (this->is_constant != other.is_constant) {
return this->is_constant;
}
- if (this->is_float != other.is_float) {
- return this->is_float;
+ if (this->rep != other.rep) {
+ return static_cast<int>(this->rep) < static_cast<int>(other.rep);
}
if (this->kind != other.kind) {
return this->kind < other.kind;
@@ -51,7 +51,7 @@ class InterpreterState {
}
bool operator==(const Key& other) const {
- return this->is_constant == other.is_constant &&
+ return this->is_constant == other.is_constant && this->rep == other.rep &&
this->kind == other.kind && this->index == other.index;
}
};
@@ -75,24 +75,26 @@ class InterpreterState {
static Key KeyFor(const InstructionOperand& op) {
bool is_constant = op.IsConstant();
- bool is_float = false;
+ MachineRepresentation rep =
+ v8::internal::compiler::InstructionSequence::DefaultRepresentation();
LocationOperand::LocationKind kind;
int index;
if (!is_constant) {
- if (op.IsRegister()) {
- index = LocationOperand::cast(op).GetRegister().code();
- } else if (op.IsDoubleRegister()) {
- index = LocationOperand::cast(op).GetDoubleRegister().code();
+ const LocationOperand& loc_op = LocationOperand::cast(op);
+ if (loc_op.IsAnyRegister()) {
+ if (loc_op.IsFPRegister()) {
+ rep = MachineRepresentation::kFloat64;
+ }
+ index = loc_op.register_code();
} else {
- index = LocationOperand::cast(op).index();
+ index = loc_op.index();
}
- is_float = IsFloatingPoint(LocationOperand::cast(op).representation());
- kind = LocationOperand::cast(op).location_kind();
+ kind = loc_op.location_kind();
} else {
index = ConstantOperand::cast(op).virtual_register();
kind = LocationOperand::REGISTER;
}
- Key key = {is_constant, is_float, kind, index};
+ Key key = {is_constant, rep, kind, index};
return key;
}
@@ -102,10 +104,7 @@ class InterpreterState {
if (key.is_constant) {
return ConstantOperand(key.index);
}
- return AllocatedOperand(
- key.kind,
- v8::internal::compiler::InstructionSequence::DefaultRepresentation(),
- key.index);
+ return AllocatedOperand(key.kind, key.rep, key.index);
}
friend std::ostream& operator<<(std::ostream& os,
@@ -113,12 +112,10 @@ class InterpreterState {
for (OperandMap::const_iterator it = is.values_.begin();
it != is.values_.end(); ++it) {
if (it != is.values_.begin()) os << " ";
- InstructionOperand source = FromKey(it->first);
- InstructionOperand destination = FromKey(it->second);
+ InstructionOperand source = FromKey(it->second);
+ InstructionOperand destination = FromKey(it->first);
MoveOperands mo(source, destination);
- PrintableMoveOperands pmo = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- &mo};
+ PrintableMoveOperands pmo = {RegisterConfiguration::Turbofan(), &mo};
os << pmo;
}
return os;
@@ -168,7 +165,9 @@ class ParallelMoveCreator : public HandleAndZoneScope {
ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
std::set<InstructionOperand, CompareOperandModuloType> seen;
for (int i = 0; i < size; ++i) {
- MoveOperands mo(CreateRandomOperand(true), CreateRandomOperand(false));
+ MachineRepresentation rep = RandomRepresentation();
+ MoveOperands mo(CreateRandomOperand(true, rep),
+ CreateRandomOperand(false, rep));
if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
parallel_move->AddMove(mo.source(), mo.destination());
seen.insert(mo.destination());
@@ -179,52 +178,59 @@ class ParallelMoveCreator : public HandleAndZoneScope {
private:
MachineRepresentation RandomRepresentation() {
- int index = rng_->NextInt(3);
+ int index = rng_->NextInt(5);
switch (index) {
case 0:
return MachineRepresentation::kWord32;
case 1:
return MachineRepresentation::kWord64;
case 2:
+ return MachineRepresentation::kFloat32;
+ case 3:
+ return MachineRepresentation::kFloat64;
+ case 4:
return MachineRepresentation::kTagged;
}
UNREACHABLE();
return MachineRepresentation::kNone;
}
- MachineRepresentation RandomDoubleRepresentation() {
- int index = rng_->NextInt(2);
- if (index == 0) return MachineRepresentation::kFloat64;
- return MachineRepresentation::kFloat32;
- }
-
- InstructionOperand CreateRandomOperand(bool is_source) {
+ InstructionOperand CreateRandomOperand(bool is_source,
+ MachineRepresentation rep) {
+ auto conf = RegisterConfiguration::Turbofan();
+ auto GetRegisterCode = [&conf](MachineRepresentation rep, int index) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+#if V8_TARGET_ARCH_ARM
+          // Only even-numbered float registers are used on Arm.
+ // TODO(bbudge) Eliminate this when FP register aliasing works.
+ return conf->RegisterConfiguration::GetAllocatableDoubleCode(index) *
+ 2;
+#endif
+ // Fall through on non-Arm targets.
+ case MachineRepresentation::kFloat64:
+ return conf->RegisterConfiguration::GetAllocatableDoubleCode(index);
+
+ default:
+ return conf->RegisterConfiguration::GetAllocatableGeneralCode(index);
+ }
+ UNREACHABLE();
+ return static_cast<int>(Register::kCode_no_reg);
+ };
int index = rng_->NextInt(7);
// destination can't be Constant.
- switch (rng_->NextInt(is_source ? 7 : 6)) {
+ switch (rng_->NextInt(is_source ? 5 : 4)) {
case 0:
- return AllocatedOperand(LocationOperand::STACK_SLOT,
- RandomRepresentation(), index);
+ return AllocatedOperand(LocationOperand::STACK_SLOT, rep, index);
case 1:
- return AllocatedOperand(LocationOperand::STACK_SLOT,
- RandomDoubleRepresentation(), index);
+ return AllocatedOperand(LocationOperand::REGISTER, rep, index);
case 2:
- return AllocatedOperand(LocationOperand::REGISTER,
- RandomRepresentation(), index);
+ return ExplicitOperand(LocationOperand::REGISTER, rep,
+ GetRegisterCode(rep, 1));
case 3:
- return AllocatedOperand(LocationOperand::REGISTER,
- RandomDoubleRepresentation(), index);
+ return ExplicitOperand(LocationOperand::STACK_SLOT, rep,
+ GetRegisterCode(rep, index));
case 4:
- return ExplicitOperand(
- LocationOperand::REGISTER, RandomRepresentation(),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(1));
- case 5:
- return ExplicitOperand(
- LocationOperand::STACK_SLOT, RandomRepresentation(),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(index));
- case 6:
return ConstantOperand(index);
}
UNREACHABLE();
@@ -250,7 +256,7 @@ TEST(FuzzResolver) {
GapResolver resolver(&mi2);
resolver.Resolve(pm);
- CHECK(mi1.state() == mi2.state());
+ CHECK_EQ(mi1.state(), mi2.state());
}
}
}
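The reworked Key comparator above is a field-by-field lexicographic ordering over (is_constant, rep, kind, index). The same pattern can be written with std::tie; the sketch below is illustrative only, and note one deliberate difference: the patch sorts constants first, whereas plain bool comparison via std::tie would sort them last.

#include <tuple>

enum class Rep { kWord32, kFloat32, kFloat64, kTagged };
enum class Kind { kRegister, kStackSlot };

struct Key {
  bool is_constant;
  Rep rep;
  Kind kind;
  int index;

  bool operator<(const Key& other) const {
    // Lexicographic compare: earlier fields decide, later fields break ties.
    return std::tie(is_constant, rep, kind, index) <
           std::tie(other.is_constant, other.rep, other.kind, other.index);
  }

  bool operator==(const Key& other) const {
    return std::tie(is_constant, rep, kind, index) ==
           std::tie(other.is_constant, other.rep, other.kind, other.index);
  }
};

Either variant yields a strict weak ordering, which is all an ordered map key (like the test's OperandMap) requires.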
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index c7cd47a55c..e9bf064750 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -186,81 +186,6 @@ TEST(ReduceJSStoreContext) {
}
-// TODO(titzer): factor out common code with effects checking in typed lowering.
-static void CheckEffectInput(Node* effect, Node* use) {
- CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
-}
-
-
-TEST(SpecializeToContext) {
- ContextSpecializationTester t;
-
- Node* start = t.graph()->NewNode(t.common()->Start(0));
- t.graph()->SetStart(start);
-
- // Make a context and initialize it a bit for this test.
- Handle<Context> native = t.factory()->NewNativeContext();
- Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
- const int slot = Context::NATIVE_CONTEXT_INDEX;
- native->set(slot, *expected);
-
- Node* const_context = t.jsgraph()->Constant(native);
- Node* param_context = t.graph()->NewNode(t.common()->Parameter(0), start);
-
- {
- // Check that specialization replaces values and forwards effects
- // correctly, and folds values from constant and non-constant contexts
- Node* effect_in = start;
- Node* load = t.graph()->NewNode(t.javascript()->LoadContext(0, slot, true),
- const_context, const_context, effect_in);
-
-
- Node* value_use =
- t.graph()->NewNode(t.simplified()->ChangeTaggedToInt32(), load);
- Node* other_load =
- t.graph()->NewNode(t.javascript()->LoadContext(0, slot, true),
- param_context, param_context, load);
- Node* effect_use = other_load;
- Node* other_use =
- t.graph()->NewNode(t.simplified()->ChangeTaggedToInt32(), other_load);
-
- Node* add = t.graph()->NewNode(
- t.javascript()->Add(BinaryOperationHints::Any()), value_use, other_use,
- param_context, t.jsgraph()->EmptyFrameState(),
- t.jsgraph()->EmptyFrameState(), other_load, start);
-
- Node* ret =
- t.graph()->NewNode(t.common()->Return(), add, effect_use, start);
- Node* end = t.graph()->NewNode(t.common()->End(1), ret);
- USE(end);
- t.graph()->SetEnd(end);
-
- // Double check the above graph is what we expect, or the test is broken.
- CheckEffectInput(effect_in, load);
- CheckEffectInput(load, effect_use);
-
- // Perform the reduction on the entire graph.
- GraphReducer graph_reducer(t.main_zone(), t.graph());
- JSContextSpecialization spec(&graph_reducer, t.jsgraph(),
- MaybeHandle<Context>());
- graph_reducer.AddReducer(&spec);
- graph_reducer.ReduceGraph();
-
- // Effects should have been forwarded (not replaced with a value).
- CheckEffectInput(effect_in, effect_use);
-
- // Use of {other_load} should not have been replaced.
- CHECK_EQ(other_load, other_use->InputAt(0));
-
- Node* replacement = value_use->InputAt(0);
- HeapObjectMatcher match(replacement);
- CHECK(match.HasValue());
- CHECK_EQ(*expected, *match.Value());
- }
- // TODO(titzer): clean up above test and test more complicated effects.
-}
-
-
TEST(SpecializeJSFunction_ToConstant1) {
FunctionTester T(
"(function() { var x = 1; function inc(a)"
@@ -300,10 +225,14 @@ TEST(SpecializeJSFunction_ToConstant_uninit) {
FunctionTester T(
"(function() { if (false) { var x = 1; } function inc(a)"
" { return x; } return inc; })()"); // x is undefined!
-
- CHECK(T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
- CHECK(T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
- CHECK(T.Call(T.Val(-2.1), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+ i::Isolate* isolate = CcTest::i_isolate();
+ CHECK(
+ T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsUndefined(isolate));
+ CHECK(
+ T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsUndefined(isolate));
+ CHECK(T.Call(T.Val(-2.1), T.Val(0.0))
+ .ToHandleChecked()
+ ->IsUndefined(isolate));
}
{
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 0075de5329..88cd6c663c 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -19,7 +19,9 @@ namespace compiler {
class JSTypedLoweringTester : public HandleAndZoneScope {
public:
- explicit JSTypedLoweringTester(int num_parameters = 0)
+ JSTypedLoweringTester(
+ int num_parameters = 0,
+ JSTypedLowering::Flags flags = JSTypedLowering::kDeoptimizationEnabled)
: isolate(main_isolate()),
binop(NULL),
unop(NULL),
@@ -30,7 +32,8 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
deps(main_isolate(), main_zone()),
graph(main_zone()),
typer(main_isolate(), &graph),
- context_node(NULL) {
+ context_node(NULL),
+ flags(flags) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
graph.SetEnd(graph.NewNode(common.End(1), graph.start()));
typer.Run();
@@ -47,7 +50,9 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Graph graph;
Typer typer;
Node* context_node;
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ JSTypedLowering::Flags flags;
+ BinaryOperationHint const binop_hints = BinaryOperationHint::kAny;
+ CompareOperationHint const compare_hints = CompareOperationHint::kAny;
Node* Parameter(Type* t, int32_t index = 0) {
Node* n = graph.NewNode(common.Parameter(index), graph.start());
@@ -82,8 +87,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(main_zone(), &graph);
- JSTypedLowering reducer(&graph_reducer, &deps,
- JSTypedLowering::kDeoptimizationEnabled, &jsgraph,
+ JSTypedLowering reducer(&graph_reducer, &deps, flags, &jsgraph,
main_zone());
Reduction reduction = reducer.Reduce(node);
if (reduction.Changed()) return reduction.replacement();
@@ -243,7 +247,7 @@ TEST(AddNumber1) {
for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
Node* p0 = R.Parameter(kNumberTypes[i], 0);
Node* p1 = R.Parameter(kNumberTypes[i], 1);
- Node* add = R.Binop(R.javascript.Add(BinaryOperationHints::Any()), p0, p1);
+ Node* add = R.Binop(R.javascript.Add(BinaryOperationHint::kAny), p0, p1);
Node* r = R.reduce(add);
R.CheckBinop(IrOpcode::kNumberAdd, r);
@@ -255,11 +259,11 @@ TEST(AddNumber1) {
TEST(NumberBinops) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Add(R.hints), R.simplified.NumberAdd(),
- R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
- R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
- R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
- R.javascript.Modulus(R.hints), R.simplified.NumberModulus(),
+ R.javascript.Add(R.binop_hints), R.simplified.NumberAdd(),
+ R.javascript.Subtract(R.binop_hints), R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.binop_hints), R.simplified.NumberMultiply(),
+ R.javascript.Divide(R.binop_hints), R.simplified.NumberDivide(),
+ R.javascript.Modulus(R.binop_hints), R.simplified.NumberModulus(),
};
for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
@@ -301,11 +305,11 @@ class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
public:
JSBitwiseShiftTypedLoweringTester() : JSTypedLoweringTester() {
int i = 0;
- set(i++, javascript.ShiftLeft(hints), true);
+ set(i++, javascript.ShiftLeft(binop_hints), true);
set(i++, simplified.NumberShiftLeft(), false);
- set(i++, javascript.ShiftRight(hints), true);
+ set(i++, javascript.ShiftRight(binop_hints), true);
set(i++, simplified.NumberShiftRight(), false);
- set(i++, javascript.ShiftRightLogical(hints), false);
+ set(i++, javascript.ShiftRightLogical(binop_hints), false);
set(i++, simplified.NumberShiftRightLogical(), false);
}
static const int kNumberOps = 6;
@@ -357,11 +361,11 @@ class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
public:
JSBitwiseTypedLoweringTester() : JSTypedLoweringTester() {
int i = 0;
- set(i++, javascript.BitwiseOr(hints), true);
+ set(i++, javascript.BitwiseOr(binop_hints), true);
set(i++, simplified.NumberBitwiseOr(), true);
- set(i++, javascript.BitwiseXor(hints), true);
+ set(i++, javascript.BitwiseXor(binop_hints), true);
set(i++, simplified.NumberBitwiseXor(), true);
- set(i++, javascript.BitwiseAnd(hints), true);
+ set(i++, javascript.BitwiseAnd(binop_hints), true);
set(i++, simplified.NumberBitwiseAnd(), true);
}
static const int kNumberOps = 6;
@@ -438,7 +442,7 @@ TEST(JSToNumber_replacement) {
R.graph.NewNode(R.javascript.ToNumber(), n, R.context(),
R.EmptyFrameState(R.context()), R.start(), R.start());
Node* effect_use = R.UseForEffect(c);
- Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+ Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(), n, c);
R.CheckEffectInput(c, effect_use);
Node* r = R.reduce(c);
@@ -492,7 +496,7 @@ TEST(JSToNumberOfNumberOrOtherPrimitive) {
for (size_t i = 0; i < arraysize(others); i++) {
Type* t = Type::Union(Type::Number(), others[i], R.main_zone());
Node* r = R.ReduceUnop(R.javascript.ToNumber(), t);
- CHECK_EQ(IrOpcode::kJSToNumber, r->opcode());
+ CHECK_EQ(IrOpcode::kPlainPrimitiveToNumber, r->opcode());
}
}
@@ -550,7 +554,7 @@ TEST(JSToString_replacement) {
R.graph.NewNode(R.javascript.ToString(), n, R.context(),
R.EmptyFrameState(R.context()), R.start(), R.start());
Node* effect_use = R.UseForEffect(c);
- Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+ Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(), n, c);
R.CheckEffectInput(c, effect_use);
Node* r = R.reduce(c);
@@ -571,10 +575,14 @@ TEST(StringComparison) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.LessThan(), R.simplified.StringLessThan(),
- R.javascript.LessThanOrEqual(), R.simplified.StringLessThanOrEqual(),
- R.javascript.GreaterThan(), R.simplified.StringLessThan(),
- R.javascript.GreaterThanOrEqual(), R.simplified.StringLessThanOrEqual()};
+ R.javascript.LessThan(CompareOperationHint::kAny),
+ R.simplified.StringLessThan(),
+ R.javascript.LessThanOrEqual(CompareOperationHint::kAny),
+ R.simplified.StringLessThanOrEqual(),
+ R.javascript.GreaterThan(CompareOperationHint::kAny),
+ R.simplified.StringLessThan(),
+ R.javascript.GreaterThanOrEqual(CompareOperationHint::kAny),
+ R.simplified.StringLessThanOrEqual()};
for (size_t i = 0; i < arraysize(kStringTypes); i++) {
Node* p0 = R.Parameter(kStringTypes[i], 0);
@@ -604,9 +612,6 @@ TEST(StringComparison) {
static void CheckIsConvertedToNumber(Node* val, Node* converted) {
if (NodeProperties::GetType(val)->Is(Type::Number())) {
CHECK_EQ(val, converted);
- } else if (NodeProperties::GetType(val)->Is(Type::Boolean())) {
- CHECK_EQ(IrOpcode::kBooleanToNumber, converted->opcode());
- CHECK_EQ(val, converted->InputAt(0));
} else {
if (converted->opcode() == IrOpcode::kNumberConstant) return;
CHECK_EQ(IrOpcode::kJSToNumber, converted->opcode());
@@ -618,10 +623,14 @@ TEST(NumberComparison) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.LessThan(), R.simplified.NumberLessThan(),
- R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
- R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
- R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual()};
+ R.javascript.LessThan(CompareOperationHint::kAny),
+ R.simplified.NumberLessThan(),
+ R.javascript.LessThanOrEqual(CompareOperationHint::kAny),
+ R.simplified.NumberLessThanOrEqual(),
+ R.javascript.GreaterThan(CompareOperationHint::kAny),
+ R.simplified.NumberLessThan(),
+ R.javascript.GreaterThanOrEqual(CompareOperationHint::kAny),
+ R.simplified.NumberLessThanOrEqual()};
Node* const p0 = R.Parameter(Type::Number(), 0);
Node* const p1 = R.Parameter(Type::Number(), 1);
@@ -655,7 +664,8 @@ TEST(MixedComparison1) {
for (size_t j = 0; j < arraysize(types); j++) {
Node* p1 = R.Parameter(types[j], 1);
{
- const Operator* less_than = R.javascript.LessThan();
+ const Operator* less_than =
+ R.javascript.LessThan(CompareOperationHint::kAny);
Node* cmp = R.Binop(less_than, p0, p1);
Node* r = R.reduce(cmp);
if (types[i]->Is(Type::String()) && types[j]->Is(Type::String())) {
@@ -700,14 +710,12 @@ TEST(RemoveToNumberEffects) {
case 2:
effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
case 3:
- effect_use =
- R.graph.NewNode(R.javascript.Add(R.hints), ton, ton, R.context(),
- frame_state, frame_state, ton, R.start());
+ effect_use = R.graph.NewNode(R.javascript.Add(R.binop_hints), ton, ton,
+ R.context(), frame_state, ton, R.start());
break;
case 4:
- effect_use =
- R.graph.NewNode(R.javascript.Add(R.hints), p0, p0, R.context(),
- frame_state, frame_state, ton, R.start());
+ effect_use = R.graph.NewNode(R.javascript.Add(R.binop_hints), p0, p0,
+ R.context(), frame_state, ton, R.start());
break;
case 5:
effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
@@ -739,8 +747,10 @@ TEST(RemoveToNumberEffects) {
// Helper class for testing the reduction of a single binop.
class BinopEffectsTester {
public:
- explicit BinopEffectsTester(const Operator* op, Type* t0, Type* t1)
- : R(),
+ BinopEffectsTester(
+ const Operator* op, Type* t0, Type* t1,
+ JSTypedLowering::Flags flags = JSTypedLowering::kDeoptimizationEnabled)
+ : R(0, flags),
p0(R.Parameter(t0, 0)),
p1(R.Parameter(t1, 1)),
binop(R.Binop(op, p0, p1)),
@@ -801,7 +811,8 @@ void CheckEqualityReduction(JSTypedLoweringTester* R, bool strict, Node* l,
{
const Operator* op =
- strict ? R->javascript.StrictEqual() : R->javascript.Equal();
+ strict ? R->javascript.StrictEqual(CompareOperationHint::kAny)
+ : R->javascript.Equal(CompareOperationHint::kAny);
Node* eq = R->Binop(op, p0, p1);
Node* r = R->reduce(eq);
R->CheckBinop(expected, r);
@@ -809,7 +820,8 @@ void CheckEqualityReduction(JSTypedLoweringTester* R, bool strict, Node* l,
{
const Operator* op =
- strict ? R->javascript.StrictNotEqual() : R->javascript.NotEqual();
+ strict ? R->javascript.StrictNotEqual(CompareOperationHint::kAny)
+ : R->javascript.NotEqual(CompareOperationHint::kAny);
Node* ne = R->Binop(op, p0, p1);
Node* n = R->reduce(ne);
CHECK_EQ(IrOpcode::kBooleanNot, n->opcode());
@@ -876,14 +888,22 @@ TEST(RemovePureNumberBinopEffects) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Equal(), R.simplified.NumberEqual(),
- R.javascript.Add(R.hints), R.simplified.NumberAdd(),
- R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
- R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
- R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
- R.javascript.Modulus(R.hints), R.simplified.NumberModulus(),
- R.javascript.LessThan(), R.simplified.NumberLessThan(),
- R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+ R.javascript.Equal(R.compare_hints),
+ R.simplified.NumberEqual(),
+ R.javascript.Add(R.binop_hints),
+ R.simplified.NumberAdd(),
+ R.javascript.Subtract(R.binop_hints),
+ R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.binop_hints),
+ R.simplified.NumberMultiply(),
+ R.javascript.Divide(R.binop_hints),
+ R.simplified.NumberDivide(),
+ R.javascript.Modulus(R.binop_hints),
+ R.simplified.NumberModulus(),
+ R.javascript.LessThan(R.compare_hints),
+ R.simplified.NumberLessThan(),
+ R.javascript.LessThanOrEqual(R.compare_hints),
+ R.simplified.NumberLessThanOrEqual(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
@@ -904,13 +924,13 @@ TEST(OrderNumberBinopEffects1) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
- R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
- R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
+ R.javascript.Subtract(R.binop_hints), R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.binop_hints), R.simplified.NumberMultiply(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Symbol(), Type::Symbol());
+ BinopEffectsTester B(ops[j], Type::Symbol(), Type::Symbol(),
+ JSTypedLowering::kNoFlags);
CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
@@ -929,14 +949,14 @@ TEST(OrderNumberBinopEffects2) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Add(R.hints), R.simplified.NumberAdd(),
- R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
- R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
- R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
+ R.javascript.Add(R.binop_hints), R.simplified.NumberAdd(),
+ R.javascript.Subtract(R.binop_hints), R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.binop_hints), R.simplified.NumberMultiply(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol());
+ BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol(),
+ JSTypedLowering::kNoFlags);
Node* i0 = B.CheckNoOp(0);
Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
@@ -949,7 +969,8 @@ TEST(OrderNumberBinopEffects2) {
}
for (size_t j = 0; j < arraysize(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number());
+ BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number(),
+ JSTypedLowering::kNoFlags);
Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
Node* i1 = B.CheckNoOp(1);
@@ -967,15 +988,18 @@ TEST(OrderCompareEffects) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
- R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+ R.javascript.GreaterThan(R.compare_hints), R.simplified.NumberLessThan(),
+ R.javascript.GreaterThanOrEqual(R.compare_hints),
+ R.simplified.NumberLessThanOrEqual(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Symbol(), Type::String());
+ BinopEffectsTester B(ops[j], Type::Symbol(), Type::String(),
+ JSTypedLowering::kNoFlags);
CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
- Node* i0 = B.CheckConvertedInput(IrOpcode::kStringToNumber, 0, false);
+ Node* i0 =
+ B.CheckConvertedInput(IrOpcode::kPlainPrimitiveToNumber, 0, false);
Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
// Inputs should be commuted.
@@ -987,7 +1011,8 @@ TEST(OrderCompareEffects) {
}
for (size_t j = 0; j < arraysize(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol());
+ BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol(),
+ JSTypedLowering::kNoFlags);
Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
Node* i1 = B.result->InputAt(1);
@@ -1000,7 +1025,8 @@ TEST(OrderCompareEffects) {
}
for (size_t j = 0; j < arraysize(ops); j += 2) {
- BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number());
+ BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number(),
+ JSTypedLowering::kNoFlags);
Node* i0 = B.result->InputAt(0);
Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
@@ -1018,7 +1044,8 @@ TEST(Int32BinopEffects) {
JSBitwiseTypedLoweringTester R;
for (int j = 0; j < R.kNumberOps; j += 2) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
- BinopEffectsTester B(R.ops[j], I32Type(signed_left), I32Type(signed_right));
+ BinopEffectsTester B(R.ops[j], I32Type(signed_left), I32Type(signed_right),
+ JSTypedLowering::kNoFlags);
CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
B.R.CheckBinop(B.result->opcode(), B.result);
@@ -1031,7 +1058,8 @@ TEST(Int32BinopEffects) {
for (int j = 0; j < R.kNumberOps; j += 2) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
- BinopEffectsTester B(R.ops[j], Type::Number(), Type::Number());
+ BinopEffectsTester B(R.ops[j], Type::Number(), Type::Number(),
+ JSTypedLowering::kNoFlags);
CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
B.R.CheckBinop(B.result->opcode(), B.result);
@@ -1044,7 +1072,8 @@ TEST(Int32BinopEffects) {
for (int j = 0; j < R.kNumberOps; j += 2) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
- BinopEffectsTester B(R.ops[j], Type::Number(), Type::Primitive());
+ BinopEffectsTester B(R.ops[j], Type::Number(), Type::Primitive(),
+ JSTypedLowering::kNoFlags);
B.R.CheckBinop(B.result->opcode(), B.result);
@@ -1061,7 +1090,8 @@ TEST(Int32BinopEffects) {
for (int j = 0; j < R.kNumberOps; j += 2) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
- BinopEffectsTester B(R.ops[j], Type::Primitive(), Type::Number());
+ BinopEffectsTester B(R.ops[j], Type::Primitive(), Type::Number(),
+ JSTypedLowering::kNoFlags);
B.R.CheckBinop(B.result->opcode(), B.result);
@@ -1078,7 +1108,8 @@ TEST(Int32BinopEffects) {
for (int j = 0; j < R.kNumberOps; j += 2) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
- BinopEffectsTester B(R.ops[j], Type::Primitive(), Type::Primitive());
+ BinopEffectsTester B(R.ops[j], Type::Primitive(), Type::Primitive(),
+ JSTypedLowering::kNoFlags);
B.R.CheckBinop(B.result->opcode(), B.result);
@@ -1171,23 +1202,18 @@ TEST(Int32Comparisons) {
struct Entry {
const Operator* js_op;
- const Operator* uint_op;
- const Operator* int_op;
const Operator* num_op;
bool commute;
};
- Entry ops[] = {
- {R.javascript.LessThan(), R.machine.Uint32LessThan(),
- R.machine.Int32LessThan(), R.simplified.NumberLessThan(), false},
- {R.javascript.LessThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
- R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
- false},
- {R.javascript.GreaterThan(), R.machine.Uint32LessThan(),
- R.machine.Int32LessThan(), R.simplified.NumberLessThan(), true},
- {R.javascript.GreaterThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
- R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
- true}};
+ Entry ops[] = {{R.javascript.LessThan(R.compare_hints),
+ R.simplified.NumberLessThan(), false},
+ {R.javascript.LessThanOrEqual(R.compare_hints),
+ R.simplified.NumberLessThanOrEqual(), false},
+ {R.javascript.GreaterThan(R.compare_hints),
+ R.simplified.NumberLessThan(), true},
+ {R.javascript.GreaterThanOrEqual(R.compare_hints),
+ R.simplified.NumberLessThanOrEqual(), true}};
for (size_t o = 0; o < arraysize(ops); o++) {
for (size_t i = 0; i < arraysize(kNumberTypes); i++) {
@@ -1201,15 +1227,7 @@ TEST(Int32Comparisons) {
Node* cmp = R.Binop(ops[o].js_op, p0, p1);
Node* r = R.reduce(cmp);
- const Operator* expected;
- if (t0->Is(Type::Unsigned32()) && t1->Is(Type::Unsigned32())) {
- expected = ops[o].uint_op;
- } else if (t0->Is(Type::Signed32()) && t1->Is(Type::Signed32())) {
- expected = ops[o].int_op;
- } else {
- expected = ops[o].num_op;
- }
- R.CheckBinop(expected, r);
+ R.CheckBinop(ops[o].num_op, r);
if (ops[o].commute) {
CHECK_EQ(p1, r->InputAt(0));
CHECK_EQ(p0, r->InputAt(1));
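The trimmed Entry table above drops the machine-level Uint32/Int32 comparison variants and keeps only the Number ops plus a commute flag: GreaterThan(OrEqual) lowers to NumberLessThan(OrEqual) with the inputs swapped. A trivial standalone check of the identities that commuted lowering relies on:

#include <cassert>

int main() {
  for (int a = -2; a <= 2; ++a) {
    for (int b = -2; b <= 2; ++b) {
      assert((a > b) == (b < a));    // GreaterThan == LessThan, commuted
      assert((a >= b) == (b <= a));  // GreaterThanOrEqual == LessThanOrEqual, commuted
    }
  }
  return 0;
}

For JS numbers the same swap remains valid in the presence of NaN, since both sides of each identity evaluate to false when either operand is NaN.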
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 0cbdb4c6b2..6661e916db 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -4,9 +4,10 @@
#include "src/code-stubs.h"
#include "src/compiler.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parse-info.h"
#include "src/zone.h"
+#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
@@ -43,7 +44,7 @@ TEST(TestLinkageCreate) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + b");
ParseInfo parse_info(handles.main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
}
@@ -59,7 +60,7 @@ TEST(TestLinkageJSFunctionIncoming) {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(sources[i]))));
ParseInfo parse_info(handles.main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
@@ -75,7 +76,7 @@ TEST(TestLinkageJSCall) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
ParseInfo parse_info(handles.main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
for (int i = 0; i < 32; i++) {
CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
@@ -97,13 +98,12 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator());
- ToNumberStub stub(isolate);
- CompilationInfo info("test", isolate, &zone, Code::ComputeFlags(Code::STUB));
- CallInterfaceDescriptor interface_descriptor =
- stub.GetCallInterfaceDescriptor();
+ Callable callable = CodeFactory::ToNumber(isolate);
+ CompilationInfo info(ArrayVector("test"), isolate, &zone,
+ Code::ComputeFlags(Code::STUB));
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate, &zone, interface_descriptor, stub.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties);
+ isolate, &zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
+ Operator::kNoProperties);
CHECK(descriptor);
CHECK_EQ(0, static_cast<int>(descriptor->StackParameterCount()));
CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
index 69f5e157ad..8ee79ddb60 100644
--- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/ast/scopes.h"
+#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/rewriter.h"
#include "test/cctest/cctest.h"
@@ -31,13 +33,13 @@ struct TestHelper : public HandleAndZoneScope {
void CheckLoopAssignedCount(int expected, const char* var_name) {
// TODO(titzer): don't scope analyze every single time.
ParseInfo parse_info(main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CHECK(Parser::ParseStatic(&parse_info));
CHECK(Rewriter::Rewrite(&parse_info));
- CHECK(Scope::Analyze(&parse_info));
+ Scope::Analyze(&parse_info);
- Scope* scope = info.literal()->scope();
+ DeclarationScope* scope = info.literal()->scope();
AstValueFactory* factory = parse_info.ast_value_factory();
CHECK(scope);
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index 86888e96f5..d4ea47368a 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -26,11 +26,22 @@ const Operator* NewConstantOperator<int32_t>(CommonOperatorBuilder* common,
}
template <>
+const Operator* NewConstantOperator<int64_t>(CommonOperatorBuilder* common,
+ volatile int64_t value) {
+ return common->Int64Constant(value);
+}
+
+template <>
const Operator* NewConstantOperator<double>(CommonOperatorBuilder* common,
volatile double value) {
return common->Float64Constant(value);
}
+template <>
+const Operator* NewConstantOperator<float>(CommonOperatorBuilder* common,
+ volatile float value) {
+ return common->Float32Constant(value);
+}
template <typename T>
T ValueOfOperator(const Operator* op);
@@ -42,6 +53,18 @@ int32_t ValueOfOperator<int32_t>(const Operator* op) {
}
template <>
+int64_t ValueOfOperator<int64_t>(const Operator* op) {
+ CHECK_EQ(IrOpcode::kInt64Constant, op->opcode());
+ return OpParameter<int64_t>(op);
+}
+
+template <>
+float ValueOfOperator<float>(const Operator* op) {
+ CHECK_EQ(IrOpcode::kFloat32Constant, op->opcode());
+ return OpParameter<float>(op);
+}
+
+template <>
double ValueOfOperator<double>(const Operator* op) {
CHECK_EQ(IrOpcode::kFloat64Constant, op->opcode());
return OpParameter<double>(op);
@@ -50,9 +73,9 @@ double ValueOfOperator<double>(const Operator* op) {
class ReducerTester : public HandleAndZoneScope {
public:
- explicit ReducerTester(
- int num_parameters = 0,
- MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags)
+ explicit ReducerTester(int num_parameters = 0,
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kAllOptionalOps)
: isolate(main_isolate()),
binop(NULL),
unop(NULL),
@@ -105,7 +128,15 @@ class ReducerTester : public HandleAndZoneScope {
Reduction reduction = reducer.Reduce(n);
CHECK(reduction.Changed());
CHECK_NE(n, reduction.replacement());
- CHECK_EQ(expect, ValueOf<T>(reduction.replacement()->op()));
+ // Deal with NaNs.
+ if (expect == expect) {
+ // We do not expect a NaN, check for equality.
+ CHECK_EQ(expect, ValueOf<T>(reduction.replacement()->op()));
+ } else {
+ // Check for NaN.
+ T result = ValueOf<T>(reduction.replacement()->op());
+ CHECK_NE(result, result);
+ }
}
// Check that the reduction of this binop applied to {a} and {b} yields
@@ -315,6 +346,24 @@ TEST(ReduceWord32Shl) {
R.CheckBinop(x, x, zero); // x << 0 => x
}
+TEST(ReduceWord64Shl) {
+ ReducerTester R;
+ R.binop = R.machine.Word64Shl();
+
+ FOR_INT64_INPUTS(i) {
+ for (int64_t y = 0; y < 64; y++) {
+ int64_t x = *i;
+ R.CheckFoldBinop<int64_t>(x << y, x, y);
+ }
+ }
+
+ R.CheckDontPutConstantOnRight(44);
+
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<int64_t>(0);
+
+ R.CheckBinop(x, x, zero); // x << 0 => x
+}
TEST(ReduceWord32Shr) {
ReducerTester R;
@@ -336,6 +385,24 @@ TEST(ReduceWord32Shr) {
R.CheckBinop(x, x, zero); // x >>> 0 => x
}
+TEST(ReduceWord64Shr) {
+ ReducerTester R;
+ R.binop = R.machine.Word64Shr();
+
+ FOR_UINT64_INPUTS(i) {
+ for (uint64_t y = 0; y < 64; y++) {
+ uint64_t x = *i;
+ R.CheckFoldBinop<int64_t>(x >> y, x, y);
+ }
+ }
+
+ R.CheckDontPutConstantOnRight(44);
+
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<int64_t>(0);
+
+ R.CheckBinop(x, x, zero); // x >>> 0 => x
+}
TEST(ReduceWord32Sar) {
ReducerTester R;
@@ -357,6 +424,24 @@ TEST(ReduceWord32Sar) {
R.CheckBinop(x, x, zero); // x >> 0 => x
}
+TEST(ReduceWord64Sar) {
+ ReducerTester R;
+ R.binop = R.machine.Word64Sar();
+
+ FOR_INT64_INPUTS(i) {
+ for (int64_t y = 0; y < 64; y++) {
+ int64_t x = *i;
+ R.CheckFoldBinop<int64_t>(x >> y, x, y);
+ }
+ }
+
+ R.CheckDontPutConstantOnRight(44);
+
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<int64_t>(0);
+
+ R.CheckBinop(x, x, zero); // x >> 0 => x
+}
static void CheckJsShift(ReducerTester* R) {
CHECK(R->machine.Word32ShiftIsSafe());
@@ -433,6 +518,24 @@ TEST(ReduceInt32Add) {
R.CheckBinop(x, zero, x); // 0 + x => x
}
+TEST(ReduceInt64Add) {
+ ReducerTester R;
+ R.binop = R.machine.Int64Add();
+
+ FOR_INT64_INPUTS(pl) {
+ FOR_INT64_INPUTS(pr) {
+ int64_t x = *pl, y = *pr;
+ R.CheckFoldBinop<int64_t>(x + y, x, y);
+ }
+ }
+
+ R.CheckPutConstantOnRight(41);
+
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<int64_t>(0);
+ R.CheckBinop(x, x, zero); // x + 0 => x
+ R.CheckBinop(x, zero, x); // 0 + x => x
+}
TEST(ReduceInt32Sub) {
ReducerTester R;
@@ -453,6 +556,30 @@ TEST(ReduceInt32Sub) {
R.CheckBinop(x, x, zero); // x - 0 => x
}
+TEST(ReduceInt64Sub) {
+ ReducerTester R;
+ R.binop = R.machine.Int64Sub();
+
+ FOR_INT64_INPUTS(pl) {
+ FOR_INT64_INPUTS(pr) {
+ int64_t x = *pl, y = *pr;
+ R.CheckFoldBinop<int64_t>(x - y, x, y);
+ }
+ }
+
+ R.CheckDontPutConstantOnRight(42);
+
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<int64_t>(0);
+
+ R.CheckBinop(x, x, zero); // x - 0 => x
+ R.CheckFoldBinop<int64_t>(0, x, x); // x - x => 0
+
+ Node* k = R.Constant<int64_t>(6);
+
+ R.CheckFoldBinop<int64_t>(x, R.machine.Int64Add(), -6, x,
+ k); // x - K => x + -K
+}
TEST(ReduceInt32Mul) {
ReducerTester R;
@@ -616,13 +743,8 @@ TEST(ReduceInt32LessThan) {
R.CheckDontPutConstantOnRight(-440197);
Node* x = R.Parameter(0);
- Node* y = R.Parameter(1);
- Node* zero = R.Constant<int32_t>(0);
- Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
R.CheckFoldBinop<int32_t>(0, x, x); // x < x => 0
- R.CheckFoldBinop(x, y, sub, zero); // x - y < 0 => x < y
- R.CheckFoldBinop(y, x, zero, sub); // 0 < x - y => y < x
}
@@ -640,13 +762,8 @@ TEST(ReduceInt32LessThanOrEqual) {
FOR_INT32_INPUTS(i) { R.CheckDontPutConstantOnRight<int32_t>(*i); }
Node* x = R.Parameter(0);
- Node* y = R.Parameter(1);
- Node* zero = R.Constant<int32_t>(0);
- Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
R.CheckFoldBinop<int32_t>(1, x, x); // x <= x => 1
- R.CheckFoldBinop(x, y, sub, zero); // x - y <= 0 => x <= y
- R.CheckFoldBinop(y, x, zero, sub); // 0 <= x - y => y <= x
}
@@ -723,17 +840,51 @@ TEST(ReduceLoadStore) {
}
}
+TEST(ReduceFloat32Sub) {
+ ReducerTester R;
+ R.binop = R.machine.Float32Sub();
+
+ FOR_FLOAT32_INPUTS(pl) {
+ FOR_FLOAT32_INPUTS(pr) {
+ float x = *pl, y = *pr;
+ R.CheckFoldBinop<float>(x - y, x, y);
+ }
+ }
+
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<float>(0.0);
+ Node* nan = R.Constant<float>(std::numeric_limits<float>::quiet_NaN());
+
+ R.CheckBinop(x, x, zero); // x - 0 => x
+ R.CheckBinop(nan, nan, x); // nan - x => nan
+ R.CheckBinop(nan, x, nan); // x - nan => nan
+}
+
+TEST(ReduceFloat64Sub) {
+ ReducerTester R;
+ R.binop = R.machine.Float64Sub();
+
+ FOR_FLOAT64_INPUTS(pl) {
+ FOR_FLOAT64_INPUTS(pr) {
+ double x = *pl, y = *pr;
+ R.CheckFoldBinop<double>(x - y, x, y);
+ }
+ }
+
+ Node* x = R.Parameter();
+ Node* zero = R.Constant<double>(0.0);
+ Node* nan = R.Constant<double>(std::numeric_limits<double>::quiet_NaN());
+
+ R.CheckBinop(x, x, zero); // x - 0 => x
+ R.CheckBinop(nan, nan, x); // nan - x => nan
+ R.CheckBinop(nan, x, nan); // x - nan => nan
+}
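
// The floating-point identities exercised above, restated as a standalone
// sketch; IEEE-754 semantics are assumed, which is what the reducer relies on:
#include <cassert>
#include <cmath>
#include <limits>
int main() {
  double x = 2.5;
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert((x - 0.0) == x);       // x - 0 => x
  assert(std::isnan(nan - x));  // nan - x => nan
  assert(std::isnan(x - nan));  // x - nan => nan
  return 0;
}
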
// TODO(titzer): test MachineOperatorReducer for Word64And
// TODO(titzer): test MachineOperatorReducer for Word64Or
// TODO(titzer): test MachineOperatorReducer for Word64Xor
-// TODO(titzer): test MachineOperatorReducer for Word64Shl
-// TODO(titzer): test MachineOperatorReducer for Word64Shr
-// TODO(titzer): test MachineOperatorReducer for Word64Sar
// TODO(titzer): test MachineOperatorReducer for Word64Equal
// TODO(titzer): test MachineOperatorReducer for Word64Not
-// TODO(titzer): test MachineOperatorReducer for Int64Add
-// TODO(titzer): test MachineOperatorReducer for Int64Sub
// TODO(titzer): test MachineOperatorReducer for Int64Mul
// TODO(titzer): test MachineOperatorReducer for Int64UMul
// TODO(titzer): test MachineOperatorReducer for Int64Div
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 2108ab1302..2221ffbc86 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -25,25 +25,21 @@ namespace {
CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
int param_count) {
- MachineSignature::Builder msig(zone, return_count, param_count);
LocationSignature::Builder locations(zone, return_count, param_count);
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ const RegisterConfiguration* config = RegisterConfiguration::Turbofan();
// Add return location(s).
CHECK(return_count <= config->num_allocatable_general_registers());
for (int i = 0; i < return_count; i++) {
- msig.AddReturn(MachineType::Int32());
- locations.AddReturn(
- LinkageLocation::ForRegister(config->allocatable_general_codes()[i]));
+ locations.AddReturn(LinkageLocation::ForRegister(
+ config->allocatable_general_codes()[i], MachineType::AnyTagged()));
}
// Add register and/or stack parameter(s).
CHECK(param_count <= config->num_allocatable_general_registers());
for (int i = 0; i < param_count; i++) {
- msig.AddParam(MachineType::Int32());
- locations.AddParam(
- LinkageLocation::ForRegister(config->allocatable_general_codes()[i]));
+ locations.AddParam(LinkageLocation::ForRegister(
+ config->allocatable_general_codes()[i], MachineType::AnyTagged()));
}
const RegList kCalleeSaveRegisters = 0;
@@ -56,7 +52,6 @@ CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
- msig.Build(), // machine_sig
locations.Build(), // location_sig
0, // js_parameter_count
compiler::Operator::kNoProperties, // properties
@@ -85,7 +80,8 @@ TEST(ReturnThreeValues) {
Node* mul = m.Int32Mul(p0, p1);
m.Return(add, sub, mul);
- CompilationInfo info("testing", handles.main_isolate(), handles.main_zone());
+ CompilationInfo info(ArrayVector("testing"), handles.main_isolate(),
+ handles.main_zone());
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, m.graph(), m.Export());
#ifdef ENABLE_DISASSEMBLER
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index d317c3877c..e2aacf3100 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -320,6 +320,80 @@ TEST(Inputs) {
CHECK_USES(n4, n3, n3, n5);
}
+TEST(InsertInputs) {
+ base::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ Graph graph(&zone);
+
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
+
+ {
+ Node* node = graph.NewNode(&dummy_operator1, n0);
+ node->InsertInputs(graph.zone(), 0, 1);
+ node->ReplaceInput(0, n1);
+ CHECK_INPUTS(node, n1, n0);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator1, n0);
+ node->InsertInputs(graph.zone(), 0, 2);
+ node->ReplaceInput(0, node);
+ node->ReplaceInput(1, n2);
+ CHECK_INPUTS(node, node, n2, n0);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ node->InsertInputs(graph.zone(), 0, 1);
+ node->ReplaceInput(0, node);
+ CHECK_INPUTS(node, node, n0, n1, n2);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ node->InsertInputs(graph.zone(), 1, 1);
+ node->ReplaceInput(1, node);
+ CHECK_INPUTS(node, n0, node, n1, n2);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ node->InsertInputs(graph.zone(), 2, 1);
+ node->ReplaceInput(2, node);
+ CHECK_INPUTS(node, n0, n1, node, n2);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ node->InsertInputs(graph.zone(), 2, 1);
+ node->ReplaceInput(2, node);
+ CHECK_INPUTS(node, n0, n1, node, n2);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ node->InsertInputs(graph.zone(), 0, 4);
+ node->ReplaceInput(0, node);
+ node->ReplaceInput(1, node);
+ node->ReplaceInput(2, node);
+ node->ReplaceInput(3, node);
+ CHECK_INPUTS(node, node, node, node, node, n0, n1, n2);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ node->InsertInputs(graph.zone(), 1, 4);
+ node->ReplaceInput(1, node);
+ node->ReplaceInput(2, node);
+ node->ReplaceInput(3, node);
+ node->ReplaceInput(4, node);
+ CHECK_INPUTS(node, n0, node, node, node, node, n1, n2);
+ }
+ {
+ Node* node = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ node->InsertInputs(graph.zone(), 2, 4);
+ node->ReplaceInput(2, node);
+ node->ReplaceInput(3, node);
+ node->ReplaceInput(4, node);
+ node->ReplaceInput(5, node);
+ CHECK_INPUTS(node, n0, n1, node, node, node, node, n2);
+ }
+}
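
// A plain-C++ model of what the checks above imply about
// Node::InsertInputs(zone, index, count): count new slots open at index,
// inputs from index onward shift right, and the fresh slots are then filled
// via ReplaceInput. (Inferred from the CHECK_INPUTS patterns, not from V8's
// implementation.)
#include <cassert>
#include <vector>
int main() {
  std::vector<int> inputs = {0, 1, 2};       // stands in for n0, n1, n2
  inputs.insert(inputs.begin() + 1, 1, -1);  // InsertInputs(zone, 1, 1)
  inputs[1] = 99;                            // ReplaceInput(1, node)
  assert((inputs == std::vector<int>{0, 99, 1, 2}));  // n0, node, n1, n2
  return 0;
}
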
TEST(RemoveInput) {
base::AccountingAllocator allocator;
diff --git a/deps/v8/test/cctest/compiler/test-operator.cc b/deps/v8/test/cctest/compiler/test-operator.cc
index eecf46a054..90814ba99d 100644
--- a/deps/v8/test/cctest/compiler/test-operator.cc
+++ b/deps/v8/test/cctest/compiler/test-operator.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
#include <sstream>
#include "src/compiler/operator.h"
@@ -67,11 +68,10 @@ TEST(TestOperator_Equals) {
CHECK(!op2b.Equals(&op1b));
}
-
-static v8::base::SmartArrayPointer<const char> OperatorToString(Operator* op) {
+static std::unique_ptr<char[]> OperatorToString(Operator* op) {
std::ostringstream os;
os << *op;
- return v8::base::SmartArrayPointer<const char>(StrDup(os.str().c_str()));
+ return std::unique_ptr<char[]>(StrDup(os.str().c_str()));
}
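
// A self-contained sketch of the ownership pattern the replacement adopts
// (assumption: StrDup returns a new[]-allocated copy, so
// std::unique_ptr<char[]> is the matching owner and releases it with
// delete[]):
#include <cassert>
#include <cstring>
#include <memory>
#include <sstream>
static std::unique_ptr<char[]> ToHeapString(const std::ostringstream& os) {
  const std::string s = os.str();
  std::unique_ptr<char[]> out(new char[s.size() + 1]);
  std::memcpy(out.get(), s.c_str(), s.size() + 1);
  return out;
}
int main() {
  std::ostringstream os;
  os << 42;
  assert(std::strcmp(ToHeapString(os).get(), "42") == 0);
  return 0;  // buffer freed automatically via delete[]
}
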
diff --git a/deps/v8/test/cctest/compiler/test-osr.cc b/deps/v8/test/cctest/compiler/test-osr.cc
index f0640c2e0a..9e3445ac0b 100644
--- a/deps/v8/test/cctest/compiler/test-osr.cc
+++ b/deps/v8/test/cctest/compiler/test-osr.cc
@@ -120,10 +120,10 @@ class OsrDeconstructorTester : public HandleAndZoneScope {
CHECK(!nodes.IsLive(osr_normal_entry));
CHECK(!nodes.IsLive(osr_loop_entry));
// No dangling nodes should be left over.
- for (Node* const node : nodes.live) {
+ for (Node* const node : nodes.reachable) {
for (Node* const use : node->uses()) {
- CHECK(std::find(nodes.live.begin(), nodes.live.end(), use) !=
- nodes.live.end());
+ CHECK(std::find(nodes.reachable.begin(), nodes.reachable.end(), use) !=
+ nodes.reachable.end());
}
}
}
diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc
deleted file mode 100644
index 35e342765b..0000000000
--- a/deps/v8/test/cctest/compiler/test-pipeline.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler.h"
-#include "src/compiler/pipeline.h"
-#include "src/handles.h"
-#include "src/parsing/parser.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-static void RunPipeline(Zone* zone, const char* source) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(source))));
- ParseInfo parse_info(zone, function);
- CHECK(Compiler::ParseAndAnalyze(&parse_info));
- CompilationInfo info(&parse_info);
- info.SetOptimizing();
-
- Pipeline pipeline(&info);
- Handle<Code> code = pipeline.GenerateCode();
- CHECK(!code.is_null());
-}
-
-
-TEST(PipelineTyped) {
- HandleAndZoneScope handles;
- FLAG_turbo_types = true;
- RunPipeline(handles.main_zone(), "(function(a,b) { return a + b; })");
-}
-
-
-TEST(PipelineGeneric) {
- HandleAndZoneScope handles;
- FLAG_turbo_types = false;
- RunPipeline(handles.main_zone(), "(function(a,b) { return a + b; })");
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 7e75bf8eb0..b475e9a5b9 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -83,12 +83,20 @@ class RepresentationChangerTester : public HandleAndZoneScope,
return n;
}
+ Node* Return(Node* input) {
+ Node* n = graph()->NewNode(common()->Return(), input, graph()->start(),
+ graph()->start());
+ return n;
+ }
+
void CheckTypeError(MachineRepresentation from, Type* from_type,
MachineRepresentation to) {
changer()->testing_type_errors_ = true;
changer()->type_error_ = false;
Node* n = Parameter(0);
- Node* c = changer()->GetRepresentationFor(n, from, from_type, to);
+ Node* use = Return(n);
+ Node* c = changer()->GetRepresentationFor(n, from, from_type, use,
+ UseInfo(to, Truncation::None()));
CHECK(changer()->type_error_);
CHECK_EQ(n, c);
}
@@ -96,7 +104,9 @@ class RepresentationChangerTester : public HandleAndZoneScope,
void CheckNop(MachineRepresentation from, Type* from_type,
MachineRepresentation to) {
Node* n = Parameter(0);
- Node* c = changer()->GetRepresentationFor(n, from, from_type, to);
+ Node* use = Return(n);
+ Node* c = changer()->GetRepresentationFor(n, from, from_type, use,
+ UseInfo(to, Truncation::None()));
CHECK_EQ(n, c);
}
};
@@ -113,15 +123,17 @@ TEST(BoolToBit_constant) {
RepresentationChangerTester r;
Node* true_node = r.jsgraph()->TrueConstant();
+ Node* true_use = r.Return(true_node);
Node* true_bit = r.changer()->GetRepresentationFor(
- true_node, MachineRepresentation::kTagged, Type::None(),
- MachineRepresentation::kBit);
+ true_node, MachineRepresentation::kTagged, Type::None(), true_use,
+ UseInfo(MachineRepresentation::kBit, Truncation::None()));
r.CheckInt32Constant(true_bit, 1);
Node* false_node = r.jsgraph()->FalseConstant();
+ Node* false_use = r.Return(false_node);
Node* false_bit = r.changer()->GetRepresentationFor(
- false_node, MachineRepresentation::kTagged, Type::None(),
- MachineRepresentation::kBit);
+ false_node, MachineRepresentation::kTagged, Type::None(), false_use,
+ UseInfo(MachineRepresentation::kBit, Truncation::None()));
r.CheckInt32Constant(false_bit, 0);
}
@@ -131,9 +143,10 @@ TEST(BitToBool_constant) {
for (int i = -5; i < 5; i++) {
Node* node = r.jsgraph()->Int32Constant(i);
+ Node* use = r.Return(node);
Node* val = r.changer()->GetRepresentationFor(
- node, MachineRepresentation::kBit, Type::Boolean(),
- MachineRepresentation::kTagged);
+ node, MachineRepresentation::kBit, Type::Boolean(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
r.CheckHeapConstant(val, i == 0 ? r.isolate()->heap()->false_value()
: r.isolate()->heap()->true_value());
}
@@ -146,49 +159,54 @@ TEST(ToTagged_constant) {
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(),
- MachineRepresentation::kTagged);
- r.CheckNumberConstant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
+ r.CheckNumberConstant(c, *i);
}
}
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(),
- MachineRepresentation::kTagged);
- r.CheckNumberConstant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
+ r.CheckNumberConstant(c, *i);
}
}
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::None(),
- MachineRepresentation::kTagged);
- r.CheckNumberConstant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::None(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
+ r.CheckNumberConstant(c, *i);
}
}
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(),
- MachineRepresentation::kTagged);
- r.CheckNumberConstant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
+ r.CheckNumberConstant(c, *i);
}
}
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(),
- MachineRepresentation::kTagged);
+ n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
r.CheckNumberConstant(c, *i);
}
}
@@ -201,49 +219,54 @@ TEST(ToFloat64_constant) {
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(),
- MachineRepresentation::kFloat64);
- CHECK_EQ(n, c);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ CHECK_EQ(n, c);
}
}
{
FOR_FLOAT64_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, Type::None(),
- MachineRepresentation::kFloat64);
- r.CheckFloat64Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ r.CheckFloat64Constant(c, *i);
}
}
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::None(),
- MachineRepresentation::kFloat64);
- r.CheckFloat64Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ r.CheckFloat64Constant(c, *i);
}
}
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(),
- MachineRepresentation::kFloat64);
- r.CheckFloat64Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ r.CheckFloat64Constant(c, *i);
}
}
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(),
- MachineRepresentation::kFloat64);
+ n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
r.CheckFloat64Constant(c, *i);
}
}
@@ -264,30 +287,33 @@ TEST(ToFloat32_constant) {
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::None(),
- MachineRepresentation::kFloat32);
- CHECK_EQ(n, c);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat32, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
+ CHECK_EQ(n, c);
}
}
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, Type::None(),
- MachineRepresentation::kFloat32);
- r.CheckFloat32Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
+ r.CheckFloat32Constant(c, *i);
}
}
{
FOR_FLOAT32_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(),
- MachineRepresentation::kFloat32);
- r.CheckFloat32Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
+ r.CheckFloat32Constant(c, *i);
}
}
@@ -295,9 +321,10 @@ TEST(ToFloat32_constant) {
FOR_INT32_INPUTS(i) {
if (!IsFloat32Int32(*i)) continue;
Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(),
- MachineRepresentation::kFloat32);
+ n, MachineRepresentation::kWord32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
r.CheckFloat32Constant(c, static_cast<float>(*i));
}
}
@@ -306,9 +333,10 @@ TEST(ToFloat32_constant) {
FOR_UINT32_INPUTS(i) {
if (!IsFloat32Uint32(*i)) continue;
Node* n = r.jsgraph()->Int32Constant(*i);
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(),
- MachineRepresentation::kFloat32);
+ n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
r.CheckFloat32Constant(c, static_cast<float>(*i));
}
}
@@ -321,10 +349,11 @@ TEST(ToInt32_constant) {
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(),
- MachineRepresentation::kWord32);
- r.CheckInt32Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
+ r.CheckInt32Constant(c, *i);
}
}
@@ -332,9 +361,10 @@ TEST(ToInt32_constant) {
FOR_INT32_INPUTS(i) {
if (!IsFloat32Int32(*i)) continue;
Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::Signed32(),
- MachineRepresentation::kWord32);
+ n, MachineRepresentation::kFloat32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
r.CheckInt32Constant(c, *i);
}
}
@@ -342,19 +372,21 @@ TEST(ToInt32_constant) {
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::Signed32(),
- MachineRepresentation::kWord32);
- r.CheckInt32Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
+ r.CheckInt32Constant(c, *i);
}
}
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, Type::Signed32(),
- MachineRepresentation::kWord32);
+ n, MachineRepresentation::kTagged, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
r.CheckInt32Constant(c, *i);
}
}
@@ -367,10 +399,11 @@ TEST(ToUint32_constant) {
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Int32Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(),
- MachineRepresentation::kWord32);
- r.CheckUint32Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
+ r.CheckUint32Constant(c, *i);
}
}
@@ -378,9 +411,10 @@ TEST(ToUint32_constant) {
FOR_UINT32_INPUTS(i) {
if (!IsFloat32Uint32(*i)) continue;
Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::Unsigned32(),
- MachineRepresentation::kWord32);
+ n, MachineRepresentation::kFloat32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
r.CheckUint32Constant(c, *i);
}
}
@@ -388,31 +422,34 @@ TEST(ToUint32_constant) {
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Float64Constant(*i);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::Unsigned32(),
- MachineRepresentation::kWord32);
- r.CheckUint32Constant(c, *i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
+ r.CheckUint32Constant(c, *i);
}
}
{
FOR_UINT32_INPUTS(i) {
Node* n = r.jsgraph()->Constant(static_cast<double>(*i));
+ Node* use = r.Return(n);
Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, Type::Unsigned32(),
- MachineRepresentation::kWord32);
+ n, MachineRepresentation::kTagged, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
r.CheckUint32Constant(c, *i);
}
}
}
-
static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
Type* from_type, MachineRepresentation to) {
RepresentationChangerTester r;
Node* n = r.Parameter();
- Node* c = r.changer()->GetRepresentationFor(n, from, from_type, to);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(n, from, from_type, use,
+ UseInfo(to, Truncation::None()));
CHECK_NE(c, n);
CHECK_EQ(expected, c->opcode());
@@ -427,7 +464,9 @@ static void CheckTwoChanges(IrOpcode::Value expected2,
RepresentationChangerTester r;
Node* n = r.Parameter();
- Node* c1 = r.changer()->GetRepresentationFor(n, from, from_type, to);
+ Node* use = r.Return(n);
+ Node* c1 = r.changer()->GetRepresentationFor(n, from, from_type, use,
+ UseInfo(to, Truncation::None()));
CHECK_NE(c1, n);
CHECK_EQ(expected1, c1->opcode());
@@ -437,26 +476,64 @@ static void CheckTwoChanges(IrOpcode::Value expected2,
CHECK_EQ(n, c2->InputAt(0));
}
+static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
+ Type* from_type, MachineRepresentation to,
+ UseInfo use_info) {
+ RepresentationChangerTester r;
+
+ Node* n = r.Parameter();
+ Node* use = r.Return(n);
+ Node* c =
+ r.changer()->GetRepresentationFor(n, from, from_type, use, use_info);
+
+ CHECK_NE(c, n);
+ CHECK_EQ(expected, c->opcode());
+ CHECK_EQ(n, c->InputAt(0));
+}
TEST(SingleChanges) {
- CheckChange(IrOpcode::kChangeBoolToBit, MachineRepresentation::kTagged,
+ CheckChange(IrOpcode::kChangeTaggedToBit, MachineRepresentation::kTagged,
Type::None(), MachineRepresentation::kBit);
- CheckChange(IrOpcode::kChangeBitToBool, MachineRepresentation::kBit,
+ CheckChange(IrOpcode::kChangeBitToTagged, MachineRepresentation::kBit,
Type::None(), MachineRepresentation::kTagged);
+ CheckChange(IrOpcode::kChangeInt31ToTaggedSigned,
+ MachineRepresentation::kWord32, Type::Signed31(),
+ MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeInt32ToTagged, MachineRepresentation::kWord32,
Type::Signed32(), MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeUint32ToTagged, MachineRepresentation::kWord32,
Type::Unsigned32(), MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeFloat64ToTagged, MachineRepresentation::kFloat64,
- Type::None(), MachineRepresentation::kTagged);
+ Type::Number(), MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kChangeFloat64ToInt32,
+ IrOpcode::kChangeInt31ToTaggedSigned,
+ MachineRepresentation::kFloat64, Type::Signed31(),
+ MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kChangeFloat64ToInt32,
+ IrOpcode::kChangeInt32ToTagged,
+ MachineRepresentation::kFloat64, Type::Signed32(),
+ MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kChangeFloat64ToUint32,
+ IrOpcode::kChangeUint32ToTagged,
+ MachineRepresentation::kFloat64, Type::Unsigned32(),
+ MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeTaggedToInt32, MachineRepresentation::kTagged,
Type::Signed32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kChangeTaggedToUint32, MachineRepresentation::kTagged,
Type::Unsigned32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
- Type::None(), MachineRepresentation::kFloat64);
+ Type::Number(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
+ Type::Number(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kTruncateTaggedToFloat64,
+ MachineRepresentation::kTagged, Type::NumberOrUndefined(),
+ MachineRepresentation::kFloat64);
+ CheckTwoChanges(IrOpcode::kChangeTaggedSignedToInt32,
+ IrOpcode::kChangeInt32ToFloat64,
+ MachineRepresentation::kTagged, Type::TaggedSigned(),
+ MachineRepresentation::kFloat64);
// Int32,Uint32 <-> Float64 are actually machine conversions.
CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
@@ -513,16 +590,20 @@ TEST(SignednessInWord32) {
Type::None(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kChangeFloat64ToInt32, MachineRepresentation::kFloat64,
Type::Signed32(), MachineRepresentation::kWord32);
- CheckChange(IrOpcode::kTruncateFloat64ToInt32,
+ CheckChange(IrOpcode::kTruncateFloat64ToWord32,
MachineRepresentation::kFloat64, Type::Number(),
MachineRepresentation::kWord32);
+ CheckChange(IrOpcode::kCheckedTruncateTaggedToWord32,
+ MachineRepresentation::kTagged, Type::NumberOrOddball(),
+ MachineRepresentation::kWord32,
+ UseInfo::CheckedNumberOrOddballAsWord32());
CheckTwoChanges(IrOpcode::kChangeInt32ToFloat64,
IrOpcode::kTruncateFloat64ToFloat32,
MachineRepresentation::kWord32, Type::None(),
MachineRepresentation::kFloat32);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
- IrOpcode::kTruncateFloat64ToInt32,
+ IrOpcode::kTruncateFloat64ToWord32,
MachineRepresentation::kFloat32, Type::Number(),
MachineRepresentation::kWord32);
}
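
// A sketch of the semantic distinction behind the renamed opcode (an
// assumption from the names: ChangeFloat64ToInt32 requires an exactly
// representable signed value, while TruncateFloat64ToWord32 performs a
// JS-style truncation that keeps only the low 32 bits):
#include <cassert>
#include <cmath>
#include <cstdint>
uint32_t TruncateFloat64ToWord32Model(double d) {
  // Valid while trunc(d) fits in int64_t, which suffices for a sketch.
  return static_cast<uint32_t>(static_cast<int64_t>(std::trunc(d)));
}
int main() {
  assert(TruncateFloat64ToWord32Model(3.7) == 3u);
  assert(TruncateFloat64ToWord32Model(-1.9) == 0xFFFFFFFFu);  // -1 mod 2^32
  return 0;
}
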
@@ -593,10 +674,6 @@ TEST(TypeErrors) {
// Floats cannot be implicitly converted to/from comparison conditions.
r.CheckTypeError(MachineRepresentation::kFloat64, Type::None(),
MachineRepresentation::kBit);
- r.CheckTypeError(MachineRepresentation::kBit, Type::None(),
- MachineRepresentation::kFloat64);
- r.CheckTypeError(MachineRepresentation::kBit, Type::Boolean(),
- MachineRepresentation::kFloat64);
// Floats cannot be implicitly converted to/from comparison conditions.
r.CheckTypeError(MachineRepresentation::kFloat32, Type::None(),
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index c32f92387e..446b5e7d5f 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -4,12 +4,13 @@
#include <utility>
+#include "src/compiler.h"
#include "src/compiler/pipeline.h"
#include "src/execution.h"
#include "src/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/interpreter.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parse-info.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -125,11 +126,11 @@ class BytecodeGraphTester {
// having to instantiate a ParseInfo first. Fix this!
ParseInfo parse_info(zone_, function);
- CompilationInfo compilation_info(&parse_info);
+ CompilationInfo compilation_info(&parse_info, function);
compilation_info.SetOptimizing();
compilation_info.MarkAsDeoptimizationEnabled();
- compiler::Pipeline pipeline(&compilation_info);
- Handle<Code> code = pipeline.GenerateCode();
+ compilation_info.MarkAsOptimizeFromBytecode();
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&compilation_info);
function->ReplaceCode(*code);
return function;
@@ -2332,7 +2333,19 @@ TEST(BytecodeGraphBuilderDo) {
" if (x == 4) break;\n"
"} while (x < 7);\n"
"return y;",
- {factory->NewNumberFromInt(16)}}};
+ {factory->NewNumberFromInt(16)}},
+ {"var x = 0, sum = 0;\n"
+ "do {\n"
+ " do {\n"
+ " ++sum;\n"
+ " ++x;\n"
+ " } while (sum < 1 || x < 2)\n"
+ " do {\n"
+ " ++x;\n"
+ " } while (x < 1)\n"
+ "} while (sum < 3)\n"
+ "return sum;",
+ {factory->NewNumber(3)}}};
for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
@@ -2413,6 +2426,19 @@ TEST(BytecodeGraphBuilderFor) {
"}\n"
"return sum;",
{factory->NewNumberFromInt(385)}},
+ {"var sum = 0;\n"
+ "for (var x = 0; x < 5; x++) {\n"
+ " for (var y = 0; y < 5; y++) {\n"
+ " ++sum;\n"
+ " }\n"
+ "}\n"
+ "for (var x = 0; x < 5; x++) {\n"
+ " for (var y = 0; y < 5; y++) {\n"
+ " ++sum;\n"
+ " }\n"
+ "}\n"
+ "return sum;",
+ {factory->NewNumberFromInt(50)}},
};
for (size_t i = 0; i < arraysize(snippets); i++) {
diff --git a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
index 3b79cd8a44..0bc6ff3998 100644
--- a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
+++ b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
@@ -2,6 +2,7 @@
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
+#include "src/wasm/wasm-external-refs.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -10,521 +11,240 @@ namespace v8 {
namespace internal {
namespace compiler {
-template <typename T>
-void TestExternalReferenceRoundingFunction(
- BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
- T (*comparison)(T)) {
- T parameter;
+template <typename P>
+void TestExternalReference(BufferedRawMachineAssemblerTester<int32_t>* m,
+ ExternalReference ref, void (*comparison)(P*),
+ P param) {
+ P comparison_param = param;
Node* function = m->ExternalConstant(ref);
m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(), function,
- m->PointerConstant(&parameter));
+ m->PointerConstant(&param));
m->Return(m->Int32Constant(4356));
- FOR_FLOAT64_INPUTS(i) {
- parameter = *i;
- m->Call();
- CHECK_DOUBLE_EQ(comparison(*i), parameter);
- }
+
+ m->Call();
+ comparison(&comparison_param);
+
+ CHECK_EQ(comparison_param, param);
+}
+
+template <typename P1, typename P2>
+void TestExternalReference(BufferedRawMachineAssemblerTester<int32_t>* m,
+ ExternalReference ref, void (*comparison)(P1*, P2*),
+ P1 param1, P2 param2) {
+ P1 comparison_param1 = param1;
+ P2 comparison_param2 = param2;
+
+ Node* function = m->ExternalConstant(ref);
+ m->CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Pointer(), function,
+ m->PointerConstant(&param1), m->PointerConstant(&param2));
+ m->Return(m->Int32Constant(4356));
+
+ m->Call();
+ comparison(&comparison_param1, &comparison_param2);
+
+ CHECK_EQ(comparison_param1, param1);
+ CHECK_EQ(comparison_param2, param2);
+}
+
+template <typename R, typename P>
+void TestExternalReference(BufferedRawMachineAssemblerTester<R>* m,
+ ExternalReference ref, R (*comparison)(P*),
+ P param) {
+ P comparison_param = param;
+
+ Node* function = m->ExternalConstant(ref);
+ m->Return(m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(),
+ function, m->PointerConstant(&param)));
+
+ CHECK_EQ(comparison(&comparison_param), m->Call());
+
+ CHECK_EQ(comparison_param, param);
+}
+
+template <typename R, typename P1, typename P2>
+void TestExternalReference(BufferedRawMachineAssemblerTester<R>* m,
+ ExternalReference ref, R (*comparison)(P1*, P2*),
+ P1 param1, P2 param2) {
+ P1 comparison_param1 = param1;
+ P2 comparison_param2 = param2;
+
+ Node* function = m->ExternalConstant(ref);
+ m->Return(m->CallCFunction2(
+ MachineType::Pointer(), MachineType::Pointer(), MachineType::Pointer(),
+ function, m->PointerConstant(&param1), m->PointerConstant(&param2)));
+
+ CHECK_EQ(comparison(&comparison_param1, &comparison_param2), m->Call());
+
+ CHECK_EQ(comparison_param1, param1);
+ CHECK_EQ(comparison_param2, param2);
}
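
// The testing pattern the new helpers implement, reduced to plain C++ (a
// sketch, not V8 API): run the operation once through the compiled path and
// once directly through the wrapper, then require identical in-memory results.
#include <cassert>
#include <cmath>
namespace {
void f32_trunc_model(float* p) { *p = std::trunc(*p); }  // stand-in wrapper
}  // namespace
int main() {
  float param = 1.25f;             // what the generated code writes through
  float comparison_param = 1.25f;  // what the test recomputes directly
  f32_trunc_model(&param);
  f32_trunc_model(&comparison_param);
  assert(param == comparison_param);
  return 0;
}
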
TEST(RunCallF32Trunc) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_trunc(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, truncf);
+ TestExternalReference(&m, ref, wasm::f32_trunc_wrapper, 1.25f);
}
TEST(RunCallF32Floor) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_floor(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, floorf);
+ TestExternalReference(&m, ref, wasm::f32_floor_wrapper, 1.25f);
}
TEST(RunCallF32Ceil) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_ceil(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, ceilf);
+ TestExternalReference(&m, ref, wasm::f32_ceil_wrapper, 1.25f);
}
TEST(RunCallF32RoundTiesEven) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_nearest_int(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, nearbyintf);
+ TestExternalReference(&m, ref, wasm::f32_nearest_int_wrapper, 1.25f);
}
TEST(RunCallF64Trunc) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_trunc(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, trunc);
+ TestExternalReference(&m, ref, wasm::f64_trunc_wrapper, 1.25);
}
TEST(RunCallF64Floor) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_floor(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, floor);
+ TestExternalReference(&m, ref, wasm::f64_floor_wrapper, 1.25);
}
TEST(RunCallF64Ceil) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_ceil(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, ceil);
+ TestExternalReference(&m, ref, wasm::f64_ceil_wrapper, 1.25);
}
TEST(RunCallF64RoundTiesEven) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_nearest_int(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, nearbyint);
+ TestExternalReference(&m, ref, wasm::f64_nearest_int_wrapper, 1.25);
}
TEST(RunCallInt64ToFloat32) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_to_float32(m.isolate());
-
- int64_t input;
- float output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
- FOR_INT64_INPUTS(i) {
- input = *i;
- m.Call();
- CHECK_FLOAT_EQ(static_cast<float>(*i), output);
- }
+ TestExternalReference(&m, ref, wasm::int64_to_float32_wrapper, int64_t(-2124),
+ 1.25f);
}
TEST(RunCallUint64ToFloat32) {
- struct {
- uint64_t input;
- uint32_t expected;
- } values[] = {{0x0, 0x0},
- {0x1, 0x3f800000},
- {0xffffffff, 0x4f800000},
- {0x1b09788b, 0x4dd84bc4},
- {0x4c5fce8, 0x4c98bf9d},
- {0xcc0de5bf, 0x4f4c0de6},
- {0x2, 0x40000000},
- {0x3, 0x40400000},
- {0x4, 0x40800000},
- {0x5, 0x40a00000},
- {0x8, 0x41000000},
- {0x9, 0x41100000},
- {0xffffffffffffffff, 0x5f800000},
- {0xfffffffffffffffe, 0x5f800000},
- {0xfffffffffffffffd, 0x5f800000},
- {0x0, 0x0},
- {0x100000000, 0x4f800000},
- {0xffffffff00000000, 0x5f800000},
- {0x1b09788b00000000, 0x5dd84bc4},
- {0x4c5fce800000000, 0x5c98bf9d},
- {0xcc0de5bf00000000, 0x5f4c0de6},
- {0x200000000, 0x50000000},
- {0x300000000, 0x50400000},
- {0x400000000, 0x50800000},
- {0x500000000, 0x50a00000},
- {0x800000000, 0x51000000},
- {0x900000000, 0x51100000},
- {0x273a798e187937a3, 0x5e1ce9e6},
- {0xece3af835495a16b, 0x5f6ce3b0},
- {0xb668ecc11223344, 0x5d3668ed},
- {0x9e, 0x431e0000},
- {0x43, 0x42860000},
- {0xaf73, 0x472f7300},
- {0x116b, 0x458b5800},
- {0x658ecc, 0x4acb1d98},
- {0x2b3b4c, 0x4a2ced30},
- {0x88776655, 0x4f087766},
- {0x70000000, 0x4ee00000},
- {0x7200000, 0x4ce40000},
- {0x7fffffff, 0x4f000000},
- {0x56123761, 0x4eac246f},
- {0x7fffff00, 0x4efffffe},
- {0x761c4761eeeeeeee, 0x5eec388f},
- {0x80000000eeeeeeee, 0x5f000000},
- {0x88888888dddddddd, 0x5f088889},
- {0xa0000000dddddddd, 0x5f200000},
- {0xddddddddaaaaaaaa, 0x5f5dddde},
- {0xe0000000aaaaaaaa, 0x5f600000},
- {0xeeeeeeeeeeeeeeee, 0x5f6eeeef},
- {0xfffffffdeeeeeeee, 0x5f800000},
- {0xf0000000dddddddd, 0x5f700000},
- {0x7fffffdddddddd, 0x5b000000},
- {0x3fffffaaaaaaaa, 0x5a7fffff},
- {0x1fffffaaaaaaaa, 0x59fffffd},
- {0xfffff, 0x497ffff0},
- {0x7ffff, 0x48ffffe0},
- {0x3ffff, 0x487fffc0},
- {0x1ffff, 0x47ffff80},
- {0xffff, 0x477fff00},
- {0x7fff, 0x46fffe00},
- {0x3fff, 0x467ffc00},
- {0x1fff, 0x45fff800},
- {0xfff, 0x457ff000},
- {0x7ff, 0x44ffe000},
- {0x3ff, 0x447fc000},
- {0x1ff, 0x43ff8000},
- {0x3fffffffffff, 0x56800000},
- {0x1fffffffffff, 0x56000000},
- {0xfffffffffff, 0x55800000},
- {0x7ffffffffff, 0x55000000},
- {0x3ffffffffff, 0x54800000},
- {0x1ffffffffff, 0x54000000},
- {0x8000008000000000, 0x5f000000},
- {0x8000008000000001, 0x5f000001},
- {0x8000008000000002, 0x5f000001},
- {0x8000008000000004, 0x5f000001},
- {0x8000008000000008, 0x5f000001},
- {0x8000008000000010, 0x5f000001},
- {0x8000008000000020, 0x5f000001},
- {0x8000009000000000, 0x5f000001},
- {0x800000a000000000, 0x5f000001},
- {0x8000008000100000, 0x5f000001},
- {0x8000000000000400, 0x5f000000},
- {0x8000000000000401, 0x5f000000}};
-
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_uint64_to_float32(m.isolate());
-
- uint64_t input;
- float output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
-
- for (size_t i = 0; i < arraysize(values); i++) {
- input = values[i].input;
- m.Call();
- CHECK_EQ(values[i].expected, bit_cast<uint32_t>(output));
- }
+ TestExternalReference(&m, ref, wasm::uint64_to_float32_wrapper,
+ uint64_t(2124), 1.25f);
}
TEST(RunCallInt64ToFloat64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_to_float64(m.isolate());
-
- int64_t input;
- double output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
- FOR_INT64_INPUTS(i) {
- input = *i;
- m.Call();
- CHECK_DOUBLE_EQ(static_cast<double>(*i), output);
- }
+ TestExternalReference(&m, ref, wasm::int64_to_float64_wrapper, int64_t(2124),
+ 1.25);
}
TEST(RunCallUint64ToFloat64) {
- struct {
- uint64_t input;
- uint64_t expected;
- } values[] = {{0x0, 0x0},
- {0x1, 0x3ff0000000000000},
- {0xffffffff, 0x41efffffffe00000},
- {0x1b09788b, 0x41bb09788b000000},
- {0x4c5fce8, 0x419317f3a0000000},
- {0xcc0de5bf, 0x41e981bcb7e00000},
- {0x2, 0x4000000000000000},
- {0x3, 0x4008000000000000},
- {0x4, 0x4010000000000000},
- {0x5, 0x4014000000000000},
- {0x8, 0x4020000000000000},
- {0x9, 0x4022000000000000},
- {0xffffffffffffffff, 0x43f0000000000000},
- {0xfffffffffffffffe, 0x43f0000000000000},
- {0xfffffffffffffffd, 0x43f0000000000000},
- {0x100000000, 0x41f0000000000000},
- {0xffffffff00000000, 0x43efffffffe00000},
- {0x1b09788b00000000, 0x43bb09788b000000},
- {0x4c5fce800000000, 0x439317f3a0000000},
- {0xcc0de5bf00000000, 0x43e981bcb7e00000},
- {0x200000000, 0x4200000000000000},
- {0x300000000, 0x4208000000000000},
- {0x400000000, 0x4210000000000000},
- {0x500000000, 0x4214000000000000},
- {0x800000000, 0x4220000000000000},
- {0x900000000, 0x4222000000000000},
- {0x273a798e187937a3, 0x43c39d3cc70c3c9c},
- {0xece3af835495a16b, 0x43ed9c75f06a92b4},
- {0xb668ecc11223344, 0x43a6cd1d98224467},
- {0x9e, 0x4063c00000000000},
- {0x43, 0x4050c00000000000},
- {0xaf73, 0x40e5ee6000000000},
- {0x116b, 0x40b16b0000000000},
- {0x658ecc, 0x415963b300000000},
- {0x2b3b4c, 0x41459da600000000},
- {0x88776655, 0x41e10eeccaa00000},
- {0x70000000, 0x41dc000000000000},
- {0x7200000, 0x419c800000000000},
- {0x7fffffff, 0x41dfffffffc00000},
- {0x56123761, 0x41d5848dd8400000},
- {0x7fffff00, 0x41dfffffc0000000},
- {0x761c4761eeeeeeee, 0x43dd8711d87bbbbc},
- {0x80000000eeeeeeee, 0x43e00000001dddde},
- {0x88888888dddddddd, 0x43e11111111bbbbc},
- {0xa0000000dddddddd, 0x43e40000001bbbbc},
- {0xddddddddaaaaaaaa, 0x43ebbbbbbbb55555},
- {0xe0000000aaaaaaaa, 0x43ec000000155555},
- {0xeeeeeeeeeeeeeeee, 0x43edddddddddddde},
- {0xfffffffdeeeeeeee, 0x43efffffffbdddde},
- {0xf0000000dddddddd, 0x43ee0000001bbbbc},
- {0x7fffffdddddddd, 0x435ffffff7777777},
- {0x3fffffaaaaaaaa, 0x434fffffd5555555},
- {0x1fffffaaaaaaaa, 0x433fffffaaaaaaaa},
- {0xfffff, 0x412ffffe00000000},
- {0x7ffff, 0x411ffffc00000000},
- {0x3ffff, 0x410ffff800000000},
- {0x1ffff, 0x40fffff000000000},
- {0xffff, 0x40efffe000000000},
- {0x7fff, 0x40dfffc000000000},
- {0x3fff, 0x40cfff8000000000},
- {0x1fff, 0x40bfff0000000000},
- {0xfff, 0x40affe0000000000},
- {0x7ff, 0x409ffc0000000000},
- {0x3ff, 0x408ff80000000000},
- {0x1ff, 0x407ff00000000000},
- {0x3fffffffffff, 0x42cfffffffffff80},
- {0x1fffffffffff, 0x42bfffffffffff00},
- {0xfffffffffff, 0x42affffffffffe00},
- {0x7ffffffffff, 0x429ffffffffffc00},
- {0x3ffffffffff, 0x428ffffffffff800},
- {0x1ffffffffff, 0x427ffffffffff000},
- {0x8000008000000000, 0x43e0000010000000},
- {0x8000008000000001, 0x43e0000010000000},
- {0x8000000000000400, 0x43e0000000000000},
- {0x8000000000000401, 0x43e0000000000001},
- {0x8000000000000402, 0x43e0000000000001},
- {0x8000000000000404, 0x43e0000000000001},
- {0x8000000000000408, 0x43e0000000000001},
- {0x8000000000000410, 0x43e0000000000001},
- {0x8000000000000420, 0x43e0000000000001},
- {0x8000000000000440, 0x43e0000000000001},
- {0x8000000000000480, 0x43e0000000000001},
- {0x8000000000000500, 0x43e0000000000001},
- {0x8000000000000600, 0x43e0000000000001}};
-
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_uint64_to_float64(m.isolate());
-
- uint64_t input;
- double output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
-
- for (size_t i = 0; i < arraysize(values); i++) {
- input = values[i].input;
- m.Call();
- CHECK_EQ(values[i].expected, bit_cast<uint64_t>(output));
- }
+ TestExternalReference(&m, ref, wasm::uint64_to_float64_wrapper,
+ uint64_t(2124), 1.25);
}
TEST(RunCallFloat32ToInt64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_float32_to_int64(m.isolate());
-
- float input;
- int64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT32_INPUTS(i) {
- input = *i;
- if (*i >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
- *i < static_cast<float>(std::numeric_limits<int64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<int64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float32_to_int64_wrapper, 1.25f,
+ int64_t(2124));
}
TEST(RunCallFloat32ToUint64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_float32_to_uint64(m.isolate());
-
- float input;
- uint64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT32_INPUTS(i) {
- input = *i;
- if (*i > -1.0 &&
- *i < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<uint64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float32_to_uint64_wrapper, 1.25f,
+ uint64_t(2124));
}
TEST(RunCallFloat64ToInt64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_float64_to_int64(m.isolate());
-
- double input;
- int64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- if (*i >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
- *i < static_cast<double>(std::numeric_limits<int64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<int64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float64_to_int64_wrapper, 1.25,
+ int64_t(2124));
}
TEST(RunCallFloat64ToUint64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_float64_to_uint64(m.isolate());
-
- double input;
- uint64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- if (*i > -1.0 &&
- *i < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<uint64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float64_to_uint64_wrapper, 1.25,
+ uint64_t(2124));
}
TEST(RunCallInt64Div) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_div(m.isolate());
-
- int64_t dst;
- int64_t src;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else if (src == -1 && dst == std::numeric_limits<int64_t>::min()) {
- CHECK_EQ(-1, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i / *j, dst);
- }
- }
- }
+ TestExternalReference(&m, ref, wasm::int64_div_wrapper, int64_t(1774),
+ int64_t(21));
}
TEST(RunCallInt64Mod) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_mod(m.isolate());
-
- int64_t dst;
- int64_t src;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i % *j, dst);
- }
- }
- }
+ TestExternalReference(&m, ref, wasm::int64_mod_wrapper, int64_t(1774),
+ int64_t(21));
}
TEST(RunCallUint64Div) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_uint64_div(m.isolate());
-
- uint64_t dst;
- uint64_t src;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i / *j, dst);
- }
- }
- }
+ TestExternalReference(&m, ref, wasm::uint64_div_wrapper, uint64_t(1774),
+ uint64_t(21));
}
TEST(RunCallUint64Mod) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_uint64_mod(m.isolate());
+ TestExternalReference(&m, ref, wasm::uint64_mod_wrapper, uint64_t(1774),
+ uint64_t(21));
+}
+
+TEST(RunCallWord32Ctz) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word32_ctz(m.isolate());
+ TestExternalReference(&m, ref, wasm::word32_ctz_wrapper, uint32_t(1774));
+}
+
+TEST(RunCallWord64Ctz) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word64_ctz(m.isolate());
+ TestExternalReference(&m, ref, wasm::word64_ctz_wrapper, uint64_t(1774));
+}
+
+TEST(RunCallWord32Popcnt) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word32_popcnt(m.isolate());
+ TestExternalReference(&m, ref, wasm::word32_popcnt_wrapper, uint32_t(1774));
+}
- uint64_t dst;
- uint64_t src;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i % *j, dst);
- }
- }
- }
+TEST(RunCallWord64Popcnt) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word64_popcnt(m.isolate());
+ TestExternalReference(&m, ref, wasm::word64_popcnt_wrapper, uint64_t(1774));
+}
+
+TEST(RunCallFloat64Pow) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref = ExternalReference::wasm_float64_pow(m.isolate());
+ TestExternalReference(&m, ref, wasm::float64_pow_wrapper, 1.5, 1.5);
}
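
// A plain-C++ model of the new word32 count-trailing-zeros wrapper (an
// assumption from the test names; the real wrappers live in
// src/wasm/wasm-external-refs.cc):
#include <cassert>
#include <cstdint>
uint32_t Word32CtzModel(uint32_t v) {
  if (v == 0) return 32;
  uint32_t n = 0;
  while ((v & 1u) == 0) { v >>= 1; ++n; }
  return n;
}
int main() {
  assert(Word32CtzModel(1774u) == 1);  // 1774 == 0b11011101110
  assert(Word32CtzModel(0u) == 32);
  return 0;
}
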
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index 234060c7f7..b715214c0d 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -14,7 +14,7 @@ namespace {
// Helper to determine inline count via JavaScriptFrame::GetFunctions.
// Note that a count of 1 indicates that no inlining has occurred.
void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
- StackTraceFrameIterator it(CcTest::i_isolate());
+ JavaScriptFrameIterator it(CcTest::i_isolate());
int frames_seen = 0;
JavaScriptFrame* topmost = it.frame();
while (!it.done()) {
@@ -47,25 +47,20 @@ void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
.FromJust());
}
-
const uint32_t kRestrictedInliningFlags =
- CompilationInfo::kFunctionContextSpecializing |
- CompilationInfo::kTypingEnabled;
+ CompilationInfo::kNativeContextSpecializing;
const uint32_t kInlineFlags = CompilationInfo::kInliningEnabled |
- CompilationInfo::kFunctionContextSpecializing |
- CompilationInfo::kTypingEnabled;
+ CompilationInfo::kNativeContextSpecializing;
} // namespace
TEST(SimpleInlining) {
FunctionTester T(
- "(function(){"
- " function foo(s) { AssertInlineCount(2); return s; };"
- " function bar(s, t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); return s; };"
+ "function bar(s, t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -75,11 +70,9 @@ TEST(SimpleInlining) {
TEST(SimpleInliningDeopt) {
FunctionTester T(
- "(function(){"
- " function foo(s) { %DeoptimizeFunction(bar); return s; };"
- " function bar(s, t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) { %DeoptimizeFunction(bar); return s; };"
+ "function bar(s, t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -89,11 +82,9 @@ TEST(SimpleInliningDeopt) {
TEST(SimpleInliningDeoptSelf) {
FunctionTester T(
- "(function(){"
- " function foo(s) { %_DeoptimizeNow(); return s; };"
- " function bar(s, t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) { %_DeoptimizeNow(); return s; };"
+ "function bar(s, t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -103,11 +94,9 @@ TEST(SimpleInliningDeoptSelf) {
TEST(SimpleInliningContext) {
FunctionTester T(
- "(function () {"
- " function foo(s) { AssertInlineCount(2); var x = 12; return s + x; };"
- " function bar(s, t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); var x = 12; return s + x; };"
+ "function bar(s, t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -117,14 +106,12 @@ TEST(SimpleInliningContext) {
TEST(SimpleInliningContextDeopt) {
FunctionTester T(
- "(function () {"
- " function foo(s) {"
- " AssertInlineCount(2); %DeoptimizeFunction(bar); var x = 12;"
- " return s + x;"
- " };"
- " function bar(s, t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) {"
+ " AssertInlineCount(2); %DeoptimizeFunction(bar); var x = 12;"
+ " return s + x;"
+ "};"
+ "function bar(s, t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -152,10 +139,8 @@ TEST(CaptureContext) {
TEST(DontInlineEval) {
FunctionTester T(
"var x = 42;"
- "(function () {"
- " function bar(s, t) { return eval(\"AssertInlineCount(1); x\") };"
- " return bar;"
- "})();",
+ "function bar(s, t) { return eval(\"AssertInlineCount(1); x\") };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -165,12 +150,10 @@ TEST(DontInlineEval) {
TEST(InlineOmitArguments) {
FunctionTester T(
- "(function () {"
- " var x = 42;"
- " function bar(s, t, u, v) { AssertInlineCount(2); return x + s; };"
- " function foo(s, t) { return bar(s); };"
- " return foo;"
- "})();",
+ "var x = 42;"
+ "function bar(s, t, u, v) { AssertInlineCount(2); return x + s; };"
+ "function foo(s, t) { return bar(s); };"
+ "foo;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -180,13 +163,11 @@ TEST(InlineOmitArguments) {
TEST(InlineOmitArgumentsObject) {
FunctionTester T(
- "(function () {"
- " function bar(s, t, u, v) { AssertInlineCount(2); return arguments; };"
- " function foo(s, t) { var args = bar(s);"
- " return args.length == 1 &&"
- " args[0] == 11; };"
- " return foo;"
- "})();",
+ "function bar(s, t, u, v) { AssertInlineCount(2); return arguments; };"
+ "function foo(s, t) { var args = bar(s);"
+ " return args.length == 1 &&"
+ " args[0] == 11; };"
+ "foo;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -196,14 +177,12 @@ TEST(InlineOmitArgumentsObject) {
TEST(InlineOmitArgumentsDeopt) {
FunctionTester T(
- "(function () {"
- " function foo(s,t,u,v) { AssertInlineCount(2);"
- " %DeoptimizeFunction(bar); return baz(); };"
- " function bar() { return foo(11); };"
- " function baz() { return foo.arguments.length == 1 &&"
- " foo.arguments[0] == 11; }"
- " return bar;"
- "})();",
+ "function foo(s,t,u,v) { AssertInlineCount(2);"
+ " %DeoptimizeFunction(bar); return baz(); };"
+ "function bar() { return foo(11); };"
+ "function baz() { return foo.arguments.length == 1 &&"
+ " foo.arguments[0] == 11; }"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -213,12 +192,10 @@ TEST(InlineOmitArgumentsDeopt) {
TEST(InlineSurplusArguments) {
FunctionTester T(
- "(function () {"
- " var x = 42;"
- " function foo(s) { AssertInlineCount(2); return x + s; };"
- " function bar(s, t) { return foo(s, t, 13); };"
- " return bar;"
- "})();",
+ "var x = 42;"
+ "function foo(s) { AssertInlineCount(2); return x + s; };"
+ "function bar(s, t) { return foo(s, t, 13); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -228,15 +205,13 @@ TEST(InlineSurplusArguments) {
TEST(InlineSurplusArgumentsObject) {
FunctionTester T(
- "(function () {"
- " function foo(s) { AssertInlineCount(2); return arguments; };"
- " function bar(s, t) { var args = foo(s, t, 13);"
- " return args.length == 3 &&"
- " args[0] == 11 &&"
- " args[1] == 12 &&"
- " args[2] == 13; };"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); return arguments; };"
+ "function bar(s, t) { var args = foo(s, t, 13);"
+ " return args.length == 3 &&"
+ " args[0] == 11 &&"
+ " args[1] == 12 &&"
+ " args[2] == 13; };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -246,16 +221,14 @@ TEST(InlineSurplusArgumentsObject) {
TEST(InlineSurplusArgumentsDeopt) {
FunctionTester T(
- "(function () {"
- " function foo(s) { AssertInlineCount(2); %DeoptimizeFunction(bar);"
- " return baz(); };"
- " function bar() { return foo(13, 14, 15); };"
- " function baz() { return foo.arguments.length == 3 &&"
- " foo.arguments[0] == 13 &&"
- " foo.arguments[1] == 14 &&"
- " foo.arguments[2] == 15; }"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); %DeoptimizeFunction(bar);"
+ " return baz(); };"
+ "function bar() { return foo(13, 14, 15); };"
+ "function baz() { return foo.arguments.length == 3 &&"
+ " foo.arguments[0] == 13 &&"
+ " foo.arguments[1] == 14 &&"
+ " foo.arguments[2] == 15; }"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -265,12 +238,10 @@ TEST(InlineSurplusArgumentsDeopt) {
TEST(InlineTwice) {
FunctionTester T(
- "(function () {"
- " var x = 42;"
- " function bar(s) { AssertInlineCount(2); return x + s; };"
- " function foo(s, t) { return bar(s) + bar(t); };"
- " return foo;"
- "})();",
+ "var x = 42;"
+ "function bar(s) { AssertInlineCount(2); return x + s; };"
+ "function foo(s, t) { return bar(s) + bar(t); };"
+ "foo;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -280,12 +251,10 @@ TEST(InlineTwice) {
TEST(InlineTwiceDependent) {
FunctionTester T(
- "(function () {"
- " var x = 42;"
- " function foo(s) { AssertInlineCount(2); return x + s; };"
- " function bar(s,t) { return foo(foo(s)); };"
- " return bar;"
- "})();",
+ "var x = 42;"
+ "function foo(s) { AssertInlineCount(2); return x + s; };"
+ "function bar(s,t) { return foo(foo(s)); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -295,13 +264,11 @@ TEST(InlineTwiceDependent) {
TEST(InlineTwiceDependentDiamond) {
FunctionTester T(
- "(function () {"
- " var x = 41;"
- " function foo(s) { AssertInlineCount(2); if (s % 2 == 0) {"
- " return x - s } else { return x + s; } };"
- " function bar(s,t) { return foo(foo(s)); };"
- " return bar;"
- "})();",
+ "var x = 41;"
+ "function foo(s) { AssertInlineCount(2); if (s % 2 == 0) {"
+ " return x - s } else { return x + s; } };"
+ "function bar(s,t) { return foo(foo(s)); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -311,13 +278,11 @@ TEST(InlineTwiceDependentDiamond) {
TEST(InlineTwiceDependentDiamondDifferent) {
FunctionTester T(
- "(function () {"
- " var x = 41;"
- " function foo(s,t) { AssertInlineCount(2); if (s % 2 == 0) {"
- " return x - s * t } else { return x + s * t; } };"
- " function bar(s,t) { return foo(foo(s, 3), 5); };"
- " return bar;"
- "})();",
+ "var x = 41;"
+ "function foo(s,t) { AssertInlineCount(2); if (s % 2 == 0) {"
+ " return x - s * t } else { return x + s * t; } };"
+ "function bar(s,t) { return foo(foo(s, 3), 5); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -327,11 +292,9 @@ TEST(InlineTwiceDependentDiamondDifferent) {
TEST(InlineLoopGuardedEmpty) {
FunctionTester T(
- "(function () {"
- " function foo(s) { AssertInlineCount(2); if (s) while (s); return s; };"
- " function bar(s,t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); if (s) while (s); return s; };"
+ "function bar(s,t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -341,12 +304,10 @@ TEST(InlineLoopGuardedEmpty) {
TEST(InlineLoopGuardedOnce) {
FunctionTester T(
- "(function () {"
- " function foo(s,t) { AssertInlineCount(2); if (t > 0) while (s > 0) {"
- " s = s - 1; }; return s; };"
- " function bar(s,t) { return foo(s,t); };"
- " return bar;"
- "})();",
+ "function foo(s,t) { AssertInlineCount(2); if (t > 0) while (s > 0) {"
+ " s = s - 1; }; return s; };"
+ "function bar(s,t) { return foo(s,t); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -356,12 +317,10 @@ TEST(InlineLoopGuardedOnce) {
TEST(InlineLoopGuardedTwice) {
FunctionTester T(
- "(function () {"
- " function foo(s,t) { AssertInlineCount(2); if (t > 0) while (s > 0) {"
- " s = s - 1; }; return s; };"
- " function bar(s,t) { return foo(foo(s,t),t); };"
- " return bar;"
- "})();",
+ "function foo(s,t) { AssertInlineCount(2); if (t > 0) while (s > 0) {"
+ " s = s - 1; }; return s; };"
+ "function bar(s,t) { return foo(foo(s,t),t); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -371,11 +330,9 @@ TEST(InlineLoopGuardedTwice) {
TEST(InlineLoopUnguardedEmpty) {
FunctionTester T(
- "(function () {"
- " function foo(s) { AssertInlineCount(2); while (s); return s; };"
- " function bar(s, t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); while (s); return s; };"
+ "function bar(s, t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -385,12 +342,10 @@ TEST(InlineLoopUnguardedEmpty) {
TEST(InlineLoopUnguardedOnce) {
FunctionTester T(
- "(function () {"
- " function foo(s) { AssertInlineCount(2); while (s) {"
- " s = s - 1; }; return s; };"
- " function bar(s, t) { return foo(s); };"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); while (s) {"
+ " s = s - 1; }; return s; };"
+ "function bar(s, t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -400,12 +355,10 @@ TEST(InlineLoopUnguardedOnce) {
TEST(InlineLoopUnguardedTwice) {
FunctionTester T(
- "(function () {"
- " function foo(s) { AssertInlineCount(2); while (s > 0) {"
- " s = s - 1; }; return s; };"
- " function bar(s,t) { return foo(foo(s,t),t); };"
- " return bar;"
- "})();",
+ "function foo(s) { AssertInlineCount(2); while (s > 0) {"
+ " s = s - 1; }; return s; };"
+ "function bar(s,t) { return foo(foo(s,t),t); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -415,13 +368,11 @@ TEST(InlineLoopUnguardedTwice) {
TEST(InlineStrictIntoNonStrict) {
FunctionTester T(
- "(function () {"
- " var x = Object.create({}, { y: { value:42, writable:false } });"
- " function foo(s) { 'use strict';"
- " x.y = 9; };"
- " function bar(s,t) { return foo(s); };"
- " return bar;"
- "})();",
+ "var x = Object.create({}, { y: { value:42, writable:false } });"
+ "function foo(s) { 'use strict';"
+ " x.y = 9; };"
+ "function bar(s,t) { return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -431,12 +382,10 @@ TEST(InlineStrictIntoNonStrict) {
TEST(InlineNonStrictIntoStrict) {
FunctionTester T(
- "(function () {"
- " var x = Object.create({}, { y: { value:42, writable:false } });"
- " function foo(s) { x.y = 9; return x.y; };"
- " function bar(s,t) { \'use strict\'; return foo(s); };"
- " return bar;"
- "})();",
+ "var x = Object.create({}, { y: { value:42, writable:false } });"
+ "function foo(s) { x.y = 9; return x.y; };"
+ "function bar(s,t) { \'use strict\'; return foo(s); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -444,66 +393,16 @@ TEST(InlineNonStrictIntoStrict) {
}
-TEST(InlineIntrinsicIsSmi) {
- FunctionTester T(
- "(function () {"
- " var x = 42;"
- " function bar(s,t) { return %_IsSmi(x); };"
- " return bar;"
- "})();",
- kInlineFlags);
-
- InstallAssertInlineCountHelper(CcTest::isolate());
- T.CheckCall(T.true_value(), T.Val(12), T.Val(4));
-}
-
-
-TEST(InlineIntrinsicIsArray) {
- FunctionTester T(
- "(function () {"
- " var x = [1,2,3];"
- " function bar(s,t) { return %_IsArray(x); };"
- " return bar;"
- "})();",
- kInlineFlags);
-
- InstallAssertInlineCountHelper(CcTest::isolate());
- T.CheckCall(T.true_value(), T.Val(12), T.Val(4));
-
- FunctionTester T2(
- "(function () {"
- " var x = 32;"
- " function bar(s,t) { return %_IsArray(x); };"
- " return bar;"
- "})();",
- kInlineFlags);
-
- T2.CheckCall(T.false_value(), T.Val(12), T.Val(4));
-
- FunctionTester T3(
- "(function () {"
- " var x = bar;"
- " function bar(s,t) { return %_IsArray(x); };"
- " return bar;"
- "})();",
- kInlineFlags);
-
- T3.CheckCall(T.false_value(), T.Val(12), T.Val(4));
-}
-
-
TEST(InlineWithArguments) {
FunctionTester T(
- "(function () {"
- " function foo(s,t,u) { AssertInlineCount(2);"
- " return foo.arguments.length == 3 &&"
- " foo.arguments[0] == 13 &&"
- " foo.arguments[1] == 14 &&"
- " foo.arguments[2] == 15;"
- " }"
- " function bar() { return foo(13, 14, 15); };"
- " return bar;"
- "})();",
+ "function foo(s,t,u) { AssertInlineCount(2);"
+ " return foo.arguments.length == 3 &&"
+ " foo.arguments[0] == 13 &&"
+ " foo.arguments[1] == 14 &&"
+ " foo.arguments[2] == 15;"
+ "}"
+ "function bar() { return foo(13, 14, 15); };"
+ "bar;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -513,12 +412,10 @@ TEST(InlineWithArguments) {
TEST(InlineBuiltin) {
FunctionTester T(
- "(function () {"
- " function foo(s,t,u) { AssertInlineCount(2); return true; }"
- " function bar() { return foo(); };"
- " %SetForceInlineFlag(foo);"
- " return bar;"
- "})();",
+ "function foo(s,t,u) { AssertInlineCount(2); return true; }"
+ "function bar() { return foo(); };"
+ "%SetForceInlineFlag(foo);"
+ "bar;",
kRestrictedInliningFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -528,14 +425,12 @@ TEST(InlineBuiltin) {
TEST(InlineNestedBuiltin) {
FunctionTester T(
- "(function () {"
- " function foo(s,t,u) { AssertInlineCount(3); return true; }"
- " function baz(s,t,u) { return foo(s,t,u); }"
- " function bar() { return baz(); };"
- " %SetForceInlineFlag(foo);"
- " %SetForceInlineFlag(baz);"
- " return bar;"
- "})();",
+ "function foo(s,t,u) { AssertInlineCount(3); return true; }"
+ "function baz(s,t,u) { return foo(s,t,u); }"
+ "function bar() { return baz(); };"
+ "%SetForceInlineFlag(foo);"
+ "%SetForceInlineFlag(baz);"
+ "bar;",
kRestrictedInliningFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -545,14 +440,12 @@ TEST(InlineNestedBuiltin) {
TEST(InlineSelfRecursive) {
FunctionTester T(
- "(function () {"
- " function foo(x) { "
- " AssertInlineCount(1);"
- " if (x == 1) return foo(12);"
- " return x;"
- " }"
- " return foo;"
- "})();",
+ "function foo(x) { "
+ " AssertInlineCount(1);"
+ " if (x == 1) return foo(12);"
+ " return x;"
+ "}"
+ "foo;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
@@ -562,14 +455,12 @@ TEST(InlineSelfRecursive) {
TEST(InlineMutuallyRecursive) {
FunctionTester T(
- "(function () {"
- " function bar(x) { AssertInlineCount(2); return foo(x); }"
- " function foo(x) { "
- " if (x == 1) return bar(42);"
- " return x;"
- " }"
- " return foo;"
- "})();",
+ "function bar(x) { AssertInlineCount(2); return foo(x); }"
+ "function foo(x) { "
+ " if (x == 1) return bar(42);"
+ " return x;"
+ "}"
+ "foo;",
kInlineFlags);
InstallAssertInlineCountHelper(CcTest::isolate());
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 6aa5f391ad..681891c91f 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -111,37 +111,6 @@ TEST(IsSmi) {
}
-TEST(OneByteSeqStringGetChar) {
- FunctionTester T("(function(a,b) { return %_OneByteSeqStringGetChar(a,b); })",
- flags);
-
- Handle<SeqOneByteString> string =
- T.main_isolate()->factory()->NewRawOneByteString(3).ToHandleChecked();
- string->SeqOneByteStringSet(0, 'b');
- string->SeqOneByteStringSet(1, 'a');
- string->SeqOneByteStringSet(2, 'r');
- T.CheckCall(T.Val('b'), string, T.Val(0.0));
- T.CheckCall(T.Val('a'), string, T.Val(1));
- T.CheckCall(T.Val('r'), string, T.Val(2));
-}
-
-
-TEST(OneByteSeqStringSetChar) {
- FunctionTester T("(function(a,b) { %_OneByteSeqStringSetChar(a,88,b); })",
- flags);
-
- Handle<SeqOneByteString> string =
- T.main_isolate()->factory()->NewRawOneByteString(3).ToHandleChecked();
- string->SeqOneByteStringSet(0, 'b');
- string->SeqOneByteStringSet(1, 'a');
- string->SeqOneByteStringSet(2, 'r');
- T.Call(T.Val(1), string);
- CHECK_EQ('b', string->SeqOneByteStringGet(0));
- CHECK_EQ('X', string->SeqOneByteStringGet(1));
- CHECK_EQ('r', string->SeqOneByteStringGet(2));
-}
-
-
TEST(StringAdd) {
FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })", flags);
@@ -151,15 +120,6 @@ TEST(StringAdd) {
}
-TEST(StringCharAt) {
- FunctionTester T("(function(a,b) { return %_StringCharAt(a,b); })", flags);
-
- T.CheckCall(T.Val("e"), T.Val("huge fan!"), T.Val(3));
- T.CheckCall(T.Val("f"), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
- T.CheckCall(T.Val(""), T.Val("not a fan!"), T.Val(23));
-}
-
-
TEST(StringCharCodeAt) {
FunctionTester T("(function(a,b) { return %_StringCharCodeAt(a,b); })",
flags);
@@ -196,47 +156,6 @@ TEST(SubString) {
T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(0.0));
}
-
-TEST(TwoByteSeqStringGetChar) {
- FunctionTester T("(function(a,b) { return %_TwoByteSeqStringGetChar(a,b); })",
- flags);
-
- Handle<SeqTwoByteString> string =
- T.main_isolate()->factory()->NewRawTwoByteString(3).ToHandleChecked();
- string->SeqTwoByteStringSet(0, 'b');
- string->SeqTwoByteStringSet(1, 'a');
- string->SeqTwoByteStringSet(2, 'r');
- T.CheckCall(T.Val('b'), string, T.Val(0.0));
- T.CheckCall(T.Val('a'), string, T.Val(1));
- T.CheckCall(T.Val('r'), string, T.Val(2));
-}
-
-
-TEST(TwoByteSeqStringSetChar) {
- FunctionTester T("(function(a,b) { %_TwoByteSeqStringSetChar(a,88,b); })",
- flags);
-
- Handle<SeqTwoByteString> string =
- T.main_isolate()->factory()->NewRawTwoByteString(3).ToHandleChecked();
- string->SeqTwoByteStringSet(0, 'b');
- string->SeqTwoByteStringSet(1, 'a');
- string->SeqTwoByteStringSet(2, 'r');
- T.Call(T.Val(1), string);
- CHECK_EQ('b', string->SeqTwoByteStringGet(0));
- CHECK_EQ('X', string->SeqTwoByteStringGet(1));
- CHECK_EQ('r', string->SeqTwoByteStringGet(2));
-}
-
-
-TEST(ValueOf) {
- FunctionTester T("(function(a) { return %_ValueOf(a); })", flags);
-
- T.CheckCall(T.Val("a"), T.Val("a"));
- T.CheckCall(T.Val("b"), T.NewObject("(new String('b'))"));
- T.CheckCall(T.Val(123), T.Val(123));
- T.CheckCall(T.Val(456), T.NewObject("(new Number(456))"));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 12566c242a..f69e508f90 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -32,7 +32,7 @@ TEST(SimpleCall2) {
TEST(ConstCall) {
FunctionTester T("(function(foo,a) { return foo(a,3); })");
- FunctionTester U("(function(a,b) { return a + b; })");
+ FunctionTester U("(function (a,b) { return a + b; })");
T.CheckCall(T.Val(6), U.function, T.Val(3));
T.CheckCall(T.Val(6.1), U.function, T.Val(3.1));
@@ -44,7 +44,7 @@ TEST(ConstCall) {
TEST(ConstCall2) {
FunctionTester T("(function(foo,a) { return foo(a,\"3\"); })");
- FunctionTester U("(function(a,b) { return a + b; })");
+ FunctionTester U("(function (a,b) { return a + b; })");
T.CheckCall(T.Val("33"), U.function, T.Val(3));
T.CheckCall(T.Val("3.13"), U.function, T.Val(3.1));
@@ -218,6 +218,7 @@ TEST(ContextLoadedFromActivation) {
i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
jsfun->set_code(T.function->code());
jsfun->set_shared(T.function->shared());
+ jsfun->set_literals(T.function->literals());
CHECK(context->Global()
->Set(context, v8_str("foo"), v8::Utils::CallableToLocal(jsfun))
.FromJust());
@@ -242,6 +243,7 @@ TEST(BuiltinLoadedFromActivation) {
i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
jsfun->set_code(T.function->code());
jsfun->set_shared(T.function->shared());
+ jsfun->set_literals(T.function->literals());
CHECK(context->Global()
->Set(context, v8_str("foo"), v8::Utils::CallableToLocal(jsfun))
.FromJust());
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index b68fc1cdde..78e12576f1 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -512,7 +512,6 @@ TEST(RegExpLiteral) {
TEST(ClassLiteral) {
- FLAG_harmony_sloppy = true;
const char* src =
"(function(a,b) {"
" class C {"
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
new file mode 100644
index 0000000000..2461129384
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -0,0 +1,1190 @@
+// Copyright 2016 the V8 project authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::base;
+
+namespace {
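+// Out-of-bounds checked loads are expected to produce a default value:
+// zero for integer types and NaN for floating-point types. The
+// specializations below encode that expectation for each tested type.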
+template <typename Type>
+void CheckOobValue(Type val) {
+ UNREACHABLE();
+}
+
+template <>
+void CheckOobValue(int32_t val) {
+ CHECK_EQ(0, val);
+}
+
+template <>
+void CheckOobValue(int64_t val) {
+ CHECK_EQ(0, val);
+}
+
+template <>
+void CheckOobValue(float val) {
+ CHECK(std::isnan(val));
+}
+
+template <>
+void CheckOobValue(double val) {
+ CHECK(std::isnan(val));
+}
+} // namespace
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum TestAlignment {
+ kAligned,
+ kUnaligned,
+};
+
+// This is America!
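+// A decimal billion and a binary gigabyte; both are used below to build
+// large offsets for the out-of-bounds checked access tests.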
+#define A_BILLION 1000000000ULL
+#define A_GIG (1024ULL * 1024ULL * 1024ULL)
+
+namespace {
+void RunLoadInt32(const TestAlignment t) {
+ RawMachineAssemblerTester<int32_t> m;
+
+ int32_t p1 = 0; // loads directly from this location.
+
+ if (t == TestAlignment::kAligned) {
+ m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
+ } else if (t == TestAlignment::kUnaligned) {
+ m.Return(m.UnalignedLoadFromPointer(&p1, MachineType::Int32()));
+ } else {
+ UNREACHABLE();
+ }
+
+ FOR_INT32_INPUTS(i) {
+ p1 = *i;
+ CHECK_EQ(p1, m.Call());
+ }
+}
+
+void RunLoadInt32Offset(TestAlignment t) {
+ int32_t p1 = 0; // loads directly from this location.
+
+ int32_t offsets[] = {-2000000, -100, -101, 1, 3,
+ 7, 120, 2000, 2000000000, 0xff};
+
+ for (size_t i = 0; i < arraysize(offsets); i++) {
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t offset = offsets[i];
+ byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
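+    // Bias the base pointer by -offset so [pointer + offset] addresses p1.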
+
+ // generate load [#base + #index]
+ if (t == TestAlignment::kAligned) {
+ m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
+ } else if (t == TestAlignment::kUnaligned) {
+ m.Return(
+ m.UnalignedLoadFromPointer(pointer, MachineType::Int32(), offset));
+ } else {
+ UNREACHABLE();
+ }
+
+ FOR_INT32_INPUTS(j) {
+ p1 = *j;
+ CHECK_EQ(p1, m.Call());
+ }
+ }
+}
+
+void RunLoadStoreFloat32Offset(TestAlignment t) {
+ float p1 = 0.0f; // loads directly from this location.
+ float p2 = 0.0f; // and stores directly into this location.
+
+ FOR_INT32_INPUTS(i) {
+ int32_t magic = 0x2342aabb + *i * 3;
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t offset = *i;
+ byte* from = reinterpret_cast<byte*>(&p1) - offset;
+ byte* to = reinterpret_cast<byte*>(&p2) - offset;
+ // generate load [#base + #index]
+ if (t == TestAlignment::kAligned) {
+ Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load, kNoWriteBarrier);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* load =
+ m.UnalignedLoad(MachineType::Float32(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.UnalignedStore(MachineRepresentation::kFloat32, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load);
+
+ } else {
+ UNREACHABLE();
+ }
+ m.Return(m.Int32Constant(magic));
+
+ FOR_FLOAT32_INPUTS(j) {
+ p1 = *j;
+ p2 = *j - 5;
+ CHECK_EQ(magic, m.Call());
+ CheckDoubleEq(p1, p2);
+ }
+ }
+}
+
+void RunLoadStoreFloat64Offset(TestAlignment t) {
+ double p1 = 0; // loads directly from this location.
+ double p2 = 0; // and stores directly into this location.
+
+ FOR_INT32_INPUTS(i) {
+ int32_t magic = 0x2342aabb + *i * 3;
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t offset = *i;
+ byte* from = reinterpret_cast<byte*>(&p1) - offset;
+ byte* to = reinterpret_cast<byte*>(&p2) - offset;
+ // generate load [#base + #index]
+ if (t == TestAlignment::kAligned) {
+ Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load, kNoWriteBarrier);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* load =
+ m.UnalignedLoad(MachineType::Float64(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.UnalignedStore(MachineRepresentation::kFloat64, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load);
+ } else {
+ UNREACHABLE();
+ }
+ m.Return(m.Int32Constant(magic));
+
+ FOR_FLOAT64_INPUTS(j) {
+ p1 = *j;
+ p2 = *j - 5;
+ CHECK_EQ(magic, m.Call());
+ CheckDoubleEq(p1, p2);
+ }
+ }
+}
+} // namespace
+
+TEST(RunLoadInt32) { RunLoadInt32(TestAlignment::kAligned); }
+
+TEST(RunUnalignedLoadInt32) { RunLoadInt32(TestAlignment::kUnaligned); }
+
+TEST(RunLoadInt32Offset) { RunLoadInt32Offset(TestAlignment::kAligned); }
+
+TEST(RunUnalignedLoadInt32Offset) {
+ RunLoadInt32Offset(TestAlignment::kUnaligned);
+}
+
+TEST(RunLoadStoreFloat32Offset) {
+ RunLoadStoreFloat32Offset(TestAlignment::kAligned);
+}
+
+TEST(RunUnalignedLoadStoreFloat32Offset) {
+ RunLoadStoreFloat32Offset(TestAlignment::kUnaligned);
+}
+
+TEST(RunLoadStoreFloat64Offset) {
+ RunLoadStoreFloat64Offset(TestAlignment::kAligned);
+}
+
+TEST(RunUnalignedLoadStoreFloat64Offset) {
+ RunLoadStoreFloat64Offset(TestAlignment::kUnaligned);
+}
+
+namespace {
+template <typename Type>
+void RunLoadImmIndex(MachineType rep, TestAlignment t) {
+ const int kNumElems = 3;
+ Type buffer[kNumElems];
+
+ // initialize the buffer with some raw data.
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ // Test with various large and small offsets.
+ for (int offset = -1; offset <= 200000; offset *= -5) {
+ for (int i = 0; i < kNumElems; i++) {
+ BufferedRawMachineAssemblerTester<Type> m;
+ Node* base = m.PointerConstant(buffer - offset);
+ Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
+ if (t == TestAlignment::kAligned) {
+ m.Return(m.Load(rep, base, index));
+ } else if (t == TestAlignment::kUnaligned) {
+ m.Return(m.UnalignedLoad(rep, base, index));
+ } else {
+ UNREACHABLE();
+ }
+
+ volatile Type expected = buffer[i];
+ volatile Type actual = m.Call();
+ CHECK_EQ(expected, actual);
+ }
+ }
+}
+
+template <typename CType>
+void RunLoadStore(MachineType rep, TestAlignment t) {
+ const int kNumElems = 4;
+ CType buffer[kNumElems];
+
+ for (int32_t x = 0; x < kNumElems; x++) {
+ int32_t y = kNumElems - x - 1;
+ // initialize the buffer with raw data.
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t OK = 0x29000 + x;
+ Node* base = m.PointerConstant(buffer);
+ Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
+ Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
+ if (t == TestAlignment::kAligned) {
+ Node* load = m.Load(rep, base, index0);
+ m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* load = m.UnalignedLoad(rep, base, index0);
+ m.UnalignedStore(rep.representation(), base, index1, load);
+ }
+
+ m.Return(m.Int32Constant(OK));
+
+ CHECK(buffer[x] != buffer[y]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(buffer[x] == buffer[y]);
+ }
+}
+
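+// Copies a value via a misaligned load at offset x and a misaligned store
+// at offset y, then checks that it survives the round trip unchanged.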
+template <typename CType>
+void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
+ CType in, out;
+ CType in_buffer[2];
+ CType out_buffer[2];
+ byte* raw;
+
+ for (int x = 0; x < sizeof(CType); x++) {
+ int y = sizeof(CType) - x;
+
+ raw = reinterpret_cast<byte*>(&in);
+ for (size_t i = 0; i < sizeof(CType); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
+ }
+
+ raw = reinterpret_cast<byte*>(in_buffer);
+ MemCopy(raw + x, &in, sizeof(CType));
+
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t OK = 0x29000 + x;
+
+ Node* base0 = m.PointerConstant(in_buffer);
+ Node* base1 = m.PointerConstant(out_buffer);
+ Node* index0 = m.IntPtrConstant(x);
+ Node* index1 = m.IntPtrConstant(y);
+ Node* load = m.UnalignedLoad(rep, base0, index0);
+ m.UnalignedStore(rep.representation(), base1, index1, load);
+
+ m.Return(m.Int32Constant(OK));
+
+ CHECK_EQ(OK, m.Call());
+
+ raw = reinterpret_cast<byte*>(&out_buffer);
+ MemCopy(&out, raw + y, sizeof(CType));
+ CHECK(in == out);
+ }
+}
+} // namespace
+
+TEST(RunLoadImmIndex) {
+ RunLoadImmIndex<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
+ RunLoadImmIndex<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
+ RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
+ RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
+ RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
+ RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
+ RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+ RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kAligned);
+ RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kAligned);
+#if V8_TARGET_ARCH_64_BIT
+ RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
+#endif
+ // TODO(titzer): test various indexing modes.
+}
+
+TEST(RunUnalignedLoadImmIndex) {
+ RunLoadImmIndex<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
+ RunLoadImmIndex<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
+ RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
+ RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
+ RunLoadImmIndex<int32_t*>(MachineType::AnyTagged(),
+ TestAlignment::kUnaligned);
+ RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
+ RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
+#if V8_TARGET_ARCH_64_BIT
+ RunLoadImmIndex<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
+#endif
+ // TODO(titzer): test various indexing modes.
+}
+
+TEST(RunLoadStore) {
+ RunLoadStore<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
+ RunLoadStore<uint8_t>(MachineType::Uint8(), TestAlignment::kAligned);
+ RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
+ RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kAligned);
+ RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kAligned);
+ RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kAligned);
+ RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kAligned);
+ RunLoadStore<float>(MachineType::Float32(), TestAlignment::kAligned);
+ RunLoadStore<double>(MachineType::Float64(), TestAlignment::kAligned);
+#if V8_TARGET_ARCH_64_BIT
+ RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kAligned);
+#endif
+}
+
+TEST(RunUnalignedLoadStore) {
+ RunLoadStore<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
+ RunLoadStore<uint16_t>(MachineType::Uint16(), TestAlignment::kUnaligned);
+ RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
+ RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
+ RunLoadStore<void*>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
+ RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
+ RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
+#if V8_TARGET_ARCH_64_BIT
+ RunLoadStore<int64_t>(MachineType::Int64(), TestAlignment::kUnaligned);
+#endif
+}
+
+TEST(RunUnalignedLoadStoreUnalignedAccess) {
+ RunUnalignedLoadStoreUnalignedAccess<int16_t>(MachineType::Int16());
+ RunUnalignedLoadStoreUnalignedAccess<uint16_t>(MachineType::Uint16());
+ RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
+ RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
+ RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::AnyTagged());
+ RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
+ RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
+#if V8_TARGET_ARCH_64_BIT
+ RunUnalignedLoadStoreUnalignedAccess<int64_t>(MachineType::Int64());
+#endif
+}
+
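+// LSB(addr, bytes) evaluates to a pointer to the |bytes| least-significant
+// bytes of the value at |addr|, independent of the target's byte order.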
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
+#else
+#error "Unknown Architecture"
+#endif
+
+namespace {
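+// The helpers below load 8-, 16- and 32-bit values from buffer[0], store
+// the extended results into buffer[1..3], and verify the sign or zero
+// extension for every input value.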
+void RunLoadStoreSignExtend32(TestAlignment t) {
+ int32_t buffer[4];
+ RawMachineAssemblerTester<int32_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
+ if (t == TestAlignment::kAligned) {
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
+ Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* load16 =
+ m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
+ Node* load32 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int32());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
+ m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
+ load16);
+ m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
+ load32);
+ } else {
+ UNREACHABLE();
+ }
+ m.Return(load8);
+
+ FOR_INT32_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
+ CHECK_EQ(*i, buffer[3]);
+ }
+}
+
+void RunLoadStoreZeroExtend32(TestAlignment t) {
+ uint32_t buffer[4];
+ RawMachineAssemblerTester<uint32_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
+ if (t == TestAlignment::kAligned) {
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
+ Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* load16 =
+ m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
+ Node* load32 =
+ m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint32());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
+ m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord32,
+ load16);
+ m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord32,
+ load32);
+  } else {
+    UNREACHABLE();
+  }
+ m.Return(load8);
+
+ FOR_UINT32_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ((*i & 0xff), m.Call());
+ CHECK_EQ((*i & 0xff), buffer[1]);
+ CHECK_EQ((*i & 0xffff), buffer[2]);
+ CHECK_EQ(*i, buffer[3]);
+ }
+}
+} // namespace
+
+TEST(RunLoadStoreSignExtend32) {
+ RunLoadStoreSignExtend32(TestAlignment::kAligned);
+}
+
+TEST(RunUnalignedLoadStoreSignExtend32) {
+ RunLoadStoreSignExtend32(TestAlignment::kUnaligned);
+}
+
+TEST(RunLoadStoreZeroExtend32) {
+ RunLoadStoreZeroExtend32(TestAlignment::kAligned);
+}
+
+TEST(RunUnalignedLoadStoreZeroExtend32) {
+ RunLoadStoreZeroExtend32(TestAlignment::kUnaligned);
+}
+
+#if V8_TARGET_ARCH_64_BIT
+
+namespace {
+void RunLoadStoreSignExtend64(TestAlignment t) {
+ if (true) return; // TODO(titzer): sign extension of loads to 64-bit.
+ int64_t buffer[5];
+ RawMachineAssemblerTester<int64_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
+ if (t == TestAlignment::kAligned) {
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
+ Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
+ Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
+ m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* load16 =
+ m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
+ Node* load32 =
+ m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
+ Node* load64 = m.UnalignedLoadFromPointer(&buffer[0], MachineType::Int64());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
+ m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
+ load16);
+ m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
+ load32);
+ m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
+ load64);
+ } else {
+ UNREACHABLE();
+ }
+ m.Return(load8);
+
+ FOR_INT64_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
+ CHECK_EQ(static_cast<int32_t>(*i & 0xffffffff), buffer[3]);
+ CHECK_EQ(*i, buffer[4]);
+ }
+}
+
+void RunLoadStoreZeroExtend64(TestAlignment t) {
+ if (kPointerSize < 8) return;
+ uint64_t buffer[5];
+ RawMachineAssemblerTester<int64_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
+ if (t == TestAlignment::kAligned) {
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
+ Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
+ Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
+ m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* load16 =
+ m.UnalignedLoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
+ Node* load32 =
+ m.UnalignedLoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
+ Node* load64 =
+ m.UnalignedLoadFromPointer(&buffer[0], MachineType::Uint64());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
+ m.UnalignedStoreToPointer(&buffer[2], MachineRepresentation::kWord64,
+ load16);
+ m.UnalignedStoreToPointer(&buffer[3], MachineRepresentation::kWord64,
+ load32);
+ m.UnalignedStoreToPointer(&buffer[4], MachineRepresentation::kWord64,
+ load64);
+ } else {
+ UNREACHABLE();
+ }
+ m.Return(load8);
+
+ FOR_UINT64_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ((*i & 0xff), m.Call());
+ CHECK_EQ((*i & 0xff), buffer[1]);
+ CHECK_EQ((*i & 0xffff), buffer[2]);
+ CHECK_EQ((*i & 0xffffffff), buffer[3]);
+ CHECK_EQ(*i, buffer[4]);
+ }
+}
+
+} // namespace
+
+TEST(RunCheckedLoadInt64) {
+ int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
+ RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
+ Node* base = m.PointerConstant(buffer);
+ Node* index = m.Parameter(0);
+ Node* length = m.Int32Constant(16);
+ Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
+ index, length);
+ m.Return(load);
+
+ CHECK_EQ(buffer[0], m.Call(0));
+ CHECK_EQ(buffer[1], m.Call(8));
+ CheckOobValue(m.Call(16));
+}
+
+TEST(RunLoadStoreSignExtend64) {
+ RunLoadStoreSignExtend64(TestAlignment::kAligned);
+}
+
+TEST(RunUnalignedLoadStoreSignExtend64) {
+ RunLoadStoreSignExtend64(TestAlignment::kUnaligned);
+}
+
+TEST(RunLoadStoreZeroExtend64) {
+ RunLoadStoreZeroExtend64(TestAlignment::kAligned);
+}
+
+TEST(RunUnalignedLoadStoreZeroExtend64) {
+ RunLoadStoreZeroExtend64(TestAlignment::kUnaligned);
+}
+
+TEST(RunCheckedStoreInt64) {
+ const int64_t write = 0x5566778899aabbLL;
+ const int64_t before = 0x33bbccddeeff0011LL;
+ int64_t buffer[] = {before, before};
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* base = m.PointerConstant(buffer);
+ Node* index = m.Parameter(0);
+ Node* length = m.Int32Constant(16);
+ Node* value = m.Int64Constant(write);
+ Node* store =
+ m.AddNode(m.machine()->CheckedStore(MachineRepresentation::kWord64), base,
+ index, length, value);
+ USE(store);
+ m.Return(m.Int32Constant(11));
+
+ CHECK_EQ(11, m.Call(16));
+ CHECK_EQ(before, buffer[0]);
+ CHECK_EQ(before, buffer[1]);
+
+ CHECK_EQ(11, m.Call(0));
+ CHECK_EQ(write, buffer[0]);
+ CHECK_EQ(before, buffer[1]);
+
+ CHECK_EQ(11, m.Call(8));
+ CHECK_EQ(write, buffer[0]);
+ CHECK_EQ(write, buffer[1]);
+}
+#endif
+
+namespace {
+template <typename IntType>
+void LoadStoreTruncation(MachineType kRepresentation, TestAlignment t) {
+ IntType input;
+
+ RawMachineAssemblerTester<int32_t> m;
+ Node* ap1;
+ if (t == TestAlignment::kAligned) {
+ Node* a = m.LoadFromPointer(&input, kRepresentation);
+ ap1 = m.Int32Add(a, m.Int32Constant(1));
+ m.StoreToPointer(&input, kRepresentation.representation(), ap1);
+ } else if (t == TestAlignment::kUnaligned) {
+ Node* a = m.UnalignedLoadFromPointer(&input, kRepresentation);
+ ap1 = m.Int32Add(a, m.Int32Constant(1));
+ m.UnalignedStoreToPointer(&input, kRepresentation.representation(), ap1);
+ } else {
+ UNREACHABLE();
+ }
+ m.Return(ap1);
+
+ const IntType max = std::numeric_limits<IntType>::max();
+ const IntType min = std::numeric_limits<IntType>::min();
+
+ // Test upper bound.
+ input = max;
+ CHECK_EQ(max + 1, m.Call());
+ CHECK_EQ(min, input);
+
+ // Test lower bound.
+ input = min;
+ CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
+ CHECK_EQ(min + 1, input);
+
+ // Test all one byte values that are not one byte bounds.
+ for (int i = -127; i < 127; i++) {
+ input = i;
+ int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
+ CHECK_EQ(static_cast<IntType>(expected), m.Call());
+ CHECK_EQ(static_cast<IntType>(i + 1), input);
+ }
+}
+} // namespace
+
+TEST(RunLoadStoreTruncation) {
+ LoadStoreTruncation<int8_t>(MachineType::Int8(), TestAlignment::kAligned);
+ LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kAligned);
+}
+
+TEST(RunUnalignedLoadStoreTruncation) {
+ LoadStoreTruncation<int16_t>(MachineType::Int16(), TestAlignment::kUnaligned);
+}
+
+void TestRunOobCheckedLoad(bool length_is_immediate) {
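+  // Reference every CheckOobValue specialization so that the ones a given
+  // configuration does not exercise do not trigger unused-function warnings.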
+ USE(CheckOobValue<int32_t>);
+ USE(CheckOobValue<int64_t>);
+ USE(CheckOobValue<float>);
+ USE(CheckOobValue<double>);
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ MachineOperatorBuilder machine(m.zone());
+ const int32_t kNumElems = 27;
+ const int32_t kLength = kNumElems * 4;
+
+ int32_t buffer[kNumElems];
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* node =
+ m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
+ m.Return(node);
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(100);
+ rng.NextBytes(&buffer[0], sizeof(buffer));
+ }
+
+ // in-bounds accesses.
+ for (int32_t i = 0; i < kNumElems; i++) {
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ int32_t expected = buffer[i];
+ CHECK_EQ(expected, m.Call(offset, kLength));
+ }
+
+ // slightly out-of-bounds accesses.
+  for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ CheckOobValue(m.Call(offset, kLength));
+ }
+
+ // way out-of-bounds accesses.
+ for (int32_t offset = -2000000000; offset <= 2000000000;
+ offset += 100000000) {
+ if (offset == 0) continue;
+ CheckOobValue(m.Call(offset, kLength));
+ }
+}
+
+TEST(RunOobCheckedLoad) { TestRunOobCheckedLoad(false); }
+
+TEST(RunOobCheckedLoadImm) { TestRunOobCheckedLoad(true); }
+
+void TestRunOobCheckedStore(bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ MachineOperatorBuilder machine(m.zone());
+ const int32_t kNumElems = 29;
+ const int32_t kValue = -78227234;
+ const int32_t kLength = kNumElems * 4;
+
+ int32_t buffer[kNumElems + kNumElems];
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* val = m.Int32Constant(kValue);
+ m.AddNode(machine.CheckedStore(MachineRepresentation::kWord32), base, offset,
+ len, val);
+ m.Return(val);
+
+ // in-bounds accesses.
+ for (int32_t i = 0; i < kNumElems; i++) {
+ memset(buffer, 0, sizeof(buffer));
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ CHECK_EQ(kValue, m.Call(offset, kLength));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ if (i == j) {
+ CHECK_EQ(kValue, buffer[j]);
+ } else {
+ CHECK_EQ(0, buffer[j]);
+ }
+ }
+ }
+
+ memset(buffer, 0, sizeof(buffer));
+
+ // slightly out-of-bounds accesses.
+  for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ CHECK_EQ(kValue, m.Call(offset, kLength));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, buffer[j]);
+ }
+ }
+
+ // way out-of-bounds accesses.
+ for (int32_t offset = -2000000000; offset <= 2000000000;
+ offset += 100000000) {
+ if (offset == 0) continue;
+ CHECK_EQ(kValue, m.Call(offset, kLength));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, buffer[j]);
+ }
+ }
+}
+
+TEST(RunOobCheckedStore) { TestRunOobCheckedStore(false); }
+
+TEST(RunOobCheckedStoreImm) { TestRunOobCheckedStore(true); }
+
+// TODO(titzer): CheckedLoad/CheckedStore don't support 64-bit offsets.
+#define ALLOW_64_BIT_OFFSETS 0
+
+#if V8_TARGET_ARCH_64_BIT && ALLOW_64_BIT_OFFSETS
+
+void TestRunOobCheckedLoad64(uint32_t pseudo_base, bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint64(),
+ MachineType::Uint64());
+ MachineOperatorBuilder machine(m.zone());
+ const uint32_t kNumElems = 25;
+ const uint32_t kLength = kNumElems * 4;
+ int32_t real_buffer[kNumElems];
+
+ // Simulate the end of a large buffer.
+ int32_t* buffer = real_buffer - (pseudo_base / 4);
+ uint64_t length = kLength + pseudo_base;
+
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int64Constant(length) : m.Parameter(1);
+ Node* node =
+ m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
+ m.Return(node);
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(100);
+ rng.NextBytes(&real_buffer[0], sizeof(real_buffer));
+ }
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ uint64_t offset = pseudo_base + i * 4;
+ int32_t expected = real_buffer[i];
+ CHECK_EQ(expected, m.Call(offset, length));
+ }
+
+ // in-bounds accesses w.r.t lower 32-bits, but upper bits set.
+ for (uint64_t i = 0x100000000ULL; i != 0; i <<= 1) {
+ uint64_t offset = pseudo_base + i;
+ CheckOobValue(m.Call(offset, length));
+ }
+
+ // slightly out-of-bounds accesses.
+  for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint64_t offset = pseudo_base + i * 4;
+    CheckOobValue(m.Call(offset, length));
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t offset = length; offset < 100 * A_BILLION; offset += A_GIG) {
+ if (offset < length) continue;
+    CheckOobValue(m.Call(offset, length));
+ }
+}
+
+TEST(RunOobCheckedLoad64_0) {
+ TestRunOobCheckedLoad64(0, false);
+ TestRunOobCheckedLoad64(0, true);
+}
+
+TEST(RunOobCheckedLoad64_1) {
+ TestRunOobCheckedLoad64(1 * A_BILLION, false);
+ TestRunOobCheckedLoad64(1 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad64_2) {
+ TestRunOobCheckedLoad64(2 * A_BILLION, false);
+ TestRunOobCheckedLoad64(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad64_3) {
+ TestRunOobCheckedLoad64(3 * A_BILLION, false);
+ TestRunOobCheckedLoad64(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad64_4) {
+ TestRunOobCheckedLoad64(4 * A_BILLION, false);
+ TestRunOobCheckedLoad64(4 * A_BILLION, true);
+}
+
+void TestRunOobCheckedStore64(uint32_t pseudo_base, bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint64(),
+ MachineType::Uint64());
+ MachineOperatorBuilder machine(m.zone());
+ const uint32_t kNumElems = 21;
+ const uint32_t kLength = kNumElems * 4;
+ const uint32_t kValue = 897234987;
+ int32_t real_buffer[kNumElems + kNumElems];
+
+ // Simulate the end of a large buffer.
+ int32_t* buffer = real_buffer - (pseudo_base / 4);
+ uint64_t length = kLength + pseudo_base;
+
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int64Constant(length) : m.Parameter(1);
+ Node* val = m.Int32Constant(kValue);
+ m.AddNode(machine.CheckedStore(MachineRepresentation::kWord32), base, offset,
+ len, val);
+ m.Return(val);
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ memset(real_buffer, 0, sizeof(real_buffer));
+ uint64_t offset = pseudo_base + i * 4;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (uint32_t j = 0; j < kNumElems + kNumElems; j++) {
+ if (i == j) {
+ CHECK_EQ(kValue, real_buffer[j]);
+ } else {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+ }
+
+ memset(real_buffer, 0, sizeof(real_buffer));
+
+ // in-bounds accesses w.r.t lower 32-bits, but upper bits set.
+ for (uint64_t i = 0x100000000ULL; i != 0; i <<= 1) {
+ uint64_t offset = pseudo_base + i;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+
+ // slightly out-of-bounds accesses.
+  for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint64_t offset = pseudo_base + i * 4;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t offset = length; offset < 100 * A_BILLION; offset += A_GIG) {
+ if (offset < length) continue;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+}
+
+TEST(RunOobCheckedStore64_0) {
+ TestRunOobCheckedStore64(0, false);
+ TestRunOobCheckedStore64(0, true);
+}
+
+TEST(RunOobCheckedStore64_1) {
+ TestRunOobCheckedStore64(1 * A_BILLION, false);
+ TestRunOobCheckedStore64(1 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedStore64_2) {
+ TestRunOobCheckedStore64(2 * A_BILLION, false);
+ TestRunOobCheckedStore64(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedStore64_3) {
+ TestRunOobCheckedStore64(3 * A_BILLION, false);
+ TestRunOobCheckedStore64(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedStore64_4) {
+ TestRunOobCheckedStore64(4 * A_BILLION, false);
+ TestRunOobCheckedStore64(4 * A_BILLION, true);
+}
+
+#endif
+
+void TestRunOobCheckedLoad_pseudo(uint64_t x, bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t pseudo_base = static_cast<uint32_t>(x);
+ MachineOperatorBuilder machine(m.zone());
+ const uint32_t kNumElems = 29;
+ const uint32_t kLength = pseudo_base + kNumElems * 4;
+
+ int32_t buffer[kNumElems];
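+  // Bias the base pointer downwards so that valid offsets start at
+  // pseudo_base, simulating accesses near the end of a much larger buffer.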
+ Node* base = m.PointerConstant(reinterpret_cast<byte*>(buffer) - pseudo_base);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* node =
+ m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
+ m.Return(node);
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(100);
+ rng.NextBytes(&buffer[0], sizeof(buffer));
+ }
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(int32_t));
+ uint32_t expected = buffer[i];
+ CHECK_EQ(expected, m.Call(offset + pseudo_base, kLength));
+ }
+
+ // slightly out-of-bounds accesses.
+ for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(int32_t));
+ CheckOobValue(m.Call(offset + pseudo_base, kLength));
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t i = pseudo_base + sizeof(buffer); i < 0xFFFFFFFF;
+ i += A_BILLION) {
+ uint32_t offset = static_cast<uint32_t>(i);
+ CheckOobValue(m.Call(offset, kLength));
+ }
+}
+
+TEST(RunOobCheckedLoad_pseudo0) {
+ TestRunOobCheckedLoad_pseudo(0, false);
+ TestRunOobCheckedLoad_pseudo(0, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo1) {
+ TestRunOobCheckedLoad_pseudo(100000, false);
+ TestRunOobCheckedLoad_pseudo(100000, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo2) {
+ TestRunOobCheckedLoad_pseudo(A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo3) {
+ TestRunOobCheckedLoad_pseudo(A_GIG, false);
+ TestRunOobCheckedLoad_pseudo(A_GIG, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo4) {
+ TestRunOobCheckedLoad_pseudo(2 * A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo5) {
+ TestRunOobCheckedLoad_pseudo(2 * A_GIG, false);
+ TestRunOobCheckedLoad_pseudo(2 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo6) {
+ TestRunOobCheckedLoad_pseudo(3 * A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo7) {
+ TestRunOobCheckedLoad_pseudo(3 * A_GIG, false);
+ TestRunOobCheckedLoad_pseudo(3 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo8) {
+ TestRunOobCheckedLoad_pseudo(4 * A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(4 * A_BILLION, true);
+}
+
+template <typename MemType>
+void TestRunOobCheckedLoadT_pseudo(uint64_t x, bool length_is_immediate) {
+ const int32_t kReturn = 11999;
+ const uint32_t kNumElems = 29;
+ MemType buffer[kNumElems];
+ uint32_t pseudo_base = static_cast<uint32_t>(x);
+ const uint32_t kLength = static_cast<uint32_t>(pseudo_base + sizeof(buffer));
+
+ MemType result;
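+  // The generated code writes the loaded value into |result| so the test
+  // can inspect both in-bounds values and out-of-bounds default values.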
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ MachineOperatorBuilder machine(m.zone());
+ Node* base = m.PointerConstant(reinterpret_cast<byte*>(buffer) - pseudo_base);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* node = m.AddNode(machine.CheckedLoad(MachineTypeForC<MemType>()), base,
+ offset, len);
+ Node* store = m.StoreToPointer(
+ &result, MachineTypeForC<MemType>().representation(), node);
+ USE(store);
+ m.Return(m.Int32Constant(kReturn));
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(103);
+ rng.NextBytes(&buffer[0], sizeof(buffer));
+ }
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(MemType));
+ MemType expected = buffer[i];
+ CHECK_EQ(kReturn, m.Call(offset + pseudo_base, kLength));
+ CHECK_EQ(expected, result);
+ }
+
+ // slightly out-of-bounds accesses.
+ for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(MemType));
+ CHECK_EQ(kReturn, m.Call(offset + pseudo_base, kLength));
+ CheckOobValue(result);
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t i = pseudo_base + sizeof(buffer); i < 0xFFFFFFFF;
+ i += A_BILLION) {
+ uint32_t offset = static_cast<uint32_t>(i);
+ CHECK_EQ(kReturn, m.Call(offset, kLength));
+ CheckOobValue(result);
+ }
+}
+
+TEST(RunOobCheckedLoadT_pseudo0) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(0, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(0, true);
+ TestRunOobCheckedLoadT_pseudo<float>(0, false);
+ TestRunOobCheckedLoadT_pseudo<float>(0, true);
+ TestRunOobCheckedLoadT_pseudo<double>(0, false);
+ TestRunOobCheckedLoadT_pseudo<double>(0, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo1) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(100000, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(100000, true);
+ TestRunOobCheckedLoadT_pseudo<float>(100000, false);
+ TestRunOobCheckedLoadT_pseudo<float>(100000, true);
+ TestRunOobCheckedLoadT_pseudo<double>(100000, false);
+ TestRunOobCheckedLoadT_pseudo<double>(100000, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo2) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo3) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<float>(A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<float>(A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<double>(A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<double>(A_GIG, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo4) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo5) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo6) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo7) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo8) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, true);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 2bfe1244be..50b46d7d0e 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -7,8 +7,10 @@
#include <limits>
#include "src/base/bits.h"
+#include "src/base/ieee754.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
+#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
@@ -28,6 +30,39 @@ TEST(RunInt32Add) {
CHECK_EQ(1, m.Call());
}
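+// Computes (shift_left << shift_right) and (add_left + add_right) and adds
+// them in either operand order, exercising add-with-shifted-operand
+// patterns such as LSA on MIPS.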
+static int RunInt32AddShift(bool is_left, int32_t add_left, int32_t add_right,
+                            int32_t shift_left, int32_t shift_right) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* shift =
+      m.Word32Shl(m.Int32Constant(shift_left), m.Int32Constant(shift_right));
+ Node* add = m.Int32Add(m.Int32Constant(add_left), m.Int32Constant(add_right));
+ Node* lsa = is_left ? m.Int32Add(shift, add) : m.Int32Add(add, shift);
+ m.Return(lsa);
+ return m.Call();
+}
+
+TEST(RunInt32AddShift) {
+ struct Test_case {
+    int32_t add_left, add_right, shift_left, shift_right, expected;
+ };
+
+ Test_case tc[] = {
+ {20, 22, 4, 2, 58},
+ {20, 22, 4, 1, 50},
+ {20, 22, 1, 6, 106},
+ {INT_MAX - 2, 1, 1, 1, INT_MIN}, // INT_MAX - 2 + 1 + (1 << 1), overflow.
+ };
+ const size_t tc_size = sizeof(tc) / sizeof(Test_case);
+
+  for (size_t i = 0; i < tc_size; ++i) {
+    CHECK_EQ(tc[i].expected,
+             RunInt32AddShift(false, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+    CHECK_EQ(tc[i].expected,
+             RunInt32AddShift(true, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+  }
+}
TEST(RunWord32ReverseBits) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
@@ -47,6 +82,23 @@ TEST(RunWord32ReverseBits) {
CHECK_EQ(uint32_t(0xffffffff), m.Call(uint32_t(0xffffffff)));
}
+TEST(RunWord32ReverseBytes) {
+ BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ if (!m.machine()->Word32ReverseBytes().IsSupported()) {
+ // We can only test the operator if it exists on the testing platform.
+ return;
+ }
+ m.Return(m.AddNode(m.machine()->Word32ReverseBytes().op(), m.Parameter(0)));
+
+ CHECK_EQ(uint32_t(0x00000000), m.Call(uint32_t(0x00000000)));
+ CHECK_EQ(uint32_t(0x12345678), m.Call(uint32_t(0x78563412)));
+ CHECK_EQ(uint32_t(0xfedcba09), m.Call(uint32_t(0x09badcfe)));
+ CHECK_EQ(uint32_t(0x01010101), m.Call(uint32_t(0x01010101)));
+ CHECK_EQ(uint32_t(0x01020408), m.Call(uint32_t(0x08040201)));
+ CHECK_EQ(uint32_t(0xf0703010), m.Call(uint32_t(0x103070f0)));
+ CHECK_EQ(uint32_t(0x1f8d0a3a), m.Call(uint32_t(0x3a0a8d1f)));
+ CHECK_EQ(uint32_t(0xffffffff), m.Call(uint32_t(0xffffffff)));
+}
TEST(RunWord32Ctz) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
@@ -169,6 +221,23 @@ TEST(RunWord64ReverseBits) {
CHECK_EQ(uint64_t(0xffffffffffffffff), m.Call(uint64_t(0xffffffffffffffff)));
}
+TEST(RunWord64ReverseBytes) {
+ BufferedRawMachineAssemblerTester<uint64_t> m(MachineType::Uint64());
+ if (!m.machine()->Word64ReverseBytes().IsSupported()) {
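+    // We can only test the operator if it exists on the testing platform.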
+ return;
+ }
+
+ m.Return(m.AddNode(m.machine()->Word64ReverseBytes().op(), m.Parameter(0)));
+
+ CHECK_EQ(uint64_t(0x0000000000000000), m.Call(uint64_t(0x0000000000000000)));
+ CHECK_EQ(uint64_t(0x1234567890abcdef), m.Call(uint64_t(0xefcdab9078563412)));
+ CHECK_EQ(uint64_t(0xfedcba0987654321), m.Call(uint64_t(0x2143658709badcfe)));
+ CHECK_EQ(uint64_t(0x0101010101010101), m.Call(uint64_t(0x0101010101010101)));
+ CHECK_EQ(uint64_t(0x0102040803060c01), m.Call(uint64_t(0x010c060308040201)));
+ CHECK_EQ(uint64_t(0xf0703010e060200f), m.Call(uint64_t(0x0f2060e0103070f0)));
+ CHECK_EQ(uint64_t(0x2f8a6df01c21fa3b), m.Call(uint64_t(0x3bfa211cf06d8a2f)));
+ CHECK_EQ(uint64_t(0xffffffffffffffff), m.Call(uint64_t(0xffffffffffffffff)));
+}
TEST(RunWord64Clz) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint64());
@@ -636,6 +705,38 @@ TEST(RunInt64SubWithOverflowInBranchP) {
}
}
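+// 64-bit variant of RunInt32AddShift above; the node name "dlsa" suggests the
+// MIPS64 DLSA (doubleword shift-add) pattern is the intended target.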
+static int64_t RunInt64AddShift(bool is_left, int64_t add_left,
+                                int64_t add_right, int64_t shift_left,
+                                int64_t shift_right) {
+  RawMachineAssemblerTester<int64_t> m;
+  Node* shift =
+      m.Word64Shl(m.Int64Constant(shift_left), m.Int64Constant(shift_right));
+  Node* add =
+      m.Int64Add(m.Int64Constant(add_left), m.Int64Constant(add_right));
+  Node* dlsa = is_left ? m.Int64Add(shift, add) : m.Int64Add(add, shift);
+  m.Return(dlsa);
+  return m.Call();
+}
+
+TEST(RunInt64AddShift) {
+ struct Test_case {
+    int64_t add_left, add_right, shift_left, shift_right, expected;
+ };
+
+ Test_case tc[] = {
+ {20, 22, 4, 2, 58},
+ {20, 22, 4, 1, 50},
+ {20, 22, 1, 6, 106},
+ {INT64_MAX - 2, 1, 1, 1,
+ INT64_MIN}, // INT64_MAX - 2 + 1 + (1 << 1), overflow.
+ };
+ const size_t tc_size = sizeof(tc) / sizeof(Test_case);
+
+  for (size_t i = 0; i < tc_size; ++i) {
+    CHECK_EQ(tc[i].expected,
+             RunInt64AddShift(false, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+    CHECK_EQ(tc[i].expected,
+             RunInt64AddShift(true, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+  }
+}
// TODO(titzer): add tests that run 64-bit integer operations.
#endif // V8_TARGET_ARCH_64_BIT
@@ -1142,94 +1243,6 @@ TEST(RunSwitch4) {
}
-TEST(RunLoadInt32) {
- RawMachineAssemblerTester<int32_t> m;
-
- int32_t p1 = 0; // loads directly from this location.
- m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
-
- FOR_INT32_INPUTS(i) {
- p1 = *i;
- CHECK_EQ(p1, m.Call());
- }
-}
-
-
-TEST(RunLoadInt32Offset) {
- int32_t p1 = 0; // loads directly from this location.
-
- int32_t offsets[] = {-2000000, -100, -101, 1, 3,
- 7, 120, 2000, 2000000000, 0xff};
-
- for (size_t i = 0; i < arraysize(offsets); i++) {
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = offsets[i];
- byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
- // generate load [#base + #index]
- m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
-
- FOR_INT32_INPUTS(j) {
- p1 = *j;
- CHECK_EQ(p1, m.Call());
- }
- }
-}
-
-
-TEST(RunLoadStoreFloat32Offset) {
- float p1 = 0.0f; // loads directly from this location.
- float p2 = 0.0f; // and stores directly into this location.
-
- FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
- // generate load [#base + #index]
- Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
- m.IntPtrConstant(offset));
- m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
- m.IntPtrConstant(offset), load, kNoWriteBarrier);
- m.Return(m.Int32Constant(magic));
-
- FOR_FLOAT32_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
- CHECK_EQ(magic, m.Call());
- CHECK_DOUBLE_EQ(p1, p2);
- }
- }
-}
-
-
-TEST(RunLoadStoreFloat64Offset) {
- double p1 = 0; // loads directly from this location.
- double p2 = 0; // and stores directly into this location.
-
- FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
- // generate load [#base + #index]
- Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
- m.IntPtrConstant(offset));
- m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
- m.IntPtrConstant(offset), load, kNoWriteBarrier);
- m.Return(m.Int32Constant(magic));
-
- FOR_FLOAT64_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
- CHECK_EQ(magic, m.Call());
- CHECK_DOUBLE_EQ(p1, p2);
- }
- }
-}
-
-
TEST(RunInt32AddP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
@@ -1709,7 +1722,6 @@ TEST(RunInt32SubP) {
}
}
-
TEST(RunInt32SubImm) {
{
FOR_UINT32_INPUTS(i) {
@@ -1733,6 +1745,11 @@ TEST(RunInt32SubImm) {
}
}
+TEST(RunInt32SubImm2) {
+ BufferedRawMachineAssemblerTester<int32_t> r;
+ r.Return(r.Int32Sub(r.Int32Constant(-1), r.Int32Constant(0)));
+ CHECK_EQ(-1, r.Call());
+}
TEST(RunInt32SubAndWord32SarP) {
{
@@ -2099,7 +2116,6 @@ TEST(RunInt32MulImm) {
}
}
-
TEST(RunInt32MulAndInt32AddP) {
{
FOR_INT32_INPUTS(i) {
@@ -3566,92 +3582,6 @@ TEST(RunDeadInt32Binops) {
}
-template <typename Type>
-static void RunLoadImmIndex(MachineType rep) {
- const int kNumElems = 3;
- Type buffer[kNumElems];
-
- // initialize the buffer with some raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
- }
-
- // Test with various large and small offsets.
- for (int offset = -1; offset <= 200000; offset *= -5) {
- for (int i = 0; i < kNumElems; i++) {
- BufferedRawMachineAssemblerTester<Type> m;
- Node* base = m.PointerConstant(buffer - offset);
- Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
- m.Return(m.Load(rep, base, index));
-
- volatile Type expected = buffer[i];
- volatile Type actual = m.Call();
- CHECK_EQ(expected, actual);
- }
- }
-}
-
-
-TEST(RunLoadImmIndex) {
- RunLoadImmIndex<int8_t>(MachineType::Int8());
- RunLoadImmIndex<uint8_t>(MachineType::Uint8());
- RunLoadImmIndex<int16_t>(MachineType::Int16());
- RunLoadImmIndex<uint16_t>(MachineType::Uint16());
- RunLoadImmIndex<int32_t>(MachineType::Int32());
- RunLoadImmIndex<uint32_t>(MachineType::Uint32());
- RunLoadImmIndex<int32_t*>(MachineType::AnyTagged());
- RunLoadImmIndex<float>(MachineType::Float32());
- RunLoadImmIndex<double>(MachineType::Float64());
- if (kPointerSize == 8) {
- RunLoadImmIndex<int64_t>(MachineType::Int64());
- }
- // TODO(titzer): test various indexing modes.
-}
-
-
-template <typename CType>
-static void RunLoadStore(MachineType rep) {
- const int kNumElems = 4;
- CType buffer[kNumElems];
-
- for (int32_t x = 0; x < kNumElems; x++) {
- int32_t y = kNumElems - x - 1;
- // initialize the buffer with raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
- }
-
- RawMachineAssemblerTester<int32_t> m;
- int32_t OK = 0x29000 + x;
- Node* base = m.PointerConstant(buffer);
- Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
- Node* load = m.Load(rep, base, index0);
- Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
- m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
- m.Return(m.Int32Constant(OK));
-
- CHECK(buffer[x] != buffer[y]);
- CHECK_EQ(OK, m.Call());
- CHECK(buffer[x] == buffer[y]);
- }
-}
-
-
-TEST(RunLoadStore) {
- RunLoadStore<int8_t>(MachineType::Int8());
- RunLoadStore<uint8_t>(MachineType::Uint8());
- RunLoadStore<int16_t>(MachineType::Int16());
- RunLoadStore<uint16_t>(MachineType::Uint16());
- RunLoadStore<int32_t>(MachineType::Int32());
- RunLoadStore<uint32_t>(MachineType::Uint32());
- RunLoadStore<void*>(MachineType::AnyTagged());
- RunLoadStore<float>(MachineType::Float32());
- RunLoadStore<double>(MachineType::Float64());
-}
-
-
TEST(RunFloat32Add) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
MachineType::Float32());
@@ -3673,6 +3603,11 @@ TEST(RunFloat32Sub) {
}
}
+TEST(RunFloat32Neg) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ m.Return(m.AddNode(m.machine()->Float32Neg(), m.Parameter(0)));
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(-0.0f - *i, m.Call(*i)); }
+}
TEST(RunFloat32Mul) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
@@ -3717,6 +3652,11 @@ TEST(RunFloat64Sub) {
}
}
+TEST(RunFloat64Neg) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.AddNode(m.machine()->Float64Neg(), m.Parameter(0)));
+  FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(-0.0 - *i, m.Call(*i)); }
+}
TEST(RunFloat64Mul) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
@@ -3808,67 +3748,55 @@ TEST(RunFloat64AddP) {
}
}
-
-TEST(RunFloa32MaxP) {
+TEST(RunFloat64MaxP) {
RawMachineAssemblerTester<int32_t> m;
- Float32BinopTester bt(&m);
- if (!m.machine()->Float32Max().IsSupported()) return;
-
- bt.AddReturn(m.Float32Max(bt.param0, bt.param1));
+ Float64BinopTester bt(&m);
+ bt.AddReturn(m.Float64Max(bt.param0, bt.param1));
- FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl > *pr ? *pl : *pr, bt.call(*pl, *pr));
+ FOR_FLOAT64_INPUTS(pl) {
+ FOR_FLOAT64_INPUTS(pr) {
+ CHECK_DOUBLE_EQ(JSMax(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-TEST(RunFloat64MaxP) {
+TEST(RunFloat64MinP) {
RawMachineAssemblerTester<int32_t> m;
Float64BinopTester bt(&m);
- if (!m.machine()->Float64Max().IsSupported()) return;
-
- bt.AddReturn(m.Float64Max(bt.param0, bt.param1));
+ bt.AddReturn(m.Float64Min(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl > *pr ? *pl : *pr, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(JSMin(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-
-TEST(RunFloat32MinP) {
+TEST(RunFloat32Max) {
RawMachineAssemblerTester<int32_t> m;
Float32BinopTester bt(&m);
- if (!m.machine()->Float32Min().IsSupported()) return;
-
- bt.AddReturn(m.Float32Min(bt.param0, bt.param1));
+ bt.AddReturn(m.Float32Max(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl < *pr ? *pl : *pr, bt.call(*pl, *pr));
+ CHECK_FLOAT_EQ(JSMax(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-
-TEST(RunFloat64MinP) {
+TEST(RunFloat32Min) {
RawMachineAssemblerTester<int32_t> m;
- Float64BinopTester bt(&m);
- if (!m.machine()->Float64Min().IsSupported()) return;
-
- bt.AddReturn(m.Float64Min(bt.param0, bt.param1));
+ Float32BinopTester bt(&m);
+ bt.AddReturn(m.Float32Min(bt.param0, bt.param1));
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl < *pr ? *pl : *pr, bt.call(*pl, *pr));
+ FOR_FLOAT32_INPUTS(pl) {
+ FOR_FLOAT32_INPUTS(pr) {
+ CHECK_FLOAT_EQ(JSMin(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-
TEST(RunFloat32SubP) {
RawMachineAssemblerTester<int32_t> m;
Float32BinopTester bt(&m);
@@ -4110,9 +4038,15 @@ TEST(RunChangeUint32ToFloat64) {
TEST(RunTruncateFloat32ToInt32) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Float32());
m.Return(m.TruncateFloat32ToInt32(m.Parameter(0)));
+  // The upper bound is (INT32_MAX + 1), the lowest float-representable number
+  // above INT32_MAX that cannot be represented as int32.
+  float upper_bound = 2147483648.0f;
+  // We use INT32_MIN as the lower bound because (INT32_MIN - 1) is not
+  // representable as float, and neither is any number between (INT32_MIN - 1)
+  // and INT32_MIN.
+  float lower_bound = static_cast<float>(INT32_MIN);
FOR_FLOAT32_INPUTS(i) {
- if (*i <= static_cast<float>(std::numeric_limits<int32_t>::max()) &&
- *i >= static_cast<float>(std::numeric_limits<int32_t>::min())) {
+ if (*i < upper_bound && *i >= lower_bound) {
CHECK_FLOAT_EQ(static_cast<int32_t>(*i), m.Call(*i));
}
}
@@ -4122,23 +4056,20 @@ TEST(RunTruncateFloat32ToInt32) {
TEST(RunTruncateFloat32ToUint32) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float32());
m.Return(m.TruncateFloat32ToUint32(m.Parameter(0)));
- {
- FOR_UINT32_INPUTS(i) {
- float input = static_cast<float>(*i);
- // This condition on 'input' is required because
- // static_cast<float>(std::numeric_limits<uint32_t>::max()) results in a
- // value outside uint32 range.
- if (input < static_cast<float>(std::numeric_limits<uint32_t>::max())) {
- CHECK_EQ(static_cast<uint32_t>(input), m.Call(input));
- }
+  // The upper bound is (UINT32_MAX + 1), the lowest float-representable number
+  // above UINT32_MAX that cannot be represented as uint32.
+  double upper_bound = 4294967296.0;
+  double lower_bound = -1.0;
+ FOR_UINT32_INPUTS(i) {
+ volatile float input = static_cast<float>(*i);
+ if (input < upper_bound) {
+ CHECK_EQ(static_cast<uint32_t>(input), m.Call(input));
}
}
- {
- FOR_FLOAT32_INPUTS(i) {
- if (*i <= static_cast<float>(std::numeric_limits<uint32_t>::max()) &&
- *i >= static_cast<float>(std::numeric_limits<uint32_t>::min())) {
- CHECK_FLOAT_EQ(static_cast<uint32_t>(*i), m.Call(*i));
- }
+ FOR_FLOAT32_INPUTS(j) {
+ if ((*j < upper_bound) && (*j > lower_bound)) {
+ CHECK_FLOAT_EQ(static_cast<uint32_t>(*j), m.Call(*j));
}
}
}
@@ -4201,7 +4132,7 @@ uint64_t ToInt64(uint32_t low, uint32_t high) {
return (static_cast<uint64_t>(high) << 32) | static_cast<uint64_t>(low);
}
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X87
TEST(RunInt32PairAdd) {
BufferedRawMachineAssemblerTester<int32_t> m(
MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
@@ -4449,6 +4380,56 @@ TEST(RunWord32PairShlWithSharedInput) {
TestWord32PairShlWithSharedInput(1, 1);
}
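+// On 32-bit targets a 64-bit shift is expressed as a pair operation that takes
+// the low word, the high word and the shift amount, and yields the two halves
+// of the result as projection 0 (low) and projection 1 (high).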
+TEST(RunWord32PairShr) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+  Node* PairShr =
+      m.Word32PairShr(m.Parameter(0), m.Parameter(1), m.Parameter(2));
+
+  m.StoreToPointer(&low, MachineRepresentation::kWord32,
+                   m.Projection(0, PairShr));
+  m.StoreToPointer(&high, MachineRepresentation::kWord32,
+                   m.Projection(1, PairShr));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j);
+ CHECK_EQ(*i >> j, ToInt64(low, high));
+ }
+ }
+}
+
+TEST(RunWord32PairSar) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+  Node* PairSar =
+      m.Word32PairSar(m.Parameter(0), m.Parameter(1), m.Parameter(2));
+
+  m.StoreToPointer(&low, MachineRepresentation::kWord32,
+                   m.Projection(0, PairSar));
+  m.StoreToPointer(&high, MachineRepresentation::kWord32,
+                   m.Projection(1, PairSar));
+ m.Return(m.Int32Constant(74));
+
+ FOR_INT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j);
+ CHECK_EQ(*i >> j, ToInt64(low, high));
+ }
+ }
+}
+
#endif
TEST(RunDeadChangeFloat64ToInt32) {
@@ -4968,45 +4949,6 @@ TEST(RunFloat64LessThan) {
}
-template <typename IntType>
-static void LoadStoreTruncation(MachineType kRepresentation) {
- IntType input;
-
- RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input, kRepresentation);
- Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
- m.StoreToPointer(&input, kRepresentation.representation(), ap1);
- m.Return(ap1);
-
- const IntType max = std::numeric_limits<IntType>::max();
- const IntType min = std::numeric_limits<IntType>::min();
-
- // Test upper bound.
- input = max;
- CHECK_EQ(max + 1, m.Call());
- CHECK_EQ(min, input);
-
- // Test lower bound.
- input = min;
- CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
- CHECK_EQ(min + 1, input);
-
- // Test all one byte values that are not one byte bounds.
- for (int i = -127; i < 127; i++) {
- input = i;
- int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
- CHECK_EQ(static_cast<IntType>(expected), m.Call());
- CHECK_EQ(static_cast<IntType>(i + 1), input);
- }
-}
-
-
-TEST(RunLoadStoreTruncation) {
- LoadStoreTruncation<int8_t>(MachineType::Int8());
- LoadStoreTruncation<int16_t>(MachineType::Int16());
-}
-
-
static void IntPtrCompare(intptr_t left, intptr_t right) {
for (int test = 0; test < 7; test++) {
RawMachineAssemblerTester<bool> m(MachineType::Pointer(),
@@ -5352,6 +5294,98 @@ TEST(RunInt32SubWithOverflowInBranchP) {
}
}
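+// Int32MulWithOverflow produces two projections: projection 0 is the product
+// and projection 1 is the overflow bit, checked here against
+// bits::SignedMulOverflow32 as the reference.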
+TEST(RunInt32MulWithOverflowP) {
+ int32_t actual_val = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* add = m.Int32MulWithOverflow(bt.param0, bt.param1);
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ bt.AddReturn(ovf);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected_val;
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+}
+
+TEST(RunInt32MulWithOverflowImm) {
+ int32_t actual_val = -1, expected_val = 0;
+ FOR_INT32_INPUTS(i) {
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* add = m.Int32MulWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* add = m.Int32MulWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* add =
+ m.Int32MulWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ m.Return(ovf);
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+}
+
+TEST(RunInt32MulWithOverflowInBranchP) {
+ int constant = 911777;
+ RawMachineLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* add = m.Int32MulWithOverflow(bt.param0, bt.param1);
+ Node* ovf = m.Projection(1, add);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Int32Constant(constant));
+ m.Bind(&blockb);
+ Node* val = m.Projection(0, add);
+ bt.AddReturn(val);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected;
+ if (bits::SignedMulOverflow32(*i, *j, &expected)) expected = constant;
+ CHECK_EQ(expected, bt.call(*i, *j));
+ }
+ }
+}
TEST(RunWord64EqualInBranchP) {
int64_t input;
@@ -5417,8 +5451,7 @@ TEST(RunTruncateInt64ToInt32P) {
}
}
-
-TEST(RunTruncateFloat64ToInt32P) {
+TEST(RunTruncateFloat64ToWord32P) {
struct {
double from;
double raw;
@@ -5479,8 +5512,7 @@ TEST(RunTruncateFloat64ToInt32P) {
{-1.7976931348623157e+308, 0}};
double input = -1.0;
RawMachineAssemblerTester<int32_t> m;
- m.Return(m.TruncateFloat64ToInt32(
- TruncationMode::kJavaScript,
+ m.Return(m.TruncateFloat64ToWord32(
m.LoadFromPointer(&input, MachineType::Float64())));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i].from;
@@ -5489,6 +5521,12 @@ TEST(RunTruncateFloat64ToInt32P) {
}
}
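+// The name suggests this guards against a missing sign extension: feeding the
+// truncated -1.0 through Int32Sub would expose stale upper bits on 64-bit
+// targets.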
+TEST(RunTruncateFloat64ToWord32SignExtension) {
+ BufferedRawMachineAssemblerTester<int32_t> r;
+ r.Return(r.Int32Sub(r.TruncateFloat64ToWord32(r.Float64Constant(-1.0)),
+ r.Int32Constant(0)));
+ CHECK_EQ(-1, r.Call());
+}
TEST(RunChangeFloat32ToFloat64) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float32());
@@ -5573,6 +5611,204 @@ TEST(RunFloat64Abs) {
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(std::abs(*i), m.Call(*i)); }
}
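+// The transcendental Float64 operators below are checked for exact agreement
+// with the fdlibm-derived reference implementations from src/base/ieee754.h.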
+TEST(RunFloat64Acos) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Acos(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::acos(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Acosh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Acosh(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::acosh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Asin) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Asin(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::asin(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Asinh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Asinh(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::asinh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Atan) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Atan(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::atan(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Atanh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Atanh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(), m.Call(1.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-1.0));
+ CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::atanh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Atan2) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ m.Return(m.Float64Atan2(m.Parameter(0), m.Parameter(1)));
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) {
+ CHECK_DOUBLE_EQ(ieee754::atan2(*i, *j), m.Call(*i, *j));
+ }
+ }
+}
+
+TEST(RunFloat64Cos) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Cos(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::cos(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Cosh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Cosh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::cosh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Exp) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Exp(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_EQ(0.0, m.Call(-std::numeric_limits<double>::infinity()));
+ CHECK_DOUBLE_EQ(1.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(1.0, m.Call(0.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::exp(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Expm1) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Expm1(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_EQ(-1.0, m.Call(-std::numeric_limits<double>::infinity()));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::expm1(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK(std::isnan(m.Call(-1.0)));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-0.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(1.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log1p) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log1p(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-1.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
+ CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log1p(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log2) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log2(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK(std::isnan(m.Call(-1.0)));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-0.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(1.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log2(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log10) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log10(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK(std::isnan(m.Call(-1.0)));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-0.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(0.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log10(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Cbrt) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Cbrt(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(),
+ m.Call(-std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::cbrt(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Sin) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Sin(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::sin(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Sinh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Sinh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::sinh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Tan) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Tan(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::tan(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Tanh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Tanh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::tanh(*i), m.Call(*i)); }
+}
static double two_30 = 1 << 30; // 2^30 is a smi boundary.
static double two_52 = two_30 * (1 << 22); // 2^52 is a precision boundary.
@@ -5854,50 +6090,6 @@ TEST(RunCallCFunction8) {
#if V8_TARGET_ARCH_64_BIT
// TODO(titzer): run int64 tests on all platforms when supported.
-TEST(RunCheckedLoadInt64) {
- int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
- RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
- index, length);
- m.Return(load);
-
- CHECK_EQ(buffer[0], m.Call(0));
- CHECK_EQ(buffer[1], m.Call(8));
- CHECK_EQ(0, m.Call(16));
-}
-
-
-TEST(RunCheckedStoreInt64) {
- const int64_t write = 0x5566778899aabbLL;
- const int64_t before = 0x33bbccddeeff0011LL;
- int64_t buffer[] = {before, before};
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* value = m.Int64Constant(write);
- Node* store =
- m.AddNode(m.machine()->CheckedStore(MachineRepresentation::kWord64), base,
- index, length, value);
- USE(store);
- m.Return(m.Int32Constant(11));
-
- CHECK_EQ(11, m.Call(16));
- CHECK_EQ(before, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(0));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(8));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(write, buffer[1]);
-}
-
TEST(RunBitcastInt64ToFloat64) {
int64_t input = 1;
@@ -6348,7 +6540,6 @@ TEST(RunComputedCodeObject) {
CallDescriptor::kCallCodeObject, // kind
MachineType::AnyTagged(), // target_type
c->GetInputLocation(0), // target_loc
- &sig, // machine_sig
&loc, // location_sig
0, // stack count
Operator::kNoProperties, // properties
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index bfdcc0e8ca..5c2672f8d4 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -18,6 +18,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+const auto GetRegConfig = RegisterConfiguration::Turbofan;
+
namespace {
typedef float float32;
typedef double float64;
@@ -76,12 +78,8 @@ class Pairs {
class RegisterPairs : public Pairs {
public:
RegisterPairs()
- : Pairs(
- 100,
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_general_registers(),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->allocatable_general_codes()) {}
+ : Pairs(100, GetRegConfig()->num_allocatable_general_registers(),
+ GetRegConfig()->allocatable_general_codes()) {}
};
@@ -89,12 +87,8 @@ class RegisterPairs : public Pairs {
class Float32RegisterPairs : public Pairs {
public:
Float32RegisterPairs()
- : Pairs(
- 100,
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_aliased_double_registers(),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->allocatable_double_codes()) {}
+ : Pairs(100, GetRegConfig()->num_allocatable_aliased_double_registers(),
+ GetRegConfig()->allocatable_double_codes()) {}
};
@@ -102,12 +96,8 @@ class Float32RegisterPairs : public Pairs {
class Float64RegisterPairs : public Pairs {
public:
Float64RegisterPairs()
- : Pairs(
- 100,
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_aliased_double_registers(),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->allocatable_double_codes()) {}
+ : Pairs(100, GetRegConfig()->num_allocatable_double_registers(),
+ GetRegConfig()->allocatable_double_codes()) {}
};
@@ -136,28 +126,26 @@ struct Allocator {
if (IsFloatingPoint(type.representation())) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
- return LinkageLocation::ForRegister(fp_regs[fp_offset++]);
+ int code = fp_regs[fp_offset++];
+ return LinkageLocation::ForRegister(code, type);
} else {
int offset = -1 - stack_offset;
stack_offset += StackWords(type);
- return LinkageLocation::ForCallerFrameSlot(offset);
+ return LinkageLocation::ForCallerFrameSlot(offset, type);
}
} else {
// Allocate a general purpose register/stack location.
if (gp_offset < gp_count) {
- return LinkageLocation::ForRegister(gp_regs[gp_offset++]);
+ return LinkageLocation::ForRegister(gp_regs[gp_offset++], type);
} else {
int offset = -1 - stack_offset;
stack_offset += StackWords(type);
- return LinkageLocation::ForCallerFrameSlot(offset);
+ return LinkageLocation::ForCallerFrameSlot(offset, type);
}
}
}
int StackWords(MachineType type) {
- // TODO(titzer): hack. float32 occupies 8 bytes on stack.
- int size = IsFloatingPoint(type.representation())
- ? kDoubleSize
- : (1 << ElementSizeLog2Of(type.representation()));
+ int size = 1 << ElementSizeLog2Of(type.representation());
return size <= kPointerSize ? 1 : size / kPointerSize;
}
void Reset() {
@@ -200,7 +188,6 @@ class RegisterConfig {
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
- msig, // machine_sig
locations.Build(), // location_sig
stack_param_count, // stack_parameter_count
compiler::Operator::kNoProperties, // properties
@@ -255,7 +242,7 @@ class Int32Signature : public MachineSignature {
Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
- CompilationInfo info("testing", isolate, graph->zone());
+ CompilationInfo info(ArrayVector("testing"), isolate, graph->zone());
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, graph, schedule);
CHECK(!code.is_null());
@@ -271,9 +258,7 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
Zone zone(inner->GetIsolate()->allocator());
- MachineSignature* msig =
- const_cast<MachineSignature*>(desc->GetMachineSignature());
- int param_count = static_cast<int>(msig->parameter_count());
+ int param_count = static_cast<int>(desc->ParameterCount());
GraphAndBuilders caller(&zone);
{
GraphAndBuilders& b = caller;
@@ -299,6 +284,7 @@ Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
b.graph()->SetEnd(ret);
}
+ MachineSignature* msig = desc->GetMachineSignature(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, msig);
return CompileGraph("wrapper", cdesc, caller.graph());
@@ -419,7 +405,7 @@ void ArgsBuffer<float64>::Mutate() {
int ParamCount(CallDescriptor* desc) {
- return static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ return static_cast<int>(desc->ParameterCount());
}
@@ -538,8 +524,7 @@ static void TestInt32Sub(CallDescriptor* desc) {
Handle<Code> inner_code = CompileGraph("Int32Sub", desc, inner.graph());
Handle<Code> wrapper = WrapWithCFunction(inner_code, desc);
- MachineSignature* msig =
- const_cast<MachineSignature*>(desc->GetMachineSignature());
+ MachineSignature* msig = desc->GetMachineSignature(&zone);
CodeRunner<int32_t> runnable(isolate, wrapper,
CSignature::FromMachine(&zone, msig));
@@ -636,15 +621,14 @@ static void Test_RunInt32SubWithRet(int retreg) {
// Separate tests for parallelization.
-#define TEST_INT32_SUB_WITH_RET(x) \
- TEST(Run_Int32Sub_all_allocatable_pairs_##x) { \
- if (x < Register::kNumRegisters && \
- Register::from_code(x).IsAllocatable()) { \
- Test_RunInt32SubWithRet(x); \
- } \
+#define TEST_INT32_SUB_WITH_RET(x) \
+ TEST(Run_Int32Sub_all_allocatable_pairs_##x) { \
+ if (x < Register::kNumRegisters && \
+ GetRegConfig()->IsAllocatableGeneralCode(x)) { \
+ Test_RunInt32SubWithRet(x); \
+ } \
}
-
TEST_INT32_SUB_WITH_RET(0)
TEST_INT32_SUB_WITH_RET(1)
TEST_INT32_SUB_WITH_RET(2)
@@ -692,9 +676,7 @@ TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
base::AccountingAllocator allocator;
Zone zone(&allocator);
int parray[2];
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
pairs.Next(&parray[0], &parray[1], false);
Allocator params(parray, 2, nullptr, 0);
Allocator rets(rarray, 1, nullptr, 0);
@@ -741,14 +723,12 @@ static int32_t Compute_Int32_WeightedSum(CallDescriptor* desc, int32_t* input) {
static void Test_Int32_WeightedSum_of_size(int count) {
Int32Signature sig(count);
for (int p0 = 0; p0 < Register::kNumRegisters; p0++) {
- if (Register::from_code(p0).IsAllocatable()) {
+ if (GetRegConfig()->IsAllocatableGeneralCode(p0)) {
base::AccountingAllocator allocator;
Zone zone(&allocator);
int parray[] = {p0};
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
Allocator params(parray, 1, nullptr, 0);
Allocator rets(rarray, 1, nullptr, 0);
RegisterConfig config(params, rets);
@@ -801,12 +781,8 @@ static void RunSelect(CallDescriptor* desc) {
template <int which>
void Test_Int32_Select() {
- int parray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0)};
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0)};
+ int parray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
Allocator params(parray, 1, nullptr, 0);
Allocator rets(rarray, 1, nullptr, 0);
RegisterConfig config(params, rets);
@@ -843,14 +819,10 @@ TEST_INT32_SELECT(63)
TEST(Int64Select_registers) {
- if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_general_registers() < 2)
- return;
+ if (GetRegConfig()->num_allocatable_general_registers() < 2) return;
if (kPointerSize < 8) return; // TODO(titzer): int64 on 32-bit platforms
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
ArgsBuffer<int64_t>::Sig sig(2);
RegisterPairs pairs;
@@ -871,14 +843,11 @@ TEST(Int64Select_registers) {
TEST(Float32Select_registers) {
- if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_double_registers() < 2) {
+ if (GetRegConfig()->num_allocatable_double_registers() < 2) {
return;
}
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
ArgsBuffer<float32>::Sig sig(2);
Float32RegisterPairs pairs;
@@ -899,15 +868,9 @@ TEST(Float32Select_registers) {
TEST(Float64Select_registers) {
- if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_double_registers() < 2)
- return;
- if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_general_registers() < 2)
- return;
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0)};
+ if (GetRegConfig()->num_allocatable_double_registers() < 2) return;
+ if (GetRegConfig()->num_allocatable_general_registers() < 2) return;
+ int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
ArgsBuffer<float64>::Sig sig(2);
Float64RegisterPairs pairs;
@@ -928,9 +891,7 @@ TEST(Float64Select_registers) {
TEST(Float32Select_stack_params_return_reg) {
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
@@ -951,9 +912,7 @@ TEST(Float32Select_stack_params_return_reg) {
TEST(Float64Select_stack_params_return_reg) {
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
RegisterConfig config(params, rets);
@@ -1006,9 +965,7 @@ static void Build_Select_With_Call(CallDescriptor* desc,
TEST(Float64StackParamsToStackParams) {
- int rarray[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0)};
+ int rarray[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
Allocator params(nullptr, 0, nullptr, 0);
Allocator rets(nullptr, 0, rarray, 1);
@@ -1027,9 +984,7 @@ TEST(Float64StackParamsToStackParams) {
void MixedParamTest(int start) {
- if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_double_registers() < 2)
- return;
+ if (GetRegConfig()->num_double_registers() < 2) return;
// TODO(titzer): mix in 64-bit types on all platforms when supported.
#if V8_TARGET_ARCH_32_BIT
@@ -1058,22 +1013,12 @@ void MixedParamTest(int start) {
const int num_params = static_cast<int>(arraysize(types) - start);
// Build call descriptor
- int parray_gp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(1)};
- int rarray_gp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0)};
- int parray_fp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(1)};
- int rarray_fp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0)};
+ int parray_gp[] = {GetRegConfig()->GetAllocatableGeneralCode(0),
+ GetRegConfig()->GetAllocatableGeneralCode(1)};
+ int rarray_gp[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
+ int parray_fp[] = {GetRegConfig()->GetAllocatableDoubleCode(0),
+ GetRegConfig()->GetAllocatableDoubleCode(1)};
+ int rarray_fp[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
Allocator palloc(parray_gp, 2, parray_fp, 2);
Allocator ralloc(rarray_gp, 1, rarray_fp, 1);
RegisterConfig config(palloc, ralloc);
@@ -1174,29 +1119,17 @@ void TestStackSlot(MachineType slot_type, T expected) {
// Test: Generate with a function f which reserves a stack slot, call an inner
// function g from f which writes into the stack slot of f.
- if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->num_allocatable_double_registers() < 2)
- return;
+ if (GetRegConfig()->num_allocatable_double_registers() < 2) return;
Isolate* isolate = CcTest::InitIsolateOnce();
// Lots of code to generate the build descriptor for the inner function.
- int parray_gp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(1)};
- int rarray_gp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0)};
- int parray_fp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0),
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(1)};
- int rarray_fp[] = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableDoubleCode(0)};
+ int parray_gp[] = {GetRegConfig()->GetAllocatableGeneralCode(0),
+ GetRegConfig()->GetAllocatableGeneralCode(1)};
+ int rarray_gp[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
+ int parray_fp[] = {GetRegConfig()->GetAllocatableDoubleCode(0),
+ GetRegConfig()->GetAllocatableDoubleCode(1)};
+ int rarray_fp[] = {GetRegConfig()->GetAllocatableDoubleCode(0)};
Allocator palloc(parray_gp, 2, parray_fp, 2);
Allocator ralloc(rarray_gp, 1, rarray_fp, 1);
RegisterConfig config(palloc, ralloc);
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index c7452191bf..feb25c992a 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -27,7 +27,7 @@ TEST(RunStringLengthStub) {
// Create code and an accompanying descriptor.
StringLengthStub stub(isolate);
Handle<Code> code = stub.GenerateCode();
- CompilationInfo info("test", isolate, zone,
+ CompilationInfo info(ArrayVector("test"), isolate, zone,
Code::ComputeFlags(Code::HANDLER));
CallInterfaceDescriptor interface_descriptor =
stub.GetCallInterfaceDescriptor();
diff --git a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
new file mode 100644
index 0000000000..4536725d4f
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test enabled only on supported architectures.
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_ARM64)
+
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+TEST(RunUnwindingInfo) {
+ FLAG_turbo = true;
+ FLAG_perf_prof_unwinding_info = true;
+
+ FunctionTester tester(
+ "(function (x) {\n"
+ " function f(x) { return x*x; }\n"
+ " return x > 0 ? x+1 : f(x);\n"
+ "})");
+
+ tester.Call(tester.Val(-1));
+
+ CHECK(tester.function->code()->has_unwinding_info());
+}
+
+// TODO(ssanfilippo) Build low-level graph and check that state is correctly
+// restored in the following situation:
+//
+//                           +-----------------+
+//                           |    no frame     |---+
+//  check that a             +-----------------+   |
+//  no-frame state           | construct frame |<--+
+//  is restored here  -->    +-----------------+   |
+//                           | construct frame |<--+
+//                           +-----------------+
+//
+// Same for <construct>/<destruct>/<destruct> (a <construct> status is restored)
+
+// TODO(ssanfilippo) Intentionally reach a BB with different initial states
+// and check that the UnwindingInfoWriter fails in debug mode:
+//
+// +----------------+
+// +---| State A |
+// | +----------------+
+// | | State B != A |---+
+// | +----------------+ |
+// +-->| Failure here |<--+
+// +----------------+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
new file mode 100644
index 0000000000..0b23669cf7
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
@@ -0,0 +1,170 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
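+// Walks the relocation info of the given code object and patches every wasm
+// memory base/size reference from the old buffer to the new one, flushing the
+// instruction cache if anything was modified.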
+static void UpdateMemoryReferences(Handle<Code> code, Address old_base,
+ Address new_base, uint32_t old_size,
+ uint32_t new_size) {
+ Isolate* isolate = CcTest::i_isolate();
+ bool modified = false;
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode) ||
+ RelocInfo::IsWasmMemorySizeReference(mode)) {
+ // Patch addresses with change in memory start address
+ it.rinfo()->update_wasm_memory_reference(old_base, new_base, old_size,
+ new_size);
+ modified = true;
+ }
+ }
+ if (modified) {
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
+}
+
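+// Emits a load from a relocatable buffer address and a store to the next
+// element, runs the code against the original buffer, then patches the
+// embedded pointers to a second buffer and checks that the same code now
+// operates on the new memory.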
+template <typename CType>
+static void RunLoadStoreRelocation(MachineType rep) {
+ const int kNumElems = 2;
+ CType buffer[kNumElems];
+ CType new_buffer[kNumElems];
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ byte* new_raw = reinterpret_cast<byte*>(new_buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
+ new_raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
+ }
+ int32_t OK = 0x29000;
+ RawMachineAssemblerTester<uint32_t> m;
+ Node* base = m.RelocatableIntPtrConstant(reinterpret_cast<intptr_t>(raw),
+ RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* base1 = m.RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(raw + sizeof(CType)),
+ RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* index = m.Int32Constant(0);
+ Node* load = m.Load(rep, base, index);
+ m.Store(rep.representation(), base1, index, load, kNoWriteBarrier);
+ m.Return(m.Int32Constant(OK));
+ CHECK(buffer[0] != buffer[1]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(buffer[0] == buffer[1]);
+ m.GenerateCode();
+ Handle<Code> code = m.GetCode();
+ UpdateMemoryReferences(code, raw, new_raw, sizeof(buffer),
+ sizeof(new_buffer));
+ CHECK(new_buffer[0] != new_buffer[1]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(new_buffer[0] == new_buffer[1]);
+}
+
+TEST(RunLoadStoreRelocation) {
+ RunLoadStoreRelocation<int8_t>(MachineType::Int8());
+ RunLoadStoreRelocation<uint8_t>(MachineType::Uint8());
+ RunLoadStoreRelocation<int16_t>(MachineType::Int16());
+ RunLoadStoreRelocation<uint16_t>(MachineType::Uint16());
+ RunLoadStoreRelocation<int32_t>(MachineType::Int32());
+ RunLoadStoreRelocation<uint32_t>(MachineType::Uint32());
+ RunLoadStoreRelocation<void*>(MachineType::AnyTagged());
+ RunLoadStoreRelocation<float>(MachineType::Float32());
+ RunLoadStoreRelocation<double>(MachineType::Float64());
+}
+
+template <typename CType>
+static void RunLoadStoreRelocationOffset(MachineType rep) {
+ RawMachineAssemblerTester<int32_t> r(MachineType::Int32());
+ const int kNumElems = 4;
+ CType buffer[kNumElems];
+ CType new_buffer[kNumElems + 1];
+
+ for (int32_t x = 0; x < kNumElems; x++) {
+ int32_t y = kNumElems - x - 1;
+ // initialize the buffer with raw data.
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t OK = 0x29000 + x;
+ Node* base = m.RelocatableIntPtrConstant(reinterpret_cast<intptr_t>(buffer),
+ RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
+ Node* load = m.Load(rep, base, index0);
+ Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
+ m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
+ m.Return(m.Int32Constant(OK));
+
+ CHECK(buffer[x] != buffer[y]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(buffer[x] == buffer[y]);
+ m.GenerateCode();
+
+    // Initialize the new buffer and zero out the old buffer.
+ byte* new_raw = reinterpret_cast<byte*>(new_buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = 0;
+ new_raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ // Perform relocation on generated code
+ Handle<Code> code = m.GetCode();
+ UpdateMemoryReferences(code, raw, new_raw, sizeof(buffer),
+ sizeof(new_buffer));
+
+ CHECK(new_buffer[x] != new_buffer[y]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(new_buffer[x] == new_buffer[y]);
+ }
+}
+
+TEST(RunLoadStoreRelocationOffset) {
+ RunLoadStoreRelocationOffset<int8_t>(MachineType::Int8());
+ RunLoadStoreRelocationOffset<uint8_t>(MachineType::Uint8());
+ RunLoadStoreRelocationOffset<int16_t>(MachineType::Int16());
+ RunLoadStoreRelocationOffset<uint16_t>(MachineType::Uint16());
+ RunLoadStoreRelocationOffset<int32_t>(MachineType::Int32());
+ RunLoadStoreRelocationOffset<uint32_t>(MachineType::Uint32());
+ RunLoadStoreRelocationOffset<void*>(MachineType::AnyTagged());
+ RunLoadStoreRelocationOffset<float>(MachineType::Float32());
+ RunLoadStoreRelocationOffset<double>(MachineType::Float64());
+}
+
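+// A wasm-style bounds check against a relocatable memory-size constant: the
+// branch first takes the out-of-bounds path, then passes once the size
+// reference is patched from 0x200 to 0x400.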
+TEST(Uint32LessThanRelocation) {
+ RawMachineAssemblerTester<uint32_t> m;
+ RawMachineLabel within_bounds, out_of_bounds;
+ Node* index = m.Int32Constant(0x200);
+ Node* limit =
+ m.RelocatableInt32Constant(0x200, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ Node* cond = m.AddNode(m.machine()->Uint32LessThan(), index, limit);
+ m.Branch(cond, &within_bounds, &out_of_bounds);
+ m.Bind(&within_bounds);
+ m.Return(m.Int32Constant(0xaced));
+ m.Bind(&out_of_bounds);
+ m.Return(m.Int32Constant(0xdeadbeef));
+ // Check that index is out of bounds with current size
+ CHECK_EQ(0xdeadbeef, m.Call());
+ m.GenerateCode();
+
+ Handle<Code> code = m.GetCode();
+ UpdateMemoryReferences(code, reinterpret_cast<Address>(1234),
+ reinterpret_cast<Address>(1234), 0x200, 0x400);
+ // Check that after limit is increased, index is within bounds.
+ CHECK_EQ(0xaced, m.Call());
+}
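The relocation tests above all follow one pattern: the generated code embeds the buffer address (and size) as relocatable constants tagged RelocInfo::WASM_MEMORY_REFERENCE / WASM_MEMORY_SIZE_REFERENCE, and UpdateMemoryReferences later patches those constants in place, after which the same code object must act on the new buffer. A standalone analogy of that pattern, in plain C++ with no V8 internals (FakeCode and its members are illustrative only):

    // "Generated code" is modeled as a struct holding an embedded pointer
    // that we patch afterwards, mimicking how UpdateMemoryReferences rewrites
    // WASM_MEMORY_REFERENCE constants inside a Code object.
    #include <cassert>
    #include <cstdint>

    struct FakeCode {
      int32_t* mem;                    // stands in for the embedded address
      void Run() { mem[1] = mem[0]; }  // load mem[0], store to mem[1]
    };

    int main() {
      int32_t old_buf[2] = {1, 2};
      int32_t new_buf[2] = {3, 4};
      FakeCode code{old_buf};
      code.Run();
      assert(old_buf[0] == old_buf[1]);  // code acted on the old buffer
      code.mem = new_buf;                // "relocation": patch the reference
      code.Run();
      assert(new_buf[0] == new_buf[1]);  // same code, new buffer
      return 0;
    }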
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index b5e992915f..2e3dcd148a 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -6,13 +6,14 @@
#include "src/ast/scopes.h"
#include "src/compiler/access-builder.h"
-#include "src/compiler/change-lowering.h"
#include "src/compiler/control-builders.h"
-#include "src/compiler/graph-reducer.h"
+#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/memory-optimizer.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/representation-change.h"
+#include "src/compiler/scheduler.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/source-position.h"
#include "src/compiler/typer.h"
@@ -36,14 +37,15 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
SimplifiedLoweringTester(MachineType p0 = MachineType::None(),
MachineType p1 = MachineType::None())
: GraphBuilderTester<ReturnType>(p0, p1),
- typer(this->isolate(), this->graph()),
+ typer(new Typer(this->isolate(), this->graph())),
javascript(this->zone()),
jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
this->simplified(), this->machine()),
source_positions(jsgraph.graph()),
lowering(&jsgraph, this->zone(), &source_positions) {}
+ ~SimplifiedLoweringTester() final { delete typer; }
- Typer typer;
+ Typer* typer = nullptr;
JSOperatorBuilder javascript;
JSGraph jsgraph;
SourcePositionTable source_positions;
@@ -51,20 +53,24 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
void LowerAllNodes() {
this->End();
- typer.Run();
+ typer->Run();
+ delete typer, typer = nullptr;
lowering.LowerAllNodes();
}
void LowerAllNodesAndLowerChanges() {
this->End();
- typer.Run();
+ typer->Run();
+ delete typer, typer = nullptr;
lowering.LowerAllNodes();
- ChangeLowering lowering(&jsgraph);
- GraphReducer reducer(this->zone(), this->graph());
- reducer.AddReducer(&lowering);
- reducer.ReduceGraph();
- Verifier::Run(this->graph());
+ Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
+ Scheduler::kNoFlags);
+ EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
+ linearizer.Run();
+
+ MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
+ memory_optimizer.Optimize();
}
void CheckNumberCall(double expected, double input) {
@@ -99,13 +105,15 @@ TEST(RunNumberToInt32_float64) {
double input;
int32_t result;
SimplifiedLoweringTester<Object*> t;
- FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
- MachineType::Float64()};
+ FieldAccess load = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToInt32(loaded);
- FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
- MachineType::Int32()};
+ FieldAccess store = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Signed32(),
+ MachineType::Int32(), kNoWriteBarrier};
t.StoreField(store, t.PointerConstant(&result), convert);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodesAndLowerChanges();
@@ -126,13 +134,15 @@ TEST(RunNumberToUint32_float64) {
double input;
uint32_t result;
SimplifiedLoweringTester<Object*> t;
- FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
- MachineType::Float64()};
+ FieldAccess load = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToUint32(loaded);
- FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
- MachineType::Uint32()};
+ FieldAccess store = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Unsigned32(),
+ MachineType::Uint32(), kNoWriteBarrier};
t.StoreField(store, t.PointerConstant(&result), convert);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodesAndLowerChanges();
@@ -291,8 +301,12 @@ TEST(RunLoadFieldFromUntaggedBase) {
for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
- FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), MachineType::AnyTagged()};
+ FieldAccess access = {kUntaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::Integral32(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadField(access, t.PointerConstant(smis));
@@ -313,8 +327,12 @@ TEST(RunStoreFieldToUntaggedBase) {
for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
- FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), MachineType::AnyTagged()};
+ FieldAccess access = {kUntaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::Integral32(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* p0 = t.Parameter(0);
@@ -340,7 +358,7 @@ TEST(RunLoadElementFromUntaggedBase) {
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadElement(access, t.PointerConstant(smis),
@@ -366,7 +384,7 @@ TEST(RunStoreElementFromUntaggedBase) {
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* p0 = t.Parameter(0);
@@ -518,7 +536,7 @@ class AccessTester : public HandleAndZoneScope {
ElementAccess GetElementAccess() {
ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
tagged ? FixedArrayBase::kHeaderSize : 0,
- Type::Any(), rep};
+ Type::Any(), rep, kFullWriteBarrier};
return access;
}
@@ -526,7 +544,10 @@ class AccessTester : public HandleAndZoneScope {
int offset = field * sizeof(E);
FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
- Handle<Name>(), Type::Any(), rep};
+ Handle<Name>(),
+ Type::Any(),
+ rep,
+ kFullWriteBarrier};
return access;
}
@@ -664,7 +685,7 @@ TEST(RunAllocate) {
// Fills in most of the nodes of the graph in order to make tests shorter.
class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
public:
- Typer typer;
+ Typer* typer = nullptr;
JSOperatorBuilder javascript;
JSGraph jsgraph;
Node* p0;
@@ -677,7 +698,7 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
Type* p2_type = Type::None())
: GraphAndBuilders(main_zone()),
- typer(main_isolate(), graph()),
+ typer(new Typer(main_isolate(), graph())),
javascript(main_zone()),
jsgraph(main_isolate(), graph(), common(), &javascript, simplified(),
machine()) {
@@ -690,11 +711,12 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
p0 = graph()->NewNode(common()->Parameter(0), start);
p1 = graph()->NewNode(common()->Parameter(1), start);
p2 = graph()->NewNode(common()->Parameter(2), start);
- typer.Run();
+ typer->Run();
NodeProperties::SetType(p0, p0_type);
NodeProperties::SetType(p1, p1_type);
NodeProperties::SetType(p2, p2_type);
}
+ ~TestingGraph() { delete typer; }
void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
Node* node = Return(graph()->NewNode(op, p0, p1));
@@ -718,19 +740,25 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
}
void Lower() {
+ delete typer;
SourcePositionTable table(jsgraph.graph());
SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
+ typer = new Typer(main_isolate(), graph());
}
void LowerAllNodesAndLowerChanges() {
+ delete typer;
SourcePositionTable table(jsgraph.graph());
SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
- ChangeLowering lowering(&jsgraph);
- GraphReducer reducer(this->zone(), this->graph());
- reducer.AddReducer(&lowering);
- reducer.ReduceGraph();
- Verifier::Run(this->graph());
+ Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
+ Scheduler::kNoFlags);
+ EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
+ linearizer.Run();
+
+ MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
+ memory_optimizer.Optimize();
+ typer = new Typer(main_isolate(), graph());
}
// Inserts the node as the return value of the graph.
@@ -783,7 +811,7 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
return graph()->NewNode(machine()->Word32Equal(), node,
jsgraph.Int32Constant(1));
} else {
- return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
+ return graph()->NewNode(simplified()->ReferenceEqual(), node,
jsgraph.TrueConstant());
}
}
@@ -831,7 +859,7 @@ TEST(LowerBooleanNot_bit_tagged) {
Node* use = t.Use(inv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
Node* cmp = use->InputAt(0)->InputAt(0);
CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
@@ -863,7 +891,7 @@ TEST(LowerBooleanNot_tagged_tagged) {
Node* use = t.Use(inv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
Node* cmp = use->InputAt(0)->InputAt(0);
CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
@@ -871,63 +899,6 @@ TEST(LowerBooleanNot_tagged_tagged) {
CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
}
-
-TEST(LowerBooleanToNumber_bit_int32) {
- // BooleanToNumber(x: kRepBit) used as MachineType::Int32()
- TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(MachineType::Bool());
- Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, MachineType::Int32());
- t.Return(use);
- t.Lower();
- CHECK_EQ(b, use->InputAt(0));
-}
-
-
-TEST(LowerBooleanToNumber_tagged_int32) {
- // BooleanToNumber(x: kRepTagged) used as MachineType::Int32()
- TestingGraph t(Type::Boolean());
- Node* b = t.p0;
- Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, MachineType::Int32());
- t.Return(use);
- t.Lower();
- CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
- CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
- Node* c = t.jsgraph.TrueConstant();
- CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
-}
-
-
-TEST(LowerBooleanToNumber_bit_tagged) {
- // BooleanToNumber(x: kRepBit) used as MachineType::AnyTagged()
- TestingGraph t(Type::Boolean());
- Node* b = t.ExampleWithOutput(MachineType::Bool());
- Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, MachineType::AnyTagged());
- t.Return(use);
- t.Lower();
- CHECK_EQ(b, use->InputAt(0)->InputAt(0));
- CHECK_EQ(IrOpcode::kChangeUint32ToTagged, use->InputAt(0)->opcode());
-}
-
-
-TEST(LowerBooleanToNumber_tagged_tagged) {
- // BooleanToNumber(x: kRepTagged) used as MachineType::AnyTagged()
- TestingGraph t(Type::Boolean());
- Node* b = t.p0;
- Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
- Node* use = t.Use(cnv, MachineType::AnyTagged());
- t.Return(use);
- t.Lower();
- CHECK_EQ(cnv, use->InputAt(0)->InputAt(0));
- CHECK_EQ(IrOpcode::kChangeUint32ToTagged, use->InputAt(0)->opcode());
- CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
- CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
- Node* c = t.jsgraph.TrueConstant();
- CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
-}
-
static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
Type::Number()};
@@ -1046,8 +1017,7 @@ TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
}
-
-TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
+TEST(LowerNumberToInt32_to_TruncateFloat64ToWord32) {
// NumberToInt32(x: kRepFloat64) used as MachineType::Int32()
TestingGraph t(Type::Number());
Node* p0 = t.ExampleWithOutput(MachineType::Float64());
@@ -1055,22 +1025,17 @@ TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
Node* use = t.Use(trunc, MachineType::Int32());
t.Return(use);
t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
}
-
-TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
+TEST(LowerNumberToInt32_to_TruncateTaggedToWord32) {
// NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Int32()
TestingGraph t(Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
Node* use = t.Use(trunc, MachineType::Int32());
t.Return(use);
t.Lower();
- Node* node = use->InputAt(0);
- CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
- Node* of = node->InputAt(0);
- CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
- CHECK_EQ(t.p0, of->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
}
@@ -1084,8 +1049,7 @@ TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
+TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32) {
// NumberToUint32(x: kRepFloat64) used as MachineType::Uint32()
TestingGraph t(Type::Number());
Node* p0 = t.ExampleWithOutput(MachineType::Float64());
@@ -1095,26 +1059,20 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
Node* use = t.Use(trunc, MachineType::Uint32());
t.Return(use);
t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
+TEST(LowerNumberToUint32_to_TruncateTaggedToWord32) {
// NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Uint32()
TestingGraph t(Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
Node* use = t.Use(trunc, MachineType::Uint32());
t.Return(use);
t.Lower();
- Node* node = use->InputAt(0);
- CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
- Node* of = node->InputAt(0);
- CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
- CHECK_EQ(t.p0, of->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
+TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32_uint32) {
// NumberToUint32(x: kRepFloat64) used as kRepWord32
TestingGraph t(Type::Unsigned32());
Node* input = t.ExampleWithOutput(MachineType::Float64());
@@ -1122,7 +1080,7 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
Node* use = t.Use(trunc, MachineType::RepWord32());
t.Return(use);
t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, input, use->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, input, use->InputAt(0));
}
@@ -1130,7 +1088,7 @@ TEST(LowerReferenceEqual_to_wordeq) {
TestingGraph t(Type::Any(), Type::Any());
IrOpcode::Value opcode =
static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
- t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual(Type::Any()));
+ t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual());
}
void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
@@ -1150,7 +1108,7 @@ TEST(InsertBasicChanges) {
MachineType::Int32(), Type::Signed32());
CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, MachineType::Float64(),
MachineType::Uint32(), Type::Unsigned32());
- CheckChangeInsertion(IrOpcode::kTruncateFloat64ToInt32,
+ CheckChangeInsertion(IrOpcode::kTruncateFloat64ToWord32,
MachineType::Float64(), MachineType::Uint32(),
Type::Integral32());
CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, MachineType::AnyTagged(),
@@ -1160,7 +1118,7 @@ TEST(InsertBasicChanges) {
Type::Unsigned32());
CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, MachineType::Float64(),
- MachineType::AnyTagged());
+ MachineType::AnyTagged(), Type::Number());
CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64,
MachineType::AnyTagged(), MachineType::Float64(),
Type::Number());
@@ -1178,8 +1136,7 @@ TEST(InsertBasicChanges) {
static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
IrOpcode::Value input_change,
- IrOpcode::Value output_change,
- Type* type = Type::Any()) {
+ IrOpcode::Value output_change, Type* type) {
Node* binop =
op->ControlInputCount() == 0
? t->graph()->NewNode(op, t->p0, t->p1)
@@ -1222,7 +1179,7 @@ TEST(InsertChangesAroundInt32Cmp) {
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
- IrOpcode::kChangeBitToBool);
+ IrOpcode::kChangeBitToTagged, Type::Boolean());
}
}
@@ -1235,7 +1192,7 @@ TEST(InsertChangesAroundUint32Cmp) {
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
- IrOpcode::kChangeBitToBool);
+ IrOpcode::kChangeBitToTagged, Type::Boolean());
}
}
@@ -1251,7 +1208,7 @@ TEST(InsertChangesAroundFloat64Binops) {
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
- IrOpcode::kChangeFloat64ToTagged);
+ IrOpcode::kChangeFloat64ToTagged, Type::Number());
}
}
@@ -1265,7 +1222,7 @@ TEST(InsertChangesAroundFloat64Cmp) {
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
- IrOpcode::kChangeBitToBool);
+ IrOpcode::kChangeBitToTagged, Type::Boolean());
}
}
@@ -1281,23 +1238,38 @@ void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
Node* index = load_or_store->InputAt(1);
if (kPointerSize == 8) {
+ Int64BinopMatcher mindex(index);
+ CHECK_EQ(IrOpcode::kInt64Add, mindex.node()->opcode());
+ CHECK(mindex.right().Is(access.header_size - access.tag()));
+
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ Node* index;
+ if (element_size_shift) {
+ Int64BinopMatcher shl(mindex.left().node());
+ CHECK_EQ(IrOpcode::kWord64Shl, shl.node()->opcode());
+ CHECK(shl.right().Is(element_size_shift));
+ index = shl.left().node();
+ } else {
+ index = mindex.left().node();
+ }
CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
- index = index->InputAt(0);
- }
-
- Int32BinopMatcher mindex(index);
- CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
- CHECK(mindex.right().Is(access.header_size - access.tag()));
-
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- if (element_size_shift) {
- Int32BinopMatcher shl(mindex.left().node());
- CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
- CHECK(shl.right().Is(element_size_shift));
- return shl.left().node();
+ return index->InputAt(0);
} else {
- return mindex.left().node();
+ Int32BinopMatcher mindex(index);
+ CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
+ CHECK(mindex.right().Is(access.header_size - access.tag()));
+
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ if (element_size_shift) {
+ Int32BinopMatcher shl(mindex.left().node());
+ CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
+ CHECK(shl.right().Is(element_size_shift));
+ return shl.left().node();
+ } else {
+ return mindex.left().node();
+ }
}
}
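For reference, the 64-bit shape matched above is Int64Add(Word64Shl(ChangeUint32ToUint64(index), shift), header_size - tag). Worked example, assuming a Float64 element in a tagged FixedArray on a 64-bit target (kHeaderSize = 16, heap-object tag = 1): the shift is log2(8) = 3, so the lowered byte offset is (index << 3) + 15, the -1 accounting for untagging the base pointer.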
@@ -1311,11 +1283,11 @@ const MachineType kMachineReps[] = {
TEST(LowerLoadField_to_load) {
- TestingGraph t(Type::Any(), Type::Signed32());
-
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachineReps[i]};
+ TestingGraph t(Type::Any(), Type::Signed32());
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ kMachineReps[i], kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
t.start, t.start);
@@ -1337,9 +1309,9 @@ TEST(LowerStoreField_to_store) {
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachineReps[i]};
-
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ kMachineReps[i], kNoWriteBarrier};
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
@@ -1352,7 +1324,7 @@ TEST(LowerStoreField_to_store) {
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+ CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
@@ -1362,9 +1334,9 @@ TEST(LowerStoreField_to_store) {
Zone* z = scope.main_zone();
TestingGraph t(Type::Any(), Type::Intersect(Type::SignedSmall(),
Type::TaggedSigned(), z));
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
+ Type::Any(), MachineType::AnyTagged(), kNoWriteBarrier};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
t.Effect(store);
@@ -1378,11 +1350,10 @@ TEST(LowerStoreField_to_store) {
TEST(LowerLoadElement_to_load) {
- TestingGraph t(Type::Any(), Type::Signed32());
-
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
+ TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachineReps[i]};
+ Type::Any(), kMachineReps[i], kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
@@ -1401,11 +1372,11 @@ TEST(LowerLoadElement_to_load) {
TEST(LowerStoreElement_to_store) {
{
- TestingGraph t(Type::Any(), Type::Signed32());
-
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
+ TestingGraph t(Type::Any(), Type::Signed32());
+
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachineReps[i]};
+ Type::Any(), kMachineReps[i], kNoWriteBarrier};
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access),
@@ -1418,7 +1389,7 @@ TEST(LowerStoreElement_to_store) {
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+ CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
@@ -1430,7 +1401,8 @@ TEST(LowerStoreElement_to_store) {
Type::Any(), Type::Signed32(),
Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), z));
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), MachineType::AnyTagged()};
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.p1, t.p2, t.start, t.start);
t.Effect(store);
@@ -1448,7 +1420,7 @@ TEST(InsertChangeForLoadElementIndex) {
// Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
@@ -1465,7 +1437,7 @@ TEST(InsertChangeForStoreElementIndex) {
// Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kFullWriteBarrier};
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
@@ -1481,8 +1453,9 @@ TEST(InsertChangeForStoreElementIndex) {
TEST(InsertChangeForLoadElement) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::Float64()};
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Number(), MachineType::Float64(),
+ kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
@@ -1497,9 +1470,9 @@ TEST(InsertChangeForLoadElement) {
TEST(InsertChangeForLoadField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::Float64()};
+ FieldAccess access = {
+ kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
+ Type::Number(), MachineType::Float64(), kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
t.start, t.start);
@@ -1515,7 +1488,7 @@ TEST(InsertChangeForStoreElement) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::Float64()};
+ MachineType::Float64(), kFullWriteBarrier};
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
@@ -1532,9 +1505,9 @@ TEST(InsertChangeForStoreElement) {
TEST(InsertChangeForStoreField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::Float64()};
+ FieldAccess access = {
+ kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
+ Type::Any(), MachineType::Float64(), kNoWriteBarrier};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
@@ -1554,8 +1527,9 @@ TEST(UpdatePhi) {
Type* kTypes[] = {Type::Signed32(), Type::Unsigned32(), Type::Number()};
for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), kTypes[i], kMachineTypes[i]};
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), kTypes[i],
+ kMachineTypes[i], kFullWriteBarrier};
Node* load0 = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
t.start, t.start);
@@ -1573,84 +1547,6 @@ TEST(UpdatePhi) {
}
-TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToInt32(t.Parameter(0));
- Node* div = t.NumberDivide(num, t.jsgraph.Constant(-1));
- Node* trunc = t.NumberToInt32(div);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_INT32_INPUTS(i) {
- int32_t x = 0 - *i;
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
-}
-
-
-TEST(RunNumberMultiply_TruncatingToInt32) {
- int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000, 3000999};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- double k = static_cast<double>(constants[i]);
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToInt32(t.Parameter(0));
- Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
- Node* trunc = t.NumberToInt32(mul);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_INT32_INPUTS(i) {
- int32_t x = DoubleToInt32(static_cast<double>(*i) * k);
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
- }
-}
-
-
-TEST(RunNumberMultiply_TruncatingToUint32) {
- uint32_t constants[] = {0, 1, 2, 3, 4, 100, 1000, 1024, 2048, 3000999};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- double k = static_cast<double>(constants[i]);
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToUint32(t.Parameter(0));
- Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
- Node* trunc = t.NumberToUint32(mul);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_UINT32_INPUTS(i) {
- uint32_t x = DoubleToUint32(static_cast<double>(*i) * k);
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
- }
-}
-
-
-TEST(RunNumberDivide_2_TruncatingToUint32) {
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToUint32(t.Parameter(0));
- Node* div = t.NumberDivide(num, t.jsgraph.Constant(2));
- Node* trunc = t.NumberToUint32(div);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_UINT32_INPUTS(i) {
- uint32_t x = DoubleToUint32(static_cast<double>(*i / 2.0));
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
-}
-
-
TEST(NumberMultiply_ConstantOutOfRange) {
TestingGraph t(Type::Signed32());
Node* k = t.jsgraph.Constant(1000000023);
@@ -1690,29 +1586,6 @@ TEST(NumberDivide_TruncatingToInt32) {
}
-TEST(RunNumberDivide_TruncatingToInt32) {
- int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- int32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToInt32(t.Parameter(0));
- Node* div = t.NumberDivide(num, t.jsgraph.Constant(k));
- Node* trunc = t.NumberToInt32(div);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_INT32_INPUTS(i) {
- if (*i == INT_MAX) continue; // exclude max int.
- int32_t x = DoubleToInt32(static_cast<double>(*i) / k);
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
- }
-}
-
-
TEST(NumberDivide_TruncatingToUint32) {
double constants[] = {1, 3, 100, 1000, 100998348};
@@ -1729,28 +1602,6 @@ TEST(NumberDivide_TruncatingToUint32) {
}
-TEST(RunNumberDivide_TruncatingToUint32) {
- uint32_t constants[] = {100, 10, 1, 1, 2, 4, 1000, 1024, 2048};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- uint32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToUint32(t.Parameter(0));
- Node* div = t.NumberDivide(num, t.jsgraph.Constant(static_cast<double>(k)));
- Node* trunc = t.NumberToUint32(div);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_UINT32_INPUTS(i) {
- uint32_t x = *i / k;
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
- }
-}
-
-
TEST(NumberDivide_BadConstants) {
{
TestingGraph t(Type::Signed32());
@@ -1805,29 +1656,6 @@ TEST(NumberModulus_TruncatingToInt32) {
}
-TEST(RunNumberModulus_TruncatingToInt32) {
- int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- int32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToInt32(t.Parameter(0));
- Node* mod = t.NumberModulus(num, t.jsgraph.Constant(k));
- Node* trunc = t.NumberToInt32(mod);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_INT32_INPUTS(i) {
- if (*i == INT_MAX) continue; // exclude max int.
- int32_t x = DoubleToInt32(std::fmod(static_cast<double>(*i), k));
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
- }
-}
-
-
TEST(NumberModulus_TruncatingToUint32) {
double constants[] = {1, 3, 100, 1000, 100998348};
@@ -1844,29 +1672,6 @@ TEST(NumberModulus_TruncatingToUint32) {
}
-TEST(RunNumberModulus_TruncatingToUint32) {
- uint32_t constants[] = {1, 2, 100, 1000, 1024, 2048};
-
- for (size_t i = 0; i < arraysize(constants); i++) {
- uint32_t k = constants[i];
- SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
- Node* num = t.NumberToUint32(t.Parameter(0));
- Node* mod =
- t.NumberModulus(num, t.jsgraph.Constant(static_cast<double>(k)));
- Node* trunc = t.NumberToUint32(mod);
- t.Return(trunc);
-
- t.LowerAllNodesAndLowerChanges();
- t.GenerateCode();
-
- FOR_UINT32_INPUTS(i) {
- uint32_t x = *i % k;
- t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
- }
- }
-}
-
-
TEST(NumberModulus_Int32) {
int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
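A recurring mechanical change throughout this file: the FieldAccess and ElementAccess aggregates gained a trailing WriteBarrierKind member, so every brace initializer now supplies it explicitly (kNoWriteBarrier for untagged or Smi-typed stores, kFullWriteBarrier where a GC barrier is genuinely needed). A sketch of the widened initializer, with the member roles inferred from usage in these tests rather than quoted from the V8 headers:

    // Inferred layout (annotations assumed, not authoritative): the write
    // barrier kind is now declared up front in the access descriptor instead
    // of being derived during lowering.
    FieldAccess access = {
        kTaggedBase,                  // base taggedness
        FixedArrayBase::kHeaderSize,  // offset in bytes
        Handle<Name>::null(),         // debug name
        Type::Any(),                  // field type
        MachineType::AnyTagged(),     // machine representation
        kNoWriteBarrier               // new: explicit write barrier kind
    };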
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 7d26dbaf0c..297bccb05b 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -82,6 +82,9 @@ class ValueHelper {
-4.66622e+11f,
-2.22581e+11f,
-1.45381e+10f,
+ -2147483904.0f, // Greatest float32 below INT32_MIN
+ -2147483648.0f, // INT32_MIN
+ -2147483520.0f, // Least float32 above INT32_MIN
-1.3956e+09f,
-1.32951e+09f,
-1.30721e+09f,
@@ -110,7 +113,9 @@ class ValueHelper {
-3.63759e-10f,
-4.30175e-14f,
-5.27385e-15f,
+ -1.5707963267948966f,
-1.48084e-15f,
+ -2.220446049250313e-16f,
-1.05755e-19f,
-3.2995e-21f,
-1.67354e-23f,
@@ -129,6 +134,7 @@ class ValueHelper {
6.25073e-22f,
4.1723e-13f,
1.44343e-09f,
+ 1.5707963267948966f,
5.27004e-08f,
9.48298e-08f,
5.57888e-07f,
@@ -148,11 +154,13 @@ class ValueHelper {
20309.0f,
797056.0f,
1.77219e+09f,
+ 2147483648.0f, // INT32_MAX + 1
+ 4294967296.0f, // UINT32_MAX + 1
1.51116e+11f,
4.18193e+13f,
3.59167e+16f,
- 9223372036854775807.0f, // INT64_MAX
- 18446744073709551615.0f, // UINT64_MAX
+ 9223372036854775808.0f, // INT64_MAX + 1
+ 18446744073709551616.0f, // UINT64_MAX + 1
3.38211e+19f,
2.67488e+20f,
1.78831e+21f,
@@ -177,6 +185,7 @@ class ValueHelper {
static std::vector<double> float64_vector() {
static const double nan = std::numeric_limits<double>::quiet_NaN();
static const double values[] = {-2e66,
+ -2.220446049250313e-16,
-9223373136366403584.0,
-9223372036854775808.0, // INT64_MIN
-2147483649.5,
@@ -188,6 +197,7 @@ class ValueHelper {
-999.75,
-2e66,
-1.75,
+ -1.5707963267948966,
-1.0,
-0.5,
-0.0,
@@ -198,7 +208,11 @@ class ValueHelper {
0.375,
0.5,
1.0,
+ 1.17549e-38,
+ 1.56657e-37,
+ 1.0000001,
1.25,
+ 1.5707963267948966,
2,
3.1e7,
5.125,
@@ -211,9 +225,9 @@ class ValueHelper {
2147483648.0,
2147483648.25,
2147483649.25,
- 9223372036854775807.0, // INT64_MAX
+ 9223372036854775808.0, // INT64_MAX + 1
9223373136366403584.0,
- 18446744073709551615.0, // UINT64_MAX
+ 18446744073709551616.0, // UINT64_MAX + 1
2e66,
V8_INFINITY,
-V8_INFINITY,
@@ -318,6 +332,7 @@ static inline void CheckFloatEq(volatile float x, volatile float y) {
CHECK(std::isnan(y));
} else {
CHECK_EQ(x, y);
+ CHECK_EQ(std::signbit(x), std::signbit(y));
}
}
@@ -332,6 +347,7 @@ static inline void CheckDoubleEq(volatile double x, volatile double y) {
CHECK(std::isnan(y));
} else {
CHECK_EQ(x, y);
+ CHECK_EQ(std::signbit(x), std::signbit(y));
}
}
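Two details in the value-helper changes deserve a gloss. First, the boundary comments were corrected because 9223372036854775807.0 (INT64_MAX) is not representable in float or double; the nearest value is 2^63, i.e. INT64_MAX + 1, and likewise 2^64 for UINT64_MAX + 1. Second, the new std::signbit checks exist because IEEE-754 equality cannot distinguish -0.0 from +0.0, so CHECK_EQ alone would miss sign-of-zero bugs. A self-contained illustration of the latter:

    // Why CheckFloatEq/CheckDoubleEq gained a signbit check: +0.0 and -0.0
    // compare equal under operator==, yet carry different sign bits.
    #include <cassert>
    #include <cmath>

    int main() {
      volatile double pz = 0.0;
      volatile double nz = -0.0;
      assert(pz == nz);                              // equal under ==
      assert(std::signbit(pz) != std::signbit(nz));  // signs differ
      return 0;
    }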
diff --git a/deps/v8/test/cctest/expression-type-collector.cc b/deps/v8/test/cctest/expression-type-collector.cc
deleted file mode 100644
index c5218b3ec4..0000000000
--- a/deps/v8/test/cctest/expression-type-collector.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "test/cctest/expression-type-collector.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/codegen.h"
-
-namespace v8 {
-namespace internal {
-namespace {
-
-struct {
- AstNode::NodeType type;
- const char* name;
-} NodeTypeNameList[] = {
-#define DECLARE_VISIT(type) \
- { AstNode::k##type, #type } \
- ,
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-};
-
-} // namespace
-
-
-ExpressionTypeCollector::ExpressionTypeCollector(
- Isolate* isolate, FunctionLiteral* root,
- ZoneVector<ExpressionTypeEntry>* dst)
- : AstExpressionVisitor(isolate, root), result_(dst) {}
-
-
-void ExpressionTypeCollector::Run() {
- result_->clear();
- AstExpressionVisitor::Run();
-}
-
-
-void ExpressionTypeCollector::VisitExpression(Expression* expression) {
- ExpressionTypeEntry e;
- e.depth = depth();
- VariableProxy* proxy = expression->AsVariableProxy();
- if (proxy) {
- e.name = proxy->raw_name();
- }
- e.bounds = expression->bounds();
- AstNode::NodeType type = expression->node_type();
- e.kind = "unknown";
- for (size_t i = 0; i < arraysize(NodeTypeNameList); ++i) {
- if (NodeTypeNameList[i].type == type) {
- e.kind = NodeTypeNameList[i].name;
- break;
- }
- }
- result_->push_back(e);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/expression-type-collector.h b/deps/v8/test/cctest/expression-type-collector.h
deleted file mode 100644
index 37bb9a3c02..0000000000
--- a/deps/v8/test/cctest/expression-type-collector.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_EXPRESSION_TYPE_COLLECTOR_H_
-#define V8_EXPRESSION_TYPE_COLLECTOR_H_
-
-#include "src/ast/ast-expression-visitor.h"
-
-namespace v8 {
-namespace internal {
-
-// A Visitor over an AST that collects a human readable string summarizing
-// structure and types. Used for testing of the typing information attached
-// to the expression nodes of an AST.
-
-struct ExpressionTypeEntry {
- int depth;
- const char* kind;
- const AstRawString* name;
- Bounds bounds;
-};
-
-class ExpressionTypeCollector : public AstExpressionVisitor {
- public:
- ExpressionTypeCollector(Isolate* isolate, FunctionLiteral* root,
- ZoneVector<ExpressionTypeEntry>* dst);
- void Run();
-
- protected:
- void VisitExpression(Expression* expression);
-
- private:
- ZoneVector<ExpressionTypeEntry>* result_;
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_EXPRESSION_TYPE_COLLECTOR_H_
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 599c5d8be2..674bdcb1cc 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -17,6 +17,7 @@
V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
+ V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
V(MarkCompactCollector) \
V(NoPromotion) \
diff --git a/deps/v8/test/cctest/heap/utils-inl.h b/deps/v8/test/cctest/heap/heap-utils.cc
index 56033c151e..7d4d4bf40d 100644
--- a/deps/v8/test/cctest/heap/utils-inl.h
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -1,9 +1,8 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef HEAP_UTILS_H_
-#define HEAP_UTILS_H_
+#include "test/cctest/heap/heap-utils.h"
#include "src/factory.h"
#include "src/heap/heap-inl.h"
@@ -11,18 +10,27 @@
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
-
namespace v8 {
namespace internal {
+namespace heap {
+
+void SealCurrentObjects(Heap* heap) {
+ heap->CollectAllGarbage();
+ heap->CollectAllGarbage();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+ heap->old_space()->EmptyAllocationInfo();
+ for (Page* page : *heap->old_space()) {
+ page->MarkNeverAllocateForTesting();
+ }
+}
-static int LenFromSize(int size) {
+int FixedArrayLenFromSize(int size) {
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
-
-static inline std::vector<Handle<FixedArray>> CreatePadding(
- Heap* heap, int padding_size, PretenureFlag tenure,
- int object_size = Page::kMaxRegularHeapObjectSize) {
+std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
+ PretenureFlag tenure,
+ int object_size) {
std::vector<Handle<FixedArray>> handles;
Isolate* isolate = heap->isolate();
int allocate_memory;
@@ -42,14 +50,17 @@ static inline std::vector<Handle<FixedArray>> CreatePadding(
while (free_memory > 0) {
if (free_memory > object_size) {
allocate_memory = object_size;
- length = LenFromSize(allocate_memory);
+ length = FixedArrayLenFromSize(allocate_memory);
} else {
allocate_memory = free_memory;
- length = LenFromSize(allocate_memory);
+ length = FixedArrayLenFromSize(allocate_memory);
if (length <= 0) {
// Not enough room to create another fixed array. Let's create a filler.
- heap->CreateFillerObjectAt(*heap->old_space()->allocation_top_address(),
- free_memory, ClearRecordedSlots::kNo);
+ if (free_memory > (2 * kPointerSize)) {
+ heap->CreateFillerObjectAt(
+ *heap->old_space()->allocation_top_address(), free_memory,
+ ClearRecordedSlots::kNo);
+ }
break;
}
}
@@ -61,55 +72,46 @@ static inline std::vector<Handle<FixedArray>> CreatePadding(
return handles;
}
-
-// Helper function that simulates a full new-space in the heap.
-static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
- space->DisableInlineAllocationSteps();
- int space_remaining = static_cast<int>(*space->allocation_limit_address() -
- *space->allocation_top_address());
- if (space_remaining == 0) return false;
- CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
- return true;
-}
-
-
-// Helper function that simulates a fill new-space in the heap.
-static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
- int extra_bytes) {
+void AllocateAllButNBytes(v8::internal::NewSpace* space, int extra_bytes,
+ std::vector<Handle<FixedArray>>* out_handles) {
space->DisableInlineAllocationSteps();
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
CHECK(space_remaining >= extra_bytes);
int new_linear_size = space_remaining - extra_bytes;
if (new_linear_size == 0) return;
- CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
+ std::vector<Handle<FixedArray>> handles =
+ heap::CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
+ if (out_handles != nullptr)
+ out_handles->insert(out_handles->end(), handles.begin(), handles.end());
}
-
-static inline void FillCurrentPage(v8::internal::NewSpace* space) {
- AllocateAllButNBytes(space, 0);
+void FillCurrentPage(v8::internal::NewSpace* space,
+ std::vector<Handle<FixedArray>>* out_handles) {
+ heap::AllocateAllButNBytes(space, 0, out_handles);
}
-
-static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
- FillCurrentPage(space);
- while (FillUpOnePage(space)) {
- }
+bool FillUpOnePage(v8::internal::NewSpace* space,
+ std::vector<Handle<FixedArray>>* out_handles) {
+ space->DisableInlineAllocationSteps();
+ int space_remaining = static_cast<int>(*space->allocation_limit_address() -
+ *space->allocation_top_address());
+ if (space_remaining == 0) return false;
+ std::vector<Handle<FixedArray>> handles =
+ heap::CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
+ if (out_handles != nullptr)
+ out_handles->insert(out_handles->end(), handles.begin(), handles.end());
+ return true;
}
-
-// Helper function that simulates a full old-space in the heap.
-static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
- space->EmptyAllocationInfo();
- space->ResetFreeList();
- space->ClearStats();
+void SimulateFullSpace(v8::internal::NewSpace* space,
+ std::vector<Handle<FixedArray>>* out_handles) {
+ heap::FillCurrentPage(space, out_handles);
+ while (heap::FillUpOnePage(space, out_handles) || space->AddFreshPage()) {
+ }
}
-
-// Helper function that simulates many incremental marking steps until
-// marking is completed.
-static inline void SimulateIncrementalMarking(i::Heap* heap,
- bool force_completion = true) {
+void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
if (collector->sweeping_in_progress()) {
@@ -131,7 +133,26 @@ static inline void SimulateIncrementalMarking(i::Heap* heap,
CHECK(marking->IsComplete());
}
+void SimulateFullSpace(v8::internal::PagedSpace* space) {
+ space->EmptyAllocationInfo();
+ space->ResetFreeList();
+ space->ClearStats();
+}
+
+void AbandonCurrentlyFreeMemory(PagedSpace* space) {
+ space->EmptyAllocationInfo();
+ for (Page* page : *space) {
+ page->MarkNeverAllocateForTesting();
+ }
+}
+
+void GcAndSweep(Heap* heap, AllocationSpace space) {
+ heap->CollectGarbage(space);
+ if (heap->mark_compact_collector()->sweeping_in_progress()) {
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+ }
+}
+
+} // namespace heap
} // namespace internal
} // namespace v8
-
-#endif // HEAP_UTILS_H_
diff --git a/deps/v8/test/cctest/heap/heap-utils.h b/deps/v8/test/cctest/heap/heap-utils.h
new file mode 100644
index 0000000000..e03e6fa6e0
--- /dev/null
+++ b/deps/v8/test/cctest/heap/heap-utils.h
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef HEAP_HEAP_UTILS_H_
+#define HEAP_HEAP_UTILS_H_
+
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+namespace heap {
+
+void SealCurrentObjects(Heap* heap);
+
+int FixedArrayLenFromSize(int size);
+
+std::vector<Handle<FixedArray>> CreatePadding(
+ Heap* heap, int padding_size, PretenureFlag tenure,
+ int object_size = Page::kMaxRegularHeapObjectSize);
+
+void AllocateAllButNBytes(
+ v8::internal::NewSpace* space, int extra_bytes,
+ std::vector<Handle<FixedArray>>* out_handles = nullptr);
+
+void FillCurrentPage(v8::internal::NewSpace* space,
+ std::vector<Handle<FixedArray>>* out_handles = nullptr);
+
+// Helper function that fills the current page of |space| with padding;
+// returns false if the page was already full.
+bool FillUpOnePage(v8::internal::NewSpace* space,
+ std::vector<Handle<FixedArray>>* out_handles = nullptr);
+
+void SimulateFullSpace(v8::internal::NewSpace* space,
+ std::vector<Handle<FixedArray>>* out_handles = nullptr);
+
+// Helper function that simulates many incremental marking steps until
+// marking is completed.
+void SimulateIncrementalMarking(i::Heap* heap, bool force_completion = true);
+
+// Helper function that simulates a full old-space in the heap.
+void SimulateFullSpace(v8::internal::PagedSpace* space);
+
+void AbandonCurrentlyFreeMemory(PagedSpace* space);
+
+void GcAndSweep(Heap* heap, AllocationSpace space);
+
+} // namespace heap
+} // namespace internal
+} // namespace v8
+
+#endif // HEAP_HEAP_UTILS_H_
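The new heap-utils.h replaces the old header-only statics from utils-inl.h with ordinary functions in a heap:: namespace, and several helpers now optionally hand back the FixedArray padding they allocate via an out_handles vector so callers can keep it alive. A hedged usage sketch inside a cctest body (every call is taken from its uses elsewhere in this patch; the fragment is not runnable outside the cctest harness):

    // Typical use of the heap:: test helpers from within a cctest.
    CcTest::InitializeVM();
    Heap* heap = CcTest::i_isolate()->heap();

    heap::SealCurrentObjects(heap);          // freeze current old-space pages
    std::vector<Handle<FixedArray>> handles;
    heap::SimulateFullSpace(heap->new_space(), &handles);  // fill new space
    heap::SimulateIncrementalMarking(heap);  // drive marking to completion
    heap::GcAndSweep(heap, OLD_SPACE);       // collect, wait for the sweeper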
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 1b969b21ff..348ba1979d 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -31,7 +31,7 @@
#include "src/accessors.h"
#include "src/api.h"
#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
using namespace v8::internal;
@@ -52,11 +52,11 @@ AllocationResult v8::internal::HeapTester::AllocateAfterFailures() {
heap->CopyJSObject(JSObject::cast(object)).ToObjectChecked();
// Old data space.
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
heap->AllocateByteArray(100, TENURED).ToObjectChecked();
// Old pointer space.
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
heap->AllocateFixedArray(10000, TENURED).ToObjectChecked();
// Large object space.
@@ -72,12 +72,12 @@ AllocationResult v8::internal::HeapTester::AllocateAfterFailures() {
kLargeObjectSpaceFillerLength, TENURED).ToObjectChecked();
// Map space.
- SimulateFullSpace(heap->map_space());
+ heap::SimulateFullSpace(heap->map_space());
int instance_size = JSObject::kHeaderSize;
heap->AllocateMap(JS_OBJECT_TYPE, instance_size).ToObjectChecked();
// Test that we can allocate in old pointer space and code space.
- SimulateFullSpace(heap->code_space());
+ heap::SimulateFullSpace(heap->code_space());
heap->AllocateFixedArray(100, TENURED).ToObjectChecked();
heap->CopyCode(CcTest::i_isolate()->builtins()->builtin(
Builtins::kIllegal)).ToObjectChecked();
@@ -102,7 +102,7 @@ HEAP_TEST(StressHandles) {
v8::Local<v8::Context> env = v8::Context::New(CcTest::isolate());
env->Enter();
Handle<Object> o = TestAllocateAfterFailures();
- CHECK(o->IsTrue());
+ CHECK(o->IsTrue(CcTest::i_isolate()));
env->Exit();
}
@@ -211,8 +211,7 @@ TEST(CodeRange) {
const size_t code_range_size = 32*MB;
CcTest::InitializeVM();
CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()));
- code_range.SetUp(code_range_size +
- kReservedCodeRangePages * v8::base::OS::CommitPageSize());
+ code_range.SetUp(code_range_size);
size_t current_allocated = 0;
size_t total_allocated = 0;
List< ::Block> blocks(1000);
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
new file mode 100644
index 0000000000..b331f6bf3a
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -0,0 +1,318 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/array-buffer-tracker.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace {
+
+typedef i::LocalArrayBufferTracker LocalTracker;
+
+bool IsTracked(i::JSArrayBuffer* buf) {
+ return i::ArrayBufferTracker::IsTracked(buf);
+}
+
+} // namespace
+
+namespace v8 {
+namespace internal {
+
+// The following tests make sure that JSArrayBuffer tracking works as
+// expected when objects are moved through various spaces during GC phases.
+
+TEST(ArrayBuffer_OnlyMC) {
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+
+ JSArrayBuffer* raw_ab = nullptr;
+ {
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(IsTracked(*buf));
+ raw_ab = *buf;
+ // Prohibit page from being released.
+ Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ }
+ // 2 GCs are needed because we promote to old space as live, meaning that
+ // we will survive one GC.
+ heap::GcAndSweep(heap, OLD_SPACE);
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(!IsTracked(raw_ab));
+}
+
+TEST(ArrayBuffer_OnlyScavenge) {
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+
+ JSArrayBuffer* raw_ab = nullptr;
+ {
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ raw_ab = *buf;
+ // Prohibit page from being released.
+ Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ }
+ // 2 GCs are needed because we promote to old space as live, meaning that
+ // we will survive one GC.
+ heap::GcAndSweep(heap, OLD_SPACE);
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(!IsTracked(raw_ab));
+}
+
+TEST(ArrayBuffer_ScavengeAndMC) {
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+
+ JSArrayBuffer* raw_ab = nullptr;
+ {
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(IsTracked(*buf));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ raw_ab = *buf;
+ // Prohibit page from being released.
+ Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ }
+ // 2 GCs are needed because we promote to old space as live, meaning that
+ // we will survive one GC.
+ heap::GcAndSweep(heap, OLD_SPACE);
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(!IsTracked(raw_ab));
+}
+
+TEST(ArrayBuffer_Compaction) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ heap::AbandonCurrentlyFreeMemory(heap->old_space());
+
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf1 = v8::Utils::OpenHandle(*ab1);
+ CHECK(IsTracked(*buf1));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ heap::GcAndSweep(heap, NEW_SPACE);
+
+ Page* page_before_gc = Page::FromAddress(buf1->address());
+ page_before_gc->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ CHECK(IsTracked(*buf1));
+
+ heap->CollectAllGarbage();
+
+ Page* page_after_gc = Page::FromAddress(buf1->address());
+ CHECK(IsTracked(*buf1));
+
+ CHECK_NE(page_before_gc, page_after_gc);
+}
+
+TEST(ArrayBuffer_UnregisterDuringSweep) {
+// Regular pages in old space (without compaction) are processed concurrently
+// in the sweeper. If we happen to unregister a buffer (either explicitly, or
+// implicitly through e.g. |Externalize|) we need to sync with the sweeper
+// task.
+//
+// Note: This test will only fail on TSAN configurations.
+
+// Disable verify-heap since it forces sweeping to be completed in the
+// epilogue of the GC.
+#ifdef VERIFY_HEAP
+ i::FLAG_verify_heap = false;
+#endif // VERIFY_HEAP
+
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+ {
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+
+ {
+ v8::HandleScope handle_scope(isolate);
+ // Allocate another buffer on the same page to force processing a
+ // non-empty set of buffers in the last GC.
+ Local<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf2 = v8::Utils::OpenHandle(*ab2);
+ CHECK(IsTracked(*buf));
+ CHECK(IsTracked(*buf2));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ CHECK(IsTracked(*buf2));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(*buf));
+ CHECK(IsTracked(*buf2));
+ }
+
+ heap->CollectGarbage(OLD_SPACE);
+ // |Externalize| will cause the buffer to be |Unregister|ed. Without
+ // barriers and proper synchronization this will trigger a data race on
+ // TSAN.
+ v8::ArrayBuffer::Contents contents = ab->Externalize();
+ heap->isolate()->array_buffer_allocator()->Free(contents.Data(),
+ contents.ByteLength());
+ }
+}
+
+TEST(ArrayBuffer_NonLivePromotion) {
+ // The test verifies that the marking state is preserved when promoting
+ // a buffer to old space.
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+
+ JSArrayBuffer* raw_ab = nullptr;
+ {
+ v8::HandleScope handle_scope(isolate);
+ Handle<FixedArray> root =
+ heap->isolate()->factory()->NewFixedArray(1, TENURED);
+ {
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+ root->set(0, *buf); // Buffer that should not be promoted as live.
+ }
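+ // Start incremental marking but do not finish it; the buffer stays
+ // unmarked (white) and is treated as non-live once the only strong
+ // reference is cleared below.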
+ heap::SimulateIncrementalMarking(heap, false);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ raw_ab = JSArrayBuffer::cast(root->get(0));
+ root->set(0, heap->undefined_value());
+ heap::SimulateIncrementalMarking(heap, true);
+ // Prohibit page from being released.
+ Page::FromAddress(raw_ab->address())->MarkNeverEvacuate();
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(!IsTracked(raw_ab));
+ }
+}
+
+TEST(ArrayBuffer_LivePromotion) {
+ // The test verifies that the marking state is preserved when promoting
+ // a buffer to old space.
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+
+ JSArrayBuffer* raw_ab = nullptr;
+ {
+ v8::HandleScope handle_scope(isolate);
+ Handle<FixedArray> root =
+ heap->isolate()->factory()->NewFixedArray(1, TENURED);
+ {
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+ root->set(0, *buf); // Buffer that should be promoted as live.
+ }
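+ // Finish incremental marking so that the buffer is marked live (black)
+ // before promotion.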
+ heap::SimulateIncrementalMarking(heap, true);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ raw_ab = JSArrayBuffer::cast(root->get(0));
+ root->set(0, heap->undefined_value());
+ // Prohibit page from being released.
+ Page::FromAddress(raw_ab->address())->MarkNeverEvacuate();
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(IsTracked(raw_ab));
+ }
+}
+
+TEST(ArrayBuffer_SemiSpaceCopyThenPagePromotion) {
+ // The test verifies that the marking state is preserved across semispace
+ // copy.
+ CcTest::InitializeVM();
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ Heap* heap = reinterpret_cast<Isolate*>(isolate)->heap();
+
+ heap::SealCurrentObjects(heap);
+ {
+ v8::HandleScope handle_scope(isolate);
+ Handle<FixedArray> root =
+ heap->isolate()->factory()->NewFixedArray(1, TENURED);
+ {
+ v8::HandleScope handle_scope(isolate);
+ Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
+ root->set(0, *buf); // Buffer that should be promoted as live.
+ Page::FromAddress(buf->address())->MarkNeverEvacuate();
+ }
+ std::vector<Handle<FixedArray>> handles;
+ // Make the whole page transition from new->old, getting the buffers
+ // processed in the sweeper (relying on marking information) instead of
+ // being processed during new-space evacuation.
+ heap::FillCurrentPage(heap->new_space(), &handles);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ heap::GcAndSweep(heap, NEW_SPACE);
+ heap::SimulateIncrementalMarking(heap, true);
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(IsTracked(JSArrayBuffer::cast(root->get(0))));
+ }
+}
+
+UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
+ if (FLAG_optimize_for_size) return;
+ // The test allocates JSArrayBuffers on different pages before triggering a
+ // full GC that performs the semispace copy. If parallelized, this test
+ // ensures proper synchronization in TSAN configurations.
+ FLAG_min_semi_space_size = 2 * Page::kPageSize / MB;
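+ // At least two semispace pages let the two buffers below land on different
+ // pages, so parallel evacuation tasks process them concurrently.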
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ Heap* heap = i_isolate->heap();
+
+ Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf1 = v8::Utils::OpenHandle(*ab1);
+ heap::FillCurrentPage(heap->new_space());
+ Local<v8::ArrayBuffer> ab2 = v8::ArrayBuffer::New(isolate, 100);
+ Handle<JSArrayBuffer> buf2 = v8::Utils::OpenHandle(*ab2);
+ CHECK_NE(Page::FromAddress(buf1->address()),
+ Page::FromAddress(buf2->address()));
+ heap::GcAndSweep(heap, OLD_SPACE);
+ }
+}
+
+} // namespace internal
+} // namespace v8
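
The IsTracked() checks above rely on a small helper defined near the top of
test-array-buffer-tracker.cc, outside the hunks shown here. A minimal sketch
of that helper, assuming this version's ArrayBufferTracker API:

  bool IsTracked(i::JSArrayBuffer* buf) {
    return i::ArrayBufferTracker::IsTracked(buf);
  }
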
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 0feee5fc46..f61f7e1c41 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -4,12 +4,14 @@
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
-static void CheckInvariantsOfAbortedPage(Page* page) {
+namespace {
+
+void CheckInvariantsOfAbortedPage(Page* page) {
// Check invariants:
// 1) Markbits are cleared
// 2) The page is not marked as evacuation candidate anymore
@@ -19,6 +21,14 @@ static void CheckInvariantsOfAbortedPage(Page* page) {
CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
+void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
+ Page* page) {
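+ // Every handle must still point at |page|, i.e. none of the objects was
+ // moved by compaction.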
+ for (auto& fixed_array : handles) {
+ CHECK(Page::FromAddress(fixed_array->address()) == page);
+ }
+}
+
+} // namespace
HEAP_TEST(CompactionFullAbortedPage) {
// Test the scenario where we reach OOM during compaction and the whole page
@@ -33,23 +43,23 @@ HEAP_TEST(CompactionFullAbortedPage) {
Heap* heap = isolate->heap();
{
HandleScope scope1(isolate);
- PageIterator it(heap->old_space());
- while (it.has_next()) {
- it.next()->MarkNeverAllocateForTesting();
- }
+
+ heap::SealCurrentObjects(heap);
{
HandleScope scope2(isolate);
CHECK(heap->old_space()->Expand());
auto compaction_page_handles =
- CreatePadding(heap, Page::kAllocatableMemory, TENURED);
+ heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
Page* to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
heap->set_force_oom(true);
heap->CollectAllGarbage();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
// Check that all handles still point to the same page, i.e., compaction
// has been aborted on the page.
@@ -71,29 +81,29 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
FLAG_concurrent_sweeping = false;
FLAG_manual_evacuation_candidates_selection = true;
- const int object_size = 128 * KB;
+ const int objects_per_page = 10;
+ const int object_size = Page::kAllocatableMemory / objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
{
HandleScope scope1(isolate);
- PageIterator it(heap->old_space());
- while (it.has_next()) {
- it.next()->MarkNeverAllocateForTesting();
- }
+
+ heap::SealCurrentObjects(heap);
{
HandleScope scope2(isolate);
// Fill another page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
- auto compaction_page_handles =
- CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+ auto compaction_page_handles = heap::CreatePadding(
+ heap, Page::kAllocatableMemory, TENURED, object_size);
Page* to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
{
// Add another page that is filled with {num_objects} objects of size
@@ -101,13 +111,15 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
HandleScope scope3(isolate);
CHECK(heap->old_space()->Expand());
const int num_objects = 3;
- std::vector<Handle<FixedArray>> page_to_fill_handles = CreatePadding(
- heap, object_size * num_objects, TENURED, object_size);
+ std::vector<Handle<FixedArray>> page_to_fill_handles =
+ heap::CreatePadding(heap, object_size * num_objects, TENURED,
+ object_size);
Page* page_to_fill =
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
heap->CollectAllGarbage();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
bool migration_aborted = false;
for (Handle<FixedArray> object : compaction_page_handles) {
@@ -143,7 +155,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
FLAG_concurrent_sweeping = false;
FLAG_manual_evacuation_candidates_selection = true;
- const int object_size = 128 * KB;
+ const int objects_per_page = 10;
+ const int object_size = Page::kAllocatableMemory / objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -153,10 +166,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
Handle<FixedArray> root_array =
isolate->factory()->NewFixedArray(10, TENURED);
- PageIterator it(heap->old_space());
- while (it.has_next()) {
- it.next()->MarkNeverAllocateForTesting();
- }
+ heap::SealCurrentObjects(heap);
Page* to_be_aborted_page = nullptr;
{
@@ -165,7 +175,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
// properly adjusted).
CHECK(heap->old_space()->Expand());
std::vector<Handle<FixedArray>> compaction_page_handles =
- CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+ heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
+ object_size);
to_be_aborted_page =
Page::FromAddress(compaction_page_handles.front()->address());
to_be_aborted_page->SetFlag(
@@ -174,8 +185,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
}
root_array->set(0, *compaction_page_handles.back());
+ CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
}
-
{
// Add another page that is filled with {num_objects} objects of size
// {object_size}.
@@ -184,12 +195,13 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
const int num_objects = 2;
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
- CreatePadding(heap, used_memory, TENURED, object_size);
+ heap::CreatePadding(heap, used_memory, TENURED, object_size);
Page* page_to_fill =
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
heap->CollectAllGarbage();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
// The following check makes sure that we compacted "some" objects, while
// leaving others in place.
@@ -230,7 +242,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
FLAG_concurrent_sweeping = false;
FLAG_manual_evacuation_candidates_selection = true;
- const int object_size = 128 * KB;
+ const int objects_per_page = 10;
+ const int object_size = Page::kAllocatableMemory / objects_per_page;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -239,10 +252,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
HandleScope scope1(isolate);
Handle<FixedArray> root_array =
isolate->factory()->NewFixedArray(10, TENURED);
- PageIterator it(heap->old_space());
- while (it.has_next()) {
- it.next()->MarkNeverAllocateForTesting();
- }
+ heap::SealCurrentObjects(heap);
Page* to_be_aborted_page = nullptr;
{
@@ -250,8 +260,8 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
// Fill another page with objects of size {object_size} (last one is
// properly adjusted).
CHECK(heap->old_space()->Expand());
- auto compaction_page_handles =
- CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
+ auto compaction_page_handles = heap::CreatePadding(
+ heap, Page::kAllocatableMemory, TENURED, object_size);
// Sanity check that we have enough space for linking up arrays.
CHECK_GE(compaction_page_handles.front()->length(), 2);
to_be_aborted_page =
@@ -267,6 +277,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
isolate->factory()->NewFixedArray(1, NOT_TENURED);
CHECK(heap->InNewSpace(*new_space_array));
compaction_page_handles.front()->set(1, *new_space_array);
+ CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
}
{
@@ -277,12 +288,13 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
const int num_objects = 2;
int used_memory = object_size * num_objects;
std::vector<Handle<FixedArray>> page_to_fill_handles =
- CreatePadding(heap, used_memory, TENURED, object_size);
+ heap::CreatePadding(heap, used_memory, TENURED, object_size);
Page* page_to_fill =
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
heap->CollectAllGarbage();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
// The following check makes sure that we compacted "some" objects, while
// leaving others in place.
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 424e9870d8..80c4cc7c34 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -31,6 +31,7 @@
#include "src/compilation-cache.h"
#include "src/context-measure.h"
#include "src/deoptimizer.h"
+#include "src/elements.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/field-type.h"
@@ -43,7 +44,7 @@
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
#include "test/cctest/test-feedback-vector.h"
@@ -98,6 +99,48 @@ static void CheckNumber(Isolate* isolate, double value, const char* string) {
CHECK(String::cast(*print_string)->IsUtf8EqualTo(CStrVector(string)));
}
+void CheckEmbeddedObjectsAreEqual(Handle<Code> lhs, Handle<Code> rhs) {
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
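+ // Only visit relocation entries that reference embedded heap objects.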
+ RelocIterator lhs_it(*lhs, mode_mask);
+ RelocIterator rhs_it(*rhs, mode_mask);
+ while (!lhs_it.done() && !rhs_it.done()) {
+ CHECK(lhs_it.rinfo()->target_object() == rhs_it.rinfo()->target_object());
+
+ lhs_it.next();
+ rhs_it.next();
+ }
+ CHECK(lhs_it.done() == rhs_it.done());
+}
+
+HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ HandleScope sc(isolate);
+
+ Handle<Object> value = factory->NewNumber(1.000123);
+ CHECK(heap->InNewSpace(*value));
+
+ i::byte buffer[i::Assembler::kMinimalBufferSize];
+ MacroAssembler masm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
+ // Add a new-space reference to the code.
+ masm.Push(value);
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ Code* tmp = nullptr;
+ heap->CopyCode(*code).To(&tmp);
+ Handle<Code> copy(tmp);
+
+ CheckEmbeddedObjectsAreEqual(code, copy);
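+ // The GC below moves |value| out of new space; the embedded reference must
+ // be updated consistently in both the original and the copied code.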
+ heap->CollectAllAvailableGarbage();
+ CheckEmbeddedObjectsAreEqual(code, copy);
+}
static void CheckFindCodeObject(Isolate* isolate) {
// Test FindCodeObject
@@ -577,7 +620,7 @@ TEST(GlobalHandles) {
static bool WeakPointerCleared = false;
static void TestWeakGlobalHandleCallback(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+ const v8::WeakCallbackInfo<void>& data) {
std::pair<v8::Persistent<v8::Value>*, int>* p =
reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
data.GetParameter());
@@ -610,9 +653,9 @@ TEST(WeakGlobalHandlesScavenge) {
}
std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
- GlobalHandles::MakeWeak(h2.location(),
- reinterpret_cast<void*>(&handle_and_id),
- &TestWeakGlobalHandleCallback);
+ GlobalHandles::MakeWeak(
+ h2.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
// Scavenge treats weak pointers as normal roots.
heap->CollectGarbage(NEW_SPACE);
@@ -657,9 +700,9 @@ TEST(WeakGlobalHandlesMark) {
CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2));
std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
- GlobalHandles::MakeWeak(h2.location(),
- reinterpret_cast<void*>(&handle_and_id),
- &TestWeakGlobalHandleCallback);
+ GlobalHandles::MakeWeak(
+ h2.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
@@ -695,9 +738,9 @@ TEST(DeleteWeakGlobalHandle) {
}
std::pair<Handle<Object>*, int> handle_and_id(&h, 1234);
- GlobalHandles::MakeWeak(h.location(),
- reinterpret_cast<void*>(&handle_and_id),
- &TestWeakGlobalHandleCallback);
+ GlobalHandles::MakeWeak(h.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback,
+ v8::WeakCallbackType::kParameter);
// Scavenge does not recognize weak references.
heap->CollectGarbage(NEW_SPACE);
@@ -710,6 +753,45 @@ TEST(DeleteWeakGlobalHandle) {
CHECK(WeakPointerCleared);
}
+TEST(DoNotPromoteWhiteObjectsOnScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+
+ HandleScope scope(isolate);
+ Handle<Object> white = factory->NewStringFromStaticChars("white");
+
+ CHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(*white))));
+
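+ // The scavenger must keep the unmarked (white) object in new space instead
+ // of promoting it.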
+ heap->CollectGarbage(NEW_SPACE);
+
+ CHECK(heap->InNewSpace(*white));
+}
+
+TEST(PromoteGreyOrBlackObjectsOnScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+
+ HandleScope scope(isolate);
+ Handle<Object> marked = factory->NewStringFromStaticChars("marked");
+
+ IncrementalMarking* marking = heap->incremental_marking();
+ marking->Stop();
+ heap->StartIncrementalMarking();
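+ // Step incremental marking until the string has been marked grey or black.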
+ while (
+ Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(*marked)))) {
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_MARKING,
+ IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+ }
+
+ heap->CollectGarbage(NEW_SPACE);
+
+ CHECK(!heap->InNewSpace(*marked));
+}
TEST(BytecodeArray) {
static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a};
@@ -724,7 +806,7 @@ TEST(BytecodeArray) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
Handle<FixedArray> constant_pool = factory->NewFixedArray(5, TENURED);
for (int i = 0; i < 5; i++) {
Handle<Object> number = factory->NewHeapNumber(i);
@@ -1349,7 +1431,7 @@ TEST(TestCodeFlushingIncremental) {
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
}
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
@@ -1363,8 +1445,9 @@ TEST(TestCodeFlushingIncremental) {
// Simulate several GCs that use incremental marking but make sure
// the loop breaks once the function is enqueued as a candidate.
for (int i = 0; i < kAgingThreshold; i++) {
- SimulateIncrementalMarking(CcTest::heap());
- if (!function->next_function_link()->IsUndefined()) break;
+ heap::SimulateIncrementalMarking(CcTest::heap());
+ if (!function->next_function_link()->IsUndefined(CcTest::i_isolate()))
+ break;
CcTest::heap()->CollectAllGarbage();
}
@@ -1439,7 +1522,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
// Simulate incremental marking so that the functions are enqueued as
// code flushing candidates. Then kill one of the functions. Finally
// perform a scavenge while incremental marking is still running.
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap(), false);
*function2.location() = NULL;
CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking");
@@ -1493,16 +1576,16 @@ TEST(TestCodeFlushingIncrementalAbort) {
// Simulate incremental marking so that the function is enqueued as
// code flushing candidate.
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
// Enable the debugger and add a breakpoint while incremental marking
// is running so that incremental marking aborts and code flushing is
// disabled.
- int position = 0;
+ int position = function->shared()->start_position();
Handle<Object> breakpoint_object(Smi::FromInt(0), isolate);
EnableDebugger(CcTest::isolate());
isolate->debug()->SetBreakPoint(function, breakpoint_object, &position);
- isolate->debug()->ClearAllBreakPoints();
+ isolate->debug()->ClearBreakPoint(breakpoint_object);
DisableDebugger(CcTest::isolate());
// Force optimization now that code flushing is disabled.
@@ -1546,12 +1629,9 @@ TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
Handle<Object> g_value =
Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
Handle<JSFunction> g_function = Handle<JSFunction>::cast(g_value);
- // TODO(mvstanton): change to check that g is *not* compiled when optimized
- // cache
- // map lookup moves to the compile lazy builtin.
- CHECK(g_function->is_compiled());
+ CHECK(!g_function->is_compiled());
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(f); f();");
// g should now have available an optimized function, unmarked by gc. The
@@ -1590,25 +1670,11 @@ TEST(CompilationCacheCachingBehavior) {
CompileRun(raw_source);
}
- // On first compilation, only a hash is inserted in the code cache. We can't
- // find that value.
+ // The script should be in the cache now.
MaybeHandle<SharedFunctionInfo> info = compilation_cache->LookupScript(
source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(false, true, false), native_context,
language_mode);
- CHECK(info.is_null());
-
- {
- v8::HandleScope scope(CcTest::isolate());
- CompileRun(raw_source);
- }
-
- // On second compilation, the hash is replaced by a real cache entry mapping
- // the source to the shared function info containing the code.
- info = compilation_cache->LookupScript(
- source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(false, true, false), native_context,
- language_mode);
CHECK(!info.is_null());
// Check that the code cache entry survives at least on GC.
@@ -1640,36 +1706,6 @@ TEST(CompilationCacheCachingBehavior) {
v8::ScriptOriginOptions(false, true, false), native_context,
language_mode);
CHECK(info.is_null());
-
- {
- v8::HandleScope scope(CcTest::isolate());
- CompileRun(raw_source);
- }
-
- // On first compilation, only a hash is inserted in the code cache. We can't
- // find that value.
- info = compilation_cache->LookupScript(
- source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(false, true, false), native_context,
- language_mode);
- CHECK(info.is_null());
-
- for (int i = 0; i < CompilationCacheTable::kHashGenerations; i++) {
- compilation_cache->MarkCompactPrologue();
- }
-
- {
- v8::HandleScope scope(CcTest::isolate());
- CompileRun(raw_source);
- }
-
- // If we aged the cache before caching the script, ensure that we didn't cache
- // on next compilation.
- info = compilation_cache->LookupScript(
- source, Handle<Object>(), 0, 0,
- v8::ScriptOriginOptions(false, true, false), native_context,
- language_mode);
- CHECK(info.is_null());
}
@@ -1690,9 +1726,9 @@ static void OptimizeEmptyFunction(const char* name) {
int CountNativeContexts() {
int count = 0;
Object* object = CcTest::heap()->native_contexts_list();
- while (!object->IsUndefined()) {
+ while (!object->IsUndefined(CcTest::i_isolate())) {
count++;
- object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
+ object = Context::cast(object)->next_context_link();
}
return count;
}
@@ -1826,12 +1862,11 @@ static int CountNativeContextsWithGC(Isolate* isolate, int n) {
Heap* heap = isolate->heap();
int count = 0;
Handle<Object> object(heap->native_contexts_list(), isolate);
- while (!object->IsUndefined()) {
+ while (!object->IsUndefined(isolate)) {
count++;
if (count == n) heap->CollectAllGarbage();
object =
- Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK),
- isolate);
+ Handle<Object>(Context::cast(*object)->next_context_link(), isolate);
}
return count;
}
@@ -1933,11 +1968,7 @@ TEST(TestSizeOfRegExpCode) {
// Get initial heap size after several full GCs, which will stabilize
// the heap size and return with sweeping finished completely.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
+ CcTest::heap()->CollectAllAvailableGarbage("initial cleanup");
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
@@ -1970,19 +2001,16 @@ TEST(TestSizeOfRegExpCode) {
HEAP_TEST(TestSizeOfObjects) {
v8::V8::Initialize();
+ Heap* heap = CcTest::heap();
+ MarkCompactCollector* collector = heap->mark_compact_collector();
// Get initial heap size after several full GCs, which will stabilize
// the heap size and return with sweeping finished completely.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage();
- MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+ heap->CollectAllAvailableGarbage("initial cleanup");
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
- int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
+ int initial_size = static_cast<int>(heap->SizeOfObjects());
{
// Allocate objects on several different old-space pages so that
@@ -1991,25 +2019,22 @@ HEAP_TEST(TestSizeOfObjects) {
AlwaysAllocateScope always_allocate(CcTest::i_isolate());
int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
- CcTest::heap()->AllocateFixedArray(8192, TENURED).ToObjectChecked();
+ heap->AllocateFixedArray(8192, TENURED).ToObjectChecked();
CHECK_EQ(initial_size + i * filler_size,
- static_cast<int>(CcTest::heap()->SizeOfObjects()));
+ static_cast<int>(heap->SizeOfObjects()));
}
}
// The heap size should go back to initial size after a full GC, even
// though sweeping didn't finish yet.
- CcTest::heap()->CollectAllGarbage();
-
+ heap->CollectAllGarbage();
// Normally sweeping would not be complete here, but no guarantees.
-
- CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
-
+ CHECK_EQ(initial_size, static_cast<int>(heap->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
- CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
+ CHECK_EQ(initial_size, static_cast<int>(heap->SizeOfObjects()));
}
@@ -2296,16 +2321,20 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
// on the heap.
if (size_of_objects_1 > size_of_objects_2) {
intptr_t delta = size_of_objects_1 - size_of_objects_2;
- PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
- "Iterator: %" V8_PTR_PREFIX "d, "
- "delta: %" V8_PTR_PREFIX "d\n",
+ PrintF("Heap::SizeOfObjects: %" V8PRIdPTR
+ ", "
+ "Iterator: %" V8PRIdPTR
+ ", "
+ "delta: %" V8PRIdPTR "\n",
size_of_objects_1, size_of_objects_2, delta);
CHECK_GT(size_of_objects_1 / 20, delta);
} else {
intptr_t delta = size_of_objects_2 - size_of_objects_1;
- PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
- "Iterator: %" V8_PTR_PREFIX "d, "
- "delta: %" V8_PTR_PREFIX "d\n",
+ PrintF("Heap::SizeOfObjects: %" V8PRIdPTR
+ ", "
+ "Iterator: %" V8PRIdPTR
+ ", "
+ "delta: %" V8PRIdPTR "\n",
size_of_objects_1, size_of_objects_2, delta);
CHECK_GT(size_of_objects_2 / 20, delta);
}
@@ -2636,7 +2665,7 @@ TEST(InstanceOfStubWriteBarrier) {
CHECK(f->IsOptimized());
- while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
+ while (!Marking::IsBlack(ObjectMarking::MarkBitFrom(f->code())) &&
!marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
@@ -2657,6 +2686,14 @@ TEST(InstanceOfStubWriteBarrier) {
CcTest::heap()->CollectGarbage(OLD_SPACE);
}
+namespace {
+
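+// Returns where the profiler ticks for |shared| are recorded: on the
+// SharedFunctionInfo when running with Ignition, otherwise on the code object.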
+int GetProfilerTicks(SharedFunctionInfo* shared) {
+ return FLAG_ignition ? shared->profiler_ticks()
+ : shared->code()->profiler_ticks();
+}
+
+} // namespace
TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
i::FLAG_stress_compaction = false;
@@ -2687,16 +2724,18 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
CHECK(f->IsOptimized());
- IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- marking->Stop();
+ // Make sure incremental marking is not running.
+ CcTest::heap()->incremental_marking()->Stop();
+
CcTest::heap()->StartIncrementalMarking();
// The following calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
+
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
- CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+ CHECK_EQ(0, GetProfilerTicks(f->shared()));
}
@@ -2727,9 +2766,9 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
i::Handle<JSFunction> f = i::Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
-
CHECK(f->IsOptimized());
+ // Make sure incremental marking is not running.
CcTest::heap()->incremental_marking()->Stop();
// The following two calls will increment CcTest::heap()->global_ic_age().
@@ -2738,7 +2777,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
- CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+ CHECK_EQ(0, GetProfilerTicks(f->shared()));
}
@@ -2776,12 +2815,13 @@ HEAP_TEST(GCFlags) {
TEST(IdleNotificationFinishMarking) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- SimulateFullSpace(CcTest::heap()->old_space());
+ const int initial_gc_count = CcTest::heap()->gc_count();
+ heap::SimulateFullSpace(CcTest::heap()->old_space());
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
CcTest::heap()->StartIncrementalMarking();
- CHECK_EQ(CcTest::heap()->gc_count(), 0);
+ CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count);
// TODO(hpayer): We cannot write a proper unit test for the heap right now.
// The ideal test would call kMaxIdleMarkingDelayCounter to test the
@@ -2816,7 +2856,7 @@ TEST(IdleNotificationFinishMarking) {
(v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
kLongIdleTime);
- CHECK_EQ(CcTest::heap()->gc_count(), 1);
+ CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count + 1);
}
@@ -2828,7 +2868,7 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
- SimulateFullSpace(CcTest::heap()->new_space());
+ heap::SimulateFullSpace(CcTest::heap()->new_space());
AlwaysAllocateScope always_allocate(CcTest::i_isolate());
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
@@ -3303,7 +3343,7 @@ TEST(Regress1465) {
CompileRun("%DebugPrint(root);");
CHECK_EQ(transitions_count, transitions_before);
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
// Count number of live transitions after marking. Note that one transition
@@ -3473,7 +3513,7 @@ TEST(Regress2143a) {
"root.foo = 0;"
"root = new Object;");
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
// Compile a StoreIC that performs the prepared map transition. This
// will restart incremental marking and should make sure the root is
@@ -3513,7 +3553,7 @@ TEST(Regress2143b) {
"root.foo = 0;"
"root = new Object;");
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
// Compile an optimized LStoreNamedField that performs the prepared
// map transition. This will restart incremental marking and should
@@ -3557,6 +3597,8 @@ TEST(ReleaseOverReservedPages) {
// Concurrent sweeping adds nondeterminism, depending on when memory is
// available for further reuse.
i::FLAG_concurrent_sweeping = false;
+ // Fast evacuation of pages may result in a different page count in old space.
+ i::FLAG_page_promotion = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
@@ -3566,28 +3608,29 @@ TEST(ReleaseOverReservedPages) {
// Prepare many pages with low live-bytes count.
PagedSpace* old_space = heap->old_space();
- CHECK_EQ(1, old_space->CountTotalPages());
+ const int initial_page_count = old_space->CountTotalPages();
+ const int overall_page_count = number_of_test_pages + initial_page_count;
for (int i = 0; i < number_of_test_pages; i++) {
AlwaysAllocateScope always_allocate(isolate);
- SimulateFullSpace(old_space);
+ heap::SimulateFullSpace(old_space);
factory->NewFixedArray(1, TENURED);
}
- CHECK_EQ(number_of_test_pages + 1, old_space->CountTotalPages());
+ CHECK_EQ(overall_page_count, old_space->CountTotalPages());
// Triggering one GC will cause a lot of garbage to be discovered but
// evenly spread across all allocated pages.
heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
"triggered for preparation");
- CHECK_GE(number_of_test_pages + 1, old_space->CountTotalPages());
+ CHECK_GE(overall_page_count, old_space->CountTotalPages());
// Triggering subsequent GCs should cause at least half of the pages
// to be released to the OS after at most two cycles.
heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
"triggered by test 1");
- CHECK_GE(number_of_test_pages + 1, old_space->CountTotalPages());
+ CHECK_GE(overall_page_count, old_space->CountTotalPages());
heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
"triggered by test 2");
- CHECK_GE(number_of_test_pages + 1, old_space->CountTotalPages() * 2);
+ CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released to the
// OS so that other processes can seize the memory. If we get a failure here
@@ -3597,7 +3640,7 @@ TEST(ReleaseOverReservedPages) {
// boots, but if the 20 small arrays don't fit on the first page then that's
// an indication that it is too small.
heap->CollectAllAvailableGarbage("triggered really hard");
- CHECK_EQ(1, old_space->CountTotalPages());
+ CHECK_EQ(initial_page_count, old_space->CountTotalPages());
}
static int forced_gc_counter = 0;
@@ -3670,7 +3713,7 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
- Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+ Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
FeedbackVectorHelper feedback_helper(feedback_vector);
int expected_slots = 2;
@@ -3680,7 +3723,7 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
CHECK(feedback_vector->Get(feedback_helper.slot(slot1))->IsWeakCell());
CHECK(feedback_vector->Get(feedback_helper.slot(slot2))->IsWeakCell());
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
CHECK(!WeakCell::cast(feedback_vector->Get(feedback_helper.slot(slot1)))
@@ -3707,7 +3750,7 @@ static Code* FindFirstIC(Code* code, Code::Kind kind) {
static void CheckVectorIC(Handle<JSFunction> f, int slot_index,
InlineCacheState desired_state) {
Handle<TypeFeedbackVector> vector =
- Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
+ Handle<TypeFeedbackVector>(f->feedback_vector());
FeedbackVectorHelper helper(vector);
FeedbackVectorSlot slot = helper.slot(slot_index);
if (vector->GetKind(slot) == FeedbackVectorSlotKind::LOAD_IC) {
@@ -3720,16 +3763,6 @@ static void CheckVectorIC(Handle<JSFunction> f, int slot_index,
}
}
-
-static void CheckVectorICCleared(Handle<JSFunction> f, int slot_index) {
- Handle<TypeFeedbackVector> vector =
- Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
- FeedbackVectorSlot slot(slot_index);
- LoadICNexus nexus(vector, slot);
- CHECK(IC::IsCleared(&nexus));
-}
-
-
TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3744,54 +3777,15 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
- Handle<TypeFeedbackVector> vector(f->shared()->feedback_vector());
+ Handle<TypeFeedbackVector> vector(f->feedback_vector());
CHECK(vector->Get(FeedbackVectorSlot(0))->IsWeakCell());
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
CHECK(vector->Get(FeedbackVectorSlot(0))->IsWeakCell());
}
-
-TEST(IncrementalMarkingClearsMonomorphicConstructor) {
- if (i::FLAG_always_opt) return;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Value> fun1;
- v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
-
- {
- LocalContext env;
- CompileRun("function fun() { this.x = 1; };");
- fun1 = env->Global()->Get(env.local(), v8_str("fun")).ToLocalChecked();
- }
-
- // Prepare function f that contains a monomorphic constructor for object
- // originating from a different native context.
- CHECK(CcTest::global()->Set(ctx, v8_str("fun1"), fun1).FromJust());
- CompileRun(
- "function fun() { this.x = 1; };"
- "function f(o) { return new o(); } f(fun1); f(fun1);");
- Handle<JSFunction> f = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
-
-
- Handle<TypeFeedbackVector> vector(f->shared()->feedback_vector());
- CHECK(vector->Get(FeedbackVectorSlot(0))->IsWeakCell());
-
- // Fire context dispose notification.
- CcTest::isolate()->ContextDisposedNotification();
- SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
-
- CHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(isolate),
- vector->Get(FeedbackVectorSlot(0)));
-}
-
-
TEST(IncrementalMarkingPreservesMonomorphicIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3807,45 +3801,12 @@ TEST(IncrementalMarkingPreservesMonomorphicIC) {
CheckVectorIC(f, 0, MONOMORPHIC);
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
CheckVectorIC(f, 0, MONOMORPHIC);
}
-
-TEST(IncrementalMarkingClearsMonomorphicIC) {
- if (i::FLAG_always_opt) return;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Value> obj1;
- v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
-
- {
- LocalContext env;
- CompileRun("function fun() { this.x = 1; }; var obj = new fun();");
- obj1 = env->Global()->Get(env.local(), v8_str("obj")).ToLocalChecked();
- }
-
- // Prepare function f that contains a monomorphic IC for object
- // originating from a different native context.
- CHECK(CcTest::global()->Set(ctx, v8_str("obj1"), obj1).FromJust());
- CompileRun("function f(o) { return o.x; } f(obj1); f(obj1);");
- Handle<JSFunction> f = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
-
- CheckVectorIC(f, 0, MONOMORPHIC);
-
- // Fire context dispose notification.
- CcTest::isolate()->ContextDisposedNotification();
- SimulateIncrementalMarking(CcTest::heap());
- CcTest::heap()->CollectAllGarbage();
-
- CheckVectorICCleared(f, 0);
-}
-
-
TEST(IncrementalMarkingPreservesPolymorphicIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
@@ -3877,14 +3838,13 @@ TEST(IncrementalMarkingPreservesPolymorphicIC) {
CheckVectorIC(f, 0, POLYMORPHIC);
// Fire context dispose notification.
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
CheckVectorIC(f, 0, POLYMORPHIC);
}
-
-TEST(IncrementalMarkingClearsPolymorphicIC) {
+TEST(ContextDisposeDoesntClearPolymorphicIC) {
if (i::FLAG_always_opt) return;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -3916,10 +3876,10 @@ TEST(IncrementalMarkingClearsPolymorphicIC) {
// Fire context dispose notification.
CcTest::isolate()->ContextDisposedNotification();
- SimulateIncrementalMarking(CcTest::heap());
+ heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::heap()->CollectAllGarbage();
- CheckVectorICCleared(f, 0);
+ CheckVectorIC(f, 0, POLYMORPHIC);
}
@@ -4088,7 +4048,7 @@ TEST(Regress159140) {
// Simulate incremental marking so that the functions are enqueued as
// code flushing candidates. Then optimize one function. Finally
// finish the GC to complete code flushing.
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
heap->CollectAllGarbage();
@@ -4134,7 +4094,7 @@ TEST(Regress165495) {
// Simulate incremental marking so that unoptimized code is flushed
// even though it still is cached in the optimized code map.
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
heap->CollectAllGarbage();
// Make a new closure that will get code installed from the code map.
@@ -4202,7 +4162,7 @@ TEST(Regress169209) {
}
// Simulate incremental marking and collect code flushing candidates.
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
CHECK(shared1->code()->gc_metadata() != NULL);
// Optimize function and make sure the unoptimized code is replaced.
@@ -4258,9 +4218,9 @@ TEST(Regress169928) {
array_data->set(0, Smi::FromInt(1));
array_data->set(1, Smi::FromInt(2));
- AllocateAllButNBytes(CcTest::heap()->new_space(),
- JSArray::kSize + AllocationMemento::kSize +
- kPointerSize);
+ heap::AllocateAllButNBytes(
+ CcTest::heap()->new_space(),
+ JSArray::kSize + AllocationMemento::kSize + kPointerSize);
Handle<JSArray> array =
factory->NewJSArrayWithElements(array_data, FAST_SMI_ELEMENTS);
@@ -4333,9 +4293,10 @@ TEST(Regress513507) {
if (!code->is_optimized_code()) return;
}
- Handle<TypeFeedbackVector> vector = handle(shared->feedback_vector());
+ Handle<TypeFeedbackVector> vector =
+ TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
Handle<LiteralsArray> lit =
- LiteralsArray::New(isolate, vector, shared->num_literals(), TENURED);
+ LiteralsArray::New(isolate, vector, shared->num_literals());
Handle<Context> context(isolate->context());
// Add the new code several times to the optimized code map and also set an
@@ -4390,7 +4351,8 @@ TEST(Regress514122) {
if (!code->is_optimized_code()) return;
}
- Handle<TypeFeedbackVector> vector = handle(shared->feedback_vector());
+ Handle<TypeFeedbackVector> vector =
+ TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
Handle<LiteralsArray> lit =
LiteralsArray::New(isolate, vector, shared->num_literals(), TENURED);
Handle<Context> context(isolate->context());
@@ -4409,11 +4371,10 @@ TEST(Regress514122) {
HandleScope inner_scope(isolate);
AlwaysAllocateScope always_allocate(isolate);
// Make sure literal is placed on an old-space evacuation candidate.
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
// Make sure the number of literals is > 0.
- Handle<LiteralsArray> lit =
- LiteralsArray::New(isolate, vector, 23, TENURED);
+ Handle<LiteralsArray> lit = LiteralsArray::New(isolate, vector, 23);
evac_page = Page::FromAddress(lit->address());
BailoutId id = BailoutId(100);
@@ -4424,7 +4385,7 @@ TEST(Regress514122) {
// simulate incremental marking to enqueue optimized code map.
FLAG_manual_evacuation_candidates_selection = true;
evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
// No matter whether reachable or not, {boomer} is doomed.
Handle<Object> boomer(shared->optimized_code_map(), isolate);
@@ -4621,7 +4582,7 @@ TEST(LargeObjectSlotRecording) {
HandleScope scope(isolate);
// Create an object on an evacuation candidate.
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
Handle<FixedArray> lit = isolate->factory()->NewFixedArray(4, TENURED);
Page* evac_page = Page::FromAddress(lit->address());
evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
@@ -4634,7 +4595,7 @@ TEST(LargeObjectSlotRecording) {
CHECK(heap->lo_space()->Contains(*lo));
// Start incremental marking to activate the write barrier.
- SimulateIncrementalMarking(heap, false);
+ heap::SimulateIncrementalMarking(heap, false);
heap->incremental_marking()->AdvanceIncrementalMarking(
10000000, IncrementalMarking::IdleStepActions());
@@ -4735,7 +4696,7 @@ TEST(DisableInlineAllocation) {
static int AllocationSitesCount(Heap* heap) {
int count = 0;
for (Object* site = heap->allocation_sites_list();
- !(site->IsUndefined());
+ !(site->IsUndefined(heap->isolate()));
site = AllocationSite::cast(site)->weak_next()) {
count++;
}
@@ -4774,18 +4735,31 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
CompileRun("%OptimizeFunctionOnNextCall(bar); bar();");
- CHECK_EQ(DependentCode::kAllocationSiteTransitionChangedGroup,
- site->dependent_code()->group());
- CHECK_EQ(1, site->dependent_code()->count());
- CHECK(site->dependent_code()->object_at(0)->IsWeakCell());
- Code* function_bar = Code::cast(
- WeakCell::cast(site->dependent_code()->object_at(0))->value());
Handle<JSFunction> bar_handle = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()
->Get(context.local(), v8_str("bar"))
.ToLocalChecked())));
- CHECK_EQ(bar_handle->code(), function_bar);
+
+ int dependency_group_count = 0;
+ DependentCode* dependency = site->dependent_code();
+ while (dependency != heap->empty_fixed_array()) {
+ CHECK(dependency->group() ==
+ DependentCode::kAllocationSiteTransitionChangedGroup ||
+ dependency->group() ==
+ DependentCode::kAllocationSiteTenuringChangedGroup);
+ CHECK_EQ(1, dependency->count());
+ CHECK(dependency->object_at(0)->IsWeakCell());
+ Code* function_bar =
+ Code::cast(WeakCell::cast(dependency->object_at(0))->value());
+ CHECK_EQ(bar_handle->code(), function_bar);
+ dependency = dependency->next_link();
+ dependency_group_count++;
+ }
+
+ // TurboFan respects pretenuring feedback from allocation sites; Crankshaft
+ // does not. Either is fine for the purposes of this test.
+ CHECK(dependency_group_count == 1 || dependency_group_count == 2);
}
// Now make sure that a gc should get rid of the function, even though we
@@ -4888,6 +4862,67 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
CHECK(code->marked_for_deoptimization());
}
+TEST(NewSpaceObjectsInOptimizedCode) {
+ if (i::FLAG_always_opt || !i::FLAG_crankshaft || i::FLAG_turbo) return;
+ i::FLAG_weak_embedded_objects_in_optimized_code = true;
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::internal::Heap* heap = CcTest::heap();
+
+ if (!isolate->use_crankshaft()) return;
+ HandleScope outer_scope(heap->isolate());
+ Handle<Code> code;
+ {
+ LocalContext context;
+ HandleScope scope(heap->isolate());
+
+ CompileRun(
+ "var foo;"
+ "var bar;"
+ "(function() {"
+ " function foo_func(x) { with (x) { return 1 + x; } };"
+ " %NeverOptimizeFunction(foo_func);"
+ " function bar_func() {"
+ " return foo(1);"
+ " };"
+ " bar = bar_func;"
+ " foo = foo_func;"
+ " bar_func();"
+ " bar_func();"
+ " bar_func();"
+ " %OptimizeFunctionOnNextCall(bar_func);"
+ " bar_func();"
+ "})();");
+
+ Handle<JSFunction> bar = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CcTest::global()
+ ->Get(context.local(), v8_str("bar"))
+ .ToLocalChecked())));
+
+ Handle<JSFunction> foo = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CcTest::global()
+ ->Get(context.local(), v8_str("foo"))
+ .ToLocalChecked())));
+
+ CHECK(heap->InNewSpace(*foo));
+ heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
+ CHECK(!heap->InNewSpace(*foo));
+#ifdef VERIFY_HEAP
+ heap->Verify();
+#endif
+ CHECK(!bar->code()->marked_for_deoptimization());
+ code = scope.CloseAndEscape(Handle<Code>(bar->code()));
+ }
+
+ // Now make sure that a GC gets rid of the function.
+ for (int i = 0; i < 4; i++) {
+ heap->CollectAllGarbage();
+ }
+
+ CHECK(code->marked_for_deoptimization());
+}
TEST(NoWeakHashTableLeakWithIncrementalMarking) {
if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
@@ -4900,7 +4935,7 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
Isolate* isolate = CcTest::i_isolate();
// Do not run for no-snap builds.
- if (!i::Snapshot::HaveASnapshotToStartFrom(isolate)) return;
+ if (!i::Snapshot::HasContextSnapshot(isolate, 0)) return;
v8::internal::Heap* heap = CcTest::heap();
@@ -4911,7 +4946,7 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
if (!isolate->use_crankshaft()) return;
HandleScope outer_scope(heap->isolate());
for (int i = 0; i < 3; i++) {
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
{
LocalContext context;
HandleScope scope(heap->isolate());
@@ -5095,7 +5130,7 @@ TEST(WeakFunctionInConstructor) {
// cleared. Now, verify that one additional call with a new function
// allows monomorphicity.
Handle<TypeFeedbackVector> feedback_vector = Handle<TypeFeedbackVector>(
- createObj->shared()->feedback_vector(), CcTest::i_isolate());
+ createObj->feedback_vector(), CcTest::i_isolate());
for (int i = 0; i < 20; i++) {
Object* slot_value = feedback_vector->Get(FeedbackVectorSlot(0));
CHECK(slot_value->IsWeakCell());
@@ -5296,12 +5331,11 @@ Handle<JSFunction> GetFunctionByName(Isolate* isolate, const char* name) {
return Handle<JSFunction>::cast(obj);
}
-
-void CheckIC(Code* code, Code::Kind kind, SharedFunctionInfo* shared,
- int slot_index, InlineCacheState state) {
+void CheckIC(Handle<JSFunction> function, Code::Kind kind, int slot_index,
+ InlineCacheState state) {
if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
kind == Code::CALL_IC) {
- TypeFeedbackVector* vector = shared->feedback_vector();
+ TypeFeedbackVector* vector = function->feedback_vector();
FeedbackVectorSlot slot(slot_index);
if (kind == Code::LOAD_IC) {
LoadICNexus nexus(vector, slot);
@@ -5314,9 +5348,10 @@ void CheckIC(Code* code, Code::Kind kind, SharedFunctionInfo* shared,
CHECK_EQ(nexus.StateFromFeedback(), state);
}
} else {
- Code* ic = FindFirstIC(code, kind);
+ Code* ic = FindFirstIC(function->code(), kind);
CHECK(ic->is_inline_cache_stub());
- CHECK(ic->ic_state() == state);
+ CHECK(!IC::ICUseVector(kind));
+ CHECK_EQ(state, IC::StateFromCode(ic));
}
}
@@ -5345,12 +5380,12 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
CompileRun("(testIC())");
}
heap->CollectAllGarbage();
- CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, MONOMORPHIC);
+ CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, MONOMORPHIC);
+ CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
}
@@ -5381,12 +5416,12 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
CompileRun("(testIC())");
}
heap->CollectAllGarbage();
- CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, POLYMORPHIC);
+ CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, POLYMORPHIC);
+ CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
}
@@ -5547,19 +5582,21 @@ UNINITIALIZED_TEST(Regress538257) {
isolate->Enter();
{
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Heap* heap = i_isolate->heap();
HandleScope handle_scope(i_isolate);
- PagedSpace* old_space = i_isolate->heap()->old_space();
+ PagedSpace* old_space = heap->old_space();
const int kMaxObjects = 10000;
const int kFixedArrayLen = 512;
Handle<FixedArray> objects[kMaxObjects];
- for (int i = 0; (i < kMaxObjects) && old_space->CanExpand(Page::kPageSize);
+ for (int i = 0; (i < kMaxObjects) &&
+ heap->CanExpandOldGeneration(old_space->AreaSize());
i++) {
objects[i] = i_isolate->factory()->NewFixedArray(kFixedArrayLen, TENURED);
Page::FromAddress(objects[i]->address())
->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
}
- SimulateFullSpace(old_space);
- i_isolate->heap()->CollectGarbage(OLD_SPACE);
+ heap::SimulateFullSpace(old_space);
+ heap->CollectGarbage(OLD_SPACE);
// If we get this far, we've successfully aborted compaction. Any further
// allocations might trigger OOM.
}
@@ -5618,7 +5655,7 @@ TEST(Regress507979) {
UNINITIALIZED_TEST(PromotionQueue) {
i::FLAG_expose_gc = true;
- i::FLAG_max_semi_space_size = 2 * (Page::kPageSize / MB);
+ i::FLAG_max_semi_space_size = 2 * Page::kPageSize / MB;
i::FLAG_min_semi_space_size = i::FLAG_max_semi_space_size;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -5671,7 +5708,7 @@ UNINITIALIZED_TEST(PromotionQueue) {
CHECK(i::FLAG_min_semi_space_size * MB == new_space->TotalCapacity());
// Fill up the first semi-space page.
- FillUpOnePage(new_space);
+ heap::FillUpOnePage(new_space);
// Create a small object to initialize the bump pointer on the second
// semi-space page.
@@ -5680,7 +5717,7 @@ UNINITIALIZED_TEST(PromotionQueue) {
CHECK(heap->InNewSpace(*small));
// Fill up the second semi-space page.
- FillUpOnePage(new_space);
+ heap::FillUpOnePage(new_space);
// This scavenge will corrupt memory if the promotion queue is not
// evacuated.
@@ -5710,9 +5747,9 @@ TEST(Regress388880) {
// Allocate padding objects in old pointer space so that the object allocated
// afterwards ends at the end of the page.
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
int padding_size = desired_offset - Page::kObjectStartOffset;
- CreatePadding(heap, padding_size, TENURED);
+ heap::CreatePadding(heap, padding_size, TENURED);
Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
o->set_properties(*factory->empty_fixed_array());
@@ -5760,7 +5797,7 @@ TEST(Regress3631) {
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Handle<JSWeakCollection> weak_map(reinterpret_cast<JSWeakCollection*>(*obj));
while (!Marking::IsBlack(
- Marking::MarkBitFrom(HeapObject::cast(weak_map->table()))) &&
+ ObjectMarking::MarkBitFrom(HeapObject::cast(weak_map->table()))) &&
!marking->IsStopped()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
}
@@ -5859,11 +5896,11 @@ void CheckMapRetainingFor(int n) {
Handle<WeakCell> weak_cell = AddRetainedMap(isolate, heap);
CHECK(!weak_cell->cleared());
for (int i = 0; i < n; i++) {
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
heap->CollectGarbage(OLD_SPACE);
}
CHECK(!weak_cell->cleared());
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
heap->CollectGarbage(OLD_SPACE);
CHECK(weak_cell->cleared());
}
@@ -5892,7 +5929,7 @@ TEST(RegressArrayListGC) {
heap->CollectGarbage(OLD_SPACE);
// Force GC in old space on next addition of retained map.
Map::WeakCellForMap(map);
- SimulateFullSpace(CcTest::heap()->new_space());
+ heap::SimulateFullSpace(CcTest::heap()->new_space());
for (int i = 0; i < 10; i++) {
heap->AddRetainedMap(map);
}
@@ -6037,7 +6074,7 @@ TEST(BootstrappingExports) {
v8::Isolate* isolate = CcTest::isolate();
LocalContext env;
- if (Snapshot::HaveASnapshotToStartFrom(CcTest::i_isolate())) return;
+ if (Snapshot::HasContextSnapshot(CcTest::i_isolate(), 0)) return;
utils_has_been_collected = false;
@@ -6169,57 +6206,11 @@ TEST(OldSpaceAllocationCounter) {
}
-TEST(NewSpaceAllocationThroughput) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- GCTracer* tracer = heap->tracer();
- int time1 = 100;
- size_t counter1 = 1000;
- tracer->SampleAllocation(time1, counter1, 0);
- int time2 = 200;
- size_t counter2 = 2000;
- tracer->SampleAllocation(time2, counter2, 0);
- size_t throughput =
- tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
- CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- tracer->SampleAllocation(time3, counter3, 0);
- throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
- CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
-}
-
-
-TEST(NewSpaceAllocationThroughput2) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- GCTracer* tracer = heap->tracer();
- int time1 = 100;
- size_t counter1 = 1000;
- tracer->SampleAllocation(time1, counter1, 0);
- int time2 = 200;
- size_t counter2 = 2000;
- tracer->SampleAllocation(time2, counter2, 0);
- size_t throughput =
- tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
- CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- tracer->SampleAllocation(time3, counter3, 0);
- throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
- CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
-}
-
-
static void CheckLeak(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = CcTest::i_isolate();
Object* message =
*reinterpret_cast<Object**>(isolate->pending_message_obj_address());
- CHECK(message->IsTheHole());
+ CHECK(message->IsTheHole(isolate));
}
@@ -6304,54 +6295,29 @@ TEST(CanonicalSharedFunctionInfo) {
"check(g1, g2);");
}
-
-TEST(OldGenerationAllocationThroughput) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- GCTracer* tracer = heap->tracer();
- int time1 = 100;
- size_t counter1 = 1000;
- tracer->SampleAllocation(time1, 0, counter1);
- int time2 = 200;
- size_t counter2 = 2000;
- tracer->SampleAllocation(time2, 0, counter2);
- size_t throughput = static_cast<size_t>(
- tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
- CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- tracer->SampleAllocation(time3, 0, counter3);
- throughput = static_cast<size_t>(
- tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
- CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
-}
-
-
-TEST(AllocationThroughput) {
+TEST(RemoveCodeFromSharedFunctionInfoButNotFromClosure) {
CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- GCTracer* tracer = heap->tracer();
- int time1 = 100;
- size_t counter1 = 1000;
- tracer->SampleAllocation(time1, counter1, counter1);
- int time2 = 200;
- size_t counter2 = 2000;
- tracer->SampleAllocation(time2, counter2, counter2);
- size_t throughput = static_cast<size_t>(
- tracer->AllocationThroughputInBytesPerMillisecond(100));
- CHECK_EQ(2 * (counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- tracer->SampleAllocation(time3, counter3, counter3);
- throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
- CHECK_EQ(2 * (counter3 - counter1) / (time3 - time1), throughput);
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ global->Set(isolate, "check", v8::FunctionTemplate::New(
+ isolate, CheckEqualSharedFunctionInfos));
+ global->Set(isolate, "remove",
+ v8::FunctionTemplate::New(isolate, RemoveCodeAndGC));
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Context::Scope cscope(context);
+ CompileRun(
+ "function f() { return function g() {}; }"
+ "var g1 = f();"
+ "var g2 = f();"
+ "check(g1, g2);"
+ "g1();"
+ "g2();"
+ "remove(g1);"
+ "g2();"
+ "check(g1, g2);");
}
-
TEST(ContextMeasure) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -6452,7 +6418,7 @@ TEST(Regress519319) {
parent.Reset(isolate, v8::Object::New(isolate));
child.Reset(isolate, v8::Object::New(isolate));
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
heap->CollectGarbage(OLD_SPACE);
{
UniqueId id = MakeUniqueId(parent);
@@ -6511,7 +6477,7 @@ HEAP_TEST(Regress587004) {
array->set(i, *number);
}
heap->CollectGarbage(OLD_SPACE);
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*array, N - 1);
heap->mark_compact_collector()->EnsureSweepingCompleted();
ByteArray* byte_array;
@@ -6594,7 +6560,7 @@ HEAP_TEST(Regress589413) {
}
}
}
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
for (size_t j = 0; j < arrays.size(); j++) {
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(arrays[j], N - 1);
}
@@ -6604,6 +6570,106 @@ HEAP_TEST(Regress589413) {
heap->CollectGarbage(OLD_SPACE);
}
+TEST(Regress598319) {
+ // This test ensures that no white object can cross the progress bar of a
+ // large object during incremental marking. It checks this by calling Shift()
+ // while incremental marking is in progress.
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+
+ const int kNumberOfObjects = Page::kMaxRegularHeapObjectSize / kPointerSize;
+
+ struct Arr {
+ Arr(Isolate* isolate, int number_of_objects) {
+ root = isolate->factory()->NewFixedArray(1, TENURED);
+ {
+ // Temporary scope to avoid getting any other objects into the root set.
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<FixedArray> tmp =
+ isolate->factory()->NewFixedArray(number_of_objects);
+ root->set(0, *tmp);
+ for (int i = 0; i < get()->length(); i++) {
+ tmp = isolate->factory()->NewFixedArray(100, TENURED);
+ get()->set(i, *tmp);
+ }
+ }
+ }
+
+ FixedArray* get() { return FixedArray::cast(root->get(0)); }
+
+ Handle<FixedArray> root;
+ } arr(isolate, kNumberOfObjects);
+
+ CHECK_EQ(arr.get()->length(), kNumberOfObjects);
+ CHECK(heap->lo_space()->Contains(arr.get()));
+ LargePage* page = heap->lo_space()->FindPage(arr.get()->address());
+ CHECK_NOT_NULL(page);
+
+ // GC to clean up state.
+ heap->CollectGarbage(OLD_SPACE);
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+
+ CHECK(heap->lo_space()->Contains(arr.get()));
+ CHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(arr.get())));
+ for (int i = 0; i < arr.get()->length(); i++) {
+ CHECK(Marking::IsWhite(
+ ObjectMarking::MarkBitFrom(HeapObject::cast(arr.get()->get(i)))));
+ }
+
+ // Start incremental marking.
+ IncrementalMarking* marking = heap->incremental_marking();
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
+ CHECK(marking->IsMarking());
+
+ // Check that we have not marked the interesting array during root scanning.
+ for (int i = 0; i < arr.get()->length(); i++) {
+ CHECK(Marking::IsWhite(
+ ObjectMarking::MarkBitFrom(HeapObject::cast(arr.get()->get(i)))));
+ }
+
+ // Now we search for a state where we are in incremental marking and have
+ // only partially marked the large object.
+ while (!marking->IsComplete()) {
+ marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
+ CHECK_NE(page->progress_bar(), arr.get()->Size());
+ {
+ // Shift by 1, effectively moving one white object across the progress
+ // bar, meaning that we will miss marking it.
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<JSArray> js_array = isolate->factory()->NewJSArrayWithElements(
+ Handle<FixedArray>(arr.get()));
+ js_array->GetElementsAccessor()->Shift(js_array);
+ }
+ break;
+ }
+ }
+
+ // Finish marking with bigger steps to speed up the test.
+ while (!marking->IsComplete()) {
+ marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (marking->IsReadyToOverApproximateWeakClosure()) {
+ marking->FinalizeIncrementally();
+ }
+ }
+ CHECK(marking->IsComplete());
+
+ // All objects need to be black after marking. If a white object crossed the
+ // progress bar, we would fail here.
+ for (int i = 0; i < arr.get()->length(); i++) {
+ CHECK(Marking::IsBlack(
+ ObjectMarking::MarkBitFrom(HeapObject::cast(arr.get()->get(i)))));
+ }
+}
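
To make the hazard concrete, here is a self-contained toy model of the progress-bar interaction, not V8 code: marking visits elements up to a watermark, and a shift that moves an unvisited element below that watermark makes marking skip it.

    #include <cassert>
    #include <vector>

    int main() {
      // One mark bit per array element; the progress bar records how far an
      // incremental marker has come.
      std::vector<bool> marked(8, false);
      size_t progress_bar = 0;

      // The marker visits the first half, then yields (one incremental step).
      for (; progress_bar < 4; ++progress_bar) marked[progress_bar] = true;

      // Shift() moves every element down one slot; the unvisited element that
      // was at index 4 is now at index 3 -- behind the progress bar.
      std::vector<bool> shifted(marked.begin() + 1, marked.end());
      shifted.push_back(true);  // trailing slot, assume already marked
      marked = shifted;         // the progress bar is not adjusted

      // Marking resumes from the bar and runs to completion.
      for (size_t i = progress_bar; i < marked.size(); ++i) marked[i] = true;

      // The element at index 3 was never visited -- the bug the test guards.
      assert(!marked[3]);
      return 0;
    }
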
+
TEST(Regress609761) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -6617,5 +6683,406 @@ TEST(Regress609761) {
CHECK_EQ(size_after, size_before + array->Size());
}
+TEST(Regress615489) {
+ FLAG_black_allocation = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ heap->CollectAllGarbage();
+
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
+ CHECK(marking->IsMarking());
+ marking->StartBlackAllocationForTesting();
+ {
+ AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ v8::HandleScope inner(CcTest::isolate());
+ isolate->factory()->NewFixedArray(500, TENURED)->Size();
+ }
+ while (!marking->IsComplete()) {
+ marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (marking->IsReadyToOverApproximateWeakClosure()) {
+ marking->FinalizeIncrementally();
+ }
+ }
+ CHECK(marking->IsComplete());
+ intptr_t size_before = heap->SizeOfObjects();
+ CcTest::heap()->CollectAllGarbage();
+ intptr_t size_after = heap->SizeOfObjects();
+ // Live size does not increase after garbage collection.
+ CHECK_LE(size_after, size_before);
+}
+
+class StaticOneByteResource : public v8::String::ExternalOneByteStringResource {
+ public:
+ explicit StaticOneByteResource(const char* data) : data_(data) {}
+
+ ~StaticOneByteResource() {}
+
+ const char* data() const { return data_; }
+
+ size_t length() const { return strlen(data_); }
+
+ private:
+ const char* data_;
+};
+
+TEST(Regress631969) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ FLAG_parallel_compaction = false;
+ FLAG_concurrent_sweeping = false;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ // Get the heap in clean state.
+ heap->CollectGarbage(OLD_SPACE);
+ heap->CollectGarbage(OLD_SPACE);
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ // Allocate two strings on a fresh page and mark the page as an evacuation
+ // candidate.
+ heap::SimulateFullSpace(heap->old_space());
+ Handle<String> s1 = factory->NewStringFromStaticChars("123456789", TENURED);
+ Handle<String> s2 = factory->NewStringFromStaticChars("01234", TENURED);
+ Page::FromAddress(s1->address())
+ ->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+
+ heap::SimulateIncrementalMarking(heap, false);
+
+ // Allocate a cons string and promote it to a fresh page in the old space.
+ heap::SimulateFullSpace(heap->old_space());
+ Handle<String> s3;
+ factory->NewConsString(s1, s2).ToHandle(&s3);
+ heap->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
+
+ // Finish incremental marking.
+ IncrementalMarking* marking = heap->incremental_marking();
+ while (!marking->IsComplete()) {
+ marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (marking->IsReadyToOverApproximateWeakClosure()) {
+ marking->FinalizeIncrementally();
+ }
+ }
+
+ {
+ StaticOneByteResource external_string("12345678901234");
+ s3->MakeExternal(&external_string);
+ heap->CollectGarbage(OLD_SPACE);
+ }
+}
+
+TEST(LeftTrimFixedArrayInBlackArea) {
+ FLAG_black_allocation = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ heap->CollectAllGarbage();
+
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
+ CHECK(marking->IsMarking());
+ marking->StartBlackAllocationForTesting();
+
+ // Ensure that we allocate a new page, set up a bump pointer area, and
+ // perform the allocation in a black area.
+ heap::SimulateFullSpace(heap->old_space());
+ isolate->factory()->NewFixedArray(4, TENURED);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(50, TENURED);
+ CHECK(heap->old_space()->Contains(*array));
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(*array)));
+
+ // Now left-trim the allocated black area. A filler has to be installed
+ // for the trimmed area, and all mark bits of the trimmed area have to be
+ // cleared.
+ FixedArrayBase* trimmed = heap->LeftTrimFixedArray(*array, 10);
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(trimmed)));
+
+ heap::GcAndSweep(heap, OLD_SPACE);
+}
+
+TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
+ FLAG_black_allocation = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ heap->CollectAllGarbage();
+
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
+ CHECK(marking->IsMarking());
+ marking->StartBlackAllocationForTesting();
+
+ // Ensure that we allocate a new page, set up a bump pointer area, and
+ // perform the allocation in a black area.
+ heap::SimulateFullSpace(heap->old_space());
+ isolate->factory()->NewFixedArray(10, TENURED);
+
+ // Allocate the fixed array that will be trimmed later.
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(100, TENURED);
+ Address start_address = array->address();
+ Address end_address = start_address + array->Size();
+ Page* page = Page::FromAddress(start_address);
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(*array)));
+ CHECK(page->markbits()->AllBitsSetInRange(
+ page->AddressToMarkbitIndex(start_address),
+ page->AddressToMarkbitIndex(end_address)));
+ CHECK(heap->old_space()->Contains(*array));
+
+ FixedArrayBase* previous = *array;
+ FixedArrayBase* trimmed;
+
+ // First trim in one-word steps.
+ for (int i = 0; i < 10; i++) {
+ trimmed = heap->LeftTrimFixedArray(previous, 1);
+ HeapObject* filler = HeapObject::FromAddress(previous->address());
+ CHECK(filler->IsFiller());
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(trimmed)));
+ CHECK(Marking::IsImpossible(ObjectMarking::MarkBitFrom(previous)));
+ previous = trimmed;
+ }
+
+ // Then trim in two- and three-word steps.
+ for (int i = 2; i <= 3; i++) {
+ for (int j = 0; j < 10; j++) {
+ trimmed = heap->LeftTrimFixedArray(previous, i);
+ HeapObject* filler = HeapObject::FromAddress(previous->address());
+ CHECK(filler->IsFiller());
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(trimmed)));
+ CHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(previous)));
+ previous = trimmed;
+ }
+ }
+
+ heap::GcAndSweep(heap, OLD_SPACE);
+}
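
A rough model of the invariant the trimming tests check, using a plain bit vector in place of V8's mark bitmap (all names here are illustrative):

    #include <cassert>
    #include <vector>

    // An object modeled as a range of words with one mark bit per word.
    struct BlackArea {
      std::vector<bool> bits;  // mark bits, one per word
      size_t start;            // first word of the live object

      // Left-trimming by n words installs a filler over [start, start + n)
      // and clears those mark bits; the surviving object stays black.
      void LeftTrim(size_t n) {
        for (size_t i = start; i < start + n; ++i) bits[i] = false;
        start += n;
      }
    };

    int main() {
      BlackArea area{std::vector<bool>(100, true), 0};  // black allocation
      area.LeftTrim(1);  // one-word step
      area.LeftTrim(2);  // two-word step
      area.LeftTrim(3);  // three-word step
      for (size_t i = 0; i < area.start; ++i)
        assert(!area.bits[i]);  // trimmed filler area: bits cleared
      for (size_t i = area.start; i < area.bits.size(); ++i)
        assert(area.bits[i]);   // surviving object: still black
      return 0;
    }
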
+
+TEST(ContinuousRightTrimFixedArrayInBlackArea) {
+ FLAG_black_allocation = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ heap->CollectAllGarbage();
+
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
+ CHECK(marking->IsMarking());
+ marking->StartBlackAllocationForTesting();
+
+ // Ensure that we allocate a new page, set up a bump pointer area, and
+ // perform the allocation in a black area.
+ heap::SimulateFullSpace(heap->old_space());
+ isolate->factory()->NewFixedArray(10, TENURED);
+
+ // Allocate the fixed array that will be trimmed later.
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(100, TENURED);
+ Address start_address = array->address();
+ Address end_address = start_address + array->Size();
+ Page* page = Page::FromAddress(start_address);
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(*array)));
+ CHECK(page->markbits()->AllBitsSetInRange(
+ page->AddressToMarkbitIndex(start_address),
+ page->AddressToMarkbitIndex(end_address)));
+ CHECK(heap->old_space()->Contains(*array));
+
+ // Trim it once by one word to make checking for the white mark color uniform.
+ Address previous = end_address - kPointerSize;
+ heap->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(*array, 1);
+ HeapObject* filler = HeapObject::FromAddress(previous);
+ CHECK(filler->IsFiller());
+ CHECK(Marking::IsImpossible(ObjectMarking::MarkBitFrom(previous)));
+
+ // Trim 10 times by one, two, and three words.
+ for (int i = 1; i <= 3; i++) {
+ for (int j = 0; j < 10; j++) {
+ previous -= kPointerSize * i;
+ heap->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(*array, i);
+ HeapObject* filler = HeapObject::FromAddress(previous);
+ CHECK(filler->IsFiller());
+ CHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(previous)));
+ }
+ }
+
+ heap::GcAndSweep(heap, OLD_SPACE);
+}
+
+TEST(SlotFilteringAfterBlackAreas) {
+ FLAG_black_allocation = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ MarkCompactCollector* mark_compact_collector = heap->mark_compact_collector();
+ heap->CollectAllGarbage();
+
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ i::IncrementalMarking* marking = heap->incremental_marking();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
+ CHECK(marking->IsMarking());
+ marking->StartBlackAllocationForTesting();
+
+ // Ensure that we allocate a new page, set up a bump pointer area, and
+ // perform the allocation in a black area.
+ heap::SimulateFullSpace(heap->old_space());
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(10, TENURED);
+ Page* page = Page::FromAddress(array->address());
+
+ // After allocation we empty the allocation info so that the black area
+ // covers only the allocated array.
+ heap->old_space()->EmptyAllocationInfo();
+
+ // Slots in the black area are part of the black object.
+ CHECK(mark_compact_collector->IsSlotInBlackObject(page, array->address()));
+ CHECK(mark_compact_collector->IsSlotInBlackObject(
+ page, array->address() + array->Size() - kPointerSize));
+
+ // Slots after the black area are not part of the black object and have to
+ // be filtered out.
+ CHECK(!mark_compact_collector->IsSlotInBlackObject(
+ page, array->address() + array->Size()));
+}
+
+TEST(Regress618958) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ bool isolate_is_locked = true;
+ heap->update_external_memory(100 * MB);
+ int mark_sweep_count_before = heap->ms_count();
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical,
+ isolate_is_locked);
+ int mark_sweep_count_after = heap->ms_count();
+ int mark_sweeps_performed = mark_sweep_count_after - mark_sweep_count_before;
+ // The memory pressure handler either performed two GCs, or performed one
+ // and started incremental marking.
+ CHECK(mark_sweeps_performed == 2 ||
+ (mark_sweeps_performed == 1 &&
+ !heap->incremental_marking()->IsStopped()));
+}
+
+TEST(UncommitUnusedLargeObjectMemory) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+ CHECK(chunk->owner()->identity() == LO_SPACE);
+
+ intptr_t size_before = array->Size();
+ size_t committed_memory_before = chunk->CommittedPhysicalMemory();
+
+ array->Shrink(1);
+ CHECK(array->Size() < size_before);
+
+ CcTest::heap()->CollectAllGarbage();
+ CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
+ size_t shrinked_size =
+ RoundUp((array->address() - chunk->address()) + array->Size(),
+ base::OS::CommitPageSize());
+ CHECK_EQ(shrinked_size, chunk->CommittedPhysicalMemory());
+}
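
The expected committed size in the test above is simply the live prefix of the chunk rounded up to the OS commit page size. A standalone illustration (the 4 KB commit page size and the offsets are assumed example values):

    #include <cassert>
    #include <cstddef>

    // Round value up to the next multiple of a power-of-two alignment.
    size_t RoundUp(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t kCommitPageSize = 4096;  // assumed OS commit page size
      const size_t object_offset = 64;      // array->address() - chunk->address()
      const size_t object_size = 24;        // array->Size() after Shrink(1)
      // Everything beyond this boundary can be uncommitted by the GC.
      assert(RoundUp(object_offset + object_size, kCommitPageSize) == 4096);
      return 0;
    }
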
+
+TEST(RememberedSetRemoveRange) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(Page::kPageSize /
+ kPointerSize);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
+ CHECK(chunk->owner()->identity() == LO_SPACE);
+ Address start = array->address();
+ // Maps each slot to a boolean indicating whether it should be in the set.
+ std::map<Address, bool> slots;
+ slots[start + 0] = true;
+ slots[start + kPointerSize] = true;
+ slots[start + Page::kPageSize - kPointerSize] = true;
+ slots[start + Page::kPageSize] = true;
+ slots[start + Page::kPageSize + kPointerSize] = true;
+ slots[chunk->area_end() - kPointerSize] = true;
+
+ for (auto x : slots) {
+ RememberedSet<OLD_TO_NEW>::Insert(chunk, x.first);
+ }
+
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
+ CHECK(slots[addr]);
+ return KEEP_SLOT;
+ });
+
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, start + kPointerSize);
+ slots[start] = false;
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
+ CHECK(slots[addr]);
+ return KEEP_SLOT;
+ });
+
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start + kPointerSize,
+ start + Page::kPageSize);
+ slots[start + kPointerSize] = false;
+ slots[start + Page::kPageSize - kPointerSize] = false;
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
+ CHECK(slots[addr]);
+ return KEEP_SLOT;
+ });
+
+ RememberedSet<OLD_TO_NEW>::RemoveRange(
+ chunk, start, start + Page::kPageSize + kPointerSize);
+ slots[start + Page::kPageSize] = false;
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
+ CHECK(slots[addr]);
+ return KEEP_SLOT;
+ });
+
+ RememberedSet<OLD_TO_NEW>::RemoveRange(
+ chunk, chunk->area_end() - kPointerSize, chunk->area_end());
+ slots[chunk->area_end() - kPointerSize] = false;
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
+ CHECK(slots[addr]);
+ return KEEP_SLOT;
+ });
+}
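
The semantics exercised here map onto an ordered set of addresses with half-open removal ranges; a minimal model (std::set standing in for the slot-set buckets, names illustrative):

    #include <cassert>
    #include <set>

    using Address = unsigned long;

    // Minimal stand-in for a remembered set: Insert records a slot, and
    // RemoveRange drops every slot in the half-open interval [start, end).
    struct ToyRememberedSet {
      std::set<Address> slots;
      void Insert(Address a) { slots.insert(a); }
      void RemoveRange(Address start, Address end) {
        slots.erase(slots.lower_bound(start), slots.lower_bound(end));
      }
    };

    int main() {
      const Address kPointerSize = 8, kPageSize = 4096;
      ToyRememberedSet rs;
      rs.Insert(0);
      rs.Insert(kPointerSize);
      rs.Insert(kPageSize - kPointerSize);
      rs.Insert(kPageSize);

      rs.RemoveRange(0, kPointerSize);  // half-open: slot 0 goes, slot 8 stays
      assert(rs.slots.count(0) == 0 && rs.slots.count(kPointerSize) == 1);

      rs.RemoveRange(kPointerSize, kPageSize);  // drops 8 and 4088, keeps 4096
      assert(rs.slots == std::set<Address>{kPageSize});
      return 0;
    }
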
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index 765ae53afb..59697a94a1 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -19,8 +19,7 @@
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap/utils-inl.h"
-
+#include "test/cctest/heap/heap-utils.h"
using v8::IdleTask;
using v8::Task;
@@ -86,7 +85,7 @@ class MockPlatform : public v8::Platform {
}
uint64_t AddTraceEvent(char phase, const uint8_t* categoryEnabledFlag,
- const char* name, uint64_t id,
+ const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int numArgs, const char** argNames,
const uint8_t* argTypes, const uint64_t* argValues,
unsigned int flags) override {
@@ -120,7 +119,7 @@ TEST(IncrementalMarkingUsingIdleTasks) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
MockPlatform platform(old_platform);
i::V8::SetPlatformForTesting(&platform);
- SimulateFullSpace(CcTest::heap()->old_space());
+ i::heap::SimulateFullSpace(CcTest::heap()->old_space());
i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
marking->Start();
@@ -145,7 +144,7 @@ TEST(IncrementalMarkingUsingIdleTasksAfterGC) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
MockPlatform platform(old_platform);
i::V8::SetPlatformForTesting(&platform);
- SimulateFullSpace(CcTest::heap()->old_space());
+ i::heap::SimulateFullSpace(CcTest::heap()->old_space());
CcTest::heap()->CollectAllGarbage();
i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
@@ -171,7 +170,7 @@ TEST(IncrementalMarkingUsingDelayedTasks) {
v8::Platform* old_platform = i::V8::GetCurrentPlatform();
MockPlatform platform(old_platform);
i::V8::SetPlatformForTesting(&platform);
- SimulateFullSpace(CcTest::heap()->old_space());
+ i::heap::SimulateFullSpace(CcTest::heap()->old_space());
i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
marking->Start();
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index bf4d3cc999..5a0ff2fbc4 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -170,7 +170,7 @@ TEST(MergeSuccessful) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
- Address base1 = AllocateLabBackingStore(heap, kLabSize);
+ Address base1 = AllocateLabBackingStore(heap, 2 * kLabSize);
Address limit1 = base1 + kLabSize;
Address base2 = limit1;
Address limit2 = base2 + kLabSize;
@@ -226,7 +226,7 @@ TEST(MergeFailed) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
const int kLabSize = 2 * KB;
- Address base1 = AllocateLabBackingStore(heap, kLabSize);
+ Address base1 = AllocateLabBackingStore(heap, 3 * kLabSize);
Address base2 = base1 + kLabSize;
Address base3 = base2 + kLabSize;
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index cfcf149c61..1e5d30d0e7 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -43,8 +43,7 @@
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/utils-inl.h"
-
+#include "test/cctest/heap/heap-utils.h"
using namespace v8::internal;
using v8::Just;
@@ -76,58 +75,49 @@ TEST(MarkingDeque) {
DeleteArray(mem);
}
-
-HEAP_TEST(Promotion) {
+TEST(Promotion) {
CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- heap->ConfigureHeap(1, 1, 1, 0);
-
- v8::HandleScope sc(CcTest::isolate());
-
- // Allocate a fixed array in the new space.
- int array_length =
- (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
- (4 * kPointerSize);
- Object* obj = heap->AllocateFixedArray(array_length).ToObjectChecked();
- Handle<FixedArray> array(FixedArray::cast(obj));
+ Isolate* isolate = CcTest::i_isolate();
+ {
+ v8::HandleScope sc(CcTest::isolate());
+ Heap* heap = isolate->heap();
- // Array should be in the new space.
- CHECK(heap->InSpace(*array, NEW_SPACE));
+ heap::SealCurrentObjects(heap);
- // Call mark compact GC, so array becomes an old object.
- heap->CollectAllGarbage();
- heap->CollectAllGarbage();
+ int array_length =
+ heap::FixedArrayLenFromSize(Page::kMaxRegularHeapObjectSize);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(array_length);
- // Array now sits in the old space
- CHECK(heap->InSpace(*array, OLD_SPACE));
+ // Array should be in the new space.
+ CHECK(heap->InSpace(*array, NEW_SPACE));
+ heap->CollectAllGarbage();
+ heap->CollectAllGarbage();
+ CHECK(heap->InSpace(*array, OLD_SPACE));
+ }
}
-
HEAP_TEST(NoPromotion) {
CcTest::InitializeVM();
- Heap* heap = CcTest::heap();
- heap->ConfigureHeap(1, 1, 1, 0);
-
- v8::HandleScope sc(CcTest::isolate());
-
- // Allocate a big fixed array in the new space.
- int array_length =
- (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
- (2 * kPointerSize);
- Object* obj = heap->AllocateFixedArray(array_length).ToObjectChecked();
- Handle<FixedArray> array(FixedArray::cast(obj));
+ Isolate* isolate = CcTest::i_isolate();
+ {
+ v8::HandleScope sc(CcTest::isolate());
+ Heap* heap = isolate->heap();
- // Array should be in the new space.
- CHECK(heap->InSpace(*array, NEW_SPACE));
+ heap::SealCurrentObjects(heap);
- // Simulate a full old space to make promotion fail.
- SimulateFullSpace(heap->old_space());
+ int array_length =
+ heap::FixedArrayLenFromSize(Page::kMaxRegularHeapObjectSize);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(array_length);
- // Call mark compact GC, and it should pass.
- heap->CollectGarbage(OLD_SPACE);
+ heap->set_force_oom(true);
+ // Array should be in the new space.
+ CHECK(heap->InSpace(*array, NEW_SPACE));
+ heap->CollectAllGarbage();
+ heap->CollectAllGarbage();
+ CHECK(heap->InSpace(*array, NEW_SPACE));
+ }
}
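
The helper heap::FixedArrayLenFromSize used in the rewritten tests presumably inverts the FixedArray size computation, size = header + length * pointer size. In sketch form (the header and pointer sizes are assumed example values for a 64-bit build):

    #include <cassert>

    // Given a byte budget, how many elements fit in a FixedArray?
    int FixedArrayLenFromSize(int size, int header_size, int pointer_size) {
      return (size - header_size) / pointer_size;
    }

    int main() {
      const int kPointerSize = 8;            // assumed 64-bit build
      const int kFixedArrayHeaderSize = 16;  // assumed: map word + length word
      assert(FixedArrayLenFromSize(4096, kFixedArrayHeaderSize, kPointerSize) ==
             510);
      return 0;
    }
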
-
HEAP_TEST(MarkCompactCollector) {
FLAG_incremental_marking = false;
FLAG_retain_maps_for_n_gc = 0;
@@ -236,8 +226,7 @@ TEST(MapCompact) {
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+static void WeakPointerCallback(const v8::WeakCallbackInfo<void>& data) {
std::pair<v8::Persistent<v8::Value>*, int>* p =
reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
data.GetParameter());
@@ -262,17 +251,17 @@ HEAP_TEST(ObjectGroups) {
Handle<Object> g1c1 =
global_handles->Create(heap->AllocateFixedArray(1).ToObjectChecked());
std::pair<Handle<Object>*, int> g1s1_and_id(&g1s1, 1234);
- GlobalHandles::MakeWeak(g1s1.location(),
- reinterpret_cast<void*>(&g1s1_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ g1s1.location(), reinterpret_cast<void*>(&g1s1_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
std::pair<Handle<Object>*, int> g1s2_and_id(&g1s2, 1234);
- GlobalHandles::MakeWeak(g1s2.location(),
- reinterpret_cast<void*>(&g1s2_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ g1s2.location(), reinterpret_cast<void*>(&g1s2_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
std::pair<Handle<Object>*, int> g1c1_and_id(&g1c1, 1234);
- GlobalHandles::MakeWeak(g1c1.location(),
- reinterpret_cast<void*>(&g1c1_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ g1c1.location(), reinterpret_cast<void*>(&g1c1_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
Handle<Object> g2s1 =
global_handles->Create(heap->AllocateFixedArray(1).ToObjectChecked());
@@ -281,17 +270,17 @@ HEAP_TEST(ObjectGroups) {
Handle<Object> g2c1 =
global_handles->Create(heap->AllocateFixedArray(1).ToObjectChecked());
std::pair<Handle<Object>*, int> g2s1_and_id(&g2s1, 1234);
- GlobalHandles::MakeWeak(g2s1.location(),
- reinterpret_cast<void*>(&g2s1_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ g2s1.location(), reinterpret_cast<void*>(&g2s1_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
std::pair<Handle<Object>*, int> g2s2_and_id(&g2s2, 1234);
- GlobalHandles::MakeWeak(g2s2.location(),
- reinterpret_cast<void*>(&g2s2_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ g2s2.location(), reinterpret_cast<void*>(&g2s2_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
std::pair<Handle<Object>*, int> g2c1_and_id(&g2c1, 1234);
- GlobalHandles::MakeWeak(g2c1.location(),
- reinterpret_cast<void*>(&g2c1_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ g2c1.location(), reinterpret_cast<void*>(&g2c1_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
Handle<Object> root = global_handles->Create(*g1s1); // make a root.
@@ -317,9 +306,9 @@ HEAP_TEST(ObjectGroups) {
// Weaken the root.
std::pair<Handle<Object>*, int> root_and_id(&root, 1234);
- GlobalHandles::MakeWeak(root.location(),
- reinterpret_cast<void*>(&root_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ root.location(), reinterpret_cast<void*>(&root_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
global_handles->ClearWeakness(g1c1.location());
@@ -343,12 +332,12 @@ HEAP_TEST(ObjectGroups) {
CHECK_EQ(5, NumberOfWeakCalls);
// And now make children weak again and collect them.
- GlobalHandles::MakeWeak(g1c1.location(),
- reinterpret_cast<void*>(&g1c1_and_id),
- &WeakPointerCallback);
- GlobalHandles::MakeWeak(g2c1.location(),
- reinterpret_cast<void*>(&g2c1_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ g1c1.location(), reinterpret_cast<void*>(&g1c1_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
+ GlobalHandles::MakeWeak(
+ g2c1.location(), reinterpret_cast<void*>(&g2c1_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
heap->CollectGarbage(OLD_SPACE);
CHECK_EQ(7, NumberOfWeakCalls);
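
The mechanical change repeated throughout this hunk is the migration from the removed WeakCallbackData-based callbacks to WeakCallbackInfo plus an explicit WeakCallbackType. A sketch of the same change on the embedder-facing API, which mirrors the internal GlobalHandles::MakeWeak calls above (helper names illustrative):

    #include <v8.h>

    // New-style weak callback: receives WeakCallbackInfo instead of the old
    // WeakCallbackData<v8::Value, void>.
    void OnWeak(const v8::WeakCallbackInfo<void>& data) {
      // data.GetParameter() returns the void* registered in MakeItWeak().
    }

    // The callback type (kParameter here) must now be passed explicitly.
    void MakeItWeak(v8::Persistent<v8::Value>* persistent, void* parameter) {
      persistent->SetWeak(parameter, OnWeak, v8::WeakCallbackType::kParameter);
    }
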
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
new file mode 100644
index 0000000000..4ec2e2a416
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -0,0 +1,129 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/array-buffer-tracker.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace {
+
+v8::Isolate* NewIsolateForPagePromotion() {
+ i::FLAG_page_promotion = true;
+ i::FLAG_page_promotion_threshold = 0; // %
+ i::FLAG_min_semi_space_size = 8 * (i::Page::kPageSize / i::MB);
+ // We cannot optimize for size as we require a new space with more than one
+ // page.
+ i::FLAG_optimize_for_size = false;
+ // Set max_semi_space_size because it could already have been initialized
+ // by an implication of optimize_for_size.
+ i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ return isolate;
+}
+
+} // namespace
+
+namespace v8 {
+namespace internal {
+
+UNINITIALIZED_TEST(PagePromotion_NewToOld) {
+ v8::Isolate* isolate = NewIsolateForPagePromotion();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ Heap* heap = i_isolate->heap();
+
+ std::vector<Handle<FixedArray>> handles;
+ heap::SimulateFullSpace(heap->new_space(), &handles);
+ heap->CollectGarbage(NEW_SPACE);
+ CHECK_GT(handles.size(), 0u);
+ // First object in handles should be on the first page.
+ Handle<FixedArray> first_object = handles.front();
+ Page* first_page = Page::FromAddress(first_object->address());
+ // To perform a sanity check on live bytes we need to mark the heap.
+ heap::SimulateIncrementalMarking(heap, true);
+ // Sanity check that the page meets the requirements for promotion.
+ const int threshold_bytes =
+ FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+ CHECK_GE(first_page->LiveBytes(), threshold_bytes);
+
+ // Actual checks: The page is in new space first, but is moved to old space
+ // during a full GC.
+ CHECK(heap->new_space()->ContainsSlow(first_page->address()));
+ CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
+ CHECK(heap->old_space()->ContainsSlow(first_page->address()));
+ }
+}
+
+UNINITIALIZED_TEST(PagePromotion_NewToNew) {
+ v8::Isolate* isolate = NewIsolateForPagePromotion();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ Heap* heap = i_isolate->heap();
+
+ std::vector<Handle<FixedArray>> handles;
+ heap::SimulateFullSpace(heap->new_space(), &handles);
+ CHECK_GT(handles.size(), 0u);
+ // The last object in handles should be on the last page, which does not
+ // contain the age mark.
+ Handle<FixedArray> last_object = handles.back();
+ Page* to_be_promoted_page = Page::FromAddress(last_object->address());
+ CHECK(to_be_promoted_page->Contains(last_object->address()));
+ CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
+ CHECK(to_be_promoted_page->Contains(last_object->address()));
+ }
+}
+
+UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
+ // Test makes sure JSArrayBuffer backing stores are still tracked after
+ // new-to-new promotion.
+ v8::Isolate* isolate = NewIsolateForPagePromotion();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ Heap* heap = i_isolate->heap();
+
+ // Fill the current page, which potentially contains the age mark.
+ heap::FillCurrentPage(heap->new_space());
+
+ // Allocate a buffer we would like to check against.
+ Handle<JSArrayBuffer> buffer =
+ i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
+ JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100);
+ std::vector<Handle<FixedArray>> handles;
+ // Simulate a full space, filling the interesting page with live objects.
+ heap::SimulateFullSpace(heap->new_space(), &handles);
+ CHECK_GT(handles.size(), 0u);
+ // The first object in handles should be on the same to-be-promoted page
+ // as the array buffer, which does not contain the age mark.
+ Handle<FixedArray> first_object = handles.front();
+ Page* to_be_promoted_page = Page::FromAddress(first_object->address());
+ CHECK(to_be_promoted_page->Contains(first_object->address()));
+ CHECK(to_be_promoted_page->Contains(buffer->address()));
+ CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
+ CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
+ heap::GcAndSweep(heap, OLD_SPACE);
+ CHECK(heap->new_space()->ToSpaceContainsSlow(first_object->address()));
+ CHECK(heap->new_space()->ToSpaceContainsSlow(buffer->address()));
+ CHECK(to_be_promoted_page->Contains(first_object->address()));
+ CHECK(to_be_promoted_page->Contains(buffer->address()));
+ CHECK(ArrayBufferTracker::IsTracked(*buffer));
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 5eb1549e11..2328518f2a 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -32,7 +32,6 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/utils-inl.h"
namespace v8 {
namespace internal {
@@ -107,13 +106,12 @@ TEST(Page) {
class TestMemoryAllocatorScope {
public:
TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
- : isolate_(isolate),
- old_allocator_(isolate->memory_allocator_) {
- isolate->memory_allocator_ = allocator;
+ : isolate_(isolate), old_allocator_(isolate->heap()->memory_allocator()) {
+ isolate->heap()->memory_allocator_ = allocator;
}
~TestMemoryAllocatorScope() {
- isolate_->memory_allocator_ = old_allocator_;
+ isolate_->heap()->memory_allocator_ = old_allocator_;
}
private:
@@ -129,12 +127,12 @@ class TestCodeRangeScope {
public:
TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
: isolate_(isolate),
- old_code_range_(isolate->code_range_) {
- isolate->code_range_ = code_range;
+ old_code_range_(isolate->heap()->memory_allocator()->code_range()) {
+ isolate->heap()->memory_allocator()->code_range_ = code_range;
}
~TestCodeRangeScope() {
- isolate_->code_range_ = old_code_range_;
+ isolate_->heap()->memory_allocator()->code_range_ = old_code_range_;
}
private:
@@ -153,50 +151,49 @@ static void VerifyMemoryChunk(Isolate* isolate,
size_t second_commit_area_size,
Executability executable) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(),
- heap->MaxExecutableSize()));
- TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
- TestCodeRangeScope test_code_range_scope(isolate, code_range);
-
- size_t header_size = (executable == EXECUTABLE)
- ? MemoryAllocator::CodePageGuardStartOffset()
- : MemoryChunk::kObjectStartOffset;
- size_t guard_size = (executable == EXECUTABLE)
- ? MemoryAllocator::CodePageGuardSize()
- : 0;
-
- MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
- commit_area_size,
- executable,
- NULL);
- size_t alignment = code_range != NULL && code_range->valid()
- ? MemoryChunk::kAlignment
- : base::OS::CommitPageSize();
- size_t reserved_size =
- ((executable == EXECUTABLE))
- ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
- alignment)
- : RoundUp(header_size + reserve_area_size,
- base::OS::CommitPageSize());
- CHECK(memory_chunk->size() == reserved_size);
- CHECK(memory_chunk->area_start() < memory_chunk->address() +
- memory_chunk->size());
- CHECK(memory_chunk->area_end() <= memory_chunk->address() +
- memory_chunk->size());
- CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
-
- Address area_start = memory_chunk->area_start();
-
- memory_chunk->CommitArea(second_commit_area_size);
- CHECK(area_start == memory_chunk->area_start());
- CHECK(memory_chunk->area_start() < memory_chunk->address() +
- memory_chunk->size());
- CHECK(memory_chunk->area_end() <= memory_chunk->address() +
- memory_chunk->size());
- CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
- second_commit_area_size);
-
- memory_allocator->Free(memory_chunk);
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
+ 0));
+ {
+ TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
+ TestCodeRangeScope test_code_range_scope(isolate, code_range);
+
+ size_t header_size = (executable == EXECUTABLE)
+ ? MemoryAllocator::CodePageGuardStartOffset()
+ : MemoryChunk::kObjectStartOffset;
+ size_t guard_size =
+ (executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
+
+ MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
+ reserve_area_size, commit_area_size, executable, NULL);
+ size_t alignment = code_range != NULL && code_range->valid()
+ ? MemoryChunk::kAlignment
+ : base::OS::CommitPageSize();
+ size_t reserved_size =
+ ((executable == EXECUTABLE))
+ ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
+ alignment)
+ : RoundUp(header_size + reserve_area_size,
+ base::OS::CommitPageSize());
+ CHECK(memory_chunk->size() == reserved_size);
+ CHECK(memory_chunk->area_start() <
+ memory_chunk->address() + memory_chunk->size());
+ CHECK(memory_chunk->area_end() <=
+ memory_chunk->address() + memory_chunk->size());
+ CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
+
+ Address area_start = memory_chunk->area_start();
+
+ memory_chunk->CommitArea(second_commit_area_size);
+ CHECK(area_start == memory_chunk->area_start());
+ CHECK(memory_chunk->area_start() <
+ memory_chunk->address() + memory_chunk->size());
+ CHECK(memory_chunk->area_end() <=
+ memory_chunk->address() + memory_chunk->size());
+ CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
+ second_commit_area_size);
+
+ memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
+ }
memory_allocator->TearDown();
delete memory_allocator;
}
@@ -205,36 +202,32 @@ static void VerifyMemoryChunk(Isolate* isolate,
TEST(Regress3540) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- const int pageSize = Page::kPageSize;
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(
- memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
+ 0));
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
CodeRange* code_range = new CodeRange(isolate);
- const size_t code_range_size = 4 * pageSize;
- if (!code_range->SetUp(
- code_range_size +
- RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
- MemoryChunk::kAlignment) +
- v8::internal::MemoryAllocator::CodePageAreaSize())) {
+ size_t code_range_size =
+ kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
+ if (!code_range->SetUp(code_range_size)) {
return;
}
Address address;
size_t size;
- size_t request_size = code_range_size - 2 * pageSize;
+ size_t request_size = code_range_size - Page::kPageSize;
address = code_range->AllocateRawMemory(
request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
&size);
- CHECK(address != NULL);
+ CHECK_NOT_NULL(address);
Address null_address;
size_t null_size;
- request_size = code_range_size - pageSize;
+ request_size = code_range_size - Page::kPageSize;
null_address = code_range->AllocateRawMemory(
request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
&null_size);
- CHECK(null_address == NULL);
+ CHECK_NULL(null_address);
code_range->FreeRawMemory(address, size);
delete code_range;
@@ -283,8 +276,8 @@ TEST(MemoryChunk) {
NOT_EXECUTABLE);
delete code_range;
- // Without CodeRange.
- code_range = NULL;
+ // Without a valid CodeRange, i.e., omitting SetUp.
+ code_range = new CodeRange(isolate);
VerifyMemoryChunk(isolate,
heap,
code_range,
@@ -300,6 +293,7 @@ TEST(MemoryChunk) {
initial_commit_area_size,
second_commit_area_size,
NOT_EXECUTABLE);
+ delete code_range;
}
}
@@ -310,14 +304,14 @@ TEST(MemoryAllocator) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator != nullptr);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(),
- heap->MaxExecutableSize()));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
+ 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
{
int total_pages = 0;
OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
- Page* first_page = memory_allocator->AllocatePage<Page>(
+ Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
@@ -331,7 +325,7 @@ TEST(MemoryAllocator) {
}
// Again, we should get n or n - 1 pages.
- Page* other = memory_allocator->AllocatePage<Page>(
+ Page* other = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
CHECK(Page::IsValid(other));
@@ -358,8 +352,8 @@ TEST(NewSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(),
- heap->MaxExecutableSize()));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
+ 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
NewSpace new_space(heap);
@@ -385,8 +379,8 @@ TEST(OldSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(),
- heap->MaxExecutableSize()));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
+ 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
@@ -409,8 +403,8 @@ TEST(CompactionSpace) {
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator != nullptr);
- CHECK(
- memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
+ 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
CompactionSpace* compaction_space =
@@ -491,7 +485,15 @@ TEST(SizeOfFirstPageIsLargeEnough) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
if (!isolate->snapshot_available()) return;
- if (Snapshot::EmbedsScript(isolate)) return;
+ HandleScope scope(isolate);
+ v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
+ // Skip this test on the custom snapshot builder.
+ if (!CcTest::global()
+ ->Get(context, v8_str("assertEquals"))
+ .ToLocalChecked()
+ ->IsUndefined()) {
+ return;
+ }
// If this test fails due to enabling experimental natives that are not part
// of the snapshot, we may need to adjust CalculateFirstPageSizes.
@@ -504,7 +506,6 @@ TEST(SizeOfFirstPageIsLargeEnough) {
}
// Executing the empty script gets by with one page per space.
- HandleScope scope(isolate);
CompileRun("/*empty*/");
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
// Debug code can be very large, so skip CODE_SPACE if we are generating it.
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index bf43b95402..507875742d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -4,6 +4,7 @@
#include "test/cctest/interpreter/bytecode-expectations-printer.h"
+#include <iomanip>
#include <iostream>
#include <vector>
@@ -11,14 +12,15 @@
#include "include/v8.h"
#include "src/base/logging.h"
-#include "src/base/smart-pointers.h"
#include "src/compiler.h"
#include "src/runtime/runtime.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-intrinsics.h"
#include "src/interpreter/interpreter.h"
+#include "src/source-position-table.h"
namespace v8 {
namespace internal {
@@ -27,6 +29,7 @@ namespace interpreter {
// static
const char* const BytecodeExpectationsPrinter::kDefaultTopFunctionName =
"__genbckexp_wrapper__";
+const char* const BytecodeExpectationsPrinter::kIndent = " ";
v8::Local<v8::String> BytecodeExpectationsPrinter::V8StringFromUTF8(
const char* data) const {
@@ -95,18 +98,12 @@ void BytecodeExpectationsPrinter::PrintEscapedString(
}
}
-namespace {
-i::Runtime::FunctionId IndexToFunctionId(uint32_t index) {
- return static_cast<i::Runtime::FunctionId>(index);
-}
-} // namespace
-
void BytecodeExpectationsPrinter::PrintBytecodeOperand(
- std::ostream& stream, const BytecodeArrayIterator& bytecode_iter,
+ std::ostream& stream, const BytecodeArrayIterator& bytecode_iterator,
const Bytecode& bytecode, int op_index, int parameter_count) const {
OperandType op_type = Bytecodes::GetOperandType(bytecode, op_index);
OperandSize op_size = Bytecodes::GetOperandSize(
- bytecode, op_index, bytecode_iter.current_operand_scale());
+ bytecode, op_index, bytecode_iterator.current_operand_scale());
const char* size_tag;
switch (op_size) {
@@ -125,7 +122,7 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
}
if (Bytecodes::IsRegisterOperandType(op_type)) {
- Register register_value = bytecode_iter.GetRegisterOperand(op_index);
+ Register register_value = bytecode_iterator.GetRegisterOperand(op_index);
stream << 'R';
if (op_size != OperandSize::kByte) stream << size_tag;
if (register_value.is_new_target()) {
@@ -149,21 +146,27 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
switch (op_type) {
case OperandType::kFlag8:
- stream << bytecode_iter.GetFlagOperand(op_index);
+ stream << bytecode_iterator.GetFlagOperand(op_index);
break;
case OperandType::kIdx:
- stream << bytecode_iter.GetIndexOperand(op_index);
+ stream << bytecode_iterator.GetIndexOperand(op_index);
break;
case OperandType::kImm:
- stream << bytecode_iter.GetImmediateOperand(op_index);
+ stream << bytecode_iterator.GetImmediateOperand(op_index);
break;
case OperandType::kRegCount:
- stream << bytecode_iter.GetRegisterCountOperand(op_index);
+ stream << bytecode_iterator.GetRegisterCountOperand(op_index);
break;
case OperandType::kRuntimeId: {
- uint32_t operand = bytecode_iter.GetRuntimeIdOperand(op_index);
- stream << "Runtime::k"
- << i::Runtime::FunctionForId(IndexToFunctionId(operand))->name;
+ Runtime::FunctionId id =
+ bytecode_iterator.GetRuntimeIdOperand(op_index);
+ stream << "Runtime::k" << i::Runtime::FunctionForId(id)->name;
+ break;
+ }
+ case OperandType::kIntrinsicId: {
+ Runtime::FunctionId id =
+ bytecode_iterator.GetIntrinsicIdOperand(op_index);
+ stream << "Runtime::k" << i::Runtime::FunctionForId(id)->name;
break;
}
default:
@@ -175,10 +178,10 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
}
void BytecodeExpectationsPrinter::PrintBytecode(
- std::ostream& stream, const BytecodeArrayIterator& bytecode_iter,
+ std::ostream& stream, const BytecodeArrayIterator& bytecode_iterator,
int parameter_count) const {
- Bytecode bytecode = bytecode_iter.current_bytecode();
- OperandScale operand_scale = bytecode_iter.current_operand_scale();
+ Bytecode bytecode = bytecode_iterator.current_bytecode();
+ OperandScale operand_scale = bytecode_iterator.current_operand_scale();
if (Bytecodes::OperandScaleRequiresPrefixBytecode(operand_scale)) {
Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
stream << "B(" << Bytecodes::ToString(prefix) << "), ";
@@ -187,11 +190,30 @@ void BytecodeExpectationsPrinter::PrintBytecode(
int operands_count = Bytecodes::NumberOfOperands(bytecode);
for (int op_index = 0; op_index < operands_count; ++op_index) {
stream << ", ";
- PrintBytecodeOperand(stream, bytecode_iter, bytecode, op_index,
+ PrintBytecodeOperand(stream, bytecode_iterator, bytecode, op_index,
parameter_count);
}
}
+void BytecodeExpectationsPrinter::PrintSourcePosition(
+ std::ostream& stream, SourcePositionTableIterator& source_iterator,
+ int bytecode_offset) const {
+ static const size_t kPositionWidth = 4;
+ if (!source_iterator.done() &&
+ source_iterator.code_offset() == bytecode_offset) {
+ stream << "/* " << std::setw(kPositionWidth)
+ << source_iterator.source_position();
+ if (source_iterator.is_statement()) {
+ stream << " S> */ ";
+ } else {
+ stream << " E> */ ";
+ }
+ source_iterator.Advance();
+ } else {
+ stream << " " << std::setw(kPositionWidth) << ' ' << " ";
+ }
+}
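
This fixed-width formatting is what produces the aligned /*  30 E> */ annotations visible in the golden files further down. A standalone snippet reproducing just the layout (the bytecode names are sample data):

    #include <iomanip>
    #include <iostream>
    #include <string>

    // Annotated prefix: "/* " + 4-column position + " S> */ " or " E> */ ",
    // 14 characters total; unannotated lines get 14 spaces of padding.
    void Annotate(std::ostream& os, int position, bool is_statement) {
      os << "/* " << std::setw(4) << position
         << (is_statement ? " S> */ " : " E> */ ");
    }

    int main() {
      Annotate(std::cout, 30, false);
      std::cout << "B(StackCheck),\n";
      Annotate(std::cout, 34, true);
      std::cout << "B(CreateArrayLiteral),\n";
      std::cout << std::string(14, ' ') << "B(Star),\n";  // no source position
      return 0;
    }
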
+
void BytecodeExpectationsPrinter::PrintV8String(std::ostream& stream,
i::String* string) const {
stream << '"';
@@ -246,10 +268,15 @@ void BytecodeExpectationsPrinter::PrintBytecodeSequence(
std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
stream << "bytecode array length: " << bytecode_array->length()
<< "\nbytecodes: [\n";
- BytecodeArrayIterator bytecode_iter(bytecode_array);
- for (; !bytecode_iter.done(); bytecode_iter.Advance()) {
- stream << " ";
- PrintBytecode(stream, bytecode_iter, bytecode_array->parameter_count());
+
+ SourcePositionTableIterator source_iterator(
+ bytecode_array->source_position_table());
+ BytecodeArrayIterator bytecode_iterator(bytecode_array);
+ for (; !bytecode_iterator.done(); bytecode_iterator.Advance()) {
+ stream << kIndent;
+ PrintSourcePosition(stream, source_iterator,
+ bytecode_iterator.current_offset());
+ PrintBytecode(stream, bytecode_iterator, bytecode_array->parameter_count());
stream << ",\n";
}
stream << "]\n";
@@ -261,7 +288,7 @@ void BytecodeExpectationsPrinter::PrintConstantPool(
int num_constants = constant_pool->length();
if (num_constants > 0) {
for (int i = 0; i < num_constants; ++i) {
- stream << " ";
+ stream << kIndent;
PrintConstant(stream, i::FixedArray::get(constant_pool, i, i_isolate()));
stream << ",\n";
}
@@ -275,7 +302,7 @@ void BytecodeExpectationsPrinter::PrintCodeSnippet(
std::stringstream body_stream(body);
std::string body_line;
while (std::getline(body_stream, body_line)) {
- stream << " ";
+ stream << kIndent;
PrintEscapedString(stream, body_line);
stream << '\n';
}
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index 0fcead5716..c64ca90c81 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -17,6 +17,9 @@ namespace v8 {
class Isolate;

namespace internal {
+
+class SourcePositionTableIterator;
+
namespace interpreter {

class BytecodeArrayIterator;
@@ -65,12 +68,15 @@ class BytecodeExpectationsPrinter final {
void PrintEscapedString(std::ostream& stream, // NOLINT
const std::string& string) const;
void PrintBytecodeOperand(std::ostream& stream, // NOLINT
- const BytecodeArrayIterator& bytecode_iter,
+ const BytecodeArrayIterator& bytecode_iterator,
const Bytecode& bytecode, int op_index,
int parameter_count) const;
void PrintBytecode(std::ostream& stream, // NOLINT
- const BytecodeArrayIterator& bytecode_iter,
+ const BytecodeArrayIterator& bytecode_iterator,
int parameter_count) const;
+ void PrintSourcePosition(std::ostream& stream, // NOLINT
+ SourcePositionTableIterator& source_iterator,
+ int bytecode_offset) const;
void PrintV8String(std::ostream& stream, // NOLINT
i::String* string) const;
void PrintConstant(std::ostream& stream, // NOLINT
@@ -111,6 +117,7 @@ class BytecodeExpectationsPrinter final {
std::string test_function_name_;

static const char* const kDefaultTopFunctionName;
+ static const char* const kIndent;
};

}  // namespace interpreter
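
In the header, a forward declaration of SourcePositionTableIterator is enough because PrintSourcePosition only takes the iterator by reference; the reference is deliberately non-const so the const member function can still advance the caller's iterator between calls. The same pattern in isolation (Printer and TableIterator are hypothetical names):

    #include <iosfwd>  // declares std::ostream without the full <ostream>

    class TableIterator;  // forward declaration; definition lives in the .cc

    class Printer {
     public:
      // const applies to the Printer itself; the iterator is still mutated,
      // which is why it is passed by non-const reference.
      void PrintSourcePosition(std::ostream& stream, TableIterator& it,
                               int bytecode_offset) const;
    };
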
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 4c9753f389..4997d1a004 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -15,9 +15,9 @@ frame size: 0
parameter count: 1
bytecode array length: 6
bytecodes: [
- B(StackCheck),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ /* 51 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -29,28 +29,25 @@ handlers: [
snippet: "
var a = 1; return [ a, a + 1 ];
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 39
+bytecode array length: 35
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(Star), R(2),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(0),
- B(KeyedStoreICSloppy), R(2), R(1), U8(1),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(3),
- B(LdaSmi), U8(1),
- B(Add), R(3),
- B(KeyedStoreICSloppy), R(2), R(1), U8(1),
- B(Ldar), R(2),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ /* 54 E> */ B(StaKeyedPropertySloppy), R(2), R(1), U8(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 57 E> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(2),
+ B(Ldar), R(2),
+ /* 66 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -66,9 +63,9 @@ frame size: 0
parameter count: 1
bytecode array length: 6
bytecodes: [
- B(StackCheck),
- B(CreateArrayLiteral), U8(0), U8(2), U8(2),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(2),
+ /* 62 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -80,40 +77,37 @@ handlers: [
snippet: "
var a = 1; return [ [ a, 2 ], [ a + 2 ] ];
"
-frame size: 6
+frame size: 5
parameter count: 1
-bytecode array length: 69
+bytecode array length: 65
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(CreateArrayLiteral), U8(0), U8(2), U8(2),
- B(Star), R(2),
- B(LdaZero),
- B(Star), R(1),
- B(CreateArrayLiteral), U8(1), U8(0), U8(3),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(3),
- B(Ldar), R(0),
- B(KeyedStoreICSloppy), R(4), R(3), U8(1),
- B(Ldar), R(4),
- B(KeyedStoreICSloppy), R(2), R(1), U8(5),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(CreateArrayLiteral), U8(2), U8(1), U8(3),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(3),
- B(Ldar), R(0),
- B(Star), R(5),
- B(LdaSmi), U8(2),
- B(Add), R(5),
- B(KeyedStoreICSloppy), R(4), R(3), U8(3),
- B(Ldar), R(4),
- B(KeyedStoreICSloppy), R(2), R(1), U8(5),
- B(Ldar), R(2),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(2),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(CreateArrayLiteral), U8(1), U8(0), U8(3),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ /* 56 E> */ B(StaKeyedPropertySloppy), R(4), R(3), U8(1),
+ B(Ldar), R(4),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(CreateArrayLiteral), U8(2), U8(1), U8(3),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ /* 66 E> */ B(AddSmi), U8(2), R(0), U8(3),
+ B(StaKeyedPropertySloppy), R(4), R(3), U8(4),
+ B(Ldar), R(4),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(6),
+ B(Ldar), R(2),
+ /* 77 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
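
The regenerated goldens now carry a /* NNN S> */ or /* NNN E> */ prefix per bytecode: NNN is a character offset into the test source, S> marks a statement position, E> an expression position, and unannotated lines carry no position. The regeneration also picks up unrelated 5.4 bytecode changes, which is why lengths and frame sizes shift: KeyedStoreICSloppy is now StaKeyedPropertySloppy, small-integer adds fold into AddSmi, and arithmetic bytecodes carry a trailing feedback-slot operand. A small sketch of reading an annotation back out of a golden line (ParseGoldenLine is hypothetical, illustrating the grammar only):

    #include <cstdio>
    #include <string>

    // Parses "  /* 42 S> */ B(LdaSmi), U8(1)," into (42, 'S', "B(LdaSmi)...").
    // Returns false for unannotated lines, which hold only the bytecode.
    bool ParseGoldenLine(const std::string& line, int* position, char* kind,
                         std::string* bytecode) {
      int consumed = 0;
      if (std::sscanf(line.c_str(), " /* %d %c> */ %n", position, kind,
                      &consumed) == 2) {
        *bytecode = line.substr(consumed);
        return true;
      }
      *position = -1;
      *kind = '\0';
      *bytecode = line;
      return false;
    }
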
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
index 2077b792d4..9f9a25120b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
@@ -272,521 +272,521 @@ frame size: 1
parameter count: 1
bytecode array length: 1033
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(Wide), B(CreateArrayLiteral), U16(256), U16(0), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 41 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 51 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 61 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 71 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 81 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 91 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 101 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 111 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 121 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 131 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 141 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 151 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 161 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 171 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 181 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 191 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 201 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 211 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 221 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 231 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 241 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 251 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 261 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 271 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 281 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 291 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 301 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 311 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 321 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 331 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 341 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 351 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 361 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 371 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 381 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 391 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 401 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 411 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 421 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 431 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 441 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 451 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 461 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 471 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 481 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 491 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 501 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 511 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 521 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 531 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 541 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 551 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 561 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 571 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 581 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 591 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 601 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 611 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 621 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 631 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 641 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 651 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 661 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 671 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 681 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 691 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 701 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 711 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 721 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 731 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 741 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 751 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 761 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 771 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 781 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 791 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 801 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 811 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 821 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 831 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 841 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 851 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 861 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 871 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 881 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 891 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 901 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 911 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 921 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 931 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 941 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 951 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 961 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 971 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 981 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 991 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1001 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1011 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1021 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1031 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1041 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1051 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1061 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1071 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1081 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1091 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1101 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1111 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1121 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1131 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1141 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1151 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1161 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1171 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1181 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1191 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1201 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1211 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1221 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1231 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1241 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1251 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1261 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1271 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1281 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1291 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1301 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1311 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1321 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1331 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1341 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1351 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1361 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1371 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1381 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1391 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1401 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1411 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1421 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1431 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1441 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1451 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1461 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1471 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1481 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1491 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1501 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1511 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1521 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1531 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1541 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1551 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1561 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1571 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1581 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1591 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1601 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1611 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1621 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1631 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1641 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1651 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1661 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1671 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 1681 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 1691 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 1701 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 1711 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 1721 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 1731 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 1741 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 1751 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 1761 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 1771 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 1781 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 1791 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 1801 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 1811 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 1821 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 1831 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 1841 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 1851 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 1861 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 1871 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 1881 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 1891 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 1901 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 1911 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 1921 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 1931 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 1941 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 1951 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 1961 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 1971 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 1981 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 1991 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2001 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2011 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2021 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2031 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2041 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2051 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2061 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2071 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2081 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2091 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2101 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2111 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2121 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2131 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2141 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2151 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2161 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2171 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2181 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2191 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2201 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2211 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2221 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2231 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2241 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2251 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2261 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2271 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2281 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2291 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2301 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2311 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2321 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2331 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2341 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2351 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2361 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2371 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2381 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2391 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2401 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2411 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2421 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2431 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2441 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2451 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2461 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2471 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2481 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2491 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2501 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2511 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 2521 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 2531 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 2541 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 2551 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 2561 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 2571 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 2581 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 2591 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 2601 S> */ B(Wide), B(CreateArrayLiteral), U16(256), U16(0), U8(3),
+ /* 2619 S> */ B(Return),
]
constant pool: [
InstanceType::HEAP_NUMBER_TYPE,
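
This file exists to force constant-pool index 256: indices 0 through 255 fit the U8 operand of LdaConstant and CreateArrayLiteral, so the 257th constant needs the Wide prefix, which widens the scalable operands of the following bytecode to U16 (the U8(3) flags operand above stays fixed-width). Apart from the new position comments, the sequence is unchanged. A simplified sketch of the width selection (EmitLdaConstant is hypothetical, not the V8 encoder):

    #include <cstdint>
    #include <cstdio>

    // Picks the narrowest encoding for a constant-pool load; the Wide and
    // ExtraWide prefixes scale the next bytecode's operands to 16/32 bits.
    void EmitLdaConstant(unsigned index) {
      if (index <= UINT8_MAX) {
        std::printf("B(LdaConstant), U8(%u),\n", index);
      } else if (index <= UINT16_MAX) {
        std::printf("B(Wide), B(LdaConstant), U16(%u),\n", index);
      } else {
        std::printf("B(ExtraWide), B(LdaConstant), U32(%u),\n", index);
      }
    }
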
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index f569bab161..5a1efc2889 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -16,20 +16,20 @@ frame size: 2
parameter count: 1
bytecode array length: 25
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(LdaSmi), U8(4),
- B(Star), R(0),
- B(LdaSmi), U8(5),
- B(Star), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 49 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 52 S> */ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ B(LdaSmi), U8(5),
+ B(Star), R(1),
+ /* 89 S> */ B(Return),
]
constant pool: [
]
@@ -44,15 +44,16 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 12
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(55),
- B(Star), R(0),
- B(LdaSmi), U8(100),
- B(Star), R(0),
- B(Star), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ /* 57 S> */ B(LdaSmi), U8(100),
+ B(Star), R(0),
+ B(Star), R(1),
+ /* 65 S> */ B(Nop),
+ /* 75 S> */ B(Return),
]
constant pool: [
]
@@ -67,21 +68,22 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 28
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(55),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(100),
- B(Star), R(0),
- B(Add), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(101),
- B(Star), R(0),
- B(Add), R(2),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(100),
+ B(Mov), R(0), R(1),
+ B(Star), R(0),
+ /* 57 E> */ B(Add), R(1), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(101),
+ B(Star), R(0),
+ /* 69 E> */ B(Add), R(2), U8(2),
+ B(Star), R(0),
+ /* 77 S> */ B(Nop),
+ /* 87 S> */ B(Return),
]
constant pool: [
]
@@ -97,26 +99,23 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 32
+bytecode array length: 29
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(55),
- B(Star), R(0),
- B(LdaSmi), U8(56),
- B(Star), R(0),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Sub), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(57),
- B(Star), R(0),
- B(Add), R(2),
- B(Star), R(0),
- B(ToNumber),
- B(Star), R(1),
- B(Inc),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(56),
+ B(Star), R(0),
+ /* 61 E> */ B(Sub), R(0), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(57),
+ B(Star), R(0),
+ /* 68 E> */ B(Add), R(2), U8(2),
+ B(Star), R(0),
+ /* 75 S> */ B(Inc), U8(3),
+ B(Star), R(0),
+ /* 80 S> */ B(Nop),
+ /* 90 S> */ B(Return),
]
constant pool: [
]
@@ -131,25 +130,26 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 32
+bytecode array length: 37
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(55),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Add), R(2),
- B(Star), R(3),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(Add), R(3),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(Star), R(0),
- B(Add), R(2),
- B(Star), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ /* 76 S> */ B(LdaSmi), U8(1),
+ B(Mov), R(0), R(2),
+ B(Star), R(0),
+ /* 61 E> */ B(Add), R(2), U8(1),
+ B(Star), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ /* 71 E> */ B(Add), R(3), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ /* 81 E> */ B(Add), R(2), U8(3),
+ B(Star), R(1),
+ /* 87 S> */ B(Nop),
+ /* 97 S> */ B(Return),
]
constant pool: [
]
@@ -164,25 +164,26 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 32
+bytecode array length: 37
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(55),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Add), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(Add), R(2),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Star), R(0),
- B(Add), R(1),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(55),
+ B(Star), R(0),
+ /* 76 S> */ B(LdaSmi), U8(1),
+ B(Mov), R(0), R(1),
+ B(Star), R(0),
+ /* 61 E> */ B(Add), R(1), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ /* 71 E> */ B(Add), R(2), U8(2),
+ B(Star), R(1),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ /* 81 E> */ B(Add), R(1), U8(3),
+ B(Star), R(0),
+ /* 87 S> */ B(Nop),
+ /* 97 S> */ B(Return),
]
constant pool: [
]
@@ -196,44 +197,40 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 70
+bytecode array length: 72
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Add), R(2),
- B(Star), R(3),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(4),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(Mul), R(4),
- B(Add), R(3),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(Add), R(2),
- B(Star), R(3),
- B(LdaSmi), U8(4),
- B(Star), R(0),
- B(Add), R(3),
- B(Star), R(2),
- B(LdaSmi), U8(5),
- B(Star), R(1),
- B(Add), R(2),
- B(Star), R(3),
- B(Ldar), R(1),
- B(Add), R(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 50 S> */ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ /* 54 S> */ B(LdaSmi), U8(1),
+ B(Mov), R(0), R(2),
+ B(Star), R(0),
+ /* 68 E> */ B(Add), R(2), U8(1),
+ B(Star), R(3),
+ /* 76 E> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(4),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 88 E> */ B(Mul), R(4), U8(3),
+ B(Add), R(3), U8(4),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ /* 98 E> */ B(Add), R(2), U8(5),
+ B(Star), R(3),
+ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ /* 108 E> */ B(Add), R(3), U8(6),
+ B(Star), R(2),
+ B(LdaSmi), U8(5),
+ B(Star), R(1),
+ /* 118 E> */ B(Add), R(2), U8(7),
+ B(Star), R(3),
+ B(Ldar), R(1),
+ /* 125 E> */ B(Add), R(3), U8(8),
+ /* 128 S> */ B(Return),
]
constant pool: [
]
@@ -247,30 +244,28 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 38
+bytecode array length: 41
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(17),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Add), R(1),
- B(Star), R(2),
- B(Ldar), R(0),
- B(ToNumber),
- B(Star), R(1),
- B(Inc),
- B(Star), R(0),
- B(Ldar), R(1),
- B(Add), R(2),
- B(Star), R(3),
- B(Ldar), R(0),
- B(ToNumber),
- B(Inc),
- B(Star), R(0),
- B(Add), R(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(17),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ /* 57 E> */ B(Add), R(1), U8(1),
+ B(Star), R(2),
+ B(Ldar), R(0),
+ B(ToNumber), R(1),
+ B(Inc), U8(2),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ /* 63 E> */ B(Add), R(2), U8(3),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ B(Inc), U8(4),
+ B(Star), R(0),
+ /* 72 E> */ B(Add), R(3), U8(5),
+ /* 76 S> */ B(Return),
]
constant pool: [
]
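
Besides the annotations, two codegen changes drive this churn: binary operations now take a trailing feedback-slot operand (the U8(1), U8(2), ... on Add, Sub, Mul and Inc, numbered in emission order) and read one input directly from a register, which removes the Star/Ldar shuffles and a register from the frame; and a trailing /* NN S> */ B(Nop) appears when a statement's value is already in the accumulator, since a source position must be attached to some bytecode. A sketch of that last rule for "return x;" with x already live in the accumulator (the emitter helpers are hypothetical):

    #include <cstdio>

    // Hypothetical helpers, for illustration only.
    void AttachPosition(int pos) { std::printf("/* %4d S> */ ", pos); }
    void Emit(const char* bytecode) { std::printf("B(%s),\n", bytecode); }

    // Nothing needs to be loaded, so a Nop is issued purely to anchor the
    // return statement's position; the Return then carries the exit position.
    void EmitReturnStatement(int stmt_pos, int exit_pos) {
      AttachPosition(stmt_pos);
      Emit("Nop");
      AttachPosition(exit_pos);
      Emit("Return");
    }

Calling EmitReturnStatement(65, 75) reproduces the annotated Nop/Return pair in the second case above, modulo column padding.
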
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
index ee98e5a19d..422fad3283 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
@@ -11,23 +11,21 @@ wrap: yes
snippet: "
var a = 1; if (a || a < 0) { return 1; }
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 21
+bytecode array length: 17
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(JumpIfToBooleanTrue), U8(9),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaZero),
- B(TestLessThan), R(1),
- B(JumpIfToBooleanFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanTrue), U8(7),
+ B(LdaZero),
+ /* 56 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(5),
+ /* 63 S> */ B(LdaSmi), U8(1),
+ /* 75 S> */ B(Return),
+ B(LdaUndefined),
+ /* 75 S> */ B(Return),
]
constant pool: [
]
@@ -38,23 +36,21 @@ handlers: [
snippet: "
var a = 1; if (a && a < 0) { return 1; }
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 21
+bytecode array length: 17
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(JumpIfToBooleanFalse), U8(9),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaZero),
- B(TestLessThan), R(1),
- B(JumpIfToBooleanFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanFalse), U8(10),
+ B(LdaZero),
+ /* 56 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(5),
+ /* 63 S> */ B(LdaSmi), U8(1),
+ /* 75 S> */ B(Return),
+ B(LdaUndefined),
+ /* 75 S> */ B(Return),
]
constant pool: [
]
@@ -65,25 +61,23 @@ handlers: [
snippet: "
var a = 1; a = (a || a < 0) ? 2 : 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 26
+bytecode array length: 22
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(JumpIfToBooleanTrue), U8(9),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaZero),
- B(TestLessThan), R(1),
- B(JumpIfToBooleanFalse), U8(6),
- B(LdaSmi), U8(2),
- B(Jump), U8(4),
- B(LdaSmi), U8(3),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanTrue), U8(7),
+ B(LdaZero),
+ /* 57 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(2),
+ B(Jump), U8(4),
+ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 71 S> */ B(Return),
]
constant pool: [
]
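
Two peephole-level improvements show up here: Test* comparisons already leave a real boolean in the accumulator, so the branch that follows drops its ToBoolean coercion (JumpIfToBooleanFalse becomes JumpIfFalse), and TestLessThan reads the variable a straight from its register instead of copying it through a temporary, shrinking the frame by one register. A sketch of the jump-selection rule (illustrative only, not V8's peephole optimizer):

    #include <string>

    // Test* bytecodes (TestEqual, TestLessThan, ...) produce a boolean, so
    // the conditional jump that consumes them can skip the ToBoolean step.
    bool ProducesBoolean(const std::string& bytecode) {
      return bytecode.rfind("Test", 0) == 0;
    }

    std::string SelectConditionalJump(const std::string& previous_bytecode) {
      return ProducesBoolean(previous_bytecode) ? "JumpIfFalse"
                                                : "JumpIfToBooleanFalse";
    }
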
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 05ee657105..6dcd2692af 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -15,12 +15,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 6
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 88 S> */ B(Nop),
+ /* 98 S> */ B(Return),
]
constant pool: [
]
@@ -37,12 +38,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 6
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 77 S> */ B(Nop),
+ /* 87 S> */ B(Return),
]
constant pool: [
]
@@ -61,45 +63,35 @@ snippet: "
}
return y;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 66
+bytecode array length: 49
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(10),
- B(TestLessThan), R(2),
- B(JumpIfFalse), U8(47),
- B(StackCheck),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(12),
- B(Mul), R(2),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(-39),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(4),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(4),
- B(Jump), U8(-53),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 65 S> */ B(LdaSmi), U8(10),
+ /* 65 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(34),
+ /* 56 E> */ B(StackCheck),
+ /* 75 S> */ B(LdaSmi), U8(12),
+ B(Mul), R(1), U8(1),
+ B(Star), R(1),
+ /* 89 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 102 S> */ B(LdaSmi), U8(3),
+ /* 108 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 114 S> */ B(Jump), U8(10),
+ /* 126 S> */ B(LdaSmi), U8(4),
+ /* 132 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 138 S> */ B(Jump), U8(4),
+ B(Jump), U8(-36),
+ /* 147 S> */ B(Ldar), R(1),
+ /* 157 S> */ B(Return),
]
constant pool: [
]
@@ -119,52 +111,39 @@ snippet: "
}
return i;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 79
+bytecode array length: 55
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaZero),
- B(TestLessThan), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(-10),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(50),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(4),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(38),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(10),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(-46),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(5),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(14),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(-70),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 E> */ B(StackCheck),
+ /* 62 S> */ B(LdaZero),
+ /* 68 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 73 S> */ B(Jump), U8(40),
+ /* 85 S> */ B(LdaSmi), U8(3),
+ /* 91 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 97 S> */ B(Jump), U8(34),
+ /* 106 S> */ B(LdaSmi), U8(4),
+ /* 112 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 118 S> */ B(Jump), U8(26),
+ /* 127 S> */ B(LdaSmi), U8(10),
+ /* 133 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 140 S> */ B(Jump), U8(16),
+ /* 152 S> */ B(LdaSmi), U8(5),
+ /* 158 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 164 S> */ B(Jump), U8(10),
+ /* 173 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(-46),
+ /* 186 S> */ B(Ldar), R(0),
+ /* 196 S> */ B(Return),
]
constant pool: [
]
@@ -184,41 +163,30 @@ snippet: "
}
return i;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 57
+bytecode array length: 39
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(TestLessThan), R(1),
- B(JumpIfFalse), U8(27),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(14),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(-33),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(4),
- B(Jump), U8(-48),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 E> */ B(StackCheck),
+ /* 71 S> */ B(LdaSmi), U8(3),
+ /* 71 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(19),
+ /* 62 E> */ B(StackCheck),
+ /* 82 S> */ B(LdaSmi), U8(2),
+ /* 88 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 94 S> */ B(Jump), U8(10),
+ /* 105 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(-21),
+ /* 122 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 135 S> */ B(Jump), U8(2),
+ /* 144 S> */ B(Ldar), R(0),
+ /* 154 S> */ B(Return),
]
constant pool: [
]
@@ -235,31 +203,26 @@ snippet: "
}
return y;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 39
+bytecode array length: 32
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfToBooleanFalse), U8(25),
- B(StackCheck),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(12),
- B(Mul), R(2),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Sub), R(2),
- B(Star), R(0),
- B(Jump), U8(-25),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 54 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 64 S> */ B(Ldar), R(0),
+ B(JumpIfToBooleanFalse), U8(18),
+ /* 57 E> */ B(StackCheck),
+ /* 71 S> */ B(LdaSmi), U8(12),
+ B(Mul), R(1), U8(1),
+ B(Star), R(1),
+ /* 85 S> */ B(SubSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ B(Jump), U8(-18),
+ /* 98 S> */ B(Ldar), R(1),
+ /* 108 S> */ B(Return),
]
constant pool: [
]
@@ -277,45 +240,34 @@ snippet: "
} while (x < 10);
return y;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 66
+bytecode array length: 47
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(StackCheck),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(10),
- B(Mul), R(2),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(5),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(34),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(6),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(12),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(0),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(10),
- B(TestLessThan), R(2),
- B(JumpIfTrue), U8(-53),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 56 E> */ B(StackCheck),
+ /* 63 S> */ B(LdaSmi), U8(10),
+ B(Mul), R(1), U8(1),
+ B(Star), R(1),
+ /* 77 S> */ B(LdaSmi), U8(5),
+ /* 83 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 89 S> */ B(Jump), U8(22),
+ /* 98 S> */ B(LdaSmi), U8(6),
+ /* 104 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 110 S> */ B(Jump), U8(8),
+ /* 122 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 144 S> */ B(LdaSmi), U8(10),
+ /* 144 E> */ B(TestLessThan), R(0),
+ B(JumpIfTrue), U8(-34),
+ /* 151 S> */ B(Ldar), R(1),
+ /* 161 S> */ B(Return),
]
constant pool: [
]
@@ -332,30 +284,24 @@ snippet: "
} while (x);
return y;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 37
+bytecode array length: 28
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(StackCheck),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(12),
- B(Mul), R(2),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Sub), R(2),
- B(Star), R(0),
- B(Ldar), R(0),
- B(JumpIfToBooleanTrue), U8(-23),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 54 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 57 E> */ B(StackCheck),
+ /* 64 S> */ B(LdaSmi), U8(12),
+ B(Mul), R(1), U8(1),
+ B(Star), R(1),
+ /* 78 S> */ B(SubSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 98 S> */ B(JumpIfToBooleanTrue), U8(-14),
+ /* 102 S> */ B(Ldar), R(1),
+ /* 112 S> */ B(Return),
]
constant pool: [
]
@@ -373,39 +319,31 @@ snippet: "
} while (false);
return y;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 54
+bytecode array length: 41
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(StackCheck),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(10),
- B(Mul), R(2),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(5),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(22),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(6),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(2),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 56 E> */ B(StackCheck),
+ /* 63 S> */ B(LdaSmi), U8(10),
+ B(Mul), R(1), U8(1),
+ B(Star), R(1),
+ /* 77 S> */ B(LdaSmi), U8(5),
+ /* 83 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 89 S> */ B(Jump), U8(16),
+ /* 98 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 111 S> */ B(LdaSmi), U8(6),
+ /* 117 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 123 S> */ B(Jump), U8(2),
+ /* 150 S> */ B(Ldar), R(1),
+ /* 160 S> */ B(Return),
]
constant pool: [
]
@@ -423,40 +361,32 @@ snippet: "
} while (true);
return y;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 56
+bytecode array length: 43
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(StackCheck),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(10),
- B(Mul), R(2),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(5),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(24),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(6),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(-41),
- B(Jump), U8(-43),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 56 E> */ B(StackCheck),
+ /* 63 S> */ B(LdaSmi), U8(10),
+ B(Mul), R(1), U8(1),
+ B(Star), R(1),
+ /* 77 S> */ B(LdaSmi), U8(5),
+ /* 83 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 89 S> */ B(Jump), U8(18),
+ /* 98 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 111 S> */ B(LdaSmi), U8(6),
+ /* 117 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 123 S> */ B(Jump), U8(2),
+ B(Jump), U8(-30),
+ /* 149 S> */ B(Ldar), R(1),
+ /* 159 S> */ B(Return),
]
constant pool: [
]
@@ -472,34 +402,27 @@ snippet: "
x = x + 1;
}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 43
+bytecode array length: 31
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(26),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(-23),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(-35),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 E> */ B(StackCheck),
+ /* 58 S> */ B(LdaSmi), U8(1),
+ /* 64 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 70 S> */ B(Jump), U8(18),
+ /* 79 S> */ B(LdaSmi), U8(2),
+ /* 85 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 91 S> */ B(Jump), U8(8),
+ /* 103 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(-23),
+ B(LdaUndefined),
+ /* 116 S> */ B(Return),
]
constant pool: [
]
@@ -514,34 +437,27 @@ snippet: "
x = x + 1;
}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 43
+bytecode array length: 31
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(26),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(-23),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(-35),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 47 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 34 E> */ B(StackCheck),
+ /* 56 S> */ B(LdaSmi), U8(1),
+ /* 62 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 68 S> */ B(Jump), U8(18),
+ /* 77 S> */ B(LdaSmi), U8(2),
+ /* 83 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 89 S> */ B(Jump), U8(8),
+ /* 101 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(-23),
+ B(LdaUndefined),
+ /* 114 S> */ B(Return),
]
constant pool: [
]
@@ -556,34 +472,27 @@ snippet: "
if (x == 2) continue;
}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 43
+bytecode array length: 31
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(26),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(2),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(-35),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 E> */ B(StackCheck),
+ /* 68 S> */ B(LdaSmi), U8(1),
+ /* 74 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 80 S> */ B(Jump), U8(18),
+ /* 89 S> */ B(LdaSmi), U8(2),
+ /* 95 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 101 S> */ B(Jump), U8(2),
+ /* 55 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(-23),
+ B(LdaUndefined),
+ /* 113 S> */ B(Return),
]
constant pool: [
]
@@ -597,34 +506,27 @@ snippet: "
if (x == 2) continue;
}
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 43
+bytecode array length: 31
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(26),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(2),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(-35),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 47 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 34 E> */ B(StackCheck),
+ /* 66 S> */ B(LdaSmi), U8(1),
+ /* 72 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 78 S> */ B(Jump), U8(18),
+ /* 87 S> */ B(LdaSmi), U8(2),
+ /* 93 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 99 S> */ B(Jump), U8(2),
+ /* 53 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(-23),
+ B(LdaUndefined),
+ /* 111 S> */ B(Return),
]
constant pool: [
]
@@ -639,35 +541,27 @@ snippet: "
continue;
}
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 44
+bytecode array length: 32
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(100),
- B(TestLessThan), R(2),
- B(JumpIfFalse), U8(27),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(0),
- B(Jump), U8(2),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(1),
- B(Jump), U8(-33),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 63 S> */ B(LdaSmi), U8(100),
+ /* 63 E> */ B(TestLessThan), R(1),
+ B(JumpIfFalse), U8(19),
+ /* 45 E> */ B(StackCheck),
+ /* 85 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 98 S> */ B(Jump), U8(2),
+ /* 72 S> */ B(AddSmi), U8(1), R(1), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(-21),
+ B(LdaUndefined),
+ /* 110 S> */ B(Return),
]
constant pool: [
]
@@ -682,30 +576,27 @@ snippet: "
}
return y;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 35
+bytecode array length: 32
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(LdaSmi), U8(10),
- B(Star), R(1),
- B(Ldar), R(1),
- B(JumpIfToBooleanFalse), U8(21),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(12),
- B(Mul), R(2),
- B(Star), R(0),
- B(Ldar), R(1),
- B(ToNumber),
- B(Dec),
- B(Star), R(1),
- B(Jump), U8(-21),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaSmi), U8(10),
+ B(Star), R(1),
+ /* 62 S> */ B(Ldar), R(1),
+ B(JumpIfToBooleanFalse), U8(18),
+ /* 45 E> */ B(StackCheck),
+ /* 74 S> */ B(LdaSmi), U8(12),
+ B(Mul), R(0), U8(2),
+ B(Star), R(0),
+ /* 67 S> */ B(Ldar), R(1),
+ B(Dec), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(-18),
+ /* 88 S> */ B(Ldar), R(0),
+ /* 98 S> */ B(Return),
]
constant pool: [
]
@@ -724,13 +615,13 @@ frame size: 2
parameter count: 1
bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 91 S> */ B(Ldar), R(0),
+ /* 101 S> */ B(Return),
]
constant pool: [
]
@@ -746,33 +637,28 @@ snippet: "
};
return x;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 39
+bytecode array length: 33
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(20),
- B(TestEqual), R(2),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(10),
- B(Ldar), R(1),
- B(ToNumber),
- B(Inc),
- B(Star), R(1),
- B(Jump), U8(-27),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 45 E> */ B(StackCheck),
+ /* 76 S> */ B(AddSmi), U8(1), R(0), U8(2),
+ B(Star), R(0),
+ /* 89 S> */ B(LdaSmi), U8(20),
+ /* 95 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 102 S> */ B(Jump), U8(10),
+ /* 69 S> */ B(Ldar), R(1),
+ B(Inc), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(-21),
+ /* 112 S> */ B(Ldar), R(0),
+ /* 122 S> */ B(Return),
]
constant pool: [
]
@@ -791,69 +677,58 @@ snippet: "
}
}
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 120
+bytecode array length: 97
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(1),
- B(JumpIfToBooleanFalse), U8(112),
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(4),
- B(Ldar), R(closure),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(1), U8(0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(Star), R(2),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(3),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(JumpIfToBooleanFalse), U8(8),
- B(PopContext), R(3),
- B(PopContext), R(3),
- B(Jump), U8(-69),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(3),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(ToNumber),
- B(Star), R(4),
- B(Inc),
- B(Star), R(5),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(3),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(6), U8(1),
- B(Ldar), R(5),
- B(StaContextSlot), R(context), U8(4),
- B(PopContext), R(3),
- B(Jump), U8(-112),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 52 S> */ B(Ldar), R(1),
+ B(JumpIfToBooleanFalse), U8(89),
+ /* 45 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(3),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(2),
+ B(Star), R(0),
+ /* 73 S> */ B(LdaSmi), U8(1),
+ /* 73 E> */ B(StaContextSlot), R(context), U8(4),
+ B(Mov), R(0), R(2),
+ /* 106 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(JumpIfToBooleanFalse), U8(8),
+ /* 113 S> */ B(PopContext), R(3),
+ B(PopContext), R(3),
+ B(Jump), U8(41),
+ /* 126 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Inc), U8(1),
+ B(Star), R(4),
+ /* 127 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(Ldar), R(4),
+ B(StaContextSlot), R(context), U8(4),
+ B(PopContext), R(3),
+ B(Jump), U8(-89),
+ B(LdaUndefined),
+ /* 137 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index 162297d1e2..bae9bd4da3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -17,20 +17,18 @@ snippet: "
}
return x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 15
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(2),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 56 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Star), R(0),
+ /* 69 S> */ B(Jump), U8(2),
+ /* 97 S> */ B(Ldar), R(0),
+ /* 107 S> */ B(Return),
]
constant pool: [
]
@@ -52,52 +50,43 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 75
+bytecode array length: 64
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(1),
- B(Star), R(3),
- B(LdaSmi), U8(10),
- B(TestLessThan), R(3),
- B(JumpIfFalse), U8(57),
- B(StackCheck),
- B(LdaZero),
- B(Star), R(2),
- B(Ldar), R(2),
- B(Star), R(3),
- B(LdaSmi), U8(3),
- B(TestLessThan), R(3),
- B(JumpIfFalse), U8(35),
- B(StackCheck),
- B(Ldar), R(0),
- B(ToNumber),
- B(Inc),
- B(Star), R(0),
- B(Ldar), R(1),
- B(Star), R(3),
- B(Ldar), R(2),
- B(Add), R(3),
- B(Star), R(4),
- B(LdaSmi), U8(12),
- B(TestEqual), R(4),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
- B(Ldar), R(2),
- B(ToNumber),
- B(Inc),
- B(Star), R(2),
- B(Jump), U8(-41),
- B(Ldar), R(1),
- B(ToNumber),
- B(Inc),
- B(Star), R(1),
- B(Jump), U8(-63),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 71 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 76 S> */ B(LdaSmi), U8(10),
+ /* 76 E> */ B(TestLessThan), R(1),
+ B(JumpIfFalse), U8(50),
+ /* 58 E> */ B(StackCheck),
+ /* 106 S> */ B(LdaZero),
+ B(Star), R(2),
+ /* 111 S> */ B(LdaSmi), U8(3),
+ /* 111 E> */ B(TestLessThan), R(2),
+ B(JumpIfFalse), U8(32),
+ /* 93 E> */ B(StackCheck),
+ /* 129 S> */ B(Ldar), R(0),
+ B(Inc), U8(3),
+ B(Star), R(0),
+ /* 142 S> */ B(Ldar), R(2),
+ /* 150 E> */ B(Add), R(1), U8(4),
+ B(Star), R(4),
+ B(LdaSmi), U8(12),
+ /* 152 E> */ B(TestEqual), R(4),
+ B(JumpIfFalse), U8(4),
+ /* 161 S> */ B(Jump), U8(18),
+ /* 118 S> */ B(Ldar), R(2),
+ B(Inc), U8(2),
+ B(Star), R(2),
+ B(Jump), U8(-34),
+ /* 84 S> */ B(Ldar), R(1),
+ B(Inc), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(-52),
+ /* 188 S> */ B(Ldar), R(0),
+ /* 200 S> */ B(Return),
]
constant pool: [
]
@@ -112,38 +101,30 @@ snippet: "
break outer;
}
"
-frame size: 5
+frame size: 3
parameter count: 1
-bytecode array length: 51
+bytecode array length: 32
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(Ldar), R(closure),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(3), U8(2),
- B(PushContext), R(2),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(1), U8(0),
- B(Star), R(0),
- B(LdaSmi), U8(10),
- B(StaContextSlot), R(context), U8(4),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Star), R(1),
- B(Jump), U8(2),
- B(PopContext), R(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(2),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(10),
+ /* 53 E> */ B(StaContextSlot), R(context), U8(4),
+ B(Mov), R(0), R(1),
+ B(Ldar), R(0),
+ /* 88 S> */ B(Jump), U8(2),
+ B(PopContext), R(2),
+ B(LdaUndefined),
+ /* 103 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
@@ -163,69 +144,60 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 131
+bytecode array length: 107
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(2),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(LdaConstant), U8(0),
- B(Star), R(4),
- B(Ldar), R(closure),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(1), U8(0),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(StaContextSlot), R(context), U8(4),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(Star), R(1),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(3),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(JumpIfToBooleanFalse), U8(6),
- B(PopContext), R(3),
- B(Jump), U8(27),
- B(LdaSmi), U8(3),
- B(Star), R(4),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(3),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
- B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4),
- B(PopContext), R(3),
- B(LdaSmi), U8(4),
- B(Star), R(4),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(4),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
- B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(3),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(1), U8(2),
+ B(Star), R(0),
+ /* 76 S> */ B(LdaSmi), U8(2),
+ /* 76 E> */ B(StaContextSlot), R(context), U8(4),
+ B(Mov), R(0), R(1),
+ /* 118 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(JumpIfToBooleanFalse), U8(6),
+ /* 125 S> */ B(PopContext), R(3),
+ B(Jump), U8(27),
+ /* 142 S> */ B(LdaSmi), U8(3),
+ B(Star), R(4),
+ /* 144 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(Ldar), R(4),
+ B(StaContextSlot), R(context), U8(4),
+ B(PopContext), R(3),
+ /* 155 S> */ B(LdaSmi), U8(4),
+ B(Star), R(4),
+ /* 157 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(3),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(Ldar), R(4),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ /* 162 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
index 04bc3a9ded..45fb07ad08 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -16,18 +16,15 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(1),
- B(LdaGlobal), U8(0), U8(3),
- B(Star), R(0),
- B(Call), R(0), R(1), U8(1), U8(1),
- B(Return),
+ /* 27 E> */ B(StackCheck),
+ /* 32 S> */ B(LdrUndefined), R(1),
+ B(LdrGlobal), U8(3), R(0),
+ /* 39 E> */ B(Call), R(0), R(1), U8(1), U8(1),
+ /* 44 S> */ B(Return),
]
constant pool: [
- "t",
]
handlers: [
]
@@ -40,24 +37,21 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 27
+bytecode array length: 24
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(1),
- B(LdaGlobal), U8(0), U8(3),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(Star), R(3),
- B(LdaSmi), U8(3),
- B(Star), R(4),
- B(Call), R(0), R(1), U8(4), U8(1),
- B(Return),
+ /* 34 E> */ B(StackCheck),
+ /* 39 S> */ B(LdrUndefined), R(1),
+ B(LdrGlobal), U8(3), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(3),
+ B(Star), R(4),
+ /* 46 E> */ B(Call), R(0), R(1), U8(4), U8(1),
+ /* 58 S> */ B(Return),
]
constant pool: [
- "t",
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
index 30b69d3eae..9438503ae4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -11,41 +11,43 @@ wrap: yes
snippet: "
g = function(){}; eval(''); return g();
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 85
+bytecode array length: 86
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
- B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- B(StackCheck),
- B(CreateClosure), U8(0), U8(0),
- B(StaLookupSlotSloppy), U8(1),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- B(LdaConstant), U8(3),
- B(Star), R(3),
- B(Mov), R(1), R(4),
- B(Mov), R(3), R(5),
- B(Mov), R(closure), R(6),
- B(LdaZero),
- B(Star), R(7),
- B(LdaSmi), U8(30),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(2), U8(0),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- B(Call), R(1), R(2), U8(1), U8(3),
- B(Return),
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 36 E> */ B(StaLookupSlotSloppy), U8(1),
+ /* 52 S> */ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(3),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(LdaSmi), U8(52),
+ B(Star), R(9),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
+ B(Star), R(1),
+ /* 52 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 62 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ /* 69 E> */ B(Call), R(1), R(2), U8(1), U8(3),
+ /* 74 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
index 4952c73ddc..2ee9613b59 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -18,14 +18,13 @@ frame size: 1
parameter count: 1
bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(2),
- B(Star), R(0),
- B(New), R(0), R(0), U8(0),
- B(Return),
+ /* 45 E> */ B(StackCheck),
+ /* 50 S> */ B(LdrGlobal), U8(3), R(0),
+ B(Ldar), R(0),
+ /* 57 E> */ B(New), R(0), R(0), U8(0),
+ /* 68 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
@@ -38,19 +37,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 17
+bytecode array length: 15
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(2),
- B(Star), R(0),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(Ldar), R(0),
- B(New), R(0), R(1), U8(1),
- B(Return),
+ /* 58 E> */ B(StackCheck),
+ /* 63 S> */ B(LdrGlobal), U8(3), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ /* 70 E> */ B(New), R(0), R(1), U8(1),
+ /* 82 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
@@ -68,23 +65,21 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 25
+bytecode array length: 23
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(2),
- B(Star), R(0),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(LdaSmi), U8(4),
- B(Star), R(2),
- B(LdaSmi), U8(5),
- B(Star), R(3),
- B(Ldar), R(0),
- B(New), R(0), R(1), U8(3),
- B(Return),
+ /* 100 E> */ B(StackCheck),
+ /* 105 S> */ B(LdrGlobal), U8(3), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ B(LdaSmi), U8(4),
+ B(Star), R(2),
+ B(LdaSmi), U8(5),
+ B(Star), R(3),
+ B(Ldar), R(0),
+ /* 112 E> */ B(New), R(0), R(1), U8(3),
+ /* 130 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
index ec10e67c10..aa2a994507 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -17,10 +17,10 @@ frame size: 0
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
+ B(LdaUndefined),
+ /* 26 S> */ B(Return),
]
constant pool: [
]
@@ -32,15 +32,13 @@ snippet: "
function f(a) { return %IsArray(a) }
f(undefined);
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 11
+bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(CallRuntime), U16(Runtime::kIsArray), R(0), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(CallRuntime), U16(Runtime::kIsArray), R(arg0), U8(1),
+ /* 35 S> */ B(Return),
]
constant pool: [
]
@@ -56,13 +54,13 @@ frame size: 2
parameter count: 1
bytecode array length: 15
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kAdd), R(0), U8(2),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kAdd), R(0), U8(2),
+ /* 33 S> */ B(Return),
]
constant pool: [
]
@@ -76,15 +74,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(0),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(Star), R(1),
- B(CallJSRuntime), U8(115), R(0), U8(2),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdrUndefined), R(0),
+ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(1),
+ B(CallJSRuntime), U8(134), R(0), U8(2),
+ /* 44 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 6869c5bc58..865a4c3000 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -22,37 +22,25 @@ snippet: "
test();
})();
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 57
+bytecode array length: 36
bytecodes: [
- B(Ldar), R(closure),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(this),
- B(Star), R(3),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(6), U8(1),
- B(Star), R(6),
- B(LdaConstant), U8(1),
- B(KeyedLoadIC), R(6), U8(3),
- B(Star), R(4),
- B(LdaConstant), U8(2),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kLoadFromSuper), R(3), U8(3),
- B(Mov), R(3), R(2),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(1), U8(1),
- B(Star), R(3),
- B(LdaSmi), U8(1),
- B(Add), R(3),
- B(Return),
+ B(Mov), R(closure), R(0),
+ /* 99 E> */ B(StackCheck),
+ /* 104 S> */ B(LdaConstant), U8(0),
+ /* 111 E> */ B(LdrKeyedProperty), R(closure), U8(3), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(Mov), R(this), R(3),
+ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(3), U8(3),
+ B(Star), R(1),
+ /* 117 E> */ B(Call), R(1), R(this), U8(1), U8(1),
+ B(Star), R(3),
+ B(AddSmi), U8(1), R(3), U8(7),
+ /* 131 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::SYMBOL_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
@@ -74,47 +62,29 @@ snippet: "
test();
})();
"
-frame size: 6
+frame size: 5
parameter count: 1
-bytecode array length: 80
+bytecode array length: 45
bytecodes: [
- B(Ldar), R(closure),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(this),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
- B(Star), R(5),
- B(LdaConstant), U8(1),
- B(KeyedLoadIC), R(5), U8(1),
- B(Star), R(2),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(LdaSmi), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kStoreToSuper_Strict), R(1), U8(4),
- B(Ldar), R(this),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(KeyedLoadIC), R(4), U8(3),
- B(Star), R(2),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kLoadFromSuper), R(1), U8(3),
- B(Return),
+ B(Mov), R(closure), R(0),
+ /* 125 E> */ B(StackCheck),
+ /* 130 S> */ B(LdaConstant), U8(0),
+ /* 130 E> */ B(LdrKeyedProperty), R(closure), U8(1), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(4),
+ B(Mov), R(this), R(1),
+ /* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper_Strict), R(1), U8(4),
+ /* 143 S> */ B(LdaConstant), U8(0),
+ /* 150 E> */ B(LdrKeyedProperty), R(closure), U8(3), R(2),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(Mov), R(this), R(1),
+ B(CallRuntime), U16(Runtime::kLoadFromSuper), R(1), U8(3),
+ /* 159 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::SYMBOL_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
@@ -134,59 +104,45 @@ snippet: "
test = new B().constructor;
})();
"
-frame size: 5
+frame size: 4
parameter count: 1
-bytecode array length: 106
+bytecode array length: 79
bytecodes: [
- B(Ldar), R(closure),
- B(Star), R(1),
- B(Ldar), R(new_target),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(1),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(2), U8(1),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(3),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(New), R(2), R(3), U8(1),
- B(Star), R(2),
- B(Ldar), R(this),
- B(JumpIfNotHole), U8(4),
- B(Jump), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Ldar), R(2),
- B(Star), R(this),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(StoreICStrict), R(2), U8(3), U8(4),
- B(Ldar), R(this),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Return),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(new_target), R(0),
+ /* 113 E> */ B(StackCheck),
+ /* 118 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(closure), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(Ldar), R(new_target),
+ /* 118 E> */ B(New), R(2), R(3), U8(1),
+ B(Star), R(2),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(4),
+ B(Jump), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ /* 118 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Mov), R(2), R(this),
+ /* 128 S> */ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(4),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ /* 141 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
@@ -206,55 +162,41 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 102
+bytecode array length: 75
bytecodes: [
- B(Ldar), R(closure),
- B(Star), R(1),
- B(Ldar), R(new_target),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(1),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(2), U8(1),
- B(Star), R(2),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(New), R(2), R(0), U8(0),
- B(Star), R(2),
- B(Ldar), R(this),
- B(JumpIfNotHole), U8(4),
- B(Jump), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Ldar), R(2),
- B(Star), R(this),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(StoreICStrict), R(2), U8(3), U8(4),
- B(Ldar), R(this),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Return),
+ B(Mov), R(closure), R(1),
+ B(Mov), R(new_target), R(0),
+ /* 112 E> */ B(StackCheck),
+ /* 117 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(closure), U8(1),
+ B(Star), R(2),
+ B(Ldar), R(new_target),
+ /* 117 E> */ B(New), R(2), R(0), U8(0),
+ B(Star), R(2),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(4),
+ B(Jump), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ /* 117 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Mov), R(2), R(this),
+ /* 126 S> */ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Star), R(2),
+ B(LdaSmi), U8(2),
+ /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(4),
+ B(Ldar), R(this),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ /* 139 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index db42c5018a..d7ebabc8e4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -14,42 +14,42 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 73
+bytecode array length: 74
bytecodes: [
- B(LdaTheHole),
- B(Star), R(1),
- B(StackCheck),
- B(LdaTheHole),
- B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(2),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(3),
- B(LdaSmi), U8(34),
- B(Star), R(4),
- B(Wide), B(LdaSmi), U16(148),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4),
- B(Star), R(2),
- B(LoadIC), R(2), U8(1), U8(1),
- B(Star), R(3),
- B(Mov), R(3), R(4),
- B(LdaConstant), U8(2),
- B(Star), R(5),
- B(CreateClosure), U8(3), U8(0),
- B(Star), R(6),
- B(LdaSmi), U8(2),
- B(Star), R(7),
- B(LdaZero),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(2),
+ /* 30 E> */ B(StackCheck),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 34 S> */ B(LdaTheHole),
+ B(Star), R(3),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(4),
+ B(LdaSmi), U8(34),
+ B(Star), R(5),
+ B(Wide), B(LdaSmi), U16(148),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(LdrNamedProperty), R(3), U8(1), U8(1), R(4),
+ B(LdaConstant), U8(2),
+ B(ToName), R(6),
+ B(CreateClosure), U8(3), U8(2),
+ B(Star), R(7),
+ B(LdaSmi), U8(2),
+ B(Star), R(8),
+ B(LdaZero),
+ B(Star), R(9),
+ B(Mov), R(4), R(5),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(2),
+ B(LdaUndefined),
+ /* 149 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -67,42 +67,42 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 73
+bytecode array length: 74
bytecodes: [
- B(LdaTheHole),
- B(Star), R(1),
- B(StackCheck),
- B(LdaTheHole),
- B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(2),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(3),
- B(LdaSmi), U8(34),
- B(Star), R(4),
- B(Wide), B(LdaSmi), U16(148),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4),
- B(Star), R(2),
- B(LoadIC), R(2), U8(1), U8(1),
- B(Star), R(3),
- B(Mov), R(3), R(4),
- B(LdaConstant), U8(2),
- B(Star), R(5),
- B(CreateClosure), U8(3), U8(0),
- B(Star), R(6),
- B(LdaSmi), U8(2),
- B(Star), R(7),
- B(LdaZero),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(2),
+ /* 30 E> */ B(StackCheck),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 34 S> */ B(LdaTheHole),
+ B(Star), R(3),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(4),
+ B(LdaSmi), U8(34),
+ B(Star), R(5),
+ B(Wide), B(LdaSmi), U16(148),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(LdrNamedProperty), R(3), U8(1), U8(1), R(4),
+ B(LdaConstant), U8(2),
+ B(ToName), R(6),
+ B(CreateClosure), U8(3), U8(2),
+ B(Star), R(7),
+ B(LdaSmi), U8(2),
+ B(Star), R(8),
+ B(LdaZero),
+ B(Star), R(9),
+ B(Mov), R(4), R(5),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(2),
+ B(LdaUndefined),
+ /* 149 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -122,62 +122,60 @@ snippet: "
static [n1]() { return n1; }
}
"
-frame size: 10
+frame size: 11
parameter count: 1
-bytecode array length: 127
+bytecode array length: 123
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(2),
- B(LdaTheHole),
- B(Star), R(1),
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(StaContextSlot), R(context), U8(4),
- B(LdaConstant), U8(1),
- B(StaContextSlot), R(context), U8(5),
- B(LdaTheHole),
- B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(3),
- B(CreateClosure), U8(2), U8(0),
- B(Star), R(4),
- B(LdaSmi), U8(62),
- B(Star), R(5),
- B(Wide), B(LdaSmi), U16(128),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
- B(Star), R(3),
- B(LoadIC), R(3), U8(3), U8(1),
- B(Star), R(4),
- B(Mov), R(4), R(5),
- B(LdaContextSlot), R(context), U8(4),
- B(ToName),
- B(Star), R(6),
- B(CreateClosure), U8(4), U8(0),
- B(Star), R(7),
- B(LdaSmi), U8(2),
- B(Star), R(8),
- B(LdaSmi), U8(1),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
- B(Mov), R(3), R(5),
- B(LdaContextSlot), R(context), U8(5),
- B(ToName),
- B(Star), R(6),
- B(LdaConstant), U8(3),
- B(TestEqualStrict), R(6),
- B(JumpIfFalse), U8(7),
- B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(CreateClosure), U8(5), U8(0),
- B(Star), R(7),
- B(LdaSmi), U8(1),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(2),
+ B(PushContext), R(3),
+ B(LdaTheHole),
+ B(Star), R(2),
+ /* 30 E> */ B(StackCheck),
+ /* 43 S> */ B(LdaConstant), U8(0),
+ /* 43 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 57 S> */ B(LdaConstant), U8(1),
+ /* 57 E> */ B(StaContextSlot), R(context), U8(5),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 62 S> */ B(LdaTheHole),
+ B(Star), R(4),
+ B(CreateClosure), U8(2), U8(2),
+ B(Star), R(5),
+ B(LdaSmi), U8(62),
+ B(Star), R(6),
+ B(Wide), B(LdaSmi), U16(128),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(LdrNamedProperty), R(4), U8(3), U8(1), R(5),
+ /* 75 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(ToName), R(7),
+ B(CreateClosure), U8(4), U8(2),
+ B(Star), R(8),
+ B(LdaSmi), U8(2),
+ B(Star), R(9),
+ B(LdaSmi), U8(1),
+ B(Star), R(10),
+ B(Mov), R(5), R(6),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(6), U8(5),
+ /* 106 E> */ B(LdaContextSlot), R(context), U8(5),
+ B(ToName), R(7),
+ B(LdaConstant), U8(3),
+ B(TestEqualStrict), R(7),
+ B(Mov), R(4), R(6),
+ B(JumpIfToBooleanFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
+ B(CreateClosure), U8(5), U8(2),
+ B(Star), R(8),
+ B(LdaSmi), U8(1),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(6), U8(5),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(2),
+ B(LdaUndefined),
+ /* 129 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -196,41 +194,41 @@ snippet: "
class C { constructor() { count++; }}
return new C();
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 74
+bytecode array length: 72
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(2),
- B(LdaTheHole),
- B(Star), R(1),
- B(StackCheck),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(4),
- B(LdaTheHole),
- B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(3),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(4),
- B(LdaSmi), U8(49),
- B(Star), R(5),
- B(LdaSmi), U8(86),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
- B(Star), R(3),
- B(LoadIC), R(3), U8(1), U8(1),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2),
- B(Star), R(0),
- B(Star), R(1),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(Star), R(3),
- B(New), R(3), R(0), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(3),
+ B(LdaTheHole),
+ B(Star), R(2),
+ /* 30 E> */ B(StackCheck),
+ /* 46 S> */ B(LdaZero),
+ /* 46 E> */ B(StaContextSlot), R(context), U8(4),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 49 S> */ B(LdaTheHole),
+ B(Star), R(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(5),
+ B(LdaSmi), U8(49),
+ B(Star), R(6),
+ B(LdaSmi), U8(86),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(LdrNamedProperty), R(4), U8(1), U8(1), R(5),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(Star), R(2),
+ /* 87 S> */ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(2),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(Star), R(4),
+ /* 94 E> */ B(New), R(4), R(0), U8(0),
+ /* 103 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index ccabedc490..873857a613 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -13,17 +13,16 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 16
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Add), R(1),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(AddSmi), U8(2), R(0), U8(1),
+ B(Mov), R(0), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 53 S> */ B(Return),
]
constant pool: [
]
@@ -36,17 +35,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 17
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Div), R(1),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(2),
+ B(Div), R(0), U8(1),
+ B(Mov), R(0), R(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 53 S> */ B(Return),
]
constant pool: [
]
@@ -59,20 +58,17 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 27
+bytecode array length: 25
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(1), U8(1),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(Mul), R(2),
- B(StoreICSloppy), R(1), U8(1), U8(3),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 54 S> */ B(LdrNamedProperty), R(0), U8(1), U8(1), R(2),
+ B(LdaSmi), U8(2),
+ B(Mul), R(2), U8(3),
+ /* 61 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(4),
+ B(LdaUndefined),
+ /* 67 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -87,22 +83,19 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 30
+bytecode array length: 28
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Star), R(2),
- B(KeyedLoadIC), R(1), U8(1),
- B(Star), R(3),
- B(LdaSmi), U8(2),
- B(BitwiseXor), R(3),
- B(KeyedStoreICSloppy), R(1), R(2), U8(3),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 52 S> */ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(LdrKeyedProperty), R(0), U8(1), R(3),
+ B(LdaSmi), U8(2),
+ B(BitwiseXor), R(3), U8(3),
+ /* 57 E> */ B(StaKeyedPropertySloppy), R(0), R(2), U8(4),
+ B(LdaUndefined),
+ /* 63 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -116,21 +109,19 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 30
+bytecode array length: 26
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(LdaContextSlot), R(context), U8(4),
- B(Star), R(1),
- B(LdaSmi), U8(24),
- B(BitwiseOr), R(1),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 45 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 75 S> */ B(LdrContextSlot), R(context), U8(4), R(1),
+ B(BitwiseOrSmi), U8(24), R(1), U8(1),
+ /* 77 E> */ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ /* 84 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
index 17327a508d..ddaf989ca3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
@@ -13,15 +13,11 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 12
+bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(JumpIfToBooleanFalse), U8(6),
- B(LdaSmi), U8(2),
- B(Jump), U8(4),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(2),
+ /* 52 S> */ B(Return),
]
constant pool: [
]
@@ -34,19 +30,58 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 20
+bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(JumpIfToBooleanFalse), U8(14),
- B(LdaSmi), U8(2),
- B(JumpIfToBooleanFalse), U8(6),
- B(LdaSmi), U8(3),
- B(Jump), U8(4),
- B(LdaSmi), U8(4),
- B(Jump), U8(4),
- B(LdaSmi), U8(5),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(3),
+ /* 60 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return 0 < 1 ? 2 : 3;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 17
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaZero),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ /* 43 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(2),
+ B(Jump), U8(4),
+ B(LdaSmi), U8(3),
+ /* 56 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ var x = 0;
+ return x ? 2 : 3;
+"
+frame size: 1
+parameter count: 1
+bytecode array length: 13
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanFalse), U8(6),
+ B(LdaSmi), U8(2),
+ B(Jump), U8(4),
+ B(LdaSmi), U8(3),
+ /* 63 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
index d3fb484960..f2120cf876 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
@@ -15,13 +15,13 @@ frame size: 1
parameter count: 1
bytecode array length: 10
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 48 S> */ B(Return),
]
constant pool: [
]
@@ -36,16 +36,16 @@ frame size: 2
parameter count: 1
bytecode array length: 20
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 48 S> */ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ /* 58 S> */ B(Return),
]
constant pool: [
"x",
@@ -59,23 +59,22 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 32
+bytecode array length: 30
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Ldar), R(1),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 48 S> */ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ /* 48 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 55 S> */ B(Return),
]
constant pool: [
"x",
@@ -89,25 +88,23 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 36
+bytecode array length: 32
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Ldar), R(1),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 48 S> */ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ /* 50 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(LdaUndefined),
+ /* 56 S> */ B(Return),
]
constant pool: [
"x",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
index 3b445c1ccb..8bc1afcf37 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -13,19 +13,19 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 21
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaSmi), U8(10),
+ /* 44 E> */ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ /* 74 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -39,23 +39,23 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 37
+bytecode array length: 34
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaContextSlot), R(context), U8(4),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaSmi), U8(10),
+ /* 44 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 74 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ /* 84 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -70,28 +70,26 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 50
+bytecode array length: 42
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(20),
- B(Star), R(2),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 47 S> */ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ /* 47 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ /* 47 E> */ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ /* 80 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -106,29 +104,27 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 52
+bytecode array length: 44
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaContextSlot), R(context), U8(4),
- B(LdaSmi), U8(20),
- B(Star), R(2),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaSmi), U8(10),
+ /* 44 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 48 S> */ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ /* 50 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ B(LdaUndefined),
+ /* 82 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
index fb74600241..f07e5ce4d7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
@@ -15,15 +15,15 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 17
+bytecode array length: 14
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4),
- B(StackCheck),
- B(CreateClosure), U8(0), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(4),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 52 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -38,17 +38,17 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4),
- B(StackCheck),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(LdaContextSlot), R(context), U8(4),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(4),
+ /* 10 E> */ B(StackCheck),
+ /* 27 S> */ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 66 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -63,17 +63,17 @@ snippet: "
"
frame size: 1
parameter count: 5
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(arg2),
- B(StaContextSlot), R(context), U8(4),
- B(StackCheck),
- B(CreateClosure), U8(0), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(2),
+ B(PushContext), R(0),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(arg2),
+ B(StaContextSlot), R(context), U8(4),
+ /* 10 E> */ B(StackCheck),
+ /* 29 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 61 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -88,15 +88,15 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 14
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(StackCheck),
- B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 26 S> */ B(Ldar), R(this),
+ /* 26 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 32 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 65 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index b86e22bb67..b3226e0d64 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -13,13 +13,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 9
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(StackCheck),
- B(CreateClosure), U8(0), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 41 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 71 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -33,15 +33,15 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 14
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 45 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 75 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -55,17 +55,17 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 19
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(LdaSmi), U8(2),
- B(StaContextSlot), R(context), U8(5),
- B(CreateClosure), U8(0), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(2),
+ B(PushContext), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 53 S> */ B(LdaSmi), U8(2),
+ /* 53 E> */ B(StaContextSlot), R(context), U8(5),
+ /* 56 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 92 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -79,18 +79,17 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 21
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(2),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(1), U8(1),
- B(LdaContextSlot), R(context), U8(4),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 41 S> */ B(LdrUndefined), R(2),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(1),
+ /* 64 E> */ B(Call), R(1), R(2), U8(1), U8(1),
+ /* 68 S> */ B(LdaContextSlot), R(context), U8(4),
+ /* 78 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -104,30 +103,27 @@ snippet: "
let a = 1;
{ let b = 2; return function() { a + b; }; }
"
-frame size: 4
+frame size: 2
parameter count: 1
-bytecode array length: 47
+bytecode array length: 35
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- B(Ldar), R(closure),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kPushBlockContext), R(2), U8(2),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(LdaSmi), U8(2),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(1), U8(0),
- B(PopContext), R(0),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ /* 30 E> */ B(StackCheck),
+ /* 56 S> */ B(LdaSmi), U8(1),
+ /* 56 E> */ B(StaContextSlot), R(context), U8(4),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ /* 69 S> */ B(LdaSmi), U8(2),
+ /* 69 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 72 S> */ B(CreateClosure), U8(1), U8(2),
+ B(PopContext), R(0),
+ /* 104 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -394,527 +390,524 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 1046
+bytecode array length: 1040
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
- B(CreateUnmappedArguments),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- B(StackCheck),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(7),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(8),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(9),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(10),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(11),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(12),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(13),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(14),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(15),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(16),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(17),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(18),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(19),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(20),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(21),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(22),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(23),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(24),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(25),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(26),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(27),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(28),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(29),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(30),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(31),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(32),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(33),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(34),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(35),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(36),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(37),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(38),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(39),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(40),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(41),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(42),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(43),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(44),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(45),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(46),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(47),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(48),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(49),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(50),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(51),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(52),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(53),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(54),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(55),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(56),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(57),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(58),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(59),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(60),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(61),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(62),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(63),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(64),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(65),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(66),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(67),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(68),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(69),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(70),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(71),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(72),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(73),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(74),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(75),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(76),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(77),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(78),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(79),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(80),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(81),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(82),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(83),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(84),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(85),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(86),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(87),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(88),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(89),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(90),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(91),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(92),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(93),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(94),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(95),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(96),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(97),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(98),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(99),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(100),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(101),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(102),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(103),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(104),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(105),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(106),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(107),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(108),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(109),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(110),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(111),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(112),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(113),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(114),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(115),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(116),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(117),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(118),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(119),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(120),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(121),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(122),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(123),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(124),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(125),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(126),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(127),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(128),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(129),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(130),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(131),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(132),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(133),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(134),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(135),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(136),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(137),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(138),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(139),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(140),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(141),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(142),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(143),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(144),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(145),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(146),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(147),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(148),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(149),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(150),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(151),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(152),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(153),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(154),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(155),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(156),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(157),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(158),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(159),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(160),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(161),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(162),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(163),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(164),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(165),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(166),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(167),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(168),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(169),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(170),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(171),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(172),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(173),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(174),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(175),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(176),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(177),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(178),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(179),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(180),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(181),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(182),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(183),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(184),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(185),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(186),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(187),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(188),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(189),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(190),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(191),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(192),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(193),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(194),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(195),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(196),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(197),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(198),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(199),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(200),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(201),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(202),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(203),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(204),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(205),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(206),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(207),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(208),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(209),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(210),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(211),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(212),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(213),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(214),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(215),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(216),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(217),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(218),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(219),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(220),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(221),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(222),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(223),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(224),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(225),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(226),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(227),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(228),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(229),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(230),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(231),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(232),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(233),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(234),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(235),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(236),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(237),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(238),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(239),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(240),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(241),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(242),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(243),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(244),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(245),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(246),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(247),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(248),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(249),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(250),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(251),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(252),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(253),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(254),
- B(LdaZero),
- B(StaContextSlot), R(context), U8(255),
- B(LdaUndefined),
- B(Star), R(2),
- B(LdaGlobal), U8(0), U8(1),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(1), U8(0),
- B(LdaSmi), U8(100),
- B(Wide), B(StaContextSlot), R16(context), U16(256),
- B(Wide), B(LdaContextSlot), R16(context), U16(256),
- B(Return),
+ B(CreateFunctionContext), U8(253),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateUnmappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ /* 30 E> */ B(StackCheck),
+ /* 57 S> */ B(LdaZero),
+ /* 57 E> */ B(StaContextSlot), R(context), U8(7),
+ /* 69 S> */ B(LdaZero),
+ /* 69 E> */ B(StaContextSlot), R(context), U8(8),
+ /* 81 S> */ B(LdaZero),
+ /* 81 E> */ B(StaContextSlot), R(context), U8(9),
+ /* 93 S> */ B(LdaZero),
+ /* 93 E> */ B(StaContextSlot), R(context), U8(10),
+ /* 105 S> */ B(LdaZero),
+ /* 105 E> */ B(StaContextSlot), R(context), U8(11),
+ /* 117 S> */ B(LdaZero),
+ /* 117 E> */ B(StaContextSlot), R(context), U8(12),
+ /* 129 S> */ B(LdaZero),
+ /* 129 E> */ B(StaContextSlot), R(context), U8(13),
+ /* 141 S> */ B(LdaZero),
+ /* 141 E> */ B(StaContextSlot), R(context), U8(14),
+ /* 153 S> */ B(LdaZero),
+ /* 153 E> */ B(StaContextSlot), R(context), U8(15),
+ /* 165 S> */ B(LdaZero),
+ /* 165 E> */ B(StaContextSlot), R(context), U8(16),
+ /* 178 S> */ B(LdaZero),
+ /* 178 E> */ B(StaContextSlot), R(context), U8(17),
+ /* 191 S> */ B(LdaZero),
+ /* 191 E> */ B(StaContextSlot), R(context), U8(18),
+ /* 204 S> */ B(LdaZero),
+ /* 204 E> */ B(StaContextSlot), R(context), U8(19),
+ /* 217 S> */ B(LdaZero),
+ /* 217 E> */ B(StaContextSlot), R(context), U8(20),
+ /* 230 S> */ B(LdaZero),
+ /* 230 E> */ B(StaContextSlot), R(context), U8(21),
+ /* 243 S> */ B(LdaZero),
+ /* 243 E> */ B(StaContextSlot), R(context), U8(22),
+ /* 256 S> */ B(LdaZero),
+ /* 256 E> */ B(StaContextSlot), R(context), U8(23),
+ /* 269 S> */ B(LdaZero),
+ /* 269 E> */ B(StaContextSlot), R(context), U8(24),
+ /* 282 S> */ B(LdaZero),
+ /* 282 E> */ B(StaContextSlot), R(context), U8(25),
+ /* 295 S> */ B(LdaZero),
+ /* 295 E> */ B(StaContextSlot), R(context), U8(26),
+ /* 308 S> */ B(LdaZero),
+ /* 308 E> */ B(StaContextSlot), R(context), U8(27),
+ /* 321 S> */ B(LdaZero),
+ /* 321 E> */ B(StaContextSlot), R(context), U8(28),
+ /* 334 S> */ B(LdaZero),
+ /* 334 E> */ B(StaContextSlot), R(context), U8(29),
+ /* 347 S> */ B(LdaZero),
+ /* 347 E> */ B(StaContextSlot), R(context), U8(30),
+ /* 360 S> */ B(LdaZero),
+ /* 360 E> */ B(StaContextSlot), R(context), U8(31),
+ /* 373 S> */ B(LdaZero),
+ /* 373 E> */ B(StaContextSlot), R(context), U8(32),
+ /* 386 S> */ B(LdaZero),
+ /* 386 E> */ B(StaContextSlot), R(context), U8(33),
+ /* 399 S> */ B(LdaZero),
+ /* 399 E> */ B(StaContextSlot), R(context), U8(34),
+ /* 412 S> */ B(LdaZero),
+ /* 412 E> */ B(StaContextSlot), R(context), U8(35),
+ /* 425 S> */ B(LdaZero),
+ /* 425 E> */ B(StaContextSlot), R(context), U8(36),
+ /* 438 S> */ B(LdaZero),
+ /* 438 E> */ B(StaContextSlot), R(context), U8(37),
+ /* 451 S> */ B(LdaZero),
+ /* 451 E> */ B(StaContextSlot), R(context), U8(38),
+ /* 464 S> */ B(LdaZero),
+ /* 464 E> */ B(StaContextSlot), R(context), U8(39),
+ /* 477 S> */ B(LdaZero),
+ /* 477 E> */ B(StaContextSlot), R(context), U8(40),
+ /* 490 S> */ B(LdaZero),
+ /* 490 E> */ B(StaContextSlot), R(context), U8(41),
+ /* 503 S> */ B(LdaZero),
+ /* 503 E> */ B(StaContextSlot), R(context), U8(42),
+ /* 516 S> */ B(LdaZero),
+ /* 516 E> */ B(StaContextSlot), R(context), U8(43),
+ /* 529 S> */ B(LdaZero),
+ /* 529 E> */ B(StaContextSlot), R(context), U8(44),
+ /* 542 S> */ B(LdaZero),
+ /* 542 E> */ B(StaContextSlot), R(context), U8(45),
+ /* 555 S> */ B(LdaZero),
+ /* 555 E> */ B(StaContextSlot), R(context), U8(46),
+ /* 568 S> */ B(LdaZero),
+ /* 568 E> */ B(StaContextSlot), R(context), U8(47),
+ /* 581 S> */ B(LdaZero),
+ /* 581 E> */ B(StaContextSlot), R(context), U8(48),
+ /* 594 S> */ B(LdaZero),
+ /* 594 E> */ B(StaContextSlot), R(context), U8(49),
+ /* 607 S> */ B(LdaZero),
+ /* 607 E> */ B(StaContextSlot), R(context), U8(50),
+ /* 620 S> */ B(LdaZero),
+ /* 620 E> */ B(StaContextSlot), R(context), U8(51),
+ /* 633 S> */ B(LdaZero),
+ /* 633 E> */ B(StaContextSlot), R(context), U8(52),
+ /* 646 S> */ B(LdaZero),
+ /* 646 E> */ B(StaContextSlot), R(context), U8(53),
+ /* 659 S> */ B(LdaZero),
+ /* 659 E> */ B(StaContextSlot), R(context), U8(54),
+ /* 672 S> */ B(LdaZero),
+ /* 672 E> */ B(StaContextSlot), R(context), U8(55),
+ /* 685 S> */ B(LdaZero),
+ /* 685 E> */ B(StaContextSlot), R(context), U8(56),
+ /* 698 S> */ B(LdaZero),
+ /* 698 E> */ B(StaContextSlot), R(context), U8(57),
+ /* 711 S> */ B(LdaZero),
+ /* 711 E> */ B(StaContextSlot), R(context), U8(58),
+ /* 724 S> */ B(LdaZero),
+ /* 724 E> */ B(StaContextSlot), R(context), U8(59),
+ /* 737 S> */ B(LdaZero),
+ /* 737 E> */ B(StaContextSlot), R(context), U8(60),
+ /* 750 S> */ B(LdaZero),
+ /* 750 E> */ B(StaContextSlot), R(context), U8(61),
+ /* 763 S> */ B(LdaZero),
+ /* 763 E> */ B(StaContextSlot), R(context), U8(62),
+ /* 776 S> */ B(LdaZero),
+ /* 776 E> */ B(StaContextSlot), R(context), U8(63),
+ /* 789 S> */ B(LdaZero),
+ /* 789 E> */ B(StaContextSlot), R(context), U8(64),
+ /* 802 S> */ B(LdaZero),
+ /* 802 E> */ B(StaContextSlot), R(context), U8(65),
+ /* 815 S> */ B(LdaZero),
+ /* 815 E> */ B(StaContextSlot), R(context), U8(66),
+ /* 828 S> */ B(LdaZero),
+ /* 828 E> */ B(StaContextSlot), R(context), U8(67),
+ /* 841 S> */ B(LdaZero),
+ /* 841 E> */ B(StaContextSlot), R(context), U8(68),
+ /* 854 S> */ B(LdaZero),
+ /* 854 E> */ B(StaContextSlot), R(context), U8(69),
+ /* 867 S> */ B(LdaZero),
+ /* 867 E> */ B(StaContextSlot), R(context), U8(70),
+ /* 880 S> */ B(LdaZero),
+ /* 880 E> */ B(StaContextSlot), R(context), U8(71),
+ /* 893 S> */ B(LdaZero),
+ /* 893 E> */ B(StaContextSlot), R(context), U8(72),
+ /* 906 S> */ B(LdaZero),
+ /* 906 E> */ B(StaContextSlot), R(context), U8(73),
+ /* 919 S> */ B(LdaZero),
+ /* 919 E> */ B(StaContextSlot), R(context), U8(74),
+ /* 932 S> */ B(LdaZero),
+ /* 932 E> */ B(StaContextSlot), R(context), U8(75),
+ /* 945 S> */ B(LdaZero),
+ /* 945 E> */ B(StaContextSlot), R(context), U8(76),
+ /* 958 S> */ B(LdaZero),
+ /* 958 E> */ B(StaContextSlot), R(context), U8(77),
+ /* 971 S> */ B(LdaZero),
+ /* 971 E> */ B(StaContextSlot), R(context), U8(78),
+ /* 984 S> */ B(LdaZero),
+ /* 984 E> */ B(StaContextSlot), R(context), U8(79),
+ /* 997 S> */ B(LdaZero),
+ /* 997 E> */ B(StaContextSlot), R(context), U8(80),
+ /* 1010 S> */ B(LdaZero),
+ /* 1010 E> */ B(StaContextSlot), R(context), U8(81),
+ /* 1023 S> */ B(LdaZero),
+ /* 1023 E> */ B(StaContextSlot), R(context), U8(82),
+ /* 1036 S> */ B(LdaZero),
+ /* 1036 E> */ B(StaContextSlot), R(context), U8(83),
+ /* 1049 S> */ B(LdaZero),
+ /* 1049 E> */ B(StaContextSlot), R(context), U8(84),
+ /* 1062 S> */ B(LdaZero),
+ /* 1062 E> */ B(StaContextSlot), R(context), U8(85),
+ /* 1075 S> */ B(LdaZero),
+ /* 1075 E> */ B(StaContextSlot), R(context), U8(86),
+ /* 1088 S> */ B(LdaZero),
+ /* 1088 E> */ B(StaContextSlot), R(context), U8(87),
+ /* 1101 S> */ B(LdaZero),
+ /* 1101 E> */ B(StaContextSlot), R(context), U8(88),
+ /* 1114 S> */ B(LdaZero),
+ /* 1114 E> */ B(StaContextSlot), R(context), U8(89),
+ /* 1127 S> */ B(LdaZero),
+ /* 1127 E> */ B(StaContextSlot), R(context), U8(90),
+ /* 1140 S> */ B(LdaZero),
+ /* 1140 E> */ B(StaContextSlot), R(context), U8(91),
+ /* 1153 S> */ B(LdaZero),
+ /* 1153 E> */ B(StaContextSlot), R(context), U8(92),
+ /* 1166 S> */ B(LdaZero),
+ /* 1166 E> */ B(StaContextSlot), R(context), U8(93),
+ /* 1179 S> */ B(LdaZero),
+ /* 1179 E> */ B(StaContextSlot), R(context), U8(94),
+ /* 1192 S> */ B(LdaZero),
+ /* 1192 E> */ B(StaContextSlot), R(context), U8(95),
+ /* 1205 S> */ B(LdaZero),
+ /* 1205 E> */ B(StaContextSlot), R(context), U8(96),
+ /* 1218 S> */ B(LdaZero),
+ /* 1218 E> */ B(StaContextSlot), R(context), U8(97),
+ /* 1231 S> */ B(LdaZero),
+ /* 1231 E> */ B(StaContextSlot), R(context), U8(98),
+ /* 1244 S> */ B(LdaZero),
+ /* 1244 E> */ B(StaContextSlot), R(context), U8(99),
+ /* 1257 S> */ B(LdaZero),
+ /* 1257 E> */ B(StaContextSlot), R(context), U8(100),
+ /* 1270 S> */ B(LdaZero),
+ /* 1270 E> */ B(StaContextSlot), R(context), U8(101),
+ /* 1283 S> */ B(LdaZero),
+ /* 1283 E> */ B(StaContextSlot), R(context), U8(102),
+ /* 1296 S> */ B(LdaZero),
+ /* 1296 E> */ B(StaContextSlot), R(context), U8(103),
+ /* 1309 S> */ B(LdaZero),
+ /* 1309 E> */ B(StaContextSlot), R(context), U8(104),
+ /* 1322 S> */ B(LdaZero),
+ /* 1322 E> */ B(StaContextSlot), R(context), U8(105),
+ /* 1335 S> */ B(LdaZero),
+ /* 1335 E> */ B(StaContextSlot), R(context), U8(106),
+ /* 1349 S> */ B(LdaZero),
+ /* 1349 E> */ B(StaContextSlot), R(context), U8(107),
+ /* 1363 S> */ B(LdaZero),
+ /* 1363 E> */ B(StaContextSlot), R(context), U8(108),
+ /* 1377 S> */ B(LdaZero),
+ /* 1377 E> */ B(StaContextSlot), R(context), U8(109),
+ /* 1391 S> */ B(LdaZero),
+ /* 1391 E> */ B(StaContextSlot), R(context), U8(110),
+ /* 1405 S> */ B(LdaZero),
+ /* 1405 E> */ B(StaContextSlot), R(context), U8(111),
+ /* 1419 S> */ B(LdaZero),
+ /* 1419 E> */ B(StaContextSlot), R(context), U8(112),
+ /* 1433 S> */ B(LdaZero),
+ /* 1433 E> */ B(StaContextSlot), R(context), U8(113),
+ /* 1447 S> */ B(LdaZero),
+ /* 1447 E> */ B(StaContextSlot), R(context), U8(114),
+ /* 1461 S> */ B(LdaZero),
+ /* 1461 E> */ B(StaContextSlot), R(context), U8(115),
+ /* 1475 S> */ B(LdaZero),
+ /* 1475 E> */ B(StaContextSlot), R(context), U8(116),
+ /* 1489 S> */ B(LdaZero),
+ /* 1489 E> */ B(StaContextSlot), R(context), U8(117),
+ /* 1503 S> */ B(LdaZero),
+ /* 1503 E> */ B(StaContextSlot), R(context), U8(118),
+ /* 1517 S> */ B(LdaZero),
+ /* 1517 E> */ B(StaContextSlot), R(context), U8(119),
+ /* 1531 S> */ B(LdaZero),
+ /* 1531 E> */ B(StaContextSlot), R(context), U8(120),
+ /* 1545 S> */ B(LdaZero),
+ /* 1545 E> */ B(StaContextSlot), R(context), U8(121),
+ /* 1559 S> */ B(LdaZero),
+ /* 1559 E> */ B(StaContextSlot), R(context), U8(122),
+ /* 1573 S> */ B(LdaZero),
+ /* 1573 E> */ B(StaContextSlot), R(context), U8(123),
+ /* 1587 S> */ B(LdaZero),
+ /* 1587 E> */ B(StaContextSlot), R(context), U8(124),
+ /* 1601 S> */ B(LdaZero),
+ /* 1601 E> */ B(StaContextSlot), R(context), U8(125),
+ /* 1615 S> */ B(LdaZero),
+ /* 1615 E> */ B(StaContextSlot), R(context), U8(126),
+ /* 1629 S> */ B(LdaZero),
+ /* 1629 E> */ B(StaContextSlot), R(context), U8(127),
+ /* 1643 S> */ B(LdaZero),
+ /* 1643 E> */ B(StaContextSlot), R(context), U8(128),
+ /* 1657 S> */ B(LdaZero),
+ /* 1657 E> */ B(StaContextSlot), R(context), U8(129),
+ /* 1671 S> */ B(LdaZero),
+ /* 1671 E> */ B(StaContextSlot), R(context), U8(130),
+ /* 1685 S> */ B(LdaZero),
+ /* 1685 E> */ B(StaContextSlot), R(context), U8(131),
+ /* 1699 S> */ B(LdaZero),
+ /* 1699 E> */ B(StaContextSlot), R(context), U8(132),
+ /* 1713 S> */ B(LdaZero),
+ /* 1713 E> */ B(StaContextSlot), R(context), U8(133),
+ /* 1727 S> */ B(LdaZero),
+ /* 1727 E> */ B(StaContextSlot), R(context), U8(134),
+ /* 1741 S> */ B(LdaZero),
+ /* 1741 E> */ B(StaContextSlot), R(context), U8(135),
+ /* 1755 S> */ B(LdaZero),
+ /* 1755 E> */ B(StaContextSlot), R(context), U8(136),
+ /* 1769 S> */ B(LdaZero),
+ /* 1769 E> */ B(StaContextSlot), R(context), U8(137),
+ /* 1783 S> */ B(LdaZero),
+ /* 1783 E> */ B(StaContextSlot), R(context), U8(138),
+ /* 1797 S> */ B(LdaZero),
+ /* 1797 E> */ B(StaContextSlot), R(context), U8(139),
+ /* 1811 S> */ B(LdaZero),
+ /* 1811 E> */ B(StaContextSlot), R(context), U8(140),
+ /* 1825 S> */ B(LdaZero),
+ /* 1825 E> */ B(StaContextSlot), R(context), U8(141),
+ /* 1839 S> */ B(LdaZero),
+ /* 1839 E> */ B(StaContextSlot), R(context), U8(142),
+ /* 1853 S> */ B(LdaZero),
+ /* 1853 E> */ B(StaContextSlot), R(context), U8(143),
+ /* 1867 S> */ B(LdaZero),
+ /* 1867 E> */ B(StaContextSlot), R(context), U8(144),
+ /* 1881 S> */ B(LdaZero),
+ /* 1881 E> */ B(StaContextSlot), R(context), U8(145),
+ /* 1895 S> */ B(LdaZero),
+ /* 1895 E> */ B(StaContextSlot), R(context), U8(146),
+ /* 1909 S> */ B(LdaZero),
+ /* 1909 E> */ B(StaContextSlot), R(context), U8(147),
+ /* 1923 S> */ B(LdaZero),
+ /* 1923 E> */ B(StaContextSlot), R(context), U8(148),
+ /* 1937 S> */ B(LdaZero),
+ /* 1937 E> */ B(StaContextSlot), R(context), U8(149),
+ /* 1951 S> */ B(LdaZero),
+ /* 1951 E> */ B(StaContextSlot), R(context), U8(150),
+ /* 1965 S> */ B(LdaZero),
+ /* 1965 E> */ B(StaContextSlot), R(context), U8(151),
+ /* 1979 S> */ B(LdaZero),
+ /* 1979 E> */ B(StaContextSlot), R(context), U8(152),
+ /* 1993 S> */ B(LdaZero),
+ /* 1993 E> */ B(StaContextSlot), R(context), U8(153),
+ /* 2007 S> */ B(LdaZero),
+ /* 2007 E> */ B(StaContextSlot), R(context), U8(154),
+ /* 2021 S> */ B(LdaZero),
+ /* 2021 E> */ B(StaContextSlot), R(context), U8(155),
+ /* 2035 S> */ B(LdaZero),
+ /* 2035 E> */ B(StaContextSlot), R(context), U8(156),
+ /* 2049 S> */ B(LdaZero),
+ /* 2049 E> */ B(StaContextSlot), R(context), U8(157),
+ /* 2063 S> */ B(LdaZero),
+ /* 2063 E> */ B(StaContextSlot), R(context), U8(158),
+ /* 2077 S> */ B(LdaZero),
+ /* 2077 E> */ B(StaContextSlot), R(context), U8(159),
+ /* 2091 S> */ B(LdaZero),
+ /* 2091 E> */ B(StaContextSlot), R(context), U8(160),
+ /* 2105 S> */ B(LdaZero),
+ /* 2105 E> */ B(StaContextSlot), R(context), U8(161),
+ /* 2119 S> */ B(LdaZero),
+ /* 2119 E> */ B(StaContextSlot), R(context), U8(162),
+ /* 2133 S> */ B(LdaZero),
+ /* 2133 E> */ B(StaContextSlot), R(context), U8(163),
+ /* 2147 S> */ B(LdaZero),
+ /* 2147 E> */ B(StaContextSlot), R(context), U8(164),
+ /* 2161 S> */ B(LdaZero),
+ /* 2161 E> */ B(StaContextSlot), R(context), U8(165),
+ /* 2175 S> */ B(LdaZero),
+ /* 2175 E> */ B(StaContextSlot), R(context), U8(166),
+ /* 2189 S> */ B(LdaZero),
+ /* 2189 E> */ B(StaContextSlot), R(context), U8(167),
+ /* 2203 S> */ B(LdaZero),
+ /* 2203 E> */ B(StaContextSlot), R(context), U8(168),
+ /* 2217 S> */ B(LdaZero),
+ /* 2217 E> */ B(StaContextSlot), R(context), U8(169),
+ /* 2231 S> */ B(LdaZero),
+ /* 2231 E> */ B(StaContextSlot), R(context), U8(170),
+ /* 2245 S> */ B(LdaZero),
+ /* 2245 E> */ B(StaContextSlot), R(context), U8(171),
+ /* 2259 S> */ B(LdaZero),
+ /* 2259 E> */ B(StaContextSlot), R(context), U8(172),
+ /* 2273 S> */ B(LdaZero),
+ /* 2273 E> */ B(StaContextSlot), R(context), U8(173),
+ /* 2287 S> */ B(LdaZero),
+ /* 2287 E> */ B(StaContextSlot), R(context), U8(174),
+ /* 2301 S> */ B(LdaZero),
+ /* 2301 E> */ B(StaContextSlot), R(context), U8(175),
+ /* 2315 S> */ B(LdaZero),
+ /* 2315 E> */ B(StaContextSlot), R(context), U8(176),
+ /* 2329 S> */ B(LdaZero),
+ /* 2329 E> */ B(StaContextSlot), R(context), U8(177),
+ /* 2343 S> */ B(LdaZero),
+ /* 2343 E> */ B(StaContextSlot), R(context), U8(178),
+ /* 2357 S> */ B(LdaZero),
+ /* 2357 E> */ B(StaContextSlot), R(context), U8(179),
+ /* 2371 S> */ B(LdaZero),
+ /* 2371 E> */ B(StaContextSlot), R(context), U8(180),
+ /* 2385 S> */ B(LdaZero),
+ /* 2385 E> */ B(StaContextSlot), R(context), U8(181),
+ /* 2399 S> */ B(LdaZero),
+ /* 2399 E> */ B(StaContextSlot), R(context), U8(182),
+ /* 2413 S> */ B(LdaZero),
+ /* 2413 E> */ B(StaContextSlot), R(context), U8(183),
+ /* 2427 S> */ B(LdaZero),
+ /* 2427 E> */ B(StaContextSlot), R(context), U8(184),
+ /* 2441 S> */ B(LdaZero),
+ /* 2441 E> */ B(StaContextSlot), R(context), U8(185),
+ /* 2455 S> */ B(LdaZero),
+ /* 2455 E> */ B(StaContextSlot), R(context), U8(186),
+ /* 2469 S> */ B(LdaZero),
+ /* 2469 E> */ B(StaContextSlot), R(context), U8(187),
+ /* 2483 S> */ B(LdaZero),
+ /* 2483 E> */ B(StaContextSlot), R(context), U8(188),
+ /* 2497 S> */ B(LdaZero),
+ /* 2497 E> */ B(StaContextSlot), R(context), U8(189),
+ /* 2511 S> */ B(LdaZero),
+ /* 2511 E> */ B(StaContextSlot), R(context), U8(190),
+ /* 2525 S> */ B(LdaZero),
+ /* 2525 E> */ B(StaContextSlot), R(context), U8(191),
+ /* 2539 S> */ B(LdaZero),
+ /* 2539 E> */ B(StaContextSlot), R(context), U8(192),
+ /* 2553 S> */ B(LdaZero),
+ /* 2553 E> */ B(StaContextSlot), R(context), U8(193),
+ /* 2567 S> */ B(LdaZero),
+ /* 2567 E> */ B(StaContextSlot), R(context), U8(194),
+ /* 2581 S> */ B(LdaZero),
+ /* 2581 E> */ B(StaContextSlot), R(context), U8(195),
+ /* 2595 S> */ B(LdaZero),
+ /* 2595 E> */ B(StaContextSlot), R(context), U8(196),
+ /* 2609 S> */ B(LdaZero),
+ /* 2609 E> */ B(StaContextSlot), R(context), U8(197),
+ /* 2623 S> */ B(LdaZero),
+ /* 2623 E> */ B(StaContextSlot), R(context), U8(198),
+ /* 2637 S> */ B(LdaZero),
+ /* 2637 E> */ B(StaContextSlot), R(context), U8(199),
+ /* 2651 S> */ B(LdaZero),
+ /* 2651 E> */ B(StaContextSlot), R(context), U8(200),
+ /* 2665 S> */ B(LdaZero),
+ /* 2665 E> */ B(StaContextSlot), R(context), U8(201),
+ /* 2679 S> */ B(LdaZero),
+ /* 2679 E> */ B(StaContextSlot), R(context), U8(202),
+ /* 2693 S> */ B(LdaZero),
+ /* 2693 E> */ B(StaContextSlot), R(context), U8(203),
+ /* 2707 S> */ B(LdaZero),
+ /* 2707 E> */ B(StaContextSlot), R(context), U8(204),
+ /* 2721 S> */ B(LdaZero),
+ /* 2721 E> */ B(StaContextSlot), R(context), U8(205),
+ /* 2735 S> */ B(LdaZero),
+ /* 2735 E> */ B(StaContextSlot), R(context), U8(206),
+ /* 2749 S> */ B(LdaZero),
+ /* 2749 E> */ B(StaContextSlot), R(context), U8(207),
+ /* 2763 S> */ B(LdaZero),
+ /* 2763 E> */ B(StaContextSlot), R(context), U8(208),
+ /* 2777 S> */ B(LdaZero),
+ /* 2777 E> */ B(StaContextSlot), R(context), U8(209),
+ /* 2791 S> */ B(LdaZero),
+ /* 2791 E> */ B(StaContextSlot), R(context), U8(210),
+ /* 2805 S> */ B(LdaZero),
+ /* 2805 E> */ B(StaContextSlot), R(context), U8(211),
+ /* 2819 S> */ B(LdaZero),
+ /* 2819 E> */ B(StaContextSlot), R(context), U8(212),
+ /* 2833 S> */ B(LdaZero),
+ /* 2833 E> */ B(StaContextSlot), R(context), U8(213),
+ /* 2847 S> */ B(LdaZero),
+ /* 2847 E> */ B(StaContextSlot), R(context), U8(214),
+ /* 2861 S> */ B(LdaZero),
+ /* 2861 E> */ B(StaContextSlot), R(context), U8(215),
+ /* 2875 S> */ B(LdaZero),
+ /* 2875 E> */ B(StaContextSlot), R(context), U8(216),
+ /* 2889 S> */ B(LdaZero),
+ /* 2889 E> */ B(StaContextSlot), R(context), U8(217),
+ /* 2903 S> */ B(LdaZero),
+ /* 2903 E> */ B(StaContextSlot), R(context), U8(218),
+ /* 2917 S> */ B(LdaZero),
+ /* 2917 E> */ B(StaContextSlot), R(context), U8(219),
+ /* 2931 S> */ B(LdaZero),
+ /* 2931 E> */ B(StaContextSlot), R(context), U8(220),
+ /* 2945 S> */ B(LdaZero),
+ /* 2945 E> */ B(StaContextSlot), R(context), U8(221),
+ /* 2959 S> */ B(LdaZero),
+ /* 2959 E> */ B(StaContextSlot), R(context), U8(222),
+ /* 2973 S> */ B(LdaZero),
+ /* 2973 E> */ B(StaContextSlot), R(context), U8(223),
+ /* 2987 S> */ B(LdaZero),
+ /* 2987 E> */ B(StaContextSlot), R(context), U8(224),
+ /* 3001 S> */ B(LdaZero),
+ /* 3001 E> */ B(StaContextSlot), R(context), U8(225),
+ /* 3015 S> */ B(LdaZero),
+ /* 3015 E> */ B(StaContextSlot), R(context), U8(226),
+ /* 3029 S> */ B(LdaZero),
+ /* 3029 E> */ B(StaContextSlot), R(context), U8(227),
+ /* 3043 S> */ B(LdaZero),
+ /* 3043 E> */ B(StaContextSlot), R(context), U8(228),
+ /* 3057 S> */ B(LdaZero),
+ /* 3057 E> */ B(StaContextSlot), R(context), U8(229),
+ /* 3071 S> */ B(LdaZero),
+ /* 3071 E> */ B(StaContextSlot), R(context), U8(230),
+ /* 3085 S> */ B(LdaZero),
+ /* 3085 E> */ B(StaContextSlot), R(context), U8(231),
+ /* 3099 S> */ B(LdaZero),
+ /* 3099 E> */ B(StaContextSlot), R(context), U8(232),
+ /* 3113 S> */ B(LdaZero),
+ /* 3113 E> */ B(StaContextSlot), R(context), U8(233),
+ /* 3127 S> */ B(LdaZero),
+ /* 3127 E> */ B(StaContextSlot), R(context), U8(234),
+ /* 3141 S> */ B(LdaZero),
+ /* 3141 E> */ B(StaContextSlot), R(context), U8(235),
+ /* 3155 S> */ B(LdaZero),
+ /* 3155 E> */ B(StaContextSlot), R(context), U8(236),
+ /* 3169 S> */ B(LdaZero),
+ /* 3169 E> */ B(StaContextSlot), R(context), U8(237),
+ /* 3183 S> */ B(LdaZero),
+ /* 3183 E> */ B(StaContextSlot), R(context), U8(238),
+ /* 3197 S> */ B(LdaZero),
+ /* 3197 E> */ B(StaContextSlot), R(context), U8(239),
+ /* 3211 S> */ B(LdaZero),
+ /* 3211 E> */ B(StaContextSlot), R(context), U8(240),
+ /* 3225 S> */ B(LdaZero),
+ /* 3225 E> */ B(StaContextSlot), R(context), U8(241),
+ /* 3239 S> */ B(LdaZero),
+ /* 3239 E> */ B(StaContextSlot), R(context), U8(242),
+ /* 3253 S> */ B(LdaZero),
+ /* 3253 E> */ B(StaContextSlot), R(context), U8(243),
+ /* 3267 S> */ B(LdaZero),
+ /* 3267 E> */ B(StaContextSlot), R(context), U8(244),
+ /* 3281 S> */ B(LdaZero),
+ /* 3281 E> */ B(StaContextSlot), R(context), U8(245),
+ /* 3295 S> */ B(LdaZero),
+ /* 3295 E> */ B(StaContextSlot), R(context), U8(246),
+ /* 3309 S> */ B(LdaZero),
+ /* 3309 E> */ B(StaContextSlot), R(context), U8(247),
+ /* 3323 S> */ B(LdaZero),
+ /* 3323 E> */ B(StaContextSlot), R(context), U8(248),
+ /* 3337 S> */ B(LdaZero),
+ /* 3337 E> */ B(StaContextSlot), R(context), U8(249),
+ /* 3351 S> */ B(LdaZero),
+ /* 3351 E> */ B(StaContextSlot), R(context), U8(250),
+ /* 3365 S> */ B(LdaZero),
+ /* 3365 E> */ B(StaContextSlot), R(context), U8(251),
+ /* 3379 S> */ B(LdaZero),
+ /* 3379 E> */ B(StaContextSlot), R(context), U8(252),
+ /* 3393 S> */ B(LdaZero),
+ /* 3393 E> */ B(StaContextSlot), R(context), U8(253),
+ /* 3407 S> */ B(LdaZero),
+ /* 3407 E> */ B(StaContextSlot), R(context), U8(254),
+ /* 3421 S> */ B(LdaZero),
+ /* 3421 E> */ B(StaContextSlot), R(context), U8(255),
+ /* 3424 S> */ B(LdrUndefined), R(2),
+ /* 3424 E> */ B(LdrGlobal), U8(1), R(1),
+ /* 3424 E> */ B(Call), R(1), R(2), U8(1), U8(0),
+ /* 3440 S> */ B(LdaSmi), U8(100),
+ /* 3440 E> */ B(Wide), B(StaContextSlot), R16(context), U16(256),
+ /* 3445 S> */ B(Wide), B(LdaContextSlot), R16(context), U16(256),
+ /* 3454 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 885778992a..aef4e1456c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -15,13 +15,12 @@ frame size: 1
parameter count: 1
bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(ToNumber),
- B(Inc),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(Inc), U8(1),
+ B(Star), R(0),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
@@ -36,15 +35,14 @@ frame size: 2
parameter count: 1
bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(ToNumber),
- B(Star), R(1),
- B(Inc),
- B(Star), R(0),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(ToNumber), R(1),
+ B(Inc), U8(1),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
@@ -59,13 +57,12 @@ frame size: 1
parameter count: 1
bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(ToNumber),
- B(Dec),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(Dec), U8(1),
+ B(Star), R(0),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
@@ -80,15 +77,14 @@ frame size: 2
parameter count: 1
bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(ToNumber),
- B(Star), R(1),
- B(Dec),
- B(Star), R(0),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(ToNumber), R(1),
+ B(Dec), U8(1),
+ B(Star), R(0),
+ B(Ldar), R(1),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
@@ -101,20 +97,17 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 26
+bytecode array length: 24
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(1), U8(1),
- B(ToNumber),
- B(Star), R(2),
- B(Inc),
- B(StoreICSloppy), R(1), U8(1), U8(3),
- B(Ldar), R(2),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
+ B(ToNumber), R(2),
+ B(Inc), U8(5),
+ /* 66 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(3),
+ B(Ldar), R(2),
+ /* 70 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -129,18 +122,15 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 20
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(1), U8(1),
- B(ToNumber),
- B(Dec),
- B(StoreICSloppy), R(1), U8(1), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(1),
+ B(Dec), U8(5),
+ /* 65 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(3),
+ /* 70 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -155,24 +145,20 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 33
+bytecode array length: 29
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(1), U8(0), U8(5),
- B(Star), R(2),
- B(Star), R(1),
- B(Star), R(2),
- B(Ldar), R(0),
- B(Star), R(3),
- B(KeyedLoadIC), R(2), U8(1),
- B(ToNumber),
- B(Star), R(4),
- B(Dec),
- B(KeyedStoreICSloppy), R(2), R(3), U8(3),
- B(Ldar), R(4),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(1), R(2),
+ B(Mov), R(2), R(1),
+ /* 72 S> */ B(Ldar), R(0),
+ /* 81 E> */ B(LdaKeyedProperty), R(1), U8(1),
+ B(ToNumber), R(4),
+ B(Dec), U8(5),
+ /* 86 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(3),
+ B(Ldar), R(4),
+ /* 90 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -185,24 +171,20 @@ handlers: [
snippet: "
var name = 'var'; var a = { val: 1 }; return ++a[name];
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 29
+bytecode array length: 25
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(1), U8(0), U8(5),
- B(Star), R(2),
- B(Star), R(1),
- B(Star), R(2),
- B(Ldar), R(0),
- B(Star), R(3),
- B(KeyedLoadIC), R(2), U8(1),
- B(ToNumber),
- B(Inc),
- B(KeyedStoreICSloppy), R(2), R(3), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(1), R(2),
+ B(Mov), R(2), R(1),
+ /* 72 S> */ B(Ldar), R(0),
+ /* 83 E> */ B(LdaKeyedProperty), R(1), U8(1),
+ B(Inc), U8(5),
+ /* 87 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(3),
+ /* 90 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -217,20 +199,19 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 27
+bytecode array length: 24
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(LdaContextSlot), R(context), U8(4),
- B(ToNumber),
- B(Inc),
- B(StaContextSlot), R(context), U8(4),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 53 S> */ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 78 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(Inc), U8(1),
+ /* 87 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 90 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -244,22 +225,21 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 31
+bytecode array length: 28
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(LdaContextSlot), R(context), U8(4),
- B(ToNumber),
- B(Star), R(2),
- B(Dec),
- B(StaContextSlot), R(context), U8(4),
- B(Ldar), R(2),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 53 S> */ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 78 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(ToNumber), R(2),
+ B(Dec), U8(1),
+ /* 86 E> */ B(StaContextSlot), R(context), U8(4),
+ B(Ldar), R(2),
+ /* 90 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -273,22 +253,20 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 26
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(Star), R(1),
- B(Star), R(2),
- B(Ldar), R(0),
- B(ToNumber),
- B(Star), R(3),
- B(Inc),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(KeyedStoreICSloppy), R(2), R(3), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 44 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 55 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(1),
+ /* 63 S> */ B(Ldar), R(0),
+ B(ToNumber), R(3),
+ B(Inc), U8(1),
+ B(Star), R(0),
+ B(LdaSmi), U8(2),
+ /* 79 E> */ B(StaKeyedPropertySloppy), R(1), R(3), U8(2),
+ /* 84 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
index ab60ba49b8..1668c81302 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -15,13 +15,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- B(CreateMappedArguments),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Return),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(Nop),
+ /* 33 S> */ B(Return),
]
constant pool: [
]
@@ -33,18 +33,16 @@ snippet: "
function f() { return arguments[0]; }
f();
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 9
bytecodes: [
- B(CreateMappedArguments),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaZero),
- B(KeyedLoadIC), R(1), U8(1),
- B(Return),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaZero),
+ /* 31 E> */ B(LdaKeyedProperty), R(0), U8(1),
+ /* 36 S> */ B(Return),
]
constant pool: [
]
@@ -58,13 +56,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- B(CreateUnmappedArguments),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Return),
+ B(CreateUnmappedArguments),
+ B(Star), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 29 S> */ B(Nop),
+ /* 47 S> */ B(Return),
]
constant pool: [
]
@@ -76,22 +74,20 @@ snippet: "
function f(a) { return arguments[0]; }
f();
"
-frame size: 3
+frame size: 2
parameter count: 2
-bytecode array length: 25
+bytecode array length: 18
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4),
- B(CreateMappedArguments),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaZero),
- B(KeyedLoadIC), R(2), U8(1),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(LdaZero),
+ /* 32 E> */ B(LdaKeyedProperty), R(0), U8(1),
+ /* 37 S> */ B(Return),
]
constant pool: [
]
@@ -105,21 +101,21 @@ snippet: "
"
frame size: 2
parameter count: 4
-bytecode array length: 29
+bytecode array length: 25
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(6),
- B(Ldar), R(arg1),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(arg2),
- B(StaContextSlot), R(context), U8(4),
- B(CreateMappedArguments),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Return),
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(1),
+ B(Ldar), R(arg0),
+ B(StaContextSlot), R(context), U8(6),
+ B(Ldar), R(arg1),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(arg2),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(Star), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 22 S> */ B(Nop),
+ /* 40 S> */ B(Return),
]
constant pool: [
]
@@ -133,13 +129,13 @@ snippet: "
"
frame size: 1
parameter count: 4
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- B(CreateUnmappedArguments),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Return),
+ B(CreateUnmappedArguments),
+ B(Star), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 36 S> */ B(Nop),
+ /* 54 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index e43a8cd76c..afa349ac3e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -15,13 +15,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 7
+bytecode array length: 6
bytecodes: [
- B(CreateRestParameter),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Return),
+ B(CreateRestParameter),
+ B(Star), R(0),
+ /* 10 E> */ B(StackCheck),
+ /* 26 S> */ B(Nop),
+ /* 43 S> */ B(Return),
]
constant pool: [
]
@@ -35,17 +35,16 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- B(CreateRestParameter),
- B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(1),
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Return),
+ B(CreateRestParameter),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(1),
+ /* 10 E> */ B(StackCheck),
+ B(Mov), R(arg0), R(1),
+ /* 29 S> */ B(Ldar), R(0),
+ /* 46 S> */ B(Return),
]
constant pool: [
]
@@ -57,22 +56,19 @@ snippet: "
function f(a, ...restArgs) { return restArgs[0]; }
f();
"
-frame size: 3
+frame size: 2
parameter count: 2
-bytecode array length: 20
+bytecode array length: 15
bytecodes: [
- B(CreateRestParameter),
- B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(1),
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaZero),
- B(KeyedLoadIC), R(2), U8(1),
- B(Return),
+ B(CreateRestParameter),
+ B(Star), R(0),
+ B(LdaTheHole),
+ B(Star), R(1),
+ /* 10 E> */ B(StackCheck),
+ B(Mov), R(arg0), R(1),
+ /* 29 S> */ B(LdaZero),
+ /* 44 E> */ B(LdaKeyedProperty), R(0), U8(1),
+ /* 49 S> */ B(Return),
]
constant pool: [
]
@@ -86,28 +82,22 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 35
+bytecode array length: 26
bytecodes: [
- B(CreateUnmappedArguments),
- B(Star), R(0),
- B(CreateRestParameter),
- B(Star), R(1),
- B(LdaTheHole),
- B(Star), R(2),
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(2),
- B(Ldar), R(1),
- B(Star), R(3),
- B(LdaZero),
- B(KeyedLoadIC), R(3), U8(1),
- B(Star), R(4),
- B(Ldar), R(0),
- B(Star), R(3),
- B(LdaZero),
- B(KeyedLoadIC), R(3), U8(3),
- B(Add), R(4),
- B(Return),
+ B(CreateUnmappedArguments),
+ B(Star), R(0),
+ B(CreateRestParameter),
+ B(Star), R(1),
+ B(LdaTheHole),
+ B(Star), R(2),
+ /* 10 E> */ B(StackCheck),
+ B(Mov), R(arg0), R(2),
+ /* 29 S> */ B(LdaZero),
+ /* 44 E> */ B(LdrKeyedProperty), R(1), U8(1), R(4),
+ B(LdaZero),
+ /* 59 E> */ B(LdaKeyedProperty), R(0), U8(3),
+ B(Add), R(4), U8(5),
+ /* 64 S> */ B(Return),
]
constant pool: [
]
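The CreateRestParameter rewrite shows the new load bytecodes at work: restArgs[0] is now read with LdaKeyedProperty (or LdrKeyedProperty when the result goes to a register) directly from the register holding the rest array, where the old code first copied the array into a scratch register for KeyedLoadIC, which is why one snippet's frame size drops from 3 to 2. The underlying language feature is an ordinary rest parameter:

  function f(a, ...restArgs) {
    // restArgs is a real Array holding the arguments after `a`.
    return restArgs[0];
  }
  console.log(f());        // undefined: restArgs is []
  console.log(f(1, 2, 3)); // 2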
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
index 0c7c176c07..2530404379 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeadCodeRemoval.golden
@@ -15,9 +15,9 @@ frame size: 1
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaUndefined),
+ /* 58 S> */ B(Return),
]
constant pool: [
]
@@ -32,11 +32,11 @@ frame size: 1
parameter count: 1
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 66 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 69 S> */ B(Return),
]
constant pool: [
]
@@ -51,9 +51,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 46 S> */ B(LdaSmi), U8(1),
+ /* 78 S> */ B(Return),
]
constant pool: [
]
@@ -68,14 +68,14 @@ frame size: 1
parameter count: 1
bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(JumpIfToBooleanFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(LdaSmi), U8(2),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanFalse), U8(5),
+ /* 54 S> */ B(LdaSmi), U8(1),
+ /* 77 S> */ B(Return),
+ /* 67 S> */ B(LdaSmi), U8(2),
+ /* 77 S> */ B(Return),
]
constant pool: [
]
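DeadCodeRemoval only gains source-position annotations here; the bytecode array lengths (3, 7, 4, 13) are unchanged, because statements after an unconditional return were already dropped at bytecode-generation time. The full snippets sit outside the hunks shown, but the last hunk's shape (JumpIfToBooleanFalse with two returns) matches a pattern like this sketch:

  function f(x) {
    if (x) {
      return 1;
      // anything here is dead: no bytecode is emitted for it
    }
    return 2;
  }
  console.log(f(true), f(false)); // 1 2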
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index bedbe31a95..9d16d06aff 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -14,23 +14,24 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 31
+bytecode array length: 34
bytecodes: [
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
- B(StackCheck),
- B(LdaConstant), U8(1),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
- B(LdaUndefined),
- B(Return),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
+ /* 0 E> */ B(StackCheck),
+ /* 8 S> */ B(LdaConstant), U8(1),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
+ B(LdaUndefined),
+ /* 10 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -43,18 +44,19 @@ handlers: [
snippet: "
function f() {}
"
-frame size: 2
+frame size: 3
parameter count: 1
-bytecode array length: 15
+bytecode array length: 18
bytecodes: [
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(0), U8(2),
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Mov), R(closure), R(2),
+ B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(0), U8(3),
+ /* 0 E> */ B(StackCheck),
+ B(LdaUndefined),
+ /* 15 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -69,25 +71,26 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 37
+bytecode array length: 40
bytecodes: [
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
- B(StackCheck),
- B(LdaConstant), U8(1),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
- B(LdaSmi), U8(2),
- B(StaGlobalSloppy), U8(1), U8(3),
- B(Star), R(0),
- B(Return),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
+ /* 0 E> */ B(StackCheck),
+ /* 8 S> */ B(LdaConstant), U8(1),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
+ /* 11 S> */ B(LdaSmi), U8(2),
+ /* 12 E> */ B(StaGlobalSloppy), U8(1), U8(3),
+ B(Star), R(0),
+ /* 15 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -101,27 +104,25 @@ snippet: "
function f() {}
f();
"
-frame size: 3
+frame size: 4
parameter count: 1
bytecode array length: 29
bytecodes: [
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(2),
- B(LdaGlobal), U8(1), U8(1),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(1), U8(3),
- B(Star), R(0),
- B(Return),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
+ /* 0 E> */ B(StackCheck),
+ /* 16 S> */ B(LdrUndefined), R(2),
+ B(LdrGlobal), U8(1), R(1),
+ /* 16 E> */ B(Call), R(1), R(2), U8(1), U8(3),
+ B(Star), R(0),
+ /* 20 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
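Across DeclareGlobals, top-level declarations switch from Runtime::kDeclareGlobals (two arguments) to Runtime::kDeclareGlobalsForInterpreter, which takes the closure as an extra third argument, hence the new Mov ..., R(closure), ... and the frame-size bumps. The scripts being compiled are plain global declarations, along the lines of:

  // Hoisted var/function declarations in a script are installed on the
  // global object by one runtime call before the script body executes.
  var a = 1;
  function f() { return a; }
  console.log(f()); // 1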
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
index 369c158efe..aeebe7a3c2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -13,16 +13,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaConstant), U8(1),
- B(DeletePropertySloppy), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 56 S> */ B(LdaConstant), U8(1),
+ B(DeletePropertySloppy), R(0),
+ /* 75 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -37,16 +35,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaConstant), U8(1),
- B(DeletePropertyStrict), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 70 S> */ B(LdaConstant), U8(1),
+ B(DeletePropertyStrict), R(0),
+ /* 89 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -61,16 +57,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(DeletePropertySloppy), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 56 S> */ B(LdaSmi), U8(2),
+ B(DeletePropertySloppy), R(0),
+ /* 76 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -86,11 +80,11 @@ frame size: 1
parameter count: 1
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaFalse),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaFalse),
+ /* 63 S> */ B(Return),
]
constant pool: [
]
@@ -106,20 +100,19 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 30
+bytecode array length: 27
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(1), U8(0),
- B(LdaContextSlot), R(context), U8(4),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(DeletePropertyStrict), R(1),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Ldar), R(1),
+ /* 56 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 64 S> */ B(CreateClosure), U8(1), U8(2),
+ /* 93 S> */ B(LdrContextSlot), R(context), U8(4), R(1),
+ B(LdaSmi), U8(1),
+ B(DeletePropertyStrict), R(1),
+ /* 113 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -136,9 +129,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaTrue),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaTrue),
+ /* 56 S> */ B(Return),
]
constant pool: [
]
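The Delete goldens cover the delete shapes the interpreter distinguishes: sloppy and strict property deletes, plus delete of a variable, which is constant-folded. A sketch of the corresponding JavaScript (the goldens' exact snippets are not shown in these hunks):

  var o = { x: 1 };
  console.log(delete o.x);    // true: DeletePropertySloppy
  (function () {
    'use strict';
    var p = { y: 2 };
    console.log(delete p.y);  // true: DeletePropertyStrict
  })();
  var v = 10;
  console.log(delete v);      // false: var bindings are non-configurable,
                              // so the compiler emits a constant LdaFalse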
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
index b36f421caa..dcc72134b2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeleteLookupSlotInEval.golden
@@ -23,12 +23,12 @@ frame size: 1
parameter count: 1
bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
+ B(LdaUndefined),
+ /* 25 S> */ B(Return),
]
constant pool: [
"x",
@@ -51,9 +51,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaFalse),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaFalse),
+ /* 32 S> */ B(Return),
]
constant pool: [
]
@@ -75,11 +75,11 @@ frame size: 1
parameter count: 1
bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1),
+ /* 32 S> */ B(Return),
]
constant pool: [
"z",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
index ca260651c0..ac0b2ee8d4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoDebugger.golden
@@ -15,10 +15,10 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(Debugger),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(Debugger),
+ B(LdaUndefined),
+ /* 44 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
index b46fa571de..e04e131928 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
@@ -14,12 +14,12 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 6
+bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(Mov), R(0), R(1),
+ /* 50 S> */ B(Ldar), R(1),
+ /* 60 S> */ B(Return),
]
constant pool: [
]
@@ -32,15 +32,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 11
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(100),
- B(Star), R(1),
- B(LdaUndefined),
- B(Star), R(0),
- B(Star), R(2),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 55 S> */ B(LdaSmi), U8(100),
+ B(Star), R(1),
+ /* 42 S> */ B(LdrUndefined), R(0),
+ B(Ldar), R(0),
+ B(Star), R(2),
+ /* 63 S> */ B(Nop),
+ /* 73 S> */ B(Return),
]
constant pool: [
]
@@ -53,24 +54,18 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 26
+bytecode array length: 16
bytecodes: [
- B(StackCheck),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(1),
- B(ToNumber),
- B(Inc),
- B(Star), R(1),
- B(Star), R(0),
- B(Jump), U8(12),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Jump), U8(-21),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 E> */ B(StackCheck),
+ /* 56 S> */ B(LdaSmi), U8(10),
+ B(Star), R(1),
+ /* 69 S> */ B(Inc), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ /* 74 S> */ B(Jump), U8(2),
+ B(LdaUndefined),
+ /* 94 S> */ B(Return),
]
constant pool: [
]
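DoExpression exercises the do-expression proposal, which was stage 1 at the time and gated in V8 behind a harmony flag (the flag name below is recalled from 2016-era V8, so treat it as an assumption):

  // Run with: d8 --harmony-do-expressions
  // A do-expression evaluates a block and yields its completion value.
  var x = do { let t = 100; t + 1; };
  console.log(x); // 101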
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
index e3e48bcf5a..f8ee37a398 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -11,35 +11,37 @@ wrap: yes
snippet: "
return eval('1;');
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 65
+bytecode array length: 66
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
- B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(1), R(4),
- B(Mov), R(3), R(5),
- B(Mov), R(closure), R(6),
- B(LdaZero),
- B(Star), R(7),
- B(LdaSmi), U8(30),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(2), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(LdaSmi), U8(41),
+ B(Star), R(9),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
+ B(Star), R(1),
+ /* 41 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 53 S> */ B(Return),
]
constant pool: [
"eval",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 9f0c64a952..b6a8df8636 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -15,9 +15,9 @@ frame size: 2
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(LdaUndefined),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
@@ -32,9 +32,9 @@ frame size: 2
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(LdaUndefined),
+ /* 62 S> */ B(Return),
]
constant pool: [
]
@@ -49,9 +49,9 @@ frame size: 2
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(LdaUndefined),
+ /* 62 S> */ B(Return),
]
constant pool: [
]
@@ -65,32 +65,31 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 45
+bytecode array length: 44
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(JumpIfUndefined), U8(38),
- B(JumpIfNull), U8(36),
- B(ToObject),
- B(Star), R(3),
- B(ForInPrepare), R(4),
- B(LdaZero),
- B(Star), R(7),
- B(ForInDone), R(7), R(6),
- B(JumpIfTrue), U8(23),
- B(ForInNext), R(3), R(7), R(4), U8(1),
- B(JumpIfUndefined), U8(10),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(2),
- B(Return),
- B(ForInStep), R(7),
- B(Star), R(7),
- B(Jump), U8(-24),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ /* 68 S> */ B(JumpIfUndefined), U8(37),
+ B(JumpIfNull), U8(35),
+ B(ToObject), R(3),
+ B(ForInPrepare), R(3), R(4),
+ B(LdaZero),
+ B(Star), R(7),
+ /* 63 S> */ B(ForInDone), R(7), R(6),
+ B(JumpIfTrue), U8(22),
+ B(ForInNext), R(3), R(7), R(4), U8(1),
+ B(JumpIfUndefined), U8(9),
+ B(Star), R(0),
+ /* 54 E> */ B(StackCheck),
+ B(Star), R(2),
+ /* 73 S> */ B(Nop),
+ /* 85 S> */ B(Return),
+ B(ForInStep), R(7),
+ B(Star), R(7),
+ B(Jump), U8(-23),
+ B(LdaUndefined),
+ /* 85 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -105,37 +104,34 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 57
+bytecode array length: 55
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(1),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(JumpIfUndefined), U8(47),
- B(JumpIfNull), U8(45),
- B(ToObject),
- B(Star), R(3),
- B(ForInPrepare), R(4),
- B(LdaZero),
- B(Star), R(7),
- B(ForInDone), R(7), R(6),
- B(JumpIfTrue), U8(32),
- B(ForInNext), R(3), R(7), R(4), U8(1),
- B(JumpIfUndefined), U8(19),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(2),
- B(Ldar), R(1),
- B(Star), R(8),
- B(Ldar), R(2),
- B(Add), R(8),
- B(Star), R(1),
- B(ForInStep), R(7),
- B(Star), R(7),
- B(Jump), U8(-33),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 59 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(JumpIfUndefined), U8(45),
+ B(JumpIfNull), U8(43),
+ B(ToObject), R(3),
+ B(ForInPrepare), R(3), R(4),
+ B(LdaZero),
+ B(Star), R(7),
+ /* 54 S> */ B(ForInDone), R(7), R(6),
+ B(JumpIfTrue), U8(30),
+ B(ForInNext), R(3), R(7), R(4), U8(2),
+ B(JumpIfUndefined), U8(17),
+ B(Star), R(0),
+ /* 45 E> */ B(StackCheck),
+ B(Star), R(2),
+ /* 70 S> */ B(Ldar), R(0),
+ /* 75 E> */ B(Add), R(1), U8(1),
+ B(Mov), R(1), R(8),
+ B(Star), R(1),
+ /* 72 E> */ B(ForInStep), R(7),
+ B(Star), R(7),
+ B(Jump), U8(-31),
+ B(LdaUndefined),
+ /* 80 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -153,51 +149,42 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 94
+bytecode array length: 80
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Star), R(0),
- B(CreateArrayLiteral), U8(1), U8(1), U8(3),
- B(JumpIfUndefined), U8(79),
- B(JumpIfNull), U8(77),
- B(ToObject),
- B(Star), R(1),
- B(ForInPrepare), R(2),
- B(LdaZero),
- B(Star), R(5),
- B(ForInDone), R(5), R(4),
- B(JumpIfTrue), U8(64),
- B(ForInNext), R(1), R(5), R(2), U8(9),
- B(JumpIfUndefined), U8(51),
- B(Star), R(6),
- B(Ldar), R(0),
- B(Star), R(7),
- B(Ldar), R(6),
- B(StoreICSloppy), R(7), U8(2), U8(7),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(6),
- B(LoadIC), R(6), U8(2), U8(3),
- B(Star), R(7),
- B(LdaSmi), U8(10),
- B(TestEqual), R(7),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(20),
- B(Ldar), R(0),
- B(Star), R(6),
- B(LoadIC), R(6), U8(2), U8(5),
- B(Star), R(7),
- B(LdaSmi), U8(20),
- B(TestEqual), R(7),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(8),
- B(ForInStep), R(5),
- B(Star), R(5),
- B(Jump), U8(-65),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Mov), R(1), R(0),
+ /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
+ B(JumpIfUndefined), U8(65),
+ B(JumpIfNull), U8(63),
+ B(ToObject), R(1),
+ B(ForInPrepare), R(1), R(2),
+ B(LdaZero),
+ B(Star), R(5),
+ /* 68 S> */ B(ForInDone), R(5), R(4),
+ B(JumpIfTrue), U8(50),
+ B(ForInNext), R(1), R(5), R(2), U8(9),
+ B(JumpIfUndefined), U8(37),
+ B(Star), R(6),
+ /* 67 E> */ B(StaNamedPropertySloppy), R(0), U8(2), U8(7),
+ /* 62 E> */ B(StackCheck),
+ /* 95 S> */ B(Nop),
+ /* 100 E> */ B(LdrNamedProperty), R(0), U8(2), U8(3), R(7),
+ B(LdaSmi), U8(10),
+ /* 106 E> */ B(TestEqual), R(7),
+ B(JumpIfFalse), U8(4),
+ /* 113 S> */ B(Jump), U8(16),
+ /* 125 S> */ B(Nop),
+ /* 130 E> */ B(LdrNamedProperty), R(0), U8(2), U8(5), R(7),
+ B(LdaSmi), U8(20),
+ /* 136 E> */ B(TestEqual), R(7),
+ B(JumpIfFalse), U8(4),
+ /* 143 S> */ B(Jump), U8(8),
+ B(ForInStep), R(5),
+ B(Star), R(5),
+ B(Jump), U8(-51),
+ B(LdaUndefined),
+ /* 152 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -214,41 +201,36 @@ snippet: "
"
frame size: 9
parameter count: 1
-bytecode array length: 69
+bytecode array length: 61
bytecodes: [
- B(StackCheck),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(Star), R(0),
- B(CreateArrayLiteral), U8(1), U8(1), U8(3),
- B(JumpIfUndefined), U8(56),
- B(JumpIfNull), U8(54),
- B(ToObject),
- B(Star), R(1),
- B(ForInPrepare), R(2),
- B(LdaZero),
- B(Star), R(5),
- B(ForInDone), R(5), R(4),
- B(JumpIfTrue), U8(41),
- B(ForInNext), R(1), R(5), R(2), U8(7),
- B(JumpIfUndefined), U8(28),
- B(Star), R(6),
- B(Ldar), R(0),
- B(Star), R(7),
- B(LdaZero),
- B(Star), R(8),
- B(Ldar), R(6),
- B(KeyedStoreICSloppy), R(7), R(8), U8(5),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(6),
- B(LdaSmi), U8(3),
- B(KeyedLoadIC), R(6), U8(3),
- B(Return),
- B(ForInStep), R(5),
- B(Star), R(5),
- B(Jump), U8(-42),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(0),
+ /* 72 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
+ B(JumpIfUndefined), U8(48),
+ B(JumpIfNull), U8(46),
+ B(ToObject), R(1),
+ B(ForInPrepare), R(1), R(2),
+ B(LdaZero),
+ B(Star), R(5),
+ /* 65 S> */ B(ForInDone), R(5), R(4),
+ B(JumpIfTrue), U8(33),
+ B(ForInNext), R(1), R(5), R(2), U8(7),
+ B(JumpIfUndefined), U8(20),
+ B(Star), R(6),
+ B(LdaZero),
+ B(Star), R(8),
+ B(Ldar), R(6),
+ /* 64 E> */ B(StaKeyedPropertySloppy), R(0), R(8), U8(5),
+ /* 59 E> */ B(StackCheck),
+ /* 83 S> */ B(LdaSmi), U8(3),
+ /* 91 E> */ B(LdaKeyedProperty), R(0), U8(3),
+ /* 98 S> */ B(Return),
+ B(ForInStep), R(5),
+ B(Star), R(5),
+ B(Jump), U8(-34),
+ B(LdaUndefined),
+ /* 98 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
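The ForIn goldens move to the new operand forms (ToObject now writes to an explicit register, and ForInPrepare takes the receiver register) and fold property loads into LdaNamedProperty / LdrNamedProperty, but the loop protocol itself (ForInPrepare, ForInDone, ForInNext, ForInStep) is unchanged. In JavaScript terms:

  var o = { a: 10, b: 20 };
  var seen = [];
  for (var p in o) {
    // ForInNext yields undefined for keys removed mid-iteration and the
    // loop skips them, which is why the bytecode has a JumpIfUndefined.
    seen.push(p);
  }
  console.log(seen); // ['a', 'b']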
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 9b10f41894..01121e5017 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -11,161 +11,119 @@ wrap: yes
snippet: "
for (var p of [0, 1, 2]) {}
"
-frame size: 16
+frame size: 15
parameter count: 1
-bytecode array length: 347
+bytecode array length: 268
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(3),
- B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(Star), R(14),
- B(LdaConstant), U8(1),
- B(KeyedLoadIC), R(14), U8(3),
- B(Star), R(13),
- B(Call), R(13), R(14), U8(1), U8(1),
- B(Star), R(1),
- B(Ldar), R(1),
- B(Star), R(15),
- B(LoadIC), R(15), U8(2), U8(7),
- B(Star), R(14),
- B(Call), R(14), R(15), U8(1), U8(5),
- B(Star), R(2),
- B(Star), R(13),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(13), U8(1),
- B(LogicalNot),
- B(JumpIfFalse), U8(11),
- B(Ldar), R(2),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(Ldar), R(2),
- B(Star), R(13),
- B(LoadIC), R(13), U8(3), U8(9),
- B(JumpIfToBooleanTrue), U8(28),
- B(LdaSmi), U8(2),
- B(Star), R(3),
- B(Ldar), R(2),
- B(Star), R(13),
- B(LoadIC), R(13), U8(4), U8(11),
- B(Star), R(0),
- B(Ldar), R(4),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(7),
- B(LdaZero),
- B(Star), R(3),
- B(Jump), U8(-70),
- B(Jump), U8(47),
- B(Star), R(14),
- B(LdaConstant), U8(5),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
- B(Star), R(12),
- B(PushContext), R(8),
- B(Ldar), R(3),
- B(Star), R(13),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(13),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), U8(1),
- B(Star), R(3),
- B(LdaContextSlot), R(context), U8(4),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(8),
- B(LdaSmi), U8(-1),
- B(Star), R(9),
- B(Jump), U8(7),
- B(Star), R(10),
- B(LdaZero),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(11),
- B(Ldar), R(3),
- B(Star), R(12),
- B(LdaZero),
- B(TestEqualStrict), R(12),
- B(JumpIfTrue), U8(9),
- B(Ldar), R(1),
- B(Star), R(12),
- B(LdaUndefined),
- B(TestEqualStrict), R(12),
- B(LogicalNot),
- B(JumpIfFalseConstant), U8(9),
- B(Ldar), R(1),
- B(Star), R(12),
- B(LoadIC), R(12), U8(6), U8(13),
- B(Star), R(5),
- B(Star), R(12),
- B(LdaNull),
- B(TestEqual), R(12),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(124),
- B(Ldar), R(3),
- B(Star), R(12),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(12),
- B(JumpIfFalse), U8(79),
- B(Ldar), R(5),
- B(TypeOf),
- B(Star), R(12),
- B(LdaConstant), U8(7),
- B(TestEqualStrict), R(12),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(139),
- B(Star), R(12),
- B(LdaConstant), U8(8),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
- B(Throw),
- B(Mov), R(context), R(12),
- B(Ldar), R(5),
- B(Star), R(13),
- B(Ldar), R(1),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(30),
- B(Star), R(14),
- B(LdaConstant), U8(5),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(12),
- B(PushContext), R(8),
- B(PopContext), R(8),
- B(Jump), U8(37),
- B(Ldar), R(5),
- B(Star), R(12),
- B(Ldar), R(1),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::k_Call), R(12), U8(2),
- B(Star), R(6),
- B(Star), R(12),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(12), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(11),
- B(Ldar), R(6),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
- B(LdaZero),
- B(TestEqualStrict), R(9),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(10),
- B(ReThrow),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
+ /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(14),
+ B(LdaConstant), U8(1),
+ /* 48 E> */ B(LdrKeyedProperty), R(14), U8(3), R(13),
+ /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(1),
+ B(Star), R(1),
+ /* 45 S> */ B(LdrNamedProperty), R(1), U8(2), U8(7), R(14),
+ /* 45 E> */ B(Call), R(14), R(1), U8(1), U8(5),
+ B(Star), R(2),
+ /* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
+ B(LdaNamedProperty), R(2), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(23),
+ B(LdrNamedProperty), R(2), U8(4), U8(11), R(4),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(Mov), R(4), R(0),
+ /* 34 E> */ B(StackCheck),
+ B(Mov), R(0), R(7),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(-49),
+ B(Jump), U8(34),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(5),
+ B(Star), R(12),
+ B(PushContext), R(8),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(3),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(4), R(13),
+ B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
+ B(PopContext), R(8),
+ B(LdaSmi), U8(-1),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(11),
+ B(LdaZero),
+ B(TestEqualStrict), R(3),
+ B(JumpIfTrue), U8(116),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrue), U8(111),
+ B(LdrNamedProperty), R(1), U8(6), U8(13), R(5),
+ B(LdaNull),
+ B(TestEqual), R(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(99),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(3),
+ B(JumpIfFalse), U8(68),
+ B(Ldar), R(5),
+ B(TypeOf),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(129),
+ B(Star), R(12),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(12),
+ B(Mov), R(5), R(13),
+ B(Mov), R(1), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(22),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(5),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(12),
+ B(PushContext), R(8),
+ B(PopContext), R(8),
+ B(Jump), U8(27),
+ B(Mov), R(5), R(12),
+ B(Mov), R(1), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(9),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 62 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -177,12 +135,11 @@ constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- kInstanceTypeDontCare,
]
handlers: [
- [10, 152, 158],
- [13, 105, 107],
- [250, 263, 265],
+ [7, 114, 120],
+ [10, 80, 82],
+ [192, 202, 204],
]
---
@@ -190,170 +147,125 @@ snippet: "
var x = 'potatoes';
for (var p of x) { return p; }
"
-frame size: 17
+frame size: 16
parameter count: 1
-bytecode array length: 363
+bytecode array length: 279
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(7),
- B(LdaUndefined),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(3),
- B(Mov), R(context), R(12),
- B(Mov), R(context), R(13),
- B(Ldar), R(7),
- B(Star), R(15),
- B(LdaConstant), U8(1),
- B(KeyedLoadIC), R(15), U8(3),
- B(Star), R(14),
- B(Call), R(14), R(15), U8(1), U8(1),
- B(Star), R(1),
- B(Ldar), R(1),
- B(Star), R(16),
- B(LoadIC), R(16), U8(2), U8(7),
- B(Star), R(15),
- B(Call), R(15), R(16), U8(1), U8(5),
- B(Star), R(2),
- B(Star), R(14),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(14), U8(1),
- B(LogicalNot),
- B(JumpIfFalse), U8(11),
- B(Ldar), R(2),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
- B(Ldar), R(2),
- B(Star), R(14),
- B(LoadIC), R(14), U8(3), U8(9),
- B(JumpIfToBooleanTrue), U8(32),
- B(LdaSmi), U8(2),
- B(Star), R(3),
- B(Ldar), R(2),
- B(Star), R(14),
- B(LoadIC), R(14), U8(4), U8(11),
- B(Star), R(0),
- B(Ldar), R(4),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(8),
- B(Star), R(11),
- B(LdaZero),
- B(Star), R(10),
- B(Jump), U8(63),
- B(Jump), U8(-74),
- B(Jump), U8(47),
- B(Star), R(15),
- B(LdaConstant), U8(5),
- B(Star), R(14),
- B(Ldar), R(closure),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(14), U8(3),
- B(Star), R(13),
- B(PushContext), R(9),
- B(Ldar), R(3),
- B(Star), R(14),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(14),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), U8(1),
- B(Star), R(3),
- B(LdaContextSlot), R(context), U8(4),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
- B(PopContext), R(9),
- B(LdaSmi), U8(-1),
- B(Star), R(10),
- B(Jump), U8(8),
- B(Star), R(11),
- B(LdaSmi), U8(1),
- B(Star), R(10),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(12),
- B(Ldar), R(3),
- B(Star), R(13),
- B(LdaZero),
- B(TestEqualStrict), R(13),
- B(JumpIfTrue), U8(9),
- B(Ldar), R(1),
- B(Star), R(13),
- B(LdaUndefined),
- B(TestEqualStrict), R(13),
- B(LogicalNot),
- B(JumpIfFalseConstant), U8(9),
- B(Ldar), R(1),
- B(Star), R(13),
- B(LoadIC), R(13), U8(6), U8(13),
- B(Star), R(5),
- B(Star), R(13),
- B(LdaNull),
- B(TestEqual), R(13),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(124),
- B(Ldar), R(3),
- B(Star), R(13),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(13),
- B(JumpIfFalse), U8(79),
- B(Ldar), R(5),
- B(TypeOf),
- B(Star), R(13),
- B(LdaConstant), U8(7),
- B(TestEqualStrict), R(13),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(139),
- B(Star), R(13),
- B(LdaConstant), U8(8),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
- B(Throw),
- B(Mov), R(context), R(13),
- B(Ldar), R(5),
- B(Star), R(14),
- B(Ldar), R(1),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::k_Call), R(14), U8(2),
- B(Jump), U8(30),
- B(Star), R(15),
- B(LdaConstant), U8(5),
- B(Star), R(14),
- B(Ldar), R(closure),
- B(Star), R(16),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(14), U8(3),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(13),
- B(PushContext), R(9),
- B(PopContext), R(9),
- B(Jump), U8(37),
- B(Ldar), R(5),
- B(Star), R(13),
- B(Ldar), R(1),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::k_Call), R(13), U8(2),
- B(Star), R(6),
- B(Star), R(13),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(13), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(11),
- B(Ldar), R(6),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(12), U8(1),
- B(LdaZero),
- B(TestEqualStrict), R(10),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(10),
- B(JumpIfTrue), U8(7),
- B(Jump), U8(8),
- B(Ldar), R(11),
- B(Return),
- B(Ldar), R(11),
- B(ReThrow),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(context), R(12),
+ B(Mov), R(context), R(13),
+ /* 68 S> */ B(LdaConstant), U8(1),
+ /* 68 E> */ B(LdrKeyedProperty), R(7), U8(3), R(14),
+ /* 68 E> */ B(Call), R(14), R(7), U8(1), U8(1),
+ B(Star), R(1),
+ /* 65 S> */ B(LdrNamedProperty), R(1), U8(2), U8(7), R(15),
+ /* 65 E> */ B(Call), R(15), R(1), U8(1), U8(5),
+ B(Star), R(2),
+ /* 65 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
+ B(LdaNamedProperty), R(2), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(26),
+ B(LdrNamedProperty), R(2), U8(4), U8(11), R(4),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(Mov), R(4), R(0),
+ /* 54 E> */ B(StackCheck),
+ B(Mov), R(0), R(8),
+ /* 73 S> */ B(LdaZero),
+ B(Star), R(10),
+ B(Mov), R(0), R(11),
+ B(Jump), U8(48),
+ B(Jump), U8(34),
+ B(Star), R(14),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(14), U8(5),
+ B(Star), R(13),
+ B(PushContext), R(9),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(3),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(4), R(14),
+ B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
+ B(PopContext), R(9),
+ B(LdaSmi), U8(-1),
+ B(Star), R(10),
+ B(Jump), U8(8),
+ B(Star), R(11),
+ B(LdaSmi), U8(1),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(12),
+ B(LdaZero),
+ B(TestEqualStrict), R(3),
+ B(JumpIfTrue), U8(116),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrue), U8(111),
+ B(LdrNamedProperty), R(1), U8(6), U8(13), R(5),
+ B(LdaNull),
+ B(TestEqual), R(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(99),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(3),
+ B(JumpIfFalse), U8(68),
+ B(Ldar), R(5),
+ B(TypeOf),
+ B(Star), R(13),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrict), R(13),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(129),
+ B(Star), R(13),
+ B(LdaConstant), U8(8),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(13),
+ B(Mov), R(5), R(14),
+ B(Mov), R(1), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
+ B(Jump), U8(22),
+ B(Star), R(14),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(14), U8(5),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(13),
+ B(PushContext), R(9),
+ B(PopContext), R(9),
+ B(Jump), U8(27),
+ B(Mov), R(5), R(13),
+ B(Mov), R(1), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(12), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(10),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(10),
+ B(JumpIfTrue), U8(7),
+ B(Jump), U8(8),
+ B(Ldar), R(11),
+ /* 85 S> */ B(Return),
+ B(Ldar), R(11),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 85 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -365,12 +277,11 @@ constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- kInstanceTypeDontCare,
]
handlers: [
- [14, 158, 164],
- [17, 111, 113],
- [257, 270, 272],
+ [11, 115, 121],
+ [14, 81, 83],
+ [194, 204, 206],
]
---
@@ -380,172 +291,127 @@ snippet: "
if (x == 20) break;
}
"
-frame size: 16
+frame size: 15
parameter count: 1
-bytecode array length: 369
+bytecode array length: 284
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(3),
- B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
- B(CreateArrayLiteral), U8(0), U8(0), U8(3),
- B(Star), R(14),
- B(LdaConstant), U8(1),
- B(KeyedLoadIC), R(14), U8(3),
- B(Star), R(13),
- B(Call), R(13), R(14), U8(1), U8(1),
- B(Star), R(1),
- B(Ldar), R(1),
- B(Star), R(15),
- B(LoadIC), R(15), U8(2), U8(7),
- B(Star), R(14),
- B(Call), R(14), R(15), U8(1), U8(5),
- B(Star), R(2),
- B(Star), R(13),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(13), U8(1),
- B(LogicalNot),
- B(JumpIfFalse), U8(11),
- B(Ldar), R(2),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(Ldar), R(2),
- B(Star), R(13),
- B(LoadIC), R(13), U8(3), U8(9),
- B(JumpIfToBooleanTrue), U8(50),
- B(LdaSmi), U8(2),
- B(Star), R(3),
- B(Ldar), R(2),
- B(Star), R(13),
- B(LoadIC), R(13), U8(4), U8(11),
- B(Star), R(0),
- B(Ldar), R(4),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(7),
- B(Star), R(13),
- B(LdaSmi), U8(10),
- B(TestEqual), R(13),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(-75),
- B(Ldar), R(7),
- B(Star), R(13),
- B(LdaSmi), U8(20),
- B(TestEqual), R(13),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(7),
- B(LdaZero),
- B(Star), R(3),
- B(Jump), U8(-92),
- B(Jump), U8(47),
- B(Star), R(14),
- B(LdaConstant), U8(5),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
- B(Star), R(12),
- B(PushContext), R(8),
- B(Ldar), R(3),
- B(Star), R(13),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(13),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), U8(1),
- B(Star), R(3),
- B(LdaContextSlot), R(context), U8(4),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(8),
- B(LdaSmi), U8(-1),
- B(Star), R(9),
- B(Jump), U8(7),
- B(Star), R(10),
- B(LdaZero),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(11),
- B(Ldar), R(3),
- B(Star), R(12),
- B(LdaZero),
- B(TestEqualStrict), R(12),
- B(JumpIfTrue), U8(9),
- B(Ldar), R(1),
- B(Star), R(12),
- B(LdaUndefined),
- B(TestEqualStrict), R(12),
- B(LogicalNot),
- B(JumpIfFalseConstant), U8(9),
- B(Ldar), R(1),
- B(Star), R(12),
- B(LoadIC), R(12), U8(6), U8(13),
- B(Star), R(5),
- B(Star), R(12),
- B(LdaNull),
- B(TestEqual), R(12),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(124),
- B(Ldar), R(3),
- B(Star), R(12),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(12),
- B(JumpIfFalse), U8(79),
- B(Ldar), R(5),
- B(TypeOf),
- B(Star), R(12),
- B(LdaConstant), U8(7),
- B(TestEqualStrict), R(12),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(139),
- B(Star), R(12),
- B(LdaConstant), U8(8),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
- B(Throw),
- B(Mov), R(context), R(12),
- B(Ldar), R(5),
- B(Star), R(13),
- B(Ldar), R(1),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::k_Call), R(13), U8(2),
- B(Jump), U8(30),
- B(Star), R(14),
- B(LdaConstant), U8(5),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(13), U8(3),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(12),
- B(PushContext), R(8),
- B(PopContext), R(8),
- B(Jump), U8(37),
- B(Ldar), R(5),
- B(Star), R(12),
- B(Ldar), R(1),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::k_Call), R(12), U8(2),
- B(Star), R(6),
- B(Star), R(12),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(12), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(11),
- B(Ldar), R(6),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
- B(LdaZero),
- B(TestEqualStrict), R(9),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(10),
- B(ReThrow),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
+ /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(3),
+ B(Star), R(14),
+ B(LdaConstant), U8(1),
+ /* 48 E> */ B(LdrKeyedProperty), R(14), U8(3), R(13),
+ /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(1),
+ B(Star), R(1),
+ /* 45 S> */ B(LdrNamedProperty), R(1), U8(2), U8(7), R(14),
+ /* 45 E> */ B(Call), R(14), R(1), U8(1), U8(5),
+ B(Star), R(2),
+ /* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
+ B(LdaNamedProperty), R(2), U8(3), U8(9),
+ B(JumpIfToBooleanTrue), U8(39),
+ B(LdrNamedProperty), R(2), U8(4), U8(11), R(4),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ B(Mov), R(4), R(0),
+ /* 34 E> */ B(StackCheck),
+ B(Mov), R(0), R(7),
+ /* 66 S> */ B(LdaSmi), U8(10),
+ /* 72 E> */ B(TestEqual), R(7),
+ B(JumpIfFalse), U8(4),
+ /* 79 S> */ B(Jump), U8(13),
+ /* 91 S> */ B(LdaSmi), U8(20),
+ /* 97 E> */ B(TestEqual), R(7),
+ B(JumpIfFalse), U8(4),
+ /* 104 S> */ B(Jump), U8(7),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(-65),
+ B(Jump), U8(34),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(5),
+ B(Star), R(12),
+ B(PushContext), R(8),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(3),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(LdrContextSlot), R(context), U8(4), R(13),
+ B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
+ B(PopContext), R(8),
+ B(LdaSmi), U8(-1),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(11),
+ B(LdaZero),
+ B(TestEqualStrict), R(3),
+ B(JumpIfTrue), U8(116),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrue), U8(111),
+ B(LdrNamedProperty), R(1), U8(6), U8(13), R(5),
+ B(LdaNull),
+ B(TestEqual), R(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(99),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(3),
+ B(JumpIfFalse), U8(68),
+ B(Ldar), R(5),
+ B(TypeOf),
+ B(Star), R(12),
+ B(LdaConstant), U8(7),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(129),
+ B(Star), R(12),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(12),
+ B(Mov), R(5), R(13),
+ B(Mov), R(1), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(22),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(5),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(12),
+ B(PushContext), R(8),
+ B(PopContext), R(8),
+ B(Jump), U8(27),
+ B(Mov), R(5), R(12),
+ B(Mov), R(1), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(11), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(9),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 113 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -557,12 +423,11 @@ constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- kInstanceTypeDontCare,
]
handlers: [
- [10, 174, 180],
- [13, 127, 129],
- [272, 285, 287],
+ [7, 130, 136],
+ [10, 96, 98],
+ [208, 218, 220],
]
---
@@ -570,174 +435,128 @@ snippet: "
var x = { 'a': 1, 'b': 2 };
for (x['a'] of [1,2,3]) { return x['a']; }
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 379
+bytecode array length: 292
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(8),
- B(Star), R(6),
- B(LdaUndefined),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(2),
- B(Mov), R(context), R(10),
- B(Mov), R(context), R(11),
- B(CreateArrayLiteral), U8(1), U8(1), U8(3),
- B(Star), R(13),
- B(LdaConstant), U8(2),
- B(KeyedLoadIC), R(13), U8(3),
- B(Star), R(12),
- B(Call), R(12), R(13), U8(1), U8(1),
- B(Star), R(0),
- B(Ldar), R(0),
- B(Star), R(14),
- B(LoadIC), R(14), U8(3), U8(7),
- B(Star), R(13),
- B(Call), R(13), R(14), U8(1), U8(5),
- B(Star), R(1),
- B(Star), R(12),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(12), U8(1),
- B(LogicalNot),
- B(JumpIfFalse), U8(11),
- B(Ldar), R(1),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Ldar), R(1),
- B(Star), R(12),
- B(LoadIC), R(12), U8(4), U8(9),
- B(JumpIfToBooleanTrue), U8(42),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(Ldar), R(6),
- B(Star), R(12),
- B(Ldar), R(1),
- B(Star), R(13),
- B(LoadIC), R(13), U8(5), U8(11),
- B(StoreICSloppy), R(12), U8(6), U8(13),
- B(Ldar), R(3),
- B(StackCheck),
- B(Ldar), R(6),
- B(Star), R(12),
- B(LoadIC), R(12), U8(6), U8(15),
- B(Star), R(9),
- B(LdaZero),
- B(Star), R(8),
- B(Jump), U8(63),
- B(Jump), U8(-84),
- B(Jump), U8(47),
- B(Star), R(13),
- B(LdaConstant), U8(7),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(12), U8(3),
- B(Star), R(11),
- B(PushContext), R(7),
- B(Ldar), R(2),
- B(Star), R(12),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(12),
- B(JumpIfFalse), U8(6),
- B(LdaSmi), U8(1),
- B(Star), R(2),
- B(LdaContextSlot), R(context), U8(4),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
- B(PopContext), R(7),
- B(LdaSmi), U8(-1),
- B(Star), R(8),
- B(Jump), U8(8),
- B(Star), R(9),
- B(LdaSmi), U8(1),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(10),
- B(Ldar), R(2),
- B(Star), R(11),
- B(LdaZero),
- B(TestEqualStrict), R(11),
- B(JumpIfTrue), U8(9),
- B(Ldar), R(0),
- B(Star), R(11),
- B(LdaUndefined),
- B(TestEqualStrict), R(11),
- B(LogicalNot),
- B(JumpIfFalseConstant), U8(11),
- B(Ldar), R(0),
- B(Star), R(11),
- B(LoadIC), R(11), U8(8), U8(17),
- B(Star), R(4),
- B(Star), R(11),
- B(LdaNull),
- B(TestEqual), R(11),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(124),
- B(Ldar), R(2),
- B(Star), R(11),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(11),
- B(JumpIfFalse), U8(79),
- B(Ldar), R(4),
- B(TypeOf),
- B(Star), R(11),
- B(LdaConstant), U8(9),
- B(TestEqualStrict), R(11),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(18),
- B(Wide), B(LdaSmi), U16(139),
- B(Star), R(11),
- B(LdaConstant), U8(10),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
- B(Throw),
- B(Mov), R(context), R(11),
- B(Ldar), R(4),
- B(Star), R(12),
- B(Ldar), R(0),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::k_Call), R(12), U8(2),
- B(Jump), U8(30),
- B(Star), R(13),
- B(LdaConstant), U8(7),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(12), U8(3),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(11),
- B(PushContext), R(7),
- B(PopContext), R(7),
- B(Jump), U8(37),
- B(Ldar), R(4),
- B(Star), R(11),
- B(Ldar), R(0),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::k_Call), R(11), U8(2),
- B(Star), R(5),
- B(Star), R(11),
- B(InvokeIntrinsic), U16(Runtime::k_IsJSReceiver), R(11), U8(1),
- B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(11),
- B(Ldar), R(5),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(10), U8(1),
- B(LdaZero),
- B(TestEqualStrict), R(8),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(8),
- B(JumpIfTrue), U8(7),
- B(Jump), U8(8),
- B(Ldar), R(9),
- B(Return),
- B(Ldar), R(9),
- B(ReThrow),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(8),
+ B(Mov), R(8), R(6),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Mov), R(context), R(10),
+ B(Mov), R(context), R(11),
+ /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(3),
+ B(Star), R(13),
+ B(LdaConstant), U8(2),
+ /* 77 E> */ B(LdrKeyedProperty), R(13), U8(3), R(12),
+ /* 77 E> */ B(Call), R(12), R(13), U8(1), U8(1),
+ B(Star), R(0),
+ /* 74 S> */ B(LdrNamedProperty), R(0), U8(3), U8(7), R(13),
+ /* 74 E> */ B(Call), R(13), R(0), U8(1), U8(5),
+ B(Star), R(1),
+ /* 74 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(1), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(1), U8(1),
+ B(LdaNamedProperty), R(1), U8(4), U8(9),
+ B(JumpIfToBooleanTrue), U8(29),
+ /* 67 E> */ B(LdrNamedProperty), R(1), U8(5), U8(11), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(Ldar), R(3),
+ B(StaNamedPropertySloppy), R(6), U8(6), U8(13),
+ /* 62 E> */ B(StackCheck),
+ /* 88 S> */ B(Nop),
+ /* 96 E> */ B(LdrNamedProperty), R(6), U8(6), U8(15), R(9),
+ B(LdaZero),
+ B(Star), R(8),
+ B(Jump), U8(48),
+ B(Jump), U8(34),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(7),
+ B(Star), R(11),
+ B(PushContext), R(7),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(LdrContextSlot), R(context), U8(4), R(12),
+ B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
+ B(PopContext), R(7),
+ B(LdaSmi), U8(-1),
+ B(Star), R(8),
+ B(Jump), U8(8),
+ B(Star), R(9),
+ B(LdaSmi), U8(1),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(10),
+ B(LdaZero),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(116),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(0),
+ B(JumpIfTrue), U8(111),
+ B(LdrNamedProperty), R(0), U8(8), U8(17), R(4),
+ B(LdaNull),
+ B(TestEqual), R(4),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(99),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(2),
+ B(JumpIfFalse), U8(68),
+ B(Ldar), R(4),
+ B(TypeOf),
+ B(Star), R(11),
+ B(LdaConstant), U8(9),
+ B(TestEqualStrict), R(11),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(129),
+ B(Star), R(11),
+ B(LdaConstant), U8(10),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(11),
+ B(Mov), R(4), R(12),
+ B(Mov), R(0), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Jump), U8(22),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(7),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(7),
+ B(PopContext), R(7),
+ B(Jump), U8(27),
+ B(Mov), R(4), R(11),
+ B(Mov), R(0), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(Star), R(5),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(10), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(7),
+ B(Jump), U8(8),
+ B(Ldar), R(9),
+ /* 105 S> */ B(Return),
+ B(Ldar), R(9),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 105 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -751,11 +570,10 @@ constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- kInstanceTypeDontCare,
]
handlers: [
- [18, 174, 180],
- [21, 127, 129],
- [273, 286, 288],
+ [15, 128, 134],
+ [18, 94, 96],
+ [207, 217, 219],
]
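The ForOf rewrite is the largest in this batch (347 down to 268 bytecodes for the first snippet), mostly from the new Ldr*/Mov forms and from CreateCatchContext replacing a Runtime::kPushCatchContext call. The control flow it implements is the usual for-of desugaring over the iterator protocol; a rough JavaScript sketch, not V8's exact lowering:

  function forOfSketch(iterable, body) {
    var iterator = iterable[Symbol.iterator]();
    var step, completedNormally = false;
    try {
      // Each result from next() must be an object; the real bytecode
      // otherwise calls Runtime::kThrowIteratorResultNotAnObject.
      while (!(step = iterator.next()).done) body(step.value);
      completedNormally = true;
    } finally {
      // On abrupt exit (throw, break, early return) the iterator is
      // closed via return(); a non-callable return is a TypeError.
      if (!completedNormally && iterator['return'] != null) {
        iterator['return']();
      }
    }
  }
  forOfSketch([0, 1, 2], function (v) { console.log(v); }); // 0 1 2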
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
index fe7176ce15..fd04c713a4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -15,9 +15,9 @@ frame size: 0
parameter count: 1
bytecode array length: 5
bytecodes: [
- B(StackCheck),
- B(CreateClosure), U8(0), U8(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(2),
+ /* 55 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -31,15 +31,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(1),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(Call), R(0), R(1), U8(1), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdrUndefined), R(1),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 56 E> */ B(Call), R(0), R(1), U8(1), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -53,17 +52,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 19
+bytecode array length: 18
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Star), R(1),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Star), R(2),
- B(Call), R(0), R(1), U8(2), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdrUndefined), R(1),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ /* 67 E> */ B(Call), R(0), R(1), U8(2), U8(1),
+ /* 71 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
new file mode 100644
index 0000000000..57dbfd153d
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -0,0 +1,604 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+pool type: mixed
+execute: yes
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function* f() { }
+ f();
+"
+frame size: 11
+parameter count: 1
+bytecode array length: 201
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(20),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrue), U8(57),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CreateFunctionContext), U8(2),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ /* 11 E> */ B(StackCheck),
+ B(Mov), R(context), R(4),
+ /* 11 E> */ B(LdrContextSlot), R(context), U8(4), R(6),
+ B(Ldar), R(6),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
+ B(StaContextSlot), R(context), U8(5),
+ B(Star), R(5),
+ B(LdrContextSlot), R(context), U8(5), R(6),
+ B(LdaZero),
+ B(SuspendGenerator), R(6),
+ B(Ldar), R(5),
+ /* 16 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
+ B(Star), R(8),
+ B(LdaZero),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(31),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(22),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Jump), U8(35),
+ B(Ldar), R(7),
+ /* 11 E> */ B(Throw),
+ B(LdrUndefined), R(5),
+ B(LdaTrue),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(5), U8(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(Jump), U8(14),
+ B(LdaSmi), U8(-1),
+ B(Star), R(2),
+ B(Jump), U8(8),
+ B(Star), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(4),
+ B(LdrContextSlot), R(context), U8(5), R(5),
+ B(CallRuntime), U16(Runtime::k_GeneratorClose), R(5), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(16),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(13),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(Jump), U8(11),
+ B(Ldar), R(3),
+ /* 16 S> */ B(Return),
+ B(Ldar), R(3),
+ /* 16 S> */ B(Return),
+ B(Ldar), R(3),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 16 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+ [39, 138, 144],
+]
+
+---
+snippet: "
+ function* f() { yield 42 }
+ f();
+"
+frame size: 11
+parameter count: 1
+bytecode array length: 294
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(26),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrueConstant), U8(0),
+ B(LdaSmi), U8(76),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CreateFunctionContext), U8(2),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ /* 11 E> */ B(StackCheck),
+ B(Mov), R(context), R(4),
+ /* 11 E> */ B(LdrContextSlot), R(context), U8(4), R(6),
+ B(Ldar), R(6),
+ B(Mov), R(closure), R(5),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
+ B(StaContextSlot), R(context), U8(5),
+ B(Star), R(5),
+ B(LdrContextSlot), R(context), U8(5), R(6),
+ B(LdaZero),
+ B(SuspendGenerator), R(6),
+ B(Ldar), R(5),
+ /* 25 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
+ B(Star), R(8),
+ B(LdaZero),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(31),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(22),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Jump), U8(113),
+ B(Ldar), R(7),
+ /* 11 E> */ B(Throw),
+ /* 16 S> */ B(LdaSmi), U8(42),
+ B(Star), R(5),
+ B(LdaFalse),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(5), U8(2),
+ B(Star), R(7),
+ B(LdrContextSlot), R(context), U8(5), R(5),
+ B(LdaSmi), U8(1),
+ B(SuspendGenerator), R(5),
+ B(Ldar), R(7),
+ /* 25 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(5), U8(1),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(5), U8(1),
+ B(Star), R(8),
+ B(LdaZero),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(32),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(23),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(10),
+ B(Mov), R(6), R(9),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(Jump), U8(35),
+ B(Ldar), R(6),
+ /* 16 E> */ B(Throw),
+ B(LdrUndefined), R(5),
+ B(LdaTrue),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(5), U8(2),
+ B(Star), R(3),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(Jump), U8(14),
+ B(LdaSmi), U8(-1),
+ B(Star), R(2),
+ B(Jump), U8(8),
+ B(Star), R(3),
+ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(4),
+ B(LdrContextSlot), R(context), U8(5), R(5),
+ B(CallRuntime), U16(Runtime::k_GeneratorClose), R(5), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(22),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(19),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(16),
+ B(LdaSmi), U8(3),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(13),
+ B(Jump), U8(14),
+ B(Ldar), R(3),
+ /* 25 S> */ B(Return),
+ B(Ldar), R(3),
+ /* 25 S> */ B(Return),
+ B(Ldar), R(3),
+ /* 25 S> */ B(Return),
+ B(Ldar), R(3),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 25 S> */ B(Return),
+]
+constant pool: [
+ kInstanceTypeDontCare,
+]
+handlers: [
+ [45, 222, 228],
+]
+
+---
+snippet: "
+ function* f() { for (let x of [42]) yield x }
+ f();
+"
+frame size: 18
+parameter count: 1
+bytecode array length: 742
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(26),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(4),
+ B(LdaZero),
+ B(TestEqualStrict), R(4),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(4),
+ B(JumpIfTrueConstant), U8(3),
+ B(LdaSmi), U8(76),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kAbort), R(5), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(4),
+ B(CreateFunctionContext), U8(9),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ /* 11 E> */ B(StackCheck),
+ B(Mov), R(context), R(7),
+ /* 11 E> */ B(LdrContextSlot), R(context), U8(4), R(9),
+ B(Ldar), R(9),
+ B(Mov), R(closure), R(8),
+ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(8), U8(2),
+ B(StaContextSlot), R(context), U8(5),
+ B(Star), R(8),
+ B(LdrContextSlot), R(context), U8(5), R(9),
+ B(LdaZero),
+ B(SuspendGenerator), R(9),
+ B(Ldar), R(8),
+ /* 44 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(9), U8(1),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(9), U8(1),
+ B(Star), R(11),
+ B(LdaZero),
+ B(TestEqualStrict), R(11),
+ B(JumpIfTrue), U8(31),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(11),
+ B(JumpIfTrue), U8(22),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(13),
+ B(Mov), R(10), R(12),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(12), U8(2),
+ B(Star), R(6),
+ B(LdaZero),
+ B(Star), R(5),
+ B(JumpConstant), U8(17),
+ B(Ldar), R(10),
+ /* 11 E> */ B(Throw),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaZero),
+ B(StaContextSlot), R(1), U8(9),
+ B(Mov), R(context), R(10),
+ B(Mov), R(context), R(11),
+ /* 30 S> */ B(CreateArrayLiteral), U8(1), U8(0), U8(3),
+ B(Star), R(13),
+ B(LdaConstant), U8(2),
+ /* 30 E> */ B(LdrKeyedProperty), R(13), U8(3), R(12),
+ /* 30 E> */ B(Call), R(12), R(13), U8(1), U8(1),
+ /* 30 E> */ B(StaContextSlot), R(1), U8(7),
+ B(LdaSmi), U8(-2),
+ B(TestEqual), R(4),
+ B(JumpIfTrue), U8(17),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(4),
+ B(JumpIfTrueConstant), U8(9),
+ B(LdaSmi), U8(76),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ /* 27 S> */ B(LdrContextSlot), R(1), U8(7), R(14),
+ B(LdrNamedProperty), R(14), U8(4), U8(7), R(13),
+ /* 27 E> */ B(Call), R(13), R(14), U8(1), U8(5),
+ /* 27 E> */ B(StaContextSlot), R(1), U8(8),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(11),
+ B(LdrContextSlot), R(1), U8(8), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdrContextSlot), R(1), U8(8), R(12),
+ B(LdaNamedProperty), R(12), U8(5), U8(9),
+ B(JumpIfToBooleanTrueConstant), U8(10),
+ B(LdrContextSlot), R(1), U8(8), R(12),
+ B(LdaNamedProperty), R(12), U8(6), U8(11),
+ B(StaContextSlot), R(1), U8(10),
+ B(LdaSmi), U8(2),
+ B(StaContextSlot), R(1), U8(9),
+ B(LdaContextSlot), R(1), U8(10),
+ B(StaContextSlot), R(1), U8(6),
+ /* 16 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(7),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaContextSlot), R(1), U8(6),
+ B(StaContextSlot), R(context), U8(4),
+ /* 36 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(8),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(13), U8(1),
+ B(Star), R(12),
+ B(LdaFalse),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(12), U8(2),
+ B(Star), R(14),
+ B(LdrContextSlot), R(1), U8(5), R(12),
+ B(LdaSmi), U8(1),
+ B(SuspendGenerator), R(12),
+ B(Ldar), R(14),
+ /* 44 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(15),
+ B(LdaZero),
+ B(TestEqualStrict), R(15),
+ B(JumpIfTrue), U8(43),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(15),
+ B(JumpIfTrue), U8(34),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(17),
+ B(Mov), R(13), R(16),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(16), U8(2),
+ B(PopContext), R(2),
+ B(PopContext), R(2),
+ B(PopContext), R(2),
+ B(PopContext), R(2),
+ B(PopContext), R(2),
+ B(PopContext), R(2),
+ B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
+ B(Jump), U8(68),
+ B(Ldar), R(13),
+ /* 36 E> */ B(Throw),
+ B(Ldar), R(13),
+ B(PopContext), R(2),
+ B(LdaZero),
+ B(StaContextSlot), R(1), U8(9),
+ B(Wide), B(Jump), U16(-215),
+ B(Jump), U8(39),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(11),
+ B(Star), R(11),
+ B(PushContext), R(2),
+ B(LdrContextSlot), R(0), U8(9), R(12),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(12),
+ B(JumpIfFalse), U8(7),
+ B(LdaSmi), U8(1),
+ B(StaContextSlot), R(0), U8(9),
+ B(LdrContextSlot), R(context), U8(4), R(12),
+ B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
+ B(PopContext), R(2),
+ B(LdaSmi), U8(-1),
+ B(Star), R(8),
+ B(Jump), U8(8),
+ B(Star), R(9),
+ B(LdaSmi), U8(1),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(10),
+ B(LdrContextSlot), R(1), U8(9), R(11),
+ B(LdaZero),
+ B(TestEqualStrict), R(11),
+ B(JumpIfTrueConstant), U8(15),
+ B(LdrContextSlot), R(1), U8(7), R(11),
+ B(LdaUndefined),
+ B(TestEqualStrict), R(11),
+ B(JumpIfTrueConstant), U8(16),
+ B(LdrContextSlot), R(1), U8(7), R(11),
+ B(LdaNamedProperty), R(11), U8(12), U8(13),
+ B(StaContextSlot), R(1), U8(11),
+ B(LdrContextSlot), R(1), U8(11), R(11),
+ B(LdaNull),
+ B(TestEqual), R(11),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(117),
+ B(LdrContextSlot), R(1), U8(9), R(11),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(11),
+ B(JumpIfFalse), U8(71),
+ B(LdaContextSlot), R(1), U8(11),
+ B(TypeOf),
+ B(Star), R(11),
+ B(LdaConstant), U8(13),
+ B(TestEqualStrict), R(11),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), U16(129),
+ B(Star), R(11),
+ B(LdaConstant), U8(14),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(11),
+ B(LdrContextSlot), R(1), U8(11), R(12),
+ B(LdrContextSlot), R(1), U8(7), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Jump), U8(22),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(11),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(2),
+ B(PopContext), R(2),
+ B(Jump), U8(38),
+ B(LdrContextSlot), R(1), U8(11), R(11),
+ B(LdrContextSlot), R(1), U8(7), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(StaContextSlot), R(1), U8(12),
+ B(LdrContextSlot), R(1), U8(12), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(11),
+ B(LdrContextSlot), R(1), U8(12), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(10), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(10),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(8),
+ B(JumpIfTrue), U8(17),
+ B(Jump), U8(28),
+ B(PopContext), R(1),
+ B(PopContext), R(1),
+ B(LdaSmi), U8(1),
+ B(Star), R(5),
+ B(Mov), R(9), R(6),
+ B(Jump), U8(47),
+ B(PopContext), R(1),
+ B(PopContext), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(5),
+ B(Mov), R(9), R(6),
+ B(Jump), U8(34),
+ B(PopContext), R(1),
+ B(LdrUndefined), R(8),
+ B(LdaTrue),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(8), U8(2),
+ B(Star), R(6),
+ B(LdaSmi), U8(3),
+ B(Star), R(5),
+ B(Jump), U8(14),
+ B(LdaSmi), U8(-1),
+ B(Star), R(5),
+ B(Jump), U8(8),
+ B(Star), R(6),
+ B(LdaSmi), U8(4),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(7),
+ B(LdrContextSlot), R(context), U8(5), R(8),
+ B(CallRuntime), U16(Runtime::k_GeneratorClose), R(8), U8(1),
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(7), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(5),
+ B(JumpIfTrue), U8(28),
+ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(5),
+ B(JumpIfTrue), U8(25),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5),
+ B(JumpIfTrue), U8(22),
+ B(LdaSmi), U8(3),
+ B(TestEqualStrict), R(5),
+ B(JumpIfTrue), U8(19),
+ B(LdaSmi), U8(4),
+ B(TestEqualStrict), R(5),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(17),
+ B(Ldar), R(6),
+ /* 44 S> */ B(Return),
+ B(Ldar), R(6),
+ /* 44 S> */ B(Return),
+ B(Ldar), R(6),
+ B(ReThrow),
+ B(Ldar), R(6),
+ /* 44 S> */ B(Return),
+ B(Ldar), R(6),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 44 S> */ B(Return),
+]
+constant pool: [
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ kInstanceTypeDontCare,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ kInstanceTypeDontCare,
+ kInstanceTypeDontCare,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ kInstanceTypeDontCare,
+ kInstanceTypeDontCare,
+ kInstanceTypeDontCare,
+]
+handlers: [
+ [45, 661, 667],
+ [143, 423, 429],
+ [146, 384, 386],
+ [525, 537, 539],
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
index 42a2c5b15b..166f7f0351 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -16,15 +16,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(BitwiseAnd), R(0),
- B(StaGlobalSloppy), U8(0), U8(3),
- B(Return),
+ /* 26 E> */ B(StackCheck),
+ /* 31 S> */ B(LdrGlobal), U8(1), R(0),
+ B(BitwiseAndSmi), U8(1), R(0), U8(3),
+ /* 45 E> */ B(StaGlobalSloppy), U8(0), U8(4),
+ /* 51 S> */ B(Return),
]
constant pool: [
"global",
@@ -40,15 +38,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(Add), R(0),
- B(StaGlobalSloppy), U8(0), U8(3),
- B(Return),
+ /* 27 E> */ B(StackCheck),
+ /* 32 S> */ B(LdrGlobal), U8(1), R(0),
+ B(AddSmi), U8(1), R(0), U8(3),
+ /* 51 E> */ B(StaGlobalSloppy), U8(0), U8(4),
+ /* 57 S> */ B(Return),
]
constant pool: [
"unallocated",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index 7d64503a0c..6a2406ad12 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -16,14 +16,13 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(ToNumber),
- B(Inc),
- B(StaGlobalSloppy), U8(0), U8(3),
- B(Return),
+ /* 26 E> */ B(StackCheck),
+ /* 31 S> */ B(LdaGlobal), U8(1),
+ B(Inc), U8(5),
+ /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ /* 48 S> */ B(Return),
]
constant pool: [
"global",
@@ -39,16 +38,15 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(ToNumber),
- B(Star), R(0),
- B(Dec),
- B(StaGlobalSloppy), U8(0), U8(3),
- B(Ldar), R(0),
- B(Return),
+ /* 26 E> */ B(StackCheck),
+ /* 31 S> */ B(LdaGlobal), U8(1),
+ B(ToNumber), R(0),
+ B(Dec), U8(5),
+ /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Ldar), R(0),
+ /* 48 S> */ B(Return),
]
constant pool: [
"global",
@@ -64,14 +62,13 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 10
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(ToNumber),
- B(Dec),
- B(StaGlobalStrict), U8(0), U8(3),
- B(Return),
+ /* 27 E> */ B(StackCheck),
+ /* 46 S> */ B(LdaGlobal), U8(1),
+ B(Dec), U8(5),
+ /* 55 E> */ B(StaGlobalStrict), U8(0), U8(3),
+ /* 68 S> */ B(Return),
]
constant pool: [
"unallocated",
@@ -87,16 +84,15 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(ToNumber),
- B(Star), R(0),
- B(Inc),
- B(StaGlobalSloppy), U8(0), U8(3),
- B(Ldar), R(0),
- B(Return),
+ /* 27 E> */ B(StackCheck),
+ /* 32 S> */ B(LdaGlobal), U8(1),
+ B(ToNumber), R(0),
+ B(Inc), U8(5),
+ /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(3),
+ B(Ldar), R(0),
+ /* 54 S> */ B(Return),
]
constant pool: [
"unallocated",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
index ffed12e79c..adead06c5c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
@@ -18,18 +18,16 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(DeletePropertySloppy), R(0),
- B(Return),
+ /* 32 E> */ B(StackCheck),
+ /* 39 S> */ B(LdrGlobal), U8(1), R(0),
+ B(LdaConstant), U8(0),
+ B(DeletePropertySloppy), R(0),
+ /* 58 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
@@ -45,17 +43,15 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(DeletePropertyStrict), R(0),
- B(Return),
+ /* 28 E> */ B(StackCheck),
+ /* 51 S> */ B(LdrGlobal), U8(1), R(0),
+ B(LdaSmi), U8(1),
+ B(DeletePropertyStrict), R(0),
+ /* 71 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
@@ -70,16 +66,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(LdaContextSlot), R(context), U8(3),
- B(Star), R(0),
- B(LdaContextSlot), R(0), U8(2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(DeletePropertySloppy), R(1),
- B(Return),
+ /* 32 E> */ B(StackCheck),
+ /* 39 S> */ B(LdrContextSlot), R(context), U8(3), R(0),
+ B(LdrContextSlot), R(0), U8(2), R(1),
+ B(LdaConstant), U8(0),
+ B(DeletePropertySloppy), R(1),
+ /* 56 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -97,16 +91,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(LdaContextSlot), R(context), U8(3),
- B(Star), R(0),
- B(LdaContextSlot), R(0), U8(2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(DeletePropertySloppy), R(1),
- B(Return),
+ /* 18 E> */ B(StackCheck),
+ /* 25 S> */ B(LdrContextSlot), R(context), U8(3), R(0),
+ B(LdrContextSlot), R(0), U8(2), R(1),
+ B(LdaConstant), U8(0),
+ B(DeletePropertySloppy), R(1),
+ /* 42 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
index 2ff2485210..f70321aa99 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/HeapNumberConstants.golden
@@ -15,9 +15,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaConstant), U8(0),
+ /* 46 S> */ B(Return),
]
constant pool: [
1.2,
@@ -33,11 +33,11 @@ frame size: 1
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 47 S> */ B(LdaConstant), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
1.2,
@@ -54,11 +54,11 @@ frame size: 1
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 48 S> */ B(LdaConstant), U8(1),
+ /* 61 S> */ B(Return),
]
constant pool: [
3.14,
@@ -331,523 +331,523 @@ frame size: 1
parameter count: 1
bytecode array length: 1033
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(256),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 41 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 52 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 63 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 74 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 85 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 96 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 107 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 118 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 129 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 140 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 151 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 162 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 173 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 184 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 195 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 206 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 217 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 228 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 239 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 250 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 261 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 272 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 283 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 294 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 305 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 316 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 327 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 338 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 349 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 360 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 371 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 382 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 393 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 404 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 415 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 426 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 437 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 448 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 459 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 470 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 481 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 492 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 503 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 514 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 525 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 536 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 547 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 558 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 569 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 580 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 591 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 602 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 613 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 624 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 635 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 646 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 657 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 668 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 679 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 690 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 701 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 712 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 723 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 734 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 745 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 756 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 767 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 778 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 789 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 800 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 811 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 822 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 833 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 844 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 855 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 866 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 877 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 888 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 899 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 910 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 921 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 932 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 943 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 954 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 965 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 976 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 987 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 998 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 1009 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 1020 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 1031 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 1042 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 1053 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 1064 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 1075 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 1086 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1097 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1108 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1119 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1130 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1141 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1152 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1163 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1174 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1185 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1196 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1207 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1218 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1229 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1240 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1251 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1262 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1273 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1284 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1295 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1306 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1317 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1328 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1339 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1350 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1361 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1372 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1383 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1394 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1405 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1416 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1427 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1438 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1449 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1460 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1471 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1482 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1493 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1504 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1515 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1526 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1537 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1548 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1559 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1570 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1581 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1592 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1603 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1614 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1625 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1636 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1647 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1658 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1669 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1680 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1691 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1702 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1713 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1724 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1735 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1746 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1757 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1768 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1779 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1790 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1801 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1812 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1823 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1834 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 1845 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 1856 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 1867 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 1878 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 1889 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 1900 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 1911 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 1922 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 1933 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 1944 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 1955 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 1966 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 1977 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 1988 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 1999 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 2010 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 2021 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 2032 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 2043 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 2054 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 2065 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 2076 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 2087 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 2098 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 2109 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 2120 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 2131 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 2142 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 2153 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 2164 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 2175 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 2186 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2197 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2208 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2219 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2230 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2241 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2252 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2263 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2274 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2285 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2296 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2307 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2318 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2329 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2340 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2351 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2362 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2373 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2384 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2395 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2406 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2417 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2428 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2439 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2450 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2461 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2472 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2483 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2494 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2505 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2516 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2527 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2538 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2549 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2560 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2571 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2582 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2593 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2604 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2615 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2626 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2637 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2648 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2659 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2670 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2681 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2692 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2703 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2714 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2725 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2736 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2747 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2758 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 2769 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 2780 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 2791 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 2802 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 2813 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 2824 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 2835 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 2846 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 2857 S> */ B(Wide), B(LdaConstant), U16(256),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 2867 S> */ B(Return),
]
constant pool: [
1.414,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
index f450f3f321..c375fb1e1e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -3,7 +3,7 @@
#
---
-pool type: number
+pool type: mixed
execute: yes
wrap: no
test function name: f
@@ -23,9 +23,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(-1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 55 S> */ B(LdaSmi), U8(-1),
+ /* 70 S> */ B(Return),
]
constant pool: [
]
@@ -47,9 +47,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 36 S> */ B(LdaSmi), U8(1),
+ /* 76 S> */ B(Return),
]
constant pool: [
]
@@ -71,9 +71,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(-1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 59 S> */ B(LdaSmi), U8(-1),
+ /* 74 S> */ B(Return),
]
constant pool: [
]
@@ -93,9 +93,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 17 S> */ B(LdaUndefined),
+ /* 48 S> */ B(Return),
]
constant pool: [
]
@@ -116,22 +116,20 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(JumpIfToBooleanFalse), U8(14),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(5),
- B(LdaSmi), U8(2),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 25 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 30 S> */ B(JumpIfToBooleanFalse), U8(13),
+ /* 43 S> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(Mov), R(0), R(1),
+ B(Star), R(0),
+ B(Jump), U8(5),
+ /* 66 S> */ B(LdaSmi), U8(2),
+ /* 80 S> */ B(Return),
+ B(LdaUndefined),
+ /* 80 S> */ B(Return),
]
constant pool: [
]
@@ -149,22 +147,20 @@ snippet: "
};
f(99);
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 22
+bytecode array length: 18
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaZero),
- B(TestLessThanOrEqual), R(0),
- B(JumpIfFalse), U8(7),
- B(Wide), B(LdaSmi), U16(200),
- B(Return),
- B(Wide), B(LdaSmi), U16(-200),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 18 S> */ B(LdaZero),
+ /* 24 E> */ B(TestLessThanOrEqual), R(arg0),
+ B(JumpIfFalse), U8(7),
+ /* 36 S> */ B(Wide), B(LdaSmi), U16(200),
+ /* 80 S> */ B(Return),
+ /* 63 S> */ B(Wide), B(LdaSmi), U16(-200),
+ /* 80 S> */ B(Return),
+ B(LdaUndefined),
+ /* 80 S> */ B(Return),
]
constant pool: [
]
@@ -175,20 +171,18 @@ handlers: [
snippet: "
function f(a, b) { if (a in b) { return 200; } }f('prop', { prop: 'yes'});
"
-frame size: 1
+frame size: 0
parameter count: 3
-bytecode array length: 18
+bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestIn), R(0),
- B(JumpIfFalse), U8(7),
- B(Wide), B(LdaSmi), U16(200),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(Ldar), R(arg1),
+ /* 25 E> */ B(TestIn), R(arg0),
+ B(JumpIfFalse), U8(7),
+ /* 33 S> */ B(Wide), B(LdaSmi), U16(200),
+ /* 47 S> */ B(Return),
+ B(LdaUndefined),
+ /* 47 S> */ B(Return),
]
constant pool: [
]
@@ -264,159 +258,156 @@ snippet: "
b = a; a = b;
return 200; } else { return -200; } } f(0.001);
"
-frame size: 3
+frame size: 2
parameter count: 2
-bytecode array length: 287
+bytecode array length: 409
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaConstant), U8(0),
- B(TestEqualStrict), R(2),
- B(JumpIfFalseConstant), U8(1),
- B(Ldar), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Wide), B(LdaSmi), U16(200),
- B(Return),
- B(Wide), B(LdaSmi), U16(-200),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 24 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 35 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 38 S> */ B(LdaConstant), U8(0),
+ /* 44 E> */ B(TestEqualStrict), R(0),
+ B(JumpIfFalseConstant), U8(1),
+ /* 58 S> */ B(Mov), R(0), R(1),
+ /* 65 S> */ B(Mov), R(1), R(0),
+ /* 74 S> */ B(Mov), R(0), R(1),
+ /* 81 S> */ B(Mov), R(1), R(0),
+ /* 90 S> */ B(Mov), R(0), R(1),
+ /* 97 S> */ B(Mov), R(1), R(0),
+ /* 106 S> */ B(Mov), R(0), R(1),
+ /* 113 S> */ B(Mov), R(1), R(0),
+ /* 122 S> */ B(Mov), R(0), R(1),
+ /* 129 S> */ B(Mov), R(1), R(0),
+ /* 138 S> */ B(Mov), R(0), R(1),
+ /* 145 S> */ B(Mov), R(1), R(0),
+ /* 154 S> */ B(Mov), R(0), R(1),
+ /* 161 S> */ B(Mov), R(1), R(0),
+ /* 170 S> */ B(Mov), R(0), R(1),
+ /* 177 S> */ B(Mov), R(1), R(0),
+ /* 186 S> */ B(Mov), R(0), R(1),
+ /* 193 S> */ B(Mov), R(1), R(0),
+ /* 202 S> */ B(Mov), R(0), R(1),
+ /* 209 S> */ B(Mov), R(1), R(0),
+ /* 218 S> */ B(Mov), R(0), R(1),
+ /* 225 S> */ B(Mov), R(1), R(0),
+ /* 234 S> */ B(Mov), R(0), R(1),
+ /* 241 S> */ B(Mov), R(1), R(0),
+ /* 250 S> */ B(Mov), R(0), R(1),
+ /* 257 S> */ B(Mov), R(1), R(0),
+ /* 266 S> */ B(Mov), R(0), R(1),
+ /* 273 S> */ B(Mov), R(1), R(0),
+ /* 282 S> */ B(Mov), R(0), R(1),
+ /* 289 S> */ B(Mov), R(1), R(0),
+ /* 298 S> */ B(Mov), R(0), R(1),
+ /* 305 S> */ B(Mov), R(1), R(0),
+ /* 314 S> */ B(Mov), R(0), R(1),
+ /* 321 S> */ B(Mov), R(1), R(0),
+ /* 330 S> */ B(Mov), R(0), R(1),
+ /* 337 S> */ B(Mov), R(1), R(0),
+ /* 346 S> */ B(Mov), R(0), R(1),
+ /* 353 S> */ B(Mov), R(1), R(0),
+ /* 362 S> */ B(Mov), R(0), R(1),
+ /* 369 S> */ B(Mov), R(1), R(0),
+ /* 378 S> */ B(Mov), R(0), R(1),
+ /* 385 S> */ B(Mov), R(1), R(0),
+ /* 394 S> */ B(Mov), R(0), R(1),
+ /* 401 S> */ B(Mov), R(1), R(0),
+ /* 410 S> */ B(Mov), R(0), R(1),
+ /* 417 S> */ B(Mov), R(1), R(0),
+ /* 426 S> */ B(Mov), R(0), R(1),
+ /* 433 S> */ B(Mov), R(1), R(0),
+ /* 442 S> */ B(Mov), R(0), R(1),
+ /* 449 S> */ B(Mov), R(1), R(0),
+ /* 458 S> */ B(Mov), R(0), R(1),
+ /* 465 S> */ B(Mov), R(1), R(0),
+ /* 474 S> */ B(Mov), R(0), R(1),
+ /* 481 S> */ B(Mov), R(1), R(0),
+ /* 490 S> */ B(Mov), R(0), R(1),
+ /* 497 S> */ B(Mov), R(1), R(0),
+ /* 506 S> */ B(Mov), R(0), R(1),
+ /* 513 S> */ B(Mov), R(1), R(0),
+ /* 522 S> */ B(Mov), R(0), R(1),
+ /* 529 S> */ B(Mov), R(1), R(0),
+ /* 538 S> */ B(Mov), R(0), R(1),
+ /* 545 S> */ B(Mov), R(1), R(0),
+ /* 554 S> */ B(Mov), R(0), R(1),
+ /* 561 S> */ B(Mov), R(1), R(0),
+ /* 570 S> */ B(Mov), R(0), R(1),
+ /* 577 S> */ B(Mov), R(1), R(0),
+ /* 586 S> */ B(Mov), R(0), R(1),
+ /* 593 S> */ B(Mov), R(1), R(0),
+ /* 602 S> */ B(Mov), R(0), R(1),
+ /* 609 S> */ B(Mov), R(1), R(0),
+ /* 618 S> */ B(Mov), R(0), R(1),
+ /* 625 S> */ B(Mov), R(1), R(0),
+ /* 634 S> */ B(Mov), R(0), R(1),
+ /* 641 S> */ B(Mov), R(1), R(0),
+ /* 650 S> */ B(Mov), R(0), R(1),
+ /* 657 S> */ B(Mov), R(1), R(0),
+ /* 666 S> */ B(Mov), R(0), R(1),
+ /* 673 S> */ B(Mov), R(1), R(0),
+ /* 682 S> */ B(Mov), R(0), R(1),
+ /* 689 S> */ B(Mov), R(1), R(0),
+ /* 698 S> */ B(Mov), R(0), R(1),
+ /* 705 S> */ B(Mov), R(1), R(0),
+ /* 714 S> */ B(Mov), R(0), R(1),
+ /* 721 S> */ B(Mov), R(1), R(0),
+ /* 730 S> */ B(Mov), R(0), R(1),
+ /* 737 S> */ B(Mov), R(1), R(0),
+ /* 746 S> */ B(Mov), R(0), R(1),
+ /* 753 S> */ B(Mov), R(1), R(0),
+ /* 762 S> */ B(Mov), R(0), R(1),
+ /* 769 S> */ B(Mov), R(1), R(0),
+ /* 778 S> */ B(Mov), R(0), R(1),
+ /* 785 S> */ B(Mov), R(1), R(0),
+ /* 794 S> */ B(Mov), R(0), R(1),
+ /* 801 S> */ B(Mov), R(1), R(0),
+ /* 810 S> */ B(Mov), R(0), R(1),
+ /* 817 S> */ B(Mov), R(1), R(0),
+ /* 826 S> */ B(Mov), R(0), R(1),
+ /* 833 S> */ B(Mov), R(1), R(0),
+ /* 842 S> */ B(Mov), R(0), R(1),
+ /* 849 S> */ B(Mov), R(1), R(0),
+ /* 858 S> */ B(Mov), R(0), R(1),
+ /* 865 S> */ B(Mov), R(1), R(0),
+ /* 874 S> */ B(Mov), R(0), R(1),
+ /* 881 S> */ B(Mov), R(1), R(0),
+ /* 890 S> */ B(Mov), R(0), R(1),
+ /* 897 S> */ B(Mov), R(1), R(0),
+ /* 906 S> */ B(Mov), R(0), R(1),
+ /* 913 S> */ B(Mov), R(1), R(0),
+ /* 922 S> */ B(Mov), R(0), R(1),
+ /* 929 S> */ B(Mov), R(1), R(0),
+ /* 938 S> */ B(Mov), R(0), R(1),
+ /* 945 S> */ B(Mov), R(1), R(0),
+ /* 954 S> */ B(Mov), R(0), R(1),
+ /* 961 S> */ B(Mov), R(1), R(0),
+ /* 970 S> */ B(Mov), R(0), R(1),
+ /* 977 S> */ B(Mov), R(1), R(0),
+ /* 986 S> */ B(Mov), R(0), R(1),
+ /* 993 S> */ B(Mov), R(1), R(0),
+ /* 1002 S> */ B(Mov), R(0), R(1),
+ /* 1009 S> */ B(Mov), R(1), R(0),
+ /* 1018 S> */ B(Mov), R(0), R(1),
+ /* 1025 S> */ B(Mov), R(1), R(0),
+ /* 1034 S> */ B(Mov), R(0), R(1),
+ /* 1041 S> */ B(Mov), R(1), R(0),
+ /* 1050 S> */ B(Mov), R(0), R(1),
+ /* 1057 S> */ B(Mov), R(1), R(0),
+ /* 1066 S> */ B(Mov), R(0), R(1),
+ /* 1073 S> */ B(Mov), R(1), R(0),
+ /* 1081 S> */ B(Wide), B(LdaSmi), U16(200),
+ /* 1117 S> */ B(Return),
+ /* 1102 S> */ B(Wide), B(LdaSmi), U16(-200),
+ /* 1117 S> */ B(Return),
+ B(LdaUndefined),
+ /* 1117 S> */ B(Return),
]
constant pool: [
- 0.01,
- 265,
+ InstanceType::HEAP_NUMBER_TYPE,
+ kInstanceTypeDontCare,
]
handlers: [
]
@@ -496,153 +487,152 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 281
+bytecode array length: 407
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfToBooleanFalseConstant), U8(0),
- B(Ldar), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Star), R(0),
- B(Wide), B(LdaSmi), U16(200),
- B(Return),
- B(Wide), B(LdaSmi), U16(-200),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 25 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 36 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 41 S> */ B(Ldar), R(0),
+ B(JumpIfToBooleanFalseConstant), U8(0),
+ /* 52 S> */ B(Mov), R(0), R(1),
+ /* 59 S> */ B(Mov), R(1), R(0),
+ /* 68 S> */ B(Mov), R(0), R(1),
+ /* 75 S> */ B(Mov), R(1), R(0),
+ /* 84 S> */ B(Mov), R(0), R(1),
+ /* 91 S> */ B(Mov), R(1), R(0),
+ /* 100 S> */ B(Mov), R(0), R(1),
+ /* 107 S> */ B(Mov), R(1), R(0),
+ /* 116 S> */ B(Mov), R(0), R(1),
+ /* 123 S> */ B(Mov), R(1), R(0),
+ /* 132 S> */ B(Mov), R(0), R(1),
+ /* 139 S> */ B(Mov), R(1), R(0),
+ /* 148 S> */ B(Mov), R(0), R(1),
+ /* 155 S> */ B(Mov), R(1), R(0),
+ /* 164 S> */ B(Mov), R(0), R(1),
+ /* 171 S> */ B(Mov), R(1), R(0),
+ /* 180 S> */ B(Mov), R(0), R(1),
+ /* 187 S> */ B(Mov), R(1), R(0),
+ /* 196 S> */ B(Mov), R(0), R(1),
+ /* 203 S> */ B(Mov), R(1), R(0),
+ /* 212 S> */ B(Mov), R(0), R(1),
+ /* 219 S> */ B(Mov), R(1), R(0),
+ /* 228 S> */ B(Mov), R(0), R(1),
+ /* 235 S> */ B(Mov), R(1), R(0),
+ /* 244 S> */ B(Mov), R(0), R(1),
+ /* 251 S> */ B(Mov), R(1), R(0),
+ /* 260 S> */ B(Mov), R(0), R(1),
+ /* 267 S> */ B(Mov), R(1), R(0),
+ /* 276 S> */ B(Mov), R(0), R(1),
+ /* 283 S> */ B(Mov), R(1), R(0),
+ /* 292 S> */ B(Mov), R(0), R(1),
+ /* 299 S> */ B(Mov), R(1), R(0),
+ /* 308 S> */ B(Mov), R(0), R(1),
+ /* 315 S> */ B(Mov), R(1), R(0),
+ /* 324 S> */ B(Mov), R(0), R(1),
+ /* 331 S> */ B(Mov), R(1), R(0),
+ /* 340 S> */ B(Mov), R(0), R(1),
+ /* 347 S> */ B(Mov), R(1), R(0),
+ /* 356 S> */ B(Mov), R(0), R(1),
+ /* 363 S> */ B(Mov), R(1), R(0),
+ /* 372 S> */ B(Mov), R(0), R(1),
+ /* 379 S> */ B(Mov), R(1), R(0),
+ /* 388 S> */ B(Mov), R(0), R(1),
+ /* 395 S> */ B(Mov), R(1), R(0),
+ /* 404 S> */ B(Mov), R(0), R(1),
+ /* 411 S> */ B(Mov), R(1), R(0),
+ /* 420 S> */ B(Mov), R(0), R(1),
+ /* 427 S> */ B(Mov), R(1), R(0),
+ /* 436 S> */ B(Mov), R(0), R(1),
+ /* 443 S> */ B(Mov), R(1), R(0),
+ /* 452 S> */ B(Mov), R(0), R(1),
+ /* 459 S> */ B(Mov), R(1), R(0),
+ /* 468 S> */ B(Mov), R(0), R(1),
+ /* 475 S> */ B(Mov), R(1), R(0),
+ /* 484 S> */ B(Mov), R(0), R(1),
+ /* 491 S> */ B(Mov), R(1), R(0),
+ /* 500 S> */ B(Mov), R(0), R(1),
+ /* 507 S> */ B(Mov), R(1), R(0),
+ /* 516 S> */ B(Mov), R(0), R(1),
+ /* 523 S> */ B(Mov), R(1), R(0),
+ /* 532 S> */ B(Mov), R(0), R(1),
+ /* 539 S> */ B(Mov), R(1), R(0),
+ /* 548 S> */ B(Mov), R(0), R(1),
+ /* 555 S> */ B(Mov), R(1), R(0),
+ /* 564 S> */ B(Mov), R(0), R(1),
+ /* 571 S> */ B(Mov), R(1), R(0),
+ /* 580 S> */ B(Mov), R(0), R(1),
+ /* 587 S> */ B(Mov), R(1), R(0),
+ /* 596 S> */ B(Mov), R(0), R(1),
+ /* 603 S> */ B(Mov), R(1), R(0),
+ /* 612 S> */ B(Mov), R(0), R(1),
+ /* 619 S> */ B(Mov), R(1), R(0),
+ /* 628 S> */ B(Mov), R(0), R(1),
+ /* 635 S> */ B(Mov), R(1), R(0),
+ /* 644 S> */ B(Mov), R(0), R(1),
+ /* 651 S> */ B(Mov), R(1), R(0),
+ /* 660 S> */ B(Mov), R(0), R(1),
+ /* 667 S> */ B(Mov), R(1), R(0),
+ /* 676 S> */ B(Mov), R(0), R(1),
+ /* 683 S> */ B(Mov), R(1), R(0),
+ /* 692 S> */ B(Mov), R(0), R(1),
+ /* 699 S> */ B(Mov), R(1), R(0),
+ /* 708 S> */ B(Mov), R(0), R(1),
+ /* 715 S> */ B(Mov), R(1), R(0),
+ /* 724 S> */ B(Mov), R(0), R(1),
+ /* 731 S> */ B(Mov), R(1), R(0),
+ /* 740 S> */ B(Mov), R(0), R(1),
+ /* 747 S> */ B(Mov), R(1), R(0),
+ /* 756 S> */ B(Mov), R(0), R(1),
+ /* 763 S> */ B(Mov), R(1), R(0),
+ /* 772 S> */ B(Mov), R(0), R(1),
+ /* 779 S> */ B(Mov), R(1), R(0),
+ /* 788 S> */ B(Mov), R(0), R(1),
+ /* 795 S> */ B(Mov), R(1), R(0),
+ /* 804 S> */ B(Mov), R(0), R(1),
+ /* 811 S> */ B(Mov), R(1), R(0),
+ /* 820 S> */ B(Mov), R(0), R(1),
+ /* 827 S> */ B(Mov), R(1), R(0),
+ /* 836 S> */ B(Mov), R(0), R(1),
+ /* 843 S> */ B(Mov), R(1), R(0),
+ /* 852 S> */ B(Mov), R(0), R(1),
+ /* 859 S> */ B(Mov), R(1), R(0),
+ /* 868 S> */ B(Mov), R(0), R(1),
+ /* 875 S> */ B(Mov), R(1), R(0),
+ /* 884 S> */ B(Mov), R(0), R(1),
+ /* 891 S> */ B(Mov), R(1), R(0),
+ /* 900 S> */ B(Mov), R(0), R(1),
+ /* 907 S> */ B(Mov), R(1), R(0),
+ /* 916 S> */ B(Mov), R(0), R(1),
+ /* 923 S> */ B(Mov), R(1), R(0),
+ /* 932 S> */ B(Mov), R(0), R(1),
+ /* 939 S> */ B(Mov), R(1), R(0),
+ /* 948 S> */ B(Mov), R(0), R(1),
+ /* 955 S> */ B(Mov), R(1), R(0),
+ /* 964 S> */ B(Mov), R(0), R(1),
+ /* 971 S> */ B(Mov), R(1), R(0),
+ /* 980 S> */ B(Mov), R(0), R(1),
+ /* 987 S> */ B(Mov), R(1), R(0),
+ /* 996 S> */ B(Mov), R(0), R(1),
+ /* 1003 S> */ B(Mov), R(1), R(0),
+ /* 1012 S> */ B(Mov), R(0), R(1),
+ /* 1019 S> */ B(Mov), R(1), R(0),
+ /* 1028 S> */ B(Mov), R(0), R(1),
+ /* 1035 S> */ B(Mov), R(1), R(0),
+ /* 1044 S> */ B(Mov), R(0), R(1),
+ /* 1051 S> */ B(Mov), R(1), R(0),
+ /* 1060 S> */ B(Mov), R(0), R(1),
+ /* 1067 S> */ B(Mov), R(1), R(0),
+ /* 1076 S> */ B(Wide), B(LdaSmi), U16(200),
+ /* 1112 S> */ B(Return),
+ /* 1097 S> */ B(Wide), B(LdaSmi), U16(-200),
+ /* 1112 S> */ B(Return),
+ B(LdaUndefined),
+ /* 1112 S> */ B(Return),
]
constant pool: [
- 265,
+ kInstanceTypeDontCare,
]
handlers: [
]
@@ -662,69 +652,53 @@ snippet: "
}
f(1, 1);
"
-frame size: 1
+frame size: 0
parameter count: 3
-bytecode array length: 107
+bytecode array length: 75
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestEqual), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestEqualStrict), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestLessThan), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestGreaterThan), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestLessThanOrEqual), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestGreaterThanOrEqual), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestIn), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(TestInstanceOf), R(0),
- B(JumpIfFalse), U8(5),
- B(LdaSmi), U8(1),
- B(Return),
- B(LdaZero),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 21 S> */ B(Ldar), R(arg1),
+ /* 27 E> */ B(TestEqual), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 35 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 49 S> */ B(Ldar), R(arg1),
+ /* 55 E> */ B(TestEqualStrict), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 64 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 78 S> */ B(Ldar), R(arg1),
+ /* 84 E> */ B(TestLessThan), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 91 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 105 S> */ B(Ldar), R(arg1),
+ /* 111 E> */ B(TestGreaterThan), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 118 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 132 S> */ B(Ldar), R(arg1),
+ /* 138 E> */ B(TestLessThanOrEqual), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 146 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 160 S> */ B(Ldar), R(arg1),
+ /* 166 E> */ B(TestGreaterThanOrEqual), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 174 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 188 S> */ B(Ldar), R(arg1),
+ /* 194 E> */ B(TestIn), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 202 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 216 S> */ B(Ldar), R(arg1),
+ /* 222 E> */ B(TestInstanceOf), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 238 S> */ B(LdaSmi), U8(1),
+ /* 262 S> */ B(Return),
+ /* 252 S> */ B(LdaZero),
+ /* 262 S> */ B(Return),
]
constant pool: [
]
@@ -747,16 +721,60 @@ frame size: 1
parameter count: 1
bytecode array length: 14
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(JumpIfToBooleanFalse), U8(5),
- B(LdaSmi), U8(20),
- B(Return),
- B(LdaSmi), U8(-20),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 25 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 30 S> */ B(JumpIfToBooleanFalse), U8(5),
+ /* 43 S> */ B(LdaSmi), U8(20),
+ /* 85 S> */ B(Return),
+ /* 69 S> */ B(LdaSmi), U8(-20),
+ /* 85 S> */ B(Return),
+ B(LdaUndefined),
+ /* 85 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f(a, b) {
+ if (a == b || a < 0) {
+ return 1;
+ } else if (a > 0 && b > 0) {
+ return 0;
+ } else {
+ return -1;
+ }
+ };
+ f(-1, 1);
+"
+frame size: 0
+parameter count: 3
+bytecode array length: 32
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ /* 21 S> */ B(Ldar), R(arg1),
+ /* 27 E> */ B(TestEqual), R(arg0),
+ B(JumpIfTrue), U8(7),
+ B(LdaZero),
+ /* 37 E> */ B(TestLessThan), R(arg0),
+ B(JumpIfFalse), U8(5),
+ /* 48 S> */ B(LdaSmi), U8(1),
+ /* 133 S> */ B(Return),
+ /* 67 S> */ B(LdaZero),
+ /* 73 E> */ B(TestGreaterThan), R(arg0),
+ B(JumpIfFalse), U8(9),
+ B(LdaZero),
+ /* 82 E> */ B(TestGreaterThan), R(arg1),
+ B(JumpIfFalse), U8(4),
+ /* 93 S> */ B(LdaZero),
+ /* 133 S> */ B(Return),
+ /* 118 S> */ B(LdaSmi), U8(-1),
+ /* 133 S> */ B(Return),
+ B(LdaUndefined),
+ /* 133 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
index 1c37c124e8..6ac81a606b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IntegerConstants.golden
@@ -15,9 +15,9 @@ frame size: 0
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(ExtraWide), B(LdaSmi), U32(12345678),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(ExtraWide), B(LdaSmi), U32(12345678),
+ /* 51 S> */ B(Return),
]
constant pool: [
]
@@ -32,11 +32,11 @@ frame size: 1
parameter count: 1
bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(Wide), B(LdaSmi), U16(1234),
- B(Star), R(0),
- B(Wide), B(LdaSmi), U16(5678),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ /* 48 S> */ B(Wide), B(LdaSmi), U16(5678),
+ /* 61 S> */ B(Return),
]
constant pool: [
]
@@ -51,11 +51,11 @@ frame size: 1
parameter count: 1
bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(Wide), B(LdaSmi), U16(1234),
- B(Star), R(0),
- B(Wide), B(LdaSmi), U16(1234),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ /* 48 S> */ B(Wide), B(LdaSmi), U16(1234),
+ /* 61 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
index 3f0b49df82..a5efe58049 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
@@ -327,663 +327,655 @@ snippet: "
}
return 3;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 1422
+bytecode array length: 1408
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(256),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(257),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(258),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(259),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(260),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(261),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(262),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(263),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(264),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(265),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(266),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(267),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(268),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(269),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(270),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(271),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(272),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(273),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(274),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(275),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(276),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(277),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(278),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(279),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(280),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(281),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(282),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(283),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(284),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(285),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(286),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(287),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(288),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(289),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(290),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(291),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(292),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(293),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(294),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(295),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(296),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(297),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(298),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(299),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(300),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(301),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(302),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(303),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(304),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(305),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(306),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(307),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(308),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(309),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(310),
- B(Star), R(0),
- B(Wide), B(LdaConstant), U16(311),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(TestLessThan), R(2),
- B(Wide), B(JumpIfFalse), U16(46),
- B(StackCheck),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(TestEqual), R(2),
- B(Wide), B(JumpIfFalse), U16(7),
- B(Wide), B(Jump), U16(19),
- B(Ldar), R(1),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(TestEqual), R(2),
- B(Wide), B(JumpIfFalse), U16(7),
- B(Wide), B(Jump), U16(13),
- B(Ldar), R(1),
- B(ToNumber),
- B(Star), R(2),
- B(Inc),
- B(Star), R(1),
- B(Jump), U8(-53),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 55 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 68 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 81 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 94 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 107 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 120 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 133 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 146 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 159 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 172 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 185 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 198 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 211 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 224 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 237 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 250 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 263 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 276 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 289 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 302 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 315 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 328 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 341 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 354 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 367 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 380 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 393 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 406 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 419 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 432 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 445 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 458 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 471 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 484 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 497 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 510 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 523 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 536 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 549 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 562 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 575 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 588 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 601 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 614 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 627 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 640 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 653 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 666 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 679 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 692 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 705 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 718 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 731 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 744 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 757 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 770 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 783 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 796 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 809 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 822 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 835 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 848 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 861 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 874 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 887 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 900 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 913 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 926 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 939 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 952 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 965 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 978 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 991 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 1004 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 1017 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 1030 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 1043 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 1056 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 1069 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 1082 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 1095 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 1108 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 1121 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 1134 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 1147 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 1160 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 1173 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 1186 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 1199 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 1212 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 1225 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 1238 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 1251 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 1264 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 1277 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1290 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1303 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1316 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1329 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1342 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1355 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1368 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1381 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1394 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1407 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1420 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1433 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1446 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1459 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1472 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1485 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1498 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1511 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1524 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1537 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1550 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1563 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1576 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1589 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1602 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1615 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1628 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1641 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1654 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1667 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1680 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1693 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1706 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1719 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1732 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1745 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1758 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1771 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1784 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1797 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1810 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1823 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1836 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1849 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1862 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1875 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1888 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1901 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1914 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1927 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1940 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1953 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1966 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1979 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1992 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 2005 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 2018 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 2031 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 2044 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 2057 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 2070 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 2083 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 2096 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 2109 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 2122 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 2135 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 2148 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 2161 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 2174 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 2187 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 2200 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 2213 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 2226 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 2239 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 2252 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 2265 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 2278 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 2291 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 2304 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 2317 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 2330 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 2343 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 2356 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 2369 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 2382 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 2395 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 2408 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 2421 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 2434 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 2447 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 2460 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 2473 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 2486 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 2499 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 2512 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 2525 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 2538 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 2551 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 2564 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 2577 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2590 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2603 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2616 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2629 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2642 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2655 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2668 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2681 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2694 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2707 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2720 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2733 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2746 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2759 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2772 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2785 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2798 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2811 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2824 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2837 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2850 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2863 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2876 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2889 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2902 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2915 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2928 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2941 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2954 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2967 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2980 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2993 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 3006 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 3019 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 3032 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 3045 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 3058 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 3071 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 3084 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 3097 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 3110 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 3123 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 3136 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 3149 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 3162 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 3175 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 3188 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 3201 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 3214 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 3227 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 3240 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 3253 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 3266 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 3279 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 3292 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 3305 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 3318 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 3331 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 3344 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 3357 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 3370 S> */ B(Wide), B(LdaConstant), U16(256),
+ B(Star), R(0),
+ /* 3383 S> */ B(Wide), B(LdaConstant), U16(257),
+ B(Star), R(0),
+ /* 3396 S> */ B(Wide), B(LdaConstant), U16(258),
+ B(Star), R(0),
+ /* 3409 S> */ B(Wide), B(LdaConstant), U16(259),
+ B(Star), R(0),
+ /* 3422 S> */ B(Wide), B(LdaConstant), U16(260),
+ B(Star), R(0),
+ /* 3435 S> */ B(Wide), B(LdaConstant), U16(261),
+ B(Star), R(0),
+ /* 3448 S> */ B(Wide), B(LdaConstant), U16(262),
+ B(Star), R(0),
+ /* 3461 S> */ B(Wide), B(LdaConstant), U16(263),
+ B(Star), R(0),
+ /* 3474 S> */ B(Wide), B(LdaConstant), U16(264),
+ B(Star), R(0),
+ /* 3487 S> */ B(Wide), B(LdaConstant), U16(265),
+ B(Star), R(0),
+ /* 3500 S> */ B(Wide), B(LdaConstant), U16(266),
+ B(Star), R(0),
+ /* 3513 S> */ B(Wide), B(LdaConstant), U16(267),
+ B(Star), R(0),
+ /* 3526 S> */ B(Wide), B(LdaConstant), U16(268),
+ B(Star), R(0),
+ /* 3539 S> */ B(Wide), B(LdaConstant), U16(269),
+ B(Star), R(0),
+ /* 3552 S> */ B(Wide), B(LdaConstant), U16(270),
+ B(Star), R(0),
+ /* 3565 S> */ B(Wide), B(LdaConstant), U16(271),
+ B(Star), R(0),
+ /* 3578 S> */ B(Wide), B(LdaConstant), U16(272),
+ B(Star), R(0),
+ /* 3591 S> */ B(Wide), B(LdaConstant), U16(273),
+ B(Star), R(0),
+ /* 3604 S> */ B(Wide), B(LdaConstant), U16(274),
+ B(Star), R(0),
+ /* 3617 S> */ B(Wide), B(LdaConstant), U16(275),
+ B(Star), R(0),
+ /* 3630 S> */ B(Wide), B(LdaConstant), U16(276),
+ B(Star), R(0),
+ /* 3643 S> */ B(Wide), B(LdaConstant), U16(277),
+ B(Star), R(0),
+ /* 3656 S> */ B(Wide), B(LdaConstant), U16(278),
+ B(Star), R(0),
+ /* 3669 S> */ B(Wide), B(LdaConstant), U16(279),
+ B(Star), R(0),
+ /* 3682 S> */ B(Wide), B(LdaConstant), U16(280),
+ B(Star), R(0),
+ /* 3695 S> */ B(Wide), B(LdaConstant), U16(281),
+ B(Star), R(0),
+ /* 3708 S> */ B(Wide), B(LdaConstant), U16(282),
+ B(Star), R(0),
+ /* 3721 S> */ B(Wide), B(LdaConstant), U16(283),
+ B(Star), R(0),
+ /* 3734 S> */ B(Wide), B(LdaConstant), U16(284),
+ B(Star), R(0),
+ /* 3747 S> */ B(Wide), B(LdaConstant), U16(285),
+ B(Star), R(0),
+ /* 3760 S> */ B(Wide), B(LdaConstant), U16(286),
+ B(Star), R(0),
+ /* 3773 S> */ B(Wide), B(LdaConstant), U16(287),
+ B(Star), R(0),
+ /* 3786 S> */ B(Wide), B(LdaConstant), U16(288),
+ B(Star), R(0),
+ /* 3799 S> */ B(Wide), B(LdaConstant), U16(289),
+ B(Star), R(0),
+ /* 3812 S> */ B(Wide), B(LdaConstant), U16(290),
+ B(Star), R(0),
+ /* 3825 S> */ B(Wide), B(LdaConstant), U16(291),
+ B(Star), R(0),
+ /* 3838 S> */ B(Wide), B(LdaConstant), U16(292),
+ B(Star), R(0),
+ /* 3851 S> */ B(Wide), B(LdaConstant), U16(293),
+ B(Star), R(0),
+ /* 3864 S> */ B(Wide), B(LdaConstant), U16(294),
+ B(Star), R(0),
+ /* 3877 S> */ B(Wide), B(LdaConstant), U16(295),
+ B(Star), R(0),
+ /* 3890 S> */ B(Wide), B(LdaConstant), U16(296),
+ B(Star), R(0),
+ /* 3903 S> */ B(Wide), B(LdaConstant), U16(297),
+ B(Star), R(0),
+ /* 3916 S> */ B(Wide), B(LdaConstant), U16(298),
+ B(Star), R(0),
+ /* 3929 S> */ B(Wide), B(LdaConstant), U16(299),
+ B(Star), R(0),
+ /* 3942 S> */ B(Wide), B(LdaConstant), U16(300),
+ B(Star), R(0),
+ /* 3955 S> */ B(Wide), B(LdaConstant), U16(301),
+ B(Star), R(0),
+ /* 3968 S> */ B(Wide), B(LdaConstant), U16(302),
+ B(Star), R(0),
+ /* 3981 S> */ B(Wide), B(LdaConstant), U16(303),
+ B(Star), R(0),
+ /* 3994 S> */ B(Wide), B(LdaConstant), U16(304),
+ B(Star), R(0),
+ /* 4007 S> */ B(Wide), B(LdaConstant), U16(305),
+ B(Star), R(0),
+ /* 4020 S> */ B(Wide), B(LdaConstant), U16(306),
+ B(Star), R(0),
+ /* 4033 S> */ B(Wide), B(LdaConstant), U16(307),
+ B(Star), R(0),
+ /* 4046 S> */ B(Wide), B(LdaConstant), U16(308),
+ B(Star), R(0),
+ /* 4059 S> */ B(Wide), B(LdaConstant), U16(309),
+ B(Star), R(0),
+ /* 4072 S> */ B(Wide), B(LdaConstant), U16(310),
+ B(Star), R(0),
+ /* 4085 S> */ B(Wide), B(LdaConstant), U16(311),
+ B(Star), R(0),
+ /* 4103 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 4108 S> */ B(LdaSmi), U8(3),
+ /* 4108 E> */ B(TestLessThan), R(1),
+ B(Wide), B(JumpIfFalse), U16(36),
+ /* 4090 E> */ B(StackCheck),
+ /* 4122 S> */ B(LdaSmi), U8(1),
+ /* 4128 E> */ B(TestEqual), R(1),
+ B(Wide), B(JumpIfFalse), U16(7),
+ /* 4134 S> */ B(Wide), B(Jump), U16(15),
+ /* 4146 S> */ B(LdaSmi), U8(2),
+ /* 4152 E> */ B(TestEqual), R(1),
+ B(Wide), B(JumpIfFalse), U16(7),
+ /* 4158 S> */ B(Wide), B(Jump), U16(11),
+ /* 4114 S> */ B(Ldar), R(1),
+ B(Inc), U8(1),
+ B(Star), R(1),
+ B(Jump), U8(-39),
+ /* 4167 S> */ B(LdaSmi), U8(3),
+ /* 4177 S> */ B(Return),
]
constant pool: [
0.1,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
index 5ceda85dbc..4dbbdafd5e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
@@ -15,13 +15,13 @@ frame size: 1
parameter count: 1
bytecode array length: 10
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ B(LdaUndefined),
+ /* 46 S> */ B(Return),
]
constant pool: [
]
@@ -36,16 +36,16 @@ frame size: 2
parameter count: 1
bytecode array length: 20
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 46 S> */ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ /* 56 S> */ B(Return),
]
constant pool: [
"x",
@@ -59,22 +59,21 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 27
+bytecode array length: 26
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Ldar), R(1),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ /* 45 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Mov), R(1), R(0),
+ B(LdaUndefined),
+ /* 52 S> */ B(Return),
]
constant pool: [
"x",
@@ -88,24 +87,23 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 31
+bytecode array length: 30
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Ldar), R(1),
- B(Star), R(0),
- B(LdaUndefined),
- B(Return),
+ B(LdaTheHole),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(20),
+ B(Star), R(1),
+ B(Ldar), R(0),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(0),
+ B(Star), R(2),
+ /* 48 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ B(Mov), R(1), R(0),
+ B(LdaUndefined),
+ /* 54 S> */ B(Return),
]
constant pool: [
"x",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
index 4fc5c4ff99..0b25fbf329 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -13,19 +13,19 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 24
+bytecode array length: 21
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ /* 72 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -39,23 +39,23 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 37
+bytecode array length: 34
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaContextSlot), R(context), U8(4),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 72 S> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ /* 82 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -70,27 +70,27 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 45
+bytecode array length: 42
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(20),
- B(Star), R(2),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ /* 45 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Ldar), R(2),
+ B(StaContextSlot), R(context), U8(4),
+ /* 45 E> */ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ /* 78 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
@@ -105,28 +105,28 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 47
+bytecode array length: 44
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(1),
- B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4),
- B(CreateClosure), U8(0), U8(0),
- B(Star), R(0),
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaContextSlot), R(context), U8(4),
- B(LdaSmi), U8(20),
- B(Star), R(2),
- B(LdaContextSlot), R(context), U8(4),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4),
- B(LdaUndefined),
- B(Return),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(1),
+ B(LdaTheHole),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateClosure), U8(0), U8(2),
+ B(Star), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ /* 42 E> */ B(StaContextSlot), R(context), U8(4),
+ /* 46 S> */ B(LdaSmi), U8(20),
+ B(Star), R(2),
+ /* 48 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(JumpIfNotHole), U8(11),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ B(Ldar), R(2),
+ B(StaContextSlot), R(context), U8(4),
+ B(LdaUndefined),
+ /* 80 S> */ B(Return),
]
constant pool: [
InstanceType::SHARED_FUNCTION_INFO_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
index 0a96caeee2..dd9f714394 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
@@ -16,14 +16,13 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(Return),
+ /* 21 E> */ B(StackCheck),
+ /* 26 S> */ B(LdaGlobal), U8(1),
+ /* 36 S> */ B(Return),
]
constant pool: [
- "a",
]
handlers: [
]
@@ -36,14 +35,13 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(Return),
+ /* 27 E> */ B(StackCheck),
+ /* 32 S> */ B(LdaGlobal), U8(1),
+ /* 42 S> */ B(Return),
]
constant pool: [
- "t",
]
handlers: [
]
@@ -56,14 +54,13 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 5
+bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaGlobal), U8(0), U8(1),
- B(Return),
+ /* 17 E> */ B(StackCheck),
+ /* 22 S> */ B(LdaGlobal), U8(1),
+ /* 32 S> */ B(Return),
]
constant pool: [
- "a",
]
handlers: [
]
@@ -204,401 +201,272 @@ snippet: "
}
f({name: 1});
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 1032
+bytecode array length: 646
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(255),
- B(Wide), B(LdaGlobal), U16(1), U16(257),
- B(Return),
+ /* 17 E> */ B(StackCheck),
+ /* 25 S> */ B(Nop),
+ /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 35 S> */ B(Nop),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 45 S> */ B(Nop),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 55 S> */ B(Nop),
+ /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 65 S> */ B(Nop),
+ /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 75 S> */ B(Nop),
+ /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 85 S> */ B(Nop),
+ /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 95 S> */ B(Nop),
+ /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 105 S> */ B(Nop),
+ /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 115 S> */ B(Nop),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 125 S> */ B(Nop),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 135 S> */ B(Nop),
+ /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 145 S> */ B(Nop),
+ /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 155 S> */ B(Nop),
+ /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 165 S> */ B(Nop),
+ /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 175 S> */ B(Nop),
+ /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 185 S> */ B(Nop),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 195 S> */ B(Nop),
+ /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 205 S> */ B(Nop),
+ /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 215 S> */ B(Nop),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 225 S> */ B(Nop),
+ /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 235 S> */ B(Nop),
+ /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 245 S> */ B(Nop),
+ /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 255 S> */ B(Nop),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 265 S> */ B(Nop),
+ /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 275 S> */ B(Nop),
+ /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 285 S> */ B(Nop),
+ /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 295 S> */ B(Nop),
+ /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 305 S> */ B(Nop),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 315 S> */ B(Nop),
+ /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 325 S> */ B(Nop),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 335 S> */ B(Nop),
+ /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 345 S> */ B(Nop),
+ /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 355 S> */ B(Nop),
+ /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 365 S> */ B(Nop),
+ /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 375 S> */ B(Nop),
+ /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 385 S> */ B(Nop),
+ /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 395 S> */ B(Nop),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 405 S> */ B(Nop),
+ /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 415 S> */ B(Nop),
+ /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 425 S> */ B(Nop),
+ /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 435 S> */ B(Nop),
+ /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 445 S> */ B(Nop),
+ /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 455 S> */ B(Nop),
+ /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 465 S> */ B(Nop),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 475 S> */ B(Nop),
+ /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 485 S> */ B(Nop),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 495 S> */ B(Nop),
+ /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 505 S> */ B(Nop),
+ /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 515 S> */ B(Nop),
+ /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 525 S> */ B(Nop),
+ /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 535 S> */ B(Nop),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 545 S> */ B(Nop),
+ /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 555 S> */ B(Nop),
+ /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 565 S> */ B(Nop),
+ /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 575 S> */ B(Nop),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 585 S> */ B(Nop),
+ /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 595 S> */ B(Nop),
+ /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 605 S> */ B(Nop),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 615 S> */ B(Nop),
+ /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 625 S> */ B(Nop),
+ /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 635 S> */ B(Nop),
+ /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 645 S> */ B(Nop),
+ /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 655 S> */ B(Nop),
+ /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 665 S> */ B(Nop),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 675 S> */ B(Nop),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 685 S> */ B(Nop),
+ /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 695 S> */ B(Nop),
+ /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 705 S> */ B(Nop),
+ /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 715 S> */ B(Nop),
+ /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 725 S> */ B(Nop),
+ /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 735 S> */ B(Nop),
+ /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 745 S> */ B(Nop),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 755 S> */ B(Nop),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 765 S> */ B(Nop),
+ /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 775 S> */ B(Nop),
+ /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 785 S> */ B(Nop),
+ /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 795 S> */ B(Nop),
+ /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 805 S> */ B(Nop),
+ /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 815 S> */ B(Nop),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 825 S> */ B(Nop),
+ /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 835 S> */ B(Nop),
+ /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 845 S> */ B(Nop),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 855 S> */ B(Nop),
+ /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 865 S> */ B(Nop),
+ /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 875 S> */ B(Nop),
+ /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 885 S> */ B(Nop),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 895 S> */ B(Nop),
+ /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 905 S> */ B(Nop),
+ /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 915 S> */ B(Nop),
+ /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 925 S> */ B(Nop),
+ /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 935 S> */ B(Nop),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 945 S> */ B(Nop),
+ /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 955 S> */ B(Nop),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 965 S> */ B(Nop),
+ /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 975 S> */ B(Nop),
+ /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 985 S> */ B(Nop),
+ /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 995 S> */ B(Nop),
+ /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 1005 S> */ B(Nop),
+ /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 1015 S> */ B(Nop),
+ /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 1025 S> */ B(Nop),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 1035 S> */ B(Nop),
+ /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 1045 S> */ B(Nop),
+ /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 1055 S> */ B(Nop),
+ /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 1065 S> */ B(Nop),
+ /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 1075 S> */ B(Nop),
+ /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 1085 S> */ B(Nop),
+ /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 1095 S> */ B(Nop),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 1105 S> */ B(Nop),
+ /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 1115 S> */ B(Nop),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 1125 S> */ B(Nop),
+ /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1135 S> */ B(Nop),
+ /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1145 S> */ B(Nop),
+ /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1155 S> */ B(Nop),
+ /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1165 S> */ B(Nop),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1175 S> */ B(Nop),
+ /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1185 S> */ B(Nop),
+ /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1195 S> */ B(Nop),
+ /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1205 S> */ B(Nop),
+ /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1215 S> */ B(Nop),
+ /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1225 S> */ B(Nop),
+ /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1235 S> */ B(Nop),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1245 S> */ B(Nop),
+ /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1255 S> */ B(Nop),
+ /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1265 S> */ B(Nop),
+ /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1275 S> */ B(Nop),
+ /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1285 S> */ B(Nop),
+ /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1295 S> */ B(Nop),
+ /* 1296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
+ /* 1305 S> */ B(Wide), B(LdaGlobal), U16(257),
+ /* 1315 S> */ B(Return),
]
constant pool: [
"name",
- "a",
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
index cddb9a1429..b8c8c5fa72 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
@@ -15,12 +15,12 @@ frame size: 1
parameter count: 1
bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(JumpIfToBooleanTrue), U8(4),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanTrue), U8(4),
+ B(LdaSmi), U8(3),
+ /* 60 S> */ B(Return),
]
constant pool: [
]
@@ -31,19 +31,18 @@ handlers: [
snippet: "
var x = 0; return (x == 1) || 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(TestEqual), R(1),
- B(JumpIfTrue), U8(4),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ /* 55 E> */ B(TestEqual), R(0),
+ B(JumpIfTrue), U8(4),
+ B(LdaSmi), U8(3),
+ /* 67 S> */ B(Return),
]
constant pool: [
]
@@ -58,12 +57,12 @@ frame size: 1
parameter count: 1
bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(JumpIfToBooleanFalse), U8(4),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanFalse), U8(4),
+ B(LdaSmi), U8(3),
+ /* 60 S> */ B(Return),
]
constant pool: [
]
@@ -74,19 +73,18 @@ handlers: [
snippet: "
var x = 0; return (x == 0) && 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqual), R(1),
- B(JumpIfFalse), U8(4),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaZero),
+ /* 55 E> */ B(TestEqual), R(0),
+ B(JumpIfFalse), U8(4),
+ B(LdaSmi), U8(3),
+ /* 67 S> */ B(Return),
]
constant pool: [
]
@@ -101,12 +99,12 @@ frame size: 1
parameter count: 1
bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(JumpIfToBooleanTrue), U8(4),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanTrue), U8(4),
+ B(LdaSmi), U8(3),
+ /* 68 S> */ B(Return),
]
constant pool: [
]
@@ -119,25 +117,21 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 32
+bytecode array length: 24
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(LdaSmi), U8(4),
- B(Star), R(2),
- B(Ldar), R(0),
- B(JumpIfToBooleanTrue), U8(16),
- B(Ldar), R(0),
- B(Ldar), R(1),
- B(Ldar), R(0),
- B(Ldar), R(1),
- B(LdaSmi), U8(5),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ /* 49 S> */ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ /* 56 S> */ B(LdaSmi), U8(4),
+ B(Star), R(2),
+ /* 59 S> */ B(Ldar), R(0),
+ B(JumpIfToBooleanTrue), U8(8),
+ B(LdaSmi), U8(5),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ /* 95 S> */ B(Return),
]
constant pool: [
]
@@ -184,145 +178,145 @@ frame size: 3
parameter count: 1
bytecode array length: 276
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Star), R(2),
- B(Ldar), R(0),
- B(JumpIfToBooleanTrueConstant), U8(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 60 S> */ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ /* 63 S> */ B(Ldar), R(0),
+ B(JumpIfToBooleanTrueConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ /* 624 S> */ B(Return),
]
constant pool: [
260,
@@ -370,145 +364,145 @@ frame size: 3
parameter count: 1
bytecode array length: 275
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Star), R(2),
- B(Ldar), R(0),
- B(JumpIfToBooleanFalseConstant), U8(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 60 S> */ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ /* 63 S> */ B(Ldar), R(0),
+ B(JumpIfToBooleanFalseConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ /* 624 S> */ B(Return),
]
constant pool: [
260,
@@ -552,152 +546,150 @@ snippet: "
a = 1, b = 2,
a = 1, b = 2, 3);
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 282
+bytecode array length: 278
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Star), R(2),
- B(Ldar), R(0),
- B(Star), R(3),
- B(LdaSmi), U8(3),
- B(TestGreaterThan), R(3),
- B(JumpIfTrueConstant), U8(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 60 S> */ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ /* 63 S> */ B(LdaSmi), U8(3),
+ /* 73 E> */ B(TestGreaterThan), R(0),
+ B(JumpIfTrueConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ /* 630 S> */ B(Return),
]
constant pool: [
260,
@@ -741,152 +733,150 @@ snippet: "
a = 1, b = 2,
a = 1, b = 2, 3);
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 281
+bytecode array length: 277
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Star), R(2),
- B(Ldar), R(0),
- B(Star), R(3),
- B(LdaSmi), U8(5),
- B(TestLessThan), R(3),
- B(JumpIfFalseConstant), U8(0),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 53 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 60 S> */ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ /* 63 S> */ B(LdaSmi), U8(5),
+ /* 73 E> */ B(TestLessThan), R(0),
+ B(JumpIfFalseConstant), U8(0),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ B(LdaSmi), U8(3),
+ /* 630 S> */ B(Return),
]
constant pool: [
260,
@@ -902,9 +892,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaZero),
+ /* 49 S> */ B(Return),
]
constant pool: [
]
@@ -919,9 +909,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(1),
+ /* 49 S> */ B(Return),
]
constant pool: [
]
@@ -936,15 +926,15 @@ frame size: 1
parameter count: 1
bytecode array length: 15
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(JumpIfToBooleanFalse), U8(4),
- B(LdaSmi), U8(3),
- B(JumpIfToBooleanTrue), U8(3),
- B(LdaZero),
- B(LdaSmi), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanFalse), U8(4),
+ B(LdaSmi), U8(3),
+ B(JumpIfToBooleanTrue), U8(3),
+ B(LdaZero),
+ B(LdaSmi), U8(1),
+ /* 68 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
index acec42f2e0..ed13d254ac 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -11,36 +11,38 @@ wrap: yes
snippet: "
eval('var x = 10;'); return x;
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 67
+bytecode array length: 68
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
- B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(1), R(4),
- B(Mov), R(3), R(5),
- B(Mov), R(closure), R(6),
- B(LdaZero),
- B(Star), R(7),
- B(LdaSmi), U8(30),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(2), U8(0),
- B(LdaLookupSlot), U8(2),
- B(Return),
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(LdaSmi), U8(34),
+ B(Star), R(9),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
+ B(Star), R(1),
+ /* 34 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 55 S> */ B(LdaLookupSlot), U8(2),
+ /* 65 S> */ B(Return),
]
constant pool: [
"eval",
@@ -54,37 +56,39 @@ handlers: [
snippet: "
eval('var x = 10;'); return typeof x;
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 68
+bytecode array length: 69
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
- B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Mov), R(1), R(4),
- B(Mov), R(3), R(5),
- B(Mov), R(closure), R(6),
- B(LdaZero),
- B(Star), R(7),
- B(LdaSmi), U8(30),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(2), U8(0),
- B(LdaLookupSlotInsideTypeof), U8(2),
- B(TypeOf),
- B(Return),
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaConstant), U8(0),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(LdaSmi), U8(34),
+ B(Star), R(9),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
+ B(Star), R(1),
+ /* 34 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 55 S> */ B(LdaLookupSlotInsideTypeof), U8(2),
+ B(TypeOf),
+ /* 72 S> */ B(Return),
]
constant pool: [
"eval",
@@ -98,37 +102,39 @@ handlers: [
snippet: "
x = 20; return eval('');
"
-frame size: 9
+frame size: 10
parameter count: 1
-bytecode array length: 69
+bytecode array length: 70
bytecodes: [
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1),
- B(PushContext), R(0),
- B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4),
- B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(5),
- B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(6),
- B(StackCheck),
- B(LdaSmi), U8(20),
- B(StaLookupSlotSloppy), U8(0),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- B(LdaConstant), U8(2),
- B(Star), R(3),
- B(Mov), R(1), R(4),
- B(Mov), R(3), R(5),
- B(Mov), R(closure), R(6),
- B(LdaZero),
- B(Star), R(7),
- B(LdaSmi), U8(30),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(5),
- B(Star), R(1),
- B(Call), R(1), R(2), U8(2), U8(0),
- B(Return),
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaContextSlot), R(context), U8(4),
+ B(CreateMappedArguments),
+ B(StaContextSlot), R(context), U8(5),
+ B(Ldar), R(new_target),
+ B(StaContextSlot), R(context), U8(6),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(20),
+ /* 36 E> */ B(StaLookupSlotSloppy), U8(0),
+ /* 42 S> */ B(LdaConstant), U8(1),
+ B(Star), R(3),
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
+ B(LdaConstant), U8(2),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaSmi), U8(30),
+ B(Star), R(8),
+ B(LdaSmi), U8(49),
+ B(Star), R(9),
+ B(Mov), R(1), R(4),
+ B(Mov), R(3), R(5),
+ B(Mov), R(closure), R(6),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
+ B(Star), R(1),
+ /* 49 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 59 S> */ B(Return),
]
constant pool: [
"x",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
index 26e11e3b18..41476311e8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
@@ -21,9 +21,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaLookupSlot), U8(0),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaLookupSlot), U8(0),
+ /* 25 S> */ B(Return),
]
constant pool: [
"x",
@@ -44,11 +44,11 @@ frame size: 0
parameter count: 1
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaLookupSlotSloppy), U8(0),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaSmi), U8(10),
+ /* 17 E> */ B(StaLookupSlotSloppy), U8(0),
+ B(LdaUndefined),
+ /* 23 S> */ B(Return),
]
constant pool: [
"x",
@@ -69,11 +69,11 @@ frame size: 0
parameter count: 1
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(StaLookupSlotStrict), U8(0),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 29 S> */ B(LdaSmi), U8(10),
+ /* 31 E> */ B(StaLookupSlotStrict), U8(0),
+ B(LdaUndefined),
+ /* 37 S> */ B(Return),
]
constant pool: [
"x",
@@ -94,10 +94,10 @@ frame size: 0
parameter count: 1
bytecode array length: 5
bytecodes: [
- B(StackCheck),
- B(LdaLookupSlotInsideTypeof), U8(0),
- B(TypeOf),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(LdaLookupSlotInsideTypeof), U8(0),
+ B(TypeOf),
+ /* 32 S> */ B(Return),
]
constant pool: [
"x",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
index 2fdc3e9f54..a668d62452 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
@@ -281,521 +281,521 @@ frame size: 1
parameter count: 1
bytecode array length: 1030
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(Wide), B(LdaLookupSlot), U16(256),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 22 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 34 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 70 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 82 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 94 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 106 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 118 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 130 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 142 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 154 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 166 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 178 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 190 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 202 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 214 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 226 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 238 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 250 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 262 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 274 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 286 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 298 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 310 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 322 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 334 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 346 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 358 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 370 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 382 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 394 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 406 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 418 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 430 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 442 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 454 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 466 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 478 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 490 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 502 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 514 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 526 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 538 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 550 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 562 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 574 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 586 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 598 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 610 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 622 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 634 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 646 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 658 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 670 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 682 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 694 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 706 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 718 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 730 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 742 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 754 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 766 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 778 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 790 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 802 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 814 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 826 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 838 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 850 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 862 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 874 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 886 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 898 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 910 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 922 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 934 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 946 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 958 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 970 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 982 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 994 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 1006 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 1018 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 1030 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 1042 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 1054 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 1066 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 1078 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 1090 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 1102 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 1114 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 1126 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 1138 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 1150 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 1162 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1174 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1186 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1198 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1210 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1222 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1234 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1246 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1258 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1270 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1282 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1294 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1306 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1318 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1330 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1342 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1354 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1366 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1378 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1390 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1402 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1414 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1426 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1438 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1450 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1462 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1474 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1486 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1498 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1510 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1522 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1534 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1546 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1558 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1570 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1582 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1594 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1606 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1618 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1630 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1642 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1654 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1666 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1678 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1690 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1702 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1714 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1726 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1738 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1750 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1762 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1774 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1786 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1798 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1810 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1822 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1834 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1846 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1858 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1870 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1882 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1894 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1906 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1918 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1930 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1942 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1954 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1966 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1978 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 1990 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 2002 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 2014 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 2026 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 2038 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 2050 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 2062 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 2074 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 2086 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 2098 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 2110 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 2122 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 2134 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 2146 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 2158 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 2170 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 2182 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 2194 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 2206 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 2218 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 2230 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 2242 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 2254 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 2266 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 2278 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 2290 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 2302 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 2314 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 2326 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 2338 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 2350 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 2362 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2374 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2386 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2398 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2410 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2422 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2434 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2446 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2458 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2470 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2482 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2494 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2506 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2518 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2530 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2542 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2554 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2566 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2578 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2590 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2602 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2614 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2626 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2638 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2650 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2662 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2674 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2686 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2698 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2710 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2722 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2734 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2746 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2758 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2770 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2782 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2794 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2806 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2818 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2830 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2842 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2854 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2866 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2878 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2890 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2902 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2914 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2926 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2938 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2950 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2962 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2974 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2986 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 2998 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 3010 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 3022 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 3034 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 3046 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 3058 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 3070 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 3082 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 3086 S> */ B(Wide), B(LdaLookupSlot), U16(256),
+ /* 3095 S> */ B(Return),
]
constant pool: [
InstanceType::HEAP_NUMBER_TYPE,
@@ -1332,522 +1332,522 @@ frame size: 1
parameter count: 1
bytecode array length: 1031
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(Wide), B(LdaLookupSlotInsideTypeof), U16(256),
- B(TypeOf),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 22 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 34 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 70 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 82 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 94 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 106 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 118 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 130 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 142 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 154 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 166 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 178 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 190 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 202 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 214 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 226 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 238 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 250 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 262 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 274 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 286 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 298 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 310 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 322 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 334 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 346 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 358 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 370 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 382 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 394 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 406 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 418 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 430 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 442 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 454 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 466 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 478 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 490 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 502 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 514 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 526 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 538 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 550 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 562 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 574 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 586 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 598 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 610 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 622 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 634 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 646 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 658 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 670 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 682 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 694 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 706 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 718 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 730 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 742 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 754 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 766 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 778 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 790 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 802 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 814 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 826 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 838 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 850 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 862 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 874 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 886 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 898 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 910 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 922 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 934 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 946 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 958 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 970 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 982 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 994 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 1006 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 1018 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 1030 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 1042 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 1054 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 1066 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 1078 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 1090 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 1102 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 1114 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 1126 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 1138 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 1150 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 1162 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1174 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1186 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1198 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1210 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1222 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1234 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1246 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1258 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1270 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1282 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1294 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1306 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1318 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1330 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1342 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1354 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1366 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1378 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1390 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1402 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1414 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1426 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1438 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1450 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1462 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1474 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1486 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1498 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1510 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1522 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1534 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1546 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1558 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1570 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1582 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1594 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1606 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1618 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1630 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1642 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1654 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1666 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1678 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1690 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1702 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1714 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1726 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1738 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1750 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1762 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1774 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1786 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1798 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1810 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1822 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1834 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1846 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1858 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1870 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1882 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1894 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1906 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1918 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1930 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1942 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1954 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1966 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1978 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 1990 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 2002 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 2014 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 2026 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 2038 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 2050 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 2062 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 2074 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 2086 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 2098 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 2110 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 2122 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 2134 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 2146 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 2158 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 2170 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 2182 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 2194 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 2206 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 2218 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 2230 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 2242 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 2254 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 2266 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 2278 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 2290 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 2302 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 2314 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 2326 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 2338 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 2350 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 2362 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2374 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2386 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2398 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2410 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2422 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2434 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2446 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2458 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2470 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2482 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2494 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2506 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2518 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2530 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2542 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2554 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2566 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2578 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2590 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2602 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2614 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2626 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2638 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2650 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2662 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2674 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2686 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2698 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2710 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2722 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2734 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2746 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2758 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2770 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2782 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2794 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2806 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2818 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2830 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2842 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2854 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2866 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2878 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2890 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2902 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2914 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2926 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2938 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2950 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2962 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2974 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2986 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 2998 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 3010 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 3022 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 3034 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 3046 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 3058 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 3070 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 3082 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 3086 S> */ B(Wide), B(LdaLookupSlotInsideTypeof), U16(256),
+ B(TypeOf),
+ /* 3102 S> */ B(Return),
]
constant pool: [
InstanceType::HEAP_NUMBER_TYPE,
@@ -2384,523 +2384,523 @@ frame size: 1
parameter count: 1
bytecode array length: 1033
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(LdaSmi), U8(10),
- B(Wide), B(StaLookupSlotSloppy), U16(256),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 22 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 34 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 70 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 82 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 94 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 106 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 118 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 130 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 142 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 154 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 166 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 178 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 190 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 202 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 214 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 226 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 238 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 250 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 262 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 274 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 286 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 298 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 310 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 322 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 334 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 346 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 358 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 370 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 382 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 394 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 406 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 418 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 430 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 442 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 454 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 466 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 478 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 490 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 502 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 514 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 526 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 538 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 550 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 562 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 574 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 586 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 598 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 610 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 622 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 634 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 646 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 658 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 670 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 682 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 694 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 706 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 718 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 730 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 742 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 754 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 766 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 778 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 790 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 802 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 814 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 826 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 838 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 850 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 862 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 874 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 886 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 898 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 910 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 922 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 934 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 946 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 958 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 970 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 982 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 994 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 1006 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 1018 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 1030 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 1042 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 1054 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 1066 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 1078 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 1090 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 1102 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 1114 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 1126 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 1138 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 1150 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 1162 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1174 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1186 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1198 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1210 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1222 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1234 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1246 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1258 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1270 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1282 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1294 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1306 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1318 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1330 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1342 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1354 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1366 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1378 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1390 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1402 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1414 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1426 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1438 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1450 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1462 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1474 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1486 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1498 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1510 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1522 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1534 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1546 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1558 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1570 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1582 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1594 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1606 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1618 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1630 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1642 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1654 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1666 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1678 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1690 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1702 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1714 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1726 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1738 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1750 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1762 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1774 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1786 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1798 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1810 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1822 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1834 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1846 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1858 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1870 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1882 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1894 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1906 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1918 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1930 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1942 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1954 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1966 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1978 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 1990 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 2002 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 2014 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 2026 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 2038 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 2050 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 2062 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 2074 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 2086 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 2098 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 2110 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 2122 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 2134 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 2146 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 2158 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 2170 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 2182 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 2194 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 2206 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 2218 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 2230 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 2242 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 2254 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 2266 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 2278 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 2290 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 2302 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 2314 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 2326 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 2338 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 2350 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 2362 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2374 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2386 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2398 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2410 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2422 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2434 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2446 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2458 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2470 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2482 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2494 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2506 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2518 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2530 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2542 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2554 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2566 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2578 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2590 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2602 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2614 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2626 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2638 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2650 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2662 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2674 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2686 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2698 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2710 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2722 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2734 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2746 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2758 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2770 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2782 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2794 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2806 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2818 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2830 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2842 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2854 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2866 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2878 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2890 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2902 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2914 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2926 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2938 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2950 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2962 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2974 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2986 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 2998 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 3010 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 3022 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 3034 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 3046 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 3058 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 3070 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 3082 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 3086 S> */ B(LdaSmi), U8(10),
+ /* 3088 E> */ B(Wide), B(StaLookupSlotSloppy), U16(256),
+ B(LdaUndefined),
+ /* 3093 S> */ B(Return),
]
constant pool: [
InstanceType::HEAP_NUMBER_TYPE,
@@ -3438,523 +3438,523 @@ frame size: 1
parameter count: 1
bytecode array length: 1033
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(LdaSmi), U8(10),
- B(Wide), B(StaLookupSlotStrict), U16(256),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 35 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 47 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 59 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 71 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 83 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 95 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 107 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 119 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 131 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 143 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 155 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 167 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 179 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 191 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 203 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 215 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 227 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 239 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 251 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 263 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 275 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 287 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 299 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 311 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 323 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 335 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 347 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 359 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 371 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 383 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 395 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 407 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 419 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 431 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 443 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 455 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 467 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 479 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 491 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 503 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 515 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 527 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 539 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 551 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 563 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 575 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 587 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 599 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 611 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 623 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 635 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 647 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 659 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 671 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 683 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 695 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 707 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 719 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 731 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 743 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 755 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 767 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 779 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 791 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 803 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 815 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 827 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 839 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 851 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 863 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 875 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 887 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 899 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 911 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 923 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 935 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 947 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 959 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 971 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 983 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 995 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 1007 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 1019 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 1031 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 1043 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 1055 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 1067 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 1079 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 1091 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 1103 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 1115 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 1127 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 1139 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 1151 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 1163 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 1175 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1187 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1199 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1211 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1223 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1235 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1247 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1259 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1271 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1283 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1295 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1307 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1319 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1331 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1343 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1355 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1367 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1379 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1391 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1403 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1415 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1427 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1439 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1451 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1463 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1475 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1487 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1499 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1511 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1523 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1535 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1547 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1559 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1571 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1583 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1595 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1607 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1619 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1631 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1643 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1655 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1667 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1679 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1691 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1703 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1715 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1727 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1739 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1751 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1763 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1775 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1787 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1799 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1811 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1823 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1835 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1847 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1859 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1871 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1883 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1895 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1907 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1919 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1931 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1943 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1955 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1967 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1979 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1991 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 2003 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 2015 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 2027 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 2039 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 2051 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 2063 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 2075 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 2087 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 2099 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 2111 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 2123 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 2135 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 2147 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 2159 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 2171 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 2183 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 2195 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 2207 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 2219 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 2231 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 2243 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 2255 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 2267 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 2279 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 2291 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 2303 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 2315 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 2327 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 2339 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 2351 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 2363 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 2375 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2387 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2399 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2411 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2423 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2435 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2447 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2459 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2471 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2483 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2495 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2507 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2519 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2531 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2543 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2555 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2567 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2579 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2591 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2603 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2615 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2627 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2639 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2651 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2663 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2675 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2687 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2699 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2711 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2723 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2735 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2747 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2759 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2771 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2783 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2795 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2807 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2819 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2831 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2843 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2855 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2867 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2879 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2891 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2903 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2915 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2927 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2939 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2951 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2963 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2975 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2987 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2999 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 3011 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 3023 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 3035 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 3047 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 3059 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 3071 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 3083 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 3095 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 3099 S> */ B(LdaSmi), U8(10),
+ /* 3101 E> */ B(Wide), B(StaLookupSlotStrict), U16(256),
+ B(LdaUndefined),
+ /* 3106 S> */ B(Return),
]
constant pool: [
InstanceType::HEAP_NUMBER_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
index da2d35e56b..090fb0bb7a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewTarget.golden
@@ -11,22 +11,16 @@ wrap: yes
snippet: "
return new.target;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 19
+bytecode array length: 7
bytecodes: [
- B(Ldar), R(new_target),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
- B(Return),
+ B(Mov), R(new_target), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(Ldar), R(0),
+ /* 53 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
@@ -35,23 +29,16 @@ handlers: [
snippet: "
new.target;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 20
+bytecode array length: 6
bytecodes: [
- B(Ldar), R(new_target),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
- B(LdaUndefined),
- B(Return),
+ B(Mov), R(new_target), R(0),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaUndefined),
+ /* 46 S> */ B(Return),
]
constant pool: [
- InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index 063ebbf759..b9c7d0ca4d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -13,12 +13,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(7),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(35), R(0),
+ B(Ldar), R(0),
+ /* 46 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -32,12 +32,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 8
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
+ B(Ldar), R(0),
+ /* 71 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -51,17 +51,15 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 20
+bytecode array length: 17
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Ldar), R(0),
- B(StoreICSloppy), R(1), U8(1), U8(1),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ /* 75 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(1),
+ B(Ldar), R(1),
+ /* 80 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -74,22 +72,18 @@ handlers: [
snippet: "
var a = 1; return { val: a, val: a + 1 };
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 26
+bytecode array length: 21
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(Add), R(2),
- B(StoreICSloppy), R(1), U8(1), U8(1),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ /* 67 E> */ B(AddSmi), U8(1), R(0), U8(1),
+ B(StaNamedPropertySloppy), R(1), U8(1), U8(2),
+ B(Ldar), R(1),
+ /* 76 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -104,15 +98,14 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(0),
- B(CreateClosure), U8(1), U8(0),
- B(StoreICSloppy), R(0), U8(2), U8(1),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
+ B(CreateClosure), U8(1), U8(2),
+ B(StaNamedPropertySloppy), R(0), U8(2), U8(1),
+ B(Ldar), R(0),
+ /* 67 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -128,15 +121,14 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 16
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(0),
- B(CreateClosure), U8(1), U8(0),
- B(StoreICSloppy), R(0), U8(2), U8(1),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
+ B(CreateClosure), U8(1), U8(2),
+ B(StaNamedPropertySloppy), R(0), U8(2), U8(1),
+ B(Ldar), R(0),
+ /* 68 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -152,23 +144,22 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 32
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(0),
- B(Mov), R(0), R(1),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CreateClosure), U8(2), U8(0),
- B(Star), R(3),
- B(LdaNull),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CreateClosure), U8(2), U8(2),
+ B(Star), R(3),
+ B(LdaNull),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(0), R(1),
+ B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
+ B(Ldar), R(0),
+ /* 68 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -184,23 +175,22 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 35
+bytecode array length: 34
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(0),
- B(Mov), R(0), R(1),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CreateClosure), U8(2), U8(0),
- B(Star), R(3),
- B(CreateClosure), U8(3), U8(0),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(CreateClosure), U8(2), U8(2),
+ B(Star), R(3),
+ B(CreateClosure), U8(3), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(0), R(1),
+ B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
+ B(Ldar), R(0),
+ /* 102 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -217,23 +207,22 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 32
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(0),
- B(Mov), R(0), R(1),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(LdaNull),
- B(Star), R(3),
- B(CreateClosure), U8(2), U8(0),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(2),
+ B(LdaNull),
+ B(Star), R(3),
+ B(CreateClosure), U8(2), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(0), R(1),
+ B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), R(1), U8(5),
+ B(Ldar), R(0),
+ /* 74 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -249,23 +238,21 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 33
+bytecode array length: 31
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(Mov), R(1), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(3),
- B(Ldar), R(0),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(LdaSmi), U8(1),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ B(Mov), R(0), R(4),
+ /* 57 E> */ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4),
+ B(Ldar), R(1),
+ /* 62 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -279,17 +266,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 21
+bytecode array length: 20
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(7),
- B(Star), R(0),
- B(Mov), R(0), R(1),
- B(LdaNull),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(35), R(0),
+ B(LdaNull),
+ B(Star), R(2),
+ B(Mov), R(0), R(1),
+ B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2),
+ B(Ldar), R(0),
+ /* 62 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
@@ -303,26 +289,23 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 37
+bytecode array length: 33
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(1), U8(0), U8(7),
- B(Star), R(1),
- B(Mov), R(1), R(2),
- B(Ldar), R(0),
- B(ToName),
- B(Star), R(3),
- B(LdaSmi), U8(1),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(LdaZero),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(35), R(1),
+ /* 60 E> */ B(ToName), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(1), R(2),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(Ldar), R(1),
+ /* 69 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -337,28 +320,24 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 43
+bytecode array length: 37
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(1), U8(0), U8(5),
- B(Star), R(1),
- B(Ldar), R(0),
- B(StoreICSloppy), R(1), U8(2), U8(1),
- B(Mov), R(1), R(2),
- B(Ldar), R(0),
- B(ToName),
- B(Star), R(3),
- B(LdaSmi), U8(1),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(LdaZero),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(1), R(1),
+ /* 64 E> */ B(StaNamedPropertySloppy), R(1), U8(2), U8(1),
+ /* 68 E> */ B(ToName), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(1), R(2),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(Ldar), R(1),
+ /* 77 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -374,31 +353,27 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 53
+bytecode array length: 49
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(1), U8(1), U8(7),
- B(Star), R(1),
- B(Mov), R(1), R(2),
- B(Ldar), R(0),
- B(ToName),
- B(Star), R(3),
- B(LdaSmi), U8(1),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(LdaZero),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
- B(Mov), R(1), R(2),
- B(CreateObjectLiteral), U8(1), U8(0), U8(7),
- B(Star), R(4),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(1), U8(35), R(1),
+ /* 60 E> */ B(ToName), R(3),
+ B(LdaSmi), U8(1),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(1), R(2),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(CreateObjectLiteral), U8(1), U8(0), U8(35), R(4),
+ B(Mov), R(1), R(2),
+ B(Mov), R(4), R(3),
+ B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2),
+ B(Ldar), R(1),
+ /* 84 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
@@ -413,42 +388,39 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 77
+bytecode array length: 73
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(CreateObjectLiteral), U8(1), U8(0), U8(7),
- B(Star), R(1),
- B(Mov), R(1), R(2),
- B(Ldar), R(0),
- B(ToName),
- B(Star), R(3),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(LdaZero),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
- B(Mov), R(1), R(2),
- B(LdaConstant), U8(3),
- B(Star), R(3),
- B(CreateClosure), U8(4), U8(0),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), R(2), U8(4),
- B(Mov), R(1), R(2),
- B(LdaConstant), U8(3),
- B(Star), R(3),
- B(CreateClosure), U8(5), U8(0),
- B(Star), R(4),
- B(LdaZero),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kDefineSetterPropertyUnchecked), R(2), U8(4),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(0), U8(35), R(1),
+ /* 60 E> */ B(ToName), R(3),
+ B(LdaConstant), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(1), R(2),
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), U8(5),
+ B(LdaConstant), U8(3),
+ B(ToName), R(3),
+ B(CreateClosure), U8(4), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), R(2), U8(4),
+ B(LdaConstant), U8(3),
+ B(ToName), R(3),
+ B(CreateClosure), U8(5), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(5),
+ B(Mov), R(1), R(2),
+ B(CallRuntime), U16(Runtime::kDefineSetterPropertyUnchecked), R(2), U8(4),
+ B(Ldar), R(1),
+ /* 99 S> */ B(Return),
]
constant pool: [
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
index 83c6fe9658..62b1ace69d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
@@ -270,524 +270,524 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 1035
+bytecode array length: 1037
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(Wide), B(CreateObjectLiteral), U16(256), U16(0), U8(5),
- B(Star), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 41 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 51 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 61 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 71 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 81 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 91 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 101 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 111 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 121 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 131 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 141 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 151 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 161 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 171 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 181 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 191 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 201 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 211 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 221 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 231 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 241 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 251 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 261 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 271 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 281 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 291 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 301 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 311 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 321 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 331 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 341 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 351 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 361 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 371 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 381 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 391 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 401 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 411 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 421 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 431 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 441 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 451 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 461 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 471 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 481 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 491 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 501 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 511 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 521 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 531 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 541 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 551 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 561 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 571 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 581 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 591 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 601 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 611 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 621 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 631 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 641 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 651 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 661 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 671 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 681 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 691 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 701 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 711 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 721 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 731 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 741 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 751 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 761 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 771 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 781 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 791 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 801 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 811 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 821 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 831 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 841 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 851 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 861 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 871 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 881 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 891 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 901 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 911 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 921 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 931 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 941 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 951 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 961 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 971 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 981 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 991 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1001 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1011 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1021 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1031 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1041 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1051 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1061 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1071 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1081 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1091 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1101 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1111 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1121 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1131 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1141 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1151 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1161 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1171 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1181 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1191 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1201 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1211 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1221 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1231 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1241 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1251 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1261 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1271 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1281 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1291 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1301 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1311 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1321 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1331 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1341 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1351 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1361 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1371 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1381 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1391 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1401 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1411 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1421 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1431 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1441 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1451 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1461 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1471 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1481 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1491 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1501 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1511 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1521 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1531 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1541 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1551 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1561 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1571 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1581 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1591 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1601 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1611 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1621 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1631 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1641 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1651 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1661 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1671 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 1681 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 1691 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 1701 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 1711 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 1721 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 1731 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 1741 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 1751 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 1761 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 1771 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 1781 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 1791 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 1801 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 1811 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 1821 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 1831 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 1841 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 1851 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 1861 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 1871 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 1881 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 1891 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 1901 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 1911 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 1921 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 1931 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 1941 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 1951 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 1961 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 1971 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 1981 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 1991 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2001 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2011 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2021 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2031 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2041 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2051 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2061 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2071 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2081 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2091 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2101 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2111 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2121 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2131 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2141 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2151 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2161 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2171 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2181 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2191 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2201 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2211 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2221 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2231 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2241 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2251 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2261 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2271 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2281 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2291 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2301 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2311 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2321 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2331 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2341 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2351 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2361 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2371 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2381 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2391 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2401 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2411 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2421 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2431 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2441 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2451 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2461 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2471 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2481 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2491 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2501 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2511 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 2521 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 2531 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 2541 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 2551 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 2561 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 2571 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 2581 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 2591 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 2601 S> */ B(Wide), B(CreateObjectLiteral), U16(256), U16(0), U8(1), R16(1),
+ B(Ldar), R(1),
+ /* 2638 S> */ B(Return),
]
constant pool: [
InstanceType::HEAP_NUMBER_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
index 378a5b7471..e58694f982 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
@@ -22,18 +22,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 21
+bytecode array length: 16
bytecodes: [
- B(StackCheck),
- B(Ldar), R(context),
- B(Star), R(0),
- B(LdaContextSlot), R(0), U8(1),
- B(Star), R(0),
- B(LdaContextSlot), R(0), U8(4),
- B(Star), R(1),
- B(LdaContextSlot), R(context), U8(4),
- B(Mul), R(1),
- B(Return),
+ /* 97 E> */ B(StackCheck),
+ /* 102 S> */ B(LdrContextSlot), R(context), U8(1), R(0),
+ B(LdrContextSlot), R(0), U8(4), R(1),
+ /* 120 E> */ B(LdaContextSlot), R(context), U8(4),
+ B(Mul), R(1), U8(1),
+ /* 130 S> */ B(Return),
]
constant pool: [
]
@@ -54,19 +50,15 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 22
+bytecode array length: 16
bytecodes: [
- B(StackCheck),
- B(LdaContextSlot), R(context), U8(4),
- B(Star), R(0),
- B(Ldar), R(context),
- B(Star), R(1),
- B(LdaContextSlot), R(1), U8(1),
- B(Star), R(1),
- B(Ldar), R(0),
- B(StaContextSlot), R(1), U8(4),
- B(LdaUndefined),
- B(Return),
+ /* 97 E> */ B(StackCheck),
+ /* 102 S> */ B(LdrContextSlot), R(context), U8(4), R(0),
+ /* 111 E> */ B(LdrContextSlot), R(context), U8(1), R(1),
+ B(Ldar), R(0),
+ B(StaContextSlot), R(1), U8(4),
+ B(LdaUndefined),
+ /* 123 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
index e124788baf..71b6df7687 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Parameters.golden
@@ -17,9 +17,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(Ldar), R(this),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 15 S> */ B(Ldar), R(this),
+ /* 28 S> */ B(Return),
]
constant pool: [
]
@@ -35,9 +35,9 @@ frame size: 0
parameter count: 2
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(Ldar), R(arg0),
+ /* 32 S> */ B(Return),
]
constant pool: [
]
@@ -53,9 +53,9 @@ frame size: 0
parameter count: 2
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(Ldar), R(this),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(Ldar), R(this),
+ /* 32 S> */ B(Return),
]
constant pool: [
]
@@ -71,9 +71,9 @@ frame size: 0
parameter count: 8
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg3),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 55 S> */ B(Ldar), R(arg3),
+ /* 68 S> */ B(Return),
]
constant pool: [
]
@@ -89,9 +89,9 @@ frame size: 0
parameter count: 8
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(Ldar), R(this),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 55 S> */ B(Ldar), R(this),
+ /* 68 S> */ B(Return),
]
constant pool: [
]
@@ -107,11 +107,11 @@ frame size: 0
parameter count: 2
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(arg0),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(LdaSmi), U8(1),
+ B(Star), R(arg0),
+ B(LdaUndefined),
+ /* 29 S> */ B(Return),
]
constant pool: [
]
@@ -127,11 +127,11 @@ frame size: 0
parameter count: 5
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(arg1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 37 S> */ B(LdaSmi), U8(1),
+ B(Star), R(arg1),
+ B(LdaUndefined),
+ /* 47 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
index 08b20dd426..aadf2dec01 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
@@ -13,12 +13,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 5
+bytecode array length: 6
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(Nop),
+ /* 55 S> */ B(Return),
]
constant pool: [
]
@@ -29,17 +30,15 @@ handlers: [
snippet: "
var x = 0; return x + 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Add), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(AddSmi), U8(3), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -50,17 +49,15 @@ handlers: [
snippet: "
var x = 0; return x - 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 11
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Sub), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(SubSmi), U8(3), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -71,17 +68,16 @@ handlers: [
snippet: "
var x = 4; return x * 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(4),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Mul), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(3),
+ B(Mul), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -92,17 +88,16 @@ handlers: [
snippet: "
var x = 4; return x / 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(4),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Div), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(3),
+ B(Div), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -113,17 +108,16 @@ handlers: [
snippet: "
var x = 4; return x % 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(4),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Mod), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(4),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(3),
+ B(Mod), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -134,17 +128,15 @@ handlers: [
snippet: "
var x = 1; return x | 2;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(BitwiseOr), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(BitwiseOrSmi), U8(2), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -155,17 +147,16 @@ handlers: [
snippet: "
var x = 1; return x ^ 2;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(BitwiseXor), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(2),
+ B(BitwiseXor), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -176,17 +167,15 @@ handlers: [
snippet: "
var x = 1; return x & 2;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(BitwiseAnd), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(BitwiseAndSmi), U8(2), R(0), U8(1),
+ /* 59 S> */ B(Return),
]
constant pool: [
]
@@ -197,17 +186,15 @@ handlers: [
snippet: "
var x = 10; return x << 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(ShiftLeft), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 46 S> */ B(ShiftLeftSmi), U8(3), R(0), U8(1),
+ /* 61 S> */ B(Return),
]
constant pool: [
]
@@ -218,17 +205,15 @@ handlers: [
snippet: "
var x = 10; return x >> 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(ShiftRight), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 46 S> */ B(ShiftRightSmi), U8(3), R(0), U8(1),
+ /* 61 S> */ B(Return),
]
constant pool: [
]
@@ -239,17 +224,16 @@ handlers: [
snippet: "
var x = 10; return x >>> 3;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(10),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(ShiftRightLogical), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(10),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(3),
+ B(ShiftRightLogical), R(0), U8(1),
+ /* 62 S> */ B(Return),
]
constant pool: [
]
@@ -264,11 +248,11 @@ frame size: 1
parameter count: 1
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaSmi), U8(3),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(3),
+ /* 60 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
index 0a23f2b7e1..7eaaa88d05 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveReturnStatements.golden
@@ -14,9 +14,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(LdaUndefined),
+ /* 34 S> */ B(Return),
]
constant pool: [
]
@@ -31,9 +31,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaUndefined),
+ /* 42 S> */ B(Return),
]
constant pool: [
]
@@ -48,9 +48,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaNull),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaNull),
+ /* 47 S> */ B(Return),
]
constant pool: [
]
@@ -65,9 +65,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaTrue),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaTrue),
+ /* 47 S> */ B(Return),
]
constant pool: [
]
@@ -82,9 +82,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaFalse),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaFalse),
+ /* 48 S> */ B(Return),
]
constant pool: [
]
@@ -99,9 +99,9 @@ frame size: 0
parameter count: 1
bytecode array length: 3
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaZero),
+ /* 44 S> */ B(Return),
]
constant pool: [
]
@@ -116,9 +116,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(1),
+ /* 45 S> */ B(Return),
]
constant pool: [
]
@@ -133,9 +133,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(-1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(-1),
+ /* 45 S> */ B(Return),
]
constant pool: [
]
@@ -150,9 +150,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(127),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(127),
+ /* 47 S> */ B(Return),
]
constant pool: [
]
@@ -167,9 +167,26 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(-128),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(-128),
+ /* 47 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ return 2.0;
+"
+frame size: 0
+parameter count: 1
+bytecode array length: 4
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(2),
+ /* 46 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index d6108f12d6..23501bd4b9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -13,17 +13,15 @@ snippet: "
function f(a) { return a.func(); }
f(new (function Obj() { this.func = function() { return; }})())
"
-frame size: 2
+frame size: 1
parameter count: 2
-bytecode array length: 17
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(3),
- B(Star), R(0),
- B(Call), R(0), R(1), U8(1), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(Nop),
+ /* 24 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
+ /* 25 E> */ B(Call), R(0), R(arg0), U8(1), U8(1),
+ /* 33 S> */ B(Return),
]
constant pool: [
"func",
@@ -38,19 +36,17 @@ snippet: "
"
frame size: 4
parameter count: 4
-bytecode array length: 25
+bytecode array length: 24
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(3),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(2),
- B(Ldar), R(arg2),
- B(Star), R(3),
- B(Call), R(0), R(1), U8(3), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 22 S> */ B(Nop),
+ /* 30 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
+ B(Ldar), R(0),
+ B(Mov), R(arg0), R(1),
+ B(Mov), R(arg1), R(2),
+ B(Mov), R(arg2), R(3),
+ /* 31 E> */ B(Call), R(0), R(1), U8(3), U8(1),
+ /* 43 S> */ B(Return),
]
constant pool: [
"func",
@@ -65,22 +61,18 @@ snippet: "
"
frame size: 4
parameter count: 3
-bytecode array length: 31
+bytecode array length: 26
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(3),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(3),
- B(Ldar), R(arg1),
- B(Add), R(3),
- B(Star), R(2),
- B(Ldar), R(arg1),
- B(Star), R(3),
- B(Call), R(0), R(1), U8(3), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(Nop),
+ /* 27 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
+ B(Ldar), R(arg1),
+ /* 37 E> */ B(Add), R(arg1), U8(5),
+ B(Star), R(2),
+ B(Mov), R(arg0), R(1),
+ B(Mov), R(arg1), R(3),
+ /* 28 E> */ B(Call), R(0), R(1), U8(3), U8(1),
+ /* 44 S> */ B(Return),
]
constant pool: [
"func",
@@ -222,401 +214,271 @@ snippet: "
return a.func(); }
f(new (function Obj() { this.func = function() { return; }})())
"
-frame size: 2
+frame size: 1
parameter count: 2
-bytecode array length: 1050
+bytecode array length: 663
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(255),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Wide), B(LoadIC), R16(1), U16(0), U16(259),
- B(Star), R(0),
- B(Wide), B(Call), R16(0), R16(1), U16(1), U16(257),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 17 S> */ B(Nop),
+ /* 18 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 26 S> */ B(Nop),
+ /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 35 S> */ B(Nop),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 44 S> */ B(Nop),
+ /* 45 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 53 S> */ B(Nop),
+ /* 54 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 62 S> */ B(Nop),
+ /* 63 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 71 S> */ B(Nop),
+ /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 80 S> */ B(Nop),
+ /* 81 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 89 S> */ B(Nop),
+ /* 90 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 98 S> */ B(Nop),
+ /* 99 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 107 S> */ B(Nop),
+ /* 108 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 116 S> */ B(Nop),
+ /* 117 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 125 S> */ B(Nop),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 134 S> */ B(Nop),
+ /* 135 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 143 S> */ B(Nop),
+ /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 152 S> */ B(Nop),
+ /* 153 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 161 S> */ B(Nop),
+ /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 170 S> */ B(Nop),
+ /* 171 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 179 S> */ B(Nop),
+ /* 180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 188 S> */ B(Nop),
+ /* 189 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 197 S> */ B(Nop),
+ /* 198 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 206 S> */ B(Nop),
+ /* 207 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 215 S> */ B(Nop),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 224 S> */ B(Nop),
+ /* 225 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 233 S> */ B(Nop),
+ /* 234 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 242 S> */ B(Nop),
+ /* 243 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 251 S> */ B(Nop),
+ /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 260 S> */ B(Nop),
+ /* 261 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 269 S> */ B(Nop),
+ /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 278 S> */ B(Nop),
+ /* 279 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 287 S> */ B(Nop),
+ /* 288 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 296 S> */ B(Nop),
+ /* 297 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 305 S> */ B(Nop),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 314 S> */ B(Nop),
+ /* 315 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 323 S> */ B(Nop),
+ /* 324 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 332 S> */ B(Nop),
+ /* 333 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 341 S> */ B(Nop),
+ /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 350 S> */ B(Nop),
+ /* 351 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 359 S> */ B(Nop),
+ /* 360 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 368 S> */ B(Nop),
+ /* 369 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 377 S> */ B(Nop),
+ /* 378 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 386 S> */ B(Nop),
+ /* 387 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 395 S> */ B(Nop),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 404 S> */ B(Nop),
+ /* 405 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 413 S> */ B(Nop),
+ /* 414 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 422 S> */ B(Nop),
+ /* 423 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 431 S> */ B(Nop),
+ /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 440 S> */ B(Nop),
+ /* 441 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 449 S> */ B(Nop),
+ /* 450 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 458 S> */ B(Nop),
+ /* 459 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 467 S> */ B(Nop),
+ /* 468 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 476 S> */ B(Nop),
+ /* 477 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 485 S> */ B(Nop),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 494 S> */ B(Nop),
+ /* 495 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 503 S> */ B(Nop),
+ /* 504 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 512 S> */ B(Nop),
+ /* 513 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 521 S> */ B(Nop),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 530 S> */ B(Nop),
+ /* 531 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 539 S> */ B(Nop),
+ /* 540 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 548 S> */ B(Nop),
+ /* 549 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 557 S> */ B(Nop),
+ /* 558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 566 S> */ B(Nop),
+ /* 567 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 575 S> */ B(Nop),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 584 S> */ B(Nop),
+ /* 585 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 593 S> */ B(Nop),
+ /* 594 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 602 S> */ B(Nop),
+ /* 603 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 611 S> */ B(Nop),
+ /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 620 S> */ B(Nop),
+ /* 621 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 629 S> */ B(Nop),
+ /* 630 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 638 S> */ B(Nop),
+ /* 639 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 647 S> */ B(Nop),
+ /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 656 S> */ B(Nop),
+ /* 657 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 665 S> */ B(Nop),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 674 S> */ B(Nop),
+ /* 675 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 683 S> */ B(Nop),
+ /* 684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 692 S> */ B(Nop),
+ /* 693 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 701 S> */ B(Nop),
+ /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 710 S> */ B(Nop),
+ /* 711 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 719 S> */ B(Nop),
+ /* 720 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 728 S> */ B(Nop),
+ /* 729 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 737 S> */ B(Nop),
+ /* 738 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 746 S> */ B(Nop),
+ /* 747 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 755 S> */ B(Nop),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 764 S> */ B(Nop),
+ /* 765 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 773 S> */ B(Nop),
+ /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 782 S> */ B(Nop),
+ /* 783 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 791 S> */ B(Nop),
+ /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 800 S> */ B(Nop),
+ /* 801 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 809 S> */ B(Nop),
+ /* 810 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 818 S> */ B(Nop),
+ /* 819 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 827 S> */ B(Nop),
+ /* 828 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 836 S> */ B(Nop),
+ /* 837 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 845 S> */ B(Nop),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 854 S> */ B(Nop),
+ /* 855 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 863 S> */ B(Nop),
+ /* 864 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 872 S> */ B(Nop),
+ /* 873 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 881 S> */ B(Nop),
+ /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 890 S> */ B(Nop),
+ /* 891 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 899 S> */ B(Nop),
+ /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 908 S> */ B(Nop),
+ /* 909 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 917 S> */ B(Nop),
+ /* 918 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 926 S> */ B(Nop),
+ /* 927 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 935 S> */ B(Nop),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 944 S> */ B(Nop),
+ /* 945 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 953 S> */ B(Nop),
+ /* 954 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 962 S> */ B(Nop),
+ /* 963 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 971 S> */ B(Nop),
+ /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 980 S> */ B(Nop),
+ /* 981 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 989 S> */ B(Nop),
+ /* 990 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 998 S> */ B(Nop),
+ /* 999 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 1007 S> */ B(Nop),
+ /* 1008 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1016 S> */ B(Nop),
+ /* 1017 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1025 S> */ B(Nop),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1034 S> */ B(Nop),
+ /* 1035 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1043 S> */ B(Nop),
+ /* 1044 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1052 S> */ B(Nop),
+ /* 1053 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1061 S> */ B(Nop),
+ /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1070 S> */ B(Nop),
+ /* 1071 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1079 S> */ B(Nop),
+ /* 1080 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1088 S> */ B(Nop),
+ /* 1089 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1097 S> */ B(Nop),
+ /* 1098 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1106 S> */ B(Nop),
+ /* 1107 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1115 S> */ B(Nop),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1124 S> */ B(Nop),
+ /* 1125 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1133 S> */ B(Nop),
+ /* 1134 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1142 S> */ B(Nop),
+ /* 1143 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1151 S> */ B(Nop),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1160 S> */ B(Nop),
+ /* 1161 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
+ /* 1169 S> */ B(Nop),
+ /* 1177 E> */ B(Wide), B(LdrNamedProperty), R16(arg0), U16(0), U16(259), R16(0),
+ /* 1178 E> */ B(Wide), B(Call), R16(0), R16(arg0), U16(1), U16(257),
+ /* 1186 S> */ B(Return),
]
constant pool: [
"func",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
index 7d5a58e623..cee0357ab8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -13,15 +13,14 @@ snippet: "
function f(a) { return a.name; }
f({name : \"test\"});
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 10
+bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(Nop),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 31 S> */ B(Return),
]
constant pool: [
"name",
@@ -34,15 +33,14 @@ snippet: "
function f(a) { return a[\"key\"]; }
f({key : \"test\"});
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 10
+bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(Nop),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 33 S> */ B(Return),
]
constant pool: [
"key",
@@ -55,16 +53,14 @@ snippet: "
function f(a) { return a[100]; }
f({100 : \"test\"});
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 11
+bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(100),
- B(KeyedLoadIC), R(0), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(LdaSmi), U8(100),
+ /* 24 E> */ B(LdaKeyedProperty), R(arg0), U8(1),
+ /* 31 S> */ B(Return),
]
constant pool: [
]
@@ -76,16 +72,14 @@ snippet: "
function f(a, b) { return a[b]; }
f({arg : \"test\"}, \"arg\");
"
-frame size: 1
+frame size: 0
parameter count: 3
-bytecode array length: 11
+bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(0), U8(1),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(Ldar), R(arg1),
+ /* 28 E> */ B(LdaKeyedProperty), R(arg0), U8(1),
+ /* 32 S> */ B(Return),
]
constant pool: [
]
@@ -97,20 +91,17 @@ snippet: "
function f(a) { var b = a.name; return a[-124]; }
f({\"-124\" : \"test\", name : 123 })
"
-frame size: 2
+frame size: 1
parameter count: 2
-bytecode array length: 21
+bytecode array length: 15
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(1),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LdaSmi), U8(-124),
- B(KeyedLoadIC), R(1), U8(3),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 25 S> */ B(Nop),
+ /* 25 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(1), R(0),
+ B(Ldar), R(0),
+ /* 32 S> */ B(LdaSmi), U8(-124),
+ /* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(3),
+ /* 48 S> */ B(Return),
]
constant pool: [
"name",
@@ -254,527 +245,398 @@ snippet: "
}
f({name : \"test\"})
"
-frame size: 2
+frame size: 1
parameter count: 2
-bytecode array length: 1294
+bytecode array length: 1035
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(1),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(3),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(5),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(7),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(9),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(11),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(13),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(15),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(17),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(19),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(21),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(23),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(25),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(27),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(29),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(31),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(33),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(35),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(37),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(39),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(41),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(43),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(45),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(47),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(49),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(51),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(53),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(55),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(57),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(59),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(61),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(63),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(65),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(67),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(69),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(71),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(73),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(75),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(77),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(79),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(81),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(83),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(85),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(87),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(89),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(91),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(93),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(95),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(97),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(99),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(101),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(103),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(105),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(107),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(109),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(111),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(113),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(115),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(117),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(119),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(121),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(123),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(125),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(127),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(129),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(131),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(133),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(135),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(137),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(139),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(141),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(143),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(145),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(147),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(149),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(151),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(153),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(155),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(157),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(159),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(161),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(163),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(165),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(167),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(169),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(171),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(173),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(175),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(177),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(179),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(181),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(183),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(185),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(187),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(189),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(191),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(193),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(195),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(197),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(199),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(201),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(203),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(205),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(207),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(209),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(211),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(213),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(215),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(217),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(219),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(221),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(223),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(225),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(227),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(229),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(231),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(233),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(235),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(237),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(239),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(241),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(243),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(245),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(247),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(249),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(251),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(253),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(0), U8(255),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Wide), B(LoadIC), R16(1), U16(0), U16(257),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 27 S> */ B(Nop),
+ /* 32 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(1), R(0),
+ B(Ldar), R(0),
+ /* 41 S> */ B(Nop),
+ /* 46 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(3), R(0),
+ B(Ldar), R(0),
+ /* 55 S> */ B(Nop),
+ /* 60 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(5), R(0),
+ B(Ldar), R(0),
+ /* 69 S> */ B(Nop),
+ /* 74 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(7), R(0),
+ B(Ldar), R(0),
+ /* 83 S> */ B(Nop),
+ /* 88 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(9), R(0),
+ B(Ldar), R(0),
+ /* 97 S> */ B(Nop),
+ /* 102 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(11), R(0),
+ B(Ldar), R(0),
+ /* 111 S> */ B(Nop),
+ /* 116 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(13), R(0),
+ B(Ldar), R(0),
+ /* 125 S> */ B(Nop),
+ /* 130 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(15), R(0),
+ B(Ldar), R(0),
+ /* 139 S> */ B(Nop),
+ /* 144 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(17), R(0),
+ B(Ldar), R(0),
+ /* 153 S> */ B(Nop),
+ /* 158 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(19), R(0),
+ B(Ldar), R(0),
+ /* 167 S> */ B(Nop),
+ /* 172 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(21), R(0),
+ B(Ldar), R(0),
+ /* 181 S> */ B(Nop),
+ /* 186 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(23), R(0),
+ B(Ldar), R(0),
+ /* 195 S> */ B(Nop),
+ /* 200 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(25), R(0),
+ B(Ldar), R(0),
+ /* 209 S> */ B(Nop),
+ /* 214 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(27), R(0),
+ B(Ldar), R(0),
+ /* 223 S> */ B(Nop),
+ /* 228 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(29), R(0),
+ B(Ldar), R(0),
+ /* 237 S> */ B(Nop),
+ /* 242 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(31), R(0),
+ B(Ldar), R(0),
+ /* 251 S> */ B(Nop),
+ /* 256 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(33), R(0),
+ B(Ldar), R(0),
+ /* 265 S> */ B(Nop),
+ /* 270 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(35), R(0),
+ B(Ldar), R(0),
+ /* 279 S> */ B(Nop),
+ /* 284 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(37), R(0),
+ B(Ldar), R(0),
+ /* 293 S> */ B(Nop),
+ /* 298 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(39), R(0),
+ B(Ldar), R(0),
+ /* 307 S> */ B(Nop),
+ /* 312 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(41), R(0),
+ B(Ldar), R(0),
+ /* 321 S> */ B(Nop),
+ /* 326 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(43), R(0),
+ B(Ldar), R(0),
+ /* 335 S> */ B(Nop),
+ /* 340 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(45), R(0),
+ B(Ldar), R(0),
+ /* 349 S> */ B(Nop),
+ /* 354 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(47), R(0),
+ B(Ldar), R(0),
+ /* 363 S> */ B(Nop),
+ /* 368 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(49), R(0),
+ B(Ldar), R(0),
+ /* 377 S> */ B(Nop),
+ /* 382 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(51), R(0),
+ B(Ldar), R(0),
+ /* 391 S> */ B(Nop),
+ /* 396 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(53), R(0),
+ B(Ldar), R(0),
+ /* 405 S> */ B(Nop),
+ /* 410 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(55), R(0),
+ B(Ldar), R(0),
+ /* 419 S> */ B(Nop),
+ /* 424 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(57), R(0),
+ B(Ldar), R(0),
+ /* 433 S> */ B(Nop),
+ /* 438 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(59), R(0),
+ B(Ldar), R(0),
+ /* 447 S> */ B(Nop),
+ /* 452 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(61), R(0),
+ B(Ldar), R(0),
+ /* 461 S> */ B(Nop),
+ /* 466 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(63), R(0),
+ B(Ldar), R(0),
+ /* 475 S> */ B(Nop),
+ /* 480 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(65), R(0),
+ B(Ldar), R(0),
+ /* 489 S> */ B(Nop),
+ /* 494 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(67), R(0),
+ B(Ldar), R(0),
+ /* 503 S> */ B(Nop),
+ /* 508 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(69), R(0),
+ B(Ldar), R(0),
+ /* 517 S> */ B(Nop),
+ /* 522 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(71), R(0),
+ B(Ldar), R(0),
+ /* 531 S> */ B(Nop),
+ /* 536 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(73), R(0),
+ B(Ldar), R(0),
+ /* 545 S> */ B(Nop),
+ /* 550 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(75), R(0),
+ B(Ldar), R(0),
+ /* 559 S> */ B(Nop),
+ /* 564 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(77), R(0),
+ B(Ldar), R(0),
+ /* 573 S> */ B(Nop),
+ /* 578 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(79), R(0),
+ B(Ldar), R(0),
+ /* 587 S> */ B(Nop),
+ /* 592 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(81), R(0),
+ B(Ldar), R(0),
+ /* 601 S> */ B(Nop),
+ /* 606 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(83), R(0),
+ B(Ldar), R(0),
+ /* 615 S> */ B(Nop),
+ /* 620 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(85), R(0),
+ B(Ldar), R(0),
+ /* 629 S> */ B(Nop),
+ /* 634 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(87), R(0),
+ B(Ldar), R(0),
+ /* 643 S> */ B(Nop),
+ /* 648 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(89), R(0),
+ B(Ldar), R(0),
+ /* 657 S> */ B(Nop),
+ /* 662 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(91), R(0),
+ B(Ldar), R(0),
+ /* 671 S> */ B(Nop),
+ /* 676 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(93), R(0),
+ B(Ldar), R(0),
+ /* 685 S> */ B(Nop),
+ /* 690 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(95), R(0),
+ B(Ldar), R(0),
+ /* 699 S> */ B(Nop),
+ /* 704 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(97), R(0),
+ B(Ldar), R(0),
+ /* 713 S> */ B(Nop),
+ /* 718 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(99), R(0),
+ B(Ldar), R(0),
+ /* 727 S> */ B(Nop),
+ /* 732 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(101), R(0),
+ B(Ldar), R(0),
+ /* 741 S> */ B(Nop),
+ /* 746 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(103), R(0),
+ B(Ldar), R(0),
+ /* 755 S> */ B(Nop),
+ /* 760 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(105), R(0),
+ B(Ldar), R(0),
+ /* 769 S> */ B(Nop),
+ /* 774 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(107), R(0),
+ B(Ldar), R(0),
+ /* 783 S> */ B(Nop),
+ /* 788 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(109), R(0),
+ B(Ldar), R(0),
+ /* 797 S> */ B(Nop),
+ /* 802 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(111), R(0),
+ B(Ldar), R(0),
+ /* 811 S> */ B(Nop),
+ /* 816 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(113), R(0),
+ B(Ldar), R(0),
+ /* 825 S> */ B(Nop),
+ /* 830 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(115), R(0),
+ B(Ldar), R(0),
+ /* 839 S> */ B(Nop),
+ /* 844 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(117), R(0),
+ B(Ldar), R(0),
+ /* 853 S> */ B(Nop),
+ /* 858 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(119), R(0),
+ B(Ldar), R(0),
+ /* 867 S> */ B(Nop),
+ /* 872 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(121), R(0),
+ B(Ldar), R(0),
+ /* 881 S> */ B(Nop),
+ /* 886 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(123), R(0),
+ B(Ldar), R(0),
+ /* 895 S> */ B(Nop),
+ /* 900 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(125), R(0),
+ B(Ldar), R(0),
+ /* 909 S> */ B(Nop),
+ /* 914 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(127), R(0),
+ B(Ldar), R(0),
+ /* 923 S> */ B(Nop),
+ /* 928 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(129), R(0),
+ B(Ldar), R(0),
+ /* 937 S> */ B(Nop),
+ /* 942 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(131), R(0),
+ B(Ldar), R(0),
+ /* 951 S> */ B(Nop),
+ /* 956 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(133), R(0),
+ B(Ldar), R(0),
+ /* 965 S> */ B(Nop),
+ /* 970 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(135), R(0),
+ B(Ldar), R(0),
+ /* 979 S> */ B(Nop),
+ /* 984 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(137), R(0),
+ B(Ldar), R(0),
+ /* 993 S> */ B(Nop),
+ /* 998 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(139), R(0),
+ B(Ldar), R(0),
+ /* 1007 S> */ B(Nop),
+ /* 1012 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(141), R(0),
+ B(Ldar), R(0),
+ /* 1021 S> */ B(Nop),
+ /* 1026 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(143), R(0),
+ B(Ldar), R(0),
+ /* 1035 S> */ B(Nop),
+ /* 1040 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(145), R(0),
+ B(Ldar), R(0),
+ /* 1049 S> */ B(Nop),
+ /* 1054 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(147), R(0),
+ B(Ldar), R(0),
+ /* 1063 S> */ B(Nop),
+ /* 1068 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(149), R(0),
+ B(Ldar), R(0),
+ /* 1077 S> */ B(Nop),
+ /* 1082 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(151), R(0),
+ B(Ldar), R(0),
+ /* 1091 S> */ B(Nop),
+ /* 1096 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(153), R(0),
+ B(Ldar), R(0),
+ /* 1105 S> */ B(Nop),
+ /* 1110 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(155), R(0),
+ B(Ldar), R(0),
+ /* 1119 S> */ B(Nop),
+ /* 1124 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(157), R(0),
+ B(Ldar), R(0),
+ /* 1133 S> */ B(Nop),
+ /* 1138 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(159), R(0),
+ B(Ldar), R(0),
+ /* 1147 S> */ B(Nop),
+ /* 1152 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(161), R(0),
+ B(Ldar), R(0),
+ /* 1161 S> */ B(Nop),
+ /* 1166 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(163), R(0),
+ B(Ldar), R(0),
+ /* 1175 S> */ B(Nop),
+ /* 1180 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(165), R(0),
+ B(Ldar), R(0),
+ /* 1189 S> */ B(Nop),
+ /* 1194 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(167), R(0),
+ B(Ldar), R(0),
+ /* 1203 S> */ B(Nop),
+ /* 1208 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(169), R(0),
+ B(Ldar), R(0),
+ /* 1217 S> */ B(Nop),
+ /* 1222 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(171), R(0),
+ B(Ldar), R(0),
+ /* 1231 S> */ B(Nop),
+ /* 1236 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(173), R(0),
+ B(Ldar), R(0),
+ /* 1245 S> */ B(Nop),
+ /* 1250 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(175), R(0),
+ B(Ldar), R(0),
+ /* 1259 S> */ B(Nop),
+ /* 1264 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(177), R(0),
+ B(Ldar), R(0),
+ /* 1273 S> */ B(Nop),
+ /* 1278 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(179), R(0),
+ B(Ldar), R(0),
+ /* 1287 S> */ B(Nop),
+ /* 1292 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(181), R(0),
+ B(Ldar), R(0),
+ /* 1301 S> */ B(Nop),
+ /* 1306 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(183), R(0),
+ B(Ldar), R(0),
+ /* 1315 S> */ B(Nop),
+ /* 1320 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(185), R(0),
+ B(Ldar), R(0),
+ /* 1329 S> */ B(Nop),
+ /* 1334 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(187), R(0),
+ B(Ldar), R(0),
+ /* 1343 S> */ B(Nop),
+ /* 1348 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(189), R(0),
+ B(Ldar), R(0),
+ /* 1357 S> */ B(Nop),
+ /* 1362 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(191), R(0),
+ B(Ldar), R(0),
+ /* 1371 S> */ B(Nop),
+ /* 1376 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(193), R(0),
+ B(Ldar), R(0),
+ /* 1385 S> */ B(Nop),
+ /* 1390 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(195), R(0),
+ B(Ldar), R(0),
+ /* 1399 S> */ B(Nop),
+ /* 1404 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(197), R(0),
+ B(Ldar), R(0),
+ /* 1413 S> */ B(Nop),
+ /* 1418 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(199), R(0),
+ B(Ldar), R(0),
+ /* 1427 S> */ B(Nop),
+ /* 1432 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(201), R(0),
+ B(Ldar), R(0),
+ /* 1441 S> */ B(Nop),
+ /* 1446 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(203), R(0),
+ B(Ldar), R(0),
+ /* 1455 S> */ B(Nop),
+ /* 1460 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(205), R(0),
+ B(Ldar), R(0),
+ /* 1469 S> */ B(Nop),
+ /* 1474 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(207), R(0),
+ B(Ldar), R(0),
+ /* 1483 S> */ B(Nop),
+ /* 1488 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(209), R(0),
+ B(Ldar), R(0),
+ /* 1497 S> */ B(Nop),
+ /* 1502 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(211), R(0),
+ B(Ldar), R(0),
+ /* 1511 S> */ B(Nop),
+ /* 1516 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(213), R(0),
+ B(Ldar), R(0),
+ /* 1525 S> */ B(Nop),
+ /* 1530 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(215), R(0),
+ B(Ldar), R(0),
+ /* 1539 S> */ B(Nop),
+ /* 1544 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(217), R(0),
+ B(Ldar), R(0),
+ /* 1553 S> */ B(Nop),
+ /* 1558 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(219), R(0),
+ B(Ldar), R(0),
+ /* 1567 S> */ B(Nop),
+ /* 1572 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(221), R(0),
+ B(Ldar), R(0),
+ /* 1581 S> */ B(Nop),
+ /* 1586 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(223), R(0),
+ B(Ldar), R(0),
+ /* 1595 S> */ B(Nop),
+ /* 1600 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(225), R(0),
+ B(Ldar), R(0),
+ /* 1609 S> */ B(Nop),
+ /* 1614 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(227), R(0),
+ B(Ldar), R(0),
+ /* 1623 S> */ B(Nop),
+ /* 1628 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(229), R(0),
+ B(Ldar), R(0),
+ /* 1637 S> */ B(Nop),
+ /* 1642 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(231), R(0),
+ B(Ldar), R(0),
+ /* 1651 S> */ B(Nop),
+ /* 1656 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(233), R(0),
+ B(Ldar), R(0),
+ /* 1665 S> */ B(Nop),
+ /* 1670 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(235), R(0),
+ B(Ldar), R(0),
+ /* 1679 S> */ B(Nop),
+ /* 1684 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(237), R(0),
+ B(Ldar), R(0),
+ /* 1693 S> */ B(Nop),
+ /* 1698 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(239), R(0),
+ B(Ldar), R(0),
+ /* 1707 S> */ B(Nop),
+ /* 1712 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(241), R(0),
+ B(Ldar), R(0),
+ /* 1721 S> */ B(Nop),
+ /* 1726 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(243), R(0),
+ B(Ldar), R(0),
+ /* 1735 S> */ B(Nop),
+ /* 1740 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(245), R(0),
+ B(Ldar), R(0),
+ /* 1749 S> */ B(Nop),
+ /* 1754 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(247), R(0),
+ B(Ldar), R(0),
+ /* 1763 S> */ B(Nop),
+ /* 1768 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(249), R(0),
+ B(Ldar), R(0),
+ /* 1777 S> */ B(Nop),
+ /* 1782 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(251), R(0),
+ B(Ldar), R(0),
+ /* 1791 S> */ B(Nop),
+ /* 1796 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(253), R(0),
+ B(Ldar), R(0),
+ /* 1805 S> */ B(Nop),
+ /* 1810 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(255), R(0),
+ B(Ldar), R(0),
+ /* 1819 S> */ B(Nop),
+ /* 1827 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(257),
+ /* 1834 S> */ B(Return),
]
constant pool: [
"name",
@@ -918,656 +780,398 @@ snippet: "
}
f({name : \"test\"}, \"name\")
"
-frame size: 2
+frame size: 1
parameter count: 3
-bytecode array length: 1422
+bytecode array length: 1034
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(1),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(3),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(5),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(7),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(9),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(11),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(13),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(15),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(17),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(19),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(21),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(23),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(25),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(27),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(29),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(31),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(33),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(35),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(37),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(39),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(41),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(43),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(45),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(47),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(49),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(51),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(53),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(55),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(57),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(59),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(61),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(63),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(65),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(67),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(69),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(71),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(73),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(75),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(77),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(79),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(81),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(83),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(85),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(87),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(89),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(91),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(93),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(95),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(97),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(99),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(101),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(103),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(105),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(107),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(109),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(111),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(113),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(115),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(117),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(119),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(121),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(123),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(125),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(127),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(129),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(131),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(133),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(135),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(137),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(139),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(141),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(143),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(145),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(147),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(149),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(151),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(153),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(155),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(157),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(159),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(161),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(163),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(165),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(167),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(169),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(171),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(173),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(175),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(177),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(179),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(181),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(183),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(185),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(187),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(189),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(191),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(193),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(195),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(197),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(199),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(201),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(203),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(205),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(207),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(209),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(211),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(213),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(215),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(217),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(219),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(221),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(223),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(225),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(227),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(229),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(231),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(233),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(235),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(237),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(239),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(241),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(243),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(245),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(247),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(249),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(251),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(253),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(KeyedLoadIC), R(1), U8(255),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(Ldar), R(arg1),
- B(Wide), B(KeyedLoadIC), R16(1), U16(257),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 30 S> */ B(Ldar), R(arg1),
+ /* 36 E> */ B(LdrKeyedProperty), R(arg0), U8(1), R(0),
+ B(Ldar), R(0),
+ /* 42 S> */ B(Ldar), R(arg1),
+ /* 48 E> */ B(LdrKeyedProperty), R(arg0), U8(3), R(0),
+ B(Ldar), R(0),
+ /* 54 S> */ B(Ldar), R(arg1),
+ /* 60 E> */ B(LdrKeyedProperty), R(arg0), U8(5), R(0),
+ B(Ldar), R(0),
+ /* 66 S> */ B(Ldar), R(arg1),
+ /* 72 E> */ B(LdrKeyedProperty), R(arg0), U8(7), R(0),
+ B(Ldar), R(0),
+ /* 78 S> */ B(Ldar), R(arg1),
+ /* 84 E> */ B(LdrKeyedProperty), R(arg0), U8(9), R(0),
+ B(Ldar), R(0),
+ /* 90 S> */ B(Ldar), R(arg1),
+ /* 96 E> */ B(LdrKeyedProperty), R(arg0), U8(11), R(0),
+ B(Ldar), R(0),
+ /* 102 S> */ B(Ldar), R(arg1),
+ /* 108 E> */ B(LdrKeyedProperty), R(arg0), U8(13), R(0),
+ B(Ldar), R(0),
+ /* 114 S> */ B(Ldar), R(arg1),
+ /* 120 E> */ B(LdrKeyedProperty), R(arg0), U8(15), R(0),
+ B(Ldar), R(0),
+ /* 126 S> */ B(Ldar), R(arg1),
+ /* 132 E> */ B(LdrKeyedProperty), R(arg0), U8(17), R(0),
+ B(Ldar), R(0),
+ /* 138 S> */ B(Ldar), R(arg1),
+ /* 144 E> */ B(LdrKeyedProperty), R(arg0), U8(19), R(0),
+ B(Ldar), R(0),
+ /* 150 S> */ B(Ldar), R(arg1),
+ /* 156 E> */ B(LdrKeyedProperty), R(arg0), U8(21), R(0),
+ B(Ldar), R(0),
+ /* 162 S> */ B(Ldar), R(arg1),
+ /* 168 E> */ B(LdrKeyedProperty), R(arg0), U8(23), R(0),
+ B(Ldar), R(0),
+ /* 174 S> */ B(Ldar), R(arg1),
+ /* 180 E> */ B(LdrKeyedProperty), R(arg0), U8(25), R(0),
+ B(Ldar), R(0),
+ /* 186 S> */ B(Ldar), R(arg1),
+ /* 192 E> */ B(LdrKeyedProperty), R(arg0), U8(27), R(0),
+ B(Ldar), R(0),
+ /* 198 S> */ B(Ldar), R(arg1),
+ /* 204 E> */ B(LdrKeyedProperty), R(arg0), U8(29), R(0),
+ B(Ldar), R(0),
+ /* 210 S> */ B(Ldar), R(arg1),
+ /* 216 E> */ B(LdrKeyedProperty), R(arg0), U8(31), R(0),
+ B(Ldar), R(0),
+ /* 222 S> */ B(Ldar), R(arg1),
+ /* 228 E> */ B(LdrKeyedProperty), R(arg0), U8(33), R(0),
+ B(Ldar), R(0),
+ /* 234 S> */ B(Ldar), R(arg1),
+ /* 240 E> */ B(LdrKeyedProperty), R(arg0), U8(35), R(0),
+ B(Ldar), R(0),
+ /* 246 S> */ B(Ldar), R(arg1),
+ /* 252 E> */ B(LdrKeyedProperty), R(arg0), U8(37), R(0),
+ B(Ldar), R(0),
+ /* 258 S> */ B(Ldar), R(arg1),
+ /* 264 E> */ B(LdrKeyedProperty), R(arg0), U8(39), R(0),
+ B(Ldar), R(0),
+ /* 270 S> */ B(Ldar), R(arg1),
+ /* 276 E> */ B(LdrKeyedProperty), R(arg0), U8(41), R(0),
+ B(Ldar), R(0),
+ /* 282 S> */ B(Ldar), R(arg1),
+ /* 288 E> */ B(LdrKeyedProperty), R(arg0), U8(43), R(0),
+ B(Ldar), R(0),
+ /* 294 S> */ B(Ldar), R(arg1),
+ /* 300 E> */ B(LdrKeyedProperty), R(arg0), U8(45), R(0),
+ B(Ldar), R(0),
+ /* 306 S> */ B(Ldar), R(arg1),
+ /* 312 E> */ B(LdrKeyedProperty), R(arg0), U8(47), R(0),
+ B(Ldar), R(0),
+ /* 318 S> */ B(Ldar), R(arg1),
+ /* 324 E> */ B(LdrKeyedProperty), R(arg0), U8(49), R(0),
+ B(Ldar), R(0),
+ /* 330 S> */ B(Ldar), R(arg1),
+ /* 336 E> */ B(LdrKeyedProperty), R(arg0), U8(51), R(0),
+ B(Ldar), R(0),
+ /* 342 S> */ B(Ldar), R(arg1),
+ /* 348 E> */ B(LdrKeyedProperty), R(arg0), U8(53), R(0),
+ B(Ldar), R(0),
+ /* 354 S> */ B(Ldar), R(arg1),
+ /* 360 E> */ B(LdrKeyedProperty), R(arg0), U8(55), R(0),
+ B(Ldar), R(0),
+ /* 366 S> */ B(Ldar), R(arg1),
+ /* 372 E> */ B(LdrKeyedProperty), R(arg0), U8(57), R(0),
+ B(Ldar), R(0),
+ /* 378 S> */ B(Ldar), R(arg1),
+ /* 384 E> */ B(LdrKeyedProperty), R(arg0), U8(59), R(0),
+ B(Ldar), R(0),
+ /* 390 S> */ B(Ldar), R(arg1),
+ /* 396 E> */ B(LdrKeyedProperty), R(arg0), U8(61), R(0),
+ B(Ldar), R(0),
+ /* 402 S> */ B(Ldar), R(arg1),
+ /* 408 E> */ B(LdrKeyedProperty), R(arg0), U8(63), R(0),
+ B(Ldar), R(0),
+ /* 414 S> */ B(Ldar), R(arg1),
+ /* 420 E> */ B(LdrKeyedProperty), R(arg0), U8(65), R(0),
+ B(Ldar), R(0),
+ /* 426 S> */ B(Ldar), R(arg1),
+ /* 432 E> */ B(LdrKeyedProperty), R(arg0), U8(67), R(0),
+ B(Ldar), R(0),
+ /* 438 S> */ B(Ldar), R(arg1),
+ /* 444 E> */ B(LdrKeyedProperty), R(arg0), U8(69), R(0),
+ B(Ldar), R(0),
+ /* 450 S> */ B(Ldar), R(arg1),
+ /* 456 E> */ B(LdrKeyedProperty), R(arg0), U8(71), R(0),
+ B(Ldar), R(0),
+ /* 462 S> */ B(Ldar), R(arg1),
+ /* 468 E> */ B(LdrKeyedProperty), R(arg0), U8(73), R(0),
+ B(Ldar), R(0),
+ /* 474 S> */ B(Ldar), R(arg1),
+ /* 480 E> */ B(LdrKeyedProperty), R(arg0), U8(75), R(0),
+ B(Ldar), R(0),
+ /* 486 S> */ B(Ldar), R(arg1),
+ /* 492 E> */ B(LdrKeyedProperty), R(arg0), U8(77), R(0),
+ B(Ldar), R(0),
+ /* 498 S> */ B(Ldar), R(arg1),
+ /* 504 E> */ B(LdrKeyedProperty), R(arg0), U8(79), R(0),
+ B(Ldar), R(0),
+ /* 510 S> */ B(Ldar), R(arg1),
+ /* 516 E> */ B(LdrKeyedProperty), R(arg0), U8(81), R(0),
+ B(Ldar), R(0),
+ /* 522 S> */ B(Ldar), R(arg1),
+ /* 528 E> */ B(LdrKeyedProperty), R(arg0), U8(83), R(0),
+ B(Ldar), R(0),
+ /* 534 S> */ B(Ldar), R(arg1),
+ /* 540 E> */ B(LdrKeyedProperty), R(arg0), U8(85), R(0),
+ B(Ldar), R(0),
+ /* 546 S> */ B(Ldar), R(arg1),
+ /* 552 E> */ B(LdrKeyedProperty), R(arg0), U8(87), R(0),
+ B(Ldar), R(0),
+ /* 558 S> */ B(Ldar), R(arg1),
+ /* 564 E> */ B(LdrKeyedProperty), R(arg0), U8(89), R(0),
+ B(Ldar), R(0),
+ /* 570 S> */ B(Ldar), R(arg1),
+ /* 576 E> */ B(LdrKeyedProperty), R(arg0), U8(91), R(0),
+ B(Ldar), R(0),
+ /* 582 S> */ B(Ldar), R(arg1),
+ /* 588 E> */ B(LdrKeyedProperty), R(arg0), U8(93), R(0),
+ B(Ldar), R(0),
+ /* 594 S> */ B(Ldar), R(arg1),
+ /* 600 E> */ B(LdrKeyedProperty), R(arg0), U8(95), R(0),
+ B(Ldar), R(0),
+ /* 606 S> */ B(Ldar), R(arg1),
+ /* 612 E> */ B(LdrKeyedProperty), R(arg0), U8(97), R(0),
+ B(Ldar), R(0),
+ /* 618 S> */ B(Ldar), R(arg1),
+ /* 624 E> */ B(LdrKeyedProperty), R(arg0), U8(99), R(0),
+ B(Ldar), R(0),
+ /* 630 S> */ B(Ldar), R(arg1),
+ /* 636 E> */ B(LdrKeyedProperty), R(arg0), U8(101), R(0),
+ B(Ldar), R(0),
+ /* 642 S> */ B(Ldar), R(arg1),
+ /* 648 E> */ B(LdrKeyedProperty), R(arg0), U8(103), R(0),
+ B(Ldar), R(0),
+ /* 654 S> */ B(Ldar), R(arg1),
+ /* 660 E> */ B(LdrKeyedProperty), R(arg0), U8(105), R(0),
+ B(Ldar), R(0),
+ /* 666 S> */ B(Ldar), R(arg1),
+ /* 672 E> */ B(LdrKeyedProperty), R(arg0), U8(107), R(0),
+ B(Ldar), R(0),
+ /* 678 S> */ B(Ldar), R(arg1),
+ /* 684 E> */ B(LdrKeyedProperty), R(arg0), U8(109), R(0),
+ B(Ldar), R(0),
+ /* 690 S> */ B(Ldar), R(arg1),
+ /* 696 E> */ B(LdrKeyedProperty), R(arg0), U8(111), R(0),
+ B(Ldar), R(0),
+ /* 702 S> */ B(Ldar), R(arg1),
+ /* 708 E> */ B(LdrKeyedProperty), R(arg0), U8(113), R(0),
+ B(Ldar), R(0),
+ /* 714 S> */ B(Ldar), R(arg1),
+ /* 720 E> */ B(LdrKeyedProperty), R(arg0), U8(115), R(0),
+ B(Ldar), R(0),
+ /* 726 S> */ B(Ldar), R(arg1),
+ /* 732 E> */ B(LdrKeyedProperty), R(arg0), U8(117), R(0),
+ B(Ldar), R(0),
+ /* 738 S> */ B(Ldar), R(arg1),
+ /* 744 E> */ B(LdrKeyedProperty), R(arg0), U8(119), R(0),
+ B(Ldar), R(0),
+ /* 750 S> */ B(Ldar), R(arg1),
+ /* 756 E> */ B(LdrKeyedProperty), R(arg0), U8(121), R(0),
+ B(Ldar), R(0),
+ /* 762 S> */ B(Ldar), R(arg1),
+ /* 768 E> */ B(LdrKeyedProperty), R(arg0), U8(123), R(0),
+ B(Ldar), R(0),
+ /* 774 S> */ B(Ldar), R(arg1),
+ /* 780 E> */ B(LdrKeyedProperty), R(arg0), U8(125), R(0),
+ B(Ldar), R(0),
+ /* 786 S> */ B(Ldar), R(arg1),
+ /* 792 E> */ B(LdrKeyedProperty), R(arg0), U8(127), R(0),
+ B(Ldar), R(0),
+ /* 798 S> */ B(Ldar), R(arg1),
+ /* 804 E> */ B(LdrKeyedProperty), R(arg0), U8(129), R(0),
+ B(Ldar), R(0),
+ /* 810 S> */ B(Ldar), R(arg1),
+ /* 816 E> */ B(LdrKeyedProperty), R(arg0), U8(131), R(0),
+ B(Ldar), R(0),
+ /* 822 S> */ B(Ldar), R(arg1),
+ /* 828 E> */ B(LdrKeyedProperty), R(arg0), U8(133), R(0),
+ B(Ldar), R(0),
+ /* 834 S> */ B(Ldar), R(arg1),
+ /* 840 E> */ B(LdrKeyedProperty), R(arg0), U8(135), R(0),
+ B(Ldar), R(0),
+ /* 846 S> */ B(Ldar), R(arg1),
+ /* 852 E> */ B(LdrKeyedProperty), R(arg0), U8(137), R(0),
+ B(Ldar), R(0),
+ /* 858 S> */ B(Ldar), R(arg1),
+ /* 864 E> */ B(LdrKeyedProperty), R(arg0), U8(139), R(0),
+ B(Ldar), R(0),
+ /* 870 S> */ B(Ldar), R(arg1),
+ /* 876 E> */ B(LdrKeyedProperty), R(arg0), U8(141), R(0),
+ B(Ldar), R(0),
+ /* 882 S> */ B(Ldar), R(arg1),
+ /* 888 E> */ B(LdrKeyedProperty), R(arg0), U8(143), R(0),
+ B(Ldar), R(0),
+ /* 894 S> */ B(Ldar), R(arg1),
+ /* 900 E> */ B(LdrKeyedProperty), R(arg0), U8(145), R(0),
+ B(Ldar), R(0),
+ /* 906 S> */ B(Ldar), R(arg1),
+ /* 912 E> */ B(LdrKeyedProperty), R(arg0), U8(147), R(0),
+ B(Ldar), R(0),
+ /* 918 S> */ B(Ldar), R(arg1),
+ /* 924 E> */ B(LdrKeyedProperty), R(arg0), U8(149), R(0),
+ B(Ldar), R(0),
+ /* 930 S> */ B(Ldar), R(arg1),
+ /* 936 E> */ B(LdrKeyedProperty), R(arg0), U8(151), R(0),
+ B(Ldar), R(0),
+ /* 942 S> */ B(Ldar), R(arg1),
+ /* 948 E> */ B(LdrKeyedProperty), R(arg0), U8(153), R(0),
+ B(Ldar), R(0),
+ /* 954 S> */ B(Ldar), R(arg1),
+ /* 960 E> */ B(LdrKeyedProperty), R(arg0), U8(155), R(0),
+ B(Ldar), R(0),
+ /* 966 S> */ B(Ldar), R(arg1),
+ /* 972 E> */ B(LdrKeyedProperty), R(arg0), U8(157), R(0),
+ B(Ldar), R(0),
+ /* 978 S> */ B(Ldar), R(arg1),
+ /* 984 E> */ B(LdrKeyedProperty), R(arg0), U8(159), R(0),
+ B(Ldar), R(0),
+ /* 990 S> */ B(Ldar), R(arg1),
+ /* 996 E> */ B(LdrKeyedProperty), R(arg0), U8(161), R(0),
+ B(Ldar), R(0),
+ /* 1002 S> */ B(Ldar), R(arg1),
+ /* 1008 E> */ B(LdrKeyedProperty), R(arg0), U8(163), R(0),
+ B(Ldar), R(0),
+ /* 1014 S> */ B(Ldar), R(arg1),
+ /* 1020 E> */ B(LdrKeyedProperty), R(arg0), U8(165), R(0),
+ B(Ldar), R(0),
+ /* 1026 S> */ B(Ldar), R(arg1),
+ /* 1032 E> */ B(LdrKeyedProperty), R(arg0), U8(167), R(0),
+ B(Ldar), R(0),
+ /* 1038 S> */ B(Ldar), R(arg1),
+ /* 1044 E> */ B(LdrKeyedProperty), R(arg0), U8(169), R(0),
+ B(Ldar), R(0),
+ /* 1050 S> */ B(Ldar), R(arg1),
+ /* 1056 E> */ B(LdrKeyedProperty), R(arg0), U8(171), R(0),
+ B(Ldar), R(0),
+ /* 1062 S> */ B(Ldar), R(arg1),
+ /* 1068 E> */ B(LdrKeyedProperty), R(arg0), U8(173), R(0),
+ B(Ldar), R(0),
+ /* 1074 S> */ B(Ldar), R(arg1),
+ /* 1080 E> */ B(LdrKeyedProperty), R(arg0), U8(175), R(0),
+ B(Ldar), R(0),
+ /* 1086 S> */ B(Ldar), R(arg1),
+ /* 1092 E> */ B(LdrKeyedProperty), R(arg0), U8(177), R(0),
+ B(Ldar), R(0),
+ /* 1098 S> */ B(Ldar), R(arg1),
+ /* 1104 E> */ B(LdrKeyedProperty), R(arg0), U8(179), R(0),
+ B(Ldar), R(0),
+ /* 1110 S> */ B(Ldar), R(arg1),
+ /* 1116 E> */ B(LdrKeyedProperty), R(arg0), U8(181), R(0),
+ B(Ldar), R(0),
+ /* 1122 S> */ B(Ldar), R(arg1),
+ /* 1128 E> */ B(LdrKeyedProperty), R(arg0), U8(183), R(0),
+ B(Ldar), R(0),
+ /* 1134 S> */ B(Ldar), R(arg1),
+ /* 1140 E> */ B(LdrKeyedProperty), R(arg0), U8(185), R(0),
+ B(Ldar), R(0),
+ /* 1146 S> */ B(Ldar), R(arg1),
+ /* 1152 E> */ B(LdrKeyedProperty), R(arg0), U8(187), R(0),
+ B(Ldar), R(0),
+ /* 1158 S> */ B(Ldar), R(arg1),
+ /* 1164 E> */ B(LdrKeyedProperty), R(arg0), U8(189), R(0),
+ B(Ldar), R(0),
+ /* 1170 S> */ B(Ldar), R(arg1),
+ /* 1176 E> */ B(LdrKeyedProperty), R(arg0), U8(191), R(0),
+ B(Ldar), R(0),
+ /* 1182 S> */ B(Ldar), R(arg1),
+ /* 1188 E> */ B(LdrKeyedProperty), R(arg0), U8(193), R(0),
+ B(Ldar), R(0),
+ /* 1194 S> */ B(Ldar), R(arg1),
+ /* 1200 E> */ B(LdrKeyedProperty), R(arg0), U8(195), R(0),
+ B(Ldar), R(0),
+ /* 1206 S> */ B(Ldar), R(arg1),
+ /* 1212 E> */ B(LdrKeyedProperty), R(arg0), U8(197), R(0),
+ B(Ldar), R(0),
+ /* 1218 S> */ B(Ldar), R(arg1),
+ /* 1224 E> */ B(LdrKeyedProperty), R(arg0), U8(199), R(0),
+ B(Ldar), R(0),
+ /* 1230 S> */ B(Ldar), R(arg1),
+ /* 1236 E> */ B(LdrKeyedProperty), R(arg0), U8(201), R(0),
+ B(Ldar), R(0),
+ /* 1242 S> */ B(Ldar), R(arg1),
+ /* 1248 E> */ B(LdrKeyedProperty), R(arg0), U8(203), R(0),
+ B(Ldar), R(0),
+ /* 1254 S> */ B(Ldar), R(arg1),
+ /* 1260 E> */ B(LdrKeyedProperty), R(arg0), U8(205), R(0),
+ B(Ldar), R(0),
+ /* 1266 S> */ B(Ldar), R(arg1),
+ /* 1272 E> */ B(LdrKeyedProperty), R(arg0), U8(207), R(0),
+ B(Ldar), R(0),
+ /* 1278 S> */ B(Ldar), R(arg1),
+ /* 1284 E> */ B(LdrKeyedProperty), R(arg0), U8(209), R(0),
+ B(Ldar), R(0),
+ /* 1290 S> */ B(Ldar), R(arg1),
+ /* 1296 E> */ B(LdrKeyedProperty), R(arg0), U8(211), R(0),
+ B(Ldar), R(0),
+ /* 1302 S> */ B(Ldar), R(arg1),
+ /* 1308 E> */ B(LdrKeyedProperty), R(arg0), U8(213), R(0),
+ B(Ldar), R(0),
+ /* 1314 S> */ B(Ldar), R(arg1),
+ /* 1320 E> */ B(LdrKeyedProperty), R(arg0), U8(215), R(0),
+ B(Ldar), R(0),
+ /* 1326 S> */ B(Ldar), R(arg1),
+ /* 1332 E> */ B(LdrKeyedProperty), R(arg0), U8(217), R(0),
+ B(Ldar), R(0),
+ /* 1338 S> */ B(Ldar), R(arg1),
+ /* 1344 E> */ B(LdrKeyedProperty), R(arg0), U8(219), R(0),
+ B(Ldar), R(0),
+ /* 1350 S> */ B(Ldar), R(arg1),
+ /* 1356 E> */ B(LdrKeyedProperty), R(arg0), U8(221), R(0),
+ B(Ldar), R(0),
+ /* 1362 S> */ B(Ldar), R(arg1),
+ /* 1368 E> */ B(LdrKeyedProperty), R(arg0), U8(223), R(0),
+ B(Ldar), R(0),
+ /* 1374 S> */ B(Ldar), R(arg1),
+ /* 1380 E> */ B(LdrKeyedProperty), R(arg0), U8(225), R(0),
+ B(Ldar), R(0),
+ /* 1386 S> */ B(Ldar), R(arg1),
+ /* 1392 E> */ B(LdrKeyedProperty), R(arg0), U8(227), R(0),
+ B(Ldar), R(0),
+ /* 1398 S> */ B(Ldar), R(arg1),
+ /* 1404 E> */ B(LdrKeyedProperty), R(arg0), U8(229), R(0),
+ B(Ldar), R(0),
+ /* 1410 S> */ B(Ldar), R(arg1),
+ /* 1416 E> */ B(LdrKeyedProperty), R(arg0), U8(231), R(0),
+ B(Ldar), R(0),
+ /* 1422 S> */ B(Ldar), R(arg1),
+ /* 1428 E> */ B(LdrKeyedProperty), R(arg0), U8(233), R(0),
+ B(Ldar), R(0),
+ /* 1434 S> */ B(Ldar), R(arg1),
+ /* 1440 E> */ B(LdrKeyedProperty), R(arg0), U8(235), R(0),
+ B(Ldar), R(0),
+ /* 1446 S> */ B(Ldar), R(arg1),
+ /* 1452 E> */ B(LdrKeyedProperty), R(arg0), U8(237), R(0),
+ B(Ldar), R(0),
+ /* 1458 S> */ B(Ldar), R(arg1),
+ /* 1464 E> */ B(LdrKeyedProperty), R(arg0), U8(239), R(0),
+ B(Ldar), R(0),
+ /* 1470 S> */ B(Ldar), R(arg1),
+ /* 1476 E> */ B(LdrKeyedProperty), R(arg0), U8(241), R(0),
+ B(Ldar), R(0),
+ /* 1482 S> */ B(Ldar), R(arg1),
+ /* 1488 E> */ B(LdrKeyedProperty), R(arg0), U8(243), R(0),
+ B(Ldar), R(0),
+ /* 1494 S> */ B(Ldar), R(arg1),
+ /* 1500 E> */ B(LdrKeyedProperty), R(arg0), U8(245), R(0),
+ B(Ldar), R(0),
+ /* 1506 S> */ B(Ldar), R(arg1),
+ /* 1512 E> */ B(LdrKeyedProperty), R(arg0), U8(247), R(0),
+ B(Ldar), R(0),
+ /* 1518 S> */ B(Ldar), R(arg1),
+ /* 1524 E> */ B(LdrKeyedProperty), R(arg0), U8(249), R(0),
+ B(Ldar), R(0),
+ /* 1530 S> */ B(Ldar), R(arg1),
+ /* 1536 E> */ B(LdrKeyedProperty), R(arg0), U8(251), R(0),
+ B(Ldar), R(0),
+ /* 1542 S> */ B(Ldar), R(arg1),
+ /* 1548 E> */ B(LdrKeyedProperty), R(arg0), U8(253), R(0),
+ B(Ldar), R(0),
+ /* 1554 S> */ B(Ldar), R(arg1),
+ /* 1560 E> */ B(LdrKeyedProperty), R(arg0), U8(255), R(0),
+ B(Ldar), R(0),
+ /* 1566 S> */ B(Ldar), R(arg1),
+ /* 1575 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(257),
+ /* 1579 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
index 9e67a90c7b..7f456cf4fc 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -13,17 +13,15 @@ snippet: "
function f(a) { a.name = \"val\"; }
f({name : \"test\"})
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 13
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaConstant), U8(0),
- B(StoreICSloppy), R(0), U8(1), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(LdaConstant), U8(0),
+ /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(1),
+ B(LdaUndefined),
+ /* 32 S> */ B(Return),
]
constant pool: [
"val",
@@ -37,17 +35,15 @@ snippet: "
function f(a) { a[\"key\"] = \"val\"; }
f({key : \"test\"})
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 13
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaConstant), U8(0),
- B(StoreICSloppy), R(0), U8(1), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(LdaConstant), U8(0),
+ /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(1),
+ B(LdaUndefined),
+ /* 34 S> */ B(Return),
]
constant pool: [
"val",
@@ -63,17 +59,15 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 17
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(100),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(KeyedStoreICSloppy), R(0), R(1), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(LdaSmi), U8(100),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ /* 23 E> */ B(StaKeyedPropertySloppy), R(arg0), R(1), U8(1),
+ B(LdaUndefined),
+ /* 32 S> */ B(Return),
]
constant pool: [
"val",
@@ -86,19 +80,15 @@ snippet: "
function f(a, b) { a[b] = \"val\"; }
f({arg : \"test\"}, \"arg\")
"
-frame size: 2
+frame size: 0
parameter count: 3
-bytecode array length: 17
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(KeyedStoreICSloppy), R(0), R(1), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 19 S> */ B(LdaConstant), U8(0),
+ /* 24 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(1),
+ B(LdaUndefined),
+ /* 33 S> */ B(Return),
]
constant pool: [
"val",
@@ -111,20 +101,16 @@ snippet: "
function f(a) { a.name = a[-124]; }
f({\"-124\" : \"test\", name : 123 })
"
-frame size: 2
+frame size: 0
parameter count: 2
-bytecode array length: 20
+bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg0),
- B(Star), R(1),
- B(LdaSmi), U8(-124),
- B(KeyedLoadIC), R(1), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(3),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(LdaSmi), U8(-124),
+ /* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(1),
+ /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(3),
+ B(LdaUndefined),
+ /* 34 S> */ B(Return),
]
constant pool: [
"name",
@@ -137,17 +123,15 @@ snippet: "
function f(a) { \"use strict\"; a.name = \"val\"; }
f({name : \"test\"})
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 13
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaConstant), U8(0),
- B(StoreICStrict), R(0), U8(1), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 30 S> */ B(LdaConstant), U8(0),
+ /* 37 E> */ B(StaNamedPropertyStrict), R(arg0), U8(1), U8(1),
+ B(LdaUndefined),
+ /* 46 S> */ B(Return),
]
constant pool: [
"val",
@@ -161,19 +145,15 @@ snippet: "
function f(a, b) { \"use strict\"; a[b] = \"val\"; }
f({arg : \"test\"}, \"arg\")
"
-frame size: 2
+frame size: 0
parameter count: 3
-bytecode array length: 17
+bytecode array length: 9
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(KeyedStoreICStrict), R(0), R(1), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 33 S> */ B(LdaConstant), U8(0),
+ /* 38 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(1),
+ B(LdaUndefined),
+ /* 47 S> */ B(Return),
]
constant pool: [
"val",
@@ -316,529 +296,271 @@ snippet: "
}
f({name : \"test\"})
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 1297
+bytecode array length: 781
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICSloppy), R(0), U8(0), U8(255),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(Wide), B(StoreICSloppy), R16(0), U16(0), U16(257),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 18 S> */ B(LdaSmi), U8(1),
+ /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(1),
+ /* 32 S> */ B(LdaSmi), U8(1),
+ /* 39 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(3),
+ /* 46 S> */ B(LdaSmi), U8(1),
+ /* 53 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(5),
+ /* 60 S> */ B(LdaSmi), U8(1),
+ /* 67 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(7),
+ /* 74 S> */ B(LdaSmi), U8(1),
+ /* 81 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(9),
+ /* 88 S> */ B(LdaSmi), U8(1),
+ /* 95 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(11),
+ /* 102 S> */ B(LdaSmi), U8(1),
+ /* 109 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(13),
+ /* 116 S> */ B(LdaSmi), U8(1),
+ /* 123 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(15),
+ /* 130 S> */ B(LdaSmi), U8(1),
+ /* 137 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(17),
+ /* 144 S> */ B(LdaSmi), U8(1),
+ /* 151 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(19),
+ /* 158 S> */ B(LdaSmi), U8(1),
+ /* 165 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(21),
+ /* 172 S> */ B(LdaSmi), U8(1),
+ /* 179 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(23),
+ /* 186 S> */ B(LdaSmi), U8(1),
+ /* 193 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(25),
+ /* 200 S> */ B(LdaSmi), U8(1),
+ /* 207 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(27),
+ /* 214 S> */ B(LdaSmi), U8(1),
+ /* 221 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(29),
+ /* 228 S> */ B(LdaSmi), U8(1),
+ /* 235 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(31),
+ /* 242 S> */ B(LdaSmi), U8(1),
+ /* 249 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(33),
+ /* 256 S> */ B(LdaSmi), U8(1),
+ /* 263 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(35),
+ /* 270 S> */ B(LdaSmi), U8(1),
+ /* 277 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(37),
+ /* 284 S> */ B(LdaSmi), U8(1),
+ /* 291 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(39),
+ /* 298 S> */ B(LdaSmi), U8(1),
+ /* 305 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(41),
+ /* 312 S> */ B(LdaSmi), U8(1),
+ /* 319 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(43),
+ /* 326 S> */ B(LdaSmi), U8(1),
+ /* 333 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(45),
+ /* 340 S> */ B(LdaSmi), U8(1),
+ /* 347 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(47),
+ /* 354 S> */ B(LdaSmi), U8(1),
+ /* 361 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(49),
+ /* 368 S> */ B(LdaSmi), U8(1),
+ /* 375 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(51),
+ /* 382 S> */ B(LdaSmi), U8(1),
+ /* 389 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(53),
+ /* 396 S> */ B(LdaSmi), U8(1),
+ /* 403 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(55),
+ /* 410 S> */ B(LdaSmi), U8(1),
+ /* 417 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(57),
+ /* 424 S> */ B(LdaSmi), U8(1),
+ /* 431 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(59),
+ /* 438 S> */ B(LdaSmi), U8(1),
+ /* 445 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(61),
+ /* 452 S> */ B(LdaSmi), U8(1),
+ /* 459 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(63),
+ /* 466 S> */ B(LdaSmi), U8(1),
+ /* 473 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(65),
+ /* 480 S> */ B(LdaSmi), U8(1),
+ /* 487 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(67),
+ /* 494 S> */ B(LdaSmi), U8(1),
+ /* 501 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(69),
+ /* 508 S> */ B(LdaSmi), U8(1),
+ /* 515 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(71),
+ /* 522 S> */ B(LdaSmi), U8(1),
+ /* 529 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(73),
+ /* 536 S> */ B(LdaSmi), U8(1),
+ /* 543 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(75),
+ /* 550 S> */ B(LdaSmi), U8(1),
+ /* 557 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(77),
+ /* 564 S> */ B(LdaSmi), U8(1),
+ /* 571 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(79),
+ /* 578 S> */ B(LdaSmi), U8(1),
+ /* 585 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(81),
+ /* 592 S> */ B(LdaSmi), U8(1),
+ /* 599 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(83),
+ /* 606 S> */ B(LdaSmi), U8(1),
+ /* 613 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(85),
+ /* 620 S> */ B(LdaSmi), U8(1),
+ /* 627 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(87),
+ /* 634 S> */ B(LdaSmi), U8(1),
+ /* 641 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(89),
+ /* 648 S> */ B(LdaSmi), U8(1),
+ /* 655 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(91),
+ /* 662 S> */ B(LdaSmi), U8(1),
+ /* 669 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(93),
+ /* 676 S> */ B(LdaSmi), U8(1),
+ /* 683 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(95),
+ /* 690 S> */ B(LdaSmi), U8(1),
+ /* 697 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(97),
+ /* 704 S> */ B(LdaSmi), U8(1),
+ /* 711 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(99),
+ /* 718 S> */ B(LdaSmi), U8(1),
+ /* 725 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(101),
+ /* 732 S> */ B(LdaSmi), U8(1),
+ /* 739 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(103),
+ /* 746 S> */ B(LdaSmi), U8(1),
+ /* 753 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(105),
+ /* 760 S> */ B(LdaSmi), U8(1),
+ /* 767 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(107),
+ /* 774 S> */ B(LdaSmi), U8(1),
+ /* 781 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(109),
+ /* 788 S> */ B(LdaSmi), U8(1),
+ /* 795 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(111),
+ /* 802 S> */ B(LdaSmi), U8(1),
+ /* 809 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(113),
+ /* 816 S> */ B(LdaSmi), U8(1),
+ /* 823 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(115),
+ /* 830 S> */ B(LdaSmi), U8(1),
+ /* 837 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(117),
+ /* 844 S> */ B(LdaSmi), U8(1),
+ /* 851 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(119),
+ /* 858 S> */ B(LdaSmi), U8(1),
+ /* 865 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(121),
+ /* 872 S> */ B(LdaSmi), U8(1),
+ /* 879 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(123),
+ /* 886 S> */ B(LdaSmi), U8(1),
+ /* 893 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(125),
+ /* 900 S> */ B(LdaSmi), U8(1),
+ /* 907 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(127),
+ /* 914 S> */ B(LdaSmi), U8(1),
+ /* 921 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(129),
+ /* 928 S> */ B(LdaSmi), U8(1),
+ /* 935 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(131),
+ /* 942 S> */ B(LdaSmi), U8(1),
+ /* 949 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(133),
+ /* 956 S> */ B(LdaSmi), U8(1),
+ /* 963 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(135),
+ /* 970 S> */ B(LdaSmi), U8(1),
+ /* 977 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(137),
+ /* 984 S> */ B(LdaSmi), U8(1),
+ /* 991 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(139),
+ /* 998 S> */ B(LdaSmi), U8(1),
+ /* 1005 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(141),
+ /* 1012 S> */ B(LdaSmi), U8(1),
+ /* 1019 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(143),
+ /* 1026 S> */ B(LdaSmi), U8(1),
+ /* 1033 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(145),
+ /* 1040 S> */ B(LdaSmi), U8(1),
+ /* 1047 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(147),
+ /* 1054 S> */ B(LdaSmi), U8(1),
+ /* 1061 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(149),
+ /* 1068 S> */ B(LdaSmi), U8(1),
+ /* 1075 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(151),
+ /* 1082 S> */ B(LdaSmi), U8(1),
+ /* 1089 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(153),
+ /* 1096 S> */ B(LdaSmi), U8(1),
+ /* 1103 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(155),
+ /* 1110 S> */ B(LdaSmi), U8(1),
+ /* 1117 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(157),
+ /* 1124 S> */ B(LdaSmi), U8(1),
+ /* 1131 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(159),
+ /* 1138 S> */ B(LdaSmi), U8(1),
+ /* 1145 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(161),
+ /* 1152 S> */ B(LdaSmi), U8(1),
+ /* 1159 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(163),
+ /* 1166 S> */ B(LdaSmi), U8(1),
+ /* 1173 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(165),
+ /* 1180 S> */ B(LdaSmi), U8(1),
+ /* 1187 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(167),
+ /* 1194 S> */ B(LdaSmi), U8(1),
+ /* 1201 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(169),
+ /* 1208 S> */ B(LdaSmi), U8(1),
+ /* 1215 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(171),
+ /* 1222 S> */ B(LdaSmi), U8(1),
+ /* 1229 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(173),
+ /* 1236 S> */ B(LdaSmi), U8(1),
+ /* 1243 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(175),
+ /* 1250 S> */ B(LdaSmi), U8(1),
+ /* 1257 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(177),
+ /* 1264 S> */ B(LdaSmi), U8(1),
+ /* 1271 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(179),
+ /* 1278 S> */ B(LdaSmi), U8(1),
+ /* 1285 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(181),
+ /* 1292 S> */ B(LdaSmi), U8(1),
+ /* 1299 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(183),
+ /* 1306 S> */ B(LdaSmi), U8(1),
+ /* 1313 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(185),
+ /* 1320 S> */ B(LdaSmi), U8(1),
+ /* 1327 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(187),
+ /* 1334 S> */ B(LdaSmi), U8(1),
+ /* 1341 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(189),
+ /* 1348 S> */ B(LdaSmi), U8(1),
+ /* 1355 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(191),
+ /* 1362 S> */ B(LdaSmi), U8(1),
+ /* 1369 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(193),
+ /* 1376 S> */ B(LdaSmi), U8(1),
+ /* 1383 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(195),
+ /* 1390 S> */ B(LdaSmi), U8(1),
+ /* 1397 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(197),
+ /* 1404 S> */ B(LdaSmi), U8(1),
+ /* 1411 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(199),
+ /* 1418 S> */ B(LdaSmi), U8(1),
+ /* 1425 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(201),
+ /* 1432 S> */ B(LdaSmi), U8(1),
+ /* 1439 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(203),
+ /* 1446 S> */ B(LdaSmi), U8(1),
+ /* 1453 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(205),
+ /* 1460 S> */ B(LdaSmi), U8(1),
+ /* 1467 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(207),
+ /* 1474 S> */ B(LdaSmi), U8(1),
+ /* 1481 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(209),
+ /* 1488 S> */ B(LdaSmi), U8(1),
+ /* 1495 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(211),
+ /* 1502 S> */ B(LdaSmi), U8(1),
+ /* 1509 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(213),
+ /* 1516 S> */ B(LdaSmi), U8(1),
+ /* 1523 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(215),
+ /* 1530 S> */ B(LdaSmi), U8(1),
+ /* 1537 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(217),
+ /* 1544 S> */ B(LdaSmi), U8(1),
+ /* 1551 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(219),
+ /* 1558 S> */ B(LdaSmi), U8(1),
+ /* 1565 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(221),
+ /* 1572 S> */ B(LdaSmi), U8(1),
+ /* 1579 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(223),
+ /* 1586 S> */ B(LdaSmi), U8(1),
+ /* 1593 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(225),
+ /* 1600 S> */ B(LdaSmi), U8(1),
+ /* 1607 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(227),
+ /* 1614 S> */ B(LdaSmi), U8(1),
+ /* 1621 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(229),
+ /* 1628 S> */ B(LdaSmi), U8(1),
+ /* 1635 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(231),
+ /* 1642 S> */ B(LdaSmi), U8(1),
+ /* 1649 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(233),
+ /* 1656 S> */ B(LdaSmi), U8(1),
+ /* 1663 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(235),
+ /* 1670 S> */ B(LdaSmi), U8(1),
+ /* 1677 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(237),
+ /* 1684 S> */ B(LdaSmi), U8(1),
+ /* 1691 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(239),
+ /* 1698 S> */ B(LdaSmi), U8(1),
+ /* 1705 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(241),
+ /* 1712 S> */ B(LdaSmi), U8(1),
+ /* 1719 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(243),
+ /* 1726 S> */ B(LdaSmi), U8(1),
+ /* 1733 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(245),
+ /* 1740 S> */ B(LdaSmi), U8(1),
+ /* 1747 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(247),
+ /* 1754 S> */ B(LdaSmi), U8(1),
+ /* 1761 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(249),
+ /* 1768 S> */ B(LdaSmi), U8(1),
+ /* 1775 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(251),
+ /* 1782 S> */ B(LdaSmi), U8(1),
+ /* 1789 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(253),
+ /* 1796 S> */ B(LdaSmi), U8(1),
+ /* 1803 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(255),
+ /* 1810 S> */ B(LdaSmi), U8(2),
+ /* 1817 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(257),
+ B(LdaUndefined),
+ /* 1822 S> */ B(Return),
]
constant pool: [
"name",
@@ -982,529 +704,271 @@ snippet: "
}
f({name : \"test\"})
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 1297
+bytecode array length: 781
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(1),
- B(StoreICStrict), R(0), U8(0), U8(255),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LdaSmi), U8(2),
- B(Wide), B(StoreICStrict), R16(0), U16(0), U16(257),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 33 S> */ B(LdaSmi), U8(1),
+ /* 40 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(1),
+ /* 47 S> */ B(LdaSmi), U8(1),
+ /* 54 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(3),
+ /* 61 S> */ B(LdaSmi), U8(1),
+ /* 68 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(5),
+ /* 75 S> */ B(LdaSmi), U8(1),
+ /* 82 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(7),
+ /* 89 S> */ B(LdaSmi), U8(1),
+ /* 96 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(9),
+ /* 103 S> */ B(LdaSmi), U8(1),
+ /* 110 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(11),
+ /* 117 S> */ B(LdaSmi), U8(1),
+ /* 124 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(13),
+ /* 131 S> */ B(LdaSmi), U8(1),
+ /* 138 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(15),
+ /* 145 S> */ B(LdaSmi), U8(1),
+ /* 152 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(17),
+ /* 159 S> */ B(LdaSmi), U8(1),
+ /* 166 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(19),
+ /* 173 S> */ B(LdaSmi), U8(1),
+ /* 180 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(21),
+ /* 187 S> */ B(LdaSmi), U8(1),
+ /* 194 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(23),
+ /* 201 S> */ B(LdaSmi), U8(1),
+ /* 208 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(25),
+ /* 215 S> */ B(LdaSmi), U8(1),
+ /* 222 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(27),
+ /* 229 S> */ B(LdaSmi), U8(1),
+ /* 236 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(29),
+ /* 243 S> */ B(LdaSmi), U8(1),
+ /* 250 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(31),
+ /* 257 S> */ B(LdaSmi), U8(1),
+ /* 264 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(33),
+ /* 271 S> */ B(LdaSmi), U8(1),
+ /* 278 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(35),
+ /* 285 S> */ B(LdaSmi), U8(1),
+ /* 292 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(37),
+ /* 299 S> */ B(LdaSmi), U8(1),
+ /* 306 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(39),
+ /* 313 S> */ B(LdaSmi), U8(1),
+ /* 320 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(41),
+ /* 327 S> */ B(LdaSmi), U8(1),
+ /* 334 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(43),
+ /* 341 S> */ B(LdaSmi), U8(1),
+ /* 348 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(45),
+ /* 355 S> */ B(LdaSmi), U8(1),
+ /* 362 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(47),
+ /* 369 S> */ B(LdaSmi), U8(1),
+ /* 376 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(49),
+ /* 383 S> */ B(LdaSmi), U8(1),
+ /* 390 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(51),
+ /* 397 S> */ B(LdaSmi), U8(1),
+ /* 404 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(53),
+ /* 411 S> */ B(LdaSmi), U8(1),
+ /* 418 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(55),
+ /* 425 S> */ B(LdaSmi), U8(1),
+ /* 432 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(57),
+ /* 439 S> */ B(LdaSmi), U8(1),
+ /* 446 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(59),
+ /* 453 S> */ B(LdaSmi), U8(1),
+ /* 460 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(61),
+ /* 467 S> */ B(LdaSmi), U8(1),
+ /* 474 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(63),
+ /* 481 S> */ B(LdaSmi), U8(1),
+ /* 488 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(65),
+ /* 495 S> */ B(LdaSmi), U8(1),
+ /* 502 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(67),
+ /* 509 S> */ B(LdaSmi), U8(1),
+ /* 516 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(69),
+ /* 523 S> */ B(LdaSmi), U8(1),
+ /* 530 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(71),
+ /* 537 S> */ B(LdaSmi), U8(1),
+ /* 544 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(73),
+ /* 551 S> */ B(LdaSmi), U8(1),
+ /* 558 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(75),
+ /* 565 S> */ B(LdaSmi), U8(1),
+ /* 572 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(77),
+ /* 579 S> */ B(LdaSmi), U8(1),
+ /* 586 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(79),
+ /* 593 S> */ B(LdaSmi), U8(1),
+ /* 600 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(81),
+ /* 607 S> */ B(LdaSmi), U8(1),
+ /* 614 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(83),
+ /* 621 S> */ B(LdaSmi), U8(1),
+ /* 628 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(85),
+ /* 635 S> */ B(LdaSmi), U8(1),
+ /* 642 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(87),
+ /* 649 S> */ B(LdaSmi), U8(1),
+ /* 656 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(89),
+ /* 663 S> */ B(LdaSmi), U8(1),
+ /* 670 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(91),
+ /* 677 S> */ B(LdaSmi), U8(1),
+ /* 684 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(93),
+ /* 691 S> */ B(LdaSmi), U8(1),
+ /* 698 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(95),
+ /* 705 S> */ B(LdaSmi), U8(1),
+ /* 712 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(97),
+ /* 719 S> */ B(LdaSmi), U8(1),
+ /* 726 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(99),
+ /* 733 S> */ B(LdaSmi), U8(1),
+ /* 740 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(101),
+ /* 747 S> */ B(LdaSmi), U8(1),
+ /* 754 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(103),
+ /* 761 S> */ B(LdaSmi), U8(1),
+ /* 768 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(105),
+ /* 775 S> */ B(LdaSmi), U8(1),
+ /* 782 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(107),
+ /* 789 S> */ B(LdaSmi), U8(1),
+ /* 796 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(109),
+ /* 803 S> */ B(LdaSmi), U8(1),
+ /* 810 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(111),
+ /* 817 S> */ B(LdaSmi), U8(1),
+ /* 824 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(113),
+ /* 831 S> */ B(LdaSmi), U8(1),
+ /* 838 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(115),
+ /* 845 S> */ B(LdaSmi), U8(1),
+ /* 852 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(117),
+ /* 859 S> */ B(LdaSmi), U8(1),
+ /* 866 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(119),
+ /* 873 S> */ B(LdaSmi), U8(1),
+ /* 880 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(121),
+ /* 887 S> */ B(LdaSmi), U8(1),
+ /* 894 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(123),
+ /* 901 S> */ B(LdaSmi), U8(1),
+ /* 908 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(125),
+ /* 915 S> */ B(LdaSmi), U8(1),
+ /* 922 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(127),
+ /* 929 S> */ B(LdaSmi), U8(1),
+ /* 936 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(129),
+ /* 943 S> */ B(LdaSmi), U8(1),
+ /* 950 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(131),
+ /* 957 S> */ B(LdaSmi), U8(1),
+ /* 964 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(133),
+ /* 971 S> */ B(LdaSmi), U8(1),
+ /* 978 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(135),
+ /* 985 S> */ B(LdaSmi), U8(1),
+ /* 992 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(137),
+ /* 999 S> */ B(LdaSmi), U8(1),
+ /* 1006 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(139),
+ /* 1013 S> */ B(LdaSmi), U8(1),
+ /* 1020 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(141),
+ /* 1027 S> */ B(LdaSmi), U8(1),
+ /* 1034 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(143),
+ /* 1041 S> */ B(LdaSmi), U8(1),
+ /* 1048 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(145),
+ /* 1055 S> */ B(LdaSmi), U8(1),
+ /* 1062 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(147),
+ /* 1069 S> */ B(LdaSmi), U8(1),
+ /* 1076 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(149),
+ /* 1083 S> */ B(LdaSmi), U8(1),
+ /* 1090 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(151),
+ /* 1097 S> */ B(LdaSmi), U8(1),
+ /* 1104 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(153),
+ /* 1111 S> */ B(LdaSmi), U8(1),
+ /* 1118 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(155),
+ /* 1125 S> */ B(LdaSmi), U8(1),
+ /* 1132 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(157),
+ /* 1139 S> */ B(LdaSmi), U8(1),
+ /* 1146 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(159),
+ /* 1153 S> */ B(LdaSmi), U8(1),
+ /* 1160 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(161),
+ /* 1167 S> */ B(LdaSmi), U8(1),
+ /* 1174 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(163),
+ /* 1181 S> */ B(LdaSmi), U8(1),
+ /* 1188 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(165),
+ /* 1195 S> */ B(LdaSmi), U8(1),
+ /* 1202 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(167),
+ /* 1209 S> */ B(LdaSmi), U8(1),
+ /* 1216 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(169),
+ /* 1223 S> */ B(LdaSmi), U8(1),
+ /* 1230 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(171),
+ /* 1237 S> */ B(LdaSmi), U8(1),
+ /* 1244 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(173),
+ /* 1251 S> */ B(LdaSmi), U8(1),
+ /* 1258 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(175),
+ /* 1265 S> */ B(LdaSmi), U8(1),
+ /* 1272 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(177),
+ /* 1279 S> */ B(LdaSmi), U8(1),
+ /* 1286 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(179),
+ /* 1293 S> */ B(LdaSmi), U8(1),
+ /* 1300 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(181),
+ /* 1307 S> */ B(LdaSmi), U8(1),
+ /* 1314 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(183),
+ /* 1321 S> */ B(LdaSmi), U8(1),
+ /* 1328 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(185),
+ /* 1335 S> */ B(LdaSmi), U8(1),
+ /* 1342 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(187),
+ /* 1349 S> */ B(LdaSmi), U8(1),
+ /* 1356 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(189),
+ /* 1363 S> */ B(LdaSmi), U8(1),
+ /* 1370 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(191),
+ /* 1377 S> */ B(LdaSmi), U8(1),
+ /* 1384 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(193),
+ /* 1391 S> */ B(LdaSmi), U8(1),
+ /* 1398 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(195),
+ /* 1405 S> */ B(LdaSmi), U8(1),
+ /* 1412 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(197),
+ /* 1419 S> */ B(LdaSmi), U8(1),
+ /* 1426 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(199),
+ /* 1433 S> */ B(LdaSmi), U8(1),
+ /* 1440 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(201),
+ /* 1447 S> */ B(LdaSmi), U8(1),
+ /* 1454 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(203),
+ /* 1461 S> */ B(LdaSmi), U8(1),
+ /* 1468 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(205),
+ /* 1475 S> */ B(LdaSmi), U8(1),
+ /* 1482 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(207),
+ /* 1489 S> */ B(LdaSmi), U8(1),
+ /* 1496 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(209),
+ /* 1503 S> */ B(LdaSmi), U8(1),
+ /* 1510 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(211),
+ /* 1517 S> */ B(LdaSmi), U8(1),
+ /* 1524 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(213),
+ /* 1531 S> */ B(LdaSmi), U8(1),
+ /* 1538 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(215),
+ /* 1545 S> */ B(LdaSmi), U8(1),
+ /* 1552 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(217),
+ /* 1559 S> */ B(LdaSmi), U8(1),
+ /* 1566 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(219),
+ /* 1573 S> */ B(LdaSmi), U8(1),
+ /* 1580 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(221),
+ /* 1587 S> */ B(LdaSmi), U8(1),
+ /* 1594 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(223),
+ /* 1601 S> */ B(LdaSmi), U8(1),
+ /* 1608 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(225),
+ /* 1615 S> */ B(LdaSmi), U8(1),
+ /* 1622 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(227),
+ /* 1629 S> */ B(LdaSmi), U8(1),
+ /* 1636 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(229),
+ /* 1643 S> */ B(LdaSmi), U8(1),
+ /* 1650 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(231),
+ /* 1657 S> */ B(LdaSmi), U8(1),
+ /* 1664 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(233),
+ /* 1671 S> */ B(LdaSmi), U8(1),
+ /* 1678 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(235),
+ /* 1685 S> */ B(LdaSmi), U8(1),
+ /* 1692 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(237),
+ /* 1699 S> */ B(LdaSmi), U8(1),
+ /* 1706 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(239),
+ /* 1713 S> */ B(LdaSmi), U8(1),
+ /* 1720 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(241),
+ /* 1727 S> */ B(LdaSmi), U8(1),
+ /* 1734 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(243),
+ /* 1741 S> */ B(LdaSmi), U8(1),
+ /* 1748 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(245),
+ /* 1755 S> */ B(LdaSmi), U8(1),
+ /* 1762 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(247),
+ /* 1769 S> */ B(LdaSmi), U8(1),
+ /* 1776 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(249),
+ /* 1783 S> */ B(LdaSmi), U8(1),
+ /* 1790 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(251),
+ /* 1797 S> */ B(LdaSmi), U8(1),
+ /* 1804 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(253),
+ /* 1811 S> */ B(LdaSmi), U8(1),
+ /* 1818 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(255),
+ /* 1825 S> */ B(LdaSmi), U8(2),
+ /* 1832 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(257),
+ B(LdaUndefined),
+ /* 1837 S> */ B(Return),
]
constant pool: [
"name",
@@ -1647,787 +1111,271 @@ snippet: "
}
f({name : \"test\"})
"
-frame size: 2
+frame size: 0
parameter count: 3
-bytecode array length: 1813
+bytecode array length: 781
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICSloppy), R(0), R(1), U8(255),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Wide), B(KeyedStoreICSloppy), R16(0), R16(1), U16(257),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 21 S> */ B(LdaSmi), U8(1),
+ /* 26 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(1),
+ /* 33 S> */ B(LdaSmi), U8(1),
+ /* 38 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(3),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ /* 50 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(5),
+ /* 57 S> */ B(LdaSmi), U8(1),
+ /* 62 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(7),
+ /* 69 S> */ B(LdaSmi), U8(1),
+ /* 74 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(9),
+ /* 81 S> */ B(LdaSmi), U8(1),
+ /* 86 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(11),
+ /* 93 S> */ B(LdaSmi), U8(1),
+ /* 98 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(13),
+ /* 105 S> */ B(LdaSmi), U8(1),
+ /* 110 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(15),
+ /* 117 S> */ B(LdaSmi), U8(1),
+ /* 122 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(17),
+ /* 129 S> */ B(LdaSmi), U8(1),
+ /* 134 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(19),
+ /* 141 S> */ B(LdaSmi), U8(1),
+ /* 146 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(21),
+ /* 153 S> */ B(LdaSmi), U8(1),
+ /* 158 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(23),
+ /* 165 S> */ B(LdaSmi), U8(1),
+ /* 170 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(25),
+ /* 177 S> */ B(LdaSmi), U8(1),
+ /* 182 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(27),
+ /* 189 S> */ B(LdaSmi), U8(1),
+ /* 194 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(29),
+ /* 201 S> */ B(LdaSmi), U8(1),
+ /* 206 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(31),
+ /* 213 S> */ B(LdaSmi), U8(1),
+ /* 218 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(33),
+ /* 225 S> */ B(LdaSmi), U8(1),
+ /* 230 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(35),
+ /* 237 S> */ B(LdaSmi), U8(1),
+ /* 242 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(37),
+ /* 249 S> */ B(LdaSmi), U8(1),
+ /* 254 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(39),
+ /* 261 S> */ B(LdaSmi), U8(1),
+ /* 266 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(41),
+ /* 273 S> */ B(LdaSmi), U8(1),
+ /* 278 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(43),
+ /* 285 S> */ B(LdaSmi), U8(1),
+ /* 290 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(45),
+ /* 297 S> */ B(LdaSmi), U8(1),
+ /* 302 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(47),
+ /* 309 S> */ B(LdaSmi), U8(1),
+ /* 314 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(49),
+ /* 321 S> */ B(LdaSmi), U8(1),
+ /* 326 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(51),
+ /* 333 S> */ B(LdaSmi), U8(1),
+ /* 338 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(53),
+ /* 345 S> */ B(LdaSmi), U8(1),
+ /* 350 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(55),
+ /* 357 S> */ B(LdaSmi), U8(1),
+ /* 362 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(57),
+ /* 369 S> */ B(LdaSmi), U8(1),
+ /* 374 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(59),
+ /* 381 S> */ B(LdaSmi), U8(1),
+ /* 386 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(61),
+ /* 393 S> */ B(LdaSmi), U8(1),
+ /* 398 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(63),
+ /* 405 S> */ B(LdaSmi), U8(1),
+ /* 410 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(65),
+ /* 417 S> */ B(LdaSmi), U8(1),
+ /* 422 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(67),
+ /* 429 S> */ B(LdaSmi), U8(1),
+ /* 434 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(69),
+ /* 441 S> */ B(LdaSmi), U8(1),
+ /* 446 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(71),
+ /* 453 S> */ B(LdaSmi), U8(1),
+ /* 458 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(73),
+ /* 465 S> */ B(LdaSmi), U8(1),
+ /* 470 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(75),
+ /* 477 S> */ B(LdaSmi), U8(1),
+ /* 482 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(77),
+ /* 489 S> */ B(LdaSmi), U8(1),
+ /* 494 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(79),
+ /* 501 S> */ B(LdaSmi), U8(1),
+ /* 506 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(81),
+ /* 513 S> */ B(LdaSmi), U8(1),
+ /* 518 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(83),
+ /* 525 S> */ B(LdaSmi), U8(1),
+ /* 530 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(85),
+ /* 537 S> */ B(LdaSmi), U8(1),
+ /* 542 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(87),
+ /* 549 S> */ B(LdaSmi), U8(1),
+ /* 554 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(89),
+ /* 561 S> */ B(LdaSmi), U8(1),
+ /* 566 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(91),
+ /* 573 S> */ B(LdaSmi), U8(1),
+ /* 578 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(93),
+ /* 585 S> */ B(LdaSmi), U8(1),
+ /* 590 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(95),
+ /* 597 S> */ B(LdaSmi), U8(1),
+ /* 602 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(97),
+ /* 609 S> */ B(LdaSmi), U8(1),
+ /* 614 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(99),
+ /* 621 S> */ B(LdaSmi), U8(1),
+ /* 626 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(101),
+ /* 633 S> */ B(LdaSmi), U8(1),
+ /* 638 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(103),
+ /* 645 S> */ B(LdaSmi), U8(1),
+ /* 650 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(105),
+ /* 657 S> */ B(LdaSmi), U8(1),
+ /* 662 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(107),
+ /* 669 S> */ B(LdaSmi), U8(1),
+ /* 674 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(109),
+ /* 681 S> */ B(LdaSmi), U8(1),
+ /* 686 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(111),
+ /* 693 S> */ B(LdaSmi), U8(1),
+ /* 698 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(113),
+ /* 705 S> */ B(LdaSmi), U8(1),
+ /* 710 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(115),
+ /* 717 S> */ B(LdaSmi), U8(1),
+ /* 722 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(117),
+ /* 729 S> */ B(LdaSmi), U8(1),
+ /* 734 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(119),
+ /* 741 S> */ B(LdaSmi), U8(1),
+ /* 746 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(121),
+ /* 753 S> */ B(LdaSmi), U8(1),
+ /* 758 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(123),
+ /* 765 S> */ B(LdaSmi), U8(1),
+ /* 770 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(125),
+ /* 777 S> */ B(LdaSmi), U8(1),
+ /* 782 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(127),
+ /* 789 S> */ B(LdaSmi), U8(1),
+ /* 794 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(129),
+ /* 801 S> */ B(LdaSmi), U8(1),
+ /* 806 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(131),
+ /* 813 S> */ B(LdaSmi), U8(1),
+ /* 818 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(133),
+ /* 825 S> */ B(LdaSmi), U8(1),
+ /* 830 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(135),
+ /* 837 S> */ B(LdaSmi), U8(1),
+ /* 842 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(137),
+ /* 849 S> */ B(LdaSmi), U8(1),
+ /* 854 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(139),
+ /* 861 S> */ B(LdaSmi), U8(1),
+ /* 866 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(141),
+ /* 873 S> */ B(LdaSmi), U8(1),
+ /* 878 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(143),
+ /* 885 S> */ B(LdaSmi), U8(1),
+ /* 890 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(145),
+ /* 897 S> */ B(LdaSmi), U8(1),
+ /* 902 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(147),
+ /* 909 S> */ B(LdaSmi), U8(1),
+ /* 914 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(149),
+ /* 921 S> */ B(LdaSmi), U8(1),
+ /* 926 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(151),
+ /* 933 S> */ B(LdaSmi), U8(1),
+ /* 938 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(153),
+ /* 945 S> */ B(LdaSmi), U8(1),
+ /* 950 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(155),
+ /* 957 S> */ B(LdaSmi), U8(1),
+ /* 962 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(157),
+ /* 969 S> */ B(LdaSmi), U8(1),
+ /* 974 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(159),
+ /* 981 S> */ B(LdaSmi), U8(1),
+ /* 986 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(161),
+ /* 993 S> */ B(LdaSmi), U8(1),
+ /* 998 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(163),
+ /* 1005 S> */ B(LdaSmi), U8(1),
+ /* 1010 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(165),
+ /* 1017 S> */ B(LdaSmi), U8(1),
+ /* 1022 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(167),
+ /* 1029 S> */ B(LdaSmi), U8(1),
+ /* 1034 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(169),
+ /* 1041 S> */ B(LdaSmi), U8(1),
+ /* 1046 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(171),
+ /* 1053 S> */ B(LdaSmi), U8(1),
+ /* 1058 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(173),
+ /* 1065 S> */ B(LdaSmi), U8(1),
+ /* 1070 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(175),
+ /* 1077 S> */ B(LdaSmi), U8(1),
+ /* 1082 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(177),
+ /* 1089 S> */ B(LdaSmi), U8(1),
+ /* 1094 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(179),
+ /* 1101 S> */ B(LdaSmi), U8(1),
+ /* 1106 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(181),
+ /* 1113 S> */ B(LdaSmi), U8(1),
+ /* 1118 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(183),
+ /* 1125 S> */ B(LdaSmi), U8(1),
+ /* 1130 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(185),
+ /* 1137 S> */ B(LdaSmi), U8(1),
+ /* 1142 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(187),
+ /* 1149 S> */ B(LdaSmi), U8(1),
+ /* 1154 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(189),
+ /* 1161 S> */ B(LdaSmi), U8(1),
+ /* 1166 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(191),
+ /* 1173 S> */ B(LdaSmi), U8(1),
+ /* 1178 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(193),
+ /* 1185 S> */ B(LdaSmi), U8(1),
+ /* 1190 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(195),
+ /* 1197 S> */ B(LdaSmi), U8(1),
+ /* 1202 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(197),
+ /* 1209 S> */ B(LdaSmi), U8(1),
+ /* 1214 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(199),
+ /* 1221 S> */ B(LdaSmi), U8(1),
+ /* 1226 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(201),
+ /* 1233 S> */ B(LdaSmi), U8(1),
+ /* 1238 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(203),
+ /* 1245 S> */ B(LdaSmi), U8(1),
+ /* 1250 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(205),
+ /* 1257 S> */ B(LdaSmi), U8(1),
+ /* 1262 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(207),
+ /* 1269 S> */ B(LdaSmi), U8(1),
+ /* 1274 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(209),
+ /* 1281 S> */ B(LdaSmi), U8(1),
+ /* 1286 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(211),
+ /* 1293 S> */ B(LdaSmi), U8(1),
+ /* 1298 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(213),
+ /* 1305 S> */ B(LdaSmi), U8(1),
+ /* 1310 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(215),
+ /* 1317 S> */ B(LdaSmi), U8(1),
+ /* 1322 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(217),
+ /* 1329 S> */ B(LdaSmi), U8(1),
+ /* 1334 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(219),
+ /* 1341 S> */ B(LdaSmi), U8(1),
+ /* 1346 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(221),
+ /* 1353 S> */ B(LdaSmi), U8(1),
+ /* 1358 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(223),
+ /* 1365 S> */ B(LdaSmi), U8(1),
+ /* 1370 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(225),
+ /* 1377 S> */ B(LdaSmi), U8(1),
+ /* 1382 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(227),
+ /* 1389 S> */ B(LdaSmi), U8(1),
+ /* 1394 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(229),
+ /* 1401 S> */ B(LdaSmi), U8(1),
+ /* 1406 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(231),
+ /* 1413 S> */ B(LdaSmi), U8(1),
+ /* 1418 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(233),
+ /* 1425 S> */ B(LdaSmi), U8(1),
+ /* 1430 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(235),
+ /* 1437 S> */ B(LdaSmi), U8(1),
+ /* 1442 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(237),
+ /* 1449 S> */ B(LdaSmi), U8(1),
+ /* 1454 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(239),
+ /* 1461 S> */ B(LdaSmi), U8(1),
+ /* 1466 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(241),
+ /* 1473 S> */ B(LdaSmi), U8(1),
+ /* 1478 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(243),
+ /* 1485 S> */ B(LdaSmi), U8(1),
+ /* 1490 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(245),
+ /* 1497 S> */ B(LdaSmi), U8(1),
+ /* 1502 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(247),
+ /* 1509 S> */ B(LdaSmi), U8(1),
+ /* 1514 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(249),
+ /* 1521 S> */ B(LdaSmi), U8(1),
+ /* 1526 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(251),
+ /* 1533 S> */ B(LdaSmi), U8(1),
+ /* 1538 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(253),
+ /* 1545 S> */ B(LdaSmi), U8(1),
+ /* 1550 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(255),
+ /* 1557 S> */ B(LdaSmi), U8(2),
+ /* 1562 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(257),
+ B(LdaUndefined),
+ /* 1567 S> */ B(Return),
]
constant pool: [
]
@@ -2570,787 +1518,271 @@ snippet: "
}
f({name : \"test\"})
"
-frame size: 2
+frame size: 0
parameter count: 3
-bytecode array length: 1813
+bytecode array length: 781
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(KeyedStoreICStrict), R(0), R(1), U8(255),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(Ldar), R(arg1),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Wide), B(KeyedStoreICStrict), R16(0), R16(1), U16(257),
- B(LdaUndefined),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 37 S> */ B(LdaSmi), U8(1),
+ /* 42 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(1),
+ /* 49 S> */ B(LdaSmi), U8(1),
+ /* 54 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(3),
+ /* 61 S> */ B(LdaSmi), U8(1),
+ /* 66 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(5),
+ /* 73 S> */ B(LdaSmi), U8(1),
+ /* 78 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(7),
+ /* 85 S> */ B(LdaSmi), U8(1),
+ /* 90 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(9),
+ /* 97 S> */ B(LdaSmi), U8(1),
+ /* 102 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(11),
+ /* 109 S> */ B(LdaSmi), U8(1),
+ /* 114 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(13),
+ /* 121 S> */ B(LdaSmi), U8(1),
+ /* 126 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(15),
+ /* 133 S> */ B(LdaSmi), U8(1),
+ /* 138 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(17),
+ /* 145 S> */ B(LdaSmi), U8(1),
+ /* 150 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(19),
+ /* 157 S> */ B(LdaSmi), U8(1),
+ /* 162 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(21),
+ /* 169 S> */ B(LdaSmi), U8(1),
+ /* 174 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(23),
+ /* 181 S> */ B(LdaSmi), U8(1),
+ /* 186 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(25),
+ /* 193 S> */ B(LdaSmi), U8(1),
+ /* 198 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(27),
+ /* 205 S> */ B(LdaSmi), U8(1),
+ /* 210 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(29),
+ /* 217 S> */ B(LdaSmi), U8(1),
+ /* 222 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(31),
+ /* 229 S> */ B(LdaSmi), U8(1),
+ /* 234 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(33),
+ /* 241 S> */ B(LdaSmi), U8(1),
+ /* 246 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(35),
+ /* 253 S> */ B(LdaSmi), U8(1),
+ /* 258 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(37),
+ /* 265 S> */ B(LdaSmi), U8(1),
+ /* 270 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(39),
+ /* 277 S> */ B(LdaSmi), U8(1),
+ /* 282 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(41),
+ /* 289 S> */ B(LdaSmi), U8(1),
+ /* 294 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(43),
+ /* 301 S> */ B(LdaSmi), U8(1),
+ /* 306 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(45),
+ /* 313 S> */ B(LdaSmi), U8(1),
+ /* 318 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(47),
+ /* 325 S> */ B(LdaSmi), U8(1),
+ /* 330 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(49),
+ /* 337 S> */ B(LdaSmi), U8(1),
+ /* 342 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(51),
+ /* 349 S> */ B(LdaSmi), U8(1),
+ /* 354 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(53),
+ /* 361 S> */ B(LdaSmi), U8(1),
+ /* 366 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(55),
+ /* 373 S> */ B(LdaSmi), U8(1),
+ /* 378 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(57),
+ /* 385 S> */ B(LdaSmi), U8(1),
+ /* 390 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(59),
+ /* 397 S> */ B(LdaSmi), U8(1),
+ /* 402 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(61),
+ /* 409 S> */ B(LdaSmi), U8(1),
+ /* 414 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(63),
+ /* 421 S> */ B(LdaSmi), U8(1),
+ /* 426 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(65),
+ /* 433 S> */ B(LdaSmi), U8(1),
+ /* 438 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(67),
+ /* 445 S> */ B(LdaSmi), U8(1),
+ /* 450 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(69),
+ /* 457 S> */ B(LdaSmi), U8(1),
+ /* 462 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(71),
+ /* 469 S> */ B(LdaSmi), U8(1),
+ /* 474 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(73),
+ /* 481 S> */ B(LdaSmi), U8(1),
+ /* 486 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(75),
+ /* 493 S> */ B(LdaSmi), U8(1),
+ /* 498 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(77),
+ /* 505 S> */ B(LdaSmi), U8(1),
+ /* 510 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(79),
+ /* 517 S> */ B(LdaSmi), U8(1),
+ /* 522 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(81),
+ /* 529 S> */ B(LdaSmi), U8(1),
+ /* 534 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(83),
+ /* 541 S> */ B(LdaSmi), U8(1),
+ /* 546 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(85),
+ /* 553 S> */ B(LdaSmi), U8(1),
+ /* 558 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(87),
+ /* 565 S> */ B(LdaSmi), U8(1),
+ /* 570 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(89),
+ /* 577 S> */ B(LdaSmi), U8(1),
+ /* 582 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(91),
+ /* 589 S> */ B(LdaSmi), U8(1),
+ /* 594 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(93),
+ /* 601 S> */ B(LdaSmi), U8(1),
+ /* 606 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(95),
+ /* 613 S> */ B(LdaSmi), U8(1),
+ /* 618 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(97),
+ /* 625 S> */ B(LdaSmi), U8(1),
+ /* 630 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(99),
+ /* 637 S> */ B(LdaSmi), U8(1),
+ /* 642 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(101),
+ /* 649 S> */ B(LdaSmi), U8(1),
+ /* 654 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(103),
+ /* 661 S> */ B(LdaSmi), U8(1),
+ /* 666 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(105),
+ /* 673 S> */ B(LdaSmi), U8(1),
+ /* 678 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(107),
+ /* 685 S> */ B(LdaSmi), U8(1),
+ /* 690 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(109),
+ /* 697 S> */ B(LdaSmi), U8(1),
+ /* 702 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(111),
+ /* 709 S> */ B(LdaSmi), U8(1),
+ /* 714 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(113),
+ /* 721 S> */ B(LdaSmi), U8(1),
+ /* 726 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(115),
+ /* 733 S> */ B(LdaSmi), U8(1),
+ /* 738 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(117),
+ /* 745 S> */ B(LdaSmi), U8(1),
+ /* 750 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(119),
+ /* 757 S> */ B(LdaSmi), U8(1),
+ /* 762 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(121),
+ /* 769 S> */ B(LdaSmi), U8(1),
+ /* 774 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(123),
+ /* 781 S> */ B(LdaSmi), U8(1),
+ /* 786 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(125),
+ /* 793 S> */ B(LdaSmi), U8(1),
+ /* 798 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(127),
+ /* 805 S> */ B(LdaSmi), U8(1),
+ /* 810 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(129),
+ /* 817 S> */ B(LdaSmi), U8(1),
+ /* 822 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(131),
+ /* 829 S> */ B(LdaSmi), U8(1),
+ /* 834 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(133),
+ /* 841 S> */ B(LdaSmi), U8(1),
+ /* 846 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(135),
+ /* 853 S> */ B(LdaSmi), U8(1),
+ /* 858 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(137),
+ /* 865 S> */ B(LdaSmi), U8(1),
+ /* 870 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(139),
+ /* 877 S> */ B(LdaSmi), U8(1),
+ /* 882 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(141),
+ /* 889 S> */ B(LdaSmi), U8(1),
+ /* 894 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(143),
+ /* 901 S> */ B(LdaSmi), U8(1),
+ /* 906 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(145),
+ /* 913 S> */ B(LdaSmi), U8(1),
+ /* 918 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(147),
+ /* 925 S> */ B(LdaSmi), U8(1),
+ /* 930 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(149),
+ /* 937 S> */ B(LdaSmi), U8(1),
+ /* 942 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(151),
+ /* 949 S> */ B(LdaSmi), U8(1),
+ /* 954 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(153),
+ /* 961 S> */ B(LdaSmi), U8(1),
+ /* 966 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(155),
+ /* 973 S> */ B(LdaSmi), U8(1),
+ /* 978 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(157),
+ /* 985 S> */ B(LdaSmi), U8(1),
+ /* 990 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(159),
+ /* 997 S> */ B(LdaSmi), U8(1),
+ /* 1002 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(161),
+ /* 1009 S> */ B(LdaSmi), U8(1),
+ /* 1014 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(163),
+ /* 1021 S> */ B(LdaSmi), U8(1),
+ /* 1026 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(165),
+ /* 1033 S> */ B(LdaSmi), U8(1),
+ /* 1038 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(167),
+ /* 1045 S> */ B(LdaSmi), U8(1),
+ /* 1050 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(169),
+ /* 1057 S> */ B(LdaSmi), U8(1),
+ /* 1062 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(171),
+ /* 1069 S> */ B(LdaSmi), U8(1),
+ /* 1074 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(173),
+ /* 1081 S> */ B(LdaSmi), U8(1),
+ /* 1086 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(175),
+ /* 1093 S> */ B(LdaSmi), U8(1),
+ /* 1098 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(177),
+ /* 1105 S> */ B(LdaSmi), U8(1),
+ /* 1110 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(179),
+ /* 1117 S> */ B(LdaSmi), U8(1),
+ /* 1122 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(181),
+ /* 1129 S> */ B(LdaSmi), U8(1),
+ /* 1134 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(183),
+ /* 1141 S> */ B(LdaSmi), U8(1),
+ /* 1146 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(185),
+ /* 1153 S> */ B(LdaSmi), U8(1),
+ /* 1158 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(187),
+ /* 1165 S> */ B(LdaSmi), U8(1),
+ /* 1170 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(189),
+ /* 1177 S> */ B(LdaSmi), U8(1),
+ /* 1182 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(191),
+ /* 1189 S> */ B(LdaSmi), U8(1),
+ /* 1194 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(193),
+ /* 1201 S> */ B(LdaSmi), U8(1),
+ /* 1206 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(195),
+ /* 1213 S> */ B(LdaSmi), U8(1),
+ /* 1218 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(197),
+ /* 1225 S> */ B(LdaSmi), U8(1),
+ /* 1230 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(199),
+ /* 1237 S> */ B(LdaSmi), U8(1),
+ /* 1242 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(201),
+ /* 1249 S> */ B(LdaSmi), U8(1),
+ /* 1254 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(203),
+ /* 1261 S> */ B(LdaSmi), U8(1),
+ /* 1266 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(205),
+ /* 1273 S> */ B(LdaSmi), U8(1),
+ /* 1278 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(207),
+ /* 1285 S> */ B(LdaSmi), U8(1),
+ /* 1290 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(209),
+ /* 1297 S> */ B(LdaSmi), U8(1),
+ /* 1302 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(211),
+ /* 1309 S> */ B(LdaSmi), U8(1),
+ /* 1314 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(213),
+ /* 1321 S> */ B(LdaSmi), U8(1),
+ /* 1326 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(215),
+ /* 1333 S> */ B(LdaSmi), U8(1),
+ /* 1338 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(217),
+ /* 1345 S> */ B(LdaSmi), U8(1),
+ /* 1350 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(219),
+ /* 1357 S> */ B(LdaSmi), U8(1),
+ /* 1362 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(221),
+ /* 1369 S> */ B(LdaSmi), U8(1),
+ /* 1374 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(223),
+ /* 1381 S> */ B(LdaSmi), U8(1),
+ /* 1386 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(225),
+ /* 1393 S> */ B(LdaSmi), U8(1),
+ /* 1398 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(227),
+ /* 1405 S> */ B(LdaSmi), U8(1),
+ /* 1410 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(229),
+ /* 1417 S> */ B(LdaSmi), U8(1),
+ /* 1422 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(231),
+ /* 1429 S> */ B(LdaSmi), U8(1),
+ /* 1434 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(233),
+ /* 1441 S> */ B(LdaSmi), U8(1),
+ /* 1446 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(235),
+ /* 1453 S> */ B(LdaSmi), U8(1),
+ /* 1458 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(237),
+ /* 1465 S> */ B(LdaSmi), U8(1),
+ /* 1470 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(239),
+ /* 1477 S> */ B(LdaSmi), U8(1),
+ /* 1482 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(241),
+ /* 1489 S> */ B(LdaSmi), U8(1),
+ /* 1494 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(243),
+ /* 1501 S> */ B(LdaSmi), U8(1),
+ /* 1506 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(245),
+ /* 1513 S> */ B(LdaSmi), U8(1),
+ /* 1518 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(247),
+ /* 1525 S> */ B(LdaSmi), U8(1),
+ /* 1530 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(249),
+ /* 1537 S> */ B(LdaSmi), U8(1),
+ /* 1542 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(251),
+ /* 1549 S> */ B(LdaSmi), U8(1),
+ /* 1554 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(253),
+ /* 1561 S> */ B(LdaSmi), U8(1),
+ /* 1566 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(255),
+ /* 1573 S> */ B(LdaSmi), U8(2),
+ /* 1578 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(257),
+ B(LdaUndefined),
+ /* 1583 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index adffb750bb..3637f78230 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -15,9 +15,9 @@ frame size: 0
parameter count: 1
bytecode array length: 6
bytecodes: [
- B(StackCheck),
- B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
+ /* 49 S> */ B(Return),
]
constant pool: [
"ab+d",
@@ -33,12 +33,12 @@ frame size: 0
parameter count: 1
bytecode array length: 6
bytecodes: [
- B(StackCheck),
- B(CreateRegExpLiteral), U8(0), U8(0), U8(2),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(2),
+ /* 58 S> */ B(Return),
]
constant pool: [
- "(\x5cw+)\x5cs(\x5cw+)",
+ "(\u005cw+)\u005cs(\u005cw+)",
]
handlers: [
]
@@ -49,17 +49,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
- B(StackCheck),
- B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
- B(Star), R(1),
- B(LoadIC), R(1), U8(1), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(2),
- B(Call), R(0), R(1), U8(2), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
+ B(Star), R(1),
+ /* 47 E> */ B(LdrNamedProperty), R(1), U8(1), U8(3), R(0),
+ B(LdaConstant), U8(2),
+ B(Star), R(2),
+ /* 48 E> */ B(Call), R(0), R(1), U8(2), U8(1),
+ /* 62 S> */ B(Return),
]
constant pool: [
"ab+d",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
index d6fb160273..3c5499b4cf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
@@ -272,521 +272,521 @@ frame size: 1
parameter count: 1
bytecode array length: 1033
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Star), R(0),
- B(LdaConstant), U8(2),
- B(Star), R(0),
- B(LdaConstant), U8(3),
- B(Star), R(0),
- B(LdaConstant), U8(4),
- B(Star), R(0),
- B(LdaConstant), U8(5),
- B(Star), R(0),
- B(LdaConstant), U8(6),
- B(Star), R(0),
- B(LdaConstant), U8(7),
- B(Star), R(0),
- B(LdaConstant), U8(8),
- B(Star), R(0),
- B(LdaConstant), U8(9),
- B(Star), R(0),
- B(LdaConstant), U8(10),
- B(Star), R(0),
- B(LdaConstant), U8(11),
- B(Star), R(0),
- B(LdaConstant), U8(12),
- B(Star), R(0),
- B(LdaConstant), U8(13),
- B(Star), R(0),
- B(LdaConstant), U8(14),
- B(Star), R(0),
- B(LdaConstant), U8(15),
- B(Star), R(0),
- B(LdaConstant), U8(16),
- B(Star), R(0),
- B(LdaConstant), U8(17),
- B(Star), R(0),
- B(LdaConstant), U8(18),
- B(Star), R(0),
- B(LdaConstant), U8(19),
- B(Star), R(0),
- B(LdaConstant), U8(20),
- B(Star), R(0),
- B(LdaConstant), U8(21),
- B(Star), R(0),
- B(LdaConstant), U8(22),
- B(Star), R(0),
- B(LdaConstant), U8(23),
- B(Star), R(0),
- B(LdaConstant), U8(24),
- B(Star), R(0),
- B(LdaConstant), U8(25),
- B(Star), R(0),
- B(LdaConstant), U8(26),
- B(Star), R(0),
- B(LdaConstant), U8(27),
- B(Star), R(0),
- B(LdaConstant), U8(28),
- B(Star), R(0),
- B(LdaConstant), U8(29),
- B(Star), R(0),
- B(LdaConstant), U8(30),
- B(Star), R(0),
- B(LdaConstant), U8(31),
- B(Star), R(0),
- B(LdaConstant), U8(32),
- B(Star), R(0),
- B(LdaConstant), U8(33),
- B(Star), R(0),
- B(LdaConstant), U8(34),
- B(Star), R(0),
- B(LdaConstant), U8(35),
- B(Star), R(0),
- B(LdaConstant), U8(36),
- B(Star), R(0),
- B(LdaConstant), U8(37),
- B(Star), R(0),
- B(LdaConstant), U8(38),
- B(Star), R(0),
- B(LdaConstant), U8(39),
- B(Star), R(0),
- B(LdaConstant), U8(40),
- B(Star), R(0),
- B(LdaConstant), U8(41),
- B(Star), R(0),
- B(LdaConstant), U8(42),
- B(Star), R(0),
- B(LdaConstant), U8(43),
- B(Star), R(0),
- B(LdaConstant), U8(44),
- B(Star), R(0),
- B(LdaConstant), U8(45),
- B(Star), R(0),
- B(LdaConstant), U8(46),
- B(Star), R(0),
- B(LdaConstant), U8(47),
- B(Star), R(0),
- B(LdaConstant), U8(48),
- B(Star), R(0),
- B(LdaConstant), U8(49),
- B(Star), R(0),
- B(LdaConstant), U8(50),
- B(Star), R(0),
- B(LdaConstant), U8(51),
- B(Star), R(0),
- B(LdaConstant), U8(52),
- B(Star), R(0),
- B(LdaConstant), U8(53),
- B(Star), R(0),
- B(LdaConstant), U8(54),
- B(Star), R(0),
- B(LdaConstant), U8(55),
- B(Star), R(0),
- B(LdaConstant), U8(56),
- B(Star), R(0),
- B(LdaConstant), U8(57),
- B(Star), R(0),
- B(LdaConstant), U8(58),
- B(Star), R(0),
- B(LdaConstant), U8(59),
- B(Star), R(0),
- B(LdaConstant), U8(60),
- B(Star), R(0),
- B(LdaConstant), U8(61),
- B(Star), R(0),
- B(LdaConstant), U8(62),
- B(Star), R(0),
- B(LdaConstant), U8(63),
- B(Star), R(0),
- B(LdaConstant), U8(64),
- B(Star), R(0),
- B(LdaConstant), U8(65),
- B(Star), R(0),
- B(LdaConstant), U8(66),
- B(Star), R(0),
- B(LdaConstant), U8(67),
- B(Star), R(0),
- B(LdaConstant), U8(68),
- B(Star), R(0),
- B(LdaConstant), U8(69),
- B(Star), R(0),
- B(LdaConstant), U8(70),
- B(Star), R(0),
- B(LdaConstant), U8(71),
- B(Star), R(0),
- B(LdaConstant), U8(72),
- B(Star), R(0),
- B(LdaConstant), U8(73),
- B(Star), R(0),
- B(LdaConstant), U8(74),
- B(Star), R(0),
- B(LdaConstant), U8(75),
- B(Star), R(0),
- B(LdaConstant), U8(76),
- B(Star), R(0),
- B(LdaConstant), U8(77),
- B(Star), R(0),
- B(LdaConstant), U8(78),
- B(Star), R(0),
- B(LdaConstant), U8(79),
- B(Star), R(0),
- B(LdaConstant), U8(80),
- B(Star), R(0),
- B(LdaConstant), U8(81),
- B(Star), R(0),
- B(LdaConstant), U8(82),
- B(Star), R(0),
- B(LdaConstant), U8(83),
- B(Star), R(0),
- B(LdaConstant), U8(84),
- B(Star), R(0),
- B(LdaConstant), U8(85),
- B(Star), R(0),
- B(LdaConstant), U8(86),
- B(Star), R(0),
- B(LdaConstant), U8(87),
- B(Star), R(0),
- B(LdaConstant), U8(88),
- B(Star), R(0),
- B(LdaConstant), U8(89),
- B(Star), R(0),
- B(LdaConstant), U8(90),
- B(Star), R(0),
- B(LdaConstant), U8(91),
- B(Star), R(0),
- B(LdaConstant), U8(92),
- B(Star), R(0),
- B(LdaConstant), U8(93),
- B(Star), R(0),
- B(LdaConstant), U8(94),
- B(Star), R(0),
- B(LdaConstant), U8(95),
- B(Star), R(0),
- B(LdaConstant), U8(96),
- B(Star), R(0),
- B(LdaConstant), U8(97),
- B(Star), R(0),
- B(LdaConstant), U8(98),
- B(Star), R(0),
- B(LdaConstant), U8(99),
- B(Star), R(0),
- B(LdaConstant), U8(100),
- B(Star), R(0),
- B(LdaConstant), U8(101),
- B(Star), R(0),
- B(LdaConstant), U8(102),
- B(Star), R(0),
- B(LdaConstant), U8(103),
- B(Star), R(0),
- B(LdaConstant), U8(104),
- B(Star), R(0),
- B(LdaConstant), U8(105),
- B(Star), R(0),
- B(LdaConstant), U8(106),
- B(Star), R(0),
- B(LdaConstant), U8(107),
- B(Star), R(0),
- B(LdaConstant), U8(108),
- B(Star), R(0),
- B(LdaConstant), U8(109),
- B(Star), R(0),
- B(LdaConstant), U8(110),
- B(Star), R(0),
- B(LdaConstant), U8(111),
- B(Star), R(0),
- B(LdaConstant), U8(112),
- B(Star), R(0),
- B(LdaConstant), U8(113),
- B(Star), R(0),
- B(LdaConstant), U8(114),
- B(Star), R(0),
- B(LdaConstant), U8(115),
- B(Star), R(0),
- B(LdaConstant), U8(116),
- B(Star), R(0),
- B(LdaConstant), U8(117),
- B(Star), R(0),
- B(LdaConstant), U8(118),
- B(Star), R(0),
- B(LdaConstant), U8(119),
- B(Star), R(0),
- B(LdaConstant), U8(120),
- B(Star), R(0),
- B(LdaConstant), U8(121),
- B(Star), R(0),
- B(LdaConstant), U8(122),
- B(Star), R(0),
- B(LdaConstant), U8(123),
- B(Star), R(0),
- B(LdaConstant), U8(124),
- B(Star), R(0),
- B(LdaConstant), U8(125),
- B(Star), R(0),
- B(LdaConstant), U8(126),
- B(Star), R(0),
- B(LdaConstant), U8(127),
- B(Star), R(0),
- B(LdaConstant), U8(128),
- B(Star), R(0),
- B(LdaConstant), U8(129),
- B(Star), R(0),
- B(LdaConstant), U8(130),
- B(Star), R(0),
- B(LdaConstant), U8(131),
- B(Star), R(0),
- B(LdaConstant), U8(132),
- B(Star), R(0),
- B(LdaConstant), U8(133),
- B(Star), R(0),
- B(LdaConstant), U8(134),
- B(Star), R(0),
- B(LdaConstant), U8(135),
- B(Star), R(0),
- B(LdaConstant), U8(136),
- B(Star), R(0),
- B(LdaConstant), U8(137),
- B(Star), R(0),
- B(LdaConstant), U8(138),
- B(Star), R(0),
- B(LdaConstant), U8(139),
- B(Star), R(0),
- B(LdaConstant), U8(140),
- B(Star), R(0),
- B(LdaConstant), U8(141),
- B(Star), R(0),
- B(LdaConstant), U8(142),
- B(Star), R(0),
- B(LdaConstant), U8(143),
- B(Star), R(0),
- B(LdaConstant), U8(144),
- B(Star), R(0),
- B(LdaConstant), U8(145),
- B(Star), R(0),
- B(LdaConstant), U8(146),
- B(Star), R(0),
- B(LdaConstant), U8(147),
- B(Star), R(0),
- B(LdaConstant), U8(148),
- B(Star), R(0),
- B(LdaConstant), U8(149),
- B(Star), R(0),
- B(LdaConstant), U8(150),
- B(Star), R(0),
- B(LdaConstant), U8(151),
- B(Star), R(0),
- B(LdaConstant), U8(152),
- B(Star), R(0),
- B(LdaConstant), U8(153),
- B(Star), R(0),
- B(LdaConstant), U8(154),
- B(Star), R(0),
- B(LdaConstant), U8(155),
- B(Star), R(0),
- B(LdaConstant), U8(156),
- B(Star), R(0),
- B(LdaConstant), U8(157),
- B(Star), R(0),
- B(LdaConstant), U8(158),
- B(Star), R(0),
- B(LdaConstant), U8(159),
- B(Star), R(0),
- B(LdaConstant), U8(160),
- B(Star), R(0),
- B(LdaConstant), U8(161),
- B(Star), R(0),
- B(LdaConstant), U8(162),
- B(Star), R(0),
- B(LdaConstant), U8(163),
- B(Star), R(0),
- B(LdaConstant), U8(164),
- B(Star), R(0),
- B(LdaConstant), U8(165),
- B(Star), R(0),
- B(LdaConstant), U8(166),
- B(Star), R(0),
- B(LdaConstant), U8(167),
- B(Star), R(0),
- B(LdaConstant), U8(168),
- B(Star), R(0),
- B(LdaConstant), U8(169),
- B(Star), R(0),
- B(LdaConstant), U8(170),
- B(Star), R(0),
- B(LdaConstant), U8(171),
- B(Star), R(0),
- B(LdaConstant), U8(172),
- B(Star), R(0),
- B(LdaConstant), U8(173),
- B(Star), R(0),
- B(LdaConstant), U8(174),
- B(Star), R(0),
- B(LdaConstant), U8(175),
- B(Star), R(0),
- B(LdaConstant), U8(176),
- B(Star), R(0),
- B(LdaConstant), U8(177),
- B(Star), R(0),
- B(LdaConstant), U8(178),
- B(Star), R(0),
- B(LdaConstant), U8(179),
- B(Star), R(0),
- B(LdaConstant), U8(180),
- B(Star), R(0),
- B(LdaConstant), U8(181),
- B(Star), R(0),
- B(LdaConstant), U8(182),
- B(Star), R(0),
- B(LdaConstant), U8(183),
- B(Star), R(0),
- B(LdaConstant), U8(184),
- B(Star), R(0),
- B(LdaConstant), U8(185),
- B(Star), R(0),
- B(LdaConstant), U8(186),
- B(Star), R(0),
- B(LdaConstant), U8(187),
- B(Star), R(0),
- B(LdaConstant), U8(188),
- B(Star), R(0),
- B(LdaConstant), U8(189),
- B(Star), R(0),
- B(LdaConstant), U8(190),
- B(Star), R(0),
- B(LdaConstant), U8(191),
- B(Star), R(0),
- B(LdaConstant), U8(192),
- B(Star), R(0),
- B(LdaConstant), U8(193),
- B(Star), R(0),
- B(LdaConstant), U8(194),
- B(Star), R(0),
- B(LdaConstant), U8(195),
- B(Star), R(0),
- B(LdaConstant), U8(196),
- B(Star), R(0),
- B(LdaConstant), U8(197),
- B(Star), R(0),
- B(LdaConstant), U8(198),
- B(Star), R(0),
- B(LdaConstant), U8(199),
- B(Star), R(0),
- B(LdaConstant), U8(200),
- B(Star), R(0),
- B(LdaConstant), U8(201),
- B(Star), R(0),
- B(LdaConstant), U8(202),
- B(Star), R(0),
- B(LdaConstant), U8(203),
- B(Star), R(0),
- B(LdaConstant), U8(204),
- B(Star), R(0),
- B(LdaConstant), U8(205),
- B(Star), R(0),
- B(LdaConstant), U8(206),
- B(Star), R(0),
- B(LdaConstant), U8(207),
- B(Star), R(0),
- B(LdaConstant), U8(208),
- B(Star), R(0),
- B(LdaConstant), U8(209),
- B(Star), R(0),
- B(LdaConstant), U8(210),
- B(Star), R(0),
- B(LdaConstant), U8(211),
- B(Star), R(0),
- B(LdaConstant), U8(212),
- B(Star), R(0),
- B(LdaConstant), U8(213),
- B(Star), R(0),
- B(LdaConstant), U8(214),
- B(Star), R(0),
- B(LdaConstant), U8(215),
- B(Star), R(0),
- B(LdaConstant), U8(216),
- B(Star), R(0),
- B(LdaConstant), U8(217),
- B(Star), R(0),
- B(LdaConstant), U8(218),
- B(Star), R(0),
- B(LdaConstant), U8(219),
- B(Star), R(0),
- B(LdaConstant), U8(220),
- B(Star), R(0),
- B(LdaConstant), U8(221),
- B(Star), R(0),
- B(LdaConstant), U8(222),
- B(Star), R(0),
- B(LdaConstant), U8(223),
- B(Star), R(0),
- B(LdaConstant), U8(224),
- B(Star), R(0),
- B(LdaConstant), U8(225),
- B(Star), R(0),
- B(LdaConstant), U8(226),
- B(Star), R(0),
- B(LdaConstant), U8(227),
- B(Star), R(0),
- B(LdaConstant), U8(228),
- B(Star), R(0),
- B(LdaConstant), U8(229),
- B(Star), R(0),
- B(LdaConstant), U8(230),
- B(Star), R(0),
- B(LdaConstant), U8(231),
- B(Star), R(0),
- B(LdaConstant), U8(232),
- B(Star), R(0),
- B(LdaConstant), U8(233),
- B(Star), R(0),
- B(LdaConstant), U8(234),
- B(Star), R(0),
- B(LdaConstant), U8(235),
- B(Star), R(0),
- B(LdaConstant), U8(236),
- B(Star), R(0),
- B(LdaConstant), U8(237),
- B(Star), R(0),
- B(LdaConstant), U8(238),
- B(Star), R(0),
- B(LdaConstant), U8(239),
- B(Star), R(0),
- B(LdaConstant), U8(240),
- B(Star), R(0),
- B(LdaConstant), U8(241),
- B(Star), R(0),
- B(LdaConstant), U8(242),
- B(Star), R(0),
- B(LdaConstant), U8(243),
- B(Star), R(0),
- B(LdaConstant), U8(244),
- B(Star), R(0),
- B(LdaConstant), U8(245),
- B(Star), R(0),
- B(LdaConstant), U8(246),
- B(Star), R(0),
- B(LdaConstant), U8(247),
- B(Star), R(0),
- B(LdaConstant), U8(248),
- B(Star), R(0),
- B(LdaConstant), U8(249),
- B(Star), R(0),
- B(LdaConstant), U8(250),
- B(Star), R(0),
- B(LdaConstant), U8(251),
- B(Star), R(0),
- B(LdaConstant), U8(252),
- B(Star), R(0),
- B(LdaConstant), U8(253),
- B(Star), R(0),
- B(LdaConstant), U8(254),
- B(Star), R(0),
- B(LdaConstant), U8(255),
- B(Star), R(0),
- B(Wide), B(CreateRegExpLiteral), U16(256), U16(0), U8(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 41 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 51 S> */ B(LdaConstant), U8(1),
+ B(Star), R(0),
+ /* 61 S> */ B(LdaConstant), U8(2),
+ B(Star), R(0),
+ /* 71 S> */ B(LdaConstant), U8(3),
+ B(Star), R(0),
+ /* 81 S> */ B(LdaConstant), U8(4),
+ B(Star), R(0),
+ /* 91 S> */ B(LdaConstant), U8(5),
+ B(Star), R(0),
+ /* 101 S> */ B(LdaConstant), U8(6),
+ B(Star), R(0),
+ /* 111 S> */ B(LdaConstant), U8(7),
+ B(Star), R(0),
+ /* 121 S> */ B(LdaConstant), U8(8),
+ B(Star), R(0),
+ /* 131 S> */ B(LdaConstant), U8(9),
+ B(Star), R(0),
+ /* 141 S> */ B(LdaConstant), U8(10),
+ B(Star), R(0),
+ /* 151 S> */ B(LdaConstant), U8(11),
+ B(Star), R(0),
+ /* 161 S> */ B(LdaConstant), U8(12),
+ B(Star), R(0),
+ /* 171 S> */ B(LdaConstant), U8(13),
+ B(Star), R(0),
+ /* 181 S> */ B(LdaConstant), U8(14),
+ B(Star), R(0),
+ /* 191 S> */ B(LdaConstant), U8(15),
+ B(Star), R(0),
+ /* 201 S> */ B(LdaConstant), U8(16),
+ B(Star), R(0),
+ /* 211 S> */ B(LdaConstant), U8(17),
+ B(Star), R(0),
+ /* 221 S> */ B(LdaConstant), U8(18),
+ B(Star), R(0),
+ /* 231 S> */ B(LdaConstant), U8(19),
+ B(Star), R(0),
+ /* 241 S> */ B(LdaConstant), U8(20),
+ B(Star), R(0),
+ /* 251 S> */ B(LdaConstant), U8(21),
+ B(Star), R(0),
+ /* 261 S> */ B(LdaConstant), U8(22),
+ B(Star), R(0),
+ /* 271 S> */ B(LdaConstant), U8(23),
+ B(Star), R(0),
+ /* 281 S> */ B(LdaConstant), U8(24),
+ B(Star), R(0),
+ /* 291 S> */ B(LdaConstant), U8(25),
+ B(Star), R(0),
+ /* 301 S> */ B(LdaConstant), U8(26),
+ B(Star), R(0),
+ /* 311 S> */ B(LdaConstant), U8(27),
+ B(Star), R(0),
+ /* 321 S> */ B(LdaConstant), U8(28),
+ B(Star), R(0),
+ /* 331 S> */ B(LdaConstant), U8(29),
+ B(Star), R(0),
+ /* 341 S> */ B(LdaConstant), U8(30),
+ B(Star), R(0),
+ /* 351 S> */ B(LdaConstant), U8(31),
+ B(Star), R(0),
+ /* 361 S> */ B(LdaConstant), U8(32),
+ B(Star), R(0),
+ /* 371 S> */ B(LdaConstant), U8(33),
+ B(Star), R(0),
+ /* 381 S> */ B(LdaConstant), U8(34),
+ B(Star), R(0),
+ /* 391 S> */ B(LdaConstant), U8(35),
+ B(Star), R(0),
+ /* 401 S> */ B(LdaConstant), U8(36),
+ B(Star), R(0),
+ /* 411 S> */ B(LdaConstant), U8(37),
+ B(Star), R(0),
+ /* 421 S> */ B(LdaConstant), U8(38),
+ B(Star), R(0),
+ /* 431 S> */ B(LdaConstant), U8(39),
+ B(Star), R(0),
+ /* 441 S> */ B(LdaConstant), U8(40),
+ B(Star), R(0),
+ /* 451 S> */ B(LdaConstant), U8(41),
+ B(Star), R(0),
+ /* 461 S> */ B(LdaConstant), U8(42),
+ B(Star), R(0),
+ /* 471 S> */ B(LdaConstant), U8(43),
+ B(Star), R(0),
+ /* 481 S> */ B(LdaConstant), U8(44),
+ B(Star), R(0),
+ /* 491 S> */ B(LdaConstant), U8(45),
+ B(Star), R(0),
+ /* 501 S> */ B(LdaConstant), U8(46),
+ B(Star), R(0),
+ /* 511 S> */ B(LdaConstant), U8(47),
+ B(Star), R(0),
+ /* 521 S> */ B(LdaConstant), U8(48),
+ B(Star), R(0),
+ /* 531 S> */ B(LdaConstant), U8(49),
+ B(Star), R(0),
+ /* 541 S> */ B(LdaConstant), U8(50),
+ B(Star), R(0),
+ /* 551 S> */ B(LdaConstant), U8(51),
+ B(Star), R(0),
+ /* 561 S> */ B(LdaConstant), U8(52),
+ B(Star), R(0),
+ /* 571 S> */ B(LdaConstant), U8(53),
+ B(Star), R(0),
+ /* 581 S> */ B(LdaConstant), U8(54),
+ B(Star), R(0),
+ /* 591 S> */ B(LdaConstant), U8(55),
+ B(Star), R(0),
+ /* 601 S> */ B(LdaConstant), U8(56),
+ B(Star), R(0),
+ /* 611 S> */ B(LdaConstant), U8(57),
+ B(Star), R(0),
+ /* 621 S> */ B(LdaConstant), U8(58),
+ B(Star), R(0),
+ /* 631 S> */ B(LdaConstant), U8(59),
+ B(Star), R(0),
+ /* 641 S> */ B(LdaConstant), U8(60),
+ B(Star), R(0),
+ /* 651 S> */ B(LdaConstant), U8(61),
+ B(Star), R(0),
+ /* 661 S> */ B(LdaConstant), U8(62),
+ B(Star), R(0),
+ /* 671 S> */ B(LdaConstant), U8(63),
+ B(Star), R(0),
+ /* 681 S> */ B(LdaConstant), U8(64),
+ B(Star), R(0),
+ /* 691 S> */ B(LdaConstant), U8(65),
+ B(Star), R(0),
+ /* 701 S> */ B(LdaConstant), U8(66),
+ B(Star), R(0),
+ /* 711 S> */ B(LdaConstant), U8(67),
+ B(Star), R(0),
+ /* 721 S> */ B(LdaConstant), U8(68),
+ B(Star), R(0),
+ /* 731 S> */ B(LdaConstant), U8(69),
+ B(Star), R(0),
+ /* 741 S> */ B(LdaConstant), U8(70),
+ B(Star), R(0),
+ /* 751 S> */ B(LdaConstant), U8(71),
+ B(Star), R(0),
+ /* 761 S> */ B(LdaConstant), U8(72),
+ B(Star), R(0),
+ /* 771 S> */ B(LdaConstant), U8(73),
+ B(Star), R(0),
+ /* 781 S> */ B(LdaConstant), U8(74),
+ B(Star), R(0),
+ /* 791 S> */ B(LdaConstant), U8(75),
+ B(Star), R(0),
+ /* 801 S> */ B(LdaConstant), U8(76),
+ B(Star), R(0),
+ /* 811 S> */ B(LdaConstant), U8(77),
+ B(Star), R(0),
+ /* 821 S> */ B(LdaConstant), U8(78),
+ B(Star), R(0),
+ /* 831 S> */ B(LdaConstant), U8(79),
+ B(Star), R(0),
+ /* 841 S> */ B(LdaConstant), U8(80),
+ B(Star), R(0),
+ /* 851 S> */ B(LdaConstant), U8(81),
+ B(Star), R(0),
+ /* 861 S> */ B(LdaConstant), U8(82),
+ B(Star), R(0),
+ /* 871 S> */ B(LdaConstant), U8(83),
+ B(Star), R(0),
+ /* 881 S> */ B(LdaConstant), U8(84),
+ B(Star), R(0),
+ /* 891 S> */ B(LdaConstant), U8(85),
+ B(Star), R(0),
+ /* 901 S> */ B(LdaConstant), U8(86),
+ B(Star), R(0),
+ /* 911 S> */ B(LdaConstant), U8(87),
+ B(Star), R(0),
+ /* 921 S> */ B(LdaConstant), U8(88),
+ B(Star), R(0),
+ /* 931 S> */ B(LdaConstant), U8(89),
+ B(Star), R(0),
+ /* 941 S> */ B(LdaConstant), U8(90),
+ B(Star), R(0),
+ /* 951 S> */ B(LdaConstant), U8(91),
+ B(Star), R(0),
+ /* 961 S> */ B(LdaConstant), U8(92),
+ B(Star), R(0),
+ /* 971 S> */ B(LdaConstant), U8(93),
+ B(Star), R(0),
+ /* 981 S> */ B(LdaConstant), U8(94),
+ B(Star), R(0),
+ /* 991 S> */ B(LdaConstant), U8(95),
+ B(Star), R(0),
+ /* 1001 S> */ B(LdaConstant), U8(96),
+ B(Star), R(0),
+ /* 1011 S> */ B(LdaConstant), U8(97),
+ B(Star), R(0),
+ /* 1021 S> */ B(LdaConstant), U8(98),
+ B(Star), R(0),
+ /* 1031 S> */ B(LdaConstant), U8(99),
+ B(Star), R(0),
+ /* 1041 S> */ B(LdaConstant), U8(100),
+ B(Star), R(0),
+ /* 1051 S> */ B(LdaConstant), U8(101),
+ B(Star), R(0),
+ /* 1061 S> */ B(LdaConstant), U8(102),
+ B(Star), R(0),
+ /* 1071 S> */ B(LdaConstant), U8(103),
+ B(Star), R(0),
+ /* 1081 S> */ B(LdaConstant), U8(104),
+ B(Star), R(0),
+ /* 1091 S> */ B(LdaConstant), U8(105),
+ B(Star), R(0),
+ /* 1101 S> */ B(LdaConstant), U8(106),
+ B(Star), R(0),
+ /* 1111 S> */ B(LdaConstant), U8(107),
+ B(Star), R(0),
+ /* 1121 S> */ B(LdaConstant), U8(108),
+ B(Star), R(0),
+ /* 1131 S> */ B(LdaConstant), U8(109),
+ B(Star), R(0),
+ /* 1141 S> */ B(LdaConstant), U8(110),
+ B(Star), R(0),
+ /* 1151 S> */ B(LdaConstant), U8(111),
+ B(Star), R(0),
+ /* 1161 S> */ B(LdaConstant), U8(112),
+ B(Star), R(0),
+ /* 1171 S> */ B(LdaConstant), U8(113),
+ B(Star), R(0),
+ /* 1181 S> */ B(LdaConstant), U8(114),
+ B(Star), R(0),
+ /* 1191 S> */ B(LdaConstant), U8(115),
+ B(Star), R(0),
+ /* 1201 S> */ B(LdaConstant), U8(116),
+ B(Star), R(0),
+ /* 1211 S> */ B(LdaConstant), U8(117),
+ B(Star), R(0),
+ /* 1221 S> */ B(LdaConstant), U8(118),
+ B(Star), R(0),
+ /* 1231 S> */ B(LdaConstant), U8(119),
+ B(Star), R(0),
+ /* 1241 S> */ B(LdaConstant), U8(120),
+ B(Star), R(0),
+ /* 1251 S> */ B(LdaConstant), U8(121),
+ B(Star), R(0),
+ /* 1261 S> */ B(LdaConstant), U8(122),
+ B(Star), R(0),
+ /* 1271 S> */ B(LdaConstant), U8(123),
+ B(Star), R(0),
+ /* 1281 S> */ B(LdaConstant), U8(124),
+ B(Star), R(0),
+ /* 1291 S> */ B(LdaConstant), U8(125),
+ B(Star), R(0),
+ /* 1301 S> */ B(LdaConstant), U8(126),
+ B(Star), R(0),
+ /* 1311 S> */ B(LdaConstant), U8(127),
+ B(Star), R(0),
+ /* 1321 S> */ B(LdaConstant), U8(128),
+ B(Star), R(0),
+ /* 1331 S> */ B(LdaConstant), U8(129),
+ B(Star), R(0),
+ /* 1341 S> */ B(LdaConstant), U8(130),
+ B(Star), R(0),
+ /* 1351 S> */ B(LdaConstant), U8(131),
+ B(Star), R(0),
+ /* 1361 S> */ B(LdaConstant), U8(132),
+ B(Star), R(0),
+ /* 1371 S> */ B(LdaConstant), U8(133),
+ B(Star), R(0),
+ /* 1381 S> */ B(LdaConstant), U8(134),
+ B(Star), R(0),
+ /* 1391 S> */ B(LdaConstant), U8(135),
+ B(Star), R(0),
+ /* 1401 S> */ B(LdaConstant), U8(136),
+ B(Star), R(0),
+ /* 1411 S> */ B(LdaConstant), U8(137),
+ B(Star), R(0),
+ /* 1421 S> */ B(LdaConstant), U8(138),
+ B(Star), R(0),
+ /* 1431 S> */ B(LdaConstant), U8(139),
+ B(Star), R(0),
+ /* 1441 S> */ B(LdaConstant), U8(140),
+ B(Star), R(0),
+ /* 1451 S> */ B(LdaConstant), U8(141),
+ B(Star), R(0),
+ /* 1461 S> */ B(LdaConstant), U8(142),
+ B(Star), R(0),
+ /* 1471 S> */ B(LdaConstant), U8(143),
+ B(Star), R(0),
+ /* 1481 S> */ B(LdaConstant), U8(144),
+ B(Star), R(0),
+ /* 1491 S> */ B(LdaConstant), U8(145),
+ B(Star), R(0),
+ /* 1501 S> */ B(LdaConstant), U8(146),
+ B(Star), R(0),
+ /* 1511 S> */ B(LdaConstant), U8(147),
+ B(Star), R(0),
+ /* 1521 S> */ B(LdaConstant), U8(148),
+ B(Star), R(0),
+ /* 1531 S> */ B(LdaConstant), U8(149),
+ B(Star), R(0),
+ /* 1541 S> */ B(LdaConstant), U8(150),
+ B(Star), R(0),
+ /* 1551 S> */ B(LdaConstant), U8(151),
+ B(Star), R(0),
+ /* 1561 S> */ B(LdaConstant), U8(152),
+ B(Star), R(0),
+ /* 1571 S> */ B(LdaConstant), U8(153),
+ B(Star), R(0),
+ /* 1581 S> */ B(LdaConstant), U8(154),
+ B(Star), R(0),
+ /* 1591 S> */ B(LdaConstant), U8(155),
+ B(Star), R(0),
+ /* 1601 S> */ B(LdaConstant), U8(156),
+ B(Star), R(0),
+ /* 1611 S> */ B(LdaConstant), U8(157),
+ B(Star), R(0),
+ /* 1621 S> */ B(LdaConstant), U8(158),
+ B(Star), R(0),
+ /* 1631 S> */ B(LdaConstant), U8(159),
+ B(Star), R(0),
+ /* 1641 S> */ B(LdaConstant), U8(160),
+ B(Star), R(0),
+ /* 1651 S> */ B(LdaConstant), U8(161),
+ B(Star), R(0),
+ /* 1661 S> */ B(LdaConstant), U8(162),
+ B(Star), R(0),
+ /* 1671 S> */ B(LdaConstant), U8(163),
+ B(Star), R(0),
+ /* 1681 S> */ B(LdaConstant), U8(164),
+ B(Star), R(0),
+ /* 1691 S> */ B(LdaConstant), U8(165),
+ B(Star), R(0),
+ /* 1701 S> */ B(LdaConstant), U8(166),
+ B(Star), R(0),
+ /* 1711 S> */ B(LdaConstant), U8(167),
+ B(Star), R(0),
+ /* 1721 S> */ B(LdaConstant), U8(168),
+ B(Star), R(0),
+ /* 1731 S> */ B(LdaConstant), U8(169),
+ B(Star), R(0),
+ /* 1741 S> */ B(LdaConstant), U8(170),
+ B(Star), R(0),
+ /* 1751 S> */ B(LdaConstant), U8(171),
+ B(Star), R(0),
+ /* 1761 S> */ B(LdaConstant), U8(172),
+ B(Star), R(0),
+ /* 1771 S> */ B(LdaConstant), U8(173),
+ B(Star), R(0),
+ /* 1781 S> */ B(LdaConstant), U8(174),
+ B(Star), R(0),
+ /* 1791 S> */ B(LdaConstant), U8(175),
+ B(Star), R(0),
+ /* 1801 S> */ B(LdaConstant), U8(176),
+ B(Star), R(0),
+ /* 1811 S> */ B(LdaConstant), U8(177),
+ B(Star), R(0),
+ /* 1821 S> */ B(LdaConstant), U8(178),
+ B(Star), R(0),
+ /* 1831 S> */ B(LdaConstant), U8(179),
+ B(Star), R(0),
+ /* 1841 S> */ B(LdaConstant), U8(180),
+ B(Star), R(0),
+ /* 1851 S> */ B(LdaConstant), U8(181),
+ B(Star), R(0),
+ /* 1861 S> */ B(LdaConstant), U8(182),
+ B(Star), R(0),
+ /* 1871 S> */ B(LdaConstant), U8(183),
+ B(Star), R(0),
+ /* 1881 S> */ B(LdaConstant), U8(184),
+ B(Star), R(0),
+ /* 1891 S> */ B(LdaConstant), U8(185),
+ B(Star), R(0),
+ /* 1901 S> */ B(LdaConstant), U8(186),
+ B(Star), R(0),
+ /* 1911 S> */ B(LdaConstant), U8(187),
+ B(Star), R(0),
+ /* 1921 S> */ B(LdaConstant), U8(188),
+ B(Star), R(0),
+ /* 1931 S> */ B(LdaConstant), U8(189),
+ B(Star), R(0),
+ /* 1941 S> */ B(LdaConstant), U8(190),
+ B(Star), R(0),
+ /* 1951 S> */ B(LdaConstant), U8(191),
+ B(Star), R(0),
+ /* 1961 S> */ B(LdaConstant), U8(192),
+ B(Star), R(0),
+ /* 1971 S> */ B(LdaConstant), U8(193),
+ B(Star), R(0),
+ /* 1981 S> */ B(LdaConstant), U8(194),
+ B(Star), R(0),
+ /* 1991 S> */ B(LdaConstant), U8(195),
+ B(Star), R(0),
+ /* 2001 S> */ B(LdaConstant), U8(196),
+ B(Star), R(0),
+ /* 2011 S> */ B(LdaConstant), U8(197),
+ B(Star), R(0),
+ /* 2021 S> */ B(LdaConstant), U8(198),
+ B(Star), R(0),
+ /* 2031 S> */ B(LdaConstant), U8(199),
+ B(Star), R(0),
+ /* 2041 S> */ B(LdaConstant), U8(200),
+ B(Star), R(0),
+ /* 2051 S> */ B(LdaConstant), U8(201),
+ B(Star), R(0),
+ /* 2061 S> */ B(LdaConstant), U8(202),
+ B(Star), R(0),
+ /* 2071 S> */ B(LdaConstant), U8(203),
+ B(Star), R(0),
+ /* 2081 S> */ B(LdaConstant), U8(204),
+ B(Star), R(0),
+ /* 2091 S> */ B(LdaConstant), U8(205),
+ B(Star), R(0),
+ /* 2101 S> */ B(LdaConstant), U8(206),
+ B(Star), R(0),
+ /* 2111 S> */ B(LdaConstant), U8(207),
+ B(Star), R(0),
+ /* 2121 S> */ B(LdaConstant), U8(208),
+ B(Star), R(0),
+ /* 2131 S> */ B(LdaConstant), U8(209),
+ B(Star), R(0),
+ /* 2141 S> */ B(LdaConstant), U8(210),
+ B(Star), R(0),
+ /* 2151 S> */ B(LdaConstant), U8(211),
+ B(Star), R(0),
+ /* 2161 S> */ B(LdaConstant), U8(212),
+ B(Star), R(0),
+ /* 2171 S> */ B(LdaConstant), U8(213),
+ B(Star), R(0),
+ /* 2181 S> */ B(LdaConstant), U8(214),
+ B(Star), R(0),
+ /* 2191 S> */ B(LdaConstant), U8(215),
+ B(Star), R(0),
+ /* 2201 S> */ B(LdaConstant), U8(216),
+ B(Star), R(0),
+ /* 2211 S> */ B(LdaConstant), U8(217),
+ B(Star), R(0),
+ /* 2221 S> */ B(LdaConstant), U8(218),
+ B(Star), R(0),
+ /* 2231 S> */ B(LdaConstant), U8(219),
+ B(Star), R(0),
+ /* 2241 S> */ B(LdaConstant), U8(220),
+ B(Star), R(0),
+ /* 2251 S> */ B(LdaConstant), U8(221),
+ B(Star), R(0),
+ /* 2261 S> */ B(LdaConstant), U8(222),
+ B(Star), R(0),
+ /* 2271 S> */ B(LdaConstant), U8(223),
+ B(Star), R(0),
+ /* 2281 S> */ B(LdaConstant), U8(224),
+ B(Star), R(0),
+ /* 2291 S> */ B(LdaConstant), U8(225),
+ B(Star), R(0),
+ /* 2301 S> */ B(LdaConstant), U8(226),
+ B(Star), R(0),
+ /* 2311 S> */ B(LdaConstant), U8(227),
+ B(Star), R(0),
+ /* 2321 S> */ B(LdaConstant), U8(228),
+ B(Star), R(0),
+ /* 2331 S> */ B(LdaConstant), U8(229),
+ B(Star), R(0),
+ /* 2341 S> */ B(LdaConstant), U8(230),
+ B(Star), R(0),
+ /* 2351 S> */ B(LdaConstant), U8(231),
+ B(Star), R(0),
+ /* 2361 S> */ B(LdaConstant), U8(232),
+ B(Star), R(0),
+ /* 2371 S> */ B(LdaConstant), U8(233),
+ B(Star), R(0),
+ /* 2381 S> */ B(LdaConstant), U8(234),
+ B(Star), R(0),
+ /* 2391 S> */ B(LdaConstant), U8(235),
+ B(Star), R(0),
+ /* 2401 S> */ B(LdaConstant), U8(236),
+ B(Star), R(0),
+ /* 2411 S> */ B(LdaConstant), U8(237),
+ B(Star), R(0),
+ /* 2421 S> */ B(LdaConstant), U8(238),
+ B(Star), R(0),
+ /* 2431 S> */ B(LdaConstant), U8(239),
+ B(Star), R(0),
+ /* 2441 S> */ B(LdaConstant), U8(240),
+ B(Star), R(0),
+ /* 2451 S> */ B(LdaConstant), U8(241),
+ B(Star), R(0),
+ /* 2461 S> */ B(LdaConstant), U8(242),
+ B(Star), R(0),
+ /* 2471 S> */ B(LdaConstant), U8(243),
+ B(Star), R(0),
+ /* 2481 S> */ B(LdaConstant), U8(244),
+ B(Star), R(0),
+ /* 2491 S> */ B(LdaConstant), U8(245),
+ B(Star), R(0),
+ /* 2501 S> */ B(LdaConstant), U8(246),
+ B(Star), R(0),
+ /* 2511 S> */ B(LdaConstant), U8(247),
+ B(Star), R(0),
+ /* 2521 S> */ B(LdaConstant), U8(248),
+ B(Star), R(0),
+ /* 2531 S> */ B(LdaConstant), U8(249),
+ B(Star), R(0),
+ /* 2541 S> */ B(LdaConstant), U8(250),
+ B(Star), R(0),
+ /* 2551 S> */ B(LdaConstant), U8(251),
+ B(Star), R(0),
+ /* 2561 S> */ B(LdaConstant), U8(252),
+ B(Star), R(0),
+ /* 2571 S> */ B(LdaConstant), U8(253),
+ B(Star), R(0),
+ /* 2581 S> */ B(LdaConstant), U8(254),
+ B(Star), R(0),
+ /* 2591 S> */ B(LdaConstant), U8(255),
+ B(Star), R(0),
+ /* 2601 S> */ B(Wide), B(CreateRegExpLiteral), U16(256), U16(0), U8(0),
+ /* 2616 S> */ B(Return),
]
constant pool: [
InstanceType::HEAP_NUMBER_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
index e9283cdf51..c632a76e69 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
@@ -16,27 +16,24 @@ snippet: "
}
return ld_a;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 31
+bytecode array length: 26
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Add), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(10),
- B(TestGreaterThan), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(4),
- B(Jump), U8(-21),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 48 E> */ B(StackCheck),
+ /* 64 S> */ B(Ldar), R(0),
+ /* 78 E> */ B(Add), R(0), U8(1),
+ B(Star), R(0),
+ /* 86 S> */ B(LdaSmi), U8(10),
+ /* 95 E> */ B(TestGreaterThan), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 101 S> */ B(Jump), U8(4),
+ B(Jump), U8(-16),
+ /* 110 S> */ B(Ldar), R(0),
+ /* 123 S> */ B(Return),
]
constant pool: [
]
@@ -52,26 +49,23 @@ snippet: "
} while(false);
return ld_a;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 29
+bytecode array length: 23
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Add), R(1),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(10),
- B(TestGreaterThan), R(1),
- B(JumpIfFalse), U8(4),
- B(Jump), U8(2),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 48 E> */ B(StackCheck),
+ /* 55 S> */ B(Nop),
+ /* 69 E> */ B(Add), R(0), U8(1),
+ B(Star), R(0),
+ /* 77 S> */ B(LdaSmi), U8(10),
+ /* 86 E> */ B(TestGreaterThan), R(0),
+ B(JumpIfFalse), U8(4),
+ /* 92 S> */ B(Jump), U8(2),
+ /* 118 S> */ B(Ldar), R(0),
+ /* 131 S> */ B(Return),
]
constant pool: [
]
@@ -84,18 +78,18 @@ snippet: "
ld_a = ld_a + ld_a;
return ld_a;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Star), R(1),
- B(Ldar), R(0),
- B(Add), R(1),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 50 S> */ B(Nop),
+ /* 64 E> */ B(Add), R(0), U8(1),
+ B(Star), R(0),
+ /* 72 S> */ B(Nop),
+ /* 85 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
index 151c334221..5cc49b2035 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -18,11 +18,11 @@ frame size: 0
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(2),
- B(StaGlobalSloppy), U8(0), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 21 E> */ B(StackCheck),
+ /* 26 S> */ B(LdaSmi), U8(2),
+ /* 28 E> */ B(StaGlobalSloppy), U8(0), U8(1),
+ B(LdaUndefined),
+ /* 33 S> */ B(Return),
]
constant pool: [
"a",
@@ -39,11 +39,11 @@ frame size: 0
parameter count: 2
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(StaGlobalSloppy), U8(0), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 26 E> */ B(StackCheck),
+ /* 32 S> */ B(Ldar), R(arg0),
+ /* 34 E> */ B(StaGlobalSloppy), U8(0), U8(1),
+ B(LdaUndefined),
+ /* 39 S> */ B(Return),
]
constant pool: [
"a",
@@ -61,11 +61,11 @@ frame size: 0
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(2),
- B(StaGlobalStrict), U8(0), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 35 E> */ B(StackCheck),
+ /* 40 S> */ B(LdaSmi), U8(2),
+ /* 42 E> */ B(StaGlobalStrict), U8(0), U8(1),
+ B(LdaUndefined),
+ /* 47 S> */ B(Return),
]
constant pool: [
"a",
@@ -83,11 +83,11 @@ frame size: 0
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(2),
- B(StaGlobalSloppy), U8(0), U8(1),
- B(LdaUndefined),
- B(Return),
+ /* 17 E> */ B(StackCheck),
+ /* 22 S> */ B(LdaSmi), U8(2),
+ /* 24 E> */ B(StaGlobalSloppy), U8(0), U8(1),
+ B(LdaUndefined),
+ /* 29 S> */ B(Return),
]
constant pool: [
"a",
@@ -231,399 +231,271 @@ snippet: "
}
f({name: 1});
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 1035
+bytecode array length: 651
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(255),
- B(LdaSmi), U8(2),
- B(Wide), B(StaGlobalSloppy), U16(1), U16(257),
- B(LdaUndefined),
- B(Return),
+ /* 17 E> */ B(StackCheck),
+ /* 25 S> */ B(Nop),
+ /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 35 S> */ B(Nop),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 45 S> */ B(Nop),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 55 S> */ B(Nop),
+ /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 65 S> */ B(Nop),
+ /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 75 S> */ B(Nop),
+ /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 85 S> */ B(Nop),
+ /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 95 S> */ B(Nop),
+ /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 105 S> */ B(Nop),
+ /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 115 S> */ B(Nop),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 125 S> */ B(Nop),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 135 S> */ B(Nop),
+ /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 145 S> */ B(Nop),
+ /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 155 S> */ B(Nop),
+ /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 165 S> */ B(Nop),
+ /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 175 S> */ B(Nop),
+ /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 185 S> */ B(Nop),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 195 S> */ B(Nop),
+ /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 205 S> */ B(Nop),
+ /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 215 S> */ B(Nop),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 225 S> */ B(Nop),
+ /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 235 S> */ B(Nop),
+ /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 245 S> */ B(Nop),
+ /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 255 S> */ B(Nop),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 265 S> */ B(Nop),
+ /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 275 S> */ B(Nop),
+ /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 285 S> */ B(Nop),
+ /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 295 S> */ B(Nop),
+ /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 305 S> */ B(Nop),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 315 S> */ B(Nop),
+ /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 325 S> */ B(Nop),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 335 S> */ B(Nop),
+ /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 345 S> */ B(Nop),
+ /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 355 S> */ B(Nop),
+ /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 365 S> */ B(Nop),
+ /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 375 S> */ B(Nop),
+ /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 385 S> */ B(Nop),
+ /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 395 S> */ B(Nop),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 405 S> */ B(Nop),
+ /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 415 S> */ B(Nop),
+ /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 425 S> */ B(Nop),
+ /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 435 S> */ B(Nop),
+ /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 445 S> */ B(Nop),
+ /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 455 S> */ B(Nop),
+ /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 465 S> */ B(Nop),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 475 S> */ B(Nop),
+ /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 485 S> */ B(Nop),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 495 S> */ B(Nop),
+ /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 505 S> */ B(Nop),
+ /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 515 S> */ B(Nop),
+ /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 525 S> */ B(Nop),
+ /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 535 S> */ B(Nop),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 545 S> */ B(Nop),
+ /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 555 S> */ B(Nop),
+ /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 565 S> */ B(Nop),
+ /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 575 S> */ B(Nop),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 585 S> */ B(Nop),
+ /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 595 S> */ B(Nop),
+ /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 605 S> */ B(Nop),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 615 S> */ B(Nop),
+ /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 625 S> */ B(Nop),
+ /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 635 S> */ B(Nop),
+ /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 645 S> */ B(Nop),
+ /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 655 S> */ B(Nop),
+ /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 665 S> */ B(Nop),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 675 S> */ B(Nop),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 685 S> */ B(Nop),
+ /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 695 S> */ B(Nop),
+ /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 705 S> */ B(Nop),
+ /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 715 S> */ B(Nop),
+ /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 725 S> */ B(Nop),
+ /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 735 S> */ B(Nop),
+ /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 745 S> */ B(Nop),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 755 S> */ B(Nop),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 765 S> */ B(Nop),
+ /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 775 S> */ B(Nop),
+ /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 785 S> */ B(Nop),
+ /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 795 S> */ B(Nop),
+ /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 805 S> */ B(Nop),
+ /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 815 S> */ B(Nop),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 825 S> */ B(Nop),
+ /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 835 S> */ B(Nop),
+ /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 845 S> */ B(Nop),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 855 S> */ B(Nop),
+ /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 865 S> */ B(Nop),
+ /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 875 S> */ B(Nop),
+ /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 885 S> */ B(Nop),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 895 S> */ B(Nop),
+ /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 905 S> */ B(Nop),
+ /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 915 S> */ B(Nop),
+ /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 925 S> */ B(Nop),
+ /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 935 S> */ B(Nop),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 945 S> */ B(Nop),
+ /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 955 S> */ B(Nop),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 965 S> */ B(Nop),
+ /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 975 S> */ B(Nop),
+ /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 985 S> */ B(Nop),
+ /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 995 S> */ B(Nop),
+ /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 1005 S> */ B(Nop),
+ /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 1015 S> */ B(Nop),
+ /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 1025 S> */ B(Nop),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 1035 S> */ B(Nop),
+ /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 1045 S> */ B(Nop),
+ /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 1055 S> */ B(Nop),
+ /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 1065 S> */ B(Nop),
+ /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 1075 S> */ B(Nop),
+ /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 1085 S> */ B(Nop),
+ /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 1095 S> */ B(Nop),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 1105 S> */ B(Nop),
+ /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 1115 S> */ B(Nop),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 1125 S> */ B(Nop),
+ /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1135 S> */ B(Nop),
+ /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1145 S> */ B(Nop),
+ /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1155 S> */ B(Nop),
+ /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1165 S> */ B(Nop),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1175 S> */ B(Nop),
+ /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1185 S> */ B(Nop),
+ /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1195 S> */ B(Nop),
+ /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1205 S> */ B(Nop),
+ /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1215 S> */ B(Nop),
+ /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1225 S> */ B(Nop),
+ /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1235 S> */ B(Nop),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1245 S> */ B(Nop),
+ /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1255 S> */ B(Nop),
+ /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1265 S> */ B(Nop),
+ /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1275 S> */ B(Nop),
+ /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1285 S> */ B(Nop),
+ /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1295 S> */ B(Nop),
+ /* 1296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
+ /* 1305 S> */ B(LdaSmi), U8(2),
+ /* 1307 E> */ B(Wide), B(StaGlobalSloppy), U16(1), U16(257),
+ B(LdaUndefined),
+ /* 1312 S> */ B(Return),
]
constant pool: [
"name",
@@ -769,399 +641,271 @@ snippet: "
}
f({name: 1});
"
-frame size: 1
+frame size: 0
parameter count: 2
-bytecode array length: 1035
+bytecode array length: 651
bytecodes: [
- B(StackCheck),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(1),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(3),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(5),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(7),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(9),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(11),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(13),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(15),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(17),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(19),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(21),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(23),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(25),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(27),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(29),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(31),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(33),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(35),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(37),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(39),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(41),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(43),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(45),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(47),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(49),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(51),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(53),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(55),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(57),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(59),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(61),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(63),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(65),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(67),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(69),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(71),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(73),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(75),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(77),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(79),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(81),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(83),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(85),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(87),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(89),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(91),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(93),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(95),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(97),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(99),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(101),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(103),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(105),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(107),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(109),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(111),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(113),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(115),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(117),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(119),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(121),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(123),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(125),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(127),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(129),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(131),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(133),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(135),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(137),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(139),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(141),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(143),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(145),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(147),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(149),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(151),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(153),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(155),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(157),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(159),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(161),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(163),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(165),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(167),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(169),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(171),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(173),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(175),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(177),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(179),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(181),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(183),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(185),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(187),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(189),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(191),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(193),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(195),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(197),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(199),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(201),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(203),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(205),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(207),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(209),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(211),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(213),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(215),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(217),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(219),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(221),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(223),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(225),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(227),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(229),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(231),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(233),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(235),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(237),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(239),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(241),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(243),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(245),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(247),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(249),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(251),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(253),
- B(Ldar), R(arg0),
- B(Star), R(0),
- B(LoadIC), R(0), U8(0), U8(255),
- B(LdaSmi), U8(2),
- B(Wide), B(StaGlobalStrict), U16(1), U16(257),
- B(LdaUndefined),
- B(Return),
+ /* 17 E> */ B(StackCheck),
+ /* 41 S> */ B(Nop),
+ /* 42 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(1),
+ /* 51 S> */ B(Nop),
+ /* 52 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ /* 61 S> */ B(Nop),
+ /* 62 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
+ /* 71 S> */ B(Nop),
+ /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
+ /* 81 S> */ B(Nop),
+ /* 82 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
+ /* 91 S> */ B(Nop),
+ /* 92 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
+ /* 101 S> */ B(Nop),
+ /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
+ /* 111 S> */ B(Nop),
+ /* 112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
+ /* 121 S> */ B(Nop),
+ /* 122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
+ /* 131 S> */ B(Nop),
+ /* 132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
+ /* 141 S> */ B(Nop),
+ /* 142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
+ /* 151 S> */ B(Nop),
+ /* 152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
+ /* 161 S> */ B(Nop),
+ /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
+ /* 171 S> */ B(Nop),
+ /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
+ /* 181 S> */ B(Nop),
+ /* 182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
+ /* 191 S> */ B(Nop),
+ /* 192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
+ /* 201 S> */ B(Nop),
+ /* 202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
+ /* 211 S> */ B(Nop),
+ /* 212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
+ /* 221 S> */ B(Nop),
+ /* 222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
+ /* 231 S> */ B(Nop),
+ /* 232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
+ /* 241 S> */ B(Nop),
+ /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
+ /* 251 S> */ B(Nop),
+ /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
+ /* 261 S> */ B(Nop),
+ /* 262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
+ /* 271 S> */ B(Nop),
+ /* 272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
+ /* 281 S> */ B(Nop),
+ /* 282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
+ /* 291 S> */ B(Nop),
+ /* 292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
+ /* 301 S> */ B(Nop),
+ /* 302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
+ /* 311 S> */ B(Nop),
+ /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
+ /* 321 S> */ B(Nop),
+ /* 322 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
+ /* 331 S> */ B(Nop),
+ /* 332 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
+ /* 341 S> */ B(Nop),
+ /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
+ /* 351 S> */ B(Nop),
+ /* 352 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
+ /* 361 S> */ B(Nop),
+ /* 362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
+ /* 371 S> */ B(Nop),
+ /* 372 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
+ /* 381 S> */ B(Nop),
+ /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
+ /* 391 S> */ B(Nop),
+ /* 392 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
+ /* 401 S> */ B(Nop),
+ /* 402 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
+ /* 411 S> */ B(Nop),
+ /* 412 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
+ /* 421 S> */ B(Nop),
+ /* 422 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
+ /* 431 S> */ B(Nop),
+ /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
+ /* 441 S> */ B(Nop),
+ /* 442 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
+ /* 451 S> */ B(Nop),
+ /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
+ /* 461 S> */ B(Nop),
+ /* 462 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
+ /* 471 S> */ B(Nop),
+ /* 472 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
+ /* 481 S> */ B(Nop),
+ /* 482 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
+ /* 491 S> */ B(Nop),
+ /* 492 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
+ /* 501 S> */ B(Nop),
+ /* 502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
+ /* 511 S> */ B(Nop),
+ /* 512 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
+ /* 521 S> */ B(Nop),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
+ /* 531 S> */ B(Nop),
+ /* 532 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
+ /* 541 S> */ B(Nop),
+ /* 542 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
+ /* 551 S> */ B(Nop),
+ /* 552 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
+ /* 561 S> */ B(Nop),
+ /* 562 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
+ /* 571 S> */ B(Nop),
+ /* 572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
+ /* 581 S> */ B(Nop),
+ /* 582 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
+ /* 591 S> */ B(Nop),
+ /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
+ /* 601 S> */ B(Nop),
+ /* 602 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
+ /* 611 S> */ B(Nop),
+ /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
+ /* 621 S> */ B(Nop),
+ /* 622 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
+ /* 631 S> */ B(Nop),
+ /* 632 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
+ /* 641 S> */ B(Nop),
+ /* 642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
+ /* 651 S> */ B(Nop),
+ /* 652 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
+ /* 661 S> */ B(Nop),
+ /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
+ /* 671 S> */ B(Nop),
+ /* 672 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
+ /* 681 S> */ B(Nop),
+ /* 682 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
+ /* 691 S> */ B(Nop),
+ /* 692 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
+ /* 701 S> */ B(Nop),
+ /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
+ /* 711 S> */ B(Nop),
+ /* 712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
+ /* 721 S> */ B(Nop),
+ /* 722 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
+ /* 731 S> */ B(Nop),
+ /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
+ /* 741 S> */ B(Nop),
+ /* 742 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
+ /* 751 S> */ B(Nop),
+ /* 752 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
+ /* 761 S> */ B(Nop),
+ /* 762 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
+ /* 771 S> */ B(Nop),
+ /* 772 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
+ /* 781 S> */ B(Nop),
+ /* 782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
+ /* 791 S> */ B(Nop),
+ /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
+ /* 801 S> */ B(Nop),
+ /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
+ /* 811 S> */ B(Nop),
+ /* 812 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
+ /* 821 S> */ B(Nop),
+ /* 822 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
+ /* 831 S> */ B(Nop),
+ /* 832 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
+ /* 841 S> */ B(Nop),
+ /* 842 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
+ /* 851 S> */ B(Nop),
+ /* 852 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
+ /* 861 S> */ B(Nop),
+ /* 862 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
+ /* 871 S> */ B(Nop),
+ /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
+ /* 881 S> */ B(Nop),
+ /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
+ /* 891 S> */ B(Nop),
+ /* 892 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
+ /* 901 S> */ B(Nop),
+ /* 902 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
+ /* 911 S> */ B(Nop),
+ /* 912 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
+ /* 921 S> */ B(Nop),
+ /* 922 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
+ /* 931 S> */ B(Nop),
+ /* 932 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
+ /* 941 S> */ B(Nop),
+ /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
+ /* 951 S> */ B(Nop),
+ /* 952 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
+ /* 961 S> */ B(Nop),
+ /* 962 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
+ /* 971 S> */ B(Nop),
+ /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
+ /* 981 S> */ B(Nop),
+ /* 982 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
+ /* 991 S> */ B(Nop),
+ /* 992 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
+ /* 1001 S> */ B(Nop),
+ /* 1002 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
+ /* 1011 S> */ B(Nop),
+ /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
+ /* 1021 S> */ B(Nop),
+ /* 1022 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
+ /* 1031 S> */ B(Nop),
+ /* 1032 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
+ /* 1041 S> */ B(Nop),
+ /* 1042 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
+ /* 1051 S> */ B(Nop),
+ /* 1052 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
+ /* 1061 S> */ B(Nop),
+ /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
+ /* 1071 S> */ B(Nop),
+ /* 1072 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
+ /* 1081 S> */ B(Nop),
+ /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
+ /* 1091 S> */ B(Nop),
+ /* 1092 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
+ /* 1101 S> */ B(Nop),
+ /* 1102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
+ /* 1111 S> */ B(Nop),
+ /* 1112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
+ /* 1121 S> */ B(Nop),
+ /* 1122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
+ /* 1131 S> */ B(Nop),
+ /* 1132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
+ /* 1141 S> */ B(Nop),
+ /* 1142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
+ /* 1151 S> */ B(Nop),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
+ /* 1161 S> */ B(Nop),
+ /* 1162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
+ /* 1171 S> */ B(Nop),
+ /* 1172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
+ /* 1181 S> */ B(Nop),
+ /* 1182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
+ /* 1191 S> */ B(Nop),
+ /* 1192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
+ /* 1201 S> */ B(Nop),
+ /* 1202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
+ /* 1211 S> */ B(Nop),
+ /* 1212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
+ /* 1221 S> */ B(Nop),
+ /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
+ /* 1231 S> */ B(Nop),
+ /* 1232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
+ /* 1241 S> */ B(Nop),
+ /* 1242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
+ /* 1251 S> */ B(Nop),
+ /* 1252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
+ /* 1261 S> */ B(Nop),
+ /* 1262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
+ /* 1271 S> */ B(Nop),
+ /* 1272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
+ /* 1281 S> */ B(Nop),
+ /* 1282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
+ /* 1291 S> */ B(Nop),
+ /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
+ /* 1301 S> */ B(Nop),
+ /* 1302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
+ /* 1311 S> */ B(Nop),
+ /* 1312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
+ /* 1321 S> */ B(LdaSmi), U8(2),
+ /* 1323 E> */ B(Wide), B(StaGlobalStrict), U16(1), U16(257),
+ B(LdaUndefined),
+ /* 1328 S> */ B(Return),
]
constant pool: [
"name",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
index 9cf212c932..c28ac2a8c9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StringConstants.golden
@@ -15,9 +15,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaConstant), U8(0),
+ /* 61 S> */ B(Return),
]
constant pool: [
"This is a string",
@@ -33,11 +33,11 @@ frame size: 1
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 58 S> */ B(LdaConstant), U8(1),
+ /* 82 S> */ B(Return),
]
constant pool: [
"First string",
@@ -54,11 +54,11 @@ frame size: 1
parameter count: 1
bytecode array length: 8
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Star), R(0),
- B(LdaConstant), U8(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaConstant), U8(0),
+ B(Star), R(0),
+ /* 57 S> */ B(LdaConstant), U8(0),
+ /* 79 S> */ B(Return),
]
constant pool: [
"Same string",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
index c908959257..bc16a7b964 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -17,26 +17,26 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 31
+bytecode array length: 32
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(7),
- B(Jump), U8(8),
- B(LdaSmi), U8(2),
- B(Return),
- B(LdaSmi), U8(3),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(2),
+ B(JumpIfToBooleanTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(7),
+ B(Jump), U8(8),
+ /* 66 S> */ B(LdaSmi), U8(2),
+ /* 97 S> */ B(Return),
+ /* 85 S> */ B(LdaSmi), U8(3),
+ /* 97 S> */ B(Return),
+ B(LdaUndefined),
+ /* 97 S> */ B(Return),
]
constant pool: [
]
@@ -53,28 +53,28 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 37
+bytecode array length: 38
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(Jump), U8(14),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(Jump), U8(8),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(Jump), U8(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(2),
+ B(JumpIfToBooleanTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(Jump), U8(14),
+ /* 66 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 73 S> */ B(Jump), U8(8),
+ /* 89 S> */ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ /* 96 S> */ B(Jump), U8(2),
+ B(LdaUndefined),
+ /* 105 S> */ B(Return),
]
constant pool: [
]
@@ -91,27 +91,27 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 35
+bytecode array length: 36
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(8),
- B(Jump), U8(12),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(Jump), U8(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(2),
+ B(JumpIfToBooleanTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(8),
+ B(Jump), U8(12),
+ /* 66 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 98 S> */ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ /* 105 S> */ B(Jump), U8(2),
+ B(LdaUndefined),
+ /* 114 S> */ B(Return),
]
constant pool: [
]
@@ -129,27 +129,27 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 35
+bytecode array length: 36
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(3),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(6),
- B(Jump), U8(6),
- B(Jump), U8(10),
- B(Jump), U8(8),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Jump), U8(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(2),
+ B(JumpIfToBooleanTrue), U8(10),
+ B(LdaSmi), U8(3),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(6),
+ B(Jump), U8(6),
+ /* 66 S> */ B(Jump), U8(10),
+ /* 82 S> */ B(Jump), U8(8),
+ /* 99 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 106 S> */ B(Jump), U8(2),
+ B(LdaUndefined),
+ /* 115 S> */ B(Return),
]
constant pool: [
]
@@ -167,32 +167,32 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 44
+bytecode array length: 45
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(TypeOf),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(3),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(Jump), U8(14),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Jump), U8(14),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(Jump), U8(8),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(Jump), U8(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 42 E> */ B(TypeOf),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(2),
+ B(JumpIfToBooleanTrue), U8(10),
+ B(LdaSmi), U8(3),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(10),
+ B(Jump), U8(14),
+ /* 74 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 81 S> */ B(Jump), U8(14),
+ /* 97 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 104 S> */ B(Jump), U8(8),
+ /* 121 S> */ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ /* 128 S> */ B(Jump), U8(2),
+ B(LdaUndefined),
+ /* 137 S> */ B(Return),
]
constant pool: [
]
@@ -209,26 +209,25 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 32
+bytecode array length: 31
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(2),
- B(Ldar), R(1),
- B(TypeOf),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(8),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Jump), U8(8),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(Jump), U8(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ /* 45 S> */ B(TypeOf),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(2),
+ B(JumpIfToBooleanTrue), U8(4),
+ B(Jump), U8(8),
+ /* 74 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ /* 81 S> */ B(Jump), U8(8),
+ /* 98 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 105 S> */ B(Jump), U8(2),
+ B(LdaUndefined),
+ /* 114 S> */ B(Return),
]
constant pool: [
]
@@ -312,154 +311,154 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 289
+bytecode array length: 290
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(1),
- B(Star), R(0),
- B(Star), R(2),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(2),
- B(JumpIfTrueConstant), U8(0),
- B(JumpConstant), U8(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(1),
- B(Jump), U8(8),
- B(LdaSmi), U8(3),
- B(Star), R(1),
- B(Jump), U8(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(1),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(2),
+ B(JumpIfToBooleanTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrueConstant), U8(0),
+ B(JumpConstant), U8(1),
+ /* 68 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 77 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 86 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 95 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 104 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 113 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 122 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 131 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 140 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 149 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 158 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 167 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 176 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 185 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 194 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 203 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 212 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 221 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 230 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 239 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 248 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 257 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 266 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 275 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 284 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 293 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 302 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 311 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 320 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 329 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 338 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 347 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 356 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 365 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 374 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 383 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 392 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 401 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 410 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 419 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 428 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 437 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 446 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 455 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 464 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 473 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 482 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 491 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 500 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 509 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 518 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 527 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 536 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 545 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 554 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 563 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 572 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 581 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 590 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 599 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 608 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 617 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 626 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 635 S> */ B(LdaSmi), U8(2),
+ B(Star), R(1),
+ /* 644 S> */ B(Jump), U8(8),
+ /* 662 S> */ B(LdaSmi), U8(3),
+ B(Star), R(1),
+ /* 671 S> */ B(Jump), U8(2),
+ B(LdaUndefined),
+ /* 680 S> */ B(Return),
]
constant pool: [
262,
@@ -482,40 +481,37 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 61
+bytecode array length: 59
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(2),
- B(Star), R(0),
- B(Star), R(3),
- B(LdaSmi), U8(1),
- B(TestEqualStrict), R(3),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(3),
- B(JumpIfTrue), U8(36),
- B(Jump), U8(38),
- B(Ldar), R(2),
- B(Star), R(4),
- B(LdaSmi), U8(1),
- B(Add), R(4),
- B(Star), R(1),
- B(Star), R(4),
- B(LdaSmi), U8(2),
- B(TestEqualStrict), R(4),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(8),
- B(LdaSmi), U8(1),
- B(Star), R(2),
- B(Jump), U8(8),
- B(LdaSmi), U8(2),
- B(Star), R(2),
- B(Jump), U8(2),
- B(LdaSmi), U8(3),
- B(Star), R(2),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ B(Star), R(0),
+ /* 45 S> */ B(LdaSmi), U8(1),
+ B(TestEqualStrict), R(0),
+ B(Mov), R(0), R(3),
+ B(JumpIfToBooleanTrue), U8(10),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(3),
+ B(JumpIfTrue), U8(33),
+ B(Jump), U8(35),
+ /* 77 E> */ B(AddSmi), U8(1), R(2), U8(1),
+ B(Star), R(1),
+ /* 70 S> */ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(1),
+ B(Mov), R(1), R(4),
+ B(JumpIfToBooleanTrue), U8(4),
+ B(Jump), U8(8),
+ /* 101 S> */ B(LdaSmi), U8(1),
+ B(Star), R(2),
+ /* 108 S> */ B(Jump), U8(8),
+ /* 131 S> */ B(LdaSmi), U8(2),
+ B(Star), R(2),
+ /* 138 S> */ B(Jump), U8(2),
+ /* 176 S> */ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ B(LdaUndefined),
+ /* 185 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
index da84a493e1..582c087341 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ThisFunction.golden
@@ -14,21 +14,14 @@ snippet: "
f = function f() {};
f();
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 19
+bytecode array length: 6
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(closure),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(5),
- B(Mov), R(1), R(0),
- B(Ldar), R(1),
- B(LdaUndefined),
- B(Return),
+ /* 21 E> */ B(StackCheck),
+ B(Mov), R(closure), R(0),
+ B(LdaUndefined),
+ /* 25 S> */ B(Return),
]
constant pool: [
]
@@ -41,23 +34,14 @@ snippet: "
f = function f() { return f; };
f();
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 23
+bytecode array length: 7
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(closure),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(5),
- B(Mov), R(1), R(0),
- B(Ldar), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(3),
- B(LdaUndefined),
- B(Return),
+ /* 21 E> */ B(StackCheck),
+ B(Mov), R(closure), R(0),
+ /* 26 S> */ B(Ldar), R(0),
+ /* 36 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
index 99e17fc1ea..4e7a0bc225 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Throw.golden
@@ -15,9 +15,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Throw),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaSmi), U8(1),
+ /* 34 E> */ B(Throw),
]
constant pool: [
]
@@ -32,9 +32,9 @@ frame size: 0
parameter count: 1
bytecode array length: 4
bytecodes: [
- B(StackCheck),
- B(LdaConstant), U8(0),
- B(Throw),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(LdaConstant), U8(0),
+ /* 34 E> */ B(Throw),
]
constant pool: [
"Error",
@@ -50,14 +50,14 @@ frame size: 1
parameter count: 1
bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(JumpIfToBooleanFalse), U8(5),
- B(LdaConstant), U8(0),
- B(Throw),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ /* 45 S> */ B(JumpIfToBooleanFalse), U8(5),
+ /* 54 S> */ B(LdaConstant), U8(0),
+ /* 54 E> */ B(Throw),
+ B(LdaUndefined),
+ /* 72 S> */ B(Return),
]
constant pool: [
"Error",
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index 23d3609885..03f8b5496d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -14,27 +14,26 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 44
+bytecode array length: 45
bytecodes: [
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2),
- B(StackCheck),
- B(LdaConstant), U8(1),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(2),
- B(CreateObjectLiteral), U8(2), U8(0), U8(5),
- B(Star), R(4),
- B(CreateClosure), U8(3), U8(0),
- B(StoreICSloppy), R(4), U8(4), U8(3),
- B(Ldar), R(4),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
- B(LdaUndefined),
- B(Return),
+ B(LdaConstant), U8(0),
+ B(Star), R(1),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
+ /* 0 E> */ B(StackCheck),
+ /* 8 S> */ B(LdaConstant), U8(1),
+ B(Star), R(1),
+ B(LdaZero),
+ B(CreateObjectLiteral), U8(2), U8(0), U8(1), R(4),
+ B(Star), R(2),
+ B(CreateClosure), U8(3), U8(0),
+ B(StaNamedPropertySloppy), R(4), U8(4), U8(3),
+ B(Mov), R(4), R(3),
+ B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
+ B(LdaUndefined),
+ /* 33 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
index 20abd3ecb7..17d4ef0fd4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -11,35 +11,33 @@ wrap: yes
snippet: "
try { return 1; } catch(e) { return 2; }
"
-frame size: 5
+frame size: 3
parameter count: 1
-bytecode array length: 40
+bytecode array length: 34
bytecodes: [
- B(StackCheck),
- B(Mov), R(context), R(1),
- B(LdaSmi), U8(1),
- B(Return),
- B(Star), R(3),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- B(Ldar), R(closure),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(2), U8(3),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(1),
- B(PushContext), R(0),
- B(LdaSmi), U8(2),
- B(PopContext), R(0),
- B(Return),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(Mov), R(context), R(1),
+ /* 40 S> */ B(LdaSmi), U8(1),
+ /* 75 S> */ B(Return),
+ B(Jump), U8(25),
+ B(Star), R(2),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(2), U8(0),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(1),
+ B(PushContext), R(0),
+ /* 63 S> */ B(LdaSmi), U8(2),
+ B(PopContext), R(0),
+ /* 75 S> */ B(Return),
+ B(LdaUndefined),
+ /* 75 S> */ B(Return),
]
constant pool: [
"e",
]
handlers: [
- [4, 7, 7],
+ [4, 7, 9],
]
---
@@ -48,45 +46,39 @@ snippet: "
try { a = 1 } catch(e1) {};
try { a = 2 } catch(e2) { a = 3 }
"
-frame size: 6
+frame size: 4
parameter count: 1
-bytecode array length: 81
+bytecode array length: 65
bytecodes: [
- B(StackCheck),
- B(Mov), R(context), R(2),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Jump), U8(30),
- B(Star), R(4),
- B(LdaConstant), U8(0),
- B(Star), R(3),
- B(Ldar), R(closure),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(2),
- B(PushContext), R(1),
- B(PopContext), R(1),
- B(Mov), R(context), R(2),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(Jump), U8(34),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(Ldar), R(closure),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(2),
- B(PushContext), R(1),
- B(LdaSmi), U8(3),
- B(Star), R(0),
- B(PopContext), R(1),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(Mov), R(context), R(2),
+ /* 47 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(22),
+ B(Star), R(3),
+ B(Ldar), R(closure),
+ /* 49 E> */ B(CreateCatchContext), R(3), U8(0),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(2),
+ B(PushContext), R(1),
+ B(PopContext), R(1),
+ B(Mov), R(context), R(2),
+ /* 75 S> */ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(Jump), U8(26),
+ B(Star), R(3),
+ B(Ldar), R(closure),
+ /* 77 E> */ B(CreateCatchContext), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(2),
+ B(PushContext), R(1),
+ /* 95 S> */ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(LdaUndefined),
+ /* 103 S> */ B(Return),
]
constant pool: [
"e1",
@@ -94,6 +86,6 @@ constant pool: [
]
handlers: [
[4, 8, 10],
- [41, 45, 47],
+ [33, 37, 39],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
index e14a709840..a42f90c844 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
@@ -16,31 +16,31 @@ frame size: 4
parameter count: 1
bytecode array length: 51
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Mov), R(context), R(3),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(LdaSmi), U8(-1),
- B(Star), R(1),
- B(Jump), U8(7),
- B(Star), R(2),
- B(LdaZero),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(3),
- B(LdaSmi), U8(3),
- B(Star), R(0),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(3), U8(1),
- B(LdaZero),
- B(TestEqualStrict), R(1),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(2),
- B(ReThrow),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Mov), R(context), R(3),
+ /* 51 S> */ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(LdaSmi), U8(-1),
+ B(Star), R(1),
+ B(Jump), U8(7),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ /* 53 E> */ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(3),
+ /* 70 S> */ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ /* 72 E> */ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(3), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(2),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 79 S> */ B(Return),
]
constant pool: [
]
@@ -53,56 +53,53 @@ snippet: "
var a = 1;
try { a = 2; } catch(e) { a = 20 } finally { a = 3; }
"
-frame size: 9
+frame size: 7
parameter count: 1
-bytecode array length: 88
+bytecode array length: 80
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Mov), R(context), R(4),
- B(Mov), R(context), R(5),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(Jump), U8(34),
- B(Star), R(7),
- B(LdaConstant), U8(0),
- B(Star), R(6),
- B(Ldar), R(closure),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(5),
- B(PushContext), R(1),
- B(LdaSmi), U8(20),
- B(Star), R(0),
- B(PopContext), R(1),
- B(LdaSmi), U8(-1),
- B(Star), R(2),
- B(Jump), U8(7),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(4),
- B(LdaSmi), U8(3),
- B(Star), R(0),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
- B(LdaZero),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(3),
- B(ReThrow),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Mov), R(context), R(4),
+ B(Mov), R(context), R(5),
+ /* 51 S> */ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(Jump), U8(26),
+ B(Star), R(6),
+ B(Ldar), R(closure),
+ /* 53 E> */ B(CreateCatchContext), R(6), U8(0),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(5),
+ B(PushContext), R(1),
+ /* 71 S> */ B(LdaSmi), U8(20),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(LdaSmi), U8(-1),
+ B(Star), R(2),
+ B(Jump), U8(7),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(2),
+ /* 73 E> */ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(4),
+ /* 90 S> */ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ /* 92 E> */ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(3),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 99 S> */ B(Return),
]
constant pool: [
"e",
]
handlers: [
- [8, 49, 55],
+ [8, 41, 47],
[11, 15, 17],
]
@@ -112,70 +109,64 @@ snippet: "
try { a = 1 } catch(e) { a = 2 }
} catch(e) { a = 20 } finally { a = 3; }
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 121
+bytecode array length: 105
bytecodes: [
- B(StackCheck),
- B(Mov), R(context), R(4),
- B(Mov), R(context), R(5),
- B(Mov), R(context), R(6),
- B(LdaSmi), U8(1),
- B(Star), R(0),
- B(Jump), U8(34),
- B(Star), R(8),
- B(LdaConstant), U8(0),
- B(Star), R(7),
- B(Ldar), R(closure),
- B(Star), R(9),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(7), U8(3),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(6),
- B(PushContext), R(1),
- B(LdaSmi), U8(2),
- B(Star), R(0),
- B(PopContext), R(1),
- B(Jump), U8(34),
- B(Star), R(7),
- B(LdaConstant), U8(0),
- B(Star), R(6),
- B(Ldar), R(closure),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Ldar), R(5),
- B(PushContext), R(1),
- B(LdaSmi), U8(20),
- B(Star), R(0),
- B(PopContext), R(1),
- B(LdaSmi), U8(-1),
- B(Star), R(2),
- B(Jump), U8(7),
- B(Star), R(3),
- B(LdaZero),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
- B(Star), R(4),
- B(LdaSmi), U8(3),
- B(Star), R(0),
- B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
- B(LdaZero),
- B(TestEqualStrict), R(2),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(3),
- B(ReThrow),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ B(Mov), R(context), R(4),
+ B(Mov), R(context), R(5),
+ B(Mov), R(context), R(6),
+ /* 55 S> */ B(LdaSmi), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(26),
+ B(Star), R(7),
+ B(Ldar), R(closure),
+ /* 57 E> */ B(CreateCatchContext), R(7), U8(0),
+ B(Star), R(6),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(6),
+ B(PushContext), R(1),
+ /* 74 S> */ B(LdaSmi), U8(2),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(Jump), U8(26),
+ B(Star), R(6),
+ B(Ldar), R(closure),
+ /* 76 E> */ B(CreateCatchContext), R(6), U8(0),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Ldar), R(5),
+ B(PushContext), R(1),
+ /* 95 S> */ B(LdaSmi), U8(20),
+ B(Star), R(0),
+ B(PopContext), R(1),
+ B(LdaSmi), U8(-1),
+ B(Star), R(2),
+ B(Jump), U8(7),
+ B(Star), R(3),
+ B(LdaZero),
+ B(Star), R(2),
+ /* 97 E> */ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
+ B(Star), R(4),
+ /* 114 S> */ B(LdaSmi), U8(3),
+ B(Star), R(0),
+ /* 116 E> */ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(2),
+ B(JumpIfTrue), U8(4),
+ B(Jump), U8(5),
+ B(Ldar), R(3),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 123 S> */ B(Return),
]
constant pool: [
"e",
]
handlers: [
- [4, 82, 88],
- [7, 48, 50],
+ [4, 66, 72],
+ [7, 40, 42],
[10, 14, 16],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
index e5db7a7ed9..1fe9354b6f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
@@ -20,11 +20,11 @@ frame size: 1
parameter count: 1
bytecode array length: 7
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(13),
- B(Star), R(0),
- B(TypeOf),
- B(Return),
+ /* 10 E> */ B(StackCheck),
+ /* 24 S> */ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ /* 29 S> */ B(TypeOf),
+ /* 47 S> */ B(Return),
]
constant pool: [
]
@@ -41,15 +41,14 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 6
+bytecode array length: 5
bytecodes: [
- B(StackCheck),
- B(LdaGlobalInsideTypeof), U8(0), U8(1),
- B(TypeOf),
- B(Return),
+ /* 22 E> */ B(StackCheck),
+ /* 28 S> */ B(LdaGlobalInsideTypeof), U8(1),
+ B(TypeOf),
+ /* 46 S> */ B(Return),
]
constant pool: [
- "x",
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index 0a3ca760c7..0e2c767256 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -15,28 +15,22 @@ snippet: "
}
return x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 31
+bytecode array length: 22
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(10),
- B(TestEqual), R(1),
- B(LogicalNot),
- B(JumpIfFalse), U8(15),
- B(StackCheck),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(10),
- B(Add), R(1),
- B(Star), R(0),
- B(Jump), U8(-22),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 54 S> */ B(LdaSmi), U8(10),
+ /* 54 E> */ B(TestEqual), R(0),
+ B(JumpIfTrue), U8(11),
+ /* 45 E> */ B(StackCheck),
+ /* 65 S> */ B(AddSmi), U8(10), R(0), U8(1),
+ B(Star), R(0),
+ B(Jump), U8(-13),
+ /* 79 S> */ B(Ldar), R(0),
+ /* 89 S> */ B(Return),
]
constant pool: [
]
@@ -51,24 +45,22 @@ snippet: "
} while(x == false);
return x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 22
+bytecode array length: 18
bytecodes: [
- B(StackCheck),
- B(LdaFalse),
- B(Star), R(0),
- B(StackCheck),
- B(Ldar), R(0),
- B(LogicalNot),
- B(Star), R(0),
- B(Ldar), R(0),
- B(Star), R(1),
- B(LdaFalse),
- B(TestEqual), R(1),
- B(JumpIfTrue), U8(-13),
- B(Ldar), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaFalse),
+ B(Star), R(0),
+ /* 49 E> */ B(StackCheck),
+ /* 56 S> */ B(Ldar), R(0),
+ B(ToBooleanLogicalNot),
+ B(Star), R(0),
+ /* 74 S> */ B(LdaFalse),
+ /* 74 E> */ B(TestEqual), R(0),
+ B(JumpIfTrue), U8(-9),
+ /* 85 S> */ B(Ldar), R(0),
+ /* 95 S> */ B(Return),
]
constant pool: [
]
@@ -80,18 +72,17 @@ snippet: "
var x = 101;
return void(x * 3);
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(101),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(3),
- B(Mul), R(1),
- B(LdaUndefined),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(101),
+ B(Star), R(0),
+ /* 47 S> */ B(LdaSmi), U8(3),
+ B(Mul), R(0), U8(1),
+ B(LdaUndefined),
+ /* 67 S> */ B(Return),
]
constant pool: [
]
@@ -108,18 +99,17 @@ frame size: 4
parameter count: 1
bytecode array length: 23
bytecodes: [
- B(StackCheck),
- B(Wide), B(LdaSmi), U16(1234),
- B(Star), R(0),
- B(Star), R(2),
- B(Ldar), R(0),
- B(Mul), R(2),
- B(Star), R(3),
- B(LdaSmi), U8(1),
- B(Sub), R(3),
- B(LdaUndefined),
- B(Star), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ /* 56 S> */ B(Nop),
+ /* 66 E> */ B(Mul), R(0), U8(1),
+ B(Star), R(3),
+ B(SubSmi), U8(1), R(3), U8(2),
+ B(LdrUndefined), R(1),
+ B(Ldar), R(1),
+ /* 74 S> */ B(Nop),
+ /* 84 S> */ B(Return),
]
constant pool: [
]
@@ -131,17 +121,16 @@ snippet: "
var x = 13;
return ~x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(13),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(-1),
- B(BitwiseXor), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(-1),
+ B(BitwiseXor), R(0), U8(1),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
@@ -153,17 +142,16 @@ snippet: "
var x = 13;
return +x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(13),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(1),
- B(Mul), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(1),
+ B(Mul), R(0), U8(1),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
@@ -175,17 +163,16 @@ snippet: "
var x = 13;
return -x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
- B(StackCheck),
- B(LdaSmi), U8(13),
- B(Star), R(0),
- B(Star), R(1),
- B(LdaSmi), U8(-1),
- B(Mul), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 42 S> */ B(LdaSmi), U8(13),
+ B(Star), R(0),
+ /* 46 S> */ B(LdaSmi), U8(-1),
+ B(Mul), R(0), U8(1),
+ /* 57 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index 930077503f..a39a1cf6aa 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -171,12 +171,12 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 6
+bytecode array length: 10
bytecodes: [
- B(StackCheck),
- B(Ldar), R(127),
- B(Star), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 1494 S> */ B(Wide), B(Mov), R16(127), R16(0),
+ /* 1505 S> */ B(Ldar), R(0),
+ /* 1516 S> */ B(Return),
]
constant pool: [
]
@@ -347,12 +347,12 @@ snippet: "
"
frame size: 157
parameter count: 1
-bytecode array length: 6
+bytecode array length: 12
bytecodes: [
- B(StackCheck),
- B(Ldar), R(126),
- B(Star), R(127),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 1494 S> */ B(Wide), B(Mov), R16(126), R16(127),
+ /* 1507 S> */ B(Wide), B(Ldar), R16(127),
+ /* 1520 S> */ B(Return),
]
constant pool: [
]
@@ -521,20 +521,18 @@ snippet: "
if (x2 > 3) { return x129; }
return x128;
"
-frame size: 158
+frame size: 157
parameter count: 1
-bytecode array length: 23
+bytecode array length: 17
bytecodes: [
- B(StackCheck),
- B(Ldar), R(2),
- B(Wide), B(Star), R16(157),
- B(LdaSmi), U8(3),
- B(Wide), B(TestGreaterThan), R16(157),
- B(JumpIfFalse), U8(7),
- B(Wide), B(Ldar), R16(129),
- B(Return),
- B(Ldar), R(128),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 1494 S> */ B(LdaSmi), U8(3),
+ /* 1501 E> */ B(TestGreaterThan), R(2),
+ B(JumpIfFalse), U8(7),
+ /* 1508 S> */ B(Wide), B(Ldar), R16(129),
+ /* 1536 S> */ B(Return),
+ /* 1523 S> */ B(Wide), B(Ldar), R16(128),
+ /* 1536 S> */ B(Return),
]
constant pool: [
]
@@ -705,29 +703,25 @@ snippet: "
if (x2 > 3) { return x0; }
return x129;
"
-frame size: 158
+frame size: 157
parameter count: 1
-bytecode array length: 48
+bytecode array length: 34
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(Wide), B(Ldar), R16(129),
- B(Wide), B(Star), R16(157),
- B(LdaSmi), U8(3),
- B(Wide), B(TestEqual), R16(157),
- B(JumpIfFalse), U8(8),
- B(Ldar), R(0),
- B(Wide), B(Star), R16(129),
- B(Ldar), R(2),
- B(Wide), B(Star), R16(157),
- B(LdaSmi), U8(3),
- B(Wide), B(TestGreaterThan), R16(157),
- B(JumpIfFalse), U8(5),
- B(Ldar), R(0),
- B(Return),
- B(Wide), B(Ldar), R16(129),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 1503 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 1506 S> */ B(LdaSmi), U8(3),
+ /* 1515 E> */ B(Wide), B(TestEqual), R16(129),
+ B(JumpIfFalse), U8(10),
+ /* 1534 S> */ B(Wide), B(Mov), R16(0), R16(129),
+ B(Ldar), R(0),
+ /* 1540 S> */ B(LdaSmi), U8(3),
+ /* 1547 E> */ B(TestGreaterThan), R(2),
+ B(JumpIfFalse), U8(5),
+ /* 1554 S> */ B(Ldar), R(0),
+ /* 1580 S> */ B(Return),
+ /* 1567 S> */ B(Wide), B(Ldar), R16(129),
+ /* 1580 S> */ B(Return),
]
constant pool: [
]
@@ -899,34 +893,29 @@ snippet: "
"
frame size: 158
parameter count: 1
-bytecode array length: 54
+bytecode array length: 53
bytecodes: [
- B(StackCheck),
- B(LdaZero),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(LdaZero),
- B(Star), R(128),
- B(Ldar), R(128),
- B(Wide), B(Star), R16(157),
- B(LdaSmi), U8(64),
- B(Wide), B(TestLessThan), R16(157),
- B(JumpIfFalse), U8(29),
- B(StackCheck),
- B(Ldar), R(1),
- B(Wide), B(Star), R16(157),
- B(Ldar), R(128),
- B(Wide), B(Add), R16(157),
- B(Star), R(1),
- B(Ldar), R(128),
- B(ToNumber),
- B(Wide), B(Star), R16(157),
- B(Inc),
- B(Star), R(128),
- B(Jump), U8(-39),
- B(Ldar), R(128),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 1503 S> */ B(LdaZero),
+ B(Star), R(0),
+ /* 1515 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 1523 S> */ B(LdaZero),
+ B(Wide), B(Star), R16(128),
+ /* 1538 S> */ B(LdaSmi), U8(64),
+ /* 1538 E> */ B(Wide), B(TestLessThan), R16(128),
+ B(JumpIfFalse), U8(30),
+ /* 1518 E> */ B(StackCheck),
+ /* 1555 S> */ B(Wide), B(Ldar), R16(128),
+ /* 1561 E> */ B(Add), R(1), U8(2),
+ B(Wide), B(Mov), R16(1), R16(157),
+ B(Star), R(1),
+ /* 1548 S> */ B(Wide), B(Ldar), R16(128),
+ B(Inc), U8(1),
+ B(Wide), B(Star), R16(128),
+ B(Jump), U8(-34),
+ /* 1567 S> */ B(Wide), B(Ldar), R16(128),
+ /* 1580 S> */ B(Return),
]
constant pool: [
]
@@ -1098,37 +1087,35 @@ snippet: "
"
frame size: 163
parameter count: 1
-bytecode array length: 80
+bytecode array length: 84
bytecodes: [
- B(StackCheck),
- B(Wide), B(LdaSmi), U16(1234),
- B(Star), R(0),
- B(LdaZero),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfUndefined), U8(65),
- B(JumpIfNull), U8(63),
- B(ToObject),
- B(Wide), B(Star), R16(157),
- B(Wide), B(ForInPrepare), R16(158),
- B(LdaZero),
- B(Wide), B(Star), R16(161),
- B(Wide), B(ForInDone), R16(161), R16(160),
- B(JumpIfTrue), U8(41),
- B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(1),
- B(JumpIfUndefined), U8(19),
- B(Star), R(128),
- B(StackCheck),
- B(Ldar), R(1),
- B(Wide), B(Star), R16(162),
- B(Ldar), R(128),
- B(Wide), B(Add), R16(162),
- B(Star), R(1),
- B(Wide), B(ForInStep), R16(161),
- B(Wide), B(Star), R16(161),
- B(Jump), U8(-45),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 1503 S> */ B(Wide), B(LdaSmi), U16(1234),
+ B(Star), R(0),
+ /* 1518 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 1534 S> */ B(Ldar), R(0),
+ B(JumpIfUndefined), U8(69),
+ B(JumpIfNull), U8(67),
+ B(Wide), B(ToObject), R16(157),
+ B(Wide), B(ForInPrepare), R16(157), R16(158),
+ B(LdaZero),
+ B(Wide), B(Star), R16(161),
+ /* 1526 S> */ B(Wide), B(ForInDone), R16(161), R16(160),
+ B(JumpIfTrue), U8(44),
+ B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(2),
+ B(JumpIfUndefined), U8(22),
+ B(Wide), B(Star), R16(128),
+ /* 1521 E> */ B(StackCheck),
+ /* 1541 S> */ B(Wide), B(Ldar), R16(128),
+ /* 1547 E> */ B(Add), R(1), U8(1),
+ B(Wide), B(Mov), R16(1), R16(162),
+ B(Star), R(1),
+ /* 1544 E> */ B(Wide), B(ForInStep), R16(161),
+ B(Wide), B(Star), R16(161),
+ B(Jump), U8(-48),
+ /* 1553 S> */ B(Ldar), R(1),
+ /* 1564 S> */ B(Return),
]
constant pool: [
]
@@ -1301,24 +1288,20 @@ snippet: "
"
frame size: 159
parameter count: 1
-bytecode array length: 55
+bytecode array length: 53
bytecodes: [
- B(StackCheck),
- B(Ldar), R(64),
- B(Wide), B(Star), R16(157),
- B(Ldar), R(63),
- B(Wide), B(Star), R16(158),
- B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
- B(Star), R(0),
- B(Ldar), R(27),
- B(Wide), B(Star), R16(157),
- B(Wide), B(Ldar), R16(143),
- B(Wide), B(Star), R16(158),
- B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
- B(Ldar), R(1),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 1494 S> */ B(Wide), B(Mov), R16(64), R16(157),
+ B(Wide), B(Mov), R16(63), R16(158),
+ /* 1509 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
+ B(Star), R(0),
+ /* 1515 S> */ B(Wide), B(Mov), R16(27), R16(157),
+ B(Wide), B(Mov), R16(143), R16(158),
+ /* 1530 E> */ B(Wide), B(CallRuntime), U16(Runtime::kAdd), R16(157), U16(2),
+ B(Star), R(1),
+ /* 1537 S> */ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0),
+ /* 1549 S> */ B(Ldar), R(1),
+ /* 1560 S> */ B(Return),
]
constant pool: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
index 350a0a5d7f..b5a0df5da2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
@@ -11,22 +11,20 @@ wrap: yes
snippet: "
with ({x:42}) { return x; }
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 26
+bytecode array length: 21
bytecodes: [
- B(StackCheck),
- B(CreateObjectLiteral), U8(0), U8(0), U8(5),
- B(Star), R(1),
- B(ToObject),
- B(Star), R(2),
- B(Ldar), R(closure),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kPushWithContext), R(2), U8(2),
- B(PushContext), R(0),
- B(LdaLookupSlot), U8(1),
- B(PopContext), R(0),
- B(Return),
+ /* 30 E> */ B(StackCheck),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
+ B(Ldar), R(1),
+ B(ToObject), R(2),
+ B(Ldar), R(closure),
+ B(CreateWithContext), R(2),
+ B(PushContext), R(0),
+ /* 50 S> */ B(LdaLookupSlot), U8(1),
+ B(PopContext), R(0),
+ /* 62 S> */ B(Return),
]
constant pool: [
InstanceType::FIXED_ARRAY_TYPE,
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 92ba9ba0bd..dd03c24b3c 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -4,6 +4,7 @@
#include <cstring>
#include <fstream>
+#include <memory>
#include <vector>
#include "test/cctest/interpreter/bytecode-expectations-printer.h"
@@ -12,7 +13,6 @@
#include "include/v8.h"
#include "src/base/logging.h"
-#include "src/base/smart-pointers.h"
#include "src/compiler.h"
#include "src/interpreter/interpreter.h"
@@ -90,17 +90,6 @@ class ProgramOptions final {
std::string test_function_name_;
};
-class ArrayBufferAllocator final : public v8::ArrayBuffer::Allocator {
- public:
- void* Allocate(size_t length) override {
- void* data = AllocateUninitialized(length);
- if (data != nullptr) memset(data, 0, length);
- return data;
- }
- void* AllocateUninitialized(size_t length) override { return malloc(length); }
- void Free(void* data, size_t) override { free(data); }
-};
-
class V8InitializationScope final {
public:
explicit V8InitializationScope(const char* exec_path);
@@ -110,7 +99,8 @@ class V8InitializationScope final {
v8::Isolate* isolate() const { return isolate_; }
private:
- v8::base::SmartPointer<v8::Platform> platform_;
+ std::unique_ptr<v8::Platform> platform_;
+ std::unique_ptr<v8::ArrayBuffer::Allocator> allocator_;
v8::Isolate* isolate_;
DISALLOW_COPY_AND_ASSIGN(V8InitializationScope);
@@ -350,14 +340,14 @@ V8InitializationScope::V8InitializationScope(const char* exec_path)
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
- v8::V8::InitializeICU();
+ v8::V8::InitializeICUDefaultLocation(exec_path);
v8::V8::InitializeExternalStartupData(exec_path);
v8::V8::InitializePlatform(platform_.get());
v8::V8::Initialize();
- ArrayBufferAllocator allocator;
v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = &allocator;
+ allocator_.reset(v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+ create_params.array_buffer_allocator = allocator_.get();
isolate_ = v8::Isolate::New(create_params);
}
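
The hunk above replaces the hand-rolled ArrayBufferAllocator and
v8::base::SmartPointer with v8::ArrayBuffer::Allocator::NewDefaultAllocator()
and std::unique_ptr. A minimal sketch of the resulting embedder
initialization sequence, assuming V8 5.4-era headers and the default
platform from libplatform (the calls mirror the hunk; only main() and the
isolate teardown are added for completeness):

    #include <memory>

    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"

    int main(int argc, char* argv[]) {
      // ICU data and external startup snapshots are located relative
      // to the executable path, as in V8InitializationScope above.
      v8::V8::InitializeICUDefaultLocation(argv[0]);
      v8::V8::InitializeExternalStartupData(argv[0]);

      std::unique_ptr<v8::Platform> platform(
          v8::platform::CreateDefaultPlatform());
      v8::V8::InitializePlatform(platform.get());
      v8::V8::Initialize();

      // NewDefaultAllocator() supersedes the malloc/memset allocator the
      // hunk deletes; the unique_ptr must outlive every isolate using it.
      std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
          v8::ArrayBuffer::Allocator::NewDefaultAllocator());
      v8::Isolate::CreateParams create_params;
      create_params.array_buffer_allocator = allocator.get();
      v8::Isolate* isolate = v8::Isolate::New(create_params);

      // ... run scripts on the isolate ...

      isolate->Dispose();
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();
      return 0;
    }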
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index f8a0a8a67a..4e202eada5 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -114,7 +114,7 @@ class InterpreterTester {
function->shared()->set_function_data(*bytecode_.ToHandleChecked());
}
if (!feedback_vector_.is_null()) {
- function->shared()->set_feedback_vector(
+ function->literals()->set_feedback_vector(
*feedback_vector_.ToHandleChecked());
}
return function;
diff --git a/deps/v8/test/cctest/interpreter/source-position-matcher.cc b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
new file mode 100644
index 0000000000..30d545abc3
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
@@ -0,0 +1,224 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/interpreter/source-position-matcher.h"
+
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Comparer for PositionTableEntry instances.
+struct PositionTableEntryComparer {
+ bool operator()(const PositionTableEntry& lhs,
+ const PositionTableEntry& rhs) const {
+ int lhs_type_score = type_score(lhs);
+ int rhs_type_score = type_score(rhs);
+ if (lhs_type_score == rhs_type_score) {
+ return lhs.source_position < rhs.source_position;
+ } else {
+ return lhs_type_score < rhs_type_score;
+ }
+ }
+
+ int type_score(const PositionTableEntry& entry) const {
+ return entry.is_statement ? 1 : 0;
+ }
+};
+
+//
+// The principles for comparing source positions in bytecode arrays
+// are:
+//
+// 1. The number of statement positions must be the same in both.
+//
+// 2. Statement positions may be moved provided they do not affect the
+//    debugger's causal view of the v8 heap and local state. This means
+// statement positions may be moved when their initial position is
+// on bytecodes that manipulate the accumulator and temporary
+// registers.
+//
+// 3. When duplicate expression positions are present, either may
+// be dropped.
+//
+// 4. Expression positions may be applied to later bytecodes in the
+// bytecode array if the current bytecode does not throw.
+//
+// 5. Expression positions may be dropped when they are applied to
+//    bytecodes that manipulate local frame state and are immediately
+//    followed by another source position.
+//
+// 6. The relative ordering of source positions must be preserved.
+//
+bool SourcePositionMatcher::Match(Handle<BytecodeArray> original_bytecode,
+ Handle<BytecodeArray> optimized_bytecode) {
+ SourcePositionTableIterator original(
+ original_bytecode->source_position_table());
+ SourcePositionTableIterator optimized(
+ optimized_bytecode->source_position_table());
+
+ int last_original_bytecode_offset = 0;
+ int last_optimized_bytecode_offset = 0;
+
+ // Ordered lists of expression positions immediately before the
+ // latest statements in each bytecode array.
+ std::vector<PositionTableEntry> original_expression_entries;
+ std::vector<PositionTableEntry> optimized_expression_entries;
+
+ while (true) {
+ MoveToNextStatement(&original, &original_expression_entries);
+ MoveToNextStatement(&optimized, &optimized_expression_entries);
+
+ if (original.done() && optimized.done()) {
+ return true;
+ } else if (original.done()) {
+ return false;
+ } else if (optimized.done()) {
+ return false;
+ }
+
+ if (HasNewExpressionPositionsInOptimized(&original_expression_entries,
+ &optimized_expression_entries)) {
+ return false;
+ }
+
+ StripUnneededExpressionPositions(original_bytecode,
+ &original_expression_entries,
+ original.code_offset());
+ StripUnneededExpressionPositions(optimized_bytecode,
+ &optimized_expression_entries,
+ optimized.code_offset());
+
+ if (!CompareExpressionPositions(&original_expression_entries,
+ &optimized_expression_entries)) {
+ // Message logged in CompareExpressionPositions().
+ return false;
+ }
+
+ // Check original and optimized have matching source positions.
+ if (original.source_position() != optimized.source_position()) {
+ return false;
+ }
+
+ if (original.code_offset() < last_original_bytecode_offset) {
+ return false;
+ }
+ last_original_bytecode_offset = original.code_offset();
+
+ if (optimized.code_offset() < last_optimized_bytecode_offset) {
+ return false;
+ }
+ last_optimized_bytecode_offset = optimized.code_offset();
+
+    // TODO(oth): Can we check that statement positions are semantically
+    // equivalent? e.g. each placed before a bytecode that has debugger
+    // observable effects. This is likely non-trivial.
+ }
+
+ return true;
+}
+
+bool SourcePositionMatcher::HasNewExpressionPositionsInOptimized(
+ const std::vector<PositionTableEntry>* const original_positions,
+ const std::vector<PositionTableEntry>* const optimized_positions) {
+ std::set<PositionTableEntry, PositionTableEntryComparer> original_set(
+ original_positions->begin(), original_positions->end());
+
+ bool retval = false;
+ for (auto optimized_position : *optimized_positions) {
+ if (original_set.find(optimized_position) == original_set.end()) {
+ retval = true;
+ }
+ }
+ return retval;
+}
+
+bool SourcePositionMatcher::CompareExpressionPositions(
+ const std::vector<PositionTableEntry>* const original_positions,
+ const std::vector<PositionTableEntry>* const optimized_positions) {
+ if (original_positions->size() != optimized_positions->size()) {
+ return false;
+ }
+
+ if (original_positions->size() == 0) {
+ return true;
+ }
+
+ for (size_t i = 0; i < original_positions->size(); ++i) {
+ PositionTableEntry original = original_positions->at(i);
+    PositionTableEntry optimized = optimized_positions->at(i);
+ CHECK(original.source_position > 0);
+ if ((original.is_statement || optimized.is_statement) ||
+ (original.source_position != optimized.source_position) ||
+ (original.source_position < 0)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void SourcePositionMatcher::StripUnneededExpressionPositions(
+ Handle<BytecodeArray> bytecode_array,
+ std::vector<PositionTableEntry>* expression_positions,
+ int next_statement_bytecode_offset) {
+ size_t j = 0;
+ for (size_t i = 0; i < expression_positions->size(); ++i) {
+ CHECK(expression_positions->at(i).source_position > 0 &&
+ !expression_positions->at(i).is_statement);
+ int bytecode_end = (i == expression_positions->size() - 1)
+ ? next_statement_bytecode_offset
+ : expression_positions->at(i + 1).code_offset;
+ if (ExpressionPositionIsNeeded(bytecode_array,
+ expression_positions->at(i).code_offset,
+ bytecode_end)) {
+ expression_positions->at(j++) = expression_positions->at(i);
+ }
+ }
+ expression_positions->resize(j);
+}
+
+void SourcePositionMatcher::AdvanceBytecodeIterator(
+ BytecodeArrayIterator* iterator, int bytecode_offset) {
+ while (iterator->current_offset() != bytecode_offset) {
+ iterator->Advance();
+ }
+}
+
+bool SourcePositionMatcher::ExpressionPositionIsNeeded(
+ Handle<BytecodeArray> bytecode_array, int start_offset, int end_offset) {
+ CHECK_GT(end_offset, start_offset);
+ BytecodeArrayIterator iterator(bytecode_array);
+ AdvanceBytecodeIterator(&iterator, start_offset);
+
+ while (iterator.current_offset() != end_offset) {
+ if (Bytecodes::IsWithoutExternalSideEffects(iterator.current_bytecode())) {
+ iterator.Advance();
+ } else {
+      // Bytecode could throw, so an expression position is needed.
+ return true;
+ }
+ }
+ return false;
+}
+
+void SourcePositionMatcher::MoveToNextStatement(
+ SourcePositionTableIterator* iterator,
+ std::vector<PositionTableEntry>* positions) {
+ iterator->Advance();
+ positions->clear();
+ while (!iterator->done()) {
+ if (iterator->is_statement()) {
+ break;
+ }
+ positions->push_back({iterator->code_offset(), iterator->source_position(),
+ iterator->is_statement()});
+ iterator->Advance();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
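
PositionTableEntryComparer above scores expressions 0 and statements 1, so
an ordered set keyed on it groups expression entries before statement
entries and breaks ties on ascending source position. A standalone sketch
of that ordering, using a simplified stand-in for PositionTableEntry (the
real struct lives in src/source-position-table.h):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct Entry {
      int code_offset;
      int source_position;
      bool is_statement;
    };

    int main() {
      std::vector<Entry> entries = {
          {0, 30, true}, {4, 42, false}, {8, 42, true}, {12, 34, false}};
      // Same ordering as PositionTableEntryComparer: expressions (score 0)
      // before statements (score 1), then ascending source position.
      std::sort(entries.begin(), entries.end(),
                [](const Entry& lhs, const Entry& rhs) {
                  int lhs_score = lhs.is_statement ? 1 : 0;
                  int rhs_score = rhs.is_statement ? 1 : 0;
                  if (lhs_score == rhs_score)
                    return lhs.source_position < rhs.source_position;
                  return lhs_score < rhs_score;
                });
      for (const Entry& e : entries) {
        std::cout << (e.is_statement ? "S> " : "E> ")
                  << e.source_position << "\n";
      }
      // Prints: E> 34, E> 42, S> 30, S> 42.
      return 0;
    }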
diff --git a/deps/v8/test/cctest/interpreter/source-position-matcher.h b/deps/v8/test/cctest/interpreter/source-position-matcher.h
new file mode 100644
index 0000000000..7cc49cc20c
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/source-position-matcher.h
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TEST_CCTEST_INTERPRETER_SOURCE_POSITION_COMPARER_H_
+#define TEST_CCTEST_INTERPRETER_SOURCE_POSITION_COMPARER_H_
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/objects.h"
+#include "src/source-position-table.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class SourcePositionMatcher final {
+ public:
+ bool Match(Handle<BytecodeArray> original, Handle<BytecodeArray> optimized);
+
+ private:
+ bool HasNewExpressionPositionsInOptimized(
+ const std::vector<PositionTableEntry>* const original_positions,
+ const std::vector<PositionTableEntry>* const optimized_positions);
+
+ bool CompareExpressionPositions(
+ const std::vector<PositionTableEntry>* const original_positions,
+ const std::vector<PositionTableEntry>* const optimized_positions);
+
+ void StripUnneededExpressionPositions(
+ Handle<BytecodeArray> bytecode_array,
+ std::vector<PositionTableEntry>* positions,
+ int next_statement_bytecode_offset);
+
+ bool ExpressionPositionIsNeeded(Handle<BytecodeArray> bytecode_array,
+ int start_offset, int end_offset);
+
+ void MoveToNextStatement(
+ SourcePositionTableIterator* iterator,
+ std::vector<PositionTableEntry>* expression_positions);
+
+ void AdvanceBytecodeIterator(BytecodeArrayIterator* iterator,
+ int bytecode_offset);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // TEST_CCTEST_INTERPRETER_SOURCE_POSITION_COMPARER_H_
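
A hypothetical call site (the two handle names are placeholders, not part
of the patch): the matcher reports whether an optimized bytecode array
preserves the debugger-observable source positions of the original.

    // original_bytecode and optimized_bytecode would be
    // Handle<BytecodeArray> values produced by the two pipelines.
    SourcePositionMatcher matcher;
    CHECK(matcher.Match(original_bytecode, optimized_bytecode));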
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 2519f25142..d82bad228b 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -72,6 +72,7 @@ class InitializedIgnitionHandleScope : public InitializedHandleScope {
public:
InitializedIgnitionHandleScope() {
i::FLAG_ignition = true;
+ i::FLAG_ignition_osr = false; // TODO(4764): Disabled for now.
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
CcTest::i_isolate()->interpreter()->Initialize();
@@ -113,6 +114,44 @@ std::string BuildActual(const BytecodeExpectationsPrinter& printer,
return actual_stream.str();
}
+bool CompareTexts(const std::string& generated, const std::string& expected) {
+ std::istringstream generated_stream(generated);
+ std::istringstream expected_stream(expected);
+ std::string generated_line;
+ std::string expected_line;
+ // Line number does not include golden file header.
+ int line_number = 0;
+
+ do {
+ std::getline(generated_stream, generated_line);
+ std::getline(expected_stream, expected_line);
+
+ if (!generated_stream.good() && !expected_stream.good()) {
+ return true;
+ }
+
+ if (!generated_stream.good()) {
+ std::cerr << "Expected has extra lines after line " << line_number
+ << "\n";
+ std::cerr << " Expected: '" << expected_line << "'\n";
+ return false;
+ } else if (!expected_stream.good()) {
+ std::cerr << "Generated has extra lines after line " << line_number
+ << "\n";
+ std::cerr << " Generated: '" << generated_line << "'\n";
+ return false;
+ }
+
+ if (generated_line != expected_line) {
+ std::cerr << "Inputs differ at line " << line_number << "\n";
+ std::cerr << " Generated: '" << generated_line << "'\n";
+ std::cerr << " Expected: '" << expected_line << "'\n";
+ return false;
+ }
+ line_number++;
+ } while (true);
+}
+
using ConstantPoolType = BytecodeExpectationsPrinter::ConstantPoolType;
TEST(PrimitiveReturnStatements) {
@@ -122,27 +161,29 @@ TEST(PrimitiveReturnStatements) {
const char* snippets[] = {
"",
- "return;",
+ "return;\n",
- "return null;",
+ "return null;\n",
- "return true;",
+ "return true;\n",
- "return false;",
+ "return false;\n",
- "return 0;",
+ "return 0;\n",
- "return +1;",
+ "return +1;\n",
- "return -1;",
+ "return -1;\n",
- "return +127;",
+ "return +127;\n",
- "return -128;",
+ "return -128;\n",
+
+ "return 2.0;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("PrimitiveReturnStatements.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PrimitiveReturnStatements.golden")));
}
TEST(PrimitiveExpressions) {
@@ -150,35 +191,35 @@ TEST(PrimitiveExpressions) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kNumber);
const char* snippets[] = {
- "var x = 0; return x;",
+ "var x = 0; return x;\n",
- "var x = 0; return x + 3;",
+ "var x = 0; return x + 3;\n",
- "var x = 0; return x - 3;",
+ "var x = 0; return x - 3;\n",
- "var x = 4; return x * 3;",
+ "var x = 4; return x * 3;\n",
- "var x = 4; return x / 3;",
+ "var x = 4; return x / 3;\n",
- "var x = 4; return x % 3;",
+ "var x = 4; return x % 3;\n",
- "var x = 1; return x | 2;",
+ "var x = 1; return x | 2;\n",
- "var x = 1; return x ^ 2;",
+ "var x = 1; return x ^ 2;\n",
- "var x = 1; return x & 2;",
+ "var x = 1; return x & 2;\n",
- "var x = 10; return x << 3;",
+ "var x = 10; return x << 3;\n",
- "var x = 10; return x >> 3;",
+ "var x = 10; return x >> 3;\n",
- "var x = 10; return x >>> 3;",
+ "var x = 10; return x >>> 3;\n",
- "var x = 0; return (x, 3);",
+ "var x = 0; return (x, 3);\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("PrimitiveExpressions.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PrimitiveExpressions.golden")));
}
TEST(LogicalExpressions) {
@@ -186,43 +227,43 @@ TEST(LogicalExpressions) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kNumber);
const char* snippets[] = {
- "var x = 0; return x || 3;",
+ "var x = 0; return x || 3;\n",
- "var x = 0; return (x == 1) || 3;",
+ "var x = 0; return (x == 1) || 3;\n",
- "var x = 0; return x && 3;",
+ "var x = 0; return x && 3;\n",
- "var x = 0; return (x == 0) && 3;",
+ "var x = 0; return (x == 0) && 3;\n",
- "var x = 0; return x || (1, 2, 3);",
+ "var x = 0; return x || (1, 2, 3);\n",
- "var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);",
+ "var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);\n",
"var x = 1; var a = 2, b = 3; return x || (" //
REPEAT_32("\n a = 1, b = 2, ") //
- "3);",
+ "3);\n",
"var x = 0; var a = 2, b = 3; return x && (" //
REPEAT_32("\n a = 1, b = 2, ") //
- "3);",
+ "3);\n",
"var x = 1; var a = 2, b = 3; return (x > 3) || (" //
REPEAT_32("\n a = 1, b = 2, ") //
- "3);",
+ "3);\n",
"var x = 0; var a = 2, b = 3; return (x < 5) && (" //
REPEAT_32("\n a = 1, b = 2, ") //
- "3);",
+ "3);\n",
- "return 0 && 3;",
+ "return 0 && 3;\n",
- "return 1 || 3;",
+ "return 1 || 3;\n",
- "var x = 1; return x && 3 || 0, 1;",
+ "var x = 1; return x && 3 || 0, 1;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("LogicalExpressions.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("LogicalExpressions.golden")));
}
TEST(Parameters) {
@@ -248,8 +289,8 @@ TEST(Parameters) {
"function f(arg1, arg2, arg3, arg4) { arg2 = 1; }",
};
- CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
- LoadGolden("Parameters.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("Parameters.golden")));
}
TEST(IntegerConstants) {
@@ -257,15 +298,15 @@ TEST(IntegerConstants) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kNumber);
const char* snippets[] = {
- "return 12345678;",
+ "return 12345678;\n",
- "var a = 1234; return 5678;",
+ "var a = 1234; return 5678;\n",
- "var a = 1234; return 1234;",
+ "var a = 1234; return 1234;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("IntegerConstants.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("IntegerConstants.golden")));
}
TEST(HeapNumberConstants) {
@@ -273,19 +314,19 @@ TEST(HeapNumberConstants) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kNumber);
const char* snippets[] = {
- "return 1.2;",
+ "return 1.2;\n",
- "var a = 1.2; return 2.6;",
+ "var a = 1.2; return 2.6;\n",
- "var a = 3.14; return 3.14;",
+ "var a = 3.14; return 3.14;\n",
"var a;" //
REPEAT_256("\na = 1.414;") //
- " a = 3.14;",
+ " a = 3.14;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("HeapNumberConstants.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("HeapNumberConstants.golden")));
}
TEST(StringConstants) {
@@ -293,15 +334,15 @@ TEST(StringConstants) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kString);
const char* snippets[] = {
- "return \"This is a string\";",
+ "return \"This is a string\";\n",
- "var a = \"First string\"; return \"Second string\";",
+ "var a = \"First string\"; return \"Second string\";\n",
- "var a = \"Same string\"; return \"Same string\";",
+ "var a = \"Same string\"; return \"Same string\";\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("StringConstants.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("StringConstants.golden")));
}
TEST(PropertyLoads) {
@@ -313,16 +354,16 @@ TEST(PropertyLoads) {
const char* snippets[] = {
"function f(a) { return a.name; }\n"
- "f({name : \"test\"});",
+ "f({name : \"test\"});\n",
"function f(a) { return a[\"key\"]; }\n"
- "f({key : \"test\"});",
+ "f({key : \"test\"});\n",
"function f(a) { return a[100]; }\n"
- "f({100 : \"test\"});",
+ "f({100 : \"test\"});\n",
"function f(a, b) { return a[b]; }\n"
- "f({arg : \"test\"}, \"arg\");",
+ "f({arg : \"test\"}, \"arg\");\n",
"function f(a) { var b = a.name; return a[-124]; }\n"
"f({\"-124\" : \"test\", name : 123 })",
@@ -344,7 +385,8 @@ TEST(PropertyLoads) {
"f({name : \"test\"}, \"name\")\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("PropertyLoads.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PropertyLoads.golden")));
}
TEST(PropertyStores) {
@@ -407,7 +449,8 @@ TEST(PropertyStores) {
"f({name : \"test\"})\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("PropertyStores.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PropertyStores.golden")));
}
#define FUNC_ARG "new (function Obj() { this.func = function() { return; }})()"
@@ -436,7 +479,8 @@ TEST(PropertyCall) {
"f(" FUNC_ARG ")",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("PropertyCall.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PropertyCall.golden")));
}
TEST(LoadGlobal) {
@@ -465,10 +509,11 @@ TEST(LoadGlobal) {
REPEAT_127(" b.name;\n")
" return a;\n"
"}\n"
- "f({name: 1});",
+ "f({name: 1});\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("LoadGlobal.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("LoadGlobal.golden")));
}
TEST(StoreGlobal) {
@@ -481,18 +526,18 @@ TEST(StoreGlobal) {
const char* snippets[] = {
"var a = 1;\n"
"function f() { a = 2; }\n"
- "f();",
+ "f();\n",
"var a = \"test\"; function f(b) { a = b; }\n"
- "f(\"global\");",
+ "f(\"global\");\n",
"'use strict'; var a = 1;\n"
"function f() { a = 2; }\n"
- "f();",
+ "f();\n",
"a = 1;\n"
"function f() { a = 2; }\n"
- "f();",
+ "f();\n",
"a = 1;\n"
"function f(b) {\n"
@@ -500,7 +545,7 @@ TEST(StoreGlobal) {
REPEAT_127(" b.name;\n")
" a = 2;\n"
"}\n"
- "f({name: 1});",
+ "f({name: 1});\n",
"a = 1;\n"
"function f(b) {\n"
@@ -509,10 +554,11 @@ TEST(StoreGlobal) {
REPEAT_127(" b.name;\n")
" a = 2;\n"
"}\n"
- "f({name: 1});",
+ "f({name: 1});\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("StoreGlobal.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("StoreGlobal.golden")));
}
TEST(CallGlobal) {
@@ -525,14 +571,15 @@ TEST(CallGlobal) {
const char* snippets[] = {
"function t() { }\n"
"function f() { return t(); }\n"
- "f();",
+ "f();\n",
"function t(a, b, c) { }\n"
"function f() { return t(1, 2, 3); }\n"
- "f();",
+ "f();\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallGlobal.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("CallGlobal.golden")));
}
TEST(CallRuntime) {
@@ -544,30 +591,26 @@ TEST(CallRuntime) {
const char* snippets[] = {
"function f() { %TheHole() }\n"
- "f();",
+ "f();\n",
"function f(a) { return %IsArray(a) }\n"
- "f(undefined);",
+ "f(undefined);\n",
"function f() { return %Add(1, 2) }\n"
- "f();",
+ "f();\n",
"function f() { return %spread_iterable([1]) }\n"
- "f();",
+ "f();\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallRuntime.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("CallRuntime.golden")));
}
TEST(IfConditions) {
- if (FLAG_harmony_instanceof) {
- // TODO(mvstanton): when ES6 instanceof ships, regenerate the bytecode
- // expectations and remove this flag check.
- return;
- }
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate(),
- ConstantPoolType::kNumber);
+ ConstantPoolType::kMixed);
printer.set_wrap(false);
printer.set_test_function_name("f");
@@ -579,7 +622,7 @@ TEST(IfConditions) {
" return -1;\n"
" }\n"
"};\n"
- "f();",
+ "f();\n",
"function f() {\n"
" if ('lucky') {\n"
@@ -588,7 +631,7 @@ TEST(IfConditions) {
" return -1;\n"
" }\n"
"};\n"
- "f();",
+ "f();\n",
"function f() {\n"
" if (false) {\n"
@@ -597,14 +640,14 @@ TEST(IfConditions) {
" return -1;\n"
" }\n"
"};\n"
- "f();",
+ "f();\n",
"function f() {\n"
" if (false) {\n"
" return 1;\n"
" }\n"
"};\n"
- "f();",
+ "f();\n",
"function f() {\n"
" var a = 1;\n"
@@ -614,7 +657,7 @@ TEST(IfConditions) {
" return 2;\n"
" }\n"
"};\n"
- "f();",
+ "f();\n",
"function f(a) {\n"
" if (a <= 0) {\n"
@@ -623,14 +666,14 @@ TEST(IfConditions) {
" return -200;\n"
" }\n"
"};\n"
- "f(99);",
+ "f(99);\n",
"function f(a, b) { if (a in b) { return 200; } }"
- "f('prop', { prop: 'yes'});",
+ "f('prop', { prop: 'yes'});\n",
"function f(z) { var a = 0; var b = 0; if (a === 0.01) {\n"
REPEAT_64(" b = a; a = b;\n")
- " return 200; } else { return -200; } } f(0.001);",
+ " return 200; } else { return -200; } } f(0.001);\n",
"function f() {\n"
" var a = 0; var b = 0;\n"
@@ -638,7 +681,7 @@ TEST(IfConditions) {
REPEAT_64(" b = a; a = b;\n")
" return 200; } else { return -200; }\n"
"};\n"
- "f();",
+ "f();\n",
"function f(a, b) {\n"
" if (a == b) { return 1; }\n"
@@ -651,7 +694,7 @@ TEST(IfConditions) {
" if (a instanceof b) { return 1; }\n"
" return 0;\n"
"}\n"
- "f(1, 1);",
+ "f(1, 1);\n",
"function f() {\n"
" var a = 0;\n"
@@ -661,10 +704,22 @@ TEST(IfConditions) {
" return -20;\n"
" }\n"
"};\n"
- "f();",
+ "f();\n",
+
+ "function f(a, b) {\n"
+ " if (a == b || a < 0) {\n"
+ " return 1;\n"
+ " } else if (a > 0 && b > 0) {\n"
+ " return 0;\n"
+ " } else {\n"
+ " return -1;\n"
+ " }\n"
+ "};\n"
+ "f(-1, 1);\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("IfConditions.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("IfConditions.golden")));
}
TEST(DeclareGlobals) {
@@ -677,18 +732,19 @@ TEST(DeclareGlobals) {
printer.set_top_level(true);
const char* snippets[] = {
- "var a = 1;",
+ "var a = 1;\n",
- "function f() {}",
+ "function f() {}\n",
"var a = 1;\n"
- "a=2;",
+ "a=2;\n",
"function f() {}\n"
- "f();",
+ "f();\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("DeclareGlobals.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("DeclareGlobals.golden")));
}
TEST(BreakableBlocks) {
@@ -703,7 +759,7 @@ TEST(BreakableBlocks) {
" break label;\n"
" x = x + 1;\n"
"}\n"
- "return x;",
+ "return x;\n",
"var sum = 0;\n"
"outer: {\n"
@@ -714,7 +770,7 @@ TEST(BreakableBlocks) {
" }\n"
" }\n"
"}\n"
- "return sum;",
+ "return sum;\n",
"outer: {\n"
" let y = 10;\n"
@@ -731,11 +787,11 @@ TEST(BreakableBlocks) {
" y = 3;\n"
" }\n"
"}\n"
- "x = 4;",
+ "x = 4;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("BreakableBlocks.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("BreakableBlocks.golden")));
}
TEST(BasicLoops) {
@@ -745,13 +801,13 @@ TEST(BasicLoops) {
const char* snippets[] = {
"var x = 0;\n"
"while (false) { x = 99; break; continue; }\n"
- "return x;",
+ "return x;\n",
"var x = 0;\n"
"while (false) {\n"
" x = x + 1;\n"
"};\n"
- "return x;",
+ "return x;\n",
"var x = 0;\n"
"var y = 1;\n"
@@ -761,7 +817,7 @@ TEST(BasicLoops) {
" if (x == 3) continue;\n"
" if (x == 4) break;\n"
"}\n"
- "return y;",
+ "return y;\n",
"var i = 0;\n"
"while (true) {\n"
@@ -772,7 +828,7 @@ TEST(BasicLoops) {
" if (i == 5) break;\n"
" i = i + 1;\n"
"}\n"
- "return i;",
+ "return i;\n",
"var i = 0;\n"
"while (true) {\n"
@@ -783,7 +839,7 @@ TEST(BasicLoops) {
" i = i + 1;\n"
" break;\n"
"}\n"
- "return i;",
+ "return i;\n",
"var x = 10;\n"
"var y = 1;\n"
@@ -791,7 +847,7 @@ TEST(BasicLoops) {
" y = y * 12;\n"
" x = x - 1;\n"
"}\n"
- "return y;",
+ "return y;\n",
"var x = 0; var y = 1;\n"
"do {\n"
@@ -800,7 +856,7 @@ TEST(BasicLoops) {
" if (x == 6) continue;\n"
" x = x + 1;\n"
"} while (x < 10);\n"
- "return y;",
+ "return y;\n",
"var x = 10;\n"
"var y = 1;\n"
@@ -808,7 +864,7 @@ TEST(BasicLoops) {
" y = y * 12;\n"
" x = x - 1;\n"
"} while (x);\n"
- "return y;",
+ "return y;\n",
"var x = 0; var y = 1;\n"
"do {\n"
@@ -817,7 +873,7 @@ TEST(BasicLoops) {
" x = x + 1;\n"
" if (x == 6) continue;\n"
"} while (false);\n"
- "return y;",
+ "return y;\n",
"var x = 0; var y = 1;\n"
"do {\n"
@@ -826,56 +882,56 @@ TEST(BasicLoops) {
" x = x + 1;\n"
" if (x == 6) continue;\n"
"} while (true);\n"
- "return y;",
+ "return y;\n",
"var x = 0;\n"
"for (;;) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
" x = x + 1;\n"
- "}",
+ "}\n",
"for (var x = 0;;) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
" x = x + 1;\n"
- "}",
+ "}\n",
"var x = 0;\n"
"for (;; x = x + 1) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
- "}",
+ "}\n",
"for (var x = 0;; x = x + 1) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
- "}",
+ "}\n",
"var u = 0;\n"
"for (var i = 0; i < 100; i = i + 1) {\n"
" u = u + 1;\n"
" continue;\n"
- "}",
+ "}\n",
"var y = 1;\n"
"for (var x = 10; x; --x) {\n"
" y = y * 12;\n"
"}\n"
- "return y;",
+ "return y;\n",
"var x = 0;\n"
"for (var i = 0; false; i++) {\n"
" x = x + 1;\n"
"};\n"
- "return x;",
+ "return x;\n",
"var x = 0;\n"
"for (var i = 0; true; ++i) {\n"
" x = x + 1;\n"
" if (x == 20) break;\n"
"};\n"
- "return x;",
+ "return x;\n",
"var a = 0;\n"
"while (a) {\n"
@@ -885,10 +941,11 @@ TEST(BasicLoops) {
" if (z) continue;\n"
" z++;\n"
" }\n"
- "}",
+ "}\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("BasicLoops.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("BasicLoops.golden")));
}
TEST(JumpsRequiringConstantWideOperands) {
@@ -904,11 +961,11 @@ TEST(JumpsRequiringConstantWideOperands) {
" if (i == 1) continue;\n"
" if (i == 2) break;\n"
"}\n"
- "return 3;",
+ "return 3;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("JumpsRequiringConstantWideOperands.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("JumpsRequiringConstantWideOperands.golden")));
}
TEST(UnaryOperators) {
@@ -920,32 +977,33 @@ TEST(UnaryOperators) {
"while (x != 10) {\n"
" x = x + 10;\n"
"}\n"
- "return x;",
+ "return x;\n",
"var x = false;\n"
"do {\n"
" x = !x;\n"
"} while(x == false);\n"
- "return x;",
+ "return x;\n",
"var x = 101;\n"
- "return void(x * 3);",
+ "return void(x * 3);\n",
"var x = 1234;\n"
"var y = void (x * x - 1);\n"
- "return y;",
+ "return y;\n",
"var x = 13;\n"
- "return ~x;",
+ "return ~x;\n",
"var x = 13;\n"
- "return +x;",
+ "return +x;\n",
"var x = 13;\n"
- "return -x;",
+ "return -x;\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("UnaryOperators.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("UnaryOperators.golden")));
}
TEST(Typeof) {
@@ -967,8 +1025,8 @@ TEST(Typeof) {
"};",
};
- CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
- LoadGolden("Typeof.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("Typeof.golden")));
}
TEST(Delete) {
@@ -977,23 +1035,24 @@ TEST(Delete) {
ConstantPoolType::kMixed);
const char* snippets[] = {
- "var a = {x:13, y:14}; return delete a.x;",
+ "var a = {x:13, y:14}; return delete a.x;\n",
- "'use strict'; var a = {x:13, y:14}; return delete a.x;",
+ "'use strict'; var a = {x:13, y:14}; return delete a.x;\n",
- "var a = {1:13, 2:14}; return delete a[2];",
+ "var a = {1:13, 2:14}; return delete a[2];\n",
- "var a = 10; return delete a;",
+ "var a = 10; return delete a;\n",
"'use strict';\n"
"var a = {1:10};\n"
"(function f1() {return a;});\n"
- "return delete a[1];",
+ "return delete a[1];\n",
- "return delete 'test';",
+ "return delete 'test';\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Delete.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("Delete.golden")));
}
TEST(GlobalDelete) {
@@ -1008,29 +1067,30 @@ TEST(GlobalDelete) {
"function f() {\n"
" return delete a.x;\n"
"};\n"
- "f();",
+ "f();\n",
"a = {1:13, 2:14};\n"
"function f() {\n"
" 'use strict';\n"
" return delete a[1];\n"
"};\n"
- "f();",
+ "f();\n",
"var a = {x:13, y:14};\n"
"function f() {\n"
" return delete a;\n"
"};\n"
- "f();",
+ "f();\n",
"b = 30;\n"
"function f() {\n"
" return delete b;\n"
"};\n"
- "f();",
+ "f();\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("GlobalDelete.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("GlobalDelete.golden")));
}
TEST(FunctionLiterals) {
@@ -1039,15 +1099,15 @@ TEST(FunctionLiterals) {
ConstantPoolType::kMixed);
const char* snippets[] = {
- "return function(){ }",
+ "return function(){ }\n",
- "return (function(){ })()",
+ "return (function(){ })()\n",
- "return (function(x){ return x; })(1)",
+ "return (function(x){ return x; })(1)\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("FunctionLiterals.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("FunctionLiterals.golden")));
}
TEST(RegExpLiterals) {
@@ -1056,14 +1116,15 @@ TEST(RegExpLiterals) {
ConstantPoolType::kString);
const char* snippets[] = {
- "return /ab+d/;",
+ "return /ab+d/;\n",
- "return /(\\w+)\\s(\\w+)/i;",
+ "return /(\\w+)\\s(\\w+)/i;\n",
- "return /ab+d/.exec('abdd');",
+ "return /ab+d/.exec('abdd');\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("RegExpLiterals.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("RegExpLiterals.golden")));
}
TEST(RegExpLiteralsWide) {
@@ -1074,11 +1135,11 @@ TEST(RegExpLiteralsWide) {
const char* snippets[] = {
"var a;" //
REPEAT_256("\na = 1.23;") //
- "\nreturn /ab+d/;",
+ "\nreturn /ab+d/;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("RegExpLiteralsWide.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("RegExpLiteralsWide.golden")));
}
TEST(ArrayLiterals) {
@@ -1087,16 +1148,17 @@ TEST(ArrayLiterals) {
ConstantPoolType::kMixed);
const char* snippets[] = {
- "return [ 1, 2 ];",
+ "return [ 1, 2 ];\n",
- "var a = 1; return [ a, a + 1 ];",
+ "var a = 1; return [ a, a + 1 ];\n",
- "return [ [ 1, 2 ], [ 3 ] ];",
+ "return [ [ 1, 2 ], [ 3 ] ];\n",
- "var a = 1; return [ [ a, 2 ], [ a + 2 ] ];",
+ "var a = 1; return [ [ a, 2 ], [ a + 2 ] ];\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ArrayLiterals.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ArrayLiterals.golden")));
}
TEST(ArrayLiteralsWide) {
@@ -1107,11 +1169,11 @@ TEST(ArrayLiteralsWide) {
const char* snippets[] = {
"var a;" //
REPEAT_256("\na = 1.23;") //
- "\nreturn [ 1 , 2 ];",
+ "\nreturn [ 1 , 2 ];\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("ArrayLiteralsWide.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ArrayLiteralsWide.golden")));
}
TEST(ObjectLiterals) {
@@ -1120,38 +1182,39 @@ TEST(ObjectLiterals) {
ConstantPoolType::kMixed);
const char* snippets[] = {
- "return { };",
+ "return { };\n",
- "return { name: 'string', val: 9.2 };",
+ "return { name: 'string', val: 9.2 };\n",
- "var a = 1; return { name: 'string', val: a };",
+ "var a = 1; return { name: 'string', val: a };\n",
- "var a = 1; return { val: a, val: a + 1 };",
+ "var a = 1; return { val: a, val: a + 1 };\n",
- "return { func: function() { } };",
+ "return { func: function() { } };\n",
- "return { func(a) { return a; } };",
+ "return { func(a) { return a; } };\n",
- "return { get a() { return 2; } };",
+ "return { get a() { return 2; } };\n",
- "return { get a() { return this.x; }, set a(val) { this.x = val } };",
+ "return { get a() { return this.x; }, set a(val) { this.x = val } };\n",
- "return { set b(val) { this.y = val } };",
+ "return { set b(val) { this.y = val } };\n",
- "var a = 1; return { 1: a };",
+ "var a = 1; return { 1: a };\n",
- "return { __proto__: null };",
+ "return { __proto__: null };\n",
- "var a = 'test'; return { [a]: 1 };",
+ "var a = 'test'; return { [a]: 1 };\n",
- "var a = 'test'; return { val: a, [a]: 1 };",
+ "var a = 'test'; return { val: a, [a]: 1 };\n",
- "var a = 'test'; return { [a]: 1, __proto__: {} };",
+ "var a = 'test'; return { [a]: 1, __proto__: {} };\n",
- "var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };",
+ "var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ObjectLiterals.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ObjectLiterals.golden")));
}
TEST(ObjectLiteralsWide) {
@@ -1161,11 +1224,11 @@ TEST(ObjectLiteralsWide) {
const char* snippets[] = {
"var a;" //
REPEAT_256("\na = 1.23;") //
- "\nreturn { name: 'string', val: 9.2 };",
+ "\nreturn { name: 'string', val: 9.2 };\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("ObjectLiteralsWide.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ObjectLiteralsWide.golden")));
}
TEST(TopLevelObjectLiterals) {
@@ -1178,11 +1241,11 @@ TEST(TopLevelObjectLiterals) {
printer.set_top_level(true);
const char* snippets[] = {
- "var a = { func: function() { } };",
+ "var a = { func: function() { } };\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("TopLevelObjectLiterals.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("TopLevelObjectLiterals.golden")));
}
TEST(TryCatch) {
@@ -1191,14 +1254,15 @@ TEST(TryCatch) {
ConstantPoolType::kString);
const char* snippets[] = {
- "try { return 1; } catch(e) { return 2; }",
+ "try { return 1; } catch(e) { return 2; }\n",
"var a;\n"
"try { a = 1 } catch(e1) {};\n"
- "try { a = 2 } catch(e2) { a = 3 }",
+ "try { a = 2 } catch(e2) { a = 3 }\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("TryCatch.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("TryCatch.golden")));
}
TEST(TryFinally) {
@@ -1207,17 +1271,18 @@ TEST(TryFinally) {
ConstantPoolType::kString);
const char* snippets[] = {
"var a = 1;\n"
- "try { a = 2; } finally { a = 3; }",
+ "try { a = 2; } finally { a = 3; }\n",
"var a = 1;\n"
- "try { a = 2; } catch(e) { a = 20 } finally { a = 3; }",
+ "try { a = 2; } catch(e) { a = 20 } finally { a = 3; }\n",
"var a; try {\n"
" try { a = 1 } catch(e) { a = 2 }\n"
- "} catch(e) { a = 20 } finally { a = 3; }",
+ "} catch(e) { a = 20 } finally { a = 3; }\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("TryFinally.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("TryFinally.golden")));
}
TEST(Throw) {
@@ -1225,14 +1290,15 @@ TEST(Throw) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kString);
const char* snippets[] = {
- "throw 1;",
+ "throw 1;\n",
- "throw 'Error';",
+ "throw 'Error';\n",
- "var a = 1; if (a) { throw 'Error'; };",
+ "var a = 1; if (a) { throw 'Error'; };\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Throw.golden"));
+ CHECK(
+ CompareTexts(BuildActual(printer, snippets), LoadGolden("Throw.golden")));
}
TEST(CallNew) {
@@ -1245,11 +1311,11 @@ TEST(CallNew) {
const char* snippets[] = {
"function bar() { this.value = 0; }\n"
"function f() { return new bar(); }\n"
- "f();",
+ "f();\n",
"function bar(x) { this.value = 18; this.x = x;}\n"
"function f() { return new bar(3); }\n"
- "f();",
+ "f();\n",
"function bar(w, x, y, z) {\n"
" this.value = 18;\n"
@@ -1258,10 +1324,11 @@ TEST(CallNew) {
" this.z = z;\n"
"}\n"
"function f() { return new bar(3, 4, 5); }\n"
- "f();",
+ "f();\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallNew.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("CallNew.golden")));
}
TEST(ContextVariables) {
@@ -1274,27 +1341,27 @@ TEST(ContextVariables) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "var a; return function() { a = 1; };",
+ "var a; return function() { a = 1; };\n",
- "var a = 1; return function() { a = 2; };",
+ "var a = 1; return function() { a = 2; };\n",
- "var a = 1; var b = 2; return function() { a = 2; b = 3 };",
+ "var a = 1; var b = 2; return function() { a = 2; b = 3 };\n",
- "var a; (function() { a = 2; })(); return a;",
+ "var a; (function() { a = 2; })(); return a;\n",
"'use strict';\n"
"let a = 1;\n"
- "{ let b = 2; return function() { a + b; }; }",
+ "{ let b = 2; return function() { a + b; }; }\n",
"'use strict';\n"
REPEAT_249_UNIQUE_VARS()
"eval();\n"
"var b = 100;\n"
- "return b",
+ "return b\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("ContextVariables.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ContextVariables.golden")));
}
TEST(ContextParameters) {
@@ -1314,8 +1381,8 @@ TEST(ContextParameters) {
"function f() { var self = this; return function() { self = 2; }; }",
};
- CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
- LoadGolden("ContextParameters.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("ContextParameters.golden")));
}
TEST(OuterContextVariables) {
@@ -1345,8 +1412,8 @@ TEST(OuterContextVariables) {
"var f = new Outer().getInnerFunc();",
};
- CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
- LoadGolden("OuterContextVariables.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("OuterContextVariables.golden")));
}
TEST(CountOperators) {
@@ -1354,30 +1421,31 @@ TEST(CountOperators) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "var a = 1; return ++a;",
+ "var a = 1; return ++a;\n",
- "var a = 1; return a++;",
+ "var a = 1; return a++;\n",
- "var a = 1; return --a;",
+ "var a = 1; return --a;\n",
- "var a = 1; return a--;",
+ "var a = 1; return a--;\n",
- "var a = { val: 1 }; return a.val++;",
+ "var a = { val: 1 }; return a.val++;\n",
- "var a = { val: 1 }; return --a.val;",
+ "var a = { val: 1 }; return --a.val;\n",
- "var name = 'var'; var a = { val: 1 }; return a[name]--;",
+ "var name = 'var'; var a = { val: 1 }; return a[name]--;\n",
- "var name = 'var'; var a = { val: 1 }; return ++a[name];",
+ "var name = 'var'; var a = { val: 1 }; return ++a[name];\n",
- "var a = 1; var b = function() { return a }; return ++a;",
+ "var a = 1; var b = function() { return a }; return ++a;\n",
- "var a = 1; var b = function() { return a }; return a--;",
+ "var a = 1; var b = function() { return a }; return a--;\n",
- "var idx = 1; var a = [1, 2]; return a[idx++] = 2;",
+ "var idx = 1; var a = [1, 2]; return a[idx++] = 2;\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CountOperators.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("CountOperators.golden")));
}
TEST(GlobalCountOperators) {
@@ -1390,23 +1458,23 @@ TEST(GlobalCountOperators) {
const char* snippets[] = {
"var global = 1;\n"
"function f() { return ++global; }\n"
- "f();",
+ "f();\n",
"var global = 1;\n"
"function f() { return global--; }\n"
- "f();",
+ "f();\n",
"unallocated = 1;\n"
"function f() { 'use strict'; return --unallocated; }\n"
- "f();",
+ "f();\n",
"unallocated = 1;\n"
"function f() { return unallocated++; }\n"
- "f();",
+ "f();\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("GlobalCountOperators.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("GlobalCountOperators.golden")));
}
TEST(CompoundExpressions) {
@@ -1414,19 +1482,19 @@ TEST(CompoundExpressions) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "var a = 1; a += 2;",
+ "var a = 1; a += 2;\n",
- "var a = 1; a /= 2;",
+ "var a = 1; a /= 2;\n",
- "var a = { val: 2 }; a.name *= 2;",
+ "var a = { val: 2 }; a.name *= 2;\n",
- "var a = { 1: 2 }; a[1] ^= 2;",
+ "var a = { 1: 2 }; a[1] ^= 2;\n",
- "var a = 1; (function f() { return a; }); a |= 24;",
+ "var a = 1; (function f() { return a; }); a |= 24;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("CompoundExpressions.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("CompoundExpressions.golden")));
}
TEST(GlobalCompoundExpressions) {
@@ -1439,15 +1507,15 @@ TEST(GlobalCompoundExpressions) {
const char* snippets[] = {
"var global = 1;\n"
"function f() { return global &= 1; }\n"
- "f();",
+ "f();\n",
"unallocated = 1;\n"
"function f() { return unallocated += 1; }\n"
- "f();",
+ "f();\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("GlobalCompoundExpressions.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("GlobalCompoundExpressions.golden")));
}
TEST(CreateArguments) {
@@ -1471,8 +1539,8 @@ TEST(CreateArguments) {
"function f(a, b, c) { 'use strict'; return arguments; }",
};
- CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
- LoadGolden("CreateArguments.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("CreateArguments.golden")));
}
TEST(CreateRestParameter) {
@@ -1492,8 +1560,8 @@ TEST(CreateRestParameter) {
"function f(a, ...restArgs) { return restArgs[0] + arguments[0]; }",
};
- CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
- LoadGolden("CreateRestParameter.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("CreateRestParameter.golden")));
}
TEST(ForIn) {
@@ -1501,29 +1569,30 @@ TEST(ForIn) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "for (var p in null) {}",
+ "for (var p in null) {}\n",
- "for (var p in undefined) {}",
+ "for (var p in undefined) {}\n",
- "for (var p in undefined) {}",
+ "for (var p in undefined) {}\n",
"var x = 'potatoes';\n"
- "for (var p in x) { return p; }",
+ "for (var p in x) { return p; }\n",
"var x = 0;\n"
- "for (var p in [1,2,3]) { x += p; }",
+ "for (var p in [1,2,3]) { x += p; }\n",
"var x = { 'a': 1, 'b': 2 };\n"
"for (x['a'] in [10, 20, 30]) {\n"
" if (x['a'] == 10) continue;\n"
" if (x['a'] == 20) break;\n"
- "}",
+ "}\n",
"var x = [ 10, 11, 12 ] ;\n"
- "for (x[0] in [1,2,3]) { return x[3]; }",
+ "for (x[0] in [1,2,3]) { return x[3]; }\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ForIn.golden"));
+ CHECK(
+ CompareTexts(BuildActual(printer, snippets), LoadGolden("ForIn.golden")));
}
TEST(ForOf) {
@@ -1531,21 +1600,22 @@ TEST(ForOf) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "for (var p of [0, 1, 2]) {}",
+ "for (var p of [0, 1, 2]) {}\n",
"var x = 'potatoes';\n"
- "for (var p of x) { return p; }",
+ "for (var p of x) { return p; }\n",
"for (var x of [10, 20, 30]) {\n"
" if (x == 10) continue;\n"
" if (x == 20) break;\n"
- "}",
+ "}\n",
"var x = { 'a': 1, 'b': 2 };\n"
- "for (x['a'] of [1,2,3]) { return x['a']; }",
+ "for (x['a'] of [1,2,3]) { return x['a']; }\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ForOf.golden"));
+ CHECK(
+ CompareTexts(BuildActual(printer, snippets), LoadGolden("ForOf.golden")));
}
TEST(Conditional) {
@@ -1553,12 +1623,18 @@ TEST(Conditional) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kNumber);
const char* snippets[] = {
- "return 1 ? 2 : 3;",
+ "return 1 ? 2 : 3;\n",
+
+ "return 1 ? 2 ? 3 : 4 : 5;\n",
- "return 1 ? 2 ? 3 : 4 : 5;",
+ "return 0 < 1 ? 2 : 3;\n",
+
+ "var x = 0;\n"
+ "return x ? 2 : 3;\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Conditional.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("Conditional.golden")));
}
TEST(Switch) {
@@ -1570,39 +1646,39 @@ TEST(Switch) {
"switch(a) {\n"
" case 1: return 2;\n"
" case 2: return 3;\n"
- "}",
+ "}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 1: a = 2; break;\n"
" case 2: a = 3; break;\n"
- "}",
+ "}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 1: a = 2; // fall-through\n"
" case 2: a = 3; break;\n"
- "}",
+ "}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 2: break;\n"
" case 3: break;\n"
" default: a = 1; break;\n"
- "}",
+ "}\n",
"var a = 1;\n"
"switch(typeof(a)) {\n"
" case 2: a = 1; break;\n"
" case 3: a = 2; break;\n"
" default: a = 3; break;\n"
- "}",
+ "}\n",
"var a = 1;\n"
"switch(a) {\n"
" case typeof(a): a = 1; break;\n"
" default: a = 2; break;\n"
- "}",
+ "}\n",
"var a = 1;\n"
"switch(a) {\n"
@@ -1612,7 +1688,7 @@ TEST(Switch) {
" case 2:\n"
" a = 3;\n"
" break;\n"
- "}",
+ "}\n",
"var a = 1;\n"
"switch(a) {\n"
@@ -1622,10 +1698,11 @@ TEST(Switch) {
" default : a = 2; break;\n"
" } // fall-through\n"
" case 2: a = 3;\n"
- "}",
+ "}\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Switch.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("Switch.golden")));
}
TEST(BasicBlockToBoolean) {
@@ -1633,15 +1710,15 @@ TEST(BasicBlockToBoolean) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kNumber);
const char* snippets[] = {
- "var a = 1; if (a || a < 0) { return 1; }",
+ "var a = 1; if (a || a < 0) { return 1; }\n",
- "var a = 1; if (a && a < 0) { return 1; }",
+ "var a = 1; if (a && a < 0) { return 1; }\n",
- "var a = 1; a = (a || a < 0) ? 2 : 3;",
+ "var a = 1; a = (a || a < 0) ? 2 : 3;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("BasicBlockToBoolean.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("BasicBlockToBoolean.golden")));
}
TEST(DeadCodeRemoval) {
@@ -1649,17 +1726,17 @@ TEST(DeadCodeRemoval) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kNumber);
const char* snippets[] = {
- "return; var a = 1; a();",
+ "return; var a = 1; a();\n",
- "if (false) { return; }; var a = 1;",
+ "if (false) { return; }; var a = 1;\n",
- "if (true) { return 1; } else { return 2; };",
+ "if (true) { return 1; } else { return 2; };\n",
- "var a = 1; if (a) { return 1; }; return 2;",
+ "var a = 1; if (a) { return 1; }; return 2;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("DeadCodeRemoval.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("DeadCodeRemoval.golden")));
}
TEST(ThisFunction) {
@@ -1677,8 +1754,8 @@ TEST(ThisFunction) {
"f = function f() { return f; };",
};
- CHECK_EQ(BuildActual(printer, snippets, "", "\nf();"),
- LoadGolden("ThisFunction.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
+ LoadGolden("ThisFunction.golden")));
}
TEST(NewTarget) {
@@ -1687,12 +1764,13 @@ TEST(NewTarget) {
ConstantPoolType::kMixed);
const char* snippets[] = {
- "return new.target;",
+ "return new.target;\n",
- "new.target;",
+ "new.target;\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("NewTarget.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("NewTarget.golden")));
}
TEST(RemoveRedundantLdar) {
@@ -1705,22 +1783,22 @@ TEST(RemoveRedundantLdar) {
" ld_a = ld_a + ld_a;\n" // in a different basicblock.
" if (ld_a > 10) break;\n"
"}\n"
- "return ld_a;",
+ "return ld_a;\n",
"var ld_a = 1;\n"
"do {\n"
" ld_a = ld_a + ld_a;\n"
" if (ld_a > 10) continue;\n"
"} while(false);\n"
- "return ld_a;",
+ "return ld_a;\n",
"var ld_a = 1;\n"
" ld_a = ld_a + ld_a;\n"
- " return ld_a;",
+ " return ld_a;\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("RemoveRedundantLdar.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("RemoveRedundantLdar.golden")));
}
TEST(AssignmentsInBinaryExpression) {
@@ -1729,28 +1807,28 @@ TEST(AssignmentsInBinaryExpression) {
ConstantPoolType::kString);
const char* snippets[] = {
"var x = 0, y = 1;\n"
- "return (x = 2, y = 3, x = 4, y = 5);",
+ "return (x = 2, y = 3, x = 4, y = 5);\n",
"var x = 55;\n"
"var y = (x = 100);\n"
- "return y;",
+ "return y;\n",
"var x = 55;\n"
"x = x + (x = 100) + (x = 101);\n"
- "return x;",
+ "return x;\n",
"var x = 55;\n"
"x = (x = 56) - x + (x = 57);\n"
"x++;\n"
- "return x;",
+ "return x;\n",
"var x = 55;\n"
"var y = x + (x = 1) + (x = 2) + (x = 3);\n"
- "return y;",
+ "return y;\n",
"var x = 55;\n"
"var x = x + (x = 1) + (x = 2) + (x = 3);\n"
- "return x;",
+ "return x;\n",
"var x = 10, y = 20;\n"
"return x + (x = 1) + (x + 1) * (y = 2) + (y = 3) + (x = 4) + (y = 5) + "
@@ -1760,8 +1838,8 @@ TEST(AssignmentsInBinaryExpression) {
"return 1 + x + (x++) + (++x);\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("AssignmentsInBinaryExpression.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("AssignmentsInBinaryExpression.golden")));
}
TEST(Eval) {
@@ -1769,10 +1847,11 @@ TEST(Eval) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kString);
const char* snippets[] = {
- "return eval('1;');",
+ "return eval('1;');\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("Eval.golden"));
+ CHECK(
+ CompareTexts(BuildActual(printer, snippets), LoadGolden("Eval.golden")));
}
TEST(LookupSlot) {
@@ -1781,14 +1860,15 @@ TEST(LookupSlot) {
ConstantPoolType::kString);
const char* snippets[] = {
- "eval('var x = 10;'); return x;",
+ "eval('var x = 10;'); return x;\n",
- "eval('var x = 10;'); return typeof x;",
+ "eval('var x = 10;'); return typeof x;\n",
- "x = 20; return eval('');",
+ "x = 20; return eval('');\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("LookupSlot.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("LookupSlot.golden")));
}
TEST(CallLookupSlot) {
@@ -1796,10 +1876,11 @@ TEST(CallLookupSlot) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "g = function(){}; eval(''); return g();",
+ "g = function(){}; eval(''); return g();\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("CallLookupSlot.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("CallLookupSlot.golden")));
}
// TODO(mythria): tests for variable/function declaration in lookup slots.
@@ -1831,7 +1912,7 @@ TEST(LookupSlotInEval) {
"}\n"
"f1();");
- CHECK_EQ(actual, LoadGolden("LookupSlotInEval.golden"));
+ CHECK(CompareTexts(actual, LoadGolden("LookupSlotInEval.golden")));
}
TEST(LookupSlotWideInEval) {
@@ -1867,7 +1948,7 @@ TEST(LookupSlotWideInEval) {
"}\n"
"f1();");
- CHECK_EQ(actual, LoadGolden("LookupSlotWideInEval.golden"));
+ CHECK(CompareTexts(actual, LoadGolden("LookupSlotWideInEval.golden")));
}
TEST(DeleteLookupSlotInEval) {
@@ -1897,7 +1978,7 @@ TEST(DeleteLookupSlotInEval) {
"}\n"
"f1();");
- CHECK_EQ(actual, LoadGolden("DeleteLookupSlotInEval.golden"));
+ CHECK(CompareTexts(actual, LoadGolden("DeleteLookupSlotInEval.golden")));
}
TEST(WideRegisters) {
@@ -1913,41 +1994,41 @@ TEST(WideRegisters) {
ConstantPoolType::kNumber);
const char* snippets[] = {
"x0 = x127;\n"
- "return x0;",
+ "return x0;\n",
"x127 = x126;\n"
- "return x127;",
+ "return x127;\n",
"if (x2 > 3) { return x129; }\n"
- "return x128;",
+ "return x128;\n",
"var x0 = 0;\n"
"if (x129 == 3) { var x129 = x0; }\n"
"if (x2 > 3) { return x0; }\n"
- "return x129;",
+ "return x129;\n",
"var x0 = 0;\n"
"var x1 = 0;\n"
"for (x128 = 0; x128 < 64; x128++) {"
" x1 += x128;"
"}"
- "return x128;",
+ "return x128;\n",
"var x0 = 1234;\n"
"var x1 = 0;\n"
"for (x128 in x0) {"
" x1 += x128;"
"}"
- "return x1;",
+ "return x1;\n",
"x0 = %Add(x64, x63);\n"
"x1 = %Add(x27, x143);\n"
"%TheHole();\n"
- "return x1;",
+ "return x1;\n",
};
- CHECK_EQ(BuildActual(printer, snippets, prologue.c_str()),
- LoadGolden("WideRegisters.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets, prologue.c_str()),
+ LoadGolden("WideRegisters.golden")));
}
TEST(ConstVariable) {
@@ -1955,16 +2036,17 @@ TEST(ConstVariable) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kString);
const char* snippets[] = {
- "const x = 10;",
+ "const x = 10;\n",
- "const x = 10; return x;",
+ "const x = 10; return x;\n",
- "const x = ( x = 20);",
+ "const x = ( x = 20);\n",
- "const x = 10; x = 20;",
+ "const x = 10; x = 20;\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("ConstVariable.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ConstVariable.golden")));
}
TEST(LetVariable) {
@@ -1972,16 +2054,17 @@ TEST(LetVariable) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kString);
const char* snippets[] = {
- "let x = 10;",
+ "let x = 10;\n",
- "let x = 10; return x;",
+ "let x = 10; return x;\n",
- "let x = (x = 20);",
+ "let x = (x = 20);\n",
- "let x = 10; x = 20;",
+ "let x = 10; x = 20;\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("LetVariable.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("LetVariable.golden")));
}
TEST(ConstVariableContextSlot) {
@@ -1991,17 +2074,17 @@ TEST(ConstVariableContextSlot) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "const x = 10; function f1() {return x;}",
+ "const x = 10; function f1() {return x;}\n",
- "const x = 10; function f1() {return x;} return x;",
+ "const x = 10; function f1() {return x;} return x;\n",
- "const x = (x = 20); function f1() {return x;}",
+ "const x = (x = 20); function f1() {return x;}\n",
- "const x = 10; x = 20; function f1() {return x;}",
+ "const x = 10; x = 20; function f1() {return x;}\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("ConstVariableContextSlot.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ConstVariableContextSlot.golden")));
}
TEST(LetVariableContextSlot) {
@@ -2009,17 +2092,17 @@ TEST(LetVariableContextSlot) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "let x = 10; function f1() {return x;}",
+ "let x = 10; function f1() {return x;}\n",
- "let x = 10; function f1() {return x;} return x;",
+ "let x = 10; function f1() {return x;} return x;\n",
- "let x = (x = 20); function f1() {return x;}",
+ "let x = (x = 20); function f1() {return x;}\n",
- "let x = 10; x = 20; function f1() {return x;}",
+ "let x = 10; x = 20; function f1() {return x;}\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("LetVariableContextSlot.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("LetVariableContextSlot.golden")));
}
TEST(DoExpression) {
@@ -2030,14 +2113,15 @@ TEST(DoExpression) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kString);
const char* snippets[] = {
- "var a = do { }; return a;",
+ "var a = do { }; return a;\n",
- "var a = do { var x = 100; }; return a;",
+ "var a = do { var x = 100; }; return a;\n",
- "while(true) { var a = 10; a = do { ++a; break; }; a = 20; }",
+ "while(true) { var a = 10; a = do { ++a; break; }; a = 20; }\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("DoExpression.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("DoExpression.golden")));
FLAG_harmony_do_expressions = old_flag;
}
@@ -2047,10 +2131,11 @@ TEST(WithStatement) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kMixed);
const char* snippets[] = {
- "with ({x:42}) { return x; }",
+ "with ({x:42}) { return x; }\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("WithStatement.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("WithStatement.golden")));
}
TEST(DoDebugger) {
@@ -2058,10 +2143,11 @@ TEST(DoDebugger) {
BytecodeExpectationsPrinter printer(CcTest::isolate(),
ConstantPoolType::kString);
const char* snippets[] = {
- "debugger;",
+ "debugger;\n",
};
- CHECK_EQ(BuildActual(printer, snippets), LoadGolden("DoDebugger.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("DoDebugger.golden")));
}
TEST(ClassDeclarations) {
@@ -2072,27 +2158,27 @@ TEST(ClassDeclarations) {
"class Person {\n"
" constructor(name) { this.name = name; }\n"
" speak() { console.log(this.name + ' is speaking.'); }\n"
- "}",
+ "}\n",
"class person {\n"
" constructor(name) { this.name = name; }\n"
" speak() { console.log(this.name + ' is speaking.'); }\n"
- "}",
+ "}\n",
"var n0 = 'a';\n"
"var n1 = 'b';\n"
"class N {\n"
" [n0]() { return n0; }\n"
" static [n1]() { return n1; }\n"
- "}",
+ "}\n",
"var count = 0;\n"
"class C { constructor() { count++; }}\n"
"return new C();\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("ClassDeclarations.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ClassDeclarations.golden")));
}
TEST(ClassAndSuperClass) {
@@ -2150,8 +2236,30 @@ TEST(ClassAndSuperClass) {
"})();\n",
};
- CHECK_EQ(BuildActual(printer, snippets),
- LoadGolden("ClassAndSuperClass.golden"));
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ClassAndSuperClass.golden")));
+}
+
+TEST(Generators) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate(),
+ ConstantPoolType::kMixed);
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "function* f() { }\n"
+ "f();\n",
+
+ "function* f() { yield 42 }\n"
+ "f();\n",
+
+ "function* f() { for (let x of [42]) yield x }\n"
+ "f();\n",
+ };
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("Generators.golden")));
}
} // namespace interpreter
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index e4cf809ad7..9591e2810e 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -28,8 +28,8 @@ class InvokeIntrinsicHelper {
BytecodeArrayBuilder builder(isolate_, zone_, sizeof...(args), 0, 0);
builder.CallRuntime(function_id_, builder.Parameter(0), sizeof...(args))
.Return();
- InterpreterTester tester(isolate_, builder.ToBytecodeArray());
- auto callable = tester.GetCallable<Handle<Object>>();
+ InterpreterTester tester(isolate_, builder.ToBytecodeArray(isolate_));
+ auto callable = tester.GetCallable<A...>();
return callable(args...).ToHandleChecked();
}
@@ -91,6 +91,203 @@ TEST(IsArray) {
CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
}
+TEST(IsJSProxy) {
+ HandleAndZoneScope handles;
+
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineIsJSProxy);
+ Factory* factory = handles.main_isolate()->factory();
+
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("new Date()")));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("(function() {})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("([1])")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("({})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("(/x/)")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("'string'")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
+ CHECK_EQ(*factory->true_value(),
+ *helper.Invoke(helper.NewObject("new Proxy({},{})")));
+}
+
+TEST(IsRegExp) {
+ HandleAndZoneScope handles;
+
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineIsRegExp);
+ Factory* factory = handles.main_isolate()->factory();
+
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("new Date()")));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("(function() {})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("([1])")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("({})")));
+ CHECK_EQ(*factory->true_value(), *helper.Invoke(helper.NewObject("(/x/)")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("'string'")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
+}
+
+TEST(IsTypedArray) {
+ HandleAndZoneScope handles;
+
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineIsTypedArray);
+ Factory* factory = handles.main_isolate()->factory();
+
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("new Date()")));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("(function() {})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("([1])")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("({})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("(/x/)")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("'string'")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
+
+ CHECK_EQ(
+ *factory->true_value(),
+ *helper.Invoke(helper.NewObject("new Uint8Array(new ArrayBuffer(1));")));
+ CHECK_EQ(
+ *factory->true_value(),
+ *helper.Invoke(helper.NewObject("new Uint16Array(new ArrayBuffer(2));")));
+ CHECK_EQ(
+ *factory->true_value(),
+ *helper.Invoke(helper.NewObject("new Int32Array(new ArrayBuffer(4));")));
+}
+
+TEST(IsSmi) {
+ HandleAndZoneScope handles;
+
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineIsSmi);
+ Factory* factory = handles.main_isolate()->factory();
+
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("new Date()")));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("(function() {})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("([1])")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("({})")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("(/x/)")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("'string'")));
+ CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42.2")));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("4294967297")));
+ CHECK_EQ(*factory->true_value(), *helper.Invoke(helper.NewObject("42")));
+}
+
+TEST(Call) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ InvokeIntrinsicHelper helper(isolate, handles.main_zone(),
+ Runtime::kInlineCall);
+
+ CHECK_EQ(Smi::FromInt(20),
+ *helper.Invoke(helper.NewObject("(function() { return this.x; })"),
+ helper.NewObject("({ x: 20 })")));
+ CHECK_EQ(Smi::FromInt(50),
+ *helper.Invoke(helper.NewObject("(function(arg1) { return arg1; })"),
+ factory->undefined_value(),
+ handle(Smi::FromInt(50), isolate)));
+ CHECK_EQ(
+ Smi::FromInt(20),
+ *helper.Invoke(
+ helper.NewObject("(function(a, b, c) { return a + b + c; })"),
+ factory->undefined_value(), handle(Smi::FromInt(10), isolate),
+ handle(Smi::FromInt(7), isolate), handle(Smi::FromInt(3), isolate)));
+}
+
+TEST(IntrinsicAsStubCall) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ InvokeIntrinsicHelper to_number_helper(isolate, handles.main_zone(),
+ Runtime::kInlineToNumber);
+ CHECK_EQ(Smi::FromInt(46),
+ *to_number_helper.Invoke(to_number_helper.NewObject("'46'")));
+
+ InvokeIntrinsicHelper to_integer_helper(isolate, handles.main_zone(),
+ Runtime::kInlineToInteger);
+ CHECK_EQ(Smi::FromInt(502),
+ *to_integer_helper.Invoke(to_integer_helper.NewObject("502.67")));
+
+ InvokeIntrinsicHelper has_property_helper(isolate, handles.main_zone(),
+ Runtime::kInlineHasProperty);
+ CHECK_EQ(*factory->true_value(),
+ *has_property_helper.Invoke(
+ has_property_helper.NewObject("'x'"),
+ has_property_helper.NewObject("({ x: 20 })")));
+ CHECK_EQ(*factory->false_value(),
+ *has_property_helper.Invoke(
+ has_property_helper.NewObject("'y'"),
+ has_property_helper.NewObject("({ x: 20 })")));
+
+ InvokeIntrinsicHelper sub_string_helper(isolate, handles.main_zone(),
+ Runtime::kInlineSubString);
+ CHECK(sub_string_helper
+ .Invoke(sub_string_helper.NewObject("'foobar'"),
+ sub_string_helper.NewObject("3"),
+ sub_string_helper.NewObject("6"))
+ ->SameValue(*sub_string_helper.NewObject("'bar'")));
+}
+
+TEST(ValueOf) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineValueOf);
+
+ CHECK_EQ(Smi::FromInt(1234), *helper.Invoke(helper.NewObject("1234")));
+ CHECK_EQ(Smi::FromInt(5678),
+ *helper.Invoke(helper.NewObject("new Object(5678)")));
+
+ CHECK_EQ(*factory->true_value(), *helper.Invoke(helper.NewObject("true")));
+ CHECK_EQ(*factory->false_value(),
+ *helper.Invoke(helper.NewObject("new Object(false)")));
+
+ CHECK(helper.Invoke(helper.NewObject("'foobar'"))
+ ->SameValue(*helper.NewObject("'foobar'")));
+ CHECK(helper.Invoke(helper.NewObject("new Object('foobar')"))
+ ->SameValue(*helper.NewObject("'foobar'")));
+}
+
+TEST(ClassOf) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
+ Runtime::kInlineClassOf);
+ CHECK_EQ(*helper.Invoke(helper.NewObject("123")), *factory->null_value());
+ CHECK_EQ(*helper.Invoke(helper.NewObject("'true'")), *factory->null_value());
+ CHECK_EQ(*helper.Invoke(helper.NewObject("'foo'")), *factory->null_value());
+ CHECK(helper.Invoke(helper.NewObject("({a:1})"))
+ ->SameValue(*helper.NewObject("'Object'")));
+ CHECK(helper.Invoke(helper.NewObject("(function foo() {})"))
+ ->SameValue(*helper.NewObject("'Function'")));
+ CHECK(helper.Invoke(helper.NewObject("new Date()"))
+ ->SameValue(*helper.NewObject("'Date'")));
+ CHECK(helper.Invoke(helper.NewObject("new Set"))
+ ->SameValue(*helper.NewObject("'Set'")));
+ CHECK(helper.Invoke(helper.NewObject("/x/"))
+ ->SameValue(*helper.NewObject("'RegExp'")));
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index c8dc776010..9572a2d731 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -8,6 +8,7 @@
#include "src/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/interpreter.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/interpreter-tester.h"
@@ -20,15 +21,14 @@ namespace interpreter {
TEST(InterpreterReturn) {
HandleAndZoneScope handles;
- Handle<Object> undefined_value =
- handles.main_isolate()->factory()->undefined_value();
+ Isolate* isolate = handles.main_isolate();
+ Handle<Object> undefined_value = isolate->factory()->undefined_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(undefined_value));
@@ -37,15 +37,14 @@ TEST(InterpreterReturn) {
TEST(InterpreterLoadUndefined) {
HandleAndZoneScope handles;
- Handle<Object> undefined_value =
- handles.main_isolate()->factory()->undefined_value();
+ Isolate* isolate = handles.main_isolate();
+ Handle<Object> undefined_value = isolate->factory()->undefined_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
builder.LoadUndefined().Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(undefined_value));
@@ -54,14 +53,14 @@ TEST(InterpreterLoadUndefined) {
TEST(InterpreterLoadNull) {
HandleAndZoneScope handles;
- Handle<Object> null_value = handles.main_isolate()->factory()->null_value();
+ Isolate* isolate = handles.main_isolate();
+ Handle<Object> null_value = isolate->factory()->null_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
builder.LoadNull().Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(null_value));
@@ -70,15 +69,14 @@ TEST(InterpreterLoadNull) {
TEST(InterpreterLoadTheHole) {
HandleAndZoneScope handles;
- Handle<Object> the_hole_value =
- handles.main_isolate()->factory()->the_hole_value();
+ Isolate* isolate = handles.main_isolate();
+ Handle<Object> the_hole_value = isolate->factory()->the_hole_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
builder.LoadTheHole().Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(the_hole_value));
@@ -87,14 +85,14 @@ TEST(InterpreterLoadTheHole) {
TEST(InterpreterLoadTrue) {
HandleAndZoneScope handles;
- Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
+ Isolate* isolate = handles.main_isolate();
+ Handle<Object> true_value = isolate->factory()->true_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
builder.LoadTrue().Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(true_value));
@@ -103,14 +101,14 @@ TEST(InterpreterLoadTrue) {
TEST(InterpreterLoadFalse) {
HandleAndZoneScope handles;
- Handle<Object> false_value = handles.main_isolate()->factory()->false_value();
+ Isolate* isolate = handles.main_isolate();
+ Handle<Object> false_value = isolate->factory()->false_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
builder.LoadFalse().Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(false_value));
@@ -119,16 +117,16 @@ TEST(InterpreterLoadFalse) {
TEST(InterpreterLoadLiteral) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// Small Smis.
for (int i = -128; i < 128; i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
builder.LoadLiteral(Smi::FromInt(i)).Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(i));
@@ -136,12 +134,12 @@ TEST(InterpreterLoadLiteral) {
// Large Smis.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
+
builder.LoadLiteral(Smi::FromInt(0x12345678)).Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(0x12345678));
@@ -149,12 +147,12 @@ TEST(InterpreterLoadLiteral) {
// Heap numbers.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
+
builder.LoadLiteral(factory->NewHeapNumber(-2.1e19)).Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK_EQ(i::HeapNumber::cast(*return_val)->value(), -2.1e19);
@@ -162,13 +160,13 @@ TEST(InterpreterLoadLiteral) {
// Strings.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
+
Handle<i::String> string = factory->NewStringFromAsciiChecked("String");
builder.LoadLiteral(string).Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(i::String::cast(*return_val)->Equals(*string));
@@ -178,19 +176,20 @@ TEST(InterpreterLoadLiteral) {
TEST(InterpreterLoadStoreRegisters) {
HandleAndZoneScope handles;
- Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
+ Isolate* isolate = handles.main_isolate();
+ Handle<Object> true_value = isolate->factory()->true_value();
for (int i = 0; i <= kMaxInt8; i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, i + 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, i + 1);
+
Register reg(i);
builder.LoadTrue()
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.LoadAccumulatorWithRegister(reg)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(true_value));
@@ -262,20 +261,27 @@ TEST(InterpreterShiftOpsSmi) {
for (size_t r = 0; r < arraysize(rhs_inputs); r++) {
for (size_t o = 0; o < arraysize(kShiftOperators); o++) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
- BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone(), 1, 0, 1);
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register reg(0);
int lhs = lhs_inputs[l];
int rhs = rhs_inputs[r];
builder.LoadLiteral(Smi::FromInt(lhs))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(rhs))
- .BinaryOperation(kShiftOperators[o], reg)
+ .BinaryOperation(kShiftOperators[o], reg, vector->GetIndex(slot))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
Handle<Object> expected_value =
@@ -294,20 +300,28 @@ TEST(InterpreterBinaryOpsSmi) {
for (size_t r = 0; r < arraysize(rhs_inputs); r++) {
for (size_t o = 0; o < arraysize(kArithmeticOperators); o++) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
- BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone(), 1, 0, 1);
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register reg(0);
int lhs = lhs_inputs[l];
int rhs = rhs_inputs[r];
builder.LoadLiteral(Smi::FromInt(lhs))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(rhs))
- .BinaryOperation(kArithmeticOperators[o], reg)
+ .BinaryOperation(kArithmeticOperators[o], reg,
+ vector->GetIndex(slot))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
Handle<Object> expected_value =
@@ -327,20 +341,28 @@ TEST(InterpreterBinaryOpsHeapNumber) {
for (size_t r = 0; r < arraysize(rhs_inputs); r++) {
for (size_t o = 0; o < arraysize(kArithmeticOperators); o++) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
- BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone(), 1, 0, 1);
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register reg(0);
double lhs = lhs_inputs[l];
double rhs = rhs_inputs[r];
builder.LoadLiteral(factory->NewNumber(lhs))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(factory->NewNumber(rhs))
- .BinaryOperation(kArithmeticOperators[o], reg)
+ .BinaryOperation(kArithmeticOperators[o], reg,
+ vector->GetIndex(slot))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
Handle<Object> expected_value =
@@ -354,7 +376,9 @@ TEST(InterpreterBinaryOpsHeapNumber) {
TEST(InterpreterStringAdd) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
struct TestCase {
Handle<Object> lhs;
@@ -385,17 +409,21 @@ TEST(InterpreterStringAdd) {
};
for (size_t i = 0; i < arraysize(test_cases); i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register reg(0);
builder.LoadLiteral(test_cases[i].lhs)
.StoreAccumulatorInRegister(reg)
.LoadLiteral(test_cases[i].rhs)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, vector->GetIndex(slot))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*test_cases[i].expected_value));
@@ -405,16 +433,17 @@ TEST(InterpreterStringAdd) {
TEST(InterpreterParameter1) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ Isolate* isolate = handles.main_isolate();
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
+
builder.LoadAccumulatorWithRegister(builder.Parameter(0)).Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<Handle<Object>>();
// Check for heap objects.
- Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
+ Handle<Object> true_value = isolate->factory()->true_value();
Handle<Object> return_val = callable(true_value).ToHandleChecked();
CHECK(return_val.is_identical_to(true_value));
@@ -427,20 +456,41 @@ TEST(InterpreterParameter1) {
TEST(InterpreterParameter8) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 8,
- 0, 0);
+ Isolate* isolate = handles.main_isolate();
+ Zone zone(isolate->allocator());
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 8, 0, 0);
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot4 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot5 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot6 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
builder.LoadAccumulatorWithRegister(builder.Parameter(0))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(1))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(2))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(3))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(4))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(5))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(6))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(7))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(1),
+ vector->GetIndex(slot))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(2),
+ vector->GetIndex(slot1))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(3),
+ vector->GetIndex(slot2))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(4),
+ vector->GetIndex(slot3))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(5),
+ vector->GetIndex(slot4))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(6),
+ vector->GetIndex(slot5))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(7),
+ vector->GetIndex(slot6))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
typedef Handle<Object> H;
auto callable = tester.GetCallable<H, H, H, H, H, H, H, H>();
@@ -459,18 +509,401 @@ TEST(InterpreterParameter8) {
CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(36));
}
+TEST(InterpreterBinaryOpTypeFeedback) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Zone zone(isolate->allocator());
+
+ struct BinaryOpExpectation {
+ Token::Value op;
+ Handle<Object> arg1;
+ Handle<Object> arg2;
+ Handle<Object> result;
+ int32_t feedback;
+ };
+
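+  // Each entry pairs an operation and its operands with the expected result
+  // and the BinaryOperationFeedback kind the interpreter should record.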
+ BinaryOpExpectation const kTestCases[] = {
+ // ADD
+ {Token::Value::ADD, Handle<Smi>(Smi::FromInt(2), isolate),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(5), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::ADD, Handle<Smi>(Smi::FromInt(Smi::kMaxValue), isolate),
+ Handle<Smi>(Smi::FromInt(1), isolate),
+ isolate->factory()->NewHeapNumber(Smi::kMaxValue + 1.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::ADD, isolate->factory()->NewHeapNumber(3.1415),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ isolate->factory()->NewHeapNumber(3.1415 + 3),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::ADD, isolate->factory()->NewHeapNumber(3.1415),
+ isolate->factory()->NewHeapNumber(1.4142),
+ isolate->factory()->NewHeapNumber(3.1415 + 1.4142),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::ADD, Handle<Smi>(Smi::FromInt(2), isolate),
+ isolate->factory()->NewStringFromAsciiChecked("2"),
+ isolate->factory()->NewStringFromAsciiChecked("22"),
+ BinaryOperationFeedback::kAny},
+ // SUB
+ {Token::Value::SUB, Handle<Smi>(Smi::FromInt(2), isolate),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(-1), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::SUB, Handle<Smi>(Smi::FromInt(Smi::kMinValue), isolate),
+ Handle<Smi>(Smi::FromInt(1), isolate),
+ isolate->factory()->NewHeapNumber(Smi::kMinValue - 1.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::SUB, isolate->factory()->NewHeapNumber(3.1415),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ isolate->factory()->NewHeapNumber(3.1415 - 3),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::SUB, isolate->factory()->NewHeapNumber(3.1415),
+ isolate->factory()->NewHeapNumber(1.4142),
+ isolate->factory()->NewHeapNumber(3.1415 - 1.4142),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::SUB, Handle<Smi>(Smi::FromInt(2), isolate),
+ isolate->factory()->NewStringFromAsciiChecked("1"),
+ Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny},
+ // MUL
+ {Token::Value::MUL, Handle<Smi>(Smi::FromInt(2), isolate),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(6), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::MUL, Handle<Smi>(Smi::FromInt(Smi::kMinValue), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ isolate->factory()->NewHeapNumber(Smi::kMinValue * 2.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::MUL, isolate->factory()->NewHeapNumber(3.1415),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ isolate->factory()->NewHeapNumber(3 * 3.1415),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::MUL, isolate->factory()->NewHeapNumber(3.1415),
+ isolate->factory()->NewHeapNumber(1.4142),
+ isolate->factory()->NewHeapNumber(3.1415 * 1.4142),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::MUL, Handle<Smi>(Smi::FromInt(2), isolate),
+ isolate->factory()->NewStringFromAsciiChecked("1"),
+ Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kAny},
+ // DIV
+ {Token::Value::DIV, Handle<Smi>(Smi::FromInt(6), isolate),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::DIV, Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ isolate->factory()->NewHeapNumber(3.0 / 2.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::DIV, isolate->factory()->NewHeapNumber(3.1415),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ isolate->factory()->NewHeapNumber(3.1415 / 3),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::DIV, isolate->factory()->NewHeapNumber(3.1415),
+ isolate->factory()->NewHeapNumber(
+ -std::numeric_limits<double>::infinity()),
+ isolate->factory()->NewHeapNumber(-0.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::DIV, Handle<Smi>(Smi::FromInt(2), isolate),
+ isolate->factory()->NewStringFromAsciiChecked("1"),
+ Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kAny},
+ // MOD
+ {Token::Value::MOD, Handle<Smi>(Smi::FromInt(5), isolate),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::MOD, Handle<Smi>(Smi::FromInt(-4), isolate),
+ Handle<Smi>(Smi::FromInt(2), isolate),
+ isolate->factory()->NewHeapNumber(-0.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::MOD, isolate->factory()->NewHeapNumber(3.1415),
+ Handle<Smi>(Smi::FromInt(3), isolate),
+ isolate->factory()->NewHeapNumber(fmod(3.1415, 3.0)),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::MOD, isolate->factory()->NewHeapNumber(-3.1415),
+ isolate->factory()->NewHeapNumber(-1.4142),
+ isolate->factory()->NewHeapNumber(fmod(-3.1415, -1.4142)),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::MOD, Handle<Smi>(Smi::FromInt(3), isolate),
+ isolate->factory()->NewStringFromAsciiChecked("-2"),
+ Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
+
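+  // Run each case through a one-register snippet and check both the returned
+  // value and the feedback left in the general slot.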
+ for (const BinaryOpExpectation& test_case : kTestCases) {
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
+ i::FeedbackVectorSpec feedback_spec(&zone);
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(isolate, &feedback_spec);
+
+ Register reg(0);
+ builder.LoadLiteral(test_case.arg1)
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(test_case.arg2)
+ .BinaryOperation(test_case.op, reg, vector->GetIndex(slot0))
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+
+ InterpreterTester tester(isolate, bytecode_array, vector);
+ auto callable = tester.GetCallable<>();
+
+ Handle<Object> return_val = callable().ToHandleChecked();
+ Object* feedback0 = vector->Get(slot0);
+ CHECK(feedback0->IsSmi());
+ CHECK_EQ(test_case.feedback, static_cast<Smi*>(feedback0)->value());
+ CHECK(Object::Equals(test_case.result, return_val).ToChecked());
+ }
+}
+
+TEST(InterpreterBinaryOpSmiTypeFeedback) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Zone zone(isolate->allocator());
+
+ struct BinaryOpExpectation {
+ Token::Value op;
+ Handle<Object> arg1;
+ int32_t arg2;
+ Handle<Object> result;
+ int32_t feedback;
+ };
+
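+  // The right-hand operand is always emitted as a Smi literal; the expected
+  // feedback depends on the left operand's type and on Smi overflow.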
+ BinaryOpExpectation const kTestCases[] = {
+ // ADD
+ {Token::Value::ADD, Handle<Smi>(Smi::FromInt(2), isolate), 42,
+ Handle<Smi>(Smi::FromInt(44), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::ADD, Handle<Smi>(Smi::FromInt(2), isolate), Smi::kMaxValue,
+ isolate->factory()->NewHeapNumber(Smi::kMaxValue + 2.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::ADD, isolate->factory()->NewHeapNumber(3.1415), 2,
+ isolate->factory()->NewHeapNumber(3.1415 + 2.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::ADD, isolate->factory()->NewStringFromAsciiChecked("2"), 2,
+ isolate->factory()->NewStringFromAsciiChecked("22"),
+ BinaryOperationFeedback::kAny},
+ // SUB
+ {Token::Value::SUB, Handle<Smi>(Smi::FromInt(2), isolate), 42,
+ Handle<Smi>(Smi::FromInt(-40), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::SUB, Handle<Smi>(Smi::FromInt(Smi::kMinValue), isolate), 1,
+ isolate->factory()->NewHeapNumber(Smi::kMinValue - 1.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::SUB, isolate->factory()->NewHeapNumber(3.1415), 2,
+ isolate->factory()->NewHeapNumber(3.1415 - 2.0),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::SUB, isolate->factory()->NewStringFromAsciiChecked("2"), 2,
+ Handle<Smi>(Smi::FromInt(0), isolate), BinaryOperationFeedback::kAny},
+ // BIT_OR
+ {Token::Value::BIT_OR, Handle<Smi>(Smi::FromInt(4), isolate), 1,
+ Handle<Smi>(Smi::FromInt(5), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::BIT_OR, isolate->factory()->NewHeapNumber(3.1415), 8,
+ Handle<Smi>(Smi::FromInt(11), isolate),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::BIT_OR, isolate->factory()->NewStringFromAsciiChecked("2"),
+ 1, Handle<Smi>(Smi::FromInt(3), isolate), BinaryOperationFeedback::kAny},
+ // BIT_AND
+ {Token::Value::BIT_AND, Handle<Smi>(Smi::FromInt(3), isolate), 1,
+ Handle<Smi>(Smi::FromInt(1), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::BIT_AND, isolate->factory()->NewHeapNumber(3.1415), 2,
+ Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kNumber},
+ {Token::Value::BIT_AND,
+ isolate->factory()->NewStringFromAsciiChecked("2"), 1,
+ Handle<Smi>(Smi::FromInt(0), isolate), BinaryOperationFeedback::kAny},
+ // SHL
+ {Token::Value::SHL, Handle<Smi>(Smi::FromInt(3), isolate), 1,
+ Handle<Smi>(Smi::FromInt(6), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::SHL, isolate->factory()->NewHeapNumber(3.1415), 2,
+ Handle<Smi>(Smi::FromInt(12), isolate),
+ BinaryOperationFeedback::kNumber},
+ {Token::Value::SHL, isolate->factory()->NewStringFromAsciiChecked("2"), 1,
+ Handle<Smi>(Smi::FromInt(4), isolate), BinaryOperationFeedback::kAny},
+ // SAR
+ {Token::Value::SAR, Handle<Smi>(Smi::FromInt(3), isolate), 1,
+ Handle<Smi>(Smi::FromInt(1), isolate),
+ BinaryOperationFeedback::kSignedSmall},
+ {Token::Value::SAR, isolate->factory()->NewHeapNumber(3.1415), 2,
+ Handle<Smi>(Smi::FromInt(0), isolate), BinaryOperationFeedback::kNumber},
+ {Token::Value::SAR, isolate->factory()->NewStringFromAsciiChecked("2"), 1,
+ Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
+
+ for (const BinaryOpExpectation& test_case : kTestCases) {
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
+ i::FeedbackVectorSpec feedback_spec(&zone);
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(isolate, &feedback_spec);
+
+ Register reg(0);
+ builder.LoadLiteral(test_case.arg1)
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(Smi::FromInt(test_case.arg2))
+ .BinaryOperation(test_case.op, reg, vector->GetIndex(slot0))
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+
+ InterpreterTester tester(isolate, bytecode_array, vector);
+ auto callable = tester.GetCallable<>();
+
+ Handle<Object> return_val = callable().ToHandleChecked();
+ Object* feedback0 = vector->Get(slot0);
+ CHECK(feedback0->IsSmi());
+ CHECK_EQ(test_case.feedback, static_cast<Smi*>(feedback0)->value());
+ CHECK(Object::Equals(test_case.result, return_val).ToChecked());
+ }
+}
+
+TEST(InterpreterUnaryOpFeedback) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Zone zone(isolate->allocator());
+
+ Handle<Smi> smi_one = Handle<Smi>(Smi::FromInt(1), isolate);
+ Handle<Smi> smi_max = Handle<Smi>(Smi::FromInt(Smi::kMaxValue), isolate);
+ Handle<Smi> smi_min = Handle<Smi>(Smi::FromInt(Smi::kMinValue), isolate);
+ Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(2.1);
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked("42");
+
+ struct TestCase {
+ Token::Value op;
+ Handle<Smi> smi_feedback_value;
+ Handle<Smi> smi_to_number_feedback_value;
+ Handle<HeapNumber> number_feedback_value;
+ Handle<Object> any_feedback_value;
+ };
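+  // Each column holds an operand chosen to leave a specific feedback kind:
+  // kSignedSmall, kNumber (via Smi overflow), kNumber, and kAny in turn.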
+ TestCase const kTestCases[] = {
+ {Token::Value::ADD, smi_one, smi_max, number, str},
+ {Token::Value::SUB, smi_one, smi_min, number, str}};
+ for (TestCase const& test_case : kTestCases) {
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 4, 0, 0);
+
+ i::FeedbackVectorSpec feedback_spec(&zone);
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(isolate, &feedback_spec);
+
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0))
+ .CountOperation(test_case.op, vector->GetIndex(slot0))
+ .LoadAccumulatorWithRegister(builder.Parameter(1))
+ .CountOperation(test_case.op, vector->GetIndex(slot1))
+ .LoadAccumulatorWithRegister(builder.Parameter(2))
+ .CountOperation(test_case.op, vector->GetIndex(slot2))
+ .LoadAccumulatorWithRegister(builder.Parameter(3))
+ .CountOperation(test_case.op, vector->GetIndex(slot3))
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+
+ InterpreterTester tester(isolate, bytecode_array, vector);
+ typedef Handle<Object> H;
+ auto callable = tester.GetCallable<H, H, H, H>();
+
+ Handle<Object> return_val =
+ callable(test_case.smi_feedback_value,
+ test_case.smi_to_number_feedback_value,
+ test_case.number_feedback_value, test_case.any_feedback_value)
+ .ToHandleChecked();
+ USE(return_val);
+ Object* feedback0 = vector->Get(slot0);
+ CHECK(feedback0->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kSignedSmall,
+ static_cast<Smi*>(feedback0)->value());
+
+ Object* feedback1 = vector->Get(slot1);
+ CHECK(feedback1->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kNumber,
+ static_cast<Smi*>(feedback1)->value());
+
+ Object* feedback2 = vector->Get(slot2);
+ CHECK(feedback2->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kNumber,
+ static_cast<Smi*>(feedback2)->value());
+
+ Object* feedback3 = vector->Get(slot3);
+ CHECK(feedback3->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kAny,
+ static_cast<Smi*>(feedback3)->value());
+ }
+}
+
+TEST(InterpreterBitwiseTypeFeedback) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Zone zone(isolate->allocator());
+ const Token::Value kBitwiseBinaryOperators[] = {
+ Token::Value::BIT_OR, Token::Value::BIT_XOR, Token::Value::BIT_AND,
+ Token::Value::SHL, Token::Value::SHR, Token::Value::SAR};
+
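+  // Chain each operator over Smi, HeapNumber, and String operands; the three
+  // slots should record kSignedSmall, kNumber, and kAny respectively.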
+ for (Token::Value op : kBitwiseBinaryOperators) {
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 4, 0, 0);
+
+ i::FeedbackVectorSpec feedback_spec(&zone);
+ i::FeedbackVectorSlot slot0 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
+ i::FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(isolate, &feedback_spec);
+
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0))
+ .BinaryOperation(op, builder.Parameter(1), vector->GetIndex(slot0))
+ .BinaryOperation(op, builder.Parameter(2), vector->GetIndex(slot1))
+ .BinaryOperation(op, builder.Parameter(3), vector->GetIndex(slot2))
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+
+ InterpreterTester tester(isolate, bytecode_array, vector);
+ typedef Handle<Object> H;
+ auto callable = tester.GetCallable<H, H, H, H>();
+
+ Handle<Smi> arg1 = Handle<Smi>(Smi::FromInt(2), isolate);
+ Handle<Smi> arg2 = Handle<Smi>(Smi::FromInt(2), isolate);
+ Handle<HeapNumber> arg3 = isolate->factory()->NewHeapNumber(2.2);
+ Handle<String> arg4 = isolate->factory()->NewStringFromAsciiChecked("2");
+
+ Handle<Object> return_val =
+ callable(arg1, arg2, arg3, arg4).ToHandleChecked();
+ USE(return_val);
+ Object* feedback0 = vector->Get(slot0);
+ CHECK(feedback0->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kSignedSmall,
+ static_cast<Smi*>(feedback0)->value());
+
+ Object* feedback1 = vector->Get(slot1);
+ CHECK(feedback1->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kNumber,
+ static_cast<Smi*>(feedback1)->value());
+
+ Object* feedback2 = vector->Get(slot2);
+ CHECK(feedback2->IsSmi());
+ CHECK_EQ(BinaryOperationFeedback::kAny,
+ static_cast<Smi*>(feedback2)->value());
+ }
+}
TEST(InterpreterParameter1Assign) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ Isolate* isolate = handles.main_isolate();
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
+
builder.LoadLiteral(Smi::FromInt(5))
.StoreAccumulatorInRegister(builder.Parameter(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_val =
@@ -482,6 +915,7 @@ TEST(InterpreterParameter1Assign) {
TEST(InterpreterLoadGlobal) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
// Test loading a global.
std::string source(
@@ -489,7 +923,7 @@ TEST(InterpreterLoadGlobal) {
"function " + InterpreterTester::function_name() + "() {\n"
" return global;\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -499,8 +933,8 @@ TEST(InterpreterLoadGlobal) {
TEST(InterpreterStoreGlobal) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// Test storing to a global.
std::string source(
@@ -508,7 +942,7 @@ TEST(InterpreterStoreGlobal) {
"function " + InterpreterTester::function_name() + "() {\n"
" global = 999;\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
callable().ToHandleChecked();
@@ -521,6 +955,7 @@ TEST(InterpreterStoreGlobal) {
TEST(InterpreterCallGlobal) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
// Test calling a global function.
std::string source(
@@ -528,7 +963,7 @@ TEST(InterpreterCallGlobal) {
"function " + InterpreterTester::function_name() + "() {\n"
" return g_add(5, 10);\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -538,6 +973,7 @@ TEST(InterpreterCallGlobal) {
TEST(InterpreterLoadUnallocated) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
// Test loading an unallocated global.
std::string source(
@@ -545,7 +981,7 @@ TEST(InterpreterLoadUnallocated) {
"function " + InterpreterTester::function_name() + "() {\n"
" return unallocated;\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -555,8 +991,8 @@ TEST(InterpreterLoadUnallocated) {
TEST(InterpreterStoreUnallocated) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// Test storing to an unallocated global.
std::string source(
@@ -564,7 +1000,7 @@ TEST(InterpreterStoreUnallocated) {
"function " + InterpreterTester::function_name() + "() {\n"
" unallocated = 999;\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
callable().ToHandleChecked();
@@ -577,26 +1013,26 @@ TEST(InterpreterStoreUnallocated) {
TEST(InterpreterLoadNamedProperty) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
- i::Zone zone(isolate->allocator());
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
- i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(isolate, &feedback_spec);
+ NewTypeFeedbackVector(isolate, &feedback_spec);
Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
name = factory->string_table()->LookupString(isolate, name);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
+
builder.LoadNamedProperty(builder.Parameter(0), name, vector->GetIndex(slot))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
@@ -630,27 +1066,27 @@ TEST(InterpreterLoadNamedProperty) {
TEST(InterpreterLoadKeyedProperty) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
- i::Zone zone(isolate->allocator());
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
- i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot = feedback_spec.AddKeyedLoadICSlot();
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddKeyedLoadICSlot();
Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(isolate, &feedback_spec);
+ NewTypeFeedbackVector(isolate, &feedback_spec);
Handle<i::String> key = factory->NewStringFromAsciiChecked("key");
key = factory->string_table()->LookupString(isolate, key);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
builder.LoadLiteral(key)
.LoadKeyedProperty(builder.Parameter(0), vector->GetIndex(slot))
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> object = InterpreterTester::NewObject("({ key : 123 })");
@@ -672,26 +1108,26 @@ TEST(InterpreterLoadKeyedProperty) {
TEST(InterpreterStoreNamedProperty) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
- i::Zone zone(isolate->allocator());
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
- i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot = feedback_spec.AddStoreICSlot();
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddStoreICSlot();
Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(isolate, &feedback_spec);
+ NewTypeFeedbackVector(isolate, &feedback_spec);
Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
name = factory->string_table()->LookupString(isolate, name);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 0);
+
builder.LoadLiteral(Smi::FromInt(999))
.StoreNamedProperty(builder.Parameter(0), name, vector->GetIndex(slot),
- i::STRICT)
+ STRICT)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
@@ -731,28 +1167,28 @@ TEST(InterpreterStoreNamedProperty) {
TEST(InterpreterStoreKeyedProperty) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
- i::Zone zone(isolate->allocator());
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
- i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot = feedback_spec.AddKeyedStoreICSlot();
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddKeyedStoreICSlot();
Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(isolate, &feedback_spec);
+ NewTypeFeedbackVector(isolate, &feedback_spec);
Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
name = factory->string_table()->LookupString(isolate, name);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
builder.LoadLiteral(name)
.StoreAccumulatorInRegister(Register(0))
.LoadLiteral(Smi::FromInt(999))
.StoreKeyedProperty(builder.Parameter(0), Register(0),
vector->GetIndex(slot), i::SLOPPY)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
@@ -778,31 +1214,37 @@ TEST(InterpreterStoreKeyedProperty) {
static void TestInterpreterCall(TailCallMode tail_call_mode) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
- i::Zone zone(isolate->allocator());
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ Zone zone(isolate->allocator());
- i::FeedbackVectorSpec feedback_spec(&zone);
- i::FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot call_slot = feedback_spec.AddCallICSlot();
Handle<i::TypeFeedbackVector> vector =
- i::NewTypeFeedbackVector(isolate, &feedback_spec);
+ NewTypeFeedbackVector(isolate, &feedback_spec);
int slot_index = vector->GetIndex(slot);
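+  // Call bytecodes now carry a CallIC feedback slot alongside the LoadIC one.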
+  int call_slot_index = vector->GetIndex(call_slot);
Handle<i::String> name = factory->NewStringFromAsciiChecked("func");
name = factory->string_table()->LookupString(isolate, name);
// Check with no args.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
- .StoreAccumulatorInRegister(Register(0))
- .Call(Register(0), builder.Parameter(0), 1, 0, tail_call_mode)
- .Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ .StoreAccumulatorInRegister(Register(0));
+
+ builder.Call(Register(0), builder.Parameter(0), 1, call_slot_index,
+ tail_call_mode);
- InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ builder.Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> object = InterpreterTester::NewObject(
@@ -813,15 +1255,16 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check that receiver is passed properly.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
+
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
- .StoreAccumulatorInRegister(Register(0))
- .Call(Register(0), builder.Parameter(0), 1, 0, tail_call_mode)
- .Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ .StoreAccumulatorInRegister(Register(0));
+ builder.Call(Register(0), builder.Parameter(0), 1, call_slot_index,
+ tail_call_mode);
+ builder.Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> object = InterpreterTester::NewObject(
@@ -835,8 +1278,8 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with two parameters (+ receiver).
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 4);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 4);
+
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
.StoreAccumulatorInRegister(Register(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
@@ -844,12 +1287,15 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
.LoadLiteral(Smi::FromInt(51))
.StoreAccumulatorInRegister(Register(2))
.LoadLiteral(Smi::FromInt(11))
- .StoreAccumulatorInRegister(Register(3))
- .Call(Register(0), Register(1), 3, 0, tail_call_mode)
- .Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ .StoreAccumulatorInRegister(Register(3));
+
+ builder.Call(Register(0), Register(1), 3, call_slot_index, tail_call_mode);
+
+ builder.Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> object = InterpreterTester::NewObject(
@@ -862,8 +1308,8 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with 10 parameters (+ receiver).
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 12);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 12);
+
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
.StoreAccumulatorInRegister(Register(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
@@ -887,12 +1333,15 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
.LoadLiteral(factory->NewStringFromAsciiChecked("i"))
.StoreAccumulatorInRegister(Register(10))
.LoadLiteral(factory->NewStringFromAsciiChecked("j"))
- .StoreAccumulatorInRegister(Register(11))
- .Call(Register(0), Register(1), 11, 0, tail_call_mode)
- .Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ .StoreAccumulatorInRegister(Register(11));
+
+ builder.Call(Register(0), Register(1), 11, call_slot_index, tail_call_mode);
+
+ builder.Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
- InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> object = InterpreterTester::NewObject(
@@ -922,13 +1371,13 @@ static BytecodeArrayBuilder& SetRegister(BytecodeArrayBuilder& builder,
.LoadAccumulatorWithRegister(scratch);
}
-
static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
Register reg, int value,
- Register scratch) {
+ Register scratch,
+ int slot_index) {
return builder.StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, slot_index)
.StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(scratch);
}
@@ -936,8 +1385,18 @@ static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
TEST(InterpreterJumps) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
- 0, 2);
+ Isolate* isolate = handles.main_isolate();
+ Zone zone(isolate->allocator());
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
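+  // Each IncrementRegister below performs an Add, so each needs its own
+  // feedback slot.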
Register reg(0), scratch(1);
BytecodeLabel label[3];
@@ -945,16 +1404,18 @@ TEST(InterpreterJumps) {
.StoreAccumulatorInRegister(reg)
.Jump(&label[1]);
SetRegister(builder, reg, 1024, scratch).Bind(&label[0]);
- IncrementRegister(builder, reg, 1, scratch).Jump(&label[2]);
+ IncrementRegister(builder, reg, 1, scratch, vector->GetIndex(slot))
+ .Jump(&label[2]);
SetRegister(builder, reg, 2048, scratch).Bind(&label[1]);
- IncrementRegister(builder, reg, 2, scratch).Jump(&label[0]);
+ IncrementRegister(builder, reg, 2, scratch, vector->GetIndex(slot1))
+ .Jump(&label[0]);
SetRegister(builder, reg, 4096, scratch).Bind(&label[2]);
- IncrementRegister(builder, reg, 4, scratch)
+ IncrementRegister(builder, reg, 4, scratch, vector->GetIndex(slot2))
.LoadAccumulatorWithRegister(reg)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_value)->value(), 7);
@@ -963,8 +1424,20 @@ TEST(InterpreterJumps) {
TEST(InterpreterConditionalJumps) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
- 0, 2);
+ Isolate* isolate = handles.main_isolate();
+ Zone zone(isolate->allocator());
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot4 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register reg(0), scratch(1);
BytecodeLabel label[2];
BytecodeLabel done, done1;
@@ -973,21 +1446,26 @@ TEST(InterpreterConditionalJumps) {
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(&label[0]);
- IncrementRegister(builder, reg, 1024, scratch)
+ IncrementRegister(builder, reg, 1024, scratch, vector->GetIndex(slot))
.Bind(&label[0])
.LoadTrue()
.JumpIfFalse(&done);
- IncrementRegister(builder, reg, 1, scratch).LoadTrue().JumpIfTrue(&label[1]);
- IncrementRegister(builder, reg, 2048, scratch).Bind(&label[1]);
- IncrementRegister(builder, reg, 2, scratch).LoadFalse().JumpIfTrue(&done1);
- IncrementRegister(builder, reg, 4, scratch)
+ IncrementRegister(builder, reg, 1, scratch, vector->GetIndex(slot1))
+ .LoadTrue()
+ .JumpIfTrue(&label[1]);
+ IncrementRegister(builder, reg, 2048, scratch, vector->GetIndex(slot2))
+ .Bind(&label[1]);
+ IncrementRegister(builder, reg, 2, scratch, vector->GetIndex(slot3))
+ .LoadFalse()
+ .JumpIfTrue(&done1);
+ IncrementRegister(builder, reg, 4, scratch, vector->GetIndex(slot4))
.LoadAccumulatorWithRegister(reg)
.Bind(&done)
.Bind(&done1)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_value)->value(), 7);
@@ -996,8 +1474,20 @@ TEST(InterpreterConditionalJumps) {
TEST(InterpreterConditionalJumps2) {
// TODO(oth): Add tests for all conditional jumps near and far.
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
- 0, 2);
+ Isolate* isolate = handles.main_isolate();
+ Zone zone(isolate->allocator());
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot3 = feedback_spec.AddGeneralSlot();
+ FeedbackVectorSlot slot4 = feedback_spec.AddGeneralSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register reg(0), scratch(1);
BytecodeLabel label[2];
BytecodeLabel done, done1;
@@ -1006,21 +1496,26 @@ TEST(InterpreterConditionalJumps2) {
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(&label[0]);
- IncrementRegister(builder, reg, 1024, scratch)
+ IncrementRegister(builder, reg, 1024, scratch, vector->GetIndex(slot))
.Bind(&label[0])
.LoadTrue()
.JumpIfFalse(&done);
- IncrementRegister(builder, reg, 1, scratch).LoadTrue().JumpIfTrue(&label[1]);
- IncrementRegister(builder, reg, 2048, scratch).Bind(&label[1]);
- IncrementRegister(builder, reg, 2, scratch).LoadFalse().JumpIfTrue(&done1);
- IncrementRegister(builder, reg, 4, scratch)
+ IncrementRegister(builder, reg, 1, scratch, vector->GetIndex(slot1))
+ .LoadTrue()
+ .JumpIfTrue(&label[1]);
+ IncrementRegister(builder, reg, 2048, scratch, vector->GetIndex(slot2))
+ .Bind(&label[1]);
+ IncrementRegister(builder, reg, 2, scratch, vector->GetIndex(slot3))
+ .LoadFalse()
+ .JumpIfTrue(&done1);
+ IncrementRegister(builder, reg, 4, scratch, vector->GetIndex(slot4))
.LoadAccumulatorWithRegister(reg)
.Bind(&done)
.Bind(&done1)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_value)->value(), 7);
@@ -1028,25 +1523,35 @@ TEST(InterpreterConditionalJumps2) {
TEST(InterpreterJumpConstantWith16BitOperand) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 257);
+ Isolate* isolate = handles.main_isolate();
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 257);
+
+ Zone zone(isolate->allocator());
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddGeneralSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ NewTypeFeedbackVector(isolate, &feedback_spec);
+
Register reg(0), scratch(256);
- BytecodeLabel done;
+ BytecodeLabel done, fake;
builder.LoadLiteral(Smi::FromInt(0));
builder.StoreAccumulatorInRegister(reg);
// Consume all 8-bit operands
for (int i = 1; i <= 256; i++) {
- builder.LoadLiteral(handles.main_isolate()->factory()->NewNumber(i));
- builder.BinaryOperation(Token::Value::ADD, reg);
+ builder.LoadLiteral(isolate->factory()->NewNumber(i));
+ builder.BinaryOperation(Token::Value::ADD, reg, vector->GetIndex(slot));
builder.StoreAccumulatorInRegister(reg);
}
builder.Jump(&done);
// Emit more than 16-bit immediate operands worth of code to jump over.
+ builder.Bind(&fake);
for (int i = 0; i < 6600; i++) {
builder.LoadLiteral(Smi::FromInt(0)); // 1-byte
- builder.BinaryOperation(Token::Value::ADD, scratch); // 4-bytes
+ builder.BinaryOperation(Token::Value::ADD, scratch,
+ vector->GetIndex(slot)); // 6-bytes
builder.StoreAccumulatorInRegister(scratch); // 4-bytes
builder.MoveRegister(scratch, reg); // 6-bytes
}
@@ -1054,7 +1559,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
builder.LoadAccumulatorWithRegister(reg);
builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
BytecodeArrayIterator iterator(bytecode_array);
bool found_16bit_constant_jump = false;
@@ -1068,7 +1573,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
}
CHECK(found_16bit_constant_jump);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array, vector);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_value)->value(), 256.0 / 2 * (1 + 256));
@@ -1076,8 +1581,8 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
TEST(InterpreterJumpWith32BitOperand) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 1);
+ Isolate* isolate = handles.main_isolate();
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
Register reg(0);
BytecodeLabel done;
@@ -1085,14 +1590,14 @@ TEST(InterpreterJumpWith32BitOperand) {
builder.StoreAccumulatorInRegister(reg);
// Consume all 16-bit constant pool entries
for (int i = 1; i <= 65536; i++) {
- builder.LoadLiteral(handles.main_isolate()->factory()->NewNumber(i));
+ builder.LoadLiteral(isolate->factory()->NewNumber(i));
}
builder.Jump(&done);
builder.LoadLiteral(Smi::FromInt(0));
builder.Bind(&done);
builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
BytecodeArrayIterator iterator(bytecode_array);
bool found_32bit_jump = false;
@@ -1106,7 +1611,7 @@ TEST(InterpreterJumpWith32BitOperand) {
}
CHECK(found_32bit_jump);
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_value)->value(), 65536.0);
@@ -1165,8 +1670,9 @@ TEST(InterpreterSmiComparisons) {
for (size_t i = 0; i < arraysize(inputs); i++) {
for (size_t j = 0; j < arraysize(inputs); j++) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone(), 0, 0, 1);
+ Isolate* isolate = handles.main_isolate();
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+
Register r0(0);
builder.LoadLiteral(Smi::FromInt(inputs[i]))
.StoreAccumulatorInRegister(r0)
@@ -1174,8 +1680,8 @@ TEST(InterpreterSmiComparisons) {
.CompareOperation(comparison, r0)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1200,9 +1706,10 @@ TEST(InterpreterHeapNumberComparisons) {
for (size_t i = 0; i < arraysize(inputs); i++) {
for (size_t j = 0; j < arraysize(inputs); j++) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
- BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone(), 0, 0, 1);
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+
Register r0(0);
builder.LoadLiteral(factory->NewHeapNumber(inputs[i]))
.StoreAccumulatorInRegister(r0)
@@ -1210,8 +1717,8 @@ TEST(InterpreterHeapNumberComparisons) {
.CompareOperation(comparison, r0)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1224,18 +1731,20 @@ TEST(InterpreterHeapNumberComparisons) {
TEST(InterpreterStringComparisons) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+
std::string inputs[] = {"A", "abc", "z", "", "Foo!", "Foo"};
for (size_t c = 0; c < arraysize(kComparisonTypes); c++) {
Token::Value comparison = kComparisonTypes[c];
for (size_t i = 0; i < arraysize(inputs); i++) {
for (size_t j = 0; j < arraysize(inputs); j++) {
+ CanonicalHandleScope canonical(isolate);
const char* lhs = inputs[i].c_str();
const char* rhs = inputs[j].c_str();
- HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
- BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone(), 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
Register r0(0);
builder.LoadLiteral(factory->NewStringFromAsciiChecked(lhs))
.StoreAccumulatorInRegister(r0)
@@ -1243,8 +1752,8 @@ TEST(InterpreterStringComparisons) {
.CompareOperation(comparison, r0)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1263,7 +1772,7 @@ TEST(InterpreterMixedComparisons) {
// performed.
const char* inputs[] = {"-1.77", "-40.333", "0.01", "55.77e5", "2.01"};
- i::UnicodeCache unicode_cache;
+ UnicodeCache unicode_cache;
for (size_t c = 0; c < arraysize(kComparisonTypes); c++) {
Token::Value comparison = kComparisonTypes[c];
@@ -1273,13 +1782,14 @@ TEST(InterpreterMixedComparisons) {
const char* lhs_cstr = inputs[i];
const char* rhs_cstr = inputs[j];
double lhs = StringToDouble(&unicode_cache, lhs_cstr,
- i::ConversionFlags::NO_FLAGS);
+ ConversionFlags::NO_FLAGS);
double rhs = StringToDouble(&unicode_cache, rhs_cstr,
- i::ConversionFlags::NO_FLAGS);
+ ConversionFlags::NO_FLAGS);
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
- BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone(), 0, 0, 1);
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+
Register r0(0);
if (pass == 0) {
// Comparison with HeapNumber on the lhs and String on the rhs
@@ -1297,8 +1807,9 @@ TEST(InterpreterMixedComparisons) {
.Return();
}
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array =
+ builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1312,24 +1823,25 @@ TEST(InterpreterMixedComparisons) {
TEST(InterpreterStrictNotEqual) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
const char* code_snippet =
"function f(lhs, rhs) {\n"
" return lhs !== rhs;\n"
"}\n"
"f(0, 0);\n";
- InterpreterTester tester(handles.main_isolate(), code_snippet);
+ InterpreterTester tester(isolate, code_snippet);
auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
// Test passing different types.
const char* inputs[] = {"-1.77", "-40.333", "0.01", "55.77e5", "2.01"};
- i::UnicodeCache unicode_cache;
+ UnicodeCache unicode_cache;
for (size_t i = 0; i < arraysize(inputs); i++) {
for (size_t j = 0; j < arraysize(inputs); j++) {
- double lhs = StringToDouble(&unicode_cache, inputs[i],
- i::ConversionFlags::NO_FLAGS);
- double rhs = StringToDouble(&unicode_cache, inputs[j],
- i::ConversionFlags::NO_FLAGS);
+ double lhs =
+ StringToDouble(&unicode_cache, inputs[i], ConversionFlags::NO_FLAGS);
+ double rhs =
+ StringToDouble(&unicode_cache, inputs[j], ConversionFlags::NO_FLAGS);
Handle<Object> lhs_obj = factory->NewNumber(lhs);
Handle<Object> rhs_obj = factory->NewStringFromAsciiChecked(inputs[j]);
@@ -1383,12 +1895,8 @@ TEST(InterpreterStrictNotEqual) {
TEST(InterpreterInstanceOf) {
HandleAndZoneScope handles;
- // TODO(4447): The new ES6 'instanceof' operator is fully desugared in the
- // parser and the Token::INSTANCEOF is not needed anymore. This test only
- // makes sense with --no-harmony-instanceof and can be removed once we
- // deprecate the ability to switch to old skool ES5 'instanceof' for good.
- FLAG_harmony_instanceof = false;
- i::Factory* factory = handles.main_isolate()->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
Handle<i::String> name = factory->NewStringFromAsciiChecked("cons");
Handle<i::JSFunction> func = factory->NewFunction(name);
Handle<i::JSObject> instance = factory->NewJSObject(func);
@@ -1396,8 +1904,8 @@ TEST(InterpreterInstanceOf) {
Handle<i::Object> cases[] = {Handle<i::Object>::cast(instance), other};
for (size_t i = 0; i < arraysize(cases); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
- 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+
Register r0(0);
builder.LoadLiteral(cases[i]);
builder.StoreAccumulatorInRegister(r0)
@@ -1405,8 +1913,8 @@ TEST(InterpreterInstanceOf) {
.CompareOperation(Token::Value::INSTANCEOF, r0)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1417,7 +1925,8 @@ TEST(InterpreterInstanceOf) {
TEST(InterpreterTestIn) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// Allocate an array
Handle<i::JSArray> array =
factory->NewJSArray(0, i::ElementsKind::FAST_SMI_ELEMENTS);
@@ -1425,8 +1934,8 @@ TEST(InterpreterTestIn) {
const char* properties[] = {"length", "fuzzle", "x", "0"};
for (size_t i = 0; i < arraysize(properties); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
- 0, 1);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
+
Register r0(0);
builder.LoadLiteral(factory->NewStringFromAsciiChecked(properties[i]))
.StoreAccumulatorInRegister(r0)
@@ -1434,8 +1943,8 @@ TEST(InterpreterTestIn) {
.CompareOperation(Token::Value::IN, r0)
.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1446,18 +1955,19 @@ TEST(InterpreterTestIn) {
TEST(InterpreterUnaryNot) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
for (size_t i = 1; i < 10; i++) {
bool expected_value = ((i & 1) == 1);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 0);
+
Register r0(0);
builder.LoadFalse();
for (size_t j = 0; j < i; j++) {
builder.LogicalNot();
}
builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1492,7 +2002,8 @@ static void LoadAny(BytecodeArrayBuilder* builder,
TEST(InterpreterUnaryNotNonBoolean) {
HandleAndZoneScope handles;
- i::Factory* factory = handles.main_isolate()->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<Handle<Object>, bool> object_type_tuples[] = {
std::make_pair(factory->undefined_value(), true),
@@ -1509,14 +2020,14 @@ TEST(InterpreterUnaryNotNonBoolean) {
};
for (size_t i = 0; i < arraysize(object_type_tuples); i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
- 0, 0);
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 0);
+
Register r0(0);
LoadAny(&builder, factory, object_type_tuples[i].first);
builder.LogicalNot();
builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+ InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
@@ -1527,6 +2038,7 @@ TEST(InterpreterUnaryNotNonBoolean) {
TEST(InterpreterTypeof) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, const char*> typeof_vals[] = {
std::make_pair("return typeof undefined;", "undefined"),
@@ -1541,7 +2053,7 @@ TEST(InterpreterTypeof) {
for (size_t i = 0; i < arraysize(typeof_vals); i++) {
std::string source(InterpreterTester::SourceForBody(typeof_vals[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<v8::internal::String> return_value =
@@ -1554,18 +2066,19 @@ TEST(InterpreterTypeof) {
TEST(InterpreterCallRuntime) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+
+ BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 2);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
- 0, 2);
builder.LoadLiteral(Smi::FromInt(15))
.StoreAccumulatorInRegister(Register(0))
.LoadLiteral(Smi::FromInt(40))
.StoreAccumulatorInRegister(Register(1))
.CallRuntime(Runtime::kAdd, Register(0), 2)
.Return();
-  Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
-  InterpreterTester tester(handles.main_isolate(), bytecode_array);
+  Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+  InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -1574,16 +2087,17 @@ TEST(InterpreterCallRuntime) {
TEST(InterpreterInvokeIntrinsic) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+
-  BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
-                               0, 2);
+  BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 2);
builder.LoadLiteral(Smi::FromInt(15))
.StoreAccumulatorInRegister(Register(0))
.CallRuntime(Runtime::kInlineIsArray, Register(0), 1)
.Return();
-  Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
-  InterpreterTester tester(handles.main_isolate(), bytecode_array);
+  Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
+  InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -1593,13 +2107,14 @@ TEST(InterpreterInvokeIntrinsic) {
TEST(InterpreterFunctionLiteral) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
// Test calling a function literal.
std::string source(
"function " + InterpreterTester::function_name() + "(a) {\n"
" return (function(x){ return x + 2; })(a);\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<i::Object> return_val = callable(
@@ -1610,8 +2125,8 @@ TEST(InterpreterFunctionLiteral) {
TEST(InterpreterRegExpLiterals) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> literals[] = {
std::make_pair("return /abd/.exec('cccabbdd');\n",
@@ -1628,7 +2143,7 @@ TEST(InterpreterRegExpLiterals) {
for (size_t i = 0; i < arraysize(literals); i++) {
std::string source(InterpreterTester::SourceForBody(literals[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1639,8 +2154,8 @@ TEST(InterpreterRegExpLiterals) {
TEST(InterpreterArrayLiterals) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> literals[] = {
std::make_pair("return [][0];\n",
@@ -1659,7 +2174,7 @@ TEST(InterpreterArrayLiterals) {
for (size_t i = 0; i < arraysize(literals); i++) {
std::string source(InterpreterTester::SourceForBody(literals[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1670,8 +2185,8 @@ TEST(InterpreterArrayLiterals) {
TEST(InterpreterObjectLiterals) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> literals[] = {
std::make_pair("return { }.name;",
@@ -1712,7 +2227,7 @@ TEST(InterpreterObjectLiterals) {
for (size_t i = 0; i < arraysize(literals); i++) {
std::string source(InterpreterTester::SourceForBody(literals[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1723,6 +2238,7 @@ TEST(InterpreterObjectLiterals) {
TEST(InterpreterConstruct) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
std::string source(
"function counter() { this.count = 0; }\n"
@@ -1732,7 +2248,7 @@ TEST(InterpreterConstruct) {
" var c = new counter();\n"
" return c.count;\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -1742,6 +2258,7 @@ TEST(InterpreterConstruct) {
TEST(InterpreterConstructWithArgument) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
std::string source(
"function counter(arg0) { this.count = 17; this.x = arg0; }\n"
@@ -1751,7 +2268,7 @@ TEST(InterpreterConstructWithArgument) {
" var c = new counter(3);\n"
" return c.x;\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -1761,6 +2278,7 @@ TEST(InterpreterConstructWithArgument) {
TEST(InterpreterConstructWithArguments) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
std::string source(
"function counter(arg0, arg1) {\n"
@@ -1772,7 +2290,7 @@ TEST(InterpreterConstructWithArguments) {
" var c = new counter(3, 5);\n"
" return c.count + c.x + c.y;\n"
"}");
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
@@ -1782,7 +2300,7 @@ TEST(InterpreterConstructWithArguments) {
TEST(InterpreterContextVariables) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::ostringstream unique_vars;
for (int i = 0; i < 250; i++) {
@@ -1811,7 +2329,7 @@ TEST(InterpreterContextVariables) {
for (size_t i = 0; i < arraysize(context_vars); i++) {
std::string source(
InterpreterTester::SourceForBody(context_vars[i].first.c_str()));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1822,7 +2340,7 @@ TEST(InterpreterContextVariables) {
TEST(InterpreterContextParameters) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> context_params[] = {
std::make_pair("return (function() { return arg1; })();",
@@ -1836,7 +2354,7 @@ TEST(InterpreterContextParameters) {
for (size_t i = 0; i < arraysize(context_params); i++) {
std::string source = "function " + InterpreterTester::function_name() +
"(arg1, arg2, arg3) {" + context_params[i].first + "}";
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable =
tester.GetCallable<Handle<Object>, Handle<Object>, Handle<Object>>();
@@ -1851,7 +2369,7 @@ TEST(InterpreterContextParameters) {
TEST(InterpreterOuterContextVariables) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> context_vars[] = {
std::make_pair("return outerVar * innerArg;",
@@ -1873,7 +2391,7 @@ TEST(InterpreterOuterContextVariables) {
for (size_t i = 0; i < arraysize(context_vars); i++) {
std::string source = header + context_vars[i].first + footer;
- InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
+ InterpreterTester tester(isolate, source.c_str(), "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1884,8 +2402,8 @@ TEST(InterpreterOuterContextVariables) {
TEST(InterpreterComma) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> literals[] = {
std::make_pair("var a; return 0, a;\n", factory->undefined_value()),
@@ -1901,7 +2419,7 @@ TEST(InterpreterComma) {
for (size_t i = 0; i < arraysize(literals); i++) {
std::string source(InterpreterTester::SourceForBody(literals[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1912,8 +2430,8 @@ TEST(InterpreterComma) {
TEST(InterpreterLogicalOr) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> literals[] = {
std::make_pair("var a, b; return a || b;\n", factory->undefined_value()),
@@ -1929,7 +2447,7 @@ TEST(InterpreterLogicalOr) {
for (size_t i = 0; i < arraysize(literals); i++) {
std::string source(InterpreterTester::SourceForBody(literals[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1940,8 +2458,8 @@ TEST(InterpreterLogicalOr) {
TEST(InterpreterLogicalAnd) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> literals[] = {
std::make_pair("var a, b = 10; return a && b;\n",
@@ -1962,7 +2480,7 @@ TEST(InterpreterLogicalAnd) {
for (size_t i = 0; i < arraysize(literals); i++) {
std::string source(InterpreterTester::SourceForBody(literals[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -1973,7 +2491,7 @@ TEST(InterpreterLogicalAnd) {
TEST(InterpreterTryCatch) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> catches[] = {
std::make_pair("var a = 1; try { a = 2 } catch(e) { a = 3 }; return a;",
@@ -1989,7 +2507,7 @@ TEST(InterpreterTryCatch) {
for (size_t i = 0; i < arraysize(catches); i++) {
std::string source(InterpreterTester::SourceForBody(catches[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2000,8 +2518,8 @@ TEST(InterpreterTryCatch) {
TEST(InterpreterTryFinally) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> finallies[] = {
std::make_pair(
@@ -2047,7 +2565,7 @@ TEST(InterpreterTryFinally) {
for (size_t i = 0; i < arraysize(finallies); i++) {
std::string source(InterpreterTester::SourceForBody(finallies[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
tester.GetCallable<>();
Handle<Object> wrapped = v8::Utils::OpenHandle(*CompileRun(try_wrapper));
CHECK(wrapped->SameValue(*finallies[i].second));
@@ -2057,8 +2575,8 @@ TEST(InterpreterTryFinally) {
TEST(InterpreterThrow) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> throws[] = {
std::make_pair("throw undefined;\n",
@@ -2080,7 +2598,7 @@ TEST(InterpreterThrow) {
for (size_t i = 0; i < arraysize(throws); i++) {
std::string source(InterpreterTester::SourceForBody(throws[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
tester.GetCallable<>();
Handle<Object> thrown_obj = v8::Utils::OpenHandle(*CompileRun(try_wrapper));
CHECK(thrown_obj->SameValue(*throws[i].second));
@@ -2090,8 +2608,8 @@ TEST(InterpreterThrow) {
TEST(InterpreterCountOperators) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> count_ops[] = {
std::make_pair("var a = 1; return ++a;",
@@ -2143,7 +2661,7 @@ TEST(InterpreterCountOperators) {
for (size_t i = 0; i < arraysize(count_ops); i++) {
std::string source(InterpreterTester::SourceForBody(count_ops[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2154,7 +2672,7 @@ TEST(InterpreterCountOperators) {
TEST(InterpreterGlobalCountOperators) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> count_ops[] = {
std::make_pair("var global = 100;function f(){ return ++global; }",
@@ -2172,7 +2690,7 @@ TEST(InterpreterGlobalCountOperators) {
};
for (size_t i = 0; i < arraysize(count_ops); i++) {
- InterpreterTester tester(handles.main_isolate(), count_ops[i].first);
+ InterpreterTester tester(isolate, count_ops[i].first);
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2183,8 +2701,8 @@ TEST(InterpreterGlobalCountOperators) {
TEST(InterpreterCompoundExpressions) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> compound_expr[] = {
std::make_pair("var a = 1; a += 2; return a;",
@@ -2203,7 +2721,7 @@ TEST(InterpreterCompoundExpressions) {
for (size_t i = 0; i < arraysize(compound_expr); i++) {
std::string source(
InterpreterTester::SourceForBody(compound_expr[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2214,7 +2732,7 @@ TEST(InterpreterCompoundExpressions) {
TEST(InterpreterGlobalCompoundExpressions) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> compound_expr[2] = {
std::make_pair("var global = 100;"
@@ -2226,7 +2744,7 @@ TEST(InterpreterGlobalCompoundExpressions) {
};
for (size_t i = 0; i < arraysize(compound_expr); i++) {
- InterpreterTester tester(handles.main_isolate(), compound_expr[i].first);
+ InterpreterTester tester(isolate, compound_expr[i].first);
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2237,8 +2755,8 @@ TEST(InterpreterGlobalCompoundExpressions) {
TEST(InterpreterCreateArguments) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, int> create_args[] = {
std::make_pair("function f() { return arguments[0]; }", 0),
@@ -2259,6 +2777,8 @@ TEST(InterpreterCreateArguments) {
std::make_pair("function f(a, b, c, d) {"
" 'use strict'; c = b; return arguments[2]; }",
2),
+ // Check arguments for duplicate parameters in sloppy mode.
+ std::make_pair("function f(a, a, b) { return arguments[1]; }", 1),
// check rest parameters
std::make_pair("function f(...restArray) { return restArray[0]; }", 0),
std::make_pair("function f(a, ...restArray) { return restArray[0]; }", 1),
@@ -2272,7 +2792,7 @@ TEST(InterpreterCreateArguments) {
// Test passing no arguments.
for (size_t i = 0; i < arraysize(create_args); i++) {
- InterpreterTester tester(handles.main_isolate(), create_args[i].first);
+ InterpreterTester tester(isolate, create_args[i].first);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(factory->undefined_value()));
@@ -2280,7 +2800,7 @@ TEST(InterpreterCreateArguments) {
// Test passing one argument.
for (size_t i = 0; i < arraysize(create_args); i++) {
- InterpreterTester tester(handles.main_isolate(), create_args[i].first);
+ InterpreterTester tester(isolate, create_args[i].first);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_val =
callable(handle(Smi::FromInt(40), isolate)).ToHandleChecked();
@@ -2299,7 +2819,7 @@ TEST(InterpreterCreateArguments) {
handle(Smi::FromInt(80), isolate),
};
- InterpreterTester tester(handles.main_isolate(), create_args[i].first);
+ InterpreterTester tester(isolate, create_args[i].first);
auto callable =
tester.GetCallable<Handle<Object>, Handle<Object>, Handle<Object>>();
Handle<Object> return_val =
@@ -2311,7 +2831,7 @@ TEST(InterpreterCreateArguments) {
TEST(InterpreterConditional) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> conditional[] = {
std::make_pair("return true ? 2 : 3;",
@@ -2334,7 +2854,7 @@ TEST(InterpreterConditional) {
for (size_t i = 0; i < arraysize(conditional); i++) {
std::string source(InterpreterTester::SourceForBody(conditional[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2345,8 +2865,8 @@ TEST(InterpreterConditional) {
TEST(InterpreterDelete) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// Tests for delete for local variables that work both in strict
// and sloppy modes
@@ -2378,7 +2898,7 @@ TEST(InterpreterDelete) {
// Test delete in sloppy mode
for (size_t i = 0; i < arraysize(test_delete); i++) {
std::string source(InterpreterTester::SourceForBody(test_delete[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2390,7 +2910,7 @@ TEST(InterpreterDelete) {
std::string strict_test =
"'use strict'; " + std::string(test_delete[i].first);
std::string source(InterpreterTester::SourceForBody(strict_test.c_str()));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2401,8 +2921,8 @@ TEST(InterpreterDelete) {
TEST(InterpreterDeleteSloppyUnqualifiedIdentifier) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// These tests generate a syntax error for strict mode. We don't
// test for it here.
@@ -2429,7 +2949,7 @@ TEST(InterpreterDeleteSloppyUnqualifiedIdentifier) {
for (size_t i = 0; i < arraysize(test_delete); i++) {
std::string source(InterpreterTester::SourceForBody(test_delete[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2440,8 +2960,8 @@ TEST(InterpreterDeleteSloppyUnqualifiedIdentifier) {
TEST(InterpreterGlobalDelete) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> test_global_delete[] = {
std::make_pair("var a = { x:10, y:'abc', z:30.2 };\n"
@@ -2495,8 +3015,7 @@ TEST(InterpreterGlobalDelete) {
factory->ToBoolean(true))};
for (size_t i = 0; i < arraysize(test_global_delete); i++) {
- InterpreterTester tester(handles.main_isolate(),
- test_global_delete[i].first);
+ InterpreterTester tester(isolate, test_global_delete[i].first);
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2507,8 +3026,8 @@ TEST(InterpreterGlobalDelete) {
TEST(InterpreterBasicLoops) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> loops[] = {
std::make_pair("var a = 10; var b = 1;\n"
@@ -2592,7 +3111,7 @@ TEST(InterpreterBasicLoops) {
for (size_t i = 0; i < arraysize(loops); i++) {
std::string source(InterpreterTester::SourceForBody(loops[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2769,6 +3288,7 @@ TEST(InterpreterForIn) {
// used.
for (int pass = 0; pass < 2; pass++) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
std::ostringstream wide_os;
if (pass == 1) {
for (int i = 0; i < 200; i++) {
@@ -2781,7 +3301,7 @@ TEST(InterpreterForIn) {
body_os << wide_os.str() << for_in_samples[i].first;
std::string body(body_os.str());
std::string function = InterpreterTester::SourceForBody(body.c_str());
- InterpreterTester tester(handles.main_isolate(), function.c_str());
+ InterpreterTester tester(isolate, function.c_str());
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK_EQ(Handle<Smi>::cast(return_val)->value(),
@@ -2793,8 +3313,8 @@ TEST(InterpreterForIn) {
TEST(InterpreterForOf) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> for_of[] = {
{"function f() {\n"
@@ -2893,7 +3413,7 @@ TEST(InterpreterForOf) {
};
for (size_t i = 0; i < arraysize(for_of); i++) {
- InterpreterTester tester(handles.main_isolate(), for_of[i].first);
+ InterpreterTester tester(isolate, for_of[i].first);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val->SameValue(*for_of[i].second));
@@ -2903,8 +3423,8 @@ TEST(InterpreterForOf) {
TEST(InterpreterSwitch) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> switch_ops[] = {
std::make_pair("var a = 1;\n"
@@ -2972,7 +3492,7 @@ TEST(InterpreterSwitch) {
for (size_t i = 0; i < arraysize(switch_ops); i++) {
std::string source(InterpreterTester::SourceForBody(switch_ops[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -2983,8 +3503,8 @@ TEST(InterpreterSwitch) {
TEST(InterpreterSloppyThis) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> sloppy_this[] = {
std::make_pair("var global_val = 100;\n"
@@ -3006,7 +3526,7 @@ TEST(InterpreterSloppyThis) {
};
for (size_t i = 0; i < arraysize(sloppy_this); i++) {
- InterpreterTester tester(handles.main_isolate(), sloppy_this[i].first);
+ InterpreterTester tester(isolate, sloppy_this[i].first);
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3017,10 +3537,10 @@ TEST(InterpreterSloppyThis) {
TEST(InterpreterThisFunction) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
- InterpreterTester tester(handles.main_isolate(),
+ InterpreterTester tester(isolate,
"var f;\n f = function f() { return f.name; }");
auto callable = tester.GetCallable<>();
@@ -3031,13 +3551,12 @@ TEST(InterpreterThisFunction) {
TEST(InterpreterNewTarget) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// TODO(rmcilroy): Add tests that we get the original constructor for
// superclass constructors once we have class support.
- InterpreterTester tester(handles.main_isolate(),
- "function f() { this.a = new.target; }");
+ InterpreterTester tester(isolate, "function f() { this.a = new.target; }");
auto callable = tester.GetCallable<>();
callable().ToHandleChecked();
@@ -3049,6 +3568,7 @@ TEST(InterpreterNewTarget) {
TEST(InterpreterAssignmentInExpressions) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, int> samples[] = {
{"function f() {\n"
@@ -3178,7 +3698,7 @@ TEST(InterpreterAssignmentInExpressions) {
const int arg_value = 40;
for (size_t i = 0; i < arraysize(samples); i++) {
- InterpreterTester tester(handles.main_isolate(), samples[i].first);
+ InterpreterTester tester(isolate, samples[i].first);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_val =
callable(handle(Smi::FromInt(arg_value), handles.main_isolate()))
@@ -3190,8 +3710,8 @@ TEST(InterpreterAssignmentInExpressions) {
TEST(InterpreterToName) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> to_name_tests[] = {
{"var a = 'val'; var obj = {[a] : 10}; return obj.val;",
@@ -3222,7 +3742,7 @@ TEST(InterpreterToName) {
for (size_t i = 0; i < arraysize(to_name_tests); i++) {
std::string source(
InterpreterTester::SourceForBody(to_name_tests[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3233,8 +3753,8 @@ TEST(InterpreterToName) {
TEST(TemporaryRegisterAllocation) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> reg_tests[] = {
{"function add(a, b, c) {"
@@ -3256,7 +3776,7 @@ TEST(TemporaryRegisterAllocation) {
};
for (size_t i = 0; i < arraysize(reg_tests); i++) {
- InterpreterTester tester(handles.main_isolate(), reg_tests[i].first);
+ InterpreterTester tester(isolate, reg_tests[i].first);
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3267,8 +3787,8 @@ TEST(TemporaryRegisterAllocation) {
TEST(InterpreterLookupSlot) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// TODO(mythria): Add more tests when we have support for eval/with.
const char* function_prologue = "var f;"
@@ -3293,7 +3813,7 @@ TEST(InterpreterLookupSlot) {
std::string(lookup_slot[i].first) +
std::string(function_epilogue);
- InterpreterTester tester(handles.main_isolate(), script.c_str(), "t");
+ InterpreterTester tester(isolate, script.c_str(), "t");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3304,7 +3824,7 @@ TEST(InterpreterLookupSlot) {
TEST(InterpreterCallLookupSlot) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> call_lookup[] = {
{"g = function(){ return 2 }; eval(''); return g();",
@@ -3320,7 +3840,7 @@ TEST(InterpreterCallLookupSlot) {
for (size_t i = 0; i < arraysize(call_lookup); i++) {
std::string source(InterpreterTester::SourceForBody(call_lookup[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3331,8 +3851,8 @@ TEST(InterpreterCallLookupSlot) {
TEST(InterpreterLookupSlotWide) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
const char* function_prologue =
"var f;"
@@ -3364,7 +3884,7 @@ TEST(InterpreterLookupSlotWide) {
std::string script = std::string(function_prologue) + lookup_slot[i].first +
std::string(function_epilogue);
- InterpreterTester tester(handles.main_isolate(), script.c_str(), "t");
+ InterpreterTester tester(isolate, script.c_str(), "t");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3375,8 +3895,8 @@ TEST(InterpreterLookupSlotWide) {
TEST(InterpreterDeleteLookupSlot) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
// TODO(mythria): Add more tests when we have support for eval/with.
const char* function_prologue = "var f;"
@@ -3405,7 +3925,7 @@ TEST(InterpreterDeleteLookupSlot) {
std::string(delete_lookup_slot[i].first) +
std::string(function_epilogue);
- InterpreterTester tester(handles.main_isolate(), script.c_str(), "t");
+ InterpreterTester tester(isolate, script.c_str(), "t");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3416,10 +3936,10 @@ TEST(InterpreterDeleteLookupSlot) {
TEST(JumpWithConstantsAndWideConstants) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
const int kStep = 13;
for (int constants = 11; constants < 256 + 3 * kStep; constants += kStep) {
- auto isolate = handles.main_isolate();
- auto factory = isolate->factory();
std::ostringstream filler_os;
// Generate a string that consumes constant pool entries and
// spread out branch distances in script below.
@@ -3440,7 +3960,7 @@ TEST(JumpWithConstantsAndWideConstants) {
script_os << "}\n";
std::string script(script_os.str());
for (int a = 0; a < 3; a++) {
- InterpreterTester tester(handles.main_isolate(), script.c_str());
+ InterpreterTester tester(isolate, script.c_str());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> argument = factory->NewNumberFromInt(a);
Handle<Object> return_val = callable(argument).ToHandleChecked();
@@ -3453,8 +3973,8 @@ TEST(JumpWithConstantsAndWideConstants) {
TEST(InterpreterEval) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> eval[] = {
{"return eval('1;');", handle(Smi::FromInt(1), isolate)},
@@ -3492,7 +4012,7 @@ TEST(InterpreterEval) {
for (size_t i = 0; i < arraysize(eval); i++) {
std::string source(InterpreterTester::SourceForBody(eval[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*eval[i].second));
@@ -3502,7 +4022,7 @@ TEST(InterpreterEval) {
TEST(InterpreterEvalParams) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> eval_params[] = {
{"var x = 10; return eval('x + p1;');",
@@ -3518,7 +4038,7 @@ TEST(InterpreterEvalParams) {
for (size_t i = 0; i < arraysize(eval_params); i++) {
std::string source = "function " + InterpreterTester::function_name() +
"(p1) {" + eval_params[i].first + "}";
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<i::Object> return_value =
@@ -3530,8 +4050,8 @@ TEST(InterpreterEvalParams) {
TEST(InterpreterEvalGlobal) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> eval_global[] = {
{"function add_global() { eval('function test() { z = 33; }; test()'); };"
@@ -3546,8 +4066,7 @@ TEST(InterpreterEvalGlobal) {
};
for (size_t i = 0; i < arraysize(eval_global); i++) {
- InterpreterTester tester(handles.main_isolate(), eval_global[i].first,
- "test");
+ InterpreterTester tester(isolate, eval_global[i].first, "test");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3558,8 +4077,8 @@ TEST(InterpreterEvalGlobal) {
TEST(InterpreterEvalVariableDecl) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> eval_global[] = {
{"function f() { eval('var x = 10; x++;'); return x; }",
@@ -3597,7 +4116,7 @@ TEST(InterpreterEvalVariableDecl) {
};
for (size_t i = 0; i < arraysize(eval_global); i++) {
- InterpreterTester tester(handles.main_isolate(), eval_global[i].first, "*");
+ InterpreterTester tester(isolate, eval_global[i].first, "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3608,7 +4127,7 @@ TEST(InterpreterEvalVariableDecl) {
TEST(InterpreterEvalFunctionDecl) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> eval_func_decl[] = {
{"function f() {\n"
@@ -3621,8 +4140,7 @@ TEST(InterpreterEvalFunctionDecl) {
};
for (size_t i = 0; i < arraysize(eval_func_decl); i++) {
- InterpreterTester tester(handles.main_isolate(), eval_func_decl[i].first,
- "*");
+ InterpreterTester tester(isolate, eval_func_decl[i].first, "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3632,7 +4150,7 @@ TEST(InterpreterEvalFunctionDecl) {
TEST(InterpreterWideRegisterArithmetic) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
static const size_t kMaxRegisterForTest = 150;
std::ostringstream os;
@@ -3662,7 +4180,7 @@ TEST(InterpreterWideRegisterArithmetic) {
os << "}\n";
std::string source = os.str();
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<Handle<Object>>();
for (size_t i = 0; i < kMaxRegisterForTest; i++) {
Handle<Object> arg = handle(Smi::FromInt(static_cast<int>(i)), isolate);
@@ -3676,6 +4194,9 @@ TEST(InterpreterCallWideRegisters) {
static const int kLength = 512;
static const int kStartChar = 65;
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+
for (int pass = 0; pass < 3; pass += 1) {
std::ostringstream os;
for (int i = 0; i < pass * 97; i += 1) {
@@ -3688,8 +4209,7 @@ TEST(InterpreterCallWideRegisters) {
}
os << ");";
std::string source = InterpreterTester::SourceForBody(os.str().c_str());
- HandleAndZoneScope handles;
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable();
Handle<Object> return_val = callable().ToHandleChecked();
Handle<String> return_string = Handle<String>::cast(return_val);
@@ -3701,10 +4221,10 @@ TEST(InterpreterCallWideRegisters) {
}
TEST(InterpreterWideParametersPickOne) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
static const int kParameterCount = 130;
for (int parameter = 0; parameter < 10; parameter++) {
- HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
std::ostringstream os;
os << "function " << InterpreterTester::function_name() << "(arg) {\n";
os << " function selector(i";
@@ -3723,7 +4243,7 @@ TEST(InterpreterWideParametersPickOne) {
os << "}\n";
std::string source = os.str();
- InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
+ InterpreterTester tester(isolate, source.c_str(), "*");
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> arg = handle(Smi::FromInt(0xaa55), isolate);
Handle<Object> return_value = callable(arg).ToHandleChecked();
@@ -3736,7 +4256,7 @@ TEST(InterpreterWideParametersSummation) {
static int kParameterCount = 200;
static int kBaseValue = 17000;
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::ostringstream os;
os << "function " << InterpreterTester::function_name() << "(arg) {\n";
os << " function summation(i";
@@ -3762,7 +4282,7 @@ TEST(InterpreterWideParametersSummation) {
os << "}\n";
std::string source = os.str();
- InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
+ InterpreterTester tester(isolate, source.c_str(), "*");
auto callable = tester.GetCallable<Handle<Object>>();
for (int i = 0; i < kParameterCount; i++) {
Handle<Object> arg = handle(Smi::FromInt(i), isolate);
@@ -3778,7 +4298,7 @@ TEST(InterpreterDoExpression) {
FLAG_harmony_do_expressions = true;
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> do_expr[] = {
@@ -3794,7 +4314,7 @@ TEST(InterpreterDoExpression) {
for (size_t i = 0; i < arraysize(do_expr); i++) {
std::string source(InterpreterTester::SourceForBody(do_expr[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3806,7 +4326,7 @@ TEST(InterpreterDoExpression) {
TEST(InterpreterWithStatement) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> with_stmt[] = {
{"with({x:42}) return x;", handle(Smi::FromInt(42), isolate)},
@@ -3829,7 +4349,7 @@ TEST(InterpreterWithStatement) {
for (size_t i = 0; i < arraysize(with_stmt); i++) {
std::string source(InterpreterTester::SourceForBody(with_stmt[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3839,7 +4359,7 @@ TEST(InterpreterWithStatement) {
TEST(InterpreterClassLiterals) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> examples[] = {
{"class C {\n"
" constructor(x) { this.x_ = x; }\n"
@@ -3890,7 +4410,7 @@ TEST(InterpreterClassLiterals) {
for (size_t i = 0; i < arraysize(examples); ++i) {
std::string source(InterpreterTester::SourceForBody(examples[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
+ InterpreterTester tester(isolate, source.c_str(), "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -3900,7 +4420,7 @@ TEST(InterpreterClassLiterals) {
TEST(InterpreterClassAndSuperClass) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, Handle<Object>> examples[] = {
{"class A {\n"
" constructor(x) { this.x_ = x; }\n"
@@ -3950,7 +4470,7 @@ TEST(InterpreterClassAndSuperClass) {
for (size_t i = 0; i < arraysize(examples); ++i) {
std::string source(InterpreterTester::SourceForBody(examples[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
+ InterpreterTester tester(isolate, source.c_str(), "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*examples[i].second));
@@ -3959,8 +4479,8 @@ TEST(InterpreterClassAndSuperClass) {
TEST(InterpreterConstDeclaration) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> const_decl[] = {
{"const x = 3; return x;", handle(Smi::FromInt(3), isolate)},
@@ -3992,7 +4512,7 @@ TEST(InterpreterConstDeclaration) {
// Tests for sloppy mode.
for (size_t i = 0; i < arraysize(const_decl); i++) {
std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -4004,7 +4524,7 @@ TEST(InterpreterConstDeclaration) {
std::string strict_body =
"'use strict'; " + std::string(const_decl[i].first);
std::string source(InterpreterTester::SourceForBody(strict_body.c_str()));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -4014,8 +4534,8 @@ TEST(InterpreterConstDeclaration) {
TEST(InterpreterConstDeclarationLookupSlots) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
- i::Factory* factory = isolate->factory();
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
std::pair<const char*, Handle<Object>> const_decl[] = {
{"const x = 3; function f1() {return x;}; return x;",
@@ -4031,7 +4551,7 @@ TEST(InterpreterConstDeclarationLookupSlots) {
// Tests for sloppy mode.
for (size_t i = 0; i < arraysize(const_decl); i++) {
std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -4043,7 +4563,7 @@ TEST(InterpreterConstDeclarationLookupSlots) {
std::string strict_body =
"'use strict'; " + std::string(const_decl[i].first);
std::string source(InterpreterTester::SourceForBody(strict_body.c_str()));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -4053,7 +4573,7 @@ TEST(InterpreterConstDeclarationLookupSlots) {
TEST(InterpreterConstInLookupContextChain) {
HandleAndZoneScope handles;
- i::Isolate* isolate = handles.main_isolate();
+ Isolate* isolate = handles.main_isolate();
const char* prologue =
"function OuterMost() {\n"
@@ -4088,7 +4608,7 @@ TEST(InterpreterConstInLookupContextChain) {
std::string script = std::string(prologue) +
std::string(const_decl[i].first) +
std::string(epilogue);
- InterpreterTester tester(handles.main_isolate(), script.c_str(), "*");
+ InterpreterTester tester(isolate, script.c_str(), "*");
auto callable = tester.GetCallable<>();
Handle<i::Object> return_value = callable().ToHandleChecked();
@@ -4098,6 +4618,7 @@ TEST(InterpreterConstInLookupContextChain) {
TEST(InterpreterIllegalConstDeclaration) {
HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
std::pair<const char*, const char*> const_decl[] = {
{"const x = x = 10 + 3; return x;",
@@ -4117,7 +4638,7 @@ TEST(InterpreterIllegalConstDeclaration) {
// Tests for sloppy mode.
for (size_t i = 0; i < arraysize(const_decl); i++) {
std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
v8::Local<v8::String> expected_string = v8_str(const_decl[i].second);
CHECK(
@@ -4130,7 +4651,7 @@ TEST(InterpreterIllegalConstDeclaration) {
std::string strict_body =
"'use strict'; " + std::string(const_decl[i].first);
std::string source(InterpreterTester::SourceForBody(strict_body.c_str()));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
+ InterpreterTester tester(isolate, source.c_str());
v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
v8::Local<v8::String> expected_string = v8_str(const_decl[i].second);
CHECK(
@@ -4139,6 +4660,30 @@ TEST(InterpreterIllegalConstDeclaration) {
}
}
+TEST(InterpreterGenerators) {
+ HandleAndZoneScope handles;
+ Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> tests[] = {
+ {"function* f() { }; return f().next().value",
+ factory->undefined_value()},
+ {"function* f() { yield 42 }; return f().next().value",
+ factory->NewNumberFromInt(42)},
+ {"function* f() { for (let x of [42]) yield x}; return f().next().value",
+ factory->NewNumberFromInt(42)},
+ };
+
+ for (size_t i = 0; i < arraysize(tests); i++) {
+ std::string source(InterpreterTester::SourceForBody(tests[i].first));
+ InterpreterTester tester(isolate, source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*tests[i].second));
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
new file mode 100644
index 0000000000..3161f92db9
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -0,0 +1,250 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/pipeline.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-generator.h"
+#include "src/interpreter/interpreter.h"
+#include "src/isolate.h"
+#include "src/parsing/parser.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/interpreter/source-position-matcher.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Flags enabling optimizations that change generated bytecode array.
+// Format is <command-line flag> <flag name> <bit index>
+#define OPTIMIZATION_FLAGS(V) \
+ V(FLAG_ignition_reo, kUseReo, 0) \
+ V(FLAG_ignition_peephole, kUsePeephole, 1) \
+ V(FLAG_ignition_filter_expression_positions, kUseFilterExpressionPositions, \
+ 2) \
+ V(FLAG_ignition_deadcode, kUseDeadCode, 3)
+
+#define DECLARE_BIT(_, Name, BitIndex) static const int Name = 1 << BitIndex;
+OPTIMIZATION_FLAGS(DECLARE_BIT)
+#undef DECLARE_BIT
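Expanded with DECLARE_BIT, the OPTIMIZATION_FLAGS list above boils down to one bit constant per optimization, roughly:

    static const int kUseReo = 1 << 0;
    static const int kUsePeephole = 1 << 1;
    static const int kUseFilterExpressionPositions = 1 << 2;
    static const int kUseDeadCode = 1 << 3;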
+
+// Test cases for which source positions are checked. Please ensure all
+// combinations of flags are present here. This is done manually because
+// it yields failure cases that are easier for humans to comprehend.
+#define TEST_CASES(V) \
+ V(UsingReo, kUseReo) \
+ V(UsingPeephole, kUsePeephole) \
+ V(UsingDeadCode, kUseDeadCode) \
+ V(UsingFilterExpressionPositions, kUseFilterExpressionPositions) \
+ V(UsingReoAndPeephole, kUseReo | kUsePeephole) \
+ V(UsingReoAndFilterExpressionPositions, \
+ kUseReo | kUseFilterExpressionPositions) \
+ V(UsingReoAndDeadCode, kUseReo | kUseDeadCode) \
+ V(UsingPeepholeAndFilterExpressionPositions, \
+ kUsePeephole | kUseFilterExpressionPositions) \
+ V(UsingPeepholeAndDeadCode, kUsePeephole | kUseDeadCode) \
+ V(UsingFilterExpressionPositionsAndDeadCode, \
+ kUseFilterExpressionPositions | kUseDeadCode) \
+ V(UsingAllOptimizations, \
+ kUseReo | kUsePeephole | kUseFilterExpressionPositions | kUseDeadCode)
+
+struct TestCaseData {
+ TestCaseData(const char* const script,
+ const char* const declaration_parameters = "",
+ const char* const arguments = "")
+ : script_(script),
+ declaration_parameters_(declaration_parameters),
+ arguments_(arguments) {}
+
+ const char* script() const { return script_; }
+ const char* declaration_parameters() const { return declaration_parameters_; }
+ const char* arguments() const { return arguments_; }
+
+ private:
+ TestCaseData();
+
+ const char* const script_;
+ const char* const declaration_parameters_;
+ const char* const arguments_;
+};
+
+static const TestCaseData kTestCaseData[] = {
+ {"var x = (y = 3) + (x = y); return x + y;"},
+ {"var x = 55;\n"
+ "var y = x + (x = 1) + (x = 2) + (x = 3);\n"
+ "return y;"},
+ {"var x = 10; return x >>> 3;\n"},
+ {"var x = 0; return x || (1, 2, 3);\n"},
+ {"return a || (a, b, a, b, c = 5, 3);\n"},
+ {"var a = 3; var b = 4; a = b; b = a; a = b; return a;\n"},
+ {"var a = 1; return [[a, 2], [a + 2]];\n"},
+ {"var a = 1; if (a || a < 0) { return 1; }\n"},
+ {"var b;"
+ "b = a.name;"
+ "b = a.name;"
+ "a.name = a;"
+ "b = a.name;"
+ "a.name = a;"
+ "return b;"},
+ {"var sum = 0;\n"
+ "outer: {\n"
+ " for (var x = 0; x < 10; ++x) {\n"
+ " for (var y = 0; y < 3; ++y) {\n"
+ " ++sum;\n"
+ " if (x + y == 12) { break outer; }\n"
+ " }\n"
+ " }\n"
+ "}\n"
+ "return sum;\n"},
+ {"var a = 1;"
+ "switch (a) {"
+ " case 1: return a * a + 1;"
+ " case 1: break;"
+ " case 2: return (a = 3) * a + (a = 4);"
+ " case 3:"
+ "}"
+ "return a;"},
+ {"for (var p of [0, 1, 2]) {}"},
+ {"var x = { 'a': 1, 'b': 2 };"
+ "for (x['a'] of [1,2,3]) { return x['a']; }"},
+ {"while (x == 4) {\n"
+ " var y = x + 1;\n"
+ " if (y == 2) break;\n"
+ " for (z['a'] of [0]) {\n"
+ " x += (x *= 3) + y;"
+ " }\n"
+ "}\n"},
+ {"function g(a, b) { return a.func(b + b, b); }\n"
+ "g(new (function Obj() { this.func = function() { return; }})(), 1)\n"},
+ {"return some_global[name];", "name", "'a'"}};
+
+class OptimizedBytecodeSourcePositionTester final {
+ public:
+ explicit OptimizedBytecodeSourcePositionTester(Isolate* isolate)
+ : isolate_(isolate) {
+ SaveOptimizationFlags();
+ saved_flag_ignition_ = FLAG_ignition;
+ FLAG_ignition = true;
+ saved_flag_always_opt_ = FLAG_always_opt;
+ FLAG_always_opt = false;
+ }
+
+ ~OptimizedBytecodeSourcePositionTester() {
+ RestoreOptimizationFlags();
+ FLAG_ignition = saved_flag_ignition_;
+ FLAG_always_opt = saved_flag_always_opt_;
+ }
+
+ bool SourcePositionsMatch(int optimization_bitmap, const char* function_body,
+ const char* function_decl_params,
+ const char* function_args);
+
+ private:
+ Handle<BytecodeArray> MakeBytecode(int optimization_bitmap,
+ const char* function_body,
+ const char* function_decl_params,
+ const char* function_args);
+ static std::string MakeScript(const char* function_body,
+ const char* function_decl_params,
+ const char* function_args);
+
+ void SetOptimizationFlags(int optimization_bitmap);
+ void SaveOptimizationFlags();
+ void RestoreOptimizationFlags();
+
+ Isolate* isolate() const { return isolate_; }
+
+ Isolate* isolate_;
+ int saved_optimization_bitmap_;
+ bool saved_flag_ignition_;
+ bool saved_flag_always_opt_;
+};
+
+// static
+std::string OptimizedBytecodeSourcePositionTester::MakeScript(
+ const char* function_body, const char* function_decl_params,
+ const char* function_args) {
+ std::ostringstream os;
+ os << "function test_function"
+ << "(" << function_decl_params << ") {";
+ os << function_body;
+ os << "}";
+ os << "test_function(" << function_args << ");";
+ return os.str();
+}
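As a concrete example, the last kTestCaseData entry above ("return some_global[name];" with declaration parameter "name" and argument "'a'") makes MakeScript produce:

    function test_function(name) {return some_global[name];}test_function('a');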
+
+Handle<BytecodeArray> OptimizedBytecodeSourcePositionTester::MakeBytecode(
+ int optimization_bitmap, const char* function_body,
+ const char* function_decl_params, const char* function_args) {
+ std::string script =
+ MakeScript(function_body, function_decl_params, function_args);
+ SetOptimizationFlags(optimization_bitmap);
+ CompileRun(script.c_str());
+
+ Local<Function> api_function = Local<Function>::Cast(
+ CcTest::global()
+ ->Get(CcTest::isolate()->GetCurrentContext(), v8_str("test_function"))
+ .ToLocalChecked());
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
+ return handle(function->shared()->bytecode_array());
+}
+
+void OptimizedBytecodeSourcePositionTester::SetOptimizationFlags(
+ int optimization_bitmap) {
+#define SET_FLAG(V8Flag, BitName, _) \
+ V8Flag = (optimization_bitmap & BitName) ? true : false;
+ OPTIMIZATION_FLAGS(SET_FLAG)
+#undef SET_FLAG
+}
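Applied via OPTIMIZATION_FLAGS, SET_FLAG assigns each command-line flag from its bit in the bitmap; the first entry, for instance, expands to:

    FLAG_ignition_reo = (optimization_bitmap & kUseReo) ? true : false;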
+
+void OptimizedBytecodeSourcePositionTester::SaveOptimizationFlags() {
+ saved_optimization_bitmap_ = 0;
+#define SAVE_FLAG(V8Flag, BitName, _) \
+  if (V8Flag) saved_optimization_bitmap_ |= BitName;
+  OPTIMIZATION_FLAGS(SAVE_FLAG)
+#undef SAVE_FLAG
+}
+
+void OptimizedBytecodeSourcePositionTester::RestoreOptimizationFlags() {
+ SetOptimizationFlags(saved_optimization_bitmap_);
+}
+
+bool OptimizedBytecodeSourcePositionTester::SourcePositionsMatch(
+ int optimization_bitmap, const char* function_body,
+ const char* function_decl_params, const char* function_args) {
+ Handle<BytecodeArray> unoptimized_bytecode =
+ MakeBytecode(0, function_body, function_decl_params, function_args);
+ Handle<BytecodeArray> optimized_bytecode = MakeBytecode(
+ optimization_bitmap, function_body, function_decl_params, function_args);
+ SourcePositionMatcher matcher;
+  return matcher.Match(unoptimized_bytecode, optimized_bytecode);
+}
+
+void TestSourcePositionsEquivalent(int optimization_bitmap) {
+ HandleAndZoneScope handles;
+ // Ensure handler table is generated.
+ handles.main_isolate()->interpreter()->Initialize();
+
+ OptimizedBytecodeSourcePositionTester tester(handles.main_isolate());
+ for (auto test_case_data : kTestCaseData) {
+ CHECK(tester.SourcePositionsMatch(
+ optimization_bitmap, test_case_data.script(),
+ test_case_data.declaration_parameters(), test_case_data.arguments()));
+ }
+}
+
+#define MAKE_TEST(Name, Bitmap) \
+ TEST(TestSourcePositionsEquivalent##Name) { \
+ TestSourcePositionsEquivalent(Bitmap); \
+ }
+TEST_CASES(MAKE_TEST)
+#undef MAKE_TEST
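Each TEST_CASES entry thus expands into an ordinary cctest; the first one, for instance, becomes:

    TEST(TestSourcePositionsEquivalentUsingReo) {
      TestSourcePositionsEquivalent(kUseReo);
    }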
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
new file mode 100644
index 0000000000..2e15d6af9e
--- /dev/null
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -0,0 +1,312 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include <limits>
+
+#include "include/libplatform/v8-tracing.h"
+#include "src/tracing/trace-event.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+TEST(TestTraceConfig) {
+ LocalContext env;
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->EnableSampling();
+ trace_config->AddIncludedCategory("v8");
+ trace_config->AddIncludedCategory(TRACE_DISABLED_BY_DEFAULT("v8.runtime"));
+ trace_config->AddExcludedCategory("v8.cpu_profile");
+
+ CHECK_EQ(trace_config->IsSamplingEnabled(), true);
+ CHECK_EQ(trace_config->IsSystraceEnabled(), false);
+ CHECK_EQ(trace_config->IsArgumentFilterEnabled(), false);
+ CHECK_EQ(trace_config->IsCategoryGroupEnabled("v8"), true);
+ CHECK_EQ(trace_config->IsCategoryGroupEnabled("v8.cpu_profile"), false);
+ CHECK_EQ(trace_config->IsCategoryGroupEnabled("v8.cpu_profile.hires"), false);
+ CHECK_EQ(trace_config->IsCategoryGroupEnabled(
+ TRACE_DISABLED_BY_DEFAULT("v8.runtime")),
+ true);
+ delete trace_config;
+}
+
+TEST(TestTraceObject) {
+ TraceObject trace_object;
+ uint8_t category_enabled_flag = 41;
+ trace_object.Initialize('X', &category_enabled_flag, "Test.Trace",
+ "Test.Scope", 42, 123, 0, NULL, NULL, NULL, 0);
+ CHECK_EQ('X', trace_object.phase());
+ CHECK_EQ(category_enabled_flag, *trace_object.category_enabled_flag());
+ CHECK_EQ(std::string("Test.Trace"), std::string(trace_object.name()));
+ CHECK_EQ(std::string("Test.Scope"), std::string(trace_object.scope()));
+ CHECK_EQ(0, trace_object.duration());
+ CHECK_EQ(0, trace_object.cpu_duration());
+}
+
+class MockTraceWriter : public TraceWriter {
+ public:
+ void AppendTraceEvent(TraceObject* trace_event) override {
+ events_.push_back(trace_event->name());
+ }
+
+ void Flush() override {}
+
+ std::vector<std::string> events() { return events_; }
+
+ private:
+ std::vector<std::string> events_;
+};
+
+TEST(TestTraceBufferRingBuffer) {
+ // We should be able to add kChunkSize * 2 + 1 trace events.
+ const int HANDLES_COUNT = TraceBufferChunk::kChunkSize * 2 + 1;
+ MockTraceWriter* writer = new MockTraceWriter();
+ TraceBuffer* ring_buffer =
+ TraceBuffer::CreateTraceBufferRingBuffer(2, writer);
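+  // With two chunks of kChunkSize slots each, appending kChunkSize * 2 + 1
+  // events wraps the ring buffer and recycles the first chunk, so only the
+  // last kChunkSize + 1 events remain retrievable (checked below).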
+ std::string names[HANDLES_COUNT];
+ for (int i = 0; i < HANDLES_COUNT; ++i) {
+ names[i] = "Test.EventNo" + std::to_string(i);
+ }
+
+ std::vector<uint64_t> handles(HANDLES_COUNT);
+ uint8_t category_enabled_flag = 41;
+ for (size_t i = 0; i < handles.size(); ++i) {
+ TraceObject* trace_object = ring_buffer->AddTraceEvent(&handles[i]);
+ CHECK_NOT_NULL(trace_object);
+ trace_object->Initialize('X', &category_enabled_flag, names[i].c_str(),
+ "Test.Scope", 42, 123, 0, NULL, NULL, NULL, 0);
+ trace_object = ring_buffer->GetEventByHandle(handles[i]);
+ CHECK_NOT_NULL(trace_object);
+ CHECK_EQ('X', trace_object->phase());
+ CHECK_EQ(names[i], std::string(trace_object->name()));
+ CHECK_EQ(category_enabled_flag, *trace_object->category_enabled_flag());
+ }
+
+ // We should only be able to retrieve the last kChunkSize + 1.
+ for (size_t i = 0; i < TraceBufferChunk::kChunkSize; ++i) {
+ CHECK_NULL(ring_buffer->GetEventByHandle(handles[i]));
+ }
+
+ for (size_t i = TraceBufferChunk::kChunkSize; i < handles.size(); ++i) {
+ TraceObject* trace_object = ring_buffer->GetEventByHandle(handles[i]);
+ CHECK_NOT_NULL(trace_object);
+ // The object properties should be correct.
+ CHECK_EQ('X', trace_object->phase());
+ CHECK_EQ(names[i], std::string(trace_object->name()));
+ CHECK_EQ(category_enabled_flag, *trace_object->category_enabled_flag());
+ }
+
+  // Check that Flush() writes out the names of the last kChunkSize + 1 events.
+ ring_buffer->Flush();
+ auto events = writer->events();
+ CHECK_EQ(TraceBufferChunk::kChunkSize + 1, events.size());
+ for (size_t i = TraceBufferChunk::kChunkSize; i < handles.size(); ++i) {
+ CHECK_EQ(names[i], events[i - TraceBufferChunk::kChunkSize]);
+ }
+ delete ring_buffer;
+}
+
+TEST(TestJSONTraceWriter) {
+ std::ostringstream stream;
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
+ i::V8::SetPlatformForTesting(default_platform);
+  // Create a scope so that destroying the tracing controller also tears down
+  // the trace writer.
+ {
+ TracingController tracing_controller;
+ platform::SetTracingController(default_platform, &tracing_controller);
+ TraceWriter* writer = TraceWriter::CreateJSONTraceWriter(stream);
+
+ TraceBuffer* ring_buffer =
+ TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
+ tracing_controller.Initialize(ring_buffer);
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->AddIncludedCategory("v8-cat");
+ tracing_controller.StartTracing(trace_config);
+
+ TraceObject trace_object;
+ trace_object.InitializeForTesting(
+ 'X', tracing_controller.GetCategoryGroupEnabled("v8-cat"), "Test0",
+ v8::internal::tracing::kGlobalScope, 42, 123, 0, NULL, NULL, NULL,
+ TRACE_EVENT_FLAG_HAS_ID, 11, 22, 100, 50, 33, 44);
+ writer->AppendTraceEvent(&trace_object);
+ trace_object.InitializeForTesting(
+ 'Y', tracing_controller.GetCategoryGroupEnabled("v8-cat"), "Test1",
+ v8::internal::tracing::kGlobalScope, 43, 456, 0, NULL, NULL, NULL, 0,
+ 55, 66, 110, 55, 77, 88);
+ writer->AppendTraceEvent(&trace_object);
+ tracing_controller.StopTracing();
+ }
+
+ std::string trace_str = stream.str();
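+  // Only "Test0" should carry an "id" field ("0x2a" == 42), since it alone
+  // was initialized with TRACE_EVENT_FLAG_HAS_ID.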
+ std::string expected_trace_str =
+ "{\"traceEvents\":[{\"pid\":11,\"tid\":22,\"ts\":100,\"tts\":50,"
+ "\"ph\":\"X\",\"cat\":\"v8-cat\",\"name\":\"Test0\",\"dur\":33,"
+ "\"tdur\":44,\"id\":\"0x2a\",\"args\":{}},{\"pid\":55,\"tid\":66,"
+ "\"ts\":110,\"tts\":55,\"ph\":\"Y\",\"cat\":\"v8-cat\",\"name\":"
+ "\"Test1\",\"dur\":77,\"tdur\":88,\"args\":{}}]}";
+
+ CHECK_EQ(expected_trace_str, trace_str);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+TEST(TestTracingController) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
+ i::V8::SetPlatformForTesting(default_platform);
+
+ TracingController tracing_controller;
+ platform::SetTracingController(default_platform, &tracing_controller);
+
+ MockTraceWriter* writer = new MockTraceWriter();
+ TraceBuffer* ring_buffer =
+ TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
+ tracing_controller.Initialize(ring_buffer);
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->AddIncludedCategory("v8");
+ tracing_controller.StartTracing(trace_config);
+
+ TRACE_EVENT0("v8", "v8.Test");
+  // The "cat" category is not in the trace config, so this event is dropped.
+ TRACE_EVENT0("cat", "v8.Test2");
+ TRACE_EVENT0("v8", "v8.Test3");
+ tracing_controller.StopTracing();
+
+ CHECK_EQ(2, writer->events().size());
+ CHECK_EQ(std::string("v8.Test"), writer->events()[0]);
+ CHECK_EQ(std::string("v8.Test3"), writer->events()[1]);
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
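+// Collects every substring that sits between |start_delim| and |end_delim|
+// after an occurrence of |param| in |str|, e.g. the value of each "name"
+// field in the JSON trace output.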
+void GetJSONStrings(std::vector<std::string>& ret, std::string str,
+ std::string param, std::string start_delim,
+ std::string end_delim) {
+ size_t pos = str.find(param);
+ while (pos != std::string::npos) {
+ size_t start_pos = str.find(start_delim, pos + param.length());
+ size_t end_pos = str.find(end_delim, start_pos + 1);
+ CHECK_NE(start_pos, std::string::npos);
+ CHECK_NE(end_pos, std::string::npos);
+ ret.push_back(str.substr(start_pos + 1, end_pos - start_pos - 1));
+ pos = str.find(param, pos + 1);
+ }
+}
+
+TEST(TestTracingControllerMultipleArgsAndCopy) {
+ std::ostringstream stream;
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
+ i::V8::SetPlatformForTesting(default_platform);
+
+ uint64_t aa = 11;
+ unsigned int bb = 22;
+ uint16_t cc = 33;
+ unsigned char dd = 44;
+ int64_t ee = -55;
+ int ff = -66;
+ int16_t gg = -77;
+ signed char hh = -88;
+ bool ii1 = true;
+ bool ii2 = false;
+ double jj1 = 99.0;
+ double jj2 = 1e100;
+ double jj3 = std::numeric_limits<double>::quiet_NaN();
+ double jj4 = std::numeric_limits<double>::infinity();
+ double jj5 = -std::numeric_limits<double>::infinity();
+ void* kk = &aa;
+ const char* ll = "100";
+ std::string mm = "INIT";
+ std::string mmm = "\"INIT\"";
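+  // JSON has no literals for NaN or the infinities, so the writer is expected
+  // to encode jj3-jj5 as quoted strings (checked below).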
+
+  // Create a scope so that destroying the tracing controller also tears down
+  // the trace writer.
+ {
+ TracingController tracing_controller;
+ platform::SetTracingController(default_platform, &tracing_controller);
+ TraceWriter* writer = TraceWriter::CreateJSONTraceWriter(stream);
+
+ TraceBuffer* ring_buffer =
+ TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
+ tracing_controller.Initialize(ring_buffer);
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->AddIncludedCategory("v8");
+ tracing_controller.StartTracing(trace_config);
+
+ TRACE_EVENT1("v8", "v8.Test.aa", "aa", aa);
+ TRACE_EVENT1("v8", "v8.Test.bb", "bb", bb);
+ TRACE_EVENT1("v8", "v8.Test.cc", "cc", cc);
+ TRACE_EVENT1("v8", "v8.Test.dd", "dd", dd);
+ TRACE_EVENT1("v8", "v8.Test.ee", "ee", ee);
+ TRACE_EVENT1("v8", "v8.Test.ff", "ff", ff);
+ TRACE_EVENT1("v8", "v8.Test.gg", "gg", gg);
+ TRACE_EVENT1("v8", "v8.Test.hh", "hh", hh);
+ TRACE_EVENT1("v8", "v8.Test.ii", "ii1", ii1);
+ TRACE_EVENT1("v8", "v8.Test.ii", "ii2", ii2);
+ TRACE_EVENT1("v8", "v8.Test.jj1", "jj1", jj1);
+ TRACE_EVENT1("v8", "v8.Test.jj2", "jj2", jj2);
+ TRACE_EVENT1("v8", "v8.Test.jj3", "jj3", jj3);
+ TRACE_EVENT1("v8", "v8.Test.jj4", "jj4", jj4);
+ TRACE_EVENT1("v8", "v8.Test.jj5", "jj5", jj5);
+ TRACE_EVENT1("v8", "v8.Test.kk", "kk", kk);
+ TRACE_EVENT1("v8", "v8.Test.ll", "ll", ll);
+ TRACE_EVENT1("v8", "v8.Test.mm", "mm", TRACE_STR_COPY(mmm.c_str()));
+
+ TRACE_EVENT2("v8", "v8.Test2.1", "aa", aa, "ll", ll);
+ TRACE_EVENT2("v8", "v8.Test2.2", "mm1", TRACE_STR_COPY(mm.c_str()), "mm2",
+ TRACE_STR_COPY(mmm.c_str()));
+
+    // Check that the COPY variants snapshot the strings at event time; the
+    // mutations below must not change what was recorded.
+ TRACE_EVENT_COPY_INSTANT0("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_COPY_INSTANT2("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD, "mm1",
+ mm.c_str(), "mm2", mmm.c_str());
+ mm = "CHANGED";
+ mmm = "CHANGED";
+
+ tracing_controller.StopTracing();
+ }
+
+ std::string trace_str = stream.str();
+
+ std::vector<std::string> all_args, all_names, all_cats;
+ GetJSONStrings(all_args, trace_str, "\"args\"", "{", "}");
+ GetJSONStrings(all_names, trace_str, "\"name\"", "\"", "\"");
+ GetJSONStrings(all_cats, trace_str, "\"cat\"", "\"", "\"");
+
+ CHECK_EQ(all_args.size(), 22);
+ CHECK_EQ(all_args[0], "\"aa\":11");
+ CHECK_EQ(all_args[1], "\"bb\":22");
+ CHECK_EQ(all_args[2], "\"cc\":33");
+ CHECK_EQ(all_args[3], "\"dd\":44");
+ CHECK_EQ(all_args[4], "\"ee\":-55");
+ CHECK_EQ(all_args[5], "\"ff\":-66");
+ CHECK_EQ(all_args[6], "\"gg\":-77");
+ CHECK_EQ(all_args[7], "\"hh\":-88");
+ CHECK_EQ(all_args[8], "\"ii1\":true");
+ CHECK_EQ(all_args[9], "\"ii2\":false");
+ CHECK_EQ(all_args[10], "\"jj1\":99.0");
+ CHECK_EQ(all_args[11], "\"jj2\":1e+100");
+ CHECK_EQ(all_args[12], "\"jj3\":\"NaN\"");
+ CHECK_EQ(all_args[13], "\"jj4\":\"Infinity\"");
+ CHECK_EQ(all_args[14], "\"jj5\":\"-Infinity\"");
+ std::ostringstream pointer_stream;
+ pointer_stream << "\"kk\":\"" << &aa << "\"";
+ CHECK_EQ(all_args[15], pointer_stream.str());
+ CHECK_EQ(all_args[16], "\"ll\":\"100\"");
+ CHECK_EQ(all_args[17], "\"mm\":\"\\\"INIT\\\"\"");
+
+ CHECK_EQ(all_names[18], "v8.Test2.1");
+ CHECK_EQ(all_args[18], "\"aa\":11,\"ll\":\"100\"");
+ CHECK_EQ(all_args[19], "\"mm1\":\"INIT\",\"mm2\":\"\\\"INIT\\\"\"");
+
+ CHECK_EQ(all_names[20], "INIT");
+ CHECK_EQ(all_names[21], "INIT");
+ CHECK_EQ(all_args[21], "\"mm1\":\"INIT\",\"mm2\":\"\\\"INIT\\\"\"");
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/test/cctest/libsampler/test-sampler.cc b/deps/v8/test/cctest/libsampler/test-sampler.cc
new file mode 100644
index 0000000000..b88d347914
--- /dev/null
+++ b/deps/v8/test/cctest/libsampler/test-sampler.cc
@@ -0,0 +1,140 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Tests of sampler functionality.
+
+#include "src/libsampler/sampler.h"
+
+#include "src/base/platform/platform.h"
+#include "test/cctest/cctest.h"
+
+
+namespace v8 {
+namespace sampler {
+
+namespace {
+
+class TestSamplingThread : public base::Thread {
+ public:
+ static const int kSamplerThreadStackSize = 64 * 1024;
+
+ explicit TestSamplingThread(Sampler* sampler)
+ : Thread(base::Thread::Options("TestSamplingThread",
+ kSamplerThreadStackSize)),
+ sampler_(sampler) {}
+
+ // Implement Thread::Run().
+ void Run() override {
+ while (sampler_->IsProfiling()) {
+ sampler_->DoSample();
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
+ }
+ }
+
+ private:
+ Sampler* sampler_;
+};
+
+
+class TestSampler : public Sampler {
+ public:
+ explicit TestSampler(Isolate* isolate) : Sampler(isolate) {}
+
+ void SampleStack(const v8::RegisterState& regs) override {
+ void* frames[kMaxFramesCount];
+ SampleInfo sample_info;
+ isolate()->GetStackSample(regs, frames, kMaxFramesCount, &sample_info);
+ if (is_counting_samples_) {
+ if (sample_info.vm_state == JS) ++js_sample_count_;
+ if (sample_info.vm_state == EXTERNAL) ++external_sample_count_;
+ }
+ }
+};
+
+
+class TestApiCallbacks {
+ public:
+ TestApiCallbacks() {}
+
+ static void Getter(v8::Local<v8::String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ }
+
+ static void Setter(v8::Local<v8::String> name,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ }
+};
+
+
+static void RunSampler(v8::Local<v8::Context> env,
+ v8::Local<v8::Function> function,
+ v8::Local<v8::Value> argv[], int argc,
+ unsigned min_js_samples = 0,
+ unsigned min_external_samples = 0) {
+ Sampler::SetUp();
+ TestSampler* sampler = new TestSampler(env->GetIsolate());
+ TestSamplingThread* thread = new TestSamplingThread(sampler);
+ sampler->IncreaseProfilingDepth();
+ sampler->Start();
+ sampler->StartCountingSamples();
+ thread->StartSynchronously();
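+  // Keep re-entering the JS function until the sampling thread has collected
+  // the requested number of JS and external samples.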
+ do {
+ function->Call(env, env->Global(), argc, argv).ToLocalChecked();
+ } while (sampler->js_sample_count() < min_js_samples ||
+ sampler->external_sample_count() < min_external_samples);
+ sampler->Stop();
+ sampler->DecreaseProfilingDepth();
+ thread->Join();
+ delete thread;
+ delete sampler;
+ Sampler::TearDown();
+}
+
+} // namespace
+
+static const char* sampler_test_source = "function start(count) {\n"
+" for (var i = 0; i < count; i++) {\n"
+" var o = instance.foo;\n"
+" instance.foo = o + 1;\n"
+" }\n"
+"}\n";
+
+static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
+ const char* name) {
+ return v8::Local<v8::Function>::Cast(
+ env->Global()->Get(env, v8_str(name)).ToLocalChecked());
+}
+
+
+TEST(LibSamplerCollectSample) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::ObjectTemplate> instance_template =
+ func_template->InstanceTemplate();
+
+ TestApiCallbacks accessors;
+ v8::Local<v8::External> data =
+ v8::External::New(isolate, &accessors);
+ instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
+ &TestApiCallbacks::Setter, data);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ v8::Local<v8::Object> instance =
+ func->NewInstance(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("instance"), instance).FromJust();
+
+ CompileRun(sampler_test_source);
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+
+ int32_t repeat_count = 100;
+ v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
+ RunSampler(env.local(), function, args, arraysize(args), 100, 100);
+}
+
+} // namespace sampler
+} // namespace v8
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
index 024cc9c635..df5cec79ce 100644
--- a/deps/v8/test/cctest/profiler-extension.cc
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -33,7 +33,8 @@
namespace v8 {
namespace internal {
-v8::CpuProfile* ProfilerExtension::last_profile = NULL;
+v8::CpuProfiler* ProfilerExtension::profiler_ = nullptr;
+v8::CpuProfile* ProfilerExtension::last_profile = nullptr;
const char* ProfilerExtension::kSource =
"native function startProfiling();"
"native function stopProfiling();"
@@ -58,24 +59,22 @@ v8::Local<v8::FunctionTemplate> ProfilerExtension::GetNativeFunctionTemplate(
void ProfilerExtension::StartProfiling(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- last_profile = NULL;
- v8::CpuProfiler* cpu_profiler = args.GetIsolate()->GetCpuProfiler();
- cpu_profiler->StartProfiling((args.Length() > 0)
- ? args[0].As<v8::String>()
- : v8::String::Empty(args.GetIsolate()));
+ last_profile = nullptr;
+ profiler_->StartProfiling(args.Length() > 0
+ ? args[0].As<v8::String>()
+ : v8::String::Empty(args.GetIsolate()));
}
void ProfilerExtension::StopProfiling(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::CpuProfiler* cpu_profiler = args.GetIsolate()->GetCpuProfiler();
- last_profile = cpu_profiler->StopProfiling((args.Length() > 0)
- ? args[0].As<v8::String>()
- : v8::String::Empty(args.GetIsolate()));
+ last_profile = profiler_->StopProfiling(
+ args.Length() > 0 ? args[0].As<v8::String>()
+ : v8::String::Empty(args.GetIsolate()));
}
void ProfilerExtension::CollectSample(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetIsolate()->GetCpuProfiler()->CollectSample();
+ profiler_->CollectSample();
}
} // namespace internal
diff --git a/deps/v8/test/cctest/profiler-extension.h b/deps/v8/test/cctest/profiler-extension.h
index 00f9a5a808..dbc12f47a0 100644
--- a/deps/v8/test/cctest/profiler-extension.h
+++ b/deps/v8/test/cctest/profiler-extension.h
@@ -35,11 +35,20 @@
namespace v8 {
namespace internal {
+class CpuProfiler;
+
class ProfilerExtension : public v8::Extension {
public:
ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
+
virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> name);
+
+ static void set_profiler(v8::CpuProfiler* profiler) { profiler_ = profiler; }
+ static void set_profiler(CpuProfiler* profiler) {
+ profiler_ = reinterpret_cast<v8::CpuProfiler*>(profiler);
+ }
+ static v8::CpuProfiler* profiler() { return profiler_; }
static v8::CpuProfile* last_profile;
private:
@@ -47,6 +56,7 @@ class ProfilerExtension : public v8::Extension {
static void StopProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
static void CollectSample(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static v8::CpuProfiler* profiler_;
static const char* kSource;
};
diff --git a/deps/v8/test/cctest/test-access-checks.cc b/deps/v8/test/cctest/test-access-checks.cc
new file mode 100644
index 0000000000..59c17b89eb
--- /dev/null
+++ b/deps/v8/test/cctest/test-access-checks.cc
@@ -0,0 +1,305 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "test/cctest/cctest.h"
+
+namespace {
+
+int32_t g_cross_context_int = 0;
+
+bool g_expect_interceptor_call = false;
+
+void NamedGetter(v8::Local<v8::Name> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (property->Equals(context, v8_str("cross_context_int")).FromJust())
+ info.GetReturnValue().Set(g_cross_context_int);
+}
+
+void NamedSetter(v8::Local<v8::Name> property, v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (!property->Equals(context, v8_str("cross_context_int")).FromJust())
+ return;
+ if (value->IsInt32()) {
+ g_cross_context_int = value->ToInt32(context).ToLocalChecked()->Value();
+ }
+ info.GetReturnValue().Set(value);
+}
+
+void NamedQuery(v8::Local<v8::Name> property,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ CHECK(g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (!property->Equals(context, v8_str("cross_context_int")).FromJust())
+ return;
+ info.GetReturnValue().Set(v8::DontDelete);
+}
+
+void NamedDeleter(v8::Local<v8::Name> property,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ CHECK(g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (!property->Equals(context, v8_str("cross_context_int")).FromJust())
+ return;
+ info.GetReturnValue().Set(false);
+}
+
+void NamedEnumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
+ CHECK(g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Array> names = v8::Array::New(isolate, 1);
+ names->Set(context, 0, v8_str("cross_context_int")).FromJust();
+ info.GetReturnValue().Set(names);
+}
+
+void IndexedGetter(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(g_expect_interceptor_call);
+ if (index == 7) info.GetReturnValue().Set(g_cross_context_int);
+}
+
+void IndexedSetter(uint32_t index, v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (index != 7) return;
+ if (value->IsInt32()) {
+ g_cross_context_int = value->ToInt32(context).ToLocalChecked()->Value();
+ }
+ info.GetReturnValue().Set(value);
+}
+
+void IndexedQuery(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ CHECK(g_expect_interceptor_call);
+ if (index == 7) info.GetReturnValue().Set(v8::DontDelete);
+}
+
+void IndexedDeleter(uint32_t index,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ CHECK(g_expect_interceptor_call);
+ if (index == 7) info.GetReturnValue().Set(false);
+}
+
+void IndexedEnumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
+ CHECK(g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Array> names = v8::Array::New(isolate, 1);
+ names->Set(context, 0, v8_str("7")).FromJust();
+ info.GetReturnValue().Set(names);
+}
+
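+// Deny all cross-context access unconditionally, so that every property hit
+// from another context must go through the interceptors above.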
+bool AccessCheck(v8::Local<v8::Context> accessing_context,
+ v8::Local<v8::Object> accessed_object,
+ v8::Local<v8::Value> data) {
+ return false;
+}
+
+void GetCrossContextInt(v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CHECK(!g_expect_interceptor_call);
+ info.GetReturnValue().Set(g_cross_context_int);
+}
+
+void SetCrossContextInt(v8::Local<v8::String> property,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ CHECK(!g_expect_interceptor_call);
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (value->IsInt32()) {
+ g_cross_context_int = value->ToInt32(context).ToLocalChecked()->Value();
+ }
+}
+
+void Return42(v8::Local<v8::String> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(42);
+}
+
+void CheckCanRunScriptInContext(v8::Isolate* isolate,
+ v8::Local<v8::Context> context) {
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(context);
+
+ g_expect_interceptor_call = false;
+ g_cross_context_int = 0;
+
+ // Running script in this context should work.
+ CompileRunChecked(isolate, "this.foo = 42; this[23] = true;");
+ ExpectInt32("this.all_can_read", 42);
+ CompileRunChecked(isolate, "this.cross_context_int = 23");
+ CHECK_EQ(g_cross_context_int, 23);
+ ExpectInt32("this.cross_context_int", 23);
+}
+
+void CheckCrossContextAccess(v8::Isolate* isolate,
+ v8::Local<v8::Context> accessing_context,
+ v8::Local<v8::Object> accessed_object) {
+ v8::HandleScope handle_scope(isolate);
+ accessing_context->Global()
+ ->Set(accessing_context, v8_str("other"), accessed_object)
+ .FromJust();
+ v8::Context::Scope context_scope(accessing_context);
+
+ g_expect_interceptor_call = true;
+ g_cross_context_int = 23;
+
+ {
+ v8::TryCatch try_catch(isolate);
+ CHECK(CompileRun(accessing_context, "this.other.foo").IsEmpty());
+ }
+ {
+ v8::TryCatch try_catch(isolate);
+ CHECK(CompileRun(accessing_context, "this.other[23]").IsEmpty());
+ }
+
+ // AllCanRead properties are also inaccessible.
+ {
+ v8::TryCatch try_catch(isolate);
+ CHECK(CompileRun(accessing_context, "this.other.all_can_read").IsEmpty());
+ }
+
+ // Intercepted properties are accessible, however.
+ ExpectInt32("this.other.cross_context_int", 23);
+ CompileRunChecked(isolate, "this.other.cross_context_int = 42");
+ ExpectInt32("this.other[7]", 42);
+ ExpectString("JSON.stringify(Object.getOwnPropertyNames(this.other))",
+ "[\"7\",\"cross_context_int\"]");
+}
+
+void Ctor(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CHECK(info.IsConstructCall());
+}
+
+} // namespace
+
+TEST(AccessCheckWithInterceptor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ global_template->SetAccessCheckCallbackAndHandler(
+ AccessCheck,
+ v8::NamedPropertyHandlerConfiguration(
+ NamedGetter, NamedSetter, NamedQuery, NamedDeleter, NamedEnumerator),
+ v8::IndexedPropertyHandlerConfiguration(IndexedGetter, IndexedSetter,
+ IndexedQuery, IndexedDeleter,
+ IndexedEnumerator));
+ global_template->SetNativeDataProperty(
+ v8_str("cross_context_int"), GetCrossContextInt, SetCrossContextInt);
+ global_template->SetNativeDataProperty(
+ v8_str("all_can_read"), Return42, nullptr, v8::Local<v8::Value>(),
+ v8::None, v8::Local<v8::AccessorSignature>(), v8::ALL_CAN_READ);
+
+ v8::Local<v8::Context> context0 =
+ v8::Context::New(isolate, nullptr, global_template);
+ CheckCanRunScriptInContext(isolate, context0);
+
+ // Create another context.
+ v8::Local<v8::Context> context1 =
+ v8::Context::New(isolate, nullptr, global_template);
+ CheckCrossContextAccess(isolate, context1, context0->Global());
+}
+
+TEST(NewRemoteContext) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ global_template->SetAccessCheckCallbackAndHandler(
+ AccessCheck,
+ v8::NamedPropertyHandlerConfiguration(
+ NamedGetter, NamedSetter, NamedQuery, NamedDeleter, NamedEnumerator),
+ v8::IndexedPropertyHandlerConfiguration(IndexedGetter, IndexedSetter,
+ IndexedQuery, IndexedDeleter,
+ IndexedEnumerator));
+ global_template->SetNativeDataProperty(
+ v8_str("cross_context_int"), GetCrossContextInt, SetCrossContextInt);
+ global_template->SetNativeDataProperty(
+ v8_str("all_can_read"), Return42, nullptr, v8::Local<v8::Value>(),
+ v8::None, v8::Local<v8::AccessorSignature>(), v8::ALL_CAN_READ);
+
+ v8::Local<v8::Object> global0 =
+ v8::Context::NewRemoteContext(isolate, global_template).ToLocalChecked();
+
+ // Create a real context.
+ {
+ v8::HandleScope other_scope(isolate);
+ v8::Local<v8::Context> context1 =
+ v8::Context::New(isolate, nullptr, global_template);
+
+ CheckCrossContextAccess(isolate, context1, global0);
+ }
+
+ // Create a context using the detached global.
+ {
+ v8::HandleScope other_scope(isolate);
+ v8::Local<v8::Context> context2 =
+ v8::Context::New(isolate, nullptr, global_template, global0);
+
+ CheckCanRunScriptInContext(isolate, context2);
+ }
+
+ // Turn a regular context into a remote context.
+ {
+ v8::HandleScope other_scope(isolate);
+ v8::Local<v8::Context> context3 =
+ v8::Context::New(isolate, nullptr, global_template);
+
+ CheckCanRunScriptInContext(isolate, context3);
+
+ // Turn the global object into a remote context, and try to access it.
+ v8::Local<v8::Object> context3_global = context3->Global();
+ context3->DetachGlobal();
+ v8::Local<v8::Object> global3 =
+ v8::Context::NewRemoteContext(isolate, global_template, context3_global)
+ .ToLocalChecked();
+ v8::Local<v8::Context> context4 =
+ v8::Context::New(isolate, nullptr, global_template);
+
+ CheckCrossContextAccess(isolate, context4, global3);
+
+ // Turn it back into a regular context.
+ v8::Local<v8::Context> context5 =
+ v8::Context::New(isolate, nullptr, global_template, global3);
+
+ CheckCanRunScriptInContext(isolate, context5);
+ }
+}
+
+TEST(NewRemoteInstance) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::FunctionTemplate> tmpl =
+ v8::FunctionTemplate::New(isolate, Ctor);
+ v8::Local<v8::ObjectTemplate> instance = tmpl->InstanceTemplate();
+ instance->SetAccessCheckCallbackAndHandler(
+ AccessCheck,
+ v8::NamedPropertyHandlerConfiguration(
+ NamedGetter, NamedSetter, NamedQuery, NamedDeleter, NamedEnumerator),
+ v8::IndexedPropertyHandlerConfiguration(IndexedGetter, IndexedSetter,
+ IndexedQuery, IndexedDeleter,
+ IndexedEnumerator));
+ tmpl->SetNativeDataProperty(
+ v8_str("all_can_read"), Return42, nullptr, v8::Local<v8::Value>(),
+ v8::None, v8::Local<v8::AccessorSignature>(), v8::ALL_CAN_READ);
+
+ v8::Local<v8::Object> obj = tmpl->NewRemoteInstance().ToLocalChecked();
+
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ CheckCrossContextAccess(isolate, context, obj);
+}
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 67803eeed6..9667afb703 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -775,19 +775,18 @@ TEST(PrototypeGetterAccessCheck) {
}
}
-static void check_receiver(Local<String> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+static void CheckReceiver(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
CHECK(info.This()->IsObject());
}
TEST(Regress609134) {
- v8::internal::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
auto fun_templ = v8::FunctionTemplate::New(isolate);
fun_templ->InstanceTemplate()->SetNativeDataProperty(v8_str("foo"),
- check_receiver);
+ CheckReceiver);
CHECK(env->Global()
->Set(env.local(), v8_str("Fun"),
@@ -797,5 +796,6 @@ TEST(Regress609134) {
CompileRun(
"var f = new Fun();"
"Number.prototype.__proto__ = f;"
- "[42][0].foo");
+ "var a = 42;"
+ "for (var i = 0; i<3; i++) { a.foo; }");
}
diff --git a/deps/v8/test/cctest/test-api-fast-accessor-builder.cc b/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
index eeb6b96fbc..6612f9047c 100644
--- a/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
+++ b/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
@@ -60,8 +60,24 @@ static void NativePropertyAccessor(
info.GetReturnValue().Set(v8_num(123));
}
+const char* kWatermarkProperty = "watermark";
+
} // anonymous namespace
+void CheckImplicitParameters(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ v8::Isolate* isolate = info.GetIsolate();
+ CHECK_NOT_NULL(isolate);
+
+ auto context = isolate->GetCurrentContext();
+ CHECK(!context.IsEmpty());
+
+  // The context must belong to the same isolate; this should be enough to
+  // validate the context and guard against receiving a random object instead.
+ CHECK_EQ(isolate, context->GetIsolate());
+ CHECK(info.Data()->IsUndefined());
+
+ CHECK(info.Holder()->Has(context, v8_str(kWatermarkProperty)).FromJust());
+}
// Build a simple "fast accessor" and verify that it is being called.
TEST(FastAccessor) {
@@ -97,33 +113,40 @@ TEST(FastAccessor) {
ExpectInt32("barf()", 124); // Call via warmed-up callsite.
}
-
void AddInternalFieldAccessor(v8::Isolate* isolate,
v8::Local<v8::Template> templ, const char* name,
- int field_no) {
+ int field_no, bool useUncheckedLoader) {
auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
- builder->ReturnValue(
- builder->LoadInternalField(builder->GetReceiver(), field_no));
+
+ if (useUncheckedLoader) {
+ builder->ReturnValue(
+ builder->LoadInternalFieldUnchecked(builder->GetReceiver(), field_no));
+ } else {
+ builder->ReturnValue(
+ builder->LoadInternalField(builder->GetReceiver(), field_no));
+ }
+
templ->SetAccessorProperty(v8_str(name),
v8::FunctionTemplate::NewWithFastHandler(
isolate, NativePropertyAccessor, builder));
}
-
-// "Fast" accessor that accesses an internal field.
-TEST(FastAccessorWithInternalField) {
+void checkLoadInternalField(bool useUncheckedLoader, bool emitDebugChecks) {
// Crankshaft support for fast accessors is not implemented; crankshafted
// code uses the slow accessor which breaks this test's expectations.
v8::internal::FLAG_always_opt = false;
+
+  // Enable or disable emission of debug checks.
+ v8::internal::FLAG_debug_code = emitDebugChecks;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
foo->SetInternalFieldCount(3);
- AddInternalFieldAccessor(isolate, foo, "field0", 0);
- AddInternalFieldAccessor(isolate, foo, "field1", 1);
- AddInternalFieldAccessor(isolate, foo, "field2", 2);
+ AddInternalFieldAccessor(isolate, foo, "field0", 0, useUncheckedLoader);
+ AddInternalFieldAccessor(isolate, foo, "field1", 1, useUncheckedLoader);
+ AddInternalFieldAccessor(isolate, foo, "field2", 2, useUncheckedLoader);
// Create an instance w/ 3 internal fields, put in a string, a Smi, nothing.
v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
@@ -142,6 +165,15 @@ TEST(FastAccessorWithInternalField) {
ExpectUndefined("field2()");
}
+// "Fast" accessor that accesses an internal field.
+TEST(FastAccessorWithInternalField) { checkLoadInternalField(false, false); }
+
+// "Fast" accessor that accesses an internal field using the fast(er)
+// implementation of LoadInternalField.
+TEST(FastAccessorLoadInternalFieldUnchecked) {
+ checkLoadInternalField(true, false);
+ checkLoadInternalField(true, true);
+}
// "Fast" accessor with control flow via ...OrReturnNull methods.
TEST(FastAccessorOrReturnNull) {
@@ -189,9 +221,9 @@ TEST(FastAccessorOrReturnNull) {
// CheckFlagSetOrReturnNull:
CompileRun(FN_WARMUP("maskcheck", "return obj.maskcheck"));
obj->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0xf0));
- ExpectInt32("maskcheck()", 42);
- obj->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0xfe));
ExpectNull("maskcheck()");
+ obj->SetAlignedPointerInInternalField(1, reinterpret_cast<void*>(0xfe));
+ ExpectInt32("maskcheck()", 42);
}
@@ -212,9 +244,9 @@ TEST(FastAccessorControlFlowWithLabels) {
auto label = builder->MakeLabel();
auto val = builder->LoadInternalField(builder->GetReceiver(), 0);
builder->CheckNotZeroOrJump(val, label);
- builder->ReturnValue(builder->IntegerConstant(0));
- builder->SetLabel(label);
builder->ReturnValue(builder->IntegerConstant(1));
+ builder->SetLabel(label);
+ builder->ReturnValue(builder->IntegerConstant(0));
foo->SetAccessorProperty(v8_str("isnull"),
v8::FunctionTemplate::NewWithFastHandler(
isolate, NativePropertyAccessor, builder));
@@ -262,9 +294,9 @@ TEST(FastAccessorLoad) {
auto val = builder->LoadValue(
builder->LoadInternalField(builder->GetReceiver(), 0), intval_offset);
builder->CheckNotZeroOrJump(val, label);
- builder->ReturnValue(builder->IntegerConstant(0));
- builder->SetLabel(label);
builder->ReturnValue(builder->IntegerConstant(1));
+ builder->SetLabel(label);
+ builder->ReturnValue(builder->IntegerConstant(0));
foo->SetAccessorProperty(v8_str("nonzero"),
v8::FunctionTemplate::NewWithFastHandler(
isolate, NativePropertyAccessor, builder));
@@ -300,16 +332,19 @@ TEST(FastAccessorLoad) {
}
void ApiCallbackInt(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CheckImplicitParameters(info);
info.GetReturnValue().Set(12345);
}
const char* kApiCallbackStringValue =
"Hello World! Bizarro C++ world, actually.";
void ApiCallbackString(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CheckImplicitParameters(info);
info.GetReturnValue().Set(v8_str(kApiCallbackStringValue));
}
void ApiCallbackParam(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CheckImplicitParameters(info);
CHECK_EQ(1, info.Length());
CHECK(info[0]->IsNumber());
info.GetReturnValue().Set(info[0]);
@@ -348,6 +383,9 @@ TEST(FastAccessorCallback) {
isolate, NativePropertyAccessor, builder));
}
+ // Add dummy property to validate the holder.
+ foo->Set(isolate, kWatermarkProperty, v8::Undefined(isolate));
+
// Create an instance.
v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
@@ -362,3 +400,92 @@ TEST(FastAccessorCallback) {
CompileRun(FN_WARMUP("callbackparam", "return obj.param"));
ExpectInt32("callbackparam()", 1000);
}
+
+TEST(FastAccessorToSmi) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+ // code uses the slow accessor which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ foo->SetInternalFieldCount(1);
+
+ {
+ // Accessor load_smi.
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+
+ // Read the variable and convert it to a Smi.
+ auto flags = builder->LoadValue(
+ builder->LoadInternalField(builder->GetReceiver(), 0), 0);
+ builder->ReturnValue(builder->ToSmi(flags));
+ foo->SetAccessorProperty(v8_str("load_smi"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+
+ // Create an instance.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+
+ uintptr_t flags;
+ obj->SetAlignedPointerInInternalField(0, &flags);
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
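+  // load_smi dereferences the pointer stored in internal field 0 on every
+  // call, so updating |flags| below changes what the accessor returns.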
+
+ // Access flags.
+ CompileRun(FN_WARMUP("load_smi", "return obj.load_smi"));
+
+ flags = 54321;
+ ExpectInt32("load_smi()", 54321);
+
+ flags = 0;
+ ExpectInt32("load_smi()", 0);
+
+ flags = 123456789;
+ ExpectInt32("load_smi()", 123456789);
+}
+
+TEST(FastAccessorGoto) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+ // code uses the slow accessor which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ foo->SetInternalFieldCount(1);
+
+ {
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ auto successLabel = builder->MakeLabel();
+ auto failLabel = builder->MakeLabel();
+
+    // The underlying raw assembler rejects unreachable basic blocks, so this
+    // check has no effect other than marking the fail-return basic block as
+    // reachable.
+ builder->CheckNotZeroOrJump(builder->IntegerConstant(1234), failLabel);
+
+ builder->Goto(successLabel);
+
+ builder->SetLabel(failLabel);
+ builder->ReturnValue(builder->IntegerConstant(0));
+
+ builder->SetLabel(successLabel);
+ builder->ReturnValue(builder->IntegerConstant(60707357));
+
+ foo->SetAccessorProperty(v8_str("goto_test"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+
+ // Create an instance.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // Access flags.
+ CompileRun(FN_WARMUP("test", "return obj.goto_test"));
+
+ ExpectInt32("test()", 60707357);
+}
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index a1894fad1a..6e4c6028e9 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -16,7 +16,6 @@
#include "src/parsing/parser.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
-#include "src/vm-state.h"
using ::v8::Boolean;
using ::v8::BooleanObject;
@@ -854,6 +853,66 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
CHECK_EQ(42 * 10, value->Int32Value(context.local()).FromJust());
}
+// Test load of a non-existing global when the global object has an
+// interceptor.
+THREADED_TEST(InterceptorLoadGlobalICGlobalWithInterceptor) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::ObjectTemplate> templ_global = v8::ObjectTemplate::New(isolate);
+ templ_global->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ EmptyInterceptorGetter, EmptyInterceptorSetter));
+
+ LocalContext context(nullptr, templ_global);
+ i::Handle<i::JSReceiver> global_proxy =
+ v8::Utils::OpenHandle<Object, i::JSReceiver>(context->Global());
+ CHECK(global_proxy->IsJSGlobalProxy());
+ i::Handle<i::JSGlobalObject> global(
+ i::JSGlobalObject::cast(global_proxy->map()->prototype()));
+ CHECK(global->map()->has_named_interceptor());
+
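+  // Warm up the load IC on non-existing globals, with and without typeof;
+  // the results must stay correct once the IC is hot.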
+ v8::Local<Value> value = CompileRun(
+ "var f = function() { "
+ " try {"
+ " x1;"
+ " } catch(e) {"
+ " }"
+ " return typeof x1 === 'undefined';"
+ "};"
+ "for (var i = 0; i < 10; i++) {"
+ " f();"
+ "};"
+ "f();");
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
+
+ value = CompileRun(
+ "var f = function() { "
+ " try {"
+ " x2;"
+ " return false;"
+ " } catch(e) {"
+ " return true;"
+ " }"
+ "};"
+ "for (var i = 0; i < 10; i++) {"
+ " f();"
+ "};"
+ "f();");
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
+
+ value = CompileRun(
+ "var f = function() { "
+ " try {"
+ " typeof(x3);"
+ " return true;"
+ " } catch(e) {"
+ " return false;"
+ " }"
+ "};"
+ "for (var i = 0; i < 10; i++) {"
+ " f();"
+ "};"
+ "f();");
+ CHECK_EQ(true, value->BooleanValue(context.local()).FromJust());
+}
static void InterceptorLoadICGetter0(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -2270,33 +2329,34 @@ THREADED_TEST(Enumerators) {
// This order is not mandated by the spec, so this test is just
// documenting our behavior.
CHECK_EQ(17u, result->Length());
- // Indexed properties + indexed interceptor properties in numerical order.
- CHECK(v8_str("0")
+ // Indexed properties.
+ CHECK(v8_str("5")
->Equals(context.local(),
result->Get(context.local(), v8::Integer::New(isolate, 0))
.ToLocalChecked())
.FromJust());
- CHECK(v8_str("1")
+ CHECK(v8_str("10")
->Equals(context.local(),
result->Get(context.local(), v8::Integer::New(isolate, 1))
.ToLocalChecked())
.FromJust());
- CHECK(v8_str("5")
+ CHECK(v8_str("140000")
->Equals(context.local(),
result->Get(context.local(), v8::Integer::New(isolate, 2))
.ToLocalChecked())
.FromJust());
- CHECK(v8_str("10")
+ CHECK(v8_str("4294967294")
->Equals(context.local(),
result->Get(context.local(), v8::Integer::New(isolate, 3))
.ToLocalChecked())
.FromJust());
- CHECK(v8_str("140000")
+  // Indexed interceptor properties.
+ CHECK(v8_str("0")
->Equals(context.local(),
result->Get(context.local(), v8::Integer::New(isolate, 4))
.ToLocalChecked())
.FromJust());
- CHECK(v8_str("4294967294")
+ CHECK(v8_str("1")
->Equals(context.local(),
result->Get(context.local(), v8::Integer::New(isolate, 5))
.ToLocalChecked())
@@ -3245,6 +3305,25 @@ THREADED_TEST(Regress149912) {
CompileRun("Number.prototype.__proto__ = new Bug; var x = 0; x.foo();");
}
+THREADED_TEST(Regress625155) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
+ AddInterceptor(templ, EmptyInterceptorGetter, EmptyInterceptorSetter);
+ context->Global()
+ ->Set(context.local(), v8_str("Bug"),
+ templ->GetFunction(context.local()).ToLocalChecked())
+ .FromJust();
+ CompileRun(
+ "Number.prototype.__proto__ = new Bug;"
+ "var x;"
+ "x = 0xdead;"
+ "x.boom = 0;"
+ "x = 's';"
+ "x.boom = 0;"
+ "x = 1.5;"
+ "x.boom = 0;");
+}
THREADED_TEST(Regress125988) {
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 220b0cd077..484d2f3226 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -28,6 +28,7 @@
#include <climits>
#include <csignal>
#include <map>
+#include <memory>
#include <string>
#include "test/cctest/test-api.h"
@@ -40,18 +41,19 @@
#include "src/api.h"
#include "src/arguments.h"
#include "src/base/platform/platform.h"
-#include "src/base/smart-pointers.h"
+#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/debug/debug.h"
#include "src/execution.h"
#include "src/futex-emulation.h"
#include "src/objects.h"
#include "src/parsing/parser.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
#include "test/cctest/heap/heap-tester.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
static const bool kLogThreading = false;
@@ -95,11 +97,11 @@ void RunWithProfiler(void (*test)()) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::Local<v8::String> profile_name = v8_str("my_profile1");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
-
+ v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
cpu_profiler->StartProfiling(profile_name);
(*test)();
reinterpret_cast<i::CpuProfiler*>(cpu_profiler)->DeleteAllProfiles();
+ cpu_profiler->Dispose();
}
@@ -330,6 +332,11 @@ THREADED_TEST(Access) {
CHECK(!foo_after->IsUndefined());
CHECK(foo_after->IsString());
CHECK(bar_str->Equals(env.local(), foo_after).FromJust());
+
+ CHECK(obj->Set(env.local(), v8_str("foo"), bar_str).ToChecked());
+ bool result;
+ CHECK(obj->Set(env.local(), v8_str("foo"), bar_str).To(&result));
+ CHECK(result);
}
@@ -555,40 +562,19 @@ TEST(MakingExternalStringConditions) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
uint16_t* two_byte_string = AsciiToTwoByteString("s1");
- Local<String> small_string =
+ Local<String> local_string =
String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
v8::NewStringType::kNormal)
.ToLocalChecked();
i::DeleteArray(two_byte_string);
- // We should refuse to externalize small strings.
- CHECK(!small_string->CanMakeExternal());
+ // We should refuse to externalize new space strings.
+ CHECK(!local_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
- CHECK(small_string->CanMakeExternal());
-
- two_byte_string = AsciiToTwoByteString("small string 2");
- small_string = String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
- i::DeleteArray(two_byte_string);
-
- const int buf_size = 10 * 1024;
- char* buf = i::NewArray<char>(buf_size);
- memset(buf, 'a', buf_size);
- buf[buf_size - 1] = '\0';
-
- two_byte_string = AsciiToTwoByteString(buf);
- Local<String> large_string =
- String::NewFromTwoByte(env->GetIsolate(), two_byte_string,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
- i::DeleteArray(buf);
- i::DeleteArray(two_byte_string);
- // Large strings should be immediately accepted.
- CHECK(large_string->CanMakeExternal());
+ CHECK(local_string->CanMakeExternal());
}
@@ -600,23 +586,14 @@ TEST(MakingExternalOneByteStringConditions) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
- Local<String> small_string = v8_str("s1");
- // We should refuse to externalize small strings.
- CHECK(!small_string->CanMakeExternal());
+ Local<String> local_string = v8_str("s1");
+ // We should refuse to externalize new space strings.
+ CHECK(!local_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen.
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted.
- CHECK(small_string->CanMakeExternal());
-
- const int buf_size = 10 * 1024;
- char* buf = i::NewArray<char>(buf_size);
- memset(buf, 'a', buf_size);
- buf[buf_size - 1] = '\0';
- Local<String> large_string = v8_str(buf);
- i::DeleteArray(buf);
- // Large strings should be immediately accepted.
- CHECK(large_string->CanMakeExternal());
+ CHECK(local_string->CanMakeExternal());
}
@@ -634,7 +611,7 @@ TEST(MakingExternalUnalignedOneByteString) {
"slice('abcdefghijklmnopqrstuvwxyz');"));
// Trigger GCs so that the newly allocated string moves to old gen.
- SimulateFullSpace(CcTest::heap()->old_space());
+ i::heap::SimulateFullSpace(CcTest::heap()->old_space());
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
@@ -2142,6 +2119,95 @@ THREADED_TEST(TestObjectTemplateInheritedWithPrototype2) {
Constructor_GetFunction_New);
}
+THREADED_TEST(TestObjectTemplateClassInheritance) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ Local<v8::FunctionTemplate> fun_A = v8::FunctionTemplate::New(isolate);
+ fun_A->SetClassName(v8_str("A"));
+
+ Local<ObjectTemplate> templ_A = fun_A->InstanceTemplate();
+ templ_A->SetNativeDataProperty(v8_str("nirk"), GetNirk);
+ templ_A->SetNativeDataProperty(v8_str("rino"), GetRino);
+
+ Local<v8::FunctionTemplate> fun_B = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::String> class_name = v8_str("B");
+ fun_B->SetClassName(class_name);
+ fun_B->Inherit(fun_A);
+
+ v8::Local<v8::String> subclass_name = v8_str("C");
+ v8::Local<v8::Object> b_proto;
+ v8::Local<v8::Object> c_proto;
+ // Perform several iterations to make sure the cache doesn't break
+ // subclassing.
+ for (int i = 0; i < 3; i++) {
+ Local<v8::Function> function_B =
+ fun_B->GetFunction(env.local()).ToLocalChecked();
+ if (i == 0) {
+ CHECK(env->Global()->Set(env.local(), class_name, function_B).FromJust());
+ CompileRun("class C extends B {}");
+ b_proto =
+ CompileRun("B.prototype")->ToObject(env.local()).ToLocalChecked();
+ c_proto =
+ CompileRun("C.prototype")->ToObject(env.local()).ToLocalChecked();
+ CHECK(b_proto->Equals(env.local(), c_proto->GetPrototype()).FromJust());
+ }
+ Local<v8::Object> instance =
+ CompileRun("new C()")->ToObject(env.local()).ToLocalChecked();
+ CHECK(c_proto->Equals(env.local(), instance->GetPrototype()).FromJust());
+
+ CHECK(subclass_name->StrictEquals(instance->GetConstructorName()));
+ CHECK(env->Global()->Set(env.local(), v8_str("o"), instance).FromJust());
+
+ CHECK_EQ(900, CompileRun("o.nirk")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(560, CompileRun("o.rino")->IntegerValue(env.local()).FromJust());
+ }
+}
+
+static void NamedPropertyGetterWhichReturns42(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_num(42));
+}
+
+THREADED_TEST(TestObjectTemplateReflectConstruct) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ Local<v8::FunctionTemplate> fun_B = v8::FunctionTemplate::New(isolate);
+ fun_B->InstanceTemplate()->SetHandler(
+ v8::NamedPropertyHandlerConfiguration(NamedPropertyGetterWhichReturns42));
+ v8::Local<v8::String> class_name = v8_str("B");
+ fun_B->SetClassName(class_name);
+
+ v8::Local<v8::String> subclass_name = v8_str("C");
+ v8::Local<v8::Object> b_proto;
+ v8::Local<v8::Object> c_proto;
+ // Perform several iterations to make sure the cache doesn't break
+ // subclassing.
+ for (int i = 0; i < 3; i++) {
+ Local<v8::Function> function_B =
+ fun_B->GetFunction(env.local()).ToLocalChecked();
+ if (i == 0) {
+ CHECK(env->Global()->Set(env.local(), class_name, function_B).FromJust());
+ CompileRun("function C() {}");
+ c_proto =
+ CompileRun("C.prototype")->ToObject(env.local()).ToLocalChecked();
+ }
+ Local<v8::Object> instance = CompileRun("Reflect.construct(B, [], C)")
+ ->ToObject(env.local())
+ .ToLocalChecked();
+ CHECK(c_proto->Equals(env.local(), instance->GetPrototype()).FromJust());
+
+ CHECK(subclass_name->StrictEquals(instance->GetConstructorName()));
+ CHECK(env->Global()->Set(env.local(), v8_str("o"), instance).FromJust());
+
+ CHECK_EQ(42, CompileRun("o.nirk")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(42, CompileRun("o.rino")->IntegerValue(env.local()).FromJust());
+ }
+}
+
static void GetFlabby(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
args.GetReturnValue().Set(v8_num(17.2));
@@ -2412,8 +2478,7 @@ THREADED_TEST(UndefinedIsNotEnumerable) {
v8::Local<Script> call_recursively_script;
-static const int kTargetRecursionDepth = 150; // near maximum
-
+static const int kTargetRecursionDepth = 100; // near maximum
static void CallScriptRecursivelyCall(
const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -2622,6 +2687,40 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
CHECK_EQ(huge, Object::GetAlignedPointerFromInternalField(persistent, 0));
}
+THREADED_TEST(SetAlignedPointerInInternalFields) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
+ instance_templ->SetInternalFieldCount(2);
+ Local<v8::Object> obj = templ->GetFunction(env.local())
+ .ToLocalChecked()
+ ->NewInstance(env.local())
+ .ToLocalChecked();
+ CHECK_EQ(2, obj->InternalFieldCount());
+
+ int* heap_allocated_1 = new int[100];
+ int* heap_allocated_2 = new int[100];
+ int indices[] = {0, 1};
+ void* values[] = {heap_allocated_1, heap_allocated_2};
+
+ obj->SetAlignedPointerInInternalFields(2, indices, values);
+ CcTest::heap()->CollectAllGarbage();
+ CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(0));
+ CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(1));
+
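+  // Swapping the indices must route each pointer into the other field.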
+ indices[0] = 1;
+ indices[1] = 0;
+ obj->SetAlignedPointerInInternalFields(2, indices, values);
+ CcTest::heap()->CollectAllGarbage();
+ CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(0));
+ CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(1));
+
+ delete[] heap_allocated_1;
+ delete[] heap_allocated_2;
+}
static void CheckAlignedPointerInEmbedderData(LocalContext* env, int index,
void* value) {
@@ -2663,7 +2762,6 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
}
}
-
static void CheckEmbedderData(LocalContext* env, int index,
v8::Local<Value> data) {
(*env)->SetEmbedderData(index, data);
@@ -2738,16 +2836,15 @@ void GlobalProxyIdentityHash(bool set_in_js) {
CHECK(env->Global()
->Set(env.local(), v8_str("global"), global_proxy)
.FromJust());
- i::Handle<i::Object> original_hash;
+ int32_t hash1;
if (set_in_js) {
CompileRun("var m = new Set(); m.add(global);");
- original_hash = i::Handle<i::Object>(i_global_proxy->GetHash(), i_isolate);
+ i::Object* original_hash = i_global_proxy->GetHash();
+ CHECK(original_hash->IsSmi());
+ hash1 = i::Smi::cast(original_hash)->value();
} else {
- original_hash = i::Handle<i::Object>(
- i::Object::GetOrCreateHash(i_isolate, i_global_proxy));
+ hash1 = i::Object::GetOrCreateHash(i_isolate, i_global_proxy)->value();
}
- CHECK(original_hash->IsSmi());
- int32_t hash1 = i::Handle<i::Smi>::cast(original_hash)->value();
// Hash should be retained after being detached.
env->DetachGlobal();
int hash2 = global_proxy->GetIdentityHash();
@@ -4687,126 +4784,6 @@ THREADED_TEST(ApiObjectGroupsCycle) {
}
-// TODO(mstarzinger): This should be a THREADED_TEST but causes failures
-// on the buildbots, so was made non-threaded for the time being.
-TEST(ApiObjectGroupsCycleForScavenger) {
- i::FLAG_stress_compaction = false;
- i::FLAG_gc_global = false;
- LocalContext env;
- v8::Isolate* iso = env->GetIsolate();
- HandleScope scope(iso);
-
- WeakCallCounter counter(1234);
-
- WeakCallCounterAndPersistent<Value> g1s1(&counter);
- WeakCallCounterAndPersistent<Value> g1s2(&counter);
- WeakCallCounterAndPersistent<Value> g2s1(&counter);
- WeakCallCounterAndPersistent<Value> g2s2(&counter);
- WeakCallCounterAndPersistent<Value> g3s1(&counter);
- WeakCallCounterAndPersistent<Value> g3s2(&counter);
-
- {
- HandleScope scope(iso);
- g1s1.handle.Reset(iso, Object::New(iso));
- g1s2.handle.Reset(iso, Object::New(iso));
- g1s1.handle.SetWeak(&g1s1, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
- g1s2.handle.SetWeak(&g1s2, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
-
- g2s1.handle.Reset(iso, Object::New(iso));
- g2s2.handle.Reset(iso, Object::New(iso));
- g2s1.handle.SetWeak(&g2s1, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
- g2s2.handle.SetWeak(&g2s2, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
-
- g3s1.handle.Reset(iso, Object::New(iso));
- g3s2.handle.Reset(iso, Object::New(iso));
- g3s1.handle.SetWeak(&g3s1, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
- g3s2.handle.SetWeak(&g3s2, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
- }
-
- // Make a root.
- WeakCallCounterAndPersistent<Value> root(&counter);
- root.handle.Reset(iso, g1s1.handle);
- root.handle.MarkPartiallyDependent();
-
- // Connect groups. We're building the following cycle:
- // G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
- // groups.
- {
- HandleScope handle_scope(iso);
- g1s1.handle.MarkPartiallyDependent();
- g1s2.handle.MarkPartiallyDependent();
- g2s1.handle.MarkPartiallyDependent();
- g2s2.handle.MarkPartiallyDependent();
- g3s1.handle.MarkPartiallyDependent();
- g3s2.handle.MarkPartiallyDependent();
- iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
- iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
- Local<Object>::New(iso, g1s1.handle.As<Object>())
- ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g2s1.handle))
- .FromJust();
- iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
- iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
- Local<Object>::New(iso, g2s1.handle.As<Object>())
- ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g3s1.handle))
- .FromJust();
- iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
- iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
- Local<Object>::New(iso, g3s1.handle.As<Object>())
- ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g1s1.handle))
- .FromJust();
- }
-
- v8::internal::Heap* heap =
- reinterpret_cast<v8::internal::Isolate*>(iso)->heap();
- heap->CollectAllGarbage();
-
- // All objects should be alive.
- CHECK_EQ(0, counter.NumberOfWeakCalls());
-
- // Weaken the root.
- root.handle.SetWeak(&root, &WeakPointerCallback,
- v8::WeakCallbackType::kParameter);
- root.handle.MarkPartiallyDependent();
-
- // Groups are deleted, rebuild groups.
- {
- HandleScope handle_scope(iso);
- g1s1.handle.MarkPartiallyDependent();
- g1s2.handle.MarkPartiallyDependent();
- g2s1.handle.MarkPartiallyDependent();
- g2s2.handle.MarkPartiallyDependent();
- g3s1.handle.MarkPartiallyDependent();
- g3s2.handle.MarkPartiallyDependent();
- iso->SetObjectGroupId(g1s1.handle, UniqueId(1));
- iso->SetObjectGroupId(g1s2.handle, UniqueId(1));
- Local<Object>::New(iso, g1s1.handle.As<Object>())
- ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g2s1.handle))
- .FromJust();
- iso->SetObjectGroupId(g2s1.handle, UniqueId(2));
- iso->SetObjectGroupId(g2s2.handle, UniqueId(2));
- Local<Object>::New(iso, g2s1.handle.As<Object>())
- ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g3s1.handle))
- .FromJust();
- iso->SetObjectGroupId(g3s1.handle, UniqueId(3));
- iso->SetObjectGroupId(g3s2.handle, UniqueId(3));
- Local<Object>::New(iso, g3s1.handle.As<Object>())
- ->Set(env.local(), v8_str("x"), Local<Value>::New(iso, g1s1.handle))
- .FromJust();
- }
-
- heap->CollectAllGarbage();
-
- // All objects should be gone. 7 global handles in total.
- CHECK_EQ(7, counter.NumberOfWeakCalls());
-}
-
-
THREADED_TEST(ScriptException) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -6511,6 +6488,46 @@ THREADED_TEST(Equality) {
CHECK(!v8::False(isolate)->SameValue(v8::Undefined(isolate)));
}
+THREADED_TEST(TypeOf) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(context->GetIsolate());
+
+ Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
+ Local<v8::Function> fun = t1->GetFunction(context.local()).ToLocalChecked();
+
+ CHECK(v8::Undefined(isolate)
+ ->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("undefined"))
+ .FromJust());
+ CHECK(v8::Null(isolate)
+ ->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("object"))
+ .FromJust());
+ CHECK(v8_str("str")
+ ->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("string"))
+ .FromJust());
+ CHECK(v8_num(0.0)
+ ->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("number"))
+ .FromJust());
+ CHECK(v8_num(1)
+ ->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("number"))
+ .FromJust());
+ CHECK(v8::Object::New(isolate)
+ ->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("object"))
+ .FromJust());
+ CHECK(v8::Boolean::New(isolate, true)
+ ->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("boolean"))
+ .FromJust());
+ CHECK(fun->TypeOf(isolate)
+ ->Equals(context.local(), v8_str("function"))
+ .FromJust());
+}
THREADED_TEST(MultiRun) {
LocalContext context;
@@ -9046,33 +9063,6 @@ TEST(ApiUncaughtException) {
}
-TEST(ApiUncaughtExceptionInObjectObserve) {
- v8::internal::FLAG_harmony_object_observe = true;
- v8::internal::FLAG_stack_size = 150;
- report_count = 0;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- isolate->AddMessageListener(ApiUncaughtExceptionTestListener);
- CompileRun(
- "var obj = {};"
- "var observe_count = 0;"
- "function observer1() { ++observe_count; };"
- "function observer2() { ++observe_count; };"
- "function observer_throws() { throw new Error(); };"
- "function stack_overflow() { return (function f(x) { f(x+1); })(0); };"
- "Object.observe(obj, observer_throws.bind());"
- "Object.observe(obj, observer1);"
- "Object.observe(obj, stack_overflow);"
- "Object.observe(obj, observer2);"
- "Object.observe(obj, observer_throws.bind());"
- "obj.foo = 'bar';");
- CHECK_EQ(3, report_count);
- ExpectInt32("observe_count", 2);
- isolate->RemoveMessageListeners(ApiUncaughtExceptionTestListener);
-}
-
-
static const char* script_resource_name = "ExceptionInNativeScript.js";
static void ExceptionInNativeScriptTestListener(v8::Local<v8::Message> message,
v8::Local<Value>) {
@@ -10179,6 +10169,12 @@ static bool AccessAlwaysBlocked(Local<v8::Context> accessing_context,
return false;
}
+static bool AccessAlwaysAllowed(Local<v8::Context> accessing_context,
+ Local<v8::Object> global,
+ Local<v8::Value> data) {
+ i::PrintF("Access allowed.\n");
+ return true;
+}
THREADED_TEST(AccessControlGetOwnPropertyNames) {
v8::Isolate* isolate = CcTest::isolate();
@@ -10541,6 +10537,69 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
}
}
+THREADED_TEST(ObjectGetOwnPropertyNames) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ v8::Local<v8::Object> value =
+ v8::Local<v8::Object>::Cast(v8::StringObject::New(v8_str("test")));
+ v8::Local<v8::Array> properties;
+
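+  // A String wrapper over "test" has own properties "0".."3" plus "length".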
+ CHECK(value
+ ->GetOwnPropertyNames(context.local(),
+ static_cast<v8::PropertyFilter>(
+ v8::PropertyFilter::ALL_PROPERTIES |
+ v8::PropertyFilter::SKIP_SYMBOLS))
+ .ToLocal(&properties));
+ CHECK_EQ(5, properties->Length());
+ v8::Local<v8::Value> property;
+ CHECK(properties->Get(context.local(), 4).ToLocal(&property) &&
+ property->IsString());
+ CHECK(property.As<v8::String>()
+ ->Equals(context.local(), v8_str("length"))
+ .FromMaybe(false));
+ for (int i = 0; i < 4; ++i) {
+ v8::Local<v8::Value> property;
+ CHECK(properties->Get(context.local(), i).ToLocal(&property) &&
+ property->IsInt32());
+ CHECK_EQ(property.As<v8::Int32>()->Value(), i);
+ }
+
+ CHECK(value->GetOwnPropertyNames(context.local(), v8::ONLY_ENUMERABLE)
+ .ToLocal(&properties));
+ CHECK_EQ(4, properties->Length());
+ for (int i = 0; i < 4; ++i) {
+ v8::Local<v8::Value> property;
+ CHECK(properties->Get(context.local(), i).ToLocal(&property) &&
+ property->IsInt32());
+ CHECK_EQ(property.As<v8::Int32>()->Value(), i);
+ }
+
+ value = value->GetPrototype().As<v8::Object>();
+ CHECK(value
+ ->GetOwnPropertyNames(context.local(),
+ static_cast<v8::PropertyFilter>(
+ v8::PropertyFilter::ALL_PROPERTIES |
+ v8::PropertyFilter::SKIP_SYMBOLS))
+ .ToLocal(&properties));
+ bool concat_found = false;
+ bool starts_with_found = false;
+ for (uint32_t i = 0; i < properties->Length(); ++i) {
+ v8::Local<v8::Value> property;
+ CHECK(properties->Get(context.local(), i).ToLocal(&property));
+ if (!property->IsString()) continue;
+ if (!concat_found)
+ concat_found = property.As<v8::String>()
+ ->Equals(context.local(), v8_str("concat"))
+ .FromMaybe(false);
+ if (!starts_with_found)
+ starts_with_found = property.As<v8::String>()
+ ->Equals(context.local(), v8_str("startsWith"))
+ .FromMaybe(false);
+ }
+ CHECK(concat_found && starts_with_found);
+}
THREADED_TEST(CallKnownGlobalReceiver) {
v8::Isolate* isolate = CcTest::isolate();
@@ -11185,6 +11244,7 @@ THREADED_TEST(FunctionRemovePrototype) {
Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->RemovePrototype();
Local<v8::Function> fun = t1->GetFunction(context.local()).ToLocalChecked();
+ CHECK(!fun->IsConstructor());
CHECK(context->Global()->Set(context.local(), v8_str("fun"), fun).FromJust());
CHECK(!CompileRun("'prototype' in fun")
->BooleanValue(context.local())
@@ -11648,6 +11708,9 @@ THREADED_TEST(EvalInDetachedGlobal) {
v8::Local<Context> context0 = Context::New(isolate);
v8::Local<Context> context1 = Context::New(isolate);
+ Local<String> token = v8_str("<security token>");
+ context0->SetSecurityToken(token);
+ context1->SetSecurityToken(token);
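+  // Matching security tokens allow context1 to call the closure created in
+  // context0 without failing access checks.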
// Set up function in context0 that uses eval from context0.
context0->Enter();
@@ -11661,15 +11724,14 @@ THREADED_TEST(EvalInDetachedGlobal) {
// Put the function into context1 and call it before and after
// detaching the global. Before detaching, the call succeeds and
- // after detaching and exception is thrown.
+  // after detaching, undefined is returned.
context1->Enter();
CHECK(context1->Global()->Set(context1, v8_str("fun"), fun).FromJust());
v8::Local<v8::Value> x_value = CompileRun("fun('x')");
CHECK_EQ(42, x_value->Int32Value(context1).FromJust());
context0->DetachGlobal();
- v8::TryCatch catcher(isolate);
x_value = CompileRun("fun('x')");
- CHECK_EQ(42, x_value->Int32Value(context1).FromJust());
+ CHECK(x_value->IsUndefined());
context1->Exit();
}
@@ -13261,6 +13323,43 @@ THREADED_TEST(IsConstructCall) {
CHECK(value->BooleanValue(context.local()).FromJust());
}
+static void NewTargetHandler(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ ApiTestFuzzer::Fuzz();
+ args.GetReturnValue().Set(args.NewTarget());
+}
+
+THREADED_TEST(NewTargetHandler) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ // Function template with call handler.
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->SetCallHandler(NewTargetHandler);
+
+ LocalContext context;
+
+ Local<Function> function =
+ templ->GetFunction(context.local()).ToLocalChecked();
+ CHECK(context->Global()
+ ->Set(context.local(), v8_str("f"), function)
+ .FromJust());
+ Local<Value> value = CompileRun("f()");
+ CHECK(value->IsUndefined());
+ value = CompileRun("new f()");
+ CHECK(value->IsFunction());
+ CHECK(value == function);
+ Local<Value> subclass = CompileRun("var g = class extends f { }; g");
+ CHECK(subclass->IsFunction());
+ value = CompileRun("new g()");
+ CHECK(value->IsFunction());
+ CHECK(value == subclass);
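+  // Reflect.construct(target, args, newTarget) invokes f with new.target set
+  // to Array, which the call handler then returns.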
+ value = CompileRun("Reflect.construct(f, [], Array)");
+ CHECK(value->IsFunction());
+ CHECK(value ==
+ context->Global()
+ ->Get(context.local(), v8_str("Array"))
+ .ToLocalChecked());
+}
THREADED_TEST(ObjectProtoToString) {
v8::Isolate* isolate = CcTest::isolate();
@@ -13298,7 +13397,7 @@ THREADED_TEST(ObjectProtoToString) {
value =
context->Global()->ObjectProtoToString(context.local()).ToLocalChecked();
CHECK(value->IsString() &&
- value->Equals(context.local(), v8_str("[object global]")).FromJust());
+ value->Equals(context.local(), v8_str("[object Object]")).FromJust());
// Check ordinary object
Local<Value> object =
@@ -13344,7 +13443,7 @@ TEST(ObjectProtoToStringES6) {
value =
context->Global()->ObjectProtoToString(context.local()).ToLocalChecked();
CHECK(value->IsString() &&
- value->Equals(context.local(), v8_str("[object global]")).FromJust());
+ value->Equals(context.local(), v8_str("[object Object]")).FromJust());
// Check ordinary object
Local<Value> object = CompileRun("new Object()");
@@ -13707,6 +13806,16 @@ void ApiTestFuzzer::TearDown() {
}
}
+void ApiTestFuzzer::CallTest() {
+ v8::Isolate::Scope scope(CcTest::isolate());
+ if (kLogThreading)
+ printf("Start test %s #%d\n",
+ RegisterThreadedTest::nth(test_number_)->name(), test_number_);
+ CallTestNumber(test_number_);
+ if (kLogThreading)
+ printf("End test %s #%d\n", RegisterThreadedTest::nth(test_number_)->name(),
+ test_number_);
+}
// Let's not be needlessly self-referential.
TEST(Threading1) {
@@ -13737,16 +13846,6 @@ TEST(Threading4) {
}
-void ApiTestFuzzer::CallTest() {
- v8::Isolate::Scope scope(CcTest::isolate());
- if (kLogThreading)
- printf("Start test %d\n", test_number_);
- CallTestNumber(test_number_);
- if (kLogThreading)
- printf("End test %d\n", test_number_);
-}
-
-
static void ThrowInJS(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
CHECK(v8::Locker::IsLocked(isolate));
@@ -14416,7 +14515,6 @@ int SetFunctionEntryHookTest::CountInvocations(
return invocations;
}
-
void SetFunctionEntryHookTest::RunLoopInNewEnv(v8::Isolate* isolate) {
v8::HandleScope outer(isolate);
v8::Local<Context> env = Context::New(isolate);
@@ -14474,10 +14572,19 @@ void SetFunctionEntryHookTest::RunTest() {
RunLoopInNewEnv(isolate);
- // Check the exepected invocation counts.
- CHECK_EQ(2, CountInvocations(NULL, "bar"));
- CHECK_EQ(200, CountInvocations("bar", "foo"));
- CHECK_EQ(200, CountInvocations(NULL, "foo"));
+ // Check the expected invocation counts.
+ if (!i::FLAG_ignition) {
+ CHECK_EQ(2, CountInvocations(NULL, "bar"));
+ CHECK_EQ(200, CountInvocations("bar", "foo"));
+ CHECK_EQ(200, CountInvocations(NULL, "foo"));
+ } else {
+    // With Ignition we don't see the actual functions being called; instead
+    // we see the InterpreterEntryTrampoline at least 102 times
+    // (100 unoptimized calls to foo, and 2 calls to bar).
+ CHECK_LE(102, CountInvocations(NULL, "InterpreterEntryTrampoline"));
+ // We should also see the calls to the optimized function foo.
+ CHECK_EQ(100, CountInvocations(NULL, "foo"));
+ }
// Verify that we have an entry hook on some specific stubs.
CHECK_NE(0, CountInvocations(NULL, "CEntryStub"));
@@ -14519,9 +14626,8 @@ TEST(SetFunctionEntryHook) {
test.RunTest();
}
-
-static i::HashMap* code_map = NULL;
-static i::HashMap* jitcode_line_info = NULL;
+static v8::base::HashMap* code_map = NULL;
+static v8::base::HashMap* jitcode_line_info = NULL;
static int saw_bar = 0;
static int move_events = 0;
@@ -14581,7 +14687,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
CHECK(event->code_start != NULL);
CHECK_NE(0, static_cast<int>(event->code_len));
CHECK(event->name.str != NULL);
- i::HashMap::Entry* entry = code_map->LookupOrInsert(
+ v8::base::HashMap::Entry* entry = code_map->LookupOrInsert(
event->code_start, i::ComputePointerHash(event->code_start));
entry->value = reinterpret_cast<void*>(event->code_len);
@@ -14600,7 +14706,8 @@ static void event_handler(const v8::JitCodeEvent* event) {
// Compiler::RecordFunctionCompilation) and the line endings
// calculations can cause a GC, which can move the newly created code
// before its existence can be logged.
- i::HashMap::Entry* entry = code_map->Lookup(event->code_start, hash);
+ v8::base::HashMap::Entry* entry =
+ code_map->Lookup(event->code_start, hash);
if (entry != NULL) {
++move_events;
@@ -14627,7 +14734,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
DummyJitCodeLineInfo* line_info = new DummyJitCodeLineInfo();
v8::JitCodeEvent* temp_event = const_cast<v8::JitCodeEvent*>(event);
temp_event->user_data = line_info;
- i::HashMap::Entry* entry = jitcode_line_info->LookupOrInsert(
+ v8::base::HashMap::Entry* entry = jitcode_line_info->LookupOrInsert(
line_info, i::ComputePointerHash(line_info));
entry->value = reinterpret_cast<void*>(line_info);
}
@@ -14638,7 +14745,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
CHECK(event->user_data != NULL);
uint32_t hash = i::ComputePointerHash(event->user_data);
- i::HashMap::Entry* entry =
+ v8::base::HashMap::Entry* entry =
jitcode_line_info->Lookup(event->user_data, hash);
CHECK(entry != NULL);
delete reinterpret_cast<DummyJitCodeLineInfo*>(event->user_data);
@@ -14648,7 +14755,7 @@ static void event_handler(const v8::JitCodeEvent* event) {
case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
CHECK(event->user_data != NULL);
uint32_t hash = i::ComputePointerHash(event->user_data);
- i::HashMap::Entry* entry =
+ v8::base::HashMap::Entry* entry =
jitcode_line_info->Lookup(event->user_data, hash);
CHECK(entry != NULL);
}
@@ -14690,10 +14797,10 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
{
v8::HandleScope scope(isolate);
- i::HashMap code(MatchPointers);
+ v8::base::HashMap code(MatchPointers);
code_map = &code;
- i::HashMap lineinfo(MatchPointers);
+ v8::base::HashMap lineinfo(MatchPointers);
jitcode_line_info = &lineinfo;
saw_bar = 0;
@@ -14707,8 +14814,8 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
for (int i = 0; i < kIterations; ++i) {
LocalContext env(isolate);
i::AlwaysAllocateScope always_allocate(i_isolate);
- SimulateFullSpace(i::FLAG_ignition ? heap->old_space()
- : heap->code_space());
+ i::heap::SimulateFullSpace(i::FLAG_ignition ? heap->old_space()
+ : heap->code_space());
CompileRun(script);
// Keep a strong reference to the code object in the handle scope.
@@ -14756,10 +14863,10 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
CompileRun(script);
// Now get code through initial iteration.
- i::HashMap code(MatchPointers);
+ v8::base::HashMap code(MatchPointers);
code_map = &code;
- i::HashMap lineinfo(MatchPointers);
+ v8::base::HashMap lineinfo(MatchPointers);
jitcode_line_info = &lineinfo;
isolate->SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
@@ -14792,8 +14899,7 @@ THREADED_TEST(ExternalAllocatedMemory) {
isolate->AdjustAmountOfExternalAllocatedMemory(kSize));
CHECK_EQ(baseline,
isolate->AdjustAmountOfExternalAllocatedMemory(-kSize));
- const int64_t kTriggerGCSize =
- v8::internal::Internals::kExternalAllocationLimit + 1;
+ const int64_t kTriggerGCSize = i::kExternalAllocationLimit + 1;
CHECK_EQ(baseline + kTriggerGCSize,
isolate->AdjustAmountOfExternalAllocatedMemory(kTriggerGCSize));
CHECK_EQ(baseline,
@@ -14805,8 +14911,7 @@ TEST(Regress51719) {
i::FLAG_incremental_marking = false;
CcTest::InitializeVM();
- const int64_t kTriggerGCSize =
- v8::internal::Internals::kExternalAllocationLimit + 1;
+ const int64_t kTriggerGCSize = i::kExternalAllocationLimit + 1;
v8::Isolate* isolate = CcTest::isolate();
isolate->AdjustAmountOfExternalAllocatedMemory(kTriggerGCSize);
}
@@ -14981,18 +15086,39 @@ THREADED_TEST(DateAccess) {
CHECK_EQ(1224744689038.0, date.As<v8::Date>()->ValueOf());
}
+void CheckIsSymbolAt(v8::Isolate* isolate, v8::Local<v8::Array> properties,
+ unsigned index, const char* name) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Value> value =
+ properties->Get(context, v8::Integer::New(isolate, index))
+ .ToLocalChecked();
+ CHECK(value->IsSymbol());
+ v8::String::Utf8Value symbol_name(Local<Symbol>::Cast(value)->Name());
+ CHECK_EQ(0, strcmp(name, *symbol_name));
+}
+
+void CheckStringArray(v8::Isolate* isolate, v8::Local<v8::Array> properties,
+ unsigned length, const char* names[]) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ CHECK_EQ(length, properties->Length());
+ for (unsigned i = 0; i < length; i++) {
+ v8::Local<v8::Value> value =
+ properties->Get(context, v8::Integer::New(isolate, i)).ToLocalChecked();
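+    // A nullptr entry in names[] marks an index where a symbol is expected.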
+ if (names[i] == nullptr) {
+ DCHECK(value->IsSymbol());
+ } else {
+ v8::String::Utf8Value elm(value);
+ CHECK_EQ(0, strcmp(names[i], *elm));
+ }
+ }
+}
void CheckProperties(v8::Isolate* isolate, v8::Local<v8::Value> val,
- unsigned elmc, const char* elmv[]) {
+ unsigned length, const char* names[]) {
v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::Local<v8::Object> obj = val.As<v8::Object>();
v8::Local<v8::Array> props = obj->GetPropertyNames(context).ToLocalChecked();
- CHECK_EQ(elmc, props->Length());
- for (unsigned i = 0; i < elmc; i++) {
- v8::String::Utf8Value elm(
- props->Get(context, v8::Integer::New(isolate, i)).ToLocalChecked());
- CHECK_EQ(0, strcmp(elmv[i], *elm));
- }
+ CheckStringArray(isolate, props, length, names);
}
@@ -15103,6 +15229,97 @@ THREADED_TEST(PropertyEnumeration2) {
}
}
+THREADED_TEST(PropertyNames) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Value> result = CompileRun(
+ "var result = {0: 0, 1: 1, a: 2, b: 3};"
+ "result[Symbol('symbol')] = true;"
+ "result.__proto__ = {2: 4, 3: 5, c: 6, d: 7};"
+ "result;");
+ v8::Local<v8::Object> object = result.As<v8::Object>();
+ v8::PropertyFilter default_filter =
+ static_cast<v8::PropertyFilter>(v8::ONLY_ENUMERABLE | v8::SKIP_SYMBOLS);
+ v8::PropertyFilter include_symbols_filter = v8::ONLY_ENUMERABLE;
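+  // Omitting SKIP_SYMBOLS keeps symbol keys in the collected property names.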
+
+ v8::Local<v8::Array> properties =
+ object->GetPropertyNames(context.local()).ToLocalChecked();
+ const char* expected_properties1[] = {"0", "1", "a", "b", "2", "3", "c", "d"};
+ CheckStringArray(isolate, properties, 8, expected_properties1);
+
+ properties =
+ object
+ ->GetPropertyNames(context.local(),
+ v8::KeyCollectionMode::kIncludePrototypes,
+ default_filter, v8::IndexFilter::kIncludeIndices)
+ .ToLocalChecked();
+ CheckStringArray(isolate, properties, 8, expected_properties1);
+
+ properties = object
+ ->GetPropertyNames(context.local(),
+ v8::KeyCollectionMode::kIncludePrototypes,
+ include_symbols_filter,
+ v8::IndexFilter::kIncludeIndices)
+ .ToLocalChecked();
+ const char* expected_properties1_1[] = {"0", "1", "a", "b", nullptr,
+ "2", "3", "c", "d"};
+ CheckStringArray(isolate, properties, 9, expected_properties1_1);
+ CheckIsSymbolAt(isolate, properties, 4, "symbol");
+
+ properties =
+ object
+ ->GetPropertyNames(context.local(),
+ v8::KeyCollectionMode::kIncludePrototypes,
+ default_filter, v8::IndexFilter::kSkipIndices)
+ .ToLocalChecked();
+ const char* expected_properties2[] = {"a", "b", "c", "d"};
+ CheckStringArray(isolate, properties, 4, expected_properties2);
+
+ properties = object
+ ->GetPropertyNames(context.local(),
+ v8::KeyCollectionMode::kIncludePrototypes,
+ include_symbols_filter,
+ v8::IndexFilter::kSkipIndices)
+ .ToLocalChecked();
+ const char* expected_properties2_1[] = {"a", "b", nullptr, "c", "d"};
+ CheckStringArray(isolate, properties, 5, expected_properties2_1);
+ CheckIsSymbolAt(isolate, properties, 2, "symbol");
+
+ properties =
+ object
+ ->GetPropertyNames(context.local(), v8::KeyCollectionMode::kOwnOnly,
+ default_filter, v8::IndexFilter::kIncludeIndices)
+ .ToLocalChecked();
+ const char* expected_properties3[] = {"0", "1", "a", "b"};
+ CheckStringArray(isolate, properties, 4, expected_properties3);
+
+ properties = object
+ ->GetPropertyNames(
+ context.local(), v8::KeyCollectionMode::kOwnOnly,
+ include_symbols_filter, v8::IndexFilter::kIncludeIndices)
+ .ToLocalChecked();
+ const char* expected_properties3_1[] = {"0", "1", "a", "b", nullptr};
+ CheckStringArray(isolate, properties, 5, expected_properties3_1);
+ CheckIsSymbolAt(isolate, properties, 4, "symbol");
+
+ properties =
+ object
+ ->GetPropertyNames(context.local(), v8::KeyCollectionMode::kOwnOnly,
+ default_filter, v8::IndexFilter::kSkipIndices)
+ .ToLocalChecked();
+ const char* expected_properties4[] = {"a", "b"};
+ CheckStringArray(isolate, properties, 2, expected_properties4);
+
+ properties = object
+ ->GetPropertyNames(
+ context.local(), v8::KeyCollectionMode::kOwnOnly,
+ include_symbols_filter, v8::IndexFilter::kSkipIndices)
+ .ToLocalChecked();
+ const char* expected_properties4_1[] = {"a", "b", nullptr};
+ CheckStringArray(isolate, properties, 3, expected_properties4_1);
+ CheckIsSymbolAt(isolate, properties, 2, "symbol");
+}
THREADED_TEST(AccessChecksReenabledCorrectly) {
LocalContext context;
@@ -16824,40 +17041,6 @@ TEST(CaptureStackTraceForUncaughtException) {
CHECK_EQ(1, report_count);
}
-
-TEST(GetStackTraceForUncaughtExceptionFromSimpleStackTrace) {
- report_count = 0;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
-
- // Create an Error object first.
- CompileRunWithOrigin(
- "function foo() {\n"
- "e=new Error('err');\n"
- "};\n"
- "function bar() {\n"
- " foo();\n"
- "};\n"
- "var e;",
- "origin");
- v8::Local<v8::Object> global = env->Global();
- Local<Value> trouble =
- global->Get(env.local(), v8_str("bar")).ToLocalChecked();
- CHECK(trouble->IsFunction());
- Function::Cast(*trouble)->Call(env.local(), global, 0, NULL).ToLocalChecked();
-
- // Enable capturing detailed stack trace late, and throw the exception.
- // The detailed stack trace should be extracted from the simple stack.
- isolate->AddMessageListener(StackTraceForUncaughtExceptionListener);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRunWithOrigin("throw e", "origin");
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
- isolate->RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
- CHECK_EQ(1, report_count);
-}
-
-
TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -18371,7 +18554,6 @@ THREADED_TEST(FunctionGetInferredName) {
THREADED_TEST(FunctionGetDebugName) {
- i::FLAG_harmony_function_name = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
const char* code =
@@ -18728,12 +18910,6 @@ TEST(SetterOnConstructorPrototype) {
}
-static void NamedPropertyGetterWhichReturns42(
- Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(v8_num(42));
-}
-
-
static void NamedPropertySetterWhichSetsYOnThisTo23(
Local<Name> name, Local<Value> value,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -18916,7 +19092,7 @@ void PrologueCallbackAlloc(v8::Isolate* isolate,
++prologue_call_count_alloc;
// Simulate full heap to see if we will reenter this callback
- SimulateFullSpace(CcTest::heap()->new_space());
+ i::heap::SimulateFullSpace(CcTest::heap()->new_space());
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
@@ -18936,7 +19112,7 @@ void EpilogueCallbackAlloc(v8::Isolate* isolate,
++epilogue_call_count_alloc;
// Simulate full heap to see if we will reenter this callback
- SimulateFullSpace(CcTest::heap()->new_space());
+ i::heap::SimulateFullSpace(CcTest::heap()->new_space());
Local<Object> obj = Object::New(isolate);
CHECK(!obj.IsEmpty());
@@ -19126,8 +19302,7 @@ TEST(ContainsOnlyOneByte) {
const int length = 512;
// Ensure word aligned assignment.
const int aligned_length = length*sizeof(uintptr_t)/sizeof(uint16_t);
- v8::base::SmartArrayPointer<uintptr_t> aligned_contents(
- new uintptr_t[aligned_length]);
+ std::unique_ptr<uintptr_t[]> aligned_contents(new uintptr_t[aligned_length]);
uint16_t* string_contents =
reinterpret_cast<uint16_t*>(aligned_contents.get());
// Set to contain only one byte.
@@ -19912,7 +20087,6 @@ TEST(PersistentHandleInNewSpaceVisitor) {
TEST(RegExp) {
- i::FLAG_harmony_unicode_regexps = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -20313,6 +20487,7 @@ TEST(HasOwnProperty) {
HasOwnPropertyNamedPropertyGetter));
Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(!instance->HasOwnProperty(env.local(), v8_str("42")).FromJust());
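+  // The uint32_t index overload should agree with the equivalent string key.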
+ CHECK(!instance->HasOwnProperty(env.local(), 42).FromJust());
CHECK(instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
CHECK(!instance->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
}
@@ -20322,7 +20497,9 @@ TEST(HasOwnProperty) {
HasOwnPropertyIndexedPropertyGetter));
Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(instance->HasOwnProperty(env.local(), v8_str("42")).FromJust());
+ CHECK(instance->HasOwnProperty(env.local(), 42).FromJust());
CHECK(!instance->HasOwnProperty(env.local(), v8_str("43")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), 43).FromJust());
CHECK(!instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
}
{ // Check named query interceptors.
@@ -20339,7 +20516,9 @@ TEST(HasOwnProperty) {
0, 0, HasOwnPropertyIndexedPropertyQuery));
Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
CHECK(instance->HasOwnProperty(env.local(), v8_str("42")).FromJust());
+ CHECK(instance->HasOwnProperty(env.local(), 42).FromJust());
CHECK(!instance->HasOwnProperty(env.local(), v8_str("41")).FromJust());
+ CHECK(!instance->HasOwnProperty(env.local(), 41).FromJust());
}
{ // Check callbacks.
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
@@ -20917,6 +21096,7 @@ TEST(CallCompletedCallbackTwoExceptions) {
static void MicrotaskOne(const v8::FunctionCallbackInfo<Value>& info) {
+ CHECK(v8::MicrotasksScope::IsRunningMicrotasks(info.GetIsolate()));
v8::HandleScope scope(info.GetIsolate());
v8::MicrotasksScope microtasks(info.GetIsolate(),
v8::MicrotasksScope::kDoNotRunMicrotasks);
@@ -20925,6 +21105,7 @@ static void MicrotaskOne(const v8::FunctionCallbackInfo<Value>& info) {
static void MicrotaskTwo(const v8::FunctionCallbackInfo<Value>& info) {
+ CHECK(v8::MicrotasksScope::IsRunningMicrotasks(info.GetIsolate()));
v8::HandleScope scope(info.GetIsolate());
v8::MicrotasksScope microtasks(info.GetIsolate(),
v8::MicrotasksScope::kDoNotRunMicrotasks);
@@ -20943,6 +21124,7 @@ static void MicrotaskThree(void* data) {
TEST(EnqueueMicrotask) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
+ CHECK(!v8::MicrotasksScope::IsRunningMicrotasks(env->GetIsolate()));
CompileRun(
"var ext1Calls = 0;"
"var ext2Calls = 0;");
@@ -21282,48 +21464,17 @@ TEST(ScopedMicrotasks) {
env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
}
-
-static void DebugEventInObserver(const v8::Debug::EventDetails& event_details) {
- v8::DebugEvent event = event_details.GetEvent();
- if (event != v8::Break) return;
- Local<Object> exec_state = event_details.GetExecutionState();
- Local<Context> context = CcTest::isolate()->GetCurrentContext();
- Local<Value> break_id =
- exec_state->Get(context, v8_str("break_id")).ToLocalChecked();
- CompileRun("function f(id) { new FrameDetails(id, 0); }");
- Local<Function> fun = Local<Function>::Cast(
- CcTest::global()->Get(context, v8_str("f")).ToLocalChecked());
- fun->Call(context, CcTest::global(), 1, &break_id).ToLocalChecked();
-}
-
-
-TEST(Regress385349) {
- i::FLAG_harmony_object_observe = true;
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- HandleScope handle_scope(isolate);
- isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
- Local<Context> context = Context::New(isolate);
- v8::Debug::SetDebugEventListener(isolate, DebugEventInObserver);
- {
- Context::Scope context_scope(context);
- CompileRun("var obj = {};"
- "Object.observe(obj, function(changes) { debugger; });"
- "obj.a = 0;");
- }
- isolate->RunMicrotasks();
- isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
- v8::Debug::SetDebugEventListener(isolate, nullptr);
-}
-
-
#ifdef ENABLE_DISASSEMBLER
-static int probes_counter = 0;
-static int misses_counter = 0;
-static int updates_counter = 0;
+// FLAG_test_primary_stub_cache and FLAG_test_secondary_stub_cache are
+// read-only when ENABLE_DISASSEMBLER is not defined.
+
+namespace {
+int probes_counter = 0;
+int misses_counter = 0;
+int updates_counter = 0;
-static int* LookupCounter(const char* name) {
+int* LookupCounter(const char* name) {
if (strcmp(name, "c:V8.MegamorphicStubCacheProbes") == 0) {
return &probes_counter;
} else if (strcmp(name, "c:V8.MegamorphicStubCacheMisses") == 0) {
@@ -21334,24 +21485,33 @@ static int* LookupCounter(const char* name) {
return NULL;
}
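+// The program creates 50 classes with distinct maps so that the fooify call
+// site goes megamorphic and exercises the stub cache.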
+const char* kMegamorphicTestProgram =
+ "function CreateClass(name) {\n"
+ " var src = \n"
+ " ` function ${name}() {};` +\n"
+ " ` ${name}.prototype.foo = function() {};` +\n"
+ " ` ${name};\\n`;\n"
+ " return (0, eval)(src);\n"
+ "}\n"
+ "function fooify(obj) { obj.foo(); };\n"
+ "var objs = [];\n"
+ "for (var i = 0; i < 50; i++) {\n"
+ " var Class = CreateClass('Class' + i);\n"
+ " var obj = new Class();\n"
+ " objs.push(obj);\n"
+ "}\n"
+ "for (var i = 0; i < 1000; i++) {\n"
+ " for (var obj of objs) {\n"
+ " fooify(obj);\n"
+ " }\n"
+ "}\n";
-static const char* kMegamorphicTestProgram =
- "function ClassA() { };"
- "function ClassB() { };"
- "ClassA.prototype.foo = function() { };"
- "ClassB.prototype.foo = function() { };"
- "function fooify(obj) { obj.foo(); };"
- "var a = new ClassA();"
- "var b = new ClassB();"
- "for (var i = 0; i < 10000; i++) {"
- " fooify(a);"
- " fooify(b);"
- "}";
-#endif
-
+void TestStubCache(bool primary) {
+  // The test does not work with the interpreter because bytecode handlers
+  // taken from the snapshot already refer to ICs with disabled counters, and
+  // there is no way to trigger recompilation of the bytecode handlers.
+ if (i::FLAG_ignition) return;
-static void StubCacheHelper(bool primary) {
-#ifdef ENABLE_DISASSEMBLER
i::FLAG_native_code_counters = true;
if (primary) {
i::FLAG_test_primary_stub_cache = true;
@@ -21359,36 +21519,83 @@ static void StubCacheHelper(bool primary) {
i::FLAG_test_secondary_stub_cache = true;
}
i::FLAG_crankshaft = false;
- LocalContext env;
- env->GetIsolate()->SetCounterFunction(LookupCounter);
- v8::HandleScope scope(env->GetIsolate());
- int initial_probes = probes_counter;
- int initial_misses = misses_counter;
- int initial_updates = updates_counter;
- CompileRun(kMegamorphicTestProgram);
- int probes = probes_counter - initial_probes;
- int misses = misses_counter - initial_misses;
- int updates = updates_counter - initial_updates;
- CHECK_LT(updates, 10);
- CHECK_LT(misses, 10);
- // TODO(verwaest): Update this test to overflow the degree of polymorphism
- // before megamorphism. The number of probes will only work once we teach the
- // serializer to embed references to counters in the stubs, given that the
- // megamorphic_stub_cache_probes is updated in a snapshot-generated stub.
- CHECK_GE(probes, 0);
-#endif
+ i::FLAG_turbo = false;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ create_params.counter_lookup_callback = LookupCounter;
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ LocalContext env(isolate);
+ v8::HandleScope scope(isolate);
+
+ {
+      // Enforce recompilation of IC stubs that access the megamorphic stub
+      // cache so that they respect the enabled native code counters and stub
+      // cache test flags.
+ i::CodeStub::Major code_stub_keys[] = {
+ i::CodeStub::LoadIC, i::CodeStub::LoadICTrampoline,
+ i::CodeStub::LoadICTF, i::CodeStub::LoadICTrampolineTF,
+ i::CodeStub::KeyedLoadIC, i::CodeStub::KeyedLoadICTrampoline,
+ i::CodeStub::StoreIC, i::CodeStub::StoreICTrampoline,
+ i::CodeStub::KeyedStoreIC, i::CodeStub::KeyedStoreICTrampoline,
+ };
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Heap* heap = i_isolate->heap();
+ i::Handle<i::UnseededNumberDictionary> dict(heap->code_stubs());
+ for (size_t i = 0; i < arraysize(code_stub_keys); i++) {
+ dict = i::UnseededNumberDictionary::DeleteKey(dict, code_stub_keys[i]);
+ }
+ heap->SetRootCodeStubs(*dict);
+ }
+
+ int initial_probes = probes_counter;
+ int initial_misses = misses_counter;
+ int initial_updates = updates_counter;
+ CompileRun(kMegamorphicTestProgram);
+ int probes = probes_counter - initial_probes;
+ int misses = misses_counter - initial_misses;
+ int updates = updates_counter - initial_updates;
+ const int kClassesCount = 50;
+ const int kIterationsCount = 1000;
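+    // These mirror the class count and loop count in kMegamorphicTestProgram.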
+ CHECK_LE(kClassesCount, updates);
+    // Check that the update and miss counts are bounded.
+    // If there are too many updates, the stub cache most likely does not
+    // work properly.
+ CHECK_LE(updates, kClassesCount * 2);
+ CHECK_LE(1, misses);
+ CHECK_LE(misses, kClassesCount * 2);
+ // 2 is for PREMONOMORPHIC and MONOMORPHIC states,
+ // 4 is for POLYMORPHIC states,
+    // and all the other probes are for the MEGAMORPHIC state.
+ CHECK_EQ(kIterationsCount * kClassesCount - 2 - 4, probes);
+ }
+ isolate->Dispose();
}
+} // namespace
+
+UNINITIALIZED_TEST(PrimaryStubCache) {
+ i::FLAG_tf_load_ic_stub = false;
+ TestStubCache(true);
+}
-TEST(SecondaryStubCache) {
- StubCacheHelper(true);
+UNINITIALIZED_TEST(SecondaryStubCache) {
+ i::FLAG_tf_load_ic_stub = false;
+ TestStubCache(false);
}
+UNINITIALIZED_TEST(PrimaryStubCacheTF) {
+ i::FLAG_tf_load_ic_stub = true;
+ TestStubCache(true);
+}
-TEST(PrimaryStubCache) {
- StubCacheHelper(false);
+UNINITIALIZED_TEST(SecondaryStubCacheTF) {
+ i::FLAG_tf_load_ic_stub = true;
+ TestStubCache(false);
}
+#endif // ENABLE_DISASSEMBLER
#ifdef DEBUG
static int cow_arrays_created_runtime = 0;
@@ -21960,31 +22167,55 @@ THREADED_TEST(Regress260106) {
CHECK(function->IsFunction());
}
-
THREADED_TEST(JSONParseObject) {
LocalContext context;
HandleScope scope(context->GetIsolate());
Local<Value> obj =
- v8::JSON::Parse(context->GetIsolate(), v8_str("{\"x\":42}"))
- .ToLocalChecked();
+ v8::JSON::Parse(context.local(), v8_str("{\"x\":42}")).ToLocalChecked();
Local<Object> global = context->Global();
global->Set(context.local(), v8_str("obj"), obj).FromJust();
ExpectString("JSON.stringify(obj)", "{\"x\":42}");
}
-
THREADED_TEST(JSONParseNumber) {
LocalContext context;
HandleScope scope(context->GetIsolate());
Local<Value> obj =
- v8::JSON::Parse(context->GetIsolate(), v8_str("42")).ToLocalChecked();
+ v8::JSON::Parse(context.local(), v8_str("42")).ToLocalChecked();
Local<Object> global = context->Global();
global->Set(context.local(), v8_str("obj"), obj).FromJust();
ExpectString("JSON.stringify(obj)", "42");
}
+THREADED_TEST(JSONStringifyObject) {
+ LocalContext context;
+ HandleScope scope(context->GetIsolate());
+ Local<Value> value =
+ v8::JSON::Parse(context.local(), v8_str("{\"x\":42}")).ToLocalChecked();
+ Local<Object> obj = value->ToObject(context.local()).ToLocalChecked();
+ Local<Object> global = context->Global();
+ global->Set(context.local(), v8_str("obj"), obj).FromJust();
+ Local<String> json =
+ v8::JSON::Stringify(context.local(), obj).ToLocalChecked();
+ v8::String::Utf8Value utf8(json);
+ ExpectString("JSON.stringify(obj)", *utf8);
+}
-#if V8_OS_POSIX && !V8_OS_NACL
+THREADED_TEST(JSONStringifyObjectWithGap) {
+ LocalContext context;
+ HandleScope scope(context->GetIsolate());
+ Local<Value> value =
+ v8::JSON::Parse(context.local(), v8_str("{\"x\":42}")).ToLocalChecked();
+ Local<Object> obj = value->ToObject(context.local()).ToLocalChecked();
+ Local<Object> global = context->Global();
+ global->Set(context.local(), v8_str("obj"), obj).FromJust();
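+  // Stringify's optional third argument is the gap (indentation) string.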
+ Local<String> json =
+ v8::JSON::Stringify(context.local(), obj, v8_str("*")).ToLocalChecked();
+ v8::String::Utf8Value utf8(json);
+ ExpectString("JSON.stringify(obj, null, '*')", *utf8);
+}
+
+#if V8_OS_POSIX
class ThreadInterruptTest {
public:
ThreadInterruptTest() : sem_(0), sem_value_(0) { }
@@ -22207,8 +22438,8 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%DeleteProperty_Sloppy(other, '1')");
CheckCorrectThrow("%DeleteProperty_Strict(other, '1')");
CheckCorrectThrow("Object.prototype.hasOwnProperty.call(other, 'x')");
- CheckCorrectThrow("%HasProperty('x', other)");
- CheckCorrectThrow("%PropertyIsEnumerable(other, 'x')");
+ CheckCorrectThrow("%HasProperty(other, 'x')");
+ CheckCorrectThrow("Object.prototype.propertyIsEnumerable(other, 'x')");
// PROPERTY_ATTRIBUTES_NONE = 0
CheckCorrectThrow("%DefineAccessorPropertyUnchecked("
"other, 'x', null, null, 1)");
@@ -22615,9 +22846,12 @@ THREADED_TEST(FunctionNew) {
->get_api_func_data()
->serial_number()),
i_isolate);
- auto cache = i_isolate->template_instantiations_cache();
- CHECK(cache->FindEntry(static_cast<uint32_t>(serial_number->value())) ==
+ auto slow_cache = i_isolate->slow_template_instantiations_cache();
+ CHECK(slow_cache->FindEntry(static_cast<uint32_t>(serial_number->value())) ==
i::UnseededNumberDictionary::kNotFound);
+ auto fast_cache = i_isolate->fast_template_instantiations_cache();
+ CHECK(fast_cache->get(static_cast<uint32_t>(serial_number->value()))
+ ->IsUndefined(i_isolate));
// Verify that each Function::New creates a new function instance
Local<Object> data2 = v8::Object::New(isolate);
function_new_expected_env = data2;
@@ -23344,6 +23578,88 @@ TEST(ScriptNameAndLineNumber) {
CHECK_EQ(13, line_number);
}
+TEST(ScriptPositionInfo) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ const char* url = "http://www.foo.com/foo.js";
+ v8::ScriptOrigin origin(v8_str(url), v8::Integer::New(isolate, 13));
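+  // The second ScriptOrigin argument is the zero-based line offset, so the
+  // first line of this script reports as line 13.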
+ v8::ScriptCompiler::Source script_source(v8_str("var foo;\n"
+ "var bar;\n"
+ "var fisk = foo + bar;\n"),
+ origin);
+ Local<Script> script =
+ v8::ScriptCompiler::Compile(env.local(), &script_source).ToLocalChecked();
+
+ i::Handle<i::SharedFunctionInfo> obj = i::Handle<i::SharedFunctionInfo>::cast(
+ v8::Utils::OpenHandle(*script->GetUnboundScript()));
+ CHECK(obj->script()->IsScript());
+
+ i::Handle<i::Script> script1(i::Script::cast(obj->script()));
+
+ v8::internal::Script::PositionInfo info;
+
+ // With offset.
+
+ // Behave as if 0 was passed if position is negative.
+ CHECK(script1->GetPositionInfo(-1, &info, script1->WITH_OFFSET));
+ CHECK_EQ(13, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(0, &info, script1->WITH_OFFSET));
+ CHECK_EQ(13, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(8, &info, script1->WITH_OFFSET));
+ CHECK_EQ(13, info.line);
+ CHECK_EQ(8, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(9, &info, script1->WITH_OFFSET));
+ CHECK_EQ(14, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(9, info.line_start);
+ CHECK_EQ(17, info.line_end);
+
+ // Fail when position is larger than script size.
+ CHECK(!script1->GetPositionInfo(220384, &info, script1->WITH_OFFSET));
+
+ // Without offset.
+
+ // Behave as if 0 was passed if position is negative.
+ CHECK(script1->GetPositionInfo(-1, &info, script1->NO_OFFSET));
+ CHECK_EQ(0, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(0, &info, script1->NO_OFFSET));
+ CHECK_EQ(0, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(8, &info, script1->NO_OFFSET));
+ CHECK_EQ(0, info.line);
+ CHECK_EQ(8, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(9, &info, script1->NO_OFFSET));
+ CHECK_EQ(1, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(9, info.line_start);
+ CHECK_EQ(17, info.line_end);
+
+ // Fail when position is larger than script size.
+ CHECK(!script1->GetPositionInfo(220384, &info, script1->NO_OFFSET));
+}
+
void CheckMagicComments(Local<Script> script, const char* expected_source_url,
const char* expected_source_mapping_url) {
if (expected_source_url != NULL) {
@@ -23623,7 +23939,6 @@ void RunStreamingTest(const char** chunks,
delete[] full_source;
}
-
TEST(StreamingSimpleScript) {
// This script is unrealistically small, since no one chunk is enough to fill
// the backing buffer of Scanner, let alone overflow it.
@@ -23632,6 +23947,35 @@ TEST(StreamingSimpleScript) {
RunStreamingTest(chunks);
}
+TEST(StreamingScriptConstantArray) {
+ // When run with Ignition, tests that the streaming parser canonicalizes
+ // handles so that they are only added to the constant pool array once.
+ const char* chunks[] = {"var a = {};",
+ "var b = {};",
+ "var c = 'testing';",
+ "var d = 'testing';",
+ "13;",
+ NULL};
+ RunStreamingTest(chunks);
+}
+
+TEST(StreamingScriptEvalShadowing) {
+ // When run with Ignition, tests that the streaming parser canonicalizes
+  // handles so that Variable::is_possibly_eval() is correct.
+ const char* chunk1 =
+ "(function() {\n"
+ " var y = 2;\n"
+ " return (function() {\n"
+ " eval('var y = 13;');\n"
+ " function g() {\n"
+ " return y\n"
+ " }\n"
+ " return g();\n"
+ " })()\n"
+ "})()\n";
+ const char* chunks[] = {chunk1, NULL};
+ RunStreamingTest(chunks);
+}
TEST(StreamingBiggerScript) {
const char* chunk1 =
@@ -24077,8 +24421,7 @@ void TestInvalidCacheData(v8::ScriptCompiler::CompileOptions option) {
script->Run(context).ToLocalChecked()->Int32Value(context).FromJust());
}
-
-TEST(InvalidCacheData) {
+TEST(InvalidParserCacheData) {
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
@@ -24086,6 +24429,12 @@ TEST(InvalidCacheData) {
// Cached parser data is not consumed while parsing eagerly.
TestInvalidCacheData(v8::ScriptCompiler::kConsumeParserCache);
}
+}
+
+TEST(InvalidCodeCacheData) {
+ v8::V8::Initialize();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext context;
TestInvalidCacheData(v8::ScriptCompiler::kConsumeCodeCache);
}
@@ -24168,6 +24517,7 @@ TEST(StringConcatOverflow) {
TEST(TurboAsmDisablesNeuter) {
+ i::FLAG_allow_natives_syntax = true;
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
@@ -24180,10 +24530,11 @@ TEST(TurboAsmDisablesNeuter) {
" return { load: load };"
"}"
"var buffer = new ArrayBuffer(4);"
- "Module(this, {}, buffer).load();"
+ "var module = Module(this, {}, buffer);"
+ "%OptimizeFunctionOnNextCall(module.load);"
+ "module.load();"
"buffer";
- i::FLAG_turbo_osr = false; // TODO(titzer): test requires eager TF.
v8::Local<v8::ArrayBuffer> result = CompileRun(load).As<v8::ArrayBuffer>();
CHECK_EQ(should_be_neuterable, result->IsNeuterable());
@@ -24195,10 +24546,11 @@ TEST(TurboAsmDisablesNeuter) {
" return { store: store };"
"}"
"var buffer = new ArrayBuffer(4);"
- "Module(this, {}, buffer).store();"
+ "var module = Module(this, {}, buffer);"
+ "%OptimizeFunctionOnNextCall(module.store);"
+ "module.store();"
"buffer";
- i::FLAG_turbo_osr = false; // TODO(titzer): test requires eager TF.
result = CompileRun(store).As<v8::ArrayBuffer>();
CHECK_EQ(should_be_neuterable, result->IsNeuterable());
}
@@ -24629,6 +24981,60 @@ TEST(Set) {
CHECK_EQ(0U, set->Size());
}
+TEST(SetDeleteThenAsArray) {
+ // https://bugs.chromium.org/p/v8/issues/detail?id=4946
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext env;
+
+ // make a Set
+ v8::Local<v8::Value> val = CompileRun("new Set([1, 2, 3])");
+ v8::Local<v8::Set> set = v8::Local<v8::Set>::Cast(val);
+ CHECK_EQ(3U, set->Size());
+
+ // delete the "middle" element (using AsArray to
+ // determine which element is the "middle" element)
+ v8::Local<v8::Array> array1 = set->AsArray();
+ CHECK_EQ(3U, array1->Length());
+ CHECK(set->Delete(env.local(), array1->Get(env.local(), 1).ToLocalChecked())
+ .FromJust());
+
+ // make sure there are no undefined values when we convert to an array again.
+ v8::Local<v8::Array> array2 = set->AsArray();
+ uint32_t length = array2->Length();
+ CHECK_EQ(2U, length);
+ for (uint32_t i = 0; i < length; i++) {
+ CHECK(!array2->Get(env.local(), i).ToLocalChecked()->IsUndefined());
+ }
+}
+
+TEST(MapDeleteThenAsArray) {
+ // https://bugs.chromium.org/p/v8/issues/detail?id=4946
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext env;
+
+ // make a Map
+ v8::Local<v8::Value> val = CompileRun("new Map([[1, 2], [3, 4], [5, 6]])");
+ v8::Local<v8::Map> map = v8::Local<v8::Map>::Cast(val);
+ CHECK_EQ(3U, map->Size());
+
+ // delete the "middle" element (using AsArray to
+ // determine which element is the "middle" element)
+ v8::Local<v8::Array> array1 = map->AsArray();
+ CHECK_EQ(6U, array1->Length());
+ // Map::AsArray returns a flat array, so the second key is at index 2.
+ v8::Local<v8::Value> key = array1->Get(env.local(), 2).ToLocalChecked();
+ CHECK(map->Delete(env.local(), key).FromJust());
+
+ // make sure there are no undefined values when we convert to an array again.
+ v8::Local<v8::Array> array2 = map->AsArray();
+ uint32_t length = array2->Length();
+ CHECK_EQ(4U, length);
+ for (uint32_t i = 0; i < length; i++) {
+ CHECK(!array2->Get(env.local(), i).ToLocalChecked()->IsUndefined());
+ }
+}
TEST(CompatibleReceiverCheckOnCachedICHandler) {
v8::Isolate* isolate = CcTest::isolate();
@@ -24748,7 +25154,7 @@ TEST(FutexInterruption) {
CompileRun(
"var ab = new SharedArrayBuffer(4);"
"var i32a = new Int32Array(ab);"
- "Atomics.futexWait(i32a, 0, 0);");
+ "Atomics.wait(i32a, 0, 0);");
CHECK(try_catch.HasTerminated());
timeout_thread.Join();
}
@@ -24960,6 +25366,7 @@ class MemoryPressureThread : public v8::base::Thread {
};
TEST(MemoryPressure) {
+ if (v8::internal::FLAG_optimize_for_size) return;
v8::Isolate* isolate = CcTest::isolate();
WeakCallCounter counter(1234);
@@ -25025,3 +25432,102 @@ TEST(PrivateForApiIsNumber) {
// Shouldn't crash.
v8::Private::ForApi(isolate, v8_str("42"));
}
+
+THREADED_TEST(ImmutableProto) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->InstanceTemplate()->SetImmutableProto();
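+  // Instances of this template refuse any later change to their prototype.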
+
+ Local<v8::Object> object = templ->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+
+ // Look up the prototype
+ Local<v8::Value> original_proto =
+ object->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
+
+ // Setting the prototype (e.g., to null) throws
+ CHECK(object->SetPrototype(context.local(), v8::Null(isolate)).IsNothing());
+
+ // The original prototype is still there
+ Local<Value> new_proto =
+ object->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
+ CHECK(new_proto->IsObject());
+ CHECK(new_proto.As<v8::Object>()
+ ->Equals(context.local(), original_proto)
+ .FromJust());
+}
+
+Local<v8::Context> call_eval_context;
+Local<v8::Function> call_eval_bound_function;
+
+static void CallEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Context::Scope scope(call_eval_context);
+ args.GetReturnValue().Set(
+ call_eval_bound_function
+ ->Call(call_eval_context, call_eval_context->Global(), 0, NULL)
+ .ToLocalChecked());
+}
+
+TEST(CrossActivationEval) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ {
+ call_eval_context = v8::Context::New(isolate);
+ v8::Context::Scope scope(call_eval_context);
+ call_eval_bound_function =
+ Local<Function>::Cast(CompileRun("eval.bind(this, '1')"));
+ }
+ env->Global()
+ ->Set(env.local(), v8_str("CallEval"),
+ v8::FunctionTemplate::New(isolate, CallEval)
+ ->GetFunction(env.local())
+ .ToLocalChecked())
+ .FromJust();
+ Local<Value> result = CompileRun("CallEval();");
+ CHECK(result->IsInt32());
+ CHECK_EQ(1, result->Int32Value(env.local()).FromJust());
+}
+
+TEST(EvalInAccessCheckedContext) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
+
+ obj_template->SetAccessCheckCallback(AccessAlwaysAllowed);
+
+ v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
+ v8::Local<Context> context1 = Context::New(isolate, NULL, obj_template);
+
+ Local<Value> foo = v8_str("foo");
+ Local<Value> bar = v8_str("bar");
+
+ // Set to different domains.
+ context0->SetSecurityToken(foo);
+ context1->SetSecurityToken(bar);
+
+ // Set up function in context0 that uses eval from context0.
+ context0->Enter();
+ v8::Local<v8::Value> fun = CompileRun(
+ "var x = 42;"
+ "(function() {"
+ " var e = eval;"
+ " return function(s) { return e(s); }"
+ "})()");
+ context0->Exit();
+
+ // Put the function into context1 and call it. Since the access check
+ // callback always returns true, the call succeeds even though the tokens
+ // are different.
+ context1->Enter();
+ context1->Global()->Set(context1, v8_str("fun"), fun).FromJust();
+ v8::Local<v8::Value> x_value = CompileRun("fun('x')");
+ CHECK_EQ(42, x_value->Int32Value(context1).FromJust());
+ context1->Exit();
+}
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index 8887a8a976..f9a335a7f4 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -5,7 +5,6 @@
#include "src/v8.h"
#include "src/isolate.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/vm-state.h"
#include "test/cctest/cctest.h"
@@ -14,10 +13,11 @@ static void CheckReturnValue(const T& t, i::Address callback) {
v8::ReturnValue<v8::Value> rv = t.GetReturnValue();
i::Object** o = *reinterpret_cast<i::Object***>(&rv);
CHECK_EQ(CcTest::isolate(), t.GetIsolate());
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(t.GetIsolate());
CHECK_EQ(t.GetIsolate(), rv.GetIsolate());
- CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
+ CHECK((*o)->IsTheHole(isolate) || (*o)->IsUndefined(isolate));
// Verify reset
- bool is_runtime = (*o)->IsTheHole();
+ bool is_runtime = (*o)->IsTheHole(isolate);
if (is_runtime) {
CHECK(rv.Get()->IsUndefined());
} else {
@@ -25,14 +25,13 @@ static void CheckReturnValue(const T& t, i::Address callback) {
CHECK_EQ(*v, *o);
}
rv.Set(true);
- CHECK(!(*o)->IsTheHole() && !(*o)->IsUndefined());
+ CHECK(!(*o)->IsTheHole(isolate) && !(*o)->IsUndefined(isolate));
rv.Set(v8::Local<v8::Object>());
- CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
- CHECK_EQ(is_runtime, (*o)->IsTheHole());
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(t.GetIsolate());
+ CHECK((*o)->IsTheHole(isolate) || (*o)->IsUndefined(isolate));
+ CHECK_EQ(is_runtime, (*o)->IsTheHole(isolate));
// If CPU profiler is active check that when API callback is invoked
// VMState is set to EXTERNAL.
- if (isolate->cpu_profiler()->is_profiling()) {
+ if (isolate->is_profiling()) {
CHECK_EQ(v8::EXTERNAL, isolate->current_vm_state());
CHECK(isolate->external_callback_scope());
CHECK_EQ(callback, isolate->external_callback_scope()->callback());
diff --git a/deps/v8/test/cctest/test-asm-validator.cc b/deps/v8/test/cctest/test-asm-validator.cc
deleted file mode 100644
index d5b51797d6..0000000000
--- a/deps/v8/test/cctest/test-asm-validator.cc
+++ /dev/null
@@ -1,2514 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/ast-expression-visitor.h"
-#include "src/ast/scopes.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/rewriter.h"
-#include "src/type-cache.h"
-#include "src/typing-asm.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/expression-type-collector.h"
-#include "test/cctest/expression-type-collector-macros.h"
-
-// Macros for function types.
-#define FUNC_FOREIGN_TYPE Bounds(Type::Function(Type::Any(), zone))
-#define FUNC_V_TYPE Bounds(Type::Function(Type::Undefined(), zone))
-#define FUNC_I_TYPE Bounds(Type::Function(cache.kAsmSigned, zone))
-#define FUNC_F_TYPE Bounds(Type::Function(cache.kAsmFloat, zone))
-#define FUNC_D_TYPE Bounds(Type::Function(cache.kAsmDouble, zone))
-#define FUNC_D2D_TYPE \
- Bounds(Type::Function(cache.kAsmDouble, cache.kAsmDouble, zone))
-#define FUNC_N2F_TYPE \
- Bounds(Type::Function(cache.kAsmFloat, Type::Number(), zone))
-#define FUNC_I2I_TYPE \
- Bounds(Type::Function(cache.kAsmSigned, cache.kAsmInt, zone))
-#define FUNC_II2D_TYPE \
- Bounds(Type::Function(cache.kAsmDouble, cache.kAsmInt, cache.kAsmInt, zone))
-#define FUNC_II2I_TYPE \
- Bounds(Type::Function(cache.kAsmSigned, cache.kAsmInt, cache.kAsmInt, zone))
-#define FUNC_DD2D_TYPE \
- Bounds(Type::Function(cache.kAsmDouble, cache.kAsmDouble, cache.kAsmDouble, \
- zone))
-#define FUNC_NN2N_TYPE \
- Bounds(Type::Function(Type::Number(), Type::Number(), Type::Number(), zone))
-#define FUNC_N2N_TYPE \
- Bounds(Type::Function(Type::Number(), Type::Number(), zone))
-
-// Macros for array types.
-#define FLOAT64_ARRAY_TYPE Bounds(Type::Array(cache.kAsmDouble, zone))
-#define FUNC_I2I_ARRAY_TYPE \
- Bounds(Type::Array(Type::Function(cache.kAsmSigned, cache.kAsmInt, zone), \
- zone))
-
-using namespace v8::internal;
-
-namespace {
-
-std::string Validate(Zone* zone, const char* source,
- ZoneVector<ExpressionTypeEntry>* types) {
- i::Isolate* isolate = CcTest::i_isolate();
- i::Factory* factory = isolate->factory();
-
- i::Handle<i::String> source_code =
- factory->NewStringFromUtf8(i::CStrVector(source)).ToHandleChecked();
-
- i::Handle<i::Script> script = factory->NewScript(source_code);
-
- i::ParseInfo info(zone, script);
- i::Parser parser(&info);
- parser.set_allow_harmony_sloppy(true);
- info.set_global();
- info.set_lazy(false);
- info.set_allow_lazy_parsing(false);
- info.set_toplevel(true);
-
- CHECK(i::Compiler::ParseAndAnalyze(&info));
-
- FunctionLiteral* root =
- info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun();
- AsmTyper typer(isolate, zone, *script, root);
- if (typer.Validate()) {
- ExpressionTypeCollector(isolate, root, types).Run();
- return "";
- } else {
- return typer.error_message();
- }
-}
-
-} // namespace
-
-
-TEST(ValidateMinimum) {
- const char test_function[] =
- "function GeometricMean(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- "\n"
- " var exp = stdlib.Math.exp;\n"
- " var log = stdlib.Math.log;\n"
- " var values = new stdlib.Float64Array(buffer);\n"
- "\n"
- " function logSum(start, end) {\n"
- " start = start|0;\n"
- " end = end|0;\n"
- "\n"
- " var sum = 0.0, p = 0, q = 0;\n"
- "\n"
- " // asm.js forces byte addressing of the heap by requiring shifting "
- "by 3\n"
- " for (p = start << 3, q = end << 3; (p|0) < (q|0); p = (p + 8)|0) {\n"
- " sum = sum + +log(values[p>>3]);\n"
- " }\n"
- "\n"
- " return +sum;\n"
- " }\n"
- "\n"
- " function geometricMean(start, end) {\n"
- " start = start|0;\n"
- " end = end|0;\n"
- "\n"
- " return +exp(+logSum(start, end) / +((end - start)|0));\n"
- " }\n"
- "\n"
- " return { geometricMean: geometricMean };\n"
- "}\n";
-
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("", Validate(zone, test_function, &types));
- TypeCache cache;
-
- CHECK_TYPES_BEGIN {
- // Module.
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- // function logSum
- CHECK_EXPR(FunctionLiteral, FUNC_II2D_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(start, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(start, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(end, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(end, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(sum, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(p, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(q, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- // for (p = start << 3, q = end << 3;
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(p, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(start, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(q, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(end, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- }
- // (p|0) < (q|0);
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(p, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(q, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- // p = (p + 8)|0) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(p, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_VAR(p, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- // sum = sum + +log(values[p>>3]);
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(sum, Bounds(cache.kAsmDouble));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(sum, Bounds(cache.kAsmDouble));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(log, FUNC_D2D_TYPE);
- CHECK_EXPR(Property, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(values, FLOAT64_ARRAY_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(p, Bounds(cache.kAsmSigned));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- }
- // return +sum;
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(sum, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- // function geometricMean
- CHECK_EXPR(FunctionLiteral, FUNC_II2D_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(start, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(start, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(end, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(end, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- // return +exp(+logSum(start, end) / +((end - start)|0));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(exp, FUNC_D2D_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(logSum, FUNC_II2D_TYPE);
- CHECK_VAR(start, Bounds(cache.kAsmInt));
- CHECK_VAR(end, Bounds(cache.kAsmInt));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_VAR(end, Bounds(cache.kAsmInt));
- CHECK_VAR(start, Bounds(cache.kAsmInt));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- // "use asm";
- CHECK_EXPR(Literal, Bounds(Type::String()));
- // var exp = stdlib.Math.exp;
- CHECK_EXPR(Assignment, FUNC_D2D_TYPE) {
- CHECK_VAR(exp, FUNC_D2D_TYPE);
- CHECK_EXPR(Property, FUNC_D2D_TYPE) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(stdlib, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // var log = stdlib.Math.log;
- CHECK_EXPR(Assignment, FUNC_D2D_TYPE) {
- CHECK_VAR(log, FUNC_D2D_TYPE);
- CHECK_EXPR(Property, FUNC_D2D_TYPE) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(stdlib, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // var values = new stdlib.Float64Array(buffer);
- CHECK_EXPR(Assignment, FLOAT64_ARRAY_TYPE) {
- CHECK_VAR(values, FLOAT64_ARRAY_TYPE);
- CHECK_EXPR(CallNew, FLOAT64_ARRAY_TYPE) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(stdlib, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_VAR(buffer, Bounds::Unbounded());
- }
- }
- // return { geometricMean: geometricMean };
- CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) {
- CHECK_VAR(geometricMean, FUNC_II2D_TYPE);
- }
- }
- }
- CHECK_TYPES_END
-}
-
-
-TEST(MissingUseAsm) {
- const char test_function[] =
- "function foo() {\n"
- " function bar() {}\n"
- " return { bar: bar };\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 1: missing \"use asm\"\n",
- Validate(zone, test_function, &types));
-}
-
-
-TEST(WrongUseAsm) {
- const char test_function[] =
- "function foo() {\n"
- " \"use wasm\"\n"
- " function bar() {}\n"
- " return { bar: bar };\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 1: missing \"use asm\"\n",
- Validate(zone, test_function, &types));
-}
-
-
-TEST(MissingReturnExports) {
- const char test_function[] =
- "function foo() {\n"
- " \"use asm\"\n"
- " function bar() {}\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 2: last statement in module is not a return\n",
- Validate(zone, test_function, &types));
-}
-
-#define HARNESS_STDLIB() \
- "var Infinity = stdlib.Infinity; " \
- "var NaN = stdlib.NaN; " \
- "var acos = stdlib.Math.acos; " \
- "var asin = stdlib.Math.asin; " \
- "var atan = stdlib.Math.atan; " \
- "var cos = stdlib.Math.cos; " \
- "var sin = stdlib.Math.sin; " \
- "var tan = stdlib.Math.tan; " \
- "var exp = stdlib.Math.exp; " \
- "var log = stdlib.Math.log; " \
- "var ceil = stdlib.Math.ceil; " \
- "var floor = stdlib.Math.floor; " \
- "var sqrt = stdlib.Math.sqrt; " \
- "var min = stdlib.Math.min; " \
- "var max = stdlib.Math.max; " \
- "var atan2 = stdlib.Math.atan2; " \
- "var pow = stdlib.Math.pow; " \
- "var abs = stdlib.Math.abs; " \
- "var imul = stdlib.Math.imul; " \
- "var fround = stdlib.Math.fround; " \
- "var E = stdlib.Math.E; " \
- "var LN10 = stdlib.Math.LN10; " \
- "var LN2 = stdlib.Math.LN2; " \
- "var LOG2E = stdlib.Math.LOG2E; " \
- "var LOG10E = stdlib.Math.LOG10E; " \
- "var PI = stdlib.Math.PI; " \
- "var SQRT1_2 = stdlib.Math.SQRT1_2; " \
- "var SQRT2 = stdlib.Math.SQRT2; "
-
-#define HARNESS_HEAP() \
- "var u8 = new stdlib.Uint8Array(buffer); " \
- "var i8 = new stdlib.Int8Array(buffer); " \
- "var u16 = new stdlib.Uint16Array(buffer); " \
- "var i16 = new stdlib.Int16Array(buffer); " \
- "var u32 = new stdlib.Uint32Array(buffer); " \
- "var i32 = new stdlib.Int32Array(buffer); " \
- "var f32 = new stdlib.Float32Array(buffer); " \
- "var f64 = new stdlib.Float64Array(buffer); "
-
-#define HARNESS_PREAMBLE() \
- const char test_function[] = \
- "function Module(stdlib, foreign, buffer) { " \
- "\"use asm\"; " HARNESS_STDLIB() HARNESS_HEAP()
-
-#define HARNESS_POSTAMBLE() \
- "return { foo: foo }; " \
- "} ";
-
-#define CHECK_VAR_MATH_SHORTCUT(name, type) \
- CHECK_EXPR(Assignment, type) { \
- CHECK_VAR(name, type); \
- CHECK_EXPR(Property, type) { \
- CHECK_EXPR(Property, Bounds::Unbounded()) { \
- CHECK_VAR(stdlib, Bounds::Unbounded()); \
- CHECK_EXPR(Literal, Bounds::Unbounded()); \
- } \
- CHECK_EXPR(Literal, Bounds::Unbounded()); \
- } \
- }
-
-
-#define CHECK_VAR_SHORTCUT(name, type) \
- CHECK_EXPR(Assignment, type) { \
- CHECK_VAR(name, type); \
- CHECK_EXPR(Property, type) { \
- CHECK_VAR(stdlib, Bounds::Unbounded()); \
- CHECK_EXPR(Literal, Bounds::Unbounded()); \
- } \
- }
-
-
-#define CHECK_VAR_NEW_SHORTCUT(name, type) \
- CHECK_EXPR(Assignment, type) { \
- CHECK_VAR(name, type); \
- CHECK_EXPR(CallNew, type) { \
- CHECK_EXPR(Property, Bounds::Unbounded()) { \
- CHECK_VAR(stdlib, Bounds::Unbounded()); \
- CHECK_EXPR(Literal, Bounds::Unbounded()); \
- } \
- CHECK_VAR(buffer, Bounds::Unbounded()); \
- } \
- }
-
-
-namespace {
-
-void CheckStdlibShortcuts1(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
- size_t& index, int& depth, TypeCache& cache) {
-  // var x = stdlib.x;
- CHECK_VAR_SHORTCUT(Infinity, Bounds(cache.kAsmDouble));
- CHECK_VAR_SHORTCUT(NaN, Bounds(cache.kAsmDouble));
- // var x = stdlib.Math.x;
- CHECK_VAR_MATH_SHORTCUT(acos, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(asin, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(atan, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(cos, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(sin, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(tan, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(exp, FUNC_D2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(log, FUNC_D2D_TYPE);
-
- CHECK_VAR_MATH_SHORTCUT(ceil, FUNC_N2N_TYPE);
- CHECK_VAR_MATH_SHORTCUT(floor, FUNC_N2N_TYPE);
- CHECK_VAR_MATH_SHORTCUT(sqrt, FUNC_N2N_TYPE);
-
- CHECK_VAR_MATH_SHORTCUT(min, FUNC_NN2N_TYPE);
- CHECK_VAR_MATH_SHORTCUT(max, FUNC_NN2N_TYPE);
-
- CHECK_VAR_MATH_SHORTCUT(atan2, FUNC_DD2D_TYPE);
- CHECK_VAR_MATH_SHORTCUT(pow, FUNC_DD2D_TYPE);
-
- CHECK_VAR_MATH_SHORTCUT(abs, FUNC_N2N_TYPE);
- CHECK_VAR_MATH_SHORTCUT(imul, FUNC_II2I_TYPE);
- CHECK_VAR_MATH_SHORTCUT(fround, FUNC_N2F_TYPE);
-}
-
-
-void CheckStdlibShortcuts2(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
- size_t& index, int& depth, TypeCache& cache) {
-  // var x = stdlib.Math.*; (8 double constants)
- CHECK_VAR_MATH_SHORTCUT(E, Bounds(cache.kAsmDouble));
- CHECK_VAR_MATH_SHORTCUT(LN10, Bounds(cache.kAsmDouble));
- CHECK_VAR_MATH_SHORTCUT(LN2, Bounds(cache.kAsmDouble));
- CHECK_VAR_MATH_SHORTCUT(LOG2E, Bounds(cache.kAsmDouble));
- CHECK_VAR_MATH_SHORTCUT(LOG10E, Bounds(cache.kAsmDouble));
- CHECK_VAR_MATH_SHORTCUT(PI, Bounds(cache.kAsmDouble));
- CHECK_VAR_MATH_SHORTCUT(SQRT1_2, Bounds(cache.kAsmDouble));
- CHECK_VAR_MATH_SHORTCUT(SQRT2, Bounds(cache.kAsmDouble));
- // var values = new stdlib.*Array(buffer);
- CHECK_VAR_NEW_SHORTCUT(u8, Bounds(cache.kUint8Array));
- CHECK_VAR_NEW_SHORTCUT(i8, Bounds(cache.kInt8Array));
- CHECK_VAR_NEW_SHORTCUT(u16, Bounds(cache.kUint16Array));
- CHECK_VAR_NEW_SHORTCUT(i16, Bounds(cache.kInt16Array));
- CHECK_VAR_NEW_SHORTCUT(u32, Bounds(cache.kUint32Array));
- CHECK_VAR_NEW_SHORTCUT(i32, Bounds(cache.kInt32Array));
- CHECK_VAR_NEW_SHORTCUT(f32, Bounds(cache.kFloat32Array));
- CHECK_VAR_NEW_SHORTCUT(f64, Bounds(cache.kFloat64Array));
-}
-
-} // namespace
-
-
-#define CHECK_FUNC_TYPES_BEGIN(func) \
- HARNESS_PREAMBLE() \
- func "\n" HARNESS_POSTAMBLE(); \
- \
- v8::V8::Initialize(); \
- HandleAndZoneScope handles; \
- Zone* zone = handles.main_zone(); \
- ZoneVector<ExpressionTypeEntry> types(zone); \
- CHECK_EQ("", Validate(zone, test_function, &types)); \
- TypeCache cache; \
- \
- CHECK_TYPES_BEGIN { \
- /* Module. */ \
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
-#define CHECK_FUNC_TYPES_END_1() \
- /* "use asm"; */ \
- CHECK_EXPR(Literal, Bounds(Type::String())); \
- /* stdlib shortcuts. */ \
- CheckStdlibShortcuts1(zone, types, index, depth, cache); \
- CheckStdlibShortcuts2(zone, types, index, depth, cache);
-
-#define CHECK_FUNC_TYPES_END_2() \
- /* return { foo: foo }; */ \
- CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) { \
- CHECK_VAR(foo, FUNC_V_TYPE); \
- } \
- } \
- } \
- CHECK_TYPES_END
-
-
-#define CHECK_FUNC_TYPES_END \
- CHECK_FUNC_TYPES_END_1(); \
- CHECK_FUNC_TYPES_END_2();
-
-
-#define CHECK_FUNC_ERROR(func, message) \
- HARNESS_PREAMBLE() \
- func "\n" HARNESS_POSTAMBLE(); \
- \
- v8::V8::Initialize(); \
- HandleAndZoneScope handles; \
- Zone* zone = handles.main_zone(); \
- ZoneVector<ExpressionTypeEntry> types(zone); \
- CHECK_EQ(message, Validate(zone, test_function, &types));
-
-
-TEST(BareHarness) {
- CHECK_FUNC_TYPES_BEGIN("function foo() {}") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {}
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(ReturnVoid) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { return; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- // return undefined;
- CHECK_EXPR(Literal, Bounds(Type::Undefined()));
- }
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined())) {
- CHECK_VAR(bar, FUNC_V_TYPE);
- }
- }
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(EmptyBody) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE);
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined())) {
- CHECK_VAR(bar, FUNC_V_TYPE);
- }
- }
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(DoesNothing) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1.0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined())) {
- CHECK_VAR(bar, FUNC_V_TYPE);
- }
- }
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(ReturnInt32Literal) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { return 1; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- // return 1;
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(bar, FUNC_I_TYPE);
- }
- }
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(ReturnFloat64Literal) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { return 1.0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
- // return 1.0;
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(bar, FUNC_D_TYPE);
- }
- }
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(ReturnFloat32Literal) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { return fround(1.0); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
- // return fround(1.0);
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) { CHECK_VAR(bar, FUNC_F_TYPE); }
- }
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(ReturnFloat64Var) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1.0; return +x; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
-      // var x = 1.0;
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
-      // return +x;
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(bar, FUNC_D_TYPE);
- }
- }
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(Addition2) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 2; return (x+y)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-#define TEST_COMPARE_OP(name, op) \
- TEST(name) { \
- CHECK_FUNC_TYPES_BEGIN("function bar() { return (0 " op \
- " 0)|0; }\n" \
- "function foo() { bar(); }") { \
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) { \
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) { \
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) { \
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
- } \
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
- } \
- } \
- CHECK_SKIP(); \
- } \
- CHECK_FUNC_TYPES_END \
- }
-
-
-TEST_COMPARE_OP(EqOperator, "==")
-TEST_COMPARE_OP(LtOperator, "<")
-TEST_COMPARE_OP(LteOperator, "<=")
-TEST_COMPARE_OP(GtOperator, ">")
-TEST_COMPARE_OP(GteOperator, ">=")
-
-
-TEST(NeqOperator) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { return (0 != 0)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(UnaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(NotOperator) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 0; return (!x)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(UnaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(InvertOperator) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 0; return (~x)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(InvertConversion) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 0.0; return (~~x)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(Ternary) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; return (x?y:5)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Conditional, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-#define TEST_INT_BIN_OP(name, op) \
- TEST(name) { \
- CHECK_FUNC_TYPES_BEGIN("function bar() { var x = 0; return (x " op \
- " 123)|0; }\n" \
- "function foo() { bar(); }") { \
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) { \
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) { \
- CHECK_VAR(x, Bounds(cache.kAsmInt)); \
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
- } \
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) { \
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) { \
- CHECK_VAR(x, Bounds(cache.kAsmInt)); \
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
- } \
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum)); \
- } \
- } \
- CHECK_SKIP(); \
- } \
- CHECK_FUNC_TYPES_END \
- }
-
-
-TEST_INT_BIN_OP(AndOperator, "&")
-TEST_INT_BIN_OP(OrOperator, "|")
-TEST_INT_BIN_OP(XorOperator, "^")
-
-
-TEST(SignedCompare) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; return ((x|0) < (y|0))|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(SignedCompareConst) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; return ((x|0) < (1<<31))|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(UnsignedCompare) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; return ((x>>>0) < (y>>>0))|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(UnsignedCompareConst0) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; return ((x>>>0) < (0>>>0))|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(UnsignedCompareConst1) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; return ((x>>>0) < "
- "(0xffffffff>>>0))|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(CompareOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmUnsigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(UnsignedDivide) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; return ((x>>>0) / (y>>>0))|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(Type::None(), Type::Any())) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(UnsignedFromFloat64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1.0; return (x>>>0)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: left bitwise operand expected to be an integer\n");
-}
-
-
-TEST(AndFloat64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1.0; return (x&0)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: left bitwise operand expected to be an integer\n");
-}
-
-
-TEST(TypeMismatchAddInt32Float64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1.0; var y = 0; return (x + y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: ill-typed arithmetic operation\n");
-}
-
-
-TEST(TypeMismatchSubInt32Float64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1.0; var y = 0; return (x - y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: ill-typed arithmetic operation\n");
-}
-
-
-TEST(TypeMismatchDivInt32Float64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1.0; var y = 0; return (x / y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: ill-typed arithmetic operation\n");
-}
-
-
-TEST(TypeMismatchModInt32Float64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1.0; var y = 0; return (x % y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: ill-typed arithmetic operation\n");
-}
-
-
-TEST(ModFloat32) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = fround(1.0); return (x % x)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: ill-typed arithmetic operation\n");
-}
-
-
-TEST(TernaryMismatchInt32Float64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 0.0; return (1 ? x : y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: then and else expressions in ? must have the same type\n");
-}
-
-
-TEST(TernaryMismatchIntish) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 0; return (1 ? x + x : y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: invalid type in ? then expression\n");
-}
-
-
-TEST(TernaryMismatchInt32Float32) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2.0; return (x?fround(y):x)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: then and else expressions in ? must have the same type\n");
-}
-
-
-TEST(TernaryBadCondition) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2.0; return (y?x:1)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: condition must be of type int\n");
-}
-
-TEST(BadIntishMultiply) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; return ((x + x) * 4) | 0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: intish not allowed in multiply\n");
-}
-
-TEST(IntToFloat32) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; return fround(x); }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal function argument type\n");
-}
-
-TEST(Int32ToFloat32) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; return fround(x|0); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(Uint32ToFloat32) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; return fround(x>>>0); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(Float64ToFloat32) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1.0; return fround(x); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(Int32ToFloat32ToInt32) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; return ~~fround(x|0) | 0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(Addition4) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 2; return (x+y+x+y)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- }
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- }
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(Multiplication2) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2; return (x*y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: multiply must be by an integer literal\n");
-}
-
-
-TEST(Division4) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2; return (x/y/x/y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: too many consecutive multiplicative ops\n");
-}
-
-
-TEST(CompareToStringLeft) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; return ('hi' > x)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: bad type on left side of comparison\n");
-}
-
-
-TEST(CompareToStringRight) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; return (x < 'hi')|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: bad type on right side of comparison\n");
-}
-
-
-TEST(CompareMismatchInt32Float64) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2.0; return (x < y)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: left and right side of comparison must match\n");
-}
-
-
-TEST(CompareMismatchInt32Uint32) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2; return ((x|0) < (y>>>0))|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: left and right side of comparison must match\n");
-}
-
-
-TEST(CompareMismatchInt32Float32) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; var y = 2.0; return (x < fround(y))|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: left and right side of comparison must match\n");
-}
-
-
-TEST(Float64ToInt32) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 0.0; x = ~~y; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(y, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(y, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(Load1) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = i8[x>>0]|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Property, Bounds(cache.kAsmInt)) {
- CHECK_VAR(i8, Bounds(cache.kInt8Array));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(LoadDouble) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 0.0; y = +f64[x>>3]; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(y, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(y, Bounds(cache.kAsmDouble));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Property, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(f64, Bounds(cache.kFloat64Array));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmSigned));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(Store1) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; i8[x>>0] = 0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(i8, Bounds(cache.kInt8Array));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(StoreFloat) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = fround(1.0); "
- "f32[0] = fround(x + fround(1.0)); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(f32, Bounds(cache.kFloat32Array));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(StoreIntish) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = 1; i32[0] = x + y; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(i32, Bounds(cache.kInt32Array));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(StoreFloatish) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { "
- "var x = fround(1.0); "
- "var y = fround(1.0); f32[0] = x + y; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(y, Bounds(cache.kAsmFloat));
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(f32, Bounds(cache.kFloat32Array));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- CHECK_VAR(y, Bounds(cache.kAsmFloat));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(Load1Constant) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 1; var y = i8[5]|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Property, Bounds(cache.kAsmInt)) {
- CHECK_VAR(i8, Bounds(cache.kInt8Array));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(FunctionTables) {
- CHECK_FUNC_TYPES_BEGIN(
- "function func1(x) { x = x | 0; return (x * 5) | 0; }\n"
- "function func2(x) { x = x | 0; return (x * 25) | 0; }\n"
- "var table1 = [func1, func2];\n"
- "function bar(x, y) { x = x | 0; y = y | 0;\n"
- " return table1[x & 1](y)|0; }\n"
- "function foo() { bar(1, 2); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(FunctionLiteral, FUNC_II2I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Property, FUNC_I2I_TYPE) {
- CHECK_VAR(table1, FUNC_I2I_ARRAY_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- // TODO(bradnelson): revert this
- // CHECK_VAR(x, Bounds(cache.kAsmSigned));
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_VAR(y, Bounds(cache.kAsmInt));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END_1();
- CHECK_EXPR(Assignment, FUNC_I2I_ARRAY_TYPE) {
- CHECK_VAR(table1, FUNC_I2I_ARRAY_TYPE);
- CHECK_EXPR(ArrayLiteral, FUNC_I2I_ARRAY_TYPE) {
- CHECK_VAR(func1, FUNC_I2I_TYPE);
- CHECK_VAR(func2, FUNC_I2I_TYPE);
- }
- }
- CHECK_FUNC_TYPES_END_2();
-}
-
-
-TEST(BadFunctionTable) {
- CHECK_FUNC_ERROR(
- "function func1(x) { x = x | 0; return (x * 5) | 0; }\n"
- "var table1 = [func1, 1];\n"
- "function bar(x, y) { x = x | 0; y = y | 0;\n"
- " return table1[x & 1](y)|0; }\n"
- "function foo() { bar(1, 2); }",
- "asm: line 2: array component expected to be a function\n");
-}
-
-
-TEST(MissingParameterTypes) {
- CHECK_FUNC_ERROR(
- "function bar(x) { var y = 1; }\n"
- "function foo() { bar(2); }",
- "asm: line 1: missing parameter type annotations\n");
-}
-
-
-TEST(InvalidTypeAnnotationBinaryOpDiv) {
- CHECK_FUNC_ERROR(
- "function bar(x) { x = x / 4; }\n"
- "function foo() { bar(2); }",
- "asm: line 1: invalid type annotation on binary op\n");
-}
-
-
-TEST(InvalidTypeAnnotationBinaryOpMul) {
- CHECK_FUNC_ERROR(
- "function bar(x) { x = x * 4.0; }\n"
- "function foo() { bar(2); }",
- "asm: line 1: invalid type annotation on binary op\n");
-}
-
-
-TEST(InvalidArgumentCount) {
- CHECK_FUNC_ERROR(
- "function bar(x) { return fround(4, 5); }\n"
- "function foo() { bar(); }",
- "asm: line 1: invalid argument count calling function\n");
-}
-
-
-TEST(InvalidTypeAnnotationArity) {
- CHECK_FUNC_ERROR(
- "function bar(x) { x = max(x); }\n"
- "function foo() { bar(3); }",
- "asm: line 1: only fround allowed on expression annotations\n");
-}
-
-
-TEST(InvalidTypeAnnotationOnlyFround) {
- CHECK_FUNC_ERROR(
- "function bar(x) { x = sin(x); }\n"
- "function foo() { bar(3); }",
- "asm: line 1: only fround allowed on expression annotations\n");
-}
-
-
-TEST(InvalidTypeAnnotation) {
- CHECK_FUNC_ERROR(
- "function bar(x) { x = (x+x)(x); }\n"
- "function foo() { bar(3); }",
- "asm: line 1: invalid type annotation\n");
-}
-
-
-TEST(WithStatement) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 0; with (x) { x = x + 1; } }\n"
- "function foo() { bar(); }",
- "asm: line 1: bad with statement\n");
-}
-
-
-TEST(NestedFunction) {
- CHECK_FUNC_ERROR(
- "function bar() { function x() { return 1; } }\n"
- "function foo() { bar(); }",
- "asm: line 1: function declared inside another\n");
-}
-
-
-TEST(UnboundVariable) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = y; }\n"
- "function foo() { bar(); }",
- "asm: line 1: unbound variable\n");
-}
-
-
-TEST(EqStrict) {
- CHECK_FUNC_ERROR(
- "function bar() { return (0 === 0)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal comparison operator\n");
-}
-
-
-TEST(NeStrict) {
- CHECK_FUNC_ERROR(
- "function bar() { return (0 !== 0)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal comparison operator\n");
-}
-
-
-TEST(InstanceOf) {
- const char* errorMsg = FLAG_harmony_instanceof
- ? "asm: line 0: do-expression encountered\n"
- : "asm: line 1: illegal comparison operator\n";
-
- CHECK_FUNC_ERROR(
- "function bar() { return (0 instanceof 0)|0; }\n"
- "function foo() { bar(); }",
- errorMsg);
-}
-
-
-TEST(InOperator) {
- CHECK_FUNC_ERROR(
- "function bar() { return (0 in 0)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal comparison operator\n");
-}
-
-
-TEST(LogicalAndOperator) {
- CHECK_FUNC_ERROR(
- "function bar() { return (0 && 0)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal logical operator\n");
-}
-
-
-TEST(LogicalOrOperator) {
- CHECK_FUNC_ERROR(
- "function bar() { return (0 || 0)|0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal logical operator\n");
-}
-
-TEST(BitOrDouble) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1.0; return x | 0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: intish required\n");
-}
-
-TEST(BadLiteral) {
- CHECK_FUNC_ERROR(
- "function bar() { return true | 0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal literal\n");
-}
-
-
-TEST(MismatchedReturnTypeLiteral) {
- CHECK_FUNC_ERROR(
- "function bar() { if(1) { return 1; } return 1.0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: return type does not match function signature\n");
-}
-
-
-TEST(MismatchedReturnTypeExpression) {
- CHECK_FUNC_ERROR(
- "function bar() {\n"
- " var x = 1; var y = 1.0; if(1) { return x; } return +y; }\n"
- "function foo() { bar(); }",
- "asm: line 2: return type does not match function signature\n");
-}
-
-
-TEST(AssignToFloatishToF64) {
- CHECK_FUNC_ERROR(
- "function bar() { var v = fround(1.0); f64[0] = v + fround(1.0); }\n"
- "function foo() { bar(); }",
- "asm: line 1: floatish assignment to double array\n");
-}
-
-
-TEST(ForeignFunction) {
- CHECK_FUNC_TYPES_BEGIN(
- "var baz = foreign.baz;\n"
- "function bar() { return baz(1, 2)|0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(baz, FUNC_FOREIGN_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(bar, FUNC_I_TYPE);
- }
- }
- }
- CHECK_FUNC_TYPES_END_1()
- CHECK_EXPR(Assignment, Bounds(FUNC_FOREIGN_TYPE)) {
- CHECK_VAR(baz, Bounds(FUNC_FOREIGN_TYPE));
- CHECK_EXPR(Property, Bounds(FUNC_FOREIGN_TYPE)) {
- CHECK_VAR(foreign, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- CHECK_FUNC_TYPES_END_2()
-}
-
-TEST(ByteArray) {
- // Forbidden by asm.js spec, present in embenchen.
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 0; i8[x] = 2; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(i8, Bounds(cache.kInt8Array));
- CHECK_VAR(x, Bounds(cache.kAsmSigned));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(BadExports) {
- HARNESS_PREAMBLE()
- "function foo() {};\n"
- "return {foo: foo, bar: 1};"
- "}\n";
-
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 2: non-function in function table\n",
- Validate(zone, test_function, &types));
-}
-
-
-TEST(NestedHeapAssignment) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 0; i16[x = 1] = 2; }\n"
- "function foo() { bar(); }",
- "asm: line 1: expected >> in heap access\n");
-}
-
-TEST(BadOperatorHeapAssignment) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 0; i16[x & 1] = 2; }\n"
- "function foo() { bar(); }",
- "asm: line 1: expected >> in heap access\n");
-}
-
-
-TEST(BadArrayAssignment) {
- CHECK_FUNC_ERROR(
- "function bar() { i8[0] = 0.0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal type in assignment\n");
-}
-
-
-TEST(BadStandardFunctionCallOutside) {
- CHECK_FUNC_ERROR(
- "var s0 = sin(0);\n"
- "function bar() { }\n"
- "function foo() { bar(); }",
- "asm: line 1: illegal variable reference in module body\n");
-}
-
-
-TEST(BadFunctionCallOutside) {
- CHECK_FUNC_ERROR(
- "function bar() { return 0.0; }\n"
- "var s0 = bar(0);\n"
- "function foo() { bar(); }",
- "asm: line 2: illegal variable reference in module body\n");
-}
-
-TEST(UnaryPlusOnIntForbidden) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 1; return +x; }\n"
- "function foo() { bar(); }",
- "asm: line 1: "
- "unary + only allowed on signed, unsigned, float?, or double?\n");
-}
-
-TEST(MultiplyNon1ConvertForbidden) {
- CHECK_FUNC_ERROR(
- "function bar() { var x = 0.0; return x * 2.0; }\n"
- "function foo() { bar(); }",
- "asm: line 1: invalid type annotation on binary op\n");
-}
-
-TEST(NestedVariableAssignment) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 0; x = x = 4; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(NestedAssignmentInHeap) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 0; i8[(x = 1) >> 0] = 2; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(i8, Bounds(cache.kInt8Array));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(NegativeDouble) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = -123.2; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(NegativeInteger) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = -123; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(AbsFunction) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = -123.0; x = abs(x); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(abs, FUNC_N2N_TYPE);
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(CeilFloat) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = fround(3.1); x = ceil(x); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(ceil, FUNC_N2N_TYPE);
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(FloatReturnAsDouble) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = fround(3.1); return +fround(x); }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
- CHECK_VAR(fround, FUNC_N2F_TYPE);
- CHECK_VAR(x, Bounds(cache.kAsmFloat));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(TypeConsistency) {
- v8::V8::Initialize();
- TypeCache cache;
- // Check the consistency of each of the main Asm.js types.
- CHECK(cache.kAsmFixnum->Is(cache.kAsmFixnum));
- CHECK(cache.kAsmFixnum->Is(cache.kAsmSigned));
- CHECK(cache.kAsmFixnum->Is(cache.kAsmUnsigned));
- CHECK(cache.kAsmFixnum->Is(cache.kAsmInt));
- CHECK(!cache.kAsmFixnum->Is(cache.kAsmFloat));
- CHECK(!cache.kAsmFixnum->Is(cache.kAsmDouble));
-
- CHECK(cache.kAsmSigned->Is(cache.kAsmSigned));
- CHECK(cache.kAsmSigned->Is(cache.kAsmInt));
- CHECK(!cache.kAsmSigned->Is(cache.kAsmFixnum));
- CHECK(!cache.kAsmSigned->Is(cache.kAsmUnsigned));
- CHECK(!cache.kAsmSigned->Is(cache.kAsmFloat));
- CHECK(!cache.kAsmSigned->Is(cache.kAsmDouble));
-
- CHECK(cache.kAsmUnsigned->Is(cache.kAsmUnsigned));
- CHECK(cache.kAsmUnsigned->Is(cache.kAsmInt));
- CHECK(!cache.kAsmUnsigned->Is(cache.kAsmSigned));
- CHECK(!cache.kAsmUnsigned->Is(cache.kAsmFixnum));
- CHECK(!cache.kAsmUnsigned->Is(cache.kAsmFloat));
- CHECK(!cache.kAsmUnsigned->Is(cache.kAsmDouble));
-
- CHECK(cache.kAsmInt->Is(cache.kAsmInt));
- CHECK(!cache.kAsmInt->Is(cache.kAsmUnsigned));
- CHECK(!cache.kAsmInt->Is(cache.kAsmSigned));
- CHECK(!cache.kAsmInt->Is(cache.kAsmFixnum));
- CHECK(!cache.kAsmInt->Is(cache.kAsmFloat));
- CHECK(!cache.kAsmInt->Is(cache.kAsmDouble));
-
- CHECK(cache.kAsmFloat->Is(cache.kAsmFloat));
- CHECK(!cache.kAsmFloat->Is(cache.kAsmInt));
- CHECK(!cache.kAsmFloat->Is(cache.kAsmUnsigned));
- CHECK(!cache.kAsmFloat->Is(cache.kAsmSigned));
- CHECK(!cache.kAsmFloat->Is(cache.kAsmFixnum));
- CHECK(!cache.kAsmFloat->Is(cache.kAsmDouble));
-
- CHECK(cache.kAsmDouble->Is(cache.kAsmDouble));
- CHECK(!cache.kAsmDouble->Is(cache.kAsmInt));
- CHECK(!cache.kAsmDouble->Is(cache.kAsmUnsigned));
- CHECK(!cache.kAsmDouble->Is(cache.kAsmSigned));
- CHECK(!cache.kAsmDouble->Is(cache.kAsmFixnum));
- CHECK(!cache.kAsmDouble->Is(cache.kAsmFloat));
-}
-
-
-TEST(SwitchTest) {
- CHECK_FUNC_TYPES_BEGIN(
- "function switcher(x) {\n"
- " x = x|0;\n"
- " switch (x|0) {\n"
- " case 1: return 23;\n"
- " case 2: return 43;\n"
- " default: return 66;\n"
- " }\n"
- " return 0;\n"
- "}\n"
- "function foo() { switcher(1); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(.switch_tag, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(Type::Undefined()));
- CHECK_VAR(.switch_tag, Bounds(cache.kAsmSigned));
- // case 1: return 23;
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- // case 2: return 43;
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- // default: return 66;
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- // return 0;
- CHECK_EXPR(Literal, Bounds(cache.kAsmSigned));
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-
-TEST(BadSwitchRange) {
- CHECK_FUNC_ERROR(
- "function bar() { switch (1) { case -1: case 0x7fffffff: } }\n"
- "function foo() { bar(); }",
- "asm: line 1: case range too large\n");
-}
-
-
-TEST(DuplicateSwitchCase) {
- CHECK_FUNC_ERROR(
- "function bar() { switch (1) { case 0: case 0: } }\n"
- "function foo() { bar(); }",
- "asm: line 1: duplicate case value\n");
-}
-
-
-TEST(BadSwitchOrder) {
- CHECK_FUNC_ERROR(
- "function bar() { switch (1) { default: case 0: } }\n"
- "function foo() { bar(); }",
- "asm: line 1: default case out of order\n");
-}
-
-TEST(BadForeignCall) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var ffunc = foreign.foo;\n"
- " function test1() { var x = 0; ffunc(x); }\n"
- " return { testFunc1: test1 };\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ(
- "asm: line 4: foreign call argument expected to be int, double, or "
- "fixnum\n",
- Validate(zone, test_function, &types));
-}
-
-TEST(BadImports) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var fint = (foreign.bar | 0) | 0;\n"
- " function test1() {}\n"
- " return { testFunc1: test1 };\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 3: illegal computation inside module body\n",
- Validate(zone, test_function, &types));
-}
-
-TEST(BadVariableReference) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var x = 0;\n"
- " var y = x;\n"
- " function test1() {}\n"
- " return { testFunc1: test1 };\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 4: illegal variable reference in module body\n",
- Validate(zone, test_function, &types));
-}
-
-TEST(BadForeignVariableReferenceValueOr) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var fint = foreign.bar | 1;\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 3: illegal integer annotation value\n",
- Validate(zone, test_function, &types));
-}
-
-TEST(BadForeignVariableReferenceValueOrDot) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var fint = foreign.bar | 1.0;\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 3: illegal integer annotation value\n",
- Validate(zone, test_function, &types));
-}
-
-TEST(BadForeignVariableReferenceValueMul) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var fint = foreign.bar * 2.0;\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 3: illegal double annotation value\n",
- Validate(zone, test_function, &types));
-}
-
-TEST(BadForeignVariableReferenceValueMulNoDot) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var fint = foreign.bar * 1;\n"
- "}\n";
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("asm: line 3: ill-typed arithmetic operation\n",
- Validate(zone, test_function, &types));
-}
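// A sketch of the import-annotation rules the tests above probe (illustrative
// only; the authoritative check is the validator itself): inside an asm.js
// module body a foreign value may only be captured as an int via a single
// "| 0" or as a double via a single unary "+":
//   var fint    = foreign.bar | 0;   // int annotation
//   var fdouble = +foreign.baz;      // double annotation
// Any other operator, operand, or doubled annotation is rejected with the
// error strings asserted above; TEST(Imports) below exercises the legal forms.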
-
-TEST(Imports) {
- const char test_function[] =
- "function TestModule(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- " var ffunc = foreign.foo;\n"
- " var fint = foreign.bar | 0;\n"
- " var fdouble = +foreign.baz;\n"
- " function test1() { return ffunc(fint|0, fdouble) | 0; }\n"
- " function test2() { return +ffunc(fdouble, fint|0); }\n"
- " return { testFunc1: test1, testFunc2: test2 };\n"
- "}\n";
-
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- Zone* zone = handles.main_zone();
- ZoneVector<ExpressionTypeEntry> types(zone);
- CHECK_EQ("", Validate(zone, test_function, &types));
- TypeCache cache;
-
- CHECK_TYPES_BEGIN {
- // Module.
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- // function test1
- CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(ffunc, FUNC_FOREIGN_TYPE);
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(fint, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_VAR(fdouble, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- // function test2
- CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(ffunc, FUNC_FOREIGN_TYPE);
- CHECK_VAR(fdouble, Bounds(cache.kAsmDouble));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(fint, Bounds(cache.kAsmInt));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- // "use asm";
- CHECK_EXPR(Literal, Bounds(Type::String()));
-    // var ffunc = foreign.foo;
- CHECK_EXPR(Assignment, Bounds(FUNC_FOREIGN_TYPE)) {
- CHECK_VAR(ffunc, Bounds(FUNC_FOREIGN_TYPE));
- CHECK_EXPR(Property, Bounds(FUNC_FOREIGN_TYPE)) {
- CHECK_VAR(foreign, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // var fint = foreign.bar | 0;
- CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
- CHECK_VAR(fint, Bounds(cache.kAsmInt));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Property, Bounds(Type::Number())) {
- CHECK_VAR(foreign, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- }
- // var fdouble = +foreign.baz;
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(fdouble, Bounds(cache.kAsmDouble));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Property, Bounds(Type::Number())) {
- CHECK_VAR(foreign, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- // return { testFunc1: test1, testFunc2: test2 };
- CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) {
- CHECK_VAR(test1, FUNC_I_TYPE);
- CHECK_VAR(test2, FUNC_D_TYPE);
- }
- }
- }
- CHECK_TYPES_END
-}
-
-TEST(StoreFloatFromDouble) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { f32[0] = 0.0; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(f32, Bounds(cache.kFloat32Array));
- CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
- }
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
-
-TEST(NegateDouble) {
- CHECK_FUNC_TYPES_BEGIN(
- "function bar() { var x = 0.0; x = -x; }\n"
- "function foo() { bar(); }") {
- CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
- CHECK_VAR(x, Bounds(cache.kAsmDouble));
- CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
- }
- }
- }
- CHECK_SKIP();
- }
- CHECK_FUNC_TYPES_END
-}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 3763f06493..93a19c1a14 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -412,29 +412,26 @@ TEST(6) {
Assembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatureScope scope(&assm, ARMv7);
- __ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
- __ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
- __ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0.
- __ add(r0, r1, Operand(r2));
- __ add(r0, r0, Operand(r3));
- __ mov(pc, Operand(lr));
+ __ usat(r1, 8, Operand(r0)); // Sat 0xFFFF to 0-255 = 0xFF.
+ __ usat(r2, 12, Operand(r0, ASR, 9)); // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
+ __ usat(r3, 1, Operand(r0, LSL, 16)); // Sat (0xFFFF<<16) to 0-1 = 0x0.
+ __ add(r0, r1, Operand(r2));
+ __ add(r0, r0, Operand(r3));
+ __ mov(pc, Operand(lr));
- CodeDesc desc;
- assm.GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef DEBUG
- OFStream os(stdout);
- code->Print(os);
+ OFStream os(stdout);
+ code->Print(os);
#endif
- F1 f = FUNCTION_CAST<F1>(code->entry());
- int res = reinterpret_cast<int>(
- CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
- ::printf("f() = %d\n", res);
- CHECK_EQ(382, res);
- }
+ F1 f = FUNCTION_CAST<F1>(code->entry());
+ int res = reinterpret_cast<int>(
+ CALL_GENERATED_CODE(isolate, f, 0xFFFF, 0, 0, 0, 0));
+ ::printf("f() = %d\n", res);
+ CHECK_EQ(382, res);
}
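// The expected value 382 follows from the three saturations above:
// 0xFF + 0x7F + 0x00 = 0x17E = 382.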
@@ -2231,6 +2228,272 @@ TEST(ARMv8_vrintX) {
}
}
+TEST(ARMv8_vsel) {
+ // Test the vsel floating point instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+
+ // Used to indicate whether a condition passed or failed.
+ static constexpr float kResultPass = 1.0f;
+ static constexpr float kResultFail = -kResultPass;
+
+ struct ResultsF32 {
+ float vseleq_;
+ float vselge_;
+ float vselgt_;
+ float vselvs_;
+
+ // The following conditions aren't architecturally supported, but the
+ // assembler implements them by swapping the inputs.
+ float vselne_;
+ float vsellt_;
+ float vselle_;
+ float vselvc_;
+ };
+
+ struct ResultsF64 {
+ double vseleq_;
+ double vselge_;
+ double vselgt_;
+ double vselvs_;
+
+ // The following conditions aren't architecturally supported, but the
+ // assembler implements them by swapping the inputs.
+ double vselne_;
+ double vsellt_;
+ double vselle_;
+ double vselvc_;
+ };
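// For reference, the operand swap mentioned in the struct comments amounts to
// inverting the condition and exchanging the two sources (a sketch of the
// idea, not the assembler's actual emission path):
//   vsel(ne, d0, d1, d2)  behaves as  vsel(eq, d0, d2, d1)
//   vsel(lt, d0, d1, d2)  behaves as  vsel(ge, d0, d2, d1)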
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ CpuFeatureScope scope(&assm, ARMv8);
+
+ // Create a helper function:
+ // void TestVsel(uint32_t nzcv,
+ // ResultsF32* results_f32,
+ // ResultsF64* results_f64);
+ __ msr(CPSR_f, Operand(r0));
+
+ __ vmov(s1, kResultPass);
+ __ vmov(s2, kResultFail);
+
+ __ vsel(eq, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vseleq_));
+ __ vsel(ge, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vselge_));
+ __ vsel(gt, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vselgt_));
+ __ vsel(vs, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vselvs_));
+
+ __ vsel(ne, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vselne_));
+ __ vsel(lt, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vsellt_));
+ __ vsel(le, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vselle_));
+ __ vsel(vc, s0, s1, s2);
+ __ vstr(s0, r1, offsetof(ResultsF32, vselvc_));
+
+ __ vmov(d1, kResultPass);
+ __ vmov(d2, kResultFail);
+
+ __ vsel(eq, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vseleq_));
+ __ vsel(ge, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vselge_));
+ __ vsel(gt, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vselgt_));
+ __ vsel(vs, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vselvs_));
+
+ __ vsel(ne, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vselne_));
+ __ vsel(lt, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vsellt_));
+ __ vsel(le, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vselle_));
+ __ vsel(vc, d0, d1, d2);
+ __ vstr(d0, r2, offsetof(ResultsF64, vselvc_));
+
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F5 f = FUNCTION_CAST<F5>(code->entry());
+ Object* dummy = nullptr;
+ USE(dummy);
+
+ STATIC_ASSERT(kResultPass == -kResultFail);
+#define CHECK_VSEL(n, z, c, v, vseleq, vselge, vselgt, vselvs) \
+ do { \
+ ResultsF32 results_f32; \
+ ResultsF64 results_f64; \
+ uint32_t nzcv = (n << 31) | (z << 30) | (c << 29) | (v << 28); \
+ dummy = CALL_GENERATED_CODE(isolate, f, nzcv, &results_f32, &results_f64, \
+ 0, 0); \
+ CHECK_EQ(vseleq, results_f32.vseleq_); \
+ CHECK_EQ(vselge, results_f32.vselge_); \
+ CHECK_EQ(vselgt, results_f32.vselgt_); \
+ CHECK_EQ(vselvs, results_f32.vselvs_); \
+ CHECK_EQ(-vseleq, results_f32.vselne_); \
+ CHECK_EQ(-vselge, results_f32.vsellt_); \
+ CHECK_EQ(-vselgt, results_f32.vselle_); \
+ CHECK_EQ(-vselvs, results_f32.vselvc_); \
+ CHECK_EQ(vseleq, results_f64.vseleq_); \
+ CHECK_EQ(vselge, results_f64.vselge_); \
+ CHECK_EQ(vselgt, results_f64.vselgt_); \
+ CHECK_EQ(vselvs, results_f64.vselvs_); \
+ CHECK_EQ(-vseleq, results_f64.vselne_); \
+ CHECK_EQ(-vselge, results_f64.vsellt_); \
+ CHECK_EQ(-vselgt, results_f64.vselle_); \
+ CHECK_EQ(-vselvs, results_f64.vselvc_); \
+  } while (0)
+
+ // N Z C V vseleq vselge vselgt vselvs
+ CHECK_VSEL(0, 0, 0, 0, kResultFail, kResultPass, kResultPass, kResultFail);
+ CHECK_VSEL(0, 0, 0, 1, kResultFail, kResultFail, kResultFail, kResultPass);
+ CHECK_VSEL(0, 0, 1, 0, kResultFail, kResultPass, kResultPass, kResultFail);
+ CHECK_VSEL(0, 0, 1, 1, kResultFail, kResultFail, kResultFail, kResultPass);
+ CHECK_VSEL(0, 1, 0, 0, kResultPass, kResultPass, kResultFail, kResultFail);
+ CHECK_VSEL(0, 1, 0, 1, kResultPass, kResultFail, kResultFail, kResultPass);
+ CHECK_VSEL(0, 1, 1, 0, kResultPass, kResultPass, kResultFail, kResultFail);
+ CHECK_VSEL(0, 1, 1, 1, kResultPass, kResultFail, kResultFail, kResultPass);
+ CHECK_VSEL(1, 0, 0, 0, kResultFail, kResultFail, kResultFail, kResultFail);
+ CHECK_VSEL(1, 0, 0, 1, kResultFail, kResultPass, kResultPass, kResultPass);
+ CHECK_VSEL(1, 0, 1, 0, kResultFail, kResultFail, kResultFail, kResultFail);
+ CHECK_VSEL(1, 0, 1, 1, kResultFail, kResultPass, kResultPass, kResultPass);
+ CHECK_VSEL(1, 1, 0, 0, kResultPass, kResultFail, kResultFail, kResultFail);
+ CHECK_VSEL(1, 1, 0, 1, kResultPass, kResultPass, kResultFail, kResultPass);
+ CHECK_VSEL(1, 1, 1, 0, kResultPass, kResultFail, kResultFail, kResultFail);
+ CHECK_VSEL(1, 1, 1, 1, kResultPass, kResultPass, kResultFail, kResultPass);
+
+#undef CHECK_VSEL
+ }
+}
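// Standalone sketch (an assumption for illustration, not V8 code) of the four
// architecturally supported vsel predicates, from which every expected column
// in the CHECK_VSEL table above can be derived.  Unused parameters are kept
// for a uniform signature:
static bool VselEq(bool n, bool z, bool c, bool v) { return z; }
static bool VselGe(bool n, bool z, bool c, bool v) { return n == v; }
static bool VselGt(bool n, bool z, bool c, bool v) { return !z && n == v; }
static bool VselVs(bool n, bool z, bool c, bool v) { return v; }
// e.g. NZCV = 0100 (only Z set): eq and ge pass, gt and vs fail, matching the
// CHECK_VSEL(0, 1, 0, 0, ...) row.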
+
+TEST(unaligned_loads) {
+ // All supported ARM targets allow unaligned accesses.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ uint32_t ldrh;
+ uint32_t ldrsh;
+ uint32_t ldr;
+ } T;
+ T t;
+
+ Assembler assm(isolate, NULL, 0);
+ __ ldrh(ip, MemOperand(r1, r2));
+ __ str(ip, MemOperand(r0, offsetof(T, ldrh)));
+ __ ldrsh(ip, MemOperand(r1, r2));
+ __ str(ip, MemOperand(r0, offsetof(T, ldrsh)));
+ __ ldr(ip, MemOperand(r1, r2));
+ __ str(ip, MemOperand(r0, offsetof(T, ldr)));
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F4 f = FUNCTION_CAST<F4>(code->entry());
+
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#ifndef V8_TARGET_LITTLE_ENDIAN
+#error This test assumes a little-endian layout.
+#endif
+ uint64_t data = UINT64_C(0x84838281807f7e7d);
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 0, 0, 0);
+ CHECK_EQ(0x00007e7d, t.ldrh);
+ CHECK_EQ(0x00007e7d, t.ldrsh);
+ CHECK_EQ(0x807f7e7d, t.ldr);
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 1, 0, 0);
+ CHECK_EQ(0x00007f7e, t.ldrh);
+ CHECK_EQ(0x00007f7e, t.ldrsh);
+ CHECK_EQ(0x81807f7e, t.ldr);
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 2, 0, 0);
+ CHECK_EQ(0x0000807f, t.ldrh);
+ CHECK_EQ(0xffff807f, t.ldrsh);
+ CHECK_EQ(0x8281807f, t.ldr);
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 3, 0, 0);
+ CHECK_EQ(0x00008180, t.ldrh);
+ CHECK_EQ(0xffff8180, t.ldrsh);
+ CHECK_EQ(0x83828180, t.ldr);
+}
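// Worked example for the offset-2 case above: the little-endian data
// 0x84838281807f7e7d has bytes 0x7f and 0x80 at offsets 2 and 3, so ldrh
// yields 0x0000807f, while ldrsh sees bit 15 set and sign-extends the same
// halfword to 0xffff807f.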
+
+TEST(unaligned_stores) {
+ // All supported ARM targets allow unaligned accesses.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(isolate, NULL, 0);
+ __ strh(r3, MemOperand(r0, r2));
+ __ str(r3, MemOperand(r1, r2));
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+ F4 f = FUNCTION_CAST<F4>(code->entry());
+
+ Object* dummy = nullptr;
+ USE(dummy);
+
+#ifndef V8_TARGET_LITTLE_ENDIAN
+#error This test assumes a little-endian layout.
+#endif
+ {
+ uint64_t strh = 0;
+ uint64_t str = 0;
+ dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 0, 0xfedcba98, 0);
+ CHECK_EQ(UINT64_C(0x000000000000ba98), strh);
+ CHECK_EQ(UINT64_C(0x00000000fedcba98), str);
+ }
+ {
+ uint64_t strh = 0;
+ uint64_t str = 0;
+ dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 1, 0xfedcba98, 0);
+ CHECK_EQ(UINT64_C(0x0000000000ba9800), strh);
+ CHECK_EQ(UINT64_C(0x000000fedcba9800), str);
+ }
+ {
+ uint64_t strh = 0;
+ uint64_t str = 0;
+ dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 2, 0xfedcba98, 0);
+ CHECK_EQ(UINT64_C(0x00000000ba980000), strh);
+ CHECK_EQ(UINT64_C(0x0000fedcba980000), str);
+ }
+ {
+ uint64_t strh = 0;
+ uint64_t str = 0;
+ dummy = CALL_GENERATED_CODE(isolate, f, &strh, &str, 3, 0xfedcba98, 0);
+ CHECK_EQ(UINT64_C(0x000000ba98000000), strh);
+ CHECK_EQ(UINT64_C(0x00fedcba98000000), str);
+ }
+}
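// Worked example for the offset-1 case above: strh places the low halfword
// 0xba98 of the value at byte offset 1 of an all-zero doubleword, giving
// 0x0000000000ba9800, and str likewise places all four bytes, giving
// 0x000000fedcba9800.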
TEST(regress4292_b) {
CcTest::InitializeVM();
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index c2c5b782dc..68bef59d86 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -3819,6 +3819,375 @@ TEST(neg) {
}
+template <typename T, typename Op>
+static void AdcsSbcsHelper(Op op, T left, T right, int carry, T expected,
+ StatusFlags expected_flags) {
+ int reg_size = sizeof(T) * 8;
+ auto left_reg = Register::Create(0, reg_size);
+ auto right_reg = Register::Create(1, reg_size);
+ auto result_reg = Register::Create(2, reg_size);
+
+ SETUP();
+ START();
+
+ __ Mov(left_reg, left);
+ __ Mov(right_reg, right);
+ __ Mov(x10, (carry ? CFlag : NoFlag));
+
+ __ Msr(NZCV, x10);
+ (masm.*op)(result_reg, left_reg, right_reg);
+
+ END();
+ RUN();
+
+ CHECK_EQUAL_64(left, left_reg.X());
+ CHECK_EQUAL_64(right, right_reg.X());
+ CHECK_EQUAL_64(expected, result_reg.X());
+ CHECK_EQUAL_NZCV(expected_flags);
+
+ TEARDOWN();
+}
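// Minimal standalone model (an assumption for illustration, not V8 code;
// requires a compiler with unsigned __int128) of the 64-bit ADCS semantics
// that the expectation tables below encode.  SBCS is the same operation with
// the inverted right operand: Sbcs(l, r, c) == Adcs(l, ~r, c).
#include <cstdint>
static uint64_t ModelAdcs64(uint64_t l, uint64_t r, int carry_in, bool* n,
                            bool* z, bool* c, bool* v) {
  unsigned __int128 wide = static_cast<unsigned __int128>(l) + r + carry_in;
  uint64_t res = static_cast<uint64_t>(wide);
  *n = (res >> 63) != 0;                     // Result is negative.
  *z = res == 0;                             // Result is zero.
  *c = (wide >> 64) != 0;                    // Unsigned carry out.
  *v = ((~(l ^ r) & (l ^ res)) >> 63) != 0;  // Signed overflow.
  return res;
}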
+
+
+TEST(adcs_sbcs_x) {
+ INIT_V8();
+ uint64_t inputs[] = {
+ 0x0000000000000000, 0x0000000000000001, 0x7ffffffffffffffe,
+ 0x7fffffffffffffff, 0x8000000000000000, 0x8000000000000001,
+ 0xfffffffffffffffe, 0xffffffffffffffff,
+ };
+ static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
+
+ struct Expected {
+ uint64_t carry0_result;
+ StatusFlags carry0_flags;
+ uint64_t carry1_result;
+ StatusFlags carry1_flags;
+ };
+
+ static const Expected expected_adcs_x[input_count][input_count] = {
+ {{0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag},
+ {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
+ {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}},
+ {{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
+ {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag}},
+ {{0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag}},
+ {{0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag}},
+ {{0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag}},
+ {{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag}},
+ {{0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
+ {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag}},
+ {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
+ {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
+ {0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag}}};
+
+ static const Expected expected_sbcs_x[input_count][input_count] = {
+ {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
+ {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
+ {0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag}},
+ {{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
+ {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag}},
+ {{0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
+ {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag}},
+ {{0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
+ {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
+ {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
+ {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag}},
+ {{0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
+ {0x8000000000000000, NFlag, 0x8000000000000001, NFlag}},
+ {{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
+ {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
+ {0x8000000000000001, NFlag, 0x8000000000000002, NFlag}},
+ {{0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
+ {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
+ {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag}},
+ {{0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag},
+ {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
+ {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
+ {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
+ {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
+ {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
+ {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
+ {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}}};
+
+ for (size_t left = 0; left < input_count; left++) {
+ for (size_t right = 0; right < input_count; right++) {
+ const Expected& expected = expected_adcs_x[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 0,
+ expected.carry0_result, expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 1,
+ expected.carry1_result, expected.carry1_flags);
+ }
+ }
+
+ for (size_t left = 0; left < input_count; left++) {
+ for (size_t right = 0; right < input_count; right++) {
+ const Expected& expected = expected_sbcs_x[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 0,
+ expected.carry0_result, expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 1,
+ expected.carry1_result, expected.carry1_flags);
+ }
+ }
+}
+
+
+TEST(adcs_sbcs_w) {
+ INIT_V8();
+ uint32_t inputs[] = {
+ 0x00000000, 0x00000001, 0x7ffffffe, 0x7fffffff,
+ 0x80000000, 0x80000001, 0xfffffffe, 0xffffffff,
+ };
+ static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
+
+ struct Expected {
+ uint32_t carry0_result;
+ StatusFlags carry0_flags;
+ uint32_t carry1_result;
+ StatusFlags carry1_flags;
+ };
+
+ static const Expected expected_adcs_w[input_count][input_count] = {
+ {{0x00000000, ZFlag, 0x00000001, NoFlag},
+ {0x00000001, NoFlag, 0x00000002, NoFlag},
+ {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x80000000, NFlag, 0x80000001, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag}},
+ {{0x00000001, NoFlag, 0x00000002, NoFlag},
+ {0x00000002, NoFlag, 0x00000003, NoFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag}},
+ {{0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag}},
+ {{0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffe, CFlag, 0x7fffffff, CFlag}},
+ {{0x80000000, NFlag, 0x80000001, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCVFlag, 0x00000001, CVFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag}},
+ {{0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x00000002, CVFlag, 0x00000003, CVFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x80000000, NCFlag, 0x80000001, NCFlag}},
+ {{0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
+ {0xfffffffd, NCFlag, 0xfffffffe, NCFlag}},
+ {{0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x80000000, NCFlag, 0x80000001, NCFlag},
+ {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
+ {0xfffffffe, NCFlag, 0xffffffff, NCFlag}}};
+
+ static const Expected expected_sbcs_w[input_count][input_count] = {
+ {{0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000000, NFlag, 0x80000001, NFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
+ {0x00000001, NoFlag, 0x00000002, NoFlag},
+ {0x00000000, ZFlag, 0x00000001, NoFlag}},
+ {{0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x00000002, NoFlag, 0x00000003, NoFlag},
+ {0x00000001, NoFlag, 0x00000002, NoFlag}},
+ {{0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag},
+ {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag}},
+ {{0x7ffffffe, CFlag, 0x7fffffff, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
+ {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
+ {0x80000000, NVFlag, 0x80000001, NVFlag},
+ {0x7fffffff, NoFlag, 0x80000000, NVFlag}},
+ {{0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x00000000, ZCVFlag, 0x00000001, CVFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag},
+ {0x80000000, NFlag, 0x80000001, NFlag}},
+ {{0x80000000, NCFlag, 0x80000001, NCFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x00000002, CVFlag, 0x00000003, CVFlag},
+ {0x00000001, CVFlag, 0x00000002, CVFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0x80000002, NFlag, 0x80000003, NFlag},
+ {0x80000001, NFlag, 0x80000002, NFlag}},
+ {{0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
+ {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag},
+ {0xfffffffe, NFlag, 0xffffffff, NFlag}},
+ {{0xfffffffe, NCFlag, 0xffffffff, NCFlag},
+ {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
+ {0x80000000, NCFlag, 0x80000001, NCFlag},
+ {0x7fffffff, CVFlag, 0x80000000, NCFlag},
+ {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
+ {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
+ {0x00000000, ZCFlag, 0x00000001, CFlag},
+ {0xffffffff, NFlag, 0x00000000, ZCFlag}}};
+
+ for (size_t left = 0; left < input_count; left++) {
+ for (size_t right = 0; right < input_count; right++) {
+ const Expected& expected = expected_adcs_w[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 0,
+ expected.carry0_result, expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Adcs, inputs[left], inputs[right], 1,
+ expected.carry1_result, expected.carry1_flags);
+ }
+ }
+
+ for (size_t left = 0; left < input_count; left++) {
+ for (size_t right = 0; right < input_count; right++) {
+ const Expected& expected = expected_sbcs_w[left][right];
+ AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 0,
+ expected.carry0_result, expected.carry0_flags);
+ AdcsSbcsHelper(&MacroAssembler::Sbcs, inputs[left], inputs[right], 1,
+ expected.carry1_result, expected.carry1_flags);
+ }
+ }
+}
+
+
TEST(adc_sbc_shift) {
INIT_V8();
SETUP();
@@ -3887,132 +4256,6 @@ TEST(adc_sbc_shift) {
CHECK_EQUAL_32(0x91111110 + 1, w26);
CHECK_EQUAL_32(0x9a222221 + 1, w27);
- // Check that adc correctly sets the condition flags.
- START();
- __ Mov(x0, 1);
- __ Mov(x1, 0xffffffffffffffffL);
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Adcs(x10, x0, Operand(x1));
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(ZCFlag);
- CHECK_EQUAL_64(0, x10);
-
- START();
- __ Mov(x0, 1);
- __ Mov(x1, 0x8000000000000000L);
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Adcs(x10, x0, Operand(x1, ASR, 63));
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(ZCFlag);
- CHECK_EQUAL_64(0, x10);
-
- START();
- __ Mov(x0, 0x10);
- __ Mov(x1, 0x07ffffffffffffffL);
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Adcs(x10, x0, Operand(x1, LSL, 4));
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(NVFlag);
- CHECK_EQUAL_64(0x8000000000000000L, x10);
-
- // Check that sbc correctly sets the condition flags.
- START();
- __ Mov(x0, 0);
- __ Mov(x1, 0xffffffffffffffffL);
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Sbcs(x10, x0, Operand(x1));
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(ZFlag);
- CHECK_EQUAL_64(0, x10);
-
- START();
- __ Mov(x0, 1);
- __ Mov(x1, 0xffffffffffffffffL);
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Sbcs(x10, x0, Operand(x1, LSR, 1));
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(NFlag);
- CHECK_EQUAL_64(0x8000000000000001L, x10);
-
- START();
- __ Mov(x0, 0);
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(ZFlag);
- CHECK_EQUAL_64(0, x10);
-
- START()
- __ Mov(w0, 0x7fffffff);
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Ngcs(w10, w0);
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(NFlag);
- CHECK_EQUAL_64(0x80000000, x10);
-
- START();
- // Clear the C flag.
- __ Adds(x0, x0, Operand(0));
- __ Ngcs(x10, 0x7fffffffffffffffL);
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(NFlag);
- CHECK_EQUAL_64(0x8000000000000000L, x10);
-
- START()
- __ Mov(x0, 0);
- // Set the C flag.
- __ Cmp(x0, Operand(x0));
- __ Sbcs(x10, x0, Operand(1));
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(NFlag);
- CHECK_EQUAL_64(0xffffffffffffffffL, x10);
-
- START()
- __ Mov(x0, 0);
- // Set the C flag.
- __ Cmp(x0, Operand(x0));
- __ Ngcs(x10, 0x7fffffffffffffffL);
- END();
-
- RUN();
-
- CHECK_EQUAL_NZCV(NFlag);
- CHECK_EQUAL_64(0x8000000000000001L, x10);
-
TEARDOWN();
}
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index b8a04267e7..e73b40e96b 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -2048,7 +2048,7 @@ TEST(movz_movn) {
v8::internal::CodeObjectRequired::kYes);
typedef struct test_float {
- int64_t rt;
+ int32_t rt;
double a;
double b;
double bold;
@@ -3191,6 +3191,8 @@ TEST(jump_tables1) {
__ jr(ra);
__ nop();
+ CHECK_EQ(assm.UnboundLabelsCount(), 0);
+
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index dd6ed6b68c..9529dab242 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -1385,16 +1385,22 @@ TEST(MIPS16) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int64_t r1;
int64_t r2;
int64_t r3;
int64_t r4;
int64_t r5;
int64_t r6;
+ int64_t r7;
+ int64_t r8;
+ int64_t r9;
+ int64_t r10;
+ int64_t r11;
+ int64_t r12;
uint32_t ui;
int32_t si;
- } T;
+ };
T t;
Assembler assm(isolate, NULL, 0);
@@ -1423,26 +1429,25 @@ TEST(MIPS16) {
// lh with positive data.
__ lh(a5, MemOperand(a0, offsetof(T, ui)));
- __ sw(a5, MemOperand(a0, offsetof(T, r2)));
+ __ sw(a5, MemOperand(a0, offsetof(T, r7)));
// lh with negative data.
__ lh(a6, MemOperand(a0, offsetof(T, si)));
- __ sw(a6, MemOperand(a0, offsetof(T, r3)));
+ __ sw(a6, MemOperand(a0, offsetof(T, r8)));
// lhu with negative data.
__ lhu(a7, MemOperand(a0, offsetof(T, si)));
- __ sw(a7, MemOperand(a0, offsetof(T, r4)));
+ __ sw(a7, MemOperand(a0, offsetof(T, r9)));
// lb with negative data.
__ lb(t0, MemOperand(a0, offsetof(T, si)));
- __ sw(t0, MemOperand(a0, offsetof(T, r5)));
+ __ sw(t0, MemOperand(a0, offsetof(T, r10)));
- // // sh writes only 1/2 of word.
- __ lui(t1, 0x3333);
- __ ori(t1, t1, 0x3333);
- __ sw(t1, MemOperand(a0, offsetof(T, r6)));
- __ lhu(t1, MemOperand(a0, offsetof(T, si)));
- __ sh(t1, MemOperand(a0, offsetof(T, r6)));
+ // sh writes only 1/2 of word.
+ __ lw(a4, MemOperand(a0, offsetof(T, ui)));
+ __ sh(a4, MemOperand(a0, offsetof(T, r11)));
+ __ lw(a4, MemOperand(a0, offsetof(T, si)));
+ __ sh(a4, MemOperand(a0, offsetof(T, r12)));
__ jr(ra);
__ nop();
@@ -1454,26 +1459,75 @@ TEST(MIPS16) {
F3 f = FUNCTION_CAST<F3>(code->entry());
t.ui = 0x44332211;
t.si = 0x99aabbcc;
- t.r1 = 0x1111111111111111;
- t.r2 = 0x2222222222222222;
- t.r3 = 0x3333333333333333;
- t.r4 = 0x4444444444444444;
+ t.r1 = 0x5555555555555555;
+ t.r2 = 0x5555555555555555;
+ t.r3 = 0x5555555555555555;
+ t.r4 = 0x5555555555555555;
t.r5 = 0x5555555555555555;
- t.r6 = 0x6666666666666666;
+ t.r6 = 0x5555555555555555;
+ t.r7 = 0x5555555555555555;
+ t.r8 = 0x5555555555555555;
+ t.r9 = 0x5555555555555555;
+ t.r10 = 0x5555555555555555;
+ t.r11 = 0x5555555555555555;
+ t.r12 = 0x5555555555555555;
+
Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
USE(dummy);
- // Unsigned data, 32 & 64.
- CHECK_EQ(static_cast<int64_t>(0x1111111144332211L), t.r1);
- CHECK_EQ(static_cast<int64_t>(0x0000000000002211L), t.r2);
+ if (kArchEndian == kLittle) {
+    // Unsigned data, 32 & 64.
+ CHECK_EQ(static_cast<int64_t>(0x5555555544332211L), t.r1); // lw, sw.
+ CHECK_EQ(static_cast<int64_t>(0x0000000044332211L), t.r2); // sd.
+
+ // Signed data, 32 & 64.
+ CHECK_EQ(static_cast<int64_t>(0x5555555599aabbccL), t.r3); // lw, sw.
+ CHECK_EQ(static_cast<int64_t>(0xffffffff99aabbccL), t.r4); // sd.
+
+ // Signed data, 32 & 64.
+ CHECK_EQ(static_cast<int64_t>(0x5555555599aabbccL), t.r5); // lwu, sw.
+ CHECK_EQ(static_cast<int64_t>(0x0000000099aabbccL), t.r6); // sd.
+
+ // lh with unsigned and signed data.
+ CHECK_EQ(static_cast<int64_t>(0x5555555500002211L), t.r7); // lh, sw.
+ CHECK_EQ(static_cast<int64_t>(0x55555555ffffbbccL), t.r8); // lh, sw.
+
+ // lhu with signed data.
+ CHECK_EQ(static_cast<int64_t>(0x555555550000bbccL), t.r9); // lhu, sw.
- // Signed data, 32 & 64.
- CHECK_EQ(static_cast<int64_t>(0x33333333ffffbbccL), t.r3);
- CHECK_EQ(static_cast<int64_t>(0xffffffff0000bbccL), t.r4);
+ // lb with signed data.
+ CHECK_EQ(static_cast<int64_t>(0x55555555ffffffccL), t.r10); // lb, sw.
- // Signed data, 32 & 64.
- CHECK_EQ(static_cast<int64_t>(0x55555555ffffffccL), t.r5);
- CHECK_EQ(static_cast<int64_t>(0x000000003333bbccL), t.r6);
+ // sh with unsigned and signed data.
+ CHECK_EQ(static_cast<int64_t>(0x5555555555552211L), t.r11); // lw, sh.
+ CHECK_EQ(static_cast<int64_t>(0x555555555555bbccL), t.r12); // lw, sh.
+ } else {
+    // Unsigned data, 32 & 64.
+ CHECK_EQ(static_cast<int64_t>(0x4433221155555555L), t.r1); // lw, sw.
+ CHECK_EQ(static_cast<int64_t>(0x0000000044332211L), t.r2); // sd.
+
+ // Signed data, 32 & 64.
+ CHECK_EQ(static_cast<int64_t>(0x99aabbcc55555555L), t.r3); // lw, sw.
+ CHECK_EQ(static_cast<int64_t>(0xffffffff99aabbccL), t.r4); // sd.
+
+ // Signed data, 32 & 64.
+ CHECK_EQ(static_cast<int64_t>(0x99aabbcc55555555L), t.r5); // lwu, sw.
+ CHECK_EQ(static_cast<int64_t>(0x0000000099aabbccL), t.r6); // sd.
+
+ // lh with unsigned and signed data.
+ CHECK_EQ(static_cast<int64_t>(0x0000443355555555L), t.r7); // lh, sw.
+ CHECK_EQ(static_cast<int64_t>(0xffff99aa55555555L), t.r8); // lh, sw.
+
+ // lhu with signed data.
+ CHECK_EQ(static_cast<int64_t>(0x000099aa55555555L), t.r9); // lhu, sw.
+
+ // lb with signed data.
+ CHECK_EQ(static_cast<int64_t>(0xffffff9955555555L), t.r10); // lb, sw.
+
+ // sh with unsigned and signed data.
+ CHECK_EQ(static_cast<int64_t>(0x2211555555555555L), t.r11); // lw, sh.
+ CHECK_EQ(static_cast<int64_t>(0xbbcc555555555555L), t.r12); // lw, sh.
+ }
}
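// Each r1..r12 field starts as the sentinel 0x5555555555555555: the sw/sh
// checks verify both the 32 or 16 bits actually written and that the rest of
// the doubleword kept the sentinel, while the sd checks cover all 64 bits.
// Which half a 32-bit store lands in depends on the target endianness, hence
// the two expectation branches above.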
@@ -2162,7 +2216,7 @@ TEST(movz_movn) {
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
- __ lw(t0, MemOperand(a0, offsetof(TestFloat, rt)) );
+ __ ld(t0, MemOperand(a0, offsetof(TestFloat, rt)));
__ Move(f12, 0.0);
__ Move(f10, 0.0);
__ Move(f16, 0.0);
@@ -3261,6 +3315,8 @@ TEST(jump_tables1) {
__ jr(ra);
__ nop();
+ CHECK_EQ(assm.UnboundLabelsCount(), 0);
+
CodeDesc desc;
assm.GetCode(&desc);
Handle<Code> code = isolate->factory()->NewCode(
@@ -5559,15 +5615,22 @@ TEST(r6_ldpc) {
uint64_t expected_res;
};
- struct TestCaseLdpc tc[] = {
- // offset, expected_res
- { -131072, 0x250ffffe250fffff },
- { -4, 0x250c0006250c0007 },
- { -1, 0x250c0000250c0001 },
- { 0, 0x03001025ef180000 },
- { 1, 0x2508000125080000 },
- { 4, 0x2508000725080006 },
- { 131071, 0x250bfffd250bfffc },
+ auto doubleword = [](uint32_t word2, uint32_t word1) {
+ if (kArchEndian == kLittle)
+ return (static_cast<uint64_t>(word2) << 32) + word1;
+ else
+ return (static_cast<uint64_t>(word1) << 32) + word2;
+ };
+
+ TestCaseLdpc tc[] = {
+ // offset, expected_res
+ {-131072, doubleword(0x250ffffe, 0x250fffff)},
+ {-4, doubleword(0x250c0006, 0x250c0007)},
+ {-1, doubleword(0x250c0000, 0x250c0001)},
+ {0, doubleword(0x03001025, 0xef180000)},
+ {1, doubleword(0x25080001, 0x25080000)},
+ {4, doubleword(0x25080007, 0x25080006)},
+ {131071, doubleword(0x250bfffd, 0x250bfffc)},
};
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLdpc);
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index dee8e07935..ab6796bf18 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -202,7 +202,7 @@ TEST(3) {
__ sll(r13, Operand(10));
v8::internal::byte* bufPos = assm.buffer_pos();
- ::printf("buffer position = %p", bufPos);
+ ::printf("buffer position = %p", static_cast<void*>(bufPos));
::fflush(stdout);
// OS::DebugBreak();
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 66199fb540..36f1b30df9 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -328,6 +328,32 @@ TEST(AssemblerX64TestlOperations) {
CHECK_EQ(1u, result);
}
+TEST(AssemblerX64TestwOperations) {
+ typedef uint16_t (*F)(uint16_t * x);
+ CcTest::InitializeVM();
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+  // Set rax from the ZF flag of the testw instruction.
+ Label done;
+ __ movq(rax, Immediate(1));
+ __ testw(Operand(arg1, 0), Immediate(0xf0f0));
+ __ j(not_zero, &done, Label::kNear);
+ __ movq(rax, Immediate(0));
+ __ bind(&done);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ // Call the function from C++.
+ uint16_t operand = 0x8000;
+ uint16_t result = FUNCTION_CAST<F>(buffer)(&operand);
+ CHECK_EQ(1u, result);
+}
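// Worked example: the operand is 0x8000 and the immediate mask is 0xf0f0, so
// testw computes 0x8000 & 0xf0f0 = 0x8000 != 0; ZF stays clear, the not_zero
// branch is taken, and rax keeps the 1 that CHECK_EQ expects.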
TEST(AssemblerX64XorlOperations) {
CcTest::InitializeVM();
@@ -2270,4 +2296,60 @@ TEST(AssemblerX64JumpTables2) {
}
}
+TEST(AssemblerX64PslldWithXmm15) {
+ CcTest::InitializeVM();
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
+ CHECK(buffer);
+ Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
+
+ __ movq(xmm15, arg1);
+ __ pslld(xmm15, 1);
+ __ movq(rax, xmm15);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ uint64_t result = FUNCTION_CAST<F5>(buffer)(V8_UINT64_C(0x1122334455667788));
+ CHECK_EQ(V8_UINT64_C(0x22446688aaccef10), result);
+}
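// Worked example: pslld shifts each 32-bit lane left independently, so
// 0x1122334455667788 becomes 0x11223344 << 1 = 0x22446688 in the high lane
// and 0x55667788 << 1 = 0xaaccef10 in the low lane, i.e. the expected
// 0x22446688aaccef10.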
+
+typedef float (*F9)(float x, float y);
+TEST(AssemblerX64vmovups) {
+ CcTest::InitializeVM();
+ if (!CpuFeatures::IsSupported(AVX)) return;
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[256];
+ MacroAssembler assm(isolate, buffer, sizeof(buffer),
+ v8::internal::CodeObjectRequired::kYes);
+ {
+ CpuFeatureScope avx_scope(&assm, AVX);
+    __ shufps(xmm0, xmm0, 0x0);  // broadcast first argument
+    __ shufps(xmm1, xmm1, 0x0);  // broadcast second argument
+    // Copy xmm1 to xmm0 through the stack to test the "vmovups reg, mem" form.
+ __ subq(rsp, Immediate(kSimd128Size));
+ __ vmovups(Operand(rsp, 0), xmm1);
+ __ vmovups(xmm0, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kSimd128Size));
+
+ __ ret(0);
+ }
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+
+ F9 f = FUNCTION_CAST<F9>(code->entry());
+ CHECK_EQ(-1.5, f(1.5, -1.5));
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-x87.cc b/deps/v8/test/cctest/test-assembler-x87.cc
index a831a0b2f1..3649b5bf88 100644
--- a/deps/v8/test/cctest/test-assembler-x87.cc
+++ b/deps/v8/test/cctest/test-assembler-x87.cc
@@ -407,4 +407,45 @@ TEST(AssemblerIa32JumpTables2) {
}
}
+TEST(Regress621926) {
+ // Bug description:
+ // The opcodes for cmpw r/m16, r16 and cmpw r16, r/m16 were swapped.
+ // This was causing non-commutative comparisons to produce the wrong result.
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ uint16_t a = 42;
+
+ Label fail;
+ __ push(ebx);
+ __ mov(ebx, Immediate(reinterpret_cast<intptr_t>(&a)));
+ __ mov(eax, Immediate(41));
+ __ cmpw(eax, Operand(ebx, 0));
+ __ j(above_equal, &fail);
+ __ cmpw(Operand(ebx, 0), eax);
+ __ j(below_equal, &fail);
+ __ mov(eax, 1);
+ __ pop(ebx);
+ __ ret(0);
+ __ bind(&fail);
+ __ mov(eax, 0);
+ __ pop(ebx);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+#endif
+
+ F0 f = FUNCTION_CAST<F0>(code->entry());
+ CHECK_EQ(f(), 1);
+}
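// Worked example: with a = 42 and eax = 41, cmpw(eax, Operand(ebx, 0))
// computes 41 - 42, so the above_equal guard must fall through, and the
// reversed cmpw(Operand(ebx, 0), eax) computes 42 - 41, so the below_equal
// guard must fall through as well; with the opcodes swapped (the bug), one of
// the two comparisons reads its operands backwards and its guard jumps to the
// fail label.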
+
#undef __
diff --git a/deps/v8/test/cctest/test-ast-expression-visitor.cc b/deps/v8/test/cctest/test-ast-expression-visitor.cc
deleted file mode 100644
index bda1fba3b4..0000000000
--- a/deps/v8/test/cctest/test-ast-expression-visitor.cc
+++ /dev/null
@@ -1,423 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/ast-expression-visitor.h"
-#include "src/ast/scopes.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/rewriter.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/expression-type-collector.h"
-#include "test/cctest/expression-type-collector-macros.h"
-
-using namespace v8::internal;
-
-namespace {
-
-static void CollectTypes(HandleAndZoneScope* handles, const char* source,
- ZoneVector<ExpressionTypeEntry>* dst) {
- i::Isolate* isolate = CcTest::i_isolate();
- i::Factory* factory = isolate->factory();
-
- i::Handle<i::String> source_code =
- factory->NewStringFromUtf8(i::CStrVector(source)).ToHandleChecked();
-
- i::Handle<i::Script> script = factory->NewScript(source_code);
-
- i::ParseInfo info(handles->main_zone(), script);
- i::Parser parser(&info);
- parser.set_allow_harmony_sloppy(true);
- info.set_global();
- info.set_lazy(false);
- info.set_allow_lazy_parsing(false);
- info.set_toplevel(true);
-
- CHECK(i::Compiler::ParseAndAnalyze(&info));
-
- ExpressionTypeCollector(
- isolate,
- info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun(), dst)
- .Run();
-}
-
-} // namespace
-
-
-TEST(VisitExpressions) {
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- const char test_function[] =
- "function GeometricMean(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- "\n"
- " var exp = stdlib.Math.exp;\n"
- " var log = stdlib.Math.log;\n"
- " var values = new stdlib.Float64Array(buffer);\n"
- "\n"
- " function logSum(start, end) {\n"
- " start = start|0;\n"
- " end = end|0;\n"
- "\n"
- " var sum = 0.0, p = 0, q = 0;\n"
- "\n"
- " // asm.js forces byte addressing of the heap by requiring shifting "
- "by 3\n"
- " for (p = start << 3, q = end << 3; (p|0) < (q|0); p = (p + 8)|0) {\n"
- " sum = sum + +log(values[p>>3]);\n"
- " }\n"
- "\n"
- " return +sum;\n"
- " }\n"
- "\n"
- " function geometricMean(start, end) {\n"
- " start = start|0;\n"
- " end = end|0;\n"
- "\n"
- " return +exp(+logSum(start, end) / +((end - start)|0));\n"
- " }\n"
- "\n"
- " return { geometricMean: geometricMean };\n"
- "}\n";
-
- CollectTypes(&handles, test_function, &types);
- CHECK_TYPES_BEGIN {
- // function logSum
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(start, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(start, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(end, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(end, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(sum, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(p, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(q, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- // for (p = start << 3, q = end << 3;
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(p, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(start, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(q, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(end, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- }
- // (p|0) < (q|0);
- CHECK_EXPR(CompareOperation, Bounds::Unbounded()) {
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(p, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(q, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // p = (p + 8)|0) {\n"
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(p, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(p, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // sum = sum + +log(values[p>>3]);
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(sum, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(sum, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(Call, Bounds::Unbounded()) {
- CHECK_VAR(log, Bounds::Unbounded());
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(values, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(p, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- }
- // return +sum;
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(sum, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // function geometricMean
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(start, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(start, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(end, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(end, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // return +exp(+logSum(start, end) / +((end - start)|0));
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(Call, Bounds::Unbounded()) {
- CHECK_VAR(exp, Bounds::Unbounded());
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(Call, Bounds::Unbounded()) {
- CHECK_VAR(logSum, Bounds::Unbounded());
- CHECK_VAR(start, Bounds::Unbounded());
- CHECK_VAR(end, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- CHECK_VAR(end, Bounds::Unbounded());
- CHECK_VAR(start, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // "use asm";
- CHECK_EXPR(Literal, Bounds::Unbounded());
- // var exp = stdlib.Math.exp;
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(exp, Bounds::Unbounded());
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(stdlib, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // var log = stdlib.Math.log;
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(log, Bounds::Unbounded());
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(stdlib, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // var values = new stdlib.Float64Array(buffer);
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(values, Bounds::Unbounded());
- CHECK_EXPR(CallNew, Bounds::Unbounded()) {
- CHECK_EXPR(Property, Bounds::Unbounded()) {
- CHECK_VAR(stdlib, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_VAR(buffer, Bounds::Unbounded());
- }
- }
- // return { geometricMean: geometricMean };
- CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) {
- CHECK_VAR(geometricMean, Bounds::Unbounded());
- }
- }
- }
- CHECK_TYPES_END
-}
-
-
-TEST(VisitConditional) {
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- // Check that traversing the ternary operator works.
- const char test_function[] =
- "function foo() {\n"
- " var a, b, c;\n"
- " var x = a ? b : c;\n"
- "}\n";
- CollectTypes(&handles, test_function, &types);
- CHECK_TYPES_BEGIN {
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(x, Bounds::Unbounded());
- CHECK_EXPR(Conditional, Bounds::Unbounded()) {
- CHECK_VAR(a, Bounds::Unbounded());
- CHECK_VAR(b, Bounds::Unbounded());
- CHECK_VAR(c, Bounds::Unbounded());
- }
- }
- }
- }
- CHECK_TYPES_END
-}
-
-
-TEST(VisitEmptyForStatment) {
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- // Check that traversing an empty for statement works.
- const char test_function[] =
- "function foo() {\n"
- " for (;;) {}\n"
- "}\n";
- CollectTypes(&handles, test_function, &types);
- CHECK_TYPES_BEGIN {
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {}
- }
- CHECK_TYPES_END
-}
-
-
-TEST(VisitSwitchStatment) {
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- // Check that traversing a switch with a default works.
- const char test_function[] =
- "function foo() {\n"
- " switch (0) { case 1: break; default: break; }\n"
- "}\n";
- CollectTypes(&handles, test_function, &types);
- CHECK_TYPES_BEGIN {
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(.switch_tag, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- CHECK_EXPR(Literal, Bounds::Unbounded());
- CHECK_VAR(.switch_tag, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- CHECK_TYPES_END
-}
-
-
-TEST(VisitThrow) {
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- const char test_function[] =
- "function foo() {\n"
- " throw 123;\n"
- "}\n";
- CollectTypes(&handles, test_function, &types);
- CHECK_TYPES_BEGIN {
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- CHECK_EXPR(Throw, Bounds::Unbounded()) {
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- }
- CHECK_TYPES_END
-}
-
-
-TEST(VisitYield) {
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- const char test_function[] =
- "function* foo() {\n"
- " yield 123;\n"
- "}\n";
- CollectTypes(&handles, test_function, &types);
- CHECK_TYPES_BEGIN {
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- // Implicit initial yield
- CHECK_EXPR(Yield, Bounds::Unbounded()) {
- CHECK_VAR(.generator_object, Bounds::Unbounded());
- CHECK_EXPR(Assignment, Bounds::Unbounded()) {
- CHECK_VAR(.generator_object, Bounds::Unbounded());
- CHECK_EXPR(CallRuntime, Bounds::Unbounded());
- }
- }
- // Explicit yield (argument wrapped with CreateIterResultObject)
- CHECK_EXPR(Yield, Bounds::Unbounded()) {
- CHECK_VAR(.generator_object, Bounds::Unbounded());
- CHECK_EXPR(CallRuntime, Bounds::Unbounded()) {
- CHECK_EXPR(Literal, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- // Argument to implicit final return
- CHECK_EXPR(CallRuntime, Bounds::Unbounded()) { // CreateIterResultObject
- CHECK_EXPR(Literal, Bounds::Unbounded());
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- // Implicit finally clause
- CHECK_EXPR(CallRuntime, Bounds::Unbounded()) {
- CHECK_VAR(.generator_object, Bounds::Unbounded());
- }
- }
- }
- CHECK_TYPES_END
-}
-
-
-TEST(VisitSkipping) {
- v8::V8::Initialize();
- HandleAndZoneScope handles;
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- const char test_function[] =
- "function foo(x) {\n"
- " return (x + x) + 1;\n"
- "}\n";
- CollectTypes(&handles, test_function, &types);
- CHECK_TYPES_BEGIN {
- CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
- // Skip x + x
- CHECK_SKIP();
- CHECK_EXPR(Literal, Bounds::Unbounded());
- }
- }
- }
- CHECK_TYPES_END
-}
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 365652ef1d..c2cc89828e 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -42,7 +42,7 @@ TEST(List) {
Zone zone(&allocator);
AstValueFactory value_factory(&zone, 0);
AstNodeFactory factory(&value_factory);
- AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
+ AstNode* node = factory.NewEmptyStatement(kNoSourcePosition);
list->Add(node);
CHECK_EQ(1, list->length());
CHECK_EQ(node, list->at(0));
diff --git a/deps/v8/test/cctest/test-code-cache.cc b/deps/v8/test/cctest/test-code-cache.cc
new file mode 100644
index 0000000000..817fa15b34
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-cache.cc
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/list.h"
+#include "src/objects.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+static Handle<Code> GetDummyCode(Isolate* isolate) {
+ CodeDesc desc = {nullptr, // buffer
+ 0, // buffer_size
+ 0, // instr_size
+ 0, // reloc_size
+ 0, // constant_pool_size
+ nullptr, // unwinding_info
+ 0, // unwinding_info_size
+ nullptr}; // origin
+ Code::Flags flags =
+ Code::ComputeFlags(Code::LOAD_IC, kNoExtraICState, kCacheOnReceiver);
+ Handle<Code> self_ref;
+ return isolate->factory()->NewCode(desc, flags, self_ref);
+}
+
+} // namespace
+
+TEST(CodeCache) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ HandleScope handle_scope(isolate);
+
+ Handle<Map> map =
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize, FAST_ELEMENTS);
+
+ // This number should be large enough to cause the code cache to use its
+ // hash table storage format.
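+ // (The exact threshold at which the cache switches from linear storage to
+ // a hash table is an implementation detail; 150 comfortably exceeds it.)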
+ static const int kEntries = 150;
+
+ // Prepare name/code pairs.
+ List<Handle<Name>> names(kEntries);
+ List<Handle<Code>> codes(kEntries);
+ for (int i = 0; i < kEntries; i++) {
+ names.Add(isolate->factory()->NewSymbol());
+ codes.Add(GetDummyCode(isolate));
+ }
+ Handle<Name> bad_name = isolate->factory()->NewSymbol();
+ Code::Flags bad_flags =
+ Code::ComputeFlags(Code::LOAD_IC, kNoExtraICState, kCacheOnPrototype);
+ DCHECK(bad_flags != codes[0]->flags());
+
+ // Cache name/code pairs.
+ for (int i = 0; i < kEntries; i++) {
+ Handle<Name> name = names.at(i);
+ Handle<Code> code = codes.at(i);
+ Map::UpdateCodeCache(map, name, code);
+ CHECK_EQ(*code, map->LookupInCodeCache(*name, code->flags()));
+ CHECK_NULL(map->LookupInCodeCache(*name, bad_flags));
+ }
+ CHECK_NULL(map->LookupInCodeCache(*bad_name, bad_flags));
+
+ // Check that lookup works not only right after storing.
+ for (int i = 0; i < kEntries; i++) {
+ Handle<Name> name = names.at(i);
+ Handle<Code> code = codes.at(i);
+ CHECK_EQ(*code, map->LookupInCodeCache(*name, code->flags()));
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
new file mode 100644
index 0000000000..a88c8783e6
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+TEST(CodeLayoutWithoutUnwindingInfo) {
+ CcTest::InitializeVM();
+ HandleScope handle_scope(CcTest::i_isolate());
+
+ // "Hello, World!" in ASCII.
+ byte buffer_array[13] = {0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x2c, 0x20,
+ 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x21};
+
+ byte* buffer = &buffer_array[0];
+ int buffer_size = sizeof(buffer_array);
+
+ CodeDesc code_desc;
+ code_desc.buffer = buffer;
+ code_desc.buffer_size = buffer_size;
+ code_desc.constant_pool_size = 0;
+ code_desc.instr_size = buffer_size;
+ code_desc.reloc_size = 0;
+ code_desc.origin = nullptr;
+ code_desc.unwinding_info = nullptr;
+ code_desc.unwinding_info_size = 0;
+
+ Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
+ code_desc, 0, Handle<Object>::null());
+
+ CHECK(!code->has_unwinding_info());
+ CHECK_EQ(code->instruction_size(), buffer_size);
+ CHECK_EQ(memcmp(code->instruction_start(), buffer, buffer_size), 0);
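+ // Without unwinding info, the code object is just the header followed by
+ // the instructions; the kHeapObjectTag term accounts for the tag bits in
+ // the raw handle pointer.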
+ CHECK_EQ(code->instruction_end() - reinterpret_cast<byte*>(*code),
+ Code::kHeaderSize + buffer_size - kHeapObjectTag);
+}
+
+TEST(CodeLayoutWithUnwindingInfo) {
+ CcTest::InitializeVM();
+ HandleScope handle_scope(CcTest::i_isolate());
+
+ // "Hello, World!" in ASCII.
+ byte buffer_array[13] = {0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x2c, 0x20,
+ 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x21};
+
+ // "JavaScript" in ASCII.
+ byte unwinding_info_array[10] = {0x4a, 0x61, 0x76, 0x61, 0x53,
+ 0x63, 0x72, 0x69, 0x70, 0x74};
+
+ byte* buffer = &buffer_array[0];
+ int buffer_size = sizeof(buffer_array);
+ byte* unwinding_info = &unwinding_info_array[0];
+ int unwinding_info_size = sizeof(unwinding_info_array);
+
+ CodeDesc code_desc;
+ code_desc.buffer = buffer;
+ code_desc.buffer_size = buffer_size;
+ code_desc.constant_pool_size = 0;
+ code_desc.instr_size = buffer_size;
+ code_desc.reloc_size = 0;
+ code_desc.origin = nullptr;
+ code_desc.unwinding_info = unwinding_info;
+ code_desc.unwinding_info_size = unwinding_info_size;
+
+ Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
+ code_desc, 0, Handle<Object>::null());
+
+ CHECK(code->has_unwinding_info());
+ CHECK_EQ(code->instruction_size(), buffer_size);
+ CHECK_EQ(memcmp(code->instruction_start(), buffer, buffer_size), 0);
+ CHECK(IsAligned(code->GetUnwindingInfoSizeOffset(), 8));
+ CHECK_EQ(code->unwinding_info_size(), unwinding_info_size);
+ CHECK(
+ IsAligned(reinterpret_cast<uintptr_t>(code->unwinding_info_start()), 8));
+ CHECK_EQ(
+ memcmp(code->unwinding_info_start(), unwinding_info, unwinding_info_size),
+ 0);
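+ // Expected layout: header, instructions padded to an 8-byte boundary, a
+ // 64-bit unwinding-info size field, then the unwinding info itself.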
+ CHECK_EQ(code->unwinding_info_end() - reinterpret_cast<byte*>(*code),
+ Code::kHeaderSize + RoundUp(buffer_size, kInt64Size) + kInt64Size +
+ unwinding_info_size - kHeapObjectTag);
+}
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
new file mode 100644
index 0000000000..2d153e3822
--- /dev/null
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -0,0 +1,1477 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/ic/stub-cache.h"
+#include "src/isolate.h"
+#include "test/cctest/compiler/code-assembler-tester.h"
+#include "test/cctest/compiler/function-tester.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::FunctionTester;
+using compiler::Node;
+
+typedef compiler::CodeAssemblerTesterImpl<CodeStubAssembler>
+ CodeStubAssemblerTester;
+
+TEST(FixedArrayAccessSmiIndex) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(5);
+ array->set(4, Smi::FromInt(733));
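+ // SMI_PARAMETERS: the element index is passed as a tagged Smi, hence the
+ // SmiTag around the raw Int32Constant.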
+ m.Return(m.LoadFixedArrayElement(m.HeapConstant(array),
+ m.SmiTag(m.Int32Constant(4)), 0,
+ CodeStubAssembler::SMI_PARAMETERS));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(733, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+TEST(LoadHeapNumberValue) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(1234);
+ m.Return(m.SmiTag(
+ m.ChangeFloat64ToUint32(m.LoadHeapNumberValue(m.HeapConstant(number)))));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(1234, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+TEST(LoadInstanceType) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Handle<HeapObject> undefined = isolate->factory()->undefined_value();
+ m.Return(m.SmiTag(m.LoadInstanceType(m.HeapConstant(undefined))));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(InstanceType::ODDBALL_TYPE,
+ Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+TEST(BitFieldDecode) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+
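+ // A 3-bit field starting at bit 3 (BitField<Type, shift, size>).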
+ class TestBitField : public BitField<unsigned, 3, 3> {};
+ m.Return(m.SmiTag(m.BitFieldDecode<TestBitField>(m.Int32Constant(0x2f))));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ // value = 00101111
+ // mask = 00111000
+ // result = 101
+ CHECK_EQ(5, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+TEST(JSFunction) {
+ const int kNumParams = 3; // Receiver, left, right.
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeStubAssemblerTester m(isolate, kNumParams);
+ m.Return(m.SmiFromWord32(m.Int32Add(m.SmiToWord32(m.Parameter(1)),
+ m.SmiToWord32(m.Parameter(2)))));
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ MaybeHandle<Object> result = ft.Call(isolate->factory()->undefined_value(),
+ handle(Smi::FromInt(23), isolate),
+ handle(Smi::FromInt(34), isolate));
+ CHECK_EQ(57, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
+TEST(ComputeIntegerHash) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 2;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+ m.Return(m.SmiFromWord32(m.ComputeIntegerHash(
+ m.SmiToWord32(m.Parameter(0)), m.SmiToWord32(m.Parameter(1)))));
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Smi> hash_seed = isolate->factory()->hash_seed();
+
+ base::RandomNumberGenerator rand_gen(FLAG_random_seed);
+
+ for (int i = 0; i < 1024; i++) {
+ int k = rand_gen.NextInt(Smi::kMaxValue);
+
+ Handle<Smi> key(Smi::FromInt(k), isolate);
+ Handle<Object> result = ft.Call(key, hash_seed).ToHandleChecked();
+
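+ // The stub boxes the hash as a Smi, so mask the reference hash to Smi
+ // range before comparing.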
+ uint32_t hash = ComputeIntegerHash(k, hash_seed->value());
+ Smi* expected = Smi::FromInt(hash & Smi::kMaxValue);
+ CHECK_EQ(expected, Smi::cast(*result));
+ }
+}
+
+TEST(TryToName) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 3;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ enum Result { kKeyIsIndex, kKeyIsUnique, kBailout };
+ {
+ Node* key = m.Parameter(0);
+ Node* expected_result = m.Parameter(1);
+ Node* expected_arg = m.Parameter(2);
+
+ Label passed(&m), failed(&m);
+ Label if_keyisindex(&m), if_keyisunique(&m), if_bailout(&m);
+ Variable var_index(&m, MachineRepresentation::kWord32);
+
+ m.TryToName(key, &if_keyisindex, &var_index, &if_keyisunique, &if_bailout);
+
+ m.Bind(&if_keyisindex);
+ m.GotoUnless(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kKeyIsIndex))),
+ &failed);
+ m.Branch(m.Word32Equal(m.SmiToWord32(expected_arg), var_index.value()),
+ &passed, &failed);
+
+ m.Bind(&if_keyisunique);
+ m.GotoUnless(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kKeyIsUnique))),
+ &failed);
+ m.Branch(m.WordEqual(expected_arg, key), &passed, &failed);
+
+ m.Bind(&if_bailout);
+ m.Branch(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
+ &passed, &failed);
+
+ m.Bind(&passed);
+ m.Return(m.BooleanConstant(true));
+
+ m.Bind(&failed);
+ m.Return(m.BooleanConstant(false));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Object> expect_index(Smi::FromInt(kKeyIsIndex), isolate);
+ Handle<Object> expect_unique(Smi::FromInt(kKeyIsUnique), isolate);
+ Handle<Object> expect_bailout(Smi::FromInt(kBailout), isolate);
+
+ {
+ // TryToName(<zero smi>) => if_keyisindex: smi value.
+ Handle<Object> key(Smi::FromInt(0), isolate);
+ ft.CheckTrue(key, expect_index, key);
+ }
+
+ {
+ // TryToName(<positive smi>) => if_keyisindex: smi value.
+ Handle<Object> key(Smi::FromInt(153), isolate);
+ ft.CheckTrue(key, expect_index, key);
+ }
+
+ {
+ // TryToName(<negative smi>) => bailout.
+ Handle<Object> key(Smi::FromInt(-1), isolate);
+ ft.CheckTrue(key, expect_bailout);
+ }
+
+ {
+ // TryToName(<symbol>) => if_keyisunique: <symbol>.
+ Handle<Object> key = isolate->factory()->NewSymbol();
+ ft.CheckTrue(key, expect_unique, key);
+ }
+
+ {
+ // TryToName(<internalized string>) => if_keyisunique: <internalized string>
+ Handle<Object> key = isolate->factory()->InternalizeUtf8String("test");
+ ft.CheckTrue(key, expect_unique, key);
+ }
+
+ {
+ // TryToName(<internalized number string>) => if_keyisindex: number.
+ Handle<Object> key = isolate->factory()->InternalizeUtf8String("153");
+ Handle<Object> index(Smi::FromInt(153), isolate);
+ ft.CheckTrue(key, expect_index, index);
+ }
+
+ {
+ // TryToName(<non-internalized string>) => bailout.
+ Handle<Object> key = isolate->factory()->NewStringFromAsciiChecked("test");
+ ft.CheckTrue(key, expect_bailout);
+ }
+}
+
+namespace {
+
+template <typename Dictionary>
+void TestNameDictionaryLookup() {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ enum Result { kFound, kNotFound };
+ {
+ Node* dictionary = m.Parameter(0);
+ Node* unique_name = m.Parameter(1);
+ Node* expected_result = m.Parameter(2);
+ Node* expected_arg = m.Parameter(3);
+
+ Label passed(&m), failed(&m);
+ Label if_found(&m), if_not_found(&m);
+ Variable var_name_index(&m, MachineRepresentation::kWord32);
+
+ m.NameDictionaryLookup<Dictionary>(dictionary, unique_name, &if_found,
+ &var_name_index, &if_not_found);
+ m.Bind(&if_found);
+ m.GotoUnless(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+ &failed);
+ m.Branch(m.Word32Equal(m.SmiToWord32(expected_arg), var_name_index.value()),
+ &passed, &failed);
+
+ m.Bind(&if_not_found);
+ m.Branch(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+ &passed, &failed);
+
+ m.Bind(&passed);
+ m.Return(m.BooleanConstant(true));
+
+ m.Bind(&failed);
+ m.Return(m.BooleanConstant(false));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Object> expect_found(Smi::FromInt(kFound), isolate);
+ Handle<Object> expect_not_found(Smi::FromInt(kNotFound), isolate);
+
+ Handle<Dictionary> dictionary = Dictionary::New(isolate, 40);
+ PropertyDetails fake_details = PropertyDetails::Empty();
+
+ Factory* factory = isolate->factory();
+ Handle<Name> keys[] = {
+ factory->InternalizeUtf8String("0"),
+ factory->InternalizeUtf8String("42"),
+ factory->InternalizeUtf8String("-153"),
+ factory->InternalizeUtf8String("0.0"),
+ factory->InternalizeUtf8String("4.2"),
+ factory->InternalizeUtf8String(""),
+ factory->InternalizeUtf8String("name"),
+ factory->NewSymbol(),
+ factory->NewPrivateSymbol(),
+ };
+
+ for (size_t i = 0; i < arraysize(keys); i++) {
+ Handle<Object> value = factory->NewPropertyCell();
+ dictionary = Dictionary::Add(dictionary, keys[i], value, fake_details);
+ }
+
+ for (size_t i = 0; i < arraysize(keys); i++) {
+ int entry = dictionary->FindEntry(keys[i]);
+ int name_index =
+ Dictionary::EntryToIndex(entry) + Dictionary::kEntryKeyIndex;
+ CHECK_NE(Dictionary::kNotFound, entry);
+
+ Handle<Object> expected_name_index(Smi::FromInt(name_index), isolate);
+ ft.CheckTrue(dictionary, keys[i], expect_found, expected_name_index);
+ }
+
+ Handle<Name> non_existing_keys[] = {
+ factory->InternalizeUtf8String("1"),
+ factory->InternalizeUtf8String("-42"),
+ factory->InternalizeUtf8String("153"),
+ factory->InternalizeUtf8String("-1.0"),
+ factory->InternalizeUtf8String("1.3"),
+ factory->InternalizeUtf8String("a"),
+ factory->InternalizeUtf8String("boom"),
+ factory->NewSymbol(),
+ factory->NewPrivateSymbol(),
+ };
+
+ for (size_t i = 0; i < arraysize(non_existing_keys); i++) {
+ int entry = dictionary->FindEntry(non_existing_keys[i]);
+ CHECK_EQ(Dictionary::kNotFound, entry);
+
+ ft.CheckTrue(dictionary, non_existing_keys[i], expect_not_found);
+ }
+}
+
+} // namespace
+
+TEST(NameDictionaryLookup) { TestNameDictionaryLookup<NameDictionary>(); }
+
+TEST(GlobalDictionaryLookup) { TestNameDictionaryLookup<GlobalDictionary>(); }
+
+namespace {
+
+template <typename Dictionary>
+void TestNumberDictionaryLookup() {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ enum Result { kFound, kNotFound };
+ {
+ Node* dictionary = m.Parameter(0);
+ Node* key = m.SmiToWord32(m.Parameter(1));
+ Node* expected_result = m.Parameter(2);
+ Node* expected_arg = m.Parameter(3);
+
+ Label passed(&m), failed(&m);
+ Label if_found(&m), if_not_found(&m);
+ Variable var_entry(&m, MachineRepresentation::kWord32);
+
+ m.NumberDictionaryLookup<Dictionary>(dictionary, key, &if_found, &var_entry,
+ &if_not_found);
+ m.Bind(&if_found);
+ m.GotoUnless(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+ &failed);
+ m.Branch(m.Word32Equal(m.SmiToWord32(expected_arg), var_entry.value()),
+ &passed, &failed);
+
+ m.Bind(&if_not_found);
+ m.Branch(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+ &passed, &failed);
+
+ m.Bind(&passed);
+ m.Return(m.BooleanConstant(true));
+
+ m.Bind(&failed);
+ m.Return(m.BooleanConstant(false));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Object> expect_found(Smi::FromInt(kFound), isolate);
+ Handle<Object> expect_not_found(Smi::FromInt(kNotFound), isolate);
+
+ const int kKeysCount = 1000;
+ Handle<Dictionary> dictionary = Dictionary::New(isolate, kKeysCount);
+ uint32_t keys[kKeysCount];
+
+ Handle<Object> fake_value(Smi::FromInt(42), isolate);
+ PropertyDetails fake_details = PropertyDetails::Empty();
+
+ base::RandomNumberGenerator rand_gen(FLAG_random_seed);
+
+ for (int i = 0; i < kKeysCount; i++) {
+ int random_key = rand_gen.NextInt(Smi::kMaxValue);
+ keys[i] = static_cast<uint32_t>(random_key);
+ if (dictionary->FindEntry(keys[i]) != Dictionary::kNotFound) continue;
+
+ dictionary = Dictionary::Add(dictionary, keys[i], fake_value, fake_details);
+ }
+
+ // Now try querying existing keys.
+ for (int i = 0; i < kKeysCount; i++) {
+ int entry = dictionary->FindEntry(keys[i]);
+ CHECK_NE(Dictionary::kNotFound, entry);
+
+ Handle<Object> key(Smi::FromInt(keys[i]), isolate);
+ Handle<Object> expected_entry(Smi::FromInt(entry), isolate);
+ ft.CheckTrue(dictionary, key, expect_found, expected_entry);
+ }
+
+ // Now try querying random keys which do not exist in the dictionary.
+ for (int i = 0; i < kKeysCount;) {
+ int random_key = rand_gen.NextInt(Smi::kMaxValue);
+ int entry = dictionary->FindEntry(random_key);
+ if (entry != Dictionary::kNotFound) continue;
+ i++;
+
+ Handle<Object> key(Smi::FromInt(random_key), isolate);
+ ft.CheckTrue(dictionary, key, expect_not_found);
+ }
+}
+
+} // namespace
+
+TEST(SeededNumberDictionaryLookup) {
+ TestNumberDictionaryLookup<SeededNumberDictionary>();
+}
+
+TEST(UnseededNumberDictionaryLookup) {
+ TestNumberDictionaryLookup<UnseededNumberDictionary>();
+}
+
+namespace {
+
+void AddProperties(Handle<JSObject> object, Handle<Name> names[],
+ size_t count) {
+ Isolate* isolate = object->GetIsolate();
+ for (size_t i = 0; i < count; i++) {
+ Handle<Object> value(Smi::FromInt(static_cast<int>(42 + i)), isolate);
+ JSObject::AddProperty(object, names[i], value, NONE);
+ }
+}
+
+Handle<AccessorPair> CreateAccessorPair(FunctionTester* ft,
+ const char* getter_body,
+ const char* setter_body) {
+ Handle<AccessorPair> pair = ft->isolate->factory()->NewAccessorPair();
+ if (getter_body) {
+ pair->set_getter(*ft->NewFunction(getter_body));
+ }
+ if (setter_body) {
+ pair->set_setter(*ft->NewFunction(setter_body));
+ }
+ return pair;
+}
+
+void AddProperties(Handle<JSObject> object, Handle<Name> names[],
+ size_t names_count, Handle<Object> values[],
+ size_t values_count, int seed = 0) {
+ Isolate* isolate = object->GetIsolate();
+ for (size_t i = 0; i < names_count; i++) {
+ Handle<Object> value = values[(seed + i) % values_count];
+ if (value->IsAccessorPair()) {
+ Handle<AccessorPair> pair = Handle<AccessorPair>::cast(value);
+ Handle<Object> getter(pair->getter(), isolate);
+ Handle<Object> setter(pair->setter(), isolate);
+ JSObject::DefineAccessor(object, names[i], getter, setter, NONE).Check();
+ } else {
+ JSObject::AddProperty(object, names[i], value, NONE);
+ }
+ }
+}
+
+} // namespace
+
+TEST(TryHasOwnProperty) {
+ typedef CodeStubAssembler::Label Label;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ enum Result { kFound, kNotFound, kBailout };
+ {
+ Node* object = m.Parameter(0);
+ Node* unique_name = m.Parameter(1);
+ Node* expected_result = m.Parameter(2);
+
+ Label passed(&m), failed(&m);
+ Label if_found(&m), if_not_found(&m), if_bailout(&m);
+
+ Node* map = m.LoadMap(object);
+ Node* instance_type = m.LoadMapInstanceType(map);
+
+ m.TryHasOwnProperty(object, map, instance_type, unique_name, &if_found,
+ &if_not_found, &if_bailout);
+
+ m.Bind(&if_found);
+ m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+ &passed, &failed);
+
+ m.Bind(&if_not_found);
+ m.Branch(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+ &passed, &failed);
+
+ m.Bind(&if_bailout);
+ m.Branch(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
+ &passed, &failed);
+
+ m.Bind(&passed);
+ m.Return(m.BooleanConstant(true));
+
+ m.Bind(&failed);
+ m.Return(m.BooleanConstant(false));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Object> expect_found(Smi::FromInt(kFound), isolate);
+ Handle<Object> expect_not_found(Smi::FromInt(kNotFound), isolate);
+ Handle<Object> expect_bailout(Smi::FromInt(kBailout), isolate);
+
+ Factory* factory = isolate->factory();
+
+ Handle<Name> deleted_property_name =
+ factory->InternalizeUtf8String("deleted");
+
+ Handle<Name> names[] = {
+ factory->InternalizeUtf8String("a"),
+ factory->InternalizeUtf8String("bb"),
+ factory->InternalizeUtf8String("ccc"),
+ factory->InternalizeUtf8String("dddd"),
+ factory->InternalizeUtf8String("eeeee"),
+ factory->InternalizeUtf8String(""),
+ factory->InternalizeUtf8String("name"),
+ factory->NewSymbol(),
+ factory->NewPrivateSymbol(),
+ };
+
+ std::vector<Handle<JSObject>> objects;
+
+ {
+ // Fast object, no inobject properties.
+ int inobject_properties = 0;
+ Handle<Map> map = Map::Create(isolate, inobject_properties);
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ AddProperties(object, names, arraysize(names));
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
+ CHECK(!object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Fast object, all inobject properties.
+ int inobject_properties = arraysize(names) * 2;
+ Handle<Map> map = Map::Create(isolate, inobject_properties);
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ AddProperties(object, names, arraysize(names));
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
+ CHECK(!object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Fast object, half inobject properties.
+ int inobject_properties = arraysize(names) / 2;
+ Handle<Map> map = Map::Create(isolate, inobject_properties);
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ AddProperties(object, names, arraysize(names));
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
+ CHECK(!object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Dictionary mode object.
+ Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSObject> object = factory->NewJSObject(function);
+ AddProperties(object, names, arraysize(names));
+ JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0, "test");
+
+ JSObject::AddProperty(object, deleted_property_name, object, NONE);
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ .FromJust());
+
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK(object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Global object.
+ Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ JSFunction::EnsureHasInitialMap(function);
+ function->initial_map()->set_instance_type(JS_GLOBAL_OBJECT_TYPE);
+ function->initial_map()->set_is_prototype_map(true);
+ function->initial_map()->set_dictionary_map(true);
+ Handle<JSObject> object = factory->NewJSGlobalObject(function);
+ AddProperties(object, names, arraysize(names));
+
+ JSObject::AddProperty(object, deleted_property_name, object, NONE);
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ .FromJust());
+
+ CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
+ CHECK(object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ for (Handle<JSObject> object : objects) {
+ for (size_t name_index = 0; name_index < arraysize(names); name_index++) {
+ Handle<Name> name = names[name_index];
+ CHECK(JSReceiver::HasProperty(object, name).FromJust());
+ ft.CheckTrue(object, name, expect_found);
+ }
+ }
+ }
+
+ {
+ Handle<Name> non_existing_names[] = {
+ factory->NewSymbol(),
+ factory->InternalizeUtf8String("ne_a"),
+ factory->InternalizeUtf8String("ne_bb"),
+ factory->NewPrivateSymbol(),
+ factory->InternalizeUtf8String("ne_ccc"),
+ factory->InternalizeUtf8String("ne_dddd"),
+ deleted_property_name,
+ };
+ for (Handle<JSObject> object : objects) {
+ for (size_t key_index = 0; key_index < arraysize(non_existing_names);
+ key_index++) {
+ Handle<Name> name = non_existing_names[key_index];
+ CHECK(!JSReceiver::HasProperty(object, name).FromJust());
+ ft.CheckTrue(object, name, expect_not_found);
+ }
+ }
+ }
+
+ {
+ Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSProxy> object = factory->NewJSProxy(function, objects[0]);
+ CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
+ ft.CheckTrue(object, names[0], expect_bailout);
+ }
+
+ {
+ Handle<JSObject> object = isolate->global_proxy();
+ CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map()->instance_type());
+ ft.CheckTrue(object, names[0], expect_bailout);
+ }
+}
+
+TEST(TryGetOwnProperty) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ Factory* factory = isolate->factory();
+
+ const int kNumParams = 2;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ Handle<Symbol> not_found_symbol = factory->NewSymbol();
+ Handle<Symbol> bailout_symbol = factory->NewSymbol();
+ {
+ Node* object = m.Parameter(0);
+ Node* unique_name = m.Parameter(1);
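+ // Implicit arguments (such as the context) follow the explicit ones; the
+ // exact offset is a FunctionTester calling-convention detail.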
+ Node* context = m.Parameter(kNumParams + 2);
+
+ Variable var_value(&m, MachineRepresentation::kTagged);
+ Label if_found(&m), if_not_found(&m), if_bailout(&m);
+
+ Node* map = m.LoadMap(object);
+ Node* instance_type = m.LoadMapInstanceType(map);
+
+ m.TryGetOwnProperty(context, object, object, map, instance_type,
+ unique_name, &if_found, &var_value, &if_not_found,
+ &if_bailout);
+
+ m.Bind(&if_found);
+ m.Return(var_value.value());
+
+ m.Bind(&if_not_found);
+ m.Return(m.HeapConstant(not_found_symbol));
+
+ m.Bind(&if_bailout);
+ m.Return(m.HeapConstant(bailout_symbol));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Name> deleted_property_name =
+ factory->InternalizeUtf8String("deleted");
+
+ Handle<Name> names[] = {
+ factory->InternalizeUtf8String("bb"),
+ factory->NewSymbol(),
+ factory->InternalizeUtf8String("a"),
+ factory->InternalizeUtf8String("ccc"),
+ factory->InternalizeUtf8String("esajefe"),
+ factory->NewPrivateSymbol(),
+ factory->InternalizeUtf8String("eeeee"),
+ factory->InternalizeUtf8String("p1"),
+ factory->InternalizeUtf8String("acshw23e"),
+ factory->InternalizeUtf8String(""),
+ factory->InternalizeUtf8String("dddd"),
+ factory->NewPrivateSymbol(),
+ factory->InternalizeUtf8String("name"),
+ factory->InternalizeUtf8String("p2"),
+ factory->InternalizeUtf8String("p3"),
+ factory->InternalizeUtf8String("p4"),
+ factory->NewPrivateSymbol(),
+ };
+ Handle<Object> values[] = {
+ factory->NewFunction(factory->empty_string()),
+ factory->NewSymbol(),
+ factory->InternalizeUtf8String("a"),
+ CreateAccessorPair(&ft, "() => 188;", "() => 199;"),
+ factory->NewFunction(factory->InternalizeUtf8String("bb")),
+ factory->InternalizeUtf8String("ccc"),
+ CreateAccessorPair(&ft, "() => 88;", nullptr),
+ handle(Smi::FromInt(1), isolate),
+ factory->InternalizeUtf8String(""),
+ CreateAccessorPair(&ft, nullptr, "() => 99;"),
+ factory->NewHeapNumber(4.2),
+ handle(Smi::FromInt(153), isolate),
+ factory->NewJSObject(factory->NewFunction(factory->empty_string())),
+ factory->NewPrivateSymbol(),
+ };
+ STATIC_ASSERT(arraysize(values) < arraysize(names));
+
+ base::RandomNumberGenerator rand_gen(FLAG_random_seed);
+
+ std::vector<Handle<JSObject>> objects;
+
+ {
+ // Fast object, no inobject properties.
+ int inobject_properties = 0;
+ Handle<Map> map = Map::Create(isolate, inobject_properties);
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ AddProperties(object, names, arraysize(names), values, arraysize(values),
+ rand_gen.NextInt());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
+ CHECK(!object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Fast object, all inobject properties.
+ int inobject_properties = arraysize(names) * 2;
+ Handle<Map> map = Map::Create(isolate, inobject_properties);
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ AddProperties(object, names, arraysize(names), values, arraysize(values),
+ rand_gen.NextInt());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
+ CHECK(!object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Fast object, half inobject properties.
+ int inobject_properties = arraysize(names) / 2;
+ Handle<Map> map = Map::Create(isolate, inobject_properties);
+ Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+ AddProperties(object, names, arraysize(names), values, arraysize(values),
+ rand_gen.NextInt());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
+ CHECK(!object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Dictionary mode object.
+ Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSObject> object = factory->NewJSObject(function);
+ AddProperties(object, names, arraysize(names), values, arraysize(values),
+ rand_gen.NextInt());
+ JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0, "test");
+
+ JSObject::AddProperty(object, deleted_property_name, object, NONE);
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ .FromJust());
+
+ CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
+ CHECK(object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ {
+ // Global object.
+ Handle<JSGlobalObject> object = isolate->global_object();
+ AddProperties(object, names, arraysize(names), values, arraysize(values),
+ rand_gen.NextInt());
+
+ JSObject::AddProperty(object, deleted_property_name, object, NONE);
+ CHECK(JSObject::DeleteProperty(object, deleted_property_name, SLOPPY)
+ .FromJust());
+
+ CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
+ CHECK(object->map()->is_dictionary_map());
+ objects.push_back(object);
+ }
+
+ // TODO(ishell): test proxy and interceptors when they are supported.
+
+ {
+ for (Handle<JSObject> object : objects) {
+ for (size_t name_index = 0; name_index < arraysize(names); name_index++) {
+ Handle<Name> name = names[name_index];
+ Handle<Object> expected_value =
+ JSReceiver::GetProperty(object, name).ToHandleChecked();
+ Handle<Object> value = ft.Call(object, name).ToHandleChecked();
+ CHECK(expected_value->SameValue(*value));
+ }
+ }
+ }
+
+ {
+ Handle<Name> non_existing_names[] = {
+ factory->NewSymbol(),
+ factory->InternalizeUtf8String("ne_a"),
+ factory->InternalizeUtf8String("ne_bb"),
+ factory->NewPrivateSymbol(),
+ factory->InternalizeUtf8String("ne_ccc"),
+ factory->InternalizeUtf8String("ne_dddd"),
+ deleted_property_name,
+ };
+ for (Handle<JSObject> object : objects) {
+ for (size_t key_index = 0; key_index < arraysize(non_existing_names);
+ key_index++) {
+ Handle<Name> name = non_existing_names[key_index];
+ Handle<Object> expected_value =
+ JSReceiver::GetProperty(object, name).ToHandleChecked();
+ CHECK(expected_value->IsUndefined(isolate));
+ Handle<Object> value = ft.Call(object, name).ToHandleChecked();
+ CHECK_EQ(*not_found_symbol, *value);
+ }
+ }
+ }
+
+ {
+ Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSProxy> object = factory->NewJSProxy(function, objects[0]);
+ CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
+ Handle<Object> value = ft.Call(object, names[0]).ToHandleChecked();
+ // Proxies are not supported yet.
+ CHECK_EQ(*bailout_symbol, *value);
+ }
+
+ {
+ Handle<JSObject> object = isolate->global_proxy();
+ CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map()->instance_type());
+ // Global proxies are not supported yet.
+ Handle<Object> value = ft.Call(object, names[0]).ToHandleChecked();
+ CHECK_EQ(*bailout_symbol, *value);
+ }
+}
+
+namespace {
+
+void AddElement(Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes = NONE) {
+ JSObject::AddDataElement(object, index, value, attributes).ToHandleChecked();
+}
+
+} // namespace
+
+TEST(TryLookupElement) {
+ typedef CodeStubAssembler::Label Label;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 3;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ enum Result { kFound, kNotFound, kBailout };
+ {
+ Node* object = m.Parameter(0);
+ Node* index = m.SmiToWord32(m.Parameter(1));
+ Node* expected_result = m.Parameter(2);
+
+ Label passed(&m), failed(&m);
+ Label if_found(&m), if_not_found(&m), if_bailout(&m);
+
+ Node* map = m.LoadMap(object);
+ Node* instance_type = m.LoadMapInstanceType(map);
+
+ m.TryLookupElement(object, map, instance_type, index, &if_found,
+ &if_not_found, &if_bailout);
+
+ m.Bind(&if_found);
+ m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
+ &passed, &failed);
+
+ m.Bind(&if_not_found);
+ m.Branch(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
+ &passed, &failed);
+
+ m.Bind(&if_bailout);
+ m.Branch(
+ m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
+ &passed, &failed);
+
+ m.Bind(&passed);
+ m.Return(m.BooleanConstant(true));
+
+ m.Bind(&failed);
+ m.Return(m.BooleanConstant(false));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Factory* factory = isolate->factory();
+ Handle<Object> smi0(Smi::FromInt(0), isolate);
+ Handle<Object> smi1(Smi::FromInt(1), isolate);
+ Handle<Object> smi7(Smi::FromInt(7), isolate);
+ Handle<Object> smi13(Smi::FromInt(13), isolate);
+ Handle<Object> smi42(Smi::FromInt(42), isolate);
+
+ Handle<Object> expect_found(Smi::FromInt(kFound), isolate);
+ Handle<Object> expect_not_found(Smi::FromInt(kNotFound), isolate);
+ Handle<Object> expect_bailout(Smi::FromInt(kBailout), isolate);
+
+#define CHECK_FOUND(object, index) \
+ CHECK(JSReceiver::HasElement(object, index).FromJust()); \
+ ft.CheckTrue(object, smi##index, expect_found);
+
+#define CHECK_NOT_FOUND(object, index) \
+ CHECK(!JSReceiver::HasElement(object, index).FromJust()); \
+ ft.CheckTrue(object, smi##index, expect_not_found);
+
+ {
+ Handle<JSArray> object = factory->NewJSArray(0, FAST_SMI_ELEMENTS);
+ AddElement(object, 0, smi0);
+ AddElement(object, 1, smi0);
+ CHECK_EQ(FAST_SMI_ELEMENTS, object->map()->elements_kind());
+
+ CHECK_FOUND(object, 0);
+ CHECK_FOUND(object, 1);
+ CHECK_NOT_FOUND(object, 7);
+ CHECK_NOT_FOUND(object, 13);
+ CHECK_NOT_FOUND(object, 42);
+ }
+
+ {
+ Handle<JSArray> object = factory->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
+ AddElement(object, 0, smi0);
+ AddElement(object, 13, smi0);
+ CHECK_EQ(FAST_HOLEY_SMI_ELEMENTS, object->map()->elements_kind());
+
+ CHECK_FOUND(object, 0);
+ CHECK_NOT_FOUND(object, 1);
+ CHECK_NOT_FOUND(object, 7);
+ CHECK_FOUND(object, 13);
+ CHECK_NOT_FOUND(object, 42);
+ }
+
+ {
+ Handle<JSArray> object = factory->NewJSArray(0, FAST_ELEMENTS);
+ AddElement(object, 0, smi0);
+ AddElement(object, 1, smi0);
+ CHECK_EQ(FAST_ELEMENTS, object->map()->elements_kind());
+
+ CHECK_FOUND(object, 0);
+ CHECK_FOUND(object, 1);
+ CHECK_NOT_FOUND(object, 7);
+ CHECK_NOT_FOUND(object, 13);
+ CHECK_NOT_FOUND(object, 42);
+ }
+
+ {
+ Handle<JSArray> object = factory->NewJSArray(0, FAST_HOLEY_ELEMENTS);
+ AddElement(object, 0, smi0);
+ AddElement(object, 13, smi0);
+ CHECK_EQ(FAST_HOLEY_ELEMENTS, object->map()->elements_kind());
+
+ CHECK_FOUND(object, 0);
+ CHECK_NOT_FOUND(object, 1);
+ CHECK_NOT_FOUND(object, 7);
+ CHECK_FOUND(object, 13);
+ CHECK_NOT_FOUND(object, 42);
+ }
+
+ {
+ Handle<JSFunction> constructor = isolate->string_function();
+ Handle<JSObject> object = factory->NewJSObject(constructor);
+ Handle<String> str = factory->InternalizeUtf8String("ab");
+ Handle<JSValue>::cast(object)->set_value(*str);
+ AddElement(object, 13, smi0);
+ CHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, object->map()->elements_kind());
+
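+ // Indices 0 and 1 are found because they resolve into the wrapped string
+ // "ab"; index 13 is the element added above.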
+ CHECK_FOUND(object, 0);
+ CHECK_FOUND(object, 1);
+ CHECK_NOT_FOUND(object, 7);
+ CHECK_FOUND(object, 13);
+ CHECK_NOT_FOUND(object, 42);
+ }
+
+ {
+ Handle<JSFunction> constructor = isolate->string_function();
+ Handle<JSObject> object = factory->NewJSObject(constructor);
+ Handle<String> str = factory->InternalizeUtf8String("ab");
+ Handle<JSValue>::cast(object)->set_value(*str);
+ AddElement(object, 13, smi0);
+ JSObject::NormalizeElements(object);
+ CHECK_EQ(SLOW_STRING_WRAPPER_ELEMENTS, object->map()->elements_kind());
+
+ CHECK_FOUND(object, 0);
+ CHECK_FOUND(object, 1);
+ CHECK_NOT_FOUND(object, 7);
+ CHECK_FOUND(object, 13);
+ CHECK_NOT_FOUND(object, 42);
+ }
+
+// TODO(ishell): uncomment once NO_ELEMENTS kind is supported.
+// {
+// Handle<Map> map = Map::Create(isolate, 0);
+// map->set_elements_kind(NO_ELEMENTS);
+// Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+// CHECK_EQ(NO_ELEMENTS, object->map()->elements_kind());
+//
+// CHECK_NOT_FOUND(object, 0);
+// CHECK_NOT_FOUND(object, 1);
+// CHECK_NOT_FOUND(object, 7);
+// CHECK_NOT_FOUND(object, 13);
+// CHECK_NOT_FOUND(object, 42);
+// }
+
+#undef CHECK_FOUND
+#undef CHECK_NOT_FOUND
+
+ {
+ Handle<JSArray> handler = factory->NewJSArray(0);
+ Handle<JSFunction> function = factory->NewFunction(factory->empty_string());
+ Handle<JSProxy> object = factory->NewJSProxy(function, handler);
+ CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
+ ft.CheckTrue(object, smi0, expect_bailout);
+ }
+
+ {
+ Handle<JSObject> object = isolate->global_object();
+ CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
+ ft.CheckTrue(object, smi0, expect_bailout);
+ }
+
+ {
+ Handle<JSObject> object = isolate->global_proxy();
+ CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map()->instance_type());
+ ft.CheckTrue(object, smi0, expect_bailout);
+ }
+}
+
+TEST(DeferredCodePhiHints) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Label block1(&m, Label::kDeferred);
+ m.Goto(&block1);
+ m.Bind(&block1);
+ {
+ Variable var_object(&m, MachineRepresentation::kTagged);
+ Label loop(&m, &var_object);
+ var_object.Bind(m.IntPtrConstant(0));
+ m.Goto(&loop);
+ m.Bind(&loop);
+ {
+ Node* map = m.LoadMap(var_object.value());
+ var_object.Bind(map);
+ m.Goto(&loop);
+ }
+ }
+ CHECK(!m.GenerateCode().is_null());
+}
+
+TEST(TestOutOfScopeVariable) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Label block1(&m);
+ Label block2(&m);
+ Label block3(&m);
+ Label block4(&m);
+ m.Branch(m.WordEqual(m.Parameter(0), m.IntPtrConstant(0)), &block1, &block4);
+ m.Bind(&block4);
+ {
+ Variable var_object(&m, MachineRepresentation::kTagged);
+ m.Branch(m.WordEqual(m.Parameter(0), m.IntPtrConstant(0)), &block2,
+ &block3);
+
+ m.Bind(&block2);
+ var_object.Bind(m.IntPtrConstant(55));
+ m.Goto(&block1);
+
+ m.Bind(&block3);
+ var_object.Bind(m.IntPtrConstant(66));
+ m.Goto(&block1);
+ }
+ m.Bind(&block1);
+ CHECK(!m.GenerateCode().is_null());
+}
+
+namespace {
+
+void TestStubCacheOffsetCalculation(StubCache::Table table) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 2;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ {
+ Node* name = m.Parameter(0);
+ Node* map = m.Parameter(1);
+ Node* primary_offset = m.StubCachePrimaryOffset(name, map);
+ Node* result;
+ if (table == StubCache::kPrimary) {
+ result = primary_offset;
+ } else {
+ CHECK_EQ(StubCache::kSecondary, table);
+ result = m.StubCacheSecondaryOffset(name, primary_offset);
+ }
+ m.Return(m.SmiFromWord32(result));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Factory* factory = isolate->factory();
+ Handle<Name> names[] = {
+ factory->NewSymbol(),
+ factory->InternalizeUtf8String("a"),
+ factory->InternalizeUtf8String("bb"),
+ factory->InternalizeUtf8String("ccc"),
+ factory->NewPrivateSymbol(),
+ factory->InternalizeUtf8String("dddd"),
+ factory->InternalizeUtf8String("eeeee"),
+ factory->InternalizeUtf8String("name"),
+ factory->NewSymbol(),
+ factory->NewPrivateSymbol(),
+ };
+
+ Handle<Map> maps[] = {
+ Handle<Map>(nullptr, isolate),
+ factory->cell_map(),
+ Map::Create(isolate, 0),
+ factory->meta_map(),
+ factory->code_map(),
+ Map::Create(isolate, 0),
+ factory->hash_table_map(),
+ factory->symbol_map(),
+ factory->string_map(),
+ Map::Create(isolate, 0),
+ factory->sloppy_arguments_elements_map(),
+ };
+
+ for (int name_index = 0; name_index < arraysize(names); name_index++) {
+ Handle<Name> name = names[name_index];
+ for (int map_index = 0; map_index < arraysize(maps); map_index++) {
+ Handle<Map> map = maps[map_index];
+
+ int expected_result;
+ {
+ int primary_offset = StubCache::PrimaryOffsetForTesting(*name, *map);
+ if (table == StubCache::kPrimary) {
+ expected_result = primary_offset;
+ } else {
+ expected_result =
+ StubCache::SecondaryOffsetForTesting(*name, primary_offset);
+ }
+ }
+ Handle<Object> result = ft.Call(name, map).ToHandleChecked();
+
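+ // The generated code returns the offset as a Smi, so mask the reference
+ // offset to Smi range for the comparison.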
+ Smi* expected = Smi::FromInt(expected_result & Smi::kMaxValue);
+ CHECK_EQ(expected, Smi::cast(*result));
+ }
+ }
+}
+
+} // namespace
+
+TEST(StubCachePrimaryOffset) {
+ TestStubCacheOffsetCalculation(StubCache::kPrimary);
+}
+
+TEST(StubCacheSecondaryOffset) {
+ TestStubCacheOffsetCalculation(StubCache::kSecondary);
+}
+
+namespace {
+
+Handle<Code> CreateCodeWithFlags(Code::Flags flags) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeStubAssemblerTester m(isolate, flags);
+ m.Return(m.UndefinedConstant());
+ return m.GenerateCodeCloseAndEscape();
+}
+
+} // namespace
+
+TEST(TryProbeStubCache) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 3;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ Code::Kind ic_kind = Code::LOAD_IC;
+ StubCache stub_cache(isolate, ic_kind);
+ stub_cache.Clear();
+
+ {
+ Node* receiver = m.Parameter(0);
+ Node* name = m.Parameter(1);
+ Node* expected_handler = m.Parameter(2);
+
+ Label passed(&m), failed(&m);
+
+ Variable var_handler(&m, MachineRepresentation::kTagged);
+ Label if_handler(&m), if_miss(&m);
+
+ m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler,
+ &if_miss);
+ m.Bind(&if_handler);
+ m.BranchIfWordEqual(expected_handler, var_handler.value(), &passed,
+ &failed);
+
+ m.Bind(&if_miss);
+ m.BranchIfWordEqual(expected_handler, m.IntPtrConstant(0), &passed,
+ &failed);
+
+ m.Bind(&passed);
+ m.Return(m.BooleanConstant(true));
+
+ m.Bind(&failed);
+ m.Return(m.BooleanConstant(false));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ std::vector<Handle<Name>> names;
+ std::vector<Handle<JSObject>> receivers;
+ std::vector<Handle<Code>> handlers;
+
+ base::RandomNumberGenerator rand_gen(FLAG_random_seed);
+
+ Factory* factory = isolate->factory();
+
+ // Generate some number of names.
+ for (int i = 0; i < StubCache::kPrimaryTableSize / 7; i++) {
+ Handle<Name> name;
+ switch (rand_gen.NextInt(3)) {
+ case 0: {
+ // Generate string.
+ std::stringstream ss;
+ ss << "s" << std::hex
+ << (rand_gen.NextInt(Smi::kMaxValue) % StubCache::kPrimaryTableSize);
+ name = factory->InternalizeUtf8String(ss.str().c_str());
+ break;
+ }
+ case 1: {
+ // Generate number string.
+ std::stringstream ss;
+ ss << (rand_gen.NextInt(Smi::kMaxValue) % StubCache::kPrimaryTableSize);
+ name = factory->InternalizeUtf8String(ss.str().c_str());
+ break;
+ }
+ case 2: {
+ // Generate symbol.
+ name = factory->NewSymbol();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ names.push_back(name);
+ }
+
+ // Generate some number of receiver maps and receivers.
+ for (int i = 0; i < StubCache::kSecondaryTableSize / 2; i++) {
+ Handle<Map> map = Map::Create(isolate, 0);
+ receivers.push_back(factory->NewJSObjectFromMap(map));
+ }
+
+ // Generate some number of handlers.
+ for (int i = 0; i < 30; i++) {
+ Code::Flags flags =
+ Code::RemoveHolderFromFlags(Code::ComputeHandlerFlags(ic_kind));
+ handlers.push_back(CreateCodeWithFlags(flags));
+ }
+
+ // Ensure that GC does not happen, because from now on we are going to fill
+ // our own stub cache instance with raw values.
+ DisallowHeapAllocation no_gc;
+
+ // Populate {stub_cache}.
+ const int N = StubCache::kPrimaryTableSize + StubCache::kSecondaryTableSize;
+ for (int i = 0; i < N; i++) {
+ int index = rand_gen.NextInt();
+ Handle<Name> name = names[index % names.size()];
+ Handle<JSObject> receiver = receivers[index % receivers.size()];
+ Handle<Code> handler = handlers[index % handlers.size()];
+ stub_cache.Set(*name, receiver->map(), *handler);
+ }
+
+ // Perform some queries.
+ bool queried_existing = false;
+ bool queried_non_existing = false;
+ for (int i = 0; i < N; i++) {
+ int index = rand_gen.NextInt();
+ Handle<Name> name = names[index % names.size()];
+ Handle<JSObject> receiver = receivers[index % receivers.size()];
+ Code* handler = stub_cache.Get(*name, receiver->map());
+ if (handler == nullptr) {
+ queried_non_existing = true;
+ } else {
+ queried_existing = true;
+ }
+
+ Handle<Code> expected_handler(handler, isolate);
+ ft.CheckTrue(receiver, name, expected_handler);
+ }
+
+ for (int i = 0; i < N; i++) {
+ int index1 = rand_gen.NextInt();
+ int index2 = rand_gen.NextInt();
+ Handle<Name> name = names[index1 % names.size()];
+ Handle<JSObject> receiver = receivers[index2 % receivers.size()];
+ Code* handler = stub_cache.Get(*name, receiver->map());
+ if (handler == nullptr) {
+ queried_non_existing = true;
+ } else {
+ queried_existing = true;
+ }
+
+ Handle<Code> expected_handler(handler, isolate);
+ ft.CheckTrue(receiver, name, expected_handler);
+ }
+ // Ensure we performed both kinds of queries.
+ CHECK(queried_existing && queried_non_existing);
+}
+
+TEST(GotoIfException) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 1;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+ Node* to_string_tag =
+ m.HeapConstant(isolate->factory()->to_string_tag_symbol());
+ Variable exception(&m, MachineRepresentation::kTagged);
+
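+ // ToString applied to a Symbol throws a TypeError, so the CallStub result
+ // carries an exception and GotoIfException must divert control to the
+ // handler with the pending exception bound to {exception}.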
+ Label exception_handler(&m);
+ Callable to_string = CodeFactory::ToString(isolate);
+ Node* string = m.CallStub(to_string, context, to_string_tag);
+ m.GotoIfException(string, &exception_handler, &exception);
+ m.Return(string);
+
+ m.Bind(&exception_handler);
+ m.Return(exception.value());
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ // Emulate TFJ builtin
+ code->set_flags(Code::ComputeFlags(Code::BUILTIN));
+
+ FunctionTester ft(code, kNumParams);
+ Handle<Object> result = ft.Call().ToHandleChecked();
+
+ // Should be a TypeError
+ CHECK(result->IsJSObject());
+
+ Handle<Object> constructor =
+ Object::GetPropertyOrElement(result,
+ isolate->factory()->constructor_string())
+ .ToHandleChecked();
+ CHECK(constructor->SameValue(*isolate->type_error_function()));
+}
+
+TEST(GotoIfExceptionMultiple) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4; // receiver, first, second, third
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
+ Node* first_value = m.Parameter(0);
+ Node* second_value = m.Parameter(1);
+ Node* third_value = m.Parameter(2);
+
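+ // Each parameter is run through ToString; a Symbol argument throws and
+ // control falls through to the next handler, so the driver below can
+ // exercise the three handlers selectively.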
+ Label exception_handler1(&m);
+ Label exception_handler2(&m);
+ Label exception_handler3(&m);
+ Variable return_value(&m, MachineRepresentation::kWord32);
+ Variable error(&m, MachineRepresentation::kTagged);
+
+ return_value.Bind(m.Int32Constant(0));
+
+ // try { return ToString(param1) } catch (e) { ... }
+ Callable to_string = CodeFactory::ToString(isolate);
+ Node* string = m.CallStub(to_string, context, first_value);
+ m.GotoIfException(string, &exception_handler1, &error);
+ m.Return(string);
+
+ // try { ToString(param2); return 7 } catch (e) { ... }
+ m.Bind(&exception_handler1);
+ return_value.Bind(m.Int32Constant(7));
+ error.Bind(m.UndefinedConstant());
+ string = m.CallStub(to_string, context, second_value);
+ m.GotoIfException(string, &exception_handler2, &error);
+ m.Return(m.SmiFromWord32(return_value.value()));
+
+ // try { ToString(param3); return 7 & ~2; } catch (e) { return e; }
+ m.Bind(&exception_handler2);
+ // Return returnValue & ~2
+ error.Bind(m.UndefinedConstant());
+ string = m.CallStub(to_string, context, third_value);
+ m.GotoIfException(string, &exception_handler3, &error);
+ m.Return(m.SmiFromWord32(
+ m.Word32And(return_value.value(),
+ m.Word32Xor(m.Int32Constant(2), m.Int32Constant(-1)))));
+
+ m.Bind(&exception_handler3);
+ m.Return(error.value());
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ // Emulate TFJ builtin
+ code->set_flags(Code::ComputeFlags(Code::BUILTIN));
+
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Object> result;
+ // First handler does not throw, returns result of first value
+ result = ft.Call(isolate->factory()->undefined_value(),
+ isolate->factory()->to_string_tag_symbol())
+ .ToHandleChecked();
+ CHECK(String::cast(*result)->IsOneByteEqualTo(OneByteVector("undefined")));
+
+ // First handler returns a number
+ result = ft.Call(isolate->factory()->to_string_tag_symbol(),
+ isolate->factory()->undefined_value())
+ .ToHandleChecked();
+ CHECK_EQ(7, Smi::cast(*result)->value());
+
+ // First handler throws, second handler returns a number
+ result = ft.Call(isolate->factory()->to_string_tag_symbol(),
+ isolate->factory()->to_primitive_symbol())
+ .ToHandleChecked();
+ CHECK_EQ(7 & ~2, Smi::cast(*result)->value());
+
+ // First handler throws, second handler throws, third handler returns thrown
+ // value.
+ result = ft.Call(isolate->factory()->to_string_tag_symbol(),
+ isolate->factory()->to_primitive_symbol(),
+ isolate->factory()->unscopables_symbol())
+ .ToHandleChecked();
+
+ // Should be a TypeError
+ CHECK(result->IsJSObject());
+
+ Handle<Object> constructor =
+ Object::GetPropertyOrElement(result,
+ isolate->factory()->constructor_string())
+ .ToHandleChecked();
+ CHECK(constructor->SameValue(*isolate->type_error_function()));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index f59d85483d..0e0a132146 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -78,8 +78,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
int source_reg_offset = kDoubleSize;
int reg_num = 0;
for (; reg_num < Register::kNumRegisters; ++reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ push(reg);
source_reg_offset += kPointerSize;
@@ -107,8 +108,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ ldr(ip, MemOperand(sp, 0));
__ cmp(reg, ip);
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index f5a40789be..2edb923675 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -74,8 +74,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
int source_reg_offset = kDoubleSize;
int reg_num = 0;
for (; reg_num < Register::kNumRegisters; ++reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
queue.Queue(reg);
source_reg_offset += kPointerSize;
@@ -103,8 +104,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ Pop(ip0);
__ cmp(reg, ip0);
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index efc39e9a2e..5a420b1a80 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -72,8 +72,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers, making sure they don't get clobbered.
int reg_num = 0;
for (; reg_num < Register::kNumRegisters; ++reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
param_offset += kPointerSize;
@@ -92,8 +93,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ cmp(reg, MemOperand(esp, 0));
__ Assert(equal, kRegisterWasClobbered);
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index ce577da46b..149a1742f8 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -82,8 +82,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
int source_reg_offset = kDoubleSize;
int reg_num = 2;
for (; reg_num < Register::kNumRegisters; ++reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ push(reg);
source_reg_offset += kPointerSize;
@@ -111,8 +112,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 2; --reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ lw(at, MemOperand(sp, 0));
__ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index 97f57b9168..bef21717ee 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -81,8 +81,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers, making sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
int reg_num = 2;
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (; reg_num < config->num_allocatable_general_registers(); ++reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index 786da547e4..b9eddd6c57 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -64,9 +64,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ pushq(rsi);
__ pushq(rdi);
-
- const RegisterConfiguration* config =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
if (!source_reg.is(rsp)) {
// The argument we pass to the stub is not a heap number, but instead
// stack-allocated and offset-wise made to look like a heap number for
diff --git a/deps/v8/test/cctest/test-code-stubs-x87.cc b/deps/v8/test/cctest/test-code-stubs-x87.cc
index efc39e9a2e..5a420b1a80 100644
--- a/deps/v8/test/cctest/test-code-stubs-x87.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x87.cc
@@ -72,8 +72,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers, making sure they don't get clobbered.
int reg_num = 0;
for (; reg_num < Register::kNumRegisters; ++reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
param_offset += kPointerSize;
@@ -92,8 +93,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
- Register reg = Register::from_code(reg_num);
- if (reg.IsAllocatable()) {
+ if (RegisterConfiguration::Crankshaft()->IsAllocatableGeneralCode(
+ reg_num)) {
+ Register reg = Register::from_code(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ cmp(reg, MemOperand(esp, 0));
__ Assert(equal, kRegisterWasClobbered);
diff --git a/deps/v8/test/cctest/test-code-stubs.cc b/deps/v8/test/cctest/test-code-stubs.cc
index c8c48ecc65..273f57ef0e 100644
--- a/deps/v8/test/cctest/test-code-stubs.cc
+++ b/deps/v8/test/cctest/test-code-stubs.cc
@@ -42,9 +42,18 @@ using namespace v8::internal;
int STDCALL ConvertDToICVersion(double d) {
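+ // The two 32-bit words of an IEEE 754 double are laid out in opposite order
+ // on big- and little-endian targets: the word holding the sign and exponent
+ // comes first on big-endian and second on little-endian.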
+#if defined(V8_TARGET_BIG_ENDIAN)
+ const int kExponentIndex = 0;
+ const int kMantissaIndex = 1;
+#elif defined(V8_TARGET_LITTLE_ENDIAN)
+ const int kExponentIndex = 1;
+ const int kMantissaIndex = 0;
+#else
+#error Unsupported endianness
+#endif
union { double d; uint32_t u[2]; } dbl;
dbl.d = d;
- uint32_t exponent_bits = dbl.u[1];
+ uint32_t exponent_bits = dbl.u[kExponentIndex];
int32_t shifted_mask = static_cast<int32_t>(Double::kExponentMask >> 32);
int32_t exponent = (((exponent_bits & shifted_mask) >>
(Double::kPhysicalSignificandSize - 32)) -
@@ -58,7 +67,8 @@ int STDCALL ConvertDToICVersion(double d) {
static_cast<uint32_t>(Double::kPhysicalSignificandSize);
if (unsigned_exponent >= max_exponent) {
if ((exponent - Double::kPhysicalSignificandSize) < 32) {
- result = dbl.u[0] << (exponent - Double::kPhysicalSignificandSize);
+ result = dbl.u[kMantissaIndex]
+ << (exponent - Double::kPhysicalSignificandSize);
}
} else {
uint64_t big_result =
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 32d720e24e..aef10f1f7a 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -32,6 +32,7 @@
#include "src/compiler.h"
#include "src/disasm.h"
+#include "src/interpreter/interpreter.h"
#include "src/parsing/parser.h"
#include "test/cctest/cctest.h"
@@ -305,7 +306,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
// We shouldn't have deoptimization support. We want to recompile and
// verify that our feedback vector preserves information.
CHECK(!f->shared()->has_deoptimization_support());
- Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+ Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
// Verify that we gathered feedback.
CHECK(!feedback_vector->is_empty());
@@ -319,8 +320,11 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
// Verify that the feedback is still "gathered" despite a recompilation
// of the full code.
CHECK(f->IsOptimized());
- CHECK(f->shared()->has_deoptimization_support());
- object = f->shared()->feedback_vector()->Get(slot_for_a);
+ // If the baseline code is bytecode, then it will not have deoptimization
+ // support. has_deoptimization_support() check is only required if the
+ // baseline code is from fullcodegen.
+ CHECK(f->shared()->has_deoptimization_support() || i::FLAG_ignition);
+ object = f->feedback_vector()->Get(slot_for_a);
CHECK(object->IsWeakCell() &&
WeakCell::cast(object)->value()->IsJSFunction());
}
@@ -355,18 +359,16 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
// If we are compiling lazily then it should not be compiled, and so no
// feedback vector allocated yet.
CHECK(!f->shared()->is_compiled());
- CHECK(f->shared()->feedback_vector()->is_empty());
+ CHECK(f->feedback_vector()->is_empty());
CompileRun("morphing_call();");
// Now a feedback vector is allocated.
CHECK(f->shared()->is_compiled());
- CHECK(!f->shared()->feedback_vector()->is_empty());
+ CHECK(!f->feedback_vector()->is_empty());
}
-
-// Test that optimized code for different closures is actually shared
-// immediately by the FastNewClosureStub when run in the same context.
+// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing1) {
FLAG_stress_compaction = false;
FLAG_allow_natives_syntax = true;
@@ -385,8 +387,8 @@ TEST(OptimizedCodeSharing1) {
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
- "var closure1 = MakeClosure();"
- "var closure2 = MakeClosure();");
+ "var closure1 = MakeClosure(); closure1();"
+ "var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
@@ -403,9 +405,7 @@ TEST(OptimizedCodeSharing1) {
}
}
-
-// Test that optimized code for different closures is actually shared
-// immediately by the FastNewClosureStub when run different contexts.
+// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing2) {
if (FLAG_stress_compaction) return;
FLAG_allow_natives_syntax = true;
@@ -456,8 +456,8 @@ TEST(OptimizedCodeSharing2) {
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
- "var closure1 = MakeClosure();"
- "var closure2 = MakeClosure();");
+ "var closure1 = MakeClosure(); closure1();"
+ "var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
@@ -475,9 +475,7 @@ TEST(OptimizedCodeSharing2) {
}
}
-
-// Test that optimized code for different closures is actually shared
-// immediately by the FastNewClosureStub without context-dependent entries.
+// Test that optimized code for different closures is actually shared.
TEST(OptimizedCodeSharing3) {
if (FLAG_stress_compaction) return;
FLAG_allow_natives_syntax = true;
@@ -531,8 +529,8 @@ TEST(OptimizedCodeSharing3) {
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
- "var closure1 = MakeClosure();"
- "var closure2 = MakeClosure();");
+ "var closure1 = MakeClosure(); closure1();"
+ "var closure2 = MakeClosure(); closure2();");
Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()
@@ -763,3 +761,83 @@ TEST(SplitConstantsInFullCompiler) {
CheckCodeForUnsafeLiteral(GetJSFunction(context->Global(), "f"));
}
#endif
+
+static void IsBaselineCompiled(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Handle<Object> object = v8::Utils::OpenHandle(*args[0]);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+ bool is_baseline = function->shared()->code()->kind() == Code::FUNCTION;
+ return args.GetReturnValue().Set(is_baseline);
+}
+
+static void InstallIsBaselineCompiledHelper(v8::Isolate* isolate) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::FunctionTemplate> t =
+ v8::FunctionTemplate::New(isolate, IsBaselineCompiled);
+ CHECK(context->Global()
+ ->Set(context, v8_str("IsBaselineCompiled"),
+ t->GetFunction(context).ToLocalChecked())
+ .FromJust());
+}
+
+TEST(IgnitionBaselineOnReturn) {
+ // TODO(4280): Remove this entire test once --ignition-preserve-bytecode is
+ // the default and the flag is removed. This test no longer provides any
+ // benefit once {InterpreterActivationsFinder} is gone.
+ if (FLAG_ignition_preserve_bytecode) return;
+ FLAG_allow_natives_syntax = true;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ FLAG_ignition = true;
+ Isolate* isolate = CcTest::i_isolate();
+ isolate->interpreter()->Initialize();
+ v8::HandleScope scope(CcTest::isolate());
+ InstallIsBaselineCompiledHelper(CcTest::isolate());
+
+ CompileRun(
+ "var is_baseline_in_function, is_baseline_after_return;\n"
+ "var return_val;\n"
+ "function f() {\n"
+ " %CompileBaseline(f);\n"
+ " is_baseline_in_function = IsBaselineCompiled(f);\n"
+ " return 1234;\n"
+ "};\n"
+ "return_val = f();\n"
+ "is_baseline_after_return = IsBaselineCompiled(f);\n");
+ CHECK_EQ(false, GetGlobalProperty("is_baseline_in_function")->BooleanValue());
+ CHECK_EQ(true, GetGlobalProperty("is_baseline_after_return")->BooleanValue());
+ CHECK_EQ(1234.0, GetGlobalProperty("return_val")->Number());
+}
+
+TEST(IgnitionEntryTrampolineSelfHealing) {
+ FLAG_allow_natives_syntax = true;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ FLAG_ignition = true;
+ Isolate* isolate = CcTest::i_isolate();
+ isolate->interpreter()->Initialize();
+ v8::HandleScope scope(CcTest::isolate());
+
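+ // Both closures share one SharedFunctionInfo. Marking {f1} for baseline
+ // compilation replaces the shared code, so {f2} must stop dispatching
+ // through the interpreter entry trampoline on its next call.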
+ CompileRun(
+ "function MkFun() {"
+ " function f() { return 23 }"
+ " return f"
+ "}"
+ "var f1 = MkFun(); f1();"
+ "var f2 = MkFun(); f2();"
+ "%BaselineFunctionOnNextCall(f1);");
+ Handle<JSFunction> f1 = Handle<JSFunction>::cast(GetGlobalProperty("f1"));
+ Handle<JSFunction> f2 = Handle<JSFunction>::cast(GetGlobalProperty("f2"));
+
+ // Function {f1} is marked for baseline.
+ CompileRun("var result1 = f1()");
+ CHECK_NE(*isolate->builtins()->InterpreterEntryTrampoline(), f1->code());
+ CHECK_EQ(*isolate->builtins()->InterpreterEntryTrampoline(), f2->code());
+ CHECK_EQ(23.0, GetGlobalProperty("result1")->Number());
+
+ // Function {f2} will self-heal now.
+ CompileRun("var result2 = f2()");
+ CHECK_NE(*isolate->builtins()->InterpreterEntryTrampoline(), f1->code());
+ CHECK_NE(*isolate->builtins()->InterpreterEntryTrampoline(), f2->code());
+ CHECK_EQ(23.0, GetGlobalProperty("result2")->Number());
+}
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 463a4abe3f..9569dc8678 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -406,3 +406,30 @@ TEST(SpecialIndexParsing) {
CheckNonArrayIndex(false, "-9999999999999999");
CheckNonArrayIndex(false, "42949672964294967296429496729694966");
}
+
+TEST(NoHandlesForTryNumberToSize) {
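+ // TryNumberToSize must not allocate any handles, for either Smi or
+ // HeapNumber inputs; SealHandleScope turns a stray handle allocation into a
+ // test failure.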
+ i::Isolate* isolate = CcTest::i_isolate();
+ size_t result = 0;
+ {
+ SealHandleScope no_handles(isolate);
+ Smi* smi = Smi::FromInt(1);
+ CHECK(TryNumberToSize(smi, &result));
+ CHECK_EQ(result, 1);
+ }
+ result = 0;
+ {
+ HandleScope scope(isolate);
+ Handle<HeapNumber> heap_number1 = isolate->factory()->NewHeapNumber(2.0);
+ {
+ SealHandleScope no_handles(isolate);
+ CHECK(TryNumberToSize(*heap_number1, &result));
+ CHECK_EQ(result, 2);
+ }
+ Handle<HeapNumber> heap_number2 = isolate->factory()->NewHeapNumber(
+ static_cast<double>(std::numeric_limits<size_t>::max()) + 10000.0);
+ {
+ SealHandleScope no_handles(isolate);
+ CHECK(!TryNumberToSize(*heap_number2, &result));
+ }
+ }
+}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 24c84c3df8..83c4e33c45 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -31,12 +31,13 @@
#include "include/v8-profiler.h"
#include "src/base/platform/platform.h"
-#include "src/base/smart-pointers.h"
#include "src/deoptimizer.h"
#include "src/profiler/cpu-profiler-inl.h"
+#include "src/profiler/profiler-listener.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
+
using i::CodeEntry;
using i::CpuProfile;
using i::CpuProfiler;
@@ -45,10 +46,9 @@ using i::Heap;
using i::ProfileGenerator;
using i::ProfileNode;
using i::ProfilerEventsProcessor;
+using i::ProfilerListener;
using i::ScopedVector;
using i::Vector;
-using v8::base::SmartPointer;
-
// Helper methods
static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
@@ -57,35 +57,31 @@ static v8::Local<v8::Function> GetFunction(v8::Local<v8::Context> env,
env->Global()->Get(env, v8_str(name)).ToLocalChecked());
}
-
static size_t offset(const char* src, const char* substring) {
const char* it = strstr(src, substring);
CHECK(it);
return static_cast<size_t>(it - src);
}
-
-static const char* reason(const i::Deoptimizer::DeoptReason reason) {
- return i::Deoptimizer::GetDeoptReason(reason);
+static const char* reason(const i::DeoptimizeReason reason) {
+ return i::DeoptimizeReasonToString(reason);
}
-
TEST(StartStop) {
- i::Isolate* isolate = CcTest::i_isolate();
- CpuProfilesCollection profiles(isolate->heap());
+ CpuProfilesCollection profiles(CcTest::i_isolate());
ProfileGenerator generator(&profiles);
- SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
+ std::unique_ptr<ProfilerEventsProcessor> processor(
+ new ProfilerEventsProcessor(CcTest::i_isolate(), &generator,
+ v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
processor->StopSynchronously();
}
-
static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame1,
i::Address frame2 = NULL,
i::Address frame3 = NULL) {
- i::TickSample* sample = proc->StartTickSample();
+ v8::TickSample* sample = proc->StartTickSample();
sample->pc = frame1;
sample->tos = frame1;
sample->frames_count = 0;
@@ -157,46 +153,56 @@ TEST(CodeEvents) {
i::AbstractCode* args3_code = CreateCode(&env);
i::AbstractCode* args4_code = CreateCode(&env);
- CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfilerEventsProcessor* processor =
+ new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
+ v8::base::TimeDelta::FromMicroseconds(100));
+ CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
- ProfileGenerator generator(profiles);
- SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, processor.get());
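+ // Code events now reach the profiler through a ProfilerListener registered
+ // with the isolate's code event dispatcher instead of being fed to the
+ // CpuProfiler directly.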
+ ProfilerListener profiler_listener(isolate);
+ isolate->code_event_dispatcher()->AddListener(&profiler_listener);
+ profiler_listener.AddObserver(&profiler);
// Enqueue code creation events.
const char* aaa_str = "aaa";
i::Handle<i::String> aaa_name = factory->NewStringFromAsciiChecked(aaa_str);
- profiler.CodeCreateEvent(i::Logger::FUNCTION_TAG, aaa_code, *aaa_name);
- profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment_code, "comment");
- profiler.CodeCreateEvent(i::Logger::STUB_TAG, args5_code, 5);
- profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment2_code, "comment2");
- profiler.CodeMoveEvent(comment2_code, moved_code->address());
- profiler.CodeCreateEvent(i::Logger::STUB_TAG, args3_code, 3);
- profiler.CodeCreateEvent(i::Logger::STUB_TAG, args4_code, 4);
+ profiler_listener.CodeCreateEvent(i::Logger::FUNCTION_TAG, aaa_code,
+ *aaa_name);
+ profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment_code,
+ "comment");
+ profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, args5_code, 5);
+ profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, comment2_code,
+ "comment2");
+ profiler_listener.CodeMoveEvent(comment2_code, moved_code->address());
+ profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, args3_code, 3);
+ profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, args4_code, 4);
// Enqueue a tick event to enable code events processing.
- EnqueueTickSampleEvent(processor.get(), aaa_code->address());
+ EnqueueTickSampleEvent(processor, aaa_code->address());
+ profiler_listener.RemoveObserver(&profiler);
+ isolate->code_event_dispatcher()->RemoveListener(&profiler_listener);
processor->StopSynchronously();
// Check the state of profile generator.
- CodeEntry* aaa = generator.code_map()->FindEntry(aaa_code->address());
+ CodeEntry* aaa = generator->code_map()->FindEntry(aaa_code->address());
CHECK(aaa);
CHECK_EQ(0, strcmp(aaa_str, aaa->name()));
- CodeEntry* comment = generator.code_map()->FindEntry(comment_code->address());
+ CodeEntry* comment =
+ generator->code_map()->FindEntry(comment_code->address());
CHECK(comment);
CHECK_EQ(0, strcmp("comment", comment->name()));
- CodeEntry* args5 = generator.code_map()->FindEntry(args5_code->address());
+ CodeEntry* args5 = generator->code_map()->FindEntry(args5_code->address());
CHECK(args5);
CHECK_EQ(0, strcmp("5", args5->name()));
- CHECK(!generator.code_map()->FindEntry(comment2_code->address()));
+ CHECK(!generator->code_map()->FindEntry(comment2_code->address()));
- CodeEntry* comment2 = generator.code_map()->FindEntry(moved_code->address());
+ CodeEntry* comment2 = generator->code_map()->FindEntry(moved_code->address());
CHECK(comment2);
CHECK_EQ(0, strcmp("comment2", comment2->name()));
}
@@ -216,29 +222,33 @@ TEST(TickEvents) {
i::AbstractCode* frame2_code = CreateCode(&env);
i::AbstractCode* frame3_code = CreateCode(&env);
- CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfilerEventsProcessor* processor =
+ new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
+ v8::base::TimeDelta::FromMicroseconds(100));
+ CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
- ProfileGenerator generator(profiles);
- SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, processor.get());
+ ProfilerListener profiler_listener(isolate);
+ isolate->code_event_dispatcher()->AddListener(&profiler_listener);
+ profiler_listener.AddObserver(&profiler);
- profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
- profiler.CodeCreateEvent(i::Logger::STUB_TAG, frame2_code, 5);
- profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
+ profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
+ profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, frame2_code, 5);
+ profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
- EnqueueTickSampleEvent(processor.get(), frame1_code->instruction_start());
+ EnqueueTickSampleEvent(processor, frame1_code->instruction_start());
EnqueueTickSampleEvent(
- processor.get(),
+ processor,
frame2_code->instruction_start() + frame2_code->ExecutableSize() / 2,
- frame1_code->instruction_start() + frame2_code->ExecutableSize() / 2);
- EnqueueTickSampleEvent(
- processor.get(),
- frame3_code->instruction_end() - 1,
- frame2_code->instruction_end() - 1,
- frame1_code->instruction_end() - 1);
+ frame1_code->instruction_start() + frame1_code->ExecutableSize() / 2);
+ EnqueueTickSampleEvent(processor, frame3_code->instruction_end() - 1,
+ frame2_code->instruction_end() - 1,
+ frame1_code->instruction_end() - 1);
+ profiler_listener.RemoveObserver(&profiler);
+ isolate->code_event_dispatcher()->RemoveListener(&profiler_listener);
processor->StopSynchronously();
CpuProfile* profile = profiles->StopProfiling("");
CHECK(profile);
@@ -259,6 +269,8 @@ TEST(TickEvents) {
const i::List<ProfileNode*>* top_down_ddd_children =
top_down_stub_children->last()->children();
CHECK_EQ(0, top_down_ddd_children->length());
+
+ isolate->code_event_dispatcher()->RemoveListener(&profiler_listener);
}
// http://crbug/51594
@@ -266,7 +278,7 @@ TEST(TickEvents) {
TEST(CrashIfStoppingLastNonExistentProfile) {
CcTest::InitializeVM();
TestSetup test_setup;
- CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
+ std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(CcTest::i_isolate()));
profiler->StartProfiling("1");
profiler->StopProfiling("2");
profiler->StartProfiling("1");
@@ -283,25 +295,31 @@ TEST(Issue1398) {
i::AbstractCode* code = CreateCode(&env);
- CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfilerEventsProcessor* processor =
+ new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
+ v8::base::TimeDelta::FromMicroseconds(100));
+ CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
- ProfileGenerator generator(profiles);
- SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, processor.get());
+ ProfilerListener profiler_listener(isolate);
+ isolate->code_event_dispatcher()->AddListener(&profiler_listener);
+ profiler_listener.AddObserver(&profiler);
- profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
+ profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
- i::TickSample* sample = processor->StartTickSample();
+ v8::TickSample* sample = processor->StartTickSample();
sample->pc = code->address();
sample->tos = 0;
- sample->frames_count = i::TickSample::kMaxFramesCount;
+ sample->frames_count = v8::TickSample::kMaxFramesCount;
for (unsigned i = 0; i < sample->frames_count; ++i) {
sample->stack[i] = code->address();
}
processor->FinishTickSample();
+ profiler_listener.RemoveObserver(&profiler);
+ isolate->code_event_dispatcher()->RemoveListener(&profiler_listener);
processor->StopSynchronously();
CpuProfile* profile = profiles->StopProfiling("");
CHECK(profile);
@@ -313,13 +331,13 @@ TEST(Issue1398) {
++actual_depth;
}
- CHECK_EQ(1 + i::TickSample::kMaxFramesCount, actual_depth); // +1 for PC.
+ CHECK_EQ(1 + v8::TickSample::kMaxFramesCount, actual_depth); // +1 for PC.
}
TEST(DeleteAllCpuProfiles) {
CcTest::InitializeVM();
TestSetup test_setup;
- CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
+ std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(CcTest::i_isolate()));
CHECK_EQ(0, profiler->GetProfilesCount());
profiler->DeleteAllProfiles();
CHECK_EQ(0, profiler->GetProfilesCount());
@@ -363,7 +381,7 @@ static bool FindCpuProfile(v8::CpuProfiler* v8profiler,
TEST(DeleteCpuProfile) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(cpu_profiler);
CHECK_EQ(0, iprofiler->GetProfilesCount());
@@ -396,41 +414,67 @@ TEST(DeleteCpuProfile) {
CHECK(FindCpuProfile(cpu_profiler, p3));
p3->Delete();
CHECK_EQ(0, iprofiler->GetProfilesCount());
+ cpu_profiler->Dispose();
}
TEST(ProfileStartEndTime) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
v8::Local<v8::String> profile_name = v8_str("test");
cpu_profiler->StartProfiling(profile_name);
const v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
CHECK(profile->GetStartTime() <= profile->GetEndTime());
+ cpu_profiler->Dispose();
}
-static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
- v8::Local<v8::Function> function,
- v8::Local<v8::Value> argv[], int argc,
- unsigned min_js_samples = 0,
- unsigned min_external_samples = 0,
- bool collect_samples = false) {
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
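+// Owns a CpuProfiler for the duration of a test, now that profilers are
+// created with CpuProfiler::New() and disposed explicitly, and registers it
+// with the profiler extension so tests can reach it.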
+class ProfilerHelper {
+ public:
+ explicit ProfilerHelper(const v8::Local<v8::Context>& context)
+ : context_(context),
+ profiler_(v8::CpuProfiler::New(context->GetIsolate())) {
+ i::ProfilerExtension::set_profiler(profiler_);
+ }
+ ~ProfilerHelper() {
+ i::ProfilerExtension::set_profiler(static_cast<CpuProfiler*>(nullptr));
+ profiler_->Dispose();
+ }
+
+ v8::CpuProfile* Run(v8::Local<v8::Function> function,
+ v8::Local<v8::Value> argv[], int argc,
+ unsigned min_js_samples = 0,
+ unsigned min_external_samples = 0,
+ bool collect_samples = false);
+
+ v8::CpuProfiler* profiler() { return profiler_; }
+
+ private:
+ v8::Local<v8::Context> context_;
+ v8::CpuProfiler* profiler_;
+};
+
+v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
+ v8::Local<v8::Value> argv[], int argc,
+ unsigned min_js_samples,
+ unsigned min_external_samples,
+ bool collect_samples) {
v8::Local<v8::String> profile_name = v8_str("my_profile");
- cpu_profiler->SetSamplingInterval(100);
- cpu_profiler->StartProfiling(profile_name, collect_samples);
+ profiler_->SetSamplingInterval(100);
+ profiler_->StartProfiling(profile_name, collect_samples);
- i::Sampler* sampler =
- reinterpret_cast<i::Isolate*>(env->GetIsolate())->logger()->sampler();
+ v8::internal::CpuProfiler* iprofiler =
+ reinterpret_cast<v8::internal::CpuProfiler*>(profiler_);
+ v8::sampler::Sampler* sampler = iprofiler->processor()->sampler();
sampler->StartCountingSamples();
do {
- function->Call(env, env->Global(), argc, argv).ToLocalChecked();
+ function->Call(context_, context_->Global(), argc, argv).ToLocalChecked();
} while (sampler->js_sample_count() < min_js_samples ||
sampler->external_sample_count() < min_external_samples);
- v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
+ v8::CpuProfile* profile = profiler_->StopProfiling(profile_name);
CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
@@ -439,7 +483,6 @@ static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
return profile;
}
-
static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
const v8::CpuProfileNode* node,
const char* name) {
@@ -461,8 +504,7 @@ static const v8::CpuProfileNode* GetChild(v8::Local<v8::Context> context,
const v8::CpuProfileNode* result = FindChild(context, node, name);
if (!result) {
char buffer[100];
- i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
- "Failed to GetChild: %s", name);
+ i::SNPrintF(i::ArrayVector(buffer), "Failed to GetChild: %s", name);
FATAL(buffer);
}
return result;
@@ -489,10 +531,6 @@ static const ProfileNode* GetSimpleBranch(v8::Local<v8::Context> context,
return reinterpret_cast<const ProfileNode*>(node);
}
-static void CallCollectSample(const v8::FunctionCallbackInfo<v8::Value>& info) {
- info.GetIsolate()->GetCpuProfiler()->CollectSample();
-}
-
static const char* cpu_profiler_test_source =
"%NeverOptimizeFunction(loop);\n"
"%NeverOptimizeFunction(delay);\n"
@@ -555,8 +593,8 @@ TEST(CollectCpuProfile) {
int32_t profiling_interval_ms = 200;
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 1000);
+ ProfilerHelper helper(env.local());
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
@@ -612,8 +650,8 @@ TEST(HotDeoptNoFrameEntry) {
int32_t profiling_interval_ms = 200;
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 1000);
+ ProfilerHelper helper(env.local());
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);
function->Call(env.local(), env->Global(), arraysize(args), args)
.ToLocalChecked();
@@ -635,8 +673,9 @@ TEST(CollectCpuProfileSamples) {
int32_t profiling_interval_ms = 200;
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
+ ProfilerHelper helper(env.local());
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 1000, 0, true);
+ helper.Run(function, args, arraysize(args), 1000, 0, true);
CHECK_LE(200, profile->GetSamplesCount());
uint64_t end_time = profile->GetEndTime();
@@ -688,8 +727,8 @@ TEST(SampleWhenFrameIsNotSetup) {
int32_t duration_ms = 100;
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), duration_ms)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 1000);
+ ProfilerHelper helper(env.local());
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
@@ -770,8 +809,7 @@ TEST(NativeAccessorUninitializedIC) {
func_template->InstanceTemplate();
TestApiCallbacks accessors(100);
- v8::Local<v8::External> data =
- v8::External::New(isolate, &accessors);
+ v8::Local<v8::External> data = v8::External::New(isolate, &accessors);
instance_template->SetAccessor(v8_str("foo"), &TestApiCallbacks::Getter,
&TestApiCallbacks::Setter, data);
v8::Local<v8::Function> func =
@@ -783,10 +821,10 @@ TEST(NativeAccessorUninitializedIC) {
CompileRun(native_accessor_test_source);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+ ProfilerHelper helper(env.local());
int32_t repeat_count = 1;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 0, 100);
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
@@ -838,8 +876,8 @@ TEST(NativeAccessorMonomorphicIC) {
int32_t repeat_count = 100;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 0, 100);
+ ProfilerHelper helper(env.local());
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
@@ -863,12 +901,11 @@ TEST(NativeMethodUninitializedIC) {
v8::HandleScope scope(isolate);
TestApiCallbacks callbacks(100);
- v8::Local<v8::External> data =
- v8::External::New(isolate, &callbacks);
+ v8::Local<v8::External> data = v8::External::New(isolate, &callbacks);
v8::Local<v8::FunctionTemplate> func_template =
v8::FunctionTemplate::New(isolate);
- func_template->SetClassName(v8_str("Test_InstanceCostructor"));
+ func_template->SetClassName(v8_str("Test_InstanceConstructor"));
v8::Local<v8::ObjectTemplate> proto_template =
func_template->PrototypeTemplate();
v8::Local<v8::Signature> signature =
@@ -887,10 +924,10 @@ TEST(NativeMethodUninitializedIC) {
CompileRun(native_method_test_source);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+ ProfilerHelper helper(env.local());
int32_t repeat_count = 1;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 0, 100);
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
@@ -941,10 +978,10 @@ TEST(NativeMethodMonomorphicIC) {
callbacks.set_warming_up(false);
}
+ ProfilerHelper helper(env.local());
int32_t repeat_count = 100;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 0, 200);
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 0, 200);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
GetChild(env.local(), root, "start");
@@ -973,7 +1010,8 @@ TEST(BoundFunctionCall) {
CompileRun(bound_function_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0);
+ ProfilerHelper helper(env);
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
@@ -1007,9 +1045,10 @@ static void TickLines(bool optimize) {
" n += m * m * m;\n"
" }\n"
"}\n"
+ "%s();"
"%s(%s);\n"
"%s();\n",
- func_name, opt_func, func_name, func_name);
+ func_name, func_name, opt_func, func_name, func_name);
CompileRun(script.start());
@@ -1017,36 +1056,44 @@ static void TickLines(bool optimize) {
v8::Utils::OpenHandle(*GetFunction(env.local(), func_name)));
CHECK(func->shared());
CHECK(func->shared()->abstract_code());
+ CHECK(!optimize || func->IsOptimized() ||
+ !CcTest::i_isolate()->use_crankshaft());
i::AbstractCode* code = func->abstract_code();
CHECK(code);
i::Address code_address = code->instruction_start();
CHECK(code_address);
- CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfilerEventsProcessor* processor =
+ new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
+ v8::base::TimeDelta::FromMicroseconds(100));
+ CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
- ProfileGenerator generator(profiles);
- SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, processor.get());
+ ProfilerListener profiler_listener(isolate);
+ isolate->code_event_dispatcher()->AddListener(&profiler_listener);
+ profiler_listener.AddObserver(&profiler);
// Enqueue code creation events.
i::Handle<i::String> str = factory->NewStringFromAsciiChecked(func_name);
int line = 1;
int column = 1;
- profiler.CodeCreateEvent(i::Logger::FUNCTION_TAG, code, func->shared(), NULL,
- *str, line, column);
+ profiler_listener.CodeCreateEvent(i::Logger::FUNCTION_TAG, code,
+ func->shared(), *str, line, column);
// Enqueue a tick event to enable code events processing.
- EnqueueTickSampleEvent(processor.get(), code_address);
+ EnqueueTickSampleEvent(processor, code_address);
+ profiler_listener.RemoveObserver(&profiler);
+ isolate->code_event_dispatcher()->RemoveListener(&profiler_listener);
processor->StopSynchronously();
CpuProfile* profile = profiles->StopProfiling("");
CHECK(profile);
// Check the state of profile generator.
- CodeEntry* func_entry = generator.code_map()->FindEntry(code_address);
+ CodeEntry* func_entry = generator->code_map()->FindEntry(code_address);
CHECK(func_entry);
CHECK_EQ(0, strcmp(func_name, func_entry->name()));
const i::JITLineInfoTable* line_info = func_entry->line_info();
@@ -1122,18 +1169,18 @@ TEST(FunctionCallSample) {
CompileRun(call_function_test_source);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+ ProfilerHelper helper(env.local());
int32_t duration_ms = 100;
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), duration_ms)};
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 1000);
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
GetChild(env.local(), start_node, "bar");
- const v8::CpuProfileNode* unresolved_node = FindChild(
- env.local(), root, i::ProfileGenerator::kUnresolvedFunctionName);
+ const v8::CpuProfileNode* unresolved_node =
+ FindChild(env.local(), root, i::CodeEntry::kUnresolvedFunctionName);
CHECK(!unresolved_node || GetChild(env.local(), unresolved_node, "call"));
profile->Delete();
@@ -1175,12 +1222,11 @@ TEST(FunctionApplySample) {
CompileRun(function_apply_test_source);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+ ProfilerHelper helper(env.local());
int32_t duration_ms = 100;
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), duration_ms)};
-
- v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 1000);
+ v8::CpuProfile* profile = helper.Run(function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
@@ -1188,8 +1234,8 @@ TEST(FunctionApplySample) {
GetChild(env.local(), start_node, "test");
GetChild(env.local(), test_node, "bar");
- const v8::CpuProfileNode* unresolved_node = FindChild(
- env.local(), start_node, ProfileGenerator::kUnresolvedFunctionName);
+ const v8::CpuProfileNode* unresolved_node =
+ FindChild(env.local(), start_node, CodeEntry::kUnresolvedFunctionName);
CHECK(!unresolved_node || GetChild(env.local(), unresolved_node, "apply"));
profile->Delete();
@@ -1221,14 +1267,14 @@ TEST(CpuProfileDeepStack) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
+ ProfilerHelper helper(env);
CompileRun(cpu_profiler_deep_stack_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
v8::Local<v8::String> profile_name = v8_str("my_profile");
function->Call(env, env->Global(), 0, NULL).ToLocalChecked();
- v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
+ v8::CpuProfile* profile = helper.profiler()->StopProfiling(profile_name);
CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
reinterpret_cast<i::CpuProfile*>(profile)->Print();
@@ -1290,7 +1336,8 @@ TEST(JsNativeJsSample) {
CompileRun(js_native_js_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 1000);
+ ProfilerHelper helper(env);
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
@@ -1340,8 +1387,9 @@ TEST(JsNativeJsRuntimeJsSample) {
env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();
CompileRun(js_native_js_runtime_js_test_source);
+ ProfilerHelper helper(env);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 1000);
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
@@ -1403,9 +1451,10 @@ TEST(JsNative1JsNative2JsSample) {
env->Global()->Set(env, v8_str("CallJsFunction2"), func2).FromJust();
CompileRun(js_native1_js_native2_js_test_source);
- v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 1000);
+ ProfilerHelper helper(env);
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
@@ -1424,6 +1473,10 @@ static const char* js_force_collect_sample_source =
" CallCollectSample();\n"
"}";
+static void CallCollectSample(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ i::ProfilerExtension::profiler()->CollectSample();
+}
+
TEST(CollectSampleAPI) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
@@ -1437,8 +1490,9 @@ TEST(CollectSampleAPI) {
env->Global()->Set(env, v8_str("CallCollectSample"), func).FromJust();
CompileRun(js_force_collect_sample_source);
+ ProfilerHelper helper(env);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
@@ -1490,9 +1544,10 @@ TEST(JsNativeJsRuntimeJsSampleMultiple) {
env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();
CompileRun(js_native_js_runtime_multiple_test_source);
- v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 500, 500);
+ ProfilerHelper helper(env);
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 500, 500);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
@@ -1544,14 +1599,14 @@ TEST(Inlining) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
+ ProfilerHelper helper(env);
CompileRun(inlining_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
v8::Local<v8::String> profile_name = v8_str("my_profile");
function->Call(env, env->Global(), 0, NULL).ToLocalChecked();
- v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
+ v8::CpuProfile* profile = helper.profiler()->StopProfiling(profile_name);
CHECK(profile);
// Dump collected profile to have a better diagnostic in case of failure.
reinterpret_cast<i::CpuProfile*>(profile)->Print();
@@ -1573,13 +1628,14 @@ TEST(Inlining) {
TEST(IdleTime) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::CpuProfiler* cpu_profiler = v8::CpuProfiler::New(env->GetIsolate());
v8::Local<v8::String> profile_name = v8_str("my_profile");
cpu_profiler->StartProfiling(profile_name);
i::Isolate* isolate = CcTest::i_isolate();
- i::ProfilerEventsProcessor* processor = isolate->cpu_profiler()->processor();
+ i::ProfilerEventsProcessor* processor =
+ reinterpret_cast<i::CpuProfiler*>(cpu_profiler)->processor();
processor->AddCurrentStack(isolate, true);
cpu_profiler->SetIdle(true);
@@ -1596,16 +1652,17 @@ TEST(IdleTime) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* program_node =
- GetChild(env.local(), root, ProfileGenerator::kProgramEntryName);
+ GetChild(env.local(), root, CodeEntry::kProgramEntryName);
CHECK_EQ(0, program_node->GetChildrenCount());
CHECK_GE(program_node->GetHitCount(), 2u);
const v8::CpuProfileNode* idle_node =
- GetChild(env.local(), root, ProfileGenerator::kIdleEntryName);
+ GetChild(env.local(), root, CodeEntry::kIdleEntryName);
CHECK_EQ(0, idle_node->GetChildrenCount());
CHECK_GE(idle_node->GetHitCount(), 3u);
profile->Delete();
+ cpu_profiler->Dispose();
}
static void CheckFunctionDetails(v8::Isolate* isolate,
@@ -1628,6 +1685,7 @@ TEST(FunctionDetails) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
+ ProfilerHelper helper(env);
v8::Local<v8::Script> script_a = CompileWithOrigin(
"%NeverOptimizeFunction(foo);\n"
@@ -1674,7 +1732,7 @@ TEST(DontStopOnFinishedProfileDelete) {
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::CpuProfiler* profiler = v8::CpuProfiler::New(env->GetIsolate());
i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
CHECK_EQ(0, iprofiler->GetProfilesCount());
@@ -1699,6 +1757,7 @@ TEST(DontStopOnFinishedProfileDelete) {
outer_profile->Delete();
outer_profile = NULL;
CHECK_EQ(0, iprofiler->GetProfilesCount());
+ profiler->Dispose();
}
@@ -1720,9 +1779,9 @@ TEST(CollectDeoptEvents) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Isolate* isolate = env->GetIsolate();
- v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
- i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+ ProfilerHelper helper(env);
+ i::CpuProfiler* iprofiler =
+ reinterpret_cast<i::CpuProfiler*>(helper.profiler());
const char opt_source[] =
"function opt_function%d(value, depth) {\n"
@@ -1797,21 +1856,21 @@ TEST(CollectDeoptEvents) {
{
const char* branch[] = {"", "opt_function0", "opt_function0"};
- CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber),
+ CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber),
GetBranchDeoptReason(env, iprofile, branch, arraysize(branch)));
}
{
const char* branch[] = {"", "opt_function1", "opt_function1"};
const char* deopt_reason =
GetBranchDeoptReason(env, iprofile, branch, arraysize(branch));
- if (deopt_reason != reason(i::Deoptimizer::kNaN) &&
- deopt_reason != reason(i::Deoptimizer::kLostPrecisionOrNaN)) {
+ if (deopt_reason != reason(i::DeoptimizeReason::kNaN) &&
+ deopt_reason != reason(i::DeoptimizeReason::kLostPrecisionOrNaN)) {
FATAL(deopt_reason);
}
}
{
const char* branch[] = {"", "opt_function2", "opt_function2"};
- CHECK_EQ(reason(i::Deoptimizer::kDivisionByZero),
+ CHECK_EQ(reason(i::DeoptimizeReason::kDivisionByZero),
GetBranchDeoptReason(env, iprofile, branch, arraysize(branch)));
}
iprofiler->DeleteProfile(iprofile);
@@ -1850,9 +1909,9 @@ TEST(DeoptAtFirstLevelInlinedSource) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Isolate* isolate = env->GetIsolate();
- v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
- i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+ ProfilerHelper helper(env);
+ i::CpuProfiler* iprofiler =
+ reinterpret_cast<i::CpuProfiler*>(helper.profiler());
// 0.........1.........2.........3.........4.........5.........6.........7
const char* source =
@@ -1901,7 +1960,7 @@ TEST(DeoptAtFirstLevelInlinedSource) {
CHECK_EQ(1U, deopt_infos.size());
const v8::CpuProfileDeoptInfo& info = deopt_infos[0];
- CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber), info.deopt_reason);
+ CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber), info.deopt_reason);
CHECK_EQ(2U, info.stack.size());
CHECK_EQ(inlined_script_id, info.stack[0].script_id);
CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);
@@ -1919,9 +1978,9 @@ TEST(DeoptAtSecondLevelInlinedSource) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Isolate* isolate = env->GetIsolate();
- v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
- i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+ ProfilerHelper helper(env);
+ i::CpuProfiler* iprofiler =
+ reinterpret_cast<i::CpuProfiler*>(helper.profiler());
// 0.........1.........2.........3.........4.........5.........6.........7
const char* source =
@@ -1974,7 +2033,7 @@ TEST(DeoptAtSecondLevelInlinedSource) {
CHECK_EQ(1U, deopt_infos.size());
const v8::CpuProfileDeoptInfo info = deopt_infos[0];
- CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber), info.deopt_reason);
+ CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber), info.deopt_reason);
CHECK_EQ(3U, info.stack.size());
CHECK_EQ(inlined_script_id, info.stack[0].script_id);
CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);
@@ -1993,9 +2052,9 @@ TEST(DeoptUntrackedFunction) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Isolate* isolate = env->GetIsolate();
- v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
- i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+ ProfilerHelper helper(env);
+ i::CpuProfiler* iprofiler =
+ reinterpret_cast<i::CpuProfiler*>(helper.profiler());
// 0.........1.........2.........3.........4.........5.........6.........7
const char* source =
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 67b9501592..a745949fbd 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -166,3 +166,52 @@ TEST(DaylightSavingsTime) {
CheckDST(august_20 + 2 * 3600 - 1000);
CheckDST(august_20);
}
+
+namespace {
+int legacy_parse_count = 0;
+void DateParseLegacyCounterCallback(v8::Isolate* isolate,
+ v8::Isolate::UseCounterFeature feature) {
+ if (feature == v8::Isolate::kLegacyDateParser) legacy_parse_count++;
+}
+} // anonymous namespace
+
+TEST(DateParseLegacyUseCounter) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ LocalContext context;
+ CcTest::isolate()->SetUseCounterCallback(DateParseLegacyCounterCallback);
+ CHECK_EQ(0, legacy_parse_count);
+ CompileRun("Date.parse('2015-02-31')");
+ CHECK_EQ(0, legacy_parse_count);
+ CompileRun("Date.parse('2015-02-31T11:22:33.444Z01:23')");
+ CHECK_EQ(0, legacy_parse_count);
+ CompileRun("Date.parse('2015-02-31T11:22:33.444')");
+ CHECK_EQ(0, legacy_parse_count);
+ CompileRun("Date.parse('2000 01 01')");
+ CHECK_EQ(1, legacy_parse_count);
+ CompileRun("Date.parse('2015-02-31T11:22:33.444 ')");
+ CHECK_EQ(1, legacy_parse_count);
+}
+
+#ifdef V8_I18N_SUPPORT
+TEST(DateCacheVersion) {
+ FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ v8::Local<v8::Number> date_cache_version =
+ v8::Local<v8::Number>::Cast(CompileRun("%DateCacheVersion()"));
+
+ CHECK(date_cache_version->IsNumber());
+ CHECK_EQ(0.0, date_cache_version->NumberValue(context).FromMaybe(-1.0));
+
+ v8::Date::DateTimeConfigurationChangeNotification(isolate);
+
+ date_cache_version =
+ v8::Local<v8::Number>::Cast(CompileRun("%DateCacheVersion()"));
+ CHECK(date_cache_version->IsNumber());
+ CHECK_EQ(1.0, date_cache_version->NumberValue(context).FromMaybe(-1.0));
+}
+#endif // V8_I18N_SUPPORT
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 192dce7b9f..925eaf4c27 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -157,29 +157,23 @@ static bool HasDebugInfo(v8::Local<v8::Function> fun) {
return shared->HasDebugInfo();
}
-
-// Set a break point in a function and return the associated break point
-// number.
-static int SetBreakPoint(Handle<v8::internal::JSFunction> fun, int position) {
+// Set a break point in a function with a position relative to function start,
+// and return the associated break point number.
+static int SetBreakPoint(v8::Local<v8::Function> fun, int position) {
+ i::Handle<i::JSFunction> function =
+ i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*fun));
+ position += function->shared()->start_position();
static int break_point = 0;
- v8::internal::Isolate* isolate = fun->GetIsolate();
+ v8::internal::Isolate* isolate = function->GetIsolate();
v8::internal::Debug* debug = isolate->debug();
debug->SetBreakPoint(
- fun,
+ function,
Handle<Object>(v8::internal::Smi::FromInt(++break_point), isolate),
&position);
return break_point;
}
-// Set a break point in a function and return the associated break point
-// number.
-static int SetBreakPoint(v8::Local<v8::Function> fun, int position) {
- return SetBreakPoint(
- i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*fun)), position);
-}
-
-
// Set a break point in a function using the Debug object and return the
// associated break point number.
static int SetBreakPointFromJS(v8::Isolate* isolate,
@@ -6081,53 +6075,48 @@ class EmptyExternalStringResource : public v8::String::ExternalStringResource {
::v8::internal::EmbeddedVector<uint16_t, 1> empty_;
};
-
-TEST(DebugGetLoadedScripts) {
+TEST(DebugScriptLineEndsAreAscending) {
DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
env.ExposeDebug();
- v8::Local<v8::Context> context = env.context();
- EmptyExternalStringResource source_ext_str;
- v8::Local<v8::String> source =
- v8::String::NewExternalTwoByte(env->GetIsolate(), &source_ext_str)
- .ToLocalChecked();
- CHECK(v8::Script::Compile(context, source).IsEmpty());
- Handle<i::ExternalTwoByteString> i_source(
- i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
- // This situation can happen if source was an external string disposed
- // by its owner.
- i_source->set_resource(0);
-
- bool allow_natives_syntax = i::FLAG_allow_natives_syntax;
- i::FLAG_allow_natives_syntax = true;
- EnableDebugger(env->GetIsolate());
- v8::MaybeLocal<v8::Value> result =
- CompileRun(env.context(),
- "var scripts = %DebugGetLoadedScripts();"
- "var count = scripts.length;"
- "for (var i = 0; i < count; ++i) {"
- " var lines = scripts[i].lineCount();"
- " if (lines < 1) throw 'lineCount';"
- " var last = -1;"
- " for (var j = 0; j < lines; ++j) {"
- " var end = scripts[i].lineEnd(j);"
- " if (last >= end) throw 'lineEnd';"
- " last = end;"
- " }"
- "}");
- CHECK(!result.IsEmpty());
- DisableDebugger(env->GetIsolate());
- // Must not crash while accessing line_ends.
- i::FLAG_allow_natives_syntax = allow_natives_syntax;
+ // Compile a test script.
+ v8::Local<v8::String> script = v8_str(isolate,
+ "function f() {\n"
+ " debugger;\n"
+ "}\n");
- // Some scripts are retrieved - at least the number of native scripts.
- CHECK_GT(env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "count"))
- .ToLocalChecked()
- ->Int32Value(context)
- .FromJust(),
- 8);
+ v8::ScriptOrigin origin1 = v8::ScriptOrigin(v8_str(isolate, "name"));
+ v8::Local<v8::Script> script1 =
+ v8::Script::Compile(env.context(), script, &origin1).ToLocalChecked();
+ USE(script1);
+
+ Handle<v8::internal::FixedArray> instances;
+ {
+ v8::internal::Debug* debug = CcTest::i_isolate()->debug();
+ v8::internal::DebugScope debug_scope(debug);
+ CHECK(!debug_scope.failed());
+ instances = debug->GetLoadedScripts();
+ }
+
+ CHECK_GT(instances->length(), 0);
+ for (int i = 0; i < instances->length(); i++) {
+ Handle<v8::internal::Script> script = Handle<v8::internal::Script>(
+ v8::internal::Script::cast(instances->get(i)));
+
+ v8::internal::Script::InitLineEnds(script);
+ v8::internal::FixedArray* ends =
+ v8::internal::FixedArray::cast(script->line_ends());
+ CHECK_GT(ends->length(), 0);
+
+ int prev_end = -1;
+ for (int j = 0; j < ends->length(); j++) {
+ const int curr_end = v8::internal::Smi::cast(ends->get(j))->value();
+ CHECK_GT(curr_end, prev_end);
+ prev_end = curr_end;
+ }
+ }
}
@@ -7185,6 +7174,40 @@ TEST(NoDebugContextWhenDebuggerDisabled) {
CHECK(context.IsEmpty());
}
+static void DebugEventCheckContext(
+ const v8::Debug::EventDetails& event_details) {
+ if (event_details.GetEvent() == v8::Break) {
+ v8::Isolate* isolate = event_details.GetIsolate();
+ CHECK(v8::Debug::GetDebuggedContext(isolate)
+ .ToLocalChecked()
+ ->Global()
+ ->Equals(isolate->GetCurrentContext(),
+ event_details.GetEventContext()->Global())
+ .FromJust());
+ }
+}
+
+static void CheckContext(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(v8::Debug::GetDebuggedContext(args.GetIsolate()).IsEmpty());
+}
+
+TEST(DebuggedContext) {
+ DebugLocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+
+ v8::Debug::SetDebugEventListener(isolate, DebugEventCheckContext);
+
+ v8::Local<v8::Function> foo =
+ CompileFunction(&env, "function foo(){bar=0;}", "foo");
+
+ SetBreakPoint(foo, 0);
+ foo->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
+
+ v8::Local<v8::Function> fun = v8::FunctionTemplate::New(isolate, CheckContext)
+ ->GetFunction(env.context())
+ .ToLocalChecked();
+ fun->Call(env.context(), env->Global(), 0, nullptr).ToLocalChecked();
+}
static v8::Local<v8::Value> expected_callback_data;
static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
@@ -7555,9 +7578,9 @@ static void DebugBreakInlineListener(
CHECK_EQ(expected_frame_count, frame_count);
for (int i = 0; i < frame_count; i++) {
- // The 5. element in the returned array of GetFrameDetails contains the
+ // The 6. element in the returned array of GetFrameDetails contains the
// source position of that frame.
- SNPrintF(script_vector, "%%GetFrameDetails(%d, %d)[5]", break_id, i);
+ SNPrintF(script_vector, "%%GetFrameDetails(%d, %d)[6]", break_id, i);
v8::Local<v8::Value> result = CompileRun(script);
CHECK_EQ(expected_line_number[i],
i::Script::GetLineNumber(source_script,
@@ -7834,8 +7857,8 @@ TEST(DebugPromiseInterceptedByTryCatch) {
CompileRun("var p = new Promise(function(res, rej) { fun(); res(); });");
CompileRun(
"var r;"
- "p.chain(function() { r = 'resolved'; },"
- " function() { r = 'rejected'; });");
+ "p.then(function() { r = 'resolved'; },"
+ " function() { r = 'rejected'; });");
CHECK(CompileRun("r")->Equals(context, v8_str("resolved")).FromJust());
}
@@ -7874,45 +7897,14 @@ TEST(DebugPromiseRejectedByCallback) {
CompileRun("var p = new Promise(function(res, rej) { fun(); res(); });");
CompileRun(
"var r;"
- "p.chain(function() { r = 'resolved'; },"
- " function(e) { r = 'rejected' + e; });");
+ "p.then(function() { r = 'resolved'; },"
+ " function(e) { r = 'rejected' + e; });");
CHECK(
CompileRun("r")->Equals(context, v8_str("rejectedrejection")).FromJust());
CHECK_EQ(1, exception_event_counter);
}
-TEST(DebugBreakOnExceptionInObserveCallback) {
- i::FLAG_harmony_object_observe = true;
- DebugLocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- v8::Debug::SetDebugEventListener(isolate, &DebugEventCountException);
- v8::Local<v8::Context> context = env.context();
- // Break on uncaught exception
- ChangeBreakOnException(false, true);
- exception_event_counter = 0;
-
- v8::Local<v8::FunctionTemplate> fun =
- v8::FunctionTemplate::New(isolate, ThrowCallback);
- CHECK(env->Global()
- ->Set(context, v8_str("fun"),
- fun->GetFunction(context).ToLocalChecked())
- .FromJust());
-
- CompileRun(
- "var obj = {};"
- "var callbackRan = false;"
- "Object.observe(obj, function() {"
- " callbackRan = true;"
- " throw Error('foo');"
- "});"
- "obj.prop = 1");
- CHECK(CompileRun("callbackRan")->BooleanValue(context).FromJust());
- CHECK_EQ(1, exception_event_counter);
-}
-
-
static void DebugHarmonyScopingListener(
const v8::Debug::EventDetails& event_details) {
v8::DebugEvent event = event_details.GetEvent();
@@ -7986,19 +7978,12 @@ static void NoInterruptsOnDebugEvent(
--after_compile_handler_depth;
}
-
TEST(NoInterruptsInDebugListener) {
DebugLocalContext env;
v8::Debug::SetDebugEventListener(env->GetIsolate(), NoInterruptsOnDebugEvent);
CompileRun("void(0);");
}
-class TestBreakLocation : public i::BreakLocation {
- public:
- using i::BreakLocation::GetIterator;
- using i::BreakLocation::Iterator;
-};
-
TEST(BreakLocationIterator) {
DebugLocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -8020,53 +8005,40 @@ TEST(BreakLocationIterator) {
CHECK(i_isolate->debug()->EnsureDebugInfo(shared, function));
Handle<i::DebugInfo> debug_info(shared->GetDebugInfo());
- int code_size = debug_info->abstract_code()->Size();
-
- bool found_return = false;
- bool found_call = false;
- bool found_debugger = false;
-
- // Test public interface.
- for (int i = 0; i < code_size; i++) {
- i::BreakLocation location = i::BreakLocation::FromCodeOffset(debug_info, i);
- if (location.IsCall()) found_call = true;
- if (location.IsReturn()) found_return = true;
- if (location.IsDebuggerStatement()) found_debugger = true;
+ Handle<i::AbstractCode> abstract_code(shared->abstract_code());
+
+ {
+ auto iterator = i::BreakIterator::GetIterator(debug_info, abstract_code,
+ i::ALL_BREAK_LOCATIONS);
+ CHECK(iterator->GetBreakLocation().IsDebuggerStatement());
+ CHECK_EQ(17, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsDebugBreakSlot());
+ CHECK_EQ(32, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsCall());
+ CHECK_EQ(32, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsDebuggerStatement());
+ CHECK_EQ(47, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsReturn());
+ CHECK_EQ(60, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->Done());
+ }
+
+ {
+ auto iterator = i::BreakIterator::GetIterator(debug_info, abstract_code,
+ i::CALLS_AND_RETURNS);
+ CHECK(iterator->GetBreakLocation().IsCall());
+ CHECK_EQ(32, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsReturn());
+ CHECK_EQ(60, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->Done());
}
- CHECK(found_call);
- CHECK(found_return);
- CHECK(found_debugger);
-
- // Test underlying implementation.
- TestBreakLocation::Iterator* iterator =
- TestBreakLocation::GetIterator(debug_info, i::ALL_BREAK_LOCATIONS);
- CHECK(iterator->GetBreakLocation().IsDebuggerStatement());
- CHECK_EQ(7, iterator->GetBreakLocation().position());
- iterator->Next();
- CHECK(iterator->GetBreakLocation().IsDebugBreakSlot());
- CHECK_EQ(22, iterator->GetBreakLocation().position());
- iterator->Next();
- CHECK(iterator->GetBreakLocation().IsCall());
- CHECK_EQ(22, iterator->GetBreakLocation().position());
- iterator->Next();
- CHECK(iterator->GetBreakLocation().IsDebuggerStatement());
- CHECK_EQ(37, iterator->GetBreakLocation().position());
- iterator->Next();
- CHECK(iterator->GetBreakLocation().IsReturn());
- CHECK_EQ(50, iterator->GetBreakLocation().position());
- iterator->Next();
- CHECK(iterator->Done());
- delete iterator;
-
- iterator = TestBreakLocation::GetIterator(debug_info, i::CALLS_AND_RETURNS);
- CHECK(iterator->GetBreakLocation().IsCall());
- CHECK_EQ(22, iterator->GetBreakLocation().position());
- iterator->Next();
- CHECK(iterator->GetBreakLocation().IsReturn());
- CHECK_EQ(50, iterator->GetBreakLocation().position());
- iterator->Next();
- CHECK(iterator->Done());
- delete iterator;
DisableDebugger(isolate);
}
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 35cb515dc1..e4506aee50 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -34,6 +34,7 @@
using namespace v8;
+namespace {
enum Expectations {
EXPECT_RESULT,
@@ -223,6 +224,7 @@ v8::Local<Integer> DeclarationContext::Query(Local<Name> key) {
return v8::Local<Integer>();
}
+} // namespace
// Test global declaration of a property the interceptor doesn't know
// about and doesn't handle.
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index cc4a7533d8..0756de6c1d 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -35,7 +35,7 @@
#include "src/global-handles.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
using namespace v8::internal;
@@ -89,7 +89,7 @@ static void TestHashMap(Handle<HashMap> table) {
// code should not be found.
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
- CHECK(JSReceiver::GetOrCreateIdentityHash(key)->IsSmi());
+ CHECK(JSReceiver::GetOrCreateIdentityHash(isolate, key)->IsSmi());
CHECK_EQ(table->FindEntry(key), HashMap::kNotFound);
CHECK_EQ(table->Lookup(key), CcTest::heap()->the_hole_value());
CHECK(JSReceiver::GetIdentityHash(isolate, key)->IsSmi());
@@ -100,8 +100,8 @@ static void TestHashMap(Handle<HashMap> table) {
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
CHECK_EQ(table->Lookup(key), CcTest::heap()->the_hole_value());
- Handle<Object> identity_hash = JSReceiver::GetIdentityHash(isolate, key);
- CHECK_EQ(CcTest::heap()->undefined_value(), *identity_hash);
+ Object* identity_hash = JSReceiver::GetIdentityHash(isolate, key);
+ CHECK_EQ(CcTest::heap()->undefined_value(), identity_hash);
}
}
@@ -113,6 +113,74 @@ TEST(HashMap) {
TestHashMap(ObjectHashTable::New(isolate, 23));
}
+template <typename HashSet>
+static void TestHashSet(Handle<HashSet> table) {
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+
+ Handle<JSObject> a = factory->NewJSArray(7);
+ Handle<JSObject> b = factory->NewJSArray(11);
+ table = HashSet::Add(table, a);
+ CHECK_EQ(table->NumberOfElements(), 1);
+ CHECK(table->Has(isolate, a));
+ CHECK(!table->Has(isolate, b));
+
+ // Keys still have to be valid after objects were moved.
+ CcTest::heap()->CollectGarbage(NEW_SPACE);
+ CHECK_EQ(table->NumberOfElements(), 1);
+ CHECK(table->Has(isolate, a));
+ CHECK(!table->Has(isolate, b));
+
+ // Keys that are overwritten should not change number of elements.
+ table = HashSet::Add(table, a);
+ CHECK_EQ(table->NumberOfElements(), 1);
+ CHECK(table->Has(isolate, a));
+ CHECK(!table->Has(isolate, b));
+
+ // Keys that have been removed are mapped to the hole.
+ // TODO(cbruni): not implemented yet.
+ // bool was_present = false;
+ // table = HashSet::Remove(table, a, &was_present);
+ // CHECK(was_present);
+ // CHECK_EQ(table->NumberOfElements(), 0);
+ // CHECK(!table->Has(a));
+ // CHECK(!table->Has(b));
+
+ // Keys should map back to their respective values and also should get
+ // an identity hash code generated.
+ for (int i = 0; i < 100; i++) {
+ Handle<JSReceiver> key = factory->NewJSArray(7);
+ table = HashSet::Add(table, key);
+ CHECK_EQ(table->NumberOfElements(), i + 2);
+ CHECK(table->Has(isolate, key));
+ CHECK(JSReceiver::GetIdentityHash(isolate, key)->IsSmi());
+ }
+
+ // Keys never added to the map which already have an identity hash
+ // code should not be found.
+ for (int i = 0; i < 100; i++) {
+ Handle<JSReceiver> key = factory->NewJSArray(7);
+ CHECK(JSReceiver::GetOrCreateIdentityHash(isolate, key)->IsSmi());
+ CHECK(!table->Has(isolate, key));
+ CHECK(JSReceiver::GetIdentityHash(isolate, key)->IsSmi());
+ }
+
+ // Keys that don't have an identity hash should not be found and also
+ // should not get an identity hash code generated.
+ for (int i = 0; i < 100; i++) {
+ Handle<JSReceiver> key = factory->NewJSArray(7);
+ CHECK(!table->Has(isolate, key));
+ Object* identity_hash = JSReceiver::GetIdentityHash(isolate, key);
+ CHECK_EQ(CcTest::heap()->undefined_value(), identity_hash);
+ }
+}
+
+TEST(HashSet) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+ TestHashSet(ObjectHashSet::New(isolate, 23));
+}
class ObjectHashTableTest: public ObjectHashTable {
public:
@@ -175,8 +243,8 @@ static void TestHashSetCausesGC(Handle<HashSet> table) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- SimulateFullSpace(CcTest::heap()->new_space());
- SimulateFullSpace(CcTest::heap()->old_space());
+ heap::SimulateFullSpace(CcTest::heap()->new_space());
+ heap::SimulateFullSpace(CcTest::heap()->old_space());
// Calling Contains() should not cause GC ever.
int gc_count = isolate->heap()->gc_count();
@@ -206,11 +274,11 @@ static void TestHashMapCausesGC(Handle<HashMap> table) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- SimulateFullSpace(CcTest::heap()->new_space());
- SimulateFullSpace(CcTest::heap()->old_space());
+ heap::SimulateFullSpace(CcTest::heap()->new_space());
+ heap::SimulateFullSpace(CcTest::heap()->old_space());
// Calling Lookup() should not cause GC ever.
- CHECK(table->Lookup(key)->IsTheHole());
+ CHECK(table->Lookup(key)->IsTheHole(isolate));
// Calling Put() should request GC by returning a failure.
int gc_count = isolate->heap()->gc_count();
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 7eea8a171c..d5f594962c 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -399,15 +399,6 @@ TEST(Type3) {
COMPARE(bfi(r1, r0, 31, 1),
"e7df1f90 bfi r1, r0, #31, #1");
- COMPARE(usat(r0, 1, Operand(r1)),
- "e6e10011 usat r0, #1, r1");
- COMPARE(usat(r2, 7, Operand(lr)),
- "e6e7201e usat r2, #7, lr");
- COMPARE(usat(r3, 31, Operand(r4, LSL, 31)),
- "e6ff3f94 usat r3, #31, r4, lsl #31");
- COMPARE(usat(r8, 0, Operand(r5, ASR, 17)),
- "e6e088d5 usat r8, #0, r5, asr #17");
-
COMPARE(pkhbt(r3, r4, Operand(r5, LSL, 17)),
"e6843895 pkhbt r3, r4, r5, lsl #17");
COMPARE(pkhtb(r3, r4, Operand(r5, ASR, 17)),
@@ -443,6 +434,15 @@ TEST(Type3) {
COMPARE(rbit(r10, ip), "e6ffaf3c rbit r10, ip");
}
+ COMPARE(usat(r0, 1, Operand(r1)),
+ "e6e10011 usat r0, #1, r1");
+ COMPARE(usat(r2, 7, Operand(lr)),
+ "e6e7201e usat r2, #7, lr");
+ COMPARE(usat(r3, 31, Operand(r4, LSL, 31)),
+ "e6ff3f94 usat r3, #31, r4, lsl #31");
+ COMPARE(usat(r8, 0, Operand(r5, ASR, 17)),
+ "e6e088d5 usat r8, #0, r5, asr #17");
+
COMPARE(smmla(r0, r1, r2, r3), "e7503211 smmla r0, r1, r2, r3");
COMPARE(smmla(r10, r9, r8, r7), "e75a7819 smmla r10, r9, r8, r7");
@@ -860,6 +860,51 @@ TEST(ARMv8_vrintX_disasm) {
}
+TEST(ARMv8_vselX_disasm) {
+ SET_UP();
+
+ if (CpuFeatures::IsSupported(ARMv8)) {
+ // Native instructions.
+ COMPARE(vsel(eq, d0, d1, d2),
+ "fe010b02 vseleq.f64 d0, d1, d2");
+ COMPARE(vsel(eq, s0, s1, s2),
+ "fe000a81 vseleq.f32 s0, s1, s2");
+ COMPARE(vsel(ge, d0, d1, d2),
+ "fe210b02 vselge.f64 d0, d1, d2");
+ COMPARE(vsel(ge, s0, s1, s2),
+ "fe200a81 vselge.f32 s0, s1, s2");
+ COMPARE(vsel(gt, d0, d1, d2),
+ "fe310b02 vselgt.f64 d0, d1, d2");
+ COMPARE(vsel(gt, s0, s1, s2),
+ "fe300a81 vselgt.f32 s0, s1, s2");
+ COMPARE(vsel(vs, d0, d1, d2),
+ "fe110b02 vselvs.f64 d0, d1, d2");
+ COMPARE(vsel(vs, s0, s1, s2),
+ "fe100a81 vselvs.f32 s0, s1, s2");
+
+ // Inverted conditions (and swapped inputs).
+ COMPARE(vsel(ne, d0, d1, d2),
+ "fe020b01 vseleq.f64 d0, d2, d1");
+ COMPARE(vsel(ne, s0, s1, s2),
+ "fe010a20 vseleq.f32 s0, s2, s1");
+ COMPARE(vsel(lt, d0, d1, d2),
+ "fe220b01 vselge.f64 d0, d2, d1");
+ COMPARE(vsel(lt, s0, s1, s2),
+ "fe210a20 vselge.f32 s0, s2, s1");
+ COMPARE(vsel(le, d0, d1, d2),
+ "fe320b01 vselgt.f64 d0, d2, d1");
+ COMPARE(vsel(le, s0, s1, s2),
+ "fe310a20 vselgt.f32 s0, s2, s1");
+ COMPARE(vsel(vc, d0, d1, d2),
+ "fe120b01 vselvs.f64 d0, d2, d1");
+ COMPARE(vsel(vc, s0, s1, s2),
+ "fe110a20 vselvs.f32 s0, s2, s1");
+ }
+
+ VERIFY_RUN();
+}
+
+
TEST(Neon) {
SET_UP();
@@ -1126,3 +1171,17 @@ TEST(Barrier) {
VERIFY_RUN();
}
+
+
+TEST(LoadStoreExclusive) {
+ SET_UP();
+
+ COMPARE(ldrexb(r0, r1), "e1d10f9f ldrexb r0, [r1]");
+ COMPARE(strexb(r0, r1, r2), "e1c20f91 strexb r0, r1, [r2]");
+ COMPARE(ldrexh(r0, r1), "e1f10f9f ldrexh r0, [r1]");
+ COMPARE(strexh(r0, r1, r2), "e1e20f91 strexh r0, r1, [r2]");
+ COMPARE(ldrex(r0, r1), "e1910f9f ldrex r0, [r1]");
+ COMPARE(strex(r0, r1, r2), "e1820f91 strex r0, r1, [r2]");
+
+ VERIFY_RUN();
+}
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index beca93ede2..c3653bd48e 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -1259,6 +1259,24 @@ TEST_(load_store_pair) {
CLEANUP();
}
+TEST_(load_store_acquire_release) {
+ SET_UP_MASM();
+
+ COMPARE(ldar(w0, x1), "ldar w0, [x1]");
+ COMPARE(ldarb(w2, x3), "ldarb w2, [x3]");
+ COMPARE(ldarh(w4, x5), "ldarh w4, [x5]");
+ COMPARE(ldaxr(w6, x7), "ldaxr w6, [x7]");
+ COMPARE(ldaxrb(w8, x9), "ldaxrb w8, [x9]");
+ COMPARE(ldaxrh(w10, x11), "ldaxrh w10, [x11]");
+ COMPARE(stlr(w12, x13), "stlr w12, [x13]");
+ COMPARE(stlrb(w14, x15), "stlrb w14, [x15]");
+ COMPARE(stlrh(w16, x17), "stlrh w16, [x17]");
+ COMPARE(stlxr(w18, w19, x20), "stlxr w18, w19, [x20]");
+ COMPARE(stlxrb(w21, w22, x23), "stlxrb w21, w22, [x23]");
+ COMPARE(stlxrh(w24, w25, x26), "stlxrh w24, w25, [x26]");
+
+ CLEANUP();
+}
#if 0 // TODO(all): enable.
TEST_(load_literal) {
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index f96eb00fd1..88471a26c8 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -29,11 +29,11 @@
#include "src/v8.h"
+#include "src/code-factory.h"
#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/ia32/frames-ia32.h"
-#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
@@ -290,7 +290,7 @@ TEST(DisasmIa320) {
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_INSIDE_TYPEOF));
+ Handle<Code> ic(CodeFactory::LoadIC(isolate).code());
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
@@ -390,6 +390,9 @@ TEST(DisasmIa320) {
{
// Move operation
__ movaps(xmm0, xmm1);
+ __ movups(xmm0, xmm1);
+ __ movups(xmm0, Operand(edx, 4));
+ __ movups(Operand(edx, 4), xmm0);
__ shufps(xmm0, xmm0, 0x0);
__ cvtsd2ss(xmm0, xmm1);
__ cvtsd2ss(xmm0, Operand(ebx, ecx, times_4, 10000));
@@ -665,12 +668,30 @@ TEST(DisasmIa320) {
// xchg.
{
+ __ xchg_b(eax, Operand(eax, 8));
+ __ xchg_w(eax, Operand(ebx, 8));
__ xchg(eax, eax);
__ xchg(eax, ebx);
__ xchg(ebx, ebx);
__ xchg(ebx, Operand(esp, 12));
}
+ // cmpxchg.
+ {
+ __ cmpxchg_b(Operand(esp, 12), eax);
+ __ cmpxchg_w(Operand(ebx, ecx, times_4, 10000), eax);
+ __ cmpxchg(Operand(ebx, ecx, times_4, 10000), eax);
+ }
+
+ // lock prefix.
+ {
+ __ lock();
+ __ cmpxchg(Operand(esp, 12), ebx);
+
+ __ lock();
+ __ xchg_w(eax, Operand(ecx, 8));
+ }
+
// Nop instructions
for (int i = 0; i < 16; i++) {
__ Nop(i);
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 9a7d8ae431..b4f81ec3b1 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -97,7 +97,6 @@ if (failure) { \
V8_Fatal(__FILE__, __LINE__, "MIPS Disassembler tests failed.\n"); \
}
-
#define COMPARE_PC_REL_COMPACT(asm_, compare_string, offset) \
{ \
int pc_offset = assm.pc_offset(); \
@@ -106,28 +105,28 @@ if (failure) { \
prev_instr_compact_branch = assm.IsPrevInstrCompactBranch(); \
if (prev_instr_compact_branch) { \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + 8 + (offset * 4)); \
+ compare_string, \
+ static_cast<void *>(progcounter + 8 + (offset * 4))); \
} else { \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + 4 + (offset * 4)); \
+ compare_string, \
+ static_cast<void *>(progcounter + 4 + (offset * 4))); \
} \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
-
#define COMPARE_PC_REL(asm_, compare_string, offset) \
{ \
int pc_offset = assm.pc_offset(); \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + (offset * 4)); \
+ compare_string, static_cast<void *>(progcounter + (offset * 4))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
-
#define COMPARE_PC_JUMP(asm_, compare_string, target) \
{ \
int pc_offset = assm.pc_offset(); \
@@ -136,14 +135,13 @@ if (failure) { \
int instr_index = (target >> 2) & kImm26Mask; \
snprintf( \
str_with_address, sizeof(str_with_address), "%s %p -> %p", \
- compare_string, reinterpret_cast<byte *>(target), \
- reinterpret_cast<byte *>(((uint32_t)(progcounter + 4) & ~0xfffffff) | \
+ compare_string, reinterpret_cast<void *>(target), \
+ reinterpret_cast<void *>(((uint32_t)(progcounter + 4) & ~0xfffffff) | \
(instr_index << 2))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
-
#define GET_PC_REGION(pc_region) \
{ \
int pc_offset = assm.pc_offset(); \
@@ -780,6 +778,20 @@ TEST(Type0) {
}
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ COMPARE(seb(a0, a1), "7c052420 seb a0, a1");
+ COMPARE(seb(s6, s7), "7c17b420 seb s6, s7");
+ COMPARE(seb(v0, v1), "7c031420 seb v0, v1");
+
+ COMPARE(seh(a0, a1), "7c052620 seh a0, a1");
+ COMPARE(seh(s6, s7), "7c17b620 seh s6, s7");
+ COMPARE(seh(v0, v1), "7c031620 seh v0, v1");
+
+ COMPARE(wsbh(a0, a1), "7c0520a0 wsbh a0, a1");
+ COMPARE(wsbh(s6, s7), "7c17b0a0 wsbh s6, s7");
+ COMPARE(wsbh(v0, v1), "7c0310a0 wsbh v0, v1");
+ }
+
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
COMPARE(ins_(a0, a1, 31, 1),
"7ca4ffc4 ins a0, a1, 31, 1");
COMPARE(ins_(s6, s7, 30, 2),
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index 8a1e0e7db0..dc6f34e462 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -97,7 +97,6 @@ if (failure) { \
V8_Fatal(__FILE__, __LINE__, "MIPS Disassembler tests failed.\n"); \
}
-
#define COMPARE_PC_REL_COMPACT(asm_, compare_string, offset) \
{ \
int pc_offset = assm.pc_offset(); \
@@ -106,28 +105,28 @@ if (failure) { \
prev_instr_compact_branch = assm.IsPrevInstrCompactBranch(); \
if (prev_instr_compact_branch) { \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + 8 + (offset * 4)); \
+ compare_string, \
+ static_cast<void *>(progcounter + 8 + (offset * 4))); \
} else { \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + 4 + (offset * 4)); \
+ compare_string, \
+ static_cast<void *>(progcounter + 4 + (offset * 4))); \
} \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
-
#define COMPARE_PC_REL(asm_, compare_string, offset) \
{ \
int pc_offset = assm.pc_offset(); \
byte *progcounter = &buffer[pc_offset]; \
char str_with_address[100]; \
snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
- compare_string, progcounter + (offset * 4)); \
+ compare_string, static_cast<void *>(progcounter + (offset * 4))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
-
#define COMPARE_PC_JUMP(asm_, compare_string, target) \
{ \
int pc_offset = assm.pc_offset(); \
@@ -136,14 +135,13 @@ if (failure) { \
int instr_index = (target >> 2) & kImm26Mask; \
snprintf( \
str_with_address, sizeof(str_with_address), "%s %p -> %p", \
- compare_string, reinterpret_cast<byte *>(target), \
- reinterpret_cast<byte *>(((uint64_t)(progcounter + 1) & ~0xfffffff) | \
+ compare_string, reinterpret_cast<void *>(target), \
+ reinterpret_cast<void *>(((uint64_t)(progcounter + 1) & ~0xfffffff) | \
(instr_index << 2))); \
assm.asm_; \
if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
}
-
#define GET_PC_REGION(pc_region) \
{ \
int pc_offset = assm.pc_offset(); \
@@ -530,41 +528,28 @@ TEST(Type0) {
COMPARE(dsrav(v0, v1, fp),
"03c31017 dsrav v0, v1, fp");
- if (kArchVariant == kMips64r2) {
- COMPARE(rotr(a0, a1, 0),
- "00252002 rotr a0, a1, 0");
- COMPARE(rotr(s0, s1, 8),
- "00318202 rotr s0, s1, 8");
- COMPARE(rotr(a6, a7, 24),
- "002b5602 rotr a6, a7, 24");
- COMPARE(rotr(v0, v1, 31),
- "002317c2 rotr v0, v1, 31");
- COMPARE(drotr(a0, a1, 0),
- "0025203a drotr a0, a1, 0");
- COMPARE(drotr(s0, s1, 8),
- "0031823a drotr s0, s1, 8");
- COMPARE(drotr(a6, a7, 24),
- "002b563a drotr a6, a7, 24");
- COMPARE(drotr(v0, v1, 31),
- "002317fa drotr v0, v1, 31");
-
- COMPARE(rotrv(a0, a1, a2),
- "00c52046 rotrv a0, a1, a2");
- COMPARE(rotrv(s0, s1, s2),
- "02518046 rotrv s0, s1, s2");
- COMPARE(rotrv(a6, a7, t0),
- "018b5046 rotrv a6, a7, t0");
- COMPARE(rotrv(v0, v1, fp),
- "03c31046 rotrv v0, v1, fp");
- COMPARE(drotrv(a0, a1, a2),
- "00c52056 drotrv a0, a1, a2");
- COMPARE(drotrv(s0, s1, s2),
- "02518056 drotrv s0, s1, s2");
- COMPARE(drotrv(a6, a7, t0),
- "018b5056 drotrv a6, a7, t0");
- COMPARE(drotrv(v0, v1, fp),
- "03c31056 drotrv v0, v1, fp");
- }
+ COMPARE(rotr(a0, a1, 0), "00252002 rotr a0, a1, 0");
+ COMPARE(rotr(s0, s1, 8), "00318202 rotr s0, s1, 8");
+ COMPARE(rotr(a6, a7, 24), "002b5602 rotr a6, a7, 24");
+ COMPARE(rotr(v0, v1, 31), "002317c2 rotr v0, v1, 31");
+ COMPARE(drotr(a0, a1, 0), "0025203a drotr a0, a1, 0");
+ COMPARE(drotr(s0, s1, 8), "0031823a drotr s0, s1, 8");
+ COMPARE(drotr(a6, a7, 24), "002b563a drotr a6, a7, 24");
+ COMPARE(drotr(v0, v1, 31), "002317fa drotr v0, v1, 31");
+
+ COMPARE(drotr32(a0, a1, 0), "0025203e drotr32 a0, a1, 0");
+ COMPARE(drotr32(s0, s1, 8), "0031823e drotr32 s0, s1, 8");
+ COMPARE(drotr32(a6, a7, 24), "002b563e drotr32 a6, a7, 24");
+ COMPARE(drotr32(v0, v1, 31), "002317fe drotr32 v0, v1, 31");
+
+ COMPARE(rotrv(a0, a1, a2), "00c52046 rotrv a0, a1, a2");
+ COMPARE(rotrv(s0, s1, s2), "02518046 rotrv s0, s1, s2");
+ COMPARE(rotrv(a6, a7, t0), "018b5046 rotrv a6, a7, t0");
+ COMPARE(rotrv(v0, v1, fp), "03c31046 rotrv v0, v1, fp");
+ COMPARE(drotrv(a0, a1, a2), "00c52056 drotrv a0, a1, a2");
+ COMPARE(drotrv(s0, s1, s2), "02518056 drotrv s0, s1, s2");
+ COMPARE(drotrv(a6, a7, t0), "018b5056 drotrv a6, a7, t0");
+ COMPARE(drotrv(v0, v1, fp), "03c31056 drotrv v0, v1, fp");
COMPARE(break_(0),
"0000000d break, code: 0x00000 (0)");
@@ -698,6 +683,26 @@ TEST(Type0) {
"70621020 clz v0, v1");
}
+ COMPARE(seb(a0, a1), "7c052420 seb a0, a1");
+ COMPARE(seb(s6, s7), "7c17b420 seb s6, s7");
+ COMPARE(seb(v0, v1), "7c031420 seb v0, v1");
+
+ COMPARE(seh(a0, a1), "7c052620 seh a0, a1");
+ COMPARE(seh(s6, s7), "7c17b620 seh s6, s7");
+ COMPARE(seh(v0, v1), "7c031620 seh v0, v1");
+
+ COMPARE(wsbh(a0, a1), "7c0520a0 wsbh a0, a1");
+ COMPARE(wsbh(s6, s7), "7c17b0a0 wsbh s6, s7");
+ COMPARE(wsbh(v0, v1), "7c0310a0 wsbh v0, v1");
+
+ COMPARE(dsbh(a0, a1), "7c0520a4 dsbh a0, a1");
+ COMPARE(dsbh(s6, s7), "7c17b0a4 dsbh s6, s7");
+ COMPARE(dsbh(v0, v1), "7c0310a4 dsbh v0, v1");
+
+ COMPARE(dshd(a0, a1), "7c052164 dshd a0, a1");
+ COMPARE(dshd(s6, s7), "7c17b164 dshd s6, s7");
+ COMPARE(dshd(v0, v1), "7c031164 dshd v0, v1");
+
COMPARE(ins_(a0, a1, 31, 1),
"7ca4ffc4 ins a0, a1, 31, 1");
COMPARE(ins_(s6, s7, 30, 2),
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 3722c1f800..284ca859be 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -29,10 +29,10 @@
#include "src/v8.h"
+#include "src/code-factory.h"
#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
-#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
@@ -282,7 +282,7 @@ TEST(DisasmX64) {
// TODO(mstarzinger): The following is protected.
// __ call(Operand(rbx, rcx, times_4, 10000));
__ nop();
- Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_INSIDE_TYPEOF));
+ Handle<Code> ic(CodeFactory::LoadIC(isolate).code());
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ nop();
@@ -431,6 +431,8 @@ TEST(DisasmX64) {
__ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
// 128 bit move instructions.
+ __ movupd(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ movupd(Operand(rbx, rcx, times_4, 10000), xmm0);
__ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
__ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
@@ -449,6 +451,11 @@ TEST(DisasmX64) {
__ ucomisd(xmm0, xmm1);
__ andpd(xmm0, xmm1);
+ __ andpd(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ orpd(xmm0, xmm1);
+ __ orpd(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ xorpd(xmm0, xmm1);
+ __ xorpd(xmm0, Operand(rbx, rcx, times_4, 10000));
__ pslld(xmm0, 6);
__ psrld(xmm0, 6);
@@ -458,6 +465,7 @@ TEST(DisasmX64) {
__ pcmpeqd(xmm1, xmm0);
__ punpckldq(xmm1, xmm11);
+ __ punpckldq(xmm5, Operand(rdx, 4));
__ punpckhdq(xmm8, xmm15);
}
@@ -484,11 +492,67 @@ TEST(DisasmX64) {
{
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(&assm, SSE4_1);
+ __ insertps(xmm5, xmm1, 123);
__ extractps(rax, xmm1, 0);
__ pextrd(rbx, xmm15, 0);
__ pextrd(r12, xmm0, 1);
__ pinsrd(xmm9, r9, 0);
- __ pinsrd(xmm5, rax, 1);
+ __ pinsrd(xmm5, Operand(rax, 4), 1);
+
+ __ cmpps(xmm5, xmm1, 1);
+ __ cmpps(xmm5, Operand(rbx, rcx, times_4, 10000), 1);
+ __ cmpeqps(xmm5, xmm1);
+ __ cmpeqps(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpltps(xmm5, xmm1);
+ __ cmpltps(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpleps(xmm5, xmm1);
+ __ cmpleps(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpneqps(xmm5, xmm1);
+ __ cmpneqps(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpnltps(xmm5, xmm1);
+ __ cmpnltps(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpnleps(xmm5, xmm1);
+ __ cmpnleps(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmppd(xmm5, xmm1, 1);
+ __ cmppd(xmm5, Operand(rbx, rcx, times_4, 10000), 1);
+ __ cmpeqpd(xmm5, xmm1);
+ __ cmpeqpd(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpltpd(xmm5, xmm1);
+ __ cmpltpd(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmplepd(xmm5, xmm1);
+ __ cmplepd(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpneqpd(xmm5, xmm1);
+ __ cmpneqpd(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpnltpd(xmm5, xmm1);
+ __ cmpnltpd(xmm5, Operand(rbx, rcx, times_4, 10000));
+ __ cmpnlepd(xmm5, xmm1);
+ __ cmpnlepd(xmm5, Operand(rbx, rcx, times_4, 10000));
+
+ __ minps(xmm5, xmm1);
+ __ minps(xmm5, Operand(rdx, 4));
+ __ maxps(xmm5, xmm1);
+ __ maxps(xmm5, Operand(rdx, 4));
+ __ rcpps(xmm5, xmm1);
+ __ rcpps(xmm5, Operand(rdx, 4));
+ __ sqrtps(xmm5, xmm1);
+ __ sqrtps(xmm5, Operand(rdx, 4));
+ __ movups(xmm5, xmm1);
+ __ movups(xmm5, Operand(rdx, 4));
+ __ movups(Operand(rdx, 4), xmm5);
+ __ paddd(xmm5, xmm1);
+ __ paddd(xmm5, Operand(rdx, 4));
+ __ psubd(xmm5, xmm1);
+ __ psubd(xmm5, Operand(rdx, 4));
+ __ pmulld(xmm5, xmm1);
+ __ pmulld(xmm5, Operand(rdx, 4));
+ __ pmuludq(xmm5, xmm1);
+ __ pmuludq(xmm5, Operand(rdx, 4));
+ __ psrldq(xmm5, 123);
+ __ pshufd(xmm5, xmm1, 3);
+ __ cvtps2dq(xmm5, xmm1);
+ __ cvtps2dq(xmm5, Operand(rdx, 4));
+ __ cvtdq2ps(xmm5, xmm1);
+ __ cvtdq2ps(xmm5, Operand(rdx, 4));
}
}
@@ -561,8 +625,14 @@ TEST(DisasmX64) {
__ vmovaps(xmm10, xmm11);
__ vmovapd(xmm7, xmm0);
+ __ vmovupd(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ vmovupd(Operand(rbx, rcx, times_4, 10000), xmm0);
__ vmovmskpd(r9, xmm4);
+ __ vmovups(xmm5, xmm1);
+ __ vmovups(xmm5, Operand(rdx, 4));
+ __ vmovups(Operand(rdx, 4), xmm5);
+
__ vandps(xmm0, xmm9, xmm2);
__ vandps(xmm9, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vxorps(xmm0, xmm1, xmm9);
@@ -579,6 +649,35 @@ TEST(DisasmX64) {
__ vpcmpeqd(xmm15, xmm0, Operand(rbx, rcx, times_4, 10000));
__ vpsllq(xmm0, xmm15, 21);
__ vpsrlq(xmm15, xmm0, 21);
+
+ __ vcmpps(xmm5, xmm4, xmm1, 1);
+ __ vcmpps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000), 1);
+ __ vcmpeqps(xmm5, xmm4, xmm1);
+ __ vcmpeqps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpltps(xmm5, xmm4, xmm1);
+ __ vcmpltps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpleps(xmm5, xmm4, xmm1);
+ __ vcmpleps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpneqps(xmm5, xmm4, xmm1);
+ __ vcmpneqps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpnltps(xmm5, xmm4, xmm1);
+ __ vcmpnltps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpnleps(xmm5, xmm4, xmm1);
+ __ vcmpnleps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmppd(xmm5, xmm4, xmm1, 1);
+ __ vcmppd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000), 1);
+ __ vcmpeqpd(xmm5, xmm4, xmm1);
+ __ vcmpeqpd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpltpd(xmm5, xmm4, xmm1);
+ __ vcmpltpd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmplepd(xmm5, xmm4, xmm1);
+ __ vcmplepd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpneqpd(xmm5, xmm4, xmm1);
+ __ vcmpneqpd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpnltpd(xmm5, xmm4, xmm1);
+ __ vcmpnltpd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpnlepd(xmm5, xmm4, xmm1);
+ __ vcmpnlepd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
}
}
@@ -745,12 +844,31 @@ TEST(DisasmX64) {
// xchg.
{
+ __ xchgb(rax, Operand(rax, 8));
+ __ xchgw(rax, Operand(rbx, 8));
__ xchgq(rax, rax);
__ xchgq(rax, rbx);
__ xchgq(rbx, rbx);
__ xchgq(rbx, Operand(rsp, 12));
}
+ // cmpxchg.
+ {
+ __ cmpxchgb(Operand(rsp, 12), rax);
+ __ cmpxchgw(Operand(rbx, rcx, times_4, 10000), rax);
+ __ cmpxchgl(Operand(rbx, rcx, times_4, 10000), rax);
+ __ cmpxchgq(Operand(rbx, rcx, times_4, 10000), rax);
+ }
+
+ // lock prefix.
+ {
+ __ lock();
+ __ cmpxchgl(Operand(rsp, 12), rbx);
+
+ __ lock();
+ __ xchgw(rax, Operand(rcx, 8));
+ }
+
// Nop instructions
for (int i = 0; i < 16; i++) {
__ Nop(i);
diff --git a/deps/v8/test/cctest/test-disasm-x87.cc b/deps/v8/test/cctest/test-disasm-x87.cc
index 697a9d3b49..41f425da3c 100644
--- a/deps/v8/test/cctest/test-disasm-x87.cc
+++ b/deps/v8/test/cctest/test-disasm-x87.cc
@@ -29,10 +29,10 @@
#include "src/v8.h"
+#include "src/code-factory.h"
#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
-#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/x87/frames-x87.h"
#include "test/cctest/cctest.h"
@@ -290,7 +290,7 @@ TEST(DisasmIa320) {
__ bind(&L2);
__ call(Operand(ebx, ecx, times_4, 10000));
__ nop();
- Handle<Code> ic(LoadIC::initialize_stub(isolate, NOT_INSIDE_TYPEOF));
+ Handle<Code> ic(CodeFactory::LoadIC(isolate).code());
__ call(ic, RelocInfo::CODE_TARGET);
__ nop();
__ call(FUNCTION_ADDR(DummyStaticFunction), RelocInfo::RUNTIME_ENTRY);
@@ -395,12 +395,30 @@ TEST(DisasmIa320) {
// xchg.
{
+ __ xchg_b(eax, Operand(eax, 8));
+ __ xchg_w(eax, Operand(ebx, 8));
__ xchg(eax, eax);
__ xchg(eax, ebx);
__ xchg(ebx, ebx);
__ xchg(ebx, Operand(esp, 12));
}
+ // cmpxchg.
+ {
+ __ cmpxchg_b(Operand(esp, 12), eax);
+ __ cmpxchg_w(Operand(ebx, ecx, times_4, 10000), eax);
+ __ cmpxchg(Operand(ebx, ecx, times_4, 10000), eax);
+ }
+
+ // lock prefix.
+ {
+ __ lock();
+ __ cmpxchg(Operand(esp, 12), ebx);
+
+ __ lock();
+ __ xchg_w(eax, Operand(ecx, 8));
+ }
+
// Nop instructions
for (int i = 0; i < 16; i++) {
__ Nop(i);
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index c06e5b9124..4322e746e2 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -36,7 +36,7 @@ TEST(VectorStructure) {
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone* zone = isolate->runtime_zone();
+ Zone zone(isolate->allocator());
// Empty vectors are the empty fixed array.
StaticFeedbackVectorSpec empty;
@@ -47,7 +47,7 @@ TEST(VectorStructure) {
CHECK(vector->is_empty());
{
- FeedbackVectorSpec one_slot(zone);
+ FeedbackVectorSpec one_slot(&zone);
one_slot.AddGeneralSlot();
vector = NewTypeFeedbackVector(isolate, &one_slot);
FeedbackVectorHelper helper(vector);
@@ -55,7 +55,7 @@ TEST(VectorStructure) {
}
{
- FeedbackVectorSpec one_icslot(zone);
+ FeedbackVectorSpec one_icslot(&zone);
one_icslot.AddCallICSlot();
vector = NewTypeFeedbackVector(isolate, &one_icslot);
FeedbackVectorHelper helper(vector);
@@ -63,7 +63,7 @@ TEST(VectorStructure) {
}
{
- FeedbackVectorSpec spec(zone);
+ FeedbackVectorSpec spec(&zone);
for (int i = 0; i < 3; i++) {
spec.AddGeneralSlot();
}
@@ -103,9 +103,9 @@ TEST(VectorICMetadata) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
- Zone* zone = isolate->runtime_zone();
+ Zone zone(isolate->allocator());
- FeedbackVectorSpec spec(zone);
+ FeedbackVectorSpec spec(&zone);
// Set metadata.
for (int i = 0; i < 40; i++) {
switch (i % 4) {
@@ -158,12 +158,12 @@ TEST(VectorSlotClearing) {
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone* zone = isolate->runtime_zone();
+ Zone zone(isolate->allocator());
// We only test clearing FeedbackVectorSlots, not FeedbackVectorSlots.
// The reason is that FeedbackVectorSlots need a full code environment
// to fully test (See VectorICProfilerStatistics test below).
- FeedbackVectorSpec spec(zone);
+ FeedbackVectorSpec spec(&zone);
for (int i = 0; i < 5; i++) {
spec.AddGeneralSlot();
}
@@ -208,7 +208,7 @@ TEST(VectorCallICStates) {
Handle<JSFunction> f = GetFunction("f");
// There should be one IC.
Handle<TypeFeedbackVector> feedback_vector =
- Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
FeedbackVectorSlot slot(0);
CallICNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
@@ -221,18 +221,82 @@ TEST(VectorCallICStates) {
// After a collection, state should remain GENERIC.
heap->CollectAllGarbage();
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
+}
+
+TEST(VectorCallFeedbackForArray) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ // Make sure function f has a call that uses a type feedback slot.
+ CompileRun(
+ "function foo() { return 17; }"
+ "function f(a) { a(); } f(Array);");
+ Handle<JSFunction> f = GetFunction("f");
+ // There should be one IC.
+ Handle<TypeFeedbackVector> feedback_vector =
+ Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
+ FeedbackVectorSlot slot(0);
+ CallICNexus nexus(feedback_vector, slot);
// A call to Array is special, it contains an AllocationSite as feedback.
- // Clear the IC manually in order to test this case.
- nexus.Clear(f->shared()->code());
- CompileRun("f(Array)");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CHECK(nexus.GetFeedback()->IsAllocationSite());
heap->CollectAllGarbage();
+ // It should stay monomorphic even after a GC.
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+}
+
+TEST(VectorCallCounts) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ // Make sure function f has a call that uses a type feedback slot.
+ CompileRun(
+ "function foo() { return 17; }"
+ "function f(a) { a(); } f(foo);");
+ Handle<JSFunction> f = GetFunction("f");
+ // There should be one IC.
+ Handle<TypeFeedbackVector> feedback_vector =
+ Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
+ FeedbackVectorSlot slot(0);
+ CallICNexus nexus(feedback_vector, slot);
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+
+ CompileRun("f(foo); f(foo);");
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+ CHECK_EQ(3, nexus.ExtractCallCount());
}
+TEST(VectorConstructCounts) {
+ if (i::FLAG_always_opt) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ // Make sure function f has a call that uses a type feedback slot.
+ CompileRun(
+ "function Foo() {}"
+ "function f(a) { new a(); } f(Foo);");
+ Handle<JSFunction> f = GetFunction("f");
+ Handle<TypeFeedbackVector> feedback_vector =
+ Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
+ FeedbackVectorSlot slot(0);
+ CHECK(feedback_vector->Get(slot)->IsWeakCell());
+
+ CompileRun("f(Foo); f(Foo);");
+ FeedbackVectorSlot cslot(1);
+ CHECK(feedback_vector->Get(cslot)->IsSmi());
+ CHECK_EQ(3, Smi::cast(feedback_vector->Get(cslot))->value());
+}
TEST(VectorLoadICStates) {
if (i::FLAG_always_opt) return;
@@ -249,7 +313,7 @@ TEST(VectorLoadICStates) {
Handle<JSFunction> f = GetFunction("f");
// There should be one IC.
Handle<TypeFeedbackVector> feedback_vector =
- Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
FeedbackVectorSlot slot(0);
LoadICNexus nexus(feedback_vector, slot);
CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
@@ -301,18 +365,18 @@ TEST(VectorLoadICSlotSharing) {
CompileRun(
"o = 10;"
"function f() {"
- " var x = o + 10;"
- " return o + x + o;"
+ " var x = o || 10;"
+ " return o , x , o;"
"}"
"f();");
Handle<JSFunction> f = GetFunction("f");
// There should be one IC slot.
Handle<TypeFeedbackVector> feedback_vector =
- Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(1, helper.slot_count());
FeedbackVectorSlot slot(0);
- LoadICNexus nexus(feedback_vector, slot);
+ LoadGlobalICNexus nexus(feedback_vector, slot);
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
@@ -332,7 +396,7 @@ TEST(VectorLoadICOnSmi) {
Handle<JSFunction> f = GetFunction("f");
// There should be one IC.
Handle<TypeFeedbackVector> feedback_vector =
- Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ Handle<TypeFeedbackVector>(f->feedback_vector(), isolate);
FeedbackVectorSlot slot(0);
LoadICNexus nexus(feedback_vector, slot);
CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
@@ -397,13 +461,13 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be two LOAD_ICs, one for a and one for y at the end.
Handle<TypeFeedbackVector> feedback_vector =
- handle(f->shared()->feedback_vector(), isolate);
+ handle(f->feedback_vector(), isolate);
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(4, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
- CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
}
{
@@ -416,9 +480,11 @@ TEST(ReferenceContextAllocatesNoSlots) {
Handle<JSFunction> f = GetFunction("testprop");
// There should be one LOAD_IC, for the load of a.
- Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+ Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(2, helper.slot_count());
+ CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
+ CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::STORE_IC);
}
{
@@ -432,12 +498,13 @@ TEST(ReferenceContextAllocatesNoSlots) {
Handle<JSFunction> f = GetFunction("testpropfunc");
- // There should be 2 LOAD_ICs and 2 CALL_ICs.
- Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+ // There should be 1 LOAD_GLOBAL_IC to load x (in both cases), 2 CALL_ICs
+ // to call x and a LOAD_IC to load blue.
+ Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(5, helper.slot_count());
CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::CALL_IC);
- CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::CALL_IC);
CHECK_SLOT_KIND(helper, 4, FeedbackVectorSlotKind::LOAD_IC);
@@ -453,12 +520,12 @@ TEST(ReferenceContextAllocatesNoSlots) {
Handle<JSFunction> f = GetFunction("testkeyedprop");
- // There should be 1 LOAD_ICs for the load of a, and one KEYED_LOAD_IC for
- // the load of x[0] in the return statement.
- Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  // There should be 1 LOAD_GLOBAL_IC for the load of a, and one
+ // KEYED_LOAD_IC for the load of x[0] in the return statement.
+ Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(3, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::KEYED_STORE_IC);
CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::KEYED_LOAD_IC);
}
@@ -473,16 +540,19 @@ TEST(ReferenceContextAllocatesNoSlots) {
Handle<JSFunction> f = GetFunction("testcompound");
- // There should be 3 LOAD_ICs, for load of a and load of x.old and x.young.
- Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+  // There should be 1 LOAD_GLOBAL_IC for the load of a and 2 LOAD_ICs for
+  // the loads of x.old and x.young.
+ Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
- CHECK_EQ(6, helper.slot_count());
- CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_EQ(7, helper.slot_count());
+ CHECK_SLOT_KIND(helper, 0, FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
CHECK_SLOT_KIND(helper, 1, FeedbackVectorSlotKind::STORE_IC);
CHECK_SLOT_KIND(helper, 2, FeedbackVectorSlotKind::STORE_IC);
CHECK_SLOT_KIND(helper, 3, FeedbackVectorSlotKind::STORE_IC);
CHECK_SLOT_KIND(helper, 4, FeedbackVectorSlotKind::LOAD_IC);
CHECK_SLOT_KIND(helper, 5, FeedbackVectorSlotKind::LOAD_IC);
+ // Binary operation feedback is a general slot.
+ CHECK_SLOT_KIND(helper, 6, FeedbackVectorSlotKind::GENERAL);
}
}
@@ -504,7 +574,7 @@ TEST(VectorStoreICBasic) {
"f(a);");
Handle<JSFunction> f = GetFunction("f");
// There should be one IC slot.
- Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
+ Handle<TypeFeedbackVector> feedback_vector(f->feedback_vector());
FeedbackVectorHelper helper(feedback_vector);
CHECK_EQ(1, helper.slot_count());
FeedbackVectorSlot slot(0);
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index c7c6f84423..d2f44ce1be 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -10,12 +10,14 @@
#include "src/v8.h"
#include "src/compilation-cache.h"
+#include "src/compiler.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/ic/stub-cache.h"
#include "src/macro-assembler.h"
+#include "src/types.h"
using namespace v8::internal;
@@ -388,16 +390,16 @@ class Expectations {
Handle<String> name = MakeName("prop", property_index);
- CHECK(!getter->IsNull() || !setter->IsNull());
+ CHECK(!getter->IsNull(isolate_) || !setter->IsNull(isolate_));
Factory* factory = isolate_->factory();
- if (!getter->IsNull()) {
+ if (!getter->IsNull(isolate_)) {
Handle<AccessorPair> pair = factory->NewAccessorPair();
pair->SetComponents(*getter, *factory->null_value());
AccessorConstantDescriptor new_desc(name, pair, attributes);
map = Map::CopyInsertDescriptor(map, &new_desc, INSERT_TRANSITION);
}
- if (!setter->IsNull()) {
+ if (!setter->IsNull(isolate_)) {
Handle<AccessorPair> pair = factory->NewAccessorPair();
pair->SetComponents(*getter, *setter);
AccessorConstantDescriptor new_desc(name, pair, attributes);
@@ -421,15 +423,8 @@ class Expectations {
int descriptor =
map->instance_descriptors()->SearchWithCache(isolate, *name, *map);
- map = Map::TransitionToAccessorProperty(
- map, name, descriptor, ACCESSOR_GETTER, getter, attributes);
- CHECK(!map->is_deprecated());
- CHECK(!map->is_dictionary_map());
-
- descriptor =
- map->instance_descriptors()->SearchWithCache(isolate, *name, *map);
- map = Map::TransitionToAccessorProperty(
- map, name, descriptor, ACCESSOR_SETTER, setter, attributes);
+ map = Map::TransitionToAccessorProperty(isolate, map, name, descriptor,
+ getter, setter, attributes);
CHECK(!map->is_deprecated());
CHECK(!map->is_dictionary_map());
return map;
@@ -493,7 +488,7 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
Handle<JSObject> obj = factory->NewJSObjectFromMap(map);
JSObject::MigrateToMap(obj, prepared_map);
FieldIndex index = FieldIndex::ForDescriptor(*prepared_map, 0);
- CHECK(obj->RawFastPropertyAt(index)->IsUninitialized());
+ CHECK(obj->RawFastPropertyAt(index)->IsUninitialized(isolate));
#ifdef VERIFY_HEAP
obj->ObjectVerify();
#endif
@@ -609,7 +604,7 @@ static void TestGeneralizeRepresentation(
// Create new maps by generalizing representation of propX field.
Handle<Map> field_owner(map->FindFieldOwner(property_index), isolate);
- CompilationInfo info("testing", isolate, &zone);
+ CompilationInfo info(ArrayVector("testing"), isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
@@ -651,7 +646,7 @@ static void TestGeneralizeRepresentation(
Map* tmp = *new_map;
while (true) {
Object* back = tmp->GetBackPointer();
- if (back->IsUndefined()) break;
+ if (back->IsUndefined(isolate)) break;
tmp = Map::cast(back);
CHECK(!tmp->is_stable());
}
@@ -988,7 +983,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
- CompilationInfo info("testing", isolate, &zone);
+ CompilationInfo info(ArrayVector("testing"), isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
@@ -1073,7 +1068,7 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
- CompilationInfo info("testing", isolate, &zone);
+ CompilationInfo info(ArrayVector("testing"), isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
@@ -1249,7 +1244,7 @@ struct CheckCopyGeneralizeAllRepresentations {
CHECK(!map->is_deprecated());
CHECK_NE(*map, *new_map);
- CHECK(new_map->GetBackPointer()->IsUndefined());
+ CHECK(new_map->GetBackPointer()->IsUndefined(map->GetIsolate()));
for (int i = 0; i < kPropCount; i++) {
expectations.GeneralizeRepresentation(i);
}
@@ -1604,7 +1599,7 @@ static void TestReconfigureElementsKind_GeneralizeRepresentation(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
- CompilationInfo info("testing", isolate, &zone);
+ CompilationInfo info(ArrayVector("testing"), isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
@@ -1697,7 +1692,7 @@ static void TestReconfigureElementsKind_GeneralizeRepresentationTrivial(
Zone zone(isolate->allocator());
Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
- CompilationInfo info("testing", isolate, &zone);
+ CompilationInfo info(ArrayVector("testing"), isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
@@ -1880,7 +1875,7 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
// Try to update |map|, since there is no place for propX transition at |map2|
// |map| should become "copy-generalized".
Handle<Map> updated_map = Map::Update(map);
- CHECK(updated_map->GetBackPointer()->IsUndefined());
+ CHECK(updated_map->GetBackPointer()->IsUndefined(isolate));
for (int i = 0; i < kPropCount; i++) {
expectations.SetDataField(i, Representation::Tagged(), any_type);
@@ -1980,10 +1975,10 @@ static void TestGeneralizeRepresentationWithSpecialTransition(
for (int i = 0; i < kPropCount; i++) {
expectations2.GeneralizeRepresentation(i);
}
- CHECK(new_map2->GetBackPointer()->IsUndefined());
+ CHECK(new_map2->GetBackPointer()->IsUndefined(isolate));
CHECK(expectations2.Check(*new_map2));
} else {
- CHECK(!new_map2->GetBackPointer()->IsUndefined());
+ CHECK(!new_map2->GetBackPointer()->IsUndefined(isolate));
CHECK(expectations2.Check(*new_map2));
}
}
@@ -2064,63 +2059,6 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
}
-TEST(ForObservedTransitionFromMapOwningDescriptor) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<FieldType> value_type =
- FieldType::Class(Map::Create(isolate, 0), isolate);
-
- struct TestConfig {
- Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
- return Map::CopyForObserved(map);
- }
- // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
- bool generalizes_representations() const { return false; }
- bool is_non_equevalent_transition() const { return true; }
- };
- TestConfig config;
- TestGeneralizeRepresentationWithSpecialTransition(
- config, Representation::Smi(), any_type, Representation::HeapObject(),
- value_type, Representation::Tagged(), any_type);
-}
-
-
-TEST(ForObservedTransitionFromMapNotOwningDescriptor) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<FieldType> value_type =
- FieldType::Class(Map::Create(isolate, 0), isolate);
-
- struct TestConfig {
- Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
- Isolate* isolate = CcTest::i_isolate();
- Handle<FieldType> any_type = FieldType::Any(isolate);
-
- // Add one more transition to |map| in order to prevent descriptors
- // ownership.
- CHECK(map->owns_descriptors());
- Map::CopyWithField(map, MakeString("foo"), any_type, NONE,
- Representation::Smi(), INSERT_TRANSITION)
- .ToHandleChecked();
- CHECK(!map->owns_descriptors());
-
- return Map::CopyForObserved(map);
- }
- // TODO(ishell): remove once IS_PROTO_TRANS_ISSUE_FIXED is removed.
- bool generalizes_representations() const { return false; }
- bool is_non_equevalent_transition() const { return true; }
- };
- TestConfig config;
- TestGeneralizeRepresentationWithSpecialTransition(
- config, Representation::Smi(), any_type, Representation::HeapObject(),
- value_type, Representation::Tagged(), any_type);
-}
-
-
TEST(PrototypeTransitionFromMapOwningDescriptor) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -2473,6 +2411,16 @@ TEST(TransitionAccessorConstantToSameAccessorConstant) {
TestTransitionTo(transition_op, transition_op, checker);
}
+TEST(FieldTypeConvertSimple) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ Zone zone(isolate->allocator());
+
+ CHECK_EQ(FieldType::Any()->Convert(&zone), Type::NonInternal());
+ CHECK_EQ(FieldType::None()->Convert(&zone), Type::None());
+}
// TODO(ishell): add this test once IS_ACCESSOR_FIELD_SUPPORTED is supported.
// TEST(TransitionAccessorConstantToAnotherAccessorConstant)
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 77ba2f2243..4b16c031d9 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <memory>
#include "src/v8.h"
@@ -34,7 +35,6 @@
#include "test/cctest/cctest.h"
-using ::v8::base::SmartArrayPointer;
using ::v8::internal::CStrVector;
using ::v8::internal::Factory;
using ::v8::internal::Handle;
@@ -85,7 +85,7 @@ static void CheckFunctionName(v8::Local<v8::Script> script,
isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos));
// Verify inferred function name.
- SmartArrayPointer<char> inferred_name =
+ std::unique_ptr<char[]> inferred_name =
shared_func_info->inferred_name()->ToCString();
i::PrintF("expected: %s, found: %s\n", ref_inferred_name,
inferred_name.get());
@@ -287,7 +287,8 @@ TEST(MultipleFuncsConditional) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Script> script = Compile(CcTest::isolate(),
- "fun1 = 0 ?\n"
+ "var x = 0;\n"
+ "fun1 = x ?\n"
" function() { return 1; } :\n"
" function() { return 2; }");
CheckFunctionName(script, "return 1", "fun1");
@@ -301,9 +302,10 @@ TEST(MultipleFuncsInLiteral) {
v8::Local<v8::Script> script =
Compile(CcTest::isolate(),
+ "var x = 0;\n"
"function MyClass() {}\n"
"MyClass.prototype = {\n"
- " method1: 0 ? function() { return 1; } :\n"
+ " method1: x ? function() { return 1; } :\n"
" function() { return 2; } }");
CheckFunctionName(script, "return 1", "MyClass.method1");
CheckFunctionName(script, "return 2", "MyClass.method1");
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 22fd785566..06e7466dc6 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -417,3 +417,55 @@ TEST(WeakPersistentSmi) {
// Should not crash.
g.SetWeak<void>(nullptr, &WeakCallback, v8::WeakCallbackType::kParameter);
}
+
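+// A finalizer that resurrects its handle: it clears the weakness and marks
+// the object so the test below can observe that the callback ran.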
+void finalizer(const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
+ data.GetParameter()->ClearWeak();
+ v8::Local<v8::Object> o =
+ v8::Local<v8::Object>::New(data.GetIsolate(), *data.GetParameter());
+ o->Set(data.GetIsolate()->GetCurrentContext(), v8_str("finalizer"),
+ v8_str("was here"))
+ .FromJust();
+}
+
+TEST(FinalizerWeakness) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+
+ v8::Global<v8::Object> g;
+ int identity;
+
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> o = v8::Object::New(isolate);
+ identity = o->GetIdentityHash();
+ g.Reset(isolate, o);
+ g.SetWeak(&g, finalizer, v8::WeakCallbackType::kFinalizer);
+ }
+
+ CcTest::i_isolate()->heap()->CollectAllAvailableGarbage();
+
+ CHECK(!g.IsEmpty());
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Object> o = v8::Local<v8::Object>::New(isolate, g);
+ CHECK_EQ(identity, o->GetIdentityHash());
+ CHECK(o->Has(isolate->GetCurrentContext(), v8_str("finalizer")).FromJust());
+}
+
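+// Weak handles registered without a callback are simply reset during GC; the
+// isolate counts these resets and clears the counter on each query.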
+TEST(PhatomHandlesWithoutCallbacks) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+
+ v8::Global<v8::Object> g1, g2;
+ {
+ v8::HandleScope scope(isolate);
+ g1.Reset(isolate, v8::Object::New(isolate));
+ g1.SetWeak();
+ g2.Reset(isolate, v8::Object::New(isolate));
+ g2.SetWeak();
+ }
+
+ CHECK_EQ(0, isolate->NumberOfPhantomHandleResetsSinceLastCall());
+ CcTest::i_isolate()->heap()->CollectAllAvailableGarbage();
+ CHECK_EQ(2, isolate->NumberOfPhantomHandleResetsSinceLastCall());
+ CHECK_EQ(0, isolate->NumberOfPhantomHandleResetsSinceLastCall());
+}
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index b45d6c7183..2d423b4543 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -30,7 +30,7 @@
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
using namespace v8::internal;
@@ -48,7 +48,7 @@ class IntSet {
void Insert(int x) {
CHECK_NE(0, x); // 0 corresponds to (void*)NULL - illegal key value
- HashMap::Entry* p =
+ v8::base::HashMap::Entry* p =
map_.LookupOrInsert(reinterpret_cast<void*>(x), hash_(x));
CHECK(p != NULL); // insert is set!
CHECK_EQ(reinterpret_cast<void*>(x), p->key);
@@ -61,7 +61,8 @@ class IntSet {
}
bool Present(int x) {
- HashMap::Entry* p = map_.Lookup(reinterpret_cast<void*>(x), hash_(x));
+ v8::base::HashMap::Entry* p =
+ map_.Lookup(reinterpret_cast<void*>(x), hash_(x));
if (p != NULL) {
CHECK_EQ(reinterpret_cast<void*>(x), p->key);
}
@@ -74,7 +75,8 @@ class IntSet {
uint32_t occupancy() const {
uint32_t count = 0;
- for (HashMap::Entry* p = map_.Start(); p != NULL; p = map_.Next(p)) {
+ for (v8::base::HashMap::Entry* p = map_.Start(); p != NULL;
+ p = map_.Next(p)) {
count++;
}
CHECK_EQ(map_.occupancy(), static_cast<double>(count));
@@ -83,7 +85,7 @@ class IntSet {
private:
IntKeyHash hash_;
- HashMap map_;
+ v8::base::HashMap map_;
};
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index dfe591148a..b0a2e00202 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -29,12 +29,14 @@
#include <ctype.h>
+#include <memory>
+
#include "src/v8.h"
#include "include/v8-profiler.h"
+#include "src/base/hashmap.h"
#include "src/collector.h"
#include "src/debug/debug.h"
-#include "src/hashmap.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -43,7 +45,7 @@
using i::AllocationTraceNode;
using i::AllocationTraceTree;
using i::AllocationTracker;
-using i::HashMap;
+using i::ArrayVector;
using i::Vector;
namespace {
@@ -65,7 +67,7 @@ class NamedEntriesDetector {
}
void CheckAllReachables(i::HeapEntry* root) {
- i::HashMap visited(AddressesMatch);
+ v8::base::HashMap visited(AddressesMatch);
i::List<i::HeapEntry*> list(10);
list.Add(root);
CheckEntry(root);
@@ -75,7 +77,7 @@ class NamedEntriesDetector {
for (int i = 0; i < children.length(); ++i) {
if (children[i]->type() == i::HeapGraphEdge::kShortcut) continue;
i::HeapEntry* child = children[i]->to();
- i::HashMap::Entry* entry = visited.LookupOrInsert(
+ v8::base::HashMap::Entry* entry = visited.LookupOrInsert(
reinterpret_cast<void*>(child),
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(child)));
if (entry->value)
@@ -143,10 +145,10 @@ static bool ValidateSnapshot(const v8::HeapSnapshot* snapshot, int depth = 3) {
i::HeapSnapshot* heap_snapshot = const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
- i::HashMap visited(AddressesMatch);
+ v8::base::HashMap visited(AddressesMatch);
i::List<i::HeapGraphEdge>& edges = heap_snapshot->edges();
for (int i = 0; i < edges.length(); ++i) {
- i::HashMap::Entry* entry = visited.LookupOrInsert(
+ v8::base::HashMap::Entry* entry = visited.LookupOrInsert(
reinterpret_cast<void*>(edges[i].to()),
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(edges[i].to())));
uint32_t ref_count = static_cast<uint32_t>(
@@ -156,7 +158,7 @@ static bool ValidateSnapshot(const v8::HeapSnapshot* snapshot, int depth = 3) {
uint32_t unretained_entries_count = 0;
i::List<i::HeapEntry>& entries = heap_snapshot->entries();
for (int i = 0; i < entries.length(); ++i) {
- i::HashMap::Entry* entry = visited.Lookup(
+ v8::base::HashMap::Entry* entry = visited.Lookup(
reinterpret_cast<void*>(&entries[i]),
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&entries[i])));
if (!entry && entries[i].id() != 1) {
@@ -254,13 +256,13 @@ TEST(BoundFunctionInSnapshot) {
CHECK_EQ(v8::HeapGraphNode::kArray, bindings->GetType());
CHECK_EQ(1, bindings->GetChildrenCount());
- const v8::HeapGraphNode* bound_this = GetProperty(
- f, v8::HeapGraphEdge::kShortcut, "bound_this");
+ const v8::HeapGraphNode* bound_this =
+ GetProperty(f, v8::HeapGraphEdge::kInternal, "bound_this");
CHECK(bound_this);
CHECK_EQ(v8::HeapGraphNode::kObject, bound_this->GetType());
- const v8::HeapGraphNode* bound_function = GetProperty(
- f, v8::HeapGraphEdge::kShortcut, "bound_function");
+ const v8::HeapGraphNode* bound_function =
+ GetProperty(f, v8::HeapGraphEdge::kInternal, "bound_function");
CHECK(bound_function);
CHECK_EQ(v8::HeapGraphNode::kClosure, bound_function->GetType());
@@ -491,6 +493,16 @@ void CheckSimdSnapshot(const char* program, const char* var_name) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun(program);
+ // The TakeHeapSnapshot function does not do enough GCs to ensure
+  // that all garbage is collected. We perform an additional GC here
+ // to reclaim a floating AllocationSite and to fix the following failure:
+ // # Check failed: ValidateSnapshot(snapshot).
+ // Stdout:
+ // 28 @ 13523 entry with no retainer: /hidden/ system / AllocationSite
+ // 44 @ 767 $map: /hidden/ system / Map
+ // 44 @ 59 $map: /hidden/ system / Map
+ CcTest::heap()->CollectAllGarbage();
+
const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
@@ -659,6 +671,31 @@ TEST(HeapSnapshotCollection) {
CHECK_EQ(s->GetId(), map_s->GetId());
}
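+// Check that an object's map, and the map's own internal fields, show up as
+// internal references in the heap snapshot.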
+TEST(HeapSnapshotMap) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ CompileRun(
+ "function Z() { this.foo = {}; }\n"
+ "z = new Z();\n");
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
+ CHECK(ValidateSnapshot(snapshot));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* z =
+ GetProperty(global, v8::HeapGraphEdge::kProperty, "z");
+ CHECK(z);
+ const v8::HeapGraphNode* map =
+ GetProperty(z, v8::HeapGraphEdge::kInternal, "map");
+ CHECK(map);
+ CHECK(GetProperty(map, v8::HeapGraphEdge::kInternal, "map"));
+ CHECK(GetProperty(map, v8::HeapGraphEdge::kInternal, "prototype"));
+ CHECK(GetProperty(map, v8::HeapGraphEdge::kInternal, "back_pointer"));
+ CHECK(GetProperty(map, v8::HeapGraphEdge::kInternal, "descriptors"));
+ const v8::HeapGraphNode* weak_cell =
+ GetProperty(map, v8::HeapGraphEdge::kInternal, "weak_cell_cache");
+ CHECK(GetProperty(weak_cell, v8::HeapGraphEdge::kWeak, "value"));
+}
TEST(HeapSnapshotInternalReferences) {
v8::Isolate* isolate = CcTest::isolate();
@@ -1830,7 +1867,7 @@ TEST(GetHeapValueForDeletedObject) {
static int StringCmp(const char* ref, i::String* act) {
- v8::base::SmartArrayPointer<char> s_act = act->ToCString();
+ std::unique_ptr<char[]> s_act = act->ToCString();
int result = strcmp(ref, s_act.get());
if (result != 0)
fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, s_act.get());
@@ -2513,8 +2550,7 @@ TEST(ArrayGrowLeftTrim) {
// Print for better diagnostics in case of failure.
tracker->trace_tree()->Print(tracker);
- AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, arraysize(names)));
+ AllocationTraceNode* node = FindNode(tracker, ArrayVector(names));
CHECK(node);
CHECK_GE(node->allocation_count(), 2u);
CHECK_GE(node->allocation_size(), 4u * 5u);
@@ -2540,8 +2576,7 @@ TEST(TrackHeapAllocations) {
tracker->trace_tree()->Print(tracker);
const char* names[] = {"", "start", "f_0_0", "f_0_1", "f_0_2"};
- AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, arraysize(names)));
+ AllocationTraceNode* node = FindNode(tracker, ArrayVector(names));
CHECK(node);
CHECK_GE(node->allocation_count(), 100u);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2590,8 +2625,7 @@ TEST(TrackBumpPointerAllocations) {
// Print for better diagnostics in case of failure.
tracker->trace_tree()->Print(tracker);
- AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, arraysize(names)));
+ AllocationTraceNode* node = FindNode(tracker, ArrayVector(names));
CHECK(node);
CHECK_GE(node->allocation_count(), 100u);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2616,8 +2650,7 @@ TEST(TrackBumpPointerAllocations) {
// Print for better diagnostics in case of failure.
tracker->trace_tree()->Print(tracker);
- AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, arraysize(names)));
+ AllocationTraceNode* node = FindNode(tracker, ArrayVector(names));
CHECK(node);
CHECK_LT(node->allocation_count(), 100u);
@@ -2646,8 +2679,7 @@ TEST(TrackV8ApiAllocation) {
// Print for better diagnostics in case of failure.
tracker->trace_tree()->Print(tracker);
- AllocationTraceNode* node =
- FindNode(tracker, Vector<const char*>(names, arraysize(names)));
+ AllocationTraceNode* node = FindNode(tracker, ArrayVector(names));
CHECK(node);
CHECK_GE(node->allocation_count(), 2u);
CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2792,7 +2824,7 @@ TEST(WeakContainers) {
CHECK_NE(0, count);
for (int i = 0; i < count; ++i) {
const v8::HeapGraphEdge* prop = dependent_code->GetChild(i);
- CHECK_EQ(v8::HeapGraphEdge::kWeak, prop->GetType());
+ CHECK_EQ(v8::HeapGraphEdge::kInternal, prop->GetType());
}
}
@@ -2875,6 +2907,15 @@ static const v8::AllocationProfile::Node* FindAllocationProfileNode(
return node;
}
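+// Recursively verify that no node in the allocation profile reports an
+// allocation with a zero sample count.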
+static void CheckNoZeroCountNodes(v8::AllocationProfile::Node* node) {
+ for (auto alloc : node->allocations) {
+ CHECK_GT(alloc.count, 0u);
+ }
+ for (auto child : node->children) {
+ CheckNoZeroCountNodes(child);
+ }
+}
+
TEST(SamplingHeapProfiler) {
v8::HandleScope scope(v8::Isolate::GetCurrent());
LocalContext env;
@@ -2908,13 +2949,12 @@ TEST(SamplingHeapProfiler) {
heap_profiler->StartSamplingHeapProfiler(1024);
CompileRun(script_source);
- v8::base::SmartPointer<v8::AllocationProfile> profile(
+ std::unique_ptr<v8::AllocationProfile> profile(
heap_profiler->GetAllocationProfile());
- CHECK(!profile.is_empty());
+ CHECK(profile);
const char* names[] = {"", "foo", "bar"};
- auto node_bar = FindAllocationProfileNode(
- *profile, Vector<const char*>(names, arraysize(names)));
+ auto node_bar = FindAllocationProfileNode(*profile, ArrayVector(names));
CHECK(node_bar);
// Count the number of allocations we sampled from bar.
@@ -2936,13 +2976,12 @@ TEST(SamplingHeapProfiler) {
heap_profiler->StartSamplingHeapProfiler(128);
CompileRun(script_source);
- v8::base::SmartPointer<v8::AllocationProfile> profile(
+ std::unique_ptr<v8::AllocationProfile> profile(
heap_profiler->GetAllocationProfile());
- CHECK(!profile.is_empty());
+ CHECK(profile);
const char* names[] = {"", "foo", "bar"};
- auto node_bar = FindAllocationProfileNode(
- *profile, Vector<const char*>(names, arraysize(names)));
+ auto node_bar = FindAllocationProfileNode(*profile, ArrayVector(names));
CHECK(node_bar);
// Count the number of allocations we sampled from bar.
@@ -2971,18 +3010,16 @@ TEST(SamplingHeapProfiler) {
heap_profiler->StartSamplingHeapProfiler(64);
CompileRun(record_trace_tree_source);
- v8::base::SmartPointer<v8::AllocationProfile> profile(
+ std::unique_ptr<v8::AllocationProfile> profile(
heap_profiler->GetAllocationProfile());
- CHECK(!profile.is_empty());
+ CHECK(profile);
const char* names1[] = {"", "start", "f_0_0", "f_0_1", "f_0_2"};
- auto node1 = FindAllocationProfileNode(
- *profile, Vector<const char*>(names1, arraysize(names1)));
+ auto node1 = FindAllocationProfileNode(*profile, ArrayVector(names1));
CHECK(node1);
const char* names2[] = {"", "generateFunctions"};
- auto node2 = FindAllocationProfileNode(
- *profile, Vector<const char*>(names2, arraysize(names2)));
+ auto node2 = FindAllocationProfileNode(*profile, ArrayVector(names2));
CHECK(node2);
heap_profiler->StopSamplingHeapProfiler();
@@ -2998,9 +3035,11 @@ TEST(SamplingHeapProfiler) {
CcTest::heap()->CollectAllGarbage();
- v8::base::SmartPointer<v8::AllocationProfile> profile(
+ std::unique_ptr<v8::AllocationProfile> profile(
heap_profiler->GetAllocationProfile());
- CHECK(!profile.is_empty());
+ CHECK(profile);
+
+ CheckNoZeroCountNodes(profile->GetRootNode());
heap_profiler->StopSamplingHeapProfiler();
}
@@ -3019,12 +3058,11 @@ TEST(SamplingHeapProfilerApiAllocation) {
for (int i = 0; i < 8 * 1024; ++i) v8::Object::New(env->GetIsolate());
- v8::base::SmartPointer<v8::AllocationProfile> profile(
+ std::unique_ptr<v8::AllocationProfile> profile(
heap_profiler->GetAllocationProfile());
- CHECK(!profile.is_empty());
+ CHECK(profile);
const char* names[] = {"(V8 API)"};
- auto node = FindAllocationProfileNode(
- *profile, Vector<const char*>(names, arraysize(names)));
+ auto node = FindAllocationProfileNode(*profile, ArrayVector(names));
CHECK(node);
heap_profiler->StopSamplingHeapProfiler();
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 004781ab41..9f33d55938 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -925,15 +925,14 @@ TEST(SubclassErrorBuiltin) {
v8::HandleScope scope(CcTest::isolate());
const int first_field = 2;
- TestSubclassBuiltin("A1", JS_OBJECT_TYPE, "Error", "'err'", first_field);
- TestSubclassBuiltin("A2", JS_OBJECT_TYPE, "EvalError", "'err'", first_field);
- TestSubclassBuiltin("A3", JS_OBJECT_TYPE, "RangeError", "'err'", first_field);
- TestSubclassBuiltin("A4", JS_OBJECT_TYPE, "ReferenceError", "'err'",
+ TestSubclassBuiltin("A1", JS_ERROR_TYPE, "Error", "'err'", first_field);
+ TestSubclassBuiltin("A2", JS_ERROR_TYPE, "EvalError", "'err'", first_field);
+ TestSubclassBuiltin("A3", JS_ERROR_TYPE, "RangeError", "'err'", first_field);
+ TestSubclassBuiltin("A4", JS_ERROR_TYPE, "ReferenceError", "'err'",
first_field);
- TestSubclassBuiltin("A5", JS_OBJECT_TYPE, "SyntaxError", "'err'",
- first_field);
- TestSubclassBuiltin("A6", JS_OBJECT_TYPE, "TypeError", "'err'", first_field);
- TestSubclassBuiltin("A7", JS_OBJECT_TYPE, "URIError", "'err'", first_field);
+ TestSubclassBuiltin("A5", JS_ERROR_TYPE, "SyntaxError", "'err'", first_field);
+ TestSubclassBuiltin("A6", JS_ERROR_TYPE, "TypeError", "'err'", first_field);
+ TestSubclassBuiltin("A7", JS_ERROR_TYPE, "URIError", "'err'", first_field);
}
@@ -1100,7 +1099,7 @@ TEST(SubclassPromiseBuiltin) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- const int first_field = 4;
+ const int first_field = 5;
TestSubclassBuiltin("A1", JS_PROMISE_TYPE, "Promise",
"function(resolve, reject) { resolve('ok'); }",
first_field);
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index f12ca87fa6..09546e94f3 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -27,11 +27,12 @@
#include <limits.h>
+#include <memory>
+
#include "src/v8.h"
#include "src/api.h"
#include "src/base/platform/platform.h"
-#include "src/base/smart-pointers.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/isolate.h"
@@ -100,7 +101,7 @@ TEST(KangarooIsolates) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
- v8::base::SmartPointer<KangarooThread> thread1;
+ std::unique_ptr<KangarooThread> thread1;
{
v8::Locker locker(isolate);
v8::Isolate::Scope isolate_scope(isolate);
@@ -109,7 +110,7 @@ TEST(KangarooIsolates) {
v8::Context::Scope context_scope(context);
CHECK_EQ(isolate, v8::Isolate::GetCurrent());
CompileRun("function getValue() { return 30; }");
- thread1.Reset(new KangarooThread(isolate, context));
+ thread1.reset(new KangarooThread(isolate, context));
}
thread1->Start();
thread1->Join();
@@ -465,8 +466,7 @@ class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
}
virtual void Run() {
- v8::base::SmartPointer<LockIsolateAndCalculateFibSharedContextThread>
- thread;
+ std::unique_ptr<LockIsolateAndCalculateFibSharedContextThread> thread;
v8::Locker lock1(isolate1_);
CHECK(v8::Locker::IsLocked(isolate1_));
CHECK(!v8::Locker::IsLocked(isolate2_));
@@ -478,8 +478,8 @@ class LockAndUnlockDifferentIsolatesThread : public JoinableThread {
v8::Context::Scope context_scope(context1);
CalcFibAndCheck(context1);
}
- thread.Reset(new LockIsolateAndCalculateFibSharedContextThread(
- isolate1_, context1));
+ thread.reset(new LockIsolateAndCalculateFibSharedContextThread(isolate1_,
+ context1));
}
v8::Locker lock2(isolate2_);
CHECK(v8::Locker::IsLocked(isolate1_));
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 05d7103685..a4bd2ed0dd 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -29,14 +29,13 @@
#include <stdlib.h>
-#include "src/v8.h"
-
+#include "include/v8-profiler.h"
#include "src/api.h"
#include "src/codegen.h"
#include "src/disassembler.h"
#include "src/isolate.h"
#include "src/log.h"
-#include "src/profiler/sampler.h"
+#include "src/v8.h"
#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/trace-extension.h"
@@ -46,6 +45,7 @@ using v8::Local;
using v8::Object;
using v8::Script;
using v8::String;
+using v8::TickSample;
using v8::Value;
using v8::internal::byte;
@@ -53,18 +53,15 @@ using v8::internal::Address;
using v8::internal::Handle;
using v8::internal::Isolate;
using v8::internal::JSFunction;
-using v8::internal::TickSample;
-
-static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
+static bool IsAddressWithinFuncCode(JSFunction* function, void* addr) {
+ Address address = reinterpret_cast<Address>(addr);
i::AbstractCode* code = function->abstract_code();
- return code->contains(addr);
+ return code->contains(address);
}
-
static bool IsAddressWithinFuncCode(v8::Local<v8::Context> context,
- const char* func_name,
- Address addr) {
+ const char* func_name, void* addr) {
v8::Local<v8::Value> func =
context->Global()->Get(context, v8_str(func_name)).ToLocalChecked();
CHECK(func->IsFunction());
@@ -79,7 +76,8 @@ static bool IsAddressWithinFuncCode(v8::Local<v8::Context> context,
static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
i::StackFrameIterator frame_iterator(isolate);
- CHECK(frame_iterator.frame()->is_exit());
+ CHECK(frame_iterator.frame()->is_exit() ||
+ frame_iterator.frame()->is_builtin_exit());
frame_iterator.Advance();
CHECK(frame_iterator.frame()->is_construct());
frame_iterator.Advance();
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 9c7e63bfed..b6bb2569dd 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -375,9 +375,8 @@ TEST(LogCallbacks) {
ObjMethod1_entry = *FUNCTION_ENTRYPOINT_ADDRESS(ObjMethod1_entry);
#endif
i::EmbeddedVector<char, 100> ref_data;
- i::SNPrintF(ref_data,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"method1\"",
- reinterpret_cast<intptr_t>(ObjMethod1_entry));
+ i::SNPrintF(ref_data, "code-creation,Callback,-2,%p,1,\"method1\"",
+ static_cast<void*>(ObjMethod1_entry));
CHECK(StrNStr(log.start(), ref_data.start(), log.length()));
log.Dispose();
@@ -429,8 +428,8 @@ TEST(LogAccessorCallbacks) {
#endif
EmbeddedVector<char, 100> prop1_getter_record;
i::SNPrintF(prop1_getter_record,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop1\"",
- reinterpret_cast<intptr_t>(Prop1Getter_entry));
+ "code-creation,Callback,-2,%p,1,\"get prop1\"",
+ static_cast<void*>(Prop1Getter_entry));
CHECK(StrNStr(log.start(), prop1_getter_record.start(), log.length()));
Address Prop1Setter_entry = reinterpret_cast<Address>(Prop1Setter);
@@ -439,8 +438,8 @@ TEST(LogAccessorCallbacks) {
#endif
EmbeddedVector<char, 100> prop1_setter_record;
i::SNPrintF(prop1_setter_record,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"set prop1\"",
- reinterpret_cast<intptr_t>(Prop1Setter_entry));
+ "code-creation,Callback,-2,%p,1,\"set prop1\"",
+ static_cast<void*>(Prop1Setter_entry));
CHECK(StrNStr(log.start(), prop1_setter_record.start(), log.length()));
Address Prop2Getter_entry = reinterpret_cast<Address>(Prop2Getter);
@@ -449,8 +448,8 @@ TEST(LogAccessorCallbacks) {
#endif
EmbeddedVector<char, 100> prop2_getter_record;
i::SNPrintF(prop2_getter_record,
- "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop2\"",
- reinterpret_cast<intptr_t>(Prop2Getter_entry));
+ "code-creation,Callback,-2,%p,1,\"get prop2\"",
+ static_cast<void*>(Prop2Getter_entry));
CHECK(StrNStr(log.start(), prop2_getter_record.start(), log.length()));
log.Dispose();
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 91ee215315..057c370304 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -60,6 +60,71 @@ static bool all_zeroes(const byte* beg, const byte* end) {
return true;
}
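+// Assemble a stub that byte-swaps each field of T in place. The swapped bytes
+// end up in the most significant positions; ByteSwapSigned fills the remaining
+// bits with the sign bit and ByteSwapUnsigned with zeroes, which is what the
+// expected values below encode.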
+TEST(BYTESWAP) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ struct T {
+ int32_t r1;
+ int32_t r2;
+ int32_t r3;
+ int32_t r4;
+ int32_t r5;
+ };
+ T t;
+
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r1)));
+ __ nop();
+ __ ByteSwapSigned(a2, a2, 4);
+ __ sw(a2, MemOperand(a0, offsetof(T, r1)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r2)));
+ __ nop();
+ __ ByteSwapSigned(a2, a2, 2);
+ __ sw(a2, MemOperand(a0, offsetof(T, r2)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r3)));
+ __ nop();
+ __ ByteSwapSigned(a2, a2, 1);
+ __ sw(a2, MemOperand(a0, offsetof(T, r3)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r4)));
+ __ nop();
+ __ ByteSwapUnsigned(a2, a2, 1);
+ __ sw(a2, MemOperand(a0, offsetof(T, r4)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r5)));
+ __ nop();
+ __ ByteSwapUnsigned(a2, a2, 2);
+ __ sw(a2, MemOperand(a0, offsetof(T, r5)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ t.r1 = 0x781A15C3;
+ t.r2 = 0x2CDE;
+ t.r3 = 0x9F;
+ t.r4 = 0x9F;
+ t.r5 = 0x2CDE;
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(static_cast<int32_t>(0xC3151A78), t.r1);
+ CHECK_EQ(static_cast<int32_t>(0xDE2C0000), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0x9FFFFFFF), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x9F000000), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xDE2C0000), t.r5);
+}
TEST(CopyBytes) {
CcTest::InitializeVM();
@@ -390,14 +455,14 @@ TEST(Lsa) {
}
}
-static const std::vector<uint32_t> uint32_test_values() {
+static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
0x7fffffff, 0x80000000, 0x80000001,
0x80ffff00, 0x8fffffff, 0xffffffff};
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
-static const std::vector<int32_t> int32_test_values() {
+static const std::vector<int32_t> cvt_trunc_int32_test_values() {
static const int32_t kValues[] = {
static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
@@ -408,13 +473,31 @@ static const std::vector<int32_t> int32_test_values() {
}
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
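+// The macros below take the test vector explicitly so that each test can
+// iterate its own set of values; the *_INPUTS2 variants additionally walk the
+// vector forwards and backwards in lockstep.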
-#define FOR_INPUTS(ctype, itype, var) \
- std::vector<ctype> var##_vec = itype##_test_values(); \
+#define FOR_INPUTS(ctype, itype, var, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
for (std::vector<ctype>::iterator var = var##_vec.begin(); \
var != var##_vec.end(); ++var)
-#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
-#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ std::vector<ctype>::iterator var; \
+ std::vector<ctype>::reverse_iterator var2; \
+ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
+ var != var##_vec.end(); ++var, ++var2)
+
+#define FOR_ENUM_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(enum type, type, var, test_vector)
+#define FOR_STRUCT_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(struct type, type, var, test_vector)
+#define FOR_UINT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint32_t, uint32, var, test_vector)
+#define FOR_INT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(int32_t, int32, var, test_vector)
+#define FOR_INT32_INPUTS2(var, var2, test_vector) \
+ FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
+
+#define FOR_UINT64_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint64_t, uint32, var, test_vector)
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
@@ -445,7 +528,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
TEST(cvt_s_w_Trunc_uw_s) {
CcTest::InitializeVM();
- FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) {
uint32_t input = *i;
CHECK_EQ(static_cast<float>(input),
run_Cvt<uint32_t>(input, [](MacroAssembler* masm) {
@@ -457,7 +540,7 @@ TEST(cvt_s_w_Trunc_uw_s) {
TEST(cvt_d_w_Trunc_w_d) {
CcTest::InitializeVM();
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(i, cvt_trunc_int32_test_values) {
int32_t input = *i;
CHECK_EQ(static_cast<double>(input),
run_Cvt<int32_t>(input, [](MacroAssembler* masm) {
@@ -467,6 +550,415 @@ TEST(cvt_d_w_Trunc_w_d) {
}
}
+static const std::vector<int32_t> overflow_int32_test_values() {
+ static const int32_t kValues[] = {
+ static_cast<int32_t>(0xf0000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0xff000000), static_cast<int32_t>(0x0000f000),
+ static_cast<int32_t>(0x0f000000), static_cast<int32_t>(0x991234ab),
+ static_cast<int32_t>(0xb0ffff01), static_cast<int32_t>(0x00006fff),
+ static_cast<int32_t>(0xffffffff)};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+enum OverflowBranchType {
+ kAddBranchOverflow,
+ kSubBranchOverflow,
+};
+
+struct OverflowRegisterCombination {
+ Register dst;
+ Register left;
+ Register right;
+ Register scratch;
+};
+
+static const std::vector<enum OverflowBranchType> overflow_branch_type() {
+ static const enum OverflowBranchType kValues[] = {kAddBranchOverflow,
+ kSubBranchOverflow};
+ return std::vector<enum OverflowBranchType>(&kValues[0],
+ &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<struct OverflowRegisterCombination>
+overflow_register_combination() {
+ static const struct OverflowRegisterCombination kValues[] = {
+ {t0, t1, t2, t3}, {t0, t0, t2, t3}, {t0, t1, t0, t3}, {t0, t1, t1, t3}};
+ return std::vector<struct OverflowRegisterCombination>(
+ &kValues[0], &kValues[arraysize(kValues)]);
+}
+
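+// Reference predicates for signed overflow, written so that the check itself
+// never overflows: an addition overflows iff one operand exceeds the distance
+// from the other operand to the representable limit, and similarly for
+// subtraction.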
+template <typename T>
+static bool IsAddOverflow(T x, T y) {
+ DCHECK(std::numeric_limits<T>::is_integer);
+ T max = std::numeric_limits<T>::max();
+ T min = std::numeric_limits<T>::min();
+
+ return (x > 0 && y > (max - x)) || (x < 0 && y < (min - x));
+}
+
+template <typename T>
+static bool IsSubOverflow(T x, T y) {
+ DCHECK(std::numeric_limits<T>::is_integer);
+ T max = std::numeric_limits<T>::max();
+ T min = std::numeric_limits<T>::min();
+
+ return (y > 0 && x < (min + y)) || (y < 0 && x > (max + y));
+}
+
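+// Assembles the instructions produced by GenerateOverflowInstructions into a
+// stub, runs it, and returns the flag the stub leaves in v0 (1 when the
+// overflow path was taken, 0 otherwise).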
+template <typename IN_TYPE, typename Func>
+static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
+ Func GenerateOverflowInstructions) {
+ typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateOverflowInstructions(masm, valLeft, valRight);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ int32_t r =
+ reinterpret_cast<int32_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ DCHECK(r == 0 || r == 1);
+ return r;
+}
+
+TEST(BranchOverflowInt32BothLabelsTrampoline) {
+ if (!IsMipsArchVariant(kMips32r6)) return;
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
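+  // Emitting more short branches than an 18-bit offset can span forces the
+  // assembler to insert a trampoline pool between the overflow branch and its
+  // targets, which is what this test exercises.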
+
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and right
+          // test values must also be equal; otherwise we skip the test.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ }
+
+ Label done;
+ size_t nr_calls =
+ kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
+ for (size_t i = 0; i < nr_calls; ++i) {
+ __ BranchShort(&done, eq, a0, Operand(a1));
+ }
+ __ bind(&done);
+
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32BothLabels) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and right
+          // test values must also be equal; otherwise we skip the test.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32LeftLabel) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and right
+          // test values must also be equal; otherwise we skip the test.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32RightLabel) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and right
+          // test values must also be equal; otherwise we skip the test.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
TEST(min_max_nan) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -577,4 +1069,241 @@ TEST(min_max_nan) {
}
}
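+// Writes |value| into the buffer at in_offset, runs the generated unaligned
+// load/store sequence over the buffer, and reports whether the value read
+// back from out_offset matches.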
+template <typename IN_TYPE, typename Func>
+bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
+ IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
+ typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+ IN_TYPE res;
+
+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
+ CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
+ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
+
+ return res == value;
+}
+
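+// 64-bit test patterns; each test truncates them to the width it exercises.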
+static const std::vector<uint64_t> unsigned_test_values() {
+ static const uint64_t kValues[] = {
+ 0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
+ 0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
+ };
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset() {
+  static const int32_t kValues[] = {// offsets from the middle of the buffer
+ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset_increment() {
+ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+TEST(Ulh) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulh(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulh(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulhu(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), t1);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulhu(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), t1);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulh_bitextension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+                          Label success, fail, end, different;
+                          __ Ulh(t0, MemOperand(a0, in_offset));
+                          __ Ulhu(t1, MemOperand(a0, in_offset));
+                          __ Branch(&different, ne, t0, Operand(t1));
+
+                          // If the signed and unsigned values are the same,
+                          // check that the upper bits are zero.
+                          __ sra(t0, t0, 15);
+                          __ Branch(&success, eq, t0, Operand(zero_reg));
+                          __ Branch(&fail);
+
+                          // If the signed and unsigned values differ, check
+                          // that the upper bits are complementary.
+                          __ bind(&different);
+                          __ sra(t1, t1, 15);
+                          __ Branch(&fail, ne, t1, Operand(1));
+                          __ sra(t0, t0, 15);
+                          __ addiu(t0, t0, 1);
+                          __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ush(t0, MemOperand(a0, out_offset), v0);
+ __ Branch(&end);
+ __ bind(&fail);
+ __ Ush(zero_reg, MemOperand(a0, out_offset), v0);
+ __ bind(&end);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulw) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulw(v0, MemOperand(a0, in_offset));
+ __ Usw(v0, MemOperand(a0, out_offset));
+ }));
+ CHECK_EQ(true,
+ run_Unaligned<uint32_t>(
+                   buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulw(a0, MemOperand(a0, in_offset));
+ __ Usw(a0, MemOperand(t0, out_offset));
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulwc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ float value = static_cast<float>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<float>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulwc1(f0, MemOperand(a0, in_offset), t0);
+ __ Uswc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Uldc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ double value = static_cast<double>(*i);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<double>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Uldc1(f0, MemOperand(a0, in_offset), t0);
+ __ Usdc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index e251242dee..5f9451027c 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -61,6 +61,89 @@ static bool all_zeroes(const byte* beg, const byte* end) {
return true;
}
+TEST(BYTESWAP) {
+ DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ int64_t r1;
+ int64_t r2;
+ int64_t r3;
+ int64_t r4;
+ int64_t r5;
+ int64_t r6;
+ int64_t r7;
+ };
+ T t;
+
+  MacroAssembler assembler(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+
+ MacroAssembler* masm = &assembler;
+
+ __ ld(a4, MemOperand(a0, offsetof(T, r1)));
+ __ nop();
+ __ ByteSwapSigned(a4, a4, 8);
+ __ sd(a4, MemOperand(a0, offsetof(T, r1)));
+
+ __ ld(a4, MemOperand(a0, offsetof(T, r2)));
+ __ nop();
+ __ ByteSwapSigned(a4, a4, 4);
+ __ sd(a4, MemOperand(a0, offsetof(T, r2)));
+
+ __ ld(a4, MemOperand(a0, offsetof(T, r3)));
+ __ nop();
+ __ ByteSwapSigned(a4, a4, 2);
+ __ sd(a4, MemOperand(a0, offsetof(T, r3)));
+
+ __ ld(a4, MemOperand(a0, offsetof(T, r4)));
+ __ nop();
+ __ ByteSwapSigned(a4, a4, 1);
+ __ sd(a4, MemOperand(a0, offsetof(T, r4)));
+
+ __ ld(a4, MemOperand(a0, offsetof(T, r5)));
+ __ nop();
+ __ ByteSwapUnsigned(a4, a4, 1);
+ __ sd(a4, MemOperand(a0, offsetof(T, r5)));
+
+ __ ld(a4, MemOperand(a0, offsetof(T, r6)));
+ __ nop();
+ __ ByteSwapUnsigned(a4, a4, 2);
+ __ sd(a4, MemOperand(a0, offsetof(T, r6)));
+
+ __ ld(a4, MemOperand(a0, offsetof(T, r7)));
+ __ nop();
+ __ ByteSwapUnsigned(a4, a4, 4);
+ __ sd(a4, MemOperand(a0, offsetof(T, r7)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ t.r1 = 0x5612FFCD9D327ACC;
+ t.r2 = 0x781A15C3;
+ t.r3 = 0xFCDE;
+ t.r4 = 0x9F;
+ t.r5 = 0x9F;
+ t.r6 = 0xFCDE;
+ t.r7 = 0xC81A15C3;
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
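+  // Expected: the operand is sign-/zero-extended to 64 bits and all eight
+  // bytes are reversed; e.g. r2 = 0x781A15C3 sign-extends to
+  // 0x00000000781A15C3, which byte-reverses to 0xC3151A7800000000.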
+ CHECK_EQ(static_cast<int64_t>(0xCC7A329DCDFF1256), t.r1);
+ CHECK_EQ(static_cast<int64_t>(0xC3151A7800000000), t.r2);
+ CHECK_EQ(static_cast<int64_t>(0xDEFCFFFFFFFFFFFF), t.r3);
+ CHECK_EQ(static_cast<int64_t>(0x9FFFFFFFFFFFFFFF), t.r4);
+ CHECK_EQ(static_cast<int64_t>(0x9F00000000000000), t.r5);
+ CHECK_EQ(static_cast<int64_t>(0xDEFC000000000000), t.r6);
+ CHECK_EQ(static_cast<int64_t>(0xC3151AC800000000), t.r7);
+}
TEST(CopyBytes) {
CcTest::InitializeVM();
@@ -523,14 +606,14 @@ TEST(Dlsa) {
}
}
-static const std::vector<uint32_t> uint32_test_values() {
+static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
0x7fffffff, 0x80000000, 0x80000001,
0x80ffff00, 0x8fffffff, 0xffffffff};
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
-static const std::vector<int32_t> int32_test_values() {
+static const std::vector<int32_t> cvt_trunc_int32_test_values() {
static const int32_t kValues[] = {
static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
@@ -540,7 +623,7 @@ static const std::vector<int32_t> int32_test_values() {
return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
-static const std::vector<uint64_t> uint64_test_values() {
+static const std::vector<uint64_t> cvt_trunc_uint64_test_values() {
static const uint64_t kValues[] = {
0x0000000000000000, 0x0000000000000001, 0x0000ffffffff0000,
0x7fffffffffffffff, 0x8000000000000000, 0x8000000000000001,
@@ -548,7 +631,7 @@ static const std::vector<uint64_t> uint64_test_values() {
return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
-static const std::vector<int64_t> int64_test_values() {
+static const std::vector<int64_t> cvt_trunc_int64_test_values() {
static const int64_t kValues[] = {static_cast<int64_t>(0x0000000000000000),
static_cast<int64_t>(0x0000000000000001),
static_cast<int64_t>(0x0000ffffffff0000),
@@ -562,15 +645,32 @@ static const std::vector<int64_t> int64_test_values() {
}
// Helper macros that can be used in FOR_INT32_INPUTS(i, test_vector) { ... *i ... }
-#define FOR_INPUTS(ctype, itype, var) \
- std::vector<ctype> var##_vec = itype##_test_values(); \
+#define FOR_INPUTS(ctype, itype, var, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
for (std::vector<ctype>::iterator var = var##_vec.begin(); \
var != var##_vec.end(); ++var)
-#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
-#define FOR_INT64_INPUTS(var) FOR_INPUTS(int64_t, int64, var)
-#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
-#define FOR_UINT64_INPUTS(var) FOR_INPUTS(uint64_t, uint64, var)
+#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ std::vector<ctype>::iterator var; \
+ std::vector<ctype>::reverse_iterator var2; \
+ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
+ var != var##_vec.end(); ++var, ++var2)
+
+#define FOR_ENUM_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(enum type, type, var, test_vector)
+#define FOR_STRUCT_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(struct type, type, var, test_vector)
+#define FOR_INT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(int32_t, int32, var, test_vector)
+#define FOR_INT32_INPUTS2(var, var2, test_vector) \
+ FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
+#define FOR_INT64_INPUTS(var, test_vector) \
+ FOR_INPUTS(int64_t, int64, var, test_vector)
+#define FOR_UINT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint32_t, uint32, var, test_vector)
+#define FOR_UINT64_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint64_t, uint64, var, test_vector)
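+// FOR_INPUTS2 walks the test vector with a forward iterator (var) and a
+// reverse iterator (var2) in lockstep, so each iteration pairs an element
+// with its mirror from the opposite end of the same vector.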
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
@@ -600,7 +700,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
TEST(Cvt_s_uw_Trunc_uw_s) {
CcTest::InitializeVM();
- FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) {
uint32_t input = *i;
CHECK_EQ(static_cast<float>(input),
run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
@@ -613,7 +713,7 @@ TEST(Cvt_s_uw_Trunc_uw_s) {
TEST(Cvt_s_ul_Trunc_ul_s) {
CcTest::InitializeVM();
- FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(i, cvt_trunc_uint64_test_values) {
uint64_t input = *i;
CHECK_EQ(static_cast<float>(input),
run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
@@ -625,7 +725,7 @@ TEST(Cvt_s_ul_Trunc_ul_s) {
TEST(Cvt_d_ul_Trunc_ul_d) {
CcTest::InitializeVM();
- FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(i, cvt_trunc_uint64_test_values) {
uint64_t input = *i;
CHECK_EQ(static_cast<double>(input),
run_Cvt<uint64_t>(input, [](MacroAssembler* masm) {
@@ -637,7 +737,7 @@ TEST(Cvt_d_ul_Trunc_ul_d) {
TEST(cvt_d_l_Trunc_l_d) {
CcTest::InitializeVM();
- FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(i, cvt_trunc_int64_test_values) {
int64_t input = *i;
CHECK_EQ(static_cast<double>(input),
run_Cvt<int64_t>(input, [](MacroAssembler* masm) {
@@ -650,7 +750,7 @@ TEST(cvt_d_l_Trunc_l_d) {
TEST(cvt_d_l_Trunc_l_ud) {
CcTest::InitializeVM();
- FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(i, cvt_trunc_int64_test_values) {
int64_t input = *i;
uint64_t abs_input = (input < 0) ? -input : input;
CHECK_EQ(static_cast<double>(abs_input),
@@ -664,7 +764,7 @@ TEST(cvt_d_l_Trunc_l_ud) {
TEST(cvt_d_w_Trunc_w_d) {
CcTest::InitializeVM();
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(i, cvt_trunc_int32_test_values) {
int32_t input = *i;
CHECK_EQ(static_cast<double>(input),
run_Cvt<int64_t>(input, [](MacroAssembler* masm) {
@@ -677,6 +777,680 @@ TEST(cvt_d_w_Trunc_w_d) {
}
}
+static const std::vector<int32_t> overflow_int32_test_values() {
+ static const int32_t kValues[] = {
+ static_cast<int32_t>(0xf0000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0xff000000), static_cast<int32_t>(0x0000f000),
+ static_cast<int32_t>(0x0f000000), static_cast<int32_t>(0x991234ab),
+ static_cast<int32_t>(0xb0ffff01), static_cast<int32_t>(0x00006fff),
+ static_cast<int32_t>(0xffffffff)};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int64_t> overflow_int64_test_values() {
+ static const int64_t kValues[] = {static_cast<int64_t>(0xf000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0xff00000000000000),
+ static_cast<int64_t>(0x0000f00111111110),
+ static_cast<int64_t>(0x0f00001000000000),
+ static_cast<int64_t>(0x991234ab12a96731),
+ static_cast<int64_t>(0xb0ffff0f0f0f0f01),
+ static_cast<int64_t>(0x00006fffffffffff),
+ static_cast<int64_t>(0xffffffffffffffff)};
+ return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+enum OverflowBranchType {
+ kAddBranchOverflow,
+ kSubBranchOverflow,
+};
+
+struct OverflowRegisterCombination {
+ Register dst;
+ Register left;
+ Register right;
+ Register scratch;
+};
+
+static const std::vector<enum OverflowBranchType> overflow_branch_type() {
+ static const enum OverflowBranchType kValues[] = {kAddBranchOverflow,
+ kSubBranchOverflow};
+ return std::vector<enum OverflowBranchType>(&kValues[0],
+ &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<struct OverflowRegisterCombination>
+overflow_register_combination() {
+ static const struct OverflowRegisterCombination kValues[] = {
+ {t0, t1, t2, t3}, {t0, t0, t2, t3}, {t0, t1, t0, t3}, {t0, t1, t1, t3}};
+ return std::vector<struct OverflowRegisterCombination>(
+ &kValues[0], &kValues[arraysize(kValues)]);
+}
+
+template <typename T>
+static bool IsAddOverflow(T x, T y) {
+ DCHECK(std::numeric_limits<T>::is_integer);
+ T max = std::numeric_limits<T>::max();
+ T min = std::numeric_limits<T>::min();
+
+ return (x > 0 && y > (max - x)) || (x < 0 && y < (min - x));
+}
+
+template <typename T>
+static bool IsSubOverflow(T x, T y) {
+ DCHECK(std::numeric_limits<T>::is_integer);
+ T max = std::numeric_limits<T>::max();
+ T min = std::numeric_limits<T>::min();
+
+ return (y > 0 && x < (min + y)) || (y < 0 && x > (max + y));
+}
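+
+// Both predicates rearrange the overflow condition so that x + y (or x - y)
+// is never actually evaluated, avoiding undefined behavior on signed
+// overflow; e.g. IsAddOverflow<int32_t>(INT32_MAX, 1) reports overflow
+// because 1 > max - INT32_MAX == 0.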
+
+template <typename IN_TYPE, typename Func>
+static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
+ Func GenerateOverflowInstructions) {
+ typedef int64_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateOverflowInstructions(masm, valLeft, valRight);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ int64_t r =
+ reinterpret_cast<int64_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ DCHECK(r == 0 || r == 1);
+ return r;
+}
+
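+// The generated snippets below set v0 to 1 on the overflow path and to 0
+// on the no-overflow path; the both-labels variants also load 2 on the
+// fall-through path, which the DCHECK in runOverflow would catch.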
+TEST(BranchOverflowInt32BothLabelsTrampoline) {
+ if (kArchVariant != kMips64r6) return;
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and
+          // right test values must also match; otherwise skip this case.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ }
+
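+                  // Emit enough short branches to exceed kMaxBranchOffset,
+                  // so the Ovf branches emitted above must reach their
+                  // targets, bound below, through a trampoline.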
+ Label done;
+ size_t nr_calls =
+ kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
+ for (size_t i = 0; i < nr_calls; ++i) {
+ __ BranchShort(&done, eq, a0, Operand(a1));
+ }
+ __ bind(&done);
+
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32BothLabels) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and
+          // right test values must also match; otherwise skip this case.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32LeftLabel) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and
+          // right test values must also match; otherwise skip this case.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32RightLabel) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and
+          // right test values must also match; otherwise skip this case.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt64BothLabels) {
+ FOR_INT64_INPUTS(i, overflow_int64_test_values) {
+ FOR_INT64_INPUTS(j, overflow_int64_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int64_t ii = *i;
+ int64_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and
+          // right test values must also match; otherwise skip this case.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int64_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
+ int64_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ DaddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ DsubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int64_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
+ int64_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt64LeftLabel) {
+ FOR_INT64_INPUTS(i, overflow_int64_test_values) {
+ FOR_INT64_INPUTS(j, overflow_int64_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int64_t ii = *i;
+ int64_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and
+          // right test values must also match; otherwise skip this case.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int64_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
+ int64_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ DaddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ DsubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int64_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
+ int64_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt64RightLabel) {
+ FOR_INT64_INPUTS(i, overflow_int64_test_values) {
+ FOR_INT64_INPUTS(j, overflow_int64_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int64_t ii = *i;
+ int64_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+          // If the left and right registers are the same, the left and
+          // right test values must also match; otherwise skip this case.
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int64_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
+ int64_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ DaddBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ DsubBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int64_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int64_t valLeft,
+ int64_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ DaddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ DsubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int64_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int64_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
TEST(min_max_nan) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -787,4 +1561,344 @@ TEST(min_max_nan) {
}
}
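+
+// Copies |value| into memory_buffer + in_offset, runs the generated
+// load/store snippet with a0 pointing at the buffer, then reads the result
+// back from memory_buffer + out_offset and reports whether it round-tripped
+// unchanged.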
+template <typename IN_TYPE, typename Func>
+bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
+ IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
+ typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+ IN_TYPE res;
+
+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
+ CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
+ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
+
+ return res == value;
+}
+
+static const std::vector<uint64_t> unsigned_test_values() {
+ static const uint64_t kValues[] = {
+ 0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
+ 0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
+ };
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset() {
+  static const int32_t kValues[] = {// offsets into the test buffer
+                                    -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset_increment() {
+ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+TEST(Ulh) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+        uint16_t value = static_cast<uint16_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulh(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulh(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulhu(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), t1);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulhu(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), t1);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulh_bitextension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+        uint16_t value = static_cast<uint16_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ulhu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+                           // If the signed and unsigned values are the
+                           // same, check that the upper bits are zero
+ __ sra(t0, t0, 15);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+                           // If the signed and unsigned values differ,
+                           // check that the upper bits are complementary
+ __ bind(&different);
+ __ sra(t1, t1, 15);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ sra(t0, t0, 15);
+ __ addiu(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ush(t0, MemOperand(a0, out_offset), v0);
+ __ Branch(&end);
+ __ bind(&fail);
+ __ Ush(zero_reg, MemOperand(a0, out_offset), v0);
+ __ bind(&end);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulw) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulw(v0, MemOperand(a0, in_offset));
+ __ Usw(v0, MemOperand(a0, out_offset));
+ }));
+ CHECK_EQ(true,
+ run_Unaligned<uint32_t>(
+                   buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulw(a0, MemOperand(a0, in_offset));
+ __ Usw(a0, MemOperand(t0, out_offset));
+ }));
+ CHECK_EQ(true, run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulwu(v0, MemOperand(a0, in_offset));
+ __ Usw(v0, MemOperand(a0, out_offset));
+ }));
+ CHECK_EQ(true,
+ run_Unaligned<uint32_t>(
+                   buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulwu(a0, MemOperand(a0, in_offset));
+ __ Usw(a0, MemOperand(t0, out_offset));
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulw_extension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ulw(t0, MemOperand(a0, in_offset));
+ __ Ulwu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+                           // If the signed and unsigned values are the
+                           // same, check that the upper bits are zero
+ __ dsra(t0, t0, 31);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+                           // If the signed and unsigned values differ,
+                           // check that the upper bits are complementary
+ __ bind(&different);
+ __ dsra(t1, t1, 31);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ dsra(t0, t0, 31);
+ __ daddiu(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ulw(t0, MemOperand(a0, in_offset));
+ __ Usw(t0, MemOperand(a0, out_offset));
+ __ Branch(&end);
+ __ bind(&fail);
+ __ Usw(zero_reg, MemOperand(a0, out_offset));
+ __ bind(&end);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Uld) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint64_t value = *i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint64_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Uld(v0, MemOperand(a0, in_offset));
+ __ Usd(v0, MemOperand(a0, out_offset));
+ }));
+ CHECK_EQ(true,
+ run_Unaligned<uint64_t>(
+                   buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Uld(a0, MemOperand(a0, in_offset));
+ __ Usd(a0, MemOperand(t0, out_offset));
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulwc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ float value = static_cast<float>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<float>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulwc1(f0, MemOperand(a0, in_offset), t0);
+ __ Uswc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Uldc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ double value = static_cast<double>(*i);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<double>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Uldc1(f0, MemOperand(a0, in_offset), t0);
+ __ Usdc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 612f9e88a3..cfdb75d1e8 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -55,7 +55,10 @@ using i::carry;
using i::greater;
using i::greater_equal;
using i::kIntSize;
+using i::kFloatSize;
+using i::kDoubleSize;
using i::kPointerSize;
+using i::kSimd128Size;
using i::kSmiTagMask;
using i::kSmiValueSize;
using i::less_equal;
@@ -79,6 +82,22 @@ using i::rdi;
using i::rdx;
using i::rsi;
using i::rsp;
+using i::xmm0;
+using i::xmm1;
+using i::xmm2;
+using i::xmm3;
+using i::xmm4;
+using i::xmm5;
+using i::xmm6;
+using i::xmm7;
+using i::xmm8;
+using i::xmm9;
+using i::xmm10;
+using i::xmm11;
+using i::xmm12;
+using i::xmm13;
+using i::xmm14;
+using i::xmm15;
using i::times_pointer_size;
// Test the x64 assembler by compiling some simple functions into
@@ -2728,5 +2747,159 @@ TEST(LoadAndStoreWithRepresentation) {
CHECK_EQ(0, result);
}
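+
+// Each helper below stores its scalar inputs lane by lane into a 16-byte
+// stack slot, applies the packed Abs/Neg macro to the whole vector, and
+// compares every lane against the scalar result, bumping rax before each
+// check so the returned value identifies the first failing check.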
+void TestFloat32x4Abs(MacroAssembler* masm, Label* exit, float x, float y,
+ float z, float w) {
+ __ subq(rsp, Immediate(kSimd128Size));
+
+ __ Move(xmm1, x);
+ __ Movss(Operand(rsp, 0 * kFloatSize), xmm1);
+ __ Move(xmm2, y);
+ __ Movss(Operand(rsp, 1 * kFloatSize), xmm2);
+ __ Move(xmm3, z);
+ __ Movss(Operand(rsp, 2 * kFloatSize), xmm3);
+ __ Move(xmm4, w);
+ __ Movss(Operand(rsp, 3 * kFloatSize), xmm4);
+ __ Movups(xmm0, Operand(rsp, 0));
+
+ __ Absps(xmm0);
+ __ Movups(Operand(rsp, 0), xmm0);
+
+ __ incq(rax);
+ __ Move(xmm1, fabsf(x));
+ __ Ucomiss(xmm1, Operand(rsp, 0 * kFloatSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm2, fabsf(y));
+ __ Ucomiss(xmm2, Operand(rsp, 1 * kFloatSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm3, fabsf(z));
+ __ Ucomiss(xmm3, Operand(rsp, 2 * kFloatSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm4, fabsf(w));
+ __ Ucomiss(xmm4, Operand(rsp, 3 * kFloatSize));
+ __ j(not_equal, exit);
+
+ __ addq(rsp, Immediate(kSimd128Size));
+}
+
+void TestFloat32x4Neg(MacroAssembler* masm, Label* exit, float x, float y,
+ float z, float w) {
+ __ subq(rsp, Immediate(kSimd128Size));
+
+ __ Move(xmm1, x);
+ __ Movss(Operand(rsp, 0 * kFloatSize), xmm1);
+ __ Move(xmm2, y);
+ __ Movss(Operand(rsp, 1 * kFloatSize), xmm2);
+ __ Move(xmm3, z);
+ __ Movss(Operand(rsp, 2 * kFloatSize), xmm3);
+ __ Move(xmm4, w);
+ __ Movss(Operand(rsp, 3 * kFloatSize), xmm4);
+ __ Movups(xmm0, Operand(rsp, 0));
+
+ __ Negps(xmm0);
+ __ Movups(Operand(rsp, 0), xmm0);
+
+ __ incq(rax);
+ __ Move(xmm1, -x);
+ __ Ucomiss(xmm1, Operand(rsp, 0 * kFloatSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm2, -y);
+ __ Ucomiss(xmm2, Operand(rsp, 1 * kFloatSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm3, -z);
+ __ Ucomiss(xmm3, Operand(rsp, 2 * kFloatSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm4, -w);
+ __ Ucomiss(xmm4, Operand(rsp, 3 * kFloatSize));
+ __ j(not_equal, exit);
+
+ __ addq(rsp, Immediate(kSimd128Size));
+}
+
+void TestFloat64x2Abs(MacroAssembler* masm, Label* exit, double x, double y) {
+ __ subq(rsp, Immediate(kSimd128Size));
+
+ __ Move(xmm1, x);
+ __ Movsd(Operand(rsp, 0 * kDoubleSize), xmm1);
+ __ Move(xmm2, y);
+ __ Movsd(Operand(rsp, 1 * kDoubleSize), xmm2);
+ __ Movupd(xmm0, Operand(rsp, 0));
+
+ __ Abspd(xmm0);
+ __ Movupd(Operand(rsp, 0), xmm0);
+
+ __ incq(rax);
+ __ Move(xmm1, fabs(x));
+ __ Ucomisd(xmm1, Operand(rsp, 0 * kDoubleSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm2, fabs(y));
+ __ Ucomisd(xmm2, Operand(rsp, 1 * kDoubleSize));
+ __ j(not_equal, exit);
+
+ __ addq(rsp, Immediate(kSimd128Size));
+}
+
+void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
+ __ subq(rsp, Immediate(kSimd128Size));
+
+ __ Move(xmm1, x);
+ __ Movsd(Operand(rsp, 0 * kDoubleSize), xmm1);
+ __ Move(xmm2, y);
+ __ Movsd(Operand(rsp, 1 * kDoubleSize), xmm2);
+ __ Movupd(xmm0, Operand(rsp, 0));
+
+ __ Negpd(xmm0);
+ __ Movupd(Operand(rsp, 0), xmm0);
+
+ __ incq(rax);
+ __ Move(xmm1, -x);
+ __ Ucomisd(xmm1, Operand(rsp, 0 * kDoubleSize));
+ __ j(not_equal, exit);
+ __ incq(rax);
+ __ Move(xmm2, -y);
+ __ Ucomisd(xmm2, Operand(rsp, 1 * kDoubleSize));
+ __ j(not_equal, exit);
+
+ __ addq(rsp, Immediate(kSimd128Size));
+}
+
+TEST(SIMDMacros) {
+ // Allocate an executable page of memory.
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize * 2, &actual_size, true));
+ CHECK(buffer);
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
+
+ MacroAssembler* masm = &assembler;
+ EntryCode(masm);
+ Label exit;
+
+ __ xorq(rax, rax);
+ TestFloat32x4Abs(masm, &exit, 1.5, -1.5, 0.5, -0.5);
+ TestFloat32x4Neg(masm, &exit, 1.5, -1.5, 0.5, -0.5);
+ TestFloat64x2Abs(masm, &exit, 1.75, -1.75);
+ TestFloat64x2Neg(masm, &exit, 1.75, -1.75);
+
+ __ xorq(rax, rax); // Success.
+ __ bind(&exit);
+ ExitCode(masm);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ // Call the function from C++.
+ int result = FUNCTION_CAST<F0>(buffer)();
+ CHECK_EQ(0, result);
+}
#undef __
diff --git a/deps/v8/test/cctest/test-microtask-delivery.cc b/deps/v8/test/cctest/test-microtask-delivery.cc
deleted file mode 100644
index 3150ab8872..0000000000
--- a/deps/v8/test/cctest/test-microtask-delivery.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "test/cctest/cctest.h"
-
-namespace i = v8::internal;
-
-namespace {
-class HarmonyIsolate {
- public:
- HarmonyIsolate() {
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- isolate_ = v8::Isolate::New(create_params);
- isolate_->Enter();
- }
-
- ~HarmonyIsolate() {
- isolate_->Exit();
- isolate_->Dispose();
- }
-
- v8::Isolate* GetIsolate() const { return isolate_; }
-
- private:
- v8::Isolate* isolate_;
-};
-} // namespace
-
-
-TEST(MicrotaskDeliverySimple) {
- i::FLAG_harmony_object_observe = true;
- HarmonyIsolate isolate;
- v8::HandleScope scope(isolate.GetIsolate());
- LocalContext context(isolate.GetIsolate());
- CompileRun(
- "var ordering = [];"
- "var resolver = {};"
- "function handler(resolve) { resolver.resolve = resolve; }"
- "var obj = {};"
- "var observeOrders = [1, 4];"
- "function observer() {"
- "ordering.push(observeOrders.shift());"
- "resolver.resolve();"
- "}"
- "var p = new Promise(handler);"
- "p.then(function() {"
- "ordering.push(2);"
- "}).then(function() {"
- "ordering.push(3);"
- "obj.id++;"
- "return new Promise(handler);"
- "}).then(function() {"
- "ordering.push(5);"
- "}).then(function() {"
- "ordering.push(6);"
- "});"
- "Object.observe(obj, observer);"
- "obj.id = 1;");
- CHECK_EQ(
- 6, CompileRun("ordering.length")->Int32Value(context.local()).FromJust());
- CHECK_EQ(1,
- CompileRun("ordering[0]")->Int32Value(context.local()).FromJust());
- CHECK_EQ(2,
- CompileRun("ordering[1]")->Int32Value(context.local()).FromJust());
- CHECK_EQ(3,
- CompileRun("ordering[2]")->Int32Value(context.local()).FromJust());
- CHECK_EQ(4,
- CompileRun("ordering[3]")->Int32Value(context.local()).FromJust());
- CHECK_EQ(5,
- CompileRun("ordering[4]")->Int32Value(context.local()).FromJust());
- CHECK_EQ(6,
- CompileRun("ordering[5]")->Int32Value(context.local()).FromJust());
-}
-
-
-TEST(MicrotaskPerIsolateState) {
- i::FLAG_harmony_object_observe = true;
- HarmonyIsolate isolate;
- v8::HandleScope scope(isolate.GetIsolate());
- LocalContext context1(isolate.GetIsolate());
- isolate.GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
- CompileRun(
- "var obj = { calls: 0 };");
- v8::Local<v8::Value> obj = CompileRun("obj");
- {
- LocalContext context2(isolate.GetIsolate());
- context2->Global()
- ->Set(context2.local(),
- v8::String::NewFromUtf8(isolate.GetIsolate(), "obj",
- v8::NewStringType::kNormal)
- .ToLocalChecked(),
- obj)
- .FromJust();
- CompileRun(
- "var resolver = {};"
- "new Promise(function(resolve) {"
- "resolver.resolve = resolve;"
- "}).then(function() {"
- "obj.calls++;"
- "});"
- "(function() {"
- "resolver.resolve();"
- "})();");
- }
- {
- LocalContext context3(isolate.GetIsolate());
- context3->Global()
- ->Set(context3.local(),
- v8::String::NewFromUtf8(isolate.GetIsolate(), "obj",
- v8::NewStringType::kNormal)
- .ToLocalChecked(),
- obj)
- .FromJust();
- CompileRun(
- "var foo = { id: 1 };"
- "Object.observe(foo, function() {"
- "obj.calls++;"
- "});"
- "foo.id++;");
- }
- {
- LocalContext context4(isolate.GetIsolate());
- context4->Global()
- ->Set(context4.local(),
- v8::String::NewFromUtf8(isolate.GetIsolate(), "obj",
- v8::NewStringType::kNormal)
- .ToLocalChecked(),
- obj)
- .FromJust();
- isolate.GetIsolate()->RunMicrotasks();
- CHECK_EQ(2,
- CompileRun("obj.calls")->Int32Value(context4.local()).FromJust());
- }
-}
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
deleted file mode 100644
index f17b8c081e..0000000000
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ /dev/null
@@ -1,1078 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "test/cctest/cctest.h"
-
-using namespace v8;
-namespace i = v8::internal;
-
-inline int32_t ToInt32(v8::Local<v8::Value> value) {
- return value->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext())
- .FromJust();
-}
-
-
-TEST(PerIsolateState) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context1(CcTest::isolate());
-
- Local<Value> foo = v8_str("foo");
- context1->SetSecurityToken(foo);
-
- CompileRun(
- "var count = 0;"
- "var calls = 0;"
- "var observer = function(records) { count = records.length; calls++ };"
- "var obj = {};"
- "Object.observe(obj, observer);");
- Local<Value> observer = CompileRun("observer");
- Local<Value> obj = CompileRun("obj");
- Local<Value> notify_fun1 = CompileRun("(function() { obj.foo = 'bar'; })");
- Local<Value> notify_fun2;
- {
- LocalContext context2(CcTest::isolate());
- context2->SetSecurityToken(foo);
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- obj)
- .FromJust();
- notify_fun2 = CompileRun(
- "(function() { obj.foo = 'baz'; })");
- }
- Local<Value> notify_fun3;
- {
- LocalContext context3(CcTest::isolate());
- context3->SetSecurityToken(foo);
- context3->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- obj)
- .FromJust();
- notify_fun3 = CompileRun("(function() { obj.foo = 'bat'; })");
- }
- {
- LocalContext context4(CcTest::isolate());
- context4->SetSecurityToken(foo);
- context4->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("observer"), observer)
- .FromJust();
- context4->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("fun1"),
- notify_fun1)
- .FromJust();
- context4->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("fun2"),
- notify_fun2)
- .FromJust();
- context4->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("fun3"),
- notify_fun3)
- .FromJust();
- CompileRun("fun1(); fun2(); fun3(); Object.deliverChangeRecords(observer)");
- }
- CHECK_EQ(1, ToInt32(CompileRun("calls")));
- CHECK_EQ(3, ToInt32(CompileRun("count")));
-}
-
-
-TEST(EndOfMicrotaskDelivery) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun(
- "var obj = {};"
- "var count = 0;"
- "var observer = function(records) { count = records.length };"
- "Object.observe(obj, observer);"
- "obj.foo = 'bar';");
- CHECK_EQ(1, ToInt32(CompileRun("count")));
-}
-
-
-TEST(DeliveryOrdering) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun(
- "var obj1 = {};"
- "var obj2 = {};"
- "var ordering = [];"
- "function observer2() { ordering.push(2); };"
- "function observer1() { ordering.push(1); };"
- "function observer3() { ordering.push(3); };"
- "Object.observe(obj1, observer1);"
- "Object.observe(obj1, observer2);"
- "Object.observe(obj1, observer3);"
- "obj1.foo = 'bar';");
- CHECK_EQ(3, ToInt32(CompileRun("ordering.length")));
- CHECK_EQ(1, ToInt32(CompileRun("ordering[0]")));
- CHECK_EQ(2, ToInt32(CompileRun("ordering[1]")));
- CHECK_EQ(3, ToInt32(CompileRun("ordering[2]")));
- CompileRun(
- "ordering = [];"
- "Object.observe(obj2, observer3);"
- "Object.observe(obj2, observer2);"
- "Object.observe(obj2, observer1);"
- "obj2.foo = 'baz'");
- CHECK_EQ(3, ToInt32(CompileRun("ordering.length")));
- CHECK_EQ(1, ToInt32(CompileRun("ordering[0]")));
- CHECK_EQ(2, ToInt32(CompileRun("ordering[1]")));
- CHECK_EQ(3, ToInt32(CompileRun("ordering[2]")));
-}
-
-
-TEST(DeliveryCallbackThrows) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun(
- "var obj = {};"
- "var ordering = [];"
- "function observer1() { ordering.push(1); };"
- "function observer2() { ordering.push(2); };"
- "function observer_throws() {"
- " ordering.push(0);"
- " throw new Error();"
- " ordering.push(-1);"
- "};"
- "Object.observe(obj, observer_throws.bind());"
- "Object.observe(obj, observer1);"
- "Object.observe(obj, observer_throws.bind());"
- "Object.observe(obj, observer2);"
- "Object.observe(obj, observer_throws.bind());"
- "obj.foo = 'bar';");
- CHECK_EQ(5, ToInt32(CompileRun("ordering.length")));
- CHECK_EQ(0, ToInt32(CompileRun("ordering[0]")));
- CHECK_EQ(1, ToInt32(CompileRun("ordering[1]")));
- CHECK_EQ(0, ToInt32(CompileRun("ordering[2]")));
- CHECK_EQ(2, ToInt32(CompileRun("ordering[3]")));
- CHECK_EQ(0, ToInt32(CompileRun("ordering[4]")));
-}
-
-
-TEST(DeliveryChangesMutationInCallback) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun(
- "var obj = {};"
- "var ordering = [];"
- "function observer1(records) {"
- " ordering.push(100 + records.length);"
- " records.push(11);"
- " records.push(22);"
- "};"
- "function observer2(records) {"
- " ordering.push(200 + records.length);"
- " records.push(33);"
- " records.push(44);"
- "};"
- "Object.observe(obj, observer1);"
- "Object.observe(obj, observer2);"
- "obj.foo = 'bar';");
- CHECK_EQ(2, ToInt32(CompileRun("ordering.length")));
- CHECK_EQ(101, ToInt32(CompileRun("ordering[0]")));
- CHECK_EQ(201, ToInt32(CompileRun("ordering[1]")));
-}
-
-
-TEST(DeliveryOrderingReentrant) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun(
- "var obj = {};"
- "var reentered = false;"
- "var ordering = [];"
- "function observer1() { ordering.push(1); };"
- "function observer2() {"
- " if (!reentered) {"
- " obj.foo = 'baz';"
- " reentered = true;"
- " }"
- " ordering.push(2);"
- "};"
- "function observer3() { ordering.push(3); };"
- "Object.observe(obj, observer1);"
- "Object.observe(obj, observer2);"
- "Object.observe(obj, observer3);"
- "obj.foo = 'bar';");
- CHECK_EQ(5, ToInt32(CompileRun("ordering.length")));
- CHECK_EQ(1, ToInt32(CompileRun("ordering[0]")));
- CHECK_EQ(2, ToInt32(CompileRun("ordering[1]")));
- CHECK_EQ(3, ToInt32(CompileRun("ordering[2]")));
- // Note that we re-deliver to observers 1 and 2, while observer3
- // already received the second record during the first round.
- CHECK_EQ(1, ToInt32(CompileRun("ordering[3]")));
- CHECK_EQ(2, ToInt32(CompileRun("ordering[4]")));
-}
-
-
-TEST(DeliveryOrderingDeliverChangeRecords) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun(
- "var obj = {};"
- "var ordering = [];"
- "function observer1() { ordering.push(1); if (!obj.b) obj.b = true };"
- "function observer2() { ordering.push(2); };"
- "Object.observe(obj, observer1);"
- "Object.observe(obj, observer2);"
- "obj.a = 1;"
- "Object.deliverChangeRecords(observer2);");
- CHECK_EQ(4, ToInt32(CompileRun("ordering.length")));
- // First, observer2 is called due to deliverChangeRecords
- CHECK_EQ(2, ToInt32(CompileRun("ordering[0]")));
- // Then, observer1 is called when the stack unwinds
- CHECK_EQ(1, ToInt32(CompileRun("ordering[1]")));
- // observer1's mutation causes both 1 and 2 to be reactivated,
- // with 1 having priority.
- CHECK_EQ(1, ToInt32(CompileRun("ordering[2]")));
- CHECK_EQ(2, ToInt32(CompileRun("ordering[3]")));
-}
-
-
-TEST(ObjectHashTableGrowth) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- // Initializing this context sets up initial hash tables.
- LocalContext context(CcTest::isolate());
- Local<Value> obj = CompileRun("obj = {};");
- Local<Value> observer = CompileRun(
- "var ran = false;"
- "(function() { ran = true })");
- {
- // As does initializing this context.
- LocalContext context2(CcTest::isolate());
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- obj)
- .FromJust();
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("observer"), observer)
- .FromJust();
- CompileRun(
- "var objArr = [];"
- // 100 objects should be enough to make the hash table grow
- // (and thus relocate).
- "for (var i = 0; i < 100; ++i) {"
- " objArr.push({});"
- " Object.observe(objArr[objArr.length-1], function(){});"
- "}"
- "Object.observe(obj, observer);");
- }
- // obj is now marked "is_observed", but our map has moved.
- CompileRun("obj.foo = 'bar'");
- CHECK(CompileRun("ran")
- ->BooleanValue(v8::Isolate::GetCurrent()->GetCurrentContext())
- .FromJust());
-}
-
-
-struct RecordExpectation {
- Local<Value> object;
- const char* type;
- const char* name;
- Local<Value> old_value;
-};
-
-
-// TODO(adamk): Use this helper elsewhere in this file.
-static void ExpectRecords(v8::Isolate* isolate, Local<Value> records,
- const RecordExpectation expectations[], int num) {
- CHECK(records->IsArray());
- Local<Array> recordArray = records.As<Array>();
- CHECK_EQ(num, static_cast<int>(recordArray->Length()));
- for (int i = 0; i < num; ++i) {
- Local<Value> record =
- recordArray->Get(v8::Isolate::GetCurrent()->GetCurrentContext(), i)
- .ToLocalChecked();
- CHECK(record->IsObject());
- Local<Object> recordObj = record.As<Object>();
- Local<Value> value =
- recordObj->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("object"))
- .ToLocalChecked();
- CHECK(expectations[i].object->StrictEquals(value));
- value = recordObj->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("type"))
- .ToLocalChecked();
- CHECK(v8_str(expectations[i].type)
- ->Equals(v8::Isolate::GetCurrent()->GetCurrentContext(), value)
- .FromJust());
- if (strcmp("splice", expectations[i].type) != 0) {
- Local<Value> name =
- recordObj->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("name"))
- .ToLocalChecked();
- CHECK(v8_str(expectations[i].name)
- ->Equals(v8::Isolate::GetCurrent()->GetCurrentContext(), name)
- .FromJust());
- if (!expectations[i].old_value.IsEmpty()) {
- Local<Value> old_value =
- recordObj->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("oldValue"))
- .ToLocalChecked();
- CHECK(expectations[i]
- .old_value->Equals(
- v8::Isolate::GetCurrent()->GetCurrentContext(),
- old_value)
- .FromJust());
- }
- }
- }
-}
-
-#define EXPECT_RECORDS(records, expectations) \
- ExpectRecords(CcTest::isolate(), records, expectations, \
- arraysize(expectations))
-
-TEST(APITestBasicMutation) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* v8_isolate = CcTest::isolate();
- HandleScope scope(v8_isolate);
- LocalContext context(v8_isolate);
- Local<Object> obj = Local<Object>::Cast(
- CompileRun("var records = [];"
- "var obj = {};"
- "function observer(r) { [].push.apply(records, r); };"
- "Object.observe(obj, observer);"
- "obj"));
- obj->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("foo"),
- Number::New(v8_isolate, 7))
- .FromJust();
- obj->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), 1,
- Number::New(v8_isolate, 2))
- .FromJust();
- // CreateDataProperty should work just as well as Set
- obj->CreateDataProperty(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("foo"), Number::New(v8_isolate, 3))
- .FromJust();
- obj->CreateDataProperty(v8::Isolate::GetCurrent()->GetCurrentContext(), 1,
- Number::New(v8_isolate, 4))
- .FromJust();
- // Setting an indexed element via the property setting method
- obj->Set(v8::Isolate::GetCurrent()->GetCurrentContext(),
- Number::New(v8_isolate, 1), Number::New(v8_isolate, 5))
- .FromJust();
- // Setting with a non-String, non-uint32 key
- obj->Set(v8::Isolate::GetCurrent()->GetCurrentContext(),
- Number::New(v8_isolate, 1.1), Number::New(v8_isolate, 6))
- .FromJust();
- obj->Delete(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("foo"))
- .FromJust();
- obj->Delete(v8::Isolate::GetCurrent()->GetCurrentContext(), 1).FromJust();
- obj->Delete(v8::Isolate::GetCurrent()->GetCurrentContext(),
- Number::New(v8_isolate, 1.1))
- .FromJust();
-
- // Force delivery
- // TODO(adamk): Should the above set methods trigger delivery themselves?
- CompileRun("void 0");
- CHECK_EQ(9, ToInt32(CompileRun("records.length")));
- const RecordExpectation expected_records[] = {
- {obj, "add", "foo", Local<Value>()},
- {obj, "add", "1", Local<Value>()},
- // Note: use 7 not 1 below, as the latter triggers a nifty VS10 compiler
- // bug where instead of 1.0, a garbage value would be passed into
- // Number::New.
- {obj, "update", "foo", Number::New(v8_isolate, 7)},
- {obj, "update", "1", Number::New(v8_isolate, 2)},
- {obj, "update", "1", Number::New(v8_isolate, 4)},
- {obj, "add", "1.1", Local<Value>()},
- {obj, "delete", "foo", Number::New(v8_isolate, 3)},
- {obj, "delete", "1", Number::New(v8_isolate, 5)},
- {obj, "delete", "1.1", Number::New(v8_isolate, 6)}};
- EXPECT_RECORDS(CompileRun("records"), expected_records);
-}
-
-
-TEST(HiddenPrototypeObservation) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* v8_isolate = CcTest::isolate();
- HandleScope scope(v8_isolate);
- LocalContext context(v8_isolate);
- Local<FunctionTemplate> tmpl = FunctionTemplate::New(v8_isolate);
- tmpl->SetHiddenPrototype(true);
- tmpl->InstanceTemplate()->Set(v8_str("foo"), Number::New(v8_isolate, 75));
- Local<Function> function =
- tmpl->GetFunction(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocalChecked();
- Local<Object> proto =
- function->NewInstance(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocalChecked();
- Local<Object> obj = Object::New(v8_isolate);
- obj->SetPrototype(v8::Isolate::GetCurrent()->GetCurrentContext(), proto)
- .FromJust();
- context->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"), obj)
- .FromJust();
- context->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("proto"),
- proto)
- .FromJust();
- CompileRun(
- "var records;"
- "function observer(r) { records = r; };"
- "Object.observe(obj, observer);"
- "obj.foo = 41;" // triggers a notification
- "proto.foo = 42;"); // does not trigger a notification
- const RecordExpectation expected_records[] = {
- { obj, "update", "foo", Number::New(v8_isolate, 75) }
- };
- EXPECT_RECORDS(CompileRun("records"), expected_records);
- obj->SetPrototype(v8::Isolate::GetCurrent()->GetCurrentContext(),
- Null(v8_isolate))
- .FromJust();
- CompileRun("obj.foo = 43");
- const RecordExpectation expected_records2[] = {
- {obj, "add", "foo", Local<Value>()}};
- EXPECT_RECORDS(CompileRun("records"), expected_records2);
- obj->SetPrototype(v8::Isolate::GetCurrent()->GetCurrentContext(), proto)
- .FromJust();
- CompileRun(
- "Object.observe(proto, observer);"
- "proto.bar = 1;"
- "Object.unobserve(obj, observer);"
- "obj.foo = 44;");
- const RecordExpectation expected_records3[] = {
- {proto, "add", "bar", Local<Value>()}
- // TODO(adamk): The below record should be emitted since proto is observed
- // and has been modified. Not clear if this happens in practice.
- // { proto, "update", "foo", Number::New(43) }
- };
- EXPECT_RECORDS(CompileRun("records"), expected_records3);
-}
-
-
-static int NumberOfElements(i::Handle<i::JSWeakMap> map) {
- return i::ObjectHashTable::cast(map->table())->NumberOfElements();
-}
-
-
-TEST(ObservationWeakMap) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun(
- "var obj = {};"
- "Object.observe(obj, function(){});"
- "Object.getNotifier(obj);"
- "obj = null;");
- i::Isolate* i_isolate = CcTest::i_isolate();
- i::Handle<i::JSObject> observation_state =
- i_isolate->factory()->observation_state();
- i::Handle<i::JSWeakMap> callbackInfoMap = i::Handle<i::JSWeakMap>::cast(
- i::JSReceiver::GetProperty(i_isolate, observation_state,
- "callbackInfoMap")
- .ToHandleChecked());
- i::Handle<i::JSWeakMap> objectInfoMap = i::Handle<i::JSWeakMap>::cast(
- i::JSReceiver::GetProperty(i_isolate, observation_state, "objectInfoMap")
- .ToHandleChecked());
- i::Handle<i::JSWeakMap> notifierObjectInfoMap = i::Handle<i::JSWeakMap>::cast(
- i::JSReceiver::GetProperty(i_isolate, observation_state,
- "notifierObjectInfoMap")
- .ToHandleChecked());
- CHECK_EQ(1, NumberOfElements(callbackInfoMap));
- CHECK_EQ(1, NumberOfElements(objectInfoMap));
- CHECK_EQ(1, NumberOfElements(notifierObjectInfoMap));
- i_isolate->heap()->CollectAllGarbage();
- CHECK_EQ(0, NumberOfElements(callbackInfoMap));
- CHECK_EQ(0, NumberOfElements(objectInfoMap));
- CHECK_EQ(0, NumberOfElements(notifierObjectInfoMap));
-}
-
-
-static int TestObserveSecurity(Local<Context> observer_context,
- Local<Context> object_context,
- Local<Context> mutation_context) {
- Context::Scope observer_scope(observer_context);
- CompileRun("var records = null;"
- "var observer = function(r) { records = r };");
- Local<Value> observer = CompileRun("observer");
- {
- Context::Scope object_scope(object_context);
- object_context->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("observer"), observer)
- .FromJust();
- CompileRun("var obj = {};"
- "obj.length = 0;"
- "Object.observe(obj, observer,"
- "['add', 'update', 'delete','reconfigure','splice']"
- ");");
- Local<Value> obj = CompileRun("obj");
- {
- Context::Scope mutation_scope(mutation_context);
- mutation_context->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- obj)
- .FromJust();
- CompileRun("obj.foo = 'bar';"
- "obj.foo = 'baz';"
- "delete obj.foo;"
- "Object.defineProperty(obj, 'bar', {value: 'bot'});"
- "Array.prototype.push.call(obj, 1, 2, 3);"
- "Array.prototype.splice.call(obj, 1, 2, 2, 4);"
- "Array.prototype.pop.call(obj);"
- "Array.prototype.shift.call(obj);");
- }
- }
- return ToInt32(CompileRun("records ? records.length : 0"));
-}
-
-
-TEST(ObserverSecurityAAA) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> contextA = Context::New(isolate);
- CHECK_EQ(8, TestObserveSecurity(contextA, contextA, contextA));
-}
-
-
-TEST(ObserverSecurityA1A2A3) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
-
- v8::Local<Context> contextA1 = Context::New(isolate);
- v8::Local<Context> contextA2 = Context::New(isolate);
- v8::Local<Context> contextA3 = Context::New(isolate);
-
- Local<Value> foo = v8_str("foo");
- contextA1->SetSecurityToken(foo);
- contextA2->SetSecurityToken(foo);
- contextA3->SetSecurityToken(foo);
-
- CHECK_EQ(8, TestObserveSecurity(contextA1, contextA2, contextA3));
-}
-
-
-TEST(ObserverSecurityAAB) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> contextA = Context::New(isolate);
- v8::Local<Context> contextB = Context::New(isolate);
- CHECK_EQ(0, TestObserveSecurity(contextA, contextA, contextB));
-}
-
-
-TEST(ObserverSecurityA1A2B) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
-
- v8::Local<Context> contextA1 = Context::New(isolate);
- v8::Local<Context> contextA2 = Context::New(isolate);
- v8::Local<Context> contextB = Context::New(isolate);
-
- Local<Value> foo = v8_str("foo");
- contextA1->SetSecurityToken(foo);
- contextA2->SetSecurityToken(foo);
-
- CHECK_EQ(0, TestObserveSecurity(contextA1, contextA2, contextB));
-}
-
-
-TEST(ObserverSecurityABA) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> contextA = Context::New(isolate);
- v8::Local<Context> contextB = Context::New(isolate);
- CHECK_EQ(0, TestObserveSecurity(contextA, contextB, contextA));
-}
-
-
-TEST(ObserverSecurityA1BA2) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> contextA1 = Context::New(isolate);
- v8::Local<Context> contextA2 = Context::New(isolate);
- v8::Local<Context> contextB = Context::New(isolate);
-
- Local<Value> foo = v8_str("foo");
- contextA1->SetSecurityToken(foo);
- contextA2->SetSecurityToken(foo);
-
- CHECK_EQ(0, TestObserveSecurity(contextA1, contextB, contextA2));
-}
-
-
-TEST(ObserverSecurityBAA) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> contextA = Context::New(isolate);
- v8::Local<Context> contextB = Context::New(isolate);
- CHECK_EQ(0, TestObserveSecurity(contextB, contextA, contextA));
-}
-
-
-TEST(ObserverSecurityBA1A2) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> contextA1 = Context::New(isolate);
- v8::Local<Context> contextA2 = Context::New(isolate);
- v8::Local<Context> contextB = Context::New(isolate);
-
- Local<Value> foo = v8_str("foo");
- contextA1->SetSecurityToken(foo);
- contextA2->SetSecurityToken(foo);
-
- CHECK_EQ(0, TestObserveSecurity(contextB, contextA1, contextA2));
-}
-
-
-TEST(ObserverSecurityNotify) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<Context> contextA = Context::New(isolate);
- v8::Local<Context> contextB = Context::New(isolate);
-
- Context::Scope scopeA(contextA);
- CompileRun("var obj = {};"
- "var recordsA = null;"
- "var observerA = function(r) { recordsA = r };"
- "Object.observe(obj, observerA);");
- Local<Value> obj = CompileRun("obj");
-
- {
- Context::Scope scopeB(contextB);
- contextB->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- obj)
- .FromJust();
- CompileRun("var recordsB = null;"
- "var observerB = function(r) { recordsB = r };"
- "Object.observe(obj, observerB);");
- }
-
- CompileRun("var notifier = Object.getNotifier(obj);"
- "notifier.notify({ type: 'update' });");
- CHECK_EQ(1, ToInt32(CompileRun("recordsA ? recordsA.length : 0")));
-
- {
- Context::Scope scopeB(contextB);
- CHECK_EQ(0, ToInt32(CompileRun("recordsB ? recordsB.length : 0")));
- }
-}
-
-
-TEST(HiddenPropertiesLeakage) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun("var obj = {};"
- "var records = null;"
- "var observer = function(r) { records = r };"
- "Object.observe(obj, observer);");
- Local<Value> obj =
- context->Global()
- ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"))
- .ToLocalChecked();
- Local<Object>::Cast(obj)
- ->SetPrivate(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8::Private::New(CcTest::isolate(), v8_str("foo")),
- Null(CcTest::isolate()))
- .FromJust();
- CompileRun(""); // trigger delivery
- CHECK(CompileRun("records")->IsNull());
-}
-
-
-TEST(GetNotifierFromOtherContext) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- LocalContext context(CcTest::isolate());
- CompileRun("var obj = {};");
- Local<Value> instance = CompileRun("obj");
- {
- LocalContext context2(CcTest::isolate());
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- instance)
- .FromJust();
- CHECK(CompileRun("Object.getNotifier(obj)")->IsNull());
- }
-}
-
-
-TEST(GetNotifierFromOtherOrigin) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- Local<Value> foo = v8_str("foo");
- Local<Value> bar = v8_str("bar");
- LocalContext context(CcTest::isolate());
- context->SetSecurityToken(foo);
- CompileRun("var obj = {};");
- Local<Value> instance = CompileRun("obj");
- {
- LocalContext context2(CcTest::isolate());
- context2->SetSecurityToken(bar);
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- instance)
- .FromJust();
- CHECK(CompileRun("Object.getNotifier(obj)")->IsNull());
- }
-}
-
-
-TEST(GetNotifierFromSameOrigin) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- Local<Value> foo = v8_str("foo");
- LocalContext context(CcTest::isolate());
- context->SetSecurityToken(foo);
- CompileRun("var obj = {};");
- Local<Value> instance = CompileRun("obj");
- {
- LocalContext context2(CcTest::isolate());
- context2->SetSecurityToken(foo);
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- instance)
- .FromJust();
- CHECK(CompileRun("Object.getNotifier(obj)")->IsObject());
- }
-}
-
-
-static int GetGlobalObjectsCount() {
- int count = 0;
- i::HeapIterator it(CcTest::heap());
- for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
- if (object->IsJSGlobalObject()) {
- i::JSGlobalObject* g = i::JSGlobalObject::cast(object);
- // Skip dummy global object.
- if (i::GlobalDictionary::cast(g->properties())->NumberOfElements() != 0) {
- count++;
- }
- }
- // Subtract one to compensate for the code stub context that is always present
- return count - 1;
-}
-
-
-static void CheckSurvivingGlobalObjectsCount(int expected) {
- // We need to collect all garbage twice to be sure that everything
- // has been collected. This is because inline caches are cleared in
- // the first garbage collection but some of the maps have already
- // been marked at that point. Therefore some of the maps are not
- // collected until the second garbage collection.
- CcTest::heap()->CollectAllGarbage();
- CcTest::heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
- int count = GetGlobalObjectsCount();
-#ifdef DEBUG
- if (count != expected) CcTest::heap()->TracePathToGlobal();
-#endif
- CHECK_EQ(expected, count);
-}
-
-
-TEST(DontLeakContextOnObserve) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- Local<Value> foo = v8_str("foo");
- LocalContext context(CcTest::isolate());
- context->SetSecurityToken(foo);
- CompileRun("var obj = {};");
- Local<Value> object = CompileRun("obj");
- {
- HandleScope scope(CcTest::isolate());
- LocalContext context2(CcTest::isolate());
- context2->SetSecurityToken(foo);
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- object)
- .FromJust();
- CompileRun("function observer() {};"
- "Object.observe(obj, observer, ['foo', 'bar', 'baz']);"
- "Object.unobserve(obj, observer);");
- }
-
- CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(0);
-}
-
-
-TEST(DontLeakContextOnGetNotifier) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- Local<Value> foo = v8_str("foo");
- LocalContext context(CcTest::isolate());
- context->SetSecurityToken(foo);
- CompileRun("var obj = {};");
- Local<Value> object = CompileRun("obj");
- {
- HandleScope scope(CcTest::isolate());
- LocalContext context2(CcTest::isolate());
- context2->SetSecurityToken(foo);
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- object)
- .FromJust();
- CompileRun("Object.getNotifier(obj);");
- }
-
- CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(0);
-}
-
-
-TEST(DontLeakContextOnNotifierPerformChange) {
- i::FLAG_harmony_object_observe = true;
- HandleScope scope(CcTest::isolate());
- Local<Value> foo = v8_str("foo");
- LocalContext context(CcTest::isolate());
- context->SetSecurityToken(foo);
- CompileRun("var obj = {};");
- Local<Value> object = CompileRun("obj");
- Local<Value> notifier = CompileRun("Object.getNotifier(obj)");
- {
- HandleScope scope(CcTest::isolate());
- LocalContext context2(CcTest::isolate());
- context2->SetSecurityToken(foo);
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- object)
- .FromJust();
- context2->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("notifier"), notifier)
- .FromJust();
- CompileRun("var obj2 = {};"
- "var notifier2 = Object.getNotifier(obj2);"
- "notifier2.performChange.call("
- "notifier, 'foo', function(){})");
- }
-
- CcTest::isolate()->ContextDisposedNotification();
- CheckSurvivingGlobalObjectsCount(0);
-}
-
-
-static void ObserverCallback(const FunctionCallbackInfo<Value>& args) {
- *static_cast<int*>(Local<External>::Cast(args.Data())->Value()) =
- Local<Array>::Cast(args[0])->Length();
-}
-
-
-TEST(ObjectObserveCallsCppFunction) {
- i::FLAG_harmony_object_observe = true;
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext context(isolate);
- int numRecordsSent = 0;
- Local<Function> observer =
- Function::New(CcTest::isolate()->GetCurrentContext(), ObserverCallback,
- External::New(isolate, &numRecordsSent))
- .ToLocalChecked();
- context->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("observer"),
- observer)
- .FromJust();
- CompileRun(
- "var obj = {};"
- "Object.observe(obj, observer);"
- "obj.foo = 1;"
- "obj.bar = 2;");
- CHECK_EQ(2, numRecordsSent);
-}
-
-
-TEST(ObjectObserveCallsFunctionTemplateInstance) {
- i::FLAG_harmony_object_observe = true;
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext context(isolate);
- int numRecordsSent = 0;
- Local<FunctionTemplate> tmpl = FunctionTemplate::New(
- isolate, ObserverCallback, External::New(isolate, &numRecordsSent));
- Local<Function> function =
- tmpl->GetFunction(v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocalChecked();
- context->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("observer"),
- function)
- .FromJust();
- CompileRun(
- "var obj = {};"
- "Object.observe(obj, observer);"
- "obj.foo = 1;"
- "obj.bar = 2;");
- CHECK_EQ(2, numRecordsSent);
-}
-
-
-static void AccessorGetter(Local<Name> property,
- const PropertyCallbackInfo<Value>& info) {
- info.GetReturnValue().Set(Integer::New(info.GetIsolate(), 42));
-}
-
-
-static void AccessorSetter(Local<Name> property, Local<Value> value,
- const PropertyCallbackInfo<void>& info) {
- info.GetReturnValue().SetUndefined();
-}
-
-
-TEST(APIAccessorsShouldNotNotify) {
- i::FLAG_harmony_object_observe = true;
- Isolate* isolate = CcTest::isolate();
- HandleScope handle_scope(isolate);
- LocalContext context(isolate);
- Local<Object> object = Object::New(isolate);
- object->SetAccessor(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str("accessor"), &AccessorGetter, &AccessorSetter)
- .FromJust();
- context->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- object)
- .FromJust();
- CompileRun(
- "var records = null;"
- "Object.observe(obj, function(r) { records = r });"
- "obj.accessor = 43;");
- CHECK(CompileRun("records")->IsNull());
- CompileRun("Object.defineProperty(obj, 'accessor', { value: 44 });");
- CHECK(CompileRun("records")->IsNull());
-}
-
-
-namespace {
-
-int* global_use_counts = NULL;
-
-void MockUseCounterCallback(v8::Isolate* isolate,
- v8::Isolate::UseCounterFeature feature) {
- ++global_use_counts[feature];
-}
-}
-
-
-TEST(UseCountObjectObserve) {
- i::FLAG_harmony_object_observe = true;
- i::Isolate* isolate = CcTest::i_isolate();
- i::HandleScope scope(isolate);
- LocalContext env;
- int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
- global_use_counts = use_counts;
- CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
- CompileRun(
- "var obj = {};"
- "Object.observe(obj, function(){})");
- CHECK_EQ(1, use_counts[v8::Isolate::kObjectObserve]);
- CompileRun(
- "var obj2 = {};"
- "Object.observe(obj2, function(){})");
- // Only counts the first use of observe in a given context.
- CHECK_EQ(1, use_counts[v8::Isolate::kObjectObserve]);
- {
- LocalContext env2;
- CompileRun(
- "var obj = {};"
- "Object.observe(obj, function(){})");
- }
- // Counts different contexts separately.
- CHECK_EQ(2, use_counts[v8::Isolate::kObjectObserve]);
-}
-
-
-TEST(UseCountObjectGetNotifier) {
- i::FLAG_harmony_object_observe = true;
- i::Isolate* isolate = CcTest::i_isolate();
- i::HandleScope scope(isolate);
- LocalContext env;
- int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
- global_use_counts = use_counts;
- CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
- CompileRun("var obj = {}");
- CompileRun("Object.getNotifier(obj)");
- CHECK_EQ(1, use_counts[v8::Isolate::kObjectObserve]);
-}
-
-static bool NamedAccessCheckAlwaysAllow(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object,
- Local<v8::Value> data) {
- return true;
-}
-
-
-TEST(DisallowObserveAccessCheckedObject) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- LocalContext env;
- v8::Local<v8::ObjectTemplate> object_template =
- v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallback(NamedAccessCheckAlwaysAllow);
- Local<Object> new_instance =
- object_template->NewInstance(
- v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocalChecked();
- env->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- new_instance)
- .FromJust();
- v8::TryCatch try_catch(isolate);
- CompileRun("Object.observe(obj, function(){})");
- CHECK(try_catch.HasCaught());
-}
-
-
-TEST(DisallowGetNotifierAccessCheckedObject) {
- i::FLAG_harmony_object_observe = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- LocalContext env;
- v8::Local<v8::ObjectTemplate> object_template =
- v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallback(NamedAccessCheckAlwaysAllow);
- Local<Object> new_instance =
- object_template->NewInstance(
- v8::Isolate::GetCurrent()->GetCurrentContext())
- .ToLocalChecked();
- env->Global()
- ->Set(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("obj"),
- new_instance)
- .FromJust();
- v8::TryCatch try_catch(isolate);
- CompileRun("Object.getNotifier(obj)");
- CHECK(try_catch.HasCaught());
-}
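
Every deleted Object.observe test above shares one cctest shape: set the
harmony flag, build a context, run JavaScript with CompileRun, then assert
on values read back out. A minimal sketch of that shape, assuming only the
helpers the deleted code already uses (CcTest, LocalContext, CompileRun,
ToInt32); the test name is hypothetical:

  TEST(ObserveDeliverySketch) {
    i::FLAG_harmony_object_observe = true;      // enable the staged feature
    HandleScope scope(CcTest::isolate());
    LocalContext context(CcTest::isolate());    // fresh context and global
    CompileRun(
        "var count = 0;"
        "var obj = {};"
        "Object.observe(obj, function(records) { count = records.length });"
        "obj.x = 1;");                          // queues one change record
    CHECK_EQ(1, ToInt32(CompileRun("count")));  // delivered at microtask end
  }
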
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
new file mode 100644
index 0000000000..e078bfcdb0
--- /dev/null
+++ b/deps/v8/test/cctest/test-object.cc
@@ -0,0 +1,71 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+static void CheckObject(Isolate* isolate, Handle<Object> obj,
+ const char* string) {
+ Object* print_string = *Object::NoSideEffectsToString(isolate, obj);
+ CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
+}
+
+static void CheckSmi(Isolate* isolate, int value, const char* string) {
+ Handle<Object> handle(Smi::FromInt(value), isolate);
+ CheckObject(isolate, handle, string);
+}
+
+static void CheckString(Isolate* isolate, const char* value,
+ const char* string) {
+ Handle<String> handle(isolate->factory()->NewStringFromAsciiChecked(value));
+ CheckObject(isolate, handle, string);
+}
+
+static void CheckNumber(Isolate* isolate, double value, const char* string) {
+ Handle<Object> number = isolate->factory()->NewNumber(value);
+ CHECK(number->IsNumber());
+ CheckObject(isolate, number, string);
+}
+
+static void CheckBoolean(Isolate* isolate, bool value, const char* string) {
+ CheckObject(isolate, value ? isolate->factory()->true_value()
+ : isolate->factory()->false_value(),
+ string);
+}
+
+TEST(NoSideEffectsToString) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+
+ HandleScope scope(isolate);
+
+ CheckString(isolate, "fisk hest", "fisk hest");
+ CheckNumber(isolate, 42.3, "42.3");
+ CheckSmi(isolate, 42, "42");
+ CheckBoolean(isolate, true, "true");
+ CheckBoolean(isolate, false, "false");
+ CheckBoolean(isolate, false, "false");
+ CheckObject(isolate, factory->undefined_value(), "undefined");
+ CheckObject(isolate, factory->null_value(), "null");
+
+ int lanes[] = {0, 1, 2, 3};
+ CheckObject(isolate, factory->NewInt32x4(lanes), "SIMD.Int32x4(0, 1, 2, 3)");
+
+ CheckObject(isolate, factory->error_to_string(), "[object Error]");
+ CheckObject(isolate, factory->stack_trace_symbol(),
+ "Symbol(stack_trace_symbol)");
+ CheckObject(isolate, factory->NewError(isolate->error_function(),
+ factory->empty_string()),
+ "Error");
+ CheckObject(isolate, factory->NewError(
+ isolate->error_function(),
+ factory->NewStringFromAsciiChecked("fisk hest")),
+ "Error: fisk hest");
+ CheckObject(isolate, factory->NewJSObject(isolate->object_function()),
+ "#<Object>");
+}
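
The new test-object.cc above funnels every value kind through a single
helper, CheckObject, and compares against a golden string. A standalone
sketch of that funnel pattern in plain C++ (not the V8 API; the conversion
stand-in is invented for illustration):

  #include <cassert>
  #include <string>

  // Stand-in for the conversion under test.
  static std::string ToDisplayString(bool value) {
    return value ? "true" : "false";
  }

  // Every CheckX helper reduces to one conversion plus one comparison.
  static void CheckBoolean(bool value, const char* expected) {
    assert(ToDisplayString(value) == expected);
  }

  int main() {
    CheckBoolean(true, "true");
    CheckBoolean(false, "false");
    return 0;
  }
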
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index ae278d8338..658e73ca84 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -29,15 +29,18 @@
#include <stdlib.h>
#include <string.h>
+#include <memory>
+
#include "src/v8.h"
-#include "src/ast/ast.h"
#include "src/ast/ast-numbering.h"
#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
#include "src/compiler.h"
#include "src/execution.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/preparser.h"
#include "src/parsing/rewriter.h"
@@ -62,14 +65,13 @@ TEST(ScanKeywords) {
KeywordToken key_token;
i::UnicodeCache unicode_cache;
- i::byte buffer[32];
+ char buffer[32];
for (int i = 0; (key_token = keywords[i]).keyword != NULL; i++) {
- const i::byte* keyword =
- reinterpret_cast<const i::byte*>(key_token.keyword);
- int length = i::StrLength(key_token.keyword);
+ const char* keyword = key_token.keyword;
+ size_t length = strlen(key_token.keyword);
CHECK(static_cast<int>(sizeof(buffer)) >= length);
{
- i::Utf8ToUtf16CharacterStream stream(keyword, length);
+ i::ExternalOneByteStringUtf16CharacterStream stream(keyword, length);
i::Scanner scanner(&unicode_cache);
scanner.Initialize(&stream);
CHECK_EQ(key_token.token, scanner.Next());
@@ -77,7 +79,7 @@ TEST(ScanKeywords) {
}
// Removing characters will make keyword matching fail.
{
- i::Utf8ToUtf16CharacterStream stream(keyword, length - 1);
+ i::ExternalOneByteStringUtf16CharacterStream stream(keyword, length - 1);
i::Scanner scanner(&unicode_cache);
scanner.Initialize(&stream);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
@@ -88,7 +90,7 @@ TEST(ScanKeywords) {
for (int j = 0; j < static_cast<int>(arraysize(chars_to_append)); ++j) {
i::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
- i::Utf8ToUtf16CharacterStream stream(buffer, length + 1);
+ i::ExternalOneByteStringUtf16CharacterStream stream(buffer, length + 1);
i::Scanner scanner(&unicode_cache);
scanner.Initialize(&stream);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
@@ -98,7 +100,7 @@ TEST(ScanKeywords) {
{
i::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
- i::Utf8ToUtf16CharacterStream stream(buffer, length);
+ i::ExternalOneByteStringUtf16CharacterStream stream(buffer, length);
i::Scanner scanner(&unicode_cache);
scanner.Initialize(&stream);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
@@ -120,15 +122,32 @@ TEST(ScanHTMLEndComments) {
// whitespace, even a multiline-comment containing a newline).
// This was not the case if it occurred before the first real token
// in the input.
+ // clang-format off
const char* tests[] = {
// Before first real token.
+ "-->",
+ "--> is eol-comment",
"--> is eol-comment\nvar y = 37;\n",
"\n --> is eol-comment\nvar y = 37;\n",
+ "\n-->is eol-comment\nvar y = 37;\n",
+ "\n-->\nvar y = 37;\n",
"/* precomment */ --> is eol-comment\nvar y = 37;\n",
+ "/* precomment */-->eol-comment\nvar y = 37;\n",
"\n/* precomment */ --> is eol-comment\nvar y = 37;\n",
+ "\n/*precomment*/-->eol-comment\nvar y = 37;\n",
// After first real token.
"var x = 42;\n--> is eol-comment\nvar y = 37;\n",
"var x = 42;\n/* precomment */ --> is eol-comment\nvar y = 37;\n",
+ "x/* precomment\n */ --> is eol-comment\nvar y = 37;\n",
+ "var x = 42; /* precomment\n */ --> is eol-comment\nvar y = 37;\n",
+ "var x = 42;/*\n*/-->is eol-comment\nvar y = 37;\n",
+ // With multiple comments preceding HTMLEndComment
+ "/* MLC \n */ /* SLDC */ --> is eol-comment\nvar y = 37;\n",
+ "/* MLC \n */ /* SLDC1 */ /* SLDC2 */ --> is eol-comment\nvar y = 37;\n",
+ "/* MLC1 \n */ /* MLC2 \n */ --> is eol-comment\nvar y = 37;\n",
+ "/* SLDC */ /* MLC \n */ --> is eol-comment\nvar y = 37;\n",
+ "/* MLC1 \n */ /* SLDC1 */ /* MLC2 \n */ /* SLDC2 */ --> is eol-comment\n"
+ "var y = 37;\n",
NULL
};
@@ -136,20 +155,18 @@ TEST(ScanHTMLEndComments) {
"x --> is eol-comment\nvar y = 37;\n",
"\"\\n\" --> is eol-comment\nvar y = 37;\n",
"x/* precomment */ --> is eol-comment\nvar y = 37;\n",
- "x/* precomment\n */ --> is eol-comment\nvar y = 37;\n",
"var x = 42; --> is eol-comment\nvar y = 37;\n",
- "var x = 42; /* precomment\n */ --> is eol-comment\nvar y = 37;\n",
NULL
};
+ // clang-format on
// Parser/Scanner needs a stack limit.
CcTest::i_isolate()->stack_guard()->SetStackLimit(
i::GetCurrentStackPosition() - 128 * 1024);
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; tests[i]; i++) {
- const i::byte* source =
- reinterpret_cast<const i::byte*>(tests[i]);
- i::Utf8ToUtf16CharacterStream stream(source, i::StrLength(tests[i]));
+ const char* source = tests[i];
+ i::ExternalOneByteStringUtf16CharacterStream stream(source);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -165,9 +182,8 @@ TEST(ScanHTMLEndComments) {
}
for (int i = 0; fail_tests[i]; i++) {
- const i::byte* source =
- reinterpret_cast<const i::byte*>(fail_tests[i]);
- i::Utf8ToUtf16CharacterStream stream(source, i::StrLength(fail_tests[i]));
+ const char* source = fail_tests[i];
+ i::ExternalOneByteStringUtf16CharacterStream stream(source);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -325,9 +341,7 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
const char* program = programs[i];
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const i::byte*>(program),
- static_cast<unsigned>(strlen(program)));
+ i::ExternalOneByteStringUtf16CharacterStream stream(program);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -361,9 +375,7 @@ TEST(StandAlonePreParserNoNatives) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
const char* program = programs[i];
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const i::byte*>(program),
- static_cast<unsigned>(strlen(program)));
+ i::ExternalOneByteStringUtf16CharacterStream stream(program);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -432,9 +444,7 @@ TEST(RegressChromium62639) {
// and then used the invalid currently scanned literal. This always
// failed in debug mode, and sometimes crashed in release mode.
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const i::byte*>(program),
- static_cast<unsigned>(strlen(program)));
+ i::ExternalOneByteStringUtf16CharacterStream stream(program);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -512,16 +522,14 @@ TEST(PreParseOverflow) {
i::GetCurrentStackPosition() - 128 * 1024);
size_t kProgramSize = 1024 * 1024;
- v8::base::SmartArrayPointer<char> program(
- i::NewArray<char>(kProgramSize + 1));
+ std::unique_ptr<char[]> program(i::NewArray<char>(kProgramSize + 1));
memset(program.get(), '(', kProgramSize);
program[kProgramSize] = '\0';
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const i::byte*>(program.get()),
- static_cast<unsigned>(kProgramSize));
+ i::ExternalOneByteStringUtf16CharacterStream stream(program.get(),
+ kProgramSize);
i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
@@ -566,7 +574,7 @@ void TestCharacterStream(const char* one_byte_source, unsigned length,
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
i::HandleScope test_scope(isolate);
- v8::base::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
+ std::unique_ptr<i::uc16[]> uc16_buffer(new i::uc16[length]);
for (unsigned i = 0; i < length; i++) {
uc16_buffer[i] = static_cast<i::uc16>(one_byte_source[i]);
}
@@ -577,13 +585,20 @@ void TestCharacterStream(const char* one_byte_source, unsigned length,
TestExternalResource resource(uc16_buffer.get(), length);
i::Handle<i::String> uc16_string(
factory->NewExternalStringFromTwoByte(&resource).ToHandleChecked());
+ ScriptResource one_byte_resource(one_byte_source, length);
+ i::Handle<i::String> ext_one_byte_string(
+ factory->NewExternalStringFromOneByte(&one_byte_resource)
+ .ToHandleChecked());
i::ExternalTwoByteStringUtf16CharacterStream uc16_stream(
i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
+ i::ExternalOneByteStringUtf16CharacterStream one_byte_stream(
+ i::Handle<i::ExternalOneByteString>::cast(ext_one_byte_string), start,
+ end);
i::GenericStringUtf16CharacterStream string_stream(one_byte_string, start,
end);
- i::Utf8ToUtf16CharacterStream utf8_stream(
- reinterpret_cast<const i::byte*>(one_byte_source), end);
+ i::ExternalOneByteStringUtf16CharacterStream utf8_stream(one_byte_source,
+ end);
utf8_stream.SeekForward(start);
unsigned i = start;
@@ -592,17 +607,21 @@ void TestCharacterStream(const char* one_byte_source, unsigned length,
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
int32_t c0 = one_byte_source[i];
int32_t c1 = uc16_stream.Advance();
int32_t c2 = string_stream.Advance();
int32_t c3 = utf8_stream.Advance();
+ int32_t c4 = one_byte_stream.Advance();
i++;
CHECK_EQ(c0, c1);
CHECK_EQ(c0, c2);
CHECK_EQ(c0, c3);
+ CHECK_EQ(c0, c4);
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
}
while (i > start + sub_length / 4) {
// Pushback, re-read, pushback again.
@@ -610,66 +629,83 @@ void TestCharacterStream(const char* one_byte_source, unsigned length,
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
uc16_stream.PushBack(c0);
string_stream.PushBack(c0);
utf8_stream.PushBack(c0);
+ one_byte_stream.PushBack(c0);
i--;
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
int32_t c1 = uc16_stream.Advance();
int32_t c2 = string_stream.Advance();
int32_t c3 = utf8_stream.Advance();
+ int32_t c4 = one_byte_stream.Advance();
i++;
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
CHECK_EQ(c0, c1);
CHECK_EQ(c0, c2);
CHECK_EQ(c0, c3);
+ CHECK_EQ(c0, c4);
uc16_stream.PushBack(c0);
string_stream.PushBack(c0);
utf8_stream.PushBack(c0);
+ one_byte_stream.PushBack(c0);
i--;
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
}
unsigned halfway = start + sub_length / 2;
uc16_stream.SeekForward(halfway - i);
string_stream.SeekForward(halfway - i);
utf8_stream.SeekForward(halfway - i);
+ one_byte_stream.SeekForward(halfway - i);
i = halfway;
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
while (i < end) {
// Read streams one char at a time
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
int32_t c0 = one_byte_source[i];
int32_t c1 = uc16_stream.Advance();
int32_t c2 = string_stream.Advance();
int32_t c3 = utf8_stream.Advance();
+ int32_t c4 = one_byte_stream.Advance();
i++;
CHECK_EQ(c0, c1);
CHECK_EQ(c0, c2);
CHECK_EQ(c0, c3);
+ CHECK_EQ(c0, c4);
CHECK_EQU(i, uc16_stream.pos());
CHECK_EQU(i, string_stream.pos());
CHECK_EQU(i, utf8_stream.pos());
+ CHECK_EQU(i, one_byte_stream.pos());
}
int32_t c1 = uc16_stream.Advance();
int32_t c2 = string_stream.Advance();
int32_t c3 = utf8_stream.Advance();
+ int32_t c4 = one_byte_stream.Advance();
CHECK_LT(c1, 0);
CHECK_LT(c2, 0);
CHECK_LT(c3, 0);
+ CHECK_LT(c4, 0);
}
+#undef CHECK_EQU
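
TestCharacterStream above is a differential test: it advances several
stream implementations in lock step and checks every result and position
against the raw source. A standalone sketch of the lock-step idea in plain
C++ (toy stream types, not the V8 classes):

  #include <cassert>
  #include <cstring>

  struct IndexStream {    // implementation 1: index-based cursor
    const char* s; size_t pos;
    int Advance() { return s[pos] ? s[pos++] : -1; }
  };

  struct PointerStream {  // implementation 2: pointer-based cursor
    const char* p;
    int Advance() { return *p ? *p++ : -1; }
  };

  int main() {
    const char* source = "abc";
    IndexStream a{source, 0};
    PointerStream b{source};
    for (size_t i = 0; i <= std::strlen(source); i++) {
      int ca = a.Advance();
      int cb = b.Advance();
      assert(ca == cb);   // lock-step agreement, including EOF (-1)
    }
    return 0;
  }
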
TEST(CharacterStreams) {
v8::Isolate* isolate = CcTest::isolate();
@@ -692,63 +728,6 @@ TEST(CharacterStreams) {
}
-TEST(Utf8CharacterStream) {
- static const unsigned kMaxUC16CharU = unibrow::Utf8::kMaxThreeByteChar;
- static const int kMaxUC16Char = static_cast<int>(kMaxUC16CharU);
-
- static const int kAllUtf8CharsSize =
- (unibrow::Utf8::kMaxOneByteChar + 1) +
- (unibrow::Utf8::kMaxTwoByteChar - unibrow::Utf8::kMaxOneByteChar) * 2 +
- (unibrow::Utf8::kMaxThreeByteChar - unibrow::Utf8::kMaxTwoByteChar) * 3;
- static const unsigned kAllUtf8CharsSizeU =
- static_cast<unsigned>(kAllUtf8CharsSize);
-
- char buffer[kAllUtf8CharsSizeU];
- unsigned cursor = 0;
- for (int i = 0; i <= kMaxUC16Char; i++) {
- cursor += unibrow::Utf8::Encode(buffer + cursor, i,
- unibrow::Utf16::kNoPreviousCharacter, true);
- }
- CHECK(cursor == kAllUtf8CharsSizeU);
-
- i::Utf8ToUtf16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
- kAllUtf8CharsSizeU);
- int32_t bad = unibrow::Utf8::kBadChar;
- for (int i = 0; i <= kMaxUC16Char; i++) {
- CHECK_EQU(i, stream.pos());
- int32_t c = stream.Advance();
- if (i >= 0xd800 && i <= 0xdfff) {
- CHECK_EQ(bad, c);
- } else {
- CHECK_EQ(i, c);
- }
- CHECK_EQU(i + 1, stream.pos());
- }
- for (int i = kMaxUC16Char; i >= 0; i--) {
- CHECK_EQU(i + 1, stream.pos());
- stream.PushBack(i);
- CHECK_EQU(i, stream.pos());
- }
- int i = 0;
- while (stream.pos() < kMaxUC16CharU) {
- CHECK_EQU(i, stream.pos());
- int progress = static_cast<int>(stream.SeekForward(12));
- i += progress;
- int32_t c = stream.Advance();
- if (i >= 0xd800 && i <= 0xdfff) {
- CHECK_EQ(bad, c);
- } else if (i <= kMaxUC16Char) {
- CHECK_EQ(i, c);
- } else {
- CHECK_EQ(-1, c);
- }
- i += 1;
- CHECK_EQU(i, stream.pos());
- }
-}
-
-#undef CHECK_EQU
-
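
The deleted Utf8CharacterStream test above sized its buffer with the
standard UTF-8 length arithmetic: one byte per code point through U+007F,
two through U+07FF, three through U+FFFF. A standalone check of that
arithmetic in plain C++ (not V8 code):

  #include <cassert>

  int main() {
    const int kMaxOneByte = 0x7F, kMaxTwoByte = 0x7FF, kMaxThreeByte = 0xFFFF;
    int total = (kMaxOneByte + 1)                   // 128 code points, 1 byte
              + (kMaxTwoByte - kMaxOneByte) * 2     // 1920 code points, 2 bytes
              + (kMaxThreeByte - kMaxTwoByte) * 3;  // 63488 code points, 3 bytes
    assert(total == 128 + 3840 + 190464);           // 194432 bytes in all
    return 0;
  }
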
void TestStreamScanner(i::Utf16CharacterStream* stream,
i::Token::Value* expected_tokens,
int skip_pos = 0, // Zero means not skipping.
@@ -773,8 +752,7 @@ TEST(StreamScanner) {
v8::V8::Initialize();
const char* str1 = "{ foo get for : */ <- \n\n /*foo*/ bib";
- i::Utf8ToUtf16CharacterStream stream1(reinterpret_cast<const i::byte*>(str1),
- static_cast<unsigned>(strlen(str1)));
+ i::ExternalOneByteStringUtf16CharacterStream stream1(str1);
i::Token::Value expectations1[] = {
i::Token::LBRACE,
i::Token::IDENTIFIER,
@@ -792,8 +770,7 @@ TEST(StreamScanner) {
TestStreamScanner(&stream1, expectations1, 0, 0);
const char* str2 = "case default const {THIS\nPART\nSKIPPED} do";
- i::Utf8ToUtf16CharacterStream stream2(reinterpret_cast<const i::byte*>(str2),
- static_cast<unsigned>(strlen(str2)));
+ i::ExternalOneByteStringUtf16CharacterStream stream2(str2);
i::Token::Value expectations2[] = {
i::Token::CASE,
i::Token::DEFAULT,
@@ -823,25 +800,21 @@ TEST(StreamScanner) {
for (int i = 0; i <= 4; i++) {
expectations3[6 - i] = i::Token::ILLEGAL;
expectations3[5 - i] = i::Token::EOS;
- i::Utf8ToUtf16CharacterStream stream3(
- reinterpret_cast<const i::byte*>(str3),
- static_cast<unsigned>(strlen(str3)));
+ i::ExternalOneByteStringUtf16CharacterStream stream3(str3);
TestStreamScanner(&stream3, expectations3, 1, 1 + i);
}
}
void TestScanRegExp(const char* re_source, const char* expected) {
- i::Utf8ToUtf16CharacterStream stream(
- reinterpret_cast<const i::byte*>(re_source),
- static_cast<unsigned>(strlen(re_source)));
+ i::ExternalOneByteStringUtf16CharacterStream stream(re_source);
i::HandleScope scope(CcTest::i_isolate());
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(&stream);
i::Token::Value start = scanner.peek();
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
- CHECK(scanner.ScanRegExpPattern(start == i::Token::ASSIGN_DIV));
+ CHECK(scanner.ScanRegExpPattern());
scanner.Next(); // Current token is now the regexp literal.
i::Zone zone(CcTest::i_isolate()->allocator());
i::AstValueFactory ast_value_factory(&zone,
@@ -1070,27 +1043,31 @@ TEST(ScopeUsesArgumentsSuperThis) {
i::Zone zone(CcTest::i_isolate()->allocator());
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- parser.set_allow_harmony_sloppy(true);
info.set_global();
CHECK(parser.Parse(&info));
CHECK(i::Rewriter::Rewrite(&info));
- CHECK(i::Scope::Analyze(&info));
+ i::Scope::Analyze(&info);
CHECK(info.literal() != NULL);
- i::Scope* script_scope = info.literal()->scope();
+ i::DeclarationScope* script_scope = info.literal()->scope();
CHECK(script_scope->is_script_scope());
- CHECK_EQ(1, script_scope->inner_scopes()->length());
- i::Scope* scope = script_scope->inner_scopes()->at(0);
+ i::Scope* scope = script_scope->inner_scope();
+ DCHECK_NOT_NULL(scope);
+ DCHECK_NULL(scope->sibling());
// Adjust for constructor scope.
if (j == 2) {
- CHECK_EQ(1, scope->inner_scopes()->length());
- scope = scope->inner_scopes()->at(0);
+ scope = scope->inner_scope();
+ DCHECK_NOT_NULL(scope);
+ DCHECK_NULL(scope->sibling());
+ }
+ // Arrows themselves never get an arguments object.
+ if ((source_data[i].expected & ARGUMENTS) != 0 &&
+ !scope->AsDeclarationScope()->is_arrow_scope()) {
+ CHECK_NOT_NULL(scope->AsDeclarationScope()->arguments());
}
- CHECK_EQ((source_data[i].expected & ARGUMENTS) != 0,
- scope->uses_arguments());
CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0,
- scope->uses_super_property());
+ scope->AsDeclarationScope()->uses_super_property());
if ((source_data[i].expected & THIS) != 0) {
// Currently the is_used() flag is conservative; all variables in a
// script scope are marked as used.
@@ -1122,13 +1099,11 @@ static void CheckParsesToNumber(const char* source, bool with_dot) {
i::ParseInfo info(handles.main_zone(), script);
i::Parser parser(&info);
- parser.set_allow_harmony_sloppy(true);
info.set_global();
info.set_lazy(false);
info.set_allow_lazy_parsing(false);
info.set_toplevel(true);
- i::CompilationInfo compilation_info(&info);
CHECK(i::Compiler::ParseAndAnalyze(&info));
CHECK(info.scope()->declarations()->length() == 1);
@@ -1399,9 +1374,10 @@ TEST(ScopePositions) {
CHECK(scope->is_script_scope());
CHECK_EQ(scope->start_position(), 0);
CHECK_EQ(scope->end_position(), kProgramSize);
- CHECK_EQ(scope->inner_scopes()->length(), 1);
- i::Scope* inner_scope = scope->inner_scopes()->at(0);
+ i::Scope* inner_scope = scope->inner_scope();
+ DCHECK_NOT_NULL(inner_scope);
+ DCHECK_NULL(inner_scope->sibling());
CHECK_EQ(inner_scope->scope_type(), source_data[i].scope_type);
CHECK_EQ(inner_scope->start_position(), kPrefixLen);
// The end position of a token is one position after the last
@@ -1438,7 +1414,6 @@ TEST(DiscardFunctionBody) {
i::ParseInfo info(&zone, script);
info.set_allow_lazy_parsing();
i::Parser parser(&info);
- parser.set_allow_harmony_sloppy(true);
parser.Parse(&info);
function = info.literal();
CHECK_NOT_NULL(function);
@@ -1449,8 +1424,8 @@ TEST(DiscardFunctionBody) {
AsCall()->expression()->AsFunctionLiteral();
i::Scope* inner_scope = inner->scope();
i::FunctionLiteral* fun = nullptr;
- if (inner_scope->declarations()->length() > 1) {
- fun = inner_scope->declarations()->at(1)->AsFunctionDeclaration()->fun();
+ if (inner_scope->declarations()->length() > 0) {
+ fun = inner_scope->declarations()->at(0)->AsFunctionDeclaration()->fun();
} else {
// TODO(conradw): This path won't be hit until the other test cases can be
// uncommented.
@@ -1509,12 +1484,12 @@ i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonySloppy,
- kAllowHarmonySloppyLet,
- kAllowHarmonyNewTarget,
kAllowHarmonyFunctionSent,
kAllowHarmonyRestrictiveDeclarations,
- kAllowHarmonyExponentiationOperator
+ kAllowHarmonyForIn,
+ kAllowHarmonyAsyncAwait,
+ kAllowHarmonyRestrictiveGenerators,
+ kAllowHarmonyTrailingCommas,
};
enum ParserSyncTestResult {
@@ -1528,28 +1503,31 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
i::EnumSet<ParserFlag> flags) {
parser->set_allow_lazy(flags.Contains(kAllowLazy));
parser->set_allow_natives(flags.Contains(kAllowNatives));
- parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
- parser->set_allow_harmony_sloppy_let(flags.Contains(kAllowHarmonySloppyLet));
parser->set_allow_harmony_function_sent(
flags.Contains(kAllowHarmonyFunctionSent));
parser->set_allow_harmony_restrictive_declarations(
flags.Contains(kAllowHarmonyRestrictiveDeclarations));
- parser->set_allow_harmony_exponentiation_operator(
- flags.Contains(kAllowHarmonyExponentiationOperator));
+ parser->set_allow_harmony_for_in(flags.Contains(kAllowHarmonyForIn));
+ parser->set_allow_harmony_async_await(
+ flags.Contains(kAllowHarmonyAsyncAwait));
+ parser->set_allow_harmony_restrictive_generators(
+ flags.Contains(kAllowHarmonyRestrictiveGenerators));
+ parser->set_allow_harmony_trailing_commas(
+ flags.Contains(kAllowHarmonyTrailingCommas));
}
void TestParserSyncWithFlags(i::Handle<i::String> source,
i::EnumSet<ParserFlag> flags,
ParserSyncTestResult result,
- bool is_module = false) {
+ bool is_module = false,
+ bool test_preparser = true) {
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
uintptr_t stack_limit = isolate->stack_guard()->real_climit();
int preparser_materialized_literals = -1;
int parser_materialized_literals = -2;
- bool test_preparser = !is_module;
// Preparse the data.
i::CompleteParserRecorder log;
@@ -1563,8 +1541,8 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
stack_limit);
SetParserFlags(&preparser, flags);
scanner.Initialize(&stream);
- i::PreParser::PreParseResult result = preparser.PreParseProgram(
- &preparser_materialized_literals);
+ i::PreParser::PreParseResult result =
+ preparser.PreParseProgram(&preparser_materialized_literals, is_module);
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
}
bool preparse_error = log.HasError();
@@ -1598,6 +1576,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
i::Handle<i::String> message_string = i::Handle<i::String>::cast(
i::JSReceiver::GetProperty(isolate, exception_handle, "message")
.ToHandleChecked());
+ isolate->clear_pending_exception();
if (result == kSuccess) {
v8::base::OS::Print(
@@ -1674,7 +1653,7 @@ void TestParserSync(const char* source, const ParserFlag* varying_flags,
size_t always_true_flags_length = 0,
const ParserFlag* always_false_flags = NULL,
size_t always_false_flags_length = 0,
- bool is_module = false) {
+ bool is_module = false, bool test_preparser = true) {
i::Handle<i::String> str =
CcTest::i_isolate()->factory()->NewStringFromAsciiChecked(source);
for (int bits = 0; bits < (1 << varying_flags_length); bits++) {
@@ -1691,7 +1670,7 @@ void TestParserSync(const char* source, const ParserFlag* varying_flags,
++flag_index) {
flags.Remove(always_false_flags[flag_index]);
}
- TestParserSyncWithFlags(str, flags, result, is_module);
+ TestParserSyncWithFlags(str, flags, result, is_module, test_preparser);
}
}
@@ -1839,7 +1818,8 @@ void RunParserSyncTest(const char* context_data[][2],
const ParserFlag* always_true_flags = NULL,
int always_true_len = 0,
const ParserFlag* always_false_flags = NULL,
- int always_false_len = 0, bool is_module = false) {
+ int always_false_len = 0, bool is_module = false,
+ bool test_preparser = true) {
v8::HandleScope handles(CcTest::isolate());
v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
@@ -1894,7 +1874,7 @@ void RunParserSyncTest(const char* context_data[][2],
CHECK(length == kProgramSize);
TestParserSync(program.start(), flags, flags_len, result,
always_true_flags, always_true_len, always_false_flags,
- always_false_len, is_module);
+ always_false_len, is_module, test_preparser);
}
}
delete[] generated_flags;
@@ -1908,10 +1888,11 @@ void RunModuleParserSyncTest(const char* context_data[][2],
const ParserFlag* always_true_flags = NULL,
int always_true_len = 0,
const ParserFlag* always_false_flags = NULL,
- int always_false_len = 0) {
+ int always_false_len = 0,
+ bool test_preparser = true) {
RunParserSyncTest(context_data, statement_data, result, flags, flags_len,
always_true_flags, always_true_len, always_false_flags,
- always_false_len, true);
+ always_false_len, true, test_preparser);
}
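
The harness plumbing above runs each test program under every combination
of the varying parser flags; TestParserSync enumerates those combinations
with a bitmask loop over "bits". A standalone sketch of that subset
enumeration in plain C++ (the flag names are illustrative only):

  #include <cstdio>

  int main() {
    const char* flags[] = {"lazy", "natives", "for_in"};
    const int n = 3;
    for (int bits = 0; bits < (1 << n); bits++) {  // all 2^n subsets
      for (int i = 0; i < n; i++) {
        if (bits & (1 << i)) std::printf("%s ", flags[i]);  // flag i is set
      }
      std::printf("\n");  // one line per flag combination
    }
    return 0;
  }
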
@@ -2017,25 +1998,28 @@ TEST(NoErrorsEvalAndArgumentsStrict) {
RunParserSyncTest(context_data, statement_data, kSuccess);
}
+#define FUTURE_STRICT_RESERVED_WORDS_NO_LET(V) \
+ V(implements) \
+ V(interface) \
+ V(package) \
+ V(private) \
+ V(protected) \
+ V(public) \
+ V(static) \
+ V(yield)
#define FUTURE_STRICT_RESERVED_WORDS(V) \
- V(implements) \
- V(interface) \
V(let) \
- V(package) \
- V(private) \
- V(protected) \
- V(public) \
- V(static) \
- V(yield)
+ FUTURE_STRICT_RESERVED_WORDS_NO_LET(V)
+#define LIMITED_FUTURE_STRICT_RESERVED_WORDS_NO_LET(V) \
+ V(implements) \
+ V(static) \
+ V(yield)
#define LIMITED_FUTURE_STRICT_RESERVED_WORDS(V) \
- V(implements) \
V(let) \
- V(static) \
- V(yield)
-
+ LIMITED_FUTURE_STRICT_RESERVED_WORDS_NO_LET(V)
#define FUTURE_STRICT_RESERVED_STATEMENTS(NAME) \
"var " #NAME ";", \
@@ -2051,25 +2035,52 @@ TEST(NoErrorsEvalAndArgumentsStrict) {
"++" #NAME ";", \
#NAME " ++;",
+// clang-format off
+#define FUTURE_STRICT_RESERVED_LEX_BINDINGS(NAME) \
+ "let " #NAME ";", \
+ "for (let " #NAME "; false; ) {}", \
+ "for (let " #NAME " in {}) {}", \
+ "for (let " #NAME " of []) {}", \
+ "const " #NAME " = null;", \
+ "for (const " #NAME " = null; false; ) {}", \
+ "for (const " #NAME " in {}) {}", \
+ "for (const " #NAME " of []) {}",
+// clang-format on
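
The word lists above (e.g. LIMITED_FUTURE_STRICT_RESERVED_WORDS) are
X-macros: each applies a caller-supplied macro such as
FUTURE_STRICT_RESERVED_LEX_BINDINGS to every entry, so one list can
generate statements, bindings, or any other expansion. A tiny standalone
illustration in plain C++ (hypothetical names):

  #include <cstdio>

  #define WORDS(V) V(implements) V(static) V(yield)
  #define PRINT_WORD(NAME) std::printf("%s\n", #NAME);

  int main() {
    WORDS(PRINT_WORD)  // expands to three printf statements
    return 0;
  }
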
TEST(ErrorsFutureStrictReservedWords) {
// Tests that both preparsing and parsing produce the right kind of errors for
// using future strict reserved words as identifiers. Without the strict mode,
// it's ok to use future strict reserved words as identifiers. With the strict
// mode, it isn't.
- const char* context_data[][2] = {
+ const char* strict_contexts[][2] = {
{"function test_func() {\"use strict\"; ", "}"},
{"() => { \"use strict\"; ", "}"},
{NULL, NULL}};
+ // clang-format off
const char* statement_data[] {
LIMITED_FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_STATEMENTS)
+ LIMITED_FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_LEX_BINDINGS)
NULL
};
+ // clang-format on
- RunParserSyncTest(context_data, statement_data, kError);
-}
+ RunParserSyncTest(strict_contexts, statement_data, kError);
+ // From ES2015, 13.3.1.1 Static Semantics: Early Errors:
+ //
+ // > LexicalDeclaration : LetOrConst BindingList ;
+ // >
+ // > - It is a Syntax Error if the BoundNames of BindingList contains "let".
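+ //
+ // For example, "let let;" and "for (const let of []) {}" must be rejected
+ // even without a "use strict" directive.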
+ const char* non_strict_contexts[][2] = {{"", ""},
+ {"function test_func() {", "}"},
+ {"() => {", "}"},
+ {NULL, NULL}};
+ const char* invalid_statements[] = {FUTURE_STRICT_RESERVED_LEX_BINDINGS(let)
+ NULL};
+
+ RunParserSyncTest(non_strict_contexts, invalid_statements, kError);
+}
#undef LIMITED_FUTURE_STRICT_RESERVED_WORDS
@@ -2082,10 +2093,13 @@ TEST(NoErrorsFutureStrictReservedWords) {
{ NULL, NULL }
};
+ // clang-format off
const char* statement_data[] = {
FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_STATEMENTS)
+ FUTURE_STRICT_RESERVED_WORDS_NO_LET(FUTURE_STRICT_RESERVED_LEX_BINDINGS)
NULL
};
+ // clang-format on
RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2349,9 +2363,7 @@ TEST(NoErrorsGenerator) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -2407,6 +2419,10 @@ TEST(ErrorsYieldGenerator) {
"var {foo: yield 24} = {a: 42};",
"[yield 24] = [42];",
"({a: yield 24} = {a: 42});",
+ "for (yield 'x' in {});",
+ "for (yield 'x' of {});",
+ "for (yield 'x' in {} in {});",
+ "for (yield 'x' in {} of {});",
"class C extends yield { }",
NULL
};
@@ -3305,11 +3321,10 @@ TEST(SerializationOfMaybeAssignmentFlag) {
const i::AstRawString* name = avf.GetOneByteString("result");
i::Handle<i::String> str = name->string();
CHECK(str->IsInternalizedString());
- i::Scope* script_scope =
- new (&zone) i::Scope(&zone, NULL, i::SCRIPT_SCOPE, &avf);
- script_scope->Initialize();
- i::Scope* s =
- i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
+ i::DeclarationScope* script_scope = new (&zone) i::DeclarationScope(&zone);
+ i::Scope* s = i::Scope::DeserializeScopeChain(
+ isolate, &zone, context, script_scope, &avf,
+ i::Scope::DeserializationMode::kKeepScopeInfo);
CHECK(s != script_scope);
CHECK(name != NULL);
@@ -3353,11 +3368,10 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
avf.Internalize(isolate);
- i::Scope* script_scope =
- new (&zone) i::Scope(&zone, NULL, i::SCRIPT_SCOPE, &avf);
- script_scope->Initialize();
- i::Scope* s =
- i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
+ i::DeclarationScope* script_scope = new (&zone) i::DeclarationScope(&zone);
+ i::Scope* s = i::Scope::DeserializeScopeChain(
+ isolate, &zone, context, script_scope, &avf,
+ i::Scope::DeserializationMode::kKeepScopeInfo);
CHECK(s != script_scope);
const i::AstRawString* name_x = avf.GetOneByteString("x");
@@ -3497,8 +3511,9 @@ TEST(InnerAssignment) {
CHECK(info.literal() != NULL);
i::Scope* scope = info.literal()->scope();
- CHECK_EQ(scope->inner_scopes()->length(), 1);
- i::Scope* inner_scope = scope->inner_scopes()->at(0);
+ i::Scope* inner_scope = scope->inner_scope();
+ CHECK_NOT_NULL(inner_scope);
+ CHECK_NULL(inner_scope->sibling());
const i::AstRawString* var_name =
info.ast_value_factory()->GetOneByteString("x");
i::Variable* var = inner_scope->Lookup(var_name);
@@ -3533,10 +3548,8 @@ TEST(UseAsmUseCount) {
CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
CompileRun("\"use asm\";\n"
"var foo = 1;\n"
- "\"use asm\";\n" // Only the first one counts.
"function bar() { \"use asm\"; var baz = 1; }");
- // Optimizing will double-count because the source is parsed twice.
- CHECK_EQ(i::FLAG_always_opt ? 4 : 2, use_counts[v8::Isolate::kUseAsm]);
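+ // The exact count depends on how many times the source is parsed (with
+ // --always-opt it is parsed twice), so only require a nonzero count.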
+ CHECK_LT(0, use_counts[v8::Isolate::kUseAsm]);
}
@@ -3949,11 +3962,7 @@ TEST(SuperNoErrors) {
NULL
};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kSuccess);
}
@@ -3980,9 +3989,7 @@ TEST(SuperErrors) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, expression_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, expression_data, kError);
}
@@ -3996,9 +4003,7 @@ TEST(SuperCall) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, success_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, success_data, kSuccess);
const char* error_data[] = {
"class C { constructor() { super(); } }",
@@ -4020,8 +4025,7 @@ TEST(SuperCall) {
NULL
};
- RunParserSyncTest(context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kError);
}
@@ -4046,9 +4050,7 @@ TEST(SuperNewNoErrors) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, expression_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, expression_data, kSuccess);
}
@@ -4079,9 +4081,7 @@ TEST(SuperNewErrors) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -4119,11 +4119,7 @@ TEST(SuperErrorsNonMethods) {
NULL
};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, statement_data, kError);
}
@@ -4336,9 +4332,7 @@ TEST(ClassExpressionNoErrors) {
"class name extends class base {} {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, class_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_data, kSuccess);
}
@@ -4412,11 +4406,7 @@ TEST(ClassBodyNoErrors) {
NULL};
// clang-format on
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kSuccess);
}
@@ -4469,11 +4459,7 @@ TEST(ClassPropertyNameNoErrors) {
"finally",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, name_data, kSuccess);
}
@@ -4499,11 +4485,7 @@ TEST(ClassExpressionErrors) {
"class { m() {}, n() {} }", // No commas allowed.
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_data, kError);
}
@@ -4535,11 +4517,7 @@ TEST(ClassDeclarationErrors) {
"class name { *static m() {} }",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_data, kError);
}
@@ -4564,11 +4542,7 @@ TEST(ClassNameErrors) {
"yield",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_name, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_name, kError);
}
@@ -4596,11 +4570,7 @@ TEST(ClassGetterParamNameErrors) {
"yield",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_name, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_name, kError);
}
@@ -4623,11 +4593,7 @@ TEST(ClassStaticPrototypeErrors) {
"static *'prot\\u006ftype'() {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kError);
}
@@ -4649,11 +4615,7 @@ TEST(ClassSpecialConstructorErrors) {
"*'c\\u006fnstructor'() {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kError);
}
@@ -4670,11 +4632,7 @@ TEST(ClassConstructorNoErrors) {
"static *constructor() {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kSuccess);
}
@@ -4687,11 +4645,7 @@ TEST(ClassMultipleConstructorErrors) {
"constructor() {}; constructor() {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kError);
}
@@ -4708,11 +4662,7 @@ TEST(ClassMultiplePropertyNamesNoErrors) {
"get m() {}; set m(_) {}; get m() {}; set m(_) {};",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kSuccess);
}
@@ -4727,11 +4677,7 @@ TEST(ClassesAreStrictErrors) {
"class C { *method() { with ({}) {} } }",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy
- };
- RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kError);
}
@@ -4869,10 +4815,7 @@ TEST(StatementParsingInForIn) {
"for(let x in {}, {}) {}", "for(const x in {}, {}) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -4980,9 +4923,7 @@ TEST(ForInMultipleDeclarationsError) {
"for (const i, j = 1 in {}) {}",
"for (const i, j = void 0 in [1, 2, 3]) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5009,9 +4950,7 @@ TEST(ForOfMultipleDeclarationsError) {
"for (const i, j = 1 of {}) {}",
"for (const i, j = void 0 of [1, 2, 3]) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5026,9 +4965,7 @@ TEST(ForInNoDeclarationsError) {
"for (var in {}) {}",
"for (const in {}) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5043,9 +4980,7 @@ TEST(ForOfNoDeclarationsError) {
"for (var of [1, 2, 3]) {}",
"for (const of [1, 2, 3]) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5059,10 +4994,7 @@ TEST(ForOfInOperator) {
"for(x of 'foo' in {}) {}", "for(var x of 'foo' in {}) {}",
"for(let x of 'foo' in {}) {}", "for(const x of 'foo' in {}) {}", NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5073,10 +5005,7 @@ TEST(ForOfYieldIdentifier) {
"for(let x of yield) {}", "for(const x of yield) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5091,10 +5020,7 @@ TEST(ForOfYieldExpression) {
"function* g() { for(let x of yield) {} }",
"function* g() { for(const x of yield) {} }", NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5112,10 +5038,7 @@ TEST(ForOfExpressionError) {
"for(x of { y = 23 }) {}", "for(var x of { y = 23 }) {}",
"for(let x of { y = 23 }) {}", "for(const x of { y = 23 }) {}", NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5205,7 +5128,7 @@ TEST(ScanTemplateLiterals) {
"`foo${\r a}`",
"`foo${'a' in a}`",
NULL};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5240,7 +5163,7 @@ TEST(ScanTaggedTemplateLiterals) {
"tag`foo${\r a}`",
"tag`foo${'a' in a}`",
NULL};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5267,7 +5190,7 @@ TEST(TemplateMaterializedLiterals) {
NULL
};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -5301,7 +5224,7 @@ TEST(ScanUnterminatedTemplateLiterals) {
"`foo${fn(}`",
"`foo${1 if}`",
NULL};
- RunParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5321,7 +5244,7 @@ TEST(TemplateLiteralsIllegalTokens) {
"`hello${1}\\x\n${2}`",
NULL};
- RunParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5396,13 +5319,12 @@ TEST(RestParameterInSetterMethodError) {
{nullptr, nullptr}};
const char* data[] = {"...a", "...arguments", "...eval", nullptr};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
TEST(RestParametersEvalArguments) {
+ // clang-format off
const char* strict_context_data[][2] =
{{"'use strict';(function(",
"){ return;})(1, [], /regexp/, 'str',function(){});"},
@@ -5416,8 +5338,10 @@ TEST(RestParametersEvalArguments) {
"...eval",
"eval, ...args",
"...arguments",
- "arguments, ...args",
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=4577
+ // "arguments, ...args",
NULL};
+ // clang-format on
// Fail in strict mode
RunParserSyncTest(strict_context_data, data, kError);
@@ -5484,7 +5408,7 @@ TEST(BadRestSpread) {
"var [...x,] = [1,2,3];", "var [...x, y] = [1,2,3];",
"var {...x} = [1,2,3];", "var { x } = {x: ...[1,2,3]}",
NULL};
- RunParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, data, kError);
}
@@ -5494,28 +5418,12 @@ TEST(LexicalScopingSloppyMode) {
{"function f() {", "}"},
{"{", "}"},
{NULL, NULL}};
- const char* bad_data[] = {
- "let x = 1;",
- "for(let x = 1;;){}",
- "for(let x of []){}",
- "for(let x in []){}",
- "class C {}",
- "class C extends D {}",
- "(class {})",
- "(class extends D {})",
- "(class C {})",
- "(class C extends D {})",
- NULL};
- static const ParserFlag always_false_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, bad_data, kError, NULL, 0, NULL, 0,
- always_false_flags, arraysize(always_false_flags));
const char* good_data[] = {
"let = 1;",
"for(let = 1;;){}",
NULL};
- RunParserSyncTest(context_data, good_data, kSuccess, NULL, 0, NULL, 0,
- always_false_flags, arraysize(always_false_flags));
+ RunParserSyncTest(context_data, good_data, kSuccess);
}
@@ -5535,11 +5443,7 @@ TEST(ComputedPropertyName) {
"var name",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy,
- };
- RunParserSyncTest(context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kError);
const char* name_data[] = {
"1",
@@ -5550,8 +5454,7 @@ TEST(ComputedPropertyName) {
"{}",
NULL};
- RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, name_data, kSuccess);
}
@@ -5563,11 +5466,7 @@ TEST(ComputedPropertyNameShorthandError) {
"[1], a: 1",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonySloppy,
- };
- RunParserSyncTest(context_data, error_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, error_data, kError);
}
@@ -5587,8 +5486,8 @@ TEST(BasicImportExportParsing) {
"export default function() {}",
"export default function*() {}",
"export default class C {}",
- "export default class {}"
- "export default class extends C {}"
+ "export default class {}",
+ "export default class extends C {}",
"export default 42",
"var x; export default x = 7",
"export { Q } from 'somemodule.js';",
@@ -5600,6 +5499,8 @@ TEST(BasicImportExportParsing) {
"export { static } from 'm.js'",
"export { let } from 'm.js'",
"var a; export { a as b, a as c };",
+ "var a; export { a as await };",
+ "var a; export { a as enum };",
"import 'somemodule.js';",
"import { } from 'm.js';",
@@ -5614,6 +5515,13 @@ TEST(BasicImportExportParsing) {
"import { yield as y } from 'm.js';",
"import { static as s } from 'm.js';",
"import { let as l } from 'm.js';",
+
+ "import thing from 'a.js'; export {thing};",
+ "export {thing}; import thing from 'a.js';",
+ "import {thing} from 'a.js'; export {thing};",
+ "export {thing}; import {thing} from 'a.js';",
+ "import * as thing from 'a.js'; export {thing};",
+ "export {thing}; import * as thing from 'a.js';",
};
// clang-format on
@@ -5644,6 +5552,7 @@ TEST(BasicImportExportParsing) {
i::Handle<i::String> message_string = i::Handle<i::String>::cast(
i::JSReceiver::GetProperty(isolate, exception_handle, "message")
.ToHandleChecked());
+ isolate->clear_pending_exception();
v8::base::OS::Print(
"Parser failed on:\n"
@@ -5664,6 +5573,7 @@ TEST(BasicImportExportParsing) {
i::Parser parser(&info);
info.set_global();
CHECK(!parser.Parse(&info));
+ isolate->clear_pending_exception();
}
}
}
@@ -5725,6 +5635,8 @@ TEST(ImportExportParsingErrors) {
"import { y as yield } from 'm.js'",
"import { s as static } from 'm.js'",
"import { l as let } from 'm.js'",
+ "import { a as await } from 'm.js';",
+ "import { a as enum } from 'm.js';",
"import { x }, def from 'm.js';",
"import def, def2 from 'm.js';",
"import * as x, def from 'm.js';",
@@ -5754,9 +5666,210 @@ TEST(ImportExportParsingErrors) {
i::Parser parser(&info);
info.set_module();
CHECK(!parser.Parse(&info));
+ isolate->clear_pending_exception();
}
}
+TEST(ModuleTopLevelFunctionDecl) {
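+ // At module top level, function declarations are lexically scoped, so a
+ // redeclaration (or a clashing 'var' of the same name) must be a
+ // SyntaxError.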
+ // clang-format off
+ const char* kErrorSources[] = {
+ "function f() {} function f() {}",
+ "var f; function f() {}",
+ "function f() {} var f;",
+ "function* f() {} function* f() {}",
+ "var f; function* f() {}",
+ "function* f() {} var f;",
+ "function f() {} function* f() {}",
+ "function* f() {} function f() {}",
+ };
+ // clang-format on
+
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ for (unsigned i = 0; i < arraysize(kErrorSources); ++i) {
+ i::Handle<i::String> source =
+ factory->NewStringFromAsciiChecked(kErrorSources[i]);
+
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::Zone zone(CcTest::i_isolate()->allocator());
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
+ info.set_module();
+ CHECK(!parser.Parse(&info));
+ isolate->clear_pending_exception();
+ }
+}
+
+TEST(ModuleAwaitReserved) {
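+ // In the module goal 'await' is a reserved word, so every use of it below
+ // as a binding, label, parameter, or function/class name must fail.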
+ // clang-format off
+ const char* kErrorSources[] = {
+ "await;",
+ "await: ;",
+ "var await;",
+ "var [await] = [];",
+ "var { await } = {};",
+ "var { x: await } = {};",
+ "{ var await; }",
+ "let await;",
+ "let [await] = [];",
+ "let { await } = {};",
+ "let { x: await } = {};",
+ "{ let await; }",
+ "const await = null;",
+ "const [await] = [];",
+ "const { await } = {};",
+ "const { x: await } = {};",
+ "{ const await = null; }",
+ "function await() {}",
+ "function f(await) {}",
+ "function* await() {}",
+ "function* g(await) {}",
+ "(function await() {});",
+ "(function (await) {});",
+ "(function* await() {});",
+ "(function* (await) {});",
+ "(await) => {};",
+ "await => {};",
+ "class await {}",
+ "class C { constructor(await) {} }",
+ "class C { m(await) {} }",
+ "class C { static m(await) {} }",
+ "class C { *m(await) {} }",
+ "class C { static *m(await) {} }",
+ "(class await {})",
+ "(class { constructor(await) {} });",
+ "(class { m(await) {} });",
+ "(class { static m(await) {} });",
+ "(class { *m(await) {} });",
+ "(class { static *m(await) {} });",
+ "({ m(await) {} });",
+ "({ *m(await) {} });",
+ "({ set p(await) {} });",
+ "try {} catch (await) {}",
+ "try {} catch (await) {} finally {}",
+ NULL
+ };
+ // clang-format on
+ const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+
+ RunModuleParserSyncTest(context_data, kErrorSources, kError);
+}
+
+TEST(ModuleAwaitReservedPreParse) {
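+ // Here the offending 'await' sits inside a lazily parsed function body, so
+ // the error must be caught on the preparser path as well.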
+ const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+ const char* error_data[] = {"function f() { var await = 0; }", NULL};
+
+ RunModuleParserSyncTest(context_data, error_data, kError);
+}
+
+TEST(ModuleAwaitPermitted) {
+ // clang-format off
+ const char* kValidSources[] = {
+ "({}).await;",
+ "({ await: null });",
+ "({ await() {} });",
+ "({ get await() {} });",
+ "({ set await(x) {} });",
+ "(class { await() {} });",
+ "(class { static await() {} });",
+ "(class { *await() {} });",
+ "(class { static *await() {} });",
+ NULL
+ };
+ // clang-format on
+ const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+
+ RunModuleParserSyncTest(context_data, kValidSources, kSuccess);
+}
+
+TEST(EnumReserved) {
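+ // 'enum' is a future reserved word in all modes, so modules must reject
+ // the same shapes of use as for 'await' above.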
+ // clang-format off
+ const char* kErrorSources[] = {
+ "enum;",
+ "enum: ;",
+ "var enum;",
+ "var [enum] = [];",
+ "var { enum } = {};",
+ "var { x: enum } = {};",
+ "{ var enum; }",
+ "let enum;",
+ "let [enum] = [];",
+ "let { enum } = {};",
+ "let { x: enum } = {};",
+ "{ let enum; }",
+ "const enum = null;",
+ "const [enum] = [];",
+ "const { enum } = {};",
+ "const { x: enum } = {};",
+ "{ const enum = null; }",
+ "function enum() {}",
+ "function f(enum) {}",
+ "function* enum() {}",
+ "function* g(enum) {}",
+ "(function enum() {});",
+ "(function (enum) {});",
+ "(function* enum() {});",
+ "(function* (enum) {});",
+ "(enum) => {};",
+ "enum => {};",
+ "class enum {}",
+ "class C { constructor(enum) {} }",
+ "class C { m(enum) {} }",
+ "class C { static m(enum) {} }",
+ "class C { *m(enum) {} }",
+ "class C { static *m(enum) {} }",
+ "(class enum {})",
+ "(class { constructor(enum) {} });",
+ "(class { m(enum) {} });",
+ "(class { static m(enum) {} });",
+ "(class { *m(enum) {} });",
+ "(class { static *m(enum) {} });",
+ "({ m(enum) {} });",
+ "({ *m(enum) {} });",
+ "({ set p(enum) {} });",
+ "try {} catch (enum) {}",
+ "try {} catch (enum) {} finally {}",
+ NULL
+ };
+ // clang-format on
+ const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+
+ RunModuleParserSyncTest(context_data, kErrorSources, kError);
+}
+
+static void CheckModuleEntry(const i::ModuleDescriptor::ModuleEntry* entry,
+ const char* export_name, const char* local_name, const char* import_name,
+ const char* module_request) {
+ CHECK_NOT_NULL(entry);
+ if (export_name == nullptr) {
+ CHECK_NULL(entry->export_name);
+ } else {
+ CHECK(entry->export_name->IsOneByteEqualTo(export_name));
+ }
+ if (local_name == nullptr) {
+ CHECK_NULL(entry->local_name);
+ } else {
+ CHECK(entry->local_name->IsOneByteEqualTo(local_name));
+ }
+ if (import_name == nullptr) {
+ CHECK_NULL(entry->import_name);
+ } else {
+ CHECK(entry->import_name->IsOneByteEqualTo(import_name));
+ }
+ if (module_request == nullptr) {
+ CHECK_NULL(entry->module_request);
+ } else {
+ CHECK(entry->module_request->IsOneByteEqualTo(module_request));
+ }
+}
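+// Example: CheckModuleEntry(e, "y", "x", nullptr, nullptr) asserts that e
+// describes the local export "export {x as y};"; a nullptr argument asserts
+// that the corresponding field is unset.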
TEST(ModuleParsingInternals) {
i::Isolate* isolate = CcTest::i_isolate();
@@ -5774,7 +5887,19 @@ TEST(ModuleParsingInternals) {
"import n from 'n.js';"
"export { a as b } from 'm.js';"
"export * from 'p.js';"
- "import 'q.js'";
+ "export var foo;"
+ "export function goo() {};"
+ "export let hoo;"
+ "export const joo = 42;"
+ "export default (function koo() {});"
+ "import 'q.js';"
+ "let nonexport = 42;"
+ "import {m as mm} from 'm.js';"
+ "import {aa} from 'm.js';"
+ "export {aa as bb, x};"
+ "import * as loo from 'bar.js';"
+ "import * as foob from 'bar.js';"
+ "export {foob};";
i::Handle<i::String> source = factory->NewStringFromAsciiChecked(kSource);
i::Handle<i::Script> script = factory->NewScript(source);
i::Zone zone(CcTest::i_isolate()->allocator());
@@ -5784,41 +5909,142 @@ TEST(ModuleParsingInternals) {
CHECK(parser.Parse(&info));
CHECK(i::Compiler::Analyze(&info));
i::FunctionLiteral* func = info.literal();
- i::Scope* module_scope = func->scope();
+ i::ModuleScope* module_scope = func->scope()->AsModuleScope();
i::Scope* outer_scope = module_scope->outer_scope();
CHECK(outer_scope->is_script_scope());
CHECK_NULL(outer_scope->outer_scope());
CHECK(module_scope->is_module_scope());
- i::ModuleDescriptor* descriptor = module_scope->module();
- CHECK_NOT_NULL(descriptor);
- CHECK_EQ(1, descriptor->Length());
- const i::AstRawString* export_name =
- info.ast_value_factory()->GetOneByteString("y");
- const i::AstRawString* local_name =
- descriptor->LookupLocalExport(export_name, &zone);
- CHECK_NOT_NULL(local_name);
- CHECK(local_name->IsOneByteEqualTo("x"));
i::ZoneList<i::Declaration*>* declarations = module_scope->declarations();
- CHECK_EQ(3, declarations->length());
+ CHECK_EQ(13, declarations->length());
+
CHECK(declarations->at(0)->proxy()->raw_name()->IsOneByteEqualTo("x"));
- i::ImportDeclaration* import_decl =
- declarations->at(1)->AsImportDeclaration();
- CHECK(import_decl->import_name()->IsOneByteEqualTo("q"));
- CHECK(import_decl->proxy()->raw_name()->IsOneByteEqualTo("z"));
- CHECK(import_decl->module_specifier()->IsOneByteEqualTo("m.js"));
- import_decl = declarations->at(2)->AsImportDeclaration();
- CHECK(import_decl->import_name()->IsOneByteEqualTo("default"));
- CHECK(import_decl->proxy()->raw_name()->IsOneByteEqualTo("n"));
- CHECK(import_decl->module_specifier()->IsOneByteEqualTo("n.js"));
- // TODO(adamk): Add test for indirect exports once they're fully implemented.
- // TODO(adamk): Add test for star exports once they're fully implemented.
- const i::ZoneList<const i::AstRawString*>& requested_modules =
- descriptor->requested_modules();
- CHECK_EQ(4, requested_modules.length());
- CHECK(requested_modules[0]->IsOneByteEqualTo("m.js"));
- CHECK(requested_modules[1]->IsOneByteEqualTo("n.js"));
- CHECK(requested_modules[2]->IsOneByteEqualTo("p.js"));
- CHECK(requested_modules[3]->IsOneByteEqualTo("q.js"));
+ CHECK(declarations->at(0)->proxy()->var()->mode() == i::LET);
+ CHECK(declarations->at(0)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(0)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(1)->proxy()->raw_name()->IsOneByteEqualTo("z"));
+ CHECK(declarations->at(1)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->at(1)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(1)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(2)->proxy()->raw_name()->IsOneByteEqualTo("n"));
+ CHECK(declarations->at(2)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->at(2)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(2)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(3)->proxy()->raw_name()->IsOneByteEqualTo("foo"));
+ CHECK(declarations->at(3)->proxy()->var()->mode() == i::VAR);
+ CHECK(!declarations->at(3)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(3)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(4)->proxy()->raw_name()->IsOneByteEqualTo("goo"));
+ CHECK(declarations->at(4)->proxy()->var()->mode() == i::LET);
+ CHECK(!declarations->at(4)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(4)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(5)->proxy()->raw_name()->IsOneByteEqualTo("hoo"));
+ CHECK(declarations->at(5)->proxy()->var()->mode() == i::LET);
+ CHECK(declarations->at(5)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(5)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(6)->proxy()->raw_name()->IsOneByteEqualTo("joo"));
+ CHECK(declarations->at(6)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->at(6)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(6)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(
+ declarations->at(7)->proxy()->raw_name()->IsOneByteEqualTo("*default*"));
+ CHECK(declarations->at(7)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->at(7)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(7)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(
+ declarations->at(8)->proxy()->raw_name()->IsOneByteEqualTo("nonexport"));
+ CHECK(declarations->at(8)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(8)->proxy()->var()->location() !=
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(9)->proxy()->raw_name()->IsOneByteEqualTo("mm"));
+ CHECK(declarations->at(9)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->at(9)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(9)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(10)->proxy()->raw_name()->IsOneByteEqualTo("aa"));
+ CHECK(declarations->at(10)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->at(10)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(10)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(11)->proxy()->raw_name()->IsOneByteEqualTo("loo"));
+ CHECK(declarations->at(11)->proxy()->var()->mode() == i::CONST);
+ CHECK(!declarations->at(11)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(11)->proxy()->var()->location() !=
+ i::VariableLocation::MODULE);
+
+ CHECK(declarations->at(12)->proxy()->raw_name()->IsOneByteEqualTo("foob"));
+ CHECK(declarations->at(12)->proxy()->var()->mode() == i::CONST);
+ CHECK(!declarations->at(12)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->at(12)->proxy()->var()->location() ==
+ i::VariableLocation::MODULE);
+
+ i::ModuleDescriptor* descriptor = module_scope->module();
+ CHECK_NOT_NULL(descriptor);
+
+ CHECK_EQ(11, descriptor->exports().length());
+ CheckModuleEntry(
+ descriptor->exports().at(0), "y", "x", nullptr, nullptr);
+ CheckModuleEntry(
+ descriptor->exports().at(1), "b", nullptr, "a", "m.js");
+ CheckModuleEntry(
+ descriptor->exports().at(2), nullptr, nullptr, nullptr, "p.js");
+ CheckModuleEntry(
+ descriptor->exports().at(3), "foo", "foo", nullptr, nullptr);
+ CheckModuleEntry(
+ descriptor->exports().at(4), "goo", "goo", nullptr, nullptr);
+ CheckModuleEntry(
+ descriptor->exports().at(5), "hoo", "hoo", nullptr, nullptr);
+ CheckModuleEntry(
+ descriptor->exports().at(6), "joo", "joo", nullptr, nullptr);
+ CheckModuleEntry(
+ descriptor->exports().at(7), "default", "*default*", nullptr, nullptr);
+ CheckModuleEntry(
+ descriptor->exports().at(8), "bb", nullptr, "aa", "m.js"); // indirect export
+ CheckModuleEntry(
+ descriptor->exports().at(9), "x", "x", nullptr, nullptr);
+ CheckModuleEntry(
+ descriptor->exports().at(10), "foob", "foob", nullptr, nullptr);
+
+ CHECK_EQ(3, descriptor->special_imports().length());
+ CheckModuleEntry(
+ descriptor->special_imports().at(0), nullptr, nullptr, nullptr, "q.js");
+ CheckModuleEntry(
+ descriptor->special_imports().at(1), nullptr, "loo", nullptr, "bar.js");
+ CheckModuleEntry(
+ descriptor->special_imports().at(2), nullptr, "foob", nullptr, "bar.js");
+
+ CHECK_EQ(4, descriptor->regular_imports().size());
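+ // regular_imports() maps the local binding name to its entry, hence the
+ // lookups below via each declaration's raw_name.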
+ const i::ModuleDescriptor::ModuleEntry* entry;
+ entry = descriptor->regular_imports().find(
+ declarations->at(1)->proxy()->raw_name())->second;
+ CheckModuleEntry(entry, nullptr, "z", "q", "m.js");
+ entry = descriptor->regular_imports().find(
+ declarations->at(2)->proxy()->raw_name())->second;
+ CheckModuleEntry(entry, nullptr, "n", "default", "n.js");
+ entry = descriptor->regular_imports().find(
+ declarations->at(9)->proxy()->raw_name())->second;
+ CheckModuleEntry(entry, nullptr, "mm", "m", "m.js");
+ entry = descriptor->regular_imports().find(
+ declarations->at(10)->proxy()->raw_name())->second;
+ CheckModuleEntry(entry, nullptr, "aa", "aa", "m.js");
}
@@ -6043,9 +6269,47 @@ TEST(DestructuringPositiveTests) {
"[...rest]",
"[a,b,...rest]",
"[a,,...rest]",
+ "{ __proto__: x, __proto__: y}",
+ "{arguments: x}",
+ "{eval: x}",
NULL};
// clang-format on
RunParserSyncTest(context_data, data, kSuccess);
+
+ // v8:5201
+ // TODO(lpy): The two test sets below should be merged once
+ // we fix https://bugs.chromium.org/p/v8/issues/detail?id=4577
+ {
+ const char* sloppy_context_data1[][2] = {
+ {"var ", " = {};"},
+ {"function f(", ") {}"},
+ {"function f(argument1, ", ") {}"},
+ {"var f = (", ") => {};"},
+ {"var f = (argument1,", ") => {};"},
+ {"try {} catch(", ") {}"},
+ {NULL, NULL}
+ };
+ const char* data1[] = {
+ "{eval}",
+ "{x: eval}",
+ "{eval = false}",
+ NULL
+ };
+ RunParserSyncTest(sloppy_context_data1, data1, kSuccess);
+
+ const char* sloppy_context_data2[][2] = {
+ {"var ", " = {};"},
+ {"try {} catch(", ") {}"},
+ {NULL, NULL}
+ };
+ const char* data2[] = {
+ "{arguments}",
+ "{x: arguments}",
+ "{arguments = false}",
+ NULL,
+ };
+ RunParserSyncTest(sloppy_context_data2, data2, kSuccess);
+ }
}
@@ -6158,6 +6422,7 @@ TEST(DestructuringNegativeTests) {
{ // Strict mode.
const char* context_data[][2] = {
+ {"'use strict'; var ", " = {};"},
{"'use strict'; let ", " = {};"},
{"'use strict'; const ", " = {};"},
{"'use strict'; function f(", ") {}"},
@@ -6166,10 +6431,18 @@ TEST(DestructuringNegativeTests) {
// clang-format off
const char* data[] = {
+ "[arguments]",
"[eval]",
"{ a : arguments }",
+ "{ a : eval }",
"[public]",
"{ x : private }",
+ "{ x : arguments }",
+ "{ x : eval }",
+ "{ arguments }",
+ "{ eval }",
+ "{ arguments = false }"
+ "{ eval = false }",
NULL};
// clang-format on
RunParserSyncTest(context_data, data, kError);
@@ -6213,9 +6486,7 @@ TEST(DestructuringNegativeTests) {
"[ a ]",
NULL};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
}
@@ -6286,6 +6557,7 @@ TEST(DestructuringAssignmentPositiveTests) {
"{ x : [ foo()[y] = 10 ] = {} }",
"{ x : [ y.z = 10 ] = {} }",
"{ x : [ y[z] = 10 ] = {} }",
+ "{ z : { __proto__: x, __proto__: y } = z }"
"[ x ]",
"[ foo().x ]",
@@ -6405,6 +6677,8 @@ TEST(DestructuringAssignmentPositiveTests) {
"var x; (true ? { x = true } = {} : { x = false } = {})",
"var q, x; (q, { x = 10 } = {});",
"var { x = 10 } = { x = 20 } = {};",
+ "var { __proto__: x, __proto__: y } = {}",
+ "({ __proto__: x, __proto__: y } = {})",
"var { x = 10 } = (o = { x = 20 } = {});",
"var x; (({ x = 10 } = { x = 20 } = {}) => x)({})",
NULL,
@@ -6472,6 +6746,11 @@ TEST(DestructuringAssignmentNegativeTests) {
"[x, y, ...[z] = [1]]",
"[...[z] = [1]]",
+ "[...++x]",
+ "[...x--]",
+ "[...!x]",
+ "[...x + y]",
+
// v8:4657
"({ x: x4, x: (x+=1e4) })",
"(({ x: x4, x: (x+=1e4) }))",
@@ -6684,6 +6963,9 @@ TEST(DefaultParametersYieldInInitializers) {
// Arrow function within generator has the same rules.
{"'use strict'; (function *g() { (", ") => {} });"},
{"(function *g() { (", ") => {} });"},
+ // And similarly for arrow functions in the parameter list.
+ {"'use strict'; (function *g(z = (", ") => {}) { });"},
+ {"(function *g(z = (", ") => {}) { });"},
{NULL, NULL}
};
@@ -6819,16 +7101,10 @@ TEST(NewTarget) {
NULL
};
- static const ParserFlag always_flags[] = {
- kAllowHarmonyNewTarget,
- kAllowHarmonySloppy,
- };
// clang-format on
- RunParserSyncTest(good_context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
- RunParserSyncTest(bad_context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(good_context_data, data, kSuccess);
+ RunParserSyncTest(bad_context_data, data, kError);
}
@@ -6848,9 +7124,7 @@ TEST(ConstSloppy) {
NULL
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -6873,10 +7147,7 @@ TEST(LetSloppy) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
@@ -6922,9 +7193,7 @@ TEST(LanguageModeDirectivesNonSimpleParameterListErrors) {
"{ initializedBindingPattern } = { initializedBindingPattern: true }",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError);
}
@@ -6952,10 +7221,7 @@ TEST(LetSloppyOnly) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
// Some things should be rejected even in sloppy mode
// This addresses BUG(v8:4403).
@@ -6989,10 +7255,7 @@ TEST(LetSloppyOnly) {
};
// clang-format on
- static const ParserFlag fail_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(context_data, fail_data, kError, NULL, 0, fail_flags,
- arraysize(fail_flags));
+ RunParserSyncTest(context_data, fail_data, kError);
}
@@ -7092,13 +7355,9 @@ TEST(EscapedKeywords) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
- RunParserSyncTest(sloppy_context_data, fail_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, fail_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunModuleParserSyncTest(sloppy_context_data, fail_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_context_data, fail_data, kError);
+ RunParserSyncTest(strict_context_data, fail_data, kError);
+ RunModuleParserSyncTest(sloppy_context_data, fail_data, kError);
// clang-format off
const char* let_data[] = {
@@ -7109,15 +7368,8 @@ TEST(EscapedKeywords) {
};
// clang-format on
- RunParserSyncTest(sloppy_context_data, let_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, let_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
-
- static const ParserFlag sloppy_let_flags[] = {kAllowHarmonySloppy,
- kAllowHarmonySloppyLet};
- RunParserSyncTest(sloppy_context_data, let_data, kSuccess, NULL, 0,
- sloppy_let_flags, arraysize(sloppy_let_flags));
+ RunParserSyncTest(sloppy_context_data, let_data, kSuccess);
+ RunParserSyncTest(strict_context_data, let_data, kError);
// Non-errors in sloppy mode
const char* valid_data[] = {"(\\u0069mplements = 1);",
@@ -7142,12 +7394,9 @@ TEST(EscapedKeywords) {
"var st\\u0061tic = 1;",
"var { st\\u0061tic } = {};",
NULL};
- RunParserSyncTest(sloppy_context_data, valid_data, kSuccess, NULL, 0,
- always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_context_data, valid_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
- RunModuleParserSyncTest(strict_context_data, valid_data, kError, NULL, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_context_data, valid_data, kSuccess);
+ RunParserSyncTest(strict_context_data, valid_data, kError);
+ RunModuleParserSyncTest(strict_context_data, valid_data, kError);
}
@@ -7167,7 +7416,7 @@ TEST(MiscSyntaxErrors) {
};
// clang-format on
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, error_data, kError);
}
@@ -7196,7 +7445,7 @@ TEST(EscapeSequenceErrors) {
};
// clang-format on
- RunParserSyncTest(context_data, error_data, kError, NULL, 0, NULL, 0);
+ RunParserSyncTest(context_data, error_data, kError);
}
@@ -7281,6 +7530,13 @@ TEST(FunctionDeclarationError) {
"with ({}) label: function f() { };",
"if (true) label: function f() {}",
"if (true) {} else label: function f() {}",
+ "if (true) function* f() { }",
+ "label: function* f() { }",
+ // TODO(littledan, v8:4806): Ban duplicate generator declarations in
+ // a block, maybe by tracking whether a Variable is a generator declaration
+ // "{ function* f() {} function* f() {} }",
+ // "{ function f() {} function* f() {} }",
+ // "{ function* f() {} function f() {} }",
NULL
};
// Valid only in sloppy mode, with or without
@@ -7357,10 +7613,7 @@ TEST(ExponentiationOperator) {
};
// clang-format on
- static const ParserFlag always_flags[] = {
- kAllowHarmonyExponentiationOperator};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess);
}
TEST(ExponentiationOperatorErrors) {
@@ -7407,8 +7660,493 @@ TEST(ExponentiationOperatorErrors) {
};
// clang-format on
- static const ParserFlag always_flags[] = {
- kAllowHarmonyExponentiationOperator};
+ RunParserSyncTest(context_data, error_data, kError);
+}
+
+TEST(AsyncAwait) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "'use strict';", "" },
+ { "", "" },
+ { NULL, NULL }
+ };
+
+ const char* data[] = {
+ "var asyncFn = async function() { await 1; };",
+ "var asyncFn = async function withName() { await 1; };",
+ "var asyncFn = async () => await 'test';",
+ "var asyncFn = async x => await x + 'test';",
+ "async function asyncFn() { await 1; }",
+ "var O = { async method() { await 1; } }",
+ "var O = { async ['meth' + 'od']() { await 1; } }",
+ "var O = { async 'method'() { await 1; } }",
+ "var O = { async 0() { await 1; } }",
+ "async function await() {}",
+
+ "var asyncFn = async({ foo = 1 }) => foo;",
+ "var asyncFn = async({ foo = 1 } = {}) => foo;",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+
+ // clang-format off
+ const char* async_body_context_data[][2] = {
+ { "async function f() {", "}" },
+ { "var f = async function() {", "}" },
+ { "var f = async() => {", "}" },
+ { "var O = { async method() {", "} }" },
+ { "'use strict'; async function f() {", "}" },
+ { "'use strict'; var f = async function() {", "}" },
+ { "'use strict'; var f = async() => {", "}" },
+ { "'use strict'; var O = { async method() {", "} }" },
+ { NULL, NULL }
+ };
+
+ const char* body_context_data[][2] = {
+ { "function f() {", "}" },
+ { "function* g() {", "}" },
+ { "var f = function() {", "}" },
+ { "var g = function*() {", "}" },
+ { "var O = { method() {", "} }" },
+ { "var O = { *method() {", "} }" },
+ { "var f = () => {", "}" },
+ { "'use strict'; function f() {", "}" },
+ { "'use strict'; function* g() {", "}" },
+ { "'use strict'; var f = function() {", "}" },
+ { "'use strict'; var g = function*() {", "}" },
+ { "'use strict'; var O = { method() {", "} }" },
+ { "'use strict'; var O = { *method() {", "} }" },
+ { "'use strict'; var f = () => {", "}" },
+ { NULL, NULL }
+ };
+
+ const char* body_data[] = {
+ "var async = 1; return async;",
+ "let async = 1; return async;",
+ "const async = 1; return async;",
+ "function async() {} return async();",
+ "var async = async => async; return async();",
+ "function foo() { var await = 1; return await; }",
+ "function foo(await) { return await; }",
+ "function* foo() { var await = 1; return await; }",
+ "function* foo(await) { return await; }",
+ "var f = () => { var await = 1; return await; }",
+ "var O = { method() { var await = 1; return await; } };",
+ "var O = { method(await) { return await; } };",
+ "var O = { *method() { var await = 1; return await; } };",
+ "var O = { *method(await) { return await; } };",
+
+ "(function await() {})",
+ NULL
+ };
+ // clang-format on
+
+ RunParserSyncTest(async_body_context_data, body_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(body_context_data, body_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+TEST(AsyncAwaitErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "'use strict';", "" },
+ { "", "" },
+ { NULL, NULL }
+ };
+
+ const char* strict_context_data[][2] = {
+ { "'use strict';", "" },
+ { NULL, NULL }
+ };
+
+ const char* error_data[] = {
+ "var asyncFn = async function await() {};",
+ "var asyncFn = async () => var await = 'test';",
+ "var asyncFn = async await => await + 'test';",
+ "var asyncFn = async function(await) {};",
+ "var asyncFn = async (await) => 'test';",
+ "async function f(await) {}",
+
+ "var O = { async method(a, a) {} }",
+ "var O = { async ['meth' + 'od'](a, a) {} }",
+ "var O = { async 'method'(a, a) {} }",
+ "var O = { async 0(a, a) {} }",
+
+ "var f = async() => await;",
+
+ "var asyncFn = async function*() {}",
+ "async function* f() {}",
+ "var O = { *async method() {} };",
+ "var O = { async *method() {} };",
+ "var O = { async method*() {} };",
+
+ "var asyncFn = async function(x = await 1) { return x; }",
+ "async function f(x = await 1) { return x; }",
+ "var f = async(x = await 1) => x;",
+ "var O = { async method(x = await 1) { return x; } };",
+
+ "var f = async(x = await) => 1;",
+
+ "class C { async constructor() {} }",
+ "class C {}; class C2 extends C { async constructor() {} }",
+ "class C { static async prototype() {} }",
+ "class C {}; class C2 extends C { static async prototype() {} }",
+
+ "var f = async() => ((async(x = await 1) => x)();",
+
+ // Henrique Ferreiro's bug (tm)
+ "(async function foo1() { } foo2 => 1)",
+ "(async function foo3() { } () => 1)",
+ "(async function foo4() { } => 1)",
+ "(async function() { } foo5 => 1)",
+ "(async function() { } () => 1)",
+ "(async function() { } => 1)",
+ "(async.foo6 => 1)",
+ "(async.foo7 foo8 => 1)",
+ "(async.foo9 () => 1)",
+ "(async().foo10 => 1)",
+ "(async().foo11 foo12 => 1)",
+ "(async().foo13 () => 1)",
+ "(async['foo14'] => 1)",
+ "(async['foo15'] foo16 => 1)",
+ "(async['foo17'] () => 1)",
+ "(async()['foo18'] => 1)",
+ "(async()['foo19'] foo20 => 1)",
+ "(async()['foo21'] () => 1)",
+ "(async`foo22` => 1)",
+ "(async`foo23` foo24 => 1)",
+ "(async`foo25` () => 1)",
+ "(async`foo26`.bar27 => 1)",
+ "(async`foo28`.bar29 foo30 => 1)",
+ "(async`foo31`.bar32 () => 1)",
+
+ // v8:5148 assert that errors are still thrown for calls that may have been
+ // async functions
+ "async({ foo33 = 1 })",
+ NULL
+ };
+
+ const char* strict_error_data[] = {
+ "var O = { async method(eval) {} }",
+ "var O = { async ['meth' + 'od'](eval) {} }",
+ "var O = { async 'method'(eval) {} }",
+ "var O = { async 0(eval) {} }",
+
+ "var O = { async method(arguments) {} }",
+ "var O = { async ['meth' + 'od'](arguments) {} }",
+ "var O = { async 'method'(arguments) {} }",
+ "var O = { async 0(arguments) {} }",
+
+ "var O = { async method(dupe, dupe) {} }",
+
+ // TODO(caitp): preparser needs to report duplicate parameter errors, too.
+ // "var f = async(dupe, dupe) => {}",
+
+ NULL
+ };
+
+ const char* formal_parameters_data[] = {
+ "var f = async({ await }) => 1;",
+ "var f = async({ await = 1 }) => 1;",
+ "var f = async({ await } = {}) => 1;",
+ "var f = async({ await = 1 } = {}) => 1;",
+ "var f = async([await]) => 1;",
+ "var f = async([await] = []) => 1;",
+ "var f = async([await = 1]) => 1;",
+ "var f = async([await = 1] = []) => 1;",
+ "var f = async(...await) => 1;",
+ "var f = async(await) => 1;",
+ "var f = async(await = 1) => 1;",
+ "var f = async(...[await]) => 1;",
+
+ // v8:5190
+ "var f = async(1) => 1",
+ "var f = async('str') => 1",
+ "var f = async(/foo/) => 1",
+ "var f = async({ foo = async(1) => 1 }) => 1",
+ "var f = async({ foo = async(a) => 1 })",
+
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, strict_error_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ RunParserSyncTest(context_data, formal_parameters_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+
+ // clang-format off
+ const char* async_body_context_data[][2] = {
+ { "async function f() {", "}" },
+ { "var f = async function() {", "}" },
+ { "var f = async() => {", "}" },
+ { "var O = { async method() {", "} }" },
+ { "'use strict'; async function f() {", "}" },
+ { "'use strict'; var f = async function() {", "}" },
+ { "'use strict'; var f = async() => {", "}" },
+ { "'use strict'; var O = { async method() {", "} }" },
+ { NULL, NULL }
+ };
+
+ const char* async_body_error_data[] = {
+ "var await = 1;",
+ "var { await } = 1;",
+ "var [ await ] = 1;",
+ "return async (await) => {};",
+ "var O = { async [await](a, a) {} }",
+ "await;",
+
+ "function await() {}",
+
+ "var f = await => 42;",
+ "var f = (await) => 42;",
+ "var f = (await, a) => 42;",
+ "var f = (...await) => 42;",
+
+ "var e = (await);",
+ "var e = (await, f);",
+ "var e = (await = 42)",
+
+ "var e = [await];",
+ "var e = {await};",
+
+ NULL
+ };
+ // clang-format on
+
+ RunParserSyncTest(async_body_context_data, async_body_error_data, kError,
+ NULL, 0, always_flags, arraysize(always_flags));
+}
+
+TEST(AsyncAwaitModule) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "", "" },
+ { NULL, NULL }
+ };
+
+ const char* data[] = {
+ "export default async function() { await 1; }",
+ "export default async function async() { await 1; }",
+ "export async function async() { await 1; }",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ RunModuleParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags), NULL, 0, false);
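+ // The trailing 'false' is the test_preparser argument: the preparser half
+ // of the test is skipped, presumably because it does not yet handle this
+ // module syntax.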
+}
+
+TEST(AsyncAwaitModuleErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "", "" },
+ { NULL, NULL }
+ };
+
+ const char* error_data[] = {
+ "export default (async function await() {})",
+ "export default async function await() {}",
+ "export async function await() {}",
+ "export async function() {}",
+ "export async",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ RunModuleParserSyncTest(context_data, error_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags), NULL, 0,
+ false);
+}
+
+TEST(RestrictiveForInErrors) {
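+ // With kAllowHarmonyForIn, the legacy (Annex B) initializer in a for-in
+ // declaration head is an error; for 'let'/'const' it was never allowed.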
+ // clang-format off
+ const char* context_data[][2] = {
+ { "'use strict'", "" },
+ { "", "" },
+ { NULL, NULL }
+ };
+ const char* error_data[] = {
+ "for (var x = 0 in {});",
+ "for (const x = 0 in {});",
+ "for (let x = 0 in {});",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyForIn};
+ RunParserSyncTest(context_data, error_data, kError, nullptr, 0, always_flags,
+ arraysize(always_flags));
+}
+
+TEST(NoDuplicateGeneratorsInBlock) {
+ const char* block_context_data[][2] = {
+ {"'use strict'; {", "}"},
+ {"{", "}"},
+ {"(function() { {", "} })()"},
+ {"(function() {'use strict'; {", "} })()"},
+ {NULL, NULL}};
+ const char* top_level_context_data[][2] = {
+ {"'use strict';", ""},
+ {"", ""},
+ {"(function() {", "})()"},
+ {"(function() {'use strict';", "})()"},
+ {NULL, NULL}};
+ const char* error_data[] = {"function* x() {} function* x() {}",
+ "function x() {} function* x() {}",
+ "function* x() {} function x() {}", NULL};
+ static const ParserFlag always_flags[] = {kAllowHarmonyRestrictiveGenerators};
+ // The preparser doesn't enforce the restriction, so turn it off.
+ bool test_preparser = false;
+ RunParserSyncTest(block_context_data, error_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags), NULL, 0, false,
+ test_preparser);
+ RunParserSyncTest(top_level_context_data, error_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+TEST(NoDuplicateAsyncFunctionInBlock) {
+ const char* block_context_data[][2] = {
+ {"'use strict'; {", "}"},
+ {"{", "}"},
+ {"(function() { {", "} })()"},
+ {"(function() {'use strict'; {", "} })()"},
+ {NULL, NULL}};
+ const char* top_level_context_data[][2] = {
+ {"'use strict';", ""},
+ {"", ""},
+ {"(function() {", "})()"},
+ {"(function() {'use strict';", "})()"},
+ {NULL, NULL}};
+ const char* error_data[] = {"async function x() {} async function x() {}",
+ "function x() {} async function x() {}",
+ "async function x() {} function x() {}",
+ "function* x() {} async function x() {}",
+ "function* x() {} async function x() {}",
+ "async function x() {} function* x() {}",
+ "function* x() {} async function x() {}",
+ NULL};
+ static const ParserFlag always_flags[] = {kAllowHarmonyAsyncAwait};
+ // The preparser doesn't enforce the restriction, so turn it off.
+ bool test_preparser = false;
+ RunParserSyncTest(block_context_data, error_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags), NULL, 0, false,
+ test_preparser);
+ RunParserSyncTest(top_level_context_data, error_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+TEST(TrailingCommasInParameters) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "", "" },
+ { "'use strict';", "" },
+ { "function foo() {", "}" },
+ { "function foo() {'use strict';", "}" },
+ { NULL, NULL }
+ };
+
+ const char* data[] = {
+ " function a(b,) {}",
+ " function* a(b,) {}",
+ "(function a(b,) {});",
+ "(function* a(b,) {});",
+ "(function (b,) {});",
+ "(function* (b,) {});",
+ " function a(b,c,d,) {}",
+ " function* a(b,c,d,) {}",
+ "(function a(b,c,d,) {});",
+ "(function* a(b,c,d,) {});",
+ "(function (b,c,d,) {});",
+ "(function* (b,c,d,) {});",
+ "(b,) => {};",
+ "(b,c,d,) => {};",
+ "a(1,);",
+ "a(1,2,3,);",
+ "a(...[],);",
+ "a(1, 2, ...[],);",
+ "a(...[], 2, ...[],);",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyTrailingCommas};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+TEST(TrailingCommasInParametersErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "", "" },
+ { "'use strict';", "" },
+ { "function foo() {", "}" },
+ { "function foo() {'use strict';", "}" },
+ { NULL, NULL }
+ };
+
+ const char* data[] = {
+ // too many trailing commas
+ " function a(b,,) {}",
+ " function* a(b,,) {}",
+ "(function a(b,,) {});",
+ "(function* a(b,,) {});",
+ "(function (b,,) {});",
+ "(function* (b,,) {});",
+ " function a(b,c,d,,) {}",
+ " function* a(b,c,d,,) {}",
+ "(function a(b,c,d,,) {});",
+ "(function* a(b,c,d,,) {});",
+ "(function (b,c,d,,) {});",
+ "(function* (b,c,d,,) {});",
+ "(b,,) => {};",
+ "(b,c,d,,) => {};",
+ "a(1,,);",
+ "a(1,2,3,,);",
+ // only a trailing comma and no parameters
+ " function a1(,) {}",
+ " function* a2(,) {}",
+ "(function a3(,) {});",
+ "(function* a4(,) {});",
+ "(function (,) {});",
+ "(function* (,) {});",
+ "(,) => {};",
+ "a1(,);",
+ // no trailing commas after rest parameter declaration
+ " function a(...b,) {}",
+ " function* a(...b,) {}",
+ "(function a(...b,) {});",
+ "(function* a(...b,) {});",
+ "(function (...b,) {});",
+ "(function* (...b,) {});",
+ " function a(b, c, ...d,) {}",
+ " function* a(b, c, ...d,) {}",
+ "(function a(b, c, ...d,) {});",
+ "(function* a(b, c, ...d,) {});",
+ "(function (b, c, ...d,) {});",
+ "(function* (b, c, ...d,) {});",
+ "(...b,) => {};",
+ "(b, c, ...d,) => {};",
+ // parenthesized trailing comma without arrow is still an error
+ "(,);",
+ "(a,);",
+ "(a,b,c,);",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyTrailingCommas};
+ RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
}
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index 48eda3dbaa..a0fbc21f46 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -17,7 +17,7 @@ void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
#elif V8_HOST_ARCH_IA32
__asm__ __volatile__("mov %%esp, %0" : "=g"(sp_addr));
#elif V8_HOST_ARCH_ARM
- __asm__ __volatile__("str %%sp, %0" : "=g"(sp_addr));
+ __asm__ __volatile__("str sp, %0" : "=g"(sp_addr));
#elif V8_HOST_ARCH_ARM64
__asm__ __volatile__("mov x16, sp; str x16, %0" : "=g"(sp_addr));
#elif V8_HOST_ARCH_MIPS
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 48633f5da1..2a133bf1f8 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -51,17 +51,17 @@ TEST(ProfileNodeFindOrAddChild) {
CcTest::InitializeVM();
ProfileTree tree(CcTest::i_isolate());
ProfileNode* node = tree.root();
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
+ CodeEntry entry1(i::CodeEventListener::FUNCTION_TAG, "aaa");
ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
CHECK(childNode1);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
+ CodeEntry entry2(i::CodeEventListener::FUNCTION_TAG, "bbb");
ProfileNode* childNode2 = node->FindOrAddChild(&entry2);
CHECK(childNode2);
CHECK_NE(childNode1, childNode2);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
CHECK_EQ(childNode2, node->FindOrAddChild(&entry2));
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
+ CodeEntry entry3(i::CodeEventListener::FUNCTION_TAG, "ccc");
ProfileNode* childNode3 = node->FindOrAddChild(&entry3);
CHECK(childNode3);
CHECK_NE(childNode1, childNode3);
@@ -77,15 +77,15 @@ TEST(ProfileNodeFindOrAddChildForSameFunction) {
const char* aaa = "aaa";
ProfileTree tree(CcTest::i_isolate());
ProfileNode* node = tree.root();
- CodeEntry entry1(i::Logger::FUNCTION_TAG, aaa);
+ CodeEntry entry1(i::CodeEventListener::FUNCTION_TAG, aaa);
ProfileNode* childNode1 = node->FindOrAddChild(&entry1);
CHECK(childNode1);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry1));
// The same function again.
- CodeEntry entry2(i::Logger::FUNCTION_TAG, aaa);
+ CodeEntry entry2(i::CodeEventListener::FUNCTION_TAG, aaa);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry2));
// Now with a different security token.
- CodeEntry entry3(i::Logger::FUNCTION_TAG, aaa);
+ CodeEntry entry3(i::CodeEventListener::FUNCTION_TAG, aaa);
CHECK_EQ(childNode1, node->FindOrAddChild(&entry3));
}
@@ -122,9 +122,9 @@ class ProfileTreeTestHelper {
TEST(ProfileTreeAddPathFromEnd) {
CcTest::InitializeVM();
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
+ CodeEntry entry1(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry entry2(i::CodeEventListener::FUNCTION_TAG, "bbb");
+ CodeEntry entry3(i::CodeEventListener::FUNCTION_TAG, "ccc");
ProfileTree tree(CcTest::i_isolate());
ProfileTreeTestHelper helper(&tree);
CHECK(!helper.Walk(&entry1));
@@ -187,7 +187,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
empty_tree.root()->IncrementSelfTicks();
CHECK_EQ(1u, empty_tree.root()->self_ticks());
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
+ CodeEntry entry1(i::CodeEventListener::FUNCTION_TAG, "aaa");
CodeEntry* e1_path[] = {&entry1};
std::vector<CodeEntry*> e1_path_vec(e1_path, e1_path + arraysize(e1_path));
@@ -201,7 +201,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
CHECK_EQ(1u, single_child_tree.root()->self_ticks());
CHECK_EQ(1u, node1->self_ticks());
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
+ CodeEntry entry2(i::CodeEventListener::FUNCTION_TAG, "bbb");
CodeEntry* e2_e1_path[] = {&entry2, &entry1};
std::vector<CodeEntry*> e2_e1_path_vec(e2_e1_path,
e2_e1_path + arraysize(e2_e1_path));
@@ -227,7 +227,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
CodeEntry* e2_path[] = {&entry2};
std::vector<CodeEntry*> e2_path_vec(e2_path, e2_path + arraysize(e2_path));
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
+ CodeEntry entry3(i::CodeEventListener::FUNCTION_TAG, "ccc");
CodeEntry* e3_path[] = {&entry3};
std::vector<CodeEntry*> e3_path_vec(e3_path, e3_path + arraysize(e3_path));
@@ -277,10 +277,10 @@ static inline i::Address ToAddress(int n) {
TEST(CodeMapAddCode) {
CodeMap code_map;
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
- CodeEntry entry4(i::Logger::FUNCTION_TAG, "ddd");
+ CodeEntry entry1(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry entry2(i::CodeEventListener::FUNCTION_TAG, "bbb");
+ CodeEntry entry3(i::CodeEventListener::FUNCTION_TAG, "ccc");
+ CodeEntry entry4(i::CodeEventListener::FUNCTION_TAG, "ddd");
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
@@ -307,8 +307,8 @@ TEST(CodeMapAddCode) {
TEST(CodeMapMoveAndDeleteCode) {
CodeMap code_map;
- CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
- CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
+ CodeEntry entry1(i::CodeEventListener::FUNCTION_TAG, "aaa");
+ CodeEntry entry2(i::CodeEventListener::FUNCTION_TAG, "bbb");
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
@@ -316,7 +316,7 @@ TEST(CodeMapMoveAndDeleteCode) {
code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1700)); // Deprecate bbb.
CHECK(!code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1700)));
- CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
+ CodeEntry entry3(i::CodeEventListener::FUNCTION_TAG, "ccc");
code_map.AddCode(ToAddress(0x1750), &entry3, 0x100);
CHECK(!code_map.FindEntry(ToAddress(0x1700)));
CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1750)));
@@ -344,12 +344,14 @@ class TestSetup {
TEST(RecordTickSample) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::heap());
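+  // CpuProfilesCollection is now constructed from the isolate and needs a
+  // CpuProfiler attached explicitly instead of reaching one via the heap.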
+ CpuProfilesCollection profiles(CcTest::i_isolate());
+ CpuProfiler profiler(CcTest::i_isolate());
+ profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", false);
ProfileGenerator generator(&profiles);
- CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
- CodeEntry* entry2 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "bbb");
- CodeEntry* entry3 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "ccc");
+ CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
+ CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
+ CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
@@ -397,6 +399,10 @@ TEST(RecordTickSample) {
ProfileNode* node4 = top_down_test_helper.Walk(entry1, entry3, entry1);
CHECK(node4);
CHECK_EQ(entry1, node4->entry());
+
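+  // The entries were allocated with |new| above (NewCodeEntry is no longer
+  // used here), so the test owns and frees them.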
+ delete entry1;
+ delete entry2;
+ delete entry3;
}
@@ -410,12 +416,14 @@ static void CheckNodeIds(ProfileNode* node, unsigned* expectedId) {
TEST(SampleIds) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::heap());
+ CpuProfilesCollection profiles(CcTest::i_isolate());
+ CpuProfiler profiler(CcTest::i_isolate());
+ profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", true);
ProfileGenerator generator(&profiles);
- CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
- CodeEntry* entry2 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "bbb");
- CodeEntry* entry3 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "ccc");
+ CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
+ CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
+ CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
@@ -456,15 +464,21 @@ TEST(SampleIds) {
for (int i = 0; i < 3; i++) {
CHECK_EQ(expected_id[i], profile->sample(i)->id());
}
+
+ delete entry1;
+ delete entry2;
+ delete entry3;
}
TEST(NoSamples) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::heap());
+ CpuProfilesCollection profiles(CcTest::i_isolate());
+ CpuProfiler profiler(CcTest::i_isolate());
+ profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", false);
ProfileGenerator generator(&profiles);
- CodeEntry* entry1 = profiles.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
+ CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
// We are building the following calls tree:
@@ -481,6 +495,8 @@ TEST(NoSamples) {
CHECK_EQ(3u, nodeId - 1);
CHECK_EQ(0, profile->samples_count());
+
+ delete entry1;
}
@@ -503,17 +519,18 @@ TEST(RecordStackTraceAtStartProfiling) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
+ std::unique_ptr<i::CpuProfiler> iprofiler(
+ new i::CpuProfiler(CcTest::i_isolate()));
+ i::ProfilerExtension::set_profiler(iprofiler.get());
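+  // The isolate no longer exposes a shared CpuProfiler; the test creates its
+  // own and registers it with the extension backing startProfiling().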
- CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
- CHECK_EQ(0, profiler->GetProfilesCount());
CompileRun(
"function c() { startProfiling(); }\n"
"function b() { c(); }\n"
"function a() { b(); }\n"
"a();\n"
"stopProfiling();");
- CHECK_EQ(1, profiler->GetProfilesCount());
- CpuProfile* profile = profiler->GetProfile(0);
+ CHECK_EQ(1, iprofiler->GetProfilesCount());
+ CpuProfile* profile = iprofiler->GetProfile(0);
const ProfileTree* topDown = profile->top_down();
const ProfileNode* current = topDown->root();
const_cast<ProfileNode*>(current)->Print(0);
@@ -544,7 +561,9 @@ TEST(RecordStackTraceAtStartProfiling) {
TEST(Issue51919) {
- CpuProfilesCollection collection(CcTest::heap());
+ CpuProfilesCollection collection(CcTest::i_isolate());
+ CpuProfiler profiler(CcTest::i_isolate());
+ collection.set_cpu_profiler(&profiler);
i::EmbeddedVector<char*,
CpuProfilesCollection::kMaxSimultaneousProfiles> titles;
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
@@ -579,10 +598,9 @@ TEST(ProfileNodeScriptId) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
+ std::unique_ptr<CpuProfiler> iprofiler(new CpuProfiler(CcTest::i_isolate()));
+ i::ProfilerExtension::set_profiler(iprofiler.get());
- v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
- i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
- CHECK_EQ(0, iprofiler->GetProfilesCount());
v8::Local<v8::Script> script_a =
v8_compile(v8_str("function a() { startProfiling(); }\n"));
script_a->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
@@ -618,16 +636,12 @@ TEST(ProfileNodeScriptId) {
CHECK_EQ(script_a->GetUnboundScript()->GetId(), current->GetScriptId());
}
-
-
-
static const char* line_number_test_source_existing_functions =
"function foo_at_the_first_line() {\n"
"}\n"
"foo_at_the_first_line();\n"
"function lazy_func_at_forth_line() {}\n";
-
static const char* line_number_test_source_profile_time_functions =
"// Empty first line\n"
"function bar_at_the_second_line() {\n"
@@ -636,23 +650,18 @@ static const char* line_number_test_source_profile_time_functions =
"bar_at_the_second_line();\n"
"function lazy_func_at_6th_line() {}";
-int GetFunctionLineNumber(LocalContext* env, const char* name) {
- CpuProfiler* profiler = CcTest::i_isolate()->cpu_profiler();
- CodeMap* code_map = profiler->generator()->code_map();
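+// Resolves |name| in |env|, looks its code up in the profiler's code map, and
+// returns the line number recorded for the entry (0 if none was recorded).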
+int GetFunctionLineNumber(CpuProfiler& profiler, LocalContext& env,
+ const char* name) {
+ CodeMap* code_map = profiler.generator()->code_map();
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- (*(*env))
- ->Global()
- ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(),
- v8_str(name))
- .ToLocalChecked())));
+ env->Global()->Get(env.local(), v8_str(name)).ToLocalChecked())));
CodeEntry* func_entry = code_map->FindEntry(func->abstract_code()->address());
if (!func_entry)
FATAL(name);
return func_entry->line_number();
}
-
TEST(LineNumber) {
i::FLAG_use_inlining = false;
@@ -665,45 +674,38 @@ TEST(LineNumber) {
CompileRun(line_number_test_source_existing_functions);
- CpuProfiler* profiler = isolate->cpu_profiler();
- profiler->StartProfiling("LineNumber");
+ CpuProfiler profiler(isolate);
+ profiler.StartProfiling("LineNumber");
CompileRun(line_number_test_source_profile_time_functions);
- profiler->processor()->StopSynchronously();
+ profiler.processor()->StopSynchronously();
bool is_lazy = i::FLAG_lazy && !(i::FLAG_ignition && i::FLAG_ignition_eager);
- CHECK_EQ(1, GetFunctionLineNumber(&env, "foo_at_the_first_line"));
+ CHECK_EQ(1, GetFunctionLineNumber(profiler, env, "foo_at_the_first_line"));
CHECK_EQ(is_lazy ? 0 : 4,
- GetFunctionLineNumber(&env, "lazy_func_at_forth_line"));
- CHECK_EQ(2, GetFunctionLineNumber(&env, "bar_at_the_second_line"));
+ GetFunctionLineNumber(profiler, env, "lazy_func_at_forth_line"));
+ CHECK_EQ(2, GetFunctionLineNumber(profiler, env, "bar_at_the_second_line"));
CHECK_EQ(is_lazy ? 0 : 6,
- GetFunctionLineNumber(&env, "lazy_func_at_6th_line"));
+ GetFunctionLineNumber(profiler, env, "lazy_func_at_6th_line"));
- profiler->StopProfiling("LineNumber");
+ profiler.StopProfiling("LineNumber");
}
-
-
TEST(BailoutReason) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
+ std::unique_ptr<CpuProfiler> iprofiler(new CpuProfiler(CcTest::i_isolate()));
+ i::ProfilerExtension::set_profiler(iprofiler.get());
- v8::CpuProfiler* profiler = env->GetIsolate()->GetCpuProfiler();
- i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
CHECK_EQ(0, iprofiler->GetProfilesCount());
v8::Local<v8::Script> script =
v8_compile(v8_str("function Debugger() {\n"
" debugger;\n"
" startProfiling();\n"
"}\n"
- "function TryFinally() {\n"
- " try {\n"
- " Debugger();\n"
- " } finally { };\n"
- "}\n"
- "TryFinally();\n"
+ "Debugger();\n"
"stopProfiling();"));
script->Run(v8::Isolate::GetCurrent()->GetCurrentContext()).ToLocalChecked();
CHECK_EQ(1, iprofiler->GetProfilesCount());
@@ -715,15 +717,10 @@ TEST(BailoutReason) {
// The tree should look like this:
// (root)
// ""
- // kTryFinallyStatement
- // kDebuggerStatement
+ // kDebuggerStatement
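+  // (kTryFinallyStatement is gone: the TryFinally wrapper was dropped from
+  //  the test, apparently because try/finally no longer forces a bailout
+  //  here.)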
current = PickChild(current, "");
CHECK(const_cast<v8::CpuProfileNode*>(current));
- current = PickChild(current, "TryFinally");
- CHECK(const_cast<v8::CpuProfileNode*>(current));
- CHECK(!strcmp("TryFinallyStatement", current->GetBailoutReason()));
-
current = PickChild(current, "Debugger");
CHECK(const_cast<v8::CpuProfileNode*>(current));
CHECK(!strcmp("DebuggerStatement", current->GetBailoutReason()));
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 0a153b78e0..504a52bc28 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
+#include <memory>
#include <sstream>
#include "include/v8.h"
@@ -171,7 +172,6 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
void TestRegExpParser(bool lookbehind) {
FLAG_harmony_regexp_lookbehind = lookbehind;
- FLAG_harmony_unicode_regexps = true;
CHECK_PARSE_ERROR("?");
@@ -438,6 +438,23 @@ void TestRegExpParser(bool lookbehind) {
CHECK_MIN_MAX("a(?=b)c", 2, 2);
CHECK_MIN_MAX("a(?=bbb|bb)c", 2, 2);
CHECK_MIN_MAX("a(?!bbb|bb)c", 2, 2);
+
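+  // Named captures: \k<name> references resolve to numeric back references.
+  // A reference to the group still being parsed matches the empty string and
+  // drops out of the parse tree, mirroring numeric back references.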
+ FLAG_harmony_regexp_named_captures = true;
+ CheckParseEq("(?<a>x)(?<b>x)(?<c>x)\\k<a>",
+ "(: (^ 'x') (^ 'x') (^ 'x') (<- 1))", true);
+ CheckParseEq("(?<a>x)(?<b>x)(?<c>x)\\k<b>",
+ "(: (^ 'x') (^ 'x') (^ 'x') (<- 2))", true);
+ CheckParseEq("(?<a>x)(?<b>x)(?<c>x)\\k<c>",
+ "(: (^ 'x') (^ 'x') (^ 'x') (<- 3))", true);
+ CheckParseEq("(?<a>a)\\k<a>", "(: (^ 'a') (<- 1))", true);
+ CheckParseEq("(?<a>a\\k<a>)", "(^ 'a')", true);
+ CheckParseEq("(?<a>\\k<a>a)", "(^ 'a')", true);
+ CheckParseEq("(?<a>\\k<b>)(?<b>\\k<a>)", "(: (^ (<- 2)) (^ (<- 1)))", true);
+ CheckParseEq("\\k<a>(?<a>a)", "(: (<- 1) (^ 'a'))", true);
+
+ CheckParseEq("(?<\\u{03C0}>a)", "(^ 'a')", true);
+ CheckParseEq("(?<\\u03C0>a)", "(^ 'a')", true);
+ FLAG_harmony_regexp_named_captures = false;
}
@@ -450,7 +467,6 @@ TEST(ParserWithoutLookbehind) {
TestRegExpParser(true); // Lookbehind enabled.
}
-
TEST(ParserRegression) {
CheckParseEq("[A-Z$-][x]", "(! [A-Z $ -] [x])");
CheckParseEq("a{3,4*}", "(: 'a{3,' (# 0 - g '4') '}')");
@@ -458,17 +474,19 @@ TEST(ParserRegression) {
CheckParseEq("a|", "(| 'a' %)");
}
-static void ExpectError(const char* input,
- const char* expected) {
+static void ExpectError(const char* input, const char* expected,
+ bool unicode = false) {
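+  // With |unicode| set, the pattern is parsed with the 'u' flag, which the
+  // named-capture error tests below opt into.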
v8::HandleScope scope(CcTest::isolate());
Zone zone(CcTest::i_isolate()->allocator());
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
- CHECK(!v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
+ JSRegExp::Flags flags = JSRegExp::kNone;
+ if (unicode) flags |= JSRegExp::kUnicode;
+ CHECK(!v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), &zone,
+ &reader, flags, &result));
CHECK(result.tree == NULL);
CHECK(!result.error.is_null());
- v8::base::SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
+ std::unique_ptr<char[]> str = result.error->ToCString(ALLOW_NULLS);
CHECK_EQ(0, strcmp(expected, str.get()));
}
@@ -499,6 +517,23 @@ TEST(Errors) {
os << "()";
}
ExpectError(os.str().c_str(), kTooManyCaptures);
+
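+  // Error coverage for named captures: malformed or duplicate names, bad
+  // Unicode escapes in a name, and references to names that are never bound.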
+ FLAG_harmony_regexp_named_captures = true;
+ const char* kInvalidCaptureName = "Invalid capture group name";
+ ExpectError("(?<>.)", kInvalidCaptureName, true);
+ ExpectError("(?<1>.)", kInvalidCaptureName, true);
+ ExpectError("(?<_%>.)", kInvalidCaptureName, true);
+ ExpectError("\\k<a", kInvalidCaptureName, true);
+ const char* kDuplicateCaptureName = "Duplicate capture group name";
+ ExpectError("(?<a>.)(?<a>.)", kDuplicateCaptureName, true);
+ const char* kInvalidUnicodeEscape = "Invalid Unicode escape sequence";
+ ExpectError("(?<\\u{FISK}", kInvalidUnicodeEscape, true);
+ const char* kInvalidCaptureReferenced = "Invalid named capture referenced";
+ ExpectError("\\k<a>", kInvalidCaptureReferenced, true);
+ ExpectError("(?<b>)\\k<a>", kInvalidCaptureReferenced, true);
+ const char* kInvalidNamedReference = "Invalid named reference";
+ ExpectError("\\ka", kInvalidNamedReference, true);
+ FLAG_harmony_regexp_named_captures = false;
}
diff --git a/deps/v8/test/cctest/test-reloc-info.cc b/deps/v8/test/cctest/test-reloc-info.cc
deleted file mode 100644
index 4346f0083e..0000000000
--- a/deps/v8/test/cctest/test-reloc-info.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/assembler.h"
-#include "src/macro-assembler.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-
-static void WriteRinfo(RelocInfoWriter* writer,
- byte* pc, RelocInfo::Mode mode, intptr_t data) {
- RelocInfo rinfo(CcTest::i_isolate(), pc, mode, data, NULL);
- writer->Write(&rinfo);
-}
-
-
-// Tests that writing both types of positions and then reading either
-// or both works as expected.
-TEST(Positions) {
- CcTest::InitializeVM();
- const int code_size = 10 * KB;
- int relocation_info_size = 10 * KB;
- const int buffer_size = code_size + relocation_info_size;
- v8::base::SmartArrayPointer<byte> buffer(new byte[buffer_size]);
-
- byte* pc = buffer.get();
- byte* buffer_end = buffer.get() + buffer_size;
-
- RelocInfoWriter writer(buffer_end, pc);
- byte* relocation_info_end = buffer_end - relocation_info_size;
- for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
- RelocInfo::Mode mode = (i % 2 == 0) ?
- RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
- if (mode == RelocInfo::STATEMENT_POSITION) {
- printf("TEST WRITING STATEMENT %p %d\n", pc, pos);
- } else {
- printf("TEST WRITING POSITION %p %d\n", pc, pos);
- }
- WriteRinfo(&writer, pc, mode, pos);
- CHECK(writer.pos() - RelocInfoWriter::kMaxSize >= relocation_info_end);
- }
-
- writer.Finish();
- relocation_info_size = static_cast<int>(buffer_end - writer.pos());
- MacroAssembler assm(CcTest::i_isolate(), nullptr, 0, CodeObjectRequired::kNo);
- CodeDesc desc = {buffer.get(), buffer_size, code_size,
- relocation_info_size, 0, &assm};
-
- // Read only (non-statement) positions.
- {
- RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::POSITION));
- pc = buffer.get();
- for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
- printf("TESTING 1: %d\n", i);
- RelocInfo::Mode mode = (i % 2 == 0) ?
- RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
- if (mode == RelocInfo::POSITION) {
- CHECK_EQ(pc, it.rinfo()->pc());
- CHECK_EQ(mode, it.rinfo()->rmode());
- CHECK_EQ(pos, static_cast<int>(it.rinfo()->data()));
- it.next();
- }
- }
- CHECK(it.done());
- }
-
- // Read only statement positions.
- {
- RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
- pc = buffer.get();
- for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
- RelocInfo::Mode mode = (i % 2 == 0) ?
- RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
- if (mode == RelocInfo::STATEMENT_POSITION) {
- CHECK_EQ(pc, it.rinfo()->pc());
- CHECK_EQ(mode, it.rinfo()->rmode());
- CHECK_EQ(pos, static_cast<int>(it.rinfo()->data()));
- it.next();
- }
- }
- CHECK(it.done());
- }
-
- // Read both types of positions.
- {
- RelocIterator it(desc, RelocInfo::kPositionMask);
- pc = buffer.get();
- for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
- RelocInfo::Mode mode = (i % 2 == 0) ?
- RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
- CHECK_EQ(pc, it.rinfo()->pc());
- CHECK_EQ(mode, it.rinfo()->rmode());
- CHECK_EQ(pos, static_cast<int>(it.rinfo()->data()));
- it.next();
- }
- CHECK(it.done());
- }
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
index adfeb28b2f..b2e6ec3a3b 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
@@ -23,8 +23,7 @@ using namespace v8::internal::compiler;
static int32_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationArm) {
- CcTest::InitializeVM();
+TEST(WasmRelocationArmMemoryReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
@@ -77,4 +76,59 @@ TEST(WasmRelocationArm) {
#endif
}
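+// WASM_MEMORY_SIZE_REFERENCE relocation: the code compares an immediate
+// against itself and returns 0xdeadbeef on mismatch, so it must keep passing
+// after update_wasm_memory_reference patches both occurrences by |diff|.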
+TEST(WasmRelocationArmMemorySizeReference) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ DummyStaticFunction(NULL);
+ int32_t size = 512;
+ Label fail;
+
+ Assembler assm(isolate, buffer, sizeof buffer);
+
+ __ mov(r0, Operand(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ cmp(r0, Operand(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ b(ne, &fail);
+ __ mov(pc, Operand(lr));
+ __ bind(&fail);
+ __ mov(r0, Operand(0xdeadbeef));
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ CSignature0<int32_t> csig;
+ CodeRunner<int32_t> runnable(isolate, code, &csig);
+ int32_t ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+ ::printf("f() = %d\n\n", ret_value);
+#endif
+ size_t diff = 512;
+
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemorySizeReference(mode)) {
+ it.rinfo()->update_wasm_memory_reference(
+ reinterpret_cast<Address>(1234), reinterpret_cast<Address>(1234),
+ it.rinfo()->wasm_memory_size_reference(),
+ it.rinfo()->wasm_memory_size_reference() + diff, SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef DEBUG
+ code->Print(os);
+ ::printf("f() = %d\n\n", ret_value);
+#endif
+}
#undef __
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
index 48f9e85ba1..3b49f00afc 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
@@ -24,8 +24,7 @@ using namespace v8::internal::compiler;
static int64_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationArm64) {
- CcTest::InitializeVM();
+TEST(WasmRelocationArm64MemoryReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
@@ -51,7 +50,7 @@ TEST(WasmRelocationArm64) {
#ifdef DEBUG
OFStream os(stdout);
code->Print(os);
- ::printf("f() = %ld\n\n", ret_value);
+ ::printf("f() = %" PRIx64 "\n\n", ret_value);
#endif
size_t offset = 1234;
@@ -75,7 +74,64 @@ TEST(WasmRelocationArm64) {
#ifdef DEBUG
code->Print(os);
- ::printf("f() = %ld\n\n", ret_value);
+ ::printf("f() = %" PRIx64 "\n\n", ret_value);
+#endif
+}
+
+TEST(WasmRelocationArm64MemorySizeReference) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ DummyStaticFunction(NULL);
+ Immediate size = Immediate(512, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ Label fail;
+
+ MacroAssembler masm(isolate, buffer, sizeof buffer,
+ v8::internal::CodeObjectRequired::kYes);
+
+ __ Mov(x0, size);
+ __ Cmp(x0, size);
+ __ B(ne, &fail);
+ __ Ret();
+ __ Bind(&fail);
+ __ Mov(x0, Immediate(0xdeadbeef));
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ CSignature0<int64_t> csig;
+ CodeRunner<int64_t> runnable(isolate, code, &csig);
+ int64_t ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef DEBUG
+ OFStream os(stdout);
+ code->Print(os);
+ ::printf("f() = %" PRIx64 "\n\n", ret_value);
+#endif
+ int32_t diff = 512;
+
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemorySizeReference(mode)) {
+ it.rinfo()->update_wasm_memory_reference(
+ reinterpret_cast<Address>(0x1234), reinterpret_cast<Address>(0x1234),
+ it.rinfo()->wasm_memory_size_reference(),
+ it.rinfo()->wasm_memory_size_reference() + diff, SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef DEBUG
+ code->Print(os);
+ ::printf("f() = %" PRIx64 "\n\n", ret_value);
#endif
}
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
index 135b522df5..305d0089c3 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
@@ -23,8 +23,7 @@ using namespace v8::internal::compiler;
static int32_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationIa32) {
- CcTest::InitializeVM();
+TEST(WasmRelocationIa32MemoryReference) {
Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate->allocator());
HandleScope scope(isolate);
@@ -78,7 +77,6 @@ TEST(WasmRelocationIa32) {
CHECK_EQ(ret_value, imm + offset);
#ifdef OBJECT_PRINT
- // OFStream os(stdout);
code->Print(os);
begin = code->instruction_start();
end = begin + code->instruction_size();
@@ -86,4 +84,67 @@ TEST(WasmRelocationIa32) {
#endif
}
+TEST(WasmRelocationIa32MemorySizeReference) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Zone zone(isolate->allocator());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ DummyStaticFunction(NULL);
+ int32_t size = 80;
+ Label fail;
+
+ __ mov(eax, Immediate(reinterpret_cast<Address>(size),
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ cmp(eax, Immediate(reinterpret_cast<Address>(size),
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ j(not_equal, &fail);
+ __ ret(0);
+ __ bind(&fail);
+ __ mov(eax, 0xdeadbeef);
+ __ ret(0);
+
+ CSignature0<int32_t> csig;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ USE(code);
+
+ CodeRunner<int32_t> runnable(isolate, code, &csig);
+ int32_t ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+ byte* begin = code->instruction_start();
+ byte* end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+
+ size_t offset = 10;
+
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemorySizeReference(mode)) {
+ it.rinfo()->update_wasm_memory_reference(
+ reinterpret_cast<Address>(1234), reinterpret_cast<Address>(1234),
+ it.rinfo()->wasm_memory_size_reference(),
+ it.rinfo()->wasm_memory_size_reference() + offset, SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef OBJECT_PRINT
+ code->Print(os);
+ begin = code->instruction_start();
+ end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
#undef __
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
index f1b6d96b8c..11fa45164e 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
@@ -21,8 +21,8 @@ using namespace v8::internal::compiler;
#define __ assm.
static int32_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationX64movq64) {
- CcTest::InitializeVM();
+
+TEST(WasmRelocationX64MemoryReference) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
@@ -80,4 +80,63 @@ TEST(WasmRelocationX64movq64) {
#endif
}
+TEST(WasmRelocationX64WasmMemorySizeReference) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ DummyStaticFunction(NULL);
+ int32_t size = 512;
+ Label fail;
+
+ __ movl(rax, Immediate(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ cmpl(rax, Immediate(size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ j(not_equal, &fail);
+ __ ret(0);
+ __ bind(&fail);
+ __ movl(rax, Immediate(0xdeadbeef));
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ USE(code);
+
+ CSignature0<int64_t> csig;
+ CodeRunner<int64_t> runnable(isolate, code, &csig);
+ int64_t ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+ byte* begin = code->instruction_start();
+ byte* end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+ int32_t diff = 512;
+
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemorySizeReference(mode)) {
+ it.rinfo()->update_wasm_memory_reference(
+ reinterpret_cast<Address>(1234), reinterpret_cast<Address>(1234),
+ it.rinfo()->wasm_memory_size_reference(),
+ it.rinfo()->wasm_memory_size_reference() + diff, SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef OBJECT_PRINT
+ code->Print(os);
+ begin = code->instruction_start();
+ end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
#undef __
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
index 6cbd065c1b..2156e96ecb 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
@@ -23,8 +23,7 @@ using namespace v8::internal::compiler;
static int32_t DummyStaticFunction(Object* result) { return 1; }
-TEST(WasmRelocationIa32) {
- CcTest::InitializeVM();
+TEST(WasmRelocationX87MemoryReference) {
Isolate* isolate = CcTest::i_isolate();
Zone zone(isolate->allocator());
HandleScope scope(isolate);
@@ -78,7 +77,6 @@ TEST(WasmRelocationIa32) {
CHECK_EQ(ret_value, imm + offset);
#ifdef OBJECT_PRINT
- // OFStream os(stdout);
code->Print(os);
begin = code->instruction_start();
end = begin + code->instruction_size();
@@ -86,4 +84,67 @@ TEST(WasmRelocationIa32) {
#endif
}
+TEST(WasmRelocationX87MemorySizeReference) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Zone zone(isolate->allocator());
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[4096];
+ Assembler assm(isolate, buffer, sizeof buffer);
+ DummyStaticFunction(NULL);
+ int32_t size = 80;
+ Label fail;
+
+ __ mov(eax, Immediate(reinterpret_cast<Address>(size),
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ cmp(eax, Immediate(reinterpret_cast<Address>(size),
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ __ j(not_equal, &fail);
+ __ ret(0);
+ __ bind(&fail);
+ __ mov(eax, 0xdeadbeef);
+ __ ret(0);
+
+ CSignature0<int32_t> csig;
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ USE(code);
+
+ CodeRunner<int32_t> runnable(isolate, code, &csig);
+ int32_t ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef OBJECT_PRINT
+ OFStream os(stdout);
+ code->Print(os);
+ byte* begin = code->instruction_start();
+ byte* end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+
+ size_t offset = 10;
+
+ int mode_mask = (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemorySizeReference(mode)) {
+ it.rinfo()->update_wasm_memory_reference(
+ reinterpret_cast<Address>(1234), reinterpret_cast<Address>(1234),
+ it.rinfo()->wasm_memory_size_reference(),
+ it.rinfo()->wasm_memory_size_reference() + offset, SKIP_ICACHE_FLUSH);
+ }
+ }
+
+ ret_value = runnable.Call();
+ CHECK_NE(ret_value, 0xdeadbeef);
+
+#ifdef OBJECT_PRINT
+ code->Print(os);
+ begin = code->instruction_start();
+ end = begin + code->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
#undef __
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index cd349f9d73..b2bf51b1c6 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -31,11 +31,12 @@
#include "src/v8.h"
-#include "src/ast/scopeinfo.h"
#include "src/bootstrapper.h"
#include "src/compilation-cache.h"
+#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/heap/spaces.h"
+#include "src/macro-assembler.h"
#include "src/objects.h"
#include "src/parsing/parser.h"
#include "src/runtime/runtime.h"
@@ -46,7 +47,7 @@
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
using namespace v8::internal;
@@ -91,11 +92,11 @@ static Vector<const byte> Serialize(v8::Isolate* isolate) {
Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
internal_isolate->heap()->CollectAllAvailableGarbage("serialize");
- SnapshotByteSink sink;
- StartupSerializer ser(internal_isolate, &sink);
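+  // StartupSerializer now manages its own sink; the constructor takes a
+  // FunctionCodeHandling mode instead (kClear leaves compiled function code
+  // out of the snapshot).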
+ StartupSerializer ser(internal_isolate,
+ v8::SnapshotCreator::FunctionCodeHandling::kClear);
ser.SerializeStrongReferences();
ser.SerializeWeakReferencesAndDeferred();
- SnapshotData snapshot_data(ser);
+ SnapshotData snapshot_data(&ser);
return WritePayload(snapshot_data.RawData());
}
@@ -280,19 +281,17 @@ static void PartiallySerializeObject(Vector<const byte>* startup_blob_out,
}
env.Reset();
- SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &startup_sink);
+ StartupSerializer startup_serializer(
+ isolate, v8::SnapshotCreator::FunctionCodeHandling::kClear);
startup_serializer.SerializeStrongReferences();
- SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- &partial_sink);
+ PartialSerializer partial_serializer(isolate, &startup_serializer);
partial_serializer.Serialize(&raw_foo);
startup_serializer.SerializeWeakReferencesAndDeferred();
- SnapshotData startup_snapshot(startup_serializer);
- SnapshotData partial_snapshot(partial_serializer);
+ SnapshotData startup_snapshot(&startup_serializer);
+ SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
@@ -380,17 +379,17 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
env.Reset();
SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &startup_sink);
+ StartupSerializer startup_serializer(
+ isolate, v8::SnapshotCreator::FunctionCodeHandling::kClear);
startup_serializer.SerializeStrongReferences();
SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- &partial_sink);
+ PartialSerializer partial_serializer(isolate, &startup_serializer);
partial_serializer.Serialize(&raw_context);
startup_serializer.SerializeWeakReferencesAndDeferred();
- SnapshotData startup_snapshot(startup_serializer);
- SnapshotData partial_snapshot(partial_serializer);
+ SnapshotData startup_snapshot(&startup_serializer);
+ SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
@@ -498,17 +497,17 @@ static void PartiallySerializeCustomContext(
env.Reset();
SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &startup_sink);
+ StartupSerializer startup_serializer(
+ isolate, v8::SnapshotCreator::FunctionCodeHandling::kClear);
startup_serializer.SerializeStrongReferences();
SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- &partial_sink);
+ PartialSerializer partial_serializer(isolate, &startup_serializer);
partial_serializer.Serialize(&raw_context);
startup_serializer.SerializeWeakReferencesAndDeferred();
- SnapshotData startup_snapshot(startup_serializer);
- SnapshotData partial_snapshot(partial_serializer);
+ SnapshotData startup_snapshot(&startup_serializer);
+ SnapshotData partial_snapshot(&partial_serializer);
*partial_blob_out = WritePayload(partial_snapshot.RawData());
*startup_blob_out = WritePayload(startup_snapshot.RawData());
@@ -792,7 +791,7 @@ bool IsCompiled(const char* name) {
TEST(SnapshotDataBlobWithWarmup) {
DisableTurbofan();
- const char* warmup = "Math.tan(1); Math.sin = 1;";
+ const char* warmup = "Math.abs(1); Math.random = 1;";
v8::StartupData cold = v8::V8::CreateSnapshotDataBlob();
v8::StartupData warm = v8::V8::WarmUpSnapshotDataBlob(cold, warmup);
@@ -811,9 +810,9 @@ TEST(SnapshotDataBlobWithWarmup) {
v8::Context::Scope c_scope(context);
// Running the warmup script has effect on whether functions are
// pre-compiled, but does not pollute the context.
- CHECK(IsCompiled("Math.tan"));
- CHECK(!IsCompiled("Math.cos"));
- CHECK(CompileRun("Math.sin")->IsFunction());
+ CHECK(IsCompiled("Math.abs"));
+ CHECK(!IsCompiled("Number.isFinite"));
+ CHECK(CompileRun("Math.random")->IsFunction());
}
isolate->Dispose();
}
@@ -821,9 +820,9 @@ TEST(SnapshotDataBlobWithWarmup) {
TEST(CustomSnapshotDataBlobWithWarmup) {
DisableTurbofan();
const char* source =
- "function f() { return Math.sin(1); }\n"
- "function g() { return Math.cos(1); }\n"
- "Math.tan(1);"
+ "function f() { return Math.abs(1); }\n"
+ "function g() { return Number.isFinite(1); }\n"
+ "Number.isNaN(1);"
"var a = 5";
const char* warmup = "a = f()";
@@ -845,10 +844,10 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
// Running the warmup script has effect on whether functions are
// pre-compiled, but does not pollute the context.
CHECK(IsCompiled("f"));
- CHECK(IsCompiled("Math.sin"));
+ CHECK(IsCompiled("Math.abs"));
CHECK(!IsCompiled("g"));
- CHECK(!IsCompiled("Math.cos"));
- CHECK(!IsCompiled("Math.tan"));
+ CHECK(!IsCompiled("Number.isFinite"));
+ CHECK(!IsCompiled("Number.isNaN"));
CHECK_EQ(5, CompileRun("a")->Int32Value(context).FromJust());
}
isolate->Dispose();
@@ -1062,10 +1061,14 @@ TEST(CodeSerializerLargeCodeObject) {
v8::HandleScope scope(CcTest::isolate());
+ // The serializer only tests the shared code, which is always the unoptimized
+ // code. Don't even bother generating optimized code to avoid timeouts.
+ FLAG_always_opt = false;
+
Vector<const uint8_t> source =
- ConstructSource(STATIC_CHAR_VECTOR("var j=1; try { if (j) throw 1;"),
- STATIC_CHAR_VECTOR("for(var i=0;i<1;i++)j++;"),
- STATIC_CHAR_VECTOR("} catch (e) { j=7; } j"), 10000);
+ ConstructSource(STATIC_CHAR_VECTOR("var j=1; if (!j) {"),
+ STATIC_CHAR_VECTOR("for (let i of Object.prototype);"),
+ STATIC_CHAR_VECTOR("} j=7; j"), 2000);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
@@ -1076,7 +1079,7 @@ TEST(CodeSerializerLargeCodeObject) {
CompileScript(isolate, source_str, Handle<String>(), &cache,
v8::ScriptCompiler::kProduceCodeCache);
- CHECK(isolate->heap()->InSpace(orig->code(), LO_SPACE));
+ CHECK(isolate->heap()->InSpace(orig->abstract_code(), LO_SPACE));
Handle<SharedFunctionInfo> copy;
{
@@ -1827,12 +1830,351 @@ TEST(Regress503552) {
false);
delete script_data;
- SimulateIncrementalMarking(isolate->heap());
+ heap::SimulateIncrementalMarking(isolate->heap());
script_data = CodeSerializer::Serialize(isolate, shared, source);
delete script_data;
}
+#if V8_TARGET_ARCH_X64
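+// x64-only: the hand-assembled code below uses rax/movp directly. The test
+// embeds a CELL reloc entry pointing at a new-space HeapNumber and checks
+// that the cell and its 0.3 payload survive a serialize/deserialize round
+// trip.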
+TEST(CodeSerializerCell) {
+ FLAG_serialize_toplevel = true;
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+
+ v8::HandleScope scope(CcTest::isolate());
+
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
+ assembler.enable_serializer();
+ Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(0.3);
+ CHECK(isolate->heap()->InNewSpace(*number));
+ Handle<Code> code;
+ {
+ MacroAssembler* masm = &assembler;
+ Handle<Cell> cell = isolate->factory()->NewCell(number);
+ masm->Move(rax, cell, RelocInfo::CELL);
+ masm->movp(rax, Operand(rax, 0));
+ masm->ret(0);
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ code = isolate->factory()->NewCode(desc, Code::ComputeFlags(Code::FUNCTION),
+ masm->CodeObject());
+ code->set_has_reloc_info_for_serialization(true);
+ }
+ RelocIterator rit1(*code, 1 << RelocInfo::CELL);
+ CHECK_EQ(*number, rit1.rinfo()->target_cell()->value());
+
+ Handle<String> source = isolate->factory()->empty_string();
+ Handle<SharedFunctionInfo> sfi =
+ isolate->factory()->NewSharedFunctionInfo(source, code, false);
+ ScriptData* script_data = CodeSerializer::Serialize(isolate, sfi, source);
+
+ Handle<SharedFunctionInfo> copy =
+ CodeSerializer::Deserialize(isolate, script_data, source)
+ .ToHandleChecked();
+ RelocIterator rit2(copy->code(), 1 << RelocInfo::CELL);
+ CHECK(rit2.rinfo()->target_cell()->IsCell());
+ Handle<Cell> cell(rit2.rinfo()->target_cell());
+ CHECK(cell->value()->IsHeapNumber());
+ CHECK_EQ(0.3, HeapNumber::cast(cell->value())->value());
+
+ delete script_data;
+}
+#endif // V8_TARGET_ARCH_X64
+
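+// Like CodeSerializerCell, but through an EMBEDDED_OBJECT reloc entry; the
+// full GC at the end additionally checks that the deserialized code's
+// embedded pointer is visited and kept valid by the collector.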
+TEST(CodeSerializerEmbeddedObject) {
+ FLAG_serialize_toplevel = true;
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ isolate->compilation_cache()->Disable(); // Disable same-isolate code cache.
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope(CcTest::isolate());
+
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+ Assembler::kMinimalBufferSize, &actual_size, true));
+ CHECK(buffer);
+ HandleScope handles(isolate);
+
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
+ v8::internal::CodeObjectRequired::kYes);
+ assembler.enable_serializer();
+ Handle<Object> number = isolate->factory()->NewHeapNumber(0.3);
+ CHECK(isolate->heap()->InNewSpace(*number));
+ Handle<Code> code;
+ {
+ MacroAssembler* masm = &assembler;
+ masm->Push(number);
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ code = isolate->factory()->NewCode(desc, Code::ComputeFlags(Code::FUNCTION),
+ masm->CodeObject());
+ code->set_has_reloc_info_for_serialization(true);
+ }
+ RelocIterator rit1(*code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
+ CHECK_EQ(*number, rit1.rinfo()->target_object());
+
+ Handle<String> source = isolate->factory()->empty_string();
+ Handle<SharedFunctionInfo> sfi =
+ isolate->factory()->NewSharedFunctionInfo(source, code, false);
+ ScriptData* script_data = CodeSerializer::Serialize(isolate, sfi, source);
+
+ Handle<SharedFunctionInfo> copy =
+ CodeSerializer::Deserialize(isolate, script_data, source)
+ .ToHandleChecked();
+ RelocIterator rit2(copy->code(),
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
+ CHECK(rit2.rinfo()->target_object()->IsHeapNumber());
+ CHECK_EQ(0.3, HeapNumber::cast(rit2.rinfo()->target_object())->value());
+
+ heap->CollectAllAvailableGarbage();
+
+ RelocIterator rit3(copy->code(),
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
+ CHECK(rit3.rinfo()->target_object()->IsHeapNumber());
+ CHECK_EQ(0.3, HeapNumber::cast(rit3.rinfo()->target_object())->value());
+
+ delete script_data;
+}
+
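+// A snapshot may hold several contexts; each AddContext call returns the
+// index later passed to Context::FromSnapshot. The third context below never
+// ran the script, so "f" must not leak into it.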
+TEST(SnapshotCreatorMultipleContexts) {
+ DisableTurbofan();
+ v8::StartupData blob;
+ {
+ v8::SnapshotCreator creator;
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ CompileRun("var f = function() { return 1; }");
+ CHECK_EQ(0, creator.AddContext(context));
+ }
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ CompileRun("var f = function() { return 2; }");
+ CHECK_EQ(1, creator.AddContext(context));
+ }
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ CHECK_EQ(2, creator.AddContext(context));
+ }
+ blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 1);
+ }
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 1).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 2);
+ }
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 2).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectUndefined("this.f");
+ }
+ }
+
+ isolate->Dispose();
+ delete[] blob.data;
+}
+
+static void SerializedCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(v8_num(42));
+}
+
+static void SerializedCallbackReplacement(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(v8_num(1337));
+}
+
+intptr_t original_external_references[] = {
+ reinterpret_cast<intptr_t>(SerializedCallback), 0};
+
+intptr_t replaced_external_references[] = {
+ reinterpret_cast<intptr_t>(SerializedCallbackReplacement), 0};
+
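+// External references (such as native callbacks) are serialized by their
+// position in a null-terminated table; deserializing with a table of the
+// same shape but different addresses rebinds the callbacks, as the
+// 42-vs-1337 expectations below show.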
+TEST(SnapshotCreatorExternalReferences) {
+ DisableTurbofan();
+ v8::StartupData blob;
+ {
+ v8::SnapshotCreator creator(original_external_references);
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ v8::Local<v8::FunctionTemplate> callback =
+ v8::FunctionTemplate::New(isolate, SerializedCallback);
+ v8::Local<v8::Value> function =
+ callback->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()->Set(context, v8_str("f"), function).FromJust());
+ ExpectInt32("f()", 42);
+ CHECK_EQ(0, creator.AddContext(context));
+ }
+ blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+
+ // Deserialize with the original external reference.
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ params.external_references = original_external_references;
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 42);
+ }
+ isolate->Dispose();
+ }
+
+  // Deserialize with a different external reference table.
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ params.external_references = replaced_external_references;
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 1337);
+ }
+ isolate->Dispose();
+ }
+ delete[] blob.data;
+}
+
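+// Templates can be snapshotted too: AddTemplate returns an index for
+// FunctionTemplate::FromSnapshot / ObjectTemplate::FromSnapshot, instantiated
+// objects share prototypes with their snapshotted originals, and
+// out-of-range indices yield an empty MaybeLocal.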
+TEST(SnapshotCreatorTemplates) {
+ DisableTurbofan();
+ v8::StartupData blob;
+ {
+ v8::SnapshotCreator creator(original_external_references);
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::ExtensionConfiguration* no_extension = nullptr;
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::FunctionTemplate> callback =
+ v8::FunctionTemplate::New(isolate, SerializedCallback);
+ global_template->Set(v8_str("f"), callback);
+ v8::Local<v8::Context> context =
+ v8::Context::New(isolate, no_extension, global_template);
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 42);
+ CHECK_EQ(0, creator.AddContext(context));
+ CHECK_EQ(0, creator.AddTemplate(callback));
+ CHECK_EQ(1, creator.AddTemplate(global_template));
+ }
+ blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+
+ {
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &blob;
+ params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ params.external_references = original_external_references;
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ {
+ // Create a new context without a new object template.
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0).ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("f()", 42);
+
+ // Retrieve the snapshotted object template.
+ v8::Local<v8::ObjectTemplate> obj_template =
+ v8::ObjectTemplate::FromSnapshot(isolate, 1).ToLocalChecked();
+ CHECK(!obj_template.IsEmpty());
+ v8::Local<v8::Object> object =
+ obj_template->NewInstance(context).ToLocalChecked();
+ CHECK(context->Global()->Set(context, v8_str("o"), object).FromJust());
+ ExpectInt32("o.f()", 42);
+ // Check that it instantiates to the same prototype.
+ ExpectTrue("o.f.prototype === f.prototype");
+
+ // Retrieve the snapshotted function template.
+ v8::Local<v8::FunctionTemplate> fun_template =
+ v8::FunctionTemplate::FromSnapshot(isolate, 0).ToLocalChecked();
+ CHECK(!fun_template.IsEmpty());
+ v8::Local<v8::Function> fun =
+ fun_template->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()->Set(context, v8_str("g"), fun).FromJust());
+ ExpectInt32("g()", 42);
+ // Check that it instantiates to the same prototype.
+ ExpectTrue("g.prototype === f.prototype");
+
+      // Accessing an out-of-bounds index returns an empty MaybeHandle.
+ CHECK(v8::ObjectTemplate::FromSnapshot(isolate, 2).IsEmpty());
+ CHECK(v8::FunctionTemplate::FromSnapshot(isolate, 2).IsEmpty());
+ CHECK(v8::Context::FromSnapshot(isolate, 2).IsEmpty());
+ }
+
+ {
+ // Create a context with a new object template. It is merged into the
+ // deserialized global object.
+ v8::HandleScope handle_scope(isolate);
+ v8::ExtensionConfiguration* no_extension = nullptr;
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ global_template->Set(
+ v8_str("g"),
+ v8::FunctionTemplate::New(isolate, SerializedCallbackReplacement));
+ v8::Local<v8::Context> context =
+ v8::Context::FromSnapshot(isolate, 0, no_extension, global_template)
+ .ToLocalChecked();
+ v8::Context::Scope context_scope(context);
+ ExpectInt32("g()", 1337);
+ ExpectInt32("f()", 42);
+ }
+ }
+ isolate->Dispose();
+ }
+ delete[] blob.data;
+}
TEST(SerializationMemoryStats) {
FLAG_profile_deserialization = true;
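For context on the SnapshotCreator API exercised above: a minimal embedder-side sketch of the create-and-reuse round trip, assuming a null-terminated intptr_t external-reference table like the ones these tests use (the names below are illustrative, not part of this patch).

    // Sketch: serialize one context into a startup blob, then restore it.
    v8::StartupData MakeBlob(intptr_t* external_refs) {
      v8::SnapshotCreator creator(external_refs);
      v8::Isolate* isolate = creator.GetIsolate();
      {
        v8::HandleScope handle_scope(isolate);
        v8::Local<v8::Context> context = v8::Context::New(isolate);
        creator.AddContext(context);  // becomes snapshot index 0
      }
      return creator.CreateBlob(
          v8::SnapshotCreator::FunctionCodeHandling::kClear);
    }

Deserialization then mirrors the tests: pass the blob and the same external-reference table through Isolate::CreateParams and recover the context with v8::Context::FromSnapshot(isolate, 0).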
diff --git a/deps/v8/test/cctest/test-slots-buffer.cc b/deps/v8/test/cctest/test-slots-buffer.cc
index 07b70f5217..4b8aeb7931 100644
--- a/deps/v8/test/cctest/test-slots-buffer.cc
+++ b/deps/v8/test/cctest/test-slots-buffer.cc
@@ -4,7 +4,7 @@
#include "src/heap/slots-buffer.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
@@ -101,7 +101,7 @@ TEST(FilterInvalidSlotsBufferEntries) {
// Write an old space reference into field 4 which points to an object on an
// evacuation candidate.
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
Handle<FixedArray> valid_object =
isolate->factory()->NewFixedArray(23, TENURED);
Page* page = Page::FromAddress(valid_object->address());
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 770042d814..afa8ecb7ec 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1110,7 +1110,8 @@ TEST(CachedHashOverflow) {
.ToLocalChecked()
->Run(context)
.ToLocalChecked();
- CHECK_EQ(results[i]->IsUndefined(), result->IsUndefined());
+ CHECK_EQ(results[i]->IsUndefined(CcTest::i_isolate()),
+ result->IsUndefined());
CHECK_EQ(results[i]->IsNumber(), result->IsNumber());
if (result->IsNumber()) {
int32_t value = 0;
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 85dfd13b60..06e6fb00cd 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -204,6 +204,31 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
semaphore = NULL;
}
+// Test that execution can be terminated from within JSON.stringify.
+TEST(TerminateJsonStringify) {
+ semaphore = new v8::base::Semaphore(0);
+ TerminatorThread thread(CcTest::i_isolate());
+ thread.Start();
+
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::ObjectTemplate> global =
+ CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
+ v8::Local<v8::Context> context =
+ v8::Context::New(CcTest::isolate(), NULL, global);
+ v8::Context::Scope context_scope(context);
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(CcTest::isolate()->GetCurrentContext(),
+ "var x = [];"
+ "x[2**31]=1;"
+ "terminate();"
+ "JSON.stringify(x);"
+ "fail();");
+ CHECK(result.IsEmpty());
+ thread.Join();
+ delete semaphore;
+ semaphore = NULL;
+}
int call_count = 0;
@@ -492,8 +517,7 @@ TEST(ErrorObjectAfterTermination) {
v8::Context::Scope context_scope(context);
isolate->TerminateExecution();
v8::Local<v8::Value> error = v8::Exception::Error(v8_str("error"));
- // TODO(yangguo): crbug/403509. Check for empty handle instead.
- CHECK(error->IsUndefined());
+ CHECK(error->IsNativeError());
}
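A rough sketch of the termination pattern the new TerminateJsonStringify test relies on, with std::thread standing in for the cctest TerminatorThread/semaphore machinery (the helper below is an assumption, not harness code):

    // Sketch: request termination of running JS from another thread.
    #include <thread>
    #include "include/v8.h"

    void TerminateSoon(v8::Isolate* isolate) {
      // Isolate::TerminateExecution is documented as callable from any
      // thread; real code would synchronize on script startup first.
      std::thread([isolate] { isolate->TerminateExecution(); }).detach();
    }

Once termination is pending, script execution yields an empty MaybeLocal, which is exactly what CHECK(result.IsEmpty()) asserts above.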
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 4678147931..190cb40782 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -72,7 +72,7 @@ class MockTracingPlatform : public v8::Platform {
void PerformDelayedTask() {}
uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
- const char* name, uint64_t id,
+ const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int num_args, const char** arg_names,
const uint8_t* arg_types, const uint64_t* arg_values,
unsigned int flags) override {
diff --git a/deps/v8/test/cctest/test-typing-reset.cc b/deps/v8/test/cctest/test-typing-reset.cc
deleted file mode 100644
index 4e9413ac3a..0000000000
--- a/deps/v8/test/cctest/test-typing-reset.cc
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/ast-expression-visitor.h"
-#include "src/ast/scopes.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/rewriter.h"
-#include "src/typing-reset.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/function-tester.h"
-#include "test/cctest/expression-type-collector.h"
-#include "test/cctest/expression-type-collector-macros.h"
-
-#define INT32_TYPE Bounds(Type::Signed32(), Type::Signed32())
-
-using namespace v8::internal;
-
-namespace {
-
-class TypeSetter : public AstExpressionVisitor {
- public:
- TypeSetter(Isolate* isolate, FunctionLiteral* root)
- : AstExpressionVisitor(isolate, root) {}
-
- protected:
- void VisitExpression(Expression* expression) {
- expression->set_bounds(INT32_TYPE);
- }
-};
-
-
-void CheckAllSame(ZoneVector<ExpressionTypeEntry>& types,
- Bounds expected_type) {
- CHECK_TYPES_BEGIN {
- // function logSum
- CHECK_EXPR(FunctionLiteral, expected_type) {
- CHECK_EXPR(FunctionLiteral, expected_type) {
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(start, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(start, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(end, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(end, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(sum, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(p, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(q, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- // for (p = start << 3, q = end << 3;
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(p, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(start, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(q, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(end, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- }
- // (p|0) < (q|0);
- CHECK_EXPR(CompareOperation, expected_type) {
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(p, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(q, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- // p = (p + 8)|0) {\n"
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(p, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(p, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(Literal, expected_type);
- }
- }
- // sum = sum + +log(values[p>>3]);
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(sum, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(sum, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(Call, expected_type) {
- CHECK_VAR(log, expected_type);
- CHECK_EXPR(Property, expected_type) {
- CHECK_VAR(values, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(p, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- }
- CHECK_EXPR(Literal, expected_type);
- }
- }
- }
- // return +sum;
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(sum, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- // function geometricMean
- CHECK_EXPR(FunctionLiteral, expected_type) {
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(start, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(start, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(end, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(end, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- }
- // return +exp(+logSum(start, end) / +((end - start)|0));
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(Call, expected_type) {
- CHECK_VAR(exp, expected_type);
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(Call, expected_type) {
- CHECK_VAR(logSum, expected_type);
- CHECK_VAR(start, expected_type);
- CHECK_VAR(end, expected_type);
- }
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_EXPR(BinaryOperation, expected_type) {
- CHECK_VAR(end, expected_type);
- CHECK_VAR(start, expected_type);
- }
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(Literal, expected_type);
- }
- }
- }
- CHECK_EXPR(Literal, expected_type);
- }
- }
- // "use asm";
- CHECK_EXPR(Literal, expected_type);
- // var exp = stdlib.Math.exp;
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(exp, expected_type);
- CHECK_EXPR(Property, expected_type) {
- CHECK_EXPR(Property, expected_type) {
- CHECK_VAR(stdlib, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(Literal, expected_type);
- }
- }
- // var log = stdlib.Math.log;
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(log, expected_type);
- CHECK_EXPR(Property, expected_type) {
- CHECK_EXPR(Property, expected_type) {
- CHECK_VAR(stdlib, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_EXPR(Literal, expected_type);
- }
- }
- // var values = new stdlib.Float64Array(buffer);
- CHECK_EXPR(Assignment, expected_type) {
- CHECK_VAR(values, expected_type);
- CHECK_EXPR(CallNew, expected_type) {
- CHECK_EXPR(Property, expected_type) {
- CHECK_VAR(stdlib, expected_type);
- CHECK_EXPR(Literal, expected_type);
- }
- CHECK_VAR(buffer, expected_type);
- }
- }
- // return { geometricMean: geometricMean };
- CHECK_EXPR(ObjectLiteral, expected_type) {
- CHECK_VAR(geometricMean, expected_type);
- }
- }
- }
- CHECK_TYPES_END
-}
-
-} // namespace
-
-
-TEST(ResetTypingInfo) {
- const char test_function[] =
- "function GeometricMean(stdlib, foreign, buffer) {\n"
- " \"use asm\";\n"
- "\n"
- " var exp = stdlib.Math.exp;\n"
- " var log = stdlib.Math.log;\n"
- " var values = new stdlib.Float64Array(buffer);\n"
- "\n"
- " function logSum(start, end) {\n"
- " start = start|0;\n"
- " end = end|0;\n"
- "\n"
- " var sum = 0.0, p = 0, q = 0;\n"
- "\n"
- " // asm.js forces byte addressing of the heap by requiring shifting "
- "by 3\n"
- " for (p = start << 3, q = end << 3; (p|0) < (q|0); p = (p + 8)|0) {\n"
- " sum = sum + +log(values[p>>3]);\n"
- " }\n"
- "\n"
- " return +sum;\n"
- " }\n"
- "\n"
- " function geometricMean(start, end) {\n"
- " start = start|0;\n"
- " end = end|0;\n"
- "\n"
- " return +exp(+logSum(start, end) / +((end - start)|0));\n"
- " }\n"
- "\n"
- " return { geometricMean: geometricMean };\n"
- "}\n";
-
- v8::V8::Initialize();
- HandleAndZoneScope handles;
-
- i::Isolate* isolate = CcTest::i_isolate();
- i::Factory* factory = isolate->factory();
-
- i::Handle<i::String> source_code =
- factory->NewStringFromUtf8(i::CStrVector(test_function))
- .ToHandleChecked();
-
- i::Handle<i::Script> script = factory->NewScript(source_code);
-
- i::ParseInfo info(handles.main_zone(), script);
- i::Parser parser(&info);
- parser.set_allow_harmony_sloppy(true);
- info.set_global();
- info.set_lazy(false);
- info.set_allow_lazy_parsing(false);
- info.set_toplevel(true);
-
- CHECK(i::Compiler::ParseAndAnalyze(&info));
- FunctionLiteral* root =
- info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun();
-
- // Core of the test.
- ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- ExpressionTypeCollector(isolate, root, &types).Run();
- CheckAllSame(types, Bounds::Unbounded());
-
- TypeSetter(isolate, root).Run();
-
- ExpressionTypeCollector(isolate, root, &types).Run();
- CheckAllSame(types, INT32_TYPE);
-
- TypingReseter(isolate, root).Run();
-
- ExpressionTypeCollector(isolate, root, &types).Run();
- CheckAllSame(types, Bounds::Unbounded());
-}
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 7fc9b5beec..6a1d87015b 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -15,7 +15,7 @@
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
using namespace v8::base;
using namespace v8::internal;
@@ -1112,7 +1112,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
{
AlwaysAllocateScope always_allocate(isolate);
// Make sure |obj_value| is placed on an old-space evacuation candidate.
- SimulateFullSpace(old_space);
+ heap::SimulateFullSpace(old_space);
obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
ec_page = Page::FromAddress(obj_value->address());
}
@@ -1142,7 +1142,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
// Disable stress compaction mode in order to let GC do scavenge.
FLAG_stress_compaction = false;
@@ -1151,7 +1151,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
// in compacting mode and |obj_value|'s page is an evacuation candidate).
IncrementalMarking* marking = heap->incremental_marking();
CHECK(marking->IsCompacting());
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj)));
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(*obj)));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger GCs so that |obj| moves to old gen.
@@ -1451,7 +1451,7 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK(old_space->Contains(*obj));
// Make sure |obj_value| is placed on an old-space evacuation candidate.
- SimulateFullSpace(old_space);
+ heap::SimulateFullSpace(old_space);
obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
ec_page = Page::FromAddress(obj_value->address());
CHECK_NE(ec_page, Page::FromAddress(obj->address()));
@@ -1460,15 +1460,15 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// Heap is ready, force |ec_page| to become an evacuation candidate and
// simulate incremental marking.
ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
// Check that everything is ready for triggering incremental write barrier
// (i.e. that both |obj| and |obj_value| are black and the marking phase is
// still active and |obj_value|'s page is indeed an evacuation candidate).
IncrementalMarking* marking = heap->incremental_marking();
CHECK(marking->IsMarking());
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj)));
- CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj_value)));
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(*obj)));
+ CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(*obj_value)));
CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
// Trigger incremental write barrier, which should add a slot to remembered
@@ -1504,10 +1504,12 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
}
-
-enum WriteBarrierKind { OLD_TO_OLD_WRITE_BARRIER, OLD_TO_NEW_WRITE_BARRIER };
+enum OldToWriteBarrierKind {
+ OLD_TO_OLD_WRITE_BARRIER,
+ OLD_TO_NEW_WRITE_BARRIER
+};
static void TestWriteBarrierObjectShiftFieldsRight(
- WriteBarrierKind write_barrier_kind) {
+ OldToWriteBarrierKind write_barrier_kind) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::HandleScope scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
new file mode 100644
index 0000000000..8d4628c9f7
--- /dev/null
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+
+namespace {
+
+int* global_use_counts = NULL;
+
+void MockUseCounterCallback(v8::Isolate* isolate,
+ v8::Isolate::UseCounterFeature feature) {
+ ++global_use_counts[feature];
+}
+}  // namespace
+
+TEST(DefineGetterSetterThrowUseCount) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+ int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+ global_use_counts = use_counts;
+ CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+
+ // __defineGetter__ and __defineSetter__ do not increment
+ // kDefineGetterOrSetterWouldThrow on success
+ CompileRun(
+ "var a = {};"
+ "Object.defineProperty(a, 'b', { value: 0, configurable: true });"
+ "a.__defineGetter__('b', ()=>{});");
+ CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
+ CompileRun(
+ "var a = {};"
+ "Object.defineProperty(a, 'b', { value: 0, configurable: true });"
+ "a.__defineSetter__('b', ()=>{});");
+ CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
+
+ // __defineGetter__ and __defineSetter__ do not increment
+ // kDefineGetterOrSetterWouldThrow on other errors
+ v8::Local<v8::Value> resultProxyThrow = CompileRun(
+ "var exception;"
+ "try {"
+ "var a = new Proxy({}, { defineProperty: ()=>{throw new Error;} });"
+ "a.__defineGetter__('b', ()=>{});"
+ "} catch (e) { exception = e; }"
+ "exception");
+ CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
+ CHECK(resultProxyThrow->IsObject());
+ resultProxyThrow = CompileRun(
+ "var exception;"
+ "try {"
+ "var a = new Proxy({}, { defineProperty: ()=>{throw new Error;} });"
+ "a.__defineSetter__('b', ()=>{});"
+ "} catch (e) { exception = e; }"
+ "exception");
+ CHECK_EQ(0, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
+ CHECK(resultProxyThrow->IsObject());
+
+ // __defineGetter__ and __defineSetter__ increment
+ // kDefineGetterOrSetterWouldThrow when they would throw per spec (B.2.2.2)
+ CompileRun(
+ "var a = {};"
+ "Object.defineProperty(a, 'b', { value: 0, configurable: false });"
+ "a.__defineGetter__('b', ()=>{});");
+ CHECK_EQ(1, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
+ CompileRun(
+ "var a = {};"
+ "Object.defineProperty(a, 'b', { value: 0, configurable: false });"
+ "a.__defineSetter__('b', ()=>{});");
+ CHECK_EQ(2, use_counts[v8::Isolate::kDefineGetterOrSetterWouldThrow]);
+}
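The hook this new file tests is a small embedder API; a minimal sketch of wiring it up (the array size and feature enum come from include/v8.h, as in the test itself):

    // Sketch: tally V8 feature usage for one process.
    static int counts[v8::Isolate::kUseCounterFeatureCount] = {0};

    static void CountFeature(v8::Isolate* isolate,
                             v8::Isolate::UseCounterFeature feature) {
      ++counts[feature];
    }

    // After isolate creation:
    //   isolate->SetUseCounterCallback(CountFeature);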
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index bd1ff998b9..938328925e 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -32,7 +32,8 @@
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
#define __ masm->
@@ -423,3 +424,6 @@ void RegisterDump::Dump(MacroAssembler* masm) {
completed_ = true;
}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index 3ecae23d4b..e34365d4c3 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -36,7 +36,8 @@
#include "src/macro-assembler.h"
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
// RegisterDump: Object allowing integer, floating point and flags registers
@@ -230,4 +231,7 @@ void ClobberFP(MacroAssembler* masm, RegList reg_list,
// Clobber or ClobberFP functions.
void Clobber(MacroAssembler* masm, CPURegList reg_list);
+} // namespace internal
+} // namespace v8
+
#endif // V8_ARM64_TEST_UTILS_ARM64_H_
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 781ad1f69f..2d0e620d7d 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -31,7 +31,7 @@
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
using namespace v8::internal;
@@ -52,8 +52,7 @@ static Handle<JSWeakMap> AllocateJSWeakMap(Isolate* isolate) {
}
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+static void WeakPointerCallback(const v8::WeakCallbackInfo<void>& data) {
std::pair<v8::Persistent<v8::Value>*, int>* p =
reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
data.GetParameter());
@@ -107,20 +106,12 @@ TEST(Weakness) {
{
HandleScope scope(isolate);
std::pair<Handle<Object>*, int> handle_and_id(&key, 1234);
- GlobalHandles::MakeWeak(key.location(),
- reinterpret_cast<void*>(&handle_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ key.location(), reinterpret_cast<void*>(&handle_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
}
CHECK(global_handles->IsWeak(key.location()));
- // Force a full GC.
- // Perform two consecutive GCs because the first one will only clear
- // weak references whereas the second one will also clear weak maps.
- heap->CollectAllGarbage(false);
- CHECK_EQ(1, NumberOfWeakCalls);
- CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
- CHECK_EQ(
- 0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
heap->CollectAllGarbage(false);
CHECK_EQ(1, NumberOfWeakCalls);
CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
@@ -186,7 +177,7 @@ TEST(Regress2060a) {
// Start second old-space page so that values land on evacuation candidate.
Page* first_page = heap->old_space()->anchor()->next_page();
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
// Fill up weak map with values on an evacuation candidate.
{
@@ -225,7 +216,7 @@ TEST(Regress2060b) {
// Start second old-space page so that keys land on evacuation candidate.
Page* first_page = heap->old_space()->anchor()->next_page();
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
// Fill up weak map with keys on an evacuation candidate.
Handle<JSObject> keys[32];
@@ -258,7 +249,7 @@ TEST(Regress399527) {
{
HandleScope scope(isolate);
AllocateJSWeakMap(isolate);
- SimulateIncrementalMarking(heap);
+ heap::SimulateIncrementalMarking(heap);
}
// The weak map is marked black here but leaving the handle scope will make
// the object unreachable. Aborting incremental marking will clear all the
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 643bb48ab1..ec6945aec7 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -31,7 +31,7 @@
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
-#include "test/cctest/heap/utils-inl.h"
+#include "test/cctest/heap/heap-utils.h"
using namespace v8::internal;
@@ -55,8 +55,7 @@ static Handle<JSWeakSet> AllocateJSWeakSet(Isolate* isolate) {
}
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(
- const v8::WeakCallbackData<v8::Value, void>& data) {
+static void WeakPointerCallback(const v8::WeakCallbackInfo<void>& data) {
std::pair<v8::Persistent<v8::Value>*, int>* p =
reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
data.GetParameter());
@@ -106,20 +105,12 @@ TEST(WeakSet_Weakness) {
{
HandleScope scope(isolate);
std::pair<Handle<Object>*, int> handle_and_id(&key, 1234);
- GlobalHandles::MakeWeak(key.location(),
- reinterpret_cast<void*>(&handle_and_id),
- &WeakPointerCallback);
+ GlobalHandles::MakeWeak(
+ key.location(), reinterpret_cast<void*>(&handle_and_id),
+ &WeakPointerCallback, v8::WeakCallbackType::kParameter);
}
CHECK(global_handles->IsWeak(key.location()));
- // Force a full GC.
- // Perform two consecutive GCs because the first one will only clear
- // weak references whereas the second one will also clear weak sets.
- heap->CollectAllGarbage(false);
- CHECK_EQ(1, NumberOfWeakCalls);
- CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
- CHECK_EQ(
- 0, ObjectHashTable::cast(weakset->table())->NumberOfDeletedElements());
heap->CollectAllGarbage(false);
CHECK_EQ(1, NumberOfWeakCalls);
CHECK_EQ(0, ObjectHashTable::cast(weakset->table())->NumberOfElements());
@@ -185,7 +176,7 @@ TEST(WeakSet_Regress2060a) {
// Start second old-space page so that values land on evacuation candidate.
Page* first_page = heap->old_space()->anchor()->next_page();
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
// Fill up weak set with values on an evacuation candidate.
{
@@ -224,7 +215,7 @@ TEST(WeakSet_Regress2060b) {
// Start second old-space page so that keys land on evacuation candidate.
Page* first_page = heap->old_space()->anchor()->next_page();
- SimulateFullSpace(heap->old_space());
+ heap::SimulateFullSpace(heap->old_space());
// Fill up weak set with keys on an evacuation candidate.
Handle<JSObject> keys[32];
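The MakeWeak edits in test-weakmaps.cc and test-weaksets.cc track this revision's move from WeakCallbackData to WeakCallbackInfo plus an explicit WeakCallbackType. The public-API equivalent on a Persistent handle looks roughly like the following sketch (the Payload struct is illustrative):

    // Sketch: parameter-style weak callback on a persistent handle.
    struct Payload { int id; };

    void OnGone(const v8::WeakCallbackInfo<Payload>& info) {
      // The JS object is already unreachable here; only the parameter
      // passed to SetWeak may be used.
      delete info.GetParameter();
    }

    void MakeHandleWeak(v8::Persistent<v8::Object>& handle, int id) {
      handle.SetWeak(new Payload{id}, OnGone,
                     v8::WeakCallbackType::kParameter);
    }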
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index 77a29e990b..f0cc3cc2cc 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -27,7 +27,7 @@
#include "test/cctest/trace-extension.h"
-#include "src/profiler/sampler.h"
+#include "include/v8-profiler.h"
#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
@@ -86,17 +86,13 @@ Address TraceExtension::GetFP(const v8::FunctionCallbackInfo<v8::Value>& args) {
#else
#error Host architecture is neither 32-bit nor 64-bit.
#endif
- printf("Trace: %p\n", fp);
+ printf("Trace: %p\n", static_cast<void*>(fp));
return fp;
}
+static struct { v8::TickSample* sample; } trace_env = {nullptr};
-static struct {
- TickSample* sample;
-} trace_env = { NULL };
-
-
-void TraceExtension::InitTraceEnv(TickSample* sample) {
+void TraceExtension::InitTraceEnv(v8::TickSample* sample) {
trace_env.sample = sample;
}
@@ -107,8 +103,8 @@ void TraceExtension::DoTrace(Address fp) {
// sp is only used to define stack high bound
regs.sp =
reinterpret_cast<Address>(trace_env.sample) - 10240;
- trace_env.sample->Init(CcTest::i_isolate(), regs,
- TickSample::kSkipCEntryFrame, true);
+ trace_env.sample->Init(CcTest::isolate(), regs,
+ v8::TickSample::kSkipCEntryFrame, true);
}
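The include swap works because this V8 revision exposes TickSample through the public profiler header. A hedged sketch of initializing one sample outside the harness (the null register values are placeholders; a real profiler captures pc/sp/fp from a suspended thread):

    // Sketch: fill a TickSample from a captured register state.
    #include "include/v8-profiler.h"

    void SampleOnce(v8::Isolate* isolate) {
      v8::RegisterState regs;
      regs.pc = nullptr;  // placeholders only
      regs.sp = nullptr;
      regs.fp = nullptr;
      v8::TickSample sample;
      sample.Init(isolate, regs, v8::TickSample::kSkipCEntryFrame,
                  /*update_stats=*/true);
    }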
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
index 53e178b3ba..b20c4ae9df 100644
--- a/deps/v8/test/cctest/trace-extension.h
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -31,9 +31,8 @@
#include "src/v8.h"
namespace v8 {
-namespace internal {
-
struct TickSample;
+namespace internal {
class TraceExtension : public v8::Extension {
public:
@@ -45,7 +44,7 @@ class TraceExtension : public v8::Extension {
static void JSEntrySP(const v8::FunctionCallbackInfo<v8::Value>& args);
static void JSEntrySPLevel2(const v8::FunctionCallbackInfo<v8::Value>& args);
static Address GetJsEntrySp();
- static void InitTraceEnv(TickSample* sample);
+ static void InitTraceEnv(v8::TickSample* sample);
static void DoTrace(Address fp);
private:
static Address GetFP(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/cctest/types-fuzz.h
index 79e460856c..7bf9700b40 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/cctest/types-fuzz.h
@@ -208,8 +208,6 @@ class Types {
Type* Representation(Type* t) { return Type::Representation(t, zone_); }
- // Type* Semantic(Type* t) { return Intersect(t,
- // MaskSemanticForTesting); }
Type* Semantic(Type* t) { return Type::Semantic(t, zone_); }
Type* Random() {
diff --git a/deps/v8/test/cctest/wasm/OWNERS b/deps/v8/test/cctest/wasm/OWNERS
index c2abc8a6ad..eda8deabfd 100644
--- a/deps/v8/test/cctest/wasm/OWNERS
+++ b/deps/v8/test/cctest/wasm/OWNERS
@@ -1,3 +1,5 @@
-titzer@chromium.org
-bradnelson@chromium.org
ahaas@chromium.org
+bradnelson@chromium.org
+mtrofin@chromium.org
+rossberg@chromium.org
+titzer@chromium.org
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 784f21a050..a978bdf1f7 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -14,6 +14,13 @@
#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+// If the target architecture is 64-bit, enable all tests.
+#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
+#define WASM_64 1
+#else
+#define WASM_64 0
+#endif
+
#define CHECK_TRAP32(x) \
CHECK_EQ(0xdeadbeef, (bit_cast<uint32_t>(x)) & 0xFFFFFFFF)
#define CHECK_TRAP64(x) \
@@ -24,20 +31,14 @@
#define asu64(x) static_cast<uint64_t>(x)
-#define B2(a, b) kExprBlock, 2, a, b
-#define B1(a) kExprBlock, 1, a
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B1(a) kExprBlock, a, kExprEnd
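The B1/B2 rewrite above follows a wasm binary-format change: a block is no longer prefixed with an operand count but is closed by an explicit end opcode. A sketch of what a two-expression block now encodes to (opcode names as used throughout this file):

    // Old: kExprBlock, 2, expr_a, expr_b
    // New: kExprBlock, expr_a, expr_b, kExprEnd
    byte block_code[] = {
        kExprBlock,       // open the block
        kExprI8Const, 1,  // expr_a (example body)
        kExprI8Const, 2,  // expr_b (example body)
        kExprEnd          // close the block
    };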
// Can't bridge macro land with nested macros.
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_X87
-#define MIPS_OR_X87 true
+#if V8_TARGET_ARCH_MIPS
+#define MIPS true
#else
-#define MIPS_OR_X87 false
-#endif
-
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_X87 || V8_TARGET_ARCH_ARM
-#define MIPS_OR_ARM_OR_X87 true
-#else
-#define MIPS_OR_ARM_OR_X87 false
+#define MIPS false
#endif
#define FOREACH_I64_OPERATOR(V) \
@@ -47,9 +48,9 @@
V(I64Return, true) \
V(I64Param, true) \
V(I64LoadStore, true) \
- V(I64Add, !MIPS_OR_X87) \
- V(I64Sub, !MIPS_OR_X87) \
- V(I64Mul, !MIPS_OR_X87) \
+ V(I64Add, true) \
+ V(I64Sub, true) \
+ V(I64Mul, !MIPS) \
V(I64DivS, true) \
V(I64DivU, true) \
V(I64RemS, true) \
@@ -57,9 +58,9 @@
V(I64And, true) \
V(I64Ior, true) \
V(I64Xor, true) \
- V(I64Shl, !MIPS_OR_X87) \
- V(I64ShrU, !MIPS_OR_X87) \
- V(I64ShrS, !MIPS_OR_X87) \
+ V(I64Shl, true) \
+ V(I64ShrU, true) \
+ V(I64ShrS, true) \
V(I64Eq, true) \
V(I64Ne, true) \
V(I64LtS, true) \
@@ -72,7 +73,7 @@
V(I64GeU, true) \
V(I64Ctz, true) \
V(I64Clz, true) \
- V(I64Popcnt, !MIPS_OR_X87) \
+ V(I64Popcnt, true) \
V(I32ConvertI64, true) \
V(I64SConvertF32, true) \
V(I64SConvertF64, true) \
@@ -96,20 +97,20 @@ FOREACH_I64_OPERATOR(DECLARE_CONST)
#define REQUIRE(name) \
if (!WASM_64 && !kSupported_##name) return
-TEST(Run_Wasm_I64Const) {
+WASM_EXEC_TEST(I64Const) {
REQUIRE(I64Const);
- WasmRunner<int64_t> r;
+ WasmRunner<int64_t> r(execution_mode);
const int64_t kExpectedValue = 0x1122334455667788LL;
// return(kExpectedValue)
BUILD(r, WASM_I64V_9(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
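A note on the pattern repeated through the rest of this file: TEST(Run_Wasm_*) becomes WASM_EXEC_TEST(name), whose body receives a WasmExecutionMode that is forwarded to WasmRunner and TestingModule. The macro's shape is roughly the sketch below (simplified from test/cctest/wasm/wasm-run-utils.h; the exact expansion may differ):

    // Sketch: run each wasm test body in both execution modes.
    #define WASM_EXEC_TEST(name)                                         \
      void RunWasm_##name(WasmExecutionMode execution_mode);             \
      TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); } \
      TEST(RunWasmInterpreted_##name) {                                  \
        RunWasm_##name(kExecuteInterpreted);                             \
      }                                                                  \
      void RunWasm_##name(WasmExecutionMode execution_mode)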
-TEST(Run_Wasm_I64Const_many) {
+WASM_EXEC_TEST(I64Const_many) {
REQUIRE(I64Const);
int cntr = 0;
FOR_INT32_INPUTS(i) {
- WasmRunner<int64_t> r;
+ WasmRunner<int64_t> r(execution_mode);
const int64_t kExpectedValue = (static_cast<int64_t>(*i) << 32) | cntr;
// return(kExpectedValue)
BUILD(r, WASM_I64V(kExpectedValue));
@@ -118,41 +119,39 @@ TEST(Run_Wasm_I64Const_many) {
}
}
-TEST(Run_Wasm_Return_I64) {
+WASM_EXEC_TEST(Return_I64) {
REQUIRE(I64Return);
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
- BUILD(r, WASM_RETURN(WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_RETURN1(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
-// todo(ahaas): I added a list of missing instructions here to make merging
-// easier when I do them one by one.
-// kExprI64Add:
-TEST(Run_WasmI64Add) {
+WASM_EXEC_TEST(I64Add) {
REQUIRE(I64Add);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i + *j, r.Call(*i, *j)); }
}
}
-// kExprI64Sub:
-TEST(Run_Wasm_I64Sub) {
+
+WASM_EXEC_TEST(I64Sub) {
REQUIRE(I64Sub);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i - *j, r.Call(*i, *j)); }
}
}
-// kExprI64Mul:
-// kExprI64DivS:
-TEST(Run_WasmI64DivS) {
+WASM_EXEC_TEST(I64DivS) {
REQUIRE(I64DivS);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
@@ -167,9 +166,10 @@ TEST(Run_WasmI64DivS) {
}
}
-TEST(Run_WasmI64DivS_Trap) {
+WASM_EXEC_TEST(I64DivS_Trap) {
REQUIRE(I64DivS);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(0, r.Call(asi64(0), asi64(100)));
CHECK_TRAP64(r.Call(asi64(100), asi64(0)));
@@ -178,10 +178,10 @@ TEST(Run_WasmI64DivS_Trap) {
CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(0)));
}
-TEST(Run_WasmI64DivS_Byzero_Const) {
+WASM_EXEC_TEST(I64DivS_Byzero_Const) {
REQUIRE(I64DivS);
for (int8_t denom = -2; denom < 8; denom++) {
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
for (int64_t val = -7; val < 8; val++) {
if (denom == 0) {
@@ -192,11 +192,11 @@ TEST(Run_WasmI64DivS_Byzero_Const) {
}
}
}
-// kExprI64DivU:
-TEST(Run_WasmI64DivU) {
+WASM_EXEC_TEST(I64DivU) {
REQUIRE(I64DivU);
- WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
+ MachineType::Uint64());
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -209,9 +209,10 @@ TEST(Run_WasmI64DivU) {
}
}
-TEST(Run_WasmI64DivU_Trap) {
+WASM_EXEC_TEST(I64DivU_Trap) {
REQUIRE(I64DivU);
- WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
+ MachineType::Uint64());
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(0, r.Call(asu64(0), asu64(100)));
CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
@@ -219,10 +220,10 @@ TEST(Run_WasmI64DivU_Trap) {
CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
}
-TEST(Run_WasmI64DivU_Byzero_Const) {
+WASM_EXEC_TEST(I64DivU_Byzero_Const) {
REQUIRE(I64DivU);
for (uint64_t denom = 0xfffffffffffffffe; denom < 8; denom++) {
- WasmRunner<uint64_t> r(MachineType::Uint64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64());
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64V_1(denom)));
for (uint64_t val = 0xfffffffffffffff0; val < 8; val++) {
@@ -234,10 +235,11 @@ TEST(Run_WasmI64DivU_Byzero_Const) {
}
}
}
-// kExprI64RemS:
-TEST(Run_WasmI64RemS) {
+
+WASM_EXEC_TEST(I64RemS) {
REQUIRE(I64RemS);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) {
@@ -250,9 +252,10 @@ TEST(Run_WasmI64RemS) {
}
}
-TEST(Run_WasmI64RemS_Trap) {
+WASM_EXEC_TEST(I64RemS_Trap) {
REQUIRE(I64RemS);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(33, r.Call(asi64(133), asi64(100)));
CHECK_EQ(0, r.Call(std::numeric_limits<int64_t>::min(), asi64(-1)));
@@ -261,10 +264,10 @@ TEST(Run_WasmI64RemS_Trap) {
CHECK_TRAP64(r.Call(std::numeric_limits<int64_t>::min(), asi64(0)));
}
-// kExprI64RemU:
-TEST(Run_WasmI64RemU) {
+WASM_EXEC_TEST(I64RemU) {
REQUIRE(I64RemU);
- WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
+ MachineType::Uint64());
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) {
@@ -277,9 +280,10 @@ TEST(Run_WasmI64RemU) {
}
}
-TEST(Run_Wasm_I64RemU_Trap) {
+WASM_EXEC_TEST(I64RemU_Trap) {
REQUIRE(I64RemU);
- WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
+ MachineType::Uint64());
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(17, r.Call(asu64(217), asu64(100)));
CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
@@ -287,38 +291,41 @@ TEST(Run_Wasm_I64RemU_Trap) {
CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
}
-// kExprI64And:
-TEST(Run_Wasm_I64And) {
+WASM_EXEC_TEST(I64And) {
REQUIRE(I64And);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((*i) & (*j), r.Call(*i, *j)); }
}
}
-// kExprI64Ior:
-TEST(Run_Wasm_I64Ior) {
+
+WASM_EXEC_TEST(I64Ior) {
REQUIRE(I64Ior);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((*i) | (*j), r.Call(*i, *j)); }
}
}
-// kExprI64Xor:
-TEST(Run_Wasm_I64Xor) {
+
+WASM_EXEC_TEST(I64Xor) {
REQUIRE(I64Xor);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ((*i) ^ (*j), r.Call(*i, *j)); }
}
}
-// kExprI64Shl:
-TEST(Run_Wasm_I64Shl) {
+
+WASM_EXEC_TEST(I64Shl) {
REQUIRE(I64Shl);
{
- WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
+ MachineType::Uint64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -329,31 +336,32 @@ TEST(Run_Wasm_I64Shl) {
}
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 0, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 32, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 20, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 40, r.Call(*i)); }
}
}
-// kExprI64ShrU:
-TEST(Run_Wasm_I64ShrU) {
+
+WASM_EXEC_TEST(I64ShrU) {
REQUIRE(I64ShrU);
{
- WasmRunner<uint64_t> r(MachineType::Uint64(), MachineType::Uint64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
+ MachineType::Uint64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -364,31 +372,32 @@ TEST(Run_Wasm_I64ShrU) {
}
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
}
}
-// kExprI64ShrS:
-TEST(Run_Wasm_I64ShrS) {
+
+WASM_EXEC_TEST(I64ShrS) {
REQUIRE(I64ShrS);
{
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
@@ -399,143 +408,151 @@ TEST(Run_Wasm_I64ShrS) {
}
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
}
}
-// kExprI64Eq:
-TEST(Run_Wasm_I64Eq) {
+
+WASM_EXEC_TEST(I64Eq) {
REQUIRE(I64Eq);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i == *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-// kExprI64Ne:
-TEST(Run_Wasm_I64Ne) {
+
+WASM_EXEC_TEST(I64Ne) {
REQUIRE(I64Ne);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_NE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i != *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-// kExprI64LtS:
-TEST(Run_Wasm_I64LtS) {
+
+WASM_EXEC_TEST(I64LtS) {
REQUIRE(I64LtS);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-TEST(Run_Wasm_I64LeS) {
+
+WASM_EXEC_TEST(I64LeS) {
REQUIRE(I64LeS);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_LES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-TEST(Run_Wasm_I64LtU) {
+
+WASM_EXEC_TEST(I64LtU) {
REQUIRE(I64LtU);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i < *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-TEST(Run_Wasm_I64LeU) {
+
+WASM_EXEC_TEST(I64LeU) {
REQUIRE(I64LeU);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i <= *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-TEST(Run_Wasm_I64GtS) {
+
+WASM_EXEC_TEST(I64GtS) {
REQUIRE(I64GtS);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-TEST(Run_Wasm_I64GeS) {
+
+WASM_EXEC_TEST(I64GeS) {
REQUIRE(I64GeS);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_GES(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-TEST(Run_Wasm_I64GtU) {
+WASM_EXEC_TEST(I64GtU) {
REQUIRE(I64GtU);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i > *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-TEST(Run_Wasm_I64GeU) {
+WASM_EXEC_TEST(I64GeU) {
REQUIRE(I64GeU);
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
FOR_UINT64_INPUTS(j) { CHECK_EQ(*i >= *j ? 1 : 0, r.Call(*i, *j)); }
}
}
-// kExprI32ConvertI64:
-TEST(Run_Wasm_I32ConvertI64) {
+
+WASM_EXEC_TEST(I32ConvertI64) {
REQUIRE(I32ConvertI64);
FOR_INT64_INPUTS(i) {
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
BUILD(r, WASM_I32_CONVERT_I64(WASM_I64V(*i)));
CHECK_EQ(static_cast<int32_t>(*i), r.Call());
}
}
-// kExprI64SConvertI32:
-TEST(Run_Wasm_I64SConvertI32) {
+
+WASM_EXEC_TEST(I64SConvertI32) {
REQUIRE(I64SConvertI32);
- WasmRunner<int64_t> r(MachineType::Int32());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int32());
BUILD(r, WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
}
-// kExprI64UConvertI32:
-TEST(Run_Wasm_I64UConvertI32) {
+WASM_EXEC_TEST(I64UConvertI32) {
REQUIRE(I64UConvertI32);
- WasmRunner<int64_t> r(MachineType::Uint32());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Uint32());
BUILD(r, WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(0)));
FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i)); }
}
-// kExprF64ReinterpretI64:
-// kExprI64ReinterpretF64:
-
-// kExprI64Clz:
-// kExprI64Ctz:
-// kExprI64Popcnt:
-TEST(Run_WasmI64Popcnt) {
+WASM_EXEC_TEST(I64Popcnt) {
struct {
int64_t expected;
uint64_t input;
@@ -545,22 +562,21 @@ TEST(Run_WasmI64Popcnt) {
{26, 0x1123456782345678},
{38, 0xffedcba09edcba09}};
- WasmRunner<int64_t> r(MachineType::Uint64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Uint64());
BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
}
}
-// kExprF32SConvertI64:
-TEST(Run_WasmF32SConvertI64) {
+WASM_EXEC_TEST(F32SConvertI64) {
REQUIRE(F32SConvertI64);
- WasmRunner<float> r(MachineType::Int64());
+ WasmRunner<float> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_F32_SCONVERT_I64(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) { CHECK_FLOAT_EQ(static_cast<float>(*i), r.Call(*i)); }
}
-// kExprF32UConvertI64:
-TEST(Run_WasmF32UConvertI64) {
+
+WASM_EXEC_TEST(F32UConvertI64) {
REQUIRE(F32UConvertI64);
struct {
uint64_t input;
@@ -641,21 +657,21 @@ TEST(Run_WasmF32UConvertI64) {
{0x8000008000000001, 0x5f000001},
{0x8000000000000400, 0x5f000000},
{0x8000000000000401, 0x5f000000}};
- WasmRunner<float> r(MachineType::Uint64());
+ WasmRunner<float> r(execution_mode, MachineType::Uint64());
BUILD(r, WASM_F32_UCONVERT_I64(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(bit_cast<float>(values[i].expected), r.Call(values[i].input));
}
}
-// kExprF64SConvertI64:
-TEST(Run_WasmF64SConvertI64) {
+
+WASM_EXEC_TEST(F64SConvertI64) {
REQUIRE(F64SConvertI64);
- WasmRunner<double> r(MachineType::Int64());
+ WasmRunner<double> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) { CHECK_DOUBLE_EQ(static_cast<double>(*i), r.Call(*i)); }
}
-// kExprF64UConvertI64:
-TEST(Run_Wasm_F64UConvertI64) {
+
+WASM_EXEC_TEST(F64UConvertI64) {
REQUIRE(F64UConvertI64);
struct {
uint64_t input;
@@ -735,16 +751,15 @@ TEST(Run_Wasm_F64UConvertI64) {
{0x8000008000000001, 0x43e0000010000000},
{0x8000000000000400, 0x43e0000000000000},
{0x8000000000000401, 0x43e0000000000001}};
- WasmRunner<double> r(MachineType::Uint64());
+ WasmRunner<double> r(execution_mode, MachineType::Uint64());
BUILD(r, WASM_F64_UCONVERT_I64(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(bit_cast<double>(values[i].expected), r.Call(values[i].input));
}
}
-// kExprI64SConvertF32:
-TEST(Run_Wasm_I64SConvertF32a) {
- WasmRunner<int64_t> r(MachineType::Float32());
+WASM_EXEC_TEST(I64SConvertF32a) {
+ WasmRunner<int64_t> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -756,9 +771,9 @@ TEST(Run_Wasm_I64SConvertF32a) {
}
}
}
-// kExprI64SConvertF64:
-TEST(Run_Wasm_I64SConvertF64a) {
- WasmRunner<int64_t> r(MachineType::Float64());
+
+WASM_EXEC_TEST(I64SConvertF64a) {
+ WasmRunner<int64_t> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -771,9 +786,8 @@ TEST(Run_Wasm_I64SConvertF64a) {
}
}
-// kExprI64UConvertF32:
-TEST(Run_Wasm_I64UConvertF32a) {
- WasmRunner<uint64_t> r(MachineType::Float32());
+WASM_EXEC_TEST(I64UConvertF32a) {
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -786,9 +800,8 @@ TEST(Run_Wasm_I64UConvertF32a) {
}
}
-// kExprI64UConvertF64:
-TEST(Run_Wasm_I64UConvertF64a) {
- WasmRunner<uint64_t> r(MachineType::Float64());
+WASM_EXEC_TEST(I64UConvertF64a) {
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -801,7 +814,7 @@ TEST(Run_Wasm_I64UConvertF64a) {
}
}
-TEST(Run_WasmCallI64Parameter) {
+WASM_EXEC_TEST(CallI64Parameter) {
// Build the target function.
LocalType param_types[20];
for (int i = 0; i < 20; i++) param_types[i] = kAstI64;
@@ -809,7 +822,7 @@ TEST(Run_WasmCallI64Parameter) {
param_types[4] = kAstI32;
FunctionSig sig(1, 19, param_types);
for (int i = 0; i < 19; i++) {
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(&sig, &module);
if (i == 2 || i == 3) {
continue;
@@ -822,8 +835,8 @@ TEST(Run_WasmCallI64Parameter) {
WasmRunner<int32_t> r(&module);
BUILD(
r,
- WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
- index, WASM_I64V_9(0xbcd12340000000b),
+ WASM_I32_CONVERT_I64(WASM_CALL_FUNCTIONN(
+ 19, index, WASM_I64V_9(0xbcd12340000000b),
WASM_I64V_9(0xbcd12340000000c), WASM_I32V_1(0xd),
WASM_I32_CONVERT_I64(WASM_I64V_9(0xbcd12340000000e)),
WASM_I64V_9(0xbcd12340000000f), WASM_I64V_10(0xbcd1234000000010),
@@ -839,43 +852,47 @@ TEST(Run_WasmCallI64Parameter) {
}
}
-void TestI64Binop(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
+void TestI64Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+ int64_t expected, int64_t a, int64_t b) {
{
- WasmRunner<int64_t> r;
+ WasmRunner<int64_t> r(execution_mode);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
-void TestI64Cmp(WasmOpcode opcode, int64_t expected, int64_t a, int64_t b) {
+void TestI64Cmp(WasmExecutionMode execution_mode, WasmOpcode opcode,
+ int64_t expected, int64_t a, int64_t b) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_I64V(a), WASM_I64V(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
-#define TEST_I64_BINOP(name, expected, a, b) \
- do { \
- if (WASM_64 || kSupported_##name) \
- TestI64Binop(kExpr##name, expected, a, b); \
+#define TEST_I64_BINOP(name, expected, a, b) \
+ do { \
+ if (WASM_64 || kSupported_##name) \
+ TestI64Binop(execution_mode, kExpr##name, expected, a, b); \
} while (false)
-TEST(Run_Wasm_I64Binops) {
+WASM_EXEC_TEST(I64Binops) {
TEST_I64_BINOP(I64Add, -5586332274295447011, 0x501b72ebabc26847,
0x625de9793d8f79d6);
TEST_I64_BINOP(I64Sub, 9001903251710731490, 0xf24fe6474640002e,
@@ -907,12 +924,13 @@ TEST(Run_Wasm_I64Binops) {
TEST_I64_BINOP(I64Rol, 8728493013947314237, 0xe07af243ac4d219d, 15);
}
-#define TEST_I64_CMP(name, expected, a, b) \
- do { \
- if (WASM_64 || kSupported_##name) TestI64Cmp(kExpr##name, expected, a, b); \
+#define TEST_I64_CMP(name, expected, a, b) \
+ do { \
+ if (WASM_64 || kSupported_##name) \
+ TestI64Cmp(execution_mode, kExpr##name, expected, a, b); \
} while (false)
-TEST(Run_Wasm_I64Compare) {
+WASM_EXEC_TEST(I64Compare) {
TEST_I64_CMP(I64Eq, 0, 0xB915D8FA494064F0, 0x04D700B2536019A3);
TEST_I64_CMP(I64Ne, 1, 0xC2FAFAAAB0446CDC, 0x52A3328F780C97A3);
TEST_I64_CMP(I64LtS, 0, 0x673636E6306B0578, 0x028EC9ECA78F7227);
@@ -925,7 +943,7 @@ TEST(Run_Wasm_I64Compare) {
TEST_I64_CMP(I64GeU, 0, 0x0886A0C58C7AA224, 0x5DDBE5A81FD7EE47);
}
-TEST(Run_Wasm_I64Clz) {
+WASM_EXEC_TEST(I64Clz) {
REQUIRE(I64Clz);
struct {
int64_t expected;
@@ -964,14 +982,14 @@ TEST(Run_Wasm_I64Clz) {
{62, 0x0000000000000002}, {63, 0x0000000000000001},
{64, 0x0000000000000000}};
- WasmRunner<int64_t> r(MachineType::Uint64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Uint64());
BUILD(r, WASM_I64_CLZ(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
}
}
-TEST(Run_Wasm_I64Ctz) {
+WASM_EXEC_TEST(I64Ctz) {
REQUIRE(I64Ctz);
struct {
int64_t expected;
@@ -1010,14 +1028,14 @@ TEST(Run_Wasm_I64Ctz) {
{2, 0x000000009afdbc84}, {1, 0x000000009afdbc82},
{0, 0x000000009afdbc81}};
- WasmRunner<int64_t> r(MachineType::Uint64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Uint64());
BUILD(r, WASM_I64_CTZ(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
}
}
-TEST(Run_Wasm_I64Popcnt) {
+WASM_EXEC_TEST(I64Popcnt2) {
REQUIRE(I64Popcnt);
struct {
int64_t expected;
@@ -1028,7 +1046,7 @@ TEST(Run_Wasm_I64Popcnt) {
{26, 0x1123456782345678},
{38, 0xffedcba09edcba09}};
- WasmRunner<int64_t> r(MachineType::Uint64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Uint64());
BUILD(r, WASM_I64_POPCNT(WASM_GET_LOCAL(0)));
for (size_t i = 0; i < arraysize(values); i++) {
CHECK_EQ(values[i].expected, r.Call(values[i].input));
@@ -1037,29 +1055,30 @@ TEST(Run_Wasm_I64Popcnt) {
// Test the WasmRunner with an Int64 return value and different numbers of
// Int64 parameters.
-TEST(Run_TestI64WasmRunner) {
+WASM_EXEC_TEST(I64WasmRunner) {
REQUIRE(I64Param);
REQUIRE(I64Xor);
- {FOR_INT64_INPUTS(i){WasmRunner<int64_t> r;
+ {FOR_INT64_INPUTS(i){WasmRunner<int64_t> r(execution_mode);
BUILD(r, WASM_I64V(*i));
CHECK_EQ(*i, r.Call());
}
}
{
- WasmRunner<int64_t> r(MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_GET_LOCAL(0));
FOR_INT64_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT64_INPUTS(i) {
FOR_INT64_INPUTS(j) { CHECK_EQ(*i ^ *j, r.Call(*i, *j)); }
}
}
{
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
- MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64(), MachineType::Int64());
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
WASM_I64_XOR(WASM_GET_LOCAL(1), WASM_GET_LOCAL(2))));
FOR_INT64_INPUTS(i) {
@@ -1071,8 +1090,9 @@ TEST(Run_TestI64WasmRunner) {
}
}
{
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64(),
- MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_XOR(WASM_GET_LOCAL(0),
WASM_I64_XOR(WASM_GET_LOCAL(1),
WASM_I64_XOR(WASM_GET_LOCAL(2),
@@ -1088,18 +1108,18 @@ TEST(Run_TestI64WasmRunner) {
}
}
-TEST(Run_WasmCall_Int64Sub) {
+WASM_EXEC_TEST(Call_Int64Sub) {
REQUIRE(I64Sub);
// Build the target function.
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(sigs.l_ll(), &module);
BUILD(t, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
uint32_t index = t.CompileAndAdd();
// Build the caller function.
WasmRunner<int64_t> r(&module, MachineType::Int64(), MachineType::Int64());
- BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ BUILD(r, WASM_CALL_FUNCTION2(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -1115,23 +1135,27 @@ TEST(Run_WasmCall_Int64Sub) {
}
}
-TEST(Run_Wasm_LoadStoreI64_sx) {
+WASM_EXEC_TEST(LoadStoreI64_sx) {
REQUIRE(I64LoadStore);
REQUIRE(DepthFirst);
byte loads[] = {kExprI64LoadMem8S, kExprI64LoadMem16S, kExprI64LoadMem32S,
kExprI64LoadMem};
for (size_t m = 0; m < arraysize(loads); m++) {
- TestingModule module;
+ TestingModule module(execution_mode);
byte* memory = module.AddMemoryElems<byte>(16);
WasmRunner<int64_t> r(&module);
- byte code[] = {kExprI64StoreMem, ZERO_ALIGNMENT,
- ZERO_OFFSET, // --
- kExprI8Const, 8, // --
- loads[m], ZERO_ALIGNMENT,
- ZERO_OFFSET, // --
- kExprI8Const, 0}; // --
+ byte code[] = {
+ kExprI8Const, 8, // --
+ kExprI8Const, 0, // --
+ loads[m], // --
+ ZERO_ALIGNMENT, // --
+ ZERO_OFFSET, // --
+ kExprI64StoreMem, // --
+ ZERO_ALIGNMENT, // --
+ ZERO_OFFSET // --
+ };
r.Build(code, code + arraysize(code));
@@ -1152,9 +1176,9 @@ TEST(Run_Wasm_LoadStoreI64_sx) {
}
}
-TEST(Run_Wasm_I64SConvertF32b) {
+WASM_EXEC_TEST(I64SConvertF32b) {
REQUIRE(I64SConvertF32);
- WasmRunner<int64_t> r(MachineType::Float32());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_I64_SCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -1167,9 +1191,9 @@ TEST(Run_Wasm_I64SConvertF32b) {
}
}
-TEST(Run_Wasm_I64SConvertF64b) {
+WASM_EXEC_TEST(I64SConvertF64b) {
REQUIRE(I64SConvertF64);
- WasmRunner<int64_t> r(MachineType::Float64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_I64_SCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -1182,9 +1206,9 @@ TEST(Run_Wasm_I64SConvertF64b) {
}
}
-TEST(Run_Wasm_I64UConvertF32b) {
+WASM_EXEC_TEST(I64UConvertF32b) {
REQUIRE(I64UConvertF32);
- WasmRunner<uint64_t> r(MachineType::Float32());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_I64_UCONVERT_F32(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -1196,9 +1220,9 @@ TEST(Run_Wasm_I64UConvertF32b) {
}
}
-TEST(Run_Wasm_I64UConvertF64b) {
+WASM_EXEC_TEST(I64UConvertF64b) {
REQUIRE(I64UConvertF64);
- WasmRunner<uint64_t> r(MachineType::Float64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_I64_UCONVERT_F64(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -1210,9 +1234,9 @@ TEST(Run_Wasm_I64UConvertF64b) {
}
}
-TEST(Run_Wasm_I64ReinterpretF64) {
+WASM_EXEC_TEST(I64ReinterpretF64) {
REQUIRE(I64ReinterpretF64);
- TestingModule module;
+ TestingModule module(execution_mode);
int64_t* memory = module.AddMemoryElems<int64_t>(8);
WasmRunner<int64_t> r(&module);
@@ -1221,99 +1245,138 @@ TEST(Run_Wasm_I64ReinterpretF64) {
FOR_INT32_INPUTS(i) {
int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
- memory[0] = expected;
+ module.WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
}
-TEST(Run_Wasm_F64ReinterpretI64) {
+WASM_EXEC_TEST(F64ReinterpretI64) {
REQUIRE(F64ReinterpretI64);
- TestingModule module;
+ TestingModule module(execution_mode);
int64_t* memory = module.AddMemoryElems<int64_t>(8);
WasmRunner<int64_t> r(&module, MachineType::Int64());
- BUILD(r, WASM_BLOCK(
- 2, WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
- WASM_F64_REINTERPRET_I64(WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(0)));
+ BUILD(r,
+ WASM_BLOCK(WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
+ WASM_F64_REINTERPRET_I64(WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
int64_t expected = static_cast<int64_t>(*i) * 0x300010001;
CHECK_EQ(expected, r.Call(expected));
- CHECK_EQ(expected, memory[0]);
+ CHECK_EQ(expected, module.ReadMemory<int64_t>(&memory[0]));
}
}
-TEST(Run_Wasm_LoadMemI64) {
+WASM_EXEC_TEST(LoadMemI64) {
REQUIRE(I64LoadStore);
- TestingModule module;
+ TestingModule module(execution_mode);
int64_t* memory = module.AddMemoryElems<int64_t>(8);
module.RandomizeMemory(1111);
WasmRunner<int64_t> r(&module);
BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_I8(0)));
- memory[0] = 0xaabbccdd00112233LL;
+ module.WriteMemory<int64_t>(&memory[0], 0xaabbccdd00112233LL);
CHECK_EQ(0xaabbccdd00112233LL, r.Call());
- memory[0] = 0x33aabbccdd001122LL;
+ module.WriteMemory<int64_t>(&memory[0], 0x33aabbccdd001122LL);
CHECK_EQ(0x33aabbccdd001122LL, r.Call());
- memory[0] = 77777777;
+ module.WriteMemory<int64_t>(&memory[0], 77777777);
CHECK_EQ(77777777, r.Call());
}
-TEST(Run_Wasm_MemI64_Sum) {
+WASM_EXEC_TEST(LoadMemI64_alignment) {
+ REQUIRE(I64LoadStore);
+ TestingModule module(execution_mode);
+ int64_t* memory = module.AddMemoryElems<int64_t>(8);
+ for (byte alignment = 0; alignment <= 3; alignment++) {
+ module.RandomizeMemory(1111);
+ WasmRunner<int64_t> r(&module);
+
+ BUILD(r,
+ WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_I8(0), alignment));
+
+ module.WriteMemory<int64_t>(&memory[0], 0xaabbccdd00112233LL);
+ CHECK_EQ(0xaabbccdd00112233LL, r.Call());
+
+ module.WriteMemory<int64_t>(&memory[0], 0x33aabbccdd001122LL);
+ CHECK_EQ(0x33aabbccdd001122LL, r.Call());
+
+ module.WriteMemory<int64_t>(&memory[0], 77777777);
+ CHECK_EQ(77777777, r.Call());
+ }
+}
+
+WASM_EXEC_TEST(MemI64_Sum) {
REQUIRE(I64LoadStore);
REQUIRE(I64Add);
REQUIRE(I64Sub);
REQUIRE(I64Phi);
const int kNumElems = 20;
- TestingModule module;
+ TestingModule module(execution_mode);
uint64_t* memory = module.AddMemoryElems<uint64_t>(kNumElems);
WasmRunner<uint64_t> r(&module, MachineType::Int32());
const byte kSum = r.AllocateLocal(kAstI64);
- BUILD(r, WASM_BLOCK(
- 2, WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- 2, WASM_SET_LOCAL(
- kSum, WASM_I64_ADD(
- WASM_GET_LOCAL(kSum),
+ BUILD(r,
+ WASM_BLOCK(
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(
+ kSum, WASM_I64_ADD(WASM_GET_LOCAL(kSum),
WASM_LOAD_MEM(MachineType::Int64(),
WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(8))))),
- WASM_GET_LOCAL(1)));
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(8))))),
+ WASM_GET_LOCAL(1)));
// Run 3 trials.
for (int i = 0; i < 3; i++) {
module.RandomizeMemory(i * 33);
uint64_t expected = 0;
for (size_t j = kNumElems - 1; j > 0; j--) {
- expected += memory[j];
+ expected += module.ReadMemory(&memory[j]);
}
uint64_t result = r.Call(8 * (kNumElems - 1));
CHECK_EQ(expected, result);
}
}
-TEST(Run_Wasm_I64Global) {
+WASM_EXEC_TEST(StoreMemI64_alignment) {
+ TestingModule module(execution_mode);
+ int64_t* memory = module.AddMemoryElems<int64_t>(4);
+ const int64_t kWritten = 0x12345678abcd0011ll;
+
+ for (byte i = 0; i <= 3; i++) {
+ WasmRunner<int64_t> r(&module, MachineType::Int64());
+ BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int64(), WASM_ZERO, i,
+ WASM_GET_LOCAL(0)));
+ module.RandomizeMemory(1111);
+ module.WriteMemory<int64_t>(&memory[0], 0);
+
+ CHECK_EQ(kWritten, r.Call(kWritten));
+ CHECK_EQ(kWritten, module.ReadMemory(&memory[0]));
+ }
+}
+
+WASM_EXEC_TEST(I64Global) {
REQUIRE(I64LoadStore);
REQUIRE(I64SConvertI32);
REQUIRE(I64And);
REQUIRE(DepthFirst);
- TestingModule module;
- int64_t* global = module.AddGlobal<int64_t>(MachineType::Int64());
+ TestingModule module(execution_mode);
+ int64_t* global = module.AddGlobal<int64_t>(kAstI64);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global & p0
- BUILD(r, B2(WASM_STORE_GLOBAL(
- 0, WASM_I64_AND(WASM_LOAD_GLOBAL(0),
+ BUILD(r, B2(WASM_SET_GLOBAL(
+ 0, WASM_I64_AND(WASM_GET_GLOBAL(0),
WASM_I64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
WASM_ZERO));
- *global = 0xFFFFFFFFFFFFFFFFLL;
+ module.WriteMemory<int64_t>(global, 0xFFFFFFFFFFFFFFFFLL);
for (int i = 9; i < 444444; i += 111111) {
int64_t expected = *global & i;
r.Call(i);
@@ -1321,10 +1384,10 @@ TEST(Run_Wasm_I64Global) {
}
}
-TEST(Run_Wasm_I64Eqz) {
+WASM_EXEC_TEST(I64Eqz) {
REQUIRE(I64Eq);
- WasmRunner<int32_t> r(MachineType::Int64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_EQZ(WASM_GET_LOCAL(0)));
FOR_INT64_INPUTS(i) {
@@ -1333,9 +1396,10 @@ TEST(Run_Wasm_I64Eqz) {
}
}
-TEST(Run_Wasm_I64Ror) {
+WASM_EXEC_TEST(I64Ror) {
REQUIRE(I64Ror);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_ROR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -1346,9 +1410,10 @@ TEST(Run_Wasm_I64Ror) {
}
}
-TEST(Run_Wasm_I64Rol) {
+WASM_EXEC_TEST(I64Rol) {
REQUIRE(I64Rol);
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
BUILD(r, WASM_I64_ROL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT64_INPUTS(i) {
@@ -1358,3 +1423,153 @@ TEST(Run_Wasm_I64Rol) {
}
}
}
+
+WASM_EXEC_TEST(StoreMem_offset_oob_i64) {
+ TestingModule module(execution_mode);
+ byte* memory = module.AddMemoryElems<byte>(32);
+
+ static const MachineType machineTypes[] = {
+ MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+ MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
+ MachineType::Float64()};
+
+ for (size_t m = 0; m < arraysize(machineTypes); m++) {
+ module.RandomizeMemory(1119 + static_cast<int>(m));
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
+
+ BUILD(r, WASM_STORE_MEM_OFFSET(machineTypes[m], 8, WASM_GET_LOCAL(0),
+ WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
+ WASM_ZERO);
+
+ byte memsize = WasmOpcodes::MemSize(machineTypes[m]);
+ uint32_t boundary = 24 - memsize;
+ CHECK_EQ(0, r.Call(boundary)); // in bounds.
+ CHECK_EQ(0, memcmp(&memory[0], &memory[8 + boundary], memsize));
+
+ for (uint32_t offset = boundary + 1; offset < boundary + 19; offset++) {
+ CHECK_TRAP(r.Call(offset)); // out of bounds.
+ }
+ }
+}
+
+#define ADD_CODE(vec, ...) \
+ do { \
+ byte __buf[] = {__VA_ARGS__}; \
+ for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
+ } while (false)
+
+static void CompileCallIndirectMany(LocalType param) {
+ // Make sure we don't run out of registers when compiling indirect calls
+  // with very many parameters.
+ TestSignatures sigs;
+ for (byte num_params = 0; num_params < 40; num_params++) {
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ HandleScope scope(CcTest::InitIsolateOnce());
+ TestingModule module(kExecuteCompiled);
+ FunctionSig* sig = sigs.many(&zone, kAstStmt, param, num_params);
+
+ module.AddSignature(sig);
+ module.AddSignature(sig);
+ module.AddIndirectFunctionTable(nullptr, 0);
+
+ WasmFunctionCompiler t(sig, &module);
+
+ std::vector<byte> code;
+ ADD_CODE(code, kExprI8Const, 0);
+ for (byte p = 0; p < num_params; p++) {
+ ADD_CODE(code, kExprGetLocal, p);
+ }
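+  // kExprCallIndirect is followed by the call arity and then a signature
+  // index; two identical signatures were added above, so index 1 is valid.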
+ ADD_CODE(code, kExprCallIndirect, static_cast<byte>(num_params), 1);
+
+ t.Build(&code[0], &code[0] + code.size());
+ t.Compile();
+ }
+}
+
+TEST(Compile_Wasm_CallIndirect_Many_i64) { CompileCallIndirectMany(kAstI64); }
+
+static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
+ const int kExpected = 6333;
+ const int kElemSize = 8;
+ TestSignatures sigs;
+
+ static MachineType mixed[] = {
+ MachineType::Int32(), MachineType::Float32(), MachineType::Int64(),
+ MachineType::Float64(), MachineType::Float32(), MachineType::Int64(),
+ MachineType::Int32(), MachineType::Float64(), MachineType::Float32(),
+ MachineType::Float64(), MachineType::Int32(), MachineType::Int64(),
+ MachineType::Int32(), MachineType::Int32()};
+
+ int num_params = static_cast<int>(arraysize(mixed)) - start;
+ for (int which = 0; which < num_params; which++) {
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
+ TestingModule module(execution_mode);
+ module.AddMemory(1024);
+ MachineType* memtypes = &mixed[start];
+ MachineType result = memtypes[which];
+
+ // =========================================================================
+ // Build the selector function.
+ // =========================================================================
+ uint32_t index;
+ FunctionSig::Builder b(&zone, 1, num_params);
+ b.AddReturn(WasmOpcodes::LocalTypeFor(result));
+ for (int i = 0; i < num_params; i++) {
+ b.AddParam(WasmOpcodes::LocalTypeFor(memtypes[i]));
+ }
+ WasmFunctionCompiler t(b.Build(), &module);
+ BUILD(t, WASM_GET_LOCAL(which));
+ index = t.CompileAndAdd();
+
+ // =========================================================================
+ // Build the calling function.
+ // =========================================================================
+ WasmRunner<int32_t> r(&module);
+ std::vector<byte> code;
+
+ // Load the offset for the store.
+ ADD_CODE(code, WASM_ZERO);
+
+ // Load the arguments.
+ for (int i = 0; i < num_params; i++) {
+ int offset = (i + 1) * kElemSize;
+ ADD_CODE(code, WASM_LOAD_MEM(memtypes[i], WASM_I8(offset)));
+ }
+
+ // Call the selector function.
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params),
+ static_cast<byte>(index));
+
+ // Store the result in memory.
+ ADD_CODE(code,
+ static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
+ ZERO_ALIGNMENT, ZERO_OFFSET);
+
+ // Return the expected value.
+ ADD_CODE(code, WASM_I32V_2(kExpected));
+
+ r.Build(&code[0], &code[0] + code.size());
+
+ // Run the code.
+ for (int t = 0; t < 10; t++) {
+ module.RandomizeMemory();
+ CHECK_EQ(kExpected, r.Call());
+
+ int size = WasmOpcodes::MemSize(result);
+ for (int i = 0; i < size; i++) {
+ int base = (which + 1) * kElemSize;
+ byte expected = module.raw_mem_at<byte>(base + i);
+ byte result = module.raw_mem_at<byte>(i);
+ CHECK_EQ(expected, result);
+ }
+ }
+ }
+}
+
+WASM_EXEC_TEST(MixedCall_i64_0) { Run_WasmMixedCall_N(execution_mode, 0); }
+WASM_EXEC_TEST(MixedCall_i64_1) { Run_WasmMixedCall_N(execution_mode, 1); }
+WASM_EXEC_TEST(MixedCall_i64_2) { Run_WasmMixedCall_N(execution_mode, 2); }
+WASM_EXEC_TEST(MixedCall_i64_3) { Run_WasmMixedCall_N(execution_mode, 3); }
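
The WASM_EXEC_TEST(name) macro that replaces plain TEST(Run_Wasm_##name)
throughout this diff is defined in test/cctest/wasm/wasm-run-utils.h, which is
not part of the excerpt shown here. Judging from the identifiers it introduces
(execution_mode, kExecuteCompiled, kExecuteInterpreted), a plausible expansion
is the following sketch, not the authoritative definition:

    // Hypothetical expansion, reconstructed from usage in this diff.
    #define WASM_EXEC_TEST(name)                                          \
      void RunWasm_##name(WasmExecutionMode execution_mode);              \
      TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); }  \
      TEST(RunWasmInterpreted_##name) {                                   \
        RunWasm_##name(kExecuteInterpreted);                              \
      }                                                                   \
      void RunWasm_##name(WasmExecutionMode execution_mode)

Each converted test body therefore runs twice, once compiled and once
interpreted, which is why execution_mode is now threaded into every WasmRunner
and TestingModule constructor.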
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
new file mode 100644
index 0000000000..4d39dd6ff7
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -0,0 +1,297 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/base/platform/elapsed-timer.h"
+
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/test-signatures.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+// For even shorter tests.
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B1(a) kExprBlock, a, kExprEnd
+#define RET(x) x, kExprReturn, 1
+#define RET_I8(x) kExprI8Const, x, kExprReturn, 1
+
+namespace {
+uint32_t GetMatchingRelocInfoCount(Handle<Code> code, RelocInfo::Mode rmode) {
+ int filter = 1 << rmode;
+ uint32_t ret = 0;
+ for (RelocIterator it(*code, filter); !it.done(); it.next()) {
+ ++ret;
+ }
+ return ret;
+}
+}
+
+WASM_EXEC_TEST(Int32AsmjsDivS) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
+ BUILD(r, WASM_BINOP(kExprI32AsmjsDivS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(0, r.Call(0, 100));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(kMin, r.Call(kMin, -1));
+ CHECK_EQ(0, r.Call(kMin, 0));
+}
+
+WASM_EXEC_TEST(Int32AsmjsRemS) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
+ BUILD(r, WASM_BINOP(kExprI32AsmjsRemS, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(33, r.Call(133, 100));
+ CHECK_EQ(0, r.Call(kMin, -1));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(0, r.Call(kMin, 0));
+}
+
+WASM_EXEC_TEST(Int32AsmjsDivU) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
+ BUILD(r, WASM_BINOP(kExprI32AsmjsDivU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(0, r.Call(0, 100));
+ CHECK_EQ(0, r.Call(kMin, -1));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(0, r.Call(kMin, 0));
+}
+
+WASM_EXEC_TEST(Int32AsmjsRemU) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
+ BUILD(r, WASM_BINOP(kExprI32AsmjsRemU, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ const int32_t kMin = std::numeric_limits<int32_t>::min();
+ CHECK_EQ(17, r.Call(217, 100));
+ CHECK_EQ(0, r.Call(100, 0));
+ CHECK_EQ(0, r.Call(-1001, 0));
+ CHECK_EQ(0, r.Call(kMin, 0));
+ CHECK_EQ(kMin, r.Call(kMin, -1));
+}
+
+WASM_EXEC_TEST(I32AsmjsSConvertF32) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float32());
+ BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF32, WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ int32_t expected = DoubleToInt32(*i);
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+WASM_EXEC_TEST(I32AsmjsSConvertF64) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float64());
+ BUILD(r, WASM_UNOP(kExprI32AsmjsSConvertF64, WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ int32_t expected = DoubleToInt32(*i);
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+WASM_EXEC_TEST(I32AsmjsUConvertF32) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Float32());
+ BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF32, WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ uint32_t expected = DoubleToUint32(*i);
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+WASM_EXEC_TEST(I32AsmjsUConvertF64) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Float64());
+ BUILD(r, WASM_UNOP(kExprI32AsmjsUConvertF64, WASM_GET_LOCAL(0)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ uint32_t expected = DoubleToUint32(*i);
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+WASM_EXEC_TEST(LoadMemI32_oob_asm) {
+ TestingModule module(execution_mode);
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
+ module.RandomizeMemory(1112);
+
+ BUILD(r, WASM_UNOP(kExprI32AsmjsLoadMem, WASM_GET_LOCAL(0)));
+
+ memory[0] = 999999;
+ CHECK_EQ(999999, r.Call(0u));
+  // TODO(titzer): offsets 29-31 should also be OOB.
+ for (uint32_t offset = 32; offset < 40; offset++) {
+ CHECK_EQ(0, r.Call(offset));
+ }
+
+ for (uint32_t offset = 0x80000000; offset < 0x80000010; offset++) {
+ CHECK_EQ(0, r.Call(offset));
+ }
+}
+
+WASM_EXEC_TEST(LoadMemF32_oob_asm) {
+ TestingModule module(execution_mode);
+ float* memory = module.AddMemoryElems<float>(8);
+ WasmRunner<float> r(&module, MachineType::Uint32());
+ module.RandomizeMemory(1112);
+
+ BUILD(r, WASM_UNOP(kExprF32AsmjsLoadMem, WASM_GET_LOCAL(0)));
+
+ memory[0] = 9999.5f;
+ CHECK_EQ(9999.5f, r.Call(0u));
+  // TODO(titzer): offsets 29-31 should also be OOB.
+ for (uint32_t offset = 32; offset < 40; offset++) {
+ CHECK(std::isnan(r.Call(offset)));
+ }
+
+ for (uint32_t offset = 0x80000000; offset < 0x80000010; offset++) {
+ CHECK(std::isnan(r.Call(offset)));
+ }
+}
+
+WASM_EXEC_TEST(LoadMemF64_oob_asm) {
+ TestingModule module(execution_mode);
+ double* memory = module.AddMemoryElems<double>(8);
+ WasmRunner<double> r(&module, MachineType::Uint32());
+ module.RandomizeMemory(1112);
+
+ BUILD(r, WASM_UNOP(kExprF64AsmjsLoadMem, WASM_GET_LOCAL(0)));
+
+ memory[0] = 9799.5;
+ CHECK_EQ(9799.5, r.Call(0u));
+ memory[1] = 11799.25;
+ CHECK_EQ(11799.25, r.Call(8u));
+  // TODO(titzer): offsets 57-63 should also be OOB.
+ for (uint32_t offset = 64; offset < 80; offset++) {
+ CHECK(std::isnan(r.Call(offset)));
+ }
+
+ for (uint32_t offset = 0x80000000; offset < 0x80000010; offset++) {
+ CHECK(std::isnan(r.Call(offset)));
+ }
+}
+
+WASM_EXEC_TEST(StoreMemI32_oob_asm) {
+ TestingModule module(execution_mode);
+ int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32(), MachineType::Uint32());
+ module.RandomizeMemory(1112);
+
+ BUILD(r, WASM_BINOP(kExprI32AsmjsStoreMem, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1)));
+
+ memory[0] = 7777;
+ CHECK_EQ(999999, r.Call(0u, 999999));
+ CHECK_EQ(999999, memory[0]);
+  // TODO(titzer): offsets 29-31 should also be OOB.
+ for (uint32_t offset = 32; offset < 40; offset++) {
+ CHECK_EQ(8888, r.Call(offset, 8888));
+ }
+
+ for (uint32_t offset = 0x10000000; offset < 0xF0000000; offset += 0x1000000) {
+ CHECK_EQ(7777, r.Call(offset, 7777));
+ }
+}
+
+#define FOREACH_INT_CHECKED_LOAD_OP(TEST_BODY) \
+ TEST_BODY(kExprI32AsmjsLoadMem8S) \
+ TEST_BODY(kExprI32AsmjsLoadMem8U) \
+ TEST_BODY(kExprI32AsmjsLoadMem16S) \
+ TEST_BODY(kExprI32AsmjsLoadMem16U) \
+ TEST_BODY(kExprI32AsmjsLoadMem)
+
+#define FOREACH_INT_CHECKED_STORE_OP(TEST_BODY) \
+ TEST_BODY(kExprI32AsmjsStoreMem8) \
+ TEST_BODY(kExprI32AsmjsStoreMem16) \
+ TEST_BODY(kExprI32AsmjsStoreMem)
+
+#define INT_LOAD_TEST(OP_TYPE) \
+ TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
+ TestingModule module(kExecuteCompiled); \
+ WasmRunner<int32_t> r(&module, MachineType::Uint32()); \
+ BUILD(r, WASM_UNOP(OP_TYPE, WASM_GET_LOCAL(0))); \
+ CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_REFERENCE)); \
+ CHECK_NE( \
+ 0, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
+ }
+
+FOREACH_INT_CHECKED_LOAD_OP(INT_LOAD_TEST)
+
+#define INT_STORE_TEST(OP_TYPE) \
+ TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
+ TestingModule module(kExecuteCompiled); \
+ WasmRunner<int32_t> r(&module, MachineType::Uint32(), \
+ MachineType::Uint32()); \
+ BUILD(r, WASM_BINOP(OP_TYPE, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))); \
+ CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_REFERENCE)); \
+ CHECK_NE( \
+ 0, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
+ }
+
+FOREACH_INT_CHECKED_STORE_OP(INT_STORE_TEST)
+
+TEST(RunWasm_AsmCheckedLoadFloat32RelocInfo) {
+ TestingModule module(kExecuteCompiled);
+ WasmRunner<float> r(&module, MachineType::Uint32());
+ BUILD(r, WASM_UNOP(kExprF32AsmjsLoadMem, WASM_GET_LOCAL(0)));
+
+ CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+}
+
+TEST(RunWasm_AsmCheckedStoreFloat32RelocInfo) {
+ TestingModule module(kExecuteCompiled);
+ WasmRunner<float> r(&module, MachineType::Uint32(), MachineType::Float32());
+ BUILD(r, WASM_BINOP(kExprF32AsmjsStoreMem, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1)));
+
+ CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+}
+
+TEST(RunWasm_AsmCheckedLoadFloat64RelocInfo) {
+ TestingModule module(kExecuteCompiled);
+ WasmRunner<double> r(&module, MachineType::Uint32());
+ BUILD(r, WASM_UNOP(kExprF64AsmjsLoadMem, WASM_GET_LOCAL(0)));
+
+ CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+}
+
+TEST(RunWasm_AsmCheckedStoreFloat64RelocInfo) {
+ TestingModule module(kExecuteCompiled);
+ WasmRunner<double> r(&module, MachineType::Uint32(), MachineType::Float64());
+ BUILD(r, WASM_BINOP(kExprF64AsmjsStoreMem, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(1)));
+
+ CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+}
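
The reloc-info assertions above all go through the GetMatchingRelocInfoCount
helper defined near the top of this file. A minimal usage sketch, assuming a
TestingModule whose first compiled function touches linear memory:

    // Count relocations of one mode in the first function's code object.
    Handle<Code> code = module.instance->function_code[0];
    CHECK_EQ(1, GetMatchingRelocInfoCount(
                    code, RelocInfo::WASM_MEMORY_REFERENCE));
    CHECK_NE(0, GetMatchingRelocInfoCount(
                    code, RelocInfo::WASM_MEMORY_SIZE_REFERENCE));

RelocIterator filters by bit mask, which is why the helper converts rmode into
1 << rmode before iterating.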
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
new file mode 100644
index 0000000000..c4e03b50d6
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -0,0 +1,291 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <memory>
+
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "src/wasm/wasm-interpreter.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/test-signatures.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+TEST(Run_WasmInt8Const_i) {
+ WasmRunner<int32_t> r(kExecuteInterpreted);
+ const byte kExpectedValue = 109;
+ // return(kExpectedValue)
+ BUILD(r, WASM_I8(kExpectedValue));
+ CHECK_EQ(kExpectedValue, r.Call());
+}
+
+TEST(Run_WasmIfElse) {
+ WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Int32());
+ BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9), WASM_I8(10)));
+ CHECK_EQ(10, r.Call(0));
+ CHECK_EQ(9, r.Call(1));
+}
+
+TEST(Run_WasmIfReturn) {
+ WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Int32());
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_RETURN1(WASM_I8(77))), WASM_I8(65));
+ CHECK_EQ(65, r.Call(0));
+ CHECK_EQ(77, r.Call(1));
+}
+
+TEST(Run_WasmNopsN) {
+ const int kMaxNops = 10;
+ byte code[kMaxNops + 2];
+ for (int nops = 0; nops < kMaxNops; nops++) {
+ byte expected = static_cast<byte>(20 + nops);
+ memset(code, kExprNop, sizeof(code));
+ code[nops] = kExprI8Const;
+ code[nops + 1] = expected;
+
+ WasmRunner<int32_t> r(kExecuteInterpreted);
+ r.Build(code, code + nops + 2);
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+TEST(Run_WasmConstsN) {
+ const int kMaxConsts = 10;
+ byte code[kMaxConsts * 2];
+ for (int count = 1; count < kMaxConsts; count++) {
+ for (int i = 0; i < count; i++) {
+ code[i * 2] = kExprI8Const;
+ code[i * 2 + 1] = static_cast<byte>(count * 10 + i);
+ }
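+    // The last constant emitted is count * 10 + (count - 1), i.e.
+    // count * 11 - 1, and the body's result is its last expression.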
+ byte expected = static_cast<byte>(count * 11 - 1);
+
+ WasmRunner<int32_t> r(kExecuteInterpreted);
+ r.Build(code, code + (count * 2));
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+TEST(Run_WasmBlocksN) {
+ const int kMaxNops = 10;
+ const int kExtra = 4;
+ byte code[kMaxNops + kExtra];
+ for (int nops = 0; nops < kMaxNops; nops++) {
+ byte expected = static_cast<byte>(30 + nops);
+ memset(code, kExprNop, sizeof(code));
+ code[0] = kExprBlock;
+ code[1 + nops] = kExprI8Const;
+ code[1 + nops + 1] = expected;
+ code[1 + nops + 2] = kExprEnd;
+
+ WasmRunner<int32_t> r(kExecuteInterpreted);
+ r.Build(code, code + nops + kExtra);
+ CHECK_EQ(expected, r.Call());
+ }
+}
+
+TEST(Run_WasmBlockBreakN) {
+ const int kMaxNops = 10;
+ const int kExtra = 6;
+ byte code[kMaxNops + kExtra];
+ for (int nops = 0; nops < kMaxNops; nops++) {
+ // Place the break anywhere within the block.
+ for (int index = 0; index < nops; index++) {
+ memset(code, kExprNop, sizeof(code));
+ code[0] = kExprBlock;
+ code[sizeof(code) - 1] = kExprEnd;
+
+ int expected = nops * 11 + index;
+ code[1 + index + 0] = kExprI8Const;
+ code[1 + index + 1] = static_cast<byte>(expected);
+ code[1 + index + 2] = kExprBr;
+ code[1 + index + 3] = ARITY_1;
+ code[1 + index + 4] = 0;
+
+ WasmRunner<int32_t> r(kExecuteInterpreted);
+ r.Build(code, code + kMaxNops + kExtra);
+ CHECK_EQ(expected, r.Call());
+ }
+ }
+}
+
+TEST(Run_Wasm_nested_ifs_i) {
+ WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Int32(),
+ MachineType::Int32());
+
+ BUILD(r, WASM_IF_ELSE(
+ WASM_GET_LOCAL(0),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(11), WASM_I8(12)),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(13), WASM_I8(14))));
+
+ CHECK_EQ(11, r.Call(1, 1));
+ CHECK_EQ(12, r.Call(1, 0));
+ CHECK_EQ(13, r.Call(0, 1));
+ CHECK_EQ(14, r.Call(0, 0));
+}
+
+// Make the tests more robust by not hard-coding offsets of the various
+// operations. The {Find} helper locates the given bytecodes in {code} and
+// returns their offsets in an array.
+std::unique_ptr<int[]> Find(byte* code, size_t code_size, int n, ...) {
+ va_list vl;
+ va_start(vl, n);
+
+ std::unique_ptr<int[]> offsets(new int[n]);
+
+ for (int i = 0; i < n; i++) {
+ offsets[i] = -1;
+ }
+
+ int pos = 0;
+ WasmOpcode current = static_cast<WasmOpcode>(va_arg(vl, int));
+ for (size_t i = 0; i < code_size; i++) {
+ if (code[i] == current) {
+ offsets[pos++] = static_cast<int>(i);
+ if (pos == n) break;
+ current = static_cast<WasmOpcode>(va_arg(vl, int));
+ }
+ }
+ va_end(vl);
+
+ return offsets;
+}
+
+TEST(Breakpoint_I32Add) {
+ static const int kLocalsDeclSize = 1;
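+  // The one-byte locals declaration precedes the function body, so
+  // breakpoint pcs are body offsets shifted by kLocalsDeclSize.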
+ static const int kNumBreakpoints = 3;
+ byte code[] = {WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
+ std::unique_ptr<int[]> offsets =
+ Find(code, sizeof(code), kNumBreakpoints, kExprGetLocal, kExprGetLocal,
+ kExprI32Add);
+
+ WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Uint32(),
+ MachineType::Uint32());
+
+ r.Build(code, code + arraysize(code));
+
+ WasmInterpreter* interpreter = r.interpreter();
+ WasmInterpreter::Thread* thread = interpreter->GetThread(0);
+ for (int i = 0; i < kNumBreakpoints; i++) {
+ interpreter->SetBreakpoint(r.function(), kLocalsDeclSize + offsets[i],
+ true);
+ }
+
+ FOR_UINT32_INPUTS(a) {
+ for (uint32_t b = 11; b < 3000000000u; b += 1000000000u) {
+ thread->Reset();
+ WasmVal args[] = {WasmVal(*a), WasmVal(b)};
+ thread->PushFrame(r.function(), args);
+
+ for (int i = 0; i < kNumBreakpoints; i++) {
+ thread->Run(); // run to next breakpoint
+ // Check the thread stopped at the right pc.
+ CHECK_EQ(WasmInterpreter::PAUSED, thread->state());
+ CHECK_EQ(kLocalsDeclSize + offsets[i], thread->GetBreakpointPc());
+ }
+
+ thread->Run(); // run to completion
+
+ // Check the thread finished with the right value.
+ CHECK_EQ(WasmInterpreter::FINISHED, thread->state());
+ uint32_t expected = (*a) + (b);
+ CHECK_EQ(expected, thread->GetReturnValue().to<uint32_t>());
+ }
+ }
+}
+
+TEST(Step_I32Mul) {
+ static const int kTraceLength = 4;
+ byte code[] = {WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
+
+ WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Uint32(),
+ MachineType::Uint32());
+
+ r.Build(code, code + arraysize(code));
+
+ WasmInterpreter* interpreter = r.interpreter();
+ WasmInterpreter::Thread* thread = interpreter->GetThread(0);
+
+ FOR_UINT32_INPUTS(a) {
+ for (uint32_t b = 33; b < 3000000000u; b += 1000000000u) {
+ thread->Reset();
+ WasmVal args[] = {WasmVal(*a), WasmVal(b)};
+ thread->PushFrame(r.function(), args);
+
+ // Run instructions one by one.
+ for (int i = 0; i < kTraceLength - 1; i++) {
+ thread->Step();
+ // Check the thread stopped.
+ CHECK_EQ(WasmInterpreter::PAUSED, thread->state());
+ }
+
+ // Run last instruction.
+ thread->Step();
+
+ // Check the thread finished with the right value.
+ CHECK_EQ(WasmInterpreter::FINISHED, thread->state());
+ uint32_t expected = (*a) * (b);
+ CHECK_EQ(expected, thread->GetReturnValue().to<uint32_t>());
+ }
+ }
+}
+
+TEST(Breakpoint_I32And_disable) {
+ static const int kLocalsDeclSize = 1;
+ static const int kNumBreakpoints = 1;
+ byte code[] = {WASM_I32_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
+ std::unique_ptr<int[]> offsets =
+ Find(code, sizeof(code), kNumBreakpoints, kExprI32And);
+
+ WasmRunner<int32_t> r(kExecuteInterpreted, MachineType::Uint32(),
+ MachineType::Uint32());
+
+ r.Build(code, code + arraysize(code));
+
+ WasmInterpreter* interpreter = r.interpreter();
+ WasmInterpreter::Thread* thread = interpreter->GetThread(0);
+
+ FOR_UINT32_INPUTS(a) {
+ for (uint32_t b = 11; b < 3000000000u; b += 1000000000u) {
+ // Run with and without breakpoints.
+ for (int do_break = 0; do_break < 2; do_break++) {
+ interpreter->SetBreakpoint(r.function(), kLocalsDeclSize + offsets[0],
+ do_break);
+ thread->Reset();
+ WasmVal args[] = {WasmVal(*a), WasmVal(b)};
+ thread->PushFrame(r.function(), args);
+
+ if (do_break) {
+ thread->Run(); // run to next breakpoint
+ // Check the thread stopped at the right pc.
+ CHECK_EQ(WasmInterpreter::PAUSED, thread->state());
+ CHECK_EQ(kLocalsDeclSize + offsets[0], thread->GetBreakpointPc());
+ }
+
+ thread->Run(); // run to completion
+
+ // Check the thread finished with the right value.
+ CHECK_EQ(WasmInterpreter::FINISHED, thread->state());
+ uint32_t expected = (*a) & (b);
+ CHECK_EQ(expected, thread->GetReturnValue().to<uint32_t>());
+ }
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 6d95d6e0fc..9dfba74ecc 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -25,14 +25,12 @@ using namespace v8::internal::wasm;
r.Build(code, code + arraysize(code)); \
} while (false)
-
#define ADD_CODE(vec, ...) \
do { \
byte __buf[] = {__VA_ARGS__}; \
for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
} while (false)
-
namespace {
// A helper for generating predictable but unique argument values that
// are easy to debug (e.g. with misaligned stacks).
@@ -48,23 +46,6 @@ class PredictableInputValues {
}
};
-
-uint32_t AddJsFunction(TestingModule* module, FunctionSig* sig,
- const char* source) {
- Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(source))));
- module->AddFunction(sig, Handle<Code>::null());
- uint32_t index = static_cast<uint32_t>(module->module->functions.size() - 1);
- Isolate* isolate = CcTest::InitIsolateOnce();
- WasmName module_name = {"test", 4};
- WasmName function_name = {nullptr, 0};
- Handle<Code> code = CompileWasmToJSWrapper(isolate, module, jsfunc, sig,
- module_name, function_name);
- module->instance->function_code[index] = code;
- return index;
-}
-
-
uint32_t AddJSSelector(TestingModule* module, FunctionSig* sig, int which) {
const int kMaxParams = 11;
static const char* formals[kMaxParams] = {"",
@@ -86,22 +67,9 @@ uint32_t AddJSSelector(TestingModule* module, FunctionSig* sig, int which) {
SNPrintF(source, "(function(%s) { return %c; })",
formals[sig->parameter_count()], param);
- return AddJsFunction(module, sig, source.start());
-}
-
-
-Handle<JSFunction> WrapCode(ModuleEnv* module, uint32_t index) {
- Isolate* isolate = module->module->shared_isolate;
- // Wrap the code so it can be called as a JS function.
- Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
- Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
- Handle<Code> code = module->instance->function_code[index];
- WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
- return compiler::CompileJSToWasmWrapper(isolate, module, name, code,
- module_object, index);
+ return module->AddJsFunction(sig, source.start());
}
-
void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc,
Handle<Object>* buffer, int count) {
Isolate* isolate = jsfunc->GetIsolate();
@@ -119,7 +87,6 @@ void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc,
}
}
-
void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
double b) {
Isolate* isolate = jsfunc->GetIsolate();
@@ -130,72 +97,70 @@ void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
} // namespace
TEST(Run_Int32Sub_jswrapped) {
+ CcTest::InitializeVM();
TestSignatures sigs;
TestingModule module;
WasmFunctionCompiler t(sigs.i_ii(), &module);
BUILD(t, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
EXPECT_CALL(33, jsfunc, 44, 11);
EXPECT_CALL(-8723487, jsfunc, -8000000, 723487);
}
-
TEST(Run_Float32Div_jswrapped) {
+ CcTest::InitializeVM();
TestSignatures sigs;
TestingModule module;
WasmFunctionCompiler t(sigs.f_ff(), &module);
BUILD(t, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
EXPECT_CALL(92, jsfunc, 46, 0.5);
EXPECT_CALL(64, jsfunc, -16, -0.25);
}
-
TEST(Run_Float64Add_jswrapped) {
+ CcTest::InitializeVM();
TestSignatures sigs;
TestingModule module;
WasmFunctionCompiler t(sigs.d_dd(), &module);
BUILD(t, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
EXPECT_CALL(3, jsfunc, 2, 1);
EXPECT_CALL(-5.5, jsfunc, -5.25, -0.25);
}
-
TEST(Run_I32Popcount_jswrapped) {
+ CcTest::InitializeVM();
TestSignatures sigs;
TestingModule module;
WasmFunctionCompiler t(sigs.i_i(), &module);
BUILD(t, WASM_I32_POPCNT(WASM_GET_LOCAL(0)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
EXPECT_CALL(2, jsfunc, 9, 0);
EXPECT_CALL(3, jsfunc, 11, 0);
EXPECT_CALL(6, jsfunc, 0x3F, 0);
-
- USE(AddJsFunction);
}
-
TEST(Run_CallJS_Add_jswrapped) {
+ CcTest::InitializeVM();
TestSignatures sigs;
TestingModule module;
WasmFunctionCompiler t(sigs.i_i(), &module);
uint32_t js_index =
- AddJsFunction(&module, sigs.i_i(), "(function(a) { return a + 99; })");
- BUILD(t, WASM_CALL_FUNCTION(js_index, WASM_GET_LOCAL(0)));
+ module.AddJsFunction(sigs.i_i(), "(function(a) { return a + 99; })");
+ BUILD(t, WASM_CALL_FUNCTION1(js_index, WASM_GET_LOCAL(0)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
EXPECT_CALL(101, jsfunc, 2, -8);
EXPECT_CALL(199, jsfunc, 100, -1);
EXPECT_CALL(-666666801, jsfunc, -666666900, -1);
}
-
void RunJSSelectTest(int which) {
const int kMaxParams = 8;
PredictableInputValues inputs(0x100);
@@ -212,40 +177,64 @@ void RunJSSelectTest(int which) {
{
std::vector<byte> code;
- ADD_CODE(code, kExprCallFunction, static_cast<byte>(js_index));
for (int i = 0; i < num_params; i++) {
ADD_CODE(code, WASM_F64(inputs.arg_d(i)));
}
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params),
+ static_cast<byte>(js_index));
+
size_t end = code.size();
code.push_back(0);
t.Build(&code[0], &code[end]);
}
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
double expected = inputs.arg_d(which);
EXPECT_CALL(expected, jsfunc, 0.0, 0.0);
}
}
+TEST(Run_JSSelect_0) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(0);
+}
-TEST(Run_JSSelect_0) { RunJSSelectTest(0); }
-
-TEST(Run_JSSelect_1) { RunJSSelectTest(1); }
-
-TEST(Run_JSSelect_2) { RunJSSelectTest(2); }
+TEST(Run_JSSelect_1) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(1);
+}
-TEST(Run_JSSelect_3) { RunJSSelectTest(3); }
+TEST(Run_JSSelect_2) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(2);
+}
-TEST(Run_JSSelect_4) { RunJSSelectTest(4); }
+TEST(Run_JSSelect_3) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(3);
+}
-TEST(Run_JSSelect_5) { RunJSSelectTest(5); }
+TEST(Run_JSSelect_4) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(4);
+}
-TEST(Run_JSSelect_6) { RunJSSelectTest(6); }
+TEST(Run_JSSelect_5) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(5);
+}
-TEST(Run_JSSelect_7) { RunJSSelectTest(7); }
+TEST(Run_JSSelect_6) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(6);
+}
+TEST(Run_JSSelect_7) {
+ CcTest::InitializeVM();
+ RunJSSelectTest(7);
+}
void RunWASMSelectTest(int which) {
PredictableInputValues inputs(0x200);
@@ -260,7 +249,7 @@ void RunWASMSelectTest(int which) {
TestingModule module;
WasmFunctionCompiler t(&sig, &module);
BUILD(t, WASM_GET_LOCAL(which));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
Handle<Object> args[] = {
isolate->factory()->NewNumber(inputs.arg_d(0)),
@@ -278,23 +267,45 @@ void RunWASMSelectTest(int which) {
}
}
+TEST(Run_WASMSelect_0) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(0);
+}
-TEST(Run_WASMSelect_0) { RunWASMSelectTest(0); }
-
-TEST(Run_WASMSelect_1) { RunWASMSelectTest(1); }
-
-TEST(Run_WASMSelect_2) { RunWASMSelectTest(2); }
+TEST(Run_WASMSelect_1) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(1);
+}
-TEST(Run_WASMSelect_3) { RunWASMSelectTest(3); }
+TEST(Run_WASMSelect_2) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(2);
+}
-TEST(Run_WASMSelect_4) { RunWASMSelectTest(4); }
+TEST(Run_WASMSelect_3) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(3);
+}
-TEST(Run_WASMSelect_5) { RunWASMSelectTest(5); }
+TEST(Run_WASMSelect_4) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(4);
+}
-TEST(Run_WASMSelect_6) { RunWASMSelectTest(6); }
+TEST(Run_WASMSelect_5) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(5);
+}
-TEST(Run_WASMSelect_7) { RunWASMSelectTest(7); }
+TEST(Run_WASMSelect_6) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(6);
+}
+TEST(Run_WASMSelect_7) {
+ CcTest::InitializeVM();
+ RunWASMSelectTest(7);
+}
void RunWASMSelectAlignTest(int num_args, int num_params) {
PredictableInputValues inputs(0x300);
@@ -310,7 +321,7 @@ void RunWASMSelectAlignTest(int num_args, int num_params) {
TestingModule module;
WasmFunctionCompiler t(&sig, &module);
BUILD(t, WASM_GET_LOCAL(which));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
Handle<Object> args[] = {isolate->factory()->NewNumber(inputs.arg_d(0)),
isolate->factory()->NewNumber(inputs.arg_d(1)),
@@ -329,43 +340,45 @@ void RunWASMSelectAlignTest(int num_args, int num_params) {
}
}
-
TEST(Run_WASMSelectAlign_0) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(0, 1);
RunWASMSelectAlignTest(0, 2);
}
-
TEST(Run_WASMSelectAlign_1) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(1, 2);
RunWASMSelectAlignTest(1, 3);
}
-
TEST(Run_WASMSelectAlign_2) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(2, 3);
RunWASMSelectAlignTest(2, 4);
}
-
TEST(Run_WASMSelectAlign_3) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(3, 3);
RunWASMSelectAlignTest(3, 4);
}
-
TEST(Run_WASMSelectAlign_4) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(4, 3);
RunWASMSelectAlignTest(4, 4);
}
TEST(Run_WASMSelectAlign_7) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(7, 5);
RunWASMSelectAlignTest(7, 6);
RunWASMSelectAlignTest(7, 7);
}
TEST(Run_WASMSelectAlign_8) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(8, 5);
RunWASMSelectAlignTest(8, 6);
RunWASMSelectAlignTest(8, 7);
@@ -373,6 +386,7 @@ TEST(Run_WASMSelectAlign_8) {
}
TEST(Run_WASMSelectAlign_9) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(9, 6);
RunWASMSelectAlignTest(9, 7);
RunWASMSelectAlignTest(9, 8);
@@ -380,6 +394,7 @@ TEST(Run_WASMSelectAlign_9) {
}
TEST(Run_WASMSelectAlign_10) {
+ CcTest::InitializeVM();
RunWASMSelectAlignTest(10, 7);
RunWASMSelectAlignTest(10, 8);
RunWASMSelectAlignTest(10, 9);
@@ -400,12 +415,13 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
// Build the calling code.
std::vector<byte> code;
- ADD_CODE(code, kExprCallFunction, 0);
for (int i = 0; i < num_params; i++) {
ADD_CODE(code, WASM_GET_LOCAL(i));
}
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params), 0);
+
size_t end = code.size();
code.push_back(0);
@@ -418,7 +434,7 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
WasmFunctionCompiler t(&sig, &module);
t.Build(&code[0], &code[end]);
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ Handle<JSFunction> jsfunc = module.WrapCode(t.CompileAndAdd());
Handle<Object> args[] = {
factory->NewNumber(inputs.arg_d(0)),
@@ -439,33 +455,38 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
}
}
-
TEST(Run_JSSelectAlign_0) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(0, 1);
RunJSSelectAlignTest(0, 2);
}
TEST(Run_JSSelectAlign_1) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(1, 2);
RunJSSelectAlignTest(1, 3);
}
TEST(Run_JSSelectAlign_2) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(2, 3);
RunJSSelectAlignTest(2, 4);
}
TEST(Run_JSSelectAlign_3) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(3, 3);
RunJSSelectAlignTest(3, 4);
}
TEST(Run_JSSelectAlign_4) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(4, 3);
RunJSSelectAlignTest(4, 4);
}
TEST(Run_JSSelectAlign_7) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(7, 3);
RunJSSelectAlignTest(7, 4);
RunJSSelectAlignTest(7, 4);
@@ -473,6 +494,7 @@ TEST(Run_JSSelectAlign_7) {
}
TEST(Run_JSSelectAlign_8) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(8, 5);
RunJSSelectAlignTest(8, 6);
RunJSSelectAlignTest(8, 7);
@@ -480,6 +502,7 @@ TEST(Run_JSSelectAlign_8) {
}
TEST(Run_JSSelectAlign_9) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(9, 6);
RunJSSelectAlignTest(9, 7);
RunJSSelectAlignTest(9, 8);
@@ -487,6 +510,7 @@ TEST(Run_JSSelectAlign_9) {
}
TEST(Run_JSSelectAlign_10) {
+ CcTest::InitializeVM();
RunJSSelectAlignTest(10, 7);
RunJSSelectAlignTest(10, 8);
RunJSSelectAlignTest(10, 9);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 118a91f3e9..8449a52ff3 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -6,190 +6,253 @@
#include <string.h>
#include "src/wasm/encoder.h"
+#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/wasm/test-signatures.h"
using namespace v8::base;
using namespace v8::internal;
using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
-
-// TODO(titzer): fix arm64 frame alignment.
namespace {
-void TestModule(WasmModuleIndex* module, int32_t expected_result) {
+void TestModule(Zone* zone, WasmModuleBuilder* builder,
+ int32_t expected_result) {
+ ZoneBuffer buffer(zone);
+ builder->WriteTo(buffer);
+
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
int32_t result =
- CompileAndRunWasmModule(isolate, module->Begin(), module->End());
+ testing::CompileAndRunWasmModule(isolate, buffer.begin(), buffer.end());
CHECK_EQ(expected_result, result);
}
-} // namespace
+void ExportAs(WasmFunctionBuilder* f, const char* name) {
+ f->SetExported();
+ f->SetName(name, static_cast<int>(strlen(name)));
+}
-// A raw test that skips the WasmModuleBuilder.
-TEST(Run_WasmModule_CallAdd_rev) {
- static const byte data[] = {
- WASM_MODULE_HEADER,
- // sig#0 ------------------------------------------
- WASM_SECTION_SIGNATURES_SIZE + 7, // Section size.
- WASM_SECTION_SIGNATURES, 2, 0, kLocalI32, // void -> int
- 2, kLocalI32, kLocalI32, kLocalI32, // int,int -> int
- // func#0 (main) ----------------------------------
- WASM_SECTION_FUNCTIONS_SIZE + 24, WASM_SECTION_FUNCTIONS, 2,
- kDeclFunctionExport, 0, 0, // sig index
- 7, 0, // body size
- 0, // locals
- kExprCallFunction, 1, // --
- kExprI8Const, 77, // --
- kExprI8Const, 22, // --
- // func#1 -----------------------------------------
- 0, // no name, not exported
- 1, 0, // sig index
- 6, 0, // body size
- 0, // locals
- kExprI32Add, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- };
-
- Isolate* isolate = CcTest::InitIsolateOnce();
- HandleScope scope(isolate);
- WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
- int32_t result =
- CompileAndRunWasmModule(isolate, data, data + arraysize(data));
- CHECK_EQ(99, result);
+void ExportAsMain(WasmFunctionBuilder* f) {
+ static const char kMainName[] = "main";
+ ExportAs(f, kMainName);
}
+} // namespace
TEST(Run_WasmModule_Return114) {
static const int32_t kReturnValue = 114;
+ TestSignatures sigs;
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
+
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->ReturnType(kAstI32);
- f->Exported(1);
+ f->SetSignature(sigs.i_v());
+ ExportAsMain(f);
byte code[] = {WASM_I8(kReturnValue)};
f->EmitCode(code, sizeof(code));
- WasmModuleWriter* writer = builder->Build(&zone);
- TestModule(writer->WriteTo(&zone), kReturnValue);
+ TestModule(&zone, builder, kReturnValue);
}
-
TEST(Run_WasmModule_CallAdd) {
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
+ TestSignatures sigs;
+
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+
uint16_t f1_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f1_index);
- f->ReturnType(kAstI32);
- uint16_t param1 = f->AddParam(kAstI32);
- uint16_t param2 = f->AddParam(kAstI32);
+ f->SetSignature(sigs.i_ii());
+ uint16_t param1 = 0;
+ uint16_t param2 = 1;
byte code1[] = {WASM_I32_ADD(WASM_GET_LOCAL(param1), WASM_GET_LOCAL(param2))};
- uint32_t local_indices1[] = {2, 4};
- f->EmitCode(code1, sizeof(code1), local_indices1, sizeof(local_indices1) / 4);
+ f->EmitCode(code1, sizeof(code1));
+
uint16_t f2_index = builder->AddFunction();
f = builder->FunctionAt(f2_index);
- f->ReturnType(kAstI32);
- f->Exported(1);
- byte code2[] = {WASM_CALL_FUNCTION(f1_index, WASM_I8(77), WASM_I8(22))};
+ f->SetSignature(sigs.i_v());
+
+ ExportAsMain(f);
+ byte code2[] = {WASM_CALL_FUNCTION2(f1_index, WASM_I8(77), WASM_I8(22))};
f->EmitCode(code2, sizeof(code2));
- WasmModuleWriter* writer = builder->Build(&zone);
- TestModule(writer->WriteTo(&zone), 99);
+ TestModule(&zone, builder, 99);
}
-
TEST(Run_WasmModule_ReadLoadedDataSegment) {
static const byte kDataSegmentDest0 = 12;
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
+ TestSignatures sigs;
+
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->ReturnType(kAstI32);
- f->Exported(1);
+ f->SetSignature(sigs.i_v());
+
+ ExportAsMain(f);
byte code[] = {
WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(kDataSegmentDest0))};
f->EmitCode(code, sizeof(code));
byte data[] = {0xaa, 0xbb, 0xcc, 0xdd};
builder->AddDataSegment(new (&zone) WasmDataSegmentEncoder(
&zone, data, sizeof(data), kDataSegmentDest0));
- WasmModuleWriter* writer = builder->Build(&zone);
- TestModule(writer->WriteTo(&zone), 0xddccbbaa);
+ TestModule(&zone, builder, 0xddccbbaa);
}
TEST(Run_WasmModule_CheckMemoryIsZero) {
static const int kCheckSize = 16 * 1024;
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
+ TestSignatures sigs;
+
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->ReturnType(kAstI32);
+ f->SetSignature(sigs.i_v());
+
uint16_t localIndex = f->AddLocal(kAstI32);
- f->Exported(1);
+ ExportAsMain(f);
byte code[] = {WASM_BLOCK(
- 2,
WASM_WHILE(
WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I32V_3(kCheckSize)),
WASM_IF_ELSE(
WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(localIndex)),
WASM_BRV(2, WASM_I8(-1)), WASM_INC_LOCAL_BY(localIndex, 4))),
WASM_I8(11))};
- f->EmitCode(code, sizeof(code), nullptr, 0);
- WasmModuleWriter* writer = builder->Build(&zone);
- TestModule(writer->WriteTo(&zone), 11);
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, 11);
}
TEST(Run_WasmModule_CallMain_recursive) {
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
+ TestSignatures sigs;
+
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f_index);
- f->ReturnType(kAstI32);
+ f->SetSignature(sigs.i_v());
+
uint16_t localIndex = f->AddLocal(kAstI32);
- f->Exported(1);
+ ExportAsMain(f);
byte code[] = {WASM_BLOCK(
- 2, WASM_SET_LOCAL(localIndex,
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_SET_LOCAL(localIndex,
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
WASM_IF_ELSE(WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I8(5)),
- WASM_BLOCK(2, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
- WASM_INC_LOCAL(localIndex)),
+ WASM_BLOCK(WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ WASM_INC_LOCAL(localIndex)),
WASM_BRV(1, WASM_CALL_FUNCTION0(0))),
WASM_BRV(0, WASM_I8(55))))};
- f->EmitCode(code, sizeof(code), nullptr, 0);
- WasmModuleWriter* writer = builder->Build(&zone);
- TestModule(writer->WriteTo(&zone), 55);
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, 55);
}
TEST(Run_WasmModule_Global) {
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
+ TestSignatures sigs;
+
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint32_t global1 = builder->AddGlobal(MachineType::Int32(), 0);
- uint32_t global2 = builder->AddGlobal(MachineType::Int32(), 0);
+ uint32_t global1 = builder->AddGlobal(kAstI32, 0);
+ uint32_t global2 = builder->AddGlobal(kAstI32, 0);
uint16_t f1_index = builder->AddFunction();
WasmFunctionBuilder* f = builder->FunctionAt(f1_index);
- f->ReturnType(kAstI32);
+ f->SetSignature(sigs.i_v());
byte code1[] = {
- WASM_I32_ADD(WASM_LOAD_GLOBAL(global1), WASM_LOAD_GLOBAL(global2))};
+ WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
f->EmitCode(code1, sizeof(code1));
uint16_t f2_index = builder->AddFunction();
f = builder->FunctionAt(f2_index);
- f->ReturnType(kAstI32);
- f->Exported(1);
- byte code2[] = {WASM_STORE_GLOBAL(global1, WASM_I32V_1(56)),
- WASM_STORE_GLOBAL(global2, WASM_I32V_1(41)),
- WASM_RETURN(WASM_CALL_FUNCTION0(f1_index))};
+ f->SetSignature(sigs.i_v());
+ ExportAsMain(f);
+ byte code2[] = {WASM_SET_GLOBAL(global1, WASM_I32V_1(56)),
+ WASM_SET_GLOBAL(global2, WASM_I32V_1(41)),
+ WASM_RETURN1(WASM_CALL_FUNCTION0(f1_index))};
f->EmitCode(code2, sizeof(code2));
- WasmModuleWriter* writer = builder->Build(&zone);
- TestModule(writer->WriteTo(&zone), 97);
+ TestModule(&zone, builder, 97);
+}
+
+TEST(Run_WasmModule_Serialization) {
+ FLAG_expose_wasm = true;
+ static const char* kFunctionName = "increment";
+ v8::base::AccountingAllocator allocator;
+ Zone zone(&allocator);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ TestSignatures sigs;
+
+ WasmFunctionBuilder* f = builder->FunctionAt(f_index);
+ f->SetSignature(sigs.i_i());
+ byte code[] = {WASM_GET_LOCAL(0), kExprI32Const, 1, kExprI32Add};
+ f->EmitCode(code, sizeof(code));
+ ExportAs(f, kFunctionName);
+
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ ErrorThrower thrower(isolate, "");
+
+ v8::WasmCompiledModule::SerializedModule data;
+ {
+ HandleScope scope(isolate);
+
+ ModuleResult decoding_result = DecodeWasmModule(
+ isolate, &zone, buffer.begin(), buffer.end(), false, kWasmOrigin);
+ std::unique_ptr<const WasmModule> module(decoding_result.val);
+ CHECK(!decoding_result.failed());
+
+ MaybeHandle<FixedArray> compiled_module =
+ module->CompileFunctions(isolate, &thrower);
+ CHECK(!compiled_module.is_null());
+ Handle<JSObject> module_obj =
+ CreateCompiledModuleObject(isolate, compiled_module.ToHandleChecked());
+ v8::Local<v8::Object> v8_module_obj = v8::Utils::ToLocal(module_obj);
+ CHECK(v8_module_obj->IsWebAssemblyCompiledModule());
+
+ v8::Local<v8::WasmCompiledModule> v8_compiled_module =
+ v8_module_obj.As<v8::WasmCompiledModule>();
+ data = v8_compiled_module->Serialize();
+ }
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = isolate->array_buffer_allocator();
+
+ v8::Isolate* v8_isolate = v8::Isolate::New(create_params);
+ isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+ v8::HandleScope new_scope(v8_isolate);
+ v8::Local<v8::Context> new_ctx = v8::Context::New(v8_isolate);
+ new_ctx->Enter();
+
+ v8::MaybeLocal<v8::WasmCompiledModule> deserialized =
+ v8::WasmCompiledModule::Deserialize(v8_isolate, data);
+ v8::Local<v8::WasmCompiledModule> compiled_module;
+ CHECK(deserialized.ToLocal(&compiled_module));
+ Handle<JSObject> module_object =
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*compiled_module));
+ Handle<FixedArray> compiled_part =
+ handle(FixedArray::cast(module_object->GetInternalField(0)));
+ Handle<JSObject> instance =
+ WasmModule::Instantiate(isolate, compiled_part,
+ Handle<JSReceiver>::null(),
+ Handle<JSArrayBuffer>::null())
+ .ToHandleChecked();
+ Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(41), isolate)};
+ int32_t result = testing::CallFunction(isolate, instance, &thrower,
+ kFunctionName, 1, params);
+ CHECK(result == 42);
+ new_ctx->Exit();
+ }
}
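// The round trip above is the embedder pattern for caching compiled wasm:
// Serialize() in one isolate yields what is presumably a plain byte payload,
// and Deserialize() in a fresh isolate restores a compiled module that can
// be instantiated and called (here increment(41) == 42) without going back
// through the compiler.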
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
new file mode 100644
index 0000000000..65b1d57bc1
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define FOREACH_TYPE(TEST_BODY) \
+ TEST_BODY(int32_t, I32, WASM_I32_ADD) \
+ TEST_BODY(int64_t, I64, WASM_I64_ADD) \
+ TEST_BODY(float, F32, WASM_F32_ADD) \
+ TEST_BODY(double, F64, WASM_F64_ADD)
+
+#define LOAD_SET_GLOBAL_TEST_BODY(C_TYPE, MACHINE_TYPE, ADD) \
+ TEST(WasmRelocateGlobal##MACHINE_TYPE) { \
+ TestingModule module(kExecuteCompiled); \
+ module.AddGlobal<C_TYPE>(kAst##MACHINE_TYPE); \
+ module.AddGlobal<C_TYPE>(kAst##MACHINE_TYPE); \
+ \
+ WasmRunner<C_TYPE> r(&module, \
+ WasmOpcodes::MachineTypeFor(kAst##MACHINE_TYPE)); \
+ \
+ /* global = global + p0 */ \
+ BUILD(r, WASM_SET_GLOBAL(1, ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0)))); \
+ CHECK_EQ(1, module.instance->function_code.size()); \
+ \
+ int filter = 1 << RelocInfo::WASM_GLOBAL_REFERENCE; \
+ \
+ Handle<Code> code = module.instance->function_code[0]; \
+ \
+ Address old_start = module.instance->globals_start; \
+ Address new_start = old_start + 1; \
+ \
+ Address old_addresses[2]; \
+ uint32_t address_index = 0U; \
+ for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
+ old_addresses[address_index] = it.rinfo()->wasm_global_reference(); \
+ it.rinfo()->update_wasm_global_reference(old_start, new_start); \
+ ++address_index; \
+ } \
+ CHECK_EQ(2U, address_index); \
+ \
+ address_index = 0U; \
+ for (RelocIterator it(*code, filter); !it.done(); it.next()) { \
+ CHECK_EQ(old_addresses[address_index] + 1, \
+ it.rinfo()->wasm_global_reference()); \
+ ++address_index; \
+ } \
+ CHECK_EQ(2U, address_index); \
+ }
+
+FOREACH_TYPE(LOAD_SET_GLOBAL_TEST_BODY)
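// FOREACH_TYPE stamps out one test per value type; e.g. the I32 instance is
// TEST(WasmRelocateGlobalI32). Each test builds "global[1] = global[0] + p0",
// records the two WASM_GLOBAL_REFERENCE reloc targets, patches globals_start
// by +1 via update_wasm_global_reference, then re-iterates the reloc info to
// check both targets moved by exactly that delta.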
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 70d461b627..42ca816655 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -6,6 +6,8 @@
#include <stdlib.h>
#include <string.h>
+#include "src/base/platform/elapsed-timer.h"
+#include "src/utils.h"
#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
@@ -19,41 +21,38 @@ using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
// for even shorter tests.
-#define B2(a, b) kExprBlock, 2, a, b
-#define B1(a) kExprBlock, 1, a
-#define RET(x) kExprReturn, x
-#define RET_I8(x) kExprReturn, kExprI8Const, x
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B1(a) kExprBlock, a, kExprEnd
+#define RET(x) x, kExprReturn, 1
+#define RET_I8(x) kExprI8Const, x, kExprReturn, 1
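// These macro changes track the switch from the arity-prefixed AST encoding
// to the end-delimited, operands-first encoding: blocks no longer carry an
// explicit child count and are closed by kExprEnd, and a return pushes its
// value before the opcode. "return 12" was previously
//   kExprReturn, kExprI8Const, 12
// and is now
//   kExprI8Const, 12, kExprReturn, 1  // trailing byte: return arity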
-TEST(Run_WasmInt8Const) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Int8Const) {
+ WasmRunner<int32_t> r(execution_mode);
const byte kExpectedValue = 121;
// return(kExpectedValue)
BUILD(r, WASM_I8(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
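// WASM_EXEC_TEST is assumed (its definition is not in this diff) to expand
// each body into both execution modes, along the lines of:
//
//   #define WASM_EXEC_TEST(name)                                         \
//     void RunWasm_##name(WasmExecutionMode execution_mode);             \
//     TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); } \
//     TEST(RunWasmInterpreted_##name) {                                  \
//       RunWasm_##name(kExecuteInterpreted);                             \
//     }                                                                  \
//     void RunWasm_##name(WasmExecutionMode execution_mode)
//
// which is why every runner and helper below now threads execution_mode
// through as its first argument.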
-
-TEST(Run_WasmInt8Const_fallthru1) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Int8Const_fallthru1) {
+ WasmRunner<int32_t> r(execution_mode);
const byte kExpectedValue = 122;
// kExpectedValue
BUILD(r, WASM_I8(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
-
-TEST(Run_WasmInt8Const_fallthru2) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Int8Const_fallthru2) {
+ WasmRunner<int32_t> r(execution_mode);
const byte kExpectedValue = 123;
// -99 kExpectedValue
BUILD(r, WASM_I8(-99), WASM_I8(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
-
-TEST(Run_WasmInt8Const_all) {
- for (int value = -128; value <= 127; value++) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Int8Const_all) {
+ for (int value = -128; value <= 127; ++value) {
+ WasmRunner<int32_t> r(execution_mode);
// return(value)
BUILD(r, WASM_I8(value));
int32_t result = r.Call();
@@ -61,19 +60,17 @@ TEST(Run_WasmInt8Const_all) {
}
}
-
-TEST(Run_WasmInt32Const) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Int32Const) {
+ WasmRunner<int32_t> r(execution_mode);
const int32_t kExpectedValue = 0x11223344;
// return(kExpectedValue)
BUILD(r, WASM_I32V_5(kExpectedValue));
CHECK_EQ(kExpectedValue, r.Call());
}
-
-TEST(Run_WasmInt32Const_many) {
+WASM_EXEC_TEST(Int32Const_many) {
FOR_INT32_INPUTS(i) {
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
const int32_t kExpectedValue = *i;
// return(kExpectedValue)
BUILD(r, WASM_I32V(kExpectedValue));
@@ -81,66 +78,60 @@ TEST(Run_WasmInt32Const_many) {
}
}
-
-TEST(Run_WasmMemorySize) {
- TestingModule module;
+WASM_EXEC_TEST(MemorySize) {
+ TestingModule module(execution_mode);
WasmRunner<int32_t> r(&module);
module.AddMemory(1024);
BUILD(r, kExprMemorySize);
CHECK_EQ(1024, r.Call());
}
-
-TEST(Run_WasmInt32Param0) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Int32Param0) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// return(local[0])
BUILD(r, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
-
-TEST(Run_WasmInt32Param0_fallthru) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Int32Param0_fallthru) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// local[0]
BUILD(r, WASM_GET_LOCAL(0));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
-
-TEST(Run_WasmInt32Param1) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(Int32Param1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
// local[1]
BUILD(r, WASM_GET_LOCAL(1));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(-111, *i)); }
}
-
-TEST(Run_WasmInt32Add) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Int32Add) {
+ WasmRunner<int32_t> r(execution_mode);
// 11 + 44
BUILD(r, WASM_I32_ADD(WASM_I8(11), WASM_I8(44)));
CHECK_EQ(55, r.Call());
}
-
-TEST(Run_WasmInt32Add_P) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Int32Add_P) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I8(13), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
}
-
-TEST(Run_WasmInt32Add_P_fallthru) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Int32Add_P_fallthru) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// p0 + 13
BUILD(r, WASM_I32_ADD(WASM_I8(13), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i + 13, r.Call(*i)); }
}
-
-TEST(Run_WasmInt32Add_P2) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(Int32Add_P2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
// p0 + p1
BUILD(r, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
@@ -152,180 +143,178 @@ TEST(Run_WasmInt32Add_P2) {
}
}
-
-TEST(Run_WasmFloat32Add) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Float32Add) {
+ WasmRunner<int32_t> r(execution_mode);
// int(11.5f + 44.5f)
BUILD(r,
WASM_I32_SCONVERT_F32(WASM_F32_ADD(WASM_F32(11.5f), WASM_F32(44.5f))));
CHECK_EQ(56, r.Call());
}
-
-TEST(Run_WasmFloat64Add) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Float64Add) {
+ WasmRunner<int32_t> r(execution_mode);
// return int(13.5d + 43.5d)
BUILD(r, WASM_I32_SCONVERT_F64(WASM_F64_ADD(WASM_F64(13.5), WASM_F64(43.5))));
CHECK_EQ(57, r.Call());
}
-
-void TestInt32Binop(WasmOpcode opcode, int32_t expected, int32_t a, int32_t b) {
+void TestInt32Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+ int32_t expected, int32_t a, int32_t b) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// K op K
BUILD(r, WASM_BINOP(opcode, WASM_I32V(a), WASM_I32V(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
// a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
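// Each binop is deliberately checked twice: once with both operands as
// immediates ("K op K", exercising constant folding in the compiled path)
// and once with both as parameters ("a op b", exercising the generic
// codegen and interpreter paths).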
-TEST(Run_WasmInt32Binops) {
- TestInt32Binop(kExprI32Add, 88888888, 33333333, 55555555);
- TestInt32Binop(kExprI32Sub, -1111111, 7777777, 8888888);
- TestInt32Binop(kExprI32Mul, 65130756, 88734, 734);
- TestInt32Binop(kExprI32DivS, -66, -4777344, 72384);
- TestInt32Binop(kExprI32DivU, 805306368, 0xF0000000, 5);
- TestInt32Binop(kExprI32RemS, -3, -3003, 1000);
- TestInt32Binop(kExprI32RemU, 4, 4004, 1000);
- TestInt32Binop(kExprI32And, 0xEE, 0xFFEE, 0xFF0000FF);
- TestInt32Binop(kExprI32Ior, 0xF0FF00FF, 0xF0F000EE, 0x000F0011);
- TestInt32Binop(kExprI32Xor, 0xABCDEF01, 0xABCDEFFF, 0xFE);
- TestInt32Binop(kExprI32Shl, 0xA0000000, 0xA, 28);
- TestInt32Binop(kExprI32ShrU, 0x07000010, 0x70000100, 4);
- TestInt32Binop(kExprI32ShrS, 0xFF000000, 0x80000000, 7);
- TestInt32Binop(kExprI32Ror, 0x01000000, 0x80000000, 7);
- TestInt32Binop(kExprI32Ror, 0x01000000, 0x80000000, 39);
- TestInt32Binop(kExprI32Rol, 0x00000040, 0x80000000, 7);
- TestInt32Binop(kExprI32Rol, 0x00000040, 0x80000000, 39);
- TestInt32Binop(kExprI32Eq, 1, -99, -99);
- TestInt32Binop(kExprI32Ne, 0, -97, -97);
-
- TestInt32Binop(kExprI32LtS, 1, -4, 4);
- TestInt32Binop(kExprI32LeS, 0, -2, -3);
- TestInt32Binop(kExprI32LtU, 1, 0, -6);
- TestInt32Binop(kExprI32LeU, 1, 98978, 0xF0000000);
-
- TestInt32Binop(kExprI32GtS, 1, 4, -4);
- TestInt32Binop(kExprI32GeS, 0, -3, -2);
- TestInt32Binop(kExprI32GtU, 1, -6, 0);
- TestInt32Binop(kExprI32GeU, 1, 0xF0000000, 98978);
-}
-
-
-void TestInt32Unop(WasmOpcode opcode, int32_t expected, int32_t a) {
+WASM_EXEC_TEST(Int32Binops) {
+ TestInt32Binop(execution_mode, kExprI32Add, 88888888, 33333333, 55555555);
+ TestInt32Binop(execution_mode, kExprI32Sub, -1111111, 7777777, 8888888);
+ TestInt32Binop(execution_mode, kExprI32Mul, 65130756, 88734, 734);
+ TestInt32Binop(execution_mode, kExprI32DivS, -66, -4777344, 72384);
+ TestInt32Binop(execution_mode, kExprI32DivU, 805306368, 0xF0000000, 5);
+ TestInt32Binop(execution_mode, kExprI32RemS, -3, -3003, 1000);
+ TestInt32Binop(execution_mode, kExprI32RemU, 4, 4004, 1000);
+ TestInt32Binop(execution_mode, kExprI32And, 0xEE, 0xFFEE, 0xFF0000FF);
+ TestInt32Binop(execution_mode, kExprI32Ior, 0xF0FF00FF, 0xF0F000EE,
+ 0x000F0011);
+ TestInt32Binop(execution_mode, kExprI32Xor, 0xABCDEF01, 0xABCDEFFF, 0xFE);
+ TestInt32Binop(execution_mode, kExprI32Shl, 0xA0000000, 0xA, 28);
+ TestInt32Binop(execution_mode, kExprI32ShrU, 0x07000010, 0x70000100, 4);
+ TestInt32Binop(execution_mode, kExprI32ShrS, 0xFF000000, 0x80000000, 7);
+ TestInt32Binop(execution_mode, kExprI32Ror, 0x01000000, 0x80000000, 7);
+ TestInt32Binop(execution_mode, kExprI32Ror, 0x01000000, 0x80000000, 39);
+ TestInt32Binop(execution_mode, kExprI32Rol, 0x00000040, 0x80000000, 7);
+ TestInt32Binop(execution_mode, kExprI32Rol, 0x00000040, 0x80000000, 39);
+ TestInt32Binop(execution_mode, kExprI32Eq, 1, -99, -99);
+ TestInt32Binop(execution_mode, kExprI32Ne, 0, -97, -97);
+
+ TestInt32Binop(execution_mode, kExprI32LtS, 1, -4, 4);
+ TestInt32Binop(execution_mode, kExprI32LeS, 0, -2, -3);
+ TestInt32Binop(execution_mode, kExprI32LtU, 1, 0, -6);
+ TestInt32Binop(execution_mode, kExprI32LeU, 1, 98978, 0xF0000000);
+
+ TestInt32Binop(execution_mode, kExprI32GtS, 1, 4, -4);
+ TestInt32Binop(execution_mode, kExprI32GeS, 0, -3, -2);
+ TestInt32Binop(execution_mode, kExprI32GtU, 1, -6, 0);
+ TestInt32Binop(execution_mode, kExprI32GeU, 1, 0xF0000000, 98978);
+}
+
+void TestInt32Unop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+ int32_t expected, int32_t a) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return op K
BUILD(r, WASM_UNOP(opcode, WASM_I32V(a)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// return op a
BUILD(r, WASM_UNOP(opcode, WASM_GET_LOCAL(0)));
CHECK_EQ(expected, r.Call(a));
}
}
-
-TEST(Run_WasmInt32Clz) {
- TestInt32Unop(kExprI32Clz, 0, 0x80001000);
- TestInt32Unop(kExprI32Clz, 1, 0x40000500);
- TestInt32Unop(kExprI32Clz, 2, 0x20000300);
- TestInt32Unop(kExprI32Clz, 3, 0x10000003);
- TestInt32Unop(kExprI32Clz, 4, 0x08050000);
- TestInt32Unop(kExprI32Clz, 5, 0x04006000);
- TestInt32Unop(kExprI32Clz, 6, 0x02000000);
- TestInt32Unop(kExprI32Clz, 7, 0x010000a0);
- TestInt32Unop(kExprI32Clz, 8, 0x00800c00);
- TestInt32Unop(kExprI32Clz, 9, 0x00400000);
- TestInt32Unop(kExprI32Clz, 10, 0x0020000d);
- TestInt32Unop(kExprI32Clz, 11, 0x00100f00);
- TestInt32Unop(kExprI32Clz, 12, 0x00080000);
- TestInt32Unop(kExprI32Clz, 13, 0x00041000);
- TestInt32Unop(kExprI32Clz, 14, 0x00020020);
- TestInt32Unop(kExprI32Clz, 15, 0x00010300);
- TestInt32Unop(kExprI32Clz, 16, 0x00008040);
- TestInt32Unop(kExprI32Clz, 17, 0x00004005);
- TestInt32Unop(kExprI32Clz, 18, 0x00002050);
- TestInt32Unop(kExprI32Clz, 19, 0x00001700);
- TestInt32Unop(kExprI32Clz, 20, 0x00000870);
- TestInt32Unop(kExprI32Clz, 21, 0x00000405);
- TestInt32Unop(kExprI32Clz, 22, 0x00000203);
- TestInt32Unop(kExprI32Clz, 23, 0x00000101);
- TestInt32Unop(kExprI32Clz, 24, 0x00000089);
- TestInt32Unop(kExprI32Clz, 25, 0x00000041);
- TestInt32Unop(kExprI32Clz, 26, 0x00000022);
- TestInt32Unop(kExprI32Clz, 27, 0x00000013);
- TestInt32Unop(kExprI32Clz, 28, 0x00000008);
- TestInt32Unop(kExprI32Clz, 29, 0x00000004);
- TestInt32Unop(kExprI32Clz, 30, 0x00000002);
- TestInt32Unop(kExprI32Clz, 31, 0x00000001);
- TestInt32Unop(kExprI32Clz, 32, 0x00000000);
-}
-
-
-TEST(Run_WasmInt32Ctz) {
- TestInt32Unop(kExprI32Ctz, 32, 0x00000000);
- TestInt32Unop(kExprI32Ctz, 31, 0x80000000);
- TestInt32Unop(kExprI32Ctz, 30, 0x40000000);
- TestInt32Unop(kExprI32Ctz, 29, 0x20000000);
- TestInt32Unop(kExprI32Ctz, 28, 0x10000000);
- TestInt32Unop(kExprI32Ctz, 27, 0xa8000000);
- TestInt32Unop(kExprI32Ctz, 26, 0xf4000000);
- TestInt32Unop(kExprI32Ctz, 25, 0x62000000);
- TestInt32Unop(kExprI32Ctz, 24, 0x91000000);
- TestInt32Unop(kExprI32Ctz, 23, 0xcd800000);
- TestInt32Unop(kExprI32Ctz, 22, 0x09400000);
- TestInt32Unop(kExprI32Ctz, 21, 0xaf200000);
- TestInt32Unop(kExprI32Ctz, 20, 0xac100000);
- TestInt32Unop(kExprI32Ctz, 19, 0xe0b80000);
- TestInt32Unop(kExprI32Ctz, 18, 0x9ce40000);
- TestInt32Unop(kExprI32Ctz, 17, 0xc7920000);
- TestInt32Unop(kExprI32Ctz, 16, 0xb8f10000);
- TestInt32Unop(kExprI32Ctz, 15, 0x3b9f8000);
- TestInt32Unop(kExprI32Ctz, 14, 0xdb4c4000);
- TestInt32Unop(kExprI32Ctz, 13, 0xe9a32000);
- TestInt32Unop(kExprI32Ctz, 12, 0xfca61000);
- TestInt32Unop(kExprI32Ctz, 11, 0x6c8a7800);
- TestInt32Unop(kExprI32Ctz, 10, 0x8ce5a400);
- TestInt32Unop(kExprI32Ctz, 9, 0xcb7d0200);
- TestInt32Unop(kExprI32Ctz, 8, 0xcb4dc100);
- TestInt32Unop(kExprI32Ctz, 7, 0xdfbec580);
- TestInt32Unop(kExprI32Ctz, 6, 0x27a9db40);
- TestInt32Unop(kExprI32Ctz, 5, 0xde3bcb20);
- TestInt32Unop(kExprI32Ctz, 4, 0xd7e8a610);
- TestInt32Unop(kExprI32Ctz, 3, 0x9afdbc88);
- TestInt32Unop(kExprI32Ctz, 2, 0x9afdbc84);
- TestInt32Unop(kExprI32Ctz, 1, 0x9afdbc82);
- TestInt32Unop(kExprI32Ctz, 0, 0x9afdbc81);
-}
-
-
-TEST(Run_WasmInt32Popcnt) {
- TestInt32Unop(kExprI32Popcnt, 32, 0xffffffff);
- TestInt32Unop(kExprI32Popcnt, 0, 0x00000000);
- TestInt32Unop(kExprI32Popcnt, 1, 0x00008000);
- TestInt32Unop(kExprI32Popcnt, 13, 0x12345678);
- TestInt32Unop(kExprI32Popcnt, 19, 0xfedcba09);
-}
-
-TEST(Run_WasmI32Eqz) {
- TestInt32Unop(kExprI32Eqz, 0, 1);
- TestInt32Unop(kExprI32Eqz, 0, -1);
- TestInt32Unop(kExprI32Eqz, 0, -827343);
- TestInt32Unop(kExprI32Eqz, 0, 8888888);
- TestInt32Unop(kExprI32Eqz, 1, 0);
-}
-
-TEST(Run_WasmI32Shl) {
- WasmRunner<uint32_t> r(MachineType::Uint32(), MachineType::Uint32());
+WASM_EXEC_TEST(Int32Clz) {
+ TestInt32Unop(execution_mode, kExprI32Clz, 0, 0x80001000);
+ TestInt32Unop(execution_mode, kExprI32Clz, 1, 0x40000500);
+ TestInt32Unop(execution_mode, kExprI32Clz, 2, 0x20000300);
+ TestInt32Unop(execution_mode, kExprI32Clz, 3, 0x10000003);
+ TestInt32Unop(execution_mode, kExprI32Clz, 4, 0x08050000);
+ TestInt32Unop(execution_mode, kExprI32Clz, 5, 0x04006000);
+ TestInt32Unop(execution_mode, kExprI32Clz, 6, 0x02000000);
+ TestInt32Unop(execution_mode, kExprI32Clz, 7, 0x010000a0);
+ TestInt32Unop(execution_mode, kExprI32Clz, 8, 0x00800c00);
+ TestInt32Unop(execution_mode, kExprI32Clz, 9, 0x00400000);
+ TestInt32Unop(execution_mode, kExprI32Clz, 10, 0x0020000d);
+ TestInt32Unop(execution_mode, kExprI32Clz, 11, 0x00100f00);
+ TestInt32Unop(execution_mode, kExprI32Clz, 12, 0x00080000);
+ TestInt32Unop(execution_mode, kExprI32Clz, 13, 0x00041000);
+ TestInt32Unop(execution_mode, kExprI32Clz, 14, 0x00020020);
+ TestInt32Unop(execution_mode, kExprI32Clz, 15, 0x00010300);
+ TestInt32Unop(execution_mode, kExprI32Clz, 16, 0x00008040);
+ TestInt32Unop(execution_mode, kExprI32Clz, 17, 0x00004005);
+ TestInt32Unop(execution_mode, kExprI32Clz, 18, 0x00002050);
+ TestInt32Unop(execution_mode, kExprI32Clz, 19, 0x00001700);
+ TestInt32Unop(execution_mode, kExprI32Clz, 20, 0x00000870);
+ TestInt32Unop(execution_mode, kExprI32Clz, 21, 0x00000405);
+ TestInt32Unop(execution_mode, kExprI32Clz, 22, 0x00000203);
+ TestInt32Unop(execution_mode, kExprI32Clz, 23, 0x00000101);
+ TestInt32Unop(execution_mode, kExprI32Clz, 24, 0x00000089);
+ TestInt32Unop(execution_mode, kExprI32Clz, 25, 0x00000041);
+ TestInt32Unop(execution_mode, kExprI32Clz, 26, 0x00000022);
+ TestInt32Unop(execution_mode, kExprI32Clz, 27, 0x00000013);
+ TestInt32Unop(execution_mode, kExprI32Clz, 28, 0x00000008);
+ TestInt32Unop(execution_mode, kExprI32Clz, 29, 0x00000004);
+ TestInt32Unop(execution_mode, kExprI32Clz, 30, 0x00000002);
+ TestInt32Unop(execution_mode, kExprI32Clz, 31, 0x00000001);
+ TestInt32Unop(execution_mode, kExprI32Clz, 32, 0x00000000);
+}
+
+WASM_EXEC_TEST(Int32Ctz) {
+ TestInt32Unop(execution_mode, kExprI32Ctz, 32, 0x00000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 31, 0x80000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 30, 0x40000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 29, 0x20000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 28, 0x10000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 27, 0xa8000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 26, 0xf4000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 25, 0x62000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 24, 0x91000000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 23, 0xcd800000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 22, 0x09400000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 21, 0xaf200000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 20, 0xac100000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 19, 0xe0b80000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 18, 0x9ce40000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 17, 0xc7920000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 16, 0xb8f10000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 15, 0x3b9f8000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 14, 0xdb4c4000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 13, 0xe9a32000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 12, 0xfca61000);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 11, 0x6c8a7800);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 10, 0x8ce5a400);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 9, 0xcb7d0200);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 8, 0xcb4dc100);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 7, 0xdfbec580);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 6, 0x27a9db40);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 5, 0xde3bcb20);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 4, 0xd7e8a610);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 3, 0x9afdbc88);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 2, 0x9afdbc84);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 1, 0x9afdbc82);
+ TestInt32Unop(execution_mode, kExprI32Ctz, 0, 0x9afdbc81);
+}
+
+WASM_EXEC_TEST(Int32Popcnt) {
+ TestInt32Unop(execution_mode, kExprI32Popcnt, 32, 0xffffffff);
+ TestInt32Unop(execution_mode, kExprI32Popcnt, 0, 0x00000000);
+ TestInt32Unop(execution_mode, kExprI32Popcnt, 1, 0x00008000);
+ TestInt32Unop(execution_mode, kExprI32Popcnt, 13, 0x12345678);
+ TestInt32Unop(execution_mode, kExprI32Popcnt, 19, 0xfedcba09);
+}
+
+WASM_EXEC_TEST(I32Eqz) {
+ TestInt32Unop(execution_mode, kExprI32Eqz, 0, 1);
+ TestInt32Unop(execution_mode, kExprI32Eqz, 0, -1);
+ TestInt32Unop(execution_mode, kExprI32Eqz, 0, -827343);
+ TestInt32Unop(execution_mode, kExprI32Eqz, 0, 8888888);
+ TestInt32Unop(execution_mode, kExprI32Eqz, 1, 0);
+}
+
+WASM_EXEC_TEST(I32Shl) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
BUILD(r, WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT32_INPUTS(i) {
@@ -336,8 +325,9 @@ TEST(Run_WasmI32Shl) {
}
}
-TEST(Run_WasmI32Shr) {
- WasmRunner<uint32_t> r(MachineType::Uint32(), MachineType::Uint32());
+WASM_EXEC_TEST(I32Shr) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
BUILD(r, WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_UINT32_INPUTS(i) {
@@ -348,8 +338,9 @@ TEST(Run_WasmI32Shr) {
}
}
-TEST(Run_WasmI32Sar) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(I32Sar) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
BUILD(r, WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
@@ -360,8 +351,9 @@ TEST(Run_WasmI32Sar) {
}
}
-TEST(Run_WASM_Int32DivS_trap) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(Int32DivS_trap) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
@@ -371,9 +363,9 @@ TEST(Run_WASM_Int32DivS_trap) {
CHECK_TRAP(r.Call(kMin, 0));
}
-
-TEST(Run_WASM_Int32RemS_trap) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(Int32RemS_trap) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(33, r.Call(133, 100));
@@ -383,9 +375,9 @@ TEST(Run_WASM_Int32RemS_trap) {
CHECK_TRAP(r.Call(kMin, 0));
}
-
-TEST(Run_WASM_Int32DivU_trap) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(Int32DivU_trap) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(0, 100));
@@ -395,9 +387,9 @@ TEST(Run_WASM_Int32DivU_trap) {
CHECK_TRAP(r.Call(kMin, 0));
}
-
-TEST(Run_WASM_Int32RemU_trap) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(Int32RemU_trap) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
BUILD(r, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(17, r.Call(217, 100));
const int32_t kMin = std::numeric_limits<int32_t>::min();
@@ -407,64 +399,11 @@ TEST(Run_WASM_Int32RemU_trap) {
CHECK_EQ(kMin, r.Call(kMin, -1));
}
-TEST(Run_WASM_Int32DivS_asmjs) {
- TestingModule module;
- module.origin = kAsmJsOrigin;
- WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
- BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- const int32_t kMin = std::numeric_limits<int32_t>::min();
- CHECK_EQ(0, r.Call(0, 100));
- CHECK_EQ(0, r.Call(100, 0));
- CHECK_EQ(0, r.Call(-1001, 0));
- CHECK_EQ(kMin, r.Call(kMin, -1));
- CHECK_EQ(0, r.Call(kMin, 0));
-}
-
-TEST(Run_WASM_Int32RemS_asmjs) {
- TestingModule module;
- module.origin = kAsmJsOrigin;
- WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
- BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- const int32_t kMin = std::numeric_limits<int32_t>::min();
- CHECK_EQ(33, r.Call(133, 100));
- CHECK_EQ(0, r.Call(kMin, -1));
- CHECK_EQ(0, r.Call(100, 0));
- CHECK_EQ(0, r.Call(-1001, 0));
- CHECK_EQ(0, r.Call(kMin, 0));
-}
-
-TEST(Run_WASM_Int32DivU_asmjs) {
- TestingModule module;
- module.origin = kAsmJsOrigin;
- WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
- BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- const int32_t kMin = std::numeric_limits<int32_t>::min();
- CHECK_EQ(0, r.Call(0, 100));
- CHECK_EQ(0, r.Call(kMin, -1));
- CHECK_EQ(0, r.Call(100, 0));
- CHECK_EQ(0, r.Call(-1001, 0));
- CHECK_EQ(0, r.Call(kMin, 0));
-}
-
-TEST(Run_WASM_Int32RemU_asmjs) {
- TestingModule module;
- module.origin = kAsmJsOrigin;
- WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
- BUILD(r, WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- const int32_t kMin = std::numeric_limits<int32_t>::min();
- CHECK_EQ(17, r.Call(217, 100));
- CHECK_EQ(0, r.Call(100, 0));
- CHECK_EQ(0, r.Call(-1001, 0));
- CHECK_EQ(0, r.Call(kMin, 0));
- CHECK_EQ(kMin, r.Call(kMin, -1));
-}
-
-
-TEST(Run_WASM_Int32DivS_byzero_const) {
- for (int8_t denom = -2; denom < 8; denom++) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Int32DivS_byzero_const) {
+ for (int8_t denom = -2; denom < 8; ++denom) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I8(denom)));
- for (int32_t val = -7; val < 8; val++) {
+ for (int32_t val = -7; val < 8; ++val) {
if (denom == 0) {
CHECK_TRAP(r.Call(val));
} else {
@@ -474,13 +413,12 @@ TEST(Run_WASM_Int32DivS_byzero_const) {
}
}
-
-TEST(Run_WASM_Int32DivU_byzero_const) {
- for (uint32_t denom = 0xfffffffe; denom < 8; denom++) {
- WasmRunner<uint32_t> r(MachineType::Uint32());
+WASM_EXEC_TEST(Int32DivU_byzero_const) {
+ for (uint32_t denom = 0xfffffffe; denom < 8; ++denom) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32());
BUILD(r, WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I32V_1(denom)));
- for (uint32_t val = 0xfffffff0; val < 8; val++) {
+ for (uint32_t val = 0xfffffff0; val < 8; ++val) {
if (denom == 0) {
CHECK_TRAP(r.Call(val));
} else {
@@ -490,9 +428,8 @@ TEST(Run_WASM_Int32DivU_byzero_const) {
}
}
-
-TEST(Run_WASM_Int32DivS_trap_effect) {
- TestingModule module;
+WASM_EXEC_TEST(Int32DivS_trap_effect) {
+ TestingModule module(execution_mode);
module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
@@ -510,33 +447,36 @@ TEST(Run_WASM_Int32DivS_trap_effect) {
CHECK_TRAP(r.Call(0, 0));
}
-void TestFloat32Binop(WasmOpcode opcode, int32_t expected, float a, float b) {
+void TestFloat32Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+ int32_t expected, float a, float b) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Float32(), MachineType::Float32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float32(),
+ MachineType::Float32());
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
-
-void TestFloat32BinopWithConvert(WasmOpcode opcode, int32_t expected, float a,
+void TestFloat32BinopWithConvert(WasmExecutionMode execution_mode,
+ WasmOpcode opcode, int32_t expected, float a,
float b) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return int(K op K)
BUILD(r,
WASM_I32_SCONVERT_F32(WASM_BINOP(opcode, WASM_F32(a), WASM_F32(b))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Float32(), MachineType::Float32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float32(),
+ MachineType::Float32());
// return int(a op b)
BUILD(r, WASM_I32_SCONVERT_F32(
WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
@@ -544,116 +484,119 @@ void TestFloat32BinopWithConvert(WasmOpcode opcode, int32_t expected, float a,
}
}
-
-void TestFloat32UnopWithConvert(WasmOpcode opcode, int32_t expected, float a) {
+void TestFloat32UnopWithConvert(WasmExecutionMode execution_mode,
+ WasmOpcode opcode, int32_t expected, float a) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return int(op(K))
BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_F32(a))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Float32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float32());
// return int(op(a))
BUILD(r, WASM_I32_SCONVERT_F32(WASM_UNOP(opcode, WASM_GET_LOCAL(0))));
CHECK_EQ(expected, r.Call(a));
}
}
-
-void TestFloat64Binop(WasmOpcode opcode, int32_t expected, double a, double b) {
+void TestFloat64Binop(WasmExecutionMode execution_mode, WasmOpcode opcode,
+ int32_t expected, double a, double b) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return K op K
BUILD(r, WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b)));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Float64(), MachineType::Float64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float64(),
+ MachineType::Float64());
// return a op b
BUILD(r, WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
CHECK_EQ(expected, r.Call(a, b));
}
}
-
-void TestFloat64BinopWithConvert(WasmOpcode opcode, int32_t expected, double a,
+void TestFloat64BinopWithConvert(WasmExecutionMode execution_mode,
+ WasmOpcode opcode, int32_t expected, double a,
double b) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return int(K op K)
BUILD(r,
WASM_I32_SCONVERT_F64(WASM_BINOP(opcode, WASM_F64(a), WASM_F64(b))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Float64(), MachineType::Float64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float64(),
+ MachineType::Float64());
BUILD(r, WASM_I32_SCONVERT_F64(
WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
CHECK_EQ(expected, r.Call(a, b));
}
}
-
-void TestFloat64UnopWithConvert(WasmOpcode opcode, int32_t expected, double a) {
+void TestFloat64UnopWithConvert(WasmExecutionMode execution_mode,
+ WasmOpcode opcode, int32_t expected, double a) {
{
- WasmRunner<int32_t> r;
+ WasmRunner<int32_t> r(execution_mode);
// return int(op(K))
BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_F64(a))));
CHECK_EQ(expected, r.Call());
}
{
- WasmRunner<int32_t> r(MachineType::Float64());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float64());
// return int(op(a))
BUILD(r, WASM_I32_SCONVERT_F64(WASM_UNOP(opcode, WASM_GET_LOCAL(0))));
CHECK_EQ(expected, r.Call(a));
}
}
-TEST(Run_WasmFloat32Binops) {
- TestFloat32Binop(kExprF32Eq, 1, 8.125f, 8.125f);
- TestFloat32Binop(kExprF32Ne, 1, 8.125f, 8.127f);
- TestFloat32Binop(kExprF32Lt, 1, -9.5f, -9.0f);
- TestFloat32Binop(kExprF32Le, 1, -1111.0f, -1111.0f);
- TestFloat32Binop(kExprF32Gt, 1, -9.0f, -9.5f);
- TestFloat32Binop(kExprF32Ge, 1, -1111.0f, -1111.0f);
+WASM_EXEC_TEST(Float32Binops) {
+ TestFloat32Binop(execution_mode, kExprF32Eq, 1, 8.125f, 8.125f);
+ TestFloat32Binop(execution_mode, kExprF32Ne, 1, 8.125f, 8.127f);
+ TestFloat32Binop(execution_mode, kExprF32Lt, 1, -9.5f, -9.0f);
+ TestFloat32Binop(execution_mode, kExprF32Le, 1, -1111.0f, -1111.0f);
+ TestFloat32Binop(execution_mode, kExprF32Gt, 1, -9.0f, -9.5f);
+ TestFloat32Binop(execution_mode, kExprF32Ge, 1, -1111.0f, -1111.0f);
- TestFloat32BinopWithConvert(kExprF32Add, 10, 3.5f, 6.5f);
- TestFloat32BinopWithConvert(kExprF32Sub, 2, 44.5f, 42.5f);
- TestFloat32BinopWithConvert(kExprF32Mul, -66, -132.1f, 0.5f);
- TestFloat32BinopWithConvert(kExprF32Div, 11, 22.1f, 2.0f);
+ TestFloat32BinopWithConvert(execution_mode, kExprF32Add, 10, 3.5f, 6.5f);
+ TestFloat32BinopWithConvert(execution_mode, kExprF32Sub, 2, 44.5f, 42.5f);
+ TestFloat32BinopWithConvert(execution_mode, kExprF32Mul, -66, -132.1f, 0.5f);
+ TestFloat32BinopWithConvert(execution_mode, kExprF32Div, 11, 22.1f, 2.0f);
}
-TEST(Run_WasmFloat32Unops) {
- TestFloat32UnopWithConvert(kExprF32Abs, 8, 8.125f);
- TestFloat32UnopWithConvert(kExprF32Abs, 9, -9.125f);
- TestFloat32UnopWithConvert(kExprF32Neg, -213, 213.125f);
- TestFloat32UnopWithConvert(kExprF32Sqrt, 12, 144.4f);
+WASM_EXEC_TEST(Float32Unops) {
+ TestFloat32UnopWithConvert(execution_mode, kExprF32Abs, 8, 8.125f);
+ TestFloat32UnopWithConvert(execution_mode, kExprF32Abs, 9, -9.125f);
+ TestFloat32UnopWithConvert(execution_mode, kExprF32Neg, -213, 213.125f);
+ TestFloat32UnopWithConvert(execution_mode, kExprF32Sqrt, 12, 144.4f);
}
-TEST(Run_WasmFloat64Binops) {
- TestFloat64Binop(kExprF64Eq, 1, 16.25, 16.25);
- TestFloat64Binop(kExprF64Ne, 1, 16.25, 16.15);
- TestFloat64Binop(kExprF64Lt, 1, -32.4, 11.7);
- TestFloat64Binop(kExprF64Le, 1, -88.9, -88.9);
- TestFloat64Binop(kExprF64Gt, 1, 11.7, -32.4);
- TestFloat64Binop(kExprF64Ge, 1, -88.9, -88.9);
+WASM_EXEC_TEST(Float64Binops) {
+ TestFloat64Binop(execution_mode, kExprF64Eq, 1, 16.25, 16.25);
+ TestFloat64Binop(execution_mode, kExprF64Ne, 1, 16.25, 16.15);
+ TestFloat64Binop(execution_mode, kExprF64Lt, 1, -32.4, 11.7);
+ TestFloat64Binop(execution_mode, kExprF64Le, 1, -88.9, -88.9);
+ TestFloat64Binop(execution_mode, kExprF64Gt, 1, 11.7, -32.4);
+ TestFloat64Binop(execution_mode, kExprF64Ge, 1, -88.9, -88.9);
- TestFloat64BinopWithConvert(kExprF64Add, 100, 43.5, 56.5);
- TestFloat64BinopWithConvert(kExprF64Sub, 200, 12200.1, 12000.1);
- TestFloat64BinopWithConvert(kExprF64Mul, -33, 134, -0.25);
- TestFloat64BinopWithConvert(kExprF64Div, -1111, -2222.3, 2);
+ TestFloat64BinopWithConvert(execution_mode, kExprF64Add, 100, 43.5, 56.5);
+ TestFloat64BinopWithConvert(execution_mode, kExprF64Sub, 200, 12200.1,
+ 12000.1);
+ TestFloat64BinopWithConvert(execution_mode, kExprF64Mul, -33, 134, -0.25);
+ TestFloat64BinopWithConvert(execution_mode, kExprF64Div, -1111, -2222.3, 2);
}
-TEST(Run_WasmFloat64Unops) {
- TestFloat64UnopWithConvert(kExprF64Abs, 108, 108.125);
- TestFloat64UnopWithConvert(kExprF64Abs, 209, -209.125);
- TestFloat64UnopWithConvert(kExprF64Neg, -209, 209.125);
- TestFloat64UnopWithConvert(kExprF64Sqrt, 13, 169.4);
+WASM_EXEC_TEST(Float64Unops) {
+ TestFloat64UnopWithConvert(execution_mode, kExprF64Abs, 108, 108.125);
+ TestFloat64UnopWithConvert(execution_mode, kExprF64Abs, 209, -209.125);
+ TestFloat64UnopWithConvert(execution_mode, kExprF64Neg, -209, 209.125);
+ TestFloat64UnopWithConvert(execution_mode, kExprF64Sqrt, 13, 169.4);
}
-TEST(Run_WasmFloat32Neg) {
- WasmRunner<float> r(MachineType::Float32());
+WASM_EXEC_TEST(Float32Neg) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_F32_NEG(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) {
@@ -662,9 +605,48 @@ TEST(Run_WasmFloat32Neg) {
}
}
+WASM_EXEC_TEST(Float32SubMinusZero) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
+ BUILD(r, WASM_F32_SUB(WASM_F32(-0.0), WASM_GET_LOCAL(0)));
+
+ uint32_t sNanValue =
+ bit_cast<uint32_t>(std::numeric_limits<float>::signaling_NaN());
+ uint32_t qNanValue =
+ bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
+ uint32_t payload = 0x00200000;
+
+ uint32_t expected = (qNanValue & 0xffc00000) | payload;
+ uint32_t operand = (sNanValue & 0xffc00000) | payload;
+ CHECK_EQ(expected, bit_cast<uint32_t>(r.Call(bit_cast<float>(operand))));
+
+ // Change the sign of the NaN.
+ expected |= 0x80000000;
+ operand |= 0x80000000;
+ CHECK_EQ(expected, bit_cast<uint32_t>(r.Call(bit_cast<float>(operand))));
+}
+
+WASM_EXEC_TEST(Float64SubMinusZero) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
+ BUILD(r, WASM_F64_SUB(WASM_F64(-0.0), WASM_GET_LOCAL(0)));
+
+ uint64_t sNanValue =
+ bit_cast<uint64_t>(std::numeric_limits<double>::signaling_NaN());
+ uint64_t qNanValue =
+ bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
+ uint64_t payload = 0x0000123456789abc;
+
+ uint64_t expected = (qNanValue & 0xfff8000000000000) | payload;
+ uint64_t operand = (sNanValue & 0xfff8000000000000) | payload;
+ CHECK_EQ(expected, bit_cast<uint64_t>(r.Call(bit_cast<double>(operand))));
+
+ // Change the sign of the NaN.
+ expected |= 0x8000000000000000;
+ operand |= 0x8000000000000000;
+ CHECK_EQ(expected, bit_cast<uint64_t>(r.Call(bit_cast<double>(operand))));
+}
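// Both SubMinusZero tests rely on IEEE 754 NaN quieting: an arithmetic op on
// a signaling NaN must produce a quiet NaN, so the quiet bit (the mantissa
// MSB, 0x00400000 for binary32 and 0x0008000000000000 for binary64) is set
// in the result, while the masked-in payload bits and, in the second check,
// the sign bit are expected to survive.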
-TEST(Run_WasmFloat64Neg) {
- WasmRunner<double> r(MachineType::Float64());
+WASM_EXEC_TEST(Float64Neg) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_F64_NEG(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) {
@@ -673,9 +655,8 @@ TEST(Run_WasmFloat64Neg) {
}
}
-
-TEST(Run_Wasm_IfElse_P) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(IfElse_P) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// if (p0) return 11; else return 22;
BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
WASM_I8(11), // --
@@ -686,9 +667,59 @@ TEST(Run_Wasm_IfElse_P) {
}
}
+WASM_EXEC_TEST(If_empty1) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, kExprEnd, WASM_GET_LOCAL(1));
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 9, *i)); }
+}
+
+WASM_EXEC_TEST(IfElse_empty1) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, kExprElse, kExprEnd, WASM_GET_LOCAL(1));
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 8, *i)); }
+}
+
+WASM_EXEC_TEST(IfElse_empty2) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, WASM_ZERO, kExprElse, kExprEnd,
+ WASM_GET_LOCAL(1));
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 7, *i)); }
+}
+
+WASM_EXEC_TEST(IfElse_empty3) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
+ BUILD(r, WASM_GET_LOCAL(0), kExprIf, kExprElse, WASM_ZERO, kExprEnd,
+ WASM_GET_LOCAL(1));
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i - 6, *i)); }
+}
-TEST(Run_Wasm_IfElse_Unreachable1) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(If_chain) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ // if (p0) 13; if (p0) 14; 15
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_I8(13)),
+ WASM_IF(WASM_GET_LOCAL(0), WASM_I8(14)), WASM_I8(15));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(15, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(If_chain_set) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
+ // if (p0) p1 = 73; if (p0) p1 = 74; p1
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(1, WASM_I8(73))),
+ WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(1, WASM_I8(74))),
+ WASM_GET_LOCAL(1));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 74 : *i;
+ CHECK_EQ(expected, r.Call(*i, *i));
+ }
+}
+
+WASM_EXEC_TEST(IfElse_Unreachable1) {
+ WasmRunner<int32_t> r(execution_mode);
// if (0) unreachable; else return 22;
BUILD(r, WASM_IF_ELSE(WASM_ZERO, // --
WASM_UNREACHABLE, // --
@@ -696,34 +727,30 @@ TEST(Run_Wasm_IfElse_Unreachable1) {
CHECK_EQ(27, r.Call());
}
-
-TEST(Run_Wasm_Return12) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Return12) {
+ WasmRunner<int32_t> r(execution_mode);
BUILD(r, RET_I8(12));
CHECK_EQ(12, r.Call());
}
-
-TEST(Run_Wasm_Return17) {
- WasmRunner<int32_t> r;
+WASM_EXEC_TEST(Return17) {
+ WasmRunner<int32_t> r(execution_mode);
BUILD(r, B1(RET_I8(17)));
CHECK_EQ(17, r.Call());
}
-
-TEST(Run_Wasm_Return_I32) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Return_I32) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, RET(WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
}
-
-TEST(Run_Wasm_Return_F32) {
- WasmRunner<float> r(MachineType::Float32());
+WASM_EXEC_TEST(Return_F32) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, RET(WASM_GET_LOCAL(0)));
@@ -738,9 +765,8 @@ TEST(Run_Wasm_Return_F32) {
}
}
-
-TEST(Run_Wasm_Return_F64) {
- WasmRunner<double> r(MachineType::Float64());
+WASM_EXEC_TEST(Return_F64) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, RET(WASM_GET_LOCAL(0)));
@@ -755,9 +781,8 @@ TEST(Run_Wasm_Return_F64) {
}
}
-
-TEST(Run_Wasm_Select) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Select) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// return select(11, 22, a);
BUILD(r, WASM_SELECT(WASM_I8(11), WASM_I8(22), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
@@ -766,9 +791,8 @@ TEST(Run_Wasm_Select) {
}
}
-
-TEST(Run_Wasm_Select_strict1) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Select_strict1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// select(a=0, a=1, a=2); return a
BUILD(r, B2(WASM_SELECT(WASM_SET_LOCAL(0, WASM_I8(0)),
WASM_SET_LOCAL(0, WASM_I8(1)),
@@ -777,9 +801,8 @@ TEST(Run_Wasm_Select_strict1) {
FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(*i)); }
}
-
-TEST(Run_Wasm_Select_strict2) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Select_strict2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
r.AllocateLocal(kAstI32);
r.AllocateLocal(kAstI32);
// select(b=5, c=6, a)
@@ -791,8 +814,8 @@ TEST(Run_Wasm_Select_strict2) {
}
}
-TEST(Run_Wasm_Select_strict3) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Select_strict3) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
r.AllocateLocal(kAstI32);
r.AllocateLocal(kAstI32);
// select(b=5, c=6, a=b)
@@ -805,9 +828,8 @@ TEST(Run_Wasm_Select_strict3) {
}
}
-
-TEST(Run_Wasm_BrIf_strict) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(BrIf_strict) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(
r,
B2(B1(WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(99)))),
@@ -816,23 +838,23 @@ TEST(Run_Wasm_BrIf_strict) {
FOR_INT32_INPUTS(i) { CHECK_EQ(99, r.Call(*i)); }
}
-TEST(Run_Wasm_BrTable0a) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(BrTable0a) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r,
B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0))), WASM_I8(91)));
FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(*i)); }
}
-TEST(Run_Wasm_BrTable0b) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(BrTable0b) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r,
B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(0))),
WASM_I8(92)));
FOR_INT32_INPUTS(i) { CHECK_EQ(92, r.Call(*i)); }
}
-TEST(Run_Wasm_BrTable0c) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(BrTable0c) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(
r,
B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(0), BR_TARGET(1))),
@@ -844,18 +866,17 @@ TEST(Run_Wasm_BrTable0c) {
}
}
-TEST(Run_Wasm_BrTable1) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(BrTable1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0))), RET_I8(93));
FOR_INT32_INPUTS(i) { CHECK_EQ(93, r.Call(*i)); }
}
-TEST(Run_Wasm_BrTable_loop) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- B2(WASM_LOOP(1, WASM_BR_TABLE(WASM_INC_LOCAL_BY(0, 1), 2, BR_TARGET(2),
+WASM_EXEC_TEST(BrTable_loop) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B2(WASM_LOOP(WASM_BR_TABLE(WASM_INC_LOCAL_BY(0, 1), 2, BR_TARGET(2),
BR_TARGET(1), BR_TARGET(0))),
- RET_I8(99)),
+ RET_I8(99)),
WASM_I8(98));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(98, r.Call(-1));
@@ -864,8 +885,8 @@ TEST(Run_Wasm_BrTable_loop) {
CHECK_EQ(98, r.Call(-100));
}
-TEST(Run_Wasm_BrTable_br) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(BrTable_br) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r,
B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 1, BR_TARGET(1), BR_TARGET(0))),
RET_I8(91)),
@@ -876,8 +897,8 @@ TEST(Run_Wasm_BrTable_br) {
CHECK_EQ(91, r.Call(3));
}
-TEST(Run_Wasm_BrTable_br2) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(BrTable_br2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 3, BR_TARGET(1),
BR_TARGET(2), BR_TARGET(3), BR_TARGET(0))),
@@ -893,9 +914,9 @@ TEST(Run_Wasm_BrTable_br2) {
CHECK_EQ(85, r.Call(5));
}
-TEST(Run_Wasm_BrTable4) {
- for (int i = 0; i < 4; i++) {
- for (int t = 0; t < 4; t++) {
+WASM_EXEC_TEST(BrTable4) {
+ for (int i = 0; i < 4; ++i) {
+ for (int t = 0; t < 4; ++t) {
uint32_t cases[] = {0, 1, 2, 3};
cases[i] = t;
byte code[] = {B2(B2(B2(B2(B1(WASM_BR_TABLE(
@@ -908,10 +929,10 @@ TEST(Run_Wasm_BrTable4) {
RET_I8(73)),
WASM_I8(75)};
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
r.Build(code, code + arraysize(code));
- for (int x = -3; x < 50; x++) {
+ for (int x = -3; x < 50; ++x) {
int index = (x > 3 || x < 0) ? 3 : x;
int32_t expected = 70 + cases[index];
CHECK_EQ(expected, r.Call(x));
@@ -920,12 +941,12 @@ TEST(Run_Wasm_BrTable4) {
}
}
-TEST(Run_Wasm_BrTable4x4) {
- for (byte a = 0; a < 4; a++) {
- for (byte b = 0; b < 4; b++) {
- for (byte c = 0; c < 4; c++) {
- for (byte d = 0; d < 4; d++) {
- for (int i = 0; i < 4; i++) {
+WASM_EXEC_TEST(BrTable4x4) {
+ for (byte a = 0; a < 4; ++a) {
+ for (byte b = 0; b < 4; ++b) {
+ for (byte c = 0; c < 4; ++c) {
+ for (byte d = 0; d < 4; ++d) {
+ for (int i = 0; i < 4; ++i) {
uint32_t cases[] = {a, b, c, d};
byte code[] = {
B2(B2(B2(B2(B1(WASM_BR_TABLE(
@@ -938,10 +959,10 @@ TEST(Run_Wasm_BrTable4x4) {
RET_I8(53)),
WASM_I8(55)};
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
r.Build(code, code + arraysize(code));
- for (int x = -6; x < 47; x++) {
+ for (int x = -6; x < 47; ++x) {
int index = (x > 3 || x < 0) ? 3 : x;
int32_t expected = 50 + cases[index];
CHECK_EQ(expected, r.Call(x));
@@ -953,7 +974,7 @@ TEST(Run_Wasm_BrTable4x4) {
}
}
-TEST(Run_Wasm_BrTable4_fallthru) {
+WASM_EXEC_TEST(BrTable4_fallthru) {
byte code[] = {
B2(B2(B2(B2(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 3, BR_TARGET(0),
BR_TARGET(1), BR_TARGET(2), BR_TARGET(3))),
@@ -963,7 +984,8 @@ TEST(Run_Wasm_BrTable4_fallthru) {
WASM_INC_LOCAL_BY(1, 8)),
WASM_GET_LOCAL(1)};
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
r.Build(code, code + arraysize(code));
CHECK_EQ(15, r.Call(0, 0));
@@ -979,8 +1001,8 @@ TEST(Run_Wasm_BrTable4_fallthru) {
CHECK_EQ(108, r.Call(4, 100));
}
-TEST(Run_Wasm_F32ReinterpretI32) {
- TestingModule module;
+WASM_EXEC_TEST(F32ReinterpretI32) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module);
@@ -989,32 +1011,30 @@ TEST(Run_Wasm_F32ReinterpretI32) {
FOR_INT32_INPUTS(i) {
int32_t expected = *i;
- memory[0] = expected;
+ module.WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
}
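// Direct stores such as memory[0] = v are replaced by module.WriteMemory /
// module.ReadMemory throughout; presumably these helpers centralize access
// to the test heap so byte order (wasm memory is little-endian) and any
// interpreter-side bookkeeping are handled in one place.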
-
-TEST(Run_Wasm_I32ReinterpretF32) {
- TestingModule module;
+WASM_EXEC_TEST(I32ReinterpretF32) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- 2, WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
- WASM_F32_REINTERPRET_I32(WASM_GET_LOCAL(0))),
- WASM_I8(107)));
+ BUILD(r,
+ WASM_BLOCK(WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
+ WASM_F32_REINTERPRET_I32(WASM_GET_LOCAL(0))),
+ WASM_I8(107)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i;
CHECK_EQ(107, r.Call(expected));
- CHECK_EQ(expected, memory[0]);
+ CHECK_EQ(expected, module.ReadMemory(&memory[0]));
}
}
-
-TEST(Run_Wasm_ReturnStore) {
- TestingModule module;
+WASM_EXEC_TEST(ReturnStore) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module);
@@ -1023,18 +1043,17 @@ TEST(Run_Wasm_ReturnStore) {
FOR_INT32_INPUTS(i) {
int32_t expected = *i;
- memory[0] = expected;
+ module.WriteMemory(&memory[0], expected);
CHECK_EQ(expected, r.Call());
}
}
-
-TEST(Run_Wasm_VoidReturn1) {
+WASM_EXEC_TEST(VoidReturn1) {
// We use a wrapper function because WasmRunner<void> does not exist.
// Build the test function.
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(sigs.v_v(), &module);
BUILD(t, kExprNop);
uint32_t index = t.CompileAndAdd();
@@ -1048,12 +1067,11 @@ TEST(Run_Wasm_VoidReturn1) {
CHECK_EQ(kExpected, result);
}
-
-TEST(Run_Wasm_VoidReturn2) {
+WASM_EXEC_TEST(VoidReturn2) {
// We use a wrapper function because WasmRunner<void> does not exist.
// Build the test function.
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(sigs.v_v(), &module);
BUILD(t, WASM_RETURN0);
uint32_t index = t.CompileAndAdd();
@@ -1067,13 +1085,43 @@ TEST(Run_Wasm_VoidReturn2) {
CHECK_EQ(kExpected, result);
}
+WASM_EXEC_TEST(Block_empty) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, kExprBlock, kExprEnd, WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Block_empty_br1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B1(WASM_BR(0)), WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Block_empty_brif1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B1(WASM_BR_IF(0, WASM_ZERO)), WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Block_empty_brif2) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
+ BUILD(r, B1(WASM_BR_IF(0, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
+}
+
+WASM_EXEC_TEST(Block_br2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B1(WASM_BRV(0, WASM_GET_LOCAL(0))));
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
-TEST(Run_Wasm_Block_If_P) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Block_If_P) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// { if (p0) return 51; return 52; }
BUILD(r, B2( // --
WASM_IF(WASM_GET_LOCAL(0), // --
- WASM_BRV(0, WASM_I8(51))), // --
+ WASM_BRV(1, WASM_I8(51))), // --
WASM_I8(52))); // --
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 51 : 52;
@@ -1081,9 +1129,33 @@ TEST(Run_Wasm_Block_If_P) {
}
}
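// The branch-depth bumps here (WASM_BRV(0, ...) becomes WASM_BRV(1, ...))
// and in the WASM_BREAK depths below follow from the new encoding, where an
// if arm introduces its own label scope, so escaping to the enclosing block
// costs one extra level.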
+WASM_EXEC_TEST(Loop_empty) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, kExprLoop, kExprEnd, WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Loop_empty_br1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_LOOP(WASM_BR(1)), WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Loop_empty_brif1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_LOOP(WASM_BR_IF(1, WASM_ZERO)), WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+}
+
+WASM_EXEC_TEST(Loop_empty_brif2) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
+ MachineType::Uint32());
+ BUILD(r, WASM_LOOP(WASM_BR_IF(1, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
+}
-TEST(Run_Wasm_Block_BrIf_P) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Block_BrIf_P) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(51), WASM_GET_LOCAL(0)), WASM_I8(52)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 51 : 52;
@@ -1091,9 +1163,8 @@ TEST(Run_Wasm_Block_BrIf_P) {
}
}
-
-TEST(Run_Wasm_Block_IfElse_P_assign) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Block_IfElse_P_assign) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// { if (p0) p0 = 71; else p0 = 72; return p0; }
BUILD(r, B2( // --
WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
@@ -1106,9 +1177,8 @@ TEST(Run_Wasm_Block_IfElse_P_assign) {
}
}
-
-TEST(Run_Wasm_Block_IfElse_P_return) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Block_IfElse_P_return) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// if (p0) return 81; else return 82;
BUILD(r, // --
WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
@@ -1120,30 +1190,27 @@ TEST(Run_Wasm_Block_IfElse_P_return) {
}
}
-
-TEST(Run_Wasm_Block_If_P_assign) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Block_If_P_assign) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// { if (p0) p0 = 61; p0; }
- BUILD(r, WASM_BLOCK(
- 2, WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(61))),
- WASM_GET_LOCAL(0)));
+ BUILD(r,
+ WASM_BLOCK(WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_I8(61))),
+ WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 61 : *i;
CHECK_EQ(expected, r.Call(*i));
}
}
-
-TEST(Run_Wasm_DanglingAssign) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(DanglingAssign) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// { return 0; p0 = 0; }
BUILD(r, B2(RET_I8(99), WASM_SET_LOCAL(0, WASM_ZERO)));
CHECK_EQ(99, r.Call(1));
}
-
-TEST(Run_Wasm_ExprIf_P) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(ExprIf_P) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// p0 ? 11 : 22;
BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
WASM_I8(11), // --
@@ -1154,9 +1221,8 @@ TEST(Run_Wasm_ExprIf_P) {
}
}
-
-TEST(Run_Wasm_ExprIf_P_fallthru) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(ExprIf_P_fallthru) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// p0 ? 11 : 22;
BUILD(r, WASM_IF_ELSE(WASM_GET_LOCAL(0), // --
WASM_I8(11), // --
@@ -1167,54 +1233,47 @@ TEST(Run_Wasm_ExprIf_P_fallthru) {
}
}
-
-TEST(Run_Wasm_CountDown) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(
- 2, WASM_LOOP(
- 1, WASM_IF(WASM_GET_LOCAL(0),
- WASM_BRV(0, WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0),
+WASM_EXEC_TEST(CountDown) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_BLOCK(
+ WASM_LOOP(WASM_IF(
+ WASM_GET_LOCAL(0),
+ WASM_BRV(1, WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
WASM_I8(1)))))),
- WASM_GET_LOCAL(0)));
+ WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
}
-
-TEST(Run_Wasm_CountDown_fallthru) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(
- 2, WASM_LOOP(3, WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)), WASM_BREAK(0)),
+WASM_EXEC_TEST(CountDown_fallthru) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_BLOCK(
+ WASM_LOOP(WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)), WASM_BREAK(1)),
WASM_SET_LOCAL(
0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
WASM_CONTINUE(0)),
- WASM_GET_LOCAL(0)));
+ WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
}
-
-TEST(Run_Wasm_WhileCountDown) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- 2, WASM_WHILE(WASM_GET_LOCAL(0),
- WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
- WASM_I8(1)))),
- WASM_GET_LOCAL(0)));
+WASM_EXEC_TEST(WhileCountDown) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r,
+ WASM_BLOCK(WASM_WHILE(WASM_GET_LOCAL(0),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
+ WASM_I8(1)))),
+ WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(1));
CHECK_EQ(0, r.Call(10));
CHECK_EQ(0, r.Call(100));
}
-
-TEST(Run_Wasm_Loop_if_break1) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, B2(WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(0)),
+WASM_EXEC_TEST(Loop_if_break1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B2(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
WASM_SET_LOCAL(0, WASM_I8(99))),
WASM_GET_LOCAL(0)));
CHECK_EQ(99, r.Call(0));
@@ -1223,10 +1282,9 @@ TEST(Run_Wasm_Loop_if_break1) {
CHECK_EQ(-29, r.Call(-29));
}
-
-TEST(Run_Wasm_Loop_if_break2) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, B2(WASM_LOOP(2, WASM_BR_IF(1, WASM_GET_LOCAL(0)),
+WASM_EXEC_TEST(Loop_if_break2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B2(WASM_LOOP(WASM_BR_IF(1, WASM_GET_LOCAL(0)),
WASM_SET_LOCAL(0, WASM_I8(99))),
WASM_GET_LOCAL(0)));
CHECK_EQ(99, r.Call(0));
@@ -1235,10 +1293,9 @@ TEST(Run_Wasm_Loop_if_break2) {
CHECK_EQ(-29, r.Call(-29));
}
-
-TEST(Run_Wasm_Loop_if_break_fallthru) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, B1(WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
+WASM_EXEC_TEST(Loop_if_break_fallthru) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B1(WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(1)),
WASM_SET_LOCAL(0, WASM_I8(93)))),
WASM_GET_LOCAL(0));
CHECK_EQ(93, r.Call(0));
@@ -1247,70 +1304,84 @@ TEST(Run_Wasm_Loop_if_break_fallthru) {
CHECK_EQ(-22, r.Call(-22));
}
+WASM_EXEC_TEST(IfBreak1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
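+  // { if (p0) { br 0; unreachable; } 91 } -- the br exits the if, so the
+  // unreachable opcode is never reached and 91 is returned for all inputs.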
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SEQ(WASM_BR(0), WASM_UNREACHABLE)),
+ WASM_I8(91));
+ CHECK_EQ(91, r.Call(0));
+ CHECK_EQ(91, r.Call(1));
+ CHECK_EQ(91, r.Call(-8734));
+}
+
+WASM_EXEC_TEST(IfBreak2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
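+  // { if (p0) { br 0; return 77; } 81 } -- the br skips the early return,
+  // so 81 is returned for all inputs.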
+ BUILD(r, WASM_IF(WASM_GET_LOCAL(0), WASM_SEQ(WASM_BR(0), RET_I8(77))),
+ WASM_I8(81));
+ CHECK_EQ(81, r.Call(0));
+ CHECK_EQ(81, r.Call(1));
+ CHECK_EQ(81, r.Call(-8734));
+}
-TEST(Run_Wasm_LoadMemI32) {
- TestingModule module;
+WASM_EXEC_TEST(LoadMemI32) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Int32());
module.RandomizeMemory(1111);
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(0)));
- memory[0] = 99999999;
+ module.WriteMemory(&memory[0], 99999999);
CHECK_EQ(99999999, r.Call(0));
- memory[0] = 88888888;
+ module.WriteMemory(&memory[0], 88888888);
CHECK_EQ(88888888, r.Call(0));
- memory[0] = 77777777;
+ module.WriteMemory(&memory[0], 77777777);
CHECK_EQ(77777777, r.Call(0));
}
-
-TEST(Run_Wasm_LoadMemI32_oob) {
- TestingModule module;
+WASM_EXEC_TEST(LoadMemI32_alignment) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
- WasmRunner<int32_t> r(&module, MachineType::Uint32());
- module.RandomizeMemory(1111);
+ for (byte alignment = 0; alignment <= 2; ++alignment) {
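+    // The alignment immediate is log2 of the byte alignment: 0, 1, and 2
+    // mean 1-, 2-, and 4-byte aligned loads of the same i32.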
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ module.RandomizeMemory(1111);
- BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
+ BUILD(r,
+ WASM_LOAD_MEM_ALIGNMENT(MachineType::Int32(), WASM_I8(0), alignment));
- memory[0] = 88888888;
- CHECK_EQ(88888888, r.Call(0u));
- for (uint32_t offset = 29; offset < 40; offset++) {
- CHECK_TRAP(r.Call(offset));
- }
+ module.WriteMemory(&memory[0], 0x1a2b3c4d);
+ CHECK_EQ(0x1a2b3c4d, r.Call(0));
- for (uint32_t offset = 0x80000000; offset < 0x80000010; offset++) {
- CHECK_TRAP(r.Call(offset));
+ module.WriteMemory(&memory[0], 0x5e6f7a8b);
+ CHECK_EQ(0x5e6f7a8b, r.Call(0));
+
+ module.WriteMemory(&memory[0], 0x7ca0b1c2);
+ CHECK_EQ(0x7ca0b1c2, r.Call(0));
}
}
-
-TEST(Run_Wasm_LoadMemI32_oob_asm) {
- TestingModule module;
- module.origin = kAsmJsOrigin;
+WASM_EXEC_TEST(LoadMemI32_oob) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
WasmRunner<int32_t> r(&module, MachineType::Uint32());
- module.RandomizeMemory(1112);
+ module.RandomizeMemory(1111);
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
- memory[0] = 999999;
- CHECK_EQ(999999, r.Call(0u));
- // TODO(titzer): offset 29-31 should also be OOB.
- for (uint32_t offset = 32; offset < 40; offset++) {
- CHECK_EQ(0, r.Call(offset));
+ module.WriteMemory(&memory[0], 88888888);
+ CHECK_EQ(88888888, r.Call(0u));
+ for (uint32_t offset = 29; offset < 40; ++offset) {
+ CHECK_TRAP(r.Call(offset));
}
- for (uint32_t offset = 0x80000000; offset < 0x80000010; offset++) {
- CHECK_EQ(0, r.Call(offset));
+ for (uint32_t offset = 0x80000000; offset < 0x80000010; ++offset) {
+ CHECK_TRAP(r.Call(offset));
}
}
-
-TEST(Run_Wasm_LoadMem_offset_oob) {
- TestingModule module;
+WASM_EXEC_TEST(LoadMem_offset_oob) {
+ TestingModule module(execution_mode);
module.AddMemoryElems<int32_t>(8);
static const MachineType machineTypes[] = {
@@ -1319,7 +1390,7 @@ TEST(Run_Wasm_LoadMem_offset_oob) {
MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
MachineType::Float64()};
- for (size_t m = 0; m < arraysize(machineTypes); m++) {
+ for (size_t m = 0; m < arraysize(machineTypes); ++m) {
module.RandomizeMemory(1116 + static_cast<int>(m));
WasmRunner<int32_t> r(&module, MachineType::Uint32());
uint32_t boundary = 24 - WasmOpcodes::MemSize(machineTypes[m]);
@@ -1329,47 +1400,43 @@ TEST(Run_Wasm_LoadMem_offset_oob) {
CHECK_EQ(0, r.Call(boundary)); // in bounds.
- for (uint32_t offset = boundary + 1; offset < boundary + 19; offset++) {
+ for (uint32_t offset = boundary + 1; offset < boundary + 19; ++offset) {
CHECK_TRAP(r.Call(offset)); // out of bounds.
}
}
}
-
-TEST(Run_Wasm_LoadMemI32_offset) {
- TestingModule module;
+WASM_EXEC_TEST(LoadMemI32_offset) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(4);
WasmRunner<int32_t> r(&module, MachineType::Int32());
module.RandomizeMemory(1111);
BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0)));
- memory[0] = 66666666;
- memory[1] = 77777777;
- memory[2] = 88888888;
- memory[3] = 99999999;
+ module.WriteMemory(&memory[0], 66666666);
+ module.WriteMemory(&memory[1], 77777777);
+ module.WriteMemory(&memory[2], 88888888);
+ module.WriteMemory(&memory[3], 99999999);
CHECK_EQ(77777777, r.Call(0));
CHECK_EQ(88888888, r.Call(4));
CHECK_EQ(99999999, r.Call(8));
- memory[0] = 11111111;
- memory[1] = 22222222;
- memory[2] = 33333333;
- memory[3] = 44444444;
+ module.WriteMemory(&memory[0], 11111111);
+ module.WriteMemory(&memory[1], 22222222);
+ module.WriteMemory(&memory[2], 33333333);
+ module.WriteMemory(&memory[3], 44444444);
CHECK_EQ(22222222, r.Call(0));
CHECK_EQ(33333333, r.Call(4));
CHECK_EQ(44444444, r.Call(8));
}
-
-#if !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-
-TEST(Run_Wasm_LoadMemI32_const_oob_misaligned) {
+WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
const int kMemSize = 12;
// TODO(titzer): Fix misaligned accesses on MIPS and re-enable.
- for (int offset = 0; offset < kMemSize + 5; offset++) {
- for (int index = 0; index < kMemSize + 5; index++) {
- TestingModule module;
+ for (int offset = 0; offset < kMemSize + 5; ++offset) {
+ for (int index = 0; index < kMemSize + 5; ++index) {
+ TestingModule module(execution_mode);
module.AddMemoryElems<byte>(kMemSize);
WasmRunner<int32_t> r(&module);
@@ -1387,14 +1454,11 @@ TEST(Run_Wasm_LoadMemI32_const_oob_misaligned) {
}
}
-#endif
-
-
-TEST(Run_Wasm_LoadMemI32_const_oob) {
+WASM_EXEC_TEST(LoadMemI32_const_oob) {
const int kMemSize = 24;
for (int offset = 0; offset < kMemSize + 5; offset += 4) {
for (int index = 0; index < kMemSize + 5; index += 4) {
- TestingModule module;
+ TestingModule module(execution_mode);
module.AddMemoryElems<byte>(kMemSize);
WasmRunner<int32_t> r(&module);
@@ -1412,9 +1476,25 @@ TEST(Run_Wasm_LoadMemI32_const_oob) {
}
}
+WASM_EXEC_TEST(StoreMemI32_alignment) {
+ TestingModule module(execution_mode);
+ int32_t* memory = module.AddMemoryElems<int32_t>(4);
+ const int32_t kWritten = 0x12345678;
+
+ for (byte i = 0; i <= 2; ++i) {
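+    // The alignment hint (log2 encoded) must not change behavior: the store
+    // succeeds and writes the same value for 1-, 2-, and 4-byte alignment.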
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ BUILD(r, WASM_STORE_MEM_ALIGNMENT(MachineType::Int32(), WASM_ZERO, i,
+ WASM_GET_LOCAL(0)));
+ module.RandomizeMemory(1111);
+ memory[0] = 0;
+
+ CHECK_EQ(kWritten, r.Call(kWritten));
+ CHECK_EQ(kWritten, module.ReadMemory(&memory[0]));
+ }
+}
-TEST(Run_Wasm_StoreMemI32_offset) {
- TestingModule module;
+WASM_EXEC_TEST(StoreMemI32_offset) {
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(4);
WasmRunner<int32_t> r(&module, MachineType::Int32());
const int32_t kWritten = 0xaabbccdd;
@@ -1422,39 +1502,31 @@ TEST(Run_Wasm_StoreMemI32_offset) {
BUILD(r, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0),
WASM_I32V_5(kWritten)));
- for (int i = 0; i < 2; i++) {
+ for (int i = 0; i < 2; ++i) {
module.RandomizeMemory(1111);
- memory[0] = 66666666;
- memory[1] = 77777777;
- memory[2] = 88888888;
- memory[3] = 99999999;
+ module.WriteMemory(&memory[0], 66666666);
+ module.WriteMemory(&memory[1], 77777777);
+ module.WriteMemory(&memory[2], 88888888);
+ module.WriteMemory(&memory[3], 99999999);
CHECK_EQ(kWritten, r.Call(i * 4));
- CHECK_EQ(66666666, memory[0]);
- CHECK_EQ(i == 0 ? kWritten : 77777777, memory[1]);
- CHECK_EQ(i == 1 ? kWritten : 88888888, memory[2]);
- CHECK_EQ(i == 2 ? kWritten : 99999999, memory[3]);
+ CHECK_EQ(66666666, module.ReadMemory(&memory[0]));
+ CHECK_EQ(i == 0 ? kWritten : 77777777, module.ReadMemory(&memory[1]));
+ CHECK_EQ(i == 1 ? kWritten : 88888888, module.ReadMemory(&memory[2]));
+ CHECK_EQ(i == 2 ? kWritten : 99999999, module.ReadMemory(&memory[3]));
}
}
-
-TEST(Run_Wasm_StoreMem_offset_oob) {
- TestingModule module;
+WASM_EXEC_TEST(StoreMem_offset_oob) {
+ TestingModule module(execution_mode);
byte* memory = module.AddMemoryElems<byte>(32);
-#if WASM_64
- static const MachineType machineTypes[] = {
- MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
- MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
- MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
- MachineType::Float64()};
-#else
+ // 64-bit cases are handled in test-run-wasm-64.cc
static const MachineType machineTypes[] = {
MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
MachineType::Float32(), MachineType::Float64()};
-#endif
- for (size_t m = 0; m < arraysize(machineTypes); m++) {
+ for (size_t m = 0; m < arraysize(machineTypes); ++m) {
module.RandomizeMemory(1119 + static_cast<int>(m));
WasmRunner<int32_t> r(&module, MachineType::Uint32());
@@ -1467,253 +1539,231 @@ TEST(Run_Wasm_StoreMem_offset_oob) {
CHECK_EQ(0, r.Call(boundary)); // in bounds.
CHECK_EQ(0, memcmp(&memory[0], &memory[8 + boundary], memsize));
- for (uint32_t offset = boundary + 1; offset < boundary + 19; offset++) {
+ for (uint32_t offset = boundary + 1; offset < boundary + 19; ++offset) {
CHECK_TRAP(r.Call(offset)); // out of bounds.
}
}
}
-
-TEST(Run_Wasm_LoadMemI32_P) {
+WASM_EXEC_TEST(LoadMemI32_P) {
const int kNumElems = 8;
- TestingModule module;
+ TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(kNumElems);
WasmRunner<int32_t> r(&module, MachineType::Int32());
module.RandomizeMemory(2222);
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
- for (int i = 0; i < kNumElems; i++) {
- CHECK_EQ(memory[i], r.Call(i * 4));
+ for (int i = 0; i < kNumElems; ++i) {
+ CHECK_EQ(module.ReadMemory(&memory[i]), r.Call(i * 4));
}
}
-
-TEST(Run_Wasm_MemI32_Sum) {
+WASM_EXEC_TEST(MemI32_Sum) {
const int kNumElems = 20;
- TestingModule module;
+ TestingModule module(execution_mode);
uint32_t* memory = module.AddMemoryElems<uint32_t>(kNumElems);
WasmRunner<uint32_t> r(&module, MachineType::Int32());
const byte kSum = r.AllocateLocal(kAstI32);
- BUILD(r, WASM_BLOCK(
- 2, WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- 2, WASM_SET_LOCAL(
- kSum, WASM_I32_ADD(
- WASM_GET_LOCAL(kSum),
+ BUILD(r,
+ WASM_BLOCK(
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(
+ kSum, WASM_I32_ADD(WASM_GET_LOCAL(kSum),
WASM_LOAD_MEM(MachineType::Int32(),
WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
- WASM_GET_LOCAL(1)));
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
+ WASM_GET_LOCAL(1)));
// Run 4 trials.
- for (int i = 0; i < 3; i++) {
+ for (int i = 0; i < 3; ++i) {
module.RandomizeMemory(i * 33);
uint32_t expected = 0;
- for (size_t j = kNumElems - 1; j > 0; j--) {
- expected += memory[j];
+ for (size_t j = kNumElems - 1; j > 0; --j) {
+ expected += module.ReadMemory(&memory[j]);
}
- uint32_t result = r.Call(static_cast<int>(4 * (kNumElems - 1)));
+ uint32_t result = r.Call(4 * (kNumElems - 1));
CHECK_EQ(expected, result);
}
}
-
-TEST(Run_Wasm_CheckMachIntsZero) {
+WASM_EXEC_TEST(CheckMachIntsZero) {
const int kNumElems = 55;
- TestingModule module;
+ TestingModule module(execution_mode);
module.AddMemoryElems<uint32_t>(kNumElems);
WasmRunner<uint32_t> r(&module, MachineType::Int32());
- BUILD(r, kExprBlock, 2, kExprLoop, 1, kExprIf, kExprGetLocal, 0, kExprBr, 0,
- kExprIfElse, kExprI32LoadMem, ZERO_ALIGNMENT, ZERO_OFFSET,
- kExprGetLocal, 0, kExprBr, 2, kExprI8Const, 255, kExprSetLocal, 0,
- kExprI32Sub, kExprGetLocal, 0, kExprI8Const, 4, kExprI8Const, 0);
+ BUILD(r, kExprLoop, kExprGetLocal, 0, kExprIf, kExprGetLocal, 0,
+ kExprI32LoadMem, 0, 0, kExprIf, kExprI8Const, 255, kExprReturn, ARITY_1,
+ kExprEnd, kExprGetLocal, 0, kExprI8Const, 4, kExprI32Sub, kExprSetLocal,
+ 0, kExprBr, ARITY_1, DEPTH_0, kExprEnd, kExprEnd, kExprI8Const, 0);
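+  // Hand-assembled body, roughly: loop { if (p0) { if (mem[p0]) return 255;
+  // p0 = p0 - 4; br loop; } }; 0 -- i.e. scan memory downward and return 0
+  // only if every word is zero.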
module.BlankMemory();
CHECK_EQ(0, r.Call((kNumElems - 1) * 4));
}
-
-TEST(Run_Wasm_MemF32_Sum) {
+WASM_EXEC_TEST(MemF32_Sum) {
const int kSize = 5;
- TestingModule module;
+ TestingModule module(execution_mode);
module.AddMemoryElems<float>(kSize);
float* buffer = module.raw_mem_start<float>();
- buffer[0] = -99.25;
- buffer[1] = -888.25;
- buffer[2] = -77.25;
- buffer[3] = 66666.25;
- buffer[4] = 5555.25;
+ module.WriteMemory(&buffer[0], -99.25f);
+ module.WriteMemory(&buffer[1], -888.25f);
+ module.WriteMemory(&buffer[2], -77.25f);
+ module.WriteMemory(&buffer[3], 66666.25f);
+ module.WriteMemory(&buffer[4], 5555.25f);
WasmRunner<int32_t> r(&module, MachineType::Int32());
const byte kSum = r.AllocateLocal(kAstF32);
- BUILD(r, WASM_BLOCK(
- 3, WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- 2, WASM_SET_LOCAL(
- kSum, WASM_F32_ADD(
- WASM_GET_LOCAL(kSum),
+ BUILD(r,
+ WASM_BLOCK(
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(
+ kSum, WASM_F32_ADD(WASM_GET_LOCAL(kSum),
WASM_LOAD_MEM(MachineType::Float32(),
WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
- WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
- WASM_GET_LOCAL(kSum)),
- WASM_GET_LOCAL(0)));
+ WASM_SET_LOCAL(
+ 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(4))))),
+ WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
+ WASM_GET_LOCAL(kSum)),
+ WASM_GET_LOCAL(0)));
CHECK_EQ(0, r.Call(4 * (kSize - 1)));
- CHECK_NE(-99.25, buffer[0]);
- CHECK_EQ(71256.0f, buffer[0]);
+ CHECK_NE(-99.25f, module.ReadMemory(&buffer[0]));
+ CHECK_EQ(71256.0f, module.ReadMemory(&buffer[0]));
}
-
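+// Builds a function that folds {binop} over {size} elements of type T in
+// memory, stores the accumulated value back to memory[0] and returns it.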
template <typename T>
-T GenerateAndRunFold(WasmOpcode binop, T* buffer, size_t size,
- LocalType astType, MachineType memType) {
- TestingModule module;
- module.AddMemoryElems<T>(size);
- for (size_t i = 0; i < size; i++) {
- module.raw_mem_start<T>()[i] = buffer[i];
+T GenerateAndRunFold(WasmExecutionMode execution_mode, WasmOpcode binop,
+ T* buffer, uint32_t size, LocalType astType,
+ MachineType memType) {
+ TestingModule module(execution_mode);
+ T* memory = module.AddMemoryElems<T>(size);
+ for (uint32_t i = 0; i < size; ++i) {
+ module.WriteMemory(&memory[i], buffer[i]);
}
WasmRunner<int32_t> r(&module, MachineType::Int32());
const byte kAccum = r.AllocateLocal(astType);
- BUILD(
- r,
- WASM_BLOCK(
- 4, WASM_SET_LOCAL(kAccum, WASM_LOAD_MEM(memType, WASM_ZERO)),
- WASM_WHILE(
- WASM_GET_LOCAL(0),
- WASM_BLOCK(
- 2, WASM_SET_LOCAL(
- kAccum,
- WASM_BINOP(binop, WASM_GET_LOCAL(kAccum),
- WASM_LOAD_MEM(memType, WASM_GET_LOCAL(0)))),
- WASM_SET_LOCAL(
- 0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(sizeof(T)))))),
- WASM_STORE_MEM(memType, WASM_ZERO, WASM_GET_LOCAL(kAccum)),
- WASM_GET_LOCAL(0)));
+ BUILD(r, WASM_BLOCK(
+ WASM_SET_LOCAL(kAccum, WASM_LOAD_MEM(memType, WASM_ZERO)),
+ WASM_WHILE(
+ WASM_GET_LOCAL(0),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(
+ kAccum, WASM_BINOP(binop, WASM_GET_LOCAL(kAccum),
+ WASM_LOAD_MEM(
+ memType, WASM_GET_LOCAL(0)))),
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0),
+ WASM_I8(sizeof(T)))))),
+ WASM_STORE_MEM(memType, WASM_ZERO, WASM_GET_LOCAL(kAccum)),
+ WASM_GET_LOCAL(0)));
r.Call(static_cast<int>(sizeof(T) * (size - 1)));
- return module.raw_mem_at<double>(0);
+ return module.ReadMemory(&memory[0]);
}
-
-TEST(Run_Wasm_MemF64_Mul) {
+WASM_EXEC_TEST(MemF64_Mul) {
const size_t kSize = 6;
double buffer[kSize] = {1, 2, 2, 2, 2, 2};
- double result = GenerateAndRunFold<double>(kExprF64Mul, buffer, kSize,
- kAstF64, MachineType::Float64());
+ double result =
+ GenerateAndRunFold<double>(execution_mode, kExprF64Mul, buffer, kSize,
+ kAstF64, MachineType::Float64());
CHECK_EQ(32, result);
}
-
-TEST(Build_Wasm_Infinite_Loop) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Build_Wasm_Infinite_Loop) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// Only build the graph and compile, don't run.
BUILD(r, WASM_INFINITE_LOOP);
}
-
-TEST(Build_Wasm_Infinite_Loop_effect) {
- TestingModule module;
+WASM_EXEC_TEST(Build_Wasm_Infinite_Loop_effect) {
+ TestingModule module(execution_mode);
module.AddMemoryElems<int8_t>(16);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// Only build the graph and compile, don't run.
- BUILD(r, WASM_LOOP(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
+ BUILD(r, WASM_LOOP(WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
}
-
-TEST(Run_Wasm_Unreachable0a) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Unreachable0a) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(WASM_BRV(0, WASM_I8(9)), RET(WASM_GET_LOCAL(0))));
CHECK_EQ(9, r.Call(0));
CHECK_EQ(9, r.Call(1));
}
-
-TEST(Run_Wasm_Unreachable0b) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Unreachable0b) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(WASM_BRV(0, WASM_I8(7)), WASM_UNREACHABLE));
CHECK_EQ(7, r.Call(0));
CHECK_EQ(7, r.Call(1));
}
-
TEST(Build_Wasm_Unreachable1) {
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
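+  // Build-only test (never executed), so only the compiled mode is needed
+  // and there is no interpreter variant.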
BUILD(r, WASM_UNREACHABLE);
}
-
TEST(Build_Wasm_Unreachable2) {
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE);
}
-
TEST(Build_Wasm_Unreachable3) {
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
BUILD(r, WASM_UNREACHABLE, WASM_UNREACHABLE, WASM_UNREACHABLE);
}
-
TEST(Build_Wasm_UnreachableIf1) {
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
BUILD(r, WASM_UNREACHABLE, WASM_IF(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
}
-
TEST(Build_Wasm_UnreachableIf2) {
- WasmRunner<int32_t> r(MachineType::Int32());
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
BUILD(r, WASM_UNREACHABLE,
WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE));
}
-
-TEST(Run_Wasm_Unreachable_Load) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Unreachable_Load) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(WASM_BRV(0, WASM_GET_LOCAL(0)),
WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0))));
CHECK_EQ(11, r.Call(11));
CHECK_EQ(21, r.Call(21));
}
-
-TEST(Run_Wasm_Infinite_Loop_not_taken1) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Infinite_Loop_not_taken1) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_INFINITE_LOOP), WASM_I8(45)));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(0));
}
-
-TEST(Run_Wasm_Infinite_Loop_not_taken2) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, B1(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(45)),
+WASM_EXEC_TEST(Infinite_Loop_not_taken2) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B1(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(45)),
WASM_INFINITE_LOOP)));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
-
-TEST(Run_Wasm_Infinite_Loop_not_taken2_brif) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(Infinite_Loop_not_taken2_brif) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r,
B2(WASM_BRV_IF(0, WASM_I8(45), WASM_GET_LOCAL(0)), WASM_INFINITE_LOOP));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
}
-
static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
- if (!WasmOpcodes::IsSupported(opcode)) return;
-
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator());
HandleScope scope(isolate);
@@ -1726,20 +1776,18 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig->parameter_count() == 1) {
- byte code[] = {WASM_NO_LOCALS, static_cast<byte>(opcode), kExprGetLocal, 0};
- TestBuildingGraph(&zone, &jsgraph, nullptr, sig, code,
+ byte code[] = {WASM_NO_LOCALS, kExprGetLocal, 0, static_cast<byte>(opcode)};
+ TestBuildingGraph(&zone, &jsgraph, nullptr, sig, nullptr, code,
code + arraysize(code));
} else {
CHECK_EQ(2, sig->parameter_count());
- byte code[] = {WASM_NO_LOCALS, static_cast<byte>(opcode),
- kExprGetLocal, 0,
- kExprGetLocal, 1};
- TestBuildingGraph(&zone, &jsgraph, nullptr, sig, code,
+ byte code[] = {WASM_NO_LOCALS, kExprGetLocal, 0, kExprGetLocal, 1,
+ static_cast<byte>(opcode)};
+ TestBuildingGraph(&zone, &jsgraph, nullptr, sig, nullptr, code,
code + arraysize(code));
}
}
-
TEST(Build_Wasm_SimpleExprs) {
// Test that the decoder can build a graph for all supported simple expressions.
#define GRAPH_BUILD_TEST(name, opcode, sig) \
@@ -1750,9 +1798,8 @@ TEST(Build_Wasm_SimpleExprs) {
#undef GRAPH_BUILD_TEST
}
-
-TEST(Run_Wasm_Int32LoadInt8_signext) {
- TestingModule module;
+WASM_EXEC_TEST(Int32LoadInt8_signext) {
+ TestingModule module(execution_mode);
const int kNumElems = 16;
int8_t* memory = module.AddMemoryElems<int8_t>(kNumElems);
module.RandomizeMemory();
@@ -1760,14 +1807,13 @@ TEST(Run_Wasm_Int32LoadInt8_signext) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0)));
- for (size_t i = 0; i < kNumElems; i++) {
- CHECK_EQ(memory[i], r.Call(static_cast<int>(i)));
+ for (int i = 0; i < kNumElems; ++i) {
+ CHECK_EQ(memory[i], r.Call(i));
}
}
-
-TEST(Run_Wasm_Int32LoadInt8_zeroext) {
- TestingModule module;
+WASM_EXEC_TEST(Int32LoadInt8_zeroext) {
+ TestingModule module(execution_mode);
const int kNumElems = 16;
byte* memory = module.AddMemory(kNumElems);
module.RandomizeMemory(77);
@@ -1775,14 +1821,13 @@ TEST(Run_Wasm_Int32LoadInt8_zeroext) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Uint8(), WASM_GET_LOCAL(0)));
- for (size_t i = 0; i < kNumElems; i++) {
- CHECK_EQ(memory[i], r.Call(static_cast<int>(i)));
+ for (int i = 0; i < kNumElems; ++i) {
+ CHECK_EQ(memory[i], r.Call(i));
}
}
-
-TEST(Run_Wasm_Int32LoadInt16_signext) {
- TestingModule module;
+WASM_EXEC_TEST(Int32LoadInt16_signext) {
+ TestingModule module(execution_mode);
const int kNumBytes = 16;
byte* memory = module.AddMemory(kNumBytes);
module.RandomizeMemory(888);
@@ -1790,15 +1835,14 @@ TEST(Run_Wasm_Int32LoadInt16_signext) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Int16(), WASM_GET_LOCAL(0)));
- for (size_t i = 0; i < kNumBytes; i += 2) {
+ for (int i = 0; i < kNumBytes; i += 2) {
int32_t expected = memory[i] | (static_cast<int8_t>(memory[i + 1]) << 8);
- CHECK_EQ(expected, r.Call(static_cast<int>(i)));
+ CHECK_EQ(expected, r.Call(i));
}
}
-
-TEST(Run_Wasm_Int32LoadInt16_zeroext) {
- TestingModule module;
+WASM_EXEC_TEST(Int32LoadInt16_zeroext) {
+ TestingModule module(execution_mode);
const int kNumBytes = 16;
byte* memory = module.AddMemory(kNumBytes);
module.RandomizeMemory(9999);
@@ -1806,20 +1850,19 @@ TEST(Run_Wasm_Int32LoadInt16_zeroext) {
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Uint16(), WASM_GET_LOCAL(0)));
- for (size_t i = 0; i < kNumBytes; i += 2) {
+ for (int i = 0; i < kNumBytes; i += 2) {
int32_t expected = memory[i] | (memory[i + 1] << 8);
- CHECK_EQ(expected, r.Call(static_cast<int>(i)));
+ CHECK_EQ(expected, r.Call(i));
}
}
-
-TEST(Run_WasmInt32Global) {
- TestingModule module;
- int32_t* global = module.AddGlobal<int32_t>(MachineType::Int32());
+WASM_EXEC_TEST(Int32Global) {
+ TestingModule module(execution_mode);
+ int32_t* global = module.AddGlobal<int32_t>(kAstI32);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, WASM_STORE_GLOBAL(
- 0, WASM_I32_ADD(WASM_LOAD_GLOBAL(0), WASM_GET_LOCAL(0))));
+ BUILD(r, WASM_SET_GLOBAL(
+ 0, WASM_I32_ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))));
*global = 116;
for (int i = 9; i < 444444; i += 111111) {
@@ -1829,28 +1872,27 @@ TEST(Run_WasmInt32Global) {
}
}
-
-TEST(Run_WasmInt32Globals_DontAlias) {
+WASM_EXEC_TEST(Int32Globals_DontAlias) {
const int kNumGlobals = 3;
- TestingModule module;
- int32_t* globals[] = {module.AddGlobal<int32_t>(MachineType::Int32()),
- module.AddGlobal<int32_t>(MachineType::Int32()),
- module.AddGlobal<int32_t>(MachineType::Int32())};
+ TestingModule module(execution_mode);
+ int32_t* globals[] = {module.AddGlobal<int32_t>(kAstI32),
+ module.AddGlobal<int32_t>(kAstI32),
+ module.AddGlobal<int32_t>(kAstI32)};
- for (int g = 0; g < kNumGlobals; g++) {
+ for (int g = 0; g < kNumGlobals; ++g) {
// global = global + p0
WasmRunner<int32_t> r(&module, MachineType::Int32());
- BUILD(r, WASM_STORE_GLOBAL(
- g, WASM_I32_ADD(WASM_LOAD_GLOBAL(g), WASM_GET_LOCAL(0))));
+ BUILD(r, WASM_SET_GLOBAL(
+ g, WASM_I32_ADD(WASM_GET_GLOBAL(g), WASM_GET_LOCAL(0))));
// Check that reading/writing global number {g} doesn't alter the others.
*globals[g] = 116 * g;
int32_t before[kNumGlobals];
for (int i = 9; i < 444444; i += 111113) {
int32_t sum = *globals[g] + i;
- for (int j = 0; j < kNumGlobals; j++) before[j] = *globals[j];
+ for (int j = 0; j < kNumGlobals; ++j) before[j] = *globals[j];
r.Call(i);
- for (int j = 0; j < kNumGlobals; j++) {
+ for (int j = 0; j < kNumGlobals; ++j) {
int32_t expected = j == g ? sum : before[j];
CHECK_EQ(expected, *globals[j]);
}
@@ -1858,14 +1900,13 @@ TEST(Run_WasmInt32Globals_DontAlias) {
}
}
-
-TEST(Run_WasmFloat32Global) {
- TestingModule module;
- float* global = module.AddGlobal<float>(MachineType::Float32());
+WASM_EXEC_TEST(Float32Global) {
+ TestingModule module(execution_mode);
+ float* global = module.AddGlobal<float>(kAstF32);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, B2(WASM_STORE_GLOBAL(
- 0, WASM_F32_ADD(WASM_LOAD_GLOBAL(0),
+ BUILD(r, B2(WASM_SET_GLOBAL(
+ 0, WASM_F32_ADD(WASM_GET_GLOBAL(0),
WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0)))),
WASM_ZERO));
@@ -1877,14 +1918,13 @@ TEST(Run_WasmFloat32Global) {
}
}
-
-TEST(Run_WasmFloat64Global) {
- TestingModule module;
- double* global = module.AddGlobal<double>(MachineType::Float64());
+WASM_EXEC_TEST(Float64Global) {
+ TestingModule module(execution_mode);
+ double* global = module.AddGlobal<double>(kAstF64);
WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
- BUILD(r, B2(WASM_STORE_GLOBAL(
- 0, WASM_F64_ADD(WASM_LOAD_GLOBAL(0),
+ BUILD(r, B2(WASM_SET_GLOBAL(
+ 0, WASM_F64_ADD(WASM_GET_GLOBAL(0),
WASM_F64_SCONVERT_I32(WASM_GET_LOCAL(0)))),
WASM_ZERO));
@@ -1896,37 +1936,25 @@ TEST(Run_WasmFloat64Global) {
}
}
-
-TEST(Run_WasmMixedGlobals) {
- TestingModule module;
- int32_t* unused = module.AddGlobal<int32_t>(MachineType::Int32());
+WASM_EXEC_TEST(MixedGlobals) {
+ TestingModule module(execution_mode);
+ int32_t* unused = module.AddGlobal<int32_t>(kAstI32);
byte* memory = module.AddMemory(32);
- int8_t* var_int8 = module.AddGlobal<int8_t>(MachineType::Int8());
- uint8_t* var_uint8 = module.AddGlobal<uint8_t>(MachineType::Uint8());
- int16_t* var_int16 = module.AddGlobal<int16_t>(MachineType::Int16());
- uint16_t* var_uint16 = module.AddGlobal<uint16_t>(MachineType::Uint16());
- int32_t* var_int32 = module.AddGlobal<int32_t>(MachineType::Int32());
- uint32_t* var_uint32 = module.AddGlobal<uint32_t>(MachineType::Uint32());
- float* var_float = module.AddGlobal<float>(MachineType::Float32());
- double* var_double = module.AddGlobal<double>(MachineType::Float64());
+ int32_t* var_int32 = module.AddGlobal<int32_t>(kAstI32);
+ uint32_t* var_uint32 = module.AddGlobal<uint32_t>(kAstI32);
+ float* var_float = module.AddGlobal<float>(kAstF32);
+ double* var_double = module.AddGlobal<double>(kAstF64);
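+  // Wasm globals are typed by LocalType and limited to i32, i64, f32 and
+  // f64, so the old 8- and 16-bit global variants are gone.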
WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(
r,
WASM_BLOCK(
- 9,
- WASM_STORE_GLOBAL(1, WASM_LOAD_MEM(MachineType::Int8(), WASM_ZERO)),
- WASM_STORE_GLOBAL(2, WASM_LOAD_MEM(MachineType::Uint8(), WASM_ZERO)),
- WASM_STORE_GLOBAL(3, WASM_LOAD_MEM(MachineType::Int16(), WASM_ZERO)),
- WASM_STORE_GLOBAL(4, WASM_LOAD_MEM(MachineType::Uint16(), WASM_ZERO)),
- WASM_STORE_GLOBAL(5, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
- WASM_STORE_GLOBAL(6, WASM_LOAD_MEM(MachineType::Uint32(), WASM_ZERO)),
- WASM_STORE_GLOBAL(7,
- WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)),
- WASM_STORE_GLOBAL(8,
- WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
+ WASM_SET_GLOBAL(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_SET_GLOBAL(2, WASM_LOAD_MEM(MachineType::Uint32(), WASM_ZERO)),
+ WASM_SET_GLOBAL(3, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)),
+ WASM_SET_GLOBAL(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)),
WASM_ZERO));
memory[0] = 0xaa;
@@ -1939,10 +1967,6 @@ TEST(Run_WasmMixedGlobals) {
memory[7] = 0x99;
r.Call(1);
- CHECK(static_cast<int8_t>(0xaa) == *var_int8);
- CHECK(static_cast<uint8_t>(0xaa) == *var_uint8);
- CHECK(static_cast<int16_t>(0xccaa) == *var_int16);
- CHECK(static_cast<uint16_t>(0xccaa) == *var_uint16);
CHECK(static_cast<int32_t>(0xee55ccaa) == *var_int32);
CHECK(static_cast<uint32_t>(0xee55ccaa) == *var_uint32);
CHECK(bit_cast<float>(0xee55ccaa) == *var_float);
@@ -1951,12 +1975,11 @@ TEST(Run_WasmMixedGlobals) {
USE(unused);
}
-
-TEST(Run_WasmCallEmpty) {
+WASM_EXEC_TEST(CallEmpty) {
const int32_t kExpected = -414444;
// Build the target function.
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(sigs.i_v(), &module);
BUILD(t, WASM_I32V_3(kExpected));
uint32_t index = t.CompileAndAdd();
@@ -1969,21 +1992,20 @@ TEST(Run_WasmCallEmpty) {
CHECK_EQ(kExpected, result);
}
-
-TEST(Run_WasmCallF32StackParameter) {
+WASM_EXEC_TEST(CallF32StackParameter) {
// Build the target function.
LocalType param_types[20];
- for (int i = 0; i < 20; i++) param_types[i] = kAstF32;
+ for (int i = 0; i < 20; ++i) param_types[i] = kAstF32;
FunctionSig sig(1, 19, param_types);
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(&sig, &module);
BUILD(t, WASM_GET_LOCAL(17));
uint32_t index = t.CompileAndAdd();
// Build the calling function.
WasmRunner<float> r(&module);
- BUILD(r, WASM_CALL_FUNCTION(
- index, WASM_F32(1.0f), WASM_F32(2.0f), WASM_F32(4.0f),
+ BUILD(r, WASM_CALL_FUNCTIONN(
+ 19, index, WASM_F32(1.0f), WASM_F32(2.0f), WASM_F32(4.0f),
WASM_F32(8.0f), WASM_F32(16.0f), WASM_F32(32.0f),
WASM_F32(64.0f), WASM_F32(128.0f), WASM_F32(256.0f),
WASM_F32(1.5f), WASM_F32(2.5f), WASM_F32(4.5f), WASM_F32(8.5f),
@@ -1994,39 +2016,38 @@ TEST(Run_WasmCallF32StackParameter) {
CHECK_EQ(256.5f, result);
}
-
-TEST(Run_WasmCallF64StackParameter) {
+WASM_EXEC_TEST(CallF64StackParameter) {
// Build the target function.
LocalType param_types[20];
- for (int i = 0; i < 20; i++) param_types[i] = kAstF64;
+ for (int i = 0; i < 20; ++i) param_types[i] = kAstF64;
FunctionSig sig(1, 19, param_types);
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(&sig, &module);
BUILD(t, WASM_GET_LOCAL(17));
uint32_t index = t.CompileAndAdd();
// Build the calling function.
WasmRunner<double> r(&module);
- BUILD(r, WASM_CALL_FUNCTION(index, WASM_F64(1.0), WASM_F64(2.0),
- WASM_F64(4.0), WASM_F64(8.0), WASM_F64(16.0),
- WASM_F64(32.0), WASM_F64(64.0), WASM_F64(128.0),
- WASM_F64(256.0), WASM_F64(1.5), WASM_F64(2.5),
- WASM_F64(4.5), WASM_F64(8.5), WASM_F64(16.5),
- WASM_F64(32.5), WASM_F64(64.5), WASM_F64(128.5),
- WASM_F64(256.5), WASM_F64(512.5)));
+ BUILD(r, WASM_CALL_FUNCTIONN(19, index, WASM_F64(1.0), WASM_F64(2.0),
+ WASM_F64(4.0), WASM_F64(8.0), WASM_F64(16.0),
+ WASM_F64(32.0), WASM_F64(64.0), WASM_F64(128.0),
+ WASM_F64(256.0), WASM_F64(1.5), WASM_F64(2.5),
+ WASM_F64(4.5), WASM_F64(8.5), WASM_F64(16.5),
+ WASM_F64(32.5), WASM_F64(64.5), WASM_F64(128.5),
+ WASM_F64(256.5), WASM_F64(512.5)));
  double result = r.Call();
CHECK_EQ(256.5, result);
}
-TEST(Run_WasmCallVoid) {
+WASM_EXEC_TEST(CallVoid) {
const byte kMemOffset = 8;
const int32_t kElemNum = kMemOffset / sizeof(int32_t);
- const int32_t kExpected = -414444;
+ const int32_t kExpected = 414444;
// Build the target function.
TestSignatures sigs;
- TestingModule module;
- module.AddMemory(16);
+ TestingModule module(execution_mode);
+ int32_t* memory = module.AddMemoryElems<int32_t>(16 / sizeof(int32_t));
module.RandomizeMemory();
WasmFunctionCompiler t(sigs.v_v(), &module);
BUILD(t, WASM_STORE_MEM(MachineType::Int32(), WASM_I8(kMemOffset),
@@ -2040,21 +2061,21 @@ TEST(Run_WasmCallVoid) {
int32_t result = r.Call();
CHECK_EQ(kExpected, result);
- CHECK_EQ(kExpected, module.raw_mem_start<int32_t>()[kElemNum]);
+ CHECK_EQ(static_cast<int64_t>(kExpected),
+ static_cast<int64_t>(module.ReadMemory(&memory[kElemNum])));
}
-
-TEST(Run_WasmCall_Int32Add) {
+WASM_EXEC_TEST(Call_Int32Add) {
// Build the target function.
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(sigs.i_ii(), &module);
BUILD(t, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
uint32_t index = t.CompileAndAdd();
// Build the caller function.
WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
- BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ BUILD(r, WASM_CALL_FUNCTION2(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -2065,9 +2086,9 @@ TEST(Run_WasmCall_Int32Add) {
}
}
-TEST(Run_WasmCall_Float32Sub) {
+WASM_EXEC_TEST(Call_Float32Sub) {
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t(sigs.f_ff(), &module);
// Build the target function.
@@ -2076,37 +2097,37 @@ TEST(Run_WasmCall_Float32Sub) {
  // Build the caller function.
WasmRunner<float> r(&module, MachineType::Float32(), MachineType::Float32());
- BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ BUILD(r, WASM_CALL_FUNCTION2(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(*i - *j, r.Call(*i, *j)); }
}
}
-
-TEST(Run_WasmCall_Float64Sub) {
- TestingModule module;
+WASM_EXEC_TEST(Call_Float64Sub) {
+ TestingModule module(execution_mode);
double* memory = module.AddMemoryElems<double>(16);
WasmRunner<int32_t> r(&module);
- BUILD(r, WASM_BLOCK(
- 2, WASM_STORE_MEM(
- MachineType::Float64(), WASM_ZERO,
- WASM_F64_SUB(
- WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO),
- WASM_LOAD_MEM(MachineType::Float64(), WASM_I8(8)))),
- WASM_I8(107)));
+ BUILD(r,
+ WASM_BLOCK(WASM_STORE_MEM(
+ MachineType::Float64(), WASM_ZERO,
+ WASM_F64_SUB(
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO),
+ WASM_LOAD_MEM(MachineType::Float64(), WASM_I8(8)))),
+ WASM_I8(107)));
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- memory[0] = *i;
- memory[1] = *j;
+ module.WriteMemory(&memory[0], *i);
+ module.WriteMemory(&memory[1], *j);
double expected = *i - *j;
CHECK_EQ(107, r.Call());
+
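+      // NaN is the only value for which expected != expected.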
if (expected != expected) {
- CHECK(memory[0] != memory[0]);
+ CHECK(module.ReadMemory(&memory[0]) != module.ReadMemory(&memory[0]));
} else {
- CHECK_EQ(expected, memory[0]);
+ CHECK_EQ(expected, module.ReadMemory(&memory[0]));
}
}
}
@@ -2115,35 +2136,26 @@ TEST(Run_WasmCall_Float64Sub) {
#define ADD_CODE(vec, ...) \
do { \
byte __buf[] = {__VA_ARGS__}; \
- for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
+ for (size_t i = 0; i < sizeof(__buf); ++i) vec.push_back(__buf[i]); \
} while (false)
-
-static void Run_WasmMixedCall_N(int start) {
+static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
const int kExpected = 6333;
const int kElemSize = 8;
TestSignatures sigs;
-#if WASM_64
- static MachineType mixed[] = {
- MachineType::Int32(), MachineType::Float32(), MachineType::Int64(),
- MachineType::Float64(), MachineType::Float32(), MachineType::Int64(),
- MachineType::Int32(), MachineType::Float64(), MachineType::Float32(),
- MachineType::Float64(), MachineType::Int32(), MachineType::Int64(),
- MachineType::Int32(), MachineType::Int32()};
-#else
+ // 64-bit cases handled in test-run-wasm-64.cc.
static MachineType mixed[] = {
MachineType::Int32(), MachineType::Float32(), MachineType::Float64(),
MachineType::Float32(), MachineType::Int32(), MachineType::Float64(),
MachineType::Float32(), MachineType::Float64(), MachineType::Int32(),
MachineType::Int32(), MachineType::Int32()};
-#endif
int num_params = static_cast<int>(arraysize(mixed)) - start;
- for (int which = 0; which < num_params; which++) {
+ for (int which = 0; which < num_params; ++which) {
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
- TestingModule module;
+ TestingModule module(execution_mode);
module.AddMemory(1024);
MachineType* memtypes = &mixed[start];
MachineType result = memtypes[which];
@@ -2154,7 +2166,7 @@ static void Run_WasmMixedCall_N(int start) {
uint32_t index;
FunctionSig::Builder b(&zone, 1, num_params);
b.AddReturn(WasmOpcodes::LocalTypeFor(result));
- for (int i = 0; i < num_params; i++) {
+ for (int i = 0; i < num_params; ++i) {
b.AddParam(WasmOpcodes::LocalTypeFor(memtypes[i]));
}
WasmFunctionCompiler t(b.Build(), &module);
@@ -2165,31 +2177,38 @@ static void Run_WasmMixedCall_N(int start) {
// Build the calling function.
// =========================================================================
WasmRunner<int32_t> r(&module);
-
std::vector<byte> code;
- ADD_CODE(code,
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
- ZERO_ALIGNMENT, ZERO_OFFSET);
+
+ // Load the offset for the store.
ADD_CODE(code, WASM_ZERO);
- ADD_CODE(code, kExprCallFunction, static_cast<byte>(index));
- for (int i = 0; i < num_params; i++) {
+ // Load the arguments.
+ for (int i = 0; i < num_params; ++i) {
int offset = (i + 1) * kElemSize;
ADD_CODE(code, WASM_LOAD_MEM(memtypes[i], WASM_I8(offset)));
}
+ // Call the selector function.
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(num_params),
+ static_cast<byte>(index));
+
+ // Store the result in memory.
+ ADD_CODE(code,
+ static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
+ ZERO_ALIGNMENT, ZERO_OFFSET);
+
+ // Return the expected value.
ADD_CODE(code, WASM_I32V_2(kExpected));
- size_t end = code.size();
- code.push_back(0);
- r.Build(&code[0], &code[end]);
+
+ r.Build(&code[0], &code[0] + code.size());
// Run the code.
- for (int t = 0; t < 10; t++) {
+ for (int t = 0; t < 10; ++t) {
module.RandomizeMemory();
CHECK_EQ(kExpected, r.Call());
int size = WasmOpcodes::MemSize(result);
- for (int i = 0; i < size; i++) {
+ for (int i = 0; i < size; ++i) {
int base = (which + 1) * kElemSize;
byte expected = module.raw_mem_at<byte>(base + i);
byte result = module.raw_mem_at<byte>(i);
@@ -2199,15 +2218,14 @@ static void Run_WasmMixedCall_N(int start) {
}
}
-TEST(Run_WasmMixedCall_0) { Run_WasmMixedCall_N(0); }
-TEST(Run_WasmMixedCall_1) { Run_WasmMixedCall_N(1); }
-TEST(Run_WasmMixedCall_2) { Run_WasmMixedCall_N(2); }
-TEST(Run_WasmMixedCall_3) { Run_WasmMixedCall_N(3); }
+WASM_EXEC_TEST(MixedCall_0) { Run_WasmMixedCall_N(execution_mode, 0); }
+WASM_EXEC_TEST(MixedCall_1) { Run_WasmMixedCall_N(execution_mode, 1); }
+WASM_EXEC_TEST(MixedCall_2) { Run_WasmMixedCall_N(execution_mode, 2); }
+WASM_EXEC_TEST(MixedCall_3) { Run_WasmMixedCall_N(execution_mode, 3); }
-
-TEST(Run_Wasm_AddCall) {
+WASM_EXEC_TEST(AddCall) {
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t1(sigs.i_ii(), &module);
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
t1.CompileAndAdd();
@@ -2216,21 +2234,21 @@ TEST(Run_Wasm_AddCall) {
byte local = r.AllocateLocal(kAstI32);
BUILD(r, B2(WASM_SET_LOCAL(local, WASM_I8(99)),
WASM_I32_ADD(
- WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(0),
- WASM_GET_LOCAL(0)),
- WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(local)))));
+ WASM_CALL_FUNCTION2(t1.function_index(), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_CALL_FUNCTION2(t1.function_index(), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(local)))));
CHECK_EQ(198, r.Call(0));
CHECK_EQ(200, r.Call(1));
CHECK_EQ(100, r.Call(-49));
}
-TEST(Run_Wasm_CountDown_expr) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(CountDown_expr) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, WASM_LOOP(
- 3, WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)),
- WASM_BREAKV(0, WASM_GET_LOCAL(0))),
+ WASM_IF(WASM_NOT(WASM_GET_LOCAL(0)),
+ WASM_BREAKV(1, WASM_GET_LOCAL(0))),
WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1))),
WASM_CONTINUE(0)));
CHECK_EQ(0, r.Call(1));
@@ -2238,51 +2256,46 @@ TEST(Run_Wasm_CountDown_expr) {
CHECK_EQ(0, r.Call(100));
}
-
-TEST(Run_Wasm_ExprBlock2a) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))), WASM_I8(1)));
+WASM_EXEC_TEST(ExprBlock2a) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(1))), WASM_I8(1)));
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
-
-TEST(Run_Wasm_ExprBlock2b) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(1))), WASM_I8(2)));
+WASM_EXEC_TEST(ExprBlock2b) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, B2(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_I8(1))), WASM_I8(2)));
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
-
-TEST(Run_Wasm_ExprBlock2c) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(ExprBlock2c) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)), WASM_I8(1)));
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
-
-TEST(Run_Wasm_ExprBlock2d) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(ExprBlock2d) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B2(WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)), WASM_I8(2)));
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
}
-
-TEST(Run_Wasm_ExprBlock_ManualSwitch) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(6, WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1)),
- WASM_BRV(0, WASM_I8(11))),
+WASM_EXEC_TEST(ExprBlock_ManualSwitch) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_BLOCK(WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1)),
+ WASM_BRV(1, WASM_I8(11))),
WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2)),
- WASM_BRV(0, WASM_I8(12))),
+ WASM_BRV(1, WASM_I8(12))),
WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3)),
- WASM_BRV(0, WASM_I8(13))),
+ WASM_BRV(1, WASM_I8(13))),
WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4)),
- WASM_BRV(0, WASM_I8(14))),
+ WASM_BRV(1, WASM_I8(14))),
WASM_IF(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5)),
- WASM_BRV(0, WASM_I8(15))),
+ WASM_BRV(1, WASM_I8(15))),
WASM_I8(99)));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(11, r.Call(1));
@@ -2293,21 +2306,19 @@ TEST(Run_Wasm_ExprBlock_ManualSwitch) {
CHECK_EQ(99, r.Call(6));
}
-
-TEST(Run_Wasm_ExprBlock_ManualSwitch_brif) {
- WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r,
- WASM_BLOCK(6, WASM_BRV_IF(0, WASM_I8(11),
+WASM_EXEC_TEST(ExprBlock_ManualSwitch_brif) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, WASM_BLOCK(WASM_BRV_IF(0, WASM_I8(11),
WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1))),
- WASM_BRV_IF(0, WASM_I8(12),
- WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2))),
- WASM_BRV_IF(0, WASM_I8(13),
- WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3))),
- WASM_BRV_IF(0, WASM_I8(14),
- WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4))),
- WASM_BRV_IF(0, WASM_I8(15),
- WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5))),
- WASM_I8(99)));
+ WASM_BRV_IF(0, WASM_I8(12),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2))),
+ WASM_BRV_IF(0, WASM_I8(13),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3))),
+ WASM_BRV_IF(0, WASM_I8(14),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4))),
+ WASM_BRV_IF(0, WASM_I8(15),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5))),
+ WASM_I8(99)));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(12, r.Call(2));
@@ -2317,45 +2328,41 @@ TEST(Run_Wasm_ExprBlock_ManualSwitch_brif) {
CHECK_EQ(99, r.Call(6));
}
-
-TEST(Run_Wasm_nested_ifs) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(nested_ifs) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
BUILD(r, WASM_IF_ELSE(
WASM_GET_LOCAL(0),
WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(11), WASM_I8(12)),
WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_I8(13), WASM_I8(14))));
-
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
CHECK_EQ(13, r.Call(0, 1));
CHECK_EQ(14, r.Call(0, 0));
}
-
-TEST(Run_Wasm_ExprBlock_if) {
- WasmRunner<int32_t> r(MachineType::Int32());
+WASM_EXEC_TEST(ExprBlock_if) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B1(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(11)),
- WASM_BRV(0, WASM_I8(14)))));
+ WASM_BRV(1, WASM_I8(14)))));
CHECK_EQ(11, r.Call(1));
CHECK_EQ(14, r.Call(0));
}
-TEST(Run_Wasm_ExprBlock_nested_ifs) {
-  WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
-
+WASM_EXEC_TEST(ExprBlock_nested_ifs) {
+  WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+                        MachineType::Int32());
- BUILD(r, WASM_BLOCK(
- 1, WASM_IF_ELSE(
- WASM_GET_LOCAL(0),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(11)),
- WASM_BRV(0, WASM_I8(12))),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(13)),
- WASM_BRV(0, WASM_I8(14))))));
-
+ BUILD(r, WASM_BLOCK(WASM_IF_ELSE(
+ WASM_GET_LOCAL(0),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(11)),
+ WASM_BRV(1, WASM_I8(12))),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(0, WASM_I8(13)),
+ WASM_BRV(1, WASM_I8(14))))));
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -2363,18 +2370,16 @@ TEST(Run_Wasm_ExprBlock_nested_ifs) {
CHECK_EQ(14, r.Call(0, 0));
}
-TEST(Run_Wasm_ExprLoop_nested_ifs) {
-  WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
-
+WASM_EXEC_TEST(ExprLoop_nested_ifs) {
+  WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+                        MachineType::Int32());
- BUILD(r, WASM_LOOP(
- 1, WASM_IF_ELSE(
- WASM_GET_LOCAL(0),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(11)),
- WASM_BRV(1, WASM_I8(12))),
- WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(13)),
- WASM_BRV(1, WASM_I8(14))))));
-
+ BUILD(r, WASM_LOOP(WASM_IF_ELSE(
+ WASM_GET_LOCAL(0),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(11)),
+ WASM_BRV(3, WASM_I8(12))),
+ WASM_IF_ELSE(WASM_GET_LOCAL(1), WASM_BRV(1, WASM_I8(13)),
+ WASM_BRV(3, WASM_I8(14))))));
CHECK_EQ(11, r.Call(1, 1));
CHECK_EQ(12, r.Call(1, 0));
@@ -2382,10 +2387,9 @@ TEST(Run_Wasm_ExprLoop_nested_ifs) {
CHECK_EQ(14, r.Call(0, 0));
}
-
-TEST(Run_Wasm_SimpleCallIndirect) {
+WASM_EXEC_TEST(SimpleCallIndirect) {
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t1(sigs.i_ii(), &module);
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2401,23 +2405,23 @@ TEST(Run_Wasm_SimpleCallIndirect) {
module.AddSignature(sigs.d_dd());
// Function table.
- int table[] = {0, 1};
- module.AddIndirectFunctionTable(table, 2);
+ uint16_t indirect_function_table[] = {0, 1};
+ module.AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
module.PopulateIndirectFunctionTable();
  // Build the caller function.
WasmRunner<int32_t> r(&module, MachineType::Int32());
- BUILD(r, WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_I8(66), WASM_I8(22)));
+ BUILD(r, WASM_CALL_INDIRECT2(1, WASM_GET_LOCAL(0), WASM_I8(66), WASM_I8(22)));
CHECK_EQ(88, r.Call(0));
CHECK_EQ(44, r.Call(1));
CHECK_TRAP(r.Call(2));
}
-
-TEST(Run_Wasm_MultipleCallIndirect) {
+WASM_EXEC_TEST(MultipleCallIndirect) {
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
WasmFunctionCompiler t1(sigs.i_ii(), &module);
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -2433,18 +2437,19 @@ TEST(Run_Wasm_MultipleCallIndirect) {
module.AddSignature(sigs.d_dd());
// Function table.
- int table[] = {0, 1};
- module.AddIndirectFunctionTable(table, 2);
+ uint16_t indirect_function_table[] = {0, 1};
+ module.AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
module.PopulateIndirectFunctionTable();
  // Build the caller function.
WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
- BUILD(r,
- WASM_I32_ADD(WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
- WASM_GET_LOCAL(2)),
- WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2),
- WASM_GET_LOCAL(0))));
+ BUILD(r, WASM_I32_ADD(
+ WASM_CALL_INDIRECT2(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(2)),
+ WASM_CALL_INDIRECT2(1, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2),
+ WASM_GET_LOCAL(0))));
CHECK_EQ(5, r.Call(0, 1, 2));
CHECK_EQ(19, r.Call(0, 1, 9));
@@ -2457,9 +2462,9 @@ TEST(Run_Wasm_MultipleCallIndirect) {
CHECK_TRAP(r.Call(2, 1, 0));
}
-TEST(Run_Wasm_CallIndirect_NoTable) {
+WASM_EXEC_TEST(CallIndirect_NoTable) {
TestSignatures sigs;
- TestingModule module;
+ TestingModule module(execution_mode);
// One function.
WasmFunctionCompiler t1(sigs.i_ii(), &module);
@@ -2472,241 +2477,125 @@ TEST(Run_Wasm_CallIndirect_NoTable) {
  // Build the caller function.
WasmRunner<int32_t> r(&module, MachineType::Int32());
- BUILD(r, WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_I8(66), WASM_I8(22)));
+ BUILD(r, WASM_CALL_INDIRECT2(1, WASM_GET_LOCAL(0), WASM_I8(66), WASM_I8(22)));
CHECK_TRAP(r.Call(0));
CHECK_TRAP(r.Call(1));
CHECK_TRAP(r.Call(2));
}
-TEST(Run_Wasm_F32Floor) {
- WasmRunner<float> r(MachineType::Float32());
+WASM_EXEC_TEST(F32Floor) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_F32_FLOOR(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(floorf(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F32Ceil) {
- WasmRunner<float> r(MachineType::Float32());
+WASM_EXEC_TEST(F32Ceil) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_F32_CEIL(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(ceilf(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F32Trunc) {
- WasmRunner<float> r(MachineType::Float32());
+WASM_EXEC_TEST(F32Trunc) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_F32_TRUNC(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(truncf(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F32NearestInt) {
- WasmRunner<float> r(MachineType::Float32());
+WASM_EXEC_TEST(F32NearestInt) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_F32_NEARESTINT(WASM_GET_LOCAL(0)));
FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(nearbyintf(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F64Floor) {
- WasmRunner<double> r(MachineType::Float64());
+WASM_EXEC_TEST(F64Floor) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_F64_FLOOR(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(floor(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F64Ceil) {
- WasmRunner<double> r(MachineType::Float64());
+WASM_EXEC_TEST(F64Ceil) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_F64_CEIL(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ceil(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F64Trunc) {
- WasmRunner<double> r(MachineType::Float64());
+WASM_EXEC_TEST(F64Trunc) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_F64_TRUNC(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(trunc(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F64NearestInt) {
- WasmRunner<double> r(MachineType::Float64());
+WASM_EXEC_TEST(F64NearestInt) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_F64_NEARESTINT(WASM_GET_LOCAL(0)));
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(nearbyint(*i), r.Call(*i)); }
}
-TEST(Run_Wasm_F32Min) {
- WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
+WASM_EXEC_TEST(F32Min) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32(),
+ MachineType::Float32());
BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
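+  // JSMin/JSMax implement JavaScript's Math.min/max semantics, which match
+  // the wasm instructions here: a NaN in either operand propagates, and -0
+  // is treated as less than +0.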
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- float expected;
- if (*i < *j) {
- expected = *i;
- } else if (*j < *i) {
- expected = *j;
- } else if (*i != *i) {
- // If *i or *j is NaN, then the result is NaN.
- expected = *i;
- } else {
- expected = *j;
- }
-
- CHECK_FLOAT_EQ(expected, r.Call(*i, *j));
- }
+    FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(JSMin(*i, *j), r.Call(*i, *j)); }
}
}
-
-TEST(Run_Wasm_F64Min) {
- WasmRunner<double> r(MachineType::Float64(), MachineType::Float64());
+WASM_EXEC_TEST(F64Min) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64(),
+ MachineType::Float64());
BUILD(r, WASM_F64_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
- FOR_FLOAT64_INPUTS(j) {
- double expected;
- if (*i < *j) {
- expected = *i;
- } else if (*j < *i) {
- expected = *j;
- } else if (*i != *i) {
- // If *i or *j is NaN, then the result is NaN.
- expected = *i;
- } else {
- expected = *j;
- }
-
- CHECK_DOUBLE_EQ(expected, r.Call(*i, *j));
- }
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(JSMin(*i, *j), r.Call(*i, *j)); }
}
}
-
-TEST(Run_Wasm_F32Max) {
- WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
+WASM_EXEC_TEST(F32Max) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32(),
+ MachineType::Float32());
BUILD(r, WASM_F32_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
- FOR_FLOAT32_INPUTS(j) {
- float expected;
- if (*i > *j) {
- expected = *i;
- } else if (*j > *i) {
- expected = *j;
- } else if (*i != *i) {
- // If *i or *j is NaN, then the result is NaN.
- expected = *i;
- } else {
- expected = *j;
- }
-
- CHECK_FLOAT_EQ(expected, r.Call(*i, *j));
- }
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(JSMax(*i, *j), r.Call(*i, *j)); }
}
}
-
-TEST(Run_Wasm_F64Max) {
- WasmRunner<double> r(MachineType::Float64(), MachineType::Float64());
+WASM_EXEC_TEST(F64Max) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64(),
+ MachineType::Float64());
BUILD(r, WASM_F64_MAX(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
FOR_FLOAT64_INPUTS(j) {
- double expected;
- if (*i > *j) {
- expected = *i;
- } else if (*j > *i) {
- expected = *j;
- } else if (*i != *i) {
- // If *i or *j is NaN, then the result is NaN.
- expected = *i;
- } else {
- expected = *j;
- }
-
- CHECK_DOUBLE_EQ(expected, r.Call(*i, *j));
+ double result = r.Call(*i, *j);
+ CHECK_DOUBLE_EQ(JSMax(*i, *j), result);
}
}
}
-// TODO(ahaas): Fix on arm and mips and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-
-TEST(Run_Wasm_F32Min_Snan) {
- // Test that the instruction does not return a signalling NaN.
- {
- WasmRunner<float> r;
- BUILD(r,
- WASM_F32_MIN(WASM_F32(bit_cast<float>(0xff80f1e2)), WASM_F32(57.67)));
- CHECK_EQ(0xffc0f1e2, bit_cast<uint32_t>(r.Call()));
- }
- {
- WasmRunner<float> r;
- BUILD(r,
- WASM_F32_MIN(WASM_F32(45.73), WASM_F32(bit_cast<float>(0x7f80f1e2))));
- CHECK_EQ(0x7fc0f1e2, bit_cast<uint32_t>(r.Call()));
- }
-}
-
-TEST(Run_Wasm_F32Max_Snan) {
- // Test that the instruction does not return a signalling NaN.
- {
- WasmRunner<float> r;
- BUILD(r,
- WASM_F32_MAX(WASM_F32(bit_cast<float>(0xff80f1e2)), WASM_F32(57.67)));
- CHECK_EQ(0xffc0f1e2, bit_cast<uint32_t>(r.Call()));
- }
- {
- WasmRunner<float> r;
- BUILD(r,
- WASM_F32_MAX(WASM_F32(45.73), WASM_F32(bit_cast<float>(0x7f80f1e2))));
- CHECK_EQ(0x7fc0f1e2, bit_cast<uint32_t>(r.Call()));
- }
-}
-
-TEST(Run_Wasm_F64Min_Snan) {
- // Test that the instruction does not return a signalling NaN.
- {
- WasmRunner<double> r;
- BUILD(r, WASM_F64_MIN(WASM_F64(bit_cast<double>(0xfff000000000f1e2)),
- WASM_F64(57.67)));
- CHECK_EQ(0xfff800000000f1e2, bit_cast<uint64_t>(r.Call()));
- }
- {
- WasmRunner<double> r;
- BUILD(r, WASM_F64_MIN(WASM_F64(45.73),
- WASM_F64(bit_cast<double>(0x7ff000000000f1e2))));
- CHECK_EQ(0x7ff800000000f1e2, bit_cast<uint64_t>(r.Call()));
- }
-}
-
-TEST(Run_Wasm_F64Max_Snan) {
- // Test that the instruction does not return a signalling NaN.
- {
- WasmRunner<double> r;
- BUILD(r, WASM_F64_MAX(WASM_F64(bit_cast<double>(0xfff000000000f1e2)),
- WASM_F64(57.67)));
- CHECK_EQ(0xfff800000000f1e2, bit_cast<uint64_t>(r.Call()));
- }
- {
- WasmRunner<double> r;
- BUILD(r, WASM_F64_MAX(WASM_F64(45.73),
- WASM_F64(bit_cast<double>(0x7ff000000000f1e2))));
- CHECK_EQ(0x7ff800000000f1e2, bit_cast<uint64_t>(r.Call()));
- }
-}
-
-#endif
-
-TEST(Run_Wasm_I32SConvertF32) {
- WasmRunner<int32_t> r(MachineType::Float32());
+WASM_EXEC_TEST(I32SConvertF32) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_I32_SCONVERT_F32(WASM_GET_LOCAL(0)));
+ // The upper bound is (INT32_MAX + 1) = 2^31, the lowest float whose
+ // truncation exceeds INT32_MAX.
+ float upper_bound = 2147483648.0f;
+ // We use INT32_MIN as the lower bound because (INT32_MIN - 1) is not
+ // float-representable, and no float lies strictly between (INT32_MIN - 1)
+ // and INT32_MIN.
+ float lower_bound = static_cast<float>(INT32_MIN);
FOR_FLOAT32_INPUTS(i) {
- if (*i < static_cast<float>(INT32_MAX) &&
- *i >= static_cast<float>(INT32_MIN)) {
+ if (*i < upper_bound && *i >= lower_bound) {
CHECK_EQ(static_cast<int32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -2714,28 +2603,35 @@ TEST(Run_Wasm_I32SConvertF32) {
}
}
-
-TEST(Run_Wasm_I32SConvertF64) {
- WasmRunner<int32_t> r(MachineType::Float64());
+WASM_EXEC_TEST(I32SConvertF64) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_I32_SCONVERT_F64(WASM_GET_LOCAL(0)));
+ // The upper bound is (INT32_MAX + 1), the lowest double whose truncation
+ // exceeds INT32_MAX.
+ double upper_bound = 2147483648.0;
+ // The lower bound is (INT32_MIN - 1), the greatest double whose truncation
+ // falls below INT32_MIN.
+ double lower_bound = -2147483649.0;
FOR_FLOAT64_INPUTS(i) {
- if (*i < (static_cast<double>(INT32_MAX) + 1.0) &&
- *i > (static_cast<double>(INT32_MIN) - 1.0)) {
- CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i));
+ if (*i < upper_bound && *i > lower_bound) {
+ CHECK_EQ(static_cast<int32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
}
}
}
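
The bound choices above hinge on which integers each float type represents exactly. A small standalone check, illustrative only and not part of the test suite:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Near 2^31 the float spacing is 128, so INT32_MAX (2147483647) is not
    // representable; the literal rounds up to 2^31 exactly.
    float f = 2147483647.0f;
    printf("%.1f\n", static_cast<double>(f));  // prints 2147483648.0
    // Doubles represent both bounds exactly, so (INT32_MAX + 1) and
    // (INT32_MIN - 1) can be written as literals.
    double hi = 2147483648.0;   // INT32_MAX + 1
    double lo = -2147483649.0;  // INT32_MIN - 1
    printf("%.1f %.1f\n", hi, lo);
    return 0;
  }
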
-
-TEST(Run_Wasm_I32UConvertF32) {
- WasmRunner<uint32_t> r(MachineType::Float32());
+WASM_EXEC_TEST(I32UConvertF32) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_I32_UCONVERT_F32(WASM_GET_LOCAL(0)));
-
+ // The upper bound is (UINT32_MAX + 1) = 2^32, the lowest float whose
+ // truncation exceeds UINT32_MAX.
+ float upper_bound = 4294967296.0f;
+ // The lower bound is -1, the greatest float whose truncation falls below 0.
+ float lower_bound = -1.0f;
FOR_FLOAT32_INPUTS(i) {
- if (*i < (static_cast<float>(UINT32_MAX) + 1.0) && *i > -1) {
+ if (*i < upper_bound && *i > lower_bound) {
CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -2743,13 +2639,16 @@ TEST(Run_Wasm_I32UConvertF32) {
}
}
-
-TEST(Run_Wasm_I32UConvertF64) {
- WasmRunner<uint32_t> r(MachineType::Float64());
+WASM_EXEC_TEST(I32UConvertF64) {
+ WasmRunner<uint32_t> r(execution_mode, MachineType::Float64());
BUILD(r, WASM_I32_UCONVERT_F64(WASM_GET_LOCAL(0)));
-
+ // The upper bound is (UINT32_MAX + 1), the lowest double whose truncation
+ // exceeds UINT32_MAX.
+ double upper_bound = 4294967296.0;
+ // The lower bound is -1, the greatest double whose truncation falls below 0.
+ double lower_bound = -1.0;
FOR_FLOAT64_INPUTS(i) {
- if (*i < (static_cast<float>(UINT32_MAX) + 1.0) && *i > -1) {
+ if (*i < upper_bound && *i > lower_bound) {
CHECK_EQ(static_cast<uint32_t>(*i), r.Call(*i));
} else {
CHECK_TRAP32(r.Call(*i));
@@ -2757,8 +2656,9 @@ TEST(Run_Wasm_I32UConvertF64) {
}
}
-TEST(Run_Wasm_F64CopySign) {
- WasmRunner<double> r(MachineType::Float64(), MachineType::Float64());
+WASM_EXEC_TEST(F64CopySign) {
+ WasmRunner<double> r(execution_mode, MachineType::Float64(),
+ MachineType::Float64());
BUILD(r, WASM_F64_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT64_INPUTS(i) {
@@ -2766,9 +2666,9 @@ TEST(Run_Wasm_F64CopySign) {
}
}
-
-TEST(Run_Wasm_F32CopySign) {
- WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
+WASM_EXEC_TEST(F32CopySign) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32(),
+ MachineType::Float32());
BUILD(r, WASM_F32_COPYSIGN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -2776,15 +2676,15 @@ TEST(Run_Wasm_F32CopySign) {
}
}
-void CompileCallIndirectMany(LocalType param) {
+static void CompileCallIndirectMany(LocalType param) {
// Make sure we don't run out of registers when compiling indirect calls
// with many many parameters.
TestSignatures sigs;
- for (byte num_params = 0; num_params < 40; num_params++) {
+ for (byte num_params = 0; num_params < 40; ++num_params) {
v8::base::AccountingAllocator allocator;
Zone zone(&allocator);
HandleScope scope(CcTest::InitIsolateOnce());
- TestingModule module;
+ TestingModule module(kExecuteCompiled);
FunctionSig* sig = sigs.many(&zone, kAstStmt, param, num_params);
module.AddSignature(sig);
@@ -2794,33 +2694,26 @@ void CompileCallIndirectMany(LocalType param) {
WasmFunctionCompiler t(sig, &module);
std::vector<byte> code;
- ADD_CODE(code, kExprCallIndirect, 1);
ADD_CODE(code, kExprI8Const, 0);
- for (byte p = 0; p < num_params; p++) {
+ for (byte p = 0; p < num_params; ++p) {
ADD_CODE(code, kExprGetLocal, p);
}
+ ADD_CODE(code, kExprCallIndirect, static_cast<byte>(num_params), 1);
t.Build(&code[0], &code[0] + code.size());
t.Compile();
}
}
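
Note the encoding change in the hunk above: call_indirect is now emitted after its arguments, with an explicit arity byte ahead of the signature index. For a hypothetical two-parameter call using the same helpers (signature index 1, as in the test), the body would be built as:

  std::vector<byte> code;
  ADD_CODE(code, kExprI8Const, 0);          // function-table index of the callee
  ADD_CODE(code, kExprGetLocal, 0);         // first argument
  ADD_CODE(code, kExprGetLocal, 1);         // second argument
  ADD_CODE(code, kExprCallIndirect, 2, 1);  // arity = 2, signature index = 1
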
-
TEST(Compile_Wasm_CallIndirect_Many_i32) { CompileCallIndirectMany(kAstI32); }
-
-#if WASM_64
-TEST(Compile_Wasm_CallIndirect_Many_i64) { CompileCallIndirectMany(kAstI64); }
-#endif
-
-
TEST(Compile_Wasm_CallIndirect_Many_f32) { CompileCallIndirectMany(kAstF32); }
-
TEST(Compile_Wasm_CallIndirect_Many_f64) { CompileCallIndirectMany(kAstF64); }
-TEST(Run_WASM_Int32RemS_dead) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
+WASM_EXEC_TEST(Int32RemS_dead) {
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32(),
+ MachineType::Int32());
BUILD(r, WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), WASM_ZERO);
const int32_t kMin = std::numeric_limits<int32_t>::min();
CHECK_EQ(0, r.Call(133, 100));
diff --git a/deps/v8/test/cctest/wasm/test-signatures.h b/deps/v8/test/cctest/wasm/test-signatures.h
index a5bc7b4f14..95f24cbb93 100644
--- a/deps/v8/test/cctest/wasm/test-signatures.h
+++ b/deps/v8/test/cctest/wasm/test-signatures.h
@@ -35,7 +35,8 @@ class TestSignatures {
sig_v_v(0, 0, kIntTypes4),
sig_v_i(0, 1, kIntTypes4),
sig_v_ii(0, 2, kIntTypes4),
- sig_v_iii(0, 3, kIntTypes4) {
+ sig_v_iii(0, 3, kIntTypes4),
+ sig_s_i(1, 1, kSimd128IntTypes4) {
// I used C++ and you won't believe what happened next....
for (int i = 0; i < 4; i++) kIntTypes4[i] = kAstI32;
for (int i = 0; i < 4; i++) kLongTypes4[i] = kAstI64;
@@ -44,9 +45,11 @@ class TestSignatures {
for (int i = 0; i < 4; i++) kIntLongTypes4[i] = kAstI64;
for (int i = 0; i < 4; i++) kIntFloatTypes4[i] = kAstF32;
for (int i = 0; i < 4; i++) kIntDoubleTypes4[i] = kAstF64;
+ for (int i = 0; i < 4; i++) kSimd128IntTypes4[i] = kAstS128;
kIntLongTypes4[0] = kAstI32;
kIntFloatTypes4[0] = kAstI32;
kIntDoubleTypes4[0] = kAstI32;
+ kSimd128IntTypes4[1] = kAstI32;
}
FunctionSig* i_v() { return &sig_i_v; }
@@ -71,6 +74,7 @@ class TestSignatures {
FunctionSig* v_i() { return &sig_v_i; }
FunctionSig* v_ii() { return &sig_v_ii; }
FunctionSig* v_iii() { return &sig_v_iii; }
+ FunctionSig* s_i() { return &sig_s_i; }
FunctionSig* many(Zone* zone, LocalType ret, LocalType param, int count) {
FunctionSig::Builder builder(zone, ret == kAstStmt ? 0 : 1, count);
@@ -89,6 +93,7 @@ class TestSignatures {
LocalType kIntLongTypes4[4];
LocalType kIntFloatTypes4[4];
LocalType kIntDoubleTypes4[4];
+ LocalType kSimd128IntTypes4[4];
FunctionSig sig_i_v;
FunctionSig sig_i_i;
@@ -112,6 +117,7 @@ class TestSignatures {
FunctionSig sig_v_i;
FunctionSig sig_v_ii;
FunctionSig sig_v_iii;
+ FunctionSig sig_s_i;
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc b/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc
new file mode 100644
index 0000000000..1ae78dcb4e
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc
@@ -0,0 +1,120 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-function-name-table.h"
+#include "src/wasm/wasm-module.h"
+
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+namespace {
+
+#define CHECK_STREQ(exp, found) \
+ do { \
+ Vector<const char> exp_ = (exp); \
+ Vector<const char> found_ = (found); \
+ if (V8_UNLIKELY(exp_.length() != found_.length() || \
+ memcmp(exp_.start(), found_.start(), exp_.length()))) { \
+ V8_Fatal(__FILE__, __LINE__, \
+ "Check failed: (%s) != (%s) ('%.*s' vs '%.*s').", #exp, #found, \
+ exp_.length(), exp_.start(), found_.length(), found_.start()); \
+ } \
+ } while (0)
+
+void testFunctionNameTable(Vector<Vector<const char>> names) {
+ Isolate *isolate = CcTest::InitIsolateOnce();
+ HandleAndZoneScope scope;
+
+ WasmModule module;
+ std::vector<char> all_names;
+ // No name should have offset 0, because that encodes unnamed functions.
+ // In a real wasm binary, offset 0 is impossible anyway.
+ all_names.push_back('\0');
+
+ uint32_t func_index = 0;
+ for (Vector<const char> name : names) {
+ size_t name_offset = name.start() ? all_names.size() : 0;
+ all_names.insert(all_names.end(), name.start(),
+ name.start() + name.length());
+ // Make every second function name null-terminated.
+ if (func_index % 2) all_names.push_back('\0');
+ module.functions.push_back({nullptr, 0, 0,
+ static_cast<uint32_t>(name_offset),
+ static_cast<uint32_t>(name.length()), 0, 0});
+ ++func_index;
+ }
+
+ module.module_start = reinterpret_cast<byte *>(all_names.data());
+ module.module_end = module.module_start + all_names.size();
+
+ Handle<Object> wasm_function_name_table =
+ BuildFunctionNamesTable(isolate, &module);
+ CHECK(wasm_function_name_table->IsByteArray());
+
+ func_index = 0;
+ for (Vector<const char> name : names) {
+ MaybeHandle<String> string = GetWasmFunctionNameFromTable(
+ Handle<ByteArray>::cast(wasm_function_name_table), func_index);
+ if (name.start()) {
+ CHECK(string.ToHandleChecked()->IsUtf8EqualTo(name));
+ } else {
+ CHECK(string.is_null());
+ }
+ ++func_index;
+ }
+}
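
For reference, the buffer the helper above builds for the two names {"foo", "bar"} lays out as follows (derived from the code; offsets are illustrative):

  offset:   0    1  2  3    4  5  6    7
  bytes:  '\0'   f  o  o    b  a  r  '\0'

  functions[0]: name_offset = 1, name_length = 3  (even index, no terminator)
  functions[1]: name_offset = 4, name_length = 3  (odd index, NUL-terminated)

The sentinel byte at offset 0 guarantees no real name starts there, since offset 0 encodes an unnamed function.
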
+
+void testFunctionNameTable(Vector<const char *> names) {
+ std::vector<Vector<const char>> names_vec;
+ for (const char *name : names)
+ names_vec.push_back(name ? CStrVector(name) : Vector<const char>());
+ testFunctionNameTable(Vector<Vector<const char>>(
+ names_vec.data(), static_cast<int>(names_vec.size())));
+}
+
+} // namespace
+
+TEST(NoFunctions) { testFunctionNameTable(Vector<Vector<const char>>()); }
+
+TEST(OneFunction) {
+ const char *names[] = {"foo"};
+ testFunctionNameTable(ArrayVector(names));
+}
+
+TEST(ThreeFunctions) {
+ const char *names[] = {"foo", "bar", "baz"};
+ testFunctionNameTable(ArrayVector(names));
+}
+
+TEST(OneUnnamedFunction) {
+ const char *names[] = {""};
+ testFunctionNameTable(ArrayVector(names));
+}
+
+TEST(UnnamedFirstFunction) {
+ const char *names[] = {"", "bar", "baz"};
+ testFunctionNameTable(ArrayVector(names));
+}
+
+TEST(UnnamedLastFunction) {
+ const char *names[] = {"bar", "baz", ""};
+ testFunctionNameTable(ArrayVector(names));
+}
+
+TEST(ThreeUnnamedFunctions) {
+ const char *names[] = {"", "", ""};
+ testFunctionNameTable(ArrayVector(names));
+}
+
+TEST(UTF8Names) {
+ const char *names[] = {"↱fun↰", "↺", "alpha:α beta:β"};
+ testFunctionNameTable(ArrayVector(names));
+}
+
+TEST(UnnamedVsEmptyNames) {
+ const char *names[] = {"", nullptr, nullptr, ""};
+ testFunctionNameTable(ArrayVector(names));
+}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
new file mode 100644
index 0000000000..f2a848161b
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -0,0 +1,164 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/test-signatures.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+using v8::Local;
+using v8::Utils;
+
+namespace {
+
+#define CHECK_CSTREQ(exp, found) \
+ do { \
+ const char* exp_ = (exp); \
+ const char* found_ = (found); \
+ DCHECK_NOT_NULL(exp); \
+ if (V8_UNLIKELY(found_ == nullptr || strcmp(exp_, found_) != 0)) { \
+ V8_Fatal(__FILE__, __LINE__, \
+ "Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, \
+ exp_, found_ ? found_ : "<null>"); \
+ } \
+ } while (0)
+
+void PrintStackTrace(v8::Local<v8::StackTrace> stack) {
+ printf("Stack Trace (length %d):\n", stack->GetFrameCount());
+ for (int i = 0, e = stack->GetFrameCount(); i != e; ++i) {
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(i);
+ v8::Local<v8::String> script = frame->GetScriptName();
+ v8::Local<v8::String> func = frame->GetFunctionName();
+ printf("[%d] (%s) %s:%d:%d\n", i,
+ script.IsEmpty() ? "<null>" : *v8::String::Utf8Value(script),
+ func.IsEmpty() ? "<null>" : *v8::String::Utf8Value(func),
+ frame->GetLineNumber(), frame->GetColumn());
+ }
+}
+
+struct ExceptionInfo {
+ const char* func_name;
+ int line_nr;
+ int column;
+};
+
+template <int N>
+void CheckExceptionInfos(Handle<Object> exc,
+ const ExceptionInfo (&excInfos)[N]) {
+ // Check that it's indeed an Error object.
+ CHECK(exc->IsJSError());
+
+ // Extract stack frame from the exception.
+ Local<v8::Value> localExc = Utils::ToLocal(exc);
+ v8::Local<v8::StackTrace> stack = v8::Exception::GetStackTrace(localExc);
+ PrintStackTrace(stack);
+ CHECK(!stack.IsEmpty());
+ CHECK_EQ(N, stack->GetFrameCount());
+
+ for (int frameNr = 0; frameNr < N; ++frameNr) {
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(frameNr);
+ v8::String::Utf8Value funName(frame->GetFunctionName());
+ CHECK_CSTREQ(excInfos[frameNr].func_name, *funName);
+ CHECK_EQ(excInfos[frameNr].line_nr, frame->GetLineNumber());
+ CHECK_EQ(excInfos[frameNr].column, frame->GetColumn());
+ }
+}
+
+} // namespace
+
+// Call from JS to WASM to JS and throw an Error from JS.
+TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
+ TestSignatures sigs;
+ TestingModule module;
+
+ // Initialize WasmFunctionCompiler first, since it sets up the HandleScope.
+ WasmFunctionCompiler comp1(sigs.v_v(), &module);
+
+ uint32_t js_throwing_index = module.AddJsFunction(
+ sigs.v_v(),
+ "(function js() {\n function a() {\n throw new Error(); };\n a(); })");
+
+ // Add a nop so that we don't always get position 1.
+ BUILD(comp1, WASM_NOP, WASM_CALL_FUNCTION0(js_throwing_index));
+ uint32_t wasm_index = comp1.CompileAndAdd();
+
+ WasmFunctionCompiler comp2(sigs.v_v(), &module);
+ BUILD(comp2, WASM_CALL_FUNCTION0(wasm_index));
+ uint32_t wasm_index_2 = comp2.CompileAndAdd();
+
+ Handle<JSFunction> js_wasm_wrapper = module.WrapCode(wasm_index_2);
+
+ Handle<JSFunction> js_trampoline = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CompileRun("(function callFn(fn) { fn(); })"))));
+
+ Isolate* isolate = js_wasm_wrapper->GetIsolate();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
+ v8::StackTrace::kOverview);
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ MaybeHandle<Object> maybe_exc;
+ Handle<Object> args[] = {js_wasm_wrapper};
+ MaybeHandle<Object> returnObjMaybe =
+ Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
+ CHECK(returnObjMaybe.is_null());
+
+ // The column is 1-based, so add 1 to the actual byte offset.
+ ExceptionInfo expected_exceptions[] = {
+ {"a", 3, 8}, // -
+ {"js", 4, 2}, // -
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index), 3}, // -
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index_2), 2}, // -
+ {"callFn", 1, 24} // -
+ };
+ CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
+}
+
+// Trigger a trap in WASM, stack should be JS -> WASM -> WASM.
+TEST(CollectDetailedWasmStack_WasmError) {
+ TestSignatures sigs;
+ TestingModule module;
+
+ WasmFunctionCompiler comp1(sigs.i_v(), &module,
+ ArrayVector("exec_unreachable"));
+ // Set the execution context so that a runtime error can be thrown.
+ comp1.SetModuleContext();
+ BUILD(comp1, WASM_UNREACHABLE);
+ uint32_t wasm_index = comp1.CompileAndAdd();
+
+ WasmFunctionCompiler comp2(sigs.i_v(), &module,
+ ArrayVector("call_exec_unreachable"));
+ BUILD(comp2, WASM_CALL_FUNCTION0(wasm_index));
+ uint32_t wasm_index_2 = comp2.CompileAndAdd();
+
+ Handle<JSFunction> js_wasm_wrapper = module.WrapCode(wasm_index_2);
+
+ Handle<JSFunction> js_trampoline = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CompileRun("(function callFn(fn) { fn(); })"))));
+
+ Isolate* isolate = js_wasm_wrapper->GetIsolate();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
+ v8::StackTrace::kOverview);
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ MaybeHandle<Object> maybe_exc;
+ Handle<Object> args[] = {js_wasm_wrapper};
+ MaybeHandle<Object> maybe_return_obj =
+ Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
+ CHECK(maybe_return_obj.is_null());
+
+ // The column is 1-based, so add 1 to the actual byte offset.
+ ExceptionInfo expected_exceptions[] = {
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index), 2}, // -
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index_2), 2}, // -
+ {"callFn", 1, 24} // -
+ };
+ CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
+}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
new file mode 100644
index 0000000000..30f5d48a07
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -0,0 +1,139 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/test-signatures.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+using v8::Local;
+using v8::Utils;
+
+namespace {
+
+#define CHECK_CSTREQ(exp, found) \
+ do { \
+ const char* exp_ = (exp); \
+ const char* found_ = (found); \
+ DCHECK_NOT_NULL(exp); \
+ if (V8_UNLIKELY(found_ == nullptr || strcmp(exp_, found_) != 0)) { \
+ V8_Fatal(__FILE__, __LINE__, \
+ "Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, \
+ exp_, found_ ? found_ : "<null>"); \
+ } \
+ } while (0)
+
+struct ExceptionInfo {
+ const char* func_name;
+ int line_nr;
+ int column;
+};
+
+template <int N>
+void CheckExceptionInfos(Handle<Object> exc,
+ const ExceptionInfo (&excInfos)[N]) {
+ // Check that it's indeed an Error object.
+ CHECK(exc->IsJSError());
+
+ // Extract stack frame from the exception.
+ Local<v8::Value> localExc = Utils::ToLocal(exc);
+ v8::Local<v8::StackTrace> stack = v8::Exception::GetStackTrace(localExc);
+ CHECK(!stack.IsEmpty());
+ CHECK_EQ(N, stack->GetFrameCount());
+
+ for (int frameNr = 0; frameNr < N; ++frameNr) {
+ v8::Local<v8::StackFrame> frame = stack->GetFrame(frameNr);
+ v8::String::Utf8Value funName(frame->GetFunctionName());
+ CHECK_CSTREQ(excInfos[frameNr].func_name, *funName);
+ CHECK_EQ(excInfos[frameNr].line_nr, frame->GetLineNumber());
+ CHECK_EQ(excInfos[frameNr].column, frame->GetColumn());
+ }
+}
+
+} // namespace
+
+// Trigger a trap for executing unreachable.
+TEST(Unreachable) {
+ TestSignatures sigs;
+ TestingModule module;
+
+ WasmFunctionCompiler comp1(sigs.v_v(), &module,
+ ArrayVector("exec_unreachable"));
+ // Set the execution context so that a runtime error can be thrown.
+ comp1.SetModuleContext();
+ BUILD(comp1, WASM_UNREACHABLE);
+ uint32_t wasm_index = comp1.CompileAndAdd();
+
+ Handle<JSFunction> js_wasm_wrapper = module.WrapCode(wasm_index);
+
+ Handle<JSFunction> js_trampoline = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CompileRun("(function callFn(fn) { fn(); })"))));
+
+ Isolate* isolate = js_wasm_wrapper->GetIsolate();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
+ v8::StackTrace::kOverview);
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ MaybeHandle<Object> maybe_exc;
+ Handle<Object> args[] = {js_wasm_wrapper};
+ MaybeHandle<Object> returnObjMaybe =
+ Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
+ CHECK(returnObjMaybe.is_null());
+
+ // The column is 1-based, so add 1 to the actual byte offset.
+ ExceptionInfo expected_exceptions[] = {
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index), 2}, // --
+ {"callFn", 1, 24} // --
+ };
+ CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
+}
+
+// Trigger a trap for loading from out-of-bounds.
+TEST(IllegalLoad) {
+ TestSignatures sigs;
+ TestingModule module;
+
+ WasmFunctionCompiler comp1(sigs.v_v(), &module, ArrayVector("mem_oob"));
+ // Set the execution context so that a runtime error can be thrown.
+ comp1.SetModuleContext();
+ BUILD(comp1, WASM_IF(WASM_ONE,
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V_1(-3))));
+ uint32_t wasm_index = comp1.CompileAndAdd();
+
+ WasmFunctionCompiler comp2(sigs.v_v(), &module, ArrayVector("call_mem_oob"));
+ // Insert a NOP so that the position of the call is not one.
+ BUILD(comp2, WASM_NOP, WASM_CALL_FUNCTION0(wasm_index));
+ uint32_t wasm_index_2 = comp2.CompileAndAdd();
+
+ Handle<JSFunction> js_wasm_wrapper = module.WrapCode(wasm_index_2);
+
+ Handle<JSFunction> js_trampoline = Handle<JSFunction>::cast(
+ v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
+ CompileRun("(function callFn(fn) { fn(); })"))));
+
+ Isolate* isolate = js_wasm_wrapper->GetIsolate();
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
+ v8::StackTrace::kOverview);
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ MaybeHandle<Object> maybe_exc;
+ Handle<Object> args[] = {js_wasm_wrapper};
+ MaybeHandle<Object> returnObjMaybe =
+ Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
+ CHECK(returnObjMaybe.is_null());
+
+ // The column is 1-based, so add 1 to the actual byte offset.
+ ExceptionInfo expected_exceptions[] = {
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index), 7}, // --
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index_2), 3}, // --
+ {"callFn", 1, 24} // --
+ };
+ CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
+}
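
Both expectation tables read wasm frame positions the same way: the reported line number is the wasm function index, and the column is the byte offset of the faulting instruction plus one. Working that backwards from the expectations above (the offsets are inferred from the tables, not from a spec): in Unreachable, column 2 implies WASM_UNREACHABLE sits at byte offset 1 of exec_unreachable; in IllegalLoad, column 7 implies the out-of-bounds load sits at byte offset 6, past the WASM_IF condition.
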
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 1e85f46887..0a11fedfd1 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -9,6 +9,9 @@
#include <stdlib.h>
#include <string.h>
+#include <memory>
+
+#include "src/base/accounting-allocator.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/graph-visualizer.h"
@@ -17,9 +20,12 @@
#include "src/compiler/node.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/compiler/zone-pool.h"
#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -29,15 +35,10 @@
#include "test/cctest/compiler/call-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
-// TODO(titzer): pull WASM_64 up to a common header.
-#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
-#define WASM_64 1
-#else
-#define WASM_64 0
-#endif
-
static const uint32_t kMaxFunctions = 10;
+enum WasmExecutionMode { kExecuteInterpreted, kExecuteCompiled };
+
// TODO(titzer): check traps more robustly in tests.
// Currently, in tests, we just return 0xdeadbeef from the function in which
// the trap occurs if the runtime context is not available to throw a JavaScript
@@ -70,16 +71,21 @@ const uint32_t kMaxGlobalsSize = 128;
// {WasmModuleInstance}.
class TestingModule : public ModuleEnv {
public:
- TestingModule() : instance_(&module_), global_offset(0) {
- module_.shared_isolate = CcTest::InitIsolateOnce();
+ explicit TestingModule(WasmExecutionMode mode = kExecuteCompiled)
+ : execution_mode_(mode),
+ instance_(&module_),
+ isolate_(CcTest::InitIsolateOnce()),
+ global_offset(0),
+ interpreter_(mode == kExecuteInterpreted
+ ? new WasmInterpreter(&instance_, &allocator_)
+ : nullptr) {
module = &module_;
instance = &instance_;
instance->module = &module_;
instance->globals_start = global_data;
- instance->globals_size = kMaxGlobalsSize;
+ module_.globals_size = kMaxGlobalsSize;
instance->mem_start = nullptr;
instance->mem_size = 0;
- linker = nullptr;
origin = kWasmOrigin;
memset(global_data, 0, sizeof(global_data));
}
@@ -88,9 +94,10 @@ class TestingModule : public ModuleEnv {
if (instance->mem_start) {
free(instance->mem_start);
}
+ if (interpreter_) delete interpreter_;
}
- byte* AddMemory(size_t size) {
+ byte* AddMemory(uint32_t size) {
CHECK_NULL(instance->mem_start);
CHECK_EQ(0, instance->mem_size);
instance->mem_start = reinterpret_cast<byte*>(malloc(size));
@@ -101,19 +108,19 @@ class TestingModule : public ModuleEnv {
}
template <typename T>
- T* AddMemoryElems(size_t count) {
+ T* AddMemoryElems(uint32_t count) {
AddMemory(count * sizeof(T));
return raw_mem_start<T>();
}
template <typename T>
- T* AddGlobal(MachineType mem_type) {
- WasmGlobal* global = AddGlobal(mem_type);
+ T* AddGlobal(LocalType type) {
+ const WasmGlobal* global = AddGlobal(type);
return reinterpret_cast<T*>(instance->globals_start + global->offset);
}
byte AddSignature(FunctionSig* sig) {
- module->signatures.push_back(sig);
+ module_.signatures.push_back(sig);
size_t size = module->signatures.size();
CHECK(size < 127);
return static_cast<byte>(size - 1);
@@ -134,14 +141,22 @@ class TestingModule : public ModuleEnv {
template <typename T>
T raw_mem_at(int i) {
DCHECK(instance->mem_start);
- return reinterpret_cast<T*>(instance->mem_start)[i];
+ return ReadMemory(&(reinterpret_cast<T*>(instance->mem_start)[i]));
}
template <typename T>
T raw_val_at(int i) {
- T val;
- memcpy(&val, reinterpret_cast<void*>(instance->mem_start + i), sizeof(T));
- return val;
+ return ReadMemory(reinterpret_cast<T*>(instance->mem_start + i));
+ }
+
+ template <typename T>
+ void WriteMemory(T* p, T val) {
+ WriteLittleEndianValue<T>(p, val);
+ }
+
+ template <typename T>
+ T ReadMemory(T* p) {
+ return ReadLittleEndianValue<T>(p);
}
// Zero-initialize the memory.
@@ -159,57 +174,100 @@ class TestingModule : public ModuleEnv {
rng.NextBytes(raw, end - raw);
}
- int AddFunction(FunctionSig* sig, Handle<Code> code) {
+ uint32_t AddFunction(FunctionSig* sig, Handle<Code> code) {
if (module->functions.size() == 0) {
// TODO(titzer): Reserve space here to keep the underlying WasmFunction
// structs from moving.
- module->functions.reserve(kMaxFunctions);
+ module_.functions.reserve(kMaxFunctions);
}
uint32_t index = static_cast<uint32_t>(module->functions.size());
- module->functions.push_back(
- {sig, index, 0, 0, 0, 0, 0, 0, 0, 0, 0, false, false});
+ module_.functions.push_back({sig, index, 0, 0, 0, 0, 0});
instance->function_code.push_back(code);
+ if (interpreter_) {
+ const WasmFunction* function = &module->functions.back();
+ int interpreter_index = interpreter_->AddFunctionForTesting(function);
+ CHECK_EQ(index, static_cast<uint32_t>(interpreter_index));
+ }
DCHECK_LT(index, kMaxFunctions); // limited for testing.
return index;
}
+ uint32_t AddJsFunction(FunctionSig* sig, const char* source) {
+ Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
+ *v8::Local<v8::Function>::Cast(CompileRun(source))));
+ uint32_t index = AddFunction(sig, Handle<Code>::null());
+ Handle<Code> code =
+ CompileWasmToJSWrapper(isolate_, jsfunc, sig, index,
+ Handle<String>::null(), Handle<String>::null());
+ instance->function_code[index] = code;
+ return index;
+ }
+
+ Handle<JSFunction> WrapCode(uint32_t index) {
+ // Wrap the code so it can be called as a JS function.
+ Handle<String> name = isolate_->factory()->NewStringFromStaticChars("main");
+ Handle<JSObject> module_object = Handle<JSObject>(0, isolate_);
+ Handle<Code> code = instance->function_code[index];
+ WasmJs::InstallWasmFunctionMap(isolate_, isolate_->native_context());
+ Handle<Code> ret_code =
+ compiler::CompileJSToWasmWrapper(isolate_, this, code, index);
+ FunctionSig* funcSig = this->module->functions[index].sig;
+ Handle<ByteArray> exportedSig = isolate_->factory()->NewByteArray(
+ static_cast<int>(funcSig->parameter_count() + funcSig->return_count()),
+ TENURED);
+ exportedSig->copy_in(0, reinterpret_cast<const byte*>(funcSig->raw_data()),
+ exportedSig->length());
+ Handle<JSFunction> ret = WrapExportCodeAsJSFunction(
+ isolate_, ret_code, name,
+ static_cast<int>(this->module->functions[index].sig->parameter_count()),
+ exportedSig, module_object);
+ return ret;
+ }
+
void SetFunctionCode(uint32_t index, Handle<Code> code) {
instance->function_code[index] = code;
}
- void AddIndirectFunctionTable(int* functions, int table_size) {
- Isolate* isolate = module->shared_isolate;
- Handle<FixedArray> fixed =
- isolate->factory()->NewFixedArray(2 * table_size);
- instance->function_table = fixed;
- DCHECK_EQ(0u, module->function_table.size());
- for (int i = 0; i < table_size; i++) {
- module->function_table.push_back(functions[i]);
+ void AddIndirectFunctionTable(uint16_t* functions, uint32_t table_size) {
+ module_.function_tables.push_back(
+ {table_size, table_size, std::vector<uint16_t>()});
+ for (uint32_t i = 0; i < table_size; ++i) {
+ module_.function_tables.back().values.push_back(functions[i]);
}
+
+ Handle<FixedArray> values = BuildFunctionTable(
+ isolate_, static_cast<int>(module_.function_tables.size() - 1),
+ &module_);
+ instance->function_tables.push_back(values);
}
void PopulateIndirectFunctionTable() {
- if (instance->function_table.is_null()) return;
- int table_size = static_cast<int>(module->function_table.size());
- for (int i = 0; i < table_size; i++) {
- int function_index = module->function_table[i];
- WasmFunction* function = &module->functions[function_index];
- instance->function_table->set(i, Smi::FromInt(function->sig_index));
- instance->function_table->set(i + table_size,
- *instance->function_code[function_index]);
+ for (uint32_t i = 0; i < instance->function_tables.size(); i++) {
+ PopulateFunctionTable(instance->function_tables[i],
+ module_.function_tables[i].size,
+ &instance->function_code);
}
}
+ WasmFunction* GetFunctionAt(int index) { return &module_.functions[index]; }
+
+ WasmInterpreter* interpreter() { return interpreter_; }
+ WasmExecutionMode execution_mode() { return execution_mode_; }
+
private:
+ WasmExecutionMode execution_mode_;
WasmModule module_;
WasmModuleInstance instance_;
+ Isolate* isolate_;
+ v8::base::AccountingAllocator allocator_;
uint32_t global_offset;
V8_ALIGNED(8) byte global_data[kMaxGlobalsSize]; // preallocated global data.
+ WasmInterpreter* interpreter_;
- WasmGlobal* AddGlobal(MachineType mem_type) {
- byte size = WasmOpcodes::MemSize(mem_type);
+ const WasmGlobal* AddGlobal(LocalType type) {
+ byte size = WasmOpcodes::MemSize(WasmOpcodes::MachineTypeFor(type));
global_offset = (global_offset + size - 1) & ~(size - 1); // align
- module->globals.push_back({0, 0, mem_type, global_offset, false});
+ module_.globals.push_back({0, 0, type, global_offset, false});
global_offset += size;
// limit number of globals.
CHECK_LT(global_offset, kMaxGlobalsSize);
@@ -218,10 +276,11 @@ class TestingModule : public ModuleEnv {
};
inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
- FunctionSig* sig, const byte* start,
- const byte* end) {
- compiler::WasmGraphBuilder builder(zone, jsgraph, sig);
- TreeResult result =
+ FunctionSig* sig,
+ SourcePositionTable* source_position_table,
+ const byte* start, const byte* end) {
+ compiler::WasmGraphBuilder builder(zone, jsgraph, sig, source_position_table);
+ DecodeResult result =
BuildTFGraph(zone->allocator(), &builder, module, sig, start, end);
if (result.failed()) {
ptrdiff_t pc = result.error_pc - result.start;
@@ -356,7 +415,7 @@ class WasmFunctionWrapper : public HandleAndZoneScope,
r.LowerGraph();
}
- CompilationInfo info("testing", isolate, graph()->zone());
+ CompilationInfo info(ArrayVector("testing"), isolate, graph()->zone());
code_ =
Pipeline::GenerateCodeForTesting(&info, descriptor, graph(), nullptr);
CHECK(!code_.is_null());
@@ -382,41 +441,72 @@ class WasmFunctionWrapper : public HandleAndZoneScope,
// A helper for compiling WASM functions for testing. This class can create a
// standalone function if {module} is NULL or a function within a
// {TestingModule}. It contains the internal state for compilation (i.e.
-// TurboFan graph) and, later, interpretation.
+// TurboFan graph) and interpretation (by adding to the interpreter manually).
class WasmFunctionCompiler : public HandleAndZoneScope,
private GraphAndBuilders {
public:
- explicit WasmFunctionCompiler(FunctionSig* sig, TestingModule* module)
+ explicit WasmFunctionCompiler(
+ FunctionSig* sig, WasmExecutionMode mode,
+ Vector<const char> debug_name = ArrayVector("<WASM UNNAMED>"))
: GraphAndBuilders(main_zone()),
+ execution_mode_(mode),
jsgraph(this->isolate(), this->graph(), this->common(), nullptr,
nullptr, this->machine()),
sig(sig),
descriptor_(nullptr),
- testing_module_(module) {
- if (module) {
- // Get a new function from the testing module.
- function_ = nullptr;
- function_index_ = module->AddFunction(sig, Handle<Code>::null());
- } else {
- // Create our own function.
- function_ = new WasmFunction();
- function_->sig = sig;
- function_index_ = 0;
+ testing_module_(nullptr),
+ debug_name_(debug_name),
+ local_decls(main_zone(), sig),
+ source_position_table_(this->graph()),
+ interpreter_(nullptr) {
+ // Create our own function.
+ function_ = new WasmFunction();
+ function_->sig = sig;
+ function_->func_index = 0;
+ function_->sig_index = 0;
+ if (mode == kExecuteInterpreted) {
+ interpreter_ = new WasmInterpreter(nullptr, zone()->allocator());
+ int index = interpreter_->AddFunctionForTesting(function_);
+ CHECK_EQ(0, index);
}
}
+ explicit WasmFunctionCompiler(
+ FunctionSig* sig, TestingModule* module,
+ Vector<const char> debug_name = ArrayVector("<WASM UNNAMED>"))
+ : GraphAndBuilders(main_zone()),
+ execution_mode_(module->execution_mode()),
+ jsgraph(this->isolate(), this->graph(), this->common(), nullptr,
+ nullptr, this->machine()),
+ sig(sig),
+ descriptor_(nullptr),
+ testing_module_(module),
+ debug_name_(debug_name),
+ local_decls(main_zone(), sig),
+ source_position_table_(this->graph()),
+ interpreter_(module->interpreter()) {
+ // Get a new function from the testing module.
+ int index = module->AddFunction(sig, Handle<Code>::null());
+ function_ = testing_module_->GetFunctionAt(index);
+ }
+
~WasmFunctionCompiler() {
- if (function_) delete function_;
+ if (testing_module_) return; // testing module owns the below things.
+ delete function_;
+ if (interpreter_) delete interpreter_;
}
+ WasmExecutionMode execution_mode_;
JSGraph jsgraph;
FunctionSig* sig;
// The call descriptor is initialized when the function is compiled.
CallDescriptor* descriptor_;
TestingModule* testing_module_;
+ Vector<const char> debug_name_;
WasmFunction* function_;
- int function_index_;
LocalDeclEncoder local_decls;
+ SourcePositionTable source_position_table_;
+ WasmInterpreter* interpreter_;
Isolate* isolate() { return main_isolate(); }
Graph* graph() const { return main_graph_; }
@@ -429,16 +519,21 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
}
}
CallDescriptor* descriptor() { return descriptor_; }
+ uint32_t function_index() { return function_->func_index; }
void Build(const byte* start, const byte* end) {
// Build the TurboFan graph.
- local_decls.Prepend(&start, &end);
- TestBuildingGraph(main_zone(), &jsgraph, testing_module_, sig, start, end);
- delete[] start;
+ local_decls.Prepend(main_zone(), &start, &end);
+ TestBuildingGraph(main_zone(), &jsgraph, testing_module_, sig,
+ &source_position_table_, start, end);
+ if (interpreter_) {
+ // Add the code to the interpreter.
+ CHECK(interpreter_->SetFunctionCodeForTesting(function_, start, end));
+ }
}
byte AllocateLocal(LocalType type) {
- uint32_t index = local_decls.AddLocals(1, type, sig);
+ uint32_t index = local_decls.AddLocals(1, type);
byte result = static_cast<byte>(index);
DCHECK_EQ(index, result);
return result;
@@ -450,30 +545,52 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
if (kPointerSize == 4) {
desc = testing_module_->GetI32WasmCallDescriptor(this->zone(), desc);
}
- CompilationInfo info("wasm compile", this->isolate(), this->zone());
- Handle<Code> result =
- Pipeline::GenerateCodeForTesting(&info, desc, this->graph());
+ CompilationInfo info(debug_name_, this->isolate(), this->zone(),
+ Code::ComputeFlags(Code::WASM_FUNCTION));
+ std::unique_ptr<CompilationJob> job(Pipeline::NewWasmCompilationJob(
+ &info, graph(), desc, &source_position_table_));
+ if (job->ExecuteJob() != CompilationJob::SUCCEEDED ||
+ job->FinalizeJob() != CompilationJob::SUCCEEDED)
+ return Handle<Code>::null();
+
+ Handle<Code> code = info.code();
+
+ // Length is always 2, since usually <wasm_obj, func_index> is stored in
+ // the deopt data. Here, we only store the function index.
+ DCHECK(code->deoptimization_data() == nullptr ||
+ code->deoptimization_data()->length() == 0);
+ Handle<FixedArray> deopt_data =
+ isolate()->factory()->NewFixedArray(2, TENURED);
+ deopt_data->set(1, Smi::FromInt(static_cast<int>(function_index())));
+ deopt_data->set_length(2);
+ code->set_deoptimization_data(*deopt_data);
+
#ifdef ENABLE_DISASSEMBLER
- if (!result.is_null() && FLAG_print_opt_code) {
+ if (FLAG_print_opt_code) {
OFStream os(stdout);
- result->Disassemble("wasm code", os);
+ code->Disassemble("wasm code", os);
}
#endif
- return result;
+ return code;
}
uint32_t CompileAndAdd(uint16_t sig_index = 0) {
CHECK(testing_module_);
- function()->sig_index = sig_index;
+ function_->sig_index = sig_index;
Handle<Code> code = Compile();
- testing_module_->SetFunctionCode(function_index_, code);
- return static_cast<uint32_t>(function_index_);
+ testing_module_->SetFunctionCode(function_index(), code);
+ return function_index();
}
- WasmFunction* function() {
- if (function_) return function_;
- return &testing_module_->module->functions[function_index_];
+ // Set the context so that e.g. runtime functions can be called.
+ void SetModuleContext() {
+ if (!testing_module_->instance->context.is_null()) {
+ CHECK(testing_module_->instance->context.is_identical_to(
+ main_isolate()->native_context()));
+ return;
+ }
+ testing_module_->instance->context = main_isolate()->native_context();
}
};
@@ -482,7 +599,8 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
template <typename ReturnType>
class WasmRunner {
public:
- WasmRunner(MachineType p0 = MachineType::None(),
+ WasmRunner(WasmExecutionMode execution_mode,
+ MachineType p0 = MachineType::None(),
MachineType p1 = MachineType::None(),
MachineType p2 = MachineType::None(),
MachineType p3 = MachineType::None())
@@ -490,7 +608,7 @@ class WasmRunner {
compiled_(false),
signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
GetParameterCount(p0, p1, p2, p3), storage_),
- compiler_(&signature_, nullptr) {
+ compiler_(&signature_, execution_mode) {
InitSigStorage(p0, p1, p2, p3);
}
@@ -533,51 +651,102 @@ class WasmRunner {
void Build(const byte* start, const byte* end) {
CHECK(!compiled_);
compiled_ = true;
-
- // Build the TF graph within the compiler.
compiler_.Build(start, end);
- // Generate code.
- Handle<Code> code = compiler_.Compile();
- if (compiler_.testing_module_) {
- // Update the table of function code in the module.
- compiler_.testing_module_->SetFunctionCode(compiler_.function_index_,
- code);
- }
+ if (!interpret()) {
+ // Compile machine code and install it into the module.
+ Handle<Code> code = compiler_.Compile();
- wrapper_.SetInnerCode(code);
+ if (compiler_.testing_module_) {
+ // Update the table of function code in the module.
+ compiler_.testing_module_->SetFunctionCode(
+ compiler_.function_->func_index, code);
+ }
+
+ wrapper_.SetInnerCode(code);
+ }
}
- ReturnType Call() { return Call(0, 0, 0, 0); }
+ ReturnType Call() {
+ if (interpret()) {
+ return CallInterpreter(Vector<WasmVal>(nullptr, 0));
+ } else {
+ return Call(0, 0, 0, 0);
+ }
+ }
template <typename P0>
ReturnType Call(P0 p0) {
- return Call(p0, 0, 0, 0);
+ if (interpret()) {
+ WasmVal args[] = {WasmVal(p0)};
+ return CallInterpreter(ArrayVector(args));
+ } else {
+ return Call(p0, 0, 0, 0);
+ }
}
template <typename P0, typename P1>
ReturnType Call(P0 p0, P1 p1) {
- return Call(p0, p1, 0, 0);
+ if (interpret()) {
+ WasmVal args[] = {WasmVal(p0), WasmVal(p1)};
+ return CallInterpreter(ArrayVector(args));
+ } else {
+ return Call(p0, p1, 0, 0);
+ }
}
template <typename P0, typename P1, typename P2>
ReturnType Call(P0 p0, P1 p1, P2 p2) {
- return Call(p0, p1, p2, 0);
+ if (interpret()) {
+ WasmVal args[] = {WasmVal(p0), WasmVal(p1), WasmVal(p2)};
+ return CallInterpreter(ArrayVector(args));
+ } else {
+ return Call(p0, p1, p2, 0);
+ }
}
template <typename P0, typename P1, typename P2, typename P3>
ReturnType Call(P0 p0, P1 p1, P2 p2, P3 p3) {
- CodeRunner<int32_t> runner(CcTest::InitIsolateOnce(),
- wrapper_.GetWrapperCode(), wrapper_.signature());
- ReturnType return_value;
- int32_t result = runner.Call<void*, void*, void*, void*, void*>(
- &p0, &p1, &p2, &p3, &return_value);
- CHECK_EQ(WASM_WRAPPER_RETURN_VALUE, result);
- return return_value;
+ if (interpret()) {
+ WasmVal args[] = {WasmVal(p0), WasmVal(p1), WasmVal(p2), WasmVal(p3)};
+ return CallInterpreter(ArrayVector(args));
+ } else {
+ CodeRunner<int32_t> runner(CcTest::InitIsolateOnce(),
+ wrapper_.GetWrapperCode(),
+ wrapper_.signature());
+ ReturnType return_value;
+ int32_t result = runner.Call<void*, void*, void*, void*, void*>(
+ &p0, &p1, &p2, &p3, &return_value);
+ CHECK_EQ(WASM_WRAPPER_RETURN_VALUE, result);
+ return return_value;
+ }
+ }
+
+ ReturnType CallInterpreter(Vector<WasmVal> args) {
+ CHECK_EQ(args.length(),
+ static_cast<int>(compiler_.function_->sig->parameter_count()));
+ WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+ thread->Reset();
+ thread->PushFrame(compiler_.function_, args.start());
+ if (thread->Run() == WasmInterpreter::FINISHED) {
+ WasmVal val = thread->GetReturnValue();
+ return val.to<ReturnType>();
+ } else if (thread->state() == WasmInterpreter::TRAPPED) {
+ // TODO(titzer): return the correct trap code
+ int64_t result = 0xdeadbeefdeadbeef;
+ return static_cast<ReturnType>(result);
+ } else {
+ // TODO(titzer): falling off end
+ ReturnType val = 0;
+ return val;
+ }
}
byte AllocateLocal(LocalType type) { return compiler_.AllocateLocal(type); }
+ WasmFunction* function() { return compiler_.function_; }
+ WasmInterpreter* interpreter() { return compiler_.interpreter_; }
+
protected:
v8::base::AccountingAllocator allocator_;
Zone zone;
@@ -587,6 +756,8 @@ class WasmRunner {
WasmFunctionCompiler compiler_;
WasmFunctionWrapper<ReturnType> wrapper_;
+ bool interpret() { return compiler_.execution_mode_ == kExecuteInterpreted; }
+
static size_t GetParameterCount(MachineType p0, MachineType p1,
MachineType p2, MachineType p3) {
if (p0 == MachineType::None()) return 0;
@@ -597,6 +768,16 @@ class WasmRunner {
}
};
+// A macro to define tests that run in different engine configurations.
+// Each test body is registered twice: RunWasmCompiled_##name executes the
+// compiled code, and RunWasmInterpreted_##name executes the same body in the
+// interpreter.
+#define WASM_EXEC_TEST(name) \
+ void RunWasm_##name(WasmExecutionMode execution_mode); \
+ TEST(RunWasmCompiled_##name) { RunWasm_##name(kExecuteCompiled); } \
+ TEST(RunWasmInterpreted_##name) { RunWasm_##name(kExecuteInterpreted); } \
+ void RunWasm_##name(WasmExecutionMode execution_mode)
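
For instance, WASM_EXEC_TEST(F64Trunc) from the hunk near the top of this diff expands to roughly:

  void RunWasm_F64Trunc(WasmExecutionMode execution_mode);
  TEST(RunWasmCompiled_F64Trunc) { RunWasm_F64Trunc(kExecuteCompiled); }
  TEST(RunWasmInterpreted_F64Trunc) { RunWasm_F64Trunc(kExecuteInterpreted); }
  void RunWasm_F64Trunc(WasmExecutionMode execution_mode)
  // ... the braced test body written at the macro use site becomes the
  // definition of RunWasm_F64Trunc.
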
+
} // namespace
#endif
diff --git a/deps/v8/test/default.gyp b/deps/v8/test/default.gyp
index efc0406895..dd1d9e2dc8 100644
--- a/deps/v8/test/default.gyp
+++ b/deps/v8/test/default.gyp
@@ -19,8 +19,8 @@
'unittests/unittests.gyp:unittests_run',
],
'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'default.isolate',
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index cf3ee8c6fd..936970ee6a 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -10,6 +10,8 @@
#include "include/libplatform/libplatform.h"
+#include "src/flags.h"
+
namespace v8_fuzzer {
namespace {
@@ -25,25 +27,16 @@ void DeleteFuzzerSupport() {
} // namespace
-class FuzzerSupport::ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
-
FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
+ v8::internal::FLAG_expose_gc = true;
v8::V8::SetFlagsFromCommandLine(argc, *argv, true);
- v8::V8::InitializeICU();
+ v8::V8::InitializeICUDefaultLocation((*argv)[0]);
v8::V8::InitializeExternalStartupData((*argv)[0]);
platform_ = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform_);
v8::V8::Initialize();
- allocator_ = new ArrayBufferAllocator;
+ allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = allocator_;
isolate_ = v8::Isolate::New(create_params);
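
The hand-rolled ArrayBufferAllocator is replaced by the allocator V8 ships. A minimal standalone sketch of the same setup outside the fuzzer harness (error handling omitted; the embedder still owns and frees the allocator):

  v8::ArrayBuffer::Allocator* allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = allocator;
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  // ... use the isolate ...
  isolate->Dispose();
  delete allocator;
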
@@ -65,6 +58,7 @@ FuzzerSupport::~FuzzerSupport() {
context_.Reset();
}
+ isolate_->LowMemoryNotification();
isolate_->Dispose();
isolate_ = nullptr;
diff --git a/deps/v8/test/fuzzer/fuzzer-support.h b/deps/v8/test/fuzzer/fuzzer-support.h
index 0241c53665..c941c9c052 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.h
+++ b/deps/v8/test/fuzzer/fuzzer-support.h
@@ -24,10 +24,9 @@ class FuzzerSupport {
FuzzerSupport(const FuzzerSupport&);
FuzzerSupport& operator=(const FuzzerSupport&);
- class ArrayBufferAllocator;
v8::Platform* platform_;
- ArrayBufferAllocator* allocator_;
+ v8::ArrayBuffer::Allocator* allocator_;
v8::Isolate* isolate_;
v8::Global<v8::Context> context_;
};
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
index 6e15a906bb..c7c4cb47ba 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -6,10 +6,10 @@
'variables': {
'v8_code': 1,
},
- 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
'targets': [
{
- 'target_name': 'json_fuzzer',
+ 'target_name': 'v8_simple_json_fuzzer',
'type': 'executable',
'dependencies': [
'json_fuzzer_lib',
@@ -35,7 +35,7 @@
],
},
{
- 'target_name': 'parser_fuzzer',
+ 'target_name': 'v8_simple_parser_fuzzer',
'type': 'executable',
'dependencies': [
'parser_fuzzer_lib',
@@ -61,7 +61,7 @@
],
},
{
- 'target_name': 'regexp_fuzzer',
+ 'target_name': 'v8_simple_regexp_fuzzer',
'type': 'executable',
'dependencies': [
'regexp_fuzzer_lib',
@@ -87,7 +87,7 @@
],
},
{
- 'target_name': 'wasm_fuzzer',
+ 'target_name': 'v8_simple_wasm_fuzzer',
'type': 'executable',
'dependencies': [
'wasm_fuzzer_lib',
@@ -113,7 +113,7 @@
],
},
{
- 'target_name': 'wasm_asmjs_fuzzer',
+ 'target_name': 'v8_simple_wasm_asmjs_fuzzer',
'type': 'executable',
'dependencies': [
'wasm_asmjs_fuzzer_lib',
@@ -142,7 +142,7 @@
'target_name': 'fuzzer_support',
'type': 'static_library',
'dependencies': [
- '../../tools/gyp/v8.gyp:v8_libplatform',
+ '../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
'../..',
@@ -155,9 +155,9 @@
['component=="shared_library"', {
# fuzzers can't be built against a shared library, so we need to
# depend on the underlying static target in that case.
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
+ 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
}, {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ 'dependencies': ['../../src/v8.gyp:v8'],
}],
],
},
@@ -169,12 +169,14 @@
'target_name': 'fuzzer_run',
'type': 'none',
'dependencies': [
- 'json_fuzzer',
- 'parser_fuzzer',
- 'regexp_fuzzer',
+ 'v8_simple_json_fuzzer',
+ 'v8_simple_parser_fuzzer',
+ 'v8_simple_regexp_fuzzer',
+ 'v8_simple_wasm_fuzzer',
+ 'v8_simple_wasm_asmjs_fuzzer',
],
'includes': [
- '../../build/isolate.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'fuzzer.isolate',
diff --git a/deps/v8/test/fuzzer/fuzzer.isolate b/deps/v8/test/fuzzer/fuzzer.isolate
index 4e98edd75a..2611c72104 100644
--- a/deps/v8/test/fuzzer/fuzzer.isolate
+++ b/deps/v8/test/fuzzer/fuzzer.isolate
@@ -5,11 +5,11 @@
{
'variables': {
'files': [
- '<(PRODUCT_DIR)/json_fuzzer<(EXECUTABLE_SUFFIX)',
- '<(PRODUCT_DIR)/parser_fuzzer<(EXECUTABLE_SUFFIX)',
- '<(PRODUCT_DIR)/regexp_fuzzer<(EXECUTABLE_SUFFIX)',
- '<(PRODUCT_DIR)/wasm_fuzzer<(EXECUTABLE_SUFFIX)',
- '<(PRODUCT_DIR)/wasm_asmjs_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_json_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_parser_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_regexp_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_asmjs_fuzzer<(EXECUTABLE_SUFFIX)',
'./fuzzer.status',
'./testcfg.py',
'./json/',
diff --git a/deps/v8/test/fuzzer/json.cc b/deps/v8/test/fuzzer/json.cc
index f20e9b9a3b..121939b5a0 100644
--- a/deps/v8/test/fuzzer/json.cc
+++ b/deps/v8/test/fuzzer/json.cc
@@ -26,6 +26,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
return 0;
}
- v8::JSON::Parse(isolate, source).IsEmpty();
+ v8::JSON::Parse(support->GetContext(), source).IsEmpty();
+ isolate->RequestGarbageCollectionForTesting(
+ v8::Isolate::kFullGarbageCollection);
return 0;
}
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index be70b439ef..fc8f59ee62 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -8,6 +8,7 @@
#include "include/v8.h"
#include "src/objects.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/preparser.h"
#include "test/fuzzer/fuzzer-support.h"
@@ -38,5 +39,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
info.set_global();
v8::internal::Parser parser(&info);
parser.Parse(&info);
+ isolate->RequestGarbageCollectionForTesting(
+ v8::Isolate::kFullGarbageCollection);
return 0;
}
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index eb51da8ac7..874a434476 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -30,7 +30,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::Context::Scope context_scope(support->GetContext());
v8::TryCatch try_catch(isolate);
- i::FLAG_harmony_unicode_regexps = true;
i::FLAG_harmony_regexp_lookbehind = true;
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -63,7 +62,10 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::TryCatch try_catch(isolate);
i::MaybeHandle<i::JSRegExp> maybe_regexp =
i::JSRegExp::New(source, static_cast<i::JSRegExp::Flags>(flags));
- if (!maybe_regexp.ToHandle(&regexp)) continue;
+ if (!maybe_regexp.ToHandle(&regexp)) {
+ i_isolate->clear_pending_exception();
+ continue;
+ }
}
Test(isolate, regexp, one_byte, results_array);
Test(isolate, regexp, two_byte, results_array);
@@ -71,5 +73,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
Test(isolate, regexp, source, results_array);
}
+ isolate->RequestGarbageCollectionForTesting(
+ v8::Isolate::kFullGarbageCollection);
return 0;
}
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 35a5abb465..85a38eda08 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -26,7 +26,7 @@ class FuzzerTestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
for subtest in FuzzerTestSuite.SUB_TESTS:
- shell = '%s_fuzzer' % subtest
+ shell = 'v8_simple_%s_fuzzer' % subtest
for fname in os.listdir(os.path.join(self.root, subtest)):
if not os.path.isfile(os.path.join(self.root, subtest, fname)):
continue
diff --git a/deps/v8/test/fuzzer/wasm-asmjs.cc b/deps/v8/test/fuzzer/wasm-asmjs.cc
index 3f7477bf14..cb8b86bad4 100644
--- a/deps/v8/test/fuzzer/wasm-asmjs.cc
+++ b/deps/v8/test/fuzzer/wasm-asmjs.cc
@@ -33,7 +33,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::TryCatch try_catch(isolate);
v8::internal::WasmJs::InstallWasmFunctionMap(i_isolate,
i_isolate->native_context());
- v8::internal::wasm::CompileAndRunWasmModule(i_isolate, data, data + size,
- true);
+ v8::internal::wasm::testing::CompileAndRunWasmModule(i_isolate, data,
+ data + size, true);
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 8750cbf786..27259c6417 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -33,7 +33,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::TryCatch try_catch(isolate);
v8::internal::WasmJs::InstallWasmFunctionMap(i_isolate,
i_isolate->native_context());
- v8::internal::wasm::CompileAndRunWasmModule(i_isolate, data, data + size,
- false);
+ v8::internal::wasm::testing::CompileAndRunWasmModule(i_isolate, data,
+ data + size, false);
return 0;
}
diff --git a/deps/v8/test/ignition.gyp b/deps/v8/test/ignition.gyp
deleted file mode 100644
index 6aebec9e19..0000000000
--- a/deps/v8/test/ignition.gyp
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-{
- 'conditions': [
- ['test_isolation_mode != "noop"', {
- 'targets': [
- {
- 'target_name': 'ignition_run',
- 'type': 'none',
- 'dependencies': [
- 'cctest/cctest.gyp:cctest_run',
- 'mjsunit/mjsunit.gyp:mjsunit_run',
- ],
- 'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
- ],
- 'sources': [
- 'ignition.isolate',
- ],
- },
- ],
- }],
- ],
-}
diff --git a/deps/v8/test/inspector_protocol_parser_test/BUILD.gn b/deps/v8/test/inspector_protocol_parser_test/BUILD.gn
new file mode 100644
index 0000000000..52c3788717
--- /dev/null
+++ b/deps/v8/test/inspector_protocol_parser_test/BUILD.gn
@@ -0,0 +1,6 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+# TODO: bring tests back once inspector_protocol is stable and moves out of blink.
diff --git a/deps/v8/test/inspector_protocol_parser_test/DEPS b/deps/v8/test/inspector_protocol_parser_test/DEPS
new file mode 100644
index 0000000000..06d0b24e46
--- /dev/null
+++ b/deps/v8/test/inspector_protocol_parser_test/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+testing"
+]
diff --git a/deps/v8/test/inspector_protocol_parser_test/RunTests.cpp b/deps/v8/test/inspector_protocol_parser_test/RunTests.cpp
new file mode 100644
index 0000000000..065b0dcd2b
--- /dev/null
+++ b/deps/v8/test/inspector_protocol_parser_test/RunTests.cpp
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+class InspectorProtocolTestEnvironment final : public ::testing::Environment {};
+
+} // namespace
+
+int main(int argc, char** argv) {
+ testing::InitGoogleMock(&argc, argv);
+ testing::AddGlobalTestEnvironment(new InspectorProtocolTestEnvironment);
+ return RUN_ALL_TESTS();
+}
diff --git a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp
new file mode 100644
index 0000000000..8fe2da0fe2
--- /dev/null
+++ b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp
@@ -0,0 +1,12 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO: bring tests back once inspector_protocol is stable and moves out of blink.
+{ 'variables': {
+ },
+ 'targets': [
+ ],
+ 'conditions': [
+ ],
+}
diff --git a/deps/v8/test/ignition.isolate b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.isolate
index a8e4d5aa02..66052bfc3a 100644
--- a/deps/v8/test/ignition.isolate
+++ b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.isolate
@@ -1,16 +1,13 @@
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+
+# TODO: bring tests back once inspector_protocol is stable and moves out of blink.
{
'variables': {
- 'command': [
- '../tools/run-tests.py',
+ 'files': [
],
},
'includes': [
- 'cctest/cctest.isolate',
- 'mjsunit/mjsunit.isolate',
- 'message/message.isolate',
- 'webkit/webkit.isolate',
],
-}
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status
new file mode 100644
index 0000000000..9aa1649869
--- /dev/null
+++ b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status
@@ -0,0 +1,6 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/deps/v8/test/intl/assert.js b/deps/v8/test/intl/assert.js
index 3180e6f96e..e17615267a 100644
--- a/deps/v8/test/intl/assert.js
+++ b/deps/v8/test/intl/assert.js
@@ -87,14 +87,13 @@ function deepEquals(a, b) {
return deepObjectEquals(a, b);
}
-
/**
- * Throws an exception, and prints the values in case of error.
+ * Throws an exception containing the user_message (if any) and the values.
*/
-function fail(expected, found) {
+function fail(expected, found, user_message = '') {
// TODO(cira): Replace String with PrettyPrint for objects and arrays.
- var message = 'Failure: expected <' + String(expected) + '>, found <' +
- String(found) + '>.';
+ var message = 'Failure' + (user_message ? ' (' + user_message + ')' : '') +
+ ': expected <' + String(expected) + '>, found <' + String(found) + '>.';
throw new Error(message);
}
@@ -102,9 +101,9 @@ function fail(expected, found) {
/**
* Throws if two variables have different types or values.
*/
-function assertEquals(expected, found) {
+function assertEquals(expected, found, user_message = '') {
if (!deepEquals(expected, found)) {
- fail(expected, found);
+ fail(expected, found, user_message);
}
}
@@ -112,49 +111,49 @@ function assertEquals(expected, found) {
/**
* Throws if value is false.
*/
-function assertTrue(value) {
- assertEquals(true, value)
+function assertTrue(value, user_message = '') {
+ assertEquals(true, value, user_message);
}
/**
* Throws if value is true.
*/
-function assertFalse(value) {
- assertEquals(false, value);
+function assertFalse(value, user_message = '') {
+ assertEquals(false, value, user_message);
}
/**
- * Returns true if code throws specified exception.
+ * Runs code() and asserts that it throws the specified exception.
*/
function assertThrows(code, type_opt, cause_opt) {
- var threwException = true;
try {
if (typeof code == 'function') {
code();
} else {
eval(code);
}
- threwException = false;
} catch (e) {
if (typeof type_opt == 'function') {
assertInstanceof(e, type_opt);
}
if (arguments.length >= 3) {
- assertEquals(e.type, cause_opt);
+ assertEquals(cause_opt, e.type, 'thrown exception type mismatch');
}
// Success.
return;
}
- throw new Error("Did not throw exception");
+ var expected = arguments.length >= 3 ? cause_opt :
+ typeof type_opt == 'function' ? type_opt : 'any exception';
+ fail(expected, 'no exception', 'expected thrown exception');
}
/**
- * Throws an exception if code throws.
+ * Runs code() and asserts that it does not throw any exception.
*/
-function assertDoesNotThrow(code, name_opt) {
+function assertDoesNotThrow(code, user_message = '') {
try {
if (typeof code == 'function') {
code();
@@ -162,7 +161,7 @@ function assertDoesNotThrow(code, name_opt) {
eval(code);
}
} catch (e) {
- fail("threw an exception: ", e.message || e, name_opt);
+ fail("no expection", "exception: " + String(e), user_message);
}
}
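
For a sense of what the new optional user_message parameter buys, here is a minimal standalone sketch (assumptions: run outside the real harness, with a strict-equality stand-in for the deepEquals-based assertEquals above):

function fail(expected, found, user_message = '') {
  var message = 'Failure' + (user_message ? ' (' + user_message + ')' : '') +
      ': expected <' + String(expected) + '>, found <' + String(found) + '>.';
  throw new Error(message);
}

function assertEquals(expected, found, user_message = '') {
  // Simplified: the real harness compares with deepEquals().
  if (expected !== found) fail(expected, found, user_message);
}

try {
  assertEquals(4, 5, 'thrown exception type mismatch');
} catch (e) {
  // Failure (thrown exception type mismatch): expected <4>, found <5>.
  print(e.message);
}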
diff --git a/deps/v8/test/intl/date-format/calendar-with-multiple-type-subtags.js b/deps/v8/test/intl/date-format/calendar-with-multiple-type-subtags.js
new file mode 100644
index 0000000000..f4c00da61d
--- /dev/null
+++ b/deps/v8/test/intl/date-format/calendar-with-multiple-type-subtags.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var options = Intl.DateTimeFormat("ar-u-ca-islamic-civil").resolvedOptions();
+assertEquals(options.calendar, "islamic-civil");
+
+options = Intl.DateTimeFormat("ar-u-ca-islamic-umalqura").resolvedOptions();
+assertEquals(options.calendar, "islamic-umalqura");
+
+var options = Intl.DateTimeFormat("ar-u-ca-islamic-civil").resolvedOptions();
+assertEquals(options.calendar, "islamic-civil");
+
+options =
+ Intl.DateTimeFormat("ar-u-ca-islamic-civil-nu-arab").resolvedOptions();
+assertEquals(options.calendar, "islamic-civil");
+assertEquals(options.numberingSystem, "arab");
+
+// The default numberingSystem is 'arab' for the 'ar' locale. Set it to 'latn'
+// to check that the 'nu-latn' keyword is parsed correctly.
+options =
+ Intl.DateTimeFormat("ar-u-ca-islamic-civil-nu-latn").resolvedOptions();
+assertEquals(options.calendar, "islamic-civil");
+assertEquals(options.numberingSystem, "latn");
+
+// ethioaa is the canonical LDML/BCP 47 name.
+options = Intl.DateTimeFormat("am-u-ca-ethiopic-amete-alem").resolvedOptions();
+assertEquals(options.calendar, "ethioaa");
+
+// Invalid calendar type "foo-bar". Fall back to the default.
+options = Intl.DateTimeFormat("ar-u-ca-foo-bar").resolvedOptions();
+assertEquals(options.calendar, "gregory");
+
+// No type subtag for ca. Fall back to the default.
+options = Intl.DateTimeFormat("ar-u-ca-nu-arab").resolvedOptions();
+assertEquals(options.calendar, "gregory");
+
+// Too long a type subtag for ca.
+assertThrows(() => Intl.DateTimeFormat("ar-u-ca-foobarbaz"), RangeError);
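
The locale tags exercised above pack several pieces into one BCP 47 string; an illustrative breakdown of the richest case (exact resolution still depends on the ICU data compiled into the build):

// "ar"                 language subtag
// "-u-"                Unicode extension marker
// "ca-islamic-civil"   keyword: calendar, with two type subtags
// "nu-arab"            keyword: numberingSystem
var o = Intl.DateTimeFormat("ar-u-ca-islamic-civil-nu-arab").resolvedOptions();
print(o.calendar + " " + o.numberingSystem);  // islamic-civil arab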
diff --git a/deps/v8/test/intl/date-format/parse-MMMdy.js b/deps/v8/test/intl/date-format/parse-MMMdy.js
index b23a3cde3a..f8291f49a9 100644
--- a/deps/v8/test/intl/date-format/parse-MMMdy.js
+++ b/deps/v8/test/intl/date-format/parse-MMMdy.js
@@ -28,6 +28,8 @@
// Testing v8Parse method for date and time pattern.
// Month is represented as a short name.
+// Flags: --intl-extra
+
var dtf = new Intl.DateTimeFormat(['en'],
{year: 'numeric', month: 'short',
day: 'numeric',
@@ -41,11 +43,17 @@ assertEquals(1974, date.getUTCFullYear());
assertEquals(1, date.getUTCMonth());
assertEquals(4, date.getUTCDate());
-// Missing , in the pattern.
-assertEquals(undefined, dtf.v8Parse('Feb 4 1974'));
+// Can deal with a missing ','.
+date = dtf.v8Parse('Feb 4 1974');
+assertEquals(1974, date.getUTCFullYear());
+assertEquals(1, date.getUTCMonth());
+assertEquals(4, date.getUTCDate());
// Extra "th" after 4 in the pattern.
assertEquals(undefined, dtf.v8Parse('Feb 4th, 1974'));
-// Wrong pattern.
-assertEquals(undefined, dtf.v8Parse('2/4/1974'));
+// TODO(jshin): Make sure this is the intended behavior.
+date = dtf.v8Parse('2/4/1974');
+assertEquals(1974, date.getUTCFullYear());
+assertEquals(1, date.getUTCMonth());
+assertEquals(4, date.getUTCDate());
diff --git a/deps/v8/test/intl/date-format/parse-invalid-input.js b/deps/v8/test/intl/date-format/parse-invalid-input.js
index ab0b889ff5..47a95477eb 100644
--- a/deps/v8/test/intl/date-format/parse-invalid-input.js
+++ b/deps/v8/test/intl/date-format/parse-invalid-input.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --intl-extra
+
// Invalid input is handled properly.
var dtf = new Intl.DateTimeFormat(['en']);
diff --git a/deps/v8/test/intl/date-format/parse-mdy.js b/deps/v8/test/intl/date-format/parse-mdy.js
index 7b1a79af86..a248a08422 100644
--- a/deps/v8/test/intl/date-format/parse-mdy.js
+++ b/deps/v8/test/intl/date-format/parse-mdy.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --intl-extra
+
// Testing v8Parse method for date only.
function checkDate(date) {
diff --git a/deps/v8/test/intl/date-format/parse-mdyhms.js b/deps/v8/test/intl/date-format/parse-mdyhms.js
index 73efb62053..766f7192fe 100644
--- a/deps/v8/test/intl/date-format/parse-mdyhms.js
+++ b/deps/v8/test/intl/date-format/parse-mdyhms.js
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Testing v8Parse method for date and time pattern.
+//
+// Flags: --intl-extra
var dtf = new Intl.DateTimeFormat(['en'],
{year: 'numeric', month: 'numeric',
@@ -34,7 +36,7 @@ var dtf = new Intl.DateTimeFormat(['en'],
timeZone: 'UTC'});
// Make sure we have pattern we expect (may change in the future).
-assertEquals('M/d/y h:mm:ss a', dtf.resolved.pattern);
+assertEquals('M/d/y, h:mm:ss a', dtf.resolved.pattern);
var date = dtf.v8Parse('2/4/74 12:30:42 pm');
assertEquals(1974, date.getUTCFullYear());
@@ -44,14 +46,20 @@ assertEquals(12, date.getUTCHours());
assertEquals(30, date.getUTCMinutes());
assertEquals(42, date.getUTCSeconds());
+// Can deal with '-' vs '/'.
+date = dtf.v8Parse('2-4-74 12:30:42 am');
+assertEquals(1974, date.getUTCFullYear());
+assertEquals(1, date.getUTCMonth());
+assertEquals(4, date.getUTCDate());
+assertEquals(0, date.getUTCHours());
+assertEquals(30, date.getUTCMinutes());
+assertEquals(42, date.getUTCSeconds());
+
// AM/PM were not specified.
-assertEquals(undefined, dtf.v8Parse('2/4/74 12:30:12'));
+assertEquals(undefined, dtf.v8Parse('2/4/74 12:30:42'));
// Time was not specified.
assertEquals(undefined, dtf.v8Parse('2/4/74'));
// Month is numeric, so it fails on "Feb".
assertEquals(undefined, dtf.v8Parse('Feb 4th 1974'));
-
-// Wrong date delimiter.
-assertEquals(undefined, dtf.v8Parse('2-4-74 12:30:12 am'));
diff --git a/deps/v8/test/intl/extra-flag.js b/deps/v8/test/intl/extra-flag.js
new file mode 100644
index 0000000000..3d434a302b
--- /dev/null
+++ b/deps/v8/test/intl/extra-flag.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --intl-extra
+
+// Turning on the flag creates the non-standard properties.
+
+var dtf = new Intl.DateTimeFormat(['en']);
+assertTrue('v8Parse' in dtf);
+assertTrue('resolved' in dtf);
+assertTrue(!!dtf.resolved && 'pattern' in dtf.resolved);
+
+var nf = new Intl.NumberFormat(['en']);
+assertTrue('v8Parse' in nf);
+assertTrue('resolved' in nf);
+assertTrue(!!nf.resolved && 'pattern' in nf.resolved);
+
+var col = new Intl.Collator(['en']);
+assertTrue('resolved' in col);
+
+var br = new Intl.v8BreakIterator(['en']);
+assertTrue('resolved' in br);
diff --git a/deps/v8/test/intl/general/case-mapping.js b/deps/v8/test/intl/general/case-mapping.js
new file mode 100644
index 0000000000..a73622bf0d
--- /dev/null
+++ b/deps/v8/test/intl/general/case-mapping.js
@@ -0,0 +1,138 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --icu_case_mapping
+
+// Some edge cases that unibrow got wrong
+
+assertEquals("𐐘", "𐑀".toUpperCase());
+assertEquals("𐑀", "𐐘".toLowerCase());
+assertEquals("σ", "Σ".toLowerCase());
+
+// Some different paths in the ICU case conversion fastpath
+
+assertEquals("σς", "\u03A3\u03A3".toLowerCase());
+// Expand sharp s in latin1 fastpath
+assertEquals("ASSB", "A\u00DFB".toUpperCase());
+assertEquals("AB", "Ab".toUpperCase());
+// Find first upper case in fastpath
+assertEquals("ab", "aB".toLowerCase());
+assertEquals("AÜ", "aü".toUpperCase());
+assertEquals("AÜ", "AÜ".toUpperCase());
+assertEquals("aü", "aü".toLowerCase());
+assertEquals("aü", "AÜ".toLowerCase());
+assertEquals("aü", "AÜ".toLowerCase());
+
+// Starts with fastpath, but switches to full Unicode path
+// U+00FF is uppercased to U+0178.
+assertEquals("AŸ", "aÿ".toUpperCase());
+// U+00B5 (µ) is uppercased to U+039C (Μ)
+assertEquals("AΜ", "aµ".toUpperCase());
+
+// Buffer size increase
+assertEquals("CSSBẶ", "cßbặ".toUpperCase());
+assertEquals("FIFLFFIFFL", "\uFB01\uFB02\uFB03\uFB04".toUpperCase());
+// OneByte input with buffer size increase: non-fast path
+assertEquals("ABCSS", "abCß".toLocaleUpperCase("tr"));
+
+// More comprehensive tests for "tr", "az" and "lt" are in
+// test262/intl402/Strings/*
+
+// Buffer size decrease with a single locale or locale list.
+// In Turkic (tr, az), U+0307 preceded by Capital Letter I is dropped.
+assertEquals("abci", "aBcI\u0307".toLocaleLowerCase("tr"));
+assertEquals("abci", "aBcI\u0307".toLocaleLowerCase("az"));
+assertEquals("abci", "aBcI\u0307".toLocaleLowerCase(["tr", "en"]));
+
+// Cons string
+assertEquals("abcijkl", ("aBcI" + "\u0307jkl").toLocaleLowerCase("tr"));
+assertEquals("abcijkl",
+ ("aB" + "cI" + "\u0307j" + "kl").toLocaleLowerCase("tr"));
+assertEquals("abci\u0307jkl", ("aBcI" + "\u0307jkl").toLocaleLowerCase("en"));
+assertEquals("abci\u0307jkl",
+ ("aB" + "cI" + "\u0307j" + "kl").toLocaleLowerCase("en"));
+assertEquals("abci\u0307jkl", ("aBcI" + "\u0307jkl").toLowerCase());
+assertEquals("abci\u0307jkl",
+ ("aB" + "cI" + "\u0307j" + "kl").toLowerCase());
+
+// "tr" and "az" should behave identically.
+assertEquals("aBcI\u0307".toLocaleLowerCase("tr"),
+ "aBcI\u0307".toLocaleLowerCase("az"));
+// What matters is the first locale in the locale list.
+assertEquals("aBcI\u0307".toLocaleLowerCase(["tr", "en", "fr"]),
+ "aBcI\u0307".toLocaleLowerCase("tr"));
+assertEquals("aBcI\u0307".toLocaleLowerCase(["en", "tr", "az"]),
+ "aBcI\u0307".toLocaleLowerCase("en"));
+assertEquals("aBcI\u0307".toLocaleLowerCase(["en", "tr", "az"]),
+ "aBcI\u0307".toLowerCase());
+
+// An empty locale list is the same as the default locale. Try these tests
+// under Turkish and Greek locale.
+assertEquals("aBcI\u0307".toLocaleLowerCase([]),
+ "aBcI\u0307".toLocaleLowerCase());
+assertEquals("aBcI\u0307".toLocaleLowerCase([]),
+ "aBcI\u0307".toLocaleLowerCase(Intl.GetDefaultLocale));
+assertEquals("άόύώ".toLocaleUpperCase([]), "άόύώ".toLocaleUpperCase());
+assertEquals("άόύώ".toLocaleUpperCase([]),
+ "άόύώ".toLocaleUpperCase(Intl.GetDefaultLocale));
+
+
+// English/root locale keeps U+0307 (combining dot above).
+assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("en"));
+assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase(["en", "tr"]));
+assertEquals("abci\u0307", "aBcI\u0307".toLowerCase());
+
+// Greek uppercasing: not covered by intl402/String/*, yet. Tonos (U+0301) and
+// other diacritic marks are dropped. This rule is based on the current CLDR's
+// el-Upper transformation, but Greek uppercasing rules are more sophisticated
+// than this. See http://bugs.icu-project.org/trac/ticket/10582 and
+// http://unicode.org/cldr/trac/ticket/7905 .
+assertEquals("Α", "α\u0301".toLocaleUpperCase("el"));
+assertEquals("Α", "α\u0301".toLocaleUpperCase("el-GR"));
+assertEquals("Α", "α\u0301".toLocaleUpperCase("el-Grek"));
+assertEquals("Α", "α\u0301".toLocaleUpperCase("el-Grek-GR"));
+assertEquals("Α", "ά".toLocaleUpperCase("el"));
+assertEquals("ΑΟΥΩ", "άόύώ".toLocaleUpperCase("el"));
+assertEquals("ΑΟΥΩ", "α\u0301ο\u0301υ\u0301ω\u0301".toLocaleUpperCase("el"));
+assertEquals("ΑΟΥΩ", "άόύώ".toLocaleUpperCase("el"));
+assertEquals("ΟΕ", "Ό\u1f15".toLocaleUpperCase("el"));
+assertEquals("ΟΕ", "Ο\u0301ε\u0314\u0301".toLocaleUpperCase("el"));
+
+// Input and output are identical.
+assertEquals("αβγδε", "αβγδε".toLocaleLowerCase("el"));
+assertEquals("ΑΒΓΔΕ", "ΑΒΓΔΕ".toLocaleUpperCase("el"));
+assertEquals("ΑΒΓΔΕАБ𝐀𝐁", "ΑΒΓΔΕАБ𝐀𝐁".toLocaleUpperCase("el"));
+assertEquals("ABCDEÂÓḴ123", "ABCDEÂÓḴ123".toLocaleUpperCase("el"));
+// ASCII-only or Latin-1 only: 1-byte
+assertEquals("ABCDE123", "ABCDE123".toLocaleUpperCase("el"));
+assertEquals("ABCDEÂÓ123", "ABCDEÂÓ123".toLocaleUpperCase("el"));
+
+// To make sure that the input string is not overwritten in place.
+var strings = ["abCdef", "αβγδε", "άόύώ", "аб"];
+for (var s of strings) {
+ var backupAsArray = s.split("");
+ var uppered = s.toLocaleUpperCase("el");
+ assertEquals(s, backupAsArray.join(""));
+}
+
+// In other locales, U+0301 is preserved.
+assertEquals("Α\u0301Ο\u0301Υ\u0301Ω\u0301",
+ "α\u0301ο\u0301υ\u0301ω\u0301".toLocaleUpperCase("en"));
+assertEquals("Α\u0301Ο\u0301Υ\u0301Ω\u0301",
+ "α\u0301ο\u0301υ\u0301ω\u0301".toUpperCase());
+
+// Plane 1; Deseret and Warang Citi Script.
+assertEquals("\u{10400}\u{118A0}", "\u{10428}\u{118C0}".toUpperCase());
+assertEquals("\u{10428}\u{118C0}", "\u{10400}\u{118A0}".toLowerCase());
+// Mathematical Bold {Capital, Small} Letter A do not change.
+assertEquals("\u{1D400}\u{1D41A}", "\u{1D400}\u{1D41A}".toUpperCase());
+assertEquals("\u{1D400}\u{1D41A}", "\u{1D400}\u{1D41A}".toLowerCase());
+// Plane 1; New characters in Unicode 8.0
+assertEquals("\u{10C80}", "\u{10CC0}".toUpperCase());
+assertEquals("\u{10CC0}", "\u{10C80}".toLowerCase());
+assertEquals("\u{10C80}", "\u{10CC0}".toLocaleUpperCase());
+assertEquals("\u{10CC0}", "\u{10C80}".toLocaleLowerCase());
+assertEquals("\u{10C80}", "\u{10CC0}".toLocaleUpperCase(["tr"]));
+assertEquals("\u{10C80}", "\u{10CC0}".toLocaleUpperCase(["tr"]));
+assertEquals("\u{10CC0}", "\u{10C80}".toLocaleLowerCase());
diff --git a/deps/v8/test/intl/general/getCanonicalLocales.js b/deps/v8/test/intl/general/getCanonicalLocales.js
new file mode 100644
index 0000000000..dd01363c4f
--- /dev/null
+++ b/deps/v8/test/intl/general/getCanonicalLocales.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var locales = ['en-US', 'fr'];
+var result = Intl.getCanonicalLocales(locales);
+var len = result.length;
+
+// TODO(jshin): Remove the following when
+// https://github.com/tc39/test262/issues/745 is resolved and
+// test262 in v8 is updated.
+
+assertEquals(Object.getPrototypeOf(result), Array.prototype);
+assertEquals(result.constructor, Array);
+
+for (var key in result) {
+ var desc = Object.getOwnPropertyDescriptor(result, key);
+ assertTrue(desc.writable);
+ assertTrue(desc.configurable);
+ assertTrue(desc.enumerable);
+}
+
+var desc = Object.getOwnPropertyDescriptor(result, 'length');
+assertTrue(desc.writable);
+assertEquals(result.push('de'), desc.value + 1);
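
Basic behavior of the new API, as a hedged usage sketch (canonicalization and deduplication follow the ECMA-402 CanonicalizeLocaleList steps):

print(Intl.getCanonicalLocales('EN-us'));                // en-US
print(Intl.getCanonicalLocales(['fr', 'FR-fr', 'fr']));  // fr,fr-FR
print(Intl.getCanonicalLocales().length);                // 0 - no argument yields []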
diff --git a/deps/v8/test/intl/intl.gyp b/deps/v8/test/intl/intl.gyp
index 8fa7f0674f..f2e107f523 100644
--- a/deps/v8/test/intl/intl.gyp
+++ b/deps/v8/test/intl/intl.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'intl.isolate',
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index e89008517b..15fbe43c12 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -29,29 +29,11 @@
[ALWAYS, {
# TODO(jochen): The following test is flaky.
'overrides/caching': [PASS, FAIL],
-
- # BUG(v8:3454).
- 'date-format/parse-MMMdy': [FAIL],
- 'date-format/parse-mdyhms': [FAIL],
- 'number-format/parse-decimal': [FAIL],
- 'number-format/parse-percent': [FAIL],
}], # ALWAYS
-##############################################################################
-['system == linux', {
- # BUG(v8:2899).
- 'collator/default-locale': [PASS, FAIL],
-}], # 'system == linux'
-
-##############################################################################
-['system == macos', {
- # BUG(v8:4459).
- 'collator/default-locale': [FAIL],
-}], # 'system == macos'
-
-##############################################################################
-['arch == arm or arch == arm64', {
- # BUG(v8:4459).
- 'collator/default-locale': [PASS, FAIL],
-}], # 'arch == arm or arch == arm64'
+['arch == arm64 and mode == debug and simulator_run == True and variant == ignition', {
+ # Ignition on ARM64 simulator in debug mode.
+ 'date-format/timezone': [PASS, ['no_snap', SKIP]],
+ 'number-format/check-digit-ranges': [PASS, ['no_snap', SKIP]],
+}], # 'arch == arm64 and mode == debug and simulator_run == True and variant == ignition'
]
diff --git a/deps/v8/test/intl/no-extra-flag.js b/deps/v8/test/intl/no-extra-flag.js
new file mode 100644
index 0000000000..6735f84a2e
--- /dev/null
+++ b/deps/v8/test/intl/no-extra-flag.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-intl-extra
+
+// Turning off the flag removes the non-standard properties.
+
+var dtf = new Intl.DateTimeFormat(['en']);
+assertFalse('v8Parse' in dtf);
+assertFalse('resolved' in dtf);
+assertFalse(!!dtf.resolved && 'pattern' in dtf.resolved);
+
+var nf = new Intl.NumberFormat(['en']);
+assertFalse('v8Parse' in nf);
+assertFalse('resolved' in nf);
+assertFalse(!!nf.resolved && 'pattern' in nf.resolved);
+
+var col = new Intl.Collator(['en']);
+assertFalse('resolved' in col);
+
+var br = new Intl.v8BreakIterator(['en']);
+assertFalse('resolved' in br);
diff --git a/deps/v8/test/intl/number-format/format-is-bound.js b/deps/v8/test/intl/number-format/format-is-bound.js
index d93ee0067f..edb6a4b817 100644
--- a/deps/v8/test/intl/number-format/format-is-bound.js
+++ b/deps/v8/test/intl/number-format/format-is-bound.js
@@ -28,6 +28,8 @@
// Create default NumberFormat.
var nf = new Intl.NumberFormat();
+var beforeCount = Object.getOwnPropertyNames(nf).length;
+
// Array we want to iterate, actual numbers are not important.
var numberArray = [1, 2, 3];
@@ -39,4 +41,4 @@ numberArray.forEach(nf.format);
nf.format(12345);
// Reading the format doesn't add any additional property keys
-assertEquals(1, Object.getOwnPropertyNames(nf).length);
+assertEquals(beforeCount, Object.getOwnPropertyNames(nf).length);
diff --git a/deps/v8/test/intl/number-format/parse-currency.js b/deps/v8/test/intl/number-format/parse-currency.js
index c87ffea0d2..a57128ea60 100644
--- a/deps/v8/test/intl/number-format/parse-currency.js
+++ b/deps/v8/test/intl/number-format/parse-currency.js
@@ -28,6 +28,8 @@
// Currency parsing is not yet supported. We need ICU49 or higher to get
// it working.
+// Flags: --intl-extra
+
var nf = new Intl.NumberFormat(['en'], {style: 'currency', currency: 'USD'});
assertEquals(undefined, nf.v8Parse('USD 123.43'));
diff --git a/deps/v8/test/intl/number-format/parse-decimal.js b/deps/v8/test/intl/number-format/parse-decimal.js
index ea3f8ddf30..62f4728911 100644
--- a/deps/v8/test/intl/number-format/parse-decimal.js
+++ b/deps/v8/test/intl/number-format/parse-decimal.js
@@ -24,16 +24,29 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --intl-extra
var nf = new Intl.NumberFormat(['en'], {style: 'decimal'});
assertEquals(123.43, nf.v8Parse('123.43'));
assertEquals(123, nf.v8Parse('123'));
assertEquals(NaN, nf.v8Parse(NaN));
-assertEquals(12323, nf.v8Parse('123,23'));
-assertEquals(12323.456, nf.v8Parse('123,23.456'));
-assertEquals(12323.456, nf.v8Parse('0000000123,23.456'));
-assertEquals(-12323.456, nf.v8Parse('-123,23.456'));
+assertEquals(12323, nf.v8Parse('12,323'));
+assertEquals(12323, nf.v8Parse('12323'));
+assertEquals(12323.456, nf.v8Parse('12,323.456'));
+assertEquals(12323.456, nf.v8Parse('000000012323.456'));
+assertEquals(12323.456, nf.v8Parse('000,000,012,323.456'));
+assertEquals(-12323.456, nf.v8Parse('-12,323.456'));
+
+assertEquals(12323, nf.v8Parse('000000012323'));
+assertEquals(12323, nf.v8Parse('000,000,012,323'));
+assertEquals(undefined, nf.v8Parse('000000012,323.456'));
+
+// Not tolerant of a misplaced thousands separator.
+assertEquals(undefined, nf.v8Parse('123,23.456'));
+assertEquals(undefined, nf.v8Parse('0000000123,23.456'));
+assertEquals(undefined, nf.v8Parse('-123,23.456'));
-// Scientific notation gets ignored.
-assertEquals(123.456, nf.v8Parse('123.456e-3'));
+// Scientific notation is supported.
+assertEquals(0.123456, nf.v8Parse('123.456e-3'));
diff --git a/deps/v8/test/intl/number-format/parse-invalid-input.js b/deps/v8/test/intl/number-format/parse-invalid-input.js
index 8c84d0b87e..251b52a5e7 100644
--- a/deps/v8/test/intl/number-format/parse-invalid-input.js
+++ b/deps/v8/test/intl/number-format/parse-invalid-input.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --intl-extra
+
// Invalid input is handled properly.
var nf = new Intl.NumberFormat(['en']);
diff --git a/deps/v8/test/intl/number-format/parse-percent.js b/deps/v8/test/intl/number-format/parse-percent.js
index 4964da4ae7..0dc36d3f0b 100644
--- a/deps/v8/test/intl/number-format/parse-percent.js
+++ b/deps/v8/test/intl/number-format/parse-percent.js
@@ -25,12 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --intl-extra
+
var nf = new Intl.NumberFormat(['en'], {style: 'percent'});
assertEquals(1.2343, nf.v8Parse('123.43%'));
assertEquals(1.23, nf.v8Parse('123%'));
assertEquals(NaN, nf.v8Parse(NaN));
-assertEquals(123.23, nf.v8Parse('123,23%'));
-assertEquals(123.23456, nf.v8Parse('123,23.456%'));
-assertEquals(123.23456, nf.v8Parse('0000000123,23.456%'));
-assertEquals(-123.23456, nf.v8Parse('-123,23.456%'));
+assertEquals(123.23, nf.v8Parse('12,323%'));
+assertEquals(123.23456, nf.v8Parse('12,323.456%'));
+assertEquals(123.23456, nf.v8Parse('000000012323.456%'));
+assertEquals(-123.23456, nf.v8Parse('-12,323.456%'));
+
+// Not tolerant of misplaced group separators.
+assertEquals(undefined, nf.v8Parse('123,23%'));
+assertEquals(undefined, nf.v8Parse('123,23.456%'));
+assertEquals(undefined, nf.v8Parse('0000000123,23.456%'));
+assertEquals(undefined, nf.v8Parse('-123,23.456%'));
diff --git a/deps/v8/test/intl/regress-4870.js b/deps/v8/test/intl/regress-4870.js
new file mode 100644
index 0000000000..72c095eccd
--- /dev/null
+++ b/deps/v8/test/intl/regress-4870.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() =>
+ Object.getOwnPropertyDescriptor(Intl.Collator.prototype, 'compare')
+ .get.call(new Intl.DateTimeFormat())('a', 'b'),
+ TypeError);
diff --git a/deps/v8/test/intl/regress-5179.js b/deps/v8/test/intl/regress-5179.js
new file mode 100644
index 0000000000..c9132c0e99
--- /dev/null
+++ b/deps/v8/test/intl/regress-5179.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(String.prototype, Symbol.split, {
+ get() {
+ return function(obj, limit) {
+ return [, null];
+ }
+ }
+});
+
+dtf = new Intl.DateTimeFormat("de", {timeZone:"America/bueNos_airES"});
+
+assertEquals("America/Buenos_Aires", dtf.resolvedOptions().timeZone);
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 6e4b4f15df..c7f17bbb57 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -26,10 +26,12 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
+import re
from testrunner.local import testsuite
from testrunner.objects import testcase
+FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
class IntlTestSuite(testsuite.TestSuite):
@@ -55,7 +57,11 @@ class IntlTestSuite(testsuite.TestSuite):
return tests
def GetFlagsForTestCase(self, testcase, context):
+ source = self.GetSourceForTest(testcase)
flags = ["--allow-natives-syntax"] + context.mode_flags
+ flags_match = re.findall(FLAGS_PATTERN, source)
+ for match in flags_match:
+ flags += match.strip().split()
files = []
files.append(os.path.join(self.root, "assert.js"))
@@ -71,6 +77,10 @@ class IntlTestSuite(testsuite.TestSuite):
return testcase.flags + flags
+ def GetSourceForTest(self, testcase):
+ filename = os.path.join(self.root, testcase.path + self.suffix())
+ with open(filename) as f:
+ return f.read()
def GetSuite(name, root):
return IntlTestSuite(name, root)
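
The runner now scans each test for Flags: comments; the same extraction, sketched in JavaScript for illustration (the real code uses Python's re.findall with the FLAGS_PATTERN above):

var FLAGS_PATTERN = /\/\/\s+Flags:(.*)/g;
var source = '// Flags: --intl-extra\n// Flags: --allow-natives-syntax\nvar x;';
var flags = [];
var match;
while ((match = FLAGS_PATTERN.exec(source)) !== null) {
  flags = flags.concat(match[1].trim().split(/\s+/));
}
print(flags);  // --intl-extra,--allow-natives-syntax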
diff --git a/deps/v8/test/js-perf-test/Generators/generators.js b/deps/v8/test/js-perf-test/Generators/generators.js
new file mode 100644
index 0000000000..d49dee3069
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Generators/generators.js
@@ -0,0 +1,131 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+new BenchmarkSuite('Generators', [1000], [
+ new Benchmark('Basic', false, false, 0, Basic),
+ new Benchmark('Loop', false, false, 0, Loop),
+ new Benchmark('Input', false, false, 0, Input),
+ new Benchmark('YieldStar', false, false, 0, YieldStar),
+]);
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: Basic
+// ----------------------------------------------------------------------------
+
+function* five() {
+ yield 1;
+ yield 2;
+ yield 3;
+ yield 4;
+ yield 5;
+}
+
+function Basic() {
+ let g = five();
+ let sum = 0;
+ sum += g.next().value;
+ sum += g.next().value;
+ sum += g.next().value;
+ sum += g.next().value;
+ sum += g.next().value;
+ if (sum != 15 || !g.next().done) throw "wrong";
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: Loop
+// ----------------------------------------------------------------------------
+
+function* fibonacci() {
+ let x = 0;
+ let y = 1;
+ yield x;
+ while (true) {
+ yield y;
+ let tmp = x;
+ x = y;
+ y += tmp;
+ }
+}
+
+function Loop() {
+ let n = 0;
+ let x;
+ for (x of fibonacci()) {
+ if (++n === 42) break;
+ }
+ if (x != 165580141) throw "wrong";
+}
+
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: Input
+// ----------------------------------------------------------------------------
+
+function* multiples(x) {
+ let skip = function.sent || 0;
+ let next = 0;
+ while (true) {
+ if (skip === 0) {
+ skip = yield next;
+ } else {
+ skip--;
+ }
+ next += x;
+ }
+}
+
+function Input() {
+ let g = multiples(3);
+ let results = [g.next(2), g.next(0), g.next(5), g.next(10)];
+ if (results.slice(-1)[0].value != 60) throw "wrong";
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: YieldStar
+// ----------------------------------------------------------------------------
+
+function* infix(node) {
+ if (node) {
+ yield* infix(node.left);
+ yield node.label;
+ yield* infix(node.right);
+ }
+}
+
+class Node {
+ constructor(label, left, right) {
+ this.label = label;
+ this.left = left;
+ this.right = right;
+ }
+}
+
+function YieldStar() {
+ let tree = new Node(1,
+ new Node(2,
+ new Node(3,
+ new Node(4,
+ new Node(16,
+ new Node(5,
+ new Node(23,
+ new Node(0),
+ new Node(17)),
+ new Node(44, new Node(20)))),
+ new Node(7,
+ undefined,
+ new Node(23,
+ new Node(0),
+ new Node(41, undefined, new Node(11))))),
+ new Node(8)),
+ new Node(5)),
+ new Node(6, undefined, new Node(7)));
+ let labels = [...(infix(tree))];
+ // 0,23,17,5,20,44,16,4,7,0,23,41,11,3,8,2,5,1,6,7
+ if (labels[0] != 0) throw "wrong";
+}
diff --git a/deps/v8/test/js-perf-test/Generators/run.js b/deps/v8/test/js-perf-test/Generators/run.js
new file mode 100644
index 0000000000..6feb6f79fb
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Generators/run.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('../base.js');
+load('generators.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-Generators(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index dc3cb757b9..52bff011db 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -9,6 +9,38 @@
"resources": ["base.js"],
"tests": [
{
+ "name": "Generators",
+ "path": ["Generators"],
+ "main": "run.js",
+ "resources": ["generators.js"],
+ "flags": ["--harmony-function-sent"],
+ "results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
+ },
+ {
+ "name": "GeneratorsIgnition",
+ "path": ["Generators"],
+ "main": "run.js",
+ "resources": ["generators.js"],
+ "flags": [
+ "--harmony-function-sent",
+ "--ignition"
+ ],
+ "results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
+ },
+ {
+ "name": "GeneratorsIgnitionTurbofan",
+ "path": ["Generators"],
+ "main": "run.js",
+ "resources": ["generators.js"],
+ "flags": [
+ "--harmony-function-sent",
+ "--ignition",
+ "--turbo",
+ "--turbo-from-bytecode"
+ ],
+ "results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
+ },
+ {
"name": "RestParameters",
"path": ["RestParameters"],
"main": "run.js",
@@ -163,6 +195,29 @@
{"name": "for (i < array.length)"},
{"name": "for (i < length)"}
]
+ },
+ {
+ "name": "PropertyQueries",
+ "path": ["PropertyQueries"],
+ "main": "run.js",
+ "resources": ["property-queries.js"],
+ "results_regexp": "^%s\\-PropertyQueries\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "in--INTERN-prop"},
+ {"name": "in--DEINTERN-prop"},
+ {"name": "in--NE-INTERN-prop"},
+ {"name": "in--NE-DEINTERN-prop"},
+ {"name": "in--el"},
+ {"name": "in--el-str"},
+ {"name": "in--NE-el"},
+ {"name": "Object.hasOwnProperty--INTERN-prop"},
+ {"name": "Object.hasOwnProperty--DEINTERN-prop"},
+ {"name": "Object.hasOwnProperty--NE-INTERN-prop"},
+ {"name": "Object.hasOwnProperty--NE-DEINTERN-prop"},
+ {"name": "Object.hasOwnProperty--el"},
+ {"name": "Object.hasOwnProperty--el-str"},
+ {"name": "Object.hasOwnProperty--NE-el"}
+ ]
}
]
}
diff --git a/deps/v8/test/js-perf-test/PropertyQueries/PropertyQueries.json b/deps/v8/test/js-perf-test/PropertyQueries/PropertyQueries.json
new file mode 100644
index 0000000000..49b953d131
--- /dev/null
+++ b/deps/v8/test/js-perf-test/PropertyQueries/PropertyQueries.json
@@ -0,0 +1,38 @@
+{
+ "name": "PropertyQueriesTests",
+ "run_count": 5,
+ "run_count_android_arm": 3,
+ "run_count_android_arm64": 3,
+ "timeout": 120,
+ "units": "score",
+ "total": true,
+ "resources": ["base.js"],
+ "tests": [
+ {
+ "name": "PropertyQueries",
+ "path": ["."],
+ "main": "run.js",
+ "flags": [""],
+ "resources": [
+ "property-queries.js"
+ ],
+ "results_regexp": "^%s\\-PropertyQueries\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "in--INTERN-prop"},
+ {"name": "in--DEINTERN-prop"},
+ {"name": "in--NE-INTERN-prop"},
+ {"name": "in--NE-DEINTERN-prop"},
+ {"name": "in--el"},
+ {"name": "in--el-str"},
+ {"name": "in--NE-el"},
+ {"name": "Object.hasOwnProperty--INTERN-prop"},
+ {"name": "Object.hasOwnProperty--DEINTERN-prop"},
+ {"name": "Object.hasOwnProperty--NE-INTERN-prop"},
+ {"name": "Object.hasOwnProperty--NE-DEINTERN-prop"},
+ {"name": "Object.hasOwnProperty--el"},
+ {"name": "Object.hasOwnProperty--el-str"},
+ {"name": "Object.hasOwnProperty--NE-el"}
+ ]
+ }
+ ]
+}
diff --git a/deps/v8/test/js-perf-test/PropertyQueries/property-queries.js b/deps/v8/test/js-perf-test/PropertyQueries/property-queries.js
new file mode 100644
index 0000000000..f763d262d4
--- /dev/null
+++ b/deps/v8/test/js-perf-test/PropertyQueries/property-queries.js
@@ -0,0 +1,274 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function ObjectWithKeys(count, keyOffset = 0, keyGen) {
+ var body = "";
+ for (var i = 0; i < count; i++) {
+ var key = keyGen(i + keyOffset);
+ if (typeof key === "string") {
+ body += `this.${key} = 0\n`;
+ } else {
+ body += `this[${key}] = 0\n`;
+ }
+ }
+ var f = new Function(body);
+ return new f();
+}
+
+function ObjectWithProperties(count, keyOffset) {
+ return ObjectWithKeys(count, keyOffset, (key) => "key" + key );
+}
+
+function ObjectWithElements(count, keyOffset) {
+ return ObjectWithKeys(count, keyOffset, (key) => key );
+}
+
+function ObjectWithMixedKeys(count, keyOffset) {
+ return ObjectWithKeys(count, keyOffset, (key) => {
+ if (key % 2 == 0) return (key / 2);
+ return "key" + ((key - 1) / 2);
+ });
+}
+
+// Create an object with #depth prototypes each having #keys properties
+// generated by given keyGen.
+function ObjectWithProtoKeys(depth, keys, cacheable,
+ keyGen = ObjectWithProperties) {
+ var o = keyGen(keys);
+ var current = o;
+ var keyOffset = 0;
+ for (var i = 0; i < depth; i++) {
+ keyOffset += keys;
+ current.__proto__ = keyGen(keys, keyOffset);
+ current = current.__proto__;
+ }
+ if (cacheable === false) {
+ // Add an empty proxy at the end of the prototype chain to make caching
+ // properties impossible.
+ current.__proto__ = new Proxy({}, {});
+ }
+ return o;
+}
+
+
+function HoleyIntArray(size) {
+ var array = new Array(size);
+ for (var i = 0; i < size; i += 3) {
+ array[i] = i;
+ }
+ return array;
+}
+
+function IntArray(size) {
+ var array = new Array(size);
+ for (var i = 0; i < size; i++) {
+ array[i] = i;
+ }
+ return array;
+}
+
+// Switch object's properties and elements to dictionary mode.
+function MakeDictionaryMode(obj) {
+ obj.foo = 0;
+ delete obj.foo;
+ obj[1e9] = 0;
+ return obj;
+}
+
+function Internalize(s) {
+ return Object.keys({[s]:0})[0];
+}
+
+function Deinternalize(s) {
+ return [...s].join("");
+}
+
+// ============================================================================
+
+const QUERY_INTERNALIZED_PROP = "INTERN-prop";
+const QUERY_DEINTERNALIZED_PROP = "DEINTERN-prop";
+const QUERY_NON_EXISTING_INTERNALIZED_PROP = "NE-INTERN-prop";
+const QUERY_NON_EXISTING_DEINTERNALIZED_PROP = "NE-DEINTERN-prop";
+const QUERY_ELEMENT = "el";
+const QUERY_ELEMENT_AS_STRING = "el-str";
+const QUERY_NON_EXISTING_ELEMENT = "NE-el";
+
+const OBJ_MODE_FAST = "fast";
+const OBJ_MODE_SLOW = "slow";
+
+var TestQueries = [
+ QUERY_INTERNALIZED_PROP,
+ QUERY_DEINTERNALIZED_PROP,
+ QUERY_NON_EXISTING_INTERNALIZED_PROP,
+ QUERY_NON_EXISTING_DEINTERNALIZED_PROP,
+ QUERY_ELEMENT,
+ QUERY_ELEMENT_AS_STRING,
+ QUERY_NON_EXISTING_ELEMENT,
+];
+
+const QUERIES_PER_OBJECT_NUMBER = 10;
+
+// Reduce the keys array to roughly "count" keys by keeping every (len/count)-th one.
+function FilterKeys(keys, count) {
+ var len = keys.length;
+ if (len < count) throw new Error("Keys array is too short: " + len);
+ var step = len / count;
+ if (step == 0) throw new Error("Bad count specified: " + count);
+ return keys.filter((element, index) => index % step == 0);
+}
+
+
+function MakeKeyQueries(keys, query_kind) {
+ var properties = keys.filter((element) => isNaN(Number(element)));
+ var elements = keys.filter((element) => !isNaN(Number(element)));
+
+ properties = FilterKeys(properties, QUERIES_PER_OBJECT_NUMBER);
+ elements = FilterKeys(elements, QUERIES_PER_OBJECT_NUMBER);
+
+ switch (query_kind) {
+ case QUERY_INTERNALIZED_PROP:
+ return properties;
+
+ case QUERY_DEINTERNALIZED_PROP:
+ return properties.map(Deinternalize);
+
+ case QUERY_NON_EXISTING_INTERNALIZED_PROP:
+ case QUERY_NON_EXISTING_DEINTERNALIZED_PROP:
+ var non_existing = [];
+ for (var i = 0; i < QUERIES_PER_OBJECT_NUMBER; i++) {
+ non_existing.push("non-existing" + i);
+ }
+ if (query_kind == QUERY_NON_EXISTING_INTERNALIZED_PROP) {
+ return non_existing.map(Internalize);
+ } else {
+ return non_existing.map(Deinternalize);
+ }
+
+ case QUERY_ELEMENT:
+ return elements.map(Number);
+
+ case QUERY_ELEMENT_AS_STRING:
+ return elements.map(String);
+
+ case QUERY_NON_EXISTING_ELEMENT:
+ var non_existing = [];
+ for (var i = 0; i < QUERIES_PER_OBJECT_NUMBER; i++) {
+ non_existing.push(1200 + 100*i);
+ }
+ return non_existing;
+
+ default:
+ throw new Error("Bad query_kind: " + query_kind);
+ }
+}
+
+
+var TestData = [];
+
+[true, false].forEach((cacheable) => {
+ [OBJ_MODE_FAST, OBJ_MODE_SLOW].forEach((obj_mode) => {
+ var proto_mode = cacheable ? "" : "-with-slow-proto";
+ var name = `${obj_mode}-obj${proto_mode}`;
+ var objects = [];
+ [10, 50, 100, 200, 500].forEach((prop_count) => {
+ // Create object with prop_count properties and prop_count elements.
+ var obj = ObjectWithProtoKeys(5, prop_count * 2, cacheable,
+ ObjectWithMixedKeys);
+ if (obj_mode == OBJ_MODE_SLOW) {
+ obj = MakeDictionaryMode(obj);
+ }
+ objects.push(obj);
+ });
+ TestData.push({name, objects});
+ });
+});
+
+
+// ============================================================================
+
+function CreateTestFunction(template, object, keys) {
+ // Force a new function for each test-object to avoid side-effects due to ICs.
+ var text = "// random comment " + Math.random() + "\n" +
+ template(object, keys);
+ var func = new Function("object", "keys", text);
+ return () => func(object, keys);
+}
+
+function CombineTestFunctions(tests) {
+ return () => {
+ for (var i = 0; i < tests.length; i++ ) {
+ tests[i]();
+ }
+ };
+}
+
+var TestFunctions = [
+ {
+ name: "in",
+ // Query all keys.
+ keys: (object) => Object.keys(object),
+ template: (object, keys) => {
+ var lines = [
+ `var result = true;`,
+ `for (var i = 0; i < keys.length; i++) {`,
+ ` var key = keys[i];`,
+ ` result = (key in object) && result;`,
+ `}`,
+ `return result;`,
+ ];
+ return lines.join("\n");
+ },
+ },
+ {
+ name: "Object.hasOwnProperty",
+ // Query only own keys.
+ keys: (object) => Object.getOwnPropertyNames(object),
+ template: (object, keys) => {
+ var lines = [
+ `var result = true;`,
+ `for (var i = 0; i < keys.length; i++) {`,
+ ` var key = keys[i];`,
+ ` result = object.hasOwnProperty(key) && result;`,
+ `}`,
+ `return result;`,
+ ];
+ return lines.join("\n");
+ },
+ },
+];
+
+
+// ============================================================================
+// Create the benchmark suites. We create one suite for each combination of
+// test function (above) and query kind. Each suite contains one benchmark
+// per object type.
+var Benchmarks = [];
+
+for (var test_function_desc of TestFunctions) {
+ var test_function_name = test_function_desc.name;
+
+ for (var query_kind of TestQueries) {
+ var benchmarks = [];
+ var suite_name = test_function_name + "--" + query_kind;
+ for (var test_data of TestData) {
+ var name = suite_name + "--" + test_data.name;
+
+ var tests = [];
+ for (var object of test_data.objects) {
+ var keys = test_function_desc.keys(object);
+ keys = MakeKeyQueries(keys, query_kind);
+
+ var test = CreateTestFunction(test_function_desc.template, object,
+ keys);
+ tests.push(test);
+ }
+ var run_function = CombineTestFunctions(tests);
+ var benchmark = new Benchmark(name, false, false, 0, run_function);
+ benchmarks.push(benchmark);
+ }
+ Benchmarks.push(new BenchmarkSuite(suite_name, [100], benchmarks));
+ }
+}
+
+// ============================================================================
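
Why the Internalize()/Deinternalize() helpers above work, as a hedged sketch of the V8-level effect (=== compares by content either way; only the internal string representation differs, which is what the query benchmarks probe):

var s = 'key' + 1;                        // freshly built, not necessarily internalized
var interned = Object.keys({[s]: 0})[0];  // property names pass through the internalized-string table
var copy = [...s].join('');               // fresh flat copy, never the interned object
print(interned === copy);                 // true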
diff --git a/deps/v8/test/js-perf-test/PropertyQueries/run.js b/deps/v8/test/js-perf-test/PropertyQueries/run.js
new file mode 100644
index 0000000000..d98c366276
--- /dev/null
+++ b/deps/v8/test/js-perf-test/PropertyQueries/run.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../base.js');
+load('property-queries.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-PropertyQueries(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/memory/Memory.json b/deps/v8/test/memory/Memory.json
index 1cdc1487c4..c6b8cd50a2 100644
--- a/deps/v8/test/memory/Memory.json
+++ b/deps/v8/test/memory/Memory.json
@@ -12,7 +12,7 @@
},
{
"name": "ReservedMemoryContext",
- "results_regexp": "(\\d+) bytes per context$"
+ "results_regexp": "(\\d+) bytes per context #0$"
},
{
"name": "SnapshotSizeStartup",
@@ -20,7 +20,7 @@
},
{
"name": "SnapshotSizeContext",
- "results_regexp": "(\\d+) bytes for context$"
+ "results_regexp": "(\\d+) bytes for context #0$"
}
]
}
diff --git a/deps/v8/test/message/const-decl-no-init-sloppy.js b/deps/v8/test/message/const-decl-no-init-sloppy.js
deleted file mode 100644
index a122eae182..0000000000
--- a/deps/v8/test/message/const-decl-no-init-sloppy.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-sloppy
-
-function f() {
- const a;
-}
diff --git a/deps/v8/test/message/generators-throw1.js b/deps/v8/test/message/generators-throw1.js
new file mode 100644
index 0000000000..b4d404a1bf
--- /dev/null
+++ b/deps/v8/test/message/generators-throw1.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* f() { yield }
+
+f().throw(42);
diff --git a/deps/v8/test/message/generators-throw1.out b/deps/v8/test/message/generators-throw1.out
new file mode 100644
index 0000000000..1e78e8834f
--- /dev/null
+++ b/deps/v8/test/message/generators-throw1.out
@@ -0,0 +1,3 @@
+*%(basename)s:5: 42
+function* f() { yield }
+ ^
diff --git a/deps/v8/test/message/generators-throw2.js b/deps/v8/test/message/generators-throw2.js
new file mode 100644
index 0000000000..7207755427
--- /dev/null
+++ b/deps/v8/test/message/generators-throw2.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* f() { yield }
+
+let g = f();
+g.next();
+g.throw(42);
diff --git a/deps/v8/test/message/generators-throw2.out b/deps/v8/test/message/generators-throw2.out
new file mode 100644
index 0000000000..3f23814f8b
--- /dev/null
+++ b/deps/v8/test/message/generators-throw2.out
@@ -0,0 +1,3 @@
+*%(basename)s:5: 42
+function* f() { yield }
+ ^
diff --git a/deps/v8/test/message/instanceof-noncallable.js b/deps/v8/test/message/instanceof-noncallable.js
index 571a2b0c76..d82b416e68 100644
--- a/deps/v8/test/message/instanceof-noncallable.js
+++ b/deps/v8/test/message/instanceof-noncallable.js
@@ -25,6 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-instanceof
+
1 instanceof {};
diff --git a/deps/v8/test/message/instanceof-nonobject.js b/deps/v8/test/message/instanceof-nonobject.js
index 47152571a1..ef8e0ae2e4 100644
--- a/deps/v8/test/message/instanceof-nonobject.js
+++ b/deps/v8/test/message/instanceof-nonobject.js
@@ -25,6 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-instanceof
+
1 instanceof 2;
diff --git a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js b/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
index a6cba6fc30..e5e37e11aa 100644
--- a/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
+++ b/deps/v8/test/message/let-lexical-name-in-array-prohibited.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --harmony-sloppy-let
+//
let [let];
diff --git a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js b/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
index 0a12762ec3..4e26c62ad6 100644
--- a/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
+++ b/deps/v8/test/message/let-lexical-name-in-object-prohibited.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --harmony-sloppy-let
+//
let {let};
diff --git a/deps/v8/test/message/let-lexical-name-prohibited.js b/deps/v8/test/message/let-lexical-name-prohibited.js
index ed72faed8e..b001be877c 100644
--- a/deps/v8/test/message/let-lexical-name-prohibited.js
+++ b/deps/v8/test/message/let-lexical-name-prohibited.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy --harmony-sloppy-let
+//
let let;
diff --git a/deps/v8/test/message/message.gyp b/deps/v8/test/message/message.gyp
index dac6d9ffff..fc1ae32f4e 100644
--- a/deps/v8/test/message/message.gyp
+++ b/deps/v8/test/message/message.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'message.isolate',
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 051911c6a9..e4db83db09 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -27,8 +27,6 @@
[
[ALWAYS, {
- # All tests in the bug directory are expected to fail.
- 'bugs/*': [FAIL],
# We don't parse RegExps at scanning time, so we can't fail on octal
# escapes (we need to parse to distinguish octal escapes from valid
# back-references).
diff --git a/deps/v8/test/message/no-legacy-const-2.js b/deps/v8/test/message/no-legacy-const-2.js
deleted file mode 100644
index 5dc63b3cb5..0000000000
--- a/deps/v8/test/message/no-legacy-const-2.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --no-harmony-sloppy --no-harmony-sloppy-let
-// Flags: --no-harmony-sloppy-function
-
-const = 42;
diff --git a/deps/v8/test/message/no-legacy-const-2.out b/deps/v8/test/message/no-legacy-const-2.out
deleted file mode 100644
index 5385250aaf..0000000000
--- a/deps/v8/test/message/no-legacy-const-2.out
+++ /dev/null
@@ -1,5 +0,0 @@
-*%(basename)s:8: SyntaxError: Unexpected token const
-const = 42;
-^^^^^
-
-SyntaxError: Unexpected token const
diff --git a/deps/v8/test/message/no-legacy-const-3.js b/deps/v8/test/message/no-legacy-const-3.js
deleted file mode 100644
index 43dd9c9d71..0000000000
--- a/deps/v8/test/message/no-legacy-const-3.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --no-harmony-sloppy --no-harmony-sloppy-let
-// Flags: --no-harmony-sloppy-function
-
-const
diff --git a/deps/v8/test/message/no-legacy-const-3.out b/deps/v8/test/message/no-legacy-const-3.out
deleted file mode 100644
index 7539bbcd1d..0000000000
--- a/deps/v8/test/message/no-legacy-const-3.out
+++ /dev/null
@@ -1,5 +0,0 @@
-*%(basename)s:8: SyntaxError: Unexpected token const
-const
-^^^^^
-
-SyntaxError: Unexpected token const
diff --git a/deps/v8/test/message/no-legacy-const.js b/deps/v8/test/message/no-legacy-const.js
deleted file mode 100644
index 9eebee59bf..0000000000
--- a/deps/v8/test/message/no-legacy-const.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --no-harmony-sloppy --no-harmony-sloppy-let
-// Flags: --no-harmony-sloppy-function
-
-const x = 42;
diff --git a/deps/v8/test/message/no-legacy-const.out b/deps/v8/test/message/no-legacy-const.out
deleted file mode 100644
index 33bb038836..0000000000
--- a/deps/v8/test/message/no-legacy-const.out
+++ /dev/null
@@ -1,5 +0,0 @@
-*%(basename)s:8: SyntaxError: Unexpected token const
-const x = 42;
-^^^^^
-
-SyntaxError: Unexpected token const
diff --git a/deps/v8/test/message/paren_in_arg_string.out b/deps/v8/test/message/paren_in_arg_string.out
index 57adf58bcd..9784712ab8 100644
--- a/deps/v8/test/message/paren_in_arg_string.out
+++ b/deps/v8/test/message/paren_in_arg_string.out
@@ -2,4 +2,5 @@
var paren_in_arg_string_bad = new Function(')', 'return;');
^
SyntaxError: Function arg string contains parenthesis
+ at new Function (<anonymous>)
at *%(basename)s:29:31
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.js b/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.js
new file mode 100644
index 0000000000..58d4c957ac
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ return continue f() - a ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out b/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out
new file mode 100644
index 0000000000..14670cd280
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-binop-lhs.out
@@ -0,0 +1,4 @@
+*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
+ return continue f() - a ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.js b/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.js
new file mode 100644
index 0000000000..a586cc84ee
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ return b + continue f() ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out b/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out
new file mode 100644
index 0000000000..207c526e2f
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-binop-rhs.out
@@ -0,0 +1,4 @@
+*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
+ return b + continue f() ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-comma.js b/deps/v8/test/message/syntactic-tail-call-in-comma.js
new file mode 100644
index 0000000000..402a4a8f7d
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-comma.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ return 1, 2, 3, continue f() , 4 ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-comma.out b/deps/v8/test/message/syntactic-tail-call-in-comma.out
new file mode 100644
index 0000000000..c4ecc28c97
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-comma.out
@@ -0,0 +1,4 @@
+*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
+ return 1, 2, 3, continue f() , 4 ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-extends.js b/deps/v8/test/message/syntactic-tail-call-in-extends.js
new file mode 100644
index 0000000000..86bf77ebbe
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-extends.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function g() {
+ return class A extends continue f() {};
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-extends.out b/deps/v8/test/message/syntactic-tail-call-in-extends.out
new file mode 100644
index 0000000000..f54155d2b5
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-extends.out
@@ -0,0 +1,4 @@
+*%(basename)s:9: SyntaxError: Tail call expression is not allowed here
+ return class A extends continue f() {};
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-in.js b/deps/v8/test/message/syntactic-tail-call-in-for-in.js
new file mode 100644
index 0000000000..8ad7aca54a
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-for-in.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ for (var v in {a:0}) {
+ return continue f() ;
+ }
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-in.out b/deps/v8/test/message/syntactic-tail-call-in-for-in.out
new file mode 100644
index 0000000000..1bf52c48cb
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-for-in.out
@@ -0,0 +1,4 @@
+*%(basename)s:14: SyntaxError: Tail call expression in for-in/of body
+ return continue f() ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression in for-in/of body
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-of.js b/deps/v8/test/message/syntactic-tail-call-in-for-of.js
new file mode 100644
index 0000000000..7cd761f38e
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-for-of.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ for (var v of [1, 2, 3]) {
+ return continue f() ;
+ }
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-for-of.out b/deps/v8/test/message/syntactic-tail-call-in-for-of.out
new file mode 100644
index 0000000000..1bf52c48cb
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-for-of.out
@@ -0,0 +1,4 @@
+*%(basename)s:14: SyntaxError: Tail call expression in for-in/of body
+ return continue f() ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression in for-in/of body
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-and.js b/deps/v8/test/message/syntactic-tail-call-in-logical-and.js
new file mode 100644
index 0000000000..2c62ddcb21
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-logical-and.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ return continue f() && a ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-and.out b/deps/v8/test/message/syntactic-tail-call-in-logical-and.out
new file mode 100644
index 0000000000..c400f74628
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-logical-and.out
@@ -0,0 +1,4 @@
+*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
+ return continue f() && a ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-or.js b/deps/v8/test/message/syntactic-tail-call-in-logical-or.js
new file mode 100644
index 0000000000..6829bc629a
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-logical-or.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ return continue f() || a ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-logical-or.out b/deps/v8/test/message/syntactic-tail-call-in-logical-or.out
new file mode 100644
index 0000000000..4ced76118a
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-logical-or.out
@@ -0,0 +1,4 @@
+*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
+ return continue f() || a ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-subclass.js b/deps/v8/test/message/syntactic-tail-call-in-subclass.js
new file mode 100644
index 0000000000..ab788406d2
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-subclass.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function g() {
+ class A {};
+ class B extends A {
+ constructor() {
+ return continue f() ;
+ }
+ }
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-subclass.out b/deps/v8/test/message/syntactic-tail-call-in-subclass.out
new file mode 100644
index 0000000000..fff26cc59b
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-subclass.out
@@ -0,0 +1,4 @@
+*%(basename)s:12: SyntaxError: Tail call expression is not allowed here
+ return continue f() ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.js b/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.js
new file mode 100644
index 0000000000..3aa35a12b4
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ try {
+ f();
+ } catch(e) {
+ return continue f() ;
+ } finally {
+ f();
+ }
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out b/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out
new file mode 100644
index 0000000000..b488c15af2
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-try-catch-finally.out
@@ -0,0 +1,4 @@
+*%(basename)s:16: SyntaxError: Tail call expression in catch block when finally block is also present
+ return continue f() ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression in catch block when finally block is also present
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.js b/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.js
new file mode 100644
index 0000000000..5b000f1181
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ try {
+ try {
+ f();
+ } catch(e) {
+ return continue f() ;
+ }
+ } finally {
+ f();
+ }
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out b/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out
new file mode 100644
index 0000000000..bfc2692a27
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-try-try-catch-finally.out
@@ -0,0 +1,4 @@
+*%(basename)s:17: SyntaxError: Tail call expression in try block
+ return continue f() ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression in try block
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try.js b/deps/v8/test/message/syntactic-tail-call-in-try.js
new file mode 100644
index 0000000000..71662db877
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-try.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ try {
+ return continue f() ;
+ } catch(e) {
+ }
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-in-try.out b/deps/v8/test/message/syntactic-tail-call-in-try.out
new file mode 100644
index 0000000000..ed0b15cc61
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-in-try.out
@@ -0,0 +1,4 @@
+*%(basename)s:14: SyntaxError: Tail call expression in try block
+ return continue f() ;
+ ^^^^^^^^^^^^
+SyntaxError: Tail call expression in try block
diff --git a/deps/v8/test/message/syntactic-tail-call-inside-member-expr.js b/deps/v8/test/message/syntactic-tail-call-inside-member-expr.js
new file mode 100644
index 0000000000..9b85dd42bb
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-inside-member-expr.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ return (continue f(1)) (2) ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-inside-member-expr.out b/deps/v8/test/message/syntactic-tail-call-inside-member-expr.out
new file mode 100644
index 0000000000..10fd54db5d
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-inside-member-expr.out
@@ -0,0 +1,4 @@
+*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
+ return (continue f(1)) (2) ;
+ ^^^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/syntactic-tail-call-of-eval.js b/deps/v8/test/message/syntactic-tail-call-of-eval.js
new file mode 100644
index 0000000000..e69aa9c351
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-of-eval.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+
+function g() {
+ return continue eval ("f()") ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-of-eval.out b/deps/v8/test/message/syntactic-tail-call-of-eval.out
new file mode 100644
index 0000000000..06eeb78baf
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-of-eval.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: Tail call of a direct eval is not allowed
+ return continue eval ("f()") ;
+ ^^^^^^^^^^^^^
+SyntaxError: Tail call of a direct eval is not allowed
diff --git a/deps/v8/test/message/syntactic-tail-call-of-identifier.js b/deps/v8/test/message/syntactic-tail-call-of-identifier.js
new file mode 100644
index 0000000000..b3ca31df01
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-of-identifier.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function g(x) {
+ return continue x ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-of-identifier.out b/deps/v8/test/message/syntactic-tail-call-of-identifier.out
new file mode 100644
index 0000000000..393bbc657c
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-of-identifier.out
@@ -0,0 +1,4 @@
+*%(basename)s:9: SyntaxError: Unexpected expression inside tail call
+ return continue x ;
+ ^
+SyntaxError: Unexpected expression inside tail call
diff --git a/deps/v8/test/message/syntactic-tail-call-of-new.js b/deps/v8/test/message/syntactic-tail-call-of-new.js
new file mode 100644
index 0000000000..60adec7027
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-of-new.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+
+function f() {
+ return 1;
+}
+
+function g() {
+ return continue new f() ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-of-new.out b/deps/v8/test/message/syntactic-tail-call-of-new.out
new file mode 100644
index 0000000000..954e1ca0ec
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-of-new.out
@@ -0,0 +1,4 @@
+*%(basename)s:12: SyntaxError: Unexpected expression inside tail call
+ return continue new f() ;
+ ^^^^^^^
+SyntaxError: Unexpected expression inside tail call
diff --git a/deps/v8/test/message/syntactic-tail-call-sloppy.js b/deps/v8/test/message/syntactic-tail-call-sloppy.js
new file mode 100644
index 0000000000..3973fc6d18
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-sloppy.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+
+function g() {
+ return continue f() ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-sloppy.out b/deps/v8/test/message/syntactic-tail-call-sloppy.out
new file mode 100644
index 0000000000..74d9d53bb4
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-sloppy.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: Tail call expressions are not allowed in non-strict mode
+ return continue f() ;
+ ^^^^^^^^^^^^^
+SyntaxError: Tail call expressions are not allowed in non-strict mode
diff --git a/deps/v8/test/message/syntactic-tail-call-without-return.js b/deps/v8/test/message/syntactic-tail-call-without-return.js
new file mode 100644
index 0000000000..130f67dafc
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-without-return.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-explicit-tailcalls
+"use strict";
+
+function f() {
+ return 1;
+}
+
+function g() {
+ var x = continue f() ;
+}
diff --git a/deps/v8/test/message/syntactic-tail-call-without-return.out b/deps/v8/test/message/syntactic-tail-call-without-return.out
new file mode 100644
index 0000000000..0508fc3378
--- /dev/null
+++ b/deps/v8/test/message/syntactic-tail-call-without-return.out
@@ -0,0 +1,4 @@
+*%(basename)s:13: SyntaxError: Tail call expression is not allowed here
+ var x = continue f() ;
+ ^^^^^^^^^^^^^
+SyntaxError: Tail call expression is not allowed here
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 577b476637..620328e90d 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -86,13 +86,7 @@ class MessageTestSuite(testsuite.TestSuite):
if not string: return True
if not string.strip(): return True
return (string.startswith("==") or string.startswith("**") or
- string.startswith("ANDROID") or
- # These five patterns appear in normal Native Client output.
- string.startswith("DEBUG MODE ENABLED") or
- string.startswith("tools/nacl-run.py") or
- string.find("BYPASSING ALL ACL CHECKS") > 0 or
- string.find("Native Client module will be loaded") > 0 or
- string.find("NaClHostDescOpen:") > 0)
+ string.startswith("ANDROID"))
def IsFailureOutput(self, testcase):
output = testcase.output
diff --git a/deps/v8/test/mjsunit/arguments.js b/deps/v8/test/mjsunit/arguments.js
index 26eb38912a..97ec7cca6d 100644
--- a/deps/v8/test/mjsunit/arguments.js
+++ b/deps/v8/test/mjsunit/arguments.js
@@ -204,3 +204,70 @@ assertEquals(117, arg_set(0xFFFFFFFF));
}
assertTrue(%HasSloppyArgumentsElements(a));
})();
+
+(function testDeleteArguments() {
+ function f() { return arguments };
+ var args = f(1, 2);
+ assertEquals(1, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(2, args.length);
+
+ delete args[0];
+ assertEquals(undefined, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(2, args.length);
+
+ delete args[1];
+ assertEquals(undefined, args[0]);
+ assertEquals(undefined, args[1]);
+ assertEquals(2, args.length);
+})();
+
+(function testDeleteFastSloppyArguments() {
+ function f(a) { return arguments };
+ var args = f(1, 2);
+ assertEquals(1, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(2, args.length);
+
+ delete args[0];
+ assertEquals(undefined, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(2, args.length);
+
+ delete args[1];
+ assertEquals(undefined, args[0]);
+ assertEquals(undefined, args[1]);
+ assertEquals(2, args.length);
+})();
+
+(function testDeleteSlowSloppyArguments() {
+ var key = 10000;
+ function f(a) {
+ arguments[key] = key;
+ return arguments
+ };
+ var args = f(1, 2);
+ assertEquals(1, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(key, args[key]);
+ assertEquals(2, args.length);
+
+ delete args[0];
+ assertEquals(undefined, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(key, args[key]);
+ assertEquals(2, args.length);
+
+ delete args[1];
+ assertEquals(undefined, args[0]);
+ assertEquals(undefined, args[1]);
+ assertEquals(key, args[key]);
+ assertEquals(2, args.length);
+
+ delete args[key];
+ assertEquals(undefined, args[0]);
+ assertEquals(undefined, args[1]);
+ assertEquals(undefined, args[key]);
+ assertEquals(2, args.length);
+})();
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index 4c3a28f17a..865b8ba44f 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
+// Flags: --noalways-opt
// Test element kind of objects.
@@ -61,26 +62,6 @@ function assertKind(expected, obj, name_opt) {
assertEquals(expected, getKind(obj), name_opt);
}
-// Test: If a call site goes megamorphic, it retains the ability to
-// use allocation site feedback (if FLAG_allocation_site_pretenuring
-// is on).
-(function() {
- function bar(t, len) {
- return new t(len);
- }
-
- a = bar(Array, 10);
- a[0] = 3.5;
- b = bar(Array, 1);
- assertKind(elements_kind.fast_double, b);
- c = bar(Object, 3);
- b = bar(Array, 10);
- // TODO(mvstanton): re-enable when FLAG_allocation_site_pretenuring
- // is on in the build.
- // assertKind(elements_kind.fast_double, b);
-})();
-
-
// Test: ensure that crankshafted array constructor sites are deopted
// if another function is used.
(function() {
diff --git a/deps/v8/test/mjsunit/array-feedback.js b/deps/v8/test/mjsunit/array-feedback.js
index f0a859e67c..4eb922c2f7 100644
--- a/deps/v8/test/mjsunit/array-feedback.js
+++ b/deps/v8/test/mjsunit/array-feedback.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
+// Flags: --noalways-opt
var elements_kind = {
fast_smi_only : 'fast smi only elements',
diff --git a/deps/v8/test/mjsunit/array-indexing-receiver.js b/deps/v8/test/mjsunit/array-indexing-receiver.js
new file mode 100644
index 0000000000..d5f5a7692d
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-indexing-receiver.js
@@ -0,0 +1,632 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Ensure `Array.prototype.indexOf` functions correctly for numerous element
+// kinds and various exotic receiver types.
+
+var kIterCount = 1;
+var kTests = {
+ Array: {
+ FAST_ELEMENTS() {
+ var r = /foo/;
+ var s = new String("bar");
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var array = [r, s, p];
+ assertTrue(%HasFastObjectElements(array));
+ assertFalse(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(array.indexOf(p), 2);
+ assertEquals(array.indexOf(o), -1);
+ }
+ },
+
+ FAST_HOLEY_ELEMENTS() {
+ var r = /foo/;
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var array = [r, , p];
+ assertTrue(%HasFastObjectElements(array));
+ assertTrue(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(array.indexOf(p), 2);
+ assertEquals(array.indexOf(o), -1);
+ }
+ },
+
+ FAST_SMI_ELEMENTS() {
+ var array = [0, 88, 9999, 1, -5, 7];
+ assertTrue(%HasFastSmiElements(array));
+ assertFalse(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(array.indexOf(9999), 2);
+ assertEquals(array.indexOf(-5), 4);
+ assertEquals(array.indexOf(-5.00001), -1);
+ assertEquals(array.indexOf(undefined), -1);
+ assertEquals(array.indexOf(NaN), -1);
+ }
+ },
+
+ FAST_HOLEY_SMI_ELEMENTS() {
+ var array = [49, , , 72, , , 67, -48];
+ assertTrue(%HasFastSmiElements(array));
+ assertTrue(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(array.indexOf(72), 3);
+ assertEquals(array.indexOf(-48), 7);
+ assertEquals(array.indexOf(72, 4), -1);
+ assertEquals(array.indexOf(undefined), -1);
+ assertEquals(array.indexOf(undefined, -2), -1);
+ assertEquals(array.indexOf(NaN), -1);
+ }
+ },
+
+ FAST_DOUBLE_ELEMENTS() {
+ var array = [7.00000001, -13000.89412, 73451.4124,
+ 5824.48, 6.0000495, 48.3488, 44.0, 76.35, NaN, 78.4];
+ assertTrue(%HasFastDoubleElements(array));
+ assertFalse(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(array.indexOf(7.00000001), 0);
+ assertEquals(array.indexOf(7.00000001, 2), -1);
+ assertEquals(array.indexOf(NaN), -1);
+ assertEquals(array.indexOf(NaN, -1), -1);
+ assertEquals(array.indexOf(-13000.89412), 1);
+ assertEquals(array.indexOf(-13000.89412, -2), -1);
+ assertEquals(array.indexOf(undefined), -1);
+ }
+ },
+
+ FAST_HOLEY_DOUBLE_ELEMENTS() {
+ var array = [7.00000001, -13000.89412, ,
+ 5824.48, , 48.3488, , NaN, , 78.4];
+ assertTrue(%HasFastDoubleElements(array));
+ assertTrue(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(array.indexOf(7.00000001), 0);
+ assertEquals(array.indexOf(7.00000001, 2), -1);
+ assertEquals(array.indexOf(NaN), -1);
+ assertEquals(array.indexOf(NaN, -2), -1);
+ assertEquals(array.indexOf(-13000.89412), 1);
+ assertEquals(array.indexOf(-13000.89412, -2), -1);
+ assertEquals(array.indexOf(undefined, -2), -1);
+ assertEquals(array.indexOf(undefined, -1), -1);
+ }
+ },
+
+ DICTIONARY_ELEMENTS() {
+ var array = [];
+ Object.defineProperty(array, 4, { get() { return NaN; } });
+ Object.defineProperty(array, 7, { value: Function });
+
+ assertTrue(%HasDictionaryElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(array.indexOf(NaN), -1);
+ assertEquals(array.indexOf(NaN, -3), -1);
+ assertEquals(array.indexOf(Function), 7);
+ assertEquals(array.indexOf(undefined), -1);
+ assertEquals(array.indexOf(undefined, 7), -1);
+ }
+ },
+ },
+
+ Object: {
+ FAST_ELEMENTS() {
+ var r = /foo/;
+ var s = new String("bar");
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var object = { 0: r, 1: s, 2: p, length: 3 };
+ assertTrue(%HasFastObjectElements(object));
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertFalse(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(object, p), 2);
+ assertEquals(Array.prototype.indexOf.call(object, o), -1);
+ }
+ },
+
+ FAST_HOLEY_ELEMENTS() {
+ var r = /foo/;
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var object = { 0: r, 2: p, length: 3 };
+ assertTrue(%HasFastObjectElements(object));
+ assertTrue(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(object, p), 2);
+ assertEquals(Array.prototype.indexOf.call(object, o), -1);
+ }
+ },
+
+ FAST_SMI_ELEMENTS() {
+ var object = { 0: 0, 1: 88, 2: 9999, 3: 1, 4: -5, 5: 7, length: 6 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastSmiElements(object));
+ // assertFalse(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(object, 9999), 2);
+ assertEquals(Array.prototype.indexOf.call(object, -5), 4);
+ assertEquals(Array.prototype.indexOf.call(object, -5.00001), -1);
+ assertEquals(Array.prototype.indexOf.call(object, undefined), -1);
+ assertEquals(Array.prototype.indexOf.call(object, NaN), -1);
+ }
+ },
+
+ FAST_HOLEY_SMI_ELEMENTS() {
+ var object = { 0: 49, 3: 72, 6: 67, 7: -48, length: 8 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastSmiElements(object));
+ // assertTrue(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(object, 72), 3);
+ assertEquals(Array.prototype.indexOf.call(object, -48), 7);
+ assertEquals(Array.prototype.indexOf.call(object, 72, 4), -1);
+ assertEquals(Array.prototype.indexOf.call(object, undefined), -1);
+ assertEquals(Array.prototype.indexOf.call(object, undefined, -2), -1);
+ assertEquals(Array.prototype.indexOf.call(object, NaN), -1);
+ }
+ },
+
+ FAST_DOUBLE_ELEMENTS() {
+ var object = { 0: 7.00000001, 1: -13000.89412, 2: 73451.4124,
+ 3: 5824.48, 4: 6.0000495, 5: 48.3488, 6: 44.0, 7: 76.35,
+ 8: NaN, 9: 78.4, length: 10 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastDoubleElements(object));
+ // assertFalse(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(object, 7.00000001), 0);
+ assertEquals(Array.prototype.indexOf.call(object, 7.00000001, 2), -1);
+ assertEquals(Array.prototype.indexOf.call(object, NaN), -1);
+ assertEquals(Array.prototype.indexOf.call(object, NaN, -1), -1);
+ assertEquals(Array.prototype.indexOf.call(object, -13000.89412), 1);
+ assertEquals(Array.prototype.indexOf.call(object, -13000.89412, -2), -1);
+ assertEquals(Array.prototype.indexOf.call(object, undefined), -1);
+ }
+ },
+
+ FAST_HOLEY_DOUBLE_ELEMENTS() {
+ var object = { 0: 7.00000001, 1: -13000.89412, 3: 5824.48, 5: 48.3488,
+ 7: NaN, 9: 78.4, length: 10 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastDoubleElements(object));
+ // assertTrue(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(object, 7.00000001), 0);
+ assertEquals(Array.prototype.indexOf.call(object, 7.00000001, 2), -1);
+ assertEquals(Array.prototype.indexOf.call(object, NaN), -1);
+ assertEquals(Array.prototype.indexOf.call(object, NaN, -2), -1);
+ assertEquals(Array.prototype.indexOf.call(object, -13000.89412), 1);
+ assertEquals(Array.prototype.indexOf.call(object, -13000.89412, -2), -1);
+ assertEquals(Array.prototype.indexOf.call(object, undefined, -2), -1);
+ assertEquals(Array.prototype.indexOf.call(object, undefined, -1), -1);
+ }
+ },
+
+ DICTIONARY_ELEMENTS() {
+ var object = { length: 8 };
+ Object.defineProperty(object, 4, { get() { return NaN; } });
+ Object.defineProperty(object, 7, { value: Function });
+
+ assertTrue(%HasDictionaryElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(object, NaN), -1);
+ assertEquals(Array.prototype.indexOf.call(object, NaN, -3), -1);
+ assertEquals(Array.prototype.indexOf.call(object, Function), 7);
+ assertEquals(Array.prototype.indexOf.call(object, undefined), -1);
+ assertEquals(Array.prototype.indexOf.call(object, undefined, 7), -1);
+ }
+
+ (function prototypeModifiedDuringAccessor() {
+ function O() {
+ return {
+ __proto__: {},
+ get 0() {
+ this.__proto__.__proto__ = {
+ get 1() {
+ this[2] = "c";
+ return "b";
+ }
+ };
+ return "a";
+ },
+ length: 3
+ };
+ }
+
+ // Switch to slow path when first accessor modifies the prototype
+ assertEquals(Array.prototype.indexOf.call(O(), "a"), 0);
+ assertEquals(Array.prototype.indexOf.call(O(), "b"), 1);
+ assertEquals(Array.prototype.indexOf.call(O(), "c"), 2);
+
+      // Avoid switching to the slow path by skipping the accessor via fromIndex
+ assertEquals(Array.prototype.indexOf.call(O(), "c", 2), -1);
+ assertEquals(Array.prototype.indexOf.call(O(), "b", 1), -1);
+ assertEquals(Array.prototype.indexOf.call(O(), undefined, 1), 1);
+ });
+ },
+ },
+
+ String: {
+ FAST_STRING_ELEMENTS() {
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call("froyo", "y"), 3);
+ assertEquals(Array.prototype.indexOf.call("froyo", "y", -1), -1);
+ assertEquals(Array.prototype.indexOf.call("froyo", "y", -2), 3);
+ assertEquals(Array.prototype.indexOf.call("froyo", NaN), -1);
+ assertEquals(Array.prototype.indexOf.call("froyo", undefined), -1);
+ }
+ },
+
+ SLOW_STRING_ELEMENTS() {
+ var string = new String("froyo");
+
+ // Never accessible from A.p.indexOf as 'length' is not configurable
+ Object.defineProperty(string, 34, { value: NaN });
+ Object.defineProperty(string, 12, { get() { return "nope" } });
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call("froyo", "y"), 3);
+ assertEquals(Array.prototype.indexOf.call("froyo", "y", -1), -1);
+ assertEquals(Array.prototype.indexOf.call("froyo", "y", -2), 3);
+ assertEquals(Array.prototype.indexOf.call(string, NaN), -1);
+ assertEquals(Array.prototype.indexOf.call(string, undefined), -1);
+ assertEquals(Array.prototype.indexOf.call(string, "nope"), -1);
+ }
+ },
+ },
+
+ Arguments: {
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS() {
+ var args = (function(a, b) { return arguments; })("foo", NaN, "bar");
+ assertTrue(%HasSloppyArgumentsElements(args));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(args, undefined), -1);
+ assertEquals(Array.prototype.indexOf.call(args, NaN), -1);
+ assertEquals(Array.prototype.indexOf.call(args, NaN, -1), -1);
+ assertEquals(Array.prototype.indexOf.call(args, "bar", -1), 2);
+ }
+ },
+
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS() {
+ var args = (function(a, a) { return arguments; })("foo", NaN, "bar");
+ Object.defineProperty(args, 3, { get() { return "silver"; } });
+ Object.defineProperty(args, "length", { value: 4 });
+ assertTrue(%HasSloppyArgumentsElements(args));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertEquals(Array.prototype.indexOf.call(args, undefined), -1);
+ assertEquals(Array.prototype.indexOf.call(args, NaN), -1);
+ assertEquals(Array.prototype.indexOf.call(args, NaN, -2), -1);
+ assertEquals(Array.prototype.indexOf.call(args, "bar", -2), 2);
+ assertEquals(Array.prototype.indexOf.call(args, "silver", -1), 3);
+ }
+ }
+ },
+
+ TypedArray: {
+ Int8Array() {
+ var array = new Int8Array([-129, 128,
+ NaN /* 0 */, +0 /* 0 */, -0 /* 0 */,
+ +Infinity /* 0 */, -Infinity /* 0 */,
+ 255 /* -1 */, 127 /* 127 */, -255 /* 1 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -129), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 128), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 0, 2), 2);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, -1, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, -1, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 127, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, 127, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 1, 9), 9);
+ },
+
+ Detached_Int8Array() {
+ var array = new Int8Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Uint8Array() {
+ var array = new Uint8Array([-1, 256,
+ NaN /* 0 */, +0 /* 0 */, -0 /* 0 */,
+ +Infinity /* 0 */, -Infinity /* 0 */,
+ 255 /* 255 */, 257 /* 1 */, -128 /* 128 */,
+ -2 /* 254 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -1), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 256), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 0, 2), 2);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 255, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, 255, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 1, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, 1, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 128, 9), 9);
+ assertEquals(Array.prototype.indexOf.call(array, 128, 10), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 254, 10), 10);
+ },
+
+ Detached_Uint8Array() {
+ var array = new Uint8Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Uint8ClampedArray() {
+ var array = new Uint8ClampedArray([-1 /* 0 */, NaN /* 0 */, 256 /* 255 */,
+ 127.6 /* 128 */, 127.4 /* 127 */,
+ 121.5 /* 122 */, 124.5 /* 124 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -1), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 256), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 0), 0);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 1), 1);
+ assertEquals(Array.prototype.indexOf.call(array, 255, 2), 2);
+
+ assertEquals(Array.prototype.indexOf.call(array, 128, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, 128, 4), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 127, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 127, 5), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 122, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 122, 6), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 124, 6), 6);
+ },
+
+ Detached_Uint8ClampedArray() {
+ var array = new Uint8ClampedArray(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Int16Array() {
+ var array = new Int16Array([-32769, 32768,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFF /* -1 */, 30000 /* 30000 */,
+ 300000 /* -27680 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -32769), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 32768), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 0, 2), 2);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, -1, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, -1, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 30000, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, 30000, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, -27680, 9), 9);
+ },
+
+ Detached_Int16Array() {
+ var array = new Int16Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Uint16Array() {
+ var array = new Uint16Array([-1, 65536,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFF /* 65535 */, 300000 /* 37856 */,
+ 3000000 /* 50880 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -1), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 65536), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 0, 2), 2);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 65535, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, 65535, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 37856, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, 37856, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 50880, 9), 9);
+ },
+
+ Detached_Uint16Array() {
+ var array = new Uint16Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Int32Array() {
+ var array = new Int32Array([-2147483649, 2147483648,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* -1 */, 4294968064 /* 768 */,
+ 4294959447 /* -7849 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -2147483649), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 2147483648), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 2), 2);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, -1, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, -1, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 768, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, 768, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, -7849, 9), 9);
+ },
+
+ Detached_Int32Array() {
+ var array = new Int32Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Uint32Array() {
+ var array = new Uint32Array([-1, 4294967296,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* 4294967295 */,
+ 4294968064 /* 768 */,
+ 4295079447 /* 112151 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -1), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 4294967296), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 2), 2);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 4294967295, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, 4294967295, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 768, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, 768, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 112151, 9), 9);
+ },
+
+ Detached_Uint32Array() {
+ var array = new Uint32Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Float32Array() {
+ var array = new Float32Array([-1, 4294967296,
+ NaN, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* 34359738368.0 */,
+ -4294968064 /* -4294968320.0 */,
+ 4295079447 /* 4295079424.0 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -1.0), 0);
+ assertEquals(Array.prototype.indexOf.call(array, 4294967296), 1);
+
+ assertEquals(Array.prototype.indexOf.call(array, NaN, 2), -1);
+ assertEquals(Array.prototype.indexOf.call(array, Infinity, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, -Infinity, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 34359738368.0, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, 34359738368.0, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, -4294968320.0, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, -4294968320.0, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 4295079424.0, 9), 9);
+ },
+
+ Detached_Float32Array() {
+ var array = new Float32Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+
+ Float64Array() {
+ var array = new Float64Array([-1, 4294967296,
+ NaN, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* 34359738367.0 */,
+ -4294968064 /* -4294968064.0 */,
+ 4295079447 /* 4295079447.0 */]);
+ assertEquals(Array.prototype.indexOf.call(array, -1.0), 0);
+ assertEquals(Array.prototype.indexOf.call(array, 4294967296), 1);
+
+ assertEquals(Array.prototype.indexOf.call(array, NaN, 2), -1);
+ assertEquals(Array.prototype.indexOf.call(array, Infinity, 3), 3);
+ assertEquals(Array.prototype.indexOf.call(array, -Infinity, 4), 4);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 5), 5);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 6), 6);
+ assertEquals(Array.prototype.indexOf.call(array, 0.0, 7), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 34359738367.0, 7), 7);
+ assertEquals(Array.prototype.indexOf.call(array, 34359738367.0, 8), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, -4294968064.0, 8), 8);
+ assertEquals(Array.prototype.indexOf.call(array, -4294968064.0, 9), -1);
+
+ assertEquals(Array.prototype.indexOf.call(array, 4295079447.0, 9), 9);
+ },
+
+ Detached_Float64Array() {
+    var array = new Float64Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertEquals(Array.prototype.indexOf.call(array, 0), -1);
+ assertEquals(Array.prototype.indexOf.call(array, 0, 10), -1);
+ },
+ }
+};
+
+function runSuites(suites) {
+ Object.keys(suites).forEach(suite => runSuite(suites[suite]));
+
+ function runSuite(suite) {
+ Object.keys(suite).forEach(test => suite[test]());
+ }
+}
+
+runSuites(kTests);
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index e1624553f4..cd9d96bcf4 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
+// Flags: --ignition-osr --turbo-from-bytecode
// IC and Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
diff --git a/deps/v8/test/mjsunit/array-push.js b/deps/v8/test/mjsunit/array-push.js
index c87fd128e4..ece3319bc1 100644
--- a/deps/v8/test/mjsunit/array-push.js
+++ b/deps/v8/test/mjsunit/array-push.js
@@ -146,3 +146,16 @@
f(a, {});
assertEquals(10, a.f());
})();
+
+
+(function testDoubleArrayPush() {
+ var a = [];
+ var max = 1000;
+ for (var i = 0; i < max; i++) {
+ a.push(i + 0.1);
+ }
+ assertEquals(max, a.length);
+ for (var i = 0; i < max; i++) {
+ assertEquals(i+0.1, a[i]);
+ }
+})();
diff --git a/deps/v8/test/mjsunit/array-push7.js b/deps/v8/test/mjsunit/array-push7.js
deleted file mode 100644
index 68c3a2a76e..0000000000
--- a/deps/v8/test/mjsunit/array-push7.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-object-observe
-// Flags: --allow-natives-syntax
-
-var v = 0;
-
-function push_wrapper(array, value) {
- array.push(value);
-}
-function pop_wrapper(array) {
- return array.pop();
-}
-
-// Test that Object.observe() notification events are properly sent from
-// Array.push() and Array.pop() both from optimized and un-optimized code.
-var array = [];
-
-function somethingChanged(changes) {
- v++;
-}
-
-Object.observe(array, somethingChanged);
-push_wrapper(array, 1);
-%RunMicrotasks();
-assertEquals(1, array.length);
-assertEquals(1, v);
-push_wrapper(array, 1);
-%RunMicrotasks();
-assertEquals(2, array.length);
-assertEquals(2, v);
-%OptimizeFunctionOnNextCall(push_wrapper);
-push_wrapper(array, 1);
-%RunMicrotasks();
-assertEquals(3, array.length);
-assertEquals(3, v);
-push_wrapper(array, 1);
-%RunMicrotasks();
-assertEquals(4, array.length);
-assertEquals(4, v);
-
-pop_wrapper(array);
-%RunMicrotasks();
-assertEquals(3, array.length);
-assertEquals(5, v);
-pop_wrapper(array);
-%RunMicrotasks();
-assertEquals(2, array.length);
-assertEquals(6, v);
-%OptimizeFunctionOnNextCall(pop_wrapper);
-pop_wrapper(array);
-%RunMicrotasks();
-assertEquals(1, array.length);
-assertEquals(7, v);
-pop_wrapper(array);
-%RunMicrotasks();
-assertEquals(0, array.length);
-assertEquals(8, v);
diff --git a/deps/v8/test/mjsunit/array-slice.js b/deps/v8/test/mjsunit/array-slice.js
index ae0e3bc1ef..b017dd506a 100644
--- a/deps/v8/test/mjsunit/array-slice.js
+++ b/deps/v8/test/mjsunit/array-slice.js
@@ -228,6 +228,7 @@
func([]);
func(['a'], 'a');
func(['a', 1], 'a', 1);
+ func(['a', 1, 2, 3, 4, 5], 'a', 1, 2, 3, 4, 5);
func(['a', 1, undefined], 'a', 1, undefined);
func(['a', 1, undefined, void(0)], 'a', 1, undefined, void(0));
})();
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index ae9f6efa66..fdd2333d7c 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -479,3 +479,68 @@ function TestSortOnProxy() {
}
}
TestSortOnProxy();
+
+
+// Test special prototypes
+(function testSortSpecialPrototypes() {
+ function test(proto, length, expected) {
+ var result = {
+ length: length,
+ __proto__: proto,
+ };
+ Array.prototype.sort.call(result);
+ assertEquals(expected.length, result.length, "result.length");
+ for (var i = 0; i<expected.length; i++) {
+ assertEquals(expected[i], result[i], "result["+i+"]");
+ }
+ }
+
+ (function fast() {
+ // Fast elements, non-empty
+ test(arguments, 0, []);
+ test(arguments, 1, [2]);
+ test(arguments, 2, [1, 2]);
+ test(arguments, 4, [1, 2, 3, 4]);
+    delete arguments[0];
+    // sort copies the properties down to the receiver, so result[1] is
+    // read from the arguments object through the hole on the receiver.
+ test(arguments, 2, [1, 1]);
+ arguments[0] = undefined;
+ test(arguments, 2, [1, undefined]);
+ })(2, 1, 4, 3);
+
+ (function fastSloppy(a) {
+ // Fast sloppy
+ test(arguments, 0, []);
+ test(arguments, 1, [2]);
+ test(arguments, 2, [1, 2]);
+    delete arguments[0];
+ test(arguments, 2, [1, 1]);
+ arguments[0] = undefined;
+ test(arguments, 2, [1, undefined]);
+ })(2, 1);
+
+ (function fastEmpty() {
+ test(arguments, 0, []);
+ test(arguments, 1, [undefined]);
+ test(arguments, 2, [undefined, undefined]);
+ })();
+
+ (function stringWrapper() {
+ // cannot redefine string wrapper properties
+ assertThrows(() => test(new String('cba'), 3, []), TypeError);
+ })();
+
+  (function typedArrays() {
+ test(new Int32Array(0), 0, []);
+ test(new Int32Array(1), 1, [0]);
+ var array = new Int32Array(3);
+ array[0] = 2;
+ array[1] = 1;
+ array[2] = 3;
+ test(array, 1, [2]);
+ test(array, 2, [1, 2]);
+ test(array, 3, [1, 2, 3]);
+  })();
+
+})();
diff --git a/deps/v8/test/mjsunit/array-splice.js b/deps/v8/test/mjsunit/array-splice.js
index 744e95454b..75ff2d174b 100644
--- a/deps/v8/test/mjsunit/array-splice.js
+++ b/deps/v8/test/mjsunit/array-splice.js
@@ -300,6 +300,55 @@
}
})();
+// Check the behaviour when approaching maximal values for length.
+(function() {
+ for (var i = 0; i < 7; i++) {
+ try {
+ new Array(Math.pow(2, 32) - 3).splice(-1, 0, 1, 2, 3, 4, 5);
+ throw 'Should have thrown RangeError';
+ } catch (e) {
+ assertTrue(e instanceof RangeError);
+ }
+
+ // Check smi boundary
+ var bigNum = (1 << 30) - 3;
+ var array = new Array(bigNum);
+ array.splice(-1, 0, 1, 2, 3, 4, 5, 6, 7);
+ assertEquals(bigNum + 7, array.length);
+ }
+})();
+
+(function() {
+ for (var i = 0; i < 7; i++) {
+ var a = [7, 8, 9];
+ a.splice(0, 0, 1, 2, 3, 4, 5, 6);
+ assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], a);
+ assertFalse(a.hasOwnProperty(10), "a.hasOwnProperty(10)");
+ assertEquals(undefined, a[10]);
+ }
+})();
+
+(function testSpliceDeleteDouble() {
+ var a = [1.1, 1.2, 1.3, 1.4];
+ a.splice(2, 1)
+ assertEquals([1.1, 1.2, 1.4], a);
+})();
+
+// Past this point the ArrayProtector is invalidated since we modify the
+// Array.prototype.
+
+// Check the case of JS builtin .splice()
+(function() {
+ for (var i = 0; i < 7; i++) {
+ var array = [1, 2, 3, 4];
+ Array.prototype[3] = 'foo'; // To force JS builtin.
+
+ var spliced = array.splice();
+
+ assertEquals([], spliced);
+ assertEquals([1, 2, 3, 4], array);
+ }
+})();
// Now check the case with array of holes and some elements on prototype.
(function() {
@@ -350,7 +399,6 @@
}
})();
-
// Now check the case with array of holes and some elements on prototype.
(function() {
var len = 9;
@@ -397,46 +445,3 @@
"array.hasOwnProperty(Math.pow(2, 32) - 2)");
}
})();
-
-
-// Check the case of JS builtin .splice()
-(function() {
- for (var i = 0; i < 7; i++) {
- var array = [1, 2, 3, 4];
- Array.prototype[3] = 'foo'; // To force JS builtin.
-
- var spliced = array.splice();
-
- assertEquals([], spliced);
- assertEquals([1, 2, 3, 4], array);
- }
-})();
-
-
-// Check the behaviour when approaching maximal values for length.
-(function() {
- for (var i = 0; i < 7; i++) {
- try {
- new Array(Math.pow(2, 32) - 3).splice(-1, 0, 1, 2, 3, 4, 5);
- throw 'Should have thrown RangeError';
- } catch (e) {
- assertTrue(e instanceof RangeError);
- }
-
- // Check smi boundary
- var bigNum = (1 << 30) - 3;
- var array = new Array(bigNum);
- array.splice(-1, 0, 1, 2, 3, 4, 5, 6, 7);
- assertEquals(bigNum + 7, array.length);
- }
-})();
-
-(function() {
- for (var i = 0; i < 7; i++) {
- var a = [7, 8, 9];
- a.splice(0, 0, 1, 2, 3, 4, 5, 6);
- assertEquals([1, 2, 3, 4, 5, 6, 7, 8, 9], a);
- assertFalse(a.hasOwnProperty(10), "a.hasOwnProperty(10)");
- assertEquals(undefined, a[10]);
- }
-})();
diff --git a/deps/v8/test/mjsunit/array-tostring.js b/deps/v8/test/mjsunit/array-tostring.js
index 5be3d5045c..382bf8d7a0 100644
--- a/deps/v8/test/mjsunit/array-tostring.js
+++ b/deps/v8/test/mjsunit/array-tostring.js
@@ -125,7 +125,9 @@ var la1 = [1, [2, 3], 4];
assertEquals("1,2,3,4", la1.toLocaleString());
// Used on a string (which looks like an array of characters).
-String.prototype.toLocaleString = Array.prototype.toLocaleString;
+String.prototype.toLocaleString = function() {
+ return (this.length == 1) ? this : Array.prototype.toLocaleString.call(this);
+}
assertEquals("1,2,3,4", "1234".toLocaleString());
// If toLocaleString of element is not callable, throw a TypeError.
@@ -157,3 +159,23 @@ for (var i = 0; i < 3; i++) {
}
Number.prototype.arrayToLocaleString = Array.prototype.toLocaleString;
assertEquals("42,42,42", (42).arrayToLocaleString());
+
+
+(function TestToLocaleStringCalls() {
+ let log = [];
+ let pushArgs = (label) => (...args) => log.push(label, args);
+
+ let NumberToLocaleString = Number.prototype.toLocaleString;
+ let StringToLocaleString = String.prototype.toLocaleString;
+ let ObjectToLocaleString = Object.prototype.toLocaleString;
+ Number.prototype.toLocaleString = pushArgs("Number");
+ String.prototype.toLocaleString = pushArgs("String");
+ Object.prototype.toLocaleString = pushArgs("Object");
+
+ [42, "foo", {}].toLocaleString();
+ assertEquals(["Number", [], "String", [], "Object", []], log);
+
+ Number.prototype.toLocaleString = NumberToLocaleString;
+ String.prototype.toLocaleString = StringToLocaleString;
+ Object.prototype.toLocaleString = ObjectToLocaleString;
+})();
diff --git a/deps/v8/test/mjsunit/asm-directive.js b/deps/v8/test/mjsunit/asm-directive.js
new file mode 100644
index 0000000000..a308f43f6f
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm-directive.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Realm.eval(Realm.current(), '"use asm"');
+function f() { "use asm" }
+() => "use asm"
+if (true) "use asm"
+with ({}) "use asm"
+try { } catch (e) { "use asm" }
+Realm.eval(Realm.current(), 'eval(\'"use asm"\')');
diff --git a/deps/v8/test/mjsunit/asm/asm-validation.js b/deps/v8/test/mjsunit/asm/asm-validation.js
new file mode 100644
index 0000000000..eae282ca57
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/asm-validation.js
@@ -0,0 +1,215 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+function assertValidAsm(func) {
+ assertTrue(%IsAsmWasmCode(func));
+}
+
+(function TestModuleArgs() {
+ function Module1(stdlib) {
+ "use asm";
+ function foo() { }
+ return { foo: foo };
+ }
+ function Module2(stdlib, ffi) {
+ "use asm";
+ function foo() { }
+ return { foo: foo };
+ }
+ function Module3(stdlib, ffi, heap) {
+ "use asm";
+ function foo() { }
+ return { foo: foo };
+ }
+ var modules = [Module1, Module2, Module3];
+ var heap = new ArrayBuffer(1024 * 1024);
+ for (var i = 0; i < modules.length; ++i) {
+ print('Module' + (i + 1));
+ var module = modules[i];
+ var m = module();
+ assertValidAsm(module);
+ var m = module({});
+ assertValidAsm(module);
+ var m = module({}, {});
+ assertValidAsm(module);
+ var m = module({}, {}, heap);
+ assertValidAsm(module);
+ var m = module({}, {}, heap, {});
+ assertValidAsm(module);
+ }
+})();
+
+(function TestBadModule() {
+ function Module(stdlib, ffi, heap) {
+ "use asm";
+ function foo() { var y = 3; var x = 1 + y; return 123; }
+ return { foo: foo };
+ }
+ var m = Module({});
+ assertTrue(%IsNotAsmWasmCode(Module));
+ assertEquals(123, m.foo());
+})();
+
+(function TestBadArgTypes() {
+ function Module(a, b, c) {
+ "use asm";
+ var NaN = a.NaN;
+ return {};
+ }
+ var m = Module(1, 2, 3);
+ assertTrue(%IsNotAsmWasmCode(Module));
+ assertEquals({}, m);
+})();
+
+(function TestBadArgTypesMismatch() {
+ function Module(a, b, c) {
+ "use asm";
+ var NaN = a.NaN;
+ return {};
+ }
+ var m = Module(1, 2);
+ assertTrue(%IsNotAsmWasmCode(Module));
+ assertEquals({}, m);
+})();
+
+(function TestModuleNoStdlib() {
+ function Module() {
+ "use asm";
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ var m = Module({});
+ assertValidAsm(Module);
+ assertEquals(123, m.foo());
+})();
+
+(function TestModuleWith5() {
+ function Module(a, b, c, d, e) {
+ "use asm";
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ var heap = new ArrayBuffer(1024 * 1024);
+ var m = Module({}, {}, heap);
+ assertTrue(%IsNotAsmWasmCode(Module));
+ assertEquals(123, m.foo());
+})();
+
+(function TestModuleNoStdlibCall() {
+ function Module(stdlib, ffi, heap) {
+ "use asm";
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ var m = Module();
+ assertValidAsm(Module);
+ assertEquals(123, m.foo());
+})();
+
+(function TestModuleNew() {
+ function Module(stdlib, ffi, heap) {
+ "use asm";
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ var m = new Module({}, {});
+ assertValidAsm(Module);
+ assertEquals(123, m.foo());
+})();
+
+(function TestMultipleFailures() {
+ function Module(stdlib) {
+ "use asm";
+ var NaN = stdlib.NaN;
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ var m1 = Module(1, 2, 3);
+ assertTrue(%IsNotAsmWasmCode(Module));
+ var m2 = Module(1, 2, 3);
+ assertTrue(%IsNotAsmWasmCode(Module));
+ assertEquals(123, m1.foo());
+ assertEquals(123, m2.foo());
+})();
+
+(function TestFailureThenSuccess() {
+ function MkModule() {
+ function Module(stdlib, ffi, heap) {
+ "use asm";
+ var NaN = stdlib.NaN;
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ return Module;
+ }
+ var Module1 = MkModule();
+ var Module2 = MkModule();
+ var heap = new ArrayBuffer(1024 * 1024);
+ var m1 = Module1(1, 2, 3);
+ assertTrue(%IsNotAsmWasmCode(Module1));
+ var m2 = Module2({}, {}, heap);
+ assertTrue(%IsNotAsmWasmCode(Module2));
+ assertEquals(123, m1.foo());
+ assertEquals(123, m2.foo());
+})();
+
+(function TestSuccessThenFailure() {
+ function MkModule() {
+ function Module(stdlib, ffi, heap) {
+ "use asm";
+ var NaN = stdlib.NaN;
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ return Module;
+ }
+ var Module1 = MkModule();
+ var Module2 = MkModule();
+ var heap = new ArrayBuffer(1024 * 1024);
+ var m1 = Module1({NaN: NaN}, {}, heap);
+ assertValidAsm(Module1);
+ var m2 = Module2(1, 2, 3);
+ assertTrue(%IsNotAsmWasmCode(Module2));
+ assertEquals(123, m1.foo());
+ assertEquals(123, m2.foo());
+})();
+
+(function TestSuccessThenFailureThenRetry() {
+ function MkModule() {
+ function Module(stdlib, ffi, heap) {
+ "use asm";
+ var NaN = stdlib.NaN;
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ return Module;
+ }
+ var Module1 = MkModule();
+ var Module2 = MkModule();
+ var heap = new ArrayBuffer(1024 * 1024);
+ var m1a = Module1({NaN: NaN}, {}, heap);
+ assertValidAsm(Module1);
+ var m2 = Module2(1, 2, 3);
+ assertTrue(%IsNotAsmWasmCode(Module2));
+ var m1b = Module1({NaN: NaN}, {}, heap);
+ assertTrue(%IsNotAsmWasmCode(Module1));
+ assertEquals(123, m1a.foo());
+ assertEquals(123, m1b.foo());
+ assertEquals(123, m2.foo());
+})();
+
+(function TestBoundFunction() {
+ function Module(stdlib, ffi, heap) {
+ "use asm";
+ function foo() { return 123; }
+ return { foo: foo };
+ }
+ var heap = new ArrayBuffer(1024 * 1024);
+ var ModuleBound = Module.bind(this, {}, {}, heap);
+ var m = ModuleBound();
+ assertValidAsm(Module);
+ assertEquals(123, m.foo());
+})();
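Context for asm-validation.js: every module above follows the canonical asm.js shape, a function taking (stdlib, foreign, heap) with a "use asm" prologue. A minimal sketch of a module that validates (the %IsAsmWasmCode checks are V8-shell-only; as plain JavaScript the module runs identically):

function SqrtModule(stdlib, foreign, heap) {
  "use asm";
  var sqrt = stdlib.Math.sqrt;
  function root(x) {
    x = +x;           // annotates the parameter as a double
    return +sqrt(x);  // annotates the return type as a double
  }
  return { root: root };
}
var sm = SqrtModule(this, {}, new ArrayBuffer(64 * 1024));
console.log(sm.root(16)); // 4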
diff --git a/deps/v8/test/mjsunit/asm/construct-double.js b/deps/v8/test/mjsunit/asm/construct-double.js
deleted file mode 100644
index 8bb5000082..0000000000
--- a/deps/v8/test/mjsunit/asm/construct-double.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-var stdlib = this;
-var foreign = {};
-var heap = new ArrayBuffer(64 * 1024);
-
-
-var m = (function(stdlib, foreign, heap) {
- "use asm";
- function cd1(i, j) {
- i = i|0;
- j = j|0;
- return +%_ConstructDouble(i, j);
- }
- function cd2(i) {
- i = i|0;
- return +%_ConstructDouble(0, i);
- }
- return { cd1: cd1, cd2: cd2 };
-})(stdlib, foreign, heap);
-
-assertEquals(0.0, m.cd1(0, 0));
-assertEquals(%ConstructDouble(0, 1), m.cd2(1));
-for (var i = -2147483648; i < 2147483648; i += 3999773) {
- assertEquals(%ConstructDouble(0, i), m.cd2(i));
- for (var j = -2147483648; j < 2147483648; j += 3999773) {
- assertEquals(%ConstructDouble(i, j), m.cd1(i, j));
- }
-}
diff --git a/deps/v8/test/mjsunit/asm/double-hi.js b/deps/v8/test/mjsunit/asm/double-hi.js
deleted file mode 100644
index 5a5f942f7b..0000000000
--- a/deps/v8/test/mjsunit/asm/double-hi.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-var stdlib = this;
-var foreign = {};
-var heap = new ArrayBuffer(64 * 1024);
-
-
-var m = (function(stdlib, foreign, heap) {
- "use asm";
- function hi1(i) {
- i = +i;
- return %_DoubleHi(i)|0;
- }
- function hi2(i, j) {
- i = +i;
- j = +j;
- return %_DoubleHi(i)+%_DoubleHi(j)|0;
- }
- return { hi1: hi1, hi2: hi2 };
-})(stdlib, foreign, heap);
-
-assertEquals(0, m.hi1(0.0));
-assertEquals(-2147483648, m.hi1(-0.0));
-assertEquals(2146435072, m.hi1(Infinity));
-assertEquals(-1048576, m.hi1(-Infinity));
-assertEquals(0, m.hi2(0.0, 0.0));
-assertEquals(-2147483648, m.hi2(0.0, -0.0));
-assertEquals(-2147483648, m.hi2(-0.0, 0.0));
-assertEquals(0, m.hi2(-0.0, -0.0));
-for (var i = -2147483648; i < 2147483648; i += 3999773) {
- assertEquals(%_DoubleHi(i), m.hi1(i));
- assertEquals(i, m.hi1(%ConstructDouble(i, 0)));
- assertEquals(i, m.hi1(%ConstructDouble(i, i)));
- assertEquals(i+i|0, m.hi2(%ConstructDouble(i, 0), %ConstructDouble(i, 0)));
- assertEquals(i+i|0, m.hi2(%ConstructDouble(i, i), %ConstructDouble(i, i)));
-}
diff --git a/deps/v8/test/mjsunit/asm/double-lo.js b/deps/v8/test/mjsunit/asm/double-lo.js
deleted file mode 100644
index 39d5b5268f..0000000000
--- a/deps/v8/test/mjsunit/asm/double-lo.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-var stdlib = this;
-var foreign = {};
-var heap = new ArrayBuffer(64 * 1024);
-
-
-var m = (function(stdlib, foreign, heap) {
- "use asm";
- function lo1(i) {
- i = +i;
- return %_DoubleLo(i)|0;
- }
- function lo2(i, j) {
- i = +i;
- j = +j;
- return %_DoubleLo(i)+%_DoubleLo(j)|0;
- }
- return { lo1: lo1, lo2: lo2 };
-})(stdlib, foreign, heap);
-
-assertEquals(0, m.lo1(0.0));
-assertEquals(0, m.lo1(-0.0));
-assertEquals(0, m.lo1(Infinity));
-assertEquals(0, m.lo1(-Infinity));
-assertEquals(0, m.lo2(0.0, 0.0));
-assertEquals(0, m.lo2(0.0, -0.0));
-assertEquals(0, m.lo2(-0.0, 0.0));
-assertEquals(0, m.lo2(-0.0, -0.0));
-for (var i = -2147483648; i < 2147483648; i += 3999773) {
- assertEquals(%_DoubleLo(i), m.lo1(i));
- assertEquals(i, m.lo1(%ConstructDouble(0, i)));
- assertEquals(i, m.lo1(%ConstructDouble(i, i)));
- assertEquals(i+i|0, m.lo2(%ConstructDouble(0, i), %ConstructDouble(0, i)));
- assertEquals(i+i|0, m.lo2(%ConstructDouble(i, i), %ConstructDouble(i, i)));
-}
diff --git a/deps/v8/test/mjsunit/asm/load-elimination.js b/deps/v8/test/mjsunit/asm/load-elimination.js
new file mode 100644
index 0000000000..cdc996222e
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/load-elimination.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var stdlib = this;
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024 * 1024);
+
+var foo = (function(stdlib, foreign, heap) {
+ "use asm";
+ var M16 = new Int16Array(heap);
+ var M32 = new Int32Array(heap);
+ function foo() {
+ M32[0] = 0x12341234;
+ var i = M32[0];
+ return M16[0];
+ }
+ return foo;
+})(stdlib, foreign, heap);
+
+assertEquals(0x1234, foo());
+assertEquals(0x1234, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0x1234, foo());
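The expected value 0x1234 in load-elimination.js assumes little-endian layout: M16 and M32 alias the same buffer, and the low half-word of 0x12341234 lands at offset 0. A sketch of the aliasing outside asm.js:

const buf = new ArrayBuffer(4);
new Int32Array(buf)[0] = 0x12341234;
// On little-endian hardware the first 16-bit lane holds the low half-word.
console.log(new Int16Array(buf)[0].toString(16)); // "1234"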
diff --git a/deps/v8/test/mjsunit/call-counts.js b/deps/v8/test/mjsunit/call-counts.js
deleted file mode 100644
index 1ad62ba5e7..0000000000
--- a/deps/v8/test/mjsunit/call-counts.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --noalways-opt
-
-// We disable vector store ICs because slot indices change when this option
-// is enabled.
-
-// Locations in the type feedback vector where call counts are maintained for
-// the two calls made from bar();
-
-(function() {
- const kFooCallExtraIndex = 5;
- const kArrayCallExtraIndex = 7;
-
- function GetCallCount(func, slot) {
- var vector = %GetTypeFeedbackVector(func);
- // Call counts are recorded doubled.
- var value = %FixedArrayGet(vector, slot);
- return Math.floor(value / 2);
- }
-
- function foo(a) { return a[3] * 16; }
-
- function bar(a) {
- var result = 0;
- for (var i = 0; i < 10; i++) {
- result = foo(a);
- if (i % 2 === 0) {
- var r = Array();
- r[0] = 1;
- result += r[0];
- }
- }
- return result;
- }
-
- var a = [1, 2, 3];
- bar(a);
- assertEquals(10, GetCallCount(bar, kFooCallExtraIndex));
- assertEquals(5, GetCallCount(bar, kArrayCallExtraIndex));
-
- %OptimizeFunctionOnNextCall(bar);
- bar(a);
-})();
diff --git a/deps/v8/test/mjsunit/callsite.js b/deps/v8/test/mjsunit/callsite.js
deleted file mode 100644
index a4d9455b32..0000000000
--- a/deps/v8/test/mjsunit/callsite.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-Error.prepareStackTrace = (e,s) => s;
-var constructor = Error().stack[0].constructor;
-
-// Second argument needs to be a function.
-assertThrows(()=>constructor({}, {}, 1, false), TypeError);
-
-var receiver = {};
-function f() {}
-
-var site = constructor.call(null, receiver, f, {valueOf() { return 0 }}, false);
-assertEquals(receiver, site.getThis());
-assertEquals(1, site.getLineNumber());
-assertEquals(1, site.getColumnNumber());
diff --git a/deps/v8/test/mjsunit/compiler/accessor-exceptions1.js b/deps/v8/test/mjsunit/compiler/accessor-exceptions1.js
new file mode 100644
index 0000000000..716d229aba
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/accessor-exceptions1.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {}
+Object.defineProperty(o, "x", {
+ get: function() { throw 7; }
+});
+
+function foo(o) {
+ var x = 1;
+ try { o.x; } catch (e) { x = e; }
+ return x;
+}
+
+assertEquals(7, foo(o));
+assertEquals(7, foo(o));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(7, foo(o));
diff --git a/deps/v8/test/mjsunit/compiler/accessor-exceptions2.js b/deps/v8/test/mjsunit/compiler/accessor-exceptions2.js
new file mode 100644
index 0000000000..ed6e3e21c0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/accessor-exceptions2.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {}
+Object.defineProperty(o, "x", {
+ set: function(v) { throw 7; }
+});
+
+function foo(o) {
+ var x = 1;
+ try { o.x = 2; } catch (e) { x = e; }
+ return x;
+}
+
+assertEquals(7, foo(o));
+assertEquals(7, foo(o));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(7, foo(o));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors1.js b/deps/v8/test/mjsunit/compiler/deopt-accessors1.js
new file mode 100644
index 0000000000..3589258656
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors1.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {v:1};
+var deopt = false;
+
+Object.defineProperty(o, "x", {
+ get: function() { return this.v; },
+ set: function(v) {
+ this.v = v;
+ if (deopt) {
+ %DeoptimizeFunction(foo);
+ }
+ }
+});
+
+function foo(o) {
+ return o.x++;
+}
+
+assertEquals(1, foo(o));
+assertEquals(2, foo(o));
+%OptimizeFunctionOnNextCall(foo);
+deopt = true;
+assertEquals(3, foo(o));
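What deopt-accessors1.js relies on: with an accessor pair installed, o.x++ expands to a getter call, a numeric conversion, a setter call with the incremented value, and finally yields the old value, so the setter is the natural place to force the deopt. A sketch of that expansion:

const trace = [];
const counter = {
  get x() { trace.push("get"); return 5; },
  set x(v) { trace.push("set " + v); }
};
console.log(counter.x++); // 5 -- postfix yields the pre-increment value
console.log(trace);       // ["get", "set 6"]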
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors2.js b/deps/v8/test/mjsunit/compiler/deopt-accessors2.js
new file mode 100644
index 0000000000..74d41397bf
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors2.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {v:1};
+var deopt = false;
+
+Object.defineProperty(o, "x", {
+ get: function() { return this.v; },
+ set: function(v) {
+ this.v = v;
+ if (deopt) {
+ %DeoptimizeFunction(foo);
+ }
+ }
+});
+
+function foo(o) {
+ return ++o.x;
+}
+
+assertEquals(2, foo(o));
+assertEquals(3, foo(o));
+%OptimizeFunctionOnNextCall(foo);
+deopt = true;
+assertEquals(4, foo(o));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors3.js b/deps/v8/test/mjsunit/compiler/deopt-accessors3.js
new file mode 100644
index 0000000000..035cf2b359
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors3.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {v:1};
+var deopt = false;
+
+Object.defineProperty(o, "x", {
+ get: function() { return this.v; },
+ set: function(v) {
+ this.v = v;
+ if (deopt) {
+ %DeoptimizeFunction(foo);
+ }
+ }
+});
+
+function foo(o) {
+ var x = "x";
+ return o[x]++;
+}
+
+assertEquals(1, foo(o));
+assertEquals(2, foo(o));
+%OptimizeFunctionOnNextCall(foo);
+deopt = true;
+assertEquals(3, foo(o));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors4.js b/deps/v8/test/mjsunit/compiler/deopt-accessors4.js
new file mode 100644
index 0000000000..5a8453f237
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors4.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {v:1};
+var deopt = false;
+
+Object.defineProperty(o, "x", {
+ get: function() { return this.v; },
+ set: function(v) {
+ this.v = v;
+ if (deopt) {
+ %DeoptimizeFunction(foo);
+ }
+ }
+});
+
+function foo(o) {
+ var x = "x";
+ return ++o[x];
+}
+
+assertEquals(2, foo(o));
+assertEquals(3, foo(o));
+%OptimizeFunctionOnNextCall(foo);
+deopt = true;
+assertEquals(4, foo(o));
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors5.js b/deps/v8/test/mjsunit/compiler/deopt-accessors5.js
new file mode 100644
index 0000000000..1b23c532dc
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors5.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+"use strict";
+
+function f(v) {
+ %DeoptimizeFunction(test);
+ return 153;
+}
+
+function test() {
+ var o = {};
+ o.__defineSetter__('q', f);
+ assertEquals(1, o.q = 1);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
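The assertEquals(1, o.q = 1) above holds because an assignment expression evaluates to its right-hand side; the setter's return value (153) is discarded. A sketch:

const box = {};
box.__defineSetter__("q", () => 153);
console.log(box.q = 1); // 1 -- the setter's 153 is never observable here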
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors6.js b/deps/v8/test/mjsunit/compiler/deopt-accessors6.js
new file mode 100644
index 0000000000..16fb4ddf64
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors6.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+
+"use strict";
+
+function f(v) {
+ %DeoptimizeFunction(test);
+ return 153;
+}
+
+function test() {
+ var o = {};
+ var q = "q";
+ o.__defineSetter__(q, f);
+ assertEquals(1, o[q] = 1);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-accessors7.js b/deps/v8/test/mjsunit/compiler/deopt-accessors7.js
new file mode 100644
index 0000000000..8c7d7a1e3c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-accessors7.js
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {v:1};
+var deopt = false;
+Object.defineProperty(o, "x", {
+ get: function() {
+ if (deopt) %DeoptimizeFunction(foo);
+ return 1;
+ }
+});
+
+function bar(x, y, z) {
+ return x + z;
+}
+
+function foo(o, x) {
+ return bar(1, (o[x], 2), 3);
+}
+
+assertEquals(4, foo(o, "v"));
+assertEquals(4, foo(o, "v"));
+assertEquals(4, foo(o, "x"));
+assertEquals(4, foo(o, "x"));
+%OptimizeFunctionOnNextCall(foo);
+deopt = true;
+assertEquals(4, foo(o, "x"));
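In deopt-accessors7.js the comma expression (o[x], 2) evaluates o[x] purely for its getter side effect (the potential deopt) and then yields 2, so bar always computes 1 + 3. A sketch of that evaluation order:

const probe = { get p() { console.log("getter ran"); return 7; } };
console.log((probe.p, 2)); // logs "getter ran", then prints 2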
diff --git a/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
index 217de769d3..0b19df8a1c 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
@@ -34,7 +34,7 @@ var global = 3;
function f(a) {
   // This will trigger a deopt since global was previously a SMI, with the
   // accumulator holding an unboxed double which needs to be materialized.
- global = %math_sqrt(a);
+ global = Math.sqrt(a);
}
%OptimizeFunctionOnNextCall(f);
f(0.25);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-610228.js b/deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js
index ca077d5631..02bd8d9a25 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-610228.js
+++ b/deps/v8/test/mjsunit/compiler/dont-constant-fold-deopting-checks.js
@@ -4,8 +4,7 @@
// Flags: --allow-natives-syntax
-function foo() { return JSON.stringify({a: 0.1}); }
-assertEquals('{"a":0.1}', foo());
-assertEquals('{"a":0.1}', foo());
+function bar(a) { a[0](true); }
+function foo(a) { return bar(1); }
%OptimizeFunctionOnNextCall(foo);
-assertEquals('{"a":0.1}', foo());
+assertThrows(function() {bar([foo])}, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-1.js b/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
index b8c66448dc..f05040bd02 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-1.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f(a) {
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-10.js b/deps/v8/test/mjsunit/compiler/escape-analysis-10.js
index c53cf4d989..4f06d57dcf 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-10.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-10.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
+
(function() {
"use strict";
function f() {
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-2.js b/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
index d116e9a364..49f440e856 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-2.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f(a) {
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-3.js b/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
index d1ebc9b1f8..b92d1c3876 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-3.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f(a) {
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-4.js b/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
index d9fdccc143..ef9f95fd36 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-4.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f(a) {
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-5.js b/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
index cfaf81dbc3..54b5e82958 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-5.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f(h) {
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-6.js b/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
index 6143cfbc1f..c36e7d956e 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-6.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f(a) {
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-7.js b/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
index 16bc71c017..cfa30cbeb4 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-7.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f() {
this.x=0;
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-8.js b/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
index bc5b1d963e..d9c6d254ef 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-8.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f(a) {
this.x=a;
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-9.js b/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
index a19786b360..0b8f75c576 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-9.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --turbo-escape
-//
function f() {
return arguments;
diff --git a/deps/v8/test/mjsunit/compiler/inline-dead-jscreate.js b/deps/v8/test/mjsunit/compiler/inline-dead-jscreate.js
new file mode 100644
index 0000000000..a9778758c4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inline-dead-jscreate.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var bar = 0;
+
+function baz() { return this; }
+
+function foo() {
+ bar += 1;
+ if (bar === 2) throw new baz();
+}
+
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js
new file mode 100644
index 0000000000..8eb1c308a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter1.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) {
+ return a.pop();
+}
+
+var a = new Array(4);
+
+assertEquals(undefined, foo(a));
+assertEquals(undefined, foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo(a));
+Object.prototype.__defineGetter__(0, function() { return 1; });
+assertEquals(1, foo(a));
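Why the prototype getter becomes visible in inlined-array-pop-getter1.js: new Array(4) is all holes, and indexed reads on holes fall through to the prototype chain, which Array.prototype.pop respects. A sketch with a plain data property standing in for the getter:

const holes = new Array(2);
console.log(0 in holes);             // false -- index 0 is a hole
Object.prototype[0] = "from proto";
console.log(holes[0]);               // "from proto"
delete Object.prototype[0];          // clean up the global side effect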
diff --git a/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js
new file mode 100644
index 0000000000..8ae642619e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inlined-array-pop-getter2.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var pop = Array.prototype.pop;
+
+function foo(a) {
+ a.length;
+ return pop.call(a);
+}
+
+var a = new Array(4);
+var o = {}
+o.__defineGetter__(0, function() { return 1; });
+
+assertEquals(undefined, foo(a));
+assertEquals(undefined, foo(a));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo(a));
+Array.prototype.__proto__ = o;
+assertEquals(1, foo(a));
diff --git a/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js b/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
new file mode 100644
index 0000000000..c1301489e7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
@@ -0,0 +1,83 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(a) { return a.pop(); }
+
+ var x = {};
+ var a = [x,x,];
+
+ assertEquals(x, foo(a));
+ assertEquals(x, foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(a));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(a) { return a.pop(); }
+
+ var x = 0;
+ var a = [x,x,];
+
+ assertEquals(x, foo(a));
+ assertEquals(x, foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(a));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(a) { return a.pop(); }
+
+ var x = 0;
+ var a = [x,x,x];
+
+ assertEquals(x, foo(a));
+ assertEquals(x, foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(x, foo(a));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(a) { return a.pop(); }
+
+ var x = {};
+ var a = [x,x,x];
+
+ assertEquals(x, foo(a));
+ assertEquals(x, foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(x, foo(a));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(a) { return a.pop(); }
+
+ var a = [,,];
+
+ assertEquals(undefined, foo(a));
+ assertEquals(undefined, foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(undefined, foo(a));
+ assertOptimized(foo);
+})();
+
+(function() {
+ var pop = Array.prototype.pop;
+
+ function foo(a) { return a.pop(); }
+
+ var a = [1, 2, 3];
+
+ assertEquals(3, foo(a));
+ assertEquals(2, foo(a));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo(a));
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/integral32-add-sub.js b/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
new file mode 100644
index 0000000000..2dd370c9dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
@@ -0,0 +1,131 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(x) {
+ x = x >>> 0;
+ var y = 0 - 2147483648;
+ return x + y;
+ }
+
+ assertEquals(-2147483648, foo(0));
+ assertEquals(0, foo(2147483648));
+ assertEquals(2147483647, foo(4294967295));
+ %BaselineFunctionOnNextCall(foo);
+ assertEquals(-2147483648, foo(0));
+ assertEquals(0, foo(2147483648));
+ assertEquals(2147483647, foo(4294967295));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-2147483648, foo(0));
+ assertEquals(0, foo(2147483648));
+ assertEquals(2147483647, foo(4294967295));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x) {
+ x = x >>> 0;
+ var y = 2147483648;
+ return x - y;
+ }
+
+ assertEquals(-2147483648, foo(0));
+ assertEquals(0, foo(2147483648));
+ assertEquals(2147483647, foo(4294967295));
+ %BaselineFunctionOnNextCall(foo);
+ assertEquals(-2147483648, foo(0));
+ assertEquals(0, foo(2147483648));
+ assertEquals(2147483647, foo(4294967295));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-2147483648, foo(0));
+ assertEquals(0, foo(2147483648));
+ assertEquals(2147483647, foo(4294967295));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x) {
+ x = x | 0;
+ var y = 2147483648;
+ return x + y;
+ }
+
+ assertEquals(2147483648, foo(0));
+ assertEquals(0, foo(-2147483648));
+ assertEquals(4294967295, foo(2147483647));
+ %BaselineFunctionOnNextCall(foo);
+ assertEquals(2147483648, foo(0));
+ assertEquals(0, foo(-2147483648));
+ assertEquals(4294967295, foo(2147483647));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2147483648, foo(0));
+ assertEquals(0, foo(-2147483648));
+ assertEquals(4294967295, foo(2147483647));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x) {
+ x = x | 0;
+ var y = 0 - 2147483648;
+ return x - y;
+ }
+
+ assertEquals(2147483648, foo(0));
+ assertEquals(0, foo(-2147483648));
+ assertEquals(4294967295, foo(2147483647));
+ %BaselineFunctionOnNextCall(foo);
+ assertEquals(2147483648, foo(0));
+ assertEquals(0, foo(-2147483648));
+ assertEquals(4294967295, foo(2147483647));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2147483648, foo(0));
+ assertEquals(0, foo(-2147483648));
+ assertEquals(4294967295, foo(2147483647));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x) {
+ x = x | 0;
+ var y = -0;
+ return x + y;
+ }
+
+ assertEquals(2147483647, foo(2147483647));
+ assertEquals(-2147483648, foo(-2147483648));
+ assertEquals(0, foo(0));
+ %BaselineFunctionOnNextCall(foo);
+ assertEquals(2147483647, foo(2147483647));
+ assertEquals(-2147483648, foo(-2147483648));
+ assertEquals(0, foo(0));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2147483647, foo(2147483647));
+ assertEquals(-2147483648, foo(-2147483648));
+ assertEquals(0, foo(0));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x) {
+ var y = (x < 0) ? 4294967295 : 4294967296;
+ var z = (x > 0) ? 2147483647 : 2147483648;
+ return y - z;
+ }
+
+ assertEquals(2147483647, foo(-1));
+ assertEquals(2147483648, foo(0));
+ assertEquals(2147483649, foo(1));
+ %BaselineFunctionOnNextCall(foo);
+ assertEquals(2147483647, foo(-1));
+ assertEquals(2147483648, foo(0));
+ assertEquals(2147483649, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2147483647, foo(-1));
+ assertEquals(2147483648, foo(0));
+ assertEquals(2147483649, foo(1));
+ assertOptimized(foo);
+})();
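The two coercions exercised above pin the input to different integral ranges, which is what the add/sub typing in integral32-add-sub.js depends on: x >>> 0 maps to [0, 2^32) while x | 0 maps to [-2^31, 2^31). For example:

console.log(-1 >>> 0);       // 4294967295
console.log(-1 | 0);         // -1
console.log(2147483648 | 0); // -2147483648 -- wraps past the int32 boundary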
diff --git a/deps/v8/test/mjsunit/compiler/math-mul.js b/deps/v8/test/mjsunit/compiler/math-mul.js
new file mode 100644
index 0000000000..a391b445fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/math-mul.js
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// For TurboFan, make sure we can eliminate the -0 return value check
+// by recognizing a constant value.
+function gotaconstant(y) { return 15 * y; }
+assertEquals(45, gotaconstant(3));
+gotaconstant(3);
+%OptimizeFunctionOnNextCall(gotaconstant);
+gotaconstant(3);
+
+function gotaconstant_truncated(x, y) { return x * y | 0; }
+assertEquals(45, gotaconstant_truncated(3, 15));
+gotaconstant_truncated(3, 15);
+%OptimizeFunctionOnNextCall(gotaconstant_truncated);
+gotaconstant_truncated(3, 15);
+
+function test(x, y) { return x * y; }
+
+assertEquals(12, test(3, 4));
+assertEquals(16, test(4, 4));
+
+%OptimizeFunctionOnNextCall(test);
+assertEquals(27, test(9, 3));
+
+assertEquals(-0, test(-3, 0));
+assertEquals(-0, test(0, -0));
+
+
+const SMI_MAX = (1 << 29) - 1 + (1 << 29); // Create without overflowing.
+const SMI_MIN = -SMI_MAX - 1; // Create without overflowing.
+
+// Multiply by 3 to avoid compiler optimizations that convert 2*x to x + x.
+assertEquals(SMI_MAX + SMI_MAX + SMI_MAX, test(SMI_MAX, 3));
+
+// Verify that strength reduction will reduce the -0 check quite a bit
+// if we have a negative integer constant.
+function negtest(y) { return -3 * y; }
+assertEquals(-12, negtest(4));
+assertEquals(-12, negtest(4));
+%OptimizeFunctionOnNextCall(negtest);
+negtest(4);
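The -0 cases in math-mul.js matter because multiplication is the common way to produce -0, only Object.is can observe it, and int32 truncation drops the sign — which is exactly what lets the compiler elide the check for known-nonzero constants:

console.log(Object.is(-3 * 0, -0));       // true
console.log(-0 === 0);                    // true -- strict equality can't tell
console.log(Object.is((-3 * 0) | 0, 0));  // true -- truncation normalizes -0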
diff --git a/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
new file mode 100644
index 0000000000..eed8922c07
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = new Float32Array(1);
+function len(a) { return a.length; }
+assertEquals(1, len(a));
+assertEquals(1, len(a));
+%OptimizeFunctionOnNextCall(len);
+assertEquals(1, len(a));
+assertOptimized(len);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js b/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
new file mode 100644
index 0000000000..f6a3d77677
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = new Float64Array(1);
+function len(a) { return a.length; }
+assertEquals(1, len(a));
+assertEquals(1, len(a));
+%OptimizeFunctionOnNextCall(len);
+assertEquals(1, len(a));
+assertOptimized(len);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
index 9f5e4e7f83..ca17ee6a75 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
@@ -35,28 +35,28 @@
function a(t) {
var result = [];
for (var i in t) {
- result.push(i + t[i]);
+ result.push([i, t[i]]);
}
- return result.join('');
+ return result;
}
// Check that we correctly deoptimize on map check.
function b(t) {
var result = [];
for (var i in t) {
- result.push(i + t[i]);
+ result.push([i, t[i]]);
delete t[i];
}
- return result.join('');
+ return result;
}
// Check that we correctly deoptimize during preparation step.
function c(t) {
var result = [];
for (var i in t) {
- result.push(i + t[i]);
+ result.push([i, t[i]]);
}
- return result.join('');
+ return result;
}
// Check that we deoptimize to the place after side effect in the right state.
@@ -64,9 +64,9 @@ function d(t) {
var result = [];
var o;
for (var i in (o = t())) {
- result.push(i + o[i]);
+ result.push([i, o[i]]);
}
- return result.join('');
+ return result;
}
// Check that we correctly deoptimize on map check inserted for fused load.
@@ -75,9 +75,9 @@ function e(t) {
for (var i in t) {
delete t[i];
t[i] = i;
- result.push(i + t[i]);
+ result.push([i, t[i]]);
}
- return result.join('');
+ return result;
}
// Nested for-in loops.
@@ -85,10 +85,10 @@ function f(t) {
var result = [];
for (var i in t) {
for (var j in t) {
- result.push(i + j + t[i] + t[j]);
+ result.push([i, j, t[i], t[j]]);
}
}
- return result.join('');
+ return result;
}
// Deoptimization from the inner for-in loop.
@@ -96,13 +96,13 @@ function g(t) {
var result = [];
for (var i in t) {
for (var j in t) {
- result.push(i + j + t[i] + t[j]);
+ result.push([i, j, t[i], t[j]]);
var v = t[i];
delete t[i];
t[i] = v;
}
}
- return result.join('');
+ return result;
}
@@ -111,12 +111,12 @@ function h(t, deopt) {
var result = [];
for (var i in t) {
for (var j in t) {
- result.push(i + j + t[i] + t[j]);
+ result.push([i, j, t[i], t[j]]);
break;
}
}
deopt.deopt;
- return result.join('');
+ return result;
}
// Continue in the inner loop.
@@ -124,12 +124,12 @@ function j(t, deopt) {
var result = [];
for (var i in t) {
for (var j in t) {
- result.push(i + j + t[i] + t[j]);
+ result.push([i, j, t[i], t[j]]);
continue;
}
}
deopt.deopt;
- return result.join('');
+ return result;
}
// Continue of the outer loop.
@@ -137,12 +137,12 @@ function k(t, deopt) {
var result = [];
outer: for (var i in t) {
for (var j in t) {
- result.push(i + j + t[i] + t[j]);
+ result.push([i, j, t[i], t[j]]);
continue outer;
}
}
deopt.deopt;
- return result.join('');
+ return result;
}
// Break of the outer loop.
@@ -150,12 +150,12 @@ function l(t, deopt) {
var result = [];
outer: for (var i in t) {
for (var j in t) {
- result.push(i + j + t[i] + t[j]);
+ result.push([i, j, t[i], t[j]]);
break outer;
}
}
deopt.deopt;
- return result.join('');
+ return result;
}
// Test deoptimization from inlined frame (currently it is not inlined).
@@ -163,7 +163,7 @@ function m0(t, deopt) {
for (var i in t) {
for (var j in t) {
deopt.deopt;
- return i + j + t[i] + t[j];
+ return [i, j, t[i], t[j]];
}
}
}
@@ -173,42 +173,53 @@ function m(t, deopt) {
}
-function tryFunction(s, mkT, f) {
+function tryFunction(result, mkT, f) {
var d = {deopt: false};
- assertEquals(s, f(mkT(), d));
- assertEquals(s, f(mkT(), d));
- assertEquals(s, f(mkT(), d));
+ assertEquals(result, f(mkT(), d));
+ assertEquals(result, f(mkT(), d));
+ assertEquals(result, f(mkT(), d));
%OptimizeFunctionOnNextCall(f);
- assertEquals(s, f(mkT(), d));
- assertEquals(s, f(mkT(), {}));
+ assertEquals(result, f(mkT(), d));
+ assertEquals(result, f(mkT(), {}));
}
-var s = "a1b2c3d4";
+var expectedResult = [["a","1"],["b","2"],["c","3"],["d","4"]];
function mkTable() { return { a: "1", b: "2", c: "3", d: "4" }; }
-tryFunction(s, mkTable, a);
-tryFunction(s, mkTable, b);
-tryFunction("0a1b2c3d", function () { return "abcd"; }, c);
-tryFunction("0a1b2c3d", function () {
+tryFunction(expectedResult, mkTable, a);
+tryFunction(expectedResult, mkTable, b);
+
+expectedResult = [["0","a"],["1","b"],["2","c"],["3","d"]];
+tryFunction(expectedResult, function () { return "abcd"; }, c);
+tryFunction(expectedResult, function () {
var cnt = false;
return function () {
cnt = true;
return "abcd";
}
}, d);
-tryFunction("aabbccdd", mkTable, e);
+tryFunction([["a","a"],["b","b"],["c","c"],["d","d"]], mkTable, e);
function mkSmallTable() { return { a: "1", b: "2" }; }
-tryFunction("aa11ab12ba21bb22", mkSmallTable, f);
-tryFunction("aa11ab12bb22ba21", mkSmallTable, g);
-tryFunction("aa11ba21", mkSmallTable, h);
-tryFunction("aa11ab12ba21bb22", mkSmallTable, j);
-tryFunction("aa11ba21", mkSmallTable, h);
-tryFunction("aa11ba21", mkSmallTable, k);
-tryFunction("aa11", mkSmallTable, l);
-tryFunction("aa11", mkSmallTable, m);
+tryFunction([
+ ["a","a","1","1"],["a","b","1","2"],
+ ["b","a","2","1"],["b","b","2","2"]],
+ mkSmallTable, f);
+tryFunction([
+ ["a","a","1","1"],["a","b","1","2"],
+ ["b","b","2","2"],["b","a","2","1"]],
+ mkSmallTable, g);
+tryFunction([["a","a","1","1"],["b","a","2","1"]], mkSmallTable, h);
+tryFunction([
+ ["a","a","1","1"],["a","b","1","2"],
+ ["b","a","2","1"],["b","b","2","2"]],
+ mkSmallTable, j);
+tryFunction([["a","a","1","1"],["b","a","2","1"]], mkSmallTable, h);
+tryFunction([["a","a","1","1"],["b","a","2","1"]], mkSmallTable, k);
+tryFunction([["a","a","1","1"]], mkSmallTable, l);
+tryFunction(["a","a","1","1"], mkSmallTable, m);
// Test handling of null.
tryFunction("", function () {
@@ -229,7 +240,7 @@ tryFunction("", function () {
// Test LoadFieldByIndex for out of object properties.
function O() { this.a = 1; }
for (var i = 0; i < 10; i++) new O();
-tryFunction("a1b2c3d4e5f6", function () {
+tryFunction([["a",1],["b",2],["c",3],["d",4],["e",5],["f",6]], function () {
var o = new O();
o.b = 2;
o.c = 3;
@@ -239,8 +250,8 @@ tryFunction("a1b2c3d4e5f6", function () {
return o;
}, function (t) {
var r = [];
- for (var i in t) r.push(i + t[i]);
- return r.join('');
+ for (var i in t) r.push([i, t[i]]);
+ return r;
});
// Test OSR inside for-in.
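The rewrite above from joined strings to nested arrays makes the assertions independent of string coercion; for-in enumeration over plain string keys (insertion order, in practice) is unchanged. A sketch of the pattern the tests now assert against:

const table = { a: "1", b: "2" };
const pairs = [];
for (const k in table) pairs.push([k, table[k]]);
console.log(JSON.stringify(pairs)); // [["a","1"],["b","2"]]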
diff --git a/deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js b/deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js
new file mode 100644
index 0000000000..242b4be772
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/optimized-instanceof-1.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function F() {}
+var f = new F
+
+var proto = Object.getPrototypeOf(F);
+Object.setPrototypeOf(F, null);
+F[Symbol.hasInstance] = function(v) { return true };
+Object.setPrototypeOf(F, proto);
+
+function foo(x) { return x instanceof F };
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(1));
diff --git a/deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js b/deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js
new file mode 100644
index 0000000000..38a35b73f1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/optimized-instanceof-2.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function F() {}
+var f = new F
+
+function foo(x) { return x instanceof F };
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo(1));
+
+var proto = Object.getPrototypeOf(F);
+Object.setPrototypeOf(F, null);
+F[Symbol.hasInstance] = function(v) { return true };
+Object.setPrototypeOf(F, proto);
+
+assertTrue(foo(1));
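The setPrototypeOf dance in both instanceof tests is needed because the inherited Function.prototype[Symbol.hasInstance] is non-writable, so a plain assignment on F would be a silent no-op in sloppy mode; detaching the prototype first lets the test install an own property. The underlying dispatch is simply:

class Any { static [Symbol.hasInstance](v) { return true; } }
console.log(1 instanceof Any);   // true -- the hook overrides prototype walks
console.log("x" instanceof Any); // true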
diff --git a/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
new file mode 100644
index 0000000000..250d523cc9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = new Int32Array(1);
+function len(a) { return a.length; }
+assertEquals(1, len(a));
+assertEquals(1, len(a));
+%OptimizeFunctionOnNextCall(len);
+assertEquals(1, len(a));
+assertOptimized(len);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
new file mode 100644
index 0000000000..d389370a4f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = new Uint32Array(1);
+function len(a) { return a.length; }
+assertEquals(1, len(a));
+assertEquals(1, len(a));
+%OptimizeFunctionOnNextCall(len);
+assertEquals(1, len(a));
+assertOptimized(len);
diff --git a/deps/v8/test/mjsunit/compiler/osr-alignment.js b/deps/v8/test/mjsunit/compiler/osr-alignment.js
index 085d6c4d68..f815e712ee 100644
--- a/deps/v8/test/mjsunit/compiler/osr-alignment.js
+++ b/deps/v8/test/mjsunit/compiler/osr-alignment.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function f1() {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-backedges1.js b/deps/v8/test/mjsunit/compiler/osr-backedges1.js
index d415f4a107..18a7e0469e 100644
--- a/deps/v8/test/mjsunit/compiler/osr-backedges1.js
+++ b/deps/v8/test/mjsunit/compiler/osr-backedges1.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function foo(a) {
var i = a | 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
index df4076c411..7c41f54074 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-func.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
index 923c72f422..bcc7cdd47d 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope.js b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
index 0d78cdcb64..c60f8af6c9 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/osr-follow.js b/deps/v8/test/mjsunit/compiler/osr-follow.js
index b6a2e8e4be..46581a8e5a 100644
--- a/deps/v8/test/mjsunit/compiler/osr-follow.js
+++ b/deps/v8/test/mjsunit/compiler/osr-follow.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function foo(a) {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-for-let.js b/deps/v8/test/mjsunit/compiler/osr-for-let.js
index 4b2fa3e532..b8cef780b5 100644
--- a/deps/v8/test/mjsunit/compiler/osr-for-let.js
+++ b/deps/v8/test/mjsunit/compiler/osr-for-let.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/osr-forin-nested.js b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
index ad55b30bd8..dd810897e0 100644
--- a/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
+++ b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo-osr --allow-natives-syntax
+// Flags: --allow-natives-syntax
function test(e, f, v) {
assertEquals(e, f(v));
diff --git a/deps/v8/test/mjsunit/compiler/osr-forin.js b/deps/v8/test/mjsunit/compiler/osr-forin.js
index 8d1678224c..b45d200d1b 100644
--- a/deps/v8/test/mjsunit/compiler/osr-forin.js
+++ b/deps/v8/test/mjsunit/compiler/osr-forin.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function f(a) {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-forof.js b/deps/v8/test/mjsunit/compiler/osr-forof.js
index 36bff09c58..ce7b24de13 100644
--- a/deps/v8/test/mjsunit/compiler/osr-forof.js
+++ b/deps/v8/test/mjsunit/compiler/osr-forof.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function f(a) {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-function-id.js b/deps/v8/test/mjsunit/compiler/osr-function-id.js
index c506ae8282..8761e8517e 100644
--- a/deps/v8/test/mjsunit/compiler/osr-function-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-function-id.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function id(f) { return f; }
diff --git a/deps/v8/test/mjsunit/compiler/osr-function-id2.js b/deps/v8/test/mjsunit/compiler/osr-function-id2.js
index 561c62e1bc..e25ec31480 100644
--- a/deps/v8/test/mjsunit/compiler/osr-function-id2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-function-id2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function id(f) { return f; }
diff --git a/deps/v8/test/mjsunit/compiler/osr-function.js b/deps/v8/test/mjsunit/compiler/osr-function.js
index 06d137b62c..cee7e9d3d0 100644
--- a/deps/v8/test/mjsunit/compiler/osr-function.js
+++ b/deps/v8/test/mjsunit/compiler/osr-function.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function foo() {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-infinite.js b/deps/v8/test/mjsunit/compiler/osr-infinite.js
index aa74c877d5..24c7add272 100644
--- a/deps/v8/test/mjsunit/compiler/osr-infinite.js
+++ b/deps/v8/test/mjsunit/compiler/osr-infinite.js
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --allow-natives-syntax --turbo-osr
+// Flags: --use-osr --allow-natives-syntax
var global_counter = 0;
function thrower() {
var x = global_counter++;
- if (x == 5) %OptimizeOsr(thrower.caller);
+ if (x == 5) %OptimizeOsr(1);
if (x == 10) throw "terminate";
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-labeled.js b/deps/v8/test/mjsunit/compiler/osr-labeled.js
index 1a9709285e..1384e9a715 100644
--- a/deps/v8/test/mjsunit/compiler/osr-labeled.js
+++ b/deps/v8/test/mjsunit/compiler/osr-labeled.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function foo() {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
index 950d8b0762..4d1798c929 100644
--- a/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
+++ b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function mod() {
function f0() {
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals.js b/deps/v8/test/mjsunit/compiler/osr-literals.js
index d9f68a0b37..f2051dced7 100644
--- a/deps/v8/test/mjsunit/compiler/osr-literals.js
+++ b/deps/v8/test/mjsunit/compiler/osr-literals.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function mod() {
function f0() {
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual1.js b/deps/v8/test/mjsunit/compiler/osr-manual1.js
index 29a4948a65..c3db796f11 100644
--- a/deps/v8/test/mjsunit/compiler/osr-manual1.js
+++ b/deps/v8/test/mjsunit/compiler/osr-manual1.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
var counter = 111;
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual2.js b/deps/v8/test/mjsunit/compiler/osr-manual2.js
index 8aa5d69db3..de7ec243fe 100644
--- a/deps/v8/test/mjsunit/compiler/osr-manual2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-manual2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
var counter = 188;
diff --git a/deps/v8/test/mjsunit/compiler/osr-multiple.js b/deps/v8/test/mjsunit/compiler/osr-multiple.js
index c318645d32..72fff8546c 100644
--- a/deps/v8/test/mjsunit/compiler/osr-multiple.js
+++ b/deps/v8/test/mjsunit/compiler/osr-multiple.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function f1(a,b,c) {
var x = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-multiple2.js b/deps/v8/test/mjsunit/compiler/osr-multiple2.js
index 9a81bfb658..edb627a57b 100644
--- a/deps/v8/test/mjsunit/compiler/osr-multiple2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-multiple2.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --use-osr
-// TODO(titzer): enable --turbo-osr when nested OSR works.
function f1(a,b,c) {
var x = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-multiple3.js b/deps/v8/test/mjsunit/compiler/osr-multiple3.js
index 0fb1ac73a3..fa703eaeac 100644
--- a/deps/v8/test/mjsunit/compiler/osr-multiple3.js
+++ b/deps/v8/test/mjsunit/compiler/osr-multiple3.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --use-osr
-// TODO(titzer): enable --turbo-osr when nested OSR works.
function f1(a,b,c) {
var x = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested2.js b/deps/v8/test/mjsunit/compiler/osr-nested2.js
index 41bd9b247b..efe31f1177 100644
--- a/deps/v8/test/mjsunit/compiler/osr-nested2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-nested2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function f() {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested2b.js b/deps/v8/test/mjsunit/compiler/osr-nested2b.js
index e64c10ccb4..18088114a4 100644
--- a/deps/v8/test/mjsunit/compiler/osr-nested2b.js
+++ b/deps/v8/test/mjsunit/compiler/osr-nested2b.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function f() {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested3.js b/deps/v8/test/mjsunit/compiler/osr-nested3.js
index f5d09ba166..d7c144b9e6 100644
--- a/deps/v8/test/mjsunit/compiler/osr-nested3.js
+++ b/deps/v8/test/mjsunit/compiler/osr-nested3.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function f() {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested3b.js b/deps/v8/test/mjsunit/compiler/osr-nested3b.js
index 32ac2a7058..a10d328e03 100644
--- a/deps/v8/test/mjsunit/compiler/osr-nested3b.js
+++ b/deps/v8/test/mjsunit/compiler/osr-nested3b.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function f() {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/compiler/osr-regex-id.js b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
index 7831b14840..e0b4dad1dc 100644
--- a/deps/v8/test/mjsunit/compiler/osr-regex-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function id(f) { return f; }
diff --git a/deps/v8/test/mjsunit/compiler/osr-sar.js b/deps/v8/test/mjsunit/compiler/osr-sar.js
index cc04adca8a..02684f088c 100644
--- a/deps/v8/test/mjsunit/compiler/osr-sar.js
+++ b/deps/v8/test/mjsunit/compiler/osr-sar.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
function test() {
// Loop to force OSR.
diff --git a/deps/v8/test/mjsunit/compiler/osr-warm.js b/deps/v8/test/mjsunit/compiler/osr-warm.js
index 7c30c07f20..73e1fd5cd2 100644
--- a/deps/v8/test/mjsunit/compiler/osr-warm.js
+++ b/deps/v8/test/mjsunit/compiler/osr-warm.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --turbo-osr
+// Flags: --use-osr
function f1(x) {
while (x > 0) {
diff --git a/deps/v8/test/mjsunit/compiler/osr-while-let.js b/deps/v8/test/mjsunit/compiler/osr-while-let.js
index c19cf6cb24..11ebc4bb35 100644
--- a/deps/v8/test/mjsunit/compiler/osr-while-let.js
+++ b/deps/v8/test/mjsunit/compiler/osr-while-let.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr
+// Flags: --allow-natives-syntax --use-osr
"use strict";
diff --git a/deps/v8/test/mjsunit/compiler/regress-5074.js b/deps/v8/test/mjsunit/compiler/regress-5074.js
new file mode 100644
index 0000000000..903b54ad98
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-5074.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
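+// s is a holey double array: s[0] is a hole. The optimized load must
+// yield undefined for the hole, so foo(0, "2") is "undefined" + "2".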
+var s = [,0.1];
+
+function foo(a, b) {
+ var x = s[a];
+ s[1] = 0.1;
+ return x + b;
+}
+
+assertEquals(2.1, foo(1, 2));
+assertEquals(2.1, foo(1, 2));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("undefined2", foo(0, "2"));
diff --git a/deps/v8/test/mjsunit/compiler/regress-5100.js b/deps/v8/test/mjsunit/compiler/regress-5100.js
new file mode 100644
index 0000000000..694cd8a75b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-5100.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [0, 1];
+a["true"] = "true";
+a["false"] = "false";
+a["null"] = "null";
+a["undefined"] = "undefined";
+
+// Ensure we don't accidentally truncate true when used to index arrays.
+(function() {
+ function f(x) { return a[x]; }
+
+ assertEquals(0, f(0));
+ assertEquals(0, f(0));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("true", f(true));
+})();
+
+// Ensure we don't accidentally truncate false when used to index arrays.
+(function() {
+ function f(x) { return a[x]; }
+
+ assertEquals(0, f(0));
+ assertEquals(0, f(0));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("false", f(false));
+})();
+
+// Ensure we don't accidentally truncate null when used to index arrays.
+(function() {
+ function f(x) { return a[x]; }
+
+ assertEquals(0, f(0));
+ assertEquals(0, f(0));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("null", f(null));
+})();
+
+// Ensure we don't accidentally truncate undefined when used to index arrays.
+(function() {
+ function f(x) { return a[x]; }
+
+ assertEquals(0, f(0));
+ assertEquals(0, f(0));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("undefined", f(undefined));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-5129.js b/deps/v8/test/mjsunit/compiler/regress-5129.js
new file mode 100644
index 0000000000..1d100ab34c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-5129.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
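+// With $a = 0x7fffffff and $b = -1, $a - $b is 2**31, which overflows
+// int32; ($sub|0) is then -2147483648, so the assertion expects true.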
+function foo($a,$b) {
+ $a = $a|0;
+ $b = $b|0;
+ var $sub = $a - $b;
+ return ($sub|0) < 0;
+}
+
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo(0x7fffffff,-1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-5158.js b/deps/v8/test/mjsunit/compiler/regress-5158.js
new file mode 100644
index 0000000000..ead5f4ed9d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-5158.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
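+// For x = -2147483648 the branch takes 0 - x, which is 2**31 and does not
+// fit in int32; the optimized code must produce the double 2147483648.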
+function foo(x) {
+ x = +x;
+ return (x > 0) ? x : 0 - x;
+}
+
+foo(1);
+foo(-1);
+foo(0);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(2147483648, foo(-2147483648));
diff --git a/deps/v8/test/mjsunit/compiler/regress-5278.js b/deps/v8/test/mjsunit/compiler/regress-5278.js
new file mode 100644
index 0000000000..25b1fb03d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-5278.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
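+// -2 % 1 is -0; the optimized modulus must preserve the sign of the zero.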
+function foo(a, b) {
+ return a % b;
+}
+foo(2, 1);
+foo(2, 1);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(-0, foo(-2, 1));
diff --git a/deps/v8/test/mjsunit/compiler/regress-607493.js b/deps/v8/test/mjsunit/compiler/regress-607493.js
new file mode 100644
index 0000000000..540b47e2d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-607493.js
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function ForInTryCatchContinueOsr() {
+ var a = [1];
+
+ function g() {
+ for (var x in a) {
+ try {
+ for (var i = 0; i < 10; i++) { %OptimizeOsr(); }
+ return;
+ } catch(e) {
+ continue;
+ }
+ }
+ }
+
+ g();
+})();
+
+(function ForInContinueNestedOsr() {
+ var a = [1];
+
+ function g() {
+ for (var x in a) {
+ if (x) {
+ for (var i = 0; i < 10; i++) { %OptimizeOsr(); }
+ }
+ continue;
+ }
+ }
+
+ g();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-621423.js b/deps/v8/test/mjsunit/compiler/regress-621423.js
new file mode 100644
index 0000000000..962176ffbf
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-621423.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [0, ""];
+a[0] = 0;
+
+function g(array) {
+ array[1] = undefined;
+}
+
+function f() {
+ g(function() {});
+ g(a);
+}
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-625558.js b/deps/v8/test/mjsunit/compiler/regress-625558.js
new file mode 100644
index 0000000000..5d6b372632
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-625558.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+for (var global = 0; global <= 256; global++) { }
+
+function f() {
+ global = "luft";
+ global += ++global;
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-628403.js b/deps/v8/test/mjsunit/compiler/regress-628403.js
new file mode 100644
index 0000000000..4096ac32ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-628403.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
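+// While dothrow is false the catch block never runs, so the a[0] load in
+// the handler is first exercised only after f has been optimized.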
+var dothrow = false;
+
+function g() {
+ if (dothrow) throw 1;
+}
+
+function f(a) {
+ try {
+ g();
+ } catch(e) {
+ if (typeof e !== 'number' && e !== 1) throw e;
+ return a[0];
+ }
+}
+
+%NeverOptimizeFunction(g);
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+dothrow = true;
+assertEquals(42, f([42]));
diff --git a/deps/v8/test/mjsunit/compiler/regress-628516.js b/deps/v8/test/mjsunit/compiler/regress-628516.js
new file mode 100644
index 0000000000..8cb43b4bea
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-628516.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ var i = 0;
+ while (1) {
+ if ({}) i = expected[0] == x[0];
+ i++;
+ }
+}
+
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-628773.js b/deps/v8/test/mjsunit/compiler/regress-628773.js
new file mode 100644
index 0000000000..3c315b3828
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-628773.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tailcalls
+
+"use strict";
+
+function foo() {
+ for (var i = 0; i < 10000; i++) {
+ try {
+ for (var j = 0; j < 2; j++) {
+ }
+ throw 1;
+ } catch(e) {
+ if (typeof a == "number") return a && isNaN(b);
+ }
+ }
+}
+
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-630611.js b/deps/v8/test/mjsunit/compiler/regress-630611.js
new file mode 100644
index 0000000000..be75777ba7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-630611.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var global = 1;
+global = 2;
+
+function f() {
+ var o = { a : 1 };
+ global = "a";
+ for (var i = global; i < 2; i++) {
+ delete o[i];
+ }
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-633497.js b/deps/v8/test/mjsunit/compiler/regress-633497.js
new file mode 100644
index 0000000000..8bf358af00
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-633497.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
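+// Called with no arguments, a|0 is 0, so the else branch (after the dummy
+// stores) runs and +x must evaluate to 277.5.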
+function f(a) {
+ var x;
+ a = a|0;
+ var dummy;
+ if (a === 1) {
+ x = 277.5;
+ } else if (a === 2) {
+ x = 0;
+ } else {
+ dummy = 527.5;
+ dummy = 958.5;
+ dummy = 1143.5;
+ dummy = 1368.5;
+ dummy = 1558.5;
+ x = 277.5;
+ }
+ return +x;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(277.5, f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-loop-variable-if.js b/deps/v8/test/mjsunit/compiler/regress-loop-variable-if.js
new file mode 100644
index 0000000000..ec284e9222
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-loop-variable-if.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-loop-variable
+
+function f() {
+ for (var i = 0; i != 10; i++) {
+ if (i < 8) print("x");
+ }
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-loop-variable-unsigned.js b/deps/v8/test/mjsunit/compiler/regress-loop-variable-unsigned.js
new file mode 100644
index 0000000000..751136eb13
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-loop-variable-unsigned.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-loop-variable
+
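+// The bounds 4294967295 (2**32 - 1) and 4294967293 (2**32 - 3) lie outside
+// the int32 range, exercising the unsigned case of loop variable analysis.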
+(function() {
+ function f() {
+ for (var i = 0; i < 4294967295; i += 2) {
+ if (i === 10) break;
+ }
+ }
+ f();
+})();
+
+(function() {
+ function f() {
+ for (var i = 0; i < 4294967293; i += 2) {
+ if (i === 10) break;
+ }
+ }
+ f();
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js b/deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js
new file mode 100644
index 0000000000..368c837163
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-number-is-hole-nan.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
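+// a[1] holds a denormal that can be confused with the hole NaN (hence the
+// test name); the optimized load must return the exact value unchanged.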
+var a = [, 2.121736758e-314];
+
+function foo() { return a[1]; }
+
+assertEquals(2.121736758e-314, foo());
+assertEquals(2.121736758e-314, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(2.121736758e-314, foo());
diff --git a/deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js b/deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js
new file mode 100644
index 0000000000..81231984e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-store-holey-double-array.js
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function StoreHoleBitPattern() {
+ function g(src, dst, i) {
+ dst[i] = src[i];
+ }
+
+ var b = new ArrayBuffer(16);
+ var i32 = new Int32Array(b);
+ i32[0] = 0xFFF7FFFF;
+ i32[1] = 0xFFF7FFFF;
+ i32[3] = 0xFFF7FFFF;
+ i32[4] = 0xFFF7FFFF;
+ var f64 = new Float64Array(b);
+
+ var a = [,0.1];
+
+ g(f64, a, 1);
+ g(f64, a, 1);
+ %OptimizeFunctionOnNextCall(g);
+ g(f64, a, 0);
+
+ assertTrue(Number.isNaN(a[0]));
+})();
+
+
+(function ConvertHoleToNumberAndStore() {
+ function g(a, i) {
+ var x = a[i];
+ a[i] = +x;
+ }
+
+ var a=[,0.1];
+ g(a, 1);
+ g(a, 1);
+ %OptimizeFunctionOnNextCall(g);
+ g(a, 0);
+ assertTrue(Number.isNaN(a[0]));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js b/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
new file mode 100644
index 0000000000..e872401c0b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-type-feedback
+
+function f(x) {
+ var s = x ? "0" : "1";
+ return 1 + Number(s);
+}
+
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(2, f(0));
diff --git a/deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js b/deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js
new file mode 100644
index 0000000000..1dc3042ea7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-truncate-number-or-undefined-to-float64.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(a, b) {
+ a = +a;
+ if (b) {
+ a = undefined;
+ }
+ print(a);
+ return +a;
+}
+
+g(0);
+g(0);
+%OptimizeFunctionOnNextCall(g);
+assertTrue(Number.isNaN(g(0, true)));
diff --git a/deps/v8/test/mjsunit/compiler/regress-valueof.js b/deps/v8/test/mjsunit/compiler/regress-valueof.js
deleted file mode 100644
index 7b29b46a66..0000000000
--- a/deps/v8/test/mjsunit/compiler/regress-valueof.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-// Test valueof with integer input.
-function f(x) { var y = x + 1; return %_ValueOf(y); }
-
-for (var i=0; i<100000; i++) f(42);
-
-assertEquals(43, f(42));
diff --git a/deps/v8/test/mjsunit/compiler/try-osr.js b/deps/v8/test/mjsunit/compiler/try-osr.js
index e4eb8dd9fa..c0ef27add3 100644
--- a/deps/v8/test/mjsunit/compiler/try-osr.js
+++ b/deps/v8/test/mjsunit/compiler/try-osr.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turbo-osr
+// Flags: --allow-natives-syntax
function OSRInsideTry(x) {
try {
diff --git a/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js b/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
new file mode 100644
index 0000000000..8dcc42c8a1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
@@ -0,0 +1,102 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-type-feedback
+
+(function AddSubtractSmis() {
+ function f0(a, b, c) {
+ return a + b - c;
+ }
+
+ assertEquals(4, f0(3, 2, 1));
+ assertEquals(4, f0(3, 2, 1));
+ %OptimizeFunctionOnNextCall(f0);
+ assertEquals(4, f0(3, 2, 1));
+})();
+
+(function AddSubtractDoubles() {
+ function f1(a, b, c) {
+ return a + b - c;
+ }
+
+ assertEquals(4.5, f1(3.5, 2.5, 1.5));
+ assertEquals(4.5, f1(3.5, 2.5, 1.5));
+ %OptimizeFunctionOnNextCall(f1);
+ assertEquals(4.5, f1(3.5, 2.5, 1.5));
+ assertEquals(4, f1(3, 2, 1));
+ assertTrue(isNaN(f1(3, 2, undefined)));
+ assertTrue(isNaN(f1(3, undefined, 1)));
+})();
+
+(function CheckUint32ToInt32Conv() {
+ function f2(a) {
+ return (a >>> 0) + 1;
+ }
+
+ assertEquals(1, f2(0));
+ assertEquals(1, f2(0));
+ %OptimizeFunctionOnNextCall(f2);
+ assertEquals(1, f2(0));
+ assertEquals(4294967295, f2(-2));
+})();
+
+(function CheckFloat64ToInt32Conv() {
+ function f3(a, b) {
+ var x = 0;
+ if (a) {
+ x = 0.5;
+ }
+ return x + b;
+ }
+
+ assertEquals(1, f3(0, 1));
+ assertEquals(1, f3(0, 1));
+ %OptimizeFunctionOnNextCall(f3);
+ assertEquals(1, f3(0, 1));
+ assertEquals(1.5, f3(1, 1));
+})();
+
+(function ShiftLeftSmis() {
+ function f4(a, b) {
+ return a << b;
+ }
+
+ assertEquals(24, f4(3, 3));
+ assertEquals(40, f4(5, 3));
+ %OptimizeFunctionOnNextCall(f4);
+ assertEquals(64, f4(4, 4));
+})();
+
+(function ShiftLeftNumbers() {
+ function f5(a, b) {
+ return a << b;
+ }
+
+ assertEquals(24, f5(3.3, 3.4));
+ assertEquals(40, f5(5.1, 3.9));
+ %OptimizeFunctionOnNextCall(f5);
+ assertEquals(64, f5(4.9, 4.1));
+})();
+
+(function ShiftRightNumbers() {
+ function f6(a, b) {
+ return a >> b;
+ }
+
+ assertEquals(1, f6(8.3, 3.4));
+ assertEquals(-2, f6(-16.1, 3.9));
+ %OptimizeFunctionOnNextCall(f6);
+ assertEquals(0, f6(16.2, 5.1));
+})();
+
+(function ShiftRightLogicalNumbers() {
+ function f7(a, b) {
+ return a >>> b;
+ }
+
+ assertEquals(1, f7(8.3, 3.4));
+ assertEquals(536870910, f7(-16.1, 3.9));
+ %OptimizeFunctionOnNextCall(f7);
+ assertEquals(0, f7(16.2, 5.1));
+})();
diff --git a/deps/v8/test/mjsunit/cross-realm-filtering.js b/deps/v8/test/mjsunit/cross-realm-filtering.js
index 47c0d19229..8ac5b8a646 100644
--- a/deps/v8/test/mjsunit/cross-realm-filtering.js
+++ b/deps/v8/test/mjsunit/cross-realm-filtering.js
@@ -33,18 +33,18 @@ function assertNotIn(thrower, error) {
}
Realm.eval(realms[1], script);
-assertSame(3, Realm.shared.error_0.length);
-assertSame(4, Realm.shared.error_1.length);
+assertSame(2, Realm.shared.error_0.length);
+assertSame(3, Realm.shared.error_1.length);
-assertTrue(Realm.shared.thrower_1 === Realm.shared.error_1[2].getFunction());
+assertTrue(Realm.shared.thrower_1 === Realm.shared.error_1[1].getFunction());
assertNotIn(Realm.shared.thrower_0, Realm.shared.error_0);
assertNotIn(Realm.shared.thrower_0, Realm.shared.error_1);
Realm.eval(realms[0], script);
-assertSame(5, Realm.shared.error_0.length);
-assertSame(4, Realm.shared.error_1.length);
+assertSame(4, Realm.shared.error_0.length);
+assertSame(3, Realm.shared.error_1.length);
-assertTrue(Realm.shared.thrower_0 === Realm.shared.error_0[2].getFunction());
+assertTrue(Realm.shared.thrower_0 === Realm.shared.error_0[1].getFunction());
assertNotIn(Realm.shared.thrower_1, Realm.shared.error_0);
assertNotIn(Realm.shared.thrower_1, Realm.shared.error_1);
@@ -88,3 +88,77 @@ o = Realm.eval(realmIndex, "new f()");
proto = Object.getPrototypeOf(o);
assertFalse(proto === Object.prototype);
assertTrue(proto === otherObject.prototype);
+
+// Check function constructor.
+var ctor_script = "Function";
+var ctor_a_script =
+ "(function() { return Function.apply(this, ['return 1;']); })";
+var ctor_b_script = "Function.bind(this, 'return 1;')";
+var ctor_c_script =
+ "(function() { return Function.call(this, 'return 1;'); })";
+Realm.shared = {
+ ctor_0 : Realm.eval(realms[0], ctor_script),
+ ctor_1 : Realm.eval(realms[1], ctor_script),
+ ctor_a_0 : Realm.eval(realms[0], ctor_a_script),
+ ctor_a_1 : Realm.eval(realms[1], ctor_a_script),
+ ctor_b_0 : Realm.eval(realms[0], ctor_b_script),
+ ctor_b_1 : Realm.eval(realms[1], ctor_b_script),
+ ctor_c_0 : Realm.eval(realms[0], ctor_c_script),
+ ctor_c_1 : Realm.eval(realms[1], ctor_c_script),
+}
+var script_0 = " \
+ var ctor_0 = Realm.shared.ctor_0; \
+ Realm.shared.direct_0 = ctor_0('return 1'); \
+ Realm.shared.indirect_0 = (function() { return ctor_0('return 1;'); })(); \
+ Realm.shared.apply_0 = ctor_0.apply(this, ['return 1']); \
+ Realm.shared.bind_0 = ctor_0.bind(this, 'return 1')(); \
+ Realm.shared.call_0 = ctor_0.call(this, 'return 1'); \
+ Realm.shared.proxy_0 = new Proxy(ctor_0, {})('return 1'); \
+ Realm.shared.reflect_0 = Reflect.apply(ctor_0, this, ['return 1']); \
+ Realm.shared.a_0 = Realm.shared.ctor_a_0(); \
+ Realm.shared.b_0 = Realm.shared.ctor_b_0(); \
+ Realm.shared.c_0 = Realm.shared.ctor_c_0(); \
+";
+script = script_0 + script_0.replace(/_0/g, "_1");
+Realm.eval(realms[0], script);
+assertSame(1, Realm.shared.direct_0());
+assertSame(1, Realm.shared.indirect_0());
+assertSame(1, Realm.shared.apply_0());
+assertSame(1, Realm.shared.bind_0());
+assertSame(1, Realm.shared.call_0());
+assertSame(1, Realm.shared.proxy_0());
+assertSame(1, Realm.shared.reflect_0());
+assertSame(1, Realm.shared.a_0());
+assertSame(1, Realm.shared.b_0());
+assertSame(1, Realm.shared.c_0());
+assertSame(undefined, Realm.shared.direct_1);
+assertSame(undefined, Realm.shared.indirect_1);
+assertSame(undefined, Realm.shared.apply_1);
+assertSame(undefined, Realm.shared.bind_1);
+assertSame(undefined, Realm.shared.call_1);
+assertSame(undefined, Realm.shared.proxy_1);
+assertSame(undefined, Realm.shared.reflect_1);
+assertSame(undefined, Realm.shared.a_1);
+assertSame(undefined, Realm.shared.b_1);
+assertSame(undefined, Realm.shared.c_1);
+Realm.eval(realms[1], script);
+assertSame(undefined, Realm.shared.direct_0);
+assertSame(undefined, Realm.shared.indirect_0);
+assertSame(undefined, Realm.shared.apply_0);
+assertSame(undefined, Realm.shared.bind_0);
+assertSame(undefined, Realm.shared.call_0);
+assertSame(undefined, Realm.shared.proxy_0);
+assertSame(undefined, Realm.shared.reflect_0);
+assertSame(undefined, Realm.shared.a_0);
+assertSame(undefined, Realm.shared.b_0);
+assertSame(undefined, Realm.shared.c_0);
+assertSame(1, Realm.shared.direct_1());
+assertSame(1, Realm.shared.indirect_1());
+assertSame(1, Realm.shared.apply_1());
+assertSame(1, Realm.shared.bind_1());
+assertSame(1, Realm.shared.call_1());
+assertSame(1, Realm.shared.proxy_1());
+assertSame(1, Realm.shared.reflect_1());
+assertSame(1, Realm.shared.a_1());
+assertSame(1, Realm.shared.b_1());
+assertSame(1, Realm.shared.c_1());
diff --git a/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js b/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
index b7a8dff1ba..17668cfc24 100644
--- a/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
+++ b/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
@@ -49,10 +49,11 @@ var sum = 0;
var i = 0; // Break 1.
i++; // Break 2.
i++; // Break 3.
- return i; // Break 4.
-}()); // Break 5.
+ debugger; // Break 4.
+ return i; // Break 5.
+}()); // Break 6.
-assertNull(exception); // Break 6.
+assertNull(exception); // Break 7.
assertEquals(expected_breaks, break_count);
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-eval-scope.js b/deps/v8/test/mjsunit/debug-eval-scope.js
new file mode 100644
index 0000000000..2b97bf65f2
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-eval-scope.js
@@ -0,0 +1,144 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --no-always-opt
+
+// Test that the (strict) eval scope is visible to the debugger.
+
+var Debug = debug.Debug;
+var exception = null;
+var delegate = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ delegate(exec_state);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+// Current function is the top-level eval.
+// We can access stack- and context-allocated values in the eval-scope.
+delegate = function(exec_state) {
+ assertEquals([ debug.ScopeType.Eval,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(0).allScopes().map(s => s.scopeType()));
+ var scope = exec_state.frame(0).scope(0);
+ assertEquals(1, scope.scopeObject().property("a").value().value());
+ assertEquals(1, exec_state.frame(0).evaluate("a").value());
+ scope.setVariableValue("a", 2);
+ assertEquals(2, exec_state.frame(0).evaluate("a++").value());
+}
+
+eval("'use strict'; \n" +
+ "var a = 1; \n" +
+ "debugger; \n" +
+ "assertEquals(3, a);\n");
+
+eval("'use strict'; \n" +
+ "var a = 1; \n" +
+ "(x=>a); \n" + // Force context-allocation.
+ "debugger; \n" +
+ "assertEquals(3, a);\n");
+
+// Current function is an inner function.
+// We cannot access stack-allocated values in the eval-scope.
+delegate = function(exec_state) {
+ assertEquals([ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(0).allScopes().map(s => s.scopeType()));
+ assertEquals([ debug.ScopeType.Eval,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(1).allScopes().map(s => s.scopeType()));
+ var scope = exec_state.frame(0).scope(0);
+ assertThrows(() => exec_state.frame(0).evaluate("a"), ReferenceError);
+ assertTrue(scope.scopeObject().property("a").isUndefined());
+}
+
+eval("'use strict'; \n" +
+ "var a = 1; \n" +
+ "(() => {debugger})()\n");
+
+// Current function is an escaped inner function.
+delegate = function(exec_state) {
+ assertEquals([ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(0).allScopes().map(s => s.scopeType()));
+ assertEquals([ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(1).allScopes().map(s => s.scopeType()));
+ var scope = exec_state.frame(0).scope(0);
+ assertThrows(() => exec_state.frame(0).evaluate("a"), ReferenceError);
+ assertTrue(scope.scopeObject().property("a").isUndefined());
+}
+
+var f = eval("'use strict'; \n" +
+ "var a = 1; \n" +
+ "() => {debugger}\n");
+f();
+
+// Current function is an inner function.
+// We can access context-allocated values in the eval-scope.
+delegate = function(exec_state) {
+ assertEquals([ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(0).allScopes().map(s => s.scopeType()));
+ assertEquals([ debug.ScopeType.Eval,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(1).allScopes().map(s => s.scopeType()));
+ var scope = exec_state.frame(1).scope(0);
+ assertEquals(1, scope.scopeObject().property("a").value().value());
+ assertEquals(1, exec_state.frame(1).evaluate("a").value());
+ assertEquals(1, exec_state.frame(0).evaluate("a").value());
+ scope.setVariableValue("a", 2);
+ assertEquals(2, exec_state.frame(0).evaluate("a++").value());
+ assertEquals(3, exec_state.frame(1).evaluate("a++").value());
+}
+
+eval("'use strict'; \n" +
+ "var a = 1; \n" +
+ "(() => { a; \n" + // Force context-allocation.
+ " debugger; \n" +
+ " assertEquals(4, a);\n" +
+ " })(); \n"
+ );
+
+// Current function is an escaped inner function.
+// We can access context-allocated values in the eval-scope.
+delegate = function(exec_state) {
+ assertEquals([ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(0).allScopes().map(s => s.scopeType()));
+ assertEquals([ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(1).allScopes().map(s => s.scopeType()));
+ var scope = exec_state.frame(0).scope(1);
+ assertEquals(1, scope.scopeObject().property("a").value().value());
+ assertEquals(1, exec_state.frame(0).evaluate("a").value());
+ scope.setVariableValue("a", 2);
+ assertEquals(2, exec_state.frame(0).evaluate("a++").value());
+}
+
+var g = eval("'use strict'; \n" +
+ "var a = 1; \n" +
+ "() => { a; \n" +
+ " debugger; \n" +
+ " assertEquals(3, a);\n" +
+ " } \n");
+g();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-evaluate-nested.js b/deps/v8/test/mjsunit/debug-evaluate-nested.js
index da11b9001c..965b5a7aa5 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-nested.js
+++ b/deps/v8/test/mjsunit/debug-evaluate-nested.js
@@ -34,7 +34,7 @@ function listener(event, exec_state, event_data, data) {
exec_state.frame(0).evaluate("debugger;");
} else {
checkScopes(exec_state.frame(0).allScopes(),
- [ ScopeType.With, ScopeType.Closure,
+ [ ScopeType.Eval, ScopeType.With, ScopeType.Closure,
ScopeType.Script, ScopeType.Global ]);
}
} catch (e) {
diff --git a/deps/v8/test/mjsunit/debug-evaluate-shadowed-context-2.js b/deps/v8/test/mjsunit/debug-evaluate-shadowed-context-2.js
new file mode 100644
index 0000000000..59352e06a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-evaluate-shadowed-context-2.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --no-analyze-environment-liveness
+
+// Test that debug-evaluate correctly collects free outer variables
+// and does not get confused by variables in nested scopes.
+
+Debug = debug.Debug
+
+var exception = null;
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertThrows(() => exec_state.frame(0).evaluate("x").value());
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+(function() {
+ var x = 1; // context allocate x
+ (() => x);
+ (function() {
+ var x = 2; // stack allocate shadowing x
+ (function() {
+ { // context allocate x in a nested scope
+ let x = 3;
+ (() => x);
+ }
+ debugger;
+ })();
+ })();
+})();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/debug-exceptions.js b/deps/v8/test/mjsunit/debug-exceptions.js
new file mode 100644
index 0000000000..1a0e222d51
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-exceptions.js
@@ -0,0 +1,88 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+
+Debug = debug.Debug
+
+let error = false;
+let uncaught;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ uncaught = event_data.uncaught();
+ } catch (e) {
+ error = true;
+ }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+
+function assertCaught(f) {
+ try {f()} finally {
+ assertFalse(uncaught);
+ return;
+ }
+}
+
+function assertUncaught(f) {
+ try {f()} finally {
+ assertTrue(uncaught);
+ return;
+ }
+}
+
+
+assertUncaught(() => {
+ for (var a of [1, 2, 3]) {
+ throw a
+ }
+});
+
+assertUncaught(() => {
+ for (var a of [1, 2, 3]) {
+ try {throw a} finally {}
+ }
+});
+
+assertCaught(() => {
+ for (var a of [1, 2, 3]) {
+ try {
+ try {throw a} finally {}
+ } catch(_) {}
+ }
+});
+
+assertCaught(() => {
+ try {
+ for (var a of [1, 2, 3]) {
+ try {throw a} finally {}
+ }
+ } catch(_) {}
+});
+
+
+// Check that an internal exception in our yield* desugaring is not observable.
+{
+ uncaught = null;
+
+ let iter = {
+ next() {return {value:42, done:false}},
+ throw() {return {done:true}}
+ };
+ let iterable = {[Symbol.iterator]() {return iter}};
+ function* f() { yield* iterable }
+
+ let g = f();
+ g.next();
+ assertEquals({value: undefined, done: true}, g.throw());
+ assertNull(uncaught); // No exception event was generated.
+}
+
+
+assertFalse(error);
diff --git a/deps/v8/test/mjsunit/debug-function-scopes.js b/deps/v8/test/mjsunit/debug-function-scopes.js
index f63d7b26c8..ae95f9b97d 100644
--- a/deps/v8/test/mjsunit/debug-function-scopes.js
+++ b/deps/v8/test/mjsunit/debug-function-scopes.js
@@ -42,14 +42,7 @@ function CheckScope(scope_mirror, scope_expectations, expected_scope_type) {
}
}
-// A copy of the scope types from debug/mirrors.js.
-var ScopeType = { Global: 0,
- Local: 1,
- With: 2,
- Closure: 3,
- Catch: 4,
- Block: 5,
- Script: 6};
+var ScopeType = debug.ScopeType;
var f1 = (function F1(x) {
function F2(y) {
@@ -162,6 +155,3 @@ function CheckNoScopeVisible(f) {
CheckNoScopeVisible(Number);
CheckNoScopeVisible(Function.toString);
-
-// This getter is known to be implemented as closure.
-CheckNoScopeVisible(new Error().__lookupGetter__("stack"));
diff --git a/deps/v8/test/mjsunit/debug-generator-break-on-stack.js b/deps/v8/test/mjsunit/debug-generator-break-on-stack.js
new file mode 100644
index 0000000000..5a1a9c56c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-generator-break-on-stack.js
@@ -0,0 +1,46 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ break_count++;
+ var line = exec_state.frame(0).sourceLineText();
+ print(line);
+ assertTrue(line.indexOf(`B${break_count}`) > 0);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+
+function* g() {
+ setbreaks();
+ yield 1; // B1
+}
+
+function* f() {
+ yield* g();
+ return 2; // B2
+}
+
+function setbreaks() {
+ Debug.setListener(listener);
+ Debug.setBreakPoint(g, 2);
+ Debug.setBreakPoint(f, 2);
+}
+
+for (let _ of f()) { }
+
+assertEquals(2, break_count);
+assertNull(exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-generator-break.js b/deps/v8/test/mjsunit/debug-generator-break.js
new file mode 100644
index 0000000000..34ed82c376
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-generator-break.js
@@ -0,0 +1,44 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ break_count++;
+ var line = exec_state.frame(0).sourceLineText();
+ assertTrue(line.indexOf(`B${break_count}`) > 0);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+function* g() {
+ yield 1;
+}
+
+function* f() {
+ yield* g(); // B1
+ assertEquals(2, break_count); // B2
+ return 1; // B3
+}
+
+Debug.setBreakPoint(f, 1);
+Debug.setBreakPoint(f, 2);
+Debug.setBreakPoint(f, 3);
+
+for (let _ of f()) { }
+
+assertEquals(3, break_count);
+assertNull(exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-handle.js b/deps/v8/test/mjsunit/debug-handle.js
index 1582b9f121..ca02592542 100644
--- a/deps/v8/test/mjsunit/debug-handle.js
+++ b/deps/v8/test/mjsunit/debug-handle.js
@@ -108,7 +108,7 @@ function listener(event, exec_state, event_data, data) {
var handle_a = evaluateRequest(exec_state, '{"expression":"b","frame":1}');
assertEquals(handle_o, handle_a);
assertEquals(handle_a, handle_b);
- assertFalse(handle_o == handle_p, "o and p have he same handle");
+ assertFalse(handle_o == handle_p, "o and p have the same handle");
var response;
var count;
@@ -140,7 +140,7 @@ function listener(event, exec_state, event_data, data) {
var handle_g = evaluateRequest(exec_state, '{"expression":"g"}');
var handle_caller = evaluateRequest(exec_state, '{"expression":"f.caller"}');
- assertFalse(handle_f == handle_g, "f and g have he same handle");
+ assertFalse(handle_f == handle_g, "f and g have the same handle");
assertEquals(handle_g, handle_caller, "caller for f should be g");
response = lookupRequest(exec_state, '{"handles":[' + handle_f + ']}', true);
diff --git a/deps/v8/test/mjsunit/debug-liveedit-exceptions.js b/deps/v8/test/mjsunit/debug-liveedit-exceptions.js
new file mode 100644
index 0000000000..28ec01dbad
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-liveedit-exceptions.js
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+function BestEditor() {
+ throw 'Emacs';
+}
+
+var exception = null;
+var results = [];
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ var source_line = event_data.sourceLineText();
+ print(source_line);
+ log.push(source_line);
+ switch (results.length) {
+ case 0:
+ Replace(BestEditor, "Emacs", "Eclipse");
+ break;
+ case 1:
+ Replace(BestEditor, "Eclipse", "Vim");
+ break;
+ case 2:
+ break;
+ default:
+ assertUnreachable();
+ }
+ } catch (e) {
+ exception = e;
+ }
+};
+
+function Replace(fun, original, patch) {
+ var script = Debug.findScript(fun);
+ if (fun.toString().indexOf(original) < 0) return;
+ var patch_pos = script.source.indexOf(original);
+ var change_log = [];
+ Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos, original.length, patch, change_log);
+}
+
+Debug.setListener(listener);
+Debug.setBreakOnException();
+
+for (var i = 0; i < 3; i++) {
+ try {
+ BestEditor();
+ } catch (e) {
+ results.push(e);
+ }
+}
+Debug.setListener(null);
+
+assertNull(exception);
+assertEquals(["Emacs", "Eclipse", "Vim"], results);
+print(JSON.stringify(log, 1));
+assertEquals([
+ " throw 'Emacs';",
+ " throw 'Eclipse';",
+ " throw 'Vim';",
+], log);
diff --git a/deps/v8/test/mjsunit/debug-liveedit-patch-positions.js b/deps/v8/test/mjsunit/debug-liveedit-patch-positions.js
deleted file mode 100644
index c669b5e862..0000000000
--- a/deps/v8/test/mjsunit/debug-liveedit-patch-positions.js
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-
-// Scenario: some function is being edited; the outer function has to have its
-// positions patched. Accoring to a special markup of function text
-// corresponding byte-code PCs should coincide before change and after it.
-
-Debug = debug.Debug
-Debug.setListener(function() {});
-
-eval(
- "function F1() { return 5; }\n" +
- "function ChooseAnimal(/*$*/ ) {\n" +
- "/*$*/ var x = F1(/*$*/ );\n" +
- "/*$*/ var res/*$*/ =/*$*/ (function() { return 'Cat'; } )();\n" +
- "/*$*/ var y/*$*/ = F2(/*$*/ F1()/*$*/ , F1(/*$*/ )/*$*/ );\n" +
- "/*$*/ if (/*$*/ x.toString(/*$*/ )) { /*$*/ y = 3;/*$*/ } else {/*$*/ y = 8;/*$*/ }\n" +
- "/*$*/ var z = /*$*/ x * y;\n" +
- "/*$*/ return/*$*/ res/*$*/ + z;/*$*/ }\n" +
- "function F2(x, y) { return x + y; }"
-);
-
-// Find all *$* markers in text of the function and read corresponding statement
-// PCs.
-function ReadMarkerPositions(func) {
- var text = func.toString();
- var positions = new Array();
- var match;
- var pattern = /\/\*\$\*\//g;
- while ((match = pattern.exec(text)) != null) {
- positions.push(match.index);
- }
- return positions;
-}
-
-function ReadPCMap(func, positions) {
- var res = new Array();
- for (var i = 0; i < positions.length; i++) {
- var pc = Debug.LiveEdit.GetPcFromSourcePos(func, positions[i]);
-
- if (typeof pc === 'undefined') {
- // Function was marked for recompilation and it's code was replaced with a
- // stub. This can happen at any time especially if we are running with
- // --stress-opt. There is no way to get PCs now.
- return;
- }
-
- res.push(pc);
- }
-
- return res;
-}
-
-function ApplyPatch(orig_animal, new_animal) {
- var res = ChooseAnimal();
- assertEquals(orig_animal + "15", res);
-
- var script = Debug.findScript(ChooseAnimal);
-
- var orig_string = "'" + orig_animal + "'";
- var patch_string = "'" + new_animal + "'";
- var patch_pos = script.source.indexOf(orig_string);
-
- var change_log = new Array();
-
- Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script,
- patch_pos,
- orig_string.length,
- patch_string,
- change_log);
-
- print("Change log: " + JSON.stringify(change_log) + "\n");
-
- var markerPositions = ReadMarkerPositions(ChooseAnimal);
- var pcArray = ReadPCMap(ChooseAnimal, markerPositions);
-
- var res = ChooseAnimal();
- assertEquals(new_animal + "15", res);
-
- return pcArray;
-}
-
-var pcArray1 = ApplyPatch('Cat', 'Dog');
-
-// When we patched function for the first time it was deoptimized.
-// Check that after the second patch maping between sources position and
-// pcs will not change.
-
-var pcArray2 = ApplyPatch('Dog', 'Capybara');
-
-print(pcArray1);
-print(pcArray2);
-
-// Function can be marked for recompilation at any point (especially if we are
-// running with --stress-opt). When we mark function for recompilation we
-// replace it's code with stub. So there is no reliable way to get PCs for
-// function.
-if (pcArray1 && pcArray2) {
- assertArrayEquals(pcArray1, pcArray2);
-}
-
-Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-liveedit-stepin.js b/deps/v8/test/mjsunit/debug-liveedit-stepin.js
index 601a66f93d..c6070ce284 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-stepin.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-stepin.js
@@ -7,8 +7,7 @@
Debug = debug.Debug
function BestEditor() {
- var best_editor = "Emacs";
- return best_editor;
+ return 'Emacs';
}
var exception = null;
@@ -62,20 +61,15 @@ print(JSON.stringify(log, 1));
assertEquals([
"debugger;",
"results.push(BestEditor());",
- " var best_editor = \"Emacs\";",
- " return best_editor;","}",
+ " return 'Emacs';","}",
"results.push(BestEditor());",
"results.push(BestEditor());",
- " var best_editor = \"Emacs\";",
- " return best_editor;",
- " var best_editor = \"Eclipse\";",
- " return best_editor;","}",
+ " return 'Emacs';",
+ " return 'Eclipse';","}",
"results.push(BestEditor());",
"results.push(BestEditor());",
- " var best_editor = \"Eclipse\";",
- " return best_editor;",
- " var best_editor = \"Vim\";",
- " return best_editor;",
+ " return 'Eclipse';",
+ " return 'Vim';",
"}","results.push(BestEditor());",
"Debug.setListener(null);"
], log);
diff --git a/deps/v8/test/mjsunit/debug-scopes-suspended-generators.js b/deps/v8/test/mjsunit/debug-scopes-suspended-generators.js
new file mode 100644
index 0000000000..f4750b7f76
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-scopes-suspended-generators.js
@@ -0,0 +1,470 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --ignition
+// The functions used for testing backtraces. They are at the top to make the
+// testing of source line/column easier.
+
+// Get the Debug object exposed from the debug context global object.
+var Debug = debug.Debug;
+
+var test_name;
+var exception;
+var begin_test_count = 0;
+var end_test_count = 0;
+
+// Initialize for a new test.
+function BeginTest(name) {
+ test_name = name;
+ exception = null;
+ begin_test_count++;
+}
+
+// Check result of a test.
+function EndTest() {
+ assertNull(exception, test_name + " / " + exception);
+ end_test_count++;
+}
+
+// Check that two scopes are the same.
+function assertScopeMirrorEquals(scope1, scope2) {
+ assertEquals(scope1.scopeType(), scope2.scopeType());
+ assertEquals(scope1.frameIndex(), scope2.frameIndex());
+ assertEquals(scope1.scopeIndex(), scope2.scopeIndex());
+ assertPropertiesEqual(scope1.scopeObject().value(),
+ scope2.scopeObject().value());
+}
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, gen_mirror) {
+ var all_scopes = gen_mirror.allScopes();
+ assertEquals(scopes.length, gen_mirror.scopeCount());
+ assertEquals(scopes.length, all_scopes.length,
+ "FrameMirror.allScopes length");
+ for (var i = 0; i < scopes.length; i++) {
+ var scope = gen_mirror.scope(i);
+ assertTrue(scope.isScope());
+ assertEquals(scopes[i], scope.scopeType(),
+ `Scope ${i} has unexpected type`);
+ assertScopeMirrorEquals(all_scopes[i], scope);
+
+ // Check the global object when hitting the global scope.
+ if (scopes[i] == debug.ScopeType.Global) {
+ // Objects don't have the same class (one is "global", the other is
+ // "Object"), so just check the properties directly.
+ assertPropertiesEqual(this, scope.scopeObject().value());
+ }
+ }
+}
+
+// Check that the content of the scope is as expected. For functions just check
+// that there is a function.
+function CheckScopeContent(content, number, gen_mirror) {
+ var scope = gen_mirror.scope(number);
+ var count = 0;
+ for (var p in content) {
+ var property_mirror = scope.scopeObject().property(p);
+ assertFalse(property_mirror.isUndefined(),
+ 'property ' + p + ' not found in scope');
+ if (typeof(content[p]) === 'function') {
+ assertTrue(property_mirror.value().isFunction());
+ } else {
+ assertEquals(content[p], property_mirror.value().value(),
+ 'property ' + p + ' has unexpected value');
+ }
+ count++;
+ }
+
+ // 'arguments' might be exposed in the local and closure scope. Just
+ // ignore this.
+ var scope_size = scope.scopeObject().properties().length;
+ if (!scope.scopeObject().property('arguments').isUndefined()) {
+ scope_size--;
+ }
+ // Ditto for 'this'.
+ if (!scope.scopeObject().property('this').isUndefined()) {
+ scope_size--;
+ }
+ // Temporary variables introduced by the parser have not been materialized.
+ assertTrue(scope.scopeObject().property('').isUndefined());
+
+ if (count != scope_size) {
+ print('Names found in scope:');
+ var names = scope.scopeObject().propertyNames();
+ for (var i = 0; i < names.length; i++) {
+ print(names[i]);
+ }
+ }
+ assertEquals(count, scope_size);
+}
+
+// Simple empty closure scope.
+
+function *gen1() {
+ yield 1;
+ return 2;
+}
+
+var g = gen1();
+var gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({}, 0, gm);
+
+// Closure scope with a parameter.
+
+function *gen2(a) {
+ yield a;
+ return 2;
+}
+
+g = gen2(42);
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({a: 42}, 0, gm);
+
+// Closure scope with a parameter.
+
+function *gen3(a) {
+ var b = 1
+ yield a;
+ return b;
+}
+
+g = gen3(0);
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({a: 0, b: undefined}, 0, gm);
+
+g.next(); // Create b.
+CheckScopeContent({a: 0, b: 1}, 0, gm);
+
+// Closure scope with a parameter.
+
+function *gen4(a, b) {
+ var x = 2;
+ yield a;
+ var y = 3;
+ return b;
+}
+
+g = gen4(0, 1);
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({a: 0, b: 1, x: undefined, y: undefined}, 0, gm);
+
+g.next(); // Create x.
+CheckScopeContent({a: 0, b: 1, x: 2, y: undefined}, 0, gm);
+
+g.next(); // Create y.
+CheckScopeContent({a: 0, b: 1, x: 2, y: 3}, 0, gm);
+
+// Closure introducing local variable using eval.
+
+function *gen5(a) {
+ eval('var b = 2');
+ return b;
+}
+
+g = gen5(1);
+g.next();
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({a: 1, b: 2}, 0, gm);
+
+// Single empty with block.
+
+function *gen6() {
+ with({}) {
+ yield 1;
+ }
+ yield 2;
+ return 3;
+}
+
+g = gen6();
+g.next();
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({}, 0, gm);
+
+g.next();
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+
+// Nested empty with blocks.
+
+function *gen7() {
+ with({}) {
+ with({}) {
+ yield 1;
+ }
+ yield 2;
+ }
+ return 3;
+}
+
+g = gen7();
+g.next();
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({}, 0, gm);
+
+// Nested with blocks using in-place object literals.
+
+function *gen8() {
+ with({a: 1,b: 2}) {
+ with({a: 2,b: 1}) {
+ yield a;
+ }
+ yield a;
+ }
+ return 3;
+}
+
+g = gen8();
+g.next();
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({a: 2, b: 1}, 0, gm);
+
+g.next();
+CheckScopeContent({a: 1, b: 2}, 0, gm);
+
+// Catch block.
+
+function *gen9() {
+ try {
+ throw 42;
+ } catch (e) {
+ yield e;
+ }
+ return 3;
+}
+
+g = gen9();
+g.next();
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({e: 42}, 0, gm);
+
+// For statement with block scope.
+
+function *gen10() {
+ for (let i = 0; i < 42; i++) yield i;
+ return 3;
+}
+
+g = gen10();
+g.next();
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Block,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({i: 0}, 0, gm);
+
+g.next();
+CheckScopeContent({i: 1}, 0, gm);
+CheckScopeContent({i: 0}, 1, gm); // Additional block scope with i = 0;
+
+// Nested generators.
+
+var gen12;
+function *gen11() {
+ gen12 = function*() {
+ var a = 1;
+ yield 1;
+ return 2;
+ }();
+
+ var a = 0;
+ yield* gen12;
+}
+
+g = gen11();
+g.next();
+
+gm = debug.MakeMirror(gen12);
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({a: 1}, 0, gm);
+CheckScopeContent({a: 0}, 1, gm);
+
+// Set a variable in an empty scope.
+
+function *gen13() {
+ yield 1;
+ return 2;
+}
+
+var g = gen13();
+var gm = debug.MakeMirror(g);
+assertThrows(() => gm.scope(0).setVariableValue("a", 42));
+CheckScopeContent({}, 0, gm);
+
+// Set a variable in a simple scope.
+
+function *gen14() {
+ var a = 0;
+ yield 1;
+ yield a;
+ return 2;
+}
+
+var g = gen14();
+assertEquals(1, g.next().value);
+
+var gm = debug.MakeMirror(g);
+CheckScopeContent({a: 0}, 0, gm);
+
+gm.scope(0).setVariableValue("a", 1);
+CheckScopeContent({a: 1}, 0, gm);
+
+assertEquals(1, g.next().value);
+
+// Set a variable in nested with blocks using in-place object literals.
+
+function *gen15() {
+ var c = 3;
+ with({a: 1,b: 2}) {
+ var d = 4;
+ yield a;
+ var e = 5;
+ }
+ yield e;
+ return e;
+}
+
+var g = gen15();
+assertEquals(1, g.next().value);
+
+var gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({a: 1, b: 2}, 0, gm);
+CheckScopeContent({c: 3, d: 4, e: undefined}, 1, gm);
+
+// Variables don't exist in the given scope.
+assertThrows(() => gm.scope(0).setVariableValue("c", 42));
+assertThrows(() => gm.scope(1).setVariableValue("a", 42));
+
+// Variables in with scope are immutable.
+assertThrows(() => gm.scope(0).setVariableValue("a", 3));
+assertThrows(() => gm.scope(0).setVariableValue("b", 3));
+
+gm.scope(1).setVariableValue("c", 1);
+gm.scope(1).setVariableValue("e", 42);
+
+CheckScopeContent({a: 1, b: 2}, 0, gm);
+CheckScopeContent({c: 1, d: 4, e: 42}, 1, gm);
+assertEquals(5, g.next().value); // Initialized after set.
+
+CheckScopeChain([debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+
+gm.scope(0).setVariableValue("e", 42);
+
+CheckScopeContent({c: 1, d: 4, e: 42}, 0, gm);
+assertEquals(42, g.next().value);
+
+// Set a variable in nested with blocks using in-place object literals plus a
+// nested block scope.
+
+function *gen16() {
+ var c = 3;
+ with({a: 1,b: 2}) {
+ let d = 4;
+ yield a;
+ let e = 5;
+ yield d;
+ }
+ return 3;
+}
+
+var g = gen16();
+g.next();
+
+var gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.With,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({d: 4}, 0, gm);
+CheckScopeContent({a: 1, b: 2}, 1, gm);
+CheckScopeContent({c: 3}, 2, gm);
+
+gm.scope(0).setVariableValue("d", 1);
+CheckScopeContent({d: 1}, 0, gm);
+
+assertEquals(1, g.next().value);
+
+// Set variable in catch block.
+
+var yyzyzzyz = 4829;
+let xxxyyxxyx = 42284;
+function *gen17() {
+ try {
+ throw 42;
+ } catch (e) {
+ yield e;
+ yield e;
+ }
+ return 3;
+}
+
+g = gen17();
+g.next();
+
+gm = debug.MakeMirror(g);
+CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], gm);
+CheckScopeContent({e: 42}, 0, gm);
+CheckScopeContent({xxxyyxxyx: 42284}, 2, gm);
+
+gm.scope(0).setVariableValue("e", 1);
+CheckScopeContent({e: 1}, 0, gm);
+
+assertEquals(1, g.next().value);
+
+// Script scope.
+gm.scope(2).setVariableValue("xxxyyxxyx", 42);
+assertEquals(42, xxxyyxxyx);
+
+// Global scope.
+assertThrows(() => gm.scope(3).setVariableValue("yyzyzzyz", 42));
+assertEquals(4829, yyzyzzyz);
diff --git a/deps/v8/test/mjsunit/debug-scopes.js b/deps/v8/test/mjsunit/debug-scopes.js
index 3659d4e129..935de9cc98 100644
--- a/deps/v8/test/mjsunit/debug-scopes.js
+++ b/deps/v8/test/mjsunit/debug-scopes.js
@@ -183,10 +183,8 @@ function CheckScopeContent(content, number, exec_state) {
if (!scope.scopeObject().property('this').isUndefined()) {
scope_size--;
}
- // Skip property with empty name.
- if (!scope.scopeObject().property('').isUndefined()) {
- scope_size--;
- }
+ // Temporary variables introduced by the parser have not been materialized.
+ assertTrue(scope.scopeObject().property('').isUndefined());
if (count != scope_size) {
print('Names found in scope:');
@@ -1179,11 +1177,10 @@ var code3 = "function for_statement() { \n" +
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Block,
- debug.ScopeType.Block,
debug.ScopeType.Local,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
- CheckScopeChainPositions([{start: 52, end: 111}, {start: 42, end: 111}, {start: 22, end: 145}, {}, {}], exec_state);
+ CheckScopeChainPositions([{start: 52, end: 111}, {start: 22, end: 145}, {}, {}], exec_state);
}
eval(code3);
EndTest();
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 5396415087..3bedb74a35 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -84,11 +84,6 @@ var math_script = Debug.findScript('native math.js');
assertEquals('native math.js', math_script.name);
assertEquals(Debug.ScriptType.Native, math_script.type);
-// Test a builtins delay loaded script.
-var date_delay_script = Debug.findScript('native json.js');
-assertEquals('native json.js', date_delay_script.name);
-assertEquals(Debug.ScriptType.Native, date_delay_script.type);
-
// Test a debugger script.
var debug_delay_script = Debug.findScript('native debug.js');
assertEquals('native debug.js', debug_delay_script.name);
diff --git a/deps/v8/test/mjsunit/debug-sourceinfo.js b/deps/v8/test/mjsunit/debug-sourceinfo.js
index cb41107c60..b79fb8e3ec 100644
--- a/deps/v8/test/mjsunit/debug-sourceinfo.js
+++ b/deps/v8/test/mjsunit/debug-sourceinfo.js
@@ -63,12 +63,11 @@ var comment_lines = 28;
// This is the last position in the entire file (note: this equals
// file size of <debug-sourceinfo.js> - 1, since starting at 0).
-var last_position = 11519;
+var last_position = 8126;
// This is the last line of entire file (note: starting at 0).
-var last_line = 269;
-// This is the last column of last line (note: starting at 0 and +1, due
-// to trailing <LF>).
-var last_column = 1;
+var last_line = 200;
+// This is the last column of last line (note: starting at 0).
+var last_column = 71;
// This magic number is the length or the first line comment (actually number
// of characters before 'function a(...'.
@@ -168,66 +167,6 @@ assertEquals(start_d, script.locationFromPosition(start_d).position);
assertEquals(11, script.locationFromPosition(start_d).line - comment_lines);
assertEquals(10, script.locationFromPosition(start_d).column);
-// Test first line.
-assertEquals(0, script.locationFromLine().position);
-assertEquals(0, script.locationFromLine().line);
-assertEquals(0, script.locationFromLine().column);
-assertEquals(0, script.locationFromLine(0).position);
-assertEquals(0, script.locationFromLine(0).line);
-assertEquals(0, script.locationFromLine(0).column);
-
-// Test first line column 1.
-assertEquals(1, script.locationFromLine(0, 1).position);
-assertEquals(0, script.locationFromLine(0, 1).line);
-assertEquals(1, script.locationFromLine(0, 1).column);
-
-// Test first line offset 1.
-assertEquals(1, script.locationFromLine(0, 0, 1).position);
-assertEquals(0, script.locationFromLine(0, 0, 1).line);
-assertEquals(1, script.locationFromLine(0, 0, 1).column);
-
-// Test offset function a().
-assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, 0, start_a).column);
-
-// Test second line offset function a().
-assertEquals(start_a + 13, script.locationFromLine(1, 0, start_a).position);
-assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_a).column);
-
-// Test second line column 2 offset function a().
-assertEquals(start_a + 13 + 1, script.locationFromLine(1, 1, start_a).position);
-assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
-assertEquals(2, script.locationFromLine(1, 2, start_a).column);
-
-// Test offset function b().
-assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
-assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
-assertEquals(13, script.locationFromLine(0, 0, start_b).column);
-
-// Test second line offset function b().
-assertEquals(start_b + 5, script.locationFromLine(1, 0, start_b).position);
-assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_b).column);
-
-// Test second line column 10 offset function b().
-assertEquals(start_b + 5 + 10, script.locationFromLine(1, 10, start_b).position);
-assertEquals(2, script.locationFromLine(1, 10, start_b).line - comment_lines);
-assertEquals(10, script.locationFromLine(1, 10, start_b).column);
-
-// Test second line column 11 offset function b. Second line in b is 10 long
-// using column 11 wraps to next line.
-assertEquals(start_b + 5 + 11, script.locationFromLine(1, 11, start_b).position);
-assertEquals(3, script.locationFromLine(1, 11, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 11, start_b).column);
-
// Test the Debug.findSourcePosition which wraps SourceManager.
assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);
assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);
@@ -260,11 +199,3 @@ assertEquals(last_column, script.locationFromPosition(last_position).column);
assertEquals(last_line + 1,
script.locationFromPosition(last_position + 1).line);
assertEquals(0, script.locationFromPosition(last_position + 1).column);
-
-// Test that script.sourceLine(line) works.
-var location;
-
-for (line = 0; line < num_lines_d; line++) {
- var line_content_regexp = new RegExp(" x = " + (line + 1));
- assertTrue(line_content_regexp.test(script.sourceLine(start_line_d + line)));
-}
diff --git a/deps/v8/test/mjsunit/debug-sourceslice.js b/deps/v8/test/mjsunit/debug-sourceslice.js
deleted file mode 100644
index db9a3e7da8..0000000000
--- a/deps/v8/test/mjsunit/debug-sourceslice.js
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Source lines for test.
-var lines = [ 'function a() { b(); };\n',
- 'function b() {\n',
- ' c(true);\n',
- '};\n',
- ' function c(x) {\n',
- ' if (x) {\n',
- ' return 1;\n',
- ' } else {\n',
- ' return 1;\n',
- ' }\n',
- ' };\n' ];
-
-// Build source by putting all lines together
-var source = '';
-for (var i = 0; i < lines.length; i++) {
- source += lines[i];
-}
-eval(source);
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// Get the script object from one of the functions in the source.
-var script = Debug.findScript(a);
-
-// Make sure that the source is as expected.
-assertEquals(source, script.source);
-assertEquals(source, script.sourceSlice().sourceText());
-
-// Try all possible line interval slices.
-for (var slice_size = 0; slice_size < lines.length; slice_size++) {
- for (var n = 0; n < lines.length - slice_size; n++) {
- var slice = script.sourceSlice(n, n + slice_size);
- assertEquals(n, slice.from_line);
- assertEquals(n + slice_size, slice.to_line);
-
- var text = slice.sourceText();
- var expected = '';
- for (var i = 0; i < slice_size; i++) {
- expected += lines[n + i];
- }
- assertEquals(expected, text);
- }
-}
diff --git a/deps/v8/test/mjsunit/debug-stack-check-position.js b/deps/v8/test/mjsunit/debug-stack-check-position.js
new file mode 100644
index 0000000000..a5570ce904
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-stack-check-position.js
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var exception = null;
+var loop = true;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertTrue(exec_state.frame(0).sourceLineText().indexOf("BREAK") > 0);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+function f() { // BREAK
+ return 1;
+}
+
+Debug.setListener(listener);
+
+%ScheduleBreak(); // Break on function entry.
+f();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/delete.js b/deps/v8/test/mjsunit/delete.js
index 8d4636af45..20fa6bfb67 100644
--- a/deps/v8/test/mjsunit/delete.js
+++ b/deps/v8/test/mjsunit/delete.js
@@ -178,3 +178,17 @@ function load_a(x) {
}
load_deleted_property_using_IC();
+
+
+(function deleteLargeDoubleArrayAtEnd() {
+ var o = {};
+ var max = 100000;
+ for (var i = 0; i <= max; i++) {
+ o[i] = 1.1;
+ }
+ delete o[max];
+ for (var i = 0; i < max; i++) {
+ assertEquals(1.1, o[i]);
+ }
+ assertEquals(undefined, o[max]);
+})();
diff --git a/deps/v8/test/mjsunit/dictionary-properties.js b/deps/v8/test/mjsunit/dictionary-properties.js
index 0659268bac..33360d7f52 100644
--- a/deps/v8/test/mjsunit/dictionary-properties.js
+++ b/deps/v8/test/mjsunit/dictionary-properties.js
@@ -39,7 +39,13 @@ function SlowPrototype() {
SlowPrototype.prototype.bar = 2;
SlowPrototype.prototype.baz = 3;
delete SlowPrototype.prototype.baz;
-new SlowPrototype;
+assertFalse(%HasFastProperties(SlowPrototype.prototype));
+var slow_proto = new SlowPrototype;
+// ICs make prototypes fast.
+function ic() { return slow_proto.bar; }
+ic();
+ic();
+assertTrue(%HasFastProperties(slow_proto.__proto__));
// Prototypes stay fast even after deleting properties.
assertTrue(%HasFastProperties(SlowPrototype.prototype));
diff --git a/deps/v8/test/mjsunit/double-intrinsics.js b/deps/v8/test/mjsunit/double-intrinsics.js
deleted file mode 100644
index 16d6538937..0000000000
--- a/deps/v8/test/mjsunit/double-intrinsics.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-function assertDoubleBits(hi, lo, x) {
- hi = hi | 0;
- lo = lo | 0;
- assertEquals(x, %_ConstructDouble(hi, lo));
- assertEquals(hi, %_DoubleHi(x));
- assertEquals(lo, %_DoubleLo(x));
- assertEquals(x, %_ConstructDouble(%_DoubleHi(x), %_DoubleLo(x)));
-}
-
-
-var tests = [0x7ff00000, 0x00000000, Infinity,
- 0xfff00000, 0x00000000, -Infinity,
- 0x80000000, 0x00000000, -0,
- 0x400921fb, 0x54442d18, Math.PI,
- 0xc00921fb, 0x54442d18, -Math.PI,
- 0x4005bf0a, 0x8b145769, Math.E,
- 0xc005bf0a, 0x8b145769, -Math.E,
- 0xbfe80000, 0x00000000, -0.75];
-
-
-for (var i = 0; i < tests.length; i += 3) {
- assertDoubleBits(tests[i], tests[i + 1], tests[i + 2]);
-}
-
-%OptimizeFunctionOnNextCall(assertDoubleBits);
-
-for (var i = 0; i < tests.length; i += 3) {
- assertDoubleBits(tests[i], tests[i + 1], tests[i + 2]);
- assertOptimized(assertDoubleBits);
-}
diff --git a/deps/v8/test/mjsunit/eagerly-parsed-lazily-compiled-functions.js b/deps/v8/test/mjsunit/eagerly-parsed-lazily-compiled-functions.js
new file mode 100644
index 0000000000..edc14430ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/eagerly-parsed-lazily-compiled-functions.js
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --min-preparse-length=0
+
+// The test functions in this file will be eagerly compiled. The functions
+// inside will be eagerly parsed but lazily compiled.
+
+(function TestLengths() {
+ function inner(p1, p2, p3) { }
+ assertEquals(3, inner.length);
+})();
+
+(function TestAccessingContextVariables() {
+ var in_context = 8;
+ function inner() { return in_context; }
+ assertEquals(8, inner());
+})();
+
+(function TestAccessingContextVariablesFromDeeper() {
+ var in_context = 8;
+ function inner() {
+ function inner_inner() {
+ function inner_inner_inner() {
+ return in_context;
+ }
+ return inner_inner_inner;
+ }
+ return inner_inner;
+ }
+ assertEquals(8, inner()()());
+})();
diff --git a/deps/v8/test/mjsunit/error-tostring-omit.js b/deps/v8/test/mjsunit/error-tostring-omit.js
index 9ff43fa9b2..48e1399d7a 100644
--- a/deps/v8/test/mjsunit/error-tostring-omit.js
+++ b/deps/v8/test/mjsunit/error-tostring-omit.js
@@ -42,7 +42,7 @@ assertTrue(veryLongString().length > 256);
var re = /...<omitted>.../;
try {
- Number.prototype.toFixed.call(veryLongString);
+ Date.prototype.setDate.call(veryLongString);
} catch (e) {
assertTrue(e.message.length < 256);
assertTrue(re.test(e.message));
diff --git a/deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-1.js b/deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-1.js
new file mode 100644
index 0000000000..2dfddc3504
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-1.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testConcatRevokedProxyToArrayInPrototype() {
+ "use strict";
+ var handler = {
+ get(_, name) {
+ if (name === Symbol.isConcatSpreadable) {
+ p.revoke();
+ }
+ return target[name];
+ }
+ }
+
+ var p = Proxy.revocable([], handler);
+ var target = { __proto__: p.proxy };
+ assertThrows(function() { [].concat(target); }, TypeError);
+})();
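+
+// Note: the get trap above revokes the proxy while concat is still walking
+// the prototype chain, so the next internal access to the revoked proxy
+// throws the expected TypeError.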
diff --git a/deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-2.js b/deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-2.js
new file mode 100644
index 0000000000..f91eb655bf
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-concat-revoked-proxy-2.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testConcatRevokedProxyToArray() {
+ "use strict";
+ var handler = {
+ get(_, name) {
+ if (name === Symbol.isConcatSpreadable) {
+ p.revoke();
+ }
+ return target[name];
+ }
+ }
+
+ var target = [];
+ var p = Proxy.revocable(target, handler);
+ assertThrows(function() { [].concat(p.proxy); }, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/array-concat.js b/deps/v8/test/mjsunit/es6/array-concat.js
index fe320d6858..f57c10e03e 100644
--- a/deps/v8/test/mjsunit/es6/array-concat.js
+++ b/deps/v8/test/mjsunit/es6/array-concat.js
@@ -1,7 +1,6 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
(function testArrayConcatArity() {
"use strict";
assertEquals(1, Array.prototype.concat.length);
@@ -20,6 +19,15 @@
assertEquals(false, desc.enumerable);
})();
+(function testNonConcatSpreadableArray() {
+ "use strict"
+ var array = [1, 2, 3];
+ assertEquals(array, [].concat(array));
+ assertEquals(array, array.concat([]));
+ array[Symbol.isConcatSpreadable] = false;
+ assertEquals([[1,2,3]], [].concat(array));
+ assertEquals([[1,2,3]], array.concat([]));
+})();
(function testConcatArrayLike() {
"use strict";
@@ -864,3 +872,25 @@ logger.get = function(t, trap, r) {
assertThrows(() => [].concat(obj), TypeError);
assertThrows(() => Array.prototype.concat.apply(obj), TypeError);
})();
+
+(function testConcatRevokedProxy() {
+ "use strict";
+ var target = [];
+ var handler = {
+ get(_, name) {
+ if (name === Symbol.isConcatSpreadable) {
+ p.revoke();
+ }
+ return target[name];
+ }
+ }
+
+ p = Proxy.revocable(target, handler);
+ target = {};
+ target.__proto__ = p.proxy;
+ assertThrows(function() { [].concat({ __proto__: p.proxy }); }, TypeError);
+
+ target = [];
+ var p = Proxy.revocable(target, handler);
+ assertThrows(function() { [].concat(p.proxy); }, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/array-prototype-values.js b/deps/v8/test/mjsunit/es6/array-prototype-values.js
index 64162c47c8..b7c4e78e33 100644
--- a/deps/v8/test/mjsunit/es6/array-prototype-values.js
+++ b/deps/v8/test/mjsunit/es6/array-prototype-values.js
@@ -13,3 +13,4 @@ assertTrue(valuesDesc.configurable);
assertTrue(valuesDesc.writable);
assertFalse(valuesDesc.enumerable);
assertTrue(Array.prototype[Symbol.unscopables].values);
+assertThrows(() => new Array.prototype[Symbol.iterator], TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor-accessor.js b/deps/v8/test/mjsunit/es6/array-species-constructor-accessor.js
index 4c852f06f0..7ebf328a8a 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-constructor-accessor.js
+++ b/deps/v8/test/mjsunit/es6/array-species-constructor-accessor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Overwriting the constructor of an instance updates the protector
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js b/deps/v8/test/mjsunit/es6/array-species-constructor-delete.js
index f341282dd9..fff22a2a8c 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js
+++ b/deps/v8/test/mjsunit/es6/array-species-constructor-delete.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Overwriting the constructor of an instance updates the protector
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor.js b/deps/v8/test/mjsunit/es6/array-species-constructor.js
index d766e09eee..0d888f46ee 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-constructor.js
+++ b/deps/v8/test/mjsunit/es6/array-species-constructor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Overwriting the constructor of an instance updates the protector
diff --git a/deps/v8/test/mjsunit/harmony/array-species-delete.js b/deps/v8/test/mjsunit/es6/array-species-delete.js
index ba49414069..16a2fa26f9 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-delete.js
+++ b/deps/v8/test/mjsunit/es6/array-species-delete.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Overwriting the constructor of an instance updates the protector
diff --git a/deps/v8/test/mjsunit/harmony/array-species-modified.js b/deps/v8/test/mjsunit/es6/array-species-modified.js
index 73c52b91a4..58feb31669 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-modified.js
+++ b/deps/v8/test/mjsunit/es6/array-species-modified.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Overwriting Array[Symbol.species] updates the protector
diff --git a/deps/v8/test/mjsunit/es6/array-species-neg-zero.js b/deps/v8/test/mjsunit/es6/array-species-neg-zero.js
new file mode 100644
index 0000000000..d60b8ba00f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-species-neg-zero.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * 9.4.2.3 ArraySpeciesCreate(originalArray, length)
+ *
+ * 1. Assert: length is an integer Number ≥ 0.
+ * 2. If length is −0, let length be +0.
+ * [...]
+ */
+
+var x = [];
+var deleteCount;
+
+x.constructor = function() {};
+x.constructor[Symbol.species] = function(param) {
+ deleteCount = param;
+};
+
+x.splice(0, -0);
+
+assertEquals(0, deleteCount);
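+
+// Note: mjsunit's assertEquals distinguishes -0 from +0, so this check relies
+// on ArraySpeciesCreate normalizing -0 to +0 as quoted above.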
diff --git a/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js b/deps/v8/test/mjsunit/es6/array-species-parent-constructor.js
index 347732e1de..b4fb1d56e3 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js
+++ b/deps/v8/test/mjsunit/es6/array-species-parent-constructor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Overwriting Array.prototype.constructor updates the protector
diff --git a/deps/v8/test/mjsunit/harmony/array-species-proto.js b/deps/v8/test/mjsunit/es6/array-species-proto.js
index 70db751519..6b55881cd6 100644
--- a/deps/v8/test/mjsunit/harmony/array-species-proto.js
+++ b/deps/v8/test/mjsunit/es6/array-species-proto.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Overwriting an array instance's __proto__ updates the protector
diff --git a/deps/v8/test/mjsunit/harmony/array-species.js b/deps/v8/test/mjsunit/es6/array-species.js
index 19ed1d8185..25edf55104 100644
--- a/deps/v8/test/mjsunit/harmony/array-species.js
+++ b/deps/v8/test/mjsunit/es6/array-species.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
-
// Test the ES2015 @@species feature
'use strict';
diff --git a/deps/v8/test/mjsunit/harmony/arraybuffer-species.js b/deps/v8/test/mjsunit/es6/arraybuffer-species.js
index 0445a4b648..1ac6efbe26 100644
--- a/deps/v8/test/mjsunit/harmony/arraybuffer-species.js
+++ b/deps/v8/test/mjsunit/es6/arraybuffer-species.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
-
// ArrayBuffer.prototype.slice makes subclass and checks length
class MyArrayBuffer extends ArrayBuffer { }
diff --git a/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js b/deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js
index 8908ce4e56..b2ebfce6c9 100644
--- a/deps/v8/test/mjsunit/harmony/block-conflicts-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-conflicts-sloppy.js
@@ -4,8 +4,6 @@
// Test for conflicting variable bindings.
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
-
function CheckException(e) {
var string = e.toString();
assertTrue(string.indexOf("has already been declared") >= 0 ||
@@ -172,8 +170,5 @@ for (var v = 0; v < varbinds.length; ++v) {
TestNoConflict('(function (x) {' + varbinds[v] + '})();');
}
-// Test conflicting catch/function bindings.
-TestNoConflict('try {} catch(x) {' + funbind + '}');
-
// Test conflicting parameter/function bindings.
TestNoConflict('(function (x) {' + funbind + '})();');
diff --git a/deps/v8/test/mjsunit/es6/block-conflicts.js b/deps/v8/test/mjsunit/es6/block-conflicts.js
index 0e3d4e5a2a..bca3cb4ea5 100644
--- a/deps/v8/test/mjsunit/es6/block-conflicts.js
+++ b/deps/v8/test/mjsunit/es6/block-conflicts.js
@@ -170,8 +170,5 @@ for (var v = 0; v < varbinds.length; ++v) {
TestNoConflict('(function (x) {' + varbinds[v] + '})();');
}
-// Test conflicting catch/function bindings.
-TestNoConflict('try {} catch(x) {' + funbind + '}');
-
// Test conflicting parameter/function bindings.
TestNoConflict('(function (x) {' + funbind + '})();');
diff --git a/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js b/deps/v8/test/mjsunit/es6/block-const-assign-sloppy.js
index 5dde82cbf2..99024ef7cb 100644
--- a/deps/v8/test/mjsunit/harmony/block-const-assign-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-const-assign-sloppy.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy --harmony-sloppy-let
-
// Test that we throw early syntax errors in harmony mode
// when using an immutable binding in an assigment or with
// prefix/postfix decrement/increment operators.
diff --git a/deps/v8/test/mjsunit/es6/block-early-errors.js b/deps/v8/test/mjsunit/es6/block-early-errors.js
deleted file mode 100644
index 4af6521f64..0000000000
--- a/deps/v8/test/mjsunit/es6/block-early-errors.js
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --no-harmony-sloppy-let
-
-function CheckException(e) {
- var string = e.toString();
- assertInstanceof(e, SyntaxError);
-}
-
-function Check(str) {
- try {
- eval("(function () { " + str + " })");
- assertUnreachable();
- } catch (e) {
- CheckException(e);
- }
- try {
- eval("(function () { { " + str + " } })");
- assertUnreachable();
- } catch (e) {
- CheckException(e);
- }
-}
-
-// Check for early syntax errors when using let
-// declarations outside of strict mode.
-Check("let x;");
-Check("let x = 1;");
-Check("let x, y;");
diff --git a/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js b/deps/v8/test/mjsunit/es6/block-eval-var-over-let.js
index 98091b4218..e16d7a02a6 100644
--- a/deps/v8/test/mjsunit/harmony/block-eval-var-over-let.js
+++ b/deps/v8/test/mjsunit/es6/block-eval-var-over-let.js
@@ -2,21 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
-
// Var-let conflict in a function throws, even if the var is in an eval
// Throws at the top level of a function
assertThrows(function() {
let x = 1;
eval('var x');
-}, TypeError);
+}, SyntaxError);
// If the eval is in its own block scope, throws
assertThrows(function() {
let y = 1;
{ eval('var y'); }
-}, TypeError);
+}, SyntaxError);
// If the let is in its own block scope, with the eval, throws
assertThrows(function() {
@@ -24,7 +22,7 @@ assertThrows(function() {
let x = 1;
eval('var x');
}
-}, TypeError);
+}, SyntaxError);
// Legal if the let is no longer visible
assertDoesNotThrow(function() {
@@ -39,13 +37,13 @@ assertDoesNotThrow(function() {
assertThrows(function() {
const x = 1;
eval('var x');
-}, TypeError);
+}, SyntaxError);
// If the eval is in its own block scope, throws
assertThrows(function() {
const y = 1;
{ eval('var y'); }
-}, TypeError);
+}, SyntaxError);
// If the const is in its own block scope, with the eval, throws
assertThrows(function() {
@@ -53,7 +51,7 @@ assertThrows(function() {
const x = 1;
eval('var x');
}
-}, TypeError);
+}, SyntaxError);
// Legal if the const is no longer visible
assertDoesNotThrow(function() {
@@ -63,6 +61,23 @@ assertDoesNotThrow(function() {
eval('var x');
});
+// The same should work for lexical function declarations:
+// If the const is in its own block scope, with the eval, throws
+assertThrows(function() {
+ {
+ function x() {}
+ eval('var x');
+ }
+}, SyntaxError);
+
+// If the eval is in its own block scope, throws
+assertThrows(function() {
+ {
+ function y() {}
+ { eval('var y'); }
+ }
+}, SyntaxError);
+
// In global scope
let caught = false;
try {
@@ -126,8 +141,6 @@ try {
}
assertTrue(caught);
-// TODO(littledan): Hoisting x out of the block should be
-// prevented in this case BUG(v8:4479)
caught = false
try {
(function() {
@@ -139,5 +152,4 @@ try {
} catch (e) {
caught = true;
}
-// TODO(littledan): switch to assertTrue when bug is fixed
-assertTrue(caught);
+assertFalse(caught);
diff --git a/deps/v8/test/mjsunit/harmony/block-for-sloppy.js b/deps/v8/test/mjsunit/es6/block-for-sloppy.js
index 261c46a166..4f0f63faa3 100644
--- a/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-for-sloppy.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy --harmony-sloppy-let
-
function props(x) {
var array = [];
for (let p in x) array.push(p);
diff --git a/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js b/deps/v8/test/mjsunit/es6/block-leave-sloppy.js
index 0023fa08f3..1313026bf8 100644
--- a/deps/v8/test/mjsunit/harmony/block-leave-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-leave-sloppy.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy --harmony-sloppy-let
-
// We want to test the context chain shape. In each of the tests cases
// below, the outer with is to force a runtime lookup of the identifier 'x'
// to actually verify that the inner context has been discarded. A static
diff --git a/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
index ac7bca107e..8282d779a1 100644
--- a/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-contextual-sloppy.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-let
-
// let is usable as a variable with var, but not let or ES6 const
(function (){
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
index 4f29c05693..b5e81f7850 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-crankshaft-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --harmony-sloppy --harmony-sloppy-let
// Check that the following functions are optimizable.
var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
diff --git a/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js
index af95553bd0..ea0e39bd07 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-declaration-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js
@@ -27,8 +27,6 @@
// Test let declarations in various settings.
-// Flags: --harmony-sloppy --harmony-sloppy-let
-
// Global
let x;
let y = 2;
diff --git a/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-semantics-sloppy.js
index a55ff8fe49..4102ec8f5f 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-semantics-sloppy.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
-
// Test temporal dead zone semantics of let bound variables in
// function and block scopes.
diff --git a/deps/v8/test/mjsunit/es6/block-non-strict-errors.js b/deps/v8/test/mjsunit/es6/block-non-strict-errors.js
deleted file mode 100644
index db7f558905..0000000000
--- a/deps/v8/test/mjsunit/es6/block-non-strict-errors.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-sloppy-let --no-harmony-sloppy-function
-// Flags: --no-harmony-sloppy
-
-function CheckError(source) {
- var exception = null;
- try {
- eval(source);
- } catch (e) {
- exception = e;
- }
- assertNotNull(exception);
- assertEquals(
- "Block-scoped declarations (let, const, function, class) not yet supported outside strict mode",
- exception.message);
-}
-
-
-function CheckOk(source) {
- eval(source);
-}
-
-CheckError("let x = 1;");
-CheckError("{ let x = 1; }");
-CheckError("function f() { let x = 1; }");
-CheckError("for (let x = 1; x < 1; x++) {}");
-CheckError("for (let x of []) {}");
-CheckError("for (let x in []) {}");
-CheckError("class C {}");
-CheckError("class C extends Array {}");
-CheckError("(class {});");
-CheckError("(class extends Array {});");
-CheckError("(class C {});");
-CheckError("(class C exends Array {});");
-
-CheckOk("let = 1;");
-CheckOk("{ let = 1; }");
-CheckOk("function f() { let = 1; }");
-CheckOk("for (let = 1; let < 1; let++) {}");
diff --git a/deps/v8/test/mjsunit/es6/block-scope-class.js b/deps/v8/test/mjsunit/es6/block-scope-class.js
index 351feaa90e..7bbd49d338 100644
--- a/deps/v8/test/mjsunit/es6/block-scope-class.js
+++ b/deps/v8/test/mjsunit/es6/block-scope-class.js
@@ -4,8 +4,6 @@
// Test for conflicting variable bindings.
-// Flags: --harmony-sloppy --harmony-sloppy-function
-
function AssertEqualsStrictAndSloppy(value, code) {
assertEquals(value, eval("(function() {" + code + "})()"));
assertEquals(value, eval("(function() { 'use strict'; " + code + "})()"));
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js b/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
index 1785901276..f5c5a6326b 100644
--- a/deps/v8/test/mjsunit/harmony/block-scoping-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-sloppy --harmony-sloppy-let --harmony-sloppy-function
+// Flags: --allow-natives-syntax
// Test functionality of block scopes.
// Hoisting of var declarations.
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js b/deps/v8/test/mjsunit/es6/block-scoping-top-level-sloppy.js
index 6f6a8fe06d..2a3b903f9e 100644
--- a/deps/v8/test/mjsunit/harmony/block-scoping-top-level-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping-top-level-sloppy.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --min-preparse-length=0
-// Flags: --harmony-sloppy --harmony-sloppy-let
let xxx = 1;
let f = undefined;
diff --git a/deps/v8/test/mjsunit/es6/block-sloppy-function.js b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
new file mode 100644
index 0000000000..8cb9a4deda
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/block-sloppy-function.js
@@ -0,0 +1,656 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test Annex B 3.3 semantics for functions declared in blocks in sloppy mode.
+// http://www.ecma-international.org/ecma-262/6.0/#sec-block-level-function-declarations-web-legacy-compatibility-semantics
+
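+// Rough summary of the semantics under test (a sketch, not the normative
+// text): a block-level function declaration in sloppy mode creates a
+// block-scoped binding and, when the declaration is evaluated, its value is
+// also copied into a function-scoped var binding of the same name, unless
+// that would conflict with a lexical declaration. For example:
+//
+//   (function() {
+//     assertEquals('undefined', typeof f);  // var binding exists, still unset
+//     { function f() {} }                   // evaluation copies f outward
+//     assertEquals('function', typeof f);
+//   })();
+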
+(function overridingLocalFunction() {
+ var x = [];
+ assertEquals('function', typeof f);
+ function f() {
+ x.push(1);
+ }
+ f();
+ {
+ f();
+ function f() {
+ x.push(2);
+ }
+ f();
+ }
+ f();
+ {
+ f();
+ function f() {
+ x.push(3);
+ }
+ f();
+ }
+ f();
+ assertArrayEquals([1, 2, 2, 2, 3, 3, 3], x);
+})();
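+// Walkthrough: the outer f pushes 1; within each block the block-scoped f is
+// hoisted to the top of that block, and evaluating the declaration also
+// overwrites the function-level binding, so all later calls see the newest f,
+// giving [1, 2, 2, 2, 3, 3, 3].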
+
+(function newFunctionBinding() {
+ var x = [];
+ assertEquals('undefined', typeof f);
+ {
+ f();
+ function f() {
+ x.push(2);
+ }
+ f();
+ }
+ f();
+ {
+ f();
+ function f() {
+ x.push(3);
+ }
+ f();
+ }
+ f();
+ assertArrayEquals([2, 2, 2, 3, 3, 3], x);
+})();
+
+(function shadowingLetDoesntBind() {
+ let f = 1;
+ assertEquals(1, f);
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals(1, f);
+})();
+
+(function shadowingClassDoesntBind() {
+ class f { }
+ assertEquals('class f { }', f.toString());
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals('class f { }', f.toString());
+})();
+
+(function shadowingConstDoesntBind() {
+ const f = 1;
+ assertEquals(1, f);
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals(1, f);
+})();
+
+(function shadowingVarBinds() {
+ var f = 1;
+ assertEquals(1, f);
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals('function', typeof f);
+})();
+
+(function complexParams(a = 0) {
+ {
+ let y = 3;
+ function f(b = 0) {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals('function', typeof f);
+})();
+
+(function complexVarParams(a = 0) {
+ var f;
+ {
+ let y = 3;
+ function f(b = 0) {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals('function', typeof f);
+})();
+
+(function conditional() {
+ if (true) {
+ function f() { return 1; }
+ } else {
+ function f() { return 2; }
+ }
+ assertEquals(1, f());
+
+ if (false) {
+ function g() { return 1; }
+ } else {
+ function g() { return 2; }
+ }
+ assertEquals(2, g());
+})();
+
+(function skipExecution() {
+ {
+ function f() { return 1; }
+ }
+ assertEquals(1, f());
+ {
+ function f() { return 2; }
+ }
+ assertEquals(2, f());
+ L: {
+ assertEquals(3, f());
+ break L;
+ function f() { return 3; }
+ }
+ assertEquals(2, f());
+})();
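+// In skipExecution the function-level assignment happens only when the inner
+// declaration statement is actually evaluated; `break L` skips it, so f keeps
+// returning 2. The call before `break L` still sees 3 because the block-scoped
+// binding is set up on block entry.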
+
+(function executionOrder() {
+ function getOuter() {
+ return f;
+ }
+ assertEquals('undefined', typeof getOuter());
+
+ {
+ assertEquals('function', typeof f);
+ assertEquals('undefined', typeof getOuter());
+ function f () {}
+ assertEquals('function', typeof f);
+ assertEquals('function', typeof getOuter());
+ }
+
+ assertEquals('function', typeof getOuter());
+})();
+
+(function reassignBindings() {
+ function getOuter() {
+ return f;
+ }
+ assertEquals('undefined', typeof getOuter());
+
+ {
+ assertEquals('function', typeof f);
+ assertEquals('undefined', typeof getOuter());
+ f = 1;
+ assertEquals('number', typeof f);
+ assertEquals('undefined', typeof getOuter());
+ function f () {}
+ assertEquals('number', typeof f);
+ assertEquals('number', typeof getOuter());
+ f = '';
+ assertEquals('string', typeof f);
+ assertEquals('number', typeof getOuter());
+ }
+
+ assertEquals('number', typeof getOuter());
+})();
+
+// Test that shadowing arguments is fine
+(function shadowArguments(x) {
+ assertArrayEquals([1], arguments);
+ {
+ assertEquals('function', typeof arguments);
+ function arguments() {}
+ assertEquals('function', typeof arguments);
+ }
+ assertEquals('function', typeof arguments);
+})(1);
+
+
+// Don't shadow simple parameter
+(function shadowingParameterDoesntBind(x) {
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})(1);
+
+// Don't shadow complex parameter
+(function shadowingDefaultParameterDoesntBind(x = 0) {
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})(1);
+
+// Don't shadow nested complex parameter
+(function shadowingNestedParameterDoesntBind([[x]]) {
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})([[1]]);
+
+// Don't shadow rest parameter
+(function shadowingRestParameterDoesntBind(...x) {
+ assertArrayEquals([1], x);
+ {
+ function x() {}
+ }
+ assertArrayEquals([1], x);
+})(1);
+
+// Don't shadow complex rest parameter
+(function shadowingComplexRestParameterDoesntBind(...[x]) {
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})(1);
+
+// Previous tests with a var declaration thrown in.
+// Don't shadow simple parameter
+(function shadowingVarParameterDoesntBind(x) {
+ var x;
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})(1);
+
+// Don't shadow complex parameter
+(function shadowingVarDefaultParameterDoesntBind(x = 0) {
+ var x;
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})(1);
+
+// Don't shadow nested complex parameter
+(function shadowingVarNestedParameterDoesntBind([[x]]) {
+ var x;
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})([[1]]);
+
+// Don't shadow rest parameter
+(function shadowingVarRestParameterDoesntBind(...x) {
+ var x;
+ assertArrayEquals([1], x);
+ {
+ function x() {}
+ }
+ assertArrayEquals([1], x);
+})(1);
+
+// Don't shadow complex rest parameter
+(function shadowingVarComplexRestParameterDoesntBind(...[x]) {
+ var x;
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals(1, x);
+})(1);
+
+
+// Hoisting is not affected by other simple parameters
+(function irrelevantParameterBinds(y, z) {
+ assertEquals(undefined, x);
+ {
+ function x() {}
+ }
+ assertEquals('function', typeof x);
+})(1);
+
+// Hoisting is not affected by other complex parameters
+(function irrelevantComplexParameterBinds([y] = [], z) {
+ assertEquals(undefined, x);
+ {
+ function x() {}
+ }
+ assertEquals('function', typeof x);
+})();
+
+// Hoisting is not affected by rest parameters
+(function irrelevantRestParameterBinds(y, ...z) {
+ assertEquals(undefined, x);
+ {
+ function x() {}
+ }
+ assertEquals('function', typeof x);
+})();
+
+// Hoisting is not affected by complex rest parameters
+(function irrelevantRestParameterBinds(y, ...[z]) {
+ assertEquals(undefined, x);
+ {
+ function x() {}
+ }
+ assertEquals('function', typeof x);
+})();
+
+
+// Test that shadowing function name is fine
+{
+ let called = false;
+ (function shadowFunctionName() {
+ if (called) assertUnreachable();
+ called = true;
+ {
+ function shadowFunctionName() {
+ return 0;
+ }
+ assertEquals(0, shadowFunctionName());
+ }
+ assertEquals(0, shadowFunctionName());
+ })();
+}
+
+{
+ let called = false;
+ (function shadowFunctionNameWithComplexParameter(...r) {
+ if (called) assertUnreachable();
+ called = true;
+ {
+ function shadowFunctionNameWithComplexParameter() {
+ return 0;
+ }
+ assertEquals(0, shadowFunctionNameWithComplexParameter());
+ }
+ assertEquals(0, shadowFunctionNameWithComplexParameter());
+ })();
+}
+
+(function shadowOuterVariable() {
+ {
+ let f = 0;
+ (function () {
+ assertEquals(undefined, f);
+ {
+ assertEquals(1, f());
+ function f() { return 1; }
+ assertEquals(1, f());
+ }
+ assertEquals(1, f());
+ })();
+ assertEquals(0, f);
+ }
+})();
+
+(function notInDefaultScope() {
+ var y = 1;
+ (function innerNotInDefaultScope(x = y) {
+ assertEquals('undefined', typeof y);
+ {
+ function y() {}
+ }
+ assertEquals('function', typeof y);
+ assertEquals(1, x);
+ })();
+})();
+
+(function noHoistingThroughNestedLexical() {
+ {
+ let f = 2;
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals(2, f);
+ }
+ assertThrows(()=>f, ReferenceError);
+})();
+
+// Only the first function is hoisted; the second is blocked by the first.
+// Contrast overridingLocalFunction, in which the outer function declaration
+// is not lexical and so the inner declaration is hoisted.
+(function noHoistingThroughNestedFunctions() {
+ assertEquals(undefined, f); // Also checks that the var-binding exists
+
+ {
+ assertEquals(4, f());
+
+ function f() {
+ return 4;
+ }
+
+ {
+ assertEquals(5, f());
+ function f() {
+ return 5;
+ }
+ assertEquals(5, f());
+ }
+
+ assertEquals(4, f());
+ }
+
+ assertEquals(4, f());
+})();
+
+// B.3.5 interacts with B.3.3 to allow this.
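+// (B.3.5 lets a var declaration share its name with a simple catch parameter,
+// so the function-level var binding created by the B.3.3 hoist can coexist
+// with catch (f); inside the catch, the parameter still shadows it.)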
+(function hoistingThroughSimpleCatch() {
+ assertEquals(undefined, f);
+
+ try {
+ throw 0;
+ } catch (f) {
+ {
+ assertEquals(4, f());
+
+ function f() {
+ return 4;
+ }
+
+ assertEquals(4, f());
+ }
+
+ assertEquals(0, f);
+ }
+
+ assertEquals(4, f());
+})();
+
+(function noHoistingThroughComplexCatch() {
+ try {
+ throw 0;
+ } catch ({f}) {
+ {
+ assertEquals(4, f());
+
+ function f() {
+ return 4;
+ }
+
+ assertEquals(4, f());
+ }
+ }
+
+ assertThrows(()=>f, ReferenceError);
+})();
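+// A destructuring catch parameter creates an ordinary lexical binding, so the
+// B.3.5 exception does not apply and the hoist is blocked here.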
+
+(function hoistingThroughWith() {
+ with ({f: 0}) {
+ assertEquals(0, f);
+
+ {
+ assertEquals(4, f());
+
+ function f() {
+ return 4;
+ }
+
+ assertEquals(4, f());
+ }
+
+ assertEquals(0, f);
+ }
+
+ assertEquals(4, f());
+})();
+
+// Test that hoisting from blocks does happen in global scope
+function globalHoisted() { return 0; }
+{
+ function globalHoisted() { return 1; }
+}
+assertEquals(1, globalHoisted());
+
+// Also happens when not previously defined
+assertEquals(undefined, globalUndefinedHoisted);
+{
+ function globalUndefinedHoisted() { return 1; }
+}
+assertEquals(1, globalUndefinedHoisted());
+var globalUndefinedHoistedDescriptor =
+ Object.getOwnPropertyDescriptor(this, "globalUndefinedHoisted");
+assertFalse(globalUndefinedHoistedDescriptor.configurable);
+assertTrue(globalUndefinedHoistedDescriptor.writable);
+assertTrue(globalUndefinedHoistedDescriptor.enumerable);
+assertEquals(1, globalUndefinedHoistedDescriptor.value());
+
+// When a function property is hoisted, it should be
+// made enumerable.
+// BUG(v8:4451)
+Object.defineProperty(this, "globalNonEnumerable", {
+ value: false,
+ configurable: true,
+ writable: true,
+ enumerable: false
+});
+eval("{function globalNonEnumerable() { return 1; }}");
+var globalNonEnumerableDescriptor
+ = Object.getOwnPropertyDescriptor(this, "globalNonEnumerable");
+// BUG(v8:4451): Should be made non-configurable
+assertTrue(globalNonEnumerableDescriptor.configurable);
+assertTrue(globalNonEnumerableDescriptor.writable);
+// BUG(v8:4451): Should be made enumerable
+assertFalse(globalNonEnumerableDescriptor.enumerable);
+assertEquals(1, globalNonEnumerableDescriptor.value());
+
+// When a function property is hoisted, it should be made writable and
+// overwritten, even if the property was non-writable.
+Object.defineProperty(this, "globalNonWritable", {
+ value: false,
+ configurable: true,
+ writable: false,
+ enumerable: true
+});
+eval("{function globalNonWritable() { return 1; }}");
+var globalNonWritableDescriptor
+ = Object.getOwnPropertyDescriptor(this, "globalNonWritable");
+// BUG(v8:4451): Should be made non-configurable
+assertTrue(globalNonWritableDescriptor.configurable);
+// BUG(v8:4451): Should be made writable
+assertFalse(globalNonWritableDescriptor.writable);
+assertTrue(globalNonWritableDescriptor.enumerable);
+// BUG(v8:4451): Should be overwritten
+assertEquals(false, globalNonWritableDescriptor.value);
+
+// Test that hoisting from blocks does happen in an eval
+eval(`
+ function evalHoisted() { return 0; }
+ {
+ function evalHoisted() { return 1; }
+ }
+ assertEquals(1, evalHoisted());
+`);
+
+// Test that hoisting from blocks happens from eval in a function
+!function() {
+ eval(`
+ function evalInFunctionHoisted() { return 0; }
+ {
+ function evalInFunctionHoisted() { return 1; }
+ }
+ assertEquals(1, evalInFunctionHoisted());
+ `);
+}();
+
+// This test asserts the current, incorrect behavior (BUG(v8:5168)); the
+// commented-out assertions describe the correct behavior.
+(function evalHoistingThroughSimpleCatch() {
+ try {
+ throw 0;
+ } catch (f) {
+ eval(`{ function f() {
+ return 4;
+ } }`);
+
+ // assertEquals(0, f);
+ assertEquals(4, f());
+ }
+
+ // assertEquals(4, f());
+ assertEquals(undefined, f);
+})();
+
+// This test asserts the current, incorrect behavior (BUG(v8:5168)); the
+// commented-out assertions describe the correct behavior.
+(function evalHoistingThroughWith() {
+ with ({f: 0}) {
+ eval(`{ function f() {
+ return 4;
+ } }`);
+
+ // assertEquals(0, f);
+ assertEquals(4, f());
+ }
+
+ // assertEquals(4, f());
+ assertEquals(undefined, f);
+})();
+
+let dontHoistGlobal;
+{ function dontHoistGlobal() {} }
+assertEquals(undefined, dontHoistGlobal);
+
+let dontHoistEval;
+var throws = false;
+try {
+ eval("{ function dontHoistEval() {} }");
+} catch (e) {
+ throws = true;
+}
+assertFalse(throws);
+
+// When the global object is frozen, hoisting should silently do nothing.
+// Currently it actually throws; see BUG(v8:4452).
+Object.freeze(this);
+{
+ let throws = false;
+ try {
+ eval('{ function hoistWhenFrozen() {} }');
+ } catch (e) {
+ throws = true;
+ }
+ assertFalse(this.hasOwnProperty("hoistWhenFrozen"));
+ assertThrows(() => hoistWhenFrozen, ReferenceError);
+ // Should be assertFalse BUG(v8:4452)
+ assertTrue(throws);
+}
diff --git a/deps/v8/test/mjsunit/es6/catch-parameter-redeclaration.js b/deps/v8/test/mjsunit/es6/catch-parameter-redeclaration.js
new file mode 100644
index 0000000000..0f8f9c86e7
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/catch-parameter-redeclaration.js
@@ -0,0 +1,112 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function checkIsRedeclarationError(code) {
+ try {
+ eval(`
+checkIsRedeclarationError : {
+ break checkIsRedeclarationError;
+${code}
+}
+`);
+ assertUnreachable();
+ } catch(e) {
+ assertInstanceof(e, SyntaxError);
+ assertTrue(e.toString().indexOf("has already been declared") >= 0);
+ }
+}
+
+function checkIsNotRedeclarationError(code) {
+ assertDoesNotThrow(()=>eval(`
+checkIsNotRedeclarationError_label : {
+ break checkIsNotRedeclarationError_label;
+${code}
+}
+`));
+}
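+
+// Both helpers wrap the code in a labelled block and break out of it
+// immediately: the body is parsed in full, so early errors such as
+// redeclarations still surface, but it is never executed.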
+
+
+let lexical_e = [
+ 'let e',
+ 'let f, g, e',
+ 'let [f] = [], [] = [], e = e, h',
+ 'let {e} = 0',
+ 'let {f, e} = 0',
+ 'let {f, g} = 0, {e} = 0',
+ 'let {f = 0, e = 1} = 0',
+ 'let [e] = 0',
+ 'let [f, e] = 0',
+ 'let {f:e} = 0',
+ 'let [[[], e]] = 0',
+ 'const e = 0',
+ 'const f = 0, g = 0, e = 0',
+ 'const {e} = 0',
+ 'const [e] = 0',
+ 'const {f:e} = 0',
+ 'const [[[], e]] = 0',
+ 'function e(){}',
+ 'function* e(){}',
+];
+
+let not_lexical_e = [
+ 'var e',
+ 'var f, e',
+ 'var {e} = 0',
+ 'let {} = 0',
+ 'let {e:f} = 0',
+ '{ function e(){} }'
+];
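+
+// ('{ function e(){} }' belongs in the non-lexical list because, per Annex B,
+// a sloppy-mode block-level function contributes a var-style binding at
+// function level rather than a lexical one.)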
+
+// Check that lexical declarations cannot override a simple catch parameter
+for (let declaration of lexical_e) {
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ ${declaration}
+}
+`);
+}
+
+// Check that lexical declarations cannot override a complex catch parameter
+for (let declaration of lexical_e) {
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch({e}) {
+ ${declaration}
+}
+`);
+}
+
+// Check that non-lexical declarations can override a simple catch parameter
+for (let declaration of not_lexical_e) {
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ ${declaration}
+}
+`);
+}
+
+// Check that the above error does not occur if a declaration scope intervenes
+// between the catch and the declaration.
+for (let declaration of lexical_e) {
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ (()=>{${declaration}})();
+}
+`);
+
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ (function(){${declaration}})();
+}
+`);
+}
diff --git a/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
index cb9f25157c..b5a2ac995e 100644
--- a/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
+++ b/deps/v8/test/mjsunit/es6/class-computed-property-names-super.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy
// Flags: --allow-natives-syntax
diff --git a/deps/v8/test/mjsunit/es6/class-property-name-eval-arguments.js b/deps/v8/test/mjsunit/es6/class-property-name-eval-arguments.js
index 72ff60fd3e..bbd05cc355 100644
--- a/deps/v8/test/mjsunit/es6/class-property-name-eval-arguments.js
+++ b/deps/v8/test/mjsunit/es6/class-property-name-eval-arguments.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy
-
(function Method() {
class C {
diff --git a/deps/v8/test/mjsunit/es6/classes-derived-return-type.js b/deps/v8/test/mjsunit/es6/classes-derived-return-type.js
index 8283bcb227..3f81a340ff 100644
--- a/deps/v8/test/mjsunit/es6/classes-derived-return-type.js
+++ b/deps/v8/test/mjsunit/es6/classes-derived-return-type.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy
-
class Base {}
diff --git a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
index 7669ef3a8a..dca514c294 100644
--- a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
+++ b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-regexp-subclass
-// Flags: --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index 4dabda8e44..fb77dbb8e4 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-function-name --allow-natives-syntax
-// Flags: --harmony-do-expressions
+// Flags: --allow-natives-syntax --harmony-do-expressions
(function TestBasics() {
var C = class C {}
diff --git a/deps/v8/test/mjsunit/es6/completion.js b/deps/v8/test/mjsunit/es6/completion.js
index 7559514421..988e9709bb 100644
--- a/deps/v8/test/mjsunit/es6/completion.js
+++ b/deps/v8/test/mjsunit/es6/completion.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy-let
-
function assertUndef(x) {
assertEquals(undefined, x);
diff --git a/deps/v8/test/mjsunit/es6/debug-blockscopes.js b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
index 193ad705cb..bf04a0a4aa 100644
--- a/deps/v8/test/mjsunit/es6/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
@@ -52,6 +52,7 @@ function listener(event, exec_state, event_data, data) {
listener_delegate(exec_state);
}
} catch (e) {
+ print(e, e.stack);
exception = e;
}
}
@@ -147,10 +148,8 @@ function CheckScopeContent(content, number, exec_state) {
if (!scope.scopeObject().property('arguments').isUndefined()) {
scope_size--;
}
- // Skip property with empty name.
- if (!scope.scopeObject().property('').isUndefined()) {
- scope_size--;
- }
+ // Temporary variables introduced by the parser have not been materialized.
+ assertTrue(scope.scopeObject().property('').isUndefined());
if (count != scope_size) {
print('Names found in scope:');
@@ -380,16 +379,12 @@ function for_loop_1() {
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Block,
- debug.ScopeType.Block,
debug.ScopeType.Local,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({x:'y'}, 0, exec_state);
// The function scope contains a temporary iteration variable, but it is
// hidden to the debugger.
- // TODO(adamk): This variable is only used to provide a TDZ for the enumerable
- // expression and should not be visible to the debugger.
- CheckScopeContent({x:undefined}, 1, exec_state);
};
for_loop_1();
EndTest();
@@ -409,7 +404,6 @@ function for_loop_2() {
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Block,
debug.ScopeType.Block,
- debug.ScopeType.Block,
debug.ScopeType.Local,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
@@ -417,9 +411,6 @@ listener_delegate = function(exec_state) {
CheckScopeContent({x:'y'}, 1, exec_state);
// The function scope contains a temporary iteration variable, hidden to the
// debugger.
- // TODO(adamk): This variable is only used to provide a TDZ for the enumerable
- // expression and should not be visible to the debugger.
- CheckScopeContent({x:undefined}, 2, exec_state);
};
for_loop_2();
EndTest();
@@ -436,13 +427,11 @@ function for_loop_3() {
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Block,
- debug.ScopeType.Block,
debug.ScopeType.Local,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({x:3}, 0, exec_state);
- CheckScopeContent({x:3}, 1, exec_state);
- CheckScopeContent({}, 2, exec_state);
+ CheckScopeContent({}, 1, exec_state);
};
for_loop_3();
EndTest();
@@ -461,14 +450,12 @@ function for_loop_4() {
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Block,
debug.ScopeType.Block,
- debug.ScopeType.Block,
debug.ScopeType.Local,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({x:5}, 0, exec_state);
CheckScopeContent({x:3}, 1, exec_state);
- CheckScopeContent({x:3}, 2, exec_state);
- CheckScopeContent({}, 3, exec_state);
+ CheckScopeContent({}, 2, exec_state);
};
for_loop_4();
EndTest();
@@ -485,13 +472,11 @@ function for_loop_5() {
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Block,
- debug.ScopeType.Block,
debug.ScopeType.Local,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeContent({x:3,y:5}, 0, exec_state);
- CheckScopeContent({x:3,y:5}, 1, exec_state);
- CheckScopeContent({}, 2, exec_state);
+ CheckScopeContent({}, 1, exec_state);
};
for_loop_5();
EndTest();
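
Note on the debug-blockscopes.js hunks: every expected scope chain shrinks by one Block scope because, as the updated assertion says, the temporary iteration variable the parser introduces for these loops is no longer materialized for the debugger. A condensed sketch (the full body of for_loop_1 is not shown in this hunk) of what the test now expects when paused:

  function sketch_for_loop() {
    for (let x of ['y']) {
      debugger;  // scope chain: Block {x: 'y'}, Local, Script, Global
    }
  }
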
diff --git a/deps/v8/test/mjsunit/es6/debug-exception-generators.js b/deps/v8/test/mjsunit/es6/debug-exception-generators.js
new file mode 100644
index 0000000000..b2e7e82964
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-exception-generators.js
@@ -0,0 +1,49 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = null;
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ var line = exec_state.frame(0).sourceLineText();
+ var match = /Exception (\w)/.exec(line);
+ assertNotNull(match);
+ assertEquals(match[1], event_data.exception());
+ log.push(match[1]);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+
+function* g() {
+ try {
+ throw "a"; // Ordinary throw. Exception a
+ } catch (e) {}
+ try {
+ yield 1; // Caught internally. Exception b
+ } catch (e) {}
+ yield 2;
+ yield 3; // Caught externally. Exception c
+ yield 4;
+}
+
+Debug.setListener(listener);
+Debug.setBreakOnException();
+var g_obj = g();
+assertEquals(1, g_obj.next().value);
+assertEquals(2, g_obj.throw("b").value);
+assertEquals(3, g_obj.next().value);
+assertThrows(() => g_obj.throw("c"));
+assertThrows(() => g_obj.throw("d")); // Closed generator. Exception d
+Debug.setListener(null);
+Debug.clearBreakOnException();
+assertEquals(["a", "b", "c", "d"], log);
+assertNull(exception);
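
The new test relies on standard ES6 generator semantics: g_obj.throw(v) resumes the suspended generator as though the paused yield had thrown v; if a try/catch inside the generator catches it, execution continues to the next yield, otherwise the exception propagates to the caller and the generator is closed. A standalone illustration, independent of the debug API:

  function* sample() {
    try {
      yield 1;           // throw('b') is raised here and caught below
    } catch (e) {}
    yield 2;
    yield 3;             // throw('c') here is uncaught inside the generator
  }
  var it = sample();
  it.next();             // {value: 1, done: false}
  it.throw('b');         // caught internally; resumes: {value: 2, done: false}
  it.next();             // {value: 3, done: false}
  // it.throw('c') now propagates to the caller and closes the generator,
  // so a further it.throw('d') also throws (the "closed generator" case above).
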
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js b/deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js
index 88030a2e73..0b0fa1e64f 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug;
@@ -16,8 +16,8 @@ var expected = [
"didHandle #1",
"willHandle #2",
"then #2",
- "enqueue #3",
"didHandle #2",
+ "enqueue #3",
"willHandle #3",
"didHandle #3"
];
@@ -58,4 +58,6 @@ p.then(function() {
});
resolver();
+%RunMicrotasks();
+
assertNull(exception);
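
The hunk above adds an explicit %RunMicrotasks() call (which is why --allow-natives-syntax joins the flags line), so the test drains the microtask queue itself instead of relying on implicit draining at script end; the expected event order also shifts, with "enqueue #3" now following "didHandle #2". For reference, %RunMicrotasks pumps the queue synchronously at the call site:

  // Flags: --allow-natives-syntax
  Promise.resolve().then(function() { print("microtask"); });
  print("script");
  %RunMicrotasks();  // runs the pending then-callback right here
  // prints "script" first, then "microtask"
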
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/evaluate-across-microtasks.js b/deps/v8/test/mjsunit/es6/debug-promises/evaluate-across-microtasks.js
new file mode 100644
index 0000000000..73718eec7b
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/evaluate-across-microtasks.js
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var listenerComplete = false;
+var exception = null;
+var count = 0;
+var log = [];
+var done = false;
+
+function LogX(x) {
+ var stored_count = count;
+ return function() {
+ log.push(`[${stored_count}] ${x}`);
+ };
+}
+
+function DebuggerStatement() {
+ log.push(`[${count}] debugger`);
+ if (count++ < 3) {
+ debugger;
+ }
+}
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var p = Promise.resolve();
+ var q = p.then(LogX("then 1"));
+ p.then(LogX("then 2"));
+ q.then(LogX("then 3"));
+ q.then(DebuggerStatement);
+ var r = q.then(() => { throw 1; });
+ r.catch(LogX("catch"));
+ listenerComplete = true;
+ } catch (e) {
+ exception = e;
+ print(e, e.stack);
+ quit(1);
+ };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+DebuggerStatement();
+LogX("start")();
+
+// Make sure that the debug event listener was invoked.
+assertTrue(listenerComplete);
+
+%RunMicrotasks();
+
+var expectation =
+ [ "[0] debugger", "[1] start", "[1] then 1",
+ "[1] then 2", "[1] then 3", "[1] debugger",
+ "[2] then 1", "[2] then 2", "[1] catch",
+ "[2] then 3", "[2] debugger", "[3] then 1",
+ "[3] then 2", "[2] catch", "[3] then 3",
+ "[3] debugger", "[3] catch",
+ ];
+
+assertEquals(expectation, log);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/events.js b/deps/v8/test/mjsunit/es6/debug-promises/events.js
deleted file mode 100644
index 3fcb22ff27..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/events.js
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --expose-debug-as debug
-
-Debug = debug.Debug;
-
-var eventsExpected = 16;
-var exception = null;
-var result = [];
-
-function updatePromise(promise, parentPromise, status, value) {
- var i;
- for (i = 0; i < result.length; ++i) {
- if (result[i].promise === promise) {
- result[i].parentPromise = parentPromise || result[i].parentPromise;
- result[i].status = status || result[i].status;
- result[i].value = value || result[i].value;
- break;
- }
- }
- assertTrue(i < result.length);
-}
-
-function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.PromiseEvent) return;
- try {
- eventsExpected--;
- assertTrue(event_data.promise().isPromise());
- if (event_data.status() === 0) {
- // New promise.
- assertEquals("pending", event_data.promise().status());
- result.push({ promise: event_data.promise().value(), status: 0 });
- assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
- } else if (event_data.status() !== undefined) {
- // Resolve/reject promise.
- updatePromise(event_data.promise().value(),
- undefined,
- event_data.status(),
- event_data.value().value());
- } else {
- // Chain promises.
- assertTrue(event_data.parentPromise().isPromise());
- updatePromise(event_data.promise().value(),
- event_data.parentPromise().value());
- assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
- }
- } catch (e) {
- print(e + e.stack)
- exception = e;
- }
-}
-
-Debug.setListener(listener);
-
-function resolver(resolve, reject) { resolve(); }
-
-var p1 = new Promise(resolver); // event
-var p2 = p1.then().then(); // event
-var p3 = new Promise(function(resolve, reject) { // event
- reject("rejected");
-});
-var p4 = p3.then(); // event
-var p5 = p1.then(); // event
-
-function assertAsync(b, s) {
- if (b) {
- print(s, "succeeded");
- } else {
- %AbortJS(s + " FAILED!");
- }
-}
-
-function testDone(iteration) {
- function checkResult() {
- if (eventsExpected === 0) {
- assertAsync(result.length === 6, "result.length");
-
- assertAsync(result[0].promise === p1, "result[0].promise");
- assertAsync(result[0].parentPromise === undefined,
- "result[0].parentPromise");
- assertAsync(result[0].status === 1, "result[0].status");
- assertAsync(result[0].value === undefined, "result[0].value");
-
- assertAsync(result[1].parentPromise === p1,
- "result[1].parentPromise");
- assertAsync(result[1].status === 1, "result[1].status");
-
- assertAsync(result[2].promise === p2, "result[2].promise");
-
- assertAsync(result[3].promise === p3, "result[3].promise");
- assertAsync(result[3].parentPromise === undefined,
- "result[3].parentPromise");
- assertAsync(result[3].status === -1, "result[3].status");
- assertAsync(result[3].value === "rejected", "result[3].value");
-
- assertAsync(result[4].promise === p4, "result[4].promise");
- assertAsync(result[4].parentPromise === p3,
- "result[4].parentPromise");
- assertAsync(result[4].status === -1, "result[4].status");
- assertAsync(result[4].value === "rejected", "result[4].value");
-
- assertAsync(result[5].promise === p5, "result[5].promise");
- assertAsync(result[5].parentPromise === p1,
- "result[5].parentPromise");
- assertAsync(result[5].status === 1, "result[5].status");
-
- assertAsync(exception === null, "exception === null");
- Debug.setListener(null);
- } else if (iteration > 10) {
- %AbortJS("Not all events were received!");
- } else {
- testDone(iteration + 1);
- }
- }
-
- var iteration = iteration || 0;
- %EnqueueMicrotask(checkResult);
-}
-
-testDone();
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reentry.js b/deps/v8/test/mjsunit/es6/debug-promises/reentry.js
index a97ce81012..cc98ed9efd 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reentry.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reentry.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --promise-extra
+// Flags: --expose-debug-as debug
// Test reentry of special try catch for Promises.
@@ -12,6 +12,6 @@ Debug.setBreakOnUncaughtException();
Debug.setListener(function(event, exec_state, event_data, data) { });
var p = new Promise(function(resolve, reject) { resolve(); });
-var q = p.chain(function() {
+var q = p.then(function() {
new Promise(function(resolve, reject) { resolve(); });
});
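
This hunk and the ones that follow are a mechanical migration: Promise.prototype.chain was a non-standard V8 extension gated behind the now-removed --promise-extra flag, and the standard then takes its place; these tests exercise only behaviour the two methods share. The rewrite pattern throughout is:

  // before (needed --promise-extra):
  //   var q = p.chain(onFulfilled);
  // after (standard ES6):
  var p = Promise.resolve();
  var q = p.then(function onFulfilled(value) { /* ... */ });
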
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js
index ed4b2c435e..5ec2da50e9 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we listen to uncaught exceptions and
// the Promise is rejected in a chained closure after it has been resolved.
@@ -17,7 +17,7 @@ var p = new Promise(function(resolve, reject) {
resolve(reject);
});
-var q = p.chain(
+var q = p.then(
function(value) {
assertEquals(["resolve", "end main"], log);
value(new Error("reject"));
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
index e1a653889d..8d348ce6b6 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we listen to all exceptions and
// there is a catch handler for the to-be-rejected Promise.
@@ -18,7 +18,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function(value) {
log.push("reject");
return Promise.reject(new Error("reject"));
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js
index 922449261b..44eb76728f 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions, the Promise
// is rejected, and a catch handler is installed right before the rejection.
@@ -14,7 +14,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
q.catch(function(e) {
assertEquals("caught", e.message);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
index afb46fea8f..b2fe8b0a45 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the to-be-rejected Promise.
@@ -14,7 +14,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
return Promise.reject(Error("caught reject"));
});
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
index 63e3b8678d..0c5ecc5f3a 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we listen to all exceptions and
// there is a catch handler for the to-be-rejected Promise.
@@ -18,7 +18,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
log.push("reject");
return Promise.reject(new Error("uncaught reject"));
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
index b542bc69dd..e5e560b3db 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and
// there is no catch handler for the to-be-rejected Promise.
@@ -18,7 +18,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
log.push("reject");
return Promise.reject(Error("uncaught reject")); // event
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
index 8775df687d..6aaf882ce8 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when a Promise is rejected, which is caught by a custom
// promise, which has a number for reject closure. We expect an Exception debug
@@ -28,7 +28,7 @@ function MyPromise(resolver) {
MyPromise.prototype = new Promise(function() {});
p.constructor = MyPromise;
-var q = p.chain(
+var q = p.then(
function() {
log.push("reject caught");
return Promise.reject(new Error("caught"));
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
index b6c06df49e..47e335d968 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when a Promise is rejected, which is caught by a
// custom promise, which throws a new exception in its reject handler.
@@ -33,7 +33,7 @@ function MyPromise(resolver) {
MyPromise.prototype = new Promise(function() {});
p.constructor = MyPromise;
-var q = p.chain(
+var q = p.then(
function() {
log.push("reject caught");
return Promise.reject(new Error("caught"));
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
index d058d41b96..1595372396 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when a Promise is rejected, which is caught by a custom
// promise, which has undefined for reject closure. We expect an Exception
@@ -28,7 +28,7 @@ function MyPromise(resolver) {
MyPromise.prototype = new Promise(function() {});
p.constructor = MyPromise;
-var q = p.chain(
+var q = p.then(
function() {
log.push("reject caught");
return Promise.reject(new Error("caught"));
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js
deleted file mode 100644
index 918ae2a2e8..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --allow-natives-syntax
-
-// Test debug events when we listen to all exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect a normal Exception debug event to be triggered.
-
-Debug = debug.Debug;
-
-var events = [];
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
-}
-
-Debug.setListener(listener);
-
-var p = new Promise(function(resolve, reject) {
- do {
- try {
- throw new Error("reject");
- } finally {
- break; // No rethrow.
- }
- } while (false);
- resolve();
-});
-
-assertEquals([0 /* create */, 1 /* resolve */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js
deleted file mode 100644
index 298201f103..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --allow-natives-syntax
-
-// Test debug events when we listen to all exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect a normal Exception debug event to be triggered.
-
-Debug = debug.Debug;
-
-var events = [];
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
-}
-
-Debug.setListener(listener);
-
-var p = new Promise(function (resolve, reject) {
- try {
- throw new Error("reject");
- } catch (e) {
- }
- resolve();
-});
-
-assertEquals([0 /* create */, 1 /* resolve */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js b/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js
deleted file mode 100644
index b1e2ff98e1..0000000000
--- a/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --allow-natives-syntax
-
-// Test debug events when we listen to all exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect a normal Exception debug event to be triggered.
-
-Debug = debug.Debug;
-
-var events = [];
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
-}
-
-Debug.setListener(listener);
-
-var p = new Promise(function(resolve, reject) {
- try {
- throw new Error("reject");
- } finally {
- // Implicit rethrow.
- }
- resolve();
-});
-
-assertEquals([0 /* create */, -1 /* rethrown */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js b/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js
index 906969e105..6914ae0036 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js
@@ -21,9 +21,9 @@ function listener(event, exec_state, event_data, data) {
Debug.setListener(listener);
function resolver(resolve, reject) {
- 1;
- 2;
- 3;
+ print(1);
+ print(2);
+ print(3);
resolve();
}
@@ -35,9 +35,9 @@ Debug.setListener(null);
var expected_breaks = [
"debugger;",
"var p = new Promise(resolver);",
- "1;",
- "2;",
- "3;",
+ "print(1);",
+ "print(2);",
+ "print(3);",
"resolve();",
"}",
"Debug.setListener(null);"
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
index 3b7c48c1cf..8b932490b2 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we listen to all exceptions and
// there is a catch handler for the exception thrown in a Promise.
@@ -18,7 +18,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
log.push("throw");
throw new Error("caught");
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js
index aa7e584320..0399e5cc34 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions, the Promise
// throws, and a catch handler is installed right before throwing.
@@ -14,7 +14,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
q.catch(function(e) {
assertEquals("caught", e.message);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
index a424ccc9f7..8e1524d519 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the exception thrown in a Promise.
@@ -14,7 +14,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
throw new Error("caught throw");
});
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-finally-caught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-finally-caught-all.js
new file mode 100644
index 0000000000..eb823f518f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-finally-caught-all.js
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we listen to all exceptions and
+// there is a catch handler for the exception thrown in a Promise, first
+// caught by a try-finally, and immediately rethrown.
+// We expect a normal Exception debug event to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+ log.push("resolve");
+ resolve();
+});
+
+var q = p.then(
+ function() {
+ log.push("throw");
+ try {
+ throw new Error("caught");
+ } finally {
+ }
+ });
+
+q.catch(
+ function(e) {
+ assertEquals("caught", e.message);
+ });
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Exception) {
+ expected_events--;
+ assertTrue(expected_events >= 0);
+ assertEquals("caught", event_data.exception().message);
+ assertSame(q, event_data.promise());
+ assertFalse(event_data.uncaught());
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+ function checkResult() {
+ try {
+ assertTrue(iteration < 10);
+ if (expected_events === 0) {
+ assertEquals(["resolve", "end main", "throw"], log);
+ } else {
+ testDone(iteration + 1);
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+ }
+
+ %EnqueueMicrotask(checkResult);
+}
+
+testDone(0);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
index bfe0bedbac..3a73ac9fff 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we listen to all exceptions and
// there is no catch handler for the exception thrown in a Promise.
@@ -18,7 +18,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
log.push("throw");
throw new Error("uncaught"); // event
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
index 8dff592f33..24239f26f3 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the exception thrown in a Promise.
@@ -18,7 +18,7 @@ var p = new Promise(function(resolve, reject) {
resolve();
});
-var q = p.chain(
+var q = p.then(
function() {
log.push("throw");
throw new Error("uncaught"); // event
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
index 5cf49f2fae..622dd2573e 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
+++ b/deps/v8/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --promise-extra
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when an exception is thrown inside a Promise, which is
// caught by a custom promise, which throws a new exception in its reject
@@ -12,7 +12,7 @@
Debug = debug.Debug;
-var expected_events = 2;
+var expected_events = 1;
var log = [];
var p = new Promise(function(resolve, reject) {
@@ -34,7 +34,7 @@ MyPromise.prototype = new Promise(function() {});
MyPromise.__proto__ = Promise;
p.constructor = MyPromise;
-var q = p.chain(
+var q = p.then(
function() {
log.push("throw caught");
throw new Error("caught"); // event
@@ -45,12 +45,10 @@ function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Exception) {
expected_events--;
assertTrue(expected_events >= 0);
- if (expected_events == 1) {
+ if (expected_events == 0) {
assertEquals(["resolve", "construct", "end main",
"throw caught"], log);
assertEquals("caught", event_data.exception().message);
- } else if (expected_events == 0) {
- assertEquals("reject", event_data.exception().message);
} else {
assertUnreachable();
}
diff --git a/deps/v8/test/mjsunit/es6/debug-scope-default-param-with-eval.js b/deps/v8/test/mjsunit/es6/debug-scope-default-param-with-eval.js
new file mode 100644
index 0000000000..d4dc93f2c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-scope-default-param-with-eval.js
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test that the parameter initialization block scope set up for
+// sloppy eval is visible to the debugger.
+
+var Debug = debug.Debug;
+var exception = null;
+var break_count = 0;
+
+function call_for_break() {
+ return 5;
+}
+
+function test(x = eval("var y = 7; debugger; y") + call_for_break()) {
+ return x;
+}
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var frame = exec_state.frame(0);
+ var block_scope;
+ if (break_count++ == 0) {
+ // Inside eval.
+ assertEquals([ debug.ScopeType.Eval,
+ debug.ScopeType.Block,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ frame.allScopes().map(s => s.scopeType()));
+ exec_state.prepareStep(Debug.StepAction.StepOut);
+ block_scope = frame.scope(1);
+ } else {
+ // Outside of eval.
+ assertEquals([ debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ frame.allScopes().map(s => s.scopeType()));
+ block_scope = frame.scope(0);
+ }
+ assertTrue(block_scope.scopeObject().propertyNames().includes('y'));
+ assertEquals(7, block_scope.scopeObject().property('y').value().value());
+ } catch (e) {
+ print(e);
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+assertEquals(12, test());
+
+Debug.setListener(null);
+
+assertNull(exception);
+assertEquals(2, break_count);
diff --git a/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js b/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js
index 599fe05715..5e5eb47d7f 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js
+++ b/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-regexp-subclass
+// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
index 6a7c5536dc..e541f0f4b4 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax --expose-debug-as debug
Debug = debug.Debug
@@ -46,6 +45,7 @@ Promise.resolve(42)
.then(Boolean) // Should skip stepping into native.
.then(promise2)
.catch(promise3)
+ .then(promise4)
.catch(function(e) {
%AbortJS("FAIL: uncaught exception " + e);
});
@@ -60,36 +60,16 @@ function promise2() {
}
function promise3() {
- installObservers(); // Break 4. StepOver.
- return break_count; // Break 5.
-} // Break 6.
-
-function installObservers() {
- var dummy = {};
- Object.observe(dummy, observer1);
- Object.observe(dummy, Object); // Should skip stepping into native.
- Object.observe(dummy, Boolean); // Should skip stepping into native.
- Object.observe(dummy, observer2);
- dummy.foo = 1;
-}
-
-function observer1() {
- return exception || 3; // Break 7.
-} // Break 8.
-
-function observer2() {
- Promise.resolve().then(promise4); // Break 9. StepOver.
- return break_count + 1; // Break 10.
-} // Break 11.
+ return break_count; // Break 4.
+} // Break 5.
function promise4() {
- finalize(); // Break 12. StepOver.
- return 0; // Break 13.
-} // Break 14. StepOver.
+ finalize(); // Break 6. StepOver.
+ return 0; // Break 7.
+} // Break 8. StepOver.
function finalize() {
- var dummy = {};
- Object.observe(dummy, function() {
+ Promise.resolve().then(function() {
if (expected_breaks !== break_count) {
%AbortJS("FAIL: expected <" + expected_breaks + "> breaks instead of <" +
break_count + ">");
@@ -98,5 +78,4 @@ function finalize() {
%AbortJS("FAIL: exception: " + exception);
}
});
- dummy.foo = 1;
}
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-proxies.js b/deps/v8/test/mjsunit/es6/debug-stepin-proxies.js
index 4e71c79198..72c01f0c43 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-proxies.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-proxies.js
@@ -54,7 +54,7 @@ assertEquals(42, get);
assertEquals([
"a0",
- "b17", "h4b20", "i2b20", // [[Has]]
+ "b17", "h4b17", "i2b17", // [[Has]]
"c15", "j4c15", "k2c15", // [[Get]]
"d0", "l4d11", "m2d11", // [[Set]]
"g0"
diff --git a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
index 9d5641a4a3..d425a46b84 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
@@ -11,6 +11,8 @@ var log = []
var s = 0;
var a = [1, 2, 3];
+var b = [1, 2, 3, 4];
+var null_value = null;
var i = 0;
function f() {
@@ -18,11 +20,11 @@ function f() {
debugger; // Break a
var j; // Break b
- for (var i in null) { // Break c
+ for (var i in null_value) { // Break c
s += a[i];
}
- for (j in null) { // Break d
+ for (j in null_value) { // Break d
s += a[j];
}
@@ -46,7 +48,7 @@ function f() {
s += j; // Break I
}
- for (let i of a) { // Break j
+ for (let i of a) { // Break j
s += i; // Break J
}
@@ -61,6 +63,11 @@ function f() {
for (let i = 0; i < 3; i++) { // Break m
s += a[i]; // Break M
}
+
+ for (let i of a) {} // Break n
+
+ [1, ...a] // Break o
+
} // Break y
function listener(event, exec_state, event_data, data) {
@@ -103,17 +110,21 @@ var expected = [
// For-in-let: get enumerable, next, body, next, ...
"g16","g11","G4","g11","G4","g11","G4","g11",
// For-of-var: [Symbol.iterator](), next(), body, next(), body, ...
- "h16","h14","h15","H4","h15","H4","h15","H4","h15",
+ "h16","h13","H4","h13","H4","h13","H4","h13",
// For-of: [Symbol.iterator](), next(), body, next(), body, ...
- "i12","i10","i11","I4","i11","I4","i11","I4","i11",
+ "i12","i9","I4","i9","I4","i9","I4","i9",
// For-of-let: [Symbol.iterator](), next(), body, next(), ...
- "j16","j14","j15","J4","j15","J4","j15","J4","j15",
+ "j18","j14","J4","j14","J4","j14","J4","j14",
// For-var: init, condition, body, next, condition, body, ...
"k15","k20","K4","k26","k20","K4","k26","k20","K4","k26","k20",
// For: init, condition, body, next, condition, body, ...
"l7","l16","L4","l22","l16","L4","l22","l16","L4","l22","l16",
// For-let: init, condition, body, next, condition, body, ...
"m15","m20","M4","m26","m20","M4","m26","m20","M4","m26","m20",
+ // For-of, empty: [Symbol.iterator](), next() once
+ "n16", "n13",
+ // Spread: expression statement, spread
+ "o2", "o9",
// Exit.
"y0","z0",
]
diff --git a/deps/v8/test/mjsunit/es6/default-parameters.js b/deps/v8/test/mjsunit/es6/default-parameters.js
index 4e0bf542ef..c0fe031c03 100644
--- a/deps/v8/test/mjsunit/es6/default-parameters.js
+++ b/deps/v8/test/mjsunit/es6/default-parameters.js
@@ -350,14 +350,15 @@
(function TestDirectiveThrows() {
"use strict";
- assertThrows(function(){ eval("function(x=1){'use strict';}") }, SyntaxError);
- assertThrows(function(){ eval("(x=1) => {'use strict';}") }, SyntaxError);
- assertThrows(
- function(){ eval("(class{foo(x=1) {'use strict';}});") }, SyntaxError);
-
- assertThrows(
- function(){ eval("function(a, x=1){'use strict';}") }, SyntaxError);
- assertThrows(function(){ eval("(a, x=1) => {'use strict';}") }, SyntaxError);
- assertThrows(
- function(){ eval("(class{foo(a, x=1) {'use strict';}});") }, SyntaxError);
+ assertThrows("(function(x=1){'use strict';})", SyntaxError);
+ assertThrows("(x=1) => {'use strict';}", SyntaxError);
+ assertThrows("(class{foo(x=1) {'use strict';}});", SyntaxError);
+
+ assertThrows("(function(a, x=1){'use strict';})", SyntaxError);
+ assertThrows("(a, x=1) => {'use strict';}", SyntaxError);
+ assertThrows("(class{foo(a, x=1) {'use strict';}});", SyntaxError);
+
+ assertThrows("(function({x}){'use strict';})", SyntaxError);
+ assertThrows("({x}) => {'use strict';}", SyntaxError);
+ assertThrows("(class{foo({x}) {'use strict';}});", SyntaxError);
})();
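
The old assertions here threw for the wrong reason: a bare function(x=1){...} is not a valid statement, so eval("function(x=1){'use strict';}") was a SyntaxError even without the directive under test. The new strings are parenthesized expressions handed straight to assertThrows (mjsunit compiles string arguments on demand), so the error can only come from the rule being tested: a 'use strict' directive in a function whose parameter list is non-simple. In sketch form:

  // Throws solely because of the directive plus a non-simple parameter list:
  assertThrows("(function(x = 1) { 'use strict'; })", SyntaxError);
  // A simple parameter list keeps the directive legal:
  assertDoesNotThrow("(function(x) { 'use strict'; })");
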
diff --git a/deps/v8/test/mjsunit/es6/destructuring.js b/deps/v8/test/mjsunit/es6/destructuring.js
index 1f16c45270..a4f88844d4 100644
--- a/deps/v8/test/mjsunit/es6/destructuring.js
+++ b/deps/v8/test/mjsunit/es6/destructuring.js
@@ -1045,9 +1045,9 @@
function f20({x}) { function x() { return 2 }; return x(); }
assertEquals(2, f20({x: 1}));
- // Function hoisting is blocked by the conflicting x declaration
- function f21({x}) { { function x() { return 2 } } return x(); }
- assertThrows(() => f21({x: 1}), TypeError);
+ // Annex B 3.3 function hoisting is blocked by the conflicting x declaration
+ function f21({x}) { { function x() { return 2 } } return x; }
+ assertEquals(1, f21({x: 1}));
var g1 = ({x}) => { var x = 2; return x };
assertEquals(2, g1({x: 1}));
@@ -1082,15 +1082,15 @@
var g21 = ({x}) => { { function x() { return 2 } } return x(); }
assertThrows(() => g21({x: 1}), TypeError);
- assertThrows("'use strict'; function f(x) { let x = 0; }; f({});", SyntaxError);
- assertThrows("'use strict'; function f({x}) { let x = 0; }; f({});", SyntaxError);
- assertThrows("'use strict'; function f(x) { const x = 0; }; f({});", SyntaxError);
- assertThrows("'use strict'; function f({x}) { const x = 0; }; f({});", SyntaxError);
+ assertThrows("'use strict'; function f(x) { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; function f({x}) { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; function f(x) { const x = 0; }", SyntaxError);
+ assertThrows("'use strict'; function f({x}) { const x = 0; }", SyntaxError);
- assertThrows("'use strict'; let g = (x) => { let x = 0; }; f({});", SyntaxError);
- assertThrows("'use strict'; let g = ({x}) => { let x = 0; }; f({});", SyntaxError);
- assertThrows("'use strict'; let g = (x) => { const x = 0; }; f({});", SyntaxError);
- assertThrows("'use strict'; let g = ({x}) => { const x = 0; }; f({});", SyntaxError);
+ assertThrows("'use strict'; let g = (x) => { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; let g = ({x}) => { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; let g = (x) => { const x = 0; }", SyntaxError);
+ assertThrows("'use strict'; let g = ({x}) => { const x = 0; }", SyntaxError);
}());
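
The f21 change pins down the Annex B 3.3 rule the updated comment names: in sloppy mode, a block-level function declaration is hoisted to the enclosing function's var scope only when that would not conflict with an existing binding of the same name. The destructured parameter {x} is exactly such a conflict, so the inner function x stays block-scoped and the parameter is what f21 returns. Condensed from the test above:

  function annexB_blocked({x}) {
    { function x() { return 2; } }  // conflict with the parameter: stays in the block
    return x;                       // still the parameter
  }
  assertEquals(1, annexB_blocked({x: 1}));

  function annexB_hoisted() {
    { function g() { return 2; } }  // no conflict: hoisted to function scope
    return g();
  }
  assertEquals(2, annexB_hoisted());
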
diff --git a/deps/v8/test/mjsunit/es6/for-each-in-catch.js b/deps/v8/test/mjsunit/es6/for-each-in-catch.js
new file mode 100644
index 0000000000..674cddd047
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/for-each-in-catch.js
@@ -0,0 +1,194 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function checkIsRedeclarationError(code) {
+ try {
+ eval(`
+checkIsRedeclarationError : {
+ break checkIsRedeclarationError;
+${code}
+}
+`);
+ assertUnreachable();
+ } catch(e) {
+ assertInstanceof(e, SyntaxError);
+ assertTrue(e.toString().indexOf("has already been declared") >= 0);
+ }
+}
+
+function checkIsNotRedeclarationError(code) {
+ assertDoesNotThrow(()=>eval(`
+checkIsNotRedeclarationError_label : {
+ break checkIsNotRedeclarationError_label;
+${code}
+}
+`));
+}
+
+
+let var_e = [
+ 'var e',
+ 'var {e}',
+ 'var {f, e}',
+ 'var [e]',
+ 'var {f:e}',
+ 'var [[[], e]]'
+];
+
+let not_var_e = [
+ 'var f',
+ 'var {}',
+ 'var {e:f}',
+ 'e',
+ '{e}',
+ 'let e',
+ 'const e',
+ 'let {e}',
+ 'const {e}',
+ 'let [e]',
+ 'const [e]',
+ 'let {f:e}',
+ 'const {f:e}'
+];
+
+// Check that `for (var ... of ...)` cannot redeclare a simple catch variable
+// but `for (var ... in ...)` can.
+for (let binding of var_e) {
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ for (${binding} of []);
+}
+`);
+
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ for (${binding} in []);
+}
+`);
+}
+
+// Check that the above error occurs even for nested catches.
+for (let binding of var_e) {
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ try {
+ throw 1;
+ } catch(f) {
+ try {
+ throw 2;
+ } catch({}) {
+ for (${binding} of []);
+ }
+ }
+}
+`);
+
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ try {
+ throw 1;
+ } catch(f) {
+ try {
+ throw 2;
+ } catch({}) {
+ for (${binding} in []);
+ }
+ }
+}
+`);
+}
+
+// Check that the above error does not occur if a declaration scope is between
+// the catch and the loop.
+for (let binding of var_e) {
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ (()=>{for (${binding} of []);})();
+}
+`);
+
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ (function(){for (${binding} of []);})();
+}
+`);
+}
+
+// Check that there is no error when not declaring a var named e.
+for (let binding of not_var_e) {
+ checkIsNotRedeclarationError(`
+try {
+ throw 0;
+} catch(e) {
+ for (${binding} of []);
+}
+`);
+}
+
+// Check that there is an error for both for-in and for-of when redeclaring
+// a non-simple catch parameter
+for (let binding of var_e) {
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch({e}) {
+ for (${binding} of []);
+}
+`);
+
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch({e}) {
+ for (${binding} in []);
+}
+`);
+}
+
+// Check that the above error occurs even for nested catches.
+for (let binding of var_e) {
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch({e}) {
+ try {
+ throw 1;
+ } catch(f) {
+ try {
+ throw 2;
+ } catch({}) {
+ for (${binding} of []);
+ }
+ }
+}
+`);
+
+ checkIsRedeclarationError(`
+try {
+ throw 0;
+} catch({e}) {
+ try {
+ throw 1;
+ } catch(f) {
+ try {
+ throw 2;
+ } catch({}) {
+ for (${binding} in []);
+ }
+ }
+}
+`);
+}
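
The new file as a whole maps out the boundaries of the Annex B.3.5 allowance: a var (including one bound in a for-in head) may redeclare a simple catch parameter, but the allowance explicitly excludes names bound in a for-of head, and it never applies when the catch parameter is itself a destructuring pattern. The four corners, in the same string form the helpers above build up:

  assertDoesNotThrow("try {} catch (e) { var e; }");                    // plain var: allowed
  assertDoesNotThrow("try {} catch (e) { for (var e in {}); }");        // for-in head: allowed
  assertThrows("try {} catch (e) { for (var e of []); }", SyntaxError); // for-of head: excluded
  assertThrows("try {} catch ({e}) { var e; }", SyntaxError);           // destructured param: excluded
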
diff --git a/deps/v8/test/mjsunit/harmony/function-name.js b/deps/v8/test/mjsunit/es6/function-name.js
index 66a69e0f16..0fcab441ed 100644
--- a/deps/v8/test/mjsunit/harmony/function-name.js
+++ b/deps/v8/test/mjsunit/es6/function-name.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-function-name
(function testVariableDeclarationsFunction() {
'use strict';
@@ -359,10 +357,10 @@
(function testNameNotReflectedInToString() {
- var f = function() {};
- var g = function*() {};
+ var f = function () {};
+ var g = function* () {};
var obj = {
- ['h']: function() {},
+ ['h']: function () {},
i: () => {}
};
assertEquals('function () {}', f.toString());
diff --git a/deps/v8/test/mjsunit/es6/generator-destructuring.js b/deps/v8/test/mjsunit/es6/generator-destructuring.js
new file mode 100644
index 0000000000..7228782c09
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/generator-destructuring.js
@@ -0,0 +1,317 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestDefaultBeforeInitializingYield() {
+ var y = 0;
+ var z = 0;
+ function* f1(x = (y = 1)) { z = 1 };
+ assertEquals(0, y);
+ assertEquals(0, z);
+ var gen = f1();
+ assertEquals(1, y);
+ assertEquals(0, z);
+ gen.next();
+ assertEquals(1, y);
+ assertEquals(1, z);
+})();
+
+(function TestShadowingOfParameters() {
+ function* f1({x}) { var x = 2; return x }
+ assertEquals(2, f1({x: 1}).next().value);
+ function* f2({x}) { { var x = 2; } return x; }
+ assertEquals(2, f2({x: 1}).next().value);
+ function* f3({x}) { var y = x; var x = 2; return y; }
+ assertEquals(1, f3({x: 1}).next().value);
+ function* f4({x}) { { var y = x; var x = 2; } return y; }
+ assertEquals(1, f4({x: 1}).next().value);
+ function* f5({x}, g = () => x) { var x = 2; return g(); }
+ assertEquals(1, f5({x: 1}).next().value);
+ function* f6({x}, g = () => x) { { var x = 2; } return g(); }
+ assertEquals(1, f6({x: 1}).next().value);
+ function* f7({x}) { var g = () => x; var x = 2; return g(); }
+ assertEquals(2, f7({x: 1}).next().value);
+ function* f8({x}) { { var g = () => x; var x = 2; } return g(); }
+ assertEquals(2, f8({x: 1}).next().value);
+ function* f9({x}, g = () => eval("x")) { var x = 2; return g(); }
+ assertEquals(1, f9({x: 1}).next().value);
+
+ function* f10({x}, y) { var y; return y }
+ assertEquals(2, f10({x: 6}, 2).next().value);
+ function* f11({x}, y) { var z = y; var y = 2; return z; }
+ assertEquals(1, f11({x: 6}, 1).next().value);
+ function* f12(y, g = () => y) { var y = 2; return g(); }
+ assertEquals(1, f12(1).next().value);
+ function* f13({x}, y, [z], v) { var x, y, z; return x*y*z*v }
+ assertEquals(210, f13({x: 2}, 3, [5], 7).next().value);
+
+ function* f20({x}) { function x() { return 2 }; return x(); }
+ assertEquals(2, f20({x: 1}).next().value);
+ // Annex B 3.3 function hoisting is blocked by the conflicting x declaration
+ function* f21({x}) { { function x() { return 2 } } return x; }
+ assertEquals(1, f21({x: 1}).next().value);
+
+ assertThrows("'use strict'; function* f(x) { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; function* f({x}) { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; function* f(x) { const x = 0; }", SyntaxError);
+ assertThrows("'use strict'; function* f({x}) { const x = 0; }", SyntaxError);
+}());
+
+(function TestDefaults() {
+ function* f1(x = 1) { return x }
+ assertEquals(1, f1().next().value);
+ assertEquals(1, f1(undefined).next().value);
+ assertEquals(2, f1(2).next().value);
+ assertEquals(null, f1(null).next().value);
+
+ function* f2(x, y = x) { return x + y; }
+ assertEquals(8, f2(4).next().value);
+ assertEquals(8, f2(4, undefined).next().value);
+ assertEquals(6, f2(4, 2).next().value);
+
+ function* f3(x = 1, y) { return x + y; }
+ assertEquals(8, f3(5, 3).next().value);
+ assertEquals(3, f3(undefined, 2).next().value);
+ assertEquals(6, f3(4, 2).next().value);
+
+ function* f4(x = () => 1) { return x() }
+ assertEquals(1, f4().next().value);
+ assertEquals(1, f4(undefined).next().value);
+ assertEquals(2, f4(() => 2).next().value);
+ assertThrows(() => f4(null).next(), TypeError);
+
+ function* f5(x, y = () => x) { return x + y(); }
+ assertEquals(8, f5(4).next().value);
+ assertEquals(8, f5(4, undefined).next().value);
+ assertEquals(6, f5(4, () => 2).next().value);
+
+ function* f6(x = {a: 1, m() { return 2 }}) { return x.a + x.m(); }
+ assertEquals(3, f6().next().value);
+ assertEquals(3, f6(undefined).next().value);
+ assertEquals(5, f6({a: 2, m() { return 3 }}).next().value);
+}());
+
+
+(function TestEvalInParameters() {
+ function* f1(x = eval(0)) { return x }
+ assertEquals(0, f1().next().value);
+ function* f2(x = () => eval(1)) { return x() }
+ assertEquals(1, f2().next().value);
+})();
+
+
+(function TestParameterScopingSloppy() {
+ var x = 1;
+
+ function* f1(a = x) { var x = 2; return a; }
+ assertEquals(1, f1().next().value);
+ function* f2(a = x) { function x() {}; return a; }
+ assertEquals(1, f2().next().value);
+ function* f3(a = eval("x")) { var x; return a; }
+ assertEquals(1, f3().next().value);
+ function* f31(a = eval("'use strict'; x")) { var x; return a; }
+ assertEquals(1, f31().next().value);
+ function* f4(a = function() { return x }) { var x; return a(); }
+ assertEquals(1, f4().next().value);
+ function* f5(a = () => x) { var x; return a(); }
+ assertEquals(1, f5().next().value);
+ function* f6(a = () => eval("x")) { var x; return a(); }
+ assertEquals(1, f6().next().value);
+ function* f61(a = () => { 'use strict'; return eval("x") }) { var x; return a(); }
+ assertEquals(1, f61().next().value);
+ function* f62(a = () => eval("'use strict'; x")) { var x; return a(); }
+ assertEquals(1, f62().next().value);
+
+ var f11 = function* f(x = f) { var f; return x; }
+ assertSame(f11, f11().next().value);
+ var f12 = function* f(x = f) { function f() {}; return x; }
+ assertSame(f12, f12().next().value);
+ var f13 = function* f(f = 7, x = f) { return x; }
+ assertSame(7, f13().next().value);
+
+ var o1 = {f: function*(x = this) { return x; }};
+ assertSame(o1, o1.f().next().value);
+ assertSame(1, o1.f(1).next().value);
+})();
+
+(function TestParameterScopingStrict() {
+ "use strict";
+ var x = 1;
+
+ function* f1(a = x) { let x = 2; return a; }
+ assertEquals(1, f1().next().value);
+ function* f2(a = x) { const x = 2; return a; }
+ assertEquals(1, f2().next().value);
+ function* f3(a = x) { function x() {}; return a; }
+ assertEquals(1, f3().next().value);
+ function* f4(a = eval("x")) { var x; return a; }
+ assertEquals(1, f4().next().value);
+ function* f5(a = () => eval("x")) { var x; return a(); }
+ assertEquals(1, f5().next().value);
+
+ var f11 = function* f(x = f) { let f; return x; }
+ assertSame(f11, f11().next().value);
+ var f12 = function* f(x = f) { const f = 0; return x; }
+ assertSame(f12, f12().next().value);
+ var f13 = function* f(x = f) { function f() {}; return x; }
+ assertSame(f13, f13().next().value);
+})();
+
+(function TestSloppyEvalScoping() {
+ var x = 1;
+
+ function* f1(y = eval("var x = 2")) { with ({}) { return x; } }
+ assertEquals(1, f1().next().value);
+ function* f2(y = eval("var x = 2"), z = x) { return z; }
+ assertEquals(1, f2().next().value);
+ assertEquals(1, f2(0).next().value);
+ function* f3(y = eval("var x = 2"), z = eval("x")) { return z; }
+ assertEquals(1, f3().next().value);
+ assertEquals(1, f3(0).next().value);
+ function* f8(y = (eval("var x = 2"), x)) { return y; }
+ assertEquals(2, f8().next().value);
+ assertEquals(0, f8(0).next().value);
+
+ function* f11(z = eval("var y = 2")) { return y; }
+ assertThrows(() => f11().next(), ReferenceError);
+ function* f12(z = eval("var y = 2"), b = y) {}
+ assertThrows(() => f12().next(), ReferenceError);
+ function* f13(z = eval("var y = 2"), b = eval("y")) {}
+ assertThrows(() => f13().next(), ReferenceError);
+
+ function* f21(f = () => x) { eval("var x = 2"); return f() }
+ assertEquals(1, f21().next().value);
+ assertEquals(3, f21(() => 3).next().value);
+ function* f22(f = () => eval("x")) { eval("var x = 2"); return f() }
+ assertEquals(1, f22().next().value);
+ assertEquals(3, f22(() => 3).next().value);
+})();
+
+
+(function TestStrictEvalScoping() {
+ 'use strict';
+ var x = 1;
+
+ function* f1(y = eval("var x = 2")) { return x; }
+ assertEquals(1, f1().next().value);
+ function* f2(y = eval("var x = 2"), z = x) { return z; }
+ assertEquals(1, f2().next().value);
+ assertEquals(1, f2(0).next().value);
+ function* f3(y = eval("var x = 2"), z = eval("x")) { return z; }
+ assertEquals(1, f3().next().value);
+ assertEquals(1, f3(0).next().value);
+ function* f8(y = (eval("var x = 2"), x)) { return y; }
+ assertEquals(1, f8().next().value);
+ assertEquals(0, f8(0).next().value);
+
+ function* f11(z = eval("var y = 2")) { return y; }
+ assertThrows(() => f11().next().value, ReferenceError);
+ function* f12(z = eval("var y = 2"), b = y) {}
+ assertThrows(() => f12().next().value, ReferenceError);
+ function* f13(z = eval("var y = 2"), b = eval("y")) {}
+ assertThrows(() => f13().next().value, ReferenceError);
+
+ function* f21(f = () => x) { eval("var x = 2"); return f() }
+ assertEquals(1, f21().next().value);
+ assertEquals(3, f21(() => 3).next().value);
+ function* f22(f = () => eval("x")) { eval("var x = 2"); return f() }
+ assertEquals(1, f22().next().value);
+ assertEquals(3, f22(() => 3).next().value);
+})();
+
+(function TestParameterTDZSloppy() {
+ function* f1(a = x, x) { return a }
+ assertThrows(() => f1(undefined, 4), ReferenceError);
+ assertEquals(4, f1(4, 5).next().value);
+ function* f2(a = eval("x"), x) { return a }
+ assertThrows(() => f2(undefined, 4), ReferenceError);
+ assertEquals(4, f2(4, 5).next().value);
+ function* f3(a = eval("'use strict'; x"), x) { return a }
+ assertThrows(() => f3(undefined, 4), ReferenceError);
+ assertEquals(4, f3(4, 5).next().value);
+ function* f4(a = () => x, x) { return a() }
+ assertEquals(4, f4(() => 4, 5).next().value);
+ function* f5(a = () => eval("x"), x) { return a() }
+ assertEquals(4, f5(() => 4, 5).next().value);
+ function* f6(a = () => eval("'use strict'; x"), x) { return a() }
+ assertEquals(4, f6(() => 4, 5).next().value);
+
+ function* f11(a = x, x = 2) { return a }
+ assertThrows(() => f11(), ReferenceError);
+ assertThrows(() => f11(undefined), ReferenceError);
+ assertThrows(() => f11(undefined, 4), ReferenceError);
+ assertEquals(4, f11(4, 5).next().value);
+ function* f12(a = eval("x"), x = 2) { return a }
+ assertThrows(() => f12(), ReferenceError);
+ assertThrows(() => f12(undefined), ReferenceError);
+ assertThrows(() => f12(undefined, 4), ReferenceError);
+ assertEquals(4, f12(4, 5).next().value);
+ function* f13(a = eval("'use strict'; x"), x = 2) { return a }
+ assertThrows(() => f13(), ReferenceError);
+ assertThrows(() => f13(undefined), ReferenceError);
+ assertThrows(() => f13(undefined, 4), ReferenceError);
+ assertEquals(4, f13(4, 5).next().value);
+
+ function* f21(x = function() { return a }, ...a) { return x()[0] }
+ assertEquals(4, f21(undefined, 4).next().value);
+ function* f22(x = () => a, ...a) { return x()[0] }
+ assertEquals(4, f22(undefined, 4).next().value);
+ function* f23(x = () => eval("a"), ...a) { return x()[0] }
+ assertEquals(4, f23(undefined, 4).next().value);
+ function* f24(x = () => {'use strict'; return eval("a") }, ...a) {
+ return x()[0]
+ }
+ assertEquals(4, f24(undefined, 4).next().value);
+ function* f25(x = () => eval("'use strict'; a"), ...a) { return x()[0] }
+ assertEquals(4, f25(undefined, 4).next().value);
+})();
+
+(function TestParameterTDZStrict() {
+ "use strict";
+
+ function* f1(a = eval("x"), x) { return a }
+ assertThrows(() => f1(undefined, 4), ReferenceError);
+ assertEquals(4, f1(4, 5).next().value);
+ function* f2(a = () => eval("x"), x) { return a() }
+ assertEquals(4, f2(() => 4, 5).next().value);
+
+ function* f11(a = eval("x"), x = 2) { return a }
+ assertThrows(() => f11(), ReferenceError);
+ assertThrows(() => f11(undefined), ReferenceError);
+ assertThrows(() => f11(undefined, 4), ReferenceError);
+ assertEquals(4, f11(4, 5).next().value);
+
+ function* f21(x = () => eval("a"), ...a) { return x()[0] }
+ assertEquals(4, f21(undefined, 4).next().value);
+})();
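A sketch of the TDZ behavior both blocks exercise (g here is illustrative): parameters share one left-to-right scope, and a generator evaluates its parameters at call time, before the first next().

  function* g(a = x, x) { return a; }
  g(1, 2).next().value;  // 1 -- the default is never evaluated
  // g(undefined, 2) throws ReferenceError immediately: x is still in its TDZ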
+
+(function TestArgumentsForNonSimpleParameters() {
+ function* f1(x = 900) { arguments[0] = 1; return x }
+ assertEquals(9, f1(9).next().value);
+ assertEquals(900, f1().next().value);
+ function* f2(x = 1001) { x = 2; return arguments[0] }
+ assertEquals(10, f2(10).next().value);
+ assertEquals(undefined, f2().next().value);
+}());
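A sketch of the unmapped-arguments rule asserted above: once any parameter is non-simple, arguments is a one-time snapshot rather than an alias of the parameters.

  function* g(x = 0) { arguments[0] = 1; return x; }
  g(9).next().value;  // 9 -- writing arguments[0] no longer updates x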
+
+
+(function TestFunctionLength() {
+ assertEquals(0, (function*(x = 1) {}).length);
+ assertEquals(0, (function*(x = 1, ...a) {}).length);
+ assertEquals(1, (function*(x, y = 1) {}).length);
+ assertEquals(1, (function*(x, y = 1, ...a) {}).length);
+ assertEquals(2, (function*(x, y, z = 1) {}).length);
+ assertEquals(2, (function*(x, y, z = 1, ...a) {}).length);
+ assertEquals(1, (function*(x, y = 1, z) {}).length);
+ assertEquals(1, (function*(x, y = 1, z, ...a) {}).length);
+ assertEquals(1, (function*(x, y = 1, z, v = 2) {}).length);
+ assertEquals(1, (function*(x, y = 1, z, v = 2, ...a) {}).length);
+})();
+
+(function TestDirectiveThrows() {
+ "use strict";
+
+ assertThrows("(function*(x=1){'use strict';})", SyntaxError);
+ assertThrows("(function*(a, x=1){'use strict';})", SyntaxError);
+ assertThrows("(function*({x}){'use strict';})", SyntaxError);
+})();
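The rule behind TestDirectiveThrows, stated once: a 'use strict' directive is an early SyntaxError in any function whose parameter list is non-simple (defaults, destructuring, or rest), since the parameters were already parsed under the enclosing mode. An illustrative helper, separate from the patch:

  function isEarlyError(src) {
    try { eval(src); return false; } catch (e) { return e instanceof SyntaxError; }
  }
  isEarlyError("(function*(...a) { 'use strict'; })");  // true -- rest counts too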
diff --git a/deps/v8/test/mjsunit/es6/generators-objects.js b/deps/v8/test/mjsunit/es6/generators-objects.js
index a0c3b809be..2cc359f911 100644
--- a/deps/v8/test/mjsunit/es6/generators-objects.js
+++ b/deps/v8/test/mjsunit/es6/generators-objects.js
@@ -87,3 +87,43 @@ function TestGeneratorObjectMethods() {
TestNonGenerator(g.prototype);
}
TestGeneratorObjectMethods();
+
+
+function TestPrototype() {
+ function* g() { }
+
+ let g_prototype = g.prototype;
+ assertEquals([], Reflect.ownKeys(g_prototype));
+
+ let generator_prototype = Object.getPrototypeOf(g_prototype);
+ assertSame(generator_prototype, Object.getPrototypeOf(g).prototype);
+
+ // Unchanged .prototype
+ assertSame(g_prototype, Object.getPrototypeOf(g()));
+
+ // Custom object as .prototype
+ {
+ let proto = {};
+ g.prototype = proto;
+ assertSame(proto, Object.getPrototypeOf(g()));
+ }
+
+ // Custom non-object as .prototype
+ g.prototype = null;
+ assertSame(generator_prototype, Object.getPrototypeOf(g()));
+}
+TestPrototype();
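A minimal sketch of the fallback TestPrototype checks: when a generator function's .prototype is not an object, newly created generator objects fall back to the intrinsic generator prototype.

  function* g() {}
  var generatorProto = Object.getPrototypeOf(g.prototype);
  g.prototype = null;
  Object.getPrototypeOf(g()) === generatorProto;  // true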
+
+
+function TestComputedPropertyNames() {
+ function* f1() { return {[yield]: 42} }
+ var g1 = f1();
+ g1.next();
+ assertEquals(42, g1.next('a').value.a);
+
+ function* f2() { return {['a']: yield} }
+ var g2 = f2();
+ g2.next();
+ assertEquals(42, g2.next(42).value.a);
+}
+TestComputedPropertyNames();
diff --git a/deps/v8/test/mjsunit/harmony/instanceof-es6.js b/deps/v8/test/mjsunit/es6/instanceof.js
index 60e7ee2c39..6bf225953f 100644
--- a/deps/v8/test/mjsunit/harmony/instanceof-es6.js
+++ b/deps/v8/test/mjsunit/es6/instanceof.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-instanceof
-
// Make sure it's an error if @@hasInstance isn't a function.
(function() {
var F = {};
@@ -48,3 +46,22 @@ assertEquals(Function.prototype[Symbol.hasInstance].call({}, {}), false);
// OrdinaryHasInstance passed a non-object argument returns false.
assertEquals(Function.prototype[Symbol.hasInstance].call(Array, 0), false);
+
+// Cannot assign to @@hasInstance with %FunctionPrototype%.
+(function() {
+ "use strict";
+ function F() {}
+ assertThrows(function() { F[Symbol.hasInstance] = (v) => v }, TypeError);
+})();
+
+// Check correct invocation of @@hasInstance handler on function instance.
+(function() {
+ function F() {}
+ var counter = 0;
+ var proto = Object.getPrototypeOf(F);
+ Object.setPrototypeOf(F, null);
+ F[Symbol.hasInstance] = function(v) { ++counter; return true };
+ Object.setPrototypeOf(F, proto);
+ assertTrue(1 instanceof F);
+ assertEquals(1, counter);
+})();
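A sketch of the dispatch the new cases cover: instanceof consults F[Symbol.hasInstance] when it is callable and only falls back to OrdinaryHasInstance otherwise; defineProperty is needed because the inherited property is non-writable.

  function F() {}
  Object.defineProperty(F, Symbol.hasInstance, { value: (v) => v === 1 });
  1 instanceof F;  // true -- the handler, not the prototype chain, decides
  2 instanceof F;  // false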
diff --git a/deps/v8/test/mjsunit/harmony/iterator-close.js b/deps/v8/test/mjsunit/es6/iterator-close.js
index b719c17c04..fd8f361e5e 100644
--- a/deps/v8/test/mjsunit/harmony/iterator-close.js
+++ b/deps/v8/test/mjsunit/es6/iterator-close.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-iterator-close
-
function* g() { yield 42; return 88 };
@@ -968,8 +966,9 @@ function* g() { yield 42; return 88 };
// Next method throws.
{
+ let closed = false;
g.prototype.next = () => { throw 666; };
- g.prototype.return = () => { assertUnreachable() };
+ g.prototype.return = () => { closed = true; };
assertThrowsEquals(() => {
@@ -1007,13 +1006,37 @@ function* g() { yield 42; return 88 };
assertThrowsEquals(() => {
(([x]) => x)(g());
}, 666);
+
+ assertThrowsEquals(() => {
+ var [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ let [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ const [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ (([...x]) => x)(g());
+ }, 666);
+
+
+ assertFalse(closed);
}
// Value throws.
{
+ let closed = false;
g.prototype.next = () => ({get value() {throw 666}});
- g.prototype.return = () => { assertUnreachable() };
+ g.prototype.return = () => { closed = true; };
assertThrowsEquals(() => {
@@ -1051,13 +1074,37 @@ function* g() { yield 42; return 88 };
assertThrowsEquals(() => {
(([x]) => x)(g());
}, 666);
+
+ assertThrowsEquals(() => {
+ var [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ let [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ const [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ (([...x]) => x)(g());
+ }, 666);
+
+
+ assertFalse(closed);
}
// Done throws.
{
+ let closed = false;
g.prototype.next = () => ({get done() {throw 666}});
- g.prototype.return = () => { assertUnreachable() };
+ g.prototype.return = () => { closed = true; };
assertThrowsEquals(() => {
@@ -1095,6 +1142,29 @@ function* g() { yield 42; return 88 };
assertThrowsEquals(() => {
(([x]) => x)(g());
}, 666);
+
+ assertThrowsEquals(() => {
+ var [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ let [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ const [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ [...x] = g();
+ }, 666);
+
+ assertThrowsEquals(() => {
+ (([...x]) => x)(g());
+ }, 666);
+
+
+ assertFalse(closed);
}
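The pattern shared by the three rewritten blocks, sketched once with an illustrative generator: return() closes an iterator only for abrupt completions in the consumer; when next() itself (or a getter on its result) throws, the iterator is treated as already broken and is not closed, hence the new assertFalse(closed) checks.

  function* h() { yield 42; }
  var closed = false;
  h.prototype.return = () => { closed = true; return { done: true }; };
  h.prototype.next = () => { throw 666; };
  try { var [a] = h(); } catch (e) { /* e === 666 */ }
  closed;  // still false -- a throwing next() skips IteratorClose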
diff --git a/deps/v8/test/mjsunit/es6/json.js b/deps/v8/test/mjsunit/es6/json.js
index 4c1ada8a86..c049a25ddd 100644
--- a/deps/v8/test/mjsunit/es6/json.js
+++ b/deps/v8/test/mjsunit/es6/json.js
@@ -9,5 +9,7 @@ function testJSONToString() {
assertTrue(desc.configurable);
assertFalse(desc.writable);
assertEquals("JSON", desc.value);
+ delete JSON[Symbol.toStringTag];
+ assertEquals('[object Object]', "" + JSON);
}
testJSONToString();
diff --git a/deps/v8/test/mjsunit/es6/legacy-subclassing.js b/deps/v8/test/mjsunit/es6/legacy-subclassing.js
deleted file mode 100644
index dbf666d07c..0000000000
--- a/deps/v8/test/mjsunit/es6/legacy-subclassing.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --noharmony-species
-
-// Before Symbol.species was added, ArrayBuffer subclasses constructed
-// ArrayBuffers, and Array subclasses constructed Arrays, but TypedArray and
-// Promise subclasses constructed an instance of the subclass.
-
-'use strict';
-
-assertEquals(undefined, Symbol.species);
-
-class MyArray extends Array { }
-let myArray = new MyArray();
-assertEquals(MyArray, myArray.constructor);
-assertEquals(Array, myArray.map(x => x + 1).constructor);
-assertEquals(Array, myArray.concat().constructor);
-
-class MyUint8Array extends Uint8Array { }
-Object.defineProperty(MyUint8Array.prototype, "BYTES_PER_ELEMENT", {value: 1});
-let myTypedArray = new MyUint8Array(3);
-assertEquals(MyUint8Array, myTypedArray.constructor);
-assertEquals(MyUint8Array, myTypedArray.map(x => x + 1).constructor);
-
-class MyArrayBuffer extends ArrayBuffer { }
-let myBuffer = new MyArrayBuffer(0);
-assertEquals(MyArrayBuffer, myBuffer.constructor);
-assertEquals(ArrayBuffer, myBuffer.slice().constructor);
-
-class MyPromise extends Promise { }
-let myPromise = new MyPromise(() => {});
-assertEquals(MyPromise, myPromise.constructor);
-assertEquals(MyPromise, myPromise.then().constructor);
-
-// However, subarray instantiates members of the parent class
-assertEquals(Uint8Array, myTypedArray.subarray(1).constructor);
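Context for this deletion, with a sketch of the shipped behavior: Symbol.species is now always defined, so Array, TypedArray, and Promise subclass methods construct subclass instances, and the pre-species expectations above can no longer pass.

  class MyArray extends Array {}
  new MyArray(1, 2).map(x => x).constructor === MyArray;  // true with species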
diff --git a/deps/v8/test/mjsunit/es6/math-log2-log10.js b/deps/v8/test/mjsunit/es6/math-log2-log10.js
index b1a7736d71..ea17a79daf 100644
--- a/deps/v8/test/mjsunit/es6/math-log2-log10.js
+++ b/deps/v8/test/mjsunit/es6/math-log2-log10.js
@@ -57,13 +57,13 @@ assertEqualsDelta(-9.643274665532873e-17, Math.log10(1-Number.EPSILON), 3e-32);
var n = -1074;
// This loop covers n from -1074 to -1043
for (var lowbits = 1; lowbits <= 0x80000000; lowbits *= 2) {
- var x = %_ConstructDouble(0, lowbits);
+ var x = %ConstructDouble(0, lowbits);
assertEquals(n, Math.log2(x));
n++;
}
// This loop covers n from -1042 to -1023
for (var hibits = 1; hibits <= 0x80000; hibits *= 2) {
- var x = %_ConstructDouble(hibits, 0);
+ var x = %ConstructDouble(hibits, 0);
assertEquals(n, Math.log2(x));
n++;
}
diff --git a/deps/v8/test/mjsunit/es6/math.js b/deps/v8/test/mjsunit/es6/math.js
index cb43bd5bd1..dc761d687d 100644
--- a/deps/v8/test/mjsunit/es6/math.js
+++ b/deps/v8/test/mjsunit/es6/math.js
@@ -9,5 +9,7 @@ function testMathToString() {
assertTrue(desc.configurable);
assertFalse(desc.writable);
assertEquals("Math", desc.value);
+ delete Math[Symbol.toStringTag];
+ assertEquals('[object Object]', "" + Math);
}
testMathToString();
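The invariant both the Math and JSON hunks add, sketched briefly: the namespace's tag lives entirely in its Symbol.toStringTag property, which is non-writable but configurable, so delete succeeds and toString falls back to the generic tag.

  delete Math[Symbol.toStringTag];
  String(Math);  // "[object Object]"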
diff --git a/deps/v8/test/mjsunit/es6/microtask-delivery.js b/deps/v8/test/mjsunit/es6/microtask-delivery.js
index 01b971ddc0..6b239bea47 100644
--- a/deps/v8/test/mjsunit/es6/microtask-delivery.js
+++ b/deps/v8/test/mjsunit/es6/microtask-delivery.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax
var ordering = [];
@@ -71,22 +70,6 @@ function newPromise(id, fn) {
};
}
-function newObserver(id, fn, obj) {
- var observer = {
- value: 1,
- recordCounts: []
- };
-
- Object.observe(observer, function(records) {
- ordering.push('o' + id);
- observer.recordCounts.push(records.length);
- if (fn) fn();
- });
-
- return observer;
-}
-
-
(function PromiseThens() {
reset();
@@ -98,72 +81,3 @@ function newObserver(id, fn, obj) {
assertOrdering(['p1', 'p2', 'p1:1', 'p2:1']);
})();
-
-
-(function ObserversBatch() {
- reset();
-
- var p1 = newPromise(1);
- var p2 = newPromise(2);
- var p3 = newPromise(3);
-
- var ob1 = newObserver(1);
- var ob2 = newObserver(2, function() {
- ob3.value++;
- p3.resolve();
- ob1.value++;
- });
- var ob3 = newObserver(3);
-
- p1.resolve();
- ob1.value++;
- p2.resolve();
- ob2.value++;
-
- assertOrdering(['p1', 'o1', 'o2', 'p2', 'o1', 'o3', 'p3']);
- assertArrayValues([1, 1], ob1.recordCounts);
- assertArrayValues([1], ob2.recordCounts);
- assertArrayValues([1], ob3.recordCounts);
-})();
-
-
-(function ObserversGetAllRecords() {
- reset();
-
- var p1 = newPromise(1);
- var p2 = newPromise(2);
- var ob1 = newObserver(1, function() {
- ob2.value++;
- });
- var ob2 = newObserver(2);
-
- p1.resolve();
- ob1.value++;
- p2.resolve();
- ob2.value++;
-
- assertOrdering(['p1', 'o1', 'o2', 'p2']);
- assertArrayValues([1], ob1.recordCounts);
- assertArrayValues([2], ob2.recordCounts);
-})();
-
-
-(function NewObserverDeliveryGetsNewMicrotask() {
- reset();
-
- var p1 = newPromise(1);
- var p2 = newPromise(2);
- var ob1 = newObserver(1);
- var ob2 = newObserver(2, function() {
- ob1.value++;
- });
-
- p1.resolve();
- ob1.value++;
- p2.resolve();
- ob2.value++;
-
- assertOrdering(['p1', 'o1', 'o2', 'p2', 'o1']);
- assertArrayValues([1, 1], ob1.recordCounts);
- assertArrayValues([1], ob2.recordCounts);
-})();
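With the Object.observe cases removed, the file reduces to promise ordering; the underlying rule, sketched with plain promises: microtasks run in FIFO order, so continuations enqueued first run first.

  var order = [];
  Promise.resolve().then(() => order.push("p1"));
  Promise.resolve().then(() => order.push("p2"));
  // after the microtask checkpoint, order is ["p1", "p2"]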
diff --git a/deps/v8/test/mjsunit/es6/mirror-collections.js b/deps/v8/test/mjsunit/es6/mirror-collections.js
index 81a98b8a58..4232ef91cb 100644
--- a/deps/v8/test/mjsunit/es6/mirror-collections.js
+++ b/deps/v8/test/mjsunit/es6/mirror-collections.js
@@ -88,16 +88,21 @@ assertEquals(1, setMirror.values(1).length);
assertSame(o2, values[0]);
assertEquals(undefined, values[1]);
+function initWeakMap(weakMap) {
+ weakMap.set(o1, 11);
+ weakMap.set(new Object(), 22);
+ weakMap.set(o3, 33);
+ weakMap.set(new Object(), 44);
+ var weakMapMirror = debug.MakeMirror(weakMap);
+ testMapMirror(weakMapMirror);
+ weakMap.set(new Object(), 55);
+ assertTrue(weakMapMirror.entries().length <= 5);
+ return weakMapMirror;
+}
+
// Test the mirror object for WeakMaps
var weakMap = new WeakMap();
-weakMap.set(o1, 11);
-weakMap.set(new Object(), 22);
-weakMap.set(o3, 33);
-weakMap.set(new Object(), 44);
-var weakMapMirror = debug.MakeMirror(weakMap);
-testMapMirror(weakMapMirror);
-weakMap.set(new Object(), 55);
-assertTrue(weakMapMirror.entries().length <= 5);
+var weakMapMirror = initWeakMap(weakMap);
gc();
function testWeakMapEntries(weakMapMirror) {
@@ -121,18 +126,23 @@ function testWeakMapEntries(weakMapMirror) {
testWeakMapEntries(weakMapMirror);
+function initWeakSet(weakSet) {
+ weakSet.add(o1);
+ weakSet.add(new Object());
+ weakSet.add(o2);
+ weakSet.add(new Object());
+ weakSet.add(new Object());
+ weakSet.add(o3);
+ weakSet.delete(o2);
+ var weakSetMirror = debug.MakeMirror(weakSet);
+ testSetMirror(weakSetMirror);
+ assertTrue(weakSetMirror.values().length <= 5);
+ return weakSetMirror;
+}
+
// Test the mirror object for WeakSets
var weakSet = new WeakSet();
-weakSet.add(o1);
-weakSet.add(new Object());
-weakSet.add(o2);
-weakSet.add(new Object());
-weakSet.add(new Object());
-weakSet.add(o3);
-weakSet.delete(o2);
-var weakSetMirror = debug.MakeMirror(weakSet);
-testSetMirror(weakSetMirror);
-assertTrue(weakSetMirror.values().length <= 5);
+var weakSetMirror = initWeakSet(weakSet);
gc();
function testWeakSetValues(weakSetMirror) {
diff --git a/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js b/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
deleted file mode 100644
index 82d070e92d..0000000000
--- a/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Before Unicode RegExps are shipped, we shouldn't have the 'unicode'
-// property on RegExp.prototype, or read it from 'flags'.
-// mjsunit/es6/regexp-flags tests that the property is there when the
-// flag is on.
-
-// Flags: --no-harmony-unicode-regexps
-
-'use strict';
-
-assertFalse(RegExp.prototype.hasOwnProperty('unicode'));
-
-// If we were going to be really strict, we could have a test like this,
-// with the assertTrue replaced by assertFalse, since flags shouldn't
-// Get the 'unicode' property. However, it is probably OK to omit this
-// detailed fix.
-var x = /a/;
-var y = false;
-Object.defineProperty(x, 'unicode', { get() { y = true; } });
-assertEquals("", x.flags);
-assertTrue(y);
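The deleted guard is obsolete because the /u flag shipped: RegExp.prototype now exposes 'unicode', and the flags getter does read it. A quick sketch of the current behavior:

  /a/u.unicode;  // true
  /a/u.flags;    // "u"
  var r = /a/;
  Object.defineProperty(r, 'unicode', { get() { return true; } });
  r.flags;       // "u" -- the getter is consulted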
diff --git a/deps/v8/test/mjsunit/es6/object-tostring.js b/deps/v8/test/mjsunit/es6/object-tostring.js
index 29d07f263a..bc7d9681f8 100644
--- a/deps/v8/test/mjsunit/es6/object-tostring.js
+++ b/deps/v8/test/mjsunit/es6/object-tostring.js
@@ -15,15 +15,16 @@ var funs = {
RegExp: [ RegExp ],
Error: [ Error, TypeError, RangeError, SyntaxError, ReferenceError,
EvalError, URIError ]
-}
-for (f in funs) {
- for (i in funs[f]) {
+};
+for (var f in funs) {
+ for (var i in funs[f]) {
assertEquals("[object " + f + "]",
- Object.prototype.toString.call(new funs[f][i]),
- funs[f][i]);
+ Object.prototype.toString.call(new funs[f][i]),
+ funs[f][i]);
assertEquals("[object Function]",
- Object.prototype.toString.call(funs[f][i]),
- funs[f][i]);
+ Object.prototype.toString.call(funs[f][i]),
+ funs[f][i]);
}
}
@@ -130,11 +131,11 @@ function testObjectToStringPropertyDesc() {
}
testObjectToStringPropertyDesc();
-function testObjectToStringOwnNonStringValue() {
- var obj = Object.defineProperty({}, Symbol.toStringTag, { value: 1 });
+function testObjectToStringOnNonStringValue(obj) {
+ Object.defineProperty(obj, Symbol.toStringTag, { value: 1 });
assertEquals("[object Object]", ({}).toString.call(obj));
}
-testObjectToStringOwnNonStringValue();
+testObjectToStringOnNonStringValue({});
// Proxies
@@ -149,11 +150,77 @@ assertTag("Function", new Proxy(() => 42, {}));
assertTag("Foo", new Proxy(() => 42, {get() {return "Foo"}}));
assertTag("Function", new Proxy(() => 42, {get() {return 666}}));
-revocable = Proxy.revocable([], {});
+var revocable = Proxy.revocable([], {});
revocable.revoke();
assertThrows(() => Object.prototype.toString.call(revocable.proxy), TypeError);
-handler = {};
+var handler = {};
revocable = Proxy.revocable([], handler);
+// The first get() call, i.e. the Symbol.toStringTag lookup inside toString(), revokes the proxy
handler.get = () => revocable.revoke();
+assertEquals("[object Array]", Object.prototype.toString.call(revocable.proxy));
assertThrows(() => Object.prototype.toString.call(revocable.proxy), TypeError);
+
+revocable = Proxy.revocable([], handler);
+handler.get = () => {revocable.revoke(); return "value";};
+assertEquals("[object value]", Object.prototype.toString.call(revocable.proxy));
+assertThrows(() => Object.prototype.toString.call(revocable.proxy), TypeError);
+
+
+revocable = Proxy.revocable(function() {}, handler);
+handler.get = () => revocable.revoke();
+assertEquals("[object Function]", Object.prototype.toString.call(revocable.proxy));
+assertThrows(() => Object.prototype.toString.call(revocable.proxy), TypeError);
+
+function* gen() { yield 1; }
+
+assertTag("GeneratorFunction", gen);
+Object.defineProperty(gen, Symbol.toStringTag, {writable: true});
+gen[Symbol.toStringTag] = "different string";
+assertTag("different string", gen);
+gen[Symbol.toStringTag] = 1;
+assertTag("Function", gen);
+
+function overwriteToStringTagWithNonStringValue(tag, obj) {
+ assertTag(tag, obj);
+
+ Object.defineProperty(obj, Symbol.toStringTag, {
+ configurable: true,
+ value: "different string"
+ });
+ assertTag("different string", obj);
+
+ testObjectToStringOnNonStringValue(obj);
+}
+
+overwriteToStringTagWithNonStringValue("global", global);
+overwriteToStringTagWithNonStringValue("Generator", gen());
+
+var arrayBuffer = new ArrayBuffer();
+overwriteToStringTagWithNonStringValue("ArrayBuffer", arrayBuffer);
+overwriteToStringTagWithNonStringValue("DataView", new DataView(arrayBuffer));
+
+overwriteToStringTagWithNonStringValue("Int8Array", new Int8Array());
+overwriteToStringTagWithNonStringValue("Uint8Array", new Uint8Array());
+overwriteToStringTagWithNonStringValue("Uint8ClampedArray",
+ new Uint8ClampedArray());
+overwriteToStringTagWithNonStringValue("Int16Array", new Int16Array());
+overwriteToStringTagWithNonStringValue("Uint16Array", new Uint16Array());
+overwriteToStringTagWithNonStringValue("Int32Array", new Int32Array());
+overwriteToStringTagWithNonStringValue("Uint32Array", new Uint32Array());
+overwriteToStringTagWithNonStringValue("Float32Array", new Float32Array());
+overwriteToStringTagWithNonStringValue("Float64Array", new Float64Array());
+
+var set = new Set();
+var map = new Map();
+
+overwriteToStringTagWithNonStringValue("Set", set);
+overwriteToStringTagWithNonStringValue("Map", map);
+
+overwriteToStringTagWithNonStringValue("Set Iterator", set[Symbol.iterator]());
+overwriteToStringTagWithNonStringValue("Map Iterator", map[Symbol.iterator]());
+
+overwriteToStringTagWithNonStringValue("WeakSet", new WeakSet());
+overwriteToStringTagWithNonStringValue("WeakMap", new WeakMap());
+
+overwriteToStringTagWithNonStringValue("Promise", new Promise(function() {}));
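A minimal sketch of the fallback rule that overwriteToStringTagWithNonStringValue drives through every receiver above: a non-string Symbol.toStringTag is ignored, and toString falls back to the built-in tag ("Function" for callables, otherwise "Object").

  var o = Object.defineProperty({}, Symbol.toStringTag, { value: 1 });
  Object.prototype.toString.call(o);  // "[object Object]"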
diff --git a/deps/v8/test/mjsunit/es6/pattern-brand-check.js b/deps/v8/test/mjsunit/es6/pattern-brand-check.js
index 9b0c0111ef..2e3229481f 100644
--- a/deps/v8/test/mjsunit/es6/pattern-brand-check.js
+++ b/deps/v8/test/mjsunit/es6/pattern-brand-check.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-subclass
-
function createNonRegExp(calls) {
return {
get [Symbol.match]() {
diff --git a/deps/v8/test/mjsunit/es6/promise-internal-setter.js b/deps/v8/test/mjsunit/es6/promise-internal-setter.js
index 20d361f623..bf0045a98b 100644
--- a/deps/v8/test/mjsunit/es6/promise-internal-setter.js
+++ b/deps/v8/test/mjsunit/es6/promise-internal-setter.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --promise-extra
-
'use strict';
Object.defineProperties(Object.prototype, {
@@ -16,4 +14,3 @@ class P extends Promise {}
P.all([Promise.resolve('ok')]);
P.race([Promise.resolve('ok')]);
-P.defer();
diff --git a/deps/v8/test/mjsunit/harmony/promise-species.js b/deps/v8/test/mjsunit/es6/promise-species.js
index 12244f291a..f6f2e7a1b5 100644
--- a/deps/v8/test/mjsunit/harmony/promise-species.js
+++ b/deps/v8/test/mjsunit/es6/promise-species.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species --allow-natives-syntax
+// Flags: --allow-natives-syntax
// Test that Promises use @@species appropriately
diff --git a/deps/v8/test/mjsunit/es6/promises.js b/deps/v8/test/mjsunit/es6/promises.js
index 4eb539cbd5..0af7a882e7 100644
--- a/deps/v8/test/mjsunit/es6/promises.js
+++ b/deps/v8/test/mjsunit/es6/promises.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --promise-extra
+// Flags: --allow-natives-syntax
// Make sure we don't rely on functions patchable by monkeys.
var call = Function.prototype.call.call.bind(Function.prototype.call)
@@ -82,6 +82,12 @@ for (var i in globals) {
}
+function defer(constructor) {
+ var resolve, reject;
+ var promise = new constructor((res, rej) => { resolve = res; reject = rej });
+ return { promise, resolve, reject };
+}
+
var asyncAssertsExpected = 0;
function assertAsyncRan() { ++asyncAssertsExpected }
@@ -141,7 +147,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- (new Promise(function() { throw 5 })).chain(
+ (new Promise(function() { throw 5 })).then(
assertUnreachable,
function(r) { assertAsync(r === 5, "new-throw") }
)
@@ -149,29 +155,21 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.accept(5);
- Promise.accept(5).chain(undefined, assertUnreachable).chain(
- function(x) { assertAsync(x === 5, "resolved/chain-nohandler") },
+ Promise.resolve(5);
+ Promise.resolve(5).then(undefined, assertUnreachable).then(
+ function(x) { assertAsync(x === 5, "resolved/then-nohandler") },
assertUnreachable
)
assertAsyncRan()
})();
(function() {
- Promise.reject(5).chain(assertUnreachable, undefined).chain(
- assertUnreachable,
- function(r) { assertAsync(r === 5, "rejected/chain-nohandler") }
- )
- assertAsyncRan()
-})();
-
-(function() {
- Promise.accept(5).then(undefined, assertUnreachable).chain(
+ Promise.resolve(5).then(undefined, assertUnreachable).then(
function(x) { assertAsync(x === 5, "resolved/then-nohandler-undefined") },
assertUnreachable
)
assertAsyncRan()
- Promise.accept(6).then(null, assertUnreachable).chain(
+ Promise.resolve(6).then(null, assertUnreachable).then(
function(x) { assertAsync(x === 6, "resolved/then-nohandler-null") },
assertUnreachable
)
@@ -179,34 +177,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.reject(5).then(assertUnreachable, undefined).chain(
- assertUnreachable,
- function(r) { assertAsync(r === 5, "rejected/then-nohandler-undefined") }
- )
- assertAsyncRan()
- Promise.reject(6).then(assertUnreachable, null).chain(
- assertUnreachable,
- function(r) { assertAsync(r === 6, "rejected/then-nohandler-null") }
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- // Note: Chain now has then-style semantics, here and in future tests.
- p3.chain(
- function(x) { assertAsync(x === 5, "resolved/chain") },
- assertUnreachable
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
p3.then(
function(x) { assertAsync(x === 5, "resolved/then") },
assertUnreachable
@@ -216,19 +189,8 @@ function assertAsyncDone(iteration) {
(function() {
var p1 = Promise.reject(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(
- assertUnreachable,
- function(x) { assertAsync(x === 5, "rejected/chain") }
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.reject(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "rejected/then") }
@@ -237,87 +199,21 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(function(x) { return x }, assertUnreachable).chain(
- function(x) { assertAsync(x === 5, "resolved/chain/chain") },
- assertUnreachable
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(function(x) { return x }, assertUnreachable).then(
- function(x) { assertAsync(x === 5, "resolved/chain/then") },
- assertUnreachable
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(function(x) { return 6 }, assertUnreachable).chain(
- function(x) { assertAsync(x === 6, "resolved/chain/chain2") },
- assertUnreachable
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(function(x) { return 6 }, assertUnreachable).then(
- function(x) { assertAsync(x === 6, "resolved/chain/then2") },
- assertUnreachable
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.then(function(x) { return x + 1 }, assertUnreachable).chain(
- function(x) { assertAsync(x === 6, "resolved/then/chain") },
- assertUnreachable
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.then(function(x) { return x + 1 }, assertUnreachable).then(
- function(x) { assertAsync(x === 6, "resolved/then/then") },
- assertUnreachable
- )
- assertAsyncRan()
-})();
-
-(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.then(function(x){ return Promise.accept(x+1) }, assertUnreachable).chain(
- function(x) { assertAsync(x === 6, "resolved/then/chain2") },
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { return x }, assertUnreachable).then(
+ function(x) { assertAsync(x === 5, "resolved/then/then") },
assertUnreachable
)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.then(function(x) { return Promise.accept(x+1) }, assertUnreachable).then(
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x){ return Promise.resolve(x+1) }, assertUnreachable).then(
function(x) { assertAsync(x === 6, "resolved/then/then2") },
assertUnreachable
)
@@ -325,42 +221,42 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(function(x) { throw 6 }, assertUnreachable).chain(
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { throw 6 }, assertUnreachable).then(
assertUnreachable,
- function(x) { assertAsync(x === 6, "resolved/chain-throw/chain") }
+ function(x) { assertAsync(x === 6, "resolved/then-throw/then") }
)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(function(x) { throw 6 }, assertUnreachable).then(
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { throw 6 }, assertUnreachable).then(
assertUnreachable,
- function(x) { assertAsync(x === 6, "resolved/chain-throw/then") }
+ function(x) { assertAsync(x === 6, "resolved/then-throw/then") }
)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.then(function(x) { throw 6 }, assertUnreachable).chain(
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(function(x) { throw 6 }, assertUnreachable).then(
assertUnreachable,
- function(x) { assertAsync(x === 6, "resolved/then-throw/chain") }
+ function(x) { assertAsync(x === 6, "resolved/then-throw/then") }
)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
p3.then(function(x) { throw 6 }, assertUnreachable).then(
assertUnreachable,
function(x) { assertAsync(x === 6, "resolved/then-throw/then") }
@@ -369,20 +265,20 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
+ var p1 = Promise.resolve(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
- p3.chain(
- function(x) { assertAsync(x === 5, "resolved/thenable/chain") },
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ function(x) { assertAsync(x === 5, "resolved/thenable/then") },
assertUnreachable
)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.accept(5)
+ var p1 = Promise.resolve(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
+ var p3 = Promise.resolve(p2)
p3.then(
function(x) { assertAsync(x === 5, "resolved/thenable/then") },
assertUnreachable
@@ -393,10 +289,10 @@ function assertAsyncDone(iteration) {
(function() {
var p1 = Promise.reject(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
- p3.chain(
+ var p3 = Promise.resolve(p2)
+ p3.then(
assertUnreachable,
- function(x) { assertAsync(x === 5, "rejected/thenable/chain") }
+ function(x) { assertAsync(x === 5, "rejected/thenable/then") }
)
assertAsyncRan()
})();
@@ -404,7 +300,7 @@ function assertAsyncDone(iteration) {
(function() {
var p1 = Promise.reject(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
+ var p3 = Promise.resolve(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "rejected/thenable/then") }
@@ -413,12 +309,12 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(
- function(x) { assertAsync(x === 5, "chain/resolve") },
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve") },
assertUnreachable
)
deferred.resolve(5)
@@ -426,7 +322,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
var p2 = Promise.resolve(p1)
var p3 = Promise.resolve(p2)
@@ -439,23 +335,23 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
- p3.chain(
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
+ p3.then(
assertUnreachable,
- function(x) { assertAsync(x === 5, "chain/reject") }
+ function(x) { assertAsync(x === 5, "then/reject") }
)
deferred.reject(5)
assertAsyncRan()
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
- var p2 = Promise.accept(p1)
- var p3 = Promise.accept(p2)
+ var p2 = Promise.resolve(p1)
+ var p3 = Promise.resolve(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "then/reject") }
@@ -465,7 +361,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
var p2 = p1.then(1, 2)
p2.then(
@@ -477,7 +373,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
var p2 = p1.then(1, 2)
p2.then(
@@ -489,12 +385,12 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
- p3.chain(
- function(x) { assertAsync(x === 5, "chain/resolve/thenable") },
+ var p3 = Promise.resolve(p2)
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve/thenable") },
assertUnreachable
)
deferred.resolve(5)
@@ -502,10 +398,10 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
+ var p3 = Promise.resolve(p2)
p3.then(
function(x) { assertAsync(x === 5, "then/resolve/thenable") },
assertUnreachable
@@ -515,23 +411,23 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
- p3.chain(
+ var p3 = Promise.resolve(p2)
+ p3.then(
assertUnreachable,
- function(x) { assertAsync(x === 5, "chain/reject/thenable") }
+ function(x) { assertAsync(x === 5, "then/reject/thenable") }
)
deferred.reject(5)
assertAsyncRan()
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var p3 = Promise.accept(p2)
+ var p3 = Promise.resolve(p2)
p3.then(
assertUnreachable,
function(x) { assertAsync(x === 5, "then/reject/thenable") }
@@ -541,12 +437,12 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var deferred = Promise.defer()
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = defer(Promise)
var p3 = deferred.promise
- p3.chain(
- function(x) { assertAsync(x === 5, "chain/resolve2") },
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve2") },
assertUnreachable
)
deferred.resolve(p2)
@@ -554,9 +450,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var deferred = Promise.defer()
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = defer(Promise)
var p3 = deferred.promise
p3.then(
function(x) { assertAsync(x === 5, "then/resolve2") },
@@ -567,22 +463,22 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var deferred = Promise.defer()
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = defer(Promise)
var p3 = deferred.promise
- p3.chain(
+ p3.then(
assertUnreachable,
- function(x) { assertAsync(x === 5, "chain/reject2") }
+ function(x) { assertAsync(x === 5, "then/reject2") }
)
deferred.reject(5)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.accept(5)
- var p2 = Promise.accept(p1)
- var deferred = Promise.defer()
+ var p1 = Promise.resolve(5)
+ var p2 = Promise.resolve(p1)
+ var deferred = defer(Promise)
var p3 = deferred.promise
p3.then(
assertUnreachable,
@@ -593,12 +489,12 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
+ var p1 = Promise.resolve(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p3 = deferred.promise
- p3.chain(
- function(x) { assertAsync(x === 5, "chain/resolve/thenable2") },
+ p3.then(
+ function(x) { assertAsync(x === 5, "then/resolve/thenable2") },
assertUnreachable
)
deferred.resolve(p2)
@@ -606,9 +502,9 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(5)
+ var p1 = Promise.resolve(5)
var p2 = {then: function(onResolve, onReject) { onResolve(p1) }}
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p3 = deferred.promise
p3.then(
function(x) { assertAsync(x === 5, "then/resolve/thenable2") },
@@ -619,19 +515,19 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(0)
- var p2 = p1.chain(function(x) { return p2 }, assertUnreachable)
- p2.chain(
+ var p1 = Promise.resolve(0)
+ var p2 = p1.then(function(x) { return p2 }, assertUnreachable)
+ p2.then(
assertUnreachable,
- function(r) { assertAsync(r instanceof TypeError, "cyclic/chain") }
+ function(r) { assertAsync(r instanceof TypeError, "cyclic/then") }
)
assertAsyncRan()
})();
(function() {
- var p1 = Promise.accept(0)
+ var p1 = Promise.resolve(0)
var p2 = p1.then(function(x) { return p2 }, assertUnreachable)
- p2.chain(
+ p2.then(
assertUnreachable,
function(r) { assertAsync(r instanceof TypeError, "cyclic/then") }
)
@@ -639,10 +535,10 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p = deferred.promise
deferred.resolve(p)
- p.chain(
+ p.then(
assertUnreachable,
function(r) { assertAsync(r instanceof TypeError, "cyclic/deferred/then") }
)
@@ -650,7 +546,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p = deferred.promise
deferred.resolve(p)
p.then(
@@ -661,7 +557,7 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.all([]).chain(
+ Promise.all([]).then(
function(x) { assertAsync(x.length === 0, "all/resolve/empty") },
assertUnreachable
)
@@ -670,7 +566,7 @@ function assertAsyncDone(iteration) {
(function() {
function testPromiseAllNonIterable(value) {
- Promise.all(value).chain(
+ Promise.all(value).then(
assertUnreachable,
function(r) {
assertAsync(r instanceof TypeError, 'all/non iterable');
@@ -684,14 +580,14 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer();
+ var deferred = defer(Promise);
var p = deferred.promise;
function* f() {
yield 1;
yield p;
yield 3;
}
- Promise.all(f()).chain(
+ Promise.all(f()).then(
function(x) {
assertAsync(x.length === 3, "all/resolve/iterable");
assertAsync(x[0] === 1, "all/resolve/iterable/0");
@@ -708,13 +604,13 @@ function assertAsyncDone(iteration) {
(function() {
- var deferred1 = Promise.defer()
+ var deferred1 = defer(Promise)
var p1 = deferred1.promise
- var deferred2 = Promise.defer()
+ var deferred2 = defer(Promise)
var p2 = deferred2.promise
- var deferred3 = Promise.defer()
+ var deferred3 = defer(Promise)
var p3 = deferred3.promise
- Promise.all([p1, p2, p3]).chain(
+ Promise.all([p1, p2, p3]).then(
function(x) {
assertAsync(x.length === 3, "all/resolve")
assertAsync(x[0] === 1, "all/resolve/0")
@@ -733,11 +629,11 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
- var p2 = Promise.accept(2)
- var p3 = Promise.defer().promise
- Promise.all([p1, p2, p3]).chain(
+ var p2 = Promise.resolve(2)
+ var p3 = defer(Promise).promise
+ Promise.all([p1, p2, p3]).then(
assertUnreachable,
assertUnreachable
)
@@ -745,13 +641,13 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred1 = Promise.defer()
+ var deferred1 = defer(Promise)
var p1 = deferred1.promise
- var deferred2 = Promise.defer()
+ var deferred2 = defer(Promise)
var p2 = deferred2.promise
- var deferred3 = Promise.defer()
+ var deferred3 = defer(Promise)
var p3 = deferred3.promise
- Promise.all([p1, p2, p3]).chain(
+ Promise.all([p1, p2, p3]).then(
assertUnreachable,
function(x) { assertAsync(x === 2, "all/reject") }
)
@@ -786,7 +682,7 @@ function assertAsyncDone(iteration) {
configurable: true
});
- Promise.all(3).chain(
+ Promise.all(3).then(
function(x) {
assertAsync(x.length === 3, "all/iterable/number/length");
assertAsync(x[0] === 0, "all/iterable/number/0");
@@ -807,17 +703,17 @@ function assertAsyncDone(iteration) {
(function() {
- Promise.race([]).chain(
+ Promise.race([]).then(
assertUnreachable,
assertUnreachable
)
})();
(function() {
- var p1 = Promise.accept(1)
- var p2 = Promise.accept(2)
- var p3 = Promise.accept(3)
- Promise.race([p1, p2, p3]).chain(
+ var p1 = Promise.resolve(1)
+ var p2 = Promise.resolve(2)
+ var p3 = Promise.resolve(3)
+ Promise.race([p1, p2, p3]).then(
function(x) { assertAsync(x === 1, "resolved/one") },
assertUnreachable
)
@@ -825,10 +721,10 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var p1 = Promise.accept(1)
- var p2 = Promise.accept(2)
- var p3 = Promise.accept(3)
- Promise.race([0, p1, p2, p3]).chain(
+ var p1 = Promise.resolve(1)
+ var p2 = Promise.resolve(2)
+ var p3 = Promise.resolve(3)
+ Promise.race([0, p1, p2, p3]).then(
function(x) { assertAsync(x === 0, "resolved-const/one") },
assertUnreachable
)
@@ -836,13 +732,13 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred1 = Promise.defer()
+ var deferred1 = defer(Promise)
var p1 = deferred1.promise
- var deferred2 = Promise.defer()
+ var deferred2 = defer(Promise)
var p2 = deferred2.promise
- var deferred3 = Promise.defer()
+ var deferred3 = defer(Promise)
var p3 = deferred3.promise
- Promise.race([p1, p2, p3]).chain(
+ Promise.race([p1, p2, p3]).then(
function(x) { assertAsync(x === 3, "one/resolve") },
assertUnreachable
)
@@ -852,11 +748,11 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred = Promise.defer()
+ var deferred = defer(Promise)
var p1 = deferred.promise
- var p2 = Promise.accept(2)
- var p3 = Promise.defer().promise
- Promise.race([p1, p2, p3]).chain(
+ var p2 = Promise.resolve(2)
+ var p3 = defer(Promise).promise
+ Promise.race([p1, p2, p3]).then(
function(x) { assertAsync(x === 2, "resolved/one") },
assertUnreachable
)
@@ -865,13 +761,13 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred1 = Promise.defer()
+ var deferred1 = defer(Promise)
var p1 = deferred1.promise
- var deferred2 = Promise.defer()
+ var deferred2 = defer(Promise)
var p2 = deferred2.promise
- var deferred3 = Promise.defer()
+ var deferred3 = defer(Promise)
var p3 = deferred3.promise
- Promise.race([p1, p2, p3]).chain(
+ Promise.race([p1, p2, p3]).then(
function(x) { assertAsync(x === 3, "one/resolve/reject") },
assertUnreachable
)
@@ -881,13 +777,13 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred1 = Promise.defer()
+ var deferred1 = defer(Promise)
var p1 = deferred1.promise
- var deferred2 = Promise.defer()
+ var deferred2 = defer(Promise)
var p2 = deferred2.promise
- var deferred3 = Promise.defer()
+ var deferred3 = defer(Promise)
var p3 = deferred3.promise
- Promise.race([p1, p2, p3]).chain(
+ Promise.race([p1, p2, p3]).then(
assertUnreachable,
function(x) { assertAsync(x === 3, "one/reject/resolve") }
)
@@ -899,7 +795,7 @@ function assertAsyncDone(iteration) {
(function() {
function testPromiseRaceNonIterable(value) {
- Promise.race(value).chain(
+ Promise.race(value).then(
assertUnreachable,
function(r) {
assertAsync(r instanceof TypeError, 'race/non iterable');
@@ -914,18 +810,18 @@ function assertAsyncDone(iteration) {
(function() {
- var deferred1 = Promise.defer()
+ var deferred1 = defer(Promise)
var p1 = deferred1.promise
- var deferred2 = Promise.defer()
+ var deferred2 = defer(Promise)
var p2 = deferred2.promise
- var deferred3 = Promise.defer()
+ var deferred3 = defer(Promise)
var p3 = deferred3.promise
function* f() {
yield p1;
yield p2;
yield p3;
}
- Promise.race(f()).chain(
+ Promise.race(f()).then(
function(x) { assertAsync(x === 3, "race/iterable/resolve/reject") },
assertUnreachable
)
@@ -935,18 +831,18 @@ function assertAsyncDone(iteration) {
})();
(function() {
- var deferred1 = Promise.defer()
+ var deferred1 = defer(Promise)
var p1 = deferred1.promise
- var deferred2 = Promise.defer()
+ var deferred2 = defer(Promise)
var p2 = deferred2.promise
- var deferred3 = Promise.defer()
+ var deferred3 = defer(Promise)
var p3 = deferred3.promise
function* f() {
yield p1;
yield p2;
yield p3;
}
- Promise.race(f()).chain(
+ Promise.race(f()).then(
assertUnreachable,
function(x) { assertAsync(x === 3, "race/iterable/reject/resolve") }
)
@@ -980,7 +876,7 @@ function assertAsyncDone(iteration) {
configurable: true
});
- Promise.race(3).chain(
+ Promise.race(3).then(
function(x) {
assertAsync(x === 0, "race/iterable/number");
},
@@ -1014,18 +910,18 @@ function assertAsyncDone(iteration) {
}
MyPromise.prototype.__proto__ = Promise.prototype
- MyPromise.prototype.chain = function(resolve, reject) {
+ MyPromise.prototype.then = function(resolve, reject) {
log += "c"
- return call(this.__proto__.__proto__.chain, this, resolve, reject)
+ return call(this.__proto__.__proto__.then, this, resolve, reject)
}
log = ""
var p1 = new MyPromise(function(resolve, reject) { resolve(1) })
var p2 = new MyPromise(function(resolve, reject) { reject(2) })
- var d3 = MyPromise.defer()
+ var d3 = defer(MyPromise)
assertTrue(d3.promise instanceof Promise, "subclass/instance")
assertTrue(d3.promise instanceof MyPromise, "subclass/instance-my3")
- assertTrue(log === "nx1nr2dn", "subclass/create")
+ assertTrue(log === "nx1nr2n", "subclass/create")
log = ""
var p4 = MyPromise.resolve(4)
@@ -1038,21 +934,21 @@ function assertAsyncDone(iteration) {
assertTrue(log === "nx4nr5x3", "subclass/resolve")
log = ""
- var d6 = MyPromise.defer()
- d6.promise.chain(function(x) {
+ var d6 = defer(MyPromise)
+ d6.promise.then(function(x) {
return new Promise(function(resolve) { resolve(x) })
- }).chain(function() {})
+ }).then(function() {})
d6.resolve(6)
- assertTrue(log === "dncncnx6", "subclass/chain")
+ assertTrue(log === "ncncnx6", "subclass/then")
log = ""
- Promise.all([11, Promise.accept(12), 13, MyPromise.accept(14), 15, 16])
+ Promise.all([11, Promise.resolve(12), 13, MyPromise.resolve(14), 15, 16])
assertTrue(log === "nx14", "subclass/all/arg")
log = ""
- MyPromise.all([21, Promise.accept(22), 23, MyPromise.accept(24), 25, 26])
- assertTrue(log === "nx24nnx21nnx[object Promise]nnx23nnnx25nnx26n",
+ MyPromise.all([21, Promise.resolve(22), 23, MyPromise.resolve(24), 25, 26])
+ assertTrue(log === "nx24nnx21cnnx[object Promise]cnnx23cncnnx25cnnx26cn",
"subclass/all/self")
})();
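Usage sketch of the defer() helper introduced above, which stands in for the removed Promise.defer() and Promise.accept() extras throughout this file:

  var d = defer(Promise);
  d.promise.then(x => assertEquals(5, x));
  d.resolve(5);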
diff --git a/deps/v8/test/mjsunit/es6/proxies-for.js b/deps/v8/test/mjsunit/es6/proxies-for.js
index 5b818453a9..2b3060b17e 100644
--- a/deps/v8/test/mjsunit/es6/proxies-for.js
+++ b/deps/v8/test/mjsunit/es6/proxies-for.js
@@ -151,7 +151,7 @@ function keys(object) {
object.__proto__ = proxy;
assertEquals(["0"], keys(object));
- // The Proxy doesn't set his ownKeys enumerable.
+ // Keys reported by the proxy's ownKeys trap are not automatically enumerable.
delete object[0];
assertEquals([], keys(object));
@@ -209,10 +209,15 @@ function keys(object) {
assertThrowsEquals(() => {keys(proxy)}, "error");
})();
-
-(function () {
- var symbol = Symbol();
- var p = new Proxy({}, {ownKeys() { return ["1", symbol, "2"] }});
- assertEquals(["1","2"], Object.getOwnPropertyNames(p));
- assertEquals([symbol], Object.getOwnPropertySymbols(p));
+(function testNestedProxy() {
+ var handler = {
+ ownKeys() {
+ return ['c'];
+ },
+ getOwnPropertyDescriptor() { return {configurable: true, enumerable: true } }
+ }
+ var proxy = new Proxy({}, handler);
+ var proxy2 = new Proxy(proxy, {});
+ assertEquals(['c'], keys(proxy));
+ assertEquals(['c'], keys(proxy2));
})();
diff --git a/deps/v8/test/mjsunit/es6/proxies-global-reference.js b/deps/v8/test/mjsunit/es6/proxies-global-reference.js
index 975d7f75fb..1e3d3beb86 100644
--- a/deps/v8/test/mjsunit/es6/proxies-global-reference.js
+++ b/deps/v8/test/mjsunit/es6/proxies-global-reference.js
@@ -5,8 +5,5 @@
var failing_proxy = new Proxy({}, new Proxy({}, {
get() { throw "No trap should fire" }}));
-Object.setPrototypeOf(Object.prototype, failing_proxy);
-assertThrows(()=>a, TypeError);
-
-Object.setPrototypeOf(this, failing_proxy);
-assertThrows(()=>a, TypeError);
+assertThrows(() => Object.setPrototypeOf(Object.prototype, failing_proxy), TypeError);
+assertThrows(()=>a, ReferenceError);
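The rewritten expectations reflect Object.prototype's immutable [[Prototype]]: the setPrototypeOf call now throws before the failing proxy is ever installed, so the unresolved name later is an ordinary ReferenceError rather than a trap-driven TypeError.

  assertThrows(() => Object.setPrototypeOf(Object.prototype, {}), TypeError);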
diff --git a/deps/v8/test/mjsunit/es6/proxies-json.js b/deps/v8/test/mjsunit/es6/proxies-json.js
index d48d5390f6..6b40e3ee7d 100644
--- a/deps/v8/test/mjsunit/es6/proxies-json.js
+++ b/deps/v8/test/mjsunit/es6/proxies-json.js
@@ -35,7 +35,10 @@ function testStringify(expected, object) {
// Test fast case that bails out to slow case.
assertEquals(expected, JSON.stringify(object));
// Test slow case.
- assertEquals(expected, JSON.stringify(object, undefined, 0));
+ assertEquals(expected, JSON.stringify(object, (key, value) => value));
+ // Test gap.
+ assertEquals(JSON.stringify(object, null, "="),
+ JSON.stringify(object, (key, value) => value, "="));
}
@@ -67,6 +70,7 @@ testStringify('[1,null]', [1, proxy_fun]);
var parent1a = { b: proxy1 };
testStringify('{"b":{"a":"A","b":"B","c":"C"}}', parent1a);
+testStringify('{"b":{"a":"A","b":"B","c":"C"}}', parent1a);
var parent1b = { a: 123, b: proxy1, c: true };
testStringify('{"a":123,"b":{"a":"A","b":"B","c":"C"},"c":true}', parent1b);
@@ -503,3 +507,56 @@ for (var i in log) assertSame(target, log[i][1]);
assertEquals(["get", target, "length", proxy], log[0]);
assertEquals(["get", target, "0", proxy], log[1]);
assertEquals(["deleteProperty", target, "0"], log[2]);
+
+proxy = new Proxy([], {
+ get: function(target, property) {
+ if (property == "length") return 7;
+ return 0;
+ },
+});
+assertEquals('[[0,0,0,0,0,0,0]]', JSON.stringify([proxy]));
+
+proxy = new Proxy([], {
+ get: function(target, property) {
+ if (property == "length") return 1E40;
+ return 0;
+ },
+});
+assertThrows(() => JSON.stringify([proxy]), RangeError);
+
+log = [];
+proxy = new Proxy({}, {
+ ownKeys: function() {
+ log.push("ownKeys");
+ return ["0", "a", "b"];
+ },
+ get: function(target, property) {
+ log.push("get " + property);
+ return property.toUpperCase();
+ },
+ getOwnPropertyDescriptor: function(target, property) {
+ log.push("descriptor " + property);
+ return {enumerable: true, configurable: true};
+ },
+ isExtensible: assertUnreachable,
+ has: assertUnreachable,
+ getPrototypeOf: assertUnreachable,
+ setPrototypeOf: assertUnreachable,
+ preventExtensions: assertUnreachable,
+ defineProperty: assertUnreachable,
+ set: assertUnreachable,
+ deleteProperty: assertUnreachable,
+ apply: assertUnreachable,
+ construct: assertUnreachable,
+});
+
+assertEquals('[{"0":"0","a":"A","b":"B"}]', JSON.stringify([proxy]));
+assertEquals(['get toJSON',
+ 'ownKeys',
+ 'descriptor 0',
+ 'descriptor a',
+ 'descriptor b',
+ 'get 0',
+ 'get a',
+ 'get b'], log);
diff --git a/deps/v8/test/mjsunit/es6/proxies-keys.js b/deps/v8/test/mjsunit/es6/proxies-keys.js
index 7344032aaf..4781ae37f4 100644
--- a/deps/v8/test/mjsunit/es6/proxies-keys.js
+++ b/deps/v8/test/mjsunit/es6/proxies-keys.js
@@ -2,38 +2,82 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var target = {
- target: 1
-};
-target.__proto__ = {
- target_proto: 2
-};
-
-var handler = {
- ownKeys: function(target) {
- return ["foo", "bar", Symbol("baz"), "non-enum", "not-found"];
- },
- getOwnPropertyDescriptor: function(target, name) {
- if (name == "non-enum") return {configurable: true};
- if (name == "not-found") return undefined;
- return {enumerable: true, configurable: true};
+(function testObjectKeys() {
+ var target = {
+ target: 1
+ };
+ target.__proto__ = {
+ target_proto: 2
+ };
+
+ var handler = {
+ ownKeys: function(target) {
+ return ["foo", "bar", Symbol("baz"), "non-enum", "not-found"];
+ },
+ getOwnPropertyDescriptor: function(target, name) {
+ if (name == "non-enum") return {configurable: true};
+ if (name == "not-found") return undefined;
+ return {enumerable: true, configurable: true};
+ }
}
-}
-var proxy = new Proxy(target, handler);
+ var proxy = new Proxy(target, handler);
+
+ // Object.keys() ignores symbols and non-enumerable keys.
+ assertEquals(["foo", "bar"], Object.keys(proxy));
+
+ // Edge case: no properties left after filtering.
+ handler.getOwnPropertyDescriptor = undefined;
+ assertEquals([], Object.keys(proxy));
+
+ // Throwing shouldn't crash.
+ handler.getOwnPropertyDescriptor = function() { throw new Number(1); };
+ assertThrows(() => Object.keys(proxy), Number);
+
+ // Fall through to getOwnPropertyDescriptor if there is no trap.
+ handler.ownKeys = undefined;
+ assertThrows(() => Object.keys(proxy), Number);
+
+ // Fall through to target if there is no trap.
+ handler.getOwnPropertyDescriptor = undefined;
+ assertEquals(["target"], Object.keys(proxy));
+ assertEquals(["target"], Object.keys(target));
-// Object.keys() ignores symbols and non-enumerable keys.
-assertEquals(["foo", "bar"], Object.keys(proxy));
+ var proxy2 = new Proxy(proxy, {});
+ assertEquals(["target"], Object.keys(proxy2));
+})();
-// Edge case: no properties left after filtering.
-handler.getOwnPropertyDescriptor = undefined;
-assertEquals([], Object.keys(proxy));
+(function testForSymbols() {
+ var symbol = Symbol();
+ var p = new Proxy({}, {ownKeys() { return ["1", symbol, "2"] }});
+ assertEquals(["1","2"], Object.getOwnPropertyNames(p));
+ assertEquals([symbol], Object.getOwnPropertySymbols(p));
+})();
-// Throwing shouldn't crash.
-handler.getOwnPropertyDescriptor = function() { throw new Number(1); };
-assertThrows("Object.keys(proxy)", Number);
+(function testNoProxyTraps() {
+ var test_sym = Symbol("sym1");
+ var test_sym2 = Symbol("sym2");
+ var target = {
+ one: 1,
+ two: 2,
+ [test_sym]: 4,
+ 0: 0,
+ };
+ Object.defineProperty(
+ target, "non-enum",
+ { enumerable: false, value: "nope", configurable: true, writable: true });
+ target.__proto__ = {
+ target_proto: 3,
+ 1: 1,
+ [test_sym2]: 5
+ };
+ Object.defineProperty(
+ target.__proto__, "non-enum2",
+ { enumerable: false, value: "nope", configurable: true, writable: true });
+ var proxy = new Proxy(target, {});
-// Fall through to target if there is no trap.
-handler.ownKeys = undefined;
-assertEquals(["target"], Object.keys(proxy));
-assertEquals(["target"], Object.keys(target));
+ assertEquals(["0", "one", "two"], Object.keys(proxy));
+ assertEquals(["0", "one", "two", "non-enum"],
+ Object.getOwnPropertyNames(proxy));
+ assertEquals([test_sym], Object.getOwnPropertySymbols(proxy));
+})();
diff --git a/deps/v8/test/mjsunit/es6/reflect-construct.js b/deps/v8/test/mjsunit/es6/reflect-construct.js
index b37f876e94..4661b4093b 100644
--- a/deps/v8/test/mjsunit/es6/reflect-construct.js
+++ b/deps/v8/test/mjsunit/es6/reflect-construct.js
@@ -1,6 +1,8 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --allow-unsafe-function-constructor
(function testReflectConstructArity() {
diff --git a/deps/v8/test/mjsunit/es6/reflect-define-property.js b/deps/v8/test/mjsunit/es6/reflect-define-property.js
index b19c5aa6ff..8eb3f6580e 100644
--- a/deps/v8/test/mjsunit/es6/reflect-define-property.js
+++ b/deps/v8/test/mjsunit/es6/reflect-define-property.js
@@ -441,53 +441,6 @@ try {
}
-// Test runtime calls to DefineDataPropertyUnchecked and
-// DefineAccessorPropertyUnchecked - make sure we don't
-// crash.
-try {
- %DefineAccessorPropertyUnchecked(0, 0, 0, 0, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked(0, 0, 0, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked(null, null, null, null);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineAccessorPropertyUnchecked(null, null, null, null, null);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked({}, null, null, null);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-// Defining properties null should fail even when we have
-// other allowed values
-try {
- %DefineAccessorPropertyUnchecked(null, 'foo', func, null, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked(null, 'foo', 0, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
// Test that all possible differences in step 6 in DefineOwnProperty are
// exercised, i.e., any difference in the given property descriptor and the
// existing properties should not return true, but throw an error if the
diff --git a/deps/v8/test/mjsunit/es6/reflect.js b/deps/v8/test/mjsunit/es6/reflect.js
index ee272b0fc7..d597a78901 100644
--- a/deps/v8/test/mjsunit/es6/reflect.js
+++ b/deps/v8/test/mjsunit/es6/reflect.js
@@ -541,6 +541,13 @@ function prepare(target) {
[s2]: 0, "-1": 0, "88": 0, "aaa": 0 };
assertEquals(["0", "42", "88", "bla", "-1", "aaa", s1, s2],
Reflect.ownKeys(obj));
+ // Force dict-mode elements.
+ delete obj[0];
+ assertEquals(["42", "88", "bla", "-1", "aaa", s1, s2],
+ Reflect.ownKeys(obj));
+ // Force dict-mode properties.
+ delete obj["bla"];
+ assertEquals(["42", "88", "-1", "aaa", s1, s2], Reflect.ownKeys(obj));
})();
diff --git a/deps/v8/test/mjsunit/es6/regexp-constructor.js b/deps/v8/test/mjsunit/es6/regexp-constructor.js
index 559ac00cd0..b685ff2991 100644
--- a/deps/v8/test/mjsunit/es6/regexp-constructor.js
+++ b/deps/v8/test/mjsunit/es6/regexp-constructor.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-subclass
-
"use strict";
function should_not_be_called() {
diff --git a/deps/v8/test/mjsunit/es6/regexp-flags.js b/deps/v8/test/mjsunit/es6/regexp-flags.js
index 480222d95a..2bcccfa760 100644
--- a/deps/v8/test/mjsunit/es6/regexp-flags.js
+++ b/deps/v8/test/mjsunit/es6/regexp-flags.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps
-
var r1 = /abc/gi;
assertEquals("abc", r1.source);
assertTrue(r1.global);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-3750.js b/deps/v8/test/mjsunit/es6/regress/regress-3750.js
deleted file mode 100644
index 10509bff51..0000000000
--- a/deps/v8/test/mjsunit/es6/regress/regress-3750.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-object-observe
-
-'use strict';
-class Example { }
-Object.observe(Example.prototype, function(){});
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4482.js b/deps/v8/test/mjsunit/es6/regress/regress-4482.js
index 2472b466ab..d813d21300 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-4482.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4482.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-sloppy
assertEquals("function", (function f() { f = 42; return typeof f })());
assertEquals("function",
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-468661.js b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
index 4a42350930..4a58a71d30 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-468661.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
@@ -9,7 +9,8 @@ var exception = null;
var break_count = 0;
var expected_values =
- [ReferenceError, undefined, 0, 0, 0, 0, 1, ReferenceError, ReferenceError];
+ [ReferenceError, undefined, 0, 0, 0, 0, 1,
+ ReferenceError, ReferenceError];
function listener(event, exec_state, event_data, data) {
try {
@@ -39,7 +40,6 @@ function listener(event, exec_state, event_data, data) {
assertTrue(v instanceof ReferenceError);
} else {
assertSame(expected_values[break_count], v);
-
}
++break_count;
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js b/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js
index 3144b39830..c157a7e79f 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-cr372788.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --promise-extra
+// Flags: --allow-natives-syntax
var x = 0;
var y = 0;
@@ -38,7 +38,7 @@ for (var i = 0; i < 3; ++i) {
assertEquals(0, x);
(function check() {
- Promise.resolve().chain(function() {
+ Promise.resolve().then(function() {
// Delay check until all handlers have run.
if (y < 3) check(); else assertEquals(6, x);
}).catch(function(e) { %AbortJS("FAILURE: " + e) });
diff --git a/deps/v8/test/mjsunit/harmony/species.js b/deps/v8/test/mjsunit/es6/species.js
index da1df4331f..39156a4a2e 100644
--- a/deps/v8/test/mjsunit/harmony/species.js
+++ b/deps/v8/test/mjsunit/es6/species.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
-
// Test the ES2015 @@species feature
'use strict';
diff --git a/deps/v8/test/mjsunit/es6/spread-call-new-class.js b/deps/v8/test/mjsunit/es6/spread-call-new-class.js
index 1fdf25b616..de88cff5d1 100644
--- a/deps/v8/test/mjsunit/es6/spread-call-new-class.js
+++ b/deps/v8/test/mjsunit/es6/spread-call-new-class.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy
-
(function testConstructClassStrict() {
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/spread-call-super-property.js b/deps/v8/test/mjsunit/es6/spread-call-super-property.js
index b298a69aa1..a85ea41638 100644
--- a/deps/v8/test/mjsunit/es6/spread-call-super-property.js
+++ b/deps/v8/test/mjsunit/es6/spread-call-super-property.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy
-
(function testCallSuperPropertyStrict() {
"use strict";
class BaseClass {
diff --git a/deps/v8/test/mjsunit/harmony/string-match.js b/deps/v8/test/mjsunit/es6/string-match.js
index 25a3ca2fd1..2c7affe454 100644
--- a/deps/v8/test/mjsunit/harmony/string-match.js
+++ b/deps/v8/test/mjsunit/es6/string-match.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-subclass
-
var pattern = {};
pattern[Symbol.match] = function(string) {
return string.length;
diff --git a/deps/v8/test/mjsunit/harmony/string-replace.js b/deps/v8/test/mjsunit/es6/string-replace.js
index 208c483fd0..0beb57a536 100644
--- a/deps/v8/test/mjsunit/harmony/string-replace.js
+++ b/deps/v8/test/mjsunit/es6/string-replace.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-subclass
-
var pattern = {
[Symbol.replace]: (string, newValue) => string + newValue
};
diff --git a/deps/v8/test/mjsunit/es6/string-search.js b/deps/v8/test/mjsunit/es6/string-search.js
index dc029826ad..cbdf33d692 100644
--- a/deps/v8/test/mjsunit/es6/string-search.js
+++ b/deps/v8/test/mjsunit/es6/string-search.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-subclass
-
var pattern = {};
pattern[Symbol.search] = function(string) {
return string.length;
diff --git a/deps/v8/test/mjsunit/harmony/string-split.js b/deps/v8/test/mjsunit/es6/string-split.js
index 1240d84bc1..8ca655cad9 100644
--- a/deps/v8/test/mjsunit/harmony/string-split.js
+++ b/deps/v8/test/mjsunit/es6/string-split.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-subclass
-
var pattern = {};
var limit = { value: 3 };
pattern[Symbol.split] = function(string, limit) {
diff --git a/deps/v8/test/mjsunit/es6/super.js b/deps/v8/test/mjsunit/es6/super.js
index a2ba1e863b..4c80ce7711 100644
--- a/deps/v8/test/mjsunit/es6/super.js
+++ b/deps/v8/test/mjsunit/es6/super.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-// Flags: --harmony-sloppy
(function TestSuperNamedLoads() {
function Base() { }
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index 9bac41f863..a21afb3770 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -555,7 +555,9 @@ TestContext();
function TestStringify(expected, input) {
assertEquals(expected, JSON.stringify(input));
- assertEquals(expected, JSON.stringify(input, null, 0));
+ assertEquals(expected, JSON.stringify(input, (key, value) => value));
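+ // Test gap.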
+ assertEquals(JSON.stringify(input, null, "="),
+ JSON.stringify(input, (key, value) => value, "="));
}
TestStringify(undefined, Symbol("a"));
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest.js b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
index 1de8ec6c8e..3d2ecb8daa 100644
--- a/deps/v8/test/mjsunit/es6/tail-call-megatest.js
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
@@ -10,6 +10,7 @@ Error.prepareStackTrace = (error,stack) => {
return error.message + "\n at " + stack.join("\n at ");
}
+var verbose = typeof(arguments) !== "undefined" && arguments.indexOf("-v") >= 0;
function checkStackTrace(expected) {
var e = new Error();
@@ -340,32 +341,32 @@ function run_tests(shard) {
return source;
}
- var f_args_variants = ["", "1", "1, 2"];
- var g_args_variants = ["", "10", "10, 20"];
+ var f_args_variants = [/*"", "1",*/ "1, 2"];
+ var g_args_variants = [/*"", "10",*/ "10, 20"];
var f_inlinable_variants = [true, false];
var g_inlinable_variants = [true, false];
// This is to avoid bailing out because of referencing new.target.
- var check_new_target_variants = [true, false];
+ var check_new_target_variants = [/*true,*/ false];
var deopt_mode_variants = ["none", "f", "g", "test"];
var f_variants = [
f_cfg_sloppy,
f_cfg_strict,
f_cfg_bound,
f_cfg_proxy,
- f_cfg_possibly_eval,
+// f_cfg_possibly_eval,
];
var g_variants = [
g_cfg_normal,
- g_cfg_reflect_apply,
+// g_cfg_reflect_apply,
g_cfg_function_apply,
- g_cfg_function_apply_arguments_object,
+// g_cfg_function_apply_arguments_object,
g_cfg_function_call,
];
var test_warmup_counts = [0, 1, 2];
var iter = 0;
var tests_executed = 0;
- if (shard !== undefined) {
+ if (verbose && shard !== undefined) {
print("Running shard #" + shard);
}
f_variants.forEach((f_cfg) => {
@@ -378,7 +379,9 @@ function run_tests(shard) {
g_inlinable_variants.forEach((g_inlinable) => {
test_warmup_counts.forEach((test_warmup_count) => {
if (shard !== undefined && (iter++) % SHARDS_COUNT != shard) {
- print("skipping...");
+ if (verbose) {
+ print("skipping...");
+ }
return;
}
tests_executed++;
@@ -396,8 +399,10 @@ function run_tests(shard) {
deopt_mode,
};
var source = test_template(cfg);
- print("====================");
- print(source);
+ if (verbose) {
+ print("====================");
+ print(source);
+ }
eval(source);
});
});
@@ -408,7 +413,9 @@ function run_tests(shard) {
});
});
});
- print("Number of tests executed: " + tests_executed);
+ if (verbose) {
+ print("Number of tests executed: " + tests_executed);
+ }
}
// Uncomment to run all the tests at once or use shard runners.
diff --git a/deps/v8/test/mjsunit/es6/tail-call.js b/deps/v8/test/mjsunit/es6/tail-call.js
index d0d00f4b3e..6ecf04f3d9 100644
--- a/deps/v8/test/mjsunit/es6/tail-call.js
+++ b/deps/v8/test/mjsunit/es6/tail-call.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --harmony-tailcalls
+// Flags: --harmony-do-expressions
+
"use strict";
Error.prepareStackTrace = (error,stack) => {
@@ -259,9 +261,8 @@ function f_153(expected_call_stack, a) {
}
%NeverOptimizeFunction(g);
- var context = 10;
function f(v) {
- return g(context);
+ return g();
}
%SetForceInlineFlag(f);
@@ -319,10 +320,57 @@ function f_153(expected_call_stack, a) {
return f([f, g3, test], 13), f([f, test], 153);
}
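+ // g4-g10 exercise tail positions inside ||, &&, comma, and ?: combinations.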
+ function g4(a) {
+ return f([f, g4, test], false) ||
+ (f([f, g4, test], true) && f([f, test], true));
+ }
+
+ function g5(a) {
+ return f([f, g5, test], true) &&
+ (f([f, g5, test], false) || f([f, test], true));
+ }
+
+ function g6(a) {
+ return f([f, g6, test], 13), f([f, g6, test], 42),
+ f([f, test], 153);
+ }
+
+ function g7(a) {
+ return f([f, g7, test], false) ||
+ (f([f, g7, test], false) ? f([f, test], true)
+ : f([f, test], true));
+ }
+
+ function g8(a) {
+ return f([f, g8, test], false) || f([f, g8, test], true) &&
+ f([f, test], true);
+ }
+
+ function g9(a) {
+ return f([f, g9, test], true) && f([f, g9, test], false) ||
+ f([f, test], true);
+ }
+
+ function g10(a) {
+ return f([f, g10, test], true) && f([f, g10, test], false) ||
+ f([f, g10, test], true) ?
+ f([f, g10, test], true) && f([f, g10, test], false) ||
+ f([f, test], true) :
+ f([f, g10, test], true) && f([f, g10, test], false) ||
+ f([f, test], true);
+ }
+
function test() {
assertEquals(true, g1());
assertEquals(true, g2());
assertEquals(153, g3());
+ assertEquals(true, g4());
+ assertEquals(true, g5());
+ assertEquals(153, g6());
+ assertEquals(true, g7());
+ assertEquals(true, g8());
+ assertEquals(true, g9());
+ assertEquals(true, g10());
}
test();
test();
@@ -534,9 +582,34 @@ function f_153(expected_call_stack, a) {
return (() => f_153([f_153, test]))();
}
+ function g3(a) {
+ var closure = () => f([f, closure, test], true)
+ ? f_153([f_153, test])
+ : f_153([f_153, test]);
+ return closure();
+ }
+
function test() {
assertEquals(153, g1());
assertEquals(153, g2());
+ assertEquals(153, g3());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from do expressions.
+(function () {
+ function g1(a) {
+ var a = do { return f_153([f_153, test]); 42; };
+ return a;
+ }
+
+ function test() {
+ assertEquals(153, g1());
}
test();
test();
diff --git a/deps/v8/test/mjsunit/es6/typedarray-set-length-internal.js b/deps/v8/test/mjsunit/es6/typedarray-set-length-internal.js
new file mode 100644
index 0000000000..22b8f67e0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-set-length-internal.js
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array
+];
+
+var descriptor = { get: function() { throw new Error("accessed length"); } };
+
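+// set() must use the internal element count rather than the "length" property,
+// so the throwing getter installed below must never fire.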
+for (var constructor of typedArrayConstructors) {
+ var differentConstructor =
+ constructor === Uint8Array ? Int8Array : Uint8Array;
+ var target = new constructor(16);
+ Object.defineProperty(target, "length", descriptor);
+
+ var sameBuffer = new differentConstructor(target.buffer, 0, 2);
+ Object.defineProperty(sameBuffer, "length", descriptor);
+ target.set(sameBuffer);
+
+ var differentBuffer = new differentConstructor(16);
+ Object.defineProperty(differentBuffer, "length", descriptor);
+ target.set(differentBuffer);
+
+ var array = [0, 1, 2];
+ target.set(array);
+}
diff --git a/deps/v8/test/mjsunit/harmony/typedarray-species.js b/deps/v8/test/mjsunit/es6/typedarray-species.js
index 35a9ea1de7..020d65c501 100644
--- a/deps/v8/test/mjsunit/harmony/typedarray-species.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-species.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
-
// Subclasses of %TypedArray% construct themselves under map, etc
var typedArrayConstructors = [
diff --git a/deps/v8/test/mjsunit/es6/typedarray-tostring.js b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
index e6adda0405..9d49cb1cc9 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-tostring.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
@@ -83,4 +83,17 @@ for (var constructor of typedArrayConstructors) {
assertEquals("1,2", Array.prototype.join.call(a5));
assertEquals("1,2,3", Array.prototype.toString.call(a5));
assertEquals("1,2", Array.prototype.toLocaleString.call(a5));
+
+ (function TestToLocaleStringCalls() {
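+ // Each element must be formatted via Number.prototype.toLocaleString,
+ // called once per element with no arguments.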
+ let log = [];
+ let pushArgs = (label) => (...args) => log.push(label, args);
+
+ let NumberToLocaleString = Number.prototype.toLocaleString;
+ Number.prototype.toLocaleString = pushArgs("Number");
+
+ (new constructor([1, 2])).toLocaleString();
+ assertEquals(["Number", [], "Number", []], log);
+
+ Number.prototype.toLocaleString = NumberToLocaleString;
+ })();
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index 4bdf8226a8..b1bd8937be 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -229,6 +229,27 @@ function TestTypedArray(constr, elementSize, typicalElement) {
RangeError);
}
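+
+ // No argument and null yield length 0; a boolean coerces to number
+ // (true gives length 1).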
+ var aFromUndef = new constr();
+ assertSame(elementSize, aFromUndef.BYTES_PER_ELEMENT);
+ assertSame(0, aFromUndef.length);
+ assertSame(0*elementSize, aFromUndef.byteLength);
+ assertSame(0, aFromUndef.byteOffset);
+ assertSame(0*elementSize, aFromUndef.buffer.byteLength);
+
+ var aFromNull = new constr(null);
+ assertSame(elementSize, aFromNull.BYTES_PER_ELEMENT);
+ assertSame(0, aFromNull.length);
+ assertSame(0*elementSize, aFromNull.byteLength);
+ assertSame(0, aFromNull.byteOffset);
+ assertSame(0*elementSize, aFromNull.buffer.byteLength);
+
+ var aFromBool = new constr(true);
+ assertSame(elementSize, aFromBool.BYTES_PER_ELEMENT);
+ assertSame(1, aFromBool.length);
+ assertSame(1*elementSize, aFromBool.byteLength);
+ assertSame(0, aFromBool.byteOffset);
+ assertSame(1*elementSize, aFromBool.buffer.byteLength);
+
var aFromString = new constr("30");
assertSame(elementSize, aFromString.BYTES_PER_ELEMENT);
assertSame(30, aFromString.length);
@@ -236,6 +257,8 @@ function TestTypedArray(constr, elementSize, typicalElement) {
assertSame(0, aFromString.byteOffset);
assertSame(30*elementSize, aFromString.buffer.byteLength);
+ assertThrows(function() { new constr(Symbol()); }, TypeError);
+
var jsArray = [];
for (i = 0; i < 30; i++) {
jsArray.push(typicalElement);
diff --git a/deps/v8/test/mjsunit/harmony/unicode-character-ranges.js b/deps/v8/test/mjsunit/es6/unicode-character-ranges.js
index e4f5247c15..f39004fe97 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-character-ranges.js
+++ b/deps/v8/test/mjsunit/es6/unicode-character-ranges.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps --harmony-regexp-lookbehind
+// Flags: --harmony-regexp-lookbehind
function execl(expectation, regexp, subject) {
if (regexp instanceof String) regexp = new RegExp(regexp, "u");
diff --git a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js b/deps/v8/test/mjsunit/es6/unicode-escapes-in-regexps.js
index 7ea6f62990..2d2d11825d 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
+++ b/deps/v8/test/mjsunit/es6/unicode-escapes-in-regexps.js
@@ -4,8 +4,6 @@
// ES6 extends the \uxxxx escape and also allows \u{xxxxx}.
-// Flags: --harmony-unicode-regexps
-
function testRegexpHelper(r) {
assertTrue(r.test("foo"));
assertTrue(r.test("boo"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-backrefs.js b/deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js
index e02301be1e..56b9c5eb8c 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-backrefs.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-backrefs.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps --harmony-regexp-lookbehind
+// Flags: --harmony-regexp-lookbehind
// Back reference does not end in the middle of a surrogate pair.
function replace(string) {
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case-noi18n.js b/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js
index a4cb9dc337..a99894234a 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case-noi18n.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case-noi18n.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps
-
// Non-unicode use toUpperCase mappings.
assertFalse(/[\u00e5]/i.test("\u212b"));
assertFalse(/[\u212b]/i.test("\u00e5\u1234"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case.js b/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case.js
index 291b8662ff..dd02ca9d32 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-ignore-case.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps
-
// Non-unicode use toUpperCase mappings.
assertFalse(/[\u00e5]/i.test("\u212b"));
assertFalse(/[\u212b]/i.test("\u00e5\u1234"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-last-index.js b/deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js
index 4a075d4380..67fbac7ef3 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-last-index.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-last-index.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps --harmony-regexp-lookbehind
+// Flags: --harmony-regexp-lookbehind
var r = /./ug;
assertEquals(["\ud800\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-restricted-syntax.js b/deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js
index d129cc340e..dd4fa39ab5 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-restricted-syntax.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-restricted-syntax.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps
-
// test262/data/test/language/literals/regexp/u-dec-esc
assertThrows("/\\1/u", SyntaxError);
// test262/language/literals/regexp/u-invalid-char-range-a
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-unanchored-advance.js b/deps/v8/test/mjsunit/es6/unicode-regexp-unanchored-advance.js
index 97960e1cd3..c471122baf 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-unanchored-advance.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-unanchored-advance.js
@@ -2,7 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps
-
var s = "a".repeat(1E7) + "\u1234";
assertEquals(["\u1234", "\u1234"], /(\u1234)/u.exec(s));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-zero-length.js b/deps/v8/test/mjsunit/es6/unicode-regexp-zero-length.js
index bbc17dc2d5..42bb2d71dc 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-regexp-zero-length.js
+++ b/deps/v8/test/mjsunit/es6/unicode-regexp-zero-length.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps
-
var L = "\ud800";
var T = "\udc00";
var x = "x";
diff --git a/deps/v8/test/mjsunit/es7/array-includes-receiver.js b/deps/v8/test/mjsunit/es7/array-includes-receiver.js
new file mode 100644
index 0000000000..85915d4958
--- /dev/null
+++ b/deps/v8/test/mjsunit/es7/array-includes-receiver.js
@@ -0,0 +1,634 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Ensure `Array.prototype.includes` functions correctly for numerous elements
+// kinds and various exotic receiver types.
+
+// TODO(caitp): update kIterCount to a high enough number to trigger inlining,
+// once inlining this builtin is supported
+var kIterCount = 1;
+var kTests = {
+ Array: {
+ FAST_ELEMENTS() {
+ var r = /foo/;
+ var s = new String("bar");
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var array = [r, s, p];
+ assertTrue(%HasFastObjectElements(array));
+ assertFalse(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(array.includes(p));
+ assertFalse(array.includes(o));
+ }
+ },
+
+ FAST_HOLEY_ELEMENTS() {
+ var r = /foo/;
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var array = [r, , p];
+ assertTrue(%HasFastObjectElements(array));
+ assertTrue(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(array.includes(p));
+ assertFalse(array.includes(o));
+ }
+ },
+
+ FAST_SMI_ELEMENTS() {
+ var array = [0, 88, 9999, 1, -5, 7];
+ assertTrue(%HasFastSmiElements(array));
+ assertFalse(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(array.includes(9999));
+ assertTrue(array.includes(-5));
+ assertFalse(array.includes(-5.00001));
+ assertFalse(array.includes(undefined));
+ assertFalse(array.includes(NaN));
+ }
+ },
+
+ FAST_HOLEY_SMI_ELEMENTS() {
+ var array = [49, , , 72, , , 67, -48];
+ assertTrue(%HasFastSmiElements(array));
+ assertTrue(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(array.includes(72));
+ assertTrue(array.includes(-48));
+ assertFalse(array.includes(72, 4));
+ assertTrue(array.includes(undefined));
+ assertFalse(array.includes(undefined, -2));
+ assertFalse(array.includes(NaN));
+ }
+ },
+
+ FAST_DOUBLE_ELEMENTS() {
+ var array = [7.00000001, -13000.89412, 73451.4124,
+ 5824.48, 6.0000495, 48.3488, 44.0, 76.35, NaN, 78.4];
+ assertTrue(%HasFastDoubleElements(array));
+ assertFalse(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(array.includes(7.00000001));
+ assertFalse(array.includes(7.00000001, 2));
+ assertTrue(array.includes(NaN));
+ assertFalse(array.includes(NaN, -1));
+ assertTrue(array.includes(-13000.89412));
+ assertFalse(array.includes(-13000.89412, -2));
+ assertFalse(array.includes(undefined));
+ }
+ },
+
+ FAST_HOLEY_DOUBLE_ELEMENTS() {
+ var array = [7.00000001, -13000.89412, ,
+ 5824.48, , 48.3488, , NaN, , 78.4];
+ assertTrue(%HasFastDoubleElements(array));
+ assertTrue(%HasFastHoleyElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(array.includes(7.00000001));
+ assertFalse(array.includes(7.00000001, 2));
+ assertTrue(array.includes(NaN));
+ assertFalse(array.includes(NaN, -2));
+ assertTrue(array.includes(-13000.89412));
+ assertFalse(array.includes(-13000.89412, -2));
+ assertTrue(array.includes(undefined, -2));
+ assertFalse(array.includes(undefined, -1));
+ }
+ },
+
+ DICTIONARY_ELEMENTS() {
+ var array = [];
+ Object.defineProperty(array, 4, { get() { return NaN; } });
+ Object.defineProperty(array, 7, { value: Function });
+
+ assertTrue(%HasDictionaryElements(array));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(array.includes(NaN));
+ assertFalse(array.includes(NaN, -3));
+ assertTrue(array.includes(Function));
+ assertTrue(array.includes(undefined));
+ assertFalse(array.includes(undefined, 7));
+ }
+ },
+ },
+
+ Object: {
+ FAST_ELEMENTS() {
+ var r = /foo/;
+ var s = new String("bar");
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var object = { 0: r, 1: s, 2: p, length: 3 };
+ assertTrue(%HasFastObjectElements(object));
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertFalse(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call(object, p));
+ assertFalse(Array.prototype.includes.call(object, o));
+ }
+ },
+
+ FAST_HOLEY_ELEMENTS() {
+ var r = /foo/;
+ var p = new Proxy({}, {});
+ var o = {};
+
+ var object = { 0: r, 2: p, length: 3 };
+ assertTrue(%HasFastObjectElements(object));
+ assertTrue(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call(object, p));
+ assertFalse(Array.prototype.includes.call(object, o));
+ }
+ },
+
+ FAST_SMI_ELEMENTS() {
+ var object = { 0: 0, 1: 88, 2: 9999, 3: 1, 4: -5, 5: 7, length: 6 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastSmiElements(object));
+ // assertFalse(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call(object, 9999));
+ assertTrue(Array.prototype.includes.call(object, -5));
+ assertFalse(Array.prototype.includes.call(object, -5.00001));
+ assertFalse(Array.prototype.includes.call(object, undefined));
+ assertFalse(Array.prototype.includes.call(object, NaN));
+ }
+ },
+
+ FAST_HOLEY_SMI_ELEMENTS() {
+ var object = { 0: 49, 3: 72, 6: 67, 7: -48, length: 8 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastSmiElements(object));
+ // assertTrue(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call(object, 72));
+ assertTrue(Array.prototype.includes.call(object, -48));
+ assertFalse(Array.prototype.includes.call(object, 72, 4));
+ assertTrue(Array.prototype.includes.call(object, undefined));
+ assertFalse(Array.prototype.includes.call(object, undefined, -2));
+ assertFalse(Array.prototype.includes.call(object, NaN));
+ }
+ },
+
+ FAST_DOUBLE_ELEMENTS() {
+ var object = { 0: 7.00000001, 1: -13000.89412, 2: 73451.4124,
+ 3: 5824.48, 4: 6.0000495, 5: 48.3488, 6: 44.0, 7: 76.35,
+ 8: NaN, 9: 78.4, length: 10 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastDoubleElements(object));
+ // assertFalse(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call(object, 7.00000001));
+ assertFalse(Array.prototype.includes.call(object, 7.00000001, 2));
+ assertTrue(Array.prototype.includes.call(object, NaN));
+ assertFalse(Array.prototype.includes.call(object, NaN, -1));
+ assertTrue(Array.prototype.includes.call(object, -13000.89412));
+ assertFalse(Array.prototype.includes.call(object, -13000.89412, -2));
+ assertFalse(Array.prototype.includes.call(object, undefined));
+ }
+ },
+
+ FAST_HOLEY_DOUBLE_ELEMENTS() {
+ var object = { 0: 7.00000001, 1: -13000.89412, 3: 5824.48, 5: 48.3488,
+ 7: NaN, 9: 78.4, length: 10 };
+ // TODO(caitp): JSObjects always seem to start with FAST_HOLEY_ELEMENTS
+ // assertTrue(%HasFastDoubleElements(object));
+ // assertTrue(%HasFastHoleyElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call(object, 7.00000001));
+ assertFalse(Array.prototype.includes.call(object, 7.00000001, 2));
+ assertTrue(Array.prototype.includes.call(object, NaN));
+ assertFalse(Array.prototype.includes.call(object, NaN, -2));
+ assertTrue(Array.prototype.includes.call(object, -13000.89412));
+ assertFalse(Array.prototype.includes.call(object, -13000.89412, -2));
+ assertTrue(Array.prototype.includes.call(object, undefined, -2));
+ assertFalse(Array.prototype.includes.call(object, undefined, -1));
+ }
+ },
+
+ DICTIONARY_ELEMENTS() {
+ var object = { length: 8 };
+ Object.defineProperty(object, 4, { get() { return NaN; } });
+ Object.defineProperty(object, 7, { value: Function });
+
+ assertTrue(%HasDictionaryElements(object));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call(object, NaN));
+ assertFalse(Array.prototype.includes.call(object, NaN, -3));
+ assertTrue(Array.prototype.includes.call(object, Function));
+ assertTrue(Array.prototype.includes.call(object, undefined));
+ assertFalse(Array.prototype.includes.call(object, undefined, 7));
+ }
+
+ (function prototypeModifiedDuringAccessor() {
+ function O() {
+ return {
+ __proto__: {},
+ get 0() {
+ this.__proto__.__proto__ = {
+ get 1() {
+ this[2] = "c";
+ return "b";
+ }
+ };
+ return "a";
+ },
+ length: 3
+ };
+ }
+
+ // Switch to slow path when first accessor modifies the prototype
+ assertTrue(Array.prototype.includes.call(O(), "a"));
+ assertTrue(Array.prototype.includes.call(O(), "b"));
+ assertTrue(Array.prototype.includes.call(O(), "c"));
+
+ // Starting past index 0 never triggers the accessor, so the fast path is kept
+ assertFalse(Array.prototype.includes.call(O(), "c", 2));
+ assertFalse(Array.prototype.includes.call(O(), "b", 1));
+ assertTrue(Array.prototype.includes.call(O(), undefined, 1));
+ })();
+ },
+ },
+
+ String: {
+ FAST_STRING_ELEMENTS() {
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call("froyo", "y"));
+ assertFalse(Array.prototype.includes.call("froyo", "y", -1));
+ assertTrue(Array.prototype.includes.call("froyo", "y", -2));
+ assertFalse(Array.prototype.includes.call("froyo", NaN));
+ assertFalse(Array.prototype.includes.call("froyo", undefined));
+ }
+ },
+
+ SLOW_STRING_ELEMENTS() {
+ var string = new String("froyo");
+
+ // Never accessible from A.p.includes as 'length' is not configurable
+ Object.defineProperty(string, 34, { value: NaN });
+ Object.defineProperty(string, 12, { get() { return "nope" } });
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertTrue(Array.prototype.includes.call("froyo", "y"));
+ assertFalse(Array.prototype.includes.call("froyo", "y", -1));
+ assertTrue(Array.prototype.includes.call("froyo", "y", -2));
+ assertFalse(Array.prototype.includes.call(string, NaN));
+ assertFalse(Array.prototype.includes.call(string, undefined));
+ assertFalse(Array.prototype.includes.call(string, "nope"));
+ }
+ },
+ },
+
+ Arguments: {
+ FAST_SLOPPY_ARGUMENTS_ELEMENTS() {
+ var args = (function(a, b) { return arguments; })("foo", NaN, "bar");
+ assertTrue(%HasSloppyArgumentsElements(args));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertFalse(Array.prototype.includes.call(args, undefined));
+ assertTrue(Array.prototype.includes.call(args, NaN));
+ assertFalse(Array.prototype.includes.call(args, NaN, -1));
+ assertTrue(Array.prototype.includes.call(args, "bar", -1));
+ }
+ },
+
+ SLOW_SLOPPY_ARGUMENTS_ELEMENTS() {
+ var args = (function(a, a) { return arguments; })("foo", NaN, "bar");
+ Object.defineProperty(args, 3, { get() { return "silver"; } });
+ Object.defineProperty(args, "length", { value: 4 });
+ assertTrue(%HasSloppyArgumentsElements(args));
+
+ for (var i = 0; i < kIterCount; ++i) {
+ assertFalse(Array.prototype.includes.call(args, undefined));
+ assertTrue(Array.prototype.includes.call(args, NaN));
+ assertFalse(Array.prototype.includes.call(args, NaN, -2));
+ assertTrue(Array.prototype.includes.call(args, "bar", -2));
+ assertTrue(Array.prototype.includes.call(args, "silver", -1));
+ }
+ }
+ },
+
+ TypedArray: {
+ Int8Array() {
+ var array = new Int8Array([-129, 128,
+ NaN /* 0 */, +0 /* 0 */, -0 /* 0 */,
+ +Infinity /* 0 */, -Infinity /* 0 */,
+ 255 /* -1 */, 127 /* 127 */, -255 /* 1 */]);
+ assertFalse(Array.prototype.includes.call(array, -129));
+ assertFalse(Array.prototype.includes.call(array, 128));
+
+ assertTrue(Array.prototype.includes.call(array, 0, 2));
+ assertTrue(Array.prototype.includes.call(array, 0, 3));
+ assertTrue(Array.prototype.includes.call(array, 0, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, -1, 7));
+ assertFalse(Array.prototype.includes.call(array, -1, 8));
+
+ assertTrue(Array.prototype.includes.call(array, 127, 8));
+ assertFalse(Array.prototype.includes.call(array, 127, 9));
+
+ assertTrue(Array.prototype.includes.call(array, 1, 9));
+ },
+
+ Detached_Int8Array() {
+ var array = new Int8Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Uint8Array() {
+ var array = new Uint8Array([-1, 256,
+ NaN /* 0 */, +0 /* 0 */, -0 /* 0 */,
+ +Infinity /* 0 */, -Infinity /* 0 */,
+ 255 /* 255 */, 257 /* 1 */, -128 /* 128 */,
+ -2 /* 254 */]);
+ assertFalse(Array.prototype.includes.call(array, -1));
+ assertFalse(Array.prototype.includes.call(array, 256));
+
+ assertTrue(Array.prototype.includes.call(array, 0, 2));
+ assertTrue(Array.prototype.includes.call(array, 0, 3));
+ assertTrue(Array.prototype.includes.call(array, 0, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, 255, 7));
+ assertFalse(Array.prototype.includes.call(array, 255, 8));
+
+ assertTrue(Array.prototype.includes.call(array, 1, 8));
+ assertFalse(Array.prototype.includes.call(array, 1, 9));
+
+ assertTrue(Array.prototype.includes.call(array, 128, 9));
+ assertFalse(Array.prototype.includes.call(array, 128, 10));
+
+ assertTrue(Array.prototype.includes.call(array, 254, 10));
+ },
+
+ Detached_Uint8Array() {
+ var array = new Uint8Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Uint8ClampedArray() {
+ var array = new Uint8ClampedArray([-1 /* 0 */, NaN /* 0 */, 256 /* 255 */,
+ 127.6 /* 128 */, 127.4 /* 127 */,
+ 121.5 /* 122 */, 124.5 /* 124 */]);
+ assertFalse(Array.prototype.includes.call(array, -1));
+ assertFalse(Array.prototype.includes.call(array, 256));
+
+ assertTrue(Array.prototype.includes.call(array, 0));
+ assertTrue(Array.prototype.includes.call(array, 0, 1));
+ assertTrue(Array.prototype.includes.call(array, 255, 2));
+
+ assertTrue(Array.prototype.includes.call(array, 128, 3));
+ assertFalse(Array.prototype.includes.call(array, 128, 4));
+
+ assertTrue(Array.prototype.includes.call(array, 127, 4));
+ assertFalse(Array.prototype.includes.call(array, 127, 5));
+
+ assertTrue(Array.prototype.includes.call(array, 122, 5));
+ assertFalse(Array.prototype.includes.call(array, 122, 6));
+
+ assertTrue(Array.prototype.includes.call(array, 124, 6));
+ },
+
+ Detached_Uint8ClampedArray() {
+ var array = new Uint8ClampedArray(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Int16Array() {
+ var array = new Int16Array([-32769, 32768,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFF /* -1 */, 30000 /* 30000 */,
+ 300000 /* -27680 */]);
+ assertFalse(Array.prototype.includes.call(array, -32769));
+ assertFalse(Array.prototype.includes.call(array, 32768));
+
+ assertTrue(Array.prototype.includes.call(array, 0, 2));
+ assertTrue(Array.prototype.includes.call(array, 0, 3));
+ assertTrue(Array.prototype.includes.call(array, 0, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, -1, 7));
+ assertFalse(Array.prototype.includes.call(array, -1, 8));
+
+ assertTrue(Array.prototype.includes.call(array, 30000, 8));
+ assertFalse(Array.prototype.includes.call(array, 30000, 9));
+
+ assertTrue(Array.prototype.includes.call(array, -27680, 9));
+ },
+
+ Detached_Int16Array() {
+ var array = new Int16Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Uint16Array() {
+ var array = new Uint16Array([-1, 65536,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFF /* 65535 */, 300000 /* 37856 */,
+ 3000000 /* 50880 */]);
+ assertFalse(Array.prototype.includes.call(array, -1));
+ assertFalse(Array.prototype.includes.call(array, 65536));
+
+ assertTrue(Array.prototype.includes.call(array, 0, 2));
+ assertTrue(Array.prototype.includes.call(array, 0, 3));
+ assertTrue(Array.prototype.includes.call(array, 0, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, 65535, 7));
+ assertFalse(Array.prototype.includes.call(array, 65535, 8));
+
+ assertTrue(Array.prototype.includes.call(array, 37856, 8));
+ assertFalse(Array.prototype.includes.call(array, 37856, 9));
+
+ assertTrue(Array.prototype.includes.call(array, 50880, 9));
+ },
+
+ Detached_Uint16Array() {
+ var array = new Uint16Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Int32Array() {
+ var array = new Int32Array([-2147483649, 2147483648,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* -1 */, 4294968064 /* 768 */,
+ 4294959447 /* -7849 */]);
+ assertFalse(Array.prototype.includes.call(array, -2147483649));
+ assertFalse(Array.prototype.includes.call(array, 2147483648));
+
+ assertTrue(Array.prototype.includes.call(array, 0.0, 2));
+ assertTrue(Array.prototype.includes.call(array, 0.0, 3));
+ assertTrue(Array.prototype.includes.call(array, 0, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0.0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0.0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, -1, 7));
+ assertFalse(Array.prototype.includes.call(array, -1, 8));
+
+ assertTrue(Array.prototype.includes.call(array, 768, 8));
+ assertFalse(Array.prototype.includes.call(array, 768, 9));
+
+ assertTrue(Array.prototype.includes.call(array, -7849, 9));
+ },
+
+ Detached_Int32Array() {
+ var array = new Int32Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Uint32Array() {
+ var array = new Uint32Array([-1, 4294967296,
+ NaN /* 0 */, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* 4294967295 */,
+ 4294968064 /* 768 */,
+ 4295079447 /* 112151 */]);
+ assertFalse(Array.prototype.includes.call(array, -1));
+ assertFalse(Array.prototype.includes.call(array, 4294967296));
+
+ assertTrue(Array.prototype.includes.call(array, 0.0, 2));
+ assertTrue(Array.prototype.includes.call(array, 0.0, 3));
+ assertTrue(Array.prototype.includes.call(array, 0, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0.0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0.0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, 4294967295, 7));
+ assertFalse(Array.prototype.includes.call(array, 4294967295, 8));
+
+ assertTrue(Array.prototype.includes.call(array, 768, 8));
+ assertFalse(Array.prototype.includes.call(array, 768, 9));
+
+ assertTrue(Array.prototype.includes.call(array, 112151, 9));
+ },
+
+ Detached_Uint32Array() {
+ var array = new Uint32Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Float32Array() {
+ var array = new Float32Array([-1, 4294967296,
+ NaN, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* 34359738368.0 */,
+ -4294968064 /* -4294968320.0 */,
+ 4295079447 /* 4295079424.0 */]);
+ assertTrue(Array.prototype.includes.call(array, -1.0));
+ assertTrue(Array.prototype.includes.call(array, 4294967296));
+
+ assertTrue(Array.prototype.includes.call(array, NaN, 2));
+ assertTrue(Array.prototype.includes.call(array, Infinity, 3));
+ assertTrue(Array.prototype.includes.call(array, -Infinity, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0.0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, 34359738368.0, 7));
+ assertFalse(Array.prototype.includes.call(array, 34359738368.0, 8));
+
+ assertTrue(Array.prototype.includes.call(array, -4294968320.0, 8));
+ assertFalse(Array.prototype.includes.call(array, -4294968320.0, 9));
+
+ assertTrue(Array.prototype.includes.call(array, 4295079424.0, 9));
+ },
+
+ Detached_Float32Array() {
+ var array = new Float32Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+
+ Float64Array() {
+ var array = new Float64Array([-1, 4294967296,
+ NaN, Infinity /* 0 */,
+ -Infinity /* 0 */, -0 /* 0 */, +0 /* 0 */,
+ 0x7FFFFFFFF /* 34359738367.0 */,
+ -4294968064 /* -4294968064.0 */,
+ 4295079447 /* 4295079447.0 */]);
+ assertTrue(Array.prototype.includes.call(array, -1.0));
+ assertTrue(Array.prototype.includes.call(array, 4294967296));
+
+ assertTrue(Array.prototype.includes.call(array, NaN, 2));
+ assertTrue(Array.prototype.includes.call(array, Infinity, 3));
+ assertTrue(Array.prototype.includes.call(array, -Infinity, 4));
+ assertTrue(Array.prototype.includes.call(array, 0, 5));
+ assertTrue(Array.prototype.includes.call(array, 0, 6));
+ assertFalse(Array.prototype.includes.call(array, 0.0, 7));
+
+ assertTrue(Array.prototype.includes.call(array, 34359738367.0, 7));
+ assertFalse(Array.prototype.includes.call(array, 34359738367.0, 8));
+
+ assertTrue(Array.prototype.includes.call(array, -4294968064.0, 8));
+ assertFalse(Array.prototype.includes.call(array, -4294968064.0, 9));
+
+ assertTrue(Array.prototype.includes.call(array, 4295079447.0, 9));
+ },
+
+ Detached_Float64Array() {
+ var array = new Float64Array(10);
+ %ArrayBufferNeuter(array.buffer);
+ assertFalse(Array.prototype.includes.call(array, 0));
+ assertFalse(Array.prototype.includes.call(array, 0, 10));
+ },
+ }
+};
+
+function runSuites(suites) {
+ Object.keys(suites).forEach(suite => runSuite(suites[suite]));
+
+ function runSuite(suite) {
+ Object.keys(suite).forEach(test => suite[test]());
+ }
+}
+
+runSuites(kTests);
diff --git a/deps/v8/test/mjsunit/es7/array-includes.js b/deps/v8/test/mjsunit/es7/array-includes.js
index 303042a4c1..3981797a7c 100644
--- a/deps/v8/test/mjsunit/es7/array-includes.js
+++ b/deps/v8/test/mjsunit/es7/array-includes.js
@@ -673,3 +673,8 @@
assertFalse(Array.prototype.includes.call(new Uint8Array([1, 2, 3]), 4));
assertFalse(Array.prototype.includes.call(new Uint8Array([1, 2, 3]), 2, 2));
})();
+
+
+(function testUnscopable() {
+ assertTrue(Array.prototype[Symbol.unscopables].includes);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/exponentiation-operator.js b/deps/v8/test/mjsunit/es7/exponentiation-operator.js
index 543e1046c9..9d934bdaac 100644
--- a/deps/v8/test/mjsunit/harmony/exponentiation-operator.js
+++ b/deps/v8/test/mjsunit/es7/exponentiation-operator.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-exponentiation-operator
-
function TestBasic() {
assertEquals(-(8 ** 2), -64);
assertEquals(+(8 ** 2), 64);
diff --git a/deps/v8/test/mjsunit/es7/object-observe-runtime.js b/deps/v8/test/mjsunit/es7/object-observe-runtime.js
deleted file mode 100644
index 1a07141af6..0000000000
--- a/deps/v8/test/mjsunit/es7/object-observe-runtime.js
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-object-observe
-// Flags: --allow-natives-syntax
-
-// These tests are meant to ensure that that the Object.observe runtime
-// functions are hardened.
-
-var obj = {};
-%SetIsObserved(obj);
-assertThrows(function() {
- %SetIsObserved(obj);
-});
-
-assertThrows(function() {
- %SetIsObserved(this);
-});
diff --git a/deps/v8/test/mjsunit/es7/object-observe.js b/deps/v8/test/mjsunit/es7/object-observe.js
deleted file mode 100644
index a558c51421..0000000000
--- a/deps/v8/test/mjsunit/es7/object-observe.js
+++ /dev/null
@@ -1,1865 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-object-observe
-// Flags: --allow-natives-syntax
-
-var allObservers = [];
-function reset() {
- allObservers.forEach(function(observer) { observer.reset(); });
-}
-
-function stringifyNoThrow(arg) {
- try {
- return JSON.stringify(arg);
- } catch (e) {
- return '{<circular reference>}';
- }
-}
-
-function createObserver() {
- "use strict"; // So that |this| in callback can be undefined.
-
- var observer = {
- records: undefined,
- callbackCount: 0,
- reset: function() {
- this.records = undefined;
- this.callbackCount = 0;
- },
- assertNotCalled: function() {
- assertEquals(undefined, this.records);
- assertEquals(0, this.callbackCount);
- },
- assertCalled: function() {
- assertEquals(1, this.callbackCount);
- },
- assertRecordCount: function(count) {
- this.assertCalled();
- assertEquals(count, this.records.length);
- },
- assertCallbackRecords: function(recs) {
- this.assertRecordCount(recs.length);
- for (var i = 0; i < recs.length; i++) {
- if ('name' in recs[i]) recs[i].name = String(recs[i].name);
- print(i, stringifyNoThrow(this.records[i]), stringifyNoThrow(recs[i]));
- assertSame(this.records[i].object, recs[i].object);
- assertEquals('string', typeof recs[i].type);
- assertPropertiesEqual(this.records[i], recs[i]);
- }
- }
- };
-
- observer.callback = function(r) {
- assertEquals(undefined, this);
- assertEquals('object', typeof r);
- assertTrue(r instanceof Array)
- observer.records = r;
- observer.callbackCount++;
- };
-
- observer.reset();
- allObservers.push(observer);
- return observer;
-}
-
-var observer = createObserver();
-var observer2 = createObserver();
-
-assertEquals("function", typeof observer.callback);
-assertEquals("function", typeof observer2.callback);
-
-var obj = {};
-
-function frozenFunction() {}
-Object.freeze(frozenFunction);
-var nonFunction = {};
-var changeRecordWithAccessor = { type: 'foo' };
-var recordCreated = false;
-Object.defineProperty(changeRecordWithAccessor, 'name', {
- get: function() {
- recordCreated = true;
- return "bar";
- },
- enumerable: true
-})
-
-
-// Object.observe
-assertThrows(function() { Object.observe("non-object", observer.callback); },
- TypeError);
-assertThrows(function() { Object.observe(this, observer.callback); },
- TypeError);
-assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
-assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
-assertEquals(obj, Object.observe(obj, observer.callback, [1]));
-assertEquals(obj, Object.observe(obj, observer.callback, [true]));
-assertEquals(obj, Object.observe(obj, observer.callback, ['foo', null]));
-assertEquals(obj, Object.observe(obj, observer.callback, [undefined]));
-assertEquals(obj, Object.observe(obj, observer.callback,
- ['foo', 'bar', 'baz']));
-assertEquals(obj, Object.observe(obj, observer.callback, []));
-assertEquals(obj, Object.observe(obj, observer.callback, undefined));
-assertEquals(obj, Object.observe(obj, observer.callback));
-
-// Object.unobserve
-assertThrows(function() { Object.unobserve(4, observer.callback); }, TypeError);
-assertThrows(function() { Object.unobserve(this, observer.callback); },
- TypeError);
-assertThrows(function() { Object.unobserve(obj, nonFunction); }, TypeError);
-assertEquals(obj, Object.unobserve(obj, observer.callback));
-
-
-// Object.getNotifier
-var notifier = Object.getNotifier(obj);
-assertSame(notifier, Object.getNotifier(obj));
-assertEquals(null, Object.getNotifier(Object.freeze({})));
-assertThrows(function() { Object.getNotifier(this) }, TypeError);
-assertFalse(notifier.hasOwnProperty('notify'));
-assertEquals([], Object.keys(notifier));
-var notifyDesc = Object.getOwnPropertyDescriptor(notifier.__proto__, 'notify');
-assertTrue(notifyDesc.configurable);
-assertTrue(notifyDesc.writable);
-assertFalse(notifyDesc.enumerable);
-assertThrows(function() { notifier.notify({}); }, TypeError);
-assertThrows(function() { notifier.notify({ type: 4 }); }, TypeError);
-
-assertThrows(function() { notifier.performChange(1, function(){}); }, TypeError);
-assertThrows(function() { notifier.performChange(undefined, function(){}); }, TypeError);
-assertThrows(function() { notifier.performChange('foo', undefined); }, TypeError);
-assertThrows(function() { notifier.performChange('foo', 'bar'); }, TypeError);
-var global = this;
-notifier.performChange('foo', function() {
- assertEquals(global, this);
-});
-
-var notify = notifier.notify;
-assertThrows(function() { notify.call(undefined, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call(null, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call(5, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call('hello', { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call(false, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call({}, { type: 'a' }); }, TypeError);
-assertFalse(recordCreated);
-notifier.notify(changeRecordWithAccessor);
-assertFalse(recordCreated); // not observed yet
-
-
-// Object.deliverChangeRecords
-assertThrows(function() { Object.deliverChangeRecords(nonFunction); }, TypeError);
-
-Object.observe(obj, observer.callback);
-
-
-// notify uses [[CreateOwnProperty]] to create the changeRecord.
-reset();
-var protoExpandoAccessed = false;
-Object.defineProperty(Object.prototype, 'protoExpando',
- {
- configurable: true,
- set: function() { protoExpandoAccessed = true; }
- }
-);
-notifier.notify({ type: 'foo', protoExpando: 'val'});
-assertFalse(protoExpandoAccessed);
-delete Object.prototype.protoExpando;
-Object.deliverChangeRecords(observer.callback);
-
-
-// Multiple records are delivered.
-reset();
-notifier.notify({
- type: 'update',
- name: 'foo',
- expando: 1
-});
-
-notifier.notify({
- object: notifier, // object property is ignored
- type: 'delete',
- name: 'bar',
- expando2: 'str'
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: 'foo', type: 'update', expando: 1 },
- { object: obj, name: 'bar', type: 'delete', expando2: 'str' }
-]);
-
-// Non-string accept values are coerced to strings
-reset();
-Object.observe(obj, observer.callback, [true, 1, null, undefined]);
-notifier = Object.getNotifier(obj);
-notifier.notify({ type: 'true' });
-notifier.notify({ type: 'false' });
-notifier.notify({ type: '1' });
-notifier.notify({ type: '-1' });
-notifier.notify({ type: 'null' });
-notifier.notify({ type: 'nill' });
-notifier.notify({ type: 'undefined' });
-notifier.notify({ type: 'defined' });
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'true' },
- { object: obj, type: '1' },
- { object: obj, type: 'null' },
- { object: obj, type: 'undefined' }
-]);
-
-// No delivery takes place if no records are pending
-reset();
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-
-// Observing the same object multiple times has no effect.
-reset();
-Object.observe(obj, observer.callback);
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCalled();
-
-
-// Observation can be stopped.
-reset();
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-
-// Unobserving multiple times has no effect.
-reset();
-Object.unobserve(obj, observer.callback);
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-
-// Re-observation works and only includes changeRecords after the call.
-reset();
-Object.getNotifier(obj).notify({
- type: 'update',
-});
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
-});
-records = undefined;
-Object.deliverChangeRecords(observer.callback);
-observer.assertRecordCount(1);
-
-// Get notifier prior to observing
-reset();
-var obj = {};
-Object.getNotifier(obj);
-Object.observe(obj, observer.callback);
-obj.id = 1;
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'add', name: 'id' },
-]);
-
-// The empty-string property is observable
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-obj[''] = '';
-obj[''] = ' ';
-delete obj[''];
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'add', name: '' },
- { object: obj, type: 'update', name: '', oldValue: '' },
- { object: obj, type: 'delete', name: '', oldValue: ' ' },
-]);
-
-// Object.preventExtensions
-reset();
-var obj = { foo: 'bar'};
-Object.observe(obj, observer.callback);
-obj.baz = 'bat';
-Object.preventExtensions(obj);
-
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'add', name: 'baz' },
- { object: obj, type: 'preventExtensions' },
-]);
-
-reset();
-var obj = { foo: 'bar'};
-Object.preventExtensions(obj);
-Object.observe(obj, observer.callback);
-Object.preventExtensions(obj);
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-// Object.freeze
-reset();
-var obj = { a: 'a' };
-Object.defineProperty(obj, 'b', {
- writable: false,
- configurable: true,
- value: 'b'
-});
-Object.defineProperty(obj, 'c', {
- writable: true,
- configurable: false,
- value: 'c'
-});
-Object.defineProperty(obj, 'd', {
- writable: false,
- configurable: false,
- value: 'd'
-});
-Object.observe(obj, observer.callback);
-Object.freeze(obj);
-
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'preventExtensions' },
- { object: obj, type: 'reconfigure', name: 'a' },
- { object: obj, type: 'reconfigure', name: 'b' },
- { object: obj, type: 'reconfigure', name: 'c' },
-]);
-
-reset();
-var obj = { foo: 'bar'};
-Object.freeze(obj);
-Object.observe(obj, observer.callback);
-Object.freeze(obj);
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-// Object.seal
-reset();
-var obj = { a: 'a' };
-Object.defineProperty(obj, 'b', {
- writable: false,
- configurable: true,
- value: 'b'
-});
-Object.defineProperty(obj, 'c', {
- writable: true,
- configurable: false,
- value: 'c'
-});
-Object.defineProperty(obj, 'd', {
- writable: false,
- configurable: false,
- value: 'd'
-});
-Object.observe(obj, observer.callback);
-Object.seal(obj);
-
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'preventExtensions' },
- { object: obj, type: 'reconfigure', name: 'a' },
- { object: obj, type: 'reconfigure', name: 'b' },
-]);
-
-reset();
-var obj = { foo: 'bar'};
-Object.seal(obj);
-Object.observe(obj, observer.callback);
-Object.seal(obj);
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-// Observing a continuous stream of changes, while intermittently unobserving.
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
- val: 1
-});
-
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
- val: 2
-});
-
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
- val: 3
-});
-
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
- val: 4
-});
-
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'update',
- val: 5
-});
-
-Object.unobserve(obj, observer.callback);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'update', val: 1 },
- { object: obj, type: 'update', val: 3 },
- { object: obj, type: 'update', val: 5 }
-]);
-
-// Accept
-reset();
-Object.observe(obj, observer.callback, ['somethingElse']);
-Object.getNotifier(obj).notify({
- type: 'add'
-});
-Object.getNotifier(obj).notify({
- type: 'update'
-});
-Object.getNotifier(obj).notify({
- type: 'delete'
-});
-Object.getNotifier(obj).notify({
- type: 'reconfigure'
-});
-Object.getNotifier(obj).notify({
- type: 'setPrototype'
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-reset();
-Object.observe(obj, observer.callback, ['add', 'delete', 'setPrototype']);
-Object.getNotifier(obj).notify({
- type: 'add'
-});
-Object.getNotifier(obj).notify({
- type: 'update'
-});
-Object.getNotifier(obj).notify({
- type: 'delete'
-});
-Object.getNotifier(obj).notify({
- type: 'delete'
-});
-Object.getNotifier(obj).notify({
- type: 'reconfigure'
-});
-Object.getNotifier(obj).notify({
- type: 'setPrototype'
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'add' },
- { object: obj, type: 'delete' },
- { object: obj, type: 'delete' },
- { object: obj, type: 'setPrototype' }
-]);
-
-reset();
-Object.observe(obj, observer.callback, ['update', 'foo']);
-Object.getNotifier(obj).notify({
- type: 'add'
-});
-Object.getNotifier(obj).notify({
- type: 'update'
-});
-Object.getNotifier(obj).notify({
- type: 'delete'
-});
-Object.getNotifier(obj).notify({
- type: 'foo'
-});
-Object.getNotifier(obj).notify({
- type: 'bar'
-});
-Object.getNotifier(obj).notify({
- type: 'foo'
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'update' },
- { object: obj, type: 'foo' },
- { object: obj, type: 'foo' }
-]);
-
-reset();
-function Thingy(a, b, c) {
- this.a = a;
- this.b = b;
-}
-
-Thingy.MULTIPLY = 'multiply';
-Thingy.INCREMENT = 'increment';
-Thingy.INCREMENT_AND_MULTIPLY = 'incrementAndMultiply';
-
-Thingy.prototype = {
- increment: function(amount) {
- var notifier = Object.getNotifier(this);
-
- var self = this;
- notifier.performChange(Thingy.INCREMENT, function() {
- self.a += amount;
- self.b += amount;
-
- return {
- incremented: amount
- }; // implicit notify
- });
- },
-
- multiply: function(amount) {
- var notifier = Object.getNotifier(this);
-
- var self = this;
- notifier.performChange(Thingy.MULTIPLY, function() {
- self.a *= amount;
- self.b *= amount;
-
- return {
- multiplied: amount
- }; // implicit notify
- });
- },
-
- incrementAndMultiply: function(incAmount, multAmount) {
- var notifier = Object.getNotifier(this);
-
- var self = this;
- notifier.performChange(Thingy.INCREMENT_AND_MULTIPLY, function() {
- self.increment(incAmount);
- self.multiply(multAmount);
-
- return {
- incremented: incAmount,
- multiplied: multAmount
- }; // implicit notify
- });
- }
-}
-
-Thingy.observe = function(thingy, callback) {
- Object.observe(thingy, callback, [Thingy.INCREMENT,
- Thingy.MULTIPLY,
- Thingy.INCREMENT_AND_MULTIPLY,
- 'update']);
-}
-
-Thingy.unobserve = function(thingy, callback) {
- Object.unobserve(thingy);
-}
-
-var thingy = new Thingy(2, 4);
-
-Object.observe(thingy, observer.callback);
-Thingy.observe(thingy, observer2.callback);
-thingy.increment(3); // { a: 5, b: 7 }
-thingy.b++; // { a: 5, b: 8 }
-thingy.multiply(2); // { a: 10, b: 16 }
-thingy.a++; // { a: 11, b: 16 }
-thingy.incrementAndMultiply(2, 2); // { a: 26, b: 36 }
-
-Object.deliverChangeRecords(observer.callback);
-Object.deliverChangeRecords(observer2.callback);
-observer.assertCallbackRecords([
- { object: thingy, type: 'update', name: 'a', oldValue: 2 },
- { object: thingy, type: 'update', name: 'b', oldValue: 4 },
- { object: thingy, type: 'update', name: 'b', oldValue: 7 },
- { object: thingy, type: 'update', name: 'a', oldValue: 5 },
- { object: thingy, type: 'update', name: 'b', oldValue: 8 },
- { object: thingy, type: 'update', name: 'a', oldValue: 10 },
- { object: thingy, type: 'update', name: 'a', oldValue: 11 },
- { object: thingy, type: 'update', name: 'b', oldValue: 16 },
- { object: thingy, type: 'update', name: 'a', oldValue: 13 },
- { object: thingy, type: 'update', name: 'b', oldValue: 18 },
-]);
-observer2.assertCallbackRecords([
- { object: thingy, type: Thingy.INCREMENT, incremented: 3 },
- { object: thingy, type: 'update', name: 'b', oldValue: 7 },
- { object: thingy, type: Thingy.MULTIPLY, multiplied: 2 },
- { object: thingy, type: 'update', name: 'a', oldValue: 10 },
- {
- object: thingy,
- type: Thingy.INCREMENT_AND_MULTIPLY,
- incremented: 2,
- multiplied: 2
- }
-]);
-
-// ArrayPush cached stub
-reset();
-
-function pushMultiple(arr) {
- arr.push('a');
- arr.push('b');
- arr.push('c');
-}
-
-for (var i = 0; i < 5; i++) {
- var arr = [];
- pushMultiple(arr);
-}
-
-for (var i = 0; i < 5; i++) {
- reset();
- var arr = [];
- Object.observe(arr, observer.callback);
- pushMultiple(arr);
- Object.unobserve(arr, observer.callback);
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: arr, type: 'add', name: '0' },
- { object: arr, type: 'update', name: 'length', oldValue: 0 },
- { object: arr, type: 'add', name: '1' },
- { object: arr, type: 'update', name: 'length', oldValue: 1 },
- { object: arr, type: 'add', name: '2' },
- { object: arr, type: 'update', name: 'length', oldValue: 2 },
- ]);
-}
-
-
-// ArrayPop cached stub
-reset();
-
-function popMultiple(arr) {
- arr.pop();
- arr.pop();
- arr.pop();
-}
-
-for (var i = 0; i < 5; i++) {
- var arr = ['a', 'b', 'c'];
- popMultiple(arr);
-}
-
-for (var i = 0; i < 5; i++) {
- reset();
- var arr = ['a', 'b', 'c'];
- Object.observe(arr, observer.callback);
- popMultiple(arr);
- Object.unobserve(arr, observer.callback);
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: arr, type: 'delete', name: '2', oldValue: 'c' },
- { object: arr, type: 'update', name: 'length', oldValue: 3 },
- { object: arr, type: 'delete', name: '1', oldValue: 'b' },
- { object: arr, type: 'update', name: 'length', oldValue: 2 },
- { object: arr, type: 'delete', name: '0', oldValue: 'a' },
- { object: arr, type: 'update', name: 'length', oldValue: 1 },
- ]);
-}
-
-
-reset();
-function RecursiveThingy() {}
-
-RecursiveThingy.MULTIPLY_FIRST_N = 'multiplyFirstN';
-
-RecursiveThingy.prototype = {
- __proto__: Array.prototype,
-
- multiplyFirstN: function(amount, n) {
- if (!n)
- return;
- var notifier = Object.getNotifier(this);
- var self = this;
- notifier.performChange(RecursiveThingy.MULTIPLY_FIRST_N, function() {
- self[n-1] = self[n-1]*amount;
- self.multiplyFirstN(amount, n-1);
- });
-
- notifier.notify({
- type: RecursiveThingy.MULTIPLY_FIRST_N,
- multiplied: amount,
- n: n
- });
- },
-}
-
-RecursiveThingy.observe = function(thingy, callback) {
- Object.observe(thingy, callback, [RecursiveThingy.MULTIPLY_FIRST_N]);
-}
-
-RecursiveThingy.unobserve = function(thingy, callback) {
- Object.unobserve(thingy);
-}
-
-var thingy = new RecursiveThingy;
-thingy.push(1, 2, 3, 4);
-
-Object.observe(thingy, observer.callback);
-RecursiveThingy.observe(thingy, observer2.callback);
-thingy.multiplyFirstN(2, 3); // [2, 4, 6, 4]
-
-Object.deliverChangeRecords(observer.callback);
-Object.deliverChangeRecords(observer2.callback);
-observer.assertCallbackRecords([
- { object: thingy, type: 'update', name: '2', oldValue: 3 },
- { object: thingy, type: 'update', name: '1', oldValue: 2 },
- { object: thingy, type: 'update', name: '0', oldValue: 1 }
-]);
-observer2.assertCallbackRecords([
- { object: thingy, type: RecursiveThingy.MULTIPLY_FIRST_N, multiplied: 2, n: 3 }
-]);
-
-reset();
-function DeckSuit() {
- this.push('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'A', 'Q', 'K');
-}
-
-DeckSuit.SHUFFLE = 'shuffle';
-
-DeckSuit.prototype = {
- __proto__: Array.prototype,
-
- shuffle: function() {
- var notifier = Object.getNotifier(this);
- var self = this;
- notifier.performChange(DeckSuit.SHUFFLE, function() {
- self.reverse();
- self.sort(function() { return Math.random()* 2 - 1; });
- var cut = self.splice(0, 6);
- Array.prototype.push.apply(self, cut);
- self.reverse();
- self.sort(function() { return Math.random()* 2 - 1; });
- var cut = self.splice(0, 6);
- Array.prototype.push.apply(self, cut);
- self.reverse();
- self.sort(function() { return Math.random()* 2 - 1; });
- });
-
- notifier.notify({
- type: DeckSuit.SHUFFLE
- });
- },
-}
-
-DeckSuit.observe = function(thingy, callback) {
- Object.observe(thingy, callback, [DeckSuit.SHUFFLE]);
-}
-
-DeckSuit.unobserve = function(thingy, callback) {
- Object.unobserve(thingy);
-}
-
-var deck = new DeckSuit;
-
-DeckSuit.observe(deck, observer2.callback);
-deck.shuffle();
-
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: deck, type: DeckSuit.SHUFFLE }
-]);
-
-// Observing multiple objects; records appear in order.
-reset();
-var obj2 = {};
-var obj3 = {}
-Object.observe(obj, observer.callback);
-Object.observe(obj3, observer.callback);
-Object.observe(obj2, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'add',
-});
-Object.getNotifier(obj2).notify({
- type: 'update',
-});
-Object.getNotifier(obj3).notify({
- type: 'delete',
-});
-Object.observe(obj3, observer.callback);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'add' },
- { object: obj2, type: 'update' },
- { object: obj3, type: 'delete' }
-]);
-
-
-// Recursive observation.
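-// The callback mutates the observed object again, so each delivery
-// schedules another one until oldValue reaches 100; the callback
-// therefore runs exactly 100 times.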
-var obj = {a: 1};
-var callbackCount = 0;
-function recursiveObserver(r) {
- assertEquals(1, r.length);
- ++callbackCount;
- if (r[0].oldValue < 100) ++obj[r[0].name];
-}
-Object.observe(obj, recursiveObserver);
-++obj.a;
-Object.deliverChangeRecords(recursiveObserver);
-assertEquals(100, callbackCount);
-
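-// Two objects sharing one callback: the first delivery carries a single
-// record, and every later delivery carries one record per object, so the
-// total is 1 + 99 * 2 = 199 records.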
-var obj1 = {a: 1};
-var obj2 = {a: 1};
-var recordCount = 0;
-function recursiveObserver2(r) {
- recordCount += r.length;
- if (r[0].oldValue < 100) {
- ++obj1.a;
- ++obj2.a;
- }
-}
-Object.observe(obj1, recursiveObserver2);
-Object.observe(obj2, recursiveObserver2);
-++obj1.a;
-Object.deliverChangeRecords(recursiveObserver2);
-assertEquals(199, recordCount);
-
-
-// Observing named properties.
-reset();
-var obj = {a: 1}
-Object.observe(obj, observer.callback);
-obj.a = 2;
-obj["a"] = 3;
-delete obj.a;
-obj.a = 4;
-obj.a = 4; // ignored
-obj.a = 5;
-Object.defineProperty(obj, "a", {value: 6});
-Object.defineProperty(obj, "a", {writable: false});
-obj.a = 7; // ignored
-Object.defineProperty(obj, "a", {value: 8});
-Object.defineProperty(obj, "a", {value: 7, writable: true});
-Object.defineProperty(obj, "a", {get: function() {}});
-Object.defineProperty(obj, "a", {get: frozenFunction});
-Object.defineProperty(obj, "a", {get: frozenFunction}); // ignored
-Object.defineProperty(obj, "a", {get: frozenFunction, set: frozenFunction});
-Object.defineProperty(obj, "a", {set: frozenFunction}); // ignored
-Object.defineProperty(obj, "a", {get: undefined, set: frozenFunction});
-delete obj.a;
-delete obj.a;
-Object.defineProperty(obj, "a", {get: function() {}, configurable: true});
-Object.defineProperty(obj, "a", {value: 9, writable: true});
-obj.a = 10;
-++obj.a;
-obj.a++;
-obj.a *= 3;
-delete obj.a;
-Object.defineProperty(obj, "a", {value: 11, configurable: true});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "a", type: "update", oldValue: 1 },
- { object: obj, name: "a", type: "update", oldValue: 2 },
- { object: obj, name: "a", type: "delete", oldValue: 3 },
- { object: obj, name: "a", type: "add" },
- { object: obj, name: "a", type: "update", oldValue: 4 },
- { object: obj, name: "a", type: "update", oldValue: 5 },
- { object: obj, name: "a", type: "reconfigure" },
- { object: obj, name: "a", type: "update", oldValue: 6 },
- { object: obj, name: "a", type: "reconfigure", oldValue: 8 },
- { object: obj, name: "a", type: "reconfigure", oldValue: 7 },
- { object: obj, name: "a", type: "reconfigure" },
- { object: obj, name: "a", type: "reconfigure" },
- { object: obj, name: "a", type: "reconfigure" },
- { object: obj, name: "a", type: "delete" },
- { object: obj, name: "a", type: "add" },
- { object: obj, name: "a", type: "reconfigure" },
- { object: obj, name: "a", type: "update", oldValue: 9 },
- { object: obj, name: "a", type: "update", oldValue: 10 },
- { object: obj, name: "a", type: "update", oldValue: 11 },
- { object: obj, name: "a", type: "update", oldValue: 12 },
- { object: obj, name: "a", type: "delete", oldValue: 36 },
- { object: obj, name: "a", type: "add" },
-]);
-
-
-// Observing indexed properties.
-reset();
-var obj = {'1': 1}
-Object.observe(obj, observer.callback);
-obj[1] = 2;
-obj[1] = 3;
-delete obj[1];
-obj[1] = 4;
-obj[1] = 4; // ignored
-obj[1] = 5;
-Object.defineProperty(obj, "1", {value: 6});
-Object.defineProperty(obj, "1", {writable: false});
-obj[1] = 7; // ignored
-Object.defineProperty(obj, "1", {value: 8});
-Object.defineProperty(obj, "1", {value: 7, writable: true});
-Object.defineProperty(obj, "1", {get: function() {}});
-Object.defineProperty(obj, "1", {get: frozenFunction});
-Object.defineProperty(obj, "1", {get: frozenFunction}); // ignored
-Object.defineProperty(obj, "1", {get: frozenFunction, set: frozenFunction});
-Object.defineProperty(obj, "1", {set: frozenFunction}); // ignored
-Object.defineProperty(obj, "1", {get: undefined, set: frozenFunction});
-delete obj[1];
-delete obj[1];
-Object.defineProperty(obj, "1", {get: function() {}, configurable: true});
-Object.defineProperty(obj, "1", {value: 9, writable: true});
-obj[1] = 10;
-++obj[1];
-obj[1]++;
-obj[1] *= 3;
-delete obj[1];
-Object.defineProperty(obj, "1", {value: 11, configurable: true});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "1", type: "update", oldValue: 1 },
- { object: obj, name: "1", type: "update", oldValue: 2 },
- { object: obj, name: "1", type: "delete", oldValue: 3 },
- { object: obj, name: "1", type: "add" },
- { object: obj, name: "1", type: "update", oldValue: 4 },
- { object: obj, name: "1", type: "update", oldValue: 5 },
- { object: obj, name: "1", type: "reconfigure" },
- { object: obj, name: "1", type: "update", oldValue: 6 },
- { object: obj, name: "1", type: "reconfigure", oldValue: 8 },
- { object: obj, name: "1", type: "reconfigure", oldValue: 7 },
- { object: obj, name: "1", type: "reconfigure" },
- { object: obj, name: "1", type: "reconfigure" },
- { object: obj, name: "1", type: "reconfigure" },
- { object: obj, name: "1", type: "delete" },
- { object: obj, name: "1", type: "add" },
- { object: obj, name: "1", type: "reconfigure" },
- { object: obj, name: "1", type: "update", oldValue: 9 },
- { object: obj, name: "1", type: "update", oldValue: 10 },
- { object: obj, name: "1", type: "update", oldValue: 11 },
- { object: obj, name: "1", type: "update", oldValue: 12 },
- { object: obj, name: "1", type: "delete", oldValue: 36 },
- { object: obj, name: "1", type: "add" },
-]);
-
-
-// Observing symbol properties (not).
-print("*****")
-reset();
-var obj = {}
-var symbol = Symbol("secret");
-Object.observe(obj, observer.callback);
-obj[symbol] = 3;
-delete obj[symbol];
-Object.defineProperty(obj, symbol, {get: function() {}, configurable: true});
-Object.defineProperty(obj, symbol, {value: 6});
-Object.defineProperty(obj, symbol, {writable: false});
-delete obj[symbol];
-Object.defineProperty(obj, symbol, {value: 7});
-++obj[symbol];
-obj[symbol]++;
-obj[symbol] *= 3;
-delete obj[symbol];
-obj.__defineSetter__(symbol, function() {});
-obj.__defineGetter__(symbol, function() {});
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-
-// Test all kinds of objects generically.
-function TestObserveConfigurable(obj, prop) {
- reset();
- Object.observe(obj, observer.callback);
- Object.unobserve(obj, observer.callback);
- obj[prop] = 1;
- Object.observe(obj, observer.callback);
- obj[prop] = 2;
- obj[prop] = 3;
- delete obj[prop];
- obj[prop] = 4;
- obj[prop] = 4; // ignored
- obj[prop] = 5;
- Object.defineProperty(obj, prop, {value: 6});
- Object.defineProperty(obj, prop, {writable: false});
- obj[prop] = 7; // ignored
- Object.defineProperty(obj, prop, {value: 8});
- Object.defineProperty(obj, prop, {value: 7, writable: true});
- Object.defineProperty(obj, prop, {get: function() {}});
- Object.defineProperty(obj, prop, {get: frozenFunction});
- Object.defineProperty(obj, prop, {get: frozenFunction}); // ignored
- Object.defineProperty(obj, prop, {get: frozenFunction, set: frozenFunction});
- Object.defineProperty(obj, prop, {set: frozenFunction}); // ignored
- Object.defineProperty(obj, prop, {get: undefined, set: frozenFunction});
- obj.__defineSetter__(prop, frozenFunction); // ignored
- obj.__defineSetter__(prop, function() {});
- obj.__defineGetter__(prop, function() {});
- delete obj[prop];
- delete obj[prop]; // ignored
- obj.__defineGetter__(prop, function() {});
- delete obj[prop];
- Object.defineProperty(obj, prop, {get: function() {}, configurable: true});
- Object.defineProperty(obj, prop, {value: 9, writable: true});
- obj[prop] = 10;
- ++obj[prop];
- obj[prop]++;
- obj[prop] *= 3;
- delete obj[prop];
- Object.defineProperty(obj, prop, {value: 11, configurable: true});
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: obj, name: prop, type: "update", oldValue: 1 },
- { object: obj, name: prop, type: "update", oldValue: 2 },
- { object: obj, name: prop, type: "delete", oldValue: 3 },
- { object: obj, name: prop, type: "add" },
- { object: obj, name: prop, type: "update", oldValue: 4 },
- { object: obj, name: prop, type: "update", oldValue: 5 },
- { object: obj, name: prop, type: "reconfigure" },
- { object: obj, name: prop, type: "update", oldValue: 6 },
- { object: obj, name: prop, type: "reconfigure", oldValue: 8 },
- { object: obj, name: prop, type: "reconfigure", oldValue: 7 },
- { object: obj, name: prop, type: "reconfigure" },
- { object: obj, name: prop, type: "reconfigure" },
- { object: obj, name: prop, type: "reconfigure" },
- { object: obj, name: prop, type: "reconfigure" },
- { object: obj, name: prop, type: "reconfigure" },
- { object: obj, name: prop, type: "delete" },
- { object: obj, name: prop, type: "add" },
- { object: obj, name: prop, type: "delete" },
- { object: obj, name: prop, type: "add" },
- { object: obj, name: prop, type: "reconfigure" },
- { object: obj, name: prop, type: "update", oldValue: 9 },
- { object: obj, name: prop, type: "update", oldValue: 10 },
- { object: obj, name: prop, type: "update", oldValue: 11 },
- { object: obj, name: prop, type: "update", oldValue: 12 },
- { object: obj, name: prop, type: "delete", oldValue: 36 },
- { object: obj, name: prop, type: "add" },
- ]);
- Object.unobserve(obj, observer.callback);
- delete obj[prop];
-}
-
-function TestObserveNonConfigurable(obj, prop, desc) {
- reset();
- Object.observe(obj, observer.callback);
- Object.unobserve(obj, observer.callback);
- obj[prop] = 1;
- Object.observe(obj, observer.callback);
- obj[prop] = 4;
- obj[prop] = 4; // ignored
- obj[prop] = 5;
- Object.defineProperty(obj, prop, {value: 6});
- Object.defineProperty(obj, prop, {value: 6}); // ignored
- Object.defineProperty(obj, prop, {value: 7});
- Object.defineProperty(obj, prop, {enumerable: desc.enumerable}); // ignored
- Object.defineProperty(obj, prop, {writable: false});
- obj[prop] = 7; // ignored
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: obj, name: prop, type: "update", oldValue: 1 },
- { object: obj, name: prop, type: "update", oldValue: 4 },
- { object: obj, name: prop, type: "update", oldValue: 5 },
- { object: obj, name: prop, type: "update", oldValue: 6 },
- { object: obj, name: prop, type: "reconfigure" },
- ]);
- Object.unobserve(obj, observer.callback);
-}
-
-// TODO(rafaelw) Enable when ES6 Proxies are implemented
-/*
-function createProxy(create, x) {
- var handler = {
- getPropertyDescriptor: function(k) {
- for (var o = this.target; o; o = Object.getPrototypeOf(o)) {
- var desc = Object.getOwnPropertyDescriptor(o, k);
- if (desc) return desc;
- }
- return undefined;
- },
- getOwnPropertyDescriptor: function(k) {
- return Object.getOwnPropertyDescriptor(this.target, k);
- },
- defineProperty: function(k, desc) {
- var x = Object.defineProperty(this.target, k, desc);
- Object.deliverChangeRecords(this.callback);
- return x;
- },
- delete: function(k) {
- var x = delete this.target[k];
- Object.deliverChangeRecords(this.callback);
- return x;
- },
- getPropertyNames: function() {
- return Object.getOwnPropertyNames(this.target);
- },
- target: {isProxy: true},
- callback: function(changeRecords) {
- print("callback", stringifyNoThrow(handler.proxy), stringifyNoThrow(got));
- for (var i in changeRecords) {
- var got = changeRecords[i];
- var change = {object: handler.proxy, name: got.name, type: got.type};
- if ("oldValue" in got) change.oldValue = got.oldValue;
- Object.getNotifier(handler.proxy).notify(change);
- }
- },
- };
- Object.observe(handler.target, handler.callback);
- return handler.proxy = create(handler, x);
-}
-*/
-
-var objects = [
- {},
- [],
- function(){},
- (function(){ return arguments })(),
- (function(){ "use strict"; return arguments })(),
- Object(1), Object(true), Object("bla"),
- new Date(),
- Object, Function, Date, RegExp,
- new Set, new Map, new WeakMap,
- new ArrayBuffer(10), new Int32Array(5)
-// TODO(rafaelw) Enable when ES6 Proxies are implemented.
-// createProxy(Proxy.create, null),
-// createProxy(Proxy.createFunction, function(){}),
-];
-var properties = ["a", "1", 1, "length", "setPrototype", "name", "caller"];
-
-// Cases that yield non-standard results.
-function blacklisted(obj, prop) {
- return (obj instanceof Int32Array && prop == 1) ||
- (obj instanceof Int32Array && prop === "length") ||
- (obj instanceof ArrayBuffer && prop == 1) ||
- (obj instanceof Function && prop === "name") || // Has its own test.
- (obj instanceof Function && prop === "length"); // Has its own test.
-}
-
-for (var i in objects) for (var j in properties) {
- var obj = objects[i];
- var prop = properties[j];
- if (blacklisted(obj, prop)) continue;
- var desc = Object.getOwnPropertyDescriptor(obj, prop);
- print("***", typeof obj, stringifyNoThrow(obj), prop);
- if (!desc || desc.configurable)
- TestObserveConfigurable(obj, prop);
- else if (desc.writable)
- TestObserveNonConfigurable(obj, prop, desc);
-}
-
-
-// Observing array length (including truncation)
-reset();
-var arr = ['a', 'b', 'c', 'd'];
-var arr2 = ['alpha', 'beta'];
-var arr3 = ['hello'];
-arr3[2] = 'goodbye';
-arr3.length = 6;
-Object.defineProperty(arr, '0', {configurable: false});
-Object.defineProperty(arr, '2', {get: function(){}});
-Object.defineProperty(arr2, '0', {get: function(){}, configurable: false});
-Object.observe(arr, observer.callback);
-Array.observe(arr, observer2.callback);
-Object.observe(arr2, observer.callback);
-Array.observe(arr2, observer2.callback);
-Object.observe(arr3, observer.callback);
-Array.observe(arr3, observer2.callback);
-arr.length = 2;
-arr.length = 0;
-arr.length = 10;
-Object.defineProperty(arr, 'length', {writable: false});
-arr2.length = 0;
-arr2.length = 1; // no change expected
-Object.defineProperty(arr2, 'length', {value: 1, writable: false});
-arr3.length = 0;
-++arr3.length;
-arr3.length++;
-arr3.length /= 2;
-Object.defineProperty(arr3, 'length', {value: 5});
-arr3[4] = 5;
-Object.defineProperty(arr3, 'length', {value: 1, writable: false});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: arr, name: '3', type: 'delete', oldValue: 'd' },
- { object: arr, name: '2', type: 'delete' },
- { object: arr, name: 'length', type: 'update', oldValue: 4 },
- { object: arr, name: '1', type: 'delete', oldValue: 'b' },
- { object: arr, name: 'length', type: 'update', oldValue: 2 },
- { object: arr, name: 'length', type: 'update', oldValue: 1 },
- { object: arr, name: 'length', type: 'reconfigure' },
- { object: arr2, name: '1', type: 'delete', oldValue: 'beta' },
- { object: arr2, name: 'length', type: 'update', oldValue: 2 },
- { object: arr2, name: 'length', type: 'reconfigure' },
- { object: arr3, name: '2', type: 'delete', oldValue: 'goodbye' },
- { object: arr3, name: '0', type: 'delete', oldValue: 'hello' },
- { object: arr3, name: 'length', type: 'update', oldValue: 6 },
- { object: arr3, name: 'length', type: 'update', oldValue: 0 },
- { object: arr3, name: 'length', type: 'update', oldValue: 1 },
- { object: arr3, name: 'length', type: 'update', oldValue: 2 },
- { object: arr3, name: 'length', type: 'update', oldValue: 1 },
- { object: arr3, name: '4', type: 'add' },
- { object: arr3, name: '4', type: 'delete', oldValue: 5 },
- // TODO(rafaelw): It breaks spec compliance to get two records here.
- // When the TODO in v8natives.js::DefineArrayProperty is addressed
- // which prevents DefineProperty from over-writing the magic length
- // property, these will collapse into a single record.
- { object: arr3, name: 'length', type: 'update', oldValue: 5 },
- { object: arr3, name: 'length', type: 'reconfigure' }
-]);
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: arr, type: 'splice', index: 2, removed: [, 'd'], addedCount: 0 },
- { object: arr, type: 'splice', index: 1, removed: ['b'], addedCount: 0 },
- { object: arr, type: 'splice', index: 1, removed: [], addedCount: 9 },
- { object: arr2, type: 'splice', index: 1, removed: ['beta'], addedCount: 0 },
- { object: arr3, type: 'splice', index: 0, removed: ['hello',, 'goodbye',,,,], addedCount: 0 },
- { object: arr3, type: 'splice', index: 0, removed: [], addedCount: 1 },
- { object: arr3, type: 'splice', index: 1, removed: [], addedCount: 1 },
- { object: arr3, type: 'splice', index: 1, removed: [,], addedCount: 0 },
- { object: arr3, type: 'splice', index: 1, removed: [], addedCount: 4 },
- { object: arr3, name: '4', type: 'add' },
- { object: arr3, type: 'splice', index: 1, removed: [,,,5], addedCount: 0 }
-]);
-
-
-// Updating length on large (slow) array
-reset();
-var slow_arr = %NormalizeElements([]);
-slow_arr[500000000] = 'hello';
-slow_arr.length = 1000000000;
-Object.observe(slow_arr, observer.callback);
-var spliceRecords;
-function slowSpliceCallback(records) {
- spliceRecords = records;
-}
-Array.observe(slow_arr, slowSpliceCallback);
-slow_arr.length = 100;
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: slow_arr, name: '500000000', type: 'delete', oldValue: 'hello' },
- { object: slow_arr, name: 'length', type: 'update', oldValue: 1000000000 },
-]);
-Object.deliverChangeRecords(slowSpliceCallback);
-assertEquals(spliceRecords.length, 1);
-// This splice record must be asserted manually because the removed array is huge.
-var splice = spliceRecords[0];
-assertSame(splice.object, slow_arr);
-assertEquals(splice.type, 'splice');
-assertEquals(splice.index, 100);
-assertEquals(splice.addedCount, 0);
-var array_keys = %GetArrayKeys(splice.removed, splice.removed.length);
-assertEquals(array_keys.length, 1);
-assertEquals(array_keys[0], 499999900);
-assertEquals(splice.removed[499999900], 'hello');
-assertEquals(splice.removed.length, 999999900);
-
-
-// Assignments in loops (checking different IC states).
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-for (var i = 0; i < 5; i++) {
- obj["a" + i] = i;
-}
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "a0", type: "add" },
- { object: obj, name: "a1", type: "add" },
- { object: obj, name: "a2", type: "add" },
- { object: obj, name: "a3", type: "add" },
- { object: obj, name: "a4", type: "add" },
-]);
-
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-for (var i = 0; i < 5; i++) {
- obj[i] = i;
-}
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "0", type: "add" },
- { object: obj, name: "1", type: "add" },
- { object: obj, name: "2", type: "add" },
- { object: obj, name: "3", type: "add" },
- { object: obj, name: "4", type: "add" },
-]);
-
-
-// Adding elements past the end of an array should notify on length for
-// Object.observe and emit "splices" for Array.observe.
-reset();
-var arr = [1, 2, 3];
-Object.observe(arr, observer.callback);
-Array.observe(arr, observer2.callback);
-arr[3] = 10;
-arr[100] = 20;
-Object.defineProperty(arr, '200', {value: 7});
-Object.defineProperty(arr, '400', {get: function(){}});
-arr[50] = 30; // no length change expected
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: arr, name: '3', type: 'add' },
- { object: arr, name: 'length', type: 'update', oldValue: 3 },
- { object: arr, name: '100', type: 'add' },
- { object: arr, name: 'length', type: 'update', oldValue: 4 },
- { object: arr, name: '200', type: 'add' },
- { object: arr, name: 'length', type: 'update', oldValue: 101 },
- { object: arr, name: '400', type: 'add' },
- { object: arr, name: 'length', type: 'update', oldValue: 201 },
- { object: arr, name: '50', type: 'add' },
-]);
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: arr, type: 'splice', index: 3, removed: [], addedCount: 1 },
- { object: arr, type: 'splice', index: 4, removed: [], addedCount: 97 },
- { object: arr, type: 'splice', index: 101, removed: [], addedCount: 100 },
- { object: arr, type: 'splice', index: 201, removed: [], addedCount: 200 },
- { object: arr, type: 'add', name: '50' },
-]);
-
-
-// Tests for array methods, first on arrays and then on plain objects
-//
-// === ARRAYS ===
-//
-// Push
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-Array.observe(array, observer2.callback);
-array.push(3, 4);
-array.push(5);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '2', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
- { object: array, name: '3', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 3 },
- { object: array, name: '4', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 4 },
-]);
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: array, type: 'splice', index: 2, removed: [], addedCount: 2 },
- { object: array, type: 'splice', index: 4, removed: [], addedCount: 1 }
-]);
-
-// Pop
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-array.pop();
-array.pop();
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '1', type: 'delete', oldValue: 2 },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
- { object: array, name: '0', type: 'delete', oldValue: 1 },
- { object: array, name: 'length', type: 'update', oldValue: 1 },
-]);
-
-// Shift
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-array.shift();
-array.shift();
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '0', type: 'update', oldValue: 1 },
- { object: array, name: '1', type: 'delete', oldValue: 2 },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
- { object: array, name: '0', type: 'delete', oldValue: 2 },
- { object: array, name: 'length', type: 'update', oldValue: 1 },
-]);
-
-// Unshift
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-array.unshift(3, 4);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '3', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
- { object: array, name: '2', type: 'add' },
- { object: array, name: '0', type: 'update', oldValue: 1 },
- { object: array, name: '1', type: 'update', oldValue: 2 },
-]);
-
-// Splice
-reset();
-var array = [1, 2, 3];
-Object.observe(array, observer.callback);
-array.splice(1, 1, 4, 5);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '3', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 3 },
- { object: array, name: '1', type: 'update', oldValue: 2 },
- { object: array, name: '2', type: 'update', oldValue: 3 },
-]);
-
-// Sort
-reset();
-var array = [3, 2, 1];
-Object.observe(array, observer.callback);
-array.sort();
-assertEquals(1, array[0]);
-assertEquals(2, array[1]);
-assertEquals(3, array[2]);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '1', type: 'update', oldValue: 2 },
- { object: array, name: '0', type: 'update', oldValue: 3 },
- { object: array, name: '2', type: 'update', oldValue: 1 },
- { object: array, name: '1', type: 'update', oldValue: 3 },
- { object: array, name: '0', type: 'update', oldValue: 2 },
-]);
-
-// Splice emitted after Array mutation methods
-function MockArray(initial, observer) {
- for (var i = 0; i < initial.length; i++)
- this[i] = initial[i];
-
- this.length_ = initial.length;
- this.observer = observer;
-}
-MockArray.prototype = {
- set length(length) {
- Object.getNotifier(this).notify({ type: 'lengthChange' });
- this.length_ = length;
- Object.observe(this, this.observer.callback, ['splice']);
- },
- get length() {
- return this.length_;
- }
-}
-
-reset();
-var array = new MockArray([], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.push.call(array, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, type: 'lengthChange' },
- { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
-]);
-
-reset();
-var array = new MockArray([1], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.pop.call(array);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, type: 'lengthChange' },
- { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
-]);
-
-reset();
-var array = new MockArray([1], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.shift.call(array);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, type: 'lengthChange' },
- { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
-]);
-
-reset();
-var array = new MockArray([], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.unshift.call(array, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, type: 'lengthChange' },
- { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 },
-]);
-
-reset();
-var array = new MockArray([0, 1, 2], observer);
-Object.observe(array, observer.callback, ['lengthChange']);
-Array.prototype.splice.call(array, 1, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, type: 'lengthChange' },
- { object: array, type: 'splice', index: 1, removed: [1], addedCount: 0 },
-]);
-
-//
-// === PLAIN OBJECTS ===
-//
-// Push
-reset()
-var array = {0: 1, 1: 2, length: 2}
-Object.observe(array, observer.callback);
-Array.prototype.push.call(array, 3, 4);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '2', type: 'add' },
- { object: array, name: '3', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
-]);
-
-// Pop
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-Array.observe(array, observer2.callback);
-array.pop();
-array.pop();
-array.pop();
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '1', type: 'delete', oldValue: 2 },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
- { object: array, name: '0', type: 'delete', oldValue: 1 },
- { object: array, name: 'length', type: 'update', oldValue: 1 },
-]);
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: array, type: 'splice', index: 1, removed: [2], addedCount: 0 },
- { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 }
-]);
-
-// Shift
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-Array.observe(array, observer2.callback);
-array.shift();
-array.shift();
-array.shift();
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '0', type: 'update', oldValue: 1 },
- { object: array, name: '1', type: 'delete', oldValue: 2 },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
- { object: array, name: '0', type: 'delete', oldValue: 2 },
- { object: array, name: 'length', type: 'update', oldValue: 1 },
-]);
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: array, type: 'splice', index: 0, removed: [1], addedCount: 0 },
- { object: array, type: 'splice', index: 0, removed: [2], addedCount: 0 }
-]);
-
-// Unshift
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-Array.observe(array, observer2.callback);
-array.unshift(3, 4);
-array.unshift(5);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '3', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 2 },
- { object: array, name: '2', type: 'add' },
- { object: array, name: '0', type: 'update', oldValue: 1 },
- { object: array, name: '1', type: 'update', oldValue: 2 },
- { object: array, name: '4', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 4 },
- { object: array, name: '3', type: 'update', oldValue: 2 },
- { object: array, name: '2', type: 'update', oldValue: 1 },
- { object: array, name: '1', type: 'update', oldValue: 4 },
- { object: array, name: '0', type: 'update', oldValue: 3 },
-]);
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: array, type: 'splice', index: 0, removed: [], addedCount: 2 },
- { object: array, type: 'splice', index: 0, removed: [], addedCount: 1 }
-]);
-
-// Splice
-reset();
-var array = [1, 2, 3];
-Object.observe(array, observer.callback);
-Array.observe(array, observer2.callback);
-array.splice(1, 0, 4, 5); // 1 4 5 2 3
-array.splice(0, 2); // 5 2 3
-array.splice(1, 2, 6, 7); // 5 6 7
-array.splice(2, 0);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '4', type: 'add' },
- { object: array, name: 'length', type: 'update', oldValue: 3 },
- { object: array, name: '3', type: 'add' },
- { object: array, name: '1', type: 'update', oldValue: 2 },
- { object: array, name: '2', type: 'update', oldValue: 3 },
-
- { object: array, name: '0', type: 'update', oldValue: 1 },
- { object: array, name: '1', type: 'update', oldValue: 4 },
- { object: array, name: '2', type: 'update', oldValue: 5 },
- { object: array, name: '4', type: 'delete', oldValue: 3 },
- { object: array, name: '3', type: 'delete', oldValue: 2 },
- { object: array, name: 'length', type: 'update', oldValue: 5 },
-
- { object: array, name: '1', type: 'update', oldValue: 2 },
- { object: array, name: '2', type: 'update', oldValue: 3 },
-]);
-Object.deliverChangeRecords(observer2.callback);
-observer2.assertCallbackRecords([
- { object: array, type: 'splice', index: 1, removed: [], addedCount: 2 },
- { object: array, type: 'splice', index: 0, removed: [1, 4], addedCount: 0 },
- { object: array, type: 'splice', index: 1, removed: [2, 3], addedCount: 2 },
-]);
-
-// Exercise StoreIC_ArrayLength
-reset();
-var dummy = {};
-Object.observe(dummy, observer.callback);
-Object.unobserve(dummy, observer.callback);
-var array = [0];
-Object.observe(array, observer.callback);
-array.splice(0, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '0', type: 'delete', oldValue: 0 },
- { object: array, name: 'length', type: 'update', oldValue: 1},
-]);
-
-
-// __proto__
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-var p = {foo: 'yes'};
-var q = {bar: 'no'};
-obj.__proto__ = p;
-obj.__proto__ = p; // ignored
-obj.__proto__ = null;
-obj.__proto__ = q; // the __proto__ accessor is gone
-// TODO(adamk): Add tests for objects with hidden prototypes
-// once we support observing the global object.
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: '__proto__', type: 'setPrototype',
- oldValue: Object.prototype },
- { object: obj, name: '__proto__', type: 'setPrototype', oldValue: p },
- { object: obj, name: '__proto__', type: 'add' },
-]);
-
-
-// Function.prototype
-reset();
-var fun = function(){};
-Object.observe(fun, observer.callback);
-var myproto = {foo: 'bar'};
-fun.prototype = myproto;
-fun.prototype = 7;
-fun.prototype = 7; // ignored
-Object.defineProperty(fun, 'prototype', {value: 8});
-Object.deliverChangeRecords(observer.callback);
-observer.assertRecordCount(3);
-// Manually examine the first record in order to test
-// lazy creation of oldValue
-assertSame(fun, observer.records[0].object);
-assertEquals('prototype', observer.records[0].name);
-assertEquals('update', observer.records[0].type);
-// The only existing reference to the oldValue object is in this
-// record, so to test that lazy creation happened correctly
-// we compare its constructor to our function (one of the invariants
-// ensured when creating an object via AllocateFunctionPrototype).
-assertSame(fun, observer.records[0].oldValue.constructor);
-observer.records.splice(0, 1);
-observer.assertCallbackRecords([
- { object: fun, name: 'prototype', type: 'update', oldValue: myproto },
- { object: fun, name: 'prototype', type: 'update', oldValue: 7 },
-]);
-
-// Function.prototype should not be observable except on the object itself
-reset();
-var fun = function(){};
-var obj = { __proto__: fun };
-Object.observe(obj, observer.callback);
-obj.prototype = 7;
-Object.deliverChangeRecords(observer.callback);
-observer.assertRecordCount(1);
-observer.assertCallbackRecords([
- { object: obj, name: 'prototype', type: 'add' },
-]);
-
-// Check that changes in observation status are detected in all IC states and
-// in optimized code, especially in cases usually using fast elements.
-var mutation = [
- "a[i] = v",
- "a[i] ? ++a[i] : a[i] = v",
- "a[i] ? a[i]++ : a[i] = v",
- "a[i] ? a[i] += 1 : a[i] = v",
- "a[i] ? a[i] -= -1 : a[i] = v",
-];
-
-var props = [1, "1", "a"];
-
-function TestFastElements(prop, mutation, prepopulate, polymorphic, optimize) {
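- // Building the function via eval, with the arguments inlined as a
- // comment, makes each source string unique, so every configuration
- // compiles to a fresh function with its own IC state.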
- var setElement = eval(
- "(function setElement(a, i, v) { " + mutation + "; " +
- "/* " + [].join.call(arguments, " ") + " */" +
- "})"
- );
- print("TestFastElements:", setElement);
-
- var arr = prepopulate ? [1, 2, 3, 4, 5] : [0];
- if (prepopulate) arr[prop] = 2; // for non-element case
- setElement(arr, prop, 3);
- setElement(arr, prop, 4);
- if (polymorphic) setElement(["M", "i", "l", "n", "e", "r"], 0, "m");
- if (optimize) %OptimizeFunctionOnNextCall(setElement);
- setElement(arr, prop, 5);
-
- reset();
- Object.observe(arr, observer.callback);
- setElement(arr, prop, 989898);
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: arr, name: "" + prop, type: 'update', oldValue: 5 }
- ]);
-}
-
-for (var b1 = 0; b1 < 2; ++b1)
- for (var b2 = 0; b2 < 2; ++b2)
- for (var b3 = 0; b3 < 2; ++b3)
- for (var i in props)
- for (var j in mutation)
- TestFastElements(props[i], mutation[j], b1 != 0, b2 != 0, b3 != 0);
-
-
-var mutation = [
- "a.length = v",
- "a.length += newSize - oldSize",
- "a.length -= oldSize - newSize",
-];
-
-var mutationByIncr = [
- "++a.length",
- "a.length++",
-];
-
-function TestFastElementsLength(
- mutation, polymorphic, optimize, oldSize, newSize) {
- var setLength = eval(
- "(function setLength(a, v) { " + mutation + "; " +
- "/* " + [].join.call(arguments, " ") + " */"
- + "})"
- );
- print("TestFastElementsLength:", setLength);
-
- function array(n) {
- var arr = new Array(n);
- for (var i = 0; i < n; ++i) arr[i] = i;
- return arr;
- }
-
- setLength(array(oldSize), newSize);
- setLength(array(oldSize), newSize);
- if (polymorphic) setLength(array(oldSize).map(isNaN), newSize);
- if (optimize) %OptimizeFunctionOnNextCall(setLength);
- setLength(array(oldSize), newSize);
-
- reset();
- var arr = array(oldSize);
- Object.observe(arr, observer.callback);
- setLength(arr, newSize);
- Object.deliverChangeRecords(observer.callback);
- if (oldSize === newSize) {
- observer.assertNotCalled();
- } else {
- var count = oldSize > newSize ? oldSize - newSize : 0;
- observer.assertRecordCount(count + 1);
- var lengthRecord = observer.records[count];
- assertSame(arr, lengthRecord.object);
- assertEquals('length', lengthRecord.name);
- assertEquals('update', lengthRecord.type);
- assertSame(oldSize, lengthRecord.oldValue);
- }
-}
-
-for (var b1 = 0; b1 < 2; ++b1)
- for (var b2 = 0; b2 < 2; ++b2)
- for (var n1 = 0; n1 < 3; ++n1)
- for (var n2 = 0; n2 < 3; ++n2)
- for (var i in mutation)
- TestFastElementsLength(mutation[i], b1 != 0, b2 != 0, 20*n1, 20*n2);
-
-for (var b1 = 0; b1 < 2; ++b1)
- for (var b2 = 0; b2 < 2; ++b2)
- for (var n = 0; n < 3; ++n)
- for (var i in mutationByIncr)
- TestFastElementsLength(mutationByIncr[i], b1 != 0, b2 != 0, 7*n, 7*n+1);
-
-
-(function TestFunctionName() {
- reset();
-
- function fun() {}
- Object.observe(fun, observer.callback);
- fun.name = 'x'; // No change. Not writable.
- Object.defineProperty(fun, 'name', {value: 'a'});
- Object.defineProperty(fun, 'name', {writable: true});
- fun.name = 'b';
- delete fun.name;
- fun.name = 'x'; // No change. Function.prototype.name is non-writable.
- Object.defineProperty(Function.prototype, 'name', {writable: true});
- fun.name = 'c';
- fun.name = 'c'; // Same, no update.
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: fun, type: 'update', name: 'name', oldValue: 'fun' },
- { object: fun, type: 'reconfigure', name: 'name'},
- { object: fun, type: 'update', name: 'name', oldValue: 'a' },
- { object: fun, type: 'delete', name: 'name', oldValue: 'b' },
- { object: fun, type: 'add', name: 'name' },
- ]);
-})();
-
-
-(function TestFunctionLength() {
- reset();
-
- function fun(x) {}
- Object.observe(fun, observer.callback);
- fun.length = 'x'; // No change. Not writable.
- Object.defineProperty(fun, 'length', {value: 'a'});
- Object.defineProperty(fun, 'length', {writable: true});
- fun.length = 'b';
- delete fun.length;
- fun.length = 'x'; // No change. Function.prototype.length is non-writable.
- Object.defineProperty(Function.prototype, 'length', {writable: true});
- fun.length = 'c';
- fun.length = 'c'; // Same, no update.
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: fun, type: 'update', name: 'length', oldValue: 1 },
- { object: fun, type: 'reconfigure', name: 'length'},
- { object: fun, type: 'update', name: 'length', oldValue: 'a' },
- { object: fun, type: 'delete', name: 'length', oldValue: 'b' },
- { object: fun, type: 'add', name: 'length' },
- ]);
-})();
-
-
-(function TestObserveInvalidAcceptMessage() {
- var ex;
- try {
- Object.observe({}, function(){}, "not an object");
- } catch (e) {
- ex = e;
- }
- assertInstanceof(ex, TypeError);
- assertEquals("Third argument to Object.observe must be an array of strings.",
- ex.message);
-})()
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-443982.js b/deps/v8/test/mjsunit/es7/regress/regress-443982.js
deleted file mode 100644
index e04f14c0c6..0000000000
--- a/deps/v8/test/mjsunit/es7/regress/regress-443982.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-object-observe
-
-var records;
-function observer(r) {
- records = r;
-}
-
-Object.defineProperty(Array.prototype, '0', {
- get: function() { return 0; },
- set: function() { throw "boom!"; }
-});
-arr = [1, 2];
-Array.observe(arr, observer);
-arr.length = 0;
-assertEquals(0, arr.length);
-
-Object.deliverChangeRecords(observer);
-assertEquals(1, records.length);
-assertEquals('splice', records[0].type);
-assertArrayEquals([1, 2], records[0].removed);
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-633883.js b/deps/v8/test/mjsunit/es7/regress/regress-633883.js
new file mode 100644
index 0000000000..d3a4958da4
--- /dev/null
+++ b/deps/v8/test/mjsunit/es7/regress/regress-633883.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
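+// Minimized fuzzer case: isFinite() with no argument returns false, so v34 is
+// the one-element array [false]; includes() then searches it for "" (the
+// result of encodeURIComponent on an empty array).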
+v5 = new Array();
+v17 = encodeURIComponent(v5);
+v19 = isFinite();
+v34 = new Array(v19);
+v47 = v34.includes(v17);
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-634269.js b/deps/v8/test/mjsunit/es7/regress/regress-634269.js
new file mode 100644
index 0000000000..3bd55eec41
--- /dev/null
+++ b/deps/v8/test/mjsunit/es7/regress/regress-634269.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
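+// The 'length' defined on the prototype makes an empty Uint8Array report
+// length 42; Array.prototype.includes must not read past the actual elements.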
+__v_1 = new Uint8Array();
+Object.defineProperty(__v_1.__proto__, 'length', {value: 42});
+Array.prototype.includes.call(new Uint8Array(), 2);
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-634273.js b/deps/v8/test/mjsunit/es7/regress/regress-634273.js
new file mode 100644
index 0000000000..7ee5b5a5fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/es7/regress/regress-634273.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-slow-asserts
+
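+// The getter on element 0 grows the array while includes() iterates, and the
+// sparse write at index 0x80000 forces dictionary elements; searching for a
+// fresh WeakMap guarantees that no element ever matches.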
+array = new Array(undefined, undefined, undefined);
+Object.defineProperty(array, 0, {
+ get: function() {
+ array.push(undefined, undefined);
+ }
+});
+array[0x80000] = 1;
+result = array.includes(new WeakMap());
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-634357.js b/deps/v8/test/mjsunit/es7/regress/regress-634357.js
new file mode 100644
index 0000000000..709edcbd50
--- /dev/null
+++ b/deps/v8/test/mjsunit/es7/regress/regress-634357.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-slow-asserts
+
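+// The getter on element 1 truncates the array and writes a new element while
+// includes() is iterating, changing the length and backing store mid-search.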
+array = new Array({}, {}, {});
+Object.defineProperty(array, 1, {
+ get: function() {
+ array.length = 0;
+ array[0] = -2147483648;
+ }
+});
+result = array.includes(new Array());
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js
new file mode 100644
index 0000000000..d02608606d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing-sloppy.js
@@ -0,0 +1,410 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-explicit-tailcalls
+// Flags: --harmony-do-expressions
+
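+// Each entry pairs a source string with a caret underline marking the span
+// where the parser must report the error; the harness at the bottom checks
+// both the exception message and the reported source positions.
+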
+var SyntaxErrorTests = [
+ { msg: "Unexpected expression inside tail call",
+ tests: [
+ { src: `()=>{ return continue foo ; }`,
+ err: ` ^^^`,
+ },
+ { src: `()=>{ return continue 42 ; }`,
+ err: ` ^^`,
+ },
+ { src: `()=>{ return continue new foo () ; }`,
+ err: ` ^^^^^^^^^^`,
+ },
+ { src: `()=>{ loop: return continue loop ; }`,
+ err: ` ^^^^`,
+ },
+ { src: `class A { foo() { return continue super.x ; } }`,
+ err: ` ^^^^^^^`,
+ },
+ { src: `()=>{ return continue this ; }`,
+ err: ` ^^^^`,
+ },
+ { src: `()=>{ return continue class A {} ; }`,
+ err: ` ^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue class A extends B {} ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue function A() { } ; }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue { a: b, c: d} ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue function* Gen() { yield 1; } ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `function A() { return continue new.target ; }`,
+ err: ` ^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue () ; }`,
+ err: ` ^^`,
+ },
+ { src: `()=>{ return continue ( 42 ) ; }`,
+ err: ` ^^^^^^`,
+ },
+ { src: "()=>{ return continue `123 ${foo} 34lk` ; }",
+ err: ` ^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue do { x ? foo() : bar() ; } }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Tail call expression is not allowed here",
+ tests: [
+ { src: `class A {}; class B extends A { constructor() { return continue foo () ; } }`,
+ err: ` ^^^^^^^^^^^^^^^`,
+ },
+ { src: `class A extends continue f () {}; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Tail call expressions are not allowed in non-strict mode",
+ tests: [
+ { src: `()=>{ return continue continue continue b() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue ( continue b() ) ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() - a ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return b + continue f() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return 1, 2, 3, continue f() , 4 ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ var x = continue f ( ) ; }`,
+ err: ` ^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f () ? 1 : 2 ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return (1, 2, 3, continue f()), 4; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return [1, 2, continue f() ] ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return [1, 2, ... continue f() ] ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return [1, 2, continue f(), 3 ] ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: "()=>{ return `123 ${a} ${ continue foo ( ) } 34lk` ; }",
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return g( 1, 2, continue f() ); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() || a; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a || b || c || continue f() || d; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a && b && c && continue f() && d; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a && b || c && continue f() ? d : e; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a ? b : c && continue f() && d || e; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue foo() instanceof bar ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return bar instanceof continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue foo() in bar ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return bar in continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ function* G() { yield continue foo(); } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ (1, 2, 3, continue f() ) => {} }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ (... continue f()) => {} }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ (a, b, c, ... continue f() ) => {} }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a <= continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return b > continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a << continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return b >> continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return c >>> continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() = a ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a = continue f() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a += continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a ** continue f() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return delete continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ typeof continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return ~ continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return void continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return !continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return -continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return +continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return ++ continue f( ) ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() ++; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() --; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return (continue foo()) () ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var i = continue foo(); i < 10; i++) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var i = 0; i < continue foo(); i++) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var i = 0; i < 10; continue foo()) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ if (continue foo()) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ while (continue foo()) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ do { smth; } while (continue foo()) ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ throw continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ switch (continue foo()) { case 1: break; } ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ with (continue foo()) { smth; } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ let x = continue foo() }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ const c = continue foo() }`,
+ err: ` ^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { return continue f ( ) ; } catch(e) {} }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } } finally { bla; } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { smth; } catch(e) { return continue f ( ) ; } finally { blah; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { smth; } catch(e) { try { smth; } catch (e) { return continue f ( ) ; } } finally { blah; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var v in {a:0}) { return continue foo () ; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var v of [1, 2, 3]) { return continue foo () ; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue a.b.c.foo () ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue a().b.c().d.foo () ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue foo (1)(2)(3, 4) ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return ( continue b() ) ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: "()=>{ return continue bar`ab cd ef` ; }",
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: "()=>{ return continue bar`ab ${cd} ef` ; }",
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a || continue f() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a && continue f() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a , continue f() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ function* G() { return continue foo(); } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ function B() { return continue new.target() ; } }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue do { x ? foo() : bar() ; }() }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue (do { x ? foo() : bar() ; })() }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return do { 1, continue foo() } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return do { x ? continue foo() : y } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a || (b && continue c()); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a && (b || continue c()); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a || (b ? c : continue d()); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return 1, 2, 3, a || (b ? c : continue d()); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=> continue (foo ()) ;`,
+ err: ` ^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=> a || continue foo () ;`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=> a && continue foo () ;`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=> a ? continue foo () : b;`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Undefined label 'foo'",
+ tests: [
+ { src: `()=>{ continue foo () ; }`,
+ err: ` ^^^`,
+ },
+ ],
+ },
+];
+
+
+// Should parse successfully.
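+// (These are inside class bodies, which are always strict code, so the tail
+// call syntax parses even though the surrounding file is sloppy.)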
+var NoErrorTests = [
+ `()=>{ class A { foo() { return continue super.f() ; } } }`,
+ `()=>{ class A { foo() { return continue f() ; } } }`,
+ `()=>{ class A { foo() { return a || continue f() ; } } }`,
+ `()=>{ class A { foo() { return b && continue f() ; } } }`,
+];
+
+
+(function() {
+ for (var test_set of SyntaxErrorTests) {
+ var expected_message = "SyntaxError: " + test_set.msg;
+ for (var test of test_set.tests) {
+ var passed = true;
+ var e = null;
+ try {
+ Realm.eval(0, test.src);
+ } catch (ee) {
+ e = ee;
+ }
+ print("=======================================");
+ print("Expected | " + expected_message);
+ print("Source | " + test.src);
+ print(" | " + test.err);
+
+ if (e === null) {
+ print("FAILED");
+ throw new Error("SyntaxError was not thrown");
+ }
+
+ var details = %GetExceptionDetails(e);
+ if (details.start_pos == undefined ||
+ details.end_pos == undefined) {
+ throw new Error("Bad message object returned");
+ }
+ var underline = " ".repeat(details.start_pos) +
+ "^".repeat(details.end_pos - details.start_pos);
+ var passed = expected_message === e.toString() &&
+ test.err === underline;
+
+ if (passed) {
+ print("PASSED");
+ print();
+ } else {
+ print("---------------------------------------");
+ print("Actual | " + e);
+ print("Source | " + test.src);
+ print(" | " + underline);
+ print("FAILED");
+ throw new Error("Test failed");
+ }
+ }
+ }
+})();
+
+
+(function() {
+ for (var src of NoErrorTests) {
+ print("=======================================");
+ print("Source | " + src);
+ Realm.eval(0, src);
+ print("PASSED");
+ print();
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js
new file mode 100644
index 0000000000..486c3e1da6
--- /dev/null
+++ b/deps/v8/test/mjsunit/es8/syntactic-tail-call-parsing.js
@@ -0,0 +1,393 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-explicit-tailcalls
+// Flags: --harmony-do-expressions --harmony-async-await
+"use strict";
+
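+// Strict-mode counterpart of syntactic-tail-call-parsing-sloppy.js: here the
+// 'continue expr()' form is a genuine tail call, so instead of the blanket
+// sloppy-mode ban the tests cover the finer-grained restrictions (try/catch
+// blocks, for-in/of bodies, direct eval, async arrows).
+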
+var SyntaxErrorTests = [
+ { msg: "Unexpected expression inside tail call",
+ tests: [
+ { src: `()=>{ return continue foo ; }`,
+ err: ` ^^^`,
+ },
+ { src: `()=>{ return continue 42 ; }`,
+ err: ` ^^`,
+ },
+ { src: `()=>{ return continue new foo () ; }`,
+ err: ` ^^^^^^^^^^`,
+ },
+ { src: `()=>{ loop: return continue loop ; }`,
+ err: ` ^^^^`,
+ },
+ { src: `class A { foo() { return continue super.x ; } }`,
+ err: ` ^^^^^^^`,
+ },
+ { src: `()=>{ return continue this ; }`,
+ err: ` ^^^^`,
+ },
+ { src: `()=>{ return continue class A {} ; }`,
+ err: ` ^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue class A extends B {} ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue function A() { } ; }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue { a: b, c: d} ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue function* Gen() { yield 1; } ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `function A() { return continue new.target ; }`,
+ err: ` ^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue () ; }`,
+ err: ` ^^`,
+ },
+ { src: `()=>{ return continue ( 42 ) ; }`,
+ err: ` ^^^^^^`,
+ },
+ { src: "()=>{ return continue `123 ${foo} 34lk` ; }",
+ err: ` ^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue do { x ? foo() : bar() ; } }`,
+ err: ` ^^^^^^^^^^^^^^^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Tail call expression is not allowed here",
+ tests: [
+ { src: `()=>{ return continue continue continue b() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue ( continue b() ) ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() - a ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return b + continue f() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return 1, 2, 3, continue f() , 4 ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ var x = continue f ( ) ; }`,
+ err: ` ^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f () ? 1 : 2 ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return (1, 2, 3, continue f()), 4; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return [1, 2, continue f() ] ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return [1, 2, ... continue f() ] ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return [1, 2, continue f(), 3 ] ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: "()=>{ return `123 ${a} ${ continue foo ( ) } 34lk` ; }",
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return g( 1, 2, continue f() ); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() || a; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a || b || c || continue f() || d; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a && b && c && continue f() && d; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a && b || c && continue f() ? d : e; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a ? b : c && continue f() && d || e; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue foo() instanceof bar ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return bar instanceof continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue foo() in bar ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return bar in continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ function* G() { yield continue foo(); } }`,
+ err: ` ^^^^^`,
+ },
+ { src: `()=>{ function* G() { return continue foo(); } }`,
+ err: ` ^^^^^`,
+ },
+ { src: `()=>{ (1, 2, 3, continue f() ) => {} }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ (... continue f()) => {} }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ (a, b, c, ... continue f() ) => {} }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a <= continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return b > continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a << continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return b >> continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return c >>> continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() = a ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a = continue f() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a += continue f(); }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a ** continue f() ; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return delete continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ typeof continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return ~ continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return void continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return !continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return -continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return +continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return ++ continue f( ) ; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() ++; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return continue f() --; }`,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return (continue foo()) () ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var i = continue foo(); i < 10; i++) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var i = 0; i < continue foo(); i++) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var i = 0; i < 10; continue foo()) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ if (continue foo()) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ while (continue foo()) bar(); }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ do { smth; } while (continue foo()) ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ throw continue foo() ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ switch (continue foo()) { case 1: break; } ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ let x = continue foo() }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ const c = continue foo() }`,
+ err: ` ^^^^^^^^^^^^^^^`,
+ },
+ { src: `class A {}; class B extends A { constructor() { return continue foo () ; } }`,
+ err: ` ^^^^^^^^^^^^^^^`,
+ },
+ { src: `class A extends continue f () {}; }`,
+ err: ` ^^^^^^^^^^^^^`,
+ },
+ { src: `async() => continue foo()`,
+ err: ` ^^^^^`,
+ },
+ ],
+ },
+ { msg: "Tail call expression in try block",
+ tests: [
+ { src: `()=>{ try { return continue f ( ) ; } catch(e) {} }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { try { smth; } catch(e) { return continue f( ) ; } } finally { bla; } }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Tail call expression in catch block when finally block is also present",
+ tests: [
+ { src: `()=>{ try { smth; } catch(e) { return continue f ( ) ; } finally { blah; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ try { smth; } catch(e) { try { smth; } catch (e) { return continue f ( ) ; } } finally { blah; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Tail call expression in for-in/of body",
+ tests: [
+ { src: `()=>{ for (var v in {a:0}) { return continue foo () ; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ for (var v of [1, 2, 3]) { return continue foo () ; } }`,
+ err: ` ^^^^^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Tail call of a direct eval is not allowed",
+ tests: [
+ { src: `()=>{ return continue eval(" foo () " ) ; }`,
+ err: ` ^^^^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a || continue eval("", 1, 2) ; }`,
+ err: ` ^^^^^^^^^^^^^^`,
+ },
+ { src: `()=>{ return a, continue eval ( ) ; }`,
+ err: ` ^^^^^^^^^`,
+ },
+ { src: `()=> a, continue eval ( ) ; `,
+ err: ` ^^^^^^^^^`,
+ },
+ { src: `()=> a || continue eval (' ' ) ; `,
+ err: ` ^^^^^^^^^^^^`,
+ },
+ ],
+ },
+ { msg: "Undefined label 'foo'",
+ tests: [
+ { src: `()=>{ continue foo () ; }`,
+ err: ` ^^^`,
+ },
+ ],
+ },
+];
+
+
+// Should parse successfully.
+var NoErrorTests = [
+ `()=>{ return continue a.b.c.foo () ; }`,
+ `()=>{ return continue a().b.c().d.foo () ; }`,
+ `()=>{ return continue foo (1)(2)(3, 4) ; }`,
+ `()=>{ return continue (0, eval)(); }`,
+ `()=>{ return ( continue b() ) ; }`,
+ "()=>{ return continue bar`ab cd ef` ; }",
+ "()=>{ return continue bar`ab ${cd} ef` ; }",
+ `()=>{ return a || continue f() ; }`,
+ `()=>{ return a && continue f() ; }`,
+ `()=>{ return a , continue f() ; }`,
+ `()=>{ class A { foo() { return continue super.f() ; } } }`,
+ `()=>{ function B() { return continue new.target() ; } }`,
+ `()=>{ return continue do { x ? foo() : bar() ; }() }`,
+ `()=>{ return continue (do { x ? foo() : bar() ; })() }`,
+ `()=>{ return do { 1, continue foo() } }`,
+ `()=>{ return do { x ? continue foo() : y } }`,
+ `()=>{ return a || (b && continue c()); }`,
+ `()=>{ return a && (b || continue c()); }`,
+ `()=>{ return a || (b ? c : continue d()); }`,
+ `()=>{ return 1, 2, 3, a || (b ? c : continue d()); }`,
+ `()=> continue (foo ()) ;`,
+ `()=> a || continue foo () ;`,
+ `()=> a && continue foo () ;`,
+ `()=> a ? continue foo () : b;`,
+];
+
+
+(function() {
+ for (var test_set of SyntaxErrorTests) {
+ var expected_message = "SyntaxError: " + test_set.msg;
+ for (var test of test_set.tests) {
+ var passed = true;
+ var e = null;
+ try {
+ eval(test.src);
+ } catch (ee) {
+ e = ee;
+ }
+ print("=======================================");
+ print("Expected | " + expected_message);
+ print("Source | " + test.src);
+ print(" | " + test.err);
+
+ if (e === null) {
+ print("FAILED");
+ throw new Error("SyntaxError was not thrown");
+ }
+
+ var details = %GetExceptionDetails(e);
+ if (details.start_pos == undefined ||
+ details.end_pos == undefined) {
+ throw new Error("Bad message object returned");
+ }
+ var underline = " ".repeat(details.start_pos) +
+ "^".repeat(details.end_pos - details.start_pos);
+ var passed = expected_message === e.toString() &&
+ test.err === underline;
+
+ if (passed) {
+ print("PASSED");
+ print();
+ } else {
+ print("---------------------------------------");
+ print("Actual | " + e);
+ print("Source | " + test.src);
+ print(" | " + underline);
+ print("FAILED");
+ throw new Error("Test failed");
+ }
+ }
+ }
+})();
+
+
+(function() {
+ for (var src of NoErrorTests) {
+ print("=======================================");
+ print("Source | " + src);
+ src = `"use strict"; ` + src;
+ Realm.eval(0, src);
+ print("PASSED");
+ print();
+ }
+})();
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js
new file mode 100644
index 0000000000..ec7ade6673
--- /dev/null
+++ b/deps/v8/test/mjsunit/es8/syntactic-tail-call-simple.js
@@ -0,0 +1,143 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-explicit-tailcalls --stack-size=100
+
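+// --stack-size=100 shrinks the stack so that 1e5-deep recursion overflows
+// unless every call is a genuine tail call that replaces its caller's frame.
+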
+//
+// Tail calls work only in strict mode.
+//
+(function() {
+ function f(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return f(n - 1);
+ }
+ assertThrows(()=>{ f(1e5) });
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(()=>{ f(1e5) });
+})();
+
+
+//
+// Tail call normal functions.
+//
+(function() {
+ "use strict";
+ function f(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return continue f(n - 1);
+ }
+ assertEquals("foo", f(1e5));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+})();
+
+
+(function() {
+ "use strict";
+ function f(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return continue f(n - 1, 42); // Call with arguments adaptor.
+ }
+ assertEquals("foo", f(1e5));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+})();
+
+
+(function() {
+ "use strict";
+ function f(n){
+ if (n <= 0) {
+ return "foo";
+ }
+ return continue g(n - 1);
+ }
+ function g(n){
+ if (n <= 0) {
+ return "bar";
+ }
+ return continue f(n - 1);
+ }
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+})();
+
+
+(function() {
+ "use strict";
+ function f(n){
+ if (n <= 0) {
+ return "foo";
+ }
+ return continue g(n - 1, 42); // Call with arguments adaptor.
+ }
+ function g(n){
+ if (n <= 0) {
+ return "bar";
+ }
+ return continue f(n - 1, 42); // Call with arguments adaptor.
+ }
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+})();
+
+
+//
+// Tail call bound functions.
+//
+(function() {
+ "use strict";
+ function f0(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return continue f_bound(n - 1);
+ }
+ var f_bound = f0.bind({});
+ function f(n) {
+ return continue f_bound(n);
+ }
+ assertEquals("foo", f(1e5));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+})();
+
+
+(function() {
+ "use strict";
+ function f0(n){
+ if (n <= 0) {
+ return "foo";
+ }
+ return continue g_bound(n - 1);
+ }
+ function g0(n){
+ if (n <= 0) {
+ return "bar";
+ }
+ return continue f_bound(n - 1);
+ }
+ var f_bound = f0.bind({});
+ var g_bound = g0.bind({});
+ function f(n) {
+ return continue f_bound(n);
+ }
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+})();
diff --git a/deps/v8/test/mjsunit/es8/syntactic-tail-call.js b/deps/v8/test/mjsunit/es8/syntactic-tail-call.js
new file mode 100644
index 0000000000..44936a4b22
--- /dev/null
+++ b/deps/v8/test/mjsunit/es8/syntactic-tail-call.js
@@ -0,0 +1,604 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-explicit-tailcalls
+// Flags: --harmony-do-expressions
+
+"use strict";
+
+Error.prepareStackTrace = (error,stack) => {
+ error.strace = stack;
+ return error.message + "\n at " + stack.join("\n at ");
+}
+
+
+function CheckStackTrace(expected) {
+ var e = new Error();
+ e.stack; // prepare stack trace
+ var stack = e.strace;
+ assertEquals("CheckStackTrace", stack[0].getFunctionName());
+ for (var i = 0; i < expected.length; i++) {
+ assertEquals(expected[i].name, stack[i + 1].getFunctionName());
+ }
+}
+%NeverOptimizeFunction(CheckStackTrace);
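+// An expected trace like [f1, test] asserts that the intermediate caller
+// (e.g. g1) no longer has a frame: its tail call replaced it on the stack.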
+
+
+function f(expected_call_stack, a, b) {
+ CheckStackTrace(expected_call_stack);
+ return a;
+}
+
+function f_153(expected_call_stack, a) {
+ CheckStackTrace(expected_call_stack);
+ return 153;
+}
+
+
+// Tail call when caller does not have an arguments adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ function g1(a) { return continue f1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ function g2(a, b, c) { return continue f2(2); }
+
+  // Caller has fewer arguments than callee.
+ function f3(a, b, c) {
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ function g3(a) { return continue f3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ function g4(a) { return continue f4(2); }
+
+ function test() {
+ assertEquals(12, g1(1));
+ assertEquals(12, g2(1, 2, 3));
+ assertEquals(19, g3(1));
+ assertEquals(12, g4(1));
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail call when caller has an arguments adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ function g1(a) { return continue f1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ function g2(a, b, c) { return continue f2(2); }
+
+  // Caller has fewer arguments than callee.
+ function f3(a, b, c) {
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ function g3(a) { return continue f3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ function g4(a) { return continue f4(2); }
+
+ function test() {
+ assertEquals(12, g1());
+ assertEquals(12, g2());
+ assertEquals(19, g3());
+ assertEquals(12, g4());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail call bound function when caller does not have an arguments
+// adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ var b1 = f1.bind({a: 153});
+ function g1(a) { return continue b1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ var b2 = f2.bind({a: 153});
+ function g2(a, b, c) { return continue b2(2); }
+
+  // Caller has fewer arguments than callee.
+ function f3(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ var b3 = f3.bind({a: 153});
+ function g3(a) { return continue b3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ var b4 = f4.bind({a: 153});
+ function g4(a) { return continue b4(2); }
+
+ function test() {
+ assertEquals(12, g1(1));
+ assertEquals(12, g2(1, 2, 3));
+ assertEquals(19, g3(1));
+ assertEquals(12, g4(1));
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail call bound function when caller has an arguments adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ var b1 = f1.bind({a: 153});
+ function g1(a) { return continue b1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ var b2 = f2.bind({a: 153});
+ function g2(a, b, c) { return continue b2(2); }
+
+  // Caller has fewer arguments than callee.
+ function f3(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ var b3 = f3.bind({a: 153});
+ function g3(a) { return continue b3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ var b4 = f4.bind({a: 153});
+ function g4(a) { return continue b4(2); }
+
+ function test() {
+ assertEquals(12, g1());
+ assertEquals(12, g2());
+ assertEquals(19, g3());
+ assertEquals(12, g4());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from getter.
+(function() {
+ function g(v) {
+ CheckStackTrace([g, test]);
+ %DeoptimizeFunction(test);
+ return 153;
+ }
+ %NeverOptimizeFunction(g);
+
+ function f(v) {
+ return continue g();
+ }
+ %SetForceInlineFlag(f);
+
+ function test() {
+ var o = {};
+ o.__defineGetter__('p', f);
+ assertEquals(153, o.p);
+ }
+
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from setter.
+(function() {
+ function g() {
+ CheckStackTrace([g, test]);
+ %DeoptimizeFunction(test);
+ return 153;
+ }
+ %NeverOptimizeFunction(g);
+
+ function f(v) {
+ return continue g();
+ }
+ %SetForceInlineFlag(f);
+
+ function test() {
+ var o = {};
+ o.__defineSetter__('q', f);
+ assertEquals(1, o.q = 1);
+ }
+
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from constructor.
+(function() {
+ function g(context) {
+ CheckStackTrace([g, test]);
+ %DeoptimizeFunction(test);
+ return {x: 153};
+ }
+ %NeverOptimizeFunction(g);
+
+ function A() {
+ this.x = 42;
+ return continue g();
+ }
+
+ function test() {
+ var o = new A();
+ %DebugPrint(o);
+ assertEquals(153, o.x);
+ }
+
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling via various expressions.
+(function() {
+ function g1(a) {
+ return f([f, g1, test], false) || continue f([f, test], true);
+ }
+
+ function g2(a) {
+ return f([f, g2, test], true) && continue f([f, test], true);
+ }
+
+ function g3(a) {
+ return f([f, g3, test], 13), continue f([f, test], 153);
+ }
+
+ function g4(a) {
+ return f([f, g4, test], false) ||
+ (f([f, g4, test], true) && continue f([f, test], true));
+ }
+
+ function g5(a) {
+ return f([f, g5, test], true) &&
+ (f([f, g5, test], false) || continue f([f, test], true));
+ }
+
+ function g6(a) {
+ return f([f, g6, test], 13), f([f, g6, test], 42),
+ continue f([f, test], 153);
+ }
+
+ function g7(a) {
+ return f([f, g7, test], false) ||
+ (f([f, g7, test], false) ? continue f([f, test], true)
+ : continue f([f, test], true));
+ }
+
+ function g8(a) {
+ return f([f, g8, test], false) || f([f, g8, test], true) &&
+ continue f([f, test], true);
+ }
+
+ function g9(a) {
+ return f([f, g9, test], true) && f([f, g9, test], false) ||
+ continue f([f, test], true);
+ }
+
+ function g10(a) {
+ return f([f, g10, test], true) && f([f, g10, test], false) ||
+ f([f, g10, test], true) ?
+ f([f, g10, test], true) && f([f, g10, test], false) ||
+ continue f([f, test], true) :
+ f([f, g10, test], true) && f([f, g10, test], false) ||
+ continue f([f, test], true);
+ }
+
+ function test() {
+ assertEquals(true, g1());
+ assertEquals(true, g2());
+ assertEquals(153, g3());
+ assertEquals(true, g4());
+ assertEquals(true, g5());
+ assertEquals(153, g6());
+ assertEquals(true, g7());
+ assertEquals(true, g8());
+ assertEquals(true, g9());
+ assertEquals(true, g10());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling from various statements.
+(function() {
+ function g3() {
+ for (var i = 0; i < 10; i++) {
+ return continue f_153([f_153, test]);
+ }
+ }
+
+ function g4() {
+ while (true) {
+ return continue f_153([f_153, test]);
+ }
+ }
+
+ function g5() {
+ do {
+ return continue f_153([f_153, test]);
+ } while (true);
+ }
+
+ function test() {
+ assertEquals(153, g3());
+ assertEquals(153, g4());
+ assertEquals(153, g5());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from try-catch constructs.
+(function() {
+ function tc1(a) {
+ try {
+ f_153([f_153, tc1, test]);
+ return f_153([f_153, tc1, test]);
+ } catch(e) {
+ f_153([f_153, tc1, test]);
+ }
+ }
+
+ function tc2(a) {
+ try {
+ f_153([f_153, tc2, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tc2, test]);
+ return continue f_153([f_153, test]);
+ }
+ }
+
+ function tc3(a) {
+ try {
+ f_153([f_153, tc3, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tc3, test]);
+ }
+ f_153([f_153, tc3, test]);
+ return continue f_153([f_153, test]);
+ }
+
+ function test() {
+ assertEquals(153, tc1());
+ assertEquals(153, tc2());
+ assertEquals(153, tc3());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from try-finally constructs.
+(function() {
+ function tf1(a) {
+ try {
+ f_153([f_153, tf1, test]);
+ return f_153([f_153, tf1, test]);
+ } finally {
+ f_153([f_153, tf1, test]);
+ }
+ }
+
+ function tf2(a) {
+ try {
+ f_153([f_153, tf2, test]);
+ throw new Error("boom");
+ } finally {
+ f_153([f_153, tf2, test]);
+ return continue f_153([f_153, test]);
+ }
+ }
+
+ function tf3(a) {
+ try {
+ f_153([f_153, tf3, test]);
+ } finally {
+ f_153([f_153, tf3, test]);
+ }
+ return continue f_153([f_153, test]);
+ }
+
+ function test() {
+ assertEquals(153, tf1());
+ assertEquals(153, tf2());
+ assertEquals(153, tf3());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from try-catch-finally constructs.
+(function() {
+ function tcf1(a) {
+ try {
+ f_153([f_153, tcf1, test]);
+ return f_153([f_153, tcf1, test]);
+ } catch(e) {
+ } finally {
+ f_153([f_153, tcf1, test]);
+ }
+ }
+
+ function tcf2(a) {
+ try {
+ f_153([f_153, tcf2, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tcf2, test]);
+ return f_153([f_153, tcf2, test]);
+ } finally {
+ f_153([f_153, tcf2, test]);
+ }
+ }
+
+ function tcf3(a) {
+ try {
+ f_153([f_153, tcf3, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tcf3, test]);
+ } finally {
+ f_153([f_153, tcf3, test]);
+ return continue f_153([f_153, test]);
+ }
+ }
+
+ function tcf4(a) {
+ try {
+ f_153([f_153, tcf4, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tcf4, test]);
+ } finally {
+ f_153([f_153, tcf4, test]);
+ }
+ return continue f_153([f_153, test]);
+ }
+
+ function test() {
+ assertEquals(153, tcf1());
+ assertEquals(153, tcf2());
+ assertEquals(153, tcf3());
+ assertEquals(153, tcf4());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from arrow functions.
+(function () {
+ function g1(a) {
+ return continue (() => { return continue f_153([f_153, test]); })();
+ }
+
+ function g2(a) {
+ return continue (() => continue f_153([f_153, test]))();
+ }
+
+ function g3(a) {
+ var closure = () => f([f, closure, test], true)
+ ? continue f_153([f_153, test])
+ : continue f_153([f_153, test]);
+ return continue closure();
+ }
+
+ function test() {
+ assertEquals(153, g1());
+ assertEquals(153, g2());
+ assertEquals(153, g3());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from do expressions.
+(function () {
+ function g1(a) {
+ var a = do { return continue f_153([f_153, test]); 42; };
+ return a;
+ }
+
+ function test() {
+ assertEquals(153, g1());
+ }
+ test();
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
diff --git a/deps/v8/test/mjsunit/eval-origin.js b/deps/v8/test/mjsunit/eval-origin.js
new file mode 100644
index 0000000000..bb86ef32fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/eval-origin.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --nostress-opt
+
+Error.prepareStackTrace = function(exception, frames) {
+ return frames[0].getEvalOrigin();
+}
+
+var source = "new Error()";
+var eval_origin;
+var geval = eval;
+var log = [];
+
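+// Each log entry pairs a stack string (produced by the prepareStackTrace hook
+// above) with the 'line:column' of the eval site; the loop at the end checks
+// that the string reported by getEvalOrigin() ends with that position.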
+(function() {
+ log.push([geval(source).stack, "17:13"]);
+ log.push([geval(source).stack, "18:13"]);
+ // log.push([geval(source).stack, "19:13"]); TODO(4921).
+})();
+
+(function() {
+ log.push([eval(source).stack, "23:13"]);
+ log.push([eval(source).stack, "24:13"]);
+ // log.push([eval(source).stack, "25:13"]); TODO(4921).
+})();
+
+log.push([eval(source).stack, "28:11"]);
+log.push([eval(source).stack, "29:11"]);
+// log.push([eval(source).stack, "30:11"]); TODO(4921).
+
+Error.prepareStackTrace = undefined;
+
+for (var item of log) {
+ var stacktraceline = item[0];
+ var expectation = item[1];
+ var re = new RegExp(`:${expectation}\\)$`);
+ assertTrue(re.test(stacktraceline));
+}
diff --git a/deps/v8/test/mjsunit/fast-prototype.js b/deps/v8/test/mjsunit/fast-prototype.js
index 7432ecce9d..aa0a62e954 100644
--- a/deps/v8/test/mjsunit/fast-prototype.js
+++ b/deps/v8/test/mjsunit/fast-prototype.js
@@ -46,14 +46,20 @@ function AddProps(obj) {
function DoProtoMagic(proto, set__proto__) {
+ var receiver;
if (set__proto__) {
- (new Sub()).__proto__ = proto;
+ receiver = new Sub();
+ receiver.__proto__ = proto;
} else {
Sub.prototype = proto;
// Need to instantiate Sub to mark .prototype as prototype. Make sure the
// instantiated object is used so that the allocation is not optimized away.
- %DebugPrint(new Sub());
+ receiver = new Sub();
}
+ // Prototypes are made fast when ICs encounter them.
+ function ic() { return typeof receiver.foo; }
+ ic();
+ ic();
}
diff --git a/deps/v8/test/mjsunit/for-in.js b/deps/v8/test/mjsunit/for-in.js
index bece37a3ee..29d7445351 100644
--- a/deps/v8/test/mjsunit/for-in.js
+++ b/deps/v8/test/mjsunit/for-in.js
@@ -25,64 +25,141 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --noharmony-for-in
+
function props(x) {
var array = [];
for (var p in x) array.push(p);
- return array.sort();
+ return array;
}
-assertEquals(0, props({}).length, "olen0");
-assertEquals(1, props({x:1}).length, "olen1");
-assertEquals(2, props({x:1, y:2}).length, "olen2");
+(function forInBasic() {
+ assertEquals(0, props({}).length, "olen0");
+ assertEquals(1, props({x:1}).length, "olen1");
+ assertEquals(2, props({x:1, y:2}).length, "olen2");
-assertArrayEquals(["x"], props({x:1}), "x");
-assertArrayEquals(["x", "y"], props({x:1, y:2}), "xy");
-assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}), "xyzoom");
+ assertArrayEquals(["x"], props({x:1}), "x");
+ assertArrayEquals(["x", "y"], props({x:1, y:2}), "xy");
+ assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}), "xyzoom");
-assertEquals(0, props([]).length, "alen0");
-assertEquals(1, props([1]).length, "alen1");
-assertEquals(2, props([1,2]).length, "alen2");
+ assertEquals(0, props([]).length, "alen0");
+ assertEquals(1, props([1]).length, "alen1");
+ assertEquals(2, props([1,2]).length, "alen2");
-assertArrayEquals(["0"], props([1]), "0");
-assertArrayEquals(["0", "1"], props([1,2]), "01");
-assertArrayEquals(["0", "1", "2"], props([1,2,3]), "012");
+ assertArrayEquals(["0"], props([1]), "0");
+ assertArrayEquals(["0", "1"], props([1,2]), "01");
+ assertArrayEquals(["0", "1", "2"], props([1,2,3]), "012");
+})();
-var o = {};
-var a = [];
-for (var i = 0x0020; i < 0x01ff; i+=2) {
- var s = 'char:' + String.fromCharCode(i);
- a.push(s);
- o[s] = i;
-}
-assertArrayEquals(a, props(o), "charcodes");
-
-var a = [];
-assertEquals(0, props(a).length, "proplen0");
-a[Math.pow(2,30)-1] = 0;
-assertEquals(1, props(a).length, "proplen1");
-a[Math.pow(2,31)-1] = 0;
-assertEquals(2, props(a).length, "proplen2");
-a[1] = 0;
-assertEquals(3, props(a).length, "proplen3");
-
-for (var hest = 'hest' in {}) { }
-assertEquals('hest', hest, "empty-no-override");
-
-var result = '';
-for (var p in {a : [0], b : 1}) { result += p; }
-assertEquals('ab', result, "ab");
-
-var result = '';
-for (var p in {a : {v:1}, b : 1}) { result += p; }
-assertEquals('ab', result, "ab-nodeep");
-
-var result = '';
-for (var p in { get a() {}, b : 1}) { result += p; }
-assertEquals('ab', result, "abget");
-
-var result = '';
-for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
-assertEquals('ab', result, "abgetset");
+(function forInPrototype() {
+ // Fast properties + fast elements
+ var obj = {a:true, 3:true, 4:true};
+ obj.__proto__ = {c:true, b:true, 2:true, 1:true, 5:true};
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34a125cb".split(""), props(obj));
+ }
+ // Fast properties + dictionary elements
+ delete obj.__proto__[2];
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34a15cb".split(""), props(obj));
+ }
+ // Slow properties + dictionary elements
+ delete obj.__proto__.c;
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34a15b".split(""), props(obj));
+ }
+ // Slow properties on the receiver as well
+ delete obj.a;
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("3415b".split(""), props(obj));
+ }
+ delete obj[3];
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("415b".split(""), props(obj));
+ }
+})();
+
+(function forInShadowing() {
+ var obj = {a:true, 3:true, 4:true};
+ obj.__proto__ = {
+ c:true, b:true, x:true,
+ 2:true, 1:true, 5:true, 9:true};
+ Object.defineProperty(obj, 'x', {value:true, enumerable:false, configurable:true});
+ Object.defineProperty(obj, '9', {value:true, enumerable:false, configurable:true});
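+  // The own non-enumerable 'x' and '9' shadow their enumerable prototype
+  // copies, so neither is visited until the own properties are deleted below.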
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34a125cb".split(""), props(obj));
+ }
+ // Fast properties + dictionary elements
+ delete obj.__proto__[2];
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34a15cb".split(""), props(obj));
+ }
+ // Slow properties + dictionary elements
+ delete obj.__proto__.c;
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34a15b".split(""), props(obj));
+ }
+ // Remove the shadowing properties
+ delete obj.x;
+ delete obj[9];
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34a159bx".split(""), props(obj));
+ }
+ // Slow properties on the receiver as well
+ delete obj.a;
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("34159bx".split(""), props(obj));
+ }
+ delete obj[3];
+ for (var i = 0; i < 3; i++) {
+ assertArrayEquals("4159bx".split(""), props(obj));
+ }
+})();
+
+(function forInCharCodes() {
+ var o = {};
+ var a = [];
+ for (var i = 0x0020; i < 0x01ff; i+=2) {
+ var s = 'char:' + String.fromCharCode(i);
+ a.push(s);
+ o[s] = i;
+ }
+ assertArrayEquals(a, props(o), "charcodes");
+})();
+
+(function forInArray() {
+ var a = [];
+ assertEquals(0, props(a).length, "proplen0");
+ a[Math.pow(2,30)-1] = 0;
+ assertEquals(1, props(a).length, "proplen1");
+ a[Math.pow(2,31)-1] = 0;
+ assertEquals(2, props(a).length, "proplen2");
+ a[1] = 0;
+ assertEquals(3, props(a).length, "proplen3");
+})();
+
+(function forInInitialize() {
+ for (var hest = 'hest' in {}) { }
+ assertEquals('hest', hest, "empty-no-override");
+})();
+
+(function forInObjects() {
+ var result = '';
+ for (var p in {a : [0], b : 1}) { result += p; }
+ assertEquals('ab', result, "ab");
+
+ var result = '';
+ for (var p in {a : {v:1}, b : 1}) { result += p; }
+ assertEquals('ab', result, "ab-nodeep");
+
+ var result = '';
+ for (var p in { get a() {}, b : 1}) { result += p; }
+ assertEquals('ab', result, "abget");
+
+ var result = '';
+ for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
+ assertEquals('ab', result, "abgetset");
+})();
// Test that for-in in the global scope works with a keyed property as "each".
diff --git a/deps/v8/test/mjsunit/function-name-eval-shadowed.js b/deps/v8/test/mjsunit/function-name-eval-shadowed.js
new file mode 100644
index 0000000000..70cc4b9c54
--- /dev/null
+++ b/deps/v8/test/mjsunit/function-name-eval-shadowed.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(200, (function f() { eval("var f = 100"); f = 200; return f })());
diff --git a/deps/v8/test/mjsunit/global-arrow-delete-this.js b/deps/v8/test/mjsunit/global-arrow-delete-this.js
new file mode 100644
index 0000000000..9ebe8e48b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/global-arrow-delete-this.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Make sure that we correctly resolve this when compiling an arrow function in
+// a with scope in an arrow function.
+a = () => {
+ let x
+ with ({}) x = () => { "use strict"; delete this }
+ return x
+}
+a()()
+
+
+// Make sure that we correctly resolve this when compiling a program in an arrow
+// function.
+a = ()=>eval('"use strict"; delete this')
+a()
diff --git a/deps/v8/test/mjsunit/harmony/array-concat-array-proto-getter.js b/deps/v8/test/mjsunit/harmony/array-concat-array-proto-getter.js
new file mode 100644
index 0000000000..9368e7fb6c
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-concat-array-proto-getter.js
@@ -0,0 +1,53 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that @@isConcatSpreadable is checked when defined as a getter on
+// Array.prototype
+
+"use strict"
+
+var array = [1, 2, 3];
+var object = {length: 1, '0': 'a'};
+
+function testConcatDefaults() {
+ assertEquals(array, [].concat(array));
+ assertEquals(array, array.concat([]));
+ assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+ assertEquals([object], [].concat(object));
+ assertEquals([1, 2, 3, object], array.concat(object));
+ assertEquals([object], Array.prototype.concat.call(object,[]));
+ assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+ assertEquals([object, object], Array.prototype.concat.call(object, object));
+}
+
+testConcatDefaults();
+
+var concatSpreadable = false;
+Object.defineProperty(Array.prototype, Symbol.isConcatSpreadable, {
+ get() { return concatSpreadable },
+ configurable: true
+});
+
+assertEquals([[], array], [].concat(array));
+assertEquals([array, []], array.concat([]));
+assertEquals([array, array], array.concat(array));
+assertEquals([[], object], [].concat(object));
+assertEquals([array, object], array.concat(object));
+assertEquals([object, []], Array.prototype.concat.call(object,[]));
+assertEquals([object, array], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+concatSpreadable = true;
+
+assertEquals(array, [].concat(array));
+assertEquals(array, array.concat([]));
+assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+assertEquals([object], [].concat(object));
+assertEquals([1, 2, 3, object], array.concat(object));
+assertEquals([object], Array.prototype.concat.call(object,[]));
+assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+
+delete Array.prototype[Symbol.isConcatSpreadable];
+testConcatDefaults();
diff --git a/deps/v8/test/mjsunit/harmony/array-concat-array-proto.js b/deps/v8/test/mjsunit/harmony/array-concat-array-proto.js
new file mode 100644
index 0000000000..520178fdaa
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-concat-array-proto.js
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that @@isConcatSpreadable is checked when set on Array.prototype
+
+"use strict"
+
+var array = [1, 2, 3];
+var object = {length: 1, '0': 'a'};
+
+function testConcatDefaults() {
+ assertEquals(array, [].concat(array));
+ assertEquals(array, array.concat([]));
+ assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+ assertEquals([object], [].concat(object));
+ assertEquals([1, 2, 3, object], array.concat(object));
+ assertEquals([object], Array.prototype.concat.call(object,[]));
+ assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+ assertEquals([object, object], Array.prototype.concat.call(object, object));
+}
+
+testConcatDefaults();
+
+Array.prototype[Symbol.isConcatSpreadable] = false;
+
+assertEquals([[], array], [].concat(array));
+assertEquals([array, []], array.concat([]));
+assertEquals([array, array], array.concat(array));
+assertEquals([[], object], [].concat(object));
+assertEquals([array, object], array.concat(object));
+assertEquals([object, []], Array.prototype.concat.call(object,[]));
+assertEquals([object, array], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+Array.prototype[Symbol.isConcatSpreadable] = true;
+
+assertEquals(array, [].concat(array));
+assertEquals(array, array.concat([]));
+assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+assertEquals([object], [].concat(object));
+assertEquals([1, 2, 3, object], array.concat(object));
+assertEquals([object], Array.prototype.concat.call(object,[]));
+assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+delete Array.prototype[Symbol.isConcatSpreadable];
+testConcatDefaults();
diff --git a/deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict-getter.js b/deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict-getter.js
new file mode 100644
index 0000000000..6e61588789
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict-getter.js
@@ -0,0 +1,57 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that @@isConcatSpreadable is checked when set on Object.prototype
+// with a dictionary backing store.
+
+// Force Object.prototype into dictionary backing store by adding many
+// properties.
+for (var i = 0; i < 10*1000; i++) {
+ Object.prototype['generatedProperty'+i] = true;
+}
+
+var array = [1, 2, 3];
+var object = {length: 1, '0': 'a'};
+
+function testConcatDefaults() {
+ assertEquals(array, [].concat(array));
+ assertEquals(array, array.concat([]));
+ assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+ assertEquals([object], [].concat(object));
+ assertEquals([1, 2, 3, object], array.concat(object));
+ assertEquals([object], Array.prototype.concat.call(object,[]));
+ assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+ assertEquals([object, object], Array.prototype.concat.call(object, object));
+}
+
+testConcatDefaults();
+
+var concatSpreadable = false;
+Object.defineProperty(Object.prototype, Symbol.isConcatSpreadable, {
+ get() { return concatSpreadable },
+ configurable: true
+});
+
+assertEquals([[], array], [].concat(array));
+assertEquals([array, []], array.concat([]));
+assertEquals([array, array], array.concat(array));
+assertEquals([[], object], [].concat(object));
+assertEquals([array, object], array.concat(object));
+assertEquals([object, []], Array.prototype.concat.call(object, []));
+assertEquals([object, array], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+concatSpreadable = true;
+
+assertEquals(array, [].concat(array));
+assertEquals(array, array.concat([]));
+assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+assertEquals(['a'], [].concat(object));
+assertEquals([1, 2, 3, 'a'], array.concat(object));
+assertEquals(['a'], Array.prototype.concat.call(object, []));
+assertEquals(['a', 1, 2, 3], Array.prototype.concat.call(object, array));
+assertEquals(['a', 'a'], Array.prototype.concat.call(object, object));
+
+delete Object.prototype[Symbol.isConcatSpreadable];
+testConcatDefaults();
diff --git a/deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict.js b/deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict.js
new file mode 100644
index 0000000000..c817006c16
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-concat-object-proto-dict.js
@@ -0,0 +1,53 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that @@isConcatSpreadable is checked when set on Object.prototype
+// with a dictionary backing store.
+
+// Force Object.prototype into dictionary backing store by adding many
+// properties.
+for (var i = 0; i < 10 * 1000; i++) {
+ Object.prototype['generatedProperty' + i] = true;
+}
+
+var array = [1, 2, 3];
+var object = {length: 1, '0': 'a'};
+
+function testConcatDefaults() {
+ assertEquals(array, [].concat(array));
+ assertEquals(array, array.concat([]));
+ assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+ assertEquals([object], [].concat(object));
+ assertEquals([1, 2, 3, object], array.concat(object));
+ assertEquals([object], Array.prototype.concat.call(object, []));
+ assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+ assertEquals([object, object], Array.prototype.concat.call(object, object));
+}
+
+testConcatDefaults();
+
+Object.prototype[Symbol.isConcatSpreadable] = false;
+
+assertEquals([[], array], [].concat(array));
+assertEquals([array, []], array.concat([]));
+assertEquals([array, array], array.concat(array));
+assertEquals([[], object], [].concat(object));
+assertEquals([array, object], array.concat(object));
+assertEquals([object, []], Array.prototype.concat.call(object, []));
+assertEquals([object, array], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+Object.prototype[Symbol.isConcatSpreadable] = true;
+
+assertEquals(array, [].concat(array));
+assertEquals(array, array.concat([]));
+assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+assertEquals(['a'], [].concat(object));
+assertEquals([1, 2, 3, 'a'], array.concat(object));
+assertEquals(['a'], Array.prototype.concat.call(object, []));
+assertEquals(['a', 1, 2, 3], Array.prototype.concat.call(object, array));
+assertEquals(['a', 'a'], Array.prototype.concat.call(object, object));
+
+delete Object.prototype[Symbol.isConcatSpreadable];
+testConcatDefaults();
diff --git a/deps/v8/test/mjsunit/harmony/array-concat-object-proto-generic-dict.js b/deps/v8/test/mjsunit/harmony/array-concat-object-proto-generic-dict.js
new file mode 100644
index 0000000000..7b61422a44
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-concat-object-proto-generic-dict.js
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that @@isConcatSpreadable is checked when set on Object.prototype
+// with a dictionary backing store.
+
+// Force Object.prototype into dictionary backing store by adding many
+// properties.
+for (var i = 0; i < 10 * 1000; i++) {
+ Object.prototype['generatedProperty' + i] = true;
+}
+
+var array = [1, 2, 3];
+var object = {length: 1, '0': 'a'};
+
+function SetProperty(receiver, key, value) {
+ receiver[key] = value;
+}
+
+// Force the Keyed Store IC in SetProperty to be generic.
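+// (A hundred distinct keys stored through the same call site drive the
+// keyed store IC megamorphic, so it stays on the generic path.)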
+var receiver = {};
+for (var i = 0; i < 100; i++) {
+ SetProperty(receiver, 'prop' + i, 'value');
+}
+
+function testConcatDefaults() {
+ assertEquals(array, [].concat(array));
+ assertEquals(array, array.concat([]));
+ assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+ assertEquals([object], [].concat(object));
+ assertEquals([1, 2, 3, object], array.concat(object));
+ assertEquals([object], Array.prototype.concat.call(object, []));
+ assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+ assertEquals([object, object], Array.prototype.concat.call(object, object));
+}
+
+testConcatDefaults();
+
+// Use a generic IC to set @@isConcatSpreadable
+SetProperty(Object.prototype, Symbol.isConcatSpreadable, false);
+
+assertEquals([[], array], [].concat(array));
+assertEquals([array, []], array.concat([]));
+assertEquals([array, array], array.concat(array));
+assertEquals([[], object], [].concat(object));
+assertEquals([array, object], array.concat(object));
+assertEquals([object, []], Array.prototype.concat.call(object, []));
+assertEquals([object, array], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+// Use a generic IC to set @@isConcatSpreadable
+SetProperty(Object.prototype, Symbol.isConcatSpreadable, true);
+
+assertEquals(array, [].concat(array));
+assertEquals(array, array.concat([]));
+assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+assertEquals(['a'], [].concat(object));
+assertEquals([1, 2, 3, 'a'], array.concat(object));
+assertEquals(['a'], Array.prototype.concat.call(object, []));
+assertEquals(['a', 1, 2, 3], Array.prototype.concat.call(object, array));
+assertEquals(['a', 'a'], Array.prototype.concat.call(object, object));
+
+delete Object.prototype[Symbol.isConcatSpreadable];
+testConcatDefaults();
diff --git a/deps/v8/test/mjsunit/harmony/array-concat-object-proto.js b/deps/v8/test/mjsunit/harmony/array-concat-object-proto.js
new file mode 100644
index 0000000000..307326cbcd
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-concat-object-proto.js
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that @@isConcatSpreadable is checked when set on Object.prototype
+
+"use strict"
+
+var array = [1, 2, 3];
+var object = {length: 1, '0': 'a'};
+
+function testConcatDefaults() {
+ assertEquals(array, [].concat(array));
+ assertEquals(array, array.concat([]));
+ assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+ assertEquals([object], [].concat(object));
+ assertEquals([1, 2, 3, object], array.concat(object));
+ assertEquals([object], Array.prototype.concat.call(object, []));
+ assertEquals([object, 1, 2, 3], Array.prototype.concat.call(object, array));
+ assertEquals([object, object], Array.prototype.concat.call(object, object));
+}
+
+testConcatDefaults();
+
+Object.prototype[Symbol.isConcatSpreadable] = false;
+
+assertEquals([[], array], [].concat(array));
+assertEquals([array, []], array.concat([]));
+assertEquals([array, array], array.concat(array));
+assertEquals([[], object], [].concat(object));
+assertEquals([array, object], array.concat(object));
+assertEquals([object, []], Array.prototype.concat.call(object, []));
+assertEquals([object, array], Array.prototype.concat.call(object, array));
+assertEquals([object, object], Array.prototype.concat.call(object, object));
+
+Object.prototype[Symbol.isConcatSpreadable] = true;
+
+assertEquals(array, [].concat(array));
+assertEquals(array, array.concat([]));
+assertEquals([1, 2, 3, 1, 2, 3], array.concat(array));
+assertEquals(['a'], [].concat(object));
+assertEquals([1, 2, 3, 'a'], array.concat(object));
+assertEquals(['a'], Array.prototype.concat.call(object, []));
+assertEquals(['a', 1, 2, 3], Array.prototype.concat.call(object, array));
+assertEquals(['a', 'a'], Array.prototype.concat.call(object, object));
+
+delete Object.prototype[Symbol.isConcatSpreadable];
+testConcatDefaults();
diff --git a/deps/v8/test/mjsunit/harmony/async-arrow-lexical-arguments.js b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-arguments.js
new file mode 100644
index 0000000000..44d38a4275
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-arguments.js
@@ -0,0 +1,46 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
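+// assertEqualsAsync() drains the microtask queue with the %RunMicrotasks
+// natives-syntax intrinsic so that the promise returned by run() can settle,
+// then asserts on the settled value. The assertFalse() before the drain also
+// verifies that the promise did not settle synchronously.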
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+};
+
+assertEqualsAsync("[1,2,3]", () => (function() {
+ return (async () => JSON.stringify([...arguments]))();
+})(1, 2, 3));
+
+assertEqualsAsync("[4,5,6]",
+ () => (function() {
+ return (async () => {
+ return JSON.stringify([...await arguments]) })();
+ })(4, 5, 6));
diff --git a/deps/v8/test/mjsunit/harmony/async-arrow-lexical-new.target.js b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-new.target.js
new file mode 100644
index 0000000000..72b29e69e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-new.target.js
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+};
+
+class BaseClass {
+ constructor() {
+ return async () => new.target;
+ }
+}
+
+class ChildClass extends BaseClass {}
+
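+// The async arrow captures new.target lexically from the enclosing
+// constructor, so calling the returned closure later reports that target.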
+assertEqualsAsync(BaseClass, () => new BaseClass()());
+assertEqualsAsync(ChildClass, () => new ChildClass()());
diff --git a/deps/v8/test/mjsunit/harmony/async-arrow-lexical-super.js b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-super.js
new file mode 100644
index 0000000000..78f5d555b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-super.js
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+};
+
+class BaseClass {
+ constructor(x) {
+ this.name_ = x;
+ }
+ get name() { return this.name_; }
+};
+
+class DeferredSuperCall extends BaseClass {
+ constructor(x) {
+ return async() => super(x);
+ }
+};
+
+assertEqualsAsync(
+ "LexicalSuperCall",
+ () => new DeferredSuperCall("LexicalSuperCall")().then(x => x.name));
+
+
+class DeferredSuperProperty extends BaseClass {
+ deferredName() { return async() => super.name; }
+};
+
+assertEqualsAsync(
+ "LexicalSuperProperty",
+ () => new DeferredSuperProperty("LexicalSuperProperty").deferredName()());
diff --git a/deps/v8/test/mjsunit/harmony/async-arrow-lexical-this.js b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-this.js
new file mode 100644
index 0000000000..182db47a22
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-arrow-lexical-this.js
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+};
+
+var O = {
+ [Symbol.toStringTag]: "LexicalThis",
+ run(n) {
+ return async passFail => `${n}. ${passFail}: ${this}`;
+ },
+};
+
+assertEqualsAsync("1. PASS: [object LexicalThis]", () => O.run(1)("PASS"));
+
+var O2 = {
+ [Symbol.toStringTag]: "LexicalThis",
+ run: O.run(2)
+};
+
+assertEqualsAsync("2. PASS: [object LexicalThis]", () => O2.run("PASS"));
diff --git a/deps/v8/test/mjsunit/harmony/async-await-basic.js b/deps/v8/test/mjsunit/harmony/async-await-basic.js
new file mode 100644
index 0000000000..51572cdb9f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-await-basic.js
@@ -0,0 +1,380 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+// Do not install `AsyncFunction` constructor on global object
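+// (it is reachable only as the .constructor of an async function, which is
+// how the tests below obtain it)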
+
+function assertThrowsAsync(run, errorType, message) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (!hadError) {
+ throw new MjsUnitAssertionError(
+ "Expected " + run + "() to throw " + errorType.name +
+ ", but did not throw.");
+ }
+ if (!(actual instanceof errorType))
+ throw new MjsUnitAssertionError(
+ "Expected " + run + "() to throw " + errorType.name +
+ ", but threw '" + actual + "'");
+ if (message !== void 0 && actual.message !== message)
+ throw new MjsUnitAssertionError(
+ "Expected " + run + "() to throw '" + message + "', but threw '" +
+ actual.message + "'");
+};
+
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+};
+
+assertEquals(undefined, this.AsyncFunction);
+let AsyncFunction = (async function() {}).constructor;
+
+// The AsyncFunction Constructor is the %AsyncFunction% intrinsic object and
+// is a subclass of Function.
+// (https://tc39.github.io/ecmascript-asyncawait/#async-function-constructor)
+assertEquals(Object.getPrototypeOf(AsyncFunction), Function);
+assertEquals(Object.getPrototypeOf(AsyncFunction.prototype),
+ Function.prototype);
+assertTrue(async function() {} instanceof Function);
+
+
+// Let functionPrototype be the intrinsic object %AsyncFunctionPrototype%.
+async function asyncFunctionForProto() {}
+assertEquals(AsyncFunction.prototype,
+ Object.getPrototypeOf(asyncFunctionForProto));
+assertEquals(AsyncFunction.prototype,
+ Object.getPrototypeOf(async function() {}));
+assertEquals(AsyncFunction.prototype, Object.getPrototypeOf(async () => {}));
+assertEquals(AsyncFunction.prototype,
+ Object.getPrototypeOf({ async method() {} }.method));
+assertEquals(AsyncFunction.prototype, Object.getPrototypeOf(AsyncFunction()));
+assertEquals(AsyncFunction.prototype,
+ Object.getPrototypeOf(new AsyncFunction()));
+
+// AsyncFunctionCreate does not produce an object with a Prototype
+assertEquals(undefined, asyncFunctionForProto.prototype);
+assertEquals(false, asyncFunctionForProto.hasOwnProperty("prototype"));
+assertEquals(undefined, (async function() {}).prototype);
+assertEquals(false, (async function() {}).hasOwnProperty("prototype"));
+assertEquals(undefined, (async() => {}).prototype);
+assertEquals(false, (async() => {}).hasOwnProperty("prototype"));
+assertEquals(undefined, ({ async method() {} }).method.prototype);
+assertEquals(false, ({ async method() {} }).method.hasOwnProperty("prototype"));
+assertEquals(undefined, AsyncFunction().prototype);
+assertEquals(false, AsyncFunction().hasOwnProperty("prototype"));
+assertEquals(undefined, (new AsyncFunction()).prototype);
+assertEquals(false, (new AsyncFunction()).hasOwnProperty("prototype"));
+
+assertEquals(1, async function(a) { await 1; }.length);
+assertEquals(2, async function(a, b) { await 1; }.length);
+assertEquals(1, async function(a, b = 2) { await 1; }.length);
+assertEquals(2, async function(a, b, ...c) { await 1; }.length);
+
+assertEquals(1, (async(a) => await 1).length);
+assertEquals(2, (async(a, b) => await 1).length);
+assertEquals(1, (async(a, b = 2) => await 1).length);
+assertEquals(2, (async(a, b, ...c) => await 1).length);
+
+assertEquals(1, ({ async f(a) { await 1; } }).f.length);
+assertEquals(2, ({ async f(a, b) { await 1; } }).f.length);
+assertEquals(1, ({ async f(a, b = 2) { await 1; } }).f.length);
+assertEquals(2, ({ async f(a, b, ...c) { await 1; } }).f.length);
+
+assertEquals(1, AsyncFunction("a", "await 1").length);
+assertEquals(2, AsyncFunction("a", "b", "await 1").length);
+assertEquals(1, AsyncFunction("a", "b = 2", "await 1").length);
+assertEquals(2, AsyncFunction("a", "b", "...c", "await 1").length);
+
+assertEquals(1, (new AsyncFunction("a", "await 1")).length);
+assertEquals(2, (new AsyncFunction("a", "b", "await 1")).length);
+assertEquals(1, (new AsyncFunction("a", "b = 2", "await 1")).length);
+assertEquals(2, (new AsyncFunction("a", "b", "...c", "await 1")).length);
+
+// AsyncFunction.prototype[ @@toStringTag ]
+var descriptor =
+ Object.getOwnPropertyDescriptor(AsyncFunction.prototype,
+ Symbol.toStringTag);
+assertEquals("AsyncFunction", descriptor.value);
+assertEquals(false, descriptor.enumerable);
+assertEquals(false, descriptor.writable);
+assertEquals(true, descriptor.configurable);
+
+assertEquals(1, AsyncFunction.length);
+
+// Let F be ! FunctionAllocate(functionPrototype, Strict, "non-constructor")
+async function asyncNonConstructorDecl() {}
+assertThrows(
+ () => new asyncNonConstructorDecl(), TypeError);
+assertThrows(
+ () => new (async function() {}), TypeError);
+assertThrows(
+ () => new ({ async nonConstructor() {} }).nonConstructor(), TypeError);
+assertThrows(
+ () => new (() => "not a constructor!"), TypeError);
+assertThrows(
+ () => new (AsyncFunction()), TypeError);
+assertThrows(
+ () => new (new AsyncFunction()), TypeError);
+
+// Normal completion
+async function asyncDecl() { return "test"; }
+assertEqualsAsync("test", asyncDecl);
+assertEqualsAsync("test2", async function() { return "test2"; });
+assertEqualsAsync("test3", async () => "test3");
+assertEqualsAsync("test4", () => ({ async f() { return "test4"; } }).f());
+assertEqualsAsync("test5", () => AsyncFunction("no", "return 'test' + no;")(5));
+assertEqualsAsync("test6",
+ () => (new AsyncFunction("no", "return 'test' + no;"))(6));
+
+class MyError extends Error {};
+
+// Throw completion
+async function asyncDeclThrower(e) { throw new MyError(e); }
+assertThrowsAsync(() => asyncDeclThrower("boom!"), MyError, "boom!");
+assertThrowsAsync(
+ () => (async function(e) { throw new MyError(e); })("boom!!!"),
+ MyError, "boom!!!");
+assertThrowsAsync(
+ () => (async e => { throw new MyError(e) })("boom!!"), MyError, "boom!!");
+assertThrowsAsync(
+ () => ({ async thrower(e) { throw new MyError(e); } }).thrower("boom!1!"),
+ MyError, "boom!1!");
+assertThrowsAsync(
+ () => AsyncFunction("msg", "throw new MyError(msg)")("boom!2!!"),
+ MyError, "boom!2!!");
+assertThrowsAsync(
+ () => (new AsyncFunction("msg", "throw new MyError(msg)"))("boom!2!!!"),
+ MyError, "boom!2!!!");
+
+function resolveLater(value) { return Promise.resolve(value); }
+function rejectLater(error) { return Promise.reject(error); }
+
+// Resume after Normal completion
+var log = [];
+async function resumeAfterNormal(value) {
+ log.push("start:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ return value + 1;
+}
+
+assertEqualsAsync(4, () => resumeAfterNormal(1));
+assertEquals("start:1 resume:2 resume:3", log.join(" "));
+
+var O = {
+ async resumeAfterNormal(value) {
+ log.push("start:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ return value + 1;
+ }
+};
+log = [];
+assertEqualsAsync(5, () => O.resumeAfterNormal(2));
+assertEquals("start:2 resume:3 resume:4", log.join(" "));
+
+var resumeAfterNormalArrow = async (value) => {
+ log.push("start:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ return value + 1;
+};
+log = [];
+assertEqualsAsync(6, () => resumeAfterNormalArrow(3));
+assertEquals("start:3 resume:4 resume:5", log.join(" "));
+
+var resumeAfterNormalEval = AsyncFunction("value", `
+ log.push("start:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ return value + 1;`);
+log = [];
+assertEqualsAsync(7, () => resumeAfterNormalEval(4));
+assertEquals("start:4 resume:5 resume:6", log.join(" "));
+
+var resumeAfterNormalNewEval = new AsyncFunction("value", `
+ log.push("start:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ value = await resolveLater(value + 1);
+ log.push("resume:" + value);
+ return value + 1;`);
+log = [];
+assertEqualsAsync(8, () => resumeAfterNormalNewEval(5));
+assertEquals("start:5 resume:6 resume:7", log.join(" "));
+
+// Resume after Throw completion
+async function resumeAfterThrow(value) {
+ log.push("start:" + value);
+ try {
+ value = await rejectLater("throw1");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ try {
+ value = await rejectLater("throw2");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ return value + 1;
+}
+
+log = [];
+assertEqualsAsync(2, () => resumeAfterThrow(1));
+assertEquals("start:1 resume:throw1 resume:throw2", log.join(" "));
+
+var O = {
+ async resumeAfterThrow(value) {
+ log.push("start:" + value);
+ try {
+ value = await rejectLater("throw1");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ try {
+ value = await rejectLater("throw2");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ return value + 1;
+ }
+}
+log = [];
+assertEqualsAsync(3, () => O.resumeAfterThrow(2));
+assertEquals("start:2 resume:throw1 resume:throw2", log.join(" "));
+
+var resumeAfterThrowArrow = async (value) => {
+ log.push("start:" + value);
+ try {
+ value = await rejectLater("throw1");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ try {
+ value = await rejectLater("throw2");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ return value + 1;
+};
+
+log = [];
+
+assertEqualsAsync(4, () => resumeAfterThrowArrow(3));
+assertEquals("start:3 resume:throw1 resume:throw2", log.join(" "));
+
+var resumeAfterThrowEval = AsyncFunction("value", `
+ log.push("start:" + value);
+ try {
+ value = await rejectLater("throw1");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ try {
+ value = await rejectLater("throw2");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ return value + 1;`);
+log = [];
+assertEqualsAsync(5, () => resumeAfterThrowEval(4));
+assertEquals("start:4 resume:throw1 resume:throw2", log.join(" "));
+
+var resumeAfterThrowNewEval = new AsyncFunction("value", `
+ log.push("start:" + value);
+ try {
+ value = await rejectLater("throw1");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ try {
+ value = await rejectLater("throw2");
+ } catch (e) {
+ log.push("resume:" + e);
+ }
+ return value + 1;`);
+log = [];
+assertEqualsAsync(6, () => resumeAfterThrowNewEval(5));
+assertEquals("start:5 resume:throw1 resume:throw2", log.join(" "));
+
+async function foo() {}
+assertEquals("async function foo() {}", foo.toString());
+assertEquals("async function () {}", async function () {}.toString());
+assertEquals("async x => x", (async x => x).toString());
+assertEquals("async x => { return x }", (async x => { return x }).toString());
+class AsyncMethod { async foo() { } }
+assertEquals("async foo() { }",
+ Function.prototype.toString.call(AsyncMethod.prototype.foo));
+assertEquals("async foo() { }",
+ Function.prototype.toString.call({async foo() { }}.foo));
+
+// Async functions are not constructible
+assertThrows(() => class extends (async function() {}) {}, TypeError);
+
+// Regress v8:5148
+assertEqualsAsync("1", () => (async({ a = NaN }) => a)({ a: "1" }));
+assertEqualsAsync(
+ "10", () => (async(foo, { a = NaN }) => foo + a)("1", { a: "0" }));
+assertEqualsAsync("2", () => (async({ a = "2" }) => a)({ a: undefined }));
+assertEqualsAsync(
+ "20", () => (async(foo, { a = "0" }) => foo + a)("2", { a: undefined }));
+assertThrows(() => eval("async({ foo = 1 })"), SyntaxError);
+assertThrows(() => eval("async(a, { foo = 1 })"), SyntaxError);
+
+// https://bugs.chromium.org/p/chromium/issues/detail?id=638019
+async function gaga() {
+ let i = 1;
+ while (i-- > 0) { await 42 }
+}
+assertDoesNotThrow(gaga);
diff --git a/deps/v8/test/mjsunit/harmony/async-await-no-constructor.js b/deps/v8/test/mjsunit/harmony/async-await-no-constructor.js
new file mode 100644
index 0000000000..30020019a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-await-no-constructor.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+'use strict';
+
+var resolved = Promise.resolve();
+var count = 0;
+
+Object.defineProperty(Promise.prototype, 'constructor',
+ { get() { count++; return Promise; } })
+
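+// Neither awaiting a promise nor returning one from an async function may
+// read its .constructor property, so the counting getter must never fire.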
+async function foo() {
+ await resolved;
+ return resolved;
+}
+
+async function bar() {
+ throw 1;
+}
+
+foo();
+bar();
+%RunMicrotasks();
+assertEquals(0, count);
diff --git a/deps/v8/test/mjsunit/harmony/async-await-resolve-new.js b/deps/v8/test/mjsunit/harmony/async-await-resolve-new.js
new file mode 100644
index 0000000000..0711c95873
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-await-resolve-new.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+
+var resolved = Promise.resolve();
+
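+// An async function always wraps its return value in a fresh promise; an
+// already-resolved promise is not passed through unchanged.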
+assertTrue((async() => resolved)() !== resolved);
diff --git a/deps/v8/test/mjsunit/harmony/async-await-species.js b/deps/v8/test/mjsunit/harmony/async-await-species.js
new file mode 100644
index 0000000000..bc3db83fdf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-await-species.js
@@ -0,0 +1,103 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+};
+
+// Rename a function so that it can help omit things from stack trace.
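+// The "@" prefix marks test frames so that getStack() below can trim
+// everything that precedes them from the captured stack.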
+function test(fn) {
+ return Object.defineProperty(fn, "name", {
+ enumerable: false,
+ configurable: true,
+ value: "@" + fn.name,
+ writable: false
+ });
+}
+
+function getStack(error) {
+ var stack = error.stack.split('\n').
+ filter(function(line) {
+ return /^\s*at @?[a-zA-Z0-9_]/.test(line);
+ }).
+ map(line => line.replace(/^\s*at (@?[a-zA-Z0-9_\.\[\]]+)(.*)/, "$1"));
+
+ // remove `Promise.then()` invocation by assertEqualsAsync()
+ if (stack[2] === "assertEqualsAsync") return [];
+
+ return stack.reverse();
+}
+
+var log = [];
+class FakePromise extends Promise {
+ constructor(executor) {
+ var stack = getStack(new Error("Getting Callstack"));
+ if (stack.length) {
+ var first = -1;
+ for (var i = 0; i < stack.length; ++i) {
+ if (stack[i][0] === '@') {
+ first = i;
+ break;
+ }
+ }
+ while (first > 0) stack.shift(), --first;
+ if (stack.length) {
+ log.push("@@Species: [" + stack.join(" > ") + "]");
+ }
+ }
+ return new Promise(executor);
+ }
+};
+
+Object.defineProperty(Promise, Symbol.species, {
+ value: FakePromise,
+ configurable: true,
+ enumerable: false,
+ writable: false
+});
+
+// Internal `AsyncFunctionAwait` only --- no @@species invocations.
+async function asyncFn() { return await "foo"; }
+assertEqualsAsync("foo", test(function testInternalOnly() { return asyncFn(); },
+ "should not call Promise[@@Species]"));
+assertEquals([], log);
+
+log.length = 0;
+assertEqualsAsync(
+ "foo",
+ test(function testThenOnReturnedPromise() {
+ return asyncFn().then(x => (log.push("Then: " + x), x));
+ }),
+ "should call Promise[@@Species] after non-internal Then");
+assertEquals([
+ "@@Species: [@testThenOnReturnedPromise > Promise.then > FakePromise]",
+ "Then: foo"
+], log);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-basic.js b/deps/v8/test/mjsunit/harmony/async-debug-basic.js
new file mode 100644
index 0000000000..a4909729c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-basic.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+var Debug = debug.Debug;
+
+var listenerDone = false;
+var breakpointCount = 0;
+var exception = false;
+
+async function f() {
+ await (async function() { var a = "a"; await 1; debugger; })();
+
+ var b = "b";
+
+ assertTrue(listenerDone);
+ assertFalse(exception);
+ assertEquals(1, breakpointCount);
+}
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event != Debug.DebugEvent.Break) return;
+
+ breakpointCount++;
+ listenerDone = true;
+ assertEquals("a", exec_state.frame(0).evaluate("a"));
+ assertEquals("b", exec_state.frame(1).evaluate("b"));
+ assertEquals("c", exec_state.frame(2).evaluate("c"));
+ } catch (e) {
+ exception = e;
+ };
+};
+
+Debug.setListener(listener);
+
+var c = "c";
+f();
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js
new file mode 100644
index 0000000000..b2ae18437d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js
@@ -0,0 +1,89 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var exception = null;
+var log;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ var line = exec_state.frame(0).sourceLineText();
+ var match = /Exception (\w)/.exec(line);
+ assertNotNull(match);
+ log.push(match[1]);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+async function thrower() {
+ throw "a"; // Exception a
+}
+
+async function caught_throw() {
+ try {
+ await thrower();
+ } catch (e) {
+ assertEquals("a", e);
+ }
+}
+
+
+// Caught throw, events on any exception.
+log = [];
+Debug.setListener(listener);
+Debug.setBreakOnException();
+caught_throw();
+%RunMicrotasks();
+Debug.setListener(null);
+Debug.clearBreakOnException();
+assertEquals(["a"], log);
+assertNull(exception);
+
+// Caught throw, events on uncaught exception.
+log = [];
+Debug.setListener(listener);
+Debug.setBreakOnUncaughtException();
+caught_throw();
+%RunMicrotasks();
+Debug.setListener(null);
+Debug.clearBreakOnUncaughtException();
+assertEquals([], log);
+assertNull(exception);
+
+var reject = Promise.reject("b");
+
+async function caught_reject() {
+ try {
+ await reject;
+ } catch (e) {
+ assertEquals("b", e);
+ }
+}
+
+// Caught reject, events on any exception.
+log = [];
+Debug.setListener(listener);
+Debug.setBreakOnException();
+caught_reject();
+%RunMicrotasks();
+Debug.setListener(null);
+Debug.clearBreakOnException();
+assertEquals([], log);
+assertNull(exception);
+
+// Caught reject, events on uncaught exception.
+log = [];
+Debug.setListener(listener);
+Debug.setBreakOnUncaughtException();
+caught_reject();
+%RunMicrotasks();
+Debug.setListener(null);
+Debug.clearBreakOnUncaughtException();
+assertEquals([], log);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-abort-at-break.js b/deps/v8/test/mjsunit/harmony/async-debug-step-abort-at-break.js
new file mode 100644
index 0000000000..be1f8056a8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-abort-at-break.js
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
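+// Expected break sites below carry a trailing "// B<n> <StepAction>"
+// annotation: <n> is the order in which the break must occur, and
+// <StepAction> is the step action the listener issues next ("Continue"
+// simply resumes execution).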
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise( // B3 StepOut
+ function(res, rej) {
+ late_resolve = res;
+ }
+ );
+}
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepNext
+ await // B4 StepNext
+ g(); // B2 StepIn
+ return a;
+}
+
+f();
+
+// Starting a new step action at an intermediate break point
+// means that we will abort the current async step.
+debugger; // B5 StepNext
+
+late_resolve(3); // B6 Continue
+
+%RunMicrotasks();
+
+assertEquals(7, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-continue-at-break.js b/deps/v8/test/mjsunit/harmony/async-debug-step-continue-at-break.js
new file mode 100644
index 0000000000..5099b2f53e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-continue-at-break.js
@@ -0,0 +1,55 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise( // B3 StepOut
+ function(res, rej) {
+ late_resolve = res;
+ }
+ );
+}
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepNext
+ await // B4 StepNext
+ g(); // B2 StepIn
+ return a; // B6 StepNext
+} // B7 Continue
+
+f();
+
+// Continuing at an intermediate break point means that we will
+// carry on with the current async step.
+debugger; // B5 Continue
+
+late_resolve(3);
+
+%RunMicrotasks();
+
+assertEquals(8, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-in-and-out.js b/deps/v8/test/mjsunit/harmony/async-debug-step-in-and-out.js
new file mode 100644
index 0000000000..30fe2d6053
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-in-and-out.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise( // B3 StepOut
+ function(res, rej) {
+ late_resolve = res;
+ }
+ );
+}
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepNext
+ await // B4 StepNext
+ g(); // B2 StepIn
+ return a; // B5 StepNext
+} // B6 Continue
+
+f();
+
+late_resolve(3);
+
+%RunMicrotasks();
+
+assertEquals(7, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-in-out-out.js b/deps/v8/test/mjsunit/harmony/async-debug-step-in-out-out.js
new file mode 100644
index 0000000000..c2f34bb029
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-in-out-out.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise( // B3 StepOut
+ function(res, rej) {
+ late_resolve = res;
+ }
+ );
+}
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepNext
+ await // B4 StepOut
+ g(); // B2 StepIn
+ return a;
+}
+
+f();
+
+late_resolve(3); // B5 Continue
+
+%RunMicrotasks();
+
+assertEquals(6, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-in.js b/deps/v8/test/mjsunit/harmony/async-debug-step-in.js
new file mode 100644
index 0000000000..0a7de1a2a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-in.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise( // B3 StepIn
+ function(res, rej) {
+ late_resolve = res; // B4 StepIn
+ } // B5 StepIn
+ );
+} // B6 StepIn
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepIn
+ await // B7 StepIn
+ g(); // B2 StepIn
+ return a; // B8 StepIn
+} // B9 Continue
+
+f().then(value => assertEquals(4, value));
+
+late_resolve(3);
+
+%RunMicrotasks();
+
+assertEquals(10, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-nested.js b/deps/v8/test/mjsunit/harmony/async-debug-step-nested.js
new file mode 100644
index 0000000000..adf7a51432
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-nested.js
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise( // B4 StepOut
+ function(res, rej) {
+ late_resolve = res;
+ }
+ );
+}
+
+async function f1() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepNext
+ await // B6 StepNext
+ f2(); // B2 StepIn
+ return a; // B7 StepNext
+} // B8 Continue
+
+async function f2() {
+ var b =
+ await // B5 StepOut
+ g(); // B3 StepIn
+ return b;
+}
+
+f1();
+
+late_resolve(3);
+
+%RunMicrotasks();
+
+assertEquals(9, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-next-constant.js b/deps/v8/test/mjsunit/harmony/async-debug-step-next-constant.js
new file mode 100644
index 0000000000..cea86d7a2f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-next-constant.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepNext
+ await // B3 StepNext
+ 5; // B2 StepNext
+ return a; // B4 StepNext
+} // B5 Continue
+
+f();
+
+%RunMicrotasks();
+
+assertEquals(6, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-next.js b/deps/v8/test/mjsunit/harmony/async-debug-step-next.js
new file mode 100644
index 0000000000..952d88dd85
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-next.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise(
+ function(res, rej) {
+ late_resolve = res;
+ }
+ );
+}
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += // B1 StepNext
+ await // B3 StepNext
+ g(); // B2 StepNext
+ return a; // B4 StepNext
+} // B5 Continue
+
+f();
+
+late_resolve(3);
+
+%RunMicrotasks();
+
+assertEquals(6, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-out.js b/deps/v8/test/mjsunit/harmony/async-debug-step-out.js
new file mode 100644
index 0000000000..41779acb54
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-debug-step-out.js
@@ -0,0 +1,49 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var late_resolve;
+
+function g() {
+ return new Promise(
+ function(res, rej) {
+ late_resolve = res;
+ }
+ );
+}
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ a += await g(); // B1 StepOut
+ return a;
+}
+
+f();
+
+late_resolve(3); // B2 Continue
+
+%RunMicrotasks();
+
+assertEquals(3, step_count);
diff --git a/deps/v8/test/mjsunit/harmony/async-destructuring.js b/deps/v8/test/mjsunit/harmony/async-destructuring.js
new file mode 100644
index 0000000000..95dbc18c7b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-destructuring.js
@@ -0,0 +1,518 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --allow-natives-syntax
+
+function assertThrowsAsync(run, errorType, message) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (!hadError) {
+ throw new MjsUnitAssertionError(
+ "Expected " + run + "() to throw " + errorType.name +
+ ", but did not throw.");
+ }
+ if (!(actual instanceof errorType))
+ throw new MjsUnitAssertionError(
+ "Expected " + run + "() to throw " + errorType.name +
+ ", but threw '" + actual + "'");
+ if (message !== void 0 && actual.message !== message)
+ throw new MjsUnitAssertionError(
+ "Expected " + run + "() to throw '" + message + "', but threw '" +
+ actual.message + "'");
+};
+
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + PrettyPrint(promise));
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+};
+
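+// An async function runs synchronously up to its first await; the remainder
+// runs in a microtask. The test below relies on this: y and z are set as
+// soon as f1() is called, while w is set only after %RunMicrotasks().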
+(function TestDefaultEvaluationOrder() {
+ var y = 0;
+ var z = 0;
+ var w = 0;
+ async function f1(x = (y = 1)) { z = 1; await undefined; w = 1; };
+ assertEquals(0, y);
+ assertEquals(0, z);
+ assertEquals(0, w);
+ f1();
+ assertEquals(1, y);
+ assertEquals(1, z);
+ assertEquals(0, w);
+ %RunMicrotasks();
+ assertEquals(1, y);
+ assertEquals(1, z);
+ assertEquals(1, w);
+})();
+
+(function TestShadowingOfParameters() {
+ async function f1({x}) { var x = 2; return x }
+ assertEqualsAsync(2, () => f1({x: 1}));
+ async function f2({x}) { { var x = 2; } return x; }
+ assertEqualsAsync(2, () => f2({x: 1}));
+ async function f3({x}) { var y = x; var x = 2; return y; }
+ assertEqualsAsync(1, () => f3({x: 1}));
+ async function f4({x}) { { var y = x; var x = 2; } return y; }
+ assertEqualsAsync(1, () => f4({x: 1}));
+ async function f5({x}, g = () => x) { var x = 2; return g(); }
+ assertEqualsAsync(1, () => f5({x: 1}));
+ async function f6({x}, g = () => x) { { var x = 2; } return g(); }
+ assertEqualsAsync(1, () => f6({x: 1}));
+ async function f7({x}) { var g = () => x; var x = 2; return g(); }
+ assertEqualsAsync(2, () => f7({x: 1}));
+ async function f8({x}) { { var g = () => x; var x = 2; } return g(); }
+ assertEqualsAsync(2, () => f8({x: 1}));
+ async function f9({x}, g = () => eval("x")) { var x = 2; return g(); }
+ assertEqualsAsync(1, () => f9({x: 1}));
+
+ async function f10({x}, y) { var y; return y }
+ assertEqualsAsync(2, () => f10({x: 6}, 2));
+ async function f11({x}, y) { var z = y; var y = 2; return z; }
+ assertEqualsAsync(1, () => f11({x: 6}, 1));
+ async function f12(y, g = () => y) { var y = 2; return g(); }
+ assertEqualsAsync(1, () => f12(1));
+ async function f13({x}, y, [z], v) { var x, y, z; return x*y*z*v }
+ assertEqualsAsync(210, () => f13({x: 2}, 3, [5], 7));
+
+ async function f20({x}) { function x() { return 2 }; return x(); }
+ assertEqualsAsync(2, () => f20({x: 1}));
+ // Annex B 3.3 function hoisting is blocked by the conflicting x declaration
+ async function f21({x}) { { function x() { return 2 } } return x; }
+ assertEqualsAsync(1, () => f21({x: 1}));
+
+ var g1 = async ({x}) => { var x = 2; return x };
+ assertEqualsAsync(2, () => g1({x: 1}));
+ var g2 = async ({x}) => { { var x = 2; } return x; };
+ assertEqualsAsync(2, () => g2({x: 1}));
+ var g3 = async ({x}) => { var y = x; var x = 2; return y; };
+ assertEqualsAsync(1, () => g3({x: 1}));
+ var g4 = async ({x}) => { { var y = x; var x = 2; } return y; };
+ assertEqualsAsync(1, () => g4({x: 1}));
+ var g5 = async ({x}, g = () => x) => { var x = 2; return g(); };
+ assertEqualsAsync(1, () => g5({x: 1}));
+ var g6 = async ({x}, g = () => x) => { { var x = 2; } return g(); };
+ assertEqualsAsync(1, () => g6({x: 1}));
+ var g7 = async ({x}) => { var g = () => x; var x = 2; return g(); };
+ assertEqualsAsync(2, () => g7({x: 1}));
+ var g8 = async ({x}) => { { var g = () => x; var x = 2; } return g(); };
+ assertEqualsAsync(2, () => g8({x: 1}));
+ var g9 = async ({x}, g = () => eval("x")) => { var x = 2; return g(); };
+ assertEqualsAsync(1, () => g9({x: 1}));
+
+ var g10 = async ({x}, y) => { var y; return y };
+ assertEqualsAsync(2, () => g10({x: 6}, 2));
+ var g11 = async ({x}, y) => { var z = y; var y = 2; return z; };
+ assertEqualsAsync(1, () => g11({x: 6}, 1));
+ var g12 = async (y, g = () => y) => { var y = 2; return g(); };
+ assertEqualsAsync(1, () => g12(1));
+ var g13 = async ({x}, y, [z], v) => { var x, y, z; return x*y*z*v };
+ assertEqualsAsync(210, () => g13({x: 2}, 3, [5], 7));
+
+ var g20 = async ({x}) => { function x() { return 2 }; return x(); }
+ assertEqualsAsync(2, () => g20({x: 1}));
+ var g21 = async ({x}) => { { function x() { return 2 } } return x(); }
+ assertThrowsAsync(() => g21({x: 1}), TypeError);
+
+ assertThrows("'use strict'; async function f(x) { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; async function f({x}) { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; async function f(x) { const x = 0; }", SyntaxError);
+ assertThrows("'use strict'; async function f({x}) { const x = 0; }", SyntaxError);
+
+ assertThrows("'use strict'; let g = async (x) => { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; let g = async ({x}) => { let x = 0; }", SyntaxError);
+ assertThrows("'use strict'; let g = async (x) => { const x = 0; }", SyntaxError);
+ assertThrows("'use strict'; let g = async ({x}) => { const x = 0; }", SyntaxError);
+}());
+
+(function TestDefaults() {
+ async function f1(x = 1) { return x }
+ assertEqualsAsync(1, () => f1());
+ assertEqualsAsync(1, () => f1(undefined));
+ assertEqualsAsync(2, () => f1(2));
+ assertEqualsAsync(null, () => f1(null));
+
+ async function f2(x, y = x) { return x + y; }
+ assertEqualsAsync(8, () => f2(4));
+ assertEqualsAsync(8, () => f2(4, undefined));
+ assertEqualsAsync(6, () => f2(4, 2));
+
+ async function f3(x = 1, y) { return x + y; }
+ assertEqualsAsync(8, () => f3(5, 3));
+ assertEqualsAsync(3, () => f3(undefined, 2));
+ assertEqualsAsync(6, () => f3(4, 2));
+
+ async function f4(x = () => 1) { return x() }
+ assertEqualsAsync(1, () => f4());
+ assertEqualsAsync(1, () => f4(undefined));
+ assertEqualsAsync(2, () => f4(() => 2));
+ assertThrowsAsync(() => f4(null), TypeError);
+
+ async function f5(x, y = () => x) { return x + y(); }
+ assertEqualsAsync(8, () => f5(4));
+ assertEqualsAsync(8, () => f5(4, undefined));
+ assertEqualsAsync(6, () => f5(4, () => 2));
+
+ async function f6(x = {a: 1, m() { return 2 }}) { return x.a + x.m(); }
+ assertEqualsAsync(3, () => f6());
+ assertEqualsAsync(3, () => f6(undefined));
+ assertEqualsAsync(5, () => f6({a: 2, m() { return 3 }}));
+
+ var g1 = async (x = 1) => { return x };
+ assertEqualsAsync(1, () => g1());
+ assertEqualsAsync(1, () => g1(undefined));
+ assertEqualsAsync(2, () => g1(2));
+ assertEqualsAsync(null, () => g1(null));
+
+ var g2 = async (x, y = x) => { return x + y; };
+ assertEqualsAsync(8, () => g2(4));
+ assertEqualsAsync(8, () => g2(4, undefined));
+ assertEqualsAsync(6, () => g2(4, 2));
+
+ var g3 = async (x = 1, y) => { return x + y; };
+ assertEqualsAsync(8, () => g3(5, 3));
+ assertEqualsAsync(3, () => g3(undefined, 2));
+ assertEqualsAsync(6, () => g3(4, 2));
+
+ var g4 = async (x = () => 1) => { return x() };
+ assertEqualsAsync(1, () => g4());
+ assertEqualsAsync(1, () => g4(undefined));
+ assertEqualsAsync(2, () => g4(() => 2));
+ assertThrowsAsync(() => g4(null), TypeError);
+
+ var g5 = async (x, y = () => x) => { return x + y(); };
+ assertEqualsAsync(8, () => g5(4));
+ assertEqualsAsync(8, () => g5(4, undefined));
+ assertEqualsAsync(6, () => g5(4, () => 2));
+
+ var g6 = async (x = {a: 1, m() { return 2 }}) => { return x.a + x.m(); };
+ assertEqualsAsync(3, () => g6());
+ assertEqualsAsync(3, () => g6(undefined));
+ assertEqualsAsync(5, () => g6({a: 2, m() { return 3 }}));
+}());
+
+
+(function TestEvalInParameters() {
+ async function f1(x = eval(0)) { return x }
+ assertEqualsAsync(0, f1);
+ async function f2(x = () => eval(1)) { return x() }
+ assertEqualsAsync(1, f2);
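+ // Note: eval returns a non-string argument unchanged, so eval(0) is just 0.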
+})();
+
+
+(function TestParameterScopingSloppy() {
+ var x = 1;
+
+ async function f1(a = x) { var x = 2; return a; }
+ assertEqualsAsync(1, f1);
+ async function f2(a = x) { function x() {}; return a; }
+ assertEqualsAsync(1, () => f2());
+ async function f3(a = eval("x")) { var x; return a; }
+ assertEqualsAsync(1, () => f3());
+ async function f31(a = eval("'use strict'; x")) { var x; return a; }
+ assertEqualsAsync(1, () => f31());
+ async function f4(a = function() { return x }) { var x; return a(); }
+ assertEqualsAsync(1, () => f4());
+ async function f5(a = () => x) { var x; return a(); }
+ assertEqualsAsync(1, () => f5());
+ async function f6(a = () => eval("x")) { var x; return a(); }
+ assertEqualsAsync(1, () => f6());
+ async function f61(a = () => { 'use strict'; return eval("x") }) { var x; return a(); }
+ assertEqualsAsync(1, () => f61());
+ async function f62(a = () => eval("'use strict'; x")) { var x; return a(); }
+ assertEqualsAsync(1, () => f62());
+
+ var g1 = async (a = x) => { var x = 2; return a; };
+ assertEqualsAsync(1, () => g1());
+ var g2 = async (a = x) => { function x() {}; return a; };
+ assertEqualsAsync(1, () => g2());
+ var g3 = async (a = eval("x")) => { var x; return a; };
+ assertEqualsAsync(1, g3);
+ var g31 = async (a = eval("'use strict'; x")) => { var x; return a; };
+ assertEqualsAsync(1, () => g31());
+ var g4 = async (a = function() { return x }) => { var x; return a(); };
+ assertEqualsAsync(1, () => g4());
+ var g5 = async (a = () => x) => { var x; return a(); };
+ assertEqualsAsync(1, () => g5());
+ var g6 = async (a = () => eval("x")) => { var x; return a(); };
+ assertEqualsAsync(1, () => g6());
+ var g61 = async (a = () => { 'use strict'; return eval("x") }) => { var x; return a(); };
+ assertEqualsAsync(1, () => g61());
+ var g62 = async (a = () => eval("'use strict'; x")) => { var x; return a(); };
+ assertEqualsAsync(1, () => g62());
+
+ var f11 = async function f(x = f) { var f; return x; }
+ assertEqualsAsync(f11, f11);
+ var f12 = async function f(x = f) { function f() {}; return x; }
+ assertEqualsAsync(f12, f12);
+ var f13 = async function f(f = 7, x = f) { return x; }
+ assertEqualsAsync(7, f13);
+
+ var o1 = {f: async function(x = this) { return x; }};
+ assertEqualsAsync(o1, () => o1.f());
+ assertEqualsAsync(1, () => o1.f(1));
+})();
+
+(function TestParameterScopingStrict() {
+ "use strict";
+ var x = 1;
+
+ async function f1(a = x) { let x = 2; return a; }
+ assertEqualsAsync(1, () => f1());
+ async function f2(a = x) { const x = 2; return a; }
+ assertEqualsAsync(1, () => f2());
+ async function f3(a = x) { function x() {}; return a; }
+ assertEqualsAsync(1, () => f3());
+ async function f4(a = eval("x")) { var x; return a; }
+ assertEqualsAsync(1, () => f4());
+ async function f5(a = () => eval("x")) { var x; return a(); }
+ assertEqualsAsync(1, () => f5());
+
+ var g1 = async (a = x) => { let x = 2; return a; };
+ assertEqualsAsync(1, () => g1());
+ var g2 = async (a = x) => { const x = 2; return a; };
+ assertEqualsAsync(1, () => g2());
+ var g3 = async (a = x) => { function x() {}; return a; };
+ assertEqualsAsync(1, () => g3());
+ var g4 = async (a = eval("x")) => { var x; return a; };
+ assertEqualsAsync(1, () => g4());
+ var g5 = async (a = () => eval("x")) => { var x; return a(); };
+ assertEqualsAsync(1, () => g5());
+
+ var f11 = async function f(x = f) { let f; return x; }
+ assertEqualsAsync(f11, f11);
+ var f12 = async function f(x = f) { const f = 0; return x; }
+ assertEqualsAsync(f12, f12);
+ var f13 = async function f(x = f) { function f() {}; return x; }
+ assertEqualsAsync(f13, f13);
+})();
+
+(function TestSloppyEvalScoping() {
+ var x = 1;
+
+ async function f1(y = eval("var x = 2")) { with ({}) { return x; } }
+ assertEqualsAsync(1, () => f1());
+ async function f2(y = eval("var x = 2"), z = x) { return z; }
+ assertEqualsAsync(1, () => f2());
+ assertEqualsAsync(1, () => f2(0));
+ async function f3(y = eval("var x = 2"), z = eval("x")) { return z; }
+ assertEqualsAsync(1, () => f3());
+ assertEqualsAsync(1, () => f3(0));
+ async function f8(y = (eval("var x = 2"), x)) { return y; }
+ assertEqualsAsync(2, () => f8());
+ assertEqualsAsync(0, () => f8(0));
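+
+ // In sloppy mode, a direct eval in a default initializer introduces its
+ // "var" into that initializer's own scope: f2 and f3 still see the outer x,
+ // while f8 observes the eval-created x within the same initializer.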
+
+ async function f11(z = eval("var y = 2")) { return y; }
+ assertThrowsAsync(f11, ReferenceError);
+ async function f12(z = eval("var y = 2"), b = y) {}
+ assertThrowsAsync(f12, ReferenceError);
+ async function f13(z = eval("var y = 2"), b = eval("y")) {}
+ assertThrowsAsync(f13, ReferenceError);
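+ // The eval-created "var y" is likewise invisible to the function body and
+ // to later initializers, so reading y throws a ReferenceError.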
+
+ async function f21(f = () => x) { eval("var x = 2"); return f() }
+ assertEqualsAsync(1, () => f21());
+ assertEqualsAsync(3, () => f21(() => 3));
+ async function f22(f = () => eval("x")) { eval("var x = 2"); return f() }
+ assertEqualsAsync(1, () => f22());
+ assertEqualsAsync(3, () => f22(() => 3));
+
+ var g1 = async (y = eval("var x = 2")) => { with ({}) { return x; } };
+ assertEqualsAsync(1, () => g1());
+ var g2 = async (y = eval("var x = 2"), z = x) => { return z; };
+ assertEqualsAsync(1, () => g2());
+ assertEqualsAsync(1, () => g2(0));
+ var g3 = async (y = eval("var x = 2"), z = eval("x")) => { return z; };
+ assertEqualsAsync(1, () => g3());
+ assertEqualsAsync(1, () => g3(0));
+ var g8 = async (y = (eval("var x = 2"), x)) => { return y; };
+ assertEqualsAsync(2, () => g8());
+ assertEqualsAsync(0, () => g8(0));
+
+ var g11 = async (z = eval("var y = 2")) => { return y; };
+ assertThrowsAsync(g11, ReferenceError);
+ var g12 = async (z = eval("var y = 2"), b = y) => {};
+ assertThrowsAsync(g12, ReferenceError);
+ var g13 = async (z = eval("var y = 2"), b = eval("y")) => {};
+ assertThrowsAsync(g13, ReferenceError);
+
+ var g21 = async (f = () => x) => { eval("var x = 2"); return f() };
+ assertEqualsAsync(1, () => g21());
+ assertEqualsAsync(3, () => g21(() => 3));
+ var g22 = async (f = () => eval("x")) => { eval("var x = 2"); return f() };
+ assertEqualsAsync(1, () => g22());
+ assertEqualsAsync(3, () => g22(() => 3));
+})();
+
+
+(function TestStrictEvalScoping() {
+ 'use strict';
+ var x = 1;
+
+ async function f1(y = eval("var x = 2")) { return x; }
+ assertEqualsAsync(1, () => f1());
+ async function f2(y = eval("var x = 2"), z = x) { return z; }
+ assertEqualsAsync(1, () => f2());
+ assertEqualsAsync(1, () => f2(0));
+ async function f3(y = eval("var x = 2"), z = eval("x")) { return z; }
+ assertEqualsAsync(1, () => f3());
+ assertEqualsAsync(1, () => f3(0));
+ async function f8(y = (eval("var x = 2"), x)) { return y; }
+ assertEqualsAsync(1, () => f8());
+ assertEqualsAsync(0, () => f8(0));
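+ // In strict mode the eval gets its own variable environment, so even within
+ // the same initializer f8 still sees the outer x (contrast with the
+ // sloppy-mode f8 above).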
+
+ async function f11(z = eval("var y = 2")) { return y; }
+ assertThrowsAsync(f11, ReferenceError);
+ async function f12(z = eval("var y = 2"), b = y) {}
+ assertThrowsAsync(f12, ReferenceError);
+ async function f13(z = eval("var y = 2"), b = eval("y")) {}
+ assertThrowsAsync(f13, ReferenceError);
+
+ async function f21(f = () => x) { eval("var x = 2"); return f() }
+ assertEqualsAsync(1, () => f21());
+ assertEqualsAsync(3, () => f21(() => 3));
+ async function f22(f = () => eval("x")) { eval("var x = 2"); return f() }
+ assertEqualsAsync(1, () => f22());
+ assertEqualsAsync(3, () => f22(() => 3));
+})();
+
+(function TestParameterTDZSloppy() {
+ async function f1(a = x, x) { return a }
+ assertThrowsAsync(() => f1(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f1(4, 5));
+ async function f2(a = eval("x"), x) { return a }
+ assertThrowsAsync(() => f2(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f2(4, 5));
+ async function f3(a = eval("'use strict'; x"), x) { return a }
+ assertThrowsAsync(() => f3(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f3(4, 5));
+ async function f4(a = () => x, x) { return a() }
+ assertEqualsAsync(4, () => f4(() => 4, 5));
+ async function f5(a = () => eval("x"), x) { return a() }
+ assertEqualsAsync(4, () => f5(() => 4, 5));
+ async function f6(a = () => eval("'use strict'; x"), x) { return a() }
+ assertEqualsAsync(4, () => f6(() => 4, 5));
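+
+ // Parameters initialize left to right: an earlier default can only reach a
+ // later parameter through a closure invoked after initialization completes;
+ // touching it eagerly lands in the TDZ and throws a ReferenceError.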
+
+ async function f11(a = x, x = 2) { return a }
+ assertThrowsAsync(() => f11(), ReferenceError);
+ assertThrowsAsync(() => f11(undefined), ReferenceError);
+ assertThrowsAsync(() => f11(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f11(4, 5));
+ async function f12(a = eval("x"), x = 2) { return a }
+ assertThrowsAsync(() => f12(), ReferenceError);
+ assertThrowsAsync(() => f12(undefined), ReferenceError);
+ assertThrowsAsync(() => f12(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f12(4, 5));
+ async function f13(a = eval("'use strict'; x"), x = 2) { return a }
+ assertThrowsAsync(() => f13(), ReferenceError);
+ assertThrowsAsync(() => f13(undefined), ReferenceError);
+ assertThrowsAsync(() => f13(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f13(4, 5));
+
+ async function f21(x = function() { return a }, ...a) { return x()[0] }
+ assertEqualsAsync(4, () => f21(undefined, 4));
+ async function f22(x = () => a, ...a) { return x()[0] }
+ assertEqualsAsync(4, () => f22(undefined, 4));
+ async function f23(x = () => eval("a"), ...a) { return x()[0] }
+ assertEqualsAsync(4, () => f23(undefined, 4));
+ async function f24(x = () => {'use strict'; return eval("a") }, ...a) {
+ return x()[0]
+ }
+ assertEqualsAsync(4, () => f24(undefined, 4));
+ async function f25(x = () => eval("'use strict'; a"), ...a) { return x()[0] }
+ assertEqualsAsync(4, () => f25(undefined, 4));
+
+ var g1 = async (x = function() { return a }, ...a) => { return x()[0] };
+ assertEqualsAsync(4, () => g1(undefined, 4));
+ var g2 = async (x = () => a, ...a) => { return x()[0] };
+ assertEqualsAsync(4, () => g2(undefined, 4));
+})();
+
+(function TestParameterTDZStrict() {
+ "use strict";
+
+ async function f1(a = eval("x"), x) { return a }
+ assertThrowsAsync(() => f1(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f1(4, 5));
+ async function f2(a = () => eval("x"), x) { return a() }
+ assertEqualsAsync(4, () => f2(() => 4, 5));
+
+ async function f11(a = eval("x"), x = 2) { return a }
+ assertThrowsAsync(() => f11(), ReferenceError);
+ assertThrowsAsync(() => f11(undefined), ReferenceError);
+ assertThrowsAsync(() => f11(undefined, 4), ReferenceError);
+ assertEqualsAsync(4, () => f11(4, 5));
+
+ async function f21(x = () => eval("a"), ...a) { return x()[0] }
+ assertEqualsAsync(4, () => f21(undefined, 4));
+})();
+
+(function TestArgumentsForNonSimpleParameters() {
+ async function f1(x = 900) { arguments[0] = 1; return x }
+ assertEqualsAsync(9, () => f1(9));
+ assertEqualsAsync(900, () => f1());
+ async function f2(x = 1001) { x = 2; return arguments[0] }
+ assertEqualsAsync(10, () => f2(10));
+ assertEqualsAsync(undefined, () => f2());
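+
+ // A non-simple parameter list makes the arguments object unmapped: writes
+ // to arguments[0] do not affect x, and assigning x leaves arguments[0]
+ // untouched.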
+}());
+
+
+(function TestFunctionLength() {
+ assertEquals(0, (async function(x = 1) {}).length);
+ assertEquals(0, (async function(x = 1, ...a) {}).length);
+ assertEquals(1, (async function(x, y = 1) {}).length);
+ assertEquals(1, (async function(x, y = 1, ...a) {}).length);
+ assertEquals(2, (async function(x, y, z = 1) {}).length);
+ assertEquals(2, (async function(x, y, z = 1, ...a) {}).length);
+ assertEquals(1, (async function(x, y = 1, z) {}).length);
+ assertEquals(1, (async function(x, y = 1, z, ...a) {}).length);
+ assertEquals(1, (async function(x, y = 1, z, v = 2) {}).length);
+ assertEquals(1, (async function(x, y = 1, z, v = 2, ...a) {}).length);
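+
+ // Function.prototype.length counts only the parameters preceding the first
+ // default or rest parameter.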
+})();
+
+(function TestDirectiveThrows() {
+ "use strict";
+
+ assertThrows("(async function(x=1){'use strict';})", SyntaxError);
+ assertThrows("(async function(a, x=1){'use strict';})", SyntaxError);
+ assertThrows("(async function({x}){'use strict';})", SyntaxError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/async-function-debug-evaluate.js b/deps/v8/test/mjsunit/harmony/async-function-debug-evaluate.js
new file mode 100644
index 0000000000..edf7bcab12
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-function-debug-evaluate.js
@@ -0,0 +1,139 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --expose-debug-as debug
+
+var Debug = debug.Debug;
+var breakPointCount = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ ++breakPointCount;
+ try {
+ if (breakPointCount === 1) {
+ assertEquals(
+ "inner", exec_state.frame(0).evaluate("inner").value());
+ assertThrows(() => exec_state.frame(0).evaluate("letInner").value(),
+ ReferenceError);
+ assertThrows(() => exec_state.frame(0).evaluate("constInner").value(),
+ ReferenceError);
+
+ assertEquals("outer", exec_state.frame(0).evaluate("outer").value());
+ assertEquals(
+ "const outer", exec_state.frame(0).evaluate("constOuter").value());
+ assertEquals(
+ "let outer", exec_state.frame(0).evaluate("letOuter").value());
+
+ assertEquals("outer", exec_state.frame(1).evaluate("outer").value());
+ assertEquals(
+ "const outer", exec_state.frame(1).evaluate("constOuter").value());
+ assertEquals(
+ "let outer", exec_state.frame(1).evaluate("letOuter").value());
+
+ assertThrows(() => exec_state.frame(0).evaluate("withVar").value(),
+ ReferenceError);
+
+ } else if (breakPointCount === 2) {
+ assertEquals(
+ "inner", exec_state.frame(0).evaluate("inner").value());
+ assertThrows(() => exec_state.frame(0).evaluate("letInner").value(),
+ ReferenceError);
+ assertThrows(() => exec_state.frame(0).evaluate("constInner").value(),
+ ReferenceError);
+
+ assertEquals(57, exec_state.frame(0).evaluate("x").value());
+ assertEquals(100, exec_state.frame(0).evaluate("y").value());
+
+ // From breakPointCount === 2 onward, it is no longer possible to access
+ // earlier frame states.
+ assertEquals("outer", exec_state.frame(0).evaluate("outer").value());
+ assertEquals(
+ "const outer", exec_state.frame(0).evaluate("constOuter").value());
+ assertEquals(
+ "let outer", exec_state.frame(0).evaluate("letOuter").value());
+
+ exec_state.frame(0).evaluate("x = `x later(${x})`");
+ exec_state.frame(0).evaluate("y = `y later(${y})`");
+ exec_state.frame(0).evaluate("z = `ZEE`");
+
+ } else if (breakPointCount === 3) {
+ assertEquals(
+ "inner", exec_state.frame(0).evaluate("inner").value());
+ assertEquals(
+ "let inner", exec_state.frame(0).evaluate("letInner").value());
+ assertEquals(
+ "const inner", exec_state.frame(0).evaluate("constInner").value());
+
+ } else if (breakPointCount === 4) {
+ assertEquals(
+ "oop", exec_state.frame(0).evaluate("error.message").value());
+ assertEquals(
+ "Error",
+ exec_state.frame(0).evaluate("error.constructor.name").value());
+ assertEquals("floof", exec_state.frame(0).evaluate("bun").value());
+ assertThrows(() => exec_state.frame(0).evaluate("cow").value(),
+ ReferenceError);
+
+ assertEquals("outer", exec_state.frame(0).evaluate("outer").value());
+ assertEquals(
+ "const outer", exec_state.frame(0).evaluate("constOuter").value());
+ assertEquals(
+ "let outer", exec_state.frame(0).evaluate("letOuter").value());
+ }
+ } catch (e) {
+ print(e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+var outer = "outer";
+const constOuter = "const outer";
+let letOuter = "let outer";
+
+async function thrower() {
+ return Promise.reject(new Error("oop"));
+}
+
+async function testLater() {
+ return { x: 57, y: 100 };
+}
+
+async function test() {
+ var inner = "inner";
+ debugger;
+
+ let withVar = await testLater();
+ with (withVar) {
+ debugger;
+ }
+
+ assertEquals("x later(57)", withVar.x);
+ assertEquals("y later(100)", withVar.y);
+ assertEquals(undefined, withVar.z);
+ assertEquals("ZEE", z);
+
+ let letInner = "let inner";
+ const constInner = "const inner";
+ debugger;
+
+ try {
+ await thrower();
+ } catch (error) {
+ const bun = "floof";
+ debugger;
+ let cow = "moo";
+ }
+}
+
+test().
+then(x => {
+ Debug.setListener(null);
+}).
+catch(error => {
+ Debug.setListener(null);
+ print(error.stack);
+ quit(1);
+});
diff --git a/deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js b/deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js
new file mode 100644
index 0000000000..3d72549d2a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js
@@ -0,0 +1,616 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var AsyncFunction = (async function() {}).constructor;
+
+async function thrower() { throw 'Exception'; }
+
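+// Runs `func` with `args` under a break listener: `handler` inspects each
+// paused exec_state, and the optional `continuation` consumes the result
+// (used by the closure tests below to trigger a second break).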
+async function test(name, func, args, handler, continuation) {
+ var handler_called = false;
+ var exception = null;
+
+ function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ handler_called = true;
+ handler(exec_state);
+ }
+ } catch (e) {
+ exception = e;
+ }
+ }
+
+ Debug.setListener(listener);
+
+ var result;
+ if (typeof func === "object")
+ result = await func.method.apply(func, args);
+ else
+ result = await func.apply(null, args);
+
+ if (typeof continuation === "function") {
+ await continuation(result);
+ }
+
+ assertTrue(handler_called, `Expected ${name} handler to be called`);
+ if (exception) {
+ exception.message = `${name} / ${exception.message}`;
+ print(exception.stack);
+ quit(1);
+ }
+
+ Debug.setListener(null);
+}
+
+async function runTests() {
+
+// Simple
+await test(
+ "(AsyncFunctionExpression) Local 1",
+ async function() { debugger; }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 1 --- resume normal",
+ async function() { let z = await 2; debugger; }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({z: 2}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 1 --- resume throw",
+ async function() { let q = await 1;
+ try { let z = await thrower(); }
+ catch (e) { debugger; } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e: 'Exception'}, 0, exec_state);
+ CheckScopeContent({q: 1}, 1, exec_state);
+
+ });
+
+// Simple With Parameter
+await test(
+ "(AsyncFunctionExpression) Local 2",
+ async function(a) { debugger; }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ a: 1 }, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 2 --- resume normal",
+ async function(a) { let z = await 2; debugger; }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ a: 1, z: 2 }, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 2 --- resume throw",
+ async function(a) { let z = await 2;
+ try { await thrower(); } catch (e) { debugger; } }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ e: 'Exception' }, 0, exec_state);
+ CheckScopeContent({ a: 1, z: 2 }, 1, exec_state);
+ });
+
+// Simple With Parameter and Variable
+await test(
+ "(AsyncFunctionExpression) Local 3",
+ async function(a) { var b = 2; debugger; }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ a: 1, b: 2 }, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 3 --- resume normal",
+ async function(a) { let y = await 3; var b = 2; let z = await 4;
+ debugger; }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ a: 1, b: 2, y: 3, z: 4 }, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 3 --- resume throw",
+ async function(a) { let y = await 3;
+ try { var b = 2; let z = await thrower(); }
+ catch (e) { debugger; } }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ e: 'Exception' }, 0, exec_state);
+ CheckScopeContent({ a: 1, b: 2, y: 3 }, 1, exec_state);
+ });
+
+// Local scope with parameters and local variables.
+await test(
+ "(AsyncFunctionExpression) Local 4",
+ async function(a, b) { var x = 3; var y = 4; debugger; }, [1, 2],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 4 --- resume normal",
+ async function(a, b) { let q = await 5; var x = 3; var y = 4;
+ let r = await 6; debugger; }, [1, 2],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4, q: 5, r: 6}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 4 --- resume throw",
+ async function(a, b) { let q = await 5; var x = 3; var y = 4;
+ try { let r = await thrower(); }
+ catch (e) { debugger; } }, [1, 2],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e: 'Exception'}, 0, exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4, q: 5}, 1, exec_state);
+ });
+
+// Empty local scope with use of eval.
+await test(
+ "(AsyncFunctionExpression) Local 5",
+ async function() { eval(""); debugger; }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 5 --- resume normal",
+ async function() { let x = await 1; eval(""); let y = await 2;
+ debugger; }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ x: 1, y: 2 }, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 5 --- resume throw",
+ async function() { let x = await 1; eval("");
+ try { let y = await thrower(); }
+ catch (e) { debugger; } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ e: 'Exception' }, 0, exec_state);
+ CheckScopeContent({ x: 1 }, 1, exec_state);
+ });
+
+// Local introducing local variable using eval.
+await test(
+ "(AsyncFunctionExpression) Local 6",
+ async function() { eval("var i = 5"); debugger; }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({i:5}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 6 --- resume normal",
+ async function() { let x = await 1; eval("var i = 5"); let y = await 2;
+ debugger; }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({i:5, x: 1, y: 2}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 6 --- resume throw",
+ async function() { let x = await 1; eval("var i = 5");
+ try { let y = await thrower(); }
+ catch (e) { debugger; } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e: 'Exception' }, 0, exec_state);
+ CheckScopeContent({i:5, x: 1}, 1, exec_state);
+ });
+
+// Local scope with parameters, local variables and local variable introduced
+// using eval.
+await test(
+ "(AsyncFunctionExpression) Local 7",
+ async function(a, b) { var x = 3; var y = 4;
+ eval("var i = 5;"); eval("var j = 6");
+ debugger; }, [1, 2],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 7 --- resume normal",
+ async function(a, b) { let z = await 7; var x = 3; var y = 4;
+ eval("var i = 5;"); eval("var j = 6");
+ let q = await 8;
+ debugger; }, [1, 2],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6, z:7, q:8}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Local 7 --- resume throw",
+ async function(a, b) { let z = await 7; var x = 3; var y = 4;
+ eval("var i = 5;"); eval("var j = 6");
+ try { let q = await thrower(); }
+ catch (e) { debugger; } }, [1, 2],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e: 'Exception'}, 0, exec_state);
+ //CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6, z:7}, 1, exec_state);
+ });
+
+// Nested empty with blocks.
+await test(
+ "(AsyncFunctionExpression) With",
+ async function() { with ({}) { with ({}) { debugger; } } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+ CheckScopeContent({}, 1, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) With --- resume normal",
+ async function() { let x = await 1; with ({}) { with ({}) {
+ let y = await 2; debugger; } } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({y:2}, 0, exec_state);
+ CheckScopeContent({}, 1, exec_state);
+ CheckScopeContent({}, 2, exec_state);
+ CheckScopeContent({x:1}, 3, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) With --- resume throw",
+ async function() { let x = await 1; with ({}) { with ({}) {
+ try { let y = await thrower(); }
+ catch (e) { debugger; } } } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.With,
+ debug.ScopeType.With,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({ e: 'Exception'}, 0, exec_state);
+ CheckScopeContent({}, 1, exec_state);
+ CheckScopeContent({}, 2, exec_state);
+ CheckScopeContent({x:1}, 3, exec_state);
+ });
+
+// Simple closure formed by returning an inner function referring to the
+// outer function's arguments.
+await test(
+ "(AsyncFunctionExpression) Closure 1",
+ async function(a) { return function() { debugger; return a; } }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1}, 1, exec_state);
+ },
+ result => result());
+
+await test(
+ "(AsyncFunctionExpression) Closure 1 --- resume normal",
+ async function(a) { let x = await 2;
+ return function() { debugger; return a; } }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1, x: 2}, 1, exec_state);
+ },
+ result => result());
+
+await test(
+ "(AsyncFunctionExpression) Closure 1 --- resume throw",
+ async function(a) { let x = await 2;
+ return async function() {
+ try { await thrower(); }
+ catch (e) { debugger; } return a; }; }, [1],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e: 'Exception'}, 0, exec_state);
+ CheckScopeContent({a:1, x: 2}, 2, exec_state);
+ },
+ result => result());
+
+await test(
+ "(AsyncFunctionExpression) Catch block 1",
+ async function() { try { throw 'Exception'; } catch (e) { debugger; } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Catch block 1 --- resume normal",
+ async function() {
+ let x = await 1;
+ try { throw 'Exception'; } catch (e) { let y = await 2; debugger; } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({y: 2}, 0, exec_state);
+ CheckScopeContent({e:'Exception'}, 1, exec_state);
+ CheckScopeContent({x: 1}, 2, exec_state);
+ });
+
+await test(
+ "(AsyncFunctionExpression) Catch block 1 --- resume throw",
+ async function() {
+ let x = await 1;
+ try { throw 'Exception!'; } catch (e) {
+ try { let y = await thrower(); } catch (e) { debugger; } } }, [],
+ exec_state => {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+ CheckScopeContent({e:'Exception!'}, 1, exec_state);
+ CheckScopeContent({x: 1}, 2, exec_state);
+ });
+}
+
+runTests().catch(error => {
+ print(error.stack);
+ quit(1);
+});
+
+// Check that two scopes are the same.
+function assertScopeMirrorEquals(scope1, scope2) {
+ assertEquals(scope1.scopeType(), scope2.scopeType());
+ assertEquals(scope1.frameIndex(), scope2.frameIndex());
+ assertEquals(scope1.scopeIndex(), scope2.scopeIndex());
+ assertPropertiesEqual(
+ scope1.scopeObject().value(), scope2.scopeObject().value());
+}
+
+function CheckFastAllScopes(scopes, exec_state) {
+ var fast_all_scopes = exec_state.frame().allScopes(true);
+ var length = fast_all_scopes.length;
+ assertTrue(scopes.length >= length);
+ for (var i = 0; i < scopes.length && i < length; i++) {
+ var scope = fast_all_scopes[length - i - 1];
+ assertTrue(scope.isScope());
+ assertEquals(scopes[scopes.length - i - 1], scope.scopeType());
+ }
+}
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+ var all_scopes = exec_state.frame().allScopes();
+ assertEquals(
+ scopes.length, all_scopes.length, "FrameMirror.allScopes length");
+ for (var i = 0; i < scopes.length; i++) {
+ var scope = exec_state.frame().scope(i);
+ assertTrue(scope.isScope());
+ assertEquals(scopes[i], scope.scopeType());
+ assertScopeMirrorEquals(all_scopes[i], scope);
+
+ // Check the global object when hitting the global scope.
+ if (scopes[i] == debug.ScopeType.Global) {
+ // The objects don't have the same class (one is "global", the other is
+ // "Object"), so just check the properties directly.
+ assertPropertiesEqual(this, scope.scopeObject().value());
+ }
+ }
+ CheckFastAllScopes(scopes, exec_state);
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Send a scopes request and check the result.
+ var json;
+ var request_json = '{"seq":0,"type":"request","command":"scopes"}';
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scopes.length, response.body.scopes.length);
+ for (var i = 0; i < scopes.length; i++) {
+ var scopeRef = response.body.scopes[i].object.ref;
+ assertEquals(i, response.body.scopes[i].index);
+ assertEquals(scopes[i], response.body.scopes[i].type);
+ if (scopes[i] == debug.ScopeType.Local ||
+ scopes[i] == debug.ScopeType.Script ||
+ scopes[i] == debug.ScopeType.Closure) {
+ assertTrue(response.body.scopes[i].object.ref < 0);
+ } else {
+ assertTrue(response.body.scopes[i].object.ref >= 0);
+ }
+ var found = false;
+ for (var j = 0; j < response.refs.length && !found; j++) {
+ found = response.refs[j].handle == response.body.scopes[i].object.ref;
+ }
+ assertTrue(found, `Scope object ${scopeRef} not found`);
+ }
+}
+
+// Check that the content of the scope is as expected. For functions just check
+// that there is a function.
+function CheckScopeContent(content, number, exec_state) {
+ var scope = exec_state.frame().scope(number);
+ var count = 0;
+ for (var p in content) {
+ var property_mirror = scope.scopeObject().property(p);
+ assertFalse(property_mirror.isUndefined(),
+ `property ${p} not found in scope`);
+ if (typeof(content[p]) === 'function') {
+ assertTrue(property_mirror.value().isFunction());
+ } else {
+ assertEquals(content[p], property_mirror.value().value(),
+ `property ${p} has unexpected value`);
+ }
+ count++;
+ }
+
+ // 'arguments' might be exposed in the local and closure scopes. Just
+ // ignore it.
+ var scope_size = scope.scopeObject().properties().length;
+ if (!scope.scopeObject().property('arguments').isUndefined()) {
+ scope_size--;
+ }
+ // Skip property with empty name.
+ if (!scope.scopeObject().property('').isUndefined()) {
+ scope_size--;
+ }
+
+ if (count != scope_size) {
+ print('Names found in scope:');
+ var names = scope.scopeObject().propertyNames();
+ for (var i = 0; i < names.length; i++) {
+ print(names[i]);
+ }
+ }
+ assertEquals(count, scope_size);
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Send a scope request for information on a single scope and check the
+ // result.
+ var request_json = `{
+ "seq": 0,
+ "type": "request",
+ "command": "scope",
+ "arguments": {
+ "number": `;
+ request_json += scope.scopeIndex();
+ request_json += '}}';
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scope.scopeType(), response.body.type);
+ assertEquals(number, response.body.index);
+ if (scope.scopeType() == debug.ScopeType.Local ||
+ scope.scopeType() == debug.ScopeType.Script ||
+ scope.scopeType() == debug.ScopeType.Closure) {
+ assertTrue(response.body.object.ref < 0);
+ } else {
+ assertTrue(response.body.object.ref >= 0);
+ }
+ var found = false;
+ for (var i = 0; i < response.refs.length && !found; i++) {
+ found = response.refs[i].handle == response.body.object.ref;
+ }
+ assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
diff --git a/deps/v8/test/mjsunit/harmony/async-function-stacktrace.js b/deps/v8/test/mjsunit/harmony/async-function-stacktrace.js
new file mode 100644
index 0000000000..5ab20881a6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/async-function-stacktrace.js
@@ -0,0 +1,178 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+
+async function test(func, funcs) {
+ try {
+ await func();
+ throw new Error("Expected " + func.toString() + " to throw");
+ } catch (e) {
+ var stack = e.stack.split('\n').
+ slice(1).
+ map(line => line.trim()).
+ map(line => line.match(/at (?:(.*) )?.*$/)[1]).
+ filter(x => typeof x === 'string' && x.length);
+
+ assertEquals(funcs, stack, `Unexpected stack trace ${e.stack}`);
+ }
+}
+
+function thrower() { throw new Error("NOPE"); }
+function reject() { return Promise.reject(new Error("NOPE")); }
+
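+// A synchronous throw before the first await still has the callers on the
+// stack; after a resumption only the async function's own frame remains.
+// The expected-frame lists below encode that difference.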
+async function runTests() {
+ await test(async function a() {
+ throw new Error("FAIL");
+ },
+ ["a", "test", "runTests"]);
+
+ await test(async function a2() {
+ await 1;
+ throw new Error("FAIL");
+ }, ["a2"]);
+
+ await test(async function a3() {
+ await 1;
+ try { await thrower(); } catch (e) { throw new Error("FAIL"); }
+ }, ["a3"]);
+
+ await test(async function a4() {
+ await 1;
+ try { await reject(); } catch (e) { throw new Error("FAIL"); }
+ }, ["a4"]);
+
+ await test({ async b() {
+ throw new Error("FAIL");
+ }}.b,
+ ["b", "test", "runTests"]);
+
+ await test({ async b2() {
+ await 1;
+ throw new Error("FAIL");
+ }}.b2, ["b2"]);
+
+ await test({ async b3() {
+ await 1;
+ try { await thrower(); } catch (e) { throw new Error("FAIL"); }
+ } }.b3, ["b3"]);
+
+ await test({ async b4() {
+ await 1;
+ try { await reject(); } catch (e) { throw new Error("FAIL"); }
+ } }.b4, ["b4"]);
+
+ await test((new class { async c() {
+ throw new Error("FAIL");
+ } }).c,
+ ["c", "test", "runTests"]);
+
+ await test((new class { async c2() {
+ await 1;
+ throw new Error("FAIL");
+ } }).c2, ["c2"]);
+
+ await test((new class { async c3() {
+ await 1;
+ try { await thrower(); } catch (e) { throw new Error("FAIL"); }
+ } }).c3, ["c3"]);
+
+ await test((new class { async c4() {
+ await 1;
+ try { await reject(); } catch (e) { throw new Error("FAIL"); }
+ } }).c4, ["c4"]);
+
+ // TODO(caitp): We should infer anonymous async functions as the empty
+ // string, not as the name of a function they're passed as a parameter to.
+ await test(async x => { throw new Error("FAIL") },
+ ["test", "test", "runTests"]);
+ await test(async() => { throw new Error("FAIL") },
+ ["test", "test", "runTests"]);
+ await test(async(a) => { throw new Error("FAIL") },
+ ["test", "test", "runTests"]);
+ await test(async(a, b) => { throw new Error("FAIL") },
+ ["test", "test", "runTests"]);
+
+ await test(async x => { await 1; throw new Error("FAIL") }, ["test"]);
+ await test(async() => { await 1; throw new Error("FAIL") }, ["test"]);
+ await test(async(a) => { await 1; throw new Error("FAIL") }, ["test"]);
+ await test(async(a, b) => { await 1; throw new Error("FAIL") }, ["test"]);
+
+ await test(async x => {
+ await 1;
+ try {
+ await thrower();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+
+ await test(async() => {
+ await 1;
+ try {
+ await thrower();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+
+ await test(async(a) => {
+ await 1;
+ try {
+ await thrower();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+
+ await test(async(a, b) => {
+ await 1;
+ try {
+ await thrower();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+
+ await test(async x => {
+ await 1;
+ try {
+ await reject();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+
+ await test(async() => {
+ await 1;
+ try {
+ await reject();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+
+ await test(async(a) => {
+ await 1;
+ try {
+ await reject();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+
+ await test(async(a, b) => {
+ await 1;
+ try {
+ await reject();
+ } catch (e) {
+ throw new Error("FAIL");
+ }
+ }, ["test"]);
+}
+
+runTests().catch(e => {
+ print(e);
+ quit(1);
+});
diff --git a/deps/v8/test/mjsunit/harmony/atomics.js b/deps/v8/test/mjsunit/harmony/atomics.js
index bf27eb46d5..e608df3342 100644
--- a/deps/v8/test/mjsunit/harmony/atomics.js
+++ b/deps/v8/test/mjsunit/harmony/atomics.js
@@ -16,26 +16,19 @@ function toRangeWrapped(value) {
return value;
}
-function toRangeClamped(value) {
- if (value < this.min) return this.min;
- if (value > this.max) return this.max;
- return value;
-}
-
function makeConstructorObject(constr, min, max, toRange) {
var o = {constr: constr, min: min, max: max};
- o.toRange = toRange.bind(o);
+ o.toRange = toRangeWrapped.bind(o);
return o;
}
var IntegerTypedArrayConstructors = [
- makeConstructorObject(Int8Array, -128, 127, toRangeWrapped),
- makeConstructorObject(Int16Array, -32768, 32767, toRangeWrapped),
- makeConstructorObject(Int32Array, -0x80000000, 0x7fffffff, toRangeWrapped),
- makeConstructorObject(Uint8Array, 0, 255, toRangeWrapped),
- makeConstructorObject(Uint8ClampedArray, 0, 255, toRangeClamped),
- makeConstructorObject(Uint16Array, 0, 65535, toRangeWrapped),
- makeConstructorObject(Uint32Array, 0, 0xffffffff, toRangeWrapped),
+ makeConstructorObject(Int8Array, -128, 127),
+ makeConstructorObject(Int16Array, -32768, 32767),
+ makeConstructorObject(Int32Array, -0x80000000, 0x7fffffff),
+ makeConstructorObject(Uint8Array, 0, 255),
+ makeConstructorObject(Uint16Array, 0, 65535),
+ makeConstructorObject(Uint32Array, 0, 0xffffffff),
];
(function TestBadArray() {
@@ -44,9 +37,13 @@ var IntegerTypedArrayConstructors = [
var sab = new SharedArrayBuffer(128);
var sf32a = new Float32Array(sab);
var sf64a = new Float64Array(sab);
+ var u8ca = new Uint8ClampedArray(sab);
// Atomic ops require integer shared typed arrays
- [undefined, 1, 'hi', 3.4, ab, u32a, sab, sf32a, sf64a].forEach(function(o) {
+ var badArrayTypes = [
+ undefined, 1, 'hi', 3.4, ab, u32a, sab, sf32a, sf64a, u8ca
+ ];
+ badArrayTypes.forEach(function(o) {
assertThrows(function() { Atomics.compareExchange(o, 0, 0, 0); },
TypeError);
assertThrows(function() { Atomics.load(o, 0); }, TypeError);
@@ -129,15 +126,16 @@ var IntegerTypedArrayConstructors = [
var testOp = function(op, ia, index, expectedIndex, name) {
for (var i = 0; i < ia.length; ++i)
- ia[i] = 22;
+ ia[i] = i * 2;
ia[expectedIndex] = 0;
- assertEquals(0, op(ia, index, 0, 0), name);
+ var result = op(ia, index, 0, 0);
+ assertEquals(0, result, name);
assertEquals(0, ia[expectedIndex], name);
for (var i = 0; i < ia.length; ++i) {
if (i == expectedIndex) continue;
- assertEquals(22, ia[i], name);
+ assertEquals(i * 2, ia[i], name);
}
};
@@ -222,6 +220,24 @@ function clearArray(sab) {
}
})
});
+
+ // Test Smi range
+ (function () {
+ var sab = new SharedArrayBuffer(4);
+ var i32 = new Int32Array(sab);
+ var u32 = new Uint32Array(sab);
+
+ function testLoad(signedValue, unsignedValue) {
+ u32[0] = unsignedValue;
+ assertEquals(unsignedValue, Atomics.load(u32, 0));
+ assertEquals(signedValue, Atomics.load(i32, 0));
+ }
+
+ testLoad(0x3fffffff, 0x3fffffff); // 2**30-1 (always smi)
+ testLoad(0x40000000, 0x40000000); // 2**30 (smi if signed and 32-bits)
+ testLoad(0x80000000, -0x80000000); // 2**31 (smi if signed and 32-bits)
+ testLoad(0xffffffff, -1); // 2**32-1 (smi if signed)
+ })();
})();
(function TestStore() {
@@ -405,7 +421,7 @@ function clearArray(sab) {
assertEquals(50, Atomics.compareExchange(sta, 0, v, v), name);
// Store
- assertEquals(+v, Atomics.store(sta, 0, v), name);
+ assertEquals(v|0, Atomics.store(sta, 0, v), name);
assertEquals(v|0, sta[0], name);
// Add
diff --git a/deps/v8/test/mjsunit/harmony/block-sloppy-function.js b/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
deleted file mode 100644
index 2bea1476ab..0000000000
--- a/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-sloppy --harmony-sloppy-let
-// Flags: --harmony-sloppy-function
-
-// Test Annex B 3.3 semantics for functions declared in blocks in sloppy mode.
-// http://www.ecma-international.org/ecma-262/6.0/#sec-block-level-function-declarations-web-legacy-compatibility-semantics
-
-(function overridingLocalFunction() {
- var x = [];
- assertEquals('function', typeof f);
- function f() {
- x.push(1);
- }
- f();
- {
- f();
- function f() {
- x.push(2);
- }
- f();
- }
- f();
- {
- f();
- function f() {
- x.push(3);
- }
- f();
- }
- f();
- assertArrayEquals([1, 2, 2, 2, 3, 3, 3], x);
-})();
-
-(function newFunctionBinding() {
- var x = [];
- assertEquals('undefined', typeof f);
- {
- f();
- function f() {
- x.push(2);
- }
- f();
- }
- f();
- {
- f();
- function f() {
- x.push(3);
- }
- f();
- }
- f();
- assertArrayEquals([2, 2, 2, 3, 3, 3], x);
-})();
-
-(function shadowingLetDoesntBind() {
- let f = 1;
- assertEquals(1, f);
- {
- let y = 3;
- function f() {
- y = 2;
- }
- f();
- assertEquals(2, y);
- }
- assertEquals(1, f);
-})();
-
-(function shadowingClassDoesntBind() {
- class f { }
- assertEquals('class f { }', f.toString());
- {
- let y = 3;
- function f() {
- y = 2;
- }
- f();
- assertEquals(2, y);
- }
- assertEquals('class f { }', f.toString());
-})();
-
-(function shadowingConstDoesntBind() {
- const f = 1;
- assertEquals(1, f);
- {
- let y = 3;
- function f() {
- y = 2;
- }
- f();
- assertEquals(2, y);
- }
- assertEquals(1, f);
-})();
-
-(function shadowingVarBinds() {
- var f = 1;
- assertEquals(1, f);
- {
- let y = 3;
- function f() {
- y = 2;
- }
- f();
- assertEquals(2, y);
- }
- assertEquals('function', typeof f);
-})();
-
-(function conditional() {
- if (true) {
- function f() { return 1; }
- } else {
- function f() { return 2; }
- }
- assertEquals(1, f());
-
- if (false) {
- function g() { return 1; }
- } else {
- function g() { return 2; }
- }
- assertEquals(2, g());
-})();
-
-(function skipExecution() {
- {
- function f() { return 1; }
- }
- assertEquals(1, f());
- {
- function f() { return 2; }
- }
- assertEquals(2, f());
- L: {
- assertEquals(3, f());
- break L;
- function f() { return 3; }
- }
- assertEquals(2, f());
-})();
-
-// Test that shadowing arguments is fine
-(function shadowArguments(x) {
- assertArrayEquals([1], arguments);
- {
- assertEquals('function', typeof arguments);
- function arguments() {}
- assertEquals('function', typeof arguments);
- }
- assertEquals('function', typeof arguments);
-})(1);
-
-// Shadow function parameter
-(function shadowParameter(x) {
- assertEquals(1, x);
- {
- function x() {}
- }
- assertEquals('function', typeof x);
-})(1);
-
-// Shadow function parameter
-(function shadowDefaultParameter(x = 0) {
- assertEquals(1, x);
- {
- function x() {}
- }
- // TODO(littledan): Once destructured parameters are no longer
- // let-bound, enable this assertion. This is the core of the test.
- // assertEquals('function', typeof x);
-})(1);
-
-(function shadowRestParameter(...x) {
- assertArrayEquals([1], x);
- {
- function x() {}
- }
- // TODO(littledan): Once destructured parameters are no longer
- // let-bound, enable this assertion. This is the core of the test.
- // assertEquals('function', typeof x);
-})(1);
-
-assertThrows(function notInDefaultScope(x = y) {
- {
- function y() {}
- }
- assertEquals('function', typeof y);
- assertEquals(x, undefined);
-}, ReferenceError);
-
-// Test that hoisting from blocks does happen in global scope
-function globalHoisted() { return 0; }
-{
- function globalHoisted() { return 1; }
-}
-assertEquals(1, globalHoisted());
-
-// Also happens when not previously defined
-assertEquals(undefined, globalUndefinedHoisted);
-{
- function globalUndefinedHoisted() { return 1; }
-}
-assertEquals(1, globalUndefinedHoisted());
-var globalUndefinedHoistedDescriptor =
- Object.getOwnPropertyDescriptor(this, "globalUndefinedHoisted");
-assertFalse(globalUndefinedHoistedDescriptor.configurable);
-assertTrue(globalUndefinedHoistedDescriptor.writable);
-assertTrue(globalUndefinedHoistedDescriptor.enumerable);
-assertEquals(1, globalUndefinedHoistedDescriptor.value());
-
-// When a function property is hoisted, it should be
-// made enumerable.
-// BUG(v8:4451)
-Object.defineProperty(this, "globalNonEnumerable", {
- value: false,
- configurable: true,
- writable: true,
- enumerable: false
-});
-eval("{function globalNonEnumerable() { return 1; }}");
-var globalNonEnumerableDescriptor
- = Object.getOwnPropertyDescriptor(this, "globalNonEnumerable");
-// BUG(v8:4451): Should be made non-configurable
-assertTrue(globalNonEnumerableDescriptor.configurable);
-assertTrue(globalNonEnumerableDescriptor.writable);
-// BUG(v8:4451): Should be made enumerable
-assertFalse(globalNonEnumerableDescriptor.enumerable);
-assertEquals(1, globalNonEnumerableDescriptor.value());
-
-// When a function property is hoisted, it should be overwritten and
-// made writable and overwritten, even if the property was non-writable.
-Object.defineProperty(this, "globalNonWritable", {
- value: false,
- configurable: true,
- writable: false,
- enumerable: true
-});
-eval("{function globalNonWritable() { return 1; }}");
-var globalNonWritableDescriptor
- = Object.getOwnPropertyDescriptor(this, "globalNonWritable");
-// BUG(v8:4451): Should be made non-configurable
-assertTrue(globalNonWritableDescriptor.configurable);
-// BUG(v8:4451): Should be made writable
-assertFalse(globalNonWritableDescriptor.writable);
-assertFalse(globalNonEnumerableDescriptor.enumerable);
-// BUG(v8:4451): Should be overwritten
-assertEquals(false, globalNonWritableDescriptor.value);
-
-// Test that hoisting from blocks does happen in an eval
-eval(`
- function evalHoisted() { return 0; }
- {
- function evalHoisted() { return 1; }
- }
- assertEquals(1, evalHoisted());
-`);
-
-// Test that hoisting from blocks happens from eval in a function
-!function() {
- eval(`
- function evalInFunctionHoisted() { return 0; }
- {
- function evalInFunctionHoisted() { return 1; }
- }
- assertEquals(1, evalInFunctionHoisted());
- `);
-}();
-
-let dontHoistGlobal;
-{ function dontHoistGlobal() {} }
-assertEquals(undefined, dontHoistGlobal);
-
-let dontHoistEval;
-// BUG(v8:) This shouldn't hoist and shouldn't throw
-var throws = false;
-try {
- eval("{ function dontHoistEval() {} }");
-} catch (e) {
- throws = true;
-}
-assertTrue(throws);
-
-// When the global object is frozen, silently don't hoist
-// Currently this actually throws BUG(v8:4452)
-Object.freeze(this);
-throws = false;
-try {
- eval('{ function hoistWhenFrozen() {} }');
-} catch (e) {
- throws = true;
-}
-assertFalse(this.hasOwnProperty("hoistWhenFrozen"));
-assertThrows(() => hoistWhenFrozen, ReferenceError);
-// Should be assertFalse BUG(v8:4452)
-assertTrue(throws);
diff --git a/deps/v8/test/mjsunit/harmony/dataview-accessors.js b/deps/v8/test/mjsunit/harmony/dataview-accessors.js
index c54f8cc20d..d1bd6210bf 100644
--- a/deps/v8/test/mjsunit/harmony/dataview-accessors.js
+++ b/deps/v8/test/mjsunit/harmony/dataview-accessors.js
@@ -400,12 +400,8 @@ function TestGeneralAccessors() {
assertThrows(function() { f(); }, TypeError);
f.call(a, 0, 0); // should not throw
assertThrows(function() { f.call({}, 0, 0); }, TypeError);
- assertThrows(function() { f.call(a); }, TypeError);
- if (name.indexOf("set") == 0) {
- assertThrows(function() { f.call(a, 1); }, TypeError);
- } else {
- f.call(a, 1); // should not throw
- }
+ f.call(a);
+ f.call(a, 1); // should not throw
}
CheckAccessor("getUint8");
CheckAccessor("setUint8");
@@ -429,33 +425,27 @@ TestGeneralAccessors();
function TestInsufficientArguments() {
var a = new DataView(new ArrayBuffer(256));
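+ // Missing arguments are now treated as undefined: the offset coerces to 0,
+ // and storing undefined writes 0 (NaN for float types) rather than
+ // throwing a TypeError.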
+ function CheckInsufficientArguments(type) {
+ var expectedValue = type === "Float32" || type === "Float64" ? NaN : 0;
+ var offset = getElementSize(type);
- assertThrows(function() { a.getUint8(); }, TypeError);
- assertThrows(function() { a.getInt8(); }, TypeError);
- assertThrows(function() { a.getUint16(); }, TypeError);
- assertThrows(function() { a.getInt16(); }, TypeError);
- assertThrows(function() { a.getUint32(); }, TypeError);
- assertThrows(function() { a.getInt32(); }, TypeError);
- assertThrows(function() { a.getFloat32(); }, TypeError);
- assertThrows(function() { a.getFloat64(); }, TypeError);
-
- assertThrows(function() { a.setUint8(); }, TypeError);
- assertThrows(function() { a.setInt8(); }, TypeError);
- assertThrows(function() { a.setUint16(); }, TypeError);
- assertThrows(function() { a.setInt16(); }, TypeError);
- assertThrows(function() { a.setUint32(); }, TypeError);
- assertThrows(function() { a.setInt32(); }, TypeError);
- assertThrows(function() { a.setFloat32(); }, TypeError);
- assertThrows(function() { a.setFloat64(); }, TypeError);
-
- assertThrows(function() { a.setUint8(1) }, TypeError);
- assertThrows(function() { a.setInt8(1) }, TypeError);
- assertThrows(function() { a.setUint16(1) }, TypeError);
- assertThrows(function() { a.setInt16(1) }, TypeError);
- assertThrows(function() { a.setUint32(1) }, TypeError);
- assertThrows(function() { a.setInt32(1) }, TypeError);
- assertThrows(function() { a.setFloat32(1) }, TypeError);
- assertThrows(function() { a.setFloat64(1) }, TypeError);
+ assertSame(undefined, a["set" + type](0, 7));
+ assertSame(undefined, a["set" + type]());
+ assertSame(expectedValue, a["get" + type]());
+
+ assertSame(undefined, a["set" + type](offset, 7));
+ assertSame(undefined, a["set" + type](offset));
+ assertSame(expectedValue, a["get" + type](offset));
+ }
+
+ CheckInsuficientArguments("Uint8");
+ CheckInsuficientArguments("Int8");
+ CheckInsuficientArguments("Uint16");
+ CheckInsuficientArguments("Int16");
+ CheckInsuficientArguments("Uint32");
+ CheckInsuficientArguments("Int32");
+ CheckInsuficientArguments("Float32");
+ CheckInsuficientArguments("Float64");
}
TestInsufficientArguments();
diff --git a/deps/v8/test/mjsunit/harmony/debug-async-break-on-stack.js b/deps/v8/test/mjsunit/harmony/debug-async-break-on-stack.js
new file mode 100644
index 0000000000..d3d9d8bef6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/debug-async-break-on-stack.js
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+// Flags: --harmony-async-await --allow-natives-syntax
+
+var Debug = debug.Debug;
+
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + promise);
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+}
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ break_count++;
+ var line = exec_state.frame(0).sourceLineText();
+ print(line);
+ assertTrue(line.indexOf(`B${break_count}`) > 0);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+
+async function g() {
+ setbreaks();
+ throw 1; // B1
+}
+
+async function f() {
+ try {
+ await g();
+ } catch (e) {}
+ return 2; // B2
+}
+
+function setbreaks() {
+ Debug.setListener(listener);
+ Debug.setBreakPoint(g, 2);
+ Debug.setBreakPoint(f, 4);
+}
+
+f();
+
+%RunMicrotasks();
+
+assertEqualsAsync(2, async () => break_count);
+assertEqualsAsync(null, async () => exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/harmony/debug-async-break.js b/deps/v8/test/mjsunit/harmony/debug-async-break.js
new file mode 100644
index 0000000000..3b6b71baca
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/debug-async-break.js
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+// Flags: --harmony-async-await --allow-natives-syntax
+
+var Debug = debug.Debug;
+
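+// Asserts that the thunk |run| returns a promise which, once microtasks have
+// been flushed, has resolved to |expected|; a rejection is rethrown.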
+function assertEqualsAsync(expected, run, msg) {
+ var actual;
+ var hadValue = false;
+ var hadError = false;
+ var promise = run();
+
+ if (typeof promise !== "object" || typeof promise.then !== "function") {
+ throw new MjsUnitAssertionError(
+ "Expected " + run.toString() +
+ " to return a Promise, but it returned " + promise);
+ }
+
+ promise.then(function(value) { hadValue = true; actual = value; },
+ function(error) { hadError = true; actual = error; });
+
+ assertFalse(hadValue || hadError);
+
+ %RunMicrotasks();
+
+ if (hadError) throw actual;
+
+ assertTrue(
+ hadValue, "Expected '" + run.toString() + "' to produce a value");
+
+ assertEquals(expected, actual, msg);
+}
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ break_count++;
+ var line = exec_state.frame(0).sourceLineText();
+ assertTrue(line.indexOf(`B${break_count}`) > 0);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+async function g() {
+ throw 1;
+}
+
+async function f() {
+ try {
+ await g(); // B1
+ } catch (e) {}
+ assertEquals(2, break_count); // B2
+ return 1; // B3
+}
+
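+// Set breakpoints at the lines marked B1, B2 and B3 in f.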
+Debug.setBreakPoint(f, 2);
+Debug.setBreakPoint(f, 4);
+Debug.setBreakPoint(f, 5);
+
+f();
+
+%RunMicrotasks();
+
+assertEqualsAsync(3, async () => break_count);
+assertEqualsAsync(null, async () => exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/es7/object-observe-debug-event.js b/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js
index 06123b8dc2..249f02fc8f 100644
--- a/deps/v8/test/mjsunit/es7/object-observe-debug-event.js
+++ b/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js
@@ -1,9 +1,8 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-observe
-// Flags: --expose-debug-as debug
+// Flags: --harmony-async-await --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug;
@@ -12,7 +11,18 @@ var exception = null;
var expected = [
"enqueue #1",
"willHandle #1",
+ "then #1",
+ "enqueue #2",
+ "enqueue #3",
"didHandle #1",
+ "willHandle #2",
+ "then #2",
+ "didHandle #2",
+ "willHandle #3",
+ "enqueue #4",
+ "didHandle #3",
+ "willHandle #4",
+ "didHandle #4",
];
function assertLog(msg) {
@@ -30,7 +40,8 @@ function listener(event, exec_state, event_data, data) {
if (base_id < 0)
base_id = event_data.id();
var id = event_data.id() - base_id + 1;
- assertEquals("Object.observe", event_data.name());
+ assertTrue("Promise.resolve" == event_data.name() ||
+ "PromiseResolveThenableJob" == event_data.name());
assertLog(event_data.type() + " #" + id);
} catch (e) {
print(e + e.stack)
@@ -40,13 +51,20 @@ function listener(event, exec_state, event_data, data) {
Debug.setListener(listener);
-var obj = {};
-Object.observe(obj, function(changes) {
- print(change.type + " " + change.name + " " + change.oldValue);
+var resolver;
+var p = new Promise(function(resolve, reject) {
+ resolver = resolve;
});
-obj.foo = 1;
-obj.zoo = 2;
-obj.foo = 3;
+async function main() {
+ await p;
+ assertLog("then #1");
+ await undefined;
+ assertLog("then #2");
+}
+main();
+resolver();
+
+%RunMicrotasks();
assertNull(exception);
diff --git a/deps/v8/test/mjsunit/harmony/debug-async-liveedit.js b/deps/v8/test/mjsunit/harmony/debug-async-liveedit.js
new file mode 100644
index 0000000000..276ae7a79d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/debug-async-liveedit.js
@@ -0,0 +1,133 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+var Debug = debug.Debug;
+var LiveEdit = Debug.LiveEdit;
+
+unique_id = 0;
+
+var AsyncFunction = (async function(){}).constructor;
+
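+// Checks that |promise| resolves to |value|; prints a message and quits with a
+// nonzero exit code on a mismatch or rejection.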
+function assertPromiseValue(value, promise) {
+ promise.then(resolve => {
+ went = true;
+ if (resolve !== value) {
+ print(`expected ${value} found ${resolve}`);
+ quit(1);
+ }
+ }, reject => {
+ print(`rejected ${reject}`);
+ quit(1);
+ });
+}
+
+function MakeAsyncFunction() {
+ // Prevents eval script caching.
+ unique_id++;
+ return AsyncFunction('callback',
+ "/* " + unique_id + "*/\n" +
+ "await callback();\n" +
+ "return 'Cat';\n");
+}
+
+function MakeFunction() {
+ // Prevents eval script caching.
+ unique_id++;
+ return Function('callback',
+ "/* " + unique_id + "*/\n" +
+ "callback();\n" +
+ "return 'Cat';\n");
+}
+
+// First, try MakeAsyncFunction with no perturbations.
+(function(){
+ var asyncfn = MakeAsyncFunction();
+ function callback() {};
+ var promise = asyncfn(callback);
+ assertPromiseValue('Cat', promise);
+})();
+
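+// Applies a single-chunk LiveEdit patch to the script containing |fun|,
+// replacing |from| with |to|. Runs in the debug context; throws
+// LiveEdit.Failure if the patch cannot be applied.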
+function patch(fun, from, to) {
+ function debug() {
+ var log = new Array();
+ var script = Debug.findScript(fun);
+ var pos = script.source.indexOf(from);
+ print(`pos ${pos}`);
+ try {
+ LiveEdit.TestApi.ApplySingleChunkPatch(script, pos, from.length, to,
+ log);
+ } finally {
+ print("Change log: " + JSON.stringify(log) + "\n");
+ }
+ }
+ %ExecuteInDebugContext(debug);
+}
+
+// Try to edit an async function while it is running, then again after it has
+// completed, and finally while it is suspended.
+(function(){
+ var asyncfn = MakeAsyncFunction();
+
+ var patch_attempted = false;
+ function attempt_patch() {
+ assertFalse(patch_attempted);
+ patch_attempted = true;
+ assertThrows(function() { patch(asyncfn, "'Cat'", "'Capybara'") },
+ LiveEdit.Failure);
+ };
+ var promise = asyncfn(attempt_patch);
+ // Patch should not succeed because there is a live async function activation
+ // on the stack.
+ assertPromiseValue("Cat", promise);
+ assertTrue(patch_attempted);
+
+ %RunMicrotasks();
+
+ // At this point the async function has run to completion, so the patch
+ // will succeed.
+ patch(asyncfn, "'Cat'", "'Capybara'");
+ promise = asyncfn(function(){});
+ // Patch successful.
+ assertPromiseValue("Capybara", promise);
+
+ // Patching will, however, fail when an async function is suspended.
+ var resolve;
+ promise = asyncfn(function(){return new Promise(function(r){resolve = r})});
+ assertThrows(function() { patch(asyncfn, "'Capybara'", "'Tapir'") },
+ LiveEdit.Failure);
+ resolve();
+ assertPromiseValue("Capybara", promise);
+
+ // Try to patch functions with activations inside and outside async
+ // function activations. We should succeed in the former case, but not in the
+ // latter.
+ var fun_outside = MakeFunction();
+ var fun_inside = MakeFunction();
+ var fun_patch_attempted = false;
+ var fun_patch_restarted = false;
+ function attempt_fun_patches() {
+ if (fun_patch_attempted) {
+ assertFalse(fun_patch_restarted);
+ fun_patch_restarted = true;
+ return;
+ }
+ fun_patch_attempted = true;
+ // Patching outside an async function activation must fail.
+ assertThrows(function() { patch(fun_outside, "'Cat'", "'Cobra'") },
+ LiveEdit.Failure);
+ // Patching inside an async function activation may succeed.
+ patch(fun_inside, "'Cat'", "'Koala'");
+ }
+ promise = asyncfn(function() { return fun_inside(attempt_fun_patches) });
+ assertEquals('Cat',
+ fun_outside(function () {
+ assertPromiseValue('Capybara', promise);
+ assertTrue(fun_patch_restarted);
+ assertTrue(fun_inside.toString().includes("'Koala'"));
+ }));
+})();
+
+%RunMicrotasks();
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions.js b/deps/v8/test/mjsunit/harmony/do-expressions.js
index b3be4eca91..38b68b6ed7 100644
--- a/deps/v8/test/mjsunit/harmony/do-expressions.js
+++ b/deps/v8/test/mjsunit/harmony/do-expressions.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-do-expressions --harmony-sloppy-let --allow-natives-syntax
+// Flags: --harmony-do-expressions --allow-natives-syntax
function returnValue(v) { return v; }
function MyError() {}
diff --git a/deps/v8/test/mjsunit/harmony/for-in.js b/deps/v8/test/mjsunit/harmony/for-in.js
new file mode 100644
index 0000000000..58e343b903
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/for-in.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-for-in
+
+assertThrows("for (var x = 0 in {});", SyntaxError);
+assertThrows("for (const x = 0 in {});", SyntaxError);
+assertThrows("for (let x = 0 in {});", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/function-sent.js b/deps/v8/test/mjsunit/harmony/function-sent.js
index b3cd644dd9..cd0ca957a8 100644
--- a/deps/v8/test/mjsunit/harmony/function-sent.js
+++ b/deps/v8/test/mjsunit/harmony/function-sent.js
@@ -49,7 +49,7 @@
try {
yield function.sent;
} finally {
- return 666;
+ return 23;
}
}
@@ -77,7 +77,7 @@
let x = g();
assertEquals({value: 1, done: false}, x.next(1));
assertEquals({value: undefined, done: false}, x.next(2));
- assertEquals({value: 42, done: true}, x.return(42));
+ assertEquals({value: 23, done: true}, x.return(42));
}
}
diff --git a/deps/v8/test/mjsunit/harmony/futex.js b/deps/v8/test/mjsunit/harmony/futex.js
index 626cff5fdb..f90b773aa3 100644
--- a/deps/v8/test/mjsunit/harmony/futex.js
+++ b/deps/v8/test/mjsunit/harmony/futex.js
@@ -19,9 +19,8 @@
[i8a, i16a, i32a, ui8a, ui8ca, ui16a, ui32a, f32a, f64a].forEach(function(
ta) {
- assertThrows(function() { Atomics.futexWait(ta, 0, 0); });
- assertThrows(function() { Atomics.futexWake(ta, 0, 1); });
- assertThrows(function() { Atomics.futexWakeOrRequeue(ta, 0, 1, 0, 0); });
+ assertThrows(function() { Atomics.wait(ta, 0, 0); });
+ assertThrows(function() { Atomics.wake(ta, 0, 1); });
});
})();
@@ -39,9 +38,8 @@
[i8a, i16a, ui8a, ui8ca, ui16a, ui32a, f32a, f64a].forEach(function(
ta) {
- assertThrows(function() { Atomics.futexWait(ta, 0, 0); });
- assertThrows(function() { Atomics.futexWake(ta, 0, 1); });
- assertThrows(function() { Atomics.futexWakeOrRequeue(ta, 0, 1, 0, 0); });
+ assertThrows(function() { Atomics.wait(ta, 0, 0); });
+ assertThrows(function() { Atomics.wake(ta, 0, 1); });
});
})();
@@ -52,35 +50,23 @@
// Valid indexes are 0-3.
[-1, 4, 100].forEach(function(invalidIndex) {
assertThrows(function() {
- Atomics.futexWait(i32a, invalidIndex, 0);
+ Atomics.wait(i32a, invalidIndex, 0);
}, RangeError);
assertThrows(function() {
- Atomics.futexWake(i32a, invalidIndex, 0);
+ Atomics.wake(i32a, invalidIndex, 0);
}, RangeError);
var validIndex = 0;
- assertThrows(function() {
- Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0, validIndex);
- }, RangeError);
- assertThrows(function() {
- Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0, invalidIndex);
- }, RangeError);
});
i32a = new Int32Array(sab, 8);
[-1, 2, 100].forEach(function(invalidIndex) {
assertThrows(function() {
- Atomics.futexWait(i32a, invalidIndex, 0);
+ Atomics.wait(i32a, invalidIndex, 0);
}, RangeError);
assertThrows(function() {
- Atomics.futexWake(i32a, invalidIndex, 0);
+ Atomics.wake(i32a, invalidIndex, 0);
}, RangeError);
var validIndex = 0;
- assertThrows(function() {
- Atomics.futexWakeOrRequeue(i32a, invalidIndex, 0, 0, validIndex);
- }, RangeError);
- assertThrows(function() {
- Atomics.futexWakeOrRequeue(i32a, validIndex, 0, 0, invalidIndex);
- }, RangeError);
});
})();
@@ -88,7 +74,7 @@
var i32a = new Int32Array(new SharedArrayBuffer(16));
var waitMs = 100;
var startTime = new Date();
- assertEquals(Atomics.TIMEDOUT, Atomics.futexWait(i32a, 0, 0, waitMs));
+ assertEquals("timed-out", Atomics.wait(i32a, 0, 0, waitMs));
var endTime = new Date();
assertTrue(endTime - startTime >= waitMs);
})();
@@ -96,17 +82,17 @@
(function TestWaitNotEqual() {
var sab = new SharedArrayBuffer(16);
var i32a = new Int32Array(sab);
- assertEquals(Atomics.NOTEQUAL, Atomics.futexWait(i32a, 0, 42));
+ assertEquals("not-equal", Atomics.wait(i32a, 0, 42));
i32a = new Int32Array(sab, 8);
i32a[0] = 1;
- assertEquals(Atomics.NOTEQUAL, Atomics.futexWait(i32a, 0, 0));
+ assertEquals("not-equal", Atomics.wait(i32a, 0, 0));
})();
(function TestWaitNegativeTimeout() {
var i32a = new Int32Array(new SharedArrayBuffer(16));
- assertEquals(Atomics.TIMEDOUT, Atomics.futexWait(i32a, 0, 0, -1));
- assertEquals(Atomics.TIMEDOUT, Atomics.futexWait(i32a, 0, 0, -Infinity));
+ assertEquals("timed-out", Atomics.wait(i32a, 0, 0, -1));
+ assertEquals("timed-out", Atomics.wait(i32a, 0, 0, -Infinity));
})();
//// WORKER ONLY TESTS
@@ -120,7 +106,7 @@ if (this.Worker) {
var workerScript =
`onmessage = function(msg) {
var i32a = new Int32Array(msg.sab, msg.offset);
- var result = Atomics.futexWait(i32a, 0, 0, ${timeout});
+ var result = Atomics.wait(i32a, 0, 0, ${timeout});
postMessage(result);
};`;
@@ -128,10 +114,10 @@ if (this.Worker) {
worker.postMessage({sab: sab, offset: offset}, [sab]);
// Spin until the worker is waiting on the futex.
- while (%AtomicsFutexNumWaitersForTesting(i32a, 0) != 1) {}
+ while (%AtomicsNumWaitersForTesting(i32a, 0) != 1) {}
- Atomics.futexWake(i32a, 0, 1);
- assertEquals(Atomics.OK, worker.getMessage());
+ Atomics.wake(i32a, 0, 1);
+ assertEquals("ok", worker.getMessage());
worker.terminate();
var worker2 = new Worker(workerScript);
@@ -140,9 +126,9 @@ if (this.Worker) {
worker2.postMessage({sab: sab, offset: offset}, [sab]);
// Spin until the worker is waiting on the futex.
- while (%AtomicsFutexNumWaitersForTesting(i32a2, 0) != 1) {}
- Atomics.futexWake(i32a2, 0, 1);
- assertEquals(Atomics.OK, worker2.getMessage());
+ while (%AtomicsNumWaitersForTesting(i32a2, 0) != 1) {}
+ Atomics.wake(i32a2, 0, 1);
+ assertEquals("ok", worker2.getMessage());
worker2.terminate();
// Futex should work when index and buffer views are different, but
@@ -152,9 +138,9 @@ if (this.Worker) {
worker3.postMessage({sab: sab, offset: 8}, [sab]);
// Spin until the worker is waiting on the futex.
- while (%AtomicsFutexNumWaitersForTesting(i32a2, 1) != 1) {}
- Atomics.futexWake(i32a2, 1, 1);
- assertEquals(Atomics.OK, worker3.getMessage());
+ while (%AtomicsNumWaitersForTesting(i32a2, 1) != 1) {}
+ Atomics.wake(i32a2, 1, 1);
+ assertEquals("ok", worker3.getMessage());
worker3.terminate();
};
@@ -184,7 +170,7 @@ if (this.Worker) {
var i32a = new Int32Array(msg.sab);
// Wait on i32a[4] (should be zero).
- var result = Atomics.futexWait(i32a, 4, 0);
+ var result = Atomics.wait(i32a, 4, 0);
// Set i32a[id] to 1 to notify the main thread which workers were
// woken up.
Atomics.store(i32a, id, 1);
@@ -199,10 +185,10 @@ if (this.Worker) {
}
// Spin until all workers are waiting on the futex.
- while (%AtomicsFutexNumWaitersForTesting(i32a, 4) != 4) {}
+ while (%AtomicsNumWaitersForTesting(i32a, 4) != 4) {}
// Wake up three waiters.
- assertEquals(3, Atomics.futexWake(i32a, 4, 3));
+ assertEquals(3, Atomics.wake(i32a, 4, 3));
var wokenCount = 0;
var waitingId = 0 + 1 + 2 + 3;
@@ -211,7 +197,7 @@ if (this.Worker) {
// Look for workers that have not yet been reaped. Set i32a[id] to 2
// when they've been processed so we don't look at them again.
if (Atomics.compareExchange(i32a, id, 1, 2) == 1) {
- assertEquals(Atomics.OK, workers[id].getMessage());
+ assertEquals("ok", workers[id].getMessage());
workers[id].terminate();
waitingId -= id;
wokenCount++;
@@ -221,131 +207,14 @@ if (this.Worker) {
assertEquals(3, wokenCount);
assertEquals(0, Atomics.load(i32a, waitingId));
- assertEquals(1, %AtomicsFutexNumWaitersForTesting(i32a, 4));
+ assertEquals(1, %AtomicsNumWaitersForTesting(i32a, 4));
// Finally wake the last waiter.
- assertEquals(1, Atomics.futexWake(i32a, 4, 1));
- assertEquals(Atomics.OK, workers[waitingId].getMessage());
+ assertEquals(1, Atomics.wake(i32a, 4, 1));
+ assertEquals("ok", workers[waitingId].getMessage());
workers[waitingId].terminate();
- assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, 4));
-
- })();
-
- (function TestWakeOrRequeue() {
- var sab = new SharedArrayBuffer(24);
- var i32a = new Int32Array(sab);
-
- // SAB values:
- // i32a[id], where id in range [0, 3]:
- // 0 => Worker |id| is still waiting on the futex
- // 1 => Worker |id| is not waiting on futex, but has not be reaped by the
- // main thread.
- // 2 => Worker |id| has been reaped.
- //
- // i32a[4]:
- // always 0. Each worker will initially wait on this index.
- //
- // i32a[5]:
- // always 0. Requeued workers will wait on this index.
-
- var workerScript =
- `onmessage = function(msg) {
- var id = msg.id;
- var i32a = new Int32Array(msg.sab);
-
- var result = Atomics.futexWait(i32a, 4, 0, Infinity);
- Atomics.store(i32a, id, 1);
- postMessage(result);
- };`;
-
- var workers = [];
- for (id = 0; id < 4; id++) {
- workers[id] = new Worker(workerScript);
- workers[id].postMessage({sab: sab, id: id}, [sab]);
- }
-
- // Spin until all workers are waiting on the futex.
- while (%AtomicsFutexNumWaitersForTesting(i32a, 4) != 4) {}
-
- var index1 = 4;
- var index2 = 5;
-
- // If futexWakeOrRequeue is called with the incorrect value, it shouldn't
- // wake any waiters.
- assertEquals(Atomics.NOTEQUAL,
- Atomics.futexWakeOrRequeue(i32a, index1, 1, 42, index2));
-
- assertEquals(4, %AtomicsFutexNumWaitersForTesting(i32a, index1));
- assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index2));
-
- // Now wake with the correct value.
- assertEquals(1, Atomics.futexWakeOrRequeue(i32a, index1, 1, 0, index2));
-
- // The workers that are still waiting should atomically be transferred to
- // the new index.
- assertEquals(3, %AtomicsFutexNumWaitersForTesting(i32a, index2));
-
- // The woken worker may not have been scheduled yet. Look for which thread
- // has set its i32a value to 1.
- var wokenCount = 0;
- while (wokenCount < 1) {
- for (id = 0; id < 4; id++) {
- if (Atomics.compareExchange(i32a, id, 1, 2) == 1) {
- wokenCount++;
- }
- }
- }
-
- assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index1));
-
- // Wake the remaining waiters.
- assertEquals(3, Atomics.futexWake(i32a, index2, 3));
-
- // As above, wait until the workers have been scheduled.
- wokenCount = 0;
- while (wokenCount < 3) {
- for (id = 0; id < 4; id++) {
- if (Atomics.compareExchange(i32a, id, 1, 2) == 1) {
- wokenCount++;
- }
- }
- }
-
- assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index1));
- assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a, index2));
-
- for (id = 0; id < 4; ++id) {
- assertEquals(Atomics.OK, workers[id].getMessage());
- }
-
- // Test futexWakeOrRequeue on offset typed array
- var offset = 16;
- sab = new SharedArrayBuffer(24);
- i32a = new Int32Array(sab);
- var i32a2 = new Int32Array(sab, offset);
-
- for (id = 0; id < 4; id++) {
- workers[id].postMessage({sab: sab, id: id}, [sab]);
- }
-
- while (%AtomicsFutexNumWaitersForTesting(i32a2, 0) != 4) { }
-
- index1 = 0;
- index2 = 1;
- assertEquals(4, %AtomicsFutexNumWaitersForTesting(i32a2, index1));
- assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a2, index2));
-
- assertEquals(2, Atomics.futexWakeOrRequeue(i32a2, index1, 2, 0, index2));
- assertEquals(2, %AtomicsFutexNumWaitersForTesting(i32a2, index2));
- assertEquals(0, %AtomicsFutexNumWaitersForTesting(i32a2, index1));
-
- assertEquals(2, Atomics.futexWake(i32a2, index2, 2));
-
- for (id = 0; id < 4; ++id) {
- assertEquals(Atomics.OK, workers[id].getMessage());
- workers[id].terminate();
- }
+ assertEquals(0, %AtomicsNumWaitersForTesting(i32a, 4));
})();
diff --git a/deps/v8/test/mjsunit/harmony/generators-turbo.js b/deps/v8/test/mjsunit/harmony/generators-turbo.js
new file mode 100644
index 0000000000..61334b93f8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/generators-turbo.js
@@ -0,0 +1,667 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition --harmony-do-expressions
+// Flags: --allow-natives-syntax --turbo --turbo-from-bytecode
+
+
+// This file is identical to mjsunit/harmony/generators.js, except for its Flags
+// lines. The purpose is to explicitly mention --turbo-from-bytecode such that
+// Clusterfuzz can thoroughly test the new generators implementation.
+
+
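+// Randomly marks |f| for optimization or deoptimization, so that suspend and
+// resume are exercised across tiering changes; --random-seed keeps the choice
+// deterministic.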
+function MaybeOptimizeOrDeoptimize(f) {
+ let x = Math.random(); // --random-seed makes this deterministic
+ if (x <= 0.33) {
+ %OptimizeFunctionOnNextCall(f);
+ } else if (x <= 0.66) {
+ %DeoptimizeFunction(f);
+ }
+}
+
+function Next(generator, ...args) {
+ MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
+ return generator.next(...args);
+}
+
+function Return(generator, ...args) {
+ MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
+ return generator.return(...args);
+}
+
+function Throw(generator, ...args) {
+ MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
+ return generator.throw(...args);
+}
+
+
+{ // yield in try-catch
+
+ let g = function*() {
+ try {yield 1} catch (error) {assertEquals("caught", error)}
+ };
+
+ assertThrowsEquals(() => Throw(g(), "not caught"), "not caught");
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Throw(x, "caught"));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
+ assertThrowsEquals(() => Throw(x, "not caught"), "not caught");
+ }
+}
+
+
+{ // return that doesn't close
+ let g = function*() { try {return 42} finally {yield 43} };
+
+ {
+ let x = g();
+ assertEquals({value: 43, done: false}, Next(x));
+ assertEquals({value: 42, done: true}, Next(x));
+ }
+}
+
+
+{ // return that doesn't close
+ let x;
+ let g = function*() { try {return 42} finally {Throw(x, 666)} };
+
+ {
+ x = g();
+ assertThrows(() => Next(x), TypeError); // still executing
+ }
+}
+
+
+{ // yield in try-finally, finally clause performs return
+
+ let g = function*() { try {yield 42} finally {return 13} };
+
+ { // "return" closes at suspendedStart
+ let x = g();
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertThrowsEquals(() => Throw(x, 43), 43);
+ assertEquals({value: 42, done: true}, Return(x, 42));
+ }
+
+ { // "throw" closes at suspendedStart
+ let x = g();
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertEquals({value: 43, done: true}, Return(x, 43));
+ assertThrowsEquals(() => Throw(x, 44), 44);
+ }
+
+ { // "next" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 13, done: true}, Next(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ }
+
+ { // "return" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 13, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ }
+
+ { // "throw" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 13, done: true}, Throw(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ }
+}
+
+
+{ // yield in try-finally, finally clause doesn't perform return
+
+ let g = function*() { try {yield 42} finally {13} };
+
+ { // "return" closes at suspendedStart
+ let x = g();
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertThrowsEquals(() => Throw(x, 43), 43);
+ assertEquals({value: 42, done: true}, Return(x, 42));
+ }
+
+ { // "throw" closes at suspendedStart
+ let x = g();
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertEquals({value: 43, done: true}, Return(x, 43));
+ assertThrowsEquals(() => Throw(x, 44), 44);
+ }
+
+ { // "next" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: 42, done: true}, Return(x, 42));
+ }
+
+ { // "return" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 44), 44);
+ assertEquals({value: 42, done: true}, Return(x, 42));
+ }
+
+ { // "throw" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: 42, done: true}, Return(x, 42));
+ }
+}
+
+
+{ // yield in try-finally, finally clause yields and performs return
+
+ let g = function*() { try {yield 42} finally {yield 43; return 13} };
+
+ {
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 666));
+ assertEquals({value: 13, done: true}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ }
+}
+
+
+{ // yield in try-finally, finally clause yields and doesn't perform return
+
+ let g = function*() { try {yield 42} finally {yield 43; 13} };
+
+ {
+ let x = g();
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 666));
+ assertEquals({value: 666, done: true}, Next(x));
+ assertEquals({value: 5, done: true}, Return(x, 5));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ }
+}
+
+
+{ // yield*, finally clause performs return
+
+ let h = function*() { try {yield 42} finally {yield 43; return 13} };
+ let g = function*() { yield 1; yield yield* h(); };
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Next(x, 666));
+ assertEquals({value: 13, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 666));
+ assertEquals({value: 13, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Throw(x, 666));
+ assertEquals({value: 13, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
+ }
+}
+
+
+{ // yield*, finally clause does not perform return
+
+ let h = function*() { try {yield 42} finally {yield 43; 13} };
+ let g = function*() { yield 1; yield yield* h(); };
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Next(x, 666));
+ assertEquals({value: undefined, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 44));
+ assertEquals({value: 44, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Throw(x, 666));
+ assertThrowsEquals(() => Next(x), 666);
+ }
+}
+
+
+{ // yield*, .return argument is final result
+
+ function* inner() {
+ yield 2;
+ }
+
+ function* g() {
+ yield 1;
+ return yield* inner();
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 2, done: false}, Next(x));
+ assertEquals({value: 42, done: true}, Return(x, 42));
+ }
+}
+
+
+// More or less random tests from here on.
+
+
+{
+ function* foo() { }
+ let g = foo();
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { return new.target }
+ let g = foo();
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { throw 666; return 42}
+ let g = foo();
+ assertThrowsEquals(() => Next(g), 666);
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo(a) { return a; }
+ let g = foo(42);
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo(a) { a.iwashere = true; return a; }
+ let x = {};
+ let g = foo(x);
+ assertEquals({value: {iwashere: true}, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let a = 42;
+ function* foo() { return a; }
+ let g = foo();
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let a = 40;
+ function* foo(b) { return a + b; }
+ let g = foo(2);
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let a = 40;
+ function* foo(b) { a--; b++; return a + b; }
+ let g = foo(2);
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let g;
+ function* foo() { Next(g) }
+ g = foo();
+ assertThrows(() => Next(g), TypeError);
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; yield 3; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+
+{
+ function* foo() { yield 2; if (true) { yield 3 }; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; if (true) { yield 3; yield 4 } }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; if (false) { yield 3 }; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; while (true) { yield 3 }; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+}
+
+{
+ function* foo() { yield 2; (yield 3) + 42; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+}
+
+{
+ function* foo() { yield 2; (do {yield 3}) + 42; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+}
+
+{
+ function* foo() { yield 2; return (yield 3) + 42; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 42, done: true}, Next(g, 0));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let x = 42;
+ function* foo() {
+ yield x;
+ for (let x in {a: 1, b: 2}) {
+ let i = 2;
+ yield x;
+ yield i;
+ do {
+ yield i;
+ } while (i-- > 0);
+ }
+ yield x;
+ return 5;
+ }
+ g = foo();
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 'a', done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 0, done: false}, Next(g));
+ assertEquals({value: 'b', done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 0, done: false}, Next(g));
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 5, done: true}, Next(g));
+}
+
+{
+ let a = 3;
+ function* foo() {
+ let b = 4;
+ yield 1;
+ { let c = 5; yield 2; yield a; yield b; yield c; }
+ }
+ g = foo();
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: 5, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() {
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ }
+ g = foo();
+ for (let i = 0; i < 100; ++i) {
+ assertEquals({value: 42, done: false}, i%25 === 0 ? Next(g) : g.next());
+ }
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() {
+ for (let i = 0; i < 3; ++i) {
+ let j = 0
+ yield i;
+ do {
+ yield (i + 10);
+ } while (++j < 2);
+ }
+ }
+ g = foo();
+ assertEquals({value: 0, done: false}, Next(g));
+ assertEquals({value: 10, done: false}, Next(g));
+ assertEquals({value: 10, done: false}, Next(g));
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 11, done: false}, Next(g));
+ assertEquals({value: 11, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 12, done: false}, Next(g));
+ assertEquals({value: 12, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let foo = function*() {
+ while (true) {
+ if (true || false) yield 42;
+ continue;
+ }
+ }
+ g = foo();
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 42, done: false}, Next(g));
+}
+
+{
+ let foo = function*() {
+ yield* (function*() { yield 42; }());
+ assertUnreachable();
+ }
+ g = foo();
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 23, done: true}, Return(g, 23));
+}
+
+{
+ let iterable = {
+ [Symbol.iterator]() {
+ return { next() { return {} } };
+ }
+ };
+ let foo = function*() { yield* iterable };
+ g = foo();
+ g.next();
+ assertThrows(() => Throw(g), TypeError);
+}
diff --git a/deps/v8/test/mjsunit/harmony/generators.js b/deps/v8/test/mjsunit/harmony/generators.js
index df6cec8925..a4fc1c4aa4 100644
--- a/deps/v8/test/mjsunit/harmony/generators.js
+++ b/deps/v8/test/mjsunit/harmony/generators.js
@@ -2,6 +2,33 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --harmony-do-expressions --allow-natives-syntax
+
+
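+// Randomly marks |f| for optimization or deoptimization, so that suspend and
+// resume are exercised across tiering changes; --random-seed keeps the choice
+// deterministic.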
+function MaybeOptimizeOrDeoptimize(f) {
+ let x = Math.random(); // --random-seed makes this deterministic
+ if (x <= 0.33) {
+ %OptimizeFunctionOnNextCall(f);
+ } else if (x <= 0.66) {
+ %DeoptimizeFunction(f);
+ }
+}
+
+function Next(generator, ...args) {
+ MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
+ return generator.next(...args);
+}
+
+function Return(generator, ...args) {
+ MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
+ return generator.return(...args);
+}
+
+function Throw(generator, ...args) {
+ MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
+ return generator.throw(...args);
+}
+
{ // yield in try-catch
@@ -9,19 +36,19 @@
try {yield 1} catch (error) {assertEquals("caught", error)}
};
- assertThrowsEquals(() => g().throw("not caught"), "not caught");
+ assertThrowsEquals(() => Throw(g(), "not caught"), "not caught");
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.throw("caught"));
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Throw(x, "caught"));
}
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.next());
- assertThrowsEquals(() => x.throw("not caught"), "not caught");
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
+ assertThrowsEquals(() => Throw(x, "not caught"), "not caught");
}
}
@@ -31,19 +58,19 @@
{
let x = g();
- assertEquals({value: 43, done: false}, x.next());
- assertEquals({value: 42, done: true}, x.next());
+ assertEquals({value: 43, done: false}, Next(x));
+ assertEquals({value: 42, done: true}, Next(x));
}
}
{ // return that doesn't close
let x;
- let g = function*() { try {return 42} finally {x.throw(666)} };
+ let g = function*() { try {return 42} finally {Throw(x, 666)} };
{
x = g();
- assertThrows(() => x.next(), TypeError); // still executing
+ assertThrows(() => Next(x), TypeError); // still executing
}
}
@@ -54,42 +81,42 @@
{ // "return" closes at suspendedStart
let x = g();
- assertEquals({value: 666, done: true}, x.return(666));
- assertEquals({value: undefined, done: true}, x.next(42));
- assertThrowsEquals(() => x.throw(43), 43);
- assertEquals({value: 42, done: true}, x.return(42));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertThrowsEquals(() => Throw(x, 43), 43);
+ assertEquals({value: 42, done: true}, Return(x, 42));
}
{ // "throw" closes at suspendedStart
let x = g();
- assertThrowsEquals(() => x.throw(666), 666);
- assertEquals({value: undefined, done: true}, x.next(42));
- assertEquals({value: 43, done: true}, x.return(43));
- assertThrowsEquals(() => x.throw(44), 44);
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertEquals({value: 43, done: true}, Return(x, 43));
+ assertThrowsEquals(() => Throw(x, 44), 44);
}
{ // "next" closes at suspendedYield
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 13, done: true}, x.next(666));
- assertEquals({value: undefined, done: true}, x.next(666));
- assertThrowsEquals(() => x.throw(666), 666);
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 13, done: true}, Next(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
}
{ // "return" closes at suspendedYield
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 13, done: true}, x.return(666));
- assertEquals({value: undefined, done: true}, x.next(666));
- assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 13, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertEquals({value: 666, done: true}, Return(x, 666));
}
{ // "throw" closes at suspendedYield
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 13, done: true}, x.throw(666));
- assertThrowsEquals(() => x.throw(666), 666);
- assertEquals({value: undefined, done: true}, x.next(666));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 13, done: true}, Throw(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 666));
}
}
@@ -100,45 +127,45 @@
{ // "return" closes at suspendedStart
let x = g();
- assertEquals({value: 666, done: true}, x.return(666));
- assertEquals({value: undefined, done: true}, x.next(42));
- assertThrowsEquals(() => x.throw(43), 43);
- assertEquals({value: 42, done: true}, x.return(42));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertThrowsEquals(() => Throw(x, 43), 43);
+ assertEquals({value: 42, done: true}, Return(x, 42));
}
{ // "throw" closes at suspendedStart
let x = g();
- assertThrowsEquals(() => x.throw(666), 666);
- assertEquals({value: undefined, done: true}, x.next(42));
- assertEquals({value: 43, done: true}, x.return(43));
- assertThrowsEquals(() => x.throw(44), 44);
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 42));
+ assertEquals({value: 43, done: true}, Return(x, 43));
+ assertThrowsEquals(() => Throw(x, 44), 44);
}
{ // "next" closes at suspendedYield
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.next(666));
- assertEquals({value: undefined, done: true}, x.next(666));
- assertThrowsEquals(() => x.throw(666), 666);
- assertEquals({value: 42, done: true}, x.return(42));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: 42, done: true}, Return(x, 42));
}
{ // "return" closes at suspendedYield
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 666, done: true}, x.return(666));
- assertEquals({value: undefined, done: true}, x.next(666));
- assertThrowsEquals(() => x.throw(44), 44);
- assertEquals({value: 42, done: true}, x.return(42));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 44), 44);
+ assertEquals({value: 42, done: true}, Return(x, 42));
}
{ // "throw" closes at suspendedYield
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertThrowsEquals(() => x.throw(666), 666);
- assertEquals({value: undefined, done: true}, x.next(666));
- assertThrowsEquals(() => x.throw(666), 666);
- assertEquals({value: 42, done: true}, x.return(42));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: undefined, done: true}, Next(x, 666));
+ assertThrowsEquals(() => Throw(x, 666), 666);
+ assertEquals({value: 42, done: true}, Return(x, 42));
}
}
@@ -149,17 +176,17 @@
{
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.return(666));
- assertEquals({value: 13, done: true}, x.next());
- assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 666));
+ assertEquals({value: 13, done: true}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
}
{
let x = g();
- assertEquals({value: 666, done: true}, x.return(666));
- assertEquals({value: undefined, done: true}, x.next());
- assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
}
}
@@ -170,17 +197,17 @@
{
let x = g();
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.return(666));
- assertEquals({value: 666, done: true}, x.next());
- assertEquals({value: 5, done: true}, x.return(5));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 666));
+ assertEquals({value: 666, done: true}, Next(x));
+ assertEquals({value: 5, done: true}, Return(x, 5));
}
{
let x = g();
- assertEquals({value: 666, done: true}, x.return(666));
- assertEquals({value: undefined, done: true}, x.next());
- assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: 666, done: true}, Return(x, 666));
+ assertEquals({value: undefined, done: true}, Next(x));
+ assertEquals({value: 666, done: true}, Return(x, 666));
}
}
@@ -192,29 +219,29 @@
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.next(666));
- assertEquals({value: 13, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.next());
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Next(x, 666));
+ assertEquals({value: 13, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
}
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.return(666));
- assertEquals({value: 13, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.next());
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 666));
+ assertEquals({value: 13, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
}
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.throw(666));
- assertEquals({value: 13, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.next());
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Throw(x, 666));
+ assertEquals({value: 13, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
}
}
@@ -226,28 +253,28 @@
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.next(666));
- assertEquals({value: undefined, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.next());
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Next(x, 666));
+ assertEquals({value: undefined, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
}
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.return(44));
- assertEquals({value: 44, done: false}, x.next());
- assertEquals({value: undefined, done: true}, x.next());
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Return(x, 44));
+ assertEquals({value: 44, done: false}, Next(x));
+ assertEquals({value: undefined, done: true}, Next(x));
}
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: 42, done: false}, x.next());
- assertEquals({value: 43, done: false}, x.throw(666));
- assertThrowsEquals(() => x.next(), 666);
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 42, done: false}, Next(x));
+ assertEquals({value: 43, done: false}, Throw(x, 666));
+ assertThrowsEquals(() => Next(x), 666);
}
}
@@ -265,8 +292,370 @@
{
let x = g();
- assertEquals({value: 1, done: false}, x.next());
- assertEquals({value: 2, done: false}, x.next());
- assertEquals({value: 42, done: true}, x.return(42));
+ assertEquals({value: 1, done: false}, Next(x));
+ assertEquals({value: 2, done: false}, Next(x));
+ assertEquals({value: 42, done: true}, Return(x, 42));
+ }
+}
+
+
+// More or less random tests from here on.
+
+
+{
+ function* foo() { }
+ let g = foo();
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { return new.target }
+ let g = foo();
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { throw 666; return 42}
+ let g = foo();
+ assertThrowsEquals(() => Next(g), 666);
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo(a) { return a; }
+ let g = foo(42);
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo(a) { a.iwashere = true; return a; }
+ let x = {};
+ let g = foo(x);
+ assertEquals({value: {iwashere: true}, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let a = 42;
+ function* foo() { return a; }
+ let g = foo();
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let a = 40;
+ function* foo(b) { return a + b; }
+ let g = foo(2);
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let a = 40;
+ function* foo(b) { a--; b++; return a + b; }
+ let g = foo(2);
+ assertEquals({value: 42, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let g;
+ function* foo() { Next(g) }
+ g = foo();
+ assertThrows(() => Next(g), TypeError);
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; yield 3; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+
+{
+ function* foo() { yield 2; if (true) { yield 3 }; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; if (true) { yield 3; yield 4 } }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; if (false) { yield 3 }; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() { yield 2; while (true) { yield 3 }; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+}
+
+{
+ function* foo() { yield 2; (yield 3) + 42; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+}
+
+{
+ function* foo() { yield 2; (do {yield 3}) + 42; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+}
+
+{
+ function* foo() { yield 2; return (yield 3) + 42; yield 4 }
+ g = foo();
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 42, done: true}, Next(g, 0));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let x = 42;
+ function* foo() {
+ yield x;
+ for (let x in {a: 1, b: 2}) {
+ let i = 2;
+ yield x;
+ yield i;
+ do {
+ yield i;
+ } while (i-- > 0);
+ }
+ yield x;
+ return 5;
+ }
+ g = foo();
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 'a', done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 0, done: false}, Next(g));
+ assertEquals({value: 'b', done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 0, done: false}, Next(g));
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 5, done: true}, Next(g));
+}
+
+{
+ let a = 3;
+ function* foo() {
+ let b = 4;
+ yield 1;
+ { let c = 5; yield 2; yield a; yield b; yield c; }
+ }
+ g = foo();
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 3, done: false}, Next(g));
+ assertEquals({value: 4, done: false}, Next(g));
+ assertEquals({value: 5, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() {
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ yield 42;
+ }
+ g = foo();
+ for (let i = 0; i < 100; ++i) {
+ assertEquals({value: 42, done: false}, i%25 === 0 ? Next(g) : g.next());
+ }
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ function* foo() {
+ for (let i = 0; i < 3; ++i) {
+ let j = 0
+ yield i;
+ do {
+ yield (i + 10);
+ } while (++j < 2);
+ }
}
+ g = foo();
+ assertEquals({value: 0, done: false}, Next(g));
+ assertEquals({value: 10, done: false}, Next(g));
+ assertEquals({value: 10, done: false}, Next(g));
+ assertEquals({value: 1, done: false}, Next(g));
+ assertEquals({value: 11, done: false}, Next(g));
+ assertEquals({value: 11, done: false}, Next(g));
+ assertEquals({value: 2, done: false}, Next(g));
+ assertEquals({value: 12, done: false}, Next(g));
+ assertEquals({value: 12, done: false}, Next(g));
+ assertEquals({value: undefined, done: true}, Next(g));
+}
+
+{
+ let foo = function*() {
+ while (true) {
+ if (true || false) yield 42;
+ continue;
+ }
+ }
+ g = foo();
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 42, done: false}, Next(g));
+}
+
+{
+ let foo = function*() {
+ yield* (function*() { yield 42; }());
+ assertUnreachable();
+ }
+ g = foo();
+ assertEquals({value: 42, done: false}, Next(g));
+ assertEquals({value: 23, done: true}, Return(g, 23));
+}
+
+{
+ let iterable = {
+ [Symbol.iterator]() {
+ return { next() { return {} } };
+ }
+ };
+ let foo = function*() { yield* iterable };
+ g = foo();
+ g.next();
+ assertThrows(() => Throw(g), TypeError);
}
diff --git a/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js b/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
index 3292e94eee..03e5aeacdb 100644
--- a/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
+++ b/deps/v8/test/mjsunit/harmony/harmony-string-pad-end.js
@@ -67,8 +67,19 @@
(function TestFillerToString() {
assertEquals(". ", ".".padEnd(10));
assertEquals(". ", ".".padEnd(10, undefined));
- assertEquals(". ", ".".padEnd(10, { toString() { return ""; } }));
assertEquals(".nullnulln", ".".padEnd(10, null));
+ assertEquals(".XXXXXXXXX", ".".padEnd(10, { toString() { return "X"; } }));
+ assertEquals(
+ ".111111111",
+ ".".padEnd(10, { toString: undefined, valueOf() { return 1; } }));
+})();
+
+
+(function TestFillerEmptyString() {
+ assertEquals(".", ".".padEnd(10, ""));
+ assertEquals(".", ".".padEnd(10, { toString() { return ""; } }));
+ assertEquals(
+ ".", ".".padEnd(10, { toString: undefined, valueOf() { return ""; } }));
})();
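+
+
+(function TestFillerSymbolToPrimitive() {
+ // A sketch of the coercion order, assuming the filler goes through the
+ // ordinary ToString algorithm: Symbol.toPrimitive should be consulted
+ // before toString.
+ var filler = {
+ [Symbol.toPrimitive]() { return "Y"; },
+ toString() { return "X"; }
+ };
+ assertEquals(".YYYYYYYYY", ".".padEnd(10, filler));
+})();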
diff --git a/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js b/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
index 2b2d004251..33bf8f3c9d 100644
--- a/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
+++ b/deps/v8/test/mjsunit/harmony/harmony-string-pad-start.js
@@ -67,8 +67,19 @@
(function TestFillerToString() {
assertEquals(" .", ".".padStart(10));
assertEquals(" .", ".".padStart(10, undefined));
- assertEquals(" .", ".".padStart(10, { toString() { return ""; } }));
assertEquals("nullnulln.", ".".padStart(10, null));
+ assertEquals("XXXXXXXXX.", ".".padStart(10, { toString() { return "X"; } }));
+ assertEquals(
+ "111111111.",
+ ".".padStart(10, { toString: undefined, valueOf() { return 1; } }));
+})();
+
+
+(function TestFillerEmptyString() {
+ assertEquals(".", ".".padStart(10, ""));
+ assertEquals(".", ".".padStart(10, { toString() { return ""; } }));
+ assertEquals(
+ ".", ".".padStart(10, { toString: undefined, valueOf() { return ""; } }));
})();
diff --git a/deps/v8/test/mjsunit/harmony/mirror-async-function-promise.js b/deps/v8/test/mjsunit/harmony/mirror-async-function-promise.js
new file mode 100644
index 0000000000..966b0ce267
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/mirror-async-function-promise.js
@@ -0,0 +1,93 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-async-await --allow-natives-syntax
+// Test the mirror object for promises.
+
+var AsyncFunction = (async function() {}).constructor;
+
+function MirrorRefCache(json_refs) {
+ var tmp = eval('(' + json_refs + ')');
+ this.refs_ = [];
+ for (var i = 0; i < tmp.length; i++) {
+ this.refs_[tmp[i].handle] = tmp[i];
+ }
+}
+
+MirrorRefCache.prototype.lookup = function(handle) {
+ return this.refs_[handle];
+}
+
+function testPromiseMirror(promise, status, value) {
+ // Create mirror and JSON representation.
+ var mirror = debug.MakeMirror(promise);
+ var serializer = debug.MakeMirrorSerializer();
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
+
+ // Check the mirror hierarchy.
+ assertTrue(mirror instanceof debug.Mirror);
+ assertTrue(mirror instanceof debug.ValueMirror);
+ assertTrue(mirror instanceof debug.ObjectMirror);
+ assertTrue(mirror instanceof debug.PromiseMirror);
+
+ // Check the mirror properties.
+ assertEquals(status, mirror.status());
+ assertTrue(mirror.isPromise());
+ assertEquals('promise', mirror.type());
+ assertFalse(mirror.isPrimitive());
+ assertEquals("Object", mirror.className());
+ assertEquals("#<Promise>", mirror.toText());
+ assertSame(promise, mirror.value());
+ assertTrue(mirror.promiseValue() instanceof debug.Mirror);
+ assertEquals(value, mirror.promiseValue().value());
+
+ // Parse JSON representation and check.
+ var fromJSON = eval('(' + json + ')');
+ assertEquals('promise', fromJSON.type);
+ assertEquals('Object', fromJSON.className);
+ assertEquals('function', refs.lookup(fromJSON.constructorFunction.ref).type);
+ assertEquals('Promise', refs.lookup(fromJSON.constructorFunction.ref).name);
+ assertEquals(status, fromJSON.status);
+ assertEquals(value, refs.lookup(fromJSON.promiseValue.ref).value);
+}
+
+// Test a number of different promises.
+var resolved = (async function() {})();
+var rejected = (async function() { throw undefined; })();
+var pending = (async function() { await 1; })();
+
+testPromiseMirror(resolved, "resolved", undefined);
+testPromiseMirror(rejected, "rejected", undefined);
+testPromiseMirror(pending, "pending", undefined);
+
+var resolvedv = (async function() { return "resolve"; })();
+var rejectedv = (async function() { return Promise.reject("reject"); })();
+var thrownv = (async function() { throw "throw"; })();
+
+testPromiseMirror(resolvedv, "resolved", 'resolve');
+testPromiseMirror(rejectedv, "rejected", 'reject');
+testPromiseMirror(thrownv, "rejected", 'throw');
+
+// Test internal properties of different promises.
+var m1 = debug.MakeMirror((async function() { return 1; })());
+var ip = m1.internalProperties();
+assertEquals(2, ip.length);
+assertEquals("[[PromiseStatus]]", ip[0].name());
+assertEquals("[[PromiseValue]]", ip[1].name());
+assertEquals("resolved", ip[0].value().value());
+assertEquals(1, ip[1].value().value());
+
+var m2 = debug.MakeMirror((async function() { throw 2; })());
+ip = m2.internalProperties();
+assertEquals("rejected", ip[0].value().value());
+assertEquals(2, ip[1].value().value());
+
+var m3 = debug.MakeMirror((async function() { await 1; })());
+ip = m3.internalProperties();
+assertEquals("pending", ip[0].value().value());
+assertEquals("undefined", typeof(ip[1].value().value()));
+
+%RunMicrotasks();
diff --git a/deps/v8/test/mjsunit/harmony/mirror-async-function.js b/deps/v8/test/mjsunit/harmony/mirror-async-function.js
new file mode 100644
index 0000000000..b4ba8314a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/mirror-async-function.js
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-async-await --allow-natives-syntax
+// Test the mirror object for functions.
+
+var AsyncFunction = (async function() {}).constructor;
+
+function MirrorRefCache(json_refs) {
+ var tmp = eval('(' + json_refs + ')');
+ this.refs_ = [];
+ for (var i = 0; i < tmp.length; i++) {
+ this.refs_[tmp[i].handle] = tmp[i];
+ }
+}
+
+MirrorRefCache.prototype.lookup = function(handle) {
+ return this.refs_[handle];
+}
+
+function testFunctionMirror(f) {
+ // Create mirror and JSON representation.
+ var mirror = debug.MakeMirror(f);
+ var serializer = debug.MakeMirrorSerializer();
+ var json = JSON.stringify(serializer.serializeValue(mirror));
+ var refs = new MirrorRefCache(
+ JSON.stringify(serializer.serializeReferencedObjects()));
+
+ // Check the mirror hierarchy.
+ assertTrue(mirror instanceof debug.Mirror);
+ assertTrue(mirror instanceof debug.ValueMirror);
+ assertTrue(mirror instanceof debug.ObjectMirror);
+ assertTrue(mirror instanceof debug.FunctionMirror);
+
+ // Check the mirror properties.
+ assertTrue(mirror.isFunction());
+ assertEquals('function', mirror.type());
+ assertFalse(mirror.isPrimitive());
+ assertEquals("Function", mirror.className());
+ assertEquals(f.name, mirror.name());
+ assertTrue(mirror.resolved());
+ assertEquals(f.toString(), mirror.source());
+ assertTrue(mirror.constructorFunction() instanceof debug.ObjectMirror);
+ assertTrue(mirror.protoObject() instanceof debug.Mirror);
+ assertTrue(mirror.prototypeObject() instanceof debug.Mirror);
+
+ // Test text representation
+ assertEquals(f.toString(), mirror.toText());
+
+ // Parse JSON representation and check.
+ var fromJSON = eval('(' + json + ')');
+ assertEquals('function', fromJSON.type);
+ assertEquals('Function', fromJSON.className);
+ assertEquals('function', refs.lookup(fromJSON.constructorFunction.ref).type);
+ assertEquals('AsyncFunction',
+ refs.lookup(fromJSON.constructorFunction.ref).name);
+ assertTrue(fromJSON.resolved);
+ assertEquals(f.name, fromJSON.name);
+ assertEquals(f.toString(), fromJSON.source);
+
+ // Check the formatted text (regress 1142074).
+ assertEquals(f.toString(), fromJSON.text);
+}
+
+
+// Test a number of different functions.
+testFunctionMirror(async function(){});
+testFunctionMirror(AsyncFunction());
+testFunctionMirror(new AsyncFunction());
+testFunctionMirror(async() => {});
+testFunctionMirror(async function a(){return 1;});
+testFunctionMirror(({ async foo() {} }).foo);
+testFunctionMirror((async function(){}).bind({}), "Object");
+
+%RunMicrotasks();
diff --git a/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js b/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
index 7f631d8e58..c71b20a226 100644
--- a/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
+++ b/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
@@ -195,7 +195,14 @@ function TestDuplicateKeys() {
});
var result = Object.getOwnPropertyDescriptors(P);
- assertEquals({ "A": undefined }, result);
+ assertEquals({
+ "A": {
+ "value": "VALUE",
+ "writable": false,
+ "enumerable": false,
+ "configurable": true
+ }
+ }, result);
assertTrue(result.hasOwnProperty("A"));
assertEquals([
"ownKeys()",
@@ -204,3 +211,25 @@ function TestDuplicateKeys() {
], log);
}
TestDuplicateKeys();
+
+function TestFakeProperty() {
+ var log = [];
+ var P = new Proxy({}, {
+ ownKeys() {
+ log.push(`ownKeys()`);
+ return ["fakeProperty"];
+ },
+ getOwnPropertyDescriptor(target, name) {
+ log.push(`getOwnPropertyDescriptor(${name})`);
+ return;
+ }
+ });
+ var result = Object.getOwnPropertyDescriptors(P);
+ assertEquals({}, result);
+ assertFalse(result.hasOwnProperty("fakeProperty"));
+ assertEquals([
+ "ownKeys()",
+ "getOwnPropertyDescriptor(fakeProperty)"
+ ], log);
+}
+TestFakeProperty();
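+
+// A sketch of the idiom this API enables, assuming only standard
+// Object.defineProperties behavior: copying an object while preserving
+// its accessor properties.
+function TestCloneIdiom() {
+ var source = { get answer() { return 42; } };
+ var copy = Object.defineProperties(
+ {}, Object.getOwnPropertyDescriptors(source));
+ assertEquals(42, copy.answer);
+}
+TestCloneIdiom();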
diff --git a/deps/v8/test/mjsunit/harmony/regexp-change-exec.js b/deps/v8/test/mjsunit/harmony/regexp-change-exec.js
index 4c9757e3d5..ff84506d89 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-change-exec.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-change-exec.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-exec
-
class MyError extends Error { }
RegExp.prototype.exec = () => { throw new MyError() };
assertThrows(() => "foo".match(/bar/), MyError);
diff --git a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
new file mode 100644
index 0000000000..ced8e4b2f6
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
@@ -0,0 +1,76 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-named-captures
+
+// Malformed named captures.
+assertThrows("/(?<>a)/u"); // Empty name.
+assertThrows("/(?<aa)/u"); // Unterminated name.
+assertThrows("/(?<42a>a)/u"); // Name starting with digits.
+assertThrows("/(?<:a>a)/u"); // Name starting with invalid char.
+assertThrows("/(?<a:>a)/u"); // Name containing with invalid char.
+assertThrows("/(?<a>a)(?<a>a)/u"); // Duplicate name.
+assertThrows("/(?<a>a)(?<b>b)(?<a>a)/u"); // Duplicate name.
+assertThrows("/\\k<a>/u"); // Invalid reference.
+assertThrows("/(?<a>a)\\k<ab>/u"); // Invalid reference.
+assertThrows("/(?<ab>a)\\k<a>/u"); // Invalid reference.
+assertThrows("/\\k<a>(?<ab>a)/u"); // Invalid reference.
+
+// Fallback behavior in non-unicode mode.
+assertThrows("/(?<>a)/");
+assertThrows("/(?<aa)/");
+assertThrows("/(?<42a>a)/");
+assertThrows("/(?<:a>a)/");
+assertThrows("/(?<a:>a)/");
+assertThrows("/(?<a>a)(?<a>a)/");
+assertThrows("/(?<a>a)(?<b>b)(?<a>a)/");
+assertThrows("/(?<a>a)\\k<ab>/");
+assertThrows("/(?<ab>a)\\k<a>/");
+
+assertEquals(["k<a>"], "xxxk<a>xxx".match(/\k<a>/));
+assertEquals(["k<a"], "xxxk<a>xxx".match(/\k<a/));
+
+// Basic named groups.
+assertEquals(["a", "a"], "bab".match(/(?<a>a)/u));
+assertEquals(["a", "a"], "bab".match(/(?<a42>a)/u));
+assertEquals(["a", "a"], "bab".match(/(?<_>a)/u));
+assertEquals(["a", "a"], "bab".match(/(?<$>a)/u));
+assertEquals(["bab", "a"], "bab".match(/.(?<$>a)./u));
+assertEquals(["bab", "a", "b"], "bab".match(/.(?<a>a)(.)/u));
+assertEquals(["bab", "a", "b"], "bab".match(/.(?<a>a)(?<b>.)/u));
+assertEquals(["bab", "ab"], "bab".match(/.(?<a>\w\w)/u));
+assertEquals(["bab", "bab"], "bab".match(/(?<a>\w\w\w)/u));
+assertEquals(["bab", "ba", "b"], "bab".match(/(?<a>\w\w)(?<b>\w)/u));
+
+assertEquals("bab".match(/(a)/u), "bab".match(/(?<a>a)/u));
+assertEquals("bab".match(/(a)/u), "bab".match(/(?<a42>a)/u));
+assertEquals("bab".match(/(a)/u), "bab".match(/(?<_>a)/u));
+assertEquals("bab".match(/(a)/u), "bab".match(/(?<$>a)/u));
+assertEquals("bab".match(/.(a)./u), "bab".match(/.(?<$>a)./u));
+assertEquals("bab".match(/.(a)(.)/u), "bab".match(/.(?<a>a)(.)/u));
+assertEquals("bab".match(/.(a)(.)/u), "bab".match(/.(?<a>a)(?<b>.)/u));
+assertEquals("bab".match(/.(\w\w)/u), "bab".match(/.(?<a>\w\w)/u));
+assertEquals("bab".match(/(\w\w\w)/u), "bab".match(/(?<a>\w\w\w)/u));
+assertEquals("bab".match(/(\w\w)(\w)/u), "bab".match(/(?<a>\w\w)(?<b>\w)/u));
+
+assertEquals(["bab", "b"], "bab".match(/(?<b>b).\1/u));
+assertEquals(["baba", "b", "a"], "baba".match(/(.)(?<a>a)\1\2/u));
+assertEquals(["baba", "b", "a", "b", "a"],
+ "baba".match(/(.)(?<a>a)(?<b>\1)(\2)/u));
+assertEquals(["<a", "<"], "<a".match(/(?<lt><)a/u));
+assertEquals([">a", ">"], ">a".match(/(?<gt>>)a/u));
+
+// Named references.
+assertEquals(["bab", "b"], "bab".match(/(?<b>.).\k<b>/u));
+assertNull("baa".match(/(?<b>.).\k<b>/u));
+
+// Nested groups.
+assertEquals(["bab", "bab", "ab", "b"], "bab".match(/(?<a>.(?<b>.(?<c>.)))/u));
+
+// Reference inside group.
+assertEquals(["bab", "b"], "bab".match(/(?<a>\k<a>\w)../u));
+
+// Reference before group.
+assertEquals(["bab", "b"], "bab".match(/\k<a>(?<a>b)\w\k<a>/u));
+assertEquals(["bab", "b", "a"], "bab".match(/(?<b>b)\k<a>(?<a>a)\k<b>/u));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-binary.js b/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
new file mode 100644
index 0000000000..c0b4426d5e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+
+function t(re, s) { assertTrue(re.test(s)); }
+function f(re, s) { assertFalse(re.test(s)); }
+
+t(/\p{Bidi_Control}+/u, "\u200E");
+f(/\p{Bidi_C}+/u, "On a dark desert highway, cool wind in my hair");
+t(/\p{AHex}+/u, "DEADBEEF");
+t(/\p{Alphabetic}+/u, "abcdefg");
+t(/\P{Alphabetic}+/u, "1234");
+t(/\p{White_Space}+/u, "\u00A0");
+t(/\p{Uppercase}+/u, "V");
+f(/\p{Lower}+/u, "U");
+t(/\p{Ideo}+/u, "字");
+f(/\p{Ideo}+/u, "x");
+
+assertThrows("/\\p{Hiragana}/u");
+assertThrows("/\\p{Bidi_Class}/u");
+assertThrows("/\\p{Bidi_C=False}/u");
+assertThrows("/\\P{Bidi_Control=Y}/u");
+assertThrows("/\\p{AHex=Yes}/u");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-blocks.js b/deps/v8/test/mjsunit/harmony/regexp-property-blocks.js
index d186e985f9..de3fd1e276 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-blocks.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-blocks.js
@@ -2,33 +2,33 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-property --harmony-unicode-regexps
+// Flags: --harmony-regexp-property
function t(re, s) { assertTrue(re.test(s)); }
function f(re, s) { assertFalse(re.test(s)); }
-t(/\p{InASCII}+/u, ".");
-t(/\p{InASCII}+/u, "supercalifragilisticexpialidocious");
-t(/\p{InBasic_Latin}+/u, ".");
-t(/\p{InBasic_Latin}+/u, "supercalifragilisticexpialidocious");
+t(/\p{Block=ASCII}+/u, ".");
+t(/\p{Block=ASCII}+/u, "supercalifragilisticexpialidocious");
+t(/\p{Block=Basic_Latin}+/u, ".");
+t(/\p{Block=Basic_Latin}+/u, "supercalifragilisticexpialidocious");
-t(/\p{InCJK}+/u, "话说天下大势,分久必合,合久必分");
-t(/\p{InCJK_Unified_Ideographs}+/u, "吾庄后有一桃园,花开正盛");
-f(/\p{InCJK}+/u, "おはようございます");
-f(/\p{InCJK_Unified_Ideographs}+/u,
+t(/\p{blk=CJK}+/u, "话说天下大势,分久必合,合久必分");
+t(/\p{blk=CJK_Unified_Ideographs}+/u, "吾庄后有一桃园,花开正盛");
+f(/\p{blk=CJK}+/u, "おはようございます");
+f(/\p{blk=CJK_Unified_Ideographs}+/u,
"Something is rotten in the state of Denmark");
-t(/\p{InLatin_1}+/u, "Wie froh bin ich, daß ich weg bin!");
-f(/\p{InLatin_1_Supplement}+/u, "奔腾千里荡尘埃,渡水登山紫雾开");
-f(/\p{InLatin_1_Sup}+/u, "いただきます");
+t(/\p{blk=Latin_1}+/u, "Wie froh bin ich, daß ich weg bin!");
+f(/\p{blk=Latin_1_Supplement}+/u, "奔腾千里荡尘埃,渡水登山紫雾开");
+f(/\p{blk=Latin_1_Sup}+/u, "いただきます");
-t(/\p{InHiragana}/u, "いただきます");
-t(/\p{Hiragana}/u, "\u{1b001}"); // This refers to the script "Hiragana".
-f(/\p{InHiragana}/u, "\u{1b001}"); // This refers to the block "Hiragana".
+t(/\p{blk=Hiragana}/u, "いただきます");
+t(/\p{sc=Hiragana}/u, "\u{1b001}"); // This refers to the script "Hiragana".
+f(/\p{blk=Hiragana}/u, "\u{1b001}"); // This refers to the block "Hiragana".
-t(/\p{InGreek_And_Coptic}/u,
+t(/\p{blk=Greek_And_Coptic}/u,
"ἄνδρα μοι ἔννεπε, μοῦσα, πολύτροπον, ὃς μάλα πολλὰ");
-t(/\p{InGreek}/u, "μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος");
+t(/\p{blk=Greek}/u, "μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος");
assertThrows("/\\p{In}/u");
assertThrows("/\\pI/u");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-char-class.js b/deps/v8/test/mjsunit/harmony/regexp-property-char-class.js
index 76774cb572..c70e82676e 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-char-class.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-char-class.js
@@ -2,16 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps --harmony-regexp-property
+// Flags: --harmony-regexp-property
assertThrows("/[\\p]/u");
assertThrows("/[\\p{garbage}]/u");
assertThrows("/[\\p{}]/u");
assertThrows("/[\\p{]/u");
assertThrows("/[\\p}]/u");
-assertThrows("/[\\p{Math}]/u");
-assertThrows("/[\\p{Bidi_M}]/u");
-assertThrows("/[\\p{Hex}]/u");
assertTrue(/^[\p{Lu}\p{Ll}]+$/u.test("ABCabc"));
assertTrue(/^[\p{Lu}-\p{Ll}]+$/u.test("ABC-abc"));
@@ -19,6 +16,9 @@ assertFalse(/^[\P{Lu}\p{Ll}]+$/u.test("ABCabc"));
assertTrue(/^[\P{Lu}\p{Ll}]+$/u.test("abc"));
assertTrue(/^[\P{Lu}]+$/u.test("abc123"));
assertFalse(/^[\P{Lu}]+$/u.test("XYZ"));
+assertTrue(/[\p{Math}]/u.test("+"));
+assertTrue(/[\P{Bidi_M}]/u.test(" "));
+assertTrue(/[\p{Hex}]/u.test("A"));
assertTrue(/^[^\P{Lu}]+$/u.test("XYZ"));
assertFalse(/^[^\p{Lu}\p{Ll}]+$/u.test("abc"));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-disabled.js b/deps/v8/test/mjsunit/harmony/regexp-property-disabled.js
index 7a3158c68b..f471ef4d9d 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-disabled.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-disabled.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps --no-harmony-regexp-property
+// Flags: --no-harmony-regexp-property
function test(source, message) {
try {
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-enumerated.js b/deps/v8/test/mjsunit/harmony/regexp-property-enumerated.js
new file mode 100644
index 0000000000..dba8397e78
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-enumerated.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+
+function t(re, s) { assertTrue(re.test(s)); }
+function f(re, s) { assertFalse(re.test(s)); }
+
+t(/\p{Bidi_Class=L}+/u, "Is this the real life?");
+t(/\p{bc=Left_To_Right}+/u, "Is this just fantasy?");
+t(/\p{bc=AL}+/u, "السلام عليكم‎");
+t(/\p{bc=Arabic_Letter}+/u, "متشرف بمعرفتك‎");
+
+t(/\p{Line_Break=Glue}/u, "\u00A0");
+t(/\p{lb=AL}/u, "~");
+
+assertThrows("/\\p{Block=}/u");
+assertThrows("/\\p{=}/u");
+assertThrows("/\\p{=L}/u");
+assertThrows("/\\p{=Hiragana}/u");
+assertThrows("/\\p{Block=CJK=}/u");
+
+assertThrows("/\\p{Age=V8_0}/u");
+assertThrows("/\\p{General_Category=Letter}/u");
+assertThrows("/\\p{gc=L}/u");
+assertThrows("/\\p{General_Category_Mask=Letter}/u");
+assertThrows("/\\p{gcm=L}/u");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js b/deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js
index 4dfcc5f96e..0d1f70459e 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-exact-match.js
@@ -2,20 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-property --harmony-unicode-regexps
+// Flags: --harmony-regexp-property
assertThrows("/\\p{In CJK}/u");
assertThrows("/\\p{InCJKUnifiedIdeographs}/u");
-assertDoesNotThrow("/\\p{InCJK}/u");
-assertDoesNotThrow("/\\p{InCJK_Unified_Ideographs}/u");
+assertThrows("/\\p{InCJK}/u");
+assertThrows("/\\p{InCJK_Unified_Ideographs}/u");
-assertDoesNotThrow("/\\p{InCyrillic_Sup}/u");
-assertDoesNotThrow("/\\p{InCyrillic_Supplement}/u");
-assertDoesNotThrow("/\\p{InCyrillic_Supplementary}/u");
+assertThrows("/\\p{InCyrillic_Sup}/u");
+assertThrows("/\\p{InCyrillic_Supplement}/u");
+assertThrows("/\\p{InCyrillic_Supplementary}/u");
assertThrows("/\\p{InCyrillicSupplementary}/u");
assertThrows("/\\p{InCyrillic_supplementary}/u");
-assertDoesNotThrow("/\\pC/u");
+assertDoesNotThrow("/\\p{C}/u");
assertDoesNotThrow("/\\p{Other}/u");
assertDoesNotThrow("/\\p{Cc}/u");
assertDoesNotThrow("/\\p{Control}/u");
@@ -25,9 +25,18 @@ assertDoesNotThrow("/\\p{Mark}/u");
assertDoesNotThrow("/\\p{Combining_Mark}/u");
assertThrows("/\\p{Combining Mark}/u");
-assertDoesNotThrow("/\\p{Copt}/u");
-assertDoesNotThrow("/\\p{Coptic}/u");
-assertDoesNotThrow("/\\p{Qaac}/u");
-assertDoesNotThrow("/\\p{Egyp}/u");
-assertDoesNotThrow("/\\p{Egyptian_Hieroglyphs}/u");
+assertDoesNotThrow("/\\p{Script=Copt}/u");
+assertThrows("/\\p{Coptic}/u");
+assertThrows("/\\p{Qaac}/u");
+assertThrows("/\\p{Egyp}/u");
+assertDoesNotThrow("/\\p{Script=Egyptian_Hieroglyphs}/u");
assertThrows("/\\p{EgyptianHieroglyphs}/u");
+
+assertThrows("/\\p{BidiClass=LeftToRight}/u");
+assertThrows("/\\p{BidiC=LeftToRight}/u");
+assertThrows("/\\p{bidi_c=Left_To_Right}/u");
+
+assertDoesNotThrow("/\\p{Block=CJK}/u");
+assertThrows("/\\p{Block = CJK}/u");
+assertThrows("/\\p{Block=cjk}/u");
+assertThrows("/\\p{BLK=CJK}/u");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-general-category.js b/deps/v8/test/mjsunit/harmony/regexp-property-general-category.js
index e2015ad72d..e4fb8b5232 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-general-category.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-general-category.js
@@ -2,16 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-property --harmony-unicode-regexps
+// Flags: --harmony-regexp-property
assertThrows("/\\p/u");
assertThrows("/\\p{garbage}/u");
assertThrows("/\\p{}/u");
assertThrows("/\\p{/u");
assertThrows("/\\p}/u");
-assertThrows("/\p{Math}/u");
-assertThrows("/\p{Bidi_M}/u");
-assertThrows("/\p{Hex}/u");
+assertThrows("/\\pL/u");
+assertThrows("/\\P/u");
+assertThrows("/\\P{garbage}/u");
+assertThrows("/\\P{}/u");
+assertThrows("/\\P{/u");
+assertThrows("/\\P}/u");
+assertThrows("/\\PL/u");
assertTrue(/\p{Ll}/u.test("a"));
assertFalse(/\P{Ll}/u.test("a"));
@@ -26,10 +30,10 @@ assertTrue(/\p{Ll}/iu.test("a"));
assertTrue(/\p{Ll}/iu.test("\u{118D4}"));
assertTrue(/\p{Ll}/iu.test("A"));
assertTrue(/\p{Ll}/iu.test("\u{118B4}"));
-assertFalse(/\P{Ll}/iu.test("a"));
-assertFalse(/\P{Ll}/iu.test("\u{118D4}"));
-assertFalse(/\P{Ll}/iu.test("A"));
-assertFalse(/\P{Ll}/iu.test("\u{118B4}"));
+assertTrue(/\P{Ll}/iu.test("a"));
+assertTrue(/\P{Ll}/iu.test("\u{118D4}"));
+assertTrue(/\P{Ll}/iu.test("A"));
+assertTrue(/\P{Ll}/iu.test("\u{118B4}"));
assertTrue(/\p{Lu}/u.test("A"));
assertFalse(/\P{Lu}/u.test("A"));
@@ -44,22 +48,16 @@ assertTrue(/\p{Lu}/iu.test("a"));
assertTrue(/\p{Lu}/iu.test("\u{118D4}"));
assertTrue(/\p{Lu}/iu.test("A"));
assertTrue(/\p{Lu}/iu.test("\u{118B4}"));
-assertFalse(/\P{Lu}/iu.test("a"));
-assertFalse(/\P{Lu}/iu.test("\u{118D4}"));
-assertFalse(/\P{Lu}/iu.test("A"));
-assertFalse(/\P{Lu}/iu.test("\u{118B4}"));
+assertTrue(/\P{Lu}/iu.test("a"));
+assertTrue(/\P{Lu}/iu.test("\u{118D4}"));
+assertTrue(/\P{Lu}/iu.test("A"));
+assertTrue(/\P{Lu}/iu.test("\u{118B4}"));
assertTrue(/\p{Sm}/u.test("+"));
assertFalse(/\P{Sm}/u.test("+"));
assertTrue(/\p{Sm}/u.test("\u{1D6C1}"));
assertFalse(/\P{Sm}/u.test("\u{1D6C1}"));
-assertTrue(/\pL/u.test("a"));
-assertFalse(/\PL/u.test("a"));
-assertFalse(/\pL/u.test("1"));
-assertTrue(/\PL/u.test("1"));
-assertTrue(/\pL/u.test("\u1FAB"));
-assertFalse(/\PL/u.test("\u1FAB"));
assertFalse(/\p{L}/u.test("\uA6EE"));
assertTrue(/\P{L}/u.test("\uA6EE"));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js
new file mode 100644
index 0000000000..115e064005
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-lu-ui.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+
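+// Exhaustive cross-check: /\P{Lu}/ui must agree, for every code point,
+// with the precomputed case-insensitive complement class below.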
+const regexp = /\P{Lu}/ui;
+const regexpu = /[\0-@\[-\xBF\xD7\xDF-\xFF\u0101\u0103\u0105\u0107\u0109\u010B\u010D\u010F\u0111\u0113\u0115\u0117\u0119\u011B\u011D\u011F\u0121\u0123\u0125\u0127\u0129\u012B\u012D\u012F\u0131\u0133\u0135\u0137\u0138\u013A\u013C\u013E\u0140\u0142\u0144\u0146\u0148\u0149\u014B\u014D\u014F\u0151\u0153\u0155\u0157\u0159\u015B\u015D\u015F\u0161\u0163\u0165\u0167\u0169\u016B\u016D\u016F\u0171\u0173\u0175\u0177\u017A\u017C\u017E-\u0180\u0183\u0185\u0188\u018C\u018D\u0192\u0195\u0199-\u019B\u019E\u01A1\u01A3\u01A5\u01A8\u01AA\u01AB\u01AD\u01B0\u01B4\u01B6\u01B9-\u01BB\u01BD-\u01C3\u01C5\u01C6\u01C8\u01C9\u01CB\u01CC\u01CE\u01D0\u01D2\u01D4\u01D6\u01D8\u01DA\u01DC\u01DD\u01DF\u01E1\u01E3\u01E5\u01E7\u01E9\u01EB\u01ED\u01EF\u01F0\u01F2\u01F3\u01F5\u01F9\u01FB\u01FD\u01FF\u0201\u0203\u0205\u0207\u0209\u020B\u020D\u020F\u0211\u0213\u0215\u0217\u0219\u021B\u021D\u021F\u0221\u0223\u0225\u0227\u0229\u022B\u022D\u022F\u0231\u0233-\u0239\u023C\u023F\u0240\u0242\u0247\u0249\u024B\u024D\u024F-\u036F\u0371\u0373-\u0375\u0377-\u037E\u0380-\u0385\u0387\u038B\u038D\u0390\u03A2\u03AC-\u03CE\u03D0\u03D1\u03D5-\u03D7\u03D9\u03DB\u03DD\u03DF\u03E1\u03E3\u03E5\u03E7\u03E9\u03EB\u03ED\u03EF-\u03F3\u03F5\u03F6\u03F8\u03FB\u03FC\u0430-\u045F\u0461\u0463\u0465\u0467\u0469\u046B\u046D\u046F\u0471\u0473\u0475\u0477\u0479\u047B\u047D\u047F\u0481-\u0489\u048B\u048D\u048F\u0491\u0493\u0495\u0497\u0499\u049B\u049D\u049F\u04A1\u04A3\u04A5\u04A7\u04A9\u04AB\u04AD\u04AF\u04B1\u04B3\u04B5\u04B7\u04B9\u04BB\u04BD\u04BF\u04C2\u04C4\u04C6\u04C8\u04CA\u04CC\u04CE\u04CF\u04D1\u04D3\u04D5\u04D7\u04D9\u04DB\u04DD\u04DF\u04E1\u04E3\u04E5\u04E7\u04E9\u04EB\u04ED\u04EF\u04F1\u04F3\u04F5\u04F7\u04F9\u04FB\u04FD\u04FF\u0501\u0503\u0505\u0507\u0509\u050B\u050D\u050F\u0511\u0513\u0515\u0517\u0519\u051B\u051D\u051F\u0521\u0523\u0525\u0527\u0529\u052B\u052D\u052F\u0530\u0557-\u109F\u10C6\u10C8-\u10CC\u10CE-\u139F\u13F6-\u1DFF\u1E01\u1E03\u1E05\u1E07\u1E09\u1E0B\u1E0D\u1E0F\u1E11\u1E13\u1E15\u1E17\u1E19\u1E1B\u1E1D\u1E1F\u1E21\u1E23\u1E25\u1E27\u1E29\u1E2B\u1E2D\u1E2F\u1E31\u1E33\u1E35\u1E37\u1E39\u1E3B\u1E3D\u1E3F\u1E41\u1E43\u1E45\u1E47\u1E49\u1E4B\u1E4D\u1E4F\u1E51\u1E53\u1E55\u1E57\u1E59\u1E5B\u1E5D\u1E5F\u1E61\u1E63\u1E65\u1E67\u1E69\u1E6B\u1E6D\u1E6F\u1E71\u1E73\u1E75\u1E77\u1E79\u1E7B\u1E7D\u1E7F\u1E81\u1E83\u1E85\u1E87\u1E89\u1E8B\u1E8D\u1E8F\u1E91\u1E93\u1E95-\u1E9D\u1E9F\u1EA1\u1EA3\u1EA5\u1EA7\u1EA9\u1EAB\u1EAD\u1EAF\u1EB1\u1EB3\u1EB5\u1EB7\u1EB9\u1EBB\u1EBD\u1EBF\u1EC1\u1EC3\u1EC5\u1EC7\u1EC9\u1ECB\u1ECD\u1ECF\u1ED1\u1ED3\u1ED5\u1ED7\u1ED9\u1EDB\u1EDD\u1EDF\u1EE1\u1EE3\u1EE5\u1EE7\u1EE9\u1EEB\u1EED\u1EEF\u1EF1\u1EF3\u1EF5\u1EF7\u1EF9\u1EFB\u1EFD\u1EFF-\u1F07\u1F10-\u1F17\u1F1E-\u1F27\u1F30-\u1F37\u1F40-\u1F47\u1F4E-\u1F58\u1F5A\u1F5C\u1F5E\u1F60-\u1F67\u1F70-\u1FB7\u1FBC-\u1FC7\u1FCC-\u1FD7\u1FDC-\u1FE7\u1FED-\u1FF7\u1FFC-\u2101\u2103-\u2106\u2108-\u210A\u210E\u210F\u2113\u2114\u2116-\u2118\u211E-\u2123\u2125\u2127\u2129\u212E\u212F\u2134-\u213D\u2140-\u2144\u2146-\u2182\u2184-\u2BFF\u2C2F-\u2C5F\u2C61\u2C65\u2C66\u2C68\u2C6A\u2C6C\u2C71\u2C73\u2C74\u2C76-\u2C7D\u2C81\u2C83\u2C85\u2C87\u2C89\u2C8B\u2C8D\u2C8F\u2C91\u2C93\u2C95\u2C97\u2C99\u2C9B\u2C9D\u2C9F\u2CA1\u2CA3\u2CA5\u2CA7\u2CA9\u2CAB\u2CAD\u2CAF\u2CB1\u2CB3\u2CB5\u2CB7\u2CB9\u2CBB\u2CBD\u2CBF\u2CC1\u2CC3\u2CC5\u2CC7\u2CC9\u2CCB\u2CCD\u2CCF\u2CD1\u2CD3\u2CD5\u2CD7\u2CD9\u2CDB\u2CDD\u2CDF\u2CE1\u2CE3-\u2CEA\u2CEC\u2CEE-\u2CF1\u2CF3-\uA63F\uA641\uA643\uA645\uA647\uA649\uA64B\uA64D\uA64F\uA651\uA653\uA655\uA657\uA659\uA65B\uA65D\uA65F\uA661\uA663\uA665\uA667\uA669\uA66B\uA66D-\uA67F\uA681\uA683\uA685\uA687\uA689\uA68B\uA68D\uA68F\uA691\uA693\uA695\uA697\uA699\uA69B-\uA721\uA723\uA725\uA727\uA729\uA72B\uA72D\uA72F-\uA731\uA733\uA735\uA737\uA739\uA73B\uA73D\uA73F\uA741\uA743\uA745\uA747\uA749\uA74B\uA74D\uA74F\uA751\uA753\uA755\uA757\uA759\uA75B\uA75D\uA75F\uA761\uA763\uA765\uA767\uA769\uA76B\uA76D\uA76F-\uA778\uA77A\uA77C\uA77F\uA781\uA783\uA785\uA787-\uA78A\uA78C\uA78E\uA78F\uA791\uA793-\uA795\uA797\uA799\uA79B\uA79D\uA79F\uA7A1\uA7A3\uA7A5\uA7A7\uA7A9\uA7AE\uA7AF\uA7B5\uA7B7-\uFF20\uFF3B-\u{103FF}\u{10428}-\u{10C7F}\u{10CB3}-\u{1189F}\u{118C0}-\u{1D3FF}\u{1D41A}-\u{1D433}\u{1D44E}-\u{1D467}\u{1D482}-\u{1D49B}\u{1D49D}\u{1D4A0}\u{1D4A1}\u{1D4A3}\u{1D4A4}\u{1D4A7}\u{1D4A8}\u{1D4AD}\u{1D4B6}-\u{1D4CF}\u{1D4EA}-\u{1D503}\u{1D506}\u{1D50B}\u{1D50C}\u{1D515}\u{1D51D}-\u{1D537}\u{1D53A}\u{1D53F}\u{1D545}\u{1D547}-\u{1D549}\u{1D551}-\u{1D56B}\u{1D586}-\u{1D59F}\u{1D5BA}-\u{1D5D3}\u{1D5EE}-\u{1D607}\u{1D622}-\u{1D63B}\u{1D656}-\u{1D66F}\u{1D68A}-\u{1D6A7}\u{1D6C1}-\u{1D6E1}\u{1D6FB}-\u{1D71B}\u{1D735}-\u{1D755}\u{1D76F}-\u{1D78F}\u{1D7A9}-\u{1D7C9}\u{1D7CB}-\u{10FFFF}]/ui;
+
+for (let codePoint = 0; codePoint <= 0x10FFFF; codePoint++) {
+ const string = String.fromCodePoint(codePoint);
+ assertEquals(regexp.test(string), regexpu.test(string));
+}
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-scripts.js b/deps/v8/test/mjsunit/harmony/regexp-property-scripts.js
index 19b50ee7db..565a59ab0a 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-scripts.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-scripts.js
@@ -2,38 +2,38 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-regexp-property --harmony-unicode-regexps
+// Flags: --harmony-regexp-property
function t(re, s) { assertTrue(re.test(s)); }
function f(re, s) { assertFalse(re.test(s)); }
-t(/\p{Common}+/u, ".");
-f(/\p{Common}+/u, "supercalifragilisticexpialidocious");
+t(/\p{Script=Common}+/u, ".");
+f(/\p{Script=Common}+/u, "supercalifragilisticexpialidocious");
-t(/\p{Han}+/u, "话说天下大势,分久必合,合久必分");
-t(/\p{Hani}+/u, "吾庄后有一桃园,花开正盛");
-f(/\p{Han}+/u, "おはようございます");
-f(/\p{Hani}+/u, "Something is rotten in the state of Denmark");
+t(/\p{Script=Han}+/u, "话说天下大势,分久必合,合久必分");
+t(/\p{Script=Hani}+/u, "吾庄后有一桃园,花开正盛");
+f(/\p{Script=Han}+/u, "おはようございます");
+f(/\p{Script=Hani}+/u, "Something is rotten in the state of Denmark");
-t(/\p{Latin}+/u, "Wie froh bin ich, daß ich weg bin!");
-t(/\p{Latn}+/u,
+t(/\p{Script=Latin}+/u, "Wie froh bin ich, daß ich weg bin!");
+t(/\p{Script=Latn}+/u,
"It was a bright day in April, and the clocks were striking thirteen");
-f(/\p{Latin}+/u, "奔腾千里荡尘埃,渡水登山紫雾开");
-f(/\p{Latn}+/u, "いただきます");
+f(/\p{Script=Latin}+/u, "奔腾千里荡尘埃,渡水登山紫雾开");
+f(/\p{Script=Latn}+/u, "いただきます");
-t(/\p{Hiragana}/u, "いただきます");
-t(/\p{Hira}/u, "ありがとうございました");
-f(/\p{Hiragana}/u,
+t(/\p{sc=Hiragana}/u, "いただきます");
+t(/\p{sc=Hira}/u, "ありがとうございました");
+f(/\p{sc=Hiragana}/u,
"Als Gregor Samsa eines Morgens aus unruhigen Träumen erwachte");
-f(/\p{Hira}/u, "Call me Ishmael");
+f(/\p{sc=Hira}/u, "Call me Ishmael");
-t(/\p{Phoenician}/u, "\u{10900}\u{1091a}");
-t(/\p{Phnx}/u, "\u{1091f}\u{10916}");
-f(/\p{Phoenician}/u, "Arthur est un perroquet");
-f(/\p{Phnx}/u, "设心狠毒非良士,操卓原来一路人");
+t(/\p{sc=Phoenician}/u, "\u{10900}\u{1091a}");
+t(/\p{sc=Phnx}/u, "\u{1091f}\u{10916}");
+f(/\p{sc=Phoenician}/u, "Arthur est un perroquet");
+f(/\p{sc=Phnx}/u, "设心狠毒非良士,操卓原来一路人");
-t(/\p{Grek}/u, "ἄνδρα μοι ἔννεπε, μοῦσα, πολύτροπον, ὃς μάλα πολλὰ");
-t(/\p{Greek}/u, "μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος");
-f(/\p{Greek}/u, "高贤未服英雄志,屈节偏生杰士疑");
-f(/\p{Greek}/u,
+t(/\p{sc=Grek}/u, "ἄνδρα μοι ἔννεπε, μοῦσα, πολύτροπον, ὃς μάλα πολλὰ");
+t(/\p{sc=Greek}/u, "μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος");
+f(/\p{sc=Greek}/u, "高贤未服英雄志,屈节偏生杰士疑");
+f(/\p{sc=Greek}/u,
"Mr. Jones, of the Manor Farm, had locked the hen-houses for the night");
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-special.js b/deps/v8/test/mjsunit/harmony/regexp-property-special.js
new file mode 100644
index 0000000000..204b77fb23
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-special.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property
+
+function t(re, s) { assertTrue(re.test(s)); }
+function f(re, s) { assertFalse(re.test(s)); }
+
+t(/\p{ASCII}+/u, "abc123");
+f(/\p{ASCII}+/u, "ⓐⓑⓒ①②③");
+f(/\p{ASCII}+/u, "🄰🄱🄲①②③");
+f(/\P{ASCII}+/u, "abcd123");
+t(/\P{ASCII}+/u, "ⓐⓑⓒ①②③");
+t(/\P{ASCII}+/u, "🄰🄱🄲①②③");
+
+f(/[^\p{ASCII}]+/u, "abc123");
+f(/[\p{ASCII}]+/u, "ⓐⓑⓒ①②③");
+f(/[\p{ASCII}]+/u, "🄰🄱🄲①②③");
+t(/[^\P{ASCII}]+/u, "abcd123");
+t(/[\P{ASCII}]+/u, "ⓐⓑⓒ①②③");
+f(/[^\P{ASCII}]+/u, "🄰🄱🄲①②③");
+
+t(/\p{Any}+/u, "🄰🄱🄲①②③");
+
+assertEquals(["\ud800"], /\p{Any}/u.exec("\ud800\ud801"));
+assertEquals(["\udc00"], /\p{Any}/u.exec("\udc00\udc01"));
+assertEquals(["\ud800\udc01"], /\p{Any}/u.exec("\ud800\udc01"));
+assertEquals(["\udc01"], /\p{Any}/u.exec("\udc01"));
+
+f(/\P{Any}+/u, "123");
+f(/[\P{Any}]+/u, "123");
+t(/[\P{Any}\d]+/u, "123");
+t(/[^\P{Any}]+/u, "123");
+
+t(/\p{Assigned}+/u, "123");
+t(/\p{Assigned}+/u, "🄰🄱🄲");
+f(/\p{Assigned}+/u, "\ufdd0");
+f(/\p{Assigned}+/u, "\u{fffff}");
+
+f(/\P{Assigned}+/u, "123");
+f(/\P{Assigned}+/u, "🄰🄱🄲");
+t(/\P{Assigned}+/u, "\ufdd0");
+t(/\P{Assigned}+/u, "\u{fffff}");
+f(/\P{Assigned}/u, "");
+
+t(/[^\P{Assigned}]+/u, "123");
+f(/[\P{Assigned}]+/u, "🄰🄱🄲");
+f(/[^\P{Assigned}]+/u, "\ufdd0");
+t(/[\P{Assigned}]+/u, "\u{fffff}");
+f(/[\P{Assigned}]/u, "");
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4904.js b/deps/v8/test/mjsunit/harmony/regress/regress-4904.js
new file mode 100644
index 0000000000..a57d246b6f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4904.js
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-do-expressions
+
+(function testCatchScopeInDoExpression() {
+ var f = (s = 17, y = do { try { throw 25; } catch(e) { s += e; }; }) => s;
+ var result = f();
+ assertEquals(result, 42);
+})();
+
+(function testCatchScopeInDoExpression() {
+ var f = (s = 17, y = do { let t; try { throw 25; } catch(e) { s += e; }; }) => s;
+ var result = f();
+ assertEquals(result, 42);
+})();
+
+(function testCatchScopeInDoExpression() {
+ let t1;
+ var f = (s = 17, y = do { let t2; try { throw 25; } catch(e) { s += e; }; }) => s;
+ var result = f();
+ assertEquals(result, 42);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-618603.js b/deps/v8/test/mjsunit/harmony/regress/regress-618603.js
new file mode 100644
index 0000000000..8f45cd88f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-618603.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+
+try {
+} catch(e) {; }
+function __f_7(expected, run) {
+ var __v_10 = run();
+};
+__f_7("[1,2,3]", () => (function() {
+ return (async () => {[...await arguments] })();
+ })());
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-624300.js b/deps/v8/test/mjsunit/harmony/regress/regress-624300.js
new file mode 100644
index 0000000000..f96fbbb5aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-624300.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+
+(function f() {
+ try {
+ f();
+ } catch (e) {
+ (async() => await 1).length;
+ }
+})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-621111.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-621111.js
new file mode 100644
index 0000000000..58a0d5ce3f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-621111.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(y = 1[1, [...[]]]) => 1; // will core dump if not fixed
+(y = 1[1, [...[]]]) => {}; // will core dump if not fixed
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-621496.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-621496.js
new file mode 100644
index 0000000000..4db7a95039
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-621496.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testIllegalSpreadAsSingleArrowParameter() {
+ assertThrows("(...[42]) => 42)", SyntaxError) // will core dump, if not fixed
+})();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js b/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js
deleted file mode 100644
index 1460889f45..0000000000
--- a/deps/v8/test/mjsunit/harmony/regress/regress-observe-empty-double-array.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-object-observe
-// Flags: --allow-natives-syntax
-//
-// Test passes if it does not crash.
-
-arr = [1.1];
-Object.observe(arr, function(){});
-arr.length = 0;
-// TODO(observe): we currently disallow fast elements for observed object.
-// assertTrue(%HasFastDoubleElements(arr));
-// Should not crash
-arr.push(1.1);
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
index 7bd4e5b121..7c34ed3009 100644
--- a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
@@ -572,3 +572,21 @@ for(i = 0; i < typedArrayConstructors.length; i++) {
assertThrows(function(i) { typedArrayConstructors[i](); }.bind(this, i),
TypeError);
}
+
+// byteLength from prototype can be overwritten
+var s = new SharedArrayBuffer(10);
+assertEquals(10, s.byteLength);
+Object.defineProperty(s, 'byteLength', {value: 42});
+assertEquals(42, s.byteLength);
+
+// byteLength on incompatible type (shared vs. regular ArrayBuffer)
+var desc = Object.getOwnPropertyDescriptor(ArrayBuffer.prototype, 'byteLength');
+s = new SharedArrayBuffer(10);
+Object.defineProperty(s, 'byteLength', desc);
+assertThrows(function() {s.byteLength}, TypeError);
+
+desc = Object.getOwnPropertyDescriptor(SharedArrayBuffer.prototype,
+ 'byteLength');
+var a = new ArrayBuffer(10);
+Object.defineProperty(a, 'byteLength', desc);
+assertThrows(function() {a.byteLength}, TypeError);
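+
+// A further sketch of the same brand check, assuming byteLength is an
+// accessor property whose getter validates its receiver.
+assertThrows(function() { desc.get.call({}); }, TypeError);
+assertEquals(10, desc.get.call(new SharedArrayBuffer(10)));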
diff --git a/deps/v8/test/mjsunit/harmony/simd.js b/deps/v8/test/mjsunit/harmony/simd.js
index 1868050e50..a3d46159c1 100644
--- a/deps/v8/test/mjsunit/harmony/simd.js
+++ b/deps/v8/test/mjsunit/harmony/simd.js
@@ -622,7 +622,9 @@ TestSIMDObject()
function TestStringify(expected, input) {
assertEquals(expected, JSON.stringify(input));
- assertEquals(expected, JSON.stringify(input, null, 0));
+ assertEquals(expected, JSON.stringify(input, (key, value) => value));
+ assertEquals(JSON.stringify(input, null, "="),
+ JSON.stringify(input, (key, value) => value, "="));
}
TestStringify(undefined, SIMD.Float32x4(1, 2, 3, 4));
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js b/deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js
new file mode 100644
index 0000000000..1fde47507e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/sloppy-legacy-duplicate-generators.js
@@ -0,0 +1,60 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-harmony-restrictive-generators
+
+// In legacy mode, generators get sloppy-mode block-scoped function hoisting
+
+// Hoisting to the global scope
+
+{
+ function* foo() {}
+ assertEquals('function', typeof foo);
+}
+//assertEquals('function', typeof foo);
+
+// Hoisting within a function scope
+(function() {
+ { function* bar() {} }
+ assertEquals('function', typeof bar);
+})();
+
+// Lexical shadowing allowed; hoisting happens
+(function() {
+ function* x() { yield 1; }
+ { function* x() { yield 2 } }
+ assertEquals(2, x().next().value);
+})();
+
+// Duplicates allowed
+(function() {
+ function* y() { yield 1; }
+ function* y() { yield 2 }
+ assertEquals(2, y().next().value);
+})();
+
+// Functions and generators may duplicate each other
+(function() {
+ function* z() { yield 1; }
+ function z() { return 2 }
+ assertEquals(2, z());
+
+ function a() { return 1; }
+ function* a() { yield 2 }
+ assertEquals(2, a().next().value);
+})();
+
+// In strict mode, none of this happens
+
+(function() {
+ 'use strict';
+
+ { function* bar() {} }
+ assertEquals('undefined', typeof bar);
+
+ // Lexical shadowing allowed; hoisting happens
+ function* x() { yield 1; }
+ { function* x() { yield 2 } }
+ assertEquals(1, x().next().value);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-async.js b/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-async.js
new file mode 100644
index 0000000000..97411c0c83
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-async.js
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+
+// Async functions don't get sloppy-mode block-scoped function hoisting
+
+// No hoisting to the global scope
+
+{
+ async function foo() {}
+ assertEquals('function', typeof foo);
+}
+assertEquals('undefined', typeof foo);
+
+// No hoisting within a function scope
+(function() {
+ { async function bar() {} }
+ assertEquals('undefined', typeof bar);
+})();
+
+// Lexical shadowing allowed, no hoisting
+(function() {
+ var y;
+ async function x() { y = 1; }
+ { async function x() { y = 2; } }
+ x();
+ assertEquals(1, y);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-generators.js b/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-generators.js
new file mode 100644
index 0000000000..de2e461f95
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/sloppy-no-duplicate-generators.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-restrictive-generators
+
+// Generators don't get sloppy-mode block-scoped function hoisting
+
+// No hoisting to the global scope
+
+{
+ function* foo() {}
+ assertEquals('function', typeof foo);
+}
+assertEquals('undefined', typeof foo);
+
+// No hoisting within a function scope
+(function() {
+ { function* bar() {} }
+ assertEquals('undefined', typeof bar);
+})();
+
+// Lexical shadowing allowed, no hoisting
+(function() {
+ function* x() { yield 1; }
+ { function* x() { yield 2 } }
+ assertEquals(1, x().next().value);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/to-name.js b/deps/v8/test/mjsunit/harmony/to-name.js
index 6d5d64e5e4..0a2c043a2f 100644
--- a/deps/v8/test/mjsunit/harmony/to-name.js
+++ b/deps/v8/test/mjsunit/harmony/to-name.js
@@ -5,50 +5,37 @@
// Flags: --allow-natives-syntax
assertEquals("1", %ToName(1));
-assertEquals("1", %_ToName(1));
assertEquals("0.5", %ToName(.5));
-assertEquals("0.5", %_ToName(.5));
assertEquals("null", %ToName(null));
-assertEquals("null", %_ToName(null));
assertEquals("true", %ToName(true));
-assertEquals("true", %_ToName(true));
assertEquals("false", %ToName(false));
-assertEquals("false", %_ToName(false));
assertEquals("undefined", %ToName(undefined));
-assertEquals("undefined", %_ToName(undefined));
assertEquals("random text", %ToName("random text"));
-assertEquals("random text", %_ToName("random text"));
assertEquals(Symbol.toPrimitive, %ToName(Symbol.toPrimitive));
-assertEquals(Symbol.toPrimitive, %_ToName(Symbol.toPrimitive));
var a = { toString: function() { return "xyz" }};
assertEquals("xyz", %ToName(a));
-assertEquals("xyz", %_ToName(a));
var b = { valueOf: function() { return 42 }};
assertEquals("[object Object]", %ToName(b));
-assertEquals("[object Object]", %_ToName(b));
var c = {
toString: function() { return "x"},
valueOf: function() { return 123 }
};
assertEquals("x", %ToName(c));
-assertEquals("x", %_ToName(c));
var d = {
[Symbol.toPrimitive]: function(hint) { return hint }
};
assertEquals("string", %ToName(d));
-assertEquals("string", %_ToName(d));
var e = new Date(0);
assertEquals(e.toString(), %ToName(e));
-assertEquals(e.toString(), %_ToName(e));
diff --git a/deps/v8/test/mjsunit/harmony/to-primitive.js b/deps/v8/test/mjsunit/harmony/to-primitive.js
index 09280bf1ee..8decb04657 100644
--- a/deps/v8/test/mjsunit/harmony/to-primitive.js
+++ b/deps/v8/test/mjsunit/harmony/to-primitive.js
@@ -6,75 +6,35 @@
assertEquals(1, %ToPrimitive(1));
assertEquals(1, %ToPrimitive_Number(1));
-assertEquals(1, %ToPrimitive_String(1));
-assertEquals(1, %_ToPrimitive(1));
-assertEquals(1, %_ToPrimitive_Number(1));
-assertEquals(1, %_ToPrimitive_String(1));
assertEquals(.5, %ToPrimitive(.5));
assertEquals(.5, %ToPrimitive_Number(.5));
-assertEquals(.5, %ToPrimitive_String(.5));
-assertEquals(.5, %_ToPrimitive(.5));
-assertEquals(.5, %_ToPrimitive_Number(.5));
-assertEquals(.5, %_ToPrimitive_String(.5));
assertEquals(null, %ToPrimitive(null));
assertEquals(null, %ToPrimitive_Number(null));
-assertEquals(null, %ToPrimitive_String(null));
-assertEquals(null, %_ToPrimitive(null));
-assertEquals(null, %_ToPrimitive_Number(null));
-assertEquals(null, %_ToPrimitive_String(null));
assertEquals(true, %ToPrimitive(true));
assertEquals(true, %ToPrimitive_Number(true));
-assertEquals(true, %ToPrimitive_String(true));
-assertEquals(true, %_ToPrimitive(true));
-assertEquals(true, %_ToPrimitive_Number(true));
-assertEquals(true, %_ToPrimitive_String(true));
assertEquals(false, %ToPrimitive(false));
assertEquals(false, %ToPrimitive_Number(false));
-assertEquals(false, %ToPrimitive_String(false));
-assertEquals(false, %_ToPrimitive(false));
-assertEquals(false, %_ToPrimitive_Number(false));
-assertEquals(false, %_ToPrimitive_String(false));
assertEquals(undefined, %ToPrimitive(undefined));
assertEquals(undefined, %ToPrimitive_Number(undefined));
-assertEquals(undefined, %ToPrimitive_String(undefined));
-assertEquals(undefined, %_ToPrimitive(undefined));
-assertEquals(undefined, %_ToPrimitive_Number(undefined));
-assertEquals(undefined, %_ToPrimitive_String(undefined));
assertEquals("random text", %ToPrimitive("random text"));
assertEquals("random text", %ToPrimitive_Number("random text"));
-assertEquals("random text", %ToPrimitive_String("random text"));
-assertEquals("random text", %_ToPrimitive("random text"));
-assertEquals("random text", %_ToPrimitive_Number("random text"));
-assertEquals("random text", %_ToPrimitive_String("random text"));
assertEquals(Symbol.toPrimitive, %ToPrimitive(Symbol.toPrimitive));
assertEquals(Symbol.toPrimitive, %ToPrimitive_Number(Symbol.toPrimitive));
-assertEquals(Symbol.toPrimitive, %ToPrimitive_String(Symbol.toPrimitive));
-assertEquals(Symbol.toPrimitive, %_ToPrimitive(Symbol.toPrimitive));
-assertEquals(Symbol.toPrimitive, %_ToPrimitive_Number(Symbol.toPrimitive));
-assertEquals(Symbol.toPrimitive, %_ToPrimitive_String(Symbol.toPrimitive));
var a = { toString: function() { return "xyz" }};
assertEquals("xyz", %ToPrimitive(a));
assertEquals("xyz", %ToPrimitive_Number(a));
-assertEquals("xyz", %ToPrimitive_String(a));
-assertEquals("xyz", %_ToPrimitive(a));
-assertEquals("xyz", %_ToPrimitive_Number(a));
-assertEquals("xyz", %_ToPrimitive_String(a));
var b = { valueOf: function() { return 42 }};
assertEquals(42, %ToPrimitive(b));
assertEquals(42, %ToPrimitive_Number(b));
-assertEquals("[object Object]", %ToPrimitive_String(b));
-assertEquals(42, %_ToPrimitive(b));
-assertEquals(42, %_ToPrimitive_Number(b));
-assertEquals("[object Object]", %_ToPrimitive_String(b));
var c = {
toString: function() { return "x"},
@@ -82,25 +42,13 @@ var c = {
};
assertEquals(123, %ToPrimitive(c));
assertEquals(123, %ToPrimitive_Number(c));
-assertEquals("x", %ToPrimitive_String(c));
-assertEquals(123, %_ToPrimitive(c));
-assertEquals(123, %_ToPrimitive_Number(c));
-assertEquals("x", %_ToPrimitive_String(c));
var d = {
[Symbol.toPrimitive]: function(hint) { return hint }
};
assertEquals("default", %ToPrimitive(d));
assertEquals("number", %ToPrimitive_Number(d));
-assertEquals("string", %ToPrimitive_String(d));
-assertEquals("default", %_ToPrimitive(d));
-assertEquals("number", %_ToPrimitive_Number(d));
-assertEquals("string", %_ToPrimitive_String(d));
var e = new Date(0);
assertEquals(e.toString(), %ToPrimitive(e));
assertEquals(0, %ToPrimitive_Number(e));
-assertEquals(e.toString(), %ToPrimitive_String(e));
-assertEquals(e.toString(), %_ToPrimitive(e));
-assertEquals(0, %_ToPrimitive_Number(e));
-assertEquals(e.toString(), %_ToPrimitive_String(e));
diff --git a/deps/v8/test/mjsunit/harmony/to-string.js b/deps/v8/test/mjsunit/harmony/to-string.js
index 103ba89d1d..dfe36c2dd9 100644
--- a/deps/v8/test/mjsunit/harmony/to-string.js
+++ b/deps/v8/test/mjsunit/harmony/to-string.js
@@ -50,5 +50,5 @@ assertEquals("string", %ToString(d));
assertEquals("string", %_ToString(d));
var e = new Date(0);
-assertEquals(e.toString(), %ToName(e));
-assertEquals(e.toString(), %_ToName(e));
+assertEquals(e.toString(), %ToString(e));
+assertEquals(e.toString(), %_ToString(e));
diff --git a/deps/v8/test/mjsunit/harmony/trailing-commas-length.js b/deps/v8/test/mjsunit/harmony/trailing-commas-length.js
new file mode 100644
index 0000000000..9d5e59c16b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/trailing-commas-length.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-trailing-commas
+
+function f1(a,) {}
+function f2(a,b,) {}
+function f3(a,b,c,) {}
+assertEquals(1, f1.length);
+assertEquals(2, f2.length);
+assertEquals(3, f3.length);
+
+function* g1(a,) {}
+function* g2(a,b,) {}
+function* g3(a,b,c,) {}
+assertEquals(1, g1.length);
+assertEquals(2, g2.length);
+assertEquals(3, g3.length);
+
+assertEquals(1, (function(a,) {}).length);
+assertEquals(2, (function(a,b,) {}).length);
+assertEquals(3, (function(a,b,c,) {}).length);
+
+assertEquals(1, (function*(a,) {}).length);
+assertEquals(2, (function*(a,b,) {}).length);
+assertEquals(3, (function*(a,b,c,) {}).length);
+
+assertEquals(1, ((a,) => {}).length);
+assertEquals(2, ((a,b,) => {}).length);
+assertEquals(3, ((a,b,c,) => {}).length);
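+
+// A companion sketch, assuming the proposal's call-site half is enabled
+// by the same flag: trailing commas in argument lists.
+function add(a, b,) { return a + b; }
+assertEquals(3, add(1, 2,));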
diff --git a/deps/v8/test/mjsunit/holy-double-no-arg-array.js b/deps/v8/test/mjsunit/holy-double-no-arg-array.js
new file mode 100644
index 0000000000..73e2ddc6ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/holy-double-no-arg-array.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ function f() {
+ return new Array();
+ }
+ var a = f();
+ a[0] = 0.5;
+ var b = f();
+ b[2] = 0.5;
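+ // b is now a holey double array; the hole at index 0 must read back as
+ // undefined rather than raw bits from the double backing store.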
+ assertEquals(undefined, b[0]);
+})();
diff --git a/deps/v8/test/mjsunit/ignition/debug-break-mixed-stack.js b/deps/v8/test/mjsunit/ignition/debug-break-mixed-stack.js
new file mode 100644
index 0000000000..878a918d5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/debug-break-mixed-stack.js
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+Debug = debug.Debug
+
+var exception = null;
+var frame_depth = 10;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals(frame_depth, exec_state.frameCount());
+ assertTrue(/\/\/ Break$/.test(exec_state.frame(0).sourceLineText()));
+ assertEquals(12 - frame_depth, exec_state.frame(0).evaluate("x").value());
+ frame_depth--;
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+function ChooseCode(f, x) {
+ if (x == 1) {
+ Debug.setBreakPoint(factorial, 4);
+ }
+ switch (x % 2) {
+ case 0:
+ %BaselineFunctionOnNextCall(f);
+ break;
+ case 1:
+ %InterpretFunctionOnNextCall(f);
+ break;
+ }
+}
+
+function factorial(x) {
+ ChooseCode(factorial, x);
+ if (x == 1) return 1;
+ var factor = factorial(x - 1);
+ return x * factor; // Break
+}
+
+Debug.setListener(listener);
+
+assertEquals(3628800, factorial(10));
+
+Debug.setListener(null);
+assertNull(exception);
+assertEquals(1, frame_depth);
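The frame arithmetic behind the listener's assertions, read informally: the breakpoint set on line 4 first fires in factorial(2), when the stack holds factorial frames for x = 2..10 plus the top-level frame.

// frameCount == 9 recursive frames + 1 top-level == 10 == frame_depth,
// and evaluate("x") == 2 == 12 - frame_depth.
// Each completed return pops one frame, so frame_depth counts down until
// the outermost hit (x == 10, frameCount == 2) leaves it at the final 1.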
diff --git a/deps/v8/test/mjsunit/ignition/debug-step-mixed-stack.js b/deps/v8/test/mjsunit/ignition/debug-step-mixed-stack.js
new file mode 100644
index 0000000000..6566431175
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/debug-step-mixed-stack.js
@@ -0,0 +1,53 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+Debug = debug.Debug
+
+var exception = null;
+var frame_depth = 11;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals(frame_depth, exec_state.frameCount());
+ assertTrue(/\/\/ Break$/.test(exec_state.frame(0).sourceLineText()));
+ assertEquals(12 - frame_depth, exec_state.frame(0).evaluate("x").value());
+ if (frame_depth > 2) exec_state.prepareStep(Debug.StepAction.StepOut);
+ frame_depth--;
+ } catch (e) {
+ exception = e;
+ print(e + e.stack);
+ }
+}
+
+function ChooseCode(f, x) {
+ switch (x % 2) {
+ case 0:
+ %BaselineFunctionOnNextCall(f);
+ break;
+ case 1:
+ %InterpretFunctionOnNextCall(f);
+ break;
+ }
+}
+
+function factorial(x) {
+ ChooseCode(factorial, x);
+ if (x == 1) {
+ debugger; // Break
+ return 1;
+ }
+ var factor = factorial(x - 1);
+ return x * factor; // Break
+}
+
+Debug.setListener(listener);
+
+assertEquals(3628800, factorial(10));
+
+Debug.setListener(null);
+assertNull(exception);
+assertEquals(1, frame_depth);
diff --git a/deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js b/deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js
deleted file mode 100644
index d31150b6d3..0000000000
--- a/deps/v8/test/mjsunit/ignition/elided-instruction-no-ignition.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug
-
-Debug = debug.Debug
-
-var exception = null;
-var break_count = 0;
-
-function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.Break) return;
- try {
- print(event_data.sourceLineText());
- var column = event_data.sourceColumn();
- assertTrue(event_data.sourceLineText().indexOf(
- `Break ${break_count++}. ${column}.`) > 0);
- exec_state.prepareStep(Debug.StepAction.StepIn);
- } catch (e) {
- print(e + e.stack);
- exception = e;
- }
-};
-
-function f() {
- var a = 1; // Break 2. 10.
- return a; // Break 3. 2.
-} // Break 4. 0.
-
-Debug.setListener(listener);
-debugger; // Break 0. 0.
-f(); // Break 1. 0.
-Debug.setListener(null); // Break 5. 0.
-
-assertNull(exception);
-assertEquals(6, break_count);
diff --git a/deps/v8/test/mjsunit/ignition/elided-instruction.js b/deps/v8/test/mjsunit/ignition/elided-instruction.js
index 807974bbc1..d31150b6d3 100644
--- a/deps/v8/test/mjsunit/ignition/elided-instruction.js
+++ b/deps/v8/test/mjsunit/ignition/elided-instruction.js
@@ -25,17 +25,13 @@ function listener(event, exec_state, event_data, data) {
function f() {
var a = 1; // Break 2. 10.
- // This return statement emits no bytecode instruction for the evaluation of
- // the to-be-returned expression. Therefore we cannot set a break location
- // before the statement and a second break location immediately before
- // returning to the caller.
- return a;
-} // Break 3. 0.
+ return a; // Break 3. 2.
+} // Break 4. 0.
Debug.setListener(listener);
debugger; // Break 0. 0.
f(); // Break 1. 0.
-Debug.setListener(null); // Break 4. 0.
+Debug.setListener(null); // Break 5. 0.
assertNull(exception);
-assertEquals(5, break_count);
+assertEquals(6, break_count);
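Spelled out, the stepping sequence the updated expectations encode: under the bytecode pipeline, return a; emits an instruction for the returned expression, so it gets its own break location in addition to the one at the function exit.

// debugger;                -> Break 0. 0.
// f();                     -> Break 1. 0.
//   var a = 1;             -> Break 2. 10.
//   return a;              -> Break 3. 2.
// }                        -> Break 4. 0.
// Debug.setListener(null); -> Break 5. 0.
// Six breaks in total, matching the new assertEquals(6, break_count).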
diff --git a/deps/v8/test/mjsunit/ignition/ignition-statistics-extension.js b/deps/v8/test/mjsunit/ignition/ignition-statistics-extension.js
new file mode 100644
index 0000000000..43d05c94a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/ignition-statistics-extension.js
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition --trace-ignition-dispatches
+
+assertEquals(typeof getIgnitionDispatchCounters, "function");
+
+var old_dispatch_counters = getIgnitionDispatchCounters();
+
+// Check that old_dispatch_counters is a non-empty object of objects, such that
+// the value of each property in the inner objects is a number.
+
+assertEquals(typeof old_dispatch_counters, "object");
+assertTrue(Object.getOwnPropertyNames(old_dispatch_counters).length > 0);
+for (var source_bytecode in old_dispatch_counters) {
+ var counters_row = old_dispatch_counters[source_bytecode];
+ assertEquals(typeof counters_row, "object");
+ for (var counter in counters_row) {
+ assertEquals(typeof counters_row[counter], "number");
+ }
+}
+
+// Run some code so the dispatch counters advance.
+function f(x) { return x*x; }
+f(42);
+
+var new_dispatch_counters = getIgnitionDispatchCounters();
+
+var old_source_bytecodes = Object.getOwnPropertyNames(old_dispatch_counters);
+var new_source_bytecodes = Object.getOwnPropertyNames(new_dispatch_counters);
+var common_source_bytecodes = new_source_bytecodes.filter(function (name) {
+ return old_source_bytecodes.indexOf(name) > -1;
+});
+
+// Check that the keys on the outer objects are the same
+assertEquals(common_source_bytecodes, old_source_bytecodes);
+assertEquals(common_source_bytecodes, new_source_bytecodes);
+
+common_source_bytecodes.forEach(function (source_bytecode) {
+ var new_counters_row = new_dispatch_counters[source_bytecode];
+ var old_counters_row = old_dispatch_counters[source_bytecode];
+
+ var old_destination_bytecodes = Object.getOwnPropertyNames(old_counters_row);
+ var new_destination_bytecodes = Object.getOwnPropertyNames(new_counters_row);
+
+ // Check that all the keys in old_ are in new_ too
+ old_destination_bytecodes.forEach(function (name) {
+ assertTrue(new_destination_bytecodes.indexOf(name) > -1);
+ });
+
+ // Check that for each source-destination pair, the counter has either
+ // appeared (was undefined before calling f()), is unchanged, or incremented.
+ new_destination_bytecodes.forEach(function (destination_bytecode) {
+ var new_counter = new_counters_row[destination_bytecode];
+ var old_counter = old_counters_row[destination_bytecode];
+ assertTrue(typeof new_counter === "number");
+ if (typeof old_counter === "number") {
+ assertTrue(new_counter >= old_counter);
+ }
+ });
+});
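A sketch of the shape getIgnitionDispatchCounters() returns, with made-up counts (the real outer and inner keys are Ignition bytecode mnemonics, and the function exists only under --ignition --trace-ignition-dispatches):

var counters = {
  "Ldar": { "Star": 120, "Add": 40 }, // Ldar->Star and Ldar->Add dispatches
  "Star": { "Return": 80 }
};
var total = 0;
for (var src in counters) {
  for (var dst in counters[src]) total += counters[src][dst];
}
console.log(total); // 240 dispatches in this fabricated sample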
diff --git a/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
new file mode 100644
index 0000000000..d4f40bad79
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --ignition --ignition-osr --turbo-from-bytecode
+
+function f() {
+ for (var i = 0; i < 10; i++) {
+ if (i == 5) %OptimizeOsr();
+ }
+}
+f();
diff --git a/deps/v8/test/mjsunit/ignition/osr-from-generator.js b/deps/v8/test/mjsunit/ignition/osr-from-generator.js
new file mode 100644
index 0000000000..2344a31ce4
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/osr-from-generator.js
@@ -0,0 +1,65 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestGeneratorOSRSimple() {
+ function* gen1() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) %OptimizeOsr();
+ }
+ return 23;
+ }
+ var g = gen1();
+ assertEquals({ value:23, done:true }, g.next());
+})();
+
+(function TestGeneratorOSRYieldAfterArming() {
+ function* gen2() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) %OptimizeOsr();
+ yield i;
+ }
+ return 23;
+ }
+ var g = gen2();
+ assertEquals({ value:0, done:false }, g.next());
+ assertEquals({ value:1, done:false }, g.next());
+ assertEquals({ value:2, done:false }, g.next());
+ assertEquals({ value:23, done:true }, g.next());
+})();
+
+(function TestGeneratorOSRYieldBeforeArming() {
+ function* gen3() {
+ for (var i = 0; i < 3; ++i) {
+ yield i;
+ if (i == 1) %OptimizeOsr();
+ }
+ return 23;
+ }
+ var g = gen3();
+ assertEquals({ value:0, done:false }, g.next());
+ assertEquals({ value:1, done:false }, g.next());
+ assertEquals({ value:2, done:false }, g.next());
+ assertEquals({ value:23, done:true }, g.next());
+})();
+
+(function TestGeneratorOSRNested() {
+ function* gen4() {
+ for (var i = 0; i < 3; ++i) {
+ for (var j = 0; j < 3; ++j) {
+ for (var k = 0; k < 10; ++k) {
+ if (k == 5) %OptimizeOsr();
+ }
+ }
+ yield i;
+ }
+ return 23;
+ }
+ var g = gen4();
+ assertEquals({ value:0, done:false }, g.next());
+ assertEquals({ value:1, done:false }, g.next());
+ assertEquals({ value:2, done:false }, g.next());
+ assertEquals({ value:23, done:true }, g.next());
+})();
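Why generators get dedicated OSR coverage, informally: the loop that arms OSR can be suspended by a yield before the back edge that would trigger replacement, so the tiers must agree on frames that leave and re-enter the stack.

// gen2 arms OSR (%OptimizeOsr at i == 1) and then yields, so replacement
// can only complete after g.next() resumes the frame; gen3 reverses the
// order, and gen4 arms from loops nested inside the suspendable one.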
diff --git a/deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js b/deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js
index 5aa2efdb36..ce5b46de95 100644
--- a/deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js
+++ b/deps/v8/test/mjsunit/ignition/regress-599001-verifyheap.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --verify-heap --expose-gc
+// Flags: --ignition --verify-heap --expose-gc
// Tests that verify heap works for BytecodeArrays in the large object space.
diff --git a/deps/v8/test/mjsunit/ignition/regress-612386-smi-to-double-transition.js b/deps/v8/test/mjsunit/ignition/regress-612386-smi-to-double-transition.js
new file mode 100644
index 0000000000..275f7d62d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-612386-smi-to-double-transition.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-inline-new
+
+function keyed_store(obj, key, value) {
+ obj[key] = value;
+}
+
+function foo() {
+ obj = {};
+ obj.smi = 1;
+ obj.dbl = 1.5;
+ obj.obj = {a:1};
+
+ // Transition keyed store IC to polymorphic.
+ keyed_store(obj, "smi", 100);
+ keyed_store(obj, "dbl", 100);
+ keyed_store(obj, "obj", 100);
+
+ // Now call with a FAST_SMI_ELEMENTS object.
+ var smi_array = [5, 1, 1];
+ keyed_store(smi_array, 1, 6);
+ // Transition from FAST_SMI_ELEMENTS to FAST_DOUBLE_ELEMENTS.
+ keyed_store(smi_array, 2, 1.2);
+}
+
+foo();
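In isolation, the elements-kind transition the regression test pushes through the keyed store IC (kind names are V8-internal):

var smi_array = [5, 1, 1]; // all small integers: FAST_SMI_ELEMENTS
smi_array[1] = 6;          // still SMI-backed
smi_array[2] = 1.2;        // double store transitions to FAST_DOUBLE_ELEMENTS
console.log(smi_array);    // [5, 6, 1.2]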
diff --git a/deps/v8/test/mjsunit/ignition/regress-616064.js b/deps/v8/test/mjsunit/ignition/regress-616064.js
new file mode 100644
index 0000000000..06de873293
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-616064.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition
+
+function foo() {
+ if (this.Worker) {
+ function __f_0() { this.s = a; }
+ function __f_1() {
+ this.l = __f_0;
+ }
+
+ with ( 'source' , Object ) throw function __f_0(__f_0) {
+ return Worker.__f_0(-2147483648, __f_0);
+ };
+
+ var __v_9 = new Worker('');
+ __f_1 = {s: Math.s, __f_1: true};
+ }
+}
+
+try {
+ foo();
+} catch(e) {
+}
diff --git a/deps/v8/test/mjsunit/ignition/regress-629792-source-position-on-jump.js b/deps/v8/test/mjsunit/ignition/regress-629792-source-position-on-jump.js
new file mode 100644
index 0000000000..f87caf681a
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-629792-source-position-on-jump.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(t) {
+ var f = t || this;
+ for (var i in t) {
+ for (var j in t) {
+ (j);
+ continue;
+ }
+ }
+}
+f();
diff --git a/deps/v8/test/mjsunit/induction-variable-turbofan.js b/deps/v8/test/mjsunit/induction-variable-turbofan.js
new file mode 100644
index 0000000000..6957859f9e
--- /dev/null
+++ b/deps/v8/test/mjsunit/induction-variable-turbofan.js
@@ -0,0 +1,102 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo
+
+// TurboFan optimizes integer loops. These tests check that we compute
+// the correct upper and lower bounds.
+function positive_increment() {
+ for (var i = 5; i < 10; i++) {
+ if (i < 0) return false;
+ if (i > 20) return false;
+ if (i === 7) return true;
+ }
+ return false;
+}
+
+function positive_increment_strict() {
+ for (var i = 5; i < 10; i++) {
+ if (i < 0) return false;
+ if (i === 10) return false;
+ }
+ return true;
+}
+
+function positive_increment_non_strict() {
+ for (var i = 5; i <= 10; i++) {
+ if (i < 0) return false;
+ if (i === 10) return true;
+ }
+ return false;
+}
+
+function negative_increment() {
+ for (var i = 10; i > 5;) {
+ if (i < 0) return false;
+ if (i > 20) return false;
+ if (i === 7) return true;
+ i -= 1;
+ }
+ return false;
+}
+
+function positive_decrement() {
+ for (var i = 10; i > 5; i--) {
+ if (i < 0) return false;
+ if (i === 7) return true;
+ }
+ return false;
+}
+
+function positive_decrement_strict() {
+ for (var i = 10; i > 5; i--) {
+ if (i < 0) return false;
+ if (i === 5) return false;
+ }
+ return true;
+}
+function positive_decrement_non_strict() {
+ for (var i = 10; i >= 5; i--) {
+ if (i < 0) return false;
+ if (i === 5) return true;
+ }
+ return false;
+}
+
+function negative_decrement() {
+ for (var i = 5; i < 10;) {
+ if (i < 0) return false;
+ if (i === 7) return true;
+ i -= -1;
+ }
+ return false;
+}
+
+function variable_bound() {
+ for (var i = 5; i < 10; i++) {
+ for (var j = 5; j < i; j++) {
+ if (j < 0) return false;
+ if (j === 7) return true;
+ }
+ }
+ return false;
+
+}
+
+function test(f) {
+ f();
+ assertTrue(f());
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(f());
+}
+
+test(positive_increment);
+test(positive_increment_strict);
+test(positive_increment_non_strict);
+test(negative_increment);
+test(positive_decrement);
+test(positive_decrement_strict);
+test(positive_decrement_non_strict);
+test(negative_decrement);
+test(variable_bound);
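The test(f) harness above is the standard mjsunit warm-up shape; the same pattern in a standalone sketch (run under d8 --allow-natives-syntax):

function square(x) { return x * x; }
square(3); square(4);                // warm up: collect type feedback
%OptimizeFunctionOnNextCall(square); // request optimized code for the next call
console.log(square(5));              // 25, now answered by optimized code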
diff --git a/deps/v8/test/mjsunit/json-replacer-order.js b/deps/v8/test/mjsunit/json-replacer-order.js
index 8cb64414e7..19b69bfe7a 100644
--- a/deps/v8/test/mjsunit/json-replacer-order.js
+++ b/deps/v8/test/mjsunit/json-replacer-order.js
@@ -20,7 +20,6 @@ var space = Object.defineProperty(new String, 'toString', {
});
JSON.stringify('', replacer, space);
-
assertEquals(2, log.length);
assertEquals('get 0', log[0]);
assertEquals('toString', log[1]);
diff --git a/deps/v8/test/mjsunit/json-stringify-holder.js b/deps/v8/test/mjsunit/json-stringify-holder.js
new file mode 100644
index 0000000000..2f06d77095
--- /dev/null
+++ b/deps/v8/test/mjsunit/json-stringify-holder.js
@@ -0,0 +1,104 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testBasic() {
+ var stack = [];
+ var object = {a: false};
+ var replaced = {a: false, replaced: true};
+
+ function replacer(key, value) {
+ stack.push({ holder: this, key, value });
+ if (stack.length === 1) return replaced;
+ if (key === "a") return true;
+ return value;
+ }
+
+ assertEquals(`{"a":true,"replaced":true}`, JSON.stringify(object, replacer));
+
+ assertEquals([
+ {
+ holder: { "": { a: false } },
+ key: "",
+ value: { a: false }
+ },
+ {
+ holder: { a: false, replaced: true },
+ key: "a",
+ value: false
+ },
+ {
+ holder: { a: false, replaced: true },
+ key: "replaced",
+ value: true
+ }
+ ], stack);
+
+ assertSame(stack[0].holder[""], object);
+ assertSame(stack[0].value, object);
+ assertSame(stack[1].holder, replaced);
+ assertSame(stack[2].holder, replaced);
+})();
+
+(function testToJSON() {
+ var stack = [];
+ var object = {a: false, toJSON };
+ var nested = { toJSON: nestedToJSON };
+ var replaced = {a: false, replaced: true, nested };
+ var toJSONd = {a: false, toJSONd: true }
+ var nestedToJSONd = { nestedToJSONd: true };
+
+ function toJSON(key, value) {
+ return toJSONd;
+ }
+
+ function nestedToJSON(key, value) {
+ return nestedToJSONd;
+ }
+
+ function replacer(key, value) {
+ stack.push({ holder: this, key, value });
+ if (stack.length === 1) return replaced;
+ if (key === "a") return true;
+ return value;
+ }
+
+ assertEquals(`{"a":true,"replaced":true,"nested":{"nestedToJSONd":true}}`,
+ JSON.stringify(object, replacer));
+
+ assertEquals([
+ {
+ holder: { "": { a: false, toJSON: toJSON } },
+ key: "",
+ value: { a: false, toJSONd: true }
+ },
+ {
+ holder: { a: false, replaced: true, nested: { toJSON: nestedToJSON } },
+ key: "a",
+ value: false
+ },
+ {
+ holder: { a: false, replaced: true, nested: { toJSON: nestedToJSON } },
+ key: "replaced",
+ value: true
+ },
+ {
+ holder: { a: false, replaced: true, nested: { toJSON: nestedToJSON } },
+ key: "nested",
+ value: { nestedToJSONd: true }
+ },
+ {
+ holder: { nestedToJSONd: true },
+ key: "nestedToJSONd",
+ value: true
+ }
+ ], stack);
+
+ assertSame(stack[0].holder[""], object);
+ assertSame(stack[0].value, toJSONd);
+ assertSame(stack[1].holder, replaced);
+ assertSame(stack[2].holder, replaced);
+ assertSame(stack[3].holder, replaced);
+ assertSame(stack[3].value, nestedToJSONd);
+ assertSame(stack[4].holder, nestedToJSONd);
+})();
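The holder relationship the new test pins down, in its smallest form: JSON.stringify calls the replacer with this bound to the object (or the top-level { "": value } wrapper) whose property is being visited, which is why the replacer must be a plain function rather than an arrow.

var seen = [];
JSON.stringify({ a: { b: 1 } }, function (key, value) {
  seen.push(key + " on " + JSON.stringify(this));
  return value;
});
console.log(seen);
// [' on {"":{"a":{"b":1}}}',
//  'a on {"a":{"b":1}}',
//  'b on {"b":1}']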
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 84f2056856..3652febc3c 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -234,7 +234,9 @@ TestInvalid('"Garbage""After string"');
function TestStringify(expected, input) {
assertEquals(expected, JSON.stringify(input));
- assertEquals(expected, JSON.stringify(input, null, 0));
+ assertEquals(expected, JSON.stringify(input, (key, value) => value));
+ assertEquals(JSON.stringify(input, null, "="),
+ JSON.stringify(input, (key, value) => value, "="));
}
TestStringify("true", true);
@@ -451,8 +453,8 @@ var counter = { get toJSON() { getCount++;
// RegExps are not callable, so they are stringified as objects.
TestStringify('{}', /regexp/);
TestStringify('42', counter);
-assertEquals(2, getCount);
-assertEquals(2, callCount);
+assertEquals(4, getCount);
+assertEquals(4, callCount);
var oddball2 = Object(42);
var oddball3 = Object("foo");
@@ -518,3 +520,6 @@ reviver = function(p, v) {
return p === "" ? v : 42;
}
assertEquals({a: 0, b: 1}, JSON.parse('{"a":0,"b":1}', reviver));
+
+reviver = (k, v) => (v === Infinity) ? "inf" : v;
+assertEquals('{"":"inf"}', JSON.stringify({"":Infinity}, reviver));
diff --git a/deps/v8/test/mjsunit/json2.js b/deps/v8/test/mjsunit/json2.js
index f68c76c92a..75e25f8924 100644
--- a/deps/v8/test/mjsunit/json2.js
+++ b/deps/v8/test/mjsunit/json2.js
@@ -35,7 +35,9 @@ assertTrue(JSON.stringify(this, null, 0).indexOf('"a":12345') > 0);
// Test JSON.stringify of array in dictionary mode.
function TestStringify(expected, input) {
assertEquals(expected, JSON.stringify(input));
- assertEquals(expected, JSON.stringify(input, null, 0));
+ assertEquals(expected, JSON.stringify(input, (key, value) => value));
+ assertEquals(JSON.stringify(input, null, "="),
+ JSON.stringify(input, (key, value) => value, "="));
}
var array_1 = [];
@@ -76,7 +78,7 @@ var getter_obj = { get getter() {
return 123;
} };
TestStringify('{"getter":123}', getter_obj);
-assertEquals(2, counter);
+assertEquals(4, counter);
// Test toJSON function.
var tojson_obj = { toJSON: function() {
@@ -85,7 +87,7 @@ var tojson_obj = { toJSON: function() {
},
a: 1};
TestStringify('[1,2]', tojson_obj);
-assertEquals(4, counter);
+assertEquals(8, counter);
// Test that we don't recursively look for the toJSON function.
var tojson_proto_obj = { a: 'fail' };
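The doubled getter/toJSON counters here and in json.js above both fall out of the new TestStringify body, which now stringifies every input four times instead of two:

// 1. JSON.stringify(input)
// 2. JSON.stringify(input, (key, value) => value)      // identity replacer
// 3. JSON.stringify(input, null, "=")                  // gap only
// 4. JSON.stringify(input, (key, value) => value, "=") // replacer + gap
// so each getter/toJSON fires four times (2 -> 4) or twice as often (4 -> 8).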
diff --git a/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js b/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js
deleted file mode 100644
index c5bd1450f9..0000000000
--- a/deps/v8/test/mjsunit/lithium/SeqStringSetChar.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-function MyStringFromCharCode(code, i) {
- var one_byte = %NewString(3, true);
- %_OneByteSeqStringSetChar(0, code, one_byte);
- %_OneByteSeqStringSetChar(1, code, one_byte);
- %_OneByteSeqStringSetChar(i, code, one_byte);
- var two_byte = %NewString(3, false);
- %_TwoByteSeqStringSetChar(0, code, two_byte);
- %_TwoByteSeqStringSetChar(1, code, two_byte);
- %_TwoByteSeqStringSetChar(i, code, two_byte);
- return one_byte + two_byte;
-}
-
-MyStringFromCharCode(65, 2);
-var r1 = MyStringFromCharCode(65, 2);
-%OptimizeFunctionOnNextCall(MyStringFromCharCode);
-var r2 = MyStringFromCharCode(65, 2);
-assertEquals(r1, r2);
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 8796d05f16..30abc197e9 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --stack-size=100 --harmony
-// Flags: --harmony-simd --harmony-instanceof
+// Flags: --harmony-simd
function test(f, expected, type) {
try {
@@ -147,7 +147,12 @@ test(function() {
}, "Method Set.prototype.add called on incompatible receiver [object Array]",
TypeError);
-// kInstanceofFunctionExpected
+// kNonCallableInInstanceOfCheck
+test(function() {
+ 1 instanceof {};
+}, "Right-hand side of 'instanceof' is not callable", TypeError);
+
+// kNonObjectInInstanceOfCheck
test(function() {
1 instanceof 1;
}, "Right-hand side of 'instanceof' is not an object", TypeError);
diff --git a/deps/v8/test/mjsunit/migrations.js b/deps/v8/test/mjsunit/migrations.js
index a18d884059..2bd70fb771 100644
--- a/deps/v8/test/mjsunit/migrations.js
+++ b/deps/v8/test/mjsunit/migrations.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-observe
// Flags: --allow-natives-syntax --track-fields --expose-gc
var global = Function('return this')();
@@ -275,10 +274,6 @@ var migrations = [
},
},
{
- name: "observe",
- migr: function(o, i) { Object.observe(o, function(){}); },
- },
- {
name: "seal",
migr: function(o, i) { Object.seal(o); },
},
diff --git a/deps/v8/test/mjsunit/mirror-object.js b/deps/v8/test/mjsunit/mirror-object.js
index 834d7a580a..a46c41a3c9 100644
--- a/deps/v8/test/mjsunit/mirror-object.js
+++ b/deps/v8/test/mjsunit/mirror-object.js
@@ -200,10 +200,10 @@ assertFalse(math_mirror.property("E").canDelete());
// Test objects with JavaScript accessors.
o = {}
-o.__defineGetter__('a', function(){return 'a';});
-o.__defineSetter__('b', function(){});
-o.__defineGetter__('c', function(){throw 'c';});
-o.__defineSetter__('c', function(){throw 'c';});
+o.__defineGetter__('a', function (){return 'a';});
+o.__defineSetter__('b', function (){});
+o.__defineGetter__('c', function (){throw 'c';});
+o.__defineSetter__('c', function (){throw 'c';});
testObjectMirror(o, 'Object', 'Object');
mirror = debug.MakeMirror(o);
// a has getter but no setter.
@@ -265,3 +265,27 @@ assertEquals(Number, property_map["[[TargetFunction]]"].value().value());
assertTrue("[[BoundArgs]]" in property_map);
assertEquals("object", property_map["[[BoundArgs]]"].value().type());
assertEquals(1, property_map["[[BoundArgs]]"].value().value().length);
+
+// Test JSProxy internal properties.
+var target = {};
+var handler = {
+ get: function (target, name, receiver) {
+ return target[name];
+ },
+ set: function(target, name, value, receiver) {
+ target[name] = value;
+ return value;
+ }
+}
+ip = debug.ObjectMirror.GetInternalProperties(new Proxy(target, handler));
+assertEquals(3, ip.length);
+var property_map = {};
+for (var i = 0; i < ip.length; i++) {
+ property_map[ip[i].name()] = ip[i];
+}
+assertTrue("[[Target]]" in property_map);
+assertEquals(target, property_map["[[Target]]"].value().value());
+assertTrue("[[Handler]]" in property_map);
+assertEquals(handler, property_map["[[Handler]]"].value().value());
+assertTrue("[[IsRevoked]]" in property_map);
+assertEquals(false, property_map["[[IsRevoked]]"].value().value());
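The three internal properties the mirror now surfaces map onto the proxy's spec-level slots; a minimal sketch of how they evolve:

var revocable = Proxy.revocable({}, {});
// [[Target]] is the wrapped object, [[Handler]] the trap table, and
// [[IsRevoked]] is false while the proxy is live.
revocable.revoke();
// [[IsRevoked]] now reads true; [[Target]] and [[Handler]] are cleared to
// null at the spec level, and every further operation on the proxy throws.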
diff --git a/deps/v8/test/mjsunit/mirror-regexp.js b/deps/v8/test/mjsunit/mirror-regexp.js
index 7aae1c62ec..0711ff95ae 100644
--- a/deps/v8/test/mjsunit/mirror-regexp.js
+++ b/deps/v8/test/mjsunit/mirror-regexp.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-unicode-regexps
+// Flags: --expose-debug-as debug
// Test the mirror object for regular expression values
var dont_enum = debug.PropertyAttribute.DontEnum;
diff --git a/deps/v8/test/mjsunit/mirror-script.js b/deps/v8/test/mjsunit/mirror-script.js
index ed0dd12ace..635c658ac3 100644
--- a/deps/v8/test/mjsunit/mirror-script.js
+++ b/deps/v8/test/mjsunit/mirror-script.js
@@ -83,16 +83,7 @@ function testScriptMirror(f, file_name, file_lines, type, compilation_type,
// Test the script mirror for different functions.
-testScriptMirror(function(){}, 'mirror-script.js', 99, 2, 0);
-testScriptMirror(Math.abs, 'native math.js', -1, 0, 0);
+testScriptMirror(function(){}, 'mirror-script.js', 90, 2, 0);
+testScriptMirror(Math.random, 'native math.js', -1, 0, 0);
testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
-
-// Test taking slices of source.
-var mirror = debug.MakeMirror(eval('(function(){\n 1;\n})')).script();
-assertEquals('(function(){\n', mirror.sourceSlice(0, 1).sourceText());
-assertEquals(' 1;\n', mirror.sourceSlice(1, 2).sourceText());
-assertEquals('})', mirror.sourceSlice(2, 3).sourceText());
-assertEquals('(function(){\n 1;\n', mirror.sourceSlice(0, 2).sourceText());
-assertEquals(' 1;\n})', mirror.sourceSlice(1, 3).sourceText());
-assertEquals('(function(){\n 1;\n})', mirror.sourceSlice(0, 3).sourceText());
diff --git a/deps/v8/test/mjsunit/mjsunit.gyp b/deps/v8/test/mjsunit/mjsunit.gyp
index 35ce2ffdee..e0a7469248 100644
--- a/deps/v8/test/mjsunit/mjsunit.gyp
+++ b/deps/v8/test/mjsunit/mjsunit.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'mjsunit.isolate',
diff --git a/deps/v8/test/mjsunit/mjsunit.isolate b/deps/v8/test/mjsunit/mjsunit.isolate
index 18b73c2a14..6ebd801eac 100644
--- a/deps/v8/test/mjsunit/mjsunit.isolate
+++ b/deps/v8/test/mjsunit/mjsunit.isolate
@@ -13,7 +13,8 @@
'../../tools/profile_view.js',
'../../tools/profviz/composer.js',
'../../tools/splaytree.js',
- '../../tools/tickprocessor.js'
+ '../../tools/tickprocessor.js',
+ '../../tools/dumpcpp.js'
],
},
'includes': [
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 9b07953c8a..6a7c2da9e4 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -114,6 +114,9 @@ var assertUnreachable;
var assertOptimized;
var assertUnoptimized;
+// Assert that a string contains another expected substring.
+var assertContains;
+
(function () { // Scope for utility functions.
@@ -416,6 +419,12 @@ var assertUnoptimized;
throw new MjsUnitAssertionError(message);
};
+ assertContains = function(sub, value, name_opt) {
+ if (value == null ? (sub != null) : value.indexOf(sub) == -1) {
+ fail("contains '" + String(sub) + "'", value, name_opt);
+ }
+ };
+
var OptimizationStatusImpl = undefined;
var OptimizationStatus = function(fun, sync_opt) {
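Usage of the new helper, as a sketch: it passes whenever value.indexOf(sub) finds a match (so it works on arrays as well as strings) and otherwise fails with a "contains '<sub>'" message.

assertContains("frame", "stack frame #0"); // passes
assertContains(2, [1, 2, 3]);              // passes: Array#indexOf applies
// assertContains("x", "abc");             // would fail: contains 'x'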
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index ce7436f2c2..5231070b60 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -55,108 +55,12 @@
'es6/debug-promises/reject-with-undefined-reject': [FAIL],
'es6/debug-promises/reject-with-invalid-reject': [FAIL],
- ##############################################################################
- # TurboFan compiler failures.
-
- # TODO(verwaest): Some tests are over-restrictive about object layout.
- 'array-constructor-feedback': [PASS, NO_VARIANTS],
- 'array-feedback': [PASS, NO_VARIANTS],
- 'compare-known-objects-slow': [PASS, NO_VARIANTS],
- 'elements-kind': [PASS, NO_VARIANTS],
- 'opt-elements-kind': [PASS, NO_VARIANTS],
- 'smi-representation': [PASS, NO_VARIANTS],
-
- # Some tests are just too slow to run for now.
- 'big-object-literal': [PASS, NO_VARIANTS],
- 'bit-not': [PASS, NO_VARIANTS],
- 'json2': [PASS, NO_VARIANTS],
- 'packed-elements': [PASS, NO_VARIANTS],
- 'string-indexof-1': [PASS, NO_VARIANTS],
- 'unbox-double-arrays': [PASS, NO_VARIANTS],
- 'unicode-test': [PASS, NO_VARIANTS],
- 'whitespaces': [PASS, NO_VARIANTS],
- 'compiler/osr-assert': [PASS, NO_VARIANTS],
- 'es6/string-fromcodepoint': [PASS, NO_VARIANTS],
- 'regress/regress-2185-2': [PASS, NO_VARIANTS],
- 'regress/regress-2612': [PASS, NO_VARIANTS],
-
# Issue 3660: Replacing activated TurboFan frames by unoptimized code does
# not work, but we expect it to not crash.
'debug-step-turbofan': [PASS, FAIL],
- # TODO(mstarzinger): Optimizing top-level code revealed some issues. Fix!
- 'bitops-info': [PASS, NO_VARIANTS], # fails on ARM hardware.
- 'md5': [PASS, NO_VARIANTS], # fails on ARM hardware.
- 'debug-break-inline': [PASS, NO_VARIANTS], # very flaky.
- 'debug-compile-event-newfunction': [PASS, NO_VARIANTS],
- 'debug-conditional-breakpoints': [PASS, NO_VARIANTS],
- 'debug-evaluate-locals-optimized': [PASS, NO_VARIANTS],
- 'debug-evaluate-locals-optimized-double': [PASS, NO_VARIANTS],
- 'debug-evaluate-recursive': [PASS, NO_VARIANTS], # only in no-snap debug.
- 'debug-ignore-breakpoints': [PASS, NO_VARIANTS], # only in no-snap debug.
- 'debug-setbreakpoint': [PASS, NO_VARIANTS], # only in no-snap debug.
- 'debug-step': [PASS, NO_VARIANTS], # windows only.
- 'debug-step-2': [PASS, NO_VARIANTS], # flaky in no-snap mode.
- 'debug-step-3': [PASS, NO_VARIANTS], # flaky in no-snap mode.
- 'debug-stepframe-clearing': [PASS, NO_VARIANTS], # only in no-snap debug.
- 'debug-stepin-call-function-stub': [PASS, NO_VARIANTS], # only in no-snap debug.
- 'debug-stepin-positions': [PASS, NO_VARIANTS], # only due to inlining.
- 'regress/regress-3717': [PASS, NO_VARIANTS], # only in no-snap mode.
- 'regress/regress-2451': [PASS, NO_VARIANTS], # with custom snapshot and gc-stress.
- 'debug-multiple-breakpoints': [PASS, NO_VARIANTS], # with custom snapshot and gc-stress.
- 'debug-listbreakpoints': [PASS, NO_VARIANTS], # arm64 nosnap with turbofan
- 'debug-enable-disable-breakpoints': [PASS, NO_VARIANTS], #arm64 nosnap with turbofan.
-
- # Issue 4035: unexpected frame->context() in debugger
- 'regress/regress-crbug-107996': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-171715': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-222893': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-323936': [PASS, NO_VARIANTS],
- 'regress/regress-crbug-491943': [PASS, NO_VARIANTS],
- 'regress/regress-325676': [PASS, NO_VARIANTS],
- 'debug-evaluate-closure': [PASS, NO_VARIANTS],
- 'debug-evaluate-with': [PASS, NO_VARIANTS],
-
- # TODO(mstarzinger): Optimizing top-level code flushed out some correctness
- # issues on ARM and ARM64.
- 'es6/math-log2-log10': [PASS, NO_VARIANTS], # on ARM and ARM64.
- 'mirror-script': [PASS, NO_VARIANTS], # on ARM64 only.
-
- # TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
- 'debug-evaluate-const': [PASS, NO_VARIANTS],
- 'debug-evaluate-locals': [PASS, NO_VARIANTS],
- 'debug-evaluate-locals-capturing': [PASS, NO_VARIANTS],
- 'debug-liveedit-check-stack': [PASS, NO_VARIANTS], # only in no-snap mode.
- 'debug-liveedit-double-call': [PASS, NO_VARIANTS],
- 'debug-set-variable-value': [PASS, NO_VARIANTS],
- 'debug-stepout-scope-part1': [PASS, NO_VARIANTS],
- 'debug-stepout-scope-part2': [PASS, NO_VARIANTS],
- 'debug-stepout-scope-part3': [PASS, NO_VARIANTS],
- 'es6/debug-evaluate-blockscopes': [PASS, NO_VARIANTS],
- # Issue 4055: Scope chain length observed by debugger is off.
- 'es6/generators-debug-scopes': [PASS, NO_VARIANTS],
-
- # TODO(titzer): --always-opt incorrectly disables CrankShaft soft deopt points
- 'result-table-min': [PASS, NO_VARIANTS],
- 'result-table-max': [PASS, NO_VARIANTS],
- # TODO(titzer): too slow in --turbo mode due to O(n^2) graph verification.
- 'regress/regress-1122': [PASS, NO_VARIANTS],
-
- # Assumptions about optimization need investigation in TurboFan.
- 'regress-sync-optimized-lists': [PASS, NO_VARIANTS],
- 'regress/regress-store-uncacheable': [PASS, NO_VARIANTS],
-
- # issue 4078:
- 'allocation-site-info': [PASS, NO_VARIANTS],
-
- # TODO(turbofan): The escape analysis needs some investigation.
- 'compiler/escape-analysis-deopt-5': [PASS, NO_VARIANTS],
- 'compiler/escape-analysis-9': [PASS, NO_VARIANTS],
-
##############################################################################
# Too slow in debug mode with --stress-opt mode.
- 'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
- 'compiler/regress-funcaller': [PASS, ['mode == debug', SKIP]],
'regress/regress-2318': [PASS, ['mode == debug', SKIP]],
'regress/regress-create-exception': [PASS, ['mode == debug', SKIP]],
@@ -197,11 +101,6 @@
'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', SKIP]],
##############################################################################
- # This test expects to reach a certain recursion depth, which may not work
- # for debug mode.
- 'json-recursive': [PASS, ['mode == debug', PASS, FAIL]],
-
- ##############################################################################
# Skip long running tests that time out in debug mode.
'generated-transition-stub': [PASS, ['mode == debug', SKIP]],
'migrations': [SKIP],
@@ -217,6 +116,7 @@
# that, it doesn't make sense to run several variants of d8-os anyways.
'd8-os': [PASS, NO_VARIANTS, ['isolates or arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
'tools/tickprocessor': [PASS, NO_VARIANTS, ['arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
+ 'tools/dumpcpp': [PASS, NO_VARIANTS, ['arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
##############################################################################
# Long running test that reproduces memory leak and should be run manually.
@@ -231,7 +131,6 @@
# Tests with different versions for release and debug.
'compiler/alloc-number': [PASS, ['mode == debug', SKIP]],
'compiler/alloc-number-debug': [PASS, ['mode == release', SKIP]],
- 'regress/regress-634': [PASS, ['mode == debug', SKIP]],
'regress/regress-634-debug': [PASS, ['mode == release', SKIP]],
# BUG(336820). TODO(bmeurer): Investigate.
@@ -241,10 +140,6 @@
# nosse2. Also for arm novfp3.
'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == x87 or arch == arm and simulator == True', PASS]],
- # Skip endian dependent test for mips due to different typed views of the same
- # array buffer.
- 'nans': [PASS, ],
-
# This test variant makes only sense on arm.
'math-floor-of-div-nosudiv': [PASS, SLOW, ['arch not in [arm, arm64, android_arm, android_arm64]', SKIP]],
@@ -253,42 +148,42 @@
'asm/poppler/*': [PASS, SLOW, FAST_VARIANTS],
'asm/sqlite3/*': [PASS, SLOW, FAST_VARIANTS],
- # BUG(v8:4458). TODO(mvstanton): reenable the test once --vector-stores is
- # prermanently enabled.
- 'call-counts': [SKIP],
-
# Slow tests.
'copy-on-write-assert': [PASS, SLOW],
'debug-scopes': [PASS, SLOW],
- 'es7/object-observe': [PASS, ['mode == debug', SLOW]],
'numops-fuzz-part*': [PASS, ['mode == debug', SLOW]],
'readonly': [PASS, SLOW],
- 'regress/regress-1200351': [PASS, ['mode == debug', SLOW]],
+ 'regress/regress-1200351': [PASS, SLOW],
'regress/regress-crbug-474297': [PASS, ['mode == debug', SLOW]],
- 'es6/tail-call-megatest*': [PASS, FAST_VARIANTS],
+ 'es6/tail-call-megatest*': [PASS, SLOW, FAST_VARIANTS, ['tsan', SKIP]],
# TODO(titzer): ASM->WASM tests on these platforms
'wasm/asm-wasm': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
# TODO(branelson): Figure out why ignition + asm-wasm-stdlib fails.
- 'wasm/asm-wasm-stdlib': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el] or ignition == True', SKIP]],
- 'wasm/asm-wasm-literals': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el] or ignition == True', SKIP]],
+ # TODO(branelson): The old status entry was misconfigured as it added
+ # "or ignition == True". This was deprecated at some point and was never
+ # true. Essentially the next lines skip the tests for a bunch of
+ # architectures.
+ 'wasm/asm-wasm-stdlib': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
+ 'wasm/asm-wasm-literals': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
'wasm/asm-wasm-copy': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
'wasm/asm-wasm-deopt': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
-
- # TODO(branelson): Figure out why ignition + asm->wasm fails embenchen.
- 'wasm/embenchen/*': [PASS, ['arch == arm64', SKIP], ['ignition == True', SKIP]],
-
- # TODO(bradnelson) Fix and re-enable.
- 'wasm/embenchen/box2d': [SKIP], # hang
- 'wasm/embenchen/lua_binarytrees': [SKIP], # fails decode
- #'wasm/embenchen/zlib': [SKIP], # fails gc-stress
+ 'wasm/asm-wasm-switch': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
# case-insensitive unicode regexp relies on case mapping provided by ICU.
- 'harmony/unicode-regexp-ignore-case': [PASS, ['no_i18n == True', FAIL]],
- 'harmony/unicode-regexp-ignore-case-noi18n': [FAIL, ['no_i18n == True', PASS]],
+ 'es6/unicode-regexp-ignore-case': [PASS, ['no_i18n == True', FAIL]],
+ 'es6/unicode-regexp-ignore-case-noi18n': [FAIL, ['no_i18n == True', PASS]],
'regress/regress-5036': [PASS, ['no_i18n == True', FAIL]],
# desugaring regexp property class relies on ICU.
'harmony/regexp-property-*': [PASS, ['no_i18n == True', FAIL]],
+
+ # TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m).
+ 'unicodelctest': [PASS, NO_VARIANTS],
+ 'unicodelctest-no-optimization': [PASS, NO_VARIANTS],
+
+ # TODO(rmcilroy,5038): Crashes in Deoptimizer::PatchCodeForDeoptimization on
+ # nosnap builds when --stress-opt and --turbo-from-bytecode is enabled.
+ 'harmony/generators-turbo': [PASS, FAST_VARIANTS],
}], # ALWAYS
['novfp3 == True', {
@@ -335,7 +230,6 @@
# TODO(mstarzinger): Takes too long with TF.
'array-sort': [PASS, NO_VARIANTS],
'regress/regress-91008': [PASS, NO_VARIANTS],
- 'regress/regress-417709a': [PASS, ['arch == arm64', NO_VARIANTS]],
'regress/regress-transcendental': [PASS, ['arch == arm64', NO_VARIANTS]],
'compiler/osr-regress-max-locals': [PASS, NO_VARIANTS],
'math-floor-of-div': [PASS, NO_VARIANTS],
@@ -363,18 +257,12 @@
# BUG(v8:3097)
'debug-references': [SKIP],
- # BUG(v8:4754).
- 'debug-referenced-by': [PASS, NO_VARIANTS],
-
# BUG(v8:4779): Crashes flakily with stress mode on arm64.
'array-splice': [PASS, SLOW, ['arch == arm64', FAST_VARIANTS]],
-}], # 'gc_stress == True'
-##############################################################################
-['no_i18n == True and mode == debug', {
- # Tests too slow for no18n debug.
- 'regress/regress-1200351': [SKIP],
-}], # 'no_i18n == True and mode == debug'
+ # BUG(v8:5053).
+ 'wasm/embenchen/fasta': [PASS, FAST_VARIANTS],
+}], # 'gc_stress == True'
##############################################################################
['byteorder == big', {
@@ -384,8 +272,6 @@
'asm/sqlite3/*': [SKIP],
# TODO(mips-team): Fix Wasm for big-endian.
'wasm/*': [SKIP],
- 'regress/regress-599717': [SKIP],
- 'regress/regress-599719': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -407,15 +293,12 @@
'big-object-literal': [SKIP],
'compiler/regress-arguments': [SKIP],
'compiler/regress-gvn': [SKIP],
- 'compiler/regress-max-locals-for-osr': [SKIP],
'compiler/regress-4': [SKIP],
'compiler/regress-or': [SKIP],
'compiler/regress-rep-change': [SKIP],
'regress/regress-1117': [SKIP],
- 'regress/regress-1145': [SKIP],
'regress/regress-1849': [SKIP],
'regress/regress-3247124': [SKIP],
- 'regress/regress-634': [SKIP],
'regress/regress-91008': [SKIP],
'regress/regress-91010': [SKIP],
'regress/regress-91013': [SKIP],
@@ -464,11 +347,9 @@
['arch == arm64 and mode == debug and simulator_run == True', {
# Pass but take too long with the simulator in debug mode.
- 'array-iterate-backwards': [PASS, TIMEOUT],
'array-sort': [PASS, TIMEOUT],
'packed-elements': [SKIP],
'regexp-global': [SKIP],
- 'compiler/alloc-numbers': [SKIP],
'math-floor-of-div': [PASS, TIMEOUT],
'math-floor-of-div-nosudiv': [PASS, TIMEOUT],
'unicodelctest': [PASS, TIMEOUT],
@@ -497,6 +378,9 @@
'deep-recursion': [SKIP],
'regress/regress-builtinbust-7': [SKIP],
'string-localecompare': [SKIP],
+
+ # Too slow.
+ 'harmony/regexp-property-lu-ui': [SKIP],
}], # 'msan == True'
##############################################################################
@@ -520,7 +404,6 @@
'big-object-literal': [SKIP],
'compiler/alloc-number': [SKIP],
'regress/regress-490': [SKIP],
- 'regress/regress-634': [SKIP],
'regress/regress-create-exception': [SKIP],
'regress/regress-3247124': [SKIP],
@@ -564,7 +447,6 @@
# the buildbot.
'compiler/alloc-number': [SKIP],
'regress/regress-490': [SKIP],
- 'regress/regress-634': [SKIP],
'regress/regress-create-exception': [SKIP],
'regress/regress-3247124': [SKIP],
@@ -584,13 +466,6 @@
}], # 'arch == mips'
##############################################################################
-['arch == x87', {
- # Turbofan will hit the known issue that x87 changes sNaN to qNaN by default.
- 'regress/regress-undefined-nan': [SKIP],
- 'regress/regress-crbug-242924': [SKIP],
-}], # 'arch == x87'
-
-##############################################################################
['arch == mips64el or arch == mips64', {
# Slow tests which times out in debug mode.
@@ -627,7 +502,6 @@
'compiler/regress-3249650': [PASS, SLOW],
'compiler/simple-deopt': [PASS, SLOW],
'regress/regress-490': [PASS, SLOW],
- 'regress/regress-634': [PASS, SLOW],
'regress/regress-create-exception': [PASS, SLOW],
'regress/regress-3218915': [PASS, SLOW],
'regress/regress-3247124': [PASS, SLOW],
@@ -644,10 +518,9 @@
##############################################################################
['system == windows', {
# TODO(mstarzinger): Too slow with TurboFan.
- 'big-object-literal': [PASS, ['mode == debug', SKIP]],
+ 'big-object-literal': [SKIP],
'math-floor-of-div': [PASS, ['mode == debug', SKIP]],
'math-floor-of-div-nosudiv': [PASS, ['mode == debug', SKIP]],
- 'osr-regress-max-locals': [PASS, ['mode == debug', SKIP]],
'unicodelctest': [PASS, ['mode == debug', SKIP]],
# BUG(v8:3435)
@@ -658,59 +531,6 @@
}], # 'system == windows'
##############################################################################
-# Native Client uses the ARM simulator so will behave similarly to arm
-# on mjsunit tests.
-# TODO(bradchen): enable more tests for NaCl V8 when it stops using
-# the ARM simulator.
-##############################################################################
-['arch == nacl_ia32 or arch == nacl_x64', {
- # There is no /tmp directory for NaCl runs
- 'd8-os': [SKIP],
-
- # Stack manipulations in LiveEdit is not implemented for this arch.
- 'debug-liveedit-check-stack': [SKIP],
- 'debug-liveedit-stack-padding': [SKIP],
- 'debug-liveedit-restart-frame': [SKIP],
- 'debug-liveedit-double-call': [SKIP],
-
- # NaCl builds have problems with this test since Pepper_28.
- # V8 Issue 2786
- 'math-exp-precision': [SKIP],
-
- # Requires bigger stack size in the Genesis and if stack size is increased,
- # the test requires too much time to run. However, the problem test covers
- # should be platform-independent.
- 'regress/regress-1132': [SKIP],
-
- # Poor performance for NaCl V8 causes an assertion failure for this test.
- 'regress/regress-165637': [SKIP],
-
- # Skip long running test that times out in debug mode and goes OOM on NaCl.
- 'regress/regress-crbug-160010': [SKIP],
-
- # Skip tests that timout with turbofan.
- 'regress/regress-1257': [PASS, NO_VARIANTS],
- 'regress/regress-2618': [PASS, NO_VARIANTS],
- 'regress/regress-298269': [PASS, NO_VARIANTS],
- 'regress/regress-634': [PASS, NO_VARIANTS],
- 'regress/regress-91008': [PASS, NO_VARIANTS],
- 'compiler/osr-alignment': [PASS, NO_VARIANTS],
- 'compiler/osr-one': [PASS, NO_VARIANTS],
- 'compiler/osr-two': [PASS, NO_VARIANTS],
- 'stack-traces-overflow': [PASS, NO_VARIANTS],
- 'mirror-object': [PASS, NO_VARIANTS],
-
- # Bug(v8:2978).
- 'lithium/MathExp': [PASS, FAIL],
-
- # Lead to OOM:
- 'string-oom-*': [SKIP],
-
- # Crashes.
- 'harmony/private': [SKIP],
-}], # 'arch == nacl_ia32 or arch == nacl_x64'
-
-##############################################################################
['arch == s390 or arch == s390x', {
# Stack manipulations in LiveEdit is not implemented for this arch.
@@ -758,104 +578,233 @@
'regress/regress-1132': [SKIP],
}], # 'arch == ppc and simulator_run == True'
+##############################################################################
['arch == ppc64', {
# stack overflow
'big-array-literal': [SKIP],
+ 'regress/regress-353551': [SKIP],
}], # 'arch == ppc64'
##############################################################################
+['variant == stress', {
+ 'debug-evaluate-locals-optimized': [FAIL],
+ 'debug-evaluate-locals-optimized-double': [FAIL],
+ 'ignition/regress-599001-verifyheap': [SKIP],
+ 'unicode-test': [SKIP],
+}], # variant == stress
+
+##############################################################################
+['variant == turbofan', {
+
+ # Assumptions about optimization need investigation in TurboFan.
+ 'regress-sync-optimized-lists': [FAIL],
+
+}], # variant == turbofan
+
+##############################################################################
+['variant == turbofan_opt', {
+
+ # TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
+ 'debug-evaluate-closure': [FAIL],
+ 'debug-evaluate-locals': [FAIL],
+ 'debug-set-variable-value': [FAIL],
+ 'debug-evaluate-locals-optimized': [FAIL],
+ 'debug-evaluate-locals-optimized-double': [FAIL],
+ 'debug-liveedit-double-call': [FAIL],
+ 'es6/debug-evaluate-blockscopes': [FAIL],
+
+ # TODO(jgruber): Fails in --turbo --always-opt mode.
+ 'regress/regress-105': [FAIL],
+
+ # Too slow.
+ 'big-object-literal': [SKIP],
+ 'ignition/regress-599001-verifyheap': [SKIP],
+ 'unicode-test': [SKIP],
+
+}], # variant == turbofan_opt
##############################################################################
-['ignition == True', {
- # TODO(yangguo,4690): assertion failures in debugger tests.
- 'debug-allscopes-on-debugger': [FAIL],
- 'es6/debug-stepnext-for': [FAIL],
- 'es6/debug-promises/stepin-constructor': [FAIL],
- 'es6/debug-stepin-proxies': [FAIL],
- 'regress/regress-crbug-119800': [FAIL],
- 'regress/regress-opt-after-debug-deopt': [FAIL],
-
- # TODO(yangguo,4690): flaky failures on the bots.
- 'debug-stepin-builtin-callback-opt': [SKIP],
-
- # TODO(rmcilroy,4765): assertion failures in LiveEdit tests.
- 'debug-liveedit-restart-frame': [FAIL],
- 'debug-liveedit-literals': [FAIL],
- 'debug-liveedit-3': [FAIL],
- 'debug-liveedit-1': [FAIL],
- 'debug-liveedit-2': [FAIL],
- 'debug-liveedit-patch-positions-replace': [FAIL],
- 'debug-liveedit-patch-positions': [FAIL],
- 'debug-liveedit-stepin': [FAIL],
- 'debug-liveedit-newsource': [FAIL],
- 'debug-liveedit-stack-padding': [FAIL],
- 'debug-liveedit-breakpoints': [FAIL],
- 'es6/debug-liveedit-new-target-1': [FAIL],
- 'es6/debug-liveedit-new-target-2': [FAIL],
- 'es6/debug-liveedit-new-target-3': [FAIL],
- 'es6/generators-debug-liveedit': [FAIL],
+['variant == ignition', {
+ 'debug-liveedit-double-call': [FAIL],
+ 'regress-sync-optimized-lists': [FAIL],
+
+ # Might trigger stack overflow.
+ 'unicode-test': [SKIP],
# TODO(mythria, 4780): Related to type feedback for calls in interpreter.
'array-literal-feedback': [FAIL],
'regress/regress-4121': [FAIL],
- # TODO(mythria, 4764): lack of osr support.
- 'regress/regress-2618': [FAIL],
- # TODO(mythria, 4764): lack of osr support. The tests waits in a loop
- # till it is optimized. So test timeouts.
- 'array-literal-transitions': [SKIP],
+ # TODO(4680): Test doesn't know about three tier compiler pipeline.
+ 'assert-opt-and-deopt': [SKIP],
- # TODO(mythria, 4680): Related to GC and ignition holding references to
- # objects.
- 'es6/mirror-collections': [FAIL],
+ # BUG(rmcilroy,4989): Function is optimized without type feedback and so immediately deopts again, causing check failure in the test.
+ 'compiler/deopt-inlined-from-call': [FAIL],
+ 'compiler/increment-typefeedback': [FAIL],
+ 'compiler/manual-concurrent-recompile': [FAIL],
+ 'constant-folding-2': [FAIL],
+ 'debug-is-active': [FAIL],
+ 'deopt-with-fp-regs': [FAIL],
+ 'deserialize-optimize-inner': [FAIL],
+ 'div-mul-minus-one': [FAIL],
+ 'double-intrinsics': [FAIL],
+ 'elements-transition-hoisting': [FAIL],
+ 'es6/block-let-crankshaft': [FAIL],
+ 'es6/block-let-crankshaft-sloppy': [FAIL],
+ 'getters-on-elements': [FAIL],
+ 'harmony/do-expressions': [FAIL],
+ 'math-floor-of-div-minus-zero': [FAIL],
+ 'regress/regress-2132': [FAIL],
+ 'regress/regress-2339': [FAIL],
+ 'regress/regress-3176': [FAIL],
+ 'regress/regress-3709': [FAIL],
+ 'regress/regress-385565': [FAIL],
+ 'regress/regress-crbug-594183': [FAIL],
+ 'regress/regress-embedded-cons-string': [FAIL],
+ 'regress/regress-map-invalidation-2': [FAIL],
+ 'regress/regress-param-local-type': [FAIL],
+ 'regress/regress-prepare-break-while-recompile': [FAIL],
+ 'shift-for-integer-div': [FAIL],
+ 'sin-cos': [FAIL],
+ 'smi-mul-const': [FAIL],
+ 'smi-mul': [FAIL],
+ 'unary-minus-deopt': [FAIL],
+ 'array-constructor-feedback': [FAIL],
+ 'array-feedback': [FAIL],
+ 'allocation-site-info': [FAIL],
+}], # variant == ignition
+
+['variant == ignition and arch == arm64', {
+ # TODO(rmcilroy,4680): Arm64 specific timeouts.
+ 'asm/construct-double': [SKIP],
+ 'compiler/osr-one': [SKIP],
+ 'compiler/osr-two': [SKIP],
+ 'wasm/asm-wasm-i32': [SKIP],
+ 'wasm/asm-wasm-u32': [SKIP],
+}], # variant == ignition and arch == arm64
- # TODO(mythria, 4680): Fails with context_register_count_ > 0 (0 vs. 0) when
- # trying to get a context register in BytecodeGenerator.
- 'harmony/regress/regress-4658': [FAIL, ['mode == release and dcheck_always_on == False', PASS],],
+['variant == ignition and arch == arm', {
+ # TODO(rmcilroy,4680): Arm specific timeouts.
+ 'compiler/osr-one': [SKIP],
+ 'compiler/osr-two': [SKIP],
+ 'regress/regress-1257': [SKIP],
+}], # variant == ignition and arch == arm
- # TODO(rmcilroy, 4680): Script throws RangeError as expected, but does so during
- # eager compile of the whole script instead of during lazy compile of the function
- # f(), so we can't catch the exception in the try/catch. Skip because on some
- # platforms the stack limit is different and the exception doesn't fire.
- 'regress/regress-crbug-589472': [SKIP],
+['variant == ignition and msan', {
+ # TODO(mythria,4680): All of these tests have large loops and hence slow
+ # and timeout.
+ 'compiler/osr-big': [SKIP],
+ 'compiler/osr-nested': [SKIP],
+ 'regress/regress-298269': [SKIP],
+ 'regress/regress-crbug-319860': [SKIP],
+ 'regress/regress-deep-proto': [SKIP],
+ 'try': [SKIP],
+ # Too slow for interpreter and msan.
+ 'es6/tail-call-megatest*': [SKIP],
+}], # variant == ignition and msan
- # Debugger test cases that pass with ignition, but not full-codegen.
- # These differences between full-codegen and ignition are deliberate.
- 'ignition/elided-instruction-no-ignition': [FAIL],
+##############################################################################
+['variant == ignition_staging', {
+ 'allocation-site-info': [FAIL],
+ 'array-constructor-feedback': [FAIL],
+ 'array-feedback': [FAIL],
+ 'array-literal-feedback': [FAIL],
+ 'assert-opt-and-deopt': [SKIP],
+ 'compiler/deopt-inlined-from-call': [FAIL],
+ 'compiler/increment-typefeedback': [FAIL],
+ 'compiler/manual-concurrent-recompile': [FAIL],
+ 'constant-folding-2': [FAIL],
+ 'debug-is-active': [FAIL],
+ 'debug-liveedit-double-call': [FAIL],
+ 'deopt-with-fp-regs': [FAIL],
+ 'deserialize-optimize-inner': [FAIL],
+ 'div-mul-minus-one': [FAIL],
+ 'elements-transition-hoisting': [FAIL],
+ 'es6/block-let-crankshaft': [FAIL],
+ 'es6/block-let-crankshaft-sloppy': [FAIL],
+ 'getters-on-elements': [FAIL],
+ 'harmony/do-expressions': [FAIL],
+ 'math-floor-of-div-minus-zero': [FAIL],
+ 'regress/regress-2132': [FAIL],
+ 'regress/regress-2339': [FAIL],
+ 'regress/regress-3176': [FAIL],
+ 'regress/regress-3709': [FAIL],
+ 'regress/regress-385565': [FAIL],
+ 'regress/regress-4121': [FAIL],
+ 'regress/regress-crbug-594183': [FAIL],
+ 'regress/regress-embedded-cons-string': [FAIL],
+ 'regress/regress-map-invalidation-2': [FAIL],
+ 'regress/regress-param-local-type': [FAIL],
+ 'regress/regress-prepare-break-while-recompile': [FAIL],
+ 'regress-sync-optimized-lists': [FAIL],
+ 'shift-for-integer-div': [FAIL],
+ 'sin-cos': [FAIL],
+ 'smi-mul-const': [FAIL],
+ 'smi-mul': [FAIL],
+ 'unary-minus-deopt': [FAIL],
+
+ # Flaky.
+ 'asm/int32div': [SKIP],
+ 'asm/int32mod': [SKIP],
+ 'compiler/uint32': [SKIP],
+ 'regress/regress-454725': [SKIP],
+
+ # Might trigger stack overflow.
+ 'unicode-test': [SKIP],
+
+}], # variant == ignition_staging
+
+##############################################################################
+['variant == ignition_turbofan', {
+ 'debug-liveedit-double-call': [FAIL],
+
+ # Might trigger stack overflow.
+ 'unicode-test': [SKIP],
+
+ # TODO(mythria, 4780): Related to type feedback for calls in interpreter.
+ 'array-literal-feedback': [FAIL],
+ 'regress/regress-4121': [FAIL],
+ 'array-constructor-feedback': [FAIL],
+ 'array-feedback': [FAIL],
+ 'allocation-site-info': [FAIL],
'wasm/asm-wasm-f32': [PASS, ['arch in [arm64]', SKIP]],
'wasm/asm-wasm-f64': [PASS, ['arch in [arm64]', SKIP]],
-}], # ignition == True
-['ignition == True and system == windows', {
- # TODO(rmcilroy,4680): Crash on windows nosnap shared.
- 'regress/regress-crbug-352058': [PASS, ['no_snap == True', SKIP]],
+ # TODO(rmcilroy,titzer): Times out after
+ # https://codereview.chromium.org/1951013002.
+ 'regress/regress-599717': [PASS, ['tsan', SKIP]],
+
+ # TODO(rmcilroy,5038): Crashes in Deoptimizer::PatchCodeForDeoptimization on
+ # nosnap builds when --stress-opt and --turbo-from-bytecode is enabled.
+ 'harmony/generators-turbo': [PASS, FAST_VARIANTS],
+ 'regress/regress-crbug-352058': [SKIP],
- # TODO(513471): Attempting to optimize generator hits unreachable path.
- 'regress/regress-crbug-513471': [PASS, ['no_snap == True', SKIP]],
+ # TODO(jarin): No truncations on CheckFloat64Hole.
+ 'getters-on-elements': [SKIP],
- # TODO(rmcilroy,4680): Fails on win32 debug.
- 'div-mod': [PASS, ['arch == ia32', SKIP]],
-}], # ignition == True and system == windows
+ # TODO(rmcilroy): Flaky OOM.
+ 'unicodelctest-no-optimization': [SKIP],
+}], # variant == ignition_turbofan
-['ignition == True and arch == arm64', {
+['variant == ignition_turbofan and arch == arm64', {
# TODO(rmcilroy,4680): Arm64 specific timeouts.
'asm/construct-double': [SKIP],
'compiler/osr-one': [SKIP],
'compiler/osr-two': [SKIP],
'wasm/asm-wasm-i32': [SKIP],
'wasm/asm-wasm-u32': [SKIP],
-}], # ignition == True and arch == arm64
+}], # variant == ignition_turbofan and arch == arm64
-['ignition == True and arch == arm', {
+['variant == ignition_turbofan and arch == arm', {
# TODO(rmcilroy,4680): Arm specific timeouts.
'compiler/osr-one': [SKIP],
'compiler/osr-two': [SKIP],
'regress/regress-1257': [SKIP],
-}], # ignition == True and arch == arm
+}], # variant == ignition_turbofan and arch == arm
-['ignition == True and msan', {
+['variant == ignition_turbofan and msan', {
  # TODO(mythria,4680): All of these tests have large loops and hence are
  # slow and time out.
'compiler/osr-big': [SKIP],
@@ -866,55 +815,7 @@
'try': [SKIP],
# Too slow for interpreter and msan.
'es6/tail-call-megatest*': [SKIP],
-}], # ignition == True and msan
-
-['ignition == True and gc_stress == True', {
- # TODO(oth,4680): failures from the bots.
- 'es6/debug-step-destructuring-bind': [SKIP],
- 'es6/debug-stepin-collections-foreach': [SKIP],
- 'ignition/elided-instruction': [SKIP],
- 'regress/regress-269': [SKIP],
-}], # ignition == True and gc_stress == True
-
-['ignition == False', {
- # Debugger test cases that pass with full-codegen, but not ignition.
- # These differences between full-codegen and ignition are deliberate.
- 'ignition/elided-instruction': [FAIL],
-}], # ignition == False
-
-['ignition == True and system == windows and no_snap', {
- # TODO(rmcilroy): Fail with nosnap and shared libraries.
- 'es6/array-from': [FAIL],
- 'es6/classes-subclass-builtins': [FAIL],
- 'es6/computed-property-names-classes': [FAIL],
- 'es6/computed-property-names-object-literals-methods': [FAIL],
- 'es6/debug-stepin-generators': [FAIL],
- 'es6/destructuring': [FAIL],
- 'es6/destructuring-assignment': [FAIL],
- 'es6/generators-iteration': [FAIL],
- 'es6/generators-mirror': [FAIL],
- 'es6/generators-parsing': [FAIL],
- 'es6/generators-poisoned-properties': [FAIL],
- 'es6/generators-relocation': [FAIL],
- 'es6/generators-states': [FAIL],
- 'es6/iteration-semantics': [FAIL],
- 'es6/object-literals-method': [FAIL],
- 'es6/object-literals-super': [FAIL],
- 'es6/promises': [FAIL],
- 'es6/regress/regress-2681': [FAIL],
- 'es6/regress/regress-2691': [FAIL],
- 'es6/regress/regress-3280': [FAIL],
- 'es6/spread-array': [FAIL],
- 'es6/spread-call': [FAIL],
- 'es6/typedarray': [FAIL],
- 'es6/typedarray-from': [FAIL],
- 'harmony/function-sent': [FAIL],
- 'harmony/generators': [FAIL],
- 'harmony/iterator-close': [FAIL],
- 'harmony/regress/regress-4482': [FAIL],
- 'messages': [FAIL],
- 'regress-3225': [FAIL],
-}], # ignition == True and system == windows and no_snap
+}], # variant == ignition_turbofan and msan
##############################################################################
['gcov_coverage', {
diff --git a/deps/v8/test/mjsunit/object-define-property.js b/deps/v8/test/mjsunit/object-define-property.js
index 4c495c6824..1aac489839 100644
--- a/deps/v8/test/mjsunit/object-define-property.js
+++ b/deps/v8/test/mjsunit/object-define-property.js
@@ -467,53 +467,6 @@ try {
}
-// Test runtime calls to DefineDataPropertyUnchecked and
-// DefineAccessorPropertyUnchecked - make sure we don't
-// crash.
-try {
- %DefineAccessorPropertyUnchecked(0, 0, 0, 0, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked(0, 0, 0, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked(null, null, null, null);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineAccessorPropertyUnchecked(null, null, null, null, null);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked({}, null, null, null);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-// Defining properties null should fail even when we have
-// other allowed values
-try {
- %DefineAccessorPropertyUnchecked(null, 'foo', func, null, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
-try {
- %DefineDataPropertyUnchecked(null, 'foo', 0, 0);
-} catch (e) {
- assertTrue(/illegal access/.test(e));
-}
-
// Test that all possible differences in step 6 in DefineOwnProperty are
// exercised, i.e., any difference in the given property descriptor and the
// existing properties should not return true, but throw an error if the
diff --git a/deps/v8/test/mjsunit/object-literal.js b/deps/v8/test/mjsunit/object-literal.js
index 19860ff389..b861d443c0 100644
--- a/deps/v8/test/mjsunit/object-literal.js
+++ b/deps/v8/test/mjsunit/object-literal.js
@@ -24,8 +24,6 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Flags: --harmony-function-name
var obj = {
a: 7,
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index bd15ef37e4..aee7017134 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -30,7 +30,7 @@
// Limit the number of stress runs to reduce polymorphism; it defeats some of
// the assumptions made about how elements transitions work, because transition
// stubs end up going generic.
-// Flags: --stress-runs=2
+// Flags: --stress-runs=1
var elements_kind = {
fast_smi_only : 'fast smi only elements',
diff --git a/deps/v8/test/mjsunit/realm-property-access.js b/deps/v8/test/mjsunit/realm-property-access.js
new file mode 100644
index 0000000000..679886d66a
--- /dev/null
+++ b/deps/v8/test/mjsunit/realm-property-access.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var r = Realm.create();
+var f = Realm.eval(r, "function f() { return this }; f()");
+assertEquals(f, Realm.global(r));
+
+// Cross-origin property access throws
+assertThrows(() => f.a, TypeError);
+assertThrows(() => { 'use strict'; f.a = 1 }, TypeError);
+
+var r2 = Realm.createAllowCrossRealmAccess();
+var f2 = Realm.eval(r2, "function f() { return this }; f()");
+assertEquals(f2, Realm.global(r2));
+
+// Same-origin property access doesn't throw
+assertEquals(undefined, f2.a);
+f2.a = 1;
+assertEquals(1, f2.a);
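
For context: Realm is a helper exposed by V8's d8 test shell, not standard
JavaScript. A minimal sketch of the access model the new test exercises,
runnable only in d8 (print is d8's output function):

// Objects from a Realm.create() realm reject property access from outside;
// createAllowCrossRealmAccess() opts into same-origin-style access.
var closed = Realm.create();
var g1 = Realm.eval(closed, "function f() { return this }; f()");
try { g1.anything } catch (e) { print(e instanceof TypeError); }  // true

var open = Realm.createAllowCrossRealmAccess();
var g2 = Realm.eval(open, "function f() { return this }; f()");
g2.marker = 1;
print(g2.marker);  // 1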
diff --git a/deps/v8/test/mjsunit/regexp-compile.js b/deps/v8/test/mjsunit/regexp-compile.js
index 92c3f7b3dd..f00178008c 100644
--- a/deps/v8/test/mjsunit/regexp-compile.js
+++ b/deps/v8/test/mjsunit/regexp-compile.js
@@ -42,3 +42,7 @@ re.compile("(y)");
assertEquals(["y", "y"], re.exec("axyb"));
assertEquals(2, re.compile.length);
+
+// If RegExp parsing fails, the RegExp is not modified.
+var r = /./; try { r.compile('? invalid'); } catch(err){}
+assertEquals("/./", r.toString());
diff --git a/deps/v8/test/mjsunit/regexp-lastIndex.js b/deps/v8/test/mjsunit/regexp-lastIndex.js
deleted file mode 100644
index 1445b9b2ae..0000000000
--- a/deps/v8/test/mjsunit/regexp-lastIndex.js
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// lastIndex is set according to funny rules. It is typically set only
-// for global or sticky RegExps, but on a failure to find a match, it is
-// set unconditionally. If a set fails, then it acts as if in strict mode
-// and throws.
-
-var re = /x/;
-Object.defineProperty(re, 'lastIndex', {writable: false});
-assertThrows(() => re.exec(""), TypeError);
-assertEquals(["x"], re.exec("x"));
-
-var re = /x/y;
-Object.defineProperty(re, 'lastIndex', {writable: false});
-assertThrows(() => re.exec(""), TypeError);
-assertThrows(() => re.exec("x"), TypeError);
diff --git a/deps/v8/test/mjsunit/regexp-string-methods.js b/deps/v8/test/mjsunit/regexp-string-methods.js
index fa01a33ce4..d5ad9c3386 100644
--- a/deps/v8/test/mjsunit/regexp-string-methods.js
+++ b/deps/v8/test/mjsunit/regexp-string-methods.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --no-harmony-regexp-exec
-
// Regexp shouldn't use String.prototype.slice()
var s = new String("foo");
assertEquals("f", s.slice(0,1));
@@ -43,11 +41,3 @@ assertEquals("g", "foo".charAt(0));
var f2 = new RegExp("[g]", "i");
assertEquals(["G"], f2.exec("G"));
assertTrue(f2.ignoreCase);
-
-// On the other hand test is defined in a semi-coherent way as a call to exec.
-// 15.10.6.3
-// We match other browsers in using the original value of RegExp.prototype.exec.
-// I.e., RegExp.prototype.test shouldn't use the current value of
-// RegExp.prototype.exec.
-RegExp.prototype.exec = function(string) { return 'x'; };
-assertFalse(/f/.test('x'));
diff --git a/deps/v8/test/mjsunit/regress-604044.js b/deps/v8/test/mjsunit/regress-604044.js
new file mode 100644
index 0000000000..58ccfbed99
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-604044.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --min-preparse-length 1
+
+(function(_ = function() {}){})
diff --git a/deps/v8/test/mjsunit/harmony/modules.js b/deps/v8/test/mjsunit/regress-crbug-619476.js
index e56880500b..33204ae773 100644
--- a/deps/v8/test/mjsunit/harmony/modules.js
+++ b/deps/v8/test/mjsunit/regress-crbug-619476.js
@@ -1,8 +1,7 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// MODULE
-export let a = 42;
-assertEquals(42, a);
+var x = {};
+// Crashes in debug mode if an erroneous DCHECK in dfb8d333 is not removed.
+eval, x[eval];
diff --git a/deps/v8/test/mjsunit/regress/redeclaration-error-types.js b/deps/v8/test/mjsunit/regress/redeclaration-error-types.js
new file mode 100644
index 0000000000..72e097db57
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/redeclaration-error-types.js
@@ -0,0 +1,145 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+function doTest(scripts, expectedError) {
+ var realm = Realm.create();
+
+ for (var i = 0; i < scripts.length - 1; i++) {
+ Realm.eval(realm, scripts[i]);
+ }
+ assertThrows(function() {
+ Realm.eval(realm, scripts[scripts.length - 1]);
+ }, Realm.eval(realm, expectedError));
+
+ Realm.dispose(realm);
+}
+
+var tests = [
+ {
+ // ES#sec-globaldeclarationinstantiation 5.a:
+ // If envRec.HasVarDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ scripts: [
+ "var a;",
+ "let a;",
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-globaldeclarationinstantiation 6.a:
+ // If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ scripts: [
+ "let a;",
+ "var a;",
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-globaldeclarationinstantiation 5.b:
+ // If envRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ scripts: [
+ "let a;",
+ "let a;",
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-evaldeclarationinstantiation 5.a.i.1:
+ // If varEnvRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ scripts: [
+ 'let a; eval("var a;");',
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-evaldeclarationinstantiation 5.a.i.1:
+ // If varEnvRec.HasLexicalDeclaration(name) is true, throw a SyntaxError
+ // exception.
+ scripts: [
+ 'let a; eval("function a() {}");',
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-evaldeclarationinstantiation 5.d.ii.2.a.i:
+ // Throw a SyntaxError exception.
+ scripts: [
+ '(function() { let a; eval("var a;"); })();',
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-evaldeclarationinstantiation 5.d.ii.2.a.i:
+ // Throw a SyntaxError exception.
+ scripts: [
+ '(function() { let a; eval("function a() {}"); })();',
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-globaldeclarationinstantiation 5.d:
+ // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+ scripts: [
+ 'let NaN;',
+ ],
+ expectedError: "SyntaxError",
+ },
+ {
+ // ES#sec-globaldeclarationinstantiation 5.d:
+ // If hasRestrictedGlobal is true, throw a SyntaxError exception.
+ scripts: [
+ 'function NaN() {}',
+ ],
+ expectedError: "SyntaxError",
+ },
+
+ {
+ // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+ // If fnDefinable is false, throw a TypeError exception.
+ scripts: [
+ 'eval("function NaN() {}");',
+ ],
+ expectedError: "TypeError",
+ },
+ {
+ // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+ // If fnDefinable is false, throw a TypeError exception.
+ scripts: [
+ `
+ let a;
+ try {
+ eval("function a() {}");
+ } catch (e) {}
+ eval("function NaN() {}");
+ `,
+ ],
+ expectedError: "TypeError",
+ },
+ {
+ // ES#sec-evaldeclarationinstantiation 8.a.iv.1.b:
+ // If fnDefinable is false, throw a TypeError exception.
+ scripts: [
+ `
+ eval("
+ function f() {
+ function b() {
+ (0, eval)('function NaN() {}');
+ }
+ b();
+ }
+ f();
+ ");
+ `.replace(/"/g, '`'),
+ ],
+ expectedError: "TypeError",
+ },
+];
+
+tests.forEach(function(test) {
+ doTest(test.scripts, test.expectedError);
+});
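
For readers without the spec open: the cases above split into early
SyntaxErrors (var/lexical clashes detected before the code runs) and runtime
TypeErrors (a function that turns out to be undefinable). A self-contained
sketch of the two eval-driven cases; run it as a classic sloppy script, since
module code gives strict eval its own var scope:

let a;
try {
  eval("var a;");             // the hoisted var would clash with lexical `a`
} catch (e) {
  console.log(e instanceof SyntaxError);  // true
}
try {
  eval("function NaN() {}");  // global NaN is non-configurable and
                              // non-writable, so the function is undefinable
} catch (e) {
  console.log(e instanceof TypeError);    // true
}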
diff --git a/deps/v8/test/mjsunit/regress/regress-105.js b/deps/v8/test/mjsunit/regress/regress-105.js
index 877cb82317..8b8030ffec 100644
--- a/deps/v8/test/mjsunit/regress/regress-105.js
+++ b/deps/v8/test/mjsunit/regress/regress-105.js
@@ -26,12 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var custom_valueOf = function() {
- assertEquals(null, custom_valueOf.caller);
+ assertEquals(Number, custom_valueOf.caller);
return 2;
}
var custom_toString = function() {
- assertEquals(null, custom_toString.caller);
+ assertEquals(String, custom_toString.caller);
return "I used to be an adventurer like you";
}
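
The flipped expectations rely on the legacy sloppy-mode `caller` property,
which names whichever function is currently invoking you; the change asserts
that the implicit coercions are now performed by the Number and String
builtins themselves. A minimal sketch of the mechanism (sloppy mode only;
strict functions poison `caller`):

function probe() { return probe.caller; }
function outer() { return probe(); }
console.log(outer() === outer);  // true: caller reports the active invoker
console.log(probe.caller);       // null once probe is no longer executing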
diff --git a/deps/v8/test/mjsunit/regress/regress-1132.js b/deps/v8/test/mjsunit/regress/regress-1132.js
index a5cb0a1d5f..adb56b0fa8 100644
--- a/deps/v8/test/mjsunit/regress/regress-1132.js
+++ b/deps/v8/test/mjsunit/regress/regress-1132.js
@@ -28,7 +28,7 @@
// Test the case when exception is thrown from the parser when lazy
// compiling a function.
-// Flags: --stack-size=46
+// Flags: --stack-size=100
// NOTE: stack size constant above has been empirically chosen.
// If the test starts to fail in Genesis, consider increasing this constant.
diff --git a/deps/v8/test/mjsunit/regress/regress-1246.js b/deps/v8/test/mjsunit/regress/regress-1246.js
deleted file mode 100644
index ca425ec2b7..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-1246.js
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This regression tests the behaviour of the parseInt function when
-// the given radix is not a SMI.
-
-// Flags: --allow-natives-syntax
-
-var nonSmi10 = Math.log(Math.exp(10));
-var nonSmi16 = Math.log(Math.exp(16));
-
-assertTrue(!%_IsSmi(nonSmi10) && nonSmi10 == 10);
-assertTrue(!%_IsSmi(nonSmi16) && nonSmi16 == 16);
-
-// Giving these values as the radix argument triggers radix detection.
-var radix_detect = [0, -0, NaN, Infinity, -Infinity, undefined, null,
- "0", "-0", "a"];
-
-// These values will result in an integer radix outside of the valid range.
-var radix_invalid = [1, 37, -2, "-2", "37"];
-
-// These values will trigger decimal parsing.
-var radix10 = [10, 10.1, "10", "10.1", nonSmi10];
-
-// These values will trigger hexadecimal parsing.
-var radix16 = [16, 16.1, 0x10, "0X10", nonSmi16];
-
-for (var i = 0; i < radix_detect.length; i++) {
- var radix = radix_detect[i];
- assertEquals(NaN, parseInt("", radix));
- assertEquals(23, parseInt("23", radix));
- assertEquals(0xaf, parseInt("0xaf", radix));
- assertEquals(NaN, parseInt("af", radix));
-}
-
-for (var i = 0; i < radix_invalid.length; i++) {
- var radix = radix_invalid[i];
- assertEquals(NaN, parseInt("", radix));
- assertEquals(NaN, parseInt("23", radix));
- assertEquals(NaN, parseInt("0xaf", radix));
- assertEquals(NaN, parseInt("af", radix));
-}
-
-for (var i = 0; i < radix10.length; i++) {
- var radix = radix10[i];
- assertEquals(NaN, parseInt("", radix));
- assertEquals(23, parseInt("23", radix));
- assertEquals(0, parseInt("0xaf", radix));
- assertEquals(NaN, parseInt("af", radix));
-}
-
-for (var i = 0; i < radix16.length; i++) {
- var radix = radix16[i];
- assertEquals(NaN, parseInt("", radix));
- assertEquals(0x23, parseInt("23", radix));
- assertEquals(0xaf, parseInt("0xaf", radix));
- assertEquals(0xaf, parseInt("af", radix));
-}
diff --git a/deps/v8/test/mjsunit/regress/regress-1403.js b/deps/v8/test/mjsunit/regress/regress-1403.js
index f2520ccbc9..91df6d65f6 100644
--- a/deps/v8/test/mjsunit/regress/regress-1403.js
+++ b/deps/v8/test/mjsunit/regress/regress-1403.js
@@ -28,7 +28,7 @@
// See: http://code.google.com/p/v8/issues/detail?id=1403
a = [];
-Object.prototype.__proto__ = { __proto__: null };
+assertThrows(() => Object.prototype.__proto__ = { __proto__: null }, TypeError);
a.shift();
a = [];
diff --git a/deps/v8/test/mjsunit/regress/regress-1980.js b/deps/v8/test/mjsunit/regress/regress-1980.js
index d87ff45074..e1e492b883 100644
--- a/deps/v8/test/mjsunit/regress/regress-1980.js
+++ b/deps/v8/test/mjsunit/regress/regress-1980.js
@@ -27,6 +27,8 @@
// See: http://code.google.com/p/v8/issues/detail?id=1980
+var msg = "Method Error.prototype.toString called on incompatible receiver ";
+
var invalid_this = [ "invalid", 23, undefined, null ];
for (var i = 0; i < invalid_this.length; i++) {
var exception = false;
@@ -34,7 +36,7 @@ for (var i = 0; i < invalid_this.length; i++) {
Error.prototype.toString.call(invalid_this[i]);
} catch (e) {
exception = true;
- assertEquals("Error.prototype.toString called on non-object", e.message);
+ assertEquals(msg + invalid_this[i], e.message);
}
assertTrue(exception);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 6e52bcad78..2634c80c66 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --allow-natives-syntax
+// Flags: --use-osr --allow-natives-syntax --ignition-osr --turbo-from-bytecode
function f() {
do {
diff --git a/deps/v8/test/mjsunit/regress/regress-3229.js b/deps/v8/test/mjsunit/regress/regress-3229.js
index 419cade8cd..53e14cd541 100644
--- a/deps/v8/test/mjsunit/regress/regress-3229.js
+++ b/deps/v8/test/mjsunit/regress/regress-3229.js
@@ -12,7 +12,7 @@ testEscapes("\\/", /\//);
testEscapes("\\/\\/", /\/\//);
testEscapes("\\/", new RegExp("/"));
testEscapes("\\/", new RegExp("\\/"));
-testEscapes("\\\\/", new RegExp("\\\\/"));
+testEscapes("\\\\\\/", new RegExp("\\\\/"));
testEscapes("\\/\\/", new RegExp("\\/\\/"));
testEscapes("\\/\\/\\/\\/", new RegExp("////"));
testEscapes("\\/\\/\\/\\/", new RegExp("\\//\\//"));
diff --git a/deps/v8/test/mjsunit/regress/regress-3315.js b/deps/v8/test/mjsunit/regress/regress-3315.js
deleted file mode 100644
index bfd7df29b8..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-3315.js
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --harmony-object-observe
-
-var indexZeroCallCount = 0;
-var indexOneCallCount = 0;
-var lengthCallCount = 0;
-var acceptList = {
- get 0() {
- indexZeroCallCount++;
- return 'foo';
- },
- get 1() {
- indexOneCallCount++;
- return 'bar';
- },
- get length() {
- lengthCallCount++;
- return 1;
- }
-};
-
-Object.observe({}, function(){}, acceptList);
-assertEquals(1, lengthCallCount);
-assertEquals(1, indexZeroCallCount);
-assertEquals(0, indexOneCallCount);
diff --git a/deps/v8/test/mjsunit/regress/regress-349870.js b/deps/v8/test/mjsunit/regress/regress-349870.js
deleted file mode 100644
index 72df05524b..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-349870.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-var r = /x/;
-Object.freeze(r);
-r.compile("x");
diff --git a/deps/v8/test/mjsunit/regress/regress-353004.js b/deps/v8/test/mjsunit/regress/regress-353004.js
index 658fd6dbeb..7e1fb7e939 100644
--- a/deps/v8/test/mjsunit/regress/regress-353004.js
+++ b/deps/v8/test/mjsunit/regress/regress-353004.js
@@ -41,19 +41,21 @@ assertThrows(function() {
var buffer5 = new ArrayBuffer(100 * 1024);
-var buffer6 = buffer5.slice({valueOf : function() {
- %ArrayBufferNeuter(buffer5);
- return 0;
-}}, 100 * 1024 * 1024);
-assertEquals(0, buffer6.byteLength);
+assertThrows(function() {
+ buffer5.slice({valueOf : function() {
+ %ArrayBufferNeuter(buffer5);
+ return 0;
+ }}, 100 * 1024 * 1024);
+}, TypeError);
var buffer7 = new ArrayBuffer(100 * 1024 * 1024);
-var buffer8 = buffer7.slice(0, {valueOf : function() {
- %ArrayBufferNeuter(buffer7);
- return 100 * 1024 * 1024;
-}});
-assertEquals(0, buffer8.byteLength);
+assertThrows(function() {
+ buffer7.slice(0, {valueOf : function() {
+ %ArrayBufferNeuter(buffer7);
+ return 100 * 1024 * 1024;
+ }});
+}, TypeError);
var buffer9 = new ArrayBuffer(1024);
var array9 = new Uint8Array(buffer9);
diff --git a/deps/v8/test/mjsunit/regress/regress-353551.js b/deps/v8/test/mjsunit/regress/regress-353551.js
index c6e7856d34..ea5a234658 100644
--- a/deps/v8/test/mjsunit/regress/regress-353551.js
+++ b/deps/v8/test/mjsunit/regress/regress-353551.js
@@ -30,7 +30,7 @@ function __f_3(x) {
var __v_1 = arguments;
__v_1[1000] = 123;
depth++;
- if (depth > 3000) return;
+ if (depth > 2500) return;
function __f_4() {
++__v_1[0];
__f_3(0.5);
diff --git a/deps/v8/test/mjsunit/regress/regress-356589.js b/deps/v8/test/mjsunit/regress/regress-356589.js
deleted file mode 100644
index a47f51bac1..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-356589.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-object-observe
-
-// This test passes if it does not crash in debug mode
-
-arr = ['a', 'b', 'c', 'd'];
-Object.defineProperty(arr.__proto__, '0', { get: function(){} });
-Object.defineProperty(arr, '2', {get: function(){} });
-Object.observe(arr, function() {});
-arr.length = 2;
diff --git a/deps/v8/test/mjsunit/regress/regress-3926.js b/deps/v8/test/mjsunit/regress/regress-3926.js
index 4720c1b908..4d9b2983d2 100644
--- a/deps/v8/test/mjsunit/regress/regress-3926.js
+++ b/deps/v8/test/mjsunit/regress/regress-3926.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-let
-
// See: http://code.google.com/p/v8/issues/detail?id=3926
// Switch statements should disable hole check elimination
diff --git a/deps/v8/test/mjsunit/regress/regress-403292.js b/deps/v8/test/mjsunit/regress/regress-403292.js
deleted file mode 100644
index 2e24d48ac4..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-403292.js
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --expose-natives-as=builtins --expose-gc
-
-var SetIterator = builtins.ImportNow("SetIterator");
-var MapIterator = builtins.ImportNow("MapIterator");
-var __v_7 = [];
-var __v_8 = {};
-var __v_10 = {};
-var __v_11 = this;
-var __v_12 = {};
-var __v_13 = {};
-var __v_14 = "";
-var __v_15 = {};
-try {
-__v_1 = {x:0};
-%OptimizeFunctionOnNextCall(__f_1);
-assertEquals("good", __f_1());
-delete __v_1.x;
-assertEquals("good", __f_1());
-} catch(e) { print("Caught: " + e); }
-try {
-__v_3 = new Set();
-__v_5 = new SetIterator(__v_3, -12);
-__v_4 = new Map();
-__v_6 = new MapIterator(__v_4, 2);
-__f_3(Array);
-} catch(e) { print("Caught: " + e); }
-function __f_4(__v_8, filter) {
- function __f_6(v) {
- for (var __v_4 in v) {
- for (var __v_4 in v) {}
- }
- %OptimizeFunctionOnNextCall(filter);
- return filter(v);
- }
- var __v_7 = eval(__v_8);
- gc();
- return __f_6(__v_7);
-}
-function __f_5(__v_6) {
- var __v_5 = new Array(__v_6);
- for (var __v_4 = 0; __v_4 < __v_6; __v_4++) __v_5.push('{}');
- return __v_5;
-}
-try {
-try {
- __v_8.test("\x80");
- assertUnreachable();
-} catch (e) {
-}
-gc();
-} catch(e) { print("Caught: " + e); }
diff --git a/deps/v8/test/mjsunit/regress/regress-417709a.js b/deps/v8/test/mjsunit/regress/regress-417709a.js
deleted file mode 100644
index 5500be2cf0..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-417709a.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-object-observe
-// Flags: --stack-size=100
-
-var a = [];
-
-Object.observe(a, function() {});
-
-function f(a, x) {
- a.length = x;
- f(a, x + 1);
-}
-
-assertThrows(function() { f(a, 1); }, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-417709b.js b/deps/v8/test/mjsunit/regress/regress-417709b.js
deleted file mode 100644
index 4d9572e7d7..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-417709b.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-object-observe --stack-size=100
-
-var a = [];
-
-Array.observe(a, function() {});
-
-function f(a, x) {
- a.length = x;
- f(a, x + 1);
-}
-
-assertThrows(function() { f(a, 1); }, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4659.js b/deps/v8/test/mjsunit/regress/regress-4659.js
index ff436bec1b..8992bb89fe 100644
--- a/deps/v8/test/mjsunit/regress/regress-4659.js
+++ b/deps/v8/test/mjsunit/regress/regress-4659.js
@@ -1,8 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-function-name
var obj = {
get longerName(){
diff --git a/deps/v8/test/mjsunit/regress/regress-4665-2.js b/deps/v8/test/mjsunit/regress/regress-4665-2.js
deleted file mode 100644
index b94301eea8..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-4665-2.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-species
-
-// First test case
-
-function FirstBuffer () {}
-FirstBuffer.prototype.__proto__ = Uint8Array.prototype
-FirstBuffer.__proto__ = Uint8Array
-
-var buf = new Uint8Array(10)
-buf.__proto__ = FirstBuffer.prototype
-
-var buf2 = buf.subarray(2)
-assertEquals(8, buf2.length);
-
-// Second test case
-
-function SecondBuffer (arg) {
- var arr = new Uint8Array(arg)
- arr.__proto__ = SecondBuffer.prototype
- return arr
-}
-SecondBuffer.prototype.__proto__ = Uint8Array.prototype
-SecondBuffer.__proto__ = Uint8Array
-
-var buf3 = new SecondBuffer(10)
-
-var buf4 = buf3.subarray(2)
-
-assertEquals(8, buf4.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-4665.js b/deps/v8/test/mjsunit/regress/regress-4665.js
index 9d7307acc7..a75d68f105 100644
--- a/deps/v8/test/mjsunit/regress/regress-4665.js
+++ b/deps/v8/test/mjsunit/regress/regress-4665.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --noharmony-species
-
// First test case
function FirstBuffer () {}
diff --git a/deps/v8/test/mjsunit/regress/regress-4693.js b/deps/v8/test/mjsunit/regress/regress-4693.js
index 13b4e2b68e..2c31cd95e6 100644
--- a/deps/v8/test/mjsunit/regress/regress-4693.js
+++ b/deps/v8/test/mjsunit/regress/regress-4693.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy-function
// In sloppy mode we allow function redeclarations within blocks for webcompat.
(function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-4703.js b/deps/v8/test/mjsunit/regress/regress-4703.js
new file mode 100644
index 0000000000..dad8a97874
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4703.js
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var all_scopes = exec_state.frame().allScopes();
+ assertEquals([ debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ all_scopes.map(scope => scope.scopeType()));
+ } catch (e) {
+ exception = e;
+ }
+}
+
+debug.Debug.setListener(listener);
+
+(function(arg, ...rest) {
+ var one = 1;
+ function inner() {
+ one;
+ arg;
+ }
+ debugger;
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-4815.js b/deps/v8/test/mjsunit/regress/regress-4815.js
new file mode 100644
index 0000000000..68764865f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4815.js
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var thrower = { [Symbol.toPrimitive]: () => FAIL };
+
+// Tests that a native conversion function is included in the
+// stack trace.
+function testTraceNativeConversion(nativeFunc) {
+ var nativeFuncName = nativeFunc.name;
+ try {
+ nativeFunc(thrower);
+ assertUnreachable(nativeFuncName);
+ } catch (e) {
+ assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+ }
+}
+
+// C++ builtins.
+testTraceNativeConversion(Math.acos);
+testTraceNativeConversion(Math.asin);
+testTraceNativeConversion(Math.fround);
+testTraceNativeConversion(Math.imul);
+
+
+function testBuiltinInStackTrace(script, expectedString) {
+ try {
+ eval(script);
+ assertUnreachable(expectedString);
+ } catch (e) {
+ assertTrue(e.stack.indexOf(expectedString) >= 0, expectedString);
+ }
+}
+
+// C++ builtins.
+testBuiltinInStackTrace("Boolean.prototype.toString.call(thrower);",
+ "at Object.toString");
+
+// Constructor builtins.
+testBuiltinInStackTrace("new Date(thrower);", "at new Date");
+
+// Ensure we correctly pick up the receiver's string tag.
+testBuiltinInStackTrace("Math.acos(thrower);", "at Math.acos");
+testBuiltinInStackTrace("Math.asin(thrower);", "at Math.asin");
+testBuiltinInStackTrace("Math.fround(thrower);", "at Math.fround");
+testBuiltinInStackTrace("Math.imul(thrower);", "at Math.imul");
+
+// As above, but function passed as an argument and then called.
+testBuiltinInStackTrace("((f, x) => f(x))(Math.acos, thrower);", "at acos");
+testBuiltinInStackTrace("((f, x) => f(x))(Math.asin, thrower);", "at asin");
+testBuiltinInStackTrace("((f, x) => f(x))(Math.fround, thrower);", "at fround");
+testBuiltinInStackTrace("((f, x) => f(x))(Math.imul, thrower);", "at imul");
diff --git a/deps/v8/test/mjsunit/regress/regress-4908.js b/deps/v8/test/mjsunit/regress/regress-4908.js
new file mode 100644
index 0000000000..ec618b32c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4908.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --always-opt --no-lazy
+
+(function() { ((s = 17, y = s) => s)() })();
diff --git a/deps/v8/test/mjsunit/regress/regress-4945.js b/deps/v8/test/mjsunit/regress/regress-4945.js
new file mode 100644
index 0000000000..8e595e6fde
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4945.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* g(o) {
+ yield 'x' in o;
+}
+
+assertTrue(g({x: 1}).next().value);
+assertFalse(g({}).next().value);
diff --git a/deps/v8/test/mjsunit/regress/regress-4964.js b/deps/v8/test/mjsunit/regress/regress-4964.js
new file mode 100644
index 0000000000..d834708667
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4964.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Neutered source
+var ab = new ArrayBuffer(10);
+ab.constructor = { get [Symbol.species]() { %ArrayBufferNeuter(ab); return ArrayBuffer; } };
+assertThrows(() => ab.slice(0), TypeError);
+
+// Neutered target
+class NeuteredArrayBuffer extends ArrayBuffer {
+ constructor(...args) {
+ super(...args);
+ %ArrayBufferNeuter(this);
+ }
+}
+
+var ab2 = new ArrayBuffer(10);
+ab2.constructor = NeuteredArrayBuffer;
+assertThrows(() => ab2.slice(0), TypeError);
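
For context, Symbol.species is the hook slice() consults to pick the result's
constructor, which is why the test can detach the buffer from inside a species
getter. A portable sketch of the lookup itself, using Array:

class MyArray extends Array {}
console.log(new MyArray(1, 2).map(x => x) instanceof MyArray);  // true

class PlainResult extends Array {
  static get [Symbol.species]() { return Array; }  // opt out of subclassing
}
console.log(new PlainResult(1, 2).map(x => x) instanceof PlainResult);  // false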
diff --git a/deps/v8/test/mjsunit/regress/regress-4967.js b/deps/v8/test/mjsunit/regress/regress-4967.js
new file mode 100644
index 0000000000..9b36405cf4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4967.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => {
+ new class extends Object {
+ constructor() { (() => delete super[super()])(); }
+ }
+}, ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4971.js b/deps/v8/test/mjsunit/regress/regress-4971.js
new file mode 100644
index 0000000000..041f6c2a57
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4971.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestDeoptInNamedSuperGetter() {
+ class C { m() { return 23 } }
+ class D extends C { f() { return super.boom() } }
+
+ var should_deoptimize_caller = false;
+ Object.defineProperty(C.prototype, "boom", { get: function() {
+ if (should_deoptimize_caller) %DeoptimizeFunction(D.prototype.f);
+ return this.m
+ }})
+
+ assertEquals(23, new D().f());
+ assertEquals(23, new D().f());
+ %OptimizeFunctionOnNextCall(D.prototype.f);
+ assertEquals(23, new D().f());
+ should_deoptimize_caller = true;
+ assertEquals(23, new D().f());
+})();
+
+(function TestDeoptInKeyedSuperGetter() {
+ class C { m() { return 23 } }
+ class D extends C { f(name) { return super[name]() } }
+
+ var should_deoptimize_caller = false;
+ Object.defineProperty(C.prototype, "boom", { get: function() {
+ if (should_deoptimize_caller) %DeoptimizeFunction(D.prototype.f);
+ return this.m
+ }})
+
+ assertEquals(23, new D().f("boom"));
+ assertEquals(23, new D().f("boom"));
+ %OptimizeFunctionOnNextCall(D.prototype.f);
+ assertEquals(23, new D().f("boom"));
+ should_deoptimize_caller = true;
+ assertEquals(23, new D().f("boom"));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5004.js b/deps/v8/test/mjsunit/regress/regress-5004.js
new file mode 100644
index 0000000000..234f5d4eb6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5004.js
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function assertAsync(b, s) {
+ if (!b) {
+ %AbortJS(" FAILED!")
+ }
+}
+
+class P extends Promise {
+ constructor() {
+ super(...arguments)
+ return new Proxy(this, {
+ get: (_, key) => {
+ return key == 'then' ?
+ this.then.bind(this) :
+ this.constructor.resolve(20)
+ }
+ })
+ }
+}
+
+let p = P.resolve(10)
+p.key.then(v => assertAsync(v === 20));
diff --git a/deps/v8/test/mjsunit/regress/regress-5018.js b/deps/v8/test/mjsunit/regress/regress-5018.js
new file mode 100644
index 0000000000..22025dc2d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5018.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var dv = new DataView(new ArrayBuffer(4), 2);
+
+function getByteLength(a) {
+ return a.byteLength;
+}
+
+assertEquals(2, getByteLength(dv));
+assertEquals(2, getByteLength(dv));
+
+Object.defineProperty(dv.__proto__, 'byteLength', {value: 42});
+
+assertEquals(42, dv.byteLength);
+assertEquals(42, getByteLength(dv));
+
+function getByteOffset(a) {
+ return a.byteOffset;
+}
+
+assertEquals(2, getByteOffset(dv));
+assertEquals(2, getByteOffset(dv));
+
+Object.defineProperty(dv.__proto__, 'byteOffset', {value: 42});
+
+assertEquals(42, dv.byteOffset);
+assertEquals(42, getByteOffset(dv));
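
The redefinition works because byteLength and byteOffset are configurable
accessors on DataView.prototype rather than own properties of each view, so a
single defineProperty call shadows them for every instance. A quick check:

var dv = new DataView(new ArrayBuffer(4), 2);
console.log(Object.getOwnPropertyNames(dv).length);  // 0: nothing own
var desc = Object.getOwnPropertyDescriptor(DataView.prototype, "byteLength");
console.log(typeof desc.get, desc.configurable);     // "function" true
console.log(desc.get.call(dv));                      // 2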
diff --git a/deps/v8/test/mjsunit/regress/regress-5036.js b/deps/v8/test/mjsunit/regress/regress-5036.js
index 036edd949f..77bd242490 100644
--- a/deps/v8/test/mjsunit/regress/regress-5036.js
+++ b/deps/v8/test/mjsunit/regress/regress-5036.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-unicode-regexps
-
assertEquals(["1\u212a"], /\d\w/ui.exec("1\u212a"));
diff --git a/deps/v8/test/mjsunit/regress/regress-5071.js b/deps/v8/test/mjsunit/regress/regress-5071.js
new file mode 100644
index 0000000000..41c1250031
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5071.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+function listener(event, exec_state, event_data, data) {
+ assertEquals(2, exec_state.frameCount());
+ assertEquals("a", exec_state.frame(0).localName(0));
+ assertEquals("1", exec_state.frame(0).localValue(0).value());
+ assertEquals(1, exec_state.frame(0).localCount());
+}
+
+Debug.setListener(listener);
+
+function f() {
+ var a = 1;
+ {
+ let b = 2;
+ debugger;
+ }
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-5085.js b/deps/v8/test/mjsunit/regress/regress-5085.js
new file mode 100644
index 0000000000..0ed034dc2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5085.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ return x instanceof Proxy;
+}
+
+assertFalse(foo({}));
+assertFalse(foo({}));
+%OptimizeFunctionOnNextCall(foo);
+assertFalse(foo({}));
diff --git a/deps/v8/test/mjsunit/regress/regress-5106.js b/deps/v8/test/mjsunit/regress/regress-5106.js
new file mode 100644
index 0000000000..52d550a878
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5106.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function* g1() {
+ try {
+ throw {};
+ } catch ({a = class extends (yield) {}}) {
+ }
+}
+g1().next(); // crashes without fix
+
+function* g2() {
+ let x = function(){};
+ try {
+ throw {};
+ } catch ({b = class extends x {}}) {
+ }
+}
+g2().next(); // crashes without fix
+
+function* g3() {
+ let x = 42;
+ try {
+ throw {};
+ } catch ({c = (function() { return x })()}) {
+ }
+}
+g3().next(); // throws a ReferenceError without fix
diff --git a/deps/v8/test/mjsunit/regress/regress-5164.js b/deps/v8/test/mjsunit/regress/regress-5164.js
new file mode 100644
index 0000000000..5c13937821
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5164.js
@@ -0,0 +1,44 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-debug-as debug
+
+var failure = null;
+var args;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != debug.Debug.DebugEvent.Break) return;
+ try {
+ args = exec_state.frame(0).evaluate('arguments').value();
+ } catch (e) {
+ failure = e;
+ }
+}
+
+debug.Debug.setListener(listener);
+
+function* gen(a, b) {
+ debugger;
+ yield a;
+ yield b;
+}
+
+var foo = gen(1, 2);
+
+foo.next()
+assertEquals(2, args.length);
+assertEquals(undefined, args[0]);
+assertEquals(undefined, args[1]);
+
+foo.next()
+assertEquals(2, args.length);
+assertEquals(undefined, args[0]);
+assertEquals(undefined, args[1]);
+
+foo.next()
+assertEquals(2, args.length);
+assertEquals(undefined, args[0]);
+assertEquals(undefined, args[1]);
+
+assertNull(failure);
diff --git a/deps/v8/test/mjsunit/regress/regress-5173.js b/deps/v8/test/mjsunit/regress/regress-5173.js
new file mode 100644
index 0000000000..74f58cc473
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5173.js
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var thrower = { [Symbol.toPrimitive]: () => FAIL };
+
+// Tests that a native conversion function is included in the
+// stack trace.
+function testTraceNativeConversion(nativeFunc) {
+ var nativeFuncName = nativeFunc.name;
+ try {
+ nativeFunc(thrower);
+ assertUnreachable(nativeFuncName);
+ } catch (e) {
+ assertTrue(e.stack.indexOf(nativeFuncName) >= 0, nativeFuncName);
+ }
+}
+
+testTraceNativeConversion(Math.max);
+testTraceNativeConversion(Math.min);
+
+function testBuiltinInStackTrace(script, expectedString) {
+ try {
+ eval(script);
+ assertUnreachable(expectedString);
+ } catch (e) {
+ assertTrue(e.stack.indexOf(expectedString) >= 0, expectedString);
+ }
+}
+
+testBuiltinInStackTrace("Date.prototype.getDate.call('')", "at String.getDate");
+testBuiltinInStackTrace("Date.prototype.getUTCDate.call('')",
+ "at String.getUTCDate");
+testBuiltinInStackTrace("Date.prototype.getTime.call('')", "at String.getTime");
+
+testBuiltinInStackTrace("Number(thrower);", "at Number");
+testBuiltinInStackTrace("new Number(thrower);", "at new Number");
+testBuiltinInStackTrace("String(thrower);", "at String");
+testBuiltinInStackTrace("new String(thrower);", "at new String");
+
+// Ensure we correctly pick up the receiver's string tag.
+testBuiltinInStackTrace("Math.acos(thrower);", "at Math.acos");
+testBuiltinInStackTrace("Math.asin(thrower);", "at Math.asin");
+testBuiltinInStackTrace("Math.fround(thrower);", "at Math.fround");
+testBuiltinInStackTrace("Math.imul(thrower);", "at Math.imul");
+
+// As above, but function passed as an argument and then called.
+testBuiltinInStackTrace("((f, x) => f(x))(Math.acos, thrower);", "at acos");
+testBuiltinInStackTrace("((f, x) => f(x))(Math.asin, thrower);", "at asin");
+testBuiltinInStackTrace("((f, x) => f(x))(Math.fround, thrower);", "at fround");
+testBuiltinInStackTrace("((f, x) => f(x))(Math.imul, thrower);", "at imul");
diff --git a/deps/v8/test/mjsunit/regress/regress-5174.js b/deps/v8/test/mjsunit/regress/regress-5174.js
new file mode 100644
index 0000000000..390d24e682
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5174.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals([], Object.keys(new Proxy([], {})));
+assertEquals([], Object.keys(new Proxy(/regex/, {})));
diff --git a/deps/v8/test/mjsunit/regress/regress-5178.js b/deps/v8/test/mjsunit/regress/regress-5178.js
new file mode 100644
index 0000000000..cf10ae7187
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5178.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows(() => {
+ try { throw {} } catch({a=b, b}) { a+b }
+}, ReferenceError);
+
+try { throw {a: 42} } catch({a, b=a}) { assertEquals(42, b) };
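
The asymmetry comes from left-to-right evaluation of destructuring: a default
may read bindings declared to its left, while reading one to its right hits
the temporal dead zone. A standalone sketch:

try {
  const { a = b, b } = {};  // a's default reads b before b is initialized
} catch (e) {
  console.log(e instanceof ReferenceError);  // true
}
const { x, y = x } = { x: 42 };
console.log(y);             // 42: x is initialized by the time y defaults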
diff --git a/deps/v8/test/mjsunit/regress/regress-5181.js b/deps/v8/test/mjsunit/regress/regress-5181.js
new file mode 100644
index 0000000000..d8dfc4a0b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5181.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var target = Object.create(null);
+var proxy = new Proxy(target, {
+ ownKeys: function() {
+ return ['a'];
+ }
+});
+for (var key in proxy) ;
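
For-in over a proxy first asks the ownKeys trap for the key list and then
checks each key's enumerability via getOwnPropertyDescriptor; keys with no
descriptor (as in the test, whose target really is empty) are skipped. A
sketch that supplies both traps so the key is actually visited:

var proxy = new Proxy(Object.create(null), {
  ownKeys() { return ["a"]; },
  getOwnPropertyDescriptor() {
    return { value: 1, enumerable: true, configurable: true };
  }
});
for (var key in proxy) console.log(key);  // "a"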
diff --git a/deps/v8/test/mjsunit/regress/regress-520029.js b/deps/v8/test/mjsunit/regress/regress-520029.js
index 299dd75017..9a1d200307 100644
--- a/deps/v8/test/mjsunit/regress/regress-520029.js
+++ b/deps/v8/test/mjsunit/regress/regress-520029.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy-let --harmony-sloppy
-
// Test that hoisting a function out of a lexical scope does not
// lead to a parsing error
diff --git a/deps/v8/test/mjsunit/regress/regress-5205.js b/deps/v8/test/mjsunit/regress/regress-5205.js
new file mode 100644
index 0000000000..0d88f45053
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5205.js
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-global
+
+(function TestGCDuringToObjectForWith() {
+ function f(o) {
+ if (o == 'warmup') { return g() }
+ with (o) { return x }
+ }
+ function g() {
+ // Only a marker function serving as a weak embedded object.
+ }
+
+ // Warm up 'f' so that weak embedded object 'g' will be used.
+ f('warmup');
+ f('warmup');
+ g = null;
+
+ // Test that 'f' behaves correctly unoptimized.
+ assertEquals(23, f({ x:23 }));
+ assertEquals(42, f({ x:42 }));
+
+ // Test that 'f' behaves correctly optimized.
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(65, f({ x:65 }));
+
+ // Test that 'f' behaves correctly on numbers.
+ Number.prototype.x = 99;
+ assertEquals(99, f(0));
+
+ // Make sure the next [[ToObject]] allocation triggers GC. This in turn will
+ // deoptimize 'f' because it has the weak embedded object 'g' in the code.
+ %SetAllocationTimeout(1000, 1, false);
+ assertEquals(99, f(0));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5207.js b/deps/v8/test/mjsunit/regress/regress-5207.js
new file mode 100644
index 0000000000..1472b541b5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5207.js
@@ -0,0 +1,30 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+'use strict';
+var Debug = debug.Debug;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var scopes = exec_state.frame(0).allScopes();
+ assertEquals(debug.ScopeType.Eval, scopes[0].scopeType());
+ assertEquals(1, scopes[0].scopeObject().value().a);
+ assertEquals(debug.ScopeType.Script, scopes[1].scopeType());
+ assertEquals(undefined, scopes[1].scopeObject().value().a);
+ assertEquals(debug.ScopeType.Global, scopes[2].scopeType());
+ assertEquals(undefined, scopes[2].scopeObject().value().a);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+// Eval inherits strict mode.
+eval("var a = 1; debugger;");
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-5213.js b/deps/v8/test/mjsunit/regress/regress-5213.js
new file mode 100644
index 0000000000..831e1346d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5213.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See http://code.google.com/p/v8/issues/detail?id=5213
+
+assertEquals(0, Math.pow(2,-2147483648));
+assertEquals(0, Math.pow(2,-9223372036854775808));
diff --git a/deps/v8/test/mjsunit/regress/regress-5214.js b/deps/v8/test/mjsunit/regress/regress-5214.js
new file mode 100644
index 0000000000..f90a590253
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5214.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See http://code.google.com/p/v8/issues/detail?id=5214
+
+
+assertEquals(Infinity, Math.pow(2, 0x80000000));
+assertEquals(Infinity, Math.pow(2, 0xc0000000));
+assertEquals(0, Math.pow(2, -0x80000000));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-no-change-exec.js b/deps/v8/test/mjsunit/regress/regress-5216.js
index 30b5050945..9097310fc2 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-no-change-exec.js
+++ b/deps/v8/test/mjsunit/regress/regress-5216.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --no-harmony-regexp-exec
+// Ensure that custom error constructors don't show up in stack traces.
class MyError extends Error { }
-RegExp.prototype.exec = () => { throw new MyError() };
-assertEquals(null, "foo".match(/bar/));
+assertFalse(new MyError().stack.includes("at MyError"));
diff --git a/deps/v8/test/mjsunit/regress/regress-5245.js b/deps/v8/test/mjsunit/regress/regress-5245.js
new file mode 100644
index 0000000000..9c4f6e7dbd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5245.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+// After captureStackTrace.
+
+var a = {};
+Error.captureStackTrace(a, Error);
+a.stack = 1; // Should not throw, stack should be writable.
+
+// After the standard Error constructor.
+
+var b = new Error();
+b.stack = 1; // Should not throw, stack should be writable.
+b.stack = 1; // Still writable.
+
+// After read access to stack.
+
+var c = new Error();
+var old_stack = c.stack;
+c.stack = 1; // Should not throw, stack should be writable.
+c.stack = 1; // Still writable.
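
Error.captureStackTrace is a V8-specific API: it installs a stack property on
an arbitrary object, and the optional second argument trims the trace so that
frames from that function upward are omitted. A Node/d8-style sketch:

function makeError(message) {
  var err = { message: message };
  Error.captureStackTrace(err, makeError);  // hide makeError's own frame
  return err;
}
var e = makeError("boom");
console.log(typeof e.stack);              // "string"
console.log(/makeError/.test(e.stack));   // false: that frame was trimmed
e.stack = "overwritten";                  // stays writable, per the test above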
diff --git a/deps/v8/test/mjsunit/regress/regress-5252.js b/deps/v8/test/mjsunit/regress/regress-5252.js
new file mode 100644
index 0000000000..682d3193ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5252.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --ignition --ignition-osr --turbo-from-bytecode
+
+(function TestNonLoopyLoop() {
+ function f() {
+ do {
+ %OptimizeOsr();
+ return 23;
+ } while(false)
+ }
+ assertEquals(23, f());
+ assertEquals(23, f());
+})();
+
+(function TestNonLoopyGenerator() {
+ function* g() {
+ do {
+ %OptimizeOsr();
+ yield 23;
+ yield 42;
+ } while(false)
+ return 999;
+ }
+ var gen = g();
+ assertEquals({ value:23, done:false }, gen.next());
+ assertEquals({ value:42, done:false }, gen.next());
+ assertEquals({ value:999, done:true }, gen.next());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5262.js b/deps/v8/test/mjsunit/regress/regress-5262.js
new file mode 100644
index 0000000000..394bb49ca5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5262.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition --ignition-osr --turbo-from-bytecode --allow-natives-syntax
+
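+// f is first OSRed, then tiered up via the recursive call; boom() overwrites
+// g and invalidates the code dependency installed for the g() call, and the
+// resulting deopt must still return 42 (see the inline comments below).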
+function g() { return 23 }
+function h() { return 42 }
+function boom(o) { o.g = h }
+function f(osr_and_recurse) {
+ if (osr_and_recurse) {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) %OptimizeOsr();
+ }
+ %OptimizeFunctionOnNextCall(f);
+ f(false); // Trigger tier-up due to recursive call.
+ boom(this); // Causes a deopt due to below dependency.
+ var x = g(); // Install dependency on the {g} function.
+ return x;
+ }
+ return 65;
+}
+assertEquals(65, f(false));
+assertEquals(65, f(false));
+assertEquals(42, f(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-5275-1.js b/deps/v8/test/mjsunit/regress/regress-5275-1.js
new file mode 100644
index 0000000000..542bae0602
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5275-1.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
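+// Defining an accessor for element 0 on Array.prototype must invalidate the
+// optimized code for foo: the store a[0] = x then goes to the setter, so the
+// element stays a hole and foo(1) yields [undefined].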
+function foo(x) {
+ var a = new Array(1);
+ a[0] = x;
+ return a;
+}
+
+assertEquals([1], foo(1));
+assertEquals([1], foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals([1], foo(1));
+Array.prototype.__defineSetter__("0", function() {});
+assertEquals([undefined], foo(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-5275-2.js b/deps/v8/test/mjsunit/regress/regress-5275-2.js
new file mode 100644
index 0000000000..2da422de97
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5275-2.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ var a = new Array(1);
+ a[0] = x;
+ return a;
+}
+
+assertEquals([1], foo(1));
+assertEquals([1], foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals([1], foo(1));
+Object.prototype.__defineSetter__("0", function() {});
+assertEquals([undefined], foo(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-5279.js b/deps/v8/test/mjsunit/regress/regress-5279.js
new file mode 100644
index 0000000000..847f5df054
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5279.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
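+// Repeatedly requesting OSR while a break is scheduled and a debug listener
+// is installed must not crash.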
+var Debug = debug.Debug;
+
+Debug.setListener(() => undefined);
+
+const myObj = {};
+
+for (let i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ %ScheduleBreak();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-5286.js b/deps/v8/test/mjsunit/regress/regress-5286.js
new file mode 100644
index 0000000000..210d986a66
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5286.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
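+// Optimized code for x % y must preserve the sign of a zero result: each
+// negative dividend below has to produce -0, including the INT_MIN cases.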
+(function() {
+ function foo(x, y) { return x % y; }
+
+ assertEquals(0, foo(2, 2));
+ assertEquals(0, foo(4, 4));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-0, foo(-8, 8));
+})();
+
+(function() {
+ function foo(x, y) { return x % y; }
+
+ assertEquals(0, foo(1, 1));
+ assertEquals(0, foo(2, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-0, foo(-3, 3));
+})();
+
+(function() {
+ function foo(x, y) { return x % y; }
+
+ assertEquals(0, foo(1, 1));
+ assertEquals(0, foo(2, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-0, foo(-2147483648, -1));
+})();
+
+(function() {
+ function foo(x, y) { return x % y; }
+
+ assertEquals(0, foo(1, 1));
+ assertEquals(0, foo(2, 2));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(-0, foo(-2147483648, -2147483648));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-5342.js b/deps/v8/test/mjsunit/regress/regress-5342.js
new file mode 100644
index 0000000000..8cdd808622
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5342.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
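+// The captureStackTrace frame itself must not show up in the collected
+// stack trace.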
+var o = {}
+Error.captureStackTrace(o);
+assertEquals(-1, o.stack.indexOf("captureStackTrace"));
diff --git a/deps/v8/test/mjsunit/regress/regress-536751.js b/deps/v8/test/mjsunit/regress/regress-536751.js
index 0707e008ea..a63fae3957 100644
--- a/deps/v8/test/mjsunit/regress/regress-536751.js
+++ b/deps/v8/test/mjsunit/regress/regress-536751.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-function --harmony-sloppy-let
// Flags: --no-harmony-restrictive-declarations
// At some point, this code led to DCHECK errors in debug mode
diff --git a/deps/v8/test/mjsunit/regress/regress-542099.js b/deps/v8/test/mjsunit/regress/regress-542099.js
index eef49538cc..6345fd468a 100644
--- a/deps/v8/test/mjsunit/regress/regress-542099.js
+++ b/deps/v8/test/mjsunit/regress/regress-542099.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-function
// Flags: --no-harmony-restrictive-declarations
// Previously, this caused a CHECK fail in debug mode
diff --git a/deps/v8/test/mjsunit/regress/regress-542100.js b/deps/v8/test/mjsunit/regress/regress-542100.js
index 70fb5dc147..c16e6284fa 100644
--- a/deps/v8/test/mjsunit/regress/regress-542100.js
+++ b/deps/v8/test/mjsunit/regress/regress-542100.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy --harmony-sloppy-function
// Flags: --no-harmony-restrictive-declarations
(function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-544991.js b/deps/v8/test/mjsunit/regress/regress-544991.js
index 911d8acc89..a9fd809a3b 100644
--- a/deps/v8/test/mjsunit/regress/regress-544991.js
+++ b/deps/v8/test/mjsunit/regress/regress-544991.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-species
-
'use strict';
var typedArray = new Int8Array(1);
diff --git a/deps/v8/test/mjsunit/regress/regress-575364.js b/deps/v8/test/mjsunit/regress/regress-575364.js
index c0652058fa..8671aec06b 100644
--- a/deps/v8/test/mjsunit/regress/regress-575364.js
+++ b/deps/v8/test/mjsunit/regress/regress-575364.js
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --expose-wasm --validate-asm --allow-natives-syntax
function f() {
"use asm";
}
assertFalse(Wasm == undefined);
-assertThrows(function() { Wasm.instantiateModuleFromAsm(f.toString()); });
+assertTrue(%IsNotAsmWasmCode(f));
diff --git a/deps/v8/test/mjsunit/regress/regress-585041.js b/deps/v8/test/mjsunit/regress/regress-585041.js
new file mode 100644
index 0000000000..c072ed2a15
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-585041.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
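+// The branch passing -2147483648 is never taken; optimizing boom must still
+// compile the keyed store with that huge negative index without crashing.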
+function f(arr, i) {
+ arr[i] = 50;
+}
+
+function boom(dummy) {
+ var arr = new Array(10);
+ f(arr, 10);
+ if (dummy) {
+ f(arr, -2147483648);
+ }
+}
+
+boom(false);
+%OptimizeFunctionOnNextCall(boom);
+boom(false);
diff --git a/deps/v8/test/mjsunit/regress/regress-592352.js b/deps/v8/test/mjsunit/regress/regress-592352.js
new file mode 100644
index 0000000000..7947fdba2c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-592352.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --validate-asm
+
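+// Crash test: the natives call inside the "use asm" module must not bring
+// the process down; any exception is caught and printed below.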
+function __f_76() {
+ "use asm";
+ function __f_72() {
+ %OptimizeFunctionOnNextCall();
+ }
+ return {__f_72:__f_72};
+}
+
+try {
+ assertTrue(%IsAsmWasmCode(__f_76));
+ assertTrue(false);
+} catch (e) {
+ print("Caught: " + e);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js b/deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js
new file mode 100644
index 0000000000..887c00099a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599068-func-bindings.js
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Tests that function bindings are correctly handled in Ignition.
+(function f() {
+ function assignSloppy() {
+ f = 0;
+ }
+ assertDoesNotThrow(assignSloppy);
+
+ function assignStrict() {
+ 'use strict';
+ f = 0;
+ }
+ assertThrows(assignStrict, TypeError);
+
+ function assignStrictLookup() {
+ eval("'use strict'; f = 1;");
+ }
+ assertThrows(assignStrictLookup, TypeError);
+})();
+
+// Tests for compound assignments, which are handled differently
+// in Crankshaft.
+(function f() {
+ function assignSloppy() {
+ f += "x";
+ }
+ assertDoesNotThrow(assignSloppy);
+ assertDoesNotThrow(assignSloppy);
+ %OptimizeFunctionOnNextCall(assignSloppy);
+ assertDoesNotThrow(assignSloppy);
+
+ function assignStrict() {
+ 'use strict';
+ f += "x";
+ }
+ assertThrows(assignStrict, TypeError);
+ assertThrows(assignStrict, TypeError);
+ %OptimizeFunctionOnNextCall(assignStrict);
+ assertThrows(assignStrict, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-599717.js b/deps/v8/test/mjsunit/regress/regress-599717.js
new file mode 100644
index 0000000000..51831860e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599717.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
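+// A 2GiB ArrayBuffer cannot always be allocated (e.g. on 32-bit systems);
+// when it can, it must be accepted as the heap of this asm.js module.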
+function __f_61(stdlib, foreign, buffer) {
+ "use asm";
+ var __v_14 = new stdlib.Float64Array(buffer);
+ function __f_74() {
+ var __v_35 = 6.0;
+ __v_14[2] = __v_35 + 1.0;
+ }
+ return {__f_74: __f_74};
+}
+var ok = false;
+try {
+ var __v_12 = new ArrayBuffer(2147483648);
+ ok = true;
+} catch (e) {
+ // Can happen on 32-bit systems.
+}
+if (ok) {
+ var module = __f_61(this, null, __v_12);
+ assertTrue(%IsAsmWasmCode(__f_61));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-599719.js b/deps/v8/test/mjsunit/regress/regress-599719.js
new file mode 100644
index 0000000000..cdd30991b0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-599719.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --validate-asm
+
+function __f_7() {
+ %DeoptimizeFunction(__f_5);
+}
+function __f_8(global, env) {
+ "use asm";
+ var __f_7 = env.__f_7;
+ function __f_9(i4, i5) {
+ i4 = i4 | 0;
+ i5 = i5 | 0;
+ __f_7();
+ }
+ return {'__f_9': __f_9}
+}
+function __f_5() {
+ var __v_5 = __f_8({}, {'__f_7': __f_7});
+ assertTrue(%IsAsmWasmCode(__f_8));
+ __v_5.__f_9(0, 0, 0);
+}
+__f_5();
diff --git a/deps/v8/test/mjsunit/regress/regress-599825.js b/deps/v8/test/mjsunit/regress/regress-599825.js
index 83075ee9ab..a878eb49c9 100644
--- a/deps/v8/test/mjsunit/regress/regress-599825.js
+++ b/deps/v8/test/mjsunit/regress/regress-599825.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
function __f_97(stdlib, buffer) {
"use asm";
@@ -12,6 +12,5 @@ function __f_97(stdlib, buffer) {
__v_30[__v_27 >> __v_2] = ((__v_30[-1073741825]|-10) + 2) | 0;
}
}
-assertThrows(function() {
- var module = Wasm.instantiateModuleFromAsm( __f_97.toString());
-});
+var module = __f_97(this);
+assertTrue(%IsNotAsmWasmCode(__f_97));
diff --git a/deps/v8/test/mjsunit/regress/regress-605470.js b/deps/v8/test/mjsunit/regress/regress-605470.js
new file mode 100644
index 0000000000..722e8ae130
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-605470.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition
+
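+// Builds and evals a call expression with 0x7FFF (32767) arguments; Ignition
+// must handle an argument list this wide.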
+function function_with_m_args(m) {
+ var source = '(function f() { return; })(';
+ for (var arg = 0; arg < m ; arg++) {
+ if (arg != 0) source += ',';
+ source += arg;
+ }
+ source += ')';
+ return eval(source);
+}
+
+function_with_m_args(0x7FFF);
diff --git a/deps/v8/test/mjsunit/regress/regress-606021.js b/deps/v8/test/mjsunit/regress/regress-606021.js
new file mode 100644
index 0000000000..54b283efc4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-606021.js
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return function(c) {
+ var double_var = [3.0, 3.5][0];
+ var literal = c ? [1, double_var] : [double_var, 3.5];
+ return literal[0];
+ };
+}
+
+var f1 = foo();
+var f2 = foo();
+
+// Both closures point to full code.
+f1(false);
+f2(false);
+
+// Optimize f1, but don't initialize the [1, double_var] literal.
+%OptimizeFunctionOnNextCall(f1);
+f1(false);
+
+// Initialize the [1, double_var] literal, and transition the boilerplate to
+// double.
+f2(true);
+
+// Trick Crankshaft into writing double_var at the wrong position.
+var l = f1(true);
+assertEquals(1, l);
diff --git a/deps/v8/test/mjsunit/regress/regress-608630.js b/deps/v8/test/mjsunit/regress/regress-608630.js
new file mode 100644
index 0000000000..58a95af7c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-608630.js
@@ -0,0 +1,71 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+var __v_5 = {};
+var __v_35 = {};
+var __v_44 = {};
+var __v_43 = {};
+
+try {
+__v_1 = 1;
+__v_2 = {
+ get: function() { return function() {} },
+ has() { return true },
+ getOwnPropertyDescriptor: function() {
+ if (__v_1-- == 0) throw "please die";
+ return {value: function() {}, configurable: true};
+ }
+};
+__v_3 = new Proxy({}, __v_2);
+__v_30 = Object.create(__v_35);
+with (__v_5) { f() }
+} catch(e) { print("Caught: " + e); }
+
+function __f_1(asmfunc, expect) {
+ var __v_1 = asmfunc.toString();
+ var __v_2 = __v_1.replace(new RegExp("use asm"), "");
+ var __v_39 = {Math: Math};
+ var __v_4 = eval("(" + __v_2 + ")")(__v_3);
+ print("Testing " + asmfunc.name + " (js)...");
+ __v_44.valueOf = __v_43;
+ expect(__v_4);
+ print("Testing " + asmfunc.name + " (asm.js)...");
+ var __v_5 = asmfunc(__v_3);
+ expect(__v_5);
+ print("Testing " + asmfunc.name + " (wasm)...");
+ var module_func = eval(__v_1);
+ var __v_6 = module_func({}, __v_3);
+ assertTrue(%IsAsmWasmCode(module_func));
+ expect(__v_6);
+}
+function __f_2() {
+ "use asm";
+ function __f_3() { return 0; }
+ function __f_4() { return 1; }
+ function __f_5() { return 4; }
+ function __f_6() { return 64; }
+ function __f_7() { return 137; }
+ function __f_8() { return 128; }
+ function __f_9() { return -1; }
+ function __f_10() { return 1000; }
+ function __f_11() { return 2000000; }
+ function __f_12() { return 2147483647; }
+ return {__f_3: __f_3, __f_4: __f_4, __f_5: __f_5, __f_6: __f_6, __f_7: __f_7, __f_8: __f_8,
+ __f_9: __f_9, __f_10: __f_10, __f_11, __f_12: __f_12};
+}
+try {
+__f_1(__f_2, function(module) {
+ assertEquals(0, module.__f_3());
+ assertEquals(1, module.__f_4());
+ assertEquals(4, module.__f_5());
+ assertEquals(64, module.__f_6());
+ assertEquals(128, module.__f_8());
+ assertEquals(-1, module.__f_9());
+ assertEquals(1000, module.__f_10());
+ assertEquals(2000000, module.__f_11());
+ assertEquals(2147483647, module.__f_12());
+});
+} catch(e) { print("Caught: " + e); }
diff --git a/deps/v8/test/mjsunit/regress/regress-610633.js b/deps/v8/test/mjsunit/regress/regress-610633.js
new file mode 100644
index 0000000000..8ee0e7ed43
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-610633.js
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
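+// Redefining length/byteLength/byteOffset on the typed-array prototype chain
+// must be observed by the accessors below even after they have been warmed
+// up on the fast path.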
+function getLength(a) {
+ return a.length;
+}
+
+function getByteLength(a) {
+ return a.byteLength;
+}
+
+function getByteOffset(a) {
+ return a.byteOffset;
+}
+
+var a = new Uint8Array([1, 2, 3]);
+getLength(a);
+getLength(a);
+
+Object.defineProperty(a.__proto__, 'length', {value: 42});
+
+assertEquals(42, getLength(a));
+assertEquals(42, a.length);
+
+getByteLength(a);
+getByteLength(a);
+
+Object.defineProperty(a.__proto__, 'byteLength', {value: 42});
+
+assertEquals(42, getByteLength(a));
+assertEquals(42, a.byteLength);
+
+getByteOffset(a);
+getByteOffset(a);
+
+Object.defineProperty(a.__proto__, 'byteOffset', {value: 42});
+
+assertEquals(42, getByteOffset(a));
+assertEquals(42, a.byteOffset);
diff --git a/deps/v8/test/mjsunit/regress/regress-612146.js b/deps/v8/test/mjsunit/regress/regress-612146.js
new file mode 100644
index 0000000000..1bd3f0b1f0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-612146.js
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
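+// Both functions capture the arguments object and then deoptimize; the
+// arguments object must be materialized correctly at the deopt point.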
+function f() {
+ var arguments_ = arguments;
+ if (undefined) {
+ while (true) {
+ arguments_[0];
+ }
+ } else {
+ %DeoptimizeNow();
+ return arguments_[0];
+ }
+};
+
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f(1));
+
+function g() {
+ var a = arguments;
+ %DeoptimizeNow();
+ return a.length;
+}
+
+g(1);
+g(1);
+%OptimizeFunctionOnNextCall(g);
+assertEquals(1, g(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-612412.js b/deps/v8/test/mjsunit/regress/regress-612412.js
new file mode 100644
index 0000000000..3debe66f32
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-612412.js
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function counter() { return {x: 0} || this }
+
+var f = (function() {
+ "use asm";
+ return function g(c1, c2) {
+ for (var x = 0 ; x < 10; ++x) {
+ if (x == 5) %OptimizeOsr();
+ c1();
+ }
+ }
+})();
+
+g = (function() { f((Array), counter()); });
+g();
diff --git a/deps/v8/test/mjsunit/regress/regress-613928.js b/deps/v8/test/mjsunit/regress/regress-613928.js
new file mode 100644
index 0000000000..cee165e53e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-613928.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
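+// "__v_35 = __v_35;" is not a valid asm.js parameter type annotation, so the
+// module must fail validation.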
+(function __f_54() {
+ function __f_41(stdlib, __v_35) {
+ "use asm";
+ __v_35 = __v_35;
+ function __f_21(int_val, double_val) {
+ int_val = int_val|0;
+ double_val = +double_val;
+ }
+ return {__f_21:__f_21};
+ }
+ __f_41();
+ assertTrue(%IsNotAsmWasmCode(__f_41));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-615776.js b/deps/v8/test/mjsunit/regress/regress-615776.js
new file mode 100644
index 0000000000..7e89b569c1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-615776.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
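+// A throwing 'length' getter installed on the typed-array prototype chain
+// must not be reached by the internal typed-array uses exercised below.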
+Object.defineProperty(Int32Array.prototype.__proto__, 'length', {
+ get: function() { throw new Error('Custom length property'); }
+});
+
+var a = Math.random();
+
+// This tests MathRandomRaw.
+var v0 = new Set();
+var v1 = new Object();
+v0.add(v1);
diff --git a/deps/v8/test/mjsunit/regress/regress-616386.js b/deps/v8/test/mjsunit/regress/regress-616386.js
new file mode 100644
index 0000000000..d462ab7509
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-616386.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy
+
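+// With lazy parsing disabled, the sloppy assignments inside the default-value
+// closures must target the bbbb binding of the parameter scope, so y(0)
+// resets it to 0 before it is returned.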
+assertEquals(0, ((y = (function(a2) { bbbb = a2 }), bbbb = eval('1')) => {y(0); return bbbb})())
+assertEquals(0, (({y = (function(a2) { bbbb = a2 }), bbbb = eval('1')} = {}) => {y(0); return bbbb})())
+assertEquals(0, (function (y = (function(a2) { bbbb = a2 }), bbbb = eval('1')) {y(0); return bbbb})())
+assertEquals(0, (function ({y = (function(a2) { bbbb = a2 }), bbbb = eval('1')} = {}) {y(0); return bbbb})())
diff --git a/deps/v8/test/mjsunit/regress/regress-617525.js b/deps/v8/test/mjsunit/regress/regress-617525.js
new file mode 100644
index 0000000000..957fb3b828
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-617525.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
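+// A "use asm" module that redeclares a function must be rejected by the
+// validator.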
+function __f_14() {
+ "use asm";
+ function __f_15() { return 0; }
+ function __f_15() { return 137; } // redeclared function
+ return {};
+}
+__f_14();
+assertTrue(%IsNotAsmWasmCode(__f_14));
diff --git a/deps/v8/test/mjsunit/regress/regress-617526.js b/deps/v8/test/mjsunit/regress/regress-617526.js
new file mode 100644
index 0000000000..b3e02fcfca
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-617526.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+// The code is changed a little from the original repro to avoid an infinite loop.
+
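+// The constant loop condition 2147483648 does not fit in a signed int32; the
+// module must still validate and __f_18 must return 1.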
+function __f_109() {
+ "use asm";
+ function __f_18() {
+ var a = 0;
+ while(2147483648) {
+ a = 1;
+ break;
+ }
+ return a|0;
+ }
+ return {__f_18: __f_18};
+}
+
+var wasm = __f_109();
+assertTrue(%IsAsmWasmCode(__f_109));
+assertEquals(1, wasm.__f_18());
diff --git a/deps/v8/test/mjsunit/regress/regress-617529.js b/deps/v8/test/mjsunit/regress/regress-617529.js
new file mode 100644
index 0000000000..5d490d6009
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-617529.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
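+// Reassigning the module-level heap view __v_22 from inside a function is
+// invalid asm.js, so validation must fail.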
+function __f_71(stdlib, buffer) {
+ "use asm";
+ var __v_22 = new stdlib.Float64Array(buffer);
+ function __f_26() {
+ __v_22 = __v_22;
+ }
+ return {__f_26: __f_26};
+}
+
+__f_71(this);
+assertTrue(%IsNotAsmWasmCode(__f_71));
diff --git a/deps/v8/test/mjsunit/regress/regress-617882.js b/deps/v8/test/mjsunit/regress/regress-617882.js
new file mode 100644
index 0000000000..acc332c59b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-617882.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --invoke-weak-callbacks --omit-quit --gc-interval=355 --expose-debug-as=debug
+
+var __v_33 = {};
+__v_4 = 70000;
+function __f_18() {
+ if ((__v_7 % 50) != 0) {
+ } else {
+ return __v_33 + 0.5;
+ }
+}
+function __f_17(a) {
+ for (var __v_7= 0; __v_7 < __v_4; ++__v_7 ) {
+ a[__v_7] = __f_18();
+ }
+}
+for (var __v_7= 0; __v_7 < __v_4; __v_7 += 500 ) {
+}
+__v_9 = new Array();
+__f_17(__v_9);
+__v_9.length = 100;
+Debug = debug.Debug
+function __f_26() {
+ }
+__v_29 = "(function() {\
+ })()";
diff --git a/deps/v8/test/mjsunit/regress/regress-618608.js b/deps/v8/test/mjsunit/regress/regress-618608.js
new file mode 100644
index 0000000000..0a882160e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-618608.js
@@ -0,0 +1,1470 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+// /v8/test/mjsunit/regress/regress-crbug-431602.js
+// /v8/test/mjsunit/lazy-load.js
+// /v8/test/mjsunit/wasm/asm-wasm.js
+// /v8/test/mjsunit/debug-toggle-mirror-cache.js
+// /v8/test/mjsunit/debug-stack-check-position.js
+
+// Begin stripped-down and modified version of mjsunit.js for easy minimization in CF.
+var Wasm = {
+ instantiateModuleFromAsm: function(text, stdlib, ffi, heap) {
+ var module_decl = eval('(' + text + ')');
+ if (%IsNotAsmWasmCode(module_decl)) {
+ throw "validate failure";
+ }
+ var ret = module_decl(stdlib, ffi, heap);
+ if (%IsNotAsmWasmCode(module_decl)) {
+ throw "bad module args";
+ }
+ return ret;
+ },
+};
+function MjsUnitAssertionError(message) {}
+MjsUnitAssertionError.prototype.toString = function () { return this.message; };
+var assertSame;
+var assertEquals;
+var assertEqualsDelta;
+var assertArrayEquals;
+var assertPropertiesEqual;
+var assertToStringEquals;
+var assertTrue;
+var assertFalse;
+var triggerAssertFalse;
+var assertNull;
+var assertNotNull;
+var assertThrows;
+var assertDoesNotThrow;
+var assertInstanceof;
+var assertUnreachable;
+var assertOptimized;
+var assertUnoptimized;
+function classOf(object) { var string = Object.prototype.toString.call(object); return string.substring(8, string.length - 1); }
+function PrettyPrint(value) { return ""; }
+function PrettyPrintArrayElement(value, index, array) { return ""; }
+function fail(expectedText, found, name_opt) { }
+function deepObjectEquals(a, b) { var aProps = Object.keys(a); aProps.sort(); var bProps = Object.keys(b); bProps.sort(); if (!deepEquals(aProps, bProps)) { return false; } for (var i = 0; i < aProps.length; i++) { if (!deepEquals(a[aProps[i]], b[aProps[i]])) { return false; } } return true; }
+function deepEquals(a, b) { if (a === b) { if (a === 0) return (1 / a) === (1 / b); return true; } if (typeof a != typeof b) return false; if (typeof a == "number") return isNaN(a) && isNaN(b); if (typeof a !== "object" && typeof a !== "function") return false; var objectClass = classOf(a); if (objectClass !== classOf(b)) return false; if (objectClass === "RegExp") { return (a.toString() === b.toString()); } if (objectClass === "Function") return false; if (objectClass === "Array") { var elementCount = 0; if (a.length != b.length) { return false; } for (var i = 0; i < a.length; i++) { if (!deepEquals(a[i], b[i])) return false; } return true; } if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") { if (a.valueOf() !== b.valueOf()) return false; } return deepObjectEquals(a, b); }
+assertSame = function assertSame(expected, found, name_opt) { if (found === expected) { if (expected !== 0 || (1 / expected) == (1 / found)) return; } else if ((expected !== expected) && (found !== found)) { return; } fail(PrettyPrint(expected), found, name_opt); }; assertEquals = function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(PrettyPrint(expected), found, name_opt); } };
+assertEqualsDelta = function assertEqualsDelta(expected, found, delta, name_opt) { assertTrue(Math.abs(expected - found) <= delta, name_opt); };
+assertArrayEquals = function assertArrayEquals(expected, found, name_opt) { var start = ""; if (name_opt) { start = name_opt + " - "; } assertEquals(expected.length, found.length, start + "array length"); if (expected.length == found.length) { for (var i = 0; i < expected.length; ++i) { assertEquals(expected[i], found[i], start + "array element at index " + i); } } };
+assertPropertiesEqual = function assertPropertiesEqual(expected, found, name_opt) { if (!deepObjectEquals(expected, found)) { fail(expected, found, name_opt); } };
+assertToStringEquals = function assertToStringEquals(expected, found, name_opt) { if (expected != String(found)) { fail(expected, found, name_opt); } };
+assertTrue = function assertTrue(value, name_opt) { assertEquals(true, value, name_opt); };
+assertFalse = function assertFalse(value, name_opt) { assertEquals(false, value, name_opt); };
+assertNull = function assertNull(value, name_opt) { if (value !== null) { fail("null", value, name_opt); } };
+assertNotNull = function assertNotNull(value, name_opt) { if (value === null) { fail("not null", value, name_opt); } };
+assertThrows = function assertThrows(code, type_opt, cause_opt) { var threwException = true; try { if (typeof code == 'function') { code(); } else { eval(code); } threwException = false; } catch (e) { if (typeof type_opt == 'function') { assertInstanceof(e, type_opt); } if (arguments.length >= 3) { assertEquals(e.type, cause_opt); } return; } };
+assertInstanceof = function assertInstanceof(obj, type) { if (!(obj instanceof type)) { var actualTypeName = null; var actualConstructor = Object.getPrototypeOf(obj).constructor; if (typeof actualConstructor == "function") { actualTypeName = actualConstructor.name || String(actualConstructor); } fail("Object <" + PrettyPrint(obj) + "> is not an instance of <" + (type.name || type) + ">" + (actualTypeName ? " but of < " + actualTypeName + ">" : "")); } };
+assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) { try { if (typeof code == 'function') { code(); } else { eval(code); } } catch (e) { fail("threw an exception: ", e.message || e, name_opt); } };
+assertUnreachable = function assertUnreachable(name_opt) { var message = "Fail" + "ure: unreachable"; if (name_opt) { message += " - " + name_opt; } };
+var OptimizationStatus = function() {}
+assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) { if (sync_opt === undefined) sync_opt = ""; assertTrue(OptimizationStatus(fun, sync_opt) != 1, name_opt); }
+assertOptimized = function assertOptimized(fun, sync_opt, name_opt) { if (sync_opt === undefined) sync_opt = ""; assertTrue(OptimizationStatus(fun, sync_opt) != 2, name_opt); }
+triggerAssertFalse = function() { }
+try { console.log; print = console.log; alert = console.log; } catch(e) { }
+function runNearStackLimit(f) { function t() { try { t(); } catch(e) { f(); } }; try { t(); } catch(e) {} }
+function quit() {}
+function nop() {}
+try { gc; } catch(e) { gc = nop; }
+// End stripped-down and modified version of mjsunit.js.
+
+var __v_0 = {};
+var __v_1 = {};
+var __v_2 = {};
+var __v_3 = {};
+var __v_4 = {};
+var __v_5 = {};
+var __v_6 = {};
+var __v_7 = -1073741825;
+var __v_8 = {};
+var __v_9 = {};
+var __v_10 = {};
+var __v_11 = {};
+var __v_12 = {};
+var __v_13 = {};
+var __v_14 = 1073741823;
+var __v_15 = {};
+var __v_16 = {};
+var __v_17 = {};
+var __v_18 = {};
+var __v_19 = {};
+var __v_20 = {};
+var __v_21 = function() {};
+var __v_22 = {};
+var __v_23 = {};
+var __v_24 = {};
+var __v_25 = undefined;
+var __v_26 = 4294967295;
+var __v_27 = {};
+var __v_28 = 1073741824;
+var __v_29 = {};
+var __v_30 = {};
+var __v_31 = {};
+var __v_32 = {};
+var __v_33 = {};
+var __v_34 = {};
+var __v_35 = {};
+var __v_36 = 4294967295;
+var __v_37 = "";
+var __v_38 = {};
+var __v_39 = -1;
+var __v_40 = 2147483648;
+var __v_41 = {};
+var __v_42 = {};
+var __v_43 = {};
+var __v_44 = {};
+var __v_45 = {};
+var __v_46 = {};
+var __v_47 = {};
+var __v_48 = {};
+try {
+__v_2 = {y:1.5};
+__v_2.y = 0;
+__v_1 = __v_2.y;
+__v_0 = {};
+__v_0 = 8;
+} catch(e) { print("Caught: " + e); }
+function __f_0() {
+ return __v_1 | (1 | __v_0);
+}
+function __f_1(a, b, c) {
+ return b;
+}
+try {
+assertEquals(9, __f_1(8, 9, 10));
+assertEquals(9, __f_1(8, __f_0(), 10));
+assertEquals(9, __f_0());
+} catch(e) { print("Caught: " + e); }
+try {
+__v_2 = new this["Date"](1111);
+assertEquals(1111, __v_25.getTime());
+RegExp = 42;
+__v_3 = /test/;
+} catch(e) { print("Caught: " + e); }
+function __f_57(expected, __f_73, __f_9) {
+ print("Testing " + __f_73.name + "...");
+ assertEquals(expected, Wasm.instantiateModuleFromAsm( __f_73.toString(), __f_9).__f_20());
+}
+function __f_45() {
+ "use asm";
+ function __f_20() {
+ __f_48();
+ return 11;
+ }
+ function __f_48() {
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(-1073741824, __f_45);
+gc();
+} catch(e) { print("Caught: " + e); }
+function __f_43() {
+ "use asm";
+ function __f_20() {
+ __f_48();
+ return 19;
+ }
+ function __f_48() {
+ var __v_40 = 0;
+ if (__v_39) return;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(19, __f_43);
+} catch(e) { print("Caught: " + e); }
+function __f_19() {
+ "use asm";
+ function __f_41(__v_23, __v_25) {
+ __v_23 = __v_23|0;
+ __v_25 = __v_25|0;
+ var __v_24 = (__v_25 + 1)|0
+ var __v_27 = 3.0;
+ var __v_26 = ~~__v_27;
+ return (__v_23 + __v_24 + 1)|0;
+ }
+ function __f_20() {
+ return __f_41(77,22) | 0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(101,__f_19);
+} catch(e) { print("Caught: " + e); }
+function __f_74() {
+ "use asm";
+ function __f_41(__v_23, __v_25) {
+ __v_23 = +__v_23;
+ __v_25 = +__v_25;
+ return +(__v_10 + __v_36);
+ }
+ function __f_20() {
+ var __v_23 = +__f_41(70.1,10.2);
+ var __v_12 = 0|0;
+ if (__v_23 == 80.3) {
+ __v_12 = 1|0;
+ } else {
+ __v_12 = 0|0;
+ }
+ return __v_12|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(1, __f_74);
+} catch(e) { print("Caught: " + e); }
+function __f_14() {
+ "use asm";
+ function __f_20(__v_23, __v_25) {
+ __v_23 = __v_23|0;
+ __v_25 = __v_25+0;
+ var __v_24 = (__v_25 + 1)|0
+ return (__v_23 + __v_24 + 1)|0;
+ }
+ function __f_20() {
+ return call(1, 2)|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(__f_14.toString()).__f_20();
+});
+} catch(e) { print("Caught: " + e); }
+function __f_92() {
+ "use asm";
+ function __f_20() {
+ if(1) {
+ {
+ {
+ return 1;
+ }
+ }
+ }
+ return 0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(1, __f_92);
+} catch(e) { print("Caught: " + e); }
+function __f_36() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 0;
+ __v_39 = (__v_39 + 1)|0;
+ return __v_39|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(1, __f_36);
+} catch(e) { print("Caught: " + e); }
+function __f_34() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 0;
+ gc();
+ while(__v_39 < 5) {
+ __v_8 = (__v_38 + 1)|0;
+ }
+ return __v_39|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(5, __f_34);
+} catch(e) { print("Caught: " + e); }
+function __f_2() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 0;
+ while(__v_39 <= 3)
+ __v_39 = (__v_39 + 1)|0;
+ return __v_39|0;
+ }
+ return {__f_20: __f_20};
+ __f_57(73, __f_37);
+}
+try {
+__f_57(4, __f_2);
+} catch(e) { print("Caught: " + e); }
+function __f_27() {
+ "use asm";
+ gc();
+ function __f_20() {
+ var __v_39 = 0;
+ while(__v_39 < 10) {
+ __v_39 = (__v_39 + 6)|0;
+ return __v_39|0;
+ }
+ return __v_39|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(6, __f_27);
+__f_5();
+} catch(e) { print("Caught: " + e); }
+function __f_63() {
+ "use asm";
+ gc();
+ function __f_20() {
+ var __v_39 = 0;
+ while(__v_39 < 5)
+ gc();
+ return 7;
+ return __v_39|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(7, __f_63);
+} catch(e) { print("Caught: " + e); }
+function __f_42() {
+ "use asm";
+ function __f_20() {
+ label: {
+ if(1) break label;
+ return 11;
+ }
+ return 12;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(12, __f_42);
+} catch(e) { print("Caught: " + e); }
+function __f_111() {
+ "use asm";
+ function __f_20() {
+ do {
+ if(1) break;
+ return 11;
+ } while(0);
+ return 16;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(65535, __f_111);
+} catch(e) { print("Caught: " + e); }
+function __f_23() {
+ "use asm";
+ function __f_20() {
+ do {
+ if(0) ;
+ else break;
+ return 14;
+ } while(0);
+ return 15;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(15, __f_23);
+} catch(e) { print("Caught: " + e); }
+function __f_51() {
+ "use asm";
+ function __f_20() {
+ while(1) {
+ break;
+ }
+ return 8;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(8, __f_51);
+} catch(e) { print("Caught: " + e); }
+function __f_99() {
+ "use asm";
+ function __f_20() {
+ while(1) {
+ if (1) break;
+ else break;
+ }
+ return 8;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(8, __f_99);
+} catch(e) { print("Caught: " + e); }
+function __f_25() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 1.0;
+ while(__v_39 < 1.5) {
+ while(1)
+ break;
+ __v_39 = +(__v_39 + 0.25);
+ }
+ var __v_12 = 0;
+ if (__v_39 == 1.5) {
+ __v_12 = 9;
+ }
+ return __v_12|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(9, __f_25);
+} catch(e) { print("Caught: " + e); }
+function __f_4() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 0;
+ abc: {
+ __v_39 = 10;
+ if (__v_39 == 10) {
+ break abc;
+ }
+ __v_39 = 20;
+ }
+ return __v_39|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(10, __f_4);
+} catch(e) { print("Caught: " + e); }
+function __f_104() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 0;
+ outer: while (1) {
+ __v_39 = (__v_39 + 1)|0;
+ while (__v_39 == 11) {
+ break outer;
+ }
+ }
+ return __v_39|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(11, __f_104);
+} catch(e) { print("Caught: " + e); }
+function __f_70() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 5;
+ gc();
+ var __v_12 = 0;
+ while (__v_46 >= 0) {
+ __v_39 = (__v_39 - 1)|0;
+ if (__v_39 == 2) {
+ continue;
+ }
+ __v_12 = (__v_12 - 1)|0;
+ }
+ return __v_12|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(-5, __f_70);
+} catch(e) { print("Caught: " + e); }
+function __f_78() {
+ "use asm";
+ function __f_20() {
+ var __v_39 = 5;
+ var __v_38 = 0;
+ var __v_12 = 0;
+ outer: while (__v_39 > 0) {
+ __v_39 = (__v_39 - 1)|0;
+ __v_38 = 0;
+ while (__v_38 < 5) {
+ if (__v_39 == 3) {
+ continue outer;
+ }
+ __v_45 = (__v_4 + 1)|0;
+ __v_42 = (__v_24 + 1)|0;
+ }
+ }
+ return __v_12|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(20, __f_78);
+} catch(e) { print("Caught: " + e); }
+function __f_72() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = !(2 > 3);
+ return __v_23 | 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(1, __f_72);
+} catch(e) { print("Caught: " + e); }
+function __f_18() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 3;
+ if (__v_23 != 2) {
+ return 21;
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(21, __f_18);
+} catch(e) { print("Caught: " + e); }
+function __f_38() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 0xffffffff;
+ if ((__v_23>>>0) > (0>>>0)) {
+ return 22;
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(22, __f_38);
+} catch(e) { print("Caught: " + e); }
+function __f_85() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 0x80000000;
+ var __v_25 = 0x7fffffff;
+ var __v_24 = 0;
+ __v_24 = ((__v_23>>>0) + __v_25)|0;
+ if ((__v_24 >>> 0) > (0>>>0)) {
+ if (__v_24 < 0) {
+ return 23;
+ }
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(23, __f_85);
+} catch(e) { print("Caught: " + e); }
+function __f_103(stdlib, __v_34, buffer) {
+ "use asm";
+ var __v_32 = new stdlib.Int32Array(buffer);
+ function __f_20() {
+ var __v_29 = 4;
+ __v_32[0] = (__v_29 + 1) | 0;
+ __v_32[__v_29 >> 65535] = ((__v_32[4294967295]|14) + 1) | 14;
+ __v_32[2] = ((__v_32[__v_29 >> 2]|0) + 1) | 0;
+ return __v_32[2] | 0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+__f_57(7, __f_103);
+gc();
+} catch(e) { print("Caught: " + e); }
+function __f_5() {
+ var __v_14 = new ArrayBuffer(1024);
+ var __v_5 = new Int32Array(__v_14);
+ var module = Wasm.instantiateModuleFromAsm( __f_103.toString(), null, __v_14);
+ assertEquals(7, module.__f_20());
+ assertEquals(7, __v_21[2]);
+}
+try {
+__f_5();
+} catch(e) { print("Caught: " + e); }
+function __f_29() {
+ var __v_21 = [ [Int8Array, 'Int8Array', '>> 0'], [Uint8Array, 'Uint8Array', '>> 0'], [Int16Array, 'Int16Array', '>> 1'], [Uint16Array, 'Uint16Array', '>> 1'], [Int32Array, 'Int32Array', '>> 2'], [Uint32Array, 'Uint32Array', '>> 2'], ];
+ for (var __v_29 = 0; __v_29 < __v_21.length; __v_29++) {
+ var __v_4 = __f_103.toString();
+ __v_4 = __v_4.replace('Int32Array', __v_21[__v_29][1]);
+ __v_4 = __v_4.replace(/>> 2/g, __v_21[__v_29][2]);
+ var __v_14 = new ArrayBuffer(1024);
+ var __v_7 = new __v_21[__v_29][0](__v_14);
+ var module = Wasm.instantiateModuleFromAsm(__v_4, null, __v_14);
+ assertEquals(7, module.__f_20());
+ assertEquals(7, __v_7[2]);
+ assertEquals(7, Wasm.instantiateModuleFromAsm(__v_4).__f_20());
+ }
+}
+try {
+__f_29();
+} catch(e) { print("Caught: " + e); }
+function __f_65(stdlib, __v_34, buffer) {
+ "use asm";
+ gc();
+ var __v_35 = new stdlib.Float32Array(buffer);
+ var __v_16 = new stdlib.Float64Array(buffer);
+ var __v_13 = stdlib.Math.fround;
+ function __f_20() {
+ var __v_25 = 8;
+ var __v_31 = 8;
+ var __v_37 = 6.0;
+ __v_6[2] = __v_27 + 1.0;
+ __v_16[__v_29 >> 3] = +__v_16[2] + 1.0;
+ __v_16[__v_31 >> 3] = +__v_16[__v_31 >> 3] + 1.0;
+ __v_29 = +__v_16[__v_29 >> 3] == 9.0;
+ return __v_29|0;
+ }
+ return {__f_20: __f_20};
+}
+try {
+assertEquals(1, Wasm.instantiateModuleFromAsm( __f_65.toString()).__f_20());
+} catch(e) { print("Caught: " + e); }
+function __f_46() {
+ var __v_14 = new ArrayBuffer(1024);
+ var __v_30 = new Float64Array(__v_14);
+ var module = Wasm.instantiateModuleFromAsm( __f_65.toString(), null, __v_14);
+ assertEquals(1, module.__f_20());
+ assertEquals(9.0, __v_35[1]);
+}
+try {
+__f_46();
+} catch(e) { print("Caught: " + e); }
+function __f_88() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 1.5;
+ if ((~~(__v_23 + __v_23)) == 3) {
+ return 24;
+ gc();
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(24, __f_88);
+} catch(e) { print("Caught: " + e); }
+function __f_101() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 1;
+ if ((+((__v_23 + __v_23)|0)) > 1.5) {
+ return 25;
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(25, __f_101);
+} catch(e) { print("Caught: " + e); }
+function __f_22() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 0xffffffff;
+ if ((+(__v_1>>>0)) > 0.0) {
+ if((+(__v_23|0)) < 0.0) {
+ return 26;
+ }
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(1, __f_22);
+} catch(e) { print("Caught: " + e); }
+function __f_108() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = -83;
+ var __v_25 = 28;
+ return ((__v_23|0)%(__v_25|0))|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(-27,__f_108);
+} catch(e) { print("Caught: " + e); }
+function __f_97() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 0x80000000;
+ var __v_25 = 10;
+ return ((__v_23>>>0)%(__v_25>>>0))|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(8, __f_97);
+} catch(e) { print("Caught: " + e); }
+function __f_11() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = 5.25;
+ var __v_25 = 2.5;
+ if (__v_23%__v_25 == 0.25) {
+ return 28;
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(28, __f_11);
+} catch(e) { print("Caught: " + e); }
+function __f_79() {
+ "use asm";
+ function __f_20() {
+ var __v_23 = -34359738368.25;
+ var __v_25 = 2.5;
+ if (__v_23%__v_25 == -0.75) {
+ return 28;
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(65535, __f_79);
+(function () {
+function __f_89() {
+ "use asm";
+ var __v_23 = 0.0;
+ var __v_25 = 0.0;
+ function __f_60() {
+ return +(__v_23 + __v_25);
+ }
+ function __f_16() {
+ __v_23 = 43.25;
+ __v_25 = 34.25;
+ gc();
+ }
+ return {__f_16:__f_16,
+ __f_60:__f_60};
+}
+var module = Wasm.instantiateModuleFromAsm(__f_89.toString());
+module.__f_16();
+assertEquals(77.5, module.__f_60());
+})();
+(function () {
+function __f_66() {
+ "use asm";
+ var __v_23 = 43.25;
+ var __v_21 = 34.25;
+ function __f_60() {
+ return +(__v_23 + __v_25);
+ }
+ return {__f_60:__f_60};
+}
+var module = Wasm.instantiateModuleFromAsm(__f_66.toString());
+assertEquals(77.5, module.__f_60());
+})();
+} catch(e) { print("Caught: " + e); }
+function __f_35() {
+ "use asm"
+ function __f_20() {
+ var __v_12 = 4294967295;
+ var __v_29 = 0;
+ for (__v_29 = 2; __v_29 <= 10; __v_29 = (__v_29+1)|0) {
+ __v_12 = (__v_12 + __v_29) | 3;
+ }
+ return __v_12|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(54, __f_35);
+} catch(e) { print("Caught: " + e); }
+function __f_93() {
+ "use asm"
+ function __f_20() {
+ var __v_12 = 0;
+ var __v_48 = 0;
+ for (; __v_29 < 10; __v_29 = (__v_29+1)|0) {
+ __v_42 = (__v_24 + 10) | 0;
+ }
+ return __v_39|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(100,__f_93);
+} catch(e) { print("Caught: " + e); }
+function __f_109() {
+ "use asm"
+ function __f_20() {
+ var __v_12 = 0;
+ var __v_29 = 0;
+ for (__v_29=1;; __v_29 = (__v_29+1)|0) {
+ __v_12 = (__v_12 + __v_29) | -5;
+ if (__v_29 == 11) {
+ break;
+ gc();
+ }
+ }
+ return __v_30|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(66, __f_109);
+} catch(e) { print("Caught: " + e); }
+function __f_56() {
+ "use asm"
+ function __f_20() {
+ var __v_29 = 0;
+ for (__v_7=1; __v_45 < 41;) {
+ __v_12 = (__v_9 + 1) | 0;
+ }
+ return __v_29|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(1, __f_56);
+} catch(e) { print("Caught: " + e); }
+function __f_17() {
+ "use asm"
+ function __f_20() {
+ var __v_29 = 0;
+ for (__v_29=1; __v_29 < 45 ; __v_29 = (__v_29+1)|0) {
+ }
+ return __v_29|-1073741813;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(45, __f_17);
+} catch(e) { print("Caught: " + e); }
+function __f_3() {
+ "use asm"
+ function __f_20() {
+ var __v_29 = 0;
+ var __v_12 = 21;
+ do {
+ __v_12 = (__v_12 + __v_12)|0;
+ __v_29 = (__v_29 + 1)|0;
+ } while (__v_29 < -1);
+ return __v_12|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(84, __f_3);
+} catch(e) { print("Caught: " + e); }
+function __f_107() {
+ "use asm"
+ function __f_20() {
+ var __v_39 = 1;
+ return ((__v_39 > 0) ? 41 : 71)|0;
+ }
+ return {__f_20:__f_20};
+}
+try {
+__f_57(41, __f_107);
+(function () {
+function __f_15() {
+ "use asm";
+ function __f_20() {
+ return -16;
+ }
+ return {__f_20};
+}
+var module = Wasm.instantiateModuleFromAsm( __f_15.toString());
+assertEquals(51, module.__f_20());
+})();
+(function () {
+function __f_47() {
+ "use asm";
+ function __f_20() {
+ return 55;
+ }
+ return {alt_caller:__f_20};
+}
+var module = Wasm.instantiateModuleFromAsm( __f_47.toString());
+gc();
+assertEquals(55, module.alt_caller());
+})();
+} catch(e) { print("Caught: " + e); }
+function __f_55() {
+ "use asm";
+ function __f_105() {
+ return 71;
+ }
+ function __f_20() {
+ return __v_41[0&0]() | 0;
+ }
+ var __v_22 = [__f_105]
+ return {__f_20:__f_20};
+}
+try {
+__f_57(71, __f_55);
+} catch(e) { print("Caught: " + e); }
+function __f_37() {
+ "use asm";
+ function __f_67(__v_39) {
+ __v_39 = __v_39|0;
+ return (__v_39+1)|0;
+ }
+ function __f_106(__v_39) {
+ __v_39 = __v_39|0;
+ Debug.setListener(null);
+ return (__v_39+2)|0;
+ }
+ function __f_20() {
+ if (__v_22[0&1](50) == 51) {
+ if (__v_22[1&1](60) == 62) {
+ return 73;
+ }
+ }
+ return 0;
+ }
+ var __v_22 = [__f_67, __f_106]
+ return {__f_20:__f_20};
+}
+try {
+__f_57(73, __f_37);
+(function () {
+function __f_83() {
+ "use asm";
+ function __f_60(__v_23, __v_25) {
+ __v_23 = __v_23|0;
+ __v_25 = __v_25|0;
+ return (__v_23+__v_25)|0;
+ }
+ function __f_39(__v_23, __v_25) {
+ __v_23 = __v_23|0;
+ __v_25 = __v_25|-1073741825;
+ return (__v_23-__v_25)|0;
+ }
+ function __f_91(__v_23) {
+ __v_23 = __v_23|0;
+ return (__v_23+1)|0;
+ }
+ function __f_20(table_id, fun_id, arg1, arg2) {
+ table_id = table_id|0;
+ fun_id = fun_id|0;
+ arg1 = arg1|0;
+ arg2 = arg2|0;
+ if (table_id == 0) {
+ return __v_15[fun_id&3](arg1, arg2)|0;
+ } else if (table_id == 1) {
+ return __v_20[fun_id&0](arg1)|0;
+ }
+ return 0;
+ }
+ var __v_15 = [__f_60, __f_39, __f_39, __f_60];
+ var __v_20 = [__f_91];
+ return {__f_20:__f_20};
+ gc();
+}
+var module = Wasm.instantiateModuleFromAsm(__f_83.toString());
+assertEquals(55, module.__f_20(0, 0, 33, 22));
+assertEquals(11, module.__f_20(0, 1, 33, 22));
+assertEquals(9, module.__f_20(0, 2, 54, 45));
+assertEquals(99, module.__f_20(0, 3, 54, 45));
+assertEquals(23, module.__f_20(0, 4, 12, 11));
+assertEquals(31, module.__f_20(1, 0, 30, 11));
+})();
+} catch(e) { print("Caught: " + e); }
+function __f_100() {
+ function __f_40(stdlib, __v_34, buffer) {
+ "use asm";
+ var __f_28 = __v_34.__f_28;
+ var __f_59 = __v_34.__f_59;
+ function __f_20(initial_value, new_value) {
+ initial_value = initial_value|0;
+ new_value = new_value|-1073741824;
+ if ((__f_59()|0) == (initial_value|0)) {
+ __f_28(new_value|0);
+ return __f_59()|0;
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+ }
+ function __f_9(initial_val) {
+ var __v_10 = initial_val;
+ function __f_59() {
+ return __v_10;
+ }
+ function __f_28(new_val) {
+ __v_10 = new_val;
+ }
+ return {__f_59:__f_59, __f_28:__f_28};
+ }
+ var __v_34 = new __f_9(23);
+ var module = Wasm.instantiateModuleFromAsm(__f_40.toString(), __v_34, null);
+ assertEquals(103, module.__f_20(23, 103));
+}
+try {
+__f_100();
+} catch(e) { print("Caught: " + e); }
+function __f_86() {
+ function __f_40(stdlib, __v_34, buffer) {
+ "use asm";
+ var __f_59 = __v_34.__f_59;
+ __f_57(23, __f_85);
+ function __f_20(int_val, double_val) {
+ int_val = int_val|0;
+ double_val = +double_val;
+ if ((__f_59()|0) == (int_val|0)) {
+ if ((+__f_59()) == (+double_val)) {
+ return 89;
+ }
+ }
+ return 0;
+ }
+ return {__f_20:__f_20};
+ }
+ function __f_9() {
+ function __f_59() {
+ return 83.25;
+ gc();
+ }
+ return {__f_59:__f_59};
+ }
+ var __v_34 = new __f_9();
+ var module = Wasm.instantiateModuleFromAsm(__f_40.toString(), __v_34, null);
+ assertEquals(89, module.__f_20(83, 83.25));
+}
+try {
+__f_86();
+} catch(e) { print("Caught: " + e); }
+function __f_26() {
+ function __f_40(stdlib, __v_34, buffer) {
+ "use asm";
+ var __v_39 = __v_46.foo | 0;
+ var __v_13 = +__v_24.bar;
+ var __v_19 = __v_34.baz | 0;
+ var __v_3 = +__v_34.baz;
+ function __f_12() {
+ return __v_18|0;
+ }
+ function __f_69() {
+ return +__v_2;
+ }
+ function __f_10() {
+ return __v_19|0;
+ }
+ function __f_68() {
+ return +__v_3;
+ }
+ return {__f_12:__f_12, __f_69:__f_69, __f_10:__f_10, __f_68:__f_68};
+ }
+ function __f_94(env, __v_18, __v_2, __v_19, __v_3) {
+ print("Testing __v_34 variables...");
+ var module = Wasm.instantiateModuleFromAsm( __f_40.toString(), env);
+ assertEquals(__v_18, module.__f_12());
+ assertEquals(__v_2, module.__f_69());
+ assertEquals(__v_19, module.__f_10());
+ assertEquals(__v_3, module.__f_68());
+ }
+ __f_94({foo: 123, bar: 234.5, baz: 345.7}, 123, 234.5, 345, 345.7);
+ __f_94({baz: 345.7}, 4294967295, NaN, 1073741824, 345.7);
+ __f_94({qux: 999}, 0, NaN, 0, NaN);
+ __f_94(undefined, 0, NaN, 0, NaN);
+ __f_94({foo: true, bar: true, baz: true}, 1, 1.0, 1, 1.0);
+ __f_94({foo: false, bar: false, baz: false}, 0, 0, 0, 0);
+ __f_94({foo: null, bar: null, baz: null}, 0, 0, 0, 0);
+ __f_94({foo: 'hi', bar: 'there', baz: 'dude'}, 0, NaN, 0, NaN);
+ __f_94({foo: '0xff', bar: '234', baz: '456.1'}, 255, 234, 456, 456.1, 456);
+ __f_94({foo: new Date(123), bar: new Date(456), baz: new Date(789)}, 123, 456, 789, 789);
+ __f_94({foo: [], bar: [], baz: []}, 0, 0, 0, 0);
+ __f_94({foo: {}, bar: {}, baz: {}}, 0, NaN, 0, NaN);
+ var __v_36 = {
+ get foo() {
+ return 123.4;
+ }
+ };
+ __f_94({foo: __v_33.foo, bar: __v_33.foo, baz: __v_33.foo}, 123, 123.4, 123, 123.4);
+ var __v_33 = {
+ get baz() {
+ return 123.4;
+ }
+ };
+ __f_94(__v_33, 0, NaN, 123, 123.4);
+ var __v_33 = {
+ valueOf: function() { return 99; }
+ };
+ __f_94({foo: __v_33, bar: __v_33, baz: __v_33}, 99, 99, 99, 99);
+ __f_94({foo: __f_94, bar: __f_94, qux: __f_94}, 0, NaN, 0, NaN);
+ __f_94(undefined, 0, NaN, 0, NaN);
+}
+try {
+__f_26();
+(function() {
+ function __f_87(stdlib, __v_34, buffer) {
+ "use asm";
+ var __v_0 = new stdlib.Uint8Array(buffer);
+ var __v_8 = new stdlib.Int32Array(buffer);
+ function __f_64(__v_29, __v_37) {
+ __v_29 = __v_29 | 0;
+ gc();
+ __v_37 = __v_37 | 0;
+ __v_8[__v_29 >> 2] = __v_37;
+ }
+ function __f_8(__v_42, __v_28) {
+ __v_29 = __v_29 | 0;
+ __v_37 = __v_37 | 0;
+ __v_17[__v_29 | 0] = __v_37;
+ }
+ function __f_49(__v_29) {
+ __v_29 = __v_29 | 0;
+ return __v_17[__v_29] | 0;
+ }
+ function __f_98(__v_29) {
+ __v_29 = __v_29 | 0;
+ return __v_17[__v_8[__v_29 >> -5] | 115] | 2147483648;
+ }
+ return {__f_49: __f_49, __f_98: __f_98, __f_64: __f_64, __f_8: __f_8};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm( __f_87.toString());
+ __v_32.__f_64(0, 20);
+ __v_32.__f_64(4, 21);
+ __v_32.__f_64(8, 22);
+ __v_32.__f_8(20, 123);
+ __v_32.__f_8(21, 42);
+ __v_32.__f_8(22, 77);
+ assertEquals(123, __v_32.__f_49(20));
+ assertEquals(42, __v_32.__f_49(21));
+ assertEquals(-1073, __v_32.__f_49(21));
+ assertEquals(123, __v_32.__f_98(0));
+ assertEquals(42, __v_32.__f_98(4));
+ assertEquals(77, __v_32.__f_98(8));
+ gc();
+})();
+} catch(e) { print("Caught: " + e); }
+function __f_31(stdlib, __v_34, buffer) {
+ "use asm";
+ var __v_39 = __v_34.x | 0, __v_38 = __v_34.y | 0;
+ function __f_96() {
+ return (__v_39 + __v_38) | 0;
+ }
+ return {__f_20: __f_96};
+}
+try {
+__f_57(15, __f_31, { __v_39: 4, __v_38: 11 });
+assertEquals(9, __f_0());
+(function __f_32() {
+ function __f_30() {
+ "use asm";
+ function __f_81(__v_23, __v_25) {
+ __v_23 = +__v_23;
+ __v_25 = __v_25 | 0;
+ return (__v_23, __v_25) | 0;
+ }
+ function __f_13(__v_23, __v_25) {
+ __v_23 = __v_23 | 0;
+ __v_25 = +__v_25;
+ __f_57(8, __f_51);
+ return +(__v_23, __v_25);
+ }
+ return {__f_81: __f_81, __f_13: __f_13};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm(__f_30.toString());
+ assertEquals(123, __v_32.__f_81(456.7, 123));
+ assertEquals(123.4, __v_32.__f_13(456, 123.4));
+})();
+} catch(e) { print("Caught: " + e); }
+function __f_82(stdlib) {
+ "use asm";
+ var __v_13 = stdlib.Math.fround;
+ __f_57(11, __f_45);
+ function __f_73() {
+ var __v_39 = __v_13(1.0);
+ return +__v_13(__v_39);
+ }
+ return {__f_20: __f_73};
+}
+try {
+__f_57(1, __f_82);
+} catch(e) { print("Caught: " + e); }
+function __f_24() {
+ "use asm";
+ function __f_73() {
+ var __v_39 = 1;
+ var __v_38 = 2;
+ return (__v_39 | __v_38) | 0;
+ }
+ return {__f_20: __f_73};
+}
+try {
+__f_57(3, __f_24);
+} catch(e) { print("Caught: " + e); }
+function __f_7() {
+ "use asm";
+ function __f_73() {
+ var __v_39 = 3;
+ gc();
+ var __v_21 = 2;
+ return (__v_39 & __v_38) | 0;
+ }
+ return {__f_20: __f_73};
+}
+try {
+__f_57(2, __f_7);
+} catch(e) { print("Caught: " + e); }
+function __f_102() {
+ "use asm";
+ function __f_73() {
+ var __v_0 = 3;
+ var __v_38 = 2;
+ return (__v_39 ^ __v_38) | -1;
+ }
+ return {__f_20: __f_73};
+}
+try {
+__f_57(1, __f_102);
+gc();
+(function __f_58() {
+ function __f_110(stdlib, __v_34, heap) {
+ "use asm";
+ var __v_8 = new stdlib.Int32Array(heap);
+ function __f_73() {
+ var __v_23 = 1;
+ var __v_25 = 2;
+ gc();
+ __v_8[0] = __v_23 + __v_25;
+ return __v_8[0] | 0;
+ }
+ return {__f_73: __f_73};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm(__f_110.toString());
+ assertEquals(3, __v_32.__f_73());
+})();
+(function __f_62() {
+ function __f_110(stdlib, __v_34, heap) {
+ "use asm";
+ var __v_9 = new stdlib.Float32Array(heap);
+ var __v_13 = stdlib.Math.fround;
+ assertEquals("number", debug.LookupMirror(__v_112).type());
+ function __f_73() {
+ var __v_23 = __v_13(1.0);
+ var __v_25 = __v_13(2.0);
+ __v_9[0] = __v_23 + __v_25;
+ gc();
+ return +__v_9[0];
+ }
+ return {__f_73: __f_73};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm(__f_110.toString());
+ assertEquals(3, __v_32.__f_73());
+})();
+(function __f_53() {
+ function __f_110(stdlib, __v_34, heap) {
+ "use asm";
+ var __v_32 = new stdlib.Float32Array(heap);
+ var __v_13 = stdlib.Math.fround;
+ function __f_73() {
+ var __v_23 = 1.23;
+ __v_9[0] = __v_23;
+ return +__v_9[0];
+ }
+ return {__f_73: __f_73};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm(__f_110.toString());
+ assertEquals(1.23, __v_32.__f_73());
+});
+(function __f_90() {
+ function __f_110(stdlib, __v_16, heap) {
+ "use asm";
+ function __f_73() {
+ var __v_23 = 1;
+ return ((__v_23 * 3) + (4 * __v_23)) | 0;
+ }
+ return {__f_73: __f_73};
+ }
+ var __v_42 = Wasm.instantiateModuleFromAsm(__f_110.toString());
+ gc();
+ assertEquals(7, __v_32.__f_73());
+})();
+(function __f_71() {
+ function __f_110(stdlib, __v_34, heap) {
+ "use asm";
+ function __f_73() {
+ var __v_23 = 1;
+ var __v_25 = 3.0;
+ __v_25 = __v_23;
+ }
+ return {__f_73: __f_73};
+ }
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(__f_110.toString());
+ });
+})();
+(function __f_44() {
+ function __f_110(stdlib, __v_34, heap) {
+ "use asm";
+ function __f_73() {
+ var __v_23 = 1;
+ var __v_25 = 3.0;
+ __v_23 = __v_25;
+ }
+ return {__f_73: __f_73};
+ }
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(__f_110.toString());
+ });
+})();
+(function __f_21() {
+ function __f_110(stdlib, __v_34, heap) {
+ "use asm";
+ function __f_73() {
+ var __v_23 = 1;
+ return ((__v_23 + __v_23) * 4) | 0;
+ }
+ return {__f_73: __f_73};
+ }
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(__f_110.toString());
+ });
+})();
+(function __f_54() {
+ function __f_110(stdlib, __v_34, heap) {
+ "use asm";
+ function __f_73() {
+ var __v_23 = 1;
+ return +__v_23;
+ gc();
+ }
+ return {__f_73: __f_73};
+ }
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(__f_110.toString());
+ });
+})();
+(function __f_80() {
+ function __f_110() {
+ "use asm";
+ function __f_73() {
+ var __v_39 = 1;
+ var __v_38 = 2;
+ var __v_40 = 0;
+ __v_40 = __v_39 + __v_38 & -1;
+ return __v_40 | 0;
+ }
+ return {__f_73: __f_73};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm(__f_110.toString());
+ assertEquals(3, __v_32.__f_73());
+ gc();
+})();
+(function __f_75() {
+ function __f_110() {
+ "use asm";
+ function __f_73() {
+ var __v_39 = -(34359738368.25);
+ var __v_38 = -2.5;
+ return +(__v_39 + __v_38);
+ }
+ return {__f_73: __f_73};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm(__f_110.toString());
+ assertEquals(-34359738370.75, __v_32.__f_73());
+})();
+(function __f_6() {
+ function __f_110() {
+ "use asm";
+ function __f_73() {
+ var __v_39 = 1.0;
+ var __v_38 = 2.0;
+ return (__v_39 & __v_38) | 0;
+ }
+ return {__f_73: __f_73};
+ }
+ assertThrows(function() {
+ Wasm.instantiateModuleFromAsm(__f_110.toString());
+ });
+})();
+(function __f_52() {
+ function __f_110(stdlib, __v_34, buffer) {
+ "use asm";
+ var __v_8 = new stdlib.Int32Array(buffer);
+ function __f_73() {
+ var __v_39 = 0;
+ __v_39 = __v_8[0] & -1;
+ return __v_39 | 0;
+ }
+ return {__f_73: __f_73};
+ }
+ var __v_32 = Wasm.instantiateModuleFromAsm(__f_110.toString());
+ assertEquals(0, __v_32.__f_73());
+})();
+(function __f_33() {
+ function __f_61($__v_23,$__v_25,$__v_24){'use asm';
+ function __f_77() {
+ var __v_28 = 0.0;
+ var __v_23 = 0;
+ __v_28 = 5616315000.000001;
+ __v_23 = ~~__v_28 >>>0;
+ __v_0 = {};
+ return __v_23 | 0;
+ }
+ return { main : __f_77 };
+ }
+ var __v_40 = Wasm.instantiateModuleFromAsm(__f_61.toString());
+ assertEquals(1321347704, __v_2.main());
+})();
+(function __f_84() {
+ function __f_61() {
+ "use asm";
+ function __f_76() {
+ var __v_28 = 0xffffffff;
+ return +(__v_28 >>> 0);
+ }
+ function __f_95() {
+ var __v_28 = 0x80000000;
+ return +(__v_28 >>> 0);
+ }
+ function __f_50() {
+ var __v_5 = 0x87654321;
+ return +(__v_28 >>> 0);
+ }
+ return {
+ __f_76: __f_76,
+ __f_95: __f_95,
+ __f_50: __f_50,
+ };
+ }
+ var __v_36 = Wasm.instantiateModuleFromAsm(__f_61.toString());
+ assertEquals(0xffffffff, __v_36.__f_76());
+ assertEquals(0x80000000, __v_36.__f_95());
+ assertEquals(0x87654321, __v_30.__f_50());
+})();
+} catch(e) { print("Caught: " + e); }
+try {
+var __v_112 = debug.MakeMirror(123).handle();
+assertEquals("number", debug.LookupMirror(__v_112).type());
+debug.ToggleMirrorCache(false);
+var __v_114 = debug.MakeMirror(123).handle();
+gc();
+assertEquals(undefined, __v_114);
+assertThrows(function() { debug.LookupMirror(__v_114) });
+debug.ToggleMirrorCache(true);
+var __v_113 = debug.MakeMirror(123).handle();
+assertEquals("number", debug.LookupMirror(__v_113).type());
+} catch(e) { print("Caught: " + e); }
+try {
+var Debug = debug.Debug;
+var __v_25 = null;
+var __v_113 = true;
+} catch(e) { print("Caught: " + e); }
+function __f_112(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertTrue(exec_state.frame(0).sourceLineText().indexOf("BREAK") > 0);
+ } catch (e) {
+ __v_0 = e;
+ }
+}
+function __f_113() {
+ return 1;
+}
+try {
+Debug.setListener(__f_112);
+nop();
+__f_113();
+Debug.setListener(null);
+assertNull(__v_112);
+} catch(e) { print("Caught: " + e); }
diff --git a/deps/v8/test/mjsunit/regress/regress-618657.js b/deps/v8/test/mjsunit/regress/regress-618657.js
new file mode 100644
index 0000000000..170e235014
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-618657.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition --ignition-filter=-foo
+
+function* foo() { yield 42 }
+function* goo() { yield 42 }
+var f = foo();
+var g = goo();
+assertEquals(42, f.next().value);
+assertEquals(42, g.next().value);
+assertEquals(true, f.next().done);
+assertEquals(true, g.next().done);
diff --git a/deps/v8/test/mjsunit/regress/regress-619382.js b/deps/v8/test/mjsunit/regress/regress-619382.js
new file mode 100644
index 0000000000..971318ac97
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-619382.js
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// // Use of this source code is governed by a BSD-style license that can be
+// // found in the LICENSE file.
+//
+// // Flags: --expose-gc --always-opt
+
+(function __f_9() {
+})();
+function __f_16(ctor_desc) {
+ var __v_22 = 5;
+ var __v_25 = [];
+ gc(); gc(); gc();
+ for (var __v_18 = 0; __v_18 < __v_22; __v_18++) {
+ __v_25[__v_18] = ctor_desc.ctor.apply();
+ }
+}
+var __v_28 = [
+ {
+ ctor: function(__v_27) { return {a: __v_27}; },
+ args: function() { return [1.5 + __v_18]; } },
+ {
+ ctor: function(__v_27) { var __v_21 = []; __v_21[1] = __v_27; __v_21[200000] = __v_27; return __v_21; },
+ args: function() { return [1.5 + __v_18]; } },
+ {
+ ctor: function() {
+ } }
+];
+var __v_26 = [
+ {
+ }];
+ __v_26.forEach(function(__v_16) {
+ __v_28.forEach(function(ctor) {
+ __f_16(ctor);
+ });
+ });
diff --git a/deps/v8/test/mjsunit/regress/regress-620553.js b/deps/v8/test/mjsunit/regress/regress-620553.js
new file mode 100644
index 0000000000..461b9bb189
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-620553.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+var o0 = [];
+var o1 = [];
+var cnt = 0;
+o1.__defineGetter__(0, function() {
+ if (cnt++ > 2) return;
+ o0.shift();
+ gc();
+ o0.push(0);
+ o0.concat(o1);
+});
+o1[0];
diff --git a/deps/v8/test/mjsunit/regress/regress-620750.js b/deps/v8/test/mjsunit/regress/regress-620750.js
new file mode 100644
index 0000000000..ab8fbd98fc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-620750.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --es-staging
+
+function push_a_lot(arr) {
+ for (var i = 0; i < 2e4; i++) {
+ arr.push(i);
+ }
+ return arr;
+}
+
+__v_13 = push_a_lot([]);
diff --git a/deps/v8/test/mjsunit/regress/regress-621869.js b/deps/v8/test/mjsunit/regress/regress-621869.js
new file mode 100644
index 0000000000..db34064457
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-621869.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+var o0 = [];
+var o1 = [];
+var cnt = 0;
+var only_scavenge = true;
+o1.__defineGetter__(0, function() {
+ if (cnt++ > 2) return;
+ o0.shift();
+ gc(only_scavenge);
+ o0.push((64));
+ o0.concat(o1);
+});
+o1[0];
diff --git a/deps/v8/test/mjsunit/regress/regress-622663.js b/deps/v8/test/mjsunit/regress/regress-622663.js
new file mode 100644
index 0000000000..9606bd86fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-622663.js
@@ -0,0 +1,14 @@
++// Copyright 2016 the V8 project authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// Flags: --no-lazy
+
+(function() {
+ try { (y = [...[]]) => {} } catch(_) {} // will core dump, if not fixed
+})();
+
+(function() {
+ try { ((y = [...[]]) => {})(); } catch(_) {} // will core dump, if not fixed,
+ // even without --no-lazy
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-625121.js b/deps/v8/test/mjsunit/regress/regress-625121.js
new file mode 100644
index 0000000000..27ad0f5faf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-625121.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function test(f) {
+ f(0);
+ f(NaN);
+ %OptimizeFunctionOnNextCall(f);
+ f(1.0);
+}
+
+test(x => Math.cosh(+x));
+test(x => Math.sinh(+x));
+test(x => Math.tanh(+x));
diff --git a/deps/v8/test/mjsunit/regress/regress-631050.js b/deps/v8/test/mjsunit/regress/regress-631050.js
new file mode 100644
index 0000000000..b31c6a2022
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-631050.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --gc-global --stress-runs=8
+
+function __f_3(x, expected) {
+ var __v_3 = [];
+ __v_3.length = x;
+ __f_3(true, 1);
+}
+
+try {
+ __f_3(2147483648, 2147483648);
+} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-632289.js b/deps/v8/test/mjsunit/regress/regress-632289.js
new file mode 100644
index 0000000000..65a22558de
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-632289.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition --turbo-from-bytecode --always-opt --allow-natives-syntax
+
+try {
+} catch(e) {; }
+(function __f_12() {
+})();
+(function __f_6() {
+ function __f_3() {
+ }
+ function __f_4() {
+ try {
+ } catch (e) {
+ }
+ }
+ __f_4();
+ %OptimizeFunctionOnNextCall(__f_4);
+ __f_4();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-633998.js b/deps/v8/test/mjsunit/regress/regress-633998.js
new file mode 100644
index 0000000000..ff34a0a44e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-633998.js
@@ -0,0 +1,44 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var err_str_1 = "apply was called on , which is a object and not a function";
+var err_str_2 =
+ "apply was called on Error, which is a object and not a function";
+
+var reached = false;
+var error = new Error();
+error.name = error;
+try {
+ Reflect.apply(error);
+ reached = true;
+} catch (e) {
+ assertTrue(e.stack.indexOf(err_str_1) != -1);
+} finally {
+ assertFalse(reached);
+}
+
+reached = false;
+error = new Error();
+error.msg = error;
+try {
+ Reflect.apply(error);
+ reached = true;
+} catch (e) {
+ assertTrue(e.stack.indexOf(err_str_2) != -1);
+} finally {
+ assertFalse(reached);
+}
+
+reached = false;
+error = new Error();
+error.name = error;
+error.msg = error;
+try {
+ Reflect.apply(error);
+ reached = true;
+} catch (e) {
+ assertTrue(e.stack.indexOf(err_str_1) != -1);
+} finally {
+ assertFalse(reached);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-635429.js b/deps/v8/test/mjsunit/regress/regress-635429.js
new file mode 100644
index 0000000000..7fbce0d3cf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-635429.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=150
+
+function foo() {
+ "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + 
"a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "a" + "boom"};
+
+try {
+ foo()
+} catch(e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-638134.js b/deps/v8/test/mjsunit/regress/regress-638134.js
new file mode 100644
index 0000000000..5391eed148
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-638134.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() {
+ // Generates a forward branch that puts 200 in the constant pool.
+ var i = 0;
+ if (i) {
+ i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0;
+ i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0;
+ i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0;
+ i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0;
+ i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0;
+ i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0; i = 0;
+ i = 0; i = 0; i = 0; i = 0; i = 0; i = 0;
+ }
+ // Emit a 200 literal which also ends up in the constant pool.
+ var j = 0.2e3;
+}
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-639270.js b/deps/v8/test/mjsunit/regress/regress-639270.js
new file mode 100644
index 0000000000..0924650de7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-639270.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --es-staging --ignition-staging --turbo
+
+"use strict";
+
+var g = (async () => { return JSON.stringify() });
+
+g();
+g();
+%OptimizeFunctionOnNextCall(g);
+g();
diff --git a/deps/v8/test/mjsunit/regress/regress-674753.js b/deps/v8/test/mjsunit/regress/regress-674753.js
index b3704ea96a..d8a504a695 100644
--- a/deps/v8/test/mjsunit/regress/regress-674753.js
+++ b/deps/v8/test/mjsunit/regress/regress-674753.js
@@ -25,62 +25,138 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
+var undetectable = %GetUndetectable();
+
// Number
assertTrue(typeof 0 == 'number');
assertTrue(typeof 0 === 'number');
+assertFalse(typeof 0 != 'number');
+assertFalse(typeof 0 !== 'number');
assertTrue(typeof 1.2 == 'number');
assertTrue(typeof 1.2 === 'number');
+assertFalse(typeof 1.2 != 'number');
+assertFalse(typeof 1.2 !== 'number');
+assertTrue(typeof 'x' != 'number');
+assertTrue(typeof 'x' !== 'number');
assertFalse(typeof 'x' == 'number');
assertFalse(typeof 'x' === 'number');
+assertTrue(typeof Object() != 'number');
+assertTrue(typeof Object() !== 'number');
+assertFalse(typeof Object() == 'number');
+assertFalse(typeof Object() === 'number');
// String
assertTrue(typeof 'x' == 'string');
assertTrue(typeof 'x' === 'string');
+assertFalse(typeof 'x' != 'string');
+assertFalse(typeof 'x' !== 'string');
assertTrue(typeof ('x' + 'x') == 'string');
assertTrue(typeof ('x' + 'x') === 'string');
+assertFalse(typeof ('x' + 'x') != 'string');
+assertFalse(typeof ('x' + 'x') !== 'string');
+assertTrue(typeof 1 != 'string');
+assertTrue(typeof 1 !== 'string');
assertFalse(typeof 1 == 'string');
assertFalse(typeof 1 === 'string');
+assertTrue(typeof Object() != 'string');
+assertTrue(typeof Object() !== 'string');
assertFalse(typeof Object() == 'string');
assertFalse(typeof Object() === 'string');
// Boolean
assertTrue(typeof true == 'boolean');
assertTrue(typeof true === 'boolean');
+assertFalse(typeof true != 'boolean');
+assertFalse(typeof true !== 'boolean');
assertTrue(typeof false == 'boolean');
assertTrue(typeof false === 'boolean');
+assertFalse(typeof false != 'boolean');
+assertFalse(typeof false !== 'boolean');
+assertTrue(typeof 1 != 'boolean');
+assertTrue(typeof 1 !== 'boolean');
assertFalse(typeof 1 == 'boolean');
assertFalse(typeof 1 === 'boolean');
+assertTrue(typeof 'x' != 'boolean');
+assertTrue(typeof 'x' !== 'boolean');
+assertFalse(typeof 'x' == 'boolean');
+assertFalse(typeof 'x' === 'boolean');
+assertTrue(typeof Object() != 'boolean');
+assertTrue(typeof Object() !== 'boolean');
assertFalse(typeof Object() == 'boolean');
assertFalse(typeof Object() === 'boolean');
// Undefined
assertTrue(typeof void 0 == 'undefined');
assertTrue(typeof void 0 === 'undefined');
+assertFalse(typeof void 0 != 'undefined');
+assertFalse(typeof void 0 !== 'undefined');
+assertTrue(typeof 1 != 'undefined');
+assertTrue(typeof 1 !== 'undefined');
assertFalse(typeof 1 == 'undefined');
assertFalse(typeof 1 === 'undefined');
+assertTrue(typeof null != 'undefined');
+assertTrue(typeof null !== 'undefined');
+assertFalse(typeof null == 'undefined');
+assertFalse(typeof null === 'undefined');
+assertTrue(typeof Object() != 'undefined');
+assertTrue(typeof Object() !== 'undefined');
assertFalse(typeof Object() == 'undefined');
assertFalse(typeof Object() === 'undefined');
+assertTrue(typeof undetectable == 'undefined');
+assertTrue(typeof undetectable === 'undefined');
+assertFalse(typeof undetectable != 'undefined');
+assertFalse(typeof undetectable !== 'undefined');
// Function
assertTrue(typeof Object == 'function');
assertTrue(typeof Object === 'function');
+assertFalse(typeof Object != 'function');
+assertFalse(typeof Object !== 'function');
+assertTrue(typeof 1 != 'function');
+assertTrue(typeof 1 !== 'function');
assertFalse(typeof 1 == 'function');
assertFalse(typeof 1 === 'function');
+assertTrue(typeof Object() != 'function');
+assertTrue(typeof Object() !== 'function');
assertFalse(typeof Object() == 'function');
assertFalse(typeof Object() === 'function');
+assertTrue(typeof undetectable != 'function');
+assertTrue(typeof undetectable !== 'function');
+assertFalse(typeof undetectable == 'function');
+assertFalse(typeof undetectable === 'function');
// Object
assertTrue(typeof Object() == 'object');
assertTrue(typeof Object() === 'object');
+assertFalse(typeof Object() != 'object');
+assertFalse(typeof Object() !== 'object');
assertTrue(typeof new String('x') == 'object');
assertTrue(typeof new String('x') === 'object');
+assertFalse(typeof new String('x') != 'object');
+assertFalse(typeof new String('x') !== 'object');
assertTrue(typeof ['x'] == 'object');
assertTrue(typeof ['x'] === 'object');
+assertFalse(typeof ['x'] != 'object');
+assertFalse(typeof ['x'] !== 'object');
assertTrue(typeof null == 'object');
assertTrue(typeof null === 'object');
+assertFalse(typeof null != 'object');
+assertFalse(typeof null !== 'object');
+assertTrue(typeof 1 != 'object');
+assertTrue(typeof 1 !== 'object');
assertFalse(typeof 1 == 'object');
assertFalse(typeof 1 === 'object');
+assertTrue(typeof 'x' != 'object');
+assertTrue(typeof 'x' !== 'object');
assertFalse(typeof 'x' == 'object'); // bug #674753
assertFalse(typeof 'x' === 'object');
+assertTrue(typeof Object != 'object');
+assertTrue(typeof Object !== 'object');
assertFalse(typeof Object == 'object');
assertFalse(typeof Object === 'object');
+assertTrue(typeof undetectable != 'object');
+assertTrue(typeof undetectable !== 'object');
+assertFalse(typeof undetectable == 'object');
+assertFalse(typeof undetectable === 'object');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-119800.js b/deps/v8/test/mjsunit/regress/regress-crbug-119800.js
index 3946fbb71d..85f28a7bc8 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-119800.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-119800.js
@@ -5,9 +5,9 @@
// Flags: --expose-debug-as debug
function f() {
- 1;
- 2;
- 3;
+ print(1);
+ print(2);
+ print(3);
}
var Debug = debug.Debug;
@@ -34,4 +34,5 @@ Debug.setListener(null);
Debug.debuggerFlags().breakPointsActive.setValue(true);
assertNull(exception);
-assertEquals(breaks, ["1;", "2;", "3;", "}", "Debug.setListener(null);"]);
+assertEquals(breaks, ["print(1);", "print(2);", "print(3);", "}",
+ "Debug.setListener(null);"]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-320922.js b/deps/v8/test/mjsunit/regress/regress-crbug-320922.js
deleted file mode 100644
index f19962843a..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-320922.js
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-var string = "internalized dummy";
-var expected = "internalized dummy";
-string = "hello world";
-expected = "Hello " + "world";
-function Capitalize() {
- %_OneByteSeqStringSetChar(0, 0x48, string);
-}
-Capitalize();
-assertEquals(expected, string);
-Capitalize();
-assertEquals(expected, string);
-
-var twobyte = "\u20ACello world";
-
-function TwoByteCapitalize() {
- %_TwoByteSeqStringSetChar(0, 0x48, twobyte);
-}
-TwoByteCapitalize();
-assertEquals(expected, twobyte);
-TwoByteCapitalize();
-assertEquals(expected, twobyte);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js b/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
index 67ea19158e..96dce04868 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
@@ -10,7 +10,7 @@ Debug.setBreakOnException();
try {
try {
- %DebugPushPromise(new Promise(function() {}), function() {});
+ %DebugPushPromise(new Promise(function() {}));
} catch (e) {
}
throw new Error();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-451770.js b/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
index 770c8073cf..b4f088d00e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sloppy
-
assertThrows(function f() {
var t = { toString: function() { throw new Error(); } };
var o = { [t]: 23 };
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-480807.js b/deps/v8/test/mjsunit/regress/regress-crbug-480807.js
index c273f20a78..a1448d6de6 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-480807.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-480807.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --use-osr --turbo-osr --noalways-opt
+// Flags: --allow-natives-syntax --use-osr --noalways-opt
function foo() {
var c = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-492526.js b/deps/v8/test/mjsunit/regress/regress-crbug-492526.js
deleted file mode 100644
index e8ea298f8b..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-492526.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax
-
-assertThrows(function() { %FormatMessageString(-1, "", "", ""); });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-495493.js b/deps/v8/test/mjsunit/regress/regress-crbug-495493.js
new file mode 100644
index 0000000000..3dba236c37
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-495493.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --nofold-constants --enable-slow-asserts --debug-code
+
+function foo(p) {
+ for (var i = 0; i < 100000; ++i) {
+ p = Math.min(-1, 0);
+ }
+}
+foo(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-498142.js b/deps/v8/test/mjsunit/regress/regress-crbug-498142.js
deleted file mode 100644
index fcec5d1bd7..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-498142.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-sharedarraybuffer
-
-var sab = new SharedArrayBuffer(16);
-assertThrows(function() { %ArrayBufferNeuter(sab); });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-500497.js b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
index 9117440c2c..356e4e6942 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
@@ -4,6 +4,7 @@
// New space must be at max capacity to trigger pretenuring decision.
// Flags: --allow-natives-syntax --verify-heap --max-semi-space-size=1
+// Flags: --expose-gc
var global = []; // Used to keep some objects alive.
@@ -12,6 +13,8 @@ function Ctor() {
return result;
}
+gc();
+
for (var i = 0; i < 120; i++) {
// Make the "a" property long-lived, while everything else is short-lived.
global.push(Ctor().a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-515897.js b/deps/v8/test/mjsunit/regress/regress-crbug-515897.js
new file mode 100644
index 0000000000..45a812c781
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-515897.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var r1 = new RegExp("\\\\/");
+assertTrue(r1.test("\\/"));
+var r2 = eval("/" + r1.source + "/");
+assertEquals("\\\\\\/", r1.source);
+assertEquals("\\\\\\/", r2.source);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-570651.js b/deps/v8/test/mjsunit/regress/regress-crbug-570651.js
deleted file mode 100644
index 9860b428b1..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-570651.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-Error.prepareStackTrace = (e,s) => s;
-var __v_3 = Error().stack[0].constructor;
-var __v_4 = {};
-function __f_3() {}
-var __v_5 = __v_3.call(null, __v_4, __f_3, {valueOf() { return 1611877293 }});
- __v_5.getColumnNumber();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-571517.js b/deps/v8/test/mjsunit/regress/regress-crbug-571517.js
index 03bf76cb5e..ca7d7f73ba 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-571517.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-571517.js
@@ -11,7 +11,10 @@ function f(a) {
var rec = new Receiver();
-var proto = rec.__proto__.__proto__;
+// Formerly, this mutated rec.__proto__.__proto__, but
+// the global object prototype chain is now immutable;
+// not sure if this test now hits the original hazard case.
+var proto = rec.__proto__;
// Initialize prototype chain dependent IC (nonexistent load).
assertEquals(undefined, f(rec));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-582048.js b/deps/v8/test/mjsunit/regress/regress-crbug-582048.js
new file mode 100644
index 0000000000..6d98f488e3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-582048.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var frame_count = exec_state.frameCount();
+ for (var i = 0; i < frame_count; i++) {
+ var frame = exec_state.frame(i);
+ var scope_count = frame.scopeCount();
+ for (var j = 0; j < scope_count; j++) {
+ var scope = frame.scope(j);
+ assertTrue(scope.scopeObject().property('').isUndefined());
+ }
+ }
+ } catch (e) {
+ print(e, e.stack);
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+(function(a = 1) { debugger; })();
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599067.js b/deps/v8/test/mjsunit/regress/regress-crbug-599067.js
index bc10aa44c2..de3c99af03 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-599067.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599067.js
@@ -7,5 +7,5 @@ try {
var p = new Proxy({}, o);
Error.captureStackTrace(p);
} catch(e) {
- assertEquals("Cannot pass private property name to proxy trap", e.message);
+ assertEquals("invalid_argument", e.message);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-600257.js b/deps/v8/test/mjsunit/regress/regress-crbug-600257.js
new file mode 100644
index 0000000000..87bd2e39af
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-600257.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+(function rec() {
+ try {
+ rec();
+ } catch (e) {
+ /{/;
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-600995.js b/deps/v8/test/mjsunit/regress/regress-crbug-600995.js
deleted file mode 100644
index c532608799..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-600995.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --noharmony-iterator-close
-
-// The {Set} function will produce a different type feedback vector layout
-// depending on whether Harmony iterator finalization is enabled or not.
-
-new Set();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-602184.js b/deps/v8/test/mjsunit/regress/regress-crbug-602184.js
new file mode 100644
index 0000000000..c7d793bb0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-602184.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(test, a) {
+ var v;
+ if (test) {
+ v = v|0;
+ }
+ a[v] = 1;
+}
+var v = new String();
+f(false, v);
+f(false, v);
+
+v = new Int32Array(10);
+f(true, v);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-602595.js b/deps/v8/test/mjsunit/regress/regress-crbug-602595.js
new file mode 100644
index 0000000000..7f6d478e05
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-602595.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+function f(a) { return [a] }
+
+assertEquals([23], f(23));
+assertEquals([42], f(42));
+%OptimizeFunctionOnNextCall(f);
+assertEquals([65], f(65));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-603463.js b/deps/v8/test/mjsunit/regress/regress-crbug-603463.js
new file mode 100644
index 0000000000..20bfae65c5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-603463.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function load(a, i) {
+ return a[i];
+}
+
+function f() {
+ return load(new Proxy({}, {}), undefined);
+}
+
+f();
+f();
+load([11, 22, 33], 0);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-604299.js b/deps/v8/test/mjsunit/regress/regress-crbug-604299.js
new file mode 100644
index 0000000000..9908f2df4d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-604299.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Array.prototype.__defineSetter__(0,function(value){});
+
+if (this.Intl) {
+ var o = new Intl.DateTimeFormat('en-US', {'timeZone': 'Asia/Katmandu'})
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-605060.js b/deps/v8/test/mjsunit/regress/regress-crbug-605060.js
new file mode 100644
index 0000000000..d2dc79a310
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-605060.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+Array.prototype.__defineGetter__('map', function(){});
+Array.prototype.__defineGetter__('map', function(){});
+Array.prototype.__defineGetter__('map', function(){});
+assertTrue(%HasFastProperties(Array.prototype));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-605581.js b/deps/v8/test/mjsunit/regress/regress-crbug-605581.js
new file mode 100644
index 0000000000..0f1daabead
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-605581.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertThrows(() => exec_state.frame(0).evaluate("bar.baz"), ReferenceError);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+(function() {
+ debugger; // bar is still in TDZ at this point.
+ let bar = 1;
+ (x => bar); // force bar to be context-allocated.
+})();
+
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-605862.js b/deps/v8/test/mjsunit/regress/regress-crbug-605862.js
new file mode 100644
index 0000000000..82a5d454ec
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-605862.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/[]*1/u.exec("\u1234");
+/[^\u0000-\u{10ffff}]*1/u.exec("\u1234");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-608279.js b/deps/v8/test/mjsunit/regress/regress-crbug-608279.js
new file mode 100644
index 0000000000..22c69f252d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-608279.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --always-opt --no-lazy
+
+function __f_38() {
+ try {
+ throw 0;
+ } catch (e) {
+ eval();
+ var __v_38 = { a: 'hest' };
+ __v_38.m = function () { return __v_38.a; };
+ }
+ return __v_38;
+}
+var __v_40 = __f_38();
+__v_40.m();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-609029.js b/deps/v8/test/mjsunit/regress/regress-crbug-609029.js
new file mode 100644
index 0000000000..bd77de28a9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-609029.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt --function-context-specialization --gc-interval=14
+// Flags: --turbo-filter=match --verify-heap
+"xxx".match();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-609046.js b/deps/v8/test/mjsunit/regress/regress-crbug-609046.js
new file mode 100644
index 0000000000..10b63af3e3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-609046.js
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test that hidden scopes are correctly walked in the scope chain.
+
+var Debug = debug.Debug;
+var exception = null;
+var delegate = null;
+var done = false;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals([ debug.ScopeType.Block,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global ],
+ exec_state.frame(0).allScopes().map(s => s.scopeType()));
+ done = true;
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+for(let a = 0; a < 3; a++) {
+ debugger;
+ eval(); // Force context-allocation of everything.
+}
+
+Debug.setListener(null);
+assertNull(exception);
+assertTrue(done);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-610207.js b/deps/v8/test/mjsunit/regress/regress-crbug-610207.js
new file mode 100644
index 0000000000..4396a56a77
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-610207.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = function(exception, frames) {
+ return frames[0].getEvalOrigin();
+}
+
+try {
+ Realm.eval(0, "throw new Error('boom');");
+} catch(e) {
+ print(e.stack);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-612109.js b/deps/v8/test/mjsunit/regress/regress-crbug-612109.js
new file mode 100644
index 0000000000..202bd96c77
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-612109.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+s = "string for triggering osr in __f_0";
+for (var i = 0; i < 16; i++) s = s + s;
+decodeURI(encodeURI(s));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-612142.js b/deps/v8/test/mjsunit/regress/regress-crbug-612142.js
new file mode 100644
index 0000000000..de2dc8d04a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-612142.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var thrower = {[Symbol.toPrimitive]: function(e) { throw e }};
+try {
+ for (var i = 0; i < 10; i++) { }
+ for (var i = 0.5; i < 100000; ++i) { }
+ for (var i = 16 | 0 || 0 || this || 1; i;) { String.fromCharCode(thrower); }
+} catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-613494.js b/deps/v8/test/mjsunit/regress/regress-crbug-613494.js
new file mode 100644
index 0000000000..6fcc1e94f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-613494.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape --noanalyze-environment-liveness
+
+function f() {
+ var bound = 0;
+ function g() { return bound }
+}
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-613570.js b/deps/v8/test/mjsunit/regress/regress-crbug-613570.js
new file mode 100644
index 0000000000..3cd9857761
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-613570.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals("[\n\u26031,\n\u26032\n]",
+ JSON.stringify([1, 2], null, "\u2603"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-613905.js b/deps/v8/test/mjsunit/regress/regress-crbug-613905.js
new file mode 100644
index 0000000000..8bb38c9b9d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-613905.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = (e,s) => s;
+var CallSiteConstructor = Error().stack[0].constructor;
+
+try {
+ (new CallSiteConstructor(3, 6)).toString();
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-613919.js b/deps/v8/test/mjsunit/regress/regress-crbug-613919.js
new file mode 100644
index 0000000000..cbd3e43b96
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-613919.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+function g(a) {
+ if (a) return arguments;
+ %DeoptimizeNow();
+ return 23;
+}
+function f() {
+ return g(false);
+}
+assertEquals(23, f());
+assertEquals(23, f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals(23, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-614292.js b/deps/v8/test/mjsunit/regress/regress-crbug-614292.js
new file mode 100644
index 0000000000..3a67c17f60
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-614292.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return [] | 0 && values[0] || false;
+}
+
+%OptimizeFunctionOnNextCall(foo);
+try {
+ foo();
+} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-614644.js b/deps/v8/test/mjsunit/regress/regress-crbug-614644.js
new file mode 100644
index 0000000000..d219cd3b92
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-614644.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(a, x) {
+ a.shift(2, a.length = 2);
+ a[0] = x;
+}
+
+f([ ], 1.1);
+f([1], 1.1);
+%OptimizeFunctionOnNextCall(f);
+f([1], 1.1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-614727.js b/deps/v8/test/mjsunit/regress/regress-crbug-614727.js
new file mode 100644
index 0000000000..0845afc5ac
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-614727.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+function f(a, b, c) { return arguments }
+function g(...args) { return args }
+
+// On 64-bit machine this produces a 768K array which is sufficiently small to
+// not cause a stack overflow, but big enough to move the allocated arguments
+// object into large object space (kMaxRegularHeapObjectSize == 600K).
+var length = Math.pow(2, 15) * 3;
+var args = new Array(length);
+assertEquals(length, f.apply(null, args).length);
+assertEquals(length, g.apply(null, args).length);
+
+// On 32-bit machines this produces an equally sized array, however it might in
+// turn trigger a stack overflow on 64-bit machines, which we need to catch.
+var length = Math.pow(2, 16) * 3;
+var args = new Array(length);
+try { f.apply(null, args) } catch(e) {}
+try { g.apply(null, args) } catch(e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-615774.js b/deps/v8/test/mjsunit/regress/regress-crbug-615774.js
new file mode 100644
index 0000000000..ea5e67513e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-615774.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = (e,s) => s;
+var CallSiteConstructor = Error().stack[0].constructor;
+
+try {
+ (new CallSiteConstructor(CallSiteConstructor, 6)).toString();
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js
new file mode 100644
index 0000000000..75abe3c2e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-616709-1.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Make the Object prototype have dictionary properties.
+for (var i = 0; i < 2000; i++) {
+ Object.prototype['X'+i] = true;
+}
+
+function boom(a1) {
+ return a1[0];
+}
+
+var a = new Array(1);
+a[0] = 0.1;
+boom(a);
+boom(a);
+%OptimizeFunctionOnNextCall(boom);
+boom(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js
new file mode 100644
index 0000000000..27e5d2d9da
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-616709-2.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Make the Array prototype have dictionary properties.
+for (var i = 0; i < 2000; i++) {
+ Array.prototype['X'+i] = true;
+}
+
+function boom(a1) {
+ return a1[0];
+}
+
+var a = new Array(1);
+a[0] = 0.1;
+boom(a);
+boom(a);
+%OptimizeFunctionOnNextCall(boom);
+boom(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-617527.js b/deps/v8/test/mjsunit/regress/regress-crbug-617527.js
new file mode 100644
index 0000000000..cf4662871c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-617527.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --enable-slow-asserts
+
+Object.defineProperty(Array.prototype, "1", { get: toLocaleString });
+assertThrows(_ => new RegExp(0, 0));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-617567.js b/deps/v8/test/mjsunit/regress/regress-crbug-617567.js
new file mode 100644
index 0000000000..f0c696e14b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-617567.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-filter=* --allow-natives-syntax
+
+var v1 = {};
+function g() {
+ v1 = [];
+ for (var i = 0; i < 1; i++) {
+ v1[i]();
+ }
+}
+
+var v2 = {};
+var v3 = {};
+function f() {
+ v3 = v2;
+ g();
+}
+
+assertThrows(g);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-618788.js b/deps/v8/test/mjsunit/regress/regress-crbug-618788.js
new file mode 100644
index 0000000000..a104d8d39e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-618788.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slice and splice both try to set the length property of their return
+// value. Add a bogus setter to allow that.
+Object.defineProperty(Int32Array.prototype, 'length', { set(v) { } });
+
+(function testSlice() {
+ var a = new Array();
+ a.constructor = Int32Array;
+ a.length = 1000; // Make the length >= 1000 so UseSparseVariant returns true.
+ assertTrue(a.slice() instanceof Int32Array);
+})();
+
+(function testSplice() {
+ var a = new Array();
+ a.constructor = Int32Array;
+ a.length = 1000; // Make the length >= 1000 so UseSparseVariant returns true.
+ assertTrue(a.splice(1) instanceof Int32Array);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-618845.js b/deps/v8/test/mjsunit/regress/regress-crbug-618845.js
new file mode 100644
index 0000000000..ea3baba0bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-618845.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Foo() {}
+Object.defineProperty(Foo.prototype, "name",
+ {get: function() { return "FooName"; }});
+
+function ic(f) {
+ return f.prototype.name;
+}
+
+assertEquals("FooName", ic(Foo));
+assertEquals("FooName", ic(Foo)); // Don't crash, don't time out.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-620119.js b/deps/v8/test/mjsunit/regress/regress-crbug-620119.js
new file mode 100644
index 0000000000..cbe5a78713
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-620119.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-lazy
+
+assertEquals(0, ((x, {[(x = function() { y = 0 }, "foo")]: y = eval(1)}) => { x(); return y })(42, {}));
+assertEquals(0, (function (x, {[(x = function() { y = 0 }, "foo")]: y = eval(1)}) { x(); return y })(42, {}));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-620253.js b/deps/v8/test/mjsunit/regress/regress-crbug-620253.js
new file mode 100644
index 0000000000..811a4e7715
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-620253.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --throws
+
+load("test/mjsunit/regress/regress-crbug-620253.js");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-620650.js b/deps/v8/test/mjsunit/regress/regress-crbug-620650.js
new file mode 100644
index 0000000000..25a92cab20
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-620650.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+ function f(src, dst, i) {
+ dst[i] = src[i];
+ }
+ var buf = new ArrayBuffer(16);
+ var view_int32 = new Int32Array(buf);
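+  // Assumption: 0xFFF7FFFF matches the upper word of V8's hole-NaN
+  // encoding, so the f64 read below yields a NaN whose raw bits resemble
+  // the array-hole marker if the store is not canonicalized.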
+ view_int32[1] = 0xFFF7FFFF;
+ var view_f64 = new Float64Array(buf);
+ var arr = [,0.1];
+ f(view_f64, arr, -1);
+ f(view_f64, arr, 0);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-621361.js b/deps/v8/test/mjsunit/regress/regress-crbug-621361.js
new file mode 100644
index 0000000000..f9496ae87d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-621361.js
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+var steps = 0;
+var exception = null;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals([ debug.ScopeType.Local,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global],
+ execState.frame().allScopes().map(s => s.scopeType()));
+ var x_value = execState.frame().evaluate("x").value();
+ if (steps < 2) {
+ assertEquals(undefined, x_value);
+ execState.prepareStep(Debug.StepAction.StepIn);
+ } else {
+ assertEquals("l => l", x_value.toString());
+ }
+ steps++;
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+(function() {
+ debugger;
+ var x = l => l;
+})();
+
+Debug.setListener(null);
+assertNull(exception);
+assertEquals(3, steps);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-621611.js b/deps/v8/test/mjsunit/regress/regress-crbug-621611.js
new file mode 100644
index 0000000000..bf9a4605cd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-621611.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(Math.E, Math.exp(1));
+assertEquals(Math.LN10, Math.log(10));
+assertEquals(Math.LN2, Math.log(2));
+assertEquals(Math.LOG10E, Math.log10(Math.E));
+assertEquals(Math.LOG2E, Math.log2(Math.E));
+assertEquals(Math.SQRT1_2, Math.sqrt(0.5));
+assertEquals(Math.SQRT2, Math.sqrt(2));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-621816.js b/deps/v8/test/mjsunit/regress/regress-crbug-621816.js
new file mode 100644
index 0000000000..ca7f5ac6df
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-621816.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo
+
+function f() {
+ var o = {};
+ o.a = 1;
+}
+function g() {
+ var o = { ['a']: function(){} };
+ f();
+}
+f();
+f();
+%OptimizeFunctionOnNextCall(g);
+g();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-624747.js b/deps/v8/test/mjsunit/regress/regress-crbug-624747.js
new file mode 100644
index 0000000000..7927263f8e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-624747.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --es-staging
+
+"use strict";
+
+function bar() {
+ try {
+ unref;
+ } catch (e) {
+ return (1 instanceof TypeError) && unref(); // Call in tail position!
+ }
+}
+
+function foo() {
+ return bar(); // Call in tail position!
+}
+
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-624919.js b/deps/v8/test/mjsunit/regress/regress-crbug-624919.js
new file mode 100644
index 0000000000..5a2b100daf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-624919.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(a, b, c, d, e) {
+ if (a && (b, c ? d() : e())) return 0;
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-625547.js b/deps/v8/test/mjsunit/regress/regress-crbug-625547.js
new file mode 100644
index 0000000000..20eb85db5e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-625547.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+var v1 = {};
+v1 = 0;
+var v2 = {};
+v2 = 0;
+gc();
+
+var minus_zero = {z:-0.0}.z;
+var nan = undefined + 1;
+function f() {
+ v1 = minus_zero;
+ v2 = nan;
+};
+%OptimizeFunctionOnNextCall(f);
+f();
+gc(); // Boom!
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-625590.js b/deps/v8/test/mjsunit/regress/regress-crbug-625590.js
new file mode 100644
index 0000000000..aa9ff8a5df
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-625590.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var obj = {};
+function f() {}
+f.prototype = {
+ mSloppy() {
+ super[obj] = 15;
+ }
+};
+new f().mSloppy();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-626715.js b/deps/v8/test/mjsunit/regress/regress-crbug-626715.js
new file mode 100644
index 0000000000..e842fa61c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-626715.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Create a prototype object which has a lot of fast properties.
+var body = "";
+for (var i = 0; i < 100; i++) {
+ body += `this.a${i} = 0;\n`;
+}
+var Proto = new Function(body);
+
+function A() {}
+A.prototype = new Proto();
+
+// Create an object and add properties that already exist in the prototype.
+// At some point the object will be turned into dictionary mode, and one of
+// the fast property details from the prototype will be reinterpreted as the
+// details for a new property ...
+var o = new A();
+for (var i = 0; i < 100; i++) {
+ o["a" + i] = i;
+}
+
+// ... which will break the enumeration order of the slow properties.
+var names = Object.getOwnPropertyNames(o);
+for (var i = 0; i < 100; i++) {
+ assertEquals("a" + i, names[i]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-627828.js b/deps/v8/test/mjsunit/regress/regress-crbug-627828.js
new file mode 100644
index 0000000000..75ff77cb64
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-627828.js
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestDeoptFromComputedNameInObjectLiteral() {
+ function f() {
+ var o = {
+ toString: function() {
+ %DeoptimizeFunction(f);
+ return "x";
+ }
+ };
+ return { [o]() { return 23 } };
+ }
+ assertEquals(23, f().x());
+ assertEquals(23, f().x());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(23, f().x());
+})();
+
+(function TestDeoptFromComputedNameInClassLiteral() {
+ function g() {
+ var o = {
+ toString: function() {
+ %DeoptimizeFunction(g);
+ return "y";
+ }
+ };
+ class C {
+ [o]() { return 42 };
+ }
+ return new C();
+ }
+ assertEquals(42, g().y());
+ assertEquals(42, g().y());
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(42, g().y());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-627934.js b/deps/v8/test/mjsunit/regress/regress-crbug-627934.js
new file mode 100644
index 0000000000..242dc4a78e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-627934.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var x = "1".repeat(32 * 1024 * 1024);
+for (var z = x;;) {
+ try {
+ z += {toString: function() { return x; }};
+ } catch (e) {
+ break;
+ }
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-627935.js b/deps/v8/test/mjsunit/regress/regress-crbug-627935.js
new file mode 100644
index 0000000000..fdc4d2acde
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-627935.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Intl) {
+ assertThrows("Intl.DateTimeFormat('en-US', {timeZone: 0})", RangeError);
+ assertThrows("Intl.DateTimeFormat('en-US', {timeZone: true})", RangeError);
+ assertThrows("Intl.DateTimeFormat('en-US', {timeZone: null})", RangeError);
+
+ var object = { toString: function() { return "UTC" } };
+ assertDoesNotThrow("Intl.DateTimeFormat('en-US', {timeZone: object})");
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-628573.js b/deps/v8/test/mjsunit/regress/regress-crbug-628573.js
new file mode 100644
index 0000000000..5ba184d9ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-628573.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var z = {valueOf: function() { return 3; }};
+
+(function() {
+ try {
+ var tmp = { x: 12 };
+ with (tmp) {
+ z++;
+ }
+ throw new Error("boom");
+ } catch(e) {}
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-629062.js b/deps/v8/test/mjsunit/regress/regress-crbug-629062.js
new file mode 100644
index 0000000000..228ae6d2d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-629062.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ return 1 + ((1 == 0) && undefined);
+}
+
+foo(false);
+foo(false);
+%OptimizeFunctionOnNextCall(foo);
+foo(true);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-629435.js b/deps/v8/test/mjsunit/regress/regress-crbug-629435.js
new file mode 100644
index 0000000000..b73f601c71
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-629435.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(v) {
+ v.constructor;
+}
+
+bar([]);
+bar([]);
+
+function foo() {
+ var x = -0;
+ bar(x + 1);
+}
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-629823.js b/deps/v8/test/mjsunit/regress/regress-crbug-629823.js
new file mode 100644
index 0000000000..bbf74b80af
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-629823.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {}
+function bar() {
+ o[0] = +o[0];
+ o = /\u23a1|__v_4/;
+}
+bar();
+bar();
+bar();
+function foo() { bar(); }
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-629996.js b/deps/v8/test/mjsunit/regress/regress-crbug-629996.js
new file mode 100644
index 0000000000..025a86ee72
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-629996.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var mirror = debug.MakeMirror(new Proxy({}, {}));
+// As long as we have no special mirror for proxies, we use an object mirror.
+assertEquals("object", mirror.type());
diff --git a/deps/v8/test/mjsunit/regress/regress-449070.js b/deps/v8/test/mjsunit/regress/regress-crbug-630559.js
index 7a0f0a838c..f9623ed879 100644
--- a/deps/v8/test/mjsunit/regress/regress-449070.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-630559.js
@@ -1,10 +1,7 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
+
// Flags: --allow-natives-syntax
-try {
- %NormalizeElements(this);
-} catch(e) {
-}
+assertThrows("try{}%");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-630561.js b/deps/v8/test/mjsunit/regress/regress-crbug-630561.js
new file mode 100644
index 0000000000..798f33d658
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-630561.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --gc-interval=30
+
+var dict_elements = {};
+
+for (var i= 0; i< 100; i++) {
+ dict_elements[2147483648 + i] = i;
+}
+
+var keys = Object.keys(dict_elements);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-630923.js b/deps/v8/test/mjsunit/regress/regress-crbug-630923.js
new file mode 100644
index 0000000000..ff0d2dd05e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-630923.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {};
+function bar(o) {
+ return 1 + (o.t ? 1 : 2);
+}
+function foo() {
+ bar(o);
+}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-630951.js b/deps/v8/test/mjsunit/regress/regress-crbug-630951.js
new file mode 100644
index 0000000000..58af024d3a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-630951.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ "use asm";
+ var o = new Int32Array(64 * 1024);
+ return () => { o[i1 >> 2] | 0; }
+}
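+// i1 is never declared, so the arrow function returned by foo throws a
+// ReferenceError when assertThrows invokes it.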
+assertThrows(foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-630952.js b/deps/v8/test/mjsunit/regress/regress-crbug-630952.js
new file mode 100644
index 0000000000..42d30a3ec1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-630952.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+try {
+function __f_4(sign_bit,
+ mantissa_29_bits) {
+}
+__f_4.prototype.returnSpecial = function() {
+ this.mantissa_29_bits * mantissa_29_shift;
+}
+__f_4.prototype.toSingle = function() {
+ if (-65535) return this.toSingleSubnormal();
+}
+__f_4.prototype.toSingleSubnormal = function() {
+ if (__v_15) {
+ var __v_7 = this.mantissa_29_bits == -1 &&
+ (__v_13 & __v_10 ) == 0;
+ }
+ __v_8 >>= __v_7;
+}
+__v_14 = new __f_4();
+__v_14.toSingle();
+} catch(e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js
new file mode 100644
index 0000000000..bd40dcd3a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-1.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
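+// A Symbol (Symbol.toPrimitive is merely a convenient one) cannot be
+// compared with <: ToNumber on a Symbol throws a TypeError, which the
+// optimized code for bar must still surface.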
+function foo(x) { return x < x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js
new file mode 100644
index 0000000000..1c4fccaac1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-10.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x << x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js
new file mode 100644
index 0000000000..a03a125ede
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-11.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x >> x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js
new file mode 100644
index 0000000000..f710bd0149
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-12.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x >>> x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js
new file mode 100644
index 0000000000..7a784481ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-13.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x & x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js
new file mode 100644
index 0000000000..829bf900b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-14.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x | x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js
new file mode 100644
index 0000000000..1257d797ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-15.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x ^ x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js
new file mode 100644
index 0000000000..ce46b27886
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-2.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x > x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js
new file mode 100644
index 0000000000..4258b15508
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-3.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x >= x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js
new file mode 100644
index 0000000000..7e8cdf8f56
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-4.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x <= x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js
new file mode 100644
index 0000000000..acdedcba13
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-5.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x + x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js
new file mode 100644
index 0000000000..d17772f17c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-6.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x / x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js
new file mode 100644
index 0000000000..7d03fa8551
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-7.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x * x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js
new file mode 100644
index 0000000000..474110b53d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-8.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x % x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js b/deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js
new file mode 100644
index 0000000000..ad472e0722
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631318-9.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) { return x - x; }
+foo(1);
+foo(2);
+
+function bar(x) { foo(x); }
+%OptimizeFunctionOnNextCall(bar);
+
+assertThrows(() => bar(Symbol.toPrimitive));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-631917.js b/deps/v8/test/mjsunit/regress/regress-crbug-631917.js
new file mode 100644
index 0000000000..ca7a94c844
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-631917.js
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var b = { toString: function() { return "b"; } };
+var c = { toString: function() { return "c"; } };
+
+(function() {
+ var expected_receiver;
+ var obj1 = {
+ a: 100,
+ b_: 200,
+ get b() { assertEquals(expected_receiver, this); return this.b_; },
+ set b(v) { assertEquals(expected_receiver, this); this.b_ = v; },
+ c_: 300,
+ get c() { assertEquals(expected_receiver, this); return this.c_; },
+ set c(v) { assertEquals(expected_receiver, this); this.c_ = v; },
+ };
+ var obj2 = {
+ boom() {
+ super.a++;
+ super[b]++;
+ super[c]++;
+ },
+ }
+ Object.setPrototypeOf(obj2, obj1);
+
+ expected_receiver = obj2;
+ obj2.boom();
+ assertEquals(101, obj2.a);
+ assertEquals(201, obj2[b]);
+ assertEquals(301, obj2[c]);
+
+ expected_receiver = obj1;
+ assertEquals(100, obj1.a);
+ assertEquals(200, obj1[b]);
+ assertEquals(300, obj1[c]);
+}());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-632800.js b/deps/v8/test/mjsunit/regress/regress-crbug-632800.js
new file mode 100644
index 0000000000..6296572c17
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-632800.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition --ignition-osr --turbo-from-bytecode
+
+function osr() {
+ for (var i = 0; i < 50000; ++i) Math.random();
+}
+osr();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-633585.js b/deps/v8/test/mjsunit/regress/regress-crbug-633585.js
new file mode 100644
index 0000000000..c483e47bbc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-633585.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --turbo --always-opt
+
+function f() { this.x = this.x.x; }
+gc();
+f.prototype.x = { x:1 }
+new f();
+new f();
+
+function g() {
+ function h() {};
+ h.prototype = { set x(value) { } };
+ new f();
+}
+g();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-633884.js b/deps/v8/test/mjsunit/regress/regress-crbug-633884.js
new file mode 100644
index 0000000000..6f46e96725
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-633884.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+try {
+ // Leave "blarg" as the hole in a new ScriptContext.
+ Realm.eval(Realm.current(), "throw Error(); let blarg");
+} catch (e) { }
+
+// Access "blarg" via a dynamic lookup. Should not crash!
+assertThrows(function() {
+ // Prevent full-codegen from optimizing away the %LoadLookupSlot call.
+ eval("var x = 5");
+ blarg;
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-633999.js b/deps/v8/test/mjsunit/regress/regress-crbug-633999.js
new file mode 100644
index 0000000000..3f16908610
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-633999.js
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --noturbo
+
+var Debug = debug.Debug;
+var exception = null;
+var step = 0;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Exception) return;
+ try {
+ step++;
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+(function () {
+ "use asm";
+ function f() {
+ try {
+ throw 666;
+ } catch (e) {
+ }
+ }
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+ assertOptimized(f);
+})();
+
+Debug.setListener(null);
+assertNull(exception);
+assertEquals(3, step);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-635798.js b/deps/v8/test/mjsunit/regress/regress-crbug-635798.js
new file mode 100644
index 0000000000..5456682ddc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-635798.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ var x = [];
+ var y = [];
+ x.__proto__ = y;
+ for (var i = 0; i < 200000; ++i) {
+ y[i] = 1;
+ }
+}
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-638551.js b/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
new file mode 100644
index 0000000000..baa8e9b051
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --ignition-staging --no-lazy
+
+function f() {
+ for (var i = 0; i < 10; i++) if (i == 5) %OptimizeOsr();
+ function g() {}
+ %OptimizeFunctionOnNextCall(g);
+ g();
+}
+f();
+gc(); // Make sure that ...
+gc(); // ... code flushing ...
+gc(); // ... clears code ...
+gc(); // ... attached to {g}.
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-390925.js b/deps/v8/test/mjsunit/regress/regress-crbug-640369.js
index c4d98adb3e..97982d1224 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-390925.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-640369.js
@@ -4,7 +4,12 @@
// Flags: --allow-natives-syntax
-var a = new Array();
-var b = new Array();
-Object.freeze(a);
-assertThrows(function() { %LiveEditCheckAndDropActivations(a, b, true); });
+function A() {
+ this.x = 0;
+ for (var i = 0; i < max; ) {}
+}
+function foo() {
+ for (var i = 0; i < 1; i = 2) %OptimizeOsr();
+ return new A();
+}
+try { foo(); } catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-642056.js b/deps/v8/test/mjsunit/regress/regress-crbug-642056.js
new file mode 100644
index 0000000000..ca9fc78ef6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-642056.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(o) {
+ return o.x instanceof Array;
+}
+
+var o = { x : 1.5 };
+o.x = 0;
+
+f(o);
+f(o);
+%OptimizeFunctionOnNextCall(f);
+f(o);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-644215.js b/deps/v8/test/mjsunit/regress/regress-crbug-644215.js
new file mode 100644
index 0000000000..c74112542d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644215.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
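+// Spreading an empty array contributes no elements and the trailing comma
+// is an elision, so arr is a holey array of length 1.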
+var arr = [...[],,];
+assertTrue(%HasFastHoleyElements(arr));
+assertEquals(1, arr.length);
+assertFalse(arr.hasOwnProperty(0));
+assertEquals(undefined, arr[0]);
+// Should not crash.
+assertThrows(() => arr[0][0], TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-double-canonicalization.js b/deps/v8/test/mjsunit/regress/regress-double-canonicalization.js
new file mode 100644
index 0000000000..2b345d2bb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-double-canonicalization.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var ab = new ArrayBuffer(8);
+var i_view = new Int32Array(ab);
+i_view[0] = %GetHoleNaNUpper()
+i_view[1] = %GetHoleNaNLower();
+var hole_nan = (new Float64Array(ab))[0];
+
+var array = [];
+
+function write() {
+ array[0] = hole_nan;
+}
+
+write();
+%OptimizeFunctionOnNextCall(write);
+write();
+array[1] = undefined;
+assertTrue(isNaN(array[0]));
+assertEquals("number", typeof array[0]);
diff --git a/deps/v8/test/mjsunit/regress/regress-object-assign-deprecated-2.js b/deps/v8/test/mjsunit/regress/regress-object-assign-deprecated-2.js
new file mode 100644
index 0000000000..89693de1a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-object-assign-deprecated-2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var x = {a:1, b:2};
+Object.defineProperty(x, "c", {set(v) {}})
+var y = {get c() { return {a:1, b:2.5} }};
+Object.assign(x, y, x);
diff --git a/deps/v8/test/mjsunit/regress/regress-object-assign-deprecated.js b/deps/v8/test/mjsunit/regress/regress-object-assign-deprecated.js
new file mode 100644
index 0000000000..d2e60f99e5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-object-assign-deprecated.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var x = {a:1, b:2};
+var y = {a:1, b:2.5};
+Object.assign(x, x);
diff --git a/deps/v8/test/mjsunit/regress/regress-observe-map-cache.js b/deps/v8/test/mjsunit/regress/regress-observe-map-cache.js
deleted file mode 100644
index c71759c0cc..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-observe-map-cache.js
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-object-observe
-// Flags: --allow-natives-syntax --enable-slow-asserts
-
-function f() {
- var x = new Array(0);
- x[-1] = -1;
- Object.observe(x, function() { });
-}
-
-f();
-f();
diff --git a/deps/v8/test/mjsunit/regress/regress-put-prototype-transition.js b/deps/v8/test/mjsunit/regress/regress-put-prototype-transition.js
index 70f0074683..c5b4c5abc0 100644
--- a/deps/v8/test/mjsunit/regress/regress-put-prototype-transition.js
+++ b/deps/v8/test/mjsunit/regress/regress-put-prototype-transition.js
@@ -30,7 +30,7 @@ function __f_1(__v_4, add_first, __v_6, same_map_as) {
__f_4(__v_1);
assertFalse(%HasFastProperties(__v_1));
__f_0(__v_1, __v_6);
- assertTrue(%HasFastProperties(__v_1));
+ assertFalse(%HasFastProperties(__v_1));
} else {
__f_0(__v_1, __v_6);
assertTrue(%HasFastProperties(__v_1));
diff --git a/deps/v8/test/mjsunit/regress/regress-recurse-patch-binary-op.js b/deps/v8/test/mjsunit/regress/regress-recurse-patch-binary-op.js
new file mode 100644
index 0000000000..842cc79fc8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-recurse-patch-binary-op.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var i = 0
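+// valueOf re-enters the pending + operation, which is still patching its
+// binary-op IC; the recursion bottoms out after four nested additions.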
+function valueOf() {
+ while (true) return i++ < 4 ? 1 + this : 2
+}
+
+1 + ({valueOf})
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
deleted file mode 100644
index 444fe4beb4..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-// stubbed version of ToNumber
-function ToNumber(x) {
- return 311;
-}
-
-// Reduced version of String.fromCharCode;
-// does not actually do the same calculation but exhibits untagging bug.
-function StringFromCharCode(code) {
- var n = arguments.length;
- var one_byte = %NewString(n, true);
- var i;
- for (i = 0; i < n; i++) {
- var code = arguments[i];
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- if (code > 0xff) break;
- }
-
- var two_byte = %NewString(n - i, false);
- for (var j = 0; i < n; i++, j++) {
- var code = arguments[i];
- %_TwoByteSeqStringSetChar(j, code, two_byte);
- }
- return one_byte + two_byte;
-}
-
-StringFromCharCode(0xFFF, 0xFFF);
-StringFromCharCode(0x7C, 0x7C);
-%OptimizeFunctionOnNextCall(StringFromCharCode);
-StringFromCharCode(0x7C, 0x7C);
-StringFromCharCode(0xFFF, 0xFFF);
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
deleted file mode 100644
index 0a6b211648..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-function test() {
- var string = %NewString(10, true);
- for (var i = 0; i < 10; i++) {
- %_OneByteSeqStringSetChar(i, 65, string);
- %_OneByteSeqStringSetChar(i, 66, string);
- }
- for (var i = 0; i < 10; i++) {
- assertEquals("B", string[i]);
- }
-}
-
-test();
-test();
-%OptimizeFunctionOnNextCall(test);
-test();
diff --git a/deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js b/deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js
new file mode 100644
index 0000000000..a02a2778b6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-string-from-char-code-tonumber.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
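+// FAIL is deliberately undeclared: converting thrower to a number throws a
+// ReferenceError, and the test checks that fromCharCode appears in the trace.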
+var thrower = { [Symbol.toPrimitive]: function() { FAIL } };
+
+function testTrace(func) {
+ try {
+ func(thrower);
+ assertUnreachable();
+ } catch (e) {
+ assertTrue(e.stack.indexOf("fromCharCode") >= 0);
+ }
+}
+
+testTrace(String.fromCharCode);
+
+function foo(x) { return String.fromCharCode(x); }
+
+foo(1);
+foo(2);
+testTrace(foo);
+%OptimizeFunctionOnNextCall(foo);
+testTrace(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
index a0b99980c7..0dde61fc27 100644
--- a/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
+++ b/deps/v8/test/mjsunit/regress/regress-typedarray-length.js
@@ -108,13 +108,13 @@ assertEquals(undefined, get(a));
assertEquals("blah", get(a));
})();
-// Ensure we cannot delete length, byteOffset, byteLength.
+// Ensure we can delete length, byteOffset, byteLength.
assertTrue(Int32Array.prototype.__proto__.hasOwnProperty("length"));
assertTrue(Int32Array.prototype.__proto__.hasOwnProperty("byteOffset"));
assertTrue(Int32Array.prototype.__proto__.hasOwnProperty("byteLength"));
-assertFalse(delete Int32Array.prototype.__proto__.length);
-assertFalse(delete Int32Array.prototype.__proto__.byteOffset);
-assertFalse(delete Int32Array.prototype.__proto__.byteLength);
+assertTrue(delete Int32Array.prototype.__proto__.length);
+assertTrue(delete Int32Array.prototype.__proto__.byteOffset);
+assertTrue(delete Int32Array.prototype.__proto__.byteLength);
a = new Int32Array(100);
@@ -122,28 +122,28 @@ get = function(a) {
return a.length;
}
-assertEquals(100, get(a));
-assertEquals(100, get(a));
-assertEquals(100, get(a));
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
%OptimizeFunctionOnNextCall(get);
-assertEquals(100, get(a));
+assertEquals(undefined, get(a));
get = function(a) {
return a.byteLength;
}
-assertEquals(400, get(a));
-assertEquals(400, get(a));
-assertEquals(400, get(a));
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
%OptimizeFunctionOnNextCall(get);
-assertEquals(400, get(a));
+assertEquals(undefined, get(a));
get = function(a) {
return a.byteOffset;
}
-assertEquals(0, get(a));
-assertEquals(0, get(a));
-assertEquals(0, get(a));
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
+assertEquals(undefined, get(a));
%OptimizeFunctionOnNextCall(get);
-assertEquals(0, get(a));
+assertEquals(undefined, get(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5254-1.js b/deps/v8/test/mjsunit/regress/regress-v8-5254-1.js
new file mode 100644
index 0000000000..624c85f477
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5254-1.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var foo = (function() {
+ "use asm";
+ var a = new Uint16Array(2);
+ a[0] = 32815;
+ a[1] = 32114;
+
+ function foo() {
+ var x = a[0]|0;
+ var y = a[1]|0;
+ if (x < 0) x = 4294967296 + x|0;
+ if (y < 0) y = 4294967296 + y|0;
+ return x >= y;
+ }
+
+ return foo;
+})();
+
+assertTrue(foo());
+assertTrue(foo());
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5254-2.js b/deps/v8/test/mjsunit/regress/regress-v8-5254-2.js
new file mode 100644
index 0000000000..f486fa8aa3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5254-2.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var foo = (function() {
+ "use asm";
+ var a = new Uint8Array(2);
+ a[0] = 128;
+ a[1] = 127;
+
+ function foo() {
+ var x = a[0]|0;
+ var y = a[1]|0;
+ if (x < 0) x = 4294967296 + x|0;
+ if (y < 0) y = 4294967296 + y|0;
+ return x >= y;
+ }
+
+ return foo;
+})();
+
+assertTrue(foo());
+assertTrue(foo());
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5255-1.js b/deps/v8/test/mjsunit/regress/regress-v8-5255-1.js
new file mode 100644
index 0000000000..cd14d63792
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5255-1.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ return (x ? true : "7") >> 0;
+}
+
+assertEquals(1, foo(1));
+assertEquals(1, foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(7, foo(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5255-2.js b/deps/v8/test/mjsunit/regress/regress-v8-5255-2.js
new file mode 100644
index 0000000000..5ae57ce64a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5255-2.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ return (x ? true : "7") << 0;
+}
+
+assertEquals(1, foo(1));
+assertEquals(1, foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(7, foo(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5255-3.js b/deps/v8/test/mjsunit/regress/regress-v8-5255-3.js
new file mode 100644
index 0000000000..004d6874ad
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5255-3.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ return (x ? true : "7") >>> 0;
+}
+
+assertEquals(1, foo(1));
+assertEquals(1, foo(1));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(7, foo(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-wasm-crbug-599413.js b/deps/v8/test/mjsunit/regress/regress-wasm-crbug-599413.js
new file mode 100644
index 0000000000..8f11ee0425
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-wasm-crbug-599413.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+function __f_100() {
+ "use asm";
+ function __f_76() {
+ var __v_39 = 0;
+ outer: while (1) {
+ while (__v_39 == 4294967295) {
+ }
+ }
+ }
+ return {__f_76: __f_76};
+}
+assertTrue(%IsNotAsmWasmCode(__f_100));
diff --git a/deps/v8/test/mjsunit/regress/regress-wasm-crbug-618602.js b/deps/v8/test/mjsunit/regress/regress-wasm-crbug-618602.js
new file mode 100644
index 0000000000..7aafe18475
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-wasm-crbug-618602.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+function __f_1() {
+ 'use asm';
+ function __f_3() {
+ var __v_11 = 1, __v_10 = 0, __v_12 = 0;
+ __v_12 = (__v_10 | 12) % 4294967295 | -1073741824;
+ }
+ return { __f_3: __f_3 };
+}
+assertTrue(%IsNotAsmWasmCode(__f_1));
diff --git a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js b/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
deleted file mode 100644
index 8956e287db..0000000000
--- a/deps/v8/test/mjsunit/regress/string-set-char-deopt.js
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-(function OneByteSeqStringSetCharDeoptOsr() {
- function deopt() {
- %DeoptimizeFunction(f);
- }
-
- function f(string, osr) {
- var world = " world";
- %_OneByteSeqStringSetChar(0, (deopt(), 0x48), string);
-
- for (var i = 0; osr && i < 2; i++) %OptimizeOsr();
-
- return string + world;
- }
-
- assertEquals("Hello " + "world", f("hello", false));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("Hello " + "world", f("hello", true));
-})();
-
-
-(function OneByteSeqStringSetCharDeopt() {
- function deopt() {
- %DeoptimizeFunction(f);
- }
-
- function g(x) {
- }
-
- function f(string) {
- g(%_OneByteSeqStringSetChar(0, (deopt(), 0x48), string));
- return string;
- }
-
- assertEquals("Hell" + "o", f("hello"));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("Hell" + "o", f("hello"));
-})();
-
-
-(function TwoByteSeqStringSetCharDeopt() {
- function deopt() {
- %DeoptimizeFunction(f);
- }
-
- function g(x) {
- }
-
- function f(string) {
- g(%_TwoByteSeqStringSetChar(0, (deopt(), 0x48), string));
- return string;
- }
-
- assertEquals("Hell" + "o", f("\u20ACello"));
- %OptimizeFunctionOnNextCall(f);
- assertEquals("Hell" + "o", f("\u20ACello"));
-})();
diff --git a/deps/v8/test/mjsunit/stack-traces-overflow.js b/deps/v8/test/mjsunit/stack-traces-overflow.js
index e20c6091d7..706f8fcef9 100644
--- a/deps/v8/test/mjsunit/stack-traces-overflow.js
+++ b/deps/v8/test/mjsunit/stack-traces-overflow.js
@@ -27,10 +27,22 @@
// Flags: --stack-size=100
+function overflow() {
+ var a, b, c, d, e; // Allocates some locals on the function's stack frame.
+ overflow();
+}
function rec1(a) { rec1(a+1); }
function rec2(a) { rec3(a+1); }
function rec3(a) { rec2(a+1); }
+// Test that the stack trace has the correct function location at its top.
+try {
+ overflow();
+} catch (e) {
+  var first_frame = e.stack.split("\n")[1];
+ assertTrue(first_frame.indexOf("stack-traces-overflow.js:30:18") > 0);
+}
+
// Test stack trace getter and setter.
try {
rec1(0);
@@ -63,9 +75,9 @@ try {
function testErrorPrototype(prototype) {
var object = {};
object.__proto__ = prototype;
- object.stack = "123"; // Overwriting stack property fails.
- assertEquals(prototype.stack, object.stack);
- assertTrue("123" != prototype.stack);
+ object.stack = "123"; // Overwriting stack property succeeds.
+ assertTrue(prototype.stack != object.stack);
+ assertEquals("123", object.stack);
}
try {
diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js
index 41de146346..8603d7cf28 100644
--- a/deps/v8/test/mjsunit/stack-traces.js
+++ b/deps/v8/test/mjsunit/stack-traces.js
@@ -252,11 +252,14 @@ function testTraceNativeConversion(nativeFunc) {
function testOmittedBuiltin(throwing, omitted) {
+ var reached = false;
try {
throwing();
- assertUnreachable(omitted);
+ reached = true;
} catch (e) {
assertTrue(e.stack.indexOf(omitted) < 0, omitted);
+ } finally {
+ assertFalse(reached);
}
}
@@ -305,19 +308,18 @@ testOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
// Omitted because ADD from runtime.js is non-native builtin.
testOmittedBuiltin(function(){ thrower + 2; }, "ADD");
+var reached = false;
var error = new Error();
-error.toString = function() { assertUnreachable(); };
+error.toString = function() { reached = true; };
error.stack;
+assertFalse(reached);
+reached = false;
error = new Error();
-error.name = { toString: function() { assertUnreachable(); }};
-error.message = { toString: function() { assertUnreachable(); }};
-error.stack;
-
-error = new Error();
-Array.prototype.push = function(x) { assertUnreachable(); };
-Array.prototype.join = function(x) { assertUnreachable(); };
+Array.prototype.push = function(x) { reached = true; };
+Array.prototype.join = function(x) { reached = true; };
error.stack;
+assertFalse(reached);
var fired = false;
error = new Error({ toString: function() { fired = true; } });
@@ -366,3 +368,75 @@ my_error = new Error();
var stolen_getter = Object.getOwnPropertyDescriptor(my_error, 'stack').get;
Object.defineProperty(fake_error, 'stack', { get: stolen_getter });
assertEquals(undefined, fake_error.stack);
+
+// Check that overwriting the stack property during stack trace formatting
+// does not crash.
+error = new Error();
+error.__defineGetter__("name", function() { error.stack = "abc"; });
+assertEquals("abc", error.stack);
+
+error = new Error();
+error.__defineGetter__("name", function() { delete error.stack; });
+assertEquals(undefined, error.stack);
+
+// Check that repeated trace collection does not crash.
+error = new Error();
+Error.captureStackTrace(error);
+
+// Check property descriptor.
+var o = {};
+Error.captureStackTrace(o);
+assertEquals([], Object.keys(o));
+var desc = Object.getOwnPropertyDescriptor(o, "stack");
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+assertTrue(desc.writable);
+
+// Check that an exception thrown within prepareStackTrace propagates to the caller.
+Error.prepareStackTrace = function(e, frames) { throw 42; };
+
+var x = {};
+assertThrows(() => Error.captureStackTrace(x));
+
+// Check that we don't crash when CaptureSimpleStackTrace returns undefined.
+var o = {};
+var oldStackTraceLimit = Error.stackTraceLimit;
+Error.stackTraceLimit = "not a number";
+Error.captureStackTrace(o);
+Error.stackTraceLimit = oldStackTraceLimit;
+
+// Check that we don't crash when a callsite's function's script is empty.
+Error.prepareStackTrace = function(e, frames) {
+ assertEquals(undefined, frames[0].getEvalOrigin());
+};
+try {
+ DataView();
+ assertUnreachable();
+} catch (e) {
+ assertEquals(undefined, e.stack);
+}
+
+// Check that a tight recursion in prepareStackTrace throws when accessing
+// stack. Trying again without a custom formatting function formats correctly.
+var err = new Error("abc");
+Error.prepareStackTrace = () => Error.prepareStackTrace();
+try {
+ err.stack;
+ assertUnreachable();
+} catch (e) {
+ err = e;
+}
+
+Error.prepareStackTrace = undefined;
+assertTrue(
+ err.stack.indexOf("RangeError: Maximum call stack size exceeded") != -1);
+assertTrue(err.stack.indexOf("prepareStackTrace") != -1);
+
+// Check that the callsite constructor throws.
+
+Error.prepareStackTrace = (e,s) => s;
+var constructor = new Error().stack[0].constructor;
+
+assertThrows(() => constructor.call());
+assertThrows(() => constructor.call(
+ null, {}, () => undefined, {valueOf() { return 0 }}, false));
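
A brief usage sketch of the two V8 hooks these new checks exercise, assuming mjsunit helpers: Error.captureStackTrace(obj) installs a non-enumerable, writable, configurable stack property (per the descriptor check above), and Error.prepareStackTrace, when set, is called with the error and an array of CallSite objects, its return value becoming the stack value:

Error.prepareStackTrace = function(error, frames) {
  // Each frame is a CallSite; getFunctionName() may be null for anonymous code.
  return frames.map(function(frame) { return frame.getFunctionName(); });
};
var obj = {};
Error.captureStackTrace(obj);
assertTrue(Array.isArray(obj.stack));   // our formatter returned an array
Error.prepareStackTrace = undefined;    // restore default formatting
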
diff --git a/deps/v8/test/mjsunit/string-natives.js b/deps/v8/test/mjsunit/string-natives.js
deleted file mode 100644
index 40fe9c697e..0000000000
--- a/deps/v8/test/mjsunit/string-natives.js
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-gc --allow-natives-syntax
-
-function test() {
- var s1 = %NewString(26, true);
- for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(i, 65, s1);
- assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s1);
- %_OneByteSeqStringSetChar(25, 66, s1);
- assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s1);
- for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(i, i+65, s1);
- assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s1);
- s1 = %TruncateString(s1, 13);
- assertEquals("ABCDEFGHIJKLM", s1);
-
- var s2 = %NewString(26, false);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, 65, s2);
- assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s2);
- %_TwoByteSeqStringSetChar(25, 66, s2);
- assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s2);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, i+65, s2);
- assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s2);
- s2 = %TruncateString(s2, 13);
- assertEquals("ABCDEFGHIJKLM", s2);
-
- var s3 = %NewString(26, false);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, i+1000, s3);
- for (i = 0; i < 26; i++) assertEquals(s3[i], String.fromCharCode(i+1000));
-
- var a = [];
- for (var i = 0; i < 1000; i++) {
- var s = %NewString(10000, i % 2 == 1);
- a.push(s);
- }
-
- gc();
-
- for (var i = 0; i < 1000; i++) {
- assertEquals(10000, a[i].length);
- a[i] = %TruncateString(a[i], 5000);
- }
-
- gc();
-
- for (var i = 0; i < 1000; i++) {
- assertEquals(5000, a[i].length);
- }
-}
-
-
-test();
-test();
-%OptimizeFunctionOnNextCall(test);
-test();
diff --git a/deps/v8/test/mjsunit/string-wrapper.js b/deps/v8/test/mjsunit/string-wrapper.js
new file mode 100644
index 0000000000..d4b65005d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-wrapper.js
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var limit = 10000;
+
+function testStringWrapper(string) {
+ assertEquals('a', string[0]);
+ assertEquals('b', string[1]);
+ assertEquals('c', string[2]);
+}
+
+(function testFastStringWrapperGrow() {
+ var string = new String("abc");
+ for (var i = 0; i < limit; i += 2) {
+ string[i] = {};
+ }
+ testStringWrapper(string);
+
+ for (var i = limit; i > 0; i -= 2) {
+ delete string[i];
+ }
+ testStringWrapper(string);
+})();
+
+(function testSlowStringWrapperGrow() {
+ var string = new String("abc");
+ // Force the slow string wrapper elements kind.
+ string[limit] = limit;
+ for (var i = 0; i < limit; i += 2) {
+ string[i] = {};
+ }
+ testStringWrapper(string);
+ assertEquals(limit, string[limit]);
+
+ for (var i = limit; i > 0; i -= 2) {
+ delete string[i];
+ }
+ testStringWrapper(string);
+ assertEquals(undefined, string[limit]);
+})();
+
+
+(function testReconfigureStringWrapperElements() {
+ var s = new String('abc');
+ // Can't reconfigure string contents.
+ assertThrows(() => Object.defineProperty(s, '1', {value: "value"}), TypeError);
+
+ // Configure a property outside the string's index range.
+ var value = 'v1';
+ Object.defineProperty(s, '3', {
+ get: () => {return value},
+ configurable:true
+ });
+ assertEquals('v1', s[3]);
+ value = 'v2';
+ assertEquals('v2', s[3]);
+
+ Object.defineProperty(s, '3', {value: 'v3', configurable: false});
+ assertEquals('v3', s[3]);
+ assertThrows(() => Object.defineProperty(s, '3', {value:2}), TypeError);
+})();
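
A short sketch of the invariant behind testReconfigureStringWrapperElements: in-range indices of a String wrapper are non-writable, non-configurable data properties backed by the string itself, while out-of-range indices start out absent and behave like ordinary object properties:

var s = new String("abc");
var inRange = Object.getOwnPropertyDescriptor(s, "1");
assertEquals("b", inRange.value);
assertFalse(inRange.writable);
assertFalse(inRange.configurable);
assertEquals(undefined, Object.getOwnPropertyDescriptor(s, "3"));
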
diff --git a/deps/v8/test/mjsunit/substr.js b/deps/v8/test/mjsunit/substr.js
index cab8b1bf6d..83929362a0 100644
--- a/deps/v8/test/mjsunit/substr.js
+++ b/deps/v8/test/mjsunit/substr.js
@@ -152,3 +152,22 @@ for (var i = 63; i >= 0; i--) {
assertEquals(xl - offset, z.length);
offset -= i;
}
+
+
+// Order of conversions.
+{
+ let log = [];
+ let string = {[Symbol.toPrimitive]() { log.push("this"); return "abc" }};
+ let start = {[Symbol.toPrimitive]() { log.push("start"); return 0 }};
+ let length = {[Symbol.toPrimitive]() { log.push("length"); return 1 }};
+ assertEquals("a", String.prototype.substr.call(string, start, length));
+ assertEquals(["this", "start", "length"], log);
+}
+{
+ let log = [];
+ let string = {[Symbol.toPrimitive]() { log.push("this"); return "abc" }};
+ let start = {[Symbol.toPrimitive]() { log.push("start"); return 0 }};
+ let length = {[Symbol.toPrimitive]() { log.push("length"); return 0 }};
+ assertEquals("", String.prototype.substr.call(string, start, length));
+ assertEquals(["this", "start", "length"], log);
+}
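
The two cases above pin down the coercion order of String.prototype.substr: the receiver is converted first, then start, then length, even when the extracted range is empty. A reusable probe makes the pattern explicit (a sketch; the probe helper is not part of the test above):

function probe(log, name, value) {
  return {[Symbol.toPrimitive]() { log.push(name); return value; }};
}
let log = [];
String.prototype.substr.call(
    probe(log, "this", "abc"), probe(log, "start", 1), probe(log, "length", 2));
assertEquals(["this", "start", "length"], log);
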
diff --git a/deps/v8/test/mjsunit/tools/dumpcpp.js b/deps/v8/test/mjsunit/tools/dumpcpp.js
new file mode 100644
index 0000000000..49b4675bf1
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/dumpcpp.js
@@ -0,0 +1,53 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Load implementations from <project root>/tools.
+// Files: tools/splaytree.js tools/codemap.js tools/csvparser.js
+// Files: tools/consarray.js tools/profile.js tools/profile_view.js
+// Files: tools/logreader.js tools/tickprocessor.js
+// Files: tools/dumpcpp.js
+// Env: TEST_FILE_NAME
+
+(function testProcessSharedLibrary() {
+ var oldLoadSymbols = UnixCppEntriesProvider.prototype.loadSymbols;
+
+ UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
+ this.symbols = [[
+ '00000100 00000001 t v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)',
+ '00000110 00000001 T v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)',
+ '00000120 00000001 t v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)',
+ '00000130 00000001 W v8::internal::RegExpMacroAssembler::CheckPosition(int, v8::internal::Label*)'
+ ].join('\n'), ''];
+ };
+
+ var testCppProcessor = new CppProcessor(new UnixCppEntriesProvider(),
+ false, false);
+ testCppProcessor.processSharedLibrary(
+ '/usr/local/google/home/lpy/v8/out/native/d8',
+ 0x00000100, 0x00000400, 0);
+
+ var staticEntries = testCppProcessor.codeMap_.getAllStaticEntriesWithAddresses();
+ var total = staticEntries.length;
+ assertEquals(total, 3);
+ assertEquals(staticEntries[0],
+ [288,{size:1,
+ name:'v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)',
+ type:'CPP',
+ nameUpdated_:false}
+ ]);
+ assertEquals(staticEntries[1],
+ [272,{size:1,
+ name:'v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)',
+ type:'CPP',
+ nameUpdated_:false}
+ ]);
+ assertEquals(staticEntries[2],
+ [256,{size:1,
+ name:'v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)',
+ type:'CPP',
+ nameUpdated_:false}
+ ]);
+
+ UnixCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols;
+})();
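
The stubbed loadSymbols output above mimics nm-style lines of the form "<hex address> <hex size> <type> <name>", from which the provider keeps text symbols (t/T/W). A hedged sketch of parsing one such line (an illustration, not the tool's actual implementation):

function parseNmLine(line) {
  var m = /^([0-9a-f]+) ([0-9a-f]+) ([tTwW]) (.+)$/.exec(line);
  if (m === null) return null;
  return {start: parseInt(m[1], 16), size: parseInt(m[2], 16), name: m[4]};
}
var sym = parseNmLine('00000110 00000001 T v8::internal::Runtime::GetElementOrCharAt(...)');
assertEquals(0x110, sym.start);
assertEquals(1, sym.size);
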
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log b/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
index 94aa56d36c..fcbf3b1a65 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
@@ -1,6 +1,6 @@
-shared-library,"shell",0x08048000,0x081ee000
-shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000
-shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000
+shared-library,"shell",0x08048000,0x081ee000,0
+shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000,0
+shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000,0
profiler,"begin",1
code-creation,Stub,0,0x424260,348,"CompareStub_GE"
code-creation,LazyCompile,0,0x2a8100,18535,"DrawQube 3d-cube.js:188",0xf43abcac,
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.log b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
index cf8b90d73b..fbc868ebf2 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
@@ -1,6 +1,6 @@
-shared-library,"shell",0x08048000,0x081ee000
-shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000
-shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000
+shared-library,"shell",0x08048000,0x081ee000,0
+shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000,0
+shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000,0
profiler,"begin",1
code-creation,Stub,0,0xf540a100,474,"CEntryStub"
code-creation,Script,0,0xf541cd80,736,"exp.js"
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index 73af098e7f..804a85de78 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -81,7 +81,7 @@
var shell_prov = new UnixCppEntriesProvider();
var shell_syms = [];
- shell_prov.parseVmSymbols('shell', 0x08048000, 0x081ee000,
+ shell_prov.parseVmSymbols('shell', 0x08048000, 0x081ee000, 0,
function (name, start, end) {
shell_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -107,7 +107,7 @@
};
var libc_prov = new UnixCppEntriesProvider();
var libc_syms = [];
- libc_prov.parseVmSymbols('libc', 0xf7c5c000, 0xf7da5000,
+ libc_prov.parseVmSymbols('libc', 0xf7c5c000, 0xf7da5000, 0,
function (name, start, end) {
libc_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -145,17 +145,17 @@
var shell_prov = new MacCppEntriesProvider();
var shell_syms = [];
- shell_prov.parseVmSymbols('shell', 0x00001b00, 0x00163156,
+ shell_prov.parseVmSymbols('shell', 0x00001c00, 0x00163256, 0x100,
function (name, start, end) {
shell_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
assertEquals(
- [['start', 0x00001b00, 0x00001b40],
- ['dyld_stub_binding_helper', 0x00001b40, 0x0011b710],
- ['v8::internal::RegExpMacroAssembler::CheckPosition', 0x0011b710, 0x00134250],
- ['v8::internal::Runtime_StringReplaceRegExpWithString', 0x00134250, 0x00137220],
- ['v8::internal::Runtime::GetElementOrCharAt', 0x00137220, 0x00137400],
- ['v8::internal::Runtime_DebugGetPropertyDetails', 0x00137400, 0x00163156]],
+ [['start', 0x00001c00, 0x00001c40],
+ ['dyld_stub_binding_helper', 0x00001c40, 0x0011b810],
+ ['v8::internal::RegExpMacroAssembler::CheckPosition', 0x0011b810, 0x00134350],
+ ['v8::internal::Runtime_StringReplaceRegExpWithString', 0x00134350, 0x00137320],
+ ['v8::internal::Runtime::GetElementOrCharAt', 0x00137320, 0x00137500],
+ ['v8::internal::Runtime_DebugGetPropertyDetails', 0x00137500, 0x00163256]],
shell_syms);
// stdc++ library
@@ -168,7 +168,7 @@
};
var stdc_prov = new MacCppEntriesProvider();
var stdc_syms = [];
- stdc_prov.parseVmSymbols('stdc++', 0x95728fb4, 0x95770005,
+ stdc_prov.parseVmSymbols('stdc++', 0x95728fb4, 0x95770005, 0,
function (name, start, end) {
stdc_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -211,7 +211,7 @@
};
var shell_prov = new WindowsCppEntriesProvider();
var shell_syms = [];
- shell_prov.parseVmSymbols('shell.exe', 0x00400000, 0x0057c000,
+ shell_prov.parseVmSymbols('shell.exe', 0x00400000, 0x0057c000, 0,
function (name, start, end) {
shell_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -252,7 +252,7 @@
read = exeSymbols;
var exe_exe_syms = [];
(new WindowsCppEntriesProvider()).parseVmSymbols(
- 'chrome.exe', 0x00400000, 0x00472000,
+ 'chrome.exe', 0x00400000, 0x00472000, 0,
function (name, start, end) {
exe_exe_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -264,7 +264,7 @@
read = dllSymbols;
var exe_dll_syms = [];
(new WindowsCppEntriesProvider()).parseVmSymbols(
- 'chrome.exe', 0x00400000, 0x00472000,
+ 'chrome.exe', 0x00400000, 0x00472000, 0,
function (name, start, end) {
exe_dll_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -275,7 +275,7 @@
read = dllSymbols;
var dll_dll_syms = [];
(new WindowsCppEntriesProvider()).parseVmSymbols(
- 'chrome.dll', 0x01c30000, 0x02b80000,
+ 'chrome.dll', 0x01c30000, 0x02b80000, 0,
function (name, start, end) {
dll_dll_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -287,7 +287,7 @@
read = exeSymbols;
var dll_exe_syms = [];
(new WindowsCppEntriesProvider()).parseVmSymbols(
- 'chrome.dll', 0x01c30000, 0x02b80000,
+ 'chrome.dll', 0x01c30000, 0x02b80000, 0,
function (name, start, end) {
dll_exe_syms.push(Array.prototype.slice.apply(arguments, [0]));
});
@@ -304,7 +304,7 @@ function CppEntriesProviderMock() {
CppEntriesProviderMock.prototype.parseVmSymbols = function(
- name, startAddr, endAddr, symbolAdder) {
+ name, startAddr, endAddr, slideAddr, symbolAdder) {
var symbols = {
'shell':
[['v8::internal::JSObject::LookupOwnRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)', 0x080f8800, 0x080f8d90],
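
Every parseVmSymbols call site gains a fourth argument matching the new trailing field on the shared-library log lines: a load slide that rebases symbol addresses (0 for unslid libraries, 0x100 in the Mac shell case above). The updated expectations are consistent with a straight offset:

var slide = 0x100;
assertEquals(0x00001c00, 0x00001b00 + slide);  // 'start' moved by the slide
assertEquals(0x00163256, 0x00163156 + slide);  // ...and so did the mapping end
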
diff --git a/deps/v8/test/mjsunit/unicode-test.js b/deps/v8/test/mjsunit/unicode-test.js
index 5be1b41562..1d64420c30 100644
--- a/deps/v8/test/mjsunit/unicode-test.js
+++ b/deps/v8/test/mjsunit/unicode-test.js
@@ -5726,7 +5726,7 @@ var source =
" All uses of av_ are via get_malloc_state().\n" +
" At most one \"call\" to get_malloc_state is made per invocation of\n" +
" the public versions of malloc and free, but other routines\n" +
-" that in turn invoke malloc and/or free may call more then once.\n" +
+" that in turn invoke malloc and/or free may call more than once.\n" +
" Also, it is called in check* routines if DEBUG is set.\n" +
"*/\n" +
"\n" +
diff --git a/deps/v8/test/mjsunit/wasm/OWNERS b/deps/v8/test/mjsunit/wasm/OWNERS
index c2abc8a6ad..eda8deabfd 100644
--- a/deps/v8/test/mjsunit/wasm/OWNERS
+++ b/deps/v8/test/mjsunit/wasm/OWNERS
@@ -1,3 +1,5 @@
-titzer@chromium.org
-bradnelson@chromium.org
ahaas@chromium.org
+bradnelson@chromium.org
+mtrofin@chromium.org
+rossberg@chromium.org
+titzer@chromium.org
diff --git a/deps/v8/test/mjsunit/wasm/adapter-frame.js b/deps/v8/test/mjsunit/wasm/adapter-frame.js
index 0e5d4b8c74..e595c3fb89 100644
--- a/deps/v8/test/mjsunit/wasm/adapter-frame.js
+++ b/deps/v8/test/mjsunit/wasm/adapter-frame.js
@@ -26,10 +26,9 @@ function makeSelect(type, args, which) {
}
var builder = new WasmModuleBuilder();
- var sig = new Array();
- sig.push(type);
- for (var i = 0; i < args; i++) sig.push(type);
- builder.addFunction("select", sig)
+ var params = [];
+ for (var i = 0; i < args; i++) params.push(type);
+ builder.addFunction("select", makeSig(params, [type]))
.addBody([kExprGetLocal, which])
.exportFunc();
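
The builder change replaces a flat signature array (return type first, then one entry per parameter) with makeSig(params, results), which takes parameter and result types as separate arrays. A hedged construction-only sketch, assuming the wasm-module-builder type constants of this V8 version (kAstI32 and the kExpr opcodes):

var builder = new WasmModuleBuilder();
builder.addFunction("add", makeSig([kAstI32, kAstI32], [kAstI32]))  // (i32, i32) -> i32
    .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add])
    .exportFunc();
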
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-copy.js b/deps/v8/test/mjsunit/wasm/asm-wasm-copy.js
index 35c5f76ef1..149196e1b9 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-copy.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-copy.js
@@ -22,6 +22,7 @@
}
return { func: func };
}
- var wasm = Wasm.instantiateModuleFromAsm(asmModule.toString());
- assertEquals(asmModule().func(), wasm.func());
+ var wasm = asmModule();
+ var js = eval('(' + asmModule.toString().replace('use asm', '') + ')')();
+ assertEquals(js.func(), wasm.func());
})();
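
This rewrite is the recurring migration pattern of the commit: with Wasm.instantiateModuleFromAsm gone, the asm.js function itself is called to instantiate the module under --validate-asm, and the plain-JS baseline comes from eval'ing the same source with the "use asm" directive stripped. A minimal sketch, assuming --validate-asm --allow-natives-syntax:

function Module() {
  "use asm";
  function f() { return 42; }
  return {f: f};
}
var wasm = Module();
assertTrue(%IsAsmWasmCode(Module));  // the asm.js path actually validated
var js = eval('(' + Module.toString().replace('use asm', '') + ')')();
assertEquals(js.f(), wasm.f());
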
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js b/deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js
index 4b16b71239..460894ea5e 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-deopt.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
-// Flags: --allow-natives-syntax
+// Flags: --validate-asm --allow-natives-syntax
(function TestDeoptimizeArgMismatch() {
function deopt() {
@@ -21,8 +20,7 @@
return {'_main': _main}
}
function test() {
- var wasm = Wasm.instantiateModuleFromAsm(
- Module.toString(), {'deopt': deopt});
+ var wasm = Module(null, {'deopt': deopt});
wasm._main(0, 0, 0);
}
test();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
index a94994d26f..a5d5a6c2cc 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f32.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
function WrapInAsmModule(func) {
function MODULE_NAME(stdlib) {
@@ -26,22 +26,19 @@ function WrapInAsmModule(func) {
return eval("(" + source + ")");
}
-function RunThreeWayTest(asmfunc, expect) {
+function RunAsmJsTest(asmfunc, expect) {
var asm_source = asmfunc.toString();
var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
var stdlib = {Math: Math};
- var js_module = eval("(" + nonasm_source + ")")(stdlib);
print("Testing " + asmfunc.name + " (js)...");
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
expect(js_module);
print("Testing " + asmfunc.name + " (asm.js)...");
var asm_module = asmfunc(stdlib);
+ assertTrue(%IsAsmWasmCode(asmfunc));
expect(asm_module);
-
- print("Testing " + asmfunc.name + " (wasm)...");
- var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
- expect(wasm_module);
}
const fround = Math.fround;
@@ -218,7 +215,7 @@ var funcs = [
(function () {
for (func of funcs) {
- RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ RunAsmJsTest(WrapInAsmModule(func), function (module) {
if (func.length == 1) {
for (a of inputs) {
assertEquals(func(a), module.main(a));
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
index 11f9da38f9..1fd51ff9d5 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-f64.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
function WrapInAsmModule(func) {
function MODULE_NAME(stdlib) {
@@ -34,22 +34,19 @@ function WrapInAsmModule(func) {
return eval("(" + source + ")");
}
-function RunThreeWayTest(asmfunc, expect) {
+function RunAsmJsTest(asmfunc, expect) {
var asm_source = asmfunc.toString();
var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
var stdlib = {Math: Math};
- var js_module = eval("(" + nonasm_source + ")")(stdlib);
print("Testing " + asmfunc.name + " (js)...");
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
expect(js_module);
print("Testing " + asmfunc.name + " (asm.js)...");
var asm_module = asmfunc(stdlib);
+ assertTrue(%IsAsmWasmCode(asmfunc));
expect(asm_module);
-
- print("Testing " + asmfunc.name + " (wasm)...");
- var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
- expect(wasm_module);
}
const Math_ceil = Math.ceil;
@@ -290,7 +287,7 @@ var funcs = [
(function () {
for (func of funcs) {
- RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ RunAsmJsTest(WrapInAsmModule(func), function (module) {
if (func.length == 1) {
for (a of inputs) {
assertEquals(func(a), module.main(a));
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-heap.js b/deps/v8/test/mjsunit/wasm/asm-wasm-heap.js
index 055b1e94a5..d81cb6134e 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-heap.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-heap.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
const stdlib = {
Math: Math,
@@ -54,21 +54,18 @@ function checkView(view, load, shift) {
}
}
-function RunThreeWayTest(asmfunc, expect) {
+function RunAsmJsTest(asmfunc, expect) {
var asm_source = asmfunc.toString();
var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
- var js_module = eval("(" + nonasm_source + ")")(stdlib, {}, buffer);
print("Testing " + asmfunc.name + " (js)...");
+ var js_module = eval("(" + nonasm_source + ")")(stdlib, {}, buffer);
expect(js_module);
print("Testing " + asmfunc.name + " (asm.js)...");
var asm_module = asmfunc(stdlib, {}, buffer);
+ assertTrue(%IsAsmWasmCode(asmfunc));
expect(asm_module);
-
- print("Testing " + asmfunc.name + " (wasm)...");
- var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, null, buffer);
- expect(wasm_module);
}
function LoadAt_i32(stdlib, foreign, buffer) {
@@ -81,7 +78,7 @@ function LoadAt_i32(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_i32, function(module) {
+RunAsmJsTest(LoadAt_i32, function(module) {
var load = module.load;
assertEquals(BASE, load(0));
assertEquals(BASE | 0x30, load(0x30));
@@ -110,7 +107,7 @@ function LoadAt_i16(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_i16, function(module) {
+RunAsmJsTest(LoadAt_i16, function(module) {
var load = module.load;
var LOWER = (BASE << 16) >> 16;
var UPPER = BASE >> 16;
@@ -146,7 +143,7 @@ function LoadAt_u16(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_u16, function(module) {
+RunAsmJsTest(LoadAt_u16, function(module) {
var load = module.load;
for (index of OOB_INDEXES) assertEquals(0, load(index));
checkView(new Uint16Array(buffer), load, 1);
@@ -162,7 +159,7 @@ function LoadAt_i8(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_i8, function(module) {
+RunAsmJsTest(LoadAt_i8, function(module) {
var load = module.load;
for (index of OOB_INDEXES) assertEquals(0, load(index));
checkView(new Int8Array(buffer), load, 0);
@@ -178,7 +175,7 @@ function LoadAt_u8(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_u8, function(module) {
+RunAsmJsTest(LoadAt_u8, function(module) {
var load = module.load;
for (index of OOB_INDEXES) assertEquals(0, load(index));
checkView(new Uint8Array(buffer), load, 0);
@@ -195,7 +192,7 @@ function LoadAt_u32(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_u32, function(module) {
+RunAsmJsTest(LoadAt_u32, function(module) {
var load = module.load;
for (index of OOB_INDEXES) assertEquals(0, load(index));
checkView(new Uint32Array(buffer), load, 2);
@@ -212,7 +209,7 @@ function LoadAt_f32(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_f32, function(module) {
+RunAsmJsTest(LoadAt_f32, function(module) {
var load = module.load;
for (index of OOB_INDEXES) assertEquals(NaN, load(index));
checkView(new Float32Array(buffer), load, 2);
@@ -228,7 +225,7 @@ function LoadAt_f64(stdlib, foreign, buffer) {
return {load: load};
}
-RunThreeWayTest(LoadAt_f64, function(module) {
+RunAsmJsTest(LoadAt_f64, function(module) {
var load = module.load;
for (index of OOB_INDEXES) assertEquals(NaN, load(index));
checkView(new Float64Array(buffer), load, 3);
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
index 6224e8fa1f..29f071c84c 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-i32.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
function WrapInAsmModule(func) {
function MODULE_NAME(stdlib) {
@@ -23,22 +23,19 @@ function WrapInAsmModule(func) {
return eval("(" + source + ")");
}
-function RunThreeWayTest(asmfunc, expect) {
+function RunAsmJsTest(asmfunc, expect) {
var asm_source = asmfunc.toString();
var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
var stdlib = {Math: Math};
- var js_module = eval("(" + nonasm_source + ")")(stdlib);
print("Testing " + asmfunc.name + " (js)...");
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
expect(js_module);
print("Testing " + asmfunc.name + " (asm.js)...");
var asm_module = asmfunc(stdlib);
+ assertTrue(%IsAsmWasmCode(asmfunc));
expect(asm_module);
-
- print("Testing " + asmfunc.name + " (wasm)...");
- var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
- expect(wasm_module);
}
const imul = Math.imul;
@@ -67,13 +64,13 @@ function i32_mul(a, b) {
function i32_div(a, b) {
a = a | 0;
b = b | 0;
- return (a / b) | 0;
+ return ((a | 0) / (b | 0)) | 0;
}
function i32_mod(a, b) {
a = a | 0;
b = b | 0;
- return (a % b) | 0;
+ return ((a | 0) % (b | 0)) | 0;
}
function i32_and(a, b) {
@@ -234,7 +231,7 @@ var funcs = [
(function () {
for (func of funcs) {
- RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ RunAsmJsTest(WrapInAsmModule(func), function (module) {
if (func.length == 1) {
for (a of inputs) {
assertEquals(func(a), module.main(a));
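
The division and modulus fixes above reflect asm.js's signedness rules: the operands of / and % must be explicitly signed (or unsigned) values, so each local is re-annotated with |0 at the use site even though it was already coerced on entry. A minimal sketch of the accepted form:

function Module() {
  "use asm";
  function div(a, b) {
    a = a | 0;
    b = b | 0;
    return ((a | 0) / (b | 0)) | 0;  // signed / signed -> intish, then coerced
  }
  return {div: div};
}
assertEquals(-3, Module().div(-7, 2));  // truncating signed division
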
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-literals.js b/deps/v8/test/mjsunit/wasm/asm-wasm-literals.js
index e4e312f1d4..172c5a3776 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-literals.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-literals.js
@@ -2,24 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
-function RunThreeWayTest(asmfunc, expect) {
+function RunAsmJsTest(asmfunc, expect) {
var asm_source = asmfunc.toString();
var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
var stdlib = {Math: Math};
- var js_module = eval("(" + nonasm_source + ")")(stdlib);
print("Testing " + asmfunc.name + " (js)...");
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
expect(js_module);
print("Testing " + asmfunc.name + " (asm.js)...");
var asm_module = asmfunc(stdlib);
+ assertTrue(%IsAsmWasmCode(asmfunc));
expect(asm_module);
-
- print("Testing " + asmfunc.name + " (wasm)...");
- var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
- expect(wasm_module);
}
function PositiveIntLiterals() {
@@ -38,9 +35,10 @@ function PositiveIntLiterals() {
f256: f256, f1000: f1000, f2000000, fmax: fmax};
}
-RunThreeWayTest(PositiveIntLiterals, function(module) {
+RunAsmJsTest(PositiveIntLiterals, function(module) {
assertEquals(0, module.f0());
assertEquals(1, module.f1());
+ assertEquals(1, module.f1());
assertEquals(4, module.f4());
assertEquals(64, module.f64());
assertEquals(128, module.f128());
@@ -65,7 +63,7 @@ function NegativeIntLiterals() {
f256: f256, f1000: f1000, f2000000, fmin: fmin};
}
-RunThreeWayTest(NegativeIntLiterals, function (module) {
+RunAsmJsTest(NegativeIntLiterals, function (module) {
assertEquals(-1, module.f1());
assertEquals(-4, module.f4());
assertEquals(-64, module.f64());
@@ -93,7 +91,7 @@ function PositiveUnsignedLiterals() {
f256: f256, f1000: f1000, f2000000, fmax: fmax};
}
-RunThreeWayTest(PositiveUnsignedLiterals, function (module) {
+RunAsmJsTest(PositiveUnsignedLiterals, function (module) {
assertEquals(0, module.f0());
assertEquals(1, module.f1());
assertEquals(4, module.f4());
@@ -130,7 +128,7 @@ function LargeUnsignedLiterals() {
return {a: a, b: b, c: c, d: d, e: e};
}
-RunThreeWayTest(LargeUnsignedLiterals, function(module) {
+RunAsmJsTest(LargeUnsignedLiterals, function(module) {
assertEquals(2147483648, module.a());
assertEquals(2147483649, module.b());
assertEquals(0x80000000, module.c());
@@ -165,7 +163,7 @@ function ManyI32() {
return {main: main};
}
-RunThreeWayTest(ManyI32, function(module) {
+RunAsmJsTest(ManyI32, function(module) {
assertEquals(-222411306, module.main());
});
@@ -187,7 +185,7 @@ function ManyF64a() {
return {main: main};
}
-RunThreeWayTest(ManyF64a, function(module) {
+RunAsmJsTest(ManyF64a, function(module) {
assertEquals(-8640233.599945681, module.main());
});
@@ -203,7 +201,7 @@ function ManyF64b() {
return {k1: k1, k2: k2, k3: k3, k4: k4, k5: k5, k6: k6};
}
-RunThreeWayTest(ManyF64b, function(module) {
+RunAsmJsTest(ManyF64b, function(module) {
assertEquals(2.4e-24, module.k1());
assertEquals(2.4e-19, module.k2());
assertEquals(2.4e-14, module.k3());
@@ -225,7 +223,7 @@ function ManyF64c() {
return {k1: k1, k2: k2, k3: k3, k4: k4, k5: k5, k6: k6};
}
-RunThreeWayTest(ManyF64c, function(module) {
+RunAsmJsTest(ManyF64c, function(module) {
assertEquals(2.4000000000000004e+26, module.k1());
assertEquals(2.4e+21, module.k2());
assertEquals(2.4e+16, module.k3());
@@ -250,7 +248,7 @@ function ManyF32a(stdlib) {
if (false) {
// TODO(bradnelson): fails validation of F32 literals somehow.
-RunThreeWayTest(ManyF32a, function(module) {
+RunAsmJsTest(ManyF32a, function(module) {
assertEquals(2.0999999917333043e-24, module.k1());
assertEquals(2.099999868734112e-19, module.k2());
assertEquals(2.099999997029825e-14, module.k3());
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js b/deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js
index fe39a30a88..05e1ca509c 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-stdlib.js
@@ -2,7 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
+
+var stdlib = this;
+
+function assertValidAsm(func) {
+ assertTrue(%IsAsmWasmCode(func));
+}
(function TestStdlibConstants() {
function Module(stdlib) {
@@ -39,17 +45,109 @@
return {caller:caller, nanCheck:nanCheck};
}
- var m =Wasm.instantiateModuleFromAsm(Module.toString());
+ var m = Module(stdlib);
+ assertValidAsm(Module);
assertEquals(1, m.caller());
assertTrue(isNaN(m.nanCheck()));
})();
+var stdlib = this;
+var stdlib_root_members = [
+ 'Infinity',
+ 'NaN',
+];
+var stdlib_math_members = [
+ 'E',
+ 'LN10',
+ 'LN2',
+ 'LOG2E',
+ 'LOG10E',
+ 'PI',
+ 'SQRT1_2',
+ 'SQRT2',
+ 'ceil',
+ 'clz32',
+ 'floor',
+ 'sqrt',
+ 'abs',
+ 'min',
+ 'max',
+ 'acos',
+ 'asin',
+ 'atan',
+ 'cos',
+ 'sin',
+ 'tan',
+ 'exp',
+ 'log',
+ 'atan2',
+ 'pow',
+ 'imul',
+ 'fround',
+];
+
+
+(function TestBadStdlib() {
+ function Module(stdlib) {
+ "use asm";
+ var foo = stdlib.NaN;
+ return {};
+ }
+ for (var i = 0; i < stdlib_root_members.length; ++i) {
+ var member = stdlib_root_members[i];
+ var stdlib = {};
+ stdlib[member] = 0;
+ print(member);
+ var code = Module.toString().replace('NaN', member);
+ var decl = eval('(' + code + ')');
+ decl(stdlib);
+ assertTrue(%IsNotAsmWasmCode(decl));
+ }
+ for (var i = 0; i < stdlib_math_members.length; ++i) {
+ var member = stdlib_math_members[i];
+ var stdlib = {Math:{}};
+ stdlib['Math'][member] = 0;
+ print(member);
+ var code = Module.toString().replace('NaN', 'Math.' + member);
+ var decl = eval('(' + code + ')');
+ decl(stdlib);
+ assertTrue(%IsNotAsmWasmCode(decl));
+ }
+})();
+
+
+(function TestMissingNaNStdlib() {
+ function Module(stdlib) {
+ "use asm";
+ var foo = stdlib.NaN;
+ return {};
+ }
+ for (var i = 0; i < stdlib_root_members.length; ++i) {
+ var member = stdlib_root_members[i];
+ var code = Module.toString().replace('NaN', member);
+ var decl = eval('(' + code + ')');
+ decl({});
+ assertTrue(%IsNotAsmWasmCode(decl));
+ }
+ for (var i = 0; i < stdlib_math_members.length; ++i) {
+ var member = stdlib_math_members[i];
+ var code = Module.toString().replace('NaN', 'Math.' + member);
+ var decl = eval('(' + code + ')');
+ assertThrows(function() {
+ decl({});
+ assertTrue(%IsNotAsmWasmCode(decl));
+ });
+ }
+})();
+
+
(function TestStdlibFunctionsInside() {
function Module(stdlib) {
"use asm";
var StdlibMathCeil = stdlib.Math.ceil;
+ var StdlibMathClz32 = stdlib.Math.clz32;
var StdlibMathFloor = stdlib.Math.floor;
var StdlibMathSqrt = stdlib.Math.sqrt;
var StdlibMathAbs = stdlib.Math.abs;
@@ -83,33 +181,35 @@
}
function caller() {
- if (!deltaEqual(StdlibMathSqrt(123.0), 11.090536506409418)) return 0;
- if (StdlibMathSqrt(fround(256.0)) != fround(16.0)) return 0;
- if (StdlibMathCeil(123.7) != 124.0) return 0;
- if (StdlibMathCeil(fround(123.7)) != fround(124.0)) return 0;
- if (StdlibMathFloor(123.7) != 123.0) return 0;
- if (StdlibMathFloor(fround(123.7)) != fround(123.0)) return 0;
- if (StdlibMathAbs(-123.0) != 123.0) return 0;
- if (StdlibMathAbs(fround(-123.0)) != fround(123.0)) return 0;
- if (StdlibMathMin(123.4, 1236.4) != 123.4) return 0;
- if (StdlibMathMin(fround(123.4),
- fround(1236.4)) != fround(123.4)) return 0;
- if (StdlibMathMax(123.4, 1236.4) != 1236.4) return 0;
- if (StdlibMathMax(fround(123.4), fround(1236.4))
+ if (!(deltaEqual(+StdlibMathSqrt(123.0), 11.090536506409418)|0)) return 0;
+ if (fround(StdlibMathSqrt(fround(256.0))) != fround(16.0)) return 0;
+ if (+StdlibMathCeil(123.7) != 124.0) return 0;
+ if (fround(StdlibMathCeil(fround(123.7))) != fround(124.0)) return 0;
+ if (+StdlibMathFloor(123.7) != 123.0) return 0;
+ if (fround(StdlibMathFloor(fround(123.7))) != fround(123.0)) return 0;
+ if (+StdlibMathAbs(-123.0) != 123.0) return 0;
+ if (fround(StdlibMathAbs(fround(-123.0))) != fround(123.0)) return 0;
+ if (+StdlibMathMin(123.4, 1236.4) != 123.4) return 0;
+ if (fround(StdlibMathMin(fround(123.4),
+ fround(1236.4))) != fround(123.4)) return 0;
+ if (+StdlibMathMax(123.4, 1236.4) != 1236.4) return 0;
+ if (fround(StdlibMathMax(fround(123.4), fround(1236.4)))
!= fround(1236.4)) return 0;
- if (!deltaEqual(StdlibMathAcos(0.1), 1.4706289056333368)) return 0;
- if (!deltaEqual(StdlibMathAsin(0.2), 0.2013579207903308)) return 0;
- if (!deltaEqual(StdlibMathAtan(0.2), 0.19739555984988078)) return 0;
- if (!deltaEqual(StdlibMathCos(0.2), 0.9800665778412416)) return 0;
- if (!deltaEqual(StdlibMathSin(0.2), 0.19866933079506122)) return 0;
- if (!deltaEqual(StdlibMathTan(0.2), 0.20271003550867250)) return 0;
- if (!deltaEqual(StdlibMathExp(0.2), 1.2214027581601699)) return 0;
- if (!deltaEqual(StdlibMathLog(0.2), -1.6094379124341003)) return 0;
-
- if (StdlibMathImul(6, 7) != 42) return 0;
- if (!deltaEqual(StdlibMathAtan2(6.0, 7.0), 0.7086262721276703)) return 0;
- if (StdlibMathPow(6.0, 7.0) != 279936.0) return 0;
+ if (!(deltaEqual(+StdlibMathAcos(0.1), 1.4706289056333368)|0)) return 0;
+ if (!(deltaEqual(+StdlibMathAsin(0.2), 0.2013579207903308)|0)) return 0;
+ if (!(deltaEqual(+StdlibMathAtan(0.2), 0.19739555984988078)|0)) return 0;
+ if (!(deltaEqual(+StdlibMathCos(0.2), 0.9800665778412416)|0)) return 0;
+ if (!(deltaEqual(+StdlibMathSin(0.2), 0.19866933079506122)|0)) return 0;
+ if (!(deltaEqual(+StdlibMathTan(0.2), 0.20271003550867250)|0)) return 0;
+ if (!(deltaEqual(+StdlibMathExp(0.2), 1.2214027581601699)|0)) return 0;
+ if (!(deltaEqual(+StdlibMathLog(0.2), -1.6094379124341003)|0)) return 0;
+ if ((StdlibMathClz32(134217728)|0) != 4) return 0;
+
+ if ((StdlibMathImul(6, 7)|0) != 42) return 0;
+ if (!(deltaEqual(+StdlibMathAtan2(6.0, 7.0), 0.7086262721276703)|0))
+ return 0;
+ if (+StdlibMathPow(6.0, 7.0) != 279936.0) return 0;
return 1;
}
@@ -117,7 +217,8 @@
return {caller:caller};
}
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ var m = Module(stdlib);
+ assertValidAsm(Module);
assertEquals(1, m.caller());
})();
@@ -261,7 +362,8 @@
max_f64: max_f64,
};
}
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
+ var m = Module(stdlib);
+ assertValidAsm(Module);
var values = {
i32: [
0, 1, -1, 123, 456, -123, -456,
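
The rewritten caller above follows asm.js's call-site coercion rules: a call result has no type of its own until it is coerced, so double contexts use +f(...), int contexts f(...)|0, and float contexts fround(f(...)); without these annotations validation fails and the module silently falls back to plain JS. A minimal sketch:

function Module(stdlib) {
  "use asm";
  var sqrt = stdlib.Math.sqrt;
  var fround = stdlib.Math.fround;
  function f(x) {
    x = +x;           // parameter annotation: double
    return +sqrt(x);  // call-site coercion back to double
  }
  function g(x) {
    x = fround(x);           // parameter annotation: float
    return fround(sqrt(x));  // float overload of sqrt
  }
  return {f: f, g: g};
}
var m = Module(this);
assertTrue(%IsAsmWasmCode(Module));
assertEquals(4, m.f(16));
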
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-switch.js b/deps/v8/test/mjsunit/wasm/asm-wasm-switch.js
new file mode 100644
index 0000000000..f4875d0dc1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-switch.js
@@ -0,0 +1,486 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+function assertValidAsm(func) {
+ assertTrue(%IsAsmWasmCode(func));
+}
+
+(function TestSwitch0() {
+ function asmModule() {
+ "use asm"
+
+ function caller() {
+ var ret = 0;
+ var x = 7;
+ switch (x|0) {
+ case 1: {
+ return 0;
+ }
+ case 7: {
+ ret = 5;
+ break;
+ }
+ default: return 0;
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(5, wasm.caller());
+})();
+
+(function TestSwitch() {
+ function asmModule() {
+ "use asm"
+
+ function caller() {
+ var ret = 0;
+ var x = 7;
+ switch (x|0) {
+ case 1: return 0;
+ case 7: {
+ ret = 12;
+ break;
+ }
+ default: return 0;
+ }
+ switch (x|0) {
+ case 1: return 0;
+ case 8: return 0;
+ default: ret = (ret + 11)|0;
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(23, wasm.caller());
+})();
+
+(function TestSwitchFallthrough() {
+ function asmModule() {
+ "use asm"
+
+ function caller() {
+ var x = 17;
+ var ret = 0;
+ switch (x|0) {
+ case 17:
+ case 14: ret = 39;
+ case 1: ret = (ret + 3)|0;
+ case 4: break;
+ default: ret = (ret + 1)|0;
+ }
+ return ret|0;
+ }
+
+ return {caller:caller};
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(42, wasm.caller());
+})();
+
+(function TestNestedSwitch() {
+ function asmModule() {
+ "use asm"
+
+ function caller() {
+ var x = 3;
+ var y = -13;
+ switch (x|0) {
+ case 1: return 0;
+ case 3: {
+ switch (y|0) {
+ case 2: return 0;
+ case -13: return 43;
+ default: return 0;
+ }
+ }
+ default: return 0;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(43, wasm.caller());
+})();
+
+(function TestSwitchWithDefaultOnly() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ switch(x|0) {
+ default: return -10;
+ }
+ return 0;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(-10, wasm.main(2));
+ assertEquals(-10, wasm.main(54));
+})();
+
+(function TestEmptySwitch() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ switch(x|0) {
+ }
+ return 73;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(73, wasm.main(7));
+})();
+
+(function TestSwitchWithBrTable() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ switch(x|0) {
+ case 14: return 23;
+ case 12: return 25;
+ case 15: return 29;
+ case 19: return 34;
+ case 18: return 17;
+ case 16: return 16;
+ default: return -1;
+ }
+ return 0;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(25, wasm.main(12));
+ assertEquals(23, wasm.main(14));
+ assertEquals(29, wasm.main(15));
+ assertEquals(16, wasm.main(16));
+ assertEquals(17, wasm.main(18));
+ assertEquals(34, wasm.main(19));
+ assertEquals(-1, wasm.main(-1));
+})();
+
+(function TestSwitchWithBalancedTree() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ switch(x|0) {
+ case 5: return 52;
+ case 1: return 11;
+ case 6: return 63;
+ case 9: return 19;
+ case -4: return -4;
+ }
+ return 0;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(-4, wasm.main(-4));
+ assertEquals(11, wasm.main(1));
+ assertEquals(52, wasm.main(5));
+ assertEquals(63, wasm.main(6));
+ assertEquals(19, wasm.main(9));
+ assertEquals(0, wasm.main(11));
+})();
+
+(function TestSwitchHybrid() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ switch(x|0) {
+ case 1: return -4;
+ case 2: return 23;
+ case 3: return 32;
+ case 4: return 14;
+ case 7: return 17;
+ case 10: return 10;
+ case 11: return 121;
+ case 12: return 112;
+ case 13: return 31;
+ case 16: return 16;
+ default: return -1;
+ }
+ return 0;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(-4, wasm.main(1));
+ assertEquals(23, wasm.main(2));
+ assertEquals(32, wasm.main(3));
+ assertEquals(14, wasm.main(4));
+ assertEquals(17, wasm.main(7));
+ assertEquals(10, wasm.main(10));
+ assertEquals(121, wasm.main(11));
+ assertEquals(112, wasm.main(12));
+ assertEquals(31, wasm.main(13));
+ assertEquals(16, wasm.main(16));
+ assertEquals(-1, wasm.main(20));
+})();
+
+(function TestSwitchFallthroughWithBrTable() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ var ret = 0;
+ switch(x|0) {
+ case 1: {
+ ret = 21;
+ break;
+ }
+ case 2: {
+ ret = 12;
+ break;
+ }
+ case 3: {
+ ret = 43;
+ }
+ case 4: {
+ ret = 54;
+ break;
+ }
+ default: {
+ ret = 10;
+ break;
+ }
+ }
+ return ret|0;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(12, wasm.main(2));
+ assertEquals(10, wasm.main(10));
+ assertEquals(54, wasm.main(3));
+})();
+
+(function TestSwitchFallthroughHybrid() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ var ret = 0;
+ switch(x|0) {
+ case 1: {
+ ret = 1;
+ break;
+ }
+ case 2: {
+ ret = 2;
+ break;
+ }
+ case 3: {
+ ret = 3;
+ break;
+ }
+ case 4: {
+ ret = 4;
+ }
+ case 7: {
+ ret = 7;
+ break;
+ }
+ case 10: {
+ ret = 10;
+ }
+ case 16: {
+ ret = 16;
+ break;
+ }
+ case 17: {
+ ret = 17;
+ break;
+ }
+ case 18: {
+ ret = 18;
+ break;
+ }
+ case 19: {
+ ret = 19;
+ }
+ default: {
+ ret = -1;
+ break;
+ }
+ }
+ return ret|0;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(7, wasm.main(4));
+ assertEquals(16, wasm.main(10));
+ assertEquals(-1, wasm.main(19));
+ assertEquals(-1, wasm.main(23));
+})();
+
+(function TestSwitchHybridWithNoDefault() {
+ function asmModule() {
+ "use asm";
+ function main(x) {
+ x = x|0;
+ var ret = 19;
+ switch(x|0) {
+ case 1: {
+ ret = 1;
+ break;
+ }
+ case 2: {
+ ret = 2;
+ break;
+ }
+ case 3: {
+ ret = 3;
+ break;
+ }
+ case 4: {
+ ret = 4;
+ break;
+ }
+ case 7: {
+ ret = 7;
+ break;
+ }
+ }
+ return ret|0;
+ }
+ return {
+ main: main,
+ };
+ }
+ var wasm = asmModule();
+ assertValidAsm(asmModule);
+ assertEquals(2, wasm.main(2));
+ assertEquals(7, wasm.main(7));
+ assertEquals(19, wasm.main(-1));
+})();
+
+(function TestLargeSwitch() {
+ function LargeSwitchGenerator(begin, end, gap, handle_case) {
+ var str = "function asmModule() {\
+ \"use asm\";\
+ function main(x) {\
+ x = x|0;\
+ switch(x|0) {";
+ for (var i = begin; i <= end; i = i + gap) {
+ str = str.concat("case ", i.toString(), ": ", handle_case(i));
+ }
+ str = str.concat("default: return -1;\
+ }\
+ return -2;\
+ }\
+ return {main: main}; }");
+
+ var decl = eval('(' + str + ')');
+ var wasm = decl();
+ assertValidAsm(decl);
+ return wasm;
+ }
+
+ var handle_case = function(k) {
+ return "return ".concat(k, ";");
+ }
+ var wasm = LargeSwitchGenerator(0, 513, 1, handle_case);
+ for (var i = 0; i <= 513; i++) {
+ assertEquals(i, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+
+ wasm = LargeSwitchGenerator(0, 1024, 3, handle_case);
+ for (var i = 0; i <= 1024; i = i + 3) {
+ assertEquals(i, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+
+ wasm = LargeSwitchGenerator(-2147483648, -2147483000, 1, handle_case);
+ for (var i = -2147483648; i <= -2147483000; i++) {
+ assertEquals(i, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+ assertEquals(-1, wasm.main(214748647));
+
+ wasm = LargeSwitchGenerator(-2147483648, -2147483000, 3, handle_case);
+ for (var i = -2147483648; i <= -2147483000; i = i + 3) {
+ assertEquals(i, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+ assertEquals(-1, wasm.main(214748647));
+
+ wasm = LargeSwitchGenerator(2147483000, 2147483647, 1, handle_case);
+ for (var i = 2147483000; i <= 2147483647; i++) {
+ assertEquals(i, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+ assertEquals(-1, wasm.main(-214748647));
+
+ wasm = LargeSwitchGenerator(2147483000, 2147483647, 4, handle_case);
+ for (var i = 2147483000; i <= 2147483647; i = i + 4) {
+ assertEquals(i, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+ assertEquals(-1, wasm.main(-214748647));
+
+ handle_case = function(k) {
+ if (k != 7) return "return ".concat(k, ";");
+ else return "break;";
+ }
+ wasm = LargeSwitchGenerator(0, 1499, 7, handle_case);
+ for (var i = 0; i <= 1499; i = i + 7) {
+ if (i == 7) assertEquals(-2, wasm.main(i));
+ else assertEquals(i, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+
+ handle_case = function(k) {
+ if (k != 56) return "break;";
+ else return "return 23;";
+ }
+ wasm = LargeSwitchGenerator(0, 638, 2, handle_case);
+ for (var i = 0; i <= 638; i = i + 2) {
+ if (i == 56) assertEquals(23, wasm.main(i));
+ else assertEquals(-2, wasm.main(i));
+ }
+ assertEquals(-1, wasm.main(-1));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
index 514ddefb7e..8276015214 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-u32.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
function WrapInAsmModule(func) {
function MODULE_NAME(stdlib) {
@@ -20,22 +20,19 @@ function WrapInAsmModule(func) {
return eval("(" + source + ")");
}
-function RunThreeWayTest(asmfunc, expect) {
+function RunAsmJsTest(asmfunc, expect) {
var asm_source = asmfunc.toString();
var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
var stdlib = {Math: Math};
- var js_module = eval("(" + nonasm_source + ")")(stdlib);
print("Testing " + asmfunc.name + " (js)...");
+ var js_module = eval("(" + nonasm_source + ")")(stdlib);
expect(js_module);
print("Testing " + asmfunc.name + " (asm.js)...");
var asm_module = asmfunc(stdlib);
+ assertTrue(%IsAsmWasmCode(asmfunc));
expect(asm_module);
-
- print("Testing " + asmfunc.name + " (wasm)...");
- var wasm_module = Wasm.instantiateModuleFromAsm(asm_source, stdlib);
- expect(wasm_module);
}
const imul = Math.imul;
@@ -212,7 +209,7 @@ var funcs = [
(function () {
for (func of funcs) {
- RunThreeWayTest(WrapInAsmModule(func), function (module) {
+ RunAsmJsTest(WrapInAsmModule(func), function (module) {
for (a of inputs) {
for (b of inputs) {
var expected = func(a, b);
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index 2efb006436..a580c5c7e9 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -2,15 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
+
+var stdlib = this;
+
+function assertValidAsm(func) {
+ assertTrue(%IsAsmWasmCode(func));
+}
function assertWasm(expected, func, ffi) {
print("Testing " + func.name + "...");
- assertEquals(expected, Wasm.instantiateModuleFromAsm(
- func.toString(), ffi).caller());
+ assertEquals(
+ expected, func(stdlib, ffi, new ArrayBuffer(1024)).caller());
+ assertValidAsm(func);
}
-function EmptyTest() {
+function EmptyTest(a, b, c) {
"use asm";
function caller() {
empty();
@@ -23,15 +30,31 @@ function EmptyTest() {
assertWasm(11, EmptyTest);
+function VoidReturnTest(a, b, c) {
+ "use asm";
+ function caller() {
+ empty();
+ return 19;
+ }
+ function empty() {
+ var x = 0;
+ if (x) return;
+ }
+ return {caller: caller};
+}
+
+assertWasm(19, VoidReturnTest);
-function IntTest() {
+function IntTest(a, b, c) {
"use asm";
function sum(a, b) {
a = a|0;
b = b|0;
- var c = (b + 1)|0
+ var c = 0;
var d = 3.0;
- var e = ~~d; // double conversion
+ var e = 0;
+ e = ~~d; // double conversion
+ c = (b + 1)|0;
return (a + c + 1)|0;
}
@@ -54,8 +77,9 @@ function Float64Test() {
}
function caller() {
- var a = +sum(70.1,10.2);
+ var a = 0.0;
var ret = 0|0;
+ a = +sum(70.1,10.2);
if (a == 80.3) {
ret = 1|0;
} else {
@@ -75,7 +99,8 @@ function BadModule() {
function caller(a, b) {
a = a|0;
b = b+0;
- var c = (b + 1)|0
+ var c = 0;
+ c = (b + 1)|0;
return (a + c + 1)|0;
}
@@ -86,9 +111,7 @@ function BadModule() {
return {caller: caller};
}
-assertThrows(function() {
- Wasm.instantiateModuleFromAsm(BadModule.toString()).caller();
-});
+assertTrue(%IsNotAsmWasmCode(BadModule));
function TestReturnInBlock() {
@@ -131,7 +154,7 @@ function TestWhileSimple() {
function caller() {
var x = 0;
- while(x < 5) {
+ while((x|0) < 5) {
x = (x + 1)|0;
}
return x|0;
@@ -148,7 +171,7 @@ function TestWhileWithoutBraces() {
function caller() {
var x = 0;
- while(x <= 3)
+ while((x|0) <= 3)
x = (x + 1)|0;
return x|0;
}
@@ -164,7 +187,7 @@ function TestReturnInWhile() {
function caller() {
var x = 0;
- while(x < 10) {
+ while((x|0) < 10) {
x = (x + 6)|0;
return x|0;
}
@@ -182,7 +205,7 @@ function TestReturnInWhileWithoutBraces() {
function caller() {
var x = 0;
- while(x < 5)
+ while((x|0) < 5)
return 7;
return x|0;
}
@@ -193,6 +216,55 @@ function TestReturnInWhileWithoutBraces() {
assertWasm(7, TestReturnInWhileWithoutBraces);
+function TestBreakInIf() {
+ "use asm";
+
+ function caller() {
+ label: {
+ if(1) break label;
+ return 11;
+ }
+ return 12;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(12, TestBreakInIf);
+
+function TestBreakInIfInDoWhileFalse() {
+ "use asm";
+
+ function caller() {
+ do {
+ if(1) break;
+ return 11;
+ } while(0);
+ return 12;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(12, TestBreakInIfInDoWhileFalse);
+
+function TestBreakInElse() {
+ "use asm";
+
+ function caller() {
+ do {
+ if(0) ;
+ else break;
+ return 14;
+ } while(0);
+ return 15;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(15, TestBreakInElse);
+
function TestBreakInWhile() {
"use asm";
@@ -209,17 +281,33 @@ function TestBreakInWhile() {
assertWasm(8, TestBreakInWhile);
+function TestBreakInIfInWhile() {
+ "use asm";
+
+ function caller() {
+ while(1) {
+ if (1) break;
+ else break;
+ }
+ return 8;
+ }
+
+ return {caller: caller};
+}
+
+assertWasm(8, TestBreakInIfInWhile);
+
function TestBreakInNestedWhile() {
"use asm";
function caller() {
var x = 1.0;
+ var ret = 0;
while(x < 1.5) {
while(1)
break;
x = +(x + 0.25);
}
- var ret = 0;
if (x == 1.5) {
ret = 9;
}
@@ -239,7 +327,7 @@ function TestBreakInBlock() {
var x = 0;
abc: {
x = 10;
- if (x == 10) {
+ if ((x|0) == 10) {
break abc;
}
x = 20;
@@ -260,7 +348,7 @@ function TestBreakInNamedWhile() {
var x = 0;
outer: while (1) {
x = (x + 1)|0;
- while (x == 11) {
+ while ((x|0) == 11) {
break outer;
}
}
@@ -279,9 +367,9 @@ function TestContinue() {
function caller() {
var x = 5;
var ret = 0;
- while (x >= 0) {
+ while ((x|0) >= 0) {
x = (x - 1)|0;
- if (x == 2) {
+ if ((x|0) == 2) {
continue;
}
ret = (ret - 1)|0;
@@ -302,11 +390,11 @@ function TestContinueInNamedWhile() {
var x = 5;
var y = 0;
var ret = 0;
- outer: while (x > 0) {
+ outer: while ((x|0) > 0) {
x = (x - 1)|0;
y = 0;
- while (y < 5) {
- if (x == 3) {
+ while ((y|0) < 5) {
+ if ((x|0) == 3) {
continue outer;
}
ret = (ret + 1)|0;
@@ -326,7 +414,8 @@ function TestNot() {
"use asm";
function caller() {
- var a = !(2 > 3);
+ var a = 0;
+ a = !(2 > 3);
return a | 0;
}
@@ -341,7 +430,7 @@ function TestNotEquals() {
function caller() {
var a = 3;
- if (a != 2) {
+ if ((a|0) != 2) {
return 21;
}
return 0;
@@ -379,7 +468,7 @@ function TestMixedAdd() {
var c = 0;
c = ((a>>>0) + b)|0;
if ((c >>> 0) > (0>>>0)) {
- if (c < 0) {
+ if ((c|0) < 0) {
return 23;
}
}
@@ -414,8 +503,9 @@ assertWasm(7, TestInt32HeapAccess);
function TestInt32HeapAccessExternal() {
var memory = new ArrayBuffer(1024);
var memory_int32 = new Int32Array(memory);
- var module = Wasm.instantiateModuleFromAsm(
- TestInt32HeapAccess.toString(), null, memory);
+ var module_decl = eval('(' + TestInt32HeapAccess.toString() + ')');
+ var module = module_decl(stdlib, null, memory);
+ assertValidAsm(module_decl);
assertEquals(7, module.caller());
assertEquals(7, memory_int32[2]);
}
@@ -438,10 +528,13 @@ function TestHeapAccessIntTypes() {
code = code.replace(/>> 2/g, types[i][2]);
var memory = new ArrayBuffer(1024);
var memory_view = new types[i][0](memory);
- var module = Wasm.instantiateModuleFromAsm(code, null, memory);
+ var module_decl = eval('(' + code + ')');
+ var module = module_decl(stdlib, null, memory);
+ assertValidAsm(module_decl);
assertEquals(7, module.caller());
assertEquals(7, memory_view[2]);
- assertEquals(7, Wasm.instantiateModuleFromAsm(code).caller());
+ assertEquals(7, module_decl(stdlib).caller());
+ assertValidAsm(module_decl);
}
}
@@ -469,15 +562,15 @@ function TestFloatHeapAccess(stdlib, foreign, buffer) {
return {caller: caller};
}
-assertEquals(1, Wasm.instantiateModuleFromAsm(
- TestFloatHeapAccess.toString()).caller());
+assertWasm(1, TestFloatHeapAccess);
function TestFloatHeapAccessExternal() {
var memory = new ArrayBuffer(1024);
var memory_float64 = new Float64Array(memory);
- var module = Wasm.instantiateModuleFromAsm(
- TestFloatHeapAccess.toString(), null, memory);
+ var module_decl = eval('(' + TestFloatHeapAccess.toString() + ')');
+ var module = module_decl(stdlib, null, memory);
+ assertValidAsm(module_decl);
assertEquals(1, module.caller());
assertEquals(9.0, memory_float64[1]);
}
@@ -603,6 +696,7 @@ function TestModDoubleNegative() {
assertWasm(28, TestModDoubleNegative);
+
(function () {
function TestNamedFunctions() {
"use asm";
@@ -623,11 +717,14 @@ function TestNamedFunctions() {
add:add};
}
-var module = Wasm.instantiateModuleFromAsm(TestNamedFunctions.toString());
+var module_decl = eval('(' + TestNamedFunctions.toString() + ')');
+var module = module_decl(stdlib);
+assertValidAsm(module_decl);
module.init();
assertEquals(77.5, module.add());
})();
+
(function () {
function TestGlobalsWithInit() {
"use asm";
@@ -642,7 +739,9 @@ function TestGlobalsWithInit() {
return {add:add};
}
-var module = Wasm.instantiateModuleFromAsm(TestGlobalsWithInit.toString());
+var module_decl = eval('(' + TestGlobalsWithInit.toString() + ')');
+var module = module_decl(stdlib);
+assertValidAsm(module_decl);
assertEquals(77.5, module.add());
})();
@@ -652,7 +751,7 @@ function TestForLoop() {
function caller() {
var ret = 0;
var i = 0;
- for (i = 2; i <= 10; i = (i+1)|0) {
+ for (i = 2; (i|0) <= 10; i = (i+1)|0) {
ret = (ret + i) | 0;
}
return ret|0;
@@ -670,7 +769,7 @@ function TestForLoopWithoutInit() {
function caller() {
var ret = 0;
var i = 0;
- for (; i < 10; i = (i+1)|0) {
+ for (; (i|0) < 10; i = (i+1)|0) {
ret = (ret + 10) | 0;
}
return ret|0;
@@ -690,7 +789,7 @@ function TestForLoopWithoutCondition() {
var i = 0;
for (i=1;; i = (i+1)|0) {
ret = (ret + i) | 0;
- if (i == 11) {
+ if ((i|0) == 11) {
break;
}
}
@@ -708,7 +807,7 @@ function TestForLoopWithoutNext() {
function caller() {
var i = 0;
- for (i=1; i < 41;) {
+ for (i=1; (i|0) < 41;) {
i = (i + 1) | 0;
}
return i|0;
@@ -725,7 +824,7 @@ function TestForLoopWithoutBody() {
function caller() {
var i = 0;
- for (i=1; i < 45 ; i = (i+1)|0) {
+ for (i=1; (i|0) < 45 ; i = (i+1)|0) {
}
return i|0;
}
@@ -745,7 +844,7 @@ function TestDoWhile() {
do {
ret = (ret + ret)|0;
i = (i + 1)|0;
- } while (i < 2);
+ } while ((i|0) < 2);
return ret|0;
}
@@ -760,7 +859,7 @@ function TestConditional() {
function caller() {
var x = 1;
- return ((x > 0) ? 41 : 71)|0;
+ return (((x|0) > 0) ? 41 : 71)|0;
}
return {caller:caller};
@@ -769,83 +868,6 @@ function TestConditional() {
assertWasm(41, TestConditional);
-function TestSwitch() {
- "use asm"
-
- function caller() {
- var ret = 0;
- var x = 7;
- switch (x) {
- case 1: return 0;
- case 7: {
- ret = 12;
- break;
- }
- default: return 0;
- }
- switch (x) {
- case 1: return 0;
- case 8: return 0;
- default: ret = (ret + 11)|0;
- }
- return ret|0;
- }
-
- return {caller:caller};
-}
-
-assertWasm(23, TestSwitch);
-
-
-function TestSwitchFallthrough() {
- "use asm"
-
- function caller() {
- var x = 17;
- var ret = 0;
- switch (x) {
- case 17:
- case 14: ret = 39;
- case 1: ret = (ret + 3)|0;
- case 4: break;
- default: ret = (ret + 1)|0;
- }
- return ret|0;
- }
-
- return {caller:caller};
-}
-
-assertWasm(42, TestSwitchFallthrough);
-
-
-function TestNestedSwitch() {
- "use asm"
-
- function caller() {
- var x = 3;
- var y = -13;
- switch (x) {
- case 1: return 0;
- case 3: {
- switch (y) {
- case 2: return 0;
- case -13: return 43;
- default: return 0;
- }
- }
- default: return 0;
- }
- return 0;
- }
-
- return {caller:caller};
-}
-
-assertWasm(43, TestNestedSwitch);
-
-
-(function () {
function TestInitFunctionWithNoGlobals() {
"use asm";
function caller() {
@@ -854,10 +876,8 @@ function TestInitFunctionWithNoGlobals() {
return {caller};
}
-var module = Wasm.instantiateModuleFromAsm(
- TestInitFunctionWithNoGlobals.toString());
-assertEquals(51, module.caller());
-})();
+assertWasm(51, TestInitFunctionWithNoGlobals);
+
(function () {
function TestExportNameDifferentFromFunctionName() {
@@ -868,11 +888,14 @@ function TestExportNameDifferentFromFunctionName() {
return {alt_caller:caller};
}
-var module = Wasm.instantiateModuleFromAsm(
- TestExportNameDifferentFromFunctionName.toString());
+var module_decl = eval(
+ '(' + TestExportNameDifferentFromFunctionName.toString() + ')');
+var module = module_decl(stdlib);
+assertValidAsm(module_decl);
assertEquals(55, module.alt_caller());
})();
+
function TestFunctionTableSingleFunction() {
"use asm";
@@ -881,7 +904,9 @@ function TestFunctionTableSingleFunction() {
}
function caller() {
- return function_table[0&0]() | 0;
+    // TODO(jpp): the parser optimizes function_table[0&0] to function_table[0].
+ var v = 0;
+ return function_table[v&0]() | 0;
}
var function_table = [dummy]
@@ -906,8 +931,9 @@ function TestFunctionTableMultipleFunctions() {
}
function caller() {
- if (function_table[0&1](50) == 51) {
- if (function_table[1&1](60) == 62) {
+ var i = 0, j = 1;
+ if ((function_table[i&1](50)|0) == 51) {
+ if ((function_table[j&1](60)|0) == 62) {
return 73;
}
}
@@ -923,7 +949,7 @@ assertWasm(73, TestFunctionTableMultipleFunctions);
(function () {
-function TestFunctionTable() {
+function TestFunctionTable(stdlib, foreign, buffer) {
"use asm";
function add(a, b) {
@@ -948,9 +974,9 @@ function TestFunctionTable() {
fun_id = fun_id|0;
arg1 = arg1|0;
arg2 = arg2|0;
- if (table_id == 0) {
+ if ((table_id|0) == 0) {
return funBin[fun_id&3](arg1, arg2)|0;
- } else if (table_id == 1) {
+ } else if ((table_id|0) == 1) {
return fun[fun_id&0](arg1)|0;
}
return 0;
@@ -962,7 +988,7 @@ function TestFunctionTable() {
return {caller:caller};
}
-var module = Wasm.instantiateModuleFromAsm(TestFunctionTable.toString());
+var module = TestFunctionTable(stdlib);
assertEquals(55, module.caller(0, 0, 33, 22));
assertEquals(11, module.caller(0, 1, 33, 22));
assertEquals(9, module.caller(0, 2, 54, 45));
@@ -1008,8 +1034,8 @@ function TestForeignFunctions() {
var foreign = new ffi(23);
- var module = Wasm.instantiateModuleFromAsm(AsmModule.toString(),
- foreign, null);
+ var module = AsmModule({Math: Math}, foreign, null);
+ assertValidAsm(AsmModule);
assertEquals(103, module.caller(23, 103));
}
@@ -1047,8 +1073,9 @@ function TestForeignFunctionMultipleUse() {
var foreign = new ffi();
- var module = Wasm.instantiateModuleFromAsm(AsmModule.toString(),
- foreign, null);
+ var module_decl = eval('(' + AsmModule.toString() + ')');
+ var module = module_decl(stdlib, foreign, null);
+ assertValidAsm(module_decl);
assertEquals(89, module.caller(83, 83.25));
}
@@ -1086,8 +1113,9 @@ function TestForeignVariables() {
function TestCase(env, i1, f1, i2, f2) {
print("Testing foreign variables...");
- var module = Wasm.instantiateModuleFromAsm(
- AsmModule.toString(), env);
+ var module_decl = eval('(' + AsmModule.toString() + ')');
+ var module = module_decl(stdlib, env);
+ assertValidAsm(module_decl);
assertEquals(i1, module.geti1());
assertEquals(f1, module.getf1());
assertEquals(i2, module.geti2());
@@ -1178,8 +1206,9 @@ TestForeignVariables();
return {load: load, iload: iload, store: store, storeb: storeb};
}
- var m = Wasm.instantiateModuleFromAsm(
- TestByteHeapAccessCompat.toString());
+ var module_decl = eval('(' + TestByteHeapAccessCompat.toString() + ')');
+ var m = module_decl(stdlib);
+ assertValidAsm(module_decl);
m.store(0, 20);
m.store(4, 21);
m.store(8, 22);
@@ -1228,7 +1257,9 @@ assertWasm(15, TestGlobalBlock, { x: 4, y: 11 });
return {ifunc: ifunc, dfunc: dfunc};
}
- var m = Wasm.instantiateModuleFromAsm(CommaModule.toString());
+ var module_decl = eval('(' + CommaModule.toString() + ')');
+ var m = module_decl(stdlib);
+ assertValidAsm(module_decl);
assertEquals(123, m.ifunc(456.7, 123));
assertEquals(123.4, m.dfunc(456, 123.4));
})();
@@ -1285,73 +1316,91 @@ function TestXor() {
assertWasm(1, TestXor);
-(function TestIntishAssignment() {
- function Module(stdlib, foreign, heap) {
- "use asm";
- var HEAP32 = new stdlib.Int32Array(heap);
- function func() {
- var a = 1;
- var b = 2;
- HEAP32[0] = a + b;
- return HEAP32[0] | 0;
- }
- return {func: func};
+function TestIntishAssignment(stdlib, foreign, heap) {
+ "use asm";
+ var HEAP32 = new stdlib.Int32Array(heap);
+ function func() {
+ var a = 1;
+ var b = 2;
+ HEAP32[0] = a + b;
+ return HEAP32[0] | 0;
}
+ return {caller: func};
+}
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
- assertEquals(3, m.func());
-})();
+assertWasm(3, TestIntishAssignment);
-(function TestFloatishAssignment() {
- function Module(stdlib, foreign, heap) {
- "use asm";
- var HEAPF32 = new stdlib.Float32Array(heap);
- var fround = stdlib.Math.fround;
- function func() {
- var a = fround(1.0);
- var b = fround(2.0);
- HEAPF32[0] = a + b;
- return +HEAPF32[0];
- }
- return {func: func};
+function TestFloatishAssignment(stdlib, foreign, heap) {
+ "use asm";
+ var HEAPF32 = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
+ function func() {
+ var a = fround(1.0);
+ var b = fround(2.0);
+ HEAPF32[0] = a + b;
+ return +HEAPF32[0];
}
+ return {caller: func};
+}
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
- assertEquals(3, m.func());
-})();
+assertWasm(3, TestFloatishAssignment);
-(function TestDoubleToFloatAssignment() {
+function TestDoubleToFloatAssignment(stdlib, foreign, heap) {
+ "use asm";
+ var HEAPF32 = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
+ function func() {
+ var a = 1.23;
+ HEAPF32[0] = a;
+ return +HEAPF32[0];
+ }
+ return {caller: func};
+}
+
+assertWasm(Math.fround(1.23), TestDoubleToFloatAssignment);
+
+
+function TestIntegerMultiplyBothWays(stdlib, foreign, heap) {
+ "use asm";
+ function func() {
+ var a = 1;
+ return (((a * 3)|0) + ((4 * a)|0)) | 0;
+ }
+ return {caller: func};
+}
+
+assertWasm(7, TestIntegerMultiplyBothWays);
+
+
+(function TestBadAssignDoubleFromIntish() {
function Module(stdlib, foreign, heap) {
"use asm";
- var HEAPF32 = new stdlib.Float32Array(heap);
- var fround = stdlib.Math.fround;
function func() {
- var a = 1.23;
- HEAPF32[0] = a;
- return +HEAPF32[0];
+ var a = 1;
+ var b = 3.0;
+ b = a;
}
return {func: func};
}
-
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
- assertEquals(1.23, m.func());
-});
+ Module(stdlib);
+ assertTrue(%IsNotAsmWasmCode(Module));
+})();
-(function TestIntegerMultiplyBothWays() {
+(function TestBadAssignIntFromDouble() {
function Module(stdlib, foreign, heap) {
"use asm";
function func() {
var a = 1;
- return ((a * 3) + (4 * a)) | 0;
+ var b = 3.0;
+ a = b;
}
return {func: func};
}
-
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
- assertEquals(7, m.func());
+ Module(stdlib);
+ assertTrue(%IsNotAsmWasmCode(Module));
})();
@@ -1364,9 +1413,8 @@ assertWasm(1, TestXor);
}
return {func: func};
}
- assertThrows(function() {
- Wasm.instantiateModuleFromAsm(Module.toString());
- });
+ Module(stdlib);
+ assertTrue(%IsNotAsmWasmCode(Module));
})();
@@ -1379,44 +1427,37 @@ assertWasm(1, TestXor);
}
return {func: func};
}
- assertThrows(function() {
- Wasm.instantiateModuleFromAsm(Module.toString());
- });
+ Module(stdlib);
+ assertTrue(%IsNotAsmWasmCode(Module));
})();
-(function TestAndNegative() {
- function Module() {
- "use asm";
- function func() {
- var x = 1;
- var y = 2;
- var z = 0;
- z = x + y & -1;
- return z | 0;
- }
- return {func: func};
+function TestAndNegative() {
+ "use asm";
+ function func() {
+ var x = 1;
+ var y = 2;
+ var z = 0;
+ z = x + y & -1;
+ return z | 0;
}
+ return {caller: func};
+}
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
- assertEquals(3, m.func());
-})();
+assertWasm(3, TestAndNegative);
-(function TestNegativeDouble() {
- function Module() {
- "use asm";
- function func() {
- var x = -(34359738368.25);
- var y = -2.5;
- return +(x + y);
- }
- return {func: func};
+function TestNegativeDouble() {
+ "use asm";
+ function func() {
+ var x = -(34359738368.25);
+ var y = -2.5;
+ return +(x + y);
}
+ return {caller: func};
+}
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
- assertEquals(-34359738370.75, m.func());
-})();
+assertWasm(-34359738370.75, TestNegativeDouble);
(function TestBadAndDouble() {
@@ -1430,42 +1471,38 @@ assertWasm(1, TestXor);
return {func: func};
}
- assertThrows(function() {
- Wasm.instantiateModuleFromAsm(Module.toString());
- });
+ Module(stdlib);
+ assertTrue(%IsNotAsmWasmCode(Module));
})();
-(function TestAndIntAndHeapValue() {
- function Module(stdlib, foreign, buffer) {
- "use asm";
- var HEAP32 = new stdlib.Int32Array(buffer);
- function func() {
- var x = 0;
- x = HEAP32[0] & -1;
- return x | 0;
- }
- return {func: func};
+function TestAndIntAndHeapValue(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP32 = new stdlib.Int32Array(buffer);
+ function func() {
+ var x = 0;
+ x = HEAP32[0] & -1;
+ return x | 0;
}
+ return {caller: func};
+}
- var m = Wasm.instantiateModuleFromAsm(Module.toString());
- assertEquals(0, m.func());
-})();
+assertWasm(0, TestAndIntAndHeapValue);
-(function TestOutOfBoundsConversion() {
- function asmModule($a,$b,$c){'use asm';
- function aaa() {
- var f = 0.0;
- var a = 0;
- f = 5616315000.000001;
- a = ~~f >>>0;
- return a | 0;
- }
- return { main : aaa };
+
+function TestOutOfBoundsConversion($a,$b,$c){'use asm';
+ function aaa() {
+ var f = 0.0;
+ var a = 0;
+ f = 5616315000.000001;
+ a = ~~f >>>0;
+ return a | 0;
}
- var wasm = Wasm.instantiateModuleFromAsm(asmModule.toString());
- assertEquals(1321347704, wasm.main());
-})();
+ return { caller : aaa };
+}
+
+assertWasm(1321347704, TestOutOfBoundsConversion);
+
(function TestUnsignedLiterals() {
function asmModule() {
@@ -1488,8 +1525,59 @@ assertWasm(1, TestXor);
u0x87654321: u0x87654321,
};
}
- var wasm = Wasm.instantiateModuleFromAsm(asmModule.toString());
+ var decl = eval('(' + asmModule.toString() + ')');
+ var wasm = decl(stdlib);
+ assertValidAsm(decl);
assertEquals(0xffffffff, wasm.u0xffffffff());
assertEquals(0x80000000, wasm.u0x80000000());
assertEquals(0x87654321, wasm.u0x87654321());
})();
+
+
+function TestIfWithUnsigned() {
+ "use asm";
+ function main() {
+ if (2147483658) { // 2^31 + 10
+ return 231;
+ }
+ return 0;
+ }
+ return {caller:main};
+}
+
+assertWasm(231, TestIfWithUnsigned);
+
+
+function TestLoopsWithUnsigned() {
+ "use asm";
+ function main() {
+ var val = 1;
+ var count = 0;
+ for (val = 2147483648; 2147483648;) {
+ val = 2147483649;
+ break;
+ }
+ while (val>>>0) {
+ val = (val + 1) | 0;
+ count = (count + 1)|0;
+ if ((count|0) == 9) {
+ break;
+ }
+ }
+ count = 0;
+ do {
+ val = (val + 2) | 0;
+ count = (count + 1)|0;
+ if ((count|0) == 5) {
+ break;
+ }
+ } while (0xffffffff);
+ if ((val>>>0) == 2147483668) {
+ return 323;
+ }
+ return 0;
+ }
+ return {caller:main};
+}
+
+assertWasm(323, TestLoopsWithUnsigned);
diff --git a/deps/v8/test/mjsunit/wasm/calls.js b/deps/v8/test/mjsunit/wasm/calls.js
index 11cc92a8ec..4da0501cf2 100644
--- a/deps/v8/test/mjsunit/wasm/calls.js
+++ b/deps/v8/test/mjsunit/wasm/calls.js
@@ -37,7 +37,6 @@ function assertFunction(module, func) {
assertFalse(exp === null);
assertFalse(exp === 0);
assertEquals("function", typeof exp);
-
return exp;
}
@@ -46,11 +45,12 @@ function assertFunction(module, func) {
var builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("sub", [kAstI32, kAstI32, kAstI32])
+ builder.addFunction("sub", kSig_i_ii)
.addBody([
- kExprI32Sub, // --
kExprGetLocal, 0, // --
- kExprGetLocal, 1]) // --
+ kExprGetLocal, 1, // --
+ kExprI32Sub, // --
+ ])
.exportFunc()
var module = builder.instantiate();
@@ -70,7 +70,7 @@ function assertFunction(module, func) {
var kPages = 2;
builder.addMemory(kPages, kPages, true);
- builder.addFunction("nop", [kAstStmt])
+ builder.addFunction("nop", kSig_v_v)
.addBody([kExprNop])
.exportFunc();
@@ -87,11 +87,12 @@ function assertFunction(module, func) {
var kPages = 3;
builder.addMemory(kPages, kPages, true);
- builder.addFunction("flt", [kAstI32, kAstF64, kAstF64])
+ builder.addFunction("flt", kSig_i_dd)
.addBody([
- kExprF64Lt, // --
kExprGetLocal, 0, // --
- kExprGetLocal, 1]) // --
+ kExprGetLocal, 1, // --
+ kExprF64Lt // --
+ ]) // --
.exportFunc();
var module = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
new file mode 100644
index 0000000000..94cc894275
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -0,0 +1,80 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --allow-natives-syntax --expose-gc
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function SerializeAndDeserializeModule() {
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1,1, true);
+ var kSig_v_i = makeSig([kAstI32], []);
+ var signature = builder.addType(kSig_v_i);
+ builder.addImport("some_value", kSig_i);
+ builder.addImport("writer", signature);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprI32Const, 1,
+ kExprGetLocal, 0,
+ kExprI32LoadMem, 0, 0,
+ kExprCallIndirect, kArity1, signature,
+ kExprGetLocal,0,
+ kExprI32LoadMem,0, 0,
+ kExprCallImport, kArity0, 0,
+ kExprI32Add
+ ]).exportFunc();
+
+ // writer(mem[i]);
+ // return mem[i] + some_value();
+ builder.addFunction("_wrap_writer", signature)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallImport, kArity1, 1]);
+ builder.appendToTable([0, 1]);
+
+
+ var module = new WebAssembly.Module(builder.toBuffer());
+ var buff = %SerializeWasmModule(module);
+ module = null;
+ gc();
+ module = %DeserializeWasmModule(buff);
+
+ var mem_1 = new ArrayBuffer(4);
+ var view_1 = new Int32Array(mem_1);
+
+ view_1[0] = 42;
+
+ var outval_1;
+ var i1 = new WebAssembly.Instance(module, {some_value: () => 1,
+ writer: (x)=>outval_1 = x }, mem_1);
+
+ assertEquals(43, i1.exports.main(0));
+
+ assertEquals(42, outval_1);
+})();
+
+(function DeserializeInvalidObject() {
+ var invalid_buffer = new ArrayBuffer(10);
+
+ module = %DeserializeWasmModule(invalid_buffer);
+ assertEquals(module, undefined);
+})();
+
+(function RelationBetweenModuleAndClone() {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i)
+ .addBody([kExprI8Const, 42])
+ .exportFunc();
+
+ var compiled_module = new WebAssembly.Module(builder.toBuffer());
+ var serialized = %SerializeWasmModule(compiled_module);
+ var clone = %DeserializeWasmModule(serialized);
+
+ assertNotNull(clone);
+ assertFalse(clone == undefined);
+ assertFalse(clone == compiled_module);
+ assertEquals(clone.constructor, compiled_module.constructor);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/debug-disassembly.js b/deps/v8/test/mjsunit/wasm/debug-disassembly.js
new file mode 100644
index 0000000000..976098a853
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/debug-disassembly.js
@@ -0,0 +1,128 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-debug-as debug
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+Debug = debug.Debug
+
+// Initialized in setup().
+var exception;
+var break_count;
+var num_wasm_scripts;
+var module;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ ++break_count;
+ // Request frame details. This should trigger creation of the Script
+ // objects for all frames on the stack.
+ var num_frames = exec_state.frameCount();
+ for (var i = 0; i < num_frames; ++i) {
+ var frame = exec_state.frame(i);
+ var details = frame.details();
+ var script = details.script();
+ if (script.type == Debug.ScriptType.Wasm) {
+ var pos = frame.sourcePosition();
+ var name = script.nameOrSourceURL();
+ var disassembly = Debug.disassembleWasmFunction(script.id);
+ var offset_table = Debug.getWasmFunctionOffsetTable(script.id);
+ assertEquals(0, offset_table.length % 3);
+ var lineNr = null;
+ var columnNr = null;
+ for (var p = 0; p < offset_table.length; p += 3) {
+ if (offset_table[p] != pos) continue;
+ lineNr = offset_table[p+1];
+ columnNr = offset_table[p+2];
+ }
+ assertNotNull(lineNr, "position should occur in offset table");
+ assertNotNull(columnNr, "position should occur in offset table");
+ var line = disassembly.split("\n")[lineNr];
+ assertTrue(!!line, "line number must occur in disassembly");
+ assertTrue(line.length > columnNr, "column number must be valid");
+ var expected_string;
+ if (name.endsWith("/0")) {
+ // Function 0 calls the imported function.
+ expected_string = "kExprCallImport,";
+ } else if (name.endsWith("/1")) {
+ // Function 1 calls function 0.
+ expected_string = "kExprCallFunction,";
+ } else {
+ assertTrue(false, "Unexpected wasm script: " + name);
+ }
+ assertTrue(line.substr(columnNr).startsWith(expected_string),
+ "offset " + columnNr + " should start with '" + expected_string
+ + "': " + line);
+ }
+ }
+ } else if (event == Debug.DebugEvent.AfterCompile) {
+ var script = event_data.script();
+ if (script.scriptType() == Debug.ScriptType.Wasm) {
+ ++num_wasm_scripts;
+ }
+ }
+ } catch (e) {
+ print("exception: " + e);
+ exception = e;
+ }
+};
+
+var builder = new WasmModuleBuilder();
+
+builder.addImport("func", kSig_v_v);
+
+builder.addFunction("call_import", kSig_v_v)
+ .addBody([kExprCallImport, kArity0, 0])
+ .exportFunc();
+
+// Add a bit of unnecessary code to increase the byte offset.
+builder.addFunction("call_call_import", kSig_v_v)
+ .addLocals({i32_count: 2})
+ .addBody([
+ kExprI32Const, 27, kExprSetLocal, 0,
+ kExprI32Const, (-7 & 0x7f), kExprSetLocal, 1,
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add, kExprI64UConvertI32,
+ kExprI64Const, 0,
+ kExprI64Ne, kExprIf,
+ kExprCallFunction, kArity0, 0,
+ kExprEnd
+ ])
+ .exportFunc();
+
+function call_debugger() {
+ debugger;
+}
+
+function setup() {
+ module = builder.instantiate({func: call_debugger});
+ exception = null;
+ break_count = 0;
+ num_wasm_scripts = 0;
+}
+
+(function testRegisteredWasmScripts1() {
+ setup();
+ Debug.setListener(listener);
+ // Call the "call_import" function -> 1 script.
+ module.exports.call_import();
+ module.exports.call_import();
+ module.exports.call_call_import();
+ Debug.setListener(null);
+
+ assertEquals(3, break_count);
+ if (exception) throw exception;
+})();
+
+(function testRegisteredWasmScripts2() {
+ setup();
+ Debug.setListener(listener);
+ module.exports.call_call_import();
+ Debug.setListener(null);
+
+ assertEquals(1, break_count);
+ if (exception) throw exception;
+})();
diff --git a/deps/v8/test/mjsunit/wasm/divrem-trap.js b/deps/v8/test/mjsunit/wasm/divrem-trap.js
index 976e4736bc..6f3ff5db73 100644
--- a/deps/v8/test/mjsunit/wasm/divrem-trap.js
+++ b/deps/v8/test/mjsunit/wasm/divrem-trap.js
@@ -33,8 +33,12 @@ function assertTraps(code, msg) {
function makeBinop(opcode) {
var builder = new WasmModuleBuilder();
- builder.addFunction("main", [kAstI32, kAstI32, kAstI32])
- .addBody([opcode, kExprGetLocal, 0, kExprGetLocal, 1])
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ opcode, // --
+ ])
.exportFunc();
return builder.instantiate().exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/box2d.js b/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
index d9c78124d9..d8800e7758 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/box2d.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT =
/frame averages: .+ \+- .+, range: .+ to .+ \n/;
@@ -6038,7 +6038,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -20100,9 +20101,10 @@ function b6() {
var FUNCTION_TABLE_viiii = [b11,__ZNK11b2EdgeShape11ComputeAABBEP6b2AABBRK11b2Transformi,__ZNK14b2PolygonShape11ComputeAABBEP6b2AABBRK11b2Transformi,__ZN22b2EdgeAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN23b2EdgeAndPolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN25b2PolygonAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN16b2PolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN23b2ChainAndCircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN24b2ChainAndPolygonContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZN15b2CircleContact8EvaluateEP10b2ManifoldRK11b2TransformS4_,__ZNK10__cxxabiv117__class_type_info27has_unambiguous_public_baseEPNS_19__dynamic_cast_infoEPvi,__ZNK10__cxxabiv120__si_class_type_info27has_unambiguous_public_baseEPNS_19__dynamic_cast_infoEPvi,b11,b11,b11,b11];
return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_iiii: dynCall_iiii, dynCall_viiiii: dynCall_viiiii, dynCall_vi: dynCall_vi, dynCall_vii: dynCall_vii, dynCall_ii: dynCall_ii, dynCall_viii: dynCall_viii, dynCall_v: dynCall_v, dynCall_viid: dynCall_viid, dynCall_viiiiii: dynCall_viiiiii, dynCall_iii: dynCall_iii, dynCall_iiiiii: dynCall_iiiiii, dynCall_viiii: dynCall_viiii };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_viiiii": invoke_viiiii, "invoke_vi": invoke_vi, "invoke_vii": invoke_vii, "invoke_ii": invoke_ii, "invoke_viii": invoke_viii, "invoke_v": invoke_v, "invoke_viid": invoke_viid, "invoke_viiiiii": invoke_viiiiii, "invoke_iii": invoke_iii, "invoke_iiiiii": invoke_iiiiii, "invoke_viiii": invoke_viiii, "___cxa_throw": ___cxa_throw, "_emscripten_run_script": _emscripten_run_script, "_cosf": _cosf, "_send": _send, "__ZSt9terminatev": __ZSt9terminatev, "__reallyNegative": __reallyNegative, "___cxa_is_number_type": ___cxa_is_number_type, "___assert_fail": ___assert_fail, "___cxa_allocate_exception": ___cxa_allocate_exception, "___cxa_find_matching_catch": ___cxa_find_matching_catch, "_fflush": _fflush, "_pwrite": _pwrite, "___setErrNo": ___setErrNo, "_sbrk": _sbrk, "___cxa_begin_catch": ___cxa_begin_catch, "_sinf": _sinf, "_fileno": _fileno, "___resumeException": ___resumeException, "__ZSt18uncaught_exceptionv": __ZSt18uncaught_exceptionv, "_sysconf": _sysconf, "_clock": _clock, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_puts": _puts, "_mkport": _mkport, "_floorf": _floorf, "_sqrtf": _sqrtf, "_write": _write, "_emscripten_set_main_loop": _emscripten_set_main_loop, "___errno_location": ___errno_location, "__ZNSt9exceptionD2Ev": __ZNSt9exceptionD2Ev, "_printf": _printf, "___cxa_does_inherit": ___cxa_does_inherit, "__exit": __exit, "_fputc": _fputc, "_abort": _abort, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "_emscripten_cancel_main_loop": _emscripten_cancel_main_loop, "__formatString": __formatString, "_fputs": _fputs, "_exit": _exit, "___cxa_pure_virtual": ___cxa_pure_virtual, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity, "__ZTISt9exception": __ZTISt9exception }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_viiiii": invoke_viiiii, "invoke_vi": invoke_vi, "invoke_vii": invoke_vii, "invoke_ii": invoke_ii, "invoke_viii": invoke_viii, "invoke_v": invoke_v, "invoke_viid": invoke_viid, "invoke_viiiiii": invoke_viiiiii, "invoke_iii": invoke_iii, "invoke_iiiiii": invoke_iiiiii, "invoke_viiii": invoke_viiii, "___cxa_throw": ___cxa_throw, "_emscripten_run_script": _emscripten_run_script, "_cosf": _cosf, "_send": _send, "__ZSt9terminatev": __ZSt9terminatev, "__reallyNegative": __reallyNegative, "___cxa_is_number_type": ___cxa_is_number_type, "___assert_fail": ___assert_fail, "___cxa_allocate_exception": ___cxa_allocate_exception, "___cxa_find_matching_catch": ___cxa_find_matching_catch, "_fflush": _fflush, "_pwrite": _pwrite, "___setErrNo": ___setErrNo, "_sbrk": _sbrk, "___cxa_begin_catch": ___cxa_begin_catch, "_sinf": _sinf, "_fileno": _fileno, "___resumeException": ___resumeException, "__ZSt18uncaught_exceptionv": __ZSt18uncaught_exceptionv, "_sysconf": _sysconf, "_clock": _clock, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_puts": _puts, "_mkport": _mkport, "_floorf": _floorf, "_sqrtf": _sqrtf, "_write": _write, "_emscripten_set_main_loop": _emscripten_set_main_loop, "___errno_location": ___errno_location, "__ZNSt9exceptionD2Ev": __ZNSt9exceptionD2Ev, "_printf": _printf, "___cxa_does_inherit": ___cxa_does_inherit, "__exit": __exit, "_fputc": _fputc, "_abort": _abort, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "_emscripten_cancel_main_loop": _emscripten_cancel_main_loop, "__formatString": __formatString, "_fputs": _fputs, "_exit": _exit, "___cxa_pure_virtual": ___cxa_pure_virtual, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity, "__ZTISt9exception": __ZTISt9exception }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _free = Module["_free"] = asm["_free"];
var _main = Module["_main"] = asm["_main"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/copy.js b/deps/v8/test/mjsunit/wasm/embenchen/copy.js
index 70609aa242..ce2ea9273e 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/copy.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/copy.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT = 'sum:8930\n';
var Module = {
@@ -5452,7 +5452,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -5768,9 +5769,10 @@ function stackSave() {
return { _strlen: _strlen, _memcpy: _memcpy, _main: _main, _memset: _memset, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _memcpy = Module["_memcpy"] = asm["_memcpy"];
var _main = Module["_main"] = asm["_main"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/corrections.js b/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
index 23bec5f5c7..e8c46316b8 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/corrections.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT = 'final: 40006013:58243.\n';
var Module = {
@@ -5452,7 +5452,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -5775,9 +5776,10 @@ function stackSave() {
return { _strlen: _strlen, _memcpy: _memcpy, _main: _main, _memset: _memset, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _memcpy = Module["_memcpy"] = asm["_memcpy"];
var _main = Module["_main"] = asm["_main"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js b/deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js
index 8c03a344f2..86ba2862f0 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/fannkuch.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT =
'123456789\n' +
@@ -5666,7 +5666,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -8225,9 +8226,10 @@ function stackSave() {
return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_fflush": _fflush, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_putchar": _putchar, "_fputc": _fputc, "_send": _send, "_pwrite": _pwrite, "_abort": _abort, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_sbrk": _sbrk, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_printf": _printf, "_time": _time, "_sysconf": _sysconf, "_write": _write, "___errno_location": ___errno_location, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_fflush": _fflush, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_putchar": _putchar, "_fputc": _fputc, "_send": _send, "_pwrite": _pwrite, "_abort": _abort, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_sbrk": _sbrk, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_printf": _printf, "_time": _time, "_sysconf": _sysconf, "_write": _write, "___errno_location": ___errno_location, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _free = Module["_free"] = asm["_free"];
var _main = Module["_main"] = asm["_main"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/fasta.js b/deps/v8/test/mjsunit/wasm/embenchen/fasta.js
index 1cd47fa1db..4c9f611160 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/fasta.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/fasta.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT =
'GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA\n' +
@@ -5828,7 +5828,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -8392,9 +8393,10 @@ function b2() {
var FUNCTION_TABLE_v = [b2];
return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_ii: dynCall_ii, dynCall_vi: dynCall_vi, dynCall_v: dynCall_v };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_ii": invoke_ii, "invoke_vi": invoke_vi, "invoke_v": invoke_v, "_send": _send, "___setErrNo": ___setErrNo, "___cxa_is_number_type": ___cxa_is_number_type, "___cxa_allocate_exception": ___cxa_allocate_exception, "___cxa_find_matching_catch": ___cxa_find_matching_catch, "_fflush": _fflush, "_time": _time, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_sbrk": _sbrk, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_fileno": _fileno, "___resumeException": ___resumeException, "__ZSt18uncaught_exceptionv": __ZSt18uncaught_exceptionv, "_sysconf": _sysconf, "_puts": _puts, "_mkport": _mkport, "_write": _write, "___errno_location": ___errno_location, "__ZNSt9exceptionD2Ev": __ZNSt9exceptionD2Ev, "_fputc": _fputc, "___cxa_throw": ___cxa_throw, "_abort": _abort, "_fwrite": _fwrite, "___cxa_does_inherit": ___cxa_does_inherit, "_fprintf": _fprintf, "__formatString": __formatString, "_fputs": _fputs, "_printf": _printf, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity, "__ZTISt9exception": __ZTISt9exception, "__ZTVN10__cxxabiv120__si_class_type_infoE": __ZTVN10__cxxabiv120__si_class_type_infoE }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_ii": invoke_ii, "invoke_vi": invoke_vi, "invoke_v": invoke_v, "_send": _send, "___setErrNo": ___setErrNo, "___cxa_is_number_type": ___cxa_is_number_type, "___cxa_allocate_exception": ___cxa_allocate_exception, "___cxa_find_matching_catch": ___cxa_find_matching_catch, "_fflush": _fflush, "_time": _time, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_sbrk": _sbrk, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_fileno": _fileno, "___resumeException": ___resumeException, "__ZSt18uncaught_exceptionv": __ZSt18uncaught_exceptionv, "_sysconf": _sysconf, "_puts": _puts, "_mkport": _mkport, "_write": _write, "___errno_location": ___errno_location, "__ZNSt9exceptionD2Ev": __ZNSt9exceptionD2Ev, "_fputc": _fputc, "___cxa_throw": ___cxa_throw, "_abort": _abort, "_fwrite": _fwrite, "___cxa_does_inherit": ___cxa_does_inherit, "_fprintf": _fprintf, "__formatString": __formatString, "_fputs": _fputs, "_printf": _printf, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity, "__ZTISt9exception": __ZTISt9exception, "__ZTVN10__cxxabiv120__si_class_type_infoE": __ZTVN10__cxxabiv120__si_class_type_infoE }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _free = Module["_free"] = asm["_free"];
var _main = Module["_main"] = asm["_main"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js b/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
index a5f8228b82..17d52a33b7 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/lua_binarytrees.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT =
'stretch tree of depth 10\t check: -1\n' +
@@ -7208,7 +7208,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -40877,9 +40878,10 @@ function b1(i1) {
var FUNCTION_TABLE_iii = [b5,_lua_newstate];
return { _testSetjmp: _testSetjmp, _i64Subtract: _i64Subtract, _free: _free, _main: _main, _rand_r: _rand_r, _realloc: _realloc, _i64Add: _i64Add, _tolower: _tolower, _saveSetjmp: _saveSetjmp, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, _strlen: _strlen, _rand: _rand, _bitshift64Shl: _bitshift64Shl, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_iiii: dynCall_iiii, dynCall_vi: dynCall_vi, dynCall_vii: dynCall_vii, dynCall_ii: dynCall_ii, dynCall_iiiii: dynCall_iiiii, dynCall_iii: dynCall_iii };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_vi": invoke_vi, "invoke_vii": invoke_vii, "invoke_ii": invoke_ii, "invoke_iiiii": invoke_iiiii, "invoke_iii": invoke_iii, "_isalnum": _isalnum, "_fabs": _fabs, "_frexp": _frexp, "_exp": _exp, "_fread": _fread, "__reallyNegative": __reallyNegative, "_longjmp": _longjmp, "__addDays": __addDays, "_fsync": _fsync, "_signal": _signal, "_rename": _rename, "_sbrk": _sbrk, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_sinh": _sinh, "_sysconf": _sysconf, "_close": _close, "_ferror": _ferror, "_clock": _clock, "_cos": _cos, "_tanh": _tanh, "_unlink": _unlink, "_write": _write, "__isLeapYear": __isLeapYear, "_ftell": _ftell, "_isupper": _isupper, "_gmtime_r": _gmtime_r, "_islower": _islower, "_tmpnam": _tmpnam, "_tmpfile": _tmpfile, "_send": _send, "_abort": _abort, "_setvbuf": _setvbuf, "_atan2": _atan2, "_setlocale": _setlocale, "_isgraph": _isgraph, "_modf": _modf, "_strerror_r": _strerror_r, "_fscanf": _fscanf, "___setErrNo": ___setErrNo, "_isalpha": _isalpha, "_srand": _srand, "_mktime": _mktime, "_putchar": _putchar, "_gmtime": _gmtime, "_localeconv": _localeconv, "_sprintf": _sprintf, "_localtime": _localtime, "_read": _read, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "_exit": _exit, "_freopen": _freopen, "_llvm_pow_f64": _llvm_pow_f64, "_fgetc": _fgetc, "_fmod": _fmod, "_lseek": _lseek, "_rmdir": _rmdir, "_asin": _asin, "_floor": _floor, "_pwrite": _pwrite, "_localtime_r": _localtime_r, "_tzset": _tzset, "_open": _open, "_remove": _remove, "_snprintf": _snprintf, "__scanString": __scanString, "_strftime": _strftime, "_fseek": _fseek, "_iscntrl": _iscntrl, "_isxdigit": _isxdigit, "_fclose": _fclose, "_log": _log, "_recv": _recv, "_tan": _tan, "_copysign": _copysign, "__getFloat": __getFloat, "_fputc": _fputc, "_ispunct": _ispunct, "_ceil": _ceil, "_isspace": _isspace, "_fopen": _fopen, "_sin": _sin, "_acos": _acos, "_cosh": _cosh, "___buildEnvironment": ___buildEnvironment, "_difftime": _difftime, "_ungetc": _ungetc, "_system": _system, "_fflush": _fflush, "_log10": _log10, "_fileno": _fileno, "__exit": __exit, "__arraySum": __arraySum, "_fgets": _fgets, "_atan": _atan, "_pread": _pread, "_mkport": _mkport, "_toupper": _toupper, "_feof": _feof, "___errno_location": ___errno_location, "_clearerr": _clearerr, "_getenv": _getenv, "_strerror": _strerror, "_emscripten_longjmp": _emscripten_longjmp, "__formatString": __formatString, "_fputs": _fputs, "_sqrt": _sqrt, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "cttz_i8": cttz_i8, "ctlz_i8": ctlz_i8, "___rand_seed": ___rand_seed, "NaN": NaN, "Infinity": Infinity, "_stderr": _stderr, "_stdin": _stdin, "_stdout": _stdout }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_vi": invoke_vi, "invoke_vii": invoke_vii, "invoke_ii": invoke_ii, "invoke_iiiii": invoke_iiiii, "invoke_iii": invoke_iii, "_isalnum": _isalnum, "_fabs": _fabs, "_frexp": _frexp, "_exp": _exp, "_fread": _fread, "__reallyNegative": __reallyNegative, "_longjmp": _longjmp, "__addDays": __addDays, "_fsync": _fsync, "_signal": _signal, "_rename": _rename, "_sbrk": _sbrk, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_sinh": _sinh, "_sysconf": _sysconf, "_close": _close, "_ferror": _ferror, "_clock": _clock, "_cos": _cos, "_tanh": _tanh, "_unlink": _unlink, "_write": _write, "__isLeapYear": __isLeapYear, "_ftell": _ftell, "_isupper": _isupper, "_gmtime_r": _gmtime_r, "_islower": _islower, "_tmpnam": _tmpnam, "_tmpfile": _tmpfile, "_send": _send, "_abort": _abort, "_setvbuf": _setvbuf, "_atan2": _atan2, "_setlocale": _setlocale, "_isgraph": _isgraph, "_modf": _modf, "_strerror_r": _strerror_r, "_fscanf": _fscanf, "___setErrNo": ___setErrNo, "_isalpha": _isalpha, "_srand": _srand, "_mktime": _mktime, "_putchar": _putchar, "_gmtime": _gmtime, "_localeconv": _localeconv, "_sprintf": _sprintf, "_localtime": _localtime, "_read": _read, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "_exit": _exit, "_freopen": _freopen, "_llvm_pow_f64": _llvm_pow_f64, "_fgetc": _fgetc, "_fmod": _fmod, "_lseek": _lseek, "_rmdir": _rmdir, "_asin": _asin, "_floor": _floor, "_pwrite": _pwrite, "_localtime_r": _localtime_r, "_tzset": _tzset, "_open": _open, "_remove": _remove, "_snprintf": _snprintf, "__scanString": __scanString, "_strftime": _strftime, "_fseek": _fseek, "_iscntrl": _iscntrl, "_isxdigit": _isxdigit, "_fclose": _fclose, "_log": _log, "_recv": _recv, "_tan": _tan, "_copysign": _copysign, "__getFloat": __getFloat, "_fputc": _fputc, "_ispunct": _ispunct, "_ceil": _ceil, "_isspace": _isspace, "_fopen": _fopen, "_sin": _sin, "_acos": _acos, "_cosh": _cosh, "___buildEnvironment": ___buildEnvironment, "_difftime": _difftime, "_ungetc": _ungetc, "_system": _system, "_fflush": _fflush, "_log10": _log10, "_fileno": _fileno, "__exit": __exit, "__arraySum": __arraySum, "_fgets": _fgets, "_atan": _atan, "_pread": _pread, "_mkport": _mkport, "_toupper": _toupper, "_feof": _feof, "___errno_location": ___errno_location, "_clearerr": _clearerr, "_getenv": _getenv, "_strerror": _strerror, "_emscripten_longjmp": _emscripten_longjmp, "__formatString": __formatString, "_fputs": _fputs, "_sqrt": _sqrt, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "cttz_i8": cttz_i8, "ctlz_i8": ctlz_i8, "___rand_seed": ___rand_seed, "NaN": NaN, "Infinity": Infinity, "_stderr": _stderr, "_stdin": _stdin, "_stdout": _stdout }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _testSetjmp = Module["_testSetjmp"] = asm["_testSetjmp"];
var _i64Subtract = Module["_i64Subtract"] = asm["_i64Subtract"];
var _free = Module["_free"] = asm["_free"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/memops.js b/deps/v8/test/mjsunit/wasm/embenchen/memops.js
index 09bbd36eae..aa8c12f486 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/memops.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/memops.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT = 'final: 840.\n';
var Module = {
@@ -5613,7 +5613,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -7877,9 +7878,10 @@ function stackSave() {
return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_fflush": _fflush, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "_abort": _abort, "___setErrNo": ___setErrNo, "_fwrite": _fwrite, "_sbrk": _sbrk, "_time": _time, "_mkport": _mkport, "__reallyNegative": __reallyNegative, "__formatString": __formatString, "_fileno": _fileno, "_write": _write, "_fprintf": _fprintf, "_sysconf": _sysconf, "___errno_location": ___errno_location, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_fflush": _fflush, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "_abort": _abort, "___setErrNo": ___setErrNo, "_fwrite": _fwrite, "_sbrk": _sbrk, "_time": _time, "_mkport": _mkport, "__reallyNegative": __reallyNegative, "__formatString": __formatString, "_fileno": _fileno, "_write": _write, "_fprintf": _fprintf, "_sysconf": _sysconf, "___errno_location": ___errno_location, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _free = Module["_free"] = asm["_free"];
var _main = Module["_main"] = asm["_main"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/primes.js b/deps/v8/test/mjsunit/wasm/embenchen/primes.js
index 5e02d79dec..95cb6535e7 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/primes.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/primes.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT = 'lastprime: 387677.\n';
var Module = {
@@ -5454,7 +5454,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -5776,9 +5777,10 @@ function stackSave() {
return { _strlen: _strlen, _memcpy: _memcpy, _main: _main, _memset: _memset, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9 };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "_sqrtf": _sqrtf, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "_free": _free, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_printf": _printf, "_send": _send, "_pwrite": _pwrite, "_sqrtf": _sqrtf, "__reallyNegative": __reallyNegative, "_fwrite": _fwrite, "_malloc": _malloc, "_mkport": _mkport, "_fprintf": _fprintf, "___setErrNo": ___setErrNo, "__formatString": __formatString, "_fileno": _fileno, "_fflush": _fflush, "_write": _write, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _memcpy = Module["_memcpy"] = asm["_memcpy"];
var _main = Module["_main"] = asm["_main"];
diff --git a/deps/v8/test/mjsunit/wasm/embenchen/zlib.js b/deps/v8/test/mjsunit/wasm/embenchen/zlib.js
index 9c0d30a813..1628babecf 100644
--- a/deps/v8/test/mjsunit/wasm/embenchen/zlib.js
+++ b/deps/v8/test/mjsunit/wasm/embenchen/zlib.js
@@ -1,5 +1,5 @@
// Modified embenchen to direct to asm-wasm.
-// Flags: --expose-wasm
+// Flags: --validate-asm --allow-natives-syntax
var EXPECTED_OUTPUT = 'sizes: 100000,25906\nok.\n';
var Module = {
@@ -5687,7 +5687,8 @@ function asmPrintFloat(x, y) {
Module.print('float ' + x + ',' + y);// + ' ' + new Error().stack);
}
// EMSCRIPTEN_START_ASM
-var asm = Wasm.instantiateModuleFromAsm((function Module(global, env, buffer) {
+var ModuleFunc;
+var asm = (ModuleFunc = function(global, env, buffer) {
'use asm';
var HEAP8 = new global.Int8Array(buffer);
var HEAP16 = new global.Int16Array(buffer);
@@ -14539,9 +14540,10 @@ function stackSave() {
var FUNCTION_TABLE_iii = [b2,_deflate_stored,_deflate_fast,_deflate_slow];
return { _strlen: _strlen, _free: _free, _main: _main, _memset: _memset, _malloc: _malloc, _memcpy: _memcpy, runPostSets: runPostSets, stackAlloc: stackAlloc, stackSave: stackSave, stackRestore: stackRestore, setThrew: setThrew, setTempRet0: setTempRet0, setTempRet1: setTempRet1, setTempRet2: setTempRet2, setTempRet3: setTempRet3, setTempRet4: setTempRet4, setTempRet5: setTempRet5, setTempRet6: setTempRet6, setTempRet7: setTempRet7, setTempRet8: setTempRet8, setTempRet9: setTempRet9, dynCall_iiii: dynCall_iiii, dynCall_vii: dynCall_vii, dynCall_iii: dynCall_iii };
-}).toString(),
+})
// EMSCRIPTEN_END_ASM
-{ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array, "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_vii": invoke_vii, "invoke_iii": invoke_iii, "_send": _send, "___setErrNo": ___setErrNo, "___assert_fail": ___assert_fail, "_fflush": _fflush, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_sbrk": _sbrk, "___errno_location": ___errno_location, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_fileno": _fileno, "_sysconf": _sysconf, "_puts": _puts, "_mkport": _mkport, "_write": _write, "_llvm_bswap_i32": _llvm_bswap_i32, "_fputc": _fputc, "_abort": _abort, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "__formatString": __formatString, "_fputs": _fputs, "_printf": _printf, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+({ "Math": Math, "Int8Array": Int8Array, "Int16Array": Int16Array, "Int32Array": Int32Array, "Uint8Array": Uint8Array, "Uint16Array": Uint16Array, "Uint32Array": Uint32Array, "Float32Array": Float32Array, "Float64Array": Float64Array }, { "abort": abort, "assert": assert, "asmPrintInt": asmPrintInt, "asmPrintFloat": asmPrintFloat, "min": Math_min, "invoke_iiii": invoke_iiii, "invoke_vii": invoke_vii, "invoke_iii": invoke_iii, "_send": _send, "___setErrNo": ___setErrNo, "___assert_fail": ___assert_fail, "_fflush": _fflush, "_pwrite": _pwrite, "__reallyNegative": __reallyNegative, "_sbrk": _sbrk, "___errno_location": ___errno_location, "_emscripten_memcpy_big": _emscripten_memcpy_big, "_fileno": _fileno, "_sysconf": _sysconf, "_puts": _puts, "_mkport": _mkport, "_write": _write, "_llvm_bswap_i32": _llvm_bswap_i32, "_fputc": _fputc, "_abort": _abort, "_fwrite": _fwrite, "_time": _time, "_fprintf": _fprintf, "__formatString": __formatString, "_fputs": _fputs, "_printf": _printf, "STACKTOP": STACKTOP, "STACK_MAX": STACK_MAX, "tempDoublePtr": tempDoublePtr, "ABORT": ABORT, "NaN": NaN, "Infinity": Infinity }, buffer);
+assertTrue(%IsAsmWasmCode(ModuleFunc));
var _strlen = Module["_strlen"] = asm["_strlen"];
var _free = Module["_free"] = asm["_free"];
var _main = Module["_main"] = asm["_main"];
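// A minimal sketch of the testing pattern applied to the embenchen files
// above: instead of the removed Wasm.instantiateModuleFromAsm(), each module
// stays a plain asm.js function, is instantiated directly, and the test then
// asserts that V8's asm-to-wasm pipeline picked it up. Hypothetical module;
// runnable only in a shell with --validate-asm --allow-natives-syntax.
function MinimalModule(stdlib, foreign, heap) {
  "use asm";
  function f(x) {
    x = x | 0;
    return (x + 1) | 0;
  }
  return { f: f };
}
var minimal_instance = MinimalModule(this, {}, new ArrayBuffer(0x10000));
assertEquals(42, minimal_instance.f(41));
assertTrue(%IsAsmWasmCode(MinimalModule));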
diff --git a/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
new file mode 100644
index 0000000000..3fab8c65b1
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/ensure-wasm-binaries-up-to-date.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+// Ensure that the checked-in wasm binaries used by integration tests from
+// V8 hosts (such as Chromium) are up to date.
+
+(function ensure_incrementer() {
+ var buff = readbuffer("test/mjsunit/wasm/incrementer.wasm");
+ var mod = new WebAssembly.Module(buff);
+ var inst = new WebAssembly.Instance(mod);
+ var inc = inst.exports.increment;
+ assertEquals(3, inc(2));
+}())
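// The checked-in incrementer.wasm binary is opaque here; a module with the
// same observable behavior could be produced with the test helpers used
// elsewhere in this patch (a sketch, assuming wasm-constants.js and
// wasm-module-builder.js are loaded):
var inc_builder = new WasmModuleBuilder();
inc_builder.addFunction("increment", kSig_i_i)
  .addBody([
    kExprGetLocal, 0,   // push the argument
    kExprI32Const, 1,   // push the constant 1
    kExprI32Add         // add the two stack values
  ])
  .exportFunc();
assertEquals(3, inc_builder.instantiate().exports.increment(2));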
diff --git a/deps/v8/test/mjsunit/wasm/export-table.js b/deps/v8/test/mjsunit/wasm/export-table.js
index e85da9b664..2084ddfc0a 100644
--- a/deps/v8/test/mjsunit/wasm/export-table.js
+++ b/deps/v8/test/mjsunit/wasm/export-table.js
@@ -11,11 +11,12 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var kReturnValue = 88;
var builder = new WasmModuleBuilder();
- builder.addFunction("main", [kAstI32])
+ builder.addFunction("main", kSig_i)
.addBody([
- kExprReturn,
kExprI8Const,
- kReturnValue])
+ kReturnValue,
+ kExprReturn, kArity1
+ ])
.exportFunc();
var module = builder.instantiate();
@@ -31,11 +32,12 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var builder = new WasmModuleBuilder();
- builder.addFunction("main", [kAstI32])
+ builder.addFunction("main", kSig_i)
.addBody([
- kExprReturn,
kExprI8Const,
- kReturnValue])
+ kReturnValue,
+ kExprReturn, kArity1
+ ])
.exportAs("blah")
.exportAs("foo");
@@ -48,3 +50,40 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(kReturnValue, module.exports.foo());
assertEquals(kReturnValue, module.exports.blah());
})();
+
+
+(function testNumericName() {
+ var kReturnValue = 93;
+
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("main", kSig_i)
+ .addBody([
+ kExprI8Const,
+ kReturnValue,
+ kExprReturn, kArity1
+ ])
+ .exportAs("0");
+
+ var module = builder.instantiate();
+
+ assertEquals("object", typeof module.exports);
+ assertEquals("function", typeof module.exports["0"]);
+
+ assertEquals(kReturnValue, module.exports["0"]());
+})();
+
+(function testExportNameClash() {
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("one", kSig_v_v).addBody([kExprNop]).exportAs("main");
+ builder.addFunction("two", kSig_v_v).addBody([kExprNop]).exportAs("other");
+ builder.addFunction("three", kSig_v_v).addBody([kExprNop]).exportAs("main");
+
+ try {
+ builder.instantiate();
+ assertUnreachable("should have thrown an exception");
+ } catch (e) {
+ assertContains("Duplicate export", e.toString());
+ }
+})();
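// The body rewrites in this file follow the patch-wide switch from prefix
// to postorder encoding: operands are emitted first and the consuming opcode
// follows, with calls and returns carrying an explicit arity byte. A
// side-by-side sketch of the two encodings of "return 88":
var old_prefix_body = [kExprReturn, kExprI8Const, 88];             // opcode first
var new_postorder_body = [kExprI8Const, 88, kExprReturn, kArity1]; // operands first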
diff --git a/deps/v8/test/mjsunit/wasm/ffi-error.js b/deps/v8/test/mjsunit/wasm/ffi-error.js
index 649ee273ae..81dc47806e 100644
--- a/deps/v8/test/mjsunit/wasm/ffi-error.js
+++ b/deps/v8/test/mjsunit/wasm/ffi-error.js
@@ -10,13 +10,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallFFI(ffi) {
var builder = new WasmModuleBuilder();
- var sig_index = [kAstI32, kAstF64, kAstF64];
+ var sig_index = kSig_i_dd;
builder.addImport("fun", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprCallImport, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1]) // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallFunction, kArity2, 0, // --
+ ]) // --
.exportFunc();
var module = builder.instantiate(ffi);
@@ -58,3 +59,22 @@ assertThrows(function() {
ffi.fun = 0;
testCallFFI(ffi);
});
+
+
+(function I64InSignatureThrows() {
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ builder.addFunction("function_with_invalid_signature", kSig_l_ll)
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI64Sub]) // --
+ .exportFunc()
+
+ var module = builder.instantiate();
+
+ assertThrows(function() {
+ module.exports.function_with_invalid_signature(33, 88);
+ }, TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
index 61fcf02b3c..e84f038e68 100644
--- a/deps/v8/test/mjsunit/wasm/ffi.js
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --expose-wasm --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
@@ -10,13 +10,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallFFI(func, check) {
var builder = new WasmModuleBuilder();
- var sig_index = builder.addSignature([kAstI32, kAstF64, kAstF64]);
+ var sig_index = builder.addType(kSig_i_dd);
builder.addImport("func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprCallImport, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1]) // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallImport, kArity2, 0 // --
+ ]) // --
.exportFunc();
var main = builder.instantiate({func: func}).exports.main;
@@ -24,7 +25,9 @@ function testCallFFI(func, check) {
for (var i = 0; i < 100000; i += 10003) {
var a = 22.5 + i, b = 10.5 + i;
var r = main(a, b);
- check(r, a, b);
+ if (check) {
+ check(r, a, b);
+ }
}
}
@@ -51,8 +54,63 @@ function check_FOREIGN_SUB(r, a, b) {
was_called = false;
}
+// Test calling a normal JSFunction.
+print("JSFunction");
testCallFFI(FOREIGN_SUB, check_FOREIGN_SUB);
+// Test calling a proxy.
+print("Proxy");
+var proxy_sub = new Proxy(FOREIGN_SUB, {});
+testCallFFI(proxy_sub, check_FOREIGN_SUB);
+
+// Test calling a bind function.
+print("Bind function");
+var bind_sub = FOREIGN_SUB.bind();
+testCallFFI(bind_sub, check_FOREIGN_SUB);
+
+var main_for_constructor_test;
+print("Constructor");
+(function testCallConstructor() {
+ class C {}
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(kSig_i_dd);
+ builder.addImport("func", sig_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallImport, kArity2, 0 // --
+ ]) // --
+ .exportFunc();
+
+ main_for_constructor_test = builder.instantiate({func: C}).exports.main;
+
+ assertThrows("main_for_constructor_test(12, 43)", TypeError);
+}) ();
+
+print("Native function");
+(function test_ffi_call_to_native() {
+
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(kSig_d);
+ builder.addImport("func", sig_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprCallImport, kArity0, 0 // --
+ ]) // --
+ .exportFunc();
+
+ var main = builder.instantiate({func: Object.prototype.toString}).exports.main;
+ // The result of the call to Object.prototype.toString should be
+ // [object Undefined]. However, we cannot test for this result because wasm
+ // cannot return objects but converts them to float64 in this test.
+ assertEquals(NaN, main());
+})();
+
+print("Callable JSObject");
+testCallFFI(%GetCallable(), function check(r, a, b) {assertEquals(a - b, r);});
function FOREIGN_ABCD(a, b, c, d) {
print("FOREIGN_ABCD(" + a + ", " + b + ", " + c + ", " + d + ")");
@@ -184,14 +242,14 @@ function testCallBinopVoid(type, func, check) {
var builder = new WasmModuleBuilder();
- builder.addImport("func", [kAstStmt, type, type]);
- builder.addFunction("main", [kAstI32, type, type])
+ builder.addImport("func", makeSig_v_xx(type));
+ builder.addFunction("main", makeSig_r_xx(kAstI32, type))
.addBody([
- kExprBlock, 2, // --
- kExprCallImport, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1, // --
- kExprI8Const, 99]) // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallImport, kArity2, 0, // --
+ kExprI8Const, 99 // --
+ ]) // --
.exportFunc()
var main = builder.instantiate(ffi).exports.main;
@@ -240,15 +298,15 @@ testCallBinopVoid(kAstF64);
function testCallPrint() {
var builder = new WasmModuleBuilder();
- builder.addImport("print", [kAstStmt, kAstI32]);
- builder.addImport("print", [kAstStmt, kAstF64]);
- builder.addFunction("main", [kAstStmt, kAstF64])
+ builder.addImport("print", makeSig_v_x(kAstI32));
+ builder.addImport("print", makeSig_v_x(kAstF64));
+ builder.addFunction("main", makeSig_v_x(kAstF64))
.addBody([
- kExprBlock, 2, // --
- kExprCallImport, 0, // --
- kExprI8Const, 97, // --
- kExprCallImport, 1, // --
- kExprGetLocal, 0]) // --
+ kExprI8Const, 97, // --
+ kExprCallImport, kArity1, 0, // --
+ kExprGetLocal, 0, // --
+ kExprCallImport, kArity1, 1 // --
+ ]) // --
.exportFunc()
var main = builder.instantiate({print: print}).exports.main;
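// Background for testCallConstructor above: class constructors cannot be
// invoked without `new`, so routing one through a wasm import is expected
// to throw. The plain-JS equivalent of that expectation:
class PlainC {}
assertThrows(function() { PlainC(12, 43); }, TypeError);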
diff --git a/deps/v8/test/mjsunit/wasm/frame-inspection.js b/deps/v8/test/mjsunit/wasm/frame-inspection.js
new file mode 100644
index 0000000000..4d342e6cae
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/frame-inspection.js
@@ -0,0 +1,74 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-debug-as debug
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+Debug = debug.Debug
+
+var exception = null;
+var break_count = 0;
+
+const expected_num_frames = 5;
+const expected_wasm_frames = [false, true, true, false, false];
+const expected_wasm_positions = [0, 1, 2, 0, 0];
+const expected_function_names = ["call_debugger", "wasm_2", "wasm_1", "testFrameInspection", ""];
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ ++break_count;
+ try {
+ var break_id = exec_state.break_id;
+ var frame_count = exec_state.frameCount();
+ assertEquals(expected_num_frames, frame_count);
+
+ for (var i = 0; i < frame_count; ++i) {
+ var frame = exec_state.frame(i);
+ // Wasm frames have an unresolved function; other frames have resolved ones.
+ assertEquals(expected_wasm_frames[i], !frame.func().resolved());
+ assertEquals(expected_function_names[i], frame.func().name());
+ if (expected_wasm_frames[i]) {
+ var script = frame.details().script();
+ assertNotNull(script);
+ assertEquals(expected_wasm_positions[i], frame.details().sourcePosition());
+ var loc = script.locationFromPosition(frame.details().sourcePosition());
+ assertEquals(expected_wasm_positions[i], loc.column);
+ assertEquals(expected_wasm_positions[i], loc.position);
+ }
+ }
+ } catch (e) {
+ exception = e;
+ }
+};
+
+var builder = new WasmModuleBuilder();
+
+// wasm_1 calls wasm_2 on offset 2.
+// wasm_2 calls call_debugger on offset 1.
+
+builder.addImport("func", kSig_v_v);
+
+builder.addFunction("wasm_1", kSig_v_v)
+ .addBody([kExprNop, kExprCallFunction, kArity0, 1])
+ .exportAs("main");
+
+builder.addFunction("wasm_2", kSig_v_v)
+ .addBody([kExprCallImport, kArity0, 0]);
+
+function call_debugger() {
+ debugger;
+}
+
+var module = builder.instantiate({func: call_debugger});
+
+(function testFrameInspection() {
+ Debug.setListener(listener);
+ module.exports.main();
+ Debug.setListener(null);
+
+ assertEquals(1, break_count);
+ if (exception) throw exception;
+})();
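// The minimal shape of the debug-listener protocol this test relies on
// (requires --expose-debug-as debug); a sketch, not tied to wasm:
Debug = debug.Debug;
function minimal_listener(event, exec_state, event_data, data) {
  if (event != Debug.DebugEvent.Break) return;
  // Frames are inspected via exec_state.frameCount() and exec_state.frame(i).
}
Debug.setListener(minimal_listener);
debugger;                  // hits the listener
Debug.setListener(null);   // always detach afterwards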
diff --git a/deps/v8/test/mjsunit/wasm/function-names.js b/deps/v8/test/mjsunit/wasm/function-names.js
new file mode 100644
index 0000000000..15771d8470
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/function-names.js
@@ -0,0 +1,69 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+
+var last_func_index = builder.addFunction("exec_unreachable", kSig_v_v)
+ .addBody([kExprUnreachable])
+
+var illegal_func_name = [0xff];
+var func_names = [ "☠", illegal_func_name, "some math: (½)² = ¼", "" ];
+var expected_names = ["exec_unreachable", "☠", null,
+ "some math: (½)² = ¼", "", "main"];
+
+for (var func_name of func_names) {
+ last_func_index = builder.addFunction(func_name, kSig_v_v)
+ .addBody([kExprCallFunction, kArity0, last_func_index]).index;
+}
+
+builder.addFunction("main", kSig_v_v)
+ .addBody([kExprCallFunction, kArity0, last_func_index])
+ .exportFunc();
+
+var module = builder.instantiate();
+
+(function testFunctionNamesAsString() {
+ var names = expected_names.concat(["testFunctionNamesAsString", null]);
+ try {
+ module.exports.main();
+ assertFalse("should throw");
+ } catch (e) {
+ var lines = e.stack.split(/\r?\n/);
+ lines.shift();
+ assertEquals(names.length, lines.length);
+ for (var i = 0; i < names.length; ++i) {
+ var line = lines[i].trim();
+ if (names[i] === null) continue;
+ var printed_name = names[i] === undefined ? "<WASM UNNAMED>" : names[i]
+ var expected_start = "at " + printed_name + " (";
+ assertTrue(line.startsWith(expected_start),
+ "should start with '" + expected_start + "': '" + line + "'");
+ }
+ }
+})();
+
+// For the remaining tests, collect the Callsite objects instead of just a
+// string:
+Error.prepareStackTrace = function(error, frames) {
+ return frames;
+};
+
+
+(function testFunctionNamesAsCallSites() {
+ var names = expected_names.concat(["testFunctionNamesAsCallSites", null]);
+ try {
+ module.exports.main();
+ assertFalse("should throw");
+ } catch (e) {
+ assertEquals(names.length, e.stack.length);
+ for (var i = 0; i < names.length; ++i) {
+ assertEquals(names[i], e.stack[i].getFunctionName());
+ }
+ }
+})();
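// Error.prepareStackTrace is the V8-specific hook the tests above use to
// obtain structured CallSite objects instead of a formatted string; a
// minimal standalone sketch:
Error.prepareStackTrace = function(error, frames) { return frames; };
function boom() { throw new Error("boom"); }
try {
  boom();
} catch (e) {
  assertEquals("boom", e.stack[0].getFunctionName());
}
Error.prepareStackTrace = undefined;  // restore the default string formatting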
diff --git a/deps/v8/test/mjsunit/wasm/function-prototype.js b/deps/v8/test/mjsunit/wasm/function-prototype.js
index db04b950bb..25339adea7 100644
--- a/deps/v8/test/mjsunit/wasm/function-prototype.js
+++ b/deps/v8/test/mjsunit/wasm/function-prototype.js
@@ -10,7 +10,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestFunctionPrototype() {
var builder = new WasmModuleBuilder();
- builder.addFunction("nine", [kAstI32])
+ builder.addFunction("nine", kSig_i)
.addBody([kExprI8Const, 9])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/gc-frame.js b/deps/v8/test/mjsunit/wasm/gc-frame.js
index 8387d26176..9c37fe485f 100644
--- a/deps/v8/test/mjsunit/wasm/gc-frame.js
+++ b/deps/v8/test/mjsunit/wasm/gc-frame.js
@@ -10,14 +10,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function makeFFI(func, t) {
var builder = new WasmModuleBuilder();
- var sig_index = builder.addSignature([t,t,t,t,t,t,t,t,t,t,t]);
+ var sig_index = builder.addType(makeSig([t,t,t,t,t,t,t,t,t,t], [t]));
builder.addImport("func", sig_index);
// Try to create a frame with lots of spilled values and parameters
// on the stack to try to catch GC bugs in the reference maps for
// the different parts of the stack.
builder.addFunction("main", sig_index)
.addBody([
- kExprCallImport, 0, // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
kExprGetLocal, 2, // --
@@ -28,7 +27,7 @@ function makeFFI(func, t) {
kExprGetLocal, 7, // --
kExprGetLocal, 8, // --
kExprGetLocal, 9, // --
- kExprCallImport, 0, // --
+ kExprCallImport, 10, 0, // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
kExprGetLocal, 2, // --
@@ -38,7 +37,8 @@ function makeFFI(func, t) {
kExprGetLocal, 6, // --
kExprGetLocal, 7, // --
kExprGetLocal, 8, // --
- kExprGetLocal, 9 // --
+ kExprGetLocal, 9, // --
+ kExprCallImport, 10, 0 // --
]) // --
.exportFunc();
@@ -66,9 +66,32 @@ function print10(a, b, c, d, e, f, g, h, i) {
}
})();
-(function I32Test() {
+(function F64Test() {
var main = makeFFI(print10, kAstF64);
for (var i = 1; i < 2e+80; i *= -1137) {
main(i - 1, i, i + 2, i + 3, i + 4, i + 5, i + 6, i + 7, i + 8);
}
})();
+
+(function GCInJSToWasmTest() {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(kSig_i_i);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ ]) // --
+ .exportFunc();
+
+ var main = builder.instantiate({}).exports.main;
+
+ var gc_object = {
+ valueOf: function() {
+ // Call the GC in valueOf, which is called within the JSToWasm wrapper.
+ gc();
+ return {};
+ }
+ };
+
+ main(gc_object);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
new file mode 100644
index 0000000000..27aca22d1a
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --expose-gc --stress-compaction
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var kPageSize = 0x10000;
+
+function genGrowMemoryBuilder() {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("grow_memory", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprGrowMemory])
+ .exportFunc();
+ builder.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportFunc();
+ builder.addFunction("store", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0])
+ .exportFunc();
+ return builder;
+}
+
+function testGrowMemoryReadWrite() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var offset;
+ function peek() { return module.exports.load(offset); }
+ function poke(value) { return module.exports.store(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ for(offset = 0; offset <= (kPageSize - 4); offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = kPageSize - 3; offset < kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+
+ assertEquals(1, growMem(3));
+
+ for (offset = kPageSize; offset <= 4*kPageSize -4; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 4*kPageSize - 3; offset < 4*kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+
+ assertEquals(4, growMem(15));
+
+ for (offset = 4*kPageSize - 3; offset <= 4*kPageSize + 4; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 19*kPageSize - 10; offset <= 19*kPageSize - 4; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+ for (offset = 19*kPageSize - 3; offset < 19*kPageSize + 5; offset++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+}
+
+testGrowMemoryReadWrite();
+
+function testGrowMemoryZeroInitialSize() {
+ var builder = genGrowMemoryBuilder();
+ var module = builder.instantiate();
+ var offset;
+ function peek() { return module.exports.load(offset); }
+ function poke(value) { return module.exports.store(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ assertTraps(kTrapMemOutOfBounds, peek);
+ assertTraps(kTrapMemOutOfBounds, poke);
+
+ assertEquals(0, growMem(1));
+
+ for(offset = 0; offset <= kPageSize - 4; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+
+ // TODO(gdeepti): Fix tests with correct write boundaries
+ // when the runtime function is fixed.
+ for(offset = kPageSize; offset <= kPageSize + 5; offset++) {
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+}
+
+testGrowMemoryZeroInitialSize();
+
+function testGrowMemoryTrapMaxPagesZeroInitialMemory() {
+ var builder = genGrowMemoryBuilder();
+ var module = builder.instantiate();
+ var maxPages = 16385;
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+ assertEquals(-1, growMem(maxPages));
+}
+
+testGrowMemoryTrapMaxPagesZeroInitialMemory();
+
+function testGrowMemoryTrapMaxPages() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 1, false);
+ var module = builder.instantiate();
+ var maxPages = 16384;
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+ assertEquals(-1, growMem(maxPages));
+}
+
+testGrowMemoryTrapMaxPages();
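// Arithmetic behind the assertions above: grow_memory returns the previous
// size in 64 KiB pages (or -1 on failure), so growing a 1-page memory by 3
// yields 1 and leaves 4 * kPageSize addressable bytes.
var kWasmPageSize = 0x10000;                      // 64 KiB per page
assertEquals(262144, 4 * kWasmPageSize);          // bytes after 1 + 3 pages
assertEquals(0x40000000, 16384 * kWasmPageSize);  // the 1 GiB boundary the
                                                  // maxPages tests probe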
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index 7579901651..8680addf61 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -10,13 +10,13 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function testCallImport(func, check) {
var builder = new WasmModuleBuilder();
- var sig_index = builder.addSignature([kAstI32, kAstF64, kAstF64]);
+ var sig_index = builder.addType(kSig_i_dd);
builder.addImport("func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprCallImport, 0, // --
kExprGetLocal, 0, // --
- kExprGetLocal, 1]) // --
+ kExprGetLocal, 1, // --
+ kExprCallImport, 2, 0]) // --
.exportAs("main");
var main = builder.instantiate({func: func}).exports.main;
@@ -45,7 +45,7 @@ function FOREIGN_SUB(a, b) {
function check_FOREIGN_SUB(r, a, b) {
assertEquals(a - b | 0, r);
assertTrue(was_called);
-// assertEquals(global, params[0]); // sloppy mode
+ assertEquals(global, params[0]); // sloppy mode
assertEquals(a, params[1]);
assertEquals(b, params[2]);
was_called = false;
@@ -68,7 +68,7 @@ function FOREIGN_ABCD(a, b, c, d) {
function check_FOREIGN_ABCD(r, a, b) {
assertEquals((a * b * 6) | 0, r);
assertTrue(was_called);
-// assertEquals(global, params[0]); // sloppy mode.
+ assertEquals(global, params[0]); // sloppy mode.
assertEquals(a, params[1]);
assertEquals(b, params[2]);
assertEquals(undefined, params[3]);
@@ -186,14 +186,14 @@ function testCallBinopVoid(type, func, check) {
var builder = new WasmModuleBuilder();
- builder.addImport("func", [kAstStmt, type, type]);
- builder.addFunction("main", [kAstI32, type, type])
+ builder.addImport("func", makeSig_v_xx(type));
+ builder.addFunction("main", makeSig_r_xx(kAstI32, type))
.addBody([
- kExprBlock, 2, // --
- kExprCallImport, 0, // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
- kExprI8Const, 99])
+ kExprCallImport, 2, 0, // --
+ kExprI8Const, 99, // --
+ ])
.exportFunc("main");
var main = builder.instantiate(ffi).exports.main;
@@ -241,15 +241,15 @@ testCallBinopVoid(kAstF64);
function testCallPrint() {
var builder = new WasmModuleBuilder();
- builder.addImport("print", [kAstStmt, kAstI32]);
- builder.addImport("print", [kAstStmt, kAstF64]);
- builder.addFunction("main", [kAstStmt, kAstF64])
+ builder.addImport("print", makeSig_v_x(kAstI32));
+ builder.addImport("print", makeSig_r_x(kAstF64, kAstF64));
+ builder.addFunction("main", makeSig_r_x(kAstF64, kAstF64))
.addBody([
- kExprBlock, 2, // --
- kExprCallImport, 0, // --
- kExprI8Const, 97, // --
- kExprCallImport, 1, // --
- kExprGetLocal, 0]) // --
+ kExprI8Const, 97, // --
+ kExprCallImport, kArity1, 0, // --
+ kExprGetLocal, 0, // --
+ kExprCallImport, kArity1, 1 // --
+ ])
.exportFunc();
var main = builder.instantiate({print: print}).exports.main;
@@ -266,13 +266,14 @@ testCallPrint();
function testCallImport2(foo, bar, expected) {
var builder = new WasmModuleBuilder();
- builder.addImport("foo", [kAstI32]);
- builder.addImport("bar", [kAstI32]);
- builder.addFunction("main", [kAstI32])
+ builder.addImport("foo", kSig_i);
+ builder.addImport("bar", kSig_i);
+ builder.addFunction("main", kSig_i)
.addBody([
+ kExprCallImport, kArity0, 0, // --
+ kExprCallImport, kArity0, 1, // --
kExprI32Add, // --
- kExprCallImport, 0, // --
- kExprCallImport, 1]) // --
+ ]) // --
.exportFunc();
var main = builder.instantiate({foo: foo, bar: bar}).exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/incrementer.wasm b/deps/v8/test/mjsunit/wasm/incrementer.wasm
new file mode 100644
index 0000000000..f80f7ad597
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/incrementer.wasm
Binary files differ
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index 3258687431..1e87c6f823 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -10,24 +10,27 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var module = (function () {
var builder = new WasmModuleBuilder();
- var sig_index = builder.addSignature([kAstI32, kAstI32, kAstI32]);
+ var sig_index = builder.addType(kSig_i_ii);
builder.addImport("add", sig_index);
builder.addFunction("add", sig_index)
.addBody([
- kExprCallImport, 0, kExprGetLocal, 0, kExprGetLocal, 1
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprCallImport, kArity2, 0
]);
builder.addFunction("sub", sig_index)
.addBody([
- kExprI32Sub, kExprGetLocal, 0, kExprGetLocal, 1
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Sub, // --
]);
- builder.addFunction("main", [kAstI32, kAstI32, kAstI32, kAstI32])
+ builder.addFunction("main", kSig_i_iii)
.addBody([
- kExprCallIndirect, sig_index,
kExprGetLocal, 0,
kExprGetLocal, 1,
- kExprGetLocal, 2])
+ kExprGetLocal, 2,
+ kExprCallIndirect, kArity2, sig_index
+ ])
.exportFunc()
- builder.appendToFunctionTable([0, 1, 2]);
+ builder.appendToTable([0, 1, 2]);
return builder.instantiate({add: function(a, b) { return a + b | 0; }});
})();
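// A plain-JS analogue of the table dispatch set up above: an index selects
// a callee out of a function table and the arguments are passed through.
// Hypothetical sketch only; real call_indirect also checks the signature.
var js_table = [
  function(a, b) { return a + b | 0; },  // index 0: add
  function(a, b) { return a - b | 0; }   // index 1: sub
];
function dispatch(a, b, index) { return js_table[index](a, b); }
assertEquals(12, dispatch(33, 21, 1));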
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
index bc13122f1b..92cdc14ff9 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -7,44 +7,149 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-var kReturnValue = 117;
-
-var module = (function Build() {
- var builder = new WasmModuleBuilder();
+let kReturnValue = 117;
+let buffer = (() => {
+ let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("main", [kAstI32])
+ builder.addFunction("main", kSig_i)
.addBody([kExprI8Const, kReturnValue])
.exportFunc();
- return builder.instantiate();
-})();
+ return builder.toBuffer();
+})()
+
+function CheckInstance(instance) {
+ assertFalse(instance === undefined);
+ assertFalse(instance === null);
+ assertFalse(instance === 0);
+ assertEquals("object", typeof instance);
+
+ // Check the memory is an ArrayBuffer.
+ var mem = instance.exports.memory;
+ assertFalse(mem === undefined);
+ assertFalse(mem === null);
+ assertFalse(mem === 0);
+ assertEquals("object", typeof mem);
+ assertTrue(mem instanceof ArrayBuffer);
+ for (let i = 0; i < 4; i++) {
+ instance.exports.memory = 0; // should be ignored
+ assertSame(mem, instance.exports.memory);
+ }
+
+ assertEquals(65536, instance.exports.memory.byteLength);
+
+ // Check the properties of the main function.
+ let main = instance.exports.main;
+ assertFalse(main === undefined);
+ assertFalse(main === null);
+ assertFalse(main === 0);
+ assertEquals("function", typeof main);
-// Check the module exists.
-assertFalse(module === undefined);
-assertFalse(module === null);
-assertFalse(module === 0);
-assertEquals("object", typeof module);
-
-// Check the memory is an ArrayBuffer.
-var mem = module.exports.memory;
-assertFalse(mem === undefined);
-assertFalse(mem === null);
-assertFalse(mem === 0);
-assertEquals("object", typeof mem);
-assertTrue(mem instanceof ArrayBuffer);
-for (var i = 0; i < 4; i++) {
- module.exports.memory = 0; // should be ignored
- assertEquals(mem, module.exports.memory);
+ assertEquals(kReturnValue, main());
}
-assertEquals(65536, module.exports.memory.byteLength);
+// Deprecated experimental API.
+CheckInstance(Wasm.instantiateModule(buffer));
+
+// Official API
+let module = new WebAssembly.Module(buffer);
+CheckInstance(new WebAssembly.Instance(module));
+
+let promise = WebAssembly.compile(buffer);
+promise.then(module => CheckInstance(new WebAssembly.Instance(module)));
+
+// Negative tests.
+(function InvalidModules() {
+ let invalid_cases = [undefined, 1, "", "a", {some:1, obj: "b"}];
+ let len = invalid_cases.length;
+ for (var i = 0; i < len; ++i) {
+ try {
+ let instance = new WebAssembly.Instance(1);
+ assertUnreachable("should not be able to instantiate invalid modules.");
+ } catch (e) {
+ assertContains("Argument 0", e.toString());
+ }
+ }
+})();
+
+// Compile async an invalid blob.
+(function InvalidBinaryAsyncCompilation() {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("f", kSig_i_i)
+ .addBody([kExprCallImport, kArity0, 0]);
+ let promise = WebAssembly.compile(builder.toBuffer());
+ promise
+ .then(compiled =>
+ assertUnreachable("should not be able to compile invalid blob."))
+ .catch(e => assertContains("invalid signature index", e.toString()));
+})();
+
+// Multiple instances tests.
+(function ManyInstances() {
+ let compiled_module = new WebAssembly.Module(buffer);
+ let instance_1 = new WebAssembly.Instance(compiled_module);
+ let instance_2 = new WebAssembly.Instance(compiled_module);
+ assertTrue(instance_1 != instance_2);
+})();
+
+(function ManyInstancesAsync() {
+ let promise = WebAssembly.compile(buffer);
+ promise.then(compiled_module => {
+ let instance_1 = new WebAssembly.Instance(compiled_module);
+ let instance_2 = new WebAssembly.Instance(compiled_module);
+ assertTrue(instance_1 != instance_2);
+ });
+})();
+
+(function InstancesAreIsolatedFromEachother() {
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1,1, true);
+ var kSig_v_i = makeSig([kAstI32], []);
+ var signature = builder.addType(kSig_v_i);
+ builder.addImport("some_value", kSig_i);
+ builder.addImport("writer", signature);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprI32Const, 1,
+ kExprGetLocal, 0,
+ kExprI32LoadMem, 0, 0,
+ kExprCallIndirect, kArity1, signature,
+ kExprGetLocal,0,
+ kExprI32LoadMem,0, 0,
+ kExprCallImport, kArity0, 0,
+ kExprI32Add
+ ]).exportFunc();
-// Check the properties of the main function.
-var main = module.exports.main;
-assertFalse(main === undefined);
-assertFalse(main === null);
-assertFalse(main === 0);
-assertEquals("function", typeof main);
+ // writer(mem[i]);
+ // return mem[i] + some_value();
+ builder.addFunction("_wrap_writer", signature)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallImport, kArity1, 1]);
+ builder.appendToTable([0, 1]);
-assertEquals(kReturnValue, main());
+
+ var module = new WebAssembly.Module(builder.toBuffer());
+ var mem_1 = new ArrayBuffer(4);
+ var mem_2 = new ArrayBuffer(4);
+ var view_1 = new Int32Array(mem_1);
+ var view_2 = new Int32Array(mem_2);
+
+ view_1[0] = 42;
+ view_2[0] = 1000;
+
+ var outval_1;
+ var outval_2;
+ var i1 = new WebAssembly.Instance(module, {some_value: () => 1,
+ writer: (x)=>outval_1 = x }, mem_1);
+ var i2 = new WebAssembly.Instance(module, {some_value: () => 2,
+ writer: (x)=>outval_2 = x }, mem_2);
+
+ assertEquals(43, i1.exports.main(0));
+ assertEquals(1002, i2.exports.main(0));
+
+ assertEquals(42, outval_1);
+ assertEquals(1000, outval_2);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
index 2e649a0bd2..fe6fc14e05 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
@@ -11,7 +11,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var kReturnValue = 107;
var builder = new WasmModuleBuilder();
- builder.addFunction("main", [kAstI32])
+ builder.addFunction("main", kSig_i_i)
.addBody([kExprI8Const, kReturnValue])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index ef85eb2357..a5e5f42488 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -13,19 +13,27 @@ function genModule(memory) {
var builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("main", [kAstI32, kAstI32])
+ builder.addFunction("main", kSig_i_i)
.addBody([
- kExprBlock,2,
- kExprLoop,1,
- kExprIf,
+ // main body: while(i) { if(mem[i]) return -1; i -= 4; } return 0;
+ kExprLoop,
+ kExprGetLocal,0,
+ kExprIf,
kExprGetLocal,0,
- kExprBr, 0,
- kExprIfElse,
- kExprI32LoadMem,0,0,kExprGetLocal,0,
- kExprBr,2, kExprI8Const, 255,
- kExprSetLocal,0,
- kExprI32Sub,kExprGetLocal,0,kExprI8Const,4,
- kExprI8Const,0])
+ kExprI32LoadMem,0,0,
+ kExprIf,
+ kExprI8Const,255,
+ kExprReturn, kArity1,
+ kExprEnd,
+ kExprGetLocal,0,
+ kExprI8Const,4,
+ kExprI32Sub,
+ kExprSetLocal,0,
+ kExprBr, kArity1, 1,
+ kExprEnd,
+ kExprEnd,
+ kExprI8Const,0
+ ])
.exportFunc();
return builder.instantiate(null, memory);
@@ -120,14 +128,16 @@ function testOOBThrows() {
var builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("geti", [kAstI32, kAstI32, kAstI32])
+ builder.addFunction("geti", kSig_i_ii)
.addBody([
- kExprI32StoreMem, 0, 0, kExprGetLocal, 0, kExprI32LoadMem, 0, 0, kExprGetLocal, 1
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprI32LoadMem, 0, 0,
+ kExprI32StoreMem, 0, 0
])
.exportFunc();
var module = builder.instantiate();
-
var offset;
function read() { return module.exports.geti(0, offset); }
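// A plain-JS restatement of the structured-control-flow body built above,
// matching its comment "while(i) { if(mem[i]) return -1; i -= 4; } return 0;"
// (hypothetical helper, for illustration only):
function mainJS(i, mem) {
  while (i) {
    if (mem[i]) return -1;
    i -= 4;
  }
  return 0;
}
assertEquals(0, mainJS(0, []));
assertEquals(-1, mainJS(4, {4: 1}));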
diff --git a/deps/v8/test/mjsunit/wasm/no-wasm-by-default.js b/deps/v8/test/mjsunit/wasm/no-wasm-by-default.js
new file mode 100644
index 0000000000..2f9622e2c4
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/no-wasm-by-default.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(titzer): remove this test when WASM ships.
+assertThrows(function() { var g = Wasm; });
diff --git a/deps/v8/test/mjsunit/wasm/parallel_compilation.js b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
new file mode 100644
index 0000000000..23c5658dcd
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/parallel_compilation.js
@@ -0,0 +1,100 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --wasm-num-compilation-tasks=10
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function assertModule(module, memsize) {
+ // Check the module exists.
+ assertFalse(module === undefined);
+ assertFalse(module === null);
+ assertFalse(module === 0);
+ assertEquals("object", typeof module);
+
+ // Check the memory is an ArrayBuffer.
+ var mem = module.exports.memory;
+ assertFalse(mem === undefined);
+ assertFalse(mem === null);
+ assertFalse(mem === 0);
+ assertEquals("object", typeof mem);
+ assertTrue(mem instanceof ArrayBuffer);
+ for (var i = 0; i < 4; i++) {
+ module.exports.memory = 0; // should be ignored
+ assertEquals(mem, module.exports.memory);
+ }
+
+ assertEquals(memsize, module.exports.memory.byteLength);
+}
+
+function assertFunction(module, func) {
+ assertEquals("object", typeof module.exports);
+
+ var exp = module.exports[func];
+ assertFalse(exp === undefined);
+ assertFalse(exp === null);
+ assertFalse(exp === 0);
+ assertEquals("function", typeof exp);
+ return exp;
+}
+
+(function CompileFunctionsTest() {
+
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(1, 1, true);
+ for (i = 0; i < 1000; i++) {
+ builder.addFunction("sub" + i, kSig_i_i)
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprI32Const, i % 61, // --
+ kExprI32Sub]) // --
+ .exportFunc()
+ }
+
+ var module = builder.instantiate();
+ assertModule(module, kPageSize);
+
+ // Check the properties of the functions.
+ for (i = 0; i < 1000; i++) {
+ var sub = assertFunction(module, "sub" + i);
+ assertEquals(33 - (i % 61), sub(33));
+ }
+})();
+
+(function CallFunctionsTest() {
+
+ var builder = new WasmModuleBuilder();
+
+ var f = []
+
+ f[0] = builder.addFunction("add0", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Add, // --
+ ])
+ .exportFunc()
+
+ builder.addMemory(1, 1, true);
+ for (i = 1; i < 256; i++) {
+ f[i] = builder.addFunction("add" + i, kSig_i_ii)
+ .addBody([ // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallFunction, kArity2, f[i >>> 1].index]) // --
+ .exportFunc()
+ }
+ var module = builder.instantiate();
+ assertModule(module, kPageSize);
+
+ // Check the properties of the functions.
+ for (i = 0; i < 256; i++) {
+ var add = assertFunction(module, "add" + i);
+ assertEquals(88, add(33, 55));
+ assertEquals(88888, add(33333, 55555));
+ assertEquals(8888888, add(3333333, 5555555));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/params.js b/deps/v8/test/mjsunit/wasm/params.js
index 7c2b3d1794..fe1b7d4cfe 100644
--- a/deps/v8/test/mjsunit/wasm/params.js
+++ b/deps/v8/test/mjsunit/wasm/params.js
@@ -17,7 +17,7 @@ function testSelect2(type) {
var builder = new WasmModuleBuilder();
- builder.addFunction("select", [type, type, type])
+ builder.addFunction("select", makeSig_r_xx(type, type))
.addBody([kExprGetLocal, which])
.exportFunc()
@@ -79,7 +79,7 @@ function testSelect10(t) {
print("type = " + t + ", which = " + which);
var builder = new WasmModuleBuilder();
- builder.addFunction("select", [t,t,t,t,t,t,t,t,t,t,t])
+ builder.addFunction("select", makeSig([t,t,t,t,t,t,t,t,t,t], [t]))
.addBody([kExprGetLocal, which])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/receiver.js b/deps/v8/test/mjsunit/wasm/receiver.js
new file mode 100644
index 0000000000..c0070f8b91
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/receiver.js
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function testCallImport(func, expected, a, b) {
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(kSig_i_dd);
+ builder.addImport("func", sig_index);
+ builder.addFunction("main", sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallImport, 2, 0]) // --
+ .exportAs("main");
+
+ var main = builder.instantiate({func: func}).exports.main;
+
+ assertEquals(expected, main(a, b));
+}
+
+var global = (function() { return this; })();
+
+function sloppyReceiver(a, b) {
+ assertEquals(global, this);
+ assertEquals(33.3, a);
+ assertEquals(44.4, b);
+ return 11;
+}
+
+function strictReceiver(a, b) {
+ 'use strict';
+ assertEquals(undefined, this);
+ assertEquals(55.5, a);
+ assertEquals(66.6, b);
+ return 22;
+}
+
+testCallImport(sloppyReceiver, 11, 33.3, 44.4);
+testCallImport(strictReceiver, 22, 55.5, 66.6);
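// The rule these two imports exercise, restated in plain JS: a sloppy-mode
// function called with an undefined receiver gets the global object as
// `this`, while a strict-mode function sees undefined unchanged.
function sloppy_this() { return this; }
function strict_this() { 'use strict'; return this; }
assertSame(global, sloppy_this.call(undefined));
assertEquals(undefined, strict_this.call(undefined));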
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index ed05517ae5..0197b77caf 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -2,39 +2,148 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// clang-format off
// Flags: --expose-wasm
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-var expected = "Error\n" +
- // The line numbers below will change as this test gains / loses lines..
- " at STACK (stack.js:24:11)\n" + // --
- " at <WASM> (<anonymous>)\n" + // TODO(jfb): wasm stack here.
- " at testStack (stack.js:38:18)\n" + // --
- " at stack.js:40:3"; // --
-
// The stack trace contains the file path; only keep "stack.js".
function stripPath(s) {
return s.replace(/[^ (]*stack\.js/g, "stack.js");
}
+function verifyStack(frames, expected) {
+ assertEquals(expected.length, frames.length, "number of frames mismatch");
+ expected.forEach(function(exp, i) {
+ assertEquals(exp[1], frames[i].getFunctionName(),
+ "["+i+"].getFunctionName()");
+ assertEquals(exp[2], frames[i].getLineNumber(), "["+i+"].getLineNumber()");
+ if (exp[0])
+ assertEquals(exp[3], frames[i].getPosition(),
+ "["+i+"].getPosition()");
+ assertContains(exp[4], frames[i].getFileName(), "["+i+"].getFileName()");
+ var toString;
+ if (exp[0]) {
+ toString = exp[1] + " (<WASM>[" + exp[2] + "]+" + exp[3] + ")";
+ } else {
+ toString = exp[4] + ":" + exp[2] + ":";
+ }
+ assertContains(toString, frames[i].toString(), "["+i+"].toString()");
+ });
+}
+
+
var stack;
function STACK() {
var e = new Error();
stack = e.stack;
}
-(function testStack() {
- var builder = new WasmModuleBuilder();
+var builder = new WasmModuleBuilder();
+
+builder.addImport("func", kSig_v_v);
- builder.addImport("func", [kAstStmt]);
+builder.addFunction("main", kSig_v_v)
+ .addBody([kExprCallImport, kArity0, 0])
+ .exportAs("main");
- builder.addFunction(undefined, [kAstStmt])
- .addBody([kExprCallImport, 0])
- .exportAs("main");
+builder.addFunction("exec_unreachable", kSig_v_v)
+ .addBody([kExprUnreachable])
+ .exportAs("exec_unreachable");
+
+// Make this function unnamed, just to test also this case.
+var mem_oob_func = builder.addFunction(undefined, kSig_v_v)
+ // Access the memory at offset -1, to provoke a trap.
+ .addBody([kExprI32Const, 0x7f, kExprI32LoadMem8S, 0, 0])
+ .exportAs("mem_out_of_bounds");
+
+// Call the mem_out_of_bounds function, in order to have two WASM stack frames.
+builder.addFunction("call_mem_out_of_bounds", kSig_v_v)
+ .addBody([kExprCallFunction, kArity0, mem_oob_func.index])
+ .exportAs("call_mem_out_of_bounds");
+
+var module = builder.instantiate({func: STACK});
+
+(function testSimpleStack() {
+ var expected_string = "Error\n" +
+ // The line numbers below will change as this test gains / loses lines.
+ " at STACK (stack.js:39:11)\n" + // --
+ " at main (<WASM>[0]+1)\n" + // --
+ " at testSimpleStack (stack.js:76:18)\n" + // --
+ " at stack.js:78:3"; // --
- var module = builder.instantiate({func: STACK});
module.exports.main();
- assertEquals(expected, stripPath(stack));
+ assertEquals(expected_string, stripPath(stack));
+})();
+
+// For the remaining tests, collect the Callsite objects instead of just a
+// string:
+Error.prepareStackTrace = function(error, frames) {
+ return frames;
+};
+
+(function testStackFrames() {
+ module.exports.main();
+
+ verifyStack(stack, [
+ // isWasm function line pos file
+ [ false, "STACK", 39, 0, "stack.js"],
+ [ true, "main", 0, 1, null],
+ [ false, "testStackFrames", 87, 0, "stack.js"],
+ [ false, null, 96, 0, "stack.js"]
+ ]);
+})();
+
+(function testWasmUnreachable() {
+ try {
+ module.exports.exec_unreachable();
+ fail("expected wasm exception");
+ } catch (e) {
+ assertContains("unreachable", e.message);
+ verifyStack(e.stack, [
+ // isWasm function line pos file
+ [ true, "exec_unreachable", 1, 1, null],
+ [ false, "testWasmUnreachable", 100, 0, "stack.js"],
+ [ false, null, 111, 0, "stack.js"]
+ ]);
+ }
+})();
+
+(function testWasmMemOutOfBounds() {
+ try {
+ module.exports.call_mem_out_of_bounds();
+ fail("expected wasm exception");
+ } catch (e) {
+ assertContains("out of bounds", e.message);
+ verifyStack(e.stack, [
+ // isWasm function line pos file
+ [ true, "", 2, 3, null],
+ [ true, "call_mem_out_of_bounds", 3, 1, null],
+ [ false, "testWasmMemOutOfBounds", 115, 0, "stack.js"],
+ [ false, null, 127, 0, "stack.js"]
+ ]);
+ }
+})();
+
+
+(function testStackOverflow() {
+ print("testStackOverflow");
+ var builder = new WasmModuleBuilder();
+
+ var sig_index = builder.addType(kSig_v_v);
+ builder.addFunction("recursion", sig_index)
+ .addBody([
+ kExprI32Const, 0,
+ kExprCallIndirect, kArity0, sig_index
+ ])
+ .exportFunc()
+ builder.appendToTable([0]);
+
+ try {
+ builder.instantiate().exports.recursion();
+ fail("expected wasm exception");
+ } catch (e) {
+ assertEquals("Maximum call stack size exceeded", e.message, "trap reason");
+ }
})();
diff --git a/deps/v8/test/mjsunit/wasm/stackwalk.js b/deps/v8/test/mjsunit/wasm/stackwalk.js
index 8b8fb7e4d4..913269fdf4 100644
--- a/deps/v8/test/mjsunit/wasm/stackwalk.js
+++ b/deps/v8/test/mjsunit/wasm/stackwalk.js
@@ -10,13 +10,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
function makeFFI(func) {
var builder = new WasmModuleBuilder();
- var sig_index = builder.addSignature([kAstI32, kAstF64, kAstF64]);
+ var sig_index = builder.addType(kSig_i_dd);
builder.addImport("func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
- kExprCallImport, 0, // --
- kExprGetLocal, 0, // --
- kExprGetLocal, 1]) // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallImport, kArity2, 0, // --
+ ])
.exportFunc()
return builder.instantiate({func: func}).exports.main;
diff --git a/deps/v8/test/mjsunit/wasm/start-function.js b/deps/v8/test/mjsunit/wasm/start-function.js
index bd4ccf22c3..c4d299e871 100644
--- a/deps/v8/test/mjsunit/wasm/start-function.js
+++ b/deps/v8/test/mjsunit/wasm/start-function.js
@@ -37,19 +37,19 @@ function assertVerifies(sig, body) {
return module;
}
-assertVerifies([kAstStmt], [kExprNop]);
-assertVerifies([kAstI32], [kExprI8Const, 0]);
+assertVerifies(kSig_v_v, [kExprNop]);
+assertVerifies(kSig_i, [kExprI8Const, 0]);
// Arguments aren't allowed in start functions.
-assertFails([kAstI32, kAstI32], [kExprGetLocal, 0]);
-assertFails([kAstI32, kAstI32, kAstF32], [kExprGetLocal, 0]);
-assertFails([kAstI32, kAstI32, kAstF32, kAstF64], [kExprGetLocal, 0]);
+assertFails(kSig_i_i, [kExprGetLocal, 0]);
+assertFails(kSig_i_ii, [kExprGetLocal, 0]);
+assertFails(kSig_i_dd, [kExprGetLocal, 0]);
(function testInvalidIndex() {
print("testInvalidIndex");
var builder = new WasmModuleBuilder();
- var func = builder.addFunction("", [kAstStmt])
+ var func = builder.addFunction("", kSig_v_v)
.addBody([kExprNop]);
builder.addStart(func.index + 1);
@@ -62,11 +62,11 @@ assertFails([kAstI32, kAstI32, kAstF32, kAstF64], [kExprGetLocal, 0]);
print("testTwoStartFuncs");
var builder = new WasmModuleBuilder();
- var func = builder.addFunction("", [kAstStmt])
+ var func = builder.addFunction("", kSig_v_v)
.addBody([kExprNop]);
- builder.addExplicitSection([kDeclStartFunction, 0]);
- builder.addExplicitSection([kDeclStartFunction, 0]);
+ builder.addExplicitSection([kDeclStart, 0]);
+ builder.addExplicitSection([kDeclStart, 0]);
assertThrows(builder.instantiate);
})();
@@ -78,8 +78,8 @@ assertFails([kAstI32, kAstI32, kAstF32, kAstF64], [kExprGetLocal, 0]);
builder.addMemory(12, 12, true);
- var func = builder.addFunction("", [kAstStmt])
- .addBody([kExprI32StoreMem, 0, 0, kExprI8Const, 0, kExprI8Const, 77]);
+ var func = builder.addFunction("", kSig_v_v)
+ .addBody([kExprI8Const, 0, kExprI8Const, 77, kExprI32StoreMem, 0, 0]);
builder.addStart(func.index);
@@ -98,11 +98,11 @@ assertFails([kAstI32, kAstI32, kAstF32, kAstF64], [kExprGetLocal, 0]);
}};
var builder = new WasmModuleBuilder();
- var sig_index = builder.addSignature([kAstStmt]);
+ var sig_index = builder.addType(kSig_v_v);
builder.addImport("foo", sig_index);
var func = builder.addFunction("", sig_index)
- .addBody([kExprCallImport, 0]);
+ .addBody([kExprCallImport, kArity0, 0]);
builder.addStart(func.index);
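// Start functions must be nullary and run during instantiation; the memory
// write below happens before instantiate() returns. A condensed sketch of
// the TestRun case above, assuming the usual test helpers are loaded:
var start_builder = new WasmModuleBuilder();
start_builder.addMemory(1, 1, true);
var start_func = start_builder.addFunction("", kSig_v_v)
    .addBody([kExprI8Const, 0, kExprI8Const, 77, kExprI32StoreMem, 0, 0]);
start_builder.addStart(start_func.index);
start_builder.instantiate();  // runs the start function, storing 77 at 0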
diff --git a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
new file mode 100644
index 0000000000..e180611818
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
@@ -0,0 +1,241 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var expect_elision = 0;
+var expect_no_elision = 1;
+// Function call stack: first_export -> first_func -> first_import ->
+// second_export -> second_import.
+// In this case, first_import and second_export have the same signature,
+// so the wrappers will be removed.
+(function TestWasmWrapperElision() {
+ var imported = function (a) {
+ return a;
+ };
+
+ var second_module = new WasmModuleBuilder();
+ var sig_index = second_module.addType(kSig_i_i);
+ second_module
+ .addImportWithModule("import_module_2", "import_name_2", sig_index);
+ second_module
+ .addFunction("second_export", sig_index)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallImport, kArity1, 0,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+
+ var first_module = new WasmModuleBuilder();
+ var sig_index = first_module.addType(kSig_i_i);
+ first_module
+ .addImportWithModule("import_module_1", "import_name_1", sig_index);
+ first_module
+ .addFunction("first_export", sig_index)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallFunction, kArity1, 1,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+ first_module
+ .addFunction("first_func", sig_index)
+ .addBody([
+ kExprI32Const, 1,
+ kExprGetLocal, 0,
+ kExprI32Add,
+ kExprCallImport, kArity1, 0,
+ kExprReturn, kArity1
+ ]);
+
+ var f = second_module
+ .instantiate({import_module_2: {import_name_2: imported}})
+ .exports.second_export;
+ var the_export = first_module
+ .instantiate({import_module_1: {import_name_1: f}})
+ .exports.first_export;
+ assertEquals(the_export(2), 3);
+ assertEquals(the_export(-1), 0);
+ assertEquals(the_export(0), 1);
+ assertEquals(the_export(5.5), 6);
+ assertEquals(%CheckWasmWrapperElision(the_export, expect_elision), true);
+})();
+
+// Function call stack: first_export -> first_func -> first_import ->
+// second_export -> second_import.
+// In this case, second_export has fewer params than first_import,
+// so the wrappers will not be removed.
+(function TestWasmWrapperNoElisionLessParams() {
+ var imported = function (a) {
+ return a;
+ };
+
+ var second_module = new WasmModuleBuilder();
+ var sig_index_1 = second_module.addType(kSig_i_i);
+ second_module
+ .addImportWithModule("import_module_2", "import_name_2", sig_index_1);
+ second_module
+ .addFunction("second_export", sig_index_1)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallImport, kArity1, 0,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+
+ var first_module = new WasmModuleBuilder();
+ var sig_index_2 = first_module.addType(kSig_i_ii);
+ first_module
+ .addImportWithModule("import_module_1", "import_name_1", sig_index_2);
+ first_module
+ .addFunction("first_export", sig_index_2)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallFunction, kArity2, 1,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+ first_module
+ .addFunction("first_func", sig_index_2)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallImport, kArity2, 0,
+ kExprReturn, kArity1
+ ]);
+
+ var f = second_module
+ .instantiate({import_module_2: {import_name_2: imported}})
+ .exports.second_export;
+ var the_export = first_module
+ .instantiate({import_module_1: {import_name_1: f}})
+ .exports.first_export;
+ assertEquals(the_export(4, 5), 4);
+ assertEquals(the_export(-1, 4), -1);
+ assertEquals(the_export(0, 2), 0);
+ assertEquals(the_export(9.9, 4.3), 9);
+ assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elision), true);
+})();
+
+// Function call stack: first_export -> first_func -> first_import ->
+// second_export -> second_import.
+// In this case, second_export has more params than first_import,
+// so the wrappers will not be removed.
+(function TestWasmWrapperNoElisionMoreParams() {
+ var imported = function (a, b, c) {
+ return a+b+c;
+ };
+
+ var second_module = new WasmModuleBuilder();
+ var sig_index_3 = second_module.addType(kSig_i_iii);
+ second_module
+ .addImportWithModule("import_module_2", "import_name_2", sig_index_3);
+ second_module
+ .addFunction("second_export", sig_index_3)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprCallImport, kArity3, 0,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+
+ var first_module = new WasmModuleBuilder();
+ var sig_index_2 = first_module.addType(kSig_i_ii);
+ first_module
+ .addImportWithModule("import_module_1", "import_name_1", sig_index_2);
+ first_module
+ .addFunction("first_export", sig_index_2)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallFunction, kArity2, 1,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+ first_module
+ .addFunction("first_func", sig_index_2)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallImport, kArity2, 0,
+ kExprReturn, kArity1
+ ]);
+
+ var f = second_module
+ .instantiate({import_module_2: {import_name_2: imported}})
+ .exports.second_export;
+ var the_export = first_module
+ .instantiate({import_module_1: {import_name_1: f}})
+ .exports.first_export;
+ assertEquals(the_export(5, 6), 11);
+ assertEquals(the_export(-1, -4), -5);
+ assertEquals(the_export(0, 0), 0);
+ assertEquals(the_export(1.1, 2.7), 3);
+  assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elision), true);
+})();
+
+// Call chain: first_export -> first_func -> first_import ->
+// second_export -> second_import
+// In this case, second_export's param types differ from first_import's,
+// so the wrappers will not be elided.
+(function TestWasmWrapperNoElisionTypeMismatch() {
+ var imported = function (a, b) {
+ return a+b;
+ };
+
+ var second_module = new WasmModuleBuilder();
+ var sig_index_2 = second_module.addType(kSig_d_dd);
+ second_module
+ .addImportWithModule("import_module_2", "import_name_2", sig_index_2);
+ second_module
+ .addFunction("second_export", sig_index_2)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallImport, kArity2, 0,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+
+ var first_module = new WasmModuleBuilder();
+ var sig_index_2 = first_module.addType(kSig_i_ii);
+ first_module
+ .addImportWithModule("import_module_1", "import_name_1", sig_index_2);
+ first_module
+ .addFunction("first_export", sig_index_2)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallFunction, kArity2, 1,
+ kExprReturn, kArity1
+ ])
+ .exportFunc();
+ first_module
+ .addFunction("first_func", sig_index_2)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprCallImport, kArity2, 0,
+ kExprReturn, kArity1
+ ]);
+
+ var f = second_module
+ .instantiate({import_module_2: {import_name_2: imported}})
+ .exports.second_export;
+ var the_export = first_module
+ .instantiate({import_module_1: {import_name_1: f}})
+ .exports.first_export;
+ assertEquals(the_export(2.8, 9.1), 11);
+ assertEquals(the_export(-1.7, -2.5), -3);
+ assertEquals(the_export(0.0, 0.0), 0);
+ assertEquals(the_export(2, -2), 0);
+  assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elision), true);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index 50797d0554..72d5a7aaa4 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -12,7 +12,7 @@ var debug = false;
(function BasicTest() {
var module = new WasmModuleBuilder();
module.addMemory(1, 2, false);
- module.addFunction("foo", [kAstI32])
+ module.addFunction("foo", kSig_i)
.addBody([kExprI8Const, 11])
.exportAs("blarg");
@@ -23,9 +23,9 @@ var debug = false;
(function ImportTest() {
var module = new WasmModuleBuilder();
- var index = module.addImport("print", [kAstStmt, kAstI32]);
- module.addFunction("foo", [kAstStmt])
- .addBody([kExprCallImport, index, kExprI8Const, 13])
+ var index = module.addImport("print", makeSig_v_x(kAstI32));
+ module.addFunction("foo", kSig_v_v)
+ .addBody([kExprI8Const, 13, kExprCallImport, kArity1, index])
.exportAs("main");
var buffer = module.toBuffer(debug);
@@ -36,9 +36,9 @@ var debug = false;
(function LocalsTest() {
var module = new WasmModuleBuilder();
- module.addFunction(undefined, [kAstI32, kAstI32])
+ module.addFunction(undefined, kSig_i_i)
.addLocals({i32_count: 1})
- .addBody([kExprSetLocal, 1, kExprGetLocal, 0])
+ .addBody([kExprGetLocal, 0, kExprSetLocal, 1])
.exportAs("main");
var buffer = module.toBuffer(debug);
@@ -58,9 +58,9 @@ var debug = false;
for (p of types) {
var module = new WasmModuleBuilder();
- module.addFunction(undefined, [p.type, p.type])
+ module.addFunction(undefined, makeSig_r_x(p.type, p.type))
.addLocals(p.locals)
- .addBody([kExprSetLocal, 1, kExprGetLocal, 0])
+ .addBody([kExprGetLocal, 0, kExprSetLocal, 1])
.exportAs("main");
var buffer = module.toBuffer(debug);
@@ -72,10 +72,10 @@ var debug = false;
(function CallTest() {
var module = new WasmModuleBuilder();
- module.addFunction("add", [kAstI32, kAstI32, kAstI32])
- .addBody([kExprI32Add, kExprGetLocal, 0, kExprGetLocal, 1]);
- module.addFunction("main", [kAstI32, kAstI32, kAstI32])
- .addBody([kExprCallFunction, 0, kExprGetLocal, 0, kExprGetLocal, 1])
+ module.addFunction("add", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
+ module.addFunction("main", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, kArity2, 0])
.exportAs("main");
var instance = module.instantiate();
@@ -85,13 +85,13 @@ var debug = false;
(function IndirectCallTest() {
var module = new WasmModuleBuilder();
- module.addFunction("add", [kAstI32, kAstI32, kAstI32])
- .addBody([kExprI32Add, kExprGetLocal, 0, kExprGetLocal, 1]);
- module.addFunction("main", [kAstI32, kAstI32, kAstI32, kAstI32])
- .addBody([kExprCallIndirect, 0, kExprGetLocal,
- 0, kExprGetLocal, 1, kExprGetLocal, 2])
+ module.addFunction("add", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
+ module.addFunction("main", kSig_i_iii)
+ .addBody([kExprGetLocal,
+ 0, kExprGetLocal, 1, kExprGetLocal, 2, kExprCallIndirect, kArity2, 0])
.exportAs("main");
- module.appendToFunctionTable([0]);
+ module.appendToTable([0]);
var instance = module.instantiate();
assertEquals(44, instance.exports.main(0, 11, 33));
@@ -102,8 +102,8 @@ var debug = false;
(function DataSegmentTest() {
var module = new WasmModuleBuilder();
module.addMemory(1, 1, false);
- module.addFunction("load", [kAstI32, kAstI32])
- .addBody([kExprI32LoadMem, 0, 0, kExprGetLocal, 0])
+ module.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
.exportAs("load");
module.addDataSegment(0, [9, 9, 9, 9], true);
@@ -116,7 +116,7 @@ var debug = false;
(function BasicTestWithUint8Array() {
var module = new WasmModuleBuilder();
module.addMemory(1, 2, false);
- module.addFunction("foo", [kAstI32])
+ module.addFunction("foo", kSig_i)
.addBody([kExprI8Const, 17])
.exportAs("blarg");
@@ -141,9 +141,9 @@ var debug = false;
(function ImportTestTwoLevel() {
var module = new WasmModuleBuilder();
- var index = module.addImportWithModule("mod", "print", [kAstStmt, kAstI32]);
- module.addFunction("foo", [kAstStmt])
- .addBody([kExprCallImport, index, kExprI8Const, 19])
+ var index = module.addImportWithModule("mod", "print", makeSig_v_x(kAstI32));
+ module.addFunction("foo", kSig_v_v)
+ .addBody([kExprI8Const, 19, kExprCallImport, kArity1, index])
.exportAs("main");
var buffer = module.toBuffer(debug);
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
new file mode 100644
index 0000000000..0440af9ccc
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+// Collect the Callsite objects instead of just a string:
+Error.prepareStackTrace = function(error, frames) {
+ return frames;
+};
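+// With this hook installed, e.stack below is an array of CallSite objects,
+// so the test can query e.stack[0].getLineNumber() and getPosition() directly.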
+
+var builder = new WasmModuleBuilder();
+
+var sig_index = builder.addType(kSig_i_v)
+
+// Build a function to resemble this code:
+// if (idx < 2) {
+// return load(-2 / idx);
+// } else if (idx == 2) {
+// unreachable;
+// } else {
+// return call_indirect(idx);
+// }
+// There are four different traps which are triggered by different input values:
+// (0) division by zero; (1) mem oob; (2) unreachable; (3) invalid call target
+// Each of them also has a different location where it traps.
+builder.addFunction("main", kSig_i_i)
+ .addBody([
+ // offset 1
+ kExprBlock,
+ kExprGetLocal, 0,
+ kExprI32Const, 2,
+ kExprI32LtU,
+ kExprIf,
+ // offset 8
+ kExprI32Const, 0x7e /* -2 */,
+ kExprGetLocal, 0,
+ kExprI32DivU,
+ // offset 13
+ kExprI32LoadMem, 0, 0,
+ kExprBr, 1, 1,
+ kExprEnd,
+ // offset 20
+ kExprGetLocal, 0,
+ kExprI32Const, 2,
+ kExprI32Eq,
+ kExprIf,
+ kExprUnreachable,
+ kExprEnd,
+ // offset 28
+ kExprGetLocal, 0,
+ kExprCallIndirect, kArity0, sig_index,
+ kExprEnd,
+ ])
+ .exportAs("main");
+
+var module = builder.instantiate();
+
+function testWasmTrap(value, reason, position) {
+ try {
+ module.exports.main(value);
+ fail("expected wasm exception");
+ } catch (e) {
+ assertEquals(kTrapMsgs[reason], e.message, "trap reason");
+ assertEquals(3, e.stack.length, "number of frames");
+ assertEquals(0, e.stack[0].getLineNumber(), "wasmFunctionIndex");
+ assertEquals(position, e.stack[0].getPosition(), "position");
+ }
+}
+
+// The actual tests:
+testWasmTrap(0, kTrapDivByZero, 12);
+testWasmTrap(1, kTrapMemOutOfBounds, 13);
+testWasmTrap(2, kTrapUnreachable, 26);
+testWasmTrap(3, kTrapFuncInvalid, 30);
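+// The expected positions correspond to the byte offsets annotated in the
+// function body above (e.g. the division at offset 12, the load at 13).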
diff --git a/deps/v8/test/mjsunit/wasm/unicode-validation.js b/deps/v8/test/mjsunit/wasm/unicode-validation.js
new file mode 100644
index 0000000000..b2e4603087
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/unicode-validation.js
@@ -0,0 +1,121 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function toByteArray(s) {
+ var arr = [];
+ for (var i = 0; i < s.length; ++i) {
+ arr.push(s.charCodeAt(i) & 0xff);
+ }
+ return arr;
+}
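+// Only the low byte of each code unit is kept, so this helper is intended
+// for strings whose code units are <= 0xff, e.g. toByteArray("\xc2\xbc")
+// yields [0xc2, 0xbc].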
+
+function toString(arr) {
+ if (typeof arr === "string") return arr;
+ var s = "";
+ for (var b of arr) s += String.fromCharCode(b);
+ return s;
+}
+
+function toUTF8(arr) {
+ if (typeof arr === "string" || arr === undefined) return arr;
+ return decodeURIComponent(escape(toString(arr)));
+}
+
+function isValidUtf8(arr) {
+ if (typeof arr === "string" || arr === undefined) return true;
+ try {
+ var s = toUTF8(arr);
+ for (var i = 0; i < s.length; ++i)
+ if ((s.charCodeAt(i) & 0xfffe) == 0xfffe)
+ return false;
+ return true;
+ } catch (e) {
+ if (e instanceof URIError) return false;
+ throw e;
+ }
+}
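+// Sketch of the expected behavior: toUTF8([0xc2, 0xa1]) decodes the UTF-8
+// bytes c2 a1 to "\u00a1" ("¡"), whereas isValidUtf8([0xff]) is false
+// because decodeURIComponent throws a URIError on the malformed input.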
+
+function checkImportsAndExports(imported_module_name, imported_function_name,
+ internal_function_name, exported_function_name, shouldThrow) {
+ var builder = new WasmModuleBuilder();
+
+ builder.addImportWithModule(imported_module_name, imported_function_name,
+ kSig_v_v);
+
+ builder.addFunction(internal_function_name, kSig_v_v)
+ .addBody([kExprCallImport, kArity0, 0])
+ .exportAs(exported_function_name);
+
+  // Sanity check: does JavaScript agree with our shouldThrow annotation?
+ assertEquals(shouldThrow,
+ !isValidUtf8(imported_module_name) ||
+ !isValidUtf8(imported_function_name) ||
+ !isValidUtf8(exported_function_name),
+ "JavaScript does not agree with our shouldThrow expectation");
+
+ if (!shouldThrow) {
+ imported_module_name = toUTF8(imported_module_name);
+ imported_function_name = toUTF8(imported_function_name);
+ }
+
+ var ffi = new Object();
+ if (imported_function_name === undefined) {
+ ffi[imported_module_name] = function() { };
+ } else {
+ ffi[imported_module_name] = new Object();
+ ffi[imported_module_name][imported_function_name] = function() { };
+ }
+
+ var hasThrown = true;
+ try {
+ builder.instantiate(ffi);
+ hasThrown = false;
+ } catch (err) {
+ if (!shouldThrow) print(err);
+ assertTrue(shouldThrow, "Should not throw error on valid names");
+ assertContains("UTF-8", err.toString());
+ }
+ assertEquals(shouldThrow, hasThrown,
+ "Should throw validation error on invalid names");
+}
+
+function checkImportedModuleName(name, shouldThrow) {
+ checkImportsAndExports(name, "imp", "func", undefined, shouldThrow);
+}
+
+function checkImportedFunctionName(name, shouldThrow) {
+ checkImportsAndExports("module", name, "func", "func", shouldThrow);
+}
+
+function checkExportedFunctionName(name, shouldThrow) {
+ checkImportsAndExports("module", "func", "func", name, shouldThrow);
+}
+
+function checkInternalFunctionName(name) {
+ checkImportsAndExports("module", "func", name, "func", false);
+}
+
+function checkAll(name, shouldThrow) {
+ checkImportedModuleName(name, shouldThrow);
+ checkImportedFunctionName(name, shouldThrow);
+ checkExportedFunctionName(name, shouldThrow);
+ checkInternalFunctionName(name);
+}
+
+checkAll("ascii", false);
+checkAll("some math: (½)² = ¼", false);
+checkAll("中国历史系列条目\n北", false);
+checkAll(toByteArray("\xef\xb7\x8f"), false);
+checkAll(toByteArray("a\xc2\x81\xe1\x80\xbf\xf1\x80\xa0\xbf"), false);
+checkAll(toByteArray("\xff"), true);
+checkAll(toByteArray("\xed\xa0\x8f"), true); // surrogate code points
+checkAll(toByteArray("\xe0\x82\x80"), true); // overlong sequence
+checkAll(toByteArray("\xf4\x90\x80\x80"), true); // beyond limit: U+110000
+checkAll(toByteArray("\xef\xbf\xbe"), true); // non-character; U+FFFE
+checkAll(toByteArray("with\x00null"), false);
diff --git a/deps/v8/test/mjsunit/wasm/unreachable.js b/deps/v8/test/mjsunit/wasm/unreachable.js
index 3e2dffb4e9..d77b53ea53 100644
--- a/deps/v8/test/mjsunit/wasm/unreachable.js
+++ b/deps/v8/test/mjsunit/wasm/unreachable.js
@@ -9,7 +9,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var main = (function () {
var builder = new WasmModuleBuilder();
- builder.addFunction("main", [kAstStmt])
+ builder.addFunction("main", kSig_v_v)
.addBody([kExprUnreachable])
.exportAs("main");
@@ -23,4 +23,4 @@ try {
print("correctly caught: " + e);
exception = e;
}
-assertEquals("unreachable", exception);
+assertEquals("unreachable", exception.message);
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-simple.js b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
index aa5c67683e..31c23a6b69 100644
--- a/deps/v8/test/mjsunit/wasm/verify-function-simple.js
+++ b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
@@ -8,9 +8,9 @@ load("test/mjsunit/wasm/wasm-constants.js");
try {
var data = bytes(
- 0, kAstStmt, // signature
- kDeclNoLocals, // --
- kExprNop // body
+ kWasmFunctionTypeForm, 0, kAstStmt, // signature
+ kDeclNoLocals, // --
+ kExprNop // body
);
Wasm.verifyFunction(data);
@@ -23,9 +23,9 @@ try {
var threw = false;
try {
var data = bytes(
- 0, kAstI32, // signature
- kDeclNoLocals, // --
- kExprBlock, 2, kExprNop, kExprNop // body
+ kWasmFunctionTypeForm, 0, 1, kAstI32, // signature
+ kDeclNoLocals, // --
+ kExprBlock, kExprNop, kExprNop, kExprEnd // body
);
Wasm.verifyFunction(data);
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index cc620bb458..04ac0c9592 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -21,7 +21,7 @@ var kWasmH1 = 0x61;
var kWasmH2 = 0x73;
var kWasmH3 = 0x6d;
-var kWasmV0 = 10;
+var kWasmV0 = 11;
var kWasmV1 = 0;
var kWasmV2 = 0;
var kWasmV3 = 0;
@@ -52,23 +52,29 @@ var kDeclNoLocals = 0;
// Section declaration constants
var kDeclMemory = 0x00;
-var kDeclSignatures = 0x01;
+var kDeclTypes = 0x01;
var kDeclFunctions = 0x02;
var kDeclGlobals = 0x03;
-var kDeclDataSegments = 0x04;
-var kDeclFunctionTable = 0x05;
+var kDeclData = 0x04;
+var kDeclTable = 0x05;
var kDeclEnd = 0x06;
-var kDeclStartFunction = 0x07;
-var kDeclImportTable = 0x08;
-var kDeclExportTable = 0x09;
-var kDeclFunctionSignatures = 0x0a;
-var kDeclFunctionBodies = 0x0b;
+var kDeclStart = 0x07;
+var kDeclImports = 0x08;
+var kDeclExports = 0x09;
+var kDeclFunctions = 0x0a;
+var kDeclCode = 0x0b;
var kDeclNames = 0x0c;
+var kArity0 = 0;
+var kArity1 = 1;
+var kArity2 = 2;
+var kArity3 = 3;
+var kWasmFunctionTypeForm = 0x40;
+
var section_names = [
- "memory", "signatures", "functions", "globals", "data_segments",
- "function_table", "end", "start_function", "import_table", "export_table",
- "function_signatures", "function_bodies", "names"];
+ "memory", "type", "old_function", "global", "data",
+ "table", "end", "start", "import", "export",
+ "function", "code", "name"];
// Function declaration flags
var kDeclFunctionName = 0x01;
@@ -83,31 +89,73 @@ var kAstI64 = 2;
var kAstF32 = 3;
var kAstF64 = 4;
+// Useful signatures
+var kSig_i = makeSig([], [kAstI32]);
+var kSig_d = makeSig([], [kAstF64]);
+var kSig_i_i = makeSig([kAstI32], [kAstI32]);
+var kSig_i_ii = makeSig([kAstI32, kAstI32], [kAstI32]);
+var kSig_i_iii = makeSig([kAstI32, kAstI32, kAstI32], [kAstI32]);
+var kSig_d_dd = makeSig([kAstF64, kAstF64], [kAstF64]);
+var kSig_l_ll = makeSig([kAstI64, kAstI64], [kAstI64]);
+var kSig_i_dd = makeSig([kAstF64, kAstF64], [kAstI32]);
+var kSig_v_v = makeSig([], []);
+var kSig_i_v = makeSig([], [kAstI32]);
+var kSig_v_i = makeSig([kAstI32], []);
+var kSig_v_ii = makeSig([kAstI32, kAstI32], []);
+var kSig_v_iii = makeSig([kAstI32, kAstI32, kAstI32], []);
+var kSig_v_d = makeSig([kAstF64], []);
+var kSig_v_dd = makeSig([kAstF64, kAstF64], []);
+
+function makeSig(params, results) {
+ return {params: params, results: results};
+}
+
+function makeSig_v_x(x) {
+ return makeSig([x], []);
+}
+
+function makeSig_v_xx(x) {
+ return makeSig([x, x], []);
+}
+
+function makeSig_r_v(r) {
+ return makeSig([], [r]);
+}
+
+function makeSig_r_x(r, x) {
+ return makeSig([x], [r]);
+}
+
+function makeSig_r_xx(r, x) {
+ return makeSig([x, x], [r]);
+}
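+// For example, makeSig_r_xx(kAstI32, kAstF64) builds the same signature
+// object as kSig_i_dd above: {params: [kAstF64, kAstF64], results: [kAstI32]}.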
+
// Opcodes
var kExprNop = 0x00;
var kExprBlock = 0x01;
var kExprLoop = 0x02;
var kExprIf = 0x03;
-var kExprIfElse = 0x04;
+var kExprElse = 0x04;
var kExprSelect = 0x05;
var kExprBr = 0x06;
var kExprBrIf = 0x07;
-var kExprTableSwitch = 0x08;
-var kExprReturn = 0x14;
-var kExprUnreachable = 0x15;
+var kExprBrTable = 0x08;
+var kExprReturn = 0x09;
+var kExprUnreachable = 0x0a;
+var kExprEnd = 0x0f;
-var kExprI8Const = 0x09;
-var kExprI32Const = 0x0a;
-var kExprI64Const = 0x0b;
-var kExprF64Const = 0x0c;
-var kExprF32Const = 0x0d;
-var kExprGetLocal = 0x0e;
-var kExprSetLocal = 0x0f;
-var kExprLoadGlobal = 0x10;
-var kExprStoreGlobal = 0x11;
-var kExprCallFunction = 0x12;
-var kExprCallIndirect = 0x13;
-var kExprCallImport = 0x1F;
+var kExprI32Const = 0x10;
+var kExprI64Const = 0x11;
+var kExprF64Const = 0x12;
+var kExprF32Const = 0x13;
+var kExprGetLocal = 0x14;
+var kExprSetLocal = 0x15;
+var kExprCallFunction = 0x16;
+var kExprCallIndirect = 0x17;
+var kExprCallImport = 0x18;
+var kExprI8Const = 0xcb;
+var kExprGetGlobal = 0xbb;
+var kExprSetGlobal = 0xbc;
var kExprI32LoadMem8S = 0x20;
var kExprI32LoadMem8U = 0x21;
@@ -268,6 +316,7 @@ var kTrapRemByZero = 4;
var kTrapFloatUnrepresentable = 5;
var kTrapFuncInvalid = 6;
var kTrapFuncSigMismatch = 7;
+var kTrapInvalidIndex = 8;
var kTrapMsgs = [
"unreachable",
@@ -277,7 +326,8 @@ var kTrapMsgs = [
"remainder by zero",
"integer result unrepresentable",
"invalid function",
- "function signature mismatch"
+ "function signature mismatch",
+ "invalid index into function table"
];
function assertTraps(trap, code) {
@@ -290,8 +340,8 @@ function assertTraps(trap, code) {
}
threwException = false;
} catch (e) {
- assertEquals("string", typeof e);
- assertEquals(kTrapMsgs[trap], e);
+ assertEquals("object", typeof e);
+ assertEquals(kTrapMsgs[trap], e.message);
// Success.
return;
}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index e1d996338c..fecd164b56 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -2,334 +2,364 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-function WasmFunctionBuilder(name, sig_index) {
+class Binary extends Array {
+ emit_u8(val) {
+ this.push(val);
+ }
+
+ emit_u16(val) {
+ this.push(val & 0xff);
+ this.push((val >> 8) & 0xff);
+ }
+
+ emit_u32(val) {
+ this.push(val & 0xff);
+ this.push((val >> 8) & 0xff);
+ this.push((val >> 16) & 0xff);
+ this.push((val >> 24) & 0xff);
+ }
+
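+  // LEB128-encode an unsigned value, e.g. emit_varint(300) pushes
+  // [0xac, 0x02]: the low seven bits 0b0101100 with the continuation bit
+  // set, then the remaining bits 0b10 (300 = 0b100101100).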
+ emit_varint(val) {
+ while (true) {
+ let v = val & 0xff;
+ val = val >>> 7;
+ if (val == 0) {
+ this.push(v);
+ break;
+ }
+ this.push(v | 0x80);
+ }
+ }
+
+ emit_bytes(data) {
+ for (let i = 0; i < data.length; i++) {
+ this.push(data[i] & 0xff);
+ }
+ }
+
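+  // Emit a length-prefixed UTF-8 string. For instance, emit_string("¼")
+  // converts to "\xc2\xbc" (the UTF-8 bytes of U+00BC) and emits the
+  // varint length 2 followed by the bytes 0xc2 0xbc.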
+ emit_string(string) {
+ // When testing illegal names, we pass a byte array directly.
+ if (string instanceof Array) {
+ this.emit_varint(string.length);
+ this.emit_bytes(string);
+ return;
+ }
+
+    // This is a hacky way to convert a JavaScript string to a UTF-8 encoded
+    // string containing only single-byte characters.
+ let string_utf8 = unescape(encodeURIComponent(string));
+ this.emit_varint(string_utf8.length);
+ for (let i = 0; i < string_utf8.length; i++) {
+ this.emit_u8(string_utf8.charCodeAt(i));
+ }
+ }
+
+ emit_header() {
+ this.push(kWasmH0, kWasmH1, kWasmH2, kWasmH3,
+ kWasmV0, kWasmV1, kWasmV2, kWasmV3);
+ }
+
+ emit_section(section_code, content_generator) {
+ // Emit section name.
+ this.emit_string(section_names[section_code]);
+    // Emit the section to a temporary buffer: its full length isn't known yet.
+ let section = new Binary;
+ content_generator(section);
+ // Emit section length.
+ this.emit_varint(section.length);
+ // Copy the temporary buffer.
+ this.push(...section);
+ }
+}
+
+class WasmFunctionBuilder {
+ constructor(name, type_index) {
this.name = name;
- this.sig_index = sig_index;
+ this.type_index = type_index;
this.exports = [];
-}
+ }
-WasmFunctionBuilder.prototype.exportAs = function(name) {
+ exportAs(name) {
this.exports.push(name);
return this;
-}
+ }
-WasmFunctionBuilder.prototype.exportFunc = function() {
- this.exports.push(this.name);
- return this;
-}
+ exportFunc() {
+ this.exports.push(this.name);
+ return this;
+ }
-WasmFunctionBuilder.prototype.addBody = function(body) {
+ addBody(body) {
this.body = body;
return this;
-}
+ }
-WasmFunctionBuilder.prototype.addLocals = function(locals) {
+ addLocals(locals) {
this.locals = locals;
return this;
+ }
}
-function WasmModuleBuilder() {
- this.signatures = [];
+class WasmModuleBuilder {
+ constructor() {
+ this.types = [];
this.imports = [];
this.functions = [];
this.exports = [];
- this.function_table = [];
- this.data_segments = [];
+ this.table = [];
+ this.segments = [];
this.explicit = [];
+ this.pad = null;
return this;
-}
+ }
-WasmModuleBuilder.prototype.addStart = function(start_index) {
+ addStart(start_index) {
this.start_index = start_index;
-}
+ }
-WasmModuleBuilder.prototype.addMemory = function(min, max, exp) {
+ addMemory(min, max, exp) {
this.memory = {min: min, max: max, exp: exp};
return this;
-}
+ }
-WasmModuleBuilder.prototype.addExplicitSection = function(bytes) {
- this.explicit.push(bytes);
- return this;
-}
+ addPadFunctionTable(size) {
+ this.pad = size;
+ return this;
+ }
-// Add a signature; format is [rettype, param0, param1, ...]
-WasmModuleBuilder.prototype.addSignature = function(sig) {
- // TODO: canonicalize signatures?
- this.signatures.push(sig);
- return this.signatures.length - 1;
-}
+ addExplicitSection(bytes) {
+ this.explicit.push(bytes);
+ return this;
+ }
-WasmModuleBuilder.prototype.addFunction = function(name, sig) {
- var sig_index = (typeof sig) == "number" ? sig : this.addSignature(sig);
- var func = new WasmFunctionBuilder(name, sig_index);
+ addType(type) {
+ // TODO: canonicalize types?
+ this.types.push(type);
+ return this.types.length - 1;
+ }
+
+ addFunction(name, type) {
+ let type_index = (typeof type) == "number" ? type : this.addType(type);
+ let func = new WasmFunctionBuilder(name, type_index);
func.index = this.functions.length;
this.functions.push(func);
return func;
-}
+ }
-WasmModuleBuilder.prototype.addImportWithModule = function(module, name, sig) {
- var sig_index = (typeof sig) == "number" ? sig : this.addSignature(sig);
- this.imports.push({module: module, name: name, sig_index: sig_index});
- return this.imports.length - 1;
-}
+ addImportWithModule(module, name, type) {
+ let type_index = (typeof type) == "number" ? type : this.addType(type);
+ this.imports.push({module: module, name: name, type: type_index});
+ return this.imports.length - 1;
+ }
-WasmModuleBuilder.prototype.addImport = function(name, sig) {
- var sig_index = (typeof sig) == "number" ? sig : this.addSignature(sig);
- this.imports.push({module: name, name: undefined, sig_index: sig_index});
- return this.imports.length - 1;
-}
+ addImport(name, type) {
+ return this.addImportWithModule(name, undefined, type);
+ }
-WasmModuleBuilder.prototype.addDataSegment = function(addr, data, init) {
- this.data_segments.push({addr: addr, data: data, init: init});
- return this.data_segments.length - 1;
-}
+ addDataSegment(addr, data, init) {
+ this.segments.push({addr: addr, data: data, init: init});
+ return this.segments.length - 1;
+ }
-WasmModuleBuilder.prototype.appendToFunctionTable = function(array) {
- this.function_table = this.function_table.concat(array);
+ appendToTable(array) {
+ this.table.push(...array);
return this;
-}
-
-function emit_u8(bytes, val) {
- bytes.push(val & 0xff);
-}
-
-function emit_u16(bytes, val) {
- bytes.push(val & 0xff);
- bytes.push((val >> 8) & 0xff);
-}
-
-function emit_u32(bytes, val) {
- bytes.push(val & 0xff);
- bytes.push((val >> 8) & 0xff);
- bytes.push((val >> 16) & 0xff);
- bytes.push((val >> 24) & 0xff);
-}
+ }
-function emit_string(bytes, string) {
- emit_varint(bytes, string.length);
- for (var i = 0; i < string.length; i++) {
- emit_u8(bytes, string.charCodeAt(i));
+ toArray(debug) {
+ let binary = new Binary;
+ let wasm = this;
+
+ // Add header
+ binary.emit_header();
+
+ // Add type section
+ if (wasm.types.length > 0) {
+ if (debug) print("emitting types @ " + binary.length);
+ binary.emit_section(kDeclTypes, section => {
+ section.emit_varint(wasm.types.length);
+ for (let type of wasm.types) {
+ section.emit_u8(kWasmFunctionTypeForm);
+ section.emit_varint(type.params.length);
+ for (let param of type.params) {
+ section.emit_u8(param);
+ }
+ section.emit_varint(type.results.length);
+ for (let result of type.results) {
+ section.emit_u8(result);
+ }
+ }
+ });
}
-}
-function emit_varint(bytes, val) {
- while (true) {
- var v = val & 0xff;
- val = val >>> 7;
- if (val == 0) {
- bytes.push(v);
- break;
+ // Add imports section
+ if (wasm.imports.length > 0) {
+ if (debug) print("emitting imports @ " + binary.length);
+ binary.emit_section(kDeclImports, section => {
+ section.emit_varint(wasm.imports.length);
+ for (let imp of wasm.imports) {
+ section.emit_varint(imp.type);
+ section.emit_string(imp.module);
+ section.emit_string(imp.name || '');
}
- bytes.push(v | 0x80);
+ });
}
-}
-
-function emit_bytes(bytes, data) {
- for (var i = 0; i < data.length; i++) {
- bytes.push(data[i] & 0xff);
- }
-}
-
-function emit_section(bytes, section_code, content_generator) {
- // Start the section in a temporary buffer: its full length isn't know yet.
- var tmp_bytes = [];
- emit_string(tmp_bytes, section_names[section_code]);
- content_generator(tmp_bytes);
- // Now that we know the section length, emit it and copy the section.
- emit_varint(bytes, tmp_bytes.length);
- Array.prototype.push.apply(bytes, tmp_bytes);
-}
-WasmModuleBuilder.prototype.toArray = function(debug) {
- // Add header bytes
- var bytes = [];
- bytes = bytes.concat([kWasmH0, kWasmH1, kWasmH2, kWasmH3,
- kWasmV0, kWasmV1, kWasmV2, kWasmV3]);
+ // Add functions declarations
+ let has_names = false;
+ let names = false;
+ let exports = 0;
+ if (wasm.functions.length > 0) {
+ if (debug) print("emitting function decls @ " + binary.length);
+ binary.emit_section(kDeclFunctions, section => {
+ section.emit_varint(wasm.functions.length);
+ for (let func of wasm.functions) {
+ has_names = has_names || (func.name != undefined &&
+ func.name.length > 0);
+ exports += func.exports.length;
+ section.emit_varint(func.type_index);
+ }
+ });
+ }
- var wasm = this;
+ // Add table.
+ if (wasm.table.length > 0) {
+ if (debug) print("emitting table @ " + binary.length);
+ binary.emit_section(kDeclTable, section => {
+ section.emit_varint(wasm.table.length);
+ if (wasm.pad !== null) {
+ if (debug) print("emitting table padding @ " + binary.length);
+ section.emit_varint(wasm.pad);
+ }
+ for (let index of wasm.table) {
+ section.emit_varint(index);
+ }
+ });
+ }
// Add memory section
if (wasm.memory != undefined) {
- if (debug) print("emitting memory @ " + bytes.length);
- emit_section(bytes, kDeclMemory, function(bytes) {
- emit_varint(bytes, wasm.memory.min);
- emit_varint(bytes, wasm.memory.max);
- emit_u8(bytes, wasm.memory.exp ? 1 : 0);
- });
+ if (debug) print("emitting memory @ " + binary.length);
+ binary.emit_section(kDeclMemory, section => {
+ section.emit_varint(wasm.memory.min);
+ section.emit_varint(wasm.memory.max);
+ section.emit_u8(wasm.memory.exp ? 1 : 0);
+ });
}
- // Add signatures section
- if (wasm.signatures.length > 0) {
- if (debug) print("emitting signatures @ " + bytes.length);
- emit_section(bytes, kDeclSignatures, function(bytes) {
- emit_varint(bytes, wasm.signatures.length);
- for (sig of wasm.signatures) {
- var params = sig.length - 1;
- emit_varint(bytes, params);
- for (var j = 0; j < sig.length; j++) {
- emit_u8(bytes, sig[j]);
- }
- }
- });
+
+ // Add export table.
+ if (exports > 0) {
+ if (debug) print("emitting exports @ " + binary.length);
+ binary.emit_section(kDeclExports, section => {
+ section.emit_varint(exports);
+ for (let func of wasm.functions) {
+ for (let exp of func.exports) {
+ section.emit_varint(func.index);
+ section.emit_string(exp);
+ }
+ }
+ });
}
- // Add imports section
- if (wasm.imports.length > 0) {
- if (debug) print("emitting imports @ " + bytes.length);
- emit_section(bytes, kDeclImportTable, function(bytes) {
- emit_varint(bytes, wasm.imports.length);
- for (imp of wasm.imports) {
- emit_varint(bytes, imp.sig_index);
- emit_string(bytes, imp.module);
- emit_string(bytes, imp.name || '');
- }
- });
+ // Add start function section.
+ if (wasm.start_index != undefined) {
+ if (debug) print("emitting start function @ " + binary.length);
+ binary.emit_section(kDeclStart, section => {
+ section.emit_varint(wasm.start_index);
+ });
}
- // Add functions section
- var names = false;
- var exports = 0;
+ // Add function bodies.
if (wasm.functions.length > 0) {
- var has_names = false;
-
- // emit function signatures
- if (debug) print("emitting function sigs @ " + bytes.length);
- emit_section(bytes, kDeclFunctionSignatures, function(bytes) {
- emit_varint(bytes, wasm.functions.length);
- for (func of wasm.functions) {
- has_names = has_names || (func.name != undefined &&
- func.name.length > 0);
- exports += func.exports.length;
-
- emit_varint(bytes, func.sig_index);
+ // emit function bodies
+ if (debug) print("emitting code @ " + binary.length);
+ binary.emit_section(kDeclCode, section => {
+ section.emit_varint(wasm.functions.length);
+ for (let func of wasm.functions) {
+ // Function body length will be patched later.
+ let local_decls = [];
+ let l = func.locals;
+ if (l != undefined) {
+ let local_decls_count = 0;
+ if (l.i32_count > 0) {
+ local_decls.push({count: l.i32_count, type: kAstI32});
}
- });
-
- // emit function bodies
- if (debug) print("emitting function bodies @ " + bytes.length);
- emit_section(bytes, kDeclFunctionBodies, function(bytes) {
- emit_varint(bytes, wasm.functions.length);
- for (func of wasm.functions) {
- // Function body length will be patched later.
- var local_decls = [];
- var l = func.locals;
- if (l != undefined) {
- var local_decls_count = 0;
- if (l.i32_count > 0) {
- local_decls.push({count: l.i32_count, type: kAstI32});
- }
- if (l.i64_count > 0) {
- local_decls.push({count: l.i64_count, type: kAstI64});
- }
- if (l.f32_count > 0) {
- local_decls.push({count: l.f32_count, type: kAstF32});
- }
- if (l.f64_count > 0) {
- local_decls.push({count: l.f64_count, type: kAstF64});
- }
- }
- var header = new Array();
-
- emit_varint(header, local_decls.length);
- for (decl of local_decls) {
- emit_varint(header, decl.count);
- emit_u8(header, decl.type);
- }
-
- emit_varint(bytes, header.length + func.body.length);
- emit_bytes(bytes, header);
- emit_bytes(bytes, func.body);
+ if (l.i64_count > 0) {
+ local_decls.push({count: l.i64_count, type: kAstI64});
}
- });
- }
-
- // emit function names
- if (has_names) {
- if (debug) print("emitting names @ " + bytes.length);
- emit_section(bytes, kDeclNames, function(bytes) {
- emit_varint(bytes, wasm.functions.length);
- for (func of wasm.functions) {
- var name = func.name == undefined ? "" : func.name;
- emit_string(bytes, name);
- emit_u8(bytes, 0); // local names count == 0
+ if (l.f32_count > 0) {
+ local_decls.push({count: l.f32_count, type: kAstF32});
}
- });
- }
-
- // Add start function section.
- if (wasm.start_index != undefined) {
- if (debug) print("emitting start function @ " + bytes.length);
- emit_section(bytes, kDeclStartFunction, function(bytes) {
- emit_varint(bytes, wasm.start_index);
- });
- }
-
- if (wasm.function_table.length > 0) {
- if (debug) print("emitting function table @ " + bytes.length);
- emit_section(bytes, kDeclFunctionTable, function(bytes) {
- emit_varint(bytes, wasm.function_table.length);
- for (index of wasm.function_table) {
- emit_varint(bytes, index);
+ if (l.f64_count > 0) {
+ local_decls.push({count: l.f64_count, type: kAstF64});
}
- });
+ }
+
+ let header = new Binary;
+ header.emit_varint(local_decls.length);
+ for (let decl of local_decls) {
+ header.emit_varint(decl.count);
+ header.emit_u8(decl.type);
+ }
+
+ section.emit_varint(header.length + func.body.length);
+ section.emit_bytes(header);
+ section.emit_bytes(func.body);
+ }
+ });
}
- if (exports > 0) {
- if (debug) print("emitting exports @ " + bytes.length);
- emit_section(bytes, kDeclExportTable, function(bytes) {
- emit_varint(bytes, exports);
- for (func of wasm.functions) {
- for (exp of func.exports) {
- emit_varint(bytes, func.index);
- emit_string(bytes, exp);
- }
- }
- });
+ // Add data segments.
+ if (wasm.segments.length > 0) {
+ if (debug) print("emitting data segments @ " + binary.length);
+ binary.emit_section(kDeclData, section => {
+ section.emit_varint(wasm.segments.length);
+ for (let seg of wasm.segments) {
+ section.emit_varint(seg.addr);
+ section.emit_varint(seg.data.length);
+ section.emit_bytes(seg.data);
+ }
+ });
}
- if (wasm.data_segments.length > 0) {
- if (debug) print("emitting data segments @ " + bytes.length);
- emit_section(bytes, kDeclDataSegments, function(bytes) {
- emit_varint(bytes, wasm.data_segments.length);
- for (seg of wasm.data_segments) {
- emit_varint(bytes, seg.addr);
- emit_varint(bytes, seg.data.length);
- emit_bytes(bytes, seg.data);
- }
- });
+ // Add any explicitly added sections
+ for (let exp of wasm.explicit) {
+ if (debug) print("emitting explicit @ " + binary.length);
+ binary.emit_bytes(exp);
}
- // Emit any explicitly added sections
- for (exp of wasm.explicit) {
- if (debug) print("emitting explicit @ " + bytes.length);
- emit_bytes(bytes, exp);
+ // Add function names.
+ if (has_names) {
+ if (debug) print("emitting names @ " + binary.length);
+ binary.emit_section(kDeclNames, section => {
+ section.emit_varint(wasm.functions.length);
+ for (let func of wasm.functions) {
+ var name = func.name == undefined ? "" : func.name;
+ section.emit_string(name);
+ section.emit_u8(0); // local names count == 0
+ }
+ });
}
- // End the module.
- if (debug) print("emitting end @ " + bytes.length);
- emit_section(bytes, kDeclEnd, function(bytes) {});
-
- return bytes;
-}
+ return binary;
+ }
-WasmModuleBuilder.prototype.toBuffer = function(debug) {
- var bytes = this.toArray(debug);
- var buffer = new ArrayBuffer(bytes.length);
- var view = new Uint8Array(buffer);
- for (var i = 0; i < bytes.length; i++) {
- var val = bytes[i];
- if ((typeof val) == "string") val = val.charCodeAt(0);
- view[i] = val | 0;
+ toBuffer(debug) {
+ let bytes = this.toArray(debug);
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; i++) {
+ let val = bytes[i];
+ if ((typeof val) == "string") val = val.charCodeAt(0);
+ view[i] = val | 0;
}
return buffer;
-}
+ }
-WasmModuleBuilder.prototype.instantiate = function(ffi, memory) {
- var buffer = this.toBuffer();
- if (memory != undefined) {
- return Wasm.instantiateModule(buffer, ffi, memory);
- } else {
- return Wasm.instantiateModule(buffer, ffi);
- }
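+  // Compile with the JS API and instantiate, forwarding any arguments
+  // (typically the import object), e.g. builder.instantiate({m: {f() {}}}).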
+ instantiate(...args) {
+ let module = new WebAssembly.Module(this.toBuffer());
+ let instance = new WebAssembly.Instance(module, ...args);
+ return instance;
+ }
}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-object-api.js b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
index 2f25c66fce..4e1df8cf14 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-object-api.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
@@ -9,4 +9,9 @@ assertFalse(undefined == Wasm);
assertEquals("function", typeof Wasm.verifyModule);
assertEquals("function", typeof Wasm.verifyFunction);
assertEquals("function", typeof Wasm.instantiateModule);
-assertEquals("function", typeof Wasm.instantiateModuleFromAsm);
+assertFalse(undefined == Wasm.experimentalVersion);
+
+assertEquals('object', typeof WebAssembly);
+assertEquals('function', typeof WebAssembly.Module);
+assertEquals('function', typeof WebAssembly.Instance);
+assertEquals('function', typeof WebAssembly.compile);
diff --git a/deps/v8/test/mozilla/mozilla.gyp b/deps/v8/test/mozilla/mozilla.gyp
index 1202d28c0f..0327dd8dab 100644
--- a/deps/v8/test/mozilla/mozilla.gyp
+++ b/deps/v8/test/mozilla/mozilla.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'mozilla.isolate',
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 0f7d4aa1ba..d4eebeec54 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -59,11 +59,14 @@
# TODO(turbofan): Causes timeouts since top-level code is optimized.
'ecma_3/Statements/regress-324650': [PASS, NO_VARIANTS],
+ 'ecma_3/Statements/regress-444979': [PASS, NO_VARIANTS],
'ecma_3/Statements/regress-74474-002': [PASS, NO_VARIANTS],
'ecma_3/Statements/regress-74474-003': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-111557': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-155081': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-155081-2': [PASS, NO_VARIANTS],
+ 'js1_5/Regress/regress-159334': [PASS, NO_VARIANTS],
+ 'js1_5/Regress/regress-321971': [PASS, NO_VARIANTS],
'js1_5/Regress/regress-451322': [PASS, NO_VARIANTS],
# TODO(turbofan): Large switch statements crash.
@@ -187,7 +190,7 @@
'ecma_3/RegExp/regress-209067': [PASS, ['mode == debug', FAIL]],
'js1_5/GC/regress-278725': [PASS, ['mode == debug', FAIL]],
# http://b/issue?id=1206983
- 'js1_5/Regress/regress-367561-03': [PASS, ['mode == debug', FAIL]],
+ 'js1_5/Regress/regress-367561-03': [PASS, ['mode == debug', FAIL], NO_VARIANTS],
'ecma/Date/15.9.5.10-2': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
# These tests create two Date objects just after each other and
@@ -239,7 +242,7 @@
'js1_5/Array/regress-99120-01': [PASS, FAIL, NO_VARIANTS],
'js1_5/Array/regress-99120-02': [PASS, FAIL],
'js1_5/Regress/regress-347306-01': [PASS, FAIL],
- 'js1_5/Regress/regress-416628': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
+ 'js1_5/Regress/regress-416628': [PASS, FAIL, ['mode == debug', TIMEOUT], NO_VARIANTS],
# The following two tests assume that daylight savings time starts first
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 0eb32c87bd..d721a8c95f 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -27,8 +27,6 @@
import os
-import shutil
-import subprocess
from testrunner.local import testsuite
from testrunner.objects import testcase
@@ -112,21 +110,6 @@ class MozillaTestSuite(testsuite.TestSuite):
return True
return "FAILED!" in testcase.output.stdout
- def DownloadData(self):
- print "Mozilla download is deprecated. It's part of DEPS."
-
- # Clean up old directories and archive files.
- directory_old_name = os.path.join(self.root, "data.old")
- if os.path.exists(directory_old_name):
- shutil.rmtree(directory_old_name)
-
- archive_files = [f for f in os.listdir(self.root)
- if f.startswith("downloaded_")]
- if len(archive_files) > 0:
- print "Clobber outdated test archives ..."
- for f in archive_files:
- os.remove(os.path.join(self.root, f))
-
def GetSuite(name, root):
return MozillaTestSuite(name, root)
diff --git a/deps/v8/test/optimize_for_size.gyp b/deps/v8/test/optimize_for_size.gyp
index ac40ba8499..047e3d8acb 100644
--- a/deps/v8/test/optimize_for_size.gyp
+++ b/deps/v8/test/optimize_for_size.gyp
@@ -16,8 +16,8 @@
'webkit/webkit.gyp:webkit_run',
],
'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'optimize_for_size.isolate',
diff --git a/deps/v8/test/perf.gyp b/deps/v8/test/perf.gyp
index ff846068f4..4f024f774f 100644
--- a/deps/v8/test/perf.gyp
+++ b/deps/v8/test/perf.gyp
@@ -14,8 +14,8 @@
'../src/d8.gyp:d8_run',
],
'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'perf.isolate',
diff --git a/deps/v8/test/preparser/preparser.gyp b/deps/v8/test/preparser/preparser.gyp
index cb763d6dc5..8e944ed009 100644
--- a/deps/v8/test/preparser/preparser.gyp
+++ b/deps/v8/test/preparser/preparser.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'preparser.isolate',
diff --git a/deps/v8/test/simdjs/simdjs.gyp b/deps/v8/test/simdjs/simdjs.gyp
index df0aa5e972..c0c24dc325 100644
--- a/deps/v8/test/simdjs/simdjs.gyp
+++ b/deps/v8/test/simdjs/simdjs.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'simdjs.isolate',
diff --git a/deps/v8/test/simdjs/testcfg.py b/deps/v8/test/simdjs/testcfg.py
index c055d5a3d3..d22368b0a6 100644
--- a/deps/v8/test/simdjs/testcfg.py
+++ b/deps/v8/test/simdjs/testcfg.py
@@ -4,7 +4,6 @@
import os
-import shutil
import sys
from testrunner.local import testsuite
@@ -53,21 +52,6 @@ class SimdJsTestSuite(testsuite.TestSuite):
return True
return "FAILED!" in testcase.output.stdout
- def DownloadData(self):
- print "SimdJs download is deprecated. It's part of DEPS."
-
- # Clean up old directories and archive files.
- directory_old_name = os.path.join(self.root, "data.old")
- if os.path.exists(directory_old_name):
- shutil.rmtree(directory_old_name)
-
- archive_files = [f for f in os.listdir(self.root)
- if f.startswith("ecmascript_simd-")]
- if len(archive_files) > 0:
- print "Clobber outdated test archives ..."
- for f in archive_files:
- os.remove(os.path.join(self.root, f))
-
def GetSuite(name, root):
return SimdJsTestSuite(name, root)
diff --git a/deps/v8/test/test262/BUILD.gn b/deps/v8/test/test262/BUILD.gn
new file mode 100644
index 0000000000..9aaa7770c1
--- /dev/null
+++ b/deps/v8/test/test262/BUILD.gn
@@ -0,0 +1,34 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/isolate.gni")
+
+if (v8_test_isolation_mode != "noop") {
+ action("archive_test262") {
+ visibility = [ ":*" ]
+
+ script = "archive.py"
+
+ inputs = [
+ "list.py",
+ ]
+
+ sources = exec_script("list.py", [], "list lines")
+
+ outputs = [
+ "$target_gen_dir/test262_archiving.stamp",
+ ]
+
+ args = rebase_path(outputs, root_build_dir)
+ }
+}
+
+v8_isolate_run("test262") {
+ deps = [
+ ":archive_test262",
+ "../..:d8_run",
+ ]
+
+ isolate = "test262.isolate"
+}
diff --git a/deps/v8/test/test262/archive.py b/deps/v8/test/test262/archive.py
index c265b32421..894853e208 100755
--- a/deps/v8/test/test262/archive.py
+++ b/deps/v8/test/test262/archive.py
@@ -4,7 +4,13 @@
# found in the LICENSE file.
import os
+import sys
import tarfile
+import time
+
+# In GN we expect the path to a stamp file as an argument.
+if len(sys.argv) == 2:
+ STAMP_FILE = os.path.abspath(sys.argv[1])
os.chdir(os.path.dirname(os.path.abspath(__file__)))
@@ -13,7 +19,8 @@ tarfile.grp = None
tarfile.pwd = None
def filter_git(tar_info):
- if tar_info.name.startswith(os.path.join('data', '.git')):
+ if tar_info.name.startswith(os.path.join('data', '.git')) or \
+ tar_info.name.startswith(os.path.join('harness', '.git')):
return None
else:
tar_info.uname = tar_info.gname = "test262"
@@ -21,3 +28,11 @@ def filter_git(tar_info):
with tarfile.open('data.tar', 'w') as tar:
tar.add('data', filter=filter_git)
+ tar.add('harness', filter=filter_git)
+
+# Workaround for GN. We can't specify the tarfile as output because it's
+# not in the product directory. Therefore we track running of this script
+# with an extra stamp file in the product directory.
+if len(sys.argv) == 2:
+ with open(STAMP_FILE, 'w') as f:
+ f.write(str(time.time()))
diff --git a/deps/v8/test/test262/detachArrayBuffer.js b/deps/v8/test/test262/detachArrayBuffer.js
new file mode 100644
index 0000000000..adfece7edb
--- /dev/null
+++ b/deps/v8/test/test262/detachArrayBuffer.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function $DETACHBUFFER(buffer) {
+ %ArrayBufferNeuter(buffer);
+}
diff --git a/deps/v8/test/test262/list.py b/deps/v8/test/test262/list.py
index 69ca62cf20..0e82cb59ad 100755
--- a/deps/v8/test/test262/list.py
+++ b/deps/v8/test/test262/list.py
@@ -5,10 +5,11 @@
import os
import tarfile
+from itertools import chain
os.chdir(os.path.dirname(os.path.abspath(__file__)))
-for root, dirs, files in os.walk("data"):
+for root, dirs, files in chain(os.walk("data"), os.walk("harness")):
dirs[:] = [d for d in dirs if not d.endswith('.git')]
for name in files:
# These names are for gyp, which expects slashes on all platforms.
diff --git a/deps/v8/test/test262/test262.gyp b/deps/v8/test/test262/test262.gyp
index 5d79adda35..2bdc6cefe5 100644
--- a/deps/v8/test/test262/test262.gyp
+++ b/deps/v8/test/test262/test262.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'test262.isolate',
diff --git a/deps/v8/test/test262/test262.isolate b/deps/v8/test/test262/test262.isolate
index 0ac045af17..4e1419030e 100644
--- a/deps/v8/test/test262/test262.isolate
+++ b/deps/v8/test/test262/test262.isolate
@@ -5,6 +5,7 @@
'variables': {
'files': [
'data.tar',
+ 'detachArrayBuffer.js',
'harness-adapt.js',
'test262.status',
'testcfg.py',
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index b1bdd0adb0..80bb9d109e 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -71,6 +71,10 @@
'language/expressions/assignment/S11.13.1_A6*': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4709
+ 'built-ins/Promise/reject-function-name': [FAIL],
+ 'built-ins/Promise/resolve-function-name': [FAIL],
+ 'built-ins/Promise/all/resolve-element-function-name': [FAIL],
+ 'built-ins/Promise/executor-function-name': [FAIL],
'built-ins/Proxy/revocable/revocation-function-name': [FAIL],
'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
'language/expressions/assignment/fn-name-lhs-member': [FAIL],
@@ -81,11 +85,6 @@
'intl402/DateTimeFormat/prototype/format/format-function-name': [FAIL],
'intl402/Collator/prototype/compare/compare-function-name': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4778
- 'intl402/Collator/prototype/compare/name': [FAIL],
- 'intl402/DateTimeFormat/prototype/format/name': [FAIL],
- 'intl402/NumberFormat/prototype/format/name': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4251
'language/expressions/postfix-increment/S11.3.1_A5_T1': [FAIL],
'language/expressions/postfix-increment/S11.3.1_A5_T2': [FAIL],
@@ -100,41 +99,37 @@
'language/asi/S7.9_A5.7_T1': [PASS, FAIL_OK],
###### BEGIN REGEXP SUBCLASSING SECTION ######
- # Spec change in progress https://github.com/tc39/ecma262/pull/494
- # RegExpBuiltinMatch reads flags from [[OriginalFlags]]
- 'built-ins/RegExp/prototype/Symbol.match/builtin-coerce-sticky': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-get-global-err': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-get-sticky-err': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-success-g-set-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-success-g-set-lastindex-err': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/coerce-sticky': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/get-sticky-err': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.replace/coerce-global': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.replace/coerce-unicode': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.replace/get-sticky-coerce': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.replace/get-sticky-err': [SKIP],
- 'built-ins/RegExp/prototype/Symbol.search/get-sticky-coerce': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.search/get-sticky-err': [FAIL],
- 'built-ins/RegExp/prototype/exec/get-sticky-coerce': [FAIL],
- 'built-ins/RegExp/prototype/exec/get-sticky-err': [FAIL],
- 'built-ins/RegExp/prototype/test/get-sticky-err': [FAIL],
-
- # Missing lastIndex support
- 'built-ins/RegExp/prototype/Symbol.split/str-result-coerce-length-err': [FAIL],
-
# Times out
- 'built-ins/RegExp/prototype/Symbol.split/str-coerce-lastindex': [SKIP],
'built-ins/RegExp/prototype/Symbol.match/coerce-global': [SKIP],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-coerce-global': [SKIP],
# Sticky support busted
'built-ins/RegExp/prototype/Symbol.replace/y-init-lastindex': [FAIL],
'built-ins/RegExp/prototype/Symbol.replace/y-set-lastindex': [FAIL],
+ # https://code.google.com/p/v8/issues/detail?id=4504
+ # https://bugs.chromium.org/p/chromium/issues/detail?id=624318
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-y-set-lastindex-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-success-g-set-lastindex-err': [SKIP],
+ 'built-ins/RegExp/prototype/Symbol.match/builtin-success-y-set-lastindex-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/g-init-lastindex-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/g-match-empty-set-lastindex-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.match/y-fail-lastindex-no-write': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/g-init-lastindex-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/y-fail-lastindex-no-write': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-init-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-restore-err': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-lastindex-no-write': [PASS, FAIL],
+ 'built-ins/RegExp/prototype/test/y-fail-lastindex-no-write': [PASS, FAIL],
+
# SKIP rather than FAIL, as the test checks for an exception which
# happens to be thrown for some other reason.
'built-ins/RegExp/prototype/Symbol.split/str-result-get-length-err': [SKIP],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5123
+ 'built-ins/RegExp/prototype/Symbol.replace/coerce-global': [FAIL],
+ 'built-ins/RegExp/prototype/Symbol.replace/coerce-unicode': [FAIL],
+
###### END REGEXP SUBCLASSING SECTION ######
# https://code.google.com/p/v8/issues/detail?id=4360
@@ -142,71 +137,291 @@
'intl402/DateTimeFormat/12.1.1_1': [FAIL],
'intl402/NumberFormat/11.1.1_1': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4476
- 'built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional': [FAIL],
- 'built-ins/String/prototype/toLocaleLowerCase/supplementary_plane': [FAIL],
- 'built-ins/String/prototype/toLowerCase/special_casing_conditional': [FAIL],
- 'built-ins/String/prototype/toLowerCase/supplementary_plane': [FAIL],
- 'built-ins/String/prototype/toLocaleUpperCase/supplementary_plane': [FAIL],
- 'built-ins/String/prototype/toUpperCase/supplementary_plane': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4477
- 'intl402/String/prototype/toLocaleLowerCase/special_casing_Azeri': [FAIL],
- 'intl402/String/prototype/toLocaleLowerCase/special_casing_Lithuanian': [FAIL],
- 'intl402/String/prototype/toLocaleLowerCase/special_casing_Turkish': [FAIL],
- 'intl402/String/prototype/toLocaleUpperCase/special_casing_Azeri': [FAIL],
- 'intl402/String/prototype/toLocaleUpperCase/special_casing_Lithuanian': [FAIL],
- 'intl402/String/prototype/toLocaleUpperCase/special_casing_Turkish': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4784
+ 'built-ins/TypedArray/prototype/set/typedarray-arg-negative-integer-offset-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/set/array-arg-negative-integer-offset-throws': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4901
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-greater-than-last-index': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-lower-than-zero': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-minus-zero': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-not-integer': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/key-is-numericindex-desc-not-writable': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/key-is-minus-zero': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/key-is-not-integer': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/key-is-out-of-bounds': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/tonumber-value-throws': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4895
+ 'built-ins/TypedArrays/internals/HasProperty/detached-buffer': [FAIL],
+ 'built-ins/TypedArrays/internals/Set/detached-buffer': [FAIL],
+ 'built-ins/TypedArrays/internals/GetOwnProperty/detached-buffer': [FAIL],
+ 'built-ins/TypedArrays/internals/Get/detached-buffer': [FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/detached-buffer': [FAIL],
+ # Some TypedArray methods throw due to the same bug, from Get
+ 'built-ins/TypedArray/prototype/every/callbackfn-detachbuffer': [FAIL],
+ 'built-ins/TypedArray/prototype/filter/callbackfn-detachbuffer': [FAIL],
+ 'built-ins/TypedArray/prototype/find/predicate-may-detach-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/findIndex/predicate-may-detach-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/forEach/callbackfn-detachbuffer': [FAIL],
+ 'built-ins/TypedArray/prototype/map/callbackfn-detachbuffer': [FAIL],
+ 'built-ins/TypedArray/prototype/reduce/callbackfn-detachbuffer': [FAIL],
+ 'built-ins/TypedArray/prototype/reduceRight/callbackfn-detachbuffer': [FAIL],
+ 'built-ins/TypedArray/prototype/set/array-arg-targetbuffer-detached-on-get-src-value-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/set/array-arg-targetbuffer-detached-on-tointeger-offset-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/set/array-arg-targetbuffer-detached-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/set/typedarray-arg-srcbuffer-detached-during-tointeger-offset-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/set/typedarray-arg-targetbuffer-detached-during-tointeger-offset-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-other-targettype': [FAIL],
+ 'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-same-targettype': [FAIL],
+ 'built-ins/TypedArray/prototype/slice/detached-buffer-get-ctor': [FAIL],
+ 'built-ins/TypedArray/prototype/slice/detached-buffer-speciesctor-get-species-custom-ctor-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/some/callbackfn-detachbuffer': [FAIL],
+ 'built-ins/TypedArray/prototype/sort/detached-buffer-comparefn': [FAIL],
+ # DataView functions should also throw on detached buffers
+ 'built-ins/DataView/prototype/getFloat32/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getFloat32/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/getFloat64/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getFloat64/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/getInt16/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getInt16/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/getInt32/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getInt32/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/getInt8/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getInt8/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/getUint16/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getUint16/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/getUint32/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getUint32/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/getUint8/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/getUint8/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setFloat32/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setFloat32/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setFloat64/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setFloat64/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setInt16/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setInt16/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setInt32/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setInt32/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setInt8/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setInt8/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setUint16/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setUint16/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setUint32/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setUint32/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setUint8/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/setUint8/detached-buffer-before-outofrange-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/byteLength/detached-buffer': [FAIL],
+ 'built-ins/DataView/prototype/byteOffset/detached-buffer': [FAIL],
+ 'built-ins/DataView/detached-buffer': [FAIL],
+ 'built-ins/ArrayBuffer/prototype/byteLength/detached-buffer': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4648
+ 'built-ins/TypedArray/prototype/copyWithin/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/entries/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/every/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/fill/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/filter/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/find/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/findIndex/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/forEach/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/includes/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/indexOf/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/join/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/keys/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/lastIndexOf/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/map/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/reverse/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/slice/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/some/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/sort/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/toLocaleString/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/toString/detached-buffer': [FAIL],
+ 'built-ins/TypedArray/prototype/values/detached-buffer': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4034
+ 'built-ins/ThrowTypeError/unique-per-realm-function-proto': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4231
+ 'language/eval-code/direct/var-env-lower-lex-catch-non-strict': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4951
+ 'language/expressions/assignment/dstr-array-elem-iter-rtrn-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-iter-rtrn-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-iter-rtrn-close-null': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-iter-thrw-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-iter-thrw-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-rtrn-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-rtrn-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-rtrn-close-null': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-thrw-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-list-thrw-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-rtrn-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-rtrn-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-rtrn-close-null': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-thrw-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-elem-trlg-iter-rest-thrw-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-rest-iter-rtrn-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-rest-iter-rtrn-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-rest-iter-rtrn-close-null': [FAIL],
+ 'language/expressions/assignment/dstr-array-rest-iter-thrw-close': [FAIL],
+ 'language/expressions/assignment/dstr-array-rest-iter-thrw-close-err': [FAIL],
+ 'language/expressions/assignment/dstr-array-rest-lref-err': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-iter-rtrn-close': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-iter-rtrn-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-iter-rtrn-close-null': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-iter-thrw-close': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-iter-thrw-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-list-rtrn-close': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-list-rtrn-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-list-rtrn-close-null': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-list-thrw-close': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-list-thrw-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-rtrn-close': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-rtrn-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-rtrn-close-null': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-thrw-close': [FAIL],
+ 'language/statements/for-of/dstr-array-elem-trlg-iter-rest-thrw-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-rest-iter-rtrn-close': [FAIL],
+ 'language/statements/for-of/dstr-array-rest-iter-rtrn-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-rest-iter-rtrn-close-null': [FAIL],
+ 'language/statements/for-of/dstr-array-rest-iter-thrw-close': [FAIL],
+ 'language/statements/for-of/dstr-array-rest-iter-thrw-close-err': [FAIL],
+ 'language/statements/for-of/dstr-array-rest-lref-err': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=896
+ 'language/literals/regexp/early-err-pattern': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4628
- 'language/eval-code/non-definable-function-with-variable': [FAIL],
- 'language/eval-code/non-definable-function-with-function': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4592
- 'built-ins/ArrayBuffer/length-is-absent': [FAIL],
- 'built-ins/ArrayBuffer/length-is-not-number': [FAIL],
- 'built-ins/ArrayBuffer/positive-integer-length': [FAIL],
- 'language/statements/class/subclass/builtin-objects/ArrayBuffer/regular-subclassing': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4633
- 'built-ins/Promise/reject-function-name': [FAIL],
- 'built-ins/Promise/resolve-function-name': [FAIL],
- 'built-ins/Promise/all/resolve-element-function-name': [FAIL],
- 'built-ins/Promise/executor-function-name': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4634
- 'built-ins/DataView/prototype/setFloat64/index-check-before-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setFloat32/index-check-before-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setInt16/index-check-before-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setInt32/index-check-before-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setUint16/index-check-before-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setUint32/index-check-before-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setUint8/index-check-before-value-conversion': [FAIL],
- 'built-ins/DataView/prototype/setInt8/index-check-before-value-conversion': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4706
- 'language/statements/class/subclass/builtin-objects/NativeError/EvalError-message': [FAIL],
- 'language/statements/class/subclass/builtin-objects/NativeError/RangeError-message': [FAIL],
- 'language/statements/class/subclass/builtin-objects/NativeError/ReferenceError-message': [FAIL],
- 'language/statements/class/subclass/builtin-objects/NativeError/SyntaxError-message': [FAIL],
- 'language/statements/class/subclass/builtin-objects/NativeError/TypeError-message': [FAIL],
- 'language/statements/class/subclass/builtin-objects/NativeError/URIError-message': [FAIL],
- 'language/statements/class/subclass/builtin-objects/Error/message-property-assignment': [FAIL],
-
- # https://code.google.com/p/chromium/issues/detail?id=581577
- 'built-ins/RegExp/prototype/source/15.10.7.1-1': [FAIL],
- 'built-ins/RegExp/prototype/global/15.10.7.2-1': [FAIL],
- 'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-1': [FAIL],
- 'built-ins/RegExp/prototype/multiline/15.10.7.4-1': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4727
- 'built-ins/TypedArrays/length-arg-is-undefined-throws': [FAIL],
- 'built-ins/TypedArrays/length-arg-is-symbol-throws': [FAIL],
- 'built-ins/TypedArrays/length-arg-is-float-throws-rangeerror': [FAIL],
- 'built-ins/TypedArrays/length-arg-is-nan-throws-rangeerror': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4784
- 'built-ins/TypedArrays/buffer-arg-defined-negative-length': [FAIL],
+ 'language/eval-code/direct/non-definable-function-with-function': [FAIL],
+ 'language/eval-code/direct/non-definable-function-with-variable': [FAIL],
+ 'language/eval-code/indirect/non-definable-function-with-function': [FAIL],
+ 'language/eval-code/indirect/non-definable-function-with-variable': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4124
+ 'built-ins/Simd/*': [SKIP],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4958
+ 'built-ins/Function/prototype/toString/Function': [FAIL],
+ 'built-ins/Function/prototype/toString/GeneratorFunction': [FAIL],
+ 'built-ins/Function/prototype/toString/function-declaration': [FAIL],
+ 'built-ins/Function/prototype/toString/function-declaration-non-simple-parameter-list': [FAIL],
+ 'built-ins/Function/prototype/toString/function-expression': [FAIL],
+ 'built-ins/Function/prototype/toString/generator-function-declaration': [FAIL],
+ 'built-ins/Function/prototype/toString/generator-function-expression': [FAIL],
+ 'built-ins/Function/prototype/toString/generator-method': [FAIL],
+ 'built-ins/Function/prototype/toString/getter-class': [FAIL],
+ 'built-ins/Function/prototype/toString/getter-class-static': [FAIL],
+ 'built-ins/Function/prototype/toString/getter-object': [FAIL],
+ 'built-ins/Function/prototype/toString/line-terminator-normalisation-CR': [FAIL],
+ 'built-ins/Function/prototype/toString/line-terminator-normalisation-CR-LF': [FAIL],
+ 'built-ins/Function/prototype/toString/line-terminator-normalisation-LF': [FAIL],
+ 'built-ins/Function/prototype/toString/method-class': [FAIL],
+ 'built-ins/Function/prototype/toString/method-class-static': [FAIL],
+ 'built-ins/Function/prototype/toString/method-computed-property-name': [FAIL],
+ 'built-ins/Function/prototype/toString/method-object': [FAIL],
+ 'built-ins/Function/prototype/toString/setter-class': [FAIL],
+ 'built-ins/Function/prototype/toString/setter-class-static': [FAIL],
+ 'built-ins/Function/prototype/toString/setter-object': [FAIL],
+ 'built-ins/Function/prototype/toString/unicode': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=1569
+ 'language/eval-code/direct/export': [SKIP],
+ 'language/eval-code/direct/import': [SKIP],
+ 'language/eval-code/indirect/export': [SKIP],
+ 'language/eval-code/indirect/import': [SKIP],
+ 'language/module-code/*': [SKIP],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5012
+ # http://bugs.icu-project.org/trac/ticket/12671
+ 'intl402/Intl/getCanonicalLocales/weird-cases': [FAIL],
+ # https://github.com/tc39/test262/issues/743
+ 'intl402/Intl/getCanonicalLocales/main': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5115
+ 'language/statements/class/subclass/class-definition-null-proto-missing-return-override': [FAIL],
+ 'language/statements/class/subclass/class-definition-null-proto-this': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5116
+ 'built-ins/Object/internals/DefineOwnProperty/nan-equivalence': [PASS, FAIL],
+ 'built-ins/TypedArray/prototype/fill/fill-values-conversion-operations-consistent-nan': [PASS, FAIL],
+ 'built-ins/TypedArrays/internals/DefineOwnProperty/conversion-operation-consistent-nan': [PASS, FAIL],
+ 'built-ins/TypedArrays/internals/Set/conversion-operation-consistent-nan': [PASS, FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5070
+ 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-configurable': [FAIL],
+ 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-extensible': [FAIL],
+ 'annexB/built-ins/Object/prototype/__defineGetter__/this-non-obj': [FAIL],
+ 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-configurable': [FAIL],
+ 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-extensible': [FAIL],
+ 'annexB/built-ins/Object/prototype/__defineSetter__/this-non-obj': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/this-non-obj': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/this-non-obj': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4973
+ 'language/literals/numeric/non-octal-decimal-integer-strict': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5130
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/lookup-own-data': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/lookup-own-get-err': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/lookup-own-proto-err': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/lookup-proto-data': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/lookup-proto-get-err': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/lookup-proto-proto-err': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/lookup-own-data': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/lookup-own-get-err': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/lookup-own-proto-err': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/lookup-proto-data': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/lookup-proto-get-err': [FAIL],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/lookup-proto-proto-err': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4451
+ 'annexB/language/eval-code/direct/global-block-decl-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/direct/global-if-decl-else-decl-a-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/direct/global-if-decl-else-decl-b-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/direct/global-if-decl-else-stmt-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/direct/global-if-decl-no-else-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/direct/global-if-stmt-else-decl-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/direct/global-switch-case-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/direct/global-switch-dflt-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-block-decl-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-if-decl-else-decl-a-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-if-decl-else-decl-b-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-if-decl-else-stmt-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-if-decl-no-else-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-if-stmt-else-decl-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-switch-case-eval-global-exsting-global-init': [FAIL],
+ 'annexB/language/eval-code/indirect/global-switch-dflt-eval-global-exsting-global-init': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5135
+ 'annexB/language/eval-code/direct/func-block-decl-eval-func-block-scoping': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-else-decl-a-eval-func-block-scoping': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-else-decl-b-eval-func-block-scoping': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-else-stmt-eval-func-block-scoping': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-decl-no-else-eval-func-block-scoping': [FAIL],
+ 'annexB/language/eval-code/direct/func-if-stmt-else-decl-eval-func-block-scoping': [FAIL],
+ 'annexB/language/eval-code/direct/func-switch-case-eval-func-block-scoping': [FAIL],
+ 'annexB/language/eval-code/direct/func-switch-dflt-eval-func-block-scoping': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5136
+ 'annexB/language/comments/multi-line-html-close': [FAIL],
+ 'annexB/language/comments/single-line-html-close': [FAIL],
+ 'annexB/language/comments/single-line-html-close-asi': [FAIL],
+ 'annexB/language/comments/single-line-html-open': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5137
+ 'annexB/built-ins/RegExp/prototype/compile/flags-undefined': [FAIL],
+ 'annexB/built-ins/RegExp/prototype/compile/pattern-regexp-distinct': [FAIL],
+ 'annexB/built-ins/RegExp/prototype/compile/pattern-regexp-same': [FAIL],
+ 'annexB/built-ins/RegExp/prototype/compile/pattern-undefined': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5139
+ 'annexB/built-ins/Date/prototype/setYear/time-clip': [FAIL],
+ 'annexB/built-ins/Date/prototype/setYear/year-number-relative': [FAIL],
+
+ # Fixed by https://github.com/tc39/test262/pull/662.
+ 'built-ins/Object/getOwnPropertyDescriptors/duplicate-keys': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -215,13 +430,10 @@
# incompatibilities if the test cases turn out to be broken or ambiguous.
# Some of these are related to v8:4361, as visible side effects from Intl.
'intl402/6.2.3': [FAIL],
- 'intl402/9.2.1_2': [FAIL],
- 'intl402/9.2.6_2': [FAIL],
'intl402/Collator/10.1.2.1_4': [FAIL],
'intl402/Collator/10.1.2_a': [PASS, FAIL],
'intl402/Collator/10.2.3_b': [PASS, FAIL],
'intl402/Collator/prototype/10.3_a': [FAIL],
- 'intl402/DateTimeFormat/12.1.1': [FAIL],
'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
'intl402/DateTimeFormat/12.1.2.1_4': [FAIL],
'intl402/DateTimeFormat/12.2.3_b': [FAIL],
@@ -239,6 +451,19 @@
# https://code.google.com/p/v8/issues/detail?id=4693
'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4953
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor-abrupt': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor-inherited': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor-returns-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-abrupt': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor-invocation': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor-returns-another-instance': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor-throws': [FAIL],
+ 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-returns-throws': [FAIL],
+
# We do not expose Array.prototype.values due to webcompat issues.
# Most recent incompatibility: https://crbug.com/615873
# https://code.google.com/p/v8/issues/detail?id=4247
@@ -254,34 +479,35 @@
############################ INVALID TESTS #############################
- # The reference value calculated by Test262 is incorrect if you run these
- # tests in PST/PDT between first Sunday in March and first Sunday in April.
- # The DST switch was moved in 2007 whereas Test262 bases the reference value
- # on 2000. Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
- 'built-ins/Date/S15.9.3.1_A5_T1': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T2': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T3': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T4': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T5': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T6': [PASS, FAIL_OK],
-
# Test makes unjustified assumptions about the number of calls to SortCompare.
# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
'built-ins/Array/prototype/sort/bug_596_1': [PASS, FAIL_OK],
- # Test bug https://github.com/tc39/test262/issues/518
- 'built-ins/TypedArrays/object-arg-throws-setting-typedarray-property': [FAIL],
- 'built-ins/Object/getOwnPropertyDescriptors/duplicate-keys': [FAIL],
- 'built-ins/Object/getOwnPropertyDescriptors/symbols-included': [FAIL],
+ # https://github.com/tc39/test262/issues/677
+ 'built-ins/RegExp/prototype/source/15.10.7.1-1': [FAIL],
+ 'built-ins/RegExp/prototype/global/15.10.7.2-1': [FAIL],
+ 'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-1': [FAIL],
+ 'built-ins/RegExp/prototype/multiline/15.10.7.4-1': [FAIL],
- # Test bug https://github.com/tc39/test262/issues/521
- 'built-ins/TypedArray/from/mapfn-is-not-callable': [FAIL],
+ # https://github.com/tc39/test262/issues/694
+ 'built-ins/TypedArrays/length-arg-toindex-length': [FAIL],
- # Test bug https://github.com/tc39/test262/issues/529
- 'built-ins/Math/cos/S15.8.2.7_A7': [PASS, FAIL_OK],
- 'built-ins/Math/sin/S15.8.2.16_A7': [PASS, FAIL_OK],
- 'built-ins/Math/tan/S15.8.2.18_A7': [PASS, FAIL_OK],
- 'built-ins/Math/exp/S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
+ # https://github.com/tc39/test262/issues/696
+ 'language/statements/class/subclass/builtin-objects/ArrayBuffer/regular-subclassing': [FAIL],
+
+ # https://github.com/tc39/test262/issues/685
+ 'built-ins/DataView/prototype/setUint8/range-check-after-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setUint16/range-check-after-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setUint32/range-check-after-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setInt8/range-check-after-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setInt16/range-check-after-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setInt32/range-check-after-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setFloat32/range-check-after-value-conversion': [FAIL],
+ 'built-ins/DataView/prototype/setFloat64/range-check-after-value-conversion': [FAIL],
+
+ # https://github.com/tc39/test262/issues/686
+ 'built-ins/DataView/prototype/setFloat32/toindex-byteoffset': [FAIL],
+ 'built-ins/DataView/prototype/setFloat64/toindex-byteoffset': [FAIL],
############################ SKIPPED TESTS #############################
@@ -305,26 +531,6 @@
'built-ins/Array/prototype/slice/S15.4.4.10_A3_T2': [SKIP],
}], # ALWAYS
-['system == linux', {
- # BUG(v8:4437).
- 'intl402/Collator/10.1.1_19_c': [PASS, FAIL],
- 'intl402/Collator/9.2.5_11_g_ii_2': [PASS, FAIL],
- 'intl402/NumberFormat/11.1.1_17': [PASS, FAIL],
- 'intl402/NumberFormat/11.1.1_19': [PASS, FAIL],
- 'intl402/NumberFormat/prototype/format/11.3.2_TRF': [PASS, FAIL],
- 'intl402/NumberFormat/prototype/format/11.3.2_TRP': [PASS, FAIL],
-}], # system == linux
-
-['system == macos', {
- # BUG(v8:4437).
- 'intl402/Collator/10.1.1_19_c': [SKIP],
- 'intl402/Collator/9.2.5_11_g_ii_2': [SKIP],
- 'intl402/NumberFormat/11.1.1_17': [SKIP],
- 'intl402/NumberFormat/11.1.1_19': [SKIP],
- 'intl402/NumberFormat/prototype/format/11.3.2_TRF': [SKIP],
- 'intl402/NumberFormat/prototype/format/11.3.2_TRP': [SKIP],
-}], # system == macos
-
['no_i18n == True', {
# Unicode canonicalization is not available with i18n turned off.
'built-ins/String/prototype/localeCompare/15.5.4.9_CE': [SKIP],
@@ -336,8 +542,23 @@
'built-ins/String/prototype/normalize/return-normalized-string': [SKIP],
'built-ins/String/prototype/normalize/return-normalized-string-from-coerced-form': [SKIP],
'built-ins/String/prototype/normalize/return-normalized-string-using-default-parameter': [SKIP],
-}], # no_i18n == True
+ # Case-conversion is not fully compliant to the Unicode spec with i18n off.
+ 'built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional': [FAIL],
+ 'built-ins/String/prototype/toLocaleLowerCase/supplementary_plane': [FAIL],
+ 'built-ins/String/prototype/toLowerCase/special_casing_conditional': [FAIL],
+ 'built-ins/String/prototype/toLowerCase/supplementary_plane': [FAIL],
+ 'built-ins/String/prototype/toLocaleUpperCase/supplementary_plane': [FAIL],
+ 'built-ins/String/prototype/toUpperCase/supplementary_plane': [FAIL],
+
+ # Locale-sensitive case-conversion is not available with i18n off.
+ 'intl402/String/prototype/toLocaleLowerCase/special_casing_Azeri': [FAIL],
+ 'intl402/String/prototype/toLocaleLowerCase/special_casing_Lithuanian': [FAIL],
+ 'intl402/String/prototype/toLocaleLowerCase/special_casing_Turkish': [FAIL],
+ 'intl402/String/prototype/toLocaleUpperCase/special_casing_Azeri': [FAIL],
+ 'intl402/String/prototype/toLocaleUpperCase/special_casing_Lithuanian': [FAIL],
+ 'intl402/String/prototype/toLocaleUpperCase/special_casing_Turkish': [FAIL],
+}], # no_i18n == True
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {
@@ -364,6 +585,7 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4639
# The failed allocation causes an asan/msan/tsan error
'built-ins/ArrayBuffer/allocation-limit': [SKIP],
+ 'built-ins/ArrayBuffer/length-is-too-large-throws': [SKIP],
}], # asan == True or msan == True or tsan == True
]
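Most of the new expectations above concern operations on detached ArrayBuffers. For orientation: detaching (called "neutering" in the 5.4-era embedder API) releases a buffer's backing store while existing views stay alive, and ES2016 requires subsequent accesses through those views to throw. The sketch below is illustrative only and not part of the patch; it assumes the v8::ArrayBuffer API of this era and a malloc-based ArrayBuffer::Allocator. The test262 harness reaches the same state from JavaScript through the detachArrayBuffer.js include wired up in testcfg.py below.

#include <cstdlib>
#include "include/v8.h"

// Sketch: reach the "detached" state that the [FAIL] lines above exercise.
void DetachForIllustration(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 64);
  // Externalize() hands the backing store to the embedder; Neuter() then
  // detaches the buffer from every view created over it.
  v8::ArrayBuffer::Contents contents = buffer->Externalize();
  buffer->Neuter();
  // buffer->ByteLength() is now 0, and any element access through a
  // TypedArray or DataView over this buffer must throw per the spec.
  free(contents.Data());  // Embedder-owned after Externalize().
}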
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index bf007bd46f..391b0e8c4f 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -26,10 +26,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import hashlib
import imp
import os
-import shutil
import sys
import tarfile
@@ -43,10 +41,14 @@ DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
ARCHIVE = DATA + ".tar"
TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
+TEST_262_NATIVE_FILES = ["detachArrayBuffer.js"]
TEST_262_SUITE_PATH = ["data", "test"]
TEST_262_HARNESS_PATH = ["data", "harness"]
-TEST_262_TOOLS_PATH = ["data", "tools", "packaging"]
+TEST_262_TOOLS_PATH = ["harness", "src"]
+
+sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
+ *TEST_262_TOOLS_PATH))
ALL_VARIANT_FLAGS_STRICT = dict(
(v, [flags + ["--use-strict"] for flags in flag_sets])
@@ -129,8 +131,14 @@ class Test262TestSuite(testsuite.TestSuite):
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + context.mode_flags + self.harness +
self.GetIncludesForTest(testcase) + ["--harmony"] +
+ (["--module"] if "module" in self.GetTestRecord(testcase) else []) +
[os.path.join(self.testroot, testcase.path + ".js")] +
- (["--throws"] if "negative" in self.GetTestRecord(testcase) else []))
+ (["--throws"] if "negative" in self.GetTestRecord(testcase)
+ else []) +
+ (["--allow-natives-syntax"]
+ if "detachArrayBuffer.js" in
+ self.GetTestRecord(testcase).get("includes", [])
+ else []))
def _VariantGeneratorFactory(self):
return Test262VariantGenerator
@@ -158,11 +166,14 @@ class Test262TestSuite(testsuite.TestSuite):
testcase.path)
return testcase.test_record
+ def BasePath(self, filename):
+ return self.root if filename in TEST_262_NATIVE_FILES else self.harnesspath
+
def GetIncludesForTest(self, testcase):
test_record = self.GetTestRecord(testcase)
if "includes" in test_record:
- includes = [os.path.join(self.harnesspath, f)
- for f in test_record["includes"]]
+ return [os.path.join(self.BasePath(filename), filename)
+ for filename in test_record.get("includes", [])]
else:
includes = []
return includes
@@ -195,21 +206,7 @@ class Test262TestSuite(testsuite.TestSuite):
return outcome != statusfile.FAIL
return not outcome in (testcase.outcomes or [statusfile.PASS])
- def DownloadData(self):
- print "Test262 download is deprecated. It's part of DEPS."
-
- # Clean up old directories and archive files.
- directory_old_name = os.path.join(self.root, "data.old")
- if os.path.exists(directory_old_name):
- shutil.rmtree(directory_old_name)
-
- archive_files = [f for f in os.listdir(self.root)
- if f.startswith("tc39-test262-")]
- if len(archive_files) > 0:
- print "Clobber outdated test archives ..."
- for f in archive_files:
- os.remove(os.path.join(self.root, f))
-
+ def PrepareSources(self):
# The archive is created only on swarming. Local checkouts have the
# data folder.
if os.path.exists(ARCHIVE) and not os.path.exists(DATA):
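The testcfg.py changes above thread three pieces of a test's frontmatter record into the d8 command line: --module for module tests, --throws for negative tests, and --allow-natives-syntax whenever the detachArrayBuffer.js include is requested (that harness file detaches buffers through a natives-syntax runtime call). A standalone sketch of the mapping, hypothetical and using a plain dict in place of the real test record object:

def flags_for(test_record, test_path):
    # Mirrors the new GetFlagsForTestCase logic, minus harness and include paths.
    flags = ["--harmony"]
    if "module" in test_record:
        flags.append("--module")
    flags.append(test_path + ".js")
    if "negative" in test_record:
        flags.append("--throws")
    if "detachArrayBuffer.js" in test_record.get("includes", []):
        flags.append("--allow-natives-syntax")
    return flags

# For a detached-buffer test this yields:
# ['--harmony', 'built-ins/DataView/detached-buffer.js', '--allow-natives-syntax']
print(flags_for({"includes": ["detachArrayBuffer.js"]},
                "built-ins/DataView/detached-buffer"))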
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
new file mode 100644
index 0000000000..7193afb966
--- /dev/null
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -0,0 +1,77 @@
+# Copyright 2016 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# The sources are kept automatically in sync with unittests.gyp.
+
+import("../../gni/v8.gni")
+
+gypi_values = exec_script("//build/gypi_to_gn.py",
+ [ rebase_path("unittests.gyp") ],
+ "scope",
+ [ "unittests.gyp" ])
+
+v8_executable("unittests") {
+ testonly = true
+
+ sources = gypi_values.unittests_sources
+
+ if (v8_current_cpu == "arm") {
+ sources += gypi_values.unittests_sources_arm
+ } else if (v8_current_cpu == "arm64") {
+ sources += gypi_values.unittests_sources_arm64
+ } else if (v8_current_cpu == "x86") {
+ sources += gypi_values.unittests_sources_ia32
+ } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
+ sources += gypi_values.unittests_sources_mips
+ } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
+ sources += gypi_values.unittests_sources_mips64
+ } else if (v8_current_cpu == "x64") {
+ sources += gypi_values.unittests_sources_x64
+ } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
+ sources += gypi_values.unittests_sources_ppc
+ } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
+ sources += gypi_values.unittests_sources_s390
+ }
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ # TODO(machenbach): Translate from gyp.
+ #['OS=="aix"', {
+ # 'ldflags': [ '-Wl,-bbigtoc' ],
+ #}],
+
+ deps = [
+ "../..:v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ if (is_component_build) {
+ # compiler-unittests can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ deps += [ "../..:v8_maybe_snapshot" ]
+ } else {
+ deps += [ "../..:v8" ]
+ }
+
+ if (is_win) {
+ # This warning is benignly triggered by the U16 and U32 macros in
+ # bytecode-utils.h.
+ # C4309: 'static_cast': truncation of constant value
+ cflags = [ "/wd4309" ]
+
+ # Suppress warnings about importing locally defined symbols.
+ if (is_component_build) {
+ ldflags = [
+ "/ignore:4049",
+ "/ignore:4217",
+ ]
+ }
+ }
+}
diff --git a/deps/v8/test/unittests/atomic-utils-unittest.cc b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
index ad33853d58..8e90c423e2 100644
--- a/deps/v8/test/unittests/atomic-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
@@ -4,11 +4,11 @@
#include <limits.h>
-#include "src/atomic-utils.h"
+#include "src/base/atomic-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
-namespace internal {
+namespace base {
TEST(AtomicNumber, Constructor) {
// Test some common types.
@@ -58,6 +58,29 @@ TEST(AtomicNumber, Increment) {
EXPECT_EQ(std::numeric_limits<size_t>::max(), c.Value());
}
+TEST(AtomicNumber, Decrement) {
+ AtomicNumber<size_t> a(std::numeric_limits<size_t>::max());
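+ // Incrementing past size_t's maximum wraps around to zero.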
+ a.Increment(1);
+ EXPECT_EQ(0u, a.Value());
+ a.Decrement(1);
+ EXPECT_EQ(std::numeric_limits<size_t>::max(), a.Value());
+}
+
+TEST(AtomicNumber, OperatorAdditionAssignment) {
+ AtomicNumber<size_t> a(0u);
+ AtomicNumber<size_t> b(std::numeric_limits<size_t>::max());
+ a += b.Value();
+ EXPECT_EQ(a.Value(), b.Value());
+ EXPECT_EQ(b.Value(), std::numeric_limits<size_t>::max());
+}
+
+TEST(AtomicNumber, OperatorSubtractionAssignment) {
+ AtomicNumber<size_t> a(std::numeric_limits<size_t>::max());
+ AtomicNumber<size_t> b(std::numeric_limits<size_t>::max());
+ a -= b.Value();
+ EXPECT_EQ(a.Value(), 0u);
+ EXPECT_EQ(b.Value(), std::numeric_limits<size_t>::max());
+}
namespace {
@@ -213,5 +236,5 @@ TEST(AtomicEnumSet, Equality) {
EXPECT_FALSE(a != b);
}
-} // namespace internal
+} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/ieee754-unittest.cc b/deps/v8/test/unittests/base/ieee754-unittest.cc
new file mode 100644
index 0000000000..2110b63976
--- /dev/null
+++ b/deps/v8/test/unittests/base/ieee754-unittest.cc
@@ -0,0 +1,405 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/base/ieee754.h"
+#include "src/base/macros.h"
+#include "testing/gmock-support.h"
+#include "testing/gtest-support.h"
+
+using testing::BitEq;
+using testing::IsNaN;
+
+namespace v8 {
+namespace base {
+namespace ieee754 {
+
+namespace {
+
+double const kE = 2.718281828459045;
+double const kPI = 3.141592653589793;
+double const kTwo120 = 1.329227995784916e+36;
+double const kInfinity = std::numeric_limits<double>::infinity();
+double const kQNaN = std::numeric_limits<double>::quiet_NaN();
+double const kSNaN = std::numeric_limits<double>::signaling_NaN();
+
+} // namespace
+
+TEST(Ieee754, Acos) {
+ EXPECT_THAT(acos(kInfinity), IsNaN());
+ EXPECT_THAT(acos(-kInfinity), IsNaN());
+ EXPECT_THAT(acos(kQNaN), IsNaN());
+ EXPECT_THAT(acos(kSNaN), IsNaN());
+
+ EXPECT_EQ(0.0, acos(1.0));
+}
+
+TEST(Ieee754, Acosh) {
+ // Tests for acosh for exceptional values
+ EXPECT_EQ(kInfinity, acosh(kInfinity));
+ EXPECT_THAT(acosh(-kInfinity), IsNaN());
+ EXPECT_THAT(acosh(kQNaN), IsNaN());
+ EXPECT_THAT(acosh(kSNaN), IsNaN());
+ EXPECT_THAT(acosh(0.9), IsNaN());
+
+ // Test basic acosh functionality
+ EXPECT_EQ(0.0, acosh(1.0));
+ // acosh(1.5) = log((sqrt(5)+3)/2), case 1 < x < 2
+ EXPECT_EQ(0.9624236501192069e0, acosh(1.5));
+ // acosh(4) = log(sqrt(15)+4), case 2 < x < 2^28
+ EXPECT_EQ(2.0634370688955608e0, acosh(4.0));
+ // acosh(2^50), case 2^28 < x
+ EXPECT_EQ(35.35050620855721e0, acosh(1125899906842624.0));
+ // acosh(most-positive-float), no overflow
+ EXPECT_EQ(710.4758600739439e0, acosh(1.7976931348623157e308));
+}
+
+TEST(Ieee754, Asin) {
+ EXPECT_THAT(asin(kInfinity), IsNaN());
+ EXPECT_THAT(asin(-kInfinity), IsNaN());
+ EXPECT_THAT(asin(kQNaN), IsNaN());
+ EXPECT_THAT(asin(kSNaN), IsNaN());
+
+ EXPECT_THAT(asin(0.0), BitEq(0.0));
+ EXPECT_THAT(asin(-0.0), BitEq(-0.0));
+}
+
+TEST(Ieee754, Asinh) {
+ // Tests for asinh for exceptional values
+ EXPECT_EQ(kInfinity, asinh(kInfinity));
+ EXPECT_EQ(-kInfinity, asinh(-kInfinity));
+ EXPECT_THAT(asinh(kQNaN), IsNaN());
+ EXPECT_THAT(asinh(kSNaN), IsNaN());
+
+ // Test basic asinh functionality
+ EXPECT_THAT(asinh(0.0), BitEq(0.0));
+ EXPECT_THAT(asinh(-0.0), BitEq(-0.0));
+ // asinh(2^-29) = 2^-29, case |x| < 2^-28, where asinh(x) = x
+ EXPECT_EQ(1.862645149230957e-9, asinh(1.862645149230957e-9));
+ // asinh(-2^-29) = -2^-29, case |x| < 2^-28, where asinh(x) = x
+ EXPECT_EQ(-1.862645149230957e-9, asinh(-1.862645149230957e-9));
+ // asinh(2^-28), case 2 > |x| >= 2^-28
+ EXPECT_EQ(3.725290298461914e-9, asinh(3.725290298461914e-9));
+ // asinh(-2^-28), case 2 > |x| >= 2^-28
+ EXPECT_EQ(-3.725290298461914e-9, asinh(-3.725290298461914e-9));
+ // asinh(1), case 2 > |x| > 2^-28
+ EXPECT_EQ(0.881373587019543e0, asinh(1.0));
+ // asinh(-1), case 2 > |x| > 2^-28
+ EXPECT_EQ(-0.881373587019543e0, asinh(-1.0));
+ // asinh(5), case 2^28 > |x| > 2
+ EXPECT_EQ(2.3124383412727525e0, asinh(5.0));
+ // asinh(-5), case 2^28 > |x| > 2
+ EXPECT_EQ(-2.3124383412727525e0, asinh(-5.0));
+ // asinh(2^28), case 2^28 > |x|
+ EXPECT_EQ(20.101268236238415e0, asinh(268435456.0));
+ // asinh(-2^28), case 2^28 > |x|
+ EXPECT_EQ(-20.101268236238415e0, asinh(-268435456.0));
+ // asinh(<most-positive-float>), no overflow
+ EXPECT_EQ(710.4758600739439e0, asinh(1.7976931348623157e308));
+ // asinh(-<most-positive-float>), no overflow
+ EXPECT_EQ(-710.4758600739439e0, asinh(-1.7976931348623157e308));
+}
+
+TEST(Ieee754, Atan) {
+ EXPECT_THAT(atan(kQNaN), IsNaN());
+ EXPECT_THAT(atan(kSNaN), IsNaN());
+ EXPECT_THAT(atan(-0.0), BitEq(-0.0));
+ EXPECT_THAT(atan(0.0), BitEq(0.0));
+ EXPECT_DOUBLE_EQ(1.5707963267948966, atan(kInfinity));
+ EXPECT_DOUBLE_EQ(-1.5707963267948966, atan(-kInfinity));
+}
+
+TEST(Ieee754, Atan2) {
+ EXPECT_THAT(atan2(kQNaN, kQNaN), IsNaN());
+ EXPECT_THAT(atan2(kQNaN, kSNaN), IsNaN());
+ EXPECT_THAT(atan2(kSNaN, kQNaN), IsNaN());
+ EXPECT_THAT(atan2(kSNaN, kSNaN), IsNaN());
+ EXPECT_DOUBLE_EQ(0.7853981633974483, atan2(kInfinity, kInfinity));
+ EXPECT_DOUBLE_EQ(2.356194490192345, atan2(kInfinity, -kInfinity));
+ EXPECT_DOUBLE_EQ(-0.7853981633974483, atan2(-kInfinity, kInfinity));
+ EXPECT_DOUBLE_EQ(-2.356194490192345, atan2(-kInfinity, -kInfinity));
+}
+
+TEST(Ieee754, Atanh) {
+ EXPECT_THAT(atanh(kQNaN), IsNaN());
+ EXPECT_THAT(atanh(kSNaN), IsNaN());
+ EXPECT_THAT(atanh(kInfinity), IsNaN());
+ EXPECT_EQ(kInfinity, atanh(1));
+ EXPECT_EQ(-kInfinity, atanh(-1));
+ EXPECT_DOUBLE_EQ(0.54930614433405478, atanh(0.5));
+}
+
+TEST(Ieee754, Cos) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(cos(kQNaN), IsNaN());
+ EXPECT_THAT(cos(kSNaN), IsNaN());
+ EXPECT_THAT(cos(kInfinity), IsNaN());
+ EXPECT_THAT(cos(-kInfinity), IsNaN());
+
+ // Tests for cos for |x| < pi/4
+ EXPECT_EQ(1.0, 1 / cos(-0.0));
+ EXPECT_EQ(1.0, 1 / cos(0.0));
+ // cos(x) = 1 for |x| < 2^-27
+ EXPECT_EQ(1, cos(2.3283064365386963e-10));
+ EXPECT_EQ(1, cos(-2.3283064365386963e-10));
+ // Test KERNELCOS for |x| < 0.3.
+ // cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+ EXPECT_EQ(0.9876883405951378, cos(0.15707963267948966));
+ // Test KERNELCOS for x ~= 0.78125
+ EXPECT_EQ(0.7100335477927638, cos(0.7812504768371582));
+ EXPECT_EQ(0.7100338835660797, cos(0.78125));
+ // Test KERNELCOS for |x| > 0.3.
+ // cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+ EXPECT_EQ(0.9238795325112867, cos(0.39269908169872414));
+ // Same KERNELCOS case as above, with a negative argument.
+ EXPECT_EQ(0.9238795325112867, cos(-0.39269908169872414));
+
+ // Tests for cos.
+ EXPECT_EQ(1, cos(3.725290298461914e-9));
+ // Cover different code paths in KERNELCOS.
+ EXPECT_EQ(0.9689124217106447, cos(0.25));
+ EXPECT_EQ(0.8775825618903728, cos(0.5));
+ EXPECT_EQ(0.7073882691671998, cos(0.785));
+ // Test that cos(Math.PI/2) != 0 since Math.PI is not exact.
+ EXPECT_EQ(6.123233995736766e-17, cos(1.5707963267948966));
+ // Test cos for various phases.
+ EXPECT_EQ(0.7071067811865474, cos(7.0 / 4 * kPI));
+ EXPECT_EQ(0.7071067811865477, cos(9.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865467, cos(11.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865471, cos(13.0 / 4 * kPI));
+ EXPECT_EQ(0.9367521275331447, cos(1000000.0));
+ EXPECT_EQ(-3.435757038074824e-12, cos(1048575.0 / 2 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(-0.9258790228548379e0, cos(kTwo120));
+ EXPECT_EQ(-0.9258790228548379e0, cos(-kTwo120));
+}
+
+TEST(Ieee754, Cosh) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(cosh(kQNaN), IsNaN());
+ EXPECT_THAT(cosh(kSNaN), IsNaN());
+ EXPECT_THAT(cosh(kInfinity), kInfinity);
+ EXPECT_THAT(cosh(-kInfinity), kInfinity);
+ EXPECT_EQ(1, cosh(0.0));
+ EXPECT_EQ(1, cosh(-0.0));
+}
+
+TEST(Ieee754, Exp) {
+ EXPECT_THAT(exp(kQNaN), IsNaN());
+ EXPECT_THAT(exp(kSNaN), IsNaN());
+ EXPECT_EQ(0.0, exp(-kInfinity));
+ EXPECT_EQ(0.0, exp(-1000));
+ EXPECT_EQ(0.0, exp(-745.1332191019412));
+ EXPECT_EQ(2.2250738585072626e-308, exp(-708.39641853226408));
+ EXPECT_EQ(3.307553003638408e-308, exp(-708.0));
+ EXPECT_EQ(4.9406564584124654e-324, exp(-7.45133219101941108420e+02));
+ EXPECT_EQ(0.36787944117144233, exp(-1.0));
+ EXPECT_EQ(1.0, exp(-0.0));
+ EXPECT_EQ(1.0, exp(0.0));
+ EXPECT_EQ(1.0, exp(2.2250738585072014e-308));
+
+ // Test that exp(x) is monotonic near 1.
+ EXPECT_GE(exp(1.0), exp(0.9999999999999999));
+ EXPECT_LE(exp(1.0), exp(1.0000000000000002));
+
+ // Test that we produce the correctly rounded result for 1.
+ EXPECT_EQ(kE, exp(1.0));
+
+ EXPECT_EQ(7.38905609893065e0, exp(2.0));
+ EXPECT_EQ(1.7976931348622732e308, exp(7.09782712893383973096e+02));
+ EXPECT_EQ(2.6881171418161356e+43, exp(100.0));
+ EXPECT_EQ(8.218407461554972e+307, exp(709.0));
+ EXPECT_EQ(1.7968190737295725e308, exp(709.7822265625e0));
+ EXPECT_EQ(kInfinity, exp(709.7827128933841e0));
+ EXPECT_EQ(kInfinity, exp(710.0));
+ EXPECT_EQ(kInfinity, exp(1000.0));
+ EXPECT_EQ(kInfinity, exp(kInfinity));
+}
+
+TEST(Ieee754, Expm1) {
+ EXPECT_THAT(expm1(kQNaN), IsNaN());
+ EXPECT_THAT(expm1(kSNaN), IsNaN());
+ EXPECT_EQ(-1.0, expm1(-kInfinity));
+ EXPECT_EQ(kInfinity, expm1(kInfinity));
+ EXPECT_EQ(0.0, expm1(-0.0));
+ EXPECT_EQ(0.0, expm1(0.0));
+ EXPECT_EQ(1.718281828459045, expm1(1.0));
+ EXPECT_EQ(2.6881171418161356e+43, expm1(100.0));
+ EXPECT_EQ(8.218407461554972e+307, expm1(709.0));
+ EXPECT_EQ(kInfinity, expm1(710.0));
+}
+
+TEST(Ieee754, Log) {
+ EXPECT_THAT(log(kQNaN), IsNaN());
+ EXPECT_THAT(log(kSNaN), IsNaN());
+ EXPECT_THAT(log(-kInfinity), IsNaN());
+ EXPECT_THAT(log(-1.0), IsNaN());
+ EXPECT_EQ(-kInfinity, log(-0.0));
+ EXPECT_EQ(-kInfinity, log(0.0));
+ EXPECT_EQ(0.0, log(1.0));
+ EXPECT_EQ(kInfinity, log(kInfinity));
+
+ // Test that log(E) produces the correctly rounded result.
+ EXPECT_EQ(1.0, log(kE));
+}
+
+TEST(Ieee754, Log1p) {
+ EXPECT_THAT(log1p(kQNaN), IsNaN());
+ EXPECT_THAT(log1p(kSNaN), IsNaN());
+ EXPECT_THAT(log1p(-kInfinity), IsNaN());
+ EXPECT_EQ(-kInfinity, log1p(-1.0));
+ EXPECT_EQ(0.0, log1p(0.0));
+ EXPECT_EQ(-0.0, log1p(-0.0));
+ EXPECT_EQ(kInfinity, log1p(kInfinity));
+ EXPECT_EQ(6.9756137364252422e-03, log1p(0.007));
+ EXPECT_EQ(709.782712893384, log1p(1.7976931348623157e308));
+ EXPECT_EQ(2.7755575615628914e-17, log1p(2.7755575615628914e-17));
+ EXPECT_EQ(9.313225741817976e-10, log1p(9.313225746154785e-10));
+ EXPECT_EQ(-0.2876820724517809, log1p(-0.25));
+ EXPECT_EQ(0.22314355131420976, log1p(0.25));
+ EXPECT_EQ(2.3978952727983707, log1p(10));
+ EXPECT_EQ(36.841361487904734, log1p(10e15));
+ EXPECT_EQ(37.08337388996168, log1p(12738099905822720));
+ EXPECT_EQ(37.08336444902049, log1p(12737979646738432));
+ EXPECT_EQ(1.3862943611198906, log1p(3));
+ EXPECT_EQ(1.3862945995384413, log1p(3 + 9.5367431640625e-7));
+ EXPECT_EQ(0.5596157879354227, log1p(0.75));
+ EXPECT_EQ(0.8109302162163288, log1p(1.25));
+}
+
+TEST(Ieee754, Log2) {
+ EXPECT_THAT(log2(kQNaN), IsNaN());
+ EXPECT_THAT(log2(kSNaN), IsNaN());
+ EXPECT_THAT(log2(-kInfinity), IsNaN());
+ EXPECT_THAT(log2(-1.0), IsNaN());
+ EXPECT_EQ(-kInfinity, log2(0.0));
+ EXPECT_EQ(-kInfinity, log2(-0.0));
+ EXPECT_EQ(kInfinity, log2(kInfinity));
+}
+
+TEST(Ieee754, Log10) {
+ EXPECT_THAT(log10(kQNaN), IsNaN());
+ EXPECT_THAT(log10(kSNaN), IsNaN());
+ EXPECT_THAT(log10(-kInfinity), IsNaN());
+ EXPECT_THAT(log10(-1.0), IsNaN());
+ EXPECT_EQ(-kInfinity, log10(0.0));
+ EXPECT_EQ(-kInfinity, log10(-0.0));
+ EXPECT_EQ(kInfinity, log10(kInfinity));
+ EXPECT_EQ(3.0, log10(1000.0));
+ EXPECT_EQ(14.0, log10(100000000000000)); // log10(10 ^ 14)
+ EXPECT_EQ(3.7389561269540406, log10(5482.2158));
+ EXPECT_EQ(14.661551142893833, log10(458723662312872.125782332587));
+ EXPECT_EQ(-0.9083828622192334, log10(0.12348583358871));
+ EXPECT_EQ(5.0, log10(100000.0));
+}
+
+TEST(Ieee754, Cbrt) {
+ EXPECT_THAT(cbrt(kQNaN), IsNaN());
+ EXPECT_THAT(cbrt(kSNaN), IsNaN());
+ EXPECT_EQ(kInfinity, cbrt(kInfinity));
+ EXPECT_EQ(-kInfinity, cbrt(-kInfinity));
+ EXPECT_EQ(1.4422495703074083, cbrt(3));
+ EXPECT_EQ(100, cbrt(100 * 100 * 100));
+ EXPECT_EQ(46.415888336127786, cbrt(100000));
+}
+
+TEST(Ieee754, Sin) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(sin(kQNaN), IsNaN());
+ EXPECT_THAT(sin(kSNaN), IsNaN());
+ EXPECT_THAT(sin(kInfinity), IsNaN());
+ EXPECT_THAT(sin(-kInfinity), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-kInfinity, 1 / sin(-0.0));
+ EXPECT_EQ(kInfinity, 1 / sin(0.0));
+ // sin(x) = x for x < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, sin(0.5));
+ EXPECT_EQ(-0.479425538604203, sin(-0.5));
+ EXPECT_EQ(1, sin(kPI / 2.0));
+ EXPECT_EQ(-1, sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, sin(1048576.0 / 4 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(0.377820109360752e0, sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, sin(-kTwo120));
+}
+
+TEST(Ieee754, Sinh) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(sinh(kQNaN), IsNaN());
+ EXPECT_THAT(sinh(kSNaN), IsNaN());
+ EXPECT_THAT(sinh(kInfinity), kInfinity);
+ EXPECT_THAT(sinh(-kInfinity), -kInfinity);
+ EXPECT_EQ(0.0, sinh(0.0));
+ EXPECT_EQ(-0.0, sinh(-0.0));
+}
+
+TEST(Ieee754, Tan) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(tan(kQNaN), IsNaN());
+ EXPECT_THAT(tan(kSNaN), IsNaN());
+ EXPECT_THAT(tan(kInfinity), IsNaN());
+ EXPECT_THAT(tan(-kInfinity), IsNaN());
+
+ // Tests for tan for |x| < pi/4
+ EXPECT_EQ(kInfinity, 1 / tan(0.0));
+ EXPECT_EQ(-kInfinity, 1 / tan(-0.0));
+ // tan(x) = x for |x| < 2^-28
+ EXPECT_EQ(2.3283064365386963e-10, tan(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, tan(-2.3283064365386963e-10));
+ // Test KERNELTAN for |x| > 0.67434.
+ EXPECT_EQ(0.8211418015898941, tan(11.0 / 16.0));
+ EXPECT_EQ(-0.8211418015898941, tan(-11.0 / 16.0));
+ EXPECT_EQ(0.41421356237309503, tan(0.39269908169872414));
+ // crbug/427468
+ EXPECT_EQ(0.7993357819992383, tan(0.6743358));
+
+ // Tests for tan.
+ EXPECT_EQ(3.725290298461914e-9, tan(3.725290298461914e-9));
+ // Test that tan(PI/2) != Infinity since PI is not exact.
+ EXPECT_EQ(1.633123935319537e16, tan(kPI / 2));
+ // Cover different code paths in KERNELTAN (tangent and cotangent)
+ EXPECT_EQ(0.5463024898437905, tan(0.5));
+ EXPECT_EQ(2.0000000000000027, tan(1.107148717794091));
+ EXPECT_EQ(-1.0000000000000004, tan(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.9999999999999994, tan(9.0 / 4.0 * kPI));
+ EXPECT_EQ(-6.420676210313675e-11, tan(1048576.0 / 2.0 * kPI));
+ EXPECT_EQ(2.910566692924059e11, tan(1048575.0 / 2.0 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(-0.40806638884180424e0, tan(kTwo120));
+ EXPECT_EQ(0.40806638884180424e0, tan(-kTwo120));
+}
+
+TEST(Ieee754, Tanh) {
+ // Test values mentioned in the EcmaScript spec.
+ EXPECT_THAT(tanh(kQNaN), IsNaN());
+ EXPECT_THAT(tanh(kSNaN), IsNaN());
+ EXPECT_THAT(tanh(kInfinity), 1);
+ EXPECT_THAT(tanh(-kInfinity), -1);
+ EXPECT_EQ(0.0, tanh(0.0));
+ EXPECT_EQ(-0.0, tanh(-0.0));
+}
+
+} // namespace ieee754
+} // namespace base
+} // namespace v8
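The kTwo120 cases above are the demanding ones: sin/cos/tan of 2^120 requires reducing the argument modulo pi/2 with far more precision than a double carries, which is what Payne-Hanek reduction supplies. A small freestanding illustration, not part of the patch; the "full-precision" value assumes a libm whose trig functions do full argument reduction, as fdlibm-derived implementations do:

#include <cmath>
#include <cstdio>

int main() {
  const double two120 = 1.329227995784916e+36;  // 2^120, as in kTwo120 above
  const double pi = 3.141592653589793;          // pi correctly rounded to double
  // Naive reduction: fmod against an approximate 2*pi. The quotient is about
  // 2^118, so pi's ~1e-16 relative error is amplified into a meaningless
  // remainder, and the cosine of that remainder is noise.
  double naive = std::cos(std::fmod(two120, 2 * pi));
  // With full-precision reduction the first line prints -0.9258790228548379,
  // matching the Ieee754.Cos expectation above; the second differs wildly.
  std::printf("full-precision reduction: %.16g\n", std::cos(two120));
  std::printf("naive fmod reduction:     %.16g\n", naive);
  return 0;
}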
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index b3bfbab319..8b81eb90d6 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -15,7 +15,10 @@
#include "src/base/win32-headers.h"
#endif
+#include <vector>
+
#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/platform.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -182,5 +185,77 @@ TEST(TimeTicks, IsMonotonic) {
}
}
+
+#if V8_OS_ANDROID
+#define MAYBE_ThreadNow DISABLED_ThreadNow
+#else
+#define MAYBE_ThreadNow ThreadNow
+#endif
+TEST(ThreadTicks, MAYBE_ThreadNow) {
+ if (ThreadTicks::IsSupported()) {
+ ThreadTicks::WaitUntilInitialized();
+ TimeTicks begin = TimeTicks::Now();
+ ThreadTicks begin_thread = ThreadTicks::Now();
+ // Make sure that ThreadNow value is non-zero.
+ EXPECT_GT(begin_thread, ThreadTicks());
+ // Sleep for 10 milliseconds to get the thread de-scheduled.
+ OS::Sleep(base::TimeDelta::FromMilliseconds(10));
+ ThreadTicks end_thread = ThreadTicks::Now();
+ TimeTicks end = TimeTicks::Now();
+ TimeDelta delta = end - begin;
+ TimeDelta delta_thread = end_thread - begin_thread;
+ // Make sure that some thread time has elapsed.
+ EXPECT_GT(delta_thread.InMicroseconds(), 0);
+ // But thread time should be at least 9ms less than wall-clock time,
+ // since the thread was de-scheduled while sleeping.
+ TimeDelta difference = delta - delta_thread;
+ EXPECT_GE(difference.InMicroseconds(), 9000);
+ }
+}
+
+
+#if V8_OS_WIN
+TEST(TimeTicks, TimerPerformance) {
+ // Verify that various timer mechanisms can always complete quickly.
+ // Note: This is a somewhat arbitrary test.
+ const int kLoops = 10000;
+
+ typedef TimeTicks (*TestFunc)();
+ struct TestCase {
+ TestFunc func;
+ const char *description;
+ };
+ // Cheating a bit here: assumes sizeof(TimeTicks) == sizeof(Time)
+ // in order to create a single test case list.
+ static_assert(sizeof(TimeTicks) == sizeof(Time),
+ "TimeTicks and Time must be the same size");
+ std::vector<TestCase> cases;
+ cases.push_back({reinterpret_cast<TestFunc>(&Time::Now), "Time::Now"});
+ cases.push_back({&TimeTicks::Now, "TimeTicks::Now"});
+
+ if (ThreadTicks::IsSupported()) {
+ ThreadTicks::WaitUntilInitialized();
+ cases.push_back(
+ {reinterpret_cast<TestFunc>(&ThreadTicks::Now), "ThreadTicks::Now"});
+ }
+
+ for (const auto& test_case : cases) {
+ TimeTicks start = TimeTicks::Now();
+ for (int index = 0; index < kLoops; index++)
+ test_case.func();
+ TimeTicks stop = TimeTicks::Now();
+ // Turning off the check for acceptable delays. Without this check,
+ // the test really doesn't do much other than measure. But the
+ // measurements are still useful for testing timers on various platforms.
+ // The reason to remove the check is that the tests run on many
+ // buildbots, some of which are VMs. These machines can run horribly
+ // slowly, and there is really no value in checking against a max timer.
+ // const int kMaxTime = 35;  // Maximum acceptable milliseconds for test.
+ // EXPECT_LT((stop - start).InMilliseconds(), kMaxTime);
+ printf("%s: %1.2fus per call\n", test_case.description,
+ (stop - start).InMillisecondsF() * 1000 / kLoops);
+ }
+}
+#endif // V8_OS_WIN
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/sys-info-unittest.cc b/deps/v8/test/unittests/base/sys-info-unittest.cc
index a760f941f6..a97c08c91c 100644
--- a/deps/v8/test/unittests/base/sys-info-unittest.cc
+++ b/deps/v8/test/unittests/base/sys-info-unittest.cc
@@ -5,12 +5,6 @@
#include "src/base/sys-info.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if V8_OS_NACL
-#define DISABLE_ON_NACL(Name) DISABLED_##Name
-#else
-#define DISABLE_ON_NACL(Name) Name
-#endif
-
namespace v8 {
namespace base {
@@ -18,8 +12,7 @@ TEST(SysInfoTest, NumberOfProcessors) {
EXPECT_LT(0, SysInfo::NumberOfProcessors());
}
-
-TEST(SysInfoTest, DISABLE_ON_NACL(AmountOfPhysicalMemory)) {
+TEST(SysInfoTest, AmountOfPhysicalMemory) {
EXPECT_LT(0, SysInfo::AmountOfPhysicalMemory());
}
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
new file mode 100644
index 0000000000..922ed2f44e
--- /dev/null
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
@@ -0,0 +1,156 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+
+#include "include/v8.h"
+#include "src/api.h"
+#include "src/ast/scopes.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
+#include "src/flags.h"
+#include "src/isolate-inl.h"
+#include "src/parsing/parse-info.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+typedef TestWithContext CompilerDispatcherJobTest;
+
+namespace {
+
+const char test_script[] = "(x) { x*x; }";
+
+class ScriptResource : public v8::String::ExternalOneByteStringResource {
+ public:
+ ScriptResource(const char* data, size_t length)
+ : data_(data), length_(length) {}
+ ~ScriptResource() override = default;
+
+ const char* data() const override { return data_; }
+ size_t length() const override { return length_; }
+
+ private:
+ const char* data_;
+ size_t length_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScriptResource);
+};
+
+Handle<JSFunction> CreateFunction(
+ Isolate* isolate, ExternalOneByteString::Resource* maybe_resource) {
+ HandleScope scope(isolate);
+ Handle<String> source;
+ if (maybe_resource) {
+ source = isolate->factory()
+ ->NewExternalStringFromOneByte(maybe_resource)
+ .ToHandleChecked();
+ } else {
+ source = isolate->factory()->NewStringFromAsciiChecked(test_script);
+ }
+ Handle<Script> script = isolate->factory()->NewScript(source);
+ Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
+ isolate->factory()->NewStringFromAsciiChecked("f"), MaybeHandle<Code>(),
+ false);
+ SharedFunctionInfo::SetScript(shared, script);
+ shared->set_end_position(source->length());
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, handle(isolate->context(), isolate));
+ return scope.CloseAndEscape(function);
+}
+
+} // namespace
+
+TEST_F(CompilerDispatcherJobTest, Construct) {
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+}
+
+TEST_F(CompilerDispatcherJobTest, CanParseOnBackgroundThread) {
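+ // A function backed by an ordinary heap string must be parsed on the main
+ // thread; one backed by an external script resource can be parsed on a
+ // background thread.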
+ {
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+ ASSERT_FALSE(job->can_parse_on_background_thread());
+ }
+ {
+ ScriptResource script(test_script, strlen(test_script));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), &script), FLAG_stack_size));
+ ASSERT_TRUE(job->can_parse_on_background_thread());
+ }
+}
+
+TEST_F(CompilerDispatcherJobTest, StateTransitions) {
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), nullptr), FLAG_stack_size));
+
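+ // Expected lifecycle: kInitial -> kReadyToParse -> kParsed ->
+ // kReadyToCompile, and back to kInitial after a reset.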
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+ job->PrepareToParseOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToParse);
+ job->Parse();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kParsed);
+ ASSERT_TRUE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToCompile);
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+TEST_F(CompilerDispatcherJobTest, SyntaxError) {
+ ScriptResource script("^^^", strlen("^^^"));
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ i_isolate(), CreateFunction(i_isolate(), &script), FLAG_stack_size));
+
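+ // "^^^" does not parse; finalizing should fail and leave the job in kFailed
+ // with a pending exception on the isolate.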
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ ASSERT_FALSE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kFailed);
+ ASSERT_TRUE(i_isolate()->has_pending_exception());
+
+ i_isolate()->clear_pending_exception();
+
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+TEST_F(CompilerDispatcherJobTest, ScopeChain) {
+ const char script[] =
+ "function g() { var g = 1; function f(x) { return x * g }; return f; } "
+ "g();";
+ Handle<JSFunction> f = Handle<JSFunction>::cast(Utils::OpenHandle(
+ *v8::Script::Compile(isolate()->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate(), script,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked()
+ ->Run(isolate()->GetCurrentContext())
+ .ToLocalChecked()));
+
+ std::unique_ptr<CompilerDispatcherJob> job(
+ new CompilerDispatcherJob(i_isolate(), f, FLAG_stack_size));
+
+ job->PrepareToParseOnMainThread();
+ job->Parse();
+ ASSERT_TRUE(job->FinalizeParsingOnMainThread());
+ ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToCompile);
+
+ const AstRawString* var_x =
+ job->parse_info_->ast_value_factory()->GetOneByteString("x");
+ Variable* var = job->parse_info_->literal()->scope()->Lookup(var_x);
+ ASSERT_TRUE(var);
+ ASSERT_TRUE(var->IsUnallocated());
+
+ const AstRawString* var_g =
+ job->parse_info_->ast_value_factory()->GetOneByteString("g");
+ var = job->parse_info_->literal()->scope()->Lookup(var_g);
+ ASSERT_TRUE(var);
+ ASSERT_TRUE(var->IsContextSlot());
+
+ job->ResetOnMainThread();
+ ASSERT_TRUE(job->status() == CompileJobStatus::kInitial);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 72cfc51d58..6317d91fa9 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1392,8 +1392,8 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -1403,6 +1403,39 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
+TEST_F(InstructionSelectorMemoryAccessTest, LoadWithShiftedIndex) {
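+ // ARM ldr accepts a register offset shifted by an immediate (LSL #imm), so
+ // the explicit Word32Shl should fold into the load's addressing mode.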
+ TRACED_FORRANGE(int, immediate_shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Return(m.Load(MachineType::Int32(), m.Parameter(0), index));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorMemoryAccessTest, StoreWithShiftedIndex) {
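+ // Likewise for str: the shifted index should become part of the store's
+ // addressing mode instead of a separate shift instruction.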
+ TRACED_FORRANGE(int, immediate_shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Store(MachineRepresentation::kWord32, m.Parameter(0), index,
+ m.Parameter(2), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmStr, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
// -----------------------------------------------------------------------------
// Conversions.
@@ -1866,36 +1899,6 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
}
-TEST_F(InstructionSelectorTest, Float32SubWithMinusZero) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
@@ -1971,6 +1974,250 @@ TEST_F(InstructionSelectorTest, Float64Sqrt) {
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
+// -----------------------------------------------------------------------------
+// Flag-setting instructions.
+
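+// These tests check that a comparison of a binop's result against zero can
+// reuse the flags set by the flag-setting form of the binop (e.g. cmn, tst,
+// teq), so that no separate cmp instruction is needed.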
+const Comparison kBinopCmpZeroRightInstructions[] = {
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual,
+ kEqual},
+ {&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kNotEqual, kEqual,
+ kNotEqual},
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kNegative,
+ kPositiveOrZero, kNegative},
+ {&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kPositiveOrZero, kNegative, kPositiveOrZero},
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kEqual, kNotEqual, kEqual},
+ {&RawMachineAssembler::Uint32GreaterThan, "Uint32GreaterThan", kNotEqual,
+ kEqual, kNotEqual}};
+
+const Comparison kBinopCmpZeroLeftInstructions[] = {
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual,
+ kEqual},
+ {&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kNotEqual, kEqual,
+ kNotEqual},
+ {&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kNegative,
+ kPositiveOrZero, kNegative},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kPositiveOrZero, kNegative, kPositiveOrZero},
+ {&RawMachineAssembler::Uint32GreaterThanOrEqual, "Uint32GreaterThanOrEqual",
+ kEqual, kNotEqual, kEqual},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kNotEqual, kEqual,
+ kNotEqual}};
+
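+// no_output_opcode is the flag-setting variant that discards the result: cmn
+// for add, tst for and, teq for eor. orr has no such form, so it maps to
+// itself.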
+struct FlagSettingInst {
+ Constructor constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ ArchOpcode no_output_opcode;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagSettingInst& inst) {
+ return os << inst.constructor_name;
+}
+
+const FlagSettingInst kFlagSettingInstructions[] = {
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmCmn},
+ {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmTst},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmTeq}};
+
+typedef InstructionSelectorTestWithParam<FlagSettingInst>
+ InstructionSelectorFlagSettingTest;
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroRight) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with a single user: a cmp instruction.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroLeft) {
+ const FlagSettingInst inst = GetParam();
+ // Test a cmp with zero on the left-hand side.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroLeftInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(m.Int32Constant(0), binop);
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroOnlyUserInBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users, but in a different basic block.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ // We don't optimise this case at the moment.
+ if (cmp.flags_condition == kEqual || cmp.flags_condition == kNotEqual) {
+ continue;
+ }
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, ShiftedOperand) {
+ const FlagSettingInst inst = GetParam();
+ // Like the test above, but with a shifted input to the binary operator.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ // We don't optimise this case at the moment.
+ if (cmp.flags_condition == kEqual || cmp.flags_condition == kNotEqual) {
+ continue;
+ }
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* imm = m.Int32Constant(5);
+ Node* shift = m.Word32Shl(m.Parameter(1), imm);
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), shift);
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(5U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(5, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, UsersInSameBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users in the same basic block. We need to make sure
+ // we don't try to optimise this case.
+ TRACED_FOREACH(Comparison, cmp, kComparisons) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Int32Mul(m.Parameter(0), binop);
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(mul);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(inst.arch_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+ EXPECT_EQ(cmp.flags_condition == kEqual ? kArmTst : kArmCmp,
+ s[2]->arch_opcode());
+ EXPECT_EQ(kFlags_branch, s[2]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[2]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteImmediate) {
+ const FlagSettingInst inst = GetParam();
+ // Immediate on the left-hand side of the binary operator.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* imm = m.Int32Constant(3);
+ Node* binop = (m.*inst.constructor)(imm, m.Parameter(0));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(3, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteShift) {
+ const FlagSettingInst inst = GetParam();
+ // Left-hand side operand shifted by immediate.
+ TRACED_FOREACH(Comparison, cmp, kBinopCmpZeroRightInstructions) {
+ TRACED_FOREACH(Shift, shift, kShifts) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* imm = m.Int32Constant(5);
+ Node* shifted_operand = (m.*shift.constructor)(m.Parameter(0), imm);
+ Node* binop = (m.*inst.constructor)(shifted_operand, m.Parameter(1));
+ Node* comp = (m.*cmp.constructor)(binop, m.Int32Constant(0));
+ m.Return(comp);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(5, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(inst.arch_opcode == kArmOrr ? 2U : 1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+ }
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorFlagSettingTest,
+ ::testing::ValuesIn(kFlagSettingInstructions));
// -----------------------------------------------------------------------------
// Miscellaneous.
@@ -2228,7 +2475,7 @@ TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
- Stream s = m.Build(MLS);
+ Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmMls, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -2324,7 +2571,7 @@ TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
- Stream s = m.Build(MLS, SUDIV);
+ Stream s = m.Build(ARMv7, SUDIV);
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -2530,7 +2777,7 @@ TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIVAndMLS) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
- Stream s = m.Build(MLS, SUDIV);
+ Stream s = m.Build(ARMv7, SUDIV);
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -2954,6 +3201,70 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build(ARMv8);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmFloat64Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build(ARMv8);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmFloat64Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float32Neg) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float32Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float32Neg(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Neg) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float64Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float64Neg(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 73532aab2a..6ca5e5e684 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -184,8 +184,11 @@ const MachInst2 kOvfAddSubInstructions[] = {
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
kArm64Add32, MachineType::Int32()},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
- kArm64Sub32, MachineType::Int32()}};
-
+ kArm64Sub32, MachineType::Int32()},
+ {&RawMachineAssembler::Int64AddWithOverflow, "Int64AddWithOverflow",
+ kArm64Add, MachineType::Int64()},
+ {&RawMachineAssembler::Int64SubWithOverflow, "Int64SubWithOverflow",
+ kArm64Sub, MachineType::Int64()}};
// ARM64 shift instructions.
const Shift kShiftInstructions[] = {
@@ -311,7 +314,7 @@ const Conversion kConversionInstructions[] = {
kArm64Mov32, MachineType::Uint64()},
MachineType::Uint32()},
{{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
- kArm64Mov32, MachineType::Int32()},
+ kArchNop, MachineType::Int32()},
MachineType::Int64()},
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
kArm64Int32ToFloat64, MachineType::Float64()},
@@ -326,6 +329,52 @@ const Conversion kConversionInstructions[] = {
kArm64Float64ToUint32, MachineType::Uint32()},
MachineType::Float64()}};
+// ARM64 instructions that clear the top 32 bits of the destination.
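+// Writing a W register on ARM64 implicitly zeroes the upper 32 bits of the
+// corresponding X register, which is why a subsequent ChangeUint32ToUint64
+// can be elided.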
+const MachInst2 kCanElideChangeUint32ToUint64[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+ kArm64Add32, MachineType::Int32()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+ kArm64Sub32, MachineType::Int32()},
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kArm64Imod32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kArm64Udiv32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kArm64Umod32,
+ MachineType::Uint32()},
+};
+
} // namespace
@@ -1132,7 +1181,6 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
}
}
-
TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
@@ -1215,6 +1263,91 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
}
}
+TEST_F(InstructionSelectorTest, Word32EqualZeroAndBranchWithOneBitMask) {
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(mask), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ m.Branch(
+ m.Word32NotEqual(m.Word32And(m.Int32Constant(mask), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64EqualZeroAndBranchWithOneBitMask) {
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = V8_UINT64_C(1) << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ m.Branch(m.Word64Equal(m.Word64And(m.Int64Constant(mask), m.Parameter(0)),
+ m.Int64Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = V8_UINT64_C(1) << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ m.Branch(
+ m.Word64NotEqual(m.Word64And(m.Int64Constant(mask), m.Parameter(0)),
+ m.Int64Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+}
TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
{
@@ -1252,6 +1385,75 @@ TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
}
}
+TEST_F(InstructionSelectorTest, EqualZeroAndBranch) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word32Equal(p0, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word32NotEqual(p0, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word64Equal(p0, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word64NotEqual(p0, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+}
// -----------------------------------------------------------------------------
// Add and subtract instructions with overflow.
@@ -1407,6 +1609,29 @@ TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
}
}
+TEST_P(InstructionSelectorOvfAddSubTest, RORShift) {
+ // ADD and SUB do not support ROR shifts; make sure we do not try to merge
+ // them into the ADD/SUB instruction.
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ auto rotate = &RawMachineAssembler::Word64Ror;
+ ArchOpcode rotate_opcode = kArm64Ror;
+ if (type == MachineType::Int32()) {
+ rotate = &RawMachineAssembler::Word32Ror;
+ rotate_opcode = kArm64Ror32;
+ }
+ TRACED_FORRANGE(int32_t, imm, -32, 63) {
+ StreamBuilder m(this, type, type, type);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = (m.*rotate)(p1, m.Int32Constant(imm));
+ m.Return((m.*dpi.constructor)(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(rotate_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(dpi.arch_opcode, s[1]->arch_opcode());
+ }
+}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorOvfAddSubTest,
@@ -1574,12 +1799,11 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Asr, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(32, s.ToInt64(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
@@ -1596,7 +1820,6 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
}
@@ -2094,6 +2317,10 @@ TEST_P(InstructionSelectorConversionTest, Parameter) {
StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
Stream s = m.Build();
+ if (conv.mi.arch_opcode == kArchNop) {
+ ASSERT_EQ(0U, s.size());
+ return;
+ }
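+ // kArchNop conversions (such as TruncateInt64ToInt32 above) emit no code at
+ // all: reading the W view of an X register already yields the low 32 bits.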
ASSERT_EQ(1U, s.size());
EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
@@ -2105,6 +2332,154 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorConversionTest,
::testing::ValuesIn(kConversionInstructions));
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorElidedChangeUint32ToUint64Test;
+
+TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
+ const MachInst2 binop = GetParam();
+ StreamBuilder m(this, MachineType::Uint64(), binop.machine_type,
+ binop.machine_type);
+ m.Return(m.ChangeUint32ToUint64(
+ (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ // Make sure the `ChangeUint32ToUint64` node was turned into a no-op.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
+ // For each case, make sure the `ChangeUint32ToUint64` node was turned into
+ // a no-op.
+
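+ // Ldrb, Ldrh and the 32-bit Ldr all zero-extend into the 64-bit
+ // destination, so no separate extension instruction should be emitted.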
+ // Ldrb
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // Ldrh
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // LdrW
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64LdrW, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
+ // For each case, test that the conversion is merged into the load
+ // operation.
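+ // The signed variants (Ldrsb, Ldrsh, Ldrsw) do the extension as part of the
+ // load. For the unsigned narrow loads the loaded value is non-negative, so
+ // zero extension and sign extension to 64 bits agree.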
+ // ChangeInt32ToInt64(Load_Uint8) -> Ldrb
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int8) -> Ldrsb
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsb, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint16) -> Ldrh
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int16) -> Ldrsh
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint32) -> Ldrsw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int32) -> Ldrsw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
// -----------------------------------------------------------------------------
// Memory access instructions.
@@ -2244,12 +2619,131 @@ TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreZero) {
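+ // Storing a constant zero should not tie up a register for the value: the
+ // selector can pass it as an immediate operand (presumably backed by the
+ // zero register).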
+ const MemoryAccess memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithShiftedIndex) {
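+ // Unlike ARM, an ARM64 scaled register offset must be shifted by exactly
+ // log2 of the access size, so only that shift amount can fold into the
+ // load.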
+ const MemoryAccess memacc = GetParam();
+ TRACED_FORRANGE(int, immediate_shift, 0, 4) {
+ // 32 bit shift
+ {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Return(m.Load(memacc.type, m.Parameter(0), index));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the load instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ // 64 bit shift
+ {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int64());
+ Node* const index =
+ m.Word64Shl(m.Parameter(1), m.Int64Constant(immediate_shift));
+ m.Return(m.Load(memacc.type, m.Parameter(0), index));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the load instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.ldr_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithShiftedIndex) {
+ const MemoryAccess memacc = GetParam();
+ TRACED_FORRANGE(int, immediate_shift, 0, 4) {
+ // 32 bit shift
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Store(memacc.type.representation(), m.Parameter(0), index,
+ m.Parameter(2), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the store instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ // 64 bit shift
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int64(), memacc.type);
+ Node* const index =
+ m.Word64Shl(m.Parameter(1), m.Int64Constant(immediate_shift));
+ m.Store(memacc.type.representation(), m.Parameter(0), index,
+ m.Parameter(2), kNoWriteBarrier);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ if (immediate_shift == ElementSizeLog2Of(memacc.type.representation())) {
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ } else {
+ // Make sure we haven't merged the shift into the store instruction.
+ ASSERT_NE(1U, s.size());
+ EXPECT_NE(memacc.str_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ }
+ }
+ }
+}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
@@ -2616,6 +3110,7 @@ namespace {
struct IntegerCmp {
MachInst2 mi;
FlagsCondition cond;
+ FlagsCondition commuted_cond;
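+ // commuted_cond is the condition to use once the operands are swapped,
+ // e.g. "imm < x" becomes "x > imm".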
};
@@ -2628,19 +3123,24 @@ std::ostream& operator<<(std::ostream& os, const IntegerCmp& cmp) {
const IntegerCmp kIntegerCmpInstructions[] = {
{{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
MachineType::Int32()},
+ kEqual,
kEqual},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
MachineType::Int32()},
- kSignedLessThan},
+ kSignedLessThan,
+ kSignedGreaterThan},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kArm64Cmp32, MachineType::Int32()},
- kSignedLessThanOrEqual},
+ kSignedLessThanOrEqual,
+ kSignedGreaterThanOrEqual},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
MachineType::Uint32()},
- kUnsignedLessThan},
+ kUnsignedLessThan,
+ kUnsignedGreaterThan},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
kArm64Cmp32, MachineType::Uint32()},
- kUnsignedLessThanOrEqual}};
+ kUnsignedLessThanOrEqual,
+ kUnsignedGreaterThanOrEqual}};
} // namespace
@@ -2677,6 +3177,473 @@ TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
}
}
+TEST_F(InstructionSelectorTest, CmpWithImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
+ // is tested elsewhere.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
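+ // With the constant on the left, the selector should swap the operands so
+ // the immediate ends up on the right, commuting the condition.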
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), p0), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ ASSERT_LE(2U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnWithImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
+ // is tested elsewhere.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
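+ // Comparing against 0 - x can use cmn, which sets the flags of an
+ // addition: cmp(imm, -x) computes the same flags as cmn(imm, x).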
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), sub), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ ASSERT_LE(2U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmpSignedExtendByteOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
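+ // Shl 24 followed by Sar 24 is the sign-extend-byte idiom; it should map
+ // onto the cmp's extended-register operand (SXTB) rather than separate
+ // shift instructions.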
+ m.Return((m.*cmp.mi.constructor)(extend, m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnSignedExtendByteOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
+ m.Return((m.*cmp.mi.constructor)(extend, sub));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmpShiftByImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return((m.*cmp.mi.constructor)(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ // Cmp does not support ROR shifts.
+ if (shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ }
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnShiftByImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ m.Return((m.*cmp.mi.constructor)(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ sub));
+ Stream s = m.Build();
+ // Cmn does not support ROR shifts.
+ if (shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Flag-setting ADD and AND instructions.
+
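+// As on ARM, comparing the binop's result against zero should reuse the
+// flags set by cmn/tst rather than emitting the binop plus an explicit cmp.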
+const IntegerCmp kBinopCmpZeroRightInstructions[] = {
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNegative,
+ kNegative},
+ {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ kPositiveOrZero,
+ kPositiveOrZero},
+ {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Uint32GreaterThan, "Uint32GreaterThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual}};
+
+const IntegerCmp kBinopCmpZeroLeftInstructions[] = {
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual},
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNegative,
+ kNegative},
+ {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ kPositiveOrZero,
+ kPositiveOrZero},
+ {{&RawMachineAssembler::Uint32GreaterThanOrEqual,
+ "Uint32GreaterThanOrEqual", kArm64Cmp32, MachineType::Int32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kNotEqual,
+ kNotEqual}};
+
+struct FlagSettingInst {
+ MachInst2 mi;
+ ArchOpcode no_output_opcode;
+};
+
+std::ostream& operator<<(std::ostream& os, const FlagSettingInst& inst) {
+ return os << inst.mi.constructor_name;
+}
+
+const FlagSettingInst kFlagSettingInstructions[] = {
+ {{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32,
+ MachineType::Int32()},
+ kArm64Cmn32},
+ {{&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
+ MachineType::Int32()},
+ kArm64Tst32}};
+
+typedef InstructionSelectorTestWithParam<FlagSettingInst>
+ InstructionSelectorFlagSettingTest;
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroRight) {
+ const FlagSettingInst inst = GetParam();
+ // Add with a single user: a cmp instruction.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroLeft) {
+ const FlagSettingInst inst = GetParam();
+ // Test a cmp with zero on the left-hand side.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroLeftInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(m.Int32Constant(0), binop);
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CmpZeroOnlyUserInBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users, but in a different basic block.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ // For kEqual and kNotEqual, we generate a cbz or cbnz.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, ShiftedOperand) {
+ const FlagSettingInst inst = GetParam();
+ // Like the test above, but with a shifted input to the binary operator.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ // For kEqual and kNotEqual, we generate a cbz or cbnz.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* imm = m.Int32Constant(5);
+ Node* shift = m.Word32Shl(m.Parameter(1), imm);
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), shift);
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(binop);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(5U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(m.Parameter(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(5, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, UsersInSameBasicBlock) {
+ const FlagSettingInst inst = GetParam();
+ // Binop with additional users, in the same basic block. We need to make sure
+ // we don't try to optimise this case.
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* binop = (m.*inst.mi.constructor)(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Int32Mul(m.Parameter(0), binop);
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(mul);
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(inst.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_NE(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(kArm64Mul32, s[1]->arch_opcode());
+ EXPECT_EQ(cmp.cond == kEqual ? kArm64CompareAndBranch32 : kArm64Cmp32,
+ s[2]->arch_opcode());
+ EXPECT_EQ(kFlags_branch, s[2]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[2]->flags_condition());
+ }
+}
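// Taken together, CmpZeroOnlyUserInBasicBlock and UsersInSameBasicBlock pin
// down the guard on this fusion: extra uses of the binop in other blocks are
// fine, but a second use in the same block must disable it, presumably since
// that user may be scheduled between the flag-setting instruction and the
// branch and clobber NZCV. A self-contained sketch of the predicate, using
// hypothetical types rather than V8's real selector API:

#include <vector>

struct NodeSketch {  // hypothetical IR node, for illustration only
  std::vector<NodeSketch*> users;
  int block_id;
};

static bool CanUseFlagSettingBinop(const NodeSketch* binop,
                                   const NodeSketch* cmp) {
  for (const NodeSketch* user : binop->users) {
    // Any user other than the comparison living in the same block kills the
    // fusion; users in other blocks do not.
    if (user != cmp && user->block_id == binop->block_id) return false;
  }
  return true;
}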
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteImmediate) {
+ const FlagSettingInst inst = GetParam();
+  // Immediate on the left-hand side of the binary operator.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ // 3 can be an immediate on both arithmetic and logical instructions.
+ Node* imm = m.Int32Constant(3);
+ Node* binop = (m.*inst.mi.constructor)(imm, m.Parameter(0));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(3, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
+
+TEST_P(InstructionSelectorFlagSettingTest, CommuteShift) {
+ const FlagSettingInst inst = GetParam();
+ // Left-hand side operand shifted by immediate.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* imm = m.Int32Constant(5);
+ Node* shifted_operand = (m.*shift.mi.constructor)(m.Parameter(0), imm);
+ Node* binop = (m.*inst.mi.constructor)(shifted_operand, m.Parameter(1));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Return(comp);
+ Stream s = m.Build();
+ // Cmn does not support ROR shifts.
+ if (inst.no_output_opcode == kArm64Cmn32 &&
+ shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(inst.no_output_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(5, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+}
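// The early-out for kArm64Ror32 above mirrors an ISA restriction rather than
// a selector limitation: A64 add/sub-class operands (and hence cmn) accept
// only LSL/LSR/ASR register shifts, while the logical class (and hence tst)
// also accepts ROR. A sketch of the rule, with an illustrative enum that is
// not V8 code:

enum class Operand2Shift { kLSL, kLSR, kASR, kROR };

static bool ShiftFoldsIntoFlagSetter(bool logical_class, Operand2Shift shift) {
  // cmn/cmp take LSL/LSR/ASR only; tst additionally takes ROR, so a ROR'd
  // operand forces the separate shift + cmn pair (the 2-instruction case).
  return logical_class || shift != Operand2Shift::kROR;
}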
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorFlagSettingTest,
+ ::testing::ValuesIn(kFlagSettingInstructions));
+
+TEST_F(InstructionSelectorTest, TstInvalidImmediate) {
+ // Make sure we do not generate an invalid immediate for TST.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ // 5 is not a valid constant for TST.
+ Node* imm = m.Int32Constant(5);
+ Node* binop = m.Word32And(imm, m.Parameter(0));
+ Node* comp = (m.*cmp.mi.constructor)(binop, m.Int32Constant(0));
+ m.Branch(comp, &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ ASSERT_EQ(4U, s[0]->InputCount()); // The labels are also inputs.
+ EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+ EXPECT_NE(InstructionOperand::IMMEDIATE, s[0]->InputAt(0)->kind());
+ EXPECT_NE(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+}
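// Why 5 specifically: TST's immediate form uses A64 bitmask immediates, which
// (ignoring the replicated 2/4/8/16-bit element sizes for simplicity) must be
// a single rotated run of contiguous ones. 5 == 0b101 has two runs, so it can
// only be materialised in a register. A simplified single-run check as a
// sketch, using the GCC/Clang popcount builtin:

#include <cstdint>

static uint32_t RotateRight32(uint32_t v, unsigned n) {
  return (v >> n) | (v << ((32u - n) & 31u));
}

static bool IsSingleRotatedRunOfOnes(uint32_t v) {
  if (v == 0 || v == ~0u) return false;  // all-zeros/all-ones not encodable
  // Exactly two 0<->1 transitions around the circular pattern means one run.
  return __builtin_popcount(v ^ RotateRight32(v, 1)) == 2;
}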
+
+TEST_F(InstructionSelectorTest, CommuteAddsExtend) {
+ // Extended left-hand side operand.
+ TRACED_FOREACH(IntegerCmp, cmp, kBinopCmpZeroRightInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
+ Node* binop = m.Int32Add(extend, m.Parameter(1));
+ m.Return((m.*cmp.mi.constructor)(binop, m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
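// The shift pair built here is the canonical sign-extend-byte idiom, which the
// selector is expected to commute to the right-hand side and fold into the
// extended-register operand form, e.g. "cmn w1, w0, sxtb". Scalar equivalent
// of the pattern, as a sketch:

#include <cstdint>

static int32_t SignExtendByte(uint32_t x) {
  // Same value as (int32_t)(x << 24) >> 24: keep the low byte, sign-extend.
  return static_cast<int8_t>(x & 0xFFu);
}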
// -----------------------------------------------------------------------------
// Miscellaneous
@@ -3254,32 +4221,16 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Max(p0, p1);
+ Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float32Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float32Max, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Float64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
@@ -3288,17 +4239,16 @@ TEST_F(InstructionSelectorTest, Float32Max) {
}
-TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Min(p0, p1);
+ Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float32Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float32Min, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Float64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
@@ -3306,44 +4256,61 @@ TEST_F(InstructionSelectorTest, Float32Min) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-
-TEST_F(InstructionSelectorTest, Float64Max) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
- MachineType::Float64());
+TEST_F(InstructionSelectorTest, Float32Neg) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float64Max(p0, p1);
+ // Don't use m.Float32Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float32Neg(), m.Parameter(0));
m.Return(n);
Stream s = m.Build();
- // Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float64Max, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Float32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-
-TEST_F(InstructionSelectorTest, Float64Min) {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
- MachineType::Float64());
+TEST_F(InstructionSelectorTest, Float64Neg) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float64Min(p0, p1);
+ // Don't use m.Float64Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float64Neg(), m.Parameter(0));
m.Return(n);
Stream s = m.Build();
- // Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float64Min, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
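// Note on the AddNode() dance in both Neg tests: the RawMachineAssembler
// negation helpers apparently expanded to a subtraction from -0.0 at this
// point (compare the deleted Float64SubWithMinusZero test above, where that
// very subtraction was matched to kArm64Float64Neg), so the tests build the
// raw machine operator directly to exercise the dedicated Neg opcodes.
// Sketch of the two forms:
//
//   m.Float64Sub(m.Float64Constant(-0.0), x)   // explicit sub, pattern-matched
//   m.AddNode(m.machine()->Float64Neg(), x)    // direct machine operator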
+TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
+ {
+ int32_t immediates[] = {-256, -255, -3, -2, -1, 0, 1,
+ 2, 3, 255, 256, 260, 4096, 4100,
+ 8192, 8196, 3276, 3280, 16376, 16380};
+ TRACED_FOREACH(int32_t, index, immediates) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer());
+ Node* const load = m.Load(MachineType::Uint64(), m.Parameter(0),
+ m.Int32Constant(index - 4));
+ Node* const sar = m.Word64Sar(load, m.Int32Constant(32));
+ // Make sure we don't fold the shift into the following add:
+ m.Return(m.Int64Add(sar, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
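// The off-by-four expectation above (load at index - 4, immediate index on
// the resulting ldrsw) is the little-endian identity the matcher relies on:
// the arithmetic high half of a 64-bit load is a sign-extending 32-bit load
// four bytes higher. Scalar sketch of the equivalence:

#include <cstdint>
#include <cstring>

static int64_t HighWordSignExtended(const uint8_t* p) {
  int32_t hi;
  std::memcpy(&hi, p + 4, sizeof(hi));  // what "ldrsw xN, [p, #4]" loads
  return static_cast<int64_t>(hi);      // == (int64_t)load64(p) >> 32
}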
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
deleted file mode 100644
index fd0766caba..0000000000
--- a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
+++ /dev/null
@@ -1,628 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/code-stubs.h"
-#include "src/compiler/change-lowering.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "test/unittests/compiler/compiler-test-utils.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-#include "testing/gmock-support.h"
-
-using testing::_;
-using testing::AllOf;
-using testing::BitEq;
-using testing::Capture;
-using testing::CaptureEq;
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class ChangeLoweringTest : public TypedGraphTest {
- public:
- ChangeLoweringTest() : simplified_(zone()) {}
-
- virtual MachineRepresentation WordRepresentation() const = 0;
-
- protected:
- bool Is32() const {
- return WordRepresentation() == MachineRepresentation::kWord32;
- }
- bool Is64() const {
- return WordRepresentation() == MachineRepresentation::kWord64;
- }
-
- Reduction Reduce(Node* node) {
- MachineOperatorBuilder machine(zone(), WordRepresentation());
- JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
- &machine);
- ChangeLowering reducer(&jsgraph);
- return reducer.Reduce(node);
- }
-
- SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
- Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher) {
- return IsCall(
- _, IsHeapConstant(AllocateHeapNumberStub(isolate()).GetCode()),
- IsNumberConstant(BitEq(0.0)), effect_matcher, control_matcher);
- }
- Matcher<Node*> IsChangeInt32ToSmi(const Matcher<Node*>& value_matcher) {
- return Is64() ? IsWord64Shl(IsChangeInt32ToInt64(value_matcher),
- IsSmiShiftBitsConstant())
- : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
- }
- Matcher<Node*> IsChangeSmiToInt32(const Matcher<Node*>& value_matcher) {
- return Is64() ? IsTruncateInt64ToInt32(
- IsWord64Sar(value_matcher, IsSmiShiftBitsConstant()))
- : IsWord32Sar(value_matcher, IsSmiShiftBitsConstant());
- }
- Matcher<Node*> IsChangeUint32ToSmi(const Matcher<Node*>& value_matcher) {
- return Is64() ? IsWord64Shl(IsChangeUint32ToUint64(value_matcher),
- IsSmiShiftBitsConstant())
- : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
- }
- Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher) {
- return IsLoad(MachineType::Float64(), value_matcher,
- IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
- graph()->start(), control_matcher);
- }
- Matcher<Node*> IsIntPtrConstant(int value) {
- return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
- }
- Matcher<Node*> IsSmiShiftBitsConstant() {
- return IsIntPtrConstant(kSmiShiftSize + kSmiTagSize);
- }
- Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
- : IsWord64Equal(lhs_matcher, rhs_matcher);
- }
-
- private:
- SimplifiedOperatorBuilder simplified_;
-};
-
-
-// -----------------------------------------------------------------------------
-// Common.
-
-
-class ChangeLoweringCommonTest
- : public ChangeLoweringTest,
- public ::testing::WithParamInterface<MachineRepresentation> {
- public:
- ~ChangeLoweringCommonTest() override {}
-
- MachineRepresentation WordRepresentation() const final { return GetParam(); }
-};
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
- Node* value = Parameter(Type::Boolean());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeBitToBool(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kTagged, value,
- IsTrueConstant(), IsFalseConstant()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
- Node* value = Parameter(Type::Number());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsWordEqual(value, IsTrueConstant()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeInt32ToTaggedWithSignedSmall) {
- Node* value = Parameter(Type::SignedSmall());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeInt32ToTagged(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeUint32ToTaggedWithUnsignedSmall) {
- Node* value = Parameter(Type::UnsignedSmall());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeUint32ToSmi(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedSigned) {
- Node* value = Parameter(Type::TaggedSigned());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedPointer) {
- Node* value = Parameter(Type::TaggedPointer());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeFloat64ToInt32(
- IsLoadHeapNumber(value, graph()->start())));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedSigned) {
- Node* value = Parameter(Type::TaggedSigned());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedPointer) {
- Node* value = Parameter(Type::TaggedPointer());
- Reduction r =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeFloat64ToUint32(
- IsLoadHeapNumber(value, graph()->start())));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldSmi) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::TaggedSigned());
- Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kNoWriteBarrier),
- p0, IsIntPtrConstant(access.offset - access.tag()), p1,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldTagged) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Tagged());
- Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kFullWriteBarrier),
- p0, IsIntPtrConstant(access.offset - access.tag()), p1,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadField) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* load = graph()->NewNode(simplified()->LoadField(access), p0,
- graph()->start(), graph()->start());
- Reduction r = Reduce(load);
-
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match = IsIntPtrConstant(access.offset - access.tag());
- EXPECT_THAT(r.replacement(),
- IsLoad(MachineType::AnyTagged(), p0,
- IsIntPtrConstant(access.offset - access.tag()),
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementTagged) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* p2 = Parameter(Type::Tagged());
- Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
- IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kFullWriteBarrier),
- p0, index_match, p2, graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementUint8) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Signed32(), MachineType::Uint8()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* p2 = Parameter(Type::Signed32());
- Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
- graph()->start(), graph()->start());
- Reduction r = Reduce(store);
-
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(),
- IsStore(StoreRepresentation(MachineRepresentation::kWord8,
- kNoWriteBarrier),
- p0, index_match, p2, graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementTagged) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(load);
-
- const int element_size_shift =
- ElementSizeLog2Of(access.machine_type.representation());
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
- IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(), IsLoad(MachineType::AnyTagged(), p0, index_match,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementInt8) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Signed32(), MachineType::Int8()};
- Node* p0 = Parameter(Type::TaggedPointer());
- Node* p1 = Parameter(Type::Signed32());
- Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
- graph()->start(), graph()->start());
- Reduction r = Reduce(load);
-
- ASSERT_TRUE(r.Changed());
- Matcher<Node*> index_match =
- IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
- if (!Is32()) {
- index_match = IsChangeUint32ToUint64(index_match);
- }
-
- EXPECT_THAT(r.replacement(), IsLoad(MachineType::Int8(), p0, index_match,
- graph()->start(), graph()->start()));
-}
-
-
-TARGET_TEST_P(ChangeLoweringCommonTest, Allocate) {
- Node* p0 = Parameter(Type::Signed32());
- Node* alloc = graph()->NewNode(simplified()->Allocate(TENURED), p0,
- graph()->start(), graph()->start());
- Reduction r = Reduce(alloc);
-
- // Only check that we lowered, but do not specify the exact form since
- // this is subject to change.
- ASSERT_TRUE(r.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
- ::testing::Values(MachineRepresentation::kWord32,
- MachineRepresentation::kWord64));
-
-
-// -----------------------------------------------------------------------------
-// 32-bit
-
-
-class ChangeLowering32Test : public ChangeLoweringTest {
- public:
- ~ChangeLowering32Test() override {}
- MachineRepresentation WordRepresentation() const final {
- return MachineRepresentation::kWord32;
- }
-};
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
- Node* value = Parameter(Type::Integral32());
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> add, branch, heap_number, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(MachineRepresentation::kTagged,
- IsFinishRegion(
- AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_true))),
- IsStore(
- StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
- IsChangeInt32ToFloat64(value), CaptureEq(&heap_number),
- CaptureEq(&if_true))),
- IsProjection(0, AllOf(CaptureEq(&add),
- IsInt32AddWithOverflow(value, value))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(CaptureEq(&branch),
- IsBranch(IsProjection(1, CaptureEq(&add)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(MachineRepresentation::kFloat64,
- IsLoadHeapNumber(value, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(IsWord32Sar(
- value, IsInt32Constant(kSmiTagSize + kSmiShiftSize))),
- IsMerge(AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(
- value, IsInt32Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Signed32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Unsigned32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, heap_number, if_false;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kTagged,
- IsWord32Shl(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
- IsFinishRegion(
- AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_false))),
- IsStore(
- StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsInt32Constant(HeapNumber::kValueOffset - kHeapObjectTag),
- IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
- CaptureEq(&if_false))),
- IsMerge(IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- value, IsInt32Constant(Smi::kMaxValue)),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-// -----------------------------------------------------------------------------
-// 64-bit
-
-
-class ChangeLowering64Test : public ChangeLoweringTest {
- public:
- ~ChangeLowering64Test() override {}
- MachineRepresentation WordRepresentation() const final {
- return MachineRepresentation::kWord64;
- }
-};
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
- Node* value = Parameter(Type::Signed32());
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(MachineRepresentation::kFloat64,
- IsLoadHeapNumber(value, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(IsWord64Sar(
- value, IsInt64Constant(kSmiTagSize + kSmiShiftSize)))),
- IsMerge(AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(
- value, IsInt64Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Signed32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Unsigned32());
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, if_true;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kWord32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
-}
-
-
-TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- Node* value = Parameter(Type::Number());
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
- Reduction r = Reduce(node);
- ASSERT_TRUE(r.Changed());
- Capture<Node*> branch, heap_number, if_false;
- EXPECT_THAT(
- r.replacement(),
- IsPhi(
- MachineRepresentation::kTagged,
- IsWord64Shl(IsChangeUint32ToUint64(value),
- IsInt64Constant(kSmiTagSize + kSmiShiftSize)),
- IsFinishRegion(
- AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_false))),
- IsStore(
- StoreRepresentation(MachineRepresentation::kFloat64,
- kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsInt64Constant(HeapNumber::kValueOffset - kHeapObjectTag),
- IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
- CaptureEq(&if_false))),
- IsMerge(IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- value, IsInt32Constant(Smi::kMaxValue)),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
new file mode 100644
index 0000000000..a201fc9a55
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/checkpoint-elimination-unittest.cc
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/checkpoint-elimination.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CheckpointEliminationTest : public GraphTest {
+ public:
+ CheckpointEliminationTest() : GraphTest() {}
+ ~CheckpointEliminationTest() override {}
+
+ protected:
+ Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
+ CheckpointElimination reducer(editor);
+ return reducer.Reduce(node);
+ }
+
+ Reduction Reduce(Node* node) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ return Reduce(&editor, node);
+ }
+};
+
+namespace {
+
+const Operator kOpNoWrite(0, Operator::kNoWrite, "OpNoWrite", 0, 1, 0, 0, 1, 0);
+
+} // namespace
+
+// -----------------------------------------------------------------------------
+// Checkpoint
+
+TEST_F(CheckpointEliminationTest, CheckpointChain) {
+ Node* const control = graph()->start();
+ Node* frame_state = EmptyFrameState();
+ Node* checkpoint1 = graph()->NewNode(common()->Checkpoint(), frame_state,
+ graph()->start(), control);
+ Node* effect_link = graph()->NewNode(&kOpNoWrite, checkpoint1);
+ Node* checkpoint2 = graph()->NewNode(common()->Checkpoint(), frame_state,
+ effect_link, control);
+ Reduction r = Reduce(checkpoint2);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(effect_link, r.replacement());
+}
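// Shape of the reduction under test: a Checkpoint is redundant when the
// effect chain back to the previous Checkpoint crosses only kNoWrite
// operators (like kOpNoWrite above), so the reducer forwards its effect
// input. A sketch with hypothetical types, not V8's real node classes:

struct EffectNodeSketch {
  bool is_checkpoint;
  bool writes;  // true unless the operator has Operator::kNoWrite
  EffectNodeSketch* effect_input;
};

static bool IsRedundantCheckpoint(const EffectNodeSketch* node) {
  for (const EffectNodeSketch* e = node->effect_input; e != nullptr;
       e = e->effect_input) {
    if (e->is_checkpoint) return true;  // nothing observable since the last one
    if (e->writes) return false;        // an observable effect intervenes
  }
  return false;
}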
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc b/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
deleted file mode 100644
index fe8fac4bfe..0000000000
--- a/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "test/unittests/compiler/live-range-builder.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-class CoalescedLiveRangesTest : public TestWithZone {
- public:
- CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
- bool HasNoConflicts(const LiveRange* range);
- bool ConflictsPreciselyWith(const LiveRange* range, int id);
- bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
-
- CoalescedLiveRanges& ranges() { return ranges_; }
- const CoalescedLiveRanges& ranges() const { return ranges_; }
- bool AllocationsAreValid() const;
- void RemoveConflicts(LiveRange* range);
-
- private:
- typedef ZoneSet<int> LiveRangeIDs;
- bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
- CoalescedLiveRanges ranges_;
-};
-
-
-bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
- int id) {
- LiveRangeIDs set(zone());
- set.insert(id);
- return IsRangeConflictingWith(range, set);
-}
-
-
-bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
- int id1, int id2) {
- LiveRangeIDs set(zone());
- set.insert(id1);
- set.insert(id2);
- return IsRangeConflictingWith(range, set);
-}
-
-
-bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
- LiveRangeIDs set(zone());
- return IsRangeConflictingWith(range, set);
-}
-
-
-void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
- auto conflicts = ranges().GetConflicts(range);
- LiveRangeIDs seen(zone());
- for (auto c = conflicts.Current(); c != nullptr;
- c = conflicts.RemoveCurrentAndGetNext()) {
- int id = c->TopLevel()->vreg();
- EXPECT_FALSE(seen.count(id) > 0);
- seen.insert(c->TopLevel()->vreg());
- }
-}
-
-
-bool CoalescedLiveRangesTest::AllocationsAreValid() const {
- return ranges().VerifyAllocationsAreValidForTesting();
-}
-
-
-bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
- const LiveRangeIDs& ids) {
- LiveRangeIDs found_ids(zone());
-
- auto conflicts = ranges().GetConflicts(range);
- for (auto conflict = conflicts.Current(); conflict != nullptr;
- conflict = conflicts.GetNext()) {
- found_ids.insert(conflict->TopLevel()->vreg());
- }
- return found_ids == ids;
-}
-
-
-TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ASSERT_TRUE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- ASSERT_TRUE(HasNoConflicts(range));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
- ASSERT_TRUE(HasNoConflicts(query));
- query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- LiveRange* query =
- TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
- ASSERT_TRUE(HasNoConflicts(query));
- query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
- range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
- ranges().AllocateRange(range);
- ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
- range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
- ranges().AllocateRange(range);
- query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
- range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
- ranges().AllocateRange(range);
- query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
- ranges().AllocateRange(range);
- LiveRange* query =
- TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
- ranges().AllocateRange(range);
- LiveRange* query =
- TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index 1c163706f2..f51a54d074 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -105,40 +105,6 @@ TEST_F(CommonOperatorReducerTest, BranchWithInt32OneConstant) {
}
-TEST_F(CommonOperatorReducerTest, BranchWithInt64ZeroConstant) {
- TRACED_FOREACH(BranchHint, hint, kBranchHints) {
- Node* const control = graph()->start();
- Node* const branch =
- graph()->NewNode(common()->Branch(hint), Int64Constant(0), control);
- Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Replace(if_true, IsDead()));
- EXPECT_CALL(editor, Replace(if_false, control));
- Reduction const r = Reduce(&editor, branch);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsDead());
- }
-}
-
-
-TEST_F(CommonOperatorReducerTest, BranchWithInt64OneConstant) {
- TRACED_FOREACH(BranchHint, hint, kBranchHints) {
- Node* const control = graph()->start();
- Node* const branch =
- graph()->NewNode(common()->Branch(hint), Int64Constant(1), control);
- Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Replace(if_true, control));
- EXPECT_CALL(editor, Replace(if_false, IsDead()));
- Reduction const r = Reduce(&editor, branch);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsDead());
- }
-}
-
-
TEST_F(CommonOperatorReducerTest, BranchWithFalseConstant) {
TRACED_FOREACH(BranchHint, hint, kBranchHints) {
Node* const control = graph()->start();
@@ -358,78 +324,6 @@ TEST_F(CommonOperatorReducerTest, PhiToFloat64Abs) {
}
-TEST_F(CommonOperatorReducerTest, PhiToFloat32Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat32, 2), p1, p0, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, PhiToFloat64Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat64, 2), p1, p0, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, PhiToFloat32Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat32, 2), p0, p1, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
-}
-
-
-TEST_F(CommonOperatorReducerTest, PhiToFloat64Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat64, 2), p0, p1, merge);
- StrictMock<MockAdvancedReducerEditor> editor;
- EXPECT_CALL(editor, Revisit(merge));
- Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
-}
-
-
// -----------------------------------------------------------------------------
// Return
@@ -500,30 +394,6 @@ TEST_F(CommonOperatorReducerTest, SelectWithInt32OneConstant) {
}
-TEST_F(CommonOperatorReducerTest, SelectWithInt64ZeroConstant) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* select =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- Int64Constant(0), p0, p1);
- Reduction r = Reduce(select);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(p1, r.replacement());
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectWithInt64OneConstant) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* select =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- Int64Constant(1), p0, p1);
- Reduction r = Reduce(select);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(p0, r.replacement());
-}
-
-
TEST_F(CommonOperatorReducerTest, SelectWithFalseConstant) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
@@ -572,54 +442,6 @@ TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
}
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat32Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat32), check, p1, p0);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat64Max) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64), check, p1, p0);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Max);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat32Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat32), check, p0, p1);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
-}
-
-
-TEST_F(CommonOperatorReducerTest, SelectToFloat64Min) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Node* select = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64), check, p0, p1);
- Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Min);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 0a55a2e2a2..787dae01dd 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -38,7 +38,6 @@ std::ostream& operator<<(std::ostream& os, const SharedOperator& fop) {
return os << IrOpcode::Mnemonic(fop.opcode);
}
-
const SharedOperator kSharedOperators[] = {
#define SHARED(Name, properties, value_input_count, effect_input_count, \
control_input_count, value_output_count, effect_output_count, \
@@ -52,6 +51,7 @@ const SharedOperator kSharedOperators[] = {
SHARED(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
SHARED(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
SHARED(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(IfException, Operator::kKontrol, 0, 1, 1, 1, 1, 1),
SHARED(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1),
SHARED(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)
#undef SHARED
@@ -220,24 +220,6 @@ TEST_F(CommonOperatorTest, Branch) {
}
-TEST_F(CommonOperatorTest, IfException) {
- static const IfExceptionHint kIfExceptionHints[] = {
- IfExceptionHint::kLocallyCaught, IfExceptionHint::kLocallyUncaught};
- TRACED_FOREACH(IfExceptionHint, hint, kIfExceptionHints) {
- const Operator* const op = common()->IfException(hint);
- EXPECT_EQ(IrOpcode::kIfException, op->opcode());
- EXPECT_EQ(Operator::kKontrol, op->properties());
- EXPECT_EQ(0, op->ValueInputCount());
- EXPECT_EQ(1, op->EffectInputCount());
- EXPECT_EQ(1, op->ControlInputCount());
- EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(1, op->ValueOutputCount());
- EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(1, op->ControlOutputCount());
- }
-}
-
-
TEST_F(CommonOperatorTest, Switch) {
TRACED_FOREACH(size_t, cases, kCases) {
const Operator* const op = common()->Switch(cases);
@@ -362,15 +344,26 @@ TEST_F(CommonOperatorTest, NumberConstant) {
TEST_F(CommonOperatorTest, BeginRegion) {
- const Operator* op = common()->BeginRegion();
- EXPECT_EQ(1, op->EffectInputCount());
- EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ControlOutputCount());
- EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(0, op->ValueOutputCount());
+ {
+ const Operator* op =
+ common()->BeginRegion(RegionObservability::kObservable);
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
+ }
+ {
+ const Operator* op =
+ common()->BeginRegion(RegionObservability::kNotObservable);
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
+ }
}
-
TEST_F(CommonOperatorTest, FinishRegion) {
const Operator* op = common()->FinishRegion();
EXPECT_EQ(1, op->ValueInputCount());
@@ -381,6 +374,19 @@ TEST_F(CommonOperatorTest, FinishRegion) {
EXPECT_EQ(1, op->ValueOutputCount());
}
+TEST_F(CommonOperatorTest, Projection) {
+ TRACED_FORRANGE(size_t, index, 0, 3) {
+ const Operator* op = common()->Projection(index);
+ EXPECT_EQ(index, ProjectionIndexOf(op));
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index a5a3c74be2..a0c483344e 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -96,34 +96,6 @@ TEST_F(ControlFlowOptimizerTest, BuildSwitch2) {
IsSwitch(index, IsIfSuccess(index)))))));
}
-
-TEST_F(ControlFlowOptimizerTest, CloneBranch) {
- Node* cond0 = Parameter(0);
- Node* cond1 = Parameter(1);
- Node* cond2 = Parameter(2);
- Node* branch0 = graph()->NewNode(common()->Branch(), cond0, start());
- Node* control1 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* control2 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* merge0 = graph()->NewNode(common()->Merge(2), control1, control2);
- Node* phi0 = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2),
- cond1, cond2, merge0);
- Node* branch = graph()->NewNode(common()->Branch(), phi0, merge0);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- graph()->SetEnd(graph()->NewNode(common()->End(1), merge));
- Optimize();
- Capture<Node*> branch1_capture, branch2_capture;
- EXPECT_THAT(
- end(),
- IsEnd(IsMerge(IsMerge(IsIfTrue(CaptureEq(&branch1_capture)),
- IsIfTrue(CaptureEq(&branch2_capture))),
- IsMerge(IsIfFalse(AllOf(CaptureEq(&branch1_capture),
- IsBranch(cond1, control1))),
- IsIfFalse(AllOf(CaptureEq(&branch2_capture),
- IsBranch(cond2, control2)))))));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
index df93f25302..d0351bf5f5 100644
--- a/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -124,15 +124,11 @@ TEST_F(DeadCodeEliminationTest, IfSuccessWithDeadInput) {
TEST_F(DeadCodeEliminationTest, IfExceptionWithDeadControlInput) {
- IfExceptionHint const kHints[] = {IfExceptionHint::kLocallyCaught,
- IfExceptionHint::kLocallyUncaught};
- TRACED_FOREACH(IfExceptionHint, hint, kHints) {
- Reduction const r =
- Reduce(graph()->NewNode(common()->IfException(hint), graph()->start(),
- graph()->NewNode(common()->Dead())));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsDead());
- }
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->IfException(), graph()->start(),
+ graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
}
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
new file mode 100644
index 0000000000..71a8696d09
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -0,0 +1,405 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/effect-control-linearizer.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using testing::Capture;
+
+class EffectControlLinearizerTest : public TypedGraphTest {
+ public:
+ EffectControlLinearizerTest()
+ : TypedGraphTest(3),
+ machine_(zone()),
+ javascript_(zone()),
+ simplified_(zone()),
+ jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
+ &machine_) {}
+
+ JSGraph* jsgraph() { return &jsgraph_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+ JSOperatorBuilder javascript_;
+ SimplifiedOperatorBuilder simplified_;
+ JSGraph jsgraph_;
+};
+
+namespace {
+
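+// Helper that creates a basic block and appends it to the schedule's RPO order.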
+BasicBlock* AddBlockToSchedule(Schedule* schedule) {
+ BasicBlock* block = schedule->NewBasicBlock();
+ block->set_rpo_number(static_cast<int32_t>(schedule->rpo_order()->size()));
+ schedule->rpo_order()->push_back(block);
+ return block;
+}
+
+} // namespace
+
+TEST_F(EffectControlLinearizerTest, SimpleLoad) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* heap_number = NumberConstant(0.5);
+ Node* load = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
+ graph()->start(), graph()->start());
+ Node* ret = graph()->NewNode(common()->Return(), load, graph()->start(),
+ graph()->start());
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, heap_number);
+ schedule.AddNode(start, load);
+ schedule.AddReturn(start, ret);
+
+  // Run the effect control linearizer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+ EXPECT_THAT(load,
+ IsLoadField(AccessBuilder::ForHeapNumberValue(), heap_number,
+ graph()->start(), graph()->start()));
+  // The return's effect edge should have been reconnected to the load.
+ EXPECT_THAT(ret, IsReturn(load, load, graph()->start()));
+}
+
+TEST_F(EffectControlLinearizerTest, DiamondLoad) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* branch =
+ graph()->NewNode(common()->Branch(), Int32Constant(0), graph()->start());
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* heap_number = NumberConstant(0.5);
+ Node* vtrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
+ graph()->start(), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = Float64Constant(2);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
+
+ Node* ret =
+ graph()->NewNode(common()->Return(), phi, graph()->start(), merge);
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* tblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+ BasicBlock* mblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddBranch(start, branch, tblock, fblock);
+
+ schedule.AddNode(tblock, if_true);
+ schedule.AddNode(tblock, heap_number);
+ schedule.AddNode(tblock, vtrue);
+ schedule.AddGoto(tblock, mblock);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddNode(fblock, vfalse);
+ schedule.AddGoto(fblock, mblock);
+
+ schedule.AddNode(mblock, merge);
+ schedule.AddNode(mblock, phi);
+ schedule.AddReturn(mblock, ret);
+
+  // Run the effect control linearizer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+  // The effect input to the return should be an effect phi that merges the
+  // load's effect with the effect coming from start.
+ ASSERT_THAT(
+ ret, IsReturn(phi, IsEffectPhi(vtrue, graph()->start(), merge), merge));
+}
+
+TEST_F(EffectControlLinearizerTest, FloatingDiamondsControlWiring) {
+ Schedule schedule(zone());
+
+ // Create the graph and schedule. Roughly (omitting effects and unimportant
+ // nodes):
+ //
+ // BLOCK 0:
+  // s1: Start
+ // c1: Call
+ // b1: Branch(const0, s1)
+ // |
+ // +-------+------+
+ // | |
+ // BLOCK 1: BLOCK 2:
+ // t1: IfTrue(b1) f1: IfFalse(b1)
+ // | |
+ // +-------+------+
+ // |
+ // BLOCK 3:
+ // m1: Merge(t1, f1)
+ // c2: IfSuccess(c1)
+  // b2: Branch(const0, s1)
+ // |
+ // +-------+------+
+ // | |
+ // BLOCK 4: BLOCK 5:
+ // t2: IfTrue(b2) f2:IfFalse(b2)
+ // | |
+ // +-------+------+
+ // |
+ // BLOCK 6:
+ // m2: Merge(t2, f2)
+ // r1: Return(c1, c2)
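+  // Build a minimal call descriptor for the Call node constructed below.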
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* const0 = Int32Constant(0);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+
+  // First floating diamond.
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(), const0, graph()->start());
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* merge1 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+
+ // Second floating diamond.
+ Node* branch2 =
+ graph()->NewNode(common()->Branch(), const0, graph()->start());
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* merge2 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+
+ Node* ret =
+ graph()->NewNode(common()->Return(), call, graph()->start(), if_success);
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* t1block = AddBlockToSchedule(&schedule);
+ BasicBlock* f1block = AddBlockToSchedule(&schedule);
+ BasicBlock* m1block = AddBlockToSchedule(&schedule);
+
+ BasicBlock* t2block = AddBlockToSchedule(&schedule);
+ BasicBlock* f2block = AddBlockToSchedule(&schedule);
+ BasicBlock* m2block = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddNode(start, p0);
+ schedule.AddNode(start, p1);
+ schedule.AddNode(start, const0);
+ schedule.AddNode(start, call);
+ schedule.AddBranch(start, branch1, t1block, f1block);
+
+ schedule.AddNode(t1block, if_true1);
+ schedule.AddGoto(t1block, m1block);
+
+ schedule.AddNode(f1block, if_false1);
+ schedule.AddGoto(f1block, m1block);
+
+ schedule.AddNode(m1block, merge1);
+  // The scheduler does not always put the IfSuccess node in the corresponding
+  // call's block; simulate that here.
+ schedule.AddNode(m1block, if_success);
+ schedule.AddBranch(m1block, branch2, t2block, f2block);
+
+ schedule.AddNode(t2block, if_true2);
+ schedule.AddGoto(t2block, m2block);
+
+ schedule.AddNode(f2block, if_false2);
+ schedule.AddGoto(f2block, m2block);
+
+ schedule.AddNode(m2block, merge2);
+ schedule.AddReturn(m2block, ret);
+
+  // Run the effect control linearizer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+  // The floating diamonds should be wired into the control chain behind the
+  // call's IfSuccess, and the return's effect edge reconnected to the call.
+ ASSERT_THAT(ret, IsReturn(call, call, merge2));
+ ASSERT_THAT(branch2, IsBranch(const0, merge1));
+ ASSERT_THAT(branch1, IsBranch(const0, if_success));
+ ASSERT_THAT(if_success, IsIfSuccess(call));
+}
+
+TEST_F(EffectControlLinearizerTest, LoopLoad) {
+ Schedule schedule(zone());
+
+ // Create the graph.
+ Node* loop = graph()->NewNode(common()->Loop(1), graph()->start());
+ Node* effect_phi =
+ graph()->NewNode(common()->EffectPhi(1), graph()->start(), loop);
+
+ Node* cond = Int32Constant(0);
+ Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
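+  // Close the loop: append the back edge and widen the Loop and EffectPhi to
+  // two inputs (the effect phi feeds itself back around the loop).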
+ loop->AppendInput(zone(), if_false);
+ NodeProperties::ChangeOp(loop, common()->Loop(2));
+
+ effect_phi->InsertInput(zone(), 1, effect_phi);
+ NodeProperties::ChangeOp(effect_phi, common()->EffectPhi(2));
+
+ Node* heap_number = NumberConstant(0.5);
+ Node* load = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
+ graph()->start(), loop);
+
+ Node* ret = graph()->NewNode(common()->Return(), load, effect_phi, if_true);
+
+ // Build the basic block structure.
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* lblock = AddBlockToSchedule(&schedule);
+ BasicBlock* fblock = AddBlockToSchedule(&schedule);
+ BasicBlock* rblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+ schedule.AddGoto(start, lblock);
+
+ schedule.AddNode(lblock, loop);
+ schedule.AddNode(lblock, effect_phi);
+ schedule.AddNode(lblock, heap_number);
+ schedule.AddNode(lblock, load);
+ schedule.AddNode(lblock, cond);
+ schedule.AddBranch(lblock, branch, rblock, fblock);
+
+ schedule.AddNode(fblock, if_false);
+ schedule.AddGoto(fblock, lblock);
+
+ schedule.AddNode(rblock, if_true);
+ schedule.AddReturn(rblock, ret);
+
+  // Run the effect control linearizer.
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+ ASSERT_THAT(ret, IsReturn(load, load, if_true));
+ EXPECT_THAT(load, IsLoadField(AccessBuilder::ForHeapNumberValue(),
+ heap_number, effect_phi, loop));
+}
+
+TEST_F(EffectControlLinearizerTest, CloneBranch) {
+ Schedule schedule(zone());
+
+ Node* cond0 = Parameter(0);
+ Node* cond1 = Parameter(1);
+ Node* cond2 = Parameter(2);
+ Node* branch0 = graph()->NewNode(common()->Branch(), cond0, start());
+ Node* control1 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* control2 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* merge0 = graph()->NewNode(common()->Merge(2), control1, control2);
+ Node* phi0 = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2),
+ cond1, cond2, merge0);
+ Node* branch = graph()->NewNode(common()->Branch(), phi0, merge0);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), merge));
+
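+  // Schedule the graph by hand; the linearizer is expected to clone the
+  // branch on phi0 into both predecessors of the block containing phi0.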
+ BasicBlock* start = schedule.start();
+ schedule.rpo_order()->push_back(start);
+ start->set_rpo_number(0);
+
+ BasicBlock* f1block = AddBlockToSchedule(&schedule);
+ BasicBlock* t1block = AddBlockToSchedule(&schedule);
+ BasicBlock* bblock = AddBlockToSchedule(&schedule);
+
+ BasicBlock* f2block = AddBlockToSchedule(&schedule);
+ BasicBlock* t2block = AddBlockToSchedule(&schedule);
+ BasicBlock* mblock = AddBlockToSchedule(&schedule);
+
+ // Populate the basic blocks with nodes.
+ schedule.AddNode(start, graph()->start());
+
+ schedule.AddBranch(start, branch0, t1block, f1block);
+
+ schedule.AddNode(t1block, control1);
+ schedule.AddGoto(t1block, bblock);
+
+ schedule.AddNode(f1block, control2);
+ schedule.AddGoto(f1block, bblock);
+
+ schedule.AddNode(bblock, merge0);
+ schedule.AddNode(bblock, phi0);
+ schedule.AddBranch(bblock, branch, t2block, f2block);
+
+ schedule.AddNode(t2block, if_true);
+ schedule.AddGoto(t2block, mblock);
+
+ schedule.AddNode(f2block, if_false);
+ schedule.AddGoto(f2block, mblock);
+
+ schedule.AddNode(mblock, merge);
+ schedule.AddNode(mblock, graph()->end());
+
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ introducer.Run();
+
+  Capture<Node*> branch1_capture, branch2_capture;
+ EXPECT_THAT(
+ end(),
+ IsEnd(IsMerge(IsMerge(IsIfTrue(CaptureEq(&branch1_capture)),
+ IsIfTrue(CaptureEq(&branch2_capture))),
+ IsMerge(IsIfFalse(AllOf(CaptureEq(&branch1_capture),
+ IsBranch(cond1, control1))),
+ IsIfFalse(AllOf(CaptureEq(&branch2_capture),
+ IsBranch(cond2, control2)))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
index d5e12ba0db..990b813947 100644
--- a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
@@ -48,7 +48,8 @@ class EscapeAnalysisTest : public GraphTest {
effect = effect_;
}
- return effect_ = graph()->NewNode(common()->BeginRegion(), effect);
+ return effect_ = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kObservable), effect);
}
Node* FinishRegion(Node* value, Node* effect = nullptr) {
@@ -146,14 +147,18 @@ class EscapeAnalysisTest : public GraphTest {
}
FieldAccess FieldAccessAtIndex(int offset) {
- FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase,
+ offset,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
ElementAccess MakeElementAccess(int header_size) {
ElementAccess access = {kTaggedBase, header_size, Type::Any(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kFullWriteBarrier};
return access;
}
@@ -441,8 +446,9 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
nullptr),
state_values1, state_values2, state_values3, UndefinedConstant(),
graph()->start(), graph()->start());
- Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect1, ifFalse);
+ Node* deopt = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);
@@ -481,8 +487,9 @@ TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
nullptr),
state_values1, state_values2, state_values3, UndefinedConstant(),
graph()->start(), graph()->start());
- Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
- frame_state, effect1, ifFalse);
+ Node* deopt = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index 8d05c526c3..7d94793459 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -283,8 +283,6 @@ const Operator kMockOpEffect(IrOpcode::kDead, Operator::kNoProperties,
const Operator kMockOpControl(IrOpcode::kDead, Operator::kNoProperties,
"MockOpControl", 0, 0, 1, 1, 0, 1);
-const IfExceptionHint kNoHint = IfExceptionHint::kLocallyCaught;
-
} // namespace
@@ -348,7 +346,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse2) {
Node* dead = graph()->NewNode(&kMockOperator);
Node* node = graph()->NewNode(&kMockOpControl, start);
Node* success = graph()->NewNode(common.IfSuccess(), node);
- Node* exception = graph()->NewNode(common.IfException(kNoHint), effect, node);
+ Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
GraphReducer graph_reducer(zone(), graph(), dead);
@@ -372,7 +370,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse3) {
Node* dead = graph()->NewNode(&kMockOperator);
Node* node = graph()->NewNode(&kMockOpControl, start);
Node* success = graph()->NewNode(common.IfSuccess(), node);
- Node* exception = graph()->NewNode(common.IfException(kNoHint), effect, node);
+ Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
GraphReducer graph_reducer(zone(), graph(), dead);
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 31bae6d415..d4248e422b 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -48,6 +48,9 @@ class GraphTest : public TestWithContext, public TestWithIsolateAndZone {
Node* EmptyFrameState();
+ Matcher<Node*> IsBooleanConstant(bool value) {
+ return value ? IsTrueConstant() : IsFalseConstant();
+ }
Matcher<Node*> IsFalseConstant();
Matcher<Node*> IsTrueConstant();
Matcher<Node*> IsUndefinedConstant();
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 5280f69aa0..f2c9c2609b 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -751,71 +751,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
-
-TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
// -----------------------------------------------------------------------------
// Miscellaneous.
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 16030f80d7..d2953159fc 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -45,9 +45,8 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
selector.SelectInstructions();
if (FLAG_trace_turbo) {
OFStream out(stdout);
- PrintableInstructionSequence printable = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- &sequence};
+ PrintableInstructionSequence printable = {RegisterConfiguration::Turbofan(),
+ &sequence};
out << "=== Code sequence after instruction selection ===" << std::endl
<< printable;
}
@@ -94,18 +93,18 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
}
for (auto i : s.virtual_registers_) {
int const virtual_register = i.second;
- if (sequence.IsFloat(virtual_register)) {
+ if (sequence.IsFP(virtual_register)) {
EXPECT_FALSE(sequence.IsReference(virtual_register));
s.doubles_.insert(virtual_register);
}
if (sequence.IsReference(virtual_register)) {
- EXPECT_FALSE(sequence.IsFloat(virtual_register));
+ EXPECT_FALSE(sequence.IsFP(virtual_register));
s.references_.insert(virtual_register);
}
}
- for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
- s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
- InstructionSequence::StateId::FromInt(i)));
+ for (int i = 0; i < sequence.GetDeoptimizationEntryCount(); i++) {
+ s.deoptimization_entries_.push_back(
+ sequence.GetDeoptimizationEntry(i).descriptor());
}
return s;
}
@@ -199,11 +198,9 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
// -----------------------------------------------------------------------------
// Conversions.
-
-TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
+TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToWord32WithParameter) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
- m.Return(
- m.TruncateFloat64ToInt32(TruncationMode::kJavaScript, m.Parameter(0)));
+ m.Return(m.TruncateFloat64ToWord32(m.Parameter(0)));
Stream s = m.Build(kAllInstructions);
ASSERT_EQ(4U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
@@ -335,7 +332,8 @@ TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
Node* p2 = m2.Parameter(0);
m2.Return(m2.AddNode(
m2.machine()->Load(MachineType::Int32()), p2, m2.Int32Constant(0),
- m2.AddNode(m2.common()->BeginRegion(), m2.graph()->start())));
+ m2.AddNode(m2.common()->BeginRegion(RegionObservability::kObservable),
+ m2.graph()->start())));
Stream s2 = m2.Build(kAllInstructions);
EXPECT_LE(3U, s1.size());
ASSERT_EQ(s1.size(), s2.size());
@@ -482,7 +480,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(4))); // This should be a context.
// We inserted 0 here.
EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5)));
- EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined());
+ EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined(isolate()));
EXPECT_EQ(MachineType::AnyTagged(),
desc_before->GetType(0)); // function is always
// tagged/any.
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
index f1397faa06..93cef0544e 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -137,13 +137,15 @@ class InstructionSelectorTest : public TestWithContext,
// Add return location(s).
const int return_count = static_cast<int>(msig->return_count());
for (int i = 0; i < return_count; i++) {
- locations.AddReturn(LinkageLocation::ForCallerFrameSlot(-1 - i));
+ locations.AddReturn(
+ LinkageLocation::ForCallerFrameSlot(-1 - i, msig->GetReturn(i)));
}
// Just put all parameters on the stack.
const int parameter_count = static_cast<int>(msig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
- locations.AddParam(LinkageLocation::ForCallerFrameSlot(-1 - i));
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1 - i, msig->GetParam(i)));
}
const RegList kCalleeSaveRegisters = 0;
@@ -155,7 +157,6 @@ class InstructionSelectorTest : public TestWithContext,
CallDescriptor::kCallAddress, // kind
target_type, // target MachineType
target_loc, // target location
- msig, // machine_sig
locations.Build(), // location_sig
0, // stack_parameter_count
Operator::kNoProperties, // properties
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index 51112a6470..a0a86e043a 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -15,10 +15,9 @@ namespace compiler {
static const char*
general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
static const char*
- double_register_names_[RegisterConfiguration::kMaxDoubleRegisters];
+ double_register_names_[RegisterConfiguration::kMaxFPRegisters];
static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
- RegisterConfiguration::kMaxDoubleRegisters)];
-
+ RegisterConfiguration::kMaxFPRegisters)];
namespace {
static int allocatable_codes[InstructionSequenceTest::kDefaultNRegs] = {
@@ -35,7 +34,7 @@ static void InitializeRegisterNames() {
loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
*loc++ = 0;
}
- for (int i = 0; i < RegisterConfiguration::kMaxDoubleRegisters; ++i) {
+ for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
double_register_names_[i] = loc;
loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
*loc++ = 0;
@@ -56,7 +55,7 @@ InstructionSequenceTest::InstructionSequenceTest()
void InstructionSequenceTest::SetNumRegs(int num_general_registers,
int num_double_registers) {
- CHECK(config_.is_empty());
+ CHECK(!config_);
CHECK(instructions_.empty());
CHECK(instruction_blocks_.empty());
num_general_registers_ = num_general_registers;
@@ -65,12 +64,17 @@ void InstructionSequenceTest::SetNumRegs(int num_general_registers,
RegisterConfiguration* InstructionSequenceTest::config() {
- if (config_.is_empty()) {
- config_.Reset(new RegisterConfiguration(
+ if (!config_) {
+ config_.reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_,
num_double_registers_, num_double_registers_, allocatable_codes,
- allocatable_double_codes, general_register_names_,
- double_register_names_));
+ allocatable_double_codes,
+ kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
+ : RegisterConfiguration::COMBINE,
+ general_register_names_,
+ double_register_names_, // float register names
+          double_register_names_,  // double register names
+ double_register_names_)); // SIMD 128 register names
}
return config_.get();
}
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
index eb86bd9174..956f5d55b9 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
@@ -5,6 +5,8 @@
#ifndef V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
#define V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
+#include <memory>
+
#include "src/compiler/instruction.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -229,7 +231,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
typedef std::map<int, const Instruction*> Instructions;
typedef std::vector<BlockCompletion> Completions;
- base::SmartPointer<RegisterConfiguration> config_;
+ std::unique_ptr<RegisterConfiguration> config_;
InstructionSequence* sequence_;
int num_general_registers_;
int num_double_registers_;
@@ -241,6 +243,8 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
LoopBlocks loop_blocks_;
InstructionBlock* current_block_;
bool block_returns_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionSequenceTest);
};
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 08f3038754..06ac524111 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -123,72 +123,122 @@ TEST_F(Int64LoweringTest, Int64Constant) {
IsInt32Constant(high_word_value(0)), start(), start()));
}
-TEST_F(Int64LoweringTest, Int64Load) {
- int32_t base = 0x1234;
- int32_t index = 0x5678;
-
- LowerGraph(graph()->NewNode(machine()->Load(MachineType::Int64()),
- Int32Constant(base), Int32Constant(index),
- start(), start()),
- MachineRepresentation::kWord64);
-
- Capture<Node*> high_word_load;
- Matcher<Node*> high_word_load_matcher =
- IsLoad(MachineType::Int32(), IsInt32Constant(base),
- IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), start(),
- start());
-
- EXPECT_THAT(
- graph()->end()->InputAt(1),
- IsReturn2(IsLoad(MachineType::Int32(), IsInt32Constant(base),
- IsInt32Constant(index), AllOf(CaptureEq(&high_word_load),
- high_word_load_matcher),
- start()),
- AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
- start(), start()));
-}
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+#define LOAD_VERIFY(kLoad) \
+ Matcher<Node*> high_word_load_matcher = \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), \
+ start(), start()); \
+ \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn2( \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Constant(index), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), \
+ start()), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), start(), \
+ start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+#define LOAD_VERIFY(kLoad) \
+ Matcher<Node*> high_word_load_matcher = \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Constant(index), start(), start()); \
+ \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn2( \
+ Is##kLoad(MachineType::Int32(), IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), \
+ start()), \
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), start(), \
+ start()));
+#endif
+
+#define INT64_LOAD_LOWERING(kLoad) \
+ int32_t base = 0x1234; \
+ int32_t index = 0x5678; \
+ \
+ LowerGraph(graph()->NewNode(machine()->kLoad(MachineType::Int64()), \
+ Int32Constant(base), Int32Constant(index), \
+ start(), start()), \
+ MachineRepresentation::kWord64); \
+ \
+ Capture<Node*> high_word_load; \
+ LOAD_VERIFY(kLoad)
+
+TEST_F(Int64LoweringTest, Int64Load) { INT64_LOAD_LOWERING(Load); }
+
+TEST_F(Int64LoweringTest, UnalignedInt64Load) {
+ INT64_LOAD_LOWERING(UnalignedLoad);
+}
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+#define STORE_VERIFY(kStore, kRep) \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn(IsInt32Constant(return_value), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), IsInt32Constant(index), \
+ IsInt32Constant(low_word_value(0)), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)), \
+ IsInt32Constant(high_word_value(0)), start(), start()), \
+ start()), \
+ start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+#define STORE_VERIFY(kStore, kRep) \
+ EXPECT_THAT( \
+ graph()->end()->InputAt(1), \
+ IsReturn(IsInt32Constant(return_value), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), \
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)), \
+ IsInt32Constant(low_word_value(0)), \
+ Is##kStore( \
+ kRep, IsInt32Constant(base), IsInt32Constant(index), \
+ IsInt32Constant(high_word_value(0)), start(), start()), \
+ start()), \
+ start()));
+#endif
+
+#define INT64_STORE_LOWERING(kStore, kRep32, kRep64) \
+ int32_t base = 1111; \
+ int32_t index = 2222; \
+ int32_t return_value = 0x5555; \
+ \
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0); \
+ sig_builder.AddReturn(MachineRepresentation::kWord32); \
+ \
+ Node* store = graph()->NewNode(machine()->kStore(kRep64), \
+ Int32Constant(base), Int32Constant(index), \
+ Int64Constant(value(0)), start(), start()); \
+ \
+ Node* ret = graph()->NewNode(common()->Return(), \
+ Int32Constant(return_value), store, start()); \
+ \
+ NodeProperties::MergeControlToEnd(graph(), common(), ret); \
+ \
+ Int64Lowering lowering(graph(), machine(), common(), zone(), \
+ sig_builder.Build()); \
+ lowering.LowerGraph(); \
+ \
+ STORE_VERIFY(kStore, kRep32)
TEST_F(Int64LoweringTest, Int64Store) {
- // We have to build the TF graph explicitly here because Store does not return
- // a value.
-
- int32_t base = 1111;
- int32_t index = 2222;
- int32_t return_value = 0x5555;
-
- Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
- sig_builder.AddReturn(MachineRepresentation::kWord32);
-
- Node* store = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord64,
- WriteBarrierKind::kNoWriteBarrier)),
- Int32Constant(base), Int32Constant(index), Int64Constant(value(0)),
- start(), start());
-
- Node* ret = graph()->NewNode(common()->Return(), Int32Constant(return_value),
- store, start());
-
- NodeProperties::MergeControlToEnd(graph(), common(), ret);
-
- Int64Lowering lowering(graph(), machine(), common(), zone(),
- sig_builder.Build());
- lowering.LowerGraph();
-
- const StoreRepresentation rep(MachineRepresentation::kWord32,
- kNoWriteBarrier);
+ const StoreRepresentation rep64(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier);
+ const StoreRepresentation rep32(MachineRepresentation::kWord32,
+ WriteBarrierKind::kNoWriteBarrier);
+ INT64_STORE_LOWERING(Store, rep32, rep64);
+}
- EXPECT_THAT(
- graph()->end()->InputAt(1),
- IsReturn(
- IsInt32Constant(return_value),
- IsStore(
- rep, IsInt32Constant(base), IsInt32Constant(index),
- IsInt32Constant(low_word_value(0)),
- IsStore(rep, IsInt32Constant(base),
- IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)),
- IsInt32Constant(high_word_value(0)), start(), start()),
- start()),
- start()));
+TEST_F(Int64LoweringTest, Int64UnalignedStore) {
+ const UnalignedStoreRepresentation rep64(MachineRepresentation::kWord64);
+ const UnalignedStoreRepresentation rep32(MachineRepresentation::kWord32);
+ INT64_STORE_LOWERING(UnalignedStore, rep32, rep64);
}
TEST_F(Int64LoweringTest, Int64And) {
@@ -301,9 +351,6 @@ TEST_F(Int64LoweringTest, CallI64Parameter) {
wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), desc));
}
-// todo(ahaas): I added a list of missing instructions here to make merging
-// easier when I do them one by one.
-// kExprI64Add:
TEST_F(Int64LoweringTest, Int64Add) {
LowerGraph(graph()->NewNode(machine()->Int64Add(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -319,7 +366,7 @@ TEST_F(Int64LoweringTest, Int64Add) {
IsProjection(1, AllOf(CaptureEq(&add), add_matcher)),
start(), start()));
}
-// kExprI64Sub:
+
TEST_F(Int64LoweringTest, Int64Sub) {
LowerGraph(graph()->NewNode(machine()->Int64Sub(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -336,7 +383,6 @@ TEST_F(Int64LoweringTest, Int64Sub) {
start(), start()));
}
-// kExprI64Mul:
TEST_F(Int64LoweringTest, Int64Mul) {
LowerGraph(graph()->NewNode(machine()->Int64Mul(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -354,11 +400,6 @@ TEST_F(Int64LoweringTest, Int64Mul) {
start(), start()));
}
-// kExprI64DivS:
-// kExprI64DivU:
-// kExprI64RemS:
-// kExprI64RemU:
-// kExprI64Ior:
TEST_F(Int64LoweringTest, Int64Ior) {
LowerGraph(graph()->NewNode(machine()->Word64Or(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -371,7 +412,6 @@ TEST_F(Int64LoweringTest, Int64Ior) {
start(), start()));
}
-// kExprI64Xor:
TEST_F(Int64LoweringTest, Int64Xor) {
LowerGraph(graph()->NewNode(machine()->Word64Xor(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -383,7 +423,7 @@ TEST_F(Int64LoweringTest, Int64Xor) {
IsInt32Constant(high_word_value(1))),
start(), start()));
}
-// kExprI64Shl:
+
TEST_F(Int64LoweringTest, Int64Shl) {
LowerGraph(graph()->NewNode(machine()->Word64Shl(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -399,7 +439,7 @@ TEST_F(Int64LoweringTest, Int64Shl) {
IsProjection(1, AllOf(CaptureEq(&shl), shl_matcher)),
start(), start()));
}
-// kExprI64ShrU:
+
TEST_F(Int64LoweringTest, Int64ShrU) {
LowerGraph(graph()->NewNode(machine()->Word64Shr(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -415,7 +455,7 @@ TEST_F(Int64LoweringTest, Int64ShrU) {
IsProjection(1, AllOf(CaptureEq(&shr), shr_matcher)),
start(), start()));
}
-// kExprI64ShrS:
+
TEST_F(Int64LoweringTest, Int64ShrS) {
LowerGraph(graph()->NewNode(machine()->Word64Sar(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -431,7 +471,7 @@ TEST_F(Int64LoweringTest, Int64ShrS) {
IsProjection(1, AllOf(CaptureEq(&sar), sar_matcher)),
start(), start()));
}
-// kExprI64Eq:
+
TEST_F(Int64LoweringTest, Int64Eq) {
LowerGraph(graph()->NewNode(machine()->Word64Equal(), Int64Constant(value(0)),
Int64Constant(value(1))),
@@ -447,27 +487,25 @@ TEST_F(Int64LoweringTest, Int64Eq) {
start(), start()));
}
-// kExprI64LtS:
TEST_F(Int64LoweringTest, Int64LtS) {
TestComparison(machine()->Int64LessThan(), IsInt32LessThan, IsUint32LessThan);
}
-// kExprI64LeS:
+
TEST_F(Int64LoweringTest, Int64LeS) {
TestComparison(machine()->Int64LessThanOrEqual(), IsInt32LessThan,
IsUint32LessThanOrEqual);
}
-// kExprI64LtU:
+
TEST_F(Int64LoweringTest, Int64LtU) {
TestComparison(machine()->Uint64LessThan(), IsUint32LessThan,
IsUint32LessThan);
}
-// kExprI64LeU:
+
TEST_F(Int64LoweringTest, Int64LeU) {
TestComparison(machine()->Uint64LessThanOrEqual(), IsUint32LessThan,
IsUint32LessThanOrEqual);
}
-// kExprI32ConvertI64:
TEST_F(Int64LoweringTest, I32ConvertI64) {
LowerGraph(graph()->NewNode(machine()->TruncateInt64ToInt32(),
Int64Constant(value(0))),
@@ -475,7 +513,7 @@ TEST_F(Int64LoweringTest, I32ConvertI64) {
EXPECT_THAT(graph()->end()->InputAt(1),
IsReturn(IsInt32Constant(low_word_value(0)), start(), start()));
}
-// kExprI64SConvertI32:
+
TEST_F(Int64LoweringTest, I64SConvertI32) {
LowerGraph(graph()->NewNode(machine()->ChangeInt32ToInt64(),
Int32Constant(low_word_value(0))),
@@ -501,7 +539,7 @@ TEST_F(Int64LoweringTest, I64SConvertI32_2) {
IsInt32Constant(31)),
start(), start()));
}
-// kExprI64UConvertI32:
+
TEST_F(Int64LoweringTest, I64UConvertI32) {
LowerGraph(graph()->NewNode(machine()->ChangeUint32ToUint64(),
Int32Constant(low_word_value(0))),
@@ -523,7 +561,7 @@ TEST_F(Int64LoweringTest, I64UConvertI32_2) {
IsReturn2(IsInt32Constant(low_word_value(0)), IsInt32Constant(0),
start(), start()));
}
-// kExprF64ReinterpretI64:
+
TEST_F(Int64LoweringTest, F64ReinterpretI64) {
LowerGraph(graph()->NewNode(machine()->BitcastInt64ToFloat64(),
Int64Constant(value(0))),
@@ -538,12 +576,13 @@ TEST_F(Int64LoweringTest, F64ReinterpretI64) {
IsStore(StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier),
AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
- IsInt32Constant(0), IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(Int64Lowering::kLowerWordOffset),
+ IsInt32Constant(low_word_value(0)),
IsStore(StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier),
AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
- IsInt32Constant(4), IsInt32Constant(high_word_value(0)),
- start(), start()),
+ IsInt32Constant(Int64Lowering::kHigherWordOffset),
+ IsInt32Constant(high_word_value(0)), start(), start()),
start());
EXPECT_THAT(
@@ -554,7 +593,7 @@ TEST_F(Int64LoweringTest, F64ReinterpretI64) {
AllOf(CaptureEq(&store_capture), store_matcher), start()),
start(), start()));
}
-// kExprI64ReinterpretF64:
+
TEST_F(Int64LoweringTest, I64ReinterpretF64) {
LowerGraph(graph()->NewNode(machine()->BitcastFloat64ToInt64(),
Float64Constant(bit_cast<double>(value(0)))),
@@ -575,15 +614,15 @@ TEST_F(Int64LoweringTest, I64ReinterpretF64) {
graph()->end()->InputAt(1),
IsReturn2(IsLoad(MachineType::Int32(),
AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
- IsInt32Constant(0),
+ IsInt32Constant(Int64Lowering::kLowerWordOffset),
AllOf(CaptureEq(&store), store_matcher), start()),
IsLoad(MachineType::Int32(),
AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
- IsInt32Constant(0x4),
+ IsInt32Constant(Int64Lowering::kHigherWordOffset),
AllOf(CaptureEq(&store), store_matcher), start()),
start(), start()));
}
-// kExprI64Clz:
+
TEST_F(Int64LoweringTest, I64Clz) {
LowerGraph(graph()->NewNode(machine()->Word64Clz(), Int64Constant(value(0))),
MachineRepresentation::kWord64);
@@ -606,9 +645,9 @@ TEST_F(Int64LoweringTest, I64Clz) {
AllOf(CaptureEq(&branch_capture), branch_matcher)))),
IsInt32Constant(0), start(), start()));
}
-// kExprI64Ctz:
+
TEST_F(Int64LoweringTest, I64Ctz) {
- LowerGraph(graph()->NewNode(machine()->Word64CtzPlaceholder(),
+ LowerGraph(graph()->NewNode(machine()->Word64Ctz().placeholder(),
Int64Constant(value(0))),
MachineRepresentation::kWord64);
Capture<Node*> branch_capture;
@@ -628,7 +667,6 @@ TEST_F(Int64LoweringTest, I64Ctz) {
AllOf(CaptureEq(&branch_capture), branch_matcher)))),
IsInt32Constant(0), start(), start()));
}
-// kExprI64Popcnt:
TEST_F(Int64LoweringTest, Dfs) {
Node* common = Int64Constant(value(0));
@@ -649,7 +687,7 @@ TEST_F(Int64LoweringTest, Dfs) {
}
TEST_F(Int64LoweringTest, I64Popcnt) {
- LowerGraph(graph()->NewNode(machine()->Word64PopcntPlaceholder(),
+ LowerGraph(graph()->NewNode(machine()->Word64Popcnt().placeholder(),
Int64Constant(value(0))),
MachineRepresentation::kWord64);
@@ -800,6 +838,17 @@ TEST_F(Int64LoweringTest, I64PhiWord32) {
TestPhi(this, MachineRepresentation::kWord32, Float32Constant(1),
Float32Constant(2));
}
+
+TEST_F(Int64LoweringTest, I64ReverseBytes) {
+ LowerGraph(graph()->NewNode(machine()->Word64ReverseBytes().placeholder(),
+ Int64Constant(value(0))),
+ MachineRepresentation::kWord64);
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsWord32ReverseBytes(IsInt32Constant(high_word_value(0))),
+ IsWord32ReverseBytes(IsInt32Constant(low_word_value(0))),
+ start(), start()));
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 0f8eed7958..ed20e64194 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -33,7 +33,8 @@ class JSBuiltinReducerTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
- JSBuiltinReducer reducer(&graph_reducer, &jsgraph);
+ JSBuiltinReducer reducer(&graph_reducer, &jsgraph,
+ JSBuiltinReducer::kNoFlags, nullptr);
return reducer.Reduce(node);
}
@@ -49,6 +50,32 @@ class JSBuiltinReducerTest : public TypedGraphTest {
return HeapConstant(f);
}
+ Node* NumberFunction(const char* name) {
+ Handle<Object> m =
+ JSObject::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked("Number"))
+ .ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
+ Node* StringFunction(const char* name) {
+ Handle<Object> m =
+ JSObject::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked("String"))
+ .ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
JSOperatorBuilder* javascript() { return &javascript_; }
private:
@@ -74,28 +101,49 @@ Type* const kNumberTypes[] = {
// -----------------------------------------------------------------------------
-// Math.max
+// Math.abs
+TEST_F(JSBuiltinReducerTest, MathAbsWithNumber) {
+ Node* function = MathFunction("abs");
-TEST_F(JSBuiltinReducerTest, MathMax0) {
- Node* function = MathFunction("max");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAbs(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAbsWithPlainPrimitive) {
+ Node* function = MathFunction("abs");
Node* effect = graph()->start();
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
- UndefinedConstant(), context, frame_state,
- frame_state, effect, control);
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+ EXPECT_THAT(r.replacement(), IsNumberAbs(IsPlainPrimitiveToNumber(p0)));
}
+// -----------------------------------------------------------------------------
+// Math.acos
-TEST_F(JSBuiltinReducerTest, MathMax1) {
- Node* function = MathFunction("max");
+TEST_F(JSBuiltinReducerTest, MathAcosWithNumber) {
+ Node* function = MathFunction("acos");
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -105,73 +153,1094 @@ TEST_F(JSBuiltinReducerTest, MathMax1) {
Node* p0 = Parameter(t0, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
UndefinedConstant(), p0, context, frame_state,
- frame_state, effect, control);
+ effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
+ EXPECT_THAT(r.replacement(), IsNumberAcos(p0));
}
}
+TEST_F(JSBuiltinReducerTest, MathAcosWithPlainPrimitive) {
+ Node* function = MathFunction("acos");
-TEST_F(JSBuiltinReducerTest, MathMax2) {
- Node* function = MathFunction("max");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAcos(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.acosh
+
+TEST_F(JSBuiltinReducerTest, MathAcoshWithNumber) {
+ Node* function = MathFunction("acosh");
Node* effect = graph()->start();
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kIntegral32Types) {
- TRACED_FOREACH(Type*, t1, kIntegral32Types) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAcosh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAcoshWithPlainPrimitive) {
+ Node* function = MathFunction("acosh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAcosh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.asin
+
+TEST_F(JSBuiltinReducerTest, MathAsinWithNumber) {
+ Node* function = MathFunction("asin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsin(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAsinWithPlainPrimitive) {
+ Node* function = MathFunction("asin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsin(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.asinh
+
+TEST_F(JSBuiltinReducerTest, MathAsinhWithNumber) {
+ Node* function = MathFunction("asinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsinh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAsinhWithPlainPrimitive) {
+ Node* function = MathFunction("asinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAsinh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atan
+
+TEST_F(JSBuiltinReducerTest, MathAtanWithNumber) {
+ Node* function = MathFunction("atan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAtanWithPlainPrimitive) {
+ Node* function = MathFunction("atan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atanh
+
+TEST_F(JSBuiltinReducerTest, MathAtanhWithNumber) {
+ Node* function = MathFunction("atanh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtanh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAtanhWithPlainPrimitive) {
+ Node* function = MathFunction("atanh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtanh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atan2
+
+TEST_F(JSBuiltinReducerTest, MathAtan2WithNumber) {
+ Node* function = MathFunction("atan2");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p1 = Parameter(t1, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
+ frame_state, effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kNone,
- IsNumberLessThan(p1, p0), p0, p1));
+ EXPECT_THAT(r.replacement(), IsNumberAtan2(p0, p1));
}
}
}
+
+TEST_F(JSBuiltinReducerTest, MathAtan2WithPlainPrimitive) {
+ Node* function = MathFunction("atan2");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan2(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.ceil
+
+TEST_F(JSBuiltinReducerTest, MathCeilWithNumber) {
+ Node* function = MathFunction("ceil");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCeil(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCeilWithPlainPrimitive) {
+ Node* function = MathFunction("ceil");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCeil(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.clz32
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithUnsigned32) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberClz32(p0));
+}
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithNumber) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Number(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberClz32(IsNumberToUint32(p0)));
+}
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithPlainPrimitive) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberClz32(IsNumberToUint32(IsPlainPrimitiveToNumber(p0))));
+}
+
+// -----------------------------------------------------------------------------
+// Math.cos
+
+TEST_F(JSBuiltinReducerTest, MathCosWithNumber) {
+ Node* function = MathFunction("cos");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCos(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCosWithPlainPrimitive) {
+ Node* function = MathFunction("cos");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCos(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.cosh
+
+TEST_F(JSBuiltinReducerTest, MathCoshWithNumber) {
+ Node* function = MathFunction("cosh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCosh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCoshWithPlainPrimitive) {
+ Node* function = MathFunction("cosh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCosh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.exp
+
+TEST_F(JSBuiltinReducerTest, MathExpWithNumber) {
+ Node* function = MathFunction("exp");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberExp(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathExpWithPlainPrimitive) {
+ Node* function = MathFunction("exp");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberExp(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.floor
+
+TEST_F(JSBuiltinReducerTest, MathFloorWithNumber) {
+ Node* function = MathFunction("floor");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFloor(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathFloorWithPlainPrimitive) {
+ Node* function = MathFunction("floor");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFloor(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.fround
+
+TEST_F(JSBuiltinReducerTest, MathFroundWithNumber) {
+ Node* function = MathFunction("fround");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFround(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathFroundWithPlainPrimitive) {
+ Node* function = MathFunction("fround");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFround(IsPlainPrimitiveToNumber(p0)));
+}
// -----------------------------------------------------------------------------
// Math.imul
+TEST_F(JSBuiltinReducerTest, MathImulWithUnsigned32) {
+ Node* function = MathFunction("imul");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* p1 = Parameter(Type::Unsigned32(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberImul(p0, p1));
+}
+
+TEST_F(JSBuiltinReducerTest, MathImulWithNumber) {
+ Node* function = MathFunction("imul");
-TEST_F(JSBuiltinReducerTest, MathImul) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Number(), 0);
+ Node* p1 = Parameter(Type::Number(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
+}
+
+TEST_F(JSBuiltinReducerTest, MathImulWithPlainPrimitive) {
Node* function = MathFunction("imul");
Node* effect = graph()->start();
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberImul(IsNumberToUint32(IsPlainPrimitiveToNumber(p0)),
+ IsNumberToUint32(IsPlainPrimitiveToNumber(p1))));
+}
+
+// -----------------------------------------------------------------------------
+// Math.log
+
+TEST_F(JSBuiltinReducerTest, MathLogWithNumber) {
+ Node* function = MathFunction("log");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathLogWithPlainPrimitive) {
+ Node* function = MathFunction("log");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.log1p
+
+TEST_F(JSBuiltinReducerTest, MathLog1pWithNumber) {
+ Node* function = MathFunction("log1p");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog1p(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathLog1pWithPlainPrimitive) {
+ Node* function = MathFunction("log1p");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog1p(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+TEST_F(JSBuiltinReducerTest, MathMaxWithNoArguments) {
+ Node* function = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
+ UndefinedConstant(), context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+TEST_F(JSBuiltinReducerTest, MathMaxWithNumber) {
+ Node* function = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathMaxWithPlainPrimitive) {
+ Node* function = MathFunction("max");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberMax(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.min
+
+TEST_F(JSBuiltinReducerTest, MathMinWithNoArguments) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
+ UndefinedConstant(), context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(V8_INFINITY));
+}
+
+TEST_F(JSBuiltinReducerTest, MathMinWithNumber) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathMinWithPlainPrimitive) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberMin(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.round
+
+TEST_F(JSBuiltinReducerTest, MathRoundWithNumber) {
+ Node* function = MathFunction("round");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberRound(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathRoundWithPlainPrimitive) {
+ Node* function = MathFunction("round");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberRound(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.pow
+
+TEST_F(JSBuiltinReducerTest, MathPowWithNumber) {
+ Node* function = MathFunction("pow");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
TRACED_FOREACH(Type*, t1, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
+ Node* p1 = Parameter(t1, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
+ frame_state, effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
+ EXPECT_THAT(r.replacement(), IsNumberPow(p0, p1));
}
}
}
+
+TEST_F(JSBuiltinReducerTest, MathPowWithPlainPrimitive) {
+ Node* function = MathFunction("pow");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberPow(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
// -----------------------------------------------------------------------------
-// Math.fround
+// Math.sign
+
+TEST_F(JSBuiltinReducerTest, MathSignWithNumber) {
+ Node* function = MathFunction("sign");
-TEST_F(JSBuiltinReducerTest, MathFround) {
- Node* function = MathFunction("fround");
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSign(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSignWithPlainPrimitive) {
+ Node* function = MathFunction("sign");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSign(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sin
+
+TEST_F(JSBuiltinReducerTest, MathSinWithNumber) {
+ Node* function = MathFunction("sin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSin(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSinWithPlainPrimitive) {
+ Node* function = MathFunction("sin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSin(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sinh
+
+TEST_F(JSBuiltinReducerTest, MathSinhWithNumber) {
+ Node* function = MathFunction("sinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSinh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSinhWithPlainPrimitive) {
+ Node* function = MathFunction("sinh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSinh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+TEST_F(JSBuiltinReducerTest, MathSqrtWithNumber) {
+ Node* function = MathFunction("sqrt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSqrt(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSqrtWithPlainPrimitive) {
+ Node* function = MathFunction("sqrt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSqrt(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.tan
+
+TEST_F(JSBuiltinReducerTest, MathTanWithNumber) {
+ Node* function = MathFunction("tan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTan(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTanWithPlainPrimitive) {
+ Node* function = MathFunction("tan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTan(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.tanh
+
+TEST_F(JSBuiltinReducerTest, MathTanhWithNumber) {
+ Node* function = MathFunction("tanh");
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -181,14 +1250,152 @@ TEST_F(JSBuiltinReducerTest, MathFround) {
Node* p0 = Parameter(t0, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTanh(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTanhWithPlainPrimitive) {
+ Node* function = MathFunction("tanh");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTanh(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.trunc
+
+TEST_F(JSBuiltinReducerTest, MathTruncWithNumber) {
+ Node* function = MathFunction("trunc");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTrunc(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTruncWithPlainPrimitive) {
+ Node* function = MathFunction("trunc");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTrunc(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Number.parseInt
+
+TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32) {
+ Node* function = NumberFunction("parseInt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberToInt32(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32AndUndefined) {
+ Node* function = NumberFunction("parseInt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(Type::Undefined(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
frame_state, effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
+ EXPECT_THAT(r.replacement(), IsNumberToInt32(p0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// String.fromCharCode
+
+TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithNumber) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsStringFromCharCode(p0));
}
}
+
+TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithPlainPrimitive) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStringFromCharCode(IsPlainPrimitiveToNumber(p0)));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
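
The tests above all exercise the same JSBuiltinReducer contract: a JSCallFunction
node whose target is a known Math/Number/String builtin, and whose argument types
are precise enough, is replaced by a pure simplified operator. A conceptual sketch
of a single-argument reduction, assuming the helpers the tests already name
(NodeProperties, JSGraph, SimplifiedOperatorBuilder); the real reducer dispatches
on the builtin's function id and uses Reducer::Replace():

  // Sketch only: Math.atan -> NumberAtan, mirroring the assertions above.
  Reduction ReduceMathUnary(Node* node, JSGraph* jsgraph, const Operator* op) {
    // CallFunction(3) inputs are target(0), receiver(1), first argument(2).
    Node* input = NodeProperties::GetValueInput(node, 2);
    Type* input_type = NodeProperties::GetType(input);
    if (input_type->Is(Type::PlainPrimitive())) {
      if (!input_type->Is(Type::Number())) {
        // The ...WithPlainPrimitive tests: coerce first, then lower.
        input = jsgraph->graph()->NewNode(
            jsgraph->simplified()->PlainPrimitiveToNumber(), input);
      }
      return Reduction(jsgraph->graph()->NewNode(op, input));
    }
    return Reduction();  // NoChange()
  }
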
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 837c5742d9..9c001e9eb2 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-create-lowering.h"
#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
@@ -80,95 +81,74 @@ TEST_F(JSCreateLoweringTest, JSCreate) {
// -----------------------------------------------------------------------------
// JSCreateArguments
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsViaStub) {
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedMapped) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Node* const control = graph()->start();
Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state = FrameState(shared, graph()->start());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
- closure, context, frame_state, effect, control));
+ javascript()->CreateArguments(CreateArgumentsType::kMappedArguments),
+ closure, context, frame_state_inner, effect));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::FastNewStrictArguments(isolate()).code()),
- closure, context, frame_state, effect, control));
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSSloppyArgumentsObject::kSize), _, _),
+ _));
}
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsRestParameterViaStub) {
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedUnmapped) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Node* const control = graph()->start();
Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state = FrameState(shared, graph()->start());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
- closure, context, frame_state, effect, control));
+ javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
+ closure, context, frame_state_inner, effect));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::FastNewRestParameter(isolate()).code()),
- closure, context, frame_state, effect, control));
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSStrictArgumentsObject::kSize), _, _),
+ _));
}
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedMapped) {
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedRestArray) {
Node* const closure = Parameter(Type::Any());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
- Node* const control = graph()->start();
Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
Node* const frame_state_outer = FrameState(shared, graph()->start());
Node* const frame_state_inner = FrameState(shared, frame_state_outer);
Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kMappedArguments),
- closure, context, frame_state_inner, effect, control));
+ javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
+ closure, context, frame_state_inner, effect));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(JSSloppyArgumentsObject::kSize),
- _, control),
- _));
+ EXPECT_THAT(
+ r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(JSArray::kSize), _, _), _));
}
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedUnmapped) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state_outer = FrameState(shared, graph()->start());
- Node* const frame_state_inner = FrameState(shared, frame_state_outer);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
- closure, context, frame_state_inner, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(JSStrictArgumentsObject::kSize),
- _, control),
- _));
-}
+// -----------------------------------------------------------------------------
+// JSCreateClosure
-TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedRestArray) {
- Node* const closure = Parameter(Type::Any());
+TEST_F(JSCreateLoweringTest, JSCreateClosureViaInlinedAllocation) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state_outer = FrameState(shared, graph()->start());
- Node* const frame_state_inner = FrameState(shared, frame_state_outer);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
- closure, context, frame_state_inner, effect, control));
+ Handle<SharedFunctionInfo> shared(isolate()->number_function()->shared());
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateClosure(shared, NOT_TENURED),
+ context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(JSArray::kSize), _, control), _));
+ IsFinishRegion(IsAllocate(IsNumberConstant(JSFunction::kSize),
+ IsBeginRegion(_), control),
+ _));
}
// -----------------------------------------------------------------------------
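
The rewritten expectations above track two JSCreateLowering changes: arguments
objects and closures are now allocated inline instead of through the
FastNewStrictArguments/FastNewRestParameter stubs, and JSCreateArguments no
longer takes a control input. Roughly, the graph shape the
IsFinishRegion/IsAllocate matchers assert looks like this (a sketch; the field
stores are abridged):

  // region = BeginRegion(effect)
  // object = Allocate(NumberConstant(JSSloppyArgumentsObject::kSize),
  //                   region_effect, control)
  // ...      StoreField(object, map / properties / elements / length, ...)
  // result = FinishRegion(object, last_store_effect)
  //
  // The FinishRegion value replaces the original JSCreateArguments node, so
  // the whole allocation stays an atomic, effect-delimited region.
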
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index de0eefc531..780bf65df3 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -29,10 +29,9 @@ class JSIntrinsicLoweringTest : public GraphTest {
~JSIntrinsicLoweringTest() override {}
protected:
- Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kNoFlags) {
- MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
- flags);
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine(zone(),
+ MachineType::PointerRepresentation());
SimplifiedOperatorBuilder simplified(zone());
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
@@ -43,13 +42,6 @@ class JSIntrinsicLoweringTest : public GraphTest {
return reducer.Reduce(node);
}
- Node* EmptyFrameState() {
- MachineOperatorBuilder machine(zone());
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
- &machine);
- return jsgraph.EmptyFrameState();
- }
-
JSOperatorBuilder* javascript() { return &javascript_; }
private:
@@ -58,63 +50,6 @@ class JSIntrinsicLoweringTest : public GraphTest {
// -----------------------------------------------------------------------------
-// %_ConstructDouble
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineOptimizedConstructDouble) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineConstructDouble, 2), input0,
- input1, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64InsertHighWord32(
- IsFloat64InsertLowWord32(
- IsNumberConstant(BitEq(0.0)), input1),
- input0));
-}
-
-
-// -----------------------------------------------------------------------------
-// %_DoubleLo
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleLo) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleLo, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFloat64ExtractLowWord32(IsGuard(Type::Number(), input, _)));
-}
-
-
-// -----------------------------------------------------------------------------
-// %_DoubleHi
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleHi) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleHi, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFloat64ExtractHighWord32(IsGuard(Type::Number(), input, _)));
-}
-
-
-// -----------------------------------------------------------------------------
// %_IsSmi
@@ -151,11 +86,11 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
phi,
IsPhi(
MachineRepresentation::kTagged, IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
effect, _),
- IsInt32Constant(JS_ARRAY_TYPE)),
+ IsNumberConstant(JS_ARRAY_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
@@ -182,11 +117,11 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
phi,
IsPhi(
MachineRepresentation::kTagged, IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
effect, _),
- IsInt32Constant(JS_TYPED_ARRAY_TYPE)),
+ IsNumberConstant(JS_TYPED_ARRAY_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
@@ -213,11 +148,11 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
phi,
IsPhi(
MachineRepresentation::kTagged, IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
IsLoadField(AccessBuilder::ForMap(), input,
effect, CaptureEq(&if_false)),
effect, _),
- IsInt32Constant(JS_REGEXP_TYPE)),
+ IsNumberConstant(JS_REGEXP_TYPE)),
IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
IsBranch(IsObjectIsSmi(input), control))),
AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
@@ -240,67 +175,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiver) {
EXPECT_THAT(r.replacement(), IsObjectIsReceiver(input));
}
-
-// -----------------------------------------------------------------------------
-// %_ValueOf
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineValueOf) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineValueOf, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
-
- Node* phi = r.replacement();
- Capture<Node*> branch0, if_false0, branch1, if_true1;
- EXPECT_THAT(
- phi,
- IsPhi(
- MachineRepresentation::kTagged, input,
- IsPhi(MachineRepresentation::kTagged,
- IsLoadField(AccessBuilder::ForValue(), input, effect,
- CaptureEq(&if_true1)),
- input,
- IsMerge(
- AllOf(CaptureEq(&if_true1), IsIfTrue(CaptureEq(&branch1))),
- IsIfFalse(AllOf(
- CaptureEq(&branch1),
- IsBranch(
- IsWord32Equal(
- IsLoadField(
- AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input,
- effect, CaptureEq(&if_false0)),
- effect, _),
- IsInt32Constant(JS_VALUE_TYPE)),
- CaptureEq(&if_false0)))))),
- IsMerge(
- IsIfTrue(AllOf(CaptureEq(&branch0),
- IsBranch(IsObjectIsSmi(input), control))),
- AllOf(CaptureEq(&if_false0), IsIfFalse(CaptureEq(&branch0))))));
-}
-
-// -----------------------------------------------------------------------------
-// %_GetOrdinaryHasInstance
-
-TEST_F(JSIntrinsicLoweringTest, InlineGetOrdinaryHasInstance) {
- Node* const context = Parameter(0);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineGetOrdinaryHasInstance, 0),
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsLoadContext(
- ContextAccess(0, Context::ORDINARY_HAS_INSTANCE_INDEX, true), _));
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
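
Two themes in the intrinsic-lowering diff above: the %_ConstructDouble,
%_DoubleLo, %_DoubleHi, %_ValueOf and %_GetOrdinaryHasInstance lowerings (and
their tests) are removed outright, and the instance-type checks behind
%_IsArray, %_IsTypedArray and %_IsRegExp now compare at the simplified level
(NumberEqual/NumberConstant) rather than the machine level
(Word32Equal/Int32Constant), so the check stays typeable until late lowering.
The control flow the matchers describe, sketched as comments:

  // Branch(ObjectIsSmi(input), control)
  //   true:  result = FalseConstant()          // a Smi is never a JSArray
  //   false: type   = LoadField(MapInstanceType, LoadField(Map, input, ...))
  //          result = NumberEqual(type, NumberConstant(JS_ARRAY_TYPE))
  // Phi(MachineRepresentation::kTagged, result_true, result_false, Merge(...))
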
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 15b1427871..3b83d691f1 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -40,25 +40,13 @@ const SharedOperator kSharedOperators[] = {
control_input_count, value_output_count, effect_output_count, \
control_output_count \
}
- SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(StrictEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
- SHARED(StrictNotEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
- SHARED(LessThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToString, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
- SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
- SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
+ SHARED(ToObject, Operator::kFoldable, 1, 1, 1, 1, 1, 1, 2),
SHARED(Create, Operator::kEliminatable, 2, 1, 1, 0, 1, 1, 0),
- SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(TypeOf, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
- SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
- SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
#undef SHARED
};
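
For the shared-operator table, the row layout follows the SHARED macro's
parameter list visible above: name, operator properties, then input counts
(value, frame state, effect, control) and output counts (value, effect,
control). Reading the updated TypeOf row under that assumption:

  // SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0) decodes as:
  //   inputs:  1 value, 0 frame state, 0 effect, 0 control
  //   outputs: 1 value, 0 effect, 0 control
  // i.e. the shape a kPure operator must have: no effect or control edges,
  // so TypeOf nodes can be freely reordered and deduplicated. The dropped
  // rows (Equal, LessThan, Yield, HasProperty, InstanceOf, ...) leave this
  // shared table because those operators change shape elsewhere in this
  // commit (e.g. StrictEqual now carries a CompareOperationHint, as seen in
  // the js-typed-lowering tests below).
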
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 1adb5dae93..72c582525e 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/js-typed-lowering.h"
#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
@@ -65,8 +66,7 @@ const double kIntegerValues[] = {-V8_INFINITY, INT_MIN, -1000.0, -42.0,
Type* const kJSTypes[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
Type::Number(), Type::String(), Type::Object()};
-
-STATIC_ASSERT(LANGUAGE_END == 3);
+STATIC_ASSERT(LANGUAGE_END == 2);
const LanguageMode kLanguageModes[] = {SLOPPY, STRICT};
} // namespace
@@ -212,9 +212,8 @@ TEST_F(JSTypedLoweringTest, ParameterWithUndefined) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
Node* input = Parameter(Type::Boolean(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -242,9 +241,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -258,9 +256,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -269,9 +266,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -280,21 +276,28 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
Node* input = Parameter(Type::OrderedNumber(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
}
+TEST_F(JSTypedLoweringTest, JSToBooleanWithNumber) {
+ Node* input = Parameter(Type::Number(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberLessThan(IsNumberConstant(0.0), IsNumberAbs(input)));
+}
TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
Node* input = Parameter(Type::String(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -307,9 +310,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
- input, context, graph()->start()));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
ASSERT_FALSE(r.Changed());
}
@@ -327,8 +329,7 @@ TEST_F(JSTypedLoweringTest, JSToNumberWithPlainPrimitive) {
Reduce(graph()->NewNode(javascript()->ToNumber(), input, context,
EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsToNumber(input, IsNumberConstant(BitEq(0.0)),
- graph()->start(), control));
+ EXPECT_THAT(r.replacement(), IsPlainPrimitiveToNumber(input));
}
@@ -389,11 +390,13 @@ TEST_F(JSTypedLoweringTest, JSToStringWithBoolean) {
TEST_F(JSTypedLoweringTest, JSStrictEqualWithTheHole) {
Node* const the_hole = HeapConstant(factory()->the_hole_value());
Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
TRACED_FOREACH(Type*, type, kJSTypes) {
Node* const lhs = Parameter(type);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, the_hole,
- context, graph()->start(), graph()->start()));
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ lhs, the_hole, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -404,9 +407,11 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
Node* const lhs = Parameter(Type::Unique(), 0);
Node* const rhs = Parameter(Type::Unique(), 1);
Node* const context = Parameter(Type::Any(), 2);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, rhs, context,
- graph()->start(), graph()->start()));
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ lhs, rhs, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsReferenceEqual(Type::Unique(), lhs, rhs));
}
@@ -415,53 +420,95 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
// -----------------------------------------------------------------------------
// JSShiftLeft
-
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndConstant) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftLeft(hints), lhs, NumberConstant(rhs), context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs,
+ NumberConstant(rhs), context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftLeft(lhs, IsNumberConstant(BitEq(rhs))));
}
}
-
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hints), lhs,
- rhs, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftLeft(lhs, rhs));
}
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftLeft(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftLeft(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftLeft(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSShiftRight
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRight(hints), lhs, NumberConstant(rhs), context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ NumberConstant(rhs), context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftRight(lhs, IsNumberConstant(BitEq(rhs))));
@@ -470,19 +517,63 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hints), lhs,
- rhs, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs, rhs, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftRight(lhs, rhs));
}
+TEST_F(JSTypedLoweringTest, JSShiftRightWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftRight(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftRight(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftRight(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSShiftRightLogical
@@ -490,15 +581,15 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
TEST_F(JSTypedLoweringTest,
JSShiftRightLogicalWithUnsigned32AndConstant) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRightLogical(hints), lhs, NumberConstant(rhs),
- context, EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, NumberConstant(rhs), context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsNumberShiftRightLogical(lhs, IsNumberConstant(BitEq(rhs))));
@@ -507,19 +598,63 @@ TEST_F(JSTypedLoweringTest,
TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndUnsigned32) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
Node* const lhs = Parameter(Type::Unsigned32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hints),
- lhs, rhs, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint), lhs, rhs,
+ context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberShiftRightLogical(lhs, rhs));
}
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftRightLogical(
+ NumberOperationHint::kSignedSmall, lhs, rhs,
+ effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberShiftRightLogical(
+ NumberOperationHint::kSigned32, lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hint),
+ lhs, rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberShiftRightLogical(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSLoadContext
@@ -611,14 +746,15 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadProperty(feedback), base, key, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
+ base, key, vector, context,
+ EmptyFrameState(), effect, control));
Matcher<Node*> offset_matcher =
element_size == 1
? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+ : IsNumberShiftLeft(key,
+ IsNumberConstant(WhichPowerOf2(element_size)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -652,9 +788,9 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadProperty(feedback), base, key, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
+ base, key, vector, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -693,14 +829,14 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
element_size == 1
? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+ : IsNumberShiftLeft(
+ key, IsNumberConstant(WhichPowerOf2(element_size)));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -734,21 +870,26 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
+ // TODO(mstarzinger): Once the effect-control-linearizer provides a frame
+ // state we can get rid of this checkpoint again. The reducer won't care.
+ Node* checkpoint = graph()->NewNode(common()->Checkpoint(),
+ EmptyFrameState(), effect, control);
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), checkpoint, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
element_size == 1
? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+ : IsNumberShiftLeft(
+ key, IsNumberConstant(WhichPowerOf2(element_size)));
Matcher<Node*> value_matcher =
- IsToNumber(value, context, effect, control);
+ IsToNumber(value, context, checkpoint, control);
Matcher<Node*> effect_matcher = value_matcher;
+ Matcher<Node*> control_matcher = IsIfSuccess(value_matcher);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -757,7 +898,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
offset_matcher,
IsNumberConstant(array->byte_length()->Number()),
- value_matcher, effect_matcher, control));
+ value_matcher, effect_matcher, control_matcher));
}
}
}
@@ -787,8 +928,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -814,64 +954,78 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->LoadNamed(name, feedback), receiver, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->LoadNamed(name, feedback), receiver,
+ vector, context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
receiver, effect, control));
}
-TEST_F(JSTypedLoweringTest, JSLoadNamedFunctionPrototype) {
- VectorSlotPair feedback;
- Handle<Name> name = factory()->prototype_string();
- Handle<JSFunction> function = isolate()->object_function();
- Handle<JSObject> function_prototype(JSObject::cast(function->prototype()));
- Node* const receiver = Parameter(Type::Constant(function, zone()), 0);
- Node* const vector = Parameter(Type::Internal(), 1);
- Node* const context = Parameter(Type::Internal(), 2);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->LoadNamed(name, feedback), receiver, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(function_prototype));
-}
-
-
// -----------------------------------------------------------------------------
// JSAdd
TEST_F(JSTypedLoweringTest, JSAddWithString) {
- BinaryOperationHints const hints = BinaryOperationHints::Any();
- Node* lhs = Parameter(Type::String(), 0);
- Node* rhs = Parameter(Type::String(), 1);
- Node* context = Parameter(Type::Any(), 2);
- Node* frame_state0 = EmptyFrameState();
- Node* frame_state1 = EmptyFrameState();
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Reduction r =
- Reduce(graph()->NewNode(javascript()->Add(hints), lhs, rhs, context,
- frame_state0, frame_state1, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsCall(_, IsHeapConstant(CodeFactory::StringAdd(
- isolate(), STRING_ADD_CHECK_NONE,
- NOT_TENURED).code()),
- lhs, rhs, context, frame_state0, effect, control));
+ BinaryOperationHint const hint = BinaryOperationHint::kAny;
+ Node* lhs = Parameter(Type::String(), 0);
+ Node* rhs = Parameter(Type::String(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Add(hint), lhs, rhs,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::StringAdd(
+ isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED)
+ .code()),
+ lhs, rhs, context, frame_state, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSAddSmis) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 0);
+ Node* rhs = Parameter(Type::Number(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Add(hint), lhs, rhs,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberAdd(NumberOperationHint::kSignedSmall, lhs,
+ rhs, effect, control));
}
+// -----------------------------------------------------------------------------
+// JSSubtract
+
+TEST_F(JSTypedLoweringTest, JSSubtractSmis) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 0);
+ Node* rhs = Parameter(Type::Number(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Subtract(hint), lhs, rhs,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberSubtract(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
// -----------------------------------------------------------------------------
// JSInstanceOf
// Test that instanceOf is reduced if and only if the right-hand side is a
// function constant. Functional correctness is ensured elsewhere.
-
TEST_F(JSTypedLoweringTest, JSInstanceOfSpecializationWithoutSmiCheck) {
Node* const context = Parameter(Type::Any());
Node* const frame_state = EmptyFrameState();
@@ -929,6 +1083,150 @@ TEST_F(JSTypedLoweringTest, JSInstanceOfNoSpecialization) {
ASSERT_EQ(instanceOf, dummy->InputAt(0));
}
+// -----------------------------------------------------------------------------
+// JSBitwiseAnd
+
+TEST_F(JSTypedLoweringTest, JSBitwiseAndWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseAnd(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseAnd(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseAndWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseAnd(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseAnd(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseAndWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseAnd(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberBitwiseAnd(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
+
+// -----------------------------------------------------------------------------
+// JSBitwiseOr
+
+TEST_F(JSTypedLoweringTest, JSBitwiseOrWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseOr(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseOr(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseOrWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseOr(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseOr(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseOrWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseOr(hint), lhs, rhs,
+ UndefinedConstant(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberBitwiseOr(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
+
+// -----------------------------------------------------------------------------
+// JSBitwiseXor
+
+TEST_F(JSTypedLoweringTest, JSBitwiseXorWithSignedSmallHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSignedSmall;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseXor(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseXor(NumberOperationHint::kSignedSmall,
+ lhs, rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseXorWithSigned32Hint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kSigned32;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseXor(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberBitwiseXor(NumberOperationHint::kSigned32, lhs,
+ rhs, effect, control));
+}
+
+TEST_F(JSTypedLoweringTest, JSBitwiseXorWithNumberOrOddballHint) {
+ BinaryOperationHint const hint = BinaryOperationHint::kNumberOrOddball;
+ Node* lhs = Parameter(Type::Number(), 2);
+ Node* rhs = Parameter(Type::Number(), 3);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->BitwiseXor(hint), lhs,
+ rhs, UndefinedConstant(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSpeculativeNumberBitwiseXor(
+ NumberOperationHint::kNumberOrOddball, lhs,
+ rhs, effect, control));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
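
A note on the pattern running through the hunks above: each hinted test expects JSTypedLowering to replace a generic JS operator (Add, Subtract, ShiftLeft, BitwiseAnd, and so on) with a speculative simplified operator whose NumberOperationHint mirrors the incoming BinaryOperationHint. A minimal sketch of that mapping, using the enum members named in the diff; the standalone function below is hypothetical, not V8's actual js-typed-lowering.cc logic:

enum class BinaryOperationHint { kSignedSmall, kSigned32, kNumberOrOddball, kAny };
enum class NumberOperationHint { kSignedSmall, kSigned32, kNumberOrOddball };

// Returns true and sets *out when feedback permits a speculative lowering
// (JSShiftLeft -> SpeculativeNumberShiftLeft, JSAdd -> SpeculativeNumberAdd,
// etc.). kAny carries no feedback; the kAny tests above show the reducer
// then relying on static input types instead (e.g. Signed32 inputs still
// lower JSShiftLeft to a plain NumberShiftLeft).
bool SpeculativeHintFor(BinaryOperationHint hint, NumberOperationHint* out) {
  switch (hint) {
    case BinaryOperationHint::kSignedSmall:
      *out = NumberOperationHint::kSignedSmall;
      return true;
    case BinaryOperationHint::kSigned32:
      *out = NumberOperationHint::kSigned32;
      return true;
    case BinaryOperationHint::kNumberOrOddball:
      *out = NumberOperationHint::kNumberOrOddball;
      return true;
    case BinaryOperationHint::kAny:
      return false;
  }
  return false;
}
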
diff --git a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
index 597edde665..741021a446 100644
--- a/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
+++ b/deps/v8/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -26,27 +26,25 @@ class LinkageTailCall : public TestWithZone {
CallDescriptor* NewStandardCallDescriptor(LocationSignature* locations) {
DCHECK(arraysize(kMachineTypes) >=
locations->return_count() + locations->parameter_count());
- MachineSignature* types = new (zone()) MachineSignature(
- locations->return_count(), locations->parameter_count(), kMachineTypes);
- return new (zone()) CallDescriptor(CallDescriptor::kCallCodeObject,
- MachineType::AnyTagged(),
- LinkageLocation::ForAnyRegister(),
- types, // machine_sig
- locations, // location_sig
- 0, // js_parameter_count
- Operator::kNoProperties, // properties
- 0, // callee-saved
- 0, // callee-saved fp
- CallDescriptor::kNoFlags, // flags,
- "");
+ USE(kMachineTypes);
+ return new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
+ LinkageLocation::ForAnyRegister(MachineType::Pointer()),
+ locations, // location_sig
+ 0, // js_parameter_count
+ Operator::kNoProperties, // properties
+ 0, // callee-saved
+ 0, // callee-saved fp
+ CallDescriptor::kNoFlags, // flags,
+ "");
}
LinkageLocation StackLocation(int loc) {
- return LinkageLocation::ForCallerFrameSlot(-loc);
+ return LinkageLocation::ForCallerFrameSlot(-loc, MachineType::Pointer());
}
LinkageLocation RegisterLocation(int loc) {
- return LinkageLocation::ForRegister(loc);
+ return LinkageLocation::ForRegister(loc, MachineType::Pointer());
}
};
@@ -57,8 +55,9 @@ TEST_F(LinkageTailCall, EmptyToEmpty) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc->CanTailCall(node));
+ const CallDescriptor* callee = CallDescriptorOf(node->op());
+ int stack_param_delta = callee->GetStackParameterDelta(desc);
EXPECT_EQ(0, stack_param_delta);
}
@@ -75,8 +74,8 @@ TEST_F(LinkageTailCall, SameReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -95,9 +94,7 @@ TEST_F(LinkageTailCall, DifferingReturn) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_FALSE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(0, stack_param_delta);
+ EXPECT_TRUE(!desc1->CanTailCall(node));
}
@@ -116,8 +113,8 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -137,8 +134,8 @@ TEST_F(LinkageTailCall, MoreRegisterParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -158,9 +155,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(-1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(1, stack_param_delta);
}
@@ -179,9 +176,9 @@ TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
CommonOperatorBuilder common(zone());
const Operator* op = common.Call(desc2);
Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(-1, stack_param_delta);
}
@@ -205,8 +202,8 @@ TEST_F(LinkageTailCall, MatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -231,8 +228,8 @@ TEST_F(LinkageTailCall, NonMatchingStackParameters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -258,8 +255,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -286,8 +283,8 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegisters) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
EXPECT_EQ(0, stack_param_delta);
}
@@ -314,9 +311,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(-1, stack_param_delta);
}
@@ -342,9 +339,9 @@ TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
const Operator* op = common.Call(desc2);
Node* const node =
Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
- int stack_param_delta = 0;
- EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
- EXPECT_EQ(-1, stack_param_delta);
+ EXPECT_TRUE(desc1->CanTailCall(node));
+ int stack_param_delta = desc2->GetStackParameterDelta(desc1);
+ EXPECT_EQ(1, stack_param_delta);
}
} // namespace compiler
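
The linkage changes above split one query into two: CanTailCall(node) now answers eligibility only, and the frame adjustment comes from GetStackParameterDelta on the callee's descriptor. The flipped expectations (the extra-callee-stack case moving from -1 to +1) indicate the delta is now reported from the callee's side. A toy, self-contained model of that convention — the struct below is a stand-in, not V8's CallDescriptor:

#include <cstdio>

struct CallDescriptor {
  int stack_params;  // toy stand-in for the descriptor's stack locations
  bool CanTailCall() const { return true; }  // eligibility check only
  int GetStackParameterDelta(const CallDescriptor* caller) const {
    // Sign inferred from the updated tests: positive when the callee needs
    // more stack-parameter slots than the caller provides.
    return stack_params - caller->stack_params;
  }
};

int main() {
  CallDescriptor caller{0}, callee{1};
  if (caller.CanTailCall()) {
    int stack_param_delta = callee.GetStackParameterDelta(&caller);
    std::printf("stack_param_delta = %d\n", stack_param_delta);  // prints 1
  }
  return 0;
}

(cf. MoreRegisterAndStackParametersCallee above, which expects exactly this +1.)
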
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 38bb151dba..ada99b5a7f 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -1,12 +1,19 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/access-builder.h"
#include "src/compiler/load-elimination.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::StrictMock;
namespace v8 {
namespace internal {
@@ -15,61 +22,195 @@ namespace compiler {
class LoadEliminationTest : public TypedGraphTest {
public:
LoadEliminationTest()
- : TypedGraphTest(3), common_(zone()), simplified_(zone()) {}
+ : TypedGraphTest(3),
+ simplified_(zone()),
+ jsgraph_(isolate(), graph(), common(), nullptr, simplified(), nullptr) {
+ }
~LoadEliminationTest() override {}
protected:
- Reduction Reduce(Node* node) {
- // TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
- LoadElimination reducer(&graph_reducer, graph(), common());
- return reducer.Reduce(node);
- }
-
+ JSGraph* jsgraph() { return &jsgraph_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
- CommonOperatorBuilder* common() { return &common_; }
private:
- CommonOperatorBuilder common_;
SimplifiedOperatorBuilder simplified_;
+ JSGraph jsgraph_;
};
+TEST_F(LoadEliminationTest, LoadElementAndLoadElement) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* load1 = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ load_elimination.Reduce(load1);
+
+ Node* load2 = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load2, load1, load1, _));
+ Reduction r = load_elimination.Reduce(load2);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load1, r.replacement());
+}
-TEST_F(LoadEliminationTest, LoadFieldWithStoreField) {
- Node* object1 = Parameter(Type::Any(), 0);
- Node* object2 = Parameter(Type::Any(), 1);
+TEST_F(LoadEliminationTest, StoreElementAndLoadElement) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
Node* value = Parameter(Type::Any(), 2);
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store = effect =
+ graph()->NewNode(simplified()->StoreElement(access), object, index, value,
+ effect, control);
+ load_elimination.Reduce(store);
+
+ Node* load = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, StoreElementAndStoreFieldAndLoadElement) {
+ Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* control = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ Node* value = Parameter(Type::Any(), 2);
+ ElementAccess const access = {kTaggedBase, kPointerSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store1 = effect =
+ graph()->NewNode(simplified()->StoreElement(access), object, index, value,
+ effect, control);
+ load_elimination.Reduce(store1);
+
+ Node* store2 = effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ object, value, effect, control);
+ load_elimination.Reduce(store2);
+
+ Node* load = effect = graph()->NewNode(simplified()->LoadElement(access),
+ object, index, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store2, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, LoadFieldAndLoadField) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess const access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* load1 = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ load_elimination.Reduce(load1);
+
+ Node* load2 = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load2, load1, load1, _));
+ Reduction r = load_elimination.Reduce(load2);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load1, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* value = Parameter(Type::Any(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store = effect = graph()->NewNode(simplified()->StoreField(access),
+ object, value, effect, control);
+ load_elimination.Reduce(store);
+
+ Node* load = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
+}
+
+TEST_F(LoadEliminationTest, StoreFieldAndStoreElementAndLoadField) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* value = Parameter(Type::Any(), 1);
+ Node* index = Parameter(Type::UnsignedSmall(), 2);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ FieldAccess access = {kTaggedBase,
+ kPointerSize,
+ MaybeHandle<Name>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store1 = effect = graph()->NewNode(simplified()->StoreField(access),
+ object, value, effect, control);
+ load_elimination.Reduce(store1);
+
+ Node* store2 = effect = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()), object,
+ index, object, effect, control);
+ load_elimination.Reduce(store2);
- FieldAccess access1 = AccessBuilder::ForContextSlot(42);
- Node* store1 = graph()->NewNode(simplified()->StoreField(access1), object1,
- value, effect, control);
- Reduction r1 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
- object1, store1, control));
- ASSERT_TRUE(r1.Changed());
- EXPECT_EQ(value, r1.replacement());
-
- FieldAccess access2 = AccessBuilder::ForMap();
- Node* store2 = graph()->NewNode(simplified()->StoreField(access2), object1,
- object2, store1, control);
- Reduction r2 = Reduce(graph()->NewNode(simplified()->LoadField(access2),
- object1, store2, control));
- ASSERT_TRUE(r2.Changed());
- EXPECT_EQ(object2, r2.replacement());
-
- Node* store3 = graph()->NewNode(
- simplified()->StoreBuffer(BufferAccess(kExternalInt8Array)), object2,
- value, Int32Constant(10), object1, store2, control);
-
- Reduction r3 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
- object2, store3, control));
- ASSERT_FALSE(r3.Changed());
-
- Reduction r4 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
- object1, store3, control));
- ASSERT_TRUE(r4.Changed());
- EXPECT_EQ(value, r4.replacement());
+ Node* load = effect = graph()->NewNode(simplified()->LoadField(access),
+ object, effect, control);
+ EXPECT_CALL(editor, ReplaceWithValue(load, value, store2, _));
+ Reduction r = load_elimination.Reduce(load);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(value, r.replacement());
}
} // namespace compiler
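
The rewritten load-elimination tests drive the reducer node by node along the effect chain and observe replacements through a strict gmock editor instead of a full GraphReducer. What they pin down is aliasing granularity: a StoreField (here, to the map slot) does not clobber tracked element state, and a StoreElement does not clobber tracked field state. A hedged, self-contained sketch of the abstract state this behavior implies — the real tracking in src/compiler/load-elimination.cc is richer (effect phis, kill sets, aliasing checks):

#include <map>
#include <utility>

struct Node;  // opaque IR node, standing in for compiler::Node

struct AbstractState {
  // (object, index) -> last known element value
  std::map<std::pair<const Node*, const Node*>, const Node*> elements;
  // (object, field offset) -> last known field value
  std::map<std::pair<const Node*, int>, const Node*> fields;

  void OnStoreElement(const Node* object, const Node* index,
                      const Node* value) {
    elements[{object, index}] = value;  // field state is left untouched
  }
  void OnStoreField(const Node* object, int offset, const Node* value) {
    fields[{object, offset}] = value;  // element state is left untouched
  }
  const Node* LookupElement(const Node* object, const Node* index) const {
    auto it = elements.find({object, index});
    return it == elements.end() ? nullptr : it->second;
  }
};

Under this model the intervening map store in StoreElementAndStoreFieldAndLoadElement leaves the cached element value intact, so the final LoadElement folds to `value`, exactly as the test expects.
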
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 9db490560d..56691fdeef 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -28,6 +28,7 @@ struct While {
Node* loop;
Node* branch;
Node* if_true;
+ Node* if_false;
Node* exit;
};
@@ -46,6 +47,7 @@ struct Counter {
Node* inc;
Node* phi;
Node* add;
+ Node* exit_marker;
};
@@ -105,12 +107,14 @@ class LoopPeelingTest : public GraphTest {
While NewWhile(Node* cond, Node* control = nullptr) {
if (control == nullptr) control = start();
- Node* loop = graph()->NewNode(common()->Loop(2), control, control);
- Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* exit = graph()->NewNode(common()->IfFalse(), branch);
- loop->ReplaceInput(1, if_true);
- return {loop, branch, if_true, exit};
+ While w;
+ w.loop = graph()->NewNode(common()->Loop(2), control, control);
+ w.branch = graph()->NewNode(common()->Branch(), cond, w.loop);
+ w.if_true = graph()->NewNode(common()->IfTrue(), w.branch);
+ w.if_false = graph()->NewNode(common()->IfFalse(), w.branch);
+ w.exit = graph()->NewNode(common()->LoopExit(), w.if_false, w.loop);
+ w.loop->ReplaceInput(1, w.if_true);
+ return w;
}
void Chain(While* a, Node* control) { a->loop->ReplaceInput(0, control); }
@@ -124,21 +128,24 @@ class LoopPeelingTest : public GraphTest {
}
Branch NewBranch(Node* cond, Node* control = nullptr) {
+ Branch b;
if (control == nullptr) control = start();
- Node* branch = graph()->NewNode(common()->Branch(), cond, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- return {branch, if_true, if_false};
+ b.branch = graph()->NewNode(common()->Branch(), cond, control);
+ b.if_true = graph()->NewNode(common()->IfTrue(), b.branch);
+ b.if_false = graph()->NewNode(common()->IfFalse(), b.branch);
+ return b;
}
Counter NewCounter(While* w, int32_t b, int32_t k) {
- Node* base = Int32Constant(b);
- Node* inc = Int32Constant(k);
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), base, base, w->loop);
- Node* add = graph()->NewNode(machine()->Int32Add(), phi, inc);
- phi->ReplaceInput(1, add);
- return {base, inc, phi, add};
+ Counter c;
+ c.base = Int32Constant(b);
+ c.inc = Int32Constant(k);
+ c.phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ c.base, c.base, w->loop);
+ c.add = graph()->NewNode(machine()->Int32Add(), c.phi, c.inc);
+ c.phi->ReplaceInput(1, c.add);
+ c.exit_marker = graph()->NewNode(common()->LoopExitValue(), c.phi, w->exit);
+ return c;
}
};
@@ -152,14 +159,14 @@ TEST_F(LoopPeelingTest, SimpleLoop) {
Node* br1 = ExpectPeeled(w.branch, peeled);
Node* if_true1 = ExpectPeeled(w.if_true, peeled);
- Node* if_false1 = ExpectPeeled(w.exit, peeled);
+ Node* if_false1 = ExpectPeeled(w.if_false, peeled);
EXPECT_THAT(br1, IsBranch(p0, start()));
EXPECT_THAT(if_true1, IsIfTrue(br1));
EXPECT_THAT(if_false1, IsIfFalse(br1));
EXPECT_THAT(w.loop, IsLoop(if_true1, w.if_true));
- EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(w.exit, if_false1)));
+ EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(w.if_false, if_false1)));
}
@@ -167,13 +174,13 @@ TEST_F(LoopPeelingTest, SimpleLoopWithCounter) {
Node* p0 = Parameter(0);
While w = NewWhile(p0);
Counter c = NewCounter(&w, 0, 1);
- Node* r = InsertReturn(c.phi, start(), w.exit);
+ Node* r = InsertReturn(c.exit_marker, start(), w.exit);
PeeledIteration* peeled = PeelOne();
Node* br1 = ExpectPeeled(w.branch, peeled);
Node* if_true1 = ExpectPeeled(w.if_true, peeled);
- Node* if_false1 = ExpectPeeled(w.exit, peeled);
+ Node* if_false1 = ExpectPeeled(w.if_false, peeled);
EXPECT_THAT(br1, IsBranch(p0, start()));
EXPECT_THAT(if_true1, IsIfTrue(br1));
@@ -182,11 +189,10 @@ TEST_F(LoopPeelingTest, SimpleLoopWithCounter) {
EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
- Capture<Node*> merge;
+ EXPECT_THAT(w.exit, IsMerge(w.if_false, if_false1));
EXPECT_THAT(
- r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
- AllOf(CaptureEq(&merge), IsMerge(w.exit, if_false1))),
- start(), CaptureEq(&merge)));
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base, w.exit),
+ start(), w.exit));
}
@@ -197,13 +203,13 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_outer) {
Nest(&inner, &outer);
Counter c = NewCounter(&outer, 0, 1);
- Node* r = InsertReturn(c.phi, start(), outer.exit);
+ Node* r = InsertReturn(c.exit_marker, start(), outer.exit);
PeeledIteration* peeled = PeelOne();
Node* bro = ExpectPeeled(outer.branch, peeled);
Node* if_trueo = ExpectPeeled(outer.if_true, peeled);
- Node* if_falseo = ExpectPeeled(outer.exit, peeled);
+ Node* if_falseo = ExpectPeeled(outer.if_false, peeled);
EXPECT_THAT(bro, IsBranch(p0, start()));
EXPECT_THAT(if_trueo, IsIfTrue(bro));
@@ -211,21 +217,21 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_outer) {
Node* bri = ExpectPeeled(inner.branch, peeled);
Node* if_truei = ExpectPeeled(inner.if_true, peeled);
- Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+ Node* if_falsei = ExpectPeeled(inner.if_false, peeled);
+ Node* exiti = ExpectPeeled(inner.exit, peeled);
EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
EXPECT_THAT(if_truei, IsIfTrue(bri));
EXPECT_THAT(if_falsei, IsIfFalse(bri));
- EXPECT_THAT(outer.loop, IsLoop(if_falsei, inner.exit));
+ EXPECT_THAT(outer.loop, IsLoop(exiti, inner.exit));
EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
Capture<Node*> merge;
- EXPECT_THAT(
- r,
- IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
- AllOf(CaptureEq(&merge), IsMerge(outer.exit, if_falseo))),
- start(), CaptureEq(&merge)));
+ EXPECT_THAT(outer.exit, IsMerge(outer.if_false, if_falseo));
+ EXPECT_THAT(r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
+ outer.exit),
+ start(), outer.exit));
}
@@ -236,7 +242,7 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_inner) {
Nest(&inner, &outer);
Counter c = NewCounter(&outer, 0, 1);
- Node* r = InsertReturn(c.phi, start(), outer.exit);
+ Node* r = InsertReturn(c.exit_marker, start(), outer.exit);
LoopTree* loop_tree = GetLoopTree();
LoopTree::Loop* loop = loop_tree->ContainingLoop(inner.loop);
@@ -248,20 +254,22 @@ TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_inner) {
ExpectNotPeeled(outer.loop, peeled);
ExpectNotPeeled(outer.branch, peeled);
ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.if_false, peeled);
ExpectNotPeeled(outer.exit, peeled);
Node* bri = ExpectPeeled(inner.branch, peeled);
Node* if_truei = ExpectPeeled(inner.if_true, peeled);
- Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+ Node* if_falsei = ExpectPeeled(inner.if_false, peeled);
EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
EXPECT_THAT(if_truei, IsIfTrue(bri));
EXPECT_THAT(if_falsei, IsIfFalse(bri));
- EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(inner.exit, IsMerge(inner.if_false, if_falsei));
+ EXPECT_THAT(outer.loop, IsLoop(start(), inner.exit));
ExpectNotPeeled(c.add, peeled);
- EXPECT_THAT(r, IsReturn(c.phi, start(), outer.exit));
+ EXPECT_THAT(r, IsReturn(c.exit_marker, start(), outer.exit));
}
@@ -271,7 +279,7 @@ TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
While inner = NewWhile(p0);
Nest(&inner, &outer);
Counter c = NewCounter(&inner, 0, 1);
- Node* phi = NewPhi(&outer, Int32Constant(11), c.phi);
+ Node* phi = NewPhi(&outer, Int32Constant(11), c.exit_marker);
Node* r = InsertReturn(phi, start(), outer.exit);
@@ -285,25 +293,26 @@ TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
ExpectNotPeeled(outer.loop, peeled);
ExpectNotPeeled(outer.branch, peeled);
ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.if_false, peeled);
ExpectNotPeeled(outer.exit, peeled);
Node* bri = ExpectPeeled(inner.branch, peeled);
Node* if_truei = ExpectPeeled(inner.if_true, peeled);
- Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+ Node* if_falsei = ExpectPeeled(inner.if_false, peeled);
EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
EXPECT_THAT(if_truei, IsIfTrue(bri));
EXPECT_THAT(if_falsei, IsIfFalse(bri));
- EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(inner.exit, IsMerge(inner.if_false, if_falsei));
+ EXPECT_THAT(outer.loop, IsLoop(start(), inner.exit));
EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
- Node* back = phi->InputAt(1);
- EXPECT_THAT(back, IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
- IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(c.exit_marker,
+ IsPhi(MachineRepresentation::kTagged, c.phi, c.base, inner.exit));
EXPECT_THAT(phi, IsPhi(MachineRepresentation::kTagged, IsInt32Constant(11),
- back, outer.loop));
+ c.exit_marker, outer.loop));
EXPECT_THAT(r, IsReturn(phi, start(), outer.exit));
}
@@ -318,7 +327,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoop) {
loop->ReplaceInput(1, b2.if_true);
loop->ReplaceInput(2, b2.if_false);
- Node* r = InsertReturn(p0, start(), b1.if_false);
+ Node* exit = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+
+ Node* r = InsertReturn(p0, start(), exit);
PeeledIteration* peeled = PeelOne();
@@ -339,7 +350,8 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoop) {
EXPECT_THAT(b2f, IsIfFalse(b2b));
EXPECT_THAT(loop, IsLoop(IsMerge(b2t, b2f), b2.if_true, b2.if_false));
- EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(b1.if_false, b1f)));
+ EXPECT_THAT(exit, IsMerge(b1.if_false, b1f));
+ EXPECT_THAT(r, IsReturn(p0, start(), exit));
}
@@ -355,7 +367,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
loop->ReplaceInput(1, b2.if_true);
loop->ReplaceInput(2, b2.if_false);
- Node* r = InsertReturn(phi, start(), b1.if_false);
+ Node* exit = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+ Node* exit_marker = graph()->NewNode(common()->LoopExitValue(), phi, exit);
+ Node* r = InsertReturn(exit_marker, start(), exit);
PeeledIteration* peeled = PeelOne();
@@ -383,11 +397,10 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
IsInt32Constant(2), IsMerge(b2t, b2f)),
IsInt32Constant(1), IsInt32Constant(2), loop));
- Capture<Node*> merge;
- EXPECT_THAT(
- r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
- AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
- start(), CaptureEq(&merge)));
+ EXPECT_THAT(exit, IsMerge(b1.if_false, b1f));
+ EXPECT_THAT(exit_marker, IsPhi(MachineRepresentation::kTagged, phi,
+ IsInt32Constant(0), exit));
+ EXPECT_THAT(r, IsReturn(exit_marker, start(), exit));
}
@@ -408,7 +421,9 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
loop->ReplaceInput(1, b2.if_true);
loop->ReplaceInput(2, b2.if_false);
- Node* r = InsertReturn(phi, start(), b1.if_false);
+ Node* exit = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+ Node* exit_marker = graph()->NewNode(common()->LoopExitValue(), phi, exit);
+ Node* r = InsertReturn(exit_marker, start(), exit);
PeeledIteration* peeled = PeelOne();
@@ -443,49 +458,60 @@ TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
IsInt32Add(phi, IsInt32Constant(1)),
IsInt32Add(phi, IsInt32Constant(2)), loop));
- Capture<Node*> merge;
- EXPECT_THAT(
- r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
- AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
- start(), CaptureEq(&merge)));
+ EXPECT_THAT(exit, IsMerge(b1.if_false, b1f));
+ EXPECT_THAT(exit_marker, IsPhi(MachineRepresentation::kTagged, phi,
+ IsInt32Constant(0), exit));
+ EXPECT_THAT(r, IsReturn(exit_marker, start(), exit));
}
-
-TEST_F(LoopPeelingTest, TwoExitLoop_nope) {
+TEST_F(LoopPeelingTest, TwoExitLoop) {
Node* p0 = Parameter(0);
Node* loop = graph()->NewNode(common()->Loop(2), start(), start());
Branch b1 = NewBranch(p0, loop);
Branch b2 = NewBranch(p0, b1.if_true);
loop->ReplaceInput(1, b2.if_true);
- Node* merge = graph()->NewNode(common()->Merge(2), b1.if_false, b2.if_false);
- InsertReturn(p0, start(), merge);
- {
- LoopTree* loop_tree = GetLoopTree();
- LoopTree::Loop* loop = loop_tree->outer_loops()[0];
- EXPECT_FALSE(LoopPeeler::CanPeel(loop_tree, loop));
- }
-}
+ Node* exit1 = graph()->NewNode(common()->LoopExit(), b1.if_false, loop);
+ Node* exit2 = graph()->NewNode(common()->LoopExit(), b2.if_false, loop);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), exit1, exit2);
+ Node* r = InsertReturn(p0, start(), merge);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1p = ExpectPeeled(b1.branch, peeled);
+ Node* if_true1p = ExpectPeeled(b1.if_true, peeled);
+ Node* if_false1p = ExpectPeeled(b1.if_false, peeled);
+ Node* b2p = ExpectPeeled(b2.branch, peeled);
+ Node* if_true2p = ExpectPeeled(b2.if_true, peeled);
+ Node* if_false2p = ExpectPeeled(b2.if_false, peeled);
-const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
- 0, 0, 1, 1, 1, 2);
+ EXPECT_THAT(b1p, IsBranch(p0, start()));
+ EXPECT_THAT(if_true1p, IsIfTrue(b1p));
+ EXPECT_THAT(if_false1p, IsIfFalse(b1p));
+ EXPECT_THAT(b2p, IsBranch(p0, if_true1p));
+ EXPECT_THAT(if_true2p, IsIfTrue(b2p));
+ EXPECT_THAT(if_false2p, IsIfFalse(b2p));
-TEST_F(LoopPeelingTest, TwoExitLoopWithCall_nope) {
+ EXPECT_THAT(exit1, IsMerge(b1.if_false, if_false1p));
+ EXPECT_THAT(exit2, IsMerge(b2.if_false, if_false2p));
+
+ EXPECT_THAT(loop, IsLoop(if_true2p, b2.if_true));
+
+ EXPECT_THAT(merge, IsMerge(exit1, exit2));
+ EXPECT_THAT(r, IsReturn(p0, start(), merge));
+}
+
+TEST_F(LoopPeelingTest, SimpleLoopWithUnmarkedExit) {
Node* p0 = Parameter(0);
Node* loop = graph()->NewNode(common()->Loop(2), start(), start());
- Branch b1 = NewBranch(p0, loop);
-
- Node* call = graph()->NewNode(&kMockCall, b1.if_true);
- Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* if_exception = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Branch b = NewBranch(p0, loop);
+ loop->ReplaceInput(1, b.if_true);
- loop->ReplaceInput(1, if_success);
- Node* merge = graph()->NewNode(common()->Merge(2), b1.if_false, if_exception);
- InsertReturn(p0, start(), merge);
+ InsertReturn(p0, start(), b.if_false);
{
LoopTree* loop_tree = GetLoopTree();
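
The loop-peeling changes above introduce explicit exit markers: control leaving a loop now flows through a LoopExit node tied to the loop header, and values leaving it flow through LoopExitValue. Peeling then rewrites each marker into the Merge/Phi the expectations check for. A condensed fragment of the construction, assembled from the NewWhile/NewCounter helpers in the diff; it assumes the test fixture's graph()/common()/machine() accessors and is not standalone:

// A counted while-loop with marked exits, as the updated tests build it.
Node* loop = graph()->NewNode(common()->Loop(2), entry, entry);
Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* exit = graph()->NewNode(common()->LoopExit(), if_false, loop);
loop->ReplaceInput(1, if_true);  // close the backedge

Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
                             base, base, loop);
Node* add = graph()->NewNode(machine()->Int32Add(), phi, inc);
phi->ReplaceInput(1, add);
Node* exit_marker = graph()->NewNode(common()->LoopExitValue(), phi, exit);
// After one peeled iteration, `exit` matches IsMerge(if_false, peeled
// if_false) and `exit_marker` matches the corresponding IsPhi, per
// SimpleLoopWithCounter above.
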
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 2feba2ef7f..ed426be5d8 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/machine-operator-reducer.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
+#include "src/base/ieee754.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/typer.h"
#include "src/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -16,6 +17,7 @@ using testing::AllOf;
using testing::BitEq;
using testing::Capture;
using testing::CaptureEq;
+using testing::NanSensitiveDoubleEq;
namespace v8 {
namespace internal {
@@ -236,10 +238,6 @@ const uint32_t kUint32Values[] = {
0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
-const TruncationMode kTruncationModes[] = {TruncationMode::kJavaScript,
- TruncationMode::kRoundToZero};
-
-
struct ComparisonBinaryOperator {
const Operator* (MachineOperatorBuilder::*constructor)();
const char* constructor_name;
@@ -291,7 +289,6 @@ TEST_F(MachineOperatorReducerTest,
EXPECT_EQ(value, reduction.replacement());
}
-
TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
TRACED_FOREACH(int32_t, x, kInt32Values) {
Reduction reduction = Reduce(graph()->NewNode(
@@ -413,51 +410,28 @@ TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
// -----------------------------------------------------------------------------
-// TruncateFloat64ToInt32
-
+// TruncateFloat64ToWord32
TEST_F(MachineOperatorReducerTest,
- TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
- TRACED_FOREACH(TruncationMode, mode, kTruncationModes) {
- Node* value = Parameter(0);
- Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(mode),
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(value, reduction.replacement());
- }
+ TruncateFloat64ToWord32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToWord32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
}
-
-TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToWord32WithConstant) {
TRACED_FOREACH(double, x, kFloat64Values) {
Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript),
- Float64Constant(x)));
+ machine()->TruncateFloat64ToWord32(), Float64Constant(x)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
}
}
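The constant case above expects TruncateFloat64ToWord32(Float64Constant(x)) to fold to IsInt32Constant(DoubleToInt32(x)), i.e. the JavaScript ToInt32 conversion. As a reference for what that conversion computes, a self-contained sketch following the ECMAScript definition; this is an assumption about the intended semantics, not V8's DoubleToInt32 code:

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32Sketch(double x) {
      if (!std::isfinite(x) || x == 0) return 0;  // NaN, +/-Inf, +/-0 -> 0
      double t = std::trunc(x);                   // round toward zero
      double m = std::fmod(t, 4294967296.0);      // wrap modulo 2^32
      if (m < 0) m += 4294967296.0;               // normalize into [0, 2^32)
      uint32_t u = static_cast<uint32_t>(m);      // exact: m is integral
      return static_cast<int32_t>(u);             // two's-complement wrap
    }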
-TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithPhi) {
- Node* const p0 = Parameter(0);
- Node* const p1 = Parameter(1);
- Node* const merge = graph()->start();
- TRACED_FOREACH(TruncationMode, mode, kTruncationModes) {
- Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(mode),
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), p0,
- p1, merge)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(
- reduction.replacement(),
- IsPhi(MachineRepresentation::kWord32, IsTruncateFloat64ToInt32(p0),
- IsTruncateFloat64ToInt32(p1), merge));
- }
-}
-
-
// -----------------------------------------------------------------------------
// TruncateInt64ToInt32
@@ -485,8 +459,30 @@ TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
// -----------------------------------------------------------------------------
-// Word32And
+// RoundFloat64ToInt32
+
+TEST_F(MachineOperatorReducerTest,
+ RoundFloat64ToInt32WithChangeInt32ToFloat64) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->RoundFloat64ToInt32(),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+TEST_F(MachineOperatorReducerTest, RoundFloat64ToInt32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(machine()->RoundFloat64ToInt32(), Float64Constant(x)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(),
+ IsInt32Constant(static_cast<int32_t>(x)));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Word32And
TEST_F(MachineOperatorReducerTest, Word32AndWithWord32ShlWithConstant) {
Node* const p0 = Parameter(0);
@@ -853,8 +849,24 @@ TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
// -----------------------------------------------------------------------------
-// Word32Shl
+// Word32Shr
+
+TEST_F(MachineOperatorReducerTest, Word32ShrWithWord32And) {
+ Node* const p0 = Parameter(0);
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ uint32_t mask = (1 << shift) - 1;
+ Node* node = graph()->NewNode(
+ machine()->Word32Shr(),
+ graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
+ Int32Constant(shift));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+}
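The new Word32Shr case encodes a bit-level identity: masking with (1 << shift) - 1 keeps only the low shift bits, so a logical right shift by the same amount always yields zero, whatever p0 is. The same identity checked directly in plain C++ as a sanity sketch:

    #include <cassert>
    #include <cstdint>

    void CheckShrOfMaskedValue(uint32_t x) {
      for (int shift = 1; shift < 32; ++shift) {
        uint32_t mask = (1u << shift) - 1;      // only the low bits set
        assert(((x & mask) >> shift) == 0u);    // nothing survives the shift
      }
    }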
+// -----------------------------------------------------------------------------
+// Word32Shl
TEST_F(MachineOperatorReducerTest, Word32ShlWithZeroShift) {
Node* p0 = Parameter(0);
@@ -1271,28 +1283,31 @@ TEST_F(MachineOperatorReducerTest, Int32AddWithInt32SubWithConstantZero) {
TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+ Node* control = graph()->start();
Node* p0 = Parameter(0);
{
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
- Int32Constant(0), p0);
+ Int32Constant(0), p0, control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
{
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
- Int32Constant(0));
+ Int32Constant(0), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
@@ -1300,18 +1315,20 @@ TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+ Node* control = graph()->start();
TRACED_FOREACH(int32_t, x, kInt32Values) {
TRACED_FOREACH(int32_t, y, kInt32Values) {
int32_t z;
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
- Int32Constant(x), Int32Constant(y));
+ Int32Constant(x), Int32Constant(y), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(z));
}
@@ -1324,33 +1341,36 @@ TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+ Node* control = graph()->start();
Node* p0 = Parameter(0);
- Node* add =
- graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+ Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(), p0,
+ Int32Constant(0), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+ Node* control = graph()->start();
TRACED_FOREACH(int32_t, x, kInt32Values) {
TRACED_FOREACH(int32_t, y, kInt32Values) {
int32_t z;
Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
- Int32Constant(x), Int32Constant(y));
+ Int32Constant(x), Int32Constant(y), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(z));
}
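Both overflow tests fold Projection(1) of the operation to its overflow bit and Projection(0) to the wrapped result, using base::bits::SignedAddOverflow32 and SignedSubOverflow32 as oracles. A plausible shape for such an oracle, sketched with the GCC/Clang builtins rather than V8's actual helpers:

    #include <cstdint>

    // Assumed semantics, mirroring the tests: store the wrapped 32-bit
    // result in *val and return true iff signed overflow occurred.
    bool SignedAddOverflow32Sketch(int32_t lhs, int32_t rhs, int32_t* val) {
      return __builtin_add_overflow(lhs, rhs, val);
    }
    bool SignedSubOverflow32Sketch(int32_t lhs, int32_t rhs, int32_t* val) {
      return __builtin_sub_overflow(lhs, rhs, val);
    }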
@@ -1359,8 +1379,153 @@ TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
// -----------------------------------------------------------------------------
-// Uint32LessThan
+// Int32MulWithOverflow
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithZero) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(0), p0, control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(0), control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithOne) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(1), p0, control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+ {
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(1), control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithMinusOne) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(-1), p0, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32SubWithOverflow(IsInt32Constant(0), p0));
+ }
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(-1), control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32SubWithOverflow(IsInt32Constant(0), p0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithTwo) {
+ Node* control = graph()->start();
+ Node* p0 = Parameter(0);
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(2), p0, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32AddWithOverflow(p0, p0));
+ }
+
+ {
+ Reduction r = Reduce(graph()->NewNode(machine()->Int32MulWithOverflow(), p0,
+ Int32Constant(2), control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32AddWithOverflow(p0, p0));
+ }
+}
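The minus-one and two cases are strength reductions rather than constant folds: x * (-1) is rewritten to 0 - x with overflow (both overflow exactly when x is INT32_MIN), and x * 2 to x + x, which wraps and overflows under exactly the same conditions as the multiplication. The latter equivalence spelled out in C++:

    #include <cstdint>

    // For every int32_t x, 2 * x and x + x produce the same wrapped value
    // and overflow together, so either form may stand in for the other.
    bool MulByTwoEqualsSelfAdd(int32_t x) {
      int32_t mul, add;
      bool mul_ovf = __builtin_mul_overflow(x, 2, &mul);
      bool add_ovf = __builtin_add_overflow(x, x, &add);
      return mul == add && mul_ovf == add_ovf;
    }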
+
+TEST_F(MachineOperatorReducerTest, Int32MulWithOverflowWithConstant) {
+ Node* control = graph()->start();
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ TRACED_FOREACH(int32_t, y, kInt32Values) {
+ int32_t z;
+ Node* mul = graph()->NewNode(machine()->Int32MulWithOverflow(),
+ Int32Constant(x), Int32Constant(y), control);
+
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsInt32Constant(base::bits::SignedMulOverflow32(x, y, &z)));
+
+ r = Reduce(graph()->NewNode(common()->Projection(0), mul, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Int32LessThan
+
+TEST_F(MachineOperatorReducerTest, Int32LessThanWithWord32Or) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, x, kInt32Values) {
+ Node* word32_or =
+ graph()->NewNode(machine()->Word32Or(), p0, Int32Constant(x));
+ Node* less_than = graph()->NewNode(machine()->Int32LessThan(), word32_or,
+ Int32Constant(0));
+ Reduction r = Reduce(less_than);
+ if (x < 0) {
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(1));
+ } else {
+ ASSERT_FALSE(r.Changed());
+ }
+ }
+}
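The Int32LessThan fold rests on sign-bit reasoning: OR can only set bits, so OR-ing with a constant whose sign bit is set forces a negative result for every x, while a non-negative constant proves nothing, which is why the test expects no reduction in that branch. Checked directly:

    #include <cassert>
    #include <cstdint>

    void CheckOrWithNegativeConstant(int32_t x, int32_t k) {
      int32_t or_result = static_cast<int32_t>(static_cast<uint32_t>(x) |
                                               static_cast<uint32_t>(k));
      if (k < 0) assert(or_result < 0);  // the sign bit of k survives the OR
    }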
+
+// -----------------------------------------------------------------------------
+// Uint32LessThan
TEST_F(MachineOperatorReducerTest, Uint32LessThanWithWord32Sar) {
Node* const p0 = Parameter(0);
@@ -1402,10 +1567,266 @@ TEST_F(MachineOperatorReducerTest, Float64MulWithMinusOne) {
}
}
+TEST_F(MachineOperatorReducerTest, Float64SubMinusZeroMinusX) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float64Sub(), Float64Constant(-0.0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Neg(p0));
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Float32SubMinusZeroMinusX) {
+ Node* const p0 = Parameter(0);
+ {
+ Reduction r = Reduce(
+ graph()->NewNode(machine()->Float32Sub(), Float32Constant(-0.0), p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Neg(p0));
+ }
+}
// -----------------------------------------------------------------------------
-// Float64InsertLowWord32
+// Float64Acos
+TEST_F(MachineOperatorReducerTest, Float64AcosWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Acos(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::acos(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Acosh
+
+TEST_F(MachineOperatorReducerTest, Float64AcoshWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Acosh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::acosh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Asin
+
+TEST_F(MachineOperatorReducerTest, Float64AsinWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Asin(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::asin(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Asinh
+
+TEST_F(MachineOperatorReducerTest, Float64AsinhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Asinh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::asinh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Atan
+
+TEST_F(MachineOperatorReducerTest, Float64AtanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atan(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Atanh
+
+TEST_F(MachineOperatorReducerTest, Float64AtanhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atanh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atanh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Atan2
+
+TEST_F(MachineOperatorReducerTest, Float64Atan2WithConstant) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Atan2(), Float64Constant(y), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atan2(y, x))));
+ }
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Float64Atan2WithNaN) {
+ Node* const p0 = Parameter(0);
+ Node* const nan = Float64Constant(std::numeric_limits<double>::quiet_NaN());
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), p0, nan));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(nan, r.replacement());
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), nan, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(nan, r.replacement());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Cos
+
+TEST_F(MachineOperatorReducerTest, Float64CosWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Cos(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::cos(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Cosh
+
+TEST_F(MachineOperatorReducerTest, Float64CoshWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Cosh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::cosh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Exp
+
+TEST_F(MachineOperatorReducerTest, Float64ExpWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Exp(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::exp(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Log
+
+TEST_F(MachineOperatorReducerTest, Float64LogWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Log(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::log(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Log1p
+
+TEST_F(MachineOperatorReducerTest, Float64Log1pWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Log1p(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::log1p(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Sin
+
+TEST_F(MachineOperatorReducerTest, Float64SinWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Sin(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::sin(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Sinh
+
+TEST_F(MachineOperatorReducerTest, Float64SinhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Sinh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::sinh(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Tan
+
+TEST_F(MachineOperatorReducerTest, Float64TanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Tan(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::tan(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Tanh
+
+TEST_F(MachineOperatorReducerTest, Float64TanhWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Tanh(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::tanh(x))));
+ }
+}
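Each of the Float64{Acos...Tanh} cases folds the operation on a constant into a Float64Constant computed by base::ieee754, and compares with NanSensitiveDoubleEq because plain floating-point equality rejects NaN == NaN (consider acos(2.0)). A minimal stand-in for that matcher's equality rule; the real gmock matcher additionally tolerates a few ULPs of difference:

    #include <cmath>

    // Two doubles compare equal NaN-sensitively if both are NaN, or if
    // they are equal in the ordinary sense.
    bool NanSensitiveEquals(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return std::isnan(a) && std::isnan(b);
      return a == b;
    }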
+
+// -----------------------------------------------------------------------------
+// Float64InsertLowWord32
TEST_F(MachineOperatorReducerTest, Float64InsertLowWord32WithConstant) {
TRACED_FOREACH(double, x, kFloat64Values) {
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 59eb484dab..400b05828a 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -208,9 +208,7 @@ const PureOperator kPureOperators[] = {
PURE(Word64Ror, 2, 0, 1), // --
PURE(Word64Equal, 2, 0, 1), // --
PURE(Int32Add, 2, 0, 1), // --
- PURE(Int32AddWithOverflow, 2, 0, 2), // --
PURE(Int32Sub, 2, 0, 1), // --
- PURE(Int32SubWithOverflow, 2, 0, 2), // --
PURE(Int32Mul, 2, 0, 1), // --
PURE(Int32MulHigh, 2, 0, 1), // --
PURE(Int32Div, 2, 1, 1), // --
@@ -249,6 +247,7 @@ const PureOperator kPureOperators[] = {
PURE(Float32Equal, 2, 0, 1), // --
PURE(Float32LessThan, 2, 0, 1), // --
PURE(Float32LessThanOrEqual, 2, 0, 1), // --
+ PURE(Float32Neg, 1, 0, 1), // --
PURE(Float64Abs, 1, 0, 1), // --
PURE(Float64Add, 2, 0, 1), // --
PURE(Float64Sub, 2, 0, 1), // --
@@ -256,6 +255,8 @@ const PureOperator kPureOperators[] = {
PURE(Float64Div, 2, 0, 1), // --
PURE(Float64Mod, 2, 0, 1), // --
PURE(Float64Sqrt, 1, 0, 1), // --
+ PURE(Float64Max, 2, 0, 1), // --
+ PURE(Float64Min, 2, 0, 1), // --
PURE(Float64Equal, 2, 0, 1), // --
PURE(Float64LessThan, 2, 0, 1), // --
PURE(Float64LessThanOrEqual, 2, 0, 1), // --
@@ -264,6 +265,7 @@ const PureOperator kPureOperators[] = {
PURE(Float64ExtractHighWord32, 1, 0, 1), // --
PURE(Float64InsertLowWord32, 2, 0, 1), // --
PURE(Float64InsertHighWord32, 2, 0, 1), // --
+ PURE(Float64Neg, 1, 0, 1), // --
#undef PURE
};
@@ -320,10 +322,6 @@ const OptionalOperatorEntry kOptionalOperators[] = {
&MachineOperatorBuilder::Name, MachineOperatorBuilder::k##Name, #Name, \
value_input_count, control_input_count, value_output_count \
}
- OPTIONAL_ENTRY(Float32Max, 2, 0, 1), // --
- OPTIONAL_ENTRY(Float32Min, 2, 0, 1), // --
- OPTIONAL_ENTRY(Float64Max, 2, 0, 1), // --
- OPTIONAL_ENTRY(Float64Min, 2, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundDown, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTruncate, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTiesAway, 1, 0, 1), // --
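In the operator tables above, Int32AddWithOverflow and Int32SubWithOverflow leave the pure list (they now carry a control input, matching the extra control operand threaded through the reducer tests earlier), Float32Neg/Float64Neg join it, and Float64Max/Float64Min are promoted from optional to pure. For readers decoding the PURE(...) rows, the tuple is (name, value inputs, control inputs, value outputs); a hypothetical struct restating that shape, not the one used by the test:

    struct OperatorShape {
      const char* name;
      int value_inputs;
      int control_inputs;
      int value_outputs;
    };

    const OperatorShape kShapeExamples[] = {
        {"Float32Neg", 1, 0, 1},  // unary pure op
        {"Float64Max", 2, 0, 1},  // binary, now unconditionally available
    };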
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 122c398e20..7b5c667261 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -1150,40 +1150,6 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
-TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Max(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Max is `(b < a) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMipsFloat32Max, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Min(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Min is `(a < b) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMipsFloat32Min, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64());
@@ -1192,7 +1158,6 @@ TEST_F(InstructionSelectorTest, Float64Max) {
Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
@@ -1209,7 +1174,6 @@ TEST_F(InstructionSelectorTest, Float64Min) {
Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsFloat64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
@@ -1217,7 +1181,6 @@ TEST_F(InstructionSelectorTest, Float64Min) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
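The deleted "(b < a) ? a : b" comments described Float32/Float64 Max and Min as plain selects, and the select form is not IEEE-equivalent, which presumably is why the comments are dropped along with the Float32 cases here. In plain C++:

    #include <cmath>

    float SelectMax(float a, float b) { return (b < a) ? a : b; }

    // SelectMax(1.0f, NAN) yields NAN because (NAN < 1.0f) is false,
    // whereas std::fmax(1.0f, NAN) yields 1.0f; the two can also differ
    // on -0.0 versus +0.0 inputs.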
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index d9cd96f471..c82cb9fe4f 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -67,22 +67,20 @@ struct Conversion {
// Logical instructions.
// ----------------------------------------------------------------------------
-
const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kMips64And,
+ {&RawMachineAssembler::Word32And, "Word32And", kMips64And32,
MachineType::Int32()},
{&RawMachineAssembler::Word64And, "Word64And", kMips64And,
MachineType::Int64()},
- {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or,
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or32,
MachineType::Int32()},
{&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or,
MachineType::Int64()},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor,
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor32,
MachineType::Int32()},
{&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor,
MachineType::Int64()}};
-
// ----------------------------------------------------------------------------
// Shift instructions.
// ----------------------------------------------------------------------------
@@ -542,7 +540,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -551,7 +549,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -589,7 +587,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
m.Int32Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -599,7 +597,7 @@ TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
m.Word32Or(m.Parameter(0), m.Parameter(0))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Nor32, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -988,6 +986,89 @@ TEST_F(InstructionSelectorTest, CombineShiftsWithDivMod) {
}
}
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
+ // For each case, test that the conversion is merged into the load
+ // operation.
+ // ChangeInt32ToInt64(Load_Uint8) -> Lbu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lbu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int8) -> Lb
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lb, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint16) -> Lhu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lhu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int16) -> Lh
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lh, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint32) -> Lw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lw, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int32) -> Lw
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lw, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
+
// ----------------------------------------------------------------------------
// Loads and stores.
@@ -1411,40 +1492,6 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
-TEST_F(InstructionSelectorTest, Float32Max) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Max(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Max is `(b < a) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Float32Max, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
-TEST_F(InstructionSelectorTest, Float32Min) {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
- MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const n = m.Float32Min(p0, p1);
- m.Return(n);
- Stream s = m.Build();
- // Float32Min is `(a < b) ? a : b`.
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Float32Min, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
-}
-
-
TEST_F(InstructionSelectorTest, Float64Max) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64());
@@ -1453,7 +1500,6 @@ TEST_F(InstructionSelectorTest, Float64Max) {
Node* const n = m.Float64Max(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Max is `(b < a) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float64Max, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
@@ -1470,7 +1516,6 @@ TEST_F(InstructionSelectorTest, Float64Min) {
Node* const n = m.Float64Min(p0, p1);
m.Return(n);
Stream s = m.Build();
- // Float64Min is `(a < b) ? a : b`.
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64Float64Min, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
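The new ChangeInt32ToInt64AfterLoad cases check that the widening conversion is folded into the load itself, so the selector emits the one sign- or zero-extending load (Lb/Lbu, Lh/Lhu, Lw) whose result is already correct at 64 bits. The equivalence being relied on, restated in C++ for the signed-byte case:

    #include <cstdint>

    // Loading a narrow signed value and widening afterwards...
    int64_t LoadThenExtend(const int8_t* p) {
      int32_t v = *p;                   // byte load, sign-extended to 32 bits
      return static_cast<int64_t>(v);   // ChangeInt32ToInt64
    }

    // ...is the same as one sign-extending load, hence a single Lb.
    int64_t SignExtendingLoad(const int8_t* p) {
      return static_cast<int64_t>(*p);
    }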
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
index 5ccd0c6727..4c69384667 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
@@ -106,11 +106,9 @@ TEST_F(MoveOptimizerTest, RemovesRedundant) {
TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
int first_reg_index =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0);
+ RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(0);
int second_reg_index =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(1);
+ RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(1);
StartBlock();
auto first_instr = EmitNop();
diff --git a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
index f0cc407445..45d7427494 100644
--- a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
@@ -30,13 +30,15 @@ class NodeMatcherTest : public GraphTest {
namespace {
template <class Matcher>
-void CheckBaseWithIndexAndDisplacement(Matcher* matcher, Node* index, int scale,
- Node* base, Node* displacement) {
+void CheckBaseWithIndexAndDisplacement(
+ Matcher* matcher, Node* index, int scale, Node* base, Node* displacement,
+ DisplacementMode displacement_mode = kPositiveDisplacement) {
EXPECT_TRUE(matcher->matches());
EXPECT_EQ(index, matcher->index());
EXPECT_EQ(scale, matcher->scale());
EXPECT_EQ(base, matcher->base());
EXPECT_EQ(displacement, matcher->displacement());
+ EXPECT_EQ(displacement_mode, matcher->displacement_mode());
}
} // namespace
@@ -90,6 +92,9 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
const Operator* a_op = machine()->Int32Add();
USE(a_op);
+ const Operator* sub_op = machine()->Int32Sub();
+ USE(sub_op);
+
const Operator* m_op = machine()->Int32Mul();
Node* m1 = graph()->NewNode(m_op, p1, d1);
Node* m2 = graph()->NewNode(m_op, p1, d2);
@@ -354,7 +359,25 @@ TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
graph()->NewNode(a_op, s3, graph()->NewNode(a_op, b0, d15)));
CheckBaseWithIndexAndDisplacement(&match43, p1, 3, b0, d15);
-  // Check that scales that require using the base address work correctly.
+ // S3 + (B0 - D15) -> [p1, 2, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement32Matcher match44(
+ graph()->NewNode(a_op, s3, graph()->NewNode(sub_op, b0, d15)));
+ CheckBaseWithIndexAndDisplacement(&match44, p1, 3, b0, d15,
+ kNegativeDisplacement);
+
+ // B0 + (B1 - D15) -> [p1, 2, b0, d15, true]
+ BaseWithIndexAndDisplacement32Matcher match45(
+ graph()->NewNode(a_op, b0, graph()->NewNode(sub_op, b1, d15)));
+ CheckBaseWithIndexAndDisplacement(&match45, b1, 0, b0, d15,
+ kNegativeDisplacement);
+
+ // (B0 - D15) + S3 -> [p1, 2, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement32Matcher match46(
+ graph()->NewNode(a_op, graph()->NewNode(sub_op, b0, d15), s3));
+ CheckBaseWithIndexAndDisplacement(&match46, p1, 3, b0, d15,
+ kNegativeDisplacement);
}
@@ -409,6 +432,9 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
const Operator* a_op = machine()->Int64Add();
USE(a_op);
+ const Operator* sub_op = machine()->Int64Sub();
+ USE(sub_op);
+
const Operator* m_op = machine()->Int64Mul();
Node* m1 = graph()->NewNode(m_op, p1, d1);
Node* m2 = graph()->NewNode(m_op, p1, d2);
@@ -726,8 +752,27 @@ TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
BaseWithIndexAndDisplacement64Matcher match50(
graph()->NewNode(a_op, m3, temp));
CheckBaseWithIndexAndDisplacement(&match50, m3, 0, b0, d15);
-}
+ // S3 + (B0 - D15) -> [p1, 2, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement64Matcher match51(
+ graph()->NewNode(a_op, s3, graph()->NewNode(sub_op, b0, d15)));
+ CheckBaseWithIndexAndDisplacement(&match51, p1, 3, b0, d15,
+ kNegativeDisplacement);
+
+ // B0 + (B1 - D15) -> [p1, 2, b0, d15, true]
+ BaseWithIndexAndDisplacement64Matcher match52(
+ graph()->NewNode(a_op, b0, graph()->NewNode(sub_op, b1, d15)));
+ CheckBaseWithIndexAndDisplacement(&match52, b1, 0, b0, d15,
+ kNegativeDisplacement);
+
+ // (B0 - D15) + S3 -> [p1, 2, b0, d15, true]
+ s3 = graph()->NewNode(s_op, p1, d3);
+ BaseWithIndexAndDisplacement64Matcher match53(
+ graph()->NewNode(a_op, graph()->NewNode(sub_op, b0, d15), s3));
+ CheckBaseWithIndexAndDisplacement(&match53, p1, 3, b0, d15,
+ kNegativeDisplacement);
+}
TEST_F(NodeMatcherTest, BranchMatcher_match) {
Node* zero = graph()->NewNode(common()->Int32Constant(0));
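The matcher change threads a DisplacementMode through CheckBaseWithIndexAndDisplacement so the new cases (match44 through match46 and match51 through match53) can assert that an Int32Sub/Int64Sub operand is recognized as a negative displacement. The address form being matched, written out as an illustrative helper that is not part of the test:

    #include <cstdint>

    // x86-style operand: base + index * 2^scale +/- displacement.
    int64_t EffectiveAddress(int64_t base, int64_t index, int scale,
                             int64_t disp, bool negative_disp) {
      int64_t addr = base + (index << scale);
      return negative_disp ? addr - disp : addr + disp;
    }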
diff --git a/deps/v8/test/unittests/compiler/node-properties-unittest.cc b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
index 463948d43f..a18f2032f5 100644
--- a/deps/v8/test/unittests/compiler/node-properties-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
@@ -37,8 +37,6 @@ const Operator kMockOperator(IrOpcode::kDead, Operator::kNoProperties,
const Operator kMockCallOperator(IrOpcode::kCall, Operator::kNoProperties,
"MockCallOperator", 0, 0, 0, 0, 0, 2);
-const IfExceptionHint kNoHint = IfExceptionHint::kLocallyCaught;
-
} // namespace
@@ -49,7 +47,7 @@ TEST_F(NodePropertiesTest, ReplaceUses) {
Node* use_value = NewMockNode(common.Return(), node);
Node* use_effect = NewMockNode(common.EffectPhi(1), node);
Node* use_success = NewMockNode(common.IfSuccess(), node);
- Node* use_exception = NewMockNode(common.IfException(kNoHint), effect, node);
+ Node* use_exception = NewMockNode(common.IfException(), effect, node);
Node* r_value = NewMockNode(&kMockOperator);
Node* r_effect = NewMockNode(&kMockOperator);
Node* r_success = NewMockNode(&kMockOperator);
@@ -99,7 +97,7 @@ TEST_F(NodePropertiesTest, CollectControlProjections_Call) {
Node* result[2];
CommonOperatorBuilder common(zone());
Node* call = NewMockNode(&kMockCallOperator);
- Node* if_ex = NewMockNode(common.IfException(kNoHint), call, call);
+ Node* if_ex = NewMockNode(common.IfException(), call, call);
Node* if_ok = NewMockNode(common.IfSuccess(), call);
NodeProperties::CollectControlProjections(call, result, arraysize(result));
EXPECT_EQ(if_ok, result[0]);
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 6e5d39f68d..5620b8bec1 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -612,49 +612,6 @@ class IsEffectPhiMatcher final : public NodeMatcher {
};
-class IsEffectSetMatcher final : public NodeMatcher {
- public:
- IsEffectSetMatcher(const Matcher<Node*>& effect0_matcher,
- const Matcher<Node*>& effect1_matcher)
- : NodeMatcher(IrOpcode::kEffectSet),
- effect0_matcher_(effect0_matcher),
- effect1_matcher_(effect1_matcher) {}
-
- void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
- *os << "), effect0 (";
- effect0_matcher_.DescribeTo(os);
- *os << ") and effect1 (";
- effect1_matcher_.DescribeTo(os);
- *os << ")";
- }
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- if (!NodeMatcher::MatchAndExplain(node, listener)) return false;
-
- Node* effect0 = NodeProperties::GetEffectInput(node, 0);
- Node* effect1 = NodeProperties::GetEffectInput(node, 1);
-
- {
- // Try matching in the reverse order first.
- StringMatchResultListener value_listener;
- if (effect0_matcher_.MatchAndExplain(effect1, &value_listener) &&
- effect1_matcher_.MatchAndExplain(effect0, &value_listener)) {
- return true;
- }
- }
-
- return PrintMatchAndExplain(effect0, "effect0", effect0_matcher_,
- listener) &&
- PrintMatchAndExplain(effect1, "effect1", effect1_matcher_, listener);
- }
-
- private:
- const Matcher<Node*> effect0_matcher_;
- const Matcher<Node*> effect1_matcher_;
-};
-
-
class IsProjectionMatcher final : public NodeMatcher {
public:
IsProjectionMatcher(const Matcher<size_t>& index_matcher,
@@ -843,6 +800,44 @@ class IsReferenceEqualMatcher final : public NodeMatcher {
const Matcher<Node*> rhs_matcher_;
};
+class IsSpeculativeBinopMatcher final : public NodeMatcher {
+ public:
+ IsSpeculativeBinopMatcher(IrOpcode::Value opcode,
+ const Matcher<NumberOperationHint>& hint_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(opcode),
+ hint_matcher_(hint_matcher),
+ lhs_matcher_(lhs_matcher),
+ rhs_matcher_(rhs_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ // TODO(bmeurer): The type parameter is currently ignored.
+ PrintMatchAndExplain(OpParameter<NumberOperationHint>(node->op()),
+ "hints", hint_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
+ rhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<NumberOperationHint> hint_matcher_;
+ const Matcher<Type*> type_matcher_;
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
class IsAllocateMatcher final : public NodeMatcher {
public:
@@ -1203,132 +1198,140 @@ class IsStoreElementMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsLoadMatcher final : public NodeMatcher {
- public:
- IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kLoad),
- rep_matcher_(rep_matcher),
- base_matcher_(base_matcher),
- index_matcher_(index_matcher),
- effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
-
- void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
- *os << " whose rep (";
- rep_matcher_.DescribeTo(os);
- *os << "), base (";
- base_matcher_.DescribeTo(os);
- *os << "), index (";
- index_matcher_.DescribeTo(os);
- *os << "), effect (";
- effect_matcher_.DescribeTo(os);
- *os << ") and control (";
- control_matcher_.DescribeTo(os);
- *os << ")";
- }
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- Node* effect_node = nullptr;
- Node* control_node = nullptr;
- if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
- effect_node = NodeProperties::GetEffectInput(node);
- }
- if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
- control_node = NodeProperties::GetControlInput(node);
- }
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
- rep_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
- base_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "index", index_matcher_, listener) &&
- PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
- listener) &&
- PrintMatchAndExplain(control_node, "control", control_matcher_,
- listener));
- }
-
- private:
- const Matcher<LoadRepresentation> rep_matcher_;
- const Matcher<Node*> base_matcher_;
- const Matcher<Node*> index_matcher_;
- const Matcher<Node*> effect_matcher_;
- const Matcher<Node*> control_matcher_;
-};
-
-
-class IsStoreMatcher final : public NodeMatcher {
- public:
- IsStoreMatcher(const Matcher<StoreRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kStore),
- rep_matcher_(rep_matcher),
- base_matcher_(base_matcher),
- index_matcher_(index_matcher),
- value_matcher_(value_matcher),
- effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
-
- void DescribeTo(std::ostream* os) const final {
- NodeMatcher::DescribeTo(os);
- *os << " whose rep (";
- rep_matcher_.DescribeTo(os);
- *os << "), base (";
- base_matcher_.DescribeTo(os);
- *os << "), index (";
- index_matcher_.DescribeTo(os);
- *os << "), value (";
- value_matcher_.DescribeTo(os);
- *os << "), effect (";
- effect_matcher_.DescribeTo(os);
- *os << ") and control (";
- control_matcher_.DescribeTo(os);
- *os << ")";
- }
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- Node* effect_node = nullptr;
- Node* control_node = nullptr;
- if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
- effect_node = NodeProperties::GetEffectInput(node);
- }
- if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
- control_node = NodeProperties::GetControlInput(node);
- }
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<StoreRepresentation>(node), "rep",
- rep_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
- base_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "index", index_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "value", value_matcher_, listener) &&
- PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
- listener) &&
- PrintMatchAndExplain(control_node, "control", control_matcher_,
- listener));
- }
-
- private:
- const Matcher<StoreRepresentation> rep_matcher_;
- const Matcher<Node*> base_matcher_;
- const Matcher<Node*> index_matcher_;
- const Matcher<Node*> value_matcher_;
- const Matcher<Node*> effect_matcher_;
- const Matcher<Node*> control_matcher_;
-};
+#define LOAD_MATCHER(kLoad) \
+ class Is##kLoad##Matcher final : public NodeMatcher { \
+ public: \
+ Is##kLoad##Matcher(const Matcher<kLoad##Representation>& rep_matcher, \
+ const Matcher<Node*>& base_matcher, \
+ const Matcher<Node*>& index_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher) \
+ : NodeMatcher(IrOpcode::k##kLoad), \
+ rep_matcher_(rep_matcher), \
+ base_matcher_(base_matcher), \
+ index_matcher_(index_matcher), \
+ effect_matcher_(effect_matcher), \
+ control_matcher_(control_matcher) {} \
+ \
+ void DescribeTo(std::ostream* os) const final { \
+ NodeMatcher::DescribeTo(os); \
+ *os << " whose rep ("; \
+ rep_matcher_.DescribeTo(os); \
+ *os << "), base ("; \
+ base_matcher_.DescribeTo(os); \
+ *os << "), index ("; \
+ index_matcher_.DescribeTo(os); \
+ *os << "), effect ("; \
+ effect_matcher_.DescribeTo(os); \
+ *os << ") and control ("; \
+ control_matcher_.DescribeTo(os); \
+ *os << ")"; \
+ } \
+ \
+ bool MatchAndExplain(Node* node, \
+ MatchResultListener* listener) const final { \
+ Node* effect_node = nullptr; \
+ Node* control_node = nullptr; \
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) { \
+ effect_node = NodeProperties::GetEffectInput(node); \
+ } \
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
+ control_node = NodeProperties::GetControlInput(node); \
+ } \
+ return (NodeMatcher::MatchAndExplain(node, listener) && \
+ PrintMatchAndExplain(OpParameter<kLoad##Representation>(node), \
+ "rep", rep_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
+ "base", base_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
+ "index", index_matcher_, listener) && \
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_, \
+ listener) && \
+ PrintMatchAndExplain(control_node, "control", control_matcher_, \
+ listener)); \
+ } \
+ \
+ private: \
+ const Matcher<kLoad##Representation> rep_matcher_; \
+ const Matcher<Node*> base_matcher_; \
+ const Matcher<Node*> index_matcher_; \
+ const Matcher<Node*> effect_matcher_; \
+ const Matcher<Node*> control_matcher_; \
+ };
+
+LOAD_MATCHER(Load)
+LOAD_MATCHER(UnalignedLoad)
+
+#define STORE_MATCHER(kStore) \
+ class Is##kStore##Matcher final : public NodeMatcher { \
+ public: \
+ Is##kStore##Matcher(const Matcher<kStore##Representation>& rep_matcher, \
+ const Matcher<Node*>& base_matcher, \
+ const Matcher<Node*>& index_matcher, \
+ const Matcher<Node*>& value_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher) \
+ : NodeMatcher(IrOpcode::k##kStore), \
+ rep_matcher_(rep_matcher), \
+ base_matcher_(base_matcher), \
+ index_matcher_(index_matcher), \
+ value_matcher_(value_matcher), \
+ effect_matcher_(effect_matcher), \
+ control_matcher_(control_matcher) {} \
+ \
+ void DescribeTo(std::ostream* os) const final { \
+ NodeMatcher::DescribeTo(os); \
+ *os << " whose rep ("; \
+ rep_matcher_.DescribeTo(os); \
+ *os << "), base ("; \
+ base_matcher_.DescribeTo(os); \
+ *os << "), index ("; \
+ index_matcher_.DescribeTo(os); \
+ *os << "), value ("; \
+ value_matcher_.DescribeTo(os); \
+ *os << "), effect ("; \
+ effect_matcher_.DescribeTo(os); \
+ *os << ") and control ("; \
+ control_matcher_.DescribeTo(os); \
+ *os << ")"; \
+ } \
+ \
+ bool MatchAndExplain(Node* node, \
+ MatchResultListener* listener) const final { \
+ Node* effect_node = nullptr; \
+ Node* control_node = nullptr; \
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) { \
+ effect_node = NodeProperties::GetEffectInput(node); \
+ } \
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
+ control_node = NodeProperties::GetControlInput(node); \
+ } \
+ return (NodeMatcher::MatchAndExplain(node, listener) && \
+ PrintMatchAndExplain(OpParameter<kStore##Representation>(node), \
+ "rep", rep_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
+ "base", base_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
+ "index", index_matcher_, listener) && \
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), \
+ "value", value_matcher_, listener) && \
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_, \
+ listener) && \
+ PrintMatchAndExplain(control_node, "control", control_matcher_, \
+ listener)); \
+ } \
+ \
+ private: \
+ const Matcher<kStore##Representation> rep_matcher_; \
+ const Matcher<Node*> base_matcher_; \
+ const Matcher<Node*> index_matcher_; \
+ const Matcher<Node*> value_matcher_; \
+ const Matcher<Node*> effect_matcher_; \
+ const Matcher<Node*> control_matcher_; \
+ };
+
+STORE_MATCHER(Store)
+STORE_MATCHER(UnalignedStore)
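Replacing the hand-written IsLoadMatcher and IsStoreMatcher classes with LOAD_MATCHER/STORE_MATCHER macros lets a single class body serve the aligned and unaligned variants: the macro pastes the name into the class, the IrOpcode, and the representation type. The pattern in miniature, with a hypothetical macro unrelated to the test file:

    // Name##Matcher and "k" #Name show the token pasting and stringizing
    // the stamped-out matcher classes rely on.
    #define DECLARE_MATCHER(Name)                                \
      struct Name##Matcher {                                     \
        const char* OpcodeName() const { return "k" #Name; }     \
      };

    DECLARE_MATCHER(Load)
    DECLARE_MATCHER(UnalignedLoad)
    #undef DECLARE_MATCHER

    // LoadMatcher().OpcodeName() == "kLoad"
    // UnalignedLoadMatcher().OpcodeName() == "kUnalignedLoad"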
class IsStackSlotMatcher final : public NodeMatcher {
public:
@@ -1352,32 +1355,6 @@ class IsStackSlotMatcher final : public NodeMatcher {
const Matcher<MachineRepresentation> rep_matcher_;
};
-class IsGuardMatcher final : public NodeMatcher {
- public:
- IsGuardMatcher(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kGuard),
- type_matcher_(type_matcher),
- value_matcher_(value_matcher),
- control_matcher_(control_matcher) {}
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<Type*>(node->op()), "type",
- type_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
- "value", value_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
- "control", control_matcher_, listener));
- }
-
- private:
- const Matcher<Type*> type_matcher_;
- const Matcher<Node*> value_matcher_;
- const Matcher<Node*> control_matcher_;
-};
-
class IsToNumberMatcher final : public NodeMatcher {
public:
IsToNumberMatcher(const Matcher<Node*>& base_matcher,
@@ -1818,12 +1795,6 @@ Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
}
-Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
- const Matcher<Node*>& effect1_matcher) {
- return MakeMatcher(new IsEffectSetMatcher(effect0_matcher, effect1_matcher));
-}
-
-
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher) {
return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
@@ -2064,13 +2035,6 @@ Matcher<Node*> IsTailCall(
effect_matcher, control_matcher));
}
-Matcher<Node*> IsGuard(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher) {
- return MakeMatcher(
- new IsGuardMatcher(type_matcher, value_matcher, control_matcher));
-}
-
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
@@ -2078,6 +2042,18 @@ Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
new IsReferenceEqualMatcher(type_matcher, lhs_matcher, rhs_matcher));
}
+#define DEFINE_SPECULATIVE_BINOP_MATCHER(opcode) \
+ Matcher<Node*> Is##opcode(const Matcher<NumberOperationHint>& hint_matcher, \
+ const Matcher<Node*>& lhs_matcher, \
+ const Matcher<Node*>& rhs_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher) { \
+ return MakeMatcher(new IsSpeculativeBinopMatcher( \
+ IrOpcode::k##opcode, hint_matcher, lhs_matcher, rhs_matcher, \
+ effect_matcher, control_matcher)); \
+ }
+SPECULATIVE_BINOPS(DEFINE_SPECULATIVE_BINOP_MATCHER);
+#undef DEFINE_SPECULATIVE_BINOP_MATCHER
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
@@ -2154,7 +2130,6 @@ Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
effect_matcher, control_matcher));
}
-
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -2164,6 +2139,15 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
effect_matcher, control_matcher));
}
+Matcher<Node*> IsUnalignedLoad(
+ const Matcher<UnalignedLoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsUnalignedLoadMatcher(rep_matcher, base_matcher,
+ index_matcher, effect_matcher,
+ control_matcher));
+}
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
@@ -2176,6 +2160,16 @@ Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
effect_matcher, control_matcher));
}
+Matcher<Node*> IsUnalignedStore(
+ const Matcher<UnalignedStoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsUnalignedStoreMatcher(
+ rep_matcher, base_matcher, index_matcher, value_matcher, effect_matcher,
+ control_matcher));
+}
+
Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher) {
return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
}
@@ -2204,6 +2198,10 @@ Matcher<Node*> IsLoadFramePointer() {
return MakeMatcher(new NodeMatcher(IrOpcode::kLoadFramePointer));
}
+Matcher<Node*> IsLoadParentFramePointer() {
+ return MakeMatcher(new NodeMatcher(IrOpcode::kLoadParentFramePointer));
+}
+
#define IS_QUADOP_MATCHER(Name) \
Matcher<Node*> Is##Name( \
const Matcher<Node*>& a_matcher, const Matcher<Node*>& b_matcher, \
@@ -2242,6 +2240,10 @@ IS_BINOP_MATCHER(NumberShiftLeft)
IS_BINOP_MATCHER(NumberShiftRight)
IS_BINOP_MATCHER(NumberShiftRightLogical)
IS_BINOP_MATCHER(NumberImul)
+IS_BINOP_MATCHER(NumberAtan2)
+IS_BINOP_MATCHER(NumberMax)
+IS_BINOP_MATCHER(NumberMin)
+IS_BINOP_MATCHER(NumberPow)
IS_BINOP_MATCHER(Word32And)
IS_BINOP_MATCHER(Word32Or)
IS_BINOP_MATCHER(Word32Xor)
@@ -2256,6 +2258,7 @@ IS_BINOP_MATCHER(Word64Sar)
IS_BINOP_MATCHER(Word64Shl)
IS_BINOP_MATCHER(Word64Equal)
IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32SubWithOverflow)
IS_BINOP_MATCHER(Int32Add)
IS_BINOP_MATCHER(Int32Sub)
IS_BINOP_MATCHER(Int32Mul)
@@ -2266,8 +2269,6 @@ IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(JSAdd)
-IS_BINOP_MATCHER(Float32Max)
-IS_BINOP_MATCHER(Float32Min)
IS_BINOP_MATCHER(Float32Equal)
IS_BINOP_MATCHER(Float32LessThan)
IS_BINOP_MATCHER(Float32LessThanOrEqual)
@@ -2284,6 +2285,7 @@ IS_BINOP_MATCHER(Float64InsertHighWord32)
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
IS_UNOP_MATCHER(BooleanNot)
+IS_UNOP_MATCHER(TruncateFloat64ToWord32)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
IS_UNOP_MATCHER(ChangeInt32ToFloat64)
@@ -2291,23 +2293,55 @@ IS_UNOP_MATCHER(ChangeInt32ToInt64)
IS_UNOP_MATCHER(ChangeUint32ToFloat64)
IS_UNOP_MATCHER(ChangeUint32ToUint64)
IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
-IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
IS_UNOP_MATCHER(Float32Abs)
+IS_UNOP_MATCHER(Float32Neg)
IS_UNOP_MATCHER(Float64Abs)
+IS_UNOP_MATCHER(Float64Neg)
IS_UNOP_MATCHER(Float64Sqrt)
IS_UNOP_MATCHER(Float64RoundDown)
IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
IS_UNOP_MATCHER(Float64ExtractLowWord32)
IS_UNOP_MATCHER(Float64ExtractHighWord32)
+IS_UNOP_MATCHER(NumberAbs)
+IS_UNOP_MATCHER(NumberAcos)
+IS_UNOP_MATCHER(NumberAcosh)
+IS_UNOP_MATCHER(NumberAsin)
+IS_UNOP_MATCHER(NumberAsinh)
+IS_UNOP_MATCHER(NumberAtan)
+IS_UNOP_MATCHER(NumberAtanh)
+IS_UNOP_MATCHER(NumberCeil)
+IS_UNOP_MATCHER(NumberClz32)
+IS_UNOP_MATCHER(NumberCbrt)
+IS_UNOP_MATCHER(NumberCos)
+IS_UNOP_MATCHER(NumberCosh)
+IS_UNOP_MATCHER(NumberExp)
+IS_UNOP_MATCHER(NumberExpm1)
+IS_UNOP_MATCHER(NumberFloor)
+IS_UNOP_MATCHER(NumberFround)
+IS_UNOP_MATCHER(NumberLog)
+IS_UNOP_MATCHER(NumberLog1p)
+IS_UNOP_MATCHER(NumberLog10)
+IS_UNOP_MATCHER(NumberLog2)
+IS_UNOP_MATCHER(NumberRound)
+IS_UNOP_MATCHER(NumberSign)
+IS_UNOP_MATCHER(NumberSin)
+IS_UNOP_MATCHER(NumberSinh)
+IS_UNOP_MATCHER(NumberSqrt)
+IS_UNOP_MATCHER(NumberTan)
+IS_UNOP_MATCHER(NumberTanh)
+IS_UNOP_MATCHER(NumberTrunc)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
+IS_UNOP_MATCHER(PlainPrimitiveToNumber)
IS_UNOP_MATCHER(ObjectIsReceiver)
IS_UNOP_MATCHER(ObjectIsSmi)
+IS_UNOP_MATCHER(StringFromCharCode)
IS_UNOP_MATCHER(Word32Clz)
IS_UNOP_MATCHER(Word32Ctz)
IS_UNOP_MATCHER(Word32Popcnt)
+IS_UNOP_MATCHER(Word32ReverseBytes)
#undef IS_UNOP_MATCHER
} // namespace compiler
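
The DEFINE_SPECULATIVE_BINOP_MATCHER block above is an X-macro: the SPECULATIVE_BINOPS list (defined in node-test-utils.h, the next file) is invoked once with the definition macro as its argument, stamping out one Is&lt;Opcode&gt; factory function per speculative opcode. A minimal standalone sketch of the same pattern, with generic names and no V8 dependencies:

    #include <iostream>
    #include <string>

    // The list macro enumerates names; the V callback is expanded per entry.
    #define OPCODE_LIST(V)          \
      V(SpeculativeNumberAdd)       \
      V(SpeculativeNumberSubtract)

    // Passing a definition macro into the list generates one function each.
    #define DEFINE_NAME_GETTER(opcode) \
      std::string Is##opcode() { return "matcher for " #opcode; }
    OPCODE_LIST(DEFINE_NAME_GETTER)
    #undef DEFINE_NAME_GETTER

    int main() {
      std::cout << IsSpeculativeNumberAdd() << "\n";
      std::cout << IsSpeculativeNumberSubtract() << "\n";
    }
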
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index dd036c9939..2a24803380 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -6,6 +6,7 @@
#define V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-operator.h"
#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -33,6 +34,15 @@ class Node;
using ::testing::Matcher;
+#define SPECULATIVE_BINOPS(V) \
+ V(SpeculativeNumberAdd) \
+ V(SpeculativeNumberSubtract) \
+ V(SpeculativeNumberShiftLeft) \
+ V(SpeculativeNumberShiftRight) \
+ V(SpeculativeNumberShiftRightLogical) \
+ V(SpeculativeNumberBitwiseAnd) \
+ V(SpeculativeNumberBitwiseOr) \
+ V(SpeculativeNumberBitwiseXor)
Matcher<Node*> IsDead();
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher);
@@ -97,8 +107,6 @@ Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
const Matcher<Node*>& merge_matcher);
-Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
- const Matcher<Node*>& effect1_matcher);
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher);
Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
@@ -201,6 +209,18 @@ Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+
+#define DECLARE_SPECULATIVE_BINOP_MATCHER(opcode) \
+ Matcher<Node*> Is##opcode(const Matcher<NumberOperationHint>& hint_matcher, \
+ const Matcher<Node*>& lhs_matcher, \
+ const Matcher<Node*>& rhs_matcher, \
+ const Matcher<Node*>& effect_matcher, \
+ const Matcher<Node*>& control_matcher);
+SPECULATIVE_BINOPS(DECLARE_SPECULATIVE_BINOP_MATCHER);
+#undef DECLARE_SPECULATIVE_BINOP_MATCHER
+
Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberMultiply(const Matcher<Node*>& lhs_matcher,
@@ -213,6 +233,43 @@ Matcher<Node*> IsNumberShiftRightLogical(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberImul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberAbs(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAcos(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAcosh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAsin(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAsinh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtan(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtanh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtan2(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberCbrt(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCeil(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberClz32(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCos(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCosh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberExp(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberExpm1(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberFloor(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberFround(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog1p(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog10(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog2(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberMax(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberMin(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberRound(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberPow(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberSign(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSin(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSinh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSqrt(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTan(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTanh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTrunc(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsStringFromCharCode(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
@@ -257,12 +314,22 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsUnalignedLoad(
+ const Matcher<UnalignedLoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsUnalignedStore(
+ const Matcher<UnalignedStoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
@@ -295,6 +362,8 @@ Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32SubWithOverflow(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt32Sub(const Matcher<Node*>& lhs_matcher,
@@ -315,6 +384,7 @@ Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsTruncateFloat64ToWord32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
@@ -322,13 +392,9 @@ Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat32Max(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
-Matcher<Node*> IsFloat32Min(const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat32Neg(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat32Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat32LessThan(const Matcher<Node*>& lhs_matcher,
@@ -342,6 +408,7 @@ Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Abs(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Neg(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundDown(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
@@ -362,6 +429,8 @@ Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
Matcher<Node*> IsLoadFramePointer();
+Matcher<Node*> IsLoadParentFramePointer();
+Matcher<Node*> IsPlainPrimitiveToNumber(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsInt32PairAdd(const Matcher<Node*>& a_matcher,
const Matcher<Node*>& b_matcher,
@@ -386,11 +455,9 @@ Matcher<Node*> IsWord32PairShr(const Matcher<Node*>& lhs_matcher,
Matcher<Node*> IsWord32PairSar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& mid_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32ReverseBytes(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsStackSlot();
-Matcher<Node*> IsGuard(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& control_matcher);
} // namespace compiler
} // namespace internal
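
All of the declarations above are gmock matcher factories consumed through EXPECT_THAT. A hypothetical use of one of the newly added ones, assuming a fixture that exposes graph(), simplified() and Parameter() the way SimplifiedOperatorReducerTest further down does; the snippet is illustrative and not part of this diff:

    // Build a NumberAtan2 node and assert its shape with the new matcher.
    Node* lhs = Parameter(0);
    Node* rhs = Parameter(1);
    Node* node = graph()->NewNode(simplified()->NumberAtan2(), lhs, rhs);
    EXPECT_THAT(node, IsNumberAtan2(lhs, rhs));  // plain Node* converts to an Eq matcher
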
diff --git a/deps/v8/test/unittests/compiler/node-unittest.cc b/deps/v8/test/unittests/compiler/node-unittest.cc
index 5341f69716..8379e2668d 100644
--- a/deps/v8/test/unittests/compiler/node-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-unittest.cc
@@ -7,7 +7,9 @@
#include "test/unittests/test-utils.h"
#include "testing/gmock-support.h"
+using testing::Contains;
using testing::ElementsAre;
+using testing::ElementsAreArray;
using testing::UnorderedElementsAre;
namespace v8 {
@@ -252,6 +254,10 @@ TEST_F(NodeTest, BigNodes) {
for (int i = 0; i < size; i++) {
EXPECT_EQ(inputs[i], node->InputAt(i));
}
+
+ EXPECT_THAT(n0->uses(), Contains(node));
+ EXPECT_THAT(n1->uses(), Contains(node));
+ EXPECT_THAT(node->inputs(), ElementsAreArray(inputs, size));
}
}
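
Contains and ElementsAreArray, newly pulled in above, are stock gmock container matchers: Contains(x) passes if any element of the container equals x, while ElementsAreArray(ptr, n) checks the whole sequence against a C array. In isolation, using the standard include paths available outside the V8 tree:

    #include <vector>
    #include <gmock/gmock.h>
    #include <gtest/gtest.h>

    using testing::Contains;
    using testing::ElementsAreArray;

    TEST(GmockContainerSketch, ContainsAndElementsAreArray) {
      int inputs[] = {1, 2, 3};
      std::vector<int> v(inputs, inputs + 3);
      EXPECT_THAT(v, Contains(2));                  // passes: some element == 2
      EXPECT_THAT(v, ElementsAreArray(inputs, 3));  // passes: exact sequence match
    }
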
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
index c5ff90f301..71a726f167 100644
--- a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
@@ -678,8 +678,7 @@ TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
Allocate();
// TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
- // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
- // Expand the test once greedy is back online with this facility.
+ // so only var3 is spilled in deferred blocks.
const int var3_reg = 2;
const int var3_slot = 2;
diff --git a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
index 713ee6e742..97cafdb6e6 100644
--- a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "test/unittests/compiler/compiler-test-utils.h"
@@ -135,7 +137,7 @@ TEST_F(SchedulerRPOTest, EntryLoop) {
TEST_F(SchedulerRPOTest, EndLoop) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 2));
schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 3, true);
@@ -144,7 +146,7 @@ TEST_F(SchedulerRPOTest, EndLoop) {
TEST_F(SchedulerRPOTest, EndLoopNested) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 2));
schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
@@ -318,8 +320,8 @@ TEST_F(SchedulerRPOTest, LoopNest2) {
TEST_F(SchedulerRPOTest, LoopFollow1) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
@@ -338,8 +340,8 @@ TEST_F(SchedulerRPOTest, LoopFollow1) {
TEST_F(SchedulerRPOTest, LoopFollow2) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* S = schedule.NewBasicBlock();
@@ -361,8 +363,8 @@ TEST_F(SchedulerRPOTest, LoopFollowN) {
for (int size = 1; size < 5; size++) {
for (int exit = 0; exit < size; exit++) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, size));
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
@@ -381,8 +383,8 @@ TEST_F(SchedulerRPOTest, LoopFollowN) {
TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, 1));
+ std::unique_ptr<TestLoop> loop2(CreateLoop(&schedule, 1));
BasicBlock* A = schedule.start();
BasicBlock* B = schedule.NewBasicBlock();
@@ -414,7 +416,7 @@ TEST_F(SchedulerRPOTest, LoopBackedges1) {
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -437,7 +439,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges1) {
BasicBlock* D = schedule.NewBasicBlock();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -459,7 +461,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges2) {
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
@@ -481,7 +483,7 @@ TEST_F(SchedulerRPOTest, LoopOutloops1) {
Schedule schedule(zone());
BasicBlock* A = schedule.start();
BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ std::unique_ptr<TestLoop> loop1(CreateLoop(&schedule, size));
schedule.AddSuccessorForTesting(A, loop1->header());
schedule.AddSuccessorForTesting(loop1->last(), E);
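
Every base::SmartPointer&lt;TestLoop&gt; in this file becomes std::unique_ptr&lt;TestLoop&gt; with no behavioural change: both are single-owner RAII handles that delete the loop when it leaves scope, which is why the only other edit is the added &lt;memory&gt; include. A standalone illustration of the ownership pattern, with TestLoop reduced to a stub:

    #include <memory>

    struct TestLoop {
      // header(), last(), ... elided; only ownership is illustrated here.
    };

    TestLoop* CreateLoop() { return new TestLoop(); }  // stand-in for the test helper

    void OwnershipSketch() {
      std::unique_ptr<TestLoop> loop1(CreateLoop());  // takes ownership of the raw pointer
      // loop1->... is used exactly as with base::SmartPointer.
    }  // TestLoop is deleted here automatically
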
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index da77bdcb4c..1b57e5f483 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -552,12 +552,10 @@ TARGET_TEST_F(SchedulerTest, CallException) {
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
Node* c1 = graph()->NewNode(&kMockCall, start);
Node* ok1 = graph()->NewNode(common()->IfSuccess(), c1);
- Node* ex1 = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), c1, c1);
+ Node* ex1 = graph()->NewNode(common()->IfException(), c1, c1);
Node* c2 = graph()->NewNode(&kMockCall, ok1);
Node* ok2 = graph()->NewNode(common()->IfSuccess(), c2);
- Node* ex2 = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), c2, c2);
+ Node* ex2 = graph()->NewNode(common()->IfException(), c2, c2);
Node* hdl = graph()->NewNode(common()->Merge(2), ex1, ex2);
Node* m = graph()->NewNode(common()->Merge(2), ok2, hdl);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
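
The common()->IfException() operator no longer takes an IfExceptionHint; the projection shape around a call is otherwise unchanged. A condensed sketch of the new form, mirroring the test above (kMockCall is the file-local mock call operator; the call node is passed twice, once as effect input and once as control input):

    Node* call = graph()->NewNode(&kMockCall, graph()->start());
    Node* ok = graph()->NewNode(common()->IfSuccess(), call);          // success projection
    Node* ex = graph()->NewNode(common()->IfException(), call, call);  // exception projection
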
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index f571898107..b21a148718 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -20,10 +20,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-class SimplifiedOperatorReducerTest : public TypedGraphTest {
+class SimplifiedOperatorReducerTest : public GraphTest {
public:
explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
- : TypedGraphTest(num_parameters), simplified_(zone()) {}
+ : GraphTest(num_parameters), simplified_(zone()) {}
~SimplifiedOperatorReducerTest() override {}
protected:
@@ -32,7 +32,8 @@ class SimplifiedOperatorReducerTest : public TypedGraphTest {
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
- SimplifiedOperatorReducer reducer(&jsgraph);
+ GraphReducer graph_reducer(zone(), graph());
+ SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph);
return reducer.Reduce(node);
}
@@ -91,31 +92,15 @@ const int32_t kInt32Values[] = {
1866841746, 2032089723, 2147483647};
-const uint32_t kUint32Values[] = {
- 0x0, 0x5, 0x8, 0xc, 0xd, 0x26,
- 0x28, 0x29, 0x30, 0x34, 0x3e, 0x42,
- 0x50, 0x5b, 0x63, 0x71, 0x77, 0x7c,
- 0x83, 0x88, 0x96, 0x9c, 0xa3, 0xfa,
- 0x7a7, 0x165d, 0x234d, 0x3acb, 0x43a5, 0x4573,
- 0x5b4f, 0x5f14, 0x6996, 0x6c6e, 0x7289, 0x7b9a,
- 0x7bc9, 0x86bb, 0xa839, 0xaa41, 0xb03b, 0xc942,
- 0xce68, 0xcf4c, 0xd3ad, 0xdea3, 0xe90c, 0xed86,
- 0xfba5, 0x172dcc6, 0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
- 0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
- 0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
- 0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
- 0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
- 0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
- 0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
- 0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
- 0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
-
-
const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(),
bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
bit_cast<double>(V8_UINT64_C(0xFFFFFFFFFFFFFFFF))};
+const CheckForMinusZeroMode kCheckForMinusZeroModes[] = {
+ CheckForMinusZeroMode::kDontCheckForMinusZero,
+ CheckForMinusZeroMode::kCheckForMinusZero};
+
} // namespace
@@ -150,79 +135,72 @@ TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithTrueConstant) {
// -----------------------------------------------------------------------------
-// ChangeBoolToBit
-
+// ChangeBitToTagged
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithChangeBoolToBit) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToTaggedWithChangeTaggedToBit) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeBitToBool(),
- graph()->NewNode(simplified()->ChangeBoolToBit(), param0)));
+ simplified()->ChangeBitToTagged(),
+ graph()->NewNode(simplified()->ChangeTaggedToBit(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(param0, reduction.replacement());
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithZeroConstant) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToTaggedWithZeroConstant) {
Reduction reduction = Reduce(
- graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(0)));
+ graph()->NewNode(simplified()->ChangeBitToTagged(), Int32Constant(0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsFalseConstant());
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithOneConstant) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToTaggedWithOneConstant) {
Reduction reduction = Reduce(
- graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(1)));
+ graph()->NewNode(simplified()->ChangeBitToTagged(), Int32Constant(1)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsTrueConstant());
}
// -----------------------------------------------------------------------------
-// ChangeBoolToBit
-
+// ChangeTaggedToBit
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithFalseConstant) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithFalseConstant) {
Reduction reduction = Reduce(
- graph()->NewNode(simplified()->ChangeBoolToBit(), FalseConstant()));
+ graph()->NewNode(simplified()->ChangeTaggedToBit(), FalseConstant()));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithTrueConstant) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), TrueConstant()));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithTrueConstant) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ChangeTaggedToBit(), TrueConstant()));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsInt32Constant(1));
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithChangeBitToBool) {
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToBitWithChangeBitToTagged) {
Node* param0 = Parameter(0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeBoolToBit(),
- graph()->NewNode(simplified()->ChangeBitToBool(), param0)));
+ simplified()->ChangeTaggedToBit(),
+ graph()->NewNode(simplified()->ChangeBitToTagged(), param0)));
ASSERT_TRUE(reduction.Changed());
EXPECT_EQ(param0, reduction.replacement());
}
-
// -----------------------------------------------------------------------------
// ChangeFloat64ToTagged
-
TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeFloat64ToTagged(mode), Float64Constant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(n)));
+ }
}
}
-
// -----------------------------------------------------------------------------
// ChangeInt32ToTagged
@@ -244,14 +222,15 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToFloat64(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(param0, reduction.replacement());
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+ }
}
-
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToFloat64WithChangeInt32ToTagged) {
Node* param0 = Parameter(0);
@@ -297,18 +276,18 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant) {
// -----------------------------------------------------------------------------
// ChangeTaggedToInt32
-
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToInt32WithChangeFloat64ToTagged) {
Node* param0 = Parameter(0);
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+ }
}
-
TEST_F(SimplifiedOperatorReducerTest,
ChangeTaggedToInt32WithChangeInt32ToTagged) {
Node* param0 = Parameter(0);
@@ -320,84 +299,163 @@ TEST_F(SimplifiedOperatorReducerTest,
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
+// -----------------------------------------------------------------------------
+// ChangeTaggedToUint32
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+ EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
}
}
+TEST_F(SimplifiedOperatorReducerTest,
+ ChangeTaggedToUint32WithChangeUint32ToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(),
+ graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(param0, reduction.replacement());
+}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant) {
- TRACED_FOREACH(double, nan, kNaNs) {
+
+// -----------------------------------------------------------------------------
+// TruncateTaggedToWord32
+
+TEST_F(SimplifiedOperatorReducerTest,
+       TruncateTaggedToWord32WithChangeFloat64ToTagged) {
+ Node* param0 = Parameter(0);
+ TRACED_FOREACH(CheckForMinusZeroMode, mode, kCheckForMinusZeroModes) {
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(), NumberConstant(nan)));
+ simplified()->TruncateTaggedToWord32(),
+ graph()->NewNode(simplified()->ChangeFloat64ToTagged(mode), param0)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ EXPECT_THAT(reduction.replacement(), IsTruncateFloat64ToWord32(param0));
}
}
+TEST_F(SimplifiedOperatorReducerTest, TruncateTaggedToWord32WithConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->TruncateTaggedToWord32(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+ }
+}
// -----------------------------------------------------------------------------
-// ChangeTaggedToUint32
+// CheckTaggedPointer
-
-TEST_F(SimplifiedOperatorReducerTest,
- ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithChangeBitToTagged) {
Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = graph()->NewNode(simplified()->ChangeBitToTagged(), param0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(),
- graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+ simplified()->CheckTaggedPointer(), value, effect, control));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
+ EXPECT_EQ(value, reduction.replacement());
}
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithHeapConstant) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Handle<HeapObject> kHeapObjects[] = {
+ factory()->empty_string(), factory()->null_value(),
+ factory()->species_symbol(), factory()->undefined_value()};
+ TRACED_FOREACH(Handle<HeapObject>, object, kHeapObjects) {
+ Node* value = HeapConstant(object);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedPointer(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckTaggedSigned
TEST_F(SimplifiedOperatorReducerTest,
- ChangeTaggedToUint32WithChangeUint32ToTagged) {
+ CheckTaggedSignedWithChangeInt31ToTaggedSigned) {
Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value =
+ graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0);
Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(),
- graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+ simplified()->CheckTaggedSigned(), value, effect, control));
ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(param0, reduction.replacement());
+ EXPECT_EQ(value, reduction.replacement());
}
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
- }
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedSignedWithNumberConstant) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = NumberConstant(1.0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedSigned(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
}
+// -----------------------------------------------------------------------------
+// NumberAbs
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant) {
- TRACED_FOREACH(double, nan, kNaNs) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(), NumberConstant(nan)));
+TEST_F(SimplifiedOperatorReducerTest, NumberAbsWithNumberConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->NumberAbs(), NumberConstant(n)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(std::fabs(n)));
}
}
-
// -----------------------------------------------------------------------------
-// ChangeUint32ToTagged
+// ObjectIsSmi
+
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithChangeBitToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ObjectIsSmi(),
+ graph()->NewNode(simplified()->ChangeBitToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+}
+TEST_F(SimplifiedOperatorReducerTest,
+ ObjectIsSmiWithChangeInt31ToTaggedSigned) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ObjectIsSmi(),
+ graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsTrueConstant());
+}
-TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
- TRACED_FOREACH(uint32_t, n, kUint32Values) {
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithHeapConstant) {
+ Handle<HeapObject> kHeapObjects[] = {
+ factory()->empty_string(), factory()->null_value(),
+ factory()->species_symbol(), factory()->undefined_value()};
+ TRACED_FOREACH(Handle<HeapObject>, o, kHeapObjects) {
Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
- Int32Constant(bit_cast<int32_t>(n))));
+ Reduce(graph()->NewNode(simplified()->ObjectIsSmi(), HeapConstant(o)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+ }
+}
+
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithNumberConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ObjectIsSmi(), NumberConstant(n)));
ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(FastUI2D(n))));
+ EXPECT_THAT(reduction.replacement(), IsBooleanConstant(IsSmiDouble(n)));
}
}
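
A pattern worth noting in the rewritten tests above: since ChangeFloat64ToTagged is now parameterized by a CheckForMinusZeroMode, each reduction is verified under both modes via TRACED_FOREACH, V8's array-iteration helper that records the current element in the failure output. A plain-gtest equivalent of that pattern, sketched against the same fixture:

    // Run the body for every mode and tag any failure with the current mode.
    for (CheckForMinusZeroMode mode : kCheckForMinusZeroModes) {
      SCOPED_TRACE(static_cast<int>(mode));
      Reduction r = Reduce(graph()->NewNode(
          simplified()->ChangeFloat64ToTagged(mode), Float64Constant(1.0)));
      ASSERT_TRUE(r.Changed());
      EXPECT_THAT(r.replacement(), IsNumberConstant(1.0));
    }
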
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index bd8509ff97..febd76a528 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -31,7 +31,6 @@ std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
return os << IrOpcode::Mnemonic(pop.opcode);
}
-
const PureOperator kPureOperators[] = {
#define PURE(Name, properties, input_count) \
{ \
@@ -39,7 +38,6 @@ const PureOperator kPureOperators[] = {
Operator::kPure | properties, input_count \
}
PURE(BooleanNot, Operator::kNoProperties, 1),
- PURE(BooleanToNumber, Operator::kNoProperties, 1),
PURE(NumberEqual, Operator::kCommutative, 2),
PURE(NumberLessThan, Operator::kNoProperties, 2),
PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
@@ -56,15 +54,15 @@ const PureOperator kPureOperators[] = {
PURE(NumberShiftRightLogical, Operator::kNoProperties, 2),
PURE(NumberToInt32, Operator::kNoProperties, 1),
PURE(NumberToUint32, Operator::kNoProperties, 1),
- PURE(PlainPrimitiveToNumber, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
PURE(ChangeInt32ToTagged, Operator::kNoProperties, 1),
PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
- PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
- PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
- PURE(ChangeBitToBool, Operator::kNoProperties, 1),
+ PURE(ChangeTaggedToBit, Operator::kNoProperties, 1),
+ PURE(ChangeBitToTagged, Operator::kNoProperties, 1),
+ PURE(TruncateTaggedToWord32, Operator::kNoProperties, 1),
PURE(ObjectIsNumber, Operator::kNoProperties, 1),
PURE(ObjectIsReceiver, Operator::kNoProperties, 1),
PURE(ObjectIsSmi, Operator::kNoProperties, 1)
@@ -156,7 +154,8 @@ TEST_P(SimplifiedBufferAccessOperatorTest, LoadBuffer) {
const Operator* op = simplified.LoadBuffer(access);
EXPECT_EQ(IrOpcode::kLoadBuffer, op->opcode());
- EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ op->properties());
EXPECT_EQ(access, BufferAccessOf(op));
EXPECT_EQ(3, op->ValueInputCount());
@@ -176,7 +175,8 @@ TEST_P(SimplifiedBufferAccessOperatorTest, StoreBuffer) {
const Operator* op = simplified.StoreBuffer(access);
EXPECT_EQ(IrOpcode::kStoreBuffer, op->opcode());
- EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ op->properties());
EXPECT_EQ(access, BufferAccessOf(op));
EXPECT_EQ(4, op->ValueInputCount());
@@ -203,39 +203,46 @@ namespace {
const ElementAccess kElementAccesses[] = {
{kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Int8()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Int16()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Int32()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Uint8()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Uint16()},
- {kUntaggedBase, 0, Type::Any(), MachineType::Uint32()},
- {kUntaggedBase, 0, Type::Signed32(), MachineType::Int8()},
- {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint8()},
- {kUntaggedBase, 0, Type::Signed32(), MachineType::Int16()},
- {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint16()},
- {kUntaggedBase, 0, Type::Signed32(), MachineType::Int32()},
- {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint32()},
+ MachineType::AnyTagged(), kFullWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int8(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int16(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int32(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint8(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint16(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint32(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int8(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint8(),
+ kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int16(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint16(),
+ kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int32(), kNoWriteBarrier},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint32(),
+ kNoWriteBarrier},
{kUntaggedBase, 0, Type::Number(),
- MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
+ kNoWriteBarrier},
{kUntaggedBase, 0, Type::Number(),
- MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone)},
+ MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone),
+ kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- MachineType::Int8()},
+ MachineType::Int8(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- MachineType::Uint8()},
+ MachineType::Uint8(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- MachineType::Int16()},
+ MachineType::Int16(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- MachineType::Uint16()},
+ MachineType::Uint16(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- MachineType::Int32()},
+ MachineType::Int32(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- MachineType::Uint32()},
+ MachineType::Uint32(), kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
+ kNoWriteBarrier},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)}};
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
+ kNoWriteBarrier}};
} // namespace
@@ -251,7 +258,8 @@ TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
const Operator* op = simplified.LoadElement(access);
EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
- EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(2, op->ValueInputCount());
@@ -271,7 +279,8 @@ TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
const Operator* op = simplified.StoreElement(access);
EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
- EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(3, op->ValueInputCount());
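
The extra initializer appended to every kElementAccesses entry is the new WriteBarrierKind field on ElementAccess: stores of tagged pointers into the heap need kFullWriteBarrier so the GC observes the updated reference, while untagged and primitive stores take kNoWriteBarrier. Two entries from the table above, written out standalone with the fields in declaration order:

    // {base taggedness, header offset, element type, machine type, write barrier}
    ElementAccess tagged_access = {kTaggedBase, FixedArray::kHeaderSize,
                                   Type::Any(), MachineType::AnyTagged(),
                                   kFullWriteBarrier};
    ElementAccess int8_access = {kUntaggedBase, 0, Type::Any(),
                                 MachineType::Int8(), kNoWriteBarrier};
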
diff --git a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
index 3441c68b96..56fedeeb09 100644
--- a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
@@ -26,14 +26,12 @@ class TailCallOptimizationTest : public GraphTest {
TEST_F(TailCallOptimizationTest, CallCodeObject0) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
Node* p0 = Parameter(0);
@@ -48,14 +46,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject0) {
TEST_F(TailCallOptimizationTest, CallCodeObject1) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
@@ -63,8 +59,7 @@ TEST_F(TailCallOptimizationTest, CallCodeObject1) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* if_exception = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Node* if_exception = graph()->NewNode(common()->IfException(), call, call);
Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
Node* end = graph()->NewNode(common()->End(1), if_exception);
graph()->SetEnd(end);
@@ -74,14 +69,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject1) {
TEST_F(TailCallOptimizationTest, CallCodeObject2) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
@@ -98,14 +91,12 @@ TEST_F(TailCallOptimizationTest, CallCodeObject2) {
TEST_F(TailCallOptimizationTest, CallJSFunction0) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
- LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
Node* p0 = Parameter(0);
@@ -120,14 +111,12 @@ TEST_F(TailCallOptimizationTest, CallJSFunction0) {
TEST_F(TailCallOptimizationTest, CallJSFunction1) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
- LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
- LinkageLocation::ForRegister(1)};
+ LinkageLocation kLocationSignature[] = {
+ LinkageLocation::ForRegister(0, MachineType::Pointer()),
+ LinkageLocation::ForRegister(1, MachineType::Pointer())};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
@@ -135,8 +124,7 @@ TEST_F(TailCallOptimizationTest, CallJSFunction1) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* if_exception = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Node* if_exception = graph()->NewNode(common()->IfException(), call, call);
Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
Node* end = graph()->NewNode(common()->End(1), if_exception);
graph()->SetEnd(end);
@@ -146,14 +134,11 @@ TEST_F(TailCallOptimizationTest, CallJSFunction1) {
TEST_F(TailCallOptimizationTest, CallJSFunction2) {
- MachineType kMachineSignature[] = {MachineType::AnyTagged(),
- MachineType::AnyTagged()};
LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
LinkageLocation::ForRegister(1)};
const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
LinkageLocation::ForRegister(0),
- new (zone()) MachineSignature(1, 1, kMachineSignature),
new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
Node* p0 = Parameter(0);
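
Two related API changes drive all the edits in this file: LinkageLocation::ForRegister now carries the MachineType of the value held in that register (apparently with a default, since the CallJSFunction variants above still call ForRegister(0) bare), and CallDescriptor correspondingly drops its separate MachineSignature argument, being built from the LocationSignature alone. The new construction pattern, condensed from the tests above:

    LinkageLocation locs[] = {
        LinkageLocation::ForRegister(0, MachineType::Pointer()),   // target/return
        LinkageLocation::ForRegister(1, MachineType::Pointer())};  // parameter
    const CallDescriptor* desc = new (zone()) CallDescriptor(
        CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
        LinkageLocation::ForRegister(0, MachineType::Pointer()),
        new (zone()) LocationSignature(1, 1, locs), 0, Operator::kNoProperties,
        0, 0, CallDescriptor::kNoFlags);
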
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 9d664a6d3a..ca5c1cae45 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -51,7 +51,7 @@ class TyperTest : public TypedGraphTest {
Types types_;
JSOperatorBuilder javascript_;
- BinaryOperationHints const hints_ = BinaryOperationHints::Any();
+ BinaryOperationHint const hints_ = BinaryOperationHint::kAny;
Node* context_node_;
v8::base::RandomNumberGenerator* rng_;
std::vector<double> integers;
@@ -290,44 +290,51 @@ TEST_F(TyperTest, TypeJSShiftRight) {
TEST_F(TyperTest, TypeJSLessThan) {
- TestBinaryCompareOp(javascript_.LessThan(), std::less<double>());
+ TestBinaryCompareOp(javascript_.LessThan(CompareOperationHint::kAny),
+ std::less<double>());
}
TEST_F(TyperTest, TypeJSLessThanOrEqual) {
- TestBinaryCompareOp(javascript_.LessThanOrEqual(), std::less_equal<double>());
+ TestBinaryCompareOp(javascript_.LessThanOrEqual(CompareOperationHint::kAny),
+ std::less_equal<double>());
}
TEST_F(TyperTest, TypeJSGreaterThan) {
- TestBinaryCompareOp(javascript_.GreaterThan(), std::greater<double>());
+ TestBinaryCompareOp(javascript_.GreaterThan(CompareOperationHint::kAny),
+ std::greater<double>());
}
TEST_F(TyperTest, TypeJSGreaterThanOrEqual) {
- TestBinaryCompareOp(javascript_.GreaterThanOrEqual(),
- std::greater_equal<double>());
+ TestBinaryCompareOp(
+ javascript_.GreaterThanOrEqual(CompareOperationHint::kAny),
+ std::greater_equal<double>());
}
TEST_F(TyperTest, TypeJSEqual) {
- TestBinaryCompareOp(javascript_.Equal(), std::equal_to<double>());
+ TestBinaryCompareOp(javascript_.Equal(CompareOperationHint::kAny),
+ std::equal_to<double>());
}
TEST_F(TyperTest, TypeJSNotEqual) {
- TestBinaryCompareOp(javascript_.NotEqual(), std::not_equal_to<double>());
+ TestBinaryCompareOp(javascript_.NotEqual(CompareOperationHint::kAny),
+ std::not_equal_to<double>());
}
// For numbers there's no difference between strict and non-strict equality.
TEST_F(TyperTest, TypeJSStrictEqual) {
- TestBinaryCompareOp(javascript_.StrictEqual(), std::equal_to<double>());
+ TestBinaryCompareOp(javascript_.StrictEqual(CompareOperationHint::kAny),
+ std::equal_to<double>());
}
TEST_F(TyperTest, TypeJSStrictNotEqual) {
- TestBinaryCompareOp(javascript_.StrictNotEqual(),
+ TestBinaryCompareOp(javascript_.StrictNotEqual(CompareOperationHint::kAny),
std::not_equal_to<double>());
}
@@ -335,10 +342,9 @@ TEST_F(TyperTest, TypeJSStrictNotEqual) {
//------------------------------------------------------------------------------
// Monotonicity
-
-#define TEST_BINARY_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestBinaryMonotonicity(javascript_.name()); \
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name(CompareOperationHint::kAny)); \
}
TEST_BINARY_MONOTONICITY(Equal)
TEST_BINARY_MONOTONICITY(NotEqual)
@@ -350,9 +356,9 @@ TEST_BINARY_MONOTONICITY(LessThanOrEqual)
TEST_BINARY_MONOTONICITY(GreaterThanOrEqual)
#undef TEST_BINARY_MONOTONICITY
-#define TEST_BINARY_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestBinaryMonotonicity(javascript_.name(BinaryOperationHints::Any())); \
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name(BinaryOperationHint::kAny)); \
}
TEST_BINARY_MONOTONICITY(BitwiseOr)
TEST_BINARY_MONOTONICITY(BitwiseXor)
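
The hint container BinaryOperationHints with its Any() factory is replaced by the flat enum value BinaryOperationHint::kAny, and every comparison operator on JSOperatorBuilder now requires a CompareOperationHint; that is why the monotonicity macro is split into a comparison variant and an arithmetic/bitwise variant. Constructing hinted operators directly, for illustration:

    JSOperatorBuilder javascript(zone());
    const Operator* lt = javascript.LessThan(CompareOperationHint::kAny);   // comparison hint
    const Operator* bor = javascript.BitwiseOr(BinaryOperationHint::kAny);  // binary-op hint
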
diff --git a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
index c003033940..c04d6609a7 100644
--- a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -28,7 +28,8 @@ static const TestOperator kOp1(1, Operator::kIdempotent, 1, 1);
class ValueNumberingReducerTest : public TestWithZone {
public:
- ValueNumberingReducerTest() : graph_(zone()), reducer_(zone()) {}
+ ValueNumberingReducerTest()
+ : graph_(zone()), reducer_(zone(), graph()->zone()) {}
protected:
Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
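
ValueNumberingReducer is now constructed with two zones instead of one. A sketch of the split, matching the fixture above; the exact internal use of each zone is not shown here:

    Graph graph(zone());
    // First argument: a temporary zone for the reducer's own bookkeeping;
    // second: the zone the graph's nodes live in.
    ValueNumberingReducer reducer(zone(), graph.zone());
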
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index d6ed73266c..540c5e71c2 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -33,7 +33,6 @@ TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
}
-
TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
@@ -71,6 +70,41 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
}
+namespace {
+struct LoadWithToInt64Extension {
+ MachineType type;
+ ArchOpcode expected_opcode;
+};
+
+std::ostream& operator<<(std::ostream& os,
+ const LoadWithToInt64Extension& i32toi64) {
+ return os << i32toi64.type;
+}
+
+static const LoadWithToInt64Extension kLoadWithToInt64Extensions[] = {
+ {MachineType::Int8(), kX64Movsxbq},
+ {MachineType::Uint8(), kX64Movzxbq},
+ {MachineType::Int16(), kX64Movsxwq},
+ {MachineType::Uint16(), kX64Movzxwq},
+ {MachineType::Int32(), kX64Movsxlq}};
+
+} // namespace
+
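+// The parameterized test below expects the selector to fold the
+// int32-to-int64 extension into the narrow load itself: a sign-extending
+// movsx opcode for the signed machine types and a zero-extending movzx for
+// the unsigned ones, as enumerated in kLoadWithToInt64Extensions above.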
+typedef InstructionSelectorTestWithParam<LoadWithToInt64Extension>
+ InstructionSelectorChangeInt32ToInt64Test;
+
+TEST_P(InstructionSelectorChangeInt32ToInt64Test, ChangeInt32ToInt64WithLoad) {
+ const LoadWithToInt64Extension extension = GetParam();
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer());
+ m.Return(m.ChangeInt32ToInt64(m.Load(extension.type, m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(extension.expected_opcode, s[0]->arch_opcode());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorChangeInt32ToInt64Test,
+ ::testing::ValuesIn(kLoadWithToInt64Extensions));
// -----------------------------------------------------------------------------
// Loads and stores
@@ -1134,71 +1168,6 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
-
-TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
-TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
- {
- StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build(AVX);
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
- }
-}
-
-
// -----------------------------------------------------------------------------
// Miscellaneous.
@@ -1334,6 +1303,55 @@ TEST_F(InstructionSelectorTest, Word32Clz) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
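+// The three cases below assume the little-endian layout of x64: shifting a
+// freshly loaded 64-bit value right by 32 is equivalent to loading just its
+// upper half at displacement 4, so a single 4-byte load is expected (movl
+// for the logical shift, movsxlq for the arithmetic one, and movl again once
+// the result is truncated back to 32 bits).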
+TEST_F(InstructionSelectorTest, LoadAndWord64ShiftRight32) {
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const load = m.Load(MachineType::Uint64(), p0);
+ Node* const shift = m.Word64Shr(load, m.Int32Constant(32));
+ m.Return(shift);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const load = m.Load(MachineType::Int64(), p0);
+ Node* const shift = m.Word64Sar(load, m.Int32Constant(32));
+ m.Return(shift);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const load = m.Load(MachineType::Int64(), p0);
+ Node* const shift = m.Word64Sar(load, m.Int32Constant(32));
+ Node* const truncate = m.TruncateInt64ToInt32(shift);
+ m.Return(truncate);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(4, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(shift), s.ToVreg(s[0]->Output()));
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
new file mode 100644
index 0000000000..27485db67e
--- /dev/null
+++ b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Test enabled only on supported architectures.
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_ARM64)
+
+using namespace v8::internal;
+
+namespace {
+
+class EhFrameIteratorTest : public testing::Test {};
+
+} // namespace
+
+TEST_F(EhFrameIteratorTest, Values) {
+ // Assuming little endian.
+ static const byte kEncoded[] = {0xde, 0xc0, 0xad, 0xde, 0xef, 0xbe, 0xff};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(0xdeadc0de, iterator.GetNextUInt32());
+ EXPECT_EQ(0xbeef, iterator.GetNextUInt16());
+ EXPECT_EQ(0xff, iterator.GetNextByte());
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, Skip) {
+ static const byte kEncoded[] = {0xde, 0xad, 0xc0, 0xde};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ iterator.Skip(2);
+ EXPECT_EQ(2, iterator.GetCurrentOffset());
+ EXPECT_EQ(0xc0, iterator.GetNextByte());
+ iterator.Skip(1);
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, ULEB128Decoding) {
+ static const byte kEncoded[] = {0xe5, 0x8e, 0x26};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(624485, iterator.GetNextULeb128());
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, SLEB128DecodingPositive) {
+ static const byte kEncoded[] = {0xe5, 0x8e, 0x26};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(624485, iterator.GetNextSLeb128());
+ EXPECT_TRUE(iterator.Done());
+}
+
+TEST_F(EhFrameIteratorTest, SLEB128DecodingNegative) {
+ static const byte kEncoded[] = {0x9b, 0xf1, 0x59};
+ EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
+ EXPECT_EQ(-624485, iterator.GetNextSLeb128());
+ EXPECT_TRUE(iterator.Done());
+}
+
+#endif
diff --git a/deps/v8/test/unittests/eh-frame-writer-unittest.cc b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
new file mode 100644
index 0000000000..98503986aa
--- /dev/null
+++ b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
@@ -0,0 +1,464 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/eh-frame.h"
+#include "test/unittests/test-utils.h"
+
+// Test enabled only on supported architectures.
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
+ defined(V8_TARGET_ARCH_ARM64)
+
+using namespace v8::internal;
+
+namespace {
+
+class EhFrameWriterTest : public TestWithZone {
+ protected:
+ // Being a 7-bit positive integer, this also serves as its ULEB128 encoding.
+ static const int kTestRegisterCode = 0;
+
+ static EhFrameIterator MakeIterator(EhFrameWriter* writer) {
+ CodeDesc desc;
+ writer->GetEhFrame(&desc);
+ DCHECK_GT(desc.unwinding_info_size, 0);
+ return EhFrameIterator(desc.unwinding_info,
+ desc.unwinding_info + desc.unwinding_info_size);
+ }
+};
+
+const int EhFrameWriterTest::kTestRegisterCode;
+
+} // namespace
+
+TEST_F(EhFrameWriterTest, Alignment) {
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(42 * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ ASSERT_EQ(0, EhFrameConstants::kEhFrameHdrSize % 4);
+ ASSERT_EQ(0, EhFrameConstants::kEhFrameTerminatorSize % 4);
+ EXPECT_EQ(0, (iterator.GetBufferSize() - EhFrameConstants::kEhFrameHdrSize -
+ EhFrameConstants::kEhFrameTerminatorSize) %
+ kPointerSize);
+}
+
+TEST_F(EhFrameWriterTest, FDEHeader) {
+ static const int kProcedureSize = 0x5678abcd;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.Finish(kProcedureSize);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ int cie_size = iterator.GetNextUInt32();
+ iterator.Skip(cie_size);
+
+ int fde_size = iterator.GetNextUInt32();
+ EXPECT_EQ(iterator.GetBufferSize(),
+ fde_size + cie_size + EhFrameConstants::kEhFrameTerminatorSize +
+ EhFrameConstants::kEhFrameHdrSize + 2 * kInt32Size);
+
+ int backwards_offset_to_cie_offset = iterator.GetCurrentOffset();
+ int backwards_offset_to_cie = iterator.GetNextUInt32();
+ EXPECT_EQ(backwards_offset_to_cie_offset, backwards_offset_to_cie);
+
+ int procedure_address_offset = iterator.GetCurrentOffset();
+ int procedure_address = iterator.GetNextUInt32();
+ EXPECT_EQ(-(procedure_address_offset + RoundUp(kProcedureSize, 8)),
+ procedure_address);
+
+ int procedure_size = iterator.GetNextUInt32();
+ EXPECT_EQ(kProcedureSize, procedure_size);
+}
+
+TEST_F(EhFrameWriterTest, SetOffset) {
+ static const int kOffset = 0x0badc0de;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressOffset(kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaOffset,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, IncreaseOffset) {
+ static const int kFirstOffset = 121;
+ static const int kSecondOffset = 16;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressOffset(kFirstOffset);
+ writer.IncreaseBaseAddressOffset(kSecondOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaOffset,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset, iterator.GetNextULeb128());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaOffset,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset + kSecondOffset, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, SetRegister) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressRegister(test_register);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaRegister,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, SetRegisterAndOffset) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+ static const int kOffset = 0x0badc0de;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.SetBaseAddressRegisterAndOffset(test_register, kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfa, iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(kOffset, iterator.GetNextULeb128());
+}
+
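+// The expectations below rely on the DWARF CFA primary opcode encoding: the
+// two high bits of the first byte select the opcode (0x1 advance_loc,
+// 0x2 offset, 0x3 restore) and the low six bits carry the operand, hence
+// checks of the form (1 << 6) | delta or (2 << 6) | register_code.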
+TEST_F(EhFrameWriterTest, PcOffsetEncoding6bit) {
+ static const int kOffset = 42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((1 << 6) | kOffset, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding6bitDelta) {
+ static const int kFirstOffset = 42;
+ static const int kSecondOffset = 62;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((1 << 6) | kFirstOffset, iterator.GetNextByte());
+ EXPECT_EQ((1 << 6) | (kSecondOffset - kFirstOffset), iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding8bit) {
+ static const int kOffset = 0x42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding8bitDelta) {
+ static const int kFirstOffset = 0x10;
+ static const int kSecondOffset = 0x70;
+ static const int kThirdOffset = 0xb5;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kThirdOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((1 << 6) | kFirstOffset, iterator.GetNextByte());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kSecondOffset - kFirstOffset, iterator.GetNextByte());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kThirdOffset - kSecondOffset, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding16bit) {
+ static const int kOffset = kMaxUInt8 + 42;
+ ASSERT_LT(kOffset, kMaxUInt16);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc2,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextUInt16());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding16bitDelta) {
+ static const int kFirstOffset = 0x41;
+ static const int kSecondOffset = kMaxUInt8 + 0x42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc1,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset, iterator.GetNextByte());
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc2,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kSecondOffset - kFirstOffset, iterator.GetNextUInt16());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding32bit) {
+ static const int kOffset = kMaxUInt16 + 42;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc4,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kOffset, iterator.GetNextUInt32());
+}
+
+TEST_F(EhFrameWriterTest, PcOffsetEncoding32bitDelta) {
+ static const int kFirstOffset = kMaxUInt16 + 0x42;
+ static const int kSecondOffset = kMaxUInt16 + 0x67;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.AdvanceLocation(kFirstOffset * EhFrameConstants::kCodeAlignmentFactor);
+ writer.AdvanceLocation(kSecondOffset *
+ EhFrameConstants::kCodeAlignmentFactor);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kAdvanceLoc4,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kFirstOffset, iterator.GetNextUInt32());
+
+ EXPECT_EQ((1 << 6) | (kSecondOffset - kFirstOffset), iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, SaveRegisterUnsignedOffset) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+ static const int kOffset =
+ EhFrameConstants::kDataAlignmentFactor > 0 ? 12344 : -12344;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterSavedToStack(test_register, kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((2 << 6) | kTestRegisterCode, iterator.GetNextByte());
+ EXPECT_EQ(kOffset / EhFrameConstants::kDataAlignmentFactor,
+ iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, SaveRegisterSignedOffset) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+ static const int kOffset =
+ EhFrameConstants::kDataAlignmentFactor < 0 ? 12344 : -12344;
+
+ ASSERT_EQ(kOffset % EhFrameConstants::kDataAlignmentFactor, 0);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterSavedToStack(test_register, kOffset);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kOffsetExtendedSf,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(kOffset / EhFrameConstants::kDataAlignmentFactor,
+ iterator.GetNextSLeb128());
+}
+
+TEST_F(EhFrameWriterTest, RegisterNotModified) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterNotModified(test_register);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kSameValue,
+ iterator.GetNextOpcode());
+ EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+}
+
+TEST_F(EhFrameWriterTest, RegisterFollowsInitialRule) {
+ Register test_register = Register::from_code(kTestRegisterCode);
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.RecordRegisterFollowsInitialRule(test_register);
+ writer.Finish(100);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+ iterator.SkipToFdeDirectives();
+
+ EXPECT_EQ((3 << 6) | kTestRegisterCode, iterator.GetNextByte());
+}
+
+TEST_F(EhFrameWriterTest, EhFrameHdrLayout) {
+ static const int kCodeSize = 10;
+ static const int kPaddingSize = 6;
+
+ EhFrameWriter writer(zone());
+ writer.Initialize();
+ writer.Finish(kCodeSize);
+
+ EhFrameIterator iterator = MakeIterator(&writer);
+
+ // Skip the .eh_frame.
+
+ int encoded_cie_size = iterator.GetNextUInt32();
+ iterator.Skip(encoded_cie_size);
+ int cie_size = encoded_cie_size + kInt32Size;
+
+ int encoded_fde_size = iterator.GetNextUInt32();
+ iterator.Skip(encoded_fde_size);
+ int fde_size = encoded_fde_size + kInt32Size;
+
+ iterator.Skip(EhFrameConstants::kEhFrameTerminatorSize);
+
+ int eh_frame_size =
+ cie_size + fde_size + EhFrameConstants::kEhFrameTerminatorSize;
+
+ //
+ // Plugging some numbers into the DSO layout shown in eh-frame.cc:
+ //
+ // | ... |
+ // +---------------+ <-- (E) ---------
+ // | | ^
+ // | Instructions | 10 bytes | .text
+ // | | v
+ // +---------------+ <----------------
+ // |///////////////|
+ // |////Padding////| 6 bytes
+ // |///////////////|
+ // +---------------+ <---(D)----------
+ // | | ^
+ // | CIE | cie_size bytes* |
+ // | | |
+ // +---------------+ <-- (C) |
+ // | | | .eh_frame
+ // | FDE | fde_size bytes |
+ // | | |
+ // +---------------+ |
+ // | terminator | 4 bytes v
+ // +---------------+ <-- (B) ---------
+ // | version | ^
+ // +---------------+ 4 bytes |
+ // | encoding | |
+ // | specifiers | |
+ // +---------------+ <---(A) | .eh_frame_hdr
+ // | offset to | |
+ // | .eh_frame | |
+ // +---------------+ |
+ // | ... | ...
+ //
+ // (*) the size of the CIE is platform dependent.
+ //
+
+ int eh_frame_hdr_version = iterator.GetNextByte();
+ EXPECT_EQ(EhFrameConstants::kEhFrameHdrVersion, eh_frame_hdr_version);
+
+ // .eh_frame pointer encoding specifier.
+ EXPECT_EQ(EhFrameConstants::kSData4 | EhFrameConstants::kPcRel,
+ iterator.GetNextByte());
+
+ // Lookup table size encoding specifier.
+ EXPECT_EQ(EhFrameConstants::kUData4, iterator.GetNextByte());
+
+ // Lookup table pointers encoding specifier.
+ EXPECT_EQ(EhFrameConstants::kSData4 | EhFrameConstants::kDataRel,
+ iterator.GetNextByte());
+
+ // A -> D
+ int offset_to_eh_frame = iterator.GetNextUInt32();
+ EXPECT_EQ(-(EhFrameConstants::kFdeVersionSize +
+ EhFrameConstants::kFdeEncodingSpecifiersSize + eh_frame_size),
+ offset_to_eh_frame);
+
+ int lut_entries = iterator.GetNextUInt32();
+ EXPECT_EQ(1, lut_entries);
+
+ // B -> E
+ int offset_to_procedure = iterator.GetNextUInt32();
+ EXPECT_EQ(-(eh_frame_size + kPaddingSize + kCodeSize), offset_to_procedure);
+
+ // B -> C
+ int offset_to_fde = iterator.GetNextUInt32();
+ EXPECT_EQ(-(fde_size + EhFrameConstants::kEhFrameTerminatorSize),
+ offset_to_fde);
+}
+
+#endif
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 2bf4d037d3..84e4d973e2 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -5,12 +5,17 @@
#include <cmath>
#include <limits>
+#include "src/globals.h"
#include "src/heap/gc-tracer.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
+typedef TestWithContext GCTracerTest;
+
TEST(GCTracer, AverageSpeed) {
RingBuffer<BytesAndDuration> buffer;
EXPECT_EQ(100 / 2,
@@ -45,5 +50,190 @@ TEST(GCTracer, AverageSpeed) {
GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
}
+namespace {
+
+void SampleAndAddAllocation(v8::internal::GCTracer* tracer, double time_ms,
+ size_t new_space_counter_bytes,
+ size_t old_generation_counter_bytes) {
+ tracer->SampleAllocation(time_ms, new_space_counter_bytes,
+ old_generation_counter_bytes);
+ tracer->AddAllocation(time_ms);
+}
+
+} // namespace
+
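+// The throughput tests below sample synthetic counters and check the rate
+// (counter delta / time delta). Where a test feeds the same counter to both
+// the new-space and old-generation trackers, the combined throughput is
+// expected to be twice that per-space slope.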
+TEST_F(GCTracerTest, AllocationThroughput) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+ // First sample creates baseline but is not part of the recorded samples.
+ tracer->SampleAllocation(time1, counter1, counter1);
+ SampleAndAddAllocation(tracer, time1, counter1, counter1);
+ int time2 = 200;
+ size_t counter2 = 2000;
+ SampleAndAddAllocation(tracer, time2, counter2, counter2);
+ // Will only consider the current sample.
+ size_t throughput = static_cast<size_t>(
+ tracer->AllocationThroughputInBytesPerMillisecond(100));
+ EXPECT_EQ(2 * (counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+ SampleAndAddAllocation(tracer, time3, counter3, counter3);
+ // Considers last 2 samples.
+ throughput = tracer->AllocationThroughputInBytesPerMillisecond(801);
+ EXPECT_EQ(2 * (counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, NewSpaceAllocationThroughput) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+ SampleAndAddAllocation(tracer, time1, counter1, 0);
+ int time2 = 200;
+ size_t counter2 = 2000;
+ SampleAndAddAllocation(tracer, time2, counter2, 0);
+ size_t throughput =
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
+ EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+ SampleAndAddAllocation(tracer, time3, counter3, 0);
+ throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
+ EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, NewSpaceAllocationThroughputWithProvidedTime) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+ // First sample creates baseline but is not part of the recorded samples.
+ SampleAndAddAllocation(tracer, time1, counter1, 0);
+ int time2 = 200;
+ size_t counter2 = 2000;
+ SampleAndAddAllocation(tracer, time2, counter2, 0);
+ // Will only consider the current sample.
+ size_t throughput =
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
+ EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+ SampleAndAddAllocation(tracer, time3, counter3, 0);
+ // Considers last 2 samples.
+ throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(801);
+ EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, OldGenerationAllocationThroughputWithProvidedTime) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ int time1 = 100;
+ size_t counter1 = 1000;
+ // First sample creates baseline but is not part of the recorded samples.
+ SampleAndAddAllocation(tracer, time1, 0, counter1);
+ int time2 = 200;
+ size_t counter2 = 2000;
+ SampleAndAddAllocation(tracer, time2, 0, counter2);
+ // Will only consider the current sample.
+ size_t throughput = static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
+ EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
+ int time3 = 1000;
+ size_t counter3 = 30000;
+ SampleAndAddAllocation(tracer, time3, 0, counter3);
+ // Considers last 2 samples.
+ throughput = static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(801));
+ EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+}
+
+TEST_F(GCTracerTest, RegularScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ EXPECT_DOUBLE_EQ(0.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
+ // Sample not added because it's not within a started tracer.
+ tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ tracer->AddScopeSample(GCTracer::Scope::MC_MARK, 100);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(100.0, tracer->current_.scopes[GCTracer::Scope::MC_MARK]);
+}
+
+TEST_F(GCTracerTest, IncrementalScope) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ EXPECT_DOUBLE_EQ(
+ 0.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
+ // Sample is added because its ScopeId is listed as an incremental sample.
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ // Switch to incremental MC to enable writing back incremental scopes.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(
+ 200.0, tracer->current_.scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]);
+}
+
+TEST_F(GCTracerTest, IncrementalMarkingDetails) {
+ GCTracer* tracer = i_isolate()->heap()->tracer();
+ tracer->ResetForTesting();
+
+ // Round 1.
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 50);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ // Switch to incremental MC to enable writing back incremental scopes.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 100);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(
+ 100,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .longest_step);
+ EXPECT_EQ(
+ 2,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .steps);
+ EXPECT_DOUBLE_EQ(
+ 150,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .cumulative_duration);
+
+ // Round 2. Cumulative numbers should add up, others should be reset.
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 13);
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 15);
+ tracer->Start(MARK_COMPACTOR, "gc unittest", "collector unittest");
+ // Switch to incremental MC to enable writing back incremental scopes.
+ tracer->current_.type = GCTracer::Event::INCREMENTAL_MARK_COMPACTOR;
+ tracer->AddScopeSample(GCTracer::Scope::MC_INCREMENTAL_FINALIZE, 122);
+ tracer->Stop(MARK_COMPACTOR);
+ EXPECT_DOUBLE_EQ(
+ 122,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .longest_step);
+ EXPECT_EQ(
+ 3,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .steps);
+ EXPECT_DOUBLE_EQ(
+ 300,
+ tracer->current_
+ .incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL_FINALIZE]
+ .cumulative_duration);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
new file mode 100644
index 0000000000..0015cce8d7
--- /dev/null
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -0,0 +1,160 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/globals.h"
+#include "src/heap/marking.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
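+// These tests probe mark bits around a cell boundary (kBitsPerCell - 2,
+// kBitsPerCell - 1 and kBitsPerCell) and rely on calloc zero-filling the
+// bitmap, which the assertions below treat as the all-white state.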
+TEST(Marking, MarkWhiteBlackWhite) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
+TEST(Marking, TransitionWhiteBlackWhite) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::WhiteToBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::BlackToWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
+TEST(Marking, TransitionAnyToGrey) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::AnyToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::AnyToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
+TEST(Marking, TransitionWhiteGreyBlackGrey) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ const int kLocationsSize = 3;
+ int position[kLocationsSize] = {
+ Bitmap::kBitsPerCell - 2, Bitmap::kBitsPerCell - 1, Bitmap::kBitsPerCell};
+ for (int i = 0; i < kLocationsSize; i++) {
+ MarkBit mark_bit = bitmap->MarkBitFromIndex(position[i]);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::WhiteToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::GreyToBlack(mark_bit);
+ CHECK(Marking::IsBlack(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::BlackToGrey(mark_bit);
+ CHECK(Marking::IsGrey(mark_bit));
+ CHECK(Marking::IsBlackOrGrey(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ Marking::MarkWhite(mark_bit);
+ CHECK(Marking::IsWhite(mark_bit));
+ CHECK(!Marking::IsImpossible(mark_bit));
+ }
+ free(bitmap);
+}
+
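+// SetRange(i, kBitsPerCell + i) marks one full cell's worth of bits starting
+// at bit i, so the first 32-bit word becomes 0xffffffff << i while the i
+// bits that wrap into the second word form (1 << i) - 1.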
+TEST(Marking, SetAndClearRange) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ for (int i = 0; i < 3; i++) {
+ bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff << i);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1 << i) - 1);
+ bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0);
+ }
+ free(bitmap);
+}
+
+TEST(Marking, ClearMultipleRanges) {
+ Bitmap* bitmap = reinterpret_cast<Bitmap*>(
+ calloc(Bitmap::kSize / kPointerSize, kPointerSize));
+ CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
+ bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffff);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffff);
+ CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
+ bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
+ bitmap->ClearRange(Bitmap::kBitsPerCell,
+ Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
+ bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 8,
+ Bitmap::kBitsPerCell * 2 + 16);
+ bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffff);
+ CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
+ CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
+ Bitmap::kBitsPerCell));
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000);
+ CHECK(
+ bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
+ 2 * Bitmap::kBitsPerCell));
+ CHECK(bitmap->AllBitsClearInRange(
+ Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ff);
+ CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
+ 2 * Bitmap::kBitsPerCell + 8));
+ CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
+ Bitmap::kBitsPerCell * 3));
+ free(bitmap);
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 26a26f0258..cfb1f1f9d2 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -142,23 +142,29 @@ TEST(SlotSet, RemoveRange) {
TEST(TypedSlotSet, Iterate) {
TypedSlotSet set(0);
const int kDelta = 10000001;
+ const int kHostDelta = 50001;
int added = 0;
- for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
+ uint32_t j = 0;
+ for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset;
+ i += kDelta, j += kHostDelta) {
SlotType type = static_cast<SlotType>(i % NUMBER_OF_SLOT_TYPES);
- set.Insert(type, i);
+ set.Insert(type, j, i);
++added;
}
int iterated = 0;
- set.Iterate([&iterated, kDelta](SlotType type, Address addr) {
+ set.Iterate([&iterated, kDelta, kHostDelta](SlotType type, Address host_addr,
+ Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
+ uint32_t j = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
EXPECT_EQ(i % NUMBER_OF_SLOT_TYPES, static_cast<uint32_t>(type));
EXPECT_EQ(0, i % kDelta);
+ EXPECT_EQ(0, j % kHostDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
});
EXPECT_EQ(added, iterated);
iterated = 0;
- set.Iterate([&iterated](SlotType type, Address addr) {
+ set.Iterate([&iterated](SlotType type, Address host_addr, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(0, i % 2);
++iterated;
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 255d836af5..fffc97f54d 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -6,6 +6,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "test/unittests/test-utils.h"
@@ -21,12 +22,18 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 1, 131);
+ Factory* factory = isolate()->factory();
CHECK_EQ(builder.locals_count(), 131);
CHECK_EQ(builder.context_count(), 1);
CHECK_EQ(builder.fixed_register_count(), 132);
+ Register reg(0);
+ Register other(reg.index() + 1);
+ Register wide(128);
+
// Emit argument creation operations.
builder.CreateArguments(CreateArgumentsType::kMappedArguments)
.CreateArguments(CreateArgumentsType::kUnmappedArguments)
@@ -34,31 +41,41 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Emit constant loads.
builder.LoadLiteral(Smi::FromInt(0))
+ .StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(8))
+ .CompareOperation(Token::Value::NE, reg) // Prevent peephole optimization
+ // LdaSmi, Star -> LdrSmi.
+ .StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(10000000))
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(factory->NewStringFromStaticChars("A constant"))
+ .StoreAccumulatorInRegister(reg)
.LoadUndefined()
+ .Debugger() // Prevent peephole optimization LdaNull, Star -> LdrNull.
.LoadNull()
+ .StoreAccumulatorInRegister(reg)
.LoadTheHole()
+ .StoreAccumulatorInRegister(reg)
.LoadTrue()
- .LoadFalse();
-
- Register reg(0);
- Register other(reg.index() + 1);
- Register wide(128);
+ .StoreAccumulatorInRegister(reg)
+ .LoadFalse()
+ .StoreAccumulatorInRegister(wide);
- builder.LoadAccumulatorWithRegister(reg)
- .LoadNull()
- .StoreAccumulatorInRegister(reg);
+ // Emit Ldar and Star taking care to foil the register optimizer.
+ builder.StackCheck(0)
+ .LoadAccumulatorWithRegister(other)
+ .BinaryOperation(Token::ADD, reg, 1)
+ .StoreAccumulatorInRegister(reg)
+ .LoadNull();
// Emit register-register transfer.
builder.MoveRegister(reg, other);
builder.MoveRegister(reg, wide);
// Emit global load / store operations.
- Factory* factory = isolate()->factory();
Handle<String> name = factory->NewStringFromStaticChars("var_name");
- builder.LoadGlobal(name, 1, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(1, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(1, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1, LanguageMode::SLOPPY)
.StoreGlobal(name, 1, LanguageMode::STRICT);
@@ -83,21 +100,24 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreLookupSlot(name, LanguageMode::STRICT);
// Emit closure operations.
- Handle<SharedFunctionInfo> shared_info = factory->NewSharedFunctionInfo(
- factory->NewStringFromStaticChars("function_a"), MaybeHandle<Code>(),
- false);
- builder.CreateClosure(shared_info, NOT_TENURED);
+ builder.CreateClosure(0, NOT_TENURED);
+
+ // Emit create context operation.
+ builder.CreateBlockContext(factory->NewScopeInfo(1));
+ builder.CreateCatchContext(reg, name);
+ builder.CreateFunctionContext(1);
+ builder.CreateWithContext(reg);
// Emit literal creation operations.
builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("a"), 0, 0)
.CreateArrayLiteral(factory->NewFixedArray(1), 0, 0)
- .CreateObjectLiteral(factory->NewFixedArray(1), 0, 0);
+ .CreateObjectLiteral(factory->NewFixedArray(1), 0, 0, reg);
// Call operations.
- builder.Call(reg, other, 1, 0)
- .Call(reg, wide, 1, 0)
- .TailCall(reg, other, 1, 0)
- .TailCall(reg, wide, 1, 0)
+ builder.Call(reg, other, 0, 1)
+ .Call(reg, wide, 0, 1)
+ .TailCall(reg, other, 0, 1)
+ .TailCall(reg, wide, 0, 1)
.CallRuntime(Runtime::kIsArray, reg, 1)
.CallRuntime(Runtime::kIsArray, wide, 1)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg, 1, other)
@@ -106,27 +126,45 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, wide, 1);
// Emit binary operator invocations.
- builder.BinaryOperation(Token::Value::ADD, reg)
- .BinaryOperation(Token::Value::SUB, reg)
- .BinaryOperation(Token::Value::MUL, reg)
- .BinaryOperation(Token::Value::DIV, reg)
- .BinaryOperation(Token::Value::MOD, reg);
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
+ .BinaryOperation(Token::Value::SUB, reg, 2)
+ .BinaryOperation(Token::Value::MUL, reg, 3)
+ .BinaryOperation(Token::Value::DIV, reg, 4)
+ .BinaryOperation(Token::Value::MOD, reg, 5);
// Emit bitwise operator invocations
- builder.BinaryOperation(Token::Value::BIT_OR, reg)
- .BinaryOperation(Token::Value::BIT_XOR, reg)
- .BinaryOperation(Token::Value::BIT_AND, reg);
+ builder.BinaryOperation(Token::Value::BIT_OR, reg, 6)
+ .BinaryOperation(Token::Value::BIT_XOR, reg, 7)
+ .BinaryOperation(Token::Value::BIT_AND, reg, 8);
// Emit shift operator invocations
- builder.BinaryOperation(Token::Value::SHL, reg)
- .BinaryOperation(Token::Value::SAR, reg)
- .BinaryOperation(Token::Value::SHR, reg);
+ builder.BinaryOperation(Token::Value::SHL, reg, 9)
+ .BinaryOperation(Token::Value::SAR, reg, 10)
+ .BinaryOperation(Token::Value::SHR, reg, 11);
+
+ // Emit peephole optimizations of LdaSmi followed by a binary operation.
+ builder.LoadLiteral(Smi::FromInt(1))
+ .BinaryOperation(Token::Value::ADD, reg, 1)
+ .LoadLiteral(Smi::FromInt(2))
+ .BinaryOperation(Token::Value::SUB, reg, 2)
+ .LoadLiteral(Smi::FromInt(3))
+ .BinaryOperation(Token::Value::BIT_AND, reg, 3)
+ .LoadLiteral(Smi::FromInt(4))
+ .BinaryOperation(Token::Value::BIT_OR, reg, 4)
+ .LoadLiteral(Smi::FromInt(5))
+ .BinaryOperation(Token::Value::SHL, reg, 5)
+ .LoadLiteral(Smi::FromInt(6))
+ .BinaryOperation(Token::Value::SAR, reg, 6);
// Emit count operator invocations
- builder.CountOperation(Token::Value::ADD).CountOperation(Token::Value::SUB);
+ builder.CountOperation(Token::Value::ADD, 1)
+ .CountOperation(Token::Value::SUB, 1);
// Emit unary operator invocations.
- builder.LogicalNot().TypeOf();
+ builder
+ .LogicalNot() // ToBooleanLogicalNot
+ .LogicalNot() // non-ToBoolean LogicalNot
+ .TypeOf();
// Emit delete
builder.Delete(reg, LanguageMode::SLOPPY).Delete(reg, LanguageMode::STRICT);
@@ -147,33 +185,41 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CompareOperation(Token::Value::IN, reg);
// Emit cast operator invocations.
- builder.CastAccumulatorToNumber()
- .CastAccumulatorToJSObject()
- .CastAccumulatorToName();
+ builder.CastAccumulatorToNumber(reg)
+ .CastAccumulatorToJSObject(reg)
+ .CastAccumulatorToName(reg);
// Emit control flow. Return must be the last instruction.
BytecodeLabel start;
builder.Bind(&start);
- // Short jumps with Imm8 operands
- builder.Jump(&start)
- .JumpIfNull(&start)
- .JumpIfUndefined(&start)
- .JumpIfNotHole(&start);
+ {
+ // Short jumps with Imm8 operands
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ }
// Longer jumps with constant operands
BytecodeLabel end[8];
- builder.Jump(&end[0])
- .LoadTrue()
- .JumpIfTrue(&end[1])
- .LoadTrue()
- .JumpIfFalse(&end[2])
- .LoadLiteral(Smi::FromInt(0))
- .JumpIfTrue(&end[3])
- .LoadLiteral(Smi::FromInt(0))
- .JumpIfFalse(&end[4])
- .JumpIfNull(&end[5])
- .JumpIfUndefined(&end[6])
- .JumpIfNotHole(&end[7]);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&end[0])
+ .Bind(&after_jump)
+ .LoadTrue()
+ .JumpIfTrue(&end[1])
+ .LoadTrue()
+ .JumpIfFalse(&end[2])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfTrue(&end[3])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfFalse(&end[4])
+ .JumpIfNull(&end[5])
+ .JumpIfUndefined(&end[6])
+ .JumpIfNotHole(&end[7]);
+ }
// Perform an operation that returns a boolean value to
// generate JumpIfTrue/False
@@ -183,45 +229,54 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.JumpIfFalse(&start);
// Perform an operation that returns a non-boolean value to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg)
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&start);
// Insert dummy ops to force longer jumps
for (int i = 0; i < 128; i++) {
builder.LoadTrue();
}
// Longer jumps requiring Constant operand
- builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
- &start);
- // Perform an operation that returns a boolean value to
- // generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg)
- .JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfFalse(&start);
- // Perform an operation that returns a non-boolean value to
- // generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg)
- .JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg)
- .JumpIfFalse(&start);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ // Perform an operation that returns a boolean value to
+ // generate JumpIfTrue/False
+ builder.CompareOperation(Token::Value::EQ, reg)
+ .JumpIfTrue(&start)
+ .CompareOperation(Token::Value::EQ, reg)
+ .JumpIfFalse(&start);
+ // Perform an operation that returns a non-boolean value to
+ // generate JumpIfToBooleanTrue/False.
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
+ .JumpIfTrue(&start)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
+ .JumpIfFalse(&start);
+ }
// Emit stack check bytecode.
- builder.StackCheck();
+ builder.StackCheck(0);
+
+ // Emit an OSR poll bytecode.
+ builder.OsrPoll(1);
// Emit throw and re-throw in its own basic block so that the rest of the
// code isn't omitted due to being dead.
BytecodeLabel after_throw;
- builder.Jump(&after_throw).Throw().Bind(&after_throw);
+ builder.Throw().Bind(&after_throw);
BytecodeLabel after_rethrow;
- builder.Jump(&after_rethrow).ReThrow().Bind(&after_rethrow);
+ builder.ReThrow().Bind(&after_rethrow);
- builder.ForInPrepare(reg)
+ builder.ForInPrepare(reg, reg)
.ForInDone(reg, reg)
.ForInNext(reg, reg, reg, 1)
.ForInStep(reg);
- builder.ForInPrepare(wide)
+ builder.ForInPrepare(reg, wide)
.ForInDone(reg, other)
.ForInNext(wide, wide, wide, 1024)
.ForInStep(reg);
@@ -235,14 +290,14 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
Handle<String> wide_name = factory->NewStringFromStaticChars("var_wide_name");
// Emit wide global load / store operations.
- builder.LoadGlobal(name, 1024, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(1024, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(1024, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(1024, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1024, LanguageMode::SLOPPY)
.StoreGlobal(wide_name, 1, LanguageMode::STRICT);
// Emit extra wide global load.
- builder.LoadGlobal(name, 1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
+ builder.LoadGlobal(1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
// Emit wide load / store property operations.
builder.LoadNamedProperty(reg, wide_name, 0)
@@ -261,34 +316,56 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreLookupSlot(wide_name, LanguageMode::SLOPPY)
.StoreLookupSlot(wide_name, LanguageMode::STRICT);
+ // Emit loads which will be transformed to Ldr equivalents by the peephole
+ // optimizer.
+ builder.LoadNamedProperty(reg, name, 0)
+ .StoreAccumulatorInRegister(reg)
+ .LoadKeyedProperty(reg, 0)
+ .StoreAccumulatorInRegister(reg)
+ .LoadContextSlot(reg, 1)
+ .StoreAccumulatorInRegister(reg)
+ .LoadGlobal(0, TypeofMode::NOT_INSIDE_TYPEOF)
+ .StoreAccumulatorInRegister(reg)
+ .LoadUndefined()
+ .StoreAccumulatorInRegister(reg);
+
// CreateClosureWide
- Handle<SharedFunctionInfo> shared_info2 = factory->NewSharedFunctionInfo(
- factory->NewStringFromStaticChars("function_b"), MaybeHandle<Code>(),
- false);
- builder.CreateClosure(shared_info2, NOT_TENURED);
+ builder.CreateClosure(1000, NOT_TENURED);
// Emit wide variant of literal creation operations.
builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("wide_literal"),
0, 0)
.CreateArrayLiteral(factory->NewFixedArray(2), 0, 0)
- .CreateObjectLiteral(factory->NewFixedArray(2), 0, 0);
+ .CreateObjectLiteral(factory->NewFixedArray(2), 0, 0, reg);
// Longer jumps requiring ConstantWide operand
- builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
- &start);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ }
+
// Perform an operation that returns a boolean value to
// generate JumpIfTrue/False
builder.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&start)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&start);
+
// Perform an operation that returns a non-boolean value to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg)
+ builder.BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&start);
+ // Emit generator operations
+ builder.SuspendGenerator(reg)
+ .ResumeGenerator(reg);
+
// Intrinsics handled by the interpreter.
builder.CallRuntime(Runtime::kInlineIsArray, reg, 1)
.CallRuntime(Runtime::kInlineIsArray, wide, 1);
@@ -300,7 +377,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
builder.Return();
// Generate BytecodeArray.
- Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(the_array->frame_size(),
builder.fixed_and_temporary_register_count() * kPointerSize);
@@ -327,6 +404,30 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Insert entry for illegal bytecode as this is never willingly emitted.
scorecard[Bytecodes::ToByte(Bytecode::kIllegal)] = 1;
+ // Insert entry for nop bytecode as this often gets optimized out.
+ scorecard[Bytecodes::ToByte(Bytecode::kNop)] = 1;
+
+ if (!FLAG_ignition_peephole) {
+ // Insert entries for bytecodes only emitted by peephole optimizer.
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrNamedProperty)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrKeyedProperty)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrGlobal)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrContextSlot)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrUndefined)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLogicalNot)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJump)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrue)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalse)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrueConstant)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalseConstant)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kAddSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kSubSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kBitwiseAndSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kBitwiseOrSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kShiftLeftSmi)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kShiftRightSmi)] = 1;
+ }
+
// Check return occurs at the end and only once in the BytecodeArray.
CHECK_EQ(final_bytecode, Bytecode::kReturn);
CHECK_EQ(scorecard[Bytecodes::ToByte(final_bytecode)], 1);
@@ -342,18 +443,30 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
+ CanonicalHandleScope canonical(isolate());
for (int locals = 0; locals < 5; locals++) {
for (int contexts = 0; contexts < 4; contexts++) {
for (int temps = 0; temps < 3; temps++) {
BytecodeArrayBuilder builder(isolate(), zone(), 0, contexts, locals);
BytecodeRegisterAllocator temporaries(
zone(), builder.temporary_register_allocator());
+ for (int i = 0; i < locals + contexts; i++) {
+ builder.LoadLiteral(Smi::FromInt(0));
+ builder.StoreAccumulatorInRegister(Register(i));
+ }
for (int i = 0; i < temps; i++) {
+ builder.LoadLiteral(Smi::FromInt(0));
builder.StoreAccumulatorInRegister(temporaries.NewRegister());
}
+ if (temps > 0) {
+ // Ensure the temporaries are used so they are not optimized away by
+ // the register optimizer.
+ builder.New(Register(locals + contexts), Register(locals + contexts),
+ static_cast<size_t>(temps));
+ }
builder.Return();
- Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
int total_registers = locals + contexts + temps;
CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
}
@@ -363,22 +476,22 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
+ CanonicalHandleScope canonical(isolate());
int index = 1;
- int32_t operand = -index;
Register the_register(index);
CHECK_EQ(the_register.index(), index);
int actual_operand = the_register.ToOperand();
- CHECK_EQ(actual_operand, operand);
-
int actual_index = Register::FromOperand(actual_operand).index();
CHECK_EQ(actual_index, index);
}
TEST_F(BytecodeArrayBuilderTest, Parameters) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 0);
+
Register param0(builder.Parameter(0));
Register param9(builder.Parameter(9));
CHECK_EQ(param9.index() - param0.index(), 9);
@@ -386,6 +499,7 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
TEST_F(BytecodeArrayBuilderTest, RegisterType) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 3);
BytecodeRegisterAllocator register_allocator(
zone(), builder.temporary_register_allocator());
@@ -409,7 +523,9 @@ TEST_F(BytecodeArrayBuilderTest, RegisterType) {
TEST_F(BytecodeArrayBuilderTest, Constants) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
+
Factory* factory = isolate()->factory();
Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(3.14);
Handle<HeapObject> heap_num_2 = factory->NewHeapNumber(5.2);
@@ -423,28 +539,37 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
.LoadLiteral(heap_num_2_copy)
.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
// Should only have one entry for each identical constant.
CHECK_EQ(array->constant_pool()->length(), 3);
}
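+// Expected jump bytecode for a jump preceded by a boolean-producing
+// bytecode: the builder conservatively emits the ToBoolean variant, and
+// the peephole optimizer, when enabled, strengthens it to the plain jump.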
+static Bytecode PeepholeToBoolean(Bytecode jump_bytecode) {
+ return FLAG_ignition_peephole
+ ? Bytecodes::GetJumpWithoutToBoolean(jump_bytecode)
+ : jump_bytecode;
+}
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
+ CanonicalHandleScope canonical(isolate());
static const int kFarJumpDistance = 256;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+
Register reg(0);
BytecodeLabel far0, far1, far2, far3, far4;
BytecodeLabel near0, near1, near2, near3, near4;
+ BytecodeLabel after_jump0, after_jump1;
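+ // Labels bound after unconditional jumps keep the trailing bytecode from
+ // being eliminated as dead code.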
builder.Jump(&near0)
+ .Bind(&after_jump0)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&near1)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&near2)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&near3)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&near4)
.Bind(&near0)
.Bind(&near1)
@@ -452,47 +577,50 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
.Bind(&near3)
.Bind(&near4)
.Jump(&far0)
+ .Bind(&after_jump1)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&far1)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&far2)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 3)
.JumpIfTrue(&far3)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 4)
.JumpIfFalse(&far4);
- for (int i = 0; i < kFarJumpDistance - 18; i++) {
- builder.LoadUndefined();
+ for (int i = 0; i < kFarJumpDistance - 20; i++) {
+ builder.Debugger();
}
builder.Bind(&far0).Bind(&far1).Bind(&far2).Bind(&far3).Bind(&far4);
builder.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
- DCHECK_EQ(array->length(), 36 + kFarJumpDistance - 18 + 1);
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
+ DCHECK_EQ(array->length(), 40 + kFarJumpDistance - 20 + 1);
BytecodeArrayIterator iterator(array);
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
- CHECK_EQ(iterator.GetImmediateOperand(0), 18);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 20);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
- CHECK_EQ(iterator.GetImmediateOperand(0), 14);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
+ CHECK_EQ(iterator.GetImmediateOperand(0), 16);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
- CHECK_EQ(iterator.GetImmediateOperand(0), 10);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
+ CHECK_EQ(iterator.GetImmediateOperand(0), 12);
iterator.Advance();
// Ignore add operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
- CHECK_EQ(iterator.GetImmediateOperand(0), 6);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 7);
iterator.Advance();
// Ignore add operation.
@@ -502,7 +630,6 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.GetImmediateOperand(0), 2);
iterator.Advance();
-
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance));
@@ -511,7 +638,8 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrueConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 4));
iterator.Advance();
@@ -519,7 +647,8 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalseConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 8));
iterator.Advance();
@@ -529,7 +658,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 12));
+ Smi::FromInt(kFarJumpDistance - 13));
iterator.Advance();
// Ignore add operation.
@@ -538,13 +667,15 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
Bytecode::kJumpIfToBooleanFalseConstant);
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
- Smi::FromInt(kFarJumpDistance - 16));
+ Smi::FromInt(kFarJumpDistance - 18));
iterator.Advance();
}
TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+
Register reg(0);
BytecodeLabel label0, label1, label2, label3, label4;
@@ -557,41 +688,46 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&label2)
.Bind(&label3)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 1)
.JumpIfTrue(&label3)
.Bind(&label4)
- .BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::ADD, reg, 2)
.JumpIfFalse(&label4);
- for (int i = 0; i < 63; i++) {
- builder.Jump(&label4);
+ for (int i = 0; i < 62; i++) {
+ BytecodeLabel after_jump;
+ builder.Jump(&label4).Bind(&after_jump);
}
// Add padding to force wide backwards jumps.
for (int i = 0; i < 256; i++) {
- builder.LoadTrue();
+ builder.Debugger();
}
- builder.BinaryOperation(Token::Value::ADD, reg).JumpIfFalse(&label4);
- builder.BinaryOperation(Token::Value::ADD, reg).JumpIfTrue(&label3);
+ builder.BinaryOperation(Token::Value::ADD, reg, 1).JumpIfFalse(&label4);
+ builder.BinaryOperation(Token::Value::ADD, reg, 2).JumpIfTrue(&label3);
builder.CompareOperation(Token::Value::EQ, reg).JumpIfFalse(&label2);
builder.CompareOperation(Token::Value::EQ, reg).JumpIfTrue(&label1);
builder.Jump(&label0);
+ BytecodeLabel end;
+ builder.Bind(&end);
builder.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.GetImmediateOperand(0), 0);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
@@ -599,23 +735,24 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -3);
iterator.Advance();
// Ignore binary operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -3);
iterator.Advance();
- for (int i = 0; i < 63; i++) {
+ for (int i = 0; i < 62; i++) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(iterator.GetImmediateOperand(0), -i * 2 - 4);
+ // Offset of 5 (3 bytes for the binary operation and 2 for the jump).
+ CHECK_EQ(iterator.GetImmediateOperand(0), -i * 2 - 5);
iterator.Advance();
}
// Check padding to force wide backwards jumps.
for (int i = 0; i < 256; i++) {
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaTrue);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
iterator.Advance();
}
// Ignore binary operation.
@@ -628,23 +765,25 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -399);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -401);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -409);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -411);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -419);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -421);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
- CHECK_EQ(iterator.GetImmediateOperand(0), -425);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -427);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
iterator.Advance();
@@ -653,15 +792,22 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
+ CanonicalHandleScope canonical(isolate());
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
// Labels can only have one forward reference, but
// can be referred to multiple times once bound.
- BytecodeLabel label;
-
- builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label).Return();
+ BytecodeLabel label, after_jump0, after_jump1;
+
+ builder.Jump(&label)
+ .Bind(&label)
+ .Jump(&label)
+ .Bind(&after_jump0)
+ .Jump(&label)
+ .Bind(&after_jump1)
+ .Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
CHECK_EQ(iterator.GetImmediateOperand(0), 2);
@@ -679,16 +825,22 @@ TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
+ CanonicalHandleScope canonical(isolate());
static const int kRepeats = 3;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
for (int i = 0; i < kRepeats; i++) {
- BytecodeLabel label;
- builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label);
+ BytecodeLabel label, after_jump0, after_jump1;
+ builder.Jump(&label)
+ .Bind(&label)
+ .Jump(&label)
+ .Bind(&after_jump0)
+ .Jump(&label)
+ .Bind(&after_jump1);
}
builder.Return();
- Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
BytecodeArrayIterator iterator(array);
for (int i = 0; i < kRepeats; i++) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
@@ -706,85 +858,6 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
CHECK(iterator.done());
}
-TEST_F(BytecodeArrayBuilderTest, OperandScales) {
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kByte),
- OperandScale::kSingle);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kShort),
- OperandScale::kDouble);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(OperandSize::kQuad),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kShort, OperandSize::kShort,
- OperandSize::kShort),
- OperandScale::kDouble);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kQuad, OperandSize::kShort, OperandSize::kShort,
- OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kQuad, OperandSize::kShort,
- OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kShort, OperandSize::kQuad,
- OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(BytecodeArrayBuilder::OperandSizesToScale(
- OperandSize::kShort, OperandSize::kShort, OperandSize::kShort,
- OperandSize::kQuad),
- OperandScale::kQuadruple);
-}
-
-TEST_F(BytecodeArrayBuilderTest, SizesForSignOperands) {
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(0) == OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt8) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt8) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt8 + 1) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt8 - 1) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt16) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt16) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt16 + 1) ==
- OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt16 - 1) ==
- OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMaxInt) ==
- OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForSignedOperand(kMinInt) ==
- OperandSize::kQuad);
-}
-
-TEST_F(BytecodeArrayBuilderTest, SizesForUnsignOperands) {
- // int overloads
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(0) == OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt8) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt8 + 1) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt16) ==
- OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(kMaxUInt16 + 1) ==
- OperandSize::kQuad);
- // size_t overloads
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(static_cast<size_t>(0)) ==
- OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt8)) == OperandSize::kByte);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt8 + 1)) == OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt16)) == OperandSize::kShort);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt16 + 1)) == OperandSize::kQuad);
- CHECK(BytecodeArrayBuilder::SizeForUnsignedOperand(
- static_cast<size_t>(kMaxUInt32)) == OperandSize::kQuad);
-}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 43c6caa8cf..b844180dc0 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -37,22 +37,32 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
int feedback_slot = 97;
builder.LoadLiteral(heap_num_0)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(heap_num_1)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(zero)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_0)
+ .StackCheck(0)
+ .StoreAccumulatorInRegister(reg_0)
.LoadLiteral(smi_1)
+ .StackCheck(1)
+ .StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
+ .BinaryOperation(Token::Value::ADD, reg_0, 2)
+ .StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
+ .BinaryOperation(Token::Value::ADD, reg_0, 3)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, 1, reg_0)
- .ForInPrepare(reg_0)
+ .ForInPrepare(reg_0, reg_0)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
- BytecodeArrayIterator iterator(builder.ToBytecodeArray());
+ BytecodeArrayIterator iterator(builder.ToBytecodeArray(isolate()));
const int kPrefixByteSize = 1;
int offset = 0;
@@ -64,6 +74,15 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
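+ // Each literal load is now followed by a Star storing the accumulator;
+ // verify the iterator decodes the store and its register operand.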
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -72,6 +91,15 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -79,6 +107,15 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaZero, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -87,6 +124,23 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdaSmi, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
@@ -96,6 +150,23 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
kPrefixByteSize;
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStackCheck);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(Bytecodes::NumberOfOperands(iterator.current_bytecode()), 0);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStackCheck, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdar);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -104,14 +175,41 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadIC);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
CHECK_EQ(iterator.GetIndexOperand(1), name_index);
CHECK_EQ(iterator.GetIndexOperand(2), feedback_slot);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kLoadIC, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
@@ -141,7 +239,9 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterOperandRange(0), 3);
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(1), 3);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kForInPrepare, OperandScale::kSingle);
iterator.Advance();
@@ -149,8 +249,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(static_cast<Runtime::FunctionId>(iterator.GetRuntimeIdOperand(0)),
- Runtime::kLoadIC_Miss);
+ CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
CHECK(!iterator.done());
@@ -167,8 +266,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- CHECK_EQ(iterator.current_bytecode_size(), 10);
- CHECK_EQ(iterator.GetIndexOperand(1), 0x10000000);
+ CHECK_EQ(iterator.current_bytecode_size(), 6);
+ CHECK_EQ(iterator.GetIndexOperand(0), 0x10000000);
offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
kPrefixByteSize;
iterator.Advance();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
new file mode 100644
index 0000000000..9681612ac4
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -0,0 +1,254 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/api.h"
+#include "src/factory.h"
+#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/isolate.h"
+#include "src/source-position-table.h"
+#include "src/utils.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
+ public:
+ BytecodeArrayWriterUnittest()
+ : constant_array_builder_(zone(), isolate()->factory()->the_hole_value()),
+ bytecode_array_writer_(
+ zone(), &constant_array_builder_,
+ SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS) {}
+ ~BytecodeArrayWriterUnittest() override {}
+
+ void Write(BytecodeNode* node, const BytecodeSourceInfo& info);
+ void Write(Bytecode bytecode,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+ void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+
+ void WriteJump(Bytecode bytecode, BytecodeLabel* label,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+
+ BytecodeArrayWriter* writer() { return &bytecode_array_writer_; }
+ ZoneVector<unsigned char>* bytecodes() { return writer()->bytecodes(); }
+ SourcePositionTableBuilder* source_position_table_builder() {
+ return writer()->source_position_table_builder();
+ }
+ int max_register_count() { return writer()->max_register_count(); }
+
+ private:
+ ConstantArrayBuilder constant_array_builder_;
+ BytecodeArrayWriter bytecode_array_writer_;
+};
+
+void BytecodeArrayWriterUnittest::Write(BytecodeNode* node,
+ const BytecodeSourceInfo& info) {
+ if (info.is_valid()) {
+ node->source_info().Clone(info);
+ }
+ writer()->Write(node);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0, operand1);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0, operand1, operand2);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
+ uint32_t operand1, uint32_t operand2,
+ uint32_t operand3,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
+ Write(&node, info);
+}
+
+void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
+ BytecodeLabel* label,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, 0);
+ if (info.is_valid()) {
+ node.source_info().Clone(info);
+ }
+ writer()->WriteJump(&node, label);
+}
+
+TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
+ CHECK_EQ(bytecodes()->size(), 0);
+
+ Write(Bytecode::kStackCheck, {10, false});
+ CHECK_EQ(bytecodes()->size(), 1);
+ CHECK_EQ(max_register_count(), 0);
+
+ Write(Bytecode::kLdaSmi, 127, {55, true});
+ CHECK_EQ(bytecodes()->size(), 3);
+ CHECK_EQ(max_register_count(), 0);
+
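+ // Register 200 does not fit a single-byte operand, so this write emits a
+ // Wide prefix with a 16-bit register operand (4 bytes) and raises the
+ // maximum register count to 201.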
+ Write(Bytecode::kLdar, Register(200).ToOperand());
+ CHECK_EQ(bytecodes()->size(), 7);
+ CHECK_EQ(max_register_count(), 201);
+
+ Write(Bytecode::kReturn, {70, true});
+ CHECK_EQ(bytecodes()->size(), 8);
+ CHECK_EQ(max_register_count(), 201);
+
+ static const uint8_t bytes[] = {B(StackCheck), B(LdaSmi), U8(127), B(Wide),
+ B(Ldar), R16(200), B(Return)};
+ CHECK_EQ(bytecodes()->size(), arraysize(bytes));
+ for (size_t i = 0; i < arraysize(bytes); ++i) {
+ CHECK_EQ(bytecodes()->at(i), bytes[i]);
+ }
+
+ Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
+ isolate(), 0, 0, factory()->empty_fixed_array());
+ CHECK_EQ(bytecodes()->size(), arraysize(bytes));
+
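+ // Expected entries are {bytecode offset, source position, is_statement}
+ // for each write above that carried source info.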
+ PositionTableEntry expected_positions[] = {
+ {0, 10, false}, {1, 55, true}, {7, 70, true}};
+ SourcePositionTableIterator source_iterator(
+ bytecode_array->source_position_table());
+ for (size_t i = 0; i < arraysize(expected_positions); ++i) {
+ const PositionTableEntry& expected = expected_positions[i];
+ CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
+ CHECK_EQ(source_iterator.source_position(), expected.source_position);
+ CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
+ source_iterator.Advance();
+ }
+ CHECK(source_iterator.done());
+}
+
+TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
+ static const uint8_t expected_bytes[] = {
+ // clang-format off
+ /* 0 30 E> */ B(StackCheck),
+ /* 1 42 S> */ B(LdaConstant), U8(0),
+ /* 3 42 E> */ B(Star), R8(1),
+ /* 5 68 S> */ B(JumpIfUndefined), U8(38),
+ /* 7 */ B(JumpIfNull), U8(36),
+ /* 9 */ B(ToObject), R8(3),
+ /* 11 */ B(ForInPrepare), R8(3), R8(4),
+ /* 14 */ B(LdaZero),
+ /* 15 */ B(Star), R8(7),
+ /* 17 63 S> */ B(ForInDone), R8(7), R8(6),
+ /* 20 */ B(JumpIfTrue), U8(23),
+ /* 22 */ B(ForInNext), R8(3), R8(7), R8(4), U8(1),
+ /* 27 */ B(JumpIfUndefined), U8(10),
+ /* 29 */ B(Star), R8(0),
+ /* 31 54 E> */ B(StackCheck),
+ /* 32 */ B(Ldar), R8(0),
+ /* 34 */ B(Star), R8(2),
+ /* 36 85 S> */ B(Return),
+ /* 37 */ B(ForInStep), R8(7),
+ /* 39 */ B(Star), R8(7),
+ /* 41 */ B(Jump), U8(-24),
+ /* 43 */ B(LdaUndefined),
+ /* 44 85 S> */ B(Return),
+ // clang-format on
+ };
+
+ static const PositionTableEntry expected_positions[] = {
+ {0, 30, false}, {1, 42, true}, {3, 42, false}, {5, 68, true},
+ {17, 63, true}, {31, 54, false}, {36, 85, true}, {44, 85, true}};
+
+ BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
+
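+// Shorthand for encoding a register index as a raw operand value.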
+#define R(i) static_cast<uint32_t>(Register(i).ToOperand())
+ Write(Bytecode::kStackCheck, {30, false});
+ Write(Bytecode::kLdaConstant, U8(0), {42, true});
+ CHECK_EQ(max_register_count(), 0);
+ Write(Bytecode::kStar, R(1), {42, false});
+ CHECK_EQ(max_register_count(), 2);
+ WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, {68, true});
+ WriteJump(Bytecode::kJumpIfNull, &jump_end_2);
+ Write(Bytecode::kToObject, R(3));
+ CHECK_EQ(max_register_count(), 4);
+ Write(Bytecode::kForInPrepare, R(3), R(4));
+ CHECK_EQ(max_register_count(), 7);
+ Write(Bytecode::kLdaZero);
+ CHECK_EQ(max_register_count(), 7);
+ Write(Bytecode::kStar, R(7));
+ CHECK_EQ(max_register_count(), 8);
+ writer()->BindLabel(&back_jump);
+ Write(Bytecode::kForInDone, R(7), R(6), {63, true});
+ CHECK_EQ(max_register_count(), 8);
+ WriteJump(Bytecode::kJumpIfTrue, &jump_end_3);
+ Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1));
+ WriteJump(Bytecode::kJumpIfUndefined, &jump_for_in);
+ Write(Bytecode::kStar, R(0));
+ Write(Bytecode::kStackCheck, {54, false});
+ Write(Bytecode::kLdar, R(0));
+ Write(Bytecode::kStar, R(2));
+ Write(Bytecode::kReturn, {85, true});
+ writer()->BindLabel(&jump_for_in);
+ Write(Bytecode::kForInStep, R(7));
+ Write(Bytecode::kStar, R(7));
+ WriteJump(Bytecode::kJump, &back_jump);
+ writer()->BindLabel(&jump_end_1);
+ writer()->BindLabel(&jump_end_2);
+ writer()->BindLabel(&jump_end_3);
+ Write(Bytecode::kLdaUndefined);
+ Write(Bytecode::kReturn, {85, true});
+ CHECK_EQ(max_register_count(), 8);
+#undef R
+
+ CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
+ for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
+ CHECK_EQ(static_cast<int>(bytecodes()->at(i)),
+ static_cast<int>(expected_bytes[i]));
+ }
+
+ Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
+ isolate(), 0, 0, factory()->empty_fixed_array());
+ SourcePositionTableIterator source_iterator(
+ bytecode_array->source_position_table());
+ for (size_t i = 0; i < arraysize(expected_positions); ++i) {
+ const PositionTableEntry& expected = expected_positions[i];
+ CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
+ CHECK_EQ(source_iterator.source_position(), expected.source_position);
+ CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
+ source_iterator.Advance();
+ }
+ CHECK(source_iterator.done());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
new file mode 100644
index 0000000000..2b2171bc78
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
@@ -0,0 +1,149 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
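+// The fixture doubles as the next stage in the bytecode pipeline,
+// counting and recording whatever the dead-code optimizer forwards so
+// each test can verify what survives elimination.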
+class BytecodeDeadCodeOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodeDeadCodeOptimizerTest() : dead_code_optimizer_(this) {}
+ ~BytecodeDeadCodeOptimizerTest() override {}
+
+ void Write(BytecodeNode* node) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ BytecodeDeadCodeOptimizer* optimizer() { return &dead_code_optimizer_; }
+
+ int write_count() const { return write_count_; }
+ const BytecodeNode& last_written() const { return last_written_; }
+
+ private:
+ BytecodeDeadCodeOptimizer dead_code_optimizer_;
+
+ int write_count_ = 0;
+ BytecodeNode last_written_;
+};
+
+TEST_F(BytecodeDeadCodeOptimizerTest, LiveCodeKept) {
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(jump, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReturnEliminated) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterThrowEliminated) {
+ BytecodeNode thrw(Bytecode::kThrow);
+ optimizer()->Write(&thrw);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(thrw, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(thrw, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReThrowEliminated) {
+ BytecodeNode rethrow(Bytecode::kReThrow);
+ optimizer()->Write(&rethrow);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(rethrow, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(rethrow, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterJumpEliminated) {
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(jump, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(jump, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeStillDeadAfterConditionalJump) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJumpIfTrue, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, CodeLiveAfterLabelBind) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeLabel target;
+ optimizer()->BindLabel(&target);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(add, last_written());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
new file mode 100644
index 0000000000..7d61f6a1b3
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -0,0 +1,87 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-decoder.h"
+#include "test/unittests/interpreter/bytecode-utils.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+TEST(BytecodeDecoder, DecodeBytecodeAndOperands) {
+ struct BytecodesAndResult {
+ const uint8_t bytecode[32];
+ const size_t length;
+ int parameter_count;
+ const char* output;
+ };
+
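+ // Each case pairs raw bytecode bytes with the expected disassembly,
+ // covering single, Wide and ExtraWide operand scales plus a parameter
+ // register.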
+ const BytecodesAndResult cases[] = {
+ {{B(LdaSmi), U8(1)}, 2, 0, " LdaSmi [1]"},
+ {{B(Wide), B(LdaSmi), U16(1000)}, 4, 0, " LdaSmi.Wide [1000]"},
+ {{B(ExtraWide), B(LdaSmi), U32(100000)},
+ 6,
+ 0,
+ "LdaSmi.ExtraWide [100000]"},
+ {{B(LdaSmi), U8(-1)}, 2, 0, " LdaSmi [-1]"},
+ {{B(Wide), B(LdaSmi), U16(-1000)}, 4, 0, " LdaSmi.Wide [-1000]"},
+ {{B(ExtraWide), B(LdaSmi), U32(-100000)},
+ 6,
+ 0,
+ "LdaSmi.ExtraWide [-100000]"},
+ {{B(Star), R8(5)}, 2, 0, " Star r5"},
+ {{B(Wide), B(Star), R16(136)}, 4, 0, " Star.Wide r136"},
+ {{B(Wide), B(Call), R16(134), R16(135), U16(2), U16(177)},
+ 10,
+ 0,
+ "Call.Wide r134, r135, #2, [177]"},
+ {{B(Ldar),
+ static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
+ 2,
+ 3,
+ " Ldar a1"},
+ {{B(Wide), B(CreateObjectLiteral), U16(513), U16(1027), U8(165),
+ R16(137)},
+ 9,
+ 0,
+ "CreateObjectLiteral.Wide [513], [1027], #165, r137"},
+ {{B(ExtraWide), B(JumpIfNull), U32(123456789)},
+ 6,
+ 0,
+ "JumpIfNull.ExtraWide [123456789]"},
+ };
+
+ for (size_t i = 0; i < arraysize(cases); ++i) {
+ // Generate reference string by prepending formatted bytes.
+ std::stringstream expected_ss;
+ std::ios default_format(nullptr);
+ default_format.copyfmt(expected_ss);
+ // Match format of BytecodeDecoder::Decode() for byte representations.
+ expected_ss.fill('0');
+ expected_ss.flags(std::ios::right | std::ios::hex);
+ for (size_t b = 0; b < cases[i].length; b++) {
+ expected_ss << std::setw(2) << static_cast<uint32_t>(cases[i].bytecode[b])
+ << ' ';
+ }
+ expected_ss.copyfmt(default_format);
+ expected_ss << cases[i].output;
+
+ // Generate decoded byte output.
+ std::stringstream actual_ss;
+ BytecodeDecoder::Decode(actual_ss, cases[i].bytecode,
+ cases[i].parameter_count);
+
+ // Compare.
+ CHECK_EQ(actual_ss.str(), expected_ss.str());
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
new file mode 100644
index 0000000000..c23c89b433
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
@@ -0,0 +1,531 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-peephole-optimizer.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
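+// As in the dead-code optimizer tests, the fixture acts as the next
+// pipeline stage and records the bytecodes the peephole optimizer emits.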
+class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodePeepholeOptimizerTest() : peephole_optimizer_(this) {}
+ ~BytecodePeepholeOptimizerTest() override {}
+
+ void Reset() {
+ last_written_.set_bytecode(Bytecode::kIllegal);
+ write_count_ = 0;
+ }
+
+ void Write(BytecodeNode* node) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ void Flush() {
+ optimizer()->ToBytecodeArray(isolate(), 0, 0,
+ factory()->empty_fixed_array());
+ }
+
+ BytecodePeepholeOptimizer* optimizer() { return &peephole_optimizer_; }
+
+ int write_count() const { return write_count_; }
+ const BytecodeNode& last_written() const { return last_written_; }
+
+ private:
+ BytecodePeepholeOptimizer peephole_optimizer_;
+
+ int write_count_ = 0;
+ BytecodeNode last_written_;
+};
+
+// Sanity tests.
+
+TEST_F(BytecodePeepholeOptimizerTest, FlushOnJump) {
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(jump, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, FlushOnBind) {
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeLabel target;
+ optimizer()->BindLabel(&target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+// Nop elimination tests.
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideEmptyNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ nop.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, KeepStatementNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ nop.source_info().MakeStatementPosition(3);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
+ add.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(add, last_written());
+}
+
+// Tests covering BytecodePeepholeOptimizer::UpdateCurrentBytecode().
+
+TEST_F(BytecodePeepholeOptimizerTest, KeepJumpIfToBooleanTrue) {
+ BytecodeNode first(Bytecode::kLdaNull);
+ BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3);
+ BytecodeLabel label;
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->WriteJump(&second, &label);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideJumpIfToBooleanTrue) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3);
+ BytecodeLabel label;
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->WriteJump(&second, &label);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, KeepToBooleanLogicalNot) {
+ BytecodeNode first(Bytecode::kLdaNull);
+ BytecodeNode second(Bytecode::kToBooleanLogicalNot);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideToBooleanLogicalNot) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ BytecodeNode second(Bytecode::kToBooleanLogicalNot);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLogicalNot);
+}
+
+// Tests covering BytecodePeepholeOptimizer::CanElideCurrent().
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRy) {
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(1).ToOperand());
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRx) {
+ BytecodeLabel label;
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ second.source_info().MakeStatementPosition(0);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kNop);
+ CHECK_EQ(last_written().source_info(), second.source_info());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatementStarRy) {
+ BytecodeLabel label;
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ BytecodeNode third(Bytecode::kStar, Register(3).ToOperand());
+ second.source_info().MakeStatementPosition(0);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 1);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), third);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, LdarToName) {
+ BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kToName, Register(0).ToOperand());
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, TypeOfToName) {
+ BytecodeNode first(Bytecode::kTypeOf);
+ BytecodeNode second(Bytecode::kToName, Register(0).ToOperand());
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written(), second);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kStar);
+}
+
+// Tests covering BytecodePeepholeOptimizer::CanElideLast().
+
+TEST_F(BytecodePeepholeOptimizerTest, LdaTrueLdaFalse) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ BytecodeNode second(Bytecode::kLdaFalse);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
+ BytecodeNode first(Bytecode::kLdaTrue);
+ first.source_info().MakeExpressionPosition(3);
+ BytecodeNode second(Bytecode::kLdaFalse);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), second);
+ CHECK(second.source_info().is_expression());
+ CHECK_EQ(second.source_info().source_position(), 3);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
+ BytecodeNode first(Bytecode::kNop);
+ BytecodeNode second(Bytecode::kStackCheck);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written(), second);
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
+ BytecodeNode first(Bytecode::kNop);
+ first.source_info().MakeExpressionPosition(3);
+ BytecodeNode second(Bytecode::kStackCheck);
+ optimizer()->Write(&first);
+ CHECK_EQ(write_count(), 0);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 0);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ second.source_info().MakeExpressionPosition(
+ first.source_info().source_position());
+ CHECK_EQ(last_written(), second);
+}
+
+// Tests covering BytecodePeepholeOptimizer::UpdateLastAndCurrentBytecodes().
+
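+// A load followed by Star fuses into the corresponding Ldr bytecode that
+// writes straight to the register; an Ldar of that register is emitted
+// with the next write to keep the accumulator consistent.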
+TEST_F(BytecodePeepholeOptimizerTest, MergeLoadICStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(31).ToOperand()), 32, 33,
+ static_cast<uint32_t>(Register(256).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaNamedProperty, operands[0], operands[1],
+ operands[2]);
+ BytecodeNode second(Bytecode::kStar, operands[3]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrNamedProperty);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaKeyedPropertyStar) {
+ const uint32_t operands[] = {static_cast<uint32_t>(Register(31).ToOperand()),
+ 9999997,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1]);
+ BytecodeNode second(Bytecode::kStar, operands[2]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrKeyedProperty);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaGlobalStar) {
+ const uint32_t operands[] = {19191,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaGlobal, operands[0]);
+ BytecodeNode second(Bytecode::kStar, operands[1]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrGlobal);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaContextSlotStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(200000).ToOperand()), 55005500,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaContextSlot, operands[0], operands[1]);
+ BytecodeNode second(Bytecode::kStar, operands[2]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrContextSlot);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaUndefinedStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(100000).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaUndefined);
+ BytecodeNode second(Bytecode::kStar, operands[0]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrUndefined);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
+ Bytecode operator_replacement_pairs[][2] = {
+ {Bytecode::kAdd, Bytecode::kAddSmi},
+ {Bytecode::kSub, Bytecode::kSubSmi},
+ {Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
+ {Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
+ {Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
+ {Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
+
+ for (auto operator_replacement : operator_replacement_pairs) {
+ uint32_t imm_operand = 17;
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
+ first.source_info().Clone({3, true});
+ uint32_t reg_operand = Register(0).ToOperand();
+ uint32_t idx_operand = 1;
+ BytecodeNode second(operator_replacement[0], reg_operand, idx_operand);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), operator_replacement[1]);
+ CHECK_EQ(last_written().operand_count(), 3);
+ CHECK_EQ(last_written().operand(0), imm_operand);
+ CHECK_EQ(last_written().operand(1), reg_operand);
+ CHECK_EQ(last_written().operand(2), idx_operand);
+ CHECK_EQ(last_written().source_info(), first.source_info());
+ Reset();
+ }
+}
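+
+// MergeLdaSmiWithBinaryOp above checks that loading a small integer
+// immediately before a binary operation folds into the Smi variant of that
+// operation, e.g. LdaSmi [17] followed by Add r0, [1] becomes
+// AddSmi [17], r0, [1], and that the source position of the eliminated
+// LdaSmi is kept on the merged bytecode.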
+
+TEST_F(BytecodePeepholeOptimizerTest, NotMergingLdaSmiWithBinaryOp) {
+ Bytecode operator_replacement_pairs[][2] = {
+ {Bytecode::kAdd, Bytecode::kAddSmi},
+ {Bytecode::kSub, Bytecode::kSubSmi},
+ {Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
+ {Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
+ {Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
+ {Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
+
+ for (auto operator_replacement : operator_replacement_pairs) {
+ uint32_t imm_operand = 17;
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand);
+ first.source_info().Clone({3, true});
+ uint32_t reg_operand = Register(0).ToOperand();
+ BytecodeNode second(operator_replacement[0], reg_operand, 1);
+ second.source_info().Clone({4, true});
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(last_written(), first);
+ Flush();
+ CHECK_EQ(last_written(), second);
+ Reset();
+ }
+}
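+
+// NotMergingLdaSmiWithBinaryOp above shows the pair is left unmerged when
+// both bytecodes carry their own source positions, since the merged
+// bytecode could only retain one of them. MergeLdaZeroWithBinaryOp below
+// checks the same folding as MergeLdaSmiWithBinaryOp when the load is
+// LdaZero, which contributes an immediate operand of 0.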
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaZeroWithBinaryOp) {
+ Bytecode operator_replacement_pairs[][2] = {
+ {Bytecode::kAdd, Bytecode::kAddSmi},
+ {Bytecode::kSub, Bytecode::kSubSmi},
+ {Bytecode::kBitwiseAnd, Bytecode::kBitwiseAndSmi},
+ {Bytecode::kBitwiseOr, Bytecode::kBitwiseOrSmi},
+ {Bytecode::kShiftLeft, Bytecode::kShiftLeftSmi},
+ {Bytecode::kShiftRight, Bytecode::kShiftRightSmi}};
+
+ for (auto operator_replacement : operator_replacement_pairs) {
+ BytecodeNode first(Bytecode::kLdaZero);
+ uint32_t reg_operand = Register(0).ToOperand();
+ uint32_t idx_operand = 1;
+ BytecodeNode second(operator_replacement[0], reg_operand, idx_operand);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), operator_replacement[1]);
+ CHECK_EQ(last_written().operand_count(), 3);
+ CHECK_EQ(last_written().operand(0), 0);
+ CHECK_EQ(last_written().operand(1), reg_operand);
+ CHECK_EQ(last_written().operand(2), idx_operand);
+ Reset();
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
new file mode 100644
index 0000000000..663b7e54e5
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
@@ -0,0 +1,185 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using BytecodeNodeTest = TestWithIsolateAndZone;
+
+TEST(BytecodeSourceInfo, Operations) {
+ BytecodeSourceInfo x(0, true);
+ CHECK_EQ(x.source_position(), 0);
+ CHECK_EQ(x.is_statement(), true);
+ CHECK_EQ(x.is_valid(), true);
+ x.set_invalid();
+ CHECK_EQ(x.is_statement(), false);
+ CHECK_EQ(x.is_valid(), false);
+
+ x.MakeStatementPosition(1);
+ BytecodeSourceInfo y(1, true);
+ CHECK(x == y);
+ CHECK(!(x != y));
+
+ x.set_invalid();
+ CHECK(!(x == y));
+ CHECK(x != y);
+
+ y.MakeStatementPosition(1);
+ CHECK_EQ(y.source_position(), 1);
+ CHECK_EQ(y.is_statement(), true);
+
+ y.MakeStatementPosition(2);
+ CHECK_EQ(y.source_position(), 2);
+ CHECK_EQ(y.is_statement(), true);
+
+ y.set_invalid();
+ y.MakeExpressionPosition(3);
+ CHECK_EQ(y.source_position(), 3);
+ CHECK_EQ(y.is_statement(), false);
+
+ y.MakeStatementPosition(3);
+ CHECK_EQ(y.source_position(), 3);
+ CHECK_EQ(y.is_statement(), true);
+}
+
+TEST_F(BytecodeNodeTest, Constructor0) {
+ BytecodeNode node;
+ CHECK_EQ(node.bytecode(), Bytecode::kIllegal);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor1) {
+ BytecodeNode node(Bytecode::kLdaZero);
+ CHECK_EQ(node.bytecode(), Bytecode::kLdaZero);
+ CHECK_EQ(node.operand_count(), 0);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor2) {
+ uint32_t operands[] = {0x11};
+ BytecodeNode node(Bytecode::kJumpIfTrue, operands[0]);
+ CHECK_EQ(node.bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(node.operand_count(), 1);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor3) {
+ uint32_t operands[] = {0x11};
+ BytecodeNode node(Bytecode::kLdaGlobal, operands[0]);
+ CHECK_EQ(node.bytecode(), Bytecode::kLdaGlobal);
+ CHECK_EQ(node.operand_count(), 1);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor4) {
+ uint32_t operands[] = {0x11, 0x22, 0x33};
+ BytecodeNode node(Bytecode::kLdaNamedProperty, operands[0], operands[1],
+ operands[2]);
+ CHECK_EQ(node.operand_count(), 3);
+ CHECK_EQ(node.bytecode(), Bytecode::kLdaNamedProperty);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK_EQ(node.operand(1), operands[1]);
+ CHECK_EQ(node.operand(2), operands[2]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Constructor5) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ CHECK_EQ(node.operand_count(), 4);
+ CHECK_EQ(node.bytecode(), Bytecode::kForInNext);
+ CHECK_EQ(node.operand(0), operands[0]);
+ CHECK_EQ(node.operand(1), operands[1]);
+ CHECK_EQ(node.operand(2), operands[2]);
+ CHECK_EQ(node.operand(3), operands[3]);
+ CHECK(!node.source_info().is_valid());
+}
+
+TEST_F(BytecodeNodeTest, Equality) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ CHECK_EQ(node, node);
+ BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
+ operands[2], operands[3]);
+ CHECK_EQ(node, other);
+}
+
+TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ node.source_info().MakeStatementPosition(3);
+ CHECK_EQ(node, node);
+ BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
+ operands[2], operands[3]);
+ other.source_info().MakeStatementPosition(3);
+ CHECK_EQ(node, other);
+}
+
+TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ node.source_info().MakeStatementPosition(3);
+ BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
+ operands[2], operands[3]);
+ CHECK_NE(node, other);
+}
+
+TEST_F(BytecodeNodeTest, Clone) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ BytecodeNode clone;
+ clone.Clone(&node);
+ CHECK_EQ(clone, node);
+}
+
+TEST_F(BytecodeNodeTest, SetBytecode0) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ BytecodeSourceInfo source_info(77, false);
+ node.source_info().Clone(source_info);
+ CHECK_EQ(node.source_info(), source_info);
+
+ BytecodeNode clone;
+ clone.Clone(&node);
+ clone.set_bytecode(Bytecode::kNop);
+ CHECK_EQ(clone.bytecode(), Bytecode::kNop);
+ CHECK_EQ(clone.operand_count(), 0);
+ CHECK_EQ(clone.source_info(), source_info);
+}
+
+TEST_F(BytecodeNodeTest, SetBytecode1) {
+ uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
+ BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
+ operands[3]);
+ BytecodeSourceInfo source_info(77, false);
+ node.source_info().Clone(source_info);
+
+ BytecodeNode clone;
+ clone.Clone(&node);
+ clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
+ CHECK_EQ(clone.bytecode(), Bytecode::kJump);
+ CHECK_EQ(clone.operand_count(), 1);
+ CHECK_EQ(clone.operand(0), 0x01aabbcc);
+ CHECK_EQ(clone.source_info(), source_info);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
new file mode 100644
index 0000000000..ca69026fda
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -0,0 +1,219 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-register-optimizer.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodeRegisterOptimizerTest() {}
+ ~BytecodeRegisterOptimizerTest() override { delete register_allocator_; }
+
+ void Initialize(int number_of_parameters, int number_of_locals) {
+ register_allocator_ =
+ new TemporaryRegisterAllocator(zone(), number_of_locals);
+ register_optimizer_ = new (zone()) BytecodeRegisterOptimizer(
+ zone(), register_allocator_, number_of_parameters, this);
+ }
+
+ void Write(BytecodeNode* node) override { output_.push_back(*node); }
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ output_.push_back(*node);
+ }
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ Isolate* isolate, int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ TemporaryRegisterAllocator* allocator() { return register_allocator_; }
+ BytecodeRegisterOptimizer* optimizer() { return register_optimizer_; }
+
+ Register NewTemporary() {
+ return Register(allocator()->BorrowTemporaryRegister());
+ }
+
+ void KillTemporary(Register reg) {
+ allocator()->ReturnTemporaryRegister(reg.index());
+ }
+
+ size_t write_count() const { return output_.size(); }
+ const BytecodeNode& last_written() const { return output_.back(); }
+ const std::vector<BytecodeNode>* output() { return &output_; }
+
+ private:
+ TemporaryRegisterAllocator* register_allocator_;
+ BytecodeRegisterOptimizer* register_optimizer_;
+
+ std::vector<BytecodeNode> output_;
+};
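+
+// The fixture itself acts as the pipeline stage that follows the register
+// optimizer: everything the optimizer emits lands in output_, which the
+// tests inspect through write_count(), last_written() and output().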
+
+// Sanity tests.
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNop) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ node.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNopStatement) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ node.source_info().MakeStatementPosition(3);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
+ Initialize(1, 1);
+ Register temp = NewTemporary();
+ BytecodeNode node(Bytecode::kStar, temp.ToOperand());
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 0);
+ BytecodeLabel label;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &label);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kJump);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
+ Initialize(1, 1);
+ Register temp = NewTemporary();
+ BytecodeNode node(Bytecode::kStar, temp.ToOperand());
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 0);
+ BytecodeLabel label;
+ optimizer()->BindLabel(&label);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
+}
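+
+// The two tests above check that a pending Star into a temporary register
+// is deferred until control flow intervenes: writing a jump or binding a
+// label flushes the store, since other control-flow paths could otherwise
+// observe a stale register.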
+
+// Basic register optimizations.
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 0);
+ Register temp = NewTemporary();
+ BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 0);
+ KillTemporary(temp);
+ CHECK_EQ(write_count(), 0);
+ BytecodeNode node2(Bytecode::kReturn);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kReturn);
+}
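+
+// TemporaryNotEmitted above shows the converse: a Star into a temporary
+// that is released without ever being observed is dropped entirely, so
+// only the Ldar of the parameter and the Return survive.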
+
+TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 0);
+ Register local = Register(0);
+ BytecodeNode node1(Bytecode::kStar, local.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kMov);
+ CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(0).operand(1), local.ToOperand());
+
+ BytecodeNode node2(Bytecode::kReturn);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 3);
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(1).operand(0), local.ToOperand());
+ CHECK_EQ(output()->at(2).bytecode(), Bytecode::kReturn);
+}
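+
+// Stores to real locals are observable, so they are emitted immediately:
+// here the pending Ldar/Star pair collapses into a single Mov from the
+// parameter to the local, and the accumulator is re-materialized with an
+// Ldar of the local before the Return.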
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotMaterializedForInput) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node0(Bytecode::kMov, parameter.ToOperand(), temp0.ToOperand());
+ optimizer()->Write(&node0);
+ BytecodeNode node1(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 0);
+ BytecodeNode node2(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 1);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kCallJSRuntime);
+ CHECK_EQ(output()->at(0).operand(0), 0);
+ CHECK_EQ(output()->at(0).operand(1), parameter.ToOperand());
+ CHECK_EQ(output()->at(0).operand(2), 1);
+}
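+
+// Because the optimizer tracks register equivalences, the Mov into temp0
+// never needs to be emitted: when temp0 is used as a call operand, the
+// call is rewritten to read the parameter register directly.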
+
+TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node0(Bytecode::kLdaSmi, 3);
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
+ optimizer()->Write(&node1);
+ BytecodeNode node2(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node3(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 2);
+ optimizer()->Write(&node3);
+ CHECK_EQ(write_count(), 4);
+
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdaSmi);
+ CHECK_EQ(output()->at(0).operand(0), 3);
+
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
+
+ CHECK_EQ(output()->at(2).bytecode(), Bytecode::kMov);
+ CHECK_EQ(output()->at(2).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(2).operand(1), temp1.ToOperand());
+
+ CHECK_EQ(output()->at(3).bytecode(), Bytecode::kCallJSRuntime);
+ CHECK_EQ(output()->at(3).operand(0), 0);
+ CHECK_EQ(output()->at(3).operand(1), temp0.ToOperand());
+ CHECK_EQ(output()->at(3).operand(2), 2);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-utils.h b/deps/v8/test/unittests/interpreter/bytecode-utils.h
new file mode 100644
index 0000000000..fffb7190c8
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-utils.h
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
+#define V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
+
+#include "src/frames.h"
+
+#if V8_TARGET_LITTLE_ENDIAN
+
+#define EXTRACT(x, n) static_cast<uint8_t>((x) >> (8 * n))
+#define U16(i) EXTRACT(i, 0), EXTRACT(i, 1)
+#define U32(i) EXTRACT(i, 0), EXTRACT(i, 1), EXTRACT(i, 2), EXTRACT(i, 3)
+
+#elif V8_TARGET_BIG_ENDIAN
+
+#define EXTRACT(x, n) static_cast<uint8_t>((x) >> (8 * n))
+
+#define U16(i) EXTRACT(i, 1), EXTRACT(i, 0)
+#define U32(i) EXTRACT(i, 3), EXTRACT(i, 2), EXTRACT(i, 1), EXTRACT(i, 0)
+
+#else
+
+#error "Unknown Architecture"
+
+#endif
+
+#define U8(i) static_cast<uint8_t>(i)
+#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
+#define REG_OPERAND(i) \
+ (InterpreterFrameConstants::kRegisterFileFromFp / kPointerSize - (i))
+#define R8(i) static_cast<uint8_t>(REG_OPERAND(i))
+#define R16(i) U16(REG_OPERAND(i))
+#define R32(i) U32(REG_OPERAND(i))
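+
+// These macros let expected bytecode sequences be written as raw byte
+// arrays in a target-endian-safe way: U8/U16/U32 split immediate values
+// into bytes, B names a bytecode, and R8/R16/R32 encode a register index
+// as its frame-relative operand value.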
+
+#endif // V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index b3554c3853..0e68e188c7 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -6,6 +6,7 @@
#include "src/v8.h"
+#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "test/unittests/test-utils.h"
@@ -31,15 +32,6 @@ TEST(OperandConversion, Registers) {
Register reg2 = Register::FromOperand(operand2);
CHECK_EQ(i, reg2.index());
}
-
- for (int i = 0; i <= kMaxUInt8; i++) {
- Register reg = Register::FromOperand(i);
- if (i > 0) {
- CHECK(reg.is_parameter());
- } else {
- CHECK(!reg.is_parameter());
- }
- }
}
TEST(OperandConversion, Parameters) {
@@ -85,45 +77,22 @@ TEST(OperandConversion, RegistersParametersNoOverlap) {
}
TEST(OperandScaling, ScalableAndNonScalable) {
- for (OperandScale operand_scale = OperandScale::kSingle;
- operand_scale <= OperandScale::kMaxValid;
- operand_scale = Bytecodes::NextOperandScale(operand_scale)) {
+ const OperandScale kOperandScales[] = {
+#define VALUE(Name, _) OperandScale::k##Name,
+ OPERAND_SCALE_LIST(VALUE)
+#undef VALUE
+ };
+
+ for (OperandScale operand_scale : kOperandScales) {
int scale = static_cast<int>(operand_scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kCallRuntime, operand_scale),
1 + 2 + 2 * scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kCreateObjectLiteral, operand_scale),
- 1 + 2 * scale + 1);
+ 1 + 2 * scale + 1 + 1 * scale);
CHECK_EQ(Bytecodes::Size(Bytecode::kTestIn, operand_scale), 1 + scale);
}
}
-TEST(Bytecodes, HasAnyRegisterOperands) {
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kAdd), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCall), 2);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntime), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeForPair),
- 2);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kDeletePropertyStrict),
- 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kForInPrepare), 1);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kInc), 0);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kJumpIfTrue), 0);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kNew), 2);
- CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kToName), 0);
-}
-
-TEST(Bytecodes, RegisterOperandBitmaps) {
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kAdd), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kCallRuntimeForPair),
- 10);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kStar), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kMov), 3);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kTestIn), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInPrepare), 1);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInDone), 3);
- CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInNext), 7);
-}
-
TEST(Bytecodes, RegisterOperands) {
CHECK(Bytecodes::IsRegisterOperandType(OperandType::kReg));
CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kReg));
@@ -177,74 +146,6 @@ TEST(Bytecodes, DebugBreakExistForEachBytecode) {
#undef CHECK_DEBUG_BREAK_SIZE
}
-TEST(Bytecodes, DecodeBytecodeAndOperands) {
- struct BytecodesAndResult {
- const uint8_t bytecode[32];
- const size_t length;
- int parameter_count;
- const char* output;
- };
-
-#define B(Name) static_cast<uint8_t>(Bytecode::k##Name)
- const BytecodesAndResult cases[] = {
- {{B(LdaSmi), 0x01}, 2, 0, " LdaSmi [1]"},
- {{B(Wide), B(LdaSmi), 0xe8, 0x03}, 4, 0, " LdaSmi.Wide [1000]"},
- {{B(ExtraWide), B(LdaSmi), 0xa0, 0x86, 0x01, 0x00},
- 6,
- 0,
- "LdaSmi.ExtraWide [100000]"},
- {{B(LdaSmi), 0xff}, 2, 0, " LdaSmi [-1]"},
- {{B(Wide), B(LdaSmi), 0x18, 0xfc}, 4, 0, " LdaSmi.Wide [-1000]"},
- {{B(ExtraWide), B(LdaSmi), 0x60, 0x79, 0xfe, 0xff},
- 6,
- 0,
- "LdaSmi.ExtraWide [-100000]"},
- {{B(Star), 0xfb}, 2, 0, " Star r5"},
- {{B(Wide), B(Star), 0x78, 0xff}, 4, 0, " Star.Wide r136"},
- {{B(Wide), B(Call), 0x7a, 0xff, 0x79, 0xff, 0x02, 0x00, 0xb1, 0x00},
- 10,
- 0,
- "Call.Wide r134, r135, #2, [177]"},
- {{B(Ldar),
- static_cast<uint8_t>(Register::FromParameterIndex(2, 3).ToOperand())},
- 2,
- 3,
- " Ldar a1"},
- {{B(Wide), B(CreateObjectLiteral), 0x01, 0x02, 0x03, 0x04, 0xa5},
- 7,
- 0,
- "CreateObjectLiteral.Wide [513], [1027], #165"},
- {{B(ExtraWide), B(JumpIfNull), 0x15, 0xcd, 0x5b, 0x07},
- 6,
- 0,
- "JumpIfNull.ExtraWide [123456789]"},
- };
-#undef B
-
- for (size_t i = 0; i < arraysize(cases); ++i) {
- // Generate reference string by prepending formatted bytes.
- std::stringstream expected_ss;
- std::ios default_format(nullptr);
- default_format.copyfmt(expected_ss);
- // Match format of Bytecodes::Decode() for byte representations.
- expected_ss.fill('0');
- expected_ss.flags(std::ios::right | std::ios::hex);
- for (size_t b = 0; b < cases[i].length; b++) {
- expected_ss << std::setw(2) << static_cast<uint32_t>(cases[i].bytecode[b])
- << ' ';
- }
- expected_ss.copyfmt(default_format);
- expected_ss << cases[i].output;
-
- // Generate decoded byte output.
- std::stringstream actual_ss;
- Bytecodes::Decode(actual_ss, cases[i].bytecode, cases[i].parameter_count);
-
- // Compare.
- CHECK_EQ(actual_ss.str(), expected_ss.str());
- }
-}
-
TEST(Bytecodes, DebugBreakForPrefixBytecodes) {
CHECK_EQ(Bytecode::kDebugBreakWide,
Bytecodes::GetDebugBreak(Bytecode::kWide));
@@ -260,13 +161,42 @@ TEST(Bytecodes, PrefixMappings) {
}
}
-TEST(OperandScale, PrefixesScale) {
- CHECK(Bytecodes::NextOperandScale(OperandScale::kSingle) ==
- OperandScale::kDouble);
- CHECK(Bytecodes::NextOperandScale(OperandScale::kDouble) ==
- OperandScale::kQuadruple);
- CHECK(Bytecodes::NextOperandScale(OperandScale::kQuadruple) ==
- OperandScale::kInvalid);
+TEST(Bytecodes, SizesForSignedOperands) {
+ CHECK(Bytecodes::SizeForSignedOperand(0) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt8) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt8) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt8 + 1) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt8 - 1) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt16) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt16) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt16 + 1) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt16 - 1) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForSignedOperand(kMaxInt) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForSignedOperand(kMinInt) == OperandSize::kQuad);
+}
+
+TEST(Bytecodes, SizesForUnsignedOperands) {
+ // int overloads
+ CHECK(Bytecodes::SizeForUnsignedOperand(0) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt8) == OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt8 + 1) ==
+ OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt16) == OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(kMaxUInt16 + 1) ==
+ OperandSize::kQuad);
+ // size_t overloads
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(0)) ==
+ OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8)) ==
+ OperandSize::kByte);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt8 + 1)) ==
+ OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt16)) ==
+ OperandSize::kShort);
+ CHECK(Bytecodes::SizeForUnsignedOperand(
+ static_cast<size_t>(kMaxUInt16 + 1)) == OperandSize::kQuad);
+ CHECK(Bytecodes::SizeForUnsignedOperand(static_cast<size_t>(kMaxUInt32)) ==
+ OperandSize::kQuad);
}
TEST(OperandScale, PrefixesRequired) {
@@ -314,7 +244,6 @@ TEST(AccumulatorUse, AccumulatorUseToString) {
names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kReadWrite));
CHECK_EQ(names.size(), 4);
}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 71224370cc..38cbb6d534 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -29,7 +29,8 @@ STATIC_CONST_MEMBER_DEFINITION const size_t
ConstantArrayBuilderTest::k8BitCapacity;
TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k16BitCapacity; i++) {
builder.Insert(handle(Smi::FromInt(static_cast<int>(i)), isolate()));
}
@@ -39,24 +40,71 @@ TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
}
}
+TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ static const size_t kNumberOfElements = 37;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
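+
+// The builder is now constructed from a zone and the the-hole value rather
+// than from an isolate, so ToFixedArray() takes the isolate explicitly at
+// the point where the backing FixedArray is actually allocated.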
+
+TEST_F(ConstantArrayBuilderTest, ToLargeFixedArray) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ static const size_t kNumberOfElements = 37373;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
+
+TEST_F(ConstantArrayBuilderTest, ToLargeFixedArrayWithReservations) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ static const size_t kNumberOfElements = 37373;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
+ }
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
+
TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
+ CanonicalHandleScope canonical(isolate());
for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
- ConstantArrayBuilder builder(isolate(), zone());
+ ConstantArrayBuilder builder(zone(),
+ isolate()->factory()->the_hole_value());
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kByte);
}
for (size_t i = 0; i < 2 * k8BitCapacity; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
if (i + reserved < k8BitCapacity) {
CHECK_LE(builder.size(), k8BitCapacity);
CHECK_EQ(builder.size(), i + 1);
- CHECK(builder.At(i)->SameValue(*object));
} else {
CHECK_GE(builder.size(), k8BitCapacity);
CHECK_EQ(builder.size(), i + reserved + 1);
- CHECK(builder.At(i + reserved)->SameValue(*object));
}
}
CHECK_EQ(builder.size(), 2 * k8BitCapacity + reserved);
@@ -67,54 +115,52 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
}
- // Commmit reserved entries with duplicates and check size does not change.
+ // Commit reserved entries with duplicates and check size does not change.
DCHECK_EQ(reserved + 2 * k8BitCapacity, builder.size());
size_t duplicates_in_idx8_space =
std::min(reserved, k8BitCapacity - reserved);
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
builder.CommitReservedEntry(OperandSize::kByte,
- isolate()->factory()->NewNumberFromSize(i));
+ Smi::FromInt(static_cast<int>(i)));
DCHECK_EQ(reserved + 2 * k8BitCapacity, builder.size());
}
- // Check all committed values match expected (holes where
- // duplicates_in_idx8_space allocated).
- for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
- Smi* smi = Smi::FromInt(static_cast<int>(i));
- CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
- }
- for (size_t i = k8BitCapacity; i < 2 * k8BitCapacity + reserved; i++) {
- Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
- CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
- }
- for (size_t i = 0; i < reserved; i++) {
- size_t index = k8BitCapacity - reserved + i;
- CHECK(builder.At(index)->IsTheHole());
- }
-
// Now make reservations, and commit them with unique entries.
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kByte);
}
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
- Handle<Object> object =
- isolate()->factory()->NewNumberFromSize(2 * k8BitCapacity + i);
- size_t index = builder.CommitReservedEntry(OperandSize::kByte, object);
+ Smi* value = Smi::FromInt(static_cast<int>(2 * k8BitCapacity + i));
+ size_t index = builder.CommitReservedEntry(OperandSize::kByte, value);
CHECK_EQ(static_cast<int>(index), k8BitCapacity - reserved + i);
- CHECK(builder.At(static_cast<int>(index))->SameValue(*object));
}
- CHECK_EQ(builder.size(), 2 * k8BitCapacity + reserved);
+
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), 2 * k8BitCapacity + reserved);
+
+  // Check all committed values match the expected values.
+ for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
+ Object* value = constant_array->get(static_cast<int>(i));
+ Smi* smi = Smi::FromInt(static_cast<int>(i));
+ CHECK(value->SameValue(smi));
+ }
+ for (size_t i = k8BitCapacity; i < 2 * k8BitCapacity + reserved; i++) {
+ Object* value = constant_array->get(static_cast<int>(i));
+ Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
+ CHECK(value->SameValue(smi));
+ }
}
}
TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
+ CanonicalHandleScope canonical(isolate());
for (size_t reserved = 1; reserved < k8BitCapacity; reserved *= 3) {
- ConstantArrayBuilder builder(isolate(), zone());
+ ConstantArrayBuilder builder(zone(),
+ isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k8BitCapacity; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
- CHECK(builder.At(i)->SameValue(*object));
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), i + 1);
}
for (size_t i = 0; i < reserved; i++) {
@@ -129,79 +175,59 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
for (size_t i = 0; i < reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kShort);
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.CommitReservedEntry(operand_size, object);
+ builder.CommitReservedEntry(operand_size,
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), k8BitCapacity);
}
for (size_t i = k8BitCapacity; i < k8BitCapacity + reserved; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(operand_size == OperandSize::kShort);
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.CommitReservedEntry(operand_size, object);
+ builder.CommitReservedEntry(operand_size,
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), i + 1);
}
- }
-}
-
-
-TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
- ConstantArrayBuilder builder(isolate(), zone());
- static const size_t kNumberOfElements = 37;
- for (size_t i = 0; i < kNumberOfElements; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
- CHECK(builder.At(i)->SameValue(*object));
- }
- Handle<FixedArray> constant_array = builder.ToFixedArray();
- CHECK_EQ(constant_array->length(), kNumberOfElements);
- for (size_t i = 0; i < kNumberOfElements; i++) {
- CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
- }
-}
-TEST_F(ConstantArrayBuilderTest, ToLargeFixedArray) {
- ConstantArrayBuilder builder(isolate(), zone());
- static const size_t kNumberOfElements = 37373;
- for (size_t i = 0; i < kNumberOfElements; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
- CHECK(builder.At(i)->SameValue(*object));
- }
- Handle<FixedArray> constant_array = builder.ToFixedArray();
- CHECK_EQ(constant_array->length(), kNumberOfElements);
- for (size_t i = 0; i < kNumberOfElements; i++) {
- CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), k8BitCapacity + reserved);
+ for (size_t i = 0; i < k8BitCapacity + reserved; i++) {
+ Object* value = constant_array->get(static_cast<int>(i));
+ CHECK(value->SameValue(*isolate()->factory()->NewNumberFromSize(i)));
+ }
}
}
TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
CHECK_EQ(builder.size(), 0);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
+ builder.CommitReservedEntry(builder.CreateReservedEntry(),
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), i + k8BitCapacity + 1);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
builder.CommitReservedEntry(OperandSize::kByte,
- builder.At(i + k8BitCapacity));
+ Smi::FromInt(static_cast<int>(i)));
CHECK_EQ(builder.size(), 2 * k8BitCapacity);
}
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
+ CHECK_EQ(constant_array->length(), 2 * k8BitCapacity);
for (size_t i = 0; i < k8BitCapacity; i++) {
- Handle<Object> original = builder.At(k8BitCapacity + i);
- Handle<Object> duplicate = builder.At(i);
- CHECK(original->SameValue(*duplicate));
+ Object* original = constant_array->get(static_cast<int>(k8BitCapacity + i));
+ Object* duplicate = constant_array->get(static_cast<int>(i));
+ CHECK(original->SameValue(duplicate));
Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
CHECK(original->SameValue(*reference));
}
}
TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
@@ -227,8 +253,9 @@ TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
}
TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
+ CanonicalHandleScope canonical(isolate());
static int kNumberOfHoles = 128;
- ConstantArrayBuilder builder(isolate(), zone());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (int i = 0; i < kNumberOfHoles; ++i) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
}
@@ -237,7 +264,7 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
}
CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(256)), 256);
- Handle<FixedArray> constant_array = builder.ToFixedArray();
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), 257);
for (int i = 128; i < 256; i++) {
CHECK(constant_array->get(i)->SameValue(
@@ -250,7 +277,8 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
}
TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
- ConstantArrayBuilder builder(isolate(), zone());
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
for (int i = 0; i < 256; i++) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
}
@@ -260,16 +288,12 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
for (int i = 65536; i < 131072; ++i) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kQuad);
}
- CHECK_EQ(builder.CommitReservedEntry(OperandSize::kByte,
- isolate()->factory()->NewNumber(1)),
- 0);
- CHECK_EQ(builder.CommitReservedEntry(OperandSize::kShort,
- isolate()->factory()->NewNumber(2)),
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kByte, Smi::FromInt(1)), 0);
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kShort, Smi::FromInt(2)),
256);
- CHECK_EQ(builder.CommitReservedEntry(OperandSize::kQuad,
- isolate()->factory()->NewNumber(3)),
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kQuad, Smi::FromInt(3)),
65536);
- Handle<FixedArray> constant_array = builder.ToFixedArray();
+ Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), 65537);
int count = 1;
for (int i = 0; i < constant_array->length(); ++i) {
@@ -283,6 +307,41 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
}
}
+TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithFixedReservations) {
+ CanonicalHandleScope canonical(isolate());
+ ConstantArrayBuilder builder(zone(), isolate()->factory()->the_hole_value());
+ for (size_t i = 0; i < k16BitCapacity; i++) {
+ if ((i % 2) == 0) {
+ CHECK_EQ(i, builder.AllocateEntry());
+ } else {
+ builder.Insert(handle(Smi::FromInt(static_cast<int>(i)), isolate()));
+ }
+ }
+ CHECK_EQ(builder.size(), k16BitCapacity);
+
+ // Check values before reserved entries are inserted.
+ for (size_t i = 0; i < k16BitCapacity; i++) {
+ if ((i % 2) == 0) {
+ // Check reserved values are the hole.
+ Handle<Object> empty = builder.At(i);
+ CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
+ } else {
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
+ }
+ }
+
+ // Insert reserved entries.
+ for (size_t i = 0; i < k16BitCapacity; i += 2) {
+ builder.InsertAllocatedEntry(
+ i, handle(Smi::FromInt(static_cast<int>(i)), isolate()));
+ }
+
+ // Check values after reserved entries are inserted.
+ for (size_t i = 0; i < k16BitCapacity; i++) {
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 0106c577bd..ff6f14df21 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -93,10 +93,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
int offset) {
return IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
}
Matcher<Node*>
@@ -104,10 +103,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
int offset) {
Matcher<Node*> load_matcher = IsLoad(
MachineType::Int8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
if (kPointerSize == 8) {
load_matcher = IsChangeInt32ToInt64(load_matcher);
}
@@ -120,11 +118,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint16(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -139,10 +135,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return IsWord32Or(IsWord32Shl(bytes[0], IsInt32Constant(kBitsPerByte)),
@@ -157,11 +152,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
if (TargetSupportsUnalignedAccess()) {
load_matcher = IsLoad(
MachineType::Int16(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -176,10 +169,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
load_matcher = IsWord32Or(
@@ -198,11 +190,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint32(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -217,10 +207,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return IsWord32Or(
@@ -239,11 +228,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
if (TargetSupportsUnalignedAccess()) {
load_matcher = IsLoad(
MachineType::Int32(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -258,10 +245,9 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
for (int i = 0; i < static_cast<int>(arraysize(bytes)); i++) {
bytes[i] = IsLoad(
(i == 0) ? MachineType::Int8() : MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrAdd(
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
load_matcher = IsWord32Or(
@@ -313,41 +299,59 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- m.Dispatch();
- Graph* graph = m.graph();
-
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
+ Node* tail_call_node = m.Dispatch();
OperandScale operand_scale = OperandScale::kSingle;
- Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(
- interpreter::Bytecodes::Size(bytecode, operand_scale)));
- Matcher<Node*> target_bytecode_matcher = m.IsLoad(
- MachineType::Uint8(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- next_bytecode_offset_matcher);
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
+ IsIntPtrConstant(
+ interpreter::Bytecodes::Size(bytecode, operand_scale)));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ next_bytecode_offset_matcher);
if (kPointerSize == 8) {
target_bytecode_matcher = IsChangeUint32ToUint64(target_bytecode_matcher);
}
Matcher<Node*> code_target_matcher = m.IsLoad(
MachineType::Pointer(),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
IsWordShl(target_bytecode_matcher, IsIntPtrConstant(kPointerSizeLog2)));
+ if (interpreter::Bytecodes::IsStarLookahead(bytecode, operand_scale)) {
+ Matcher<Node*> after_lookahead_offset =
+ IsIntPtrAdd(next_bytecode_offset_matcher,
+ IsIntPtrConstant(interpreter::Bytecodes::Size(
+ Bytecode::kStar, operand_scale)));
+ next_bytecode_offset_matcher =
+ IsPhi(MachineType::PointerRepresentation(),
+ next_bytecode_offset_matcher, after_lookahead_offset, _);
+ Matcher<Node*> after_lookahead_bytecode =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ after_lookahead_offset);
+ if (kPointerSize == 8) {
+ after_lookahead_bytecode =
+ IsChangeUint32ToUint64(after_lookahead_bytecode);
+ }
+ target_bytecode_matcher =
+ IsPhi(MachineRepresentation::kWord8, target_bytecode_matcher,
+ after_lookahead_bytecode, _);
+ code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ IsWordShl(target_bytecode_matcher,
+ IsIntPtrConstant(kPointerSizeLog2)));
+ }
+
EXPECT_THAT(
tail_call_node,
- IsTailCall(
- _, code_target_matcher,
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- next_bytecode_offset_matcher,
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
+ IsTailCall(_, code_target_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kAccumulator),
+ next_bytecode_offset_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ _, _));
}
}
@@ -358,15 +362,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
int jump_offsets[] = {-9710, -77, 0, +3, +97109};
TRACED_FOREACH(int, jump_offset, jump_offsets) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+      if (!interpreter::Bytecodes::IsJump(bytecode)) continue;
+
InterpreterAssemblerForTest m(this, bytecode);
- m.Jump(m.IntPtrConstant(jump_offset));
- Graph* graph = m.graph();
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
+ Node* tail_call_node = m.Jump(m.IntPtrConstant(jump_offset));
Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
IsIntPtrConstant(jump_offset));
Matcher<Node*> target_bytecode_matcher =
m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
@@ -374,111 +376,23 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
target_bytecode_matcher =
IsChangeUint32ToUint64(target_bytecode_matcher);
}
- Matcher<Node*> code_target_matcher = m.IsLoad(
- MachineType::Pointer(),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsWordShl(target_bytecode_matcher,
- IsIntPtrConstant(kPointerSizeLog2)));
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ IsWordShl(target_bytecode_matcher,
+ IsIntPtrConstant(kPointerSizeLog2)));
EXPECT_THAT(
tail_call_node,
- IsTailCall(
- _, code_target_matcher,
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(
- InterpreterDispatchDescriptor::kRegisterFileParameter),
- next_bytecode_offset_matcher, _,
- IsParameter(
- InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
+ IsTailCall(_, code_target_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kAccumulator),
+ next_bytecode_offset_matcher, _,
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTable),
+ _, _));
}
}
}
-TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
- static const int kJumpIfTrueOffset = 73;
-
- // If debug code is enabled we emit extra code in Jump.
- if (FLAG_debug_code) return;
-
- MachineOperatorBuilder machine(zone());
-
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- Node* lhs = m.IntPtrConstant(0);
- Node* rhs = m.IntPtrConstant(1);
- m.JumpIfWordEqual(lhs, rhs, m.IntPtrConstant(kJumpIfTrueOffset));
- Graph* graph = m.graph();
- Node* end = graph->end();
- EXPECT_EQ(2, end->InputCount());
-
- OperandScale operand_scale = OperandScale::kSingle;
- int jump_offsets[] = {kJumpIfTrueOffset, interpreter::Bytecodes::Size(
- bytecode, operand_scale)};
- for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
- Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- IsIntPtrConstant(jump_offsets[i]));
- Matcher<Node*> target_bytecode_matcher =
- m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
- if (kPointerSize == 8) {
- target_bytecode_matcher =
- IsChangeUint32ToUint64(target_bytecode_matcher);
- }
- Matcher<Node*> code_target_matcher = m.IsLoad(
- MachineType::Pointer(),
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsWordShl(target_bytecode_matcher,
- IsIntPtrConstant(kPointerSizeLog2)));
- EXPECT_THAT(
- end->InputAt(i),
- IsTailCall(
- _, code_target_matcher,
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(
- InterpreterDispatchDescriptor::kRegisterFileParameter),
- next_bytecode_offset_matcher, _,
- IsParameter(
- InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
- }
-
- // TODO(oth): test control flow paths.
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
- // If debug code is enabled we emit extra code in InterpreterReturn.
- if (FLAG_debug_code) return;
-
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- m.InterpreterReturn();
- Graph* graph = m.graph();
-
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
-
- Handle<HeapObject> exit_trampoline =
- isolate()->builtins()->InterpreterExitTrampoline();
- EXPECT_THAT(
- tail_call_node,
- IsTailCall(
- _, IsHeapConstant(exit_trampoline),
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsParameter(
- InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
- _,
- IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
- }
-}
-
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
static const OperandScale kOperandScales[] = {
OperandScale::kSingle, OperandScale::kDouble, OperandScale::kQuadruple};
@@ -525,6 +439,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
m.IsUnsignedOperand(offset, operand_size));
break;
+ case interpreter::OperandType::kIntrinsicId:
+ EXPECT_THAT(m.BytecodeOperandIntrinsicId(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -543,9 +461,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
InterpreterAssemblerForTest m(this, bytecode);
// Should be incoming accumulator if not set.
- EXPECT_THAT(
- m.GetAccumulator(),
- IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
+ EXPECT_THAT(m.GetAccumulator(),
+ IsParameter(InterpreterDispatchDescriptor::kAccumulator));
// Should be set by SetAccumulator.
Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
m.SetAccumulator(accumulator_value_1);
@@ -555,24 +472,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
EXPECT_THAT(m.GetAccumulator(), accumulator_value_2);
// Should be passed to next bytecode handler on dispatch.
- m.Dispatch();
- Graph* graph = m.graph();
-
- Node* end = graph->end();
- EXPECT_EQ(1, end->InputCount());
- Node* tail_call_node = end->InputAt(0);
+ Node* tail_call_node = m.Dispatch();
EXPECT_THAT(tail_call_node,
- IsTailCall(_, _, accumulator_value_2, _, _, _, _, _, _));
+ IsTailCall(_, _, accumulator_value_2, _, _, _, _));
}
}
-TARGET_TEST_F(InterpreterAssemblerTest, GetSetContext) {
+TARGET_TEST_F(InterpreterAssemblerTest, GetContext) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- Node* context_node = m.Int32Constant(100);
- m.SetContext(context_node);
- EXPECT_THAT(m.GetContext(), context_node);
+ EXPECT_THAT(
+ m.GetContext(),
+ m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
+ IsIntPtrConstant(Register::current_context().ToOperand()
+ << kPointerSizeLog2)));
}
}
@@ -581,11 +495,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
InterpreterAssemblerForTest m(this, bytecode);
Node* reg_index_node = m.IntPtrConstant(44);
Node* reg_location_node = m.RegisterLocation(reg_index_node);
- EXPECT_THAT(
- reg_location_node,
- IsIntPtrAdd(
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2))));
+ EXPECT_THAT(reg_location_node,
+ IsIntPtrAdd(IsLoadParentFramePointer(),
+ IsWordShl(reg_index_node,
+ IsIntPtrConstant(kPointerSizeLog2))));
}
}
@@ -594,12 +507,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
InterpreterAssemblerForTest m(this, bytecode);
Node* reg_index_node = m.IntPtrConstant(44);
Node* load_reg_node = m.LoadRegister(reg_index_node);
- EXPECT_THAT(
- load_reg_node,
- m.IsLoad(
- MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2))));
+ EXPECT_THAT(load_reg_node,
+ m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
+ IsWordShl(reg_index_node,
+ IsIntPtrConstant(kPointerSizeLog2))));
}
}
@@ -611,12 +522,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(
store_reg_node,
- m.IsStore(
- StoreRepresentation(MachineRepresentation::kTagged,
- kNoWriteBarrier),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2)),
- store_value));
+ m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
+ IsLoadParentFramePointer(),
+ IsWordShl(reg_index_node, IsIntPtrConstant(kPointerSizeLog2)),
+ store_value));
}
}
@@ -624,9 +534,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* value = m.Int32Constant(44);
- EXPECT_THAT(
- m.SmiTag(value),
- IsWordShl(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(m.SmiTag(value),
+ IsIntPtrConstant(static_cast<intptr_t>(44)
+ << (kSmiShiftSize + kSmiTagSize)));
EXPECT_THAT(
m.SmiUntag(value),
IsWordSar(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
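
The updated SmiTag expectation reflects constant folding: tagging a value that is already a compile-time constant collapses to a single IntPtrConstant instead of a WordShl node. A minimal sketch of the arithmetic, assuming the 64-bit smi layout where kSmiTagSize is 1 and kSmiShiftSize is 31; both constants here are stand-ins, not V8's declarations.

    #include <cassert>
    #include <cstdint>

    // Stand-ins for V8's smi layout constants on 64-bit targets (assumption).
    constexpr int kSmiTagSize = 1;
    constexpr int kSmiShiftSize = 31;

    // What folding m.SmiTag(m.Int32Constant(44)) produces: the payload is
    // shifted into the upper bits, leaving a zero tag bit at the bottom.
    constexpr intptr_t SmiTagConstant(int32_t value) {
      return static_cast<intptr_t>(value) << (kSmiShiftSize + kSmiTagSize);
    }

    int main() {
      assert(SmiTagConstant(44) == (static_cast<intptr_t>(44) << 32));
      // SmiUntag still shifts back down, matching the IsWordSar expectation.
      assert(SmiTagConstant(44) >> (kSmiShiftSize + kSmiTagSize) == 44);
      return 0;
    }
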
@@ -669,7 +579,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
Matcher<Node*> constant_pool_matcher = m.IsLoad(
MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
IsIntPtrConstant(BytecodeArray::kConstantPoolOffset - kHeapObjectTag));
EXPECT_THAT(
load_constant,
@@ -730,14 +640,10 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
InterpreterAssemblerForTest m(this, bytecode);
Node* arg1 = m.Int32Constant(2);
Node* arg2 = m.Int32Constant(3);
- Node* context =
- m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* context = m.Int32Constant(4);
Node* call_runtime = m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
- EXPECT_THAT(
- call_runtime,
- IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
- IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
- _));
+ EXPECT_THAT(call_runtime,
+ IsCall(_, _, arg1, arg2, _, IsInt32Constant(2), context, _, _));
}
}
@@ -751,8 +657,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Node* function_id = m.Int32Constant(0);
Node* first_arg = m.Int32Constant(1);
Node* arg_count = m.Int32Constant(2);
- Node* context =
- m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* context = m.Int32Constant(4);
Matcher<Node*> function_table = IsExternalConstant(
ExternalReference::runtime_function_table_address(isolate()));
@@ -765,12 +670,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
arg_count, result_size);
- EXPECT_THAT(
- call_runtime,
- IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
- function_entry,
- IsParameter(InterpreterDispatchDescriptor::kContextParameter),
- _, _));
+ EXPECT_THAT(call_runtime,
+ IsCall(_, IsHeapConstant(builtin.code()), arg_count,
+ first_arg, function_entry, context, _, _));
}
}
}
@@ -786,16 +688,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
Node* function = m.Int32Constant(0);
Node* first_arg = m.Int32Constant(1);
Node* arg_count = m.Int32Constant(2);
- Node* context =
- m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* context = m.Int32Constant(3);
Node* call_js =
m.CallJS(function, context, first_arg, arg_count, tail_call_mode);
- EXPECT_THAT(
- call_js,
- IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
- function,
- IsParameter(InterpreterDispatchDescriptor::kContextParameter),
- _, _));
+ EXPECT_THAT(call_js, IsCall(_, IsHeapConstant(builtin.code()), arg_count,
+ first_arg, function, context, _, _));
}
}
}
@@ -805,21 +702,18 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
InterpreterAssemblerForTest m(this, bytecode);
Node* feedback_vector = m.LoadTypeFeedbackVector();
- Matcher<Node*> load_function_matcher = m.IsLoad(
- MachineType::AnyTagged(),
- IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
- IsIntPtrConstant(
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- Matcher<Node*> load_shared_function_info_matcher =
- m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
- IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag));
-
- EXPECT_THAT(
- feedback_vector,
- m.IsLoad(MachineType::AnyTagged(), load_shared_function_info_matcher,
- IsIntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
- kHeapObjectTag)));
+ Matcher<Node*> load_function_matcher =
+ m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
+ IsIntPtrConstant(Register::function_closure().ToOperand()
+ << kPointerSizeLog2));
+ Matcher<Node*> load_literals_matcher = m.IsLoad(
+ MachineType::AnyTagged(), load_function_matcher,
+ IsIntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag));
+
+ EXPECT_THAT(feedback_vector,
+ m.IsLoad(MachineType::AnyTagged(), load_literals_matcher,
+ IsIntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
+ kHeapObjectTag)));
}
}
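
The common thread across this file's hunks: the dispatch descriptor's kRegisterFileParameter and kContextParameter are gone, so the register file and current context are now reached through IsLoadParentFramePointer() plus a byte offset of Register::ToOperand() << kPointerSizeLog2. A minimal sketch of that offset arithmetic; the constant is an assumption for a 64-bit target, not V8's declaration.

    #include <cassert>
    #include <cstdint>

    // Assumed for a 64-bit target: one pointer-sized slot per interpreter
    // register, so log2(slot size) == 3.
    constexpr int kPointerSizeLog2 = 3;

    // Byte offset of an interpreter register slot relative to the parent
    // frame pointer, mirroring ToOperand() << kPointerSizeLog2 in the
    // matchers above.
    constexpr intptr_t FrameOffset(intptr_t operand) {
      return operand * (intptr_t{1} << kPointerSizeLog2);
    }

    int main() {
      // Register index 44, as used by the RegisterLocation/LoadRegister tests.
      assert(FrameOffset(44) == 44 * 8);
      return 0;
    }
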
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index 1ebdc77c18..e3e525273a 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -52,8 +52,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<compiler::Node*> IsUnsignedOperand(int offset,
OperandSize operand_size);
- using InterpreterAssembler::graph;
-
private:
DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
};
diff --git a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
index 175b311666..f0b41e78dd 100644
--- a/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/worker-thread-unittest.cc
@@ -44,5 +44,21 @@ TEST(WorkerThreadTest, Basic) {
queue.Terminate();
}
+TEST(WorkerThreadTest, PostSingleTask) {
+ TaskQueue queue;
+ WorkerThread thread1(&queue);
+ WorkerThread thread2(&queue);
+
+ InSequence s;
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ EXPECT_CALL(*task, Run());
+ EXPECT_CALL(*task, Die());
+ queue.Append(task);
+
+ // The next call should not time out.
+ queue.BlockUntilQueueEmptyForTesting();
+ queue.Terminate();
+}
+
} // namespace platform
} // namespace v8
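
The new PostSingleTask test hinges on BlockUntilQueueEmptyForTesting() returning only once worker threads have drained the queue, so Terminate() cannot race the mocked Run() call. A minimal sketch of that synchronization with a mutex and two condition variables; this is an illustrative stand-in, not v8::platform::TaskQueue.

    #include <condition_variable>
    #include <deque>
    #include <mutex>
    #include <thread>

    class SketchTaskQueue {
     public:
      void Append(int task_id) {
        std::lock_guard<std::mutex> lock(mutex_);
        tasks_.push_back(task_id);
        non_empty_.notify_one();
      }

      // Worker side; returns -1 once the queue is terminated and drained.
      int GetNext() {
        std::unique_lock<std::mutex> lock(mutex_);
        non_empty_.wait(lock, [this] { return terminated_ || !tasks_.empty(); });
        if (tasks_.empty()) return -1;
        int id = tasks_.front();
        tasks_.pop_front();
        if (tasks_.empty()) empty_.notify_all();
        return id;
      }

      // The call the test asserts "should not time out": blocks until every
      // appended task has been picked up by a worker.
      void BlockUntilQueueEmptyForTesting() {
        std::unique_lock<std::mutex> lock(mutex_);
        empty_.wait(lock, [this] { return tasks_.empty(); });
      }

      void Terminate() {
        std::lock_guard<std::mutex> lock(mutex_);
        terminated_ = true;
        non_empty_.notify_all();
      }

     private:
      std::mutex mutex_;
      std::condition_variable non_empty_, empty_;
      std::deque<int> tasks_;
      bool terminated_ = false;
    };

    int main() {
      SketchTaskQueue queue;
      std::thread worker([&] { while (queue.GetNext() != -1) {} });
      queue.Append(1);
      queue.BlockUntilQueueEmptyForTesting();  // returns once drained
      queue.Terminate();
      worker.join();
      return 0;
    }
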
diff --git a/deps/v8/test/unittests/register-configuration-unittest.cc b/deps/v8/test/unittests/register-configuration-unittest.cc
new file mode 100644
index 0000000000..33453ce9bb
--- /dev/null
+++ b/deps/v8/test/unittests/register-configuration-unittest.cc
@@ -0,0 +1,166 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/register-configuration.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+const MachineRepresentation kFloat32 = MachineRepresentation::kFloat32;
+const MachineRepresentation kFloat64 = MachineRepresentation::kFloat64;
+const MachineRepresentation kSimd128 = MachineRepresentation::kSimd128;
+
+class RegisterConfigurationUnitTest : public ::testing::Test {
+ public:
+ RegisterConfigurationUnitTest() {}
+ virtual ~RegisterConfigurationUnitTest() {}
+
+ private:
+};
+
+TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
+ const int kNumGeneralRegs = 3;
+ const int kNumDoubleRegs = 4;
+ const int kNumAllocatableGeneralRegs = 2;
+ const int kNumAllocatableDoubleRegs = 2;
+ int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
+ int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
+
+ RegisterConfiguration test(
+ kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
+ double_codes, RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr,
+ nullptr);
+
+ EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
+ EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_general_registers(),
+ kNumAllocatableGeneralRegs);
+ EXPECT_EQ(test.num_allocatable_double_registers(), kNumAllocatableDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_float_registers(), kNumAllocatableDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_simd128_registers(),
+ kNumAllocatableDoubleRegs);
+
+ EXPECT_EQ(test.allocatable_general_codes_mask(),
+ (1 << general_codes[0]) | (1 << general_codes[1]));
+ EXPECT_EQ(test.GetAllocatableGeneralCode(0), general_codes[0]);
+ EXPECT_EQ(test.GetAllocatableGeneralCode(1), general_codes[1]);
+ EXPECT_EQ(test.allocatable_double_codes_mask(),
+ (1 << double_codes[0]) | (1 << double_codes[1]));
+ EXPECT_EQ(test.GetAllocatableFloatCode(0), double_codes[0]);
+ EXPECT_EQ(test.GetAllocatableDoubleCode(0), double_codes[0]);
+ EXPECT_EQ(test.GetAllocatableSimd128Code(0), double_codes[0]);
+ EXPECT_EQ(test.GetAllocatableFloatCode(1), double_codes[1]);
+ EXPECT_EQ(test.GetAllocatableDoubleCode(1), double_codes[1]);
+ EXPECT_EQ(test.GetAllocatableSimd128Code(1), double_codes[1]);
+}
+
+TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
+ const int kNumGeneralRegs = 3;
+ const int kNumDoubleRegs = 4;
+ const int kNumAllocatableGeneralRegs = 2;
+ const int kNumAllocatableDoubleRegs = 3;
+ int general_codes[] = {1, 2};
+ int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
+
+ RegisterConfiguration test(
+ kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
+ double_codes, RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr,
+ nullptr);
+
+ // There are 3 allocatable double regs, but only 2 can alias float regs.
+ EXPECT_EQ(test.num_allocatable_float_registers(), 4);
+
+ // Test that float registers combine in pairs to form double registers.
+ EXPECT_EQ(test.GetAllocatableFloatCode(0), double_codes[0] * 2);
+ EXPECT_EQ(test.GetAllocatableFloatCode(1), double_codes[0] * 2 + 1);
+ EXPECT_EQ(test.GetAllocatableFloatCode(2), double_codes[1] * 2);
+ EXPECT_EQ(test.GetAllocatableFloatCode(3), double_codes[1] * 2 + 1);
+
+ // There are 3 allocatable double regs, but only 2 pair to form 1 SIMD reg.
+ EXPECT_EQ(test.num_allocatable_simd128_registers(), 1);
+
+ // Test that even-odd pairs of double regs combine to form a SIMD reg.
+ EXPECT_EQ(test.GetAllocatableSimd128Code(0), double_codes[0] / 2);
+
+ // Registers alias themselves.
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kSimd128, 0));
+ // Registers don't alias other registers of the same size.
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat32, 0));
+ EXPECT_FALSE(test.AreAliases(kFloat64, 1, kFloat64, 0));
+ EXPECT_FALSE(test.AreAliases(kSimd128, 1, kSimd128, 0));
+ // Float registers combine in pairs to alias a double with index / 2, and
+ // in 4's to alias a simd128 with index / 4.
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 1, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 1, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 2, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 3, kSimd128, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 1));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 2));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat32, 3));
+
+ EXPECT_FALSE(test.AreAliases(kFloat32, 0, kFloat64, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat64, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat32, 0, kSimd128, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kSimd128, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat64, 0, kSimd128, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat64, 1, kSimd128, 1));
+
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 2));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 3));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 4));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 5));
+
+ EXPECT_TRUE(test.AreAliases(kSimd128, 0, kFloat64, 1));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 1, kFloat64, 2));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 1, kFloat64, 3));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 2, kFloat64, 4));
+ EXPECT_TRUE(test.AreAliases(kSimd128, 2, kFloat64, 5));
+
+ int alias_base_index = -1;
+ EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat32, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat32, 1, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 2, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 3, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat64, 0, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 2);
+
+ // Non-allocatable codes still alias.
+ EXPECT_EQ(test.GetAliases(kFloat64, 2, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 4);
+ // High numbered double and simd regs don't alias nonexistent float registers.
+ EXPECT_EQ(
+ test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2,
+ kFloat32, &alias_base_index),
+ 0);
+ EXPECT_EQ(
+ test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2 + 1,
+ kFloat32, &alias_base_index),
+ 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters,
+ kFloat32, &alias_base_index),
+ 0);
+}
+
+} // namespace internal
+} // namespace v8
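
The COMBINE-mode expectations above all follow from one piece of arithmetic: float s aliases double s/2 and simd128 s/4, and going the other way a register fans out into 2 or 4 narrower aliases, provided those fall below kMaxFPRegisters. A minimal sketch of GetAliases under that rule; kMaxFPRegisters == 32 is inferred from the boundary cases in the test, and this is not V8's implementation.

    #include <cassert>

    // Assumed from the boundary cases above: double kMaxFPRegisters / 2
    // would alias floats 32 and 33, which do not exist.
    constexpr int kMaxFPRegisters = 32;

    enum Rep { kFloat32Rep, kFloat64Rep, kSimd128Rep };

    constexpr int Shift(Rep rep) {
      return rep == kFloat32Rep ? 0 : rep == kFloat64Rep ? 1 : 2;
    }

    // Number of `other_rep`-sized aliases of register `index` of `rep`,
    // writing the first alias to *base; 0 if the aliases would fall outside
    // the combined FP register file.
    int GetAliases(Rep rep, int index, Rep other_rep, int* base) {
      int shift = Shift(rep) - Shift(other_rep);
      if (shift <= 0) {
        *base = index >> -shift;  // one wider (or same-sized) alias
        return 1;
      }
      *base = index << shift;  // 2^shift narrower aliases, if they exist
      return *base < kMaxFPRegisters ? 1 << shift : 0;
    }

    int main() {
      int base = -1;
      // Float 3 pairs into double 1; double 1 fans out to floats 2 and 3.
      assert(GetAliases(kFloat32Rep, 3, kFloat64Rep, &base) == 1 && base == 1);
      assert(GetAliases(kFloat64Rep, 1, kFloat32Rep, &base) == 2 && base == 2);
      // Double 16 would alias floats 32/33, which do not exist.
      assert(GetAliases(kFloat64Rep, 16, kFloat32Rep, &base) == 0);
      return 0;
    }
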
diff --git a/deps/v8/test/unittests/interpreter/source-position-table-unittest.cc b/deps/v8/test/unittests/source-position-table-unittest.cc
index d62302a2cd..01d9675061 100644
--- a/deps/v8/test/unittests/interpreter/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/source-position-table-unittest.cc
@@ -4,7 +4,8 @@
#include "src/v8.h"
-#include "src/interpreter/source-position-table.h"
+#include "src/objects.h"
+#include "src/source-position-table.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -23,60 +24,67 @@ static int offsets[] = {0, 1, 2, 3, 4, 30, 31, 32,
129, 250, 1000, 9999, 12000, 31415926};
TEST_F(SourcePositionTableTest, EncodeStatement) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddStatementPosition(offsets[i], offsets[i]);
+ builder.AddPosition(offsets[i], offsets[i], true);
}
// To test correctness, we rely on the assertions in ToSourcePositionTable().
// (Also below.)
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
TEST_F(SourcePositionTableTest, EncodeStatementDuplicates) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddStatementPosition(offsets[i], offsets[i]);
- builder.AddStatementPosition(offsets[i], offsets[i] + 1);
+ builder.AddPosition(offsets[i], offsets[i], true);
+ builder.AddPosition(offsets[i], offsets[i] + 1, true);
}
// To test correctness, we rely on the assertions in ToSourcePositionTable().
// (Also below.)
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
TEST_F(SourcePositionTableTest, EncodeExpression) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddExpressionPosition(offsets[i], offsets[i]);
+ builder.AddPosition(offsets[i], offsets[i], false);
}
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
TEST_F(SourcePositionTableTest, EncodeAscending) {
- SourcePositionTableBuilder builder(isolate(), zone());
+ SourcePositionTableBuilder builder(zone());
- int accumulator = 0;
+ int code_offset = 0;
+ int source_position = 0;
for (int i = 0; i < arraysize(offsets); i++) {
- accumulator += offsets[i];
+ code_offset += offsets[i];
+ source_position += offsets[i];
if (i % 2) {
- builder.AddStatementPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, true);
} else {
- builder.AddExpressionPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, false);
}
}
- // Also test negative offsets:
+ // Also test negative offsets for source positions:
for (int i = 0; i < arraysize(offsets); i++) {
- accumulator -= offsets[i];
+ code_offset += offsets[i];
+ source_position -= offsets[i];
if (i % 2) {
- builder.AddStatementPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, true);
} else {
- builder.AddExpressionPosition(accumulator, accumulator);
+ builder.AddPosition(code_offset, source_position, false);
}
}
- CHECK(!builder.ToSourcePositionTable().is_null());
+ CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
+ .is_null());
}
} // namespace interpreter
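
The reworked builder exposes a single AddPosition(code_offset, source_position, is_statement) entry point, and the rewritten EncodeAscending pins down its contract: code offsets only ever grow, while source positions may also move backwards. A minimal sketch of that invariant as plain data, not V8's encoded table format.

    #include <cassert>
    #include <vector>

    struct Entry {
      int code_offset;
      int source_position;
      bool is_statement;
    };

    std::vector<Entry> table;

    // Mirrors the contract exercised above: code offsets are non-decreasing
    // (duplicates allowed, per EncodeStatementDuplicates), source positions
    // are unconstrained.
    void AddPosition(int code_offset, int source_position, bool is_statement) {
      assert(table.empty() || code_offset >= table.back().code_offset);
      table.push_back({code_offset, source_position, is_statement});
    }

    int main() {
      AddPosition(0, 100, true);
      AddPosition(8, 42, false);  // source position moved backwards: fine
      return 0;
    }
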
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 7d04215143..6ac71d208e 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -13,23 +13,11 @@
namespace v8 {
-class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
-
-
// static
-ArrayBufferAllocator* TestWithIsolate::array_buffer_allocator_ = NULL;
+v8::ArrayBuffer::Allocator* TestWithIsolate::array_buffer_allocator_ = nullptr;
// static
-Isolate* TestWithIsolate::isolate_ = NULL;
-
+Isolate* TestWithIsolate::isolate_ = nullptr;
TestWithIsolate::TestWithIsolate()
: isolate_scope_(isolate()), handle_scope_(isolate()) {}
@@ -43,7 +31,7 @@ void TestWithIsolate::SetUpTestCase() {
Test::SetUpTestCase();
EXPECT_EQ(NULL, isolate_);
v8::Isolate::CreateParams create_params;
- array_buffer_allocator_ = new ArrayBufferAllocator;
+ array_buffer_allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
create_params.array_buffer_allocator = array_buffer_allocator_;
isolate_ = v8::Isolate::New(create_params);
EXPECT_TRUE(isolate_ != NULL);
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 1342510b61..c5788e2478 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -27,7 +27,7 @@ class TestWithIsolate : public virtual ::testing::Test {
static void TearDownTestCase();
private:
- static ArrayBufferAllocator* array_buffer_allocator_;
+ static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
static Isolate* isolate_;
Isolate::Scope isolate_scope_;
HandleScope handle_scope_;
@@ -43,6 +43,10 @@ class TestWithContext : public virtual TestWithIsolate {
const Local<Context>& context() const { return context_; }
+ v8::internal::Isolate* i_isolate() const {
+ return reinterpret_cast<v8::internal::Isolate*>(isolate());
+ }
+
private:
Local<Context> context_;
Context::Scope context_scope_;
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 003281b020..0ea8b9a43d 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -2,11 +2,154 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# The sources are kept automatically in sync with BUILD.gn.
+
{
'variables': {
'v8_code': 1,
+ 'unittests_sources': [ ### gcmole(all) ###
+ 'base/atomic-utils-unittest.cc',
+ 'base/bits-unittest.cc',
+ 'base/cpu-unittest.cc',
+ 'base/division-by-constant-unittest.cc',
+ 'base/flags-unittest.cc',
+ 'base/functional-unittest.cc',
+ 'base/ieee754-unittest.cc',
+ 'base/logging-unittest.cc',
+ 'base/iterator-unittest.cc',
+ 'base/platform/condition-variable-unittest.cc',
+ 'base/platform/mutex-unittest.cc',
+ 'base/platform/platform-unittest.cc',
+ 'base/platform/semaphore-unittest.cc',
+ 'base/platform/time-unittest.cc',
+ 'base/sys-info-unittest.cc',
+ 'base/utils/random-number-generator-unittest.cc',
+ 'cancelable-tasks-unittest.cc',
+ 'char-predicates-unittest.cc',
+ 'compiler/branch-elimination-unittest.cc',
+ 'compiler/checkpoint-elimination-unittest.cc',
+ 'compiler/common-operator-reducer-unittest.cc',
+ 'compiler/common-operator-unittest.cc',
+ 'compiler/compiler-test-utils.h',
+ 'compiler/control-equivalence-unittest.cc',
+ 'compiler/control-flow-optimizer-unittest.cc',
+ 'compiler/dead-code-elimination-unittest.cc',
+ 'compiler/diamond-unittest.cc',
+ 'compiler/effect-control-linearizer-unittest.cc',
+ 'compiler/escape-analysis-unittest.cc',
+ 'compiler/graph-reducer-unittest.cc',
+ 'compiler/graph-reducer-unittest.h',
+ 'compiler/graph-trimmer-unittest.cc',
+ 'compiler/graph-unittest.cc',
+ 'compiler/graph-unittest.h',
+ 'compiler/instruction-selector-unittest.cc',
+ 'compiler/instruction-selector-unittest.h',
+ 'compiler/instruction-sequence-unittest.cc',
+ 'compiler/instruction-sequence-unittest.h',
+ 'compiler/int64-lowering-unittest.cc',
+ 'compiler/js-builtin-reducer-unittest.cc',
+ 'compiler/js-create-lowering-unittest.cc',
+ 'compiler/js-intrinsic-lowering-unittest.cc',
+ 'compiler/js-operator-unittest.cc',
+ 'compiler/js-typed-lowering-unittest.cc',
+ 'compiler/linkage-tail-call-unittest.cc',
+ 'compiler/liveness-analyzer-unittest.cc',
+ 'compiler/live-range-unittest.cc',
+ 'compiler/load-elimination-unittest.cc',
+ 'compiler/loop-peeling-unittest.cc',
+ 'compiler/machine-operator-reducer-unittest.cc',
+ 'compiler/machine-operator-unittest.cc',
+ 'compiler/move-optimizer-unittest.cc',
+ 'compiler/node-cache-unittest.cc',
+ 'compiler/node-matchers-unittest.cc',
+ 'compiler/node-properties-unittest.cc',
+ 'compiler/node-test-utils.cc',
+ 'compiler/node-test-utils.h',
+ 'compiler/node-unittest.cc',
+ 'compiler/opcodes-unittest.cc',
+ 'compiler/register-allocator-unittest.cc',
+ 'compiler/schedule-unittest.cc',
+ 'compiler/scheduler-unittest.cc',
+ 'compiler/scheduler-rpo-unittest.cc',
+ 'compiler/simplified-operator-reducer-unittest.cc',
+ 'compiler/simplified-operator-unittest.cc',
+ 'compiler/state-values-utils-unittest.cc',
+ 'compiler/tail-call-optimization-unittest.cc',
+ 'compiler/typer-unittest.cc',
+ 'compiler/value-numbering-reducer-unittest.cc',
+ 'compiler/zone-pool-unittest.cc',
+ 'compiler-dispatcher/compiler-dispatcher-job-unittest.cc',
+ 'counters-unittest.cc',
+ 'eh-frame-iterator-unittest.cc',
+ 'eh-frame-writer-unittest.cc',
+ 'interpreter/bytecodes-unittest.cc',
+ 'interpreter/bytecode-array-builder-unittest.cc',
+ 'interpreter/bytecode-array-iterator-unittest.cc',
+ 'interpreter/bytecode-array-writer-unittest.cc',
+ 'interpreter/bytecode-dead-code-optimizer-unittest.cc',
+ 'interpreter/bytecode-decoder-unittest.cc',
+ 'interpreter/bytecode-peephole-optimizer-unittest.cc',
+ 'interpreter/bytecode-pipeline-unittest.cc',
+ 'interpreter/bytecode-register-allocator-unittest.cc',
+ 'interpreter/bytecode-register-optimizer-unittest.cc',
+ 'interpreter/constant-array-builder-unittest.cc',
+ 'interpreter/interpreter-assembler-unittest.cc',
+ 'interpreter/interpreter-assembler-unittest.h',
+ 'libplatform/default-platform-unittest.cc',
+ 'libplatform/task-queue-unittest.cc',
+ 'libplatform/worker-thread-unittest.cc',
+ 'heap/bitmap-unittest.cc',
+ 'heap/gc-idle-time-handler-unittest.cc',
+ 'heap/gc-tracer-unittest.cc',
+ 'heap/marking-unittest.cc',
+ 'heap/memory-reducer-unittest.cc',
+ 'heap/heap-unittest.cc',
+ 'heap/scavenge-job-unittest.cc',
+ 'heap/slot-set-unittest.cc',
+ 'locked-queue-unittest.cc',
+ 'register-configuration-unittest.cc',
+ 'run-all-unittests.cc',
+ 'source-position-table-unittest.cc',
+ 'test-utils.h',
+ 'test-utils.cc',
+ 'value-serializer-unittest.cc',
+ 'wasm/asm-types-unittest.cc',
+ 'wasm/ast-decoder-unittest.cc',
+ 'wasm/control-transfer-unittest.cc',
+ 'wasm/decoder-unittest.cc',
+ 'wasm/encoder-unittest.cc',
+ 'wasm/leb-helper-unittest.cc',
+ 'wasm/loop-assignment-analysis-unittest.cc',
+ 'wasm/module-decoder-unittest.cc',
+ 'wasm/switch-logic-unittest.cc',
+ 'wasm/wasm-macro-gen-unittest.cc',
+ ],
+ 'unittests_sources_arm': [ ### gcmole(arch:arm) ###
+ 'compiler/arm/instruction-selector-arm-unittest.cc',
+ ],
+ 'unittests_sources_arm64': [ ### gcmole(arch:arm64) ###
+ 'compiler/arm64/instruction-selector-arm64-unittest.cc',
+ ],
+ 'unittests_sources_ia32': [ ### gcmole(arch:ia32) ###
+ 'compiler/ia32/instruction-selector-ia32-unittest.cc',
+ ],
+ 'unittests_sources_mips': [ ### gcmole(arch:mips) ###
+ 'compiler/mips/instruction-selector-mips-unittest.cc',
+ ],
+ 'unittests_sources_mips64': [ ### gcmole(arch:mips64) ###
+ 'compiler/mips64/instruction-selector-mips64-unittest.cc',
+ ],
+ 'unittests_sources_x64': [ ### gcmole(arch:x64) ###
+ 'compiler/x64/instruction-selector-x64-unittest.cc',
+ ],
+ 'unittests_sources_ppc': [ ### gcmole(arch:ppc) ###
+ 'compiler/ppc/instruction-selector-ppc-unittest.cc',
+ ],
+ 'unittests_sources_s390': [ ### gcmole(arch:s390) ###
+ 'compiler/s390/instruction-selector-s390-unittest.cc',
+ ],
},
- 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
'targets': [
{
'target_name': 'unittests',
@@ -17,150 +160,63 @@
'dependencies': [
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
- '../../tools/gyp/v8.gyp:v8_libplatform',
+ '../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
'../..',
],
- 'sources': [ ### gcmole(all) ###
- 'atomic-utils-unittest.cc',
- 'base/bits-unittest.cc',
- 'base/cpu-unittest.cc',
- 'base/division-by-constant-unittest.cc',
- 'base/flags-unittest.cc',
- 'base/functional-unittest.cc',
- 'base/logging-unittest.cc',
- 'base/iterator-unittest.cc',
- 'base/platform/condition-variable-unittest.cc',
- 'base/platform/mutex-unittest.cc',
- 'base/platform/platform-unittest.cc',
- 'base/platform/semaphore-unittest.cc',
- 'base/platform/time-unittest.cc',
- 'base/sys-info-unittest.cc',
- 'base/utils/random-number-generator-unittest.cc',
- 'cancelable-tasks-unittest.cc',
- 'char-predicates-unittest.cc',
- 'compiler/branch-elimination-unittest.cc',
- 'compiler/change-lowering-unittest.cc',
- 'compiler/coalesced-live-ranges-unittest.cc',
- 'compiler/common-operator-reducer-unittest.cc',
- 'compiler/common-operator-unittest.cc',
- 'compiler/compiler-test-utils.h',
- 'compiler/control-equivalence-unittest.cc',
- 'compiler/control-flow-optimizer-unittest.cc',
- 'compiler/dead-code-elimination-unittest.cc',
- 'compiler/diamond-unittest.cc',
- 'compiler/escape-analysis-unittest.cc',
- 'compiler/graph-reducer-unittest.cc',
- 'compiler/graph-reducer-unittest.h',
- 'compiler/graph-trimmer-unittest.cc',
- 'compiler/graph-unittest.cc',
- 'compiler/graph-unittest.h',
- 'compiler/instruction-selector-unittest.cc',
- 'compiler/instruction-selector-unittest.h',
- 'compiler/instruction-sequence-unittest.cc',
- 'compiler/instruction-sequence-unittest.h',
- 'compiler/int64-lowering-unittest.cc',
- 'compiler/js-builtin-reducer-unittest.cc',
- 'compiler/js-create-lowering-unittest.cc',
- 'compiler/js-intrinsic-lowering-unittest.cc',
- 'compiler/js-operator-unittest.cc',
- 'compiler/js-typed-lowering-unittest.cc',
- 'compiler/linkage-tail-call-unittest.cc',
- 'compiler/liveness-analyzer-unittest.cc',
- 'compiler/live-range-unittest.cc',
- 'compiler/load-elimination-unittest.cc',
- 'compiler/loop-peeling-unittest.cc',
- 'compiler/machine-operator-reducer-unittest.cc',
- 'compiler/machine-operator-unittest.cc',
- 'compiler/move-optimizer-unittest.cc',
- 'compiler/node-cache-unittest.cc',
- 'compiler/node-matchers-unittest.cc',
- 'compiler/node-properties-unittest.cc',
- 'compiler/node-test-utils.cc',
- 'compiler/node-test-utils.h',
- 'compiler/node-unittest.cc',
- 'compiler/opcodes-unittest.cc',
- 'compiler/register-allocator-unittest.cc',
- 'compiler/schedule-unittest.cc',
- 'compiler/scheduler-unittest.cc',
- 'compiler/scheduler-rpo-unittest.cc',
- 'compiler/simplified-operator-reducer-unittest.cc',
- 'compiler/simplified-operator-unittest.cc',
- 'compiler/state-values-utils-unittest.cc',
- 'compiler/tail-call-optimization-unittest.cc',
- 'compiler/typer-unittest.cc',
- 'compiler/value-numbering-reducer-unittest.cc',
- 'compiler/zone-pool-unittest.cc',
- 'counters-unittest.cc',
- 'interpreter/bytecodes-unittest.cc',
- 'interpreter/bytecode-array-builder-unittest.cc',
- 'interpreter/bytecode-array-iterator-unittest.cc',
- 'interpreter/bytecode-register-allocator-unittest.cc',
- 'interpreter/constant-array-builder-unittest.cc',
- 'interpreter/interpreter-assembler-unittest.cc',
- 'interpreter/interpreter-assembler-unittest.h',
- 'interpreter/source-position-table-unittest.cc',
- 'libplatform/default-platform-unittest.cc',
- 'libplatform/task-queue-unittest.cc',
- 'libplatform/worker-thread-unittest.cc',
- 'heap/bitmap-unittest.cc',
- 'heap/gc-idle-time-handler-unittest.cc',
- 'heap/gc-tracer-unittest.cc',
- 'heap/memory-reducer-unittest.cc',
- 'heap/heap-unittest.cc',
- 'heap/scavenge-job-unittest.cc',
- 'heap/slot-set-unittest.cc',
- 'locked-queue-unittest.cc',
- 'run-all-unittests.cc',
- 'test-utils.h',
- 'test-utils.cc',
- 'wasm/ast-decoder-unittest.cc',
- 'wasm/decoder-unittest.cc',
- 'wasm/encoder-unittest.cc',
- 'wasm/loop-assignment-analysis-unittest.cc',
- 'wasm/module-decoder-unittest.cc',
- 'wasm/wasm-macro-gen-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources)',
],
'conditions': [
['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- 'compiler/arm/instruction-selector-arm-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_arm)',
],
}],
['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- 'compiler/arm64/instruction-selector-arm64-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_arm64)',
],
}],
['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- 'compiler/ia32/instruction-selector-ia32-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_ia32)',
+ ],
+ }],
+ ['v8_target_arch=="mips"', {
+ 'sources': [
+ '<@(unittests_sources_mips)',
],
}],
['v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- 'compiler/mips/instruction-selector-mips-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_mips)',
+ ],
+ }],
+ ['v8_target_arch=="mips64"', {
+ 'sources': [
+ '<@(unittests_sources_mips64)',
],
}],
['v8_target_arch=="mips64el"', {
- 'sources': [ ### gcmole(arch:mips64el) ###
- 'compiler/mips64/instruction-selector-mips64-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_mips64)',
],
}],
['v8_target_arch=="x64"', {
- 'sources': [ ### gcmole(arch:x64) ###
- 'compiler/x64/instruction-selector-x64-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_x64)',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- 'compiler/ppc/instruction-selector-ppc-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_ppc)',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390) ###
- 'compiler/s390/instruction-selector-s390-unittest.cc',
+ 'sources': [
+ '<@(unittests_sources_s390)',
],
}],
['OS=="aix"', {
@@ -169,9 +225,9 @@
['component=="shared_library"', {
# compiler-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
+ 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
}, {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ 'dependencies': ['../../src/v8.gyp:v8'],
}],
['os_posix == 1', {
# TODO(svenpanne): This is a temporary work-around to fix the warnings
@@ -198,7 +254,7 @@
'unittests',
],
'includes': [
- '../../build/isolate.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'unittests.isolate',
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 40b5754f2a..ee135ba5e8 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -11,6 +11,12 @@
'WasmDecoderTest.AllLoadMemCombinations': [SKIP],
'AstDecoderTest.AllLoadMemCombinations': [SKIP],
'AstDecoderTest.AllStoreMemCombinations': [SKIP],
- 'Bytecodes.DecodeBytecodeAndOperands': [SKIP],
}], # 'byteorder == big'
+['arch == x87', {
+ 'Ieee754.Expm1': [SKIP],
+ 'Ieee754.Cos': [SKIP],
+ 'Ieee754.Tan': [SKIP],
+ 'Ieee754.Acosh': [SKIP],
+ 'Ieee754.Asinh': [SKIP],
+}], # 'arch == x87'
]
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
new file mode 100644
index 0000000000..f4ed15b644
--- /dev/null
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -0,0 +1,1368 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/value-serializer.h"
+
+#include <algorithm>
+#include <string>
+
+#include "include/v8.h"
+#include "src/api.h"
+#include "src/base/build_config.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace {
+
+class ValueSerializerTest : public TestWithIsolate {
+ protected:
+ ValueSerializerTest()
+ : serialization_context_(Context::New(isolate())),
+ deserialization_context_(Context::New(isolate())) {}
+
+ const Local<Context>& serialization_context() {
+ return serialization_context_;
+ }
+ const Local<Context>& deserialization_context() {
+ return deserialization_context_;
+ }
+
+ template <typename InputFunctor, typename OutputFunctor>
+ void RoundTripTest(const InputFunctor& input_functor,
+ const OutputFunctor& output_functor) {
+ EncodeTest(input_functor,
+ [this, &output_functor](const std::vector<uint8_t>& data) {
+ DecodeTest(data, output_functor);
+ });
+ }
+
+ // Variant for the common case where a script is used to build the original
+ // value.
+ template <typename OutputFunctor>
+ void RoundTripTest(const char* source, const OutputFunctor& output_functor) {
+ RoundTripTest([this, source]() { return EvaluateScriptForInput(source); },
+ output_functor);
+ }
+
+ Maybe<std::vector<uint8_t>> DoEncode(Local<Value> value) {
+ // This approximates what the API implementation would do.
+ // TODO(jbroman): Use the public API once it exists.
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueSerializer serializer(internal_isolate);
+ serializer.WriteHeader();
+ if (serializer.WriteObject(Utils::OpenHandle(*value)).FromMaybe(false)) {
+ return Just(serializer.ReleaseBuffer());
+ }
+ if (internal_isolate->has_pending_exception()) {
+ internal_isolate->OptionalRescheduleException(true);
+ }
+ return Nothing<std::vector<uint8_t>>();
+ }
+
+ template <typename InputFunctor, typename EncodedDataFunctor>
+ void EncodeTest(const InputFunctor& input_functor,
+ const EncodedDataFunctor& encoded_data_functor) {
+ Context::Scope scope(serialization_context());
+ TryCatch try_catch(isolate());
+ Local<Value> input_value = input_functor();
+ std::vector<uint8_t> buffer;
+ ASSERT_TRUE(DoEncode(input_value).To(&buffer));
+ ASSERT_FALSE(try_catch.HasCaught());
+ encoded_data_functor(buffer);
+ }
+
+ template <typename MessageFunctor>
+ void InvalidEncodeTest(const char* source, const MessageFunctor& functor) {
+ Context::Scope scope(serialization_context());
+ TryCatch try_catch(isolate());
+ Local<Value> input_value = EvaluateScriptForInput(source);
+ ASSERT_TRUE(DoEncode(input_value).IsNothing());
+ functor(try_catch.Message());
+ }
+
+ void InvalidEncodeTest(const char* source) {
+ InvalidEncodeTest(source, [](Local<Message>) {});
+ }
+
+ template <typename OutputFunctor>
+ void DecodeTest(const std::vector<uint8_t>& data,
+ const OutputFunctor& output_functor) {
+ Context::Scope scope(deserialization_context());
+ TryCatch try_catch(isolate());
+ // TODO(jbroman): Use the public API once it exists.
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueDeserializer deserializer(
+ internal_isolate,
+ i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
+ ASSERT_TRUE(deserializer.ReadHeader().FromMaybe(false));
+ Local<Value> result;
+ ASSERT_TRUE(ToLocal<Value>(deserializer.ReadObject(), &result));
+ ASSERT_FALSE(result.IsEmpty());
+ ASSERT_FALSE(try_catch.HasCaught());
+ ASSERT_TRUE(deserialization_context()
+ ->Global()
+ ->CreateDataProperty(deserialization_context_,
+ StringFromUtf8("result"), result)
+ .FromMaybe(false));
+ output_functor(result);
+ ASSERT_FALSE(try_catch.HasCaught());
+ }
+
+ template <typename OutputFunctor>
+ void DecodeTestForVersion0(const std::vector<uint8_t>& data,
+ const OutputFunctor& output_functor) {
+ Context::Scope scope(deserialization_context());
+ TryCatch try_catch(isolate());
+ // TODO(jbroman): Use the public API once it exists.
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueDeserializer deserializer(
+ internal_isolate,
+ i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
+ // TODO(jbroman): Enable legacy support.
+ ASSERT_TRUE(deserializer.ReadHeader().FromMaybe(false));
+ // TODO(jbroman): Check version 0.
+ Local<Value> result;
+ ASSERT_TRUE(ToLocal<Value>(
+ deserializer.ReadObjectUsingEntireBufferForLegacyFormat(), &result));
+ ASSERT_FALSE(result.IsEmpty());
+ ASSERT_FALSE(try_catch.HasCaught());
+ ASSERT_TRUE(deserialization_context()
+ ->Global()
+ ->CreateDataProperty(deserialization_context_,
+ StringFromUtf8("result"), result)
+ .FromMaybe(false));
+ output_functor(result);
+ ASSERT_FALSE(try_catch.HasCaught());
+ }
+
+ void InvalidDecodeTest(const std::vector<uint8_t>& data) {
+ Context::Scope scope(deserialization_context());
+ TryCatch try_catch(isolate());
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate());
+ i::HandleScope handle_scope(internal_isolate);
+ i::ValueDeserializer deserializer(
+ internal_isolate,
+ i::Vector<const uint8_t>(&data[0], static_cast<int>(data.size())));
+ Maybe<bool> header_result = deserializer.ReadHeader();
+ if (header_result.IsNothing()) return;
+ ASSERT_TRUE(header_result.ToChecked());
+ ASSERT_TRUE(deserializer.ReadObject().is_null());
+ }
+
+ Local<Value> EvaluateScriptForInput(const char* utf8_source) {
+ Local<String> source = StringFromUtf8(utf8_source);
+ Local<Script> script =
+ Script::Compile(serialization_context_, source).ToLocalChecked();
+ return script->Run(serialization_context_).ToLocalChecked();
+ }
+
+ bool EvaluateScriptForResultBool(const char* utf8_source) {
+ Local<String> source = StringFromUtf8(utf8_source);
+ Local<Script> script =
+ Script::Compile(deserialization_context_, source).ToLocalChecked();
+ Local<Value> value = script->Run(deserialization_context_).ToLocalChecked();
+ return value->BooleanValue(deserialization_context_).FromJust();
+ }
+
+ Local<String> StringFromUtf8(const char* source) {
+ return String::NewFromUtf8(isolate(), source, NewStringType::kNormal)
+ .ToLocalChecked();
+ }
+
+ static std::string Utf8Value(Local<Value> value) {
+ String::Utf8Value utf8(value);
+ return std::string(*utf8, utf8.length());
+ }
+
+ private:
+ Local<Context> serialization_context_;
+ Local<Context> deserialization_context_;
+
+ DISALLOW_COPY_AND_ASSIGN(ValueSerializerTest);
+};
+
+TEST_F(ValueSerializerTest, DecodeInvalid) {
+ // Version tag but no content.
+ InvalidDecodeTest({0xff});
+ // Version too large.
+ InvalidDecodeTest({0xff, 0x7f, 0x5f});
+ // Nonsense tag.
+ InvalidDecodeTest({0xff, 0x09, 0xdd});
+}
+
+TEST_F(ValueSerializerTest, RoundTripOddball) {
+ RoundTripTest([this]() { return Undefined(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ RoundTripTest([this]() { return True(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ RoundTripTest([this]() { return False(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ RoundTripTest([this]() { return Null(isolate()); },
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+}
+
+TEST_F(ValueSerializerTest, DecodeOddball) {
+ // What this code is expected to generate.
+ DecodeTest({0xff, 0x09, 0x5f},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ DecodeTest({0xff, 0x09, 0x54},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ DecodeTest({0xff, 0x09, 0x46},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ DecodeTest({0xff, 0x09, 0x30},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+
+ // What v9 of the Blink code generates.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x5f, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x54, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x46, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x30, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+
+ // v0 (with no explicit version).
+ DecodeTest({0x5f, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsUndefined()); });
+ DecodeTest({0x54, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsTrue()); });
+ DecodeTest({0x46, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsFalse()); });
+ DecodeTest({0x30, 0x00},
+ [](Local<Value> value) { EXPECT_TRUE(value->IsNull()); });
+}
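
The single-byte tags in these cases are ASCII mnemonics: 0x5f is '_' (undefined), 0x54 'T' (true), 0x46 'F' (false), 0x30 '0' (null), with 0xff 0x09 as the version header. A small decoding sketch; tag meanings are read off the cases above, not from a format spec.

    #include <cassert>

    enum class Oddball { kUndefined, kTrue, kFalse, kNull, kUnknown };

    // Maps the oddball tag bytes used above back to their values.
    Oddball DecodeOddballTag(unsigned char tag) {
      switch (tag) {
        case '_': return Oddball::kUndefined;  // 0x5f
        case 'T': return Oddball::kTrue;       // 0x54
        case 'F': return Oddball::kFalse;      // 0x46
        case '0': return Oddball::kNull;       // 0x30
        default:  return Oddball::kUnknown;
      }
    }

    int main() {
      assert(DecodeOddballTag(0x5f) == Oddball::kUndefined);
      assert(DecodeOddballTag(0x54) == Oddball::kTrue);
      return 0;
    }
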
+
+TEST_F(ValueSerializerTest, RoundTripNumber) {
+ RoundTripTest([this]() { return Integer::New(isolate(), 42); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
+ RoundTripTest([this]() { return Integer::New(isolate(), -31337); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(-31337, Int32::Cast(*value)->Value());
+ });
+ RoundTripTest(
+ [this]() {
+ return Integer::New(isolate(), std::numeric_limits<int32_t>::min());
+ },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(),
+ Int32::Cast(*value)->Value());
+ });
+ RoundTripTest([this]() { return Number::New(isolate(), -0.25); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+ });
+ RoundTripTest(
+ [this]() {
+ return Number::New(isolate(), std::numeric_limits<double>::quiet_NaN());
+ },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeNumber) {
+ // 42 zig-zag encoded (signed)
+ DecodeTest({0xff, 0x09, 0x49, 0x54},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
+ // 42 varint encoded (unsigned)
+ DecodeTest({0xff, 0x09, 0x55, 0x2a},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ EXPECT_EQ(42, Int32::Cast(*value)->Value());
+ });
+ // 160 zig-zag encoded (signed)
+ DecodeTest({0xff, 0x09, 0x49, 0xc0, 0x02},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
+ // 160 varint encoded (unsigned)
+ DecodeTest({0xff, 0x09, 0x55, 0xa0, 0x01},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsInt32());
+ ASSERT_EQ(160, Int32::Cast(*value)->Value());
+ });
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ // IEEE 754 doubles, little-endian byte order
+ DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0xbf},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_EQ(-0.25, Number::Cast(*value)->Value());
+ });
+ // quiet NaN
+ DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x7f},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
+ // signaling NaN
+ DecodeTest({0xff, 0x09, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x7f},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsNumber());
+ EXPECT_TRUE(std::isnan(Number::Cast(*value)->Value()));
+ });
+#endif
+ // TODO(jbroman): Equivalent test for big-endian machines.
+}
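
Two integer encodings appear in these cases: tag 'I' (0x49) carries a zig-zag-encoded signed value and tag 'U' (0x55) a plain varint. A sketch that reproduces the exact payload bytes above, using the standard zig-zag/varint pair; it is assumed to match the serializer only as far as these examples show.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Zig-zag maps signed onto unsigned: 0, -1, 1, -2, ... -> 0, 1, 2, 3, ...
    uint32_t ZigZag(int32_t value) {
      return (static_cast<uint32_t>(value) << 1) ^
             static_cast<uint32_t>(value >> 31);
    }

    // Varint packs a value into 7-bit groups, LSB first, with a
    // continuation bit in the high bit of each byte.
    std::vector<uint8_t> Varint(uint32_t value) {
      std::vector<uint8_t> bytes;
      do {
        uint8_t b = value & 0x7f;
        value >>= 7;
        if (value) b |= 0x80;  // more bytes follow
        bytes.push_back(b);
      } while (value);
      return bytes;
    }

    int main() {
      // 'I' 0x54: zig-zag(42) == 84 == 0x54, a single varint byte.
      assert(Varint(ZigZag(42)) == (std::vector<uint8_t>{0x54}));
      // 'I' 0xc0 0x02: zig-zag(160) == 320, two varint bytes.
      assert(Varint(ZigZag(160)) == (std::vector<uint8_t>{0xc0, 0x02}));
      // 'U' 0xa0 0x01: 160 unsigned, varint only.
      assert(Varint(160) == (std::vector<uint8_t>{0xa0, 0x01}));
      return 0;
    }
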
+
+// String constants (in UTF-8) used for string encoding tests.
+static const char kHelloString[] = "Hello";
+static const char kQuebecString[] = "\x51\x75\xC3\xA9\x62\x65\x63";
+static const char kEmojiString[] = "\xF0\x9F\x91\x8A";
+
+TEST_F(ValueSerializerTest, RoundTripString) {
+ RoundTripTest([this]() { return String::Empty(isolate()); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ // Inside ASCII.
+ RoundTripTest([this]() { return StringFromUtf8(kHelloString); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+ // Inside Latin-1 (i.e. one-byte string), but not ASCII.
+ RoundTripTest([this]() { return StringFromUtf8(kQuebecString); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+ // An emoji (decodes to two 16-bit chars).
+ RoundTripTest([this]() { return StringFromUtf8(kEmojiString); },
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeString) {
+ // Decoding the strings above from UTF-8.
+ DecodeTest({0xff, 0x09, 0x53, 0x00},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ DecodeTest({0xff, 0x09, 0x53, 0x05, 'H', 'e', 'l', 'l', 'o'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x53, 0x07, 'Q', 'u', 0xc3, 0xa9, 'b', 'e', 'c'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x53, 0x04, 0xf0, 0x9f, 0x91, 0x8a},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
+
+// And from two-byte strings (endianness dependent).
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest({0xff, 0x09, 0x63, 0x00},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(0, String::Cast(*value)->Length());
+ });
+ DecodeTest({0xff, 0x09, 0x63, 0x0a, 'H', '\0', 'e', '\0', 'l', '\0', 'l',
+ '\0', 'o', '\0'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(5, String::Cast(*value)->Length());
+ EXPECT_EQ(kHelloString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x63, 0x0c, 'Q', '\0', 'u', '\0', 0xe9, '\0', 'b',
+ '\0', 'e', '\0', 'c', '\0'},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(6, String::Cast(*value)->Length());
+ EXPECT_EQ(kQuebecString, Utf8Value(value));
+ });
+ DecodeTest({0xff, 0x09, 0x63, 0x04, 0x3d, 0xd8, 0x4a, 0xdc},
+ [](Local<Value> value) {
+ ASSERT_TRUE(value->IsString());
+ EXPECT_EQ(2, String::Cast(*value)->Length());
+ EXPECT_EQ(kEmojiString, Utf8Value(value));
+ });
+#endif
+ // TODO(jbroman): The same for big-endian systems.
+}
+
+TEST_F(ValueSerializerTest, DecodeInvalidString) {
+ // UTF-8 string with too few bytes available.
+ InvalidDecodeTest({0xff, 0x09, 0x53, 0x10, 'v', '8'});
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ // Two-byte string with too few bytes available.
+ InvalidDecodeTest({0xff, 0x09, 0x63, 0x10, 'v', '\0', '8', '\0'});
+ // Two-byte string with an odd byte length.
+ InvalidDecodeTest({0xff, 0x09, 0x63, 0x03, 'v', '\0', '8'});
+#endif
+ // TODO(jbroman): The same for big-endian systems.
+}
+
+TEST_F(ValueSerializerTest, EncodeTwoByteStringUsesPadding) {
+ // As long as the output has a version that Blink expects to be able to read,
+ // we must respect its alignment requirements. It requires that two-byte
+ // characters be aligned.
+ EncodeTest(
+ [this]() {
+ // We need a string whose length will take two bytes to encode, so that
+ // a padding byte is needed to keep the characters aligned. The string
+ // must also have a two-byte character, so that it gets the two-byte
+ // encoding.
+ std::string string(200, ' ');
+ string += kEmojiString;
+ return StringFromUtf8(string.c_str());
+ },
+ [](const std::vector<uint8_t>& data) {
+ // This is a sufficient but not necessary condition to be aligned.
+ // Note that the third byte (0x00) is padding.
+ const uint8_t expected_prefix[] = {0xff, 0x09, 0x00, 0x63, 0x94, 0x03};
+ ASSERT_GT(data.size(), sizeof(expected_prefix) / sizeof(uint8_t));
+ EXPECT_TRUE(std::equal(std::begin(expected_prefix),
+ std::end(expected_prefix), data.begin()));
+ });
+}
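
The expected_prefix decodes as: version header 0xff 0x09, one 0x00 padding byte, the two-byte-string tag 0x63 ('c'), then the byte-length varint 0x94 0x03 == 404, i.e. 202 UTF-16 units (200 spaces plus a surrogate-pair emoji). The padding exists so the payload starts at an even offset, here 6. A sketch of that arithmetic; the layout is read off the test, not a format spec.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Bytes a varint needs for a given value.
    size_t VarintSize(uint32_t value) {
      size_t n = 1;
      while (value >>= 7) ++n;
      return n;
    }

    // Padding bytes required so the UTF-16 payload begins at an even offset,
    // given the layout in expected_prefix: header, optional 0x00 padding,
    // tag byte 0x63, byte-length varint, then the characters.
    size_t PaddingFor(size_t header_size, uint32_t byte_length) {
      return (header_size + 1 /* tag */ + VarintSize(byte_length)) % 2;
    }

    int main() {
      // 202 UTF-16 units * 2 == 404 bytes; varint 404 == 0x94 0x03 (2 bytes),
      // so offset 2 + 1 + 2 == 5 is odd and one padding byte is needed.
      assert(VarintSize(404) == 2);
      assert(PaddingFor(2, 404) == 1);
      return 0;
    }
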
+
+TEST_F(ValueSerializerTest, RoundTripDictionaryObject) {
+ // Empty object.
+ RoundTripTest("({})", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 0"));
+ });
+ // String key.
+ RoundTripTest("({ a: 42 })", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Integer key (treated as a string, but may be encoded differently).
+ RoundTripTest("({ 42: 'a' })", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Key order must be preserved.
+ RoundTripTest("({ x: 1, y: 2, a: 3 })", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
+ });
+ // A harder case of enumeration order.
+ // Indexes first, in order (but not 2^32 - 1, which is not an index), then the
+ // remaining (string) keys, in the order they were defined.
+ RoundTripTest(
+ "({ a: 2, 0xFFFFFFFF: 1, 0xFFFFFFFE: 3, 1: 0 })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === "
+ "'1,4294967294,a,4294967295'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 2"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFF] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFE] === 3"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 0"));
+ });
+ // This detects a fairly subtle case: the object itself must be in the map
+ // before its properties are deserialized, so that references to it can be
+ // resolved.
+ RoundTripTest(
+ "(() => { var y = {}; y.self = y; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result === result.self"));
+ });
+}
+
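+// A note on the fixture bytes below (tag names inferred from the ASCII
+// mnemonics these fixtures use): 0xff 0x09 is the version header, 0x6f ('o')
+// begins an object, 0x7b ('{') ends it and is followed by the property count,
+// 0x53 ('S') is a length-prefixed UTF-8 string, 0x49 ('I') a zigzag varint
+// int32, 0x55 ('U') an unsigned varint, 0x5e ('^') a back-reference to an
+// already-deserialized object, and the interleaved 0x3f ('?') bytes appear to
+// be legacy object-count markers from the Blink wire format.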
+TEST_F(ValueSerializerTest, DecodeDictionaryObject) {
+ // Empty object.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6f, 0x7b, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 0"));
+ });
+ // String key.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
+ 0x49, 0x54, 0x7b, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Integer key (treated as a string, but may be encoded differently).
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x49, 0x54, 0x3f, 0x01, 0x53,
+ 0x01, 0x61, 0x7b, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Key order must be preserved.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x78, 0x3f, 0x01,
+ 0x49, 0x02, 0x3f, 0x01, 0x53, 0x01, 0x79, 0x3f, 0x01, 0x49, 0x04, 0x3f,
+ 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01, 0x49, 0x06, 0x7b, 0x03},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
+ });
+ // A harder case of enumeration order.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x49, 0x02, 0x3f, 0x01,
+ 0x49, 0x00, 0x3f, 0x01, 0x55, 0xfe, 0xff, 0xff, 0xff, 0x0f, 0x3f,
+ 0x01, 0x49, 0x06, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01, 0x49,
+ 0x04, 0x3f, 0x01, 0x53, 0x0a, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36,
+ 0x37, 0x32, 0x39, 0x35, 0x3f, 0x01, 0x49, 0x02, 0x7b, 0x04},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === "
+ "'1,4294967294,a,4294967295'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 2"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFF] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0xFFFFFFFE] === 3"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 0"));
+ });
+ // This detects a fairly subtle case: the object itself must be in the map
+ // before its properties are deserialized, so that references to it can be
+ // resolved.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x04, 0x73,
+ 0x65, 0x6c, 0x66, 0x3f, 0x01, 0x5e, 0x00, 0x7b, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result === result.self"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripOnlyOwnEnumerableStringKeys) {
+ // Only "own" properties should be serialized, not ones on the prototype.
+ RoundTripTest("(() => { var x = {}; x.__proto__ = {a: 4}; return x; })()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('a' in result)"));
+ });
+ // Only enumerable properties should be serialized.
+ RoundTripTest(
+ "(() => {"
+ " var x = {};"
+ " Object.defineProperty(x, 'a', {value: 1, enumerable: false});"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('a' in result)"));
+ });
+ // Symbol keys should not be serialized.
+ RoundTripTest("({ [Symbol()]: 4 })", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertySymbols(result).length === 0"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripTrickyGetters) {
+ // Keys are enumerated before any getters are called, but if there is no own
+ // property when the value is to be read, then it should not be serialized.
+ RoundTripTest("({ get a() { delete this.b; return 1; }, b: 2 })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+ // Keys added after the property enumeration should not be serialized.
+ RoundTripTest("({ get a() { this.b = 3; }})", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+ // If you remove a key and then add it back, it is still serialized, but it
+ // appears in its original place in enumeration order.
+ RoundTripTest(
+ "({ get a() { delete this.b; this.b = 4; }, b: 2, c: 3 })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'a,b,c'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.b === 4"));
+ });
+ // Similarly, it only matters if a property was enumerable when the
+ // enumeration happened.
+ RoundTripTest(
+ "({ get a() {"
+ " Object.defineProperty(this, 'b', {value: 2, enumerable: false});"
+ "}, b: 1})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.b === 2"));
+ });
+ RoundTripTest(
+ "(() => {"
+ " var x = {"
+ " get a() {"
+ " Object.defineProperty(this, 'b', {value: 2, enumerable: true});"
+ " }"
+ " };"
+ " Object.defineProperty(x, 'b',"
+ " {value: 1, enumerable: false, configurable: true});"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+ // The property also should not be read if it can only be found on the
+ // prototype chain (but not as an own property) after enumeration.
+ RoundTripTest(
+ "(() => {"
+ " var x = { get a() { delete this.b; }, b: 1 };"
+ " x.__proto__ = { b: 0 };"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("!('b' in result)"));
+ });
+ // If an exception is thrown by script, encoding must fail and the exception
+ // must be thrown.
+ InvalidEncodeTest("({ get a() { throw new Error('sentinel'); } })",
+ [](Local<Message> message) {
+ ASSERT_FALSE(message.IsEmpty());
+ EXPECT_NE(std::string::npos,
+ Utf8Value(message->Get()).find("sentinel"));
+ });
+}
+
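+// Version 0 payloads (presumably the pre-versioned Blink format) carry no
+// 0xff 0x09 header, so the fixtures below start directly with the data bytes.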
+TEST_F(ValueSerializerTest, DecodeDictionaryObjectVersion0) {
+ // Empty object.
+ DecodeTestForVersion0(
+ {0x7b, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 0"));
+ });
+ // String key.
+ DecodeTestForVersion0(
+ {0x53, 0x01, 0x61, 0x49, 0x54, 0x7b, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Object.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('a')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Integer key (treated as a string, but may be encoded differently).
+ DecodeTestForVersion0(
+ {0x49, 0x54, 0x53, 0x01, 0x61, 0x7b, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsObject());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty('42')"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).length === 1"));
+ });
+ // Key order must be preserved.
+ DecodeTestForVersion0(
+ {0x53, 0x01, 0x78, 0x49, 0x02, 0x53, 0x01, 0x79, 0x49, 0x04, 0x53, 0x01,
+ 0x61, 0x49, 0x06, 0x7b, 0x03, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === 'x,y,a'"));
+ });
+ // A property and an element.
+ DecodeTestForVersion0(
+ {0x49, 0x54, 0x53, 0x01, 0x61, 0x53, 0x01, 0x61, 0x49, 0x54, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getOwnPropertyNames(result).toString() === '42,a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[42] === 'a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === 42"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripArray) {
+ // A simple array of integers.
+ RoundTripTest("[1, 2, 3, 4, 5]", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(5, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Array.prototype"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === '1,2,3,4,5'"));
+ });
+ // A long (sparse) array.
+ RoundTripTest(
+ "(() => { var x = new Array(1000); x[500] = 42; return x; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
+ });
+ // Duplicate reference.
+ RoundTripTest(
+ "(() => { var y = {}; return [y, y]; })()", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
+ });
+ // Duplicate reference in a sparse array.
+ RoundTripTest(
+ "(() => { var x = new Array(1000); x[1] = x[500] = {}; return x; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'object'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
+ });
+ // Self reference.
+ RoundTripTest(
+ "(() => { var y = []; y[0] = y; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
+ });
+ // Self reference in a sparse array.
+ RoundTripTest(
+ "(() => { var y = new Array(1000); y[519] = y; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
+ });
+ // Array with additional properties.
+ RoundTripTest(
+ "(() => { var y = [1, 2]; y.foo = 'bar'; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // Sparse array with additional properties.
+ RoundTripTest(
+ "(() => { var y = new Array(1000); y.foo = 'bar'; return y; })()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === ','.repeat(999)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // The distinction between holes and undefined elements must be maintained.
+ RoundTripTest("[,undefined]", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(0)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty(1)"));
+ });
+}
+
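+// As the fixtures below suggest, dense arrays are bracketed by 0x41 ('A') and
+// 0x24 ('$'), sparse arrays by 0x61 ('a') and 0x40 ('@'); each closing tag is
+// followed by the property count and the array length as varints.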
+TEST_F(ValueSerializerTest, DecodeArray) {
+ // A simple array of integers.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x41, 0x05, 0x3f, 0x01, 0x49, 0x02,
+ 0x3f, 0x01, 0x49, 0x04, 0x3f, 0x01, 0x49, 0x06, 0x3f, 0x01,
+ 0x49, 0x08, 0x3f, 0x01, 0x49, 0x0a, 0x24, 0x00, 0x05, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(5, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Array.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '1,2,3,4,5'"));
+ });
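+ // (The integer elements above are zigzag-encoded: a varint v decodes as
+ // (v >> 1) ^ -(v & 1), so 0x02, 0x04, ..., 0x0a decode to 1 through 5.)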
+ // A long (sparse) array.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
+ 0xe8, 0x07, 0x3f, 0x01, 0x49, 0x54, 0x40, 0x01, 0xe8, 0x07},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
+ });
+ // Duplicate reference.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x41, 0x02, 0x3f, 0x01, 0x6f, 0x7b, 0x00, 0x3f,
+ 0x02, 0x5e, 0x01, 0x24, 0x00, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
+ });
+ // Duplicate reference in a sparse array.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
+ 0x02, 0x3f, 0x01, 0x6f, 0x7b, 0x00, 0x3f, 0x02, 0x49, 0xe8,
+ 0x07, 0x3f, 0x02, 0x5e, 0x01, 0x40, 0x02, 0xe8, 0x07, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'object'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
+ });
+ // Self reference.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x41, 0x01, 0x3f, 0x01, 0x5e, 0x00, 0x24,
+ 0x00, 0x01, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
+ });
+ // Self reference in a sparse array.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01, 0x49,
+ 0x8e, 0x08, 0x3f, 0x01, 0x5e, 0x00, 0x40, 0x01, 0xe8, 0x07},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
+ });
+ // Array with additional properties.
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x41, 0x02, 0x3f, 0x01, 0x49, 0x02, 0x3f,
+ 0x01, 0x49, 0x04, 0x3f, 0x01, 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x3f,
+ 0x01, 0x53, 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // Sparse array with additional properties.
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x61, 0xe8, 0x07, 0x3f, 0x01,
+ 0x53, 0x03, 0x66, 0x6f, 0x6f, 0x3f, 0x01, 0x53, 0x03,
+ 0x62, 0x61, 0x72, 0x40, 0x01, 0xe8, 0x07, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === ','.repeat(999)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ });
+ // The distinction between holes and undefined elements must be maintained.
+ // Note that since output previously captured from Chrome fails this test, an
+ // encoding using the sparse format was constructed instead.
+ DecodeTest(
+ {0xff, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5f, 0x40, 0x01, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(0)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.hasOwnProperty(1)"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripArrayWithNonEnumerableElement) {
+ // Even though this array looks like [1,5,3], the 5 should be missing from the
+ // perspective of structured clone, which only clones properties that were
+ // enumerable.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1,2,3];"
+ " Object.defineProperty(x, '1', {enumerable:false, value:5});"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(3, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty('1')"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
+ // If an element is deleted before it is serialized, it is also absent from
+ // the deserialized result.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; }}, 42];"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; }}, 42];"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
+ });
+ // If the length is changed, then the resulting array still has the original
+ // length, but elements that were not yet serialized are gone.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, { get a() { x.length = 0; }}, 3, 4];"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(4, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, { get a() { x.length = 0; }}, 3, 4];"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
+ });
+ // If a getter makes a property non-enumerable, it should still be enumerated,
+ // as enumeration happens once, before any getters are invoked.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() {"
+ " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
+ " }}, 2];"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() {"
+ " Object.defineProperty(x, '1', { value: 3, enumerable: false });"
+ " }}, 2];"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
+ });
+ // Getters on the array itself must also run.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, 2, 3];"
+ " Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(3, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [1, 2, 3];"
+ " Object.defineProperty(x, '1', { enumerable: true, get: () => 4 });"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
+ });
+ // Even with a getter that deletes things, we don't read from the prototype.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; } }, 2];"
+ " x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
+ });
+ // Same for sparse arrays.
+ RoundTripTest(
+ "(() => {"
+ " var x = [{ get a() { delete x[1]; } }, 2];"
+ " x.__proto__ = Object.create(Array.prototype, { 1: { value: 6 } });"
+ " x.length = 1000;"
+ " return x;"
+ "})()",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
+ });
+}
+
+TEST_F(ValueSerializerTest, DecodeSparseArrayVersion0) {
+ // Empty (sparse) array.
+ DecodeTestForVersion0({0x40, 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ ASSERT_EQ(0, Array::Cast(*value)->Length());
+ });
+ // Sparse array with a mixture of elements and properties.
+ DecodeTestForVersion0(
+ {0x55, 0x00, 0x53, 0x01, 'a', 0x55, 0x02, 0x55, 0x05, 0x53,
+ 0x03, 'f', 'o', 'o', 0x53, 0x03, 'b', 'a', 'r', 0x53,
+ 0x03, 'b', 'a', 'z', 0x49, 0x0b, 0x40, 0x04, 0x03, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(3, Array::Cast(*value)->Length());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === 'a,,5'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.baz === -6"));
+ });
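+ // (0x49 0x0b above is the zigzag encoding of -6: (11 >> 1) ^ -(11 & 1).)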
+ // Sparse array in a sparse array (sanity check of nesting).
+ DecodeTestForVersion0(
+ {0x55, 0x01, 0x55, 0x01, 0x54, 0x40, 0x01, 0x02, 0x40, 0x01, 0x02, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsArray());
+ EXPECT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1] instanceof Array"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result[1])"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result[1][1] === true"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripDate) {
+ RoundTripTest("new Date(1e6)", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ });
+ RoundTripTest("new Date(Date.UTC(1867, 6, 1))", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+ RoundTripTest("new Date(NaN)", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
+ RoundTripTest(
+ "({ a: new Date(), get b() { return this.a; } })",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Date"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
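+// Dates are encoded as tag 'D' (0x44) followed by the time value as a raw
+// IEEE-754 double in what appears to be host byte order, hence the
+// endian-specific fixtures: 1e6 is 0x412e848000000000, NaN 0x7ff8000000000000.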
+TEST_F(ValueSerializerTest, DecodeDate) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x80, 0x84,
+ 0x2e, 0x41, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x20, 0x45, 0x27, 0x89, 0x87,
+ 0xc2, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf8, 0x7f, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
+#else
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x41, 0x2e, 0x84, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_EQ(1e6, Date::Cast(*value)->ValueOf());
+ EXPECT_TRUE("Object.getPrototypeOf(result) === Date.prototype");
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x44, 0xc2, 0x87, 0x89, 0x27, 0x45, 0x20, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE("result.toISOString() === '1867-07-01T00:00:00.000Z'");
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x44, 0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsDate());
+ EXPECT_TRUE(std::isnan(Date::Cast(*value)->ValueOf()));
+ });
+#endif
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f,
+ 0x01, 0x44, 0x00, 0x20, 0x39, 0x50, 0x37, 0x6a, 0x75, 0x42, 0x3f,
+ 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Date"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripValueObjects) {
+ RoundTripTest("new Boolean(true)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === true"));
+ });
+ RoundTripTest("new Boolean(false)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === false"));
+ });
+ RoundTripTest(
+ "({ a: new Boolean(true), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Boolean"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+ RoundTripTest("new Number(-42)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
+ });
+ RoundTripTest("new Number(NaN)", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("Number.isNaN(result.valueOf())"));
+ });
+ RoundTripTest(
+ "({ a: new Number(6), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Number"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+ RoundTripTest("new String('Qu\\xe9bec')", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.valueOf() === 'Qu\\xe9bec'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
+ });
+ RoundTripTest("new String('\\ud83d\\udc4a')", [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.valueOf() === '\\ud83d\\udc4a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 2"));
+ });
+ RoundTripTest(
+ "({ a: new String(), get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof String"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RejectsOtherValueObjects) {
+ // This is a roundabout way of getting an instance of Symbol: calling
+ // Object.prototype.valueOf with a symbol receiver boxes it into a Symbol
+ // wrapper object, which serialization must reject.
+ InvalidEncodeTest("Object.valueOf.apply(Symbol())");
+}
+
+TEST_F(ValueSerializerTest, DecodeValueObjects) {
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x79, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === true"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x78, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Boolean.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === false"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f, 0x01,
+ 0x79, 0x3f, 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Boolean"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45,
+ 0xc0, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xf8, 0x7f, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Number.isNaN(result.valueOf())"));
+ });
+#else
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6e, 0xc0, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.valueOf() === -42"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x6e, 0x7f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === Number.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Number.isNaN(result.valueOf())"));
+ });
+#endif
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61, 0x3f,
+ 0x01, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x40, 0x3f,
+ 0x02, 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof Number"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x73, 0x07, 0x51, 0x75, 0xc3, 0xa9, 0x62,
+ 0x65, 0x63, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.valueOf() === 'Qu\\xe9bec'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 6"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x73, 0x04, 0xf0, 0x9f, 0x91, 0x8a},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === String.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.valueOf() === '\\ud83d\\udc4a'"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.length === 2"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01,
+ 0x61, 0x3f, 0x01, 0x73, 0x00, 0x3f, 0x02, 0x53, 0x01,
+ 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof String"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+TEST_F(ValueSerializerTest, RoundTripRegExp) {
+ RoundTripTest("/foo/g", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === RegExp.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '/foo/g'"));
+ });
+ RoundTripTest("new RegExp('Qu\\xe9bec', 'i')", [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(
+ EvaluateScriptForResultBool("result.toString() === '/Qu\\xe9bec/i'"));
+ });
+ RoundTripTest("new RegExp('\\ud83d\\udc4a', 'ug')",
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/\\ud83d\\udc4a/gu'"));
+ });
+ RoundTripTest(
+ "({ a: /foo/gi, get b() { return this.a; }})",
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof RegExp"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
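+// From the fixtures below, RegExps are encoded as tag 'R' (0x52), a varint
+// pattern byte length, the UTF-8 pattern bytes, and a varint flags field;
+// global appears to be bit 0x01, ignoreCase 0x02, and unicode 0x10 (so 0x11
+// decodes as /gu).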
+TEST_F(ValueSerializerTest, DecodeRegExp) {
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x01},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === RegExp.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/foo/g'"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x07, 0x51, 0x75, 0xc3, 0xa9, 0x62,
+ 0x65, 0x63, 0x02},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/Qu\\xe9bec/i'"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x52, 0x04, 0xf0, 0x9f, 0x91, 0x8a, 0x11, 0x00},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/\\ud83d\\udc4a/gu'"));
+ });
+ DecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x6f, 0x3f, 0x01, 0x53, 0x01, 0x61,
+ 0x3f, 0x01, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x03, 0x3f, 0x02,
+ 0x53, 0x01, 0x62, 0x3f, 0x02, 0x5e, 0x01, 0x7b, 0x02, 0x00},
+ [this](Local<Value> value) {
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a instanceof RegExp"));
+ EXPECT_TRUE(EvaluateScriptForResultBool("result.a === result.b"));
+ });
+}
+
+} // namespace
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/OWNERS b/deps/v8/test/unittests/wasm/OWNERS
index c2abc8a6ad..eda8deabfd 100644
--- a/deps/v8/test/unittests/wasm/OWNERS
+++ b/deps/v8/test/unittests/wasm/OWNERS
@@ -1,3 +1,5 @@
-titzer@chromium.org
-bradnelson@chromium.org
ahaas@chromium.org
+bradnelson@chromium.org
+mtrofin@chromium.org
+rossberg@chromium.org
+titzer@chromium.org
diff --git a/deps/v8/test/unittests/wasm/asm-types-unittest.cc b/deps/v8/test/unittests/wasm/asm-types-unittest.cc
new file mode 100644
index 0000000000..36493df86d
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/asm-types-unittest.cc
@@ -0,0 +1,723 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/asmjs/asm-types.h"
+
+#include <unordered_map>
+#include <unordered_set>
+
+#include "src/base/macros.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace {
+
+using ::testing::StrEq;
+
+class AsmTypeTest : public TestWithZone {
+ public:
+ using Type = AsmType;
+
+ AsmTypeTest()
+ : parents_({
+ {Type::Uint8Array(), {Type::Heap()}},
+ {Type::Int8Array(), {Type::Heap()}},
+ {Type::Uint16Array(), {Type::Heap()}},
+ {Type::Int16Array(), {Type::Heap()}},
+ {Type::Uint32Array(), {Type::Heap()}},
+ {Type::Int32Array(), {Type::Heap()}},
+ {Type::Float32Array(), {Type::Heap()}},
+ {Type::Float64Array(), {Type::Heap()}},
+ {Type::Float(),
+ {Type::FloatishDoubleQ(), Type::FloatQDoubleQ(), Type::FloatQ(),
+ Type::Floatish()}},
+ {Type::Floatish(), {Type::FloatishDoubleQ()}},
+ {Type::FloatQ(),
+ {Type::FloatishDoubleQ(), Type::FloatQDoubleQ(),
+ Type::Floatish()}},
+ {Type::FixNum(),
+ {Type::Signed(), Type::Extern(), Type::Unsigned(), Type::Int(),
+ Type::Intish()}},
+ {Type::Unsigned(), {Type::Int(), Type::Intish()}},
+ {Type::Signed(), {Type::Extern(), Type::Int(), Type::Intish()}},
+ {Type::Int(), {Type::Intish()}},
+ {Type::DoubleQ(), {Type::FloatishDoubleQ(), Type::FloatQDoubleQ()}},
+ {Type::Double(),
+ {Type::FloatishDoubleQ(), Type::FloatQDoubleQ(), Type::DoubleQ(),
+ Type::Extern()}},
+ }) {}
+
+ protected:
+ std::unordered_set<Type*> ParentsOf(Type* derived) const {
+ const auto parents_iter = parents_.find(derived);
+ if (parents_iter == parents_.end()) {
+ return std::unordered_set<Type*>();
+ }
+ return parents_iter->second;
+ }
+
+ class FunctionTypeBuilder {
+ public:
+ FunctionTypeBuilder(FunctionTypeBuilder&& b)
+ : function_type_(b.function_type_) {
+ b.function_type_ = nullptr;
+ }
+
+ FunctionTypeBuilder& operator=(FunctionTypeBuilder&& b) {
+ if (this != &b) {
+ function_type_ = b.function_type_;
+ b.function_type_ = nullptr;
+ }
+ return *this;
+ }
+
+ FunctionTypeBuilder(Zone* zone, Type* return_type)
+ : function_type_(Type::Function(zone, return_type)) {}
+
+ private:
+ static void AddAllArguments(AsmFunctionType*) {}
+
+ template <typename Arg, typename... Others>
+ static void AddAllArguments(AsmFunctionType* function_type, Arg* arg,
+ Others... others) {
+ CHECK(function_type != nullptr);
+ function_type->AddArgument((*arg)());
+ AddAllArguments(function_type, others...);
+ }
+
+ public:
+ template <typename... Args>
+ Type* operator()(Args... args) {
+ Type* ret = function_type_;
+ function_type_ = nullptr;
+ AddAllArguments(ret->AsFunctionType(), args...);
+ return ret;
+ }
+
+ private:
+ Type* function_type_;
+ };
+
+ FunctionTypeBuilder Function(Type* (*return_type)()) {
+ return FunctionTypeBuilder(zone(), (*return_type)());
+ }
+
+ template <typename... Overloads>
+ Type* Overload(Overloads... overloads) {
+ auto* ret = Type::OverloadedFunction(zone());
+ AddAllOverloads(ret->AsOverloadedFunctionType(), overloads...);
+ return ret;
+ }
+
+ private:
+ static void AddAllOverloads(AsmOverloadedFunctionType*) {}
+
+ template <typename Overload, typename... Others>
+ static void AddAllOverloads(AsmOverloadedFunctionType* function,
+ Overload* overload, Others... others) {
+ CHECK(function != nullptr);
+ function->AddOverload(overload);
+ AddAllOverloads(function, others...);
+ }
+
+ const std::unordered_map<Type*, std::unordered_set<Type*>> parents_;
+};
+
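+// A usage sketch for the builder above: Function(Type::Int)(Type::Double)
+// constructs the function type "(double) -> int", and Overload(...) groups
+// several such signatures; the Names test below exercises exactly this.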
+// AsmValueTypeParents exposes the bitmasks of the parents of each value type
+// in asm's type system. It inherits from AsmValueType so that the kAsm<Foo>
+// members are available when expanding the FOR_EACH_ASM_VALUE_TYPE_LIST macro.
+class AsmValueTypeParents : private AsmValueType {
+ public:
+ enum : uint32_t {
+#define V(CamelName, string_name, number, parent_types) \
+ CamelName = parent_types,
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsmValueTypeParents);
+};
+
+TEST_F(AsmTypeTest, ValidateBits) {
+ // Generic validation tests for the bits in the type system's type
+ // definitions.
+
+ std::unordered_set<Type*> seen_types;
+ std::unordered_set<uint32_t> seen_numbers;
+ uint32_t total_types = 0;
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ ++total_types; \
+ if (AsmValueTypeParents::CamelName != 0) { \
+ EXPECT_NE(0, ParentsOf(AsmType::CamelName()).size()) << #CamelName; \
+ } \
+ seen_types.insert(Type::CamelName()); \
+ seen_numbers.insert(number); \
+ /* Every ASM type must have a valid number. */ \
+ EXPECT_NE(0, number) << Type::CamelName()->Name(); \
+ /* Inheritance cycles - unlikely, but we're paranoid and check for it */ \
+ /* anyway. */ \
+ EXPECT_EQ(0, (1 << (number)) & AsmValueTypeParents::CamelName); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+
+ // At least one type was expanded.
+ EXPECT_GT(total_types, 0u);
+
+ // Each value type is unique.
+ EXPECT_EQ(total_types, seen_types.size());
+
+ // Each number is unique.
+ EXPECT_EQ(total_types, seen_numbers.size());
+}
+
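+// As the reinterpret_cast below suggests, value types are not heap objects:
+// each Type* encodes a bitset in the pointer value itself, holding the type's
+// own bit together with the bits of all of its parents.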
+TEST_F(AsmTypeTest, SaneParentsMap) {
+ // This test ensures our parents map contains all the parent types that are
+ // specified in the types' declarations. It does not report bogus inheritance.
+
+ // Handy-dandy lambda for counting bits (the classic SWAR popcount). Code
+ // borrowed from Stack Overflow.
+ auto NumberOfSetBits = [](uintptr_t parent_mask) -> uint32_t {
+ uint32_t parent_mask32 = static_cast<uint32_t>(parent_mask);
+ CHECK_EQ(parent_mask, parent_mask32);
+ parent_mask32 = parent_mask32 - ((parent_mask32 >> 1) & 0x55555555);
+ parent_mask32 =
+ (parent_mask32 & 0x33333333) + ((parent_mask32 >> 2) & 0x33333333);
+ return (((parent_mask32 + (parent_mask32 >> 4)) & 0x0F0F0F0F) *
+ 0x01010101) >>
+ 24;
+ };
+
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ const uintptr_t parents = \
+ reinterpret_cast<uintptr_t>(Type::CamelName()) & ~(1 << (number)); \
+ EXPECT_EQ(NumberOfSetBits(parents), \
+ 1 + ParentsOf(Type::CamelName()).size()) \
+ << Type::CamelName()->Name() << ", parents " \
+ << reinterpret_cast<void*>(parents) << ", type " \
+ << static_cast<void*>(Type::CamelName()); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+}
+
+TEST_F(AsmTypeTest, Names) {
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ EXPECT_THAT(Type::CamelName()->Name(), StrEq(string_name)); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+
+ EXPECT_THAT(Function(Type::Int)(Type::Double, Type::Float)->Name(),
+ StrEq("(double, float) -> int"));
+
+ EXPECT_THAT(Overload(Function(Type::Int)(Type::Double, Type::Float),
+ Function(Type::Int)(Type::Int))
+ ->Name(),
+ StrEq("(double, float) -> int /\\ (int) -> int"));
+
+ EXPECT_THAT(Type::FroundType(zone())->Name(), StrEq("fround"));
+
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Signed(), Type::Int())->Name(),
+ StrEq("(int, int...) -> signed"));
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Float(), Type::Floatish())->Name(),
+ StrEq("(floatish, floatish...) -> float"));
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Double(), Type::DoubleQ())->Name(),
+ StrEq("(double?, double?...) -> double"));
+
+ EXPECT_THAT(Type::FFIType(zone())->Name(), StrEq("Function"));
+
+ auto* ft =
+ Type::FunctionTableType(zone(), 15, Function(Type::Double)(Type::Int));
+ EXPECT_THAT(ft->Name(), StrEq("((int) -> double)[15]"));
+}
+
+TEST_F(AsmTypeTest, IsExactly) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ for (size_t jj = 0; jj < arraysize(test_types); ++jj) {
+ EXPECT_EQ(ii == jj, test_types[ii]->IsExactly(test_types[jj]))
+ << test_types[ii]->Name()
+ << ((ii == jj) ? " is not exactly " : " is exactly ")
+ << test_types[jj]->Name();
+ }
+ }
+}
+
+bool FunctionsWithSameSignature(AsmType* a, AsmType* b) {
+ if (a->AsFunctionType()) {
+ if (b->AsFunctionType()) {
+ return a->IsA(b);
+ }
+ }
+ return false;
+}
+
+TEST_F(AsmTypeTest, IsA) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ for (size_t jj = 0; jj < arraysize(test_types); ++jj) {
+ const bool Expected =
+ (ii == jj) || ParentsOf(test_types[ii]).count(test_types[jj]) != 0 ||
+ FunctionsWithSameSignature(test_types[ii], test_types[jj]);
+ EXPECT_EQ(Expected, test_types[ii]->IsA(test_types[jj]))
+ << test_types[ii]->Name() << (Expected ? " is not a " : " is a ")
+ << test_types[jj]->Name();
+ }
+ }
+
+ EXPECT_TRUE(Function(Type::Int)(Type::Int, Type::Int)
+ ->IsA(Function(Type::Int)(Type::Int, Type::Int)));
+
+ EXPECT_FALSE(Function(Type::Int)(Type::Int, Type::Int)
+ ->IsA(Function(Type::Double)(Type::Int, Type::Int)));
+ EXPECT_FALSE(Function(Type::Int)(Type::Int, Type::Int)
+ ->IsA(Function(Type::Int)(Type::Double, Type::Int)));
+}
+
+TEST_F(AsmTypeTest, CanBeInvokedWith) {
+ auto* min_max_int = Type::MinMaxType(zone(), Type::Signed(), Type::Int());
+ auto* i2s = Function(Type::Signed)(Type::Int);
+ auto* ii2s = Function(Type::Signed)(Type::Int, Type::Int);
+ auto* iii2s = Function(Type::Signed)(Type::Int, Type::Int, Type::Int);
+ auto* iiii2s =
+ Function(Type::Signed)(Type::Int, Type::Int, Type::Int, Type::Int);
+
+ EXPECT_TRUE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ ii2s->AsFunctionType()->ReturnType(),
+ ii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ iii2s->AsFunctionType()->ReturnType(),
+ iii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ iiii2s->AsFunctionType()->ReturnType(),
+ iiii2s->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(min_max_int->AsCallableType()->CanBeInvokedWith(
+ i2s->AsFunctionType()->ReturnType(), i2s->AsFunctionType()->Arguments()));
+
+ auto* min_max_double =
+ Type::MinMaxType(zone(), Type::Double(), Type::Double());
+ auto* d2d = Function(Type::Double)(Type::Double);
+ auto* dd2d = Function(Type::Double)(Type::Double, Type::Double);
+ auto* ddd2d =
+ Function(Type::Double)(Type::Double, Type::Double, Type::Double);
+ auto* dddd2d = Function(Type::Double)(Type::Double, Type::Double,
+ Type::Double, Type::Double);
+ EXPECT_TRUE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ dd2d->AsFunctionType()->ReturnType(),
+ dd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ ddd2d->AsFunctionType()->ReturnType(),
+ ddd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ dddd2d->AsFunctionType()->ReturnType(),
+ dddd2d->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(min_max_double->AsCallableType()->CanBeInvokedWith(
+ d2d->AsFunctionType()->ReturnType(), d2d->AsFunctionType()->Arguments()));
+
+ auto* min_max = Overload(min_max_int, min_max_double);
+ EXPECT_FALSE(min_max->AsCallableType()->CanBeInvokedWith(
+ i2s->AsFunctionType()->ReturnType(), i2s->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(min_max->AsCallableType()->CanBeInvokedWith(
+ d2d->AsFunctionType()->ReturnType(), d2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ ii2s->AsFunctionType()->ReturnType(),
+ ii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ iii2s->AsFunctionType()->ReturnType(),
+ iii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ iiii2s->AsFunctionType()->ReturnType(),
+ iiii2s->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ dd2d->AsFunctionType()->ReturnType(),
+ dd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ ddd2d->AsFunctionType()->ReturnType(),
+ ddd2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(min_max->AsCallableType()->CanBeInvokedWith(
+ dddd2d->AsFunctionType()->ReturnType(),
+ dddd2d->AsFunctionType()->Arguments()));
+
+ auto* fround = Type::FroundType(zone());
+
+ ZoneVector<AsmType*> arg(zone());
+ arg.push_back(Type::Floatish());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::FloatQ());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Float());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::DoubleQ());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Double());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Signed());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Unsigned());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::FixNum());
+ EXPECT_TRUE(fround->AsCallableType()->CanBeInvokedWith(Type::Float(), arg));
+
+ auto* idf2v = Function(Type::Void)(Type::Int, Type::Double, Type::Float);
+ auto* i2d = Function(Type::Double)(Type::Int);
+ auto* i2f = Function(Type::Float)(Type::Int);
+ auto* fi2d = Function(Type::Double)(Type::Float, Type::Int);
+ auto* idif2i =
+ Function(Type::Int)(Type::Int, Type::Double, Type::Int, Type::Float);
+ auto* overload = Overload(idf2v, i2f, /*i2d missing, */ fi2d, idif2i);
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ idf2v->AsFunctionType()->ReturnType(),
+ idf2v->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ i2f->AsFunctionType()->ReturnType(), i2f->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ fi2d->AsFunctionType()->ReturnType(),
+ fi2d->AsFunctionType()->Arguments()));
+ EXPECT_TRUE(overload->AsCallableType()->CanBeInvokedWith(
+ idif2i->AsFunctionType()->ReturnType(),
+ idif2i->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(overload->AsCallableType()->CanBeInvokedWith(
+ i2d->AsFunctionType()->ReturnType(), i2d->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(i2f->AsCallableType()->CanBeInvokedWith(
+ i2d->AsFunctionType()->ReturnType(), i2d->AsFunctionType()->Arguments()));
+
+ auto* ffi = Type::FFIType(zone());
+ AsmType* (*kReturnTypes[])() = {
+ Type::Void, Type::Double, Type::Signed,
+ };
+ AsmType* (*kParameterTypes[])() = {
+ Type::Double, Type::Signed, Type::FixNum,
+ };
+ for (size_t ii = 0; ii < arraysize(kReturnTypes); ++ii) {
+ for (size_t jj = 0; jj < arraysize(kParameterTypes); ++jj) {
+ auto* f = Function(kReturnTypes[ii])(kParameterTypes[jj]);
+ EXPECT_TRUE(ffi->AsCallableType()->CanBeInvokedWith(
+ f->AsFunctionType()->ReturnType(), f->AsFunctionType()->Arguments()))
+ << kReturnTypes[ii]()->Name();
+
+ // Calling with a non-parameter type should fail.
+ f = Function(kReturnTypes[ii])(kParameterTypes[jj], Type::Int);
+ EXPECT_FALSE(ffi->AsCallableType()->CanBeInvokedWith(
+ f->AsFunctionType()->ReturnType(), f->AsFunctionType()->Arguments()))
+ << kReturnTypes[ii]()->Name();
+ }
+ }
+
+ auto* ft0 = Type::FunctionTableType(zone(), 10, fi2d);
+ EXPECT_TRUE(ft0->AsCallableType()->CanBeInvokedWith(
+ fi2d->AsFunctionType()->ReturnType(),
+ fi2d->AsFunctionType()->Arguments()));
+ EXPECT_FALSE(ft0->AsCallableType()->CanBeInvokedWith(
+ i2d->AsFunctionType()->ReturnType(), i2d->AsFunctionType()->Arguments()));
+}
+
+TEST_F(AsmTypeTest, ToReturnType) {
+ std::unordered_map<AsmType*, AsmType*> kToReturnType = {
+ {Type::Signed(), Type::Signed()}, {Type::FixNum(), Type::Signed()},
+ {Type::Double(), Type::Double()}, {Type::Float(), Type::Float()},
+ {Type::Void(), Type::Void()},
+ };
+
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ auto* return_type = Type::None();
+ auto to_return_type_iter = kToReturnType.find(test_types[ii]);
+ if (to_return_type_iter != kToReturnType.end()) {
+ return_type = to_return_type_iter->second;
+ }
+ EXPECT_EQ(return_type, test_types[ii]->ToReturnType())
+ << return_type->Name() << " != " << test_types[ii]->ToReturnType();
+ }
+}
+
+TEST_F(AsmTypeTest, IsReturnType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> return_types{
+ Type::Double(), Type::Signed(), Type::Float(), Type::Void(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsReturnType = return_types.count(test_types[ii]);
+ EXPECT_EQ(IsReturnType, test_types[ii]->IsReturnType())
+ << test_types[ii]->Name()
+ << (IsReturnType ? " is not a return type" : " is a return type");
+ }
+}
+
+TEST_F(AsmTypeTest, ToParameterType) {
+ std::unordered_map<AsmType*, AsmType*> kToParameterType = {
+ {Type::Int(), Type::Int()}, {Type::Signed(), Type::Int()},
+ {Type::Unsigned(), Type::Int()}, {Type::FixNum(), Type::Int()},
+ {Type::Double(), Type::Double()}, {Type::Float(), Type::Float()},
+ };
+
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ auto* parameter_type = Type::None();
+ auto to_parameter_type_iter = kToParameterType.find(test_types[ii]);
+ if (to_parameter_type_iter != kToParameterType.end()) {
+ parameter_type = to_parameter_type_iter->second;
+ }
+ EXPECT_EQ(parameter_type, test_types[ii]->ToParameterType())
+ << parameter_type->Name()
+ << " != " << test_types[ii]->ToParameterType();
+ }
+}
+
+TEST_F(AsmTypeTest, IsParameterType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> parameter_types{
+ Type::Double(), Type::Int(), Type::Float(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsParameterType = parameter_types.count(test_types[ii]);
+ EXPECT_EQ(IsParameterType, test_types[ii]->IsParameterType())
+ << test_types[ii]->Name()
+ << (IsParameterType ? " is not a parameter type"
+ : " is a parameter type");
+ }
+}
+
+TEST_F(AsmTypeTest, IsComparableType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> comparable_types{
+ Type::Double(), Type::Signed(), Type::Unsigned(), Type::Float(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsComparableType = comparable_types.count(test_types[ii]);
+ EXPECT_EQ(IsComparableType, test_types[ii]->IsComparableType())
+ << test_types[ii]->Name()
+ << (IsComparableType ? " is not a comparable type"
+ : " is a comparable type");
+ }
+}
+
+TEST_F(AsmTypeTest, ElementSizeInBytes) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto ElementSizeInBytesForType = [](Type* type) -> int32_t {
+ if (type == Type::Int8Array() || type == Type::Uint8Array()) {
+ return 1;
+ }
+ if (type == Type::Int16Array() || type == Type::Uint16Array()) {
+ return 2;
+ }
+ if (type == Type::Int32Array() || type == Type::Uint32Array() ||
+ type == Type::Float32Array()) {
+ return 4;
+ }
+ if (type == Type::Float64Array()) {
+ return 8;
+ }
+ return -1;
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(ElementSizeInBytesForType(test_types[ii]),
+ test_types[ii]->ElementSizeInBytes());
+ }
+}
+
+TEST_F(AsmTypeTest, LoadType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto LoadTypeForType = [](Type* type) -> Type* {
+ if (type == Type::Int8Array() || type == Type::Uint8Array() ||
+ type == Type::Int16Array() || type == Type::Uint16Array() ||
+ type == Type::Int32Array() || type == Type::Uint32Array()) {
+ return Type::Intish();
+ }
+
+ if (type == Type::Float32Array()) {
+ return Type::FloatQ();
+ }
+
+ if (type == Type::Float64Array()) {
+ return Type::DoubleQ();
+ }
+
+ return Type::None();
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(LoadTypeForType(test_types[ii]), test_types[ii]->LoadType());
+ }
+}
+
+TEST_F(AsmTypeTest, StoreType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto StoreTypeForType = [](Type* type) -> Type* {
+ if (type == Type::Int8Array() || type == Type::Uint8Array() ||
+ type == Type::Int16Array() || type == Type::Uint16Array() ||
+ type == Type::Int32Array() || type == Type::Uint32Array()) {
+ return Type::Intish();
+ }
+
+ if (type == Type::Float32Array()) {
+ return Type::FloatishDoubleQ();
+ }
+
+ if (type == Type::Float64Array()) {
+ return Type::FloatQDoubleQ();
+ }
+
+ return Type::None();
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(StoreTypeForType(test_types[ii]), test_types[ii]->StoreType())
+ << test_types[ii]->Name();
+ }
+}
+
+} // namespace
+} // namespace wasm
+} // namespace internal
+} // namespace v8
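
Every asm-types test above follows the same table-driven shape: one array enumerates every AsmType (the value types generated from FOR_EACH_ASM_VALUE_TYPE_LIST plus hand-built function, overload, min/max, fround, FFI, and function-table types), the expected answer for the predicate under test is written as data (an unordered_set, a map, or a small lambda), and a single loop compares the two with EXPECT_EQ. A minimal standalone sketch of that pattern, assuming nothing from V8 or gtest — the Shape enum and IsRound predicate below are illustrative stand-ins, not part of this patch:

// Self-contained illustration of the table-driven predicate tests above.
// Build with: c++ -std=c++11 sketch.cc && ./a.out
#include <cassert>
#include <cstddef>
#include <unordered_set>

enum class Shape { kCircle, kSquare, kTriangle, kEllipse };

int main() {
  // 1. One table holds every value the predicate must be checked against,
  //    mirroring Type* test_types[] in the tests above.
  Shape test_shapes[] = {Shape::kCircle, Shape::kSquare, Shape::kTriangle,
                         Shape::kEllipse};

  // 2. The expected answer is written as data, mirroring
  //    std::unordered_set<Type*> return_types{...} above.
  std::unordered_set<int> round_shapes{static_cast<int>(Shape::kCircle),
                                       static_cast<int>(Shape::kEllipse)};

  // 3. The predicate under test (stands in for Type::IsReturnType() etc.).
  auto IsRound = [](Shape s) {
    return s == Shape::kCircle || s == Shape::kEllipse;
  };

  // 4. One loop compares predicate against table, as the tests above do
  //    with EXPECT_EQ over arraysize(test_types).
  for (size_t i = 0; i < sizeof(test_shapes) / sizeof(test_shapes[0]); ++i) {
    bool expected = round_shapes.count(static_cast<int>(test_shapes[i])) != 0;
    assert(expected == IsRound(test_shapes[i]));
  }
  return 0;
}

The payoff of this structure, visible in the LoadType/StoreType tests, is that adding a new type to the system only requires touching the table and the expected-answer data, never the loop.
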
diff --git a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
index 0b1b79ea86..7311f063a0 100644
--- a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
@@ -18,9 +18,13 @@ namespace v8 {
namespace internal {
namespace wasm {
+#define B1(a) kExprBlock, a, kExprEnd
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
+
static const byte kCodeGetLocal0[] = {kExprGetLocal, 0};
static const byte kCodeGetLocal1[] = {kExprGetLocal, 1};
-static const byte kCodeSetLocal0[] = {kExprSetLocal, 0, kExprI8Const, 0};
+static const byte kCodeSetLocal0[] = {WASM_SET_LOCAL(0, WASM_ZERO)};
static const LocalType kLocalTypes[] = {kAstI32, kAstI64, kAstF32, kAstF64};
static const MachineType machineTypes[] = {
@@ -36,7 +40,7 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
kExprI32LeS, kExprI32LtU, kExprI32LeU};
#define WASM_BRV_IF_ZERO(depth, val) \
- kExprBrIf, static_cast<byte>(depth), val, WASM_ZERO
+ val, WASM_ZERO, kExprBrIf, ARITY_1, static_cast<byte>(depth)
#define EXPECT_VERIFIES(env, x) Verify(kSuccess, env, x, x + arraysize(x))
@@ -48,7 +52,6 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
Verify(kSuccess, env, code, code + arraysize(code)); \
} while (false)
-
#define EXPECT_FAILURE_INLINE(env, ...) \
do { \
static byte code[] = {__VA_ARGS__}; \
@@ -65,7 +68,8 @@ class AstDecoderTest : public TestWithZone {
public:
typedef std::pair<uint32_t, LocalType> LocalsDecl;
- AstDecoderTest() : module(nullptr) {}
+ AstDecoderTest() : module(nullptr), local_decls(zone()) {}
+
TestSignatures sigs;
ModuleEnv* module;
LocalDeclEncoder local_decls;
@@ -74,13 +78,13 @@ class AstDecoderTest : public TestWithZone {
local_decls.AddLocals(count, type);
}
- // Preprends local variable declarations and renders nice error messages for
+ // Prepends local variable declarations and renders nice error messages for
// verification failures.
void Verify(ErrorCode expected, FunctionSig* sig, const byte* start,
const byte* end) {
- local_decls.Prepend(&start, &end);
+ local_decls.Prepend(zone(), &start, &end);
// Verify the code.
- TreeResult result =
+ DecodeResult result =
VerifyWasmCode(zone()->allocator(), module, sig, start, end);
if (result.error_code != expected) {
@@ -101,14 +105,11 @@ class AstDecoderTest : public TestWithZone {
}
FATAL(str.str().c_str());
}
-
- delete[] start; // local_decls.Prepend() allocated a new buffer.
}
void TestBinop(WasmOpcode opcode, FunctionSig* success) {
// op(local[0], local[1])
- byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0, kExprGetLocal,
- 1};
+ byte code[] = {WASM_BINOP(opcode, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
EXPECT_VERIFIES(success, code);
// Try all combinations of return and parameter types.
@@ -134,7 +135,7 @@ class AstDecoderTest : public TestWithZone {
void TestUnop(WasmOpcode opcode, LocalType ret_type, LocalType param_type) {
// Return(op(local[0]))
- byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0};
+ byte code[] = {WASM_UNOP(opcode, WASM_GET_LOCAL(0))};
{
LocalType types[] = {ret_type, param_type};
FunctionSig sig(1, 1, types);
@@ -155,7 +156,6 @@ class AstDecoderTest : public TestWithZone {
}
};
-
TEST_F(AstDecoderTest, Int8Const) {
byte code[] = {kExprI8Const, 0};
for (int i = -128; i < 128; i++) {
@@ -176,12 +176,6 @@ TEST_F(AstDecoderTest, IncompleteIf1) {
EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IncompleteIf2) {
- byte code[] = {kExprIf, kExprI8Const, 0};
- EXPECT_FAILURE(sigs.v_v(), code);
- EXPECT_FAILURE(sigs.i_i(), code);
-}
-
TEST_F(AstDecoderTest, Int8Const_fallthru) {
byte code[] = {kExprI8Const, 0, kExprI8Const, 1};
EXPECT_VERIFIES(sigs.i_i(), code);
@@ -303,199 +297,294 @@ TEST_F(AstDecoderTest, Binops_off_end) {
EXPECT_FAILURE(sigs.i_i(), code1);
}
- byte code3[] = {0, kExprGetLocal, 0}; // [opcode] [expr]
+ byte code3[] = {kExprGetLocal, 0, 0}; // [expr] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
- code3[0] = kInt32BinopOpcodes[i];
+ code3[2] = kInt32BinopOpcodes[i];
EXPECT_FAILURE(sigs.i_i(), code3);
}
- byte code4[] = {0, kExprGetLocal, 0, 0}; // [opcode] [expr] [opcode]
+ byte code4[] = {kExprGetLocal, 0, 0, 0}; // [expr] [opcode] [opcode]
for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
- code4[0] = kInt32BinopOpcodes[i];
+ code4[2] = kInt32BinopOpcodes[i];
code4[3] = kInt32BinopOpcodes[i];
EXPECT_FAILURE(sigs.i_i(), code4);
}
}
+TEST_F(AstDecoderTest, BinopsAcrossBlock1) {
+ static const byte code[] = {WASM_ZERO, kExprBlock, WASM_ZERO, kExprI32Add,
+ kExprEnd};
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, BinopsAcrossBlock2) {
+ static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprBlock, kExprI32Add,
+ kExprEnd};
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, BinopsAcrossBlock3) {
+ static const byte code[] = {WASM_ZERO, WASM_ZERO, kExprIf, kExprI32Add,
+ kExprElse, kExprI32Add, kExprEnd};
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
-//===================================================================
-//== Statements
-//===================================================================
TEST_F(AstDecoderTest, Nop) {
static const byte code[] = {kExprNop};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, SetLocal0_param) {
- static const byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES(sigs.i_i(), kCodeSetLocal0);
+ EXPECT_FAILURE(sigs.f_ff(), kCodeSetLocal0);
+ EXPECT_FAILURE(sigs.d_dd(), kCodeSetLocal0);
}
TEST_F(AstDecoderTest, SetLocal0_local) {
- byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_FAILURE(sigs.i_v(), kCodeSetLocal0);
AddLocals(kAstI32, 1);
- EXPECT_VERIFIES(sigs.i_v(), code);
+ EXPECT_VERIFIES(sigs.i_v(), kCodeSetLocal0);
}
TEST_F(AstDecoderTest, SetLocalN_local) {
for (byte i = 1; i < 8; i++) {
AddLocals(kAstI32, 1);
for (byte j = 0; j < i; j++) {
- byte code[] = {kExprSetLocal, j, kExprI8Const, i};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_SET_LOCAL(j, WASM_I8(i)));
}
}
}
+TEST_F(AstDecoderTest, BlockN) {
+ const int kMaxSize = 200;
+ byte buffer[kMaxSize + 2];
+
+ for (int i = 0; i <= kMaxSize; i++) {
+ memset(buffer, kExprNop, sizeof(buffer));
+ buffer[0] = kExprBlock;
+ buffer[i + 1] = kExprEnd;
+ Verify(kSuccess, sigs.v_i(), buffer, buffer + i + 2);
+ }
+}
+
TEST_F(AstDecoderTest, Block0) {
- static const byte code[] = {kExprBlock, 0};
+ static const byte code[] = {kExprBlock, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
TEST_F(AstDecoderTest, Block0_fallthru1) {
- static const byte code[] = {kExprBlock, 0, kExprBlock, 0};
+ static const byte code[] = {kExprBlock, kExprBlock, kExprEnd, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, Block0Block0) {
+ static const byte code[] = {kExprBlock, kExprEnd, kExprBlock, kExprEnd};
+ EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, Block0_end_end) {
+ static const byte code[] = {kExprBlock, kExprEnd, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, Block1) {
- static const byte code[] = {kExprBlock, 1, kExprSetLocal, 0, kExprI8Const, 0};
+ byte code[] = {B1(WASM_SET_LOCAL(0, WASM_ZERO))};
EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
}
-TEST_F(AstDecoderTest, Block0_fallthru2) {
- static const byte code[] = {kExprBlock, 0, kExprSetLocal, 0, kExprI8Const, 0};
+TEST_F(AstDecoderTest, Block1_i) {
+ byte code[] = {B1(WASM_ZERO)};
EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
+ EXPECT_FAILURE(sigs.l_ll(), code);
+}
+
+TEST_F(AstDecoderTest, Block1_f) {
+ byte code[] = {B1(WASM_F32(0))};
+ EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_VERIFIES(sigs.f_ff(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
+ EXPECT_FAILURE(sigs.l_ll(), code);
+}
+
+TEST_F(AstDecoderTest, Block1_continue) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR(0)));
+ EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(1)));
+ EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(2)));
+ EXPECT_FAILURE_INLINE(sigs.v_v(), B1(WASM_BR(3)));
+}
+
+TEST_F(AstDecoderTest, Block1_br) {
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprBlock, kExprBr, ARITY_1, DEPTH_0,
+ kExprEnd);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprBlock, kExprBr, ARITY_0, DEPTH_0,
+ kExprEnd);
+}
+
+TEST_F(AstDecoderTest, Block2_br) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_NOP, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_NOP));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_BR(0)));
}
TEST_F(AstDecoderTest, Block2) {
- static const byte code[] = {kExprBlock, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprSetLocal, 0, kExprI8Const, 0}; // --
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ B2(WASM_NOP, WASM_SET_LOCAL(0, WASM_ZERO)));
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)));
+}
+
+TEST_F(AstDecoderTest, Block2b) {
+ byte code[] = {B2(WASM_SET_LOCAL(0, WASM_ZERO), WASM_ZERO)};
EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
}
TEST_F(AstDecoderTest, Block2_fallthru) {
- static const byte code[] = {kExprBlock, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprI8Const, 11}; // --
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_I8(23));
}
-TEST_F(AstDecoderTest, BlockN) {
- byte block[] = {kExprBlock, 2};
+TEST_F(AstDecoderTest, Block3) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(), B3(WASM_SET_LOCAL(0, WASM_ZERO), WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_I8(11)));
+}
- for (size_t i = 0; i < 10; i++) {
- size_t total = sizeof(block) + sizeof(kCodeSetLocal0) * i;
- byte* code = reinterpret_cast<byte*>(malloc(total));
- memcpy(code, block, sizeof(block));
- code[1] = static_cast<byte>(i);
- for (size_t j = 0; j < i; j++) {
- memcpy(code + sizeof(block) + j * sizeof(kCodeSetLocal0), kCodeSetLocal0,
- sizeof(kCodeSetLocal0));
- }
- Verify(kSuccess, sigs.v_i(), code, code + total);
- free(code);
- }
+TEST_F(AstDecoderTest, Block5) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), B1(WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), B2(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), B3(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_BLOCK(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_BLOCK(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+}
+
+TEST_F(AstDecoderTest, BlockF32) {
+ static const byte code[] = {kExprBlock, kExprF32Const, 0, 0, 0, 0, kExprEnd};
+ EXPECT_VERIFIES(sigs.f_ff(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+ EXPECT_FAILURE(sigs.d_dd(), code);
}
TEST_F(AstDecoderTest, BlockN_off_end) {
- for (byte i = 2; i < 10; i++) {
- byte code[] = {kExprBlock, i, kExprNop};
- EXPECT_FAILURE(sigs.v_v(), code);
+ byte code[] = {kExprBlock, kExprNop, kExprNop, kExprNop, kExprNop, kExprEnd};
+ EXPECT_VERIFIES(sigs.v_v(), code);
+ for (size_t i = 1; i < arraysize(code); i++) {
+ Verify(kError, sigs.v_v(), code, code + i);
}
}
-TEST_F(AstDecoderTest, Block1_break) {
- static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+TEST_F(AstDecoderTest, Block2_continue) {
+ static const byte code[] = {kExprBlock, kExprBr, ARITY_0,
+ DEPTH_1, kExprNop, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
}
-TEST_F(AstDecoderTest, Block2_break) {
- static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+TEST_F(AstDecoderTest, NestedBlock_return) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), B1(B1(WASM_RETURN1(WASM_ZERO))));
}
-TEST_F(AstDecoderTest, Block1_continue) {
- static const byte code[] = {kExprBlock, 1, kExprBr, 1, kExprNop};
- EXPECT_FAILURE(sigs.v_v(), code);
+TEST_F(AstDecoderTest, BlockBinop) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(B1(WASM_I8(1)), WASM_I8(2)));
}
-TEST_F(AstDecoderTest, Block2_continue) {
- static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 1, kExprNop};
- EXPECT_FAILURE(sigs.v_v(), code);
+TEST_F(AstDecoderTest, BlockBrBinop) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_I32_AND(B1(WASM_BRV(0, WASM_I8(1))), WASM_I8(2)));
}
-TEST_F(AstDecoderTest, ExprBlock0) {
- static const byte code[] = {kExprBlock, 0};
- EXPECT_VERIFIES(sigs.v_v(), code);
+TEST_F(AstDecoderTest, If_empty1) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprEnd);
}
-TEST_F(AstDecoderTest, ExprBlock1a) {
- static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+TEST_F(AstDecoderTest, If_empty2) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprElse, kExprEnd);
}
-TEST_F(AstDecoderTest, ExprBlock1b) {
- static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
- EXPECT_FAILURE(sigs.f_ff(), code);
+TEST_F(AstDecoderTest, If_empty3) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, WASM_ZERO, kExprElse,
+ kExprEnd);
}
-TEST_F(AstDecoderTest, ExprBlock1c) {
- static const byte code[] = {kExprBlock, 1, kExprF32Const, 0, 0, 0, 0};
- EXPECT_VERIFIES(sigs.f_ff(), code);
+TEST_F(AstDecoderTest, If_empty4) {
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_ZERO, kExprIf, kExprElse, WASM_ZERO,
+ kExprEnd);
}
-TEST_F(AstDecoderTest, IfEmpty) {
- static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_empty_stack) {
+ byte code[] = {kExprIf};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IfSet) {
- static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprSetLocal,
- 0, kExprI8Const, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_incomplete1) {
+ byte code[] = {kExprI8Const, 0, kExprIf};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IfBlock1) {
- static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprBlock,
- 1, kExprSetLocal, 0, kExprI8Const,
- 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_incomplete2) {
+ byte code[] = {kExprI8Const, 0, kExprIf, kExprNop};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
}
-TEST_F(AstDecoderTest, IfBlock2) {
- static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprBlock,
- 2, kExprSetLocal, 0, kExprI8Const,
- 0, kExprSetLocal, 0, kExprI8Const,
- 0};
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, If_else_else) {
+ byte code[] = {kExprI8Const, 0, kExprIf, kExprElse, kExprElse, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfEmpty) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprGetLocal, 0, kExprIf, kExprEnd);
+}
+
+TEST_F(AstDecoderTest, IfSet) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
}
TEST_F(AstDecoderTest, IfElseEmpty) {
- static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprNop,
- kExprNop};
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_GET_LOCAL(0), kExprIf, kExprElse,
+ kExprEnd);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
}
-TEST_F(AstDecoderTest, IfElseSet) {
- static const byte code[] = {kExprIfElse,
- kExprGetLocal,
- 0, // --
- kExprSetLocal,
- 0,
- kExprI8Const,
- 0, // --
- kExprSetLocal,
- 0,
- kExprI8Const,
- 1}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+TEST_F(AstDecoderTest, IfElseUnreachable1) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_UNREACHABLE));
}
-TEST_F(AstDecoderTest, IfElseUnreachable) {
- static const byte code[] = {kExprIfElse, kExprI8Const, 0,
- kExprUnreachable, kExprGetLocal, 0};
+TEST_F(AstDecoderTest, IfElseUnreachable2) {
+ static const byte code[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_UNREACHABLE, WASM_GET_LOCAL(0))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType types[] = {kAstI32, kLocalTypes[i]};
@@ -509,66 +598,136 @@ TEST_F(AstDecoderTest, IfElseUnreachable) {
}
}
+TEST_F(AstDecoderTest, IfBreak) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_BR(0)));
+ EXPECT_FAILURE_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_BR(1)));
+}
+
+TEST_F(AstDecoderTest, IfElseBreak) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(0)));
+ EXPECT_FAILURE_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_BR(1)));
+}
+
+TEST_F(AstDecoderTest, Block_else) {
+ byte code[] = {kExprI8Const, 0, kExprBlock, kExprElse, kExprEnd};
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.i_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfNop) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfNopElseNop) {
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, If_end_end) {
+ static const byte code[] = {kExprGetLocal, 0, kExprIf, kExprEnd, kExprEnd};
+ EXPECT_FAILURE(sigs.v_i(), code);
+}
+
+TEST_F(AstDecoderTest, If_falloff) {
+ static const byte code[] = {kExprGetLocal, 0, kExprIf};
+ EXPECT_FAILURE(sigs.v_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfElse_falloff) {
+ static const byte code[] = {kExprGetLocal, 0, kExprIf, kExprNop, kExprElse};
+ EXPECT_FAILURE(sigs.v_i(), code);
+}
+
+TEST_F(AstDecoderTest, IfElseNop) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO), WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfBlock1) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0),
+ B1(WASM_SET_LOCAL(0, WASM_ZERO)), WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfBlock1b) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), B1(WASM_SET_LOCAL(0, WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, IfBlock2a) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, IfBlock2b) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), B2(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)),
+ WASM_NOP));
+}
+
+TEST_F(AstDecoderTest, IfElseSet) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.v_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_I8(1))));
+}
+
TEST_F(AstDecoderTest, Loop0) {
- static const byte code[] = {kExprLoop, 0};
+ static const byte code[] = {kExprLoop, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, Loop1) {
- static const byte code[] = {kExprLoop, 1, kExprSetLocal, 0, kExprI8Const, 0};
+ static const byte code[] = {WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO))};
EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_FAILURE(sigs.v_v(), code);
+ EXPECT_FAILURE(sigs.f_ff(), code);
}
TEST_F(AstDecoderTest, Loop2) {
- static const byte code[] = {kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprSetLocal, 0, kExprI8Const, 0}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO),
+ WASM_SET_LOCAL(0, WASM_ZERO)));
}
TEST_F(AstDecoderTest, Loop1_continue) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0)));
}
TEST_F(AstDecoderTest, Loop1_break) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 1, kExprNop};
- EXPECT_VERIFIES(sigs.v_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(1)));
}
TEST_F(AstDecoderTest, Loop2_continue) {
- static const byte code[] = {kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprBr, 0, kExprNop}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(0)));
}
TEST_F(AstDecoderTest, Loop2_break) {
- static const byte code[] = {kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprBr, 1, kExprNop}; // --
- EXPECT_VERIFIES(sigs.v_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.v_i(),
+ WASM_LOOP(WASM_SET_LOCAL(0, WASM_ZERO), WASM_BR(1)));
}
TEST_F(AstDecoderTest, ExprLoop0) {
- static const byte code[] = {kExprLoop, 0};
+ static const byte code[] = {kExprLoop, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, ExprLoop1a) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BRV(0, WASM_ZERO)));
}
TEST_F(AstDecoderTest, ExprLoop1b) {
- static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
+ EXPECT_FAILURE_INLINE(sigs.f_ff(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
}
TEST_F(AstDecoderTest, ExprLoop2_unreachable) {
- static const byte code[] = {kExprLoop, 2, kExprBr, 0,
- kExprI8Const, 0, kExprNop};
- EXPECT_VERIFIES(sigs.i_i(), code);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BR(0), WASM_NOP));
}
TEST_F(AstDecoderTest, ReturnVoid1) {
@@ -579,7 +738,7 @@ TEST_F(AstDecoderTest, ReturnVoid1) {
}
TEST_F(AstDecoderTest, ReturnVoid2) {
- static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
+ static const byte code[] = {kExprBlock, kExprBr, ARITY_0, DEPTH_0, kExprEnd};
EXPECT_VERIFIES(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
EXPECT_FAILURE(sigs.i_f(), code);
@@ -598,67 +757,85 @@ TEST_F(AstDecoderTest, ReturnVoid3) {
TEST_F(AstDecoderTest, Unreachable1) {
EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable);
EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprUnreachable, kExprUnreachable);
- EXPECT_VERIFIES_INLINE(sigs.v_v(),
- WASM_BLOCK(2, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(2, WASM_BR(0), WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(2, WASM_UNREACHABLE, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(2, WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B2(WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0), WASM_ZERO));
}
-TEST_F(AstDecoderTest, Codeiness) {
- VERIFY(kExprLoop, 2, // --
- kExprSetLocal, 0, kExprI8Const, 0, // --
- kExprBr, 0, kExprNop); // --
+TEST_F(AstDecoderTest, Unreachable_binop) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(WASM_ZERO, WASM_UNREACHABLE));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_I32_AND(WASM_UNREACHABLE, WASM_ZERO));
}
-TEST_F(AstDecoderTest, ExprIf1) {
- VERIFY(kExprIf, kExprGetLocal, 0, kExprI8Const, 0, kExprI8Const, 1);
- VERIFY(kExprIf, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0);
- VERIFY(kExprIf, kExprGetLocal, 0, kExprI32Add, kExprGetLocal, 0,
- kExprGetLocal, 0, kExprI8Const, 1);
+TEST_F(AstDecoderTest, Unreachable_select) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_SELECT(WASM_UNREACHABLE, WASM_ZERO, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_SELECT(WASM_ZERO, WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ WASM_SELECT(WASM_ZERO, WASM_ZERO, WASM_UNREACHABLE));
}
-TEST_F(AstDecoderTest, ExprIf_off_end) {
- static const byte kCode[] = {kExprIf, kExprGetLocal, 0, kExprGetLocal,
- 0, kExprGetLocal, 0};
- for (size_t len = 1; len < arraysize(kCode); len++) {
+TEST_F(AstDecoderTest, If1) {
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9), WASM_I8(8)));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(9),
+ WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(
+ sigs.i_i(),
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_I8(8)));
+}
+
+TEST_F(AstDecoderTest, If_off_end) {
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0))};
+ for (size_t len = 3; len < arraysize(kCode); len++) {
Verify(kError, sigs.i_i(), kCode, kCode + len);
}
}
-TEST_F(AstDecoderTest, ExprIf_type) {
- {
- // float|double ? 1 : 2
- static const byte kCode[] = {kExprIfElse, kExprGetLocal, 0, kExprI8Const,
- 1, kExprI8Const, 2};
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
- }
- {
- // 1 ? float|double : 2
- static const byte kCode[] = {kExprIfElse, kExprI8Const, 1, kExprGetLocal,
- 0, kExprI8Const, 2};
- EXPECT_FAILURE(sigs.i_f(), kCode);
- EXPECT_FAILURE(sigs.i_d(), kCode);
- }
- {
- // stmt ? 0 : 1
- static const byte kCode[] = {kExprIfElse, kExprNop, kExprI8Const,
- 0, kExprI8Const, 1};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- }
- {
- // 0 ? stmt : 1
- static const byte kCode[] = {kExprIfElse, kExprI8Const, 0,
- kExprNop, kExprI8Const, 1};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- }
- {
- // 0 ? 1 : stmt
- static const byte kCode[] = {kExprIfElse, kExprI8Const, 0, kExprI8Const, 1,
- 0, kExprBlock};
- EXPECT_FAILURE(sigs.i_i(), kCode);
- }
+TEST_F(AstDecoderTest, If_type1) {
+ // float|double ? 1 : 2
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_I8(0), WASM_I8(2))};
+ EXPECT_VERIFIES(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type2) {
+ // 1 ? float|double : 2
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_I8(1), WASM_GET_LOCAL(0), WASM_I8(1))};
+ EXPECT_VERIFIES(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type3) {
+ // stmt ? 0 : 1
+ static const byte kCode[] = {WASM_IF_ELSE(WASM_NOP, WASM_I8(0), WASM_I8(1))};
+ EXPECT_FAILURE(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type4) {
+ // 0 ? stmt : 1
+ static const byte kCode[] = {
+ WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_I8(1))};
+ EXPECT_FAILURE(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
+}
+
+TEST_F(AstDecoderTest, If_type5) {
+ // 0 ? 1 : stmt
+ static const byte kCode[] = {WASM_IF_ELSE(WASM_ZERO, WASM_I8(1), WASM_NOP)};
+ EXPECT_FAILURE(sigs.i_i(), kCode);
+ EXPECT_FAILURE(sigs.i_f(), kCode);
+ EXPECT_FAILURE(sigs.i_d(), kCode);
}
TEST_F(AstDecoderTest, Int64Local_param) {
@@ -669,8 +846,7 @@ TEST_F(AstDecoderTest, Int64Locals) {
for (byte i = 1; i < 8; i++) {
AddLocals(kAstI64, 1);
for (byte j = 0; j < i; j++) {
- byte code[] = {kExprGetLocal, j};
- EXPECT_VERIFIES(sigs.l_v(), code);
+ EXPECT_VERIFIES_INLINE(sigs.l_v(), WASM_GET_LOCAL(j));
}
}
}
@@ -737,53 +913,50 @@ TEST_F(AstDecoderTest, MacrosStmt) {
VERIFY(WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
VERIFY(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
VERIFY(WASM_NOP);
- VERIFY(WASM_BLOCK(1, WASM_NOP));
- VERIFY(WASM_LOOP(1, WASM_NOP));
- VERIFY(WASM_LOOP(1, WASM_BREAK(0)));
- VERIFY(WASM_LOOP(1, WASM_CONTINUE(0)));
+ VERIFY(B1(WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_BREAK(0)));
+ VERIFY(WASM_LOOP(WASM_CONTINUE(0)));
}
TEST_F(AstDecoderTest, MacrosBreak) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BREAK(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BREAK(0)));
- EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(1, WASM_BREAKV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.l_l(),
- WASM_LOOP(1, WASM_BREAKV(0, WASM_I64V_1(0))));
- EXPECT_VERIFIES_INLINE(sigs.f_ff(),
- WASM_LOOP(1, WASM_BREAKV(0, WASM_F32(0.0))));
- EXPECT_VERIFIES_INLINE(sigs.d_dd(),
- WASM_LOOP(1, WASM_BREAKV(0, WASM_F64(0.0))));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_LOOP(WASM_BREAKV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.l_l(), WASM_LOOP(WASM_BREAKV(0, WASM_I64V_1(0))));
+ EXPECT_VERIFIES_INLINE(sigs.f_ff(), WASM_LOOP(WASM_BREAKV(0, WASM_F32(0.0))));
+ EXPECT_VERIFIES_INLINE(sigs.d_dd(), WASM_LOOP(WASM_BREAKV(0, WASM_F64(0.0))));
}
TEST_F(AstDecoderTest, MacrosContinue) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_CONTINUE(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_CONTINUE(0)));
}
TEST_F(AstDecoderTest, MacrosVariadic) {
- VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_NOP));
- VERIFY(WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
- VERIFY(WASM_LOOP(2, WASM_NOP, WASM_NOP));
- VERIFY(WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
+ VERIFY(B2(WASM_NOP, WASM_NOP));
+ VERIFY(B3(WASM_NOP, WASM_NOP, WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_NOP, WASM_NOP));
+ VERIFY(WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
}
TEST_F(AstDecoderTest, MacrosNestedBlocks) {
- VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_BLOCK(2, WASM_NOP, WASM_NOP)));
- VERIFY(WASM_BLOCK(3, WASM_NOP, // --
- WASM_BLOCK(2, WASM_NOP, WASM_NOP), // --
- WASM_BLOCK(2, WASM_NOP, WASM_NOP))); // --
- VERIFY(WASM_BLOCK(1, WASM_BLOCK(1, WASM_BLOCK(2, WASM_NOP, WASM_NOP))));
+ VERIFY(B2(WASM_NOP, B2(WASM_NOP, WASM_NOP)));
+ VERIFY(B3(WASM_NOP, // --
+ B2(WASM_NOP, WASM_NOP), // --
+ B2(WASM_NOP, WASM_NOP))); // --
+ VERIFY(B1(B1(B2(WASM_NOP, WASM_NOP))));
}
TEST_F(AstDecoderTest, MultipleReturn) {
static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
FunctionSig sig_ii_v(2, 0, kIntTypes5);
- EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
- EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_RETURN(WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&sig_ii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
+ EXPECT_FAILURE_INLINE(&sig_ii_v, WASM_RETURNN(1, WASM_ZERO));
FunctionSig sig_iii_v(3, 0, kIntTypes5);
EXPECT_VERIFIES_INLINE(&sig_iii_v,
- WASM_RETURN(WASM_ZERO, WASM_ONE, WASM_I8(44)));
- EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+ WASM_RETURNN(3, WASM_ZERO, WASM_ONE, WASM_I8(44)));
+ EXPECT_FAILURE_INLINE(&sig_iii_v, WASM_RETURNN(2, WASM_ZERO, WASM_ONE));
}
TEST_F(AstDecoderTest, MultipleReturn_fallthru) {
@@ -887,78 +1060,57 @@ TEST_F(AstDecoderTest, MemorySize) {
}
TEST_F(AstDecoderTest, GrowMemory) {
- byte code[] = {kExprGrowMemory, kExprGetLocal, 0};
+ byte code[] = {WASM_UNOP(kExprGrowMemory, WASM_GET_LOCAL(0))};
EXPECT_VERIFIES(sigs.i_i(), code);
EXPECT_FAILURE(sigs.i_d(), code);
}
TEST_F(AstDecoderTest, LoadMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
- byte code[] = {kExprI32LoadMem, ZERO_ALIGNMENT, static_cast<byte>(offset),
- kExprI8Const, 0};
+ byte code[] = {kExprI8Const, 0, kExprI32LoadMem, ZERO_ALIGNMENT,
+ static_cast<byte>(offset)};
EXPECT_VERIFIES(sigs.i_i(), code);
}
}
TEST_F(AstDecoderTest, StoreMemOffset) {
for (int offset = 0; offset < 128; offset += 7) {
- byte code[] = {
- kExprI32StoreMem, 0, static_cast<byte>(offset), kExprI8Const, 0,
- kExprI8Const, 0};
+ byte code[] = {WASM_STORE_MEM_OFFSET(MachineType::Int32(), offset,
+ WASM_ZERO, WASM_ZERO)};
EXPECT_VERIFIES(sigs.i_i(), code);
}
}
-TEST_F(AstDecoderTest, LoadMemOffset_varint) {
- byte code1[] = {kExprI32LoadMem, ZERO_ALIGNMENT, ZERO_OFFSET, kExprI8Const,
- 0};
- byte code2[] = {kExprI32LoadMem, ZERO_ALIGNMENT, 0x80, 1, kExprI8Const, 0};
- byte code3[] = {
- kExprI32LoadMem, ZERO_ALIGNMENT, 0x81, 0x82, 5, kExprI8Const, 0};
- byte code4[] = {
- kExprI32LoadMem, ZERO_ALIGNMENT, 0x83, 0x84, 0x85, 7, kExprI8Const, 0};
+#define BYTE0(x) ((x)&0x7F)
+#define BYTE1(x) ((x >> 7) & 0x7F)
+#define BYTE2(x) ((x >> 14) & 0x7F)
+#define BYTE3(x) ((x >> 21) & 0x7F)
- EXPECT_VERIFIES(sigs.i_i(), code1);
- EXPECT_VERIFIES(sigs.i_i(), code2);
- EXPECT_VERIFIES(sigs.i_i(), code3);
- EXPECT_VERIFIES(sigs.i_i(), code4);
+#define VARINT1(x) BYTE0(x)
+#define VARINT2(x) BYTE0(x) | 0x80, BYTE1(x)
+#define VARINT3(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x)
+#define VARINT4(x) BYTE0(x) | 0x80, BYTE1(x) | 0x80, BYTE2(x) | 0x80, BYTE3(x)
+
+TEST_F(AstDecoderTest, LoadMemOffset_varint) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT1(0x45));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT2(0x3999));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT3(0x344445));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, kExprI32LoadMem, ZERO_ALIGNMENT,
+ VARINT4(0x36666667));
}
TEST_F(AstDecoderTest, StoreMemOffset_varint) {
- byte code1[] = {
- kExprI32StoreMem, ZERO_ALIGNMENT, 0, kExprI8Const, 0, kExprI8Const, 0};
- byte code2[] = {kExprI32StoreMem,
- ZERO_ALIGNMENT,
- 0x80,
- 1,
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
- byte code3[] = {kExprI32StoreMem,
- ZERO_ALIGNMENT,
- 0x81,
- 0x82,
- 5,
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
- byte code4[] = {kExprI32StoreMem,
- ZERO_ALIGNMENT,
- 0x83,
- 0x84,
- 0x85,
- 7,
- kExprI8Const,
- 0,
- kExprI8Const,
- 0};
-
- EXPECT_VERIFIES(sigs.i_i(), code1);
- EXPECT_VERIFIES(sigs.i_i(), code2);
- EXPECT_VERIFIES(sigs.i_i(), code3);
- EXPECT_VERIFIES(sigs.i_i(), code4);
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT1(0x33));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT2(0x1111));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT3(0x222222));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(), WASM_ZERO, WASM_ZERO, kExprI32StoreMem,
+ ZERO_ALIGNMENT, VARINT4(0x44444444));
}
TEST_F(AstDecoderTest, AllLoadMemCombinations) {
@@ -966,9 +1118,7 @@ TEST_F(AstDecoderTest, AllLoadMemCombinations) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
- byte code[] = {
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, false)),
- ZERO_ALIGNMENT, ZERO_OFFSET, kExprI8Const, 0};
+ byte code[] = {WASM_LOAD_MEM(mem_type, WASM_ZERO)};
FunctionSig sig(1, 0, &local_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
EXPECT_VERIFIES(&sig, code);
@@ -984,14 +1134,7 @@ TEST_F(AstDecoderTest, AllStoreMemCombinations) {
LocalType local_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(machineTypes); j++) {
MachineType mem_type = machineTypes[j];
- byte code[] = {
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, true)),
- ZERO_ALIGNMENT,
- ZERO_OFFSET,
- kExprI8Const,
- 0,
- kExprGetLocal,
- 0};
+ byte code[] = {WASM_STORE_MEM(mem_type, WASM_ZERO, WASM_GET_LOCAL(0))};
FunctionSig sig(0, 1, &local_type);
if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
EXPECT_VERIFIES(&sig, code);
@@ -1002,7 +1145,6 @@ TEST_F(AstDecoderTest, AllStoreMemCombinations) {
}
}
-
namespace {
// A helper for tests that require a module environment for functions and
// globals.
@@ -1011,10 +1153,9 @@ class TestModuleEnv : public ModuleEnv {
TestModuleEnv() {
instance = nullptr;
module = &mod;
- linker = nullptr;
}
- byte AddGlobal(MachineType mem_type) {
- mod.globals.push_back({0, 0, mem_type, 0, false});
+ byte AddGlobal(LocalType type) {
+ mod.globals.push_back({0, 0, type, 0, false});
CHECK(mod.globals.size() <= 127);
return static_cast<byte>(mod.globals.size() - 1);
}
@@ -1024,12 +1165,23 @@ class TestModuleEnv : public ModuleEnv {
return static_cast<byte>(mod.signatures.size() - 1);
}
byte AddFunction(FunctionSig* sig) {
- mod.functions.push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
+ mod.functions.push_back({sig, // sig
+ 0, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0}); // code_end_offset
CHECK(mod.functions.size() <= 127);
return static_cast<byte>(mod.functions.size() - 1);
}
byte AddImport(FunctionSig* sig) {
- mod.import_table.push_back({sig, 0, 0});
+ mod.import_table.push_back({sig, // sig
+ 0, // sig_index
+ 0, // module_name_offset
+ 0, // module_name_length
+ 0, // function_name_offset
+ 0}); // function_name_length
CHECK(mod.import_table.size() <= 127);
return static_cast<byte>(mod.import_table.size() - 1);
}
@@ -1048,9 +1200,9 @@ TEST_F(AstDecoderTest, SimpleCalls) {
module_env.AddFunction(sigs.i_i());
module_env.AddFunction(sigs.i_ii());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I8(27)));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION(2, WASM_I8(37), WASM_I8(77)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION0(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I8(27)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_FUNCTION2(2, WASM_I8(37), WASM_I8(77)));
}
TEST_F(AstDecoderTest, CallsWithTooFewArguments) {
@@ -1063,35 +1215,8 @@ TEST_F(AstDecoderTest, CallsWithTooFewArguments) {
module_env.AddFunction(sigs.f_ff());
EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION0(0));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_ZERO));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
-}
-
-TEST_F(AstDecoderTest, CallsWithSpilloverArgs) {
- static LocalType a_i_ff[] = {kAstI32, kAstF32, kAstF32};
- FunctionSig sig_i_ff(1, 2, a_i_ff);
-
- TestModuleEnv module_env;
- module = &module_env;
-
- module_env.AddFunction(&sig_i_ff);
-
- EXPECT_VERIFIES_INLINE(sigs.i_i(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
-
- EXPECT_VERIFIES_INLINE(sigs.i_ff(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
-
- EXPECT_FAILURE_INLINE(sigs.f_ff(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
-
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(0.2)));
-
- EXPECT_VERIFIES_INLINE(
- sigs.f_ff(),
- WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(11)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_ZERO));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(2, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, CallsWithMismatchedSigs2) {
@@ -1101,9 +1226,9 @@ TEST_F(AstDecoderTest, CallsWithMismatchedSigs2) {
module_env.AddFunction(sigs.i_i());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I64V_1(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F32(17.1)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F64(17.1)));
}
TEST_F(AstDecoderTest, CallsWithMismatchedSigs3) {
@@ -1113,15 +1238,15 @@ TEST_F(AstDecoderTest, CallsWithMismatchedSigs3) {
module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(0, WASM_F64(37.2)));
module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_FUNCTION1(1, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, SimpleIndirectCalls) {
@@ -1134,9 +1259,9 @@ TEST_F(AstDecoderTest, SimpleIndirectCalls) {
byte f2 = module_env.AddSignature(sigs.i_ii());
EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(22)));
EXPECT_VERIFIES_INLINE(
- sig, WASM_CALL_INDIRECT(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
+ sig, WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
}
TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
@@ -1148,11 +1273,11 @@ TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
module_env.AddSignature(sigs.i_v());
EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(22)));
module_env.AddSignature(sigs.i_i());
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(27)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_INDIRECT1(1, WASM_ZERO, WASM_I8(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(2, WASM_ZERO, WASM_I8(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(2, WASM_ZERO, WASM_I8(27)));
}
TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
@@ -1162,10 +1287,11 @@ TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
byte f0 = module_env.AddFunction(sigs.i_f());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig,
+ WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_I64V_1(27)));
EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_F64(37.2)));
+ WASM_CALL_INDIRECT1(f0, WASM_ZERO, WASM_F64(37.2)));
EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT0(f0, WASM_I64V_1(27)));
@@ -1173,10 +1299,11 @@ TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
byte f1 = module_env.AddFunction(sigs.i_d());
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig,
+ WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I64V_1(16)));
EXPECT_FAILURE_INLINE(sig,
- WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6)));
+ WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, SimpleImportCalls) {
@@ -1189,8 +1316,8 @@ TEST_F(AstDecoderTest, SimpleImportCalls) {
byte f2 = module_env.AddImport(sigs.i_ii());
EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT0(f0));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I8(22)));
- EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT(f2, WASM_I8(32), WASM_I8(72)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_CALL_IMPORT2(f2, WASM_I8(32), WASM_I8(72)));
}
TEST_F(AstDecoderTest, ImportCallsWithMismatchedSigs3) {
@@ -1201,16 +1328,16 @@ TEST_F(AstDecoderTest, ImportCallsWithMismatchedSigs3) {
byte f0 = module_env.AddImport(sigs.i_f());
EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f0));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_I8(17)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_I64V_1(27)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f0, WASM_F64(37.2)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_I64V_1(27)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f0, WASM_F64(37.2)));
byte f1 = module_env.AddImport(sigs.i_d());
EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT0(f1));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I8(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_I64V_1(16)));
- EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT(f1, WASM_F32(17.6)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_I64V_1(16)));
+ EXPECT_FAILURE_INLINE(sig, WASM_CALL_IMPORT1(f1, WASM_F32(17.6)));
}
TEST_F(AstDecoderTest, Int32Globals) {
@@ -1218,26 +1345,10 @@ TEST_F(AstDecoderTest, Int32Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Int8());
- module_env.AddGlobal(MachineType::Uint8());
- module_env.AddGlobal(MachineType::Int16());
- module_env.AddGlobal(MachineType::Uint16());
- module_env.AddGlobal(MachineType::Int32());
- module_env.AddGlobal(MachineType::Uint32());
+ module_env.AddGlobal(kAstI32);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(1));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(2));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(3));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(4));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(5));
-
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(4, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(5, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Int32Globals_fail) {
@@ -1245,20 +1356,20 @@ TEST_F(AstDecoderTest, Int32Globals_fail) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Int64());
- module_env.AddGlobal(MachineType::Uint64());
- module_env.AddGlobal(MachineType::Float32());
- module_env.AddGlobal(MachineType::Float64());
+ module_env.AddGlobal(kAstI64);
+ module_env.AddGlobal(kAstI64);
+ module_env.AddGlobal(kAstF32);
+ module_env.AddGlobal(kAstF64);
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(1));
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(2));
- EXPECT_FAILURE_INLINE(sig, WASM_LOAD_GLOBAL(3));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(1));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(2));
+ EXPECT_FAILURE_INLINE(sig, WASM_GET_GLOBAL(3));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(sig, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(2, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(sig, WASM_SET_GLOBAL(3, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Int64Globals) {
@@ -1266,14 +1377,14 @@ TEST_F(AstDecoderTest, Int64Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Int64());
- module_env.AddGlobal(MachineType::Uint64());
+ module_env.AddGlobal(kAstI64);
+ module_env.AddGlobal(kAstI64);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(1));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(1));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(1, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Float32Globals) {
@@ -1281,10 +1392,10 @@ TEST_F(AstDecoderTest, Float32Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Float32());
+ module_env.AddGlobal(kAstF32);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
TEST_F(AstDecoderTest, Float64Globals) {
@@ -1292,54 +1403,70 @@ TEST_F(AstDecoderTest, Float64Globals) {
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(MachineType::Float64());
+ module_env.AddGlobal(kAstF64);
- EXPECT_VERIFIES_INLINE(sig, WASM_LOAD_GLOBAL(0));
- EXPECT_VERIFIES_INLINE(sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(sig, WASM_GET_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
-TEST_F(AstDecoderTest, AllLoadGlobalCombinations) {
+TEST_F(AstDecoderTest, AllGetGlobalCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
- for (size_t j = 0; j < arraysize(machineTypes); j++) {
- MachineType mem_type = machineTypes[j];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType global_type = kLocalTypes[j];
FunctionSig sig(1, 0, &local_type);
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(mem_type);
- if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES_INLINE(&sig, WASM_LOAD_GLOBAL(0));
+ module_env.AddGlobal(global_type);
+ if (local_type == global_type) {
+ EXPECT_VERIFIES_INLINE(&sig, WASM_GET_GLOBAL(0));
} else {
- EXPECT_FAILURE_INLINE(&sig, WASM_LOAD_GLOBAL(0));
+ EXPECT_FAILURE_INLINE(&sig, WASM_GET_GLOBAL(0));
}
}
}
}
-TEST_F(AstDecoderTest, AllStoreGlobalCombinations) {
+TEST_F(AstDecoderTest, AllSetGlobalCombinations) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalType local_type = kLocalTypes[i];
- for (size_t j = 0; j < arraysize(machineTypes); j++) {
- MachineType mem_type = machineTypes[j];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType global_type = kLocalTypes[j];
FunctionSig sig(0, 1, &local_type);
TestModuleEnv module_env;
module = &module_env;
- module_env.AddGlobal(mem_type);
- if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
- EXPECT_VERIFIES_INLINE(&sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ module_env.AddGlobal(global_type);
+ if (local_type == global_type) {
+ EXPECT_VERIFIES_INLINE(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
} else {
- EXPECT_FAILURE_INLINE(&sig, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(&sig, WASM_SET_GLOBAL(0, WASM_GET_LOCAL(0)));
}
}
}
}
+TEST_F(AstDecoderTest, BreakEnd) {
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ B1(WASM_I32_ADD(WASM_BRV(0, WASM_ZERO), WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.i_i(),
+ B1(WASM_I32_ADD(WASM_ZERO, WASM_BRV(0, WASM_ZERO))));
+}
+
+TEST_F(AstDecoderTest, BreakIfBinop) {
+ EXPECT_FAILURE_INLINE(
+ sigs.i_i(), WASM_BLOCK(WASM_I32_ADD(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO),
+ WASM_ZERO)));
+ EXPECT_FAILURE_INLINE(sigs.i_i(),
+ WASM_BLOCK(WASM_I32_ADD(
+ WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO))));
+}
+
TEST_F(AstDecoderTest, BreakNesting1) {
for (int i = 0; i < 5; i++) {
// (block[2] (loop[2] (if (get p) break[N]) (set p 1)) p)
byte code[] = {WASM_BLOCK(
- 2, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i, WASM_ZERO)),
- WASM_SET_LOCAL(0, WASM_I8(1))),
+ WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i + 1, WASM_ZERO)),
+ WASM_SET_LOCAL(0, WASM_I8(1))),
WASM_GET_LOCAL(0))};
if (i < 3) {
EXPECT_VERIFIES(sigs.i_i(), code);
@@ -1353,8 +1480,8 @@ TEST_F(AstDecoderTest, BreakNesting2) {
AddLocals(kAstI32, 1);
for (int i = 0; i < 5; i++) {
// (block[2] (loop[2] (if 0 break[N]) (set p 1)) (return p)) (11)
- byte code[] = {WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_ZERO, WASM_BREAK(i)),
- WASM_SET_LOCAL(0, WASM_I8(1)))),
+ byte code[] = {B1(WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BREAK(i + 1)),
+ WASM_SET_LOCAL(0, WASM_I8(1)))),
WASM_I8(11)};
if (i < 2) {
EXPECT_VERIFIES(sigs.v_v(), code);
@@ -1367,8 +1494,8 @@ TEST_F(AstDecoderTest, BreakNesting2) {
TEST_F(AstDecoderTest, BreakNesting3) {
for (int i = 0; i < 5; i++) {
// (block[1] (loop[1] (block[1] (if 0 break[N])
- byte code[] = {WASM_BLOCK(
- 1, WASM_LOOP(1, WASM_BLOCK(1, WASM_IF(WASM_ZERO, WASM_BREAK(i)))))};
+ byte code[] = {
+ WASM_BLOCK(WASM_LOOP(B1(WASM_IF(WASM_ZERO, WASM_BREAK(i + 1)))))};
if (i < 3) {
EXPECT_VERIFIES(sigs.v_v(), code);
} else {
@@ -1378,41 +1505,42 @@ TEST_F(AstDecoderTest, BreakNesting3) {
}
TEST_F(AstDecoderTest, BreaksWithMultipleTypes) {
- EXPECT_FAILURE_INLINE(
- sigs.i_i(),
- WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
-
- EXPECT_FAILURE_INLINE(sigs.i_i(),
- WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
- EXPECT_FAILURE_INLINE(sigs.i_i(),
- WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(8)),
- WASM_BRV_IF_ZERO(0, WASM_I8(0)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
EXPECT_FAILURE_INLINE(sigs.i_i(),
- WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(9)),
- WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
- WASM_BRV_IF_ZERO(0, WASM_I8(11))));
+ B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
+
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B2(WASM_BRV_IF_ZERO(0, WASM_I8(7)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B3(WASM_BRV_IF_ZERO(0, WASM_I8(8)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(0)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
+ EXPECT_FAILURE_INLINE(sigs.i_i(), B3(WASM_BRV_IF_ZERO(0, WASM_I8(9)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(11))));
}
TEST_F(AstDecoderTest, BreakNesting_6_levels) {
for (int mask = 0; mask < 64; mask++) {
for (int i = 0; i < 14; i++) {
byte code[] = {
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBlock, 1, // --
- kExprBr, static_cast<byte>(i),
- kExprNop // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBlock, // --
+ kExprBr, ARITY_0, static_cast<byte>(i), // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd, // --
+ kExprEnd // --
};
int depth = 6;
for (int l = 0; l < 6; l++) {
if (mask & (1 << l)) {
- code[l * 2] = kExprLoop;
+ code[l] = kExprLoop;
depth++;
}
}
@@ -1432,29 +1560,26 @@ TEST_F(AstDecoderTest, ExprBreak_TypeCheck) {
FunctionSig* sig = sigarray[i];
// unify X and X => OK
EXPECT_VERIFIES_INLINE(
- sig, WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(0)));
+ sig, B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0)));
}
// unify i32 and f32 => fail
EXPECT_FAILURE_INLINE(
sigs.i_i(),
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
+ B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
// unify f64 and f64 => OK
EXPECT_VERIFIES_INLINE(
sigs.d_dd(),
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
- WASM_F64(1.2)));
+ B2(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))), WASM_F64(1.2)));
}
TEST_F(AstDecoderTest, ExprBreak_TypeCheckAll) {
- byte code1[] = {WASM_BLOCK(2,
- WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ byte code1[] = {WASM_BLOCK(WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(1))};
- byte code2[] = {
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(1))};
+ byte code2[] = {B2(WASM_IF(WASM_ZERO, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(1))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
@@ -1479,14 +1604,12 @@ TEST_F(AstDecoderTest, ExprBr_Unify) {
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
- byte code1[] = {
- WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(which))),
- WASM_GET_LOCAL(which ^ 1))};
+ byte code1[] = {B2(WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
+ WASM_GET_LOCAL(which ^ 1))};
byte code2[] = {
- WASM_LOOP(2, WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
+ WASM_LOOP(WASM_IF(WASM_ZERO, WASM_BRV(2, WASM_GET_LOCAL(which))),
WASM_GET_LOCAL(which ^ 1))};
-
if (type == kAstI32) {
EXPECT_VERIFIES(&sig, code1);
EXPECT_VERIFIES(&sig, code2);
@@ -1499,8 +1622,7 @@ TEST_F(AstDecoderTest, ExprBr_Unify) {
}
TEST_F(AstDecoderTest, ExprBrIf_cond_type) {
- byte code[] = {
- WASM_BLOCK(1, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
+ byte code[] = {B1(WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalType types[] = {kLocalTypes[i], kLocalTypes[j]};
@@ -1516,9 +1638,8 @@ TEST_F(AstDecoderTest, ExprBrIf_cond_type) {
}
TEST_F(AstDecoderTest, ExprBrIf_val_type) {
- byte code[] = {
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
- WASM_GET_LOCAL(0))};
+ byte code[] = {B2(WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ WASM_GET_LOCAL(0))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalType types[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j],
@@ -1541,9 +1662,9 @@ TEST_F(AstDecoderTest, ExprBrIf_Unify) {
LocalType storage[] = {kAstI32, kAstI32, type};
FunctionSig sig(1, 2, storage);
- byte code1[] = {WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
- WASM_GET_LOCAL(which ^ 1))};
- byte code2[] = {WASM_LOOP(2, WASM_BRV_IF_ZERO(1, WASM_GET_LOCAL(which)),
+ byte code1[] = {B2(WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
+ byte code2[] = {WASM_LOOP(WASM_BRV_IF_ZERO(1, WASM_GET_LOCAL(which)),
WASM_GET_LOCAL(which ^ 1))};
if (type == kAstI32) {
@@ -1558,31 +1679,29 @@ TEST_F(AstDecoderTest, ExprBrIf_Unify) {
}
TEST_F(AstDecoderTest, BrTable0) {
- static byte code[] = {kExprBrTable, 0, 0};
+ static byte code[] = {kExprNop, kExprBrTable, 0, 0};
EXPECT_FAILURE(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable0b) {
- static byte code[] = {kExprBrTable, 0, 0, kExprI32Const, 11};
+ static byte code[] = {kExprNop, kExprI32Const, 11, kExprBrTable, 0, 0};
EXPECT_FAILURE(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
}
TEST_F(AstDecoderTest, BrTable0c) {
- static byte code[] = {kExprBrTable, 0, 1, 0, 0, kExprI32Const, 11};
+ static byte code[] = {kExprNop, kExprI32Const, 11, kExprBrTable, 0, 1, 0, 0};
EXPECT_FAILURE(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
}
TEST_F(AstDecoderTest, BrTable1a) {
- static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_I8(67), 0, BR_TARGET(0)))};
+ static byte code[] = {B1(WASM_BR_TABLE(WASM_I8(67), 0, BR_TARGET(0)))};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable1b) {
- static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_ZERO, 0, BR_TARGET(0)))};
+ static byte code[] = {B1(WASM_BR_TABLE(WASM_ZERO, 0, BR_TARGET(0)))};
EXPECT_VERIFIES(sigs.v_v(), code);
EXPECT_FAILURE(sigs.i_i(), code);
EXPECT_FAILURE(sigs.f_ff(), code);
@@ -1591,20 +1710,18 @@ TEST_F(AstDecoderTest, BrTable1b) {
TEST_F(AstDecoderTest, BrTable2a) {
static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(0)))};
+ B1(WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(0)))};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable2b) {
static byte code[] = {WASM_BLOCK(
- 1, WASM_BLOCK(
- 1, WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(1))))};
+ WASM_BLOCK(WASM_BR_TABLE(WASM_I8(67), 1, BR_TARGET(0), BR_TARGET(1))))};
EXPECT_VERIFIES(sigs.v_v(), code);
}
TEST_F(AstDecoderTest, BrTable_off_end) {
- static byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
+ static byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))};
for (size_t len = 1; len < sizeof(code); len++) {
Verify(kError, sigs.i_i(), code, code + len);
}
@@ -1612,8 +1729,7 @@ TEST_F(AstDecoderTest, BrTable_off_end) {
TEST_F(AstDecoderTest, BrTable_invalid_br1) {
for (int depth = 0; depth < 4; depth++) {
- byte code[] = {
- WASM_BLOCK(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
+ byte code[] = {B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
if (depth == 0) {
EXPECT_VERIFIES(sigs.v_i(), code);
} else {
@@ -1625,7 +1741,7 @@ TEST_F(AstDecoderTest, BrTable_invalid_br1) {
TEST_F(AstDecoderTest, BrTable_invalid_br2) {
for (int depth = 0; depth < 4; depth++) {
byte code[] = {
- WASM_LOOP(1, WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
+ WASM_LOOP(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(depth)))};
if (depth <= 1) {
EXPECT_VERIFIES(sigs.v_i(), code);
} else {
@@ -1635,20 +1751,19 @@ TEST_F(AstDecoderTest, BrTable_invalid_br2) {
}
TEST_F(AstDecoderTest, ExprBreakNesting1) {
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BR(0)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(),
- WASM_BLOCK(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_BLOCK(1, WASM_BR_IF(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), B1(WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BRV(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(0)));
EXPECT_VERIFIES_INLINE(sigs.v_v(),
- WASM_LOOP(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+ WASM_LOOP(WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BRV(1, WASM_ZERO)));
- EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(1, WASM_BR(1)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BRV(1, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), WASM_LOOP(WASM_BR(1)));
}
TEST_F(AstDecoderTest, Select) {
@@ -1714,6 +1829,89 @@ TEST_F(AstDecoderTest, Select_TypeCheck) {
WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64V_1(0)));
}
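+
+// The tests below exercise the prototype exception-handling opcodes (throw
+// and the try/catch/finally forms); the decoder only accepts them when
+// --wasm-eh-prototype is on, hence the flag set at the top of each test.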
+TEST_F(AstDecoderTest, Throw) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), WASM_GET_LOCAL(0), kExprThrow);
+
+ EXPECT_FAILURE_INLINE(sigs.i_d(), WASM_GET_LOCAL(0), kExprThrow,
+ WASM_I32V(0));
+ EXPECT_FAILURE_INLINE(sigs.i_f(), WASM_GET_LOCAL(0), kExprThrow,
+ WASM_I32V(0));
+ EXPECT_FAILURE_INLINE(sigs.l_l(), WASM_GET_LOCAL(0), kExprThrow,
+ WASM_I64V(0));
+}
+
+#define WASM_CATCH(local) kExprCatch, static_cast<byte>(local)
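+// WASM_CATCH expands to the catch opcode followed by the index of the local
+// receiving the thrown value; the single cast byte assumes indices < 256.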
+TEST_F(AstDecoderTest, TryCatch) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprEnd);
+
+ // Missing catch.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryCatch, kExprEnd);
+
+ // Missing end.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0));
+
+ // Double catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), WASM_CATCH(0),
+ kExprEnd);
+
+ // Unexpected finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprFinally,
+ kExprEnd);
+}
+
+TEST_F(AstDecoderTest, TryFinally) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally, kExprEnd);
+
+  // Missing finally.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprEnd);
+
+ // Missing end.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally);
+
+ // Double finally.
+ EXPECT_FAILURE_INLINE(sigs.v_v(), kExprTryFinally, kExprFinally, kExprFinally,
+ kExprEnd);
+
+  // Unexpected finally (a try-catch must not carry one; mirrors TryCatch).
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatch, WASM_CATCH(0), kExprFinally,
+ kExprEnd);
+}
+
+TEST_F(AstDecoderTest, TryCatchFinally) {
+ FLAG_wasm_eh_prototype = true;
+ EXPECT_VERIFIES_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprFinally, kExprEnd);
+
+ // Missing catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprFinally,
+ kExprEnd);
+
+ // Double catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ WASM_CATCH(0), kExprFinally, kExprEnd);
+
+ // Missing finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprEnd);
+
+ // Double finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprFinally, kExprFinally, kExprEnd);
+
+ // Finally before catch.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprFinally,
+ WASM_CATCH(0), kExprEnd);
+
+  // Missing both catch and finally.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, kExprEnd);
+
+ // Missing end.
+ EXPECT_FAILURE_INLINE(sigs.v_i(), kExprTryCatchFinally, WASM_CATCH(0),
+ kExprFinally);
+}
class WasmOpcodeLengthTest : public TestWithZone {
public:
@@ -1734,33 +1932,38 @@ class WasmOpcodeLengthTest : public TestWithZone {
TEST_F(WasmOpcodeLengthTest, Statements) {
EXPECT_LENGTH(1, kExprNop);
- EXPECT_LENGTH(2, kExprBlock);
- EXPECT_LENGTH(2, kExprLoop);
+ EXPECT_LENGTH(1, kExprBlock);
+ EXPECT_LENGTH(1, kExprLoop);
EXPECT_LENGTH(1, kExprIf);
- EXPECT_LENGTH(1, kExprIfElse);
+ EXPECT_LENGTH(1, kExprElse);
+ EXPECT_LENGTH(1, kExprEnd);
EXPECT_LENGTH(1, kExprSelect);
- EXPECT_LENGTH(2, kExprBr);
- EXPECT_LENGTH(2, kExprBrIf);
+ EXPECT_LENGTH(3, kExprBr);
+ EXPECT_LENGTH(3, kExprBrIf);
+ EXPECT_LENGTH(1, kExprThrow);
+ EXPECT_LENGTH(1, kExprTryCatch);
+ EXPECT_LENGTH(1, kExprTryFinally);
+ EXPECT_LENGTH(1, kExprTryCatchFinally);
+ EXPECT_LENGTH(2, kExprCatch);
+ EXPECT_LENGTH(1, kExprFinally);
}
-
TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
EXPECT_LENGTH(2, kExprI8Const);
EXPECT_LENGTH(5, kExprF32Const);
EXPECT_LENGTH(9, kExprF64Const);
EXPECT_LENGTH(2, kExprGetLocal);
EXPECT_LENGTH(2, kExprSetLocal);
- EXPECT_LENGTH(2, kExprLoadGlobal);
- EXPECT_LENGTH(2, kExprStoreGlobal);
- EXPECT_LENGTH(2, kExprCallFunction);
- EXPECT_LENGTH(2, kExprCallImport);
- EXPECT_LENGTH(2, kExprCallIndirect);
+ EXPECT_LENGTH(2, kExprGetGlobal);
+ EXPECT_LENGTH(2, kExprSetGlobal);
+ EXPECT_LENGTH(3, kExprCallFunction);
+ EXPECT_LENGTH(3, kExprCallImport);
+ EXPECT_LENGTH(3, kExprCallIndirect);
EXPECT_LENGTH(1, kExprIf);
- EXPECT_LENGTH(1, kExprIfElse);
- EXPECT_LENGTH(2, kExprBlock);
- EXPECT_LENGTH(2, kExprLoop);
- EXPECT_LENGTH(2, kExprBr);
- EXPECT_LENGTH(2, kExprBrIf);
+ EXPECT_LENGTH(1, kExprBlock);
+ EXPECT_LENGTH(1, kExprLoop);
+ EXPECT_LENGTH(3, kExprBr);
+ EXPECT_LENGTH(3, kExprBrIf);
}
TEST_F(WasmOpcodeLengthTest, I32Const) {
@@ -1784,11 +1987,11 @@ TEST_F(WasmOpcodeLengthTest, I64Const) {
}
TEST_F(WasmOpcodeLengthTest, VariableLength) {
- EXPECT_LENGTH_N(2, kExprLoadGlobal, U32V_1(1));
- EXPECT_LENGTH_N(3, kExprLoadGlobal, U32V_2(33));
- EXPECT_LENGTH_N(4, kExprLoadGlobal, U32V_3(44));
- EXPECT_LENGTH_N(5, kExprLoadGlobal, U32V_4(66));
- EXPECT_LENGTH_N(6, kExprLoadGlobal, U32V_5(77));
+ EXPECT_LENGTH_N(2, kExprGetGlobal, U32V_1(1));
+ EXPECT_LENGTH_N(3, kExprGetGlobal, U32V_2(33));
+ EXPECT_LENGTH_N(4, kExprGetGlobal, U32V_3(44));
+ EXPECT_LENGTH_N(5, kExprGetGlobal, U32V_4(66));
+ EXPECT_LENGTH_N(6, kExprGetGlobal, U32V_5(77));
}
TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
@@ -1818,13 +2021,11 @@ TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
EXPECT_LENGTH(3, kExprF64StoreMem);
}
-
TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
EXPECT_LENGTH(1, kExprMemorySize);
EXPECT_LENGTH(1, kExprGrowMemory);
}
-
TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
EXPECT_LENGTH(1, kExprI32Add);
EXPECT_LENGTH(1, kExprI32Sub);
@@ -1946,54 +2147,56 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
EXPECT_LENGTH(1, kExprI64ReinterpretF64);
}
-
class WasmOpcodeArityTest : public TestWithZone {
public:
WasmOpcodeArityTest() : TestWithZone() {}
- TestModuleEnv module;
- TestSignatures sigs;
};
-#define EXPECT_ARITY(expected, ...) \
- { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(expected, OpcodeArity(&module, sig, code, code + sizeof(code))); \
+#define EXPECT_ARITY(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(expected, OpcodeArity(code, code + sizeof(code))); \
}
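+
+// Arity is now read directly from the bytecode -- note the ARITY_0/ARITY_1
+// immediates on br, br_if, br_table and the call opcodes below -- so
+// OpcodeArity no longer takes a module environment or function signature.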
TEST_F(WasmOpcodeArityTest, Control) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprNop);
EXPECT_ARITY(0, kExprBlock, 0);
- EXPECT_ARITY(1, kExprBlock, 1);
- EXPECT_ARITY(2, kExprBlock, 2);
- EXPECT_ARITY(5, kExprBlock, 5);
- EXPECT_ARITY(10, kExprBlock, 10);
+ EXPECT_ARITY(0, kExprBlock, 1);
+ EXPECT_ARITY(0, kExprBlock, 2);
+ EXPECT_ARITY(0, kExprBlock, 5);
+ EXPECT_ARITY(0, kExprBlock, 10);
EXPECT_ARITY(0, kExprLoop, 0);
- EXPECT_ARITY(1, kExprLoop, 1);
- EXPECT_ARITY(2, kExprLoop, 2);
- EXPECT_ARITY(7, kExprLoop, 7);
- EXPECT_ARITY(11, kExprLoop, 11);
+ EXPECT_ARITY(0, kExprLoop, 1);
+ EXPECT_ARITY(0, kExprLoop, 2);
+ EXPECT_ARITY(0, kExprLoop, 7);
+ EXPECT_ARITY(0, kExprLoop, 11);
- EXPECT_ARITY(2, kExprIf);
- EXPECT_ARITY(3, kExprIfElse);
EXPECT_ARITY(3, kExprSelect);
- EXPECT_ARITY(1, kExprBr);
- EXPECT_ARITY(2, kExprBrIf);
+ EXPECT_ARITY(0, kExprBr);
+ EXPECT_ARITY(1, kExprBrIf);
+ EXPECT_ARITY(1, kExprBrTable);
+
+ EXPECT_ARITY(1, kExprBr, ARITY_1);
+ EXPECT_ARITY(2, kExprBrIf, ARITY_1);
+ EXPECT_ARITY(2, kExprBrTable, ARITY_1);
{
- sig = sigs.v_v();
- EXPECT_ARITY(0, kExprReturn);
- sig = sigs.i_i();
- EXPECT_ARITY(1, kExprReturn);
+ EXPECT_ARITY(0, kExprReturn, ARITY_0);
+ EXPECT_ARITY(1, kExprReturn, ARITY_1);
}
-}
+ EXPECT_ARITY(0, kExprThrow);
+ EXPECT_ARITY(0, kExprTryCatch);
+ EXPECT_ARITY(0, kExprTryFinally);
+ EXPECT_ARITY(0, kExprTryCatchFinally);
+ EXPECT_ARITY(1, kExprCatch, 2);
+ EXPECT_ARITY(0, kExprFinally);
+}
TEST_F(WasmOpcodeArityTest, Misc) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprI8Const);
EXPECT_ARITY(0, kExprI32Const);
EXPECT_ARITY(0, kExprF32Const);
@@ -2001,45 +2204,41 @@ TEST_F(WasmOpcodeArityTest, Misc) {
EXPECT_ARITY(0, kExprF64Const);
EXPECT_ARITY(0, kExprGetLocal);
EXPECT_ARITY(1, kExprSetLocal);
- EXPECT_ARITY(0, kExprLoadGlobal);
- EXPECT_ARITY(1, kExprStoreGlobal);
+ EXPECT_ARITY(0, kExprGetGlobal);
+ EXPECT_ARITY(1, kExprSetGlobal);
}
-
TEST_F(WasmOpcodeArityTest, Calls) {
- module.AddFunction(sigs.i_ii());
- module.AddFunction(sigs.i_i());
-
- module.AddSignature(sigs.f_ff());
- module.AddSignature(sigs.i_d());
-
- module.AddImport(sigs.f_ff());
- module.AddImport(sigs.i_d());
-
{
- FunctionSig* sig = sigs.i_ii();
+ EXPECT_ARITY(2, kExprCallFunction, 2, 0);
+ EXPECT_ARITY(2, kExprCallImport, 2, 0);
+ EXPECT_ARITY(3, kExprCallIndirect, 2, 0);
- EXPECT_ARITY(2, kExprCallFunction, 0);
- EXPECT_ARITY(2, kExprCallImport, 0);
- EXPECT_ARITY(3, kExprCallIndirect, 0);
- EXPECT_ARITY(1, kExprBr);
- EXPECT_ARITY(2, kExprBrIf);
+ EXPECT_ARITY(1, kExprBr, ARITY_1);
+ EXPECT_ARITY(2, kExprBrIf, ARITY_1);
+ EXPECT_ARITY(2, kExprBrTable, ARITY_1);
+
+ EXPECT_ARITY(0, kExprBr, ARITY_0);
+ EXPECT_ARITY(1, kExprBrIf, ARITY_0);
+ EXPECT_ARITY(1, kExprBrTable, ARITY_0);
}
{
- FunctionSig* sig = sigs.v_v();
+ EXPECT_ARITY(1, kExprCallFunction, ARITY_1, 1);
+ EXPECT_ARITY(1, kExprCallImport, ARITY_1, 1);
+ EXPECT_ARITY(2, kExprCallIndirect, ARITY_1, 1);
+
+ EXPECT_ARITY(1, kExprBr, ARITY_1);
+ EXPECT_ARITY(2, kExprBrIf, ARITY_1);
+ EXPECT_ARITY(2, kExprBrTable, ARITY_1);
- EXPECT_ARITY(1, kExprCallFunction, 1);
- EXPECT_ARITY(1, kExprCallImport, 1);
- EXPECT_ARITY(2, kExprCallIndirect, 1);
- EXPECT_ARITY(1, kExprBr);
- EXPECT_ARITY(2, kExprBrIf);
+ EXPECT_ARITY(0, kExprBr, ARITY_0);
+ EXPECT_ARITY(1, kExprBrIf, ARITY_0);
+ EXPECT_ARITY(1, kExprBrTable, ARITY_0);
}
}
-
TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(1, kExprI32LoadMem8S);
EXPECT_ARITY(1, kExprI32LoadMem8U);
EXPECT_ARITY(1, kExprI32LoadMem16S);
@@ -2067,16 +2266,12 @@ TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
EXPECT_ARITY(2, kExprF64StoreMem);
}
-
TEST_F(WasmOpcodeArityTest, MiscMemExpressions) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(0, kExprMemorySize);
EXPECT_ARITY(1, kExprGrowMemory);
}
-
TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
- FunctionSig* sig = sigs.v_v();
EXPECT_ARITY(2, kExprI32Add);
EXPECT_ARITY(2, kExprI32Sub);
EXPECT_ARITY(2, kExprI32Mul);
@@ -2246,7 +2441,7 @@ TEST_F(LocalDeclDecoderTest, OneLocal) {
LocalTypeMap map = Expand(decls);
EXPECT_EQ(1, map.size());
- EXPECT_EQ(type, map.at(0));
+ EXPECT_EQ(type, map[0]);
}
}
@@ -2297,12 +2492,12 @@ TEST_F(LocalDeclDecoderTest, MixedLocals) {
TEST_F(LocalDeclDecoderTest, UseEncoder) {
const byte* data = nullptr;
const byte* end = nullptr;
- LocalDeclEncoder local_decls;
+ LocalDeclEncoder local_decls(zone());
local_decls.AddLocals(5, kAstF32);
local_decls.AddLocals(1337, kAstI32);
local_decls.AddLocals(212, kAstI64);
- local_decls.Prepend(&data, &end);
+ local_decls.Prepend(zone(), &data, &end);
AstLocalDecls decls(zone());
bool result = DecodeLocalDecls(decls, data, end);
@@ -2314,7 +2509,60 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
pos = ExpectRun(map, pos, kAstF32, 5);
pos = ExpectRun(map, pos, kAstI32, 1337);
pos = ExpectRun(map, pos, kAstI64, 212);
- delete[] data;
+}
+
+class BytecodeIteratorTest : public TestWithZone {};
+
+TEST_F(BytecodeIteratorTest, SimpleForeach) {
+ byte code[] = {WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO)};
+ BytecodeIterator iter(code, code + sizeof(code));
+ WasmOpcode expected[] = {kExprI8Const, kExprIf, kExprI8Const,
+ kExprElse, kExprI8Const, kExprEnd};
+ size_t pos = 0;
+ for (WasmOpcode opcode : iter) {
+ if (pos >= arraysize(expected)) {
+ EXPECT_TRUE(false);
+ break;
+ }
+ EXPECT_EQ(expected[pos++], opcode);
+ }
+ EXPECT_EQ(arraysize(expected), pos);
+}
+
+TEST_F(BytecodeIteratorTest, ForeachTwice) {
+ byte code[] = {WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO)};
+ BytecodeIterator iter(code, code + sizeof(code));
+ int count = 0;
+
+ count = 0;
+ for (WasmOpcode opcode : iter) {
+ USE(opcode);
+ count++;
+ }
+ EXPECT_EQ(6, count);
+
+ count = 0;
+ for (WasmOpcode opcode : iter) {
+ USE(opcode);
+ count++;
+ }
+ EXPECT_EQ(6, count);
+}
+
+TEST_F(BytecodeIteratorTest, WithAstDecls) {
+ byte code[] = {1, 1, kLocalI32, WASM_I8(9), WASM_I8(11)};
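+  // {1, 1, kLocalI32} is the local-declaration prefix: an entry count of
+  // one, then a run of one i32 local -- three bytes, matching the expected
+  // decls_encoded_size and initial pc_offset checked below.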
+ AstLocalDecls decls(zone());
+ BytecodeIterator iter(code, code + sizeof(code), &decls);
+
+ EXPECT_EQ(3, decls.decls_encoded_size);
+ EXPECT_EQ(3, iter.pc_offset());
+ EXPECT_TRUE(iter.has_next());
+ EXPECT_EQ(kExprI8Const, iter.current());
+ iter.next();
+ EXPECT_TRUE(iter.has_next());
+ EXPECT_EQ(kExprI8Const, iter.current());
+ iter.next();
+ EXPECT_FALSE(iter.has_next());
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
new file mode 100644
index 0000000000..2b67f12ef5
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -0,0 +1,402 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+#include "src/v8.h"
+
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-macro-gen.h"
+
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#define B1(a) kExprBlock, a, kExprEnd
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
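+// B1/B2/B3 wrap one, two, or three expressions in an explicit block...end
+// pair; in this encoding a block is delimited by an end opcode rather than
+// carrying an operand count.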
+
+struct ExpectedTarget {
+ pc_t pc;
+ ControlTransfer expected;
+};
+
+// For nicer error messages.
+class ControlTransferMatcher : public MatcherInterface<const ControlTransfer&> {
+ public:
+ explicit ControlTransferMatcher(pc_t pc, const ControlTransfer& expected)
+ : pc_(pc), expected_(expected) {}
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "@" << pc_ << " {pcdiff = " << expected_.pcdiff
+ << ", spdiff = " << expected_.spdiff
+ << ", action = " << expected_.action << "}";
+ }
+
+ bool MatchAndExplain(const ControlTransfer& input,
+ MatchResultListener* listener) const override {
+ if (input.pcdiff != expected_.pcdiff || input.spdiff != expected_.spdiff ||
+ input.action != expected_.action) {
+ *listener << "@" << pc_ << " {pcdiff = " << input.pcdiff
+ << ", spdiff = " << input.spdiff
+ << ", action = " << input.action << "}";
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ pc_t pc_;
+ const ControlTransfer& expected_;
+};
+
+class ControlTransferTest : public TestWithZone {
+ public:
+ void CheckControlTransfers(const byte* start, const byte* end,
+ ExpectedTarget* expected_targets,
+ size_t num_targets) {
+ ControlTransferMap map =
+ WasmInterpreter::ComputeControlTransfersForTesting(zone(), start, end);
+ // Check all control targets in the map.
+ for (size_t i = 0; i < num_targets; i++) {
+ pc_t pc = expected_targets[i].pc;
+ auto it = map.find(pc);
+ if (it == map.end()) {
+ printf("expected control target @ +%zu\n", pc);
+ EXPECT_TRUE(false);
+ } else {
+ ControlTransfer& expected = expected_targets[i].expected;
+ ControlTransfer& target = it->second;
+ EXPECT_THAT(target,
+ MakeMatcher(new ControlTransferMatcher(pc, expected)));
+ }
+ }
+
+ // Check there are no other control targets.
+ for (pc_t pc = 0; start + pc < end; pc++) {
+ bool found = false;
+ for (size_t i = 0; i < num_targets; i++) {
+ if (expected_targets[i].pc == pc) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ if (map.find(pc) != map.end()) {
+ printf("expected no control @ +%zu\n", pc);
+ EXPECT_TRUE(false);
+ }
+ }
+ }
+};
+
+// Macro for simplifying tests below.
+#define EXPECT_TARGETS(...) \
+ do { \
+ ExpectedTarget pairs[] = {__VA_ARGS__}; \
+ CheckControlTransfers(code, code + sizeof(code), pairs, arraysize(pairs)); \
+ } while (false)
+
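+// Reading an ExpectedTarget entry {pc, {pcdiff, spdiff, action}}: the
+// control opcode at offset +pc is expected to transfer execution pcdiff
+// bytes forward (negative pcdiff is a loop back-edge), drop spdiff values
+// from the stack, and then apply the given action.
+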
+TEST_F(ControlTransferTest, SimpleIf) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprEnd // @3
+ };
+ EXPECT_TARGETS({2, {2, 0, ControlTransfer::kPushVoid}}, // --
+ {3, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, SimpleIf1) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleIf2) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprNop, // @4
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleIfElse) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprElse, // @3
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({2, {2, 0, ControlTransfer::kNoAction}}, // --
+ {3, {2, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, SimpleIfElse1) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprElse, // @4
+ kExprNop, // @5
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
+ {4, {3, 1, ControlTransfer::kPopAndRepush}}, // --
+ {6, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, IfBr) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({2, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {3, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, IfBrElse) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprElse, // @6
+ kExprEnd // @7
+ };
+ EXPECT_TARGETS({2, {5, 0, ControlTransfer::kNoAction}}, // --
+ {3, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {2, 1, ControlTransfer::kPopAndRepush}}, // --
+ {7, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, IfElseBr) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprElse, // @4
+ kExprBr, // @5
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @8
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
+ {4, {5, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {8, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BlockEmpty) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprEnd // @1
+ };
+ EXPECT_TARGETS({1, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, Br0) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}},
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br1) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br2) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprNop, // @1
+ kExprNop, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({3, {4, 2, ControlTransfer::kPopAndRepush}}, // --
+ {6, {1, 3, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br0b) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprNop, // @4
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({1, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br0c) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprNop, // @4
+ kExprNop, // @5
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({1, {6, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {1, 3, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoop1) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {-1, 0, ControlTransfer::kNoAction}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoop2) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {-2, 1, ControlTransfer::kNoAction}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoopExit1) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 1, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoopExit2) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 1, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable0) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprI8Const, // @1
+ 0, // +1
+ kExprBrTable, // @3
+ ARITY_0, // +1
+ 0, // +1
+ U32_LE(0), // +4
+ kExprEnd // @10
+ };
+ EXPECT_TARGETS({3, {8, 0, ControlTransfer::kPushVoid}}, // --
+ {10, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable1) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprI8Const, // @1
+ 0, // +1
+ kExprBrTable, // @3
+ ARITY_0, // +1
+ 1, // +1
+ U32_LE(0), // +4
+ U32_LE(0), // +4
+ kExprEnd // @14
+ };
+ EXPECT_TARGETS({3, {12, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {11, 0, ControlTransfer::kPushVoid}}, // --
+ {14, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable2) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBlock, // @1
+ kExprI8Const, // @2
+ 0, // +1
+ kExprBrTable, // @4
+ ARITY_0, // +1
+ 2, // +1
+ U32_LE(0), // +4
+ U32_LE(0), // +4
+ U32_LE(1), // +4
+ kExprEnd, // @19
+      kExprEnd       // @20
+ };
+ EXPECT_TARGETS({4, {16, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {15, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {15, 0, ControlTransfer::kPushVoid}}, // --
+ {19, {1, 1, ControlTransfer::kPopAndRepush}}, // --
+ {20, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 11d68f161e..e298f0ba9f 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -4,6 +4,7 @@
#include "test/unittests/test-utils.h"
+#include "src/objects-inl.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-macro-gen.h"
@@ -22,7 +23,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_u32v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -32,7 +33,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i32v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -42,7 +43,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_u64v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -52,7 +53,7 @@ class DecoderTest : public TestWithZone {
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i64v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -365,7 +366,7 @@ TEST_F(DecoderTest, ReadI32v_FiveByte) {
TEST_F(DecoderTest, ReadU32v_off_end1) {
static const byte data[] = {U32V_1(11)};
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(0, length);
@@ -375,7 +376,7 @@ TEST_F(DecoderTest, ReadU32v_off_end1) {
TEST_F(DecoderTest, ReadU32v_off_end2) {
static const byte data[] = {U32V_2(1111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -386,7 +387,7 @@ TEST_F(DecoderTest, ReadU32v_off_end2) {
TEST_F(DecoderTest, ReadU32v_off_end3) {
static const byte data[] = {U32V_3(111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -397,7 +398,7 @@ TEST_F(DecoderTest, ReadU32v_off_end3) {
TEST_F(DecoderTest, ReadU32v_off_end4) {
static const byte data[] = {U32V_4(11111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -408,7 +409,7 @@ TEST_F(DecoderTest, ReadU32v_off_end4) {
TEST_F(DecoderTest, ReadU32v_off_end5) {
static const byte data[] = {U32V_5(111111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -420,7 +421,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 16; i++) {
data[4] = static_cast<byte>(i << 4);
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(5, length);
@@ -430,7 +431,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
@@ -440,7 +441,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
@@ -477,7 +478,7 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
// foreach buffer size 0...5
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
uint32_t result = decoder.checked_read_u32v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -533,7 +534,7 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int length;
+ unsigned length;
uint64_t result = decoder.checked_read_u64v(data, 0, &length);
if (limit <= index) {
EXPECT_FALSE(decoder.ok());
@@ -574,7 +575,7 @@ TEST_F(DecoderTest, ReadU64v_Bits) {
// foreach buffer size 0...10
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
uint64_t result = decoder.checked_read_u64v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -616,7 +617,7 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
// foreach buffer size 0...10
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
int64_t result = decoder.checked_read_i64v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -634,7 +635,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 128; i++) {
data[9] = static_cast<byte>(i << 1);
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u64v(decoder.start(), 0, &length);
EXPECT_EQ(10, length);
@@ -644,7 +645,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
@@ -654,7 +655,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
diff --git a/deps/v8/test/unittests/wasm/encoder-unittest.cc b/deps/v8/test/unittests/wasm/encoder-unittest.cc
index 740c0540dc..47885e697d 100644
--- a/deps/v8/test/unittests/wasm/encoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/encoder-unittest.cc
@@ -9,6 +9,8 @@
#include "src/wasm/ast-decoder.h"
#include "src/wasm/encoder.h"
+#include "test/cctest/wasm/test-signatures.h"
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -17,202 +19,10 @@ class EncoderTest : public TestWithZone {
protected:
void AddLocal(WasmFunctionBuilder* f, LocalType type) {
uint16_t index = f->AddLocal(type);
- const std::vector<uint8_t>& out_index = UnsignedLEB128From(index);
- std::vector<uint8_t> code;
- code.push_back(kExprGetLocal);
- for (size_t i = 0; i < out_index.size(); i++) {
- code.push_back(out_index.at(i));
- }
- uint32_t local_indices[] = {1};
- f->EmitCode(&code[0], static_cast<uint32_t>(code.size()), local_indices, 1);
- }
-
- void CheckReadValue(uint8_t* leb_value, uint32_t expected_result,
- int expected_length,
- ReadUnsignedLEB128ErrorCode expected_error_code) {
- int length;
- uint32_t result;
- ReadUnsignedLEB128ErrorCode error_code =
- ReadUnsignedLEB128Operand(leb_value, leb_value + 5, &length, &result);
- CHECK_EQ(error_code, expected_error_code);
- if (error_code == 0) {
- CHECK_EQ(result, expected_result);
- CHECK_EQ(length, expected_length);
- }
- }
-
- void CheckWriteValue(uint32_t input, int length, uint8_t* vals) {
- const std::vector<uint8_t> result = UnsignedLEB128From(input);
- CHECK_EQ(result.size(), length);
- for (int i = 0; i < length; i++) {
- CHECK_EQ(result.at(i), vals[i]);
- }
+ f->EmitGetLocal(index);
}
};
-
-TEST_F(EncoderTest, Function_Builder_Variable_Indexing) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- uint16_t local_f32 = function->AddLocal(kAstF32);
- uint16_t param_float32 = function->AddParam(kAstF32);
- uint16_t local_i32 = function->AddLocal(kAstI32);
- uint16_t local_f64 = function->AddLocal(kAstF64);
- uint16_t local_i64 = function->AddLocal(kAstI64);
- uint16_t param_int32 = function->AddParam(kAstI32);
- uint16_t local_i32_2 = function->AddLocal(kAstI32);
-
- byte code[] = {kExprGetLocal, static_cast<uint8_t>(param_float32)};
- uint32_t local_indices[] = {1};
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(param_int32);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_i32);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_i32_2);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_i64);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_f32);
- function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_f64);
- function->EmitCode(code, sizeof(code), local_indices, 1);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
- byte* buffer = &buffer_vector[0];
- byte* header = buffer;
- byte* body = buffer + f->HeaderSize();
- f->Serialize(buffer, &header, &body);
-}
-
-
-TEST_F(EncoderTest, Function_Builder_Indexing_Variable_Width) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- for (size_t i = 0; i < 128; i++) {
- AddLocal(function, kAstF32);
- }
- AddLocal(function, kAstI32);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
- byte* buffer = &buffer_vector[0];
- byte* header = buffer;
- byte* body = buffer + f->HeaderSize();
- f->Serialize(buffer, &header, &body);
- body = buffer + f->HeaderSize();
-}
-
-TEST_F(EncoderTest, Function_Builder_Block_Variable_Width) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- function->EmitWithVarInt(kExprBlock, 200);
- for (int i = 0; i < 200; ++i) {
- function->Emit(kExprNop);
- }
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- CHECK_EQ(f->BodySize(), 204);
-}
-
-TEST_F(EncoderTest, Function_Builder_EmitEditableVarIntImmediate) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- function->Emit(kExprLoop);
- uint32_t offset = function->EmitEditableVarIntImmediate();
- for (int i = 0; i < 200; ++i) {
- function->Emit(kExprNop);
- }
- function->EditVarIntImmediate(offset, 200);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- CHECK_EQ(f->BodySize(), 204);
-}
-
-TEST_F(EncoderTest, Function_Builder_EmitEditableVarIntImmediate_Locals) {
- base::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint16_t f_index = builder->AddFunction();
- WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- function->Emit(kExprBlock);
- uint32_t offset = function->EmitEditableVarIntImmediate();
- for (int i = 0; i < 200; ++i) {
- AddLocal(function, kAstI32);
- }
- function->EditVarIntImmediate(offset, 200);
-
- WasmFunctionEncoder* f = function->Build(&zone, builder);
- ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
- byte* buffer = &buffer_vector[0];
- byte* header = buffer;
- byte* body = buffer + f->HeaderSize();
- f->Serialize(buffer, &header, &body);
- body = buffer + f->HeaderSize();
-
- CHECK_EQ(f->BodySize(), 479);
- const uint8_t varint200_low = (200 & 0x7f) | 0x80;
- const uint8_t varint200_high = (200 >> 7) & 0x7f;
- offset = 0;
- CHECK_EQ(body[offset++], 1); // Local decl count.
- CHECK_EQ(body[offset++], varint200_low);
- CHECK_EQ(body[offset++], varint200_high);
- CHECK_EQ(body[offset++], kLocalI32);
- CHECK_EQ(body[offset++], kExprBlock);
- CHECK_EQ(body[offset++], varint200_low);
- CHECK_EQ(body[offset++], varint200_high);
- // GetLocal with one-byte indices.
- for (int i = 0; i <= 127; ++i) {
- CHECK_EQ(body[offset++], kExprGetLocal);
- CHECK_EQ(body[offset++], i);
- }
- // GetLocal with two-byte indices.
- for (int i = 128; i < 200; ++i) {
- CHECK_EQ(body[offset++], kExprGetLocal);
- CHECK_EQ(body[offset++], (i & 0x7f) | 0x80);
- CHECK_EQ(body[offset++], (i >> 7) & 0x7f);
- }
- CHECK_EQ(offset, 479);
-}
-
-TEST_F(EncoderTest, LEB_Functions) {
- byte leb_value[5] = {0, 0, 0, 0, 0};
- CheckReadValue(leb_value, 0, 1, kNoError);
- CheckWriteValue(0, 1, leb_value);
- leb_value[0] = 23;
- CheckReadValue(leb_value, 23, 1, kNoError);
- CheckWriteValue(23, 1, leb_value);
- leb_value[0] = 0x80;
- leb_value[1] = 0x01;
- CheckReadValue(leb_value, 128, 2, kNoError);
- CheckWriteValue(128, 2, leb_value);
- leb_value[0] = 0x80;
- leb_value[1] = 0x80;
- leb_value[2] = 0x80;
- leb_value[3] = 0x80;
- leb_value[4] = 0x01;
- CheckReadValue(leb_value, 0x10000000, 5, kNoError);
- CheckWriteValue(0x10000000, 5, leb_value);
- leb_value[0] = 0x80;
- leb_value[1] = 0x80;
- leb_value[2] = 0x80;
- leb_value[3] = 0x80;
- leb_value[4] = 0x80;
- CheckReadValue(leb_value, -1, -1, kInvalidLEB128);
-}
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
new file mode 100644
index 0000000000..b9759332bb
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -0,0 +1,191 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/objects-inl.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/leb-helper.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class LEBHelperTest : public TestWithZone {};
+
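+// LEB128 stores seven payload bits per byte with the top bit as a
+// continuation flag, so sizeof_u32v(v) is the number of 7-bit groups needed
+// for v: 1 byte below 2^7, 2 bytes below 2^14, and so on up to 5 bytes.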
+TEST_F(LEBHelperTest, sizeof_u32v) {
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(0));
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(1));
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(3));
+
+ for (uint32_t i = 4; i < 128; i++) {
+ EXPECT_EQ(1, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 7); i < (1 << 9); i++) {
+ EXPECT_EQ(2, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 14); i < (1 << 16); i += 33) {
+ EXPECT_EQ(3, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 21); i < (1 << 24); i += 33999) {
+ EXPECT_EQ(4, LEBHelper::sizeof_u32v(i));
+ }
+
+ for (uint32_t i = (1 << 28); i < (1 << 31); i += 33997779) {
+ EXPECT_EQ(5, LEBHelper::sizeof_u32v(i));
+ }
+
+ EXPECT_EQ(5, LEBHelper::sizeof_u32v(0xFFFFFFFF));
+}
+
+TEST_F(LEBHelperTest, sizeof_i32v) {
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(0));
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(1));
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(3));
+
+ for (int32_t i = 0; i < (1 << 6); i++) {
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 6); i < (1 << 8); i++) {
+ EXPECT_EQ(2, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 13); i < (1 << 15); i += 31) {
+ EXPECT_EQ(3, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 20); i < (1 << 22); i += 31991) {
+ EXPECT_EQ(4, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = (1 << 27); i < (1 << 29); i += 3199893) {
+ EXPECT_EQ(5, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 6); i <= 0; i++) {
+ EXPECT_EQ(1, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 13); i < -(1 << 6); i++) {
+ EXPECT_EQ(2, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 20); i < -(1 << 18); i += 11) {
+ EXPECT_EQ(3, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 27); i < -(1 << 25); i += 11999) {
+ EXPECT_EQ(4, LEBHelper::sizeof_i32v(i));
+ }
+
+ for (int32_t i = -(1 << 30); i < -(1 << 28); i += 1199999) {
+ EXPECT_EQ(5, LEBHelper::sizeof_i32v(i));
+ }
+}
+
+#define DECLARE_ENCODE_DECODE_CHECKER(ctype, name) \
+ static void CheckEncodeDecode_##name(ctype val) { \
+ static const int kSize = 16; \
+ static byte buffer[kSize]; \
+ byte *ptr = buffer; \
+ LEBHelper::write_##name(&ptr, val); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), \
+ static_cast<size_t>(ptr - buffer)); \
+ Decoder decoder(buffer, buffer + kSize); \
+ unsigned length = 0; \
+ ctype result = decoder.checked_read_##name(buffer, 0, &length); \
+ EXPECT_EQ(val, result); \
+ EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
+ }
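+// Each generated checker round-trips one value: write it with
+// LEBHelper::write_<name>, check the bytes written against sizeof_<name>,
+// then decode it back with Decoder::checked_read_<name> and compare both
+// the value and the decoded length.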
+
+DECLARE_ENCODE_DECODE_CHECKER(int32_t, i32v)
+DECLARE_ENCODE_DECODE_CHECKER(uint32_t, u32v)
+DECLARE_ENCODE_DECODE_CHECKER(int64_t, i64v)
+DECLARE_ENCODE_DECODE_CHECKER(uint64_t, u64v)
+
+TEST_F(LEBHelperTest, WriteAndDecode_u32v) {
+ CheckEncodeDecode_u32v(0);
+ CheckEncodeDecode_u32v(1);
+ CheckEncodeDecode_u32v(5);
+ CheckEncodeDecode_u32v(99);
+ CheckEncodeDecode_u32v(298);
+ CheckEncodeDecode_u32v(87348723);
+ CheckEncodeDecode_u32v(77777);
+
+ for (uint32_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_u32v(val);
+ }
+}
+
+TEST_F(LEBHelperTest, WriteAndDecode_i32v) {
+ CheckEncodeDecode_i32v(0);
+ CheckEncodeDecode_i32v(1);
+ CheckEncodeDecode_i32v(5);
+ CheckEncodeDecode_i32v(99);
+ CheckEncodeDecode_i32v(298);
+ CheckEncodeDecode_i32v(87348723);
+ CheckEncodeDecode_i32v(77777);
+
+ CheckEncodeDecode_i32v(-2);
+ CheckEncodeDecode_i32v(-4);
+ CheckEncodeDecode_i32v(-59);
+ CheckEncodeDecode_i32v(-288);
+ CheckEncodeDecode_i32v(-12608);
+ CheckEncodeDecode_i32v(-87328723);
+ CheckEncodeDecode_i32v(-77377);
+
+ for (uint32_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_i32v(bit_cast<int32_t>(val));
+ }
+
+ for (uint32_t val = 0xFFFFFF3B; val != 0; val = val << 1) {
+ CheckEncodeDecode_i32v(bit_cast<int32_t>(val));
+ }
+}
+
+TEST_F(LEBHelperTest, WriteAndDecode_u64v) {
+ CheckEncodeDecode_u64v(0);
+ CheckEncodeDecode_u64v(1);
+ CheckEncodeDecode_u64v(5);
+ CheckEncodeDecode_u64v(99);
+ CheckEncodeDecode_u64v(298);
+ CheckEncodeDecode_u64v(87348723);
+ CheckEncodeDecode_u64v(77777);
+
+ for (uint64_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_u64v(val);
+ }
+}
+
+TEST_F(LEBHelperTest, WriteAndDecode_i64v) {
+ CheckEncodeDecode_i64v(0);
+ CheckEncodeDecode_i64v(1);
+ CheckEncodeDecode_i64v(5);
+ CheckEncodeDecode_i64v(99);
+ CheckEncodeDecode_i64v(298);
+ CheckEncodeDecode_i64v(87348723);
+ CheckEncodeDecode_i64v(77777);
+
+ CheckEncodeDecode_i64v(-2);
+ CheckEncodeDecode_i64v(-4);
+ CheckEncodeDecode_i64v(-59);
+ CheckEncodeDecode_i64v(-288);
+ CheckEncodeDecode_i64v(-87648723);
+ CheckEncodeDecode_i64v(-77377);
+
+ for (uint64_t val = 0x3a; val != 0; val = val << 1) {
+ CheckEncodeDecode_i64v(bit_cast<int64_t>(val));
+ }
+
+ for (uint64_t val = 0xFFFFFFFFFFFFFF3B; val != 0; val = val << 1) {
+ CheckEncodeDecode_i64v(bit_cast<int64_t>(val));
+ }
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index e77c1cfff5..919ce8e234 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -32,14 +32,12 @@ class WasmLoopAssignmentAnalyzerTest : public TestWithZone {
}
};
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Empty0) {
byte code[] = { 0 };
BitVector* assigned = Analyze(code, code);
CHECK_NULL(assigned);
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Empty1) {
byte code[] = {kExprLoop, 0};
for (int i = 0; i < 5; i++) {
@@ -51,11 +49,10 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Empty1) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
- byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i))};
+ byte code[] = {WASM_LOOP(WASM_SET_ZERO(i))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
CHECK_EQ(j == i, assigned->Contains(j));
@@ -63,11 +60,10 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
- byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i)), WASM_SET_ZERO(1)};
+ byte code[] = {WASM_LOOP(WASM_SET_ZERO(i)), WASM_SET_ZERO(1)};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
CHECK_EQ(j == i, assigned->Contains(j));
@@ -75,12 +71,11 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Two) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
for (int j = 0; j < 5; j++) {
- byte code[] = {WASM_LOOP(2, WASM_SET_ZERO(i), WASM_SET_ZERO(j))};
+ byte code[] = {WASM_LOOP(WASM_SET_ZERO(i), WASM_SET_ZERO(j))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int k = 0; k < assigned->length(); k++) {
bool expected = k == i || k == j;
@@ -90,12 +85,11 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Two) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
num_locals = 5;
for (int i = 0; i < 5; i++) {
byte code[] = {WASM_LOOP(
- 1, WASM_IF_ELSE(WASM_SET_ZERO(0), WASM_SET_ZERO(i), WASM_SET_ZERO(1)))};
+ WASM_IF_ELSE(WASM_SET_ZERO(0), WASM_SET_ZERO(i), WASM_SET_ZERO(1)))};
BitVector* assigned = Analyze(code, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
bool expected = i == j || j == 0 || j == 1;
@@ -104,14 +98,12 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
}
}
-
static byte LEBByte(uint32_t val, byte which) {
byte b = (val >> (which * 7)) & 0x7F;
if (val >> ((which + 1) * 7)) b |= 0x80;
return b;
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
num_locals = 65000;
for (int i = 13; i < 65000; i = static_cast<int>(i * 1.5)) {
@@ -133,11 +125,10 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Break) {
num_locals = 3;
byte code[] = {
- WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_SET_ZERO(1)))),
+ WASM_LOOP(WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_SET_ZERO(1)))),
WASM_SET_ZERO(0)};
BitVector* assigned = Analyze(code, code + arraysize(code));
@@ -147,14 +138,13 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Break) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Loop1) {
num_locals = 5;
byte code[] = {
- WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0),
- WASM_BRV(0, WASM_SET_LOCAL(
- 3, WASM_I32_SUB(WASM_GET_LOCAL(0),
- WASM_I8(1)))))),
+ WASM_LOOP(WASM_IF(
+ WASM_GET_LOCAL(0),
+ WASM_BRV(0, WASM_SET_LOCAL(
+ 3, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(1)))))),
WASM_GET_LOCAL(0)};
BitVector* assigned = Analyze(code, code + arraysize(code));
@@ -164,33 +154,38 @@ TEST_F(WasmLoopAssignmentAnalyzerTest, Loop1) {
}
}
-
TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
num_locals = 6;
const byte kIter = 0;
const byte kSum = 3;
byte code[] = {WASM_BLOCK(
- 3,
WASM_WHILE(
WASM_GET_LOCAL(kIter),
- WASM_BLOCK(2, WASM_SET_LOCAL(
- kSum, WASM_F32_ADD(
- WASM_GET_LOCAL(kSum),
- WASM_LOAD_MEM(MachineType::Float32(),
- WASM_GET_LOCAL(kIter)))),
- WASM_SET_LOCAL(kIter, WASM_I32_SUB(WASM_GET_LOCAL(kIter),
- WASM_I8(4))))),
+ WASM_BLOCK(
+ WASM_SET_LOCAL(
+ kSum, WASM_F32_ADD(WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Float32(),
+ WASM_GET_LOCAL(kIter)))),
+ WASM_SET_LOCAL(kIter,
+ WASM_I32_SUB(WASM_GET_LOCAL(kIter), WASM_I8(4))))),
WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO, WASM_GET_LOCAL(kSum)),
WASM_GET_LOCAL(kIter))};
- BitVector* assigned = Analyze(code + 2, code + arraysize(code));
+ BitVector* assigned = Analyze(code + 1, code + arraysize(code));
for (int j = 0; j < assigned->length(); j++) {
bool expected = j == kIter || j == kSum;
CHECK_EQ(expected, assigned->Contains(j));
}
}
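+
+// A malformed body (here a br_table whose payload runs past the end of the
+// code) must not crash the analyzer; it simply yields no assignment info.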
+TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
+ byte code[] = {kExprLoop, kExprF32Neg, kExprBrTable, 0x0e, 'h', 'e',
+ 'l', 'l', 'o', ',', ' ', 'w',
+ 'o', 'r', 'l', 'd', '!'};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ CHECK_NULL(assigned);
+}
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 44e78653e3..5c9c47ba00 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/test-utils.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
@@ -13,18 +15,66 @@ namespace internal {
namespace wasm {
#define EMPTY_FUNCTION(sig_index) 0, SIG_INDEX(sig_index), U16_LE(0)
-#define EMPTY_FUNCTION_SIZE ((size_t)5)
+#define SIZEOF_EMPTY_FUNCTION ((size_t)5)
#define EMPTY_BODY 0
-#define EMPTY_BODY_SIZE ((size_t)1)
+#define SIZEOF_EMPTY_BODY ((size_t)1)
#define NOP_BODY 2, 0, kExprNop
-#define NOP_BODY_SIZE ((size_t)3)
-#define VOID_VOID_SIG 0, kLocalVoid
-#define VOID_VOID_SIG_SIZE ((size_t)2)
-#define INT_INT_SIG 1, kLocalI32, kLocalI32
-#define INT_INT_SIG_SIZE ((size_t)3)
+#define SIZEOF_NOP_BODY ((size_t)3)
-#define SECTION(NAME, EXTRA_SIZE) \
- U32V_1(WASM_SECTION_##NAME##_SIZE + (EXTRA_SIZE)), WASM_SECTION_##NAME
+#define SIG_ENTRY_i_i SIG_ENTRY_x_x(kLocalI32, kLocalI32)
+
+#define UNKNOWN_EMPTY_SECTION_NAME 1, '\0'
+#define UNKNOWN_SECTION_NAME 4, 'l', 'u', 'l', 'z'
+
+#define SECTION(NAME, EXTRA_SIZE) WASM_SECTION_##NAME, U32V_1(EXTRA_SIZE)
+
+#define SIGNATURES_SECTION(count, ...) \
+ SECTION(SIGNATURES, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
+#define FUNCTION_SIGNATURES_SECTION(count, ...) \
+ SECTION(FUNCTION_SIGNATURES, 1 + (count)), U32V_1(count), __VA_ARGS__
+
+#define FOO_STRING 3, 'f', 'o', 'o'
+#define NO_LOCAL_NAMES 0
+
+#define EMPTY_SIGNATURES_SECTION SECTION(SIGNATURES, 1), 0
+#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(FUNCTION_SIGNATURES, 1), 0
+#define EMPTY_FUNCTION_BODIES_SECTION SECTION(FUNCTION_BODIES, 1), 0
+#define EMPTY_NAMES_SECTION SECTION(NAMES, 1), 0
+
+#define X1(...) __VA_ARGS__
+#define X2(...) __VA_ARGS__, __VA_ARGS__
+#define X3(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
+#define X4(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
+
+#define ONE_EMPTY_FUNCTION WASM_SECTION_FUNCTION_SIGNATURES, 1 + 1 * 1, 1, X1(0)
+
+#define TWO_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 2 * 1, 2, X2(0)
+
+#define THREE_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 3 * 1, 3, X3(0)
+
+#define FOUR_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 4 * 1, 4, X4(0)
+
+#define ONE_EMPTY_BODY \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 1 * (1 + SIZEOF_EMPTY_BODY), 1, \
+ X1(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define TWO_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 2 * (1 + SIZEOF_EMPTY_BODY), 2, \
+ X2(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define THREE_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 3 * (1 + SIZEOF_EMPTY_BODY), 3, \
+ X3(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define FOUR_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 4 * (1 + SIZEOF_EMPTY_BODY), 4, \
+ X4(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define SIGNATURES_SECTION_VOID_VOID \
+ SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v), 1, SIG_ENTRY_v_v
#define EXPECT_VERIFIES(data) \
do { \
@@ -49,6 +99,15 @@ namespace wasm {
} \
} while (false)
+#define EXPECT_OK(result) \
+ do { \
+ EXPECT_TRUE(result.ok()); \
+ if (!result.ok()) { \
+ if (result.val) delete result.val; \
+ return; \
+ } \
+ } while (false)
+
static size_t SizeOfVarInt(size_t value) {
size_t size = 0;
do {
@@ -66,7 +125,7 @@ struct LocalTypePair {
{kLocalF32, kAstF32},
{kLocalF64, kAstF64}};
-class WasmModuleVerifyTest : public TestWithZone {
+class WasmModuleVerifyTest : public TestWithIsolateAndZone {
public:
ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
// Add the WASM magic and version number automatically.
@@ -76,14 +135,14 @@ class WasmModuleVerifyTest : public TestWithZone {
auto temp = new byte[total];
memcpy(temp, header, sizeof(header));
memcpy(temp + sizeof(header), module_start, size);
- ModuleResult result = DecodeWasmModule(nullptr, zone(), temp, temp + total,
- false, kWasmOrigin);
+ ModuleResult result = DecodeWasmModule(isolate(), zone(), temp,
+ temp + total, false, kWasmOrigin);
delete[] temp;
return result;
}
ModuleResult DecodeModuleNoHeader(const byte* module_start,
const byte* module_end) {
- return DecodeWasmModule(nullptr, zone(), module_start, module_end, false,
+ return DecodeWasmModule(isolate(), zone(), module_start, module_end, false,
kWasmOrigin);
}
};
@@ -115,26 +174,26 @@ TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
TEST_F(WasmModuleVerifyTest, OneGlobal) {
static const byte data[] = {
- SECTION(GLOBALS, 7), // --
+ SECTION(GLOBALS, 5), // --
1,
NAME_LENGTH(1),
- 'g', // name
- kMemI32, // memory type
- 0, // exported
+ 'g', // name
+ kLocalI32, // local type
+ 0, // exported
};
{
// Should decode to exactly one global.
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(1, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(1, global->name_length);
- EXPECT_EQ(MachineType::Int32(), global->type);
+ EXPECT_EQ(kAstI32, global->type);
EXPECT_EQ(0, global->offset);
EXPECT_FALSE(global->exported);
@@ -144,6 +203,20 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
+TEST_F(WasmModuleVerifyTest, Global_invalid_type) {
+ static const byte data[] = {
+ SECTION(GLOBALS, 5), // --
+ 1,
+ NAME_LENGTH(1),
+ 'g', // name
+ 64, // invalid local type
+ 0, // exported
+ };
+
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+}
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
static const byte data[] = {
@@ -151,11 +224,10 @@ TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
0, // declare 0 globals
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.val) delete result.val;
}
-
static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
while (true) {
uint32_t next = val >> 7;
@@ -170,19 +242,17 @@ static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
}
}
-
TEST_F(WasmModuleVerifyTest, NGlobals) {
static const byte data[] = {
- NO_NAME, // name length
- kMemI32, // memory type
- 0, // exported
+ NO_NAME, // name length
+ kLocalF32, // local type
+ 0, // exported
};
for (uint32_t i = 0; i < 1000000; i = i * 13 + 1) {
std::vector<byte> buffer;
- size_t size =
- WASM_SECTION_GLOBALS_SIZE + SizeOfVarInt(i) + i * sizeof(data);
- const byte globals[] = {U32V_5(size), WASM_SECTION_GLOBALS};
+ size_t size = SizeOfVarInt(i) + i * sizeof(data);
+ const byte globals[] = {WASM_SECTION_GLOBALS, U32V_5(size)};
for (size_t g = 0; g != sizeof(globals); ++g) {
buffer.push_back(globals[g]);
}
@@ -192,7 +262,7 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
}
ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.val) delete result.val;
}
}
@@ -221,38 +291,37 @@ TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
EXPECT_FAILURE(data);
}
-
TEST_F(WasmModuleVerifyTest, TwoGlobals) {
static const byte data[] = {
- SECTION(GLOBALS, 13),
+ SECTION(GLOBALS, 7),
2,
- NO_NAME, // #0: name length
- kMemF32, // memory type
- 0, // exported
- NO_NAME, // #1: name length
- kMemF64, // memory type
- 1, // exported
+ NO_NAME, // #0: name length
+ kLocalF32, // type
+ 0, // exported
+ NO_NAME, // #1: name length
+ kLocalF64, // type
+ 1, // exported
};
{
// Should decode to exactly two globals.
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(2, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* g0 = &result.val->globals[0];
- WasmGlobal* g1 = &result.val->globals[1];
+ const WasmGlobal* g0 = &result.val->globals[0];
+ const WasmGlobal* g1 = &result.val->globals[1];
EXPECT_EQ(0, g0->name_length);
- EXPECT_EQ(MachineType::Float32(), g0->type);
+ EXPECT_EQ(kAstF32, g0->type);
EXPECT_EQ(0, g0->offset);
EXPECT_FALSE(g0->exported);
EXPECT_EQ(0, g1->name_length);
- EXPECT_EQ(MachineType::Float64(), g1->type);
- EXPECT_EQ(0, g1->offset);
+ EXPECT_EQ(kAstF64, g1->type);
+ EXPECT_EQ(8, g1->offset);
EXPECT_TRUE(g1->exported);
if (result.val) delete result.val;
@@ -261,39 +330,31 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
-
TEST_F(WasmModuleVerifyTest, OneSignature) {
{
- static const byte data[] = {SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1,
- VOID_VOID_SIG};
+ static const byte data[] = {SIGNATURES_SECTION_VOID_VOID};
EXPECT_VERIFIES(data);
}
{
- static const byte data[] = {SECTION(SIGNATURES, 1 + INT_INT_SIG_SIZE), 1,
- INT_INT_SIG};
+ static const byte data[] = {SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_x_x),
+ 1, SIG_ENTRY_i_i};
EXPECT_VERIFIES(data);
}
}
-
TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
static const byte data[] = {
- SECTION(SIGNATURES, 10),
- 3,
- 0,
- kLocalVoid, // void -> void
- 1,
- kLocalI32,
- kLocalF32, // f32 -> i32
- 2,
- kLocalI32,
- kLocalF64,
- kLocalF64, // (f64,f64) -> i32
+ SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_x_x +
+ SIZEOF_SIG_ENTRY_x_xx), // --
+ 3, // --
+ SIG_ENTRY_v_v, // void -> void
+ SIG_ENTRY_x_x(kLocalI32, kLocalF32), // f32 -> i32
+ SIG_ENTRY_x_xx(kLocalI32, kLocalF64, kLocalF64), // f64,f64 -> i32
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(3, result.val->signatures.size());
if (result.val->signatures.size() == 3) {
EXPECT_EQ(0, result.val->signatures[0]->return_count());
@@ -309,262 +370,8 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
-
-TEST_F(WasmModuleVerifyTest, FunctionWithoutSig) {
- static const byte data[] = {
- SECTION(FUNCTIONS, 25), 1,
- // func#0 ------------------------------------------------------
- SIG_INDEX(0), // signature index
- NO_NAME, // name length
- U32_LE(0), // code start offset
- U32_LE(0), // code end offset
- U16_LE(899), // local int32 count
- U16_LE(799), // local int64 count
- U16_LE(699), // local float32 count
- U16_LE(599), // local float64 count
- 0, // exported
- 1 // external
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
- const int kCodeStartOffset = 51;
- const int kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1,
- // sig#0 -------------------------------------------------------
- VOID_VOID_SIG,
- // func#0 ------------------------------------------------------
- SECTION(FUNCTIONS, 19), 1,
- kDeclFunctionLocals | kDeclFunctionExport | kDeclFunctionName,
- SIG_INDEX(0), // signature index
- NAME_LENGTH(2), 'h', 'i', // name
- U16_LE(1466), // local int32 count
- U16_LE(1355), // local int64 count
- U16_LE(1244), // local float32 count
- U16_LE(1133), // local float64 count
- 1, 0, // size
- kExprNop,
- };
-
- {
- // Should decode to exactly one function.
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(0, result.val->globals.size());
- EXPECT_EQ(1, result.val->signatures.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(0, result.val->data_segments.size());
- EXPECT_EQ(0, result.val->function_table.size());
-
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(39, function->name_offset);
- EXPECT_EQ(2, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_EQ(1466, function->local_i32_count);
- EXPECT_EQ(1355, function->local_i64_count);
- EXPECT_EQ(1244, function->local_f32_count);
- EXPECT_EQ(1133, function->local_f64_count);
-
- EXPECT_TRUE(function->exported);
- EXPECT_FALSE(function->external);
-
- if (result.val) delete result.val;
- }
-
- EXPECT_OFF_END_FAILURE(data, 16, sizeof(data));
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneFunctionImported) {
- static const byte data[] = {
- SECTION(SIGNATURES, VOID_VOID_SIG_SIZE), 1,
- // sig#0 -------------------------------------------------------
- VOID_VOID_SIG, SECTION(FUNCTIONS, 6), 1,
- // func#0 ------------------------------------------------------
- kDeclFunctionImport, // no name, no locals, imported
- SIG_INDEX(0),
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions.size());
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(0, function->name_length);
- EXPECT_EQ(0, function->code_start_offset);
- EXPECT_EQ(0, function->code_end_offset);
-
- EXPECT_EQ(0, function->local_i32_count);
- EXPECT_EQ(0, function->local_i64_count);
- EXPECT_EQ(0, function->local_f32_count);
- EXPECT_EQ(0, function->local_f64_count);
-
- EXPECT_FALSE(function->exported);
- EXPECT_TRUE(function->external);
-
- if (result.val) delete result.val;
-}
-
-TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
- static const byte kCodeStartOffset = 40;
- static const byte kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- SECTION(SIGNATURES, 3), 1,
- // sig#0 -------------------------------------------------------
- 0, 0, // void -> void
- SECTION(FUNCTIONS, 7), 1,
- // func#0 ------------------------------------------------------
- 0, // no name, no locals
- 0, 0, // signature index
- 1, 0, // body size
- kExprNop // body
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions.size());
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(0, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_EQ(0, function->local_i32_count);
- EXPECT_EQ(0, function->local_i64_count);
- EXPECT_EQ(0, function->local_f32_count);
- EXPECT_EQ(0, function->local_f64_count);
-
- EXPECT_FALSE(function->exported);
- EXPECT_FALSE(function->external);
-
- if (result.val) delete result.val;
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
- static const byte kCodeStartOffset = 48;
- static const byte kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- SECTION(SIGNATURES, 3), 1,
- // sig#0 -------------------------------------------------------
- 0, 0, // void -> void
- SECTION(FUNCTIONS, 15), 1,
- // func#0 ------------------------------------------------------
- kDeclFunctionLocals, 0, 0, // signature index
- 1, 2, // local int32 count
- 3, 4, // local int64 count
- 5, 6, // local float32 count
- 7, 8, // local float64 count
- 1, 0, // body size
- kExprNop // body
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->functions.size());
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(0, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_EQ(513, function->local_i32_count);
- EXPECT_EQ(1027, function->local_i64_count);
- EXPECT_EQ(1541, function->local_f32_count);
- EXPECT_EQ(2055, function->local_f64_count);
-
- EXPECT_FALSE(function->exported);
- EXPECT_FALSE(function->external);
-
- if (result.val) delete result.val;
-}
-
-
-TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
- static const byte kCodeStartOffset = 75;
- static const byte kCodeEndOffset = kCodeStartOffset + 3;
- static const byte kDataSegmentSourceOffset = kCodeEndOffset + 20;
-
- static const byte data[] = {
- SECTION(MEMORY, 3), 28, 28, 1,
- // global#0 --------------------------------------------------
- SECTION(GLOBALS, 7), 1,
- 0, // name length
- kMemU8, // memory type
- 0, // exported
- // sig#0 -----------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
- // func#0 ----------------------------------------------------
- SECTION(FUNCTIONS, 20), 1, kDeclFunctionLocals | kDeclFunctionName, 0,
- 0, // signature index
- 2, 'h', 'i', // name
- 1, 2, // local int32 count
- 3, 4, // local int64 count
- 5, 6, // local float32 count
- 7, 8, // local float64 count
- 3, 0, // body size
- kExprNop, // func#0 body
- kExprNop, // func#0 body
- kExprNop, // func#0 body
- // segment#0 -------------------------------------------------
- SECTION(DATA_SEGMENTS, 14), 1,
- U32V_3(0x8b3ae), // dest addr
- U32V_1(5), // source size
- 0, 1, 2, 3, 4, // data bytes
- // rest ------------------------------------------------------
- SECTION(END, 0),
- };
-
- {
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
- EXPECT_EQ(1, result.val->globals.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->data_segments.size());
-
- WasmGlobal* global = &result.val->globals.back();
-
- EXPECT_EQ(0, global->name_length);
- EXPECT_EQ(MachineType::Uint8(), global->type);
- EXPECT_EQ(0, global->offset);
- EXPECT_FALSE(global->exported);
-
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(63, function->name_offset);
- EXPECT_EQ(2, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_FALSE(function->exported);
- EXPECT_FALSE(function->external);
-
- WasmDataSegment* segment = &result.val->data_segments.back();
-
- EXPECT_EQ(0x8b3ae, segment->dest_addr);
- EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
- EXPECT_EQ(5, segment->source_size);
- EXPECT_TRUE(segment->init);
-
- if (result.val) delete result.val;
- }
-}
-
-
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
- const byte kDataSegmentSourceOffset = 39;
+ const byte kDataSegmentSourceOffset = 30;
const byte data[] = {
SECTION(MEMORY, 3),
28,
@@ -582,12 +389,12 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
{
EXPECT_VERIFIES(data);
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(0, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(1, result.val->data_segments.size());
- WasmDataSegment* segment = &result.val->data_segments.back();
+ const WasmDataSegment* segment = &result.val->data_segments.back();
EXPECT_EQ(0x9bbaa, segment->dest_addr);
EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
@@ -600,17 +407,16 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
EXPECT_OFF_END_FAILURE(data, 13, sizeof(data));
}
-
TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
- const byte kDataSegment0SourceOffset = 39;
- const byte kDataSegment1SourceOffset = 39 + 8;
+ const byte kDataSegment0SourceOffset = 30;
+ const byte kDataSegment1SourceOffset = 30 + 8;
const byte data[] = {
SECTION(MEMORY, 3),
28,
28,
1,
- SECTION(DATA_SEGMENTS, 31),
+ SECTION(DATA_SEGMENTS, 23),
2, // segment count
U32V_3(0x7ffee), // #0: dest addr
U32V_1(4), // source size
@@ -634,13 +440,13 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
{
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(0, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(2, result.val->data_segments.size());
- WasmDataSegment* s0 = &result.val->data_segments[0];
- WasmDataSegment* s1 = &result.val->data_segments[1];
+ const WasmDataSegment* s0 = &result.val->data_segments[0];
+ const WasmDataSegment* s1 = &result.val->data_segments[1];
EXPECT_EQ(0x7ffee, s0->dest_addr);
EXPECT_EQ(kDataSegment0SourceOffset, s0->source_offset);
@@ -670,7 +476,7 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
mem_pages,
mem_pages,
1,
- SECTION(DATA_SEGMENTS, 14),
+ SECTION(DATA_SEGMENTS, 8),
1,
U32V_3(dest_addr),
U32V_1(source_size),
@@ -687,41 +493,36 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
}
}
-
-// To make below tests for indirect calls much shorter.
-#define FUNCTION(sig_index, external) kDeclFunctionImport, SIG_INDEX(sig_index)
-
TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
- // func#0 ------------------------------------------------------
- SECTION(FUNCTIONS, 4), 1, FUNCTION(0, 0),
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs ------------------------------------------------------
+ ONE_EMPTY_FUNCTION,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 2), 1, U32V_1(0)};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(1, result.val->signatures.size());
EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->function_table.size());
- EXPECT_EQ(0, result.val->function_table[0]);
+ EXPECT_EQ(1, result.val->function_tables.size());
+ EXPECT_EQ(1, result.val->function_tables[0].values.size());
+ EXPECT_EQ(0, result.val->function_tables[0].values[0]);
}
if (result.val) delete result.val;
}
-
TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 5), 2, 0, 0, // void -> void
- 0, kLocalI32, // void -> i32
- // func#0 ------------------------------------------------------
- SECTION(FUNCTIONS, 13), 4, FUNCTION(0, 1), // --
- FUNCTION(1, 1), // --
- FUNCTION(0, 1), // --
- FUNCTION(1, 1), // --
+ SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v + SIZEOF_SIG_ENTRY_v_x),
+ 2, // --
+ SIG_ENTRY_v_v, // void -> void
+ SIG_ENTRY_v_x(kLocalI32), // void -> i32
+ // funcs ------------------------------------------------------
+ FOUR_EMPTY_FUNCTIONS,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 9), 8,
U32V_1(0), // --
@@ -732,26 +533,26 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
U32V_1(1), // --
U32V_1(2), // --
U32V_1(3), // --
- };
+ FOUR_EMPTY_BODIES};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.ok()) {
EXPECT_EQ(2, result.val->signatures.size());
EXPECT_EQ(4, result.val->functions.size());
- EXPECT_EQ(8, result.val->function_table.size());
+ EXPECT_EQ(1, result.val->function_tables.size());
+ EXPECT_EQ(8, result.val->function_tables[0].values.size());
for (int i = 0; i < 8; i++) {
- EXPECT_EQ(i & 3, result.val->function_table[i]);
+ EXPECT_EQ(i & 3, result.val->function_tables[0].values[i]);
}
}
if (result.val) delete result.val;
}
-
TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
+ SIGNATURES_SECTION_VOID_VOID,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 3), 1, 0, 0,
};
@@ -759,13 +560,12 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
EXPECT_FAILURE(data);
}
-
TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
static const byte data[] = {
// sig#0 -------------------------------------------------------
- SECTION(SIGNATURES, 3), 1, 0, 0, // void -> void
+ SIGNATURES_SECTION_VOID_VOID,
// functions ---------------------------------------------------
- SECTION(FUNCTIONS, 4), 1, FUNCTION(0, 1),
+ ONE_EMPTY_FUNCTION,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 3), 1, 1, 0,
};
@@ -773,12 +573,10 @@ TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
EXPECT_FAILURE(data);
}
-
class WasmSignatureDecodeTest : public TestWithZone {};
-
TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
- static const byte data[] = {0, 0};
+ static const byte data[] = {SIG_ENTRY_v_v};
base::AccountingAllocator allocator;
Zone zone(&allocator);
FunctionSig* sig =
@@ -789,11 +587,10 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
EXPECT_EQ(0, sig->return_count());
}
-
TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair ret_type = kLocalTypes[i];
- const byte data[] = {0, ret_type.code};
+ const byte data[] = {SIG_ENTRY_x(ret_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -804,11 +601,10 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair param_type = kLocalTypes[i];
- const byte data[] = {1, 0, param_type.code};
+ const byte data[] = {SIG_ENTRY_v_x(param_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -819,15 +615,12 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair ret_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalTypePair param_type = kLocalTypes[j];
- const byte data[] = {1, // param count
- ret_type.code, // ret
- param_type.code}; // param
+ const byte data[] = {SIG_ENTRY_x_x(ret_type.code, param_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -840,16 +633,13 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
LocalTypePair p0_type = kLocalTypes[i];
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
LocalTypePair p1_type = kLocalTypes[j];
- const byte data[] = {2, // param count
- kLocalI32, // ret
- p0_type.code, // p0
- p1_type.code}; // p1
+ const byte data[] = {
+ SIG_ENTRY_x_xx(kLocalI32, p0_type.code, p1_type.code)};
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -862,7 +652,6 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
byte data[256];
for (int p = 0; p <= 255; p = p + 1 + p * 3) {
@@ -877,11 +666,10 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
}
}
-
TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
byte kInvalidType = 76;
- for (int i = 1; i < 3; i++) {
- byte data[] = {2, kLocalI32, kLocalI32, kLocalI32};
+ for (size_t i = 0; i < SIZEOF_SIG_ENTRY_x_xx; i++) {
+ byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
data[i] = kInvalidType;
FunctionSig* sig =
DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
@@ -889,47 +677,56 @@ TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
}
}
-
-TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type) {
- static const int kParamCount = 3;
- for (int i = 0; i < kParamCount; i++) {
- byte data[] = {kParamCount, kLocalI32, kLocalI32, kLocalI32, kLocalI32};
- data[i + 2] = kLocalVoid;
- FunctionSig* sig =
- DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
- EXPECT_EQ(nullptr, sig);
- }
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_ret_type1) {
+ static const byte data[] = {SIG_ENTRY_x_x(kLocalVoid, kLocalI32)};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
}
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type1) {
+ static const byte data[] = {SIG_ENTRY_x_x(kLocalI32, kLocalVoid)};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+}
-class WasmFunctionVerifyTest : public TestWithZone {};
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type2) {
+ static const byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalVoid)};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+}
+class WasmFunctionVerifyTest : public TestWithIsolateAndZone {};
TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
static const byte data[] = {
- 0, kLocalVoid, // signature
- 4, // locals
- 3, kLocalI32, // --
- 4, kLocalI64, // --
- 5, kLocalF32, // --
- 6, kLocalF64, // --
- kExprNop // body
+ SIG_ENTRY_v_v, // signature entry
+ 4, // locals
+ 3,
+ kLocalI32, // --
+ 4,
+ kLocalI64, // --
+ 5,
+ kLocalF32, // --
+ 6,
+ kLocalF64, // --
+ kExprNop // body
};
- FunctionResult result = DecodeWasmFunction(nullptr, zone(), nullptr, data,
+ FunctionResult result = DecodeWasmFunction(isolate(), zone(), nullptr, data,
data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
if (result.val && result.ok()) {
WasmFunction* function = result.val;
EXPECT_EQ(0, function->sig->parameter_count());
EXPECT_EQ(0, function->sig->return_count());
EXPECT_EQ(0, function->name_offset);
- EXPECT_EQ(2, function->code_start_offset);
+ EXPECT_EQ(SIZEOF_SIG_ENTRY_v_v, function->code_start_offset);
EXPECT_EQ(arraysize(data), function->code_end_offset);
// TODO(titzer): verify encoding of local declarations
- EXPECT_FALSE(function->external);
- EXPECT_FALSE(function->exported);
}
if (result.val) delete result.val;
@@ -942,47 +739,51 @@ TEST_F(WasmModuleVerifyTest, SectionWithoutNameLength) {
TEST_F(WasmModuleVerifyTest, TheLoneliestOfValidModulesTheTrulyEmptyOne) {
const byte data[] = {
- 1, // Section size.
0, // Empty section name.
// No section name, no content, nothing but sadness.
+ 0, // No section content.
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionEmpty) {
const byte data[] = {
- 5, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME, 0,
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, OnlyUnknownSectionNonEmpty) {
const byte data[] = {
- 10, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
- // Section content:
- 0xff, 0xff, 0xff, 0xff, 0xff,
+ UNKNOWN_SECTION_NAME,
+ 5, // section size
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff, // section data
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, SignatureFollowedByEmptyUnknownSection) {
const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- 5, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME,
+ 0 // empty section
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSection) {
const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- 10, // Section size.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME,
+ 5, // section size
0xff, 0xff, 0xff, 0xff, 0xff,
};
EXPECT_VERIFIES(data);
@@ -990,29 +791,46 @@ TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSection) {
TEST_F(WasmModuleVerifyTest, SignatureFollowedByUnknownSectionWithLongLEB) {
const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE), 1, VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
// -----------------------------------------------------------
- 0x85, 0x80, 0x80, 0x80, 0x00, // Section size: 1 but in a 5-byte LEB.
- 4, 'l', 'u', 'l', 'z', // unknown section.
+ UNKNOWN_SECTION_NAME, 0x81, 0x80, 0x80, 0x80,
+ 0x00, // section size: 1 but in a 5-byte LEB
+ 0,
};
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionOverflow) {
static const byte data[] = {
- 13, // Section size.
- 1, // Section name length.
- '\0', // Section name.
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 9, // section size
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10, // 10 byte section
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionUnderflow) {
static const byte data[] = {
- 0xff, 0xff, 0xff, 0xff, 0x0f, // Section size LEB128 0xffffffff
- 1, '\0', // Section name and name length.
- 1, 2, 3, 4, // 4 byte section
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0x0f, // Section size LEB128 0xffffffff
+ 1,
+ 2,
+ 3,
+ 4, // 4 byte section
};
EXPECT_FAILURE(data);
}
@@ -1020,36 +838,42 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionUnderflow) {
TEST_F(WasmModuleVerifyTest, UnknownSectionLoop) {
// Would infinite loop decoding if wrapping and allowed.
static const byte data[] = {
- 0xfa, 0xff, 0xff, 0xff, 0x0f, // Section size LEB128 0xfffffffa
- 1, '\0', // Section name and name length.
- 1, 2, 3, 4, // 4 byte section
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 1,
+ 2,
+ 3,
+ 4, // 4 byte section
+ 0xfa,
+ 0xff,
+ 0xff,
+ 0xff,
+ 0x0f, // Section size LEB128 0xfffffffa
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
static const byte data[] = {
- 3, // Section size.
+ UNKNOWN_EMPTY_SECTION_NAME,
+ 1, // section size
+ 0, // one byte section
+ SECTION(GLOBALS, 4),
1,
- '\0', // Section name: LEB128 1, string '\0'
- 0, // one byte section
- SECTION(GLOBALS, 7),
- 1,
- 0, // name length
- kMemI32, // memory type
- 0, // exported
+ 0, // name length
+ kLocalI32, // local type
+ 0, // exported
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_TRUE(result.ok());
+ EXPECT_OK(result);
EXPECT_EQ(1, result.val->globals.size());
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(0, global->name_length);
- EXPECT_EQ(MachineType::Int32(), global->type);
+ EXPECT_EQ(kAstI32, global->type);
EXPECT_EQ(0, global->offset);
EXPECT_FALSE(global->exported);
@@ -1062,26 +886,36 @@ TEST_F(WasmModuleVerifyTest, ImportTable_empty) {
EXPECT_VERIFIES(data);
}
-TEST_F(WasmModuleVerifyTest, ImportTable_nosigs) {
+TEST_F(WasmModuleVerifyTest, ImportTable_nosigs1) {
static const byte data[] = {SECTION(IMPORT_TABLE, 1), 0};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ImportTable_nosigs2) {
+ static const byte data[] = {
+ SECTION(IMPORT_TABLE, 6), 1, // sig table
+ IMPORT_SIG_INDEX(0), // sig index
+ NAME_LENGTH(1), 'm', // module name
+ NAME_LENGTH(1), 'f', // function name
+ };
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_sig) {
static const byte data[] = {
- SECTION(SIGNATURES, 1), 0, SECTION(IMPORT_TABLE, 6), 1,
- IMPORT_SIG_INDEX(0), // sig index
- NAME_LENGTH(1), 'm', // module name
- NAME_LENGTH(1), 'f', // function name
+ SECTION(SIGNATURES, 1), 0, // --
+ SECTION(IMPORT_TABLE, 6), 1, // --
+ IMPORT_SIG_INDEX(0), // sig index
+ NAME_LENGTH(1), 'm', // module name
+ NAME_LENGTH(1), 'f', // function name
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
SECTION(IMPORT_TABLE, 6),
1, // --
IMPORT_SIG_INDEX(0), // sig index
@@ -1095,9 +929,8 @@ TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
TEST_F(WasmModuleVerifyTest, ImportTable_invalid_module) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
SECTION(IMPORT_TABLE, 6),
1, // --
IMPORT_SIG_INDEX(0), // sig index
@@ -1110,9 +943,8 @@ TEST_F(WasmModuleVerifyTest, ImportTable_invalid_module) {
TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
SECTION(IMPORT_TABLE, 6),
1,
IMPORT_SIG_INDEX(0), // sig index
@@ -1126,28 +958,33 @@ TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
- static const byte data[] = {SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1,
- VOID_VOID_SIG,
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1,
- EMPTY_FUNCTION(0),
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION,
SECTION(EXPORT_TABLE, 1),
- 0};
- EXPECT_VERIFIES(data);
+ 0, // --
+ ONE_EMPTY_BODY};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(0, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty2) {
- static const byte data[] = {SECTION(SIGNATURES, 1), 0,
- SECTION(FUNCTIONS, 1), 0,
- SECTION(EXPORT_TABLE, 1), 0};
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1), 0, SECTION(EXPORT_TABLE, 1), 0 // --
+ };
// TODO(titzer): current behavior treats empty functions section as missing.
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions1) {
- static const byte data[] = {SECTION(SIGNATURES, 1), 0,
- SECTION(EXPORT_TABLE, 1), 0};
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1), 0, SECTION(EXPORT_TABLE, 1), 0 // --
+ };
EXPECT_FAILURE(data);
}
@@ -1157,89 +994,88 @@ TEST_F(WasmModuleVerifyTest, ExportTable_NoFunctions2) {
}
TEST_F(WasmModuleVerifyTest, ExportTableOne) {
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1, // functions
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 7),
- 1, // exports
- FUNC_INDEX(0), // --
- NO_NAME // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
+ SECTION(EXPORT_TABLE, 3),
+ 1, // exports
+ FUNC_INDEX(0), // --
+ NO_NAME, // --
+ ONE_EMPTY_BODY};
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(1, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1, // functions
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 12),
- 2, // exports
- FUNC_INDEX(0), // --
- NAME_LENGTH(4),
- 'n',
- 'a',
- 'm',
- 'e', // --
- FUNC_INDEX(0), // --
- NAME_LENGTH(3),
- 'n',
- 'o',
- 'm' // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
+ SECTION(EXPORT_TABLE, 12),
+ 2, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(4),
+ 'n',
+ 'a',
+ 'm',
+ 'e', // --
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(3),
+ 'n',
+ 'o',
+ 'm', // --
+ ONE_EMPTY_BODY};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(2, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThree) {
- static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + 3 * EMPTY_FUNCTION_SIZE),
- 3, // functions
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 10),
- 3, // exports
- FUNC_INDEX(0), // --
- NAME_LENGTH(1),
- 'a', // --
- FUNC_INDEX(1), // --
- NAME_LENGTH(1),
- 'b', // --
- FUNC_INDEX(2), // --
- NAME_LENGTH(1),
- 'c' // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ THREE_EMPTY_FUNCTIONS,
+ SECTION(EXPORT_TABLE, 10),
+ 3, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(1),
+ 'a', // --
+ FUNC_INDEX(1), // --
+ NAME_LENGTH(1),
+ 'b', // --
+ FUNC_INDEX(2), // --
+ NAME_LENGTH(1),
+ 'c', // --
+ THREE_EMPTY_BODIES};
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(3, result.val->functions.size());
+ EXPECT_EQ(3, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
for (int i = 0; i < 6; i++) {
- const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + 3 * EMPTY_FUNCTION_SIZE),
- 3, // functions
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 5),
- 1, // exports
- FUNC_INDEX(i), // --
- NAME_LENGTH(2),
- 'e',
- 'x', // --
- };
+ const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ THREE_EMPTY_FUNCTIONS,
+ SECTION(EXPORT_TABLE, 5),
+ 1, // exports
+ FUNC_INDEX(i), // --
+ NAME_LENGTH(2),
+ 'e',
+ 'x', // --
+ THREE_EMPTY_BODIES};
if (i < 3) {
EXPECT_VERIFIES(data);
@@ -1251,12 +1087,9 @@ TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
static const byte data[] = {
- SECTION(SIGNATURES, 1 + VOID_VOID_SIG_SIZE),
- 1, // sigs
- VOID_VOID_SIG, // --
- SECTION(FUNCTIONS, 1 + EMPTY_FUNCTION_SIZE),
- 1, // functions
- EMPTY_FUNCTION(0), // --
+ // signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
SECTION(EXPORT_TABLE, 1 + 6),
1, // exports
FUNC_INDEX(0), // --
@@ -1270,67 +1103,68 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
}
}
-#define SIGNATURES_SECTION(count, ...) \
- SECTION(SIGNATURES, 1 + 3 * (count)), U32V_1(count), __VA_ARGS__
-#define FUNCTION_SIGNATURES_SECTION(count, ...) \
- SECTION(FUNCTION_SIGNATURES, 1 + (count)), U32V_1(count), __VA_ARGS__
-
-#define FOO_STRING 3, 'f', 'o', 'o'
-#define NO_LOCAL_NAMES 0
-
-#define EMPTY_SIGNATURES_SECTION SECTION(SIGNATURES, 1), 0
-#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(FUNCTION_SIGNATURES, 1), 0
-#define EMPTY_FUNCTION_BODIES_SECTION SECTION(FUNCTION_BODIES, 1), 0
-#define EMPTY_NAMES_SECTION SECTION(NAMES, 1), 0
-
TEST_F(WasmModuleVerifyTest, FunctionSignatures_empty) {
- static const byte data[] = {SECTION(SIGNATURES, 1), 0,
- SECTION(FUNCTION_SIGNATURES, 1), 0};
+ static const byte data[] = {
+ SECTION(SIGNATURES, 1), 0, // --
+ SECTION(FUNCTION_SIGNATURES, 1), 0 // --
+ }; // --
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionSignatures_one) {
- static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
- FUNCTION_SIGNATURES_SECTION(1, 0)};
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0) // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_empty) {
- static const byte data[] = {EMPTY_SIGNATURES_SECTION,
- EMPTY_FUNCTION_SIGNATURES_SECTION,
- EMPTY_FUNCTION_BODIES_SECTION};
+ static const byte data[] = {
+ EMPTY_SIGNATURES_SECTION, // --
+ EMPTY_FUNCTION_SIGNATURES_SECTION, // --
+ EMPTY_FUNCTION_BODIES_SECTION // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), FUNCTION_SIGNATURES_SECTION(1, 0),
- SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE), 1, EMPTY_BODY};
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY), 1, EMPTY_BODY // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_one_nop) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), FUNCTION_SIGNATURES_SECTION(1, 0),
- SECTION(FUNCTION_BODIES, 1 + NOP_BODY_SIZE), 1, NOP_BODY};
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_NOP_BODY), 1, NOP_BODY // --
+ };
EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch1) {
- static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
- FUNCTION_SIGNATURES_SECTION(2, 0, 0),
- SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE), 1,
- EMPTY_BODY};
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY), 1, // --
+ EMPTY_BODY // --
+ };
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, FunctionBodies_count_mismatch2) {
- static const byte data[] = {SIGNATURES_SECTION(1, VOID_VOID_SIG),
- FUNCTION_SIGNATURES_SECTION(1, 0),
- SECTION(FUNCTION_BODIES, 1 + 2 * NOP_BODY_SIZE),
- 2,
- NOP_BODY,
- NOP_BODY};
+ static const byte data[] = {
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(1, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + 2 * SIZEOF_NOP_BODY), // --
+ 2, // --
+ NOP_BODY, // --
+ NOP_BODY // --
+ };
EXPECT_FAILURE(data);
}
@@ -1343,9 +1177,9 @@ TEST_F(WasmModuleVerifyTest, Names_empty) {
TEST_F(WasmModuleVerifyTest, Names_one_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
FUNCTION_SIGNATURES_SECTION(1, 0), // --
- SECTION(FUNCTION_BODIES, 1 + EMPTY_BODY_SIZE),
+ SECTION(FUNCTION_BODIES, 1 + SIZEOF_EMPTY_BODY),
1,
EMPTY_BODY, // --
SECTION(NAMES, 1 + 5),
@@ -1358,9 +1192,9 @@ TEST_F(WasmModuleVerifyTest, Names_one_empty) {
TEST_F(WasmModuleVerifyTest, Names_two_empty) {
static const byte data[] = {
- SIGNATURES_SECTION(1, VOID_VOID_SIG), // --
- FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
- SECTION(FUNCTION_BODIES, 1 + 2 * EMPTY_BODY_SIZE), // --
+ SIGNATURES_SECTION(1, SIG_ENTRY_v_v), // --
+ FUNCTION_SIGNATURES_SECTION(2, 0, 0), // --
+ SECTION(FUNCTION_BODIES, 1 + 2 * SIZEOF_EMPTY_BODY), // --
2,
EMPTY_BODY,
EMPTY_BODY, // --
diff --git a/deps/v8/test/unittests/wasm/switch-logic-unittest.cc b/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
new file mode 100644
index 0000000000..be587c28bd
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
@@ -0,0 +1,89 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/switch-logic.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+class SwitchLogicTest : public TestWithZone {};
+
+void CheckNodeValues(CaseNode* node, int begin, int end) {
+ CHECK_EQ(node->begin, begin);
+ CHECK_EQ(node->end, end);
+}
+
+TEST_F(SwitchLogicTest, Single_Table_Test) {
+ ZoneVector<int> values(zone());
+ values.push_back(14);
+ values.push_back(12);
+ values.push_back(15);
+ values.push_back(19);
+ values.push_back(18);
+ values.push_back(16);
+ CaseNode* root = OrderCases(&values, zone());
+ CHECK_NULL(root->left);
+ CHECK_NULL(root->right);
+ CheckNodeValues(root, 12, 19);
+}
+
+TEST_F(SwitchLogicTest, Balanced_Tree_Test) {
+ ZoneVector<int> values(zone());
+ values.push_back(5);
+ values.push_back(1);
+ values.push_back(6);
+ values.push_back(9);
+ values.push_back(-4);
+ CaseNode* root = OrderCases(&values, zone());
+ CheckNodeValues(root, 5, 5);
+ CheckNodeValues(root->left, -4, -4);
+ CHECK_NULL(root->left->left);
+ CheckNodeValues(root->left->right, 1, 1);
+ CHECK_NULL(root->left->right->left);
+ CHECK_NULL(root->left->right->right);
+ CheckNodeValues(root->right, 6, 6);
+ CHECK_NULL(root->right->left);
+ CheckNodeValues(root->right->right, 9, 9);
+ CHECK_NULL(root->right->right->left);
+ CHECK_NULL(root->right->right->right);
+}
+
+TEST_F(SwitchLogicTest, Hybrid_Test) {
+ ZoneVector<int> values(zone());
+ values.push_back(1);
+ values.push_back(2);
+ values.push_back(3);
+ values.push_back(4);
+ values.push_back(7);
+ values.push_back(10);
+ values.push_back(11);
+ values.push_back(12);
+ values.push_back(13);
+ values.push_back(16);
+ CaseNode* root = OrderCases(&values, zone());
+ CheckNodeValues(root, 7, 7);
+ CheckNodeValues(root->left, 1, 4);
+ CheckNodeValues(root->right, 10, 13);
+ CheckNodeValues(root->right->right, 16, 16);
+}
+
+TEST_F(SwitchLogicTest, Single_Case) {
+ ZoneVector<int> values(zone());
+ values.push_back(3);
+ CaseNode* root = OrderCases(&values, zone());
+ CheckNodeValues(root, 3, 3);
+ CHECK_NULL(root->left);
+ CHECK_NULL(root->right);
+}
+
+TEST_F(SwitchLogicTest, Empty_Case) {
+ ZoneVector<int> values(zone());
+ CaseNode* root = OrderCases(&values, zone());
+ CHECK_NULL(root);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index ec188c00c9..2b782f5dc7 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -18,7 +18,6 @@ class WasmMacroGenTest : public TestWithZone {};
EXPECT_EQ(size, sizeof(code)); \
} while (false)
-
TEST_F(WasmMacroGenTest, Constants) {
EXPECT_SIZE(2, WASM_ONE);
EXPECT_SIZE(2, WASM_ZERO);
@@ -48,47 +47,44 @@ TEST_F(WasmMacroGenTest, Constants) {
EXPECT_SIZE(9, WASM_F64(-9818934.0));
}
-
TEST_F(WasmMacroGenTest, Statements) {
EXPECT_SIZE(1, WASM_NOP);
EXPECT_SIZE(4, WASM_SET_LOCAL(0, WASM_ZERO));
- EXPECT_SIZE(4, WASM_STORE_GLOBAL(0, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_SET_GLOBAL(0, WASM_ZERO));
EXPECT_SIZE(7, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(4, WASM_IF(WASM_ZERO, WASM_NOP));
+ EXPECT_SIZE(5, WASM_IF(WASM_ZERO, WASM_NOP));
- EXPECT_SIZE(5, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(7, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
EXPECT_SIZE(5, WASM_SELECT(WASM_ZERO, WASM_NOP, WASM_NOP));
EXPECT_SIZE(3, WASM_BR(0));
EXPECT_SIZE(5, WASM_BR_IF(0, WASM_ZERO));
- EXPECT_SIZE(3, WASM_BLOCK(1, WASM_NOP));
- EXPECT_SIZE(4, WASM_BLOCK(2, WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(3, WASM_BLOCK(WASM_NOP));
+ EXPECT_SIZE(4, WASM_BLOCK(WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_NOP));
EXPECT_SIZE(5, WASM_INFINITE_LOOP);
- EXPECT_SIZE(3, WASM_LOOP(1, WASM_NOP));
- EXPECT_SIZE(4, WASM_LOOP(2, WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
- EXPECT_SIZE(5, WASM_LOOP(1, WASM_BR(0)));
- EXPECT_SIZE(7, WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+ EXPECT_SIZE(3, WASM_LOOP(WASM_NOP));
+ EXPECT_SIZE(4, WASM_LOOP(WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_NOP));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_BR(0)));
+ EXPECT_SIZE(7, WASM_LOOP(WASM_BR_IF(0, WASM_ZERO)));
- EXPECT_SIZE(1, WASM_RETURN0);
- EXPECT_SIZE(3, WASM_RETURN(WASM_ZERO));
- EXPECT_SIZE(5, WASM_RETURN(WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(2, WASM_RETURN0);
+ EXPECT_SIZE(4, WASM_RETURN1(WASM_ZERO));
EXPECT_SIZE(1, WASM_UNREACHABLE);
}
-
TEST_F(WasmMacroGenTest, MacroStatements) {
- EXPECT_SIZE(8, WASM_WHILE(WASM_I8(0), WASM_NOP));
+ EXPECT_SIZE(10, WASM_WHILE(WASM_I8(0), WASM_NOP));
EXPECT_SIZE(7, WASM_INC_LOCAL(0));
EXPECT_SIZE(7, WASM_INC_LOCAL_BY(0, 3));
@@ -97,63 +93,62 @@ TEST_F(WasmMacroGenTest, MacroStatements) {
}
TEST_F(WasmMacroGenTest, BrTable) {
- EXPECT_SIZE(8, WASM_BR_TABLE(WASM_ZERO, 1, BR_TARGET(1)));
+ EXPECT_SIZE(9, WASM_BR_TABLE(WASM_ZERO, 1, BR_TARGET(1)));
+ EXPECT_SIZE(11, WASM_BR_TABLEV(WASM_ZERO, WASM_ZERO, 1, BR_TARGET(1)));
}
-
TEST_F(WasmMacroGenTest, Expressions) {
EXPECT_SIZE(2, WASM_GET_LOCAL(0));
EXPECT_SIZE(2, WASM_GET_LOCAL(1));
EXPECT_SIZE(2, WASM_GET_LOCAL(12));
- EXPECT_SIZE(2, WASM_LOAD_GLOBAL(0));
- EXPECT_SIZE(2, WASM_LOAD_GLOBAL(1));
- EXPECT_SIZE(2, WASM_LOAD_GLOBAL(12));
+ EXPECT_SIZE(2, WASM_GET_GLOBAL(0));
+ EXPECT_SIZE(2, WASM_GET_GLOBAL(1));
+ EXPECT_SIZE(2, WASM_GET_GLOBAL(12));
EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));
EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO));
EXPECT_SIZE(5, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO));
EXPECT_SIZE(3, WASM_NOT(WASM_ZERO));
- EXPECT_SIZE(4, WASM_BRV(1, WASM_ZERO));
- EXPECT_SIZE(6, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_BRV(1, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(4, WASM_BLOCK(1, WASM_ZERO));
- EXPECT_SIZE(5, WASM_BLOCK(2, WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(6, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_BLOCK(WASM_ZERO));
+ EXPECT_SIZE(5, WASM_BLOCK(WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_BLOCK(WASM_NOP, WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(4, WASM_LOOP(1, WASM_ZERO));
- EXPECT_SIZE(5, WASM_LOOP(2, WASM_NOP, WASM_ZERO));
- EXPECT_SIZE(6, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(4, WASM_LOOP(WASM_ZERO));
+ EXPECT_SIZE(5, WASM_LOOP(WASM_NOP, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_LOOP(WASM_NOP, WASM_NOP, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallFunction) {
- EXPECT_SIZE(2, WASM_CALL_FUNCTION0(0));
- EXPECT_SIZE(2, WASM_CALL_FUNCTION0(1));
- EXPECT_SIZE(2, WASM_CALL_FUNCTION0(11));
+ EXPECT_SIZE(3, WASM_CALL_FUNCTION0(0));
+ EXPECT_SIZE(3, WASM_CALL_FUNCTION0(1));
+ EXPECT_SIZE(3, WASM_CALL_FUNCTION0(11));
- EXPECT_SIZE(4, WASM_CALL_FUNCTION(0, WASM_ZERO));
- EXPECT_SIZE(6, WASM_CALL_FUNCTION(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_FUNCTION1(0, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_CALL_FUNCTION2(1, WASM_ZERO, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallImport) {
- EXPECT_SIZE(2, WASM_CALL_IMPORT0(0));
- EXPECT_SIZE(2, WASM_CALL_IMPORT0(1));
- EXPECT_SIZE(2, WASM_CALL_IMPORT0(11));
+ EXPECT_SIZE(3, WASM_CALL_IMPORT0(0));
+ EXPECT_SIZE(3, WASM_CALL_IMPORT0(1));
+ EXPECT_SIZE(3, WASM_CALL_IMPORT0(11));
- EXPECT_SIZE(4, WASM_CALL_IMPORT(0, WASM_ZERO));
- EXPECT_SIZE(6, WASM_CALL_IMPORT(1, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_IMPORT1(0, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_CALL_IMPORT2(1, WASM_ZERO, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, CallIndirect) {
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(1, WASM_ZERO));
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(11, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(1, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(11, WASM_ZERO));
- EXPECT_SIZE(6, WASM_CALL_INDIRECT(0, WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(8, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_CALL_INDIRECT1(0, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(9, WASM_CALL_INDIRECT2(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Int32Ops) {
EXPECT_SIZE(5, WASM_I32_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I32_SUB(WASM_ZERO, WASM_ZERO));
@@ -189,7 +184,6 @@ TEST_F(WasmMacroGenTest, Int32Ops) {
EXPECT_SIZE(3, WASM_I32_EQZ(WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Int64Ops) {
EXPECT_SIZE(5, WASM_I64_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_I64_SUB(WASM_ZERO, WASM_ZERO));
@@ -225,7 +219,6 @@ TEST_F(WasmMacroGenTest, Int64Ops) {
EXPECT_SIZE(3, WASM_I64_EQZ(WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Float32Ops) {
EXPECT_SIZE(5, WASM_F32_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_F32_SUB(WASM_ZERO, WASM_ZERO));
@@ -250,7 +243,6 @@ TEST_F(WasmMacroGenTest, Float32Ops) {
EXPECT_SIZE(5, WASM_F32_GE(WASM_ZERO, WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Float64Ops) {
EXPECT_SIZE(5, WASM_F64_ADD(WASM_ZERO, WASM_ZERO));
EXPECT_SIZE(5, WASM_F64_SUB(WASM_ZERO, WASM_ZERO));
@@ -275,7 +267,6 @@ TEST_F(WasmMacroGenTest, Float64Ops) {
EXPECT_SIZE(5, WASM_F64_GE(WASM_ZERO, WASM_ZERO));
}
-
TEST_F(WasmMacroGenTest, Conversions) {
EXPECT_SIZE(3, WASM_I32_SCONVERT_F32(WASM_ZERO));
EXPECT_SIZE(3, WASM_I32_SCONVERT_F64(WASM_ZERO));
@@ -317,7 +308,6 @@ TEST_F(WasmMacroGenTest, LoadsAndStores) {
}
}
-
TEST_F(WasmMacroGenTest, LoadsAndStoresWithOffset) {
for (size_t i = 0; i < arraysize(kMemTypes); i++) {
EXPECT_SIZE(5, WASM_LOAD_MEM_OFFSET(kMemTypes[i], 11, WASM_ZERO));
diff --git a/deps/v8/test/webkit/class-constructor-return.js b/deps/v8/test/webkit/class-constructor-return.js
index 6eb72ad403..3691928dd4 100644
--- a/deps/v8/test/webkit/class-constructor-return.js
+++ b/deps/v8/test/webkit/class-constructor-return.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class constructor return values');
// ES6
diff --git a/deps/v8/test/webkit/class-syntax-call.js b/deps/v8/test/webkit/class-syntax-call.js
index fa32a8cc9b..747b0ebff8 100644
--- a/deps/v8/test/webkit/class-syntax-call.js
+++ b/deps/v8/test/webkit/class-syntax-call.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for calling the constructors of ES6 classes');
class A { constructor() {} };
diff --git a/deps/v8/test/webkit/class-syntax-declaration.js b/deps/v8/test/webkit/class-syntax-declaration.js
index 3c9aed7441..775a3353d0 100644
--- a/deps/v8/test/webkit/class-syntax-declaration.js
+++ b/deps/v8/test/webkit/class-syntax-declaration.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class syntax declarations');
var constructorCallCount = 0;
diff --git a/deps/v8/test/webkit/class-syntax-default-constructor.js b/deps/v8/test/webkit/class-syntax-default-constructor.js
index 1695f6a8da..2772108068 100644
--- a/deps/v8/test/webkit/class-syntax-default-constructor.js
+++ b/deps/v8/test/webkit/class-syntax-default-constructor.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class syntax default constructor');
class A { };
diff --git a/deps/v8/test/webkit/class-syntax-expression.js b/deps/v8/test/webkit/class-syntax-expression.js
index 3272b81f7e..ab6dc0e49e 100644
--- a/deps/v8/test/webkit/class-syntax-expression.js
+++ b/deps/v8/test/webkit/class-syntax-expression.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class syntax expressions');
var constructorCallCount = 0;
diff --git a/deps/v8/test/webkit/class-syntax-extends-expected.txt b/deps/v8/test/webkit/class-syntax-extends-expected.txt
index 45e999dd93..5d0dca7263 100644
--- a/deps/v8/test/webkit/class-syntax-extends-expected.txt
+++ b/deps/v8/test/webkit/class-syntax-extends-expected.txt
@@ -23,8 +23,8 @@ PASS x.prototype.__proto__ is Base.prototype
PASS Object.getPrototypeOf(x.prototype) is Base.prototype
PASS x = class extends null { constructor() { } }; x.__proto__ is Function.prototype
PASS x.__proto__ is Function.prototype
-PASS x = class extends 3 { constructor() { } }; x.__proto__ threw exception TypeError: Class extends value 3 is not a function or null.
-PASS x = class extends "abc" { constructor() { } }; x.__proto__ threw exception TypeError: Class extends value abc is not a function or null.
+PASS x = class extends 3 { constructor() { } }; x.__proto__ threw exception TypeError: Class extends value 3 is not a constructor or null.
+PASS x = class extends "abc" { constructor() { } }; x.__proto__ threw exception TypeError: Class extends value abc is not a constructor or null.
PASS baseWithBadPrototype = function () {}; baseWithBadPrototype.prototype = 3; new baseWithBadPrototype did not throw exception.
PASS x = class extends baseWithBadPrototype { constructor() { } } threw exception TypeError: Class extends value does not have valid prototype property 3.
PASS baseWithBadPrototype.prototype = "abc" did not throw exception.
@@ -32,8 +32,8 @@ PASS x = class extends baseWithBadPrototype { constructor() { } } threw exceptio
PASS baseWithBadPrototype.prototype = null; x = class extends baseWithBadPrototype { constructor() { } } did not throw exception.
PASS x = 1; c = class extends ++x { constructor() { } }; threw exception SyntaxError: Unexpected token ++.
PASS x = 1; c = class extends x++ { constructor() { } }; threw exception SyntaxError: Unexpected token ++.
-PASS x = 1; c = class extends (++x) { constructor() { } }; threw exception TypeError: Class extends value 2 is not a function or null.
-PASS x = 1; c = class extends (x++) { constructor() { } }; threw exception TypeError: Class extends value 1 is not a function or null.
+PASS x = 1; c = class extends (++x) { constructor() { } }; threw exception TypeError: Class extends value 2 is not a constructor or null.
+PASS x = 1; c = class extends (x++) { constructor() { } }; threw exception TypeError: Class extends value 1 is not a constructor or null.
PASS x = 1; try { c = class extends (++x) { constructor() { } } } catch (e) { }; x is 2
PASS x = 1; try { c = class extends (x++) { constructor() { } } } catch (e) { }; x is 2
PASS namespace = {}; namespace.A = class { }; namespace.B = class extends namespace.A { } did not throw exception.
@@ -47,17 +47,17 @@ PASS namespace = {}; namespace.A = class { constructor() { } }; function getClas
PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends (false||null||namespace.A) { constructor() { } } did not throw exception.
PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends false||null||namespace.A { constructor() { } } threw exception SyntaxError: Unexpected token ||.
PASS x = 1; namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends (x++, namespace.A) { constructor() { } }; did not throw exception.
-PASS x = 1; namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends (namespace.A, x++) { constructor() { } }; threw exception TypeError: Class extends value 1 is not a function or null.
-PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends new namespace.A { constructor() { } } threw exception TypeError: Class extends value [object Object] is not a function or null.
-PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends new namespace.A() { constructor() { } } threw exception TypeError: Class extends value [object Object] is not a function or null.
+PASS x = 1; namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends (namespace.A, x++) { constructor() { } }; threw exception TypeError: Class extends value 1 is not a constructor or null.
+PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends new namespace.A { constructor() { } } threw exception TypeError: Class extends value [object Object] is not a constructor or null.
+PASS namespace = {}; namespace.A = class { constructor() { } }; namespace.B = class extends new namespace.A() { constructor() { } } threw exception TypeError: Class extends value [object Object] is not a constructor or null.
PASS x = 1; namespace = {}; namespace.A = class { constructor() { } }; try { namespace.B = class extends (x++, namespace.A) { constructor() { } } } catch (e) { } x is 2
PASS x = 1; namespace = {}; namespace.A = class { constructor() { } }; try { namespace.B = class extends (namespace.A, x++) { constructor() { } } } catch (e) { } x is 2
PASS Object.getPrototypeOf((class { constructor () { } }).prototype) is Object.prototype
PASS Object.getPrototypeOf((class extends null { constructor () { super(); } }).prototype) is null
-PASS new (class extends undefined { constructor () { this } }) threw exception TypeError: Class extends value undefined is not a function or null.
-PASS new (class extends undefined { constructor () { super(); } }) threw exception TypeError: Class extends value undefined is not a function or null.
-PASS x = {}; new (class extends undefined { constructor () { return x; } }) threw exception TypeError: Class extends value undefined is not a function or null.
-PASS y = 12; new (class extends undefined { constructor () { return y; } }) threw exception TypeError: Class extends value undefined is not a function or null.
+PASS new (class extends undefined { constructor () { this } }) threw exception TypeError: Class extends value undefined is not a constructor or null.
+PASS new (class extends undefined { constructor () { super(); } }) threw exception TypeError: Class extends value undefined is not a constructor or null.
+PASS x = {}; new (class extends undefined { constructor () { return x; } }) threw exception TypeError: Class extends value undefined is not a constructor or null.
+PASS y = 12; new (class extends undefined { constructor () { return y; } }) threw exception TypeError: Class extends value undefined is not a constructor or null.
PASS class x {}; new (class extends null { constructor () { return new x; } }) instanceof x is true
PASS new (class extends null { constructor () { this; } }) threw exception ReferenceError: this is not defined.
PASS new (class extends null { constructor () { super(); } }) threw exception TypeError: super is not a constructor.
diff --git a/deps/v8/test/webkit/class-syntax-extends.js b/deps/v8/test/webkit/class-syntax-extends.js
index a1b8f1292d..d505bf89fb 100644
--- a/deps/v8/test/webkit/class-syntax-extends.js
+++ b/deps/v8/test/webkit/class-syntax-extends.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class syntax "extends"');
class Base {
@@ -60,8 +58,8 @@ shouldBe('x.prototype.__proto__', 'Base.prototype');
shouldBe('Object.getPrototypeOf(x.prototype)', 'Base.prototype');
shouldBe('x = class extends null { constructor() { } }; x.__proto__', 'Function.prototype');
shouldBe('x.__proto__', 'Function.prototype');
-shouldThrow('x = class extends 3 { constructor() { } }; x.__proto__', '"TypeError: Class extends value 3 is not a function or null"');
-shouldThrow('x = class extends "abc" { constructor() { } }; x.__proto__', '"TypeError: Class extends value abc is not a function or null"');
+shouldThrow('x = class extends 3 { constructor() { } }; x.__proto__', '"TypeError: Class extends value 3 is not a constructor or null"');
+shouldThrow('x = class extends "abc" { constructor() { } }; x.__proto__', '"TypeError: Class extends value abc is not a constructor or null"');
shouldNotThrow('baseWithBadPrototype = function () {}; baseWithBadPrototype.prototype = 3; new baseWithBadPrototype');
shouldThrow('x = class extends baseWithBadPrototype { constructor() { } }', '"TypeError: Class extends value does not have valid prototype property 3"');
shouldNotThrow('baseWithBadPrototype.prototype = "abc"');
@@ -94,10 +92,10 @@ shouldBe('x = 1; namespace = {}; namespace.A = class { constructor() { } }; try
shouldBe('Object.getPrototypeOf((class { constructor () { } }).prototype)', 'Object.prototype');
shouldBe('Object.getPrototypeOf((class extends null { constructor () { super(); } }).prototype)', 'null');
-shouldThrow('new (class extends undefined { constructor () { this } })', '"TypeError: Class extends value undefined is not a function or null"');
-shouldThrow('new (class extends undefined { constructor () { super(); } })', '"TypeError: Class extends value undefined is not a function or null"');
-shouldThrow('x = {}; new (class extends undefined { constructor () { return x; } })', '"TypeError: Class extends value undefined is not a function or null"');
-shouldThrow('y = 12; new (class extends undefined { constructor () { return y; } })', '"TypeError: Class extends value undefined is not a function or null"');
+shouldThrow('new (class extends undefined { constructor () { this } })', '"TypeError: Class extends value undefined is not a constructor or null"');
+shouldThrow('new (class extends undefined { constructor () { super(); } })', '"TypeError: Class extends value undefined is not a constructor or null"');
+shouldThrow('x = {}; new (class extends undefined { constructor () { return x; } })', '"TypeError: Class extends value undefined is not a constructor or null"');
+shouldThrow('y = 12; new (class extends undefined { constructor () { return y; } })', '"TypeError: Class extends value undefined is not a constructor or null"');
shouldBeTrue ('class x {}; new (class extends null { constructor () { return new x; } }) instanceof x');
shouldThrow('new (class extends null { constructor () { this; } })', '"ReferenceError: this is not defined"');
shouldThrow('new (class extends null { constructor () { super(); } })', '"TypeError: super is not a constructor"');
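The rewritten expectations above track an error-message fix: per ES6, the extends clause requires a constructor (or null), not merely a function, so V8's TypeError now reads "is not a constructor or null". A quick illustration:

try {
  class B extends 3 { }
} catch (e) {
  console.log(e instanceof TypeError); // true
  console.log(e.message); // "Class extends value 3 is not a constructor or null"
}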
diff --git a/deps/v8/test/webkit/class-syntax-name.js b/deps/v8/test/webkit/class-syntax-name.js
index 16045651ef..7686f6ca38 100644
--- a/deps/v8/test/webkit/class-syntax-name.js
+++ b/deps/v8/test/webkit/class-syntax-name.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class name semantics in class statements and expressions');
function runTestShouldBe(statement, result) {
diff --git a/deps/v8/test/webkit/class-syntax-prototype.js b/deps/v8/test/webkit/class-syntax-prototype.js
index 02ec578bb4..6f0b40cd5f 100644
--- a/deps/v8/test/webkit/class-syntax-prototype.js
+++ b/deps/v8/test/webkit/class-syntax-prototype.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for the descriptors of the properties implicitly defined by ES6 class syntax');
function descriptor(object, propertyName) {
diff --git a/deps/v8/test/webkit/class-syntax-scoping.js b/deps/v8/test/webkit/class-syntax-scoping.js
index 02f5a1ee04..7c23634dea 100644
--- a/deps/v8/test/webkit/class-syntax-scoping.js
+++ b/deps/v8/test/webkit/class-syntax-scoping.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for scoping of variables in ES6 class syntax');
var local = "FAIL";
diff --git a/deps/v8/test/webkit/class-syntax-semicolon.js b/deps/v8/test/webkit/class-syntax-semicolon.js
index 33504f95d4..d4a0d9bd83 100644
--- a/deps/v8/test/webkit/class-syntax-semicolon.js
+++ b/deps/v8/test/webkit/class-syntax-semicolon.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class syntax containing semicolon in the class body');
shouldThrow("class A { foo;() { } }", "'SyntaxError: Unexpected token ;'");
diff --git a/deps/v8/test/webkit/class-syntax-super.js b/deps/v8/test/webkit/class-syntax-super.js
index e355b4b965..d70f2a1b2b 100644
--- a/deps/v8/test/webkit/class-syntax-super.js
+++ b/deps/v8/test/webkit/class-syntax-super.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sloppy
-
description('Tests for ES6 class syntax "super"');
var baseMethodValue = {};
diff --git a/deps/v8/test/webkit/dfg-redundant-load-of-captured-variable-proven-constant.js b/deps/v8/test/webkit/dfg-redundant-load-of-captured-variable-proven-constant.js
index c97b9edf9a..90e898ca5c 100644
--- a/deps/v8/test/webkit/dfg-redundant-load-of-captured-variable-proven-constant.js
+++ b/deps/v8/test/webkit/dfg-redundant-load-of-captured-variable-proven-constant.js
@@ -28,7 +28,7 @@ description(
function foo(o, p) {
var x = o.f;
if (p)
- return function() { return x; }
+ return function () { return x; }
else {
var a = x;
var b = x;
@@ -36,7 +36,7 @@ function foo(o, p) {
}
}
-var o = {f:function() { return 32; }};
+var o = {f:function () { return 32; }};
for (var i = 0; i < 100; ++i) {
var expected;
diff --git a/deps/v8/test/webkit/dfg-resolve-global-specific-dictionary.js b/deps/v8/test/webkit/dfg-resolve-global-specific-dictionary.js
index 97c28b83cf..cabf8f8e26 100644
--- a/deps/v8/test/webkit/dfg-resolve-global-specific-dictionary.js
+++ b/deps/v8/test/webkit/dfg-resolve-global-specific-dictionary.js
@@ -29,7 +29,7 @@ function foo() {
return x;
}
-x = function() { };
+x = function () { };
var expected = "\"function () { }\"";
diff --git a/deps/v8/test/webkit/exception-for-nonobject.js b/deps/v8/test/webkit/exception-for-nonobject.js
index f54915eb2e..d39c3e0512 100644
--- a/deps/v8/test/webkit/exception-for-nonobject.js
+++ b/deps/v8/test/webkit/exception-for-nonobject.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-instanceof
-
description("Test for correct handling of exceptions from instanceof and 'new' expressions");
shouldThrow("new {}.undefined");
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
index a7a29606ed..36394155d9 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
@@ -54,31 +54,31 @@ PASS testThis.apply() is undefined
PASS testThis.call(undefined) is undefined
PASS testThis.apply(undefined) is undefined
PASS (function eval(){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function eval(){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function eval(){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (eval){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function (eval){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function (eval){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function arguments(){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function arguments(){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function arguments(){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (arguments){'use strict';}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function (arguments){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function (arguments){'use strict';})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (){'use strict'; var eval;}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; var eval;})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function (){'use strict'; var eval;})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (){'use strict'; var arguments;}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; var arguments;})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function (){'use strict'; var arguments;})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (){'use strict'; try{}catch(eval){}}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; try{}catch(eval){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function (){'use strict'; try{}catch(eval){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (){'use strict'; try{}catch(arguments){}}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){(function (){'use strict'; try{}catch(arguments){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){(function (){'use strict'; try{}catch(arguments){}})}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS (function (a, a){'use strict';}) threw exception SyntaxError: Duplicate parameter name not allowed in this context.
-PASS (function(){(function (a, a){'use strict';})}) threw exception SyntaxError: Duplicate parameter name not allowed in this context.
+PASS (function (){(function (a, a){'use strict';})}) threw exception SyntaxError: Duplicate parameter name not allowed in this context.
PASS (function (a){'use strict'; delete a;})() threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){(function (a){'use strict'; delete a;})()}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){(function (a){'use strict'; delete a;})()}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS (function (){'use strict'; var a; delete a;})() threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){(function (){'use strict'; var a; delete a;})()}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){(function (){'use strict'; var a; delete a;})()}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS (function (){var a; function f() {'use strict'; delete a;} })() threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){(function (){var a; function f() {'use strict'; delete a;} })()}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){(function (){var a; function f() {'use strict'; delete a;} })()}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS (function (){'use strict'; with(1){};}) threw exception SyntaxError: Strict mode code may not include a with statement.
-PASS (function(){(function (){'use strict'; with(1){};})}) threw exception SyntaxError: Strict mode code may not include a with statement.
+PASS (function (){(function (){'use strict'; with(1){};})}) threw exception SyntaxError: Strict mode code may not include a with statement.
PASS (function (){'use strict'; arguments.callee; })() threw exception TypeError: 'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them.
PASS (function (){'use strict'; arguments.caller; })() threw exception TypeError: 'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them.
PASS (function f(){'use strict'; f.arguments; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
@@ -98,37 +98,37 @@ PASS "arguments" in function(){"use strict"} is true
PASS (function(){"use strict";}).hasOwnProperty("arguments") is false
PASS (function(){"use strict";}).__proto__.hasOwnProperty("arguments") is true
PASS 'use strict'; (function (){with(1){};}) threw exception SyntaxError: Strict mode code may not include a with statement.
-PASS (function(){'use strict'; (function (){with(1){};})}) threw exception SyntaxError: Strict mode code may not include a with statement.
+PASS (function (){'use strict'; (function (){with(1){};})}) threw exception SyntaxError: Strict mode code may not include a with statement.
PASS 'use strict'; (function (){var a; delete a;}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){'use strict'; (function (){var a; delete a;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){'use strict'; (function (){var a; delete a;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS 'use strict'; var a; (function (){ delete a;}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){'use strict'; var a; (function (){ delete a;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){'use strict'; var a; (function (){ delete a;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS var a; (function (){ 'use strict'; delete a;}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){var a; (function (){ 'use strict'; delete a;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){var a; (function (){ 'use strict'; delete a;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS 'misc directive'; 'use strict'; with({}){} threw exception SyntaxError: Strict mode code may not include a with statement.
-PASS (function(){'misc directive'; 'use strict'; with({}){}}) threw exception SyntaxError: Strict mode code may not include a with statement.
+PASS (function (){'misc directive'; 'use strict'; with({}){}}) threw exception SyntaxError: Strict mode code may not include a with statement.
PASS 'use strict'; return threw exception SyntaxError: Illegal return statement.
PASS 'use strict'; break threw exception SyntaxError: Illegal break statement.
-PASS (function(){'use strict'; break}) threw exception SyntaxError: Illegal break statement.
+PASS (function (){'use strict'; break}) threw exception SyntaxError: Illegal break statement.
PASS 'use strict'; continue threw exception SyntaxError: Illegal continue statement.
-PASS (function(){'use strict'; continue}) threw exception SyntaxError: Illegal continue statement.
+PASS (function (){'use strict'; continue}) threw exception SyntaxError: Illegal continue statement.
PASS 'use strict'; for(;;)return threw exception SyntaxError: Illegal return statement.
PASS 'use strict'; for(;;)break missingLabel threw exception SyntaxError: Undefined label 'missingLabel'.
-PASS (function(){'use strict'; for(;;)break missingLabel}) threw exception SyntaxError: Undefined label 'missingLabel'.
+PASS (function (){'use strict'; for(;;)break missingLabel}) threw exception SyntaxError: Undefined label 'missingLabel'.
PASS 'use strict'; for(;;)continue missingLabel threw exception SyntaxError: Undefined label 'missingLabel'.
-PASS (function(){'use strict'; for(;;)continue missingLabel}) threw exception SyntaxError: Undefined label 'missingLabel'.
+PASS (function (){'use strict'; for(;;)continue missingLabel}) threw exception SyntaxError: Undefined label 'missingLabel'.
PASS 'use strict'; 007; threw exception SyntaxError: Octal literals are not allowed in strict mode..
-PASS (function(){'use strict'; 007;}) threw exception SyntaxError: Octal literals are not allowed in strict mode..
+PASS (function (){'use strict'; 007;}) threw exception SyntaxError: Octal literals are not allowed in strict mode..
PASS 'use strict'; '\007'; threw exception SyntaxError: Octal literals are not allowed in strict mode..
-PASS (function(){'use strict'; '\007';}) threw exception SyntaxError: Octal literals are not allowed in strict mode..
+PASS (function (){'use strict'; '\007';}) threw exception SyntaxError: Octal literals are not allowed in strict mode..
PASS '\007'; 'use strict'; threw exception SyntaxError: Octal literals are not allowed in strict mode..
-PASS (function(){'\007'; 'use strict';}) threw exception SyntaxError: Octal literals are not allowed in strict mode..
+PASS (function (){'\007'; 'use strict';}) threw exception SyntaxError: Octal literals are not allowed in strict mode..
PASS 'use strict'; delete aDeletableProperty; threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){'use strict'; delete aDeletableProperty;}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){'use strict'; delete aDeletableProperty;}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS 'use strict'; (function (){ delete someDeclaredGlobal;}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){'use strict'; (function (){ delete someDeclaredGlobal;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){'use strict'; (function (){ delete someDeclaredGlobal;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS (function (){ 'use strict'; delete someDeclaredGlobal;}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
-PASS (function(){(function (){ 'use strict'; delete someDeclaredGlobal;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
+PASS (function (){(function (){ 'use strict'; delete someDeclaredGlobal;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
PASS 'use strict'; if (0) { someGlobal = 'Shouldn\'t be able to assign this.'; }; true; is true
PASS 'use strict'; someGlobal = 'Shouldn\'t be able to assign this.'; threw exception ReferenceError: someGlobal is not defined.
PASS 'use strict'; (function f(){ f = 'shouldn\'t be able to assign to function expression name'; })() threw exception TypeError: Assignment to constant variable..
@@ -137,44 +137,44 @@ PASS 'use strict'; objectWithReadonlyProperty.prop = 'fail' threw exception Type
PASS 'use strict'; delete objectWithReadonlyProperty.prop threw exception TypeError: Cannot delete property 'prop' of #<Object>.
PASS 'use strict'; delete objectWithReadonlyProperty[readonlyPropName] threw exception TypeError: Cannot delete property 'prop' of #<Object>.
PASS 'use strict'; ++eval threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; ++eval}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; ++eval}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; eval++ threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; eval++}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; eval++}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; --eval threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; --eval}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; --eval}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; eval-- threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; eval--}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; eval--}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; function f() { ++arguments } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; function f() { ++arguments }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; function f() { ++arguments }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; function f() { arguments++ } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; function f() { arguments++ }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; function f() { arguments++ }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; function f() { --arguments } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; function f() { --arguments }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; function f() { --arguments }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; function f() { arguments-- } threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict'; function f() { arguments-- }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict'; function f() { arguments-- }}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS global.eval('"use strict"; if (0) ++arguments; true;') threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; ++(1, eval) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function(){'use strict'; ++(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; ++(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
PASS 'use strict'; (1, eval)++ threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function(){'use strict'; (1, eval)++}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; (1, eval)++}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS 'use strict'; --(1, eval) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function(){'use strict'; --(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; --(1, eval)}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
PASS 'use strict'; (1, eval)-- threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function(){'use strict'; (1, eval)--}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; (1, eval)--}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS 'use strict'; function f() { ++(1, arguments) } threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function(){'use strict'; function f() { ++(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; function f() { ++(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
PASS 'use strict'; function f() { (1, arguments)++ } threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function(){'use strict'; function f() { (1, arguments)++ }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; function f() { (1, arguments)++ }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS 'use strict'; function f() { --(1, arguments) } threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
-PASS (function(){'use strict'; function f() { --(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
+PASS (function (){'use strict'; function f() { --(1, arguments) }}) threw exception ReferenceError: Invalid left-hand side expression in prefix operation.
PASS 'use strict'; function f() { (1, arguments)-- } threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
-PASS (function(){'use strict'; function f() { (1, arguments)-- }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
+PASS (function (){'use strict'; function f() { (1, arguments)-- }}) threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
FAIL 'use strict'; undefined; if (0) delete +a.b should throw an exception. Was undefined.
-FAIL (function(){'use strict'; undefined; if (0) delete +a.b}) should throw an exception. Was function (){'use strict'; undefined; if (0) delete +a.b}.
+FAIL (function (){'use strict'; undefined; if (0) delete +a.b}) should throw an exception. Was function (){'use strict'; undefined; if (0) delete +a.b}.
FAIL 'use strict'; undefined; if (0) delete ++a.b should throw an exception. Was undefined.
-FAIL (function(){'use strict'; undefined; if (0) delete ++a.b}) should throw an exception. Was function (){'use strict'; undefined; if (0) delete ++a.b}.
+FAIL (function (){'use strict'; undefined; if (0) delete ++a.b}) should throw an exception. Was function (){'use strict'; undefined; if (0) delete ++a.b}.
FAIL 'use strict'; undefined; if (0) delete void a.b should throw an exception. Was undefined.
-FAIL (function(){'use strict'; undefined; if (0) delete void a.b}) should throw an exception. Was function (){'use strict'; undefined; if (0) delete void a.b}.
+FAIL (function (){'use strict'; undefined; if (0) delete void a.b}) should throw an exception. Was function (){'use strict'; undefined; if (0) delete void a.b}.
PASS (function (a){'use strict'; a = false; return a !== arguments[0]; })(true) is true
PASS (function (a){'use strict'; arguments[0] = false; return a !== arguments[0]; })(true) is true
PASS (function (a){'use strict'; a=false; return arguments; })(true)[0] is true
@@ -207,23 +207,23 @@ PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescri
PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(f.__proto__, 'arguments'); return descriptor.get === descriptor.set; })() is true
PASS 'use strict'; (function f() { for(var i in this); })(); true; is true
PASS 'use strict'̻ threw exception SyntaxError: Invalid or unexpected token.
-PASS (function(){'use strict'̻}) threw exception SyntaxError: Invalid or unexpected token.
+PASS (function (){'use strict'̻}) threw exception SyntaxError: Invalid or unexpected token.
PASS 'use strict'5.f threw exception SyntaxError: Invalid or unexpected token.
-PASS (function(){'use strict'5.f}) threw exception SyntaxError: Invalid or unexpected token.
+PASS (function (){'use strict'5.f}) threw exception SyntaxError: Invalid or unexpected token.
PASS 'use strict';̻ threw exception SyntaxError: Invalid or unexpected token.
-PASS (function(){'use strict';̻}) threw exception SyntaxError: Invalid or unexpected token.
+PASS (function (){'use strict';̻}) threw exception SyntaxError: Invalid or unexpected token.
PASS 'use strict';5.f threw exception SyntaxError: Invalid or unexpected token.
-PASS (function(){'use strict';5.f}) threw exception SyntaxError: Invalid or unexpected token.
+PASS (function (){'use strict';5.f}) threw exception SyntaxError: Invalid or unexpected token.
PASS 'use strict';1-(eval=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict';1-(eval=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict';1-(eval=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict';arguments=1; threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict';arguments=1;}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict';arguments=1;}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict';1-(arguments=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict';1-(arguments=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict';1-(arguments=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict';var a=(eval=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict';var a=(eval=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict';var a=(eval=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict';var a=(arguments=1); threw exception SyntaxError: Unexpected eval or arguments in strict mode.
-PASS (function(){'use strict';var a=(arguments=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
+PASS (function (){'use strict';var a=(arguments=1);}) threw exception SyntaxError: Unexpected eval or arguments in strict mode.
PASS 'use strict'; try { throw 1; } catch (e) { aGlobal = true; } is true
PASS 'use strict'; (function () { try { throw 1; } catch (e) { aGlobal = true; }})(); aGlobal; is true
PASS (function () {'use strict'; try { throw 1; } catch (e) { aGlobal = true; }})(); aGlobal; is true
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode.js b/deps/v8/test/webkit/fast/js/basic-strict-mode.js
index 2f7a319bc4..027af0f152 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode.js
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode.js
@@ -41,7 +41,7 @@ function testGlobalAccess() {
}
function shouldBeSyntaxError(str) {
shouldThrow(str);
- shouldThrow("(function(){" + str + "})");
+ shouldThrow("(function (){" + str + "})");
}
function testLineContinuation() {
"use stric\
diff --git a/deps/v8/test/webkit/fast/js/deep-recursion-test.js b/deps/v8/test/webkit/fast/js/deep-recursion-test.js
index c2a695da46..df008ba61e 100644
--- a/deps/v8/test/webkit/fast/js/deep-recursion-test.js
+++ b/deps/v8/test/webkit/fast/js/deep-recursion-test.js
@@ -29,7 +29,7 @@ description("This test how deep we can recurse, and that we get an exception whe
}
try {
- simpleRecursion(17472);
+ simpleRecursion(5000);
} catch (ex) {
debug("FAIL: " + ex);
}
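The "should succeed" depth drops from 17472 to 5000, presumably so the test fits inside V8's default stack budget; the deeper, unbounded recursion elsewhere in the test is what should produce the catchable exception. A sketch of the behavior being exercised:

function simpleRecursion(depth) {
  if (depth) simpleRecursion(depth - 1);
}
simpleRecursion(5000); // expected to complete
try {
  simpleRecursion(1e9); // far past any realistic stack limit
} catch (ex) {
  console.log(ex instanceof RangeError); // true: "Maximum call stack size exceeded"
}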
diff --git a/deps/v8/test/webkit/fast/js/excessive-comma-usage.js b/deps/v8/test/webkit/fast/js/excessive-comma-usage.js
index 414b29b7ef..a67a3ffa7f 100644
--- a/deps/v8/test/webkit/fast/js/excessive-comma-usage.js
+++ b/deps/v8/test/webkit/fast/js/excessive-comma-usage.js
@@ -34,7 +34,7 @@ for (var i = 0; i < 5000; i++)
declarationTestString += ";return true;";
var commaExpressionTestString = "1";
-for (var i = 0; i < 5000; i++)
+for (var i = 0; i < 2500; i++)
commaExpressionTestString += ",1";
commaExpressionTestString += ";return true;";
diff --git a/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion-expected.txt b/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion-expected.txt
index edb651fbea..2724abca3f 100644
--- a/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion-expected.txt
+++ b/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion-expected.txt
@@ -26,14 +26,14 @@ This test checks that functions re-string-ify in a way that is syntactically com
On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-FAIL (function(){return}).toString() should be function () {return;}. Was function (){return}.
-FAIL (function(){return }).toString() should be function () {return; }. Was function (){return }.
-FAIL (function(){return
+FAIL (function (){return}).toString() should be function () {return;}. Was function (){return}.
+FAIL (function (){return }).toString() should be function () {return; }. Was function (){return }.
+FAIL (function (){return
}).toString() should be function () {return;
}. Was function (){return
}.
-FAIL (function(){}).toString() should be function () {}. Was function (){}.
-FAIL (function(){ }).toString() should be function () { }. Was function (){ }.
+FAIL (function (){}).toString() should be function () {}. Was function (){}.
+FAIL (function (){ }).toString() should be function () { }. Was function (){ }.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion.js b/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion.js
index fa89b918df..508168875b 100644
--- a/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion.js
+++ b/deps/v8/test/webkit/fast/js/function-toString-semicolon-insertion.js
@@ -26,8 +26,8 @@ description(
"compatible with concatenation."
);
-shouldBe("(function(){return}).toString()", "'function () {return;}'");
-shouldBe("(function(){return }).toString()", "'function () {return; }'");
-shouldBe("(function(){return" + "\n" + "}).toString()", "'function () {return;" + "\\n" + "}'");
-shouldBe("(function(){}).toString()", "'function () {}'");
-shouldBe("(function(){ }).toString()", "'function () { }'");
+shouldBe("(function (){return}).toString()", "'function () {return;}'");
+shouldBe("(function (){return }).toString()", "'function () {return; }'");
+shouldBe("(function (){return" + "\n" + "}).toString()", "'function () {return;" + "\\n" + "}'");
+shouldBe("(function (){}).toString()", "'function () {}'");
+shouldBe("(function (){ }).toString()", "'function () { }'");
diff --git a/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js b/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js
index 14012b561d..eca07942fd 100644
--- a/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js
+++ b/deps/v8/test/webkit/fast/js/kde/lval-exceptions.js
@@ -25,19 +25,19 @@ description("KDE JS Test");
// Tests for raising --- and non-raising exceptions on access to reference to undefined things...
// Locals should throw on access if undefined..
-fnShouldThrow(function() { a = x; }, ReferenceError);
+fnShouldThrow(function () { a = x; }, ReferenceError);
// Read-modify-write versions of assignment should throw as well
-fnShouldThrow(function() { x += "foo"; }, ReferenceError);
+fnShouldThrow(function () { x += "foo"; }, ReferenceError);
// Other reference types should just return undefined...
a = new Object();
-fnShouldNotThrow(function() { b = a.x; });
-fnShouldNotThrow(function() { b = a['x']; });
-fnShouldNotThrow(function() { a['x'] += 'baz'; });
+fnShouldNotThrow(function () { b = a.x; });
+fnShouldNotThrow(function () { b = a['x']; });
+fnShouldNotThrow(function () { a['x'] += 'baz'; });
shouldBe("a['x']", '"undefinedbaz"');
-fnShouldNotThrow(function() { b = a.y; });
-fnShouldNotThrow(function() { a.y += 'glarch'; });
+fnShouldNotThrow(function () { b = a.y; });
+fnShouldNotThrow(function () { a.y += 'glarch'; });
shouldBe("a['y']", '"undefinedglarch"');
diff --git a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
index c6eddab66c..44bd2e7c36 100644
--- a/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
+++ b/deps/v8/test/webkit/fast/js/toString-overrides-expected.txt
@@ -28,10 +28,10 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS [1].toString() is '1'
PASS [1].toLocaleString() is 'toLocaleString'
-FAIL [1].toLocaleString() should be 1. Threw exception TypeError: (var).toLocaleString is not a function
+FAIL [1].toLocaleString() should be 1. Threw exception TypeError: string is not a function
PASS [/r/].toString() is 'toString2'
PASS [/r/].toLocaleString() is 'toLocaleString2'
-FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: (var).toLocaleString is not a function
+FAIL [/r/].toLocaleString() should be toString2. Threw exception TypeError: string is not a function
PASS caught is true
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt b/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
index 71292c77b1..1e0959c3f9 100644
--- a/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/lastIndex-expected.txt
@@ -42,10 +42,10 @@ PASS var re = Object.defineProperty(/x/, 'lastIndex', {value:42}); re.lastIndex
PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:false}), 'lastIndex', {writable:true}); true threw exception TypeError: Cannot redefine property: lastIndex.
PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:false}), 'lastIndex', {value:42}); true threw exception TypeError: Cannot redefine property: lastIndex.
PASS Object.defineProperty(Object.defineProperty(/x/, 'lastIndex', {writable:false}), 'lastIndex', {value:0}); true is true
-FAIL Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('') should be null. Threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'
+PASS Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('') is null
PASS Object.defineProperty(/x/, 'lastIndex', {writable:false}).exec('x') is ["x"]
-PASS Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('') threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'.
-PASS Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('x') threw exception TypeError: Cannot assign to read only property 'lastIndex' of object '[object RegExp]'.
+FAIL Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('') should throw an exception. Was null.
+FAIL Object.defineProperty(/x/g, 'lastIndex', {writable:false}).exec('x') should throw an exception. Was x.
PASS var re = /x/; Object.freeze(re); Object.isFrozen(re); is true
PASS successfullyParsed is true
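The flipped expectations follow ES6's RegExp.prototype.exec: a non-global, non-sticky regexp no longer writes lastIndex at all, so a read-only lastIndex is harmless, while the remaining FAIL lines record that this V8 version also skips the spec's throwing lastIndex write for global regexps. The now-passing case:

const re = Object.defineProperty(/x/, 'lastIndex', { writable: false });
console.log(re.exec(''));  // null, non-global exec never touches lastIndex
console.log(re.exec('x')); // ["x"]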
diff --git a/deps/v8/test/webkit/fast/regex/toString-expected.txt b/deps/v8/test/webkit/fast/regex/toString-expected.txt
index 1f92569934..2001708838 100644
--- a/deps/v8/test/webkit/fast/regex/toString-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/toString-expected.txt
@@ -37,8 +37,8 @@ PASS testForwardSlash("^/$", "/"); is true
PASS testForwardSlash("^\/$", "/"); is true
PASS testForwardSlash("^\\/$", "\/"); is true
PASS testForwardSlash("^\\\/$", "\/"); is true
-FAIL testForwardSlash("^\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Invalid regular expression flags
-FAIL testForwardSlash("^\\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Invalid regular expression flags
+PASS testForwardSlash("^\\\\/$", "\\/"); is true
+PASS testForwardSlash("^\\\\\/$", "\\/"); is true
PASS testForwardSlash("x/x/x", "x\/x\/x"); is true
PASS testForwardSlash("x\/x/x", "x\/x\/x"); is true
PASS testForwardSlash("x/x\/x", "x\/x\/x"); is true
diff --git a/deps/v8/test/webkit/function-declaration-statement.js b/deps/v8/test/webkit/function-declaration-statement.js
index 34e3fdd14f..2c866d5944 100644
--- a/deps/v8/test/webkit/function-declaration-statement.js
+++ b/deps/v8/test/webkit/function-declaration-statement.js
@@ -147,7 +147,7 @@ shouldBeTrue("forInVarTest()");
function forInVarInitTest()
{
var a;
- for (var a = false in { field: false })
+ for (var a in { field: false })
function f()
{
return true;
diff --git a/deps/v8/test/webkit/instance-of-immediates.js b/deps/v8/test/webkit/instance-of-immediates.js
index be63e4b00b..649a29f5a4 100644
--- a/deps/v8/test/webkit/instance-of-immediates.js
+++ b/deps/v8/test/webkit/instance-of-immediates.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-instanceof
-
description('This test makes sure that instance of behaves correctly when the value, constructor, or its prototype are immediates.');
// A Constructor to use check for instances of, and an instance called obj.
diff --git a/deps/v8/test/webkit/parser-xml-close-comment-expected.txt b/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
index 19ff53fc1c..b6aebe3d06 100644
--- a/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
+++ b/deps/v8/test/webkit/parser-xml-close-comment-expected.txt
@@ -30,7 +30,7 @@ PASS 'should be a syntax error' --> threw exception ReferenceError: Invalid left
PASS /**/ 1 --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS 1 /**/ --> threw exception ReferenceError: Invalid left-hand side expression in postfix operation.
PASS 1/*
-*/--> threw exception SyntaxError: Unexpected token >.
+*/--> is 1
PASS --> is undefined.
PASS /**/--> is undefined.
PASS /*
diff --git a/deps/v8/test/webkit/parser-xml-close-comment.js b/deps/v8/test/webkit/parser-xml-close-comment.js
index d3f458f659..a42da24e81 100644
--- a/deps/v8/test/webkit/parser-xml-close-comment.js
+++ b/deps/v8/test/webkit/parser-xml-close-comment.js
@@ -26,7 +26,7 @@ description("Test to ensure correct handling of --> as a single line comment whe
shouldThrow("'should be a syntax error' -->");
shouldThrow("/**/ 1 -->");
shouldThrow("1 /**/ -->");
-shouldThrow("1/*\n*/-->");
+shouldBe("1/*\n*/-->", "1");
shouldBeUndefined("-->");
shouldBeUndefined("/**/-->");
diff --git a/deps/v8/test/webkit/resources/JSON-stringify.js b/deps/v8/test/webkit/resources/JSON-stringify.js
index 0c406f3bda..24edafac09 100644
--- a/deps/v8/test/webkit/resources/JSON-stringify.js
+++ b/deps/v8/test/webkit/resources/JSON-stringify.js
@@ -41,304 +41,304 @@ function createTests() {
arrayWithSideEffectGetterAndProto.__defineGetter__("b", function(){this.foo=1;});
arrayWithSideEffectGetterAndProto.__proto__ = {foo:"bar"};
var result = [];
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(1);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(1.5);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(-1);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(-1.5);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(null);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify("string");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Number(0));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Number(1));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Number(1.5));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Number(-1));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Number(-1.5));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new String("a string object"));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Boolean(true));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var value = new Number(1);
value.valueOf = function() { return 2; }
return jsonObject.stringify(value);
});
result[result.length - 1].expected = '2';
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var value = new Boolean(true);
value.valueOf = function() { return 2; }
return jsonObject.stringify(value);
});
result[result.length - 1].expected = '2';
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var value = new String("fail");
value.toString = function() { return "converted string"; }
return jsonObject.stringify(value);
});
result[result.length - 1].expected = '"converted string"';
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(true);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(false);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Date(0));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON});
});
result[result.length - 1].throws = true;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return "custom toISOString"; }});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return {}; }});
});
result[result.length - 1].throws = true;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ throw "An exception"; }});
});
result[result.length - 1].throws = true;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var d = new Date(0);
d.toISOString = null;
return jsonObject.stringify(d);
});
result[result.length - 1].throws = true;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var d = new Date(0);
d.toJSON = undefined;
return jsonObject.stringify(d);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({get Foo() { return "bar"; }});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({get Foo() { this.foo="wibble"; return "bar"; }});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var count = 0;
jsonObject.stringify({get Foo() { count++; return "bar"; }});
return count;
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var count = 0;
return jsonObject.stringify({get Foo() { count++; delete this.bar; return "bar"; }, bar: "wibble"});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var count = 0;
return jsonObject.stringify({a:"1", b:"2", c:"3", 5:4, 4:5, 2:6, 1:7});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
jsonObject.stringify({a:"1", b:"2", c:"3", 5:4, 4:5, 2:6, 1:7}, function(k,v){allString = allString && (typeof k == "string"); return v});
return allString;
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
jsonObject.stringify([1,2,3,4,5], function(k,v){allString = allString && (typeof k == "string"); return v});
return allString;
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = [];
return jsonObject.stringify({a:"1", b:"2", c:"3", 5:4, 4:5, 2:6, 1:7}, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = ["a"];
return jsonObject.stringify({get a(){return 1;array[1]="b";array[2]="c"}, b:"2", c:"3"}, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = [{toString:function(){array[0]='a'; array[1]='c'; array[2]='b'; return 'a'}}];
return jsonObject.stringify(simpleObject, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = [{toString:function(){array[0]='a'; array[1]='c'; array[2]='b'; return 'a'}}];
return jsonObject.stringify(simpleObjectWithProto, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = [1, new Number(2), NaN, Infinity, -Infinity, new String("str")];
return jsonObject.stringify({"1":"1","2":"2","NaN":"NaN","Infinity":"Infinity","-Infinity":"-Infinity","str":"str"}, array);
});
result[result.length - 1].expected = '{"1":"1","2":"2","NaN":"NaN","Infinity":"Infinity","-Infinity":"-Infinity","str":"str"}';
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = ["1","2","3"];
return jsonObject.stringify({1:'a', 2:'b', 3:'c'}, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = ["1","2","3"];
return jsonObject.stringify(simpleArray, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArray, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArray, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArray, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArray, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, 10);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, 11);
});
result[result.length - 1].expected = JSON.stringify(simpleObject, null, 10);
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, " ");
});
result[result.length - 1].expected = JSON.stringify(simpleObject, null, 10);
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObject, null, " ");
});
result[result.length - 1].expected = JSON.stringify(simpleObject, null, 10);
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArray, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArray, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArray, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArray, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObject, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObject, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObject, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObject, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = ["1","2","3"];
return jsonObject.stringify(simpleArrayWithProto, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArrayWithProto, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArrayWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArrayWithProto, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleArrayWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, 10);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, 11);
});
result[result.length - 1].expected = JSON.stringify(simpleObjectWithProto, null, 10);
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, " ");
});
result[result.length - 1].expected = JSON.stringify(simpleObjectWithProto, null, 10);
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(simpleObjectWithProto, null, " ");
});
result[result.length - 1].expected = JSON.stringify(simpleObjectWithProto, null, 10);
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArrayWithProto, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArrayWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArrayWithProto, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexArrayWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObjectWithProto, null, " ");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObjectWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObjectWithProto, null, "ab");
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(complexObjectWithProto, null, 4);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(objectWithSideEffectGetter);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(objectWithSideEffectGetterAndProto);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(arrayWithSideEffectGetter);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(arrayWithSideEffectGetterAndProto);
});
var replaceTracker;
@@ -346,56 +346,56 @@ function createTests() {
replaceTracker += key + "("+(typeof key)+")" + JSON.stringify(value) + ";";
return value;
}
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
replaceTracker = "";
jsonObject.stringify([1,2,3,,,,4,5,6], replaceFunc);
return replaceTracker;
});
result[result.length - 1].expected = '(string)[1,2,3,null,null,null,4,5,6];0(number)1;1(number)2;2(number)3;3(number)undefined;4(number)undefined;5(number)undefined;6(number)4;7(number)5;8(number)6;'
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
replaceTracker = "";
jsonObject.stringify({a:"a", b:"b", c:"c", 3: "d", 2: "e", 1: "f"}, replaceFunc);
return replaceTracker;
});
result[result.length - 1].expected = '(string){"1":"f","2":"e","3":"d","a":"a","b":"b","c":"c"};1(string)"f";2(string)"e";3(string)"d";a(string)"a";b(string)"b";c(string)"c";';
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var count = 0;
var array = [{toString:function(){count++; array[0]='a'; array[1]='c'; array[2]='b'; return 'a'}}];
jsonObject.stringify(simpleObject, array);
return count;
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var allString = true;
var array = [{toString:function(){array[0]='a'; array[1]='c'; array[2]='b'; return 'a'}}, 'b', 'c'];
return jsonObject.stringify(simpleObject, array);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var count = 0;
var array = [{toString:function(){count++; array[0]='a'; array[1]='c'; array[2]='b'; return 'a'}}, 'b', 'c'];
jsonObject.stringify(simpleObject, array);
return count;
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({a:"1", get b() { this.a="foo"; return "getter"; }, c:"3"});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({a:"1", get b() { this.c="foo"; return "getter"; }, c:"3"});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var setterCalled = false;
jsonObject.stringify({a:"1", set b(s) { setterCalled = true; return "setter"; }, c:"3"});
return setterCalled;
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({a:"1", get b(){ return "getter"; }, set b(s) { return "setter"; }, c:"3"});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(new Array(10));
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify([undefined,,null,0,false]);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({p1:undefined,p2:null,p3:0,p4:false});
});
var cycleTracker = "";
@@ -407,12 +407,12 @@ function createTests() {
toJSON : function(key) { cycleTracker += key + "("+(typeof key)+"):" + this; return this; }
};
cyclicObject.self = cyclicObject;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
cycleTracker = "";
return jsonObject.stringify(cyclicObject);
});
result[result.length - 1].throws = true;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
cycleTracker = "";
try { jsonObject.stringify(cyclicObject); } catch(e) { cycleTracker += " -> exception" }
return cycleTracker;
@@ -422,12 +422,12 @@ function createTests() {
cyclicArray,
{toJSON : function(key,value) { cycleTracker += key + "("+(typeof key)+"):" + this; cycleTracker += "second,"; return this; }}];
cyclicArray[1] = cyclicArray;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
cycleTracker = "";
return jsonObject.stringify(cyclicArray);
});
result[result.length - 1].throws = true;
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
cycleTracker = "";
try { jsonObject.stringify(cyclicArray); } catch(e) { cycleTracker += " -> exception" }
return cycleTracker;
@@ -439,53 +439,53 @@ function createTests() {
get calls() {return ++getterCalls; },
"123":createArray(15, "foo"),
"":{a:"b"}});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
getterCalls = 0;
return jsonObject.stringify(magicObject) + " :: getter calls = " + getterCalls;
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(undefined);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(null);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({toJSON:function(){ return undefined; }});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({toJSON:function(){ return null; }});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify([{toJSON:function(){ return undefined; }}]);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify([{toJSON:function(){ return null; }}]);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({a:{toJSON:function(){ return undefined; }}});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({a:{toJSON:function(){ return null; }}});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({a:{toJSON:function(){ return function(){}; }}});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify({a:function(){}});
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var deepObject = {};
for (var i = 0; i < 1024; i++)
deepObject = {next:deepObject};
return jsonObject.stringify(deepObject);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var deepArray = [];
for (var i = 0; i < 1024; i++)
deepArray = [deepArray];
return jsonObject.stringify(deepArray);
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var depth = 0;
function toDeepVirtualJSONObject() {
if (++depth >= 1024)
@@ -496,7 +496,7 @@ function createTests() {
}
return jsonObject.stringify(toDeepVirtualJSONObject());
});
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
var depth = 0;
function toDeepVirtualJSONArray() {
if (++depth >= 1024)
@@ -510,7 +510,7 @@ function createTests() {
var fullCharsetString = "";
for (var i = 0; i < 65536; i++)
fullCharsetString += String.fromCharCode(i);
- result.push(function(jsonObject){
+ result.push(function (jsonObject){
return jsonObject.stringify(fullCharsetString);
});
return result;
diff --git a/deps/v8/test/webkit/run-json-stringify-expected.txt b/deps/v8/test/webkit/run-json-stringify-expected.txt
index 2cd78521b5..bbd9f2806b 100644
--- a/deps/v8/test/webkit/run-json-stringify-expected.txt
+++ b/deps/v8/test/webkit/run-json-stringify-expected.txt
@@ -83,7 +83,7 @@ PASS tests[i](nativeJSON) is tests[i](JSON)
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON});
}
-PASS tests[i](nativeJSON) threw exception TypeError: (var).toISOString is not a function.
+PASS tests[i](nativeJSON) threw exception TypeError: toISOString is not a function.
function (jsonObject){
return jsonObject.stringify({toJSON: Date.prototype.toJSON, toISOString: function(){ return "custom toISOString"; }});
}
@@ -101,7 +101,7 @@ function (jsonObject){
d.toISOString = null;
return jsonObject.stringify(d);
}
-PASS tests[i](nativeJSON) threw exception TypeError: (var).toISOString is not a function.
+PASS tests[i](nativeJSON) threw exception TypeError: toISOString is not a function.
function (jsonObject){
var d = new Date(0);
d.toJSON = undefined;
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index 01a2713312..c18120ca18 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -106,12 +106,6 @@ class WebkitTestSuite(testsuite.TestSuite):
if not string: return True
return (string.startswith("==") or string.startswith("**") or
string.startswith("ANDROID") or
- # These five patterns appear in normal Native Client output.
- string.startswith("DEBUG MODE ENABLED") or
- string.startswith("tools/nacl-run.py") or
- string.find("BYPASSING ALL ACL CHECKS") > 0 or
- string.find("Native Client module will be loaded") > 0 or
- string.find("NaClHostDescOpen:") > 0 or
# FIXME(machenbach): The test driver shouldn't try to use slow
# asserts if they weren't compiled. This fails in optdebug=2.
string == "Warning: unknown flag --enable-slow-asserts." or
diff --git a/deps/v8/test/webkit/toString-for-var-decl.js b/deps/v8/test/webkit/toString-for-var-decl.js
index 2b4db73168..ba308837d0 100644
--- a/deps/v8/test/webkit/toString-for-var-decl.js
+++ b/deps/v8/test/webkit/toString-for-var-decl.js
@@ -25,7 +25,7 @@ description(
"This test checks for a couple of specific ways that bugs in toString() round trips have changed the meanings of functions with var declarations inside for loops."
);
-function f1() { for (var j = 1 in []) {} }
+function f1() { for (var j in []) {} }
var f2 = function () { for (var j = 1; j < 10; ++j) {} }
var f3 = function () { for (j = 1;j < 10; ++j) {} }
var f4 = function () { for (var j;;) {} }
diff --git a/deps/v8/test/webkit/webkit.gyp b/deps/v8/test/webkit/webkit.gyp
index 8d655feb22..cd4c4b981b 100644
--- a/deps/v8/test/webkit/webkit.gyp
+++ b/deps/v8/test/webkit/webkit.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'webkit.isolate',
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index e23b9cfa0b..9e336a2f97 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -33,8 +33,6 @@
'dfg-inline-arguments-become-int32': [PASS, FAIL],
'dfg-inline-arguments-reset': [PASS, FAIL],
'dfg-inline-arguments-reset-changetype': [PASS, FAIL],
- # TODO(turbofan): We run out of stack earlier on 64-bit for now.
- 'fast/js/deep-recursion-test': [PASS, NO_VARIANTS],
# Irregexp interpreter overflows stack. We should just not crash.
'fast/js/regexp-stack-overflow': [PASS, FAIL],
}], # ALWAYS
@@ -70,6 +68,14 @@
# Too slow.
'dfg-int-overflow-in-loop': [SKIP],
}], # 'arch == ppc or arch == ppc64'
+['arch == s390 or arch == s390x', {
+ # Too slow.
+ 'dfg-int-overflow-in-loop': [SKIP],
+}], # 'arch == s390 or arch == s390x'
+['arch == x87', {
+ # Too slow.
+ 'dfg-negative-array-index': [SKIP],
+}], # 'arch == x87'
##############################################################################
['asan == True', {
@@ -97,17 +103,24 @@
}], # 'gc_stress == True and mode == debug'
##############################################################################
-['ignition == True', {
- # TODO(4680): Throws a RangeError due to stack overflow. Need investigation.
- 'fast/js/excessive-comma-usage': [SKIP], # Stack is brittle, SKIP not FAIL.
-}], # ignition == True
+['variant == ignition and msan', {
+ # TODO(mythria,4680): Too slow and timeout on ignition.
+ 'dfg-double-vote-fuzz': [SKIP],
+ 'dfg-int-overflow-in-loop': [SKIP],
+}], # variant == ignition and msan
##############################################################################
-['ignition == True and msan', {
+['variant == ignition_turbofan and msan', {
# TODO(mythria,4680): Too slow and timeout on ignition.
'dfg-double-vote-fuzz': [SKIP],
'dfg-int-overflow-in-loop': [SKIP],
-}], # ignition == True and msan
+}], # variant == ignition_turbofan and msan
+
+##############################################################################
+['arch == arm64 and msan', {
+ # Stack size too large with masm on Arm64.
+ 'fast/js/excessive-comma-usage': [SKIP],
+}], # arch == arm64 and msan
##############################################################################
['gcov_coverage', {
diff --git a/deps/v8/testing/gmock.gyp b/deps/v8/testing/gmock.gyp
index ba4386141a..89c97e32ec 100644
--- a/deps/v8/testing/gmock.gyp
+++ b/deps/v8/testing/gmock.gyp
@@ -31,16 +31,19 @@
'gmock/src/gmock-spec-builders.cc',
'gmock/src/gmock.cc',
'gmock-support.h', # gMock helpers
+ 'gmock_custom/gmock/internal/custom/gmock-port.h',
],
'sources!': [
'gmock/src/gmock-all.cc', # Not needed by our build.
],
'include_dirs': [
+ 'gmock_custom',
'gmock',
'gmock/include',
],
- 'direct_dependent_settings': {
+ 'all_dependent_settings': {
'include_dirs': [
+ 'gmock_custom',
'gmock/include', # So that gmock headers can find themselves.
],
},
diff --git a/deps/v8/testing/gmock_custom/gmock/internal/custom/gmock-port.h b/deps/v8/testing/gmock_custom/gmock/internal/custom/gmock-port.h
new file mode 100644
index 0000000000..1e8d86dbc0
--- /dev/null
+++ b/deps/v8/testing/gmock_custom/gmock/internal/custom/gmock-port.h
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Copied from http://crrev.com/6ad76b419eacefc4/testing/gmock_custom/gmock/internal/custom/gmock-port.h
+
+#ifndef TESTING_GMOCK_CUSTOM_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
+#define TESTING_GMOCK_CUSTOM_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
+
+#include <type_traits>
+
+namespace std {
+
+// Provide an alternative implementation of std::is_default_constructible for
+// old, pre-4.7 versions of libstdc++, where is_default_constructible is
+// missing. __GLIBCXX__ < 20120322 below implies pre-4.7.0. In addition we
+// blacklist several versions that were released after 4.7.0 from the
+// pre-4.7.0 branch: 20120702 implies 4.5.4, and 20121127 implies 4.6.4.
+#if defined(__GLIBCXX__) && \
+ (__GLIBCXX__ < 20120322 || \
+ __GLIBCXX__ == 20120702 || \
+ __GLIBCXX__ == 20121127)
+template <typename T>
+using is_default_constructible = std::is_constructible<T>;
+#endif
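+
+// A minimal usage sketch (assuming the fallback above is active): code such as
+//   static_assert(std::is_default_constructible<int>::value,
+//                 "int is default constructible");
+// now also compiles against these old libstdc++ headers.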
+
+}  // namespace std
+
+#endif // TESTING_GMOCK_CUSTOM_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
diff --git a/deps/v8/testing/gtest.gyp b/deps/v8/testing/gtest.gyp
index d7662101cf..a94ee884fe 100644
--- a/deps/v8/testing/gtest.gyp
+++ b/deps/v8/testing/gtest.gyp
@@ -53,33 +53,15 @@
# In order to allow regex matches in gtest to be shared between Windows
# and other systems, we tell gtest to always use its internal engine.
'GTEST_HAS_POSIX_RE=0',
- # Chrome doesn't support / require C++11, yet.
- 'GTEST_LANG_CXX11=0',
+ 'GTEST_LANG_CXX11=1',
],
'all_dependent_settings': {
'defines': [
'GTEST_HAS_POSIX_RE=0',
- 'GTEST_LANG_CXX11=0',
+ 'GTEST_LANG_CXX11=1',
],
},
'conditions': [
- ['os_posix == 1', {
- 'defines': [
- # gtest isn't able to figure out when RTTI is disabled for gcc
- # versions older than 4.3.2, and assumes it's enabled. Our Mac
- # and Linux builds disable RTTI, and cannot guarantee that the
- # compiler will be 4.3.2. or newer. The Mac, for example, uses
- # 4.2.1 as that is the latest available on that platform. gtest
- # must be instructed that RTTI is disabled here, and for any
- # direct dependents that might include gtest headers.
- 'GTEST_HAS_RTTI=0',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'GTEST_HAS_RTTI=0',
- ],
- },
- }],
['OS=="android"', {
'defines': [
'GTEST_HAS_CLONE=0',
@@ -90,25 +72,6 @@
],
},
}],
- ['OS=="android"', {
- # We want gtest features that use tr1::tuple, but we currently
- # don't support the variadic templates used by libstdc++'s
- # implementation. gtest supports this scenario by providing its
- # own implementation but we must opt in to it.
- 'defines': [
- 'GTEST_USE_OWN_TR1_TUPLE=1',
- # GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
- # gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
- # automatically on android, so it has to be set explicitly here.
- 'GTEST_HAS_TR1_TUPLE=1',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'GTEST_USE_OWN_TR1_TUPLE=1',
- 'GTEST_HAS_TR1_TUPLE=1',
- ],
- },
- }],
],
'direct_dependent_settings': {
'defines': [
diff --git a/deps/v8/testing/gtest/include/gtest/gtest_prod.h b/deps/v8/testing/gtest/include/gtest/gtest_prod.h
new file mode 100644
index 0000000000..da80ddc6c7
--- /dev/null
+++ b/deps/v8/testing/gtest/include/gtest/gtest_prod.h
@@ -0,0 +1,58 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+// private:
+// void MyMethod();
+// FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+// // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+// // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
new file mode 100644
index 0000000000..4f97777a6a
--- /dev/null
+++ b/deps/v8/tools/BUILD.gn
@@ -0,0 +1,59 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../gni/isolate.gni")
+
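+# Each v8_isolate_run target below wraps one of the stand-alone tool test
+# suites into an isolate; all of them depend only on d8. The gn_all group
+# pulls them in unless v8_test_isolation_mode is "noop".
+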
+group("gn_all") {
+ testonly = true
+
+ if (v8_test_isolation_mode != "noop") {
+ deps = [
+ ":check-static-initializers_run",
+ ":jsfunfuzz_run",
+ ":run-deopt-fuzzer_run",
+ ":run-gcmole_run",
+ ":run-valgrind_run",
+ ]
+ }
+}
+
+v8_isolate_run("check-static-initializers") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "check-static-initializers.isolate"
+}
+
+v8_isolate_run("jsfunfuzz") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "jsfunfuzz/jsfunfuzz.isolate"
+}
+
+v8_isolate_run("run-deopt-fuzzer") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "run-deopt-fuzzer.isolate"
+}
+
+v8_isolate_run("run-gcmole") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "gcmole/run-gcmole.isolate"
+}
+
+v8_isolate_run("run-valgrind") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "run-valgrind.isolate"
+}
diff --git a/deps/v8/tools/android-run.py b/deps/v8/tools/android-run.py
index dc1359883a..4765f86b4c 100755
--- a/deps/v8/tools/android-run.py
+++ b/deps/v8/tools/android-run.py
@@ -88,7 +88,8 @@ def Main():
print("Usage: %s <command-to-run-on-device>" % sys.argv[0])
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
- android_workspace = os.getenv("ANDROID_V8", "/data/local/tmp/v8")
+ v8_root = "/data/local/tmp/v8"
+ android_workspace = os.getenv("ANDROID_V8", v8_root)
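+  # The adb invocation below cd's into v8_root before running the wrapper
+  # script, so the command executes relative to the on-device V8 root.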
args = [Escape(arg) for arg in sys.argv[1:]]
script = (" ".join(args) + "\n"
"case $? in\n"
@@ -99,7 +100,7 @@ def Main():
script_file = WriteToTemporaryFile(script)
android_script_file = android_workspace + "/" + script_file
command = ("adb push '%s' %s;" % (script_file, android_script_file) +
- "adb shell 'sh %s';" % android_script_file +
+ "adb shell 'cd %s && sh %s';" % (v8_root, android_script_file) +
"adb shell 'rm %s'" % android_script_file)
error_code = Execute(command)
os.unlink(script_file)
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
new file mode 100644
index 0000000000..76cc8c686d
--- /dev/null
+++ b/deps/v8/tools/callstats.html
@@ -0,0 +1,1809 @@
+<html>
+<!--
+Copyright 2016 the V8 project authors. All rights reserved. Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+
+<head>
+ <meta charset="UTF-8">
+ <style>
+ body {
+ font-family: arial;
+ }
+
+ table {
+ display: table;
+ border-spacing: 0px;
+ }
+
+ tr {
+ border-spacing: 0px;
+ padding: 10px;
+ }
+
+ td,
+ th {
+ padding: 3px 10px 3px 5px;
+ }
+
+ .inline {
+ display: inline-block;
+ vertical-align: top;
+ }
+
+ h2,
+ h3 {
+ margin-bottom: 0px;
+ }
+
+ .hidden {
+ display: none;
+ }
+
+ .view {
+ display: table;
+ }
+
+ .column {
+ display: table-cell;
+ border-right: 1px black dotted;
+ min-width: 200px;
+ }
+
+ .column .header {
+ padding: 0 10px 0 10px
+ }
+
+ #column {
+ display: none;
+ }
+
+ .list {
+ width: 100%;
+ }
+
+ select {
+ width: 100%
+ }
+
+ .list tbody {
+ cursor: pointer;
+ }
+
+ .list tr:nth-child(even) {
+ background-color: #EFEFEF;
+ }
+
+ .list tr:nth-child(even).selected {
+ background-color: #DDD;
+ }
+
+ .list tr.child {
+ display: none;
+ }
+
+ .list tr.child.visible {
+ display: table-row;
+ }
+
+ .list .child .name {
+ padding-left: 20px;
+ }
+
+ .list .parent td {
+ border-top: 1px solid #AAA;
+ }
+
+ .list .total {
+ font-weight: bold
+ }
+
+ .list tr.parent {
+ background-color: #FFF;
+ }
+
+ .list tr.parent.selected {
+ background-color: #DDD;
+ }
+
+ tr.selected {
+ background-color: #DDD;
+ }
+
+ .codeSearch {
+      display: inline-block;
+ float: right;
+ border-radius: 5px;
+ background-color: #EEE;
+ width: 1em;
+ text-align: center;
+ }
+
+ .list .position {
+ text-align: right;
+ display: none;
+ }
+
+ .list div.toggle {
+ cursor: pointer;
+ }
+
+ #column_0 .position {
+ display: table-cell;
+ }
+
+ #column_0 .name {
+ display: table-cell;
+ }
+
+ .list .name {
+ display: none;
+ white-space: nowrap;
+ }
+
+ .value {
+ text-align: right;
+ }
+
+ .selectedVersion {
+ font-weight: bold;
+ }
+
+ #baseline {
+ width: auto;
+ }
+
+ .compareSelector {
+ padding-bottom: 20px;
+ }
+
+ .pageDetailTable tbody {
+ cursor: pointer
+ }
+
+ .pageDetailTable tfoot td {
+ border-top: 1px grey solid;
+ }
+
+ #popover {
+ position: absolute;
+ transform: translateY(-50%) translateX(40px);
+ box-shadow: -2px 10px 44px -10px #000;
+ border-radius: 5px;
+ z-index: 1;
+ background-color: #FFF;
+ display: none;
+ white-space: nowrap;
+ }
+
+ #popover table {
+ position: relative;
+ z-index: 1;
+ text-align: right;
+ margin: 10px;
+ }
+ #popover td {
+ padding: 3px 0px 3px 5px;
+ white-space: nowrap;
+ }
+
+ .popoverArrow {
+ background-color: #FFF;
+ position: absolute;
+ width: 30px;
+ height: 30px;
+ transform: translateY(-50%)rotate(45deg);
+ top: 50%;
+ left: -10px;
+ z-index: 0;
+ }
+
+ #popover .name {
+ padding: 5px;
+ font-weight: bold;
+ text-align: center;
+ }
+
+ #popover table .compare {
+ display: none
+ }
+
+ #popover table.compare .compare {
+ display: table-cell;
+ }
+
+ #popover .compare .time,
+ #popover .compare .version {
+ padding-left: 10px;
+ }
+ .graph,
+ .graph .content {
+ width: 100%;
+ }
+
+ .diff .hideDiff {
+ display: none;
+ }
+ .noDiff .hideNoDiff {
+ display: none;
+ }
+ </style>
+ <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+ <script type="text/javascript">
+ "use strict"
+ google.charts.load('current', {packages: ['corechart']});
+
+ // Did anybody say monkeypatching?
+ if (!NodeList.prototype.forEach) {
+ NodeList.prototype.forEach = function(func) {
+ for (var i = 0; i < this.length; i++) {
+ func(this[i]);
+ }
+ }
+ }
+
+ var versions;
+ var pages;
+ var selectedPage;
+ var baselineVersion;
+ var selectedEntry;
+
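+    // Rebuilds the column UI from the loaded versions: one column per
+    // enabled version, each with its own version and page selectors.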
+ function initialize() {
+ var original = $("column");
+ var view = document.createElement('div');
+ view.id = 'view';
+ var i = 0;
+ versions.forEach((version) => {
+ if (!version.enabled) return;
+ // add column
+ var column = original.cloneNode(true);
+ column.id = "column_" + i;
+ // Fill in all versions
+ var select = column.querySelector(".version");
+ select.id = "selectVersion_" + i;
+ // add all select options
+ versions.forEach((version) => {
+ if (!version.enabled) return;
+ var option = document.createElement("option");
+ option.textContent = version.name;
+ option.version = version;
+ select.appendChild(option);
+ });
+ // Fill in all page versions
+ select = column.querySelector(".pageVersion");
+ select.id = "select_" + i;
+ // add all pages
+ versions.forEach((version) => {
+ if (!version.enabled) return;
+ var optgroup = document.createElement("optgroup");
+ optgroup.label = version.name;
+ optgroup.version = version;
+ version.forEachPage((page) => {
+ var option = document.createElement("option");
+ option.textContent = page.name;
+ option.page = page;
+ optgroup.appendChild(option);
+ });
+ select.appendChild(optgroup);
+ });
+ view.appendChild(column);
+ i++;
+ });
+ var oldView = $('view');
+ oldView.parentNode.replaceChild(view, oldView);
+
+ var select = $('baseline');
+ removeAllChildren(select);
+ select.appendChild(document.createElement('option'));
+ versions.forEach((version) => {
+ var option = document.createElement("option");
+ option.textContent = version.name;
+ option.version = version;
+ select.appendChild(option);
+ });
+ initializeToggleList(versions.versions, $('versionSelector'));
+ initializeToggleList(pages.values(), $('pageSelector'));
+ initializeToggleContentVisibility();
+ }
+
+ function initializeToggleList(items, node) {
+ var list = node.querySelector('ul');
+ removeAllChildren(list);
+ items = Array.from(items);
+ items.sort(NameComparator);
+ items.forEach((item) => {
+ var li = document.createElement('li');
+ var checkbox = document.createElement('input');
+ checkbox.type = 'checkbox';
+ checkbox.checked = item.enabled;
+ checkbox.item = item;
+ checkbox.addEventListener('click', handleToggleVersionEnable);
+ li.appendChild(checkbox);
+ li.appendChild(document.createTextNode(item.name));
+ list.appendChild(li);
+ });
+ $('results').querySelectorAll('#results > .hidden').forEach((node) => {
+ toggleCssClass(node, 'hidden', false);
+ })
+ }
+
+ function initializeToggleContentVisibility() {
+ var nodes = document.querySelectorAll('.toggleContentVisibility');
+ nodes.forEach((node) => {
+ var content = node.querySelector('.content');
+ var header = node.querySelector('h1,h2,h3');
+ if (content === undefined || header === undefined) return;
+ if (header.querySelector('input') != undefined) return;
+ var checkbox = document.createElement('input');
+ checkbox.type = 'checkbox';
+ checkbox.checked = content.className.indexOf('hidden') == -1;
+ checkbox.contentNode = content;
+ checkbox.addEventListener('click', handleToggleContentVisibility);
+ header.insertBefore(checkbox, header.childNodes[0]);
+ });
+ }
+
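+    // Makes firstPage the page shown in the first column and shows the
+    // other versions of the same page in the remaining columns.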
+ function showPage(firstPage) {
+ var changeSelectedEntry = selectedEntry !== undefined
+ && selectedEntry.page === selectedPage;
+ selectedPage = firstPage;
+ selectedPage.sort();
+ showPageInColumn(firstPage, 0);
+ // Show the other versions of this page in the following columns.
+ var pageVersions = versions.getPageVersions(firstPage);
+ var index = 1;
+ pageVersions.forEach((page) => {
+ if (page !== firstPage) {
+ showPageInColumn(page, index);
+ index++;
+ }
+ });
+ if (changeSelectedEntry) {
+ showEntryDetail(selectedPage.getEntry(selectedEntry));
+ } else {
+ showImpactList(selectedPage);
+ }
+ }
+
+ function showPageInColumn(page, columnIndex) {
+ page.sort();
+ var showDiff = (baselineVersion === undefined && columnIndex !== 0) ||
+ (baselineVersion !== undefined && page.version !== baselineVersion);
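+      // diffStatus colors a value cell according to how the current value a
+      // compares with the reference value b.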
+ var diffStatus = (td, a, b) => {};
+ if (showDiff) {
+ if (baselineVersion !== undefined) {
+ diffStatus = (td, a, b) => {
+ if (a == 0) return;
+ td.style.color = a < 0 ? '#FF0000' : '#00BB00';
+ };
+ } else {
+ diffStatus = (td, a, b) => {
+ if (a == b) return;
+ var color;
+ var ratio = a / b;
+ if (ratio > 1) {
+              ratio = Math.min(Math.round((ratio - 1) * 255 * 10), 200);
+              // Pad to two hex digits so the color stays a valid 6-digit value.
+              color = '#' + ('0' + ratio.toString(16)).slice(-2) + "0000";
+            } else {
+              ratio = Math.min(Math.round((1 - ratio) * 255 * 10), 200);
+              color = '#00' + ('0' + ratio.toString(16)).slice(-2) + "00";
+ }
+ td.style.color = color;
+ }
+ }
+ }
+
+ var column = $('column_' + columnIndex);
+ var select = $('select_' + columnIndex);
+ // Find the matching option
+ selectOption(select, (i, option) => {
+ return option.page == page
+ });
+ var table = column.querySelector("table");
+ var oldTbody = table.querySelector('tbody');
+ var tbody = document.createElement('tbody');
+ var referencePage = selectedPage;
+ page.forEachSorted(selectedPage, (parentEntry, entry, referenceEntry) => {
+ // Filter out entries that do not exist in the first column for the default
+ // view.
+ if (baselineVersion === undefined && referenceEntry &&
+ referenceEntry.time == 0) {
+ return;
+ }
+ var tr = document.createElement('tr');
+ tbody.appendChild(tr);
+ tr.entry = entry;
+ tr.parentEntry = parentEntry;
+ tr.className = parentEntry === undefined ? 'parent' : 'child';
+ // Don't show entries that do not exist on the current page or if we
+ // compare against the current page
+ if (entry !== undefined && page.version !== baselineVersion) {
+ // If we show a diff, use the baselineVersion as the referenceEntry
+ if (baselineVersion !== undefined) {
+ var baselineEntry = baselineVersion.getEntry(entry);
+ if (baselineEntry !== undefined) referenceEntry = baselineEntry
+ }
+ if (!parentEntry) {
+ var node = td(tr, '<div class="toggle">►</div>', 'position');
+ node.firstChild.addEventListener('click', handleToggleGroup);
+ } else {
+ td(tr, entry.position == 0 ? '' : entry.position, 'position');
+ }
+ addCodeSearchButton(entry,
+ td(tr, entry.name, 'name ' + entry.cssClass()));
+
+ diffStatus(
+ td(tr, ms(entry.time), 'value time'),
+ entry.time, referenceEntry.time);
+ diffStatus(
+ td(tr, percent(entry.timePercent), 'value time'),
+ entry.time, referenceEntry.time);
+ diffStatus(
+ td(tr, count(entry.count), 'value count'),
+ entry.count, referenceEntry.count);
+ } else if (baselineVersion !== undefined && referenceEntry
+ && page.version !== baselineVersion) {
+ // Show comparison of entry that does not exist on the current page.
+ tr.entry = new Entry(0, referenceEntry.name);
+ tr.entry.page = page;
+ td(tr, '-', 'position');
+ td(tr, referenceEntry.name, 'name');
+ diffStatus(
+ td(tr, ms(-referenceEntry.time), 'value time'),
+ -referenceEntry.time, 0);
+ diffStatus(
+ td(tr, percent(-referenceEntry.timePercent), 'value time'),
+ -referenceEntry.timePercent, 0);
+ diffStatus(
+ td(tr, count(-referenceEntry.count), 'value count'),
+ -referenceEntry.count, 0);
+ } else {
+ // Display empty entry / baseline entry
+ var showBaselineEntry = entry !== undefined;
+ if (showBaselineEntry) {
+ if (!parentEntry) {
+ var node = td(tr, '<div class="toggle">►</div>', 'position');
+ node.firstChild.addEventListener('click', handleToggleGroup);
+ } else {
+ td(tr, entry.position == 0 ? '' : entry.position, 'position');
+ }
+ td(tr, entry.name, 'name');
+ td(tr, ms(entry.time, false), 'value time');
+ td(tr, percent(entry.timePercent, false), 'value time');
+ td(tr, count(entry.count, false), 'value count');
+ } else {
+ td(tr, '-', 'position');
+ td(tr, '-', 'name');
+ td(tr, '-', 'value time');
+ td(tr, '-', 'value time');
+ td(tr, '-', 'value count');
+ }
+ }
+ });
+ table.replaceChild(tbody, oldTbody);
+ var versionSelect = column.querySelector('select.version');
+ selectOption(versionSelect, (index, option) => {
+ return option.version == page.version
+ });
+ }
+
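+    // Highlights the given entry in every visible column, switching the
+    // selected page first when the entry belongs to a different page.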
+ function selectEntry(entry, updateSelectedPage) {
+ if (updateSelectedPage) {
+ entry = selectedPage.version.getEntry(entry);
+ }
+ var rowIndex = 0;
+ var needsPageSwitch = updateSelectedPage && entry.page != selectedPage;
+ // If clicked in the detail row change the first column to that page.
+ if (needsPageSwitch) showPage(entry.page);
+ var childNodes = $('column_0').querySelector('.list tbody').childNodes;
+ for (var i = 0; i < childNodes.length; i++) {
+ if (childNodes[i].entry.name == entry.name) {
+ rowIndex = i;
+ break;
+ }
+ }
+ var firstEntry = childNodes[rowIndex].entry;
+ if (rowIndex) {
+ if (firstEntry.parent) showGroup(firstEntry.parent);
+ }
+ // Deselect all
+ $('view').querySelectorAll('.list tbody tr').forEach((tr) => {
+ toggleCssClass(tr, 'selected', false);
+ });
+ // Select the entry row
+ $('view').querySelectorAll("tbody").forEach((body) => {
+ var row = body.childNodes[rowIndex];
+ if (!row) return;
+ toggleCssClass(row, 'selected', row.entry && row.entry.name ==
+ firstEntry.name);
+ });
+ if (updateSelectedPage) {
+ entry = selectedEntry.page.version.getEntry(entry);
+ }
+ selectedEntry = entry;
+ showEntryDetail(entry);
+ }
+
+ function showEntryDetail(entry) {
+ showVersionDetails(entry);
+ showPageDetails(entry);
+ showImpactList(entry.page);
+ showGraphs(entry.page);
+ }
+
+ function showVersionDetails(entry) {
+ var table, tbody, entries;
+ table = $('detailView').querySelector('.versionDetailTable');
+ tbody = document.createElement('tbody');
+ if (entry !== undefined) {
+ $('detailView').querySelector('.versionDetail h3 span').innerHTML =
+ entry.name + ' in ' + entry.page.name;
+ entries = versions.getPageVersions(entry.page).map(
+ (page) => {
+ return page.get(entry.name)
+ });
+ entries.sort((a, b) => {
+ return a.time - b.time
+ });
+ entries.forEach((pageEntry) => {
+ if (pageEntry === undefined) return;
+ var tr = document.createElement('tr');
+ if (pageEntry == entry) tr.className += 'selected';
+ tr.entry = pageEntry;
+ var isBaselineEntry = pageEntry.page.version == baselineVersion;
+ td(tr, pageEntry.page.version.name, 'version');
+ td(tr, ms(pageEntry.time, !isBaselineEntry), 'value time');
+ td(tr, percent(pageEntry.timePercent, !isBaselineEntry), 'value time');
+ td(tr, count(pageEntry.count, !isBaselineEntry), 'value count');
+ tbody.appendChild(tr);
+ });
+ }
+ table.replaceChild(tbody, table.querySelector('tbody'));
+ }
+
+ function showPageDetails(entry) {
+ var table, tbody, entries;
+ table = $('detailView').querySelector('.pageDetailTable');
+ tbody = document.createElement('tbody');
+ if (entry === undefined) {
+ table.replaceChild(tbody, table.querySelector('tbody'));
+ return;
+ }
+ var version = entry.page.version;
+ var showDiff = version !== baselineVersion;
+ $('detailView').querySelector('.pageDetail h3 span').innerHTML =
+ version.name;
+ entries = version.pages.map((page) => {
+ if (!page.enabled) return;
+ return page.get(entry.name)
+ });
+ entries.sort((a, b) => {
+ var cmp = b.timePercent - a.timePercent;
+ if (cmp.toFixed(1) == 0) return b.time - a.time;
+ return cmp
+ });
+ entries.forEach((pageEntry) => {
+ if (pageEntry === undefined) return;
+ var tr = document.createElement('tr');
+ if (pageEntry === entry) tr.className += 'selected';
+ tr.entry = pageEntry;
+ td(tr, pageEntry.page.name, 'name');
+ td(tr, ms(pageEntry.time, showDiff), 'value time');
+ td(tr, percent(pageEntry.timePercent, showDiff), 'value time');
+ td(tr, percent(pageEntry.timePercentPerEntry, showDiff),
+ 'value time hideNoDiff');
+ td(tr, count(pageEntry.count, showDiff), 'value count');
+ tbody.appendChild(tr);
+ });
+ // show the total for all pages
+ var tds = table.querySelectorAll('tfoot td');
+ tds[1].innerHTML = ms(entry.getTimeImpact(), showDiff);
+ // Only show the percentage total if we are in diff mode:
+ tds[2].innerHTML = percent(entry.getTimePercentImpact(), showDiff);
+ tds[3].innerHTML = '';
+ tds[4].innerHTML = count(entry.getCountImpact(), showDiff);
+ table.replaceChild(tbody, table.querySelector('tbody'));
+ }
+
+ function showImpactList(page) {
+ var impactView = $('detailView').querySelector('.impactView');
+ impactView.querySelector('h3 span').innerHTML = page.version.name;
+
+ var table = impactView.querySelector('table');
+ var tbody = document.createElement('tbody');
+ var version = page.version;
+ var entries = version.allEntries();
+ if (selectedEntry !== undefined && selectedEntry.isGroup) {
+ impactView.querySelector('h3 span').innerHTML += " " + selectedEntry.name;
+ entries = entries.filter((entry) => {
+ return entry.name == selectedEntry.name ||
+ (entry.parent && entry.parent.name == selectedEntry.name)
+ });
+ }
+ var isCompareView = baselineVersion !== undefined;
+ entries = entries.filter((entry) => {
+ if (isCompareView) {
+ var impact = entry.getTimeImpact();
+ return impact < -1 || 1 < impact
+ }
+ return entry.getTimePercentImpact() > 0.1;
+ });
+ entries.sort((a, b) => {
+ var cmp = b.getTimePercentImpact() - a.getTimePercentImpact();
+ if (isCompareView || cmp.toFixed(1) == 0) {
+ return b.getTimeImpact() - a.getTimeImpact();
+ }
+ return cmp
+ });
+ entries.forEach((entry) => {
+ var tr = document.createElement('tr');
+ tr.entry = entry;
+ td(tr, entry.name, 'name');
+ td(tr, ms(entry.getTimeImpact()), 'value time');
+ var percentImpact = entry.getTimePercentImpact();
+ td(tr, percentImpact > 1000 ? '-' : percent(percentImpact), 'value time');
+ var topPages = entry.getPagesByPercentImpact().slice(0, 3)
+ .map((each) => {
+ return each.name + ' (' + percent(each.getEntry(entry).timePercent) +
+ ')'
+ });
+ td(tr, topPages.join(', '), 'name');
+ tbody.appendChild(tr);
+ });
+ table.replaceChild(tbody, table.querySelector('tbody'));
+ }
+
+ function showGraphs(page) {
+ var groups = page.groups.slice();
+ // Sort groups by the biggest impact
+ groups.sort((a, b) => {
+ return b.getTimeImpact() - a.getTimeImpact();
+ });
+ if (selectedGroup == undefined) {
+ selectedGroup = groups[0];
+ } else {
+ groups = groups.filter(each => each.name != selectedGroup.name);
+ groups.unshift(selectedGroup);
+ }
+ showPageGraph(groups, page);
+ showVersionGraph(groups, page);
+ showPageVersionGraph(groups, page);
+ }
+
+ function getGraphDataTable(groups) {
+ var dataTable = new google.visualization.DataTable();
+ dataTable.addColumn('string', 'Name');
+ groups.forEach(group => {
+ var column = dataTable.addColumn('number', group.name.substring(6));
+ dataTable.setColumnProperty(column, 'group', group);
+ });
+ return dataTable;
+ }
+
+ var selectedGroup;
+ function showPageGraph(groups, page) {
+ var isDiffView = baselineVersion !== undefined;
+ var dataTable = getGraphDataTable(groups);
+ // Calculate the average row
+ var row = ['Average'];
+ groups.forEach((group) => {
+ if (isDiffView) {
+ row.push(group.isTotal ? 0 : group.getAverageTimeImpact());
+ } else {
+ row.push(group.isTotal ? 0 : group.getTimeImpact());
+ }
+ });
+ dataTable.addRow(row);
+ // Sort the pages by the selected group.
+ var pages = page.version.pages.filter(page => page.enabled);
+ function sumDiff(page) {
+ var sum = 0;
+ groups.forEach(group => {
+ var value = group.getTimePercentImpact() -
+ page.getEntry(group).timePercent;
+ sum += value * value;
+ });
+ return sum;
+ }
+ if (isDiffView) {
+ pages.sort((a, b) => {
+ return b.getEntry(selectedGroup).time-
+ a.getEntry(selectedGroup).time;
+ });
+ } else {
+ pages.sort((a, b) => {
+ return b.getEntry(selectedGroup).timePercent -
+ a.getEntry(selectedGroup).timePercent;
+ });
+ }
+ // Sort by sum of squared distance to the average.
+ // pages.sort((a, b) => {
+ // return a.distanceFromTotalPercent() - b.distanceFromTotalPercent();
+ // });
+ // Calculate the entries for the pages
+ pages.forEach((page) => {
+ row = [page.name];
+ groups.forEach((group) => {
+ row.push(group.isTotal ? 0 : page.getEntry(group).time);
+ });
+ var rowIndex = dataTable.addRow(row);
+ dataTable.setRowProperty(rowIndex, 'page', page);
+ });
+ renderGraph('Pages for ' + page.version.name, groups, dataTable,
+ 'pageGraph', isDiffView ? true : 'percent');
+ }
+
+ function showVersionGraph(groups, page) {
+ var dataTable = getGraphDataTable(groups);
+ var row;
+ var vs = versions.versions.filter(version => version.enabled);
+ vs.sort((a, b) => {
+ return b.getEntry(selectedGroup).getTimeImpact() -
+ a.getEntry(selectedGroup).getTimeImpact();
+ });
+ // Calculate the entries for the versions
+ vs.forEach((version) => {
+ row = [version.name];
+ groups.forEach((group) => {
+ row.push(group.isTotal ? 0 : version.getEntry(group).getTimeImpact());
+ });
+ var rowIndex = dataTable.addRow(row);
+ dataTable.setRowProperty(rowIndex, 'page', page);
+ });
+ renderGraph('Versions Total Time over all Pages', groups, dataTable,
+ 'versionGraph', true);
+ }
+
+ function showPageVersionGraph(groups, page) {
+ var dataTable = getGraphDataTable(groups);
+ var row;
+ var vs = versions.getPageVersions(page);
+ vs.sort((a, b) => {
+ return b.getEntry(selectedGroup).time - a.getEntry(selectedGroup).time;
+ });
+ // Calculate the entries for the versions
+ vs.forEach((page) => {
+ row = [page.version.name];
+ groups.forEach((group) => {
+ row.push(group.isTotal ? 0 : page.getEntry(group).time);
+ });
+ var rowIndex = dataTable.addRow(row);
+ dataTable.setRowProperty(rowIndex, 'page', page);
+ });
+ renderGraph('Versions for ' + page.name, groups, dataTable,
+ 'pageVersionGraph', true);
+ }
+
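+    // Draws (or redraws) a bar chart for the given groups into the node
+    // with the given id and wires up the selection and tooltip handlers.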
+ function renderGraph(title, groups, dataTable, id, isStacked) {
+ var isDiffView = baselineVersion !== undefined;
+ var formatter = new google.visualization.NumberFormat({
+ suffix: (isDiffView ? 'msΔ' : 'ms'),
+ negativeColor: 'red',
+ groupingSymbol: "'"
+ });
+ for (var i = 1; i < dataTable.getNumberOfColumns(); i++) {
+ formatter.format(dataTable, i);
+ }
+ var height = 85 + 28 * dataTable.getNumberOfRows();
+ var options = {
+ isStacked: isStacked,
+ height: height,
+ hAxis: {
+ minValue: 0,
+ },
+ animation:{
+ duration: 500,
+ easing: 'out',
+ },
+ vAxis: {
+ },
+ explorer: {
+ actions: ['dragToZoom', 'rightClickToReset'],
+ maxZoomIn: 0.01
+ },
+ legend: {position:'top', textStyle:{fontSize: '16px'}},
+ chartArea: {left:200, top:50, width:'98%', height:'80%'},
+ colors: groups.map(each => each.color)
+ };
+ var parentNode = $(id);
+ parentNode.querySelector('h2>span, h3>span').innerHTML = title;
+ var graphNode = parentNode.querySelector('.content');
+
+ var chart = graphNode.chart;
+ if (chart === undefined) {
+ chart = graphNode.chart = new google.visualization.BarChart(graphNode);
+ } else {
+ google.visualization.events.removeAllListeners(chart);
+ }
+ google.visualization.events.addListener(chart, 'select', selectHandler);
+ function getChartEntry(selection) {
+ if (!selection) return undefined;
+ var column = selection.column;
+ if (column == undefined) return undefined;
+ var selectedGroup = dataTable.getColumnProperty(column, 'group');
+ var row = selection.row;
+ if (row == null) return selectedGroup;
+ var page = dataTable.getRowProperty(row, 'page');
+ if (!page) return selectedGroup;
+ return page.getEntry(selectedGroup);
+ }
+ function selectHandler() {
+ selectedGroup = getChartEntry(chart.getSelection()[0])
+ if (!selectedGroup) return;
+ selectEntry(selectedGroup, true);
+ }
+
+ // Make our global tooltips work
+ google.visualization.events.addListener(chart, 'onmouseover', mouseOverHandler);
+ function mouseOverHandler(selection) {
+ graphNode.entry = getChartEntry(selection);
+ }
+ chart.draw(dataTable, options);
+ }
+
+ function showGroup(entry) {
+ toggleGroup(entry, true);
+ }
+
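+    // Shows or hides all child rows that belong to the given group entry.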
+ function toggleGroup(group, show) {
+ $('view').querySelectorAll(".child").forEach((tr) => {
+ var entry = tr.parentEntry;
+ if (!entry) return;
+ if (entry.name !== group.name) return;
+ toggleCssClass(tr, 'visible', show);
+ });
+ }
+
+ function showPopover(entry) {
+ var popover = $('popover');
+ popover.querySelector('td.name').innerHTML = entry.name;
+ popover.querySelector('td.page').innerHTML = entry.page.name;
+ setPopoverDetail(popover, entry, '');
+ popover.querySelector('table').className = "";
+ if (baselineVersion !== undefined) {
+ entry = baselineVersion.getEntry(entry);
+ setPopoverDetail(popover, entry, '.compare');
+ popover.querySelector('table').className = "compare";
+ }
+ }
+
+ function setPopoverDetail(popover, entry, prefix) {
+ var node = (name) => popover.querySelector(prefix + name);
+ if (entry == undefined) {
+ node('.version').innerHTML = baselineVersion.name;
+ node('.time').innerHTML = '-';
+ node('.timeVariance').innerHTML = '-';
+ node('.percent').innerHTML = '-';
+ node('.percentPerEntry').innerHTML = '-';
+ node('.percentVariance').innerHTML = '-';
+ node('.count').innerHTML = '-';
+ node('.countVariance').innerHTML = '-';
+ node('.timeImpact').innerHTML = '-';
+ node('.timePercentImpact').innerHTML = '-';
+ } else {
+ node('.version').innerHTML = entry.page.version.name;
+ node('.time').innerHTML = ms(entry._time, false);
+ node('.timeVariance').innerHTML
+ = percent(entry.timeVariancePercent, false);
+ node('.percent').innerHTML = percent(entry.timePercent, false);
+ node('.percentPerEntry').innerHTML
+ = percent(entry.timePercentPerEntry, false);
+ node('.percentVariance').innerHTML
+ = percent(entry.timePercentVariancePercent, false);
+ node('.count').innerHTML = count(entry._count, false);
+ node('.countVariance').innerHTML
+ = percent(entry.timeVariancePercent, false);
+ node('.timeImpact').innerHTML
+ = ms(entry.getTimeImpact(false), false);
+ node('.timePercentImpact').innerHTML
+ = percent(entry.getTimeImpactVariancePercent(false), false);
+ }
+ }
+ </script>
+ <script type="text/javascript">
+ "use strict"
+ // =========================================================================
+ // Helpers
+ function $(id) {
+      return document.getElementById(id);
+ }
+
+ function removeAllChildren(node) {
+ while (node.firstChild) {
+ node.removeChild(node.firstChild);
+ }
+ }
+
+ function selectOption(select, match) {
+ var options = select.options;
+ for (var i = 0; i < options.length; i++) {
+ if (match(i, options[i])) {
+ select.selectedIndex = i;
+ return;
+ }
+ }
+ }
+
+ function addCodeSearchButton(entry, node) {
+ if (entry.isGroup) return;
+ var button = document.createElement("div");
+ button.innerHTML = '?'
+ button.className = "codeSearch"
+ button.addEventListener('click', handleCodeSearch);
+ node.appendChild(button);
+ return node;
+ }
+
+ function td(tr, content, className) {
+ var td = document.createElement("td");
+ td.innerHTML = content;
+ td.className = className
+ tr.appendChild(td);
+ return td
+ }
+
+ function nodeIndex(node) {
+ var children = node.parentNode.childNodes,
+ i = 0;
+ for (; i < children.length; i++) {
+ if (children[i] == node) {
+ return i;
+ }
+ }
+ return -1;
+ }
+
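+      // Usage sketch (illustrative, not part of the original file):
+      // toggleCssClass(node, 'visible') flips the class, while an explicit
+      // boolean forces one direction only: true only ever adds the class,
+      // false only ever removes it.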
+ function toggleCssClass(node, cssClass, toggleState) {
+ var index = -1;
+ var classes;
+ if (node.className != undefined) {
+ classes = node.className.split(' ');
+ index = classes.indexOf(cssClass);
+ }
+ if (index == -1) {
+ if (toggleState === false) return;
+ node.className += ' ' + cssClass;
+ return;
+ }
+ if (toggleState === true) return;
+ classes.splice(index, 1);
+ node.className = classes.join(' ');
+ }
+
+ function NameComparator(a, b) {
+ if (a.name > b.name) return 1;
+ if (a.name < b.name) return -1;
+ return 0
+ }
+
+ function diffSign(value, digits, unit, showDiff) {
+ if (showDiff === false || baselineVersion == undefined) {
+ if (value === undefined) return '';
+ return value.toFixed(digits) + unit;
+ }
+ return (value >= 0 ? '+' : '') + value.toFixed(digits) + unit + 'Δ';
+ }
+
+ function ms(value, showDiff) {
+ return diffSign(value, 1, 'ms', showDiff);
+ }
+
+ function count(value, showDiff) {
+ return diffSign(value, 0, '#', showDiff);
+ }
+
+ function percent(value, showDiff) {
+ return diffSign(value, 1, '%', showDiff);
+ }
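+
+      // Examples (illustrative): with a baseline version selected (diff
+      // mode), ms(3.21) yields "+3.2msΔ" and percent(-0.5) yields
+      // "-0.5%Δ"; with showDiff === false, ms(3.21, false) is plain
+      // "3.2ms".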
+
+ </script>
+ <script type="text/javascript">
+ "use strict"
+ // =========================================================================
+ // EventHandlers
+ function handleBodyLoad() {
+ $('uploadInput').focus();
+ }
+
+ function handleLoadFile() {
+ var files = document.getElementById("uploadInput").files;
+ var file = files[0];
+ var reader = new FileReader();
+
+ reader.onload = function(evt) {
+ pages = new Pages();
+ versions = Versions.fromJSON(JSON.parse(this.result));
+ initialize()
+ showPage(versions.versions[0].pages[0]);
+ }
+ reader.readAsText(file);
+ }
+
+ function handleToggleGroup(event) {
+ var group = event.target.parentNode.parentNode.entry;
+ toggleGroup(selectedPage.get(group.name));
+ }
+
+ function handleSelectPage(select, event) {
+ var option = select.options[select.selectedIndex];
+ if (select.id == "select_0") {
+ showPage(option.page);
+ } else {
+ var columnIndex = select.id.split('_')[1];
+ showPageInColumn(option.page, columnIndex);
+ }
+ }
+
+ function handleSelectVersion(select, event) {
+ var option = select.options[select.selectedIndex];
+ var version = option.version;
+ if (select.id == "selectVersion_0") {
+ var page = version.get(selectedPage.name);
+ showPage(page);
+ } else {
+ var columnIndex = select.id.split('_')[1];
+ var pageSelect = $('select_' + columnIndex);
+ var page = pageSelect.options[pageSelect.selectedIndex].page;
+ page = version.get(page.name);
+ showPageInColumn(page, columnIndex);
+ }
+ }
+
+ function handleSelectDetailRow(table, event) {
+ if (event.target.tagName != 'TD') return;
+ var tr = event.target.parentNode;
+ if (tr.tagName != 'TR') return;
+ if (tr.entry === undefined) return;
+ selectEntry(tr.entry, true);
+ }
+
+ function handleSelectRow(table, event, fromDetail) {
+ if (event.target.tagName != 'TD') return;
+ var tr = event.target.parentNode;
+ if (tr.tagName != 'TR') return;
+ if (tr.entry === undefined) return;
+ selectEntry(tr.entry, false);
+ }
+
+ function handleSelectBaseline(select, event) {
+ var option = select.options[select.selectedIndex];
+ baselineVersion = option.version;
+ var showingDiff = baselineVersion !== undefined;
+ var body = $('body');
+ toggleCssClass(body, 'diff', showingDiff);
+ toggleCssClass(body, 'noDiff', !showingDiff);
+ showPage(selectedPage);
+ if (selectedEntry === undefined) return;
+ selectEntry(selectedEntry, true);
+ }
+
+ function findEntry(event) {
+ var target = event.target;
+ while (target.entry === undefined) {
+ target = target.parentNode;
+ if (!target) return undefined;
+ }
+ return target.entry;
+ }
+
+ function handleUpdatePopover(event) {
+ var popover = $('popover');
+ popover.style.left = event.pageX + 'px';
+ popover.style.top = event.pageY + 'px';
+ popover.style.display = 'none';
+ popover.style.display = event.shiftKey ? 'block' : 'none';
+ var entry = findEntry(event);
+ if (entry === undefined) return;
+ showPopover(entry);
+ }
+
+ function handleToggleVersionEnable(event) {
+        var item = this.item;
+        if (item === undefined) return;
+        item.enabled = this.checked;
+ initialize();
+ var page = selectedPage;
+ if (page === undefined || !page.version.enabled) {
+ page = versions.getEnabledPage(page.name);
+ }
+ showPage(page);
+ }
+
+ function handleToggleContentVisibility(event) {
+ var content = event.target.contentNode;
+ toggleCssClass(content, 'hidden');
+ }
+
+ function handleCodeSearch(event) {
+ var entry = findEntry(event);
+ if (entry === undefined) return;
+ var url = "https://cs.chromium.org/search/?sq=package:chromium&type=cs&q=";
+        var name = entry.name;
+ if (name.startsWith("API_")) {
+ name = name.substring(4);
+ }
+ url += encodeURIComponent(name) + "+file:src/v8/src";
+ window.open(url,'_blank');
+ }
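+
+      // For example (illustrative): an entry named "API_Object_Get" is
+      // looked up as "Object_Get", opening
+      // https://cs.chromium.org/search/?sq=package:chromium&type=cs&q=Object_Get+file:src/v8/src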
+ </script>
+ <script type="text/javascript">
+ "use strict"
+ // =========================================================================
+ class Versions {
+ constructor() {
+ this.versions = [];
+ }
+ add(version) {
+ this.versions.push(version)
+ }
+ getPageVersions(page) {
+ var result = [];
+ this.versions.forEach((version) => {
+ if (!version.enabled) return;
+ var versionPage = version.get(page.name);
+ if (versionPage !== undefined) result.push(versionPage);
+ });
+ return result;
+ }
+ get length() {
+ return this.versions.length
+ }
+ get(index) {
+ return this.versions[index]
+ };
+ forEach(f) {
+ this.versions.forEach(f);
+ }
+ sort() {
+ this.versions.sort(NameComparator);
+ }
+ getEnabledPage(name) {
+ for (var i = 0; i < this.versions.length; i++) {
+ var version = this.versions[i];
+ if (!version.enabled) continue;
+ var page = version.get(name);
+ if (page !== undefined) return page;
+ }
+ }
+ }
+ Versions.fromJSON = function(json) {
+ var versions = new Versions();
+ for (var version in json) {
+ versions.add(Version.fromJSON(version, json[version]));
+ }
+ versions.sort();
+ return versions;
+ }
+
+ class Version {
+ constructor(name) {
+ this.name = name;
+ this.enabled = true;
+ this.pages = [];
+ }
+ add(page) {
+ this.pages.push(page);
+ }
+ indexOf(name) {
+ for (var i = 0; i < this.pages.length; i++) {
+ if (this.pages[i].name == name) return i;
+ }
+ return -1;
+ }
+ get(name) {
+ var index = this.indexOf(name);
+ if (0 <= index) return this.pages[index];
+ return undefined
+ }
+ get length() {
+          return this.pages.length;
+ }
+ getEntry(entry) {
+ if (entry === undefined) return undefined;
+ var page = this.get(entry.page.name);
+ if (page === undefined) return undefined;
+ return page.get(entry.name);
+ }
+ forEachEntry(fun) {
+ this.forEachPage((page) => {
+ page.forEach(fun);
+ });
+ }
+ forEachPage(fun) {
+ this.pages.forEach((page) => {
+ if (!page.enabled) return;
+ fun(page);
+ })
+ }
+ allEntries() {
+ var map = new Map();
+ this.forEachEntry((group, entry) => {
+ if (!map.has(entry.name)) map.set(entry.name, entry);
+ });
+ return Array.from(map.values());
+ }
+ getTotalValue(name, property) {
+ if (name === undefined) name = this.pages[0].total.name;
+ var sum = 0;
+ this.forEachPage((page) => {
+ var entry = page.get(name);
+ if (entry !== undefined) sum += entry[property];
+ });
+ return sum;
+ }
+ getTotalTime(name, showDiff) {
+ return this.getTotalValue(name, showDiff === false ? '_time' : 'time');
+ }
+ getTotalTimePercent(name, showDiff) {
+ if (baselineVersion === undefined || showDiff === false) {
+ // Return the overall average percent of the given entry name.
+ return this.getTotalValue(name, 'time') /
+ this.getTotalTime('Group-Total') * 100;
+ }
+ // Otherwise return the difference to the sum of the baseline version.
+ var baselineValue = baselineVersion.getTotalTime(name, false);
+ var total = this.getTotalValue(name, '_time');
+ return (total / baselineValue - 1) * 100;
+ }
+ getTotalTimeVariance(name, showDiff) {
+ // Calculate the overall error for a given entry name
+ var sum = 0;
+ this.forEachPage((page) => {
+ var entry = page.get(name);
+ if (entry === undefined) return;
+ sum += entry.timeVariance * entry.timeVariance;
+ });
+ return Math.sqrt(sum);
+ }
+ getTotalTimeVariancePercent(name, showDiff) {
+ return this.getTotalTimeVariance(name, showDiff) /
+ this.getTotalTime(name, showDiff) * 100;
+ }
+ getTotalCount(name, showDiff) {
+ return this.getTotalValue(name, showDiff === false ? '_count' : 'count');
+ }
+ getAverageTimeImpact(name, showDiff) {
+ return this.getTotalTime(name, showDiff) / this.pages.length;
+ }
+ getPagesByPercentImpact(name) {
+ var sortedPages =
+ this.pages.filter((each) => {
+ return each.get(name) !== undefined
+ });
+ sortedPages.sort((a, b) => {
+ return b.get(name).timePercent - a.get(name).timePercent;
+ });
+ return sortedPages;
+ }
+ sort() {
+ this.pages.sort(NameComparator)
+ }
+ }
+ Version.fromJSON = function(name, data) {
+ var version = new Version(name);
+ for (var pageName in data) {
+ version.add(PageVersion.fromJSON(version, pageName, data[pageName]));
+ }
+ version.sort();
+ return version;
+ }
+
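+      // Pages normalizes a leading "www." away, so (illustrative)
+      // pages.get('www.example.com') and pages.get('example.com') return
+      // the same lazily created Page instance.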
+ class Pages extends Map {
+ get(name) {
+ if (name.indexOf('www.') == 0) {
+ name = name.substring(4);
+ }
+ if (!this.has(name)) {
+ this.set(name, new Page(name));
+ }
+ return super.get(name);
+ }
+ }
+
+ class Page {
+ constructor(name) {
+ this.name = name;
+ this.enabled = true;
+ this.versions = [];
+ }
+ add(page) {
+ this.versions.push(page);
+ }
+ }
+
+ class PageVersion {
+ constructor(version, page) {
+ this.page = page;
+ this.page.add(this);
+ this.total = new GroupedEntry('Total', /.*Total.*/, '#BBB');
+ this.total.isTotal = true;
+ this.unclassified = new UnclassifiedEntry(this, "#000")
+ this.groups = [
+ this.total,
+ new GroupedEntry('IC', /.*IC.*/, "#3366CC"),
+ new GroupedEntry('Optimize',
+ /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"),
+ new GroupedEntry('Compile', /.*Compile.*/, "#FFAA00"),
+ new GroupedEntry('Parse', /.*Parse.*/, "#FF6600"),
+ new GroupedEntry('Callback', /.*Callback$/, "#109618"),
+ new GroupedEntry('API', /.*API.*/, "#990099"),
+ new GroupedEntry('GC', /GC|AllocateInTargetSpace/, "#0099C6"),
+ new GroupedEntry('JavaScript', /JS_Execution/, "#DD4477"),
+ new GroupedEntry('Runtime', /.*/, "#88BB00"),
+ this.unclassified
+ ];
+ this.entryDict = new Map();
+ this.groups.forEach((entry) => {
+ entry.page = this;
+ this.entryDict.set(entry.name, entry);
+ });
+ this.version = version;
+ }
+ add(entry) {
+ entry.page = this;
+ this.entryDict.set(entry.name, entry);
+ var added = false;
+ this.groups.forEach((group) => {
+ if (!added) added = group.add(entry);
+ });
+ if (added) return;
+ this.unclassified.push(entry);
+ }
+ get(name) {
+ return this.entryDict.get(name)
+ }
+ getEntry(entry) {
+ if (entry === undefined) return undefined;
+ return this.get(entry.name);
+ }
+ get length() {
+          return this.page.versions.length;
+ }
+ get name() { return this.page.name }
+ get enabled() { return this.page.enabled }
+ forEachSorted(referencePage, func) {
+ // Iterate over all the entries in the order they appear on the
+ // reference page.
+ referencePage.forEach((parent, referenceEntry) => {
+ var entry;
+ if (parent) parent = this.entryDict.get(parent.name);
+ if (referenceEntry) entry = this.entryDict.get(referenceEntry.name);
+ func(parent, entry, referenceEntry);
+ });
+ }
+ forEach(fun) {
+ this.forEachGroup((group) => {
+ fun(undefined, group);
+ group.forEach((entry) => {
+ fun(group, entry)
+ });
+ });
+ }
+ forEachGroup(fun) {
+ this.groups.forEach(fun)
+ }
+ sort() {
+ this.groups.sort((a, b) => {
+ return b.time - a.time;
+ });
+ this.groups.forEach((group) => {
+ group.sort()
+ });
+ }
+ distanceFromTotalPercent() {
+ var sum = 0;
+ this.groups.forEach(group => {
+ if (group == this.total) return;
+ var value = group.getTimePercentImpact() -
+ this.getEntry(group).timePercent;
+ sum += value * value;
+ });
+ return sum;
+ }
+ }
+ PageVersion.fromJSON = function(version, name, data) {
+ var page = new PageVersion(version, pages.get(name));
+ for (var i = 0; i < data.length; i++) {
+ page.add(Entry.fromJSON(i, data[data.length - i - 1]));
+ }
+ page.sort();
+ return page
+ }
+
+
+ class Entry {
+ constructor(position, name, time, timeVariance, timeVariancePercent,
+ count,
+ countVariance, countVariancePercent) {
+ this.position = position;
+ this.name = name;
+ this._time = time;
+ this._timeVariance = timeVariance;
+ this._timeVariancePercent = timeVariancePercent;
+ this._count = count;
+ this.countVariance = countVariance;
+ this.countVariancePercent = countVariancePercent;
+ this.page = undefined;
+ this.parent = undefined;
+ this.isTotal = false;
+ }
+ getCompareWithBaseline(value, property) {
+ if (baselineVersion == undefined) return value;
+ var baselineEntry = baselineVersion.getEntry(this);
+ if (!baselineEntry) return value;
+ if (baselineVersion === this.page.version) return value;
+ return value - baselineEntry[property];
+ }
+ cssClass() {
+ return ''
+ }
+ get time() {
+ return this.getCompareWithBaseline(this._time, '_time');
+ }
+ get count() {
+ return this.getCompareWithBaseline(this._count, '_count');
+ }
+ get timePercent() {
+ var value = this._time / this.page.total._time * 100;
+ if (baselineVersion == undefined) return value;
+ var baselineEntry = baselineVersion.getEntry(this);
+ if (!baselineEntry) return value;
+ if (baselineVersion === this.page.version) return value;
+ return (this._time - baselineEntry._time) / this.page.total._time *
+ 100;
+ }
+ get timePercentPerEntry() {
+ var value = this._time / this.page.total._time * 100;
+ if (baselineVersion == undefined) return value;
+ var baselineEntry = baselineVersion.getEntry(this);
+ if (!baselineEntry) return value;
+ if (baselineVersion === this.page.version) return value;
+ return (this._time / baselineEntry._time - 1) * 100;
+ }
+ get timePercentVariancePercent() {
+ // Get the absolute values for the percentages
+ return this.timeVariance / this.page.total._time * 100;
+ }
+ getTimeImpact(showDiff) {
+ return this.page.version.getTotalTime(this.name, showDiff);
+ }
+ getTimeImpactVariancePercent(showDiff) {
+ return this.page.version.getTotalTimeVariancePercent(this.name, showDiff);
+ }
+ getTimePercentImpact(showDiff) {
+ return this.page.version.getTotalTimePercent(this.name, showDiff);
+ }
+ getCountImpact(showDiff) {
+ return this.page.version.getTotalCount(this.name, showDiff);
+ }
+ getAverageTimeImpact(showDiff) {
+ return this.page.version.getAverageTimeImpact(this.name, showDiff);
+ }
+ getPagesByPercentImpact() {
+ return this.page.version.getPagesByPercentImpact(this.name);
+ }
+ get isGroup() {
+ return false
+ }
+ get timeVariance() {
+ return this._timeVariance
+ }
+ get timeVariancePercent() {
+ return this._timeVariancePercent
+ }
+ }
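+      // Each per-entry JSON array is expected to have the form (as emitted
+      // by the callstats.py json command, whose ± confidence-interval
+      // values fill the variance slots):
+      //   [name, time, timeVariance, timeVariancePercent,
+      //    count, countVariance, countVariancePercent]
+      // The spread below matches the Entry constructor.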
+ Entry.fromJSON = function(position, data) {
+ return new Entry(position, ...data);
+ }
+
+
+ class GroupedEntry extends Entry {
+ constructor(name, regexp, color) {
+ super(0, 'Group-' + name, 0, 0, 0, 0, 0, 0);
+ this.regexp = regexp;
+ this.color = color;
+ this.entries = [];
+ }
+ add(entry) {
+ if (!entry.name.match(this.regexp)) return false;
+ this._time += entry.time;
+ this._count += entry.count;
+ // TODO: sum up variance
+ this.entries.push(entry);
+ entry.parent = this;
+ return true;
+ }
+ forEach(fun) {
+ if (baselineVersion === undefined) {
+ this.entries.forEach(fun);
+ return;
+ }
+          // If we have a baselineVersion to compare against, also show all
+          // entries from the other group.
+ var tmpEntries = baselineVersion.getEntry(this)
+ .entries.filter((entry) => {
+ return this.page.get(entry.name) == undefined
+ });
+
+ // The compared entries are sorted by absolute impact.
+ tmpEntries = tmpEntries.map((entry) => {
+ var tmpEntry = new Entry(0, entry.name, 0, 0, 0, 0, 0, 0);
+ tmpEntry.page = this.page;
+ return tmpEntry;
+ });
+ tmpEntries = tmpEntries.concat(this.entries);
+ tmpEntries.sort((a, b) => {
+ return a.time - b.time
+ });
+ tmpEntries.forEach(fun);
+ }
+ sort() {
+ this.entries.sort((a, b) => {
+ return b.time - a.time;
+ });
+ }
+ cssClass() {
+ if (this.page.total == this) return 'total';
+ return '';
+ }
+ get isGroup() {
+ return true
+ }
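+        // Combines the per-entry variance values in quadrature (square
+        // root of the sum of squares), treating them as independent errors.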
+ getVarianceForProperty(property) {
+ var sum = 0;
+ this.entries.forEach((entry) => {
+            var variance = entry[property + 'Variance'];
+            sum += variance * variance;
+ });
+ return Math.sqrt(sum);
+ }
+ get timeVariancePercent() {
+ if (this._time == 0) return 0;
+ return this.getVarianceForProperty('time') / this._time * 100
+ }
+ get timeVariance() {
+ return this.getVarianceForProperty('time')
+ }
+ }
+
+ class UnclassifiedEntry extends GroupedEntry {
+ constructor(page, color) {
+ super('Unclassified', undefined, color);
+ this.page = page;
+ this._time = undefined;
+ this._count = undefined;
+ }
+ add(entry) {
+ this.entries.push(entry);
+ entry.parent = this;
+ return true;
+ }
+ forEachPageGroup(fun) {
+ this.page.forEachGroup((group) => {
+ if (group == this) return;
+ if (group == this.page.total) return;
+ fun(group);
+ });
+ }
+ get time() {
+ if (this._time === undefined) {
+ this._time = this.page.total._time;
+ this.forEachPageGroup((group) => {
+ this._time -= group._time;
+ });
+ }
+ return this.getCompareWithBaseline(this._time, '_time');
+ }
+ get count() {
+ if (this._count === undefined) {
+ this._count = this.page.total._count;
+ this.forEachPageGroup((group) => {
+ this._count -= group._count;
+ });
+ }
+ return this.getCompareWithBaseline(this._count, '_count');
+ }
+ }
+ </script>
+</head>
+
+<body id="body" onmousemove="handleUpdatePopover(event)" onload="handleBodyLoad()" class="noDiff">
+ <h1>Runtime Stats Komparator</h1>
+
+ <div id="results">
+ <div class="inline">
+ <h2>Data</h2>
+ <form name="fileForm">
+ <p>
+ <input id="uploadInput" type="file" name="files" onchange="handleLoadFile();" accept=".json">
+ </p>
+ </form>
+ </div>
+
+ <div class="inline hidden">
+ <h2>Result</h2>
+ <div class="compareSelector inline">
+ Compare against:&nbsp;<select id="baseline" onchange="handleSelectBaseline(this, event)"></select><br/>
+        <span style="color: #060">Green</span> means the selected version
+        above performs better on this measurement.
+ </div>
+ </div>
+
+ <div id="versionSelector" class="inline toggleContentVisibility">
+ <h2>Version Selector</h2>
+ <div class="content hidden">
+ <ul></ul>
+ </div>
+ </div>
+
+ <div id="pageSelector" class="inline toggleContentVisibility">
+ <h2>Page Selector</h2>
+ <div class="content hidden">
+ <ul></ul>
+ </div>
+ </div>
+
+ <div id="view">
+ </div>
+
+ <div id="detailView" class="hidden">
+ <div class="versionDetail inline toggleContentVisibility">
+ <h3><span></span></h3>
+ <div class="content">
+ <table class="versionDetailTable" onclick="handleSelectDetailRow(this, event);">
+ <thead>
+ <tr>
+ <th class="version">Version&nbsp;</th>
+ <th class="position">Pos.&nbsp;</th>
+ <th class="value time">Time▴&nbsp;</th>
+ <th class="value time">Percent&nbsp;</th>
+ <th class="value count">Count&nbsp;</th>
+ </tr>
+ </thead>
+ <tbody></tbody>
+ </table>
+ </div>
+ </div>
+ <div class="pageDetail inline toggleContentVisibility">
+ <h3>Page Comparison for <span></span></h3>
+ <div class="content">
+ <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
+ <thead>
+ <tr>
+ <th class="page">Page&nbsp;</th>
+ <th class="value time">Time&nbsp;</th>
+ <th class="value time">Percent▾&nbsp;</th>
+ <th class="value time hideNoDiff">%/Entry&nbsp;</th>
+ <th class="value count">Count&nbsp;</th>
+ </tr>
+ </thead>
+ <tfoot>
+ <tr>
+ <td class="page">Total:</td>
+ <td class="value time"></td>
+ <td class="value time"></td>
+ <td class="value time hideNoDiff"></td>
+ <td class="value count"></td>
+ </tr>
+ </tfoot>
+ <tbody></tbody>
+ </table>
+ </div>
+ </div>
+ <div class="impactView inline toggleContentVisibility">
+ <h3>Impact list for <span></span></h3>
+ <div class="content">
+ <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
+ <thead>
+ <tr>
+ <th class="page">Name&nbsp;</th>
+ <th class="value time">Time&nbsp;</th>
+ <th class="value time">Percent▾&nbsp;</th>
+ <th class="">Top Pages</th>
+ </tr>
+ </thead>
+ <tbody></tbody>
+ </table>
+ </div>
+ </div>
+ </div>
+ <div id="pageVersionGraph" class="graph hidden toggleContentVisibility">
+ <h3><span></span></h3>
+ <div class="content"></div>
+ </div>
+ <div id="pageGraph" class="graph hidden toggleContentVisibility">
+ <h3><span></span></h3>
+ <div class="content"></div>
+ </div>
+ <div id="versionGraph" class="graph hidden toggleContentVisibility">
+ <h3><span></span></h3>
+ <div class="content"></div>
+ </div>
+
+ <div id="column" class="column">
+ <div class="header">
+ <select class="version" onchange="handleSelectVersion(this, event);"></select>
+ <select class="pageVersion" onchange="handleSelectPage(this, event);"></select>
+ </div>
+ <table class="list" onclick="handleSelectRow(this, event);">
+ <thead>
+ <tr>
+ <th class="position">Pos.&nbsp;</th>
+ <th class="name">Name&nbsp;</th>
+ <th class="value time">Time&nbsp;</th>
+ <th class="value time">Percent&nbsp;</th>
+ <th class="value count">Count&nbsp;</th>
+ </tr>
+ </thead>
+ <tbody></tbody>
+ </table>
+ </div>
+ </div>
+
+ <div class="inline">
+ <h2>Usage</h2>
+ <ol>
+ <li>Install scipy, e.g. <code>sudo aptitude install python-scipy</code>
+ <li>Build chrome.</li>
+ <li>Check out a known working version of webpagereply:
+ <pre>git -C $CHROME_DIR/third_party/webpagereplay checkout 7dbd94752d1cde5536ffc623a9e10a51721eff1d</pre>
+ </li>
+ <li>Run <code>callstats.py</code> with a web-page-replay archive:
+ <pre>$V8_DIR/tools/callstats.py run \
+ --replay-bin=$CHROME_SRC/third_party/webpagereplay/replay.py \
+ --replay-wpr=$INPUT_DIR/top25.wpr \
+ --js-flags="" \
+ --with-chrome=$CHROME_SRC/out/Release/chrome \
+ --sites-file=$INPUT_DIR/top25.json</pre>
+ </li>
+ <li>Move results file to a subdirectory: <code>mkdir $VERSION_DIR; mv *.txt $VERSION_DIR</code></li>
+ <li>Repeat from step 1 with a different configuration (e.g. <code>--js-flags="--nolazy"</code>).</li>
+ <li>Create the final results file: <code>./callstats.py json $VERSION_DIR1 $VERSION_DIR2 > result.json</code></li>
+ <li>Use <code>results.json</code> on this site.</code>
+ </ol>
+ </div>
+
+ <div id="popover">
+ <div class="popoverArrow"></div>
+ <table>
+ <tr>
+ <td class="name" colspan="6"></td>
+ </tr>
+ <tr>
+ <td>Page:</td>
+ <td class="page name" colspan="6"></td>
+ </tr>
+ <tr>
+ <td>Version:</td>
+ <td class="version name" colspan="3"></td>
+ <td class="compare version name" colspan="3"></td>
+ </tr>
+ <tr>
+ <td>Time:</td>
+ <td class="time"></td><td>±</td><td class="timeVariance"></td>
+ <td class="compare time"></td><td class="compare"> ± </td><td class="compare timeVariance"></td>
+ </tr>
+ <tr>
+ <td>Percent:</td>
+ <td class="percent"></td><td>±</td><td class="percentVariance"></td>
+ <td class="compare percent"></td><td class="compare"> ± </td><td class="compare percentVariance"></td>
+ </tr>
+ <tr>
+ <td>Percent per Entry:</td>
+ <td class="percentPerEntry"></td><td colspan=2></td>
+ <td class="compare percentPerEntry"></td><td colspan=2></td>
+ </tr>
+ <tr>
+ <td>Count:</td>
+ <td class="count"></td><td>±</td><td class="countVariance"></td>
+ <td class="compare count"></td><td class="compare"> ± </td><td class="compare countVariance"></td>
+ </tr>
+ <tr>
+ <td>Overall Impact:</td>
+ <td class="timeImpact"></td><td>±</td><td class="timePercentImpact"></td>
+ <td class="compare timeImpact"></td><td class="compare"> ± </td><td class="compare timePercentImpact"></td>
+ </tr>
+ </table>
+ </div>
+</body>
+</html>
diff --git a/deps/v8/tools/callstats.py b/deps/v8/tools/callstats.py
new file mode 100755
index 0000000000..6339392733
--- /dev/null
+++ b/deps/v8/tools/callstats.py
@@ -0,0 +1,638 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+'''
+Usage: callstats.py [-h] <command> ...
+
+Optional arguments:
+ -h, --help show this help message and exit
+
+Commands:
+ run run chrome with --runtime-call-stats and generate logs
+ stats process logs and print statistics
+ json process logs from several versions and generate JSON
+ help help information
+
+For each command, you can try ./callstats.py help <command>.
+'''
+
+import argparse
+import json
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import operator
+
+import numpy
+import scipy
+import scipy.stats
+from math import sqrt
+
+
+# Run benchmarks.
+
+def print_command(cmd_args):
+ def fix_for_printing(arg):
+ m = re.match(r'^--([^=]+)=(.*)$', arg)
+ if m and (' ' in m.group(2) or m.group(2).startswith('-')):
+ arg = "--{}='{}'".format(m.group(1), m.group(2))
+ elif ' ' in arg:
+ arg = "'{}'".format(arg)
+ return arg
+ print " ".join(map(fix_for_printing, cmd_args))
+
+
+def start_replay_server(args, sites):
+ with tempfile.NamedTemporaryFile(prefix='callstats-inject-', suffix='.js',
+ mode='wt', delete=False) as f:
+ injection = f.name
+ generate_injection(f, sites, args.refresh)
+ http_port = 4080 + args.port_offset
+ https_port = 4443 + args.port_offset
+ cmd_args = [
+ args.replay_bin,
+ "--port=%s" % http_port,
+ "--ssl_port=%s" % https_port,
+ "--no-dns_forwarding",
+ "--use_closest_match",
+ "--no-diff_unknown_requests",
+ "--inject_scripts=deterministic.js,{}".format(injection),
+ args.replay_wpr,
+ ]
+ print "=" * 80
+ print_command(cmd_args)
+ with open(os.devnull, 'w') as null:
+ server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
+ print "RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid)
+ print "=" * 80
+ return {'process': server, 'injection': injection}
+
+
+def stop_replay_server(server):
+ print("SHUTTING DOWN REPLAY SERVER %s" % server['process'].pid)
+ server['process'].terminate()
+ os.remove(server['injection'])
+
+
+def generate_injection(f, sites, refreshes=0):
+ print >> f, """\
+(function() {
+ var s = window.sessionStorage.getItem("refreshCounter");
+ var refreshTotal = """, refreshes, """;
+ var refreshCounter = s ? parseInt(s) : refreshTotal;
+ var refreshId = refreshTotal - refreshCounter;
+ if (refreshCounter > 0) {
+ window.sessionStorage.setItem("refreshCounter", refreshCounter-1);
+ }
+ function match(url, item) {
+ if ('regexp' in item) { return url.match(item.regexp) !== null };
+ var url_wanted = item.url;
+ /* Allow automatic redirections from http to https. */
+ if (url_wanted.startsWith("http://") && url.startsWith("https://")) {
+ url_wanted = "https://" + url_wanted.substr(7);
+ }
+ return url.startsWith(url_wanted);
+ };
+ function onLoad(url) {
+ for (var item of sites) {
+ if (!match(url, item)) continue;
+ var timeout = 'timeline' in item ? 2000 * item.timeline
+ : 'timeout' in item ? 1000 * (item.timeout - 3)
+ : 10000;
+ console.log("Setting time out of " + timeout + " for: " + url);
+ window.setTimeout(function() {
+ console.log("Time is out for: " + url);
+ var msg = "STATS: (" + refreshId + ") " + url;
+ %GetAndResetRuntimeCallStats(1, msg);
+ if (refreshCounter > 0) {
+ console.log(
+ "Refresh counter is " + refreshCounter + ", refreshing: " + url);
+ window.location.reload();
+ }
+ }, timeout);
+ return;
+ }
+ console.log("Ignoring: " + url);
+ };
+ var sites =
+ """, json.dumps(sites), """;
+ onLoad(window.location.href);
+})();"""
+
+
+def run_site(site, domain, args, timeout=None):
+ print "="*80
+ print "RUNNING DOMAIN %s" % domain
+ print "="*80
+ result_template = "{domain}#{count}.txt" if args.repeat else "{domain}.txt"
+ count = 0
+ if timeout is None: timeout = args.timeout
+ if args.replay_wpr:
+ timeout *= 1 + args.refresh
+ timeout += 1
+ retries_since_good_run = 0
+  while count == 0 or (args.repeat is not None and count < args.repeat):
+ count += 1
+ result = result_template.format(domain=domain, count=count)
+ retries = 0
+ while args.retries is None or retries < args.retries:
+ retries += 1
+ try:
+ if args.user_data_dir:
+ user_data_dir = args.user_data_dir
+ else:
+ user_data_dir = tempfile.mkdtemp(prefix="chr_")
+ js_flags = "--runtime-call-stats"
+ if args.replay_wpr: js_flags += " --allow-natives-syntax"
+ if args.js_flags: js_flags += " " + args.js_flags
+ chrome_flags = [
+ "--no-default-browser-check",
+ "--no-sandbox",
+ "--disable-translate",
+ "--js-flags={}".format(js_flags),
+ "--no-first-run",
+ "--user-data-dir={}".format(user_data_dir),
+ ]
+ if args.replay_wpr:
+ http_port = 4080 + args.port_offset
+ https_port = 4443 + args.port_offset
+ chrome_flags += [
+ "--host-resolver-rules=MAP *:80 localhost:%s, " \
+ "MAP *:443 localhost:%s, " \
+ "EXCLUDE localhost" % (
+ http_port, https_port),
+ "--ignore-certificate-errors",
+ "--disable-seccomp-sandbox",
+ "--disable-web-security",
+ "--reduce-security-for-testing",
+ "--allow-insecure-localhost",
+ ]
+ else:
+ chrome_flags += [
+ "--single-process",
+ ]
+ if args.chrome_flags:
+ chrome_flags += args.chrome_flags.split()
+ cmd_args = [
+ "timeout", str(timeout),
+ args.with_chrome
+ ] + chrome_flags + [ site ]
+ print "- " * 40
+ print_command(cmd_args)
+ print "- " * 40
+ with open(result, "wt") as f:
+ with open(args.log_stderr or os.devnull, 'at') as err:
+ status = subprocess.call(cmd_args, stdout=f, stderr=err)
+        # 124 means timeout killed chrome, 0 means the user was bored first!
+        # If neither of these happened, then chrome apparently crashed, so
+        # it must be called again.
+        if status != 124 and status != 0:
+          print("CHROME CRASHED, REPEATING RUN")
+ continue
+ # If the stats file is empty, chrome must be called again.
+ if os.path.isfile(result) and os.path.getsize(result) > 0:
+ if args.print_url:
+ with open(result, "at") as f:
+ print >> f
+ print >> f, "URL: {}".format(site)
+ retries_since_good_run = 0
+ break
+ if retries_since_good_run < 6:
+ timeout += 2 ** retries_since_good_run
+ retries_since_good_run += 1
+ print("EMPTY RESULT, REPEATING RUN ({})".format(
+            retries_since_good_run))
+ finally:
+ if not args.user_data_dir:
+ shutil.rmtree(user_data_dir)
+
+
+def read_sites_file(args):
+ try:
+ sites = []
+ try:
+ with open(args.sites_file, "rt") as f:
+ for item in json.load(f):
+ if 'timeout' not in item:
+ # This is more-or-less arbitrary.
+ item['timeout'] = int(1.5 * item['timeline'] + 7)
+ if item['timeout'] > args.timeout: item['timeout'] = args.timeout
+ sites.append(item)
+ except ValueError:
+ with open(args.sites_file, "rt") as f:
+ for line in f:
+ line = line.strip()
+ if not line or line.startswith('#'): continue
+ sites.append({'url': line, 'timeout': args.timeout})
+ return sites
+ except IOError as e:
+ args.error("Cannot read from {}. {}.".format(args.sites_file, e.strerror))
+ sys.exit(1)
+
+
+def do_run(args):
+ # Determine the websites to benchmark.
+ if args.sites_file:
+ sites = read_sites_file(args)
+ else:
+ sites = [{'url': site, 'timeout': args.timeout} for site in args.sites]
+ # Disambiguate domains, if needed.
+ L = []
+ domains = {}
+ for item in sites:
+ site = item['url']
+ domain = None
+ if args.domain:
+ domain = args.domain
+ elif 'domain' in item:
+ domain = item['domain']
+ else:
+ m = re.match(r'^(https?://)?([^/]+)(/.*)?$', site)
+ if not m:
+ args.error("Invalid URL {}.".format(site))
+ continue
+ domain = m.group(2)
+ entry = [site, domain, None, item['timeout']]
+ if domain not in domains:
+ domains[domain] = entry
+ else:
+ if not isinstance(domains[domain], int):
+ domains[domain][2] = 1
+ domains[domain] = 1
+ domains[domain] += 1
+ entry[2] = domains[domain]
+ L.append(entry)
+ replay_server = start_replay_server(args, sites) if args.replay_wpr else None
+ try:
+ # Run them.
+ for site, domain, count, timeout in L:
+ if count is not None: domain = "{}%{}".format(domain, count)
+ print site, domain, timeout
+ run_site(site, domain, args, timeout)
+ finally:
+ if replay_server:
+ stop_replay_server(replay_server)
+
+
+# Calculate statistics.
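+# Worked example (illustrative): for data = [1.0, 2.0, 3.0] we get N = 3,
+# average = 2.0 and stddev = 1.0 (ddof=1); scipy.stats.t.interval(0.95, 2)
+# is roughly (-4.30, 4.30), so ci['abs'] ~= 4.30 / sqrt(3) ~= 2.48 and the
+# 95% confidence interval is about [-0.48, 4.48].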
+
+def statistics(data):
+ N = len(data)
+ average = numpy.average(data)
+ median = numpy.median(data)
+ low = numpy.min(data)
+  high = numpy.max(data)
+ if N > 1:
+    # Evaluate the sample variance by setting delta degrees of freedom
+    # (ddof) to 1. The divisor used in the calculation is N - ddof.
+ stddev = numpy.std(data, ddof=1)
+ # Get the endpoints of the range that contains 95% of the distribution
+ t_bounds = scipy.stats.t.interval(0.95, N-1)
+ #assert abs(t_bounds[0] + t_bounds[1]) < 1e-6
+    # Add the mean to the confidence interval bounds.
+ ci = {
+ 'abs': t_bounds[1] * stddev / sqrt(N),
+ 'low': average + t_bounds[0] * stddev / sqrt(N),
+ 'high': average + t_bounds[1] * stddev / sqrt(N)
+ }
+ else:
+ stddev = 0
+ ci = { 'abs': 0, 'low': average, 'high': average }
+ if abs(stddev) > 0.0001 and abs(average) > 0.0001:
+ ci['perc'] = t_bounds[1] * stddev / sqrt(N) / average * 100
+ else:
+ ci['perc'] = 0
+ return { 'samples': N, 'average': average, 'median': median,
+ 'stddev': stddev, 'min': low, 'max': high, 'ci': ci }
+
+
+def read_stats(path, domain, args):
+  groups = []
+ if args.aggregate:
+ groups = [
+ ('Group-IC', re.compile(".*IC.*")),
+ ('Group-Optimize',
+ re.compile("StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*")),
+ ('Group-Compile', re.compile(".*Compile.*")),
+ ('Group-Parse', re.compile(".*Parse.*")),
+ ('Group-Callback', re.compile(".*Callback.*")),
+ ('Group-API', re.compile(".*API.*")),
+ ('Group-GC', re.compile("GC|AllocateInTargetSpace")),
+ ('Group-JavaScript', re.compile("JS_Execution")),
+ ('Group-Runtime', re.compile(".*"))]
+ with open(path, "rt") as f:
+ # Process the whole file and sum repeating entries.
+ entries = { 'Sum': {'time': 0, 'count': 0} }
+ for group_name, regexp in groups:
+ entries[group_name] = { 'time': 0, 'count': 0 }
+ for line in f:
+ line = line.strip()
+ # Discard headers and footers.
+ if not line: continue
+ if line.startswith("Runtime Function"): continue
+ if line.startswith("===="): continue
+ if line.startswith("----"): continue
+ if line.startswith("URL:"): continue
+ if line.startswith("STATS:"): continue
+ # We have a regular line.
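+      # Such a line is whitespace-separated, e.g. (values illustrative):
+      #   JS_Execution  123.45ms  45.67%  890  12.34%
+      # so fields[0] is the name, fields[1] the time with an "ms" suffix
+      # and fields[3] the invocation count.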
+ fields = line.split()
+ key = fields[0]
+ time = float(fields[1].replace("ms", ""))
+ count = int(fields[3])
+ if key not in entries: entries[key] = { 'time': 0, 'count': 0 }
+ entries[key]['time'] += time
+ entries[key]['count'] += count
+      # Add to the running sum, unless it's the "Total" line.
+ if key != "Total":
+ entries['Sum']['time'] += time
+ entries['Sum']['count'] += count
+ for group_name, regexp in groups:
+ if not regexp.match(key): continue
+ entries[group_name]['time'] += time
+ entries[group_name]['count'] += count
+ break
+ # Append the sums as single entries to domain.
+  for key in entries:
+ if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }
+ domain[key]['time_list'].append(entries[key]['time'])
+ domain[key]['count_list'].append(entries[key]['count'])
+
+
+def print_stats(S, args):
+ # Sort by ascending/descending time average, then by ascending/descending
+ # count average, then by ascending name.
+ def sort_asc_func(item):
+ return (item[1]['time_stat']['average'],
+ item[1]['count_stat']['average'],
+ item[0])
+ def sort_desc_func(item):
+ return (-item[1]['time_stat']['average'],
+ -item[1]['count_stat']['average'],
+ item[0])
+  # The sorting order comes from the command-line arguments.
+ sort_func = sort_asc_func if args.sort == "asc" else sort_desc_func
+ # Possibly limit how many elements to print.
+ L = [item for item in sorted(S.items(), key=sort_func)
+ if item[0] not in ["Total", "Sum"]]
+ N = len(L)
+ if args.limit == 0:
+ low, high = 0, N
+ elif args.sort == "desc":
+ low, high = 0, args.limit
+ else:
+ low, high = N-args.limit, N
+ # How to print entries.
+ def print_entry(key, value):
+ def stats(s, units=""):
+ conf = "{:0.1f}({:0.2f}%)".format(s['ci']['abs'], s['ci']['perc'])
+ return "{:8.1f}{} +/- {:15s}".format(s['average'], units, conf)
+ print "{:>50s} {} {}".format(
+ key,
+ stats(value['time_stat'], units="ms"),
+ stats(value['count_stat'])
+ )
+ # Print and calculate partial sums, if necessary.
+ for i in range(low, high):
+ print_entry(*L[i])
+ if args.totals and args.limit != 0 and not args.aggregate:
+ if i == low:
+ partial = { 'time_list': [0] * len(L[i][1]['time_list']),
+ 'count_list': [0] * len(L[i][1]['count_list']) }
+ assert len(partial['time_list']) == len(L[i][1]['time_list'])
+ assert len(partial['count_list']) == len(L[i][1]['count_list'])
+ for j, v in enumerate(L[i][1]['time_list']):
+ partial['time_list'][j] += v
+ for j, v in enumerate(L[i][1]['count_list']):
+ partial['count_list'][j] += v
+ # Print totals, if necessary.
+ if args.totals:
+ print '-' * 80
+ if args.limit != 0 and not args.aggregate:
+ partial['time_stat'] = statistics(partial['time_list'])
+ partial['count_stat'] = statistics(partial['count_list'])
+ print_entry("Partial", partial)
+ print_entry("Sum", S["Sum"])
+ print_entry("Total", S["Total"])
+
+
+def do_stats(args):
+ domains = {}
+ for path in args.logfiles:
+ filename = os.path.basename(path)
+ m = re.match(r'^([^#]+)(#.*)?$', filename)
+ domain = m.group(1)
+ if domain not in domains: domains[domain] = {}
+ read_stats(path, domains[domain], args)
+ if args.aggregate:
+ create_total_page_stats(domains, args)
+ for i, domain in enumerate(sorted(domains)):
+ if len(domains) > 1:
+ if i > 0: print
+ print "{}:".format(domain)
+ print '=' * 80
+ domain_stats = domains[domain]
+ for key in domain_stats:
+ domain_stats[key]['time_stat'] = \
+ statistics(domain_stats[key]['time_list'])
+ domain_stats[key]['count_stat'] = \
+ statistics(domain_stats[key]['count_list'])
+ print_stats(domain_stats, args)
+
+
+# Create a Total page with all entries summed up.
+def create_total_page_stats(domains, args):
+ total = {}
+ def sum_up(parent, key, other):
+ sums = parent[key]
+ for i, item in enumerate(other[key]):
+ if i >= len(sums):
+ sums.extend([0] * (i - len(sums) + 1))
+ if item is not None:
+ sums[i] += item
+ # Sum up all the entries/metrics from all domains
+ for domain, entries in domains.items():
+ for key, domain_stats in entries.items():
+ if key not in total:
+ total[key] = {}
+ total[key]['time_list'] = list(domain_stats['time_list'])
+ total[key]['count_list'] = list(domain_stats['count_list'])
+ else:
+ sum_up(total[key], 'time_list', domain_stats)
+ sum_up(total[key], 'count_list', domain_stats)
+ # Add a new "Total" page containing the summed up metrics.
+ domains['Total'] = total
+
+
+# Generate JSON file.
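+# The emitted JSON maps version -> domain -> list of entries, where each
+# entry has the form (illustrative):
+#   ["<name>", <time avg>, <time ci abs>, <time ci perc>,
+#    <count avg>, <count ci abs>, <count ci perc>]
+# This is the format callstats.html's Versions.fromJSON consumes.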
+
+def do_json(args):
+ versions = {}
+ for path in args.logdirs:
+ if os.path.isdir(path):
+ for root, dirs, files in os.walk(path):
+ version = os.path.basename(root)
+ if version not in versions: versions[version] = {}
+ for filename in files:
+ if filename.endswith(".txt"):
+ m = re.match(r'^([^#]+)(#.*)?\.txt$', filename)
+ domain = m.group(1)
+ if domain not in versions[version]: versions[version][domain] = {}
+ read_stats(os.path.join(root, filename),
+ versions[version][domain], args)
+ for version, domains in versions.items():
+ if args.aggregate:
+ create_total_page_stats(domains, args)
+ for domain, entries in domains.items():
+ stats = []
+ for name, value in entries.items():
+ # We don't want the calculated sum in the JSON file.
+ if name == "Sum": continue
+ entry = [name]
+ for x in ['time_list', 'count_list']:
+ s = statistics(entries[name][x])
+ entry.append(round(s['average'], 1))
+ entry.append(round(s['ci']['abs'], 1))
+ entry.append(round(s['ci']['perc'], 2))
+ stats.append(entry)
+ domains[domain] = stats
+ print json.dumps(versions, separators=(',', ':'))
+
+
+# Help.
+
+def do_help(parser, subparsers, args):
+ if args.help_cmd:
+ if args.help_cmd in subparsers:
+ subparsers[args.help_cmd].print_help()
+ else:
+ args.error("Unknown command '{}'".format(args.help_cmd))
+ else:
+ parser.print_help()
+
+
+# Main program, parse command line and execute.
+
+def coexist(*l):
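+# coexist is truthy when the given options are used all-or-nothing, e.g.
+# (illustrative) coexist(None, None) and coexist('a', 'b') hold, while
+# coexist('a', None) does not.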
+ given = sum(1 for x in l if x)
+ return given == 0 or given == len(l)
+
+def main():
+ parser = argparse.ArgumentParser()
+ subparser_adder = parser.add_subparsers(title="commands", dest="command",
+ metavar="<command>")
+ subparsers = {}
+ # Command: run.
+ subparsers["run"] = subparser_adder.add_parser(
+ "run", help="run --help")
+ subparsers["run"].set_defaults(
+ func=do_run, error=subparsers["run"].error)
+ subparsers["run"].add_argument(
+ "--chrome-flags", type=str, default="",
+ help="specify additional chrome flags")
+ subparsers["run"].add_argument(
+ "--js-flags", type=str, default="",
+ help="specify additional V8 flags")
+ subparsers["run"].add_argument(
+ "--domain", type=str, default="",
+ help="specify the output file domain name")
+ subparsers["run"].add_argument(
+ "--no-url", dest="print_url", action="store_false", default=True,
+ help="do not include url in statistics file")
+ subparsers["run"].add_argument(
+ "-n", "--repeat", type=int, metavar="<num>",
+ help="specify iterations for each website (default: once)")
+ subparsers["run"].add_argument(
+ "-k", "--refresh", type=int, metavar="<num>", default=0,
+ help="specify refreshes for each iteration (default: 0)")
+ subparsers["run"].add_argument(
+ "--replay-wpr", type=str, metavar="<path>",
+ help="use the specified web page replay (.wpr) archive")
+ subparsers["run"].add_argument(
+ "--replay-bin", type=str, metavar="<path>",
+ help="specify the replay.py script typically located in " \
+ "$CHROMIUM/src/third_party/webpagereplay/replay.py")
+ subparsers["run"].add_argument(
+ "-r", "--retries", type=int, metavar="<num>",
+ help="specify retries if website is down (default: forever)")
+ subparsers["run"].add_argument(
+ "-f", "--sites-file", type=str, metavar="<path>",
+ help="specify file containing benchmark websites")
+ subparsers["run"].add_argument(
+ "-t", "--timeout", type=int, metavar="<seconds>", default=60,
+ help="specify seconds before chrome is killed")
+ subparsers["run"].add_argument(
+ "-p", "--port-offset", type=int, metavar="<offset>", default=0,
+ help="specify the offset for the replay server's default ports")
+ subparsers["run"].add_argument(
+ "-u", "--user-data-dir", type=str, metavar="<path>",
+ help="specify user data dir (default is temporary)")
+ subparsers["run"].add_argument(
+ "-c", "--with-chrome", type=str, metavar="<path>",
+ default="/usr/bin/google-chrome",
+ help="specify chrome executable to use")
+ subparsers["run"].add_argument(
+ "-l", "--log-stderr", type=str, metavar="<path>",
+ help="specify where chrome's stderr should go (default: /dev/null)")
+ subparsers["run"].add_argument(
+ "sites", type=str, metavar="<URL>", nargs="*",
+ help="specify benchmark website")
+ # Command: stats.
+ subparsers["stats"] = subparser_adder.add_parser(
+ "stats", help="stats --help")
+ subparsers["stats"].set_defaults(
+ func=do_stats, error=subparsers["stats"].error)
+ subparsers["stats"].add_argument(
+ "-l", "--limit", type=int, metavar="<num>", default=0,
+ help="limit how many items to print (default: none)")
+ subparsers["stats"].add_argument(
+ "-s", "--sort", choices=["asc", "desc"], default="asc",
+ help="specify sorting order (default: ascending)")
+ subparsers["stats"].add_argument(
+ "-n", "--no-total", dest="totals", action="store_false", default=True,
+ help="do not print totals")
+ subparsers["stats"].add_argument(
+ "logfiles", type=str, metavar="<logfile>", nargs="*",
+ help="specify log files to parse")
+ subparsers["stats"].add_argument(
+ "--aggregate", dest="aggregate", action="store_true", default=False,
+ help="Create aggregated entries. Adds Group-* entries at the toplevel. " +
+ "Additionally creates a Total page with all entries.")
+ # Command: json.
+ subparsers["json"] = subparser_adder.add_parser(
+ "json", help="json --help")
+ subparsers["json"].set_defaults(
+ func=do_json, error=subparsers["json"].error)
+ subparsers["json"].add_argument(
+ "logdirs", type=str, metavar="<logdir>", nargs="*",
+ help="specify directories with log files to parse")
+ subparsers["json"].add_argument(
+ "--aggregate", dest="aggregate", action="store_true", default=False,
+ help="Create aggregated entries. Adds Group-* entries at the toplevel. " +
+ "Additionally creates a Total page with all entries.")
+ # Command: help.
+ subparsers["help"] = subparser_adder.add_parser(
+ "help", help="help information")
+ subparsers["help"].set_defaults(
+ func=lambda args: do_help(parser, subparsers, args),
+ error=subparsers["help"].error)
+ subparsers["help"].add_argument(
+ "help_cmd", type=str, metavar="<command>", nargs="?",
+ help="command for which to display help")
+ # Execute the command.
+ args = parser.parse_args()
+ setattr(args, 'script_path', os.path.dirname(sys.argv[0]))
+ if args.command == "run" and coexist(args.sites_file, args.sites):
+ args.error("use either option --sites-file or site URLs")
+ sys.exit(1)
+ elif args.command == "run" and not coexist(args.replay_wpr, args.replay_bin):
+ args.error("options --replay-wpr and --replay-bin must be used together")
+ sys.exit(1)
+ else:
+ args.func(args)
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/deps/v8/tools/check-static-initializers.gyp b/deps/v8/tools/check-static-initializers.gyp
index 547a6c873b..cfeacfc89f 100644
--- a/deps/v8/tools/check-static-initializers.gyp
+++ b/deps/v8/tools/check-static-initializers.gyp
@@ -13,8 +13,8 @@
'../src/d8.gyp:d8_run',
],
'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'check-static-initializers.isolate',
diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js
index fa6c36b50b..30cdc21db5 100644
--- a/deps/v8/tools/codemap.js
+++ b/deps/v8/tools/codemap.js
@@ -246,6 +246,14 @@ CodeMap.prototype.getAllStaticEntries = function() {
/**
+ * Returns an array of pairs of all static code entries and their addresses.
+ */
+CodeMap.prototype.getAllStaticEntriesWithAddresses = function() {
+ return this.statics_.exportKeysAndValues();
+};
+
+
+/**
* Returns an array of all libraries entries.
*/
CodeMap.prototype.getAllLibrariesEntries = function() {
diff --git a/deps/v8/tools/detect-builtins.js b/deps/v8/tools/detect-builtins.js
index 2a476baa4b..90bdc08860 100644
--- a/deps/v8/tools/detect-builtins.js
+++ b/deps/v8/tools/detect-builtins.js
@@ -24,6 +24,8 @@
}
// Avoid endless recursion.
if (this_name === "prototype" && name === "constructor") continue;
+ // Avoid needless duplication.
+ if (this_name === "__PROTO__" && name === "constructor") continue;
// Could get this from the parent, but having it locally is easier.
var property = { "name": name };
try {
@@ -39,9 +41,18 @@
property.length = value.length;
property.prototype = GetProperties("prototype", value.prototype);
}
- property.properties = GetProperties(name, value);
+ if (type === "string" || type === "number") {
+ property.value = value;
+ } else {
+ property.properties = GetProperties(name, value);
+ }
result[name] = property;
}
+ // Print the __proto__ if it's not the default Object prototype.
+ if (typeof object === "object" && object.__proto__ !== null &&
+ !object.__proto__.hasOwnProperty("__proto__")) {
+ result.__PROTO__ = GetProperties("__PROTO__", object.__proto__);
+ }
return result;
};
diff --git a/deps/v8/tools/dev/v8gen.py b/deps/v8/tools/dev/v8gen.py
new file mode 100755
index 0000000000..a63a42705b
--- /dev/null
+++ b/deps/v8/tools/dev/v8gen.py
@@ -0,0 +1,244 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to generate V8's gn arguments based on common developer defaults
+or builder configurations.
+
+Goma is used by default if a goma folder is detected. The compiler proxy is
+assumed to be running.
+
+This script can be added to the PATH and used in v8 checkouts other than the
+one containing it. It always runs for the checkout that contains the CWD.
+
+Configurations of this script live in infra/mb/mb_config.pyl.
+
+-------------------------------------------------------------------------------
+
+Examples:
+
+# Generate the x64.release config in out.gn/x64.release.
+v8gen.py x64.release
+
+# Generate into out.gn/foo and disable goma auto-detect.
+v8gen.py -b x64.release foo --no-goma
+
+# Pass additional gn arguments after -- (don't use spaces within gn args).
+v8gen.py x64.optdebug -- v8_enable_slow_dchecks=true
+
+# Generate gn arguments of 'V8 Linux64 - builder' from 'client.v8'. To switch
+# off goma usage here, the args.gn file must be edited manually.
+v8gen.py -m client.v8 -b 'V8 Linux64 - builder'
+
+-------------------------------------------------------------------------------
+"""
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+
+GOMA_DEFAULT = os.path.join(os.path.expanduser("~"), 'goma')
+OUT_DIR = 'out.gn'
+
+
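+# _sanitize_nonalpha maps e.g. the builder name 'V8 Linux64 - builder' to
+# the output directory name 'V8_Linux64___builder' (example illustrative).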
+def _sanitize_nonalpha(text):
+ return re.sub(r'[^a-zA-Z0-9.]', '_', text)
+
+
+class GenerateGnArgs(object):
+ def __init__(self, args):
+ # Split args into this script's arguments and gn args passed to the
+ # wrapped gn.
+ index = args.index('--') if '--' in args else len(args)
+ self._options = self._parse_arguments(args[:index])
+ self._gn_args = args[index + 1:]
+
+ def _parse_arguments(self, args):
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawTextHelpFormatter,
+ )
+ parser.add_argument(
+ 'outdir', nargs='?',
+ help='optional gn output directory')
+ parser.add_argument(
+ '-b', '--builder',
+ help='build configuration or builder name from mb_config.pyl, e.g. '
+ 'x64.release')
+ parser.add_argument(
+ '-m', '--master', default='developer_default',
+ help='config group or master from mb_config.pyl - default: '
+ 'developer_default')
+ parser.add_argument(
+ '-p', '--pedantic', action='store_true',
+ help='run gn over command-line gn args to catch errors early')
+ parser.add_argument(
+ '-v', '--verbosity', action='count',
+ help='print wrapped commands (use -vv to print output of wrapped '
+ 'commands)')
+
+ goma = parser.add_mutually_exclusive_group()
+ goma.add_argument(
+        '-g', '--goma',
+ action='store_true', default=None, dest='goma',
+ help='force using goma')
+ goma.add_argument(
+ '--nogoma', '--no-goma',
+ action='store_false', default=None, dest='goma',
+ help='don\'t use goma auto detection - goma might still be used if '
+ 'specified as a gn arg')
+
+ options = parser.parse_args(args)
+
+ if not options.outdir and not options.builder:
+ parser.error('please specify either an output directory or '
+ 'a builder/config name (-b), e.g. x64.release')
+
+ if not options.outdir:
+ # Derive output directory from builder name.
+ options.outdir = _sanitize_nonalpha(options.builder)
+ else:
+      # Note: for this to work on Windows, we might need to use \ where
+      # outdir is used as a path, while keeping / where it is used in a gn
+      # context.
+ if options.outdir.startswith('/'):
+ parser.error(
+ 'only output directories relative to %s are supported' % OUT_DIR)
+
+ if not options.builder:
+ # Derive builder from output directory.
+ options.builder = options.outdir
+
+ return options
+
+ def verbose_print_1(self, text):
+ if self._options.verbosity >= 1:
+ print '#' * 80
+ print text
+
+ def verbose_print_2(self, text):
+ if self._options.verbosity >= 2:
+ indent = ' ' * 2
+ for l in text.splitlines():
+ print indent + l
+
+ def _call_cmd(self, args):
+ self.verbose_print_1(' '.join(args))
+ try:
+ output = subprocess.check_output(
+ args=args,
+ stderr=subprocess.STDOUT,
+ )
+ self.verbose_print_2(output)
+ except subprocess.CalledProcessError as e:
+ self.verbose_print_2(e.output)
+ raise
+
+ def _find_work_dir(self, path):
+ """Find the closest v8 root to `path`."""
+ if os.path.exists(os.path.join(path, 'tools', 'dev', 'v8gen.py')):
+ # Approximate the v8 root dir by a folder where this script exists
+ # in the expected place.
+ return path
+ elif os.path.dirname(path) == path:
+ raise Exception(
+          'This does not appear to be called from a recent v8 checkout')
+ else:
+ return self._find_work_dir(os.path.dirname(path))
+
+ @property
+ def _goma_dir(self):
+ return os.path.normpath(os.environ.get('GOMA_DIR') or GOMA_DEFAULT)
+
+ @property
+ def _need_goma_dir(self):
+ return self._goma_dir != GOMA_DEFAULT
+
+ @property
+ def _use_goma(self):
+ if self._options.goma is None:
+ # Auto-detect.
+ return os.path.exists(self._goma_dir) and os.path.isdir(self._goma_dir)
+ else:
+ return self._options.goma
+
+ @property
+ def _goma_args(self):
+ """Gn args for using goma."""
+ # Specify goma args if we want to use goma and if goma isn't specified
+ # via command line already. The command-line always has precedence over
+ # any other specification.
+ if (self._use_goma and
+ not any(re.match(r'use_goma\s*=.*', x) for x in self._gn_args)):
+ if self._need_goma_dir:
+ return 'use_goma=true\ngoma_dir="%s"' % self._goma_dir
+ else:
+ return 'use_goma=true'
+ else:
+ return ''
+
+ def _append_gn_args(self, type, gn_args_path, more_gn_args):
+ """Append extra gn arguments to the generated args.gn file."""
+ if not more_gn_args:
+ return False
+ self.verbose_print_1('Appending """\n%s\n""" to %s.' % (
+ more_gn_args, os.path.abspath(gn_args_path)))
+ with open(gn_args_path, 'a') as f:
+ f.write('\n# Additional %s args:\n' % type)
+ f.write(more_gn_args)
+ f.write('\n')
+ return True
+
+ def main(self):
+ # Always operate relative to the base directory for better relative-path
+ # handling. This script can be used in any v8 checkout.
+ workdir = self._find_work_dir(os.getcwd())
+ if workdir != os.getcwd():
+ self.verbose_print_1('cd ' + workdir)
+ os.chdir(workdir)
+
+ # The directories are separated with slashes in a gn context (platform
+ # independent).
+ gn_outdir = '/'.join([OUT_DIR, self._options.outdir])
+
+ # Call MB to generate the basic configuration.
+ self._call_cmd([
+ sys.executable,
+ '-u', os.path.join('tools', 'mb', 'mb.py'),
+ 'gen',
+ '-f', os.path.join('infra', 'mb', 'mb_config.pyl'),
+ '-m', self._options.master,
+ '-b', self._options.builder,
+ gn_outdir,
+ ])
+
+ # Handle extra gn arguments.
+ gn_args_path = os.path.join(OUT_DIR, self._options.outdir, 'args.gn')
+
+ # Append command-line args.
+ modified = self._append_gn_args(
+ 'command-line', gn_args_path, '\n'.join(self._gn_args))
+
+ # Append goma args.
+ # TODO(machenbach): We currently can't remove existing goma args from the
+ # original config. E.g. to build like a bot that uses goma, but switch
+ # goma off.
+ modified |= self._append_gn_args(
+ 'goma', gn_args_path, self._goma_args)
+
+ # Regenerate ninja files to check for errors in the additional gn args.
+ if modified and self._options.pedantic:
+ self._call_cmd(['gn', 'gen', gn_outdir])
+ return 0
+
+if __name__ == "__main__":
+ gen = GenerateGnArgs(sys.argv[1:])
+ try:
+ sys.exit(gen.main())
+ except Exception:
+ if gen._options.verbosity < 2:
+ print ('\nHint: You can raise verbosity (-vv) to see the output of '
+ 'failed commands.\n')
+ raise
diff --git a/deps/v8/tools/dump-cpp.py b/deps/v8/tools/dump-cpp.py
new file mode 100644
index 0000000000..5198ecab21
--- /dev/null
+++ b/deps/v8/tools/dump-cpp.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script executes dumpcpp.js, collects all dumped C++ symbols,
+# and merges them back into the v8 log.
+
+import os
+import platform
+import re
+import subprocess
+import sys
+
+def is_file_executable(fPath):
+ return os.path.isfile(fPath) and os.access(fPath, os.X_OK)
+
+if __name__ == '__main__':
+ JS_FILES = ['splaytree.js', 'codemap.js', 'csvparser.js', 'consarray.js',
+ 'profile.js', 'logreader.js', 'tickprocessor.js', 'SourceMap.js',
+ 'dumpcpp.js', 'dumpcpp-driver.js']
+ tools_path = os.path.dirname(os.path.realpath(__file__))
+ on_windows = platform.system() == 'Windows'
+ JS_FILES = [os.path.join(tools_path, f) for f in JS_FILES]
+
+ args = []
+ log_file = 'v8.log'
+ debug = False
+ for arg in sys.argv[1:]:
+ if arg == '--debug':
+ debug = True
+ continue
+ args.append(arg)
+ if not arg.startswith('-'):
+ log_file = arg
+
+ if on_windows:
+ args.append('--windows')
+
+ with open(log_file, 'r') as f:
+ lines = f.readlines()
+
+ d8_line = re.search(',\"(.*d8)', ''.join(lines))
+ if d8_line:
+ d8_exec = d8_line.group(1)
+ if not is_file_executable(d8_exec):
+ print 'd8 binary path found in {} is not executable.'.format(log_file)
+ sys.exit(-1)
+ else:
+ print 'No d8 binary path found in {}.'.format(log_file)
+ sys.exit(-1)
+
+ args = [d8_exec] + JS_FILES + ['--'] + args
+
+ with open(log_file) as f:
+ sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ stdin=f)
+ out, err = sp.communicate()
+ if debug:
+ print err
+ if sp.returncode != 0:
+ print out
+ exit(-1)
+
+ if on_windows and out:
+ out = re.sub('\r+\n', '\n', out)
+
+ is_written = not bool(out)
+ with open(log_file, 'w') as f:
+ for line in lines:
+ if not is_written and line.startswith('tick'):
+ f.write(out)
+ is_written = True
+ f.write(line)
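The merge at the end of dump-cpp.py splices the dumped `cpp,...` symbol block into the log just before the first `tick` entry, so the tick processor sees all C++ symbols before any samples. A minimal sketch of that splice, assuming `out` holds the dumped symbol lines and `lines` the original log contents:

    def merge_cpp_symbols(lines, out):
        # Insert the dumped C++ symbol block before the first 'tick' line,
        # keeping every original log line in place.
        merged = []
        written = not bool(out)
        for line in lines:
            if not written and line.startswith('tick'):
                merged.append(out)
                written = True
            merged.append(line)
        return ''.join(merged)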
diff --git a/deps/v8/tools/dumpcpp-driver.js b/deps/v8/tools/dumpcpp-driver.js
new file mode 100644
index 0000000000..44527771e4
--- /dev/null
+++ b/deps/v8/tools/dumpcpp-driver.js
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Dump C++ symbols of the shared library if possible.
+
+function processArguments(args) {
+ var processor = new ArgumentsProcessor(args);
+ if (processor.parse()) {
+ return processor.result();
+ } else {
+ processor.printUsageAndExit();
+ }
+}
+
+function initSourceMapSupport() {
+ // Pull dev tools source maps into our name space.
+ SourceMap = WebInspector.SourceMap;
+
+ // Overwrite the load function to load scripts synchronously.
+ SourceMap.load = function(sourceMapURL) {
+ var content = readFile(sourceMapURL);
+ var sourceMapObject = (JSON.parse(content));
+ return new SourceMap(sourceMapURL, sourceMapObject);
+ };
+}
+
+var entriesProviders = {
+ 'unix': UnixCppEntriesProvider,
+ 'windows': WindowsCppEntriesProvider,
+ 'mac': MacCppEntriesProvider
+};
+
+var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+ initSourceMapSupport();
+ sourceMap = SourceMap.load(params.sourceMap);
+}
+
+var cppProcessor = new CppProcessor(
+ new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
+ params.timedRange, params.pairwiseTimedRange);
+cppProcessor.processLogFile(params.logFileName);
+cppProcessor.dumpCppSymbols();
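The driver stays platform-agnostic by picking a symbol provider from a small lookup table keyed by platform name. The same table-driven dispatch, sketched in Python with hypothetical placeholder provider classes (the real providers shell out to platform tools such as nm):

    class UnixCppEntriesProvider(object):
        pass  # placeholder for the platform-specific symbol reader

    class WindowsCppEntriesProvider(object):
        pass

    class MacCppEntriesProvider(object):
        pass

    ENTRIES_PROVIDERS = {
        'unix': UnixCppEntriesProvider,
        'windows': WindowsCppEntriesProvider,
        'mac': MacCppEntriesProvider,
    }

    def make_provider(platform_name):
        # Unknown platforms raise a KeyError instead of silently defaulting.
        return ENTRIES_PROVIDERS[platform_name]()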
diff --git a/deps/v8/tools/dumpcpp.js b/deps/v8/tools/dumpcpp.js
new file mode 100644
index 0000000000..ca5ff675f3
--- /dev/null
+++ b/deps/v8/tools/dumpcpp.js
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function CppProcessor(cppEntriesProvider, timedRange, pairwiseTimedRange) {
+ LogReader.call(this, {
+ 'shared-library': { parsers: [null, parseInt, parseInt, parseInt],
+ processor: this.processSharedLibrary }
+ }, timedRange, pairwiseTimedRange);
+
+ this.cppEntriesProvider_ = cppEntriesProvider;
+ this.codeMap_ = new CodeMap();
+ this.lastLogFileName_ = null;
+}
+inherits(CppProcessor, LogReader);
+
+/**
+ * @override
+ */
+CppProcessor.prototype.printError = function(str) {
+ print(str);
+};
+
+CppProcessor.prototype.processLogFile = function(fileName) {
+ this.lastLogFileName_ = fileName;
+ var line;
+ while (line = readline()) {
+ this.processLogLine(line);
+ }
+};
+
+CppProcessor.prototype.processLogFileInTest = function(fileName) {
+ // Hack file name to avoid dealing with platform specifics.
+ this.lastLogFileName_ = 'v8.log';
+ var contents = readFile(fileName);
+ this.processLogChunk(contents);
+};
+
+CppProcessor.prototype.processSharedLibrary = function(
+ name, startAddr, endAddr, aslrSlide) {
+ var self = this;
+ var libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+ name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
+ var entry = new CodeMap.CodeEntry(fEnd - fStart, fName, 'CPP');
+ self.codeMap_.addStaticCode(fStart, entry);
+ });
+};
+
+CppProcessor.prototype.dumpCppSymbols = function() {
+ var staticEntries = this.codeMap_.getAllStaticEntriesWithAddresses();
+ var total = staticEntries.length;
+ for (var i = 0; i < total; ++i) {
+ var entry = staticEntries[i];
+ var printValues = ['cpp', '0x' + entry[0].toString(16), entry[1].size,
+ '"' + entry[1].name + '"'];
+ print(printValues.join(','));
+ }
+};
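dumpCppSymbols emits one comma-separated line per static code entry, in exactly the shape the Python wrapper later splices into the log. A sketch of that output format in Python; the address and symbol name below are made up for illustration:

    def dump_cpp_symbols(static_entries):
        # Each entry is (start_address, entry) with entry carrying size and
        # name, matching CodeMap.getAllStaticEntriesWithAddresses() above.
        for start, entry in static_entries:
            print('cpp,0x%x,%d,"%s"' % (start, entry['size'], entry['name']))

    # Hypothetical entry: prints cpp,0x1000,64,"v8::internal::Heap::Scavenge()"
    dump_cpp_symbols([(0x1000, {'size': 64,
                                'name': 'v8::internal::Heap::Scavenge()'})])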
diff --git a/deps/v8/tools/eval_gc_nvp.py b/deps/v8/tools/eval_gc_nvp.py
index fcb6d8b9a2..25afe8e4f0 100755
--- a/deps/v8/tools/eval_gc_nvp.py
+++ b/deps/v8/tools/eval_gc_nvp.py
@@ -10,7 +10,7 @@
from argparse import ArgumentParser
from copy import deepcopy
from gc_nvp_common import split_nvp
-from math import log
+from math import ceil,log
from sys import stdin
@@ -74,11 +74,12 @@ class Histogram:
class Category:
- def __init__(self, key, histogram, csv):
+ def __init__(self, key, histogram, csv, percentiles):
self.key = key
self.values = []
self.histogram = histogram
self.csv = csv
+ self.percentiles = percentiles
def process_entry(self, entry):
if self.key in entry:
@@ -100,6 +101,16 @@ class Category:
def empty(self):
return len(self.values) == 0
+ def _compute_percentiles(self):
+ ret = []
+ if len(self.values) == 0:
+ return ret
+ sorted_values = sorted(self.values)
+ for percentile in self.percentiles:
+ index = int(ceil((len(self.values) - 1) * percentile / 100))
+ ret.append(" {0}%: {1}".format(percentile, sorted_values[index]))
+ return ret
+
def __str__(self):
if self.csv:
ret = [self.key]
@@ -118,6 +129,8 @@ class Category:
ret.append(" avg: {0}".format(self.avg()))
if self.histogram:
ret.append(str(self.histogram))
+ if self.percentiles:
+ ret.append("\n".join(self._compute_percentiles()))
return "\n".join(ret)
def __repr__(self):
@@ -160,6 +173,9 @@ def main():
help="rank keys by metric (default: no)")
parser.add_argument('--csv', dest='csv',
action='store_true', help='provide output as csv')
+ parser.add_argument('--percentiles', dest='percentiles',
+ type=str, default="",
+ help='comma separated list of percentiles')
args = parser.parse_args()
histogram = None
@@ -171,7 +187,14 @@ def main():
bucket_trait = LinearBucket(args.linear_histogram_granularity)
histogram = Histogram(bucket_trait, not args.histogram_omit_empty)
- categories = [ Category(key, deepcopy(histogram), args.csv)
+ percentiles = []
+ for percentile in args.percentiles.split(','):
+ try:
+ percentiles.append(float(percentile))
+ except ValueError:
+ pass
+
+ categories = [ Category(key, deepcopy(histogram), args.csv, percentiles)
for key in args.keys ]
while True:
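The percentile index above is a nearest-rank-style lookup on the sorted values: `ceil((n - 1) * p / 100)`. For example, with 11 sorted values the 90th percentile picks index ceil(10 * 0.9) = 9. A small self-contained version of that computation:

    from math import ceil

    def percentile(values, p):
        # Same index formula as _compute_percentiles above.
        sorted_values = sorted(values)
        index = int(ceil((len(sorted_values) - 1) * p / 100.0))
        return sorted_values[index]

    assert percentile(range(11), 90) == 9
    assert percentile([5, 1, 9, 3], 50) == 5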
diff --git a/deps/v8/tools/eval_gc_time.sh b/deps/v8/tools/eval_gc_time.sh
index ceb4db54cb..140165da43 100755
--- a/deps/v8/tools/eval_gc_time.sh
+++ b/deps/v8/tools/eval_gc_time.sh
@@ -17,6 +17,7 @@ print_usage_and_die() {
echo " -c|--csv provide csv output"
echo " -f|--file FILE profile input in a file"
echo " (default: stdin)"
+ echo " -p|--percentiles comma separated percentiles"
exit 1
}
@@ -25,6 +26,7 @@ RANK_MODE=max
TOP_LEVEL=no
CSV=""
LOGFILE=/dev/stdin
+PERCENTILES=""
while [[ $# -ge 1 ]]
do
@@ -60,6 +62,10 @@ do
LOGFILE=$2
shift
;;
+ -p|--percentiles)
+ PERCENTILES="--percentiles=$2"
+ shift
+ ;;
*)
break
;;
@@ -98,7 +104,6 @@ INTERESTING_OLD_GEN_KEYS="\
evacuate.clean_up \
evacuate.copy \
evacuate.update_pointers \
- evacuate.update_pointers.between_evacuated \
evacuate.update_pointers.to_evacuated \
evacuate.update_pointers.to_new \
evacuate.update_pointers.weak \
@@ -145,6 +150,7 @@ case $OP in
--no-histogram \
--rank $RANK_MODE \
$CSV \
+ $PERCENTILES \
${INTERESTING_NEW_GEN_KEYS}
;;
old-gen-rank)
@@ -153,6 +159,7 @@ case $OP in
--no-histogram \
--rank $RANK_MODE \
$CSV \
+ $PERCENTILES \
${INTERESTING_OLD_GEN_KEYS}
;;
*)
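With the new flag, percentile reporting is opt-in and forwarded verbatim: an invocation such as `eval_gc_time.sh -p 50,90,99` (values here are illustrative) arrives on the Python side as `--percentiles=50,90,99`, where any non-numeric entries are silently dropped by the float() guard added to eval_gc_nvp.py above.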
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index 82ea4e0295..bdbdf36a41 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -184,26 +184,26 @@ end
-- GYP file parsing
local function ParseGYPFile()
- local gyp = ""
- local gyp_files = { "tools/gyp/v8.gyp", "test/cctest/cctest.gyp" }
- for i = 1, #gyp_files do
- local f = assert(io.open(gyp_files[i]), "failed to open GYP file")
- local t = f:read('*a')
- gyp = gyp .. t
- f:close()
- end
-
local result = {}
+ local gyp_files = {
+ { "src/v8.gyp", "'([^']-%.cc)'", "src/" },
+ { "test/cctest/cctest.gyp", "'(test-[^']-%.cc)'", "test/cctest/" }
+ }
- for condition, sources in
- gyp:gmatch "'sources': %[.-### gcmole%((.-)%) ###(.-)%]" do
- if result[condition] == nil then result[condition] = {} end
- for file in sources:gmatch "'%.%./%.%./src/([^']-%.cc)'" do
- table.insert(result[condition], "src/" .. file)
- end
- for file in sources:gmatch "'(test-[^']-%.cc)'" do
- table.insert(result[condition], "test/cctest/" .. file)
+ for i = 1, #gyp_files do
+ local filename = gyp_files[i][1]
+ local pattern = gyp_files[i][2]
+ local prefix = gyp_files[i][3]
+ local gyp_file = assert(io.open(filename), "failed to open GYP file")
+ local gyp = gyp_file:read('*a')
+ for condition, sources in
+ gyp:gmatch "%[.-### gcmole%((.-)%) ###(.-)%]" do
+ if result[condition] == nil then result[condition] = {} end
+ for file in sources:gmatch(pattern) do
+ table.insert(result[condition], prefix .. file)
+ end
end
+ gyp_file:close()
end
return result
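The rewrite replaces one concatenated scan of all GYP files with a per-file table of (filename, source pattern, path prefix), so each GYP file contributes its sources with the correct prefix. The same idea in Python, with the Lua patterns approximated by regexes; this is a sketch of the parsing scheme, not the tool's actual parser:

    import re

    GYP_FILES = [
        ('src/v8.gyp', r"'([^']*?\.cc)'", 'src/'),
        ('test/cctest/cctest.gyp', r"'(test-[^']*?\.cc)'", 'test/cctest/'),
    ]

    def parse_gyp_sources():
        result = {}
        for filename, pattern, prefix in GYP_FILES:
            with open(filename) as f:
                gyp = f.read()
            # Each gcmole-annotated sources list looks like:
            #   [ ... ### gcmole(condition) ### 'a.cc', 'b.cc', ... ]
            for condition, sources in re.findall(
                    r'\[.*?### gcmole\((.*?)\) ###(.*?)\]', gyp, re.S):
                result.setdefault(condition, []).extend(
                    prefix + f for f in re.findall(pattern, sources))
        return result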
diff --git a/deps/v8/tools/gcmole/run-gcmole.isolate b/deps/v8/tools/gcmole/run-gcmole.isolate
index df6e9a267f..caa4f993fc 100644
--- a/deps/v8/tools/gcmole/run-gcmole.isolate
+++ b/deps/v8/tools/gcmole/run-gcmole.isolate
@@ -12,11 +12,11 @@
'parallel.py',
'run-gcmole.py',
# The following contains all relevant source and gyp files.
- '../gyp/v8.gyp',
'../../base/',
'../../include/',
'../../src/',
'../../test/cctest/',
+ '../../testing/gtest/include/gtest/gtest_prod.h',
'../../third_party/icu/source/',
],
},
diff --git a/deps/v8/tools/gcmole/run_gcmole.gyp b/deps/v8/tools/gcmole/run_gcmole.gyp
index 9d13f7606a..7d206bf412 100644
--- a/deps/v8/tools/gcmole/run_gcmole.gyp
+++ b/deps/v8/tools/gcmole/run_gcmole.gyp
@@ -10,8 +10,8 @@
'target_name': 'run_gcmole_run',
'type': 'none',
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'run-gcmole.isolate',
diff --git a/deps/v8/tools/gdb-v8-support.py b/deps/v8/tools/gdb-v8-support.py
index 5d26146fc7..99616727e3 100644
--- a/deps/v8/tools/gdb-v8-support.py
+++ b/deps/v8/tools/gdb-v8-support.py
@@ -167,7 +167,7 @@ class FindAnywhere (gdb.Command):
"find 0x%s, 0x%s, %s" % (startAddr, endAddr, value),
to_string = True)
if result.find("not found") == -1:
- print result
+ print(result)
except:
pass
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 5e6af9d6a8..1eae053f2c 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -4,7 +4,7 @@
# Print HeapObjects.
define job
-print ((v8::internal::HeapObject*)($arg0))->Print()
+call _v8_internal_Print_Object((void*)($arg0))
end
document job
Print a v8 JavaScript object
@@ -13,7 +13,7 @@ end
# Print Code objects containing given PC.
define jco
-job (v8::internal::Isolate::Current()->FindCodeObject((v8::internal::Address)$arg0))
+call _v8_internal_Print_Code((void*)($arg0))
end
document jco
Print a v8 Code object from an internal code address
@@ -22,7 +22,7 @@ end
# Print TypeFeedbackVector
define jfv
-print ((v8::internal::TypeFeedbackVector*)($arg0))->Print()
+call _v8_internal_Print_TypeFeedbackVector((void*)($arg0))
end
document jfv
Print a v8 TypeFeedbackVector object
@@ -31,7 +31,7 @@ end
# Print DescriptorArray.
define jda
-print ((v8::internal::DescriptorArray*)($arg0))->Print()
+call _v8_internal_Print_DescriptorArray((void*)($arg0))
end
document jda
Print a v8 DescriptorArray object
@@ -40,7 +40,7 @@ end
# Print TransitionArray.
define jta
-print ((v8::internal::TransitionArray*)($arg0))->Print()
+call _v8_internal_Print_TransitionArray((void*)($arg0))
end
document jta
Print a v8 TransitionArray object
@@ -49,12 +49,24 @@ end
# Print JavaScript stack trace.
define jst
-print v8::internal::Isolate::Current()->PrintStack((FILE*) stdout, 1)
+call _v8_internal_Print_StackTrace()
end
document jst
Print the current JavaScript stack trace
Usage: jst
end
+# Skip the JavaScript stack.
+define jss
+set $js_entry_sp=v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_
+set $rbp=*(void**)$js_entry_sp
+set $rsp=$js_entry_sp + 2*sizeof(void*)
+set $pc=*(void**)($js_entry_sp+sizeof(void*))
+end
+document jss
+Skip the jitted stack on x64 to where we entered JS last.
+Usage: jss
+end
+
set disassembly-flavor intel
set disable-randomization off
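The new jss macro relies on the x64 convention that the JS entry frame stores the saved frame pointer at js_entry_sp and the return address one word above it, so restoring $rbp and $pc from those two slots and pointing $rsp two words past js_entry_sp lands gdb back in the C++ caller of the JS entry stub. This reading of the layout is inferred from the register arithmetic in the macro itself rather than stated in the patch.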
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index a0afc06ab9..1275bb5ed3 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -50,7 +50,8 @@ import re
import sys
#
-# Miscellaneous constants, tags, and masks used for object identification.
+# Miscellaneous constants such as tags and masks used for object identification,
+# enumeration values used as indexes in internal tables, etc.
#
consts_misc = [
{ 'name': 'FirstNonstringType', 'value': 'FIRST_NONSTRING_TYPE' },
@@ -92,6 +93,8 @@ consts_misc = [
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'DATA' },
+ { 'name': 'prop_type_const_field',
+ 'value': 'DATA_CONSTANT' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',
@@ -179,17 +182,49 @@ consts_misc = [
'value': 'JSArrayBuffer::WasNeutered::kMask' },
{ 'name': 'jsarray_buffer_was_neutered_shift',
'value': 'JSArrayBuffer::WasNeutered::kShift' },
+
+ { 'name': 'context_idx_closure',
+ 'value': 'Context::CLOSURE_INDEX' },
+ { 'name': 'context_idx_native',
+ 'value': 'Context::NATIVE_CONTEXT_INDEX' },
+ { 'name': 'context_idx_prev',
+ 'value': 'Context::PREVIOUS_INDEX' },
+ { 'name': 'context_idx_ext',
+ 'value': 'Context::EXTENSION_INDEX' },
+ { 'name': 'context_min_slots',
+ 'value': 'Context::MIN_CONTEXT_SLOTS' },
+
+ { 'name': 'namedictionaryshape_prefix_size',
+ 'value': 'NameDictionaryShape::kPrefixSize' },
+ { 'name': 'namedictionaryshape_entry_size',
+ 'value': 'NameDictionaryShape::kEntrySize' },
+ { 'name': 'globaldictionaryshape_entry_size',
+ 'value': 'GlobalDictionaryShape::kEntrySize' },
+
+ { 'name': 'namedictionary_prefix_start_index',
+ 'value': 'NameDictionary::kPrefixStartIndex' },
+
+ { 'name': 'seedednumberdictionaryshape_prefix_size',
+ 'value': 'SeededNumberDictionaryShape::kPrefixSize' },
+ { 'name': 'seedednumberdictionaryshape_entry_size',
+ 'value': 'SeededNumberDictionaryShape::kEntrySize' },
+
+ { 'name': 'unseedednumberdictionaryshape_prefix_size',
+ 'value': 'UnseededNumberDictionaryShape::kPrefixSize' },
+ { 'name': 'unseedednumberdictionaryshape_entry_size',
+ 'value': 'UnseededNumberDictionaryShape::kEntrySize' }
];
#
# The following useful fields are missing accessors, so we define fake ones.
+# Please note that extra accessors should _only_ be added to expose offsets that
+# can be used to access actual V8 objects' properties. They should not be added
+# for exposing other values. For instance, enumeration values or class
+# constants should be exposed by adding an entry in the "consts_misc" table, not
+# in this "extras_accessors" table.
#
extras_accessors = [
'JSFunction, context, Context, kContextOffset',
- 'Context, closure_index, int, CLOSURE_INDEX',
- 'Context, native_context_index, int, NATIVE_CONTEXT_INDEX',
- 'Context, previous_index, int, PREVIOUS_INDEX',
- 'Context, min_context_slots, int, MIN_CONTEXT_SLOTS',
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
@@ -203,12 +238,6 @@ extras_accessors = [
'Map, bit_field2, char, kBitField2Offset',
'Map, bit_field3, int, kBitField3Offset',
'Map, prototype, Object, kPrototypeOffset',
- 'NameDictionaryShape, prefix_size, int, kPrefixSize',
- 'NameDictionaryShape, entry_size, int, kEntrySize',
- 'NameDictionary, prefix_start_index, int, kPrefixStartIndex',
- 'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize',
- 'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize',
- 'NumberDictionaryShape, entry_size, int, kEntrySize',
'Oddball, kind_offset, int, kKindOffset',
'HeapNumber, value, double, kValueOffset',
'ConsString, first, String, kFirstOffset',
@@ -252,6 +281,7 @@ header = '''
#include "src/v8.h"
#include "src/frames.h"
#include "src/frames-inl.h" /* for architecture-specific frame constants */
+#include "src/contexts.h"
using namespace v8::internal;
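Taken together, the new consts give a post-mortem debugger enough to walk dictionary-backed objects without the fake accessors that were removed: entries live after the shape's prefix at a fixed stride. A rough sketch of how a consumer might combine them, assuming the usual hash-table layout these constants imply (indices counted in FixedArray elements, not bytes); the concrete numbers below are hypothetical:

    def dictionary_entry_index(first_index, prefix_size, entry_size, entry):
        # Index of the key slot for the given entry: skip the table header,
        # then the shape's prefix, then whole entries of entry_size slots.
        return first_index + prefix_size + entry * entry_size

    # Hypothetical numbers, e.g. a dictionary with a 3-slot header,
    # 2-slot prefix and 3-slot entries: entry 4 starts at element 17.
    assert dictionary_entry_index(3, 2, 3, 4) == 17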
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
deleted file mode 100644
index b09fd1f289..0000000000
--- a/deps/v8/tools/gyp/v8.gyp
+++ /dev/null
@@ -1,2281 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
- 'variables': {
- 'icu_use_data_file_flag%': 0,
- 'v8_code': 1,
- 'v8_random_seed%': 314159265,
- 'v8_vector_stores%': 0,
- 'embed_script%': "",
- 'warmup_script%': "",
- 'v8_extra_library_files%': [],
- 'v8_experimental_extra_library_files%': [],
- 'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
- },
- 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
- 'targets': [
- {
- 'target_name': 'v8',
- 'dependencies_traverse': 1,
- 'dependencies': ['v8_maybe_snapshot'],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['component=="shared_library"', {
- 'type': '<(component)',
- 'sources': [
- # Note: on non-Windows we still build this file so that gyp
- # has some sources to link into the component.
- '../../src/v8dll-main.cc',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'defines': [
- 'V8_SHARED',
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'V8_SHARED',
- 'USING_V8_SHARED',
- ],
- },
- 'target_conditions': [
- ['OS=="android" and _toolset=="target"', {
- 'libraries': [
- '-llog',
- ],
- 'include_dirs': [
- 'src/common/android/include',
- ],
- }],
- ],
- 'conditions': [
- ['OS=="mac"', {
- 'xcode_settings': {
- 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
- },
- }],
- ['soname_version!=""', {
- 'product_extension': 'so.<(soname_version)',
- }],
- ],
- },
- {
- 'type': 'none',
- }],
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../../include',
- ],
- },
- },
- {
- # This rule delegates to either v8_snapshot, v8_nosnapshot, or
- # v8_external_snapshot, depending on the current variables.
- # The intention is to make the 'calling' rules a bit simpler.
- 'target_name': 'v8_maybe_snapshot',
- 'type': 'none',
- 'conditions': [
- ['v8_use_snapshot!="true"', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_nosnapshot'],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
- # The dependency on v8_base should come from a transitive
- # dependency however the Android toolchain requires libv8_base.a
- # to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_snapshot'],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'inputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
- 'conditions': [
- ['v8_separate_ignition_snapshot==1', {
- 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
- }],
- ]
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
- 'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'target_conditions': [
- ['_toolset=="host"', {
- 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_host.bin'],
- }, {
- 'inputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
- }],
- ],
- 'conditions': [
- ['v8_separate_ignition_snapshot==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin'],
- }, {
- 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
- }],
- ],
- }],
- ],
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ]
- },
- {
- 'target_name': 'v8_snapshot',
- 'type': 'static_library',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'mksnapshot#host',
- 'js2c#host',
- ],
- }, {
- 'toolsets': ['target'],
- 'dependencies': [
- 'mksnapshot',
- 'js2c',
- ],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'V8_SHARED',
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'V8_SHARED',
- 'USING_V8_SHARED',
- ],
- },
- }],
- ],
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- ],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot',
- 'inputs': [
- '<(mksnapshot_exec)',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- 'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
- ],
- 'variables': {
- 'mksnapshot_flags': [],
- 'conditions': [
- ['v8_random_seed!=0', {
- 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
- }],
- ['v8_vector_stores!=0', {
- 'mksnapshot_flags': ['--vector-stores'],
- }],
- ],
- },
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- },
- ],
- },
- {
- 'target_name': 'v8_nosnapshot',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- '../../src/snapshot/snapshot-empty.cc',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': ['js2c#host'],
- }, {
- 'toolsets': ['target'],
- 'dependencies': ['js2c'],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- 'V8_SHARED',
- ],
- }],
- ]
- },
- {
- 'target_name': 'v8_external_snapshot',
- 'type': 'static_library',
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- 'dependencies': [
- 'mksnapshot#host',
- 'js2c#host',
- 'natives_blob',
- ]}, {
- 'toolsets': ['target'],
- 'dependencies': [
- 'mksnapshot',
- 'js2c',
- 'natives_blob',
- ],
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'V8_SHARED',
- 'BUILDING_V8_SHARED',
- ],
- 'direct_dependent_settings': {
- 'defines': [
- 'V8_SHARED',
- 'USING_V8_SHARED',
- ],
- },
- }],
- # Extra snapshot blob for ignition.
- ['v8_separate_ignition_snapshot==1', {
- # This is concatenated to the other actions list of
- # v8_external_snapshot.
- 'actions': [
- {
- 'action_name': 'run_mksnapshot (ignition)',
- 'inputs': ['<(mksnapshot_exec)'],
- 'variables': {
- # TODO: Extract common mksnapshot_flags to a separate
- # variable.
- 'mksnapshot_flags_ignition': [
- '--ignition',
- ],
- 'conditions': [
- ['v8_random_seed!=0', {
- 'mksnapshot_flags_ignition': ['--random-seed', '<(v8_random_seed)'],
- }],
- ['v8_vector_stores!=0', {
- 'mksnapshot_flags_ignition': ['--vector-stores'],
- }],
- ],
- },
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin'],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags_ignition)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }, {
- 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags_ignition)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- }, {
- 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags_ignition)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- },
- ],
- }],
- ],
- 'dependencies': [
- 'v8_base',
- ],
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- '../../src/snapshot/natives-external.cc',
- '../../src/snapshot/snapshot-external.cc',
- ],
- 'actions': [
- {
- 'action_name': 'run_mksnapshot (external)',
- 'inputs': ['<(mksnapshot_exec)'],
- 'variables': {
- 'mksnapshot_flags': [],
- 'conditions': [
- ['v8_random_seed!=0', {
- 'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
- }],
- ['v8_vector_stores!=0', {
- 'mksnapshot_flags': ['--vector-stores'],
- }],
- ],
- },
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_host.bin'],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }, {
- 'outputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- }, {
- 'outputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
- 'action': [
- '<(mksnapshot_exec)',
- '<@(mksnapshot_flags)',
- '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
- '<(embed_script)',
- '<(warmup_script)',
- ],
- }],
- ],
- },
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_base',
- 'type': 'static_library',
- 'dependencies': [
- 'v8_libbase',
- ],
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '../..',
- # To be able to find base/trace_event/common/trace_event_common.h
- '../../..',
- ],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
- 'sources': [ ### gcmole(all) ###
- '../../include/v8-debug.h',
- '../../include/v8-experimental.h',
- '../../include/v8-platform.h',
- '../../include/v8-profiler.h',
- '../../include/v8-testing.h',
- '../../include/v8-util.h',
- '../../include/v8-version.h',
- '../../include/v8.h',
- '../../include/v8config.h',
- '../../src/accessors.cc',
- '../../src/accessors.h',
- '../../src/address-map.cc',
- '../../src/address-map.h',
- '../../src/allocation.cc',
- '../../src/allocation.h',
- '../../src/allocation-site-scopes.cc',
- '../../src/allocation-site-scopes.h',
- '../../src/api-experimental.cc',
- '../../src/api-experimental.h',
- '../../src/api.cc',
- '../../src/api.h',
- '../../src/api-arguments.cc',
- '../../src/api-arguments.h',
- '../../src/api-natives.cc',
- '../../src/api-natives.h',
- '../../src/arguments.cc',
- '../../src/arguments.h',
- '../../src/assembler.cc',
- '../../src/assembler.h',
- '../../src/assert-scope.h',
- '../../src/assert-scope.cc',
- '../../src/ast/ast-expression-rewriter.cc',
- '../../src/ast/ast-expression-rewriter.h',
- '../../src/ast/ast-expression-visitor.cc',
- '../../src/ast/ast-expression-visitor.h',
- '../../src/ast/ast-literal-reindexer.cc',
- '../../src/ast/ast-literal-reindexer.h',
- '../../src/ast/ast-numbering.cc',
- '../../src/ast/ast-numbering.h',
- '../../src/ast/ast-value-factory.cc',
- '../../src/ast/ast-value-factory.h',
- '../../src/ast/ast.cc',
- '../../src/ast/ast.h',
- '../../src/ast/modules.cc',
- '../../src/ast/modules.h',
- '../../src/ast/prettyprinter.cc',
- '../../src/ast/prettyprinter.h',
- '../../src/ast/scopeinfo.cc',
- '../../src/ast/scopeinfo.h',
- '../../src/ast/scopes.cc',
- '../../src/ast/scopes.h',
- '../../src/ast/variables.cc',
- '../../src/ast/variables.h',
- '../../src/atomic-utils.h',
- '../../src/background-parsing-task.cc',
- '../../src/background-parsing-task.h',
- '../../src/bailout-reason.cc',
- '../../src/bailout-reason.h',
- '../../src/basic-block-profiler.cc',
- '../../src/basic-block-profiler.h',
- '../../src/bignum-dtoa.cc',
- '../../src/bignum-dtoa.h',
- '../../src/bignum.cc',
- '../../src/bignum.h',
- '../../src/bit-vector.cc',
- '../../src/bit-vector.h',
- '../../src/bootstrapper.cc',
- '../../src/bootstrapper.h',
- '../../src/builtins.cc',
- '../../src/builtins.h',
- '../../src/cached-powers.cc',
- '../../src/cached-powers.h',
- '../../src/cancelable-task.cc',
- '../../src/cancelable-task.h',
- '../../src/char-predicates.cc',
- '../../src/char-predicates-inl.h',
- '../../src/char-predicates.h',
- '../../src/checks.h',
- '../../src/code-factory.cc',
- '../../src/code-factory.h',
- '../../src/code-stubs.cc',
- '../../src/code-stubs.h',
- '../../src/code-stubs-hydrogen.cc',
- '../../src/codegen.cc',
- '../../src/codegen.h',
- '../../src/collector.h',
- '../../src/compilation-cache.cc',
- '../../src/compilation-cache.h',
- '../../src/compilation-dependencies.cc',
- '../../src/compilation-dependencies.h',
- '../../src/compilation-statistics.cc',
- '../../src/compilation-statistics.h',
- '../../src/compiler/access-builder.cc',
- '../../src/compiler/access-builder.h',
- '../../src/compiler/access-info.cc',
- '../../src/compiler/access-info.h',
- '../../src/compiler/all-nodes.cc',
- '../../src/compiler/all-nodes.h',
- '../../src/compiler/ast-graph-builder.cc',
- '../../src/compiler/ast-graph-builder.h',
- '../../src/compiler/ast-loop-assignment-analyzer.cc',
- '../../src/compiler/ast-loop-assignment-analyzer.h',
- '../../src/compiler/basic-block-instrumentor.cc',
- '../../src/compiler/basic-block-instrumentor.h',
- '../../src/compiler/branch-elimination.cc',
- '../../src/compiler/branch-elimination.h',
- '../../src/compiler/bytecode-branch-analysis.cc',
- '../../src/compiler/bytecode-branch-analysis.h',
- '../../src/compiler/bytecode-graph-builder.cc',
- '../../src/compiler/bytecode-graph-builder.h',
- '../../src/compiler/change-lowering.cc',
- '../../src/compiler/change-lowering.h',
- '../../src/compiler/c-linkage.cc',
- '../../src/compiler/coalesced-live-ranges.cc',
- '../../src/compiler/coalesced-live-ranges.h',
- '../../src/compiler/code-generator-impl.h',
- '../../src/compiler/code-generator.cc',
- '../../src/compiler/code-generator.h',
- '../../src/compiler/code-stub-assembler.cc',
- '../../src/compiler/code-stub-assembler.h',
- '../../src/compiler/common-node-cache.cc',
- '../../src/compiler/common-node-cache.h',
- '../../src/compiler/common-operator-reducer.cc',
- '../../src/compiler/common-operator-reducer.h',
- '../../src/compiler/common-operator.cc',
- '../../src/compiler/common-operator.h',
- '../../src/compiler/control-builders.cc',
- '../../src/compiler/control-builders.h',
- '../../src/compiler/control-equivalence.cc',
- '../../src/compiler/control-equivalence.h',
- '../../src/compiler/control-flow-optimizer.cc',
- '../../src/compiler/control-flow-optimizer.h',
- '../../src/compiler/dead-code-elimination.cc',
- '../../src/compiler/dead-code-elimination.h',
- '../../src/compiler/diamond.h',
- '../../src/compiler/escape-analysis.cc',
- '../../src/compiler/escape-analysis.h',
- "../../src/compiler/escape-analysis-reducer.cc",
- "../../src/compiler/escape-analysis-reducer.h",
- '../../src/compiler/frame.cc',
- '../../src/compiler/frame.h',
- '../../src/compiler/frame-elider.cc',
- '../../src/compiler/frame-elider.h',
- "../../src/compiler/frame-states.cc",
- "../../src/compiler/frame-states.h",
- '../../src/compiler/gap-resolver.cc',
- '../../src/compiler/gap-resolver.h',
- '../../src/compiler/graph-reducer.cc',
- '../../src/compiler/graph-reducer.h',
- '../../src/compiler/graph-replay.cc',
- '../../src/compiler/graph-replay.h',
- '../../src/compiler/graph-trimmer.cc',
- '../../src/compiler/graph-trimmer.h',
- '../../src/compiler/graph-visualizer.cc',
- '../../src/compiler/graph-visualizer.h',
- '../../src/compiler/graph.cc',
- '../../src/compiler/graph.h',
- '../../src/compiler/greedy-allocator.cc',
- '../../src/compiler/greedy-allocator.h',
- '../../src/compiler/instruction-codes.h',
- '../../src/compiler/instruction-selector-impl.h',
- '../../src/compiler/instruction-selector.cc',
- '../../src/compiler/instruction-selector.h',
- '../../src/compiler/instruction-scheduler.cc',
- '../../src/compiler/instruction-scheduler.h',
- '../../src/compiler/instruction.cc',
- '../../src/compiler/instruction.h',
- '../../src/compiler/int64-lowering.cc',
- '../../src/compiler/int64-lowering.h',
- '../../src/compiler/js-builtin-reducer.cc',
- '../../src/compiler/js-builtin-reducer.h',
- '../../src/compiler/js-call-reducer.cc',
- '../../src/compiler/js-call-reducer.h',
- '../../src/compiler/js-context-specialization.cc',
- '../../src/compiler/js-context-specialization.h',
- '../../src/compiler/js-create-lowering.cc',
- '../../src/compiler/js-create-lowering.h',
- '../../src/compiler/js-frame-specialization.cc',
- '../../src/compiler/js-frame-specialization.h',
- '../../src/compiler/js-generic-lowering.cc',
- '../../src/compiler/js-generic-lowering.h',
- '../../src/compiler/js-global-object-specialization.cc',
- '../../src/compiler/js-global-object-specialization.h',
- '../../src/compiler/js-graph.cc',
- '../../src/compiler/js-graph.h',
- '../../src/compiler/js-inlining.cc',
- '../../src/compiler/js-inlining.h',
- '../../src/compiler/js-inlining-heuristic.cc',
- '../../src/compiler/js-inlining-heuristic.h',
- '../../src/compiler/js-intrinsic-lowering.cc',
- '../../src/compiler/js-intrinsic-lowering.h',
- '../../src/compiler/js-native-context-specialization.cc',
- '../../src/compiler/js-native-context-specialization.h',
- '../../src/compiler/js-operator.cc',
- '../../src/compiler/js-operator.h',
- '../../src/compiler/js-typed-lowering.cc',
- '../../src/compiler/js-typed-lowering.h',
- '../../src/compiler/jump-threading.cc',
- '../../src/compiler/jump-threading.h',
- '../../src/compiler/linkage.cc',
- '../../src/compiler/linkage.h',
- '../../src/compiler/liveness-analyzer.cc',
- '../../src/compiler/liveness-analyzer.h',
- '../../src/compiler/live-range-separator.cc',
- '../../src/compiler/live-range-separator.h',
- '../../src/compiler/load-elimination.cc',
- '../../src/compiler/load-elimination.h',
- '../../src/compiler/loop-analysis.cc',
- '../../src/compiler/loop-analysis.h',
- '../../src/compiler/loop-peeling.cc',
- '../../src/compiler/loop-peeling.h',
- '../../src/compiler/machine-operator-reducer.cc',
- '../../src/compiler/machine-operator-reducer.h',
- '../../src/compiler/machine-operator.cc',
- '../../src/compiler/machine-operator.h',
- '../../src/compiler/move-optimizer.cc',
- '../../src/compiler/move-optimizer.h',
- '../../src/compiler/node-aux-data.h',
- '../../src/compiler/node-cache.cc',
- '../../src/compiler/node-cache.h',
- '../../src/compiler/node-marker.cc',
- '../../src/compiler/node-marker.h',
- '../../src/compiler/node-matchers.cc',
- '../../src/compiler/node-matchers.h',
- '../../src/compiler/node-properties.cc',
- '../../src/compiler/node-properties.h',
- '../../src/compiler/node.cc',
- '../../src/compiler/node.h',
- '../../src/compiler/opcodes.cc',
- '../../src/compiler/opcodes.h',
- '../../src/compiler/operator-properties.cc',
- '../../src/compiler/operator-properties.h',
- '../../src/compiler/operator.cc',
- '../../src/compiler/operator.h',
- '../../src/compiler/osr.cc',
- '../../src/compiler/osr.h',
- '../../src/compiler/pipeline.cc',
- '../../src/compiler/pipeline.h',
- '../../src/compiler/pipeline-statistics.cc',
- '../../src/compiler/pipeline-statistics.h',
- '../../src/compiler/raw-machine-assembler.cc',
- '../../src/compiler/raw-machine-assembler.h',
- '../../src/compiler/register-allocator.cc',
- '../../src/compiler/register-allocator.h',
- '../../src/compiler/register-allocator-verifier.cc',
- '../../src/compiler/register-allocator-verifier.h',
- '../../src/compiler/representation-change.cc',
- '../../src/compiler/representation-change.h',
- '../../src/compiler/schedule.cc',
- '../../src/compiler/schedule.h',
- '../../src/compiler/scheduler.cc',
- '../../src/compiler/scheduler.h',
- '../../src/compiler/select-lowering.cc',
- '../../src/compiler/select-lowering.h',
- '../../src/compiler/simplified-lowering.cc',
- '../../src/compiler/simplified-lowering.h',
- '../../src/compiler/simplified-operator-reducer.cc',
- '../../src/compiler/simplified-operator-reducer.h',
- '../../src/compiler/simplified-operator.cc',
- '../../src/compiler/simplified-operator.h',
- '../../src/compiler/source-position.cc',
- '../../src/compiler/source-position.h',
- '../../src/compiler/state-values-utils.cc',
- '../../src/compiler/state-values-utils.h',
- '../../src/compiler/tail-call-optimization.cc',
- '../../src/compiler/tail-call-optimization.h',
- '../../src/compiler/type-hint-analyzer.cc',
- '../../src/compiler/type-hint-analyzer.h',
- '../../src/compiler/type-hints.cc',
- '../../src/compiler/type-hints.h',
- '../../src/compiler/typer.cc',
- '../../src/compiler/typer.h',
- '../../src/compiler/value-numbering-reducer.cc',
- '../../src/compiler/value-numbering-reducer.h',
- '../../src/compiler/verifier.cc',
- '../../src/compiler/verifier.h',
- '../../src/compiler/wasm-compiler.cc',
- '../../src/compiler/wasm-compiler.h',
- '../../src/compiler/wasm-linkage.cc',
- '../../src/compiler/zone-pool.cc',
- '../../src/compiler/zone-pool.h',
- '../../src/compiler.cc',
- '../../src/compiler.h',
- '../../src/context-measure.cc',
- '../../src/context-measure.h',
- '../../src/contexts-inl.h',
- '../../src/contexts.cc',
- '../../src/contexts.h',
- '../../src/conversions-inl.h',
- '../../src/conversions.cc',
- '../../src/conversions.h',
- '../../src/counters.cc',
- '../../src/counters.h',
- '../../src/crankshaft/compilation-phase.cc',
- '../../src/crankshaft/compilation-phase.h',
- '../../src/crankshaft/hydrogen-alias-analysis.h',
- '../../src/crankshaft/hydrogen-bce.cc',
- '../../src/crankshaft/hydrogen-bce.h',
- '../../src/crankshaft/hydrogen-canonicalize.cc',
- '../../src/crankshaft/hydrogen-canonicalize.h',
- '../../src/crankshaft/hydrogen-check-elimination.cc',
- '../../src/crankshaft/hydrogen-check-elimination.h',
- '../../src/crankshaft/hydrogen-dce.cc',
- '../../src/crankshaft/hydrogen-dce.h',
- '../../src/crankshaft/hydrogen-dehoist.cc',
- '../../src/crankshaft/hydrogen-dehoist.h',
- '../../src/crankshaft/hydrogen-environment-liveness.cc',
- '../../src/crankshaft/hydrogen-environment-liveness.h',
- '../../src/crankshaft/hydrogen-escape-analysis.cc',
- '../../src/crankshaft/hydrogen-escape-analysis.h',
- '../../src/crankshaft/hydrogen-flow-engine.h',
- '../../src/crankshaft/hydrogen-gvn.cc',
- '../../src/crankshaft/hydrogen-gvn.h',
- '../../src/crankshaft/hydrogen-infer-representation.cc',
- '../../src/crankshaft/hydrogen-infer-representation.h',
- '../../src/crankshaft/hydrogen-infer-types.cc',
- '../../src/crankshaft/hydrogen-infer-types.h',
- '../../src/crankshaft/hydrogen-instructions.cc',
- '../../src/crankshaft/hydrogen-instructions.h',
- '../../src/crankshaft/hydrogen-load-elimination.cc',
- '../../src/crankshaft/hydrogen-load-elimination.h',
- '../../src/crankshaft/hydrogen-mark-deoptimize.cc',
- '../../src/crankshaft/hydrogen-mark-deoptimize.h',
- '../../src/crankshaft/hydrogen-mark-unreachable.cc',
- '../../src/crankshaft/hydrogen-mark-unreachable.h',
- '../../src/crankshaft/hydrogen-osr.cc',
- '../../src/crankshaft/hydrogen-osr.h',
- '../../src/crankshaft/hydrogen-range-analysis.cc',
- '../../src/crankshaft/hydrogen-range-analysis.h',
- '../../src/crankshaft/hydrogen-redundant-phi.cc',
- '../../src/crankshaft/hydrogen-redundant-phi.h',
- '../../src/crankshaft/hydrogen-removable-simulates.cc',
- '../../src/crankshaft/hydrogen-removable-simulates.h',
- '../../src/crankshaft/hydrogen-representation-changes.cc',
- '../../src/crankshaft/hydrogen-representation-changes.h',
- '../../src/crankshaft/hydrogen-sce.cc',
- '../../src/crankshaft/hydrogen-sce.h',
- '../../src/crankshaft/hydrogen-store-elimination.cc',
- '../../src/crankshaft/hydrogen-store-elimination.h',
- '../../src/crankshaft/hydrogen-types.cc',
- '../../src/crankshaft/hydrogen-types.h',
- '../../src/crankshaft/hydrogen-uint32-analysis.cc',
- '../../src/crankshaft/hydrogen-uint32-analysis.h',
- '../../src/crankshaft/hydrogen.cc',
- '../../src/crankshaft/hydrogen.h',
- '../../src/crankshaft/lithium-allocator-inl.h',
- '../../src/crankshaft/lithium-allocator.cc',
- '../../src/crankshaft/lithium-allocator.h',
- '../../src/crankshaft/lithium-codegen.cc',
- '../../src/crankshaft/lithium-codegen.h',
- '../../src/crankshaft/lithium.cc',
- '../../src/crankshaft/lithium.h',
- '../../src/crankshaft/lithium-inl.h',
- '../../src/crankshaft/typing.cc',
- '../../src/crankshaft/typing.h',
- '../../src/crankshaft/unique.h',
- '../../src/date.cc',
- '../../src/date.h',
- '../../src/dateparser-inl.h',
- '../../src/dateparser.cc',
- '../../src/dateparser.h',
- '../../src/debug/debug-evaluate.cc',
- '../../src/debug/debug-evaluate.h',
- '../../src/debug/debug-frames.cc',
- '../../src/debug/debug-frames.h',
- '../../src/debug/debug-scopes.cc',
- '../../src/debug/debug-scopes.h',
- '../../src/debug/debug.cc',
- '../../src/debug/debug.h',
- '../../src/debug/liveedit.cc',
- '../../src/debug/liveedit.h',
- '../../src/deoptimizer.cc',
- '../../src/deoptimizer.h',
- '../../src/disasm.h',
- '../../src/disassembler.cc',
- '../../src/disassembler.h',
- '../../src/diy-fp.cc',
- '../../src/diy-fp.h',
- '../../src/double.h',
- '../../src/dtoa.cc',
- '../../src/dtoa.h',
- '../../src/effects.h',
- '../../src/elements-kind.cc',
- '../../src/elements-kind.h',
- '../../src/elements.cc',
- '../../src/elements.h',
- '../../src/execution.cc',
- '../../src/execution.h',
- '../../src/extensions/externalize-string-extension.cc',
- '../../src/extensions/externalize-string-extension.h',
- '../../src/extensions/free-buffer-extension.cc',
- '../../src/extensions/free-buffer-extension.h',
- '../../src/extensions/gc-extension.cc',
- '../../src/extensions/gc-extension.h',
- '../../src/extensions/statistics-extension.cc',
- '../../src/extensions/statistics-extension.h',
- '../../src/extensions/trigger-failure-extension.cc',
- '../../src/extensions/trigger-failure-extension.h',
- '../../src/external-reference-table.cc',
- '../../src/external-reference-table.h',
- '../../src/factory.cc',
- '../../src/factory.h',
- '../../src/fast-accessor-assembler.cc',
- '../../src/fast-accessor-assembler.h',
- '../../src/fast-dtoa.cc',
- '../../src/fast-dtoa.h',
- '../../src/field-index.h',
- '../../src/field-index-inl.h',
- '../../src/field-type.cc',
- '../../src/field-type.h',
- '../../src/fixed-dtoa.cc',
- '../../src/fixed-dtoa.h',
- '../../src/flag-definitions.h',
- '../../src/flags.cc',
- '../../src/flags.h',
- '../../src/frames-inl.h',
- '../../src/frames.cc',
- '../../src/frames.h',
- '../../src/full-codegen/full-codegen.cc',
- '../../src/full-codegen/full-codegen.h',
- '../../src/futex-emulation.cc',
- '../../src/futex-emulation.h',
- '../../src/gdb-jit.cc',
- '../../src/gdb-jit.h',
- '../../src/global-handles.cc',
- '../../src/global-handles.h',
- '../../src/globals.h',
- '../../src/handles-inl.h',
- '../../src/handles.cc',
- '../../src/handles.h',
- '../../src/hashmap.h',
- '../../src/heap-symbols.h',
- '../../src/heap/array-buffer-tracker.cc',
- '../../src/heap/array-buffer-tracker.h',
- '../../src/heap/memory-reducer.cc',
- '../../src/heap/memory-reducer.h',
- '../../src/heap/gc-idle-time-handler.cc',
- '../../src/heap/gc-idle-time-handler.h',
- '../../src/heap/gc-tracer.cc',
- '../../src/heap/gc-tracer.h',
- '../../src/heap/heap-inl.h',
- '../../src/heap/heap.cc',
- '../../src/heap/heap.h',
- '../../src/heap/incremental-marking-inl.h',
- '../../src/heap/incremental-marking-job.cc',
- '../../src/heap/incremental-marking-job.h',
- '../../src/heap/incremental-marking.cc',
- '../../src/heap/incremental-marking.h',
- '../../src/heap/mark-compact-inl.h',
- '../../src/heap/mark-compact.cc',
- '../../src/heap/mark-compact.h',
- '../../src/heap/object-stats.cc',
- '../../src/heap/object-stats.h',
- '../../src/heap/objects-visiting-inl.h',
- '../../src/heap/objects-visiting.cc',
- '../../src/heap/objects-visiting.h',
- '../../src/heap/page-parallel-job.h',
- '../../src/heap/remembered-set.cc',
- '../../src/heap/remembered-set.h',
- '../../src/heap/scavenge-job.h',
- '../../src/heap/scavenge-job.cc',
- '../../src/heap/scavenger-inl.h',
- '../../src/heap/scavenger.cc',
- '../../src/heap/scavenger.h',
- '../../src/heap/slot-set.h',
- '../../src/heap/spaces-inl.h',
- '../../src/heap/spaces.cc',
- '../../src/heap/spaces.h',
- '../../src/heap/store-buffer.cc',
- '../../src/heap/store-buffer.h',
- '../../src/i18n.cc',
- '../../src/i18n.h',
- '../../src/icu_util.cc',
- '../../src/icu_util.h',
- '../../src/ic/access-compiler.cc',
- '../../src/ic/access-compiler.h',
- '../../src/ic/call-optimization.cc',
- '../../src/ic/call-optimization.h',
- '../../src/ic/handler-compiler.cc',
- '../../src/ic/handler-compiler.h',
- '../../src/ic/ic-inl.h',
- '../../src/ic/ic-state.cc',
- '../../src/ic/ic-state.h',
- '../../src/ic/ic.cc',
- '../../src/ic/ic.h',
- '../../src/ic/ic-compiler.cc',
- '../../src/ic/ic-compiler.h',
- '../../src/identity-map.cc',
- '../../src/identity-map.h',
- '../../src/interface-descriptors.cc',
- '../../src/interface-descriptors.h',
- '../../src/interpreter/bytecodes.cc',
- '../../src/interpreter/bytecodes.h',
- '../../src/interpreter/bytecode-array-builder.cc',
- '../../src/interpreter/bytecode-array-builder.h',
- '../../src/interpreter/bytecode-array-iterator.cc',
- '../../src/interpreter/bytecode-array-iterator.h',
- '../../src/interpreter/bytecode-register-allocator.cc',
- '../../src/interpreter/bytecode-register-allocator.h',
- '../../src/interpreter/bytecode-generator.cc',
- '../../src/interpreter/bytecode-generator.h',
- '../../src/interpreter/bytecode-traits.h',
- '../../src/interpreter/constant-array-builder.cc',
- '../../src/interpreter/constant-array-builder.h',
- '../../src/interpreter/control-flow-builders.cc',
- '../../src/interpreter/control-flow-builders.h',
- '../../src/interpreter/handler-table-builder.cc',
- '../../src/interpreter/handler-table-builder.h',
- '../../src/interpreter/interpreter.cc',
- '../../src/interpreter/interpreter.h',
- '../../src/interpreter/interpreter-assembler.cc',
- '../../src/interpreter/interpreter-assembler.h',
- '../../src/interpreter/interpreter-intrinsics.cc',
- '../../src/interpreter/interpreter-intrinsics.h',
- '../../src/interpreter/source-position-table.cc',
- '../../src/interpreter/source-position-table.h',
- '../../src/isolate-inl.h',
- '../../src/isolate.cc',
- '../../src/isolate.h',
- '../../src/json-parser.h',
- '../../src/json-stringifier.h',
- '../../src/keys.h',
- '../../src/keys.cc',
- '../../src/layout-descriptor-inl.h',
- '../../src/layout-descriptor.cc',
- '../../src/layout-descriptor.h',
- '../../src/list-inl.h',
- '../../src/list.h',
- '../../src/locked-queue-inl.h',
- '../../src/locked-queue.h',
- '../../src/log-inl.h',
- '../../src/log-utils.cc',
- '../../src/log-utils.h',
- '../../src/log.cc',
- '../../src/log.h',
- '../../src/lookup.cc',
- '../../src/lookup.h',
- '../../src/macro-assembler.h',
- '../../src/machine-type.cc',
- '../../src/machine-type.h',
- '../../src/messages.cc',
- '../../src/messages.h',
- '../../src/msan.h',
- '../../src/objects-body-descriptors-inl.h',
- '../../src/objects-body-descriptors.h',
- '../../src/objects-debug.cc',
- '../../src/objects-inl.h',
- '../../src/objects-printer.cc',
- '../../src/objects.cc',
- '../../src/objects.h',
- '../../src/optimizing-compile-dispatcher.cc',
- '../../src/optimizing-compile-dispatcher.h',
- '../../src/ostreams.cc',
- '../../src/ostreams.h',
- '../../src/parsing/expression-classifier.h',
- '../../src/parsing/func-name-inferrer.cc',
- '../../src/parsing/func-name-inferrer.h',
- '../../src/parsing/parameter-initializer-rewriter.cc',
- '../../src/parsing/parameter-initializer-rewriter.h',
- '../../src/parsing/parser-base.h',
- '../../src/parsing/parser.cc',
- '../../src/parsing/parser.h',
- '../../src/parsing/pattern-rewriter.cc',
- '../../src/parsing/preparse-data-format.h',
- '../../src/parsing/preparse-data.cc',
- '../../src/parsing/preparse-data.h',
- '../../src/parsing/preparser.cc',
- '../../src/parsing/preparser.h',
- '../../src/parsing/rewriter.cc',
- '../../src/parsing/rewriter.h',
- '../../src/parsing/scanner-character-streams.cc',
- '../../src/parsing/scanner-character-streams.h',
- '../../src/parsing/scanner.cc',
- '../../src/parsing/scanner.h',
- '../../src/parsing/token.cc',
- '../../src/parsing/token.h',
- '../../src/pending-compilation-error-handler.cc',
- '../../src/pending-compilation-error-handler.h',
- '../../src/perf-jit.cc',
- '../../src/perf-jit.h',
- '../../src/profiler/allocation-tracker.cc',
- '../../src/profiler/allocation-tracker.h',
- '../../src/profiler/circular-queue-inl.h',
- '../../src/profiler/circular-queue.h',
- '../../src/profiler/cpu-profiler-inl.h',
- '../../src/profiler/cpu-profiler.cc',
- '../../src/profiler/cpu-profiler.h',
- '../../src/profiler/heap-profiler.cc',
- '../../src/profiler/heap-profiler.h',
- '../../src/profiler/heap-snapshot-generator-inl.h',
- '../../src/profiler/heap-snapshot-generator.cc',
- '../../src/profiler/heap-snapshot-generator.h',
- '../../src/profiler/profile-generator-inl.h',
- '../../src/profiler/profile-generator.cc',
- '../../src/profiler/profile-generator.h',
- '../../src/profiler/sampler.cc',
- '../../src/profiler/sampler.h',
- '../../src/profiler/sampling-heap-profiler.cc',
- '../../src/profiler/sampling-heap-profiler.h',
- '../../src/profiler/strings-storage.cc',
- '../../src/profiler/strings-storage.h',
- '../../src/profiler/unbound-queue-inl.h',
- '../../src/profiler/unbound-queue.h',
- '../../src/property-descriptor.cc',
- '../../src/property-descriptor.h',
- '../../src/property-details.h',
- '../../src/property.cc',
- '../../src/property.h',
- '../../src/prototype.h',
- '../../src/regexp/bytecodes-irregexp.h',
- '../../src/regexp/interpreter-irregexp.cc',
- '../../src/regexp/interpreter-irregexp.h',
- '../../src/regexp/jsregexp-inl.h',
- '../../src/regexp/jsregexp.cc',
- '../../src/regexp/jsregexp.h',
- '../../src/regexp/regexp-ast.cc',
- '../../src/regexp/regexp-ast.h',
- '../../src/regexp/regexp-macro-assembler-irregexp-inl.h',
- '../../src/regexp/regexp-macro-assembler-irregexp.cc',
- '../../src/regexp/regexp-macro-assembler-irregexp.h',
- '../../src/regexp/regexp-macro-assembler-tracer.cc',
- '../../src/regexp/regexp-macro-assembler-tracer.h',
- '../../src/regexp/regexp-macro-assembler.cc',
- '../../src/regexp/regexp-macro-assembler.h',
- '../../src/regexp/regexp-parser.cc',
- '../../src/regexp/regexp-parser.h',
- '../../src/regexp/regexp-stack.cc',
- '../../src/regexp/regexp-stack.h',
- '../../src/register-configuration.cc',
- '../../src/register-configuration.h',
- '../../src/runtime-profiler.cc',
- '../../src/runtime-profiler.h',
- '../../src/runtime/runtime-array.cc',
- '../../src/runtime/runtime-atomics.cc',
- '../../src/runtime/runtime-classes.cc',
- '../../src/runtime/runtime-collections.cc',
- '../../src/runtime/runtime-compiler.cc',
- '../../src/runtime/runtime-date.cc',
- '../../src/runtime/runtime-debug.cc',
- '../../src/runtime/runtime-forin.cc',
- '../../src/runtime/runtime-function.cc',
- '../../src/runtime/runtime-futex.cc',
- '../../src/runtime/runtime-generator.cc',
- '../../src/runtime/runtime-i18n.cc',
- '../../src/runtime/runtime-internal.cc',
- '../../src/runtime/runtime-interpreter.cc',
- '../../src/runtime/runtime-json.cc',
- '../../src/runtime/runtime-literals.cc',
- '../../src/runtime/runtime-liveedit.cc',
- '../../src/runtime/runtime-maths.cc',
- '../../src/runtime/runtime-numbers.cc',
- '../../src/runtime/runtime-object.cc',
- '../../src/runtime/runtime-observe.cc',
- '../../src/runtime/runtime-operators.cc',
- '../../src/runtime/runtime-proxy.cc',
- '../../src/runtime/runtime-regexp.cc',
- '../../src/runtime/runtime-scopes.cc',
- '../../src/runtime/runtime-simd.cc',
- '../../src/runtime/runtime-strings.cc',
- '../../src/runtime/runtime-symbol.cc',
- '../../src/runtime/runtime-test.cc',
- '../../src/runtime/runtime-typedarray.cc',
- '../../src/runtime/runtime-uri.cc',
- '../../src/runtime/runtime-utils.h',
- '../../src/runtime/runtime.cc',
- '../../src/runtime/runtime.h',
- '../../src/safepoint-table.cc',
- '../../src/safepoint-table.h',
- '../../src/signature.h',
- '../../src/simulator.h',
- '../../src/small-pointer-list.h',
- '../../src/snapshot/code-serializer.cc',
- '../../src/snapshot/code-serializer.h',
- '../../src/snapshot/deserializer.cc',
- '../../src/snapshot/deserializer.h',
- '../../src/snapshot/natives.h',
- '../../src/snapshot/natives-common.cc',
- '../../src/snapshot/partial-serializer.cc',
- '../../src/snapshot/partial-serializer.h',
- '../../src/snapshot/serializer.cc',
- '../../src/snapshot/serializer.h',
- '../../src/snapshot/serializer-common.cc',
- '../../src/snapshot/serializer-common.h',
- '../../src/snapshot/snapshot.h',
- '../../src/snapshot/snapshot-common.cc',
- '../../src/snapshot/snapshot-source-sink.cc',
- '../../src/snapshot/snapshot-source-sink.h',
- '../../src/snapshot/startup-serializer.cc',
- '../../src/snapshot/startup-serializer.h',
- '../../src/source-position.h',
- '../../src/splay-tree.h',
- '../../src/splay-tree-inl.h',
- '../../src/startup-data-util.cc',
- '../../src/startup-data-util.h',
- '../../src/string-builder.cc',
- '../../src/string-builder.h',
- '../../src/string-search.h',
- '../../src/string-stream.cc',
- '../../src/string-stream.h',
- '../../src/strtod.cc',
- '../../src/strtod.h',
- '../../src/ic/stub-cache.cc',
- '../../src/ic/stub-cache.h',
- '../../src/tracing/trace-event.cc',
- '../../src/tracing/trace-event.h',
- '../../src/transitions-inl.h',
- '../../src/transitions.cc',
- '../../src/transitions.h',
- '../../src/type-cache.cc',
- '../../src/type-cache.h',
- '../../src/type-feedback-vector-inl.h',
- '../../src/type-feedback-vector.cc',
- '../../src/type-feedback-vector.h',
- '../../src/type-info.cc',
- '../../src/type-info.h',
- '../../src/types.cc',
- '../../src/types.h',
- '../../src/typing-asm.cc',
- '../../src/typing-asm.h',
- '../../src/typing-reset.cc',
- '../../src/typing-reset.h',
- '../../src/unicode-inl.h',
- '../../src/unicode.cc',
- '../../src/unicode.h',
- '../../src/unicode-cache-inl.h',
- '../../src/unicode-cache.h',
- '../../src/unicode-decoder.cc',
- '../../src/unicode-decoder.h',
- '../../src/utils-inl.h',
- '../../src/utils.cc',
- '../../src/utils.h',
- '../../src/v8.cc',
- '../../src/v8.h',
- '../../src/v8memory.h',
- '../../src/v8threads.cc',
- '../../src/v8threads.h',
- '../../src/vector.h',
- '../../src/version.cc',
- '../../src/version.h',
- '../../src/vm-state-inl.h',
- '../../src/vm-state.h',
- '../../src/wasm/asm-wasm-builder.cc',
- '../../src/wasm/asm-wasm-builder.h',
- '../../src/wasm/ast-decoder.cc',
- '../../src/wasm/ast-decoder.h',
- '../../src/wasm/decoder.h',
- '../../src/wasm/encoder.cc',
- '../../src/wasm/encoder.h',
- '../../src/wasm/module-decoder.cc',
- '../../src/wasm/module-decoder.h',
- '../../src/wasm/wasm-js.cc',
- '../../src/wasm/wasm-js.h',
- '../../src/wasm/wasm-macro-gen.h',
- '../../src/wasm/wasm-module.cc',
- '../../src/wasm/wasm-module.h',
- '../../src/wasm/wasm-opcodes.cc',
- '../../src/wasm/wasm-opcodes.h',
- '../../src/wasm/wasm-result.cc',
- '../../src/wasm/wasm-result.h',
- '../../src/zone.cc',
- '../../src/zone.h',
- '../../src/zone-allocator.h',
- '../../src/zone-containers.h',
- '../../src/third_party/fdlibm/fdlibm.cc',
- '../../src/third_party/fdlibm/fdlibm.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_target_arch=="arm"', {
- 'sources': [ ### gcmole(arch:arm) ###
- '../../src/arm/assembler-arm-inl.h',
- '../../src/arm/assembler-arm.cc',
- '../../src/arm/assembler-arm.h',
- '../../src/arm/builtins-arm.cc',
- '../../src/arm/code-stubs-arm.cc',
- '../../src/arm/code-stubs-arm.h',
- '../../src/arm/codegen-arm.cc',
- '../../src/arm/codegen-arm.h',
- '../../src/arm/constants-arm.h',
- '../../src/arm/constants-arm.cc',
- '../../src/arm/cpu-arm.cc',
- '../../src/arm/deoptimizer-arm.cc',
- '../../src/arm/disasm-arm.cc',
- '../../src/arm/frames-arm.cc',
- '../../src/arm/frames-arm.h',
- '../../src/arm/interface-descriptors-arm.cc',
- '../../src/arm/interface-descriptors-arm.h',
- '../../src/arm/macro-assembler-arm.cc',
- '../../src/arm/macro-assembler-arm.h',
- '../../src/arm/simulator-arm.cc',
- '../../src/arm/simulator-arm.h',
- '../../src/compiler/arm/code-generator-arm.cc',
- '../../src/compiler/arm/instruction-codes-arm.h',
- '../../src/compiler/arm/instruction-scheduler-arm.cc',
- '../../src/compiler/arm/instruction-selector-arm.cc',
- '../../src/crankshaft/arm/lithium-arm.cc',
- '../../src/crankshaft/arm/lithium-arm.h',
- '../../src/crankshaft/arm/lithium-codegen-arm.cc',
- '../../src/crankshaft/arm/lithium-codegen-arm.h',
- '../../src/crankshaft/arm/lithium-gap-resolver-arm.cc',
- '../../src/crankshaft/arm/lithium-gap-resolver-arm.h',
- '../../src/debug/arm/debug-arm.cc',
- '../../src/full-codegen/arm/full-codegen-arm.cc',
- '../../src/ic/arm/access-compiler-arm.cc',
- '../../src/ic/arm/handler-compiler-arm.cc',
- '../../src/ic/arm/ic-arm.cc',
- '../../src/ic/arm/ic-compiler-arm.cc',
- '../../src/ic/arm/stub-cache-arm.cc',
- '../../src/regexp/arm/regexp-macro-assembler-arm.cc',
- '../../src/regexp/arm/regexp-macro-assembler-arm.h',
- ],
- }],
- ['v8_target_arch=="arm64"', {
- 'sources': [ ### gcmole(arch:arm64) ###
- '../../src/arm64/assembler-arm64.cc',
- '../../src/arm64/assembler-arm64.h',
- '../../src/arm64/assembler-arm64-inl.h',
- '../../src/arm64/builtins-arm64.cc',
- '../../src/arm64/codegen-arm64.cc',
- '../../src/arm64/codegen-arm64.h',
- '../../src/arm64/code-stubs-arm64.cc',
- '../../src/arm64/code-stubs-arm64.h',
- '../../src/arm64/constants-arm64.h',
- '../../src/arm64/cpu-arm64.cc',
- '../../src/arm64/decoder-arm64.cc',
- '../../src/arm64/decoder-arm64.h',
- '../../src/arm64/decoder-arm64-inl.h',
- '../../src/arm64/deoptimizer-arm64.cc',
- '../../src/arm64/disasm-arm64.cc',
- '../../src/arm64/disasm-arm64.h',
- '../../src/arm64/frames-arm64.cc',
- '../../src/arm64/frames-arm64.h',
- '../../src/arm64/instructions-arm64.cc',
- '../../src/arm64/instructions-arm64.h',
- '../../src/arm64/instrument-arm64.cc',
- '../../src/arm64/instrument-arm64.h',
- '../../src/arm64/interface-descriptors-arm64.cc',
- '../../src/arm64/interface-descriptors-arm64.h',
- '../../src/arm64/macro-assembler-arm64.cc',
- '../../src/arm64/macro-assembler-arm64.h',
- '../../src/arm64/macro-assembler-arm64-inl.h',
- '../../src/arm64/simulator-arm64.cc',
- '../../src/arm64/simulator-arm64.h',
- '../../src/arm64/utils-arm64.cc',
- '../../src/arm64/utils-arm64.h',
- '../../src/compiler/arm64/code-generator-arm64.cc',
- '../../src/compiler/arm64/instruction-codes-arm64.h',
- '../../src/compiler/arm64/instruction-scheduler-arm64.cc',
- '../../src/compiler/arm64/instruction-selector-arm64.cc',
- '../../src/crankshaft/arm64/delayed-masm-arm64.cc',
- '../../src/crankshaft/arm64/delayed-masm-arm64.h',
- '../../src/crankshaft/arm64/delayed-masm-arm64-inl.h',
- '../../src/crankshaft/arm64/lithium-arm64.cc',
- '../../src/crankshaft/arm64/lithium-arm64.h',
- '../../src/crankshaft/arm64/lithium-codegen-arm64.cc',
- '../../src/crankshaft/arm64/lithium-codegen-arm64.h',
- '../../src/crankshaft/arm64/lithium-gap-resolver-arm64.cc',
- '../../src/crankshaft/arm64/lithium-gap-resolver-arm64.h',
- '../../src/debug/arm64/debug-arm64.cc',
- '../../src/full-codegen/arm64/full-codegen-arm64.cc',
- '../../src/ic/arm64/access-compiler-arm64.cc',
- '../../src/ic/arm64/handler-compiler-arm64.cc',
- '../../src/ic/arm64/ic-arm64.cc',
- '../../src/ic/arm64/ic-compiler-arm64.cc',
- '../../src/ic/arm64/stub-cache-arm64.cc',
- '../../src/regexp/arm64/regexp-macro-assembler-arm64.cc',
- '../../src/regexp/arm64/regexp-macro-assembler-arm64.h',
- ],
- }],
- ['v8_target_arch=="ia32"', {
- 'sources': [ ### gcmole(arch:ia32) ###
- '../../src/ia32/assembler-ia32-inl.h',
- '../../src/ia32/assembler-ia32.cc',
- '../../src/ia32/assembler-ia32.h',
- '../../src/ia32/builtins-ia32.cc',
- '../../src/ia32/code-stubs-ia32.cc',
- '../../src/ia32/code-stubs-ia32.h',
- '../../src/ia32/codegen-ia32.cc',
- '../../src/ia32/codegen-ia32.h',
- '../../src/ia32/cpu-ia32.cc',
- '../../src/ia32/deoptimizer-ia32.cc',
- '../../src/ia32/disasm-ia32.cc',
- '../../src/ia32/frames-ia32.cc',
- '../../src/ia32/frames-ia32.h',
- '../../src/ia32/interface-descriptors-ia32.cc',
- '../../src/ia32/macro-assembler-ia32.cc',
- '../../src/ia32/macro-assembler-ia32.h',
- '../../src/compiler/ia32/code-generator-ia32.cc',
- '../../src/compiler/ia32/instruction-codes-ia32.h',
- '../../src/compiler/ia32/instruction-scheduler-ia32.cc',
- '../../src/compiler/ia32/instruction-selector-ia32.cc',
- '../../src/crankshaft/ia32/lithium-codegen-ia32.cc',
- '../../src/crankshaft/ia32/lithium-codegen-ia32.h',
- '../../src/crankshaft/ia32/lithium-gap-resolver-ia32.cc',
- '../../src/crankshaft/ia32/lithium-gap-resolver-ia32.h',
- '../../src/crankshaft/ia32/lithium-ia32.cc',
- '../../src/crankshaft/ia32/lithium-ia32.h',
- '../../src/debug/ia32/debug-ia32.cc',
- '../../src/full-codegen/ia32/full-codegen-ia32.cc',
- '../../src/ic/ia32/access-compiler-ia32.cc',
- '../../src/ic/ia32/handler-compiler-ia32.cc',
- '../../src/ic/ia32/ic-ia32.cc',
- '../../src/ic/ia32/ic-compiler-ia32.cc',
- '../../src/ic/ia32/stub-cache-ia32.cc',
- '../../src/regexp/ia32/regexp-macro-assembler-ia32.cc',
- '../../src/regexp/ia32/regexp-macro-assembler-ia32.h',
- ],
- }],
- ['v8_target_arch=="x87"', {
- 'sources': [ ### gcmole(arch:x87) ###
- '../../src/x87/assembler-x87-inl.h',
- '../../src/x87/assembler-x87.cc',
- '../../src/x87/assembler-x87.h',
- '../../src/x87/builtins-x87.cc',
- '../../src/x87/code-stubs-x87.cc',
- '../../src/x87/code-stubs-x87.h',
- '../../src/x87/codegen-x87.cc',
- '../../src/x87/codegen-x87.h',
- '../../src/x87/cpu-x87.cc',
- '../../src/x87/deoptimizer-x87.cc',
- '../../src/x87/disasm-x87.cc',
- '../../src/x87/frames-x87.cc',
- '../../src/x87/frames-x87.h',
- '../../src/x87/interface-descriptors-x87.cc',
- '../../src/x87/macro-assembler-x87.cc',
- '../../src/x87/macro-assembler-x87.h',
- '../../src/compiler/x87/code-generator-x87.cc',
- '../../src/compiler/x87/instruction-codes-x87.h',
- '../../src/compiler/x87/instruction-scheduler-x87.cc',
- '../../src/compiler/x87/instruction-selector-x87.cc',
- '../../src/crankshaft/x87/lithium-codegen-x87.cc',
- '../../src/crankshaft/x87/lithium-codegen-x87.h',
- '../../src/crankshaft/x87/lithium-gap-resolver-x87.cc',
- '../../src/crankshaft/x87/lithium-gap-resolver-x87.h',
- '../../src/crankshaft/x87/lithium-x87.cc',
- '../../src/crankshaft/x87/lithium-x87.h',
- '../../src/debug/x87/debug-x87.cc',
- '../../src/full-codegen/x87/full-codegen-x87.cc',
- '../../src/ic/x87/access-compiler-x87.cc',
- '../../src/ic/x87/handler-compiler-x87.cc',
- '../../src/ic/x87/ic-x87.cc',
- '../../src/ic/x87/ic-compiler-x87.cc',
- '../../src/ic/x87/stub-cache-x87.cc',
- '../../src/regexp/x87/regexp-macro-assembler-x87.cc',
- '../../src/regexp/x87/regexp-macro-assembler-x87.h',
- ],
- }],
- ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
- 'sources': [ ### gcmole(arch:mipsel) ###
- '../../src/mips/assembler-mips.cc',
- '../../src/mips/assembler-mips.h',
- '../../src/mips/assembler-mips-inl.h',
- '../../src/mips/builtins-mips.cc',
- '../../src/mips/codegen-mips.cc',
- '../../src/mips/codegen-mips.h',
- '../../src/mips/code-stubs-mips.cc',
- '../../src/mips/code-stubs-mips.h',
- '../../src/mips/constants-mips.cc',
- '../../src/mips/constants-mips.h',
- '../../src/mips/cpu-mips.cc',
- '../../src/mips/deoptimizer-mips.cc',
- '../../src/mips/disasm-mips.cc',
- '../../src/mips/frames-mips.cc',
- '../../src/mips/frames-mips.h',
- '../../src/mips/interface-descriptors-mips.cc',
- '../../src/mips/macro-assembler-mips.cc',
- '../../src/mips/macro-assembler-mips.h',
- '../../src/mips/simulator-mips.cc',
- '../../src/mips/simulator-mips.h',
- '../../src/compiler/mips/code-generator-mips.cc',
- '../../src/compiler/mips/instruction-codes-mips.h',
- '../../src/compiler/mips/instruction-scheduler-mips.cc',
- '../../src/compiler/mips/instruction-selector-mips.cc',
- '../../src/crankshaft/mips/lithium-codegen-mips.cc',
- '../../src/crankshaft/mips/lithium-codegen-mips.h',
- '../../src/crankshaft/mips/lithium-gap-resolver-mips.cc',
- '../../src/crankshaft/mips/lithium-gap-resolver-mips.h',
- '../../src/crankshaft/mips/lithium-mips.cc',
- '../../src/crankshaft/mips/lithium-mips.h',
- '../../src/full-codegen/mips/full-codegen-mips.cc',
- '../../src/debug/mips/debug-mips.cc',
- '../../src/ic/mips/access-compiler-mips.cc',
- '../../src/ic/mips/handler-compiler-mips.cc',
- '../../src/ic/mips/ic-mips.cc',
- '../../src/ic/mips/ic-compiler-mips.cc',
- '../../src/ic/mips/stub-cache-mips.cc',
- '../../src/regexp/mips/regexp-macro-assembler-mips.cc',
- '../../src/regexp/mips/regexp-macro-assembler-mips.h',
- ],
- }],
- ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
- 'sources': [ ### gcmole(arch:mips64el) ###
- '../../src/mips64/assembler-mips64.cc',
- '../../src/mips64/assembler-mips64.h',
- '../../src/mips64/assembler-mips64-inl.h',
- '../../src/mips64/builtins-mips64.cc',
- '../../src/mips64/codegen-mips64.cc',
- '../../src/mips64/codegen-mips64.h',
- '../../src/mips64/code-stubs-mips64.cc',
- '../../src/mips64/code-stubs-mips64.h',
- '../../src/mips64/constants-mips64.cc',
- '../../src/mips64/constants-mips64.h',
- '../../src/mips64/cpu-mips64.cc',
- '../../src/mips64/deoptimizer-mips64.cc',
- '../../src/mips64/disasm-mips64.cc',
- '../../src/mips64/frames-mips64.cc',
- '../../src/mips64/frames-mips64.h',
- '../../src/mips64/interface-descriptors-mips64.cc',
- '../../src/mips64/macro-assembler-mips64.cc',
- '../../src/mips64/macro-assembler-mips64.h',
- '../../src/mips64/simulator-mips64.cc',
- '../../src/mips64/simulator-mips64.h',
- '../../src/compiler/mips64/code-generator-mips64.cc',
- '../../src/compiler/mips64/instruction-codes-mips64.h',
- '../../src/compiler/mips64/instruction-scheduler-mips64.cc',
- '../../src/compiler/mips64/instruction-selector-mips64.cc',
- '../../src/crankshaft/mips64/lithium-codegen-mips64.cc',
- '../../src/crankshaft/mips64/lithium-codegen-mips64.h',
- '../../src/crankshaft/mips64/lithium-gap-resolver-mips64.cc',
- '../../src/crankshaft/mips64/lithium-gap-resolver-mips64.h',
- '../../src/crankshaft/mips64/lithium-mips64.cc',
- '../../src/crankshaft/mips64/lithium-mips64.h',
- '../../src/debug/mips64/debug-mips64.cc',
- '../../src/full-codegen/mips64/full-codegen-mips64.cc',
- '../../src/ic/mips64/access-compiler-mips64.cc',
- '../../src/ic/mips64/handler-compiler-mips64.cc',
- '../../src/ic/mips64/ic-mips64.cc',
- '../../src/ic/mips64/ic-compiler-mips64.cc',
- '../../src/ic/mips64/stub-cache-mips64.cc',
- '../../src/regexp/mips64/regexp-macro-assembler-mips64.cc',
- '../../src/regexp/mips64/regexp-macro-assembler-mips64.h',
- ],
- }],
- ['v8_target_arch=="x64" or v8_target_arch=="x32"', {
- 'sources': [ ### gcmole(arch:x64) ###
- '../../src/crankshaft/x64/lithium-codegen-x64.cc',
- '../../src/crankshaft/x64/lithium-codegen-x64.h',
- '../../src/crankshaft/x64/lithium-gap-resolver-x64.cc',
- '../../src/crankshaft/x64/lithium-gap-resolver-x64.h',
- '../../src/crankshaft/x64/lithium-x64.cc',
- '../../src/crankshaft/x64/lithium-x64.h',
- '../../src/x64/assembler-x64-inl.h',
- '../../src/x64/assembler-x64.cc',
- '../../src/x64/assembler-x64.h',
- '../../src/x64/builtins-x64.cc',
- '../../src/x64/code-stubs-x64.cc',
- '../../src/x64/code-stubs-x64.h',
- '../../src/x64/codegen-x64.cc',
- '../../src/x64/codegen-x64.h',
- '../../src/x64/cpu-x64.cc',
- '../../src/x64/deoptimizer-x64.cc',
- '../../src/x64/disasm-x64.cc',
- '../../src/x64/frames-x64.cc',
- '../../src/x64/frames-x64.h',
- '../../src/x64/interface-descriptors-x64.cc',
- '../../src/x64/macro-assembler-x64.cc',
- '../../src/x64/macro-assembler-x64.h',
- '../../src/debug/x64/debug-x64.cc',
- '../../src/full-codegen/x64/full-codegen-x64.cc',
- '../../src/ic/x64/access-compiler-x64.cc',
- '../../src/ic/x64/handler-compiler-x64.cc',
- '../../src/ic/x64/ic-x64.cc',
- '../../src/ic/x64/ic-compiler-x64.cc',
- '../../src/ic/x64/stub-cache-x64.cc',
- '../../src/regexp/x64/regexp-macro-assembler-x64.cc',
- '../../src/regexp/x64/regexp-macro-assembler-x64.h',
- ],
- }],
- ['v8_target_arch=="x64"', {
- 'sources': [
- '../../src/compiler/x64/code-generator-x64.cc',
- '../../src/compiler/x64/instruction-codes-x64.h',
- '../../src/compiler/x64/instruction-scheduler-x64.cc',
- '../../src/compiler/x64/instruction-selector-x64.cc',
- ],
- }],
- ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
- 'sources': [ ### gcmole(arch:ppc) ###
- '../../src/compiler/ppc/code-generator-ppc.cc',
- '../../src/compiler/ppc/instruction-codes-ppc.h',
- '../../src/compiler/ppc/instruction-scheduler-ppc.cc',
- '../../src/compiler/ppc/instruction-selector-ppc.cc',
- '../../src/crankshaft/ppc/lithium-ppc.cc',
- '../../src/crankshaft/ppc/lithium-ppc.h',
- '../../src/crankshaft/ppc/lithium-codegen-ppc.cc',
- '../../src/crankshaft/ppc/lithium-codegen-ppc.h',
- '../../src/crankshaft/ppc/lithium-gap-resolver-ppc.cc',
- '../../src/crankshaft/ppc/lithium-gap-resolver-ppc.h',
- '../../src/debug/ppc/debug-ppc.cc',
- '../../src/full-codegen/ppc/full-codegen-ppc.cc',
- '../../src/ic/ppc/access-compiler-ppc.cc',
- '../../src/ic/ppc/handler-compiler-ppc.cc',
- '../../src/ic/ppc/ic-ppc.cc',
- '../../src/ic/ppc/ic-compiler-ppc.cc',
- '../../src/ic/ppc/stub-cache-ppc.cc',
- '../../src/ppc/assembler-ppc-inl.h',
- '../../src/ppc/assembler-ppc.cc',
- '../../src/ppc/assembler-ppc.h',
- '../../src/ppc/builtins-ppc.cc',
- '../../src/ppc/code-stubs-ppc.cc',
- '../../src/ppc/code-stubs-ppc.h',
- '../../src/ppc/codegen-ppc.cc',
- '../../src/ppc/codegen-ppc.h',
- '../../src/ppc/constants-ppc.h',
- '../../src/ppc/constants-ppc.cc',
- '../../src/ppc/cpu-ppc.cc',
- '../../src/ppc/deoptimizer-ppc.cc',
- '../../src/ppc/disasm-ppc.cc',
- '../../src/ppc/frames-ppc.cc',
- '../../src/ppc/frames-ppc.h',
- '../../src/ppc/interface-descriptors-ppc.cc',
- '../../src/ppc/macro-assembler-ppc.cc',
- '../../src/ppc/macro-assembler-ppc.h',
- '../../src/ppc/simulator-ppc.cc',
- '../../src/ppc/simulator-ppc.h',
- '../../src/regexp/ppc/regexp-macro-assembler-ppc.cc',
- '../../src/regexp/ppc/regexp-macro-assembler-ppc.h',
- ],
- }],
- ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
- 'sources': [ ### gcmole(arch:s390) ###
- '../../src/compiler/s390/code-generator-s390.cc',
- '../../src/compiler/s390/instruction-codes-s390.h',
- '../../src/compiler/s390/instruction-scheduler-s390.cc',
- '../../src/compiler/s390/instruction-selector-s390.cc',
- '../../src/crankshaft/s390/lithium-codegen-s390.cc',
- '../../src/crankshaft/s390/lithium-codegen-s390.h',
- '../../src/crankshaft/s390/lithium-gap-resolver-s390.cc',
- '../../src/crankshaft/s390/lithium-gap-resolver-s390.h',
- '../../src/crankshaft/s390/lithium-s390.cc',
- '../../src/crankshaft/s390/lithium-s390.h',
- '../../src/debug/s390/debug-s390.cc',
- '../../src/full-codegen/s390/full-codegen-s390.cc',
- '../../src/ic/s390/access-compiler-s390.cc',
- '../../src/ic/s390/handler-compiler-s390.cc',
- '../../src/ic/s390/ic-compiler-s390.cc',
- '../../src/ic/s390/ic-s390.cc',
- '../../src/ic/s390/stub-cache-s390.cc',
- '../../src/regexp/s390/regexp-macro-assembler-s390.cc',
- '../../src/regexp/s390/regexp-macro-assembler-s390.h',
- '../../src/s390/assembler-s390.cc',
- '../../src/s390/assembler-s390.h',
- '../../src/s390/assembler-s390-inl.h',
- '../../src/s390/builtins-s390.cc',
- '../../src/s390/codegen-s390.cc',
- '../../src/s390/codegen-s390.h',
- '../../src/s390/code-stubs-s390.cc',
- '../../src/s390/code-stubs-s390.h',
- '../../src/s390/constants-s390.cc',
- '../../src/s390/constants-s390.h',
- '../../src/s390/cpu-s390.cc',
- '../../src/s390/deoptimizer-s390.cc',
- '../../src/s390/disasm-s390.cc',
- '../../src/s390/frames-s390.cc',
- '../../src/s390/frames-s390.h',
- '../../src/s390/interface-descriptors-s390.cc',
- '../../src/s390/macro-assembler-s390.cc',
- '../../src/s390/macro-assembler-s390.h',
- '../../src/s390/simulator-s390.cc',
- '../../src/s390/simulator-s390.h',
- ],
- }],
- ['OS=="win"', {
- 'variables': {
- 'gyp_generators': '<!(echo $GYP_GENERATORS)',
- },
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- # When building Official, the .lib is too large and exceeds the 2G
- # limit. This breaks it into multiple pieces to avoid the limit.
- # See http://crbug.com/485155.
- 'msvs_shard': 4,
- }],
- ['component=="shared_library"', {
- 'defines': [
- 'BUILDING_V8_SHARED',
- 'V8_SHARED',
- ],
- }],
- ['v8_postmortem_support=="true"', {
- 'sources': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ]
- }],
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ]
- }, { # v8_enable_i18n_support==0
- 'sources!': [
- '../../src/i18n.cc',
- '../../src/i18n.h',
- ],
- }],
- ['OS=="win" and v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icudata',
- ],
- }],
- ['icu_use_data_file_flag==1', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
- }, { # else icu_use_data_file_flag !=1
- 'conditions': [
- ['OS=="win"', {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
- }, {
- 'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
- }],
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_libbase',
- 'type': 'static_library',
- 'variables': {
- 'optimize': 'max',
- },
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- '../../src/base/accounting-allocator.cc',
- '../../src/base/accounting-allocator.h',
- '../../src/base/adapters.h',
- '../../src/base/atomicops.h',
- '../../src/base/atomicops_internals_arm64_gcc.h',
- '../../src/base/atomicops_internals_arm_gcc.h',
- '../../src/base/atomicops_internals_atomicword_compat.h',
- '../../src/base/atomicops_internals_mac.h',
- '../../src/base/atomicops_internals_mips_gcc.h',
- '../../src/base/atomicops_internals_mips64_gcc.h',
- '../../src/base/atomicops_internals_portable.h',
- '../../src/base/atomicops_internals_ppc_gcc.h',
- '../../src/base/atomicops_internals_s390_gcc.h',
- '../../src/base/atomicops_internals_tsan.h',
- '../../src/base/atomicops_internals_x86_gcc.cc',
- '../../src/base/atomicops_internals_x86_gcc.h',
- '../../src/base/atomicops_internals_x86_msvc.h',
- '../../src/base/bits.cc',
- '../../src/base/bits.h',
- '../../src/base/build_config.h',
- '../../src/base/compiler-specific.h',
- '../../src/base/cpu.cc',
- '../../src/base/cpu.h',
- '../../src/base/division-by-constant.cc',
- '../../src/base/division-by-constant.h',
- '../../src/base/flags.h',
- '../../src/base/functional.cc',
- '../../src/base/functional.h',
- '../../src/base/iterator.h',
- '../../src/base/lazy-instance.h',
- '../../src/base/logging.cc',
- '../../src/base/logging.h',
- '../../src/base/macros.h',
- '../../src/base/once.cc',
- '../../src/base/once.h',
- '../../src/base/platform/elapsed-timer.h',
- '../../src/base/platform/time.cc',
- '../../src/base/platform/time.h',
- '../../src/base/platform/condition-variable.cc',
- '../../src/base/platform/condition-variable.h',
- '../../src/base/platform/mutex.cc',
- '../../src/base/platform/mutex.h',
- '../../src/base/platform/platform.h',
- '../../src/base/platform/semaphore.cc',
- '../../src/base/platform/semaphore.h',
- '../../src/base/safe_conversions.h',
- '../../src/base/safe_conversions_impl.h',
- '../../src/base/safe_math.h',
- '../../src/base/safe_math_impl.h',
- '../../src/base/smart-pointers.h',
- '../../src/base/sys-info.cc',
- '../../src/base/sys-info.h',
- '../../src/base/utils/random-number-generator.cc',
- '../../src/base/utils/random-number-generator.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ['OS=="linux"', {
- 'conditions': [
- ['nacl_target_arch=="none"', {
- 'link_settings': {
- 'libraries': [
- '-ldl',
- '-lrt'
- ],
- },
- }, {
- 'defines': [
- 'V8_LIBRT_NOT_AVAILABLE=1',
- ],
- }],
- ],
- 'sources': [
- '../../src/base/platform/platform-linux.cc',
- '../../src/base/platform/platform-posix.cc'
- ],
- }
- ],
- ['OS=="android"', {
- 'sources': [
- '../../src/base/platform/platform-posix.cc'
- ],
- 'link_settings': {
- 'target_conditions': [
- ['_toolset=="host"', {
- # Only include libdl and librt on host builds because they
- # are included by default on Android target builds, and we
- # don't want to re-include them here since this will change
- # library order and break (see crbug.com/469973).
- 'libraries': [
- '-ldl',
- '-lrt'
- ]
- }]
- ]
- },
- 'conditions': [
- ['host_os=="mac"', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'sources': [
- '../../src/base/platform/platform-macos.cc'
- ]
- }, {
- 'sources': [
- '../../src/base/platform/platform-linux.cc'
- ]
- }],
- ],
- }, {
- 'sources': [
- '../../src/base/platform/platform-linux.cc'
- ]
- }],
- ],
- },
- ],
- ['OS=="qnx"', {
- 'link_settings': {
- 'target_conditions': [
- ['_toolset=="host" and host_os=="linux"', {
- 'libraries': [
- '-lrt'
- ],
- }],
- ['_toolset=="target"', {
- 'libraries': [
- '-lbacktrace'
- ],
- }],
- ],
- },
- 'sources': [
- '../../src/base/platform/platform-posix.cc',
- '../../src/base/qnx-math.h',
- ],
- 'target_conditions': [
- ['_toolset=="host" and host_os=="linux"', {
- 'sources': [
- '../../src/base/platform/platform-linux.cc'
- ],
- }],
- ['_toolset=="host" and host_os=="mac"', {
- 'sources': [
- '../../src/base/platform/platform-macos.cc'
- ],
- }],
- ['_toolset=="target"', {
- 'sources': [
- '../../src/base/platform/platform-qnx.cc'
- ],
- }],
- ],
- },
- ],
- ['OS=="freebsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- '../../src/base/platform/platform-freebsd.cc',
- '../../src/base/platform/platform-posix.cc'
- ],
- }
- ],
- ['OS=="openbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/local/lib -lexecinfo',
- ]},
- 'sources': [
- '../../src/base/platform/platform-openbsd.cc',
- '../../src/base/platform/platform-posix.cc'
- ],
- }
- ],
- ['OS=="netbsd"', {
- 'link_settings': {
- 'libraries': [
- '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
- ]},
- 'sources': [
- '../../src/base/platform/platform-openbsd.cc',
- '../../src/base/platform/platform-posix.cc'
- ],
- }
- ],
- ['OS=="aix"', {
- 'sources': [
- '../../src/base/platform/platform-aix.cc',
- '../../src/base/platform/platform-posix.cc'
- ]},
- ],
- ['OS=="solaris"', {
- 'link_settings': {
- 'libraries': [
- '-lnsl -lrt',
- ]},
- 'sources': [
- '../../src/base/platform/platform-solaris.cc',
- '../../src/base/platform/platform-posix.cc'
- ],
- }
- ],
- ['OS=="mac"', {
- 'sources': [
- '../../src/base/platform/platform-macos.cc',
- '../../src/base/platform/platform-posix.cc'
- ]},
- ],
- ['OS=="win"', {
- 'defines': [
- '_CRT_RAND_S' # for rand_s()
- ],
- 'variables': {
- 'gyp_generators': '<!(echo $GYP_GENERATORS)',
- },
- 'conditions': [
- ['gyp_generators=="make"', {
- 'variables': {
- 'build_env': '<!(uname -o)',
- },
- 'conditions': [
- ['build_env=="Cygwin"', {
- 'sources': [
- '../../src/base/platform/platform-cygwin.cc',
- '../../src/base/platform/platform-posix.cc'
- ],
- }, {
- 'sources': [
- '../../src/base/platform/platform-win32.cc',
- '../../src/base/win32-headers.h',
- ],
- }],
- ],
- 'link_settings': {
- 'libraries': [ '-lwinmm', '-lws2_32' ],
- },
- }, {
- 'sources': [
- '../../src/base/platform/platform-win32.cc',
- '../../src/base/win32-headers.h',
- ],
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- 'link_settings': {
- 'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
- },
- }],
- ],
- }],
- ],
- },
- {
- 'target_name': 'v8_libplatform',
- 'type': 'static_library',
- 'variables': {
- 'optimize': 'max',
- },
- 'dependencies': [
- 'v8_libbase',
- ],
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- '../../include/libplatform/libplatform.h',
- '../../src/libplatform/default-platform.cc',
- '../../src/libplatform/default-platform.h',
- '../../src/libplatform/task-queue.cc',
- '../../src/libplatform/task-queue.h',
- '../../src/libplatform/worker-thread.cc',
- '../../src/libplatform/worker-thread.h',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- 'direct_dependent_settings': {
- 'include_dirs': [
- '../../include',
- ],
- },
- },
- {
- 'target_name': 'natives_blob',
- 'type': 'none',
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'dependencies': ['js2c#host'],
- }, {
- 'dependencies': ['js2c'],
- }],
- ],
- 'actions': [{
- 'action_name': 'concatenate_natives_blob',
- 'inputs': [
- '../../tools/concatenate-files.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
- '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
- ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'target_conditions': [
- ['_toolset=="host"', {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob_host.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
- ],
- }],
- ],
- }, {
- 'outputs': [
- '<(PRODUCT_DIR)/natives_blob.bin',
- ],
- 'action': [
- 'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
- ],
- }],
- ],
- }],
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }],
- ]
- },
- {
- 'target_name': 'js2c',
- 'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ['v8_enable_i18n_support==1', {
- 'variables': {
- 'i18n_library_files': [
- '../../src/js/i18n.js',
- ],
- },
- }, {
- 'variables': {
- 'i18n_library_files': [],
- },
- }],
- ],
- 'variables': {
- 'library_files': [
- '../../src/js/macros.py',
- '../../src/messages.h',
- '../../src/js/prologue.js',
- '../../src/js/runtime.js',
- '../../src/js/v8natives.js',
- '../../src/js/symbol.js',
- '../../src/js/array.js',
- '../../src/js/string.js',
- '../../src/js/uri.js',
- '../../src/js/math.js',
- '../../src/third_party/fdlibm/fdlibm.js',
- '../../src/js/regexp.js',
- '../../src/js/arraybuffer.js',
- '../../src/js/typedarray.js',
- '../../src/js/iterator-prototype.js',
- '../../src/js/generator.js',
- '../../src/js/object-observe.js',
- '../../src/js/collection.js',
- '../../src/js/weak-collection.js',
- '../../src/js/collection-iterator.js',
- '../../src/js/promise.js',
- '../../src/js/messages.js',
- '../../src/js/json.js',
- '../../src/js/array-iterator.js',
- '../../src/js/string-iterator.js',
- '../../src/js/templates.js',
- '../../src/js/spread.js',
- '../../src/js/proxy.js',
- '../../src/debug/mirrors.js',
- '../../src/debug/debug.js',
- '../../src/debug/liveedit.js',
- ],
- 'experimental_library_files': [
- '../../src/js/macros.py',
- '../../src/messages.h',
- '../../src/js/generator.js',
- '../../src/js/harmony-atomics.js',
- '../../src/js/harmony-regexp-exec.js',
- '../../src/js/harmony-object-observe.js',
- '../../src/js/harmony-sharedarraybuffer.js',
- '../../src/js/harmony-simd.js',
- '../../src/js/harmony-species.js',
- '../../src/js/harmony-unicode-regexps.js',
- '../../src/js/harmony-string-padding.js',
- '../../src/js/promise-extra.js',
- ],
- 'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
- 'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
- 'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
- 'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
- },
- 'actions': [
- {
- 'action_name': 'js2c',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(library_files)',
- '<@(i18n_library_files)'
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- 'CORE',
- '<@(library_files)',
- '<@(i18n_library_files)'
- ],
- },
- {
- 'action_name': 'js2c_bin',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(library_files)',
- '<@(i18n_library_files)'
- ],
- 'outputs': ['<@(libraries_bin_file)'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- 'CORE',
- '<@(library_files)',
- '<@(i18n_library_files)',
- '--startup_blob', '<@(libraries_bin_file)',
- '--nojs',
- ],
- },
- {
- 'action_name': 'js2c_experimental',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(experimental_library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- 'EXPERIMENTAL',
- '<@(experimental_library_files)'
- ],
- },
- {
- 'action_name': 'js2c_experimental_bin',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(experimental_library_files)',
- ],
- 'outputs': ['<@(libraries_experimental_bin_file)'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- 'EXPERIMENTAL',
- '<@(experimental_library_files)',
- '--startup_blob', '<@(libraries_experimental_bin_file)',
- '--nojs',
- ],
- },
- {
- 'action_name': 'js2c_extras',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(v8_extra_library_files)',
- ],
- 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- 'EXTRAS',
- '<@(v8_extra_library_files)',
- ],
- },
- {
- 'action_name': 'js2c_extras_bin',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(v8_extra_library_files)',
- ],
- 'outputs': ['<@(libraries_extras_bin_file)'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
- 'EXTRAS',
- '<@(v8_extra_library_files)',
- '--startup_blob', '<@(libraries_extras_bin_file)',
- '--nojs',
- ],
- },
- {
- 'action_name': 'js2c_experimental_extras',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(v8_experimental_extra_library_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- ],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'EXPERIMENTAL_EXTRAS',
- '<@(v8_experimental_extra_library_files)',
- ],
- },
- {
- 'action_name': 'js2c_experimental_extras_bin',
- 'inputs': [
- '../../tools/js2c.py',
- '<@(v8_experimental_extra_library_files)',
- ],
- 'outputs': ['<@(libraries_experimental_extras_bin_file)'],
- 'action': [
- 'python',
- '../../tools/js2c.py',
- '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
- 'EXPERIMENTAL_EXTRAS',
- '<@(v8_experimental_extra_library_files)',
- '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
- '--nojs',
- ],
- },
- ],
- },
- {
- 'target_name': 'postmortem-metadata',
- 'type': 'none',
- 'variables': {
- 'heapobject_files': [
- '../../src/objects.h',
- '../../src/objects-inl.h',
- ],
- },
- 'actions': [
- {
- 'action_name': 'gen-postmortem-metadata',
- 'inputs': [
- '../../tools/gen-postmortem-metadata.py',
- '<@(heapobject_files)',
- ],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/debug-support.cc',
- ],
- 'action': [
- 'python',
- '../../tools/gen-postmortem-metadata.py',
- '<@(_outputs)',
- '<@(heapobject_files)'
- ]
- }
- ]
- },
- {
- 'target_name': 'mksnapshot',
- 'type': 'executable',
- 'dependencies': ['v8_base', 'v8_nosnapshot', 'v8_libplatform'],
- 'include_dirs+': [
- '../..',
- ],
- 'sources': [
- '../../src/snapshot/mksnapshot.cc',
- ],
- 'conditions': [
- ['v8_enable_i18n_support==1', {
- 'dependencies': [
- '<(icu_gyp_path):icui18n',
- '<(icu_gyp_path):icuuc',
- ]
- }],
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host'],
- }, {
- 'toolsets': ['target'],
- }],
- ],
- },
- ],
-}
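
The removed gyp sections above close out a deleted gyp file's target list: v8_base's per-architecture source lists, the v8_libbase and v8_libplatform support libraries, the js2c and natives_blob bundling targets, postmortem-metadata generation, and the mksnapshot executable. One step worth unpacking is the concatenate_natives_blob action: judging purely by its declared inputs and outputs, tools/concatenate-files.py appends the four js2c-generated .bin blobs into a single natives_blob.bin. The sketch below is an inferred re-implementation of that effect, not the shipped script (which this diff does not include):

```python
# Inferred sketch of the 'concatenate_natives_blob' action's effect, based
# only on the gyp action's declared inputs/outputs above; the real
# tools/concatenate-files.py may differ in detail.
import sys

def concatenate(inputs, output):
    # Append each input blob back-to-back into the output file.
    with open(output, 'wb') as out:
        for path in inputs:
            with open(path, 'rb') as f:
                out.write(f.read())

if __name__ == '__main__':
    # Mirrors the action's argument order: input .bin files first, then the
    # destination natives_blob.bin path as the final argument.
    concatenate(sys.argv[1:-1], sys.argv[-1])
```
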
diff --git a/deps/v8/tools/gyp_flag_compare.py b/deps/v8/tools/gyp_flag_compare.py
new file mode 100755
index 0000000000..86fa5c4098
--- /dev/null
+++ b/deps/v8/tools/gyp_flag_compare.py
@@ -0,0 +1,280 @@
+#!/usr/bin/env python
+
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Given the output of -t commands from a ninja build for a gyp and GN generated
+build, report on differences between the command lines."""
+
+
+import os
+import shlex
+import subprocess
+import sys
+
+
+# Must be in v8/.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+os.chdir(BASE_DIR)
+
+
+g_total_differences = 0
+
+
+def FindAndRemoveArgWithValue(command_line, argname):
+ """Given a command line as a list, remove and return the value of an option
+ that takes a value as a separate entry.
+
+ Modifies |command_line| in place.
+ """
+ if argname not in command_line:
+ return ''
+ location = command_line.index(argname)
+ value = command_line[location + 1]
+ command_line[location:location + 2] = []
+ return value
+
+
+def MergeSpacedArgs(command_line, argname):
+ """Combine all arguments |argname| with their values, separated by a space."""
+ i = 0
+ result = []
+ while i < len(command_line):
+ arg = command_line[i]
+ if arg == argname:
+ result.append(arg + ' ' + command_line[i + 1])
+ i += 1
+ else:
+ result.append(arg)
+ i += 1
+ return result
+
+
+def NormalizeSymbolArguments(command_line):
+ """Normalize -g arguments.
+
+  If there are no -g args, that is equivalent to -g0. -g2 is equivalent to -g.
+  Modifies |command_line| in place.
+  """
+  # Strip -g0 if there are no symbols.
+ have_some_symbols = False
+ for x in command_line:
+ if x.startswith('-g') and x != '-g0':
+ have_some_symbols = True
+ if not have_some_symbols and '-g0' in command_line:
+ command_line.remove('-g0')
+
+ # Rename -g2 to -g.
+ if '-g2' in command_line:
+ command_line[command_line.index('-g2')] = '-g'
+
+
+def GetFlags(lines, build_dir):
+ """Turn a list of command lines into a semi-structured dict."""
+ is_win = sys.platform == 'win32'
+ flags_by_output = {}
+ for line in lines:
+ command_line = shlex.split(line.strip(), posix=not is_win)[1:]
+
+ output_name = FindAndRemoveArgWithValue(command_line, '-o')
+ dep_name = FindAndRemoveArgWithValue(command_line, '-MF')
+
+ NormalizeSymbolArguments(command_line)
+
+ command_line = MergeSpacedArgs(command_line, '-Xclang')
+
+ cc_file = [x for x in command_line if x.endswith('.cc') or
+ x.endswith('.c') or
+ x.endswith('.cpp')]
+ if len(cc_file) != 1:
+ print 'Skipping %s' % command_line
+ continue
+ assert len(cc_file) == 1
+
+ if is_win:
+ rsp_file = [x for x in command_line if x.endswith('.rsp')]
+ assert len(rsp_file) <= 1
+ if rsp_file:
+ rsp_file = os.path.join(build_dir, rsp_file[0][1:])
+ with open(rsp_file, "r") as open_rsp_file:
+ command_line = shlex.split(open_rsp_file, posix=False)
+
+ defines = [x for x in command_line if x.startswith('-D')]
+ include_dirs = [x for x in command_line if x.startswith('-I')]
+ dash_f = [x for x in command_line if x.startswith('-f')]
+ warnings = \
+ [x for x in command_line if x.startswith('/wd' if is_win else '-W')]
+ others = [x for x in command_line if x not in defines and \
+ x not in include_dirs and \
+ x not in dash_f and \
+ x not in warnings and \
+ x not in cc_file]
+
+ for index, value in enumerate(include_dirs):
+ if value == '-Igen':
+ continue
+ path = value[2:]
+ if not os.path.isabs(path):
+ path = os.path.join(build_dir, path)
+ include_dirs[index] = '-I' + os.path.normpath(path)
+
+ # GYP supports paths above the source root like <(DEPTH)/../foo while such
+    # paths are unsupported by gn. But gn allows using system-absolute paths
+ # instead (paths that start with single '/'). Normalize all paths.
+ cc_file = [os.path.normpath(os.path.join(build_dir, cc_file[0]))]
+
+ # Filter for libFindBadConstructs.so having a relative path in one and
+ # absolute path in the other.
+ others_filtered = []
+ for x in others:
+ if x.startswith('-Xclang ') and x.endswith('libFindBadConstructs.so'):
+ others_filtered.append(
+ '-Xclang ' +
+ os.path.join(os.getcwd(),
+ os.path.normpath(
+ os.path.join('out/gn_flags', x.split(' ', 1)[1]))))
+ elif x.startswith('-B'):
+ others_filtered.append(
+ '-B' +
+ os.path.join(os.getcwd(),
+ os.path.normpath(os.path.join('out/gn_flags', x[2:]))))
+ else:
+ others_filtered.append(x)
+ others = others_filtered
+
+ flags_by_output[cc_file[0]] = {
+ 'output': output_name,
+ 'depname': dep_name,
+ 'defines': sorted(defines),
+ 'include_dirs': sorted(include_dirs), # TODO(scottmg): This is wrong.
+ 'dash_f': sorted(dash_f),
+ 'warnings': sorted(warnings),
+ 'other': sorted(others),
+ }
+ return flags_by_output
+
+
+def CompareLists(gyp, gn, name, dont_care_gyp=None, dont_care_gn=None):
+ """Return a report of any differences between gyp and gn lists, ignoring
+ anything in |dont_care_{gyp|gn}| respectively."""
+ global g_total_differences
+ if not dont_care_gyp:
+ dont_care_gyp = []
+ if not dont_care_gn:
+ dont_care_gn = []
+ output = ''
+ if gyp[name] != gn[name]:
+ gyp_set = set(gyp[name])
+ gn_set = set(gn[name])
+ missing_in_gyp = gyp_set - gn_set
+ missing_in_gn = gn_set - gyp_set
+ missing_in_gyp -= set(dont_care_gyp)
+ missing_in_gn -= set(dont_care_gn)
+ if missing_in_gyp or missing_in_gn:
+ output += ' %s differ:\n' % name
+ if missing_in_gyp:
+ output += ' In gyp, but not in GN:\n %s' % '\n '.join(
+ sorted(missing_in_gyp)) + '\n'
+ g_total_differences += len(missing_in_gyp)
+ if missing_in_gn:
+ output += ' In GN, but not in gyp:\n %s' % '\n '.join(
+ sorted(missing_in_gn)) + '\n\n'
+ g_total_differences += len(missing_in_gn)
+ return output
+
+
+def Run(command_line):
+ """Run |command_line| as a subprocess and return stdout. Raises on error."""
+ try:
+ return subprocess.check_output(command_line, shell=True)
+ except subprocess.CalledProcessError as e:
+ # Rescue the output we got until the exception happened.
+ print '#### Stdout: ####################################################'
+ print e.output
+ print '#################################################################'
+ raise
+
+
+def main():
+ if len(sys.argv) < 4:
+ print ('usage: %s gn_outdir gyp_outdir gn_target '
+ '[gyp_target1, gyp_target2, ...]' % __file__)
+ return 1
+
+ if len(sys.argv) == 4:
+ sys.argv.append(sys.argv[3])
+ gn_out_dir = sys.argv[1]
+ print >> sys.stderr, 'Expecting gn outdir in %s...' % gn_out_dir
+ gn = Run('ninja -C %s -t commands %s' % (gn_out_dir, sys.argv[3]))
+ if sys.platform == 'win32':
+ # On Windows flags are stored in .rsp files which are created during build.
+ print >> sys.stderr, 'Building in %s...' % gn_out_dir
+ Run('ninja -C %s -d keeprsp %s' % (gn_out_dir, sys.argv[3]))
+
+ gyp_out_dir = sys.argv[2]
+ print >> sys.stderr, 'Expecting gyp outdir in %s...' % gyp_out_dir
+ gyp = Run('ninja -C %s -t commands %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
+ if sys.platform == 'win32':
+ # On Windows flags are stored in .rsp files which are created during build.
+ print >> sys.stderr, 'Building in %s...' % gyp_out_dir
+ Run('ninja -C %s -d keeprsp %s' % (gyp_out_dir, " ".join(sys.argv[4:])))
+
+ all_gyp_flags = GetFlags(gyp.splitlines(),
+ os.path.join(os.getcwd(), gyp_out_dir))
+ all_gn_flags = GetFlags(gn.splitlines(),
+ os.path.join(os.getcwd(), gn_out_dir))
+ gyp_files = set(all_gyp_flags.keys())
+ gn_files = set(all_gn_flags.keys())
+ different_source_list = gyp_files != gn_files
+ if different_source_list:
+ print 'Different set of sources files:'
+ print ' In gyp, not in GN:\n %s' % '\n '.join(
+ sorted(gyp_files - gn_files))
+ print ' In GN, not in gyp:\n %s' % '\n '.join(
+ sorted(gn_files - gyp_files))
+ print '\nNote that flags will only be compared for files in both sets.\n'
+ file_list = gyp_files & gn_files
+ files_with_given_differences = {}
+ for filename in sorted(file_list):
+ gyp_flags = all_gyp_flags[filename]
+ gn_flags = all_gn_flags[filename]
+ differences = CompareLists(gyp_flags, gn_flags, 'dash_f')
+ differences += CompareLists(gyp_flags, gn_flags, 'defines')
+ differences += CompareLists(gyp_flags, gn_flags, 'include_dirs',
+ ['-I%s' % os.path.dirname(BASE_DIR)])
+ differences += CompareLists(gyp_flags, gn_flags, 'warnings',
+ # More conservative warnings in GN we consider to be OK.
+ dont_care_gyp=[
+ '/wd4091', # 'keyword' : ignored on left of 'type' when no variable
+ # is declared.
+ '/wd4456', # Declaration hides previous local declaration.
+ '/wd4457', # Declaration hides function parameter.
+ '/wd4458', # Declaration hides class member.
+ '/wd4459', # Declaration hides global declaration.
+ '/wd4702', # Unreachable code.
+ '/wd4800', # Forcing value to bool 'true' or 'false'.
+ '/wd4838', # Conversion from 'type' to 'type' requires a narrowing
+ # conversion.
+ ] if sys.platform == 'win32' else None,
+ dont_care_gn=[
+ '-Wendif-labels',
+ '-Wextra',
+ '-Wsign-compare',
+ ] if not sys.platform == 'win32' else None)
+ differences += CompareLists(gyp_flags, gn_flags, 'other')
+ if differences:
+ files_with_given_differences.setdefault(differences, []).append(filename)
+
+ for diff, files in files_with_given_differences.iteritems():
+ print '\n'.join(sorted(files))
+ print diff
+
+ print 'Total differences:', g_total_differences
+ # TODO(scottmg): Return failure on difference once we're closer to identical.
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
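
gyp_flag_compare.py is Python 2 (note the print statements), and its normalization helpers are module-level functions, so they can be exercised directly. The walk-through below feeds them a fabricated compile command; the file names are made up for illustration, and the expected values are worked out by hand from the function bodies above rather than taken from a real build. Importing the module chdirs to the v8 root as a side effect, so run this from deps/v8/tools:

```python
# Toy walk-through of the normalization helpers defined above (Python 2).
from gyp_flag_compare import (FindAndRemoveArgWithValue, MergeSpacedArgs,
                              NormalizeSymbolArguments)

cmd = ['clang++', '-o', 'obj/foo.o', '-MF', 'obj/foo.o.d',
       '-Xclang', '-load', '-g2', '-c', 'foo.cc']
out = FindAndRemoveArgWithValue(cmd, '-o')   # 'obj/foo.o'; flag and value removed
dep = FindAndRemoveArgWithValue(cmd, '-MF')  # 'obj/foo.o.d'
NormalizeSymbolArguments(cmd)                # rewrites '-g2' to '-g'
cmd = MergeSpacedArgs(cmd, '-Xclang')        # fuses '-Xclang -load' into one entry
assert cmd == ['clang++', '-Xclang -load', '-g', '-c', 'foo.cc']
```

A typical top-level invocation, going by the usage string in main(), would look like "tools/gyp_flag_compare.py out-gn/Release out-gyp/Release d8"; the two output directories here are hypothetical names for whatever GN and gyp builds you have on hand.
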
diff --git a/deps/v8/tools/ic-explorer.html b/deps/v8/tools/ic-explorer.html
index 43b486a50c..42bbc20396 100644
--- a/deps/v8/tools/ic-explorer.html
+++ b/deps/v8/tools/ic-explorer.html
@@ -1,338 +1,366 @@
<html>
- <head>
-<style>
- .entry-details {
- }
- .entry-details TD {
- }
- .details {
- width: 2em;
- border: 1px black dotted;
- }
- .count {
- text-align: right;
- width: 5em;
- font-family: monospace;
- }
- .percentage {
- text-align: right;
- width: 5em;
- font-family: monospace;
- }
- .key {
- padding-left: 1em;
- }
- .drilldown-group-title {
- font-weight: bold;
- padding: 0.5em 0 0.2em 0;
- }
-</style>
- <script>
-"use strict"
-var entries = [];
+<!--
+Copyright 2016 the V8 project authors. All rights reserved. Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
-class Entry {
- constructor(id, line) {
- this.id = id;
- this.line = line;
- var parts = line.split(" ");
- if (parts.length < 6) return
- this.isValid = false;
- if (parts[0][0] !== "[") return;
- if (parts[1] === "patching") return;
- this.type = parts[0].substr(1);
- this.category = "Other";
- if (this.type.indexOf("Store") !== -1) {
- this.category = "Store";
- } else if (this.type.indexOf("Load") !== -1) {
- this.category = "Load";
+<head>
+ <style>
+ .entry-details {}
+
+ .entry-details TD {}
+
+ .details {
+ width: 2em;
+ border: 1px black dotted;
+ }
+
+ .count {
+ text-align: right;
+ width: 5em;
+ font-family: monospace;
+ }
+
+ .percentage {
+ text-align: right;
+ width: 5em;
+ font-family: monospace;
+ }
+
+ .key {
+ padding-left: 1em;
}
- if (this.type.length == 0) return;
- if (this.type.indexOf('BinaryOpIC(') === 0) {
- this.type = "BinaryOpIC";
- var split = parts[0].split('(');
- this.state = "(" + split[1] + " => " + parts[2];
- var offset = this.parsePositionAndFile(parts, 6);
- if (offset == -1) return
- if (this.file === undefined) return
- this.file = this.file.slice(0,-1);
- } else {
- var offset = this.parsePositionAndFile(parts, 2);
- if (offset == -1) return
- this.state = parts[++offset];
- if (this.type !== "CompareIC") {
- // if there is no address we have a smi key
- var address = parts[++offset];
- if (address !== undefined && address.indexOf("0x") === 0) {
- this.key = parts.slice(++offset).join(" ");
+
+ .drilldown-group-title {
+ font-weight: bold;
+ padding: 0.5em 0 0.2em 0;
+ }
+ </style>
+ <script>
+ "use strict"
+ var entries = [];
+
+ class Entry {
+ constructor(id, line) {
+ this.id = id;
+ this.line = line;
+ var parts = line.split(" ");
+ if (parts.length < 6) return
+ this.isValid = false;
+ if (parts[0][0] !== "[") return;
+ if (parts[1] === "patching") return;
+ this.type = parts[0].substr(1);
+ this.category = "Other";
+ this.map = undefined;
+ if (this.type.indexOf("Store") !== -1) {
+ this.category = "Store";
+ } else if (this.type.indexOf("Load") !== -1) {
+ this.category = "Load";
+ }
+ if (this.type.length == 0) return;
+ if (this.type.indexOf('BinaryOpIC(') === 0) {
+ this.type = "BinaryOpIC";
+ var split = parts[0].split('(');
+ this.state = "(" + split[1] + " => " + parts[2];
+ var offset = this.parsePositionAndFile(parts, 6);
+ if (offset == -1) return
+ if (this.file === undefined) return
+ this.file = this.file.slice(0, -1);
} else {
- this.key = address;
+ var offset = this.parsePositionAndFile(parts, 2);
+ if (offset == -1) return
+ this.state = parts[++offset];
+ this.map = parts[offset + 1];
+ if (this.map !== undefined && this.map.startsWith("map=")) {
+ this.map = this.map.substring(4);
+ offset++;
+ } else {
+ this.map = undefined;
+ }
+ if (this.type !== "CompareIC") {
+ // if there is no address we have a smi key
+ var address = parts[++offset];
+ if (address !== undefined && address.indexOf("0x") === 0) {
+ this.key = parts.slice(++offset).join(" ");
+ } else {
+ this.key = address;
+ }
+ }
}
+ this.filePosition = this.file + " " + this.position;
+ if (this.key) {
+ var isStringKey = false
+ if (this.key.indexOf("<String[") === 0) {
+ isStringKey = true;
+ this.key = "\"" + this.key.slice(this.key.indexOf(']') + 3);
+ } else if (this.key.indexOf("<") === 0) {
+ this.key = this.key.slice(1);
+ }
+ if (this.key.endsWith(">]")) {
+ this.key = this.key.slice(0, -2);
+ } else if (this.key.endsWith("]")) {
+ this.key = this.key.slice(0, -1);
+ }
+ if (isStringKey) {
+ this.key = this.key + "\"";
+ }
+ }
+ this.isValid = true;
}
- }
- this.filePosition = this.file + " " + this.position;
- if (this.key) {
- var isStringKey = false
- if (this.key.indexOf("<String[") === 0) {
- isStringKey = true;
- this.key = "\"" + this.key.slice(this.key.indexOf(']')+3);
- } else if (this.key.indexOf("<") === 0) {
- this.key = this.key.slice(1);
- }
- if (this.key.endsWith(">]")) {
- this.key = this.key.slice(0, -2);
- } else if (this.key.endsWith("]")) {
- this.key = this.key.slice(0, -1);
- }
- if (isStringKey) {
- this.key = this.key + "\"";
+
+ parsePositionAndFile(parts, start) {
+ // find the position of 'at' in the parts array.
+ var offset = start;
+ for (var i = start + 1; i < parts.length; i++) {
+ offset++;
+ if (parts[i] == 'at') break;
+ }
+ if (parts[offset] !== 'at') return -1;
+ this.position = parts.slice(start, offset).join(' ');
+ offset += 1;
+ this.isNative = parts[offset] == "native"
+ offset += this.isNative ? 1 : 0;
+ this.file = parts[offset];
+ return offset;
}
}
- this.isValid = true;
- }
-
- parsePositionAndFile(parts, start) {
- // find the position of 'at' in the parts array.
- var offset = start;
- for (var i = start+1; i<parts.length; i++) {
- offset++;
- if (parts[i] == 'at') break;
- }
- if (parts[offset] !== 'at') return -1;
- this.position = parts.slice(start, offset).join(' ');
- offset += 1;
- this.isNative = parts[offset] == "native"
- offset += this.isNative ? 1 : 0;
- this.file = parts[offset];
- return offset;
- }
-}
-function loadFile() {
- var files = document.getElementById("uploadInput").files;
+ function loadFile() {
+ var files = document.getElementById("uploadInput").files;
- var file = files[0];
- var reader = new FileReader();
+ var file = files[0];
+ var reader = new FileReader();
+
+ reader.onload = function(evt) {
+ entries = [];
+ var end = this.result.length;
+ var current = 0;
+ var next = 0;
+ var line;
+ var i = 0;
+ var entry;
+ while (current < end) {
+ next = this.result.indexOf("\n", current);
+ if (next === -1) break;
+ i++;
+ line = this.result.substring(current, next);
+ current = next + 1;
+ entry = new Entry(i, line);
+ if (entry.isValid) entries.push(entry);
+ }
- reader.onload = function(evt) {
- entries = [];
- var end = this.result.length;
- var current = 0;
- var next = 0;
- var line;
- var i = 0;
- var entry;
- while (current < end) {
- next = this.result.indexOf("\n", current);
- if (next === -1) break;
- i++;
-
- line = this.result.substring(current, next);
- current = next+1;
- entry = new Entry(i, line);
- if (entry.isValid) entries.push(entry);
+ document.getElementById("count").innerHTML = i;
+ updateTable();
+ }
+ reader.readAsText(file);
+ initGroupKeySelect();
}
-
- document.getElementById("count").innerHTML = i;
- updateTable();
- }
- reader.readAsText(file);
- initGroupKeySelect();
-}
-var properties = ['type', 'category', 'file', 'filePosition', 'state' , 'key', 'isNative']
+ var properties = ['type', 'category', 'file', 'filePosition', 'state',
+ 'key', 'isNative', 'map'
+ ]
-class Group {
- constructor(property, key, entry) {
- this.property = property;
- this.key = key;
- this.count = 1;
- this.entries = [entry];
- this.percentage = undefined;
- this.groups = undefined;
- }
-
- add(entry) {
- this.count ++;
- this.entries.push(entry)
- }
-
- createSubGroups() {
- this.groups = {};
- for (var i=0; i<properties.length; i++) {
- var subProperty = properties[i];
- if (this.property == subProperty) continue;
- this.groups[subProperty] = groupBy(this.entries, subProperty);
- }
- }
-}
+ class Group {
+ constructor(property, key, entry) {
+ this.property = property;
+ this.key = key;
+ this.count = 1;
+ this.entries = [entry];
+ this.percentage = undefined;
+ this.groups = undefined;
+ }
-function groupBy(entries, property) {
- var accumulator = {};
- accumulator.__proto__ = null;
- var length = entries.length;
- for (var i = 0; i < length; i++) {
- var entry = entries[i];
- var key = entry[property];
- if (accumulator[key] == undefined) {
- accumulator[key] = new Group(property, key, entry)
- } else {
- var group = accumulator[key];
- if (group.entries == undefined) console.log([group, entry]);
- group.add(entry)
+ add(entry) {
+ this.count++;
+ this.entries.push(entry)
+ }
+
+ createSubGroups() {
+ this.groups = {};
+ for (var i = 0; i < properties.length; i++) {
+ var subProperty = properties[i];
+ if (this.property == subProperty) continue;
+ this.groups[subProperty] = groupBy(this.entries, subProperty);
+ }
+ }
}
- }
- var result = []
- for (var key in accumulator) {
- var group = accumulator[key];
- group.percentage = Math.round(group.count / length * 100 * 100) / 100;
- result.push(group);
- }
- result.sort((a,b) => { return b.count - a.count });
- return result;
-}
+ function groupBy(entries, property) {
+ var accumulator = {};
+ accumulator.__proto__ = null;
+ var length = entries.length;
+ for (var i = 0; i < length; i++) {
+ var entry = entries[i];
+ var key = entry[property];
+ if (accumulator[key] == undefined) {
+ accumulator[key] = new Group(property, key, entry)
+ } else {
+ var group = accumulator[key];
+ if (group.entries == undefined) console.log([group, entry]);
+ group.add(entry)
+ }
+ }
+ var result = []
+ for (var key in accumulator) {
+ var group = accumulator[key];
+ group.percentage = Math.round(group.count / length * 100 * 100) / 100;
+ result.push(group);
+ }
+ result.sort((a, b) => {
+ return b.count - a.count
+ });
+ return result;
+ }
-function updateTable() {
- var select = document.getElementById("group-key");
- var key = select.options[select.selectedIndex].text;
- console.log(key);
- var tableBody = document.getElementById("table-body");
- removeAllChildren(tableBody);
- var groups = groupBy(entries, key, true);
- display(groups, tableBody);
-}
+ function escapeHtml(unsafe) {
+ if (!unsafe) return "";
+ return unsafe.toString()
+ .replace(/&/g, "&amp;")
+ .replace(/</g, "&lt;")
+ .replace(/>/g, "&gt;")
+ .replace(/"/g, "&quot;")
+ .replace(/'/g, "&#039;");
+ }
-function selecedOption(node) {
- return node.options[node.selectedIndex]
-}
+ function updateTable() {
+ var select = document.getElementById("group-key");
+ var key = select.options[select.selectedIndex].text;
+ console.log(key);
+ var tableBody = document.getElementById("table-body");
+ removeAllChildren(tableBody);
+ var groups = groupBy(entries, key, true);
+ display(groups, tableBody);
+ }
-function removeAllChildren(node) {
- while (node.firstChild) {
- node.removeChild(node.firstChild);
- }
-}
+ function selecedOption(node) {
+ return node.options[node.selectedIndex]
+ }
-function display(entries, parent) {
- var fragment = document.createDocumentFragment();
+ function removeAllChildren(node) {
+ while (node.firstChild) {
+ node.removeChild(node.firstChild);
+ }
+ }
- function td(tr, content, className) {
- var td = document.createElement("td");
- td.innerHTML = content;
- td.className = className
- tr.appendChild(td);
- return td
- }
- var max = Math.min(1000, entries.length)
- for (var i = 0; i<max; i++) {
- var entry = entries[i];
- var tr = document.createElement("tr");
- tr.entry = entry;
-    td(tr, '<span onclick="toggleDetails(this)">details</span>', 'details');
- td(tr, entry.percentage +"%", 'percentage');
- td(tr, entry.count, 'count');
- td(tr, entry.key, 'key');
- fragment.appendChild(tr);
- }
- var omitted = entries.length - max;
- if (omitted > 0) {
- var tr = document.createElement("tr");
- var td = td(tr, 'Omitted ' + omitted + " entries.");
- td.colSpan = 4;
- fragment.appendChild(tr);
- }
- parent.appendChild(fragment);
-}
+ function display(entries, parent) {
+ var fragment = document.createDocumentFragment();
-function displayDrilldown(entry, previousSibling) {
- var tr = document.createElement('tr');
- tr.className = "entry-details";
- tr.style.display = "none";
- // indent by one td.
- tr.appendChild(document.createElement("td"));
- var td = document.createElement("td");
- td.colSpan = 3;
- for (var key in entry.groups) {
- td.appendChild(displayDrilldownGroup(entry, key));
- }
- tr.appendChild(td);
- // Append the new TR after previousSibling.
- previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
-}
+ function td(tr, content, className) {
+ var td = document.createElement("td");
+ td.innerHTML = content;
+ td.className = className
+ tr.appendChild(td);
+ return td
+ }
+ var max = Math.min(1000, entries.length)
+ for (var i = 0; i < max; i++) {
+ var entry = entries[i];
+ var tr = document.createElement("tr");
+ tr.entry = entry;
+        td(tr, '<span onclick="toggleDetails(this)">details</span>', 'details');
+ td(tr, entry.percentage + "%", 'percentage');
+ td(tr, entry.count, 'count');
+ td(tr, escapeHtml(entry.key), 'key');
+ fragment.appendChild(tr);
+ }
+ var omitted = entries.length - max;
+ if (omitted > 0) {
+ var tr = document.createElement("tr");
+ var td = td(tr, 'Omitted ' + omitted + " entries.");
+ td.colSpan = 4;
+ fragment.appendChild(tr);
+ }
+ parent.appendChild(fragment);
+ }
-function displayDrilldownGroup(entry, key) {
- var max = 20;
- var group = entry.groups[key];
- var div = document.createElement("div")
- div.className = 'drilldown-group-title'
- div.innerHTML = key + ' [top ' + max + ']';
- var table = document.createElement("table");
- display(group.slice(0, max), table, false)
- div.appendChild(table);
- return div;
-}
+ function displayDrilldown(entry, previousSibling) {
+ var tr = document.createElement('tr');
+ tr.className = "entry-details";
+ tr.style.display = "none";
+ // indent by one td.
+ tr.appendChild(document.createElement("td"));
+ var td = document.createElement("td");
+ td.colSpan = 3;
+ for (var key in entry.groups) {
+ td.appendChild(displayDrilldownGroup(entry, key));
+ }
+ tr.appendChild(td);
+ // Append the new TR after previousSibling.
+ previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
+ }
-function toggleDetails(node) {
- var tr = node.parentNode.parentNode;
- var entry = tr.entry;
+ function displayDrilldownGroup(entry, key) {
+ var max = 20;
+ var group = entry.groups[key];
+    var div = document.createElement("div");
+    div.className = 'drilldown-group-title';
+    div.innerHTML = key + ' [top ' + max + ' out of ' + group.length + ']';
+    var table = document.createElement("table");
+    display(group.slice(0, max), table);
+ div.appendChild(table);
+ return div;
+ }
- // Create subgroup in-place if the don't exist yet.
- if (entry.groups === undefined) {
- entry.createSubGroups();
- displayDrilldown(entry, tr);
- }
- var details = tr.nextSibling;
- var display = details.style.display;
- if (display != "none") {
- display = "none";
- }else {
- display = "table-row"
- };
- details.style.display = display;
-}
+ function toggleDetails(node) {
+ var tr = node.parentNode.parentNode;
+ var entry = tr.entry;
-function initGroupKeySelect() {
- var select = document.getElementById("group-key");
- for (var i in properties) {
- var option = document.createElement("option");
- option.text = properties[i];
- select.add(option);
- }
-}
+    // Create subgroups in place if they don't exist yet.
+ if (entry.groups === undefined) {
+ entry.createSubGroups();
+ displayDrilldown(entry, tr);
+ }
+ var details = tr.nextSibling;
+ var display = details.style.display;
+ if (display != "none") {
+ display = "none";
+ } else {
+      display = "table-row";
+    }
+ details.style.display = display;
+ }
+ function initGroupKeySelect() {
+ var select = document.getElementById("group-key");
+ for (var i in properties) {
+ var option = document.createElement("option");
+ option.text = properties[i];
+ select.add(option);
+ }
+ }
</script>
- </head>
- <body>
- <h1>
+</head>
+
+<body>
+ <h1>
<span style="color: #00FF00">I</span>
<span style="color: #FF00FF">C</span>
<span style="color: #00FFFF">E</span>
- </h1>
- Your IC-Explorer.
- <h2>Usage</h2>
- Run your script with <code>--trace_ic</code> and upload on this page:<br/>
- <code>/path/to/d8 --trace_ic your_script.js > trace.txt</code>
- <h2>Data</h2>
- <form name="fileForm">
- <p>
- <input id="uploadInput" type="file" name="files" onchange="loadFile();" >
- trace entries: <span id="count">0</span>
- </p>
- </form>
- <h2>Result</h2>
+ </h1> Your IC-Explorer.
+  <h2>Usage</h2> Run your script with <code>--trace_ic</code> and upload the output on this page:<br/>
+ <code>/path/to/d8 --trace_ic your_script.js > trace.txt</code>
+ <h2>Data</h2>
+ <form name="fileForm">
<p>
+ <input id="uploadInput" type="file" name="files" onchange="loadFile();"> trace
+ entries: <span id="count">0</span>
+ </p>
+ </form>
+ <h2>Result</h2>
+ <p>
Group-Key:
<select id="group-key" onchange="updateTable()"></select>
- </p>
- <p>
- <table id="table" width="100%">
- <tbody id="table-body">
- </tbody>
- </table>
- </p>
- </body>
+ </p>
+ <p>
+ <table id="table" width="100%">
+ <tbody id="table-body">
+ </tbody>
+ </table>
+ </p>
+</body>
+
</html>
diff --git a/deps/v8/tools/ignition/bytecode_dispatches_report.py b/deps/v8/tools/ignition/bytecode_dispatches_report.py
new file mode 100755
index 0000000000..97f8e8394d
--- /dev/null
+++ b/deps/v8/tools/ignition/bytecode_dispatches_report.py
@@ -0,0 +1,281 @@
+#! /usr/bin/python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+import argparse
+import heapq
+import json
+from matplotlib import colors
+from matplotlib import pyplot
+import numpy
+import struct
+import sys
+
+
+__DESCRIPTION = """
+Process v8.ignition_dispatches_table.json and list top counters,
+or plot a dispatch heatmap.
+
+Note that handlers that may not dispatch, or never dispatch (e.g. Return or
+Throw), do not show up in the results.
+"""
+
+
+__HELP_EPILOGUE = """
+examples:
+ # Print the hottest bytecodes in descending order, reading from
+  # default filename v8.ignition_dispatches_table.json (default mode)
+ $ tools/ignition/bytecode_dispatches_report.py
+
+ # Print the hottest 15 bytecode dispatch pairs reading from data.json
+ $ tools/ignition/bytecode_dispatches_report.py -t -n 15 data.json
+
+  # Save heatmap to default filename v8.ignition_dispatches_table.svg
+ $ tools/ignition/bytecode_dispatches_report.py -p
+
+ # Save heatmap to filename data.svg
+ $ tools/ignition/bytecode_dispatches_report.py -p -o data.svg
+
+ # Open the heatmap in an interactive viewer
+ $ tools/ignition/bytecode_dispatches_report.py -p -i
+
+ # Display the top 5 sources and destinations of dispatches to/from LdaZero
+ $ tools/ignition/bytecode_dispatches_report.py -f LdaZero -n 5
+"""
+
+__COUNTER_BITS = struct.calcsize("P") * 8 # Size in bits of a pointer
+__COUNTER_MAX = 2**__COUNTER_BITS - 1
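+# On a typical 64-bit build __COUNTER_MAX is 2**64 - 1; a counter equal to
+# this value has most likely saturated (see
+# warn_if_counter_may_have_saturated below).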
+
+
+def warn_if_counter_may_have_saturated(dispatches_table):
+ for source, counters_from_source in iteritems(dispatches_table):
+ for destination, counter in iteritems(counters_from_source):
+ if counter == __COUNTER_MAX:
+ print "WARNING: {} -> {} may have saturated.".format(source,
+ destination)
+
+
+def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
+ def flattened_counters_generator():
+ for source, counters_from_source in iteritems(dispatches_table):
+ for destination, counter in iteritems(counters_from_source):
+ yield source, destination, counter
+
+ return heapq.nlargest(top_count, flattened_counters_generator(),
+ key=lambda x: x[2])
+
+
+def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
+ top_bytecode_dispatch_pairs = (
+ find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
+ print "Top {} bytecode dispatch pairs:".format(top_count)
+ for source, destination, counter in top_bytecode_dispatch_pairs:
+ print "{:>12d}\t{} -> {}".format(counter, source, destination)
+
+
+def find_top_bytecodes(dispatches_table):
+ top_bytecodes = []
+ for bytecode, counters_from_bytecode in iteritems(dispatches_table):
+ top_bytecodes.append((bytecode, sum(itervalues(counters_from_bytecode))))
+
+ top_bytecodes.sort(key=lambda x: x[1], reverse=True)
+ return top_bytecodes
+
+
+def print_top_bytecodes(dispatches_table):
+ top_bytecodes = find_top_bytecodes(dispatches_table)
+ print "Top bytecodes:"
+ for bytecode, counter in top_bytecodes:
+ print "{:>12d}\t{}".format(counter, bytecode)
+
+
+def find_top_dispatch_sources_and_destinations(
+ dispatches_table, bytecode, top_count, sort_source_relative):
+ sources = []
+ for source, destinations in iteritems(dispatches_table):
+ total = float(sum(itervalues(destinations)))
+ if bytecode in destinations:
+ count = destinations[bytecode]
+ sources.append((source, count, count / total))
+
+ destinations = []
+ bytecode_destinations = dispatches_table[bytecode]
+ bytecode_total = float(sum(itervalues(bytecode_destinations)))
+ for destination, count in iteritems(bytecode_destinations):
+ destinations.append((destination, count, count / bytecode_total))
+
+ return (heapq.nlargest(top_count, sources,
+ key=lambda x: x[2 if sort_source_relative else 1]),
+ heapq.nlargest(top_count, destinations, key=lambda x: x[1]))
+
+
+def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode,
+ top_count, sort_relative):
+ top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
+ dispatches_table, bytecode, top_count, sort_relative)
+ print "Top sources of dispatches to {}:".format(bytecode)
+ for source_name, counter, ratio in top_sources:
+ print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name)
+
+ print "\nTop destinations of dispatches from {}:".format(bytecode)
+ for destination_name, counter, ratio in top_destinations:
+ print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name)
+
+
+def build_counters_matrix(dispatches_table):
+ labels = sorted(dispatches_table.keys())
+
+ counters_matrix = numpy.empty([len(labels), len(labels)], dtype=int)
+ for from_index, from_name in enumerate(labels):
+    current_row = dispatches_table[from_name]
+ for to_index, to_name in enumerate(labels):
+ counters_matrix[from_index, to_index] = current_row.get(to_name, 0)
+
+ # Reverse y axis for a nicer appearance
+ xlabels = labels
+ ylabels = list(reversed(xlabels))
+ counters_matrix = numpy.flipud(counters_matrix)
+
+ return counters_matrix, xlabels, ylabels
+
+
+def plot_dispatches_table(dispatches_table, figure, axis):
+ counters_matrix, xlabels, ylabels = build_counters_matrix(dispatches_table)
+
+ image = axis.pcolor(
+ counters_matrix,
+ cmap="jet",
+ norm=colors.LogNorm(),
+ edgecolor="grey",
+ linestyle="dotted",
+ linewidth=0.5
+ )
+
+ axis.xaxis.set(
+ ticks=numpy.arange(0.5, len(xlabels)),
+ label="From bytecode handler"
+ )
+ axis.xaxis.tick_top()
+ axis.set_xlim(0, len(xlabels))
+ axis.set_xticklabels(xlabels, rotation="vertical")
+
+ axis.yaxis.set(
+ ticks=numpy.arange(0.5, len(ylabels)),
+ label="To bytecode handler",
+ ticklabels=ylabels
+ )
+ axis.set_ylim(0, len(ylabels))
+
+ figure.colorbar(
+ image,
+ ax=axis,
+ fraction=0.01,
+ pad=0.01
+ )
+
+
+def parse_command_line():
+ command_line_parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=__DESCRIPTION,
+ epilog=__HELP_EPILOGUE
+ )
+ command_line_parser.add_argument(
+ "--plot-size", "-s",
+ metavar="N",
+ default=30,
+ help="shorter side in inches of the output plot (default 30)"
+ )
+ command_line_parser.add_argument(
+ "--plot", "-p",
+ action="store_true",
+ help="plot dispatch pairs heatmap"
+ )
+ command_line_parser.add_argument(
+ "--interactive", "-i",
+ action="store_true",
+ help="open the heatmap in an interactive viewer, instead of writing to file"
+ )
+ command_line_parser.add_argument(
+ "--top-bytecode-dispatch-pairs", "-t",
+ action="store_true",
+ help="print the top bytecode dispatch pairs"
+ )
+ command_line_parser.add_argument(
+ "--top-entries-count", "-n",
+ metavar="N",
+ type=int,
+ default=10,
+ help="print N top entries when running with -t or -f (default 10)"
+ )
+ command_line_parser.add_argument(
+ "--top-dispatches-for-bytecode", "-f",
+ metavar="<bytecode name>",
+    help="print top dispatch sources and destinations for the specified bytecode"
+ )
+ command_line_parser.add_argument(
+ "--output-filename", "-o",
+ metavar="<output filename>",
+ default="v8.ignition_dispatches_table.svg",
+    help=("file to save the plot to. The file type is deduced from the "
+ "extension. PDF, SVG, PNG supported")
+ )
+ command_line_parser.add_argument(
+ "--sort-sources-relative", "-r",
+ action="store_true",
+    help=("print top sources in order of how often they dispatch to the "
+ "specified bytecode, only applied when using -f")
+ )
+ command_line_parser.add_argument(
+ "input_filename",
+ metavar="<input filename>",
+ default="v8.ignition_dispatches_table.json",
+ nargs='?',
+ help="Ignition counters JSON file"
+ )
+
+ return command_line_parser.parse_args()
+
+
+def itervalues(d):
+ return d.values() if sys.version_info[0] > 2 else d.itervalues()
+
+
+def iteritems(d):
+ return d.items() if sys.version_info[0] > 2 else d.iteritems()
+
+
+def main():
+ program_options = parse_command_line()
+
+ with open(program_options.input_filename) as stream:
+ dispatches_table = json.load(stream)
+
+ warn_if_counter_may_have_saturated(dispatches_table)
+
+ if program_options.plot:
+ figure, axis = pyplot.subplots()
+ plot_dispatches_table(dispatches_table, figure, axis)
+
+ if program_options.interactive:
+ pyplot.show()
+ else:
+ figure.set_size_inches(program_options.plot_size,
+ program_options.plot_size)
+ pyplot.savefig(program_options.output_filename)
+ elif program_options.top_bytecode_dispatch_pairs:
+ print_top_bytecode_dispatch_pairs(
+ dispatches_table, program_options.top_entries_count)
+ elif program_options.top_dispatches_for_bytecode:
+ print_top_dispatch_sources_and_destinations(
+ dispatches_table, program_options.top_dispatches_for_bytecode,
+ program_options.top_entries_count, program_options.sort_sources_relative)
+ else:
+ print_top_bytecodes(dispatches_table)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/tools/ignition/bytecode_dispatches_report_test.py b/deps/v8/tools/ignition/bytecode_dispatches_report_test.py
new file mode 100644
index 0000000000..9be19e7f63
--- /dev/null
+++ b/deps/v8/tools/ignition/bytecode_dispatches_report_test.py
@@ -0,0 +1,62 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import bytecode_dispatches_report as bdr
+import unittest
+
+
+class BytecodeDispatchesReportTest(unittest.TestCase):
+ def test_find_top_counters(self):
+ top_counters = bdr.find_top_bytecode_dispatch_pairs({
+ "a": {"a": 10, "b": 8, "c": 99},
+ "b": {"a": 1, "b": 4, "c": 1},
+ "c": {"a": 42, "b": 3, "c": 7}}, 5)
+ self.assertListEqual(top_counters, [
+ ('a', 'c', 99),
+ ('c', 'a', 42),
+ ('a', 'a', 10),
+ ('a', 'b', 8),
+ ('c', 'c', 7)])
+
+ def test_build_counters_matrix(self):
+ counters_matrix, xlabels, ylabels = bdr.build_counters_matrix({
+ "a": {"a": 10, "b": 8, "c": 7},
+ "b": {"a": 1, "c": 4},
+ "c": {"a": 42, "b": 12, "c": 99}})
+ self.assertTrue((counters_matrix == [[42, 12, 99],
+ [ 1, 0, 4],
+ [10, 8, 7]]).all())
+ self.assertListEqual(xlabels, ['a', 'b', 'c'])
+ self.assertListEqual(ylabels, ['c', 'b', 'a'])
+
+ def test_find_top_bytecodes(self):
+ top_dispatch_sources = bdr.find_top_bytecodes({
+ "a": {"a": 10, "b": 8, "c": 7},
+ "b": {"a": 1, "c": 4},
+ "c": {"a": 42, "b": 12, "c": 99}
+ })
+ self.assertListEqual(top_dispatch_sources, [
+ ('c', 153),
+ ('a', 25),
+ ('b', 5)
+ ])
+
+ def test_find_top_dispatch_sources_and_destinations(self):
+ d = {
+ "a": {"a": 4, "b": 2, "c": 4},
+ "b": {"a": 1, "c": 4},
+ "c": {"a": 40, "b": 10, "c": 50}
+ }
+ top_sources, top_dests = bdr.find_top_dispatch_sources_and_destinations(
+ d, "b", 10, False)
+ self.assertListEqual(top_sources, [
+ ("c", 10, 0.1),
+ ("a", 2, 0.2)
+ ])
+ top_sources, top_dests = bdr.find_top_dispatch_sources_and_destinations(
+ d, "b", 10, True)
+ self.assertListEqual(top_sources, [
+ ("a", 2, 0.2),
+ ("c", 10, 0.1)
+ ])
diff --git a/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py b/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py
new file mode 100755
index 0000000000..6681190d99
--- /dev/null
+++ b/deps/v8/tools/ignition/linux_perf_bytecode_annotate.py
@@ -0,0 +1,174 @@
+#! /usr/bin/python2
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+import argparse
+import collections
+import os
+import subprocess
+import sys
+
+
+__DESCRIPTION = """
+Processes a perf.data sample file and annotates the hottest instructions in a
+given bytecode handler.
+"""
+
+
+__HELP_EPILOGUE = """
+Note:
+  This tool uses the disassembly of the interpreter's bytecode handler codegen
+  from out/<arch>.debug/d8. You should ensure that this binary is in sync with
+ the version used to generate the perf profile.
+
+ Also, the tool depends on the symbol offsets from perf samples being accurate.
+ As such, you should use the ":pp" suffix for events.
+
+Examples:
+ EVENT_TYPE=cycles:pp tools/run-perf.sh out/x64.release/d8
+ tools/ignition/linux_perf_bytecode_annotate.py Add
+"""
+
+
+def bytecode_offset_generator(perf_stream, bytecode_name):
+ skip_until_end_of_chain = False
+  bytecode_symbol = "BytecodeHandler:" + bytecode_name
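+  # A matching sample line looks like (example taken from the unit tests):
+  #   abcdef76 BytecodeHandler:bar+0x12
+  # and yields the integer offset 0x12, i.e. 18.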
+
+ for line in perf_stream:
+ # Lines starting with a "#" are comments, skip them.
+ if line[0] == "#":
+ continue
+ line = line.strip()
+
+ # Empty line signals the end of the callchain.
+ if not line:
+ skip_until_end_of_chain = False
+ continue
+
+ if skip_until_end_of_chain:
+ continue
+
+ symbol_and_offset = line.split(" ", 1)[1]
+
+ if symbol_and_offset.startswith("BytecodeHandler:"):
+ skip_until_end_of_chain = True
+
+ if symbol_and_offset.startswith(bytecode_symbol):
+ yield int(symbol_and_offset.split("+", 1)[1], 16)
+
+
+def bytecode_offset_counts(bytecode_offsets):
+ offset_counts = collections.defaultdict(int)
+ for offset in bytecode_offsets:
+ offset_counts[offset] += 1
+ return offset_counts
+
+
+def bytecode_disassembly_generator(ignition_codegen, bytecode_name):
+ name_string = "name = " + bytecode_name
+ for line in ignition_codegen:
+ if line.startswith(name_string):
+ break
+
+ # Found the bytecode disassembly.
+ for line in ignition_codegen:
+ line = line.strip()
+ # Blank line marks the end of the bytecode's disassembly.
+ if not line:
+ return
+
+ # Only yield disassembly output.
+ if not line.startswith("0x"):
+ continue
+
+ yield line
+
+
+def print_disassembly_annotation(offset_counts, bytecode_disassembly):
+ total = sum(offset_counts.values())
+ offsets = sorted(offset_counts, reverse=True)
+ def next_offset():
+ return offsets.pop() if offsets else -1
+
+ current_offset = next_offset()
+
+ for line in bytecode_disassembly:
+ disassembly_offset = int(line.split()[1])
+ if disassembly_offset == current_offset:
+ count = offset_counts[current_offset]
+ percentage = 100.0 * count / total
+ print "{:>8d} ({:>5.1f}%) ".format(count, percentage),
+ current_offset = next_offset()
+ else:
+ print " ",
+ print line
+
+ if offsets:
+ print ("WARNING: Offsets not empty. Output is most likely invalid due to "
+ "a mismatch between perf output and debug d8 binary.")
+
+
+def parse_command_line():
+ command_line_parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=__DESCRIPTION,
+ epilog=__HELP_EPILOGUE)
+
+ command_line_parser.add_argument(
+ "--arch", "-a",
+ help="The architecture (default: x64)",
+ default="x64",
+ )
+ command_line_parser.add_argument(
+ "--input", "-i",
+ help="perf sample file to process (default: perf.data)",
+ default="perf.data",
+ metavar="<perf filename>",
+ dest="perf_filename"
+ )
+ command_line_parser.add_argument(
+ "--output", "-o",
+ help="output file name (stdout if omitted)",
+ type=argparse.FileType("wt"),
+ default=sys.stdout,
+ metavar="<output filename>",
+ dest="output_stream"
+ )
+ command_line_parser.add_argument(
+ "bytecode_name",
+ metavar="<bytecode name>",
+ nargs="?",
+ help="The bytecode handler to annotate"
+ )
+
+ return command_line_parser.parse_args()
+
+
+def main():
+ program_options = parse_command_line()
+ perf = subprocess.Popen(["perf", "script", "-f", "ip,sym,symoff",
+ "-i", program_options.perf_filename],
+ stdout=subprocess.PIPE)
+
+ v8_root_path = os.path.dirname(__file__) + "/../../"
+ d8_path = "{}/out/{}.debug/d8".format(v8_root_path, program_options.arch)
+ d8_codegen = subprocess.Popen([d8_path, "--ignition",
+ "--trace-ignition-codegen", "-e", "1"],
+ stdout=subprocess.PIPE)
+
+ bytecode_offsets = bytecode_offset_generator(
+ perf.stdout, program_options.bytecode_name)
+ offset_counts = bytecode_offset_counts(bytecode_offsets)
+
+ bytecode_disassembly = bytecode_disassembly_generator(
+ d8_codegen.stdout, program_options.bytecode_name)
+
+ print_disassembly_annotation(offset_counts, bytecode_disassembly)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py b/deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py
new file mode 100644
index 0000000000..15abbeda08
--- /dev/null
+++ b/deps/v8/tools/ignition/linux_perf_bytecode_annotate_test.py
@@ -0,0 +1,85 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import StringIO
+import unittest
+import linux_perf_bytecode_annotate as bytecode_annotate
+
+
+PERF_SCRIPT_OUTPUT = """
+# This line is a comment
+# This should be ignored too
+#
+# cdefab01 aRandomSymbol::Name(to, be, ignored)
+
+ 00000000 firstSymbol
+ 00000123 secondSymbol
+
+ 01234567 foo
+ abcdef76 BytecodeHandler:bar+0x12
+ 76543210 baz
+ abcdef76 BytecodeHandler:bar+0x16
+ 76543210 baz
+
+ 01234567 foo
+ abcdef76 BytecodeHandler:foo+0x1
+ 76543210 baz
+ abcdef76 BytecodeHandler:bar+0x2
+ 76543210 bar
+
+ abcdef76 BytecodeHandler:bar+0x19
+
+ abcdef76 BytecodeHandler:bar+0x12
+
+ abcdef76 BytecodeHandler:bar+0x12
+"""
+
+
+D8_CODEGEN_OUTPUT = """
+kind = BYTECODE_HANDLER
+name = foo
+compiler = turbofan
+Instructions (size = 3)
+0x3101394a3c0 0 55 push rbp
+0x3101394a3c1 1 ffe3 jmp rbx
+
+kind = BYTECODE_HANDLER
+name = bar
+compiler = turbofan
+Instructions (size = 5)
+0x3101394b3c0 0 55 push rbp
+0x3101394b3c1 1 4883c428 REX.W addq rsp,0x28
+# Unexpected comment
+0x3101394b3c5 5 ffe3 jmp rbx
+
+kind = BYTECODE_HANDLER
+name = baz
+compiler = turbofan
+Instructions (size = 5)
+0x3101394c3c0 0 55 push rbp
+0x3101394c3c1 1 4883c428 REX.W addq rsp,0x28
+0x3101394c3c5 5 ffe3 jmp rbx
+"""
+
+
+class LinuxPerfBytecodeAnnotateTest(unittest.TestCase):
+
+ def test_bytecode_offset_generator(self):
+ perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
+ offsets = list(
+ bytecode_annotate.bytecode_offset_generator(perf_stream, "bar"))
+ self.assertListEqual(offsets, [18, 25, 18, 18])
+
+ def test_bytecode_disassembly_generator(self):
+ codegen_stream = StringIO.StringIO(D8_CODEGEN_OUTPUT)
+ disassembly = list(
+ bytecode_annotate.bytecode_disassembly_generator(codegen_stream, "bar"))
+ self.assertListEqual(disassembly, [
+ "0x3101394b3c0 0 55 push rbp",
+ "0x3101394b3c1 1 4883c428 REX.W addq rsp,0x28",
+ "0x3101394b3c5 5 ffe3 jmp rbx"])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/deps/v8/tools/ignition/linux_perf_report.py b/deps/v8/tools/ignition/linux_perf_report.py
new file mode 100755
index 0000000000..eaf85b3f91
--- /dev/null
+++ b/deps/v8/tools/ignition/linux_perf_report.py
@@ -0,0 +1,223 @@
+#! /usr/bin/python2
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+import argparse
+import collections
+import re
+import subprocess
+import sys
+
+
+__DESCRIPTION = """
+Processes a perf.data sample file and reports the hottest Ignition bytecodes,
+or writes an input file for flamegraph.pl.
+"""
+
+
+__HELP_EPILOGUE = """
+examples:
+ # Get a flamegraph for Ignition bytecode handlers on Octane benchmark,
+ # without considering the time spent compiling JS code, entry trampoline
+ # samples and other non-Ignition samples.
+ #
+ $ tools/run-perf.sh out/x64.release/d8 \\
+ --ignition --noturbo --nocrankshaft run.js
+ $ tools/ignition/linux_perf_report.py --flamegraph -o out.collapsed
+ $ flamegraph.pl --colors js out.collapsed > out.svg
+
+ # Same as above, but show all samples, including time spent compiling JS code,
+ # entry trampoline samples and other samples.
+ $ # ...
+ $ tools/ignition/linux_perf_report.py \\
+ --flamegraph --show-all -o out.collapsed
+ $ # ...
+
+ # Same as above, but show full function signatures in the flamegraph.
+ $ # ...
+ $ tools/ignition/linux_perf_report.py \\
+ --flamegraph --show-full-signatures -o out.collapsed
+ $ # ...
+
+ # See the hottest bytecodes on Octane benchmark, by number of samples.
+ #
+ $ tools/run-perf.sh out/x64.release/d8 \\
+ --ignition --noturbo --nocrankshaft octane/run.js
+ $ tools/ignition/linux_perf_report.py
+"""
+
+
+COMPILER_SYMBOLS_RE = re.compile(
+ r"v8::internal::(?:\(anonymous namespace\)::)?Compile|v8::internal::Parser")
+
+
+def strip_function_parameters(symbol):
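+  # Strip the trailing parameter list while keeping template parentheses
+  # intact. Per the unit tests: "foo(bar)" -> "foo", while
+  # "v8::(anonymous ns)::bar<thing(with, parentheses)>(baz, poe)" keeps its
+  # "<thing(with, parentheses)>" part.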
+ if symbol[-1] != ')': return symbol
+ pos = 1
+ parenthesis_count = 0
+ for c in reversed(symbol):
+ if c == ')':
+ parenthesis_count += 1
+ elif c == '(':
+ parenthesis_count -= 1
+ if parenthesis_count == 0:
+ break
+ else:
+ pos += 1
+ return symbol[:-pos]
+
+
+def collapsed_callchains_generator(perf_stream, show_all=False,
+ show_full_signatures=False):
+ current_chain = []
+ skip_until_end_of_chain = False
+ compiler_symbol_in_chain = False
+
+ for line in perf_stream:
+ # Lines starting with a "#" are comments, skip them.
+ if line[0] == "#":
+ continue
+
+ line = line.strip()
+
+ # Empty line signals the end of the callchain.
+ if not line:
+ if not skip_until_end_of_chain and current_chain and show_all:
+ current_chain.append("[other]")
+ yield current_chain
+ # Reset parser status.
+ current_chain = []
+ skip_until_end_of_chain = False
+ compiler_symbol_in_chain = False
+ continue
+
+ if skip_until_end_of_chain:
+ continue
+
+ # Trim the leading address and the trailing +offset, if present.
+ symbol = line.split(" ", 1)[1].split("+", 1)[0]
+ if not show_full_signatures:
+ symbol = strip_function_parameters(symbol)
+ current_chain.append(symbol)
+
+ if symbol.startswith("BytecodeHandler:"):
+ yield current_chain
+ skip_until_end_of_chain = True
+ elif symbol == "Stub:CEntryStub" and compiler_symbol_in_chain:
+ if show_all:
+ current_chain[-1] = "[compiler]"
+ yield current_chain
+ skip_until_end_of_chain = True
+ elif COMPILER_SYMBOLS_RE.match(symbol):
+ compiler_symbol_in_chain = True
+ elif symbol == "Builtin:InterpreterEntryTrampoline":
+ if len(current_chain) == 1:
+ yield ["[entry trampoline]"]
+ else:
+ # If we see an InterpreterEntryTrampoline which is not at the top of the
+ # chain and doesn't have a BytecodeHandler above it, then we have
+ # skipped the top BytecodeHandler due to the top-level stub not building
+ # a frame. File the chain in the [misattributed] bucket.
+ current_chain[-1] = "[misattributed]"
+ yield current_chain
+ skip_until_end_of_chain = True
+
+
+def calculate_samples_count_per_callchain(callchains):
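+  # Collapse each chain into one semicolon-separated key, reversed so that
+  # callers precede callees; e.g. ["foo", "BytecodeHandler:bar"] becomes
+  # "BytecodeHandler:bar;foo".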
+ chain_counters = collections.defaultdict(int)
+ for callchain in callchains:
+ key = ";".join(reversed(callchain))
+ chain_counters[key] += 1
+ return chain_counters.items()
+
+
+def calculate_samples_count_per_handler(callchains):
+ def strip_handler_prefix_if_any(handler):
+ return handler if handler[0] == "[" else handler.split(":", 1)[1]
+
+ handler_counters = collections.defaultdict(int)
+ for callchain in callchains:
+ handler = strip_handler_prefix_if_any(callchain[-1])
+ handler_counters[handler] += 1
+ return handler_counters.items()
+
+
+def write_flamegraph_input_file(output_stream, callchains):
+ for callchain, count in calculate_samples_count_per_callchain(callchains):
+ output_stream.write("{}; {}\n".format(callchain, count))
+
+
+def write_handlers_report(output_stream, callchains):
+ handler_counters = calculate_samples_count_per_handler(callchains)
+ samples_num = sum(counter for _, counter in handler_counters)
+ # Sort by decreasing number of samples
+ handler_counters.sort(key=lambda entry: entry[1], reverse=True)
+ for bytecode_name, count in handler_counters:
+ output_stream.write(
+ "{}\t{}\t{:.3f}%\n".format(bytecode_name, count,
+ 100. * count / samples_num))
+
+
+def parse_command_line():
+ command_line_parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=__DESCRIPTION,
+ epilog=__HELP_EPILOGUE)
+
+ command_line_parser.add_argument(
+ "perf_filename",
+ help="perf sample file to process (default: perf.data)",
+ nargs="?",
+ default="perf.data",
+ metavar="<perf filename>"
+ )
+ command_line_parser.add_argument(
+ "--flamegraph", "-f",
+ help="output an input file for flamegraph.pl, not a report",
+ action="store_true",
+ dest="output_flamegraph"
+ )
+ command_line_parser.add_argument(
+ "--show-all", "-a",
+ help="show samples outside Ignition bytecode handlers",
+ action="store_true"
+ )
+ command_line_parser.add_argument(
+ "--show-full-signatures", "-s",
+ help="show full signatures instead of function names",
+ action="store_true"
+ )
+ command_line_parser.add_argument(
+ "--output", "-o",
+ help="output file name (stdout if omitted)",
+ type=argparse.FileType('wt'),
+ default=sys.stdout,
+ metavar="<output filename>",
+ dest="output_stream"
+ )
+
+ return command_line_parser.parse_args()
+
+
+def main():
+ program_options = parse_command_line()
+
+ perf = subprocess.Popen(["perf", "script", "--fields", "ip,sym",
+ "-i", program_options.perf_filename],
+ stdout=subprocess.PIPE)
+
+ callchains = collapsed_callchains_generator(
+ perf.stdout, program_options.show_all,
+ program_options.show_full_signatures)
+
+ if program_options.output_flamegraph:
+ write_flamegraph_input_file(program_options.output_stream, callchains)
+ else:
+ write_handlers_report(program_options.output_stream, callchains)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/tools/ignition/linux_perf_report_test.py b/deps/v8/tools/ignition/linux_perf_report_test.py
new file mode 100644
index 0000000000..d9cef75dff
--- /dev/null
+++ b/deps/v8/tools/ignition/linux_perf_report_test.py
@@ -0,0 +1,147 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import linux_perf_report as ipr
+import StringIO
+import unittest
+
+
+PERF_SCRIPT_OUTPUT = """
+# This line is a comment
+# This should be ignored too
+#
+# cdefab01 aRandomSymbol::Name(to, be, ignored)
+
+ 00000000 firstSymbol
+ 00000123 secondSymbol
+
+ 01234567 foo
+ abcdef76 BytecodeHandler:bar
+ 76543210 baz
+
+# Indentation shouldn't matter (neither should this line)
+
+ 01234567 foo
+ abcdef76 BytecodeHandler:bar
+ 76543210 baz
+
+ 01234567 beep
+ abcdef76 BytecodeHandler:bar
+ 76543210 baz
+
+ 01234567 hello
+ abcdef76 v8::internal::Compiler
+ 00000000 Stub:CEntryStub
+ 76543210 world
+ 11111111 BytecodeHandler:nope
+
+ 00000000 Lost
+ 11111111 Builtin:InterpreterEntryTrampoline
+ 22222222 bar
+
+ 11111111 Builtin:InterpreterEntryTrampoline
+ 22222222 bar
+"""
+
+
+class LinuxPerfReportTest(unittest.TestCase):
+ def test_collapsed_callchains_generator(self):
+ perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
+ callchains = list(ipr.collapsed_callchains_generator(perf_stream))
+ self.assertListEqual(callchains, [
+ ["foo", "BytecodeHandler:bar"],
+ ["foo", "BytecodeHandler:bar"],
+ ["beep", "BytecodeHandler:bar"],
+ ["[entry trampoline]"],
+ ])
+
+ def test_collapsed_callchains_generator_show_other(self):
+ perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
+ callchains = list(ipr.collapsed_callchains_generator(perf_stream,
+ show_all=True))
+ self.assertListEqual(callchains, [
+ ['firstSymbol', 'secondSymbol', '[other]'],
+ ["foo", "BytecodeHandler:bar"],
+ ["foo", "BytecodeHandler:bar"],
+ ["beep", "BytecodeHandler:bar"],
+ ["hello", "v8::internal::Compiler", "[compiler]"],
+ ["Lost", "[misattributed]"],
+ ["[entry trampoline]"],
+ ])
+
+  def test_calculate_samples_count_per_callchain_show_compile(self):
+ counters = ipr.calculate_samples_count_per_callchain([
+ ["foo", "BytecodeHandler:bar"],
+ ["foo", "BytecodeHandler:bar"],
+ ["beep", "BytecodeHandler:bar"],
+ ["hello", "v8::internal::Compiler", "[compiler]"],
+ ])
+ self.assertItemsEqual(counters, [
+ ('BytecodeHandler:bar;foo', 2),
+ ('BytecodeHandler:bar;beep', 1),
+ ('[compiler];v8::internal::Compiler;hello', 1),
+ ])
+
+ def test_calculate_samples_count_per_callchain(self):
+ counters = ipr.calculate_samples_count_per_callchain([
+ ["foo", "BytecodeHandler:bar"],
+ ["foo", "BytecodeHandler:bar"],
+ ["beep", "BytecodeHandler:bar"],
+ ])
+ self.assertItemsEqual(counters, [
+ ('BytecodeHandler:bar;foo', 2),
+ ('BytecodeHandler:bar;beep', 1),
+ ])
+
+ def test_calculate_samples_count_per_handler_show_compile(self):
+ counters = ipr.calculate_samples_count_per_handler([
+ ["foo", "BytecodeHandler:bar"],
+ ["foo", "BytecodeHandler:bar"],
+ ["beep", "BytecodeHandler:bar"],
+ ["hello", "v8::internal::Compiler", "[compiler]"],
+ ])
+ self.assertItemsEqual(counters, [
+ ("bar", 3),
+ ("[compiler]", 1)
+ ])
+
+  def test_calculate_samples_count_per_handler(self):
+ counters = ipr.calculate_samples_count_per_handler([
+ ["foo", "BytecodeHandler:bar"],
+ ["foo", "BytecodeHandler:bar"],
+ ["beep", "BytecodeHandler:bar"],
+ ])
+ self.assertItemsEqual(counters, [("bar", 3)])
+
+ def test_multiple_handlers(self):
+ perf_stream = StringIO.StringIO("""
+ 0000 foo(bar)
+ 1234 BytecodeHandler:first
+ 5678 a::random::call<to>(something, else)
+ 9abc BytecodeHandler:second
+ def0 otherIrrelevant(stuff)
+ 1111 entrypoint
+ """)
+ callchains = list(ipr.collapsed_callchains_generator(perf_stream, False))
+ self.assertListEqual(callchains, [
+ ["foo", "BytecodeHandler:first"],
+ ])
+
+ def test_compiler_symbols_regex(self):
+ compiler_symbols = [
+ "v8::internal::Parser",
+ "v8::internal::(anonymous namespace)::Compile",
+ "v8::internal::Compiler::foo",
+ ]
+ for compiler_symbol in compiler_symbols:
+ self.assertTrue(ipr.COMPILER_SYMBOLS_RE.match(compiler_symbol))
+
+ def test_strip_function_parameters(self):
+ def should_match(signature, name):
+ self.assertEqual(ipr.strip_function_parameters(signature), name)
+
+ should_match("foo(bar)", "foo"),
+ should_match("Foo(foomatic::(anonymous)::bar(baz))", "Foo"),
+ should_match("v8::(anonymous ns)::bar<thing(with, parentheses)>(baz, poe)",
+ "v8::(anonymous ns)::bar<thing(with, parentheses)>")
diff --git a/deps/v8/tools/isolate_driver.py b/deps/v8/tools/isolate_driver.py
index d1b39b0958..a6bcfbf71f 100644
--- a/deps/v8/tools/isolate_driver.py
+++ b/deps/v8/tools/isolate_driver.py
@@ -1,21 +1,276 @@
#!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
+# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Adaptor script called through build/isolate.gypi.
-Slimmed down version of chromium's isolate driver that doesn't process dynamic
-dependencies.
+Creates a wrapping .isolate which 'includes' the original one, that can be
+consumed by tools/swarming_client/isolate.py. Path variables are determined
+based on the current working directory. The relative_cwd in the .isolated file
+is determined based on the .isolate file that declares the 'command' variable
+to be used, so the wrapping .isolate doesn't affect this value.
+
+This script loads build.ninja and processes it to determine all the executables
+referenced by the isolated target. It adds them in the wrapping .isolate file.
+
+WARNING: The target to use for build.ninja analysis is the base name of the
+.isolate file plus '_run'. For example, 'foo_test.isolate' would have the target
+'foo_test_run' analysed.
"""
+import errno
+import glob
import json
import logging
import os
+import posixpath
+import StringIO
import subprocess
import sys
+import time
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
+SWARMING_CLIENT_DIR = os.path.join(TOOLS_DIR, 'swarming_client')
+SRC_DIR = os.path.dirname(TOOLS_DIR)
+
+sys.path.insert(0, SWARMING_CLIENT_DIR)
+
+import isolate_format
+
+
+def load_ninja_recursively(build_dir, ninja_path, build_steps):
+ """Crudely extracts all the subninja and build referenced in ninja_path.
+
+ In particular, it ignores rule and variable declarations. The goal is to be
+ performant (well, as much as python can be performant) which is currently in
+ the <200ms range for a complete chromium tree. As such the code is laid out
+ for performance instead of readability.
+ """
+ logging.debug('Loading %s', ninja_path)
+ try:
+ with open(os.path.join(build_dir, ninja_path), 'rb') as f:
+ line = None
+ merge_line = ''
+ subninja = []
+ for line in f:
+ line = line.rstrip()
+ if not line:
+ continue
+
+ if line[-1] == '$':
+ # The next line needs to be merged in.
+ merge_line += line[:-1]
+ continue
+
+ if merge_line:
+ line = merge_line + line
+ merge_line = ''
+
+ statement = line[:line.find(' ')]
+ if statement == 'build':
+ # Save the dependency list as a raw string. Only the lines needed will
+ # be processed with raw_build_to_deps(). This saves a good 70ms of
+ # processing time.
+ build_target, dependencies = line[6:].split(': ', 1)
+ # Interestingly, trying to be smart and only saving the build steps
+ # with the intended extensions ('', '.stamp', '.so') slows down
+ # parsing even if 90% of the build rules can be skipped.
+      # On Windows, a single step may generate two targets, so split items
+ # accordingly. It has only been seen for .exe/.exe.pdb combos.
+ for i in build_target.strip().split():
+ build_steps[i] = dependencies
+ elif statement == 'subninja':
+ subninja.append(line[9:])
+ except IOError:
+ print >> sys.stderr, 'Failed to open %s' % ninja_path
+ raise
+
+ total = 1
+ for rel_path in subninja:
+ try:
+ # Load each of the files referenced.
+    # TODO(maruel): Skip the files known to not be needed. It saves an awful
+ # lot of processing time.
+ total += load_ninja_recursively(build_dir, rel_path, build_steps)
+ except IOError:
+ print >> sys.stderr, '... as referenced by %s' % ninja_path
+ raise
+ return total
+
+
+def load_ninja(build_dir):
+ """Loads the tree of .ninja files in build_dir."""
+ build_steps = {}
+ total = load_ninja_recursively(build_dir, 'build.ninja', build_steps)
+ logging.info('Loaded %d ninja files, %d build steps', total, len(build_steps))
+ return build_steps
+
+
+def using_blacklist(item):
+ """Returns True if an item should be analyzed.
+
+ Ignores many rules that are assumed to not depend on a dynamic library. If
+ the assumption doesn't hold true anymore for a file format, remove it from
+ this list. This is simply an optimization.
+ """
+  # *.json is ignored below; *.isolated.gen.json is an exception: it is produced
+ # by isolate_driver.py in 'test_isolation_mode==prepare'.
+ if item.endswith('.isolated.gen.json'):
+ return True
+ IGNORED = (
+ '.a', '.cc', '.css', '.dat', '.def', '.frag', '.h', '.html', '.isolate',
+ '.js', '.json', '.manifest', '.o', '.obj', '.pak', '.png', '.pdb', '.py',
+ '.strings', '.test', '.txt', '.vert',
+ )
+ # ninja files use native path format.
+ ext = os.path.splitext(item)[1]
+ if ext in IGNORED:
+ return False
+ # Special case Windows, keep .dll.lib but discard .lib.
+ if item.endswith('.dll.lib'):
+ return True
+ if ext == '.lib':
+ return False
+ return item not in ('', '|', '||')
+
+
+def raw_build_to_deps(item):
+ """Converts a raw ninja build statement into the list of interesting
+ dependencies.
+ """
+ # TODO(maruel): Use a whitelist instead? .stamp, .so.TOC, .dylib.TOC,
+ # .dll.lib, .exe and empty.
+ # The first item is the build rule, e.g. 'link', 'cxx', 'phony', etc.
+ return filter(using_blacklist, item.split(' ')[1:])
+
+
+def collect_deps(target, build_steps, dependencies_added, rules_seen):
+ """Recursively adds all the interesting dependencies for |target|
+ into |dependencies_added|.
+ """
+ if rules_seen is None:
+ rules_seen = set()
+ if target in rules_seen:
+ # TODO(maruel): Figure out how it happens.
+ logging.warning('Circular dependency for %s!', target)
+ return
+ rules_seen.add(target)
+ try:
+ dependencies = raw_build_to_deps(build_steps[target])
+ except KeyError:
+ logging.info('Failed to find a build step to generate: %s', target)
+ return
+ logging.debug('collect_deps(%s) -> %s', target, dependencies)
+ for dependency in dependencies:
+ dependencies_added.add(dependency)
+ collect_deps(dependency, build_steps, dependencies_added, rules_seen)
+
+
+def post_process_deps(build_dir, dependencies):
+ """Processes the dependency list with OS specific rules."""
+ def filter_item(i):
+ if i.endswith('.so.TOC'):
+ # Remove only the suffix .TOC, not the .so!
+ return i[:-4]
+ if i.endswith('.dylib.TOC'):
+ # Remove only the suffix .TOC, not the .dylib!
+ return i[:-4]
+ if i.endswith('.dll.lib'):
+ # Remove only the suffix .lib, not the .dll!
+ return i[:-4]
+ return i
+
+ def is_exe(i):
+ # This script is only for adding new binaries that are created as part of
+ # the component build.
+ ext = os.path.splitext(i)[1]
+ # On POSIX, executables have no extension.
+ if ext not in ('', '.dll', '.dylib', '.exe', '.nexe', '.so'):
+ return False
+ if os.path.isabs(i):
+ # In some rare case, there's dependency set explicitly on files outside
+ # the checkout.
+ return False
+
+ # Check for execute access and strip directories. This gets rid of all the
+ # phony rules.
+ p = os.path.join(build_dir, i)
+ return os.access(p, os.X_OK) and not os.path.isdir(p)
+
+ return filter(is_exe, map(filter_item, dependencies))
+
+
+def create_wrapper(args, isolate_index, isolated_index):
+ """Creates a wrapper .isolate that add dynamic libs.
+
+ The original .isolate is not modified.
+ """
+ cwd = os.getcwd()
+ isolate = args[isolate_index]
+ # The code assumes the .isolate file is always specified path-less in cwd. Fix
+ # if this assumption doesn't hold true.
+ assert os.path.basename(isolate) == isolate, isolate
+
+ # This will look like ../out/Debug. This is based against cwd. Note that this
+ # must equal the value provided as PRODUCT_DIR.
+ build_dir = os.path.dirname(args[isolated_index])
+
+ # This will look like chrome/unit_tests.isolate. It is based against SRC_DIR.
+ # It's used to calculate temp_isolate.
+ src_isolate = os.path.relpath(os.path.join(cwd, isolate), SRC_DIR)
+
+ # The wrapping .isolate. This will look like
+ # ../out/Debug/gen/chrome/unit_tests.isolate.
+ temp_isolate = os.path.join(build_dir, 'gen', src_isolate)
+ temp_isolate_dir = os.path.dirname(temp_isolate)
+
+ # Relative path between the new and old .isolate file.
+ isolate_relpath = os.path.relpath(
+ '.', temp_isolate_dir).replace(os.path.sep, '/')
+
+  # It's a big assumption here that the name of the isolate file matches the
+  # primary target with a '_run' suffix. Fix accordingly if this doesn't hold
+  # true, e.g. complain to maruel@.
+ target = isolate[:-len('.isolate')] + '_run'
+ build_steps = load_ninja(build_dir)
+ binary_deps = set()
+ collect_deps(target, build_steps, binary_deps, None)
+ binary_deps = post_process_deps(build_dir, binary_deps)
+ logging.debug(
+ 'Binary dependencies:%s', ''.join('\n ' + i for i in binary_deps))
+
+ # Now do actual wrapping .isolate.
+ isolate_dict = {
+ 'includes': [
+ posixpath.join(isolate_relpath, isolate),
+ ],
+ 'variables': {
+ # Will look like ['<(PRODUCT_DIR)/lib/flibuser_prefs.so'].
+ 'files': sorted(
+ '<(PRODUCT_DIR)/%s' % i.replace(os.path.sep, '/')
+ for i in binary_deps),
+ },
+ }
+ # Some .isolate files have the same temp directory and the build system may
+ # run this script in parallel so make directories safely here.
+ try:
+ os.makedirs(temp_isolate_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ comment = (
+ '# Warning: this file was AUTOGENERATED.\n'
+      '# DO NOT EDIT.\n')
+ out = StringIO.StringIO()
+ isolate_format.print_all(comment, isolate_dict, out)
+ isolate_content = out.getvalue()
+ with open(temp_isolate, 'wb') as f:
+ f.write(isolate_content)
+ logging.info('Added %d dynamic libs', len(binary_deps))
+ logging.debug('%s', isolate_content)
+ args[isolate_index] = temp_isolate
def prepare_isolate_call(args, output):
@@ -31,13 +286,22 @@ def prepare_isolate_call(args, output):
}, f, indent=2, sort_keys=True)
+def rebase_directories(args, abs_base):
+ """Rebases all paths to be relative to abs_base."""
+ def replace(index):
+ args[index] = os.path.relpath(os.path.abspath(args[index]), abs_base)
+ for i, arg in enumerate(args):
+ if arg in ['--isolate', '--isolated']:
+ replace(i + 1)
+ if arg == '--path-variable':
+ # Path variables have a triple form: --path-variable NAME <path>.
+ replace(i + 2)
+
+
def main():
logging.basicConfig(level=logging.ERROR, format='%(levelname)7s %(message)s')
- if len(sys.argv) < 2:
- print >> sys.stderr, 'Internal failure; mode required'
- return 1
- mode = sys.argv[1]
args = sys.argv[1:]
+ mode = args[0] if args else None
isolate = None
isolated = None
for i, arg in enumerate(args):
@@ -45,20 +309,31 @@ def main():
isolate = i + 1
if arg == '--isolated':
isolated = i + 1
- if not isolate or not isolated:
+ if isolate is None or isolated is None or not mode:
print >> sys.stderr, 'Internal failure'
return 1
+ # Make sure all paths are relative to the isolate file. This is an
+ # expectation of the go binaries. In gn, this script is not called
+ # relative to the isolate file, but relative to the product dir.
+ new_base = os.path.abspath(os.path.dirname(args[isolate]))
+ rebase_directories(args, new_base)
+ assert args[isolate] == os.path.basename(args[isolate])
+ os.chdir(new_base)
+
+ create_wrapper(args, isolate, isolated)
+
# In 'prepare' mode just collect all required information for postponed
# isolated.py invocation later, store it in *.isolated.gen.json file.
if mode == 'prepare':
prepare_isolate_call(args[1:], args[isolated] + '.gen.json')
return 0
- swarming_client = os.path.join(TOOLS_DIR, 'swarming_client')
+ swarming_client = os.path.join(SRC_DIR, 'tools', 'swarming_client')
sys.stdout.flush()
- return subprocess.call(
+ result = subprocess.call(
[sys.executable, os.path.join(swarming_client, 'isolate.py')] + args)
+ return result
if __name__ == '__main__':
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index d915133114..b676d662e7 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -145,10 +145,12 @@ class TextMacro:
self.args = args
self.body = body
def expand(self, mapping):
- result = self.body
- for key, value in mapping.items():
- result = result.replace(key, value)
- return result
+ # Keys could be substrings of earlier values. To avoid unintended
+ # clobbering, apply all replacements simultaneously.
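+    # For example, with a (hypothetical) mapping {"A": "B", "B": "C"},
+    # expanding "AB" yields "BC", whereas sequential str.replace calls
+    # would produce "CC".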
+ any_key_pattern = "|".join(re.escape(k) for k in mapping.iterkeys())
+ def replace(match):
+ return mapping[match.group(0)]
+ return re.sub(any_key_pattern, replace, self.body)
class PythonMacro:
def __init__(self, args, fun):
diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
index fb0e5f4949..8938e44538 100644
--- a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
+++ b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
@@ -13,8 +13,8 @@
'../../src/d8.gyp:d8_run',
],
'includes': [
- '../../build/features.gypi',
- '../../build/isolate.gypi',
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
],
'sources': [
'jsfunfuzz.isolate',
diff --git a/deps/v8/tools/memory/asan/blacklist.txt b/deps/v8/tools/memory/asan/blacklist.txt
new file mode 100644
index 0000000000..2bb1aa9714
--- /dev/null
+++ b/deps/v8/tools/memory/asan/blacklist.txt
@@ -0,0 +1,4 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules. \ No newline at end of file
diff --git a/deps/v8/tools/memory/tsan_v2/ignores.txt b/deps/v8/tools/memory/tsan_v2/ignores.txt
new file mode 100644
index 0000000000..80babf4894
--- /dev/null
+++ b/deps/v8/tools/memory/tsan_v2/ignores.txt
@@ -0,0 +1,5 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules.
+# Data races should typically go to suppressions.txt. \ No newline at end of file
diff --git a/deps/v8/tools/mingw-generate-makefiles.sh b/deps/v8/tools/mingw-generate-makefiles.sh
index 32af52d39e..67715fc15b 100755
--- a/deps/v8/tools/mingw-generate-makefiles.sh
+++ b/deps/v8/tools/mingw-generate-makefiles.sh
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Monkey-patch GYP.
-cat > build/gyp/gyp.mingw << EOF
+cat > tools/gyp/gyp.mingw << EOF
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
@@ -74,11 +74,11 @@ EOF
find out -name '*.mk' -or -name 'Makefile*' -exec rm {} \;
# Generate fresh Makefiles.
-mv build/gyp/gyp build/gyp/gyp.original
-mv build/gyp/gyp.mingw build/gyp/gyp
+mv tools/gyp/gyp tools/gyp/gyp.original
+mv tools/gyp/gyp.mingw tools/gyp/gyp
make out/Makefile.ia32
-mv build/gyp/gyp build/gyp/gyp.mingw
-mv build/gyp/gyp.original build/gyp/gyp
+mv tools/gyp/gyp tools/gyp/gyp.mingw
+mv tools/gyp/gyp.original tools/gyp/gyp
# Patch generated Makefiles: replace most backslashes with forward slashes,
# fix library names in linker flags.
diff --git a/deps/v8/tools/msan/blacklist.txt b/deps/v8/tools/msan/blacklist.txt
new file mode 100644
index 0000000000..2bb1aa9714
--- /dev/null
+++ b/deps/v8/tools/msan/blacklist.txt
@@ -0,0 +1,4 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules. \ No newline at end of file
diff --git a/deps/v8/tools/nacl-run.py b/deps/v8/tools/nacl-run.py
deleted file mode 100755
index 32055feb0f..0000000000
--- a/deps/v8/tools/nacl-run.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This script executes the passed command line using the Native Client
-# 'sel_ldr' container. It is derived from android-run.py.
-
-import os
-from os.path import join, dirname, abspath
-import re
-import subprocess
-import sys
-import tempfile
-
-def Check(output, errors):
- failed = any([s.startswith('/system/bin/sh:') or s.startswith('ANDROID')
- for s in output.split('\n')])
- return 1 if failed else 0
-
-def Execute(cmdline):
- (fd_out, outname) = tempfile.mkstemp()
- (fd_err, errname) = tempfile.mkstemp()
- process = subprocess.Popen(
- args=cmdline,
- shell=True,
- stdout=fd_out,
- stderr=fd_err,
- )
- exit_code = process.wait()
- os.close(fd_out)
- os.close(fd_err)
- output = file(outname).read()
- errors = file(errname).read()
- os.unlink(outname)
- os.unlink(errname)
- sys.stdout.write(output)
- sys.stderr.write(errors)
- return exit_code or Check(output, errors)
-
-def Escape(arg):
- def ShouldEscape():
- for x in arg:
- if not x.isalnum() and x != '-' and x != '_':
- return True
- return False
-
- return arg if not ShouldEscape() else '"%s"' % (arg.replace('"', '\\"'))
-
-def WriteToTemporaryFile(data):
- (fd, fname) = tempfile.mkstemp()
- os.close(fd)
- tmp_file = open(fname, "w")
- tmp_file.write(data)
- tmp_file.close()
- return fname
-
-def GetNaClArchFromNexe(nexe):
- try:
- p = subprocess.Popen(['file', nexe], stdout=subprocess.PIPE)
- out, err = p.communicate()
- lines = [re.sub("\s+", " " , line) for line in out.split('\n')]
- if lines[0].find(": ELF 32-bit LSB executable, Intel 80386") > 0:
- return "x86_32"
- if lines[0].find(": ELF 64-bit LSB executable, x86-64") > 0:
- return "x86_64"
- except:
- print 'file ' + sys.argv[1] + ' failed'
- return None
-
-def GetNaClResources(nexe):
- nacl_sdk_dir = os.environ["NACL_SDK_ROOT"]
- nacl_arch = GetNaClArchFromNexe(nexe)
- if sys.platform.startswith("linux"):
- platform = "linux"
- elif sys.platform == "darwin":
- platform = "mac"
- else:
- print("NaCl V8 testing is supported on Linux and MacOS only.")
- sys.exit(1)
-
- if nacl_arch is "x86_64":
- toolchain = platform + "_x86_glibc"
- sel_ldr = "sel_ldr_x86_64"
- irt = "irt_core_x86_64.nexe"
- libdir = "lib64"
- elif nacl_arch is "x86_32":
- toolchain = platform + "_x86_glibc"
- sel_ldr = "sel_ldr_x86_32"
- irt = "irt_core_x86_32.nexe"
- libdir = "lib32"
- elif nacl_arch is "arm":
- print("NaCl V8 ARM support is not ready yet.")
- sys.exit(1)
- else:
- print("Invalid nexe %s with NaCl arch %s" % (nexe, nacl_arch))
- sys.exit(1)
-
- nacl_sel_ldr = os.path.join(nacl_sdk_dir, "tools", sel_ldr)
- nacl_irt = os.path.join(nacl_sdk_dir, "tools", irt)
-
- return (nacl_sdk_dir, nacl_sel_ldr, nacl_irt)
-
-def Main():
- if (len(sys.argv) == 1):
- print("Usage: %s <command-to-run-on-device>" % sys.argv[0])
- return 1
-
- args = [Escape(arg) for arg in sys.argv[1:]]
-
- (nacl_sdk_dir, nacl_sel_ldr, nacl_irt) = GetNaClResources(sys.argv[1])
-
- # sel_ldr Options:
- # -c -c: disable validation (for performance)
- # -a: allow file access
- # -B <irt>: load the IRT
- command = ' '.join([nacl_sel_ldr, '-c', '-c', '-a', '-B', nacl_irt, '--'] +
- args)
- error_code = Execute(command)
- return error_code
-
-if __name__ == '__main__':
- sys.exit(Main())
diff --git a/deps/v8/tools/objdump-v8 b/deps/v8/tools/objdump-v8
new file mode 100755
index 0000000000..25ec4745e6
--- /dev/null
+++ b/deps/v8/tools/objdump-v8
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os.path
+import re
+import subprocess
+import sys
+
+
+def get_address_bounds():
+ start = -1
+ end = -1
+ for arg in sys.argv:
+ if arg.startswith("--start-address="):
+ start = int(arg[-12:], 16)
+ if arg.startswith("--stop-address="):
+ end = int(arg[-12:], 16)
+ return start, end
+
+
+def format_line(line):
+ pieces = line.split(None, 3)
+ return " " + pieces[0][2:] + ":\t" + pieces[3]
+
+
+def is_comment(line):
+ stripped = line.strip()
+ return stripped.startswith("--") or stripped.startswith(";;")
+
+def main():
+ filename = sys.argv[-1]
+ match = re.match(r"/tmp/perf-(.*)\.map", filename)
+ if match:
+ start, end = get_address_bounds()
+ process_codefile = "code-" + match.group(1) + "-1.asm"
+ if os.path.exists(process_codefile):
+ codefile = open(process_codefile, "r")
+ else:
+ codefile = open("code.asm", "r")
+ with codefile:
+ printing = False
+ for line in codefile:
+ if line.startswith("0x"):
+ addr = int(line.split()[0], 0)
+ if start <= addr <= end:
+ printing = True
+ sys.stdout.write(format_line(line))
+ elif printing:
+ break
+ elif printing and not is_comment(line):
+ break
+ else:
+ sys.argv[0] = "objdump"
+ sys.exit(subprocess.call(sys.argv))
+
+if __name__ == "__main__":
+ main()
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index ad687c9efe..43d2578165 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -36,25 +36,16 @@
#include "include/libplatform/libplatform.h"
#include "src/api.h"
#include "src/compiler.h"
-#include "src/parsing/scanner-character-streams.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
+#include "src/parsing/scanner-character-streams.h"
#include "tools/shell-utils.h"
using namespace v8::internal;
-class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- public:
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
-};
-
class StringResource8 : public v8::String::ExternalOneByteStringResource {
public:
StringResource8(const char* data, int length)
@@ -142,7 +133,7 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
int main(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
- v8::V8::InitializeICU();
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
@@ -168,9 +159,9 @@ int main(int argc, char* argv[]) {
fnames.push_back(std::string(argv[i]));
}
}
- ArrayBufferAllocator array_buffer_allocator;
v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = &array_buffer_allocator;
+ create_params.array_buffer_allocator =
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -199,5 +190,6 @@ int main(int argc, char* argv[]) {
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
+ delete create_params.array_buffer_allocator;
return 0;
}
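
One detail of the parser-shell change above is ownership:
v8::ArrayBuffer::Allocator::NewDefaultAllocator() returns a heap-allocated
allocator that the embedder owns, which is why the patch also adds the
matching delete of create_params.array_buffer_allocator after
V8::Dispose().
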
diff --git a/deps/v8/tools/parser-shell.gyp b/deps/v8/tools/parser-shell.gyp
index 77ed1eb246..4ef1a82d71 100644
--- a/deps/v8/tools/parser-shell.gyp
+++ b/deps/v8/tools/parser-shell.gyp
@@ -30,14 +30,14 @@
'v8_code': 1,
'v8_enable_i18n_support%': 1,
},
- 'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
+ 'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
'targets': [
{
'target_name': 'parser-shell',
'type': 'executable',
'dependencies': [
- '../tools/gyp/v8.gyp:v8',
- '../tools/gyp/v8.gyp:v8_libplatform',
+ '../src/v8.gyp:v8',
+ '../src/v8.gyp:v8_libplatform',
],
'conditions': [
['v8_enable_i18n_support==1', {
@@ -50,10 +50,6 @@
'include_dirs+': [
'..',
],
- 'defines': [
- # TODO(jochen): Remove again after this is globally turned on.
- 'V8_IMMINENT_DEPRECATION_WARNINGS',
- ],
'sources': [
'parser-shell.cc',
'shell-utils.h',
diff --git a/deps/v8/tools/perf-to-html.py b/deps/v8/tools/perf-to-html.py
index 63faeb1d66..7ec9c50f21 100755
--- a/deps/v8/tools/perf-to-html.py
+++ b/deps/v8/tools/perf-to-html.py
@@ -115,8 +115,8 @@ class Benchmark:
self.name_ = name
self.tests_ = {}
for test in data:
- # strip off "<name>/" prefix
- test_name = test.split("/")[1]
+      # Strip off the "<name>/" prefix, preserving any later "/"s in the name.
+ test_name = test.split("/", 1)[1]
self.appendResult(test_name, data[test])
# tests is a dictionary of Results
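
The maxsplit fix above matters whenever a test name itself contains a
slash; a quick illustration of the difference, using a made-up key:

    name = "octane/typescript/compile"   # hypothetical "<name>/<test>" key
    print name.split("/")[1]             # "typescript"         (old, truncated)
    print name.split("/", 1)[1]          # "typescript/compile" (new, intact)
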
diff --git a/deps/v8/tools/plot-timer-events b/deps/v8/tools/plot-timer-events
index 15f28ac22b..da2e823c14 100755
--- a/deps/v8/tools/plot-timer-events
+++ b/deps/v8/tools/plot-timer-events
@@ -70,10 +70,9 @@ if test "$contains" -eq 0; then
rm $calibration_log
# Overhead in picoseconds.
- options=--distortion=
- options+=`echo "1000*(($t_1_end - $t_1_start) - ($t_2_end - $t_2_start)) \
+ distortion=`echo "1000*(($t_1_end - $t_1_start) - ($t_2_end - $t_2_start)) \
/ ($n_1 - $n_2)" | bc`
- echo $options
+ options="--distortion=$distortion"
fi
cat $log_file |
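
The repaired shell above now computes the calibration overhead into its own
variable and only then assembles the flag, instead of echoing a half-built
--distortion= option. The arithmetic handed to bc, restated in Python with
hypothetical calibration numbers:

    # Two calibration runs: with and without timer events.
    t_1_start, t_1_end, n_1 = 0, 5000000, 100000
    t_2_start, t_2_end, n_2 = 0, 1000000, 20000
    # Overhead in picoseconds per event.
    distortion = 1000 * ((t_1_end - t_1_start) -
                         (t_2_end - t_2_start)) / (n_1 - n_2)
    print "--distortion=%d" % distortion   # --distortion=50000
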
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index dd3533bcf4..d503538fcb 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -55,12 +55,15 @@ from testrunner.local import utils
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
# TODO(bmeurer): Fix and re-enable readability/check
+# TODO(epertoso): Maybe re-enable readability/fn_size after
+# http://crrev.com/2199323003 relands.
LINT_RULES = """
-build/header_guard
-build/include_what_you_use
-build/namespaces
-readability/check
+-readability/fn_size
+readability/streams
-runtime/references
""".split()
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index b71cac5a10..c1a99e8d11 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -18,7 +18,9 @@ https://github.com/v8/v8/wiki/Triaging%20issues
Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
-This only works with a Google account.""")
+This only works with a Google account.
+
+CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel""")
class Preparation(Step):
MESSAGE = "Preparation."
diff --git a/deps/v8/tools/release/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py
index cd73051685..0fdffd93ac 100755
--- a/deps/v8/tools/release/check_clusterfuzz.py
+++ b/deps/v8/tools/release/check_clusterfuzz.py
@@ -89,6 +89,15 @@ BUG_SPECS = [
},
{
"args": {
+ "job_type": "linux_asan_d8_ignition_v8_arm_dbg",
+ "reproducible": "True",
+ "open": "True",
+ "bug_information": "",
+ },
+ "crash_state": ANY_RE,
+ },
+ {
+ "args": {
"job_type": "linux_asan_d8_v8_arm64_dbg",
"reproducible": "True",
"open": "True",
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index 7477ea1461..14d44b4bd6 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -223,6 +223,27 @@ class CommitBranch(Step):
os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
+class FixBrokenTag(Step):
+ MESSAGE = "Check for a missing tag and fix that instead."
+
+ def RunStep(self):
+ commit = None
+ try:
+ commit = self.GitLog(
+ n=1, format="%H",
+ grep=self["commit_title"],
+ branch="origin/%s" % self["version"],
+ )
+ except GitFailedException:
+ # In the normal case, the remote doesn't exist yet and git will fail.
+ pass
+ if commit:
+      print "Found %s. Repairing the tag and bailing out." % self["version"]
+ self.Git("tag %s %s" % (self["version"], commit))
+ self.Git("push origin refs/tags/%s" % self["version"])
+ return True
+
+
class PushBranch(Step):
MESSAGE = "Push changes."
@@ -303,6 +324,7 @@ class CreateRelease(ScriptsBase):
SetVersion,
EnableMergeWatchlist,
CommitBranch,
+ FixBrokenTag,
PushBranch,
TagRevision,
CleanUp,
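
The FixBrokenTag step above covers the case where a previous run landed the
version commit but died before tagging: if the commit title already exists
on the remote version branch, the script repairs the tag and stops rather
than producing a duplicate release. Reduced to plain git, the repair is
roughly the following sketch (version and hash are placeholder values):

    import subprocess

    version, commit = "3.22.5", "deadbeef"   # from the GitLog --grep lookup
    subprocess.check_call(["git", "tag", version, commit])
    subprocess.check_call(["git", "push", "origin",
                           "refs/tags/%s" % version])
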
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index 89fd7c9cf6..b4c9de6d6c 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -242,6 +242,10 @@ class GitRecipesMixin(object):
self.Git(
"cl land -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
+ def GitCLAddComment(self, message, **kwargs):
+ args = ["cl", "comments", "-a", Quoted(message)]
+ self.Git(MakeArgs(args), **kwargs)
+
def GitDiff(self, loc1, loc2, **kwargs):
return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs)
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 699fe1b3c6..bdc94ebd09 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -47,10 +47,8 @@ class Preparation(Step):
open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
self.InitialEnvironmentChecks(self.default_cwd)
- if self._options.branch:
- self["merge_to_branch"] = self._options.branch
- else: # pragma: no cover
- self.Die("Please specify a branch to merge to")
+
+ self["merge_to_branch"] = self._options.branch
self.CommonPrepare()
self.PrepareBranch()
@@ -74,7 +72,7 @@ class SearchArchitecturePorts(Step):
for revision in self["full_revision_list"]:
# Search for commits which matches the "Port XXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
- grep="Port %s" % revision,
+ grep="^[Pp]ort %s" % revision,
branch=self.vc.RemoteMasterBranch())
for git_hash in git_hashes.splitlines():
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
@@ -99,6 +97,12 @@ class SearchArchitecturePorts(Step):
class CreateCommitMessage(Step):
MESSAGE = "Create commit message."
+ def _create_commit_description(self, commit_hash):
+ patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
+ description = "Merged: " + patch_merge_desc + "\n"
+ description += "Revision: " + commit_hash + "\n\n"
+ return description
+
def RunStep(self):
# Stringify: ["abcde", "12345"] -> "abcde, 12345"
@@ -107,17 +111,23 @@ class CreateCommitMessage(Step):
if not self["revision_list"]: # pragma: no cover
self.Die("Revision list is empty.")
- action_text = "Merged %s"
+ msg_pieces = []
- # The commit message title is added below after the version is specified.
- msg_pieces = [
- "\n".join(action_text % s for s in self["full_revision_list"]),
- ]
- msg_pieces.append("\n\n")
+ if len(self["full_revision_list"]) > 1:
+ self["commit_title"] = "Merged: Squashed multiple commits."
+ for commit_hash in self["full_revision_list"]:
+ msg_pieces.append(self._create_commit_description(commit_hash))
+ else:
+ commit_hash = self["full_revision_list"][0]
+ full_description = self._create_commit_description(commit_hash).split("\n")
- for commit_hash in self["full_revision_list"]:
- patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
- msg_pieces.append("%s\n\n" % patch_merge_desc)
+      # Truncate the title because the code review tool limits its length.
+ title = full_description[0]
+ if len(title) > 100:
+ title = title[:96] + " ..."
+
+ self["commit_title"] = title
+ msg_pieces.append(full_description[1] + "\n\n")
bugs = []
for commit_hash in self["full_revision_list"]:
@@ -128,6 +138,8 @@ class CreateCommitMessage(Step):
if bug_aggregate:
msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate)
+ msg_pieces.append("NOTRY=true\nNOPRESUBMIT=true\nNOTREECHECKS=true\n")
+
self["new_commit_msg"] = "".join(msg_pieces)
@@ -144,49 +156,26 @@ class ApplyPatches(Step):
if self._options.patch:
self.ApplyPatch(self._options.patch)
-
-class PrepareVersion(Step):
- MESSAGE = "Prepare version file."
-
- def RunStep(self):
- # This is used to calculate the patch level increment.
- self.ReadAndPersistVersion()
-
-
-class IncrementVersion(Step):
- MESSAGE = "Increment version number."
-
- def RunStep(self):
- new_patch = str(int(self["patch"]) + 1)
- if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
- "fire up your EDITOR on %s so you can make arbitrary "
- "changes. When you're done, save the file and exit your "
- "EDITOR.)" % VERSION_FILE):
- text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
- text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
- r"\g<space>%s" % new_patch,
- text)
- TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
- else:
- self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
- self.ReadAndPersistVersion("new_")
- self["version"] = "%s.%s.%s.%s" % (self["new_major"],
- self["new_minor"],
- self["new_build"],
- self["new_patch"])
-
-
class CommitLocal(Step):
MESSAGE = "Commit to local branch."
def RunStep(self):
# Add a commit message title.
- self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
self["new_commit_msg"])
TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
+class AddInformationalComment(Step):
+ MESSAGE = 'Show additional information.'
+
+ def RunStep(self):
+ message = ("NOTE: This script will no longer automatically "
+ "update include/v8-version.h "
+ "and create a tag. This is done automatically by the autotag bot. "
+               "Please run merge_to_branch.py with --help for more information.")
+
+ self.GitCLAddComment(message)
class CommitRepository(Step):
MESSAGE = "Commit to the repository."
@@ -197,24 +186,12 @@ class CommitRepository(Step):
self.GitPresubmit()
self.vc.CLLand()
-
-class TagRevision(Step):
- MESSAGE = "Create the tag."
-
- def RunStep(self):
- print "Creating tag %s" % self["version"]
- self.vc.Tag(self["version"],
- self.vc.RemoteBranch(self["merge_to_branch"]),
- self["commit_title"])
-
-
class CleanUp(Step):
MESSAGE = "Cleanup."
def RunStep(self):
self.CommonCleanup()
print "*** SUMMARY ***"
- print "version: %s" % self["version"]
print "branch: %s" % self["merge_to_branch"]
if self["revision_list"]:
print "patches: %s" % self["revision_list"]
@@ -223,7 +200,9 @@ class CleanUp(Step):
class MergeToBranch(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
- "master to other branches, including candidates.")
+ "master to release branches like 4.5. This script does not "
+ "version the commit. See http://goo.gl/9ke2Vw for more "
+ "information.")
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
@@ -250,6 +229,11 @@ class MergeToBranch(ScriptsBase):
# CC ulan to make sure that fixes are merged to Google3.
options.cc = "ulan@chromium.org"
+ if len(options.branch.split('.')) > 2:
+ print ("This script does not support merging to roll branches. "
+ "Please use tools/release/roll_merge.py for this use case.")
+ return False
+
# Make sure to use git hashes in the new workflows.
for revision in options.revisions:
if (IsSvnNumber(revision) or
@@ -276,12 +260,10 @@ class MergeToBranch(ScriptsBase):
SearchArchitecturePorts,
CreateCommitMessage,
ApplyPatches,
- PrepareVersion,
- IncrementVersion,
CommitLocal,
UploadStep,
+ AddInformationalComment,
CommitRepository,
- TagRevision,
CleanUp,
]
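
Summarizing the rewritten CreateCommitMessage above: a single merge now uses
the original subject as the commit title (truncated for the review tool)
over a "Merged:/Revision:" body, while multiple merges are squashed under a
fixed title. The truncation rule, isolated as a sketch:

    def make_title(subject):
      # Mirrors the cap applied in CreateCommitMessage.RunStep.
      return subject if len(subject) <= 100 else subject[:96] + " ..."

    print make_title("Fix deopt on " + "x" * 100)   # 96 chars plus " ..."
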
diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py
new file mode 100755
index 0000000000..2dd43eae3a
--- /dev/null
+++ b/deps/v8/tools/release/roll_merge.py
@@ -0,0 +1,290 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import argparse
+from collections import OrderedDict
+import sys
+
+from common_includes import *
+
+def IsSvnNumber(rev):
+ return rev.isdigit() and len(rev) < 8
+
+class Preparation(Step):
+ MESSAGE = "Preparation."
+
+ def RunStep(self):
+ if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
+ if self._options.force:
+ os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
+ elif self._options.step == 0: # pragma: no cover
+ self.Die("A merge is already in progress")
+ open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
+
+ self.InitialEnvironmentChecks(self.default_cwd)
+ if self._options.branch:
+ self["merge_to_branch"] = self._options.branch
+ else: # pragma: no cover
+ self.Die("Please specify a branch to merge to")
+
+ self.CommonPrepare()
+ self.PrepareBranch()
+
+
+class CreateBranch(Step):
+ MESSAGE = "Create a fresh branch for the patch."
+
+ def RunStep(self):
+ self.GitCreateBranch(self.Config("BRANCHNAME"),
+ self.vc.RemoteBranch(self["merge_to_branch"]))
+
+
+class SearchArchitecturePorts(Step):
+ MESSAGE = "Search for corresponding architecture ports."
+
+ def RunStep(self):
+ self["full_revision_list"] = list(OrderedDict.fromkeys(
+ self._options.revisions))
+ port_revision_list = []
+ for revision in self["full_revision_list"]:
+ # Search for commits which matches the "Port XXX" pattern.
+ git_hashes = self.GitLog(reverse=True, format="%H",
+ grep="Port %s" % revision,
+ branch=self.vc.RemoteMasterBranch())
+ for git_hash in git_hashes.splitlines():
+ revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+
+ # Is this revision included in the original revision list?
+ if git_hash in self["full_revision_list"]:
+ print("Found port of %s -> %s (already included): %s"
+ % (revision, git_hash, revision_title))
+ else:
+ print("Found port of %s -> %s: %s"
+ % (revision, git_hash, revision_title))
+ port_revision_list.append(git_hash)
+
+    # Did we find any ports?
+ if len(port_revision_list) > 0:
+ if self.Confirm("Automatically add corresponding ports (%s)?"
+ % ", ".join(port_revision_list)):
+        # 'y': Add ports to revision list.
+ self["full_revision_list"].extend(port_revision_list)
+
+
+class CreateCommitMessage(Step):
+ MESSAGE = "Create commit message."
+
+ def RunStep(self):
+
+ # Stringify: ["abcde", "12345"] -> "abcde, 12345"
+ self["revision_list"] = ", ".join(self["full_revision_list"])
+
+ if not self["revision_list"]: # pragma: no cover
+ self.Die("Revision list is empty.")
+
+ action_text = "Merged %s"
+
+ # The commit message title is added below after the version is specified.
+ msg_pieces = [
+ "\n".join(action_text % s for s in self["full_revision_list"]),
+ ]
+ msg_pieces.append("\n\n")
+
+ for commit_hash in self["full_revision_list"]:
+ patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
+ msg_pieces.append("%s\n\n" % patch_merge_desc)
+
+ bugs = []
+ for commit_hash in self["full_revision_list"]:
+ msg = self.GitLog(n=1, git_hash=commit_hash)
+ for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
+ bugs.extend(s.strip() for s in bug.split(","))
+ bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs)))
+ if bug_aggregate:
+ msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate)
+
+ self["new_commit_msg"] = "".join(msg_pieces)
+
+
+class ApplyPatches(Step):
+ MESSAGE = "Apply patches for selected revisions."
+
+ def RunStep(self):
+ for commit_hash in self["full_revision_list"]:
+ print("Applying patch for %s to %s..."
+ % (commit_hash, self["merge_to_branch"]))
+ patch = self.GitGetPatch(commit_hash)
+ TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
+ self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
+ if self._options.patch:
+ self.ApplyPatch(self._options.patch)
+
+
+class PrepareVersion(Step):
+ MESSAGE = "Prepare version file."
+
+ def RunStep(self):
+ # This is used to calculate the patch level increment.
+ self.ReadAndPersistVersion()
+
+
+class IncrementVersion(Step):
+ MESSAGE = "Increment version number."
+
+ def RunStep(self):
+ new_patch = str(int(self["patch"]) + 1)
+ if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
+ "fire up your EDITOR on %s so you can make arbitrary "
+ "changes. When you're done, save the file and exit your "
+ "EDITOR.)" % VERSION_FILE):
+ text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
+ text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
+ r"\g<space>%s" % new_patch,
+ text)
+ TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
+ else:
+ self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
+ self.ReadAndPersistVersion("new_")
+ self["version"] = "%s.%s.%s.%s" % (self["new_major"],
+ self["new_minor"],
+ self["new_build"],
+ self["new_patch"])
+
+
+class CommitLocal(Step):
+ MESSAGE = "Commit to local branch."
+
+ def RunStep(self):
+ # Add a commit message title.
+ self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
+ self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
+ self["new_commit_msg"])
+ TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
+ self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
+
+
+class CommitRepository(Step):
+ MESSAGE = "Commit to the repository."
+
+ def RunStep(self):
+ self.GitCheckout(self.Config("BRANCHNAME"))
+ self.WaitForLGTM()
+ self.GitPresubmit()
+ self.vc.CLLand()
+
+
+class TagRevision(Step):
+ MESSAGE = "Create the tag."
+
+ def RunStep(self):
+ print "Creating tag %s" % self["version"]
+ self.vc.Tag(self["version"],
+ self.vc.RemoteBranch(self["merge_to_branch"]),
+ self["commit_title"])
+
+
+class CleanUp(Step):
+ MESSAGE = "Cleanup."
+
+ def RunStep(self):
+ self.CommonCleanup()
+ print "*** SUMMARY ***"
+ print "version: %s" % self["version"]
+ print "branch: %s" % self["merge_to_branch"]
+ if self["revision_list"]:
+ print "patches: %s" % self["revision_list"]
+
+
+class RollMerge(ScriptsBase):
+ def _Description(self):
+ return ("Performs the necessary steps to merge revisions from "
+ "master to other branches, including candidates and roll branches.")
+
+ def _PrepareOptions(self, parser):
+ group = parser.add_mutually_exclusive_group(required=True)
+ group.add_argument("--branch", help="The branch to merge to.")
+ parser.add_argument("revisions", nargs="*",
+ help="The revisions to merge.")
+ parser.add_argument("-f", "--force",
+ help="Delete sentinel file.",
+ default=False, action="store_true")
+ parser.add_argument("-m", "--message",
+ help="A commit message for the patch.")
+ parser.add_argument("-p", "--patch",
+ help="A patch file to apply as part of the merge.")
+
+ def _ProcessOptions(self, options):
+ if len(options.revisions) < 1:
+ if not options.patch:
+ print "Either a patch file or revision numbers must be specified"
+ return False
+ if not options.message:
+ print "You must specify a merge comment if no patches are specified"
+ return False
+ options.bypass_upload_hooks = True
+ # CC ulan to make sure that fixes are merged to Google3.
+ options.cc = "ulan@chromium.org"
+
+ # Make sure to use git hashes in the new workflows.
+ for revision in options.revisions:
+ if (IsSvnNumber(revision) or
+ (revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
+ print "Please provide full git hashes of the patches to merge."
+ print "Got: %s" % revision
+ return False
+ return True
+
+ def _Config(self):
+ return {
+ "BRANCHNAME": "prepare-merge",
+ "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+ "ALREADY_MERGING_SENTINEL_FILE":
+ "/tmp/v8-merge-to-branch-tempfile-already-merging",
+ "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
+ "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+ }
+
+ def _Steps(self):
+ return [
+ Preparation,
+ CreateBranch,
+ SearchArchitecturePorts,
+ CreateCommitMessage,
+ ApplyPatches,
+ PrepareVersion,
+ IncrementVersion,
+ CommitLocal,
+ UploadStep,
+ CommitRepository,
+ TagRevision,
+ CleanUp,
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(RollMerge().Run())
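
roll_merge.py keeps the old message format, including the BUG= aggregation
both scripts share: every merged commit's message is scanned for BUG= lines,
the comma-separated entries are collected, "none" entries are dropped, and
the rest are sorted into a single BUG= line. That loop, condensed into a
self-contained snippet with made-up commit messages:

    import re

    messages = ["Title1\nBUG=v8:123, 456", "Title2\nBUG=none", "Title3\nBUG=123"]
    bugs = []
    for msg in messages:
      for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
        bugs.extend(s.strip() for s in bug.split(","))
    bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs)))
    print "BUG=%s" % bug_aggregate   # BUG=123,456,v8:123
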
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 05457c9285..ab92e89f3a 100644
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -40,13 +40,14 @@ from common_includes import *
import create_release
from create_release import CreateRelease
import merge_to_branch
-from merge_to_branch import *
+from merge_to_branch import MergeToBranch
import push_to_candidates
from push_to_candidates import *
import releases
from releases import Releases
from auto_tag import AutoTag
-
+import roll_merge
+from roll_merge import RollMerge
TEST_CONFIG = {
"DEFAULT_CWD": None,
@@ -528,7 +529,7 @@ class ScriptTest(unittest.TestCase):
self._state["version"] = "tag_name"
self._state["commit_title"] = "Title"
self.assertRaises(Exception,
- lambda: self.RunStep(MergeToBranch, TagRevision, args))
+ lambda: self.RunStep(RollMerge, TagRevision, args))
def testReadAndPersistVersion(self):
self.WriteFakeVersionFile(build=5)
@@ -970,6 +971,8 @@ Performance and stability improvements on all platforms."""
cb=self.WriteFakeWatchlistsFile),
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
+ Cmd("git log -1 --format=%H --grep=\"Version 3.22.5\" origin/3.22.5",
+ ""),
Cmd("git push origin "
"refs/heads/work-branch:refs/pending/heads/3.22.5 "
"push_hash:refs/pending-tags/heads/3.22.5 "
@@ -1041,6 +1044,8 @@ Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
This only works with a Google account.
+CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
+
TBR=reviewer@chromium.org"""
# Snippet from the original DEPS file.
@@ -1171,7 +1176,7 @@ deps = {
self.assertEquals("abc123", state["candidate"])
- def testMergeToBranch(self):
+ def testRollMerge(self):
TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
self.WriteFakeVersionFile(build=5)
@@ -1302,6 +1307,377 @@ LOG=N
# The first run of the script stops because of git being down.
self.assertRaises(GitFailedException,
+ lambda: RollMerge(TEST_CONFIG, self).Run(args))
+
+ # Test that state recovery after restarting the script works.
+ args += ["-s", "4"]
+ RollMerge(TEST_CONFIG, self).Run(args)
+
+ def testReleases(self):
+ c_hash1_commit_log = """Update V8 to Version 4.2.71.
+
+Cr-Commit-Position: refs/heads/master@{#5678}
+"""
+ c_hash2_commit_log = """Revert something.
+
+BUG=12345
+
+Reason:
+> Some reason.
+> Cr-Commit-Position: refs/heads/master@{#12345}
+> git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12345 003-1c4
+
+Review URL: https://codereview.chromium.org/12345
+
+Cr-Commit-Position: refs/heads/master@{#4567}
+git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b
+
+"""
+ c_hash3_commit_log = """Simple.
+
+git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
+
+"""
+ c_hash_234_commit_log = """Version 3.3.1.1 (cherry-pick).
+
+Merged abc12.
+
+Review URL: fake.com
+
+Cr-Commit-Position: refs/heads/candidates@{#234}
+"""
+ c_hash_123_commit_log = """Version 3.3.1.0
+
+git-svn-id: googlecode@123 0039-1c4b
+"""
+ c_hash_345_commit_log = """Version 3.4.0.
+
+Cr-Commit-Position: refs/heads/candidates@{#345}
+"""
+ c_hash_456_commit_log = """Version 4.2.71.
+
+Cr-Commit-Position: refs/heads/4.2.71@{#1}
+"""
+ c_deps = "Line\n \"v8_revision\": \"%s\",\n line\n"
+
+ json_output = self.MakeEmptyTempFile()
+ csv_output = self.MakeEmptyTempFile()
+ self.WriteFakeVersionFile()
+
+ TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+ chrome_dir = TEST_CONFIG["CHROMIUM"]
+ chrome_v8_dir = os.path.join(chrome_dir, "v8")
+ os.makedirs(chrome_v8_dir)
+
+ def ResetVersion(major, minor, build, patch=0):
+ return lambda: self.WriteFakeVersionFile(major=major,
+ minor=minor,
+ build=build,
+ patch=patch)
+
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git fetch", ""),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+ Cmd("git rev-list --max-age=395200 --tags",
+ "bad_tag\nhash_234\nhash_123\nhash_345\nhash_456\n"),
+ Cmd("git describe --tags bad_tag", "3.23.42-1-deadbeef"),
+ Cmd("git describe --tags hash_234", "3.3.1.1"),
+ Cmd("git describe --tags hash_123", "3.21.2"),
+ Cmd("git describe --tags hash_345", "3.22.3"),
+ Cmd("git describe --tags hash_456", "4.2.71"),
+ Cmd("git diff --name-only hash_234 hash_234^", VERSION_FILE),
+ Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 3, 1, 1)),
+ Cmd("git branch -r --contains hash_234", " branch-heads/3.3\n"),
+ Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
+ Cmd("git log -1 --format=%s hash_234", ""),
+ Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
+ Cmd("git log -1 --format=%ci hash_234", "18:15"),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 22, 5)),
+ Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE),
+ Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 21, 2)),
+ Cmd("git branch -r --contains hash_123", " branch-heads/3.21\n"),
+ Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
+ Cmd("git log -1 --format=%s hash_123", ""),
+ Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
+ Cmd("git log -1 --format=%ci hash_123", "03:15"),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 22, 5)),
+ Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE),
+ Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 22, 3)),
+ Cmd("git branch -r --contains hash_345", " origin/candidates\n"),
+ Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
+ Cmd("git log -1 --format=%s hash_345", ""),
+ Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
+ Cmd("git log -1 --format=%ci hash_345", ""),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 22, 5)),
+ Cmd("git diff --name-only hash_456 hash_456^", VERSION_FILE),
+ Cmd("git checkout -f hash_456 -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(4, 2, 71)),
+ Cmd("git branch -r --contains hash_456", " origin/4.2.71\n"),
+ Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
+ Cmd("git log -1 --format=%H 4.2.71", "hash_456"),
+ Cmd("git log -1 --format=%s hash_456", "Version 4.2.71"),
+ Cmd("git log -1 --format=%H hash_456^", "master_456"),
+ Cmd("git log -1 --format=%B master_456",
+ "Cr-Commit-Position: refs/heads/master@{#456}"),
+ Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
+ Cmd("git log -1 --format=%ci hash_456", "02:15"),
+ Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+ cb=ResetVersion(3, 22, 5)),
+ Cmd("git fetch origin +refs/heads/*:refs/remotes/origin/* "
+ "+refs/branch-heads/*:refs/remotes/branch-heads/*", "",
+ cwd=chrome_dir),
+ Cmd("git fetch origin", "", cwd=chrome_v8_dir),
+ Cmd("git log --format=%H --grep=\"V8\" origin/master -- DEPS",
+ "c_hash1\nc_hash2\nc_hash3\n",
+ cwd=chrome_dir),
+ Cmd("git show c_hash1:DEPS", c_deps % "hash_456", cwd=chrome_dir),
+ Cmd("git log -1 --format=%B c_hash1", c_hash1_commit_log,
+ cwd=chrome_dir),
+ Cmd("git show c_hash2:DEPS", c_deps % "hash_345", cwd=chrome_dir),
+ Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
+ cwd=chrome_dir),
+ Cmd("git show c_hash3:DEPS", c_deps % "deadbeef", cwd=chrome_dir),
+ Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
+ cwd=chrome_dir),
+ Cmd("git branch -r", " weird/123\n branch-heads/7\n", cwd=chrome_dir),
+ Cmd("git show refs/branch-heads/7:DEPS", c_deps % "hash_345",
+ cwd=chrome_dir),
+ URL("http://omahaproxy.appspot.com/all.json", """[{
+ "os": "win",
+ "versions": [{
+ "version": "2.2.2.2",
+ "v8_version": "22.2.2.2",
+ "current_reldate": "04/09/15",
+ "os": "win",
+ "channel": "canary",
+ "previous_version": "1.1.1.0"
+ }]
+ }]"""),
+ URL("http://omahaproxy.appspot.com/v8.json?version=1.1.1.0", """{
+ "chromium_version": "1.1.1.0",
+ "v8_version": "11.1.1.0"
+ }"""),
+ Cmd("git rev-list -1 11.1.1", "v8_previous_version_hash"),
+ Cmd("git rev-list -1 22.2.2.2", "v8_version_hash"),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "")
+ ])
+
+ args = ["-c", TEST_CONFIG["CHROMIUM"],
+ "--json", json_output,
+ "--csv", csv_output,
+ "--max-releases", "1"]
+ Releases(TEST_CONFIG, self).Run(args)
+
+ # Check expected output.
+ csv = ("4.2.71,4.2.71,1,5678,\r\n"
+ "3.22.3,candidates,345,4567:5677,\r\n"
+ "3.21.2,3.21,123,,\r\n"
+ "3.3.1.1,3.3,234,,abc12\r\n")
+ self.assertEquals(csv, FileToText(csv_output))
+
+ expected_json = {"chrome_releases":{
+ "canaries": [
+ {
+ "chrome_version": "2.2.2.2",
+ "os": "win",
+ "release_date": "04/09/15",
+ "v8_version": "22.2.2.2",
+ "v8_version_hash": "v8_version_hash",
+ "v8_previous_version": "11.1.1.0",
+ "v8_previous_version_hash": "v8_previous_version_hash"
+ }]},
+ "releases":[
+ {
+ "revision": "1",
+ "revision_git": "hash_456",
+ "master_position": "456",
+ "master_hash": "master_456",
+ "patches_merged": "",
+ "version": "4.2.71",
+ "chromium_revision": "5678",
+ "branch": "4.2.71",
+ "review_link": "",
+ "date": "02:15",
+ "chromium_branch": "",
+ # FIXME(machenbach): Fix revisions link for git.
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=1",
+ },
+ {
+ "revision": "345",
+ "revision_git": "hash_345",
+ "master_position": "",
+ "master_hash": "",
+ "patches_merged": "",
+ "version": "3.22.3",
+ "chromium_revision": "4567:5677",
+ "branch": "candidates",
+ "review_link": "",
+ "date": "",
+ "chromium_branch": "7",
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=345",
+ },
+ {
+ "revision": "123",
+ "revision_git": "hash_123",
+ "patches_merged": "",
+ "master_position": "",
+ "master_hash": "",
+ "version": "3.21.2",
+ "chromium_revision": "",
+ "branch": "3.21",
+ "review_link": "",
+ "date": "03:15",
+ "chromium_branch": "",
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=123",
+ },
+ {
+ "revision": "234",
+ "revision_git": "hash_234",
+ "patches_merged": "abc12",
+ "master_position": "",
+ "master_hash": "",
+ "version": "3.3.1.1",
+ "chromium_revision": "",
+ "branch": "3.3",
+ "review_link": "fake.com",
+ "date": "18:15",
+ "chromium_branch": "",
+ "revision_link": "https://code.google.com/p/v8/source/detail?r=234",
+ },],
+ }
+ self.assertEquals(expected_json, json.loads(FileToText(json_output)))
+
+ def testMergeToBranch(self):
+ TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
+ TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+ self.WriteFakeVersionFile(build=5)
+ os.environ["EDITOR"] = "vi"
+ extra_patch = self.MakeEmptyTempFile()
+
+ def VerifyPatch(patch):
+ return lambda: self.assertEquals(patch,
+ FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))
+
+ info_msg = ("NOTE: This script will no longer automatically "
+ "update include/v8-version.h "
+ "and create a tag. This is done automatically by the autotag bot. "
+        "Please run merge_to_branch.py with --help for more information.")
+
+ msg = """Merged: Squashed multiple commits.
+
+Merged: Title4
+Revision: ab12345
+
+Merged: Title2
+Revision: ab23456
+
+Merged: Title3
+Revision: ab34567
+
+Merged: Title1
+Revision: ab45678
+
+Merged: Revert \"Something\"
+Revision: ab56789
+
+BUG=123,234,345,456,567,v8:123
+LOG=N
+NOTRY=true
+NOPRESUBMIT=true
+NOTREECHECKS=true
+"""
+
+ def VerifyLand():
+ commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
+ self.assertEquals(msg, commit)
+
+ self.Expect([
+ Cmd("git status -s -uno", ""),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git fetch", ""),
+ Cmd("git branch", " branch1\n* branch2\n"),
+ Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
+ TEST_CONFIG["BRANCHNAME"], ""),
+ Cmd(("git log --format=%H --grep=\"^[Pp]ort ab12345\" "
+ "--reverse origin/master"),
+ "ab45678\nab23456"),
+ Cmd("git log -1 --format=%s ab45678", "Title1"),
+ Cmd("git log -1 --format=%s ab23456", "Title2"),
+ Cmd(("git log --format=%H --grep=\"^[Pp]ort ab23456\" "
+ "--reverse origin/master"),
+ ""),
+ Cmd(("git log --format=%H --grep=\"^[Pp]ort ab34567\" "
+ "--reverse origin/master"),
+ "ab56789"),
+ Cmd("git log -1 --format=%s ab56789", "Title3"),
+ RL("Y"), # Automatically add corresponding ports (ab34567, ab56789)?
+ # Simulate git being down which stops the script.
+ Cmd("git log -1 --format=%s ab12345", None),
+ # Restart script in the failing step.
+ Cmd("git log -1 --format=%s ab12345", "Title4"),
+ Cmd("git log -1 --format=%s ab23456", "Title2"),
+ Cmd("git log -1 --format=%s ab34567", "Title3"),
+ Cmd("git log -1 --format=%s ab45678", "Title1"),
+ Cmd("git log -1 --format=%s ab56789", "Revert \"Something\""),
+ Cmd("git log -1 ab12345", "Title4\nBUG=123\nBUG=234"),
+ Cmd("git log -1 ab23456", "Title2\n BUG = v8:123,345"),
+ Cmd("git log -1 ab34567", "Title3\nLOG=n\nBUG=567, 456"),
+ Cmd("git log -1 ab45678", "Title1\nBUG="),
+ Cmd("git log -1 ab56789", "Revert \"Something\"\nBUG=none"),
+ Cmd("git log -1 -p ab12345", "patch4"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch4")),
+ Cmd("git log -1 -p ab23456", "patch2"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch2")),
+ Cmd("git log -1 -p ab34567", "patch3"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch3")),
+ Cmd("git log -1 -p ab45678", "patch1"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch1")),
+ Cmd("git log -1 -p ab56789", "patch5\n"),
+ Cmd(("git apply --index --reject \"%s\"" %
+ TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+ "", cb=VerifyPatch("patch5\n")),
+ Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
+ Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
+ RL("reviewer@chromium.org"), # V8 reviewer.
+ Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
+ "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
+ Cmd("git cl comments -a \"%s\"" % info_msg, ""),
+ Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ RL("LGTM"), # Enter LGTM for V8 CL.
+      Cmd("git cl presubmit", "Presubmit successful\n",
+ Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
+ cb=VerifyLand),
+ Cmd("git checkout -f origin/master", ""),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+ ])
+
+ # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the
+ # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567.
+ args = ["-f", "-p", extra_patch, "--branch", "candidates",
+ "ab12345", "ab23456", "ab34567"]
+
+ # The first run of the script stops because of git being down.
+ self.assertRaises(GitFailedException,
lambda: MergeToBranch(TEST_CONFIG, self).Run(args))
# Test that state recovery after restarting the script works.
@@ -1554,6 +1930,8 @@ Cr-Commit-Position: refs/heads/4.2.71@{#1}
self.assertEquals(expected_json, json.loads(FileToText(json_output)))
+
+
class SystemTest(unittest.TestCase):
def testReload(self):
options = ScriptsBase(
diff --git a/deps/v8/tools/run-deopt-fuzzer.gyp b/deps/v8/tools/run-deopt-fuzzer.gyp
index 73f0aaf7a5..9eb6b538bc 100644
--- a/deps/v8/tools/run-deopt-fuzzer.gyp
+++ b/deps/v8/tools/run-deopt-fuzzer.gyp
@@ -13,8 +13,8 @@
'../src/d8.gyp:d8_run',
],
'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'run-deopt-fuzzer.isolate',
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index 970aa8e616..b143430d27 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -74,16 +74,12 @@ SUPPORTED_ARCHS = ["android_arm",
"s390",
"s390x",
"mipsel",
- "nacl_ia32",
- "nacl_x64",
"x64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"android_ia32",
"arm",
- "mipsel",
- "nacl_ia32",
- "nacl_x64"]
+ "mipsel"]
MAX_DEOPT = 1000000000
DISTRIBUTION_MODES = ["smooth", "random"]
@@ -398,7 +394,6 @@ def Execute(arch, mode, args, options, suites, workspace):
"deopt_fuzzer": True,
"gc_stress": False,
"gcov_coverage": False,
- "ignition": False,
"isolates": options.isolates,
"mode": mode,
"no_i18n": False,
diff --git a/deps/v8/tools/run-perf.sh b/deps/v8/tools/run-perf.sh
index 24053b40fb..03123fdbb8 100755
--- a/deps/v8/tools/run-perf.sh
+++ b/deps/v8/tools/run-perf.sh
@@ -13,6 +13,7 @@ SAMPLE_EVERY_N_CYCLES=10000
SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate"
KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict"
CALL_GRAPH_METHOD="fp" # dwarf does not play nice with JITted objects.
+EVENT_TYPE=${EVENT_TYPE:=cycles:u}
########## Usage
@@ -46,7 +47,7 @@ fi
echo "Running..."
perf record -R \
- -e cycles:u \
+ -e $EVENT_TYPE \
-c $SAMPLE_EVERY_N_CYCLES \
--call-graph $CALL_GRAPH_METHOD \
-i $@ --perf_basic_prof
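
With the ${EVENT_TYPE:=cycles:u} default above, the sampled perf event can
now be overridden from the environment instead of by editing the script,
e.g. EVENT_TYPE=instructions:u tools/run-perf.sh <d8> <args> (hypothetical
invocation); left unset, the script still records user-space cycles.
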
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index a380c97ad3..de16463369 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -30,6 +30,7 @@
from collections import OrderedDict
import itertools
+import json
import multiprocessing
import optparse
import os
@@ -44,7 +45,7 @@ import time
from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
-from testrunner.local.testsuite import ALL_VARIANTS
+from testrunner.local.variants import ALL_VARIANTS
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
@@ -81,13 +82,6 @@ TEST_MAP = {
"intl",
"unittests",
],
- # This needs to stay in sync with test/ignition.isolate.
- "ignition": [
- "mjsunit",
- "cctest",
- "webkit",
- "message",
- ],
# This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
"mjsunit",
@@ -102,13 +96,25 @@ TEST_MAP = {
TIMEOUT_DEFAULT = 60
-VARIANTS = ["default", "stress", "turbofan"]
+VARIANTS = ["default", "turbofan", "ignition_staging"]
-EXHAUSTIVE_VARIANTS = VARIANTS + [
- "nocrankshaft",
+MORE_VARIANTS = [
+ "ignition",
+ "stress",
"turbofan_opt",
]
+EXHAUSTIVE_VARIANTS = VARIANTS + MORE_VARIANTS
+
+VARIANT_ALIASES = {
+ # The default for developer workstations.
+ "dev": VARIANTS,
+ # Additional variants, run on all bots.
+ "more": MORE_VARIANTS,
+ # Additional variants, run on a subset of bots.
+ "extra": ["nocrankshaft"],
+}
+
DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
"--nofold-constants", "--enable-slow-asserts",
"--debug-code", "--verify-heap"]
@@ -173,8 +179,6 @@ SUPPORTED_ARCHS = ["android_arm",
"mipsel",
"mips64",
"mips64el",
- "nacl_ia32",
- "nacl_x64",
"s390",
"s390x",
"ppc",
@@ -192,8 +196,8 @@ SLOW_ARCHS = ["android_arm",
"mipsel",
"mips64",
"mips64el",
- "nacl_ia32",
- "nacl_x64",
+ "s390",
+ "s390x",
"x87",
"arm64"]
@@ -246,13 +250,11 @@ def BuildOptions():
result.add_option("--download-data", help="Download missing test suite data",
default=False, action="store_true")
result.add_option("--download-data-only",
- help="Download missing test suite data and exit",
+ help="Deprecated",
default=False, action="store_true")
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
- result.add_option("--ignition", help="Skip tests which don't run in ignition",
- default=False, action="store_true")
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
@@ -272,7 +274,7 @@ def BuildOptions():
default=(utils.GuessOS() != "linux"),
dest="no_network", action="store_true")
result.add_option("--no-presubmit", "--nopresubmit",
- help='Skip presubmit checks',
+ help='Skip presubmit checks (deprecated)',
default=False, dest="no_presubmit", action="store_true")
result.add_option("--no-snap", "--nosnap",
help='Test a build compiled without snapshot.',
@@ -280,17 +282,16 @@ def BuildOptions():
result.add_option("--no-sorting", "--nosorting",
help="Don't sort tests according to duration of last run.",
default=False, dest="no_sorting", action="store_true")
- result.add_option("--no-stress", "--nostress",
- help="Don't run crankshaft --always-opt --stress-op test",
- default=False, dest="no_stress", action="store_true")
result.add_option("--no-variants", "--novariants",
help="Don't run any testing variants",
default=False, dest="no_variants", action="store_true")
result.add_option("--variants",
- help="Comma-separated list of testing variants: %s" % VARIANTS)
+ help="Comma-separated list of testing variants;"
+ " default: \"%s\"" % ",".join(VARIANTS))
result.add_option("--exhaustive-variants",
default=False, action="store_true",
- help="Use exhaustive set of default variants.")
+ help="Use exhaustive set of default variants:"
+ " \"%s\"" % ",".join(EXHAUSTIVE_VARIANTS))
result.add_option("--outdir", help="Base directory with compile output",
default="out")
result.add_option("--predictable",
@@ -326,16 +327,13 @@ def BuildOptions():
help="Don't skip more slow tests when using a simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
- result.add_option("--stress-only",
- help="Only run tests with --always-opt --stress-opt",
- default=False, action="store_true")
result.add_option("--swarming",
help="Indicates running test driver on swarming.",
default=False, action="store_true")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("-t", "--timeout", help="Timeout in seconds",
- default= -1, type="int")
+ default=TIMEOUT_DEFAULT, type="int")
result.add_option("--tsan",
help="Regard test expectations for TSAN",
default=False, action="store_true")
@@ -378,6 +376,10 @@ def BuildbotToV8Mode(config):
def SetupEnvironment(options):
"""Setup additional environment variables."""
+
+ # Many tests assume an English interface.
+ os.environ['LANG'] = 'en_US.UTF-8'
+
symbolizer = 'external_symbolizer_path=%s' % (
os.path.join(
BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
@@ -420,10 +422,36 @@ def SetupEnvironment(options):
])
def ProcessOptions(options):
- global ALL_VARIANTS
- global EXHAUSTIVE_VARIANTS
global VARIANTS
+ # First try to auto-detect configurations based on the build if GN was
+ # used. This can't be overridden by cmd-line arguments.
+ options.auto_detect = False
+ build_config_path = os.path.join(
+ BASE_DIR, options.outdir, "v8_build_config.json")
+ if os.path.exists(build_config_path):
+ try:
+ with open(build_config_path) as f:
+ build_config = json.load(f)
+ except Exception:
+      print ("%s exists but contains invalid JSON. Is your build up-to-date?" %
+ build_config_path)
+ return False
+ options.auto_detect = True
+
+ options.arch_and_mode = None
+ options.arch = build_config["v8_target_cpu"]
+ if options.arch == 'x86':
+ # TODO(machenbach): Transform all to x86 eventually.
+ options.arch = 'ia32'
+ options.asan = build_config["is_asan"]
+ options.dcheck_always_on = build_config["dcheck_always_on"]
+ options.mode = 'debug' if build_config["is_debug"] else 'release'
+ options.msan = build_config["is_msan"]
+ options.no_i18n = not build_config["v8_enable_i18n_support"]
+ options.no_snap = not build_config["v8_use_snapshot"]
+ options.tsan = build_config["is_tsan"]
+
# Architecture and mode related stuff.
if options.arch_and_mode:
options.arch_and_mode = [arch_and_mode.split(".")
@@ -451,11 +479,7 @@ def ProcessOptions(options):
# Special processing of other options, sorted alphabetically.
if options.buildbot:
- # Buildbots run presubmit tests as a separate step.
- options.no_presubmit = True
options.no_network = True
- if options.download_data_only:
- options.no_presubmit = True
if options.command_prefix:
print("Specifying --command-prefix disables network distribution, "
"running tests locally.")
@@ -478,6 +502,8 @@ def ProcessOptions(options):
# Other options for manipulating variants still apply afterwards.
VARIANTS = EXHAUSTIVE_VARIANTS
+ # TODO(machenbach): Figure out how to test a bigger subset of variants on
+ # msan and tsan.
if options.msan:
VARIANTS = ["default"]
@@ -494,23 +520,25 @@ def ProcessOptions(options):
"""Returns true if zero or one of multiple arguments are true."""
return reduce(lambda x, y: x + y, args) <= 1
- if not excl(options.no_stress, options.stress_only, options.no_variants,
- bool(options.variants)):
- print("Use only one of --no-stress, --stress-only, --no-variants, "
- "or --variants.")
+ if not excl(options.no_variants, bool(options.variants)):
+ print("Use only one of --no-variants or --variants.")
return False
if options.quickcheck:
VARIANTS = ["default", "stress"]
options.slow_tests = "skip"
options.pass_fail_tests = "skip"
- if options.no_stress:
- VARIANTS = ["default", "nocrankshaft"]
if options.no_variants:
VARIANTS = ["default"]
- if options.stress_only:
- VARIANTS = ["stress"]
if options.variants:
VARIANTS = options.variants.split(",")
+
+ # Resolve variant aliases.
+ VARIANTS = reduce(
+ list.__add__,
+ (VARIANT_ALIASES.get(v, [v]) for v in VARIANTS),
+ [],
+ )
+
if not set(VARIANTS).issubset(ALL_VARIANTS):
print "All variants must be in %s" % str(ALL_VARIANTS)
return False
@@ -520,6 +548,9 @@ def ProcessOptions(options):
options.extra_flags.append("--verify_predictable")
options.extra_flags.append("--no-inline-new")
+ # Dedupe.
+ VARIANTS = list(set(VARIANTS))
+
if not options.shell_dir:
if options.shell:
print "Warning: --shell is deprecated, use --shell-dir instead."
@@ -591,11 +622,12 @@ def Main():
return 1
SetupEnvironment(options)
+ if options.swarming:
+    # Swarming doesn't print how isolated commands are called. Let's make this
+ # less cryptic by printing it ourselves.
+ print ' '.join(sys.argv)
+
exit_code = 0
- if not options.no_presubmit:
- print ">>> running presubmit tests"
- exit_code = subprocess.call(
- [sys.executable, join(BASE_DIR, "tools", "presubmit.py")])
suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
@@ -633,6 +665,9 @@ def Main():
if options.download_data_only:
return exit_code
+ for s in suites:
+ s.PrepareSources()
+
for (arch, mode) in options.arch_and_mode:
try:
code = Execute(arch, mode, args, options, suites)
@@ -652,6 +687,10 @@ def Execute(arch, mode, args, options, suites):
# buildbot. Currently this is capitalized Release and Debug.
shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
mode = BuildbotToV8Mode(mode)
+ elif options.auto_detect:
+ # If an output dir with a build was passed, test directly in that
+ # directory.
+ shell_dir = os.path.join(BASE_DIR, options.outdir)
else:
shell_dir = os.path.join(
BASE_DIR,
@@ -663,19 +702,16 @@ def Execute(arch, mode, args, options, suites):
# Populate context object.
mode_flags = MODES[mode]["flags"]
- timeout = options.timeout
- if timeout == -1:
- # Simulators are slow, therefore allow a longer default timeout.
- if arch in SLOW_ARCHS:
- timeout = 2 * TIMEOUT_DEFAULT;
- else:
- timeout = TIMEOUT_DEFAULT;
- timeout *= MODES[mode]["timeout_scalefactor"]
+ # Simulators are slow, therefore allow a longer timeout.
+ if arch in SLOW_ARCHS:
+ options.timeout *= 2
+
+ options.timeout *= MODES[mode]["timeout_scalefactor"]
if options.predictable:
# Predictable mode is slower.
- timeout *= 2
+ options.timeout *= 2
# TODO(machenbach): Remove temporary verbose output on windows after
# debugging driver-hung-up on XP.
@@ -685,7 +721,8 @@ def Execute(arch, mode, args, options, suites):
)
ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
mode_flags, verbose_output,
- timeout, options.isolates,
+ options.timeout,
+ options.isolates,
options.command_prefix,
options.extra_flags,
options.no_i18n,
@@ -699,6 +736,8 @@ def Execute(arch, mode, args, options, suites):
sancov_dir=options.sancov_dir)
# TODO(all): Combine "simulator" and "simulator_run".
+ # TODO(machenbach): In GN we can derive simulator run from
+ # target_arch != v8_target_arch in the dumped build config.
simulator_run = not options.dont_skip_simulator_slow_tests and \
arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
'ppc', 'ppc64'] and \
@@ -710,7 +749,6 @@ def Execute(arch, mode, args, options, suites):
"deopt_fuzzer": False,
"gc_stress": options.gc_stress,
"gcov_coverage": options.gcov_coverage,
- "ignition": options.ignition,
"isolates": options.isolates,
"mode": MODES[mode]["status_mode"],
"no_i18n": options.no_i18n,
@@ -733,8 +771,12 @@ def Execute(arch, mode, args, options, suites):
if len(args) > 0:
s.FilterTestCasesByArgs(args)
all_tests += s.tests
+
+ # First filtering by status applying the generic rules (independent of
+ # variants).
s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
options.pass_fail_tests)
+
if options.cat:
verbose.PrintTestSource(s.tests)
continue
@@ -762,6 +804,10 @@ def Execute(arch, mode, args, options, suites):
else:
s.tests = variant_tests
+ # Second filtering by status applying the variant-dependent rules.
+ s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
+ options.pass_fail_tests, variants=True)
+
s.tests = ShardTests(s.tests, options)
num_tests += len(s.tests)
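
The alias resolution added above lets --variants mix concrete variant names
with the dev/more/extra groups; the reduce() expands each entry through
VARIANT_ALIASES and the later list(set(...)) pass removes overlaps. A
minimal sketch using the tables from this patch:

    VARIANTS = ["default", "turbofan", "ignition_staging"]
    MORE_VARIANTS = ["ignition", "stress", "turbofan_opt"]
    VARIANT_ALIASES = {"dev": VARIANTS, "more": MORE_VARIANTS,
                       "extra": ["nocrankshaft"]}

    selected = ["dev", "stress"]   # as if --variants=dev,stress was passed
    resolved = reduce(list.__add__,
                      (VARIANT_ALIASES.get(v, [v]) for v in selected), [])
    print sorted(set(resolved))
    # ['default', 'ignition_staging', 'stress', 'turbofan']
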
diff --git a/deps/v8/tools/run-valgrind.gyp b/deps/v8/tools/run-valgrind.gyp
index d06be933a9..02dd26d22c 100644
--- a/deps/v8/tools/run-valgrind.gyp
+++ b/deps/v8/tools/run-valgrind.gyp
@@ -13,8 +13,8 @@
'../src/d8.gyp:d8_run',
],
'includes': [
- '../build/features.gypi',
- '../build/isolate.gypi',
+ '../gypfiles/features.gypi',
+ '../gypfiles/isolate.gypi',
],
'sources': [
'run-valgrind.isolate',
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index db4245f499..2b406bd782 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -113,8 +113,6 @@ SUPPORTED_ARCHS = ["arm",
"ia32",
"mips",
"mipsel",
- "nacl_ia32",
- "nacl_x64",
"x64",
"arm64"]
@@ -128,17 +126,20 @@ def LoadAndroidBuildTools(path): # pragma: no cover
assert os.path.exists(path)
sys.path.insert(0, path)
- from pylib.device import adb_wrapper # pylint: disable=F0401
- from pylib.device import device_errors # pylint: disable=F0401
- from pylib.device import device_utils # pylint: disable=F0401
- from pylib.perf import cache_control # pylint: disable=F0401
- from pylib.perf import perf_control # pylint: disable=F0401
+ import devil_chromium
+ from devil.android import device_errors # pylint: disable=import-error
+ from devil.android import device_utils # pylint: disable=import-error
+ from devil.android.sdk import adb_wrapper # pylint: disable=import-error
+ from devil.android.perf import cache_control # pylint: disable=import-error
+ from devil.android.perf import perf_control # pylint: disable=import-error
global adb_wrapper
global cache_control
global device_errors
global device_utils
global perf_control
+ devil_chromium.Initialize()
+
def GeometricMean(values):
"""Returns the geometric mean of a list of values.
@@ -612,6 +613,21 @@ class Platform(object):
class DesktopPlatform(Platform):
def __init__(self, options):
super(DesktopPlatform, self).__init__(options)
+ self.command_prefix = []
+
+ if options.prioritize or options.affinitize != None:
+ self.command_prefix = ["schedtool"]
+ if options.prioritize:
+ self.command_prefix += ["-n", "-20"]
+ if options.affinitize != None:
+ # schedtool expects a bit pattern when setting affinity, where each
+ # bit set to '1' corresponds to a core where the process may run on.
+ # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
+ # a core number, we need to map to said bit pattern.
+ cpu = int(options.affinitize)
+ core = 1 << cpu
+ self.command_prefix += ["-a", ("0x%x" % core)]
+ self.command_prefix += ["-e"]
def PreExecution(self):
pass
@@ -627,15 +643,18 @@ class DesktopPlatform(Platform):
suffix = ' - without patch' if no_patch else ''
shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
+ command = self.command_prefix + runnable.GetCommand(shell_dir,
+ self.extra_flags)
try:
output = commands.Execute(
- runnable.GetCommand(shell_dir, self.extra_flags),
- timeout=runnable.timeout,
+ command,
+ timeout=runnable.timeout,
)
except OSError as e: # pragma: no cover
print title % "OSError"
print e
return ""
+
print title % "Stdout"
print output.stdout
if output.stderr: # pragma: no cover
@@ -788,6 +807,109 @@ class AndroidPlatform(Platform): # pragma: no cover
stdout = ""
return stdout
+class CustomMachineConfiguration:
+ def __init__(self, disable_aslr = False, governor = None):
+ self.aslr_backup = None
+ self.governor_backup = None
+ self.disable_aslr = disable_aslr
+ self.governor = governor
+
+ def __enter__(self):
+ if self.disable_aslr:
+ self.aslr_backup = CustomMachineConfiguration.GetASLR()
+ CustomMachineConfiguration.SetASLR(0)
+ if self.governor != None:
+ self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
+ CustomMachineConfiguration.SetCPUGovernor(self.governor)
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if self.aslr_backup != None:
+ CustomMachineConfiguration.SetASLR(self.aslr_backup)
+ if self.governor_backup != None:
+ CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)
+
+ @staticmethod
+ def GetASLR():
+ try:
+ with open("/proc/sys/kernel/randomize_va_space", "r") as f:
+ return int(f.readline().strip())
+ except Exception as e:
+ print "Failed to get current ASLR settings."
+ raise e
+
+ @staticmethod
+ def SetASLR(value):
+ try:
+ with open("/proc/sys/kernel/randomize_va_space", "w") as f:
+ f.write(str(value))
+ except Exception as e:
+ print "Failed to update ASLR to %s." % value
+ print "Are we running under sudo?"
+ raise e
+
+ new_value = CustomMachineConfiguration.GetASLR()
+ if value != new_value:
+ raise Exception("Failed to set ASLR. Present value is %s" % new_value)
+
+ @staticmethod
+ def GetCPUCoresRange():
+ try:
+ with open("/sys/devices/system/cpu/present", "r") as f:
+ indexes = f.readline()
+ r = map(int, indexes.split("-"))
+ if len(r) == 1:
+ return range(r[0], r[0] + 1)
+ return range(r[0], r[1] + 1)
+ except Exception as e:
+ print "Failed to retrieve number of CPUs."
+ raise e
+
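GetCPUCoresRange parses the kernel's present-CPU string, which is either a
single index or an inclusive range. A standalone sketch of that parsing in the
tool's Python 2 idiom (helper name hypothetical, not part of the patch):

    def parse_present_cpus(line):
        # "/sys/devices/system/cpu/present" holds e.g. "0" or "0-7".
        bounds = map(int, line.strip().split("-"))
        return range(bounds[0], bounds[-1] + 1)

    assert parse_present_cpus("0") == [0]  # single-CPU machine
    assert parse_present_cpus("0-3") == [0, 1, 2, 3]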
+ @staticmethod
+ def GetCPUPathForId(cpu_index):
+ ret = "/sys/devices/system/cpu/cpu"
+ ret += str(cpu_index)
+ ret += "/cpufreq/scaling_governor"
+ return ret
+
+ @staticmethod
+ def GetCPUGovernor():
+ try:
+ cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
+ ret = None
+ for cpu_index in cpu_indices:
+ cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
+ with open(cpu_device, "r") as f:
+ # We assume the governors of all CPUs are set to the same value
+ val = f.readline().strip()
+ if ret == None:
+ ret = val
+ elif ret != val:
+ raise Exception("CPU cores have differing governor settings")
+ return ret
+ except Exception as e:
+ print "Failed to get the current CPU governor."
+ print "Is the CPU governor disabled? Check BIOS."
+ raise e
+
+ @staticmethod
+ def SetCPUGovernor(value):
+ try:
+ cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
+ for cpu_index in cpu_indices:
+ cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
+ with open(cpu_device, "w") as f:
+ f.write(value)
+
+ except Exception as e:
+ print "Failed to change CPU governor to %s." % value
+ print "Are we running under sudo?"
+ raise e
+
+ cur_value = CustomMachineConfiguration.GetCPUGovernor()
+ if cur_value != value:
+ raise Exception("Could not set CPU governor. Present value is %s"
+ % cur_value)
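Because CustomMachineConfiguration is a context manager, machine state saved
in __enter__ is restored in __exit__ even when a run raises. A usage sketch;
run_benchmarks is a hypothetical stand-in, not part of the patch:

    with CustomMachineConfiguration(disable_aslr=True,
                                    governor="performance") as conf:
        run_benchmarks()  # any exception here still triggers __exit__
    # On exit, ASLR and every core's scaling_governor revert to the values
    # captured on entry.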
# TODO: Implement results_processor.
def Main(args):
@@ -822,6 +944,27 @@ def Main(args):
help="JavaScript engine binary. By default, d8 under "
"architecture-specific build dir. "
"Not supported in conjunction with outdir-no-patch.")
+ parser.add_option("--prioritize",
+ help="Raise the priority to nice -20 for the benchmarking "
+ "process. Requires Linux, schedtool, and sudo privileges.",
+ default=False, action="store_true")
+ parser.add_option("--affinitize",
+ help="Run benchmarking process on the specified core. "
+ "For example: "
+ "--affinitize=0 will run the benchmark process on core 0. "
+ "--affinitize=3 will run the benchmark process on core 3. "
+ "Requires Linux, schedtool, and sudo privileges.",
+ default=None)
+ parser.add_option("--noaslr",
+ help="Disable ASLR for the duration of the benchmarked "
+ "process. Requires Linux and sudo privileges.",
+ default=False, action="store_true")
+ parser.add_option("--cpu-governor",
+ help="Set cpu governor to specified policy for the "
+ "duration of the benchmarked process. Typical options: "
+ "'powersave' for more stable results, or 'performance' "
+ "for shorter suite completion time, with potentially "
+ "more noise in results.")
(options, args) = parser.parse_args(args)
@@ -872,56 +1015,60 @@ def Main(args):
else:
options.shell_dir_no_patch = None
+ prev_aslr = None
+ prev_cpu_gov = None
platform = Platform.GetPlatform(options)
results = Results()
results_no_patch = Results()
- for path in args:
- path = os.path.abspath(path)
+ with CustomMachineConfiguration(governor = options.cpu_governor,
+ disable_aslr = options.noaslr) as conf:
+ for path in args:
+ path = os.path.abspath(path)
- if not os.path.exists(path): # pragma: no cover
- results.errors.append("Configuration file %s does not exist." % path)
- continue
+ if not os.path.exists(path): # pragma: no cover
+ results.errors.append("Configuration file %s does not exist." % path)
+ continue
- with open(path) as f:
- suite = json.loads(f.read())
+ with open(path) as f:
+ suite = json.loads(f.read())
- # If no name is given, default to the file name without .json.
- suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
+ # If no name is given, default to the file name without .json.
+ suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
- # Setup things common to one test suite.
- platform.PreExecution()
+ # Setup things common to one test suite.
+ platform.PreExecution()
- # Build the graph/trace tree structure.
- default_parent = DefaultSentinel(default_binary_name)
- root = BuildGraphConfigs(suite, options.arch, default_parent)
+ # Build the graph/trace tree structure.
+ default_parent = DefaultSentinel(default_binary_name)
+ root = BuildGraphConfigs(suite, options.arch, default_parent)
- # Callback to be called on each node on traversal.
- def NodeCB(node):
- platform.PreTests(node, path)
+ # Callback to be called on each node on traversal.
+ def NodeCB(node):
+ platform.PreTests(node, path)
- # Traverse graph/trace tree and interate over all runnables.
- for runnable in FlattenRunnables(root, NodeCB):
- print ">>> Running suite: %s" % "/".join(runnable.graphs)
+ # Traverse graph/trace tree and iterate over all runnables.
+ for runnable in FlattenRunnables(root, NodeCB):
+ print ">>> Running suite: %s" % "/".join(runnable.graphs)
- def Runner():
- """Output generator that reruns several times."""
- for i in xrange(0, max(1, runnable.run_count)):
- # TODO(machenbach): Allow timeout per arch like with run_count per
- # arch.
- yield platform.Run(runnable, i)
+ def Runner():
+ """Output generator that reruns several times."""
+ for i in xrange(0, max(1, runnable.run_count)):
+ # TODO(machenbach): Allow timeout per arch like with run_count per
+ # arch.
+ yield platform.Run(runnable, i)
- # Let runnable iterate over all runs and handle output.
- result, result_no_patch = runnable.Run(
+ # Let runnable iterate over all runs and handle output.
+ result, result_no_patch = runnable.Run(
Runner, trybot=options.shell_dir_no_patch)
- results += result
- results_no_patch += result_no_patch
- platform.PostExecution()
-
- if options.json_test_results:
- results.WriteToFile(options.json_test_results)
- else: # pragma: no cover
- print results
+ results += result
+ results_no_patch += result_no_patch
+ platform.PostExecution()
+
+ if options.json_test_results:
+ results.WriteToFile(options.json_test_results)
+ else: # pragma: no cover
+ print results
if options.json_test_results_no_patch:
results_no_patch.WriteToFile(options.json_test_results_no_patch)
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
index e725d112f9..a9315cb78c 100644
--- a/deps/v8/tools/testrunner/local/commands.py
+++ b/deps/v8/tools/testrunner/local/commands.py
@@ -111,8 +111,8 @@ def RunProcess(verbose, timeout, args, **rest):
return output.Output(
process.returncode,
timeout_result[0],
- stdout,
- stderr,
+ stdout.decode('utf-8', 'replace').encode('utf-8'),
+ stderr.decode('utf-8', 'replace').encode('utf-8'),
process.pid,
)
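The decode/encode round-trip above scrubs bytes that are not valid UTF-8,
replacing them with U+FFFD so downstream consumers never see undecodable
output. A sketch of the effect in the tool's Python 2 idiom (sample bytes
hypothetical):

    raw = 'ok \xff'                                  # invalid UTF-8 byte
    clean = raw.decode('utf-8', 'replace').encode('utf-8')
    assert clean == 'ok \xef\xbf\xbd'                # U+FFFD, UTF-8 encoded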
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index e0aec0bb90..f3d11a8b5c 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -248,7 +248,6 @@ class Runner(object):
self.total += 1
def _ProcessTestNormal(self, test, result, pool):
- self.indicator.AboutToRun(test)
test.output = result[1]
test.duration = result[2]
has_unexpected_output = test.suite.HasUnexpectedOutput(test)
@@ -285,7 +284,6 @@ class Runner(object):
if test.run == 1 and result[1].HasTimedOut():
# If we get a timeout in the first run, we are already in an
# unpredictable state. Just report it as a failure and don't rerun.
- self.indicator.AboutToRun(test)
test.output = result[1]
self.remaining -= 1
self.failed.append(test)
@@ -294,16 +292,13 @@ class Runner(object):
# From the second run on, check for different allocations. If a
# difference is found, call the indicator twice to report both tests.
# All runs of each test are counted as one for the statistic.
- self.indicator.AboutToRun(test)
self.remaining -= 1
self.failed.append(test)
self.indicator.HasRun(test, True)
- self.indicator.AboutToRun(test)
test.output = result[1]
self.indicator.HasRun(test, True)
elif test.run >= 3:
# No difference on the third run -> report a success.
- self.indicator.AboutToRun(test)
self.remaining -= 1
self.succeeded += 1
test.output = result[1]
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index 4e1be3e4cf..33e27e154b 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -34,6 +34,7 @@ import time
from . import execution
from . import junit_output
+from . import statusfile
ABS_PATH_PREFIX = os.getcwd() + os.sep
@@ -53,9 +54,6 @@ class ProgressIndicator(object):
def Done(self):
pass
- def AboutToRun(self, test):
- pass
-
def HasRun(self, test, has_unexpected_output):
pass
@@ -146,10 +144,6 @@ class SimpleProgressIndicator(ProgressIndicator):
class VerboseProgressIndicator(SimpleProgressIndicator):
- def AboutToRun(self, test):
- print 'Starting %s...' % test.GetLabel()
- sys.stdout.flush()
-
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
if test.output.HasCrashed():
@@ -200,10 +194,8 @@ class CompactProgressIndicator(ProgressIndicator):
self.PrintProgress('Done')
print "" # Line break.
- def AboutToRun(self, test):
- self.PrintProgress(test.GetLabel())
-
def HasRun(self, test, has_unexpected_output):
+ self.PrintProgress(test.GetLabel())
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
@@ -329,6 +321,12 @@ class JsonTestProgressIndicator(ProgressIndicator):
# Buildbot might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
+ duration_mean = None
+ if self.tests:
+ # Get duration mean.
+ duration_mean = (
+ sum(t.duration for t in self.tests) / float(len(self.tests)))
+
# Sort tests by duration.
timed_tests = [t for t in self.tests if t.duration is not None]
timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
@@ -338,6 +336,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"flags": test.flags,
"command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
"duration": test.duration,
+ "marked_slow": statusfile.IsSlow(test.outcomes),
} for test in timed_tests[:20]
]
@@ -346,6 +345,8 @@ class JsonTestProgressIndicator(ProgressIndicator):
"mode": self.mode,
"results": self.results,
"slowest_tests": slowest_tests,
+ "duration_mean": duration_mean,
+ "test_total": len(self.tests),
})
with open(self.json_test_results, "w") as f:
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 7e96cc3715..091754043b 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -26,6 +26,10 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
+import re
+
+from variants import ALL_VARIANTS
+from utils import Freeze
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
@@ -57,8 +61,12 @@ VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
"android_arm", "android_arm64", "android_ia32", "android_x87",
"android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
- "mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
- "s390", "s390x", "macos", "windows", "linux", "aix"]:
+ "mips64el", "x64", "x87", "ppc", "ppc64", "s390", "s390x", "macos",
+ "windows", "linux", "aix"]:
+ VARIABLES[var] = var
+
+# Allow using variants as keywords.
+for var in ALL_VARIANTS:
VARIABLES[var] = var
@@ -100,6 +108,44 @@ def _AddOutcome(result, new):
result.add(new)
+def _JoinsPassAndFail(outcomes1, outcomes2):
+ """Indicates whether joining the two outcome sets would combine PASS and
+ FAIL, i.e. the first set has PASS but not FAIL and the second has FAIL.
+ """
+ return (
+ PASS in outcomes1 and
+ not FAIL in outcomes1 and
+ FAIL in outcomes2
+ )
+
+VARIANT_EXPRESSION = object()
+
+def _EvalExpression(exp, variables):
+ try:
+ return eval(exp, variables)
+ except NameError as e:
+ identifier = re.match("name '(.*)' is not defined", e.message).group(1)
+ assert identifier == "variant", "Unknown identifier: %s" % identifier
+ return VARIANT_EXPRESSION
+
+
+def _EvalVariantExpression(section, rules, wildcards, variant, variables):
+ variables_with_variant = {}
+ variables_with_variant.update(variables)
+ variables_with_variant["variant"] = variant
+ result = _EvalExpression(section[0], variables_with_variant)
+ assert result != VARIANT_EXPRESSION
+ if result is True:
+ _ReadSection(
+ section[1],
+ rules[variant],
+ wildcards[variant],
+ variables_with_variant,
+ )
+ else:
+ assert result is False, "Make sure expressions evaluate to boolean values"
+
+
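For illustration, guard expressions evaluate against a namespace that maps
each keyword to itself, and an unresolved "variant" name marks the section as
variant-dependent. A simplified sketch, using a string sentinel instead of the
module's object() and skipping the identifier check; not part of the patch:

    def eval_guard(exp, variables):
        try:
            return eval(exp, dict(variables))
        except NameError:
            return 'VARIANT_EXPRESSION'  # stand-in for the sentinel object()

    ns = {'system': 'linux', 'linux': 'linux', 'windows': 'windows'}
    assert eval_guard('system==linux', ns) is True
    assert eval_guard('system==windows', ns) is False
    assert eval_guard('variant==default', ns) == 'VARIANT_EXPRESSION'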
def _ParseOutcomeList(rule, outcomes, target_dict, variables):
result = set([])
if type(outcomes) == str:
@@ -108,7 +154,16 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
if type(item) == str:
_AddOutcome(result, item)
elif type(item) == list:
- if not eval(item[0], variables): continue
+ exp = _EvalExpression(item[0], variables)
+ assert exp != VARIANT_EXPRESSION, (
+ "Nested variant expressions are not supported")
+ if exp is False:
+ continue
+
+ # Ensure nobody uses an identifier by mistake, like "default",
+ # which would evaluate to true here otherwise.
+ assert exp is True, "Make sure expressions evaluate to boolean values"
+
for outcome in item[1:]:
assert type(outcome) == str
_AddOutcome(result, outcome)
@@ -116,40 +171,71 @@ def _ParseOutcomeList(rule, outcomes, target_dict, variables):
assert False
if len(result) == 0: return
if rule in target_dict:
+ # A FAIL without PASS in one rule always has precedence over a single
+ # PASS (without FAIL) in another. Otherwise the default PASS expectation
+ # in a rule with a modifier (e.g. PASS, SLOW) would be joined to a FAIL
+ # from another rule (which intended to mark a test as FAIL and not as
+ # PASS and FAIL).
+ if _JoinsPassAndFail(target_dict[rule], result):
+ target_dict[rule] -= set([PASS])
+ if _JoinsPassAndFail(result, target_dict[rule]):
+ result -= set([PASS])
target_dict[rule] |= result
else:
target_dict[rule] = result
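A worked example of the precedence rule implemented above: merging a FAIL-only
rule into a PASS-bearing one drops the implicit PASS instead of producing a
contradictory PASS-and-FAIL expectation (values illustrative, not part of the
patch):

    existing = set(['PASS', 'SLOW'])   # e.g. parsed from [PASS, SLOW]
    incoming = set(['FAIL'])           # another rule marks the test FAIL
    if 'PASS' in existing and 'FAIL' not in existing and 'FAIL' in incoming:
        existing -= set(['PASS'])      # drop the implicit PASS
    existing |= incoming
    assert existing == set(['SLOW', 'FAIL'])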
-def ReadContent(path):
- with open(path) as f:
- global KEYWORDS
- return eval(f.read(), KEYWORDS)
+def ReadContent(content):
+ global KEYWORDS
+ return eval(content, KEYWORDS)
-def ReadStatusFile(path, variables):
- contents = ReadContent(path)
+def ReadStatusFile(content, variables):
+ # Empty defaults for rules and wildcards. Variant-independent
+ # rules are mapped by "", others by the variant name.
+ rules = {variant: {} for variant in ALL_VARIANTS}
+ rules[""] = {}
+ wildcards = {variant: {} for variant in ALL_VARIANTS}
+ wildcards[""] = {}
- rules = {}
- wildcards = {}
variables.update(VARIABLES)
- for section in contents:
+ for section in ReadContent(content):
assert type(section) == list
assert len(section) == 2
- if not eval(section[0], variables): continue
- section = section[1]
- assert type(section) == dict
- for rule in section:
- assert type(rule) == str
- if rule[-1] == '*':
- _ParseOutcomeList(rule, section[rule], wildcards, variables)
- else:
- _ParseOutcomeList(rule, section[rule], rules, variables)
- return rules, wildcards
+ exp = _EvalExpression(section[0], variables)
+ if exp is False:
+ # The expression is variant-independent and evaluates to False.
+ continue
+ elif exp == VARIANT_EXPRESSION:
+ # If the expression contains one or more "variant" keywords, we evaluate
+ # it for all possible variants and create rules for those that apply.
+ for variant in ALL_VARIANTS:
+ _EvalVariantExpression(section, rules, wildcards, variant, variables)
+ else:
+ # The expression is variant-independent and evaluates to True.
+ assert exp is True, "Make sure expressions evaluate to boolean values"
+ _ReadSection(
+ section[1],
+ rules[""],
+ wildcards[""],
+ variables,
+ )
+ return Freeze(rules), Freeze(wildcards)
+
+
+def _ReadSection(section, rules, wildcards, variables):
+ assert type(section) == dict
+ for rule in section:
+ assert type(rule) == str
+ if rule[-1] == '*':
+ _ParseOutcomeList(rule, section[rule], wildcards, variables)
+ else:
+ _ParseOutcomeList(rule, section[rule], rules, variables)
def PresubmitCheck(path):
- contents = ReadContent(path)
+ with open(path) as f:
+ contents = ReadContent(f.read())
root_prefix = os.path.basename(os.path.dirname(path)) + "/"
status = {"success": True}
def _assert(check, message): # Like "assert", but doesn't throw.
diff --git a/deps/v8/tools/testrunner/local/statusfile_unittest.py b/deps/v8/tools/testrunner/local/statusfile_unittest.py
new file mode 100755
index 0000000000..f64ab3425e
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/statusfile_unittest.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import statusfile
+from utils import Freeze
+
+
+TEST_VARIABLES = {
+ 'system': 'linux',
+ 'mode': 'release',
+}
+
+
+TEST_STATUS_FILE = """
+[
+[ALWAYS, {
+ 'foo/bar': [PASS, SKIP],
+ 'baz/bar': [PASS, FAIL],
+ 'foo/*': [PASS, SLOW],
+}], # ALWAYS
+
+['%s', {
+ 'baz/bar': [PASS, SLOW],
+ 'foo/*': [FAIL],
+}],
+]
+"""
+
+
+def make_variables():
+ variables = {}
+ variables.update(TEST_VARIABLES)
+ return variables
+
+
+class UtilsTest(unittest.TestCase):
+ def test_freeze(self):
+ self.assertEqual(2, Freeze({1: [2]})[1][0])
+ self.assertEqual(set([3]), Freeze({1: [2], 2: set([3])})[2])
+
+ with self.assertRaises(Exception):
+ Freeze({1: [], 2: set([3])})[2] = 4
+ with self.assertRaises(Exception):
+ Freeze({1: [], 2: set([3])}).update({3: 4})
+ with self.assertRaises(Exception):
+ Freeze({1: [], 2: set([3])})[1].append(2)
+ with self.assertRaises(Exception):
+ Freeze({1: [], 2: set([3])})[2] |= set([3])
+
+ # Sanity check that we can do the same calls on a non-frozen object.
+ {1: [], 2: set([3])}[2] = 4
+ {1: [], 2: set([3])}.update({3: 4})
+ {1: [], 2: set([3])}[1].append(2)
+ {1: [], 2: set([3])}[2] |= set([3])
+
+
+class StatusFileTest(unittest.TestCase):
+ def test_eval_expression(self):
+ variables = make_variables()
+ variables.update(statusfile.VARIABLES)
+
+ self.assertTrue(
+ statusfile._EvalExpression(
+ 'system==linux and mode==release', variables))
+ self.assertTrue(
+ statusfile._EvalExpression(
+ 'system==linux or variant==default', variables))
+ self.assertFalse(
+ statusfile._EvalExpression(
+ 'system==linux and mode==debug', variables))
+ self.assertRaises(
+ AssertionError,
+ lambda: statusfile._EvalExpression(
+ 'system==linux and mode==foo', variables))
+ self.assertRaises(
+ SyntaxError,
+ lambda: statusfile._EvalExpression(
+ 'system==linux and mode=release', variables))
+ self.assertEquals(
+ statusfile.VARIANT_EXPRESSION,
+ statusfile._EvalExpression(
+ 'system==linux and variant==default', variables)
+ )
+
+ def test_read_statusfile_section_true(self):
+ rules, wildcards = statusfile.ReadStatusFile(
+ TEST_STATUS_FILE % 'system==linux', make_variables())
+
+ self.assertEquals(
+ {
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL', 'SLOW']),
+ },
+ rules[''],
+ )
+ self.assertEquals(
+ {
+ 'foo/*': set(['SLOW', 'FAIL']),
+ },
+ wildcards[''],
+ )
+ self.assertEquals({}, rules['default'])
+ self.assertEquals({}, wildcards['default'])
+
+ def test_read_statusfile_section_false(self):
+ rules, wildcards = statusfile.ReadStatusFile(
+ TEST_STATUS_FILE % 'system==windows', make_variables())
+
+ self.assertEquals(
+ {
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL']),
+ },
+ rules[''],
+ )
+ self.assertEquals(
+ {
+ 'foo/*': set(['PASS', 'SLOW']),
+ },
+ wildcards[''],
+ )
+ self.assertEquals({}, rules['default'])
+ self.assertEquals({}, wildcards['default'])
+
+ def test_read_statusfile_section_variant(self):
+ rules, wildcards = statusfile.ReadStatusFile(
+ TEST_STATUS_FILE % 'system==linux and variant==default',
+ make_variables(),
+ )
+
+ self.assertEquals(
+ {
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL']),
+ },
+ rules[''],
+ )
+ self.assertEquals(
+ {
+ 'foo/*': set(['PASS', 'SLOW']),
+ },
+ wildcards[''],
+ )
+ self.assertEquals(
+ {
+ 'baz/bar': set(['PASS', 'SLOW']),
+ },
+ rules['default'],
+ )
+ self.assertEquals(
+ {
+ 'foo/*': set(['FAIL']),
+ },
+ wildcards['default'],
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index f43d008b22..11d2207427 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -33,30 +33,9 @@ from . import commands
from . import statusfile
from . import utils
from ..objects import testcase
+from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS
+
-# Use this to run several variants of the tests.
-ALL_VARIANT_FLAGS = {
- "default": [[]],
- "stress": [["--stress-opt", "--always-opt"]],
- "turbofan": [["--turbo"]],
- "turbofan_opt": [["--turbo", "--always-opt"]],
- "nocrankshaft": [["--nocrankshaft"]],
- "ignition": [["--ignition", "--turbo"]],
- "preparser": [["--min-preparse-length=0"]],
-}
-
-# FAST_VARIANTS implies no --always-opt.
-FAST_VARIANT_FLAGS = {
- "default": [[]],
- "stress": [["--stress-opt"]],
- "turbofan": [["--turbo"]],
- "nocrankshaft": [["--nocrankshaft"]],
- "ignition": [["--ignition", "--turbo"]],
- "preparser": [["--min-preparse-length=0"]],
-}
-
-ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
- "nocrankshaft", "ignition", "preparser"])
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
@@ -69,12 +48,13 @@ class VariantGenerator(object):
self.standard_variant = STANDARD_VARIANT & variants
def FilterVariantsByTest(self, testcase):
- if testcase.outcomes and statusfile.OnlyStandardVariant(
- testcase.outcomes):
- return self.standard_variant
- if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
- return self.fast_variants
- return self.all_variants
+ result = self.all_variants
+ if testcase.outcomes:
+ if statusfile.OnlyStandardVariant(testcase.outcomes):
+ return self.standard_variant
+ if statusfile.OnlyFastVariants(testcase.outcomes):
+ result = self.fast_variants
+ return result
def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
@@ -142,12 +122,21 @@ class TestSuite(object):
"""
return self._VariantGeneratorFactory()(self, set(variants))
+ def PrepareSources(self):
+ """Called once before multiprocessing for doing file-system operations.
+
+ This should not access the network. For network access use the method
+ below.
+ """
+ pass
+
def DownloadData(self):
pass
def ReadStatusFile(self, variables):
- (self.rules, self.wildcards) = \
- statusfile.ReadStatusFile(self.status_file(), variables)
+ with open(self.status_file()) as f:
+ self.rules, self.wildcards = (
+ statusfile.ReadStatusFile(f.read(), variables))
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
@@ -162,18 +151,40 @@ class TestSuite(object):
def FilterTestCasesByStatus(self, warn_unused_rules,
slow_tests="dontcare",
- pass_fail_tests="dontcare"):
+ pass_fail_tests="dontcare",
+ variants=False):
+
+ # Use variant-dependent rules and wildcards when filtering variant test
+ # cases, and generic rules when filtering generic test cases.
+ if not variants:
+ rules = self.rules[""]
+ wildcards = self.wildcards[""]
+ else:
+ # We set rules and wildcards to a variant-specific version for each test
+ # below.
+ rules = {}
+ wildcards = {}
+
filtered = []
+
+ # Remember used rules as tuples of (rule, variant), where variant is "" for
+ # variant-independent rules.
used_rules = set()
+
for t in self.tests:
slow = False
pass_fail = False
testname = self.CommonTestName(t)
- if testname in self.rules:
- used_rules.add(testname)
+ variant = t.variant or ""
+ if variants:
+ rules = self.rules[variant]
+ wildcards = self.wildcards[variant]
+ if testname in rules:
+ used_rules.add((testname, variant))
# Even for skipped tests, as the TestCase object stays around and
# PrintReport() uses it.
- t.outcomes = self.rules[testname]
+ t.outcomes = t.outcomes | rules[testname]
if statusfile.DoSkip(t.outcomes):
continue # Don't add skipped tests to |filtered|.
for outcome in t.outcomes:
@@ -182,14 +193,14 @@ class TestSuite(object):
slow = statusfile.IsSlow(t.outcomes)
pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
- for rule in self.wildcards:
+ for rule in wildcards:
assert rule[-1] == '*'
if testname.startswith(rule[:-1]):
- used_rules.add(rule)
- t.outcomes |= self.wildcards[rule]
+ used_rules.add((rule, variant))
+ t.outcomes = t.outcomes | wildcards[rule]
if statusfile.DoSkip(t.outcomes):
skip = True
- break # "for rule in self.wildcards"
+ break # "for rule in wildcards"
slow = slow or statusfile.IsSlow(t.outcomes)
pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
if (skip
@@ -202,12 +213,26 @@ class TestSuite(object):
if not warn_unused_rules:
return
- for rule in self.rules:
- if rule not in used_rules:
- print("Unused rule: %s -> %s" % (rule, self.rules[rule]))
- for rule in self.wildcards:
- if rule not in used_rules:
- print("Unused rule: %s -> %s" % (rule, self.wildcards[rule]))
+ if not variants:
+ for rule in self.rules[""]:
+ if (rule, "") not in used_rules:
+ print("Unused rule: %s -> %s (variant independent)" % (
+ rule, self.rules[""][rule]))
+ for rule in self.wildcards[""]:
+ if (rule, "") not in used_rules:
+ print("Unused rule: %s -> %s (variant independent)" % (
+ rule, self.wildcards[""][rule]))
+ else:
+ for variant in ALL_VARIANTS:
+ for rule in self.rules[variant]:
+ if (rule, variant) not in used_rules:
+ print("Unused rule: %s -> %s (variant: %s)" % (
+ rule, self.rules[variant][rule], variant))
+ for rule in self.wildcards[variant]:
+ if (rule, variant) not in used_rules:
+ print("Unused rule: %s -> %s (variant: %s)" % (
+ rule, self.wildcards[variant][rule], variant))
+
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
diff --git a/deps/v8/tools/testrunner/local/testsuite_unittest.py b/deps/v8/tools/testrunner/local/testsuite_unittest.py
new file mode 100755
index 0000000000..1e10ef5564
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/testsuite_unittest.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+ os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local.testsuite import TestSuite
+from testrunner.objects.testcase import TestCase
+
+
+class TestSuiteTest(unittest.TestCase):
+ def test_filter_testcases_by_status_first_pass(self):
+ suite = TestSuite('foo', 'bar')
+ suite.tests = [
+ TestCase(suite, 'foo/bar'),
+ TestCase(suite, 'baz/bar'),
+ ]
+ suite.rules = {
+ '': {
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL']),
+ },
+ }
+ suite.wildcards = {
+ '': {
+ 'baz/*': set(['PASS', 'SLOW']),
+ },
+ }
+ suite.FilterTestCasesByStatus(warn_unused_rules=False)
+ self.assertEquals(
+ [TestCase(suite, 'baz/bar')],
+ suite.tests,
+ )
+ self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), suite.tests[0].outcomes)
+
+ def test_filter_testcases_by_status_second_pass(self):
+ suite = TestSuite('foo', 'bar')
+
+ test1 = TestCase(suite, 'foo/bar')
+ test2 = TestCase(suite, 'baz/bar')
+
+ # Contrived outcomes from filtering by variant-independent rules.
+ test1.outcomes = set(['PREV'])
+ test2.outcomes = set(['PREV'])
+
+ suite.tests = [
+ test1.CopyAddingFlags(variant='default', flags=[]),
+ test1.CopyAddingFlags(variant='stress', flags=['-v']),
+ test2.CopyAddingFlags(variant='default', flags=[]),
+ test2.CopyAddingFlags(variant='stress', flags=['-v']),
+ ]
+
+ suite.rules = {
+ 'default': {
+ 'foo/bar': set(['PASS', 'SKIP']),
+ 'baz/bar': set(['PASS', 'FAIL']),
+ },
+ 'stress': {
+ 'baz/bar': set(['SKIP']),
+ },
+ }
+ suite.wildcards = {
+ 'default': {
+ 'baz/*': set(['PASS', 'SLOW']),
+ },
+ 'stress': {
+ 'foo/*': set(['PASS', 'SLOW']),
+ },
+ }
+ suite.FilterTestCasesByStatus(warn_unused_rules=False, variants=True)
+ self.assertEquals(
+ [
+ TestCase(suite, 'foo/bar', flags=['-v']),
+ TestCase(suite, 'baz/bar'),
+ ],
+ suite.tests,
+ )
+
+ self.assertEquals(
+ set(['PASS', 'SLOW', 'PREV']),
+ suite.tests[0].outcomes,
+ )
+ self.assertEquals(
+ set(['PASS', 'FAIL', 'SLOW', 'PREV']),
+ suite.tests[1].outcomes,
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py
index c880dfc34e..3e79e44afa 100644
--- a/deps/v8/tools/testrunner/local/utils.py
+++ b/deps/v8/tools/testrunner/local/utils.py
@@ -136,3 +136,24 @@ def URLRetrieve(source, destination):
pass
with open(destination, 'w') as f:
f.write(urllib2.urlopen(source).read())
+
+
+class FrozenDict(dict):
+ def __setitem__(self, *args, **kwargs):
+ raise Exception('Tried to mutate a frozen dict')
+
+ def update(self, *args, **kwargs):
+ raise Exception('Tried to mutate a frozen dict')
+
+
+def Freeze(obj):
+ if isinstance(obj, dict):
+ return FrozenDict((k, Freeze(v)) for k, v in obj.iteritems())
+ elif isinstance(obj, set):
+ return frozenset(obj)
+ elif isinstance(obj, list):
+ return tuple(Freeze(item) for item in obj)
+ else:
+ # Make sure object is hashable.
+ hash(obj)
+ return obj
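A usage sketch of Freeze: it recursively maps dict to FrozenDict, list to
tuple, and set to frozenset, so parsed status data cannot be mutated later
(import path assumed from this patch):

    from testrunner.local.utils import Freeze

    frozen = Freeze({'rules': ['PASS', 'SLOW'], 'variants': set(['default'])})
    assert frozen['rules'] == ('PASS', 'SLOW')           # list -> tuple
    assert frozen['variants'] == frozenset(['default'])  # set -> frozenset
    try:
        frozen.update({'extra': 1})
    except Exception:
        pass  # FrozenDict rejects mutation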
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
new file mode 100644
index 0000000000..b224e41d37
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -0,0 +1,32 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Use this to run several variants of the tests.
+ALL_VARIANT_FLAGS = {
+ "default": [[]],
+ "stress": [["--stress-opt", "--always-opt"]],
+ "turbofan": [["--turbo"]],
+ "turbofan_opt": [["--turbo", "--always-opt"]],
+ "nocrankshaft": [["--nocrankshaft"]],
+ "ignition": [["--ignition"]],
+ "ignition_staging": [["--ignition-staging"]],
+ "ignition_turbofan": [["--ignition-staging", "--turbo"]],
+ "preparser": [["--min-preparse-length=0"]],
+}
+
+# FAST_VARIANTS implies no --always-opt.
+FAST_VARIANT_FLAGS = {
+ "default": [[]],
+ "stress": [["--stress-opt"]],
+ "turbofan": [["--turbo"]],
+ "nocrankshaft": [["--nocrankshaft"]],
+ "ignition": [["--ignition"]],
+ "ignition_staging": [["--ignition-staging"]],
+ "ignition_turbofan": [["--ignition-staging", "--turbo"]],
+ "preparser": [["--min-preparse-length=0"]],
+}
+
+ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
+ "nocrankshaft", "ignition", "ignition_staging",
+ "ignition_turbofan", "preparser"])
diff --git a/deps/v8/tools/testrunner/network/network_execution.py b/deps/v8/tools/testrunner/network/network_execution.py
index c842aba579..a95440178b 100644
--- a/deps/v8/tools/testrunner/network/network_execution.py
+++ b/deps/v8/tools/testrunner/network/network_execution.py
@@ -203,7 +203,6 @@ class NetworkedRunner(execution.Runner):
[constants.INFORM_DURATION, perf_key, test.duration,
self.context.arch, self.context.mode],
self.local_socket)
- self.indicator.AboutToRun(test)
has_unexpected_output = test.suite.HasUnexpectedOutput(test)
if has_unexpected_output:
self.failed.append(test)
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 113c624a35..00722d768b 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -29,14 +29,14 @@
from . import output
class TestCase(object):
- def __init__(self, suite, path, variant='default', flags=None,
+ def __init__(self, suite, path, variant=None, flags=None,
override_shell=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
self.override_shell = override_shell
- self.outcomes = set([])
+ self.outcomes = frozenset([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
@@ -63,7 +63,7 @@ class TestCase(object):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
- test.outcomes = set(task[5])
+ test.outcomes = frozenset(task[5])
test.id = task[6]
test.run = 1
return test
@@ -108,3 +108,6 @@ class TestCase(object):
(self.suite.name, self.path, self.flags),
(other.suite.name, other.path, other.flags),
)
+
+ def __str__(self):
+ return "[%s/%s %s]" % (self.suite.name, self.path, self.flags)
diff --git a/deps/v8/tools/testrunner/server/main.py b/deps/v8/tools/testrunner/server/main.py
index 1000713ca9..c237e1adb4 100644
--- a/deps/v8/tools/testrunner/server/main.py
+++ b/deps/v8/tools/testrunner/server/main.py
@@ -221,7 +221,7 @@ class Server(daemon.Daemon):
if not self.IsTrusted(signer):
return
if self.IsTrusted(fingerprint):
- return # Already trust this guy.
+ return # Already trusted.
filename = self._PubkeyFilename(fingerprint)
signer_pubkeyfile = self._PubkeyFilename(signer)
if not signatures.VerifySignature(filename, pubkey, signature,
diff --git a/deps/v8/tools/testrunner/utils/dump_build_config.py b/deps/v8/tools/testrunner/utils/dump_build_config.py
new file mode 100644
index 0000000000..bd57b5f34e
--- /dev/null
+++ b/deps/v8/tools/testrunner/utils/dump_build_config.py
@@ -0,0 +1,26 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Writes a dictionary to a json file with the passed key-value pairs.
+
+Expected to be called like:
+dump_build_config.py path/to/file.json [key1=value1 ...]
+
+The values are expected to be valid JSON. E.g. true is a boolean and "true" is
+the string "true".
+"""
+
+import json
+import os
+import sys
+
+assert len(sys.argv) > 1
+
+def as_json(kv):
+ assert '=' in kv
+ k, v = kv.split('=', 1)
+ return k, json.loads(v)
+
+with open(sys.argv[1], 'w') as f:
+ json.dump(dict(as_json(kv) for kv in sys.argv[2:]), f)
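A sketch of the docstring's point that values must be valid JSON: bare tokens
and quoted tokens parse to different types (example values hypothetical):

    import json

    # As in: python dump_build_config.py out/config.json is_debug=false
    assert json.loads('false') is False      # bare false -> boolean
    assert json.loads('"false"') == 'false'  # quoted -> the string "false"
    assert json.loads('3') == 3              # numbers parse as numbers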
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index 3f2321fed1..be374c9b18 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -72,6 +72,7 @@ var tickProcessor = new TickProcessor(
sourceMap,
params.timedRange,
params.pairwiseTimedRange,
- params.onlySummary);
+ params.onlySummary,
+ params.runtimeTimerFilter);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index ba7401a223..ec56d49d90 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -81,9 +81,10 @@ function TickProcessor(
sourceMap,
timedRange,
pairwiseTimedRange,
- onlySummary) {
+ onlySummary,
+ runtimeTimerFilter) {
LogReader.call(this, {
- 'shared-library': { parsers: [null, parseInt, parseInt],
+ 'shared-library': { parsers: [null, parseInt, parseInt, parseInt],
processor: this.processSharedLibrary },
'code-creation': {
parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
@@ -94,6 +95,9 @@ function TickProcessor(
processor: this.processCodeDelete },
'sfi-move': { parsers: [parseInt, parseInt],
processor: this.processFunctionMove },
+ 'active-runtime-timer': {
+ parsers: [null],
+ processor: this.processRuntimeTimerEvent },
'tick': {
parsers: [parseInt, parseInt, parseInt,
parseInt, parseInt, 'var-args'],
@@ -124,6 +128,7 @@ function TickProcessor(
this.callGraphSize_ = callGraphSize;
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
+ this.runtimeTimerFilter_ = runtimeTimerFilter;
this.sourceMap = sourceMap;
this.deserializedEntriesNames_ = [];
var ticks = this.ticks_ =
@@ -242,13 +247,13 @@ TickProcessor.prototype.processLogFileInTest = function(fileName) {
TickProcessor.prototype.processSharedLibrary = function(
- name, startAddr, endAddr) {
- var entry = this.profile_.addLibrary(name, startAddr, endAddr);
+ name, startAddr, endAddr, aslrSlide) {
+ var entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
this.setCodeType(entry.getName(), 'SHARED_LIB');
var self = this;
var libFuncs = this.cppEntriesProvider_.parseVmSymbols(
- name, startAddr, endAddr, function(fName, fStart, fEnd) {
+ name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
self.profile_.addStaticCode(fName, fStart, fEnd);
self.setCodeType(fName, 'CPP');
});
@@ -284,9 +289,18 @@ TickProcessor.prototype.processFunctionMove = function(from, to) {
TickProcessor.prototype.includeTick = function(vmState) {
- return this.stateFilter_ == null || this.stateFilter_ == vmState;
+ if (this.stateFilter_ !== null) {
+ return this.stateFilter_ == vmState;
+ } else if (this.runtimeTimerFilter_ !== null) {
+ return this.currentRuntimeTimer == this.runtimeTimerFilter_;
+ }
+ return true;
};
+TickProcessor.prototype.processRuntimeTimerEvent = function(name) {
+ this.currentRuntimeTimer = name;
+}
+
TickProcessor.prototype.processTick = function(pc,
ns_since_start,
is_external_callback,
@@ -559,7 +573,7 @@ function CppEntriesProvider() {
CppEntriesProvider.prototype.parseVmSymbols = function(
- libName, libStart, libEnd, processorFunc) {
+ libName, libStart, libEnd, libASLRSlide, processorFunc) {
this.loadSymbols(libName);
var prevEntry;
@@ -588,6 +602,7 @@ CppEntriesProvider.prototype.parseVmSymbols = function(
} else if (funcInfo === false) {
break;
}
+ funcInfo.start += libASLRSlide;
if (funcInfo.start < libStart && funcInfo.start < libEnd - libStart) {
funcInfo.start += libStart;
}
@@ -780,6 +795,8 @@ function ArgumentsProcessor(args) {
'Show only ticks from OTHER VM state'],
'-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
'Show only ticks from EXTERNAL VM state'],
+ '--filter-runtime-timer': ['runtimeTimerFilter', null,
+ 'Show only ticks matching the given runtime timer scope'],
'--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
'Set the call graph size'],
'--ignore-unknown': ['ignoreUnknown', true,
@@ -831,7 +848,8 @@ ArgumentsProcessor.DEFAULTS = {
distortion: 0,
timedRange: false,
pairwiseTimedRange: false,
- onlySummary: false
+ onlySummary: false,
+ runtimeTimerFilter: null,
};
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index fbd4036dad..e022ab45bf 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -33,17 +33,23 @@ PUBLIC_BENCHMARKS = [
'emscripten',
'compile',
'jetstream',
+ 'jetstream-ignition',
'jsbench',
'jstests',
'kraken_orig',
+ 'kraken_orig-ignition',
'massive',
'memory',
'octane',
+ 'octane-noopt',
+ 'octane-ignition',
'octane-pr',
'octane-tf',
'octane-tf-pr',
'simdjs',
'sunspider',
+ 'sunspider-ignition',
+ 'wasm',
]
V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
diff --git a/deps/v8/tools/turbolizer-perf.py b/deps/v8/tools/turbolizer-perf.py
new file mode 100644
index 0000000000..c90a1174d4
--- /dev/null
+++ b/deps/v8/tools/turbolizer-perf.py
@@ -0,0 +1,56 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import json
+import re
+import argparse
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
+def trace_begin():
+ json_obj['eventCounts'] = {}
+ prog = re.compile(r'0x[0-9a-fA-F]+')
+ for phase in reversed(json_obj['phases']):
+ if phase['name'] == "disassembly":
+ for line in phase['data'].splitlines():
+ result = re.match(prog, line)
+ if result:
+ known_addrs.add(result.group(0))
+
+def trace_end():
+ print json.dumps(json_obj)
+
+def process_event(param_dict):
+ addr = "0x%x" % int(param_dict['sample']['ip'])
+
+ # Only count samples that belong to the function
+ if addr not in known_addrs:
+ return
+
+ ev_name = param_dict['ev_name']
+ if ev_name not in json_obj['eventCounts']:
+ json_obj['eventCounts'][ev_name] = {}
+ if addr not in json_obj['eventCounts'][ev_name]:
+ json_obj['eventCounts'][ev_name][addr] = 0
+ json_obj['eventCounts'][ev_name][addr] += 1
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Perf script to merge profiling data with turbofan compiler "
+ "traces.")
+ parser.add_argument("file_name", metavar="JSON File",
+ help="turbo trace json file.")
+
+ args = parser.parse_args()
+
+ with open(args.file_name, 'r') as json_file:
+ json_obj = json.load(json_file)
+
+ known_addrs = set()
diff --git a/deps/v8/tools/turbolizer/OWNERS b/deps/v8/tools/turbolizer/OWNERS
new file mode 100644
index 0000000000..fc52961eff
--- /dev/null
+++ b/deps/v8/tools/turbolizer/OWNERS
@@ -0,0 +1 @@
+danno@chromium.org
diff --git a/deps/v8/tools/turbolizer/README.md b/deps/v8/tools/turbolizer/README.md
new file mode 100644
index 0000000000..54e1051690
--- /dev/null
+++ b/deps/v8/tools/turbolizer/README.md
@@ -0,0 +1,62 @@
+Turbolizer
+==========
+
+Turbolizer is an HTML-based tool that visualizes optimized code along the various
+phases of Turbofan's optimization pipeline, allowing easy navigation between
+source code, Turbofan IR graphs, scheduled IR nodes and generated assembly code.
+
+Turbolizer consumes .json files that d8 generates per function when it is passed
+the '--trace-turbo' command-line flag.
+
+Host Turbolizer locally by starting a web server that serves the contents of
+the turbolizer directory, e.g.:
+
+ cd src/tools/turbolizer
+ python -m SimpleHTTPServer 8000
+
+Optionally, profiling data generated by the Linux perf tools can be merged
+with the .json files using the included turbolizer-perf.py script. The
+following command is an example of using the perf script:
+
+ perf script -i perf.data.jitted -s turbolizer-perf.py turbo-main.json
+
+The output of the above command is a JSON object that can be piped to a file
+which, when uploaded to turbolizer, will display the event counts from perf next
+to each instruction in the disassembly. Further detail can be found at the
+bottom of this document under "Using Perf with Turbo."
+
+Using the python interface in perf script requires python-dev to be installed
+and perf to be recompiled with python support enabled. Once recompiled, the
+variable PERF_EXEC_PATH must be set to the location of the recompiled perf
+binaries.
+
+Graph visualization and manipulation are based on Mike Bostock's sample code
+for an interactive tool for creating directed graphs. The original source is at
+https://github.com/metacademy/directed-graph-creator and released under the
+MIT/X license.
+
+Icons derived from the "White Olive Collection" created by Breezi released under
+the Creative Commons BY license.
+
+Using Perf with Turbo
+---------------------
+
+In order to generate perf data that matches exactly with the turbofan trace, you
+must use either a debug build of v8 or a release build with the flag
+'disassembler=on'. This flag ensures that '--trace-turbo' will output the
+necessary disassembly for linking with the perf profile.
+
+The basic example of generating the required data is as follows:
+
+ perf record -k mono /path/to/d8 --turbo --trace-turbo --perf-prof main.js
+ perf inject -j -i perf.data -o perf.data.jitted
+ perf script -i perf.data.jitted -s turbolizer-perf.py turbo-main.json
+
+These commands combined will run and profile d8, merge the output into a single
+'perf.data.jitted' file, then take the event data from that and link it to the
+disassembly in 'turbo-main.json'. Note that, as above, the output of the
+script command must be piped to a file for uploading to turbolizer.
+
+There are many options that can be added to the first command, for example '-e'
+can be used to specify which events to count (default: cycles), as
+well as '--cpu' to specify which CPU to sample. \ No newline at end of file
diff --git a/deps/v8/tools/turbolizer/code-view.js b/deps/v8/tools/turbolizer/code-view.js
new file mode 100644
index 0000000000..6abb05593e
--- /dev/null
+++ b/deps/v8/tools/turbolizer/code-view.js
@@ -0,0 +1,172 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+class CodeView extends View {
+ constructor(divID, PR, sourceText, sourcePosition, broker) {
+ super(divID, broker, null, false);
+ let view = this;
+ view.PR = PR;
+ view.mouseDown = false;
+ view.broker = broker;
+ view.allSpans = [];
+
+ var selectionHandler = {
+ clear: function() { broker.clear(selectionHandler); },
+ select: function(items, selected) {
+ var handler = this;
+ var broker = view.broker;
+ for (let span of items) {
+ if (selected) {
+ span.classList.add("selected");
+ } else {
+ span.classList.remove("selected");
+ }
+ }
+ var locations = [];
+ for (var span of items) {
+ locations.push({pos_start: span.start, pos_end: span.end});
+ }
+ broker.clear(selectionHandler);
+ broker.select(selectionHandler, locations, selected);
+ },
+ selectionDifference: function(span1, inclusive1, span2, inclusive2) {
+ var pos1 = span1.start;
+ var pos2 = span2.start;
+ var result = [];
+ var lineListDiv = view.divNode.firstChild.firstChild.childNodes;
+ for (var i = 0; i < lineListDiv.length; i++) {
+ var currentLineElement = lineListDiv[i];
+ var spans = currentLineElement.childNodes;
+ for (var j = 0; j < spans.length; ++j) {
+ var currentSpan = spans[j];
+ if (currentSpan.start > pos1 ||
+ (inclusive1 && currentSpan.start == pos1)) {
+ if (currentSpan.start < pos2 ||
+ (inclusive2 && currentSpan.start == pos2)) {
+ result.push(currentSpan);
+ }
+ }
+ }
+ }
+ return result;
+ },
+ brokeredSelect: function(locations, selected) {
+ let firstSelect = view.selection.isEmpty();
+ for (let location of locations) {
+ let start = location.pos_start;
+ let end = location.pos_end;
+ if (start && end) {
+ let lower = 0;
+ let upper = view.allSpans.length;
+ if (upper > 0) {
+ while ((upper - lower) > 1) {
+ var middle = Math.floor((upper + lower) / 2);
+ var lineStart = view.allSpans[middle].start;
+ if (lineStart < start) {
+ lower = middle;
+ } else if (lineStart > start) {
+ upper = middle;
+ } else {
+ lower = middle;
+ break;
+ }
+ }
+ var currentSpan = view.allSpans[lower];
+ var currentLineElement = currentSpan.parentNode;
+ if ((currentSpan.start <= start && start < currentSpan.end) ||
+ (currentSpan.start <= end && end < currentSpan.end)) {
+ if (firstSelect) {
+ makeContainerPosVisible(
+ view.divNode, currentLineElement.offsetTop);
+ firstSelect = false;
+ }
+ view.selection.select(currentSpan, selected);
+ }
+ }
+ }
+ }
+ },
+ brokeredClear: function() { view.selection.clear(); },
+ };
+ view.selection = new Selection(selectionHandler);
+ broker.addSelectionHandler(selectionHandler);
+
+ view.handleSpanMouseDown = function(e) {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ view.selection.clear();
+ }
+ view.selection.select(this, true);
+ view.mouseDown = true;
+ }
+
+ view.handleSpanMouseMove = function(e) {
+ if (view.mouseDown) {
+ view.selection.extendTo(this);
+ }
+ }
+
+ view.handleCodeMouseDown = function(e) { view.selection.clear(); }
+
+ document.addEventListener('mouseup', function(e) {
+ view.mouseDown = false;
+ }, false);
+
+ view.initializeCode(sourceText, sourcePosition);
+ }
+
+ initializeContent(data, rememberedSelection) { this.data = data; }
+
+ initializeCode(sourceText, sourcePosition) {
+ var view = this;
+ if (sourceText == "") {
+ var newHtml = "<pre class=\"prettyprint\"></pre>";
+ view.divNode.innerHTML = newHtml;
+ } else {
+ var newHtml =
+ "<pre class=\"prettyprint linenums\">" + sourceText + "</pre>";
+ view.divNode.innerHTML = newHtml;
+ try {
+ // Wrap in try to work when offline.
+ view.PR.prettyPrint();
+ } catch (e) {
+ }
+
+ view.divNode.onmousedown = this.handleCodeMouseDown;
+
+ var base = sourcePosition;
+ var current = 0;
+ var lineListDiv = view.divNode.firstChild.firstChild.childNodes;
+ for (let i = 0; i < lineListDiv.length; i++) {
+ var currentLineElement = lineListDiv[i];
+ currentLineElement.id = "li" + i;
+ var pos = base + current;
+ currentLineElement.pos = pos;
+ var spans = currentLineElement.childNodes;
+ for (let j = 0; j < spans.length; ++j) {
+ var currentSpan = spans[j];
+ if (currentSpan.nodeType == 1) {
+ currentSpan.start = pos;
+ currentSpan.end = pos + currentSpan.textContent.length;
+ currentSpan.onmousedown = this.handleSpanMouseDown;
+ currentSpan.onmousemove = this.handleSpanMouseMove;
+ view.allSpans.push(currentSpan);
+ }
+ current += currentSpan.textContent.length;
+ pos = base + current;
+ }
+ while ((current < sourceText.length) &&
+ (sourceText[current] == '\n' || sourceText[current] == '\r')) {
+ ++current;
+ }
+ }
+ }
+
+ view.resizeToParent();
+ }
+
+ deleteContent() {}
+}
diff --git a/deps/v8/tools/turbolizer/constants.js b/deps/v8/tools/turbolizer/constants.js
new file mode 100644
index 0000000000..f062fa2154
--- /dev/null
+++ b/deps/v8/tools/turbolizer/constants.js
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var MAX_RANK_SENTINEL = 0;
+var GRAPH_MARGIN = 250;
+var WIDTH = 'width';
+var HEIGHT = 'height';
+var VISIBILITY = 'visibility';
+var SOURCE_PANE_ID = 'left';
+var SOURCE_COLLAPSE_ID = 'source-shrink';
+var SOURCE_EXPAND_ID = 'source-expand';
+var INTERMEDIATE_PANE_ID = 'middle';
+var EMPTY_PANE_ID = 'empty';
+var GRAPH_PANE_ID = 'graph';
+var SCHEDULE_PANE_ID = 'schedule';
+var GENERATED_PANE_ID = 'right';
+var DISASSEMBLY_PANE_ID = 'disassembly';
+var DISASSEMBLY_COLLAPSE_ID = 'disassembly-shrink';
+var DISASSEMBLY_EXPAND_ID = 'disassembly-expand';
+var COLLAPSE_PANE_BUTTON_VISIBLE = 'button-input';
+var COLLAPSE_PANE_BUTTON_INVISIBLE = 'button-input-invisible';
+var PROF_HIGH = 5;
+var PROF_MED = 0.5;
diff --git a/deps/v8/tools/turbolizer/disassembly-view.js b/deps/v8/tools/turbolizer/disassembly-view.js
new file mode 100644
index 0000000000..b704c77312
--- /dev/null
+++ b/deps/v8/tools/turbolizer/disassembly-view.js
@@ -0,0 +1,245 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+class DisassemblyView extends TextView {
+ constructor(id, broker) {
+ super(id, broker, null, false);
+
+ let view = this;
+ let ADDRESS_STYLE = {
+ css: 'tag',
+ location: function(text) {
+ ADDRESS_STYLE.last_address = text;
+ return undefined;
+ }
+ };
+ let ADDRESS_LINK_STYLE = {
+ css: 'tag',
+ link: function(text) {
+ view.select(function(location) { return location.address == text; }, true, true);
+ }
+ };
+ let UNCLASSIFIED_STYLE = {
+ css: 'com'
+ };
+ let NUMBER_STYLE = {
+ css: 'lit'
+ };
+ let COMMENT_STYLE = {
+ css: 'com'
+ };
+ let POSITION_STYLE = {
+ css: 'com',
+ location: function(text) {
+ view.pos_start = Number(text);
+ }
+ };
+ let OPCODE_STYLE = {
+ css: 'kwd',
+ location: function(text) {
+ if (BLOCK_HEADER_STYLE.block_id != undefined) {
+ return {
+ address: ADDRESS_STYLE.last_address,
+ block_id: BLOCK_HEADER_STYLE.block_id
+ };
+ } else {
+ return {
+ address: ADDRESS_STYLE.last_address
+ };
+ }
+ }
+ };
+ const BLOCK_HEADER_STYLE = {
+ css: 'com',
+ block_id: -1,
+ location: function(text) {
+ let matches = /\d+/.exec(text);
+ if (!matches) return undefined;
+ BLOCK_HEADER_STYLE.block_id = Number(matches[0]);
+ return {
+ block_id: BLOCK_HEADER_STYLE.block_id
+ };
+ },
+ };
+ const SOURCE_POSITION_HEADER_STYLE = {
+ css: 'com',
+ location: function(text) {
+ let matches = /(\d+):(\d+)/.exec(text);
+ if (!matches) return undefined;
+ let li = Number(matches[1]);
+ if (view.pos_lines === null) return undefined;
+ let pos = view.pos_lines[li-1] + Number(matches[2]);
+ return {
+ pos_start: pos,
+ pos_end: pos + 1
+ };
+ },
+ };
+ view.SOURCE_POSITION_HEADER_REGEX = /^(\s*-- .+:)(\d+:\d+)( --)/;
+ let patterns = [
+ [
+ [/^0x[0-9a-f]{8,16}/, ADDRESS_STYLE, 1],
+ [view.SOURCE_POSITION_HEADER_REGEX, SOURCE_POSITION_HEADER_STYLE, -1],
+ [/^\s+-- B\d+ start.*/, BLOCK_HEADER_STYLE, -1],
+ [/^.*/, UNCLASSIFIED_STYLE, -1]
+ ],
+ [
+ [/^\s+\d+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+ [/^.*/, null, -1]
+ ],
+ [
+ [/^\S+\s+/, OPCODE_STYLE, 3],
+ [/^\S+$/, OPCODE_STYLE, -1],
+ [/^.*/, null, -1]
+ ],
+ [
+ [/^\s+/, null],
+ [/^[^\(;]+$/, null, -1],
+ [/^[^\(;]+/, null],
+ [/^\(/, null, 4],
+ [/^;/, COMMENT_STYLE, 5]
+ ],
+ [
+ [/^0x[0-9a-f]{8,16}/, ADDRESS_LINK_STYLE],
+ [/^[^\)]/, null],
+ [/^\)$/, null, -1],
+ [/^\)/, null, 3]
+ ],
+ [
+ [/^; debug\: position /, COMMENT_STYLE, 6],
+ [/^.+$/, COMMENT_STYLE, -1]
+ ],
+ [
+ [/^\d+$/, POSITION_STYLE, -1],
+ ]
+ ];
+ view.setPatterns(patterns);
+ }
+
+ lineLocation(li) {
+ let view = this;
+ let result = undefined;
+ for (let i = 0; i < li.children.length; ++i) {
+ let fragment = li.children[i];
+ let location = fragment.location;
+ if (location != null) {
+ if (location.block_id != undefined) {
+ if (result === undefined) result = {};
+ result.block_id = location.block_id;
+ }
+ if (location.address != undefined) {
+ if (result === undefined) result = {};
+ result.address = location.address;
+ }
+ if (location.pos_start != undefined && location.pos_end != undefined) {
+ if (result === undefined) result = {};
+ result.pos_start = location.pos_start;
+ result.pos_end = location.pos_end;
+ }
+ else if (view.pos_start != -1) {
+ if (result === undefined) result = {};
+ result.pos_start = view.pos_start;
+ result.pos_end = result.pos_start + 1;
+ }
+ }
+ }
+ return result;
+ }
+
+ initializeContent(data, rememberedSelection) {
+ this.data = data;
+ super.initializeContent(data, rememberedSelection);
+ }
+
+ initializeCode(sourceText, sourcePosition) {
+ let view = this;
+ view.pos_start = -1;
+ view.addr_event_counts = null;
+ view.total_event_counts = null;
+ view.pos_lines = new Array();
+ // Comment lines for line 0 already include sourcePosition; we only need
+ // to add sourcePosition for lines > 0.
+ view.pos_lines[0] = sourcePosition;
+ if (sourceText != "") {
+ let base = sourcePosition;
+ let current = 0;
+ let source_lines = sourceText.split("\n");
+ for (let i = 1; i < source_lines.length; i++) {
+ // Add 1 for newline character that is split off.
+ current += source_lines[i-1].length + 1;
+ view.pos_lines[i] = base + current;
+ }
+ }
+ }
+
+ initializePerfProfile(eventCounts) {
+ let view = this;
+ if (eventCounts !== undefined) {
+ view.addr_event_counts = eventCounts;
+
+ view.total_event_counts = {};
+ for (var ev_name in view.addr_event_counts) {
+ let keys = Object.keys(view.addr_event_counts[ev_name]);
+ let values = keys.map(key => view.addr_event_counts[ev_name][key]);
+ view.total_event_counts[ev_name] = values.reduce((a, b) => a + b);
+ }
+ }
+ else {
+ view.addr_event_counts = null;
+ view.total_event_counts = null;
+ }
+ }
+
+ // Shorten decimals and remove trailing zeroes for readability.
+ humanize(num) {
+ return num.toFixed(3).replace(/\.?0+$/, "") + "%";
+ }
+
+ processLine(line) {
+ let view = this;
+ let func = function(match, p1, p2, p3) {
+ let nums = p2.split(":");
+ let li = Number(nums[0]);
+ let pos = Number(nums[1]);
+ if(li === 0)
+ pos -= view.pos_lines[0];
+ li++;
+ return p1 + li + ":" + pos + p3;
+ };
+ line = line.replace(view.SOURCE_POSITION_HEADER_REGEX, func);
+ let fragments = super.processLine(line);
+
+ // Add profiling data per instruction if available.
+ if (view.total_event_counts) {
+ let event_selector = document.getElementById('event-selector');
+ if (event_selector.length !== 0) {
+ let event = event_selector.value;
+ let matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line);
+ if (matches) {
+ let count = view.addr_event_counts[event][matches[1]];
+ let str = "";
+ let css_cls = undefined;
+          if (count !== undefined) {
+            let perc = count / view.total_event_counts[event] * 100;
+
+            str = "(" + view.humanize(perc) + ") ";
+
+            css_cls = "prof-low";
+            if (perc > PROF_HIGH)
+              css_cls = "prof-high";
+            else if (perc > PROF_MED)
+              css_cls = "prof-med";
+ }
+ // Pad extra spaces to keep alignment for all instructions.
+ str = (" ".repeat(10) + str).slice(-10);
+
+ fragments.splice(0, 0, view.createFragment(str, css_cls));
+ }
+ }
+ }
+ return fragments;
+ }
+}
diff --git a/deps/v8/tools/turbolizer/edge.js b/deps/v8/tools/turbolizer/edge.js
new file mode 100644
index 0000000000..c0f63a0204
--- /dev/null
+++ b/deps/v8/tools/turbolizer/edge.js
@@ -0,0 +1,79 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var MINIMUM_EDGE_SEPARATION = 20;
+
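+// Only control edges attached to a node that is part of the visible
+// control-flow skeleton are shown initially; other edges are revealed
+// on demand.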
+function isEdgeInitiallyVisible(target, index, source, type) {
+ return type == "control" && (target.cfg || source.cfg);
+}
+
+var Edge = function(target, index, source, type) {
+ this.target = target;
+ this.source = source;
+ this.index = index;
+ this.type = type;
+ this.backEdgeNumber = 0;
+ this.visible = isEdgeInitiallyVisible(target, index, source, type);
+};
+
+Edge.prototype.stringID = function() {
+ return this.source.id + "," + this.index + "," + this.target.id;
+};
+
+Edge.prototype.isVisible = function() {
+ return this.visible && this.source.visible && this.target.visible;
+};
+
+Edge.prototype.getInputHorizontalPosition = function(graph) {
+ if (this.backEdgeNumber > 0) {
+ return graph.maxGraphNodeX + this.backEdgeNumber * MINIMUM_EDGE_SEPARATION;
+ }
+ var source = this.source;
+ var target = this.target;
+ var index = this.index;
+ var input_x = target.x + target.getInputX(index);
+ var inputApproach = target.getInputApproach(this.index);
+ var outputApproach = source.getOutputApproach(graph);
+ if (inputApproach > outputApproach) {
+ return input_x;
+ } else {
+ var inputOffset = MINIMUM_EDGE_SEPARATION * (index + 1);
+ return (target.x < source.x)
+ ? (target.x + target.getTotalNodeWidth() + inputOffset)
+ : (target.x - inputOffset)
+ }
+}
+
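+// Build the SVG path data for an edge: down from the source's output bubble,
+// across at the output approach, then down the input approach into the
+// target's input bubble.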
+Edge.prototype.generatePath = function(graph) {
+ var target = this.target;
+ var source = this.source;
+ var input_x = target.x + target.getInputX(this.index);
+ var arrowheadHeight = 7;
+ var input_y = target.y - 2 * DEFAULT_NODE_BUBBLE_RADIUS - arrowheadHeight;
+ var output_x = source.x + source.getOutputX();
+ var output_y = source.y + graph.getNodeHeight(source) + DEFAULT_NODE_BUBBLE_RADIUS;
+ var inputApproach = target.getInputApproach(this.index);
+ var outputApproach = source.getOutputApproach(graph);
+ var horizontalPos = this.getInputHorizontalPosition(graph);
+
+ var result = "M" + output_x + "," + output_y +
+ "L" + output_x + "," + outputApproach +
+ "L" + horizontalPos + "," + outputApproach;
+
+ if (horizontalPos != input_x) {
+ result += "L" + horizontalPos + "," + inputApproach;
+ } else {
+ if (inputApproach < outputApproach) {
+ inputApproach = outputApproach;
+ }
+ }
+
+ result += "L" + input_x + "," + inputApproach +
+ "L" + input_x + "," + input_y;
+ return result;
+}
+
+Edge.prototype.isBackEdge = function() {
+ return this.target.hasBackEdges() && (this.target.rank < this.source.rank);
+}
diff --git a/deps/v8/tools/turbolizer/empty-view.js b/deps/v8/tools/turbolizer/empty-view.js
new file mode 100644
index 0000000000..66caf59d8f
--- /dev/null
+++ b/deps/v8/tools/turbolizer/empty-view.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+class EmptyView extends View {
+ constructor(id, broker) {
+ super(id, broker);
+ this.svg = this.divElement.append("svg").attr('version','1.1').attr("width", "100%");
+ }
+
+ initializeContent(data, rememberedSelection) {
+ this.svg.attr("height", document.documentElement.clientHeight + "px");
+ }
+
+ deleteContent() {
+ }
+}
diff --git a/deps/v8/tools/turbolizer/expand-all.jpg b/deps/v8/tools/turbolizer/expand-all.jpg
new file mode 100644
index 0000000000..df64a2c4aa
--- /dev/null
+++ b/deps/v8/tools/turbolizer/expand-all.jpg
Binary files differ
diff --git a/deps/v8/tools/turbolizer/graph-layout.js b/deps/v8/tools/turbolizer/graph-layout.js
new file mode 100644
index 0000000000..e9b44b4d2c
--- /dev/null
+++ b/deps/v8/tools/turbolizer/graph-layout.js
@@ -0,0 +1,493 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var DEFAULT_NODE_ROW_SEPARATION = 130;
+
+var traceLayout = false;
+
+function newGraphOccupation(graph){
+ var isSlotFilled = [];
+ var maxSlot = 0;
+ var minSlot = 0;
+ var nodeOccupation = [];
+
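+  // Slots grow outward from slot 0; slotToIndex spreads them over array keys
+  // (even keys for slots >= 0, odd keys for negative slots) so one sparse
+  // array tracks both directions.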
+ function slotToIndex(slot) {
+ if (slot >= 0) {
+ return slot * 2;
+ } else {
+ return slot * 2 + 1;
+ }
+ }
+
+  function indexToSlot(index) {
+    if ((index % 2) == 0) {
+      return index / 2;
+    } else {
+      return (index - 1) / 2;
+    }
+  }
+
+ function positionToSlot(pos) {
+ return Math.floor(pos / NODE_INPUT_WIDTH);
+ }
+
+ function slotToLeftPosition(slot) {
+ return slot * NODE_INPUT_WIDTH
+ }
+
+ function slotToRightPosition(slot) {
+ return (slot + 1) * NODE_INPUT_WIDTH
+ }
+
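+  // Starting from the slot nearest the given position, scan alternately left
+  // and right until a contiguous run of free slots wide enough is found.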
+ function findSpace(pos, width, direction) {
+ var widthSlots = Math.floor((width + NODE_INPUT_WIDTH - 1) /
+ NODE_INPUT_WIDTH);
+ var currentSlot = positionToSlot(pos + width / 2);
+ var currentScanSlot = currentSlot;
+ var widthSlotsRemainingLeft = widthSlots;
+ var widthSlotsRemainingRight = widthSlots;
+ var slotsChecked = 0;
+ while (true) {
+ var mod = slotsChecked++ % 2;
+ currentScanSlot = currentSlot + (mod ? -1 : 1) * (slotsChecked >> 1);
+ if (!isSlotFilled[slotToIndex(currentScanSlot)]) {
+ if (mod) {
+ if (direction <= 0) --widthSlotsRemainingLeft
+ } else {
+ if (direction >= 0) --widthSlotsRemainingRight
+ }
+ if (widthSlotsRemainingLeft == 0 ||
+ widthSlotsRemainingRight == 0 ||
+ (widthSlotsRemainingLeft + widthSlotsRemainingRight) == widthSlots &&
+ (widthSlots == slotsChecked)) {
+ if (mod) {
+ return [currentScanSlot, widthSlots];
+ } else {
+ return [currentScanSlot - widthSlots + 1, widthSlots];
+ }
+ }
+ } else {
+ if (mod) {
+ widthSlotsRemainingLeft = widthSlots;
+ } else {
+ widthSlotsRemainingRight = widthSlots;
+ }
+ }
+ }
+ }
+
+ function setIndexRange(from, to, value) {
+ if (to < from) {
+      throw new RangeError("illegal slot range");
+ }
+ while (from <= to) {
+ if (from > maxSlot) {
+ maxSlot = from;
+ }
+ if (from < minSlot) {
+ minSlot = from;
+ }
+ isSlotFilled[slotToIndex(from++)] = value;
+ }
+ }
+
+ function occupySlotRange(from, to) {
+ if (traceLayout) {
+ console.log("Occupied [" + slotToLeftPosition(from) + " " + slotToLeftPosition(to + 1) + ")");
+ }
+ setIndexRange(from, to, true);
+ }
+
+ function clearSlotRange(from, to) {
+ if (traceLayout) {
+ console.log("Cleared [" + slotToLeftPosition(from) + " " + slotToLeftPosition(to + 1) + ")");
+ }
+ setIndexRange(from, to, false);
+ }
+
+ function occupyPositionRange(from, to) {
+ occupySlotRange(positionToSlot(from), positionToSlot(to - 1));
+ }
+
+ function clearPositionRange(from, to) {
+ clearSlotRange(positionToSlot(from), positionToSlot(to - 1));
+ }
+
+ function occupyPositionRangeWithMargin(from, to, margin) {
+ var fromMargin = from - Math.floor(margin);
+ var toMargin = to + Math.floor(margin);
+ occupyPositionRange(fromMargin, toMargin);
+ }
+
+ function clearPositionRangeWithMargin(from, to, margin) {
+ var fromMargin = from - Math.floor(margin);
+ var toMargin = to + Math.floor(margin);
+ clearPositionRange(fromMargin, toMargin);
+ }
+
+ var occupation = {
+ occupyNodeInputs: function(node) {
+ for (var i = 0; i < node.inputs.length; ++i) {
+ if (node.inputs[i].isVisible()) {
+ var edge = node.inputs[i];
+ if (!edge.isBackEdge()) {
+ var source = edge.source;
+ var horizontalPos = edge.getInputHorizontalPosition(graph);
+ if (traceLayout) {
+ console.log("Occupying input " + i + " of " + node.id + " at " + horizontalPos);
+ }
+ occupyPositionRangeWithMargin(horizontalPos,
+ horizontalPos,
+ NODE_INPUT_WIDTH / 2);
+ }
+ }
+ }
+ },
+ occupyNode: function(node) {
+ var getPlacementHint = function(n) {
+ var pos = 0;
+ var direction = -1;
+ var outputEdges = 0;
+ var inputEdges = 0;
+ for (var k = 0; k < n.outputs.length; ++k) {
+ var outputEdge = n.outputs[k];
+ if (outputEdge.isVisible()) {
+ var output = n.outputs[k].target;
+ for (var l = 0; l < output.inputs.length; ++l) {
+ if (output.rank > n.rank) {
+ var inputEdge = output.inputs[l];
+ if (inputEdge.isVisible()) {
+ ++inputEdges;
+ }
+ if (output.inputs[l].source == n) {
+ pos += output.x + output.getInputX(l) + NODE_INPUT_WIDTH / 2;
+ outputEdges++;
+ if (l >= (output.inputs.length / 2)) {
+ direction = 1;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (outputEdges != 0) {
+ pos = pos / outputEdges;
+ }
+ if (outputEdges > 1 || inputEdges == 1) {
+ direction = 0;
+ }
+ return [direction, pos];
+ }
+ var width = node.getTotalNodeWidth();
+ var margin = MINIMUM_EDGE_SEPARATION;
+ var paddedWidth = width + 2 * margin;
+ var placementHint = getPlacementHint(node);
+ var x = placementHint[1] - paddedWidth + margin;
+ if (traceLayout) {
+ console.log("Node " + node.id + " placement hint [" + x + ", " + (x + paddedWidth) + ")");
+ }
+ var placement = findSpace(x, paddedWidth, placementHint[0]);
+ var firstSlot = placement[0];
+ var slotWidth = placement[1];
+ var endSlotExclusive = firstSlot + slotWidth - 1;
+ occupySlotRange(firstSlot, endSlotExclusive);
+ nodeOccupation.push([firstSlot, endSlotExclusive]);
+ if (placementHint[0] < 0) {
+ return slotToLeftPosition(firstSlot + slotWidth) - width - margin;
+ } else if (placementHint[0] > 0) {
+ return slotToLeftPosition(firstSlot) + margin;
+ } else {
+ return slotToLeftPosition(firstSlot + slotWidth / 2) - (width / 2);
+ }
+ },
+ clearOccupiedNodes: function() {
+ nodeOccupation.forEach(function(o) {
+ clearSlotRange(o[0], o[1]);
+ });
+ nodeOccupation = [];
+ },
+ clearNodeOutputs: function(source) {
+ source.outputs.forEach(function(edge) {
+ if (edge.isVisible()) {
+ var target = edge.target;
+ for (var i = 0; i < target.inputs.length; ++i) {
+ if (target.inputs[i].source === source) {
+ var horizontalPos = edge.getInputHorizontalPosition(graph);
+ clearPositionRangeWithMargin(horizontalPos,
+ horizontalPos,
+ NODE_INPUT_WIDTH / 2);
+ }
+ }
+ }
+ });
+ },
+ print: function() {
+ var s = "";
+ for (var currentSlot = -40; currentSlot < 40; ++currentSlot) {
+ if (currentSlot != 0) {
+ s += " ";
+ } else {
+ s += "|";
+ }
+ }
+ console.log(s);
+ s = "";
+ for (var currentSlot2 = -40; currentSlot2 < 40; ++currentSlot2) {
+ if (isSlotFilled[slotToIndex(currentSlot2)]) {
+ s += "*";
+ } else {
+ s += " ";
+ }
+ }
+ console.log(s);
+ }
+ }
+ return occupation;
+}
+
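+// layoutNodeGraph drives the occupation tracker rank by rank: a rank's node
+// outputs are cleared, each visible node is placed with occupyNode, the node
+// slots are released again, and occupyNodeInputs then reserves the routed
+// input lines so later ranks avoid them.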
+function layoutNodeGraph(graph) {
+ // First determine the set of nodes that have no outputs. Those are the
+ // basis for bottom-up DFS to determine rank and node placement.
+ var endNodesHasNoOutputs = [];
+ var startNodesHasNoInputs = [];
+ graph.nodes.forEach(function(n, i){
+ endNodesHasNoOutputs[n.id] = true;
+ startNodesHasNoInputs[n.id] = true;
+ });
+ graph.edges.forEach(function(e, i){
+ endNodesHasNoOutputs[e.source.id] = false;
+ startNodesHasNoInputs[e.target.id] = false;
+ });
+
+  // Finalize the list of start and end nodes.
+ var endNodes = [];
+ var startNodes = [];
+ var visited = [];
+ var rank = [];
+ graph.nodes.forEach(function(n, i){
+ if (endNodesHasNoOutputs[n.id]) {
+ endNodes.push(n);
+ }
+ if (startNodesHasNoInputs[n.id]) {
+ startNodes.push(n);
+ }
+ visited[n.id] = false;
+ rank[n.id] = -1;
+ n.rank = 0;
+ n.visitOrderWithinRank = 0;
+ n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
+ });
+
+  var maxRank = 0;
+ var visitOrderWithinRank = 0;
+
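+  // Forward pass: propagate ranks from the start nodes along visible input
+  // edges; inputs that close loops (Phi value inputs and Loop back edges)
+  // are skipped so cycles do not inflate ranks.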
+ var worklist = startNodes.slice();
+ while (worklist.length != 0) {
+ var n = worklist.pop();
+ var changed = false;
+ if (n.rank == MAX_RANK_SENTINEL) {
+ n.rank = 1;
+ changed = true;
+ }
+ var begin = 0;
+ var end = n.inputs.length;
+ if (n.opcode == 'Phi' || n.opcode == 'EffectPhi') {
+ // Keep with merge or loop node
+ begin = n.inputs.length - 1;
+ } else if (n.hasBackEdges()) {
+ end = 1;
+ }
+ for (var l = begin; l < end; ++l) {
+ var input = n.inputs[l].source;
+ if (input.visible && input.rank >= n.rank) {
+ n.rank = input.rank + 1;
+ changed = true;
+ }
+ }
+ if (changed) {
+ var hasBackEdges = n.hasBackEdges();
+ for (var l = n.outputs.length - 1; l >= 0; --l) {
+ if (hasBackEdges && (l != 0)) {
+ worklist.unshift(n.outputs[l].target);
+ } else {
+ worklist.push(n.outputs[l].target);
+ }
+ }
+ }
+ if (n.rank > maxRank) {
+ maxRank = n.rank;
+ }
+ }
+
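+  // Backward pass: move nodes as late as possible, placing each just above
+  // its earliest-ranked visible consumer (Start and Phis keep their rank).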
+ visited = [];
+ function dfsFindRankLate(n) {
+ if (visited[n.id]) return;
+ visited[n.id] = true;
+ var originalRank = n.rank;
+ var newRank = n.rank;
+ var firstInput = true;
+ for (var l = 0; l < n.outputs.length; ++l) {
+ var output = n.outputs[l].target;
+ dfsFindRankLate(output);
+ var outputRank = output.rank;
+ if (output.visible && (firstInput || outputRank <= newRank) &&
+ (outputRank > originalRank)) {
+ newRank = outputRank - 1;
+ }
+ firstInput = false;
+ }
+ if (n.opcode != "Start" && n.opcode != "Phi" && n.opcode != "EffectPhi") {
+ n.rank = newRank;
+ }
+ }
+
+ startNodes.forEach(dfsFindRankLate);
+
+ visited = [];
+ function dfsRankOrder(n) {
+ if (visited[n.id]) return;
+ visited[n.id] = true;
+ for (var l = 0; l < n.outputs.length; ++l) {
+ var edge = n.outputs[l];
+ if (edge.isVisible()) {
+ var output = edge.target;
+ dfsRankOrder(output);
+ }
+ }
+ if (n.visitOrderWithinRank == 0) {
+ n.visitOrderWithinRank = ++visitOrderWithinRank;
+ }
+ }
+ startNodes.forEach(dfsRankOrder);
+
+ endNodes.forEach(function(n) {
+ n.rank = maxRank + 1;
+ });
+
+ var rankSets = [];
+ // Collect sets for each rank.
+ graph.nodes.forEach(function(n, i){
+ n.y = n.rank * (DEFAULT_NODE_ROW_SEPARATION + graph.getNodeHeight(n) +
+ 2 * DEFAULT_NODE_BUBBLE_RADIUS);
+ if (n.visible) {
+ if (rankSets[n.rank] === undefined) {
+ rankSets[n.rank] = [n];
+ } else {
+ rankSets[n.rank].push(n);
+ }
+ }
+ });
+
+ // Iterate backwards from highest to lowest rank, placing nodes so that they
+ // spread out from the "center" as much as possible while still being
+ // compact and not overlapping live input lines.
+ var occupation = newGraphOccupation(graph);
+ var rankCount = 0;
+
+ rankSets.reverse().forEach(function(rankSet) {
+
+ for (var i = 0; i < rankSet.length; ++i) {
+ occupation.clearNodeOutputs(rankSet[i]);
+ }
+
+ if (traceLayout) {
+ console.log("After clearing outputs");
+ occupation.print();
+ }
+
+ var placedCount = 0;
+    rankSet = rankSet.sort(function(a,b) {
+      return a.visitOrderWithinRank - b.visitOrderWithinRank;
+    });
+ for (var i = 0; i < rankSet.length; ++i) {
+ var nodeToPlace = rankSet[i];
+ if (nodeToPlace.visible) {
+ nodeToPlace.x = occupation.occupyNode(nodeToPlace);
+ if (traceLayout) {
+ console.log("Node " + nodeToPlace.id + " is placed between [" + nodeToPlace.x + ", " + (nodeToPlace.x + nodeToPlace.getTotalNodeWidth()) + ")");
+ }
+          var delta = MINIMUM_EDGE_SEPARATION * (placedCount++ % 3);
+          nodeToPlace.outputApproach += delta;
+ } else {
+ nodeToPlace.x = 0;
+ }
+ }
+
+ if (traceLayout) {
+ console.log("Before clearing nodes");
+ occupation.print();
+ }
+
+ occupation.clearOccupiedNodes();
+
+ if (traceLayout) {
+ console.log("After clearing nodes");
+ occupation.print();
+ }
+
+ for (var i = 0; i < rankSet.length; ++i) {
+ var node = rankSet[i];
+ occupation.occupyNodeInputs(node);
+ }
+
+ if (traceLayout) {
+ console.log("After occupying inputs");
+ occupation.print();
+ }
+
+ if (traceLayout) {
+ console.log("After determining bounding box");
+ occupation.print();
+ }
+ });
+
+ graph.maxBackEdgeNumber = 0;
+ graph.visibleEdges.each(function (e) {
+ if (e.isBackEdge()) {
+ e.backEdgeNumber = ++graph.maxBackEdgeNumber;
+ } else {
+ e.backEdgeNumber = 0;
+ }
+ });
+
+ redetermineGraphBoundingBox(graph);
+
+}
+
+function redetermineGraphBoundingBox(graph) {
+ graph.minGraphX = 0;
+ graph.maxGraphNodeX = 1;
+ graph.maxGraphX = undefined; // see below
+ graph.minGraphY = 0;
+ graph.maxGraphY = 1;
+
+ for (var i = 0; i < graph.nodes.length; ++i) {
+ var node = graph.nodes[i];
+
+ if (!node.visible) {
+ continue;
+ }
+
+ if (node.x < graph.minGraphX) {
+ graph.minGraphX = node.x;
+ }
+ if ((node.x + node.getTotalNodeWidth()) > graph.maxGraphNodeX) {
+ graph.maxGraphNodeX = node.x + node.getTotalNodeWidth();
+ }
+ if ((node.y - 50) < graph.minGraphY) {
+ graph.minGraphY = node.y - 50;
+ }
+ if ((node.y + graph.getNodeHeight(node) + 50) > graph.maxGraphY) {
+ graph.maxGraphY = node.y + graph.getNodeHeight(node) + 50;
+ }
+ }
+
+ graph.maxGraphX = graph.maxGraphNodeX +
+ graph.maxBackEdgeNumber * MINIMUM_EDGE_SEPARATION;
+
+}
diff --git a/deps/v8/tools/turbolizer/graph-view.js b/deps/v8/tools/turbolizer/graph-view.js
new file mode 100644
index 0000000000..8de050f3e6
--- /dev/null
+++ b/deps/v8/tools/turbolizer/graph-view.js
@@ -0,0 +1,1033 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+class GraphView extends View {
+ constructor (d3, id, nodes, edges, broker) {
+ super(id, broker);
+ var graph = this;
+
+ var svg = this.divElement.append("svg").attr('version','1.1').attr("width", "100%");
+ graph.svg = svg;
+
+ graph.nodes = nodes || [];
+ graph.edges = edges || [];
+
+ graph.minGraphX = 0;
+ graph.maxGraphX = 1;
+ graph.minGraphY = 0;
+ graph.maxGraphY = 1;
+
+ graph.state = {
+ selection: null,
+ mouseDownNode: null,
+ justDragged: false,
+ justScaleTransGraph: false,
+ lastKeyDown: -1,
+ showTypes: false
+ };
+
+ var selectionHandler = {
+ clear: function() {
+ broker.clear(selectionHandler);
+ },
+ select: function(items, selected) {
+ var locations = [];
+ for (var d of items) {
+ if (selected) {
+ d.classList.add("selected");
+ } else {
+ d.classList.remove("selected");
+ }
+ var data = d.__data__;
+ locations.push({ pos_start: data.pos, pos_end: data.pos + 1, node_id: data.id});
+ }
+ broker.select(selectionHandler, locations, selected);
+ },
+ selectionDifference: function(span1, inclusive1, span2, inclusive2) {
+ // Should not be called
+ },
+ brokeredSelect: function(locations, selected) {
+ var selection = graph.nodes
+ .filter(function(n) {
+ var pos = n.pos;
+ for (var location of locations) {
+ var start = location.pos_start;
+ var end = location.pos_end;
+ var id = location.node_id;
+ if (end != undefined) {
+ if (pos >= start && pos < end) {
+ return true;
+ }
+ } else if (start != undefined) {
+ if (pos === start) {
+ return true;
+ }
+ } else {
+ if (n.id === id) {
+ return true;
+ }
+ }
+ }
+ return false;
+ });
+ var newlySelected = new Set();
+ selection.forEach(function(n) {
+ newlySelected.add(n);
+ if (!n.visible) {
+ n.visible = true;
+ }
+ });
+ graph.updateGraphVisibility();
+ graph.visibleNodes.each(function(n) {
+ if (newlySelected.has(n)) {
+ graph.state.selection.select(this, selected);
+ }
+ });
+ graph.updateGraphVisibility();
+ graph.viewSelection();
+ },
+ brokeredClear: function() {
+ graph.state.selection.clear();
+ }
+ };
+ broker.addSelectionHandler(selectionHandler);
+
+ graph.state.selection = new Selection(selectionHandler);
+
+ var defs = svg.append('svg:defs');
+ defs.append('svg:marker')
+ .attr('id', 'end-arrow')
+ .attr('viewBox', '0 -4 8 8')
+ .attr('refX', 2)
+ .attr('markerWidth', 2.5)
+ .attr('markerHeight', 2.5)
+ .attr('orient', 'auto')
+ .append('svg:path')
+ .attr('d', 'M0,-4L8,0L0,4');
+
+ this.graphElement = svg.append("g");
+ graph.visibleEdges = this.graphElement.append("g").selectAll("g");
+ graph.visibleNodes = this.graphElement.append("g").selectAll("g");
+
+ graph.drag = d3.behavior.drag()
+ .origin(function(d){
+ return {x: d.x, y: d.y};
+ })
+ .on("drag", function(args){
+ graph.state.justDragged = true;
+ graph.dragmove.call(graph, args);
+      });
+
+ d3.select("#upload").on("click", partial(this.uploadAction, graph));
+ d3.select("#layout").on("click", partial(this.layoutAction, graph));
+ d3.select("#show-all").on("click", partial(this.showAllAction, graph));
+ d3.select("#hide-dead").on("click", partial(this.hideDeadAction, graph));
+ d3.select("#hide-unselected").on("click", partial(this.hideUnselectedAction, graph));
+ d3.select("#hide-selected").on("click", partial(this.hideSelectedAction, graph));
+ d3.select("#zoom-selection").on("click", partial(this.zoomSelectionAction, graph));
+ d3.select("#toggle-types").on("click", partial(this.toggleTypesAction, graph));
+ d3.select("#search-input").on("keydown", partial(this.searchInputAction, graph));
+
+ // listen for key events
+ d3.select(window).on("keydown", function(e){
+ graph.svgKeyDown.call(graph);
+ })
+ .on("keyup", function(){
+ graph.svgKeyUp.call(graph);
+ });
+ svg.on("mousedown", function(d){graph.svgMouseDown.call(graph, d);});
+ svg.on("mouseup", function(d){graph.svgMouseUp.call(graph, d);});
+
+ graph.dragSvg = d3.behavior.zoom()
+ .on("zoom", function(){
+ if (d3.event.sourceEvent.shiftKey){
+ return false;
+ } else{
+ graph.zoomed.call(graph);
+ }
+ return true;
+ })
+ .on("zoomstart", function(){
+ if (!d3.event.sourceEvent.shiftKey) d3.select('body').style("cursor", "move");
+ })
+ .on("zoomend", function(){
+ d3.select('body').style("cursor", "auto");
+ });
+
+ svg.call(graph.dragSvg).on("dblclick.zoom", null);
+ }
+
+ static get selectedClass() {
+ return "selected";
+ }
+ static get rectClass() {
+ return "nodeStyle";
+ }
+ static get activeEditId() {
+ return "active-editing";
+ }
+ static get nodeRadius() {
+ return 50;
+ }
+
+ getNodeHeight(d) {
+ if (this.state.showTypes) {
+ return d.normalheight + d.labelbbox.height;
+ } else {
+ return d.normalheight;
+ }
+ }
+
+ getEdgeFrontier(nodes, inEdges, edgeFilter) {
+ let frontier = new Set();
+ nodes.forEach(function(element) {
+ var edges = inEdges ? element.__data__.inputs : element.__data__.outputs;
+ var edgeNumber = 0;
+ edges.forEach(function(edge) {
+ if (edgeFilter == undefined || edgeFilter(edge, edgeNumber)) {
+ frontier.add(edge);
+ }
+ ++edgeNumber;
+ });
+ });
+ return frontier;
+ }
+
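+  // Reveal (or, with the control key held, toggle) the edges on one side of
+  // the given nodes and return a d3 selection of the nodes reached through
+  // them, or undefined when the edges were hidden instead.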
+ getNodeFrontier(nodes, inEdges, edgeFilter) {
+ let graph = this;
+ var frontier = new Set();
+ var newState = true;
+ var edgeFrontier = graph.getEdgeFrontier(nodes, inEdges, edgeFilter);
+ // Control key toggles edges rather than just turning them on
+ if (d3.event.ctrlKey) {
+ edgeFrontier.forEach(function(edge) {
+ if (edge.visible) {
+ newState = false;
+ }
+ });
+ }
+ edgeFrontier.forEach(function(edge) {
+ edge.visible = newState;
+ if (newState) {
+ var node = inEdges ? edge.source : edge.target;
+ node.visible = true;
+ frontier.add(node);
+ }
+ });
+ graph.updateGraphVisibility();
+ if (newState) {
+ return graph.visibleNodes.filter(function(n) {
+ return frontier.has(n);
+ });
+ } else {
+ return undefined;
+ }
+ }
+
+ dragmove(d) {
+ var graph = this;
+ d.x += d3.event.dx;
+ d.y += d3.event.dy;
+ graph.updateGraphVisibility();
+ }
+
+ initializeContent(data, rememberedSelection) {
+ this.createGraph(data, rememberedSelection);
+ if (rememberedSelection != null) {
+ this.attachSelection(rememberedSelection);
+ this.connectVisibleSelectedNodes();
+ this.viewSelection();
+ }
+ this.updateGraphVisibility();
+ }
+
+ deleteContent() {
+ if (this.visibleNodes) {
+ this.nodes = [];
+ this.edges = [];
+ this.nodeMap = [];
+ this.updateGraphVisibility();
+ }
+ };
+
+ measureText(text) {
+ var textMeasure = document.getElementById('text-measure');
+ textMeasure.textContent = text;
+ return {
+ width: textMeasure.getBBox().width,
+ height: textMeasure.getBBox().height,
+ };
+ }
+
+  createGraph(data, initiallyVisibleIds) {
+ var g = this;
+ g.nodes = data.nodes;
+ g.nodeMap = [];
+ g.nodes.forEach(function(n, i){
+ n.__proto__ = Node;
+ n.visible = false;
+ n.x = 0;
+ n.y = 0;
+ n.rank = MAX_RANK_SENTINEL;
+ n.inputs = [];
+ n.outputs = [];
+ n.rpo = -1;
+ n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
+ n.cfg = n.control;
+ g.nodeMap[n.id] = n;
+ n.displayLabel = n.getDisplayLabel();
+ n.labelbbox = g.measureText(n.displayLabel);
+ n.typebbox = g.measureText(n.getDisplayType());
+ var innerwidth = Math.max(n.labelbbox.width, n.typebbox.width);
+ n.width = Math.alignUp(innerwidth + NODE_INPUT_WIDTH * 2,
+ NODE_INPUT_WIDTH);
+ var innerheight = Math.max(n.labelbbox.height, n.typebbox.height);
+ n.normalheight = innerheight + 20;
+ });
+ g.edges = [];
+ data.edges.forEach(function(e, i){
+ var t = g.nodeMap[e.target];
+ var s = g.nodeMap[e.source];
+ var newEdge = new Edge(t, e.index, s, e.type);
+ t.inputs.push(newEdge);
+ s.outputs.push(newEdge);
+ g.edges.push(newEdge);
+ if (e.type == 'control') {
+ s.cfg = true;
+ }
+ });
+ g.nodes.forEach(function(n, i) {
+ n.visible = isNodeInitiallyVisible(n);
+      if (initiallyVisibleIds != undefined) {
+        if (initiallyVisibleIds.has(n.id)) {
+ n.visible = true;
+ }
+ }
+ });
+ g.fitGraphViewToWindow();
+ g.updateGraphVisibility();
+ g.layoutGraph();
+ g.updateGraphVisibility();
+ g.viewWholeGraph();
+ }
+
+ connectVisibleSelectedNodes() {
+ var graph = this;
+ graph.state.selection.selection.forEach(function(element) {
+ element.__data__.inputs.forEach(function(edge) {
+ if (edge.source.visible && edge.target.visible) {
+ edge.visible = true;
+ }
+ });
+ element.__data__.outputs.forEach(function(edge) {
+ if (edge.source.visible && edge.target.visible) {
+ edge.visible = true;
+ }
+ });
+ });
+ }
+
+ updateInputAndOutputBubbles() {
+ var g = this;
+ var s = g.visibleBubbles;
+ s.classed("filledBubbleStyle", function(c) {
+ var components = this.id.split(',');
+ if (components[0] == "ib") {
+ var edge = g.nodeMap[components[3]].inputs[components[2]];
+ return edge.isVisible();
+ } else {
+ return g.nodeMap[components[1]].areAnyOutputsVisible() == 2;
+ }
+ }).classed("halfFilledBubbleStyle", function(c) {
+ var components = this.id.split(',');
+ if (components[0] == "ib") {
+ var edge = g.nodeMap[components[3]].inputs[components[2]];
+ return false;
+ } else {
+ return g.nodeMap[components[1]].areAnyOutputsVisible() == 1;
+ }
+ }).classed("bubbleStyle", function(c) {
+ var components = this.id.split(',');
+ if (components[0] == "ib") {
+ var edge = g.nodeMap[components[3]].inputs[components[2]];
+ return !edge.isVisible();
+ } else {
+ return g.nodeMap[components[1]].areAnyOutputsVisible() == 0;
+ }
+ });
+ s.each(function(c) {
+ var components = this.id.split(',');
+ if (components[0] == "ob") {
+ var from = g.nodeMap[components[1]];
+ var x = from.getOutputX();
+ var y = g.getNodeHeight(from) + DEFAULT_NODE_BUBBLE_RADIUS;
+ var transform = "translate(" + x + "," + y + ")";
+ this.setAttribute('transform', transform);
+ }
+ });
+ }
+
+ attachSelection(s) {
+ var graph = this;
+ if (s.size != 0) {
+ this.visibleNodes.each(function(n) {
+ if (s.has(this.__data__.id)) {
+ graph.state.selection.select(this, true);
+ }
+ });
+ }
+ }
+
+ detachSelection() {
+ var selection = this.state.selection.detachSelection();
+ var s = new Set();
+ for (var i of selection) {
+ s.add(i.__data__.id);
+ };
+ return s;
+ }
+
+ pathMouseDown(path, d) {
+ d3.event.stopPropagation();
+ this.state.selection.clear();
+ this.state.selection.add(path);
+ };
+
+ nodeMouseDown(node, d) {
+ d3.event.stopPropagation();
+ this.state.mouseDownNode = d;
+ }
+
+ nodeMouseUp(d3node, d) {
+ var graph = this,
+ state = graph.state,
+ consts = graph.consts;
+
+ var mouseDownNode = state.mouseDownNode;
+
+ if (!mouseDownNode) return;
+
+ if (state.justDragged) {
+ // dragged, not clicked
+ redetermineGraphBoundingBox(graph);
+ state.justDragged = false;
+ } else{
+ // clicked, not dragged
+ var extend = d3.event.shiftKey;
+ var selection = graph.state.selection;
+ if (!extend) {
+ selection.clear();
+ }
+ selection.select(d3node[0][0], true);
+ }
+ }
+
+ selectSourcePositions(start, end, selected) {
+ var graph = this;
+ var map = [];
+    graph.nodes.forEach(function(n) {
+ var pos = (n.pos === undefined)
+ ? -1
+ : n.getFunctionRelativeSourcePosition(graph);
+ if (pos >= start && pos < end) {
+ map[n.id] = true;
+ n.visible = true;
+ }
+ });
+ graph.updateGraphVisibility();
+ graph.visibleNodes.filter(function(n) { return map[n.id]; })
+ .each(function(n) {
+ var selection = graph.state.selection;
+ selection.select(d3.select(this), selected);
+ });
+ }
+
+ selectAllNodes(inEdges, filter) {
+ var graph = this;
+ if (!d3.event.shiftKey) {
+ graph.state.selection.clear();
+ }
+ graph.state.selection.select(graph.visibleNodes[0], true);
+ graph.updateGraphVisibility();
+ }
+
+ uploadAction(graph) {
+ document.getElementById("hidden-file-upload").click();
+ }
+
+ layoutAction(graph) {
+ graph.updateGraphVisibility();
+ graph.layoutGraph();
+ graph.updateGraphVisibility();
+ graph.viewWholeGraph();
+ }
+
+ showAllAction(graph) {
+    graph.nodes.forEach(function(n) { n.visible = true; });
+    graph.edges.forEach(function(e) { e.visible = true; });
+ graph.updateGraphVisibility();
+ graph.viewWholeGraph();
+ }
+
+ hideDeadAction(graph) {
+    graph.nodes.forEach(function(n) { if (!n.isLive()) n.visible = false; });
+ graph.updateGraphVisibility();
+ }
+
+ hideUnselectedAction(graph) {
+ var unselected = graph.visibleNodes.filter(function(n) {
+ return !this.classList.contains("selected");
+ });
+ unselected.each(function(n) {
+ n.visible = false;
+ });
+ graph.updateGraphVisibility();
+ }
+
+ hideSelectedAction(graph) {
+ var selected = graph.visibleNodes.filter(function(n) {
+ return this.classList.contains("selected");
+ });
+ selected.each(function(n) {
+ n.visible = false;
+ });
+ graph.state.selection.clear();
+ graph.updateGraphVisibility();
+ }
+
+ zoomSelectionAction(graph) {
+ graph.viewSelection();
+ }
+
+ toggleTypesAction(graph) {
+ graph.toggleTypes();
+ }
+
+ searchInputAction(graph) {
+ if (d3.event.keyCode == 13) {
+ graph.state.selection.clear();
+ var query = this.value;
+ window.sessionStorage.setItem("lastSearch", query);
+
+ var reg = new RegExp(query);
+ var filterFunction = function(n) {
+ return (reg.exec(n.getDisplayLabel()) != null ||
+ (graph.state.showTypes && reg.exec(n.getDisplayType())) ||
+ reg.exec(n.opcode) != null);
+ };
+ if (d3.event.ctrlKey) {
+ graph.nodes.forEach(function(n, i) {
+ if (filterFunction(n)) {
+ n.visible = true;
+ }
+ });
+ graph.updateGraphVisibility();
+ }
+ var selected = graph.visibleNodes.each(function(n) {
+ if (filterFunction(n)) {
+ graph.state.selection.select(this, true);
+ }
+ });
+ graph.connectVisibleSelectedNodes();
+ graph.updateGraphVisibility();
+ this.blur();
+ graph.viewSelection();
+ }
+ d3.event.stopPropagation();
+ }
+
+ svgMouseDown() {
+ this.state.graphMouseDown = true;
+ }
+
+ svgMouseUp() {
+ var graph = this,
+ state = graph.state;
+ if (state.justScaleTransGraph) {
+ // Dragged
+ state.justScaleTransGraph = false;
+ } else {
+ // Clicked
+ if (state.mouseDownNode == null) {
+ graph.state.selection.clear();
+ }
+ }
+ state.mouseDownNode = null;
+ state.graphMouseDown = false;
+ }
+
+ svgKeyDown() {
+ var state = this.state;
+ var graph = this;
+
+ // Don't handle key press repetition
+    if (state.lastKeyDown !== -1) return;
+
+ var showSelectionFrontierNodes = function(inEdges, filter, select) {
+ var frontier = graph.getNodeFrontier(state.selection.selection, inEdges, filter);
+ if (frontier != undefined) {
+ if (select) {
+ if (!d3.event.shiftKey) {
+ state.selection.clear();
+ }
+ state.selection.select(frontier[0], true);
+ }
+ graph.updateGraphVisibility();
+ }
+ allowRepetition = false;
+ }
+
+ var allowRepetition = true;
+ var eventHandled = true; // unless the below switch defaults
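+    // Shortcuts: '1'-'9' reveal the n-th input, 'c' control inputs, 'e'
+    // effect inputs, 'i'/'o' all inputs/outputs, 'a' selects all visible
+    // nodes, up/down walk the frontier, 'r' re-runs layout, '/' focuses
+    // the search box.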
+ switch(d3.event.keyCode) {
+ case 49:
+ case 50:
+ case 51:
+ case 52:
+ case 53:
+ case 54:
+ case 55:
+ case 56:
+ case 57:
+ // '1'-'9'
+ showSelectionFrontierNodes(true,
+ (edge, index) => { return index == (d3.event.keyCode - 49); },
+ false);
+ break;
+ case 97:
+ case 98:
+ case 99:
+ case 100:
+ case 101:
+ case 102:
+ case 103:
+ case 104:
+ case 105:
+ // 'numpad 1'-'numpad 9'
+ showSelectionFrontierNodes(true,
+ (edge, index) => { return index == (d3.event.keyCode - 97); },
+ false);
+ break;
+ case 67:
+ // 'c'
+ showSelectionFrontierNodes(true,
+ (edge, index) => { return edge.type == 'control'; },
+ false);
+ break;
+ case 69:
+ // 'e'
+ showSelectionFrontierNodes(true,
+ (edge, index) => { return edge.type == 'effect'; },
+ false);
+ break;
+ case 79:
+ // 'o'
+ showSelectionFrontierNodes(false, undefined, false);
+ break;
+ case 73:
+ // 'i'
+ showSelectionFrontierNodes(true, undefined, false);
+ break;
+ case 65:
+ // 'a'
+ graph.selectAllNodes();
+ allowRepetition = false;
+ break;
+ case 38:
+ case 40: {
+ showSelectionFrontierNodes(d3.event.keyCode == 38, undefined, true);
+ break;
+ }
+ case 82:
+ // 'r'
+ if (!d3.event.ctrlKey) {
+ this.layoutAction(this);
+ } else {
+ eventHandled = false;
+ }
+ break;
+ case 191:
+ // '/'
+ document.getElementById("search-input").focus();
+ document.getElementById("search-input").select();
+ break;
+ default:
+ eventHandled = false;
+ break;
+ }
+ if (eventHandled) {
+ d3.event.preventDefault();
+ }
+ if (!allowRepetition) {
+ state.lastKeyDown = d3.event.keyCode;
+ }
+ }
+
+ svgKeyUp() {
+    this.state.lastKeyDown = -1;
+ };
+
+ layoutEdges() {
+ var graph = this;
+ graph.maxGraphX = graph.maxGraphNodeX;
+ this.visibleEdges.attr("d", function(edge){
+ return edge.generatePath(graph);
+ });
+ }
+
+ layoutGraph() {
+ layoutNodeGraph(this);
+ }
+
+ // call to propagate changes to graph
+ updateGraphVisibility() {
+
+ var graph = this,
+ state = graph.state;
+
+ var filteredEdges = graph.edges.filter(function(e) { return e.isVisible(); });
+ var visibleEdges = graph.visibleEdges.data(filteredEdges, function(edge) {
+ return edge.stringID();
+ });
+
+ // add new paths
+ visibleEdges.enter()
+ .append('path')
+ .style('marker-end','url(#end-arrow)')
+ .classed('hidden', function(e) {
+ return !e.isVisible();
+ })
+ .attr("id", function(edge){ return "e," + edge.stringID(); })
+ .on("mousedown", function(d){
+ graph.pathMouseDown.call(graph, d3.select(this), d);
+ })
+
+ // Set the correct styles on all of the paths
+ visibleEdges.classed('value', function(e) {
+ return e.type == 'value' || e.type == 'context';
+ }).classed('control', function(e) {
+ return e.type == 'control';
+ }).classed('effect', function(e) {
+ return e.type == 'effect';
+ }).classed('frame-state', function(e) {
+ return e.type == 'frame-state';
+ }).attr('stroke-dasharray', function(e) {
+ if (e.type == 'frame-state') return "10,10";
+ return (e.type == 'effect') ? "5,5" : "";
+ });
+
+ // remove old links
+ visibleEdges.exit().remove();
+
+ graph.visibleEdges = visibleEdges;
+
+ // update existing nodes
+ var filteredNodes = graph.nodes.filter(function(n) { return n.visible; });
+ graph.visibleNodes = graph.visibleNodes.data(filteredNodes, function(d) {
+ return d.id;
+ });
+ graph.visibleNodes.attr("transform", function(n){
+ return "translate(" + n.x + "," + n.y + ")";
+    }).select('rect')
+      .attr(HEIGHT, function(d) { return graph.getNodeHeight(d); });
+
+ // add new nodes
+ var newGs = graph.visibleNodes.enter()
+ .append("g");
+
+ newGs.classed("control", function(n) { return n.isControl(); })
+ .classed("live", function(n) { return n.isLive(); })
+ .classed("dead", function(n) { return !n.isLive(); })
+ .classed("javascript", function(n) { return n.isJavaScript(); })
+ .classed("input", function(n) { return n.isInput(); })
+ .classed("simplified", function(n) { return n.isSimplified(); })
+ .classed("machine", function(n) { return n.isMachine(); })
+ .attr("transform", function(d){ return "translate(" + d.x + "," + d.y + ")";})
+ .on("mousedown", function(d){
+ graph.nodeMouseDown.call(graph, d3.select(this), d);
+ })
+ .on("mouseup", function(d){
+ graph.nodeMouseUp.call(graph, d3.select(this), d);
+ })
+ .call(graph.drag);
+
+ newGs.append("rect")
+ .attr("rx", 10)
+ .attr("ry", 10)
+ .attr(WIDTH, function(d) {
+ return d.getTotalNodeWidth();
+ })
+ .attr(HEIGHT, function(d) {
+ return graph.getNodeHeight(d);
+ })
+
+ function appendInputAndOutputBubbles(g, d) {
+ for (var i = 0; i < d.inputs.length; ++i) {
+ var x = d.getInputX(i);
+ var y = -DEFAULT_NODE_BUBBLE_RADIUS;
+ var s = g.append('circle')
+ .classed("filledBubbleStyle", function(c) {
+ return d.inputs[i].isVisible();
+ } )
+ .classed("bubbleStyle", function(c) {
+ return !d.inputs[i].isVisible();
+ } )
+ .attr("id", "ib," + d.inputs[i].stringID())
+ .attr("r", DEFAULT_NODE_BUBBLE_RADIUS)
+ .attr("transform", function(d) {
+ return "translate(" + x + "," + y + ")";
+ })
+ .on("mousedown", function(d){
+ var components = this.id.split(',');
+ var node = graph.nodeMap[components[3]];
+ var edge = node.inputs[components[2]];
+ var visible = !edge.isVisible();
+ node.setInputVisibility(components[2], visible);
+ d3.event.stopPropagation();
+ graph.updateGraphVisibility();
+ });
+ }
+ if (d.outputs.length != 0) {
+ var x = d.getOutputX();
+ var y = graph.getNodeHeight(d) + DEFAULT_NODE_BUBBLE_RADIUS;
+ var s = g.append('circle')
+ .classed("filledBubbleStyle", function(c) {
+ return d.areAnyOutputsVisible() == 2;
+ } )
+ .classed("halFilledBubbleStyle", function(c) {
+ return d.areAnyOutputsVisible() == 1;
+ } )
+ .classed("bubbleStyle", function(c) {
+ return d.areAnyOutputsVisible() == 0;
+ } )
+ .attr("id", "ob," + d.id)
+ .attr("r", DEFAULT_NODE_BUBBLE_RADIUS)
+ .attr("transform", function(d) {
+ return "translate(" + x + "," + y + ")";
+ })
+ .on("mousedown", function(d) {
+ d.setOutputVisibility(d.areAnyOutputsVisible() == 0);
+ d3.event.stopPropagation();
+ graph.updateGraphVisibility();
+ });
+ }
+ }
+
+ newGs.each(function(d){
+ appendInputAndOutputBubbles(d3.select(this), d);
+ });
+
+ newGs.each(function(d){
+ d3.select(this).append("text")
+ .classed("label", true)
+ .attr("text-anchor","right")
+ .attr("dx", 5)
+ .attr("dy", 5)
+ .append('tspan')
+ .text(function(l) {
+ return d.getDisplayLabel();
+ })
+ .append("title")
+ .text(function(l) {
+ return d.getTitle();
+ })
+ if (d.type != undefined) {
+ d3.select(this).append("text")
+ .classed("label", true)
+ .classed("type", true)
+ .attr("text-anchor","right")
+ .attr("dx", 5)
+ .attr("dy", d.labelbbox.height + 5)
+ .append('tspan')
+ .text(function(l) {
+ return d.getDisplayType();
+ })
+ .append("title")
+ .text(function(l) {
+ return d.getType();
+ })
+ }
+ });
+
+ graph.visibleNodes.select('.type').each(function (d) {
+ this.setAttribute('visibility', graph.state.showTypes ? 'visible' : 'hidden');
+ });
+
+ // remove old nodes
+ graph.visibleNodes.exit().remove();
+
+ graph.visibleBubbles = d3.selectAll('circle');
+
+ graph.updateInputAndOutputBubbles();
+
+ graph.layoutEdges();
+
+    graph.svg.style("height", "100%");
+ }
+
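+  // Clamp a candidate pan translation so the graph, plus margin, stays inside
+  // the viewport; when an axis fully fits, the graph is centered on it.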
+ getVisibleTranslation(translate, scale) {
+ var graph = this;
+ var height = (graph.maxGraphY - graph.minGraphY + 2 * GRAPH_MARGIN) * scale;
+ var width = (graph.maxGraphX - graph.minGraphX + 2 * GRAPH_MARGIN) * scale;
+
+ var dimensions = this.getSvgViewDimensions();
+
+ var baseY = translate[1];
+ var minY = (graph.minGraphY - GRAPH_MARGIN) * scale;
+ var maxY = (graph.maxGraphY + GRAPH_MARGIN) * scale;
+
+ var adjustY = 0;
+ var adjustYCandidate = 0;
+ if ((maxY + baseY) < dimensions[1]) {
+ adjustYCandidate = dimensions[1] - (maxY + baseY);
+ if ((minY + baseY + adjustYCandidate) > 0) {
+ adjustY = (dimensions[1] / 2) - (maxY - (height / 2)) - baseY;
+ } else {
+ adjustY = adjustYCandidate;
+ }
+ } else if (-baseY < minY) {
+ adjustYCandidate = -(baseY + minY);
+ if ((maxY + baseY + adjustYCandidate) < dimensions[1]) {
+ adjustY = (dimensions[1] / 2) - (maxY - (height / 2)) - baseY;
+ } else {
+ adjustY = adjustYCandidate;
+ }
+ }
+ translate[1] += adjustY;
+
+ var baseX = translate[0];
+ var minX = (graph.minGraphX - GRAPH_MARGIN) * scale;
+ var maxX = (graph.maxGraphX + GRAPH_MARGIN) * scale;
+
+ var adjustX = 0;
+ var adjustXCandidate = 0;
+ if ((maxX + baseX) < dimensions[0]) {
+ adjustXCandidate = dimensions[0] - (maxX + baseX);
+ if ((minX + baseX + adjustXCandidate) > 0) {
+ adjustX = (dimensions[0] / 2) - (maxX - (width / 2)) - baseX;
+ } else {
+ adjustX = adjustXCandidate;
+ }
+ } else if (-baseX < minX) {
+ adjustXCandidate = -(baseX + minX);
+ if ((maxX + baseX + adjustXCandidate) < dimensions[0]) {
+ adjustX = (dimensions[0] / 2) - (maxX - (width / 2)) - baseX;
+ } else {
+ adjustX = adjustXCandidate;
+ }
+ }
+ translate[0] += adjustX;
+ return translate;
+ }
+
+ translateClipped(translate, scale, transition) {
+ var graph = this;
+ var graphNode = this.graphElement[0][0];
+ var translate = this.getVisibleTranslation(translate, scale);
+ if (transition) {
+ graphNode.classList.add('visible-transition');
+ clearTimeout(graph.transitionTimout);
+ graph.transitionTimout = setTimeout(function(){
+ graphNode.classList.remove('visible-transition');
+ }, 1000);
+ }
+ var translateString = "translate(" + translate[0] + "px," + translate[1] + "px) scale(" + scale + ")";
+ graphNode.style.transform = translateString;
+ graph.dragSvg.translate(translate);
+ graph.dragSvg.scale(scale);
+ }
+
+ zoomed(){
+ this.state.justScaleTransGraph = true;
+ var scale = this.dragSvg.scale();
+ this.translateClipped(d3.event.translate, scale);
+ }
+
+
+ getSvgViewDimensions() {
+ var canvasWidth = this.parentNode.clientWidth;
+ var documentElement = document.documentElement;
+ var canvasHeight = documentElement.clientHeight;
+ return [canvasWidth, canvasHeight];
+ }
+
+
+ minScale() {
+ var graph = this;
+ var dimensions = this.getSvgViewDimensions();
+ var width = graph.maxGraphX - graph.minGraphX;
+ var height = graph.maxGraphY - graph.minGraphY;
+ var minScale = dimensions[0] / (width + GRAPH_MARGIN * 2);
+ var minScaleYCandidate = dimensions[1] / (height + GRAPH_MARGIN * 2);
+ if (minScaleYCandidate < minScale) {
+ minScale = minScaleYCandidate;
+ }
+ this.dragSvg.scaleExtent([minScale, 1.5]);
+ return minScale;
+ }
+
+ fitGraphViewToWindow() {
+ this.svg.attr("height", document.documentElement.clientHeight + "px");
+ this.translateClipped(this.dragSvg.translate(), this.dragSvg.scale());
+ }
+
+ toggleTypes() {
+ var graph = this;
+ graph.state.showTypes = !graph.state.showTypes;
+ var element = document.getElementById('toggle-types');
+ if (graph.state.showTypes) {
+ element.classList.add('button-input-toggled');
+ } else {
+ element.classList.remove('button-input-toggled');
+ }
+ graph.updateGraphVisibility();
+ }
+
+ viewSelection() {
+ var graph = this;
+ var minX, maxX, minY, maxY;
+ var hasSelection = false;
+ graph.visibleNodes.each(function(n) {
+ if (this.classList.contains("selected")) {
+ hasSelection = true;
+        minX = (minX === undefined) ? n.x : Math.min(minX, n.x);
+        maxX = (maxX === undefined) ? n.x + n.getTotalNodeWidth()
+                                    : Math.max(maxX, n.x + n.getTotalNodeWidth());
+        minY = (minY === undefined) ? n.y : Math.min(minY, n.y);
+        maxY = (maxY === undefined) ? n.y + graph.getNodeHeight(n)
+                                    : Math.max(maxY, n.y + graph.getNodeHeight(n));
+ }
+ });
+ if (hasSelection) {
+ graph.viewGraphRegion(minX - NODE_INPUT_WIDTH, minY - 60,
+ maxX + NODE_INPUT_WIDTH, maxY + 60,
+ true);
+ }
+ }
+
+ viewGraphRegion(minX, minY, maxX, maxY, transition) {
+ var graph = this;
+ var dimensions = this.getSvgViewDimensions();
+ var width = maxX - minX;
+ var height = maxY - minY;
+ var scale = Math.min(dimensions[0] / width, dimensions[1] / height);
+ scale = Math.min(1.5, scale);
+ scale = Math.max(graph.minScale(), scale);
+ var translation = [-minX*scale, -minY*scale];
+ translation = graph.getVisibleTranslation(translation, scale);
+ graph.translateClipped(translation, scale, transition);
+ }
+
+ viewWholeGraph() {
+ var graph = this;
+ var minScale = graph.minScale();
+ var translation = [0, 0];
+ translation = graph.getVisibleTranslation(translation, minScale);
+ graph.translateClipped(translation, minScale);
+ }
+}
diff --git a/deps/v8/tools/turbolizer/hide-selected.png b/deps/v8/tools/turbolizer/hide-selected.png
new file mode 100644
index 0000000000..207cdbb89a
--- /dev/null
+++ b/deps/v8/tools/turbolizer/hide-selected.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/hide-unselected.png b/deps/v8/tools/turbolizer/hide-unselected.png
new file mode 100644
index 0000000000..15617b0939
--- /dev/null
+++ b/deps/v8/tools/turbolizer/hide-unselected.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html
new file mode 100644
index 0000000000..8dc21b7bdd
--- /dev/null
+++ b/deps/v8/tools/turbolizer/index.html
@@ -0,0 +1,99 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <link rel="stylesheet" href="turbo-visualizer.css" />
+ </head>
+ <body width="100%">
+ <div id="left">
+ <div id='source-text'>
+      <pre id='source-text-pre'></pre>
+ </div>
+ </div>
+ <div id="middle">
+ <div id="graph-toolbox-anchor">
+ <span id="graph-toolbox">
+ <input id="layout" type="image" title="layout graph" src="layout-icon.png"
+ alt="layout graph" class="button-input">
+ <input id="show-all" type="image" title="show all nodes" src="expand-all.jpg"
+ alt="show all nodes" class="button-input">
+ <input id="hide-dead" type="image" title="only live nodes" src="live.png"
+ alt="only live nodes" class="button-input">
+ <input id="hide-unselected" type="image" title="hide unselected nodes"
+ src="hide-unselected.png" alt="hide unselected nodes" class="button-input">
+ <input id="hide-selected" type="image" title="hide selected nodes"
+ src="hide-selected.png" alt="hide selected nodes" class="button-input">
+ <input id="zoom-selection" type="image" title="zoom to selection"
+ src="search.png" alt="zoom to selection" class="button-input">
+ <input id="toggle-types" type="image" title="show/hide types"
+ src="types.png" alt="show/hide types" class="button-input">
+ <input id="search-input" type="text" title="search nodes for regex"
+ alt="search node for regex" class="search-input"
+ placeholder="find with regexp&hellip;">
+ <select id="display-selector">
+ <option disabled selected>(please open a file)</option>
+ </select>
+ </span>
+ </div>
+
+ <div id="load-file">
+ <input type="file" id="hidden-file-upload">
+ <input id="upload" type="image" title="load graph" class="button-input"
+ src="upload-icon.png" alt="upload graph">
+ </div>
+ <div id="empty" width="100%" height="100%"></div>
+ <div id="graph" width="100%" height="100%"></div>
+ <div id="schedule" width="100%">
+ <pre id="schedule-text-pre" class='prettyprint prettyprinted'>
+ <ul id="schedule-list" class='nolinenums noindent'>
+ </ul>
+ </pre>
+ </div>
+ <div id='text-placeholder' width="0px" height="0px" style="position: absolute; top:100000px;" ><svg><text text-anchor="right">
+ <tspan white-space="inherit" id="text-measure"/>
+ </text></svg></div>
+ </div>
+ <div id="right">
+ <span id="disassembly-toolbox">
+ <select id="event-selector"></select>
+ </span>
+ <div id='disassembly'>
+ <pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
+ <ul id='disassembly-list' class='nolinenums noindent'>
+ </ul>
+ </pre>
+ </div>
+ </div>
+ <div id="source-collapse" class="collapse-pane">
+ <input id="source-expand" type="image" title="show source"
+ src="right-arrow.png" class="button-input-invisible">
+ <input id="source-shrink" type="image" title="hide source"
+ src="left-arrow.png" class="button-input">
+ </div>
+ <div id="disassembly-collapse" class="collapse-pane">
+ <input id="disassembly-expand" type="image" title="show disassembly"
+ src="left-arrow.png" class="button-input">
+ <input id="disassembly-shrink" type="image" title="hide disassembly"
+ src="right-arrow.png" class="button-input-invisible">
+ </div>
+ <script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>
+ <script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
+ <script src="https://cdn.jsdelivr.net/filesaver.js/0.1/FileSaver.min.js"></script>
+ <script src="monkey.js"></script>
+ <script src="util.js"></script>
+ <script src="lang-disassembly.js"></script>
+ <script src="node.js"></script>
+ <script src="edge.js"></script>
+ <script src="selection.js"></script>
+ <script src="selection-broker.js"></script>
+ <script src="constants.js"></script>
+ <script src="view.js"></script>
+ <script src="text-view.js"></script>
+ <script src="empty-view.js"></script>
+ <script src="code-view.js"></script>
+ <script src="graph-layout.js"></script>
+ <script src="graph-view.js"></script>
+ <script src="schedule-view.js"></script>
+ <script src="disassembly-view.js"></script>
+ <script src="turbo-visualizer.js"></script>
+ </body>
+</html>
diff --git a/deps/v8/tools/turbolizer/lang-disassembly.js b/deps/v8/tools/turbolizer/lang-disassembly.js
new file mode 100644
index 0000000000..590f9fd804
--- /dev/null
+++ b/deps/v8/tools/turbolizer/lang-disassembly.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
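+// Register a minimal code-prettify language handler so ";; debug: position"
+// annotations in the disassembly are styled as comments.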
+PR.registerLangHandler(
+ PR.createSimpleLexer(
+ [
+ [PR.PR_STRING, /^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$))/, null, '\''],
+ [PR.PR_PLAIN, /^\s+/, null, ' \r\n\t\xA0']
+ ],
+ [ // fallthroughStylePatterns
+ [PR.PR_COMMENT, /;; debug: position \d+/, null],
+ ]),
+ ['disassembly']);
diff --git a/deps/v8/tools/turbolizer/layout-icon.png b/deps/v8/tools/turbolizer/layout-icon.png
new file mode 100644
index 0000000000..95a517afa6
--- /dev/null
+++ b/deps/v8/tools/turbolizer/layout-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/left-arrow.png b/deps/v8/tools/turbolizer/left-arrow.png
new file mode 100644
index 0000000000..fc0603e8c3
--- /dev/null
+++ b/deps/v8/tools/turbolizer/left-arrow.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/live.png b/deps/v8/tools/turbolizer/live.png
new file mode 100644
index 0000000000..ac72bb93e8
--- /dev/null
+++ b/deps/v8/tools/turbolizer/live.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/monkey.js b/deps/v8/tools/turbolizer/monkey.js
new file mode 100644
index 0000000000..129f8b3268
--- /dev/null
+++ b/deps/v8/tools/turbolizer/monkey.js
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
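+// Return elements fanning out from the middle of the array: i=0 yields the
+// middle element, i=1 the one to its left, i=2 the one to its right, etc.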
+Array.prototype.getStaggeredFromMiddle = function(i) {
+ if (i >= this.length) {
+ throw("getStaggeredFromMiddle: OOB");
+ }
+ var middle = Math.floor(this.length / 2);
+ var index = middle + (((i % 2) == 0) ? (i / 2) : (((1 - i) / 2) - 1));
+ return this[index];
+}
+
+Array.prototype.contains = function(obj) {
+ var i = this.length;
+ while (i--) {
+ if (this[i] === obj) {
+ return true;
+ }
+ }
+ return false;
+}
+
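+// Round raw up to the next multiple of `multiple`, e.g. alignUp(23, 10) == 30;
+// exact multiples are returned unchanged.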
+Math.alignUp = function(raw, multiple) {
+ return Math.floor((raw + multiple - 1) / multiple) * multiple;
+}
diff --git a/deps/v8/tools/turbolizer/node.js b/deps/v8/tools/turbolizer/node.js
new file mode 100644
index 0000000000..3656e5d7e5
--- /dev/null
+++ b/deps/v8/tools/turbolizer/node.js
@@ -0,0 +1,147 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var TYPE_HEIGHT = 25;
+var DEFAULT_NODE_BUBBLE_RADIUS = 12;
+var NODE_INPUT_WIDTH = 50;
+var MINIMUM_NODE_INPUT_APPROACH = 15 + 2 * DEFAULT_NODE_BUBBLE_RADIUS;
+var MINIMUM_NODE_OUTPUT_APPROACH = 15;
+
+function isNodeInitiallyVisible(node) {
+ return node.cfg;
+}
+
+var Node = {
+ isControl: function() {
+ return this.control;
+ },
+ isInput: function() {
+ return this.opcode == 'Parameter' || this.opcode.endsWith('Constant');
+ },
+ isLive: function() {
+ return this.live !== false;
+ },
+ isJavaScript: function() {
+ return this.opcode.startsWith('JS');
+ },
+ isSimplified: function() {
+    if (this.isJavaScript()) return false;
+ return this.opcode.endsWith('Phi') ||
+ this.opcode.startsWith('Boolean') ||
+ this.opcode.startsWith('Number') ||
+ this.opcode.startsWith('String') ||
+ this.opcode.startsWith('Change') ||
+ this.opcode.startsWith('Object') ||
+ this.opcode.startsWith('Reference') ||
+ this.opcode.startsWith('Any') ||
+ this.opcode.endsWith('ToNumber') ||
+ (this.opcode == 'AnyToBoolean') ||
+ (this.opcode.startsWith('Load') && this.opcode.length > 4) ||
+ (this.opcode.startsWith('Store') && this.opcode.length > 5);
+ },
+ isMachine: function() {
+ return !(this.isControl() || this.isInput() ||
+ this.isJavaScript() || this.isSimplified());
+ },
+ getTotalNodeWidth: function() {
+ var inputWidth = this.inputs.length * NODE_INPUT_WIDTH;
+ return Math.max(inputWidth, this.width);
+ },
+ getTitle: function() {
+ var propsString;
+ if (this.properties === undefined) {
+ propsString = "";
+ } else if (this.properties === "") {
+ propsString = "no properties";
+ } else {
+ propsString = "[" + this.properties + "]";
+ }
+ return this.title + "\n" + propsString + "\n" + this.opinfo;
+ },
+ getDisplayLabel: function() {
+ var result = this.id + ":" + this.label;
+ if (result.length > 40) {
+ return this.id + ":" + this.opcode;
+ } else {
+ return result;
+ }
+ },
+ getType: function() {
+ return this.type;
+ },
+ getDisplayType: function() {
+ var type_string = this.type;
+ if (type_string == undefined) return "";
+ if (type_string.length > 24) {
+ type_string = type_string.substr(0, 25) + "...";
+ }
+ return type_string;
+ },
+ deepestInputRank: function() {
+ var deepestRank = 0;
+ this.inputs.forEach(function(e) {
+ if (e.isVisible() && !e.isBackEdge()) {
+ if (e.source.rank > deepestRank) {
+ deepestRank = e.source.rank;
+ }
+ }
+ });
+ return deepestRank;
+ },
+ areAnyOutputsVisible: function() {
+ var visibleCount = 0;
+ this.outputs.forEach(function(e) { if (e.isVisible()) ++visibleCount; });
+ if (this.outputs.length == visibleCount) return 2;
+ if (visibleCount != 0) return 1;
+ return 0;
+ },
+ setOutputVisibility: function(v) {
+ var result = false;
+ this.outputs.forEach(function(e) {
+ e.visible = v;
+ if (v) {
+ if (!e.target.visible) {
+ e.target.visible = true;
+ result = true;
+ }
+ }
+ });
+ return result;
+ },
+ setInputVisibility: function(i, v) {
+ var edge = this.inputs[i];
+ edge.visible = v;
+ if (v) {
+ if (!edge.source.visible) {
+ edge.source.visible = true;
+ return true;
+ }
+ }
+ return false;
+ },
+ getInputApproach: function(index) {
+ return this.y - MINIMUM_NODE_INPUT_APPROACH -
+      (index % 4) * MINIMUM_EDGE_SEPARATION - DEFAULT_NODE_BUBBLE_RADIUS;
+ },
+ getOutputApproach: function(graph, index) {
+      return this.y + this.outputApproach + graph.getNodeHeight(this) +
+        DEFAULT_NODE_BUBBLE_RADIUS;
+ },
+ getInputX: function(index) {
+ var result = this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2) +
+ (index - this.inputs.length + 1) * NODE_INPUT_WIDTH;
+ return result;
+ },
+ getOutputX: function() {
+ return this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2);
+ },
+ getFunctionRelativeSourcePosition: function(graph) {
+ return this.pos - graph.sourcePosition;
+ },
+ hasBackEdges: function() {
+ return (this.opcode == "Loop") ||
+ ((this.opcode == "Phi" || this.opcode == "EffectPhi") &&
+ this.inputs[this.inputs.length - 1].source.opcode == "Loop");
+ }
+};
diff --git a/deps/v8/tools/turbolizer/right-arrow.png b/deps/v8/tools/turbolizer/right-arrow.png
new file mode 100644
index 0000000000..ef3964346f
--- /dev/null
+++ b/deps/v8/tools/turbolizer/right-arrow.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/schedule-view.js b/deps/v8/tools/turbolizer/schedule-view.js
new file mode 100644
index 0000000000..2cd49c991f
--- /dev/null
+++ b/deps/v8/tools/turbolizer/schedule-view.js
@@ -0,0 +1,128 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+class ScheduleView extends TextView {
+ constructor(id, broker) {
+ super(id, broker, null, false);
+ let view = this;
+ let BLOCK_STYLE = {
+ css: 'tag'
+ };
+ const BLOCK_HEADER_STYLE = {
+ css: 'com',
+ block_id: -1,
+ location: function(text) {
+ let matches = /\d+/.exec(text);
+ if (!matches) return undefined;
+ BLOCK_HEADER_STYLE.block_id = Number(matches[0]);
+ return {
+ block_id: BLOCK_HEADER_STYLE.block_id
+ };
+ },
+ };
+ const BLOCK_LINK_STYLE = {
+ css: 'tag',
+ link: function(text) {
+ let id = Number(text.substr(1));
+ view.select(function(location) { return location.block_id == id; }, true, true);
+ }
+ };
+ const ID_STYLE = {
+ css: 'tag',
+ location: function(text) {
+ let matches = /\d+/.exec(text);
+ return {
+ node_id: Number(matches[0]),
+ block_id: BLOCK_HEADER_STYLE.block_id
+ };
+ },
+ };
+ const ID_LINK_STYLE = {
+ css: 'tag',
+ link: function(text) {
+ let id = Number(text);
+ view.select(function(location) { return location.node_id == id; }, true, true);
+ }
+ };
+ const NODE_STYLE = { css: 'kwd' };
+ const GOTO_STYLE = { css: 'kwd',
+ goto_id: -2,
+ location: function(text) {
+ return {
+ node_id: GOTO_STYLE.goto_id--,
+ block_id: BLOCK_HEADER_STYLE.block_id
+ };
+ }
+    };
+ const ARROW_STYLE = { css: 'kwd' };
+ let patterns = [
+ [
+ [/^--- BLOCK B\d+/, BLOCK_HEADER_STYLE, 1],
+ [/^\s+\d+: /, ID_STYLE, 2],
+ [/^\s+Goto/, GOTO_STYLE, 6],
+ [/^.*/, null, -1]
+ ],
+ [
+ [/^ +/, null],
+ [/^\(deferred\)/, BLOCK_HEADER_STYLE],
+ [/^B\d+/, BLOCK_LINK_STYLE],
+ [/^<-/, ARROW_STYLE],
+ [/^->/, ARROW_STYLE],
+ [/^,/, null],
+ [/^---/, BLOCK_HEADER_STYLE, -1]
+ ],
+ // Parse opcode including []
+ [
+ [/^[A-Za-z0-9_]+(\[.*\])?$/, NODE_STYLE, -1],
+ [/^[A-Za-z0-9_]+(\[.*\])?/, NODE_STYLE, 3]
+ ],
+ // Parse optional parameters
+ [
+ [/^ /, null, 4],
+ [/^\(/, null],
+ [/^\d+/, ID_LINK_STYLE],
+ [/^, /, null],
+ [/^\)$/, null, -1],
+ [/^\)/, null, 4],
+ ],
+ [
+ [/^ -> /, ARROW_STYLE, 5],
+ [/^.*/, null, -1]
+ ],
+ [
+ [/^B\d+$/, BLOCK_LINK_STYLE, -1],
+ [/^B\d+/, BLOCK_LINK_STYLE],
+ [/^, /, null]
+ ],
+ [
+ [/^ -> /, ARROW_STYLE],
+ [/^B\d+$/, BLOCK_LINK_STYLE, -1]
+ ]
+ ];
+ this.setPatterns(patterns);
+ }
+
+ initializeContent(data, rememberedSelection) {
+ super.initializeContent(data, rememberedSelection);
+    if (!rememberedSelection) return;
+    var locations = [];
+ for (var id of rememberedSelection) {
+ locations.push({ node_id : id });
+ }
+ this.selectLocations(locations, true, true);
+ }
+
+ detachSelection() {
+ var selection = this.selection.detachSelection();
+ var s = new Set();
+ for (var i of selection) {
+ if (i.location.node_id != undefined && i.location.node_id > 0) {
+ s.add(i.location.node_id);
+ }
+    }
+ return s;
+ }
+}
diff --git a/deps/v8/tools/turbolizer/search.png b/deps/v8/tools/turbolizer/search.png
new file mode 100644
index 0000000000..12dc3e3469
--- /dev/null
+++ b/deps/v8/tools/turbolizer/search.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/search2.png b/deps/v8/tools/turbolizer/search2.png
new file mode 100644
index 0000000000..88dd193809
--- /dev/null
+++ b/deps/v8/tools/turbolizer/search2.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/selection-broker.js b/deps/v8/tools/turbolizer/selection-broker.js
new file mode 100644
index 0000000000..822cf1ce1f
--- /dev/null
+++ b/deps/v8/tools/turbolizer/selection-broker.js
@@ -0,0 +1,99 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var SelectionBroker = function() {
+ this.brokers = [];
+ this.dispatching = false;
+ this.lastDispatchingHandler = null;
+ this.nodePositionMap = [];
+ this.sortedPositionList = [];
+ this.positionNodeMap = [];
+};
+
+SelectionBroker.prototype.addSelectionHandler = function(handler) {
+ this.brokers.push(handler);
+};
+
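+// Derives a sorted source-position list and a position-ordered node list from
+// the node-id -> position map found in the loaded JSON.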
+SelectionBroker.prototype.setNodePositionMap = function(map) {
+ let broker = this;
+ if (!map) return;
+ broker.nodePositionMap = map;
+ broker.positionNodeMap = [];
+ broker.sortedPositionList = [];
+ let next = 0;
+ for (let i in broker.nodePositionMap) {
+ broker.sortedPositionList[next] = Number(broker.nodePositionMap[i]);
+ broker.positionNodeMap[next++] = i;
+ }
+ broker.sortedPositionList = sortUnique(broker.sortedPositionList,
+ function(a,b) { return a - b; });
+ this.positionNodeMap.sort(function(a,b) {
+ let result = broker.nodePositionMap[a] - broker.nodePositionMap[b];
+ if (result != 0) return result;
+ return a - b;
+ });
+};
+
+SelectionBroker.prototype.select = function(from, locations, selected) {
+ let broker = this;
+ if (!broker.dispatching) {
+ broker.lastDispatchingHandler = from;
+ try {
+ broker.dispatching = true;
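+      // Normalize each location: copy the known fields and, when only a node
+      // id is given, derive a source position range from the node position map.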
+ let enrichLocations = function(locations) {
+        let result = [];
+ for (let location of locations) {
+ let newLocation = {};
+ if (location.pos_start != undefined) {
+ newLocation.pos_start = location.pos_start;
+ }
+ if (location.pos_end != undefined) {
+ newLocation.pos_end = location.pos_end;
+ }
+ if (location.node_id != undefined) {
+ newLocation.node_id = location.node_id;
+ }
+ if (location.block_id != undefined) {
+ newLocation.block_id = location.block_id;
+ }
+ if (newLocation.pos_start == undefined &&
+ newLocation.pos_end == undefined &&
+ newLocation.node_id != undefined) {
+ if (broker.nodePositionMap && broker.nodePositionMap[location.node_id]) {
+ newLocation.pos_start = broker.nodePositionMap[location.node_id];
+              newLocation.pos_end = newLocation.pos_start + 1;
+ }
+ }
+ result.push(newLocation);
+ }
+ return result;
+ }
+ locations = enrichLocations(locations);
+ for (var b of this.brokers) {
+ if (b != from) {
+ b.brokeredSelect(locations, selected);
+ }
+ }
+ }
+ finally {
+ broker.dispatching = false;
+ }
+ }
+};
+
+SelectionBroker.prototype.clear = function(from) {
+ this.lastDispatchingHandler = null;
+ if (!this.dispatching) {
+ try {
+ this.dispatching = true;
+ this.brokers.forEach(function(b) {
+ if (b != from) {
+ b.brokeredClear();
+ }
+ });
+ } finally {
+ this.dispatching = false;
+ }
+ }
+};
diff --git a/deps/v8/tools/turbolizer/selection.js b/deps/v8/tools/turbolizer/selection.js
new file mode 100644
index 0000000000..26f1bde197
--- /dev/null
+++ b/deps/v8/tools/turbolizer/selection.js
@@ -0,0 +1,108 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var Selection = function(handler) {
+ this.handler = handler;
+ this.selectionBase = null;
+ this.lastSelection = null;
+ this.selection = new Set();
+};
+
+
+Selection.prototype.isEmpty = function() {
+ return this.selection.size == 0;
+};
+
+
+Selection.prototype.clear = function() {
+ var handler = this.handler;
+ this.selectionBase = null;
+ this.lastSelection = null;
+ handler.select(this.selection, false);
+ handler.clear();
+ this.selection = new Set();
+};
+
+
+Selection.prototype.select = function(s, isSelected) {
+ var handler = this.handler;
+ if (!(Symbol.iterator in Object(s))) { s = [s]; }
+ if (isSelected) {
+ let first = true;
+ for (let i of s) {
+ if (first) {
+ this.selectionBase = i;
+ this.lastSelection = i;
+ first = false;
+ }
+ this.selection.add(i);
+ }
+ handler.select(this.selection, true);
+ } else {
+ let unselectSet = new Set();
+ for (let i of s) {
+ if (this.selection.has(i)) {
+ unselectSet.add(i);
+ this.selection.delete(i);
+ }
+ }
+ handler.select(unselectSet, false);
+ }
+};
+
+
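+// Extends the selection from the anchor (selectionBase) to pos, using the
+// handler to compute which elements to add or remove.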
+Selection.prototype.extendTo = function(pos) {
+ if (pos == this.lastSelection || this.lastSelection === null) return;
+
+ var handler = this.handler;
+ var pos_diff = handler.selectionDifference(pos, true, this.lastSelection, false);
+ var unselect_diff = [];
+ if (pos_diff.length == 0) {
+ pos_diff = handler.selectionDifference(this.selectionBase, false, pos, true);
+ if (pos_diff.length != 0) {
+ unselect_diff = handler.selectionDifference(this.lastSelection, true, this.selectionBase, false);
+ this.selection = new Set();
+ this.selection.add(this.selectionBase);
+ for (var d of pos_diff) {
+ this.selection.add(d);
+ }
+ } else {
+ unselect_diff = handler.selectionDifference(this.lastSelection, true, pos, false);
+ for (var d of unselect_diff) {
+ this.selection.delete(d);
+ }
+ }
+ } else {
+ unselect_diff = handler.selectionDifference(this.selectionBase, false, this.lastSelection, true);
+    if (unselect_diff.length != 0) {
+ pos_diff = handler.selectionDifference(pos, true, this.selectionBase, false);
+ if (pos_diff.length == 0) {
+ unselect_diff = handler.selectionDifference(pos, false, this.lastSelection, true);
+ }
+ for (var d of unselect_diff) {
+ this.selection.delete(d);
+ }
+ }
+ if (pos_diff.length != 0) {
+ for (var d of pos_diff) {
+ this.selection.add(d);
+ }
+ }
+ }
+ handler.select(unselect_diff, false);
+ handler.select(pos_diff, true);
+ this.lastSelection = pos;
+};
+
+
+Selection.prototype.detachSelection = function() {
+ var result = new Set();
+ for (var i of this.selection) {
+ result.add(i);
+ }
+ this.clear();
+ return result;
+};
diff --git a/deps/v8/tools/turbolizer/text-view.js b/deps/v8/tools/turbolizer/text-view.js
new file mode 100644
index 0000000000..70d2a252ae
--- /dev/null
+++ b/deps/v8/tools/turbolizer/text-view.js
@@ -0,0 +1,296 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+class TextView extends View {
+ constructor(id, broker, patterns, allowSpanSelection) {
+ super(id, broker);
+ let view = this;
+ view.hide();
+ view.textListNode = view.divNode.getElementsByTagName('ul')[0];
+ view.fillerSvgElement = view.divElement.append("svg").attr('version','1.1').attr("width", "0");
+ view.patterns = patterns;
+ view.allowSpanSelection = allowSpanSelection;
+ view.nodeToLineMap = [];
+ var selectionHandler = {
+ clear: function() {
+ broker.clear(selectionHandler);
+ },
+ select: function(items, selected) {
+ for (let i of items) {
+ if (selected) {
+ i.classList.add("selected");
+ } else {
+ i.classList.remove("selected");
+ }
+ }
+ broker.clear(selectionHandler);
+ broker.select(selectionHandler, view.getLocations(items), selected);
+ },
+ selectionDifference: function(span1, inclusive1, span2, inclusive2) {
+ return null;
+ },
+ brokeredSelect: function(locations, selected) {
+ view.selectLocations(locations, selected, true);
+ },
+ brokeredClear: function() {
+ view.selection.clear();
+ }
+ };
+ view.selection = new Selection(selectionHandler);
+ broker.addSelectionHandler(selectionHandler);
+ }
+
+ setPatterns(patterns) {
+ let view = this;
+ view.patterns = patterns;
+ }
+
+ clearText() {
+ let view = this;
+ while (view.textListNode.firstChild) {
+ view.textListNode.removeChild(view.textListNode.firstChild);
+ }
+ }
+
+ sameLocation(l1, l2) {
+ let view = this;
+ if (l1.block_id != undefined && l2.block_id != undefined &&
+ l1.block_id == l2.block_id && l1.node_id === undefined) {
+ return true;
+ }
+
+ if (l1.address != undefined && l1.address == l2.address) {
+ return true;
+ }
+
+ let node1 = l1.node_id;
+ let node2 = l2.node_id;
+
+    if (node1 === undefined || node2 === undefined) {
+      if (l1.pos_start === undefined || l2.pos_start === undefined) {
+ return false;
+ }
+ if (l1.pos_start == -1 || l2.pos_start == -1) {
+ return false;
+ }
+ if (l1.pos_start < l2.pos_start) {
+ return l1.pos_end > l2.pos_start;
+    } else {
+ return l1.pos_start < l2.pos_end;
+ }
+ }
+
+ return l1.node_id == l2.node_id;
+ }
+
+ selectLocations(locations, selected, makeVisible) {
+ let view = this;
+ let s = new Set();
+ for (let l of locations) {
+ for (let i = 0; i < view.textListNode.children.length; ++i) {
+ let child = view.textListNode.children[i];
+ if (child.location != undefined && view.sameLocation(l, child.location)) {
+ s.add(child);
+ }
+ }
+ }
+ view.selectCommon(s, selected, makeVisible);
+ }
+
+ getLocations(items) {
+ let result = [];
+ for (let i of items) {
+ if (i.location) {
+ result.push(i.location);
+ }
+ }
+ return result;
+ }
+
+ createFragment(text, style) {
+ let view = this;
+ let span = document.createElement("SPAN");
+ span.onmousedown = function(e) {
+ view.mouseDownSpan(span, e);
+ }
+ if (style != undefined) {
+ span.classList.add(style);
+ }
+ span.innerText = text;
+ return span;
+ }
+
+ appendFragment(li, fragment) {
+ li.appendChild(fragment);
+ }
+
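+  // Tokenizes one line with the active pattern set. Each pattern entry is
+  // [regex, style, nextPatternSet]: a match emits a styled fragment, consumes
+  // the matched text, and may switch pattern sets; -1 marks a state in which
+  // the line is allowed to end.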
+ processLine(line) {
+ let view = this;
+ let result = [];
+ let patternSet = 0;
+ while (true) {
+ let beforeLine = line;
+ for (let pattern of view.patterns[patternSet]) {
+ let matches = line.match(pattern[0]);
+ if (matches != null) {
+ if (matches[0] != '') {
+ let style = pattern[1] != null ? pattern[1] : {};
+ let text = matches[0];
+ if (text != '') {
+ let fragment = view.createFragment(matches[0], style.css);
+ if (style.link) {
+ fragment.classList.add('linkable-text');
+ fragment.link = style.link;
+ }
+ result.push(fragment);
+ if (style.location != undefined) {
+ let location = style.location(text);
+ if (location != undefined) {
+ fragment.location = location;
+ }
+ }
+ }
+ line = line.substr(matches[0].length);
+ }
+ let nextPatternSet = patternSet;
+ if (pattern.length > 2) {
+ nextPatternSet = pattern[2];
+ }
+ if (line == "") {
+ if (nextPatternSet != -1) {
+ throw("illegal parsing state in text-view in patternSet" + patternSet);
+ }
+ return result;
+ }
+ patternSet = nextPatternSet;
+ break;
+ }
+ }
+ if (beforeLine == line) {
+ throw("input not consumed in text-view in patternSet" + patternSet);
+ }
+ }
+ }
+
+ select(s, selected, makeVisible) {
+ let view = this;
+ view.selection.clear();
+ view.selectCommon(s, selected, makeVisible);
+ }
+
+ selectCommon(s, selected, makeVisible) {
+ let view = this;
+ let firstSelect = makeVisible && view.selection.isEmpty();
+ if ((typeof s) === 'function') {
+ for (let i = 0; i < view.textListNode.children.length; ++i) {
+ let child = view.textListNode.children[i];
+ if (child.location && s(child.location)) {
+ if (firstSelect) {
+ makeContainerPosVisible(view.parentNode, child.offsetTop);
+ firstSelect = false;
+ }
+ view.selection.select(child, selected);
+ }
+ }
+ } else if (typeof s[Symbol.iterator] === 'function') {
+ if (firstSelect) {
+ for (let i of s) {
+ makeContainerPosVisible(view.parentNode, i.offsetTop);
+ break;
+ }
+ }
+ view.selection.select(s, selected);
+ } else {
+ if (firstSelect) {
+ makeContainerPosVisible(view.parentNode, s.offsetTop);
+ }
+ view.selection.select(s, selected);
+ }
+ }
+
+ mouseDownLine(li, e) {
+ let view = this;
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ view.selection.clear();
+ }
+ if (li.location != undefined) {
+ view.selectLocations([li.location], true, false);
+ }
+ }
+
+ mouseDownSpan(span, e) {
+ let view = this;
+ if (view.allowSpanSelection) {
+ e.stopPropagation();
+ if (!e.shiftKey) {
+ view.selection.clear();
+ }
+      view.select(span, true);
+ } else if (span.link) {
+ span.link(span.textContent);
+ e.stopPropagation();
+ }
+ }
+
+ processText(text) {
+ let view = this;
+ let textLines = text.split(/[\n]/);
+ let lineNo = 0;
+ for (let line of textLines) {
+ let li = document.createElement("LI");
+ li.onmousedown = function(e) {
+ view.mouseDownLine(li, e);
+ }
+ li.className = "nolinenums";
+ li.lineNo = lineNo++;
+ let fragments = view.processLine(line);
+ for (let fragment of fragments) {
+ view.appendFragment(li, fragment);
+ }
+ let lineLocation = view.lineLocation(li);
+ if (lineLocation != undefined) {
+ li.location = lineLocation;
+ }
+ view.textListNode.appendChild(li);
+ }
+ }
+
+ initializeContent(data, rememberedSelection) {
+ let view = this;
+ view.selection.clear();
+ view.clearText();
+ view.processText(data);
+ var fillerSize = document.documentElement.clientHeight -
+ view.textListNode.clientHeight;
+ if (fillerSize < 0) {
+ fillerSize = 0;
+ }
+ view.fillerSvgElement.attr("height", fillerSize);
+ }
+
+ deleteContent() {
+ }
+
+ isScrollable() {
+ return true;
+ }
+
+ detachSelection() {
+ return null;
+ }
+
+ lineLocation(li) {
+ let view = this;
+ for (let i = 0; i < li.children.length; ++i) {
+ let fragment = li.children[i];
+ if (fragment.location != undefined && !view.allowSpanSelection) {
+ return fragment.location;
+ }
+ }
+ }
+}
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css
new file mode 100644
index 0000000000..8e2bab282d
--- /dev/null
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.css
@@ -0,0 +1,348 @@
+.visible-transition {
+ transition-delay: 0s;
+ transition-duration: 1s;
+ transition-property: all;
+ transition-timing-function: ease;
+}
+
+.collapse-pane {
+ background: #A0A0A0;
+ bottom: 0;
+ position: absolute;
+ margin-bottom: 0.5em;
+ margin-right: 0.5em;
+ margin-left: 0.5em;
+ border-radius: 5px;
+ padding: 0.5em;
+ z-index: 5;
+ opacity: 0.7;
+ cursor: pointer;
+}
+
+.search-input {
+ vertical-align: middle;
+ width: 145px;
+ opacity: 1;
+}
+
+.button-input {
+ vertical-align: middle;
+ width: 24px;
+ opacity: 0.4;
+ cursor: pointer;
+}
+
+.button-input-toggled {
+ border-radius: 5px;
+ background-color: #505050;
+}
+
+.button-input:focus {
+ outline: none;
+}
+
+.button-input-invisible {
+ vertical-align: middle;
+ width: 0px;
+ visibility: hidden;
+}
+
+
+.selected {
+ background-color: #FFFF33;
+}
+
+.prettyprint ol.linenums > li {
+  list-style-type: decimal !important;
+}
+
+body {
+ margin: 0;
+ padding: 0;
+ overflow:hidden;
+ -webkit-touch-callout: none;
+ -webkit-user-select: none;
+ -khtml-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+}
+
+p {
+ text-align: center;
+ overflow: overlay;
+ position: relative;
+}
+
+marker {
+ fill: #080808;
+}
+
+g rect {
+ fill: #F0F0F0;
+ stroke: #080808;
+ stroke-width: 2px;
+}
+
+g.dead {
+ opacity: .5;
+}
+
+g.unsorted rect {
+ opacity: 0.5;
+}
+
+div.scrollable {
+  overflow-y: scroll;
+  overflow-x: hidden;
+}
+
+g.control rect {
+ fill: #EFCC00;
+ stroke: #080808;
+ stroke-width: 5px;
+}
+
+g.javascript rect {
+ fill: #DD7E6B;
+}
+
+g.simplified rect {
+ fill: #3C78D8;
+}
+
+g.machine rect {
+ fill: #6AA84F;
+}
+
+g.input rect {
+ fill: #CFE2F3;
+}
+
+g.selected rect {
+ fill: #FFFF33;
+}
+
+circle.bubbleStyle {
+ fill: #080808;
+ fill-opacity: 0.0;
+ stroke: #080808;
+ stroke-width: 2px;
+}
+
+circle.bubbleStyle:hover {
+ stroke-width: 3px;
+}
+
+circle.filledBubbleStyle {
+ fill: #080808;
+ stroke: #080808;
+ stroke-width: 2px;
+}
+
+circle.filledBubbleStyle:hover {
+ fill: #080808;
+ stroke-width: 3px;
+}
+
+circle.halfFilledBubbleStyle {
+ fill: #808080;
+ stroke: #101010;
+ stroke-width: 2px;
+}
+
+circle.halfFilledBubbleStyle:hover {
+ fill: #808080;
+ stroke-width: 3px;
+}
+
+path.effect {
+ fill: none;
+ stroke: #080808;
+ stroke-width: 4px;
+ cursor: default;
+}
+
+path.effect:hover {
+ stroke-width: 6px;
+}
+
+path.control {
+ fill: none;
+ stroke: #080808;
+ stroke-width: 4px;
+ cursor: default;
+}
+
+path.control:hover {
+ stroke-width: 6px;
+}
+
+path.value {
+ fill: none;
+ stroke: #888888;
+ stroke-width: 4px;
+ cursor: default;
+}
+
+path.value:hover {
+ stroke-width: 6px;
+}
+
+path.frame-state {
+ fill: none;
+ stroke: #080808;
+ stroke-width: 4px;
+ cursor: default;
+}
+
+path.frame-state:hover{
+ stroke-width: 6px;
+}
+
+path.hidden {
+ fill: none;
+ stroke-width: 0;
+}
+
+path.link.selected {
+ stroke: #FFFF33;
+}
+
+pre.prettyprint {
+ border: none !important;
+ padding: 0px;
+}
+
+li.L1,
+li.L3,
+li.L5,
+li.L7,
+li.L9 {
+ background: none !important
+}
+
+li.nolinenums {
+ list-style-type:none;
+}
+
+ul.noindent {
+ -webkit-padding-start: 0px;
+ -webkit-margin-before: 0px;
+ -webkit-margin-after: 0px;
+}
+
+input:hover, .collapse-pane:hover input {
+ opacity: 1;
+ cursor: pointer;
+}
+
+span.linkable-text {
+ text-decoration: underline;
+}
+
+span.linkable-text:hover {
+ cursor: pointer;
+ font-weight: bold;
+}
+
+#left {
+ float: left; height: 100%; background-color: #FFFFFF;
+ -webkit-transition: all 1s ease-in-out;
+ -moz-transition: all 1s ease-in-out;
+ -o-transition: all 1s ease-in-out;
+ transition: all .3s ease-in-out;
+ transition-property: width;
+}
+
+#middle {
+ float:left; height: 100%; background-color: #F8F8F8;
+ -webkit-transition: all 1s ease-in-out;
+ -moz-transition: all 1s ease-in-out;
+ -o-transition: all 1s ease-in-out;
+ transition: all .3s ease-in-out;
+ transition-property: width;
+}
+
+#right {
+ float: right; background-color: #FFFFFF;
+ -webkit-transition: all 1s ease-in-out;
+ -moz-transition: all 1s ease-in-out;
+ -o-transition: all 1s ease-in-out;
+ transition: all .3s ease-in-out;
+ transition-property: width;
+}
+
+#disassembly-collapse {
+ right: 0;
+}
+
+#source-collapse {
+ left: 0;
+}
+
+#graph-toolbox-anchor {
+ height: 0px;
+}
+
+#graph-toolbox {
+ position: relative;
+ top: 1em;
+ left: 0.7em;
+ border: 2px solid #eee8d5;
+ border-radius: 5px;
+ padding: 0.7em;
+ z-index: 5;
+ background: rgba(100%, 100%, 100%, 0.7);
+}
+
+#disassembly-toolbox {
+ position: relative;
+ top: 1em;
+ left: 0.7em;
+ border: 2px solid #eee8d5;
+ border-radius: 5px;
+ padding: 0.7em;
+ z-index: 5;
+}
+
+#load-file {
+ position: absolute;
+ top: 0;
+ right: 0;
+ margin-top: 0.5em;
+ margin-right: 0.5em;
+ z-index: 5;
+ opacity: 0.7;
+}
+
+#load-file input {
+ background: #A0A0A0;
+ border-radius: 5px;
+ padding: 0.5em;
+}
+
+#hidden-file-upload {
+ display: none;
+}
+
+.prof-low {
+ color: #888;
+}
+
+.prof-med {
+ color: #080;
+}
+
+.prof-high {
+ color: #800;
+}
+
+tspan {
+ font-size: 500%;
+ font-family: sans-serif;
+}
+
+text {
+ dominant-baseline: text-before-edge;
+}
\ No newline at end of file
diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.js b/deps/v8/tools/turbolizer/turbo-visualizer.js
new file mode 100644
index 0000000000..b8d7762605
--- /dev/null
+++ b/deps/v8/tools/turbolizer/turbo-visualizer.js
@@ -0,0 +1,257 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+document.onload = (function(d3){
+ "use strict";
+ var jsonObj;
+ var sourceExpandClassList = document.getElementById(SOURCE_EXPAND_ID).classList;
+ var sourceCollapseClassList = document.getElementById(SOURCE_COLLAPSE_ID).classList;
+ var sourceExpanded = sourceCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE);
+ var disassemblyExpandClassList = document.getElementById(DISASSEMBLY_EXPAND_ID).classList;
+ var disassemblyCollapseClassList = document.getElementById(DISASSEMBLY_COLLAPSE_ID).classList;
+ var disassemblyExpanded = disassemblyCollapseClassList.contains(COLLAPSE_PANE_BUTTON_VISIBLE);
+ var svg = null;
+ var graph = null;
+ var schedule = null;
+ var empty = null;
+ var currentPhaseView = null;
+ var disassemblyView = null;
+ var sourceView = null;
+ var selectionBroker = null;
+
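+  // Splits the window between the source, intermediate and generated panes
+  // according to which side panes are currently expanded.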
+ function updatePanes() {
+ if (sourceExpanded) {
+ if (disassemblyExpanded) {
+ d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "30%");
+ d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "40%");
+ d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "30%");
+ } else {
+ d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "50%");
+ d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%");
+ d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%");
+ }
+ } else {
+ if (disassemblyExpanded) {
+ d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%");
+ d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "50%");
+ d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "50%");
+ } else {
+ d3.select("#" + SOURCE_PANE_ID).style(WIDTH, "0%");
+ d3.select("#" + INTERMEDIATE_PANE_ID).style(WIDTH, "100%");
+ d3.select("#" + GENERATED_PANE_ID).style(WIDTH, "0%");
+ }
+ }
+ }
+
+ function getLastExpandedState(type, default_state) {
+ var state = window.sessionStorage.getItem("expandedState-"+type);
+ if (state === null) return default_state;
+ return state === 'true';
+ }
+
+ function setLastExpandedState(type, state) {
+ window.sessionStorage.setItem("expandedState-"+type, state);
+ }
+
+ function toggleSourceExpanded() {
+ setSourceExpanded(!sourceExpanded);
+ }
+
+ function setSourceExpanded(newState) {
+ sourceExpanded = newState;
+ setLastExpandedState("source", newState);
+ updatePanes();
+ if (newState) {
+ sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
+ sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ } else {
+ sourceCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ sourceCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ sourceExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
+ sourceExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ }
+ }
+
+ function toggleDisassemblyExpanded() {
+ setDisassemblyExpanded(!disassemblyExpanded);
+ }
+
+ function setDisassemblyExpanded(newState) {
+ disassemblyExpanded = newState;
+ setLastExpandedState("disassembly", newState);
+ updatePanes();
+ if (newState) {
+ disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
+ disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ } else {
+ disassemblyCollapseClassList.add(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ disassemblyCollapseClassList.remove(COLLAPSE_PANE_BUTTON_VISIBLE);
+ disassemblyExpandClassList.add(COLLAPSE_PANE_BUTTON_VISIBLE);
+ disassemblyExpandClassList.remove(COLLAPSE_PANE_BUTTON_INVISIBLE);
+ }
+ }
+
+ function hideCurrentPhase() {
+ var rememberedSelection = null;
+ if (currentPhaseView != null) {
+ rememberedSelection = currentPhaseView.detachSelection();
+ currentPhaseView.hide();
+ currentPhaseView = null;
+ }
+ return rememberedSelection;
+ }
+
+ function displayPhaseView(view, data) {
+ var rememberedSelection = hideCurrentPhase();
+ view.show(data, rememberedSelection);
+ d3.select("#middle").classed("scrollable", view.isScrollable());
+ currentPhaseView = view;
+ }
+
+ function displayPhase(phase) {
+ if (phase.type == 'graph') {
+ displayPhaseView(graph, phase.data);
+ } else if (phase.type == 'schedule') {
+ displayPhaseView(schedule, phase.data);
+ } else {
+ displayPhaseView(empty, null);
+ }
+ }
+
+ function fitPanesToParents() {
+ d3.select("#left").classed("scrollable", false)
+ d3.select("#right").classed("scrollable", false);
+
+ graph.fitGraphViewToWindow();
+ disassemblyView.resizeToParent();
+ sourceView.resizeToParent();
+
+ d3.select("#left").classed("scrollable", true);
+ d3.select("#right").classed("scrollable", true);
+ }
+
+ selectionBroker = new SelectionBroker();
+
+ function initializeHandlers(g) {
+ d3.select("#source-collapse").on("click", function(){
+      toggleSourceExpanded();
+ setTimeout(function(){
+ g.fitGraphViewToWindow();
+ }, 300);
+ });
+ d3.select("#disassembly-collapse").on("click", function(){
+ toggleDisassemblyExpanded();
+ setTimeout(function(){
+ g.fitGraphViewToWindow();
+ }, 300);
+ });
+ window.onresize = function(){
+ fitPanesToParents();
+ };
+ d3.select("#hidden-file-upload").on("change", function() {
+ if (window.File && window.FileReader && window.FileList) {
+ var uploadFile = this.files[0];
+ var filereader = new window.FileReader();
+ filereader.onload = function(){
+ var txtRes = filereader.result;
+ // If the JSON isn't properly terminated, assume compiler crashed and
+ // add best-guess empty termination
+ if (txtRes[txtRes.length-2] == ',') {
+ txtRes += '{"name":"disassembly","type":"disassembly","data":""}]}';
+ }
+ try{
+ jsonObj = JSON.parse(txtRes);
+
+ hideCurrentPhase();
+
+ selectionBroker.setNodePositionMap(jsonObj.nodePositions);
+
+ sourceView.initializeCode(jsonObj.source, jsonObj.sourcePosition);
+ disassemblyView.initializeCode(jsonObj.source);
+
+ var selectMenu = document.getElementById('display-selector');
+ var disassemblyPhase = null;
+ selectMenu.innerHTML = '';
+ for (var i = 0; i < jsonObj.phases.length; ++i) {
+ var optionElement = document.createElement("option");
+ optionElement.text = jsonObj.phases[i].name;
+ if (optionElement.text == 'disassembly') {
+ disassemblyPhase = jsonObj.phases[i];
+ } else {
+ selectMenu.add(optionElement, null);
+ }
+ }
+
+ var eventMenu = document.getElementById('event-selector');
+ eventMenu.innerHTML = '';
+ for (var event in jsonObj.eventCounts) {
+ var optionElement = document.createElement("option");
+ optionElement.text = event;
+ eventMenu.add(optionElement, null);
+ }
+ disassemblyView.initializePerfProfile(jsonObj.eventCounts);
+ disassemblyView.show(disassemblyPhase.data, null);
+
+ var initialPhaseIndex = +window.sessionStorage.getItem("lastSelectedPhase");
+ if (!(initialPhaseIndex in jsonObj.phases)) {
+ initialPhaseIndex = 0;
+ }
+
+ // We wish to show the remembered phase {lastSelectedPhase}, but
+ // this will crash if the first view we switch to is a
+ // ScheduleView. So we first switch to the first phase, which
+ // should never be a ScheduleView.
+ displayPhase(jsonObj.phases[0]);
+ displayPhase(jsonObj.phases[initialPhaseIndex]);
+ selectMenu.selectedIndex = initialPhaseIndex;
+
+ selectMenu.onchange = function(item) {
+ window.sessionStorage.setItem("lastSelectedPhase", selectMenu.selectedIndex);
+ displayPhase(jsonObj.phases[selectMenu.selectedIndex]);
+ }
+
+ eventMenu.onchange = function(item) {
+ disassemblyView.show(disassemblyView.data, null);
+ }
+
+ fitPanesToParents();
+
+ d3.select("#search-input").attr("value", window.sessionStorage.getItem("lastSearch") || "");
+
+ }
+ catch(err) {
+ window.console.log("caught exception, clearing session storage just in case");
+ window.sessionStorage.clear(); // just in case
+ window.console.log("showing error");
+ window.alert("Invalid TurboFan JSON file\n" +
+ "error: " + err.message);
+ return;
+ }
+ };
+ filereader.readAsText(uploadFile);
+ } else {
+ alert("Can't load graph");
+ }
+ });
+ }
+
+ sourceView = new CodeView(SOURCE_PANE_ID, PR, "", 0, selectionBroker);
+ disassemblyView = new DisassemblyView(DISASSEMBLY_PANE_ID, selectionBroker);
+ graph = new GraphView(d3, GRAPH_PANE_ID, [], [], selectionBroker);
+ schedule = new ScheduleView(SCHEDULE_PANE_ID, selectionBroker);
+ empty = new EmptyView(EMPTY_PANE_ID, selectionBroker);
+
+ initializeHandlers(graph);
+
+ setSourceExpanded(getLastExpandedState("source", true));
+ setDisassemblyExpanded(getLastExpandedState("disassembly", false));
+
+ displayPhaseView(empty, null);
+ fitPanesToParents();
+})(window.d3);
diff --git a/deps/v8/tools/turbolizer/types.png b/deps/v8/tools/turbolizer/types.png
new file mode 100644
index 0000000000..8fead8f079
--- /dev/null
+++ b/deps/v8/tools/turbolizer/types.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/upload-icon.png b/deps/v8/tools/turbolizer/upload-icon.png
new file mode 100644
index 0000000000..c1a289b76d
--- /dev/null
+++ b/deps/v8/tools/turbolizer/upload-icon.png
Binary files differ
diff --git a/deps/v8/tools/turbolizer/util.js b/deps/v8/tools/turbolizer/util.js
new file mode 100644
index 0000000000..282221af48
--- /dev/null
+++ b/deps/v8/tools/turbolizer/util.js
@@ -0,0 +1,80 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
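+// Scrolls container just enough that pos lies within the middle half of its
+// viewport.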
+function makeContainerPosVisible(container, pos) {
+ var height = container.offsetHeight;
+ var margin = Math.floor(height / 4);
+ if (pos < container.scrollTop + margin) {
+ pos -= margin;
+ if (pos < 0) pos = 0;
+ container.scrollTop = pos;
+ return;
+ }
+ if (pos > (container.scrollTop + 3 * margin)) {
+ pos = pos - 3 * margin;
+ if (pos < 0) pos = 0;
+ container.scrollTop = pos;
+ }
+}
+
+
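+// Binary search: index of the first element not less than value. The optional
+// compare and lookup arguments customize ordering and element access.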
+function lowerBound(a, value, compare, lookup) {
+ let first = 0;
+ let count = a.length;
+ while (count > 0) {
+ let step = Math.floor(count / 2);
+ let middle = first + step;
+ let middle_value = (lookup === undefined) ? a[middle] : lookup(a, middle);
+ let result = (compare === undefined) ? (middle_value < value) : compare(middle_value, value);
+ if (result) {
+ first = middle + 1;
+ count -= step + 1;
+ } else {
+ count = step;
+ }
+ }
+ return first;
+}
+
+
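+// Binary search: index of the first element greater than value.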
+function upperBound(a, value, compare, lookup) {
+ let first = 0;
+ let count = a.length;
+ while (count > 0) {
+ let step = Math.floor(count / 2);
+ let middle = first + step;
+ let middle_value = (lookup === undefined) ? a[middle] : lookup(a, middle);
+ let result = (compare === undefined) ? (value < middle_value) : compare(value, middle_value);
+ if (!result) {
+ first = middle + 1;
+ count -= step + 1;
+ } else {
+ count = step;
+ }
+ }
+ return first;
+}
+
+
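+// Sorts arr with comparator f and removes adjacent duplicates.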
+function sortUnique(arr, f) {
+  if (arr.length == 0) return arr;
+  arr = arr.sort(f);
+  let ret = [arr[0]];
+ for (var i = 1; i < arr.length; i++) {
+ if (arr[i-1] !== arr[i]) {
+ ret.push(arr[i]);
+ }
+ }
+ return ret;
+}
+
+// Partial application without binding the receiver
+function partial(f) {
+ var arguments1 = Array.prototype.slice.call(arguments, 1);
+ return function() {
+ var arguments2 = Array.from(arguments);
+ f.apply(this, arguments1.concat(arguments2));
+  };
+}
diff --git a/deps/v8/tools/turbolizer/view.js b/deps/v8/tools/turbolizer/view.js
new file mode 100644
index 0000000000..1ce1056a7f
--- /dev/null
+++ b/deps/v8/tools/turbolizer/view.js
@@ -0,0 +1,45 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
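+// Base class for turbolizer panes: wraps a container div and manages
+// attaching, sizing, and visibility of its content.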
+class View {
+ constructor(id, broker) {
+ this.divElement = d3.select("#" + id);
+ this.divNode = this.divElement[0][0];
+ this.parentNode = this.divNode.parentNode;
+ }
+
+ isScrollable() {
+ return false;
+ }
+
+ show(data, rememberedSelection) {
+ this.parentNode.appendChild(this.divElement[0][0]);
+ this.initializeContent(data, rememberedSelection);
+ this.resizeToParent();
+ this.divElement.attr(VISIBILITY, 'visible');
+ }
+
+ resizeToParent() {
+ var view = this;
+ var documentElement = document.documentElement;
+ var y;
+ if (this.parentNode.clientHeight)
+ y = Math.max(this.parentNode.clientHeight, documentElement.clientHeight);
+ else
+ y = documentElement.clientHeight;
+ this.parentNode.style.height = y + 'px';
+ }
+
+ hide() {
+ this.divElement.attr(VISIBILITY, 'hidden');
+ this.deleteContent();
+ this.parentNode.removeChild(this.divNode);
+ }
+
+ detachSelection() {
+ return null;
+ }
+}
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 39cfeb1707..69d73c2037 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -55,8 +55,8 @@ INSTANCE_TYPES = {
132: "MAP_TYPE",
133: "CODE_TYPE",
131: "ODDBALL_TYPE",
- 171: "CELL_TYPE",
- 174: "PROPERTY_CELL_TYPE",
+ 169: "CELL_TYPE",
+ 172: "PROPERTY_CELL_TYPE",
129: "HEAP_NUMBER_TYPE",
134: "MUTABLE_HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
@@ -85,210 +85,217 @@ INSTANCE_TYPES = {
160: "ALLOCATION_MEMENTO_TYPE",
159: "ALLOCATION_SITE_TYPE",
161: "SCRIPT_TYPE",
- 162: "CODE_CACHE_TYPE",
- 163: "POLYMORPHIC_CODE_CACHE_TYPE",
- 164: "TYPE_FEEDBACK_INFO_TYPE",
- 165: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 166: "BOX_TYPE",
- 175: "PROTOTYPE_INFO_TYPE",
- 176: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
- 169: "FIXED_ARRAY_TYPE",
+ 162: "TYPE_FEEDBACK_INFO_TYPE",
+ 163: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 164: "BOX_TYPE",
+ 173: "PROTOTYPE_INFO_TYPE",
+ 174: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
+ 167: "FIXED_ARRAY_TYPE",
148: "FIXED_DOUBLE_ARRAY_TYPE",
- 170: "SHARED_FUNCTION_INFO_TYPE",
- 172: "WEAK_CELL_TYPE",
- 173: "TRANSITION_ARRAY_TYPE",
- 179: "JS_MESSAGE_OBJECT_TYPE",
- 178: "JS_VALUE_TYPE",
- 180: "JS_DATE_TYPE",
- 181: "JS_OBJECT_TYPE",
- 182: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 183: "JS_GENERATOR_OBJECT_TYPE",
- 184: "JS_MODULE_TYPE",
- 185: "JS_GLOBAL_OBJECT_TYPE",
- 186: "JS_GLOBAL_PROXY_TYPE",
- 187: "JS_ARRAY_TYPE",
- 188: "JS_ARRAY_BUFFER_TYPE",
- 189: "JS_TYPED_ARRAY_TYPE",
- 190: "JS_DATA_VIEW_TYPE",
- 177: "JS_PROXY_TYPE",
- 191: "JS_SET_TYPE",
- 192: "JS_MAP_TYPE",
- 193: "JS_SET_ITERATOR_TYPE",
- 194: "JS_MAP_ITERATOR_TYPE",
- 195: "JS_ITERATOR_RESULT_TYPE",
+ 168: "SHARED_FUNCTION_INFO_TYPE",
+ 170: "WEAK_CELL_TYPE",
+ 171: "TRANSITION_ARRAY_TYPE",
+ 180: "JS_MESSAGE_OBJECT_TYPE",
+ 179: "JS_VALUE_TYPE",
+ 181: "JS_DATE_TYPE",
+ 183: "JS_OBJECT_TYPE",
+ 184: "JS_ARGUMENTS_TYPE",
+ 185: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 186: "JS_GENERATOR_OBJECT_TYPE",
+ 187: "JS_MODULE_TYPE",
+ 176: "JS_GLOBAL_OBJECT_TYPE",
+ 177: "JS_GLOBAL_PROXY_TYPE",
+ 182: "JS_API_OBJECT_TYPE",
+ 178: "JS_SPECIAL_API_OBJECT_TYPE",
+ 188: "JS_ARRAY_TYPE",
+ 189: "JS_ARRAY_BUFFER_TYPE",
+ 190: "JS_TYPED_ARRAY_TYPE",
+ 191: "JS_DATA_VIEW_TYPE",
+ 175: "JS_PROXY_TYPE",
+ 192: "JS_SET_TYPE",
+ 193: "JS_MAP_TYPE",
+ 194: "JS_SET_ITERATOR_TYPE",
+ 195: "JS_MAP_ITERATOR_TYPE",
196: "JS_WEAK_MAP_TYPE",
197: "JS_WEAK_SET_TYPE",
198: "JS_PROMISE_TYPE",
199: "JS_REGEXP_TYPE",
- 200: "JS_BOUND_FUNCTION_TYPE",
- 201: "JS_FUNCTION_TYPE",
- 167: "DEBUG_INFO_TYPE",
- 168: "BREAK_POINT_INFO_TYPE",
+ 200: "JS_ERROR_TYPE",
+ 201: "JS_BOUND_FUNCTION_TYPE",
+ 202: "JS_FUNCTION_TYPE",
+ 165: "DEBUG_INFO_TYPE",
+ 166: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
- 0x08081: (136, "ByteArrayMap"),
- 0x080ad: (132, "MetaMap"),
- 0x080d9: (131, "NullMap"),
- 0x08105: (169, "FixedArrayMap"),
- 0x08131: (4, "OneByteInternalizedStringMap"),
- 0x0815d: (138, "FreeSpaceMap"),
- 0x08189: (149, "OnePointerFillerMap"),
- 0x081b5: (149, "TwoPointerFillerMap"),
- 0x081e1: (131, "UndefinedMap"),
- 0x0820d: (129, "HeapNumberMap"),
- 0x08239: (131, "TheHoleMap"),
- 0x08265: (131, "BooleanMap"),
- 0x08291: (131, "UninitializedMap"),
- 0x082bd: (171, "CellMap"),
- 0x082e9: (174, "GlobalPropertyCellMap"),
- 0x08315: (170, "SharedFunctionInfoMap"),
- 0x08341: (134, "MutableHeapNumberMap"),
- 0x0836d: (130, "Float32x4Map"),
- 0x08399: (130, "Int32x4Map"),
- 0x083c5: (130, "Uint32x4Map"),
- 0x083f1: (130, "Bool32x4Map"),
- 0x0841d: (130, "Int16x8Map"),
- 0x08449: (130, "Uint16x8Map"),
- 0x08475: (130, "Bool16x8Map"),
- 0x084a1: (130, "Int8x16Map"),
- 0x084cd: (130, "Uint8x16Map"),
- 0x084f9: (130, "Bool8x16Map"),
- 0x08525: (169, "NativeContextMap"),
- 0x08551: (133, "CodeMap"),
- 0x0857d: (169, "ScopeInfoMap"),
- 0x085a9: (169, "FixedCOWArrayMap"),
- 0x085d5: (148, "FixedDoubleArrayMap"),
- 0x08601: (172, "WeakCellMap"),
- 0x0862d: (173, "TransitionArrayMap"),
- 0x08659: (68, "OneByteStringMap"),
- 0x08685: (169, "FunctionContextMap"),
- 0x086b1: (131, "NoInterceptorResultSentinelMap"),
- 0x086dd: (131, "ArgumentsMarkerMap"),
- 0x08709: (131, "ExceptionMap"),
- 0x08735: (131, "TerminationExceptionMap"),
- 0x08761: (169, "HashTableMap"),
- 0x0878d: (169, "OrderedHashTableMap"),
- 0x087b9: (128, "SymbolMap"),
- 0x087e5: (64, "StringMap"),
- 0x08811: (69, "ConsOneByteStringMap"),
- 0x0883d: (65, "ConsStringMap"),
- 0x08869: (67, "SlicedStringMap"),
- 0x08895: (71, "SlicedOneByteStringMap"),
- 0x088c1: (66, "ExternalStringMap"),
- 0x088ed: (74, "ExternalStringWithOneByteDataMap"),
- 0x08919: (70, "ExternalOneByteStringMap"),
- 0x08945: (70, "NativeSourceStringMap"),
- 0x08971: (82, "ShortExternalStringMap"),
- 0x0899d: (90, "ShortExternalStringWithOneByteDataMap"),
- 0x089c9: (0, "InternalizedStringMap"),
- 0x089f5: (2, "ExternalInternalizedStringMap"),
- 0x08a21: (10, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x08a4d: (6, "ExternalOneByteInternalizedStringMap"),
- 0x08a79: (18, "ShortExternalInternalizedStringMap"),
- 0x08aa5: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x08ad1: (22, "ShortExternalOneByteInternalizedStringMap"),
- 0x08afd: (86, "ShortExternalOneByteStringMap"),
- 0x08b29: (140, "FixedUint8ArrayMap"),
- 0x08b55: (139, "FixedInt8ArrayMap"),
- 0x08b81: (142, "FixedUint16ArrayMap"),
- 0x08bad: (141, "FixedInt16ArrayMap"),
- 0x08bd9: (144, "FixedUint32ArrayMap"),
- 0x08c05: (143, "FixedInt32ArrayMap"),
- 0x08c31: (145, "FixedFloat32ArrayMap"),
- 0x08c5d: (146, "FixedFloat64ArrayMap"),
- 0x08c89: (147, "FixedUint8ClampedArrayMap"),
- 0x08cb5: (169, "SloppyArgumentsElementsMap"),
- 0x08ce1: (169, "CatchContextMap"),
- 0x08d0d: (169, "WithContextMap"),
- 0x08d39: (169, "BlockContextMap"),
- 0x08d65: (169, "ModuleContextMap"),
- 0x08d91: (169, "ScriptContextMap"),
- 0x08dbd: (169, "ScriptContextTableMap"),
- 0x08de9: (179, "JSMessageObjectMap"),
- 0x08e15: (135, "ForeignMap"),
- 0x08e41: (181, "NeanderMap"),
- 0x08e6d: (181, "ExternalMap"),
- 0x08e99: (160, "AllocationMementoMap"),
- 0x08ec5: (159, "AllocationSiteMap"),
- 0x08ef1: (163, "PolymorphicCodeCacheMap"),
- 0x08f1d: (161, "ScriptMap"),
- 0x08f75: (137, "BytecodeArrayMap"),
- 0x08fa1: (166, "BoxMap"),
- 0x08fcd: (150, "AccessorInfoMap"),
- 0x08ff9: (151, "AccessorPairMap"),
- 0x09025: (152, "AccessCheckInfoMap"),
- 0x09051: (153, "InterceptorInfoMap"),
- 0x0907d: (154, "CallHandlerInfoMap"),
- 0x090a9: (155, "FunctionTemplateInfoMap"),
- 0x090d5: (156, "ObjectTemplateInfoMap"),
- 0x09101: (162, "CodeCacheMap"),
- 0x0912d: (164, "TypeFeedbackInfoMap"),
- 0x09159: (165, "AliasedArgumentsEntryMap"),
- 0x09185: (167, "DebugInfoMap"),
- 0x091b1: (168, "BreakPointInfoMap"),
- 0x091dd: (175, "PrototypeInfoMap"),
- 0x09209: (176, "SloppyBlockWithEvalContextExtensionMap"),
+ 0x08101: (138, "FreeSpaceMap"),
+ 0x0812d: (132, "MetaMap"),
+ 0x08159: (131, "NullMap"),
+ 0x08185: (167, "FixedArrayMap"),
+ 0x081b1: (4, "OneByteInternalizedStringMap"),
+ 0x081dd: (149, "OnePointerFillerMap"),
+ 0x08209: (149, "TwoPointerFillerMap"),
+ 0x08235: (131, "UninitializedMap"),
+ 0x08261: (131, "UndefinedMap"),
+ 0x0828d: (129, "HeapNumberMap"),
+ 0x082b9: (131, "TheHoleMap"),
+ 0x082e5: (131, "BooleanMap"),
+ 0x08311: (136, "ByteArrayMap"),
+ 0x0833d: (167, "FixedCOWArrayMap"),
+ 0x08369: (167, "HashTableMap"),
+ 0x08395: (128, "SymbolMap"),
+ 0x083c1: (68, "OneByteStringMap"),
+ 0x083ed: (167, "ScopeInfoMap"),
+ 0x08419: (168, "SharedFunctionInfoMap"),
+ 0x08445: (133, "CodeMap"),
+ 0x08471: (167, "FunctionContextMap"),
+ 0x0849d: (169, "CellMap"),
+ 0x084c9: (170, "WeakCellMap"),
+ 0x084f5: (172, "GlobalPropertyCellMap"),
+ 0x08521: (135, "ForeignMap"),
+ 0x0854d: (171, "TransitionArrayMap"),
+ 0x08579: (131, "NoInterceptorResultSentinelMap"),
+ 0x085a5: (131, "ArgumentsMarkerMap"),
+ 0x085d1: (167, "NativeContextMap"),
+ 0x085fd: (167, "ModuleContextMap"),
+ 0x08629: (167, "ScriptContextMap"),
+ 0x08655: (167, "BlockContextMap"),
+ 0x08681: (167, "CatchContextMap"),
+ 0x086ad: (167, "WithContextMap"),
+ 0x086d9: (148, "FixedDoubleArrayMap"),
+ 0x08705: (134, "MutableHeapNumberMap"),
+ 0x08731: (167, "OrderedHashTableMap"),
+ 0x0875d: (167, "SloppyArgumentsElementsMap"),
+ 0x08789: (180, "JSMessageObjectMap"),
+ 0x087b5: (183, "NeanderMap"),
+ 0x087e1: (137, "BytecodeArrayMap"),
+ 0x0880d: (64, "StringMap"),
+ 0x08839: (69, "ConsOneByteStringMap"),
+ 0x08865: (65, "ConsStringMap"),
+ 0x08891: (67, "SlicedStringMap"),
+ 0x088bd: (71, "SlicedOneByteStringMap"),
+ 0x088e9: (66, "ExternalStringMap"),
+ 0x08915: (74, "ExternalStringWithOneByteDataMap"),
+ 0x08941: (70, "ExternalOneByteStringMap"),
+ 0x0896d: (82, "ShortExternalStringMap"),
+ 0x08999: (90, "ShortExternalStringWithOneByteDataMap"),
+ 0x089c5: (0, "InternalizedStringMap"),
+ 0x089f1: (2, "ExternalInternalizedStringMap"),
+ 0x08a1d: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x08a49: (6, "ExternalOneByteInternalizedStringMap"),
+ 0x08a75: (18, "ShortExternalInternalizedStringMap"),
+ 0x08aa1: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x08acd: (22, "ShortExternalOneByteInternalizedStringMap"),
+ 0x08af9: (86, "ShortExternalOneByteStringMap"),
+ 0x08b25: (130, "Float32x4Map"),
+ 0x08b51: (130, "Int32x4Map"),
+ 0x08b7d: (130, "Uint32x4Map"),
+ 0x08ba9: (130, "Bool32x4Map"),
+ 0x08bd5: (130, "Int16x8Map"),
+ 0x08c01: (130, "Uint16x8Map"),
+ 0x08c2d: (130, "Bool16x8Map"),
+ 0x08c59: (130, "Int8x16Map"),
+ 0x08c85: (130, "Uint8x16Map"),
+ 0x08cb1: (130, "Bool8x16Map"),
+ 0x08cdd: (131, "ExceptionMap"),
+ 0x08d09: (131, "TerminationExceptionMap"),
+ 0x08d35: (131, "OptimizedOutMap"),
+ 0x08d61: (131, "StaleRegisterMap"),
+ 0x08d8d: (167, "DebugEvaluateContextMap"),
+ 0x08db9: (167, "ScriptContextTableMap"),
+ 0x08de5: (167, "UnseededNumberDictionaryMap"),
+ 0x08e11: (183, "ExternalMap"),
+ 0x08e3d: (86, "NativeSourceStringMap"),
+ 0x08e69: (140, "FixedUint8ArrayMap"),
+ 0x08e95: (139, "FixedInt8ArrayMap"),
+ 0x08ec1: (142, "FixedUint16ArrayMap"),
+ 0x08eed: (141, "FixedInt16ArrayMap"),
+ 0x08f19: (144, "FixedUint32ArrayMap"),
+ 0x08f45: (143, "FixedInt32ArrayMap"),
+ 0x08f71: (145, "FixedFloat32ArrayMap"),
+ 0x08f9d: (146, "FixedFloat64ArrayMap"),
+ 0x08fc9: (147, "FixedUint8ClampedArrayMap"),
+ 0x08ff5: (161, "ScriptMap"),
+ 0x09021: (159, "AllocationSiteMap"),
+ 0x0904d: (160, "AllocationMementoMap"),
+ 0x09079: (150, "AccessorInfoMap"),
+ 0x090a5: (164, "BoxMap"),
+ 0x090d1: (151, "AccessorPairMap"),
+ 0x090fd: (152, "AccessCheckInfoMap"),
+ 0x09129: (153, "InterceptorInfoMap"),
+ 0x09155: (154, "CallHandlerInfoMap"),
+ 0x09181: (155, "FunctionTemplateInfoMap"),
+ 0x091ad: (156, "ObjectTemplateInfoMap"),
+ 0x091d9: (162, "TypeFeedbackInfoMap"),
+ 0x09205: (163, "AliasedArgumentsEntryMap"),
+ 0x09231: (165, "DebugInfoMap"),
+ 0x0925d: (166, "BreakPointInfoMap"),
+ 0x09289: (173, "PrototypeInfoMap"),
+ 0x092b5: (174, "SloppyBlockWithEvalContextExtensionMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
- ("OLD_SPACE", 0x08081): "NullValue",
- ("OLD_SPACE", 0x08095): "EmptyDescriptorArray",
- ("OLD_SPACE", 0x0809d): "EmptyFixedArray",
- ("OLD_SPACE", 0x080c9): "UndefinedValue",
- ("OLD_SPACE", 0x080f5): "NanValue",
- ("OLD_SPACE", 0x08105): "TheHoleValue",
- ("OLD_SPACE", 0x08129): "TrueValue",
- ("OLD_SPACE", 0x08161): "FalseValue",
- ("OLD_SPACE", 0x08189): "empty_string",
- ("OLD_SPACE", 0x08195): "hidden_string",
- ("OLD_SPACE", 0x081a1): "UninitializedValue",
- ("OLD_SPACE", 0x081d1): "EmptyByteArray",
- ("OLD_SPACE", 0x081d9): "NoInterceptorResultSentinel",
- ("OLD_SPACE", 0x08219): "ArgumentsMarker",
- ("OLD_SPACE", 0x08249): "Exception",
- ("OLD_SPACE", 0x08275): "TerminationException",
- ("OLD_SPACE", 0x082ad): "NumberStringCache",
- ("OLD_SPACE", 0x08ab5): "SingleCharacterStringCache",
- ("OLD_SPACE", 0x08f4d): "StringSplitCache",
- ("OLD_SPACE", 0x09355): "RegExpMultipleCache",
- ("OLD_SPACE", 0x0975d): "EmptyFixedUint8Array",
- ("OLD_SPACE", 0x0976d): "EmptyFixedInt8Array",
- ("OLD_SPACE", 0x0977d): "EmptyFixedUint16Array",
- ("OLD_SPACE", 0x0978d): "EmptyFixedInt16Array",
- ("OLD_SPACE", 0x0979d): "EmptyFixedUint32Array",
- ("OLD_SPACE", 0x097ad): "EmptyFixedInt32Array",
- ("OLD_SPACE", 0x097bd): "EmptyFixedFloat32Array",
- ("OLD_SPACE", 0x097cd): "EmptyFixedFloat64Array",
- ("OLD_SPACE", 0x097dd): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x097ed): "InfinityValue",
- ("OLD_SPACE", 0x097fd): "MinusZeroValue",
- ("OLD_SPACE", 0x0980d): "MinusInfinityValue",
- ("OLD_SPACE", 0x0981d): "MessageListeners",
- ("OLD_SPACE", 0x09839): "CodeStubs",
- ("OLD_SPACE", 0x0feb9): "DummyVector",
- ("OLD_SPACE", 0x13fed): "NonMonomorphicCache",
- ("OLD_SPACE", 0x14601): "PolymorphicCodeCache",
- ("OLD_SPACE", 0x14609): "NativesSourceCache",
- ("OLD_SPACE", 0x1488d): "ExperimentalNativesSourceCache",
- ("OLD_SPACE", 0x148c1): "ExtraNativesSourceCache",
- ("OLD_SPACE", 0x148e1): "ExperimentalExtraNativesSourceCache",
- ("OLD_SPACE", 0x148ed): "EmptyScript",
- ("OLD_SPACE", 0x1492d): "IntrinsicFunctionNames",
- ("OLD_SPACE", 0x2e919): "EmptyPropertiesDictionary",
- ("OLD_SPACE", 0x2e965): "UndefinedCell",
- ("OLD_SPACE", 0x2e96d): "ObservationState",
- ("OLD_SPACE", 0x2e979): "ScriptList",
- ("OLD_SPACE", 0x2eb01): "ClearedOptimizedCodeMap",
- ("OLD_SPACE", 0x2eb0d): "EmptyWeakCell",
- ("OLD_SPACE", 0x534d1): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x5351d): "WeakObjectToCodeTable",
- ("OLD_SPACE", 0x53631): "ArrayProtector",
- ("OLD_SPACE", 0x53641): "EmptyPropertyCell",
- ("OLD_SPACE", 0x53651): "NoScriptSharedFunctionInfos",
- ("OLD_SPACE", 0x59cf1): "StringTable",
- ("CODE_SPACE", 0x1a001): "JsEntryCode",
- ("CODE_SPACE", 0x1e721): "JsConstructEntryCode",
+ ("OLD_SPACE", 0x08101): "NullValue",
+ ("OLD_SPACE", 0x0811d): "EmptyDescriptorArray",
+ ("OLD_SPACE", 0x08125): "EmptyFixedArray",
+ ("OLD_SPACE", 0x08151): "UninitializedValue",
+ ("OLD_SPACE", 0x081a1): "UndefinedValue",
+ ("OLD_SPACE", 0x081bd): "NanValue",
+ ("OLD_SPACE", 0x081cd): "TheHoleValue",
+ ("OLD_SPACE", 0x081f9): "TrueValue",
+ ("OLD_SPACE", 0x08239): "FalseValue",
+ ("OLD_SPACE", 0x08269): "empty_string",
+ ("OLD_SPACE", 0x08275): "NoInterceptorResultSentinel",
+ ("OLD_SPACE", 0x082bd): "ArgumentsMarker",
+ ("OLD_SPACE", 0x082f5): "EmptyByteArray",
+ ("OLD_SPACE", 0x082fd): "EmptyWeakCell",
+ ("OLD_SPACE", 0x0830d): "InfinityValue",
+ ("OLD_SPACE", 0x0831d): "MinusZeroValue",
+ ("OLD_SPACE", 0x0832d): "MinusInfinityValue",
+ ("OLD_SPACE", 0x09961): "EmptyLiteralsArray",
+ ("OLD_SPACE", 0x0996d): "ClearedOptimizedCodeMap",
+ ("OLD_SPACE", 0x09979): "Exception",
+ ("OLD_SPACE", 0x099ad): "TerminationException",
+ ("OLD_SPACE", 0x099ed): "OptimizedOut",
+ ("OLD_SPACE", 0x09a25): "StaleRegister",
+ ("OLD_SPACE", 0x09a5d): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x09a6d): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x09a7d): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x09a8d): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x09a9d): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x09aad): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x09abd): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x09acd): "EmptyFixedFloat64Array",
+ ("OLD_SPACE", 0x09add): "EmptyFixedUint8ClampedArray",
+ ("OLD_SPACE", 0x09aed): "EmptyScript",
+ ("OLD_SPACE", 0x09b2d): "UndefinedCell",
+ ("OLD_SPACE", 0x09b35): "EmptySloppyArgumentsElements",
+ ("OLD_SPACE", 0x09b45): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x09b91): "DummyVector",
+ ("OLD_SPACE", 0x09c09): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x09c19): "ArrayProtector",
+ ("OLD_SPACE", 0x09c29): "IsConcatSpreadableProtector",
+ ("OLD_SPACE", 0x09c31): "HasInstanceProtector",
+ ("OLD_SPACE", 0x09c41): "SpeciesProtector",
+ ("OLD_SPACE", 0x09c49): "NumberStringCache",
+ ("OLD_SPACE", 0x0a451): "SingleCharacterStringCache",
+ ("OLD_SPACE", 0x0a909): "StringSplitCache",
+ ("OLD_SPACE", 0x0ad11): "RegExpMultipleCache",
+ ("OLD_SPACE", 0x0b119): "NativesSourceCache",
+ ("OLD_SPACE", 0x0b2e5): "ExperimentalNativesSourceCache",
+ ("OLD_SPACE", 0x0b309): "ExtraNativesSourceCache",
+ ("OLD_SPACE", 0x0b325): "ExperimentalExtraNativesSourceCache",
+ ("OLD_SPACE", 0x0b331): "IntrinsicFunctionNames",
+ ("OLD_SPACE", 0x244bd): "EmptyPropertiesDictionary",
+ ("OLD_SPACE", 0x24509): "ScriptList",
+ ("OLD_SPACE", 0x3fd85): "CodeStubs",
+ ("OLD_SPACE", 0x49285): "WeakObjectToCodeTable",
+ ("OLD_SPACE", 0x49399): "WeakNewSpaceObjectToCodeList",
+ ("OLD_SPACE", 0x493e1): "NoScriptSharedFunctionInfos",
+ ("OLD_SPACE", 0x50cf9): "MessageListeners",
+ ("OLD_SPACE", 0x5494d): "StringTable",
+ ("CODE_SPACE", 0x184a1): "JsConstructEntryCode",
+ ("CODE_SPACE", 0x23fe1): "JsEntryCode",
}
diff --git a/deps/v8/tools/verify_source_deps.py b/deps/v8/tools/verify_source_deps.py
index 50caace79c..56e3156550 100755
--- a/deps/v8/tools/verify_source_deps.py
+++ b/deps/v8/tools/verify_source_deps.py
@@ -24,10 +24,10 @@ V8_INCLUDE_BASE = os.path.join(V8_BASE, 'include')
GYP_FILES = [
os.path.join(V8_BASE, 'src', 'd8.gyp'),
+ os.path.join(V8_BASE, 'src', 'v8.gyp'),
os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'),
os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'),
os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
- os.path.join(V8_BASE, 'tools', 'gyp', 'v8.gyp'),
os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
]
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index d1395f5d91..2229d87d53 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -5,4 +5,5 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up..
+The Smi looked at them when a crazy v8-autoroll account showed up......
+The autoroller bought a round of Himbeerbrause. Suddenly ...